Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +1 -0
- deepseek/lib/libtinfow.so.6 +3 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/__pycache__/action_dist.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/__pycache__/preprocessors.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/__pycache__/utils.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__init__.py +11 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/fcnet.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/noop.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_action_dist.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_distributions.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_modelv2.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/attention_net.py +573 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/complex_input_net.py +214 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/fcnet.py +148 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__init__.py +17 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/multi_head_attention.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/noisy_layer.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/skip_connection.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/gru_gate.py +58 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/multi_head_attention.py +61 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/noisy_layer.py +118 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/relative_multi_head_attention.py +147 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/skip_connection.py +46 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/misc.py +90 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/recurrent_net.py +292 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/tf_action_dist.py +735 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/tf_distributions.py +552 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/tf_modelv2.py +142 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/visionnet.py +264 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/__init__.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/misc.cpython-310.pyc +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/models/utils.py +280 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/actors.py +258 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/annotations.py +213 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/exploration.py +209 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/gaussian_noise.py +247 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/ornstein_uhlenbeck_noise.py +273 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/parameter_noise.py +440 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/per_worker_gaussian_noise.py +49 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/slate_soft_q.py +46 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/filter.py +420 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/images.py +60 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/numpy.py +606 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/__init__.py +44 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/simple_replay_buffer.py +0 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/serialization.py +418 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/tensor_dtype.py +65 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/test_utils.py +1817 -0
- deepseek/lib/python3.10/site-packages/ray/rllib/utils/torch_utils.py +745 -0
- evalkit_tf433/lib/python3.10/site-packages/google/api_core/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -1418,3 +1418,4 @@ evalkit_tf433/lib/libasan.so filter=lfs diff=lfs merge=lfs -text
|
|
| 1418 |
evalkit_tf433/lib/python3.10/lib2to3/tests/__pycache__/test_fixers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1419 |
deepseek/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1420 |
deepseek/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 1418 |
evalkit_tf433/lib/python3.10/lib2to3/tests/__pycache__/test_fixers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1419 |
deepseek/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1420 |
deepseek/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1421 |
+
deepseek/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
|
deepseek/lib/libtinfow.so.6
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5fa5905616132c8011f70288c40108dec369707481d75643646e9040878a958a
|
| 3 |
+
size 287080
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/__pycache__/action_dist.cpython-310.pyc
ADDED
|
Binary file (4.33 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/__pycache__/preprocessors.cpython-310.pyc
ADDED
|
Binary file (14.4 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (6.51 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
|
| 2 |
+
from ray.rllib.models.tf.fcnet import FullyConnectedNetwork
|
| 3 |
+
from ray.rllib.models.tf.recurrent_net import RecurrentNetwork
|
| 4 |
+
from ray.rllib.models.tf.visionnet import VisionNetwork
|
| 5 |
+
|
| 6 |
+
__all__ = [
|
| 7 |
+
"FullyConnectedNetwork",
|
| 8 |
+
"RecurrentNetwork",
|
| 9 |
+
"TFModelV2",
|
| 10 |
+
"VisionNetwork",
|
| 11 |
+
]
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/fcnet.cpython-310.pyc
ADDED
|
Binary file (3.49 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/noop.cpython-310.pyc
ADDED
|
Binary file (960 Bytes). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_action_dist.cpython-310.pyc
ADDED
|
Binary file (28.2 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_distributions.cpython-310.pyc
ADDED
|
Binary file (20.9 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_modelv2.cpython-310.pyc
ADDED
|
Binary file (5.05 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/attention_net.py
ADDED
|
@@ -0,0 +1,573 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
[1] - Attention Is All You Need - Vaswani, Jones, Shazeer, Parmar,
|
| 3 |
+
Uszkoreit, Gomez, Kaiser - Google Brain/Research, U Toronto - 2017.
|
| 4 |
+
https://arxiv.org/pdf/1706.03762.pdf
|
| 5 |
+
[2] - Stabilizing Transformers for Reinforcement Learning - E. Parisotto
|
| 6 |
+
et al. - DeepMind - 2019. https://arxiv.org/pdf/1910.06764.pdf
|
| 7 |
+
[3] - Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context.
|
| 8 |
+
Z. Dai, Z. Yang, et al. - Carnegie Mellon U - 2019.
|
| 9 |
+
https://www.aclweb.org/anthology/P19-1285.pdf
|
| 10 |
+
"""
|
| 11 |
+
import gymnasium as gym
|
| 12 |
+
from gymnasium.spaces import Box, Discrete, MultiDiscrete
|
| 13 |
+
import numpy as np
|
| 14 |
+
import tree # pip install dm_tree
|
| 15 |
+
from typing import Any, Dict, Optional, Union
|
| 16 |
+
|
| 17 |
+
from ray.rllib.models.modelv2 import ModelV2
|
| 18 |
+
from ray.rllib.models.tf.layers import (
|
| 19 |
+
GRUGate,
|
| 20 |
+
RelativeMultiHeadAttention,
|
| 21 |
+
SkipConnection,
|
| 22 |
+
)
|
| 23 |
+
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
|
| 24 |
+
from ray.rllib.models.tf.recurrent_net import RecurrentNetwork
|
| 25 |
+
from ray.rllib.policy.sample_batch import SampleBatch
|
| 26 |
+
from ray.rllib.policy.view_requirement import ViewRequirement
|
| 27 |
+
from ray.rllib.utils.annotations import OldAPIStack, override
|
| 28 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 29 |
+
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
|
| 30 |
+
from ray.rllib.utils.tf_utils import flatten_inputs_to_1d_tensor, one_hot
|
| 31 |
+
from ray.rllib.utils.typing import ModelConfigDict, TensorType, List
|
| 32 |
+
from ray.rllib.utils.deprecation import deprecation_warning
|
| 33 |
+
from ray.util import log_once
|
| 34 |
+
|
| 35 |
+
tf1, tf, tfv = try_import_tf()
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@OldAPIStack
|
| 39 |
+
class PositionwiseFeedforward(tf.keras.layers.Layer if tf else object):
|
| 40 |
+
"""A 2x linear layer with ReLU activation in between described in [1].
|
| 41 |
+
|
| 42 |
+
Each timestep coming from the attention head will be passed through this
|
| 43 |
+
layer separately.
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
def __init__(
|
| 47 |
+
self,
|
| 48 |
+
out_dim: int,
|
| 49 |
+
hidden_dim: int,
|
| 50 |
+
output_activation: Optional[Any] = None,
|
| 51 |
+
**kwargs,
|
| 52 |
+
):
|
| 53 |
+
super().__init__(**kwargs)
|
| 54 |
+
|
| 55 |
+
self._hidden_layer = tf.keras.layers.Dense(
|
| 56 |
+
hidden_dim,
|
| 57 |
+
activation=tf.nn.relu,
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
self._output_layer = tf.keras.layers.Dense(
|
| 61 |
+
out_dim, activation=output_activation
|
| 62 |
+
)
|
| 63 |
+
if log_once("positionwise_feedforward_tf"):
|
| 64 |
+
deprecation_warning(
|
| 65 |
+
old="rllib.models.tf.attention_net.PositionwiseFeedforward",
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
def call(self, inputs: TensorType, **kwargs) -> TensorType:
|
| 69 |
+
del kwargs
|
| 70 |
+
output = self._hidden_layer(inputs)
|
| 71 |
+
return self._output_layer(output)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@OldAPIStack
|
| 75 |
+
class TrXLNet(RecurrentNetwork):
|
| 76 |
+
"""A TrXL net Model described in [1]."""
|
| 77 |
+
|
| 78 |
+
def __init__(
|
| 79 |
+
self,
|
| 80 |
+
observation_space: gym.spaces.Space,
|
| 81 |
+
action_space: gym.spaces.Space,
|
| 82 |
+
num_outputs: int,
|
| 83 |
+
model_config: ModelConfigDict,
|
| 84 |
+
name: str,
|
| 85 |
+
num_transformer_units: int,
|
| 86 |
+
attention_dim: int,
|
| 87 |
+
num_heads: int,
|
| 88 |
+
head_dim: int,
|
| 89 |
+
position_wise_mlp_dim: int,
|
| 90 |
+
):
|
| 91 |
+
"""Initializes a TrXLNet object.
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
num_transformer_units: The number of Transformer repeats to
|
| 95 |
+
use (denoted L in [2]).
|
| 96 |
+
attention_dim: The input and output dimensions of one
|
| 97 |
+
Transformer unit.
|
| 98 |
+
num_heads: The number of attention heads to use in parallel.
|
| 99 |
+
Denoted as `H` in [3].
|
| 100 |
+
head_dim: The dimension of a single(!) attention head within
|
| 101 |
+
a multi-head attention unit. Denoted as `d` in [3].
|
| 102 |
+
position_wise_mlp_dim: The dimension of the hidden layer
|
| 103 |
+
within the position-wise MLP (after the multi-head attention
|
| 104 |
+
block within one Transformer unit). This is the size of the
|
| 105 |
+
first of the two layers within the PositionwiseFeedforward. The
|
| 106 |
+
second layer always has size=`attention_dim`.
|
| 107 |
+
"""
|
| 108 |
+
if log_once("trxl_net_tf"):
|
| 109 |
+
deprecation_warning(
|
| 110 |
+
old="rllib.models.tf.attention_net.TrXLNet",
|
| 111 |
+
)
|
| 112 |
+
super().__init__(
|
| 113 |
+
observation_space, action_space, num_outputs, model_config, name
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
self.num_transformer_units = num_transformer_units
|
| 117 |
+
self.attention_dim = attention_dim
|
| 118 |
+
self.num_heads = num_heads
|
| 119 |
+
self.head_dim = head_dim
|
| 120 |
+
self.max_seq_len = model_config["max_seq_len"]
|
| 121 |
+
self.obs_dim = observation_space.shape[0]
|
| 122 |
+
|
| 123 |
+
inputs = tf.keras.layers.Input(
|
| 124 |
+
shape=(self.max_seq_len, self.obs_dim), name="inputs"
|
| 125 |
+
)
|
| 126 |
+
E_out = tf.keras.layers.Dense(attention_dim)(inputs)
|
| 127 |
+
|
| 128 |
+
for _ in range(self.num_transformer_units):
|
| 129 |
+
MHA_out = SkipConnection(
|
| 130 |
+
RelativeMultiHeadAttention(
|
| 131 |
+
out_dim=attention_dim,
|
| 132 |
+
num_heads=num_heads,
|
| 133 |
+
head_dim=head_dim,
|
| 134 |
+
input_layernorm=False,
|
| 135 |
+
output_activation=None,
|
| 136 |
+
),
|
| 137 |
+
fan_in_layer=None,
|
| 138 |
+
)(E_out)
|
| 139 |
+
E_out = SkipConnection(
|
| 140 |
+
PositionwiseFeedforward(attention_dim, position_wise_mlp_dim)
|
| 141 |
+
)(MHA_out)
|
| 142 |
+
E_out = tf.keras.layers.LayerNormalization(axis=-1)(E_out)
|
| 143 |
+
|
| 144 |
+
# Postprocess TrXL output with another hidden layer and compute values.
|
| 145 |
+
logits = tf.keras.layers.Dense(
|
| 146 |
+
self.num_outputs, activation=tf.keras.activations.linear, name="logits"
|
| 147 |
+
)(E_out)
|
| 148 |
+
|
| 149 |
+
self.base_model = tf.keras.models.Model([inputs], [logits])
|
| 150 |
+
|
| 151 |
+
@override(RecurrentNetwork)
|
| 152 |
+
def forward_rnn(
|
| 153 |
+
self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType
|
| 154 |
+
) -> (TensorType, List[TensorType]):
|
| 155 |
+
# To make Attention work with current RLlib's ModelV2 API:
|
| 156 |
+
# We assume `state` is the history of L recent observations (all
|
| 157 |
+
# concatenated into one tensor) and append the current inputs to the
|
| 158 |
+
# end and only keep the most recent (up to `max_seq_len`). This allows
|
| 159 |
+
# us to deal with timestep-wise inference and full sequence training
|
| 160 |
+
# within the same logic.
|
| 161 |
+
observations = state[0]
|
| 162 |
+
observations = tf.concat((observations, inputs), axis=1)[:, -self.max_seq_len :]
|
| 163 |
+
logits = self.base_model([observations])
|
| 164 |
+
T = tf.shape(inputs)[1] # Length of input segment (time).
|
| 165 |
+
logits = logits[:, -T:]
|
| 166 |
+
|
| 167 |
+
return logits, [observations]
|
| 168 |
+
|
| 169 |
+
@override(RecurrentNetwork)
|
| 170 |
+
def get_initial_state(self) -> List[np.ndarray]:
|
| 171 |
+
# State is the T last observations concat'd together into one Tensor.
|
| 172 |
+
# Plus all Transformer blocks' E(l) outputs concat'd together (up to
|
| 173 |
+
# tau timesteps).
|
| 174 |
+
return [np.zeros((self.max_seq_len, self.obs_dim), np.float32)]
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
class GTrXLNet(RecurrentNetwork):
|
| 178 |
+
"""A GTrXL net Model described in [2].
|
| 179 |
+
|
| 180 |
+
This is still in an experimental phase.
|
| 181 |
+
Can be used as a drop-in replacement for LSTMs in PPO and IMPALA.
|
| 182 |
+
|
| 183 |
+
To use this network as a replacement for an RNN, configure your Algorithm
|
| 184 |
+
as follows:
|
| 185 |
+
|
| 186 |
+
Examples:
|
| 187 |
+
>> config["model"]["custom_model"] = GTrXLNet
|
| 188 |
+
>> config["model"]["max_seq_len"] = 10
|
| 189 |
+
>> config["model"]["custom_model_config"] = {
|
| 190 |
+
>> num_transformer_units=1,
|
| 191 |
+
>> attention_dim=32,
|
| 192 |
+
>> num_heads=2,
|
| 193 |
+
>> memory_inference=100,
|
| 194 |
+
>> memory_training=50,
|
| 195 |
+
>> etc..
|
| 196 |
+
>> }
|
| 197 |
+
"""
|
| 198 |
+
|
| 199 |
+
def __init__(
|
| 200 |
+
self,
|
| 201 |
+
observation_space: gym.spaces.Space,
|
| 202 |
+
action_space: gym.spaces.Space,
|
| 203 |
+
num_outputs: Optional[int],
|
| 204 |
+
model_config: ModelConfigDict,
|
| 205 |
+
name: str,
|
| 206 |
+
*,
|
| 207 |
+
num_transformer_units: int = 1,
|
| 208 |
+
attention_dim: int = 64,
|
| 209 |
+
num_heads: int = 2,
|
| 210 |
+
memory_inference: int = 50,
|
| 211 |
+
memory_training: int = 50,
|
| 212 |
+
head_dim: int = 32,
|
| 213 |
+
position_wise_mlp_dim: int = 32,
|
| 214 |
+
init_gru_gate_bias: float = 2.0,
|
| 215 |
+
):
|
| 216 |
+
"""Initializes a GTrXLNet instance.
|
| 217 |
+
|
| 218 |
+
Args:
|
| 219 |
+
num_transformer_units: The number of Transformer repeats to
|
| 220 |
+
use (denoted L in [2]).
|
| 221 |
+
attention_dim: The input and output dimensions of one
|
| 222 |
+
Transformer unit.
|
| 223 |
+
num_heads: The number of attention heads to use in parallel.
|
| 224 |
+
Denoted as `H` in [3].
|
| 225 |
+
memory_inference: The number of timesteps to concat (time
|
| 226 |
+
axis) and feed into the next transformer unit as inference
|
| 227 |
+
input. The first transformer unit will receive this number of
|
| 228 |
+
past observations (plus the current one), instead.
|
| 229 |
+
memory_training: The number of timesteps to concat (time
|
| 230 |
+
axis) and feed into the next transformer unit as training
|
| 231 |
+
input (plus the actual input sequence of len=max_seq_len).
|
| 232 |
+
The first transformer unit will receive this number of
|
| 233 |
+
past observations (plus the input sequence), instead.
|
| 234 |
+
head_dim: The dimension of a single(!) attention head within
|
| 235 |
+
a multi-head attention unit. Denoted as `d` in [3].
|
| 236 |
+
position_wise_mlp_dim: The dimension of the hidden layer
|
| 237 |
+
within the position-wise MLP (after the multi-head attention
|
| 238 |
+
block within one Transformer unit). This is the size of the
|
| 239 |
+
first of the two layers within the PositionwiseFeedforward. The
|
| 240 |
+
second layer always has size=`attention_dim`.
|
| 241 |
+
init_gru_gate_bias: Initial bias values for the GRU gates
|
| 242 |
+
(two GRUs per Transformer unit, one after the MHA, one after
|
| 243 |
+
the position-wise MLP).
|
| 244 |
+
"""
|
| 245 |
+
super().__init__(
|
| 246 |
+
observation_space, action_space, num_outputs, model_config, name
|
| 247 |
+
)
|
| 248 |
+
|
| 249 |
+
self.num_transformer_units = num_transformer_units
|
| 250 |
+
self.attention_dim = attention_dim
|
| 251 |
+
self.num_heads = num_heads
|
| 252 |
+
self.memory_inference = memory_inference
|
| 253 |
+
self.memory_training = memory_training
|
| 254 |
+
self.head_dim = head_dim
|
| 255 |
+
self.max_seq_len = model_config["max_seq_len"]
|
| 256 |
+
self.obs_dim = observation_space.shape[0]
|
| 257 |
+
|
| 258 |
+
# Raw observation input (plus (None) time axis).
|
| 259 |
+
input_layer = tf.keras.layers.Input(shape=(None, self.obs_dim), name="inputs")
|
| 260 |
+
memory_ins = [
|
| 261 |
+
tf.keras.layers.Input(
|
| 262 |
+
shape=(None, self.attention_dim),
|
| 263 |
+
dtype=tf.float32,
|
| 264 |
+
name="memory_in_{}".format(i),
|
| 265 |
+
)
|
| 266 |
+
for i in range(self.num_transformer_units)
|
| 267 |
+
]
|
| 268 |
+
|
| 269 |
+
# Map observation dim to input/output transformer (attention) dim.
|
| 270 |
+
E_out = tf.keras.layers.Dense(self.attention_dim)(input_layer)
|
| 271 |
+
# Output, collected and concat'd to build the internal, tau-len
|
| 272 |
+
# Memory units used for additional contextual information.
|
| 273 |
+
memory_outs = [E_out]
|
| 274 |
+
|
| 275 |
+
# 2) Create L Transformer blocks according to [2].
|
| 276 |
+
for i in range(self.num_transformer_units):
|
| 277 |
+
# RelativeMultiHeadAttention part.
|
| 278 |
+
MHA_out = SkipConnection(
|
| 279 |
+
RelativeMultiHeadAttention(
|
| 280 |
+
out_dim=self.attention_dim,
|
| 281 |
+
num_heads=num_heads,
|
| 282 |
+
head_dim=head_dim,
|
| 283 |
+
input_layernorm=True,
|
| 284 |
+
output_activation=tf.nn.relu,
|
| 285 |
+
),
|
| 286 |
+
fan_in_layer=GRUGate(init_gru_gate_bias),
|
| 287 |
+
name="mha_{}".format(i + 1),
|
| 288 |
+
)(E_out, memory=memory_ins[i])
|
| 289 |
+
# Position-wise MLP part.
|
| 290 |
+
E_out = SkipConnection(
|
| 291 |
+
tf.keras.Sequential(
|
| 292 |
+
(
|
| 293 |
+
tf.keras.layers.LayerNormalization(axis=-1),
|
| 294 |
+
PositionwiseFeedforward(
|
| 295 |
+
out_dim=self.attention_dim,
|
| 296 |
+
hidden_dim=position_wise_mlp_dim,
|
| 297 |
+
output_activation=tf.nn.relu,
|
| 298 |
+
),
|
| 299 |
+
)
|
| 300 |
+
),
|
| 301 |
+
fan_in_layer=GRUGate(init_gru_gate_bias),
|
| 302 |
+
name="pos_wise_mlp_{}".format(i + 1),
|
| 303 |
+
)(MHA_out)
|
| 304 |
+
# Output of position-wise MLP == E(l-1), which is concat'd
|
| 305 |
+
# to the current Mem block (M(l-1)) to yield E~(l-1), which is then
|
| 306 |
+
# used by the next transformer block.
|
| 307 |
+
memory_outs.append(E_out)
|
| 308 |
+
|
| 309 |
+
self._logits = None
|
| 310 |
+
self._value_out = None
|
| 311 |
+
|
| 312 |
+
# Postprocess TrXL output with another hidden layer and compute values.
|
| 313 |
+
if num_outputs is not None:
|
| 314 |
+
self._logits = tf.keras.layers.Dense(
|
| 315 |
+
self.num_outputs, activation=None, name="logits"
|
| 316 |
+
)(E_out)
|
| 317 |
+
values_out = tf.keras.layers.Dense(1, activation=None, name="values")(E_out)
|
| 318 |
+
outs = [self._logits, values_out]
|
| 319 |
+
else:
|
| 320 |
+
outs = [E_out]
|
| 321 |
+
self.num_outputs = self.attention_dim
|
| 322 |
+
|
| 323 |
+
self.trxl_model = tf.keras.Model(
|
| 324 |
+
inputs=[input_layer] + memory_ins, outputs=outs + memory_outs[:-1]
|
| 325 |
+
)
|
| 326 |
+
|
| 327 |
+
self.trxl_model.summary()
|
| 328 |
+
|
| 329 |
+
# __sphinx_doc_begin__
|
| 330 |
+
# Setup trajectory views (`memory-inference` x past memory outs).
|
| 331 |
+
for i in range(self.num_transformer_units):
|
| 332 |
+
space = Box(-1.0, 1.0, shape=(self.attention_dim,))
|
| 333 |
+
self.view_requirements["state_in_{}".format(i)] = ViewRequirement(
|
| 334 |
+
"state_out_{}".format(i),
|
| 335 |
+
shift="-{}:-1".format(self.memory_inference),
|
| 336 |
+
# Repeat the incoming state every max-seq-len times.
|
| 337 |
+
batch_repeat_value=self.max_seq_len,
|
| 338 |
+
space=space,
|
| 339 |
+
)
|
| 340 |
+
self.view_requirements["state_out_{}".format(i)] = ViewRequirement(
|
| 341 |
+
space=space, used_for_training=False
|
| 342 |
+
)
|
| 343 |
+
# __sphinx_doc_end__
|
| 344 |
+
|
| 345 |
+
@override(ModelV2)
|
| 346 |
+
def forward(
|
| 347 |
+
self, input_dict, state: List[TensorType], seq_lens: TensorType
|
| 348 |
+
) -> (TensorType, List[TensorType]):
|
| 349 |
+
assert seq_lens is not None
|
| 350 |
+
|
| 351 |
+
# Add the time dim to observations.
|
| 352 |
+
B = tf.shape(seq_lens)[0]
|
| 353 |
+
observations = input_dict[SampleBatch.OBS]
|
| 354 |
+
|
| 355 |
+
shape = tf.shape(observations)
|
| 356 |
+
T = shape[0] // B
|
| 357 |
+
observations = tf.reshape(observations, tf.concat([[-1, T], shape[1:]], axis=0))
|
| 358 |
+
|
| 359 |
+
all_out = self.trxl_model([observations] + state)
|
| 360 |
+
|
| 361 |
+
if self._logits is not None:
|
| 362 |
+
out = tf.reshape(all_out[0], [-1, self.num_outputs])
|
| 363 |
+
self._value_out = all_out[1]
|
| 364 |
+
memory_outs = all_out[2:]
|
| 365 |
+
else:
|
| 366 |
+
out = tf.reshape(all_out[0], [-1, self.attention_dim])
|
| 367 |
+
memory_outs = all_out[1:]
|
| 368 |
+
|
| 369 |
+
return out, [tf.reshape(m, [-1, self.attention_dim]) for m in memory_outs]
|
| 370 |
+
|
| 371 |
+
@override(RecurrentNetwork)
|
| 372 |
+
def get_initial_state(self) -> List[np.ndarray]:
|
| 373 |
+
return [
|
| 374 |
+
tf.zeros(self.view_requirements["state_in_{}".format(i)].space.shape)
|
| 375 |
+
for i in range(self.num_transformer_units)
|
| 376 |
+
]
|
| 377 |
+
|
| 378 |
+
@override(ModelV2)
|
| 379 |
+
def value_function(self) -> TensorType:
|
| 380 |
+
return tf.reshape(self._value_out, [-1])
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
class AttentionWrapper(TFModelV2):
|
| 384 |
+
"""GTrXL wrapper serving as interface for ModelV2s that set use_attention."""
|
| 385 |
+
|
| 386 |
+
def __init__(
|
| 387 |
+
self,
|
| 388 |
+
obs_space: gym.spaces.Space,
|
| 389 |
+
action_space: gym.spaces.Space,
|
| 390 |
+
num_outputs: int,
|
| 391 |
+
model_config: ModelConfigDict,
|
| 392 |
+
name: str,
|
| 393 |
+
):
|
| 394 |
+
if log_once("attention_wrapper_tf_deprecation"):
|
| 395 |
+
deprecation_warning(
|
| 396 |
+
old="ray.rllib.models.tf.attention_net.AttentionWrapper"
|
| 397 |
+
)
|
| 398 |
+
super().__init__(obs_space, action_space, None, model_config, name)
|
| 399 |
+
|
| 400 |
+
self.use_n_prev_actions = model_config["attention_use_n_prev_actions"]
|
| 401 |
+
self.use_n_prev_rewards = model_config["attention_use_n_prev_rewards"]
|
| 402 |
+
|
| 403 |
+
self.action_space_struct = get_base_struct_from_space(self.action_space)
|
| 404 |
+
self.action_dim = 0
|
| 405 |
+
|
| 406 |
+
for space in tree.flatten(self.action_space_struct):
|
| 407 |
+
if isinstance(space, Discrete):
|
| 408 |
+
self.action_dim += space.n
|
| 409 |
+
elif isinstance(space, MultiDiscrete):
|
| 410 |
+
self.action_dim += np.sum(space.nvec)
|
| 411 |
+
elif space.shape is not None:
|
| 412 |
+
self.action_dim += int(np.prod(space.shape))
|
| 413 |
+
else:
|
| 414 |
+
self.action_dim += int(len(space))
|
| 415 |
+
|
| 416 |
+
# Add prev-action/reward nodes to input to LSTM.
|
| 417 |
+
if self.use_n_prev_actions:
|
| 418 |
+
self.num_outputs += self.use_n_prev_actions * self.action_dim
|
| 419 |
+
if self.use_n_prev_rewards:
|
| 420 |
+
self.num_outputs += self.use_n_prev_rewards
|
| 421 |
+
|
| 422 |
+
cfg = model_config
|
| 423 |
+
|
| 424 |
+
self.attention_dim = cfg["attention_dim"]
|
| 425 |
+
|
| 426 |
+
if self.num_outputs is not None:
|
| 427 |
+
in_space = gym.spaces.Box(
|
| 428 |
+
float("-inf"), float("inf"), shape=(self.num_outputs,), dtype=np.float32
|
| 429 |
+
)
|
| 430 |
+
else:
|
| 431 |
+
in_space = obs_space
|
| 432 |
+
|
| 433 |
+
# Construct GTrXL sub-module w/ num_outputs=None (so it does not
|
| 434 |
+
# create a logits/value output; we'll do this ourselves in this wrapper
|
| 435 |
+
# here).
|
| 436 |
+
self.gtrxl = GTrXLNet(
|
| 437 |
+
in_space,
|
| 438 |
+
action_space,
|
| 439 |
+
None,
|
| 440 |
+
model_config,
|
| 441 |
+
"gtrxl",
|
| 442 |
+
num_transformer_units=cfg["attention_num_transformer_units"],
|
| 443 |
+
attention_dim=self.attention_dim,
|
| 444 |
+
num_heads=cfg["attention_num_heads"],
|
| 445 |
+
head_dim=cfg["attention_head_dim"],
|
| 446 |
+
memory_inference=cfg["attention_memory_inference"],
|
| 447 |
+
memory_training=cfg["attention_memory_training"],
|
| 448 |
+
position_wise_mlp_dim=cfg["attention_position_wise_mlp_dim"],
|
| 449 |
+
init_gru_gate_bias=cfg["attention_init_gru_gate_bias"],
|
| 450 |
+
)
|
| 451 |
+
|
| 452 |
+
# `self.num_outputs` right now is the number of nodes coming from the
|
| 453 |
+
# attention net.
|
| 454 |
+
input_ = tf.keras.layers.Input(shape=(self.gtrxl.num_outputs,))
|
| 455 |
+
|
| 456 |
+
# Set final num_outputs to correct value (depending on action space).
|
| 457 |
+
self.num_outputs = num_outputs
|
| 458 |
+
|
| 459 |
+
# Postprocess GTrXL output with another hidden layer and compute
|
| 460 |
+
# values.
|
| 461 |
+
out = tf.keras.layers.Dense(self.num_outputs, activation=None)(input_)
|
| 462 |
+
self._logits_branch = tf.keras.models.Model([input_], [out])
|
| 463 |
+
|
| 464 |
+
out = tf.keras.layers.Dense(1, activation=None)(input_)
|
| 465 |
+
self._value_branch = tf.keras.models.Model([input_], [out])
|
| 466 |
+
|
| 467 |
+
self.view_requirements = self.gtrxl.view_requirements
|
| 468 |
+
self.view_requirements["obs"].space = self.obs_space
|
| 469 |
+
|
| 470 |
+
# Add prev-a/r to this model's view, if required.
|
| 471 |
+
if self.use_n_prev_actions:
|
| 472 |
+
self.view_requirements[SampleBatch.PREV_ACTIONS] = ViewRequirement(
|
| 473 |
+
SampleBatch.ACTIONS,
|
| 474 |
+
space=self.action_space,
|
| 475 |
+
shift="-{}:-1".format(self.use_n_prev_actions),
|
| 476 |
+
)
|
| 477 |
+
if self.use_n_prev_rewards:
|
| 478 |
+
self.view_requirements[SampleBatch.PREV_REWARDS] = ViewRequirement(
|
| 479 |
+
SampleBatch.REWARDS, shift="-{}:-1".format(self.use_n_prev_rewards)
|
| 480 |
+
)
|
| 481 |
+
|
| 482 |
+
@override(RecurrentNetwork)
def forward(
    self,
    input_dict: Dict[str, TensorType],
    state: List[TensorType],
    seq_lens: TensorType,
) -> (TensorType, List[TensorType]):
    """Runs the wrapped net, optionally concats prev-a/r, then the GTrXL.

    Args:
        input_dict: Input batch; must contain `obs` (and the
            prev-actions/prev-rewards views, if configured).
        state: List of memory tensors, one per GTrXL transformer unit.
        seq_lens: Sequence-lengths tensor; must not be None.

    Returns:
        Tuple of (action logits from the logits branch, new memory outs).
    """
    assert seq_lens is not None
    # Push obs through "unwrapped" net's `forward()` first.
    wrapped_out, _ = self._wrapped_forward(input_dict, [], None)

    # Concat. prev-action/reward if required.
    prev_a_r = []

    # Prev actions.
    if self.use_n_prev_actions:
        prev_n_actions = input_dict[SampleBatch.PREV_ACTIONS]
        # If actions are not processed yet (in their original form as
        # have been sent to environment):
        # Flatten/one-hot into 1D array.
        if self.model_config["_disable_action_flattening"]:
            # Merge prev n actions into flat tensor.
            flat = flatten_inputs_to_1d_tensor(
                prev_n_actions,
                spaces_struct=self.action_space_struct,
                time_axis=True,
            )
            # Fold time-axis into flattened data.
            flat = tf.reshape(flat, [tf.shape(flat)[0], -1])
            prev_a_r.append(flat)
        # If actions are already flattened (but not one-hot'd yet!),
        # one-hot discrete/multi-discrete actions here and concatenate the
        # n most recent actions together.
        else:
            if isinstance(self.action_space, Discrete):
                # One one-hot vector per previous timestep.
                for i in range(self.use_n_prev_actions):
                    prev_a_r.append(
                        one_hot(prev_n_actions[:, i], self.action_space)
                    )
            elif isinstance(self.action_space, MultiDiscrete):
                # Step through the flat actions in chunks of one full
                # multi-discrete action each.
                for i in range(
                    0, self.use_n_prev_actions, self.action_space.shape[0]
                ):
                    prev_a_r.append(
                        one_hot(
                            tf.cast(
                                prev_n_actions[
                                    :, i : i + self.action_space.shape[0]
                                ],
                                tf.float32,
                            ),
                            space=self.action_space,
                        )
                    )
            else:
                # Continuous (Box) actions: flatten the n most recent ones.
                prev_a_r.append(
                    tf.reshape(
                        tf.cast(prev_n_actions, tf.float32),
                        [-1, self.use_n_prev_actions * self.action_dim],
                    )
                )
    # Prev rewards.
    if self.use_n_prev_rewards:
        prev_a_r.append(
            tf.reshape(
                tf.cast(input_dict[SampleBatch.PREV_REWARDS], tf.float32),
                [-1, self.use_n_prev_rewards],
            )
        )

    # Concat prev. actions + rewards to the "main" input.
    if prev_a_r:
        wrapped_out = tf.concat([wrapped_out] + prev_a_r, axis=1)

    # Then through our GTrXL.
    input_dict["obs_flat"] = input_dict["obs"] = wrapped_out

    self._features, memory_outs = self.gtrxl(input_dict, state, seq_lens)
    model_out = self._logits_branch(self._features)
    return model_out, memory_outs
|
| 562 |
+
|
| 563 |
+
@override(ModelV2)
def value_function(self) -> TensorType:
    """Returns the flat value estimate from the most recent `forward()`.

    Requires that `forward()` has been called (it sets `self._features`).
    """
    assert self._features is not None, "Must call forward() first!"
    value_out = self._value_branch(self._features)
    return tf.reshape(value_out, [-1])
|
| 567 |
+
|
| 568 |
+
@override(ModelV2)
def get_initial_state(self) -> Union[List[np.ndarray], List[TensorType]]:
    """Returns zero-filled initial memory, one array per transformer unit.

    The shape of each memory slot is read from the GTrXL's own
    `state_in_{i}` view-requirement space.
    """
    initial_state = []
    for idx in range(self.gtrxl.num_transformer_units):
        space = self.gtrxl.view_requirements["state_in_{}".format(idx)].space
        initial_state.append(np.zeros(space.shape))
    return initial_state
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/complex_input_net.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from gymnasium.spaces import Box, Discrete, MultiDiscrete
|
| 2 |
+
import numpy as np
|
| 3 |
+
import tree # pip install dm_tree
|
| 4 |
+
|
| 5 |
+
from ray.rllib.models.catalog import ModelCatalog
|
| 6 |
+
from ray.rllib.models.modelv2 import ModelV2, restore_original_dimensions
|
| 7 |
+
from ray.rllib.models.tf.misc import normc_initializer
|
| 8 |
+
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
|
| 9 |
+
from ray.rllib.models.utils import get_filter_config
|
| 10 |
+
from ray.rllib.policy.sample_batch import SampleBatch
|
| 11 |
+
from ray.rllib.utils.annotations import OldAPIStack, override
|
| 12 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 13 |
+
from ray.rllib.utils.spaces.space_utils import flatten_space
|
| 14 |
+
from ray.rllib.utils.tf_utils import one_hot
|
| 15 |
+
|
| 16 |
+
tf1, tf, tfv = try_import_tf()
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# __sphinx_doc_begin__
|
| 20 |
+
@OldAPIStack
class ComplexInputNetwork(TFModelV2):
    """TFModelV2 concat'ing CNN outputs to flat input(s), followed by FC(s).

    Note: This model should be used for complex (Dict or Tuple) observation
    spaces that have one or more image components.

    The data flow is as follows:

    `obs` (e.g. Tuple[img0, img1, discrete0]) -> `CNN0 + CNN1 + ONE-HOT`
    `CNN0 + CNN1 + ONE-HOT` -> concat all flat outputs -> `out`
    `out` -> (optional) FC-stack -> `out2`
    `out2` -> action (logits) and value heads.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        """Builds one sub-model per obs component plus an optional post-FC stack.

        Args:
            obs_space: The (possibly preprocessed) observation space. If it
                carries an `original_space` attribute, that Dict/Tuple space
                determines the individual components.
            action_space: Action space of the policy.
            num_outputs: Number of action logits; falsy to expose the
                post-FC-stack output size via `self.num_outputs` instead.
            model_config: The model config dict.
            name: Name of this model.
        """
        # The original, un-preprocessed (Dict/Tuple) space, if available.
        self.original_space = (
            obs_space.original_space
            if hasattr(obs_space, "original_space")
            else obs_space
        )

        # Space that `forward()` inputs must be unpacked against (depends on
        # whether the preprocessor API is disabled).
        self.processed_obs_space = (
            self.original_space
            if model_config.get("_disable_preprocessor_api")
            else obs_space
        )
        super().__init__(
            self.original_space, action_space, num_outputs, model_config, name
        )

        # Flat list of leaf component spaces of the original space.
        self.flattened_input_space = flatten_space(self.original_space)

        # Build the CNN(s) given obs_space's image components.
        self.cnns = {}  # component index -> CNN sub-model
        self.one_hot = {}  # component index -> FC sub-model for one-hot inputs
        self.flatten_dims = {}  # component index -> flat input size
        self.flatten = {}  # component index -> FC sub-model for 1D Box inputs
        concat_size = 0
        for i, component in enumerate(self.flattened_input_space):
            # Image space.
            if len(component.shape) == 3 and isinstance(component, Box):
                config = {
                    "conv_filters": model_config["conv_filters"]
                    if "conv_filters" in model_config
                    else get_filter_config(component.shape),
                    "conv_activation": model_config.get("conv_activation"),
                    "post_fcnet_hiddens": [],
                }
                self.cnns[i] = ModelCatalog.get_model_v2(
                    component,
                    action_space,
                    num_outputs=None,
                    model_config=config,
                    framework="tf",
                    name="cnn_{}".format(i),
                )
                concat_size += int(self.cnns[i].num_outputs)
            # Discrete|MultiDiscrete inputs -> One-hot encode.
            elif isinstance(component, (Discrete, MultiDiscrete)):
                if isinstance(component, Discrete):
                    size = component.n
                else:
                    # Total one-hot width is the sum over all sub-spaces.
                    size = np.sum(component.nvec)
                config = {
                    "fcnet_hiddens": model_config["fcnet_hiddens"],
                    "fcnet_activation": model_config.get("fcnet_activation"),
                    "post_fcnet_hiddens": [],
                }
                self.one_hot[i] = ModelCatalog.get_model_v2(
                    Box(-1.0, 1.0, (size,), np.float32),
                    action_space,
                    num_outputs=None,
                    model_config=config,
                    framework="tf",
                    name="one_hot_{}".format(i),
                )
                concat_size += int(self.one_hot[i].num_outputs)
            # Everything else (1D Box).
            else:
                size = int(np.prod(component.shape))
                config = {
                    "fcnet_hiddens": model_config["fcnet_hiddens"],
                    "fcnet_activation": model_config.get("fcnet_activation"),
                    "post_fcnet_hiddens": [],
                }
                self.flatten[i] = ModelCatalog.get_model_v2(
                    Box(-1.0, 1.0, (size,), np.float32),
                    action_space,
                    num_outputs=None,
                    model_config=config,
                    framework="tf",
                    name="flatten_{}".format(i),
                )
                self.flatten_dims[i] = size
                concat_size += int(self.flatten[i].num_outputs)

        # Optional post-concat FC-stack.
        post_fc_stack_config = {
            "fcnet_hiddens": model_config.get("post_fcnet_hiddens", []),
            "fcnet_activation": model_config.get("post_fcnet_activation", "relu"),
        }
        self.post_fc_stack = ModelCatalog.get_model_v2(
            Box(float("-inf"), float("inf"), shape=(concat_size,), dtype=np.float32),
            self.action_space,
            None,
            post_fc_stack_config,
            framework="tf",
            name="post_fc_stack",
        )

        # Actions and value heads.
        self.logits_and_value_model = None
        self._value_out = None
        if num_outputs:
            # Action-distribution head.
            concat_layer = tf.keras.layers.Input((self.post_fc_stack.num_outputs,))
            logits_layer = tf.keras.layers.Dense(
                num_outputs,
                activation=None,
                kernel_initializer=normc_initializer(0.01),
                name="logits",
            )(concat_layer)

            # Create the value branch model.
            value_layer = tf.keras.layers.Dense(
                1,
                activation=None,
                kernel_initializer=normc_initializer(0.01),
                name="value_out",
            )(concat_layer)
            self.logits_and_value_model = tf.keras.models.Model(
                concat_layer, [logits_layer, value_layer]
            )
        else:
            # No heads requested: expose the post-FC-stack output size.
            self.num_outputs = self.post_fc_stack.num_outputs

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        """Routes each obs component through its sub-model, concats, and heads."""
        # If obs is already in its original (Dict/Tuple) form, use it as-is;
        # otherwise restore the original structure from the flat tensor.
        if SampleBatch.OBS in input_dict and "obs_flat" in input_dict:
            orig_obs = input_dict[SampleBatch.OBS]
        else:
            orig_obs = restore_original_dimensions(
                input_dict[SampleBatch.OBS], self.processed_obs_space, tensorlib="tf"
            )
        # Push image observations through our CNNs.
        outs = []
        for i, component in enumerate(tree.flatten(orig_obs)):
            if i in self.cnns:
                cnn_out, _ = self.cnns[i](SampleBatch({SampleBatch.OBS: component}))
                outs.append(cnn_out)
            elif i in self.one_hot:
                # Int dtypes still need one-hot'ing; other dtypes are passed
                # through unchanged (presumably already one-hot — TODO confirm).
                if "int" in component.dtype.name:
                    one_hot_in = {
                        SampleBatch.OBS: one_hot(
                            component, self.flattened_input_space[i]
                        )
                    }
                else:
                    one_hot_in = {SampleBatch.OBS: component}
                one_hot_out, _ = self.one_hot[i](SampleBatch(one_hot_in))
                outs.append(one_hot_out)
            else:
                nn_out, _ = self.flatten[i](
                    SampleBatch(
                        {
                            SampleBatch.OBS: tf.cast(
                                tf.reshape(component, [-1, self.flatten_dims[i]]),
                                tf.float32,
                            )
                        }
                    )
                )
                outs.append(nn_out)
        # Concat all outputs and the non-image inputs.
        out = tf.concat(outs, axis=1)
        # Push through (optional) FC-stack (this may be an empty stack).
        out, _ = self.post_fc_stack(SampleBatch({SampleBatch.OBS: out}))

        # No logits/value branches.
        if not self.logits_and_value_model:
            return out, []

        # Logits- and value branches.
        logits, values = self.logits_and_value_model(out)
        self._value_out = tf.reshape(values, [-1])
        return logits, []

    @override(ModelV2)
    def value_function(self):
        """Returns the value-branch output cached by the last `forward()`."""
        return self._value_out
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
# __sphinx_doc_end__
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/fcnet.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import gymnasium as gym
|
| 3 |
+
from typing import Dict
|
| 4 |
+
|
| 5 |
+
from ray.rllib.models.tf.misc import normc_initializer
|
| 6 |
+
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
|
| 7 |
+
from ray.rllib.models.utils import get_activation_fn
|
| 8 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 9 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 10 |
+
from ray.rllib.utils.typing import TensorType, List, ModelConfigDict
|
| 11 |
+
|
| 12 |
+
tf1, tf, tfv = try_import_tf()
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@OldAPIStack
class FullyConnectedNetwork(TFModelV2):
    """Generic fully connected network implemented in ModelV2 API."""

    def __init__(
        self,
        obs_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        num_outputs: int,
        model_config: ModelConfigDict,
        name: str,
    ):
        """Builds the Keras model: FC stack(s) plus logits and value heads.

        Args:
            obs_space: Observation space; its flattened size is the input dim.
            action_space: Action space of the policy.
            num_outputs: Number of action logits (falsy to expose the last
                hidden layer output instead).
            model_config: Model config dict; consumes `fcnet_hiddens`,
                `fcnet_activation`, `post_fcnet_hiddens`,
                `post_fcnet_activation`, `no_final_linear`,
                `vf_share_layers` and `free_log_std`.
            name: Name of the model.
        """
        super(FullyConnectedNetwork, self).__init__(
            obs_space, action_space, num_outputs, model_config, name
        )

        hiddens = list(model_config.get("fcnet_hiddens", [])) + list(
            model_config.get("post_fcnet_hiddens", [])
        )
        activation = model_config.get("fcnet_activation")
        # If there is no main FC stack, the post-FC activation is used for
        # all (post-)layers instead.
        if not model_config.get("fcnet_hiddens", []):
            activation = model_config.get("post_fcnet_activation")
        activation = get_activation_fn(activation)
        no_final_linear = model_config.get("no_final_linear")
        vf_share_layers = model_config.get("vf_share_layers")
        free_log_std = model_config.get("free_log_std")

        # Generate free-floating bias variables for the second half of
        # the outputs.
        if free_log_std:
            assert num_outputs % 2 == 0, (
                "num_outputs must be divisible by two",
                num_outputs,
            )
            # First half: means (network outputs); second half: free log-stds.
            num_outputs = num_outputs // 2
            self.log_std_var = tf.Variable(
                [0.0] * num_outputs, dtype=tf.float32, name="log_std"
            )

        # We are using obs_flat, so take the flattened shape as input.
        inputs = tf.keras.layers.Input(
            shape=(int(np.prod(obs_space.shape)),), name="observations"
        )
        # Last hidden layer output (before logits outputs).
        last_layer = inputs
        # The action distribution outputs.
        logits_out = None
        i = 1

        # Create layers 0 to second-last.
        for size in hiddens[:-1]:
            last_layer = tf.keras.layers.Dense(
                size,
                name="fc_{}".format(i),
                activation=activation,
                kernel_initializer=normc_initializer(1.0),
            )(last_layer)
            i += 1

        # The last layer is adjusted to be of size num_outputs, but it's a
        # layer with activation.
        if no_final_linear and num_outputs:
            logits_out = tf.keras.layers.Dense(
                num_outputs,
                name="fc_out",
                activation=activation,
                kernel_initializer=normc_initializer(1.0),
            )(last_layer)
        # Finish the layers with the provided sizes (`hiddens`), plus -
        # iff num_outputs > 0 - a last linear layer of size num_outputs.
        else:
            if len(hiddens) > 0:
                last_layer = tf.keras.layers.Dense(
                    hiddens[-1],
                    name="fc_{}".format(i),
                    activation=activation,
                    kernel_initializer=normc_initializer(1.0),
                )(last_layer)
            if num_outputs:
                logits_out = tf.keras.layers.Dense(
                    num_outputs,
                    name="fc_out",
                    activation=None,
                    kernel_initializer=normc_initializer(0.01),
                )(last_layer)
            # Adjust num_outputs to be the number of nodes in the last layer.
            else:
                self.num_outputs = ([int(np.prod(obs_space.shape))] + hiddens[-1:])[-1]

        # Concat the log std vars to the end of the state-dependent means.
        if free_log_std and logits_out is not None:

            def tiled_log_std(x):
                # Broadcast the single log-std vector over the batch dim.
                return tf.tile(tf.expand_dims(self.log_std_var, 0), [tf.shape(x)[0], 1])

            log_std_out = tf.keras.layers.Lambda(tiled_log_std)(inputs)
            logits_out = tf.keras.layers.Concatenate(axis=1)([logits_out, log_std_out])

        last_vf_layer = None
        if not vf_share_layers:
            # Build a parallel set of hidden layers for the value net.
            last_vf_layer = inputs
            i = 1
            for size in hiddens:
                last_vf_layer = tf.keras.layers.Dense(
                    size,
                    name="fc_value_{}".format(i),
                    activation=activation,
                    kernel_initializer=normc_initializer(1.0),
                )(last_vf_layer)
                i += 1

        # Value head attaches to the separate VF stack if built, else to the
        # shared trunk.
        value_out = tf.keras.layers.Dense(
            1,
            name="value_out",
            activation=None,
            kernel_initializer=normc_initializer(0.01),
        )(last_vf_layer if last_vf_layer is not None else last_layer)

        self.base_model = tf.keras.Model(
            inputs, [(logits_out if logits_out is not None else last_layer), value_out]
        )

    def forward(
        self,
        input_dict: Dict[str, TensorType],
        state: List[TensorType],
        seq_lens: TensorType,
    ) -> (TensorType, List[TensorType]):
        """Computes logits (or last hidden out) and caches the value output."""
        model_out, self._value_out = self.base_model(input_dict["obs_flat"])
        return model_out, state

    def value_function(self) -> TensorType:
        """Returns the flat value output cached by the last `forward()`."""
        return tf.reshape(self._value_out, [-1])
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.rllib.models.tf.layers.gru_gate import GRUGate
|
| 2 |
+
from ray.rllib.models.tf.layers.noisy_layer import NoisyLayer
|
| 3 |
+
from ray.rllib.models.tf.layers.relative_multi_head_attention import (
|
| 4 |
+
PositionalEmbedding,
|
| 5 |
+
RelativeMultiHeadAttention,
|
| 6 |
+
)
|
| 7 |
+
from ray.rllib.models.tf.layers.skip_connection import SkipConnection
|
| 8 |
+
from ray.rllib.models.tf.layers.multi_head_attention import MultiHeadAttention
|
| 9 |
+
|
| 10 |
+
__all__ = [
|
| 11 |
+
"GRUGate",
|
| 12 |
+
"MultiHeadAttention",
|
| 13 |
+
"NoisyLayer",
|
| 14 |
+
"PositionalEmbedding",
|
| 15 |
+
"RelativeMultiHeadAttention",
|
| 16 |
+
"SkipConnection",
|
| 17 |
+
]
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/multi_head_attention.cpython-310.pyc
ADDED
|
Binary file (2.3 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/noisy_layer.cpython-310.pyc
ADDED
|
Binary file (3.43 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/skip_connection.cpython-310.pyc
ADDED
|
Binary file (1.85 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/gru_gate.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 2 |
+
from ray.rllib.utils.typing import TensorType, TensorShape
|
| 3 |
+
from ray.rllib.utils.deprecation import deprecation_warning
|
| 4 |
+
from ray.util import log_once
|
| 5 |
+
|
| 6 |
+
tf1, tf, tfv = try_import_tf()
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class GRUGate(tf.keras.layers.Layer if tf else object):
    """A GRU-style gating layer combining an internal state with a new input.

    Takes a pair (h, X) of equally-sized tensors and returns
    (1 - z) * h + z * h_hat, where z is an update gate (with a learnable,
    subtracted bias initialized to `init_bias`) and h_hat is a candidate
    state computed from X and a reset-gated h.
    """

    def __init__(self, init_bias: float = 0.0, **kwargs):
        """Initializes the gate.

        Args:
            init_bias: Initial value of the update-gate bias (subtracted
                inside the sigmoid, biasing the gate at the start).
        """
        super().__init__(**kwargs)
        self._init_bias = init_bias
        if log_once("gru_gate"):
            deprecation_warning(
                old="rllib.models.tf.layers.GRUGate",
            )

    def build(self, input_shape: TensorShape):
        """Creates the six projection matrices and the update-gate bias."""
        h_shape, x_shape = input_shape
        if h_shape[-1] != x_shape[-1]:
            raise ValueError(
                "Both inputs to GRUGate must have equal size in last axis!"
            )

        d = int(h_shape[-1])
        # Input-side (X) projections for reset / update / candidate.
        self._w_r = self.add_weight(shape=(d, d))
        self._w_z = self.add_weight(shape=(d, d))
        self._w_h = self.add_weight(shape=(d, d))

        # State-side (h) projections for reset / update / candidate.
        self._u_r = self.add_weight(shape=(d, d))
        self._u_z = self.add_weight(shape=(d, d))
        self._u_h = self.add_weight(shape=(d, d))

        def fill_bias(shape, dtype):
            # Constant-fill with the configured initial bias value.
            return tf.fill(shape, tf.cast(self._init_bias, dtype=dtype))

        self._bias_z = self.add_weight(shape=(d,), initializer=fill_bias)

    def call(self, inputs: TensorType, **kwargs) -> TensorType:
        """Gates the internal state with the new input.

        Args:
            inputs: Pair (internal state, new input), state first.

        Returns:
            The gated combination (1 - z) * h + z * h_hat.
        """
        state, new_in = inputs

        # Reset gate: how much of the old state feeds the candidate.
        reset = tf.nn.sigmoid(
            tf.tensordot(new_in, self._w_r, axes=1)
            + tf.tensordot(state, self._u_r, axes=1)
        )

        # Update gate (with subtracted learnable bias).
        update = tf.nn.sigmoid(
            tf.tensordot(new_in, self._w_z, axes=1)
            + tf.tensordot(state, self._u_z, axes=1)
            - self._bias_z
        )

        # Candidate state from the new input and the reset-gated old state.
        candidate = tf.nn.tanh(
            tf.tensordot(new_in, self._w_h, axes=1)
            + tf.tensordot(state * reset, self._u_h, axes=1)
        )

        return (1 - update) * state + update * candidate
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/multi_head_attention.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
[1] - Attention Is All You Need - Vaswani, Jones, Shazeer, Parmar,
|
| 3 |
+
Uszkoreit, Gomez, Kaiser - Google Brain/Research, U Toronto - 2017.
|
| 4 |
+
https://arxiv.org/pdf/1706.03762.pdf
|
| 5 |
+
"""
|
| 6 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 7 |
+
from ray.rllib.utils.typing import TensorType
|
| 8 |
+
from ray.rllib.utils.deprecation import deprecation_warning
|
| 9 |
+
from ray.util import log_once
|
| 10 |
+
|
| 11 |
+
tf1, tf, tfv = try_import_tf()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class MultiHeadAttention(tf.keras.layers.Layer if tf else object):
    """A multi-head attention layer described in [1]."""

    def __init__(self, out_dim: int, num_heads: int, head_dim: int, **kwargs):
        """Initializes the attention layer.

        Args:
            out_dim: Output feature size of the final linear projection.
            num_heads: Number of attention heads.
            head_dim: Per-head query/key/value dimension.
        """
        super().__init__(**kwargs)

        # No bias or non-linearity.
        self._num_heads = num_heads
        self._head_dim = head_dim
        # One fused dense layer producing queries, keys, and values at once.
        self._qkv_layer = tf.keras.layers.Dense(
            3 * num_heads * head_dim, use_bias=False
        )
        self._linear_layer = tf.keras.layers.TimeDistributed(
            tf.keras.layers.Dense(out_dim, use_bias=False)
        )
        if log_once("multi_head_attention"):
            deprecation_warning(
                old="rllib.models.tf.layers.MultiHeadAttention",
            )

    def call(self, inputs: TensorType) -> TensorType:
        """Applies causally-masked multi-head attention.

        Assumes `inputs` has the time axis at position 1
        (i.e. [batch, time, features]) — TODO confirm with callers.
        """
        L = tf.shape(inputs)[1]  # length of segment
        H = self._num_heads  # number of attention heads
        D = self._head_dim  # attention head dimension

        qkv = self._qkv_layer(inputs)

        # Split the fused projection back into separate Q, K, V tensors.
        queries, keys, values = tf.split(qkv, 3, -1)
        queries = queries[:, -L:]  # only query based on the segment

        queries = tf.reshape(queries, [-1, L, H, D])
        keys = tf.reshape(keys, [-1, L, H, D])
        values = tf.reshape(values, [-1, L, H, D])

        # Scaled dot-product attention scores: [batch, i, j, head].
        score = tf.einsum("bihd,bjhd->bijh", queries, keys)
        score = score / D**0.5

        # causal mask of the same length as the sequence
        mask = tf.sequence_mask(tf.range(1, L + 1), dtype=score.dtype)
        mask = mask[None, :, :, None]

        # Masked-out (future) positions receive a large negative value
        # before the softmax, zeroing their attention weight.
        masked_score = score * mask + 1e30 * (mask - 1.0)
        wmat = tf.nn.softmax(masked_score, axis=2)

        # Weighted sum over values, then merge heads back into one axis.
        out = tf.einsum("bijh,bjhd->bihd", wmat, values)
        shape = tf.concat([tf.shape(out)[:2], [H * D]], axis=0)
        out = tf.reshape(out, shape)
        return self._linear_layer(out)
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/noisy_layer.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from ray.rllib.models.utils import get_activation_fn
|
| 4 |
+
from ray.rllib.utils.framework import (
|
| 5 |
+
get_variable,
|
| 6 |
+
try_import_tf,
|
| 7 |
+
TensorType,
|
| 8 |
+
TensorShape,
|
| 9 |
+
)
|
| 10 |
+
from ray.rllib.utils.deprecation import deprecation_warning
|
| 11 |
+
from ray.util import log_once
|
| 12 |
+
|
| 13 |
+
tf1, tf, tfv = try_import_tf()
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class NoisyLayer(tf.keras.layers.Layer if tf else object):
    r"""A Layer that adds learnable Noise to some previous layer's outputs.

    Consists of:
    - a common dense layer: y = w^{T}x + b
    - a noisy layer: y = (w + \epsilon_w*\sigma_w)^{T}x +
      (b+\epsilon_b*\sigma_b)
    , where \epsilon are random variables sampled from factorized normal
    distributions and \sigma are trainable variables which are expected to
    vanish along the training procedure.
    """

    def __init__(
        self, prefix: str, out_size: int, sigma0: float, activation: str = "relu"
    ):
        """Initializes a NoisyLayer object.

        Args:
            prefix: Name prefix for all variables created by this layer.
            out_size: Output size for Noisy Layer
            sigma0: Initialization value for sigma_b (bias noise)
            activation: Non-linear activation for Noisy Layer
        """
        super().__init__()
        self.prefix = prefix
        self.out_size = out_size
        # TF noise generation can be unreliable on GPU
        # If generating the noise on the CPU,
        # lowering sigma0 to 0.1 may be helpful
        self.sigma0 = sigma0  # 0.5~GPU, 0.1~CPU
        self.activation = activation
        # Variables (created lazily in `build()`).
        self.w = None  # Weight matrix.
        self.b = None  # Biases.
        self.sigma_w = None  # Noise for weight matrix
        self.sigma_b = None  # Noise for biases.
        if log_once("noisy_layer"):
            deprecation_warning(
                old="rllib.models.tf.layers.NoisyLayer",
            )

    def build(self, input_shape: TensorShape):
        """Creates weights, biases, and their sigma (noise-scale) variables."""
        in_size = int(input_shape[1])

        # Noise scale for the weight matrix, uniformly initialized in
        # [-1/sqrt(in_size), 1/sqrt(in_size)].
        self.sigma_w = get_variable(
            value=tf.keras.initializers.RandomUniform(
                minval=-1.0 / np.sqrt(float(in_size)),
                maxval=1.0 / np.sqrt(float(in_size)),
            ),
            trainable=True,
            tf_name=self.prefix + "_sigma_w",
            shape=[in_size, self.out_size],
            dtype=tf.float32,
        )

        # Noise scale for the bias, constant-initialized from sigma0.
        self.sigma_b = get_variable(
            value=tf.keras.initializers.Constant(self.sigma0 / np.sqrt(float(in_size))),
            trainable=True,
            tf_name=self.prefix + "_sigma_b",
            shape=[self.out_size],
            dtype=tf.float32,
        )

        # Plain dense-layer weight matrix.
        self.w = get_variable(
            value=tf.keras.initializers.GlorotUniform(),
            tf_name=self.prefix + "_fc_w",
            trainable=True,
            shape=[in_size, self.out_size],
            dtype=tf.float32,
        )

        # Plain dense-layer bias.
        self.b = get_variable(
            value=tf.keras.initializers.Zeros(),
            tf_name=self.prefix + "_fc_b",
            trainable=True,
            shape=[self.out_size],
            dtype=tf.float32,
        )

    def call(self, inputs: TensorType) -> TensorType:
        """Computes the noisy dense output for `inputs`.

        Samples fresh factorized-Gaussian noise on every call.
        """
        in_size = int(inputs.shape[1])
        # Factorized noise: one vector per input dim, one per output dim.
        epsilon_in = tf.random.normal(shape=[in_size])
        epsilon_out = tf.random.normal(shape=[self.out_size])
        epsilon_in = self._f_epsilon(epsilon_in)
        epsilon_out = self._f_epsilon(epsilon_out)
        # Outer product yields the full [in_size, out_size] weight noise.
        epsilon_w = tf.matmul(
            a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0)
        )
        epsilon_b = epsilon_out

        # Dense layer with noise-perturbed weights and biases.
        action_activation = (
            tf.matmul(inputs, self.w + self.sigma_w * epsilon_w)
            + self.b
            + self.sigma_b * epsilon_b
        )

        fn = get_activation_fn(self.activation, framework="tf")
        if fn is not None:
            action_activation = fn(action_activation)
        return action_activation

    def _f_epsilon(self, x: TensorType) -> TensorType:
        """Noise-shaping function f(x) = sign(x) * sqrt(|x|) (NoisyNet paper)."""
        return tf.math.sign(x) * tf.math.sqrt(tf.math.abs(x))
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/relative_multi_head_attention.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
|
| 3 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 4 |
+
from ray.rllib.utils.typing import TensorType
|
| 5 |
+
from ray.rllib.utils.deprecation import deprecation_warning
|
| 6 |
+
from ray.util import log_once
|
| 7 |
+
|
| 8 |
+
tf1, tf, tfv = try_import_tf()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class RelativeMultiHeadAttention(tf.keras.layers.Layer if tf else object):
|
| 12 |
+
"""A RelativeMultiHeadAttention layer as described in [3].
|
| 13 |
+
|
| 14 |
+
Uses segment level recurrence with state reuse.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
def __init__(
|
| 18 |
+
self,
|
| 19 |
+
out_dim: int,
|
| 20 |
+
num_heads: int,
|
| 21 |
+
head_dim: int,
|
| 22 |
+
input_layernorm: bool = False,
|
| 23 |
+
output_activation: Optional["tf.nn.activation"] = None,
|
| 24 |
+
**kwargs
|
| 25 |
+
):
|
| 26 |
+
"""Initializes a RelativeMultiHeadAttention keras Layer object.
|
| 27 |
+
|
| 28 |
+
Args:
|
| 29 |
+
out_dim: The output dimensions of the multi-head attention
|
| 30 |
+
unit.
|
| 31 |
+
num_heads: The number of attention heads to use.
|
| 32 |
+
Denoted `H` in [2].
|
| 33 |
+
head_dim: The dimension of a single(!) attention head within
|
| 34 |
+
a multi-head attention unit. Denoted as `d` in [3].
|
| 35 |
+
input_layernorm: Whether to prepend a LayerNorm before
|
| 36 |
+
everything else. Should be True for building a GTrXL.
|
| 37 |
+
output_activation (Optional[tf.nn.activation]): Optional tf.nn
|
| 38 |
+
activation function. Should be relu for GTrXL.
|
| 39 |
+
**kwargs:
|
| 40 |
+
"""
|
| 41 |
+
if log_once("relative_multi_head_attention"):
|
| 42 |
+
deprecation_warning(
|
| 43 |
+
old="rllib.models.tf.layers.RelativeMultiHeadAttention",
|
| 44 |
+
)
|
| 45 |
+
super().__init__(**kwargs)
|
| 46 |
+
|
| 47 |
+
# No bias or non-linearity.
|
| 48 |
+
self._num_heads = num_heads
|
| 49 |
+
self._head_dim = head_dim
|
| 50 |
+
# 3=Query, key, and value inputs.
|
| 51 |
+
self._qkv_layer = tf.keras.layers.Dense(
|
| 52 |
+
3 * num_heads * head_dim, use_bias=False
|
| 53 |
+
)
|
| 54 |
+
self._linear_layer = tf.keras.layers.TimeDistributed(
|
| 55 |
+
tf.keras.layers.Dense(out_dim, use_bias=False, activation=output_activation)
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
self._uvar = self.add_weight(shape=(num_heads, head_dim))
|
| 59 |
+
self._vvar = self.add_weight(shape=(num_heads, head_dim))
|
| 60 |
+
|
| 61 |
+
# Constant (non-trainable) sinusoid rel pos encoding matrix, which
|
| 62 |
+
# depends on this incoming time dimension.
|
| 63 |
+
# For inference, we prepend the memory to the current timestep's
|
| 64 |
+
# input: Tau + 1. For training, we prepend the memory to the input
|
| 65 |
+
# sequence: Tau + T.
|
| 66 |
+
self._pos_embedding = PositionalEmbedding(out_dim)
|
| 67 |
+
self._pos_proj = tf.keras.layers.Dense(num_heads * head_dim, use_bias=False)
|
| 68 |
+
|
| 69 |
+
self._input_layernorm = None
|
| 70 |
+
if input_layernorm:
|
| 71 |
+
self._input_layernorm = tf.keras.layers.LayerNormalization(axis=-1)
|
| 72 |
+
|
| 73 |
+
def call(
|
| 74 |
+
self, inputs: TensorType, memory: Optional[TensorType] = None
|
| 75 |
+
) -> TensorType:
|
| 76 |
+
T = tf.shape(inputs)[1] # length of segment (time)
|
| 77 |
+
H = self._num_heads # number of attention heads
|
| 78 |
+
d = self._head_dim # attention head dimension
|
| 79 |
+
|
| 80 |
+
# Add previous memory chunk (as const, w/o gradient) to input.
|
| 81 |
+
# Tau (number of (prev) time slices in each memory chunk).
|
| 82 |
+
Tau = tf.shape(memory)[1]
|
| 83 |
+
inputs = tf.concat([tf.stop_gradient(memory), inputs], axis=1)
|
| 84 |
+
|
| 85 |
+
# Apply the Layer-Norm.
|
| 86 |
+
if self._input_layernorm is not None:
|
| 87 |
+
inputs = self._input_layernorm(inputs)
|
| 88 |
+
|
| 89 |
+
qkv = self._qkv_layer(inputs)
|
| 90 |
+
|
| 91 |
+
queries, keys, values = tf.split(qkv, 3, -1)
|
| 92 |
+
# Cut out memory timesteps from query.
|
| 93 |
+
queries = queries[:, -T:]
|
| 94 |
+
|
| 95 |
+
# Splitting up queries into per-head dims (d).
|
| 96 |
+
queries = tf.reshape(queries, [-1, T, H, d])
|
| 97 |
+
keys = tf.reshape(keys, [-1, Tau + T, H, d])
|
| 98 |
+
values = tf.reshape(values, [-1, Tau + T, H, d])
|
| 99 |
+
|
| 100 |
+
R = self._pos_embedding(Tau + T)
|
| 101 |
+
R = self._pos_proj(R)
|
| 102 |
+
R = tf.reshape(R, [Tau + T, H, d])
|
| 103 |
+
|
| 104 |
+
# b=batch
|
| 105 |
+
# i and j=time indices (i=max-timesteps (inputs); j=Tau memory space)
|
| 106 |
+
# h=head
|
| 107 |
+
# d=head-dim (over which we will reduce-sum)
|
| 108 |
+
score = tf.einsum("bihd,bjhd->bijh", queries + self._uvar, keys)
|
| 109 |
+
pos_score = tf.einsum("bihd,jhd->bijh", queries + self._vvar, R)
|
| 110 |
+
score = score + self.rel_shift(pos_score)
|
| 111 |
+
score = score / d**0.5
|
| 112 |
+
|
| 113 |
+
# Causal mask of the same length as the sequence.
|
| 114 |
+
mask = tf.sequence_mask(tf.range(Tau + 1, Tau + T + 1), dtype=score.dtype)
|
| 115 |
+
mask = mask[None, :, :, None]
|
| 116 |
+
|
| 117 |
+
masked_score = score * mask + 1e30 * (mask - 1.0)
|
| 118 |
+
wmat = tf.nn.softmax(masked_score, axis=2)
|
| 119 |
+
|
| 120 |
+
out = tf.einsum("bijh,bjhd->bihd", wmat, values)
|
| 121 |
+
out = tf.reshape(out, tf.concat((tf.shape(out)[:2], [H * d]), axis=0))
|
| 122 |
+
return self._linear_layer(out)
|
| 123 |
+
|
| 124 |
+
@staticmethod
|
| 125 |
+
def rel_shift(x: TensorType) -> TensorType:
|
| 126 |
+
# Transposed version of the shift approach described in [3].
|
| 127 |
+
# https://github.com/kimiyoung/transformer-xl/blob/
|
| 128 |
+
# 44781ed21dbaec88b280f74d9ae2877f52b492a5/tf/model.py#L31
|
| 129 |
+
x_size = tf.shape(x)
|
| 130 |
+
|
| 131 |
+
x = tf.pad(x, [[0, 0], [0, 0], [1, 0], [0, 0]])
|
| 132 |
+
x = tf.reshape(x, [x_size[0], x_size[2] + 1, x_size[1], x_size[3]])
|
| 133 |
+
x = x[:, 1:, :, :]
|
| 134 |
+
x = tf.reshape(x, x_size)
|
| 135 |
+
|
| 136 |
+
return x
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
class PositionalEmbedding(tf.keras.layers.Layer if tf else object):
|
| 140 |
+
def __init__(self, out_dim, **kwargs):
|
| 141 |
+
super().__init__(**kwargs)
|
| 142 |
+
self.inverse_freq = 1 / (10000 ** (tf.range(0, out_dim, 2.0) / out_dim))
|
| 143 |
+
|
| 144 |
+
def call(self, seq_length):
|
| 145 |
+
pos_offsets = tf.cast(tf.range(seq_length - 1, -1, -1), tf.float32)
|
| 146 |
+
inputs = pos_offsets[:, None] * self.inverse_freq[None, :]
|
| 147 |
+
return tf.concat((tf.sin(inputs), tf.cos(inputs)), axis=-1)
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/layers/skip_connection.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Any
|
| 2 |
+
|
| 3 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 4 |
+
from ray.rllib.utils.typing import TensorType
|
| 5 |
+
from ray.rllib.utils.deprecation import deprecation_warning
|
| 6 |
+
from ray.util import log_once
|
| 7 |
+
|
| 8 |
+
tf1, tf, tfv = try_import_tf()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class SkipConnection(tf.keras.layers.Layer if tf else object):
|
| 12 |
+
"""Skip connection layer.
|
| 13 |
+
|
| 14 |
+
Adds the original input to the output (regular residual layer) OR uses
|
| 15 |
+
input as hidden state input to a given fan_in_layer.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
def __init__(self, layer: Any, fan_in_layer: Optional[Any] = None, **kwargs):
|
| 19 |
+
"""Initializes a SkipConnection keras layer object.
|
| 20 |
+
|
| 21 |
+
Args:
|
| 22 |
+
layer (tf.keras.layers.Layer): Any layer processing inputs.
|
| 23 |
+
fan_in_layer (Optional[tf.keras.layers.Layer]): An optional
|
| 24 |
+
layer taking two inputs: The original input and the output
|
| 25 |
+
of `layer`.
|
| 26 |
+
"""
|
| 27 |
+
if log_once("skip_connection"):
|
| 28 |
+
deprecation_warning(
|
| 29 |
+
old="rllib.models.tf.layers.SkipConnection",
|
| 30 |
+
)
|
| 31 |
+
super().__init__(**kwargs)
|
| 32 |
+
self._layer = layer
|
| 33 |
+
self._fan_in_layer = fan_in_layer
|
| 34 |
+
|
| 35 |
+
def call(self, inputs: TensorType, **kwargs) -> TensorType:
|
| 36 |
+
# del kwargs
|
| 37 |
+
outputs = self._layer(inputs, **kwargs)
|
| 38 |
+
# Residual case, just add inputs to outputs.
|
| 39 |
+
if self._fan_in_layer is None:
|
| 40 |
+
outputs = outputs + inputs
|
| 41 |
+
# Fan-in e.g. RNN: Call fan-in with `inputs` and `outputs`.
|
| 42 |
+
else:
|
| 43 |
+
# NOTE: In the GRU case, `inputs` is the state input.
|
| 44 |
+
outputs = self._fan_in_layer((inputs, outputs))
|
| 45 |
+
|
| 46 |
+
return outputs
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/misc.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from typing import Tuple, Any, Optional
|
| 3 |
+
|
| 4 |
+
from ray.rllib.utils.annotations import DeveloperAPI
|
| 5 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 6 |
+
from ray.rllib.utils.typing import TensorType
|
| 7 |
+
|
| 8 |
+
tf1, tf, tfv = try_import_tf()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# TODO: (sven) obsolete this class.
|
| 12 |
+
@DeveloperAPI
|
| 13 |
+
def normc_initializer(std: float = 1.0) -> Any:
|
| 14 |
+
def _initializer(shape, dtype=None, partition_info=None):
|
| 15 |
+
out = np.random.randn(*shape).astype(
|
| 16 |
+
dtype.name if hasattr(dtype, "name") else dtype or np.float32
|
| 17 |
+
)
|
| 18 |
+
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
|
| 19 |
+
return tf.constant(out)
|
| 20 |
+
|
| 21 |
+
return _initializer
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@DeveloperAPI
|
| 25 |
+
def conv2d(
|
| 26 |
+
x: TensorType,
|
| 27 |
+
num_filters: int,
|
| 28 |
+
name: str,
|
| 29 |
+
filter_size: Tuple[int, int] = (3, 3),
|
| 30 |
+
stride: Tuple[int, int] = (1, 1),
|
| 31 |
+
pad: str = "SAME",
|
| 32 |
+
dtype: Optional[Any] = None,
|
| 33 |
+
collections: Optional[Any] = None,
|
| 34 |
+
) -> TensorType:
|
| 35 |
+
|
| 36 |
+
if dtype is None:
|
| 37 |
+
dtype = tf.float32
|
| 38 |
+
|
| 39 |
+
with tf1.variable_scope(name):
|
| 40 |
+
stride_shape = [1, stride[0], stride[1], 1]
|
| 41 |
+
filter_shape = [
|
| 42 |
+
filter_size[0],
|
| 43 |
+
filter_size[1],
|
| 44 |
+
int(x.get_shape()[3]),
|
| 45 |
+
num_filters,
|
| 46 |
+
]
|
| 47 |
+
|
| 48 |
+
# There are "num input feature maps * filter height * filter width"
|
| 49 |
+
# inputs to each hidden unit.
|
| 50 |
+
fan_in = np.prod(filter_shape[:3])
|
| 51 |
+
# Each unit in the lower layer receives a gradient from: "num output
|
| 52 |
+
# feature maps * filter height * filter width" / pooling size.
|
| 53 |
+
fan_out = np.prod(filter_shape[:2]) * num_filters
|
| 54 |
+
# Initialize weights with random weights.
|
| 55 |
+
w_bound = np.sqrt(6 / (fan_in + fan_out))
|
| 56 |
+
|
| 57 |
+
w = tf1.get_variable(
|
| 58 |
+
"W",
|
| 59 |
+
filter_shape,
|
| 60 |
+
dtype,
|
| 61 |
+
tf1.random_uniform_initializer(-w_bound, w_bound),
|
| 62 |
+
collections=collections,
|
| 63 |
+
)
|
| 64 |
+
b = tf1.get_variable(
|
| 65 |
+
"b",
|
| 66 |
+
[1, 1, 1, num_filters],
|
| 67 |
+
initializer=tf1.constant_initializer(0.0),
|
| 68 |
+
collections=collections,
|
| 69 |
+
)
|
| 70 |
+
return tf1.nn.conv2d(x, w, stride_shape, pad) + b
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
@DeveloperAPI
|
| 74 |
+
def linear(
|
| 75 |
+
x: TensorType,
|
| 76 |
+
size: int,
|
| 77 |
+
name: str,
|
| 78 |
+
initializer: Optional[Any] = None,
|
| 79 |
+
bias_init: float = 0.0,
|
| 80 |
+
) -> TensorType:
|
| 81 |
+
w = tf1.get_variable(name + "/w", [x.get_shape()[1], size], initializer=initializer)
|
| 82 |
+
b = tf1.get_variable(
|
| 83 |
+
name + "/b", [size], initializer=tf1.constant_initializer(bias_init)
|
| 84 |
+
)
|
| 85 |
+
return tf.matmul(x, w) + b
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
@DeveloperAPI
|
| 89 |
+
def flatten(x: TensorType) -> TensorType:
|
| 90 |
+
return tf.reshape(x, [-1, np.prod(x.get_shape().as_list()[1:])])
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/recurrent_net.py
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import gymnasium as gym
|
| 3 |
+
from gymnasium.spaces import Discrete, MultiDiscrete
|
| 4 |
+
import logging
|
| 5 |
+
import tree # pip install dm_tree
|
| 6 |
+
from typing import Dict, List, Tuple
|
| 7 |
+
|
| 8 |
+
from ray.rllib.models.modelv2 import ModelV2
|
| 9 |
+
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
|
| 10 |
+
from ray.rllib.policy.rnn_sequencing import add_time_dimension
|
| 11 |
+
from ray.rllib.policy.sample_batch import SampleBatch
|
| 12 |
+
from ray.rllib.policy.view_requirement import ViewRequirement
|
| 13 |
+
from ray.rllib.utils.annotations import OldAPIStack, override
|
| 14 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 15 |
+
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
|
| 16 |
+
from ray.rllib.utils.tf_utils import flatten_inputs_to_1d_tensor, one_hot
|
| 17 |
+
from ray.rllib.utils.typing import ModelConfigDict, TensorType
|
| 18 |
+
from ray.rllib.utils.deprecation import deprecation_warning
|
| 19 |
+
from ray.util.debug import log_once
|
| 20 |
+
|
| 21 |
+
tf1, tf, tfv = try_import_tf()
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@OldAPIStack
|
| 26 |
+
class RecurrentNetwork(TFModelV2):
|
| 27 |
+
"""Helper class to simplify implementing RNN models with TFModelV2.
|
| 28 |
+
|
| 29 |
+
Instead of implementing forward(), you can implement forward_rnn() which
|
| 30 |
+
takes batches with the time dimension added already.
|
| 31 |
+
|
| 32 |
+
Here is an example implementation for a subclass
|
| 33 |
+
``MyRNNClass(RecurrentNetwork)``::
|
| 34 |
+
|
| 35 |
+
def __init__(self, *args, **kwargs):
|
| 36 |
+
super(MyModelClass, self).__init__(*args, **kwargs)
|
| 37 |
+
cell_size = 256
|
| 38 |
+
|
| 39 |
+
# Define input layers
|
| 40 |
+
input_layer = tf.keras.layers.Input(
|
| 41 |
+
shape=(None, obs_space.shape[0]))
|
| 42 |
+
state_in_h = tf.keras.layers.Input(shape=(256, ))
|
| 43 |
+
state_in_c = tf.keras.layers.Input(shape=(256, ))
|
| 44 |
+
seq_in = tf.keras.layers.Input(shape=(), dtype=tf.int32)
|
| 45 |
+
|
| 46 |
+
# Send to LSTM cell
|
| 47 |
+
lstm_out, state_h, state_c = tf.keras.layers.LSTM(
|
| 48 |
+
cell_size, return_sequences=True, return_state=True,
|
| 49 |
+
name="lstm")(
|
| 50 |
+
inputs=input_layer,
|
| 51 |
+
mask=tf.sequence_mask(seq_in),
|
| 52 |
+
initial_state=[state_in_h, state_in_c])
|
| 53 |
+
output_layer = tf.keras.layers.Dense(...)(lstm_out)
|
| 54 |
+
|
| 55 |
+
# Create the RNN model
|
| 56 |
+
self.rnn_model = tf.keras.Model(
|
| 57 |
+
inputs=[input_layer, seq_in, state_in_h, state_in_c],
|
| 58 |
+
outputs=[output_layer, state_h, state_c])
|
| 59 |
+
self.rnn_model.summary()
|
| 60 |
+
"""
|
| 61 |
+
|
| 62 |
+
@override(ModelV2)
|
| 63 |
+
def forward(
|
| 64 |
+
self,
|
| 65 |
+
input_dict: Dict[str, TensorType],
|
| 66 |
+
state: List[TensorType],
|
| 67 |
+
seq_lens: TensorType,
|
| 68 |
+
) -> Tuple[TensorType, List[TensorType]]:
|
| 69 |
+
"""Adds time dimension to batch before sending inputs to forward_rnn().
|
| 70 |
+
|
| 71 |
+
You should implement forward_rnn() in your subclass."""
|
| 72 |
+
# Creating a __init__ function that acts as a passthrough and adding the warning
|
| 73 |
+
# there led to errors probably due to the multiple inheritance. We encountered
|
| 74 |
+
# the same error if we add the Deprecated decorator. We therefore add the
|
| 75 |
+
# deprecation warning here.
|
| 76 |
+
if log_once("recurrent_network_tf"):
|
| 77 |
+
deprecation_warning(
|
| 78 |
+
old="ray.rllib.models.tf.recurrent_net.RecurrentNetwork"
|
| 79 |
+
)
|
| 80 |
+
assert seq_lens is not None
|
| 81 |
+
flat_inputs = input_dict["obs_flat"]
|
| 82 |
+
inputs = add_time_dimension(
|
| 83 |
+
padded_inputs=flat_inputs, seq_lens=seq_lens, framework="tf"
|
| 84 |
+
)
|
| 85 |
+
output, new_state = self.forward_rnn(
|
| 86 |
+
inputs,
|
| 87 |
+
state,
|
| 88 |
+
seq_lens,
|
| 89 |
+
)
|
| 90 |
+
return tf.reshape(output, [-1, self.num_outputs]), new_state
|
| 91 |
+
|
| 92 |
+
def forward_rnn(
|
| 93 |
+
self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType
|
| 94 |
+
) -> Tuple[TensorType, List[TensorType]]:
|
| 95 |
+
"""Call the model with the given input tensors and state.
|
| 96 |
+
|
| 97 |
+
Args:
|
| 98 |
+
inputs: observation tensor with shape [B, T, obs_size].
|
| 99 |
+
state: list of state tensors, each with shape [B, T, size].
|
| 100 |
+
seq_lens: 1d tensor holding input sequence lengths.
|
| 101 |
+
|
| 102 |
+
Returns:
|
| 103 |
+
(outputs, new_state): The model output tensor of shape
|
| 104 |
+
[B, T, num_outputs] and the list of new state tensors each with
|
| 105 |
+
shape [B, size].
|
| 106 |
+
|
| 107 |
+
Sample implementation for the ``MyRNNClass`` example::
|
| 108 |
+
|
| 109 |
+
def forward_rnn(self, inputs, state, seq_lens):
|
| 110 |
+
model_out, h, c = self.rnn_model([inputs, seq_lens] + state)
|
| 111 |
+
return model_out, [h, c]
|
| 112 |
+
"""
|
| 113 |
+
raise NotImplementedError("You must implement this for a RNN model")
|
| 114 |
+
|
| 115 |
+
def get_initial_state(self) -> List[TensorType]:
|
| 116 |
+
"""Get the initial recurrent state values for the model.
|
| 117 |
+
|
| 118 |
+
Returns:
|
| 119 |
+
list of np.array objects, if any
|
| 120 |
+
|
| 121 |
+
Sample implementation for the ``MyRNNClass`` example::
|
| 122 |
+
|
| 123 |
+
def get_initial_state(self):
|
| 124 |
+
return [
|
| 125 |
+
np.zeros(self.cell_size, np.float32),
|
| 126 |
+
np.zeros(self.cell_size, np.float32),
|
| 127 |
+
]
|
| 128 |
+
"""
|
| 129 |
+
raise NotImplementedError("You must implement this for a RNN model")
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
@OldAPIStack
|
| 133 |
+
class LSTMWrapper(RecurrentNetwork):
|
| 134 |
+
"""An LSTM wrapper serving as an interface for ModelV2s that set use_lstm."""
|
| 135 |
+
|
| 136 |
+
def __init__(
|
| 137 |
+
self,
|
| 138 |
+
obs_space: gym.spaces.Space,
|
| 139 |
+
action_space: gym.spaces.Space,
|
| 140 |
+
num_outputs: int,
|
| 141 |
+
model_config: ModelConfigDict,
|
| 142 |
+
name: str,
|
| 143 |
+
):
|
| 144 |
+
super(LSTMWrapper, self).__init__(
|
| 145 |
+
obs_space, action_space, None, model_config, name
|
| 146 |
+
)
|
| 147 |
+
# At this point, self.num_outputs is the number of nodes coming
|
| 148 |
+
# from the wrapped (underlying) model. In other words, self.num_outputs
|
| 149 |
+
# is the input size for the LSTM layer.
|
| 150 |
+
# If None, set it to the observation space.
|
| 151 |
+
if self.num_outputs is None:
|
| 152 |
+
self.num_outputs = int(np.prod(self.obs_space.shape))
|
| 153 |
+
|
| 154 |
+
self.cell_size = model_config["lstm_cell_size"]
|
| 155 |
+
self.use_prev_action = model_config["lstm_use_prev_action"]
|
| 156 |
+
self.use_prev_reward = model_config["lstm_use_prev_reward"]
|
| 157 |
+
|
| 158 |
+
self.action_space_struct = get_base_struct_from_space(self.action_space)
|
| 159 |
+
self.action_dim = 0
|
| 160 |
+
|
| 161 |
+
for space in tree.flatten(self.action_space_struct):
|
| 162 |
+
if isinstance(space, Discrete):
|
| 163 |
+
self.action_dim += space.n
|
| 164 |
+
elif isinstance(space, MultiDiscrete):
|
| 165 |
+
self.action_dim += np.sum(space.nvec)
|
| 166 |
+
elif space.shape is not None:
|
| 167 |
+
self.action_dim += int(np.prod(space.shape))
|
| 168 |
+
else:
|
| 169 |
+
self.action_dim += int(len(space))
|
| 170 |
+
|
| 171 |
+
# Add prev-action/reward nodes to input to LSTM.
|
| 172 |
+
if self.use_prev_action:
|
| 173 |
+
self.num_outputs += self.action_dim
|
| 174 |
+
if self.use_prev_reward:
|
| 175 |
+
self.num_outputs += 1
|
| 176 |
+
|
| 177 |
+
# Define input layers.
|
| 178 |
+
input_layer = tf.keras.layers.Input(
|
| 179 |
+
shape=(None, self.num_outputs), name="inputs"
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
# Set self.num_outputs to the number of output nodes desired by the
|
| 183 |
+
# caller of this constructor.
|
| 184 |
+
self.num_outputs = num_outputs
|
| 185 |
+
|
| 186 |
+
state_in_h = tf.keras.layers.Input(shape=(self.cell_size,), name="h")
|
| 187 |
+
state_in_c = tf.keras.layers.Input(shape=(self.cell_size,), name="c")
|
| 188 |
+
seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)
|
| 189 |
+
|
| 190 |
+
# Preprocess observation with a hidden layer and send to LSTM cell
|
| 191 |
+
lstm_out, state_h, state_c = tf.keras.layers.LSTM(
|
| 192 |
+
self.cell_size, return_sequences=True, return_state=True, name="lstm"
|
| 193 |
+
)(
|
| 194 |
+
inputs=input_layer,
|
| 195 |
+
mask=tf.sequence_mask(seq_in),
|
| 196 |
+
initial_state=[state_in_h, state_in_c],
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
# Postprocess LSTM output with another hidden layer and compute values
|
| 200 |
+
logits = tf.keras.layers.Dense(
|
| 201 |
+
self.num_outputs, activation=tf.keras.activations.linear, name="logits"
|
| 202 |
+
)(lstm_out)
|
| 203 |
+
values = tf.keras.layers.Dense(1, activation=None, name="values")(lstm_out)
|
| 204 |
+
|
| 205 |
+
# Create the RNN model
|
| 206 |
+
self._rnn_model = tf.keras.Model(
|
| 207 |
+
inputs=[input_layer, seq_in, state_in_h, state_in_c],
|
| 208 |
+
outputs=[logits, values, state_h, state_c],
|
| 209 |
+
)
|
| 210 |
+
# Print out model summary in INFO logging mode.
|
| 211 |
+
if logger.isEnabledFor(logging.INFO):
|
| 212 |
+
self._rnn_model.summary()
|
| 213 |
+
|
| 214 |
+
# Add prev-a/r to this model's view, if required.
|
| 215 |
+
if model_config["lstm_use_prev_action"]:
|
| 216 |
+
self.view_requirements[SampleBatch.PREV_ACTIONS] = ViewRequirement(
|
| 217 |
+
SampleBatch.ACTIONS, space=self.action_space, shift=-1
|
| 218 |
+
)
|
| 219 |
+
if model_config["lstm_use_prev_reward"]:
|
| 220 |
+
self.view_requirements[SampleBatch.PREV_REWARDS] = ViewRequirement(
|
| 221 |
+
SampleBatch.REWARDS, shift=-1
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
@override(RecurrentNetwork)
|
| 225 |
+
def forward(
|
| 226 |
+
self,
|
| 227 |
+
input_dict: Dict[str, TensorType],
|
| 228 |
+
state: List[TensorType],
|
| 229 |
+
seq_lens: TensorType,
|
| 230 |
+
) -> Tuple[TensorType, List[TensorType]]:
|
| 231 |
+
assert seq_lens is not None
|
| 232 |
+
# Push obs through "unwrapped" net's `forward()` first.
|
| 233 |
+
wrapped_out, _ = self._wrapped_forward(input_dict, [], None)
|
| 234 |
+
|
| 235 |
+
# Concat. prev-action/reward if required.
|
| 236 |
+
prev_a_r = []
|
| 237 |
+
|
| 238 |
+
# Prev actions.
|
| 239 |
+
if self.model_config["lstm_use_prev_action"]:
|
| 240 |
+
prev_a = input_dict[SampleBatch.PREV_ACTIONS]
|
| 241 |
+
# If actions are not processed yet (in their original form as
|
| 242 |
+
# have been sent to environment):
|
| 243 |
+
# Flatten/one-hot into 1D array.
|
| 244 |
+
if self.model_config["_disable_action_flattening"]:
|
| 245 |
+
prev_a_r.append(
|
| 246 |
+
flatten_inputs_to_1d_tensor(
|
| 247 |
+
prev_a,
|
| 248 |
+
spaces_struct=self.action_space_struct,
|
| 249 |
+
time_axis=False,
|
| 250 |
+
)
|
| 251 |
+
)
|
| 252 |
+
# If actions are already flattened (but not one-hot'd yet!),
|
| 253 |
+
# one-hot discrete/multi-discrete actions here.
|
| 254 |
+
else:
|
| 255 |
+
if isinstance(self.action_space, (Discrete, MultiDiscrete)):
|
| 256 |
+
prev_a = one_hot(prev_a, self.action_space)
|
| 257 |
+
prev_a_r.append(
|
| 258 |
+
tf.reshape(tf.cast(prev_a, tf.float32), [-1, self.action_dim])
|
| 259 |
+
)
|
| 260 |
+
# Prev rewards.
|
| 261 |
+
if self.model_config["lstm_use_prev_reward"]:
|
| 262 |
+
prev_a_r.append(
|
| 263 |
+
tf.reshape(
|
| 264 |
+
tf.cast(input_dict[SampleBatch.PREV_REWARDS], tf.float32), [-1, 1]
|
| 265 |
+
)
|
| 266 |
+
)
|
| 267 |
+
|
| 268 |
+
# Concat prev. actions + rewards to the "main" input.
|
| 269 |
+
if prev_a_r:
|
| 270 |
+
wrapped_out = tf.concat([wrapped_out] + prev_a_r, axis=1)
|
| 271 |
+
|
| 272 |
+
# Push everything through our LSTM.
|
| 273 |
+
input_dict["obs_flat"] = wrapped_out
|
| 274 |
+
return super().forward(input_dict, state, seq_lens)
|
| 275 |
+
|
| 276 |
+
@override(RecurrentNetwork)
|
| 277 |
+
def forward_rnn(
|
| 278 |
+
self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType
|
| 279 |
+
) -> Tuple[TensorType, List[TensorType]]:
|
| 280 |
+
model_out, self._value_out, h, c = self._rnn_model([inputs, seq_lens] + state)
|
| 281 |
+
return model_out, [h, c]
|
| 282 |
+
|
| 283 |
+
@override(ModelV2)
|
| 284 |
+
def get_initial_state(self) -> List[np.ndarray]:
|
| 285 |
+
return [
|
| 286 |
+
np.zeros(self.cell_size, np.float32),
|
| 287 |
+
np.zeros(self.cell_size, np.float32),
|
| 288 |
+
]
|
| 289 |
+
|
| 290 |
+
@override(ModelV2)
|
| 291 |
+
def value_function(self) -> TensorType:
|
| 292 |
+
return tf.reshape(self._value_out, [-1])
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/tf_action_dist.py
ADDED
|
@@ -0,0 +1,735 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import gymnasium as gym
|
| 3 |
+
from math import log
|
| 4 |
+
import numpy as np
|
| 5 |
+
import tree # pip install dm_tree
|
| 6 |
+
from typing import Optional
|
| 7 |
+
|
| 8 |
+
from ray.rllib.models.action_dist import ActionDistribution
|
| 9 |
+
from ray.rllib.models.modelv2 import ModelV2
|
| 10 |
+
from ray.rllib.utils import MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT, SMALL_NUMBER
|
| 11 |
+
from ray.rllib.utils.annotations import OldAPIStack, override
|
| 12 |
+
from ray.rllib.utils.framework import try_import_tf, try_import_tfp
|
| 13 |
+
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
|
| 14 |
+
from ray.rllib.utils.typing import TensorType, List, Union, Tuple, ModelConfigDict
|
| 15 |
+
|
| 16 |
+
tf1, tf, tfv = try_import_tf()
|
| 17 |
+
tfp = try_import_tfp()
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@OldAPIStack
class TFActionDistribution(ActionDistribution):
    """TF-specific extensions for building action distributions."""

    @override(ActionDistribution)
    def __init__(self, inputs: List[TensorType], model: ModelV2):
        super().__init__(inputs, model)
        # Build the stochastic sample op exactly once so that `sample()` and
        # `sampled_action_logp()` refer to the very same random draw.
        self.sample_op = self._build_sample_op()
        self.sampled_action_logp_op = self.logp(self.sample_op)

    def _build_sample_op(self) -> TensorType:
        """Implement this instead of sample(), to enable op reuse.

        This is needed since the sample op is non-deterministic and is shared
        between sample() and sampled_action_logp().
        """
        raise NotImplementedError

    @override(ActionDistribution)
    def sample(self) -> TensorType:
        """Returns the (pre-built) stochastic sample op."""
        return self.sample_op

    @override(ActionDistribution)
    def sampled_action_logp(self) -> TensorType:
        """Returns the log-likelihood op of the already drawn sample."""
        return self.sampled_action_logp_op
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@OldAPIStack
class Categorical(TFActionDistribution):
    """Categorical distribution for discrete action spaces."""

    def __init__(
        self, inputs: List[TensorType], model: ModelV2 = None, temperature: float = 1.0
    ):
        assert temperature > 0.0, "Categorical `temperature` must be > 0.0!"
        # Temperature-scaled softmax: dividing the logits by the temperature
        # before passing them on is all that is needed.
        super().__init__(inputs / temperature, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Greedy action: per-row index of the largest logit.
        return tf.math.argmax(self.inputs, axis=1)

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Negative cross-entropy of the int action labels vs. our logits
        # equals the log-likelihood of those actions.
        return -tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.inputs, labels=tf.cast(x, tf.int32)
        )

    @override(ActionDistribution)
    def entropy(self) -> TensorType:
        # Numerically stable entropy: shift logits by their row-max first.
        shifted = self.inputs - tf.reduce_max(self.inputs, axis=1, keepdims=True)
        exp_shifted = tf.exp(shifted)
        normalizer = tf.reduce_sum(exp_shifted, axis=1, keepdims=True)
        probs = exp_shifted / normalizer
        return tf.reduce_sum(probs * (tf.math.log(normalizer) - shifted), axis=1)

    @override(ActionDistribution)
    def kl(self, other: ActionDistribution) -> TensorType:
        # KL(self || other), computed from max-shifted logits for stability.
        shifted_self = self.inputs - tf.reduce_max(self.inputs, axis=1, keepdims=True)
        shifted_other = other.inputs - tf.reduce_max(
            other.inputs, axis=1, keepdims=True
        )
        exp_self = tf.exp(shifted_self)
        exp_other = tf.exp(shifted_other)
        z_self = tf.reduce_sum(exp_self, axis=1, keepdims=True)
        z_other = tf.reduce_sum(exp_other, axis=1, keepdims=True)
        probs_self = exp_self / z_self
        return tf.reduce_sum(
            probs_self
            * (
                shifted_self
                - tf.math.log(z_self)
                - shifted_other
                + tf.math.log(z_other)
            ),
            axis=1,
        )

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # Draw one action per batch row, then drop the extra sample dim.
        return tf.squeeze(tf.random.categorical(self.inputs, 1), axis=1)

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(action_space, model_config):
        # One logit per discrete action.
        return action_space.n
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
@OldAPIStack
def get_categorical_class_with_temperature(t: float):
    """Returns a Categorical subclass whose default temperature is `t`."""

    class CategoricalWithTemperature(Categorical):
        # Same as Categorical, but with `t` baked in as the default
        # temperature value.
        def __init__(self, inputs, model=None, temperature=t):
            super().__init__(inputs, model, temperature)

    return CategoricalWithTemperature
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@OldAPIStack
class MultiCategorical(TFActionDistribution):
    """MultiCategorical distribution for MultiDiscrete action spaces."""

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2,
        input_lens: Union[List[int], np.ndarray, Tuple[int, ...]],
        action_space=None,
    ):
        # skip TFActionDistribution init
        ActionDistribution.__init__(self, inputs, model)
        # One Categorical per sub-action; `input_lens` gives the number of
        # logits each sub-action occupies within the flat `inputs` tensor.
        self.cats = [
            Categorical(input_, model)
            for input_ in tf.split(inputs, input_lens, axis=1)
        ]
        self.action_space = action_space
        if self.action_space is None:
            # Derive a MultiDiscrete space from the per-category logit sizes.
            self.action_space = gym.spaces.MultiDiscrete(
                [c.inputs.shape[1] for c in self.cats]
            )
        # Replicate TFActionDistribution.__init__'s op construction here,
        # after `self.cats` exists (both ops depend on it).
        self.sample_op = self._build_sample_op()
        self.sampled_action_logp_op = self.logp(self.sample_op)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Greedy per-category actions, stacked along a new axis 1.
        sample_ = tf.stack([cat.deterministic_sample() for cat in self.cats], axis=1)
        if isinstance(self.action_space, gym.spaces.Box):
            # Int-Box action space: reshape to the Box's shape and cast to
            # its dtype.
            return tf.cast(
                tf.reshape(sample_, [-1] + list(self.action_space.shape)),
                self.action_space.dtype,
            )
        return sample_

    @override(ActionDistribution)
    def logp(self, actions: TensorType) -> TensorType:
        # If tensor is provided, unstack it into list.
        if isinstance(actions, tf.Tensor):
            if isinstance(self.action_space, gym.spaces.Box):
                # Flatten Box-shaped actions back to (batch, num_sub_actions).
                actions = tf.reshape(
                    actions, [-1, int(np.prod(self.action_space.shape))]
                )
            elif isinstance(self.action_space, gym.spaces.MultiDiscrete):
                actions.set_shape((None, len(self.cats)))
            actions = tf.unstack(tf.cast(actions, tf.int32), axis=1)
        # Joint logp = sum of the per-category logps (independent categories).
        logps = tf.stack([cat.logp(act) for cat, act in zip(self.cats, actions)])
        return tf.reduce_sum(logps, axis=0)

    @override(ActionDistribution)
    def multi_entropy(self) -> TensorType:
        # Per-category entropies, shape (batch, num_categories).
        return tf.stack([cat.entropy() for cat in self.cats], axis=1)

    @override(ActionDistribution)
    def entropy(self) -> TensorType:
        # Joint entropy = sum of the per-category entropies.
        return tf.reduce_sum(self.multi_entropy(), axis=1)

    @override(ActionDistribution)
    def multi_kl(self, other: ActionDistribution) -> TensorType:
        # Per-category KL divergences vs. `other`, shape (batch, num_cats).
        return tf.stack(
            [cat.kl(oth_cat) for cat, oth_cat in zip(self.cats, other.cats)], axis=1
        )

    @override(ActionDistribution)
    def kl(self, other: ActionDistribution) -> TensorType:
        # Joint KL = sum of the per-category KLs.
        return tf.reduce_sum(self.multi_kl(other), axis=1)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # One stochastic draw per category, stacked along axis 1.
        sample_op = tf.stack([cat.sample() for cat in self.cats], axis=1)
        if isinstance(self.action_space, gym.spaces.Box):
            # Int-Box action space: reshape/cast like deterministic_sample().
            return tf.cast(
                tf.reshape(sample_op, [-1] + list(self.action_space.shape)),
                dtype=self.action_space.dtype,
            )
        return sample_op

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # Int Box.
        if isinstance(action_space, gym.spaces.Box):
            assert action_space.dtype.name.startswith("int")
            low_ = np.min(action_space.low)
            high_ = np.max(action_space.high)
            # All dims must share one uniform [low, high] interval.
            assert np.all(action_space.low == low_)
            assert np.all(action_space.high == high_)
            return np.prod(action_space.shape, dtype=np.int32) * (high_ - low_ + 1)
        # MultiDiscrete space.
        else:
            # nvec is already integer, so no casting needed.
            return np.sum(action_space.nvec)
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
@OldAPIStack
class SlateMultiCategorical(Categorical):
    """MultiCategorical distribution for MultiDiscrete action spaces.

    The action space must be uniform, meaning all nvec items have the same size, e.g.
    MultiDiscrete([10, 10, 10]), where 10 is the number of candidates to pick from
    and 3 is the slate size (pick 3 out of 10). When picking candidates, no candidate
    must be picked more than once.
    """

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2 = None,
        temperature: float = 1.0,
        action_space: Optional[gym.spaces.MultiDiscrete] = None,
        all_slates=None,
    ):
        assert temperature > 0.0, "Categorical `temperature` must be > 0.0!"
        # Allow softmax formula w/ temperature != 1.0:
        # Divide inputs by temperature.
        super().__init__(inputs / temperature, model)
        self.action_space = action_space
        # Assert uniformness of the action space (all discrete buckets have the same
        # size).
        assert isinstance(self.action_space, gym.spaces.MultiDiscrete) and all(
            n == self.action_space.nvec[0] for n in self.action_space.nvec
        )
        # Collection of candidate slates; the base Categorical samples an
        # index into it (see deterministic_sample below).
        self.all_slates = all_slates

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Get a sample from the underlying Categorical (batch of ints).
        sample = super().deterministic_sample()
        # Use the sampled ints to pick the actual slates.
        return tf.gather(self.all_slates, sample)

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # TODO: Implement.
        # NOTE(review): placeholder — returns a constant 1.0 per batch item,
        # which is not a real log-likelihood (logp == 0.0 would correspond to
        # probability 1.0). Callers must not rely on this value.
        return tf.ones_like(self.inputs[:, 0])
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
@OldAPIStack
class GumbelSoftmax(TFActionDistribution):
    """GumbelSoftmax distr. (for differentiable sampling in discr. actions

    The Gumbel Softmax distribution [1] (also known as the Concrete [2]
    distribution) is a close cousin of the relaxed one-hot categorical
    distribution, whose tfp implementation we will use here plus
    adjusted `sample_...` and `log_prob` methods. See discussion at [0].

    [0] https://stackoverflow.com/questions/56226133/
    soft-actor-critic-with-discrete-action-space

    [1] Categorical Reparametrization with Gumbel-Softmax (Jang et al, 2017):
    https://arxiv.org/abs/1611.01144
    [2] The Concrete Distribution: A Continuous Relaxation of Discrete Random
    Variables (Maddison et al, 2017) https://arxiv.org/abs/1611.00712
    """

    def __init__(
        self, inputs: List[TensorType], model: ModelV2 = None, temperature: float = 1.0
    ):
        """Initializes a GumbelSoftmax distribution.

        Args:
            temperature: Temperature parameter. For low temperatures,
                the expected value approaches a categorical random variable.
                For high temperatures, the expected value approaches a uniform
                distribution.
        """
        assert temperature >= 0.0
        self.dist = tfp.distributions.RelaxedOneHotCategorical(
            temperature=temperature, logits=inputs
        )
        # Softmax over the underlying categorical's logits -> class probs.
        self.probs = tf.nn.softmax(self.dist._distribution.logits)
        super().__init__(inputs, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Return the dist object's prob values.
        return self.probs

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Override since the implementation of tfp.RelaxedOneHotCategorical
        # yields positive values.
        if x.shape != self.dist.logits.shape:
            # `x` holds integer class indices -> convert to one-hot vectors
            # first, so the elementwise product below is meaningful.
            values = tf.one_hot(
                x, self.dist.logits.shape.as_list()[-1], dtype=tf.float32
            )
            assert values.shape == self.dist.logits.shape, (
                values.shape,
                self.dist.logits.shape,
            )
            # Bug fix: previously, `values` was computed (and asserted) but
            # never used — the raw integer `x` flowed into the formula below.
            x = values

        # [0]'s implementation (see line below) seems to be an approximation
        # to the actual Gumbel Softmax density.
        return -tf.reduce_sum(
            -x * tf.nn.log_softmax(self.dist.logits, axis=-1), axis=-1
        )

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # Differentiable (relaxed one-hot) sample.
        return self.dist.sample()

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # One logit per discrete action.
        return action_space.n
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
@OldAPIStack
class DiagGaussian(TFActionDistribution):
    """Action distribution where each vector element is a gaussian.

    The first half of the input vector defines the gaussian means, and the
    second half the gaussian standard deviations.
    """

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2,
        *,
        action_space: Optional[gym.spaces.Space] = None
    ):
        # First half of `inputs` = means; second half = log standard devs.
        mean, log_std = tf.split(inputs, 2, axis=1)
        self.mean = mean
        self.log_std = log_std
        self.std = tf.exp(log_std)
        # Remember to squeeze action samples in case action space is Box(shape)
        self.zero_action_dim = action_space and action_space.shape == ()
        super().__init__(inputs, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # The mode/mean of a Gaussian.
        return self.mean

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Cover case where action space is Box(shape=()).
        # (rank-1 `x` -> re-add the action dimension.)
        if int(tf.shape(x).shape[0]) == 1:
            x = tf.expand_dims(x, axis=1)
        # Diagonal-Gaussian log-density, summed over action dims:
        # -0.5*sum(((x-mean)/std)^2) - 0.5*D*log(2*pi) - sum(log_std).
        return (
            -0.5
            * tf.reduce_sum(
                tf.math.square((tf.cast(x, tf.float32) - self.mean) / self.std), axis=1
            )
            - 0.5 * np.log(2.0 * np.pi) * tf.cast(tf.shape(x)[1], tf.float32)
            - tf.reduce_sum(self.log_std, axis=1)
        )

    @override(ActionDistribution)
    def kl(self, other: ActionDistribution) -> TensorType:
        assert isinstance(other, DiagGaussian)
        # Closed-form KL between two diagonal Gaussians, summed over dims.
        return tf.reduce_sum(
            other.log_std
            - self.log_std
            + (tf.math.square(self.std) + tf.math.square(self.mean - other.mean))
            / (2.0 * tf.math.square(other.std))
            - 0.5,
            axis=1,
        )

    @override(ActionDistribution)
    def entropy(self) -> TensorType:
        # Closed-form entropy of a diagonal Gaussian, summed over dims.
        return tf.reduce_sum(self.log_std + 0.5 * np.log(2.0 * np.pi * np.e), axis=1)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # Reparameterized sample: mean + std * N(0, 1).
        sample = self.mean + self.std * tf.random.normal(tf.shape(self.mean))
        if self.zero_action_dim:
            # Box(shape=()): drop the trailing action dim again.
            return tf.squeeze(sample, axis=-1)
        return sample

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # One mean and one log-std per action dimension.
        return np.prod(action_space.shape, dtype=np.int32) * 2
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
@OldAPIStack
class SquashedGaussian(TFActionDistribution):
    """A tanh-squashed Gaussian distribution defined by: mean, std, low, high.

    The distribution will never return low or high exactly, but
    `low`+SMALL_NUMBER or `high`-SMALL_NUMBER respectively.
    """

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2,
        low: float = -1.0,
        high: float = 1.0,
    ):
        """Parameterizes the distribution via `inputs`.

        Args:
            low: The lowest possible sampling value
                (excluding this value).
            high: The highest possible sampling value
                (excluding this value).
        """
        assert tfp is not None
        # First half of `inputs` = means; second half = log standard devs.
        mean, log_std = tf.split(inputs, 2, axis=-1)
        # Clip `scale` values (coming from NN) to reasonable values.
        log_std = tf.clip_by_value(log_std, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT)
        std = tf.exp(log_std)
        self.distr = tfp.distributions.Normal(loc=mean, scale=std)
        assert np.all(np.less(low, high))
        self.low = low
        self.high = high
        super().__init__(inputs, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Mode of the underlying Normal, mapped into [low, high].
        mean = self.distr.mean()
        return self._squash(mean)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # Stochastic Normal sample, mapped into [low, high].
        return self._squash(self.distr.sample())

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Unsquash values (from [low,high] to ]-inf,inf[)
        unsquashed_values = tf.cast(self._unsquash(x), self.inputs.dtype)
        # Get log prob of unsquashed values from our Normal.
        log_prob_gaussian = self.distr.log_prob(unsquashed_values)
        # For safety reasons, clamp somehow, only then sum up.
        log_prob_gaussian = tf.clip_by_value(log_prob_gaussian, -100, 100)
        log_prob_gaussian = tf.reduce_sum(log_prob_gaussian, axis=-1)
        # Get log-prob for squashed Gaussian.
        # Change-of-variables correction for the tanh squashing:
        # subtract sum(log(1 - tanh(u)^2)).
        unsquashed_values_tanhd = tf.math.tanh(unsquashed_values)
        log_prob = log_prob_gaussian - tf.reduce_sum(
            tf.math.log(1 - unsquashed_values_tanhd**2 + SMALL_NUMBER), axis=-1
        )
        return log_prob

    def sample_logp(self):
        # Draw raw Normal sample `z`, squash into [low, high], and return
        # both the actions and their corrected log-likelihood.
        # NOTE(review): the correction term below uses the squashed `actions`
        # directly (1 - actions^2); that is only exact for low=-1.0,
        # high=1.0 — confirm for other bounds.
        z = self.distr.sample()
        actions = self._squash(z)
        return actions, tf.reduce_sum(
            self.distr.log_prob(z) - tf.math.log(1 - actions * actions + SMALL_NUMBER),
            axis=-1,
        )

    @override(ActionDistribution)
    def entropy(self) -> TensorType:
        raise ValueError("Entropy not defined for SquashedGaussian!")

    @override(ActionDistribution)
    def kl(self, other: ActionDistribution) -> TensorType:
        raise ValueError("KL not defined for SquashedGaussian!")

    def _squash(self, raw_values: TensorType) -> TensorType:
        # Returned values are within [low, high] (including `low` and `high`).
        squashed = ((tf.math.tanh(raw_values) + 1.0) / 2.0) * (
            self.high - self.low
        ) + self.low
        return tf.clip_by_value(squashed, self.low, self.high)

    def _unsquash(self, values: TensorType) -> TensorType:
        # Inverse of _squash: [low, high] -> (-inf, inf) via atanh.
        normed_values = (values - self.low) / (self.high - self.low) * 2.0 - 1.0
        # Stabilize input to atanh.
        save_normed_values = tf.clip_by_value(
            normed_values, -1.0 + SMALL_NUMBER, 1.0 - SMALL_NUMBER
        )
        unsquashed = tf.math.atanh(save_normed_values)
        return unsquashed

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # One mean and one log-std per action dimension.
        return np.prod(action_space.shape, dtype=np.int32) * 2
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
@OldAPIStack
class Beta(TFActionDistribution):
    """
    A Beta distribution is defined on the interval [0, 1] and parameterized by
    shape parameters alpha and beta (also called concentration parameters).

    PDF(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
    with Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
    and Gamma(n) = (n - 1)!
    """

    def __init__(
        self,
        inputs: List[TensorType],
        model: ModelV2,
        low: float = 0.0,
        high: float = 1.0,
    ):
        # Stabilize input parameters (possibly coming from a linear layer).
        inputs = tf.clip_by_value(inputs, log(SMALL_NUMBER), -log(SMALL_NUMBER))
        # Softplus + 1 maps the clipped values into (1, inf), keeping both
        # concentration parameters strictly greater than 1.
        inputs = tf.math.log(tf.math.exp(inputs) + 1.0) + 1.0
        self.low = low
        self.high = high
        alpha, beta = tf.split(inputs, 2, axis=-1)
        # Note: concentration0==beta, concentration1=alpha (!)
        self.dist = tfp.distributions.Beta(concentration1=alpha, concentration0=beta)
        super().__init__(inputs, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Mean of the Beta, mapped into [low, high].
        return self._squash(self.dist.mean())

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # Stochastic Beta sample, mapped into [low, high].
        return self._squash(self.dist.sample())

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Map actions back into [0, 1] before querying the Beta density.
        return tf.math.reduce_sum(self.dist.log_prob(self._unsquash(x)), axis=-1)

    def _squash(self, raw_values: TensorType) -> TensorType:
        # Linear map [0, 1] -> [low, high].
        return raw_values * (self.high - self.low) + self.low

    def _unsquash(self, values: TensorType) -> TensorType:
        # Linear map [low, high] -> [0, 1].
        return (values - self.low) / (self.high - self.low)

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # Two concentration parameters (alpha, beta) per action dimension.
        return np.prod(action_space.shape, dtype=np.int32) * 2
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
@OldAPIStack
class Deterministic(TFActionDistribution):
    """Action distribution that returns the input values directly.

    This is similar to DiagGaussian with standard deviation zero (thus only
    requiring the "mean" values as NN output).
    """

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # The model outputs ARE the actions.
        return self.inputs

    @override(TFActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Point mass -> log-likelihood of any produced action is 0.0.
        return tf.zeros_like(self.inputs)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        # "Sampling" is deterministic: pass the inputs straight through.
        return self.inputs

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # One output per action dimension.
        return np.prod(action_space.shape, dtype=np.int32)
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
@OldAPIStack
class MultiActionDistribution(TFActionDistribution):
    """Action distribution that operates on a set of actions.

    Args:
        inputs (Tensor list): A list of tensors from which to compute samples.
    """

    def __init__(
        self, inputs, model, *, child_distributions, input_lens, action_space, **kwargs
    ):
        # Skip TFActionDistribution.__init__ (no shared sample op is built
        # here; sampling delegates to the child distributions).
        ActionDistribution.__init__(self, inputs, model)

        self.action_space_struct = get_base_struct_from_space(action_space)

        # Split the flat input tensor into per-child chunks and construct
        # one child distribution per chunk.
        self.input_lens = np.array(input_lens, dtype=np.int32)
        split_inputs = tf.split(inputs, self.input_lens, axis=1)
        self.flat_child_distributions = tree.map_structure(
            lambda dist, input_: dist(input_, model, **kwargs),
            child_distributions,
            split_inputs,
        )

    @override(ActionDistribution)
    def logp(self, x):
        # Single tensor input (all merged).
        if isinstance(x, (tf.Tensor, np.ndarray)):
            # Determine how many columns of `x` belong to each child.
            split_indices = []
            for dist in self.flat_child_distributions:
                if isinstance(dist, Categorical):
                    split_indices.append(1)
                elif (
                    isinstance(dist, MultiCategorical) and dist.action_space is not None
                ):
                    split_indices.append(np.prod(dist.action_space.shape))
                else:
                    # Fall back to probing the child's sample shape.
                    sample = dist.sample()
                    # Cover Box(shape=()) case.
                    if len(sample.shape) == 1:
                        split_indices.append(1)
                    else:
                        split_indices.append(tf.shape(sample)[1])
            split_x = tf.split(x, split_indices, axis=1)
        # Structured or flattened (by single action component) input.
        else:
            split_x = tree.flatten(x)

        def map_(val, dist):
            # Remove extra categorical dimension.
            if isinstance(dist, Categorical):
                val = tf.cast(
                    tf.squeeze(val, axis=-1) if len(val.shape) > 1 else val, tf.int32
                )
            return dist.logp(val)

        # Remove extra categorical dimension and take the logp of each
        # component.
        flat_logps = tree.map_structure(map_, split_x, self.flat_child_distributions)

        # Joint logp = sum over all components.
        return functools.reduce(lambda a, b: a + b, flat_logps)

    @override(ActionDistribution)
    def kl(self, other):
        # Joint KL = sum of pairwise child KLs.
        kl_list = [
            d.kl(o)
            for d, o in zip(
                self.flat_child_distributions, other.flat_child_distributions
            )
        ]
        return functools.reduce(lambda a, b: a + b, kl_list)

    @override(ActionDistribution)
    def entropy(self):
        # Joint entropy = sum of the child entropies.
        entropy_list = [d.entropy() for d in self.flat_child_distributions]
        return functools.reduce(lambda a, b: a + b, entropy_list)

    @override(ActionDistribution)
    def sample(self):
        # Re-nest the flat children into the action-space structure, then
        # sample each one.
        child_distributions = tree.unflatten_as(
            self.action_space_struct, self.flat_child_distributions
        )
        return tree.map_structure(lambda s: s.sample(), child_distributions)

    @override(ActionDistribution)
    def deterministic_sample(self):
        # Same as sample(), but greedy per child.
        child_distributions = tree.unflatten_as(
            self.action_space_struct, self.flat_child_distributions
        )
        return tree.map_structure(
            lambda s: s.deterministic_sample(), child_distributions
        )

    @override(TFActionDistribution)
    def sampled_action_logp(self):
        # Sum the children's sampled-action logps.
        p = self.flat_child_distributions[0].sampled_action_logp()
        for c in self.flat_child_distributions[1:]:
            p += c.sampled_action_logp()
        return p

    @override(ActionDistribution)
    def required_model_output_shape(self, action_space, model_config):
        # Total number of logits/params over all children.
        return np.sum(self.input_lens, dtype=np.int32)
|
| 679 |
+
|
| 680 |
+
|
| 681 |
+
@OldAPIStack
class Dirichlet(TFActionDistribution):
    """Dirichlet distribution for continuous actions that are between
    [0,1] and sum to 1.

    e.g. actions that represent resource allocation."""

    def __init__(self, inputs: List[TensorType], model: ModelV2):
        """Input is a tensor of logits. The exponential of logits is used to
        parametrize the Dirichlet distribution as all parameters need to be
        positive. An arbitrary small epsilon is added to the concentration
        parameters to be zero due to numerical error.

        See issue #4440 for more details.
        """
        self.epsilon = 1e-7
        # exp() guarantees positivity; epsilon keeps the concentration
        # parameters away from exactly zero.
        concentration = tf.exp(inputs) + self.epsilon
        self.dist = tf1.distributions.Dirichlet(
            concentration=concentration,
            validate_args=True,
            allow_nan_stats=False,
        )
        super().__init__(concentration, model)

    @override(ActionDistribution)
    def deterministic_sample(self) -> TensorType:
        # Softmax over the concentrations yields a valid simplex point.
        return tf.nn.softmax(self.dist.concentration)

    @override(ActionDistribution)
    def logp(self, x: TensorType) -> TensorType:
        # Support of Dirichlet are positive real numbers. x is already
        # an array of positive numbers, but we clip to avoid zeros due to
        # numerical errors.
        x = tf.maximum(x, self.epsilon)
        # Re-normalize so the clipped values still sum to 1.
        x = x / tf.reduce_sum(x, axis=-1, keepdims=True)
        return self.dist.log_prob(x)

    @override(ActionDistribution)
    def entropy(self) -> TensorType:
        # Delegate to the underlying Dirichlet.
        return self.dist.entropy()

    @override(ActionDistribution)
    def kl(self, other: ActionDistribution) -> TensorType:
        # Delegate to the underlying Dirichlet.
        return self.dist.kl_divergence(other.dist)

    @override(TFActionDistribution)
    def _build_sample_op(self) -> TensorType:
        return self.dist.sample()

    @staticmethod
    @override(ActionDistribution)
    def required_model_output_shape(
        action_space: gym.Space, model_config: ModelConfigDict
    ) -> Union[int, np.ndarray]:
        # One concentration logit per action dimension.
        return np.prod(action_space.shape, dtype=np.int32)
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/tf_distributions.py
ADDED
|
@@ -0,0 +1,552 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""The main difference between this and the old ActionDistribution is that this one
|
| 2 |
+
has more explicit input args. So that the input format does not have to be guessed from
|
| 3 |
+
the code. This matches the design pattern of torch distribution which developers may
|
| 4 |
+
already be familiar with.
|
| 5 |
+
"""
|
| 6 |
+
import gymnasium as gym
|
| 7 |
+
import tree
|
| 8 |
+
import numpy as np
|
| 9 |
+
from typing import Dict, Iterable, List, Optional
|
| 10 |
+
import abc
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
from ray.rllib.models.distributions import Distribution
|
| 14 |
+
from ray.rllib.utils.annotations import override, DeveloperAPI
|
| 15 |
+
from ray.rllib.utils.framework import try_import_tf, try_import_tfp
|
| 16 |
+
from ray.rllib.utils.typing import TensorType, Union, Tuple
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
_, tf, _ = try_import_tf()
|
| 20 |
+
tfp = try_import_tfp()
|
| 21 |
+
|
| 22 |
+
# TODO (Kourosh) Write unittest for this class similar to torch distributions.
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@DeveloperAPI
class TfDistribution(Distribution, abc.ABC):
    """Wrapper class for tfp.distributions."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Underlying tfp distribution. Sibling instances access each other's
        # `_dist` (see `kl`), so the attribute name is part of the de-facto
        # internal contract between these wrapper classes.
        self._dist = self._get_tf_distribution(*args, **kwargs)

    @abc.abstractmethod
    def _get_tf_distribution(self, *args, **kwargs) -> "tfp.distributions.Distribution":
        """Returns the tfp.distributions.Distribution object to use."""

    @override(Distribution)
    def logp(self, value: TensorType, **kwargs) -> TensorType:
        # Delegate straight to the wrapped tfp distribution.
        return self._dist.log_prob(value, **kwargs)

    @override(Distribution)
    def entropy(self) -> TensorType:
        return self._dist.entropy()

    @override(Distribution)
    def kl(self, other: "Distribution") -> TensorType:
        # NOTE(review): assumes `other` is also a TfDistribution (reads its
        # private `_dist`) — confirm callers never pass a non-tfp-backed dist.
        return self._dist.kl_divergence(other._dist)

    @override(Distribution)
    def sample(
        self, *, sample_shape=()
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        sample = self._dist.sample(sample_shape)
        return sample

    @override(Distribution)
    def rsample(
        self, *, sample_shape=()
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        # Reparameterized sampling is not generically available; subclasses
        # that support it (e.g. TfDiagGaussian) override this.
        raise NotImplementedError
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@DeveloperAPI
class TfCategorical(TfDistribution):
    """Wrapper class for Categorical distribution.

    Creates a categorical distribution parameterized by either :attr:`probs` or
    :attr:`logits` (but not both).

    Samples are integers from :math:`\{0, \ldots, K-1\}` where `K` is
    ``probs.size(-1)``.

    If `probs` is 1-dimensional with length-`K`, each element is the relative
    probability of sampling the class at that index.

    If `probs` is N-dimensional, the first N-1 dimensions are treated as a batch of
    relative probability vectors.

    .. testcode::
        :skipif: True

        m = TfCategorical([ 0.25, 0.25, 0.25, 0.25 ])
        m.sample(sample_shape=(2,))  # equal probability of 0, 1, 2, 3

    .. testoutput::

        tf.Tensor([2 3], shape=(2,), dtype=int32)

    Args:
        probs: The probabilities of each event.
        logits: Event log probabilities (unnormalized).
    """

    @override(TfDistribution)
    def __init__(
        self,
        probs: "tf.Tensor" = None,
        logits: "tf.Tensor" = None,
    ) -> None:
        # We assert this here because to_deterministic makes this assumption.
        assert (probs is None) != (
            logits is None
        ), "Exactly one out of `probs` and `logits` must be set!"

        self.probs = probs
        self.logits = logits
        # One-hot variant is kept for the straight-through `rsample()`.
        self.one_hot = tfp.distributions.OneHotCategorical(logits=logits, probs=probs)
        super().__init__(logits=logits, probs=probs)

    @override(Distribution)
    def logp(self, value: TensorType, **kwargs) -> TensorType:
        # Use the cross-entropy formulation instead of tfp's log_prob. This
        # prevents an error in which float values at the boundaries of the
        # range of the distribution are passed to this function.
        # Fixed: `tf.log` does not exist in TF2 — use `tf.math.log`.
        return -tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.logits if self.logits is not None else tf.math.log(self.probs),
            labels=tf.cast(value, tf.int32),
        )

    @override(TfDistribution)
    def _get_tf_distribution(
        self,
        probs: "tf.Tensor" = None,
        logits: "tf.Tensor" = None,
    ) -> "tfp.distributions.Distribution":
        return tfp.distributions.Categorical(probs=probs, logits=logits)

    @staticmethod
    @override(Distribution)
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        assert isinstance(space, gym.spaces.Discrete)
        # One logit per discrete action.
        return int(space.n)

    @override(Distribution)
    def rsample(self, sample_shape=()):
        # Straight-through gradient estimator: the forward value is the
        # one-hot sample; gradients flow through the probabilities.
        one_hot_sample = self.one_hot.sample(sample_shape)
        # Fixed: the TF API is `tf.stop_gradient` (no trailing "s").
        # Fixed: when constructed from `logits` only, `self.probs` is None —
        # derive the probabilities via softmax in that case.
        probs = self.probs if self.probs is not None else tf.nn.softmax(self.logits)
        return tf.stop_gradient(one_hot_sample - probs) + probs

    @classmethod
    @override(Distribution)
    def from_logits(cls, logits: TensorType, **kwargs) -> "TfCategorical":
        return TfCategorical(logits=logits, **kwargs)

    def to_deterministic(self) -> "TfDeterministic":
        # Argmax is invariant under softmax, so probs and logits are
        # interchangeable here.
        if self.probs is not None:
            probs_or_logits = self.probs
        else:
            probs_or_logits = self.logits

        return TfDeterministic(loc=tf.math.argmax(probs_or_logits, axis=-1))
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@DeveloperAPI
class TfDiagGaussian(TfDistribution):
    """Wrapper class for Normal distribution.

    Creates a normal distribution parameterized by :attr:`loc` and :attr:`scale`. In
    case of multi-dimensional distribution, the variance is assumed to be diagonal.

    .. testcode::
        :skipif: True

        m = TfDiagGaussian(loc=[0.0, 0.0], scale=[1.0, 1.0])
        m.sample(sample_shape=(2,))  # 2d normal dist with loc=0 and scale=1

    .. testoutput::

        tensor([[ 0.1046, -0.6120], [ 0.234, 0.556]])

    .. testcode::
        :skipif: True

        # scale is None
        m = TfDiagGaussian(loc=[0.0, 1.0])
        m.sample(sample_shape=(2,))  # normally distributed with loc=0 and scale=1

    .. testoutput::

        tensor([0.1046, 0.6120])


    Args:
        loc: mean of the distribution (often referred to as mu). If scale is None, the
            second half of the `loc` will be used as the log of scale.
        scale: standard deviation of the distribution (often referred to as sigma).
            Has to be positive.
    """

    @override(TfDistribution)
    def __init__(
        self,
        loc: Union[float, TensorType],
        scale: Optional[Union[float, TensorType]] = None,
    ):
        # Keep `loc` around so `to_deterministic()` can reuse it.
        self.loc = loc
        super().__init__(loc=loc, scale=scale)

    @override(TfDistribution)
    def _get_tf_distribution(self, loc, scale) -> "tfp.distributions.Distribution":
        return tfp.distributions.Normal(loc=loc, scale=scale)

    @override(TfDistribution)
    def logp(self, value: TensorType) -> TensorType:
        # Dimensions are independent (diagonal covariance), so the joint
        # log-prob is the sum of per-dimension log-probs.
        return tf.math.reduce_sum(super().logp(value), axis=-1)

    @override(TfDistribution)
    def entropy(self) -> TensorType:
        # Entropy of independent dims is the sum of per-dim entropies.
        return tf.math.reduce_sum(super().entropy(), axis=-1)

    @override(TfDistribution)
    def kl(self, other: "TfDistribution") -> TensorType:
        # KL of independent dims is the sum of per-dim KLs.
        return tf.math.reduce_sum(super().kl(other), axis=-1)

    @staticmethod
    @override(Distribution)
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        assert isinstance(space, gym.spaces.Box)
        # Twice the flattened action dim: one half means, one half log-stds.
        return int(np.prod(space.shape, dtype=np.int32) * 2)

    @override(Distribution)
    def rsample(self, sample_shape=()):
        # Reparameterization trick: eps ~ N(0, 1), then shift and scale.
        # NOTE(review): `eps` is drawn with shape == `sample_shape` only and
        # relies on broadcasting against `loc`/`scale` — confirm this is the
        # intended behavior for non-empty `sample_shape`.
        eps = tf.random.normal(sample_shape)
        return self._dist.loc + eps * self._dist.scale

    @classmethod
    @override(Distribution)
    def from_logits(cls, logits: TensorType, **kwargs) -> "TfDiagGaussian":
        # First half of the model output are the means, second half the
        # log standard deviations.
        loc, log_std = tf.split(logits, num_or_size_splits=2, axis=-1)
        scale = tf.math.exp(log_std)
        return TfDiagGaussian(loc=loc, scale=scale)

    def to_deterministic(self) -> "TfDeterministic":
        return TfDeterministic(loc=self.loc)
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
@DeveloperAPI
class TfDeterministic(Distribution):
    """A "distribution" that always returns its input values directly.

    Equivalent to a DiagGaussian with a standard deviation of zero (so only
    the "mean" values are required as NN output).

    Note: `entropy()` and `kl()` raise, and `logp()` is constantly zero.

    .. testcode::
        :skipif: True

        m = TfDeterministic(loc=tf.constant([0.0, 0.0]))
        m.sample(sample_shape=(2,))

    .. testoutput::

        Tensor([[ 0.0, 0.0], [ 0.0, 0.0]])

    Args:
        loc: The deterministic value to return.
    """

    @override(Distribution)
    def __init__(self, loc: "tf.Tensor") -> None:
        super().__init__()
        self.loc = loc

    @override(Distribution)
    def sample(
        self,
        *,
        sample_shape: Tuple[int, ...] = (),
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        # Tile `loc` out to the requested sample shape via broadcasting.
        full_shape = sample_shape + self.loc.shape
        ones = tf.ones(full_shape, dtype=self.loc.dtype)
        return ones * self.loc

    @override(Distribution)
    def rsample(
        self,
        *,
        sample_shape: Tuple[int, ...] = None,
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        # No reparameterized sampling for a point mass.
        raise NotImplementedError

    @override(Distribution)
    def logp(self, value: TensorType, **kwargs) -> TensorType:
        # By convention, the point mass assigns log-prob 0.0 everywhere.
        return tf.zeros_like(self.loc)

    @override(Distribution)
    def entropy(self, **kwargs) -> TensorType:
        raise RuntimeError(f"`entropy()` not supported for {self.__class__.__name__}.")

    @override(Distribution)
    def kl(self, other: "Distribution", **kwargs) -> TensorType:
        raise RuntimeError(f"`kl()` not supported for {self.__class__.__name__}.")

    @staticmethod
    @override(Distribution)
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        assert isinstance(space, gym.spaces.Box)
        return int(np.prod(space.shape, dtype=np.int32))

    @classmethod
    @override(Distribution)
    def from_logits(cls, logits: TensorType, **kwargs) -> "TfDeterministic":
        return TfDeterministic(loc=logits)

    def to_deterministic(self) -> "TfDeterministic":
        return self
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
@DeveloperAPI
class TfMultiCategorical(Distribution):
    """MultiCategorical distribution for MultiDiscrete action spaces.

    Holds one TfCategorical per sub-action; samples are stacked along the last
    axis and log-probs/entropies/KLs are summed over the components.
    """

    @override(Distribution)
    def __init__(
        self,
        categoricals: List[TfCategorical],
    ):
        """Initializes a TfMultiCategorical.

        Args:
            categoricals: One TfCategorical per MultiDiscrete sub-space.
        """
        super().__init__()
        self._cats = categoricals

    @override(Distribution)
    def sample(self) -> TensorType:
        # One integer sample per component, stacked along the last axis.
        arr = [cat.sample() for cat in self._cats]
        sample_ = tf.stack(arr, axis=-1)
        return sample_

    @override(Distribution)
    def rsample(self, sample_shape=()):
        # Fixed: forward `sample_shape` to the component distributions (it was
        # previously accepted but silently ignored).
        arr = [cat.rsample(sample_shape=sample_shape) for cat in self._cats]
        sample_ = tf.stack(arr, axis=-1)
        return sample_

    @override(Distribution)
    def logp(self, value: tf.Tensor) -> TensorType:
        # Split the stacked actions into per-component columns and sum the
        # per-component log-probs (components are independent).
        actions = tf.unstack(tf.cast(value, tf.int32), axis=-1)
        logps = tf.stack([cat.logp(act) for cat, act in zip(self._cats, actions)])
        return tf.reduce_sum(logps, axis=0)

    @override(Distribution)
    def entropy(self) -> TensorType:
        # Sum of per-component entropies.
        return tf.reduce_sum(
            tf.stack([cat.entropy() for cat in self._cats], axis=-1), axis=-1
        )

    @override(Distribution)
    def kl(self, other: Distribution) -> TensorType:
        # Sum of per-component KLs against the matching component in `other`.
        kls = tf.stack(
            [cat.kl(oth_cat) for cat, oth_cat in zip(self._cats, other._cats)], axis=-1
        )
        return tf.reduce_sum(kls, axis=-1)

    @staticmethod
    @override(Distribution)
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        assert isinstance(space, gym.spaces.MultiDiscrete)
        # One logit per choice of each sub-space.
        return int(np.sum(space.nvec))

    @classmethod
    @override(Distribution)
    def from_logits(
        cls,
        logits: tf.Tensor,
        input_lens: List[int],
        **kwargs,
    ) -> "TfMultiCategorical":
        """Creates this Distribution from logits (and additional arguments).

        If you wish to create this distribution from logits only, please refer to
        `Distribution.get_partial_dist_cls()`.

        Args:
            logits: The tensor containing logits to be separated by `input_lens`.
            input_lens: A list of integers that indicate the length of the logits
                vectors to be passed into each child distribution.
            **kwargs: Forward compatibility kwargs.
        """
        categoricals = [
            TfCategorical(logits=logits)
            for logits in tf.split(logits, input_lens, axis=-1)
        ]

        return TfMultiCategorical(categoricals=categoricals)

    def to_deterministic(self) -> "TfMultiDistribution":
        return TfMultiDistribution([cat.to_deterministic() for cat in self._cats])
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
@DeveloperAPI
class TfMultiDistribution(Distribution):
    """Action distribution that operates on multiple, possibly nested actions."""

    def __init__(
        self,
        child_distribution_struct: Union[Tuple, List, Dict],
    ):
        """Initializes a TfMultiDistribution object.

        Args:
            child_distribution_struct: Any struct
                that contains the child distribution classes to use to
                instantiate the child distributions from `logits`.
        """
        super().__init__()
        # Keep the original (possibly nested) struct so results can be
        # re-nested into the same shape.
        self._original_struct = child_distribution_struct
        self._flat_child_distributions = tree.flatten(child_distribution_struct)

    @override(Distribution)
    def rsample(
        self,
        *,
        sample_shape: Tuple[int, ...] = None,
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        # Reparameterized sample from each child, re-nested into the
        # original structure.
        rsamples = []
        for dist in self._flat_child_distributions:
            rsample = dist.rsample(sample_shape=sample_shape, **kwargs)
            rsamples.append(rsample)

        rsamples = tree.unflatten_as(self._original_struct, rsamples)
        return rsamples

    @override(Distribution)
    def logp(self, value):
        """Sum of the per-child log-probs of `value`.

        `value` may be a single concatenated tensor or a (possibly nested)
        struct matching the child distribution structure.
        """
        # Single tensor input (all merged).
        if isinstance(value, (tf.Tensor, np.ndarray)):
            split_indices = []
            for dist in self._flat_child_distributions:
                if isinstance(dist, TfCategorical):
                    split_indices.append(1)
                elif isinstance(dist, TfMultiCategorical):
                    split_indices.append(len(dist._cats))
                else:
                    # Infer the split width from a sample of the child dist.
                    sample = dist.sample()
                    # Cover Box(shape=()) case.
                    if len(sample.shape) == 1:
                        split_indices.append(1)
                    else:
                        split_indices.append(tf.shape(sample)[1])
            split_value = tf.split(value, split_indices, axis=1)
        # Structured or flattened (by single action component) input.
        else:
            split_value = tree.flatten(value)

        def map_(val, dist):
            # Remove extra dimension if present.
            if (
                isinstance(dist, TfCategorical)
                and len(val.shape) > 1
                and val.shape[-1] == 1
            ):
                val = tf.squeeze(val, axis=-1)

            return dist.logp(val)

        # Remove extra categorical dimension and take the logp of each
        # component.
        flat_logps = tree.map_structure(
            map_, split_value, self._flat_child_distributions
        )

        return sum(flat_logps)

    @override(Distribution)
    def kl(self, other):
        # Children are independent -> total KL is the sum of per-child KLs.
        kl_list = [
            d.kl(o)
            for d, o in zip(
                self._flat_child_distributions, other._flat_child_distributions
            )
        ]
        return sum(kl_list)

    @override(Distribution)
    def entropy(self):
        # Children are independent -> total entropy is the sum of per-child
        # entropies.
        entropy_list = [d.entropy() for d in self._flat_child_distributions]
        return sum(entropy_list)

    @override(Distribution)
    def sample(self):
        child_distributions_struct = tree.unflatten_as(
            self._original_struct, self._flat_child_distributions
        )
        return tree.map_structure(lambda s: s.sample(), child_distributions_struct)

    @staticmethod
    @override(Distribution)
    def required_input_dim(space: gym.Space, input_lens: List[int], **kwargs) -> int:
        return sum(input_lens)

    @classmethod
    @override(Distribution)
    def from_logits(
        cls,
        logits: tf.Tensor,
        child_distribution_cls_struct: Union[Dict, Iterable],
        input_lens: Union[Dict, List[int]],
        space: gym.Space,
        **kwargs,
    ) -> "TfMultiDistribution":
        """Creates this Distribution from logits (and additional arguments).

        If you wish to create this distribution from logits only, please refer to
        `Distribution.get_partial_dist_cls()`.

        Args:
            logits: The tensor containing logits to be separated by `input_lens`.
            child_distribution_cls_struct: A struct of Distribution classes that can
                be instantiated from the given logits.
            input_lens: A list or dict of integers that indicate the length of each
                logit. If this is given as a dict, the structure should match the
                structure of child_distribution_cls_struct.
            space: The possibly nested output space.
            **kwargs: Forward compatibility kwargs.

        Returns:
            A TfMultiDistribution object.
        """
        logit_lens = tree.flatten(input_lens)
        child_distribution_cls_list = tree.flatten(child_distribution_cls_struct)
        split_logits = tf.split(logits, logit_lens, axis=1)

        child_distribution_list = tree.map_structure(
            lambda dist, input_: dist.from_logits(input_),
            child_distribution_cls_list,
            list(split_logits),
        )

        child_distribution_struct = tree.unflatten_as(
            child_distribution_cls_struct, child_distribution_list
        )

        return TfMultiDistribution(
            child_distribution_struct=child_distribution_struct,
        )

    def to_deterministic(self) -> "TfMultiDistribution":
        # Fixed: call `to_deterministic()` on each child — the method was
        # previously referenced without being called, producing a struct of
        # bound methods instead of distributions.
        flat_deterministic_dists = [
            dist.to_deterministic() for dist in self._flat_child_distributions
        ]
        deterministic_dists = tree.unflatten_as(
            self._original_struct, flat_deterministic_dists
        )
        return TfMultiDistribution(deterministic_dists)
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/tf_modelv2.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import gymnasium as gym
|
| 3 |
+
import re
|
| 4 |
+
from typing import Dict, List, Union
|
| 5 |
+
|
| 6 |
+
from ray.util import log_once
|
| 7 |
+
from ray.rllib.models.modelv2 import ModelV2
|
| 8 |
+
from ray.rllib.utils.annotations import OldAPIStack, override
|
| 9 |
+
from ray.rllib.utils.deprecation import deprecation_warning
|
| 10 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 11 |
+
from ray.rllib.utils.typing import ModelConfigDict, TensorType
|
| 12 |
+
|
| 13 |
+
tf1, tf, tfv = try_import_tf()
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@OldAPIStack
class TFModelV2(ModelV2):
    """TF version of ModelV2, which should contain a tf keras Model.

    Note that this class by itself is not a valid model unless you
    implement forward() in a subclass."""

    def __init__(
        self,
        obs_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        num_outputs: int,
        model_config: ModelConfigDict,
        name: str,
    ):
        """Initializes a TFModelV2 instance.

        Here is an example implementation for a subclass
        ``MyModelClass(TFModelV2)``::

            def __init__(self, *args, **kwargs):
                super(MyModelClass, self).__init__(*args, **kwargs)
                input_layer = tf.keras.layers.Input(...)
                hidden_layer = tf.keras.layers.Dense(...)(input_layer)
                output_layer = tf.keras.layers.Dense(...)(hidden_layer)
                value_layer = tf.keras.layers.Dense(...)(hidden_layer)
                self.base_model = tf.keras.Model(
                    input_layer, [output_layer, value_layer])
        """
        super().__init__(
            obs_space, action_space, num_outputs, model_config, name, framework="tf"
        )

        # Deprecated: TFModelV2 now automatically track their variables.
        self.var_list = []

        # Remember the default graph (None in eager mode) so `context()` can
        # restore it when ops for this model are built later.
        if tf1.executing_eagerly():
            self.graph = None
        else:
            self.graph = tf1.get_default_graph()

    def context(self) -> contextlib.AbstractContextManager:
        """Returns a contextmanager for the current TF graph."""
        if self.graph:
            return self.graph.as_default()
        else:
            # Eager mode: fall back to the base implementation.
            return ModelV2.context(self)

    def update_ops(self) -> List[TensorType]:
        """Return the list of update ops for this model.

        For example, this should include any BatchNorm update ops."""
        return []

    def register_variables(self, variables: List[TensorType]) -> None:
        """Register the given list of variables with this model."""
        # Deprecated path; variables are now auto-discovered (see
        # `_find_sub_modules`). Warn once, then keep old behavior working.
        if log_once("deprecated_tfmodelv2_register_variables"):
            deprecation_warning(old="TFModelV2.register_variables", error=False)
        self.var_list.extend(variables)

    @override(ModelV2)
    def variables(
        self, as_dict: bool = False
    ) -> Union[List[TensorType], Dict[str, TensorType]]:
        """Returns this model's variables, as a list or name->var dict.

        Explicitly registered variables (deprecated path) take precedence;
        otherwise the var tree is discovered automatically from `__dict__`.
        """
        if as_dict:
            # Old way using `register_variables`.
            if self.var_list:
                return {v.name: v for v in self.var_list}
            # New way: Automatically determine the var tree.
            else:
                return self._find_sub_modules("", self.__dict__)

        # Old way using `register_variables`.
        if self.var_list:
            return list(self.var_list)
        # New way: Automatically determine the var tree.
        else:
            return list(self.variables(as_dict=True).values())

    @override(ModelV2)
    def trainable_variables(
        self, as_dict: bool = False
    ) -> Union[List[TensorType], Dict[str, TensorType]]:
        """Returns only the trainable subset of `variables()`."""
        if as_dict:
            return {
                k: v for k, v in self.variables(as_dict=True).items() if v.trainable
            }
        return [v for v in self.variables() if v.trainable]

    @staticmethod
    def _find_sub_modules(current_key, struct):
        """Recursively collects variables from `struct` into a flat dict.

        Keys are dotted/underscored paths built from `current_key`; values are
        the discovered variables. Handles Keras models/tf.Modules, nested
        TFModelV2s, plain tf.Variables, and list/tuple/dict containers.
        """
        # Keras Model: key=k + "." + var-name (replace '/' by '.').
        if isinstance(struct, tf.keras.models.Model) or isinstance(struct, tf.Module):
            ret = {}
            for var in struct.variables:
                name = re.sub("/", ".", var.name)
                key = current_key + "." + name
                ret[key] = var
            return ret
        # Other TFModelV2: Include its vars into ours.
        elif isinstance(struct, TFModelV2):
            return {
                current_key + "." + key: var
                for key, var in struct.variables(as_dict=True).items()
            }
        # tf.Variable
        elif isinstance(struct, tf.Variable):
            return {current_key: struct}
        # List/Tuple.
        elif isinstance(struct, (tuple, list)):
            ret = {}
            for i, value in enumerate(struct):
                sub_vars = TFModelV2._find_sub_modules(
                    current_key + "_{}".format(i), value
                )
                ret.update(sub_vars)
            return ret
        # Dict.
        elif isinstance(struct, dict):
            if current_key:
                current_key += "_"
            ret = {}
            for key, value in struct.items():
                sub_vars = TFModelV2._find_sub_modules(current_key + str(key), value)
                ret.update(sub_vars)
            return ret
        # Anything else holds no variables.
        return {}
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/tf/visionnet.py
ADDED
|
@@ -0,0 +1,264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gymnasium as gym
|
| 2 |
+
from typing import Dict, List
|
| 3 |
+
|
| 4 |
+
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
|
| 5 |
+
from ray.rllib.models.tf.misc import normc_initializer
|
| 6 |
+
from ray.rllib.models.utils import get_activation_fn, get_filter_config
|
| 7 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 8 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 9 |
+
from ray.rllib.utils.typing import ModelConfigDict, TensorType
|
| 10 |
+
|
| 11 |
+
tf1, tf, tfv = try_import_tf()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@OldAPIStack
class VisionNetwork(TFModelV2):
    """Generic vision network implemented in ModelV2 API.

    An additional post-conv fully connected stack can be added and configured
    via the config keys:
    `post_fcnet_hiddens`: Dense layer sizes after the Conv2D stack.
    `post_fcnet_activation`: Activation function to use for this FC stack.
    """

    def __init__(
        self,
        obs_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        num_outputs: int,
        model_config: ModelConfigDict,
        name: str,
    ):
        """Build the conv (+ optional post-FC) policy net and the value net.

        The whole graph is assembled eagerly here as a single
        `tf.keras.Model` (`self.base_model`) with two outputs:
        [logits_out, value_out].
        """
        # No filters given -> derive a default stack from the obs shape
        # (raises if the shape has no known default).
        if not model_config.get("conv_filters"):
            model_config["conv_filters"] = get_filter_config(obs_space.shape)

        super(VisionNetwork, self).__init__(
            obs_space, action_space, num_outputs, model_config, name
        )

        activation = get_activation_fn(
            self.model_config.get("conv_activation"), framework="tf"
        )
        filters = self.model_config["conv_filters"]
        assert len(filters) > 0, "Must provide at least 1 entry in `conv_filters`!"

        # Post FC net config.
        post_fcnet_hiddens = model_config.get("post_fcnet_hiddens", [])
        post_fcnet_activation = get_activation_fn(
            model_config.get("post_fcnet_activation"), framework="tf"
        )

        no_final_linear = self.model_config.get("no_final_linear")
        vf_share_layers = self.model_config.get("vf_share_layers")

        # NOTE(review): obs_space.shape is presumably channels-last
        # ([H, W, C]); all Conv2D layers below hard-code
        # data_format="channels_last" — confirm against the preprocessor.
        input_shape = obs_space.shape
        self.data_format = "channels_last"

        inputs = tf.keras.layers.Input(shape=input_shape, name="observations")
        last_layer = inputs
        # Whether the last layer is the output of a Flattened (rather than
        # a n x (1,1) Conv2D).
        self.last_layer_is_flattened = False

        # Build the action layers
        # All but the last filter spec use padding="same"; the last one
        # (below) uses padding="valid".
        for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
            last_layer = tf.keras.layers.Conv2D(
                out_size,
                kernel,
                strides=stride
                if isinstance(stride, (list, tuple))
                else (stride, stride),
                activation=activation,
                padding="same",
                data_format="channels_last",
                name="conv{}".format(i),
            )(last_layer)

        out_size, kernel, stride = filters[-1]

        # No final linear: Last layer has activation function and exits with
        # num_outputs nodes (this could be a 1x1 conv or a FC layer, depending
        # on `post_fcnet_...` settings).
        if no_final_linear and num_outputs:
            last_layer = tf.keras.layers.Conv2D(
                out_size if post_fcnet_hiddens else num_outputs,
                kernel,
                strides=stride
                if isinstance(stride, (list, tuple))
                else (stride, stride),
                activation=activation,
                padding="valid",
                data_format="channels_last",
                name="conv_out",
            )(last_layer)
            # Add (optional) post-fc-stack after last Conv2D layer.
            # The final Dense exits with `num_outputs` nodes.
            layer_sizes = post_fcnet_hiddens[:-1] + (
                [num_outputs] if post_fcnet_hiddens else []
            )
            feature_out = last_layer

            for i, out_size in enumerate(layer_sizes):
                # `feature_out` tracks the layer *before* the final output,
                # used below as the value-branch input when sharing layers.
                feature_out = last_layer
                last_layer = tf.keras.layers.Dense(
                    out_size,
                    name="post_fcnet_{}".format(i),
                    activation=post_fcnet_activation,
                    kernel_initializer=normc_initializer(1.0),
                )(last_layer)

        # Finish network normally (w/o overriding last layer size with
        # `num_outputs`), then add another linear one of size `num_outputs`.
        else:
            last_layer = tf.keras.layers.Conv2D(
                out_size,
                kernel,
                strides=stride
                if isinstance(stride, (list, tuple))
                else (stride, stride),
                activation=activation,
                padding="valid",
                data_format="channels_last",
                name="conv{}".format(len(filters)),
            )(last_layer)

            # num_outputs defined. Use that to create an exact
            # `num_output`-sized (1,1)-Conv2D.
            if num_outputs:
                if post_fcnet_hiddens:
                    last_cnn = last_layer = tf.keras.layers.Conv2D(
                        post_fcnet_hiddens[0],
                        [1, 1],
                        activation=post_fcnet_activation,
                        padding="same",
                        data_format="channels_last",
                        name="conv_out",
                    )(last_layer)
                    # Add (optional) post-fc-stack after last Conv2D layer.
                    # The very last Dense is linear (activation=None).
                    for i, out_size in enumerate(
                        post_fcnet_hiddens[1:] + [num_outputs]
                    ):
                        feature_out = last_layer
                        last_layer = tf.keras.layers.Dense(
                            out_size,
                            name="post_fcnet_{}".format(i + 1),
                            activation=post_fcnet_activation
                            if i < len(post_fcnet_hiddens) - 1
                            else None,
                            kernel_initializer=normc_initializer(1.0),
                        )(last_layer)
                else:
                    feature_out = last_layer
                    last_cnn = last_layer = tf.keras.layers.Conv2D(
                        num_outputs,
                        [1, 1],
                        activation=None,
                        padding="same",
                        data_format="channels_last",
                        name="conv_out",
                    )(last_layer)

                # The conv stack must have reduced the spatial dims to 1x1
                # so that squeezing in `forward()` yields [B, num_outputs].
                if last_cnn.shape[1] != 1 or last_cnn.shape[2] != 1:
                    raise ValueError(
                        "Given `conv_filters` ({}) do not result in a [B, 1, "
                        "1, {} (`num_outputs`)] shape (but in {})! Please "
                        "adjust your Conv2D stack such that the dims 1 and 2 "
                        "are both 1.".format(
                            self.model_config["conv_filters"],
                            self.num_outputs,
                            list(last_cnn.shape),
                        )
                    )

            # num_outputs not known -> Flatten, then set self.num_outputs
            # to the resulting number of nodes.
            else:
                self.last_layer_is_flattened = True
                last_layer = tf.keras.layers.Flatten(data_format="channels_last")(
                    last_layer
                )

                # Add (optional) post-fc-stack after last Conv2D layer.
                for i, out_size in enumerate(post_fcnet_hiddens):
                    last_layer = tf.keras.layers.Dense(
                        out_size,
                        name="post_fcnet_{}".format(i),
                        activation=post_fcnet_activation,
                        kernel_initializer=normc_initializer(1.0),
                    )(last_layer)
                feature_out = last_layer
                self.num_outputs = last_layer.shape[1]
        logits_out = last_layer

        # Build the value layers
        if vf_share_layers:
            # Shared trunk: value head is a single linear Dense on top of
            # `feature_out` (squeezed from [B,1,1,F] to [B,F] first, unless
            # the trunk already ends in a Flatten).
            if not self.last_layer_is_flattened:
                feature_out = tf.keras.layers.Lambda(
                    lambda x: tf.squeeze(x, axis=[1, 2])
                )(feature_out)
            value_out = tf.keras.layers.Dense(
                1,
                name="value_out",
                activation=None,
                kernel_initializer=normc_initializer(0.01),
            )(feature_out)
        else:
            # build a parallel set of hidden layers for the value net
            last_layer = inputs
            for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
                last_layer = tf.keras.layers.Conv2D(
                    out_size,
                    kernel,
                    strides=stride
                    if isinstance(stride, (list, tuple))
                    else (stride, stride),
                    activation=activation,
                    padding="same",
                    data_format="channels_last",
                    name="conv_value_{}".format(i),
                )(last_layer)
            out_size, kernel, stride = filters[-1]
            last_layer = tf.keras.layers.Conv2D(
                out_size,
                kernel,
                strides=stride
                if isinstance(stride, (list, tuple))
                else (stride, stride),
                activation=activation,
                padding="valid",
                data_format="channels_last",
                name="conv_value_{}".format(len(filters)),
            )(last_layer)
            # Final (1,1)-conv collapses channels to 1 -> scalar value.
            last_layer = tf.keras.layers.Conv2D(
                1,
                [1, 1],
                activation=None,
                padding="same",
                data_format="channels_last",
                name="conv_value_out",
            )(last_layer)
            value_out = tf.keras.layers.Lambda(lambda x: tf.squeeze(x, axis=[1, 2]))(
                last_layer
            )

        self.base_model = tf.keras.Model(inputs, [logits_out, value_out])

    def forward(
        self,
        input_dict: Dict[str, TensorType],
        state: List[TensorType],
        seq_lens: TensorType,
    ) -> (TensorType, List[TensorType]):
        """Run the conv net on `input_dict["obs"]`.

        Caches the value branch output in `self._value_out` for a later
        `value_function()` call. `state` is passed through unchanged
        (this model is stateless).
        """
        obs = input_dict["obs"]
        # NOTE(review): self.data_format is always set to "channels_last"
        # in __init__, so this transpose branch appears dead — confirm no
        # subclass overrides data_format.
        if self.data_format == "channels_first":
            obs = tf.transpose(obs, [0, 2, 3, 1])
        # Explicit cast to float32 needed in eager.
        model_out, self._value_out = self.base_model(tf.cast(obs, tf.float32))
        # Our last layer is already flat.
        if self.last_layer_is_flattened:
            return model_out, state
        # Last layer is a n x [1,1] Conv2D -> Flatten.
        else:
            return tf.squeeze(model_out, axis=[1, 2]), state

    def value_function(self) -> TensorType:
        """Return the value estimate from the most recent `forward()` call."""
        return tf.reshape(self._value_out, [-1])
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (176 Bytes). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/misc.cpython-310.pyc
ADDED
|
Binary file (10.7 kB). View file
|
|
|
deepseek/lib/python3.10/site-packages/ray/rllib/models/utils.py
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Callable, Optional, Union
|
| 2 |
+
|
| 3 |
+
from ray.rllib.utils.annotations import DeveloperAPI
|
| 4 |
+
from ray.rllib.utils.framework import try_import_jax, try_import_tf, try_import_torch
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DeveloperAPI
def get_activation_fn(
    name: Optional[Union[Callable, str]] = None,
    framework: str = "tf",
):
    """Returns a framework specific activation function, given a name string.

    Args:
        name: One of "relu" (default), "tanh", "elu",
            "swish" (or "silu", which is the same), or "linear" (same as None).
        framework: One of "jax", "tf|tf2" or "torch".

    Returns:
        A framework-specific activation function, e.g. tf.nn.tanh or
        torch.nn.ReLU. None if name in ["linear", None].

    Raises:
        ValueError: If name is an unknown activation function.
    """
    # A callable was passed in directly -> nothing to resolve.
    if callable(name):
        return name

    key = name.lower() if isinstance(name, str) else name

    if framework == "torch":
        # "linear"/None means: no activation at all.
        if key in ("linear", None):
            return None

        _, nn = try_import_torch()
        # Torch activation classes are not all lower case, so first try the
        # user-provided spelling directly on `torch.nn`.
        attr = getattr(nn, name, None)
        if attr is not None:
            return attr
        aliases = {
            "swish": nn.SiLU,
            "silu": nn.SiLU,
            "relu": nn.ReLU,
            "tanh": nn.Tanh,
            "elu": nn.ELU,
        }
        if key in aliases:
            return aliases[key]
    elif framework == "jax":
        if key in ("linear", None):
            return None
        jax, _ = try_import_jax()
        aliases = {
            "swish": jax.nn.swish,
            "silu": jax.nn.swish,
            "relu": jax.nn.relu,
            "tanh": jax.nn.hard_tanh,
            "elu": jax.nn.elu,
        }
        if key in aliases:
            return aliases[key]
    else:
        assert framework in ["tf", "tf2"], "Unsupported framework `{}`!".format(
            framework
        )
        if key in ("linear", None):
            return None

        tf1, tf, tfv = try_import_tf()
        # TF activation functions are all lower case, so this lookup should
        # always succeed for valid names.
        attr = getattr(tf.nn, key, None)
        if attr is not None:
            return attr

    # Nothing matched in any branch above -> unknown activation.
    raise ValueError(
        "Unknown activation ({}) for framework={}!".format(name, framework)
    )
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
@DeveloperAPI
def get_initializer_fn(name: Optional[Union[str, Callable]], framework: str = "torch"):
    """Returns the framework-specific initializer class or function.

    This function relies fully on the specified initializer classes and
    functions in the frameworks `torch` and `tf2` (see for `torch`
    https://pytorch.org/docs/stable/nn.init.html and for `tf2` see
    https://www.tensorflow.org/api_docs/python/tf/keras/initializers).

    Note, for framework `torch` the in-place initializers are needed, i.e. names
    should end with an underscore `_`, e.g. `glorot_uniform_`.

    Args:
        name: Name of the initializer class or function in one of the two
            supported frameworks, i.e. `torch` or `tf2`.
        framework: The framework string, either `torch or `tf2`.

    Returns:
        A framework-specific function or class defining an initializer to be used
        for network initialization. `None` for any framework other than
        `torch`/`tf2` (the framework-default initializer is used then).

    Raises:
        `ValueError` if the `name` is neither class or function in the specified
        `framework`. Raises also a `ValueError`, if `name` does not define an
        in-place initializer for framework `torch`.
    """
    # Already a callable or `None` return as is. If `None` we use the default
    # initializer defined in the framework-specific layers themselves.
    if callable(name) or name is None:
        return name

    if framework == "torch":
        name_lower = name.lower() if isinstance(name, str) else name

        _, nn = try_import_torch()

        # Check, if the name includes an underscore. We must use the
        # in-place initialization from Torch.
        if not name_lower.endswith("_"):
            # Fixed typo in the user-facing message: "initializaer_name"
            # -> "initializer_name".
            raise ValueError(
                "Not an in-place initializer: Torch weight initializers "
                "need to be provided as their in-place version, i.e. "
                "<initializer_name> + '_'. See "
                "https://pytorch.org/docs/stable/nn.init.html. "
                f"User provided {name}."
            )

        # First, try to get the initialization directly from `nn.init`.
        # Note, that all initialization methods in `nn.init` are lower
        # case and that `<method>_` defines the "in-place" method.
        fn = getattr(nn.init, name_lower, None)
        if fn is not None:
            # TODO (simon): Raise a warning if not "in-place" method.
            return fn
        # Unknown initializer: inform the user that it does not exist.
        raise ValueError(
            f"Unknown initializer name: {name_lower} is not a method in "
            "`torch.nn.init`!"
        )
    elif framework == "tf2":
        # Note, as initializer classes in TensorFlow can be either given by their
        # name in camel toe typing or by their shortcut we use the `name` as it is.
        # See https://www.tensorflow.org/api_docs/python/tf/keras/initializers.
        _, tf, _ = try_import_tf()

        # Try to get the initialization function directly from `tf.keras.initializers`.
        fn = getattr(tf.keras.initializers, name, None)
        if fn is not None:
            return fn
        # Unknown initializer: inform the user that it does not exist.
        raise ValueError(
            f"Unknown initializer: {name} is not a initializer in "
            "`tf.keras.initializers`!"
        )

    # Any other framework silently yields `None` -> framework-default
    # initializer (kept explicit here; was an implicit fall-through before).
    return None
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
@DeveloperAPI
def get_filter_config(shape):
    """Returns a default Conv2D filter config (list) for a given image shape.

    Args:
        shape (Tuple[int]): The input (image) shape, e.g. (84,84,3).

    Returns:
        List[list]: The Conv2D filter configuration usable as `conv_filters`
        inside a model config dict.

    Raises:
        ValueError: If no default filter config exists for `shape`.
    """
    # Known (H, W) sizes mapped to their default Conv2D stacks. Each filter
    # entry is [out_channels, [kernel_h, kernel_w], stride].
    default_filters = {
        # 96x96x3 (e.g. CarRacing-v0).
        (96, 96): [
            [16, [8, 8], 4],
            [32, [4, 4], 2],
            [256, [11, 11], 2],
        ],
        # Atari.
        (84, 84): [
            [16, [8, 8], 4],
            [32, [4, 4], 2],
            [256, [11, 11], 1],
        ],
        # Dreamer-style (S-sized model) Atari or DM Control Suite.
        (64, 64): [
            [32, [4, 4], 2],
            [64, [4, 4], 2],
            [128, [4, 4], 2],
            [256, [4, 4], 2],
        ],
        # Small (1/2) Atari.
        (42, 42): [
            [16, [4, 4], 2],
            [32, [4, 4], 2],
            [256, [11, 11], 1],
        ],
        # Test image (10x10).
        (10, 10): [
            [16, [5, 5], 2],
            [32, [5, 5], 2],
        ],
    }

    shape = list(shape)
    if len(shape) in [2, 3]:
        # Accept both channels-last ([H, W(, C)]) and channels-first
        # ([C, H, W]) layouts.
        for (h, w), filters in default_filters.items():
            if shape[:2] == [h, w] or shape[1:] == [h, w]:
                return filters
    # Fixed: the original message omitted the supported [96, 96, K] shape.
    raise ValueError(
        "No default configuration for obs shape {}".format(shape)
        + ", you must specify `conv_filters` manually as a model option. "
        "Default configurations are only available for inputs of the following "
        "shapes: [96, 96, K], [84, 84, K], [64, 64, K], [42, 42, K], "
        "[10, 10, K]. You may alternatively want to use a custom model or "
        "preprocessor."
    )
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
@DeveloperAPI
def get_initializer(name, framework="tf"):
    """Returns a framework specific initializer, given a name string.

    Args:
        name: One of "xavier_uniform" (default), "xavier_normal".
        framework: One of "jax", "tf|tf2" or "torch".

    Returns:
        A framework-specific initializer function, e.g.
        tf.keras.initializers.GlorotUniform or
        torch.nn.init.xavier_uniform_.

    Raises:
        ValueError: If name is an unknown initializer.
    """
    # Already a callable, return as-is.
    if callable(name):
        return name

    if framework == "jax":
        _, flax = try_import_jax()
        assert flax is not None, "`flax` not installed. Try `pip install jax flax`."
        import flax.linen as nn

        if name in [None, "default", "xavier_uniform"]:
            return nn.initializers.xavier_uniform()
        elif name == "xavier_normal":
            return nn.initializers.xavier_normal()
    # Fixed: this was `if` (not `elif`), so framework="jax" with an unknown
    # `name` fell into the tf `else` branch below and died with an
    # AssertionError instead of the intended ValueError.
    elif framework == "torch":
        _, nn = try_import_torch()
        assert nn is not None, "`torch` not installed. Try `pip install torch`."
        if name in [None, "default", "xavier_uniform"]:
            return nn.init.xavier_uniform_
        elif name == "xavier_normal":
            return nn.init.xavier_normal_
    else:
        assert framework in ["tf", "tf2"], "Unsupported framework `{}`!".format(
            framework
        )
        tf1, tf, tfv = try_import_tf()
        assert (
            tf is not None
        ), "`tensorflow` not installed. Try `pip install tensorflow`."
        if name in [None, "default", "xavier_uniform"]:
            return tf.keras.initializers.GlorotUniform
        elif name == "xavier_normal":
            return tf.keras.initializers.GlorotNormal

    # Fixed copy-paste error: this function resolves initializers, not
    # activations ("Unknown activation" before).
    raise ValueError(
        "Unknown initializer ({}) for framework={}!".format(name, framework)
    )
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/actors.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict, deque
|
| 2 |
+
import logging
|
| 3 |
+
import platform
|
| 4 |
+
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type
|
| 5 |
+
|
| 6 |
+
import ray
|
| 7 |
+
from ray.actor import ActorClass, ActorHandle
|
| 8 |
+
|
| 9 |
+
logger = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TaskPool:
    """Helper class for tracking the status of many in-flight actor tasks."""

    def __init__(self):
        # Maps the "primary" object ref of each in-flight task -> worker
        # (actor handle) that produced it.
        self._tasks = {}
        # Maps the same primary ref -> the full ref (or list of refs)
        # originally passed to `add()`.
        self._objects = {}
        # FIFO of (worker, obj_ref) pairs already completed but not yet
        # yielded by `completed_prefetch()`.
        self._fetching = deque()

    def add(self, worker, all_obj_refs):
        """Track a new in-flight task.

        `all_obj_refs` may be a single object ref or a list of refs; in the
        list case the first ref is used as the tracking key.
        """
        if isinstance(all_obj_refs, list):
            obj_ref = all_obj_refs[0]
        else:
            obj_ref = all_obj_refs
        self._tasks[obj_ref] = worker
        self._objects[obj_ref] = all_obj_refs

    def completed(self, blocking_wait=False):
        """Yield (worker, all_obj_refs) for each task that has finished.

        Non-blocking by default (timeout=0). If `blocking_wait` is True and
        nothing is ready, waits up to 10s for at least one task. Yielded
        tasks are removed from the pool.
        """
        pending = list(self._tasks)
        if pending:
            ready, _ = ray.wait(pending, num_returns=len(pending), timeout=0)
            if not ready and blocking_wait:
                ready, _ = ray.wait(pending, num_returns=1, timeout=10.0)
            for obj_ref in ready:
                yield (self._tasks.pop(obj_ref), self._objects.pop(obj_ref))

    def completed_prefetch(self, blocking_wait=False, max_yield=999):
        """Similar to completed but only returns once the object is local.

        Assumes obj_ref only is one id."""

        # Drain everything newly completed into the prefetch queue first...
        for worker, obj_ref in self.completed(blocking_wait=blocking_wait):
            self._fetching.append((worker, obj_ref))

        # ...then yield at most `max_yield` items from the queue front.
        for _ in range(max_yield):
            if not self._fetching:
                break

            yield self._fetching.popleft()

    def reset_workers(self, workers):
        """Notify that some workers may be removed."""
        # Drop tracked tasks whose worker is no longer in `workers`.
        # (Iterate over a copy since we delete while iterating.)
        for obj_ref, ev in self._tasks.copy().items():
            if ev not in workers:
                del self._tasks[obj_ref]
                del self._objects[obj_ref]

        # We want to keep the same deque reference so that we don't suffer from
        # stale references in generators that are still in flight
        for _ in range(len(self._fetching)):
            ev, obj_ref = self._fetching.popleft()
            if ev in workers:
                # Re-queue items that are still valid
                self._fetching.append((ev, obj_ref))

    @property
    def count(self):
        # Number of tasks still in flight (not yet yielded by `completed`).
        return len(self._tasks)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def create_colocated_actors(
    actor_specs: Sequence[Tuple[Type, Any, Any, int]],
    node: Optional[str] = "localhost",
    max_attempts: int = 10,
) -> List[List[ActorHandle]]:
    """Create co-located actors of any type(s) on any node.

    Args:
        actor_specs: Tuple/list with tuples consisting of: 1) The
            (already @ray.remote) class(es) to construct, 2) c'tor args,
            3) c'tor kwargs, and 4) the number of actors of that class with
            given args/kwargs to construct.
        node: The node to co-locate the actors on. By default ("localhost"),
            place the actors on the node the caller of this function is
            located on. Use None for indicating that any (resource fulfilling)
            node in the cluster may be used.
        max_attempts: The maximum number of co-location attempts to
            perform before throwing an error.

    Returns:
        A list with one entry per spec in `actor_specs`; each entry is the
        list of n (co-located) ActorHandles created for that spec.
        (The annotation/docstring previously claimed a Dict, but the
        function has always returned this list of lists.)

    Raises:
        Exception: If not all requested actors could be co-located within
            `max_attempts` attempts.
    """
    if node == "localhost":
        node = platform.node()

    # Maps each entry in `actor_specs` to lists of already co-located actors.
    ok = [[] for _ in range(len(actor_specs))]

    # Try n times to co-locate all given actor types (`actor_specs`).
    # With each (failed) attempt, increase the number of actors we try to
    # create (on the same node), then kill the ones that have been created in
    # excess.
    for attempt in range(max_attempts):
        # If any attempt to co-locate fails, set this to False and we'll do
        # another attempt.
        all_good = True
        # Process all `actor_specs` in sequence.
        for i, (typ, args, kwargs, count) in enumerate(actor_specs):
            args = args or []  # Allow None.
            kwargs = kwargs or {}  # Allow None.
            # We don't have enough actors yet of this spec co-located on
            # the desired node.
            if len(ok[i]) < count:
                # Over-provision proportionally to the attempt number to
                # raise the chance of landing enough on one node.
                co_located = try_create_colocated(
                    cls=typ,
                    args=args,
                    kwargs=kwargs,
                    count=count * (attempt + 1),
                    node=node,
                )
                # If node did not matter (None), from here on, use the host
                # that the first actor(s) are already co-located on.
                if node is None:
                    node = ray.get(co_located[0].get_host.remote())
                # Add the newly co-located actors to the `ok` list.
                ok[i].extend(co_located)
                # If we still don't have enough -> We'll have to do another
                # attempt.
                if len(ok[i]) < count:
                    all_good = False
            # We created too many actors for this spec -> Kill/truncate
            # the excess ones.
            if len(ok[i]) > count:
                for a in ok[i][count:]:
                    a.__ray_terminate__.remote()
                ok[i] = ok[i][:count]

        # All `actor_specs` have been fulfilled, return lists of
        # co-located actors.
        if all_good:
            return ok

    raise Exception("Unable to create enough colocated actors -> aborting.")
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def try_create_colocated(
    cls: Type[ActorClass],
    args: List[Any],
    count: int,
    kwargs: Optional[dict] = None,
    node: Optional[str] = "localhost",
) -> List[ActorHandle]:
    """Tries to co-locate (same node) a set of Actors of the same type.

    Returns a list of successfully co-located actors. All actors that could
    not be co-located (with the others on the given node) will not be in this
    list.

    Creates each actor via its remote() constructor and then checks, whether
    it has been co-located (on the same node) with the other (already created)
    ones. If not, terminates the just created actor.

    Args:
        cls: The Actor class to use (already @ray.remote "converted").
        args: Positional args passed to each created actor's constructor.
            Note: the SAME args are passed to all `count` actors (the type
            annotation and docs previously suggested one item per actor,
            which did not match the implementation).
        count: Number of actors of the given `cls` to construct.
        kwargs: Optional keyword args (dict) passed to each created actor's
            constructor. The same kwargs are used for all `count` actors.
        node: The node to co-locate the actors on. By default ("localhost"),
            place the actors on the node the caller of this function is
            located on. If None, will try to co-locate all actors on
            any available node.

    Returns:
        List containing all successfully co-located actor handles.
    """
    if node == "localhost":
        node = platform.node()

    kwargs = kwargs or {}
    actors = [cls.remote(*args, **kwargs) for _ in range(count)]
    # Keep only actors that landed on the desired node; terminate the rest.
    co_located, non_co_located = split_colocated(actors, node=node)
    logger.info("Got {} colocated actors of {}".format(len(co_located), count))
    for a in non_co_located:
        a.__ray_terminate__.remote()
    return co_located
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def split_colocated(
    actors: List[ActorHandle],
    node: Optional[str] = "localhost",
) -> Tuple[List[ActorHandle], List[ActorHandle]]:
    """Splits the given actors into colocated (same node) and non-colocated.

    The co-location criterion depends on `node`:
    - A concrete node name (default: this machine via `platform.node()`):
      every actor sitting on that node counts as "colocated".
    - None: the largest subset of actors that all share a single node
      (whichever node that is) counts as "colocated".

    Args:
        actors: The list of actor handles to split into "colocated" and
            "non colocated".
        node: The node defining the "colocation" criterion (see above).

    Returns:
        Tuple of two lists: 1) co-located ActorHandles, 2) non co-located
        ActorHandles.
    """
    if node == "localhost":
        node = platform.node()

    # Ask every actor which node it actually landed on.
    hosts = ray.get([a.get_host.remote() for a in actors])

    if node is None:
        # No target node given: group actors per host, then pick the host
        # with the most actors on it.
        groups = defaultdict(set)
        for host, actor in zip(hosts, actors):
            groups[host].add(actor)

        best_host, best_size = None, -1
        for host, members in groups.items():
            if len(members) > best_size:
                best_host, best_size = host, len(members)

        leftovers = []
        for host, members in groups.items():
            if host != best_host:
                leftovers.extend(list(members))
        return list(groups[best_host]), leftovers
    else:
        # Target node given: membership on `node` decides colocation.
        on_node, off_node = [], []
        for host, actor in zip(hosts, actors):
            (on_node if host == node else off_node).append(actor)
        return on_node, off_node
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def drop_colocated(actors: List[ActorHandle]) -> List[ActorHandle]:
    """Terminates all colocated actors and returns only the non-colocated ones.

    Args:
        actors: The actor handles to filter.

    Returns:
        The handles of all actors NOT colocated on the local node.
    """
    colocated, non_colocated = split_colocated(actors)
    # Kill every actor that sits on the local node.
    for handle in colocated:
        handle.__ray_terminate__.remote()
    return non_colocated
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/annotations.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.rllib.utils.deprecation import Deprecated
|
| 2 |
+
from ray.util.annotations import _mark_annotated
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def override(parent_cls):
    """Decorator for documenting method overrides.

    Args:
        parent_cls: The superclass that is expected to provide the overridden
            method. If `parent_cls` does not have a method (or attribute) of
            the same name, a `NameError` is raised at decoration time.

    .. testcode::
        :skipif: True

        from ray.rllib.policy import Policy
        class TorchPolicy(Policy):
            ...
            # Indicates that `TorchPolicy.loss()` overrides the parent
            # Policy class' own `loss` method. Leads to an error if Policy
            # does not have a `loss` method.

            @override(Policy)
            def loss(self, model, action_dist, train_batch):
                ...

    """

    class OverrideCheck:
        # Descriptor-like helper: `__set_name__` would validate that the
        # owning class is a subclass of `expected_parent_cls` — but only if
        # an instance were actually assigned as a class attribute
        # (see NOTE in `decorator` below).
        def __init__(self, func, expected_parent_cls):
            self.func = func
            self.expected_parent_cls = expected_parent_cls

        def __set_name__(self, owner, name):
            # Check if the owner (the class) is a subclass of the expected base class
            if not issubclass(owner, self.expected_parent_cls):
                raise TypeError(
                    f"When using the @override decorator, {owner.__name__} must be a "
                    f"subclass of {parent_cls.__name__}!"
                )
            # Set the function as a regular method on the class.
            setattr(owner, name, self.func)

    def decorator(method):
        # Check, whether `method` is actually defined by the parent class.
        # `dir()` covers inherited attributes as well.
        if method.__name__ not in dir(parent_cls):
            raise NameError(
                f"When using the @override decorator, {method.__name__} must override "
                f"the respective method (with the same name) of {parent_cls.__name__}!"
            )

        # NOTE(review): This instance is created and immediately discarded, so
        # its `__set_name__` hook never fires and the subclass check above is
        # effectively dead code — confirm whether this is intentional.
        OverrideCheck(method, parent_cls)
        return method

    return decorator
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def PublicAPI(obj):
    """Decorator for documenting public APIs.

    Public APIs are classes and methods exposed to end users of RLlib; you
    can expect them to remain stable across RLlib releases. Subclasses that
    inherit from a ``@PublicAPI`` base class count as part of the public API
    as well (e.g. all Algorithm classes, since Algorithm itself is
    ``@PublicAPI``), and so do all algo configurations.

    .. testcode::
        :skipif: True

        # Indicates that the `Algorithm` class is exposed to end users
        # of RLlib and will remain stable across RLlib releases.
        from ray import tune
        @PublicAPI
        class Algorithm(tune.Trainable):
            ...
    """
    # Tag the object for Ray's annotation tooling, then return it unchanged.
    _mark_annotated(obj)
    return obj
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def DeveloperAPI(obj):
    """Decorator for documenting developer APIs.

    Developer APIs are classes and methods explicitly exposed to developers
    building custom algorithms or advanced training strategies on top of
    RLlib internals. They are generally stable sans minor changes, but less
    stable than public APIs. Subclasses that inherit from a
    ``@DeveloperAPI`` base class count as developer API as well.

    .. testcode::
        :skipif: True

        # Indicates that the `TorchPolicy` class is exposed to end users
        # of RLlib and will remain (relatively) stable across RLlib
        # releases.
        from ray.rllib.policy import Policy
        @DeveloperAPI
        class TorchPolicy(Policy):
            ...
    """
    # Tag the object for Ray's annotation tooling, then return it unchanged.
    _mark_annotated(obj)
    return obj
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def ExperimentalAPI(obj):
    """Decorator for documenting experimental APIs.

    Experimental APIs are classes and methods still in development; they may
    change at any time and should not be considered stable until their tag
    changes to `DeveloperAPI` or `PublicAPI`. Subclasses that inherit from
    an ``@ExperimentalAPI`` base class are experimental as well.

    .. testcode::
        :skipif: True

        from ray.rllib.policy import Policy
        class TorchPolicy(Policy):
            ...
            # Indicates that the `TorchPolicy.loss` method is a new and
            # experimental API and may change frequently in future
            # releases.
            @ExperimentalAPI
            def loss(self, model, action_dist, train_batch):
                ...
    """
    # Tag the object for Ray's annotation tooling, then return it unchanged.
    _mark_annotated(obj)
    return obj
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def OldAPIStack(obj):
    """Decorator for classes/methods/functions belonging to the old API stack.

    These should be deprecated at some point after Ray 3.0 (RLlib GA).
    Users are encouraged to explore (and code against) the new API stack
    instead.
    """
    # Currently purely informational — only marks the object for Ray's
    # annotation tooling; no behavior change.
    _mark_annotated(obj)
    return obj
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def OverrideToImplementCustomLogic(obj):
    """Marks a method/function as meant to be overridden with custom logic.

    Used in Algorithm and Policy to tag methods that need overriding, e.g.
    `Policy.loss()`. The decorated base implementation is flagged so that
    `is_overridden()` can later tell it apart from user-supplied overrides.

    .. testcode::
        :skipif: True

        from ray.rllib.policy.torch_policy import TorchPolicy
        @overrides(TorchPolicy)
        @OverrideToImplementCustomLogic
        def loss(self, ...):
            # implement custom loss function here ...
            # ... w/o calling the corresponding `super().loss()` method.
            ...

    """
    # Mark the base implementation as "not overridden".
    setattr(obj, "__is_overridden__", False)
    return obj
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def OverrideToImplementCustomLogic_CallToSuperRecommended(obj):
    """Marks a method/function as meant to be overridden with custom logic.

    Unlike `OverrideToImplementCustomLogic`, it is recommended (though not
    required) that the override also call the super-class' corresponding
    method. Used in Algorithm and Policy for methods like
    `Algorithm.setup()`.

    .. testcode::
        :skipif: True

        from ray import tune
        @overrides(tune.Trainable)
        @OverrideToImplementCustomLogic_CallToSuperRecommended
        def setup(self, config):
            # implement custom setup logic here ...
            super().setup(config)
            # ... or here (after having called super()'s setup method.
    """
    # Mark the base implementation as "not overridden".
    setattr(obj, "__is_overridden__", False)
    return obj
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def is_overridden(obj):
    """Checks whether a function has been overridden.

    Note, this only works for API calls decorated with
    OverrideToImplementCustomLogic or
    OverrideToImplementCustomLogic_CallToSuperRecommended: those decorators
    set ``__is_overridden__ = False`` on the base implementation, so any
    callable WITHOUT the marker (i.e. a user-supplied override) reports True.
    """
    try:
        return obj.__is_overridden__
    except AttributeError:
        # No marker present -> treat as overridden (user-supplied).
        return True
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
# Backward compatibility: re-export `Deprecated` (imported at the top of
# this module from ray.rllib.utils.deprecation) so existing code that
# imports it from here keeps working.
Deprecated = Deprecated
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/exploration.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from gymnasium.spaces import Space
|
| 2 |
+
from typing import Dict, List, Optional, Union, TYPE_CHECKING
|
| 3 |
+
|
| 4 |
+
from ray.rllib.env.base_env import BaseEnv
|
| 5 |
+
from ray.rllib.models.action_dist import ActionDistribution
|
| 6 |
+
from ray.rllib.models.modelv2 import ModelV2
|
| 7 |
+
from ray.rllib.policy.sample_batch import SampleBatch
|
| 8 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 9 |
+
from ray.rllib.utils.framework import try_import_torch, TensorType
|
| 10 |
+
from ray.rllib.utils.typing import LocalOptimizer, AlgorithmConfigDict
|
| 11 |
+
|
| 12 |
+
if TYPE_CHECKING:
|
| 13 |
+
from ray.rllib.policy.policy import Policy
|
| 14 |
+
from ray.rllib.utils import try_import_tf
|
| 15 |
+
|
| 16 |
+
_, tf, _ = try_import_tf()
|
| 17 |
+
|
| 18 |
+
_, nn = try_import_torch()
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@OldAPIStack
class Exploration:
    """Implements an exploration strategy for Policies.

    An Exploration takes model outputs, a distribution, and a timestep from
    the agent and computes an action to apply to the environment using an
    implemented exploration schema.

    This is an abstract-style base class: most hooks below are no-op stubs
    meant to be overridden by concrete exploration strategies.
    """

    def __init__(
        self,
        action_space: Space,
        *,
        framework: str,
        policy_config: AlgorithmConfigDict,
        model: ModelV2,
        num_workers: int,
        worker_index: int
    ):
        """Initializes an Exploration instance.

        Args:
            action_space: The action space in which to explore.
            framework: One of "tf" or "torch".
            policy_config: The Policy's config dict.
            model: The Policy's model.
            num_workers: The overall number of workers used.
            worker_index: The index of the worker using this class.
        """
        self.action_space = action_space
        self.policy_config = policy_config
        self.model = model
        self.num_workers = num_workers
        self.worker_index = worker_index
        self.framework = framework
        # The device on which the Model has been placed.
        # This Exploration will be on the same device.
        # Stays None for non-torch models or parameter-less torch models.
        self.device = None
        if isinstance(self.model, nn.Module):
            params = list(self.model.parameters())
            if params:
                self.device = params[0].device

    def before_compute_actions(
        self,
        *,
        timestep: Optional[Union[TensorType, int]] = None,
        explore: Optional[Union[TensorType, bool]] = None,
        tf_sess: Optional["tf.Session"] = None,
        **kwargs
    ):
        """Hook for preparations before policy.compute_actions() is called.

        Args:
            timestep: An optional timestep tensor.
            explore: An optional explore boolean flag.
            tf_sess: The tf-session object to use.
            **kwargs: Forward compatibility kwargs.
        """
        pass

    # fmt: off
    # __sphinx_doc_begin_get_exploration_action__

    def get_exploration_action(self,
                               *,
                               action_distribution: ActionDistribution,
                               timestep: Union[TensorType, int],
                               explore: bool = True):
        """Returns a (possibly) exploratory action and its log-likelihood.

        Given the Model's logits outputs and action distribution, returns an
        exploratory action.

        Args:
            action_distribution: The instantiated
                ActionDistribution object to work with when creating
                exploration actions.
            timestep: The current sampling time step. It can be a tensor
                for TF graph mode, otherwise an integer.
            explore: True: "Normal" exploration behavior.
                False: Suppress all exploratory behavior and return
                a deterministic action.

        Returns:
            A tuple consisting of 1) the chosen exploration action or a
            tf-op to fetch the exploration action from the graph and
            2) the log-likelihood of the exploration action.
        """
        pass

    # __sphinx_doc_end_get_exploration_action__
    # fmt: on

    def on_episode_start(
        self,
        policy: "Policy",
        *,
        environment: BaseEnv = None,
        episode: int = None,
        tf_sess: Optional["tf.Session"] = None
    ):
        """Handles necessary exploration logic at the beginning of an episode.

        Args:
            policy: The Policy object that holds this Exploration.
            environment: The environment object we are acting in.
            episode: The number of the episode that is starting.
            tf_sess: In case of tf, the session object.
        """
        pass

    def on_episode_end(
        self,
        policy: "Policy",
        *,
        environment: BaseEnv = None,
        episode: int = None,
        tf_sess: Optional["tf.Session"] = None
    ):
        """Handles necessary exploration logic at the end of an episode.

        Args:
            policy: The Policy object that holds this Exploration.
            environment: The environment object we are acting in.
            episode: The number of the episode that is ending.
            tf_sess: In case of tf, the session object.
        """
        pass

    def postprocess_trajectory(
        self,
        policy: "Policy",
        sample_batch: SampleBatch,
        tf_sess: Optional["tf.Session"] = None,
    ):
        """Handles post-processing of done episode trajectories.

        Changes the given batch in place. This callback is invoked by the
        sampler after policy.postprocess_trajectory() is called.

        Args:
            policy: The owning policy object.
            sample_batch: The SampleBatch object to post-process.
            tf_sess: An optional tf.Session object.
        """
        return sample_batch

    def get_exploration_optimizer(
        self, optimizers: List[LocalOptimizer]
    ) -> List[LocalOptimizer]:
        """May add optimizer(s) to the Policy's own `optimizers`.

        The number of optimizers (Policy's plus Exploration's optimizers) must
        match the number of loss terms produced by the Policy's loss function
        and the Exploration component's loss terms.

        Args:
            optimizers: The list of the Policy's local optimizers.

        Returns:
            The updated list of local optimizers to use on the different
            loss terms.
        """
        return optimizers

    def get_state(self, sess: Optional["tf.Session"] = None) -> Dict[str, TensorType]:
        """Returns the current exploration state.

        Args:
            sess: An optional tf Session object to use.

        Returns:
            The Exploration object's current state.
        """
        return {}

    def set_state(self, state: object, sess: Optional["tf.Session"] = None) -> None:
        """Sets the Exploration object's state to the given values.

        Note that some exploration components are stateless, even though they
        decay some values over time (e.g. EpsilonGreedy). However the decay is
        only dependent on the current global timestep of the policy and we
        therefore don't need to keep track of it.

        Args:
            state: The state to set this Exploration to.
            sess: An optional tf Session object to use.
        """
        pass
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/gaussian_noise.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from gymnasium.spaces import Space
|
| 2 |
+
import numpy as np
|
| 3 |
+
from typing import Union, Optional
|
| 4 |
+
|
| 5 |
+
from ray.rllib.models.action_dist import ActionDistribution
|
| 6 |
+
from ray.rllib.models.modelv2 import ModelV2
|
| 7 |
+
from ray.rllib.utils.annotations import OldAPIStack, override
|
| 8 |
+
from ray.rllib.utils.exploration.exploration import Exploration
|
| 9 |
+
from ray.rllib.utils.exploration.random import Random
|
| 10 |
+
from ray.rllib.utils.framework import (
|
| 11 |
+
try_import_tf,
|
| 12 |
+
try_import_torch,
|
| 13 |
+
get_variable,
|
| 14 |
+
TensorType,
|
| 15 |
+
)
|
| 16 |
+
from ray.rllib.utils.numpy import convert_to_numpy
|
| 17 |
+
from ray.rllib.utils.schedules import Schedule
|
| 18 |
+
from ray.rllib.utils.schedules.piecewise_schedule import PiecewiseSchedule
|
| 19 |
+
from ray.rllib.utils.tf_utils import zero_logps_from_actions
|
| 20 |
+
|
| 21 |
+
tf1, tf, tfv = try_import_tf()
|
| 22 |
+
torch, _ = try_import_torch()
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@OldAPIStack
|
| 26 |
+
class GaussianNoise(Exploration):
|
| 27 |
+
"""An exploration that adds white noise to continuous actions.
|
| 28 |
+
|
| 29 |
+
If explore=True, returns actions plus scale (annealed over time) x
|
| 30 |
+
Gaussian noise. Also, some completely random period is possible at the
|
| 31 |
+
beginning.
|
| 32 |
+
|
| 33 |
+
If explore=False, returns the deterministic action.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
    def __init__(
        self,
        action_space: Space,
        *,
        framework: str,
        model: ModelV2,
        random_timesteps: int = 1000,
        stddev: float = 0.1,
        initial_scale: float = 1.0,
        final_scale: float = 0.02,
        scale_timesteps: int = 10000,
        scale_schedule: Optional[Schedule] = None,
        **kwargs
    ):
        """Initializes a GaussianNoise instance.

        Args:
            random_timesteps: The number of timesteps for which to act
                completely randomly. Only after this number of timesteps, the
                `self.scale` annealing process will start (see below).
            stddev: The stddev (sigma) to use for the
                Gaussian noise to be added to the actions.
            initial_scale: The initial scaling weight to multiply
                the noise with.
            final_scale: The final scaling weight to multiply
                the noise with.
            scale_timesteps: The timesteps over which to linearly anneal
                the scaling factor (after(!) having used random actions for
                `random_timesteps` steps).
            scale_schedule: An optional Schedule object
                to use (instead of constructing one from the given parameters).
        """
        assert framework is not None
        super().__init__(action_space, model=model, framework=framework, **kwargs)

        # Create the Random exploration module (used for the first n
        # timesteps).
        self.random_timesteps = random_timesteps
        self.random_exploration = Random(
            action_space, model=self.model, framework=self.framework, **kwargs
        )

        self.stddev = stddev
        # The `scale` annealing schedule: holds `initial_scale` until the
        # random phase ends, then anneals linearly to `final_scale` and stays
        # there (via `outside_value`).
        self.scale_schedule = scale_schedule or PiecewiseSchedule(
            endpoints=[
                (random_timesteps, initial_scale),
                (random_timesteps + scale_timesteps, final_scale),
            ],
            outside_value=final_scale,
            framework=self.framework,
        )

        # The current timestep value (tf-var or python int).
        self.last_timestep = get_variable(
            np.array(0, np.int64),
            framework=self.framework,
            tf_name="timestep",
            dtype=np.int64,
        )

        # Build the tf-info-op (static-graph tf only; tf2/torch read state
        # eagerly).
        if self.framework == "tf":
            self._tf_state_op = self.get_state()
|
| 100 |
+
|
| 101 |
+
@override(Exploration)
|
| 102 |
+
def get_exploration_action(
|
| 103 |
+
self,
|
| 104 |
+
*,
|
| 105 |
+
action_distribution: ActionDistribution,
|
| 106 |
+
timestep: Union[int, TensorType],
|
| 107 |
+
explore: bool = True
|
| 108 |
+
):
|
| 109 |
+
# Adds IID Gaussian noise for exploration, TD3-style.
|
| 110 |
+
if self.framework == "torch":
|
| 111 |
+
return self._get_torch_exploration_action(
|
| 112 |
+
action_distribution, explore, timestep
|
| 113 |
+
)
|
| 114 |
+
else:
|
| 115 |
+
return self._get_tf_exploration_action_op(
|
| 116 |
+
action_distribution, explore, timestep
|
| 117 |
+
)
|
| 118 |
+
|
| 119 |
+
    def _get_tf_exploration_action_op(
        self,
        action_dist: ActionDistribution,
        explore: bool,
        timestep: Union[int, TensorType],
    ):
        """Builds the tf op(s) returning the exploration action and its logp.

        Args:
            action_dist: The policy's current action distribution.
            explore: Whether to apply exploration noise (may be a bool or a
                tf tensor).
            timestep: The current timestep; if None, the internally tracked
                `self.last_timestep` is used.

        Returns:
            Tuple of (action op, logp op). Logp is always all-zeros.
        """
        ts = timestep if timestep is not None else self.last_timestep

        # The deterministic actions (if explore=False).
        deterministic_actions = action_dist.deterministic_sample()

        # Take a Gaussian sample with our stddev (mean=0.0) and scale it.
        gaussian_sample = self.scale_schedule(ts) * tf.random.normal(
            tf.shape(deterministic_actions), stddev=self.stddev
        )

        # Stochastic actions could either be: random OR action + noise.
        random_actions, _ = self.random_exploration.get_tf_exploration_action_op(
            action_dist, explore
        )
        # During the initial `random_timesteps` phase act fully randomly;
        # afterwards add the (clipped) Gaussian noise to the deterministic
        # action, keeping the result inside the action space's bounds.
        stochastic_actions = tf.cond(
            pred=tf.convert_to_tensor(ts < self.random_timesteps),
            true_fn=lambda: random_actions,
            false_fn=lambda: tf.clip_by_value(
                deterministic_actions + gaussian_sample,
                self.action_space.low * tf.ones_like(deterministic_actions),
                self.action_space.high * tf.ones_like(deterministic_actions),
            ),
        )

        # Chose by `explore` (main exploration switch).
        action = tf.cond(
            pred=tf.constant(explore, dtype=tf.bool)
            if isinstance(explore, bool)
            else explore,
            true_fn=lambda: stochastic_actions,
            false_fn=lambda: deterministic_actions,
        )
        # Logp=always zero.
        logp = zero_logps_from_actions(deterministic_actions)

        # Increment `last_timestep` by 1 (or set to `timestep`).
        if self.framework == "tf2":
            # Eager mode: mutate the timestep variable directly.
            if timestep is None:
                self.last_timestep.assign_add(1)
            else:
                self.last_timestep.assign(tf.cast(timestep, tf.int64))
            return action, logp
        else:
            # Graph mode: tie the returned ops to the assign op via a control
            # dependency so the timestep update runs whenever the action is
            # fetched.
            assign_op = (
                tf1.assign_add(self.last_timestep, 1)
                if timestep is None
                else tf1.assign(self.last_timestep, timestep)
            )
            with tf1.control_dependencies([assign_op]):
                return action, logp
|
| 175 |
+
|
| 176 |
+
def _get_torch_exploration_action(
|
| 177 |
+
self,
|
| 178 |
+
action_dist: ActionDistribution,
|
| 179 |
+
explore: bool,
|
| 180 |
+
timestep: Union[int, TensorType],
|
| 181 |
+
):
|
| 182 |
+
# Set last timestep or (if not given) increase by one.
|
| 183 |
+
self.last_timestep = (
|
| 184 |
+
timestep if timestep is not None else self.last_timestep + 1
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
# Apply exploration.
|
| 188 |
+
if explore:
|
| 189 |
+
# Random exploration phase.
|
| 190 |
+
if self.last_timestep < self.random_timesteps:
|
| 191 |
+
action, _ = self.random_exploration.get_torch_exploration_action(
|
| 192 |
+
action_dist, explore=True
|
| 193 |
+
)
|
| 194 |
+
# Take a Gaussian sample with our stddev (mean=0.0) and scale it.
|
| 195 |
+
else:
|
| 196 |
+
det_actions = action_dist.deterministic_sample()
|
| 197 |
+
scale = self.scale_schedule(self.last_timestep)
|
| 198 |
+
gaussian_sample = scale * torch.normal(
|
| 199 |
+
mean=torch.zeros(det_actions.size()), std=self.stddev
|
| 200 |
+
).to(self.device)
|
| 201 |
+
action = torch.min(
|
| 202 |
+
torch.max(
|
| 203 |
+
det_actions + gaussian_sample,
|
| 204 |
+
torch.tensor(
|
| 205 |
+
self.action_space.low,
|
| 206 |
+
dtype=torch.float32,
|
| 207 |
+
device=self.device,
|
| 208 |
+
),
|
| 209 |
+
),
|
| 210 |
+
torch.tensor(
|
| 211 |
+
self.action_space.high, dtype=torch.float32, device=self.device
|
| 212 |
+
),
|
| 213 |
+
)
|
| 214 |
+
# No exploration -> Return deterministic actions.
|
| 215 |
+
else:
|
| 216 |
+
action = action_dist.deterministic_sample()
|
| 217 |
+
|
| 218 |
+
# Logp=always zero.
|
| 219 |
+
logp = torch.zeros((action.size()[0],), dtype=torch.float32, device=self.device)
|
| 220 |
+
|
| 221 |
+
return action, logp
|
| 222 |
+
|
| 223 |
+
@override(Exploration)
|
| 224 |
+
def get_state(self, sess: Optional["tf.Session"] = None):
    """Returns the current scale value.

    Returns:
        Union[float,tf.Tensor[float]]: The current scale value.
    """
    # Static-graph tf with a live session: evaluate the pre-built state ops.
    if sess:
        return sess.run(self._tf_state_op)

    scale = self.scale_schedule(self.last_timestep)
    is_static_tf = self.framework == "tf"
    # Outside static-graph tf, convert tensors to plain numpy values.
    return {
        "cur_scale": scale if is_static_tf else convert_to_numpy(scale),
        "last_timestep": self.last_timestep
        if is_static_tf
        else convert_to_numpy(self.last_timestep),
    }
|
| 239 |
+
|
| 240 |
+
@override(Exploration)
|
| 241 |
+
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
    """Restores `last_timestep` from a previously captured state dict."""
    new_ts = state["last_timestep"]
    if self.framework == "tf":
        # Static-graph tf variable: load via the session.
        self.last_timestep.load(new_ts, session=sess)
    elif isinstance(self.last_timestep, int):
        # Plain python counter (torch / numpy case).
        self.last_timestep = new_ts
    else:
        # Framework variable with an `assign` API (eager tf).
        self.last_timestep.assign(new_ts)
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/ornstein_uhlenbeck_noise.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from typing import Optional, Union
|
| 3 |
+
|
| 4 |
+
from ray.rllib.models.action_dist import ActionDistribution
|
| 5 |
+
from ray.rllib.utils.annotations import OldAPIStack, override
|
| 6 |
+
from ray.rllib.utils.exploration.gaussian_noise import GaussianNoise
|
| 7 |
+
from ray.rllib.utils.framework import (
|
| 8 |
+
try_import_tf,
|
| 9 |
+
try_import_torch,
|
| 10 |
+
get_variable,
|
| 11 |
+
TensorType,
|
| 12 |
+
)
|
| 13 |
+
from ray.rllib.utils.numpy import convert_to_numpy
|
| 14 |
+
from ray.rllib.utils.schedules import Schedule
|
| 15 |
+
from ray.rllib.utils.tf_utils import zero_logps_from_actions
|
| 16 |
+
|
| 17 |
+
tf1, tf, tfv = try_import_tf()
|
| 18 |
+
torch, _ = try_import_torch()
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@OldAPIStack
|
| 22 |
+
class OrnsteinUhlenbeckNoise(GaussianNoise):
    """An exploration that adds Ornstein-Uhlenbeck noise to continuous actions.

    If explore=True, returns sampled actions plus a noise term X,
    which changes according to this formula:
    Xt+1 = -theta*Xt + sigma*N[0,stddev], where theta, sigma and stddev are
    constants. Also, some completely random period is possible at the
    beginning.
    If explore=False, returns the deterministic action.
    """

    def __init__(
        self,
        action_space,
        *,
        framework: str,
        ou_theta: float = 0.15,
        ou_sigma: float = 0.2,
        ou_base_scale: float = 0.1,
        random_timesteps: int = 1000,
        initial_scale: float = 1.0,
        final_scale: float = 0.02,
        scale_timesteps: int = 10000,
        scale_schedule: Optional[Schedule] = None,
        **kwargs
    ):
        """Initializes an Ornstein-Uhlenbeck Exploration object.

        Args:
            action_space: The gym action space used by the environment.
            ou_theta: The theta parameter of the Ornstein-Uhlenbeck process.
            ou_sigma: The sigma parameter of the Ornstein-Uhlenbeck process.
            ou_base_scale: A fixed scaling factor, by which all OU-
                noise is multiplied. NOTE: This is on top of the parent
                GaussianNoise's scaling.
            random_timesteps: The number of timesteps for which to act
                completely randomly. Only after this number of timesteps, the
                `self.scale` annealing process will start (see below).
            initial_scale: The initial scaling weight to multiply the
                noise with.
            final_scale: The final scaling weight to multiply the noise with.
            scale_timesteps: The timesteps over which to linearly anneal the
                scaling factor (after(!) having used random actions for
                `random_timesteps` steps.
            scale_schedule: An optional Schedule object to use (instead
                of constructing one from the given parameters).
            framework: One of None, "tf", "torch".
        """
        # The current OU-state value (gets updated each time, an exploration
        # action is computed). Created before super().__init__ because the
        # device is only known afterwards (see below).
        self.ou_state = get_variable(
            np.array(action_space.low.size * [0.0], dtype=np.float32),
            framework=framework,
            tf_name="ou_state",
            torch_tensor=True,
            device=None,
        )

        super().__init__(
            action_space,
            framework=framework,
            random_timesteps=random_timesteps,
            initial_scale=initial_scale,
            final_scale=final_scale,
            scale_timesteps=scale_timesteps,
            scale_schedule=scale_schedule,
            stddev=1.0,  # Force `self.stddev` to 1.0.
            **kwargs
        )
        self.ou_theta = ou_theta
        self.ou_sigma = ou_sigma
        self.ou_base_scale = ou_base_scale
        # Now that we know the device, move ou_state there, in case of PyTorch.
        if self.framework == "torch" and self.device is not None:
            self.ou_state = self.ou_state.to(self.device)

    @override(GaussianNoise)
    def _get_tf_exploration_action_op(
        self,
        action_dist: ActionDistribution,
        explore: Union[bool, TensorType],
        timestep: Union[int, TensorType],
    ):
        """Builds the tf graph/ops returning (action, logp) for one step."""
        ts = timestep if timestep is not None else self.last_timestep
        scale = self.scale_schedule(ts)

        # The deterministic actions (if explore=False).
        deterministic_actions = action_dist.deterministic_sample()

        # Apply base-scaled and time-annealed scaled OU-noise to
        # deterministic actions.
        gaussian_sample = tf.random.normal(
            shape=[self.action_space.low.size], stddev=self.stddev
        )
        ou_new = self.ou_theta * -self.ou_state + self.ou_sigma * gaussian_sample
        if self.framework == "tf2":
            self.ou_state.assign_add(ou_new)
            ou_state_new = self.ou_state
        else:
            ou_state_new = tf1.assign_add(self.ou_state, ou_new)
        # Scale noise by the action-space extent; unbounded dims get 1.0.
        high_m_low = self.action_space.high - self.action_space.low
        high_m_low = tf.where(
            tf.math.is_inf(high_m_low), tf.ones_like(high_m_low), high_m_low
        )
        noise = scale * self.ou_base_scale * ou_state_new * high_m_low
        stochastic_actions = tf.clip_by_value(
            deterministic_actions + noise,
            self.action_space.low * tf.ones_like(deterministic_actions),
            self.action_space.high * tf.ones_like(deterministic_actions),
        )

        # Stochastic actions could either be: random OR action + noise.
        random_actions, _ = self.random_exploration.get_tf_exploration_action_op(
            action_dist, explore
        )
        exploration_actions = tf.cond(
            pred=tf.convert_to_tensor(ts < self.random_timesteps),
            true_fn=lambda: random_actions,
            false_fn=lambda: stochastic_actions,
        )

        # Chose by `explore` (main exploration switch).
        action = tf.cond(
            pred=tf.constant(explore, dtype=tf.bool)
            if isinstance(explore, bool)
            else explore,
            true_fn=lambda: exploration_actions,
            false_fn=lambda: deterministic_actions,
        )
        # Logp=always zero.
        logp = zero_logps_from_actions(deterministic_actions)

        # Increment `last_timestep` by 1 (or set to `timestep`).
        if self.framework == "tf2":
            if timestep is None:
                self.last_timestep.assign_add(1)
            else:
                self.last_timestep.assign(tf.cast(timestep, tf.int64))
        else:
            assign_op = (
                tf1.assign_add(self.last_timestep, 1)
                if timestep is None
                else tf1.assign(self.last_timestep, timestep)
            )
            # Make sure the timestep and OU-state updates actually run
            # whenever action/logp are fetched.
            with tf1.control_dependencies([assign_op, ou_state_new]):
                action = tf.identity(action)
                logp = tf.identity(logp)

        return action, logp

    @override(GaussianNoise)
    def _get_torch_exploration_action(
        self,
        action_dist: ActionDistribution,
        explore: bool,
        timestep: Union[int, TensorType],
    ):
        """Torch path: returns (action, logp) for one exploration step."""
        # Set last timestep or (if not given) increase by one.
        self.last_timestep = (
            timestep if timestep is not None else self.last_timestep + 1
        )

        # Apply exploration.
        if explore:
            # Random exploration phase.
            if self.last_timestep < self.random_timesteps:
                action, _ = self.random_exploration.get_torch_exploration_action(
                    action_dist, explore=True
                )
            # Apply base-scaled and time-annealed scaled OU-noise to
            # deterministic actions.
            else:
                det_actions = action_dist.deterministic_sample()
                scale = self.scale_schedule(self.last_timestep)
                gaussian_sample = scale * torch.normal(
                    mean=torch.zeros(self.ou_state.size()), std=1.0
                ).to(self.device)
                ou_new = (
                    self.ou_theta * -self.ou_state + self.ou_sigma * gaussian_sample
                )
                self.ou_state += ou_new
                # Scale noise by the action-space extent; unbounded dims
                # get 1.0.
                high_m_low = torch.from_numpy(
                    self.action_space.high - self.action_space.low
                ).to(self.device)
                high_m_low = torch.where(
                    torch.isinf(high_m_low),
                    torch.ones_like(high_m_low).to(self.device),
                    high_m_low,
                )
                noise = scale * self.ou_base_scale * self.ou_state * high_m_low

                # Clip the noisy actions into [low, high].
                action = torch.min(
                    torch.max(
                        det_actions + noise,
                        torch.tensor(
                            self.action_space.low,
                            dtype=torch.float32,
                            device=self.device,
                        ),
                    ),
                    torch.tensor(
                        self.action_space.high, dtype=torch.float32, device=self.device
                    ),
                )

        # No exploration -> Return deterministic actions.
        else:
            action = action_dist.deterministic_sample()

        # Logp=always zero.
        logp = torch.zeros((action.size()[0],), dtype=torch.float32, device=self.device)

        return action, logp

    @override(GaussianNoise)
    def get_state(self, sess: Optional["tf.Session"] = None):
        """Returns the current scale value plus the OU state.

        Returns:
            Union[float,tf.Tensor[float]]: The current scale value.
        """
        if sess:
            return sess.run(
                dict(
                    self._tf_state_op,
                    **{
                        "ou_state": self.ou_state,
                    }
                )
            )

        state = super().get_state()
        return dict(
            state,
            **{
                "ou_state": convert_to_numpy(self.ou_state)
                if self.framework != "tf"
                else self.ou_state,
            }
        )

    @override(GaussianNoise)
    def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
        """Restores OU state (and parent state) from a state dict."""
        if self.framework == "tf":
            self.ou_state.load(state["ou_state"], session=sess)
        elif isinstance(self.ou_state, np.ndarray):
            self.ou_state = state["ou_state"]
        elif torch and torch.is_tensor(self.ou_state):
            # Restore on the same device the old tensor lived on (`__init__`
            # moves `ou_state` to `self.device`). A plain `torch.from_numpy`
            # would silently land on the CPU and later crash the mixed-device
            # `ou_new` computation in `_get_torch_exploration_action`.
            ou_state = torch.from_numpy(
                np.asarray(state["ou_state"], dtype=np.float32)
            )
            if getattr(self, "device", None) is not None:
                ou_state = ou_state.to(self.device)
            self.ou_state = ou_state
        else:
            self.ou_state.assign(state["ou_state"])
        super().set_state(state, sess=sess)
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/parameter_noise.py
ADDED
|
@@ -0,0 +1,440 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from gymnasium.spaces import Box, Discrete
|
| 2 |
+
import numpy as np
|
| 3 |
+
from typing import Optional, TYPE_CHECKING, Union
|
| 4 |
+
|
| 5 |
+
from ray.rllib.env.base_env import BaseEnv
|
| 6 |
+
from ray.rllib.models.action_dist import ActionDistribution
|
| 7 |
+
from ray.rllib.models.modelv2 import ModelV2
|
| 8 |
+
from ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic
|
| 9 |
+
from ray.rllib.models.torch.torch_action_dist import (
|
| 10 |
+
TorchCategorical,
|
| 11 |
+
TorchDeterministic,
|
| 12 |
+
)
|
| 13 |
+
from ray.rllib.policy.sample_batch import SampleBatch
|
| 14 |
+
from ray.rllib.utils.annotations import OldAPIStack, override
|
| 15 |
+
from ray.rllib.utils.exploration.exploration import Exploration
|
| 16 |
+
from ray.rllib.utils.framework import get_variable, try_import_tf, try_import_torch
|
| 17 |
+
from ray.rllib.utils.from_config import from_config
|
| 18 |
+
from ray.rllib.utils.numpy import softmax, SMALL_NUMBER
|
| 19 |
+
from ray.rllib.utils.typing import TensorType
|
| 20 |
+
|
| 21 |
+
if TYPE_CHECKING:
|
| 22 |
+
from ray.rllib.policy.policy import Policy
|
| 23 |
+
|
| 24 |
+
tf1, tf, tfv = try_import_tf()
|
| 25 |
+
torch, _ = try_import_torch()
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@OldAPIStack
|
| 29 |
+
class ParameterNoise(Exploration):
|
| 30 |
+
"""An exploration that changes a Model's parameters.
|
| 31 |
+
|
| 32 |
+
Implemented based on:
|
| 33 |
+
[1] https://openai.com/research/better-exploration-with-parameter-noise
|
| 34 |
+
[2] https://arxiv.org/pdf/1706.01905.pdf
|
| 35 |
+
|
| 36 |
+
At the beginning of an episode, Gaussian noise is added to all weights
|
| 37 |
+
of the model. At the end of the episode, the noise is undone and an action
|
| 38 |
+
diff (pi-delta) is calculated, from which we determine the changes in the
|
| 39 |
+
noise's stddev for the next episode.
|
| 40 |
+
"""
|
| 41 |
+
|
| 42 |
+
def __init__(
    self,
    action_space,
    *,
    framework: str,
    policy_config: dict,
    model: ModelV2,
    initial_stddev: float = 1.0,
    random_timesteps: int = 10000,
    sub_exploration: Optional[dict] = None,
    **kwargs
):
    """Initializes a ParameterNoise Exploration object.

    Args:
        initial_stddev: The initial stddev to use for the noise.
        random_timesteps: The number of timesteps to act completely
            randomly (see [1]).
        sub_exploration: Optional sub-exploration config.
            None for auto-detection/setup.
    """
    # A concrete framework is required to build the noise variables below.
    assert framework is not None
    super().__init__(
        action_space,
        policy_config=policy_config,
        model=model,
        framework=framework,
        **kwargs
    )

    # The adaptive noise stddev, as a framework variable (graph-mode tf
    # needs an assignable variable for it).
    self.stddev = get_variable(
        initial_stddev, framework=self.framework, tf_name="stddev"
    )
    self.stddev_val = initial_stddev  # Out-of-graph tf value holder.

    # The weight variables of the Model where noise should be applied to.
    # This excludes any variable, whose name contains "LayerNorm" (those
    # are BatchNormalization layers, which should not be perturbed).
    self.model_variables = [
        v
        for k, v in self.model.trainable_variables(as_dict=True).items()
        if "LayerNorm" not in k
    ]
    # Our noise to be added to the weights. Each item in `self.noise`
    # corresponds to one Model variable and holding the Gaussian noise to
    # be added to that variable (weight).
    self.noise = []
    for var in self.model_variables:
        # Derive the noise-variable's (tf) name from the weight's own
        # name (strip the ":0"-style suffix).
        name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
        self.noise.append(
            get_variable(
                np.zeros(var.shape, dtype=np.float32),
                framework=self.framework,
                tf_name=name_,
                torch_tensor=True,
                device=self.device,
            )
        )

    # tf-specific ops to sample, assign and remove noise.
    if self.framework == "tf" and not tf.executing_eagerly():
        self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
        self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
        self.tf_remove_noise_op = self._tf_remove_noise_op()
        # Create convenience sample+add op for tf: running the combined
        # no_op forces (via control dependencies) sampling first, then
        # adding the freshly sampled noise.
        with tf1.control_dependencies([self.tf_sample_new_noise_op]):
            add_op = self._tf_add_stored_noise_op()
        with tf1.control_dependencies([add_op]):
            self.tf_sample_new_noise_and_add_op = tf.no_op()

    # Whether the Model's weights currently have noise added or not.
    self.weights_are_currently_noisy = False

    # Auto-detection of underlying exploration functionality.
    if sub_exploration is None:
        # For discrete action spaces, use an underlying EpsilonGreedy with
        # a special schedule.
        if isinstance(self.action_space, Discrete):
            sub_exploration = {
                "type": "EpsilonGreedy",
                "epsilon_schedule": {
                    "type": "PiecewiseSchedule",
                    # Step function (see [2]).
                    "endpoints": [
                        (0, 1.0),
                        (random_timesteps + 1, 1.0),
                        (random_timesteps + 2, 0.01),
                    ],
                    "outside_value": 0.01,
                },
            }
        elif isinstance(self.action_space, Box):
            sub_exploration = {
                "type": "OrnsteinUhlenbeckNoise",
                "random_timesteps": random_timesteps,
            }
        # TODO(sven): Implement for any action space.
        else:
            raise NotImplementedError

    self.sub_exploration = from_config(
        Exploration,
        sub_exploration,
        framework=self.framework,
        action_space=self.action_space,
        policy_config=self.policy_config,
        model=self.model,
        **kwargs
    )

    # Whether we need to call `self._delayed_on_episode_start` before
    # the forward pass.
    self.episode_started = False
|
| 155 |
+
|
| 156 |
+
@override(Exploration)
|
| 157 |
+
def before_compute_actions(
    self,
    *,
    timestep: Optional[int] = None,
    explore: Optional[bool] = None,
    tf_sess: Optional["tf.Session"] = None
):
    """Syncs noisy/clean weights with the requested `explore` mode."""
    if explore is None:
        explore = self.policy_config["explore"]

    # First forward pass of a new episode? Then perform the (delayed)
    # episode-start noise handling now.
    if self.episode_started:
        self._delayed_on_episode_start(explore, tf_sess)

    if explore and not self.weights_are_currently_noisy:
        # Exploring, but weights are clean -> perturb them.
        self._add_stored_noise(tf_sess=tf_sess)
    elif not explore and self.weights_are_currently_noisy:
        # Not exploring, but weights are noisy -> restore them.
        self._remove_noise(tf_sess=tf_sess)
|
| 177 |
+
|
| 178 |
+
@override(Exploration)
|
| 179 |
+
def get_exploration_action(
    self,
    *,
    action_distribution: "ActionDistribution",
    timestep: "Union[TensorType, int]",
    explore: "Union[TensorType, bool]"
):
    """Delegates the final action computation to the sub-exploration.

    The concrete exploration behavior depends on the algo type / action
    space / etc., all of which the configured sub-exploration handles.
    """
    return self.sub_exploration.get_exploration_action(
        action_distribution=action_distribution, timestep=timestep, explore=explore
    )
|
| 191 |
+
|
| 192 |
+
@override(Exploration)
|
| 193 |
+
def on_episode_start(
    self,
    policy: "Policy",
    *,
    environment: "BaseEnv" = None,
    episode: int = None,
    tf_sess: Optional["tf.Session"] = None
):
    """Marks that noise handling must run on the next forward pass.

    We have to delay the noise-adding step by one forward call.
    This is due to the fact that the optimizer does it's step right
    after the episode was reset (and hence the noise was already added!).
    We don't want to update into a noisy net.
    """
    self.episode_started = True
|
| 206 |
+
|
| 207 |
+
def _delayed_on_episode_start(self, explore, tf_sess):
|
| 208 |
+
# Sample fresh noise and add to weights.
|
| 209 |
+
if explore:
|
| 210 |
+
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
|
| 211 |
+
# Only sample, don't apply anything to the weights.
|
| 212 |
+
else:
|
| 213 |
+
self._sample_new_noise(tf_sess=tf_sess)
|
| 214 |
+
self.episode_started = False
|
| 215 |
+
|
| 216 |
+
@override(Exploration)
|
| 217 |
+
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
    """Restores the clean weights at episode end (if currently noisy)."""
    # Only undo the perturbation if it is actually applied right now.
    if self.weights_are_currently_noisy:
        self._remove_noise(tf_sess=tf_sess)
|
| 221 |
+
|
| 222 |
+
@override(Exploration)
|
| 223 |
+
def postprocess_trajectory(
    self,
    policy: "Policy",
    sample_batch: SampleBatch,
    tf_sess: Optional["tf.Session"] = None,
):
    """Adapts the noise stddev based on the noisy-vs-clean policy distance.

    Runs two forward passes over `sample_batch` -- one with the current
    weight state (noisy or clean) and one with the opposite -- measures the
    distance between the two resulting action distributions, and nudges
    `self.stddev_val` up or down accordingly (see [1]/[2]).
    """
    noisy_action_dist = noise_free_action_dist = None
    # Adjust the stddev depending on the action (pi)-distance.
    # Also see [1] for details.
    # TODO(sven): Find out whether this can be scrapped by simply using
    # the `sample_batch` to get the noisy/noise-free action dist.
    # First pass: with the weights in their *current* state.
    _, _, fetches = policy.compute_actions_from_input_dict(
        input_dict=sample_batch, explore=self.weights_are_currently_noisy
    )

    # Categorical case (e.g. DQN).
    if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
        action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
    # Deterministic (Gaussian actions, e.g. DDPG).
    elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
        action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
    else:
        raise NotImplementedError  # TODO(sven): Other action-dist cases.

    if self.weights_are_currently_noisy:
        noisy_action_dist = action_dist
    else:
        noise_free_action_dist = action_dist

    # Second pass: with the weights in the *opposite* state (the
    # explore flag flips noise on/off via before_compute_actions).
    _, _, fetches = policy.compute_actions_from_input_dict(
        input_dict=sample_batch, explore=not self.weights_are_currently_noisy
    )

    # Categorical case (e.g. DQN).
    if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
        action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
    # Deterministic (Gaussian actions, e.g. DDPG).
    elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
        action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]

    if noisy_action_dist is None:
        noisy_action_dist = action_dist
    else:
        noise_free_action_dist = action_dist

    delta = distance = None
    # Categorical case (e.g. DQN).
    if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
        # Calculate KL-divergence (DKL(clean||noisy)) according to [2].
        # TODO(sven): Allow KL-divergence to be calculated by our
        # Distribution classes (don't support off-graph/numpy yet).
        # NOTE(review): assumes ACTION_DIST_INPUTS fetches are numpy
        # arrays here -- TODO confirm against the policy implementation.
        distance = np.nanmean(
            np.sum(
                noise_free_action_dist
                * np.log(
                    noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
                ),
                1,
            )
        )
        current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
            "cur_epsilon"
        ]
        # Target threshold delta, derived from the current epsilon (see [2]).
        delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
    elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
        # Calculate MSE between noisy and non-noisy output (see [2]).
        distance = np.sqrt(
            np.mean(np.square(noise_free_action_dist - noisy_action_dist))
        )
        current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
        delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale

    # Adjust stddev according to the calculated action-distance:
    # too-small distance -> increase noise; too-large -> decrease it.
    if distance <= delta:
        self.stddev_val *= 1.01
    else:
        self.stddev_val /= 1.01

    # Update our state (self.stddev and self.stddev_val).
    self.set_state(self.get_state(), sess=tf_sess)

    return sample_batch
|
| 305 |
+
|
| 306 |
+
def _sample_new_noise(self, *, tf_sess=None):
|
| 307 |
+
"""Samples new noise and stores it in `self.noise`."""
|
| 308 |
+
if self.framework == "tf":
|
| 309 |
+
tf_sess.run(self.tf_sample_new_noise_op)
|
| 310 |
+
elif self.framework == "tf2":
|
| 311 |
+
self._tf_sample_new_noise_op()
|
| 312 |
+
else:
|
| 313 |
+
for i in range(len(self.noise)):
|
| 314 |
+
self.noise[i] = torch.normal(
|
| 315 |
+
mean=torch.zeros(self.noise[i].size()), std=self.stddev
|
| 316 |
+
).to(self.device)
|
| 317 |
+
|
| 318 |
+
def _tf_sample_new_noise_op(self):
    """Builds (and, under eager execution, runs) an op that re-samples
    every noise variable from N(0, stddev).

    Returns:
        A grouped tf op covering all per-variable assign ops.
    """
    assign_ops = [
        tf1.assign(
            noise_var,
            tf.random.normal(
                shape=noise_var.shape, stddev=self.stddev, dtype=tf.float32
            ),
        )
        for noise_var in self.noise
    ]
    return tf.group(*assign_ops)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
    """Samples new noise and applies it to the model's weights.

    Args:
        tf_sess: The tf1 session (static-graph tf only).
        override: If True, strip any currently applied noise first so the
            freshly sampled noise replaces (rather than stacks on) it.
    """
    must_undo_first = override and self.weights_are_currently_noisy
    if self.framework == "tf":
        if must_undo_first:
            tf_sess.run(self.tf_remove_noise_op)
        tf_sess.run(self.tf_sample_new_noise_and_add_op)
    else:
        if must_undo_first:
            self._remove_noise()
        self._sample_new_noise()
        self._add_stored_noise()

    self.weights_are_currently_noisy = True
def _add_stored_noise(self, *, tf_sess=None):
    """Adds the already-stored `self.noise` to the model's parameters.

    Note: No new noise is sampled here.

    Args:
        tf_sess: The tf1 session used to add the stored noise to the
            (currently noise-free) weights (static-graph tf only).
    """
    # Noise must only ever be added to noise-free weights.
    assert self.weights_are_currently_noisy is False

    if self.framework == "tf":
        tf_sess.run(self.tf_add_stored_noise_op)
    elif self.framework == "tf2":
        self._tf_add_stored_noise_op()
    else:
        # Torch: add in-place, temporarily disabling autograd tracking so
        # the perturbation itself is not recorded.
        for param, noise_tensor in zip(self.model_variables, self.noise):
            param.requires_grad = False
            param.add_(noise_tensor)
            param.requires_grad = True

    self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
    """Generates a tf op that adds the stored noise to the weights.

    Also used by tf-eager.

    Returns:
        tf.op: A no-op that depends on all per-variable add ops.
    """
    add_ops = [
        tf1.assign_add(param, noise_tensor)
        for param, noise_tensor in zip(self.model_variables, self.noise)
    ]
    grouped = tf.group(*add_ops)
    with tf1.control_dependencies([grouped]):
        return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
    """Subtracts the currently applied noise from the model parameters.

    Args:
        tf_sess: The tf1 session used to remove the noise from the
            (currently noisy) weights (static-graph tf only).
    """
    # Noise may only be removed when it is actually applied.
    assert self.weights_are_currently_noisy is True

    if self.framework == "tf":
        tf_sess.run(self.tf_remove_noise_op)
    elif self.framework == "tf2":
        self._tf_remove_noise_op()
    else:
        # Torch: subtract in-place, temporarily disabling autograd tracking.
        for param, noise_tensor in zip(self.model_variables, self.noise):
            param.requires_grad = False
            param.add_(-noise_tensor)
            param.requires_grad = True

    self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
    """Generates a tf op that removes the stored noise from the weights.

    Also used by tf-eager.

    Returns:
        tf.op: A no-op that depends on all per-variable subtract ops.
    """
    subtract_ops = [
        tf1.assign_add(param, -noise_tensor)
        for param, noise_tensor in zip(self.model_variables, self.noise)
    ]
    grouped = tf.group(*subtract_ops)
    with tf1.control_dependencies([grouped]):
        return tf.no_op()
@override(Exploration)
def get_state(self, sess=None):
    """Returns the current stddev value as this exploration's state."""
    return {"cur_stddev": self.stddev_val}
@override(Exploration)
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
    """Restores `cur_stddev` and propagates it into `self.stddev`."""
    new_val = state["cur_stddev"]
    self.stddev_val = new_val
    if self.framework == "tf":
        # Static graph: load the value into the tf variable via the session.
        self.stddev.load(new_val, session=sess)
    elif isinstance(self.stddev, float):
        self.stddev = new_val
    else:
        # tf2/torch variable-like object.
        self.stddev.assign(new_val)
deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/per_worker_gaussian_noise.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from gymnasium.spaces import Space
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 5 |
+
from ray.rllib.utils.exploration.gaussian_noise import GaussianNoise
|
| 6 |
+
from ray.rllib.utils.schedules import ConstantSchedule
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@OldAPIStack
class PerWorkerGaussianNoise(GaussianNoise):
    """A per-worker Gaussian noise class for distributed algorithms.

    Gives each remote worker i (of N) the constant scale schedule
    0.4 ^ (1 + i / float(N - 1) * 7), while the local worker gets 0.0 so
    evaluation rollouts run without exploration. See the Ape-X paper.
    """

    def __init__(
        self,
        action_space: Space,
        *,
        framework: Optional[str],
        num_workers: Optional[int],
        worker_index: Optional[int],
        **kwargs
    ):
        """
        Args:
            action_space: The gym action space used by the environment.
            num_workers: The overall number of workers used.
            worker_index: The index of the Worker using this Exploration.
            framework: One of None, "tf", "torch".
        """
        scale_schedule = None
        # Fixed, per-worker exploration scale (Ape-X style).
        if num_workers > 0:
            if worker_index > 0:
                denom = float(num_workers - 1) if num_workers > 1 else 1.0
                exponent = 1 + (worker_index / denom) * 7
                scale_schedule = ConstantSchedule(0.4**exponent, framework=framework)
            else:
                # Local worker: zero exploration so eval rollouts run properly.
                scale_schedule = ConstantSchedule(0.0, framework=framework)

        super().__init__(
            action_space, scale_schedule=scale_schedule, framework=framework, **kwargs
        )
deepseek/lib/python3.10/site-packages/ray/rllib/utils/exploration/slate_soft_q.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Union
|
| 2 |
+
|
| 3 |
+
from ray.rllib.models.action_dist import ActionDistribution
|
| 4 |
+
from ray.rllib.utils.annotations import OldAPIStack, override
|
| 5 |
+
from ray.rllib.utils.exploration.exploration import TensorType
|
| 6 |
+
from ray.rllib.utils.exploration.soft_q import SoftQ
|
| 7 |
+
from ray.rllib.utils.framework import try_import_tf, try_import_torch
|
| 8 |
+
|
| 9 |
+
tf1, tf, tfv = try_import_tf()
|
| 10 |
+
torch, _ = try_import_torch()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@OldAPIStack
class SlateSoftQ(SoftQ):
    """SoftQ variant for slate (recommendation) action spaces (torch only)."""

    @override(SoftQ)
    def get_exploration_action(
        self,
        action_distribution: ActionDistribution,
        timestep: Union[int, TensorType],
        explore: bool = True,
    ):
        """Samples (explore) or argmaxes (exploit) a slate action.

        Returns:
            Tuple of (action, action_logp); logps are all-zeros since
            they are not tracked for slate actions here.
        """
        assert (
            self.framework == "torch"
        ), "ERROR: SlateSoftQ only supports torch so far!"

        dist_cls = type(action_distribution)
        # Re-create the distribution so our temperature is applied.
        action_distribution = dist_cls(
            action_distribution.inputs, self.model, temperature=self.temperature
        )
        num_rows = action_distribution.inputs.size()[0]
        action_logp = torch.zeros(num_rows, dtype=torch.float)

        self.last_timestep = timestep

        # Stochastic sample over the (q-value) logits when exploring;
        # deterministic argmax "sample" otherwise.
        action = (
            action_distribution.sample()
            if explore
            else action_distribution.deterministic_sample()
        )
        return action, action_logp
deepseek/lib/python3.10/site-packages/ray/rllib/utils/filter.py
ADDED
|
@@ -0,0 +1,420 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import threading
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import tree # pip install dm_tree
|
| 6 |
+
|
| 7 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 8 |
+
from ray.rllib.utils.deprecation import Deprecated
|
| 9 |
+
from ray.rllib.utils.numpy import SMALL_NUMBER
|
| 10 |
+
from ray.rllib.utils.typing import TensorStructType
|
| 11 |
+
from ray.rllib.utils.serialization import _serialize_ndarray, _deserialize_ndarray
|
| 12 |
+
from ray.rllib.utils.deprecation import deprecation_warning
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@OldAPIStack
class Filter:
    """Processes input, possibly statefully. Abstract base for filters."""

    def apply_changes(self, other: "Filter", *args, **kwargs) -> None:
        """Updates self with "new state" from the other filter."""
        raise NotImplementedError

    def copy(self) -> "Filter":
        """Creates a new object with the same state as self.

        Returns:
            A copy of self.
        """
        raise NotImplementedError

    def sync(self, other: "Filter") -> None:
        """Copies all state from the other filter to self."""
        raise NotImplementedError

    def reset_buffer(self) -> None:
        """Creates a copy of current state and resets accumulated state."""
        raise NotImplementedError

    def as_serializable(self) -> "Filter":
        """Returns a picklable/serializable variant of this filter."""
        raise NotImplementedError

    @Deprecated(new="Filter.reset_buffer()", error=True)
    def clear_buffer(self):
        pass
|
| 49 |
+
@OldAPIStack
class NoFilter(Filter):
    """Pass-through filter: converts to np.ndarray, keeps no state."""

    is_concurrent = True

    def __call__(self, x: TensorStructType, update=True):
        # Already an np.ndarray, dict, or tuple -> return untouched.
        if isinstance(x, (np.ndarray, dict, tuple)):
            return x

        try:
            return np.asarray(x)
        except Exception:
            raise ValueError("Failed to convert to array", x)

    def apply_changes(self, other: "NoFilter", *args, **kwargs) -> None:
        # Stateless -> nothing to apply.
        pass

    def copy(self) -> "NoFilter":
        # Stateless -> safe to share.
        return self

    def sync(self, other: "NoFilter") -> None:
        # Stateless -> nothing to sync.
        pass

    def reset_buffer(self) -> None:
        # Stateless -> nothing to reset.
        pass

    def as_serializable(self) -> "NoFilter":
        return self
|
| 79 |
+
# http://www.johndcook.com/blog/standard_deviation/
|
| 80 |
+
@OldAPIStack
class RunningStat:
    """Welford-style running mean/variance accumulator.

    `std_array` holds the sum of squared deviations (S in Welford's
    notation), not the std itself; see the `var`/`std` properties.
    """

    def __init__(self, shape=()):
        self.num_pushes = 0
        self.mean_array = np.zeros(shape)
        self.std_array = np.zeros(shape)

    def copy(self):
        """Returns a deep copy of this stat object."""
        dup = RunningStat()
        # TODO: Remove these safe-guards (legacy attribute names) if not
        # needed anymore.
        dup.num_pushes = self.num_pushes if hasattr(self, "num_pushes") else self._n
        dup.mean_array = (
            np.copy(self.mean_array)
            if hasattr(self, "mean_array")
            else np.copy(self._M)
        )
        dup.std_array = (
            np.copy(self.std_array) if hasattr(self, "std_array") else np.copy(self._S)
        )
        return dup

    def push(self, x):
        """Folds a single sample into the running statistics.

        Raises:
            ValueError: If `x`'s shape does not match the tracked shape.
        """
        x = np.asarray(x)
        if x.shape != self.mean_array.shape:
            raise ValueError(
                "Unexpected input shape {}, expected {}, value = {}".format(
                    x.shape, self.mean_array.shape, x
                )
            )
        self.num_pushes += 1
        if self.num_pushes == 1:
            self.mean_array[...] = x
        else:
            delta = x - self.mean_array
            self.mean_array[...] += delta / self.num_pushes
            self.std_array[...] += (
                (delta / self.num_pushes) * delta * (self.num_pushes - 1)
            )

    def update(self, other):
        """Merges another RunningStat into this one (parallel merge)."""
        n1 = float(self.num_pushes)
        n2 = float(other.num_pushes)
        total = n1 + n2
        if total == 0:
            # Avoid divide by zero, which creates nans.
            return
        diff = self.mean_array - other.mean_array
        # Combine S-values first (uses the *old* means), then the means.
        self.std_array = (
            self.std_array + other.std_array + (diff * diff / total) * n1 * n2
        )
        self.mean_array = (n1 * self.mean_array + n2 * other.mean_array) / total
        self.num_pushes = total

    def __repr__(self):
        return "(n={}, mean_mean={}, mean_std={})".format(
            self.n, np.mean(self.mean), np.mean(self.std)
        )

    @property
    def n(self):
        return self.num_pushes

    @property
    def mean(self):
        return self.mean_array

    @property
    def var(self):
        # With <=1 sample, falls back to square(mean) (legacy behavior).
        if self.num_pushes > 1:
            raw = self.std_array / (self.num_pushes - 1)
        else:
            raw = np.square(self.mean_array)
        return raw.astype(np.float32)

    @property
    def std(self):
        return np.sqrt(self.var)

    @property
    def shape(self):
        return self.mean_array.shape

    def to_state(self):
        """Serializes this object into a JSON-compatible dict."""
        return {
            "num_pushes": self.num_pushes,
            "mean_array": _serialize_ndarray(self.mean_array),
            "std_array": _serialize_ndarray(self.std_array),
        }

    @staticmethod
    def from_state(state):
        """Inverse of `to_state`: rebuilds a RunningStat from a dict."""
        restored = RunningStat()
        restored.num_pushes = state["num_pushes"]
        restored.mean_array = _deserialize_ndarray(state["mean_array"])
        restored.std_array = _deserialize_ndarray(state["std_array"])
        return restored
+
|
| 180 |
+
@OldAPIStack
class MeanStdFilter(Filter):
    """Keeps a running mean/std of seen states and normalizes with them."""

    is_concurrent = False

    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        self.shape = shape
        # No preprocessor if shape is None (Discrete) or the flattened
        # shape is Tuple[np.ndarray] / Dict[str, np.ndarray] (complex
        # inputs).
        flat_shape = tree.flatten(self.shape)
        self.no_preprocessor = shape is None or (
            isinstance(self.shape, (dict, tuple))
            and len(flat_shape) > 0
            and isinstance(flat_shape[0], np.ndarray)
        )
        # When preprocessing (flattening dicts/tuples), keep shape as an
        # np.ndarray so it is not confused with a complex Tuple space's
        # shape structure (which is a Tuple[np.ndarray]).
        if not self.no_preprocessor:
            self.shape = np.array(self.shape)
        self.demean = demean
        self.destd = destd
        self.clip = clip
        # Running stats over everything ever pushed.
        self.running_stats = tree.map_structure(lambda s: RunningStat(s), self.shape)

        # In distributed rollouts each worker sees different states; the
        # buffer tracks the deltas accumulated since the last sync among
        # all observation filters.
        self.buffer = None
        self.reset_buffer()

    def reset_buffer(self) -> None:
        """Starts a fresh (empty) delta buffer."""
        self.buffer = tree.map_structure(lambda s: RunningStat(s), self.shape)

    def apply_changes(
        self, other: "MeanStdFilter", with_buffer: bool = False, *args, **kwargs
    ) -> None:
        """Applies the updates sitting in another filter's buffer.

        Args:
            other: Other filter to apply info from.
            with_buffer: If True, also copy `other`'s buffer into self.
        """
        tree.map_structure(
            lambda rs, other_rs: rs.update(other_rs), self.running_stats, other.buffer
        )
        if with_buffer:
            self.buffer = tree.map_structure(lambda b: b.copy(), other.buffer)

    def copy(self) -> "MeanStdFilter":
        """Returns a copy of `self`."""
        dup = MeanStdFilter(self.shape)
        dup.sync(self)
        return dup

    def as_serializable(self) -> "MeanStdFilter":
        return self.copy()

    def sync(self, other: "MeanStdFilter") -> None:
        """Syncs all fields (flags, running stats, buffer) from `other`."""
        self.demean = other.demean
        self.destd = other.destd
        self.clip = other.clip
        self.running_stats = tree.map_structure(
            lambda rs: rs.copy(), other.running_stats
        )
        self.buffer = tree.map_structure(lambda b: b.copy(), other.buffer)

    def __call__(self, x: TensorStructType, update: bool = True) -> TensorStructType:
        if self.no_preprocessor:
            x = tree.map_structure(lambda leaf: np.asarray(leaf), x)
        else:
            x = np.asarray(x)

        def _normalize(value, rs, buf, shape):
            # Discrete|MultiDiscrete spaces -> no normalization.
            if shape is None:
                return value

            # Keep the dtype as-is throughout this filter.
            orig_dtype = value.dtype

            if update:
                if len(value.shape) == len(rs.shape) + 1:
                    # The vectorized case: push each row separately.
                    for row in value:
                        rs.push(row)
                        buf.push(row)
                else:
                    # The unvectorized case.
                    rs.push(value)
                    buf.push(value)
            if self.demean:
                value = value - rs.mean
            if self.destd:
                value = value / (rs.std + SMALL_NUMBER)
            if self.clip:
                value = np.clip(value, -self.clip, self.clip)
            return value.astype(orig_dtype)

        if self.no_preprocessor:
            return tree.map_structure_up_to(
                x, _normalize, x, self.running_stats, self.buffer, self.shape
            )
        return _normalize(x, self.running_stats, self.buffer, self.shape)
+
|
| 360 |
+
@OldAPIStack
class ConcurrentMeanStdFilter(MeanStdFilter):
    """MeanStdFilter variant marked concurrent (testing only; deprecated)."""

    is_concurrent = True

    def __init__(self, *args, **kwargs):
        super(ConcurrentMeanStdFilter, self).__init__(*args, **kwargs)
        deprecation_warning(
            old="ConcurrentMeanStdFilter",
            error=False,
            help="ConcurrentMeanStd filters are only used for testing and will "
            "therefore be deprecated in the course of moving to the "
            "Connetors API, where testing of filters will be done by other "
            "means.",
        )

        self._lock = threading.RLock()

        def lock_wrap(func):
            # Wraps `func` so each call holds self._lock.
            def wrapper(*args, **kwargs):
                with self._lock:
                    return func(*args, **kwargs)

            return wrapper

        # NOTE(review): assigning `__getattribute__` on the *instance* is
        # not honored by Python's implicit attribute lookup (special
        # methods are looked up on the type), so this likely has no
        # locking effect — confirm before relying on thread-safety.
        self.__getattribute__ = lock_wrap(self.__getattribute__)

    def as_serializable(self) -> "MeanStdFilter":
        """Returns the non-concurrent version of the current class."""
        plain = MeanStdFilter(self.shape)
        plain.sync(self)
        return plain

    def copy(self) -> "ConcurrentMeanStdFilter":
        """Returns a copy of this Filter."""
        dup = ConcurrentMeanStdFilter(self.shape)
        dup.sync(self)
        return dup

    def __repr__(self) -> str:
        return "ConcurrentMeanStdFilter({}, {}, {}, {}, {}, {})".format(
            self.shape,
            self.demean,
            self.destd,
            self.clip,
            self.running_stats,
            self.buffer,
        )
| 408 |
+
|
| 409 |
+
@OldAPIStack
def get_filter(filter_config, shape):
    """Factory: builds an observation filter from a config value.

    Args:
        filter_config: One of "MeanStdFilter", "ConcurrentMeanStdFilter",
            "NoFilter", or a callable taking `shape`.
        shape: Observation shape handed to the filter constructor.

    Raises:
        Exception: If `filter_config` matches none of the above.
    """
    if filter_config == "MeanStdFilter":
        return MeanStdFilter(shape, clip=None)
    if filter_config == "ConcurrentMeanStdFilter":
        return ConcurrentMeanStdFilter(shape, clip=None)
    if filter_config == "NoFilter":
        return NoFilter()
    if callable(filter_config):
        return filter_config(shape)
    raise Exception("Unknown observation_filter: " + str(filter_config))
deepseek/lib/python3.10/site-packages/ray/rllib/utils/images.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import importlib
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from ray.rllib.utils.annotations import DeveloperAPI
|
| 7 |
+
|
| 8 |
+
logger = logging.getLogger(__name__)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@DeveloperAPI
def is_package_installed(package_name):
    """Returns True if `package_name` is an installed distribution.

    Args:
        package_name: The distribution name as known to pip
            (e.g. "opencv-python").

    Returns:
        True if version metadata for the package can be found,
        False otherwise.
    """
    # Import the submodule explicitly: the module-level `import importlib`
    # does not guarantee that the `importlib.metadata` attribute is bound
    # on the `importlib` package.
    import importlib.metadata

    try:
        importlib.metadata.version(package_name)
        return True
    except importlib.metadata.PackageNotFoundError:
        return False
| 19 |
+
|
| 20 |
+
try:
    import cv2

    # Presumably disabled to keep image preprocessing off OpenCL — confirm.
    cv2.ocl.setUseOpenCL(False)

    logger.debug("CV2 found for image processing.")
except ImportError as e:
    # Distinguish "not installed" from "installed but failing to import"
    # (e.g. GUI-less servers needing the headless wheel).
    if is_package_installed("opencv-python"):
        raise ImportError(
            f"OpenCV is installed, but we failed to import it. This may be because "
            f"you need to install `opencv-python-headless` instead of "
            f"`opencv-python`. Error message: {e}",
        )
    # Fall back to a None sentinel; the helpers below check for it.
    cv2 = None
| 35 |
+
|
| 36 |
+
@DeveloperAPI
def resize(img: np.ndarray, height: int, width: int) -> np.ndarray:
    """Resizes `img` to (height, width) using area interpolation.

    Raises:
        ModuleNotFoundError: If opencv could not be imported.
    """
    if cv2 is None:
        raise ModuleNotFoundError(
            "`opencv` not installed! Do `pip install opencv-python`"
        )
    # Note: cv2 expects (width, height) order.
    return cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
+
|
| 45 |
+
@DeveloperAPI
def rgb2gray(img: np.ndarray) -> np.ndarray:
    """Converts an RGB image to grayscale via cv2.

    Raises:
        ModuleNotFoundError: If opencv could not be imported.
    """
    if cv2 is None:
        raise ModuleNotFoundError(
            "`opencv` not installed! Do `pip install opencv-python`"
        )
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+
|
| 54 |
+
@DeveloperAPI
def imread(img_file: str) -> np.ndarray:
    """Reads an image file and returns it as a float32 np.ndarray.

    Raises:
        ModuleNotFoundError: If opencv could not be imported.
    """
    if cv2 is None:
        raise ModuleNotFoundError(
            "`opencv` not installed! Do `pip install opencv-python`"
        )
    return cv2.imread(img_file).astype(np.float32)
deepseek/lib/python3.10/site-packages/ray/rllib/utils/numpy.py
ADDED
|
@@ -0,0 +1,606 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import OrderedDict
|
| 2 |
+
from gymnasium.spaces import Discrete, MultiDiscrete
|
| 3 |
+
import numpy as np
|
| 4 |
+
import tree # pip install dm_tree
|
| 5 |
+
from types import MappingProxyType
|
| 6 |
+
from typing import List, Optional
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
from ray.rllib.utils.annotations import PublicAPI
|
| 10 |
+
from ray.rllib.utils.deprecation import Deprecated
|
| 11 |
+
from ray.rllib.utils.framework import try_import_tf, try_import_torch
|
| 12 |
+
from ray.rllib.utils.typing import SpaceStruct, TensorType, TensorStructType, Union
|
| 13 |
+
|
| 14 |
+
tf1, tf, tfv = try_import_tf()
|
| 15 |
+
torch, _ = try_import_torch()
|
| 16 |
+
|
| 17 |
+
SMALL_NUMBER = 1e-6
|
| 18 |
+
# Some large int number. May be increased here, if needed.
|
| 19 |
+
LARGE_INTEGER = 100000000
|
| 20 |
+
# Min and Max outputs (clipped) from an NN-output layer interpreted as the
|
| 21 |
+
# log(x) of some x (e.g. a stddev of a normal
|
| 22 |
+
# distribution).
|
| 23 |
+
MIN_LOG_NN_OUTPUT = -5
|
| 24 |
+
MAX_LOG_NN_OUTPUT = 2
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@PublicAPI
@Deprecated(
    help="RLlib itself has no use for this anymore.",
    error=False,
)
def aligned_array(size: int, dtype, align: int = 64) -> np.ndarray:
    """Returns an array of a given size that is 64-byte aligned.

    The returned array can be efficiently copied into GPU memory by
    TensorFlow.

    Args:
        size: The size (total number of items) of the array. For example,
            array([[0.0, 1.0], [2.0, 3.0]]) would have size=4.
        dtype: The numpy dtype of the array.
        align: The byte alignment to enforce.

    Returns:
        A np.ndarray with the given specifications.
    """
    num_bytes = size * dtype.itemsize
    # Over-allocate raw bytes so we can shift to an aligned start address.
    raw = np.empty(num_bytes + (align - 1), dtype=np.uint8)
    misalignment = raw.ctypes.data % align
    start = (align - misalignment) if misalignment else 0
    if num_bytes:
        aligned = raw[start : start + num_bytes].view(dtype)
    else:
        # Keep a zero-length reference into `raw` so numpy does not
        # optimize the empty slice away.
        aligned = raw[start : start + 1][0:0].view(dtype)

    assert len(aligned) == size, len(aligned)
    assert aligned.ctypes.data % align == 0, aligned.ctypes.data
    return aligned
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@PublicAPI
@Deprecated(
    help="RLlib itself has no use for this anymore.",
    error=False,
)
def concat_aligned(
    items: List[np.ndarray], time_major: Optional[bool] = None
) -> np.ndarray:
    """Concatenate arrays, ensuring the output is 64-byte aligned.

    We only align float arrays; other arrays are concatenated as normal.

    This should be used instead of np.concatenate() to improve performance
    when the output array is likely to be fed into TensorFlow.

    Args:
        items: The list of items to concatenate and align.
        time_major: Whether the data in items is time-major, in which
            case, we will concatenate along axis=1.

    Returns:
        The concat'd and aligned array.
    """

    if len(items) == 0:
        # NOTE(review): returns a plain Python list here, not an ndarray,
        # despite the annotated return type — callers appear to tolerate it.
        return []
    elif len(items) == 1:
        # we assume the input is aligned. In any case, it doesn't help
        # performance to force align it since that incurs a needless copy.
        return items[0]
    elif isinstance(items[0], np.ndarray) and items[0].dtype in [
        np.float32,
        np.float64,
        np.uint8,
    ]:
        dtype = items[0].dtype
        # Allocate one flat, 64-byte-aligned buffer large enough to hold
        # every element of every item.
        flat = aligned_array(sum(s.size for s in items), dtype)
        if time_major is not None:
            if time_major is True:
                # Time-major: batch axis is axis=1, so sum shapes there.
                batch_dim = sum(s.shape[1] for s in items)
                new_shape = (items[0].shape[0], batch_dim,) + items[
                    0
                ].shape[2:]
            else:
                # Batch-major: batch axis is axis=0.
                batch_dim = sum(s.shape[0] for s in items)
                new_shape = (batch_dim, items[0].shape[1],) + items[
                    0
                ].shape[2:]
        else:
            batch_dim = sum(s.shape[0] for s in items)
            new_shape = (batch_dim,) + items[0].shape[1:]
        # Reshape the aligned buffer and concatenate directly into it:
        # np.concatenate's `out=` writes in place, preserving alignment.
        output = flat.reshape(new_shape)
        assert output.ctypes.data % 64 == 0, output.ctypes.data
        np.concatenate(items, out=output, axis=1 if time_major else 0)
        return output
    else:
        # Any other dtype: plain (unaligned) concatenation.
        return np.concatenate(items, axis=1 if time_major else 0)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
@PublicAPI
def convert_to_numpy(x: TensorStructType, reduce_type: bool = True) -> TensorStructType:
    """Converts values in `x` to non-Tensor numpy or python types.

    Args:
        x: Any (possibly nested) struct, the values in which will be
            converted and returned as a new struct with all torch/tf tensors
            being converted to numpy types.
        reduce_type: Whether to automatically reduce all float64 and int64 data
            into float32 and int32 data, respectively.

    Returns:
        A new struct with the same structure as `x`, but with all
        values converted to numpy arrays (on CPU).
    """

    # The mapping function used to numpyize torch/tf Tensors (and move them
    # to the CPU beforehand).
    def mapping(item):
        if torch and isinstance(item, torch.Tensor):
            # 0-dim tensors become plain python scalars; everything else
            # becomes a (CPU) numpy array.
            ret = (
                item.cpu().item()
                if len(item.size()) == 0
                else item.detach().cpu().numpy()
            )
        elif (
            tf and isinstance(item, (tf.Tensor, tf.Variable)) and hasattr(item, "numpy")
        ):
            assert tf.executing_eagerly()
            ret = item.numpy()
        else:
            ret = item
        # Optionally downcast 64-bit numpy data to the 32-bit equivalent.
        if reduce_type and isinstance(ret, np.ndarray):
            if np.issubdtype(ret.dtype, np.floating):
                ret = ret.astype(np.float32)
            elif np.issubdtype(ret.dtype, int):
                ret = ret.astype(np.int32)
        # Fix: previously there were two consecutive `return ret` statements
        # (one inside the `if reduce_type ...` block, one after it) — the
        # inner one was redundant; a single return is equivalent.
        return ret

    return tree.map_structure(mapping, x)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
@PublicAPI
def fc(
    x: np.ndarray,
    weights: np.ndarray,
    biases: Optional[np.ndarray] = None,
    framework: Optional[str] = None,
) -> np.ndarray:
    """Calculates FC (dense) layer outputs given weights/biases and input.

    Args:
        x: The input to the dense layer.
        weights: The weights matrix.
        biases: The biases vector. All 0s if None.
        framework: An optional framework hint (to figure out,
            e.g. whether to transpose torch weight matrices).

    Returns:
        The dense layer's output.
    """

    # Normalize any torch tensor / eager-tf Variable into a numpy array,
    # optionally transposing it.
    def map_(data, transpose=False):
        if torch:
            if isinstance(data, torch.Tensor):
                data = data.cpu().detach().numpy()
        if tf and tf.executing_eagerly():
            if isinstance(data, tf.Variable):
                data = data.numpy()
        if transpose:
            data = np.transpose(data)
        return data

    x = map_(x)
    # Torch stores matrices in transpose (faster for backprop).
    # Heuristic: transpose only when shapes don't line up as-is but do
    # after transposing (ambiguous for square weight matrices).
    transpose = framework == "torch" and (
        x.shape[1] != weights.shape[0] and x.shape[1] == weights.shape[1]
    )
    weights = map_(weights, transpose=transpose)
    biases = map_(biases)

    # `biases is None` -> treat as all-zero bias vector.
    return np.matmul(x, weights) + (0.0 if biases is None else biases)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@PublicAPI
def flatten_inputs_to_1d_tensor(
    inputs: TensorStructType,
    spaces_struct: Optional[SpaceStruct] = None,
    time_axis: bool = False,
    batch_axis: bool = True,
) -> TensorType:
    """Flattens arbitrary input structs according to the given spaces struct.

    Returns a single 1D tensor resulting from the different input
    components' values.

    Thereby:
    - Boxes (any shape) get flattened to (B, [T]?, -1). Note that image boxes
    are not treated differently from other types of Boxes and get
    flattened as well.
    - Discrete (int) values are one-hot'd, e.g. a batch of [1, 0, 3] (B=3 with
    Discrete(4) space) results in [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]].
    - MultiDiscrete values are multi-one-hot'd, e.g. a batch of
    [[0, 2], [1, 4]] (B=2 with MultiDiscrete([2, 5]) space) results in
    [[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 1]].

    Args:
        inputs: The inputs to be flattened.
        spaces_struct: The (possibly nested) structure of the spaces that `inputs`
            belongs to.
        time_axis: Whether all inputs have a time-axis (after the batch axis).
            If True, will keep not only the batch axis (0th), but the time axis
            (1st) as-is and flatten everything from the 2nd axis up.
        batch_axis: Whether all inputs have a batch axis.
            If True, will keep that batch axis as-is and flatten everything from the
            other dims up.

    Returns:
        A single 1D tensor resulting from concatenating all
        flattened/one-hot'd input components. Depending on the time_axis flag,
        the shape is (B, n) or (B, T, n).

    .. testcode::
        :skipif: True

        # B=2
        from ray.rllib.utils.tf_utils import flatten_inputs_to_1d_tensor
        from gymnasium.spaces import Discrete, Box
        out = flatten_inputs_to_1d_tensor(
            {"a": [1, 0], "b": [[[0.0], [0.1]], [1.0], [1.1]]},
            spaces_struct=dict(a=Discrete(2), b=Box(shape=(2, 1)))
        )
        print(out)

        # B=2; T=2
        out = flatten_inputs_to_1d_tensor(
            ([[1, 0], [0, 1]],
            [[[0.0, 0.1], [1.0, 1.1]], [[2.0, 2.1], [3.0, 3.1]]]),
            spaces_struct=tuple([Discrete(2), Box(shape=(2, ))]),
            time_axis=True
        )
        print(out)

    .. testoutput::

        [[0.0, 1.0, 0.0, 0.1], [1.0, 0.0, 1.0, 1.1]] # B=2 n=4
        [[[0.0, 1.0, 0.0, 0.1], [1.0, 0.0, 1.0, 1.1]],
        [[1.0, 0.0, 2.0, 2.1], [0.0, 1.0, 3.0, 3.1]]] # B=2 T=2 n=4
    """
    # `time_axis` must not be True if `batch_axis` is False.
    assert not (time_axis and not batch_axis)

    # Walk the (possibly nested) input struct and its space struct in
    # parallel, in flattened (leaf) order.
    flat_inputs = tree.flatten(inputs)
    flat_spaces = (
        tree.flatten(spaces_struct)
        if spaces_struct is not None
        else [None] * len(flat_inputs)
    )

    # B/T are inferred once, from the first leaf (assumes all leaves share
    # the same batch/time dims — TODO confirm with callers).
    B = None
    T = None
    out = []
    for input_, space in zip(flat_inputs, flat_spaces):
        # Store batch and (if applicable) time dimension.
        if B is None and batch_axis:
            B = input_.shape[0]
            if time_axis:
                T = input_.shape[1]

        # One-hot encoding.
        if isinstance(space, Discrete):
            if time_axis:
                # Fold time into the batch dim before one-hot'ing.
                input_ = np.reshape(input_, [B * T])
            out.append(one_hot(input_, depth=space.n).astype(np.float32))
        # Multi one-hot encoding.
        elif isinstance(space, MultiDiscrete):
            if time_axis:
                input_ = np.reshape(input_, [B * T, -1])
            if batch_axis:
                # One-hot each sub-space column, then concat along last axis.
                out.append(
                    np.concatenate(
                        [
                            one_hot(input_[:, i], depth=n).astype(np.float32)
                            for i, n in enumerate(space.nvec)
                        ],
                        axis=-1,
                    )
                )
            else:
                # No batch axis: index individual int values directly.
                out.append(
                    np.concatenate(
                        [
                            one_hot(input_[i], depth=n).astype(np.float32)
                            for i, n in enumerate(space.nvec)
                        ],
                        axis=-1,
                    )
                )
        # Box: Flatten.
        else:
            # Special case for spaces: Box(.., shape=(), ..)
            if isinstance(input_, float):
                input_ = np.array([input_])

            if time_axis:
                input_ = np.reshape(input_, [B * T, -1])
            elif batch_axis:
                input_ = np.reshape(input_, [B, -1])
            else:
                input_ = np.reshape(input_, [-1])
            out.append(input_.astype(np.float32))

    merged = np.concatenate(out, axis=-1)
    # Restore the time-dimension, if applicable.
    if time_axis:
        merged = np.reshape(merged, [B, T, -1])
    return merged
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
@PublicAPI
def make_action_immutable(obj):
    """Flags actions immutable to notify users when trying to change them.

    Can also be used with any tree-like structure containing either
    dictionaries, numpy arrays or already immutable objects per se.
    Note, however, that `tree.map_structure()` will in general not
    include the shallow object containing all others and therefore
    immutability will hold only for all objects contained in it.
    Use `tree.traverse(fun, action, top_down=False)` to include
    also the containing object.

    Args:
        obj: The object to be made immutable.

    Returns:
        The immutable object.

    .. testcode::
        :skipif: True

        import tree
        import numpy as np
        from ray.rllib.utils.numpy import make_action_immutable
        arr = np.arange(1,10)
        d = dict(a = 1, b = (arr, arr))
        tree.traverse(make_action_immutable, d, top_down=False)
    """
    # Numpy arrays: flip the write flag in place and return the same object.
    if isinstance(obj, np.ndarray):
        obj.setflags(write=False)
        return obj
    # OrderedDicts: copy into a plain dict, then wrap read-only.
    if isinstance(obj, OrderedDict):
        return MappingProxyType(dict(obj))
    # Plain dicts: wrap directly in a read-only mapping proxy.
    if isinstance(obj, dict):
        return MappingProxyType(obj)
    # Anything else is assumed to already be immutable.
    return obj
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
@PublicAPI
def huber_loss(x: np.ndarray, delta: float = 1.0) -> np.ndarray:
    """Reference: https://en.wikipedia.org/wiki/Huber_loss."""
    # Quadratic inside |x| < delta, linear outside.
    abs_x = np.abs(x)
    quadratic = np.power(x, 2.0) * 0.5
    linear = delta * (abs_x - 0.5 * delta)
    return np.where(abs_x < delta, quadratic, linear)
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
@PublicAPI
def l2_loss(x: np.ndarray) -> np.ndarray:
    """Computes half the L2 norm of a tensor (w/o the sqrt): sum(x**2) / 2.

    Args:
        x: The input tensor.

    Returns:
        The l2-loss output according to the above formula given `x`.
    """
    squared = np.square(x)
    return np.sum(squared) / 2.0
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
@PublicAPI
def lstm(
    x,
    weights: np.ndarray,
    biases: Optional[np.ndarray] = None,
    initial_internal_states: Optional[np.ndarray] = None,
    time_major: bool = False,
    forget_bias: float = 1.0,
):
    """Calculates LSTM layer output given weights/biases, states, and input.

    Args:
        x: The inputs to the LSTM layer including time-rank
            (0th if time-major, else 1st) and the batch-rank
            (1st if time-major, else 0th).
        weights: The weights matrix.
        biases: The biases vector. All 0s if None.
        initial_internal_states: The initial internal
            states to pass into the layer. All 0s if None.
        time_major: Whether to use time-major or not. Default: False.
        forget_bias: Gets added to first sigmoid (forget gate) output.
            Default: 1.0.

    Returns:
        Tuple consisting of 1) The LSTM layer's output and
        2) Tuple: Last (c-state, h-state).
    """
    sequence_length = x.shape[0 if time_major else 1]
    batch_size = x.shape[1 if time_major else 0]
    units = weights.shape[1] // 4  # 4 internal layers (3x sigmoid, 1x tanh)

    # Fix: the docstring promises "All 0s if None", but passing `biases=None`
    # used to crash below at `+ biases`. Default to a zero bias vector.
    if biases is None:
        biases = np.zeros(shape=(weights.shape[1],))

    if initial_internal_states is None:
        c_states = np.zeros(shape=(batch_size, units))
        h_states = np.zeros(shape=(batch_size, units))
    else:
        c_states = initial_internal_states[0]
        h_states = initial_internal_states[1]

    # Create a placeholder for all n-time step outputs.
    if time_major:
        unrolled_outputs = np.zeros(shape=(sequence_length, batch_size, units))
    else:
        unrolled_outputs = np.zeros(shape=(batch_size, sequence_length, units))

    # Unroll the LSTM cell over the time axis, capturing each step's output
    # plus the final h- and c-states.
    for t in range(sequence_length):
        input_matrix = x[t, :, :] if time_major else x[:, t, :]
        # Concatenate input with previous hidden state (standard LSTM form).
        input_matrix = np.concatenate((input_matrix, h_states), axis=1)
        input_matmul_matrix = np.matmul(input_matrix, weights) + biases
        # Forget gate (3rd slot in tf output matrix). Add static forget bias.
        sigmoid_1 = sigmoid(input_matmul_matrix[:, units * 2 : units * 3] + forget_bias)
        c_states = np.multiply(c_states, sigmoid_1)
        # Add gate (1st and 2nd slots in tf output matrix).
        sigmoid_2 = sigmoid(input_matmul_matrix[:, 0:units])
        tanh_3 = np.tanh(input_matmul_matrix[:, units : units * 2])
        c_states = np.add(c_states, np.multiply(sigmoid_2, tanh_3))
        # Output gate (last slot in tf output matrix).
        sigmoid_4 = sigmoid(input_matmul_matrix[:, units * 3 : units * 4])
        h_states = np.multiply(sigmoid_4, np.tanh(c_states))

        # Store this output time-slice.
        if time_major:
            unrolled_outputs[t, :, :] = h_states
        else:
            unrolled_outputs[:, t, :] = h_states

    return unrolled_outputs, (c_states, h_states)
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
@PublicAPI
def one_hot(
    x: Union[TensorType, int],
    depth: int = 0,
    on_value: float = 1.0,
    off_value: float = 0.0,
    dtype: type = np.float32,
) -> np.ndarray:
    """One-hot utility function for numpy.

    Thanks to qianyizhang:
    https://gist.github.com/qianyizhang/07ee1c15cad08afb03f5de69349efc30.

    Args:
        x: The input to be one-hot encoded.
        depth: The max. number to be one-hot encoded (size of last rank).
            If 0, inferred as max(x) + 1.
        on_value: The value to use for on. Default: 1.0.
        off_value: The value to use for off. Default: 0.0.
        dtype: The numpy dtype of the returned array.

    Returns:
        The one-hot encoded equivalent of the input array.
    """

    # Handle simple ints properly.
    if isinstance(x, int):
        x = np.array(x, dtype=np.int32)
    # Handle torch arrays properly.
    elif torch and isinstance(x, torch.Tensor):
        x = x.numpy()

    # Handle bool arrays correctly: treat as binary (depth forced to 2).
    if x.dtype == np.bool_:
        x = x.astype(np.int_)
        depth = 2

    # If depth is not given, try to infer it from the values in the array.
    if depth == 0:
        depth = np.max(x) + 1
    assert (
        np.max(x) < depth
    ), "ERROR: The max. index of `x` ({}) is larger than depth ({})!".format(
        np.max(x), depth
    )
    shape = x.shape

    # Start with an all-`off_value` tensor, adding one new last rank of
    # size `depth`.
    out = np.ones(shape=(*shape, depth)) * off_value
    indices = []
    # Build an advanced index: one coordinate array per rank of `x`, each
    # broadcast/tiled to address every element; the final index array is
    # `x` itself, selecting the "on" position along the new last rank.
    for i in range(x.ndim):
        tiles = [1] * x.ndim
        s = [1] * x.ndim
        s[i] = -1
        r = np.arange(shape[i]).reshape(s)
        if i > 0:
            tiles[i - 1] = shape[i - 1]
            r = np.tile(r, tiles)
        indices.append(r)
    indices.append(x)
    out[tuple(indices)] = on_value
    return out.astype(dtype)
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
@PublicAPI
def one_hot_multidiscrete(x, depths: List[int]):
    """Multi-one-hot encodes a (batch of) MultiDiscrete value(s).

    Fix: the signature previously read `depths=List[int]`, using the typing
    construct as a *default value* instead of an annotation; calling without
    `depths` would always raise. It is now a properly annotated, required
    parameter (callers that passed `depths` are unaffected).

    Args:
        x: A single row of ints (one int per sub-space) or a 2D batch
            thereof. Torch tensors are converted to numpy first.
        depths: The number of categories per sub-space (e.g. the `nvec`
            of a MultiDiscrete space).

    Returns:
        The per-sub-space one-hot encodings, concatenated along the last
        axis (float32).
    """
    # Handle torch arrays properly.
    if torch and isinstance(x, torch.Tensor):
        x = x.numpy()

    shape = x.shape
    # 1D input: x[i] is the i-th sub-space's value; 2D: column i is.
    return np.concatenate(
        [
            one_hot(x[i] if len(shape) == 1 else x[:, i], depth=n).astype(np.float32)
            for i, n in enumerate(depths)
        ],
        axis=-1,
    )
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
@PublicAPI
def relu(x: np.ndarray, alpha: float = 0.0) -> np.ndarray:
    """Implementation of the leaky ReLU function.

    y = x * alpha if x < 0 else x

    Args:
        x: The input values.
        alpha: A scaling ("leak") factor to use for negative x.

    Returns:
        The leaky ReLU output for x.
    """
    # Fix: the previous version passed `x` as np.maximum's third (`out=`)
    # argument, silently mutating the caller's array in place (and raising
    # a casting error for integer inputs with a float `alpha`). Compute
    # out-of-place instead; the returned values are identical.
    return np.maximum(x, x * alpha)
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
@PublicAPI
def sigmoid(x: np.ndarray, derivative: bool = False) -> np.ndarray:
    """Returns the sigmoid function applied to x.

    Alternatively, can return the derivative of the sigmoid function
    (note: the derivative form x * (1 - x) assumes `x` is already a
    sigmoid output).

    Args:
        x: The input to the sigmoid function.
        derivative: Whether to return the derivative or not.
            Default: False.

    Returns:
        The sigmoid function (or its derivative) applied to x.
    """
    return x * (1 - x) if derivative else 1 / (1 + np.exp(-x))
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
@PublicAPI
def softmax(
    x: Union[np.ndarray, list], axis: int = -1, epsilon: Optional[float] = None
) -> np.ndarray:
    """Returns the softmax values for x.

    The exact formula used is:
    S(xi) = e^xi / SUMj(e^xj), where j goes over all elements in x.

    Args:
        x: The input to the softmax function.
        axis: The axis along which to softmax.
        epsilon: Optional epsilon as a minimum value. If None, use
            `SMALL_NUMBER`.

    Returns:
        The softmax over x.
    """
    # Falsy epsilon (None or 0) falls back to the module-wide default.
    epsilon = epsilon or SMALL_NUMBER
    exps = np.exp(x)
    normalized = exps / np.sum(exps, axis, keepdims=True)
    # Clip from below so downstream log()s never see exact zeros.
    return np.maximum(normalized, epsilon)
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/__init__.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.rllib.utils.replay_buffers.episode_replay_buffer import EpisodeReplayBuffer
|
| 2 |
+
from ray.rllib.utils.replay_buffers.fifo_replay_buffer import FifoReplayBuffer
|
| 3 |
+
from ray.rllib.utils.replay_buffers.multi_agent_mixin_replay_buffer import (
|
| 4 |
+
MultiAgentMixInReplayBuffer,
|
| 5 |
+
)
|
| 6 |
+
from ray.rllib.utils.replay_buffers.multi_agent_episode_buffer import (
|
| 7 |
+
MultiAgentEpisodeReplayBuffer,
|
| 8 |
+
)
|
| 9 |
+
from ray.rllib.utils.replay_buffers.multi_agent_prioritized_episode_buffer import (
|
| 10 |
+
MultiAgentPrioritizedEpisodeReplayBuffer,
|
| 11 |
+
)
|
| 12 |
+
from ray.rllib.utils.replay_buffers.multi_agent_prioritized_replay_buffer import (
|
| 13 |
+
MultiAgentPrioritizedReplayBuffer,
|
| 14 |
+
)
|
| 15 |
+
from ray.rllib.utils.replay_buffers.multi_agent_replay_buffer import (
|
| 16 |
+
MultiAgentReplayBuffer,
|
| 17 |
+
ReplayMode,
|
| 18 |
+
)
|
| 19 |
+
from ray.rllib.utils.replay_buffers.prioritized_episode_buffer import (
|
| 20 |
+
PrioritizedEpisodeReplayBuffer,
|
| 21 |
+
)
|
| 22 |
+
from ray.rllib.utils.replay_buffers.prioritized_replay_buffer import (
|
| 23 |
+
PrioritizedReplayBuffer,
|
| 24 |
+
)
|
| 25 |
+
from ray.rllib.utils.replay_buffers.replay_buffer import ReplayBuffer, StorageUnit
|
| 26 |
+
from ray.rllib.utils.replay_buffers.reservoir_replay_buffer import ReservoirReplayBuffer
|
| 27 |
+
from ray.rllib.utils.replay_buffers import utils
|
| 28 |
+
|
| 29 |
+
__all__ = [
|
| 30 |
+
"EpisodeReplayBuffer",
|
| 31 |
+
"FifoReplayBuffer",
|
| 32 |
+
"MultiAgentEpisodeReplayBuffer",
|
| 33 |
+
"MultiAgentMixInReplayBuffer",
|
| 34 |
+
"MultiAgentPrioritizedEpisodeReplayBuffer",
|
| 35 |
+
"MultiAgentPrioritizedReplayBuffer",
|
| 36 |
+
"MultiAgentReplayBuffer",
|
| 37 |
+
"PrioritizedEpisodeReplayBuffer",
|
| 38 |
+
"PrioritizedReplayBuffer",
|
| 39 |
+
"ReplayMode",
|
| 40 |
+
"ReplayBuffer",
|
| 41 |
+
"ReservoirReplayBuffer",
|
| 42 |
+
"StorageUnit",
|
| 43 |
+
"utils",
|
| 44 |
+
]
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/simple_replay_buffer.py
ADDED
|
File without changes
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/serialization.py
ADDED
|
@@ -0,0 +1,418 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
|
| 2 |
+
from collections import OrderedDict
|
| 3 |
+
import importlib
|
| 4 |
+
import io
|
| 5 |
+
import zlib
|
| 6 |
+
from typing import Any, Dict, Optional, Sequence, Type, Union
|
| 7 |
+
|
| 8 |
+
import gymnasium as gym
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
import ray
|
| 12 |
+
from ray.rllib.utils.annotations import DeveloperAPI
|
| 13 |
+
from ray.rllib.utils.error import NotSerializable
|
| 14 |
+
from ray.rllib.utils.spaces.flexdict import FlexDict
|
| 15 |
+
from ray.rllib.utils.spaces.repeated import Repeated
|
| 16 |
+
from ray.rllib.utils.spaces.simplex import Simplex
|
| 17 |
+
|
| 18 |
+
NOT_SERIALIZABLE = "__not_serializable__"
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@DeveloperAPI
def convert_numpy_to_python_primitives(obj: Any):
    """Convert an object that is a numpy type to a python type.

    If the object is not a numpy type, it is returned unchanged.

    Args:
        obj: The object to convert.
    """
    # Arrays: convert to (nested) lists, then convert each element too.
    if isinstance(obj, np.ndarray):
        return [convert_numpy_to_python_primitives(v) for v in obj.tolist()]
    # Numpy scalars: map onto the corresponding python builtin.
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if isinstance(obj, np.bool_):
        return bool(obj)
    if isinstance(obj, np.str_):
        return str(obj)
    # Anything else passes through untouched.
    return obj
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def _serialize_ndarray(array: np.ndarray) -> str:
    """Pack numpy ndarray into Base64 encoded strings for serialization.

    This function uses numpy.save() (npy format) instead of pickling to
    ensure compatibility; the bytes are then zlib-compressed and
    Base64-escaped.

    Args:
        array: numpy ndarray.

    Returns:
        b64 escaped string.
    """
    with io.BytesIO() as buf:
        np.save(buf, array)
        raw = buf.getvalue()
    compressed = zlib.compress(raw)
    return base64.b64encode(compressed).decode("ascii")
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def _deserialize_ndarray(b64_string: str) -> np.ndarray:
    """Unpack b64 escaped string into numpy ndarray.

    This function assumes the unescaped bytes are of npy format.

    Args:
        b64_string: Base64 escaped string.

    Returns:
        numpy ndarray.
    """
    compressed = base64.b64decode(b64_string)
    raw = zlib.decompress(compressed)
    return np.load(io.BytesIO(raw), allow_pickle=True)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@DeveloperAPI
|
| 81 |
+
def gym_space_to_dict(space: gym.spaces.Space) -> Dict:
|
| 82 |
+
"""Serialize a gym Space into a JSON-serializable dict.
|
| 83 |
+
|
| 84 |
+
Args:
|
| 85 |
+
space: gym.spaces.Space
|
| 86 |
+
|
| 87 |
+
Returns:
|
| 88 |
+
Serialized JSON string.
|
| 89 |
+
"""
|
| 90 |
+
if space is None:
|
| 91 |
+
return None
|
| 92 |
+
|
| 93 |
+
def _box(sp: gym.spaces.Box) -> Dict:
|
| 94 |
+
return {
|
| 95 |
+
"space": "box",
|
| 96 |
+
"low": _serialize_ndarray(sp.low),
|
| 97 |
+
"high": _serialize_ndarray(sp.high),
|
| 98 |
+
"shape": sp._shape, # shape is a tuple.
|
| 99 |
+
"dtype": sp.dtype.str,
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
def _discrete(sp: gym.spaces.Discrete) -> Dict:
|
| 103 |
+
d = {
|
| 104 |
+
"space": "discrete",
|
| 105 |
+
"n": int(sp.n),
|
| 106 |
+
}
|
| 107 |
+
# Offset is a relatively new Discrete space feature.
|
| 108 |
+
if hasattr(sp, "start"):
|
| 109 |
+
d["start"] = int(sp.start)
|
| 110 |
+
return d
|
| 111 |
+
|
| 112 |
+
def _multi_binary(sp: gym.spaces.MultiBinary) -> Dict:
|
| 113 |
+
return {
|
| 114 |
+
"space": "multi-binary",
|
| 115 |
+
"n": sp.n,
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
def _multi_discrete(sp: gym.spaces.MultiDiscrete) -> Dict:
|
| 119 |
+
return {
|
| 120 |
+
"space": "multi-discrete",
|
| 121 |
+
"nvec": _serialize_ndarray(sp.nvec),
|
| 122 |
+
"dtype": sp.dtype.str,
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
def _tuple(sp: gym.spaces.Tuple) -> Dict:
|
| 126 |
+
return {
|
| 127 |
+
"space": "tuple",
|
| 128 |
+
"spaces": [gym_space_to_dict(sp) for sp in sp.spaces],
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
def _dict(sp: gym.spaces.Dict) -> Dict:
|
| 132 |
+
return {
|
| 133 |
+
"space": "dict",
|
| 134 |
+
"spaces": {k: gym_space_to_dict(sp) for k, sp in sp.spaces.items()},
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
def _simplex(sp: Simplex) -> Dict:
|
| 138 |
+
return {
|
| 139 |
+
"space": "simplex",
|
| 140 |
+
"shape": sp._shape, # shape is a tuple.
|
| 141 |
+
"concentration": sp.concentration,
|
| 142 |
+
"dtype": sp.dtype.str,
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
def _repeated(sp: Repeated) -> Dict:
|
| 146 |
+
return {
|
| 147 |
+
"space": "repeated",
|
| 148 |
+
"child_space": gym_space_to_dict(sp.child_space),
|
| 149 |
+
"max_len": sp.max_len,
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
def _flex_dict(sp: FlexDict) -> Dict:
|
| 153 |
+
d = {
|
| 154 |
+
"space": "flex_dict",
|
| 155 |
+
}
|
| 156 |
+
for k, s in sp.spaces:
|
| 157 |
+
d[k] = gym_space_to_dict(s)
|
| 158 |
+
return d
|
| 159 |
+
|
| 160 |
+
def _text(sp: "gym.spaces.Text") -> Dict:
|
| 161 |
+
# Note (Kourosh): This only works in gym >= 0.25.0
|
| 162 |
+
charset = getattr(sp, "character_set", None)
|
| 163 |
+
if charset is None:
|
| 164 |
+
charset = getattr(sp, "charset", None)
|
| 165 |
+
if charset is None:
|
| 166 |
+
raise ValueError(
|
| 167 |
+
"Text space must have a character_set or charset attribute"
|
| 168 |
+
)
|
| 169 |
+
return {
|
| 170 |
+
"space": "text",
|
| 171 |
+
"min_length": sp.min_length,
|
| 172 |
+
"max_length": sp.max_length,
|
| 173 |
+
"charset": charset,
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
if isinstance(space, gym.spaces.Box):
|
| 177 |
+
return _box(space)
|
| 178 |
+
elif isinstance(space, gym.spaces.Discrete):
|
| 179 |
+
return _discrete(space)
|
| 180 |
+
elif isinstance(space, gym.spaces.MultiBinary):
|
| 181 |
+
return _multi_binary(space)
|
| 182 |
+
elif isinstance(space, gym.spaces.MultiDiscrete):
|
| 183 |
+
return _multi_discrete(space)
|
| 184 |
+
elif isinstance(space, gym.spaces.Tuple):
|
| 185 |
+
return _tuple(space)
|
| 186 |
+
elif isinstance(space, gym.spaces.Dict):
|
| 187 |
+
return _dict(space)
|
| 188 |
+
elif isinstance(space, gym.spaces.Text):
|
| 189 |
+
return _text(space)
|
| 190 |
+
elif isinstance(space, Simplex):
|
| 191 |
+
return _simplex(space)
|
| 192 |
+
elif isinstance(space, Repeated):
|
| 193 |
+
return _repeated(space)
|
| 194 |
+
elif isinstance(space, FlexDict):
|
| 195 |
+
return _flex_dict(space)
|
| 196 |
+
else:
|
| 197 |
+
raise ValueError("Unknown space type for serialization, ", type(space))
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
@DeveloperAPI
|
| 201 |
+
def space_to_dict(space: gym.spaces.Space) -> Dict:
|
| 202 |
+
d = {"space": gym_space_to_dict(space)}
|
| 203 |
+
if "original_space" in space.__dict__:
|
| 204 |
+
d["original_space"] = space_to_dict(space.original_space)
|
| 205 |
+
return d
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
@DeveloperAPI
|
| 209 |
+
def gym_space_from_dict(d: Dict) -> gym.spaces.Space:
|
| 210 |
+
"""De-serialize a dict into gym Space.
|
| 211 |
+
|
| 212 |
+
Args:
|
| 213 |
+
str: serialized JSON str.
|
| 214 |
+
|
| 215 |
+
Returns:
|
| 216 |
+
De-serialized gym space.
|
| 217 |
+
"""
|
| 218 |
+
if d is None:
|
| 219 |
+
return None
|
| 220 |
+
|
| 221 |
+
def __common(d: Dict):
|
| 222 |
+
"""Common updates to the dict before we use it to construct spaces"""
|
| 223 |
+
ret = d.copy()
|
| 224 |
+
del ret["space"]
|
| 225 |
+
if "dtype" in ret:
|
| 226 |
+
ret["dtype"] = np.dtype(ret["dtype"])
|
| 227 |
+
return ret
|
| 228 |
+
|
| 229 |
+
def _box(d: Dict) -> gym.spaces.Box:
|
| 230 |
+
ret = d.copy()
|
| 231 |
+
ret.update(
|
| 232 |
+
{
|
| 233 |
+
"low": _deserialize_ndarray(d["low"]),
|
| 234 |
+
"high": _deserialize_ndarray(d["high"]),
|
| 235 |
+
}
|
| 236 |
+
)
|
| 237 |
+
return gym.spaces.Box(**__common(ret))
|
| 238 |
+
|
| 239 |
+
def _discrete(d: Dict) -> gym.spaces.Discrete:
|
| 240 |
+
return gym.spaces.Discrete(**__common(d))
|
| 241 |
+
|
| 242 |
+
def _multi_binary(d: Dict) -> gym.spaces.MultiBinary:
|
| 243 |
+
return gym.spaces.MultiBinary(**__common(d))
|
| 244 |
+
|
| 245 |
+
def _multi_discrete(d: Dict) -> gym.spaces.MultiDiscrete:
|
| 246 |
+
ret = d.copy()
|
| 247 |
+
ret.update(
|
| 248 |
+
{
|
| 249 |
+
"nvec": _deserialize_ndarray(ret["nvec"]),
|
| 250 |
+
}
|
| 251 |
+
)
|
| 252 |
+
return gym.spaces.MultiDiscrete(**__common(ret))
|
| 253 |
+
|
| 254 |
+
def _tuple(d: Dict) -> gym.spaces.Discrete:
|
| 255 |
+
spaces = [gym_space_from_dict(sp) for sp in d["spaces"]]
|
| 256 |
+
return gym.spaces.Tuple(spaces=spaces)
|
| 257 |
+
|
| 258 |
+
def _dict(d: Dict) -> gym.spaces.Discrete:
|
| 259 |
+
# We need to always use an OrderedDict here to cover the following two ways, by
|
| 260 |
+
# which a user might construct a Dict space originally. We need to restore this
|
| 261 |
+
# original Dict space with the exact order of keys the user intended to.
|
| 262 |
+
# - User provides an OrderedDict inside the gym.spaces.Dict constructor ->
|
| 263 |
+
# gymnasium should NOT further sort the keys. The same (user-provided) order
|
| 264 |
+
# must be restored.
|
| 265 |
+
# - User provides a simple dict inside the gym.spaces.Dict constructor ->
|
| 266 |
+
# By its API definition, gymnasium automatically sorts all keys alphabetically.
|
| 267 |
+
# The same (alphabetical) order must thus be restored.
|
| 268 |
+
spaces = OrderedDict(
|
| 269 |
+
{k: gym_space_from_dict(sp) for k, sp in d["spaces"].items()}
|
| 270 |
+
)
|
| 271 |
+
return gym.spaces.Dict(spaces=spaces)
|
| 272 |
+
|
| 273 |
+
def _simplex(d: Dict) -> Simplex:
|
| 274 |
+
return Simplex(**__common(d))
|
| 275 |
+
|
| 276 |
+
def _repeated(d: Dict) -> Repeated:
|
| 277 |
+
child_space = gym_space_from_dict(d["child_space"])
|
| 278 |
+
return Repeated(child_space=child_space, max_len=d["max_len"])
|
| 279 |
+
|
| 280 |
+
def _flex_dict(d: Dict) -> FlexDict:
|
| 281 |
+
spaces = {k: gym_space_from_dict(s) for k, s in d.items() if k != "space"}
|
| 282 |
+
return FlexDict(spaces=spaces)
|
| 283 |
+
|
| 284 |
+
def _text(d: Dict) -> "gym.spaces.Text":
|
| 285 |
+
return gym.spaces.Text(**__common(d))
|
| 286 |
+
|
| 287 |
+
space_map = {
|
| 288 |
+
"box": _box,
|
| 289 |
+
"discrete": _discrete,
|
| 290 |
+
"multi-binary": _multi_binary,
|
| 291 |
+
"multi-discrete": _multi_discrete,
|
| 292 |
+
"tuple": _tuple,
|
| 293 |
+
"dict": _dict,
|
| 294 |
+
"simplex": _simplex,
|
| 295 |
+
"repeated": _repeated,
|
| 296 |
+
"flex_dict": _flex_dict,
|
| 297 |
+
"text": _text,
|
| 298 |
+
}
|
| 299 |
+
|
| 300 |
+
space_type = d["space"]
|
| 301 |
+
if space_type not in space_map:
|
| 302 |
+
raise ValueError("Unknown space type for de-serialization, ", space_type)
|
| 303 |
+
|
| 304 |
+
return space_map[space_type](d)
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
@DeveloperAPI
|
| 308 |
+
def space_from_dict(d: Dict) -> gym.spaces.Space:
|
| 309 |
+
space = gym_space_from_dict(d["space"])
|
| 310 |
+
if "original_space" in d:
|
| 311 |
+
assert "space" in d["original_space"]
|
| 312 |
+
if isinstance(d["original_space"]["space"], str):
|
| 313 |
+
# For backward compatibility reasons, if d["original_space"]["space"]
|
| 314 |
+
# is a string, this original space was serialized by gym_space_to_dict.
|
| 315 |
+
space.original_space = gym_space_from_dict(d["original_space"])
|
| 316 |
+
else:
|
| 317 |
+
# Otherwise, this original space was serialized by space_to_dict.
|
| 318 |
+
space.original_space = space_from_dict(d["original_space"])
|
| 319 |
+
return space
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
@DeveloperAPI
|
| 323 |
+
def check_if_args_kwargs_serializable(args: Sequence[Any], kwargs: Dict[str, Any]):
|
| 324 |
+
"""Check if parameters to a function are serializable by ray.
|
| 325 |
+
|
| 326 |
+
Args:
|
| 327 |
+
args: arguments to be checked.
|
| 328 |
+
kwargs: keyword arguments to be checked.
|
| 329 |
+
|
| 330 |
+
Raises:
|
| 331 |
+
NoteSerializable if either args are kwargs are not serializable
|
| 332 |
+
by ray.
|
| 333 |
+
"""
|
| 334 |
+
for arg in args:
|
| 335 |
+
try:
|
| 336 |
+
# if the object is truly serializable we should be able to
|
| 337 |
+
# ray.put and ray.get it.
|
| 338 |
+
ray.get(ray.put(arg))
|
| 339 |
+
except TypeError as e:
|
| 340 |
+
raise NotSerializable(
|
| 341 |
+
"RLModule constructor arguments must be serializable. "
|
| 342 |
+
f"Found non-serializable argument: {arg}.\n"
|
| 343 |
+
f"Original serialization error: {e}"
|
| 344 |
+
)
|
| 345 |
+
for k, v in kwargs.items():
|
| 346 |
+
try:
|
| 347 |
+
# if the object is truly serializable we should be able to
|
| 348 |
+
# ray.put and ray.get it.
|
| 349 |
+
ray.get(ray.put(v))
|
| 350 |
+
except TypeError as e:
|
| 351 |
+
raise NotSerializable(
|
| 352 |
+
"RLModule constructor arguments must be serializable. "
|
| 353 |
+
f"Found non-serializable keyword argument: {k} = {v}.\n"
|
| 354 |
+
f"Original serialization error: {e}"
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
@DeveloperAPI
|
| 359 |
+
def serialize_type(type_: Union[Type, str]) -> str:
|
| 360 |
+
"""Converts a type into its full classpath ([module file] + "." + [class name]).
|
| 361 |
+
|
| 362 |
+
Args:
|
| 363 |
+
type_: The type to convert.
|
| 364 |
+
|
| 365 |
+
Returns:
|
| 366 |
+
The full classpath of the given type, e.g. "ray.rllib.algorithms.ppo.PPOConfig".
|
| 367 |
+
"""
|
| 368 |
+
# TODO (avnishn): find a way to incorporate the tune registry here.
|
| 369 |
+
# Already serialized.
|
| 370 |
+
if isinstance(type_, str):
|
| 371 |
+
return type_
|
| 372 |
+
|
| 373 |
+
return type_.__module__ + "." + type_.__qualname__
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
@DeveloperAPI
|
| 377 |
+
def deserialize_type(
|
| 378 |
+
module: Union[str, Type], error: bool = False
|
| 379 |
+
) -> Optional[Union[str, Type]]:
|
| 380 |
+
"""Resolves a class path to a class.
|
| 381 |
+
If the given module is already a class, it is returned as is.
|
| 382 |
+
If the given module is a string, it is imported and the class is returned.
|
| 383 |
+
|
| 384 |
+
Args:
|
| 385 |
+
module: The classpath (str) or type to resolve.
|
| 386 |
+
error: Whether to throw a ValueError if `module` could not be resolved into
|
| 387 |
+
a class. If False and `module` is not resolvable, returns None.
|
| 388 |
+
|
| 389 |
+
Returns:
|
| 390 |
+
The resolved class or `module` (if `error` is False and no resolution possible).
|
| 391 |
+
|
| 392 |
+
Raises:
|
| 393 |
+
ValueError: If `error` is True and `module` cannot be resolved.
|
| 394 |
+
"""
|
| 395 |
+
# Already a class, return as-is.
|
| 396 |
+
if isinstance(module, type):
|
| 397 |
+
return module
|
| 398 |
+
# A string.
|
| 399 |
+
elif isinstance(module, str):
|
| 400 |
+
# Try interpreting (as classpath) and importing the given module.
|
| 401 |
+
try:
|
| 402 |
+
module_path, class_name = module.rsplit(".", 1)
|
| 403 |
+
module = importlib.import_module(module_path)
|
| 404 |
+
return getattr(module, class_name)
|
| 405 |
+
# Module not found OR not a module (but a registered string?).
|
| 406 |
+
except (ModuleNotFoundError, ImportError, AttributeError, ValueError) as e:
|
| 407 |
+
# Ignore if error=False.
|
| 408 |
+
if error:
|
| 409 |
+
raise ValueError(
|
| 410 |
+
f"Could not deserialize the given classpath `module={module}` into "
|
| 411 |
+
"a valid python class! Make sure you have all necessary pip "
|
| 412 |
+
"packages installed and all custom modules are in your "
|
| 413 |
+
"`PYTHONPATH` env variable."
|
| 414 |
+
) from e
|
| 415 |
+
else:
|
| 416 |
+
raise ValueError(f"`module` ({module} must be type or string (classpath)!")
|
| 417 |
+
|
| 418 |
+
return module
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/tensor_dtype.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from ray.rllib.utils.typing import TensorType
|
| 4 |
+
from ray.rllib.utils.framework import try_import_torch, try_import_tf
|
| 5 |
+
from ray.util.annotations import PublicAPI
|
| 6 |
+
|
| 7 |
+
torch, _ = try_import_torch()
|
| 8 |
+
_, tf, _ = try_import_tf()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Dict of NumPy dtype -> torch dtype
|
| 12 |
+
if torch:
|
| 13 |
+
numpy_to_torch_dtype_dict = {
|
| 14 |
+
np.bool_: torch.bool,
|
| 15 |
+
np.uint8: torch.uint8,
|
| 16 |
+
np.int8: torch.int8,
|
| 17 |
+
np.int16: torch.int16,
|
| 18 |
+
np.int32: torch.int32,
|
| 19 |
+
np.int64: torch.int64,
|
| 20 |
+
np.float16: torch.float16,
|
| 21 |
+
np.float32: torch.float32,
|
| 22 |
+
np.float64: torch.float64,
|
| 23 |
+
np.complex64: torch.complex64,
|
| 24 |
+
np.complex128: torch.complex128,
|
| 25 |
+
}
|
| 26 |
+
else:
|
| 27 |
+
numpy_to_torch_dtype_dict = {}
|
| 28 |
+
|
| 29 |
+
# Dict of NumPy dtype -> tf dtype
|
| 30 |
+
if tf:
|
| 31 |
+
numpy_to_tf_dtype_dict = {
|
| 32 |
+
np.bool_: tf.bool,
|
| 33 |
+
np.uint8: tf.uint8,
|
| 34 |
+
np.int8: tf.int8,
|
| 35 |
+
np.int16: tf.int16,
|
| 36 |
+
np.int32: tf.int32,
|
| 37 |
+
np.int64: tf.int64,
|
| 38 |
+
np.float16: tf.float16,
|
| 39 |
+
np.float32: tf.float32,
|
| 40 |
+
np.float64: tf.float64,
|
| 41 |
+
np.complex64: tf.complex64,
|
| 42 |
+
np.complex128: tf.complex128,
|
| 43 |
+
}
|
| 44 |
+
else:
|
| 45 |
+
numpy_to_tf_dtype_dict = {}
|
| 46 |
+
|
| 47 |
+
# Dict of torch dtype -> NumPy dtype
|
| 48 |
+
torch_to_numpy_dtype_dict = {
|
| 49 |
+
value: key for (key, value) in numpy_to_torch_dtype_dict.items()
|
| 50 |
+
}
|
| 51 |
+
# Dict of tf dtype -> NumPy dtype
|
| 52 |
+
tf_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_tf_dtype_dict.items()}
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@PublicAPI(stability="alpha")
|
| 56 |
+
def get_np_dtype(x: TensorType) -> np.dtype:
|
| 57 |
+
"""Returns the NumPy dtype of the given tensor or array."""
|
| 58 |
+
if torch and isinstance(x, torch.Tensor):
|
| 59 |
+
return torch_to_numpy_dtype_dict[x.dtype]
|
| 60 |
+
if tf and isinstance(x, tf.Tensor):
|
| 61 |
+
return tf_to_numpy_dtype_dict[x.dtype]
|
| 62 |
+
elif isinstance(x, np.ndarray):
|
| 63 |
+
return x.dtype
|
| 64 |
+
else:
|
| 65 |
+
raise TypeError("Unsupported type: {}".format(type(x)))
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/test_utils.py
ADDED
|
@@ -0,0 +1,1817 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import json
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
import pprint
|
| 6 |
+
import random
|
| 7 |
+
import re
|
| 8 |
+
import time
|
| 9 |
+
from typing import (
|
| 10 |
+
TYPE_CHECKING,
|
| 11 |
+
Any,
|
| 12 |
+
Dict,
|
| 13 |
+
List,
|
| 14 |
+
Optional,
|
| 15 |
+
Tuple,
|
| 16 |
+
Type,
|
| 17 |
+
Union,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
import gymnasium as gym
|
| 21 |
+
from gymnasium.spaces import Box, Discrete, MultiDiscrete, MultiBinary
|
| 22 |
+
from gymnasium.spaces import Dict as GymDict
|
| 23 |
+
from gymnasium.spaces import Tuple as GymTuple
|
| 24 |
+
import numpy as np
|
| 25 |
+
import tree # pip install dm_tree
|
| 26 |
+
|
| 27 |
+
import ray
|
| 28 |
+
from ray import air, tune
|
| 29 |
+
from ray.air.constants import TRAINING_ITERATION
|
| 30 |
+
from ray.air.integrations.wandb import WandbLoggerCallback, WANDB_ENV_VAR
|
| 31 |
+
from ray.rllib.core import DEFAULT_MODULE_ID, Columns
|
| 32 |
+
from ray.rllib.env.wrappers.atari_wrappers import is_atari, wrap_deepmind
|
| 33 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 34 |
+
from ray.rllib.utils.framework import try_import_jax, try_import_tf, try_import_torch
|
| 35 |
+
from ray.rllib.utils.metrics import (
|
| 36 |
+
DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY,
|
| 37 |
+
ENV_RUNNER_RESULTS,
|
| 38 |
+
EPISODE_RETURN_MEAN,
|
| 39 |
+
EVALUATION_RESULTS,
|
| 40 |
+
NUM_ENV_STEPS_TRAINED,
|
| 41 |
+
NUM_ENV_STEPS_SAMPLED_LIFETIME,
|
| 42 |
+
)
|
| 43 |
+
from ray.rllib.utils.typing import ResultDict
|
| 44 |
+
from ray.rllib.utils.error import UnsupportedSpaceException
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
from ray.tune import CLIReporter
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
if TYPE_CHECKING:
|
| 51 |
+
from ray.rllib.algorithms import Algorithm, AlgorithmConfig
|
| 52 |
+
from ray.rllib.offline.dataset_reader import DatasetReader
|
| 53 |
+
|
| 54 |
+
jax, _ = try_import_jax()
|
| 55 |
+
tf1, tf, tfv = try_import_tf()
|
| 56 |
+
torch, _ = try_import_torch()
|
| 57 |
+
|
| 58 |
+
logger = logging.getLogger(__name__)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def add_rllib_example_script_args(
    parser: Optional[argparse.ArgumentParser] = None,
    default_reward: float = 100.0,
    default_iters: int = 200,
    default_timesteps: int = 100000,
) -> argparse.ArgumentParser:
    """Adds RLlib-typical (and common) examples scripts command line args to a parser.

    TODO (sven): This function should be used by most of our examples scripts, which
    already mostly have this logic in them (but written out).

    Args:
        parser: The parser to add the arguments to. If None, create a new one.
        default_reward: The default value for the --stop-reward option.
        default_iters: The default value for the --stop-iters option.
        default_timesteps: The default value for the --stop-timesteps option.

    Returns:
        The altered (or newly created) parser object.
    """
    if parser is None:
        parser = argparse.ArgumentParser()

    # Algo and Algo config options.
    parser.add_argument(
        "--algo", type=str, default="PPO", help="The RLlib-registered algorithm to use."
    )
    parser.add_argument(
        "--enable-new-api-stack",
        action="store_true",
        help="Whether to use the `enable_rl_module_and_learner` config setting.",
    )
    parser.add_argument(
        "--framework",
        choices=["tf", "tf2", "torch"],
        default="torch",
        help="The DL framework specifier.",
    )
    parser.add_argument(
        "--env",
        type=str,
        default=None,
        help="The gym.Env identifier to run the experiment with.",
    )
    parser.add_argument(
        "--num-env-runners",
        type=int,
        default=None,
        help="The number of (remote) EnvRunners to use for the experiment.",
    )
    parser.add_argument(
        "--num-envs-per-env-runner",
        type=int,
        default=None,
        help="The number of (vectorized) environments per EnvRunner. Note that "
        "this is identical to the batch size for (inference) action computations.",
    )
    parser.add_argument(
        "--num-agents",
        type=int,
        default=0,
        help="If 0 (default), will run as single-agent. If > 0, will run as "
        "multi-agent with the environment simply cloned n times and each agent acting "
        "independently at every single timestep. The overall reward for this "
        "experiment is then the sum over all individual agents' rewards.",
    )

    # Evaluation options.
    parser.add_argument(
        "--evaluation-num-env-runners",
        type=int,
        default=0,
        help="The number of evaluation (remote) EnvRunners to use for the experiment.",
    )
    parser.add_argument(
        "--evaluation-interval",
        type=int,
        default=0,
        help="Every how many iterations to run one round of evaluation. "
        "Use 0 (default) to disable evaluation.",
    )
    parser.add_argument(
        "--evaluation-duration",
        # Accepts either the literal string "auto" or an integer count.
        type=lambda v: v if v == "auto" else int(v),
        default=10,
        help="The number of evaluation units to run each evaluation round. "
        "Use `--evaluation-duration-unit` to count either in 'episodes' "
        "or 'timesteps'. If 'auto', will run as many as possible during train pass ("
        "`--evaluation-parallel-to-training` must be set then).",
    )
    parser.add_argument(
        "--evaluation-duration-unit",
        type=str,
        default="episodes",
        choices=["episodes", "timesteps"],
        help="The evaluation duration unit to count by. One of 'episodes' or "
        "'timesteps'. This unit will be run `--evaluation-duration` times in each "
        "evaluation round. If `--evaluation-duration=auto`, this setting does not "
        "matter.",
    )
    parser.add_argument(
        "--evaluation-parallel-to-training",
        action="store_true",
        help="Whether to run evaluation parallel to training. This might help speed up "
        "your overall iteration time. Be aware that when using this option, your "
        "reported evaluation results are referring to one iteration before the current "
        "one.",
    )

    # RLlib logging options.
    parser.add_argument(
        "--output",
        type=str,
        default=None,
        help="The output directory to write trajectories to, which are collected by "
        "the algo's EnvRunners.",
    )
    parser.add_argument(
        "--log-level",
        type=str,
        default=None,  # None -> use default
        choices=["INFO", "DEBUG", "WARN", "ERROR"],
        help="The log-level to be used by the RLlib logger.",
    )

    # tune.Tuner options.
    parser.add_argument(
        "--no-tune",
        action="store_true",
        help="Whether to NOT use tune.Tuner(), but rather a simple for-loop calling "
        "`algo.train()` repeatedly until one of the stop criteria is met.",
    )
    parser.add_argument(
        "--num-samples",
        type=int,
        default=1,
        help="How many (tune.Tuner.fit()) experiments to execute - if possible in "
        "parallel.",
    )
    parser.add_argument(
        "--max-concurrent-trials",
        type=int,
        default=None,
        help="How many (tune.Tuner) trials to run concurrently.",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=2,
        help="The verbosity level for the `tune.Tuner()` running the experiment.",
    )
    parser.add_argument(
        "--checkpoint-freq",
        type=int,
        default=0,
        help=(
            "The frequency (in training iterations) with which to create checkpoints. "
            "Note that if --wandb-key is provided, all checkpoints will "
            "automatically be uploaded to WandB."
        ),
    )
    parser.add_argument(
        "--checkpoint-at-end",
        action="store_true",
        help=(
            "Whether to create a checkpoint at the very end of the experiment. "
            "Note that if --wandb-key is provided, all checkpoints will "
            "automatically be uploaded to WandB."
        ),
    )

    # WandB logging options.
    parser.add_argument(
        "--wandb-key",
        type=str,
        default=None,
        help="The WandB API key to use for uploading results.",
    )
    parser.add_argument(
        "--wandb-project",
        type=str,
        default=None,
        help="The WandB project name to use.",
    )
    parser.add_argument(
        "--wandb-run-name",
        type=str,
        default=None,
        help="The WandB run name to use.",
    )

    # Experiment stopping and testing criteria.
    parser.add_argument(
        "--stop-reward",
        type=float,
        default=default_reward,
        help="Reward at which the script should stop training.",
    )
    parser.add_argument(
        "--stop-iters",
        type=int,
        default=default_iters,
        help="The number of iterations to train.",
    )
    parser.add_argument(
        "--stop-timesteps",
        type=int,
        default=default_timesteps,
        help="The number of (environment sampling) timesteps to train.",
    )
    parser.add_argument(
        "--as-test",
        action="store_true",
        help="Whether this script should be run as a test. If set, --stop-reward must "
        "be achieved within --stop-timesteps AND --stop-iters, otherwise this "
        "script will throw an exception at the end.",
    )
    parser.add_argument(
        "--as-release-test",
        action="store_true",
        help="Whether this script should be run as a release test. If set, "
        "all that applies to the --as-test option is true, plus, a short JSON summary "
        "will be written into a results file whose location is given by the ENV "
        "variable `TEST_OUTPUT_JSON`.",
    )

    # Learner scaling options.
    parser.add_argument(
        "--num-learners",
        type=int,
        default=None,
        help="The number of Learners to use. If none, use the algorithm's default "
        "value.",
    )
    parser.add_argument(
        "--num-gpus-per-learner",
        type=float,
        default=None,
        help="The number of GPUs per Learner to use. If none and there are enough GPUs "
        "for all required Learners (--num-learners), use a value of 1, otherwise 0.",
    )

    # Ray init options.
    parser.add_argument("--num-cpus", type=int, default=0)
    parser.add_argument(
        "--local-mode",
        action="store_true",
        help="Init Ray in local mode for easier debugging.",
    )

    # Old API stack: config.num_gpus.
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="The number of GPUs to use (if on the old API stack).",
    )

    return parser
) -> argparse.ArgumentParser:
|
| 67 |
+
"""Adds RLlib-typical (and common) examples scripts command line args to a parser.
|
| 68 |
+
|
| 69 |
+
TODO (sven): This function should be used by most of our examples scripts, which
|
| 70 |
+
already mostly have this logic in them (but written out).
|
| 71 |
+
|
| 72 |
+
Args:
|
| 73 |
+
parser: The parser to add the arguments to. If None, create a new one.
|
| 74 |
+
default_reward: The default value for the --stop-reward option.
|
| 75 |
+
default_iters: The default value for the --stop-iters option.
|
| 76 |
+
default_timesteps: The default value for the --stop-timesteps option.
|
| 77 |
+
|
| 78 |
+
Returns:
|
| 79 |
+
The altered (or newly created) parser object.
|
| 80 |
+
"""
|
| 81 |
+
if parser is None:
|
| 82 |
+
parser = argparse.ArgumentParser()
|
| 83 |
+
|
| 84 |
+
# Algo and Algo config options.
|
| 85 |
+
parser.add_argument(
|
| 86 |
+
"--algo", type=str, default="PPO", help="The RLlib-registered algorithm to use."
|
| 87 |
+
)
|
| 88 |
+
parser.add_argument(
|
| 89 |
+
"--enable-new-api-stack",
|
| 90 |
+
action="store_true",
|
| 91 |
+
help="Whether to use the `enable_rl_module_and_learner` config setting.",
|
| 92 |
+
)
|
| 93 |
+
parser.add_argument(
|
| 94 |
+
"--framework",
|
| 95 |
+
choices=["tf", "tf2", "torch"],
|
| 96 |
+
default="torch",
|
| 97 |
+
help="The DL framework specifier.",
|
| 98 |
+
)
|
| 99 |
+
parser.add_argument(
|
| 100 |
+
"--env",
|
| 101 |
+
type=str,
|
| 102 |
+
default=None,
|
| 103 |
+
help="The gym.Env identifier to run the experiment with.",
|
| 104 |
+
)
|
| 105 |
+
parser.add_argument(
|
| 106 |
+
"--num-env-runners",
|
| 107 |
+
type=int,
|
| 108 |
+
default=None,
|
| 109 |
+
help="The number of (remote) EnvRunners to use for the experiment.",
|
| 110 |
+
)
|
| 111 |
+
parser.add_argument(
|
| 112 |
+
"--num-envs-per-env-runner",
|
| 113 |
+
type=int,
|
| 114 |
+
default=None,
|
| 115 |
+
help="The number of (vectorized) environments per EnvRunner. Note that "
|
| 116 |
+
"this is identical to the batch size for (inference) action computations.",
|
| 117 |
+
)
|
| 118 |
+
parser.add_argument(
|
| 119 |
+
"--num-agents",
|
| 120 |
+
type=int,
|
| 121 |
+
default=0,
|
| 122 |
+
help="If 0 (default), will run as single-agent. If > 0, will run as "
|
| 123 |
+
"multi-agent with the environment simply cloned n times and each agent acting "
|
| 124 |
+
"independently at every single timestep. The overall reward for this "
|
| 125 |
+
"experiment is then the sum over all individual agents' rewards.",
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
# Evaluation options.
|
| 129 |
+
parser.add_argument(
|
| 130 |
+
"--evaluation-num-env-runners",
|
| 131 |
+
type=int,
|
| 132 |
+
default=0,
|
| 133 |
+
help="The number of evaluation (remote) EnvRunners to use for the experiment.",
|
| 134 |
+
)
|
| 135 |
+
parser.add_argument(
|
| 136 |
+
"--evaluation-interval",
|
| 137 |
+
type=int,
|
| 138 |
+
default=0,
|
| 139 |
+
help="Every how many iterations to run one round of evaluation. "
|
| 140 |
+
"Use 0 (default) to disable evaluation.",
|
| 141 |
+
)
|
| 142 |
+
parser.add_argument(
|
| 143 |
+
"--evaluation-duration",
|
| 144 |
+
type=lambda v: v if v == "auto" else int(v),
|
| 145 |
+
default=10,
|
| 146 |
+
help="The number of evaluation units to run each evaluation round. "
|
| 147 |
+
"Use `--evaluation-duration-unit` to count either in 'episodes' "
|
| 148 |
+
"or 'timesteps'. If 'auto', will run as many as possible during train pass ("
|
| 149 |
+
"`--evaluation-parallel-to-training` must be set then).",
|
| 150 |
+
)
|
| 151 |
+
parser.add_argument(
|
| 152 |
+
"--evaluation-duration-unit",
|
| 153 |
+
type=str,
|
| 154 |
+
default="episodes",
|
| 155 |
+
choices=["episodes", "timesteps"],
|
| 156 |
+
help="The evaluation duration unit to count by. One of 'episodes' or "
|
| 157 |
+
"'timesteps'. This unit will be run `--evaluation-duration` times in each "
|
| 158 |
+
"evaluation round. If `--evaluation-duration=auto`, this setting does not "
|
| 159 |
+
"matter.",
|
| 160 |
+
)
|
| 161 |
+
parser.add_argument(
|
| 162 |
+
"--evaluation-parallel-to-training",
|
| 163 |
+
action="store_true",
|
| 164 |
+
help="Whether to run evaluation parallel to training. This might help speed up "
|
| 165 |
+
"your overall iteration time. Be aware that when using this option, your "
|
| 166 |
+
"reported evaluation results are referring to one iteration before the current "
|
| 167 |
+
"one.",
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
# RLlib logging options.
|
| 171 |
+
parser.add_argument(
|
| 172 |
+
"--output",
|
| 173 |
+
type=str,
|
| 174 |
+
default=None,
|
| 175 |
+
help="The output directory to write trajectories to, which are collected by "
|
| 176 |
+
"the algo's EnvRunners.",
|
| 177 |
+
)
|
| 178 |
+
parser.add_argument(
|
| 179 |
+
"--log-level",
|
| 180 |
+
type=str,
|
| 181 |
+
default=None, # None -> use default
|
| 182 |
+
choices=["INFO", "DEBUG", "WARN", "ERROR"],
|
| 183 |
+
help="The log-level to be used by the RLlib logger.",
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
# tune.Tuner options.
|
| 187 |
+
parser.add_argument(
|
| 188 |
+
"--no-tune",
|
| 189 |
+
action="store_true",
|
| 190 |
+
help="Whether to NOT use tune.Tuner(), but rather a simple for-loop calling "
|
| 191 |
+
"`algo.train()` repeatedly until one of the stop criteria is met.",
|
| 192 |
+
)
|
| 193 |
+
parser.add_argument(
|
| 194 |
+
"--num-samples",
|
| 195 |
+
type=int,
|
| 196 |
+
default=1,
|
| 197 |
+
help="How many (tune.Tuner.fit()) experiments to execute - if possible in "
|
| 198 |
+
"parallel.",
|
| 199 |
+
)
|
| 200 |
+
parser.add_argument(
|
| 201 |
+
"--max-concurrent-trials",
|
| 202 |
+
type=int,
|
| 203 |
+
default=None,
|
| 204 |
+
help="How many (tune.Tuner) trials to run concurrently.",
|
| 205 |
+
)
|
| 206 |
+
parser.add_argument(
|
| 207 |
+
"--verbose",
|
| 208 |
+
type=int,
|
| 209 |
+
default=2,
|
| 210 |
+
help="The verbosity level for the `tune.Tuner()` running the experiment.",
|
| 211 |
+
)
|
| 212 |
+
parser.add_argument(
|
| 213 |
+
"--checkpoint-freq",
|
| 214 |
+
type=int,
|
| 215 |
+
default=0,
|
| 216 |
+
help=(
|
| 217 |
+
"The frequency (in training iterations) with which to create checkpoints. "
|
| 218 |
+
"Note that if --wandb-key is provided, all checkpoints will "
|
| 219 |
+
"automatically be uploaded to WandB."
|
| 220 |
+
),
|
| 221 |
+
)
|
| 222 |
+
parser.add_argument(
|
| 223 |
+
"--checkpoint-at-end",
|
| 224 |
+
action="store_true",
|
| 225 |
+
help=(
|
| 226 |
+
"Whether to create a checkpoint at the very end of the experiment. "
|
| 227 |
+
"Note that if --wandb-key is provided, all checkpoints will "
|
| 228 |
+
"automatically be uploaded to WandB."
|
| 229 |
+
),
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
# WandB logging options.
|
| 233 |
+
parser.add_argument(
|
| 234 |
+
"--wandb-key",
|
| 235 |
+
type=str,
|
| 236 |
+
default=None,
|
| 237 |
+
help="The WandB API key to use for uploading results.",
|
| 238 |
+
)
|
| 239 |
+
parser.add_argument(
|
| 240 |
+
"--wandb-project",
|
| 241 |
+
type=str,
|
| 242 |
+
default=None,
|
| 243 |
+
help="The WandB project name to use.",
|
| 244 |
+
)
|
| 245 |
+
parser.add_argument(
|
| 246 |
+
"--wandb-run-name",
|
| 247 |
+
type=str,
|
| 248 |
+
default=None,
|
| 249 |
+
help="The WandB run name to use.",
|
| 250 |
+
)
|
| 251 |
+
|
| 252 |
+
# Experiment stopping and testing criteria.
|
| 253 |
+
parser.add_argument(
|
| 254 |
+
"--stop-reward",
|
| 255 |
+
type=float,
|
| 256 |
+
default=default_reward,
|
| 257 |
+
help="Reward at which the script should stop training.",
|
| 258 |
+
)
|
| 259 |
+
parser.add_argument(
|
| 260 |
+
"--stop-iters",
|
| 261 |
+
type=int,
|
| 262 |
+
default=default_iters,
|
| 263 |
+
help="The number of iterations to train.",
|
| 264 |
+
)
|
| 265 |
+
parser.add_argument(
|
| 266 |
+
"--stop-timesteps",
|
| 267 |
+
type=int,
|
| 268 |
+
default=default_timesteps,
|
| 269 |
+
help="The number of (environment sampling) timesteps to train.",
|
| 270 |
+
)
|
| 271 |
+
parser.add_argument(
|
| 272 |
+
"--as-test",
|
| 273 |
+
action="store_true",
|
| 274 |
+
help="Whether this script should be run as a test. If set, --stop-reward must "
|
| 275 |
+
"be achieved within --stop-timesteps AND --stop-iters, otherwise this "
|
| 276 |
+
"script will throw an exception at the end.",
|
| 277 |
+
)
|
| 278 |
+
parser.add_argument(
|
| 279 |
+
"--as-release-test",
|
| 280 |
+
action="store_true",
|
| 281 |
+
help="Whether this script should be run as a release test. If set, "
|
| 282 |
+
"all that applies to the --as-test option is true, plus, a short JSON summary "
|
| 283 |
+
"will be written into a results file whose location is given by the ENV "
|
| 284 |
+
"variable `TEST_OUTPUT_JSON`.",
|
| 285 |
+
)
|
| 286 |
+
|
| 287 |
+
# Learner scaling options.
|
| 288 |
+
parser.add_argument(
|
| 289 |
+
"--num-learners",
|
| 290 |
+
type=int,
|
| 291 |
+
default=None,
|
| 292 |
+
help="The number of Learners to use. If none, use the algorithm's default "
|
| 293 |
+
"value.",
|
| 294 |
+
)
|
| 295 |
+
parser.add_argument(
|
| 296 |
+
"--num-gpus-per-learner",
|
| 297 |
+
type=float,
|
| 298 |
+
default=None,
|
| 299 |
+
help="The number of GPUs per Learner to use. If none and there are enough GPUs "
|
| 300 |
+
"for all required Learners (--num-learners), use a value of 1, otherwise 0.",
|
| 301 |
+
)
|
| 302 |
+
|
| 303 |
+
# Ray init options.
|
| 304 |
+
parser.add_argument("--num-cpus", type=int, default=0)
|
| 305 |
+
parser.add_argument(
|
| 306 |
+
"--local-mode",
|
| 307 |
+
action="store_true",
|
| 308 |
+
help="Init Ray in local mode for easier debugging.",
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
# Old API stack: config.num_gpus.
|
| 312 |
+
parser.add_argument(
|
| 313 |
+
"--num-gpus",
|
| 314 |
+
type=int,
|
| 315 |
+
default=0,
|
| 316 |
+
help="The number of GPUs to use (if on the old API stack).",
|
| 317 |
+
)
|
| 318 |
+
|
| 319 |
+
return parser
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def check(x, y, decimals=5, atol=None, rtol=None, false=False):
    """
    Checks two structures (dict, tuple, list,
    np.array, float, int, etc..) for (almost) numeric identity.
    All numbers in the two structures have to match up to `decimal` digits
    after the floating point. Uses assertions.

    Args:
        x: The value to be compared (to the expectation: `y`). This
            may be a Tensor.
        y: The expected value to be compared to `x`. This must not
            be a tf-Tensor, but may be a tf/torch-Tensor.
        decimals: The number of digits after the floating point up to
            which all numeric values have to match.
        atol: Absolute tolerance of the difference between x and y
            (overrides `decimals` if given).
        rtol: Relative tolerance of the difference between x and y
            (overrides `decimals` if given).
        false: Whether to check that x and y are NOT the same.
    """
    # A dict type: recurse per key; x and y must have identical key sets.
    if isinstance(x, dict):
        assert isinstance(y, dict), "ERROR: If x is dict, y needs to be a dict as well!"
        y_keys = set(x.keys())
        for key, value in x.items():
            assert key in y, f"ERROR: y does not have x's key='{key}'! y={y}"
            check(value, y[key], decimals=decimals, atol=atol, rtol=rtol, false=false)
            y_keys.remove(key)
        assert not y_keys, "ERROR: y contains keys ({}) that are not in x! y={}".format(
            list(y_keys), y
        )
    # A tuple type: recurse element-wise; lengths must match.
    elif isinstance(x, (tuple, list)):
        assert isinstance(
            y, (tuple, list)
        ), "ERROR: If x is tuple/list, y needs to be a tuple/list as well!"
        assert len(y) == len(
            x
        ), "ERROR: y does not have the same length as x ({} vs {})!".format(
            len(y), len(x)
        )
        for i, value in enumerate(x):
            check(value, y[i], decimals=decimals, atol=atol, rtol=rtol, false=false)
    # Boolean comparison (identity of the truth value, not numeric tolerance).
    elif isinstance(x, (np.bool_, bool)):
        if false is True:
            assert bool(x) is not bool(y), f"ERROR: x ({x}) is y ({y})!"
        else:
            assert bool(x) is bool(y), f"ERROR: x ({x}) is not y ({y})!"
    # Nones or primitives (excluding int vs float, which should be compared with
    # tolerance/decimals as well).
    elif (
        x is None
        or y is None
        or isinstance(x, str)
        or (isinstance(x, int) and isinstance(y, int))
    ):
        if false is True:
            assert x != y, f"ERROR: x ({x}) is the same as y ({y})!"
        else:
            assert x == y, f"ERROR: x ({x}) is not the same as y ({y})!"
    # String/byte comparisons (object- or unicode-dtyped arrays, raw bytes).
    elif (
        hasattr(x, "dtype") and (x.dtype == object or str(x.dtype).startswith("<U"))
    ) or isinstance(x, bytes):
        try:
            np.testing.assert_array_equal(x, y)
            # Arrays matched, but caller expected a mismatch.
            if false is True:
                assert False, f"ERROR: x ({x}) is the same as y ({y})!"
        except AssertionError as e:
            if false is False:
                raise e
    # Everything else (assume numeric or tf/torch.Tensor).
    # Also includes int vs float comparison, which is performed with tolerance/decimals.
    else:
        if tf1 is not None:
            # y should never be a Tensor (y=expected value).
            if isinstance(y, (tf1.Tensor, tf1.Variable)):
                # In eager mode, numpyize tensors.
                if tf.executing_eagerly():
                    y = y.numpy()
                else:
                    raise ValueError(
                        "`y` (expected value) must not be a Tensor. "
                        "Use numpy.ndarray instead"
                    )
            if isinstance(x, (tf1.Tensor, tf1.Variable)):
                # In eager mode, numpyize tensors.
                if tf1.executing_eagerly():
                    x = x.numpy()
                # Otherwise, use a new tf-session to evaluate `x`, then re-enter
                # `check` with the numpy result.
                else:
                    with tf1.Session() as sess:
                        x = sess.run(x)
                        return check(
                            x, y, decimals=decimals, atol=atol, rtol=rtol, false=false
                        )
        if torch is not None:
            if isinstance(x, torch.Tensor):
                x = x.detach().cpu().numpy()
            if isinstance(y, torch.Tensor):
                y = y.detach().cpu().numpy()

        # Stats objects: compare their current (peeked) values.
        from ray.rllib.utils.metrics.stats import Stats

        if isinstance(x, Stats):
            x = x.peek()
        if isinstance(y, Stats):
            y = y.peek()

        # Using decimals.
        if atol is None and rtol is None:
            # Assert equality of both values.
            try:
                np.testing.assert_almost_equal(x, y, decimal=decimals)
            # Both values are not equal.
            except AssertionError as e:
                # Raise error in normal case.
                if false is False:
                    raise e
            # Both values are equal.
            else:
                # If false is set -> raise error (not expected to be equal).
                if false is True:
                    assert False, f"ERROR: x ({x}) is the same as y ({y})!"

        # Using atol/rtol.
        else:
            # Provide defaults for either one of atol/rtol.
            if atol is None:
                atol = 0
            if rtol is None:
                rtol = 1e-7
            try:
                np.testing.assert_allclose(x, y, atol=atol, rtol=rtol)
            except AssertionError as e:
                if false is False:
                    raise e
            else:
                if false is True:
                    assert False, f"ERROR: x ({x}) is the same as y ({y})!"
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
def check_compute_single_action(
    algorithm, include_state=False, include_prev_action_reward=False
):
    """Tests different combinations of args for algorithm.compute_single_action.

    Args:
        algorithm: The Algorithm object to test.
        include_state: Whether to include the initial state of the Policy's
            Model in the `compute_single_action` call.
        include_prev_action_reward: Whether to include the prev-action and
            -reward in the `compute_single_action` call.

    Raises:
        ValueError: If anything unexpected happens.
    """
    # Have to import this here to avoid circular dependency.
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch

    # Some Algorithms may not abide to the standard API.
    pid = DEFAULT_POLICY_ID
    try:
        # Multi-agent: Pick any learnable policy (or DEFAULT_POLICY if it's the only
        # one).
        pid = next(iter(algorithm.env_runner.get_policies_to_train()))
        pol = algorithm.get_policy(pid)
    except AttributeError:
        # Algorithm has no env_runner attribute -> fall back to its `policy`.
        pol = algorithm.policy
    # Get the policy's model.
    model = pol.model

    action_space = pol.action_space

    def _test(
        what, method_to_test, obs_space, full_fetch, explore, timestep, unsquash, clip
    ):
        # Runs one combination of call args against `what` (Policy or Algorithm).
        call_kwargs = {}
        if what is algorithm:
            call_kwargs["full_fetch"] = full_fetch
            call_kwargs["policy_id"] = pid

        obs = obs_space.sample()
        if isinstance(obs_space, Box):
            obs = np.clip(obs, -1.0, 1.0)
        state_in = None
        if include_state:
            state_in = model.get_initial_state()
            if not state_in:
                # Model exposes no initial state -> collect state spaces from
                # its view requirements instead.
                state_in = []
                i = 0
                while f"state_in_{i}" in model.view_requirements:
                    state_in.append(
                        model.view_requirements[f"state_in_{i}"].space.sample()
                    )
                    i += 1
        action_in = action_space.sample() if include_prev_action_reward else None
        reward_in = 1.0 if include_prev_action_reward else None

        if method_to_test == "input_dict":
            # The input-dict code path only exists on Policies.
            assert what is pol

            input_dict = {SampleBatch.OBS: obs}
            if include_prev_action_reward:
                input_dict[SampleBatch.PREV_ACTIONS] = action_in
                input_dict[SampleBatch.PREV_REWARDS] = reward_in
            if state_in:
                if what.config.get("enable_rl_module_and_learner", False):
                    input_dict["state_in"] = state_in
                else:
                    for i, s in enumerate(state_in):
                        input_dict[f"state_in_{i}"] = s
            input_dict_batched = SampleBatch(
                tree.map_structure(lambda s: np.expand_dims(s, 0), input_dict)
            )
            action = pol.compute_actions_from_input_dict(
                input_dict=input_dict_batched,
                explore=explore,
                timestep=timestep,
                **call_kwargs,
            )
            # Unbatch everything to be able to compare against single
            # action below.
            # ARS and ES return action batches as lists.
            if isinstance(action[0], list):
                action = (np.array(action[0]), action[1], action[2])
            action = tree.map_structure(lambda s: s[0], action)

            try:
                action2 = pol.compute_single_action(
                    input_dict=input_dict,
                    explore=explore,
                    timestep=timestep,
                    **call_kwargs,
                )
                # Make sure these are the same, unless we have exploration
                # switched on (or noisy layers).
                if not explore and not pol.config.get("noisy"):
                    check(action, action2)
            except TypeError:
                # Some policies don't accept the input-dict call signature.
                pass
        else:
            action = what.compute_single_action(
                obs,
                state_in,
                prev_action=action_in,
                prev_reward=reward_in,
                explore=explore,
                timestep=timestep,
                unsquash_action=unsquash,
                clip_action=clip,
                **call_kwargs,
            )

        state_out = None
        if state_in or full_fetch or what is pol:
            # Return value is a (action, state_out, extra_fetches) triple here.
            action, state_out, _ = action
        if state_out:
            for si, so in zip(tree.flatten(state_in), tree.flatten(state_out)):
                if tf.is_tensor(si):
                    # If si is a tensor of Dimensions, we need to convert it
                    # We expect this to be the case for TF RLModules who's initial
                    # states are Tf Tensors.
                    si_shape = si.shape.as_list()
                else:
                    si_shape = list(si.shape)
                check(si_shape, so.shape)

        if unsquash is None:
            unsquash = what.config["normalize_actions"]
        if clip is None:
            clip = what.config["clip_actions"]

        # Test whether unsquash/clipping works on the Algorithm's
        # compute_single_action method: Both flags should force the action
        # to be within the space's bounds.
        if method_to_test == "single" and what == algorithm:
            if not action_space.contains(action) and (
                clip or unsquash or not isinstance(action_space, Box)
            ):
                raise ValueError(
                    f"Returned action ({action}) of algorithm/policy {what} "
                    f"not in Env's action_space {action_space}"
                )
            # We are operating in normalized space: Expect only smaller action
            # values.
            if (
                isinstance(action_space, Box)
                and not unsquash
                and what.config.get("normalize_actions")
                and np.any(np.abs(action) > 15.0)
            ):
                raise ValueError(
                    f"Returned action ({action}) of algorithm/policy {what} "
                    "should be in normalized space, but seems too large/small "
                    "for that!"
                )

    # Loop through: Policy vs Algorithm; Different API methods to calculate
    # actions; unsquash option; clip option; full fetch or not.
    for what in [pol, algorithm]:
        if what is algorithm:
            # Get the obs-space from Workers.env (not Policy) due to possible
            # pre-processor up front.
            worker_set = getattr(algorithm, "env_runner_group", None)
            assert worker_set
            if not worker_set.local_env_runner:
                obs_space = algorithm.get_policy(pid).observation_space
            else:
                obs_space = worker_set.local_env_runner.for_policy(
                    lambda p: p.observation_space, policy_id=pid
                )
            obs_space = getattr(obs_space, "original_space", obs_space)
        else:
            obs_space = pol.observation_space

        for method_to_test in ["single"] + (["input_dict"] if what is pol else []):
            for explore in [True, False]:
                for full_fetch in [False, True] if what is algorithm else [False]:
                    timestep = random.randint(0, 100000)
                    for unsquash in [True, False, None]:
                        for clip in [False] if unsquash else [True, False, None]:
                            print("-" * 80)
                            print(f"what={what}")
                            print(f"method_to_test={method_to_test}")
                            print(f"explore={explore}")
                            print(f"full_fetch={full_fetch}")
                            print(f"unsquash={unsquash}")
                            print(f"clip={clip}")
                            _test(
                                what,
                                method_to_test,
                                obs_space,
                                full_fetch,
                                explore,
                                timestep,
                                unsquash,
                                clip,
                            )
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
def check_inference_w_connectors(policy, env_name, max_steps: int = 100):
    """Checks whether the given policy can infer actions from an env with connectors.

    Args:
        policy: The policy to check.
        env_name: Name of the environment to check
        max_steps: The maximum number of steps to run the environment for.

    Raises:
        ValueError: If the policy cannot infer actions from the environment.
    """
    # Avoids circular import
    from ray.rllib.utils.policy import local_policy_inference

    env = gym.make(env_name)

    # Potentially wrap the env like we do in RolloutWorker
    if is_atari(env):
        env = wrap_deepmind(
            env,
            dim=policy.config["model"]["dim"],
            framestack=policy.config["model"].get("framestack"),
        )

    obs, info = env.reset()
    reward, terminated, truncated = 0.0, False, False
    ts = 0
    # Step the env with connector-based inference until the episode ends or
    # `max_steps` is reached.
    while not terminated and not truncated and ts < max_steps:
        action_out = local_policy_inference(
            policy,
            env_id=0,
            agent_id=0,
            obs=obs,
            reward=reward,
            terminated=terminated,
            truncated=truncated,
            info=info,
        )
        # `action_out` is indexed [0][0] for the single env/agent's action
        # (assumes local_policy_inference returns a nested batch — see its docs).
        obs, reward, terminated, truncated, info = env.step(action_out[0][0])

        ts += 1
|
| 706 |
+
|
| 707 |
+
|
| 708 |
+
def check_learning_achieved(
    tune_results: "tune.ResultGrid",
    min_value: float,
    evaluation: Optional[bool] = None,
    metric: str = f"{ENV_RUNNER_RESULTS}/episode_return_mean",
):
    """Throws an error if `min_value` is not reached within tune_results.

    Compares the best (maximum) recorded `metric` value across all trials in
    `tune_results` against `min_value`.

    Args:
        tune_results: The tune.Tuner().fit() returned results object.
        min_value: The minimum metric value that must be reached.
        evaluation: If True, use `evaluation/env_runners/[metric]`, if False, use
            `env_runners/[metric]`, if None, use evaluation sampler results if
            available otherwise, use train sampler results.
        metric: The results-dict key (slash-separated path) to check.

    Raises:
        ValueError: If `min_value` not reached.
    """
    # Get maximum value of `metrics` over all trials
    # (check if at least one trial achieved some learning, not just the final one).
    recorded_values = []
    for _, row in tune_results.get_dataframe().iterrows():
        if evaluation or (
            evaluation is None and f"{EVALUATION_RESULTS}/{metric}" in row
        ):
            recorded_values.append(row[f"{EVALUATION_RESULTS}/{metric}"])
        else:
            recorded_values.append(row[metric])
    best_value = max(recorded_values)
    if best_value < min_value:
        raise ValueError(f"`{metric}` of {min_value} not reached!")
    print(f"`{metric}` of {min_value} reached! ok")
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
def check_off_policyness(
    results: ResultDict,
    upper_limit: float,
    lower_limit: float = 0.0,
) -> Optional[float]:
    """Verifies that the off-policy'ness of some update is within some range.

    Off-policy'ness is defined as the average (across n workers) diff
    between the number of gradient updates performed on the policy used
    for sampling vs the number of gradient updates that have been performed
    on the trained policy (usually the one on the local worker).

    Uses the published DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY metric inside
    a training results dict and compares to the given bounds.

    Note: Only works with single-agent results thus far.

    Args:
        results: The training results dict.
        upper_limit: The upper limit to for the off_policy_ness value.
        lower_limit: The lower limit to for the off_policy_ness value.

    Returns:
        The off-policy'ness value (described above), or None if the default
        policy has no entry in the learner info (e.g. multi-agent results).

    Raises:
        AssertionError: If the value is out of bounds.
    """

    # Have to import this here to avoid circular dependency.
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
    from ray.rllib.utils.metrics.learner_info import LEARNER_INFO

    # Assert that the off-policy'ness is within the given bounds.
    learner_info = results["info"][LEARNER_INFO]
    if DEFAULT_POLICY_ID not in learner_info:
        return None
    off_policy_ness = learner_info[DEFAULT_POLICY_ID][
        DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY
    ]
    # Roughly: Reaches up to 0.4 for 2 rollout workers and up to 0.2 for
    # 1 rollout worker.
    if not (lower_limit <= off_policy_ness <= upper_limit):
        raise AssertionError(
            f"`off_policy_ness` ({off_policy_ness}) is outside the given bounds "
            f"({lower_limit} - {upper_limit})!"
        )

    return off_policy_ness
|
| 794 |
+
|
| 795 |
+
|
| 796 |
+
def check_train_results_new_api_stack(train_results: ResultDict) -> ResultDict:
    """Checks proper structure of a Algorithm.train() returned dict.

    Args:
        train_results: The train results dict to check.

    Returns:
        The (unchanged) `train_results` dict, for convenient chaining.

    Raises:
        AssertionError: If `train_results` doesn't have the proper structure or
            data in it.
    """
    # Import these here to avoid circular dependencies.
    from ray.rllib.utils.metrics import (
        ENV_RUNNER_RESULTS,
        FAULT_TOLERANCE_STATS,
        LEARNER_RESULTS,
        TIMERS,
    )

    # Assert that some keys are where we would expect them.
    for key in [
        ENV_RUNNER_RESULTS,
        FAULT_TOLERANCE_STATS,
        LEARNER_RESULTS,
        TIMERS,
        TRAINING_ITERATION,
        "config",
    ]:
        assert (
            key in train_results
        ), f"'{key}' not found in `train_results` ({train_results})!"

    # Make sure, `config` is an actual dict, not an AlgorithmConfig object.
    assert isinstance(
        train_results["config"], dict
    ), "`config` in results not a python dict!"

    from ray.rllib.algorithms.algorithm_config import AlgorithmConfig

    # Reconstruct multi-agent-ness from the results' own config dict.
    is_multi_agent = (
        AlgorithmConfig()
        .update_from_dict({"policies": train_results["config"]["policies"]})
        .is_multi_agent()
    )

    # Check in particular the "info" dict.
    learner_results = train_results[LEARNER_RESULTS]

    # Make sure we have a `DEFAULT_MODULE_ID` key if we are not in a
    # multi-agent setup.
    if not is_multi_agent:
        assert len(learner_results) == 0 or DEFAULT_MODULE_ID in learner_results, (
            f"'{DEFAULT_MODULE_ID}' not found in "
            f"train_results['{LEARNER_RESULTS}']!"
        )

    for module_id, module_metrics in learner_results.items():
        # The ModuleID can be __all_modules__ in multi-agent case when the new learner
        # stack is enabled.
        if module_id == "__all_modules__":
            continue

        # On the new API stack, policy has no LEARNER_STATS_KEY under it anymore.
        for key, value in module_metrics.items():
            # Min- and max-stats should be single values.
            # Bug fix: interpolate the offending key into the failure message
            # (previously printed the literal string 'key').
            assert np.isscalar(value), f"'{key}' value not a scalar ({value})!"

    return train_results
|
| 864 |
+
|
| 865 |
+
|
| 866 |
+
@OldAPIStack
def check_train_results(train_results: ResultDict):
    """Checks proper structure of an Algorithm.train() returned dict.

    Validates that all expected top-level keys, the env-runner episode/reward
    stats, the `config` dict, and the per-policy learner stats are present and
    well-formed.

    Args:
        train_results: The train results dict to check.

    Returns:
        The unchanged `train_results` dict (for convenient chaining).

    Raises:
        AssertionError: If `train_results` doesn't have the proper structure or
            data in it.
    """
    # Import these here to avoid circular dependencies.
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
    from ray.rllib.utils.metrics.learner_info import LEARNER_INFO, LEARNER_STATS_KEY

    # Assert that some keys are where we would expect them.
    for key in [
        "config",
        "custom_metrics",
        ENV_RUNNER_RESULTS,
        "info",
        "iterations_since_restore",
        "num_healthy_workers",
        "perf",
        "time_since_restore",
        "time_this_iter_s",
        "timers",
        "time_total_s",
        TRAINING_ITERATION,
    ]:
        assert (
            key in train_results
        ), f"'{key}' not found in `train_results` ({train_results})!"

    # Episode/reward stats must live under the env-runner results.
    for key in [
        "episode_len_mean",
        "episode_reward_max",
        "episode_reward_mean",
        "episode_reward_min",
        "hist_stats",
        "policy_reward_max",
        "policy_reward_mean",
        "policy_reward_min",
        "sampler_perf",
    ]:
        assert key in train_results[ENV_RUNNER_RESULTS], (
            f"'{key}' not found in `train_results[ENV_RUNNER_RESULTS]` "
            f"({train_results[ENV_RUNNER_RESULTS]})!"
        )

    # Make sure, `config` is an actual dict, not an AlgorithmConfig object.
    assert isinstance(
        train_results["config"], dict
    ), "`config` in results not a python dict!"

    from ray.rllib.algorithms.algorithm_config import AlgorithmConfig

    is_multi_agent = (
        AlgorithmConfig()
        .update_from_dict({"policies": train_results["config"]["policies"]})
        .is_multi_agent()
    )

    # Check in particular the "info" dict.
    info = train_results["info"]
    assert LEARNER_INFO in info, f"'learner' not in train_results['infos'] ({info})!"
    assert (
        "num_steps_trained" in info or NUM_ENV_STEPS_TRAINED in info
    ), f"'num_(env_)?steps_trained' not in train_results['infos'] ({info})!"

    learner_info = info[LEARNER_INFO]

    # Make sure we have a default_policy key if we are not in a
    # multi-agent setup.
    if not is_multi_agent:
        # APEX algos sometimes have an empty learner info dict (no metrics
        # collected yet).
        assert len(learner_info) == 0 or DEFAULT_POLICY_ID in learner_info, (
            f"'{DEFAULT_POLICY_ID}' not found in "
            f"train_results['infos']['learner'] ({learner_info})!"
        )

    for pid, policy_stats in learner_info.items():
        # Skip bookkeeping entries that are not per-policy stats.
        if pid == "batch_count":
            continue

        # The pid can be __all__ in multi-agent case when the new learner stack
        # is enabled.
        if pid == "__all__":
            continue

        # On the new API stack, policy has no LEARNER_STATS_KEY under it anymore.
        if LEARNER_STATS_KEY in policy_stats:
            learner_stats = policy_stats[LEARNER_STATS_KEY]
        else:
            learner_stats = policy_stats
        for key, value in learner_stats.items():
            # Min- and max-stats should be single values.
            # Fixed: interpolate the actual key name into the error message
            # (was the literal text 'key' due to missing f-string braces).
            if key.startswith("min_") or key.startswith("max_"):
                assert np.isscalar(value), f"'{key}' value not a scalar ({value})!"

    return train_results
|
| 968 |
+
|
| 969 |
+
|
| 970 |
+
# TODO (sven): Make this the de-facto, well documented, and unified utility for most of
# our tests:
# - CI (label: "learning_tests")
# - release tests (benchmarks)
# - example scripts
def run_rllib_example_script_experiment(
    base_config: "AlgorithmConfig",
    args: Optional[argparse.Namespace] = None,
    *,
    stop: Optional[Dict] = None,
    success_metric: Optional[Dict] = None,
    trainable: Optional[Type] = None,
    tune_callbacks: Optional[List] = None,
    keep_config: bool = False,
    scheduler=None,
    progress_reporter=None,
) -> Union[ResultDict, tune.result_grid.ResultGrid]:
    """Given an algorithm config and some command line args, runs an experiment.

    There are some constraints on what properties must be defined in `args`.
    It should ideally be generated via calling
    `args = add_rllib_example_script_args()`, which can be found in this very
    module here.

    The function sets up an Algorithm object from the given config (altered by
    the contents of `args`), then runs the Algorithm via Tune (or manually, if
    `args.no_tune` is set to True) using the stopping criteria in `stop`.

    At the end of the experiment, if `args.as_test` is True, checks, whether the
    Algorithm reached the `success_metric` (if None, use
    `env_runners/episode_return_mean` with a minimum value of
    `args.stop_reward`).

    See https://github.com/ray-project/ray/tree/master/rllib/examples for an
    overview of all supported command line options.

    Args:
        base_config: The AlgorithmConfig object to use for this experiment. This
            base config will be automatically "extended" based on some of the
            provided `args`. For example, `args.num_env_runners` is used to set
            `config.num_env_runners`, etc..
        args: A argparse.Namespace object, ideally returned by calling
            `args = add_rllib_example_script_args()`. It must have the following
            properties defined: `stop_iters`, `stop_reward`, `stop_timesteps`,
            `no_tune`, `verbose`, `checkpoint_freq`, `as_test`. Optionally, for
            WandB logging: `wandb_key`, `wandb_project`, `wandb_run_name`.
        stop: An optional dict mapping ResultDict key strings (using "/" in case
            of nesting, e.g. "env_runners/episode_return_mean" for referring to
            `result_dict['env_runners']['episode_return_mean']` to minimum
            values, reaching of which will stop the experiment). Default is:
            {
                "env_runners/episode_return_mean": args.stop_reward,
                "training_iteration": args.stop_iters,
                "num_env_steps_sampled_lifetime": args.stop_timesteps,
            }
        success_metric: Only relevant if `args.as_test` is True.
            A dict mapping a single(!) ResultDict key string (using "/" in case
            of nesting, e.g. "env_runners/episode_return_mean" for referring to
            `result_dict['env_runners']['episode_return_mean']` to a single(!)
            minimum value to be reached in order for the experiment to count as
            successful. If `args.as_test` is True AND this `success_metric` is
            not reached with the bounds defined by `stop`, will raise an
            Exception.
        trainable: The Trainable sub-class to run in the tune.Tuner. If None
            (default), use the registered RLlib Algorithm class specified by
            args.algo.
        tune_callbacks: A list of Tune callbacks to configure with the
            tune.Tuner. In case `args.wandb_key` is provided, appends a WandB
            logger to this list.
        keep_config: Set this to True, if you don't want this utility to change
            the given `base_config` in any way and leave it as-is. This is
            helpful for those example scripts which demonstrate how to set
            config settings that are taken care of automatically in this
            function otherwise (e.g. `num_env_runners`).

    Returns:
        The last ResultDict from a --no-tune run OR the tune.Tuner.fit()
        results.
    """
    if args is None:
        parser = add_rllib_example_script_args()
        args = parser.parse_args()

    # If run --as-release-test, --as-test must also be set.
    if args.as_release_test:
        args.as_test = True

    # Initialize Ray.
    ray.init(
        num_cpus=args.num_cpus or None,
        local_mode=args.local_mode,
        ignore_reinit_error=True,
    )

    # Define one or more stopping criteria.
    if stop is None:
        stop = {
            f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": args.stop_reward,
            f"{ENV_RUNNER_RESULTS}/{NUM_ENV_STEPS_SAMPLED_LIFETIME}": (
                args.stop_timesteps
            ),
            TRAINING_ITERATION: args.stop_iters,
        }

    config = base_config

    # Enhance the `base_config`, based on provided `args`.
    if not keep_config:
        # Set the framework.
        config.framework(args.framework)

        # Add an env specifier (only if not already set in config)?
        if args.env is not None and config.env is None:
            config.environment(args.env)

        # Enable the new API stack?
        if args.enable_new_api_stack:
            config.api_stack(
                enable_rl_module_and_learner=True,
                enable_env_runner_and_connector_v2=True,
            )

        # Define EnvRunner/RolloutWorker scaling and behavior.
        if args.num_env_runners is not None:
            config.env_runners(num_env_runners=args.num_env_runners)

        # Define compute resources used automatically (only using the
        # --num-learners and --num-gpus-per-learner args).
        # New stack.
        if config.enable_rl_module_and_learner:
            if args.num_gpus > 0:
                raise ValueError(
                    "--num-gpus is not supported on the new API stack! To train on "
                    "GPUs, use the command line options `--num-gpus-per-learner=1` and "
                    "`--num-learners=[your number of available GPUs]`, instead."
                )

            # Do we have GPUs available in the cluster?
            num_gpus_available = ray.cluster_resources().get("GPU", 0)
            # Number of actual Learner instances (including the local Learner if
            # `num_learners=0`).
            num_actual_learners = (
                args.num_learners
                if args.num_learners is not None
                else config.num_learners
            ) or 1  # 1: There is always a local Learner, if num_learners=0.
            # How many were hard-requested by the user
            # (through explicit `--num-gpus-per-learner >= 1`).
            num_gpus_requested = (args.num_gpus_per_learner or 0) * num_actual_learners
            # Number of GPUs needed, if `num_gpus_per_learner=None` (auto).
            num_gpus_needed_if_available = (
                args.num_gpus_per_learner
                if args.num_gpus_per_learner is not None
                else 1
            ) * num_actual_learners
            # Define compute resources used.
            config.resources(num_gpus=0)  # old API stack setting
            if args.num_learners is not None:
                config.learners(num_learners=args.num_learners)

            # User wants to use GPUs if available, but doesn't hard-require them.
            if args.num_gpus_per_learner is None:
                if num_gpus_available >= num_gpus_needed_if_available:
                    config.learners(num_gpus_per_learner=1)
                else:
                    config.learners(num_gpus_per_learner=0, num_cpus_per_learner=1)

            # User hard-requires n GPUs, but they are not available -> Error.
            # Fixed: the previous message ended with "Will run with N CPU
            # Learners instead.", which contradicted the fact that this branch
            # raises and never falls back.
            elif num_gpus_available < num_gpus_requested:
                raise ValueError(
                    "You are running your script with --num-learners="
                    f"{args.num_learners} and --num-gpus-per-learner="
                    f"{args.num_gpus_per_learner}, but your cluster only has "
                    f"{num_gpus_available} GPUs! Reduce the number of requested "
                    "GPUs or run with `--num-gpus-per-learner=0`."
                )

            # All required GPUs are available -> Use them.
            else:
                config.learners(num_gpus_per_learner=args.num_gpus_per_learner)

        # Old stack.
        else:
            config.resources(num_gpus=args.num_gpus)

        # Evaluation setup.
        if args.evaluation_interval > 0:
            config.evaluation(
                evaluation_num_env_runners=args.evaluation_num_env_runners,
                evaluation_interval=args.evaluation_interval,
                evaluation_duration=args.evaluation_duration,
                evaluation_duration_unit=args.evaluation_duration_unit,
                evaluation_parallel_to_training=args.evaluation_parallel_to_training,
            )

        # Set the log-level (if applicable).
        if args.log_level is not None:
            config.debugging(log_level=args.log_level)

        # Set the output dir (if applicable).
        if args.output is not None:
            config.offline_data(output=args.output)

    # Run the experiment w/o Tune (directly operate on the RLlib Algorithm object).
    if args.no_tune:
        assert not args.as_test and not args.as_release_test
        algo = config.build()
        # Guard against an unbound `results` when zero iterations are requested.
        results = None
        for i in range(stop.get(TRAINING_ITERATION, args.stop_iters)):
            results = algo.train()
            if ENV_RUNNER_RESULTS in results:
                mean_return = results[ENV_RUNNER_RESULTS].get(
                    EPISODE_RETURN_MEAN, np.nan
                )
                print(f"iter={i} R={mean_return}", end="")
            if EVALUATION_RESULTS in results:
                Reval = results[EVALUATION_RESULTS][ENV_RUNNER_RESULTS][
                    EPISODE_RETURN_MEAN
                ]
                print(f" R(eval)={Reval}", end="")
            print()
            # Check all stop criteria; return early once any one is reached.
            for key, threshold in stop.items():
                val = results
                for k in key.split("/"):
                    try:
                        val = val[k]
                    except KeyError:
                        val = None
                        break
                if val is not None and not np.isnan(val) and val >= threshold:
                    # Fixed typo: "criterium" -> "criterion".
                    print(f"Stop criterion ({key}={threshold}) fulfilled!")
                    ray.shutdown()
                    return results

        ray.shutdown()
        return results

    # Run the experiment using Ray Tune.

    # Log results using WandB.
    tune_callbacks = tune_callbacks or []
    if hasattr(args, "wandb_key") and (
        args.wandb_key is not None or WANDB_ENV_VAR in os.environ
    ):
        wandb_key = args.wandb_key or os.environ[WANDB_ENV_VAR]
        project = args.wandb_project or (
            args.algo.lower() + "-" + re.sub("\\W+", "-", str(config.env).lower())
        )
        tune_callbacks.append(
            WandbLoggerCallback(
                api_key=wandb_key,
                project=project,
                upload_checkpoints=True,
                **({"name": args.wandb_run_name} if args.wandb_run_name else {}),
            )
        )

    # Auto-configure a CLIReporter (to log the results to the console).
    # Use better ProgressReporter for multi-agent cases: List individual policy
    # rewards.
    if progress_reporter is None and args.num_agents > 0:
        progress_reporter = CLIReporter(
            metric_columns={
                **{
                    TRAINING_ITERATION: "iter",
                    "time_total_s": "total time (s)",
                    NUM_ENV_STEPS_SAMPLED_LIFETIME: "ts",
                    f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": "combined return",
                },
                **{
                    (
                        f"{ENV_RUNNER_RESULTS}/module_episode_returns_mean/{pid}"
                    ): f"return {pid}"
                    for pid in config.policies
                },
            },
        )

    # Force Tuner to use old progress output as the new one silently ignores our
    # custom `CLIReporter`.
    os.environ["RAY_AIR_NEW_OUTPUT"] = "0"

    # Run the actual experiment (using Tune).
    start_time = time.time()
    results = tune.Tuner(
        trainable or config.algo_class,
        param_space=config,
        run_config=air.RunConfig(
            stop=stop,
            verbose=args.verbose,
            callbacks=tune_callbacks,
            checkpoint_config=air.CheckpointConfig(
                checkpoint_frequency=args.checkpoint_freq,
                checkpoint_at_end=args.checkpoint_at_end,
            ),
            progress_reporter=progress_reporter,
        ),
        tune_config=tune.TuneConfig(
            num_samples=args.num_samples,
            max_concurrent_trials=args.max_concurrent_trials,
            scheduler=scheduler,
        ),
    ).fit()
    time_taken = time.time() - start_time

    ray.shutdown()

    # If run as a test, check whether we reached the specified success criteria.
    test_passed = False
    if args.as_test:
        # Success metric not provided, try extracting it from `stop`.
        if success_metric is None:
            for try_it in [
                f"{EVALUATION_RESULTS}/{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}",
                f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}",
            ]:
                if try_it in stop:
                    success_metric = {try_it: stop[try_it]}
                    break
            if success_metric is None:
                success_metric = {
                    f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": args.stop_reward,
                }
        # TODO (sven): Make this work for more than one metric (AND-logic?).
        # Get maximum value of `metric` over all trials
        # (check if at least one trial achieved some learning, not just the final one).
        success_metric_key, success_metric_value = next(iter(success_metric.items()))
        best_value = max(
            row[success_metric_key] for _, row in results.get_dataframe().iterrows()
        )
        if best_value >= success_metric_value:
            test_passed = True
            print(f"`{success_metric_key}` of {success_metric_value} reached! ok")

        if args.as_release_test:
            trial = results._experiment_analysis.trials[0]
            stats = trial.last_result
            stats.pop("config", None)
            json_summary = {
                "time_taken": float(time_taken),
                "trial_states": [trial.status],
                "last_update": float(time.time()),
                "stats": stats,
                "passed": [test_passed],
                "not_passed": [not test_passed],
                "failures": {str(trial): 1} if not test_passed else {},
            }
            with open(
                os.environ.get("TEST_OUTPUT_JSON", "/tmp/learning_test.json"),
                "wt",
            ) as f:
                try:
                    json.dump(json_summary, f)
                # Something went wrong writing json. Try again w/ simplified stats.
                except Exception:
                    from ray.rllib.algorithms.algorithm import Algorithm

                    simplified_stats = {
                        k: stats[k] for k in Algorithm._progress_metrics if k in stats
                    }
                    json_summary["stats"] = simplified_stats
                    json.dump(json_summary, f)

        if not test_passed:
            raise ValueError(
                f"`{success_metric_key}` of {success_metric_value} not reached!"
            )

    return results
|
| 1334 |
+
|
| 1335 |
+
|
| 1336 |
+
def check_same_batch(batch1, batch2) -> None:
    """Check if both batches are (almost) identical.

    For MultiAgentBatches, the step count and individual policy's
    SampleBatches are checked for identity. For SampleBatches, identity is
    checked as the almost numerical key-value-pair identity between batches
    with ray.rllib.utils.test_utils.check(). unroll_id is compared only if
    both batches have an unroll_id.

    Args:
        batch1: Batch to compare against batch2
        batch2: Batch to compare against batch1
    """
    # Avoids circular import
    from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch

    assert type(batch1) is type(
        batch2
    ), "Input batches are of different types {} and {}".format(
        str(type(batch1)), str(type(batch2))
    )

    def _compare_sample_batches(sb_a, sb_b, policy_id=None):
        # unroll IDs only have to fit if both batches have them.
        unroll_a = sb_a.get("unroll_id", None)
        unroll_b = sb_b.get("unroll_id", None)
        if unroll_a is not None and unroll_b is not None:
            assert unroll_a == unroll_b

        # Compare values key by key (unroll_id is handled above).
        keys_a = set()
        for key, value in sb_a.items():
            if key == "unroll_id":
                continue
            check(value, sb_b[key])
            keys_a.add(key)

        keys_b = set(sb_b.keys())
        keys_b.discard("unroll_id")
        key_diff = keys_a.symmetric_difference(keys_b)

        # Cases where one batch has info and the other has not.
        if policy_id:
            assert not key_diff, (
                "SampleBatches for policy with ID {} "
                "don't share information on the "
                "following information: \n{}"
                "".format(policy_id, key_diff)
            )
        else:
            assert not key_diff, (
                "SampleBatches don't share information "
                "on the following information: \n{}"
                "".format(key_diff)
            )

    if type(batch1) is SampleBatch:
        _compare_sample_batches(batch1, batch2)
    elif type(batch1) is MultiAgentBatch:
        assert batch1.count == batch2.count
        seen_policy_ids = set()
        for policy_id, policy_batch in batch1.policy_batches.items():
            _compare_sample_batches(
                policy_batch, batch2.policy_batches[policy_id], policy_id
            )
            seen_policy_ids.add(policy_id)

        # Case where one ma batch has info on a policy the other has not.
        other_policy_ids = set(batch2.policy_batches.keys())
        difference = seen_policy_ids.symmetric_difference(other_policy_ids)
        assert (
            not difference
        ), f"MultiAgentBatches don't share the following information: \n{difference}."
    else:
        raise ValueError("Unsupported batch type " + str(type(batch1)))
|
| 1412 |
+
|
| 1413 |
+
|
| 1414 |
+
def check_reproducibilty(
    algo_class: Type["Algorithm"],
    algo_config: "AlgorithmConfig",
    *,
    fw_kwargs: Dict[str, Any],
    training_iteration: int = 1,
) -> None:
    # TODO @kourosh: we can get rid of examples/deterministic_training.py once
    # this is added to all algorithms
    """Check if the algorithm is reproducible across different testing conditions:

    frameworks: all input frameworks
    num_gpus: int(os.environ.get("RLLIB_NUM_GPUS", "0"))
    num_workers: 0 (only local workers) or
        2 ((1) local worker + (2) remote workers)
    num_envs_per_env_runner: 2

    Args:
        algo_class: Algorithm class to test.
        algo_config: Base config to use for the algorithm.
        fw_kwargs: Framework iterator keyword arguments.
        training_iteration: Number of training iterations to run.

    Returns:
        None

    Raises:
        It raises an AssertionError if the algorithm is not reproducible.
    """
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
    from ray.rllib.utils.metrics.learner_info import LEARNER_INFO

    stop_dict = {TRAINING_ITERATION: training_iteration}

    def _train_once(param_space):
        # Run one short training via Tune and return the best trial's metrics.
        fit_result = tune.Tuner(
            algo_class,
            param_space=param_space,
            run_config=air.RunConfig(stop=stop_dict, verbose=1),
        ).fit()
        return fit_result.get_best_result().metrics

    # Use 0 and 2 workers (for more than 4 workers we have to make sure the
    # instance type in the CI build has enough resources).
    for num_workers in [0, 2]:
        num_gpus = int(os.environ.get("RLLIB_NUM_GPUS", "0"))
        algo_config = (
            algo_config.debugging(seed=42)
            .env_runners(num_env_runners=num_workers, num_envs_per_env_runner=2)
            # New API stack.
            .learners(num_gpus_per_learner=num_gpus)
            # Old API stack.
            .resources(num_gpus=num_gpus)
        )

        print(
            f"Testing reproducibility of {algo_class.__name__}"
            f" with {num_workers} workers"
        )
        print("/// config")
        pprint.pprint(algo_config.to_dict())

        # Run the identical (seeded) experiment twice via tune.Tuner().fit().
        results1 = _train_once(algo_config.to_dict())
        results2 = _train_once(algo_config.to_dict())

        # Test rollout behavior.
        check(
            results1[ENV_RUNNER_RESULTS]["hist_stats"],
            results2[ENV_RUNNER_RESULTS]["hist_stats"],
        )
        # As well as training behavior (minibatch sequence during SGD
        # iterations).
        if algo_config.enable_rl_module_and_learner:
            check(
                results1["info"][LEARNER_INFO][DEFAULT_POLICY_ID],
                results2["info"][LEARNER_INFO][DEFAULT_POLICY_ID],
            )
        else:
            check(
                results1["info"][LEARNER_INFO][DEFAULT_POLICY_ID]["learner_stats"],
                results2["info"][LEARNER_INFO][DEFAULT_POLICY_ID]["learner_stats"],
            )
|
| 1504 |
+
|
| 1505 |
+
|
| 1506 |
+
def get_cartpole_dataset_reader(batch_size: int = 1) -> "DatasetReader":
    """Returns a DatasetReader for the cartpole dataset.

    Args:
        batch_size: The batch size to use for the reader.

    Returns:
        A rllib DatasetReader for the cartpole dataset.
    """
    from ray.rllib.algorithms import AlgorithmConfig
    from ray.rllib.offline import IOContext
    from ray.rllib.offline.dataset_reader import (
        DatasetReader,
        get_dataset_and_shards,
    )

    # Point the input pipeline at the bundled offline cartpole JSON data.
    input_config = {"format": "json", "paths": "tests/data/cartpole/large.json"}
    dataset, _ = get_dataset_and_shards(
        AlgorithmConfig().offline_data(input_="dataset", input_config=input_config)
    )

    # Build a local (worker_index=0) IO context carrying the requested
    # train batch size.
    reader_config = (
        AlgorithmConfig()
        .training(train_batch_size=batch_size)
        .offline_data(actions_in_input_normalized=True)
    )
    io_ctx = IOContext(config=reader_config, worker_index=0)
    return DatasetReader(dataset, io_ctx)
|
| 1535 |
+
|
| 1536 |
+
|
| 1537 |
+
class ModelChecker:
|
| 1538 |
+
"""Helper class to compare architecturally identical Models across frameworks.
|
| 1539 |
+
|
| 1540 |
+
Holds a ModelConfig, such that individual models can be added simply via their
|
| 1541 |
+
framework string (by building them with config.build(framework=...).
|
| 1542 |
+
A call to `check()` forces all added models to be compared in terms of their
|
| 1543 |
+
number of trainable and non-trainable parameters, as well as, their
|
| 1544 |
+
computation results given a common weights structure and values and identical
|
| 1545 |
+
inputs to the models.
|
| 1546 |
+
"""
|
| 1547 |
+
|
| 1548 |
+
def __init__(self, config):
|
| 1549 |
+
self.config = config
|
| 1550 |
+
|
| 1551 |
+
# To compare number of params between frameworks.
|
| 1552 |
+
self.param_counts = {}
|
| 1553 |
+
# To compare computed outputs from fixed-weights-nets between frameworks.
|
| 1554 |
+
self.output_values = {}
|
| 1555 |
+
|
| 1556 |
+
# We will pass an observation filled with this one random value through
|
| 1557 |
+
# all DL networks (after they have been set to fixed-weights) to compare
|
| 1558 |
+
# the computed outputs.
|
| 1559 |
+
self.random_fill_input_value = np.random.uniform(-0.01, 0.01)
|
| 1560 |
+
|
| 1561 |
+
# Dict of models to check against each other.
|
| 1562 |
+
self.models = {}
|
| 1563 |
+
|
| 1564 |
+
def add(self, framework: str = "torch", obs=True, state=False) -> Any:
|
| 1565 |
+
"""Builds a new Model for the given framework."""
|
| 1566 |
+
model = self.models[framework] = self.config.build(framework=framework)
|
| 1567 |
+
|
| 1568 |
+
# Pass a B=1 observation through the model.
|
| 1569 |
+
inputs = np.full(
|
| 1570 |
+
[1] + ([1] if state else []) + list(self.config.input_dims),
|
| 1571 |
+
self.random_fill_input_value,
|
| 1572 |
+
)
|
| 1573 |
+
if obs:
|
| 1574 |
+
inputs = {Columns.OBS: inputs}
|
| 1575 |
+
if state:
|
| 1576 |
+
inputs[Columns.STATE_IN] = tree.map_structure(
|
| 1577 |
+
lambda s: np.zeros(shape=[1] + list(s)), state
|
| 1578 |
+
)
|
| 1579 |
+
if framework == "torch":
|
| 1580 |
+
from ray.rllib.utils.torch_utils import convert_to_torch_tensor
|
| 1581 |
+
|
| 1582 |
+
inputs = convert_to_torch_tensor(inputs)
|
| 1583 |
+
# w/ old specs: inputs = model.input_specs.fill(self.random_fill_input_value)
|
| 1584 |
+
|
| 1585 |
+
outputs = model(inputs)
|
| 1586 |
+
|
| 1587 |
+
# Bring model into a reproducible, comparable state (so we can compare
|
| 1588 |
+
# computations across frameworks). Use only a value-sequence of len=1 here
|
| 1589 |
+
# as it could possibly be that the layers are stored in different order
|
| 1590 |
+
# across the different frameworks.
|
| 1591 |
+
model._set_to_dummy_weights(value_sequence=(self.random_fill_input_value,))
|
| 1592 |
+
|
| 1593 |
+
# Perform another forward pass.
|
| 1594 |
+
comparable_outputs = model(inputs)
|
| 1595 |
+
|
| 1596 |
+
# Store the number of parameters for this framework's net.
|
| 1597 |
+
self.param_counts[framework] = model.get_num_parameters()
|
| 1598 |
+
# Store the fixed-weights-net outputs for this framework's net.
|
| 1599 |
+
if framework == "torch":
|
| 1600 |
+
self.output_values[framework] = tree.map_structure(
|
| 1601 |
+
lambda s: s.detach().numpy() if s is not None else None,
|
| 1602 |
+
comparable_outputs,
|
| 1603 |
+
)
|
| 1604 |
+
else:
|
| 1605 |
+
self.output_values[framework] = tree.map_structure(
|
| 1606 |
+
lambda s: s.numpy() if s is not None else None, comparable_outputs
|
| 1607 |
+
)
|
| 1608 |
+
return outputs
|
| 1609 |
+
|
| 1610 |
+
def check(self):
|
| 1611 |
+
"""Compares all added Models with each other and possibly raises errors."""
|
| 1612 |
+
|
| 1613 |
+
main_key = next(iter(self.models.keys()))
|
| 1614 |
+
# Compare number of trainable and non-trainable params between all
|
| 1615 |
+
# frameworks.
|
| 1616 |
+
for c in self.param_counts.values():
|
| 1617 |
+
check(c, self.param_counts[main_key])
|
| 1618 |
+
|
| 1619 |
+
# Compare dummy outputs by exact values given that all nets received the
|
| 1620 |
+
# same input and all nets have the same (dummy) weight values.
|
| 1621 |
+
for v in self.output_values.values():
|
| 1622 |
+
check(v, self.output_values[main_key], atol=0.0005)
|
| 1623 |
+
|
| 1624 |
+
|
| 1625 |
+
def _get_mean_action_from_algorithm(alg: "Algorithm", obs: np.ndarray) -> np.ndarray:
|
| 1626 |
+
"""Returns the mean action computed by the given algorithm.
|
| 1627 |
+
|
| 1628 |
+
Note: This makes calls to `Algorithm.compute_single_action`
|
| 1629 |
+
|
| 1630 |
+
Args:
|
| 1631 |
+
alg: The constructed algorithm to run inference on.
|
| 1632 |
+
obs: The observation to compute the action for.
|
| 1633 |
+
|
| 1634 |
+
Returns:
|
| 1635 |
+
The mean action computed by the algorithm over 5000 samples.
|
| 1636 |
+
|
| 1637 |
+
"""
|
| 1638 |
+
out = []
|
| 1639 |
+
for _ in range(5000):
|
| 1640 |
+
out.append(float(alg.compute_single_action(obs)))
|
| 1641 |
+
return np.mean(out)
|
| 1642 |
+
|
| 1643 |
+
|
| 1644 |
+
def check_supported_spaces(
|
| 1645 |
+
alg: str,
|
| 1646 |
+
config: "AlgorithmConfig",
|
| 1647 |
+
train: bool = True,
|
| 1648 |
+
check_bounds: bool = False,
|
| 1649 |
+
frameworks: Optional[Tuple[str]] = None,
|
| 1650 |
+
use_gpu: bool = False,
|
| 1651 |
+
):
|
| 1652 |
+
"""Checks whether the given algorithm supports different action and obs spaces.
|
| 1653 |
+
|
| 1654 |
+
Performs the checks by constructing an rllib algorithm from the config and
|
| 1655 |
+
checking to see that the model inside the policy is the correct one given
|
| 1656 |
+
the action and obs spaces. For example if the action space is discrete and
|
| 1657 |
+
the obs space is an image, then the model should be a vision network with
|
| 1658 |
+
a categorical action distribution.
|
| 1659 |
+
|
| 1660 |
+
Args:
|
| 1661 |
+
alg: The name of the algorithm to test.
|
| 1662 |
+
config: The config to use for the algorithm.
|
| 1663 |
+
train: Whether to train the algorithm for a few iterations.
|
| 1664 |
+
check_bounds: Whether to check the bounds of the action space.
|
| 1665 |
+
frameworks: The frameworks to test the algorithm with.
|
| 1666 |
+
use_gpu: Whether to check support for training on a gpu.
|
| 1667 |
+
|
| 1668 |
+
|
| 1669 |
+
"""
|
| 1670 |
+
# Do these imports here because otherwise we have circular imports.
|
| 1671 |
+
from ray.rllib.examples.envs.classes.random_env import RandomEnv
|
| 1672 |
+
from ray.rllib.models.torch.complex_input_net import (
|
| 1673 |
+
ComplexInputNetwork as TorchComplexNet,
|
| 1674 |
+
)
|
| 1675 |
+
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFCNet
|
| 1676 |
+
from ray.rllib.models.torch.visionnet import VisionNetwork as TorchVisionNet
|
| 1677 |
+
|
| 1678 |
+
action_spaces_to_test = {
|
| 1679 |
+
# Test discrete twice here until we support multi_binary action spaces
|
| 1680 |
+
"discrete": Discrete(5),
|
| 1681 |
+
"continuous": Box(-1.0, 1.0, (5,), dtype=np.float32),
|
| 1682 |
+
"int_actions": Box(0, 3, (2, 3), dtype=np.int32),
|
| 1683 |
+
"multidiscrete": MultiDiscrete([1, 2, 3, 4]),
|
| 1684 |
+
"tuple": GymTuple(
|
| 1685 |
+
[Discrete(2), Discrete(3), Box(-1.0, 1.0, (5,), dtype=np.float32)]
|
| 1686 |
+
),
|
| 1687 |
+
"dict": GymDict(
|
| 1688 |
+
{
|
| 1689 |
+
"action_choice": Discrete(3),
|
| 1690 |
+
"parameters": Box(-1.0, 1.0, (1,), dtype=np.float32),
|
| 1691 |
+
"yet_another_nested_dict": GymDict(
|
| 1692 |
+
{"a": GymTuple([Discrete(2), Discrete(3)])}
|
| 1693 |
+
),
|
| 1694 |
+
}
|
| 1695 |
+
),
|
| 1696 |
+
}
|
| 1697 |
+
|
| 1698 |
+
observation_spaces_to_test = {
|
| 1699 |
+
"multi_binary": MultiBinary([3, 10, 10]),
|
| 1700 |
+
"discrete": Discrete(5),
|
| 1701 |
+
"continuous": Box(-1.0, 1.0, (5,), dtype=np.float32),
|
| 1702 |
+
"vector2d": Box(-1.0, 1.0, (5, 5), dtype=np.float32),
|
| 1703 |
+
"image": Box(-1.0, 1.0, (84, 84, 1), dtype=np.float32),
|
| 1704 |
+
"tuple": GymTuple([Discrete(10), Box(-1.0, 1.0, (5,), dtype=np.float32)]),
|
| 1705 |
+
"dict": GymDict(
|
| 1706 |
+
{
|
| 1707 |
+
"task": Discrete(10),
|
| 1708 |
+
"position": Box(-1.0, 1.0, (5,), dtype=np.float32),
|
| 1709 |
+
}
|
| 1710 |
+
),
|
| 1711 |
+
}
|
| 1712 |
+
|
| 1713 |
+
# The observation spaces that we test RLModules with
|
| 1714 |
+
rlmodule_supported_observation_spaces = [
|
| 1715 |
+
"multi_binary",
|
| 1716 |
+
"discrete",
|
| 1717 |
+
"continuous",
|
| 1718 |
+
"image",
|
| 1719 |
+
"tuple",
|
| 1720 |
+
"dict",
|
| 1721 |
+
]
|
| 1722 |
+
|
| 1723 |
+
# The action spaces that we test RLModules with
|
| 1724 |
+
rlmodule_supported_action_spaces = ["discrete", "continuous"]
|
| 1725 |
+
|
| 1726 |
+
default_observation_space = default_action_space = "discrete"
|
| 1727 |
+
|
| 1728 |
+
config["log_level"] = "ERROR"
|
| 1729 |
+
config["env"] = RandomEnv
|
| 1730 |
+
|
| 1731 |
+
def _do_check(alg, config, a_name, o_name):
|
| 1732 |
+
# We need to copy here so that this validation does not affect the actual
|
| 1733 |
+
# validation method call further down the line.
|
| 1734 |
+
config_copy = config.copy()
|
| 1735 |
+
config_copy.validate()
|
| 1736 |
+
# If RLModules are enabled, we need to skip a few tests for now:
|
| 1737 |
+
if config_copy.enable_rl_module_and_learner:
|
| 1738 |
+
# Skip PPO cases in which RLModules don't support the given spaces yet.
|
| 1739 |
+
if o_name not in rlmodule_supported_observation_spaces:
|
| 1740 |
+
logger.warning(
|
| 1741 |
+
"Skipping PPO test with RLModules for obs space {}".format(o_name)
|
| 1742 |
+
)
|
| 1743 |
+
return
|
| 1744 |
+
if a_name not in rlmodule_supported_action_spaces:
|
| 1745 |
+
logger.warning(
|
| 1746 |
+
"Skipping PPO test with RLModules for action space {}".format(
|
| 1747 |
+
a_name
|
| 1748 |
+
)
|
| 1749 |
+
)
|
| 1750 |
+
return
|
| 1751 |
+
|
| 1752 |
+
fw = config["framework"]
|
| 1753 |
+
action_space = action_spaces_to_test[a_name]
|
| 1754 |
+
obs_space = observation_spaces_to_test[o_name]
|
| 1755 |
+
print(
|
| 1756 |
+
"=== Testing {} (fw={}) action_space={} obs_space={} ===".format(
|
| 1757 |
+
alg, fw, action_space, obs_space
|
| 1758 |
+
)
|
| 1759 |
+
)
|
| 1760 |
+
t0 = time.time()
|
| 1761 |
+
config.update_from_dict(
|
| 1762 |
+
dict(
|
| 1763 |
+
env_config=dict(
|
| 1764 |
+
action_space=action_space,
|
| 1765 |
+
observation_space=obs_space,
|
| 1766 |
+
reward_space=Box(1.0, 1.0, shape=(), dtype=np.float32),
|
| 1767 |
+
p_terminated=1.0,
|
| 1768 |
+
check_action_bounds=check_bounds,
|
| 1769 |
+
)
|
| 1770 |
+
)
|
| 1771 |
+
)
|
| 1772 |
+
stat = "ok"
|
| 1773 |
+
|
| 1774 |
+
try:
|
| 1775 |
+
algo = config.build()
|
| 1776 |
+
except ray.exceptions.RayActorError as e:
|
| 1777 |
+
if len(e.args) >= 2 and isinstance(e.args[2], UnsupportedSpaceException):
|
| 1778 |
+
stat = "unsupported"
|
| 1779 |
+
elif isinstance(e.args[0].args[2], UnsupportedSpaceException):
|
| 1780 |
+
stat = "unsupported"
|
| 1781 |
+
else:
|
| 1782 |
+
raise
|
| 1783 |
+
except UnsupportedSpaceException:
|
| 1784 |
+
stat = "unsupported"
|
| 1785 |
+
else:
|
| 1786 |
+
if alg not in ["SAC", "PPO"]:
|
| 1787 |
+
# 2D (image) input: Expect VisionNet.
|
| 1788 |
+
if o_name in ["atari", "image"]:
|
| 1789 |
+
assert isinstance(algo.get_policy().model, TorchVisionNet)
|
| 1790 |
+
# 1D input: Expect FCNet.
|
| 1791 |
+
elif o_name == "continuous":
|
| 1792 |
+
assert isinstance(algo.get_policy().model, TorchFCNet)
|
| 1793 |
+
# Could be either one: ComplexNet (if disabled Preprocessor)
|
| 1794 |
+
# or FCNet (w/ Preprocessor).
|
| 1795 |
+
elif o_name == "vector2d":
|
| 1796 |
+
assert isinstance(
|
| 1797 |
+
algo.get_policy().model, (TorchComplexNet, TorchFCNet)
|
| 1798 |
+
)
|
| 1799 |
+
if train:
|
| 1800 |
+
algo.train()
|
| 1801 |
+
algo.stop()
|
| 1802 |
+
print("Test: {}, ran in {}s".format(stat, time.time() - t0))
|
| 1803 |
+
|
| 1804 |
+
if not frameworks:
|
| 1805 |
+
frameworks = ("tf2", "tf", "torch")
|
| 1806 |
+
|
| 1807 |
+
_do_check_remote = ray.remote(_do_check)
|
| 1808 |
+
_do_check_remote = _do_check_remote.options(num_gpus=1 if use_gpu else 0)
|
| 1809 |
+
# Test all action spaces first.
|
| 1810 |
+
for a_name in action_spaces_to_test.keys():
|
| 1811 |
+
o_name = default_observation_space
|
| 1812 |
+
ray.get(_do_check_remote.remote(alg, config, a_name, o_name))
|
| 1813 |
+
|
| 1814 |
+
# Now test all observation spaces.
|
| 1815 |
+
for o_name in observation_spaces_to_test.keys():
|
| 1816 |
+
a_name = default_action_space
|
| 1817 |
+
ray.get(_do_check_remote.remote(alg, config, a_name, o_name))
|
deepseek/lib/python3.10/site-packages/ray/rllib/utils/torch_utils.py
ADDED
|
@@ -0,0 +1,745 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
import warnings
|
| 4 |
+
from typing import Dict, List, Optional, TYPE_CHECKING, Union
|
| 5 |
+
|
| 6 |
+
import gymnasium as gym
|
| 7 |
+
from gymnasium.spaces import Discrete, MultiDiscrete
|
| 8 |
+
import numpy as np
|
| 9 |
+
from packaging import version
|
| 10 |
+
import tree # pip install dm_tree
|
| 11 |
+
|
| 12 |
+
from ray.rllib.models.repeated_values import RepeatedValues
|
| 13 |
+
from ray.rllib.utils.annotations import PublicAPI, DeveloperAPI
|
| 14 |
+
from ray.rllib.utils.framework import try_import_torch
|
| 15 |
+
from ray.rllib.utils.numpy import SMALL_NUMBER
|
| 16 |
+
from ray.rllib.utils.typing import (
|
| 17 |
+
LocalOptimizer,
|
| 18 |
+
NetworkType,
|
| 19 |
+
SpaceStruct,
|
| 20 |
+
TensorStructType,
|
| 21 |
+
TensorType,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
if TYPE_CHECKING:
|
| 25 |
+
from ray.rllib.core.learner.learner import ParamDict, ParamList
|
| 26 |
+
from ray.rllib.policy.torch_policy import TorchPolicy
|
| 27 |
+
from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2
|
| 28 |
+
|
| 29 |
+
logger = logging.getLogger(__name__)
|
| 30 |
+
torch, nn = try_import_torch()
|
| 31 |
+
|
| 32 |
+
# Limit values suitable for use as close to a -inf logit. These are useful
|
| 33 |
+
# since -inf / inf cause NaNs during backprop.
|
| 34 |
+
FLOAT_MIN = -3.4e38
|
| 35 |
+
FLOAT_MAX = 3.4e38
|
| 36 |
+
|
| 37 |
+
if torch:
|
| 38 |
+
TORCH_COMPILE_REQUIRED_VERSION = version.parse("2.0.0")
|
| 39 |
+
else:
|
| 40 |
+
TORCH_COMPILE_REQUIRED_VERSION = ValueError(
|
| 41 |
+
"torch is not installed. " "TORCH_COMPILE_REQUIRED_VERSION is " "not defined."
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# TODO (sven): Deprecate this function once we have moved completely to the Learner API.
|
| 46 |
+
# Replaced with `clip_gradients()`.
|
| 47 |
+
@PublicAPI
|
| 48 |
+
def apply_grad_clipping(
|
| 49 |
+
policy: "TorchPolicy", optimizer: LocalOptimizer, loss: TensorType
|
| 50 |
+
) -> Dict[str, TensorType]:
|
| 51 |
+
"""Applies gradient clipping to already computed grads inside `optimizer`.
|
| 52 |
+
|
| 53 |
+
Note: This function does NOT perform an analogous operation as
|
| 54 |
+
tf.clip_by_global_norm. It merely clips by norm (per gradient tensor) and
|
| 55 |
+
then computes the global norm across all given tensors (but without clipping
|
| 56 |
+
by that global norm).
|
| 57 |
+
|
| 58 |
+
Args:
|
| 59 |
+
policy: The TorchPolicy, which calculated `loss`.
|
| 60 |
+
optimizer: A local torch optimizer object.
|
| 61 |
+
loss: The torch loss tensor.
|
| 62 |
+
|
| 63 |
+
Returns:
|
| 64 |
+
An info dict containing the "grad_norm" key and the resulting clipped
|
| 65 |
+
gradients.
|
| 66 |
+
"""
|
| 67 |
+
grad_gnorm = 0
|
| 68 |
+
if policy.config["grad_clip"] is not None:
|
| 69 |
+
clip_value = policy.config["grad_clip"]
|
| 70 |
+
else:
|
| 71 |
+
clip_value = np.inf
|
| 72 |
+
|
| 73 |
+
num_none_grads = 0
|
| 74 |
+
for param_group in optimizer.param_groups:
|
| 75 |
+
# Make sure we only pass params with grad != None into torch
|
| 76 |
+
# clip_grad_norm_. Would fail otherwise.
|
| 77 |
+
params = list(filter(lambda p: p.grad is not None, param_group["params"]))
|
| 78 |
+
if params:
|
| 79 |
+
# PyTorch clips gradients inplace and returns the norm before clipping
|
| 80 |
+
# We therefore need to compute grad_gnorm further down (fixes #4965)
|
| 81 |
+
global_norm = nn.utils.clip_grad_norm_(params, clip_value)
|
| 82 |
+
|
| 83 |
+
if isinstance(global_norm, torch.Tensor):
|
| 84 |
+
global_norm = global_norm.cpu().numpy()
|
| 85 |
+
|
| 86 |
+
grad_gnorm += min(global_norm, clip_value)
|
| 87 |
+
else:
|
| 88 |
+
num_none_grads += 1
|
| 89 |
+
|
| 90 |
+
# Note (Kourosh): grads could indeed be zero. This method should still return
|
| 91 |
+
# grad_gnorm in that case.
|
| 92 |
+
if num_none_grads == len(optimizer.param_groups):
|
| 93 |
+
# No grads available
|
| 94 |
+
return {}
|
| 95 |
+
return {"grad_gnorm": grad_gnorm}
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@PublicAPI
|
| 99 |
+
def clip_gradients(
|
| 100 |
+
gradients_dict: "ParamDict",
|
| 101 |
+
*,
|
| 102 |
+
grad_clip: Optional[float] = None,
|
| 103 |
+
grad_clip_by: str = "value",
|
| 104 |
+
) -> TensorType:
|
| 105 |
+
"""Performs gradient clipping on a grad-dict based on a clip value and clip mode.
|
| 106 |
+
|
| 107 |
+
Changes the provided gradient dict in place.
|
| 108 |
+
|
| 109 |
+
Args:
|
| 110 |
+
gradients_dict: The gradients dict, mapping str to gradient tensors.
|
| 111 |
+
grad_clip: The value to clip with. The way gradients are clipped is defined
|
| 112 |
+
by the `grad_clip_by` arg (see below).
|
| 113 |
+
grad_clip_by: One of 'value', 'norm', or 'global_norm'.
|
| 114 |
+
|
| 115 |
+
Returns:
|
| 116 |
+
If `grad_clip_by`="global_norm" and `grad_clip` is not None, returns the global
|
| 117 |
+
norm of all tensors, otherwise returns None.
|
| 118 |
+
"""
|
| 119 |
+
# No clipping, return.
|
| 120 |
+
if grad_clip is None:
|
| 121 |
+
return
|
| 122 |
+
|
| 123 |
+
# Clip by value (each gradient individually).
|
| 124 |
+
if grad_clip_by == "value":
|
| 125 |
+
for k, v in gradients_dict.copy().items():
|
| 126 |
+
gradients_dict[k] = (
|
| 127 |
+
None if v is None else torch.clip(v, -grad_clip, grad_clip)
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
# Clip by L2-norm (per gradient tensor).
|
| 131 |
+
elif grad_clip_by == "norm":
|
| 132 |
+
for k, v in gradients_dict.copy().items():
|
| 133 |
+
if v is not None:
|
| 134 |
+
# Compute the L2-norm of the gradient tensor.
|
| 135 |
+
norm = v.norm(2).nan_to_num(neginf=-10e8, posinf=10e8)
|
| 136 |
+
# Clip all the gradients.
|
| 137 |
+
if norm > grad_clip:
|
| 138 |
+
v.mul_(grad_clip / norm)
|
| 139 |
+
|
| 140 |
+
# Clip by global L2-norm (across all gradient tensors).
|
| 141 |
+
else:
|
| 142 |
+
assert (
|
| 143 |
+
grad_clip_by == "global_norm"
|
| 144 |
+
), f"`grad_clip_by` ({grad_clip_by}) must be one of [value|norm|global_norm]!"
|
| 145 |
+
gradients_list = list(gradients_dict.values())
|
| 146 |
+
total_norm = compute_global_norm(gradients_list)
|
| 147 |
+
# We do want the coefficient to be in between 0.0 and 1.0, therefore
|
| 148 |
+
# if the global_norm is smaller than the clip value, we use the clip value
|
| 149 |
+
# as normalization constant.
|
| 150 |
+
device = gradients_list[0].device
|
| 151 |
+
clip_coef = grad_clip / torch.maximum(
|
| 152 |
+
torch.tensor(grad_clip).to(device), total_norm + 1e-6
|
| 153 |
+
)
|
| 154 |
+
# Note: multiplying by the clamped coef is redundant when the coef is clamped to
|
| 155 |
+
# 1, but doing so avoids a `if clip_coef < 1:` conditional which can require a
|
| 156 |
+
# CPU <=> device synchronization when the gradients do not reside in CPU memory.
|
| 157 |
+
clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
|
| 158 |
+
for g in gradients_list:
|
| 159 |
+
if g is not None:
|
| 160 |
+
g.detach().mul_(clip_coef_clamped.to(g.device))
|
| 161 |
+
return total_norm
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
@PublicAPI
|
| 165 |
+
def compute_global_norm(gradients_list: "ParamList") -> TensorType:
|
| 166 |
+
"""Computes the global norm for a gradients dict.
|
| 167 |
+
|
| 168 |
+
Args:
|
| 169 |
+
gradients_list: The gradients list containing parameters.
|
| 170 |
+
|
| 171 |
+
Returns:
|
| 172 |
+
Returns the global norm of all tensors in `gradients_list`.
|
| 173 |
+
"""
|
| 174 |
+
# Define the norm type to be L2.
|
| 175 |
+
norm_type = 2.0
|
| 176 |
+
# If we have no grads, return zero.
|
| 177 |
+
if len(gradients_list) == 0:
|
| 178 |
+
return torch.tensor(0.0)
|
| 179 |
+
device = gradients_list[0].device
|
| 180 |
+
|
| 181 |
+
# Compute the global norm.
|
| 182 |
+
total_norm = torch.norm(
|
| 183 |
+
torch.stack(
|
| 184 |
+
[
|
| 185 |
+
torch.norm(g.detach(), norm_type)
|
| 186 |
+
# Note, we want to avoid overflow in the norm computation, this does
|
| 187 |
+
# not affect the gradients themselves as we clamp by multiplying and
|
| 188 |
+
# not by overriding tensor values.
|
| 189 |
+
.nan_to_num(neginf=-10e8, posinf=10e8).to(device)
|
| 190 |
+
for g in gradients_list
|
| 191 |
+
if g is not None
|
| 192 |
+
]
|
| 193 |
+
),
|
| 194 |
+
norm_type,
|
| 195 |
+
).nan_to_num(neginf=-10e8, posinf=10e8)
|
| 196 |
+
if torch.logical_or(total_norm.isnan(), total_norm.isinf()):
|
| 197 |
+
raise RuntimeError(
|
| 198 |
+
f"The total norm of order {norm_type} for gradients from "
|
| 199 |
+
"`parameters` is non-finite, so it cannot be clipped. "
|
| 200 |
+
)
|
| 201 |
+
# Return the global norm.
|
| 202 |
+
return total_norm
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@PublicAPI
|
| 206 |
+
def concat_multi_gpu_td_errors(
|
| 207 |
+
policy: Union["TorchPolicy", "TorchPolicyV2"]
|
| 208 |
+
) -> Dict[str, TensorType]:
|
| 209 |
+
"""Concatenates multi-GPU (per-tower) TD error tensors given TorchPolicy.
|
| 210 |
+
|
| 211 |
+
TD-errors are extracted from the TorchPolicy via its tower_stats property.
|
| 212 |
+
|
| 213 |
+
Args:
|
| 214 |
+
policy: The TorchPolicy to extract the TD-error values from.
|
| 215 |
+
|
| 216 |
+
Returns:
|
| 217 |
+
A dict mapping strings "td_error" and "mean_td_error" to the
|
| 218 |
+
corresponding concatenated and mean-reduced values.
|
| 219 |
+
"""
|
| 220 |
+
td_error = torch.cat(
|
| 221 |
+
[
|
| 222 |
+
t.tower_stats.get("td_error", torch.tensor([0.0])).to(policy.device)
|
| 223 |
+
for t in policy.model_gpu_towers
|
| 224 |
+
],
|
| 225 |
+
dim=0,
|
| 226 |
+
)
|
| 227 |
+
policy.td_error = td_error
|
| 228 |
+
return {
|
| 229 |
+
"td_error": td_error,
|
| 230 |
+
"mean_td_error": torch.mean(td_error),
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
@PublicAPI
|
| 235 |
+
def convert_to_torch_tensor(
|
| 236 |
+
x: TensorStructType,
|
| 237 |
+
device: Optional[str] = None,
|
| 238 |
+
pin_memory: bool = False,
|
| 239 |
+
):
|
| 240 |
+
"""Converts any struct to torch.Tensors.
|
| 241 |
+
|
| 242 |
+
Args:
|
| 243 |
+
x: Any (possibly nested) struct, the values in which will be
|
| 244 |
+
converted and returned as a new struct with all leaves converted
|
| 245 |
+
to torch tensors.
|
| 246 |
+
device: The device to create the tensor on.
|
| 247 |
+
pin_memory: If True, will call the `pin_memory()` method on the created tensors.
|
| 248 |
+
|
| 249 |
+
Returns:
|
| 250 |
+
Any: A new struct with the same structure as `x`, but with all
|
| 251 |
+
values converted to torch Tensor types. This does not convert possibly
|
| 252 |
+
nested elements that are None because torch has no representation for that.
|
| 253 |
+
"""
|
| 254 |
+
|
| 255 |
+
def mapping(item):
|
| 256 |
+
if item is None:
|
| 257 |
+
# Torch has no representation for `None`, so we return None
|
| 258 |
+
return item
|
| 259 |
+
|
| 260 |
+
# Special handling of "Repeated" values.
|
| 261 |
+
if isinstance(item, RepeatedValues):
|
| 262 |
+
return RepeatedValues(
|
| 263 |
+
tree.map_structure(mapping, item.values), item.lengths, item.max_len
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
# Already torch tensor -> make sure it's on right device.
|
| 267 |
+
if torch.is_tensor(item):
|
| 268 |
+
tensor = item
|
| 269 |
+
# Numpy arrays.
|
| 270 |
+
elif isinstance(item, np.ndarray):
|
| 271 |
+
# Object type (e.g. info dicts in train batch): leave as-is.
|
| 272 |
+
# str type (e.g. agent_id in train batch): leave as-is.
|
| 273 |
+
if item.dtype == object or item.dtype.type is np.str_:
|
| 274 |
+
return item
|
| 275 |
+
# Non-writable numpy-arrays will cause PyTorch warning.
|
| 276 |
+
elif item.flags.writeable is False:
|
| 277 |
+
with warnings.catch_warnings():
|
| 278 |
+
warnings.simplefilter("ignore")
|
| 279 |
+
tensor = torch.from_numpy(item)
|
| 280 |
+
# Already numpy: Wrap as torch tensor.
|
| 281 |
+
else:
|
| 282 |
+
tensor = torch.from_numpy(item)
|
| 283 |
+
# Everything else: Convert to numpy, then wrap as torch tensor.
|
| 284 |
+
else:
|
| 285 |
+
tensor = torch.from_numpy(np.asarray(item))
|
| 286 |
+
|
| 287 |
+
# Floatify all float64 tensors (but leave float16 as-is).
|
| 288 |
+
if tensor.is_floating_point() and str(tensor.dtype) != "torch.float16":
|
| 289 |
+
tensor = tensor.float()
|
| 290 |
+
|
| 291 |
+
# Pin the tensor's memory (for faster transfer to GPU later).
|
| 292 |
+
if pin_memory and torch.cuda.is_available():
|
| 293 |
+
tensor.pin_memory()
|
| 294 |
+
|
| 295 |
+
return tensor if device is None else tensor.to(device)
|
| 296 |
+
|
| 297 |
+
return tree.map_structure(mapping, x)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
@PublicAPI
|
| 301 |
+
def copy_torch_tensors(x: TensorStructType, device: Optional[str] = None):
|
| 302 |
+
"""Creates a copy of `x` and makes deep copies torch.Tensors in x.
|
| 303 |
+
|
| 304 |
+
Also moves the copied tensors to the specified device (if not None).
|
| 305 |
+
|
| 306 |
+
Note if an object in x is not a torch.Tensor, it will be shallow-copied.
|
| 307 |
+
|
| 308 |
+
Args:
|
| 309 |
+
x : Any (possibly nested) struct possibly containing torch.Tensors.
|
| 310 |
+
device : The device to move the tensors to.
|
| 311 |
+
|
| 312 |
+
Returns:
|
| 313 |
+
Any: A new struct with the same structure as `x`, but with all
|
| 314 |
+
torch.Tensors deep-copied and moved to the specified device.
|
| 315 |
+
|
| 316 |
+
"""
|
| 317 |
+
|
| 318 |
+
def mapping(item):
|
| 319 |
+
if isinstance(item, torch.Tensor):
|
| 320 |
+
return (
|
| 321 |
+
torch.clone(item.detach())
|
| 322 |
+
if device is None
|
| 323 |
+
else item.detach().to(device)
|
| 324 |
+
)
|
| 325 |
+
else:
|
| 326 |
+
return item
|
| 327 |
+
|
| 328 |
+
return tree.map_structure(mapping, x)
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
@PublicAPI
|
| 332 |
+
def explained_variance(y: TensorType, pred: TensorType) -> TensorType:
|
| 333 |
+
"""Computes the explained variance for a pair of labels and predictions.
|
| 334 |
+
|
| 335 |
+
The formula used is:
|
| 336 |
+
max(-1.0, 1.0 - (std(y - pred)^2 / std(y)^2))
|
| 337 |
+
|
| 338 |
+
Args:
|
| 339 |
+
y: The labels.
|
| 340 |
+
pred: The predictions.
|
| 341 |
+
|
| 342 |
+
Returns:
|
| 343 |
+
The explained variance given a pair of labels and predictions.
|
| 344 |
+
"""
|
| 345 |
+
y_var = torch.var(y, dim=[0])
|
| 346 |
+
diff_var = torch.var(y - pred, dim=[0])
|
| 347 |
+
min_ = torch.tensor([-1.0]).to(pred.device)
|
| 348 |
+
return torch.max(min_, 1 - (diff_var / (y_var + SMALL_NUMBER)))[0]
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
@PublicAPI
def flatten_inputs_to_1d_tensor(
    inputs: TensorStructType,
    spaces_struct: Optional[SpaceStruct] = None,
    time_axis: bool = False,
) -> TensorType:
    """Flattens an arbitrary input struct into a single concatenated tensor.

    Each leaf of `inputs` is transformed according to its corresponding leaf
    space in `spaces_struct`:
    - Discrete values are one-hot encoded.
    - MultiDiscrete values are multi-one-hot encoded.
    - Everything else (e.g. Boxes, including image Boxes) is flattened to
      (B, [T]?, -1).
    The per-leaf results are then concatenated along the last axis.

    Args:
        inputs: The (possibly nested) inputs to be flattened.
        spaces_struct: The structure of spaces matching `inputs`. If None,
            all leaves are treated as Box-like and simply flattened.
        time_axis: Whether all inputs have a time axis (after the batch axis).
            If True, keeps the batch (0th) and time (1st) axes as-is and
            flattens everything from the 2nd axis up.

    Returns:
        A single tensor of shape (B, n), or (B, T, n) if `time_axis` is True,
        resulting from concatenating all flattened/one-hot'd components.
    """
    flat_inputs = tree.flatten(inputs)
    if spaces_struct is None:
        flat_spaces = [None] * len(flat_inputs)
    else:
        flat_spaces = tree.flatten(spaces_struct)

    batch_dim = None
    time_dim = None
    pieces = []
    for tensor, space in zip(flat_inputs, flat_spaces):
        # Capture batch (and, if applicable, time) dimension from the first leaf.
        if batch_dim is None:
            batch_dim = tensor.shape[0]
            if time_axis:
                time_dim = tensor.shape[1]

        if isinstance(space, Discrete):
            # One-hot encoding (fold time into batch first, if present).
            if time_axis:
                tensor = torch.reshape(tensor, [batch_dim * time_dim])
            pieces.append(one_hot(tensor, space).float())
        elif isinstance(space, MultiDiscrete):
            # Multi one-hot encoding.
            if time_axis:
                tensor = torch.reshape(tensor, [batch_dim * time_dim, -1])
            pieces.append(one_hot(tensor, space).float())
        else:
            # Box (or unknown) leaf: plain flatten.
            if time_axis:
                tensor = torch.reshape(tensor, [batch_dim * time_dim, -1])
            else:
                tensor = torch.reshape(tensor, [batch_dim, -1])
            pieces.append(tensor.float())

    merged = torch.cat(pieces, dim=-1)
    # Restore the time dimension, if applicable.
    if time_axis:
        merged = torch.reshape(merged, [batch_dim, time_dim, -1])

    return merged
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
@PublicAPI
def global_norm(tensors: List[TensorType]) -> TensorType:
    """Returns the global L2 norm over a list of tensors.

    output = sqrt(SUM(t ** 2 for t in tensors)),
    where the sum runs over all elements of all tensors.

    Args:
        tensors: The list of tensors to calculate the global norm over.

    Returns:
        The global L2 norm over the given tensor list.
    """
    # Per-tensor L2 norms: sqrt(sum(x_i^2)) over each tensor's elements.
    per_tensor_l2 = [torch.pow(torch.sum(torch.pow(t, 2.0)), 0.5) for t in tensors]
    # Combine into the global norm: sqrt(sum of squared per-tensor norms).
    squared_total = sum(torch.pow(norm, 2.0) for norm in per_tensor_l2)
    return torch.pow(squared_total, 0.5)
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
@PublicAPI
def huber_loss(x: TensorType, delta: float = 1.0) -> TensorType:
    """Computes the huber loss for a given term and delta parameter.

    Reference: https://en.wikipedia.org/wiki/Huber_loss
    Note that the factor of 0.5 is implicitly included in the calculation.

    Formula:
        L = 0.5 * x^2 for small abs x (delta threshold)
        L = delta * (abs(x) - 0.5*delta) for larger abs x (delta threshold)

    Args:
        x: The input term, e.g. a TD error.
        delta: The delta parameter in the above formula.

    Returns:
        The Huber loss resulting from `x` and `delta`.
    """
    abs_x = torch.abs(x)
    quadratic = torch.pow(x, 2.0) * 0.5
    linear = delta * (abs_x - 0.5 * delta)
    # Quadratic inside the delta threshold, linear outside.
    return torch.where(abs_x < delta, quadratic, linear)
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
@PublicAPI
def l2_loss(x: TensorType) -> TensorType:
    """Computes half the L2 norm over a tensor's values without the sqrt.

    output = 0.5 * sum(x ** 2)

    Args:
        x: The input tensor.

    Returns:
        0.5 times the L2 norm over the given tensor's values (w/o sqrt).
    """
    return torch.sum(torch.pow(x, 2.0)) * 0.5
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
@PublicAPI
def minimize_and_clip(
    optimizer: "torch.optim.Optimizer", clip_val: float = 10.0
) -> None:
    """Clips grads found in `optimizer.param_groups` to given value in place.

    Ensures the norm of the gradients for each variable is clipped to
    `clip_val`.

    Args:
        optimizer: The torch.optim.Optimizer to get the variables from.
        clip_val: The norm clip value applied per variable's gradient.
    """
    # Collect all parameters that actually carry a gradient, then clip
    # each gradient's norm individually (in place).
    grads_to_clip = (
        p.grad
        for group in optimizer.param_groups
        for p in group["params"]
        if p.grad is not None
    )
    for grad in grads_to_clip:
        torch.nn.utils.clip_grad_norm_(grad, clip_val)
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
@PublicAPI
def one_hot(x: TensorType, space: gym.Space) -> TensorType:
    """Returns a one-hot tensor, given an int tensor and a space.

    Handles the MultiDiscrete case as well (multi-one-hot: one one-hot
    segment per sub-dimension, concatenated along the last axis).

    Args:
        x: The input tensor holding int category values.
        space: The Discrete or MultiDiscrete space to one-hot against.

    Returns:
        The resulting one-hot tensor.

    Raises:
        ValueError: If the given space is not a discrete one.
    """
    if isinstance(space, Discrete):
        return nn.functional.one_hot(x.long(), space.n)
    if isinstance(space, MultiDiscrete):
        nvec = space.nvec
        # Nested MultiDiscrete (nvec is 2D): flatten both the category
        # counts and the input's trailing dims.
        if isinstance(nvec[0], np.ndarray):
            nvec = np.ravel(nvec)
            x = x.reshape(x.shape[0], -1)
        segments = [
            nn.functional.one_hot(x[:, i].long(), n) for i, n in enumerate(nvec)
        ]
        return torch.cat(segments, dim=-1)
    raise ValueError("Unsupported space for `one_hot`: {}".format(space))
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
@PublicAPI
def reduce_mean_ignore_inf(x: TensorType, axis: Optional[int] = None) -> TensorType:
    """Same as torch.mean() but ignores -inf values.

    Args:
        x: The input tensor to reduce mean over.
        axis: The axis over which to reduce. None for all axes.

    Returns:
        The mean reduced inputs, ignoring -inf values.
    """
    finite_mask = torch.ne(x, float("-inf"))
    # Zero out the -inf entries, then average over the finite count only.
    zeroed = torch.where(finite_mask, x, torch.zeros_like(x))
    return torch.sum(zeroed, axis) / torch.sum(finite_mask.float(), axis)
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
@PublicAPI
def sequence_mask(
    lengths: TensorType,
    maxlen: Optional[int] = None,
    dtype=None,
    time_major: bool = False,
) -> TensorType:
    """Offers same behavior as tf.sequence_mask for torch.

    Thanks to Dimitris Papatheodorou
    (https://discuss.pytorch.org/t/pytorch-equivalent-for-tf-sequence-mask/
    39036).

    Args:
        lengths: The tensor of individual lengths to mask by.
        maxlen: The maximum length to use for the time axis. If None, use
            the max of `lengths`.
        dtype: The torch dtype to use for the resulting mask.
        time_major: Whether to return the mask as [B, T] (False; default) or
            as [T, B] (True).

    Returns:
        The sequence mask resulting from the given input and parameters.
    """
    # If maxlen not given, use the longest lengths in the `lengths` tensor.
    if maxlen is None:
        maxlen = lengths.max()

    mask = torch.ones(tuple(lengths.shape) + (int(maxlen),))

    # Position t is valid iff the running count (cumsum) has not exceeded
    # the sequence's length. The transpose yields a [T, B] layout here.
    mask = ~(mask.to(lengths.device).cumsum(dim=1).t() > lengths)
    # Time major transformation.
    if not time_major:
        mask = mask.t()

    # Bugfix: `Tensor.type()` is NOT in-place; the original discarded its
    # result, silently ignoring the `dtype` argument. Assign it so the
    # requested dtype (default: bool, which was also the de-facto old
    # behavior) is actually applied.
    mask = mask.type(dtype or torch.bool)

    return mask
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
@PublicAPI
def update_target_network(
    main_net: NetworkType,
    target_net: NetworkType,
    tau: float,
) -> None:
    """Updates a torch.nn.Module target network using Polyak averaging.

    new_target_net_weight = (
        tau * main_net_weight + (1.0 - tau) * current_target_net_weight
    )

    Args:
        main_net: The nn.Module to update from.
        target_net: The target network to update.
        tau: The tau value to use in the Polyak averaging formula.
    """
    # Current parameters of the main (e.g. Q) network.
    main_state = main_net.state_dict()
    # Blend every target parameter toward its main-net counterpart.
    blended_state = {
        name: tau * main_state[name] + (1 - tau) * target_param
        for name, target_param in target_net.state_dict().items()
    }
    # Write the blended parameters back into the target network.
    target_net.load_state_dict(blended_state)
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
@DeveloperAPI
def warn_if_infinite_kl_divergence(
    policy: "TorchPolicy",
    kl_divergence: TensorType,
) -> None:
    """Logs a warning when a non-finite KL divergence shows up after loss init."""
    # Only warn once the loss has been initialized; finite KL needs no action.
    if not policy.loss_initialized():
        return
    if not kl_divergence.isinf():
        return
    logger.warning(
        "KL divergence is non-finite, this will likely destabilize your model and"
        " the training process. Action(s) in a specific state have near-zero"
        " probability. This can happen naturally in deterministic environments"
        " where the optimal policy has zero mass for a specific action. To fix this"
        " issue, consider setting the coefficient for the KL loss term to zero or"
        " increasing policy entropy."
    )
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
@PublicAPI
def set_torch_seed(seed: Optional[int] = None) -> None:
    """Sets the torch random seed to the given value.

    Args:
        seed: The seed to use or None for no seeding.
    """
    # Nothing to do without a seed or without torch being importable.
    if seed is None or not torch:
        return

    torch.manual_seed(seed)
    # See https://github.com/pytorch/pytorch/issues/47672.
    cuda_version = torch.version.cuda
    if cuda_version is not None and float(torch.version.cuda) >= 10.2:
        os.environ["CUBLAS_WORKSPACE_CONFIG"] = "4096:8"
    else:
        # Not all Operations support this.
        torch.use_deterministic_algorithms(True)
    # This is only for Convolution no problem.
    torch.backends.cudnn.deterministic = True
|
| 718 |
+
|
| 719 |
+
|
| 720 |
+
@PublicAPI
def softmax_cross_entropy_with_logits(
    logits: TensorType,
    labels: TensorType,
) -> TensorType:
    """Same behavior as tf.nn.softmax_cross_entropy_with_logits.

    Args:
        logits: The input predictions (pre-softmax).
        labels: The labels corresponding to `logits`.

    Returns:
        The resulting softmax cross-entropy given predictions and labels.
    """
    log_probs = nn.functional.log_softmax(logits, -1)
    return torch.sum(-labels * log_probs, -1)
|
| 735 |
+
|
| 736 |
+
|
| 737 |
+
def _dynamo_is_available():
|
| 738 |
+
# This only works if torch._dynamo is available
|
| 739 |
+
try:
|
| 740 |
+
# TODO(Artur): Remove this once torch._dynamo is available on CI
|
| 741 |
+
import torch._dynamo as dynamo # noqa: F401
|
| 742 |
+
|
| 743 |
+
return True
|
| 744 |
+
except ImportError:
|
| 745 |
+
return False
|
evalkit_tf433/lib/python3.10/site-packages/google/api_core/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (376 Bytes). View file
|
|
|