ZTWHHH committed on
Commit
47a9978
·
verified ·
1 Parent(s): c0d2dbe

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. deepseek/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so +3 -0
  3. deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/__init__.cpython-310.pyc +0 -0
  4. deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/env_context.cpython-310.pyc +0 -0
  5. deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/env_runner.cpython-310.pyc +0 -0
  6. deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/multi_agent_env_runner.cpython-310.pyc +0 -0
  7. deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/policy_client.cpython-310.pyc +0 -0
  8. deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/policy_server_input.cpython-310.pyc +0 -0
  9. deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/single_agent_env_runner.cpython-310.pyc +0 -0
  10. deepseek/lib/python3.10/site-packages/ray/rllib/env/apis/__pycache__/__init__.cpython-310.pyc +0 -0
  11. deepseek/lib/python3.10/site-packages/ray/rllib/env/env_runner.py +168 -0
  12. deepseek/lib/python3.10/site-packages/ray/rllib/env/external_env.py +481 -0
  13. deepseek/lib/python3.10/site-packages/ray/rllib/env/utils/__init__.py +124 -0
  14. deepseek/lib/python3.10/site-packages/ray/rllib/env/utils/__pycache__/infinite_lookback_buffer.cpython-310.pyc +0 -0
  15. deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__init__.py +0 -0
  16. deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/atari_wrappers.cpython-310.pyc +0 -0
  17. deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/dm_control_wrapper.cpython-310.pyc +0 -0
  18. deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/dm_env_wrapper.cpython-310.pyc +0 -0
  19. deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/group_agents_wrapper.cpython-310.pyc +0 -0
  20. deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/open_spiel.cpython-310.pyc +0 -0
  21. deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/unity3d_env.cpython-310.pyc +0 -0
  22. deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/dm_control_wrapper.py +220 -0
  23. deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/exception_wrapper.py +38 -0
  24. deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/unity3d_env.py +381 -0
  25. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/__init__.cpython-310.pyc +0 -0
  26. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/cartpole_mass.cpython-310.pyc +0 -0
  27. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/coin_game_non_vectorized_env.cpython-310.pyc +0 -0
  28. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/env_with_subprocess.cpython-310.pyc +0 -0
  29. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/multi_agent.cpython-310.pyc +0 -0
  30. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/pendulum_mass.cpython-310.pyc +0 -0
  31. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/pettingzoo_chess.cpython-310.pyc +0 -0
  32. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/random_env.cpython-310.pyc +0 -0
  33. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/repeat_initial_obs_env.cpython-310.pyc +0 -0
  34. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/two_step_game.cpython-310.pyc +0 -0
  35. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/utils/__init__.py +0 -0
  36. deepseek/lib/python3.10/site-packages/ray/rllib/examples/ray_tune/custom_experiment.py +183 -0
  37. deepseek/lib/python3.10/site-packages/ray/rllib/models/catalog.py +905 -0
  38. deepseek/lib/python3.10/site-packages/ray/rllib/models/distributions.py +248 -0
  39. deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/complex_input_net.py +237 -0
  40. deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/convtranspose2d_stack.cpython-310.pyc +0 -0
  41. deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/gru_gate.cpython-310.pyc +0 -0
  42. deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/relative_multi_head_attention.cpython-310.pyc +0 -0
  43. deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/multi_head_attention.py +70 -0
  44. deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/noisy_layer.py +99 -0
  45. evalkit_tf433/lib/python3.10/site-packages/google/api_core/_rest_streaming_base.py +118 -0
  46. evalkit_tf433/lib/python3.10/site-packages/google/api_core/client_info.py +108 -0
  47. evalkit_tf433/lib/python3.10/site-packages/google/api_core/client_logging.py +144 -0
  48. evalkit_tf433/lib/python3.10/site-packages/google/api_core/client_options.py +153 -0
  49. evalkit_tf433/lib/python3.10/site-packages/google/api_core/extended_operation.py +225 -0
  50. evalkit_tf433/lib/python3.10/site-packages/google/api_core/general_helpers.py +16 -0
.gitattributes CHANGED
@@ -1417,3 +1417,4 @@ evalkit_tf433/lib/libncurses++.a filter=lfs diff=lfs merge=lfs -text
1417
  evalkit_tf433/lib/libasan.so filter=lfs diff=lfs merge=lfs -text
1418
  evalkit_tf433/lib/python3.10/lib2to3/tests/__pycache__/test_fixers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1419
  deepseek/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
1417
  evalkit_tf433/lib/libasan.so filter=lfs diff=lfs merge=lfs -text
1418
  evalkit_tf433/lib/python3.10/lib2to3/tests/__pycache__/test_fixers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1419
  deepseek/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1420
+ deepseek/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
deepseek/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fa898fa09439b50c1f316bc36c06c60fbb25269b653de81f01066339f189ea0
3
+ size 322128
deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.31 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/env_context.cpython-310.pyc ADDED
Binary file (5.09 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/env_runner.cpython-310.pyc ADDED
Binary file (6.33 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/multi_agent_env_runner.cpython-310.pyc ADDED
Binary file (19.6 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/policy_client.cpython-310.pyc ADDED
Binary file (12 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/policy_server_input.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/single_agent_env_runner.cpython-310.pyc ADDED
Binary file (17 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/apis/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/env_runner.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import abc
import logging
from typing import Any, Dict, Tuple, TYPE_CHECKING

import gymnasium as gym
import tree  # pip install dm_tree

from ray.rllib.utils.actor_manager import FaultAwareApply
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.torch_utils import convert_to_torch_tensor
from ray.rllib.utils.typing import TensorType
from ray.util.annotations import PublicAPI

if TYPE_CHECKING:
    from ray.rllib.algorithms.algorithm_config import AlgorithmConfig

logger = logging.getLogger("ray.rllib")

tf1, tf, _ = try_import_tf()

# Sentinel strings used to signal env failures to callers of the
# `_try_env_reset` / `_try_env_step` helpers below.
ENV_RESET_FAILURE = "env_reset_failure"
ENV_STEP_FAILURE = "env_step_failure"


# TODO (sven): As soon as RolloutWorker is no longer supported, make this base class
# a Checkpointable. Currently, only some of its subclasses are Checkpointables.
@PublicAPI(stability="alpha")
class EnvRunner(FaultAwareApply, metaclass=abc.ABCMeta):
    """Base class for distributed RL-style data collection from an environment.

    The EnvRunner API's core functionalities can be summarized as:
    - Gets configured via passing a AlgorithmConfig object to the constructor.
    Normally, subclasses of EnvRunner then construct their own environment (possibly
    vectorized) copies and RLModules/Policies and use the latter to step through the
    environment in order to collect training data.
    - Clients of EnvRunner can use the `sample()` method to collect data for training
    from the environment(s).
    - EnvRunner offers parallelism via creating n remote Ray Actors based on this class.
    Use `ray.remote([resources])(EnvRunner)` method to create the corresponding Ray
    remote class. Then instantiate n Actors using the Ray `[ctor].remote(...)` syntax.
    - EnvRunner clients can get information about the server/node on which the
    individual Actors are running.
    """

    def __init__(self, *, config: "AlgorithmConfig", **kwargs):
        """Initializes an EnvRunner instance.

        Args:
            config: The AlgorithmConfig to use to setup this EnvRunner.
            **kwargs: Forward compatibility kwargs.
        """
        # Copy (unfrozen) so that subclasses may freely mutate their own config.
        self.config = config.copy(copy_frozen=False)
        self.env = None

        super().__init__(**kwargs)

        # This eager check is necessary for certain all-framework tests
        # that use tf's eager_mode() context generator.
        if (
            tf1
            and (self.config.framework_str == "tf2" or config.enable_tf1_exec_eagerly)
            and not tf1.executing_eagerly()
        ):
            tf1.enable_eager_execution()

    @abc.abstractmethod
    def assert_healthy(self):
        """Checks that self.__init__() has been completed properly.

        Useful in case an `EnvRunner` is run as @ray.remote (Actor) and the owner
        would like to make sure the Ray Actor has been properly initialized.

        Raises:
            AssertionError: If the EnvRunner Actor has NOT been properly initialized.
        """

    # TODO: Make this an abstract method that must be implemented.
    def make_env(self):
        """Creates the RL environment for this EnvRunner and assigns it to `self.env`.

        Note that users should be able to change the EnvRunner's config (e.g. change
        `self.config.env_config`) and then call this method to create new environments
        with the updated configuration.
        It should also be called after a failure of an earlier env in order to clean up
        the existing env (for example `close()` it), re-create a new one, and then
        continue sampling with that new env.
        """
        pass

    @abc.abstractmethod
    def sample(self, **kwargs) -> Any:
        """Returns experiences (of any form) sampled from this EnvRunner.

        The exact nature and size of collected data are defined via the EnvRunner's
        config and may be overridden by the given arguments.

        Args:
            **kwargs: Forward compatibility kwargs.

        Returns:
            The collected experience in any form.
        """

    @abc.abstractmethod
    def get_spaces(self) -> Dict[str, Tuple[gym.Space, gym.Space]]:
        """Returns a dict mapping ModuleIDs to 2-tuples of obs- and action space."""

    def stop(self) -> None:
        """Releases all resources used by this EnvRunner.

        For example, when using a gym.Env in this EnvRunner, you should make sure
        that its `close()` method is called.
        """
        pass

    def __del__(self) -> None:
        """If this Actor is deleted, clears all resources used by it."""
        pass

    def _try_env_reset(self):
        """Tries resetting the env and - if an error occurs - handles it gracefully.

        Returns:
            The `(obs, infos)` tuple produced by `self.env.reset()`.

        Raises:
            Exception: The original reset error, if
                `config.restart_failed_sub_environments` is False.
        """
        # Try to reset.
        try:
            obs, infos = self.env.reset()
            # Everything ok -> return.
            return obs, infos
        # Error.
        except Exception as e:
            # If user wants to simply restart the env -> recreate env and try again
            # (calling this method recursively until success).
            # NOTE(review): this recursion is unbounded if the env keeps failing.
            if self.config.restart_failed_sub_environments:
                # Use `{e}` (not `e.args[0]`): an exception created without args
                # would make the old message formatting raise an IndexError.
                logger.exception(
                    "Resetting the env resulted in an error! The original error "
                    f"is: {e}"
                )
                # Recreate the env and simply try again.
                self.make_env()
                return self._try_env_reset()
            else:
                # Bare `raise` re-raises with the original traceback intact.
                raise

    def _try_env_step(self, actions):
        """Tries stepping the env and - if an error occurs - handles it gracefully.

        Args:
            actions: The actions to pass to `self.env.step()`.

        Returns:
            The step results, or the `ENV_STEP_FAILURE` sentinel if stepping failed
            and `config.restart_failed_sub_environments` is True.

        Raises:
            Exception: The original step error, if
                `config.restart_failed_sub_environments` is False.
        """
        try:
            results = self.env.step(actions)
            return results
        except Exception as e:
            if self.config.restart_failed_sub_environments:
                # Use `{e}` (not `e.args[0]`) to stay safe for arg-less exceptions.
                logger.exception(
                    "Stepping the env resulted in an error! The original error "
                    f"is: {e}"
                )
                # Recreate the env.
                self.make_env()
                # And return that the stepping failed. The caller will then handle
                # specific cleanup operations (for example discarding thus-far collected
                # data and repeating the step attempt).
                return ENV_STEP_FAILURE
            else:
                raise

    def _convert_to_tensor(self, struct) -> TensorType:
        """Converts structs to a framework-specific tensor (torch or tf)."""

        if self.config.framework_str == "torch":
            return convert_to_torch_tensor(struct)
        else:
            return tree.map_structure(tf.convert_to_tensor, struct)
deepseek/lib/python3.10/site-packages/ray/rllib/env/external_env.py ADDED
@@ -0,0 +1,481 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gymnasium as gym
2
+ import queue
3
+ import threading
4
+ import uuid
5
+ from typing import Callable, Tuple, Optional, TYPE_CHECKING
6
+
7
+ from ray.rllib.env.base_env import BaseEnv
8
+ from ray.rllib.utils.annotations import override, OldAPIStack
9
+ from ray.rllib.utils.typing import (
10
+ EnvActionType,
11
+ EnvInfoDict,
12
+ EnvObsType,
13
+ EnvType,
14
+ MultiEnvDict,
15
+ )
16
+ from ray.rllib.utils.deprecation import deprecation_warning
17
+
18
+ if TYPE_CHECKING:
19
+ from ray.rllib.models.preprocessors import Preprocessor
20
+
21
+
22
@OldAPIStack
class ExternalEnv(threading.Thread):
    """An environment that interfaces with external agents.

    Unlike simulator envs, control is inverted: The environment queries the
    policy to obtain actions and in return logs observations and rewards for
    training. This is in contrast to gym.Env, where the algorithm drives the
    simulation through env.step() calls.

    You can use ExternalEnv as the backend for policy serving (by serving HTTP
    requests in the run loop), for ingesting offline logs data (by reading
    offline transitions in the run loop), or other custom use cases not easily
    expressed through gym.Env.

    ExternalEnv supports both on-policy actions (through self.get_action()),
    and off-policy actions (through self.log_action()).

    This env is thread-safe, but individual episodes must be executed serially.

    .. testcode::
        :skipif: True

        from ray.tune import register_env
        from ray.rllib.algorithms.dqn import DQN
        YourExternalEnv = ...
        register_env("my_env", lambda config: YourExternalEnv(config))
        algo = DQN(env="my_env")
        while True:
            print(algo.train())
    """

    def __init__(
        self,
        action_space: gym.Space,
        observation_space: gym.Space,
        max_concurrent: Optional[int] = None,
    ):
        """Initializes an ExternalEnv instance.

        Args:
            action_space: Action space of the env.
            observation_space: Observation space of the env.
            max_concurrent: Deprecated; passing any non-None value raises an
                error via `deprecation_warning(error=True)`.
        """

        threading.Thread.__init__(self)

        # Daemonize so an abandoned serving thread never blocks interpreter
        # shutdown.
        self.daemon = True
        self.action_space = action_space
        self.observation_space = observation_space
        self._episodes = {}
        self._finished = set()
        self._results_avail_condition = threading.Condition()
        if max_concurrent is not None:
            # NOTE: separator spaces added between the adjacent string
            # literals; the old message rendered as "...configurethe..." and
            # "...and`batch_mode`...".
            deprecation_warning(
                "The `max_concurrent` argument has been deprecated. Please "
                "configure the number of episodes using the "
                "`rollout_fragment_length` and `batch_mode` arguments. Please "
                "raise an issue on the Ray Github if these arguments do not "
                "support your expected use case for ExternalEnv",
                error=True,
            )

    def run(self):
        """Override this to implement the run loop.

        Your loop should continuously:
        1. Call self.start_episode(episode_id)
        2. Call self.[get|log]_action(episode_id, obs, [action]?)
        3. Call self.log_returns(episode_id, reward)
        4. Call self.end_episode(episode_id, obs)
        5. Wait if nothing to do.

        Multiple episodes may be started at the same time.
        """
        raise NotImplementedError

    def start_episode(
        self, episode_id: Optional[str] = None, training_enabled: bool = True
    ) -> str:
        """Record the start of an episode.

        Args:
            episode_id: Unique string id for the episode or
                None for it to be auto-assigned and returned.
            training_enabled: Whether to use experiences for this
                episode to improve the policy.

        Returns:
            Unique string id for the episode.

        Raises:
            ValueError: If the episode is already finished or already started.
        """

        if episode_id is None:
            episode_id = uuid.uuid4().hex

        if episode_id in self._finished:
            raise ValueError("Episode {} has already completed.".format(episode_id))

        if episode_id in self._episodes:
            raise ValueError("Episode {} is already started".format(episode_id))

        self._episodes[episode_id] = _ExternalEnvEpisode(
            episode_id, self._results_avail_condition, training_enabled
        )

        return episode_id

    def get_action(self, episode_id: str, observation: EnvObsType) -> EnvActionType:
        """Record an observation and get the on-policy action.

        Args:
            episode_id: Episode id returned from start_episode().
            observation: Current environment observation.

        Returns:
            Action from the env action space.
        """

        episode = self._get(episode_id)
        return episode.wait_for_action(observation)

    def log_action(
        self, episode_id: str, observation: EnvObsType, action: EnvActionType
    ) -> None:
        """Record an observation and (off-policy) action taken.

        Args:
            episode_id: Episode id returned from start_episode().
            observation: Current environment observation.
            action: Action for the observation.
        """

        episode = self._get(episode_id)
        episode.log_action(observation, action)

    def log_returns(
        self, episode_id: str, reward: float, info: Optional[EnvInfoDict] = None
    ) -> None:
        """Records returns (rewards and infos) from the environment.

        The reward will be attributed to the previous action taken by the
        episode. Rewards accumulate until the next action. If no reward is
        logged before the next action, a reward of 0.0 is assumed.

        Args:
            episode_id: Episode id returned from start_episode().
            reward: Reward from the environment.
            info: Optional info dict.
        """

        episode = self._get(episode_id)
        episode.cur_reward += reward

        if info:
            # `info` is known truthy here, so assign it directly (the previous
            # `info or {}` was dead code).
            episode.cur_info = info

    def end_episode(self, episode_id: str, observation: EnvObsType) -> None:
        """Records the end of an episode.

        Args:
            episode_id: Episode id returned from start_episode().
            observation: Current environment observation.
        """

        episode = self._get(episode_id)
        self._finished.add(episode.episode_id)
        episode.done(observation)

    def _get(self, episode_id: str) -> "_ExternalEnvEpisode":
        """Get a started episode by its ID or raise an error.

        Raises:
            ValueError: If the episode already finished or was never started.
        """

        if episode_id in self._finished:
            raise ValueError("Episode {} has already completed.".format(episode_id))

        if episode_id not in self._episodes:
            raise ValueError("Episode {} not found.".format(episode_id))

        return self._episodes[episode_id]

    def to_base_env(
        self,
        make_env: Optional[Callable[[int], EnvType]] = None,
        num_envs: int = 1,
        remote_envs: bool = False,
        remote_env_batch_wait_ms: int = 0,
        restart_failed_sub_environments: bool = False,
    ) -> "BaseEnv":
        """Converts this ExternalEnv into a BaseEnv object.

        The resulting BaseEnv is always vectorized (contains n
        sub-environments) to support batched forward passes, where n may
        also be 1. BaseEnv also supports async execution via the `poll` and
        `send_actions` methods and thus supports external simulators.

        Args:
            make_env: A callable taking an int as input (which indicates
                the number of individual sub-environments within the final
                vectorized BaseEnv) and returning one individual
                sub-environment. Unused here (kept for API compatibility).
            num_envs: The number of sub-environments to create in the
                resulting (vectorized) BaseEnv. Must be 1 for ExternalEnv.
            remote_envs: Whether each sub-env should be a @ray.remote
                actor. Unused here (kept for API compatibility).
            remote_env_batch_wait_ms: The wait time (in ms) to poll remote
                sub-environments for, if applicable. Unused here.
            restart_failed_sub_environments: Unused here.

        Returns:
            The resulting BaseEnv object.

        Raises:
            ValueError: If `num_envs` != 1.
        """
        if num_envs != 1:
            raise ValueError(
                "External(MultiAgent)Env does not currently support "
                "num_envs > 1. One way of solving this would be to "
                "treat your Env as a MultiAgentEnv hosting only one "
                "type of agent but with several copies."
            )
        env = ExternalEnvWrapper(self)

        return env
242
+
243
+
244
@OldAPIStack
class _ExternalEnvEpisode:
    """Tracked state for each active episode.

    Bridges the user's serving thread and RLlib's sampler: observations and
    rewards flow out through `data_queue`, actions flow back in through
    `action_queue`.
    """

    def __init__(
        self,
        episode_id: str,
        results_avail_condition: threading.Condition,
        training_enabled: bool,
        multiagent: bool = False,
    ):
        self.episode_id = episode_id
        self.results_avail_condition = results_avail_condition
        self.training_enabled = training_enabled
        self.multiagent = multiagent
        # Producer/consumer queues between the serving thread and the poller.
        self.data_queue = queue.Queue()
        self.action_queue = queue.Queue()
        if multiagent:
            # Per-agent dict state.
            self.new_observation_dict = None
            self.new_action_dict = None
            self.cur_reward_dict = {}
            self.cur_terminated_dict = {"__all__": False}
            self.cur_truncated_dict = {"__all__": False}
            self.cur_info_dict = {}
        else:
            # Scalar (single-agent) state.
            self.new_observation = None
            self.new_action = None
            self.cur_reward = 0.0
            self.cur_terminated = False
            self.cur_truncated = False
            self.cur_info = {}

    def get_data(self):
        """Pops the next pending transition, or None if the queue is empty."""
        return None if self.data_queue.empty() else self.data_queue.get_nowait()

    def log_action(self, observation, action):
        """Records an off-policy (observation, action) pair."""
        if self.multiagent:
            self.new_observation_dict = observation
            self.new_action_dict = action
        else:
            self.new_observation = observation
            self.new_action = action
        self._send()
        # Consume the action echoed back by the sampler to keep the queue in
        # sync with the serving thread.
        self.action_queue.get(True, timeout=60.0)

    def wait_for_action(self, observation):
        """Records an observation, then blocks for the on-policy action."""
        if self.multiagent:
            self.new_observation_dict = observation
        else:
            self.new_observation = observation
        self._send()
        return self.action_queue.get(True, timeout=300.0)

    def done(self, observation):
        """Marks the episode terminated and flushes the final transition."""
        if self.multiagent:
            self.new_observation_dict = observation
            self.cur_terminated_dict = {"__all__": True}
            # TODO(sven): External env API does not currently support truncated,
            # but we should deprecate external Env anyways in favor of a client-only
            # approach.
            self.cur_truncated_dict = {"__all__": False}
        else:
            self.new_observation = observation
            self.cur_terminated = True
            self.cur_truncated = False
        self._send()

    def _send(self):
        """Assembles the pending transition and hands it to the poller."""
        if self.multiagent:
            if not self.training_enabled:
                # Flag every agent's info so the sampler skips training on it.
                for agent_id in self.cur_info_dict:
                    self.cur_info_dict[agent_id]["training_enabled"] = False
            item = dict(
                obs=self.new_observation_dict,
                reward=self.cur_reward_dict,
                terminated=self.cur_terminated_dict,
                truncated=self.cur_truncated_dict,
                info=self.cur_info_dict,
            )
            if self.new_action_dict is not None:
                item["off_policy_action"] = self.new_action_dict
            # Reset per-step state for the next transition.
            self.new_observation_dict = None
            self.new_action_dict = None
            self.cur_reward_dict = {}
        else:
            item = dict(
                obs=self.new_observation,
                reward=self.cur_reward,
                terminated=self.cur_terminated,
                truncated=self.cur_truncated,
                info=self.cur_info,
            )
            if self.new_action is not None:
                item["off_policy_action"] = self.new_action
            # Reset per-step state for the next transition.
            self.new_observation = None
            self.new_action = None
            self.cur_reward = 0.0
            if not self.training_enabled:
                item["info"]["training_enabled"] = False

        with self.results_avail_condition:
            self.data_queue.put_nowait(item)
            self.results_avail_condition.notify()
349
+
350
+
351
@OldAPIStack
class ExternalEnvWrapper(BaseEnv):
    """Internal adapter of ExternalEnv to BaseEnv."""

    def __init__(
        self, external_env: "ExternalEnv", preprocessor: "Preprocessor" = None
    ):
        """Wraps and starts the given ExternalEnv.

        Args:
            external_env: The ExternalEnv (a Thread subclass) to wrap.
            preprocessor: Optional observation preprocessor; if given, its
                `transform()` is applied to every polled observation.
        """
        from ray.rllib.env.external_multi_agent_env import ExternalMultiAgentEnv

        self.external_env = external_env
        self.prep = preprocessor
        self.multiagent = issubclass(type(external_env), ExternalMultiAgentEnv)
        self._action_space = external_env.action_space
        if preprocessor:
            self._observation_space = preprocessor.observation_space
        else:
            self._observation_space = external_env.observation_space
        # Kick off the user-defined `run()` loop on the serving thread.
        external_env.start()

    @override(BaseEnv)
    def poll(
        self,
    ) -> Tuple[
        MultiEnvDict,
        MultiEnvDict,
        MultiEnvDict,
        MultiEnvDict,
        MultiEnvDict,
        MultiEnvDict,
    ]:
        """Blocks until at least one episode has data, then returns it.

        Returns:
            6-tuple of obs, rewards, terminateds, truncateds, infos, and
            off-policy actions. (The previous annotation advertised only five
            elements, but `_poll()` has always produced six.)

        Raises:
            Exception: If the serving thread has died while waiting.
        """
        with self.external_env._results_avail_condition:
            results = self._poll()
            while len(results[0]) == 0:
                self.external_env._results_avail_condition.wait()
                results = self._poll()
                if not self.external_env.is_alive():
                    raise Exception("Serving thread has stopped.")
        return results

    @override(BaseEnv)
    def send_actions(self, action_dict: MultiEnvDict) -> None:
        """Forwards sampled actions to the waiting episodes' action queues."""
        from ray.rllib.env.base_env import _DUMMY_AGENT_ID

        if self.multiagent:
            for env_id, actions in action_dict.items():
                self.external_env._episodes[env_id].action_queue.put(actions)
        else:
            for env_id, action in action_dict.items():
                self.external_env._episodes[env_id].action_queue.put(
                    action[_DUMMY_AGENT_ID]
                )

    def _poll(
        self,
    ) -> Tuple[
        MultiEnvDict,
        MultiEnvDict,
        MultiEnvDict,
        MultiEnvDict,
        MultiEnvDict,
        MultiEnvDict,
    ]:
        """Drains one pending transition per episode (non-blocking)."""
        from ray.rllib.env.base_env import with_dummy_agent_id

        all_obs, all_rewards, all_terminateds, all_truncateds, all_infos = (
            {},
            {},
            {},
            {},
            {},
        )
        off_policy_actions = {}
        # Iterate over a copy: finished episodes are deleted while iterating.
        for eid, episode in self.external_env._episodes.copy().items():
            data = episode.get_data()
            cur_terminated = (
                episode.cur_terminated_dict["__all__"]
                if self.multiagent
                else episode.cur_terminated
            )
            cur_truncated = (
                episode.cur_truncated_dict["__all__"]
                if self.multiagent
                else episode.cur_truncated
            )
            if cur_terminated or cur_truncated:
                del self.external_env._episodes[eid]
            if data:
                if self.prep:
                    all_obs[eid] = self.prep.transform(data["obs"])
                else:
                    all_obs[eid] = data["obs"]
                all_rewards[eid] = data["reward"]
                all_terminateds[eid] = data["terminated"]
                all_truncateds[eid] = data["truncated"]
                all_infos[eid] = data["info"]
                if "off_policy_action" in data:
                    off_policy_actions[eid] = data["off_policy_action"]
        if self.multiagent:
            # Ensure a consistent set of keys;
            # rely on all_obs having all possible keys for now.
            # (Explicit loop instead of the former nested closure, which
            # captured `agent_id`/`eid` from the enclosing scope.)
            for eid, eid_dict in all_obs.items():
                for agent_id in eid_dict.keys():
                    for d, zero_val in (
                        (all_rewards, 0.0),
                        (all_terminateds, False),
                        (all_truncateds, False),
                        (all_infos, {}),
                    ):
                        if agent_id not in d[eid]:
                            d[eid][agent_id] = zero_val
            return (
                all_obs,
                all_rewards,
                all_terminateds,
                all_truncateds,
                all_infos,
                off_policy_actions,
            )
        else:
            return (
                with_dummy_agent_id(all_obs),
                with_dummy_agent_id(all_rewards),
                with_dummy_agent_id(all_terminateds, "__all__"),
                with_dummy_agent_id(all_truncateds, "__all__"),
                with_dummy_agent_id(all_infos),
                with_dummy_agent_id(off_policy_actions),
            )

    @property
    @override(BaseEnv)
    def observation_space(self) -> gym.spaces.Dict:
        return self._observation_space

    @property
    @override(BaseEnv)
    def action_space(self) -> gym.Space:
        return self._action_space
deepseek/lib/python3.10/site-packages/ray/rllib/env/utils/__init__.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from typing import Type, Union
3
+
4
+ import gymnasium as gym
5
+
6
+ from ray.rllib.env.env_context import EnvContext
7
+ from ray.rllib.utils.error import (
8
+ ERR_MSG_INVALID_ENV_DESCRIPTOR,
9
+ EnvError,
10
+ )
11
+ from ray.util.annotations import PublicAPI
12
+
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
@PublicAPI
def try_import_pyspiel(error: bool = False):
    """Tries importing pyspiel and returns the module (or None).

    Args:
        error: Whether to raise an error if pyspiel cannot be imported.

    Returns:
        The pyspiel module, or None if pyspiel is not installed and
        `error` is False.

    Raises:
        ImportError: If error=True and pyspiel is not installed.
    """
    try:
        import pyspiel

        return pyspiel
    except ImportError:
        if error:
            # Fixed: the message previously told users to `pip install pygame`
            # (copy/paste mistake); pyspiel is distributed via open_spiel.
            raise ImportError(
                "Could not import pyspiel! open_spiel is not a dependency of "
                "RLlib and RLlib requires you to install open_spiel "
                "separately: `pip install open_spiel`."
            )
        return None
42
+
43
+
44
@PublicAPI
def try_import_open_spiel(error: bool = False):
    """Attempts to import the `open_spiel` package.

    Args:
        error: If True, raise an ImportError when the package is missing
            instead of returning None.

    Returns:
        The imported `open_spiel` module, or None if it is unavailable and
        `error` is False.

    Raises:
        ImportError: If `error` is True and open_spiel is not installed.
    """
    try:
        import open_spiel
    except ImportError:
        if error:
            raise ImportError(
                "Could not import open_spiel! open_spiel is not a dependency of RLlib "
                "and RLlib requires you to install open_spiel separately: "
                "`pip install open_spiel`."
            )
        return None
    else:
        return open_spiel
69
+
70
+
71
def _gym_env_creator(
    env_context: EnvContext,
    env_descriptor: Union[str, Type[gym.Env]],
) -> gym.Env:
    """Tries to create a gym env given an EnvContext object and descriptor.

    Note: This function tries to construct the env from a string descriptor
    only using possibly installed RL env packages (such as gym, pybullet_envs,
    etc). These packages are no installation requirements for RLlib. In case
    you would like to support more such env packages, add the necessary imports
    and construction logic below.

    Args:
        env_context: The env context object to configure the env.
            Note that this is a config dict, plus the properties:
            `worker_index`, `vector_index`, and `remote`.
        env_descriptor: The env descriptor as a gym-registered string, e.g. CartPole-v1,
            ALE/MsPacman-v5, or CartPoleContinuousBulletEnv-v0.
            Alternatively, the gym.Env subclass to use.

    Returns:
        The actual gym environment object.

    Raises:
        EnvError: If the env cannot be constructed from `env_descriptor`
            (chained from the underlying `gym.error.Error`).
    """
    # Allow for PyBullet envs to be used as well (via string). This allows
    # for doing things like `env=CartPoleContinuousBulletEnv-v0`. Importing
    # the package registers its envs with gym as a side effect; failures are
    # deliberately ignored (pybullet is optional).
    try:
        import pybullet_envs

        pybullet_envs.getList()
    except (AttributeError, ModuleNotFoundError, ImportError):
        pass

    # If env descriptor is a str, starting with "ale_py:ALE/", for now, register
    # all ALE envs from ale_py.
    if isinstance(env_descriptor, str) and env_descriptor.startswith("ale_py:ALE/"):
        import ale_py

        gym.register_envs(ale_py)

    # Try creating a gym env. If this fails we can output a
    # decent error message.
    try:
        # If class provided, call constructor directly.
        if isinstance(env_descriptor, type):
            env = env_descriptor(env_context)
        else:
            env = gym.make(env_descriptor, **env_context)
    except gym.error.Error as e:
        # Chain the original gym error explicitly so the root cause
        # (e.g. a typo in the env id) shows up in the traceback.
        raise EnvError(ERR_MSG_INVALID_ENV_DESCRIPTOR.format(env_descriptor)) from e

    return env
deepseek/lib/python3.10/site-packages/ray/rllib/env/utils/__pycache__/infinite_lookback_buffer.cpython-310.pyc ADDED
Binary file (19.8 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__init__.py ADDED
File without changes
deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/atari_wrappers.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/dm_control_wrapper.cpython-310.pyc ADDED
Binary file (6.59 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/dm_env_wrapper.cpython-310.pyc ADDED
Binary file (3.16 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/group_agents_wrapper.cpython-310.pyc ADDED
Binary file (4.81 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/open_spiel.cpython-310.pyc ADDED
Binary file (4.23 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/unity3d_env.cpython-310.pyc ADDED
Binary file (9.81 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/dm_control_wrapper.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DeepMind Control Suite Wrapper directly sourced from:
3
+ https://github.com/denisyarats/dmc2gym
4
+
5
+ MIT License
6
+
7
+ Copyright (c) 2020 Denis Yarats
8
+
9
+ Permission is hereby granted, free of charge, to any person obtaining a copy
10
+ of this software and associated documentation files (the "Software"), to deal
11
+ in the Software without restriction, including without limitation the rights
12
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13
+ copies of the Software, and to permit persons to whom the Software is
14
+ furnished to do so, subject to the following conditions:
15
+
16
+ The above copyright notice and this permission notice shall be included in all
17
+ copies or substantial portions of the Software.
18
+
19
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25
+ SOFTWARE.
26
+ """
27
+ from gymnasium import core, spaces
28
+
29
+ try:
30
+ from dm_env import specs
31
+ except ImportError:
32
+ specs = None
33
+ try:
34
+ # Suppress MuJoCo warning (dm_control uses absl logging).
35
+ import absl.logging
36
+
37
+ absl.logging.set_verbosity("error")
38
+ from dm_control import suite
39
+ except (ImportError, OSError):
40
+ suite = None
41
+ import numpy as np
42
+
43
+ from ray.rllib.utils.annotations import PublicAPI
44
+
45
+
46
def _spec_to_box(spec):
    """Flatten an iterable of dm_env specs into one gym `Box` space.

    Unbounded `specs.Array` entries map to (-inf, inf) bounds; bounded
    entries keep their own minimum/maximum, broadcast to the flattened size.
    """

    def _bounds_for(one_spec):
        assert one_spec.dtype == np.float64 or one_spec.dtype == np.float32
        size = np.int_(np.prod(one_spec.shape))
        # NOTE(review): exact type checks (not isinstance) kept from the
        # original; BoundedArray subclasses Array, so isinstance would not
        # be equivalent here — presumably intentional, confirm upstream.
        if type(one_spec) == specs.Array:
            inf_bound = np.inf * np.ones(size, dtype=np.float32)
            return -inf_bound, inf_bound
        elif type(one_spec) == specs.BoundedArray:
            pad = np.zeros(size, dtype=np.float32)
            return one_spec.minimum + pad, one_spec.maximum + pad

    lows, highs = [], []
    for one_spec in spec:
        lo, hi = _bounds_for(one_spec)
        lows.append(lo)
        highs.append(hi)
    low = np.concatenate(lows, axis=0)
    high = np.concatenate(highs, axis=0)
    assert low.shape == high.shape
    return spaces.Box(low, high, dtype=np.float32)
66
+
67
+
68
+ def _flatten_obs(obs):
69
+ obs_pieces = []
70
+ for v in obs.values():
71
+ flat = np.array([v]) if np.isscalar(v) else v.ravel()
72
+ obs_pieces.append(flat)
73
+ return np.concatenate(obs_pieces, axis=0)
74
+
75
+
76
@PublicAPI
class DMCEnv(core.Env):
    """Gymnasium-style wrapper around a DeepMind Control Suite task.

    Loads a task via `dm_control.suite.load` and exposes it through the
    gymnasium `Env` interface, with optional pixel observations, action
    rescaling to [-1, 1], and frame skipping.
    """

    def __init__(
        self,
        domain_name,
        task_name,
        task_kwargs=None,
        visualize_reward=False,
        from_pixels=False,
        height=64,
        width=64,
        camera_id=0,
        frame_skip=2,
        environment_kwargs=None,
        channels_first=True,
        preprocess=True,
    ):
        """Initializes the DMCEnv.

        Args:
            domain_name: dm_control suite domain (e.g. "cartpole").
            task_name: Task within the domain (e.g. "swingup").
            task_kwargs: Passed through to `suite.load`.
            visualize_reward: Passed through to `suite.load`.
            from_pixels: If True, observations are rendered camera images
                instead of flattened state vectors.
            height: Rendered image height (pixel mode only).
            width: Rendered image width (pixel mode only).
            camera_id: Camera to render from (pixel mode only).
            frame_skip: Number of env steps to repeat each action for.
            environment_kwargs: Passed through to `suite.load`.
            channels_first: If True, pixel obs are (3, H, W), else (H, W, 3).
            preprocess: If True, pixel obs are scaled to [-0.5, 0.5] floats.

        Raises:
            RuntimeError: If `dm_env` or `dm_control` are not importable.
        """
        self._from_pixels = from_pixels
        self._height = height
        self._width = width
        self._camera_id = camera_id
        self._frame_skip = frame_skip
        self._channels_first = channels_first
        self.preprocess = preprocess

        # Both optional deps are imported at module load time and set to
        # None on failure; fail loudly here instead of at first use.
        if specs is None:
            raise RuntimeError(
                (
                    "The `specs` module from `dm_env` was not imported. Make sure "
                    "`dm_env` is installed and visible in the current python "
                    "environment."
                )
            )
        if suite is None:
            raise RuntimeError(
                (
                    "The `suite` module from `dm_control` was not imported. Make "
                    "sure `dm_control` is installed and visible in the current "
                    "python enviornment."
                )
            )

        # create task
        self._env = suite.load(
            domain_name=domain_name,
            task_name=task_name,
            task_kwargs=task_kwargs,
            visualize_reward=visualize_reward,
            environment_kwargs=environment_kwargs,
        )

        # true and normalized action spaces
        self._true_action_space = _spec_to_box([self._env.action_spec()])
        self._norm_action_space = spaces.Box(
            low=-1.0, high=1.0, shape=self._true_action_space.shape, dtype=np.float32
        )

        # create observation space
        if from_pixels:
            shape = [3, height, width] if channels_first else [height, width, 3]
            self._observation_space = spaces.Box(
                low=0, high=255, shape=shape, dtype=np.uint8
            )
            # With preprocessing, raw uint8 pixels are rescaled to
            # [-0.5, 0.5] floats, so the space is overwritten accordingly.
            if preprocess:
                self._observation_space = spaces.Box(
                    low=-0.5, high=0.5, shape=shape, dtype=np.float32
                )
        else:
            self._observation_space = _spec_to_box(
                self._env.observation_spec().values()
            )

        # Full (non-pixel) state space, kept even in pixel mode so
        # `current_state` remains inspectable.
        self._state_space = _spec_to_box(self._env.observation_spec().values())

        # Last flattened physics state (set on reset/step).
        self.current_state = None

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped dm_control env.
        # NOTE(review): if `self._env` is not yet set (e.g. during unpickling),
        # this recurses — confirm that's acceptable for all call paths.
        return getattr(self._env, name)

    def _get_obs(self, time_step):
        """Builds the observation (pixels or flattened state) for a time step."""
        if self._from_pixels:
            obs = self.render(
                height=self._height, width=self._width, camera_id=self._camera_id
            )
            if self._channels_first:
                # HWC -> CHW; copy to avoid returning a strided view.
                obs = obs.transpose(2, 0, 1).copy()
            if self.preprocess:
                # Rescale uint8 [0, 255] to float [-0.5, 0.5].
                obs = obs / 255.0 - 0.5
        else:
            obs = _flatten_obs(time_step.observation)
        return obs.astype(np.float32)

    def _convert_action(self, action):
        """Rescales a normalized [-1, 1] action into the env's true bounds."""
        action = action.astype(np.float64)
        true_delta = self._true_action_space.high - self._true_action_space.low
        norm_delta = self._norm_action_space.high - self._norm_action_space.low
        action = (action - self._norm_action_space.low) / norm_delta
        action = action * true_delta + self._true_action_space.low
        action = action.astype(np.float32)
        return action

    @property
    def observation_space(self):
        return self._observation_space

    @property
    def state_space(self):
        return self._state_space

    @property
    def action_space(self):
        # Agents act in the normalized [-1, 1] space; `step` converts.
        return self._norm_action_space

    def step(self, action):
        """Applies `action` for `frame_skip` frames; returns gymnasium 5-tuple."""
        assert self._norm_action_space.contains(action)
        action = self._convert_action(action)
        assert self._true_action_space.contains(action)
        reward = 0.0
        extra = {"internal_state": self._env.physics.get_state().copy()}

        # `terminated` is never set True here: dm_control only signals
        # episode ends via `time_step.last()`, which is mapped to truncation.
        terminated = truncated = False
        for _ in range(self._frame_skip):
            time_step = self._env.step(action)
            # dm_control may return None reward on the first step.
            reward += time_step.reward or 0.0
            terminated = False
            truncated = time_step.last()
            if terminated or truncated:
                break
        obs = self._get_obs(time_step)
        self.current_state = _flatten_obs(time_step.observation)
        extra["discount"] = time_step.discount
        return obs, reward, terminated, truncated, extra

    def reset(self, *, seed=None, options=None):
        # NOTE(review): `seed` and `options` are accepted for gymnasium API
        # compatibility but ignored — seeding goes through `task_kwargs`
        # at construction time; confirm this is intended.
        time_step = self._env.reset()
        self.current_state = _flatten_obs(time_step.observation)
        obs = self._get_obs(time_step)
        return obs, {}

    def render(self, mode="rgb_array", height=None, width=None, camera_id=0):
        """Renders an RGB image from the underlying MuJoCo physics."""
        assert mode == "rgb_array", "only support for rgb_array mode"
        height = height or self._height
        width = width or self._width
        # NOTE(review): `camera_id or self._camera_id` treats an explicit
        # camera_id=0 as falsy and falls back to the instance default —
        # confirm callers never need to force camera 0 when the default is
        # non-zero.
        camera_id = camera_id or self._camera_id
        return self._env.physics.render(height=height, width=width, camera_id=camera_id)
deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/exception_wrapper.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import traceback
3
+
4
+ import gymnasium as gym
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
+
9
+ class TooManyResetAttemptsException(Exception):
10
+ def __init__(self, max_attempts: int):
11
+ super().__init__(
12
+ f"Reached the maximum number of attempts ({max_attempts}) "
13
+ f"to reset an environment."
14
+ )
15
+
16
+
17
+ class ResetOnExceptionWrapper(gym.Wrapper):
18
+ def __init__(self, env: gym.Env, max_reset_attempts: int = 5):
19
+ super().__init__(env)
20
+ self.max_reset_attempts = max_reset_attempts
21
+
22
+ def reset(self, **kwargs):
23
+ attempt = 0
24
+ while attempt < self.max_reset_attempts:
25
+ try:
26
+ return self.env.reset(**kwargs)
27
+ except Exception:
28
+ logger.error(traceback.format_exc())
29
+ attempt += 1
30
+ else:
31
+ raise TooManyResetAttemptsException(self.max_reset_attempts)
32
+
33
+ def step(self, action):
34
+ try:
35
+ return self.env.step(action)
36
+ except Exception:
37
+ logger.error(traceback.format_exc())
38
+ return self.reset(), 0.0, False, {"__terminated__": True}
deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/unity3d_env.py ADDED
@@ -0,0 +1,381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from gymnasium.spaces import Box, MultiDiscrete, Tuple as TupleSpace
2
+ import logging
3
+ import numpy as np
4
+ import random
5
+ import time
6
+ from typing import Callable, Optional, Tuple
7
+
8
+ from ray.rllib.env.multi_agent_env import MultiAgentEnv
9
+ from ray.rllib.policy.policy import PolicySpec
10
+ from ray.rllib.utils.annotations import PublicAPI
11
+ from ray.rllib.utils.typing import MultiAgentDict, PolicyID, AgentID
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
@PublicAPI
class Unity3DEnv(MultiAgentEnv):
    """A MultiAgentEnv representing a single Unity3D game instance.

    For an example on how to use this Env with a running Unity3D editor
    or with a compiled game, see:
    `rllib/examples/unity3d_env_local.py`
    For an example on how to use it inside a Unity game client, which
    connects to an RLlib Policy server, see:
    `rllib/examples/envs/external_envs/unity3d_[client|server].py`

    Supports all Unity3D (MLAgents) examples, multi- or single-agent and
    gets converted automatically into an ExternalMultiAgentEnv, when used
    inside an RLlib PolicyClient for cloud/distributed training of Unity games.
    """

    # Default base port when connecting directly to the Editor
    _BASE_PORT_EDITOR = 5004
    # Default base port when connecting to a compiled environment
    _BASE_PORT_ENVIRONMENT = 5005
    # The worker_id for each environment instance
    _WORKER_ID = 0

    def __init__(
        self,
        file_name: str = None,
        port: Optional[int] = None,
        seed: int = 0,
        no_graphics: bool = False,
        timeout_wait: int = 300,
        episode_horizon: int = 1000,
    ):
        """Initializes a Unity3DEnv object.

        Args:
            file_name (Optional[str]): Name of the Unity game binary.
                If None, will assume a locally running Unity3D editor
                to be used, instead.
            port (Optional[int]): Port number to connect to Unity environment.
            seed: A random seed value to use for the Unity3D game.
            no_graphics: Whether to run the Unity3D simulator in
                no-graphics mode. Default: False.
            timeout_wait: Time (in seconds) to wait for connection from
                the Unity3D instance.
            episode_horizon: A hard horizon to abide to. After at most
                this many steps (per-agent episode `step()` calls), the
                Unity3D game is reset and will start again (finishing the
                multi-agent episode that the game represents).
                Note: The game itself may contain its own episode length
                limits, which are always obeyed (on top of this value here).
        """
        super().__init__()

        if file_name is None:
            print(
                "No game binary provided, will use a running Unity editor "
                "instead.\nMake sure you are pressing the Play (|>) button in "
                "your editor to start."
            )

        import mlagents_envs
        from mlagents_envs.environment import UnityEnvironment

        # Try connecting to the Unity3D game instance. If a port is blocked,
        # keep retrying with a fresh worker_id.
        # NOTE(review): this loop has no retry limit — if every port/worker
        # combination stays blocked it spins forever; confirm that's intended.
        port_ = None
        while True:
            # Sleep for random time to allow for concurrent startup of many
            # environments (num_env_runners >> 1). Otherwise, would lead to port
            # conflicts sometimes.
            if port_ is not None:
                time.sleep(random.randint(1, 10))
            port_ = port or (
                self._BASE_PORT_ENVIRONMENT if file_name else self._BASE_PORT_EDITOR
            )
            # cache the worker_id and
            # increase it for the next environment
            worker_id_ = Unity3DEnv._WORKER_ID if file_name else 0
            Unity3DEnv._WORKER_ID += 1
            try:
                self.unity_env = UnityEnvironment(
                    file_name=file_name,
                    worker_id=worker_id_,
                    base_port=port_,
                    seed=seed,
                    no_graphics=no_graphics,
                    timeout_wait=timeout_wait,
                )
                print("Created UnityEnvironment for port {}".format(port_ + worker_id_))
            except mlagents_envs.exception.UnityWorkerInUseException:
                # Port/worker in use -> retry with the next worker_id.
                pass
            else:
                break

        # ML-Agents API version, parsed into [major, minor, ...] ints for
        # the version branch in `step()`.
        self.api_version = self.unity_env.API_VERSION.split(".")
        self.api_version = [int(s) for s in self.api_version]

        # Reset entire env every this number of step calls.
        self.episode_horizon = episode_horizon
        # Keep track of how many times we have called `step` so far.
        self.episode_timesteps = 0

    def step(
        self, action_dict: MultiAgentDict
    ) -> Tuple[
        MultiAgentDict, MultiAgentDict, MultiAgentDict, MultiAgentDict, MultiAgentDict
    ]:
        """Performs one multi-agent step through the game.

        Args:
            action_dict: Multi-agent action dict with:
                keys=agent identifier consisting of
                [MLagents behavior name, e.g. "Goalie?team=1"] + "_" +
                [Agent index, a unique MLAgent-assigned index per single agent]

        Returns:
            tuple:
                - obs: Multi-agent observation dict.
                    Only those observations for which to get new actions are
                    returned.
                - rewards: Rewards dict matching `obs`.
                - dones: Done dict with only an __all__ multi-agent entry in
                    it. __all__=True, if episode is done for all agents.
                - infos: An (empty) info dict.
        """
        from mlagents_envs.base_env import ActionTuple

        # Set only the required actions (from the DecisionSteps) in Unity3D.
        all_agents = []
        for behavior_name in self.unity_env.behavior_specs:
            # New ML-Agents API: Set all agents actions at the same time
            # via an ActionTuple. Since API v1.4.0.
            if self.api_version[0] > 1 or (
                self.api_version[0] == 1 and self.api_version[1] >= 4
            ):
                actions = []
                for agent_id in self.unity_env.get_steps(behavior_name)[0].agent_id:
                    key = behavior_name + "_{}".format(agent_id)
                    all_agents.append(key)
                    actions.append(action_dict[key])
                if actions:
                    # float32 actions -> continuous; everything else discrete.
                    if actions[0].dtype == np.float32:
                        action_tuple = ActionTuple(continuous=np.array(actions))
                    else:
                        action_tuple = ActionTuple(discrete=np.array(actions))
                    self.unity_env.set_actions(behavior_name, action_tuple)
            # Old behavior: Do not use an ActionTuple and set each agent's
            # action individually.
            else:
                for agent_id in self.unity_env.get_steps(behavior_name)[
                    0
                ].agent_id_to_index.keys():
                    key = behavior_name + "_{}".format(agent_id)
                    all_agents.append(key)
                    self.unity_env.set_action_for_agent(
                        behavior_name, agent_id, action_dict[key]
                    )
        # Do the step.
        self.unity_env.step()

        obs, rewards, terminateds, truncateds, infos = self._get_step_results()

        # Global horizon reached? -> Return __all__ truncated=True, so user
        # can reset. Set all agents' individual `truncated` to True as well.
        # NOTE(review): the strict `>` means truncation triggers on step
        # horizon+1, not horizon — confirm that off-by-one is intended.
        self.episode_timesteps += 1
        if self.episode_timesteps > self.episode_horizon:
            return (
                obs,
                rewards,
                terminateds,
                dict({"__all__": True}, **{agent_id: True for agent_id in all_agents}),
                infos,
            )

        return obs, rewards, terminateds, truncateds, infos

    def reset(
        self, *, seed=None, options=None
    ) -> Tuple[MultiAgentDict, MultiAgentDict]:
        """Resets the entire Unity3D scene (a single multi-agent episode)."""
        # NOTE(review): `seed`/`options` are accepted for API compatibility
        # but not forwarded to Unity — seeding happens in `__init__`.
        self.episode_timesteps = 0
        self.unity_env.reset()
        obs, _, _, _, infos = self._get_step_results()
        return obs, infos

    def _get_step_results(self):
        """Collects those agents' obs/rewards that have to act in next `step`.

        Returns:
            Tuple:
                obs: Multi-agent observation dict.
                    Only those observations for which to get new actions are
                    returned.
                rewards: Rewards dict matching `obs`.
                dones: Done dict with only an __all__ multi-agent entry in it.
                    __all__=True, if episode is done for all agents.
                infos: An (empty) info dict.
        """
        obs = {}
        rewards = {}
        infos = {}
        for behavior_name in self.unity_env.behavior_specs:
            decision_steps, terminal_steps = self.unity_env.get_steps(behavior_name)
            # Important: Only update those sub-envs that are currently
            # available within _env_state.
            # Loop through all envs ("agents") and fill in, whatever
            # information we have.
            for agent_id, idx in decision_steps.agent_id_to_index.items():
                key = behavior_name + "_{}".format(agent_id)
                # Single-obs agents get the bare array, multi-obs a tuple.
                os = tuple(o[idx] for o in decision_steps.obs)
                os = os[0] if len(os) == 1 else os
                obs[key] = os
                rewards[key] = (
                    decision_steps.reward[idx] + decision_steps.group_reward[idx]
                )
            for agent_id, idx in terminal_steps.agent_id_to_index.items():
                key = behavior_name + "_{}".format(agent_id)
                # Only overwrite rewards (last reward in episode), b/c obs
                # here is the last obs (which doesn't matter anyways).
                # Unless key does not exist in obs.
                if key not in obs:
                    os = tuple(o[idx] for o in terminal_steps.obs)
                    obs[key] = os = os[0] if len(os) == 1 else os
                rewards[key] = (
                    terminal_steps.reward[idx] + terminal_steps.group_reward[idx]
                )

        # Only use dones if all agents are done, then we should do a reset.
        return obs, rewards, {"__all__": False}, {"__all__": False}, infos

    @staticmethod
    def get_policy_configs_for_game(
        game_name: str,
    ) -> Tuple[dict, Callable[[AgentID], PolicyID]]:
        """Returns (policies dict, agent->policy mapping fn) for a known game.

        The RLlib server must know about the Spaces that the Client will be
        using inside Unity3D, up-front; the hard-coded spaces below cover the
        bundled ML-Agents example games.
        """
        obs_spaces = {
            # 3DBall.
            "3DBall": Box(float("-inf"), float("inf"), (8,)),
            # 3DBallHard.
            "3DBallHard": Box(float("-inf"), float("inf"), (45,)),
            # GridFoodCollector
            "GridFoodCollector": Box(float("-inf"), float("inf"), (40, 40, 6)),
            # Pyramids.
            "Pyramids": TupleSpace(
                [
                    Box(float("-inf"), float("inf"), (56,)),
                    Box(float("-inf"), float("inf"), (56,)),
                    Box(float("-inf"), float("inf"), (56,)),
                    Box(float("-inf"), float("inf"), (4,)),
                ]
            ),
            # SoccerTwos.
            "SoccerPlayer": TupleSpace(
                [
                    Box(-1.0, 1.0, (264,)),
                    Box(-1.0, 1.0, (72,)),
                ]
            ),
            # SoccerStrikersVsGoalie.
            "Goalie": Box(float("-inf"), float("inf"), (738,)),
            "Striker": TupleSpace(
                [
                    Box(float("-inf"), float("inf"), (231,)),
                    Box(float("-inf"), float("inf"), (63,)),
                ]
            ),
            # Sorter.
            "Sorter": TupleSpace(
                [
                    Box(
                        float("-inf"),
                        float("inf"),
                        (
                            20,
                            23,
                        ),
                    ),
                    Box(float("-inf"), float("inf"), (10,)),
                    Box(float("-inf"), float("inf"), (8,)),
                ]
            ),
            # Tennis.
            "Tennis": Box(float("-inf"), float("inf"), (27,)),
            # VisualHallway.
            "VisualHallway": Box(float("-inf"), float("inf"), (84, 84, 3)),
            # Walker.
            "Walker": Box(float("-inf"), float("inf"), (212,)),
            # FoodCollector.
            "FoodCollector": TupleSpace(
                [
                    Box(float("-inf"), float("inf"), (49,)),
                    Box(float("-inf"), float("inf"), (4,)),
                ]
            ),
        }
        action_spaces = {
            # 3DBall.
            "3DBall": Box(-1.0, 1.0, (2,), dtype=np.float32),
            # 3DBallHard.
            "3DBallHard": Box(-1.0, 1.0, (2,), dtype=np.float32),
            # GridFoodCollector.
            "GridFoodCollector": MultiDiscrete([3, 3, 3, 2]),
            # Pyramids.
            "Pyramids": MultiDiscrete([5]),
            # SoccerStrikersVsGoalie.
            "Goalie": MultiDiscrete([3, 3, 3]),
            "Striker": MultiDiscrete([3, 3, 3]),
            # SoccerTwos.
            "SoccerPlayer": MultiDiscrete([3, 3, 3]),
            # Sorter.
            "Sorter": MultiDiscrete([3, 3, 3]),
            # Tennis.
            "Tennis": Box(-1.0, 1.0, (3,)),
            # VisualHallway.
            "VisualHallway": MultiDiscrete([5]),
            # Walker.
            "Walker": Box(-1.0, 1.0, (39,)),
            # FoodCollector.
            "FoodCollector": MultiDiscrete([3, 3, 3, 2]),
        }

        # Policies (Unity: "behaviors") and agent-to-policy mapping fns.
        if game_name == "SoccerStrikersVsGoalie":
            policies = {
                "Goalie": PolicySpec(
                    observation_space=obs_spaces["Goalie"],
                    action_space=action_spaces["Goalie"],
                ),
                "Striker": PolicySpec(
                    observation_space=obs_spaces["Striker"],
                    action_space=action_spaces["Striker"],
                ),
            }

            def policy_mapping_fn(agent_id, episode, worker, **kwargs):
                return "Striker" if "Striker" in agent_id else "Goalie"

        elif game_name == "SoccerTwos":
            policies = {
                "PurplePlayer": PolicySpec(
                    observation_space=obs_spaces["SoccerPlayer"],
                    action_space=action_spaces["SoccerPlayer"],
                ),
                "BluePlayer": PolicySpec(
                    observation_space=obs_spaces["SoccerPlayer"],
                    action_space=action_spaces["SoccerPlayer"],
                ),
            }

            def policy_mapping_fn(agent_id, episode, worker, **kwargs):
                return "BluePlayer" if "1_" in agent_id else "PurplePlayer"

        else:
            # Single-behavior games: one policy named after the game.
            # NOTE: raises KeyError for game names not in the tables above.
            policies = {
                game_name: PolicySpec(
                    observation_space=obs_spaces[game_name],
                    action_space=action_spaces[game_name],
                ),
            }

            def policy_mapping_fn(agent_id, episode, worker, **kwargs):
                return game_name

        return policies, policy_mapping_fn
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/cartpole_mass.cpython-310.pyc ADDED
Binary file (1.52 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/coin_game_non_vectorized_env.cpython-310.pyc ADDED
Binary file (9.29 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/env_with_subprocess.cpython-310.pyc ADDED
Binary file (1.96 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/multi_agent.cpython-310.pyc ADDED
Binary file (3.61 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/pendulum_mass.cpython-310.pyc ADDED
Binary file (1.46 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/pettingzoo_chess.cpython-310.pyc ADDED
Binary file (6.85 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/random_env.cpython-310.pyc ADDED
Binary file (3.4 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/repeat_initial_obs_env.cpython-310.pyc ADDED
Binary file (1.38 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/two_step_game.cpython-310.pyc ADDED
Binary file (3.93 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/utils/__init__.py ADDED
File without changes
deepseek/lib/python3.10/site-packages/ray/rllib/examples/ray_tune/custom_experiment.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example of a custom Ray Tune experiment wrapping an RLlib Algorithm.
2
+
3
+ You should only use such a customized workflow if the following conditions apply:
4
+ - You know exactly what you are doing :)
5
+ - Configuring an existing RLlib Algorithm (e.g. PPO) via its AlgorithmConfig
6
+ is not sufficient and doesn't allow you to shape the Algorithm into behaving the way
7
+ you'd like. Note that for complex, custom evaluation procedures there are many
8
+ AlgorithmConfig options one can use (for more details, see:
9
+ https://github.com/ray-project/ray/blob/master/rllib/examples/evaluation/custom_evaluation.py). # noqa
10
+ - Subclassing an RLlib Algorithm class and overriding the new class' `training_step`
11
+ method is not sufficient and doesn't allow you to define the algorithm's execution
12
+ logic the way you'd like. See an example here on how to customize the algorithm's
13
+ `training_step()` method:
14
+ https://github.com/ray-project/ray/blob/master/rllib/examples/algorithm/custom_training_step_on_and_off_policy_combined.py # noqa
15
+
16
+
17
+ How to run this script
18
+ ----------------------
19
+ `python [script file name].py`
20
+
21
+
22
+ Results to expect
23
+ -----------------
24
+ You should see the following output (at the end of the experiment) in your console:
25
+
26
+ ╭───────────────────────────────────────────────────────────────────────────────────────
27
+ │ Trial name status iter total time (s) ts
28
+ ├───────────────────────────────────────────────────────────────────────────────────────
29
+ │ my_experiment_CartPole-v1_77083_00000 TERMINATED 10 36.7799 60000
30
+ ╰───────────────────────────────────────────────────────────────────────────────────────
31
+ ╭───────────────────────────────────────────────────────╮
32
+ │ reward episode_len_mean episodes_this_iter │
33
+ ├───────────────────────────────────────────────────────┤
34
+ │ 254.821 254.821 12 │
35
+ ╰───────────────────────────────────────────────────────╯
36
+ evaluation episode returns=[500.0, 500.0, 500.0]
37
+
38
+ Note that evaluation results (on the CartPole-v1 env) should be close to perfect
39
+ (episode return of ~500.0) as we are acting greedily inside the evaluation procedure.
40
+ """
41
+ from typing import Dict
42
+
43
+ import numpy as np
44
+ from ray import train, tune
45
+ from ray.rllib.algorithms.ppo import PPOConfig
46
+ from ray.rllib.utils.framework import try_import_torch
47
+ from ray.rllib.utils.metrics import NUM_ENV_STEPS_SAMPLED_LIFETIME
48
+
49
+ torch, _ = try_import_torch()
50
+
51
+
52
+ def my_experiment(config: Dict):
53
+
54
+ # Extract the number of iterations to run from the config.
55
+ train_iterations = config.pop("train-iterations", 2)
56
+ eval_episodes_to_do = config.pop("eval-episodes", 1)
57
+
58
+ config = (
59
+ PPOConfig()
60
+ .update_from_dict(config)
61
+ .api_stack(enable_rl_module_and_learner=True)
62
+ .environment("CartPole-v1")
63
+ )
64
+
65
+ # Train for n iterations with high LR.
66
+ config.training(lr=0.001)
67
+ algo_high_lr = config.build()
68
+ for _ in range(train_iterations):
69
+ train_results = algo_high_lr.train()
70
+ # Add the phase to the result dict.
71
+ train_results["phase"] = 1
72
+ train.report(train_results)
73
+ phase_high_lr_time = train_results[NUM_ENV_STEPS_SAMPLED_LIFETIME]
74
+ checkpoint_training_high_lr = algo_high_lr.save()
75
+ algo_high_lr.stop()
76
+
77
+ # Train for n iterations with low LR.
78
+ config.training(lr=0.00001)
79
+ algo_low_lr = config.build()
80
+ # Load state from the high-lr algo into this one.
81
+ algo_low_lr.restore(checkpoint_training_high_lr)
82
+ for _ in range(train_iterations):
83
+ train_results = algo_low_lr.train()
84
+ # Add the phase to the result dict.
85
+ train_results["phase"] = 2
86
+ # keep time moving forward
87
+ train_results[NUM_ENV_STEPS_SAMPLED_LIFETIME] += phase_high_lr_time
88
+ train.report(train_results)
89
+
90
+ checkpoint_training_low_lr = algo_low_lr.save()
91
+ algo_low_lr.stop()
92
+
93
+ # After training, run a manual evaluation procedure.
94
+
95
+ # Set the number of EnvRunners for collecting training data to 0 (local
96
+ # worker only).
97
+ config.env_runners(num_env_runners=0)
98
+
99
+ eval_algo = config.build()
100
+ # Load state from the low-lr algo into this one.
101
+ eval_algo.restore(checkpoint_training_low_lr)
102
+ # The algo's local worker (SingleAgentEnvRunner) that holds a
103
+ # gym.vector.Env object and an RLModule for computing actions.
104
+ local_env_runner = eval_algo.env_runner
105
+ # Extract the gymnasium env object from the created algo (its local
106
+ # SingleAgentEnvRunner worker). Note that the env in this single-agent
107
+ # case is a gymnasium vector env and that we get its first sub-env here.
108
+ env = local_env_runner.env.unwrapped.envs[0]
109
+
110
+ # The local worker (SingleAgentEnvRunner)
111
+ rl_module = local_env_runner.module
112
+
113
+ # Run a very simple env loop and add up rewards over a single episode.
114
+ obs, infos = env.reset()
115
+ episode_returns = []
116
+ episode_lengths = []
117
+ sum_rewards = length = 0
118
+ num_episodes = 0
119
+ while num_episodes < eval_episodes_to_do:
120
+ # Call the RLModule's `forward_inference()` method to compute an
121
+ # action.
122
+ rl_module_out = rl_module.forward_inference(
123
+ {
124
+ "obs": torch.from_numpy(np.expand_dims(obs, 0)), # <- add B=1
125
+ }
126
+ )
127
+ action_logits = rl_module_out["action_dist_inputs"][0] # <- remove B=1
128
+ action = np.argmax(action_logits.detach().cpu().numpy()) # act greedily
129
+
130
+ # Step the env.
131
+ obs, reward, terminated, truncated, info = env.step(action)
132
+
133
+ # Acculumate stats and reset the env, if necessary.
134
+ sum_rewards += reward
135
+ length += 1
136
+ if terminated or truncated:
137
+ num_episodes += 1
138
+ episode_returns.append(sum_rewards)
139
+ episode_lengths.append(length)
140
+ sum_rewards = length = 0
141
+ obs, infos = env.reset()
142
+
143
+ # Compile evaluation results.
144
+ eval_results = {
145
+ "eval_returns": episode_returns,
146
+ "eval_episode_lengths": episode_lengths,
147
+ }
148
+ # Combine the most recent training results with the just collected
149
+ # evaluation results.
150
+ results = {**train_results, **eval_results}
151
+ # Report everything.
152
+ train.report(results)
153
+
154
+
155
+ if __name__ == "__main__":
156
+ base_config = PPOConfig().environment("CartPole-v1").env_runners(num_env_runners=0)
157
+ # Convert to a plain dict for Tune. Note that this is usually not needed, you can
158
+ # pass into the below Tune Tuner any instantiated RLlib AlgorithmConfig object.
159
+ # However, for demonstration purposes, we show here how you can add other, arbitrary
160
+ # keys to the plain config dict and then pass these keys to your custom experiment
161
+ # function.
162
+ config_dict = base_config.to_dict()
163
+
164
+ # Set a Special flag signalling `my_experiment` how many training steps to
165
+ # perform on each: the high learning rate and low learning rate.
166
+ config_dict["train-iterations"] = 5
167
+ # Set a Special flag signalling `my_experiment` how many episodes to evaluate for.
168
+ config_dict["eval-episodes"] = 3
169
+
170
+ training_function = tune.with_resources(
171
+ my_experiment,
172
+ resources=base_config.algo_class.default_resource_request(base_config),
173
+ )
174
+
175
+ tuner = tune.Tuner(
176
+ training_function,
177
+ # Pass in your config dict.
178
+ param_space=config_dict,
179
+ )
180
+ results = tuner.fit()
181
+ best_results = results.get_best_result()
182
+
183
+ print(f"evaluation episode returns={best_results.metrics['eval_returns']}")
deepseek/lib/python3.10/site-packages/ray/rllib/models/catalog.py ADDED
@@ -0,0 +1,905 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ import gymnasium as gym
3
+ from gymnasium.spaces import Box, Dict, Discrete, MultiDiscrete, Tuple
4
+ import logging
5
+ import numpy as np
6
+ import tree # pip install dm_tree
7
+ from typing import List, Optional, Type, Union
8
+
9
+ from ray.tune.registry import (
10
+ RLLIB_MODEL,
11
+ RLLIB_ACTION_DIST,
12
+ _global_registry,
13
+ )
14
+ from ray.rllib.models.action_dist import ActionDistribution
15
+ from ray.rllib.models.modelv2 import ModelV2
16
+ from ray.rllib.models.preprocessors import get_preprocessor, Preprocessor
17
+ from ray.rllib.models.tf.tf_action_dist import (
18
+ Categorical,
19
+ Deterministic,
20
+ DiagGaussian,
21
+ Dirichlet,
22
+ MultiActionDistribution,
23
+ MultiCategorical,
24
+ )
25
+ from ray.rllib.models.torch.torch_action_dist import (
26
+ TorchCategorical,
27
+ TorchDeterministic,
28
+ TorchDirichlet,
29
+ TorchDiagGaussian,
30
+ TorchMultiActionDistribution,
31
+ TorchMultiCategorical,
32
+ )
33
+ from ray.rllib.utils.annotations import DeveloperAPI, PublicAPI
34
+ from ray.rllib.utils.deprecation import (
35
+ DEPRECATED_VALUE,
36
+ deprecation_warning,
37
+ )
38
+ from ray.rllib.utils.error import UnsupportedSpaceException
39
+ from ray.rllib.utils.framework import try_import_tf, try_import_torch
40
+ from ray.rllib.utils.from_config import from_config
41
+ from ray.rllib.utils.spaces.simplex import Simplex
42
+ from ray.rllib.utils.spaces.space_utils import flatten_space
43
+ from ray.rllib.utils.typing import ModelConfigDict, TensorType
44
+
45
+ tf1, tf, tfv = try_import_tf()
46
+ torch, _ = try_import_torch()
47
+
48
+ logger = logging.getLogger(__name__)
49
+
50
+ # fmt: off
51
+ # __sphinx_doc_begin__
52
+ MODEL_DEFAULTS: ModelConfigDict = {
53
+ "fcnet_hiddens": [256, 256],
54
+ "fcnet_activation": "tanh",
55
+ "fcnet_weights_initializer": None,
56
+ "fcnet_weights_initializer_config": None,
57
+ "fcnet_bias_initializer": None,
58
+ "fcnet_bias_initializer_config": None,
59
+ "conv_filters": None,
60
+ "conv_activation": "relu",
61
+ "conv_kernel_initializer": None,
62
+ "conv_kernel_initializer_config": None,
63
+ "conv_bias_initializer": None,
64
+ "conv_bias_initializer_config": None,
65
+ "conv_transpose_kernel_initializer": None,
66
+ "conv_transpose_kernel_initializer_config": None,
67
+ "conv_transpose_bias_initializer": None,
68
+ "conv_transpose_bias_initializer_config": None,
69
+ "post_fcnet_hiddens": [],
70
+ "post_fcnet_activation": "relu",
71
+ "post_fcnet_weights_initializer": None,
72
+ "post_fcnet_weights_initializer_config": None,
73
+ "post_fcnet_bias_initializer": None,
74
+ "post_fcnet_bias_initializer_config": None,
75
+ "free_log_std": False,
76
+ "log_std_clip_param": 20.0,
77
+ "no_final_linear": False,
78
+ "vf_share_layers": True,
79
+ "use_lstm": False,
80
+ "max_seq_len": 20,
81
+ "lstm_cell_size": 256,
82
+ "lstm_use_prev_action": False,
83
+ "lstm_use_prev_reward": False,
84
+ "lstm_weights_initializer": None,
85
+ "lstm_weights_initializer_config": None,
86
+ "lstm_bias_initializer": None,
87
+ "lstm_bias_initializer_config": None,
88
+ "_time_major": False,
89
+ "use_attention": False,
90
+ "attention_num_transformer_units": 1,
91
+ "attention_dim": 64,
92
+ "attention_num_heads": 1,
93
+ "attention_head_dim": 32,
94
+ "attention_memory_inference": 50,
95
+ "attention_memory_training": 50,
96
+ "attention_position_wise_mlp_dim": 32,
97
+ "attention_init_gru_gate_bias": 2.0,
98
+ "attention_use_n_prev_actions": 0,
99
+ "attention_use_n_prev_rewards": 0,
100
+ "framestack": True,
101
+ "dim": 84,
102
+ "grayscale": False,
103
+ "zero_mean": True,
104
+ "custom_model": None,
105
+ "custom_model_config": {},
106
+ "custom_action_dist": None,
107
+ "custom_preprocessor": None,
108
+ "encoder_latent_dim": None,
109
+ "always_check_shapes": False,
110
+
111
+ # Deprecated keys:
112
+ "lstm_use_prev_action_reward": DEPRECATED_VALUE,
113
+ "_use_default_native_models": DEPRECATED_VALUE,
114
+ "_disable_preprocessor_api": False,
115
+ "_disable_action_flattening": False,
116
+ }
117
+ # __sphinx_doc_end__
118
+ # fmt: on
119
+
120
+
121
+ @DeveloperAPI
122
+ class ModelCatalog:
123
+ """Registry of models, preprocessors, and action distributions for envs.
124
+
125
+ .. testcode::
126
+ :skipif: True
127
+
128
+ prep = ModelCatalog.get_preprocessor(env)
129
+ observation = prep.transform(raw_observation)
130
+
131
+ dist_class, dist_dim = ModelCatalog.get_action_dist(
132
+ env.action_space, {})
133
+ model = ModelCatalog.get_model_v2(
134
+ obs_space, action_space, num_outputs, options)
135
+ dist = dist_class(model.outputs, model)
136
+ action = dist.sample()
137
+ """
138
+
139
+ @staticmethod
140
+ @DeveloperAPI
141
+ def get_action_dist(
142
+ action_space: gym.Space,
143
+ config: ModelConfigDict,
144
+ dist_type: Optional[Union[str, Type[ActionDistribution]]] = None,
145
+ framework: str = "tf",
146
+ **kwargs
147
+ ) -> (type, int):
148
+ """Returns a distribution class and size for the given action space.
149
+
150
+ Args:
151
+ action_space: Action space of the target gym env.
152
+ config (Optional[dict]): Optional model config.
153
+ dist_type (Optional[Union[str, Type[ActionDistribution]]]):
154
+ Identifier of the action distribution (str) interpreted as a
155
+ hint or the actual ActionDistribution class to use.
156
+ framework: One of "tf2", "tf", "torch", or "jax".
157
+ kwargs: Optional kwargs to pass on to the Distribution's
158
+ constructor.
159
+
160
+ Returns:
161
+ Tuple:
162
+ - dist_class (ActionDistribution): Python class of the
163
+ distribution.
164
+ - dist_dim (int): The size of the input vector to the
165
+ distribution.
166
+ """
167
+
168
+ dist_cls = None
169
+ config = config or MODEL_DEFAULTS
170
+ # Custom distribution given.
171
+ if config.get("custom_action_dist"):
172
+ custom_action_config = config.copy()
173
+ action_dist_name = custom_action_config.pop("custom_action_dist")
174
+ logger.debug("Using custom action distribution {}".format(action_dist_name))
175
+ dist_cls = _global_registry.get(RLLIB_ACTION_DIST, action_dist_name)
176
+ return ModelCatalog._get_multi_action_distribution(
177
+ dist_cls, action_space, custom_action_config, framework
178
+ )
179
+
180
+ # Dist_type is given directly as a class.
181
+ elif (
182
+ type(dist_type) is type
183
+ and issubclass(dist_type, ActionDistribution)
184
+ and dist_type not in (MultiActionDistribution, TorchMultiActionDistribution)
185
+ ):
186
+ dist_cls = dist_type
187
+ # Box space -> DiagGaussian OR Deterministic.
188
+ elif isinstance(action_space, Box):
189
+ if action_space.dtype.name.startswith("int"):
190
+ low_ = np.min(action_space.low)
191
+ high_ = np.max(action_space.high)
192
+ dist_cls = (
193
+ TorchMultiCategorical if framework == "torch" else MultiCategorical
194
+ )
195
+ num_cats = int(np.prod(action_space.shape))
196
+ return (
197
+ partial(
198
+ dist_cls,
199
+ input_lens=[high_ - low_ + 1 for _ in range(num_cats)],
200
+ action_space=action_space,
201
+ ),
202
+ num_cats * (high_ - low_ + 1),
203
+ )
204
+ else:
205
+ if len(action_space.shape) > 1:
206
+ raise UnsupportedSpaceException(
207
+ "Action space has multiple dimensions "
208
+ "{}. ".format(action_space.shape)
209
+ + "Consider reshaping this into a single dimension, "
210
+ "using a custom action distribution, "
211
+ "using a Tuple action space, or the multi-agent API."
212
+ )
213
+ # TODO(sven): Check for bounds and return SquashedNormal, etc..
214
+ if dist_type is None:
215
+ return (
216
+ partial(
217
+ TorchDiagGaussian if framework == "torch" else DiagGaussian,
218
+ action_space=action_space,
219
+ ),
220
+ DiagGaussian.required_model_output_shape(action_space, config),
221
+ )
222
+ elif dist_type == "deterministic":
223
+ dist_cls = (
224
+ TorchDeterministic if framework == "torch" else Deterministic
225
+ )
226
+ # Discrete Space -> Categorical.
227
+ elif isinstance(action_space, Discrete):
228
+ if framework == "torch":
229
+ dist_cls = TorchCategorical
230
+ elif framework == "jax":
231
+ from ray.rllib.models.jax.jax_action_dist import JAXCategorical
232
+
233
+ dist_cls = JAXCategorical
234
+ else:
235
+ dist_cls = Categorical
236
+ # Tuple/Dict Spaces -> MultiAction.
237
+ elif dist_type in (
238
+ MultiActionDistribution,
239
+ TorchMultiActionDistribution,
240
+ ) or isinstance(action_space, (Tuple, Dict)):
241
+ return ModelCatalog._get_multi_action_distribution(
242
+ (
243
+ MultiActionDistribution
244
+ if framework == "tf"
245
+ else TorchMultiActionDistribution
246
+ ),
247
+ action_space,
248
+ config,
249
+ framework,
250
+ )
251
+ # Simplex -> Dirichlet.
252
+ elif isinstance(action_space, Simplex):
253
+ dist_cls = TorchDirichlet if framework == "torch" else Dirichlet
254
+ # MultiDiscrete -> MultiCategorical.
255
+ elif isinstance(action_space, MultiDiscrete):
256
+ dist_cls = (
257
+ TorchMultiCategorical if framework == "torch" else MultiCategorical
258
+ )
259
+ return partial(dist_cls, input_lens=action_space.nvec), int(
260
+ sum(action_space.nvec)
261
+ )
262
+ # Unknown type -> Error.
263
+ else:
264
+ raise NotImplementedError(
265
+ "Unsupported args: {} {}".format(action_space, dist_type)
266
+ )
267
+
268
+ return dist_cls, int(dist_cls.required_model_output_shape(action_space, config))
269
+
270
+ @staticmethod
271
+ @DeveloperAPI
272
+ def get_action_shape(
273
+ action_space: gym.Space, framework: str = "tf"
274
+ ) -> (np.dtype, List[int]):
275
+ """Returns action tensor dtype and shape for the action space.
276
+
277
+ Args:
278
+ action_space: Action space of the target gym env.
279
+ framework: The framework identifier. One of "tf" or "torch".
280
+
281
+ Returns:
282
+ (dtype, shape): Dtype and shape of the actions tensor.
283
+ """
284
+ dl_lib = torch if framework == "torch" else tf
285
+ if isinstance(action_space, Discrete):
286
+ return action_space.dtype, (None,)
287
+ elif isinstance(action_space, (Box, Simplex)):
288
+ if np.issubdtype(action_space.dtype, np.floating):
289
+ return dl_lib.float32, (None,) + action_space.shape
290
+ elif np.issubdtype(action_space.dtype, np.integer):
291
+ return dl_lib.int32, (None,) + action_space.shape
292
+ else:
293
+ raise ValueError("RLlib doesn't support non int or float box spaces")
294
+ elif isinstance(action_space, MultiDiscrete):
295
+ return action_space.dtype, (None,) + action_space.shape
296
+ elif isinstance(action_space, (Tuple, Dict)):
297
+ flat_action_space = flatten_space(action_space)
298
+ size = 0
299
+ all_discrete = True
300
+ for i in range(len(flat_action_space)):
301
+ if isinstance(flat_action_space[i], Discrete):
302
+ size += 1
303
+ else:
304
+ all_discrete = False
305
+ size += np.prod(flat_action_space[i].shape)
306
+ size = int(size)
307
+ return dl_lib.int32 if all_discrete else dl_lib.float32, (None, size)
308
+ else:
309
+ raise NotImplementedError(
310
+ "Action space {} not supported".format(action_space)
311
+ )
312
+
313
+ @staticmethod
314
+ @DeveloperAPI
315
+ def get_action_placeholder(
316
+ action_space: gym.Space, name: str = "action"
317
+ ) -> TensorType:
318
+ """Returns an action placeholder consistent with the action space
319
+
320
+ Args:
321
+ action_space: Action space of the target gym env.
322
+ name: An optional string to name the placeholder by.
323
+ Default: "action".
324
+
325
+ Returns:
326
+ action_placeholder: A placeholder for the actions
327
+ """
328
+ dtype, shape = ModelCatalog.get_action_shape(action_space, framework="tf")
329
+
330
+ return tf1.placeholder(dtype, shape=shape, name=name)
331
+
332
+ @staticmethod
333
+ @DeveloperAPI
334
+ def get_model_v2(
335
+ obs_space: gym.Space,
336
+ action_space: gym.Space,
337
+ num_outputs: int,
338
+ model_config: ModelConfigDict,
339
+ framework: str = "tf",
340
+ name: str = "default_model",
341
+ model_interface: type = None,
342
+ default_model: type = None,
343
+ **model_kwargs
344
+ ) -> ModelV2:
345
+ """Returns a suitable model compatible with given spaces and output.
346
+
347
+ Args:
348
+ obs_space: Observation space of the target gym env. This
349
+ may have an `original_space` attribute that specifies how to
350
+ unflatten the tensor into a ragged tensor.
351
+ action_space: Action space of the target gym env.
352
+ num_outputs: The size of the output vector of the model.
353
+ model_config: The "model" sub-config dict
354
+ within the Algorithm's config dict.
355
+ framework: One of "tf2", "tf", "torch", or "jax".
356
+ name: Name (scope) for the model.
357
+ model_interface: Interface required for the model
358
+ default_model: Override the default class for the model. This
359
+ only has an effect when not using a custom model
360
+ model_kwargs: Args to pass to the ModelV2 constructor
361
+
362
+ Returns:
363
+ model (ModelV2): Model to use for the policy.
364
+ """
365
+
366
+ # Validate the given config dict.
367
+ ModelCatalog._validate_config(
368
+ config=model_config, action_space=action_space, framework=framework
369
+ )
370
+
371
+ if model_config.get("custom_model"):
372
+ # Allow model kwargs to be overridden / augmented by
373
+ # custom_model_config.
374
+ customized_model_kwargs = dict(
375
+ model_kwargs, **model_config.get("custom_model_config", {})
376
+ )
377
+
378
+ if isinstance(model_config["custom_model"], type):
379
+ model_cls = model_config["custom_model"]
380
+ elif (
381
+ isinstance(model_config["custom_model"], str)
382
+ and "." in model_config["custom_model"]
383
+ ):
384
+ return from_config(
385
+ cls=model_config["custom_model"],
386
+ obs_space=obs_space,
387
+ action_space=action_space,
388
+ num_outputs=num_outputs,
389
+ model_config=customized_model_kwargs,
390
+ name=name,
391
+ )
392
+ else:
393
+ model_cls = _global_registry.get(
394
+ RLLIB_MODEL, model_config["custom_model"]
395
+ )
396
+
397
+ # Only allow ModelV2 or native keras Models.
398
+ if not issubclass(model_cls, ModelV2):
399
+ if framework not in ["tf", "tf2"] or not issubclass(
400
+ model_cls, tf.keras.Model
401
+ ):
402
+ raise ValueError(
403
+ "`model_cls` must be a ModelV2 sub-class, but is"
404
+ " {}!".format(model_cls)
405
+ )
406
+
407
+ logger.info("Wrapping {} as {}".format(model_cls, model_interface))
408
+ model_cls = ModelCatalog._wrap_if_needed(model_cls, model_interface)
409
+
410
+ if framework in ["tf2", "tf"]:
411
+ # Try wrapping custom model with LSTM/attention, if required.
412
+ if model_config.get("use_lstm") or model_config.get("use_attention"):
413
+ from ray.rllib.models.tf.attention_net import (
414
+ AttentionWrapper,
415
+ )
416
+ from ray.rllib.models.tf.recurrent_net import (
417
+ LSTMWrapper,
418
+ )
419
+
420
+ wrapped_cls = model_cls
421
+ forward = wrapped_cls.forward
422
+ model_cls = ModelCatalog._wrap_if_needed(
423
+ wrapped_cls,
424
+ LSTMWrapper
425
+ if model_config.get("use_lstm")
426
+ else AttentionWrapper,
427
+ )
428
+ model_cls._wrapped_forward = forward
429
+
430
+ # Obsolete: Track and warn if vars were created but not
431
+ # registered. Only still do this, if users do register their
432
+ # variables. If not (which they shouldn't), don't check here.
433
+ created = set()
434
+
435
+ def track_var_creation(next_creator, **kw):
436
+ v = next_creator(**kw)
437
+ created.add(v.ref())
438
+ return v
439
+
440
+ with tf.variable_creator_scope(track_var_creation):
441
+ if issubclass(model_cls, tf.keras.Model):
442
+ instance = model_cls(
443
+ input_space=obs_space,
444
+ action_space=action_space,
445
+ num_outputs=num_outputs,
446
+ name=name,
447
+ **customized_model_kwargs,
448
+ )
449
+ else:
450
+ # Try calling with kwargs first (custom ModelV2 should
451
+ # accept these as kwargs, not get them from
452
+ # config["custom_model_config"] anymore).
453
+ try:
454
+ instance = model_cls(
455
+ obs_space,
456
+ action_space,
457
+ num_outputs,
458
+ model_config,
459
+ name,
460
+ **customized_model_kwargs,
461
+ )
462
+ except TypeError as e:
463
+ # Keyword error: Try old way w/o kwargs.
464
+ if "__init__() got an unexpected " in e.args[0]:
465
+ instance = model_cls(
466
+ obs_space,
467
+ action_space,
468
+ num_outputs,
469
+ model_config,
470
+ name,
471
+ **model_kwargs,
472
+ )
473
+ logger.warning(
474
+ "Custom ModelV2 should accept all custom "
475
+ "options as **kwargs, instead of expecting"
476
+ " them in config['custom_model_config']!"
477
+ )
478
+ # Other error -> re-raise.
479
+ else:
480
+ raise e
481
+
482
+ # User still registered TFModelV2's variables: Check, whether
483
+ # ok.
484
+ registered = []
485
+ if not isinstance(instance, tf.keras.Model):
486
+ registered = set(instance.var_list)
487
+ if len(registered) > 0:
488
+ not_registered = set()
489
+ for var in created:
490
+ if var not in registered:
491
+ not_registered.add(var)
492
+ if not_registered:
493
+ raise ValueError(
494
+ "It looks like you are still using "
495
+ "`{}.register_variables()` to register your "
496
+ "model's weights. This is no longer required, but "
497
+ "if you are still calling this method at least "
498
+ "once, you must make sure to register all created "
499
+ "variables properly. The missing variables are {},"
500
+ " and you only registered {}. "
501
+ "Did you forget to call `register_variables()` on "
502
+ "some of the variables in question?".format(
503
+ instance, not_registered, registered
504
+ )
505
+ )
506
+ elif framework == "torch":
507
+ # Try wrapping custom model with LSTM/attention, if required.
508
+ if model_config.get("use_lstm") or model_config.get("use_attention"):
509
+ from ray.rllib.models.torch.attention_net import AttentionWrapper
510
+ from ray.rllib.models.torch.recurrent_net import LSTMWrapper
511
+
512
+ wrapped_cls = model_cls
513
+ forward = wrapped_cls.forward
514
+ model_cls = ModelCatalog._wrap_if_needed(
515
+ wrapped_cls,
516
+ LSTMWrapper
517
+ if model_config.get("use_lstm")
518
+ else AttentionWrapper,
519
+ )
520
+ model_cls._wrapped_forward = forward
521
+
522
+ # PyTorch automatically tracks nn.Modules inside the parent
523
+ # nn.Module's constructor.
524
+ # Try calling with kwargs first (custom ModelV2 should
525
+ # accept these as kwargs, not get them from
526
+ # config["custom_model_config"] anymore).
527
+ try:
528
+ instance = model_cls(
529
+ obs_space,
530
+ action_space,
531
+ num_outputs,
532
+ model_config,
533
+ name,
534
+ **customized_model_kwargs,
535
+ )
536
+ except TypeError as e:
537
+ # Keyword error: Try old way w/o kwargs.
538
+ if "__init__() got an unexpected " in e.args[0]:
539
+ instance = model_cls(
540
+ obs_space,
541
+ action_space,
542
+ num_outputs,
543
+ model_config,
544
+ name,
545
+ **model_kwargs,
546
+ )
547
+ logger.warning(
548
+ "Custom ModelV2 should accept all custom "
549
+ "options as **kwargs, instead of expecting"
550
+ " them in config['custom_model_config']!"
551
+ )
552
+ # Other error -> re-raise.
553
+ else:
554
+ raise e
555
+ else:
556
+ raise NotImplementedError(
557
+ "`framework` must be 'tf2|tf|torch', but is "
558
+ "{}!".format(framework)
559
+ )
560
+
561
+ return instance
562
+
563
+ # Find a default TFModelV2 and wrap with model_interface.
564
+ if framework in ["tf", "tf2"]:
565
+ v2_class = None
566
+ # Try to get a default v2 model.
567
+ if not model_config.get("custom_model"):
568
+ v2_class = default_model or ModelCatalog._get_v2_model_class(
569
+ obs_space, model_config, framework=framework
570
+ )
571
+
572
+ if not v2_class:
573
+ raise ValueError("ModelV2 class could not be determined!")
574
+
575
+ if model_config.get("use_lstm") or model_config.get("use_attention"):
576
+ from ray.rllib.models.tf.attention_net import (
577
+ AttentionWrapper,
578
+ )
579
+ from ray.rllib.models.tf.recurrent_net import (
580
+ LSTMWrapper,
581
+ )
582
+
583
+ wrapped_cls = v2_class
584
+ if model_config.get("use_lstm"):
585
+ v2_class = ModelCatalog._wrap_if_needed(wrapped_cls, LSTMWrapper)
586
+ v2_class._wrapped_forward = wrapped_cls.forward
587
+ else:
588
+ v2_class = ModelCatalog._wrap_if_needed(
589
+ wrapped_cls, AttentionWrapper
590
+ )
591
+ v2_class._wrapped_forward = wrapped_cls.forward
592
+
593
+ # Wrap in the requested interface.
594
+ wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)
595
+
596
+ if issubclass(wrapper, tf.keras.Model):
597
+ model = wrapper(
598
+ input_space=obs_space,
599
+ action_space=action_space,
600
+ num_outputs=num_outputs,
601
+ name=name,
602
+ **dict(model_kwargs, **model_config),
603
+ )
604
+ return model
605
+
606
+ return wrapper(
607
+ obs_space, action_space, num_outputs, model_config, name, **model_kwargs
608
+ )
609
+
610
+ # Find a default TorchModelV2 and wrap with model_interface.
611
+ elif framework == "torch":
612
+ # Try to get a default v2 model.
613
+ if not model_config.get("custom_model"):
614
+ v2_class = default_model or ModelCatalog._get_v2_model_class(
615
+ obs_space, model_config, framework=framework
616
+ )
617
+
618
+ if not v2_class:
619
+ raise ValueError("ModelV2 class could not be determined!")
620
+
621
+ if model_config.get("use_lstm") or model_config.get("use_attention"):
622
+ from ray.rllib.models.torch.attention_net import AttentionWrapper
623
+ from ray.rllib.models.torch.recurrent_net import LSTMWrapper
624
+
625
+ wrapped_cls = v2_class
626
+ forward = wrapped_cls.forward
627
+ if model_config.get("use_lstm"):
628
+ v2_class = ModelCatalog._wrap_if_needed(wrapped_cls, LSTMWrapper)
629
+ else:
630
+ v2_class = ModelCatalog._wrap_if_needed(
631
+ wrapped_cls, AttentionWrapper
632
+ )
633
+
634
+ v2_class._wrapped_forward = forward
635
+
636
+ # Wrap in the requested interface.
637
+ wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)
638
+ return wrapper(
639
+ obs_space, action_space, num_outputs, model_config, name, **model_kwargs
640
+ )
641
+
642
+ # Find a default JAXModelV2 and wrap with model_interface.
643
+ elif framework == "jax":
644
+ v2_class = default_model or ModelCatalog._get_v2_model_class(
645
+ obs_space, model_config, framework=framework
646
+ )
647
+ # Wrap in the requested interface.
648
+ wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)
649
+ return wrapper(
650
+ obs_space, action_space, num_outputs, model_config, name, **model_kwargs
651
+ )
652
+ else:
653
+ raise NotImplementedError(
654
+ "`framework` must be 'tf2|tf|torch', but is " "{}!".format(framework)
655
+ )
656
+
657
+ @staticmethod
658
+ @DeveloperAPI
659
+ def get_preprocessor(
660
+ env: gym.Env, options: Optional[dict] = None, include_multi_binary: bool = False
661
+ ) -> Preprocessor:
662
+ """Returns a suitable preprocessor for the given env.
663
+
664
+ This is a wrapper for get_preprocessor_for_space().
665
+ """
666
+
667
+ return ModelCatalog.get_preprocessor_for_space(
668
+ env.observation_space, options, include_multi_binary
669
+ )
670
+
671
+ @staticmethod
672
+ @DeveloperAPI
673
+ def get_preprocessor_for_space(
674
+ observation_space: gym.Space,
675
+ options: dict = None,
676
+ include_multi_binary: bool = False,
677
+ ) -> Preprocessor:
678
+ """Returns a suitable preprocessor for the given observation space.
679
+
680
+ Args:
681
+ observation_space: The input observation space.
682
+ options: Options to pass to the preprocessor.
683
+ include_multi_binary: Whether to include the MultiBinaryPreprocessor in
684
+ the possible preprocessors returned by this method.
685
+
686
+ Returns:
687
+ preprocessor: Preprocessor for the observations.
688
+ """
689
+
690
+ options = options or MODEL_DEFAULTS
691
+ for k in options.keys():
692
+ if k not in MODEL_DEFAULTS:
693
+ raise Exception(
694
+ "Unknown config key `{}`, all keys: {}".format(
695
+ k, list(MODEL_DEFAULTS)
696
+ )
697
+ )
698
+
699
+ cls = get_preprocessor(
700
+ observation_space, include_multi_binary=include_multi_binary
701
+ )
702
+ prep = cls(observation_space, options)
703
+
704
+ if prep is not None:
705
+ logger.debug(
706
+ "Created preprocessor {}: {} -> {}".format(
707
+ prep, observation_space, prep.shape
708
+ )
709
+ )
710
+ return prep
711
+
712
+ @staticmethod
713
+ @PublicAPI
714
+ def register_custom_model(model_name: str, model_class: type) -> None:
715
+ """Register a custom model class by name.
716
+
717
+ The model can be later used by specifying {"custom_model": model_name}
718
+ in the model config.
719
+
720
+ Args:
721
+ model_name: Name to register the model under.
722
+ model_class: Python class of the model.
723
+ """
724
+ if tf is not None:
725
+ if issubclass(model_class, tf.keras.Model):
726
+ deprecation_warning(old="register_custom_model", error=False)
727
+ _global_registry.register(RLLIB_MODEL, model_name, model_class)
728
+
729
+ @staticmethod
730
+ @PublicAPI
731
+ def register_custom_action_dist(
732
+ action_dist_name: str, action_dist_class: type
733
+ ) -> None:
734
+ """Register a custom action distribution class by name.
735
+
736
+ The model can be later used by specifying
737
+ {"custom_action_dist": action_dist_name} in the model config.
738
+
739
+ Args:
740
+ model_name: Name to register the action distribution under.
741
+ model_class: Python class of the action distribution.
742
+ """
743
+ _global_registry.register(
744
+ RLLIB_ACTION_DIST, action_dist_name, action_dist_class
745
+ )
746
+
747
+ @staticmethod
748
+ def _wrap_if_needed(model_cls: type, model_interface: type) -> type:
749
+ if not model_interface or issubclass(model_cls, model_interface):
750
+ return model_cls
751
+
752
+ assert issubclass(model_cls, ModelV2), model_cls
753
+
754
+ class wrapper(model_interface, model_cls):
755
+ pass
756
+
757
+ name = "{}_as_{}".format(model_cls.__name__, model_interface.__name__)
758
+ wrapper.__name__ = name
759
+ wrapper.__qualname__ = name
760
+
761
+ return wrapper
762
+
763
+ @staticmethod
764
+ def _get_v2_model_class(
765
+ input_space: gym.Space, model_config: ModelConfigDict, framework: str = "tf"
766
+ ) -> Type[ModelV2]:
767
+ VisionNet = None
768
+ ComplexNet = None
769
+
770
+ if framework in ["tf2", "tf"]:
771
+ from ray.rllib.models.tf.fcnet import (
772
+ FullyConnectedNetwork as FCNet,
773
+ )
774
+ from ray.rllib.models.tf.visionnet import (
775
+ VisionNetwork as VisionNet,
776
+ )
777
+ from ray.rllib.models.tf.complex_input_net import (
778
+ ComplexInputNetwork as ComplexNet,
779
+ )
780
+ elif framework == "torch":
781
+ from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as FCNet
782
+ from ray.rllib.models.torch.visionnet import VisionNetwork as VisionNet
783
+ from ray.rllib.models.torch.complex_input_net import (
784
+ ComplexInputNetwork as ComplexNet,
785
+ )
786
+ elif framework == "jax":
787
+ from ray.rllib.models.jax.fcnet import FullyConnectedNetwork as FCNet
788
+ else:
789
+ raise ValueError(
790
+ "framework={} not supported in `ModelCatalog._get_v2_model_"
791
+ "class`!".format(framework)
792
+ )
793
+
794
+ orig_space = (
795
+ input_space
796
+ if not hasattr(input_space, "original_space")
797
+ else input_space.original_space
798
+ )
799
+
800
+ # `input_space` is 3D Box -> VisionNet.
801
+ if isinstance(input_space, Box) and len(input_space.shape) == 3:
802
+ if framework == "jax":
803
+ raise NotImplementedError("No non-FC default net for JAX yet!")
804
+ return VisionNet
805
+ # `input_space` is 1D Box -> FCNet.
806
+ elif (
807
+ isinstance(input_space, Box)
808
+ and len(input_space.shape) == 1
809
+ and (
810
+ not isinstance(orig_space, (Dict, Tuple))
811
+ or not any(
812
+ isinstance(s, Box) and len(s.shape) >= 2
813
+ for s in flatten_space(orig_space)
814
+ )
815
+ )
816
+ ):
817
+ return FCNet
818
+ # Complex (Dict, Tuple, 2D Box (flatten), Discrete, MultiDiscrete).
819
+ else:
820
+ if framework == "jax":
821
+ raise NotImplementedError("No non-FC default net for JAX yet!")
822
+ return ComplexNet
823
+
824
+ @staticmethod
825
+ def _get_multi_action_distribution(dist_class, action_space, config, framework):
826
+ # In case the custom distribution is a child of MultiActionDistr.
827
+ # If users want to completely ignore the suggested child
828
+ # distributions, they should simply do so in their custom class'
829
+ # constructor.
830
+ if issubclass(
831
+ dist_class, (MultiActionDistribution, TorchMultiActionDistribution)
832
+ ):
833
+ flat_action_space = flatten_space(action_space)
834
+ child_dists_and_in_lens = tree.map_structure(
835
+ lambda s: ModelCatalog.get_action_dist(s, config, framework=framework),
836
+ flat_action_space,
837
+ )
838
+ child_dists = [e[0] for e in child_dists_and_in_lens]
839
+ input_lens = [int(e[1]) for e in child_dists_and_in_lens]
840
+ return (
841
+ partial(
842
+ dist_class,
843
+ action_space=action_space,
844
+ child_distributions=child_dists,
845
+ input_lens=input_lens,
846
+ ),
847
+ int(sum(input_lens)),
848
+ )
849
+ return dist_class, dist_class.required_model_output_shape(action_space, config)
850
+
851
+ @staticmethod
852
+ def _validate_config(
853
+ config: ModelConfigDict, action_space: gym.spaces.Space, framework: str
854
+ ) -> None:
855
+ """Validates a given model config dict.
856
+
857
+ Args:
858
+ config: The "model" sub-config dict
859
+ within the Algorithm's config dict.
860
+ action_space: The action space of the model, whose config are
861
+ validated.
862
+ framework: One of "jax", "tf2", "tf", or "torch".
863
+
864
+ Raises:
865
+ ValueError: If something is wrong with the given config.
866
+ """
867
+ # Soft-deprecate custom preprocessors.
868
+ if config.get("custom_preprocessor") is not None:
869
+ deprecation_warning(
870
+ old="model.custom_preprocessor",
871
+ new="gym.ObservationWrapper around your env or handle complex "
872
+ "inputs inside your Model",
873
+ error=True,
874
+ )
875
+
876
+ if config.get("use_attention") and config.get("use_lstm"):
877
+ raise ValueError(
878
+ "Only one of `use_lstm` or `use_attention` may be set to True!"
879
+ )
880
+
881
+ # For complex action spaces, only allow prev action inputs to
882
+ # LSTMs and attention nets iff `_disable_action_flattening=True`.
883
+ # TODO: `_disable_action_flattening=True` will be the default in
884
+ # the future.
885
+ if (
886
+ (
887
+ config.get("lstm_use_prev_action")
888
+ or config.get("attention_use_n_prev_actions", 0) > 0
889
+ )
890
+ and not config.get("_disable_action_flattening")
891
+ and isinstance(action_space, (Tuple, Dict))
892
+ ):
893
+ raise ValueError(
894
+ "For your complex action space (Tuple|Dict) and your model's "
895
+ "`prev-actions` setup of your model, you must set "
896
+ "`_disable_action_flattening=True` in your main config dict!"
897
+ )
898
+
899
+ if framework == "jax":
900
+ if config.get("use_attention"):
901
+ raise ValueError(
902
+ "`use_attention` not available for framework=jax so far!"
903
+ )
904
+ elif config.get("use_lstm"):
905
+ raise ValueError("`use_lstm` not available for framework=jax so far!")
deepseek/lib/python3.10/site-packages/ray/rllib/models/distributions.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """This is the next version of action distribution base class."""
2
+ from typing import Tuple
3
+ import gymnasium as gym
4
+ import abc
5
+
6
+ from ray.rllib.utils.annotations import ExperimentalAPI
7
+ from ray.rllib.utils.typing import TensorType, Union
8
+ from ray.rllib.utils.annotations import override
9
+
10
+
11
@ExperimentalAPI
class Distribution(abc.ABC):
    """The base class for distribution over a random variable.

    Examples:

    .. testcode::

        import torch
        from ray.rllib.core.models.configs import MLPHeadConfig
        from ray.rllib.models.torch.torch_distributions import TorchCategorical

        model = MLPHeadConfig(input_dims=[1]).build(framework="torch")

        # Create an action distribution from model logits
        action_logits = model(torch.Tensor([[1]]))
        action_dist = TorchCategorical.from_logits(action_logits)
        action = action_dist.sample()

        # Create another distribution from a dummy Tensor
        action_dist2 = TorchCategorical.from_logits(torch.Tensor([0]))

        # Compute some common metrics
        logp = action_dist.logp(action)
        kl = action_dist.kl(action_dist2)
        entropy = action_dist.entropy()
    """

    @abc.abstractmethod
    def sample(
        self,
        *,
        sample_shape: Tuple[int, ...] = None,
        return_logp: bool = False,
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        """Draw a sample from the distribution.

        Args:
            sample_shape: The shape of the sample to draw.
            return_logp: Whether to return the logp of the sampled values.
            **kwargs: Forward compatibility placeholder.

        Returns:
            The sampled values. If return_logp is True, returns a tuple of the
            sampled values and its logp.
        """

    @abc.abstractmethod
    def rsample(
        self,
        *,
        sample_shape: Tuple[int, ...] = None,
        return_logp: bool = False,
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        """Draw a re-parameterized sample from the action distribution.

        If this method is implemented, we can take gradients of samples w.r.t. the
        distribution parameters.

        Args:
            sample_shape: The shape of the sample to draw.
            return_logp: Whether to return the logp of the sampled values.
            **kwargs: Forward compatibility placeholder.

        Returns:
            The sampled values. If return_logp is True, returns a tuple of the
            sampled values and its logp.
        """

    @abc.abstractmethod
    def logp(self, value: TensorType, **kwargs) -> TensorType:
        """The log-likelihood of the distribution computed at `value`

        Args:
            value: The value to compute the log-likelihood at.
            **kwargs: Forward compatibility placeholder.

        Returns:
            The log-likelihood of the value.
        """

    @abc.abstractmethod
    def kl(self, other: "Distribution", **kwargs) -> TensorType:
        """The KL-divergence between two distributions.

        Args:
            other: The other distribution.
            **kwargs: Forward compatibility placeholder.

        Returns:
            The KL-divergence between the two distributions.
        """

    @abc.abstractmethod
    def entropy(self, **kwargs) -> TensorType:
        """The entropy of the distribution.

        Args:
            **kwargs: Forward compatibility placeholder.

        Returns:
            The entropy of the distribution.
        """

    @staticmethod
    @abc.abstractmethod
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        """Returns the required length of an input parameter tensor.

        Args:
            space: The space this distribution will be used for,
                whose shape attributes will be used to determine the required shape of
                the input parameter tensor.
            **kwargs: Forward compatibility placeholder.

        Returns:
            size of the required input vector (minus leading batch dimension).
        """

    @classmethod
    def from_logits(cls, logits: TensorType, **kwargs) -> "Distribution":
        """Creates a Distribution from logits.

        The caller does not need to have knowledge of the distribution class in order
        to create it and sample from it. The passed batched logits vectors might be
        split up and are passed to the distribution class' constructor as kwargs.

        Args:
            logits: The logits to create the distribution from.
            **kwargs: Forward compatibility placeholder.

        Returns:
            The created distribution.

        .. testcode::

            import numpy as np
            from ray.rllib.models.distributions import Distribution

            class Uniform(Distribution):
                def __init__(self, lower, upper):
                    self.lower = lower
                    self.upper = upper

                def sample(self):
                    return self.lower + (self.upper - self.lower) * np.random.rand()

                def logp(self, x):
                    ...

                def kl(self, other):
                    ...

                def entropy(self):
                    ...

                @staticmethod
                def required_input_dim(space):
                    ...

                def rsample(self):
                    ...

                @classmethod
                def from_logits(cls, logits, **kwargs):
                    return Uniform(logits[:, 0], logits[:, 1])

            logits = np.array([[0.0, 1.0], [2.0, 3.0]])
            my_dist = Uniform.from_logits(logits)
            sample = my_dist.sample()
        """
        raise NotImplementedError

    @classmethod
    def get_partial_dist_cls(
        parent_cls: "Distribution", **partial_kwargs
    ) -> "Distribution":
        """Returns a partial child of this Distribution class.

        This is useful if inputs needed to instantiate the Distribution from logits
        are available, but the logits are not. The returned class behaves like
        `parent_cls`, with `partial_kwargs` pre-bound into `from_logits()` and
        `required_input_dim()`.
        """

        class DistributionPartial(parent_cls):
            @staticmethod
            def _merge_kwargs(**kwargs):
                """Checks if keys in kwargs don't clash with partial_kwargs."""
                overlap = set(kwargs) & set(partial_kwargs)
                if overlap:
                    raise ValueError(
                        f"Cannot override the following kwargs: {overlap}.\n"
                        f"This is because they were already set at the time this "
                        f"partial class was defined."
                    )
                merged_kwargs = {**partial_kwargs, **kwargs}
                return merged_kwargs

            @classmethod
            @override(parent_cls)
            def required_input_dim(cls, space: gym.Space, **kwargs) -> int:
                merged_kwargs = cls._merge_kwargs(**kwargs)
                assert space == merged_kwargs["space"]
                return parent_cls.required_input_dim(**merged_kwargs)

            @classmethod
            @override(parent_cls)
            def from_logits(
                cls,
                logits: TensorType,
                **kwargs,
            ) -> "DistributionPartial":
                merged_kwargs = cls._merge_kwargs(**kwargs)
                distribution = parent_cls.from_logits(logits, **merged_kwargs)
                # Replace the class of the returned distribution with this partial
                # This makes it so that we can use type() on this distribution and
                # get back the partial class.
                distribution.__class__ = cls
                return distribution

        # Substitute name of this partial class to match the original class.
        # Fix: use the parent's `__name__`, not the class object itself (the
        # latter embeds the full repr, e.g. "<class 'mod.Cls'>Partial").
        DistributionPartial.__name__ = f"{parent_cls.__name__}Partial"

        return DistributionPartial

    def to_deterministic(self) -> "Distribution":
        """Returns a deterministic equivalent for this distribution.

        Specifically, the deterministic equivalent for a Categorical distribution is a
        Deterministic distribution that selects the action with maximum logit value.
        Generally, the choice of the deterministic replacement is informed by
        established conventions.
        """
        raise NotImplementedError
deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/complex_input_net.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from gymnasium.spaces import Box, Discrete, MultiDiscrete
2
+ import numpy as np
3
+ import tree # pip install dm_tree
4
+
5
+ from ray.rllib.models.torch.misc import (
6
+ normc_initializer as torch_normc_initializer,
7
+ SlimFC,
8
+ )
9
+ from ray.rllib.models.catalog import ModelCatalog
10
+ from ray.rllib.models.modelv2 import ModelV2, restore_original_dimensions
11
+ from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
12
+ from ray.rllib.models.utils import get_filter_config
13
+ from ray.rllib.policy.sample_batch import SampleBatch
14
+ from ray.rllib.utils.annotations import OldAPIStack, override
15
+ from ray.rllib.utils.framework import try_import_torch
16
+ from ray.rllib.utils.spaces.space_utils import flatten_space
17
+ from ray.rllib.utils.torch_utils import one_hot
18
+
19
+ torch, nn = try_import_torch()
20
+
21
+
22
@OldAPIStack
class ComplexInputNetwork(TorchModelV2, nn.Module):
    """TorchModelV2 concat'ing CNN outputs to flat input(s), followed by FC(s).

    Note: This model should be used for complex (Dict or Tuple) observation
    spaces that have one or more image components.

    The data flow is as follows:

    `obs` (e.g. Tuple[img0, img1, discrete0]) -> `CNN0 + CNN1 + ONE-HOT`
    `CNN0 + CNN1 + ONE-HOT` -> concat all flat outputs -> `out`
    `out` -> (optional) FC-stack -> `out2`
    `out2` -> action (logits) and value heads.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        # Recover the structured (pre-flattening) observation space, if any.
        self.original_space = (
            obs_space.original_space
            if hasattr(obs_space, "original_space")
            else obs_space
        )

        # With the preprocessor API disabled, observations already arrive in
        # their original (structured) form.
        self.processed_obs_space = (
            self.original_space
            if model_config.get("_disable_preprocessor_api")
            else obs_space
        )

        nn.Module.__init__(self)
        TorchModelV2.__init__(
            self, self.original_space, action_space, num_outputs, model_config, name
        )

        self.flattened_input_space = flatten_space(self.original_space)

        # Atari type CNNs or IMPALA type CNNs (with residual layers)?
        # self.cnn_type = self.model_config["custom_model_config"].get(
        #     "conv_type", "atari")

        # Build one sub-model per flattened observation component:
        # - 3D Box -> CNN
        # - Discrete/MultiDiscrete -> one-hot encode + FC
        # - everything else -> flatten + FC
        self.cnns = nn.ModuleDict()
        self.one_hot = nn.ModuleDict()
        self.flatten_dims = {}
        self.flatten = nn.ModuleDict()
        concat_size = 0
        for idx, space in enumerate(self.flattened_input_space):
            key = str(idx)
            # Image space.
            if isinstance(space, Box) and len(space.shape) == 3:
                cnn_config = {
                    "conv_filters": model_config["conv_filters"]
                    if "conv_filters" in model_config
                    else get_filter_config(space.shape),
                    "conv_activation": model_config.get("conv_activation"),
                    "post_fcnet_hiddens": [],
                }
                # if self.cnn_type == "atari":
                self.cnns[key] = ModelCatalog.get_model_v2(
                    space,
                    action_space,
                    num_outputs=None,
                    model_config=cnn_config,
                    framework="torch",
                    name="cnn_{}".format(key),
                )
                # TODO (sven): add IMPALA-style option.
                concat_size += self.cnns[key].num_outputs
                self.add_module("cnn_{}".format(key), self.cnns[key])
            # Discrete|MultiDiscrete inputs -> One-hot encode.
            elif isinstance(space, (Discrete, MultiDiscrete)):
                if isinstance(space, Discrete):
                    size = space.n
                else:
                    size = np.sum(space.nvec)
                fc_config = {
                    "fcnet_hiddens": model_config["fcnet_hiddens"],
                    "fcnet_activation": model_config.get("fcnet_activation"),
                    "post_fcnet_hiddens": [],
                }
                self.one_hot[key] = ModelCatalog.get_model_v2(
                    Box(-1.0, 1.0, (size,), np.float32),
                    action_space,
                    num_outputs=None,
                    model_config=fc_config,
                    framework="torch",
                    name="one_hot_{}".format(key),
                )
                concat_size += self.one_hot[key].num_outputs
                self.add_module("one_hot_{}".format(key), self.one_hot[key])
            # Everything else (1D Box).
            else:
                size = int(np.prod(space.shape))
                fc_config = {
                    "fcnet_hiddens": model_config["fcnet_hiddens"],
                    "fcnet_activation": model_config.get("fcnet_activation"),
                    "post_fcnet_hiddens": [],
                }
                self.flatten[key] = ModelCatalog.get_model_v2(
                    Box(-1.0, 1.0, (size,), np.float32),
                    action_space,
                    num_outputs=None,
                    model_config=fc_config,
                    framework="torch",
                    name="flatten_{}".format(key),
                )
                self.flatten_dims[key] = size
                concat_size += self.flatten[key].num_outputs
                self.add_module("flatten_{}".format(key), self.flatten[key])

        # Optional post-concat FC-stack.
        post_fc_stack_config = {
            "fcnet_hiddens": model_config.get("post_fcnet_hiddens", []),
            "fcnet_activation": model_config.get("post_fcnet_activation", "relu"),
        }
        self.post_fc_stack = ModelCatalog.get_model_v2(
            Box(float("-inf"), float("inf"), shape=(concat_size,), dtype=np.float32),
            self.action_space,
            None,
            post_fc_stack_config,
            framework="torch",
            name="post_fc_stack",
        )

        # Actions and value heads (only built if `num_outputs` is given;
        # otherwise this model acts as a plain feature extractor).
        self.logits_layer = None
        self.value_layer = None
        self._value_out = None

        if num_outputs:
            # Action-distribution head.
            self.logits_layer = SlimFC(
                in_size=self.post_fc_stack.num_outputs,
                out_size=num_outputs,
                activation_fn=None,
                initializer=torch_normc_initializer(0.01),
            )
            # Create the value branch model.
            self.value_layer = SlimFC(
                in_size=self.post_fc_stack.num_outputs,
                out_size=1,
                activation_fn=None,
                initializer=torch_normc_initializer(0.01),
            )
        else:
            self.num_outputs = concat_size

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        # Restore the structured obs, unless they already come unflattened.
        if SampleBatch.OBS in input_dict and "obs_flat" in input_dict:
            orig_obs = input_dict[SampleBatch.OBS]
        else:
            orig_obs = restore_original_dimensions(
                input_dict[SampleBatch.OBS], self.processed_obs_space, tensorlib="torch"
            )
        # Push observations through the different components
        # (CNNs, one-hot + FC, etc..).
        outs = []
        for idx, component in enumerate(tree.flatten(orig_obs)):
            key = str(idx)
            if key in self.cnns:
                cnn_out, _ = self.cnns[key](SampleBatch({SampleBatch.OBS: component}))
                outs.append(cnn_out)
            elif key in self.one_hot:
                # Integer-typed inputs still need to be one-hot encoded here.
                if component.dtype in [
                    torch.int8,
                    torch.int16,
                    torch.int32,
                    torch.int64,
                    torch.uint8,
                ]:
                    one_hot_in = {
                        SampleBatch.OBS: one_hot(
                            component, self.flattened_input_space[idx]
                        )
                    }
                else:
                    one_hot_in = {SampleBatch.OBS: component}
                one_hot_out, _ = self.one_hot[key](SampleBatch(one_hot_in))
                outs.append(one_hot_out)
            else:
                nn_out, _ = self.flatten[key](
                    SampleBatch(
                        {
                            SampleBatch.OBS: torch.reshape(
                                component, [-1, self.flatten_dims[key]]
                            )
                        }
                    )
                )
                outs.append(nn_out)

        # Concat all outputs and the non-image inputs.
        out = torch.cat(outs, dim=1)
        # Push through (optional) FC-stack (this may be an empty stack).
        out, _ = self.post_fc_stack(SampleBatch({SampleBatch.OBS: out}))

        # No logits/value branches.
        if self.logits_layer is None:
            return out, []

        # Logits- and value branches.
        logits, values = self.logits_layer(out), self.value_layer(out)
        self._value_out = torch.reshape(values, [-1])
        return logits, []

    @override(ModelV2)
    def value_function(self):
        return self._value_out
deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/convtranspose2d_stack.cpython-310.pyc ADDED
Binary file (2.77 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/gru_gate.cpython-310.pyc ADDED
Binary file (2.02 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/relative_multi_head_attention.cpython-310.pyc ADDED
Binary file (5.39 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/multi_head_attention.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ [1] - Attention Is All You Need - Vaswani, Jones, Shazeer, Parmar,
3
+ Uszkoreit, Gomez, Kaiser - Google Brain/Research, U Toronto - 2017.
4
+ https://arxiv.org/pdf/1706.03762.pdf
5
+ """
6
+ from ray.rllib.utils.framework import try_import_torch
7
+ from ray.rllib.models.torch.misc import SlimFC
8
+ from ray.rllib.utils.annotations import OldAPIStack
9
+ from ray.rllib.utils.torch_utils import sequence_mask
10
+ from ray.rllib.utils.framework import TensorType
11
+
12
+ torch, nn = try_import_torch()
13
+
14
+
15
@OldAPIStack
class MultiHeadAttention(nn.Module):
    """A multi-head attention layer described in [1]."""

    def __init__(
        self, in_dim: int, out_dim: int, num_heads: int, head_dim: int, **kwargs
    ):
        """Initializes a MultiHeadAttention module.

        Args:
            in_dim: Dimension of input.
            out_dim: Dimension of output.
            num_heads: Number of attention heads.
            head_dim: Output dimension of each attention head.
        """
        super().__init__(**kwargs)

        # No bias or non-linearity.
        self._num_heads = num_heads
        self._head_dim = head_dim
        # Single projection producing queries, keys, and values in one pass.
        self._qkv_layer = SlimFC(
            in_size=in_dim, out_size=3 * num_heads * head_dim, use_bias=False
        )
        # Final projection of the concatenated per-head outputs.
        self._linear_layer = SlimFC(
            in_size=num_heads * head_dim, out_size=out_dim, use_bias=False
        )

    def forward(self, inputs: TensorType) -> TensorType:
        """Computes causally masked multi-head attention over `inputs`.

        Args:
            inputs: Input tensor; dim 1 is treated as the sequence dimension.

        Returns:
            The attention output (same leading dims, last dim = out_dim).
        """
        L = list(inputs.size())[1]  # length of segment
        H = self._num_heads  # number of attention heads
        D = self._head_dim  # attention head dimension

        qkv = self._qkv_layer(inputs)

        queries, keys, values = torch.chunk(input=qkv, chunks=3, dim=-1)
        queries = queries[:, -L:]  # only query based on the segment

        queries = torch.reshape(queries, [-1, L, H, D])
        keys = torch.reshape(keys, [-1, L, H, D])
        values = torch.reshape(values, [-1, L, H, D])

        # Scaled dot-product attention scores.
        score = torch.einsum("bihd,bjhd->bijh", queries, keys)
        score = score / D**0.5

        # Causal mask of the same length as the sequence.
        # Fix: create the length tensor on the input's device; previously the
        # mask was always built on the CPU, making `score * mask` fail for
        # CUDA inputs.
        mask = sequence_mask(
            torch.arange(1, L + 1, device=inputs.device), dtype=score.dtype
        )
        mask = mask[None, :, :, None]
        mask = mask.float()

        # Push masked-out scores towards -inf before the softmax.
        masked_score = score * mask + 1e30 * (mask - 1.0)
        wmat = nn.functional.softmax(masked_score, dim=2)

        # Weighted sum of the values, then re-merge the heads.
        out = torch.einsum("bijh,bjhd->bihd", wmat, values)
        shape = list(out.size())[:2] + [H * D]
        out = torch.reshape(out, shape)
        return self._linear_layer(out)
deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/noisy_layer.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from ray.rllib.models.utils import get_activation_fn
4
+ from ray.rllib.utils.framework import try_import_torch, TensorType
5
+
6
+ torch, nn = try_import_torch()
7
+
8
+
9
class NoisyLayer(nn.Module):
    r"""A Layer that adds learnable Noise to some previous layer's outputs.

    Consists of:
    - a common dense layer: y = w^{T}x + b
    - a noisy layer: y = (w + \epsilon_w*\sigma_w)^{T}x +
      (b+\epsilon_b*\sigma_b)
    , where \epsilon are random variables sampled from factorized normal
    distributions and \sigma are trainable variables which are expected to
    vanish along the training procedure.
    """

    def __init__(
        self, in_size: int, out_size: int, sigma0: float, activation: str = "relu"
    ):
        """Initializes a NoisyLayer object.

        Args:
            in_size: Input size for Noisy Layer
            out_size: Output size for Noisy Layer
            sigma0: Initialization value for sigma_b (bias noise)
            activation: Non-linear activation for Noisy Layer
        """
        super().__init__()

        self.in_size = in_size
        self.out_size = out_size
        self.sigma0 = sigma0
        # Resolve the activation name into an instantiated torch module.
        self.activation = get_activation_fn(activation, framework="torch")
        if self.activation is not None:
            self.activation = self.activation()

        fan_in = float(self.in_size)
        # Trainable noise scales: uniform init for the weight noise,
        # constant (sigma0-based) init for the bias noise. Assigning
        # nn.Parameter attributes registers them on the module.
        self.sigma_w = nn.Parameter(
            torch.from_numpy(
                np.random.uniform(
                    low=-1.0 / np.sqrt(fan_in),
                    high=1.0 / np.sqrt(fan_in),
                    size=[self.in_size, out_size],
                )
            ).float()
        )
        self.sigma_b = nn.Parameter(
            torch.from_numpy(
                np.full(shape=[out_size], fill_value=sigma0 / np.sqrt(fan_in))
            ).float()
        )
        # Base (noise-free) weight and bias.
        self.w = nn.Parameter(
            torch.from_numpy(
                np.full(
                    shape=[self.in_size, self.out_size],
                    fill_value=6 / np.sqrt(float(in_size) + float(out_size)),
                )
            ).float()
        )
        self.b = nn.Parameter(torch.from_numpy(np.zeros([out_size])).float())

    def forward(self, inputs: TensorType) -> TensorType:
        """Applies the noisy-dense transformation to `inputs`."""
        device = inputs.device
        # Factorized Gaussian noise: one vector per side; the weight noise is
        # the outer product of the two vectors.
        eps_in = self._f_epsilon(
            torch.normal(
                mean=torch.zeros([self.in_size]), std=torch.ones([self.in_size])
            ).to(device)
        )
        eps_out = self._f_epsilon(
            torch.normal(
                mean=torch.zeros([self.out_size]), std=torch.ones([self.out_size])
            ).to(device)
        )
        eps_w = torch.matmul(
            torch.unsqueeze(eps_in, -1), other=torch.unsqueeze(eps_out, 0)
        )
        eps_b = eps_out

        noisy_weights = self.w + self.sigma_w * eps_w
        out = torch.matmul(inputs, noisy_weights) + self.b + self.sigma_b * eps_b

        if self.activation is not None:
            out = self.activation(out)
        return out

    def _f_epsilon(self, x: TensorType) -> TensorType:
        """Noise-shaping transform f(x) = sign(x) * sqrt(|x|)."""
        return torch.sign(x) * torch.pow(torch.abs(x), 0.5)
evalkit_tf433/lib/python3.10/site-packages/google/api_core/_rest_streaming_base.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Helpers for server-side streaming in REST."""
16
+
17
+ from collections import deque
18
+ import string
19
+ from typing import Deque, Union
20
+ import types
21
+
22
+ import proto
23
+ import google.protobuf.message
24
+ from google.protobuf.json_format import Parse
25
+
26
+
class BaseResponseIterator:
    """Base Iterator over REST API responses. This class should not be used directly.

    Args:
        response_message_cls (Union[proto.Message, google.protobuf.message.Message]): A response
            class expected to be returned from an API.

    Raises:
        ValueError: If `response_message_cls` is not a subclass of `proto.Message` or `google.protobuf.message.Message`.
    """

    def __init__(
        self,
        response_message_cls: Union[proto.Message, google.protobuf.message.Message],
    ):
        self._response_message_cls = response_message_cls
        # Contains a list of JSON responses ready to be sent to user.
        self._ready_objs: Deque[str] = deque()
        # Current JSON response being built.
        self._obj = ""
        # Keeps track of the nesting level within a JSON object.
        self._level = 0
        # Keeps track whether HTTP response is currently sending values
        # inside of a string value.
        self._in_string = False
        # Whether an escape symbol "\" was encountered.
        self._escape_next = False

        # Bind the message-class-appropriate parser once here, instead of
        # re-checking the message class on every parsed object.
        self._grab = types.MethodType(self._create_grab(), self)

    def _process_chunk(self, chunk: str):
        """Feeds one chunk of the streamed JSON-array response into the parser.

        Scans `chunk` character by character, carrying nesting/string/escape
        state across calls; every complete top-level JSON object found is
        appended to ``self._ready_objs``.

        Raises:
            ValueError: If the stream does not start with a JSON array.
        """
        if self._level == 0:
            # The overall payload must be a JSON array of objects.
            if chunk[0] != "[":
                raise ValueError(
                    "Can only parse array of JSON objects, instead got %s" % chunk
                )
        for char in chunk:
            if char == "{":
                if self._level == 1:
                    # Level 1 corresponds to the outermost JSON object
                    # (i.e. the one we care about).
                    self._obj = ""
                if not self._in_string:
                    self._level += 1
                self._obj += char
            elif char == "}":
                self._obj += char
                if not self._in_string:
                    self._level -= 1
                # Dropping back to level 1 means one full top-level object
                # has been accumulated.
                if not self._in_string and self._level == 1:
                    self._ready_objs.append(self._obj)
            elif char == '"':
                # Helps to deal with an escaped quotes inside of a string.
                if not self._escape_next:
                    self._in_string = not self._in_string
                self._obj += char
            elif char in string.whitespace:
                # Whitespace is only significant inside string values.
                if self._in_string:
                    self._obj += char
            elif char == "[":
                if self._level == 0:
                    # Opening bracket of the enclosing array; not part of
                    # any object being built.
                    self._level += 1
                else:
                    self._obj += char
            elif char == "]":
                if self._level == 1:
                    # Closing bracket of the enclosing array.
                    self._level -= 1
                else:
                    self._obj += char
            else:
                self._obj += char
            # An escape only applies to the single character following "\".
            self._escape_next = not self._escape_next if char == "\\" else False

    def _create_grab(self):
        """Returns a parser function that pops one ready JSON object and
        converts it into an instance of the configured response class.

        Raises:
            ValueError: If the response class is neither a proto-plus nor a
                protobuf message type.
        """
        if issubclass(self._response_message_cls, proto.Message):

            def grab(this):
                return this._response_message_cls.from_json(
                    this._ready_objs.popleft(), ignore_unknown_fields=True
                )

            return grab
        elif issubclass(self._response_message_cls, google.protobuf.message.Message):

            def grab(this):
                return Parse(this._ready_objs.popleft(), this._response_message_cls())

            return grab
        else:
            raise ValueError(
                "Response message class must be a subclass of proto.Message or google.protobuf.message.Message."
            )
evalkit_tf433/lib/python3.10/site-packages/google/api_core/client_info.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Helpers for providing client information.
16
+
17
+ Client information is used to send information about the calling client,
18
+ such as the library and Python version, to API services.
19
+ """
20
+
21
+ import platform
22
+ from typing import Union
23
+
24
+ from google.api_core import version as api_core_version
25
+
# Versions baked into the default user-agent string.
_PY_VERSION = platform.python_version()
_API_CORE_VERSION = api_core_version.__version__

# gRPC is an optional dependency; record its version when importable so it
# can appear in the user-agent, otherwise leave it as None.
_GRPC_VERSION: Union[str, None]

try:
    import grpc

    _GRPC_VERSION = grpc.__version__
except ImportError:  # pragma: NO COVER
    _GRPC_VERSION = None
37
+
38
+
class ClientInfo(object):
    """Client information used to generate a user-agent for API calls.

    This user-agent information is sent along with API calls to allow the
    receiving service to do analytics on which versions of Python and Google
    libraries are being used.

    Args:
        python_version (str): The Python interpreter version, for example,
            ``'3.9.6'``.
        grpc_version (Optional[str]): The gRPC library version.
        api_core_version (str): The google-api-core library version.
        gapic_version (Optional[str]): The version of gapic-generated client
            library, if the library was generated by gapic.
        client_library_version (Optional[str]): The version of the client
            library, generally used if the client library was not generated
            by gapic or if additional functionality was built on top of
            a gapic client library.
        user_agent (Optional[str]): Prefix to the user agent header. This is
            used to supply information such as application name or partner tool.
            Recommended format: ``application-or-tool-ID/major.minor.version``.
        rest_version (Optional[str]): A string with labeled versions of the
            dependencies used for REST transport.
    """

    def __init__(
        self,
        python_version=_PY_VERSION,
        grpc_version=_GRPC_VERSION,
        api_core_version=_API_CORE_VERSION,
        gapic_version=None,
        client_library_version=None,
        user_agent=None,
        rest_version=None,
    ):
        self.python_version = python_version
        self.grpc_version = grpc_version
        self.api_core_version = api_core_version
        self.gapic_version = gapic_version
        self.client_library_version = client_library_version
        self.user_agent = user_agent
        self.rest_version = rest_version

    def to_user_agent(self):
        """Returns the user-agent string for this client info."""

        # Note: the order here is important as the internal metrics system
        # expects these items to be in specific locations.
        templates = []

        if self.user_agent is not None:
            templates.append("{user_agent}")

        templates.append("gl-python/{python_version}")

        if self.grpc_version is not None:
            templates.append("grpc/{grpc_version}")

        if self.rest_version is not None:
            templates.append("rest/{rest_version}")

        templates.append("gax/{api_core_version}")

        if self.gapic_version is not None:
            templates.append("gapic/{gapic_version}")

        if self.client_library_version is not None:
            templates.append("gccl/{client_library_version}")

        # Substitute the attribute values, then strip any whitespace the
        # substituted values may have introduced at the edges.
        return " ".join(templates).format(**self.__dict__).strip()
evalkit_tf433/lib/python3.10/site-packages/google/api_core/client_logging.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import json
3
+ import os
4
+
5
+ from typing import List, Optional
6
+
# Guards initialize_logging() so the environment variable is read only once
# per process.
_LOGGING_INITIALIZED = False
_BASE_LOGGER_NAME = "google"

# Fields to be included in the StructuredLogFormatter.
#
# TODO(https://github.com/googleapis/python-api-core/issues/761): Update this list to support additional logging fields.
_recognized_logging_fields = [
    "httpRequest",
    "rpcName",
    "serviceName",
    "credentialsType",
    "credentialsInfo",
    "universeDomain",
    "request",
    "response",
    "metadata",
    "retryAttempt",
    "httpResponse",
]  # Additional fields to be logged.
26
+
27
+
def logger_configured(logger) -> bool:
    """Determines whether `logger` has non-default configuration

    Args:
        logger: The logger to check.

    Returns:
        bool: Whether the logger has any non-default configuration.
    """
    # Any of the three signals below means an application (or this module)
    # already touched the logger: attached handlers, an explicit level, or
    # disabled propagation.
    has_handlers = logger.handlers != []
    has_explicit_level = logger.level != logging.NOTSET
    propagation_disabled = not logger.propagate
    return has_handlers or has_explicit_level or propagation_disabled
40
+
41
+
def initialize_logging():
    """Initializes "google" loggers, partly based on the environment variable

    Initializes the "google" logger and any loggers (at the "google"
    level or lower) specified by the environment variable
    GOOGLE_SDK_PYTHON_LOGGING_SCOPE, as long as none of these loggers
    were previously configured. If any such loggers (including the
    "google" logger) are initialized, they are set to NOT propagate
    log events up to their parent loggers.

    This initialization is executed only once, and hence the
    environment variable is only processed the first time this
    function is called.
    """
    global _LOGGING_INITIALIZED
    # Only the first call reads the environment; subsequent calls are no-ops.
    if _LOGGING_INITIALIZED:
        return
    setup_logging(os.getenv("GOOGLE_SDK_PYTHON_LOGGING_SCOPE", ""))
    _LOGGING_INITIALIZED = True
62
+
63
+
def parse_logging_scopes(scopes: Optional[str] = None) -> List[str]:
    """Returns a list of logger names.

    Splits the single string of comma-separated logger names into a list of individual logger name strings.

    Args:
        scopes: The name of a single logger. (In the future, this will be a comma-separated list of multiple loggers.)

    Returns:
        A list of all the logger names in scopes.
    """
    # Empty string or None both mean "no scopes configured".
    if not scopes:
        return []
    # TODO(https://github.com/googleapis/python-api-core/issues/759): check if the namespace is a valid namespace.
    # TODO(b/380481951): Support logging multiple scopes.
    # TODO(b/380483756): Raise or log a warning for an invalid scope.
    return [scopes]
82
+
83
+
def configure_defaults(logger):
    """Configures `logger` to emit structured info to stdout."""
    # Respect any configuration the application already applied.
    if logger_configured(logger):
        return
    handler = logging.StreamHandler()
    handler.setFormatter(StructuredLogFormatter())
    logger.setLevel("DEBUG")
    logger.propagate = False
    logger.addHandler(handler)
93
+
94
+
def setup_logging(scopes: str = ""):
    """Sets up logging for the specified `scopes`.

    If the loggers specified in `scopes` have not been previously
    configured, this will configure them to emit structured log
    entries to stdout, and to not propagate their log events to their
    parent loggers. Additionally, if the "google" logger (whether it
    was specified in `scopes` or not) was not previously configured,
    it will also configure it to not propagate log events to the root
    logger.

    Args:
        scopes: The name of a single logger. (In the future, this will be a comma-separated list of multiple loggers.)

    """
    # parse_logging_scopes only ever yields valid logger names; currently
    # the resulting list holds at most one element.
    for namespace in parse_logging_scopes(scopes):
        # Create (or fetch) the scoped logger and apply the default
        # structured-logging configuration.
        configure_defaults(logging.getLogger(namespace))

    # Stop the "google" base logger from propagating to the root logger,
    # but only if the application has not already configured it itself.
    base_logger = logging.getLogger(_BASE_LOGGER_NAME)
    if not logger_configured(base_logger):
        base_logger.propagate = False
126
+
127
+
# TODO(https://github.com/googleapis/python-api-core/issues/763): Expand documentation.
class StructuredLogFormatter(logging.Formatter):
    """Formats log records as single-line JSON objects."""

    # TODO(https://github.com/googleapis/python-api-core/issues/761): ensure that additional fields such as
    # function name, file name, and line no. appear in a log output.
    def format(self, record: logging.LogRecord):
        # Core fields emitted for every record.
        entry = {
            "timestamp": self.formatTime(record),
            "severity": record.levelname,
            "name": record.name,
            "message": record.getMessage(),
        }

        # Copy over any recognized extra fields the caller attached to the
        # record; absent fields are simply omitted.
        for field_name in _recognized_logging_fields:
            field_value = getattr(record, field_name, None)
            if field_value is not None:
                entry[field_name] = field_value
        return json.dumps(entry)
evalkit_tf433/lib/python3.10/site-packages/google/api_core/client_options.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2019 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Client options class.
16
+
17
+ Client options provide a consistent interface for user options to be defined
18
+ across clients.
19
+
20
+ You can pass a client options object to a client.
21
+
22
+ .. code-block:: python
23
+
24
+ from google.api_core.client_options import ClientOptions
25
+ from google.cloud.vision_v1 import ImageAnnotatorClient
26
+
27
+ def get_client_cert():
28
+ # code to load client certificate and private key.
29
+ return client_cert_bytes, client_private_key_bytes
30
+
31
+ options = ClientOptions(api_endpoint="foo.googleapis.com",
32
+ client_cert_source=get_client_cert)
33
+
34
+ client = ImageAnnotatorClient(client_options=options)
35
+
36
+ You can also pass a mapping object.
37
+
38
+ .. code-block:: python
39
+
40
+ from google.cloud.vision_v1 import ImageAnnotatorClient
41
+
42
+ client = ImageAnnotatorClient(
43
+ client_options={
44
+ "api_endpoint": "foo.googleapis.com",
45
+ "client_cert_source" : get_client_cert
46
+ })
47
+
48
+
49
+ """
50
+
51
+ from typing import Callable, Mapping, Optional, Sequence, Tuple
52
+
53
+
class ClientOptions(object):
    """Client Options used to set options on clients.

    Args:
        api_endpoint (Optional[str]): The desired API endpoint, e.g.,
            compute.googleapis.com
        client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback
            which returns client certificate bytes and private key bytes both in
            PEM format. ``client_cert_source`` and ``client_encrypted_cert_source``
            are mutually exclusive.
        client_encrypted_cert_source (Optional[Callable[[], Tuple[str, str, bytes]]]):
            A callback which returns client certificate file path, encrypted
            private key file path, and the passphrase bytes.``client_cert_source``
            and ``client_encrypted_cert_source`` are mutually exclusive.
        quota_project_id (Optional[str]): A project name that a client's
            quota belongs to.
        credentials_file (Optional[str]): A path to a file storing credentials.
            ``credentials_file`` and ``api_key`` are mutually exclusive.

            .. warning::
                Important: If you accept a credential configuration (credential JSON/File/Stream)
                from an external source for authentication to Google Cloud Platform, you must
                validate it before providing it to any Google API or client library. Providing an
                unvalidated credential configuration to Google APIs or libraries can compromise
                the security of your systems and data. For more information, refer to
                `Validate credential configurations from external sources`_.

            .. _Validate credential configurations from external sources:

                https://cloud.google.com/docs/authentication/external/externally-sourced-credentials
        scopes (Optional[Sequence[str]]): OAuth access token override scopes.
        api_key (Optional[str]): Google API key. ``credentials_file`` and
            ``api_key`` are mutually exclusive.
        api_audience (Optional[str]): The intended audience for the API calls
            to the service that will be set when using certain 3rd party
            authentication flows. Audience is typically a resource identifier.
            If not set, the service endpoint value will be used as a default.
            An example of a valid ``api_audience`` is: "https://language.googleapis.com".
        universe_domain (Optional[str]): The desired universe domain. This must match
            the one in credentials. If not set, the default universe domain is
            `googleapis.com`. If both `api_endpoint` and `universe_domain` are set,
            then `api_endpoint` is used as the service endpoint. If `api_endpoint` is
            not specified, the format will be `{service}.{universe_domain}`.

    Raises:
        ValueError: If both ``client_cert_source`` and ``client_encrypted_cert_source``
            are provided, or both ``credentials_file`` and ``api_key`` are provided.
    """

    def __init__(
        self,
        api_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        client_encrypted_cert_source: Optional[
            Callable[[], Tuple[str, str, bytes]]
        ] = None,
        quota_project_id: Optional[str] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        api_key: Optional[str] = None,
        api_audience: Optional[str] = None,
        universe_domain: Optional[str] = None,
    ):
        # Reject mutually exclusive combinations up front, before any
        # attribute is assigned.
        if client_cert_source and client_encrypted_cert_source:
            raise ValueError(
                "client_cert_source and client_encrypted_cert_source are mutually exclusive"
            )
        if api_key and credentials_file:
            raise ValueError("api_key and credentials_file are mutually exclusive")

        self.api_endpoint = api_endpoint
        self.client_cert_source = client_cert_source
        self.client_encrypted_cert_source = client_encrypted_cert_source
        self.quota_project_id = quota_project_id
        self.credentials_file = credentials_file
        self.scopes = scopes
        self.api_key = api_key
        self.api_audience = api_audience
        self.universe_domain = universe_domain

    def __repr__(self) -> str:
        return f"ClientOptions: {self.__dict__!r}"
135
+
136
+
def from_dict(options: Mapping[str, object]) -> ClientOptions:
    """Construct a client options object from a mapping object.

    Args:
        options (collections.abc.Mapping): A mapping object with client options.
            See the docstring for ClientOptions for details on valid arguments.

    Raises:
        ValueError: If the mapping contains a key that is not a valid
            ``ClientOptions`` attribute.
    """
    client_options = ClientOptions()

    for key, value in options.items():
        # Only attributes already present on a default ClientOptions are
        # accepted; anything else is a caller error.
        if not hasattr(client_options, key):
            raise ValueError("ClientOptions does not accept an option '" + key + "'")
        setattr(client_options, key, value)

    return client_options
evalkit_tf433/lib/python3.10/site-packages/google/api_core/extended_operation.py ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Futures for extended long-running operations returned from Google Cloud APIs.
16
+
17
+ These futures can be used to synchronously wait for the result of a
18
+ long-running operations using :meth:`ExtendedOperation.result`:
19
+
20
+ .. code-block:: python
21
+
22
+ extended_operation = my_api_client.long_running_method()
23
+
24
+ extended_operation.result()
25
+
26
+ Or asynchronously using callbacks and :meth:`Operation.add_done_callback`:
27
+
28
+ .. code-block:: python
29
+
30
+ extended_operation = my_api_client.long_running_method()
31
+
32
+ def my_callback(ex_op):
33
+ print(f"Operation {ex_op.name} completed")
34
+
35
+ extended_operation.add_done_callback(my_callback)
36
+
37
+ """
38
+
39
+ import threading
40
+
41
+ from google.api_core import exceptions
42
+ from google.api_core.future import polling
43
+
44
+
class ExtendedOperation(polling.PollingFuture):
    """An ExtendedOperation future for interacting with a Google API Long-Running Operation.

    Args:
        extended_operation (proto.Message): The initial operation.
        refresh (Callable[[], type(extended_operation)]): A callable that returns
            the latest state of the operation.
        cancel (Callable[[], None]): A callable that tries to cancel the operation.
        polling Optional(google.api_core.retry.Retry): The configuration used
            for polling. This can be used to control how often :meth:`done`
            is polled. If the ``timeout`` argument to :meth:`result` is
            specified it will override the ``polling.timeout`` property.
        retry Optional(google.api_core.retry.Retry): DEPRECATED use ``polling``
            instead. If specified it will override ``polling`` parameter to
            maintain backward compatibility.

    Note: Most long-running API methods use google.api_core.operation.Operation
    This class is a wrapper for a subset of methods that use alternative
    Long-Running Operation (LRO) semantics.

    Note: there is not a concrete type the extended operation must be.
    It MUST have fields that correspond to the following, POSSIBLY WITH DIFFERENT NAMES:
    * name: str
    * status: Union[str, bool, enum.Enum]
    * error_code: int
    * error_message: str
    """

    def __init__(
        self,
        extended_operation,
        refresh,
        cancel,
        polling=polling.DEFAULT_POLLING,
        **kwargs,
    ):
        super().__init__(polling=polling, **kwargs)
        self._extended_operation = extended_operation
        self._refresh = refresh
        self._cancel = cancel
        # Note: the extended operation does not give a good way to indicate cancellation.
        # We make do with manually tracking cancellation and checking for doneness.
        self._cancelled = False
        self._completion_lock = threading.Lock()
        # Invoke in case the operation came back already complete.
        self._handle_refreshed_operation()

    # Note: the following four properties MUST be overridden in a subclass
    # if, and only if, the fields in the corresponding extended operation message
    # have different names.
    #
    # E.g. we have an extended operation class that looks like
    #
    #     class MyOperation(proto.Message):
    #         moniker = proto.Field(proto.STRING, number=1)
    #         status_msg = proto.Field(proto.STRING, number=2)
    #         optional http_error_code = proto.Field(proto.INT32, number=3)
    #         optional http_error_msg = proto.Field(proto.STRING, number=4)
    #
    # the ExtendedOperation subclass would provide property overrides that map
    # to these (poorly named) fields.
    @property
    def name(self):
        """The operation's identifier, taken from the wrapped message."""
        return self._extended_operation.name

    @property
    def status(self):
        """The operation's status field, taken from the wrapped message."""
        return self._extended_operation.status

    @property
    def error_code(self):
        """The operation's error code field, taken from the wrapped message."""
        return self._extended_operation.error_code

    @property
    def error_message(self):
        """The operation's error message field, taken from the wrapped message."""
        return self._extended_operation.error_message

    def __getattr__(self, name):
        # Delegate any other attribute access to the wrapped operation message.
        return getattr(self._extended_operation, name)

    def done(self, retry=None):
        """Refreshes the operation state and returns its ``done`` field."""
        self._refresh_and_update(retry)
        return self._extended_operation.done

    def cancel(self):
        """Requests cancellation; returns False if already done, else True."""
        if self.done():
            return False

        self._cancel()
        self._cancelled = True
        return True

    def cancelled(self):
        # TODO(dovs): there is not currently a good way to determine whether the
        # operation has been cancelled.
        # The best we can do is manually keep track of cancellation
        # and check for doneness.
        if not self._cancelled:
            return False

        # A cancel was requested; it only counts once the operation reports done.
        self._refresh_and_update()
        return self._extended_operation.done

    def _refresh_and_update(self, retry=None):
        """Re-fetches the operation (unless already done) and processes it."""
        if not self._extended_operation.done:
            # NOTE: `retry` is forwarded only when truthy, since the refresh
            # callable may not accept a `retry` keyword at all.
            self._extended_operation = (
                self._refresh(retry=retry) if retry else self._refresh()
            )
            self._handle_refreshed_operation()

    def _handle_refreshed_operation(self):
        """Translates a completed operation into this future's result/exception.

        No-op while the operation is still running. The lock serializes
        completion handling across threads.
        """
        with self._completion_lock:
            if not self._extended_operation.done:
                return

            if self.error_code and self.error_message:
                # Both error fields set: map the HTTP status to the matching
                # GoogleAPICallError subclass.
                # Note: `errors` can be removed once proposal A from
                # b/284179390 is implemented.
                errors = []
                if hasattr(self, "error") and hasattr(self.error, "errors"):
                    errors = self.error.errors
                exception = exceptions.from_http_status(
                    status_code=self.error_code,
                    message=self.error_message,
                    response=self._extended_operation,
                    errors=errors,
                )
                self.set_exception(exception)
            elif self.error_code or self.error_message:
                # Exactly one error field set: inconsistent server response.
                exception = exceptions.GoogleAPICallError(
                    f"Unexpected error {self.error_code}: {self.error_message}"
                )
                self.set_exception(exception)
            else:
                # Extended operations have no payload.
                self.set_result(None)

    @classmethod
    def make(cls, refresh, cancel, extended_operation, **kwargs):
        """
        Return an instantiated ExtendedOperation (or child) that wraps
        * a refresh callable
        * a cancel callable (can be a no-op)
        * an initial result

        .. note::
            It is the caller's responsibility to set up refresh and cancel
            with their correct request argument.
            The reason for this is that the services that use Extended Operations
            have rpcs that look something like the following:

            // service.proto
            service MyLongService {
                rpc StartLongTask(StartLongTaskRequest) returns (ExtendedOperation) {
                    option (google.cloud.operation_service) = "CustomOperationService";
                }
            }

            service CustomOperationService {
                rpc Get(GetOperationRequest) returns (ExtendedOperation) {
                    option (google.cloud.operation_polling_method) = true;
                }
            }

            Any info needed for the poll, e.g. a name, path params, etc.
            is held in the request, which the initial client method is in a much
            better position to make made because the caller made the initial request.

            TL;DR: the caller sets up closures for refresh and cancel that carry
            the properly configured requests.

        Args:
            refresh (Callable[Optional[Retry]][type(extended_operation)]): A callable that
                returns the latest state of the operation.
            cancel (Callable[][Any]): A callable that tries to cancel the operation
                on a best effort basis.
            extended_operation (Any): The initial response of the long running method.
                See the docstring for ExtendedOperation.__init__ for requirements on
                the type and fields of extended_operation
        """
        return cls(extended_operation, refresh, cancel, **kwargs)
evalkit_tf433/lib/python3.10/site-packages/google/api_core/general_helpers.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # This import is kept for backward compatibility only.
16
+ from functools import wraps # noqa: F401 pragma: NO COVER