ZTWHHH committed on
Commit
c0d2dbe
·
verified ·
1 Parent(s): ed16995

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. deepseek/lib/python3.10/site-packages/ray/rllib/examples/algorithms/classes/__pycache__/__init__.cpython-310.pyc +0 -0
  2. deepseek/lib/python3.10/site-packages/ray/rllib/examples/algorithms/classes/__pycache__/vpg.cpython-310.pyc +0 -0
  3. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/__init__.py +0 -0
  4. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/__pycache__/euclidian_distance_based_curiosity.cpython-310.pyc +0 -0
  5. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/__pycache__/flatten_observations_dict_space.cpython-310.pyc +0 -0
  6. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/classes/__init__.py +0 -0
  7. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/classes/__pycache__/protobuf_cartpole_observation_decoder.cpython-310.pyc +0 -0
  8. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/classes/euclidian_distance_based_curiosity.py +122 -0
  9. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/classes/protobuf_cartpole_observation_decoder.py +80 -0
  10. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/count_based_curiosity.py +14 -0
  11. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/euclidian_distance_based_curiosity.py +14 -0
  12. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/flatten_observations_dict_space.py +154 -0
  13. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/mean_std_filtering.py +198 -0
  14. deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/prev_actions_prev_rewards.py +164 -0
  15. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/__init__.py +0 -0
  16. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/__pycache__/__init__.cpython-310.pyc +0 -0
  17. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/__pycache__/env_rendering_and_recording.cpython-310.pyc +0 -0
  18. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/__pycache__/env_with_protobuf_observations.cpython-310.pyc +0 -0
  19. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/__pycache__/unity3d_env_local.cpython-310.pyc +0 -0
  20. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/gpu_requiring_env.py +27 -0
  21. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/simple_corridor.py +42 -0
  22. deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/transformed_action_space_env.py +61 -0
  23. deepseek/lib/python3.10/site-packages/ray/rllib/examples/hierarchical/__pycache__/hierarchical_training.cpython-310.pyc +0 -0
  24. deepseek/lib/python3.10/site-packages/ray/rllib/examples/learners/classes/intrinsic_curiosity_learners.py +164 -0
  25. deepseek/lib/python3.10/site-packages/ray/rllib/examples/metrics/__init__.py +0 -0
  26. deepseek/lib/python3.10/site-packages/ray/rllib/examples/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
  27. deepseek/lib/python3.10/site-packages/ray/rllib/examples/metrics/custom_metrics_in_env_runners.py +340 -0
  28. deepseek/lib/python3.10/site-packages/ray/rllib/examples/multi_agent/__init__.py +0 -0
  29. deepseek/lib/python3.10/site-packages/ray/rllib/examples/multi_agent/multi_agent_pendulum.py +73 -0
  30. deepseek/lib/python3.10/site-packages/ray/rllib/examples/multi_agent/pettingzoo_shared_value_function.py +7 -0
  31. deepseek/lib/python3.10/site-packages/ray/rllib/examples/multi_agent/two_step_game_with_grouped_agents.py +90 -0
  32. deepseek/lib/python3.10/site-packages/ray/rllib/examples/multi_agent/utils/self_play_callback_old_api_stack.py +75 -0
  33. deepseek/lib/python3.10/site-packages/ray/rllib/examples/offline_rl/__pycache__/custom_input_api.cpython-310.pyc +0 -0
  34. deepseek/lib/python3.10/site-packages/ray/rllib/examples/offline_rl/offline_rl.py +167 -0
  35. deepseek/lib/python3.10/site-packages/ray/rllib/examples/offline_rl/pretrain_bc_single_agent_evaluate_as_multi_agent.py +171 -0
  36. deepseek/lib/python3.10/site-packages/ray/rllib/examples/ray_serve/__pycache__/ray_serve_with_rllib.cpython-310.pyc +0 -0
  37. deepseek/lib/python3.10/site-packages/ray/rllib/examples/ray_serve/ray_serve_with_rllib.py +190 -0
  38. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/__pycache__/__init__.cpython-310.pyc +0 -0
  39. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/__pycache__/migrate_modelv2_to_new_api_stack_by_config.cpython-310.pyc +0 -0
  40. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/__pycache__/migrate_modelv2_to_new_api_stack_by_policy_checkpoint.cpython-310.pyc +0 -0
  41. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/autoregressive_actions_rl_module.py +112 -0
  42. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/__init__.py +10 -0
  43. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/__pycache__/action_masking_rlm.cpython-310.pyc +0 -0
  44. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/__pycache__/random_rlm.cpython-310.pyc +0 -0
  45. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/mobilenet_rlm.py +78 -0
  46. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/random_rlm.py +71 -0
  47. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/rock_paper_scissors_heuristic_rlm.py +108 -0
  48. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/tiny_atari_cnn_rlm.py +168 -0
  49. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/custom_cnn_rl_module.py +120 -0
  50. deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/pretraining_single_agent_training_multi_agent.py +149 -0
deepseek/lib/python3.10/site-packages/ray/rllib/examples/algorithms/classes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/algorithms/classes/__pycache__/vpg.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/__init__.py ADDED
File without changes
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/__pycache__/euclidian_distance_based_curiosity.cpython-310.pyc ADDED
Binary file (560 Bytes). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/__pycache__/flatten_observations_dict_space.cpython-310.pyc ADDED
Binary file (5.97 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/classes/__init__.py ADDED
File without changes
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/classes/__pycache__/protobuf_cartpole_observation_decoder.cpython-310.pyc ADDED
Binary file (2.63 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/classes/euclidian_distance_based_curiosity.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import deque
2
+ from typing import Any, List, Optional
3
+
4
+ import gymnasium as gym
5
+ import numpy as np
6
+
7
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
8
+ from ray.rllib.core.rl_module.rl_module import RLModule
9
+ from ray.rllib.utils.typing import EpisodeType
10
+
11
+
12
+ class EuclidianDistanceBasedCuriosity(ConnectorV2):
13
+ """Learner ConnectorV2 piece computing intrinsic rewards with euclidian distance.
14
+
15
+ Add this connector piece to your Learner pipeline, through your algo config:
16
+ ```
17
+ config.training(
18
+ learner_connector=lambda obs_sp, act_sp: EuclidianDistanceBasedCuriosity()
19
+ )
20
+ ```
21
+
22
+ Intrinsic rewards are computed on the Learner side based on comparing the euclidian
23
+ distance of observations vs already seen ones. A configurable number of observations
24
+ will be stored in a FIFO buffer and all incoming observations have their distance
25
+ measured against those.
26
+
27
+ The minimum distance measured is the intrinsic reward for the incoming obs
28
+ (multiplied by a fixed coefficient and added to the "main" extrinsic reward):
29
+ r(i) = intrinsic_reward_coeff * min(ED(o, o(i)) for o in stored_obs))
30
+ where `ED` is the euclidian distance and `stored_obs` is the buffer.
31
+
32
+ The intrinsic reward is then added to the extrinsic reward and saved back into the
33
+ episode (under the main "rewards" key).
34
+
35
+ Note that the computation and saving back to the episode all happens before the
36
+ actual train batch is generated from the episode data. Thus, the Learner and the
37
+ RLModule used do not take notice of the extra reward added.
38
+
39
+ Only one observation per incoming episode will be stored as a new one in the buffer.
40
+ Thereby, we pick the observation with the largest `min(ED)` value over all already
41
+ stored observations to be stored per episode.
42
+
43
+ If you would like to use a simpler, count-based mechanism for intrinsic reward
44
+ computations, take a look at the `CountBasedCuriosity` connector piece
45
+ at `ray.rllib.examples.connectors.classes.count_based_curiosity`
46
+ """
47
+
48
+ def __init__(
49
+ self,
50
+ input_observation_space: Optional[gym.Space] = None,
51
+ input_action_space: Optional[gym.Space] = None,
52
+ *,
53
+ intrinsic_reward_coeff: float = 1.0,
54
+ max_buffer_size: int = 100,
55
+ **kwargs,
56
+ ):
57
+ """Initializes a EuclidianDistanceBasedCuriosity instance.
58
+
59
+ Args:
60
+ intrinsic_reward_coeff: The weight with which to multiply the intrinsic
61
+ reward before adding (and saving) it back to the main (extrinsic)
62
+ reward of the episode at each timestep.
63
+ """
64
+ super().__init__(input_observation_space, input_action_space)
65
+
66
+ # Create an observation buffer
67
+ self.obs_buffer = deque(maxlen=max_buffer_size)
68
+ self.intrinsic_reward_coeff = intrinsic_reward_coeff
69
+
70
+ self._test = 0
71
+
72
+ def __call__(
73
+ self,
74
+ *,
75
+ rl_module: RLModule,
76
+ batch: Any,
77
+ episodes: List[EpisodeType],
78
+ explore: Optional[bool] = None,
79
+ shared_data: Optional[dict] = None,
80
+ **kwargs,
81
+ ) -> Any:
82
+ if self._test > 10:
83
+ return batch
84
+ self._test += 1
85
+ # Loop through all episodes and change the reward to
86
+ # [reward + intrinsic reward]
87
+ for sa_episode in self.single_agent_episode_iterator(
88
+ episodes=episodes, agents_that_stepped_only=False
89
+ ):
90
+ # Loop through all obs, except the last one.
91
+ observations = sa_episode.get_observations(slice(None, -1))
92
+ # Get all respective (extrinsic) rewards.
93
+ rewards = sa_episode.get_rewards()
94
+
95
+ max_dist_obs = None
96
+ max_dist = float("-inf")
97
+ for i, (obs, rew) in enumerate(zip(observations, rewards)):
98
+ # Compare obs to all stored observations and compute euclidian distance.
99
+ min_dist = 0.0
100
+ if self.obs_buffer:
101
+ min_dist = min(
102
+ np.sqrt(np.sum((obs - stored_obs) ** 2))
103
+ for stored_obs in self.obs_buffer
104
+ )
105
+ if min_dist > max_dist:
106
+ max_dist = min_dist
107
+ max_dist_obs = obs
108
+
109
+ # Compute our euclidian distance-based intrinsic reward and add it to
110
+ # the main (extrinsic) reward.
111
+ rew += self.intrinsic_reward_coeff * min_dist
112
+ # Store the new reward back to the episode (under the correct
113
+ # timestep/index).
114
+ sa_episode.set_rewards(new_data=rew, at_indices=i)
115
+
116
+ # Add the one observation of this episode with the largest (min) euclidian
117
+ # dist to all already stored obs to the buffer (maybe throwing out the
118
+ # oldest obs in there).
119
+ if max_dist_obs is not None:
120
+ self.obs_buffer.append(max_dist_obs)
121
+
122
+ return batch
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/classes/protobuf_cartpole_observation_decoder.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, List, Optional
2
+
3
+ import gymnasium as gym
4
+ import numpy as np
5
+
6
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
7
+ from ray.rllib.core.rl_module.rl_module import RLModule
8
+ from ray.rllib.examples.envs.classes.utils.cartpole_observations_proto import (
9
+ CartPoleObservation,
10
+ )
11
+ from ray.rllib.utils.annotations import override
12
+ from ray.rllib.utils.typing import EpisodeType
13
+
14
+
15
+ class ProtobufCartPoleObservationDecoder(ConnectorV2):
16
+ """Env-to-module ConnectorV2 piece decoding protobuf obs into CartPole-v1 obs.
17
+
18
+ Add this connector piece to your env-to-module pipeline, through your algo config:
19
+ ```
20
+ config.env_runners(
21
+ env_to_module_connector=lambda env: ProtobufCartPoleObservationDecoder()
22
+ )
23
+ ```
24
+
25
+ The incoming observation space must be a 1D Box of dtype uint8
26
+ (which is the same as a binary string). The outgoing observation space is the
27
+ normal CartPole-v1 1D space: Box(-inf, inf, (4,), float32).
28
+ """
29
+
30
+ @override(ConnectorV2)
31
+ def recompute_output_observation_space(
32
+ self,
33
+ input_observation_space: gym.Space,
34
+ input_action_space: gym.Space,
35
+ ) -> gym.Space:
36
+ # Make sure the incoming observation space is a protobuf (binary string).
37
+ assert (
38
+ isinstance(input_observation_space, gym.spaces.Box)
39
+ and len(input_observation_space.shape) == 1
40
+ and input_observation_space.dtype.name == "uint8"
41
+ )
42
+ # Return CartPole-v1's natural observation space.
43
+ return gym.spaces.Box(float("-inf"), float("inf"), (4,), np.float32)
44
+
45
+ def __call__(
46
+ self,
47
+ *,
48
+ rl_module: RLModule,
49
+ batch: Any,
50
+ episodes: List[EpisodeType],
51
+ explore: Optional[bool] = None,
52
+ shared_data: Optional[dict] = None,
53
+ **kwargs,
54
+ ) -> Any:
55
+ # Loop through all episodes and change the observation from a binary string
56
+ # to an actual 1D np.ndarray (normal CartPole-v1 obs).
57
+ for sa_episode in self.single_agent_episode_iterator(episodes=episodes):
58
+ # Get last obs (binary string).
59
+ obs = sa_episode.get_observations(-1)
60
+ obs_bytes = obs.tobytes()
61
+ obs_protobuf = CartPoleObservation()
62
+ obs_protobuf.ParseFromString(obs_bytes)
63
+
64
+ # Set up the natural CartPole-v1 observation tensor from the protobuf
65
+ # values.
66
+ new_obs = np.array(
67
+ [
68
+ obs_protobuf.x_pos,
69
+ obs_protobuf.x_veloc,
70
+ obs_protobuf.angle_pos,
71
+ obs_protobuf.angle_veloc,
72
+ ],
73
+ np.float32,
74
+ )
75
+
76
+ # Write the new observation (1D tensor) back into the Episode.
77
+ sa_episode.set_observations(new_data=new_obs, at_indices=-1)
78
+
79
+ # Return `data` as-is.
80
+ return batch
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/count_based_curiosity.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Placeholder for training with count-based curiosity.
2
+
3
+ The actual script can be found at a different location (see code below).
4
+ """
5
+
6
+ if __name__ == "__main__":
7
+ import subprocess
8
+ import sys
9
+
10
+ # Forward to "python ../curiosity/[same script name].py [same options]"
11
+ command = [sys.executable, "../curiosity/", sys.argv[0]] + sys.argv[1:]
12
+
13
+ # Run the script.
14
+ subprocess.run(command, capture_output=True)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/euclidian_distance_based_curiosity.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Placeholder for training with euclidian distance-based curiosity.
2
+
3
+ The actual script can be found at a different location (see code below).
4
+ """
5
+
6
+ if __name__ == "__main__":
7
+ import subprocess
8
+ import sys
9
+
10
+ # Forward to "python ../curiosity/[same script name].py [same options]"
11
+ command = [sys.executable, "../curiosity/", sys.argv[0]] + sys.argv[1:]
12
+
13
+ # Run the script.
14
+ subprocess.run(command, capture_output=True)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/flatten_observations_dict_space.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example using a ConnectorV2 to flatten arbitrarily nested dict or tuple observations.
2
+
3
+ An RLlib Algorithm has 3 distinct connector pipelines:
4
+ - An env-to-module pipeline in an EnvRunner accepting a list of episodes and producing
5
+ a batch for an RLModule to compute actions (`forward_inference()` or
6
+ `forward_exploration()`).
7
+ - A module-to-env pipeline in an EnvRunner taking the RLModule's output and converting
8
+ it into an action readable by the environment.
9
+ - A learner connector pipeline on a Learner taking a list of episodes and producing
10
+ a batch for an RLModule to perform the training forward pass (`forward_train()`).
11
+
12
+ Each of these pipelines has a fixed set of default ConnectorV2 pieces that RLlib
13
+ adds/prepends to these pipelines in order to perform the most basic functionalities.
14
+ For example, RLlib adds the `AddObservationsFromEpisodesToBatch` ConnectorV2 into any
15
+ env-to-module pipeline to make sure the batch for computing actions contains - at the
16
+ minimum - the most recent observation.
17
+
18
+ On top of these default ConnectorV2 pieces, users can define their own ConnectorV2
19
+ pieces (or use the ones available already in RLlib) and add them to one of the 3
20
+ different pipelines described above, as required.
21
+
22
+ This example:
23
+ - shows how the `FlattenObservation` ConnectorV2 piece can be added to the
24
+ env-to-module pipeline.
25
+ - demonstrates that by using this connector, any arbitrarily nested dict or tuple
26
+ observations is properly flattened into a simple 1D tensor, for easier RLModule
27
+ processing.
28
+ - shows how - in a multi-agent setup - individual agents can be specified, whose
29
+ observations should be flattened (while other agents' observations will always
30
+ be left as-is).
31
+ - uses a variant of the CartPole-v1 environment, in which the 4 observation items
32
+ (x-pos, x-veloc, angle, and angle-veloc) are taken apart and put into a nested dict
33
+ with the structure:
34
+ {
35
+ "x-pos": [x-pos],
36
+ "angular-pos": {
37
+ "value": [angle],
38
+ "some_random_stuff": [random Discrete(3)], # <- should be ignored by algo
39
+ },
40
+ "velocs": Tuple([x-veloc], [angle-veloc]),
41
+ }
42
+
43
+
44
+ How to run this script
45
+ ----------------------
46
+ `python [script file name].py --enable-new-api-stack`
47
+
48
+ For debugging, use the following additional command line options
49
+ `--no-tune --num-env-runners=0`
50
+ which should allow you to set breakpoints anywhere in the RLlib code and
51
+ have the execution stop there for inspection and debugging.
52
+
53
+ For logging to your WandB account, use:
54
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
55
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
56
+
57
+
58
+ Results to expect
59
+ -----------------
60
+
61
+ +---------------------+------------+----------------+--------+------------------+
62
+ | Trial name | status | loc | iter | total time (s) |
63
+ | | | | | |
64
+ |---------------------+------------+----------------+--------+------------------+
65
+ | PPO_env_a2fd6_00000 | TERMINATED | 127.0.0.1:7409 | 25 | 24.1426 |
66
+ +---------------------+------------+----------------+--------+------------------+
67
+ +------------------------+------------------------+------------------------+
68
+ | num_env_steps_sample | num_env_steps_traine | episode_return_mean |
69
+ | d_lifetime | d_lifetime | |
70
+ +------------------------+------------------------+------------------------|
71
+ | 100000 | 100000 | 421.42 |
72
+ +------------------------+------------------------+------------------------+
73
+ """
74
+ from ray.tune.registry import register_env
75
+ from ray.rllib.connectors.env_to_module import FlattenObservations
76
+ from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
77
+ from ray.rllib.examples.envs.classes.cartpole_with_dict_observation_space import (
78
+ CartPoleWithDictObservationSpace,
79
+ )
80
+ from ray.rllib.examples.envs.classes.multi_agent import (
81
+ MultiAgentCartPoleWithDictObservationSpace,
82
+ )
83
+ from ray.rllib.utils.test_utils import (
84
+ add_rllib_example_script_args,
85
+ run_rllib_example_script_experiment,
86
+ )
87
+ from ray.tune.registry import get_trainable_cls
88
+
89
+
90
+ # Read in common example script command line arguments.
91
+ parser = add_rllib_example_script_args(default_timesteps=200000, default_reward=400.0)
92
+ parser.set_defaults(enable_new_api_stack=True)
93
+
94
+
95
+ if __name__ == "__main__":
96
+ args = parser.parse_args()
97
+
98
+ # Define env-to-module-connector pipeline for the new stack.
99
+ def _env_to_module_pipeline(env):
100
+ return FlattenObservations(multi_agent=args.num_agents > 0)
101
+
102
+ # Register our environment with tune.
103
+ if args.num_agents > 0:
104
+ register_env(
105
+ "env",
106
+ lambda _: MultiAgentCartPoleWithDictObservationSpace(
107
+ config={"num_agents": args.num_agents}
108
+ ),
109
+ )
110
+ else:
111
+ register_env("env", lambda _: CartPoleWithDictObservationSpace())
112
+
113
+ # Define the AlgorithmConfig used.
114
+ base_config = (
115
+ get_trainable_cls(args.algo)
116
+ .get_default_config()
117
+ .environment("env")
118
+ .env_runners(env_to_module_connector=_env_to_module_pipeline)
119
+ .training(
120
+ gamma=0.99,
121
+ lr=0.0003,
122
+ )
123
+ .rl_module(
124
+ model_config=DefaultModelConfig(
125
+ fcnet_hiddens=[32],
126
+ fcnet_activation="linear",
127
+ vf_share_layers=True,
128
+ ),
129
+ )
130
+ )
131
+
132
+ # Add a simple multi-agent setup.
133
+ if args.num_agents > 0:
134
+ base_config.multi_agent(
135
+ policies={f"p{i}" for i in range(args.num_agents)},
136
+ policy_mapping_fn=lambda aid, *a, **kw: f"p{aid}",
137
+ )
138
+
139
+ # PPO-specific settings (for better learning behavior only).
140
+ if args.algo == "PPO":
141
+ base_config.training(
142
+ num_epochs=6,
143
+ vf_loss_coeff=0.01,
144
+ )
145
+ # IMPALA-specific settings (for better learning behavior only).
146
+ elif args.algo == "IMPALA":
147
+ base_config.training(
148
+ lr=0.0005,
149
+ vf_loss_coeff=0.05,
150
+ entropy_coeff=0.0,
151
+ )
152
+
153
+ # Run everything as configured.
154
+ run_rllib_example_script_experiment(base_config, args)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/mean_std_filtering.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example using a ConnectorV2 for processing observations with a mean/std filter.
2
+
3
+ An RLlib Algorithm has 3 distinct connector pipelines:
4
+ - An env-to-module pipeline in an EnvRunner accepting a list of episodes and producing
5
+ a batch for an RLModule to compute actions (`forward_inference()` or
6
+ `forward_exploration()`).
7
+ - A module-to-env pipeline in an EnvRunner taking the RLModule's output and converting
8
+ it into an action readable by the environment.
9
+ - A learner connector pipeline on a Learner taking a list of episodes and producing
10
+ a batch for an RLModule to perform the training forward pass (`forward_train()`).
11
+
12
+ Each of these pipelines has a fixed set of default ConnectorV2 pieces that RLlib
13
+ adds/prepends to these pipelines in order to perform the most basic functionalities.
14
+ For example, RLlib adds the `AddObservationsFromEpisodesToBatch` ConnectorV2 into any
15
+ env-to-module pipeline to make sure the batch for computing actions contains - at the
16
+ minimum - the most recent observation.
17
+
18
+ On top of these default ConnectorV2 pieces, users can define their own ConnectorV2
19
+ pieces (or use the ones available already in RLlib) and add them to one of the 3
20
+ different pipelines described above, as required.
21
+
22
+ This example:
23
+ - shows how the `MeanStdFilter` ConnectorV2 piece can be added to the env-to-module
24
+ pipeline.
25
+ - demonstrates that using such a filter enhances learning behavior (or even makes
26
+ if possible to learn overall) in some environments, especially those with lopsided
27
+ observation spaces, for example `Box(-3000, -1000, ...)`.
28
+
29
+
30
+ How to run this script
31
+ ----------------------
32
+ `python [script file name].py --enable-new-api-stack`
33
+
34
+ For debugging, use the following additional command line options
35
+ `--no-tune --num-env-runners=0`
36
+ which should allow you to set breakpoints anywhere in the RLlib code and
37
+ have the execution stop there for inspection and debugging.
38
+
39
+ For logging to your WandB account, use:
40
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
41
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
42
+
43
+
44
+ Results to expect
45
+ -----------------
46
+ Running this example with the mean-std filter results in the normally expected Pendulum
47
+ learning behavior:
48
+ +-------------------------------+------------+-----------------+--------+
49
+ | Trial name | status | loc | iter |
50
+ | | | | |
51
+ |-------------------------------+------------+-----------------+--------+
52
+ | PPO_lopsided-pend_f9c96_00000 | TERMINATED | 127.0.0.1:43612 | 77 |
53
+ +-------------------------------+------------+-----------------+--------+
54
+ +------------------+------------------------+-----------------------+
55
+ | total time (s) | num_env_steps_sample | episode_return_mean |
56
+ | | d_lifetime | |
57
+ |------------------+------------------------+-----------------------|
58
+ | 30.7466 | 40040 | -276.3 |
59
+ +------------------+------------------------+-----------------------+
60
+
61
+ If you try using the `--disable-mean-std-filter` (all other things being equal), you
62
+ will either see no learning progress at all (or a very slow one), but more likely some
63
+ numerical instability related error will be thrown:
64
+
65
+ ValueError: Expected parameter loc (Tensor of shape (64, 1)) of distribution
66
+ Normal(loc: torch.Size([64, 1]), scale: torch.Size([64, 1])) to satisfy the
67
+ constraint Real(), but found invalid values:
68
+ tensor([[nan],
69
+ [nan],
70
+ [nan],
71
+ ...
72
+ """
73
+ import gymnasium as gym
74
+ import numpy as np
75
+
76
+ from ray.rllib.connectors.env_to_module.mean_std_filter import MeanStdFilter
77
+ from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
78
+ from ray.rllib.examples.envs.classes.multi_agent import MultiAgentPendulum
79
+ from ray.rllib.utils.framework import try_import_torch
80
+ from ray.rllib.utils.test_utils import (
81
+ add_rllib_example_script_args,
82
+ run_rllib_example_script_experiment,
83
+ )
84
+ from ray.tune.registry import get_trainable_cls, register_env
85
+
86
+ torch, _ = try_import_torch()
87
+
88
+ parser = add_rllib_example_script_args(
89
+ default_iters=500,
90
+ default_timesteps=500000,
91
+ default_reward=-300.0,
92
+ )
93
+ parser.add_argument(
94
+ "--disable-mean-std-filter",
95
+ action="store_true",
96
+ help="Run w/o a mean/std env-to-module connector piece (filter).",
97
+ )
98
+
99
+
100
+ class LopsidedObs(gym.ObservationWrapper):
101
+ def __init__(self, env):
102
+ super().__init__(env)
103
+ self.observation_space = gym.spaces.Box(-4000.0, -1456.0, (3,), np.float32)
104
+
105
+ def observation(self, observation):
106
+ # Lopside [-1.0, 1.0] Pendulum observations
107
+ return ((observation + 1.0) / 2.0) * (4000.0 - 1456.0) - 4000.0
108
+
109
+
110
+ if __name__ == "__main__":
111
+ args = parser.parse_args()
112
+
113
+ assert (
114
+ args.enable_new_api_stack
115
+ ), "Must set --enable-new-api-stack when running this script!"
116
+
117
+ # Register our environment with tune.
118
+ if args.num_agents > 0:
119
+ register_env(
120
+ "lopsided-pend",
121
+ lambda _: MultiAgentPendulum(config={"num_agents": args.num_agents}),
122
+ )
123
+ else:
124
+ register_env("lopsided-pend", lambda _: LopsidedObs(gym.make("Pendulum-v1")))
125
+
126
+ base_config = (
127
+ get_trainable_cls(args.algo)
128
+ .get_default_config()
129
+ .environment("lopsided-pend")
130
+ .env_runners(
131
+ # TODO (sven): MAEnvRunner does not support vectorized envs yet
132
+ # due to gym's env checkers and non-compatibility with RLlib's
133
+ # MultiAgentEnv API.
134
+ num_envs_per_env_runner=1 if args.num_agents > 0 else 20,
135
+ # Define a single connector piece to be prepended to the env-to-module
136
+ # connector pipeline.
137
+ # Alternatively, return a list of n ConnectorV2 pieces (which will then be
138
+ # included in an automatically generated EnvToModulePipeline or return a
139
+ # EnvToModulePipeline directly.
140
+ env_to_module_connector=(
141
+ None
142
+ if args.disable_mean_std_filter
143
+ else lambda env: MeanStdFilter(multi_agent=args.num_agents > 0)
144
+ ),
145
+ )
146
+ .training(
147
+ train_batch_size_per_learner=512,
148
+ gamma=0.95,
149
+ # Linearly adjust learning rate based on number of GPUs.
150
+ lr=0.0003 * (args.num_learners or 1),
151
+ vf_loss_coeff=0.01,
152
+ )
153
+ .rl_module(
154
+ model_config=DefaultModelConfig(
155
+ fcnet_activation="relu",
156
+ fcnet_kernel_initializer=torch.nn.init.xavier_uniform_,
157
+ fcnet_bias_initializer=torch.nn.init.constant_,
158
+ fcnet_bias_initializer_kwargs={"val": 0.0},
159
+ ),
160
+ )
161
+ # In case you would like to run with a evaluation EnvRunners, make sure your
162
+ # `evaluation_config` key contains the `use_worker_filter_stats=False` setting
163
+ # (see below). This setting makes sure that the mean/std stats collected by the
164
+ # evaluation EnvRunners are NOT used for the training EnvRunners (unless you
165
+ # really want to mix these stats). It's normally a good idea to keep the stats
166
+ # collected during evaluation completely out of the training data (already for
167
+ # better reproducibility alone).
168
+ # .evaluation(
169
+ # evaluation_num_env_runners=1,
170
+ # evaluation_interval=1,
171
+ # evaluation_config={
172
+ # "explore": False,
173
+ # # Do NOT use the eval EnvRunners' ConnectorV2 states. Instead, before
174
+ # # each round of evaluation, broadcast the latest training
175
+ # # EnvRunnerGroup's ConnectorV2 states (merged from all training remote
176
+ # # EnvRunners) to the eval EnvRunnerGroup (and discard the eval
177
+ # # EnvRunners' stats).
178
+ # "use_worker_filter_stats": False,
179
+ # },
180
+ # )
181
+ )
182
+
183
+ # PPO specific settings.
184
+ if args.algo == "PPO":
185
+ base_config.training(
186
+ minibatch_size=64,
187
+ lambda_=0.1,
188
+ vf_clip_param=10.0,
189
+ )
190
+
191
+ # Add a simple multi-agent setup.
192
+ if args.num_agents > 0:
193
+ base_config.multi_agent(
194
+ policies={f"p{i}" for i in range(args.num_agents)},
195
+ policy_mapping_fn=lambda aid, *a, **kw: f"p{aid}",
196
+ )
197
+
198
+ run_rllib_example_script_experiment(base_config, args)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/connectors/prev_actions_prev_rewards.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example using a ConnectorV2 to add previous rewards/actions to an RLModule's input.
2
+
3
+ An RLlib Algorithm has 3 distinct connector pipelines:
4
+ - An env-to-module pipeline in an EnvRunner accepting a list of episodes and producing
5
+ a batch for an RLModule to compute actions (`forward_inference()` or
6
+ `forward_exploration()`).
7
+ - A module-to-env pipeline in an EnvRunner taking the RLModule's output and converting
8
+ it into an action readable by the environment.
9
+ - A learner connector pipeline on a Learner taking a list of episodes and producing
10
+ a batch for an RLModule to perform the training forward pass (`forward_train()`).
11
+
12
+ Each of these pipelines has a fixed set of default ConnectorV2 pieces that RLlib
13
+ adds/prepends to these pipelines in order to perform the most basic functionalities.
14
+ For example, RLlib adds the `AddObservationsFromEpisodesToBatch` ConnectorV2 into any
15
+ env-to-module pipeline to make sure the batch for computing actions contains - at the
16
+ minimum - the most recent observation.
17
+
18
+ On top of these default ConnectorV2 pieces, users can define their own ConnectorV2
19
+ pieces (or use the ones available already in RLlib) and add them to one of the 3
20
+ different pipelines described above, as required.
21
+
22
+ This example:
23
+ - shows how the `PrevActionsPrevRewards` ConnectorV2 piece can be added to the
24
+ env-to-module pipeline to extract previous rewards and/or actions from the ongoing
25
+ episodes.
26
+ - shows how this connector creates and wraps this new information (rewards and
27
+ actions) together with the original observations into the RLModule's input dict
28
+ under a new `gym.spaces.Dict` structure (for example, if your observation space
29
+ is `O=Box(shape=(3,))` and you add the most recent 1 reward, the new observation
30
+ space will be `Dict({"_original_obs": O, "prev_n_rewards": Box(shape=())})`.
31
+ - demonstrates how to use RLlib's `FlattenObservations` right after the
32
+ `PrevActionsPrevRewards` to flatten that new dict observation structure again into
33
+ a single 1D tensor.
34
+ - uses the StatelessCartPole environment, a CartPole-v1 derivative that's missing
35
+ both x-veloc and angle-veloc observation components and is therefore non-Markovian
36
+ (only partially observable). An LSTM default model is used for training. Adding
37
+ the additional context to the observations (for example, prev. actions) helps the
38
+ LSTM to more quickly learn in this environment.
39
+
40
+
41
+ How to run this script
42
+ ----------------------
43
+ `python [script file name].py --enable-new-api-stack`
44
+
45
+ Use the `--n-prev-rewards` option to define how many of the most recent rewards
46
+ should be added to the RLModule's observations, and `--n-prev-actions` for how
47
+ many of the most recent actions should be added. Use `--num-agents` to switch
48
+ to the multi-agent version of the StatelessCartPole environment, in which case
49
+ the connector pieces operate in multi-agent mode and one policy per agent is
50
+ trained.
51
+
52
+ For debugging, use the following additional command line options
53
+ `--no-tune --num-env-runners=0`
54
+ which should allow you to set breakpoints anywhere in the RLlib code and
55
+ have the execution stop there for inspection and debugging.
56
+
57
+ For logging to your WandB account, use:
58
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
59
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
60
+
61
+
62
+ Results to expect
63
+ -----------------
64
+
65
+ You should see something similar to this in your terminal output when running
66
+ this script as described above:
67
+
68
+ +---------------------+------------+-----------------+--------+------------------+
69
+ | Trial name | status | loc | iter | total time (s) |
70
+ | | | | | |
71
+ |---------------------+------------+-----------------+--------+------------------+
72
+ | PPO_env_0edd2_00000 | TERMINATED | 127.0.0.1:12632 | 17 | 42.6898 |
73
+ +---------------------+------------+-----------------+--------+------------------+
74
+ +------------------------+------------------------+------------------------+
75
+ | num_env_steps_sample | num_env_steps_traine | episode_return_mean |
76
+ | d_lifetime | d_lifetime | |
77
+ |------------------------+------------------------+------------------------|
78
+ | 68000 | 68000 | 205.22 |
79
+ +------------------------+------------------------+------------------------+
80
+ """
81
+ from ray.rllib.algorithms.ppo import PPOConfig
82
+ from ray.rllib.connectors.env_to_module import (
83
+ FlattenObservations,
84
+ PrevActionsPrevRewards,
85
+ )
86
+ from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
87
+ from ray.rllib.examples.envs.classes.stateless_cartpole import StatelessCartPole
88
+ from ray.rllib.examples.envs.classes.multi_agent import MultiAgentStatelessCartPole
89
+ from ray.rllib.utils.framework import try_import_torch
90
+ from ray.rllib.utils.test_utils import (
91
+ add_rllib_example_script_args,
92
+ run_rllib_example_script_experiment,
93
+ )
94
+ from ray.tune import register_env
95
+
96
+ torch, nn = try_import_torch()
97
+
98
+
99
+ parser = add_rllib_example_script_args(
100
+ default_reward=200.0, default_timesteps=1000000, default_iters=2000
101
+ )
102
+ parser.set_defaults(enable_new_api_stack=True)
103
+ parser.add_argument("--n-prev-rewards", type=int, default=1)
104
+ parser.add_argument("--n-prev-actions", type=int, default=1)
105
+
106
+
107
+ if __name__ == "__main__":
108
+ args = parser.parse_args()
109
+
110
+ # Define our custom connector pipelines.
111
+ def _env_to_module(env):
112
+ # Create the env-to-module connector pipeline.
113
+ return [
114
+ PrevActionsPrevRewards(
115
+ multi_agent=args.num_agents > 0,
116
+ n_prev_rewards=args.n_prev_rewards,
117
+ n_prev_actions=args.n_prev_actions,
118
+ ),
119
+ FlattenObservations(multi_agent=args.num_agents > 0),
120
+ ]
121
+
122
+ # Register our environment with tune.
123
+ if args.num_agents > 0:
124
+ register_env(
125
+ "env",
126
+ lambda _: MultiAgentStatelessCartPole(
127
+ config={"num_agents": args.num_agents}
128
+ ),
129
+ )
130
+ else:
131
+ register_env("env", lambda _: StatelessCartPole())
132
+
133
+ config = (
134
+ PPOConfig()
135
+ .environment("env")
136
+ .env_runners(env_to_module_connector=_env_to_module)
137
+ .training(
138
+ num_epochs=6,
139
+ lr=0.0003,
140
+ train_batch_size=4000,
141
+ vf_loss_coeff=0.01,
142
+ )
143
+ .rl_module(
144
+ model_config=DefaultModelConfig(
145
+ use_lstm=True,
146
+ max_seq_len=20,
147
+ fcnet_hiddens=[32],
148
+ fcnet_activation="linear",
149
+ fcnet_kernel_initializer=nn.init.xavier_uniform_,
150
+ fcnet_bias_initializer=nn.init.constant_,
151
+ fcnet_bias_initializer_kwargs={"val": 0.0},
152
+ vf_share_layers=True,
153
+ ),
154
+ )
155
+ )
156
+
157
+ # Add a simple multi-agent setup.
158
+ if args.num_agents > 0:
159
+ config = config.multi_agent(
160
+ policies={f"p{i}" for i in range(args.num_agents)},
161
+ policy_mapping_fn=lambda aid, *a, **kw: f"p{aid}",
162
+ )
163
+
164
+ run_rllib_example_script_experiment(config, args)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/__init__.py ADDED
File without changes
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (177 Bytes). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/__pycache__/env_rendering_and_recording.cpython-310.pyc ADDED
Binary file (8.72 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/__pycache__/env_with_protobuf_observations.cpython-310.pyc ADDED
Binary file (3.46 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/__pycache__/unity3d_env_local.cpython-310.pyc ADDED
Binary file (5.05 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/gpu_requiring_env.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ray
2
+ from ray.rllib.examples.envs.classes.simple_corridor import SimpleCorridor
3
+
4
+
5
+ class GPURequiringEnv(SimpleCorridor):
6
+ """A dummy env that requires a GPU in order to work.
7
+
8
+ The env here is a simple corridor env that additionally simulates a GPU
9
+ check in its constructor via `ray.get_gpu_ids()`. If this returns an
10
+ empty list, we raise an error.
11
+
12
+ To make this env work, use `num_gpus_per_env_runner > 0` (RolloutWorkers
13
+ requesting this many GPUs each) and - maybe - `num_gpus > 0` in case
14
+ your local worker/driver must have an env as well. However, this is
15
+ only the case if `create_env_on_driver`=True (default is False).
16
+ """
17
+
18
+ def __init__(self, config=None):
19
+ super().__init__(config)
20
+
21
+ # Fake-require some GPUs (at least one).
22
+ # If your local worker's env (`create_env_on_driver`=True) does not
23
+ # necessarily require a GPU, you can perform the below assertion only
24
+ # if `config.worker_index != 0`.
25
+ gpus_available = ray.get_gpu_ids()
26
+ assert len(gpus_available) > 0, "Not enough GPUs for this env!"
27
+ print("Env can see these GPUs: {}".format(gpus_available))
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/simple_corridor.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gymnasium as gym
2
+ from gymnasium.spaces import Box, Discrete
3
+ import numpy as np
4
+
5
+
6
+ class SimpleCorridor(gym.Env):
7
+ """Example of a custom env in which you have to walk down a corridor.
8
+
9
+ You can configure the length of the corridor via the env config."""
10
+
11
+ def __init__(self, config=None):
12
+ config = config or {}
13
+
14
+ self.action_space = Discrete(2)
15
+ self.observation_space = Box(0.0, 999.0, shape=(1,), dtype=np.float32)
16
+
17
+ self.set_corridor_length(config.get("corridor_length", 10))
18
+
19
+ self._cur_pos = 0
20
+
21
+ def set_corridor_length(self, length):
22
+ self.end_pos = length
23
+ print(f"Set corridor length to {self.end_pos}")
24
+ assert self.end_pos <= 999, "The maximum `corridor_length` allowed is 999!"
25
+
26
+ def reset(self, *, seed=None, options=None):
27
+ self._cur_pos = 0.0
28
+ return self._get_obs(), {}
29
+
30
+ def step(self, action):
31
+ assert action in [0, 1], action
32
+ if action == 0 and self._cur_pos > 0:
33
+ self._cur_pos -= 1.0
34
+ elif action == 1:
35
+ self._cur_pos += 1.0
36
+ terminated = self._cur_pos >= self.end_pos
37
+ truncated = False
38
+ reward = 1.0 if terminated else -0.01
39
+ return self._get_obs(), reward, terminated, truncated, {}
40
+
41
+ def _get_obs(self):
42
+ return np.array([self._cur_pos], np.float32)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/transformed_action_space_env.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gymnasium as gym
2
+ from typing import Type
3
+
4
+
5
+ class ActionTransform(gym.ActionWrapper):
6
+ def __init__(self, env, low, high):
7
+ super().__init__(env)
8
+ self._low = low
9
+ self._high = high
10
+ self.action_space = type(env.action_space)(
11
+ self._low, self._high, env.action_space.shape, env.action_space.dtype
12
+ )
13
+
14
+ def action(self, action):
15
+ return (action - self._low) / (self._high - self._low) * (
16
+ self.env.action_space.high - self.env.action_space.low
17
+ ) + self.env.action_space.low
18
+
19
+
20
+ def transform_action_space(env_name_or_creator) -> Type[gym.Env]:
21
+ """Wrapper for gym.Envs to have their action space transformed.
22
+
23
+ Args:
24
+ env_name_or_creator (Union[str, Callable[]]: String specifier or
25
+ env_maker function.
26
+
27
+ Returns:
28
+ New transformed_action_space_env function that returns an environment
29
+ wrapped by the ActionTransform wrapper. The constructor takes a
30
+ config dict with `_low` and `_high` keys specifying the new action
31
+ range (default -1.0 to 1.0). The rest of the config dict will be
32
+ passed on to the underlying/wrapped env's constructor.
33
+
34
+ .. testcode::
35
+ :skipif: True
36
+
37
+ # By gym string:
38
+ pendulum_300_to_500_cls = transform_action_space("Pendulum-v1")
39
+ # Create a transformed pendulum env.
40
+ pendulum_300_to_500 = pendulum_300_to_500_cls({"_low": -15.0})
41
+ pendulum_300_to_500.action_space
42
+
43
+ .. testoutput::
44
+
45
+ gym.spaces.Box(-15.0, 1.0, (1, ), "float32")
46
+ """
47
+
48
+ def transformed_action_space_env(config):
49
+ if isinstance(env_name_or_creator, str):
50
+ inner_env = gym.make(env_name_or_creator)
51
+ else:
52
+ inner_env = env_name_or_creator(config)
53
+ _low = config.pop("low", -1.0)
54
+ _high = config.pop("high", 1.0)
55
+ env = ActionTransform(inner_env, _low, _high)
56
+ return env
57
+
58
+ return transformed_action_space_env
59
+
60
+
61
+ TransformedActionPendulum = transform_action_space("Pendulum-v1")
deepseek/lib/python3.10/site-packages/ray/rllib/examples/hierarchical/__pycache__/hierarchical_training.cpython-310.pyc ADDED
Binary file (3.84 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/learners/classes/intrinsic_curiosity_learners.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, List, Optional
2
+
3
+ import gymnasium as gym
4
+ import torch
5
+
6
+ from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_learner import (
7
+ DQNRainbowTorchLearner,
8
+ )
9
+ from ray.rllib.algorithms.ppo.torch.ppo_torch_learner import PPOTorchLearner
10
+ from ray.rllib.connectors.common.add_observations_from_episodes_to_batch import (
11
+ AddObservationsFromEpisodesToBatch,
12
+ )
13
+ from ray.rllib.connectors.common.numpy_to_tensor import NumpyToTensor
14
+ from ray.rllib.connectors.learner.add_next_observations_from_episodes_to_train_batch import ( # noqa
15
+ AddNextObservationsFromEpisodesToTrainBatch,
16
+ )
17
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
18
+ from ray.rllib.core import Columns, DEFAULT_MODULE_ID
19
+ from ray.rllib.core.learner.torch.torch_learner import TorchLearner
20
+ from ray.rllib.core.rl_module.rl_module import RLModule
21
+ from ray.rllib.utils.typing import EpisodeType
22
+
23
+ ICM_MODULE_ID = "_intrinsic_curiosity_model"
24
+
25
+
26
+ class DQNTorchLearnerWithCuriosity(DQNRainbowTorchLearner):
27
+ def build(self) -> None:
28
+ super().build()
29
+ add_intrinsic_curiosity_connectors(self)
30
+
31
+
32
+ class PPOTorchLearnerWithCuriosity(PPOTorchLearner):
33
+ def build(self) -> None:
34
+ super().build()
35
+ add_intrinsic_curiosity_connectors(self)
36
+
37
+
38
+ def add_intrinsic_curiosity_connectors(torch_learner: TorchLearner) -> None:
39
+ """Adds two connector pieces to the Learner pipeline, needed for ICM training.
40
+
41
+ - The `AddNextObservationsFromEpisodesToTrainBatch` connector makes sure the train
42
+ batch contains the NEXT_OBS for ICM's forward- and inverse dynamics net training.
43
+ - The `IntrinsicCuriosityModelConnector` piece computes intrinsic rewards from the
44
+ ICM and adds the results to the extrinsic reward of the main module's train batch.
45
+
46
+ Args:
47
+ torch_learner: The TorchLearner, to whose Learner pipeline the two ICM connector
48
+ pieces should be added.
49
+ """
50
+ learner_config_dict = torch_learner.config.learner_config_dict
51
+
52
+ # Assert, we are only training one policy (RLModule) and we have the ICM
53
+ # in our MultiRLModule.
54
+ assert (
55
+ len(torch_learner.module) == 2
56
+ and DEFAULT_MODULE_ID in torch_learner.module
57
+ and ICM_MODULE_ID in torch_learner.module
58
+ )
59
+
60
+ # Make sure both curiosity loss settings are explicitly set in the
61
+ # `learner_config_dict`.
62
+ if (
63
+ "forward_loss_weight" not in learner_config_dict
64
+ or "intrinsic_reward_coeff" not in learner_config_dict
65
+ ):
66
+ raise KeyError(
67
+ "When using the IntrinsicCuriosityTorchLearner, both `forward_loss_weight` "
68
+ " and `intrinsic_reward_coeff` must be part of your config's "
69
+ "`learner_config_dict`! Add these values through: `config.training("
70
+ "learner_config_dict={'forward_loss_weight': .., 'intrinsic_reward_coeff': "
71
+ "..})`."
72
+ )
73
+
74
+ if torch_learner.config.add_default_connectors_to_learner_pipeline:
75
+ # Prepend a "add-NEXT_OBS-from-episodes-to-train-batch" connector piece
76
+ # (right after the corresponding "add-OBS-..." default piece).
77
+ torch_learner._learner_connector.insert_after(
78
+ AddObservationsFromEpisodesToBatch,
79
+ AddNextObservationsFromEpisodesToTrainBatch(),
80
+ )
81
+ # Append the ICM connector, computing intrinsic rewards and adding these to
82
+ # the main model's extrinsic rewards.
83
+ torch_learner._learner_connector.insert_after(
84
+ NumpyToTensor,
85
+ IntrinsicCuriosityModelConnector(
86
+ intrinsic_reward_coeff=(
87
+ torch_learner.config.learner_config_dict["intrinsic_reward_coeff"]
88
+ )
89
+ ),
90
+ )
91
+
92
+
93
+ class IntrinsicCuriosityModelConnector(ConnectorV2):
94
+ """Learner ConnectorV2 piece to compute intrinsic rewards based on an ICM.
95
+
96
+ For more details, see here:
97
+ [1] Curiosity-driven Exploration by Self-supervised Prediction
98
+ Pathak, Agrawal, Efros, and Darrell - UC Berkeley - ICML 2017.
99
+ https://arxiv.org/pdf/1705.05363.pdf
100
+
101
+ This connector piece:
102
+ - requires two RLModules to be present in the MultiRLModule:
103
+ DEFAULT_MODULE_ID (the policy model to be trained) and ICM_MODULE_ID (the intrinsic
104
+ curiosity architecture).
105
+ - must be located toward the end of your Learner pipeline (after the
106
+ `NumpyToTensor` piece) in order to perform a forward pass on the ICM model with the
107
+ readily compiled batch and a following forward-loss computation to get the intrinsic
108
+ rewards.
109
+ - these intrinsic rewards will then be added to the (extrinsic) rewards in the main
110
+ model's train batch.
111
+ """
112
+
113
+ def __init__(
114
+ self,
115
+ input_observation_space: Optional[gym.Space] = None,
116
+ input_action_space: Optional[gym.Space] = None,
117
+ *,
118
+ intrinsic_reward_coeff: float,
119
+ **kwargs,
120
+ ):
121
+ """Initializes an IntrinsicCuriosityModelConnector instance.
122
+
123
+ Args:
124
+ intrinsic_reward_coeff: The weight with which to multiply the intrinsic
125
+ reward before adding it to the extrinsic rewards of the main model.
126
+ """
127
+ super().__init__(input_observation_space, input_action_space)
128
+
129
+ self.intrinsic_reward_coeff = intrinsic_reward_coeff
130
+
131
+ def __call__(
132
+ self,
133
+ *,
134
+ rl_module: RLModule,
135
+ batch: Any,
136
+ episodes: List[EpisodeType],
137
+ explore: Optional[bool] = None,
138
+ shared_data: Optional[dict] = None,
139
+ **kwargs,
140
+ ) -> Any:
141
+ # Assert that the batch is ready.
142
+ assert DEFAULT_MODULE_ID in batch and ICM_MODULE_ID not in batch
143
+ assert (
144
+ Columns.OBS in batch[DEFAULT_MODULE_ID]
145
+ and Columns.NEXT_OBS in batch[DEFAULT_MODULE_ID]
146
+ )
147
+ # TODO (sven): We are performing two forward passes per update right now.
148
+ # Once here in the connector (w/o grad) to just get the intrinsic rewards
149
+ # and once in the learner to actually compute the ICM loss and update the ICM.
150
+ # Maybe we can save one of these, but this would currently harm the DDP-setup
151
+ # for multi-GPU training.
152
+ with torch.no_grad():
153
+ # Perform ICM forward pass.
154
+ fwd_out = rl_module[ICM_MODULE_ID].forward_train(batch[DEFAULT_MODULE_ID])
155
+
156
+ # Add the intrinsic rewards to the main module's extrinsic rewards.
157
+ batch[DEFAULT_MODULE_ID][Columns.REWARDS] += (
158
+ self.intrinsic_reward_coeff * fwd_out[Columns.INTRINSIC_REWARDS]
159
+ )
160
+
161
+ # Duplicate the batch such that the ICM also has data to learn on.
162
+ batch[ICM_MODULE_ID] = batch[DEFAULT_MODULE_ID]
163
+
164
+ return batch
deepseek/lib/python3.10/site-packages/ray/rllib/examples/metrics/__init__.py ADDED
File without changes
deepseek/lib/python3.10/site-packages/ray/rllib/examples/metrics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/metrics/custom_metrics_in_env_runners.py ADDED
@@ -0,0 +1,340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example of adding custom metrics to the results returned by `EnvRunner.sample()`.
2
+
3
+ We use the `MetricsLogger` class, which RLlib provides inside all its components (only
4
+ when using the new API stack through
5
+ `config.api_stack(enable_rl_module_and_learner=True,
6
+ enable_env_runner_and_connector_v2=True)`),
7
+ and which offers a unified API to log individual values per iteration, per episode
8
+ timestep, per episode (as a whole), per loss call, etc..
9
+ `MetricsLogger` objects are available in all custom API code, for example inside your
10
+ custom `Algorithm.training_step()` methods, custom loss functions, custom callbacks,
11
+ and custom EnvRunners.
12
+
13
+ This example:
14
+ - demonstrates how to write a custom Callbacks subclass, which overrides some
15
+ EnvRunner-bound methods, such as `on_episode_start`, `on_episode_step`, and
16
+ `on_episode_end`.
17
+ - shows how to temporarily store per-timestep data inside the currently running
18
+ episode within the EnvRunner (and the callback methods).
19
+ - shows how to extract this temporary data again when the episode is done in order
20
+ to further process the data into a single, reportable metric.
21
+ - explains how to use the `MetricsLogger` API to create and log different metrics
22
+ to the final Algorithm's iteration output. These include - but are not limited to -
23
+ a 2D heatmap (image) per episode, an average per-episode metric (over a sliding
24
+ window of 200 episodes), a maximum per-episode metric (over a sliding window of 100
25
+ episodes), and an EMA-smoothed metric.
26
+
27
+ In this script, we define a custom `DefaultCallbacks` class and then override some of
28
+ its methods in order to define custom behavior during episode sampling. In particular,
29
+ we add custom metrics to the Algorithm's published result dict (once per
30
+ iteration) before it is sent back to Ray Tune (and possibly a WandB logger).
31
+
32
+ For demonstration purposes only, we log the following custom metrics:
33
+ - A 2D heatmap showing the frequency of all accumulated y/x-locations of Ms Pacman
34
+ during an episode. We create and log a separate heatmap per episode and limit the number
35
+ of heatmaps reported back to the algorithm by each EnvRunner to 10 (`window=10`).
36
+ - The maximum per-episode distance travelled by Ms Pacman over a sliding window of 100
37
+ episodes.
38
+ - The average per-episode distance travelled by Ms Pacman over a sliding window of 200
39
+ episodes.
40
+ - The EMA-smoothed number of lives of Ms Pacman at each timestep (across all episodes).
41
+
42
+
43
+ How to run this script
44
+ ----------------------
45
+ `python [script file name].py --enable-new-api-stack --wandb-key [your WandB key]
46
+ --wandb-project [some project name]`
47
+
48
+ For debugging, use the following additional command line options
49
+ `--no-tune --num-env-runners=0`
50
+ which should allow you to set breakpoints anywhere in the RLlib code and
51
+ have the execution stop there for inspection and debugging.
52
+
53
+ For logging to your WandB account, use:
54
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
55
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
56
+
57
+
58
+ Results to expect
59
+ -----------------
60
+ This script has not been finetuned to actually learn the environment. Its purpose
61
+ is to show how you can create and log custom metrics during episode sampling and
62
+ have these stats be sent to WandB for further analysis.
63
+
64
+ However, you should see training proceeding over time like this:
65
+ +---------------------+----------+----------------+--------+------------------+
66
+ | Trial name | status | loc | iter | total time (s) |
67
+ | | | | | |
68
+ |---------------------+----------+----------------+--------+------------------+
69
+ | PPO_env_efd16_00000 | RUNNING | 127.0.0.1:6181 | 4 | 72.4725 |
70
+ +---------------------+----------+----------------+--------+------------------+
71
+ +------------------------+------------------------+------------------------+
72
+ | episode_return_mean | num_episodes_lifetim | num_env_steps_traine |
73
+ | | e | d_lifetime |
74
+ |------------------------+------------------------+------------------------|
75
+ | 76.4 | 45 | 8053 |
76
+ +------------------------+------------------------+------------------------+
77
+ """
78
+ from typing import Optional, Sequence
79
+
80
+ import gymnasium as gym
81
+ import matplotlib.pyplot as plt
82
+ from matplotlib.colors import Normalize
83
+ import numpy as np
84
+
85
+ from ray.rllib.algorithms.callbacks import DefaultCallbacks
86
+ from ray.rllib.env.wrappers.atari_wrappers import wrap_atari_for_new_api_stack
87
+ from ray.rllib.utils.images import resize
88
+ from ray.rllib.utils.test_utils import (
89
+ add_rllib_example_script_args,
90
+ run_rllib_example_script_experiment,
91
+ )
92
+ from ray.tune.registry import get_trainable_cls, register_env
93
+
94
+
95
+ class MsPacmanHeatmapCallback(DefaultCallbacks):
96
+ """A custom callback to extract information from MsPacman and log these.
97
+
98
+ This callback logs:
99
+ - the positions of MsPacman over an episode to produce heatmaps from this data.
100
+ At each episode timestep, the current pacman (y/x)-position is determined and added
101
+ to the episode's temporary storage. At the end of an episode, a simple 2D heatmap
102
+ is created from this data and the heatmap is logged to the MetricsLogger (to be
103
+ viewed in WandB).
104
+ - the max distance travelled by MsPacman per episode, then averaging these max
105
+ values over a window of size=100.
106
+ - the mean distance travelled by MsPacman per episode (over a sliding window of
107
+ 200 episodes), and the number of lives of MsPacman EMA-smoothed over time.
108
+
109
+ This callback can be setup to only log stats on certain EnvRunner indices through
110
+ the `env_runner_indices` c'tor arg.
111
+ """
112
+
113
+ def __init__(self, env_runner_indices: Optional[Sequence[int]] = None):
114
+ """Initializes an MsPacmanHeatmapCallback instance.
115
+
116
+ Args:
117
+ env_runner_indices: The (optional) EnvRunner indices, for this callback
118
+ should be active. If None, activates the heatmap for all EnvRunners.
119
+ If a Sequence type, only logs/heatmaps, if the EnvRunner index is found
120
+ in `env_runner_indices`.
121
+ """
122
+ super().__init__()
123
+ # Only create heatmap on certain EnvRunner indices?
124
+ self._env_runner_indices = env_runner_indices
125
+
126
+ # Mapping from episode ID to the episode's start (y/x)-position.
127
+ self._episode_start_position = {}
128
+
129
+ def on_episode_start(
130
+ self,
131
+ *,
132
+ episode,
133
+ env_runner,
134
+ metrics_logger,
135
+ env,
136
+ env_index,
137
+ rl_module,
138
+ **kwargs,
139
+ ) -> None:
140
+ # Skip, if this EnvRunner's index is not in `self._env_runner_indices`.
141
+ if (
142
+ self._env_runner_indices is not None
143
+ and env_runner.worker_index not in self._env_runner_indices
144
+ ):
145
+ return
146
+
147
+ yx_pos = self._get_pacman_yx_pos(env)
148
+ self._episode_start_position[episode.id_] = yx_pos
149
+
150
+ def on_episode_step(
151
+ self,
152
+ *,
153
+ episode,
154
+ env_runner,
155
+ metrics_logger,
156
+ env,
157
+ env_index,
158
+ rl_module,
159
+ **kwargs,
160
+ ) -> None:
161
+ """Adds current pacman y/x-position to episode's temporary data."""
162
+
163
+ # Skip, if this EnvRunner's index is not in `self._env_runner_indices`.
164
+ if (
165
+ self._env_runner_indices is not None
166
+ and env_runner.worker_index not in self._env_runner_indices
167
+ ):
168
+ return
169
+
170
+ yx_pos = self._get_pacman_yx_pos(env)
171
+ episode.add_temporary_timestep_data("pacman_yx_pos", yx_pos)
172
+
173
+ # Compute distance to start position.
174
+ dist_travelled = np.sqrt(
175
+ np.sum(
176
+ np.square(
177
+ np.array(self._episode_start_position[episode.id_])
178
+ - np.array(yx_pos)
179
+ )
180
+ )
181
+ )
182
+ episode.add_temporary_timestep_data("pacman_dist_travelled", dist_travelled)
183
+
184
+ def on_episode_end(
185
+ self,
186
+ *,
187
+ episode,
188
+ env_runner,
189
+ metrics_logger,
190
+ env,
191
+ env_index,
192
+ rl_module,
193
+ **kwargs,
194
+ ) -> None:
195
+ # Skip, if this EnvRunner's index is not in `self._env_runner_indices`.
196
+ if (
197
+ self._env_runner_indices is not None
198
+ and env_runner.worker_index not in self._env_runner_indices
199
+ ):
200
+ return
201
+
202
+ # Erase the start position record.
203
+ del self._episode_start_position[episode.id_]
204
+
205
+ # Get all pacman y/x-positions from the episode.
206
+ yx_positions = episode.get_temporary_timestep_data("pacman_yx_pos")
207
+ # h x w
208
+ heatmap = np.zeros((80, 100), dtype=np.int32)
209
+ for yx_pos in yx_positions:
210
+ if yx_pos != (-1, -1):
211
+ heatmap[yx_pos[0], yx_pos[1]] += 1
212
+
213
+ # Create the actual heatmap image.
214
+ # Normalize the heatmap to values between 0 and 1
215
+ norm = Normalize(vmin=heatmap.min(), vmax=heatmap.max())
216
+ # Use a colormap (e.g., 'hot') to map normalized values to RGB
217
+ colormap = plt.get_cmap("coolwarm") # try "hot" and "viridis" as well?
218
+ # Returns an (80, 100, 4) array (RGBA).
219
+ heatmap_rgb = colormap(norm(heatmap))
220
+ # Convert RGBA to RGB by dropping the alpha channel and converting to uint8.
221
+ heatmap_rgb = (heatmap_rgb[:, :, :3] * 255).astype(np.uint8)
222
+ # Log the image.
223
+ metrics_logger.log_value(
224
+ "pacman_heatmap",
225
+ heatmap_rgb,
226
+ reduce=None,
227
+ window=10, # Log 10 images at most per EnvRunner/training iteration.
228
+ )
229
+
230
+ # Get the max distance travelled for this episode.
231
+ dist_travelled = np.max(
232
+ episode.get_temporary_timestep_data("pacman_dist_travelled")
233
+ )
234
+
235
+ # Log the max. dist travelled in this episode (window=100).
236
+ metrics_logger.log_value(
237
+ "pacman_max_dist_travelled",
238
+ dist_travelled,
239
+ # For future reductions (e.g. over n different episodes and all the
240
+ # data coming from other env runners), reduce by max.
241
+ reduce="max",
242
+ # Always keep the last 100 values and max over this window.
243
+ # Note that this means that over time, if the values drop to lower
244
+ # numbers again, the reported `pacman_max_dist_travelled` might also
245
+ # decrease again (meaning `window=100` makes this not a "lifetime max").
246
+ window=100,
247
+ )
248
+
249
+ # Log the average dist travelled per episode (window=200).
250
+ metrics_logger.log_value(
251
+ "pacman_mean_dist_travelled",
252
+ dist_travelled,
253
+ reduce="mean", # <- default
254
+ # Always keep the last 200 values and average over this window.
255
+ window=200,
256
+ )
257
+
258
+ # Log the number of lives (as EMA-smoothed; no window).
259
+ metrics_logger.log_value(
260
+ "pacman_lifes",
261
+ episode.get_infos(-1)["lives"],
262
+ reduce="mean", # <- default (must be "mean" for EMA smoothing)
263
+ ema_coeff=0.01, # <- default EMA coefficient (`window` must be None)
264
+ )
265
+
266
+ def _get_pacman_yx_pos(self, env):
267
+ # If we have a vector env, only render the sub-env at index 0.
268
+ if isinstance(env.unwrapped, gym.vector.VectorEnv):
269
+ image = env.envs[0].render()
270
+ else:
271
+ image = env.render()
272
+ # Downsize to 100x100 for our utility function to work with.
273
+ image = resize(image, 100, 100)
274
+ # Crop image at bottom 20% (where lives are shown, which may confuse the pacman
275
+ # detector).
276
+ image = image[:80]
277
+ # Define the yellow color range in RGB (Ms. Pac-Man is yellowish).
278
+ # We allow some range around yellow to account for variation.
279
+ yellow_lower = np.array([200, 130, 65], dtype=np.uint8)
280
+ yellow_upper = np.array([220, 175, 105], dtype=np.uint8)
281
+ # Create a mask that highlights the yellow pixels
282
+ mask = np.all((image >= yellow_lower) & (image <= yellow_upper), axis=-1)
283
+ # Find the coordinates of the yellow pixels
284
+ yellow_pixels = np.argwhere(mask)
285
+ if yellow_pixels.size == 0:
286
+ return (-1, -1)
287
+
288
+ # Calculate the centroid of the yellow pixels to get Ms. Pac-Man's position
289
+ y, x = yellow_pixels.mean(axis=0).astype(int)
290
+ return y, x
291
+
292
+
293
+ parser = add_rllib_example_script_args(default_reward=450.0)
294
+ parser.set_defaults(enable_new_api_stack=True)
295
+
296
+
297
+ if __name__ == "__main__":
298
+ args = parser.parse_args()
299
+
300
+ # Register our environment with tune.
301
+ register_env(
302
+ "env",
303
+ lambda cfg: wrap_atari_for_new_api_stack(
304
+ gym.make("ale_py:ALE/MsPacman-v5", **cfg, **{"render_mode": "rgb_array"}),
305
+ framestack=4,
306
+ ),
307
+ )
308
+
309
+ base_config = (
310
+ get_trainable_cls(args.algo)
311
+ .get_default_config()
312
+ .environment(
313
+ "env",
314
+ env_config={
315
+ # Make analogous to old v4 + NoFrameskip.
316
+ "frameskip": 1,
317
+ "full_action_space": False,
318
+ "repeat_action_probability": 0.0,
319
+ },
320
+ )
321
+ .callbacks(MsPacmanHeatmapCallback)
322
+ .training(
323
+ # Make learning time fast, but note that this example may not
324
+ # necessarily learn well (its purpose is to demo the
325
+ # functionality of callbacks and the MetricsLogger).
326
+ train_batch_size_per_learner=2000,
327
+ minibatch_size=512,
328
+ num_epochs=6,
329
+ )
330
+ .rl_module(
331
+ model_config_dict={
332
+ "vf_share_layers": True,
333
+ "conv_filters": [[16, 4, 2], [32, 4, 2], [64, 4, 2], [128, 4, 2]],
334
+ "conv_activation": "relu",
335
+ "post_fcnet_hiddens": [256],
336
+ }
337
+ )
338
+ )
339
+
340
+ run_rllib_example_script_experiment(base_config, args)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/multi_agent/__init__.py ADDED
File without changes
deepseek/lib/python3.10/site-packages/ray/rllib/examples/multi_agent/multi_agent_pendulum.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Simple example of setting up an agent-to-module mapping function.
2
+
3
+ How to run this script
4
+ ----------------------
5
+ `python [script file name].py --enable-new-api-stack --num-agents=2`
6
+
7
+ Control the number of agents and policies (RLModules) via --num-agents and
8
+ --num-policies.
9
+
10
+ For debugging, use the following additional command line options
11
+ `--no-tune --num-env-runners=0`
12
+ which should allow you to set breakpoints anywhere in the RLlib code and
13
+ have the execution stop there for inspection and debugging.
14
+
15
+ For logging to your WandB account, use:
16
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
17
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
18
+ """
19
+
20
+ from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
21
+ from ray.rllib.examples.envs.classes.multi_agent import MultiAgentPendulum
22
+ from ray.rllib.utils.test_utils import (
23
+ add_rllib_example_script_args,
24
+ run_rllib_example_script_experiment,
25
+ )
26
+ from ray.tune.registry import get_trainable_cls, register_env
27
+
28
+ parser = add_rllib_example_script_args(
29
+ default_iters=200,
30
+ default_timesteps=100000,
31
+ default_reward=-400.0,
32
+ )
33
+ # TODO (sven): This arg is currently ignored (hard-set to 2).
34
+ parser.add_argument("--num-policies", type=int, default=2)
35
+
36
+
37
+ if __name__ == "__main__":
38
+ args = parser.parse_args()
39
+
40
+ # Register our environment with tune.
41
+ if args.num_agents > 0:
42
+ register_env(
43
+ "env",
44
+ lambda _: MultiAgentPendulum(config={"num_agents": args.num_agents}),
45
+ )
46
+
47
+ base_config = (
48
+ get_trainable_cls(args.algo)
49
+ .get_default_config()
50
+ .environment("env" if args.num_agents > 0 else "Pendulum-v1")
51
+ .training(
52
+ train_batch_size_per_learner=512,
53
+ minibatch_size=64,
54
+ lambda_=0.1,
55
+ gamma=0.95,
56
+ lr=0.0003,
57
+ model={"fcnet_activation": "relu"},
58
+ vf_clip_param=10.0,
59
+ )
60
+ .rl_module(
61
+ model_config=DefaultModelConfig(fcnet_activation="relu"),
62
+ )
63
+ )
64
+
65
+ # Add a simple multi-agent setup.
66
+ if args.num_agents > 0:
67
+ base_config.multi_agent(
68
+ policies={f"p{i}" for i in range(args.num_agents)},
69
+ policy_mapping_fn=lambda aid, *a, **kw: f"p{aid}",
70
+ )
71
+
72
+ # Augment
73
+ run_rllib_example_script_experiment(base_config, args)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/multi_agent/pettingzoo_shared_value_function.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ msg = """
2
+ This script is NOT yet ready, but will be available soon at this location. It will
3
+ feature a MultiRLModule with one shared value function and n policy heads for
4
+ cooperative multi-agent learning.
5
+ """
6
+
7
+ raise NotImplementedError(msg)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/multi_agent/two_step_game_with_grouped_agents.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """The two-step game from the QMIX paper:
2
+ https://arxiv.org/pdf/1803.11485.pdf
3
+
4
+ See also: rllib/examples/centralized_critic.py for centralized critic PPO on this game.
5
+
6
+ How to run this script
7
+ ----------------------
8
+ `python [script file name].py --enable-new-api-stack --num-agents=2`
9
+
10
+ Note that in this script, we use a multi-agent environment in which both
11
+ agents that normally play this game have been merged into one agent with ID
12
+ "agents" and observation- and action-spaces being 2-tupled (1 item for each
13
+ agent). The "agents" agent is mapped to the policy with ID "p0".
14
+
15
+ For debugging, use the following additional command line options
16
+ `--no-tune --num-env-runners=0`
17
+ Which should allow you to set breakpoints anywhere in the RLlib code and
18
+ have the execution stop there for inspection and debugging.
19
+
20
+ For logging to your WandB account, use:
21
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
22
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
23
+
24
+
25
+ Results to expect
26
+ -----------------
27
+ You should expect a reward of 8.0 (the max to reach in this game) eventually
28
+ being achieved by a simple PPO policy (no tuning, just using RLlib's default settings):
29
+
30
+ +---------------------------------+------------+-----------------+--------+
31
+ | Trial name | status | loc | iter |
32
+ |---------------------------------+------------+-----------------+--------+
33
+ | PPO_grouped_twostep_4354b_00000 | TERMINATED | 127.0.0.1:42602 | 20 |
34
+ +---------------------------------+------------+-----------------+--------+
35
+
36
+ +------------------+-------+-------------------+-------------+
37
+ | total time (s) | ts | combined reward | reward p0 |
38
+ +------------------+-------+-------------------+-------------|
39
+ | 87.5756 | 80000 | 8 | 8 |
40
+ +------------------+-------+-------------------+-------------+
41
+ """
42
+
43
+ from ray.rllib.connectors.env_to_module import FlattenObservations
44
+ from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec
45
+ from ray.rllib.core.rl_module.rl_module import RLModuleSpec
46
+ from ray.rllib.examples.envs.classes.two_step_game import TwoStepGameWithGroupedAgents
47
+ from ray.rllib.utils.test_utils import (
48
+ add_rllib_example_script_args,
49
+ run_rllib_example_script_experiment,
50
+ )
51
+ from ray.tune.registry import register_env, get_trainable_cls
52
+
53
+
54
+ parser = add_rllib_example_script_args(default_reward=7.0)
55
+
56
+
57
+ if __name__ == "__main__":
58
+ args = parser.parse_args()
59
+
60
+ assert args.num_agents == 2, "Must set --num-agents=2 when running this script!"
61
+ assert (
62
+ args.enable_new_api_stack
63
+ ), "Must set --enable-new-api-stack when running this script!"
64
+
65
+ register_env(
66
+ "grouped_twostep",
67
+ lambda config: TwoStepGameWithGroupedAgents(config),
68
+ )
69
+
70
+ base_config = (
71
+ get_trainable_cls(args.algo)
72
+ .get_default_config()
73
+ .environment("grouped_twostep")
74
+ .env_runners(
75
+ env_to_module_connector=lambda env: FlattenObservations(multi_agent=True),
76
+ )
77
+ .multi_agent(
78
+ policies={"p0"},
79
+ policy_mapping_fn=lambda aid, *a, **kw: "p0",
80
+ )
81
+ .rl_module(
82
+ rl_module_spec=MultiRLModuleSpec(
83
+ rl_module_specs={
84
+ "p0": RLModuleSpec(),
85
+ },
86
+ )
87
+ )
88
+ )
89
+
90
+ run_rllib_example_script_experiment(base_config, args)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/multi_agent/utils/self_play_callback_old_api_stack.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from ray.rllib.algorithms.callbacks import DefaultCallbacks
4
+ from ray.rllib.utils.deprecation import Deprecated
5
+ from ray.rllib.utils.metrics import ENV_RUNNER_RESULTS
6
+
7
+
8
+ @Deprecated(help="Use the example for the new RLlib API stack.", error=False)
9
+ class SelfPlayCallbackOldAPIStack(DefaultCallbacks):
10
+ def __init__(self, win_rate_threshold):
11
+ super().__init__()
12
+ # 0=RandomPolicy, 1=1st main policy snapshot,
13
+ # 2=2nd main policy snapshot, etc..
14
+ self.current_opponent = 0
15
+
16
+ self.win_rate_threshold = win_rate_threshold
17
+
18
+ def on_train_result(self, *, algorithm, result, **kwargs):
19
+ # Get the win rate for the train batch.
20
+ # Note that normally, you should set up a proper evaluation config,
21
+ # such that evaluation always happens on the already updated policy,
22
+ # instead of on the already used train_batch.
23
+ main_rew = result[ENV_RUNNER_RESULTS]["hist_stats"].pop("policy_main_reward")
24
+ opponent_rew = list(result[ENV_RUNNER_RESULTS]["hist_stats"].values())[0]
25
+ assert len(main_rew) == len(opponent_rew)
26
+ won = 0
27
+ for r_main, r_opponent in zip(main_rew, opponent_rew):
28
+ if r_main > r_opponent:
29
+ won += 1
30
+ win_rate = won / len(main_rew)
31
+ result["win_rate"] = win_rate
32
+ print(f"Iter={algorithm.iteration} win-rate={win_rate} -> ", end="")
33
+ # If win rate is good -> Snapshot current policy and play against
34
+ # it next, keeping the snapshot fixed and only improving the "main"
35
+ # policy.
36
+ if win_rate > self.win_rate_threshold:
37
+ self.current_opponent += 1
38
+ new_pol_id = f"main_v{self.current_opponent}"
39
+ print(f"adding new opponent to the mix ({new_pol_id}).")
40
+
41
+ # Re-define the mapping function, such that "main" is forced
42
+ # to play against any of the previously played policies
43
+ # (excluding "random").
44
+ def policy_mapping_fn(agent_id, episode, worker, **kwargs):
45
+ # agent_id = [0|1] -> policy depends on episode ID
46
+ # This way, we make sure that both policies sometimes play
47
+ # (start player) and sometimes agent1 (player to move 2nd).
48
+ return (
49
+ "main"
50
+ if episode.episode_id % 2 == agent_id
51
+ else "main_v{}".format(
52
+ np.random.choice(list(range(1, self.current_opponent + 1)))
53
+ )
54
+ )
55
+
56
+ main_policy = algorithm.get_policy("main")
57
+ new_policy = algorithm.add_policy(
58
+ policy_id=new_pol_id,
59
+ policy_cls=type(main_policy),
60
+ policy_mapping_fn=policy_mapping_fn,
61
+ )
62
+
63
+ # Set the weights of the new policy to the main policy.
64
+ # We'll keep training the main policy, whereas `new_pol_id` will
65
+ # remain fixed.
66
+ main_state = main_policy.get_state()
67
+ new_policy.set_state(main_state)
68
+ # We need to sync the just copied local weights (from main policy)
69
+ # to all the remote workers as well.
70
+ algorithm.env_runner_group.sync_weights()
71
+ else:
72
+ print("not good enough; will keep learning ...")
73
+
74
+ # +2 = main + random
75
+ result["league_size"] = self.current_opponent + 2
deepseek/lib/python3.10/site-packages/ray/rllib/examples/offline_rl/__pycache__/custom_input_api.cpython-310.pyc ADDED
Binary file (3.82 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/offline_rl/offline_rl.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @OldAPIStack
2
+
3
+ """Example on how to use CQL to learn from an offline JSON file.
4
+
5
+ Important note: Make sure that your offline data file contains only
6
+ a single timestep per line to mimic the way SAC pulls samples from
7
+ the buffer.
8
+
9
+ Generate the offline json file by running an SAC algo until it reaches expert
10
+ level on your command line. For example:
11
+ $ cd ray
12
+ $ rllib train -f rllib/tuned_examples/sac/pendulum-sac.yaml --no-ray-ui
13
+
14
+ Also make sure that in the above SAC yaml file (pendulum-sac.yaml),
15
+ you specify an additional "output" key with any path on your local
16
+ file system. In that path, the offline json files will be written to.
17
+
18
+ Use the generated file(s) as "input" in the CQL config below
19
+ (`config["input"] = [list of your json files]`), then run this script.
20
+ """
21
+
22
+ import argparse
23
+ import numpy as np
24
+
25
+ from ray.rllib.policy.sample_batch import convert_ma_batch_to_sample_batch
26
+ from ray.rllib.algorithms import cql as cql
27
+ from ray.rllib.execution.rollout_ops import (
28
+ synchronous_parallel_sample,
29
+ )
30
+ from ray.rllib.utils.framework import try_import_torch
31
+ from ray.rllib.utils.metrics import (
32
+ ENV_RUNNER_RESULTS,
33
+ EPISODE_RETURN_MEAN,
34
+ EVALUATION_RESULTS,
35
+ )
36
+
37
+ torch, _ = try_import_torch()
38
+
39
+ parser = argparse.ArgumentParser()
40
+ parser.add_argument(
41
+ "--as-test",
42
+ action="store_true",
43
+ help="Whether this script should be run as a test: --stop-reward must "
44
+ "be achieved within --stop-timesteps AND --stop-iters.",
45
+ )
46
+ parser.add_argument(
47
+ "--stop-iters", type=int, default=5, help="Number of iterations to train."
48
+ )
49
+ parser.add_argument(
50
+ "--stop-reward", type=float, default=50.0, help="Reward at which we stop training."
51
+ )
52
+
53
+
54
+ if __name__ == "__main__":
55
+ args = parser.parse_args()
56
+
57
+ # See rllib/tuned_examples/cql/pendulum-cql.yaml for comparison.
58
+ config = (
59
+ cql.CQLConfig()
60
+ .api_stack(
61
+ enable_env_runner_and_connector_v2=False,
62
+ enable_rl_module_and_learner=False,
63
+ )
64
+ .framework(framework="torch")
65
+ .env_runners(num_env_runners=0)
66
+ .training(
67
+ n_step=3,
68
+ bc_iters=0,
69
+ clip_actions=False,
70
+ tau=0.005,
71
+ target_entropy="auto",
72
+ q_model_config={
73
+ "fcnet_hiddens": [256, 256],
74
+ "fcnet_activation": "relu",
75
+ },
76
+ policy_model_config={
77
+ "fcnet_hiddens": [256, 256],
78
+ "fcnet_activation": "relu",
79
+ },
80
+ optimization_config={
81
+ "actor_learning_rate": 3e-4,
82
+ "critic_learning_rate": 3e-4,
83
+ "entropy_learning_rate": 3e-4,
84
+ },
85
+ train_batch_size=256,
86
+ target_network_update_freq=1,
87
+ num_steps_sampled_before_learning_starts=256,
88
+ )
89
+ .reporting(min_train_timesteps_per_iteration=1000)
90
+ .debugging(log_level="INFO")
91
+ .environment("Pendulum-v1", normalize_actions=True)
92
+ .offline_data(
93
+ input_config={
94
+ "paths": ["tests/data/pendulum/enormous.zip"],
95
+ "format": "json",
96
+ }
97
+ )
98
+ .evaluation(
99
+ evaluation_num_env_runners=1,
100
+ evaluation_interval=1,
101
+ evaluation_duration=10,
102
+ evaluation_parallel_to_training=False,
103
+ evaluation_config=cql.CQLConfig.overrides(input_="sampler"),
104
+ )
105
+ )
106
+ # evaluation_parallel_to_training should be False b/c iterations are very long
107
+ # and this would cause evaluation to lag one iter behind training.
108
+
109
+ # Check, whether we can learn from the given file in `num_iterations`
110
+ # iterations, up to a reward of `min_reward`.
111
+ num_iterations = 5
112
+ min_reward = -300
113
+
114
+ cql_algorithm = cql.CQL(config=config)
115
+ learnt = False
116
+ for i in range(num_iterations):
117
+ print(f"Iter {i}")
118
+ eval_results = cql_algorithm.train().get(EVALUATION_RESULTS)
119
+ if eval_results:
120
+ print(
121
+ "... R={}".format(eval_results[ENV_RUNNER_RESULTS][EPISODE_RETURN_MEAN])
122
+ )
123
+ # Learn until some reward is reached on an actual live env.
124
+ if eval_results[ENV_RUNNER_RESULTS][EPISODE_RETURN_MEAN] >= min_reward:
125
+ # Test passed gracefully.
126
+ if args.as_test:
127
+ print("Test passed after {} iterations.".format(i))
128
+ quit(0)
129
+ learnt = True
130
+ break
131
+
132
+ # Get policy and model.
133
+ cql_policy = cql_algorithm.get_policy()
134
+ cql_model = cql_policy.model
135
+
136
+ # If you would like to query CQL's learnt Q-function for arbitrary
137
+ # (cont.) actions, do the following:
138
+ obs_batch = torch.from_numpy(np.random.random(size=(5, 3)))
139
+ action_batch = torch.from_numpy(np.random.random(size=(5, 1)))
140
+ q_values = cql_model.get_q_values(obs_batch, action_batch)[0]
141
+ # If you are using the "twin_q", there'll be 2 Q-networks and
142
+ # we usually consider the min of the 2 outputs, like so:
143
+ twin_q_values = cql_model.get_twin_q_values(obs_batch, action_batch)[0]
144
+ final_q_values = torch.min(q_values, twin_q_values)[0]
145
+ print(f"final_q_values={final_q_values.detach().numpy()}")
146
+
147
+ # Example on how to do evaluation on the trained Algorithm.
148
+ # using the data from our buffer.
149
+ # Get a sample (MultiAgentBatch).
150
+
151
+ batch = synchronous_parallel_sample(worker_set=cql_algorithm.env_runner_group)
152
+ batch = convert_ma_batch_to_sample_batch(batch)
153
+ obs = torch.from_numpy(batch["obs"])
154
+ # Pass the observations through our model to get the
155
+ # features, which then to pass through the Q-head.
156
+ model_out, _ = cql_model({"obs": obs})
157
+ # The estimated Q-values from the (historic) actions in the batch.
158
+ q_values_old = cql_model.get_q_values(
159
+ model_out, torch.from_numpy(batch["actions"])
160
+ )[0]
161
+ # The estimated Q-values for the new actions computed by our policy.
162
+ actions_new = cql_policy.compute_actions_from_input_dict({"obs": obs})[0]
163
+ q_values_new = cql_model.get_q_values(model_out, torch.from_numpy(actions_new))[0]
164
+ print(f"Q-val batch={q_values_old.detach().numpy()}")
165
+ print(f"Q-val policy={q_values_new.detach().numpy()}")
166
+
167
+ cql_algorithm.stop()
deepseek/lib/python3.10/site-packages/ray/rllib/examples/offline_rl/pretrain_bc_single_agent_evaluate_as_multi_agent.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @HybridAPIStack
2
+
3
+ """Example showing how to train a (SA) BC RLModule while evaluating in a MA setup.
4
+
5
+ Here, SA=single-agent and MA=multi-agent.
6
+
7
+ Note that the BC Algorithm - by default - runs on the hybrid API stack, using RLModules,
8
+ but not `ConnectorV2` and `SingleAgentEpisode` yet.
9
+
10
+ This example:
11
+ - demonstrates how you can train a single-agent BC Policy (RLModule) from a JSON
12
+ file, which contains SampleBatch (expert or non-expert) data.
13
+ - shows how you can run evaluation in a multi-agent setup (for example vs one
14
+ or more heuristic policies), while training the BC Policy.
15
+
16
+
17
+ How to run this script
18
+ ----------------------
19
+ `python [script file name].py --checkpoint-at-end`
20
+
21
+ For debugging, use the following additional command line options
22
+ `--no-tune --num-env-runners=0`
23
+ which should allow you to set breakpoints anywhere in the RLlib code and
24
+ have the execution stop there for inspection and debugging.
25
+
26
+ For logging to your WandB account, use:
27
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
28
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
29
+
30
+
31
+ Results to expect
32
+ -----------------
33
+ In the console output, you can see that the episode returns of the "main" policy on
34
+ the evaluation track keep increasing as BC manages to more and more clone the behavior
35
+ found in our (expert) JSON file.
36
+
37
+ After 50-100 iterations, you should see the episode reward reach 450.0.
38
+ Note that the opponent (random) policy does not learn as it's a) not a trainable
39
+ RLModule and b) not being trained via the BCConfig. It's only used for evaluation
40
+ purposes here.
41
+
42
+ +---------------------+------------+-----------------+--------+--------+
43
+ | Trial name | status | loc | iter | ts |
44
+ |---------------------+------------+-----------------+--------+--------+
45
+ | BC_None_ee65e_00000 | TERMINATED | 127.0.0.1:35031 | 93 | 203754 |
46
+ +---------------------+------------+-----------------+--------+--------+
47
+ +----------------------+------------------------+
48
+ | eps. return (main) | eps. return (random) |
49
+ |----------------------+------------------------|
50
+ | 452.4 | 28.3 |
51
+ +----------------------+------------------------+
52
+ """
53
+ import os
54
+ from pathlib import Path
55
+
56
+ import gymnasium as gym
57
+
58
+ from ray import tune
59
+ from ray.air.constants import TRAINING_ITERATION
60
+ from ray.rllib.algorithms.bc import BCConfig
61
+ from ray.rllib.examples.envs.classes.multi_agent import MultiAgentCartPole
62
+ from ray.rllib.examples._old_api_stack.policy.random_policy import RandomPolicy
63
+ from ray.rllib.policy.policy import PolicySpec
64
+ from ray.rllib.utils.metrics import (
65
+ ENV_RUNNER_RESULTS,
66
+ EVALUATION_RESULTS,
67
+ NUM_ENV_STEPS_TRAINED,
68
+ )
69
+ from ray.rllib.utils.test_utils import (
70
+ add_rllib_example_script_args,
71
+ run_rllib_example_script_experiment,
72
+ )
73
+ from ray.train.constants import TIME_TOTAL_S
74
+ from ray.tune.registry import register_env
75
+
76
+ parser = add_rllib_example_script_args(
77
+ default_reward=450.0,
78
+ default_timesteps=300000,
79
+ )
80
+ parser.set_defaults(num_agents=2)
81
+
82
+
83
+ if __name__ == "__main__":
84
+ args = parser.parse_args()
85
+
86
+ register_env("multi_cart", lambda cfg: MultiAgentCartPole(cfg))
87
+ dummy_env = gym.make("CartPole-v1")
88
+
89
+ rllib_dir = Path(__file__).parent.parent.parent
90
+ print(f"rllib dir={rllib_dir}")
91
+ offline_file = os.path.join(rllib_dir, "tests/data/cartpole/large.json")
92
+
93
+ base_config = (
94
+ BCConfig()
95
+ # For offline RL, we do not specify an env here (b/c we don't want any env
96
+ # instances created on the EnvRunners). Instead, we'll provide observation-
97
+ # and action-spaces here for the RLModule to know its input- and output types.
98
+ .environment(
99
+ observation_space=dummy_env.observation_space,
100
+ action_space=dummy_env.action_space,
101
+ )
102
+ .offline_data(
103
+ input_=offline_file,
104
+ )
105
+ .multi_agent(
106
+ policies={"main"},
107
+ policy_mapping_fn=lambda *a, **kw: "main",
108
+ )
109
+ .evaluation(
110
+ evaluation_interval=1,
111
+ evaluation_num_env_runners=0,
112
+ evaluation_config=BCConfig.overrides(
113
+ # Evaluate on an actual env -> switch input back to "sampler".
114
+ input_="sampler",
115
+ # Do not explore during evaluation, but act greedily.
116
+ explore=False,
117
+ # Use a multi-agent setup for evaluation.
118
+ env="multi_cart",
119
+ env_config={"num_agents": args.num_agents},
120
+ policies={
121
+ "main": PolicySpec(),
122
+ "random": PolicySpec(policy_class=RandomPolicy),
123
+ },
124
+ # Only control agent 0 with the main (trained) policy.
125
+ policy_mapping_fn=(
126
+ lambda aid, *a, **kw: "main" if aid == 0 else "random"
127
+ ),
128
+ # Note that we do NOT have to specify the `policies_to_train` here,
129
+ # b/c we are inside the evaluation config (no policy is trained during
130
+ # evaluation). The fact that the BCConfig above is "only" setup
131
+ # as single-agent makes it automatically only train the policy found in
132
+ # the BCConfig's `policies` field (which is "main").
133
+ # policies_to_train=["main"],
134
+ ),
135
+ )
136
+ )
137
+
138
+ policy_eval_returns = (
139
+ f"{EVALUATION_RESULTS}/{ENV_RUNNER_RESULTS}/policy_reward_mean/"
140
+ )
141
+
142
+ stop = {
143
+ # Check for the "main" policy's episode return, not the combined one.
144
+ # The combined one is the sum of the "main" policy + the "random" one.
145
+ policy_eval_returns + "main": args.stop_reward,
146
+ NUM_ENV_STEPS_TRAINED: args.stop_timesteps,
147
+ TRAINING_ITERATION: args.stop_iters,
148
+ }
149
+
150
+ run_rllib_example_script_experiment(
151
+ base_config,
152
+ args,
153
+ stop=stop,
154
+ success_metric={policy_eval_returns + "main": args.stop_reward},
155
+ # We use a special progress reporter here to show the evaluation results (of the
156
+ # "main" policy).
157
+ # In the following dict, the keys are the (possibly nested) keys that can be
158
+ # found in RLlib's (BC's) result dict, produced at every training iteration, and
159
+ # the values are the column names you would like to see in your console reports.
160
+ # Note that for nested result dict keys, you need to use slashes "/" to define
161
+ # the exact path.
162
+ progress_reporter=tune.CLIReporter(
163
+ metric_columns={
164
+ TRAINING_ITERATION: "iter",
165
+ TIME_TOTAL_S: "total time (s)",
166
+ NUM_ENV_STEPS_TRAINED: "ts",
167
+ policy_eval_returns + "main": "eps. return (main)",
168
+ policy_eval_returns + "random": "eps. return (random)",
169
+ }
170
+ ),
171
+ )
deepseek/lib/python3.10/site-packages/ray/rllib/examples/ray_serve/__pycache__/ray_serve_with_rllib.cpython-310.pyc ADDED
Binary file (5.37 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/ray_serve/ray_serve_with_rllib.py ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example on how to run RLlib in combination with Ray Serve.
2
+
3
+ This example trains an agent with PPO on the CartPole environment, then creates
4
+ an RLModule checkpoint and returns its location. After that, it sends the checkpoint
5
+ to the Serve deployment for serving the trained RLModule (policy).
6
+
7
+ This example:
8
+ - shows how to set up a Ray Serve deployment for serving an already trained
9
+ RLModule (policy network).
10
+ - shows how to request new actions from the Ray Serve deployment while actually
11
+ running through episodes in an environment (on which the RLModule that's served
12
+ was trained).
13
+
14
+
15
+ How to run this script
16
+ ----------------------
17
+ `python [script file name].py --enable-new-api-stack --stop-reward=200.0`
18
+
19
+ Use the `--stop-iters`, `--stop-reward`, and/or `--stop-timesteps` options to
20
+ determine how long to train the policy for. Use the `--serve-episodes` option to
21
+ set the number of episodes to serve (after training) and the `--no-render` option
22
+ to NOT render the environment during the serving phase.
23
+
24
+ For debugging, use the following additional command line options
25
+ `--no-tune --num-env-runners=0`
26
+ which should allow you to set breakpoints anywhere in the RLlib code and
27
+ have the execution stop there for inspection and debugging.
28
+
29
+ For logging to your WandB account, use:
30
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
31
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
32
+
33
+ You can visualize experiment results in ~/ray_results using TensorBoard.
34
+
35
+
36
+ Results to expect
37
+ -----------------
38
+
39
+ You should see something similar to the following on the command line when using the
40
+ options: `--stop-reward=250.0`, `--num-episodes-served=2`, and `--port=12345`:
41
+
42
+ [First, the RLModule is trained through PPO]
43
+
44
+ +-----------------------------+------------+-----------------+--------+
45
+ | Trial name | status | loc | iter |
46
+ | | | | |
47
+ |-----------------------------+------------+-----------------+--------+
48
+ | PPO_CartPole-v1_84778_00000 | TERMINATED | 127.0.0.1:40411 | 1 |
49
+ +-----------------------------+------------+-----------------+--------+
50
+ +------------------+---------------------+------------------------+
51
+ | total time (s) | episode_return_mean | num_env_steps_sample |
52
+ | | | d_lifetime |
53
+ |------------------+---------------------|------------------------|
54
+ | 2.87052 | 253.2 | 12000 |
55
+ +------------------+---------------------+------------------------+
56
+
57
+ [The RLModule is deployed through Ray Serve on port 12345]
58
+
59
+ Started Ray Serve with PID: 40458
60
+
61
+ [A few episodes are played through using the policy service (w/ greedy, non-exploratory
62
+ actions)]
63
+
64
+ Episode R=500.0
65
+ Episode R=500.0
66
+ """
67
+
68
+ import atexit
69
+ import os
70
+
71
+ import requests
72
+ import subprocess
73
+ import time
74
+
75
+ import gymnasium as gym
76
+ from pathlib import Path
77
+
78
+ from ray.rllib.algorithms.ppo import PPOConfig
79
+ from ray.rllib.core import (
80
+ COMPONENT_LEARNER_GROUP,
81
+ COMPONENT_LEARNER,
82
+ COMPONENT_RL_MODULE,
83
+ DEFAULT_MODULE_ID,
84
+ )
85
+ from ray.rllib.utils.metrics import (
86
+ ENV_RUNNER_RESULTS,
87
+ EPISODE_RETURN_MEAN,
88
+ )
89
+ from ray.rllib.utils.test_utils import (
90
+ add_rllib_example_script_args,
91
+ run_rllib_example_script_experiment,
92
+ )
93
+
94
+ parser = add_rllib_example_script_args()
95
+ parser.set_defaults(
96
+ enable_new_api_stack=True,
97
+ checkpoint_freq=1,
98
+ checkpoint_at_and=True,
99
+ )
100
+ parser.add_argument("--num-episodes-served", type=int, default=2)
101
+ parser.add_argument("--no-render", action="store_true")
102
+ parser.add_argument("--port", type=int, default=12345)
103
+
104
+
105
+ def kill_proc(proc):
106
+ try:
107
+ proc.terminate() # Send SIGTERM
108
+ proc.wait(timeout=5) # Wait for process to terminate
109
+ except subprocess.TimeoutExpired:
110
+ proc.kill() # Send SIGKILL
111
+ proc.wait() # Ensure process is dead
112
+
113
+
114
+ if __name__ == "__main__":
115
+ args = parser.parse_args()
116
+
117
+ # Config for the served RLlib RLModule/Algorithm.
118
+ base_config = PPOConfig().environment("CartPole-v1")
119
+
120
+ results = run_rllib_example_script_experiment(base_config, args)
121
+ algo_checkpoint = results.get_best_result(
122
+ f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}"
123
+ ).checkpoint.path
124
+ # We only need the RLModule component from the algorithm checkpoint. It's located
125
+ # under "[algo checkpoint dir]/learner_group/learner/rl_module/[default policy ID]
126
+ rl_module_checkpoint = (
127
+ Path(algo_checkpoint)
128
+ / COMPONENT_LEARNER_GROUP
129
+ / COMPONENT_LEARNER
130
+ / COMPONENT_RL_MODULE
131
+ / DEFAULT_MODULE_ID
132
+ )
133
+
134
+ path_of_this_file = Path(__file__).parent
135
+ os.chdir(path_of_this_file)
136
+ # Start the serve app with the trained checkpoint.
137
+ serve_proc = subprocess.Popen(
138
+ [
139
+ "serve",
140
+ "run",
141
+ "classes.cartpole_deployment:rl_module",
142
+ f"rl_module_checkpoint={rl_module_checkpoint}",
143
+ f"port={args.port}",
144
+ "route_prefix=/rllib-rlmodule",
145
+ ]
146
+ )
147
+ # Register our `kill_proc` function to be called on exit to stop Ray Serve again.
148
+ atexit.register(kill_proc, serve_proc)
149
+ # Wait a while to make sure the app is ready to serve.
150
+ time.sleep(20)
151
+ print(f"Started Ray Serve with PID: {serve_proc.pid}")
152
+
153
+ try:
154
+ # Create the environment that we would like to receive
155
+ # served actions for.
156
+ env = gym.make("CartPole-v1", render_mode="human")
157
+ obs, _ = env.reset()
158
+
159
+ num_episodes = 0
160
+ episode_return = 0.0
161
+
162
+ while num_episodes < args.num_episodes_served:
163
+ # Render env if necessary.
164
+ if not args.no_render:
165
+ env.render()
166
+
167
+ # print(f"-> Requesting action for obs={obs} ...", end="")
168
+ # Send a request to serve.
169
+ resp = requests.get(
170
+ f"http://localhost:{args.port}/rllib-rlmodule",
171
+ json={"observation": obs.tolist()},
172
+ )
173
+ response = resp.json()
174
+ # print(f" received: action={response['action']}")
175
+
176
+ # Apply the action in the env.
177
+ action = response["action"]
178
+ obs, reward, terminated, truncated, _ = env.step(action)
179
+ episode_return += reward
180
+
181
+ # If episode done -> reset to get initial observation of new episode.
182
+ if terminated or truncated:
183
+ print(f"Episode R={episode_return}")
184
+ obs, _ = env.reset()
185
+ num_episodes += 1
186
+ episode_return = 0.0
187
+
188
+ finally:
189
+ # Make sure to kill the process on script termination
190
+ kill_proc(serve_proc)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/__pycache__/migrate_modelv2_to_new_api_stack_by_config.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/__pycache__/migrate_modelv2_to_new_api_stack_by_policy_checkpoint.cpython-310.pyc ADDED
Binary file (2.42 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/autoregressive_actions_rl_module.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """An example script showing how to define and load an `RLModule` with
2
+ a dependent action space.
3
+
4
+ This examples:
5
+ - Defines an `RLModule` with autoregressive actions.
6
+ - It does so by implementing a prior distribution for the first couple
7
+ of actions and then using these actions in a posterior distribution.
8
+ - Furthermore, it uses in the `RLModule` our simple base `Catalog` class
9
+ to build the distributions.
10
+ - Uses this `RLModule` in a PPO training run on a simple environment
11
+ that rewards synchronized actions.
12
+ - Stops the training after 100k steps or when the mean episode return
13
+ exceeds -0.012 in evaluation, i.e. if the agent has learned to
14
+ synchronize its actions.
15
+
16
+ How to run this script
17
+ ----------------------
18
+ `python [script file name].py --enable-new-api-stack --num-env-runners 2`
19
+
20
+ Control the number of `EnvRunner`s with the `--num-env-runners` flag. This
21
+ will increase the sampling speed.
22
+
23
+ For debugging, use the following additional command line options
24
+ `--no-tune --num-env-runners=0`
25
+ which should allow you to set breakpoints anywhere in the RLlib code and
26
+ have the execution stop there for inspection and debugging.
27
+
28
+ For logging to your WandB account, use:
29
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
30
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
31
+
32
+ Results to expect
33
+ -----------------
34
+ You should expect a reward of around 155-160 after ~36,000 timesteps sampled
35
+ (trained) being achieved by a simple PPO policy (no tuning, just using RLlib's
36
+ default settings). For details take also a closer look into the
37
+ `CorrelatedActionsEnv` environment. Rewards are such that to receive a return
38
+ over 100, the agent must learn to synchronize its actions.
39
+ """
40
+
41
+
42
+ from ray.rllib.algorithms.ppo import PPOConfig
43
+ from ray.rllib.core.models.catalog import Catalog
44
+ from ray.rllib.core.rl_module.rl_module import RLModuleSpec
45
+ from ray.rllib.examples.envs.classes.correlated_actions_env import (
46
+ AutoRegressiveActionEnv,
47
+ )
48
+ from ray.rllib.examples.rl_modules.classes.autoregressive_actions_rlm import (
49
+ AutoregressiveActionsTorchRLM,
50
+ )
51
+ from ray.rllib.utils.metrics import (
52
+ ENV_RUNNER_RESULTS,
53
+ EPISODE_RETURN_MEAN,
54
+ EVALUATION_RESULTS,
55
+ NUM_ENV_STEPS_SAMPLED_LIFETIME,
56
+ )
57
+ from ray.rllib.utils.test_utils import (
58
+ add_rllib_example_script_args,
59
+ run_rllib_example_script_experiment,
60
+ )
61
+ from ray.tune import register_env
62
+
63
+
64
register_env("correlated_actions_env", lambda _: AutoRegressiveActionEnv(_))

parser = add_rllib_example_script_args(
    default_iters=200,
    default_timesteps=100000,
    default_reward=150.0,
)

if __name__ == "__main__":
    args = parser.parse_args()

    # Only PPO is wired up for this autoregressive-actions example.
    if args.algo != "PPO":
        raise ValueError("This example only supports PPO. Please use --algo=PPO.")

    # Explicitly specify the RLModule class to use and the catalog needed to
    # build its distributions.
    module_spec = RLModuleSpec(
        module_class=AutoregressiveActionsTorchRLM,
        model_config={
            "head_fcnet_hiddens": [64, 64],
            "head_fcnet_activation": "relu",
        },
        catalog_class=Catalog,
    )

    base_config = (
        PPOConfig()
        .environment("correlated_actions_env")
        .rl_module(rl_module_spec=module_spec)
        .env_runners(num_env_runners=0)
        .evaluation(
            evaluation_num_env_runners=1,
            evaluation_interval=1,
            # Run evaluation parallel to training to speed up the example.
            evaluation_parallel_to_training=True,
        )
    )

    # Stop after 100k sampled env steps, or once the evaluation mean episode
    # return exceeds -0.012 (i.e. the agent learned to synchronize actions).
    stop = {
        NUM_ENV_STEPS_SAMPLED_LIFETIME: 100000,
        f"{EVALUATION_RESULTS}/{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": -0.012,
    }

    # Run the example (with Tune).
    run_rllib_example_script_experiment(base_config, args, stop=stop)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from ray.rllib.examples.rl_modules.classes.rock_paper_scissors_heuristic_rlm import (
2
+ AlwaysSameHeuristicRLM,
3
+ BeatLastHeuristicRLM,
4
+ )
5
+
6
+
7
+ __all__ = [
8
+ "AlwaysSameHeuristicRLM",
9
+ "BeatLastHeuristicRLM",
10
+ ]
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/__pycache__/action_masking_rlm.cpython-310.pyc ADDED
Binary file (6.68 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/__pycache__/random_rlm.cpython-310.pyc ADDED
Binary file (3.35 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/mobilenet_rlm.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This example shows how to take full control over what models and action distribution
3
+ are being built inside an RL Module. With this pattern, we can bypass a Catalog and
4
+ explicitly define our own models within a given RL Module.
5
+ """
6
+ # __sphinx_doc_begin__
7
+ import gymnasium as gym
8
+ import numpy as np
9
+
10
+ from ray.rllib.algorithms.ppo.ppo import PPOConfig
11
+ from ray.rllib.algorithms.ppo.torch.ppo_torch_rl_module import PPOTorchRLModule
12
+ from ray.rllib.core.models.configs import MLPHeadConfig
13
+ from ray.rllib.core.rl_module.rl_module import RLModuleSpec
14
+ from ray.rllib.examples.envs.classes.random_env import RandomEnv
15
+ from ray.rllib.examples._old_api_stack.models.mobilenet_v2_encoder import (
16
+ MobileNetV2EncoderConfig,
17
+ MOBILENET_INPUT_SHAPE,
18
+ )
19
+ from ray.rllib.core.models.configs import ActorCriticEncoderConfig
20
+
21
+
22
class MobileNetTorchPPORLModule(PPOTorchRLModule):
    """A PPO RLModule that uses MobileNet v2 as its encoder.

    Demonstrates how to bypass the Catalog and take full control over which
    models are built, by overriding `setup()` and wiring a custom encoder into
    an existing RLModule.
    """

    def setup(self):
        # Base image encoder.
        encoder_config = MobileNetV2EncoderConfig()
        # PPO is an actor-critic algorithm, so the base encoder config must be
        # wrapped in an ActorCriticEncoderConfig.
        self.encoder = ActorCriticEncoderConfig(
            base_encoder_config=encoder_config
        ).build(framework="torch")

        latent_dims = encoder_config.output_dims

        # Policy head: 2 output logits.
        self.pi = MLPHeadConfig(
            input_dims=latent_dims,
            output_layer_dim=2,
        ).build(framework="torch")

        # Value head: a single scalar value estimate.
        self.vf = MLPHeadConfig(
            input_dims=latent_dims,
            output_layer_dim=1,
        ).build(framework="torch")
52
+
53
+
54
# Module-level on purpose: this file is an executable documentation example
# (see the __sphinx_doc_begin__/__sphinx_doc_end__ markers).
config = (
    PPOConfig()
    # Plug our custom RLModule in instead of the catalog-built default.
    .rl_module(rl_module_spec=RLModuleSpec(module_class=MobileNetTorchPPORLModule))
    .environment(
        RandomEnv,
        env_config={
            "action_space": gym.spaces.Discrete(2),
            # Test a simple Image observation space.
            "observation_space": gym.spaces.Box(
                0.0,
                1.0,
                shape=MOBILENET_INPUT_SHAPE,
                dtype=np.float32,
            ),
        },
    )
    .env_runners(num_env_runners=0)
    # The following training settings make it so that a training iteration is very
    # quick. This is just for the sake of this example. PPO will not learn properly
    # with these settings!
    .training(train_batch_size_per_learner=32, minibatch_size=16, num_epochs=1)
)

# Build the algorithm and run a single training iteration.
config.build().train()
# __sphinx_doc_end__
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/random_rlm.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gymnasium as gym
2
+ import numpy as np
3
+ import tree # pip install dm_tree
4
+
5
+ from ray.rllib.core.columns import Columns
6
+ from ray.rllib.core.rl_module import RLModule
7
+ from ray.rllib.policy.sample_batch import SampleBatch
8
+ from ray.rllib.utils.annotations import override
9
+ from ray.rllib.utils.spaces.space_utils import batch as batch_func
10
+
11
+
12
class RandomRLModule(RLModule):
    """An RLModule that acts by sampling random actions from its action space."""

    @override(RLModule)
    def _forward(self, batch, **kwargs):
        # The batch size equals the length of the first leaf of the (possibly
        # nested) observation structure.
        num_obs = len(tree.flatten(batch[SampleBatch.OBS])[0])
        sampled = [self.action_space.sample() for _ in range(num_obs)]
        return {SampleBatch.ACTIONS: batch_func(sampled)}

    @override(RLModule)
    def _forward_train(self, *args, **kwargs):
        # RandomRLModule should always be configured as non-trainable.
        # To do so, set in your config:
        # `config.multi_agent(policies_to_train=[list of ModuleIDs to be trained,
        # NOT including the ModuleID of this RLModule])`
        raise NotImplementedError("Random RLModule: Should not be trained!")

    @override(RLModule)
    def output_specs_inference(self):
        return [SampleBatch.ACTIONS]

    @override(RLModule)
    def output_specs_exploration(self):
        return [SampleBatch.ACTIONS]

    def compile(self, *args, **kwargs):
        """Dummy method for compatibility with TorchRLModule.

        This is hit when RolloutWorker tries to compile TorchRLModule."""
        pass
42
+
43
+
44
class StatefulRandomRLModule(RandomRLModule):
    """A stateful RLModule that returns STATE_OUT from its forward methods.

    - Implements the `get_initial_state` method (returning an all-zeros dummy
      state).
    - Returns a dummy state under the `Columns.STATE_OUT` key from its forward
      methods.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Space from which the dummy per-step states are sampled.
        self._internal_state_space = gym.spaces.Box(-1.0, 1.0, (1,))

    @override(RLModule)
    def get_initial_state(self):
        # All-zeros dummy state; wrapped in a list to add a leading dim,
        # matching the batched shape produced in `_forward()` below.
        return {
            "state": np.zeros_like([self._internal_state_space.sample()]),
        }

    def _forward(self, batch, **kwargs):
        # Fix: this override was previously named `_random_forward` and called
        # `super()._random_forward()`, but the base class only defines
        # `_forward()` -> the method was never invoked by RLlib and the
        # super-call would have raised AttributeError. Override `_forward()`
        # and delegate to `super()._forward()` instead.
        batch = super()._forward(batch, **kwargs)
        # Attach one randomly sampled dummy state per sampled action.
        batch[Columns.STATE_OUT] = {
            "state": batch_func(
                [
                    self._internal_state_space.sample()
                    for _ in range(len(batch[Columns.ACTIONS]))
                ]
            ),
        }
        return batch
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/rock_paper_scissors_heuristic_rlm.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import defaultdict
2
+
3
+ import numpy as np
4
+
5
+ from ray.rllib.core.columns import Columns
6
+ from ray.rllib.core.rl_module.rl_module import RLModule
7
+ from ray.rllib.utils.annotations import override
8
+
9
+
10
class AlwaysSameHeuristicRLM(RLModule):
    """In rock-paper-scissors, always chooses the same action within an episode.

    The first move is random, all the following moves are the same as the first one.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per vector-env slot index: the action this policy keeps repeating.
        self._actions_per_vector_idx = defaultdict(int)

    @override(RLModule)
    def _forward_inference(self, batch, **kwargs):
        # The obs is the opponent's previous move (0-2). An obs of 3 means
        # there was no previous move, i.e. the episode just started -> draw a
        # fresh random action for this slot and stick with it afterwards.
        chosen = []
        for idx, obs in enumerate(batch[Columns.OBS]):
            if obs == 3:
                self._actions_per_vector_idx[idx] = np.random.choice([0, 1, 2])
            chosen.append(self._actions_per_vector_idx[idx])
        return {Columns.ACTIONS: np.array(chosen)}

    @override(RLModule)
    def _forward_exploration(self, batch, **kwargs):
        # Exploration behaves exactly like inference for this heuristic.
        return self._forward_inference(batch, **kwargs)

    @override(RLModule)
    def _forward_train(self, batch, **kwargs):
        raise NotImplementedError(
            "AlwaysSameHeuristicRLM is not trainable! Make sure you do NOT include it "
            "in your `config.multi_agent(policies_to_train={...})` set."
        )

    @override(RLModule)
    def output_specs_inference(self):
        return [Columns.ACTIONS]

    @override(RLModule)
    def output_specs_exploration(self):
        return [Columns.ACTIONS]
49
+
50
+
51
class BeatLastHeuristicRLM(RLModule):
    """In rock-paper-scissors, always acts such that it beats prev. move of opponent.

    The first move is random.

    For example, after opponent played `rock` (and this policy made a random
    move), the next move would be `paper`(to beat `rock`).
    """

    @override(RLModule)
    def _forward_inference(self, batch, **kwargs):
        """Returns the exact action that would beat the previous action of the opponent.

        The opponent's previous action is the current observation for this agent.

        Both action- and observation spaces are discrete. There are 3 actions available.
        (0-2) and 4 observations (0-2 plus 3, where 3 is the observation after the env
        reset, when no action has been taken yet). Thereby:
        0=Rock
        1=Paper
        2=Scissors
        3=[after reset] (observation space only)
        """
        counter_moves = [
            self._pick_single_action(obs) for obs in batch[Columns.OBS]
        ]
        return {Columns.ACTIONS: np.array(counter_moves)}

    @override(RLModule)
    def _forward_exploration(self, batch, **kwargs):
        # Exploration behaves exactly like inference for this heuristic.
        return self._forward_inference(batch, **kwargs)

    @override(RLModule)
    def _forward_train(self, batch, **kwargs):
        raise NotImplementedError(
            "BeatLastHeuristicRLM is not trainable! Make sure you do NOT include it in "
            "your `config.multi_agent(policies_to_train={...})` set."
        )

    @override(RLModule)
    def output_specs_inference(self):
        return [Columns.ACTIONS]

    @override(RLModule)
    def output_specs_exploration(self):
        return [Columns.ACTIONS]

    @staticmethod
    def _pick_single_action(prev_opponent_obs):
        # Rock(0) is beaten by Paper(1), Paper(1) by Scissors(2), and
        # Scissors(2) by Rock(0). Use `==` (not a dict lookup) so numpy
        # scalar observations compare correctly.
        for opponent_move, counter_move in ((0, 1), (1, 2), (2, 0)):
            if prev_opponent_obs == opponent_move:
                return counter_move
        # Obs == 3 (right after env reset): no previous move -> act randomly.
        return np.random.choice([0, 1, 2])
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/classes/tiny_atari_cnn_rlm.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, Optional
2
+
3
+ from ray.rllib.core.columns import Columns
4
+ from ray.rllib.core.rl_module.apis.value_function_api import ValueFunctionAPI
5
+ from ray.rllib.core.rl_module.torch import TorchRLModule
6
+ from ray.rllib.models.torch.misc import (
7
+ normc_initializer,
8
+ same_padding,
9
+ valid_padding,
10
+ )
11
+ from ray.rllib.utils.annotations import override
12
+ from ray.rllib.utils.framework import try_import_torch
13
+ from ray.rllib.utils.typing import TensorType
14
+
15
+ torch, nn = try_import_torch()
16
+
17
+
18
class TinyAtariCNN(TorchRLModule, ValueFunctionAPI):
    """A tiny CNN stack for fast-learning of Atari envs.

    The architecture here is the exact same as the one used by the old API stack as
    CNN default ModelV2.

    We stack 3 CNN layers based on the config, then a 4th one with linear activation
    and n 1x1 filters, where n is the number of actions in the (discrete) action space.
    Simple reshaping (no flattening or extra linear layers necessary) lead to the
    action logits, which can directly be used inside a distribution or loss.

    import numpy as np
    import gymnasium as gym
    from ray.rllib.core.rl_module.rl_module import RLModuleConfig

    rl_module_config = RLModuleConfig(
        observation_space=gym.spaces.Box(-1.0, 1.0, (42, 42, 4), np.float32),
        action_space=gym.spaces.Discrete(4),
    )
    my_net = TinyAtariCNN(rl_module_config)

    B = 10
    w = 42
    h = 42
    c = 4
    data = torch.from_numpy(
        np.random.random_sample(size=(B, w, h, c)).astype(np.float32)
    )
    print(my_net.forward_inference({"obs": data}))
    print(my_net.forward_exploration({"obs": data}))
    print(my_net.forward_train({"obs": data}))

    num_all_params = sum(int(np.prod(p.size())) for p in my_net.parameters())
    print(f"num params = {num_all_params}")
    """

    @override(TorchRLModule)
    def setup(self):
        """Use this method to create all the model components that you require.

        Feel free to access the following useful properties in this class:
        - `self.model_config`: The config dict for this RLModule class,
        which should contain flexible settings, for example: {"hiddens": [256, 256]}.
        - `self.observation|action_space`: The observation and action space that
        this RLModule is subject to. Note that the observation space might not be the
        exact space from your env, but that it might have already gone through
        preprocessing through a connector pipeline (for example, flattening,
        frame-stacking, mean/std-filtering, etc..).
        """
        # Get the CNN stack config from our RLModuleConfig's (self.config)
        # `model_config` property:
        conv_filters = self.model_config.get("conv_filters")
        # Default CNN stack with 3 layers:
        if conv_filters is None:
            conv_filters = [
                [16, 4, 2, "same"],  # num filters, kernel wxh, stride wxh, padding type
                [32, 4, 2, "same"],
                [256, 11, 1, "valid"],
            ]

        # Build the CNN layers.
        layers = []

        # Add user-specified hidden convolutional layers first.
        # NOTE(review): assumes a channels-last (w, h, c) observation space —
        # consistent with the (42, 42, 4) example in the class docstring.
        width, height, in_depth = self.observation_space.shape
        in_size = [width, height]
        for filter_specs in conv_filters:
            # A 3-element spec defaults to "same" padding.
            if len(filter_specs) == 4:
                out_depth, kernel_size, strides, padding = filter_specs
            else:
                out_depth, kernel_size, strides = filter_specs
                padding = "same"

            # Pad like in tensorflow's SAME mode.
            if padding == "same":
                padding_size, out_size = same_padding(in_size, kernel_size, strides)
                layers.append(nn.ZeroPad2d(padding_size))
            # No actual padding is performed for "valid" mode, but we will still
            # compute the output size (input for the next layer).
            else:
                out_size = valid_padding(in_size, kernel_size, strides)

            layer = nn.Conv2d(in_depth, out_depth, kernel_size, strides, bias=True)
            # Initialize CNN layer kernel and bias.
            nn.init.xavier_uniform_(layer.weight)
            nn.init.zeros_(layer.bias)
            layers.append(layer)
            # Activation.
            layers.append(nn.ReLU())

            # Output of this layer feeds the next one.
            in_size = out_size
            in_depth = out_depth

        self._base_cnn_stack = nn.Sequential(*layers)

        # Add the final CNN 1x1 layer with num_filters == num_actions to be reshaped to
        # yield the logits (no flattening, no additional linear layers required).
        _final_conv = nn.Conv2d(in_depth, self.action_space.n, 1, 1, bias=True)
        nn.init.xavier_uniform_(_final_conv.weight)
        nn.init.zeros_(_final_conv.bias)
        self._logits = nn.Sequential(
            nn.ZeroPad2d(same_padding(in_size, 1, 1)[0]), _final_conv
        )

        # Value head: a single scalar per (already squeezed) embedding.
        self._values = nn.Linear(in_depth, 1)
        # Mimic old API stack behavior of initializing the value function with `normc`
        # std=0.01.
        normc_initializer(0.01)(self._values.weight)

    @override(TorchRLModule)
    def _forward(self, batch, **kwargs):
        # Compute the basic 1D feature tensor (inputs to policy- and value-heads).
        _, logits = self._compute_embeddings_and_logits(batch)
        # Return features and logits as ACTION_DIST_INPUTS (categorical distribution).
        return {
            Columns.ACTION_DIST_INPUTS: logits,
        }

    @override(TorchRLModule)
    def _forward_train(self, batch, **kwargs):
        # Compute the basic 1D feature tensor (inputs to policy- and value-heads).
        embeddings, logits = self._compute_embeddings_and_logits(batch)
        # Return features and logits as ACTION_DIST_INPUTS (categorical distribution).
        # Embeddings are returned too so `compute_values()` can reuse them.
        return {
            Columns.ACTION_DIST_INPUTS: logits,
            Columns.EMBEDDINGS: embeddings,
        }

    # We implement this RLModule as a ValueFunctionAPI RLModule, so it can be used
    # by value-based methods like PPO or IMPALA.
    @override(ValueFunctionAPI)
    def compute_values(
        self,
        batch: Dict[str, Any],
        embeddings: Optional[Any] = None,
    ) -> TensorType:
        # Features not provided -> We need to compute them first.
        if embeddings is None:
            obs = batch[Columns.OBS]
            # Channels-last input -> channels-first for Conv2d.
            embeddings = self._base_cnn_stack(obs.permute(0, 3, 1, 2))
            # Collapse the trailing 1x1 spatial dims into a flat feature vector.
            embeddings = torch.squeeze(embeddings, dim=[-1, -2])
        return self._values(embeddings).squeeze(-1)

    def _compute_embeddings_and_logits(self, batch):
        # Channels-last input -> channels-first for Conv2d.
        obs = batch[Columns.OBS].permute(0, 3, 1, 2)
        embeddings = self._base_cnn_stack(obs)
        logits = self._logits(embeddings)
        # Squeeze the trailing 1x1 spatial dims of both tensors.
        return (
            torch.squeeze(embeddings, dim=[-1, -2]),
            torch.squeeze(logits, dim=[-1, -2]),
        )
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/custom_cnn_rl_module.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example of implementing and configuring a custom (torch) CNN containing RLModule.
2
+
3
+ This example:
4
+ - demonstrates how you can subclass the TorchRLModule base class and set up your
5
+ own CNN-stack architecture by overriding the `setup()` method.
6
+ - shows how to override the 3 forward methods: `_forward_inference()`,
7
+ `_forward_exploration()`, and `forward_train()` to implement your own custom forward
8
+ logic(s). You will also learn, when each of these 3 methods is called by RLlib or
9
+ the users of your RLModule.
10
+ - shows how you then configure an RLlib Algorithm such that it uses your custom
11
+ RLModule (instead of a default RLModule).
12
+
13
+ We implement a tiny CNN stack here, the exact same one that is used by the old API
14
+ stack as default CNN net. It comprises 4 convolutional layers, the last of which
15
+ ends in a 1x1 filter size and the number of filters exactly matches the number of
16
+ discrete actions (logits). This way, the (non-activated) output of the last layer only
17
+ needs to be reshaped in order to receive the policy's logit outputs. No flattening
18
+ or additional dense layer required.
19
+
20
+ The network is then used in a fast ALE/Pong-v5 experiment.
21
+
22
+
23
+ How to run this script
24
+ ----------------------
25
+ `python [script file name].py --enable-new-api-stack`
26
+
27
+ For debugging, use the following additional command line options
28
+ `--no-tune --num-env-runners=0`
29
+ which should allow you to set breakpoints anywhere in the RLlib code and
30
+ have the execution stop there for inspection and debugging.
31
+
32
+ For logging to your WandB account, use:
33
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
34
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
35
+
36
+
37
+ Results to expect
38
+ -----------------
39
+ You should see the following output (during the experiment) in your console:
40
+
41
+ Number of trials: 1/1 (1 RUNNING)
42
+ +---------------------+----------+----------------+--------+------------------+
43
+ | Trial name | status | loc | iter | total time (s) |
44
+ | | | | | |
45
+ |---------------------+----------+----------------+--------+------------------+
46
+ | PPO_env_82b44_00000 | RUNNING | 127.0.0.1:9718 | 1 | 98.3585 |
47
+ +---------------------+----------+----------------+--------+------------------+
48
+ +------------------------+------------------------+------------------------+
49
+ | num_env_steps_sample | num_env_steps_traine | num_episodes_lifetim |
50
+ | d_lifetime | d_lifetime | e |
51
+ |------------------------+------------------------+------------------------|
52
+ | 4000 | 4000 | 4 |
53
+ +------------------------+------------------------+------------------------+
54
+ """
55
+ import gymnasium as gym
56
+
57
+ from ray.rllib.core.rl_module.rl_module import RLModuleSpec
58
+ from ray.rllib.env.wrappers.atari_wrappers import wrap_atari_for_new_api_stack
59
+ from ray.rllib.examples.rl_modules.classes.tiny_atari_cnn_rlm import TinyAtariCNN
60
+ from ray.rllib.utils.test_utils import (
61
+ add_rllib_example_script_args,
62
+ run_rllib_example_script_experiment,
63
+ )
64
+ from ray.tune.registry import get_trainable_cls, register_env
65
+
66
parser = add_rllib_example_script_args(default_iters=100, default_timesteps=600000)
parser.set_defaults(
    enable_new_api_stack=True,
    env="ale_py:ALE/Pong-v5",
)


if __name__ == "__main__":
    args = parser.parse_args()

    # Use an explicit check (not `assert`) so the validation also runs under
    # `python -O`, which strips assert statements. This mirrors the ValueError
    # style used by the other example scripts.
    if not args.enable_new_api_stack:
        raise ValueError("Must set --enable-new-api-stack when running this script!")

    # Register the (Atari-wrapped) env under a fixed name.
    register_env(
        "env",
        lambda cfg: wrap_atari_for_new_api_stack(
            gym.make(args.env, **cfg),
            dim=42,  # <- need images to be "tiny" for our custom model
            framestack=4,
        ),
    )

    base_config = (
        get_trainable_cls(args.algo)
        .get_default_config()
        .environment(
            env="env",
            env_config=dict(
                frameskip=1,
                full_action_space=False,
                repeat_action_probability=0.0,
            ),
        )
        .rl_module(
            # Plug-in our custom RLModule class.
            rl_module_spec=RLModuleSpec(
                module_class=TinyAtariCNN,
                # Feel free to specify your own `model_config` settings below.
                # The `model_config` defined here will be available inside your
                # custom RLModule class through the `self.model_config`
                # property.
                model_config={
                    "conv_filters": [
                        # num filters, kernel wxh, stride wxh, padding type
                        [16, 4, 2, "same"],
                        [32, 4, 2, "same"],
                        [256, 11, 1, "valid"],
                    ],
                },
            ),
        )
    )

    run_rllib_example_script_experiment(base_config, args)
deepseek/lib/python3.10/site-packages/ray/rllib/examples/rl_modules/pretraining_single_agent_training_multi_agent.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example of running a single-agent pre-training followed with a multi-agent training.
2
+
3
+ This example runs `num_agents` agents, each of them with its own `RLModule` that defines its
4
+ policy. The first agent is pre-trained using a single-agent PPO algorithm. All agents
5
+ are trained together in the main training run using a multi-agent PPO algorithm where
6
+ the pre-trained module is used for the first agent.
7
+
8
+ The environment is MultiAgentCartPole, in which there are n agents, each acting under its own policy.
9
+
10
+ How to run this script
11
+ ----------------------
12
+ `python [script file name].py --enable-new-api-stack --num-agents=2`
13
+
14
+ For debugging, use the following additional command line options
15
+ `--no-tune --num-env-runners=0`
16
+ which should allow you to set breakpoints anywhere in the RLlib code and
17
+ have the execution stop there for inspection and debugging.
18
+
19
+ For logging to your WandB account, use:
20
+ `--wandb-key=[your WandB API key] --wandb-project=[some project name]
21
+ --wandb-run-name=[optional: WandB run name (within the defined project)]`
22
+
23
+
24
+
25
+ """
26
+
27
+ import gymnasium as gym
28
+ from ray.rllib.algorithms.ppo import PPOConfig
29
+ from ray.rllib.algorithms.ppo.ppo_catalog import PPOCatalog
30
+ from ray.rllib.algorithms.ppo.torch.ppo_torch_rl_module import PPOTorchRLModule
31
+ from ray.rllib.core.rl_module.rl_module import RLModuleSpec
32
+ from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec
33
+ from ray.rllib.examples.envs.classes.multi_agent import MultiAgentCartPole
34
+ from ray.rllib.utils.test_utils import (
35
+ add_rllib_example_script_args,
36
+ run_rllib_example_script_experiment,
37
+ )
38
+ from ray.tune import register_env
39
+
40
# Read in common example script command line arguments.
parser = add_rllib_example_script_args(
    # Use fewer training steps for the main training run ...
    default_timesteps=50000,
    default_reward=200.0,
    default_iters=20,
)
# ... and instead use more for the pre-training run.
parser.add_argument(
    "--stop-iters-pretraining",
    type=int,
    default=200,
    help="The number of iterations to pre-train.",
)
parser.add_argument(
    "--stop-timesteps-pretraining",
    type=int,
    default=5000000,
    help="The number of (environment sampling) timesteps to pre-train.",
)
60
+
61
+
62
if __name__ == "__main__":

    # Parse the command line arguments.
    args = parser.parse_args()

    # Ensure that the user has set the number of agents.
    if args.num_agents == 0:
        raise ValueError(
            "This pre-training example script requires at least 1 agent. "
            "Try setting the command line argument `--num-agents` to the "
            "number of agents you want to use."
        )

    # Store the user's stopping criteria for the later training run.
    stop_iters = args.stop_iters
    stop_timesteps = args.stop_timesteps
    checkpoint_at_end = args.checkpoint_at_end
    num_agents = args.num_agents
    # Override these criteria for the pre-training run. Plain attribute
    # assignment is equivalent to `setattr` with a constant name.
    args.stop_iters = args.stop_iters_pretraining
    args.stop_timesteps = args.stop_timesteps_pretraining
    args.checkpoint_at_end = True
    args.num_agents = 0

    # Define our pre-training single-agent algorithm. We will use the same
    # module configuration for the pre-training and the training.
    config = (
        PPOConfig()
        .environment("CartPole-v1")
        .rl_module(
            # Use a different number of hidden units for the pre-trained module.
            model_config={"fcnet_hiddens": [64]},
        )
    )

    # Run the pre-training.
    results = run_rllib_example_script_experiment(config, args)
    # Get the checkpoint path of the best pre-training result.
    module_chkpt_path = results.get_best_result().checkpoint.path

    # Create a new MultiRLModule using the pre-trained module for policy 0.
    # A throwaway env instance only supplies the observation/action spaces.
    env = gym.make("CartPole-v1")
    module_specs = {}
    # BUGFIX: iterate over the saved `num_agents`, NOT `args.num_agents` --
    # the latter was zeroed above for the pre-training run, which made this
    # loop (and the multi-agent env selection below) a no-op.
    for i in range(num_agents):
        module_specs[f"policy_{i}"] = RLModuleSpec(
            module_class=PPOTorchRLModule,
            observation_space=env.observation_space,
            action_space=env.action_space,
            model_config={"fcnet_hiddens": [32]},
            catalog_class=PPOCatalog,
        )

    # Swap in the pre-trained module for policy 0 (note the larger hidden
    # layer, matching the pre-training config above).
    module_specs["policy_0"] = RLModuleSpec(
        module_class=PPOTorchRLModule,
        observation_space=env.observation_space,
        action_space=env.action_space,
        model_config={"fcnet_hiddens": [64]},
        catalog_class=PPOCatalog,
        # Note, we load here the module directly from the checkpoint.
        load_state_path=module_chkpt_path,
    )
    multi_rl_module_spec = MultiRLModuleSpec(rl_module_specs=module_specs)

    # Register our environment with tune if we use multiple agents. Bind the
    # saved `num_agents` (not `args.num_agents`, which is 0 at this point).
    register_env(
        "multi-agent-carpole-env",
        lambda _: MultiAgentCartPole(config={"num_agents": num_agents}),
    )

    # Configure the main (multi-agent) training run.
    config = (
        PPOConfig()
        .environment(
            "multi-agent-carpole-env" if num_agents > 0 else "CartPole-v1"
        )
        .rl_module(rl_module_spec=multi_rl_module_spec)
    )

    # Restore the user's stopping criteria for the training run.
    args.stop_iters = stop_iters
    args.stop_timesteps = stop_timesteps
    args.checkpoint_at_end = checkpoint_at_end
    args.num_agents = num_agents

    # Run the main training run.
    run_rllib_example_script_experiment(config, args)