ZTWHHH committed on
Commit
1a285aa
·
verified ·
1 Parent(s): f73de89

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/__init__.py +0 -0
  2. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/__pycache__/__init__.cpython-310.pyc +0 -0
  3. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/__pycache__/lambdas.cpython-310.pyc +0 -0
  4. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/__pycache__/normalize.cpython-310.pyc +0 -0
  5. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/__pycache__/pipeline.cpython-310.pyc +0 -0
  6. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/clip.py +41 -0
  7. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/lambdas.py +76 -0
  8. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/pipeline.py +61 -0
  9. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/__init__.py +0 -0
  10. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/__pycache__/__init__.cpython-310.pyc +0 -0
  11. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/__pycache__/obs_preproc.cpython-310.pyc +0 -0
  12. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/__pycache__/pipeline.cpython-310.pyc +0 -0
  13. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/clip_reward.py +56 -0
  14. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/lambdas.py +86 -0
  15. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/pipeline.py +72 -0
  16. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/state_buffer.py +120 -0
  17. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/connector_pipeline_v2.py +381 -0
  18. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/__init__.py +36 -0
  19. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/__pycache__/mean_std_filter.cpython-310.pyc +0 -0
  20. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/__pycache__/write_observations_to_episodes.cpython-310.pyc +0 -0
  21. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/flatten_observations.py +211 -0
  22. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/frame_stacking.py +6 -0
  23. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/observation_preprocessor.py +80 -0
  24. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/prev_actions_prev_rewards.py +168 -0
  25. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/write_observations_to_episodes.py +131 -0
  26. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/__pycache__/add_one_ts_to_episodes_and_truncate.cpython-310.pyc +0 -0
  27. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/__pycache__/frame_stacking.cpython-310.pyc +0 -0
  28. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/__pycache__/general_advantage_estimation.cpython-310.pyc +0 -0
  29. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/__pycache__/learner_connector_pipeline.cpython-310.pyc +0 -0
  30. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/add_columns_from_episodes_to_train_batch.py +165 -0
  31. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/add_next_observations_from_episodes_to_train_batch.py +103 -0
  32. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/add_one_ts_to_episodes_and_truncate.py +168 -0
  33. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/frame_stacking.py +6 -0
  34. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/learner_connector_pipeline.py +7 -0
  35. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/module_to_env/remove_single_ts_time_rank_from_batch.py +70 -0
  36. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  37. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/actors.cpython-310.pyc +0 -0
  38. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/deprecation.cpython-310.pyc +0 -0
  39. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/error.cpython-310.pyc +0 -0
  40. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/from_config.cpython-310.pyc +0 -0
  41. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/tf_run_builder.cpython-310.pyc +0 -0
  42. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/torch_utils.cpython-310.pyc +0 -0
  43. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/schedules/exponential_schedule.py +50 -0
  44. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/schedules/polynomial_schedule.py +67 -0
  45. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/schedules/scheduler.py +167 -0
  46. janus/lib/python3.10/_compat_pickle.py +251 -0
  47. janus/lib/python3.10/contextvars.py +4 -0
  48. janus/lib/python3.10/copyreg.py +219 -0
  49. janus/lib/python3.10/fnmatch.py +199 -0
  50. janus/lib/python3.10/lzma.py +356 -0
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/__init__.py ADDED
File without changes
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/__pycache__/lambdas.cpython-310.pyc ADDED
Binary file (2.71 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/__pycache__/normalize.cpython-310.pyc ADDED
Binary file (1.85 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/__pycache__/pipeline.cpython-310.pyc ADDED
Binary file (2.44 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/clip.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Any

from ray.rllib.connectors.connector import (
    ActionConnector,
    ConnectorContext,
)
from ray.rllib.connectors.registry import register_connector
from ray.rllib.utils.spaces.space_utils import clip_action, get_base_struct_from_space
from ray.rllib.utils.typing import ActionConnectorDataType
from ray.rllib.utils.annotations import OldAPIStack


@OldAPIStack
class ClipActionsConnector(ActionConnector):
    """Action connector that clips sampled actions to the env's action space."""

    def __init__(self, ctx: ConnectorContext):
        super().__init__(ctx)

        # Pre-compute the base (flattened) structure of the action space once,
        # so per-step clipping does not need to re-derive it.
        self._action_space_struct = get_base_struct_from_space(ctx.action_space)

    def transform(self, ac_data: ActionConnectorDataType) -> ActionConnectorDataType:
        """Returns a copy of `ac_data` whose actions are clipped to the space."""
        assert isinstance(
            ac_data.output, tuple
        ), "Action connector requires PolicyOutputType data."

        actions, states, fetches = ac_data.output
        clipped = clip_action(actions, self._action_space_struct)
        return ActionConnectorDataType(
            ac_data.env_id,
            ac_data.agent_id,
            ac_data.input_dict,
            (clipped, states, fetches),
        )

    def to_state(self):
        # This connector carries no serializable parameters.
        return ClipActionsConnector.__name__, None

    @staticmethod
    def from_state(ctx: ConnectorContext, params: Any):
        return ClipActionsConnector(ctx)


register_connector(ClipActionsConnector.__name__, ClipActionsConnector)
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/lambdas.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Any, Callable, Dict, Type

from ray.rllib.connectors.connector import (
    ActionConnector,
    ConnectorContext,
)
from ray.rllib.connectors.registry import register_connector
from ray.rllib.utils.numpy import convert_to_numpy
from ray.rllib.utils.typing import (
    ActionConnectorDataType,
    PolicyOutputType,
    StateBatches,
    TensorStructType,
)
from ray.rllib.utils.annotations import OldAPIStack


@OldAPIStack
def register_lambda_action_connector(
    name: str, fn: Callable[[TensorStructType, StateBatches, Dict], PolicyOutputType]
) -> Type[ActionConnector]:
    """A util to register any function transforming PolicyOutputType as an ActionConnector.

    The only requirement is that fn should take actions, states, and fetches as input,
    and return transformed actions, states, and fetches.

    Args:
        name: Name of the resulting actor connector.
        fn: The function that transforms PolicyOutputType.

    Returns:
        A new ActionConnector class that transforms PolicyOutputType using fn.
    """

    # `fn` is captured by closure; the generated class wraps it as a connector.
    class LambdaActionConnector(ActionConnector):
        def transform(
            self, ac_data: ActionConnectorDataType
        ) -> ActionConnectorDataType:
            assert isinstance(
                ac_data.output, tuple
            ), "Action connector requires PolicyOutputType data."

            actions, states, fetches = ac_data.output
            return ActionConnectorDataType(
                ac_data.env_id,
                ac_data.agent_id,
                ac_data.input_dict,
                fn(actions, states, fetches),
            )

        def to_state(self):
            # `fn` itself is not serialized; restoring relies on the class
            # having been registered under `name` in the target process.
            return name, None

        @staticmethod
        def from_state(ctx: ConnectorContext, params: Any):
            return LambdaActionConnector(ctx)

    # Expose the generated class under the user-provided name.
    LambdaActionConnector.__name__ = name
    LambdaActionConnector.__qualname__ = name

    register_connector(name, LambdaActionConnector)

    return LambdaActionConnector


# Convert actions and states into numpy arrays if necessary.
ConvertToNumpyConnector = OldAPIStack(
    register_lambda_action_connector(
        "ConvertToNumpyConnector",
        lambda actions, states, fetches: (
            convert_to_numpy(actions),
            convert_to_numpy(states),
            fetches,
        ),
    ),
)
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/action/pipeline.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging
from typing import Any, List
from collections import defaultdict

from ray.rllib.connectors.connector import (
    ActionConnector,
    Connector,
    ConnectorContext,
    ConnectorPipeline,
)
from ray.rllib.connectors.registry import get_connector, register_connector
from ray.rllib.utils.annotations import OldAPIStack
from ray.rllib.utils.typing import ActionConnectorDataType
from ray.util.timer import _Timer


logger = logging.getLogger(__name__)


@OldAPIStack
class ActionConnectorPipeline(ConnectorPipeline, ActionConnector):
    """Runs a list of action connectors in order, timing each child call."""

    def __init__(self, ctx: ConnectorContext, connectors: List[Connector]):
        super().__init__(ctx, connectors)
        # One timer per child connector, keyed by its string representation.
        self.timers = defaultdict(_Timer)

    def __call__(self, ac_data: ActionConnectorDataType) -> ActionConnectorDataType:
        # Each child receives the output of the previous one.
        for connector in self.connectors:
            with self.timers[str(connector)]:
                ac_data = connector(ac_data)
        return ac_data

    def to_state(self):
        # Serialize as (pipeline name, list of child (name, params) tuples).
        children = []
        for connector in self.connectors:
            state = connector.to_state()
            assert isinstance(state, tuple) and len(state) == 2, (
                "Serialized connector state must be in the format of "
                f"Tuple[name: str, params: Any]. Instead we got {state}"
                f"for connector {connector.__name__}."
            )
            children.append(state)
        return ActionConnectorPipeline.__name__, children

    @staticmethod
    def from_state(ctx: ConnectorContext, params: Any):
        assert (
            type(params) == list
        ), "ActionConnectorPipeline takes a list of connector params."
        connectors = []
        for state in params:
            try:
                name, subparams = state
                connectors.append(get_connector(name, ctx, subparams))
            except Exception as e:
                logger.error(f"Failed to de-serialize connector state: {state}")
                raise e
        return ActionConnectorPipeline(ctx, connectors)


register_connector(ActionConnectorPipeline.__name__, ActionConnectorPipeline)
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/__init__.py ADDED
File without changes
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/__pycache__/obs_preproc.cpython-310.pyc ADDED
Binary file (2.75 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/__pycache__/pipeline.cpython-310.pyc ADDED
Binary file (2.87 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/clip_reward.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Any

import numpy as np

from ray.rllib.connectors.connector import (
    AgentConnector,
    ConnectorContext,
)
from ray.rllib.connectors.registry import register_connector
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.typing import AgentConnectorDataType
from ray.rllib.utils.annotations import OldAPIStack


@OldAPIStack
class ClipRewardAgentConnector(AgentConnector):
    """Clips rewards, either down to their sign or into [-limit, +limit]."""

    def __init__(self, ctx: ConnectorContext, sign=False, limit=None):
        super().__init__(ctx)
        # `sign` and `limit` are mutually exclusive clipping modes.
        assert (
            not sign or not limit
        ), "should not enable both sign and limit reward clipping."
        self.sign = sign
        self.limit = limit

    def transform(self, ac_data: AgentConnectorDataType) -> AgentConnectorDataType:
        """Clips the REWARDS entry of `ac_data.data` in place (if present)."""
        data = ac_data.data
        assert (
            type(data) == dict
        ), "Single agent data must be of type Dict[str, TensorStructType]"

        # The initial observation of an episode carries no reward yet.
        if SampleBatch.REWARDS not in data:
            return ac_data

        if self.sign:
            data[SampleBatch.REWARDS] = np.sign(data[SampleBatch.REWARDS])
        elif self.limit:
            data[SampleBatch.REWARDS] = np.clip(
                data[SampleBatch.REWARDS],
                a_min=-self.limit,
                a_max=self.limit,
            )
        return ac_data

    def to_state(self):
        return ClipRewardAgentConnector.__name__, {
            "sign": self.sign,
            "limit": self.limit,
        }

    @staticmethod
    def from_state(ctx: ConnectorContext, params: Any):
        return ClipRewardAgentConnector(ctx, **params)


register_connector(ClipRewardAgentConnector.__name__, ClipRewardAgentConnector)
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/lambdas.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Any, Callable, Type

import numpy as np
import tree  # dm_tree

from ray.rllib.connectors.connector import (
    AgentConnector,
    ConnectorContext,
)
from ray.rllib.connectors.registry import register_connector
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.typing import (
    AgentConnectorDataType,
    AgentConnectorsOutput,
)
from ray.rllib.utils.annotations import OldAPIStack


@OldAPIStack
def register_lambda_agent_connector(
    name: str, fn: Callable[[Any], Any]
) -> Type[AgentConnector]:
    """A util to register any simple transforming function as an AgentConnector

    The only requirement is that fn should take a single data object and return
    a single data object.

    Args:
        name: Name of the resulting actor connector.
        fn: The function that transforms env / agent data.

    Returns:
        A new AgentConnector class that transforms data using fn.
    """

    # `fn` is captured by closure; the generated class wraps it as a connector.
    class LambdaAgentConnector(AgentConnector):
        def transform(self, ac_data: AgentConnectorDataType) -> AgentConnectorDataType:
            return AgentConnectorDataType(
                ac_data.env_id, ac_data.agent_id, fn(ac_data.data)
            )

        def to_state(self):
            # `fn` itself is not serialized; restoring relies on the class
            # having been registered under `name` in the target process.
            return name, None

        @staticmethod
        def from_state(ctx: ConnectorContext, params: Any):
            return LambdaAgentConnector(ctx)

    # Expose the generated class under the user-provided name.
    LambdaAgentConnector.__name__ = name
    LambdaAgentConnector.__qualname__ = name

    register_connector(name, LambdaAgentConnector)

    return LambdaAgentConnector


@OldAPIStack
def flatten_data(data: AgentConnectorsOutput):
    """Flattens each column of `data.sample_batch` into a flat numpy array.

    Infos, actions, and RNN state-out columns are passed through unchanged,
    as are None-valued columns. Returns a new AgentConnectorsOutput holding
    the original raw dict plus the flattened SampleBatch.
    """
    assert isinstance(
        data, AgentConnectorsOutput
    ), "Single agent data must be of type AgentConnectorsOutput"

    raw_dict = data.raw_dict
    sample_batch = data.sample_batch

    flattened = {}
    for k, v in sample_batch.items():
        if k in [SampleBatch.INFOS, SampleBatch.ACTIONS] or k.startswith("state_out_"):
            # Do not flatten infos, actions, and state_out_ columns.
            flattened[k] = v
            continue
        if v is None:
            # Keep the same column shape.
            flattened[k] = None
            continue
        flattened[k] = np.array(tree.flatten(v))
    flattened = SampleBatch(flattened, is_training=False)

    return AgentConnectorsOutput(raw_dict, flattened)


# Agent connector to build and return a flattened observation SampleBatch
# in addition to the original input dict.
FlattenDataAgentConnector = OldAPIStack(
    register_lambda_agent_connector("FlattenDataAgentConnector", flatten_data)
)
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/pipeline.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging
from typing import Any, List
from collections import defaultdict

from ray.rllib.connectors.connector import (
    AgentConnector,
    Connector,
    ConnectorContext,
    ConnectorPipeline,
)
from ray.rllib.connectors.registry import get_connector, register_connector
from ray.rllib.utils.typing import ActionConnectorDataType, AgentConnectorDataType
from ray.rllib.utils.annotations import OldAPIStack
from ray.util.timer import _Timer


logger = logging.getLogger(__name__)


@OldAPIStack
class AgentConnectorPipeline(ConnectorPipeline, AgentConnector):
    """Runs a list of agent connectors in order over lists of agent data."""

    def __init__(self, ctx: ConnectorContext, connectors: List[Connector]):
        super().__init__(ctx, connectors)
        # One timer per child connector, keyed by its string representation.
        self.timers = defaultdict(_Timer)

    def reset(self, env_id: str):
        # Propagate episode resets to every child connector.
        for connector in self.connectors:
            connector.reset(env_id)

    def on_policy_output(self, output: ActionConnectorDataType):
        # Let every child observe the latest policy output.
        for connector in self.connectors:
            connector.on_policy_output(output)

    def __call__(
        self, acd_list: List[AgentConnectorDataType]
    ) -> List[AgentConnectorDataType]:
        # Each child receives the output list of the previous one.
        result = acd_list
        for connector in self.connectors:
            with self.timers[str(connector)]:
                result = connector(result)
        return result

    def to_state(self):
        # Serialize as (pipeline name, list of child (name, params) tuples).
        children = []
        for connector in self.connectors:
            state = connector.to_state()
            assert isinstance(state, tuple) and len(state) == 2, (
                "Serialized connector state must be in the format of "
                f"Tuple[name: str, params: Any]. Instead we got {state}"
                f"for connector {connector.__name__}."
            )
            children.append(state)
        return AgentConnectorPipeline.__name__, children

    @staticmethod
    def from_state(ctx: ConnectorContext, params: List[Any]):
        assert (
            type(params) == list
        ), "AgentConnectorPipeline takes a list of connector params."
        connectors = []
        for state in params:
            try:
                name, subparams = state
                connectors.append(get_connector(name, ctx, subparams))
            except Exception as e:
                logger.error(f"Failed to de-serialize connector state: {state}")
                raise e
        return AgentConnectorPipeline(ctx, connectors)


register_connector(AgentConnectorPipeline.__name__, AgentConnectorPipeline)
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/agent/state_buffer.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from collections import defaultdict
import logging
import pickle
from typing import Any

import numpy as np
import tree  # dm_tree

from ray import cloudpickle
from ray.rllib.connectors.connector import (
    AgentConnector,
    Connector,
    ConnectorContext,
)
from ray.rllib.connectors.registry import register_connector
from ray.rllib.core.columns import Columns
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import OldAPIStack, override
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
from ray.rllib.utils.typing import ActionConnectorDataType, AgentConnectorDataType


logger = logging.getLogger(__name__)


@OldAPIStack
class StateBufferConnector(AgentConnector):
    """Buffers the latest policy output (action, RNN states, extra fetches)
    per env and agent, and feeds it back into the next input dict.

    This lets the policy condition on its own previous action and state
    without the sampler having to thread them through explicitly.
    """

    def __init__(self, ctx: ConnectorContext, states: Any = None):
        super().__init__(ctx)

        self._initial_states = ctx.initial_states
        self._action_space_struct = get_base_struct_from_space(ctx.action_space)

        # Maps env_id -> agent_id -> (action, states, fetches); defaults to a
        # triple of Nones until the first policy output arrives.
        self._states = defaultdict(lambda: defaultdict(lambda: (None, None, None)))
        self._enable_new_api_stack = False
        # TODO(jungong) : we would not need this if policies are never stashed
        # during the rollout of a single episode.
        if states:
            try:
                self._states = cloudpickle.loads(states)
            except pickle.UnpicklingError:
                # StateBufferConnector states are only needed for rare cases
                # like stashing then restoring a policy during the rollout of
                # a single episode.
                # It is ok to ignore the error for most of the cases here.
                logger.info(
                    "Can not restore StateBufferConnector states. This warning can "
                    "usually be ignored, unless it is from restoring a stashed policy."
                )

    @override(Connector)
    def in_eval(self):
        super().in_eval()

    def reset(self, env_id: str):
        # States should not be carried over between episodes.
        if env_id in self._states:
            del self._states[env_id]

    def on_policy_output(self, ac_data: ActionConnectorDataType):
        # Buffer latest output states for next input __call__.
        self._states[ac_data.env_id][ac_data.agent_id] = ac_data.output

    def transform(self, ac_data: AgentConnectorDataType) -> AgentConnectorDataType:
        """Adds last action, state-outs, and extra fetches to `ac_data.data`.

        The input dict is mutated in place; the same `ac_data` is returned.
        """
        d = ac_data.data
        assert (
            type(d) is dict
        ), "Single agent data must be of type Dict[str, TensorStructType]"

        env_id = ac_data.env_id
        agent_id = ac_data.agent_id
        assert env_id is not None and agent_id is not None, (
            f"StateBufferConnector requires env_id({env_id}) "
            f"and agent_id({agent_id})"
        )

        action, states, fetches = self._states[env_id][agent_id]

        if action is not None:
            d[SampleBatch.ACTIONS] = action  # Last action
        else:
            # Default zero action (shaped like a sample from the action space).
            d[SampleBatch.ACTIONS] = tree.map_structure(
                lambda s: np.zeros_like(s.sample(), s.dtype)
                if hasattr(s, "dtype")
                else np.zeros_like(s.sample()),
                self._action_space_struct,
            )

        if states is None:
            states = self._initial_states
        if self._enable_new_api_stack:
            if states:
                d[Columns.STATE_OUT] = states
        else:
            for i, v in enumerate(states):
                d["state_out_{}".format(i)] = v

        # Also add extra fetches if available.
        if fetches:
            d.update(fetches)

        return ac_data

    def to_state(self):
        # Note(jungong) : it is ok to use cloudpickle here for states because:
        # 1. self._states may contain arbitrary data objects, and will be hard
        #    to serialize otherwise.
        # 2. serialized states are only useful if a policy is stashed and
        #    restored during the rollout of a single episode. So it is ok to
        #    use cloudpickle for such non-persistent data bits.
        states = cloudpickle.dumps(self._states)
        return StateBufferConnector.__name__, states

    @staticmethod
    def from_state(ctx: ConnectorContext, params: Any):
        return StateBufferConnector(ctx, params)


register_connector(StateBufferConnector.__name__, StateBufferConnector)
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/connector_pipeline_v2.py ADDED
@@ -0,0 +1,381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import defaultdict
2
+ import logging
3
+ from typing import Any, Collection, Dict, List, Optional, Tuple, Type, Union
4
+
5
+ import gymnasium as gym
6
+
7
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
8
+ from ray.rllib.core.rl_module.rl_module import RLModule
9
+ from ray.rllib.utils.annotations import override
10
+ from ray.rllib.utils.checkpoints import Checkpointable
11
+ from ray.rllib.utils.typing import EpisodeType, StateDict
12
+ from ray.util.annotations import PublicAPI
13
+ from ray.util.timer import _Timer
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ @PublicAPI(stability="alpha")
19
+ class ConnectorPipelineV2(ConnectorV2):
20
+ """Utility class for quick manipulation of a connector pipeline."""
21
+
22
    @override(ConnectorV2)
    def recompute_output_observation_space(
        self,
        input_observation_space: gym.Space,
        input_action_space: gym.Space,
    ) -> gym.Space:
        # Re-chain the given input spaces through the child connectors via
        # `_fix_spaces` (defined elsewhere in this class — not shown here),
        # then report the pipeline's resulting output observation space.
        self._fix_spaces(input_observation_space, input_action_space)
        return self.observation_space
30
+
31
    @override(ConnectorV2)
    def recompute_output_action_space(
        self,
        input_observation_space: gym.Space,
        input_action_space: gym.Space,
    ) -> gym.Space:
        # Re-chain the given input spaces through the child connectors via
        # `_fix_spaces` (defined elsewhere in this class — not shown here),
        # then report the pipeline's resulting output action space.
        self._fix_spaces(input_observation_space, input_action_space)
        return self.action_space
39
+
40
+ def __init__(
41
+ self,
42
+ input_observation_space: Optional[gym.Space] = None,
43
+ input_action_space: Optional[gym.Space] = None,
44
+ *,
45
+ connectors: Optional[List[ConnectorV2]] = None,
46
+ **kwargs,
47
+ ):
48
+ """Initializes a ConnectorPipelineV2 instance.
49
+
50
+ Args:
51
+ input_observation_space: The (optional) input observation space for this
52
+ connector piece. This is the space coming from a previous connector
53
+ piece in the (env-to-module or learner) pipeline or is directly
54
+ defined within the gym.Env.
55
+ input_action_space: The (optional) input action space for this connector
56
+ piece. This is the space coming from a previous connector piece in the
57
+ (module-to-env) pipeline or is directly defined within the gym.Env.
58
+ connectors: A list of individual ConnectorV2 pieces to be added to this
59
+ pipeline during construction. Note that you can always add (or remove)
60
+ more ConnectorV2 pieces later on the fly.
61
+ """
62
+ self.connectors = []
63
+
64
+ for conn in connectors:
65
+ # If we have a `ConnectorV2` instance just append.
66
+ if isinstance(conn, ConnectorV2):
67
+ self.connectors.append(conn)
68
+ # If, we have a class with `args` and `kwargs`, build the instance.
69
+ # Note that this way of constructing a pipeline should only be
70
+ # used internally when restoring the pipeline state from a
71
+ # checkpoint.
72
+ elif isinstance(conn, tuple) and len(conn) == 3:
73
+ self.connectors.append(conn[0](*conn[1], **conn[2]))
74
+
75
+ super().__init__(input_observation_space, input_action_space, **kwargs)
76
+
77
+ self.timers = defaultdict(_Timer)
78
+
79
    def __len__(self):
        # Number of connector pieces currently in this pipeline.
        return len(self.connectors)
81
+
82
    @override(ConnectorV2)
    def __call__(
        self,
        *,
        rl_module: RLModule,
        batch: Dict[str, Any],
        episodes: List[EpisodeType],
        explore: Optional[bool] = None,
        shared_data: Optional[dict] = None,
        **kwargs,
    ) -> Any:
        """In a pipeline, we simply call each of our connector pieces after each other.

        Each connector piece receives as input the output of the previous connector
        piece in the pipeline.
        """
        shared_data = shared_data if shared_data is not None else {}
        # Loop through connector pieces and call each one with the output of the
        # previous one. Thereby, time each connector piece's call.
        for connector in self.connectors:
            timer = self.timers[str(connector)]
            with timer:
                batch = connector(
                    rl_module=rl_module,
                    batch=batch,
                    episodes=episodes,
                    explore=explore,
                    shared_data=shared_data,
                    # Deprecated arg.
                    data=batch,
                    **kwargs,
                )
                # Sanity check: each piece must hand back the (possibly
                # mutated) batch dict for the next piece to consume.
                if not isinstance(batch, dict):
                    raise ValueError(
                        f"`data` returned by ConnectorV2 {connector} must be a dict! "
                        f"You returned {batch}. Check your (custom) connectors' "
                        f"`__call__()` method's return value and make sure you return "
                        f"the `data` arg passed in (either altered or unchanged)."
                    )
        return batch
122
+
123
+ def remove(self, name_or_class: Union[str, Type]):
124
+ """Remove a single connector piece in this pipeline by its name or class.
125
+
126
+ Args:
127
+ name: The name of the connector piece to be removed from the pipeline.
128
+ """
129
+ idx = -1
130
+ for i, c in enumerate(self.connectors):
131
+ if c.__class__.__name__ == name_or_class:
132
+ idx = i
133
+ break
134
+ if idx >= 0:
135
+ del self.connectors[idx]
136
+ self._fix_spaces(self.input_observation_space, self.input_action_space)
137
+ logger.info(
138
+ f"Removed connector {name_or_class} from {self.__class__.__name__}."
139
+ )
140
+ else:
141
+ logger.warning(
142
+ f"Trying to remove a non-existent connector {name_or_class}."
143
+ )
144
+
145
+ def insert_before(
146
+ self,
147
+ name_or_class: Union[str, type],
148
+ connector: ConnectorV2,
149
+ ) -> ConnectorV2:
150
+ """Insert a new connector piece before an existing piece (by name or class).
151
+
152
+ Args:
153
+ name_or_class: Name or class of the connector piece before which `connector`
154
+ will get inserted.
155
+ connector: The new connector piece to be inserted.
156
+
157
+ Returns:
158
+ The ConnectorV2 before which `connector` has been inserted.
159
+ """
160
+ idx = -1
161
+ for idx, c in enumerate(self.connectors):
162
+ if (
163
+ isinstance(name_or_class, str) and c.__class__.__name__ == name_or_class
164
+ ) or (isinstance(name_or_class, type) and c.__class__ is name_or_class):
165
+ break
166
+ if idx < 0:
167
+ raise ValueError(
168
+ f"Can not find connector with name or type '{name_or_class}'!"
169
+ )
170
+ next_connector = self.connectors[idx]
171
+
172
+ self.connectors.insert(idx, connector)
173
+ self._fix_spaces(self.input_observation_space, self.input_action_space)
174
+
175
+ logger.info(
176
+ f"Inserted {connector.__class__.__name__} before {name_or_class} "
177
+ f"to {self.__class__.__name__}."
178
+ )
179
+ return next_connector
180
+
181
+ def insert_after(
182
+ self,
183
+ name_or_class: Union[str, Type],
184
+ connector: ConnectorV2,
185
+ ) -> ConnectorV2:
186
+ """Insert a new connector piece after an existing piece (by name or class).
187
+
188
+ Args:
189
+ name_or_class: Name or class of the connector piece after which `connector`
190
+ will get inserted.
191
+ connector: The new connector piece to be inserted.
192
+
193
+ Returns:
194
+ The ConnectorV2 after which `connector` has been inserted.
195
+ """
196
+ idx = -1
197
+ for idx, c in enumerate(self.connectors):
198
+ if (
199
+ isinstance(name_or_class, str) and c.__class__.__name__ == name_or_class
200
+ ) or (isinstance(name_or_class, type) and c.__class__ is name_or_class):
201
+ break
202
+ if idx < 0:
203
+ raise ValueError(
204
+ f"Can not find connector with name or type '{name_or_class}'!"
205
+ )
206
+ prev_connector = self.connectors[idx]
207
+
208
+ self.connectors.insert(idx + 1, connector)
209
+ self._fix_spaces(self.input_observation_space, self.input_action_space)
210
+
211
+ logger.info(
212
+ f"Inserted {connector.__class__.__name__} after {name_or_class} "
213
+ f"to {self.__class__.__name__}."
214
+ )
215
+
216
+ return prev_connector
217
+
218
+ def prepend(self, connector: ConnectorV2) -> None:
219
+ """Prepend a new connector at the beginning of a connector pipeline.
220
+
221
+ Args:
222
+ connector: The new connector piece to be prepended to this pipeline.
223
+ """
224
+ self.connectors.insert(0, connector)
225
+ self._fix_spaces(self.input_observation_space, self.input_action_space)
226
+
227
+ logger.info(
228
+ f"Added {connector.__class__.__name__} to the beginning of "
229
+ f"{self.__class__.__name__}."
230
+ )
231
+
232
+ def append(self, connector: ConnectorV2) -> None:
233
+ """Append a new connector at the end of a connector pipeline.
234
+
235
+ Args:
236
+ connector: The new connector piece to be appended to this pipeline.
237
+ """
238
+ self.connectors.append(connector)
239
+ self._fix_spaces(self.input_observation_space, self.input_action_space)
240
+
241
+ logger.info(
242
+ f"Added {connector.__class__.__name__} to the end of "
243
+ f"{self.__class__.__name__}."
244
+ )
245
+
246
+ @override(ConnectorV2)
247
+ def get_state(
248
+ self,
249
+ components: Optional[Union[str, Collection[str]]] = None,
250
+ *,
251
+ not_components: Optional[Union[str, Collection[str]]] = None,
252
+ **kwargs,
253
+ ) -> StateDict:
254
+ state = {}
255
+ for conn in self.connectors:
256
+ conn_name = type(conn).__name__
257
+ if self._check_component(conn_name, components, not_components):
258
+ state[conn_name] = conn.get_state(
259
+ components=self._get_subcomponents(conn_name, components),
260
+ not_components=self._get_subcomponents(conn_name, not_components),
261
+ **kwargs,
262
+ )
263
+ return state
264
+
265
+ @override(ConnectorV2)
266
+ def set_state(self, state: Dict[str, Any]) -> None:
267
+ for conn in self.connectors:
268
+ conn_name = type(conn).__name__
269
+ if conn_name in state:
270
+ conn.set_state(state[conn_name])
271
+
272
+ @override(Checkpointable)
273
+ def get_checkpointable_components(self) -> List[Tuple[str, "Checkpointable"]]:
274
+ return [(type(conn).__name__, conn) for conn in self.connectors]
275
+
276
+ # Note that we don't have to override Checkpointable.get_ctor_args_and_kwargs and
277
+ # don't have to return the `connectors` c'tor kwarg from there. This is b/c all
278
+ # connector pieces in this pipeline are themselves Checkpointable components,
279
+ # so they will be properly written into this pipeline's checkpoint.
280
+ @override(Checkpointable)
281
+ def get_ctor_args_and_kwargs(self) -> Tuple[Tuple, Dict[str, Any]]:
282
+ return (
283
+ (self.input_observation_space, self.input_action_space), # *args
284
+ {
285
+ "connectors": [
286
+ (type(conn), *conn.get_ctor_args_and_kwargs())
287
+ for conn in self.connectors
288
+ ]
289
+ },
290
+ )
291
+
292
+ @override(ConnectorV2)
293
+ def reset_state(self) -> None:
294
+ for conn in self.connectors:
295
+ conn.reset_state()
296
+
297
+ @override(ConnectorV2)
298
+ def merge_states(self, states: List[Dict[str, Any]]) -> Dict[str, Any]:
299
+ merged_states = {}
300
+ if not states:
301
+ return merged_states
302
+ for i, (key, item) in enumerate(states[0].items()):
303
+ state_list = [state[key] for state in states]
304
+ conn = self.connectors[i]
305
+ merged_states[key] = conn.merge_states(state_list)
306
+ return merged_states
307
+
308
+ def __repr__(self, indentation: int = 0):
309
+ return "\n".join(
310
+ [" " * indentation + self.__class__.__name__]
311
+ + [c.__str__(indentation + 4) for c in self.connectors]
312
+ )
313
+
314
+ def __getitem__(
315
+ self,
316
+ key: Union[str, int, Type],
317
+ ) -> Union[ConnectorV2, List[ConnectorV2]]:
318
+ """Returns a single ConnectorV2 or list of ConnectorV2s that fit `key`.
319
+
320
+ If key is an int, we return a single ConnectorV2 at that index in this pipeline.
321
+ If key is a ConnectorV2 type or a string matching the class name of a
322
+ ConnectorV2 in this pipeline, we return a list of all ConnectorV2s in this
323
+ pipeline matching the specified class.
324
+
325
+ Args:
326
+ key: The key to find or to index by.
327
+
328
+ Returns:
329
+ A single ConnectorV2 or a list of ConnectorV2s matching `key`.
330
+ """
331
+ # Key is an int -> Index into pipeline and return.
332
+ if isinstance(key, int):
333
+ return self.connectors[key]
334
+ # Key is a class.
335
+ elif isinstance(key, type):
336
+ results = []
337
+ for c in self.connectors:
338
+ if issubclass(c.__class__, key):
339
+ results.append(c)
340
+ return results
341
+ # Key is a string -> Find connector(s) by name.
342
+ elif isinstance(key, str):
343
+ results = []
344
+ for c in self.connectors:
345
+ if c.name == key:
346
+ results.append(c)
347
+ return results
348
+ # Slicing not supported (yet).
349
+ elif isinstance(key, slice):
350
+ raise NotImplementedError(
351
+ "Slicing of ConnectorPipelineV2 is currently not supported!"
352
+ )
353
+ else:
354
+ raise NotImplementedError(
355
+ f"Indexing ConnectorPipelineV2 by {type(key)} is currently not "
356
+ f"supported!"
357
+ )
358
+
359
+ @property
360
+ def observation_space(self):
361
+ if len(self) > 0:
362
+ return self.connectors[-1].observation_space
363
+ return self._observation_space
364
+
365
+ @property
366
+ def action_space(self):
367
+ if len(self) > 0:
368
+ return self.connectors[-1].action_space
369
+ return self._action_space
370
+
371
+ def _fix_spaces(self, input_observation_space, input_action_space):
372
+ if len(self) > 0:
373
+ # Fix each connector's input_observation- and input_action space in
374
+ # the pipeline.
375
+ obs_space = input_observation_space
376
+ act_space = input_action_space
377
+ for con in self.connectors:
378
+ con.input_action_space = act_space
379
+ con.input_observation_space = obs_space
380
+ obs_space = con.observation_space
381
+ act_space = con.action_space
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ray.rllib.connectors.common.add_observations_from_episodes_to_batch import (
2
+ AddObservationsFromEpisodesToBatch,
3
+ )
4
+ from ray.rllib.connectors.common.add_states_from_episodes_to_batch import (
5
+ AddStatesFromEpisodesToBatch,
6
+ )
7
+ from ray.rllib.connectors.common.agent_to_module_mapping import AgentToModuleMapping
8
+ from ray.rllib.connectors.common.batch_individual_items import BatchIndividualItems
9
+ from ray.rllib.connectors.common.numpy_to_tensor import NumpyToTensor
10
+ from ray.rllib.connectors.env_to_module.env_to_module_pipeline import (
11
+ EnvToModulePipeline,
12
+ )
13
+ from ray.rllib.connectors.env_to_module.flatten_observations import (
14
+ FlattenObservations,
15
+ )
16
+ from ray.rllib.connectors.env_to_module.mean_std_filter import MeanStdFilter
17
+ from ray.rllib.connectors.env_to_module.prev_actions_prev_rewards import (
18
+ PrevActionsPrevRewards,
19
+ )
20
+ from ray.rllib.connectors.env_to_module.write_observations_to_episodes import (
21
+ WriteObservationsToEpisodes,
22
+ )
23
+
24
+
25
+ __all__ = [
26
+ "AddObservationsFromEpisodesToBatch",
27
+ "AddStatesFromEpisodesToBatch",
28
+ "AgentToModuleMapping",
29
+ "BatchIndividualItems",
30
+ "EnvToModulePipeline",
31
+ "FlattenObservations",
32
+ "MeanStdFilter",
33
+ "NumpyToTensor",
34
+ "PrevActionsPrevRewards",
35
+ "WriteObservationsToEpisodes",
36
+ ]
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/__pycache__/mean_std_filter.cpython-310.pyc ADDED
Binary file (8.33 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/__pycache__/write_observations_to_episodes.cpython-310.pyc ADDED
Binary file (5.06 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/flatten_observations.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Collection, Dict, List, Optional
2
+
3
+ import gymnasium as gym
4
+ from gymnasium.spaces import Box
5
+ import numpy as np
6
+ import tree # pip install dm_tree
7
+
8
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
9
+ from ray.rllib.core.rl_module.rl_module import RLModule
10
+ from ray.rllib.utils.annotations import override
11
+ from ray.rllib.utils.numpy import flatten_inputs_to_1d_tensor
12
+ from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
13
+ from ray.rllib.utils.typing import AgentID, EpisodeType
14
+ from ray.util.annotations import PublicAPI
15
+
16
+
17
+ @PublicAPI(stability="alpha")
18
+ class FlattenObservations(ConnectorV2):
19
+ """A connector piece that flattens all observation components into a 1D array.
20
+
21
+ - Should be used only in env-to-module pipelines.
22
+ - Works directly on the incoming episodes list and changes the last observation
23
+ in-place (write the flattened observation back into the episode).
24
+ - This connector does NOT alter the incoming batch (`data`) when called.
25
+ - This connector does NOT work in a `LearnerConnectorPipeline` because it requires
26
+ the incoming episodes to still be ongoing (in progress) as it only alters the
27
+ latest observation, not all observations in an episode.
28
+
29
+ .. testcode::
30
+
31
+ import gymnasium as gym
32
+ import numpy as np
33
+
34
+ from ray.rllib.connectors.env_to_module import FlattenObservations
35
+ from ray.rllib.env.single_agent_episode import SingleAgentEpisode
36
+ from ray.rllib.utils.test_utils import check
37
+
38
+ # Some arbitrarily nested, complex observation space.
39
+ obs_space = gym.spaces.Dict({
40
+ "a": gym.spaces.Box(-10.0, 10.0, (), np.float32),
41
+ "b": gym.spaces.Tuple([
42
+ gym.spaces.Discrete(2),
43
+ gym.spaces.Box(-1.0, 1.0, (2, 1), np.float32),
44
+ ]),
45
+ "c": gym.spaces.MultiDiscrete([2, 3]),
46
+ })
47
+ act_space = gym.spaces.Discrete(2)
48
+
49
+ # Two example episodes, both with initial (reset) observations coming from the
50
+ # above defined observation space.
51
+ episode_1 = SingleAgentEpisode(
52
+ observations=[
53
+ {
54
+ "a": np.array(-10.0, np.float32),
55
+ "b": (1, np.array([[-1.0], [-1.0]], np.float32)),
56
+ "c": np.array([0, 2]),
57
+ },
58
+ ],
59
+ )
60
+ episode_2 = SingleAgentEpisode(
61
+ observations=[
62
+ {
63
+ "a": np.array(10.0, np.float32),
64
+ "b": (0, np.array([[1.0], [1.0]], np.float32)),
65
+ "c": np.array([1, 1]),
66
+ },
67
+ ],
68
+ )
69
+
70
+ # Construct our connector piece.
71
+ connector = FlattenObservations(obs_space, act_space)
72
+
73
+ # Call our connector piece with the example data.
74
+ output_batch = connector(
75
+ rl_module=None, # This connector works without an RLModule.
76
+ batch={}, # This connector does not alter the input batch.
77
+ episodes=[episode_1, episode_2],
78
+ explore=True,
79
+ shared_data={},
80
+ )
81
+
82
+ # The connector does not alter the data and acts as pure pass-through.
83
+ check(output_batch, {})
84
+
85
+ # The connector has flattened each item in the episodes to a 1D tensor.
86
+ check(
87
+ episode_1.get_observations(0),
88
+ # box() disc(2). box(2, 1). multidisc(2, 3)........
89
+ np.array([-10.0, 0.0, 1.0, -1.0, -1.0, 1.0, 0.0, 0.0, 0.0, 1.0]),
90
+ )
91
+ check(
92
+ episode_2.get_observations(0),
93
+ # box() disc(2). box(2, 1). multidisc(2, 3)........
94
+ np.array([10.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]),
95
+ )
96
+ """
97
+
98
+ @override(ConnectorV2)
99
+ def recompute_output_observation_space(
100
+ self,
101
+ input_observation_space,
102
+ input_action_space,
103
+ ) -> gym.Space:
104
+ self._input_obs_base_struct = get_base_struct_from_space(
105
+ self.input_observation_space
106
+ )
107
+ if self._multi_agent:
108
+ spaces = {}
109
+ for agent_id, space in self._input_obs_base_struct.items():
110
+ if self._agent_ids and agent_id not in self._agent_ids:
111
+ spaces[agent_id] = self._input_obs_base_struct[agent_id]
112
+ else:
113
+ sample = flatten_inputs_to_1d_tensor(
114
+ tree.map_structure(
115
+ lambda s: s.sample(),
116
+ self._input_obs_base_struct[agent_id],
117
+ ),
118
+ self._input_obs_base_struct[agent_id],
119
+ batch_axis=False,
120
+ )
121
+ spaces[agent_id] = Box(
122
+ float("-inf"), float("inf"), (len(sample),), np.float32
123
+ )
124
+ return gym.spaces.Dict(spaces)
125
+ else:
126
+ sample = flatten_inputs_to_1d_tensor(
127
+ tree.map_structure(
128
+ lambda s: s.sample(),
129
+ self._input_obs_base_struct,
130
+ ),
131
+ self._input_obs_base_struct,
132
+ batch_axis=False,
133
+ )
134
+ return Box(float("-inf"), float("inf"), (len(sample),), np.float32)
135
+
136
+ def __init__(
137
+ self,
138
+ input_observation_space: Optional[gym.Space] = None,
139
+ input_action_space: Optional[gym.Space] = None,
140
+ *,
141
+ multi_agent: bool = False,
142
+ agent_ids: Optional[Collection[AgentID]] = None,
143
+ **kwargs,
144
+ ):
145
+ """Initializes a FlattenObservations instance.
146
+
147
+ Args:
148
+ multi_agent: Whether this connector operates on multi-agent observations,
149
+ in which case, the top-level of the Dict space (where agent IDs are
150
+ mapped to individual agents' observation spaces) is left as-is.
151
+ agent_ids: If multi_agent is True, this argument defines a collection of
152
+ AgentIDs for which to flatten. AgentIDs not in this collection are
153
+ ignored.
154
+ If None, flatten observations for all AgentIDs. None is the default.
155
+ """
156
+ self._input_obs_base_struct = None
157
+ self._multi_agent = multi_agent
158
+ self._agent_ids = agent_ids
159
+
160
+ super().__init__(input_observation_space, input_action_space, **kwargs)
161
+
162
+ @override(ConnectorV2)
163
+ def __call__(
164
+ self,
165
+ *,
166
+ rl_module: RLModule,
167
+ batch: Dict[str, Any],
168
+ episodes: List[EpisodeType],
169
+ explore: Optional[bool] = None,
170
+ shared_data: Optional[dict] = None,
171
+ **kwargs,
172
+ ) -> Any:
173
+ for sa_episode in self.single_agent_episode_iterator(
174
+ episodes, agents_that_stepped_only=True
175
+ ):
176
+ # Episode is not finalized yet and thus still operates on lists of items.
177
+ assert not sa_episode.is_finalized
178
+
179
+ last_obs = sa_episode.get_observations(-1)
180
+
181
+ if self._multi_agent:
182
+ if (
183
+ self._agent_ids is not None
184
+ and sa_episode.agent_id not in self._agent_ids
185
+ ):
186
+ flattened_obs = last_obs
187
+ else:
188
+ flattened_obs = flatten_inputs_to_1d_tensor(
189
+ inputs=last_obs,
190
+ # In the multi-agent case, we need to use the specific agent's
191
+ # space struct, not the multi-agent observation space dict.
192
+ spaces_struct=self._input_obs_base_struct[sa_episode.agent_id],
193
+ # Our items are individual observations (no batch axis present).
194
+ batch_axis=False,
195
+ )
196
+ else:
197
+ flattened_obs = flatten_inputs_to_1d_tensor(
198
+ inputs=last_obs,
199
+ spaces_struct=self._input_obs_base_struct,
200
+ # Our items are individual observations (no batch axis present).
201
+ batch_axis=False,
202
+ )
203
+
204
+ # Write new observation directly back into the episode.
205
+ sa_episode.set_observations(at_indices=-1, new_data=flattened_obs)
206
+ # We set the Episode's observation space to ours so that we can safely
207
+ # set the last obs to the new value (without causing a space mismatch
208
+ # error).
209
+ sa_episode.observation_space = self.observation_space
210
+
211
+ return batch
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/frame_stacking.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from functools import partial
2
+
3
+ from ray.rllib.connectors.common.frame_stacking import _FrameStacking
4
+
5
+
6
+ FrameStackingEnvToModule = partial(_FrameStacking, as_learner_connector=False)
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/observation_preprocessor.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ from typing import Any, Dict, List, Optional
3
+
4
+ import gymnasium as gym
5
+
6
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
7
+ from ray.rllib.core.rl_module.rl_module import RLModule
8
+ from ray.rllib.utils.annotations import override
9
+ from ray.rllib.utils.typing import EpisodeType
10
+ from ray.util.annotations import PublicAPI
11
+
12
+
13
+ @PublicAPI(stability="alpha")
14
+ class ObservationPreprocessor(ConnectorV2, abc.ABC):
15
+ """Env-to-module connector performing one preprocessor step on the last observation.
16
+
17
+ This is a convenience class that simplifies the writing of few-step preprocessor
18
+ connectors.
19
+
20
+ Users must implement the `preprocess()` method, which simplifies the usual procedure
21
+ of extracting some data from a list of episodes and adding it to the batch to a mere
22
+ "old-observation --transform--> return new-observation" step.
23
+ """
24
+
25
+ @override(ConnectorV2)
26
+ def recompute_output_observation_space(
27
+ self,
28
+ input_observation_space: gym.Space,
29
+ input_action_space: gym.Space,
30
+ ) -> gym.Space:
31
+ # Users should override this method only in case the `ObservationPreprocessor`
32
+ # changes the observation space of the pipeline. In this case, return the new
33
+ # observation space based on the incoming one (`input_observation_space`).
34
+ return super().recompute_output_observation_space(
35
+ input_observation_space, input_action_space
36
+ )
37
+
38
+ @abc.abstractmethod
39
+ def preprocess(self, observation):
40
+ """Override to implement the preprocessing logic.
41
+
42
+ Args:
43
+ observation: A single (non-batched) observation item for a single agent to
44
+ be processed by this connector.
45
+
46
+ Returns:
47
+ The new observation after `observation` has been preprocessed.
48
+ """
49
+
50
+ @override(ConnectorV2)
51
+ def __call__(
52
+ self,
53
+ *,
54
+ rl_module: RLModule,
55
+ batch: Dict[str, Any],
56
+ episodes: List[EpisodeType],
57
+ explore: Optional[bool] = None,
58
+ persistent_data: Optional[dict] = None,
59
+ **kwargs,
60
+ ) -> Any:
61
+ # We process and then replace observations inside the episodes directly.
62
+ # Thus, all following connectors will only see and operate on the already
63
+ # processed observation (w/o having access anymore to the original
64
+ # observations).
65
+ for sa_episode in self.single_agent_episode_iterator(episodes):
66
+ observation = sa_episode.get_observations(-1)
67
+
68
+ # Process the observation and write the new observation back into the
69
+ # episode.
70
+ new_observation = self.preprocess(observation=observation)
71
+ sa_episode.set_observations(at_indices=-1, new_data=new_observation)
72
+ # We set the Episode's observation space to ours so that we can safely
73
+ # set the last obs to the new value (without causing a space mismatch
74
+ # error).
75
+ sa_episode.observation_space = self.observation_space
76
+
77
+ # Leave `batch` as is. RLlib's default connector will automatically
78
+ # populate the OBS column therein from the episodes' now transformed
79
+ # observations.
80
+ return batch
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/prev_actions_prev_rewards.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, List, Optional
2
+
3
+ import gymnasium as gym
4
+ from gymnasium.spaces import Box
5
+ import numpy as np
6
+
7
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
8
+ from ray.rllib.core.rl_module.rl_module import RLModule
9
+ from ray.rllib.utils.annotations import override
10
+ from ray.rllib.utils.spaces.space_utils import (
11
+ batch as batch_fn,
12
+ flatten_to_single_ndarray,
13
+ )
14
+ from ray.rllib.utils.typing import EpisodeType
15
+ from ray.util.annotations import PublicAPI
16
+
17
+
18
+ @PublicAPI(stability="alpha")
19
+ class PrevActionsPrevRewards(ConnectorV2):
20
+ """A connector piece that adds previous rewards and actions to the input obs.
21
+
22
+ - Requires Columns.OBS to be already a part of the batch.
23
+ - This connector makes the assumption that under the Columns.OBS key in batch,
24
+ there is either a list of individual env observations to be flattened (single-agent
25
+ case) or a dict mapping (AgentID, ModuleID)-tuples to lists of data items to be
26
+ flattened (multi-agent case).
27
+ - Converts Columns.OBS data into a dict (or creates a sub-dict if obs are
28
+ already a dict), and adds "prev_rewards" and "prev_actions"
29
+ to this dict. The original observations are stored under the self.ORIG_OBS_KEY in
30
+ that dict.
31
+ - If your RLModule does not handle dict inputs, you will have to plug in an
32
+ `FlattenObservations` connector piece after this one.
33
+ - Does NOT work in a Learner pipeline as it operates on individual observation
34
+ items (as opposed to batched/time-ranked data).
35
+ - Therefore, assumes that the altered (flattened) observations will be written
36
+ back into the episode by a later connector piece in the env-to-module pipeline
37
+ (which this piece is part of as well).
38
+ - Only reads reward- and action information from the given list of Episode objects.
39
+ - Does NOT write any observations (or other data) to the given Episode objects.
40
+ """
41
+
42
+ ORIG_OBS_KEY = "_orig_obs"
43
+ PREV_ACTIONS_KEY = "prev_n_actions"
44
+ PREV_REWARDS_KEY = "prev_n_rewards"
45
+
46
+ @override(ConnectorV2)
47
+ def recompute_output_observation_space(
48
+ self,
49
+ input_observation_space: gym.Space,
50
+ input_action_space: gym.Space,
51
+ ) -> gym.Space:
52
+ if self._multi_agent:
53
+ ret = {}
54
+ for agent_id, obs_space in input_observation_space.spaces.items():
55
+ act_space = input_action_space[agent_id]
56
+ ret[agent_id] = self._convert_individual_space(obs_space, act_space)
57
+ return gym.spaces.Dict(ret)
58
+ else:
59
+ return self._convert_individual_space(
60
+ input_observation_space, input_action_space
61
+ )
62
+
63
+ def __init__(
64
+ self,
65
+ input_observation_space: Optional[gym.Space] = None,
66
+ input_action_space: Optional[gym.Space] = None,
67
+ *,
68
+ multi_agent: bool = False,
69
+ n_prev_actions: int = 1,
70
+ n_prev_rewards: int = 1,
71
+ **kwargs,
72
+ ):
73
+ """Initializes a PrevActionsPrevRewards instance.
74
+
75
+ Args:
76
+ multi_agent: Whether this is a connector operating on a multi-agent
77
+ observation space mapping AgentIDs to individual agents' observations.
78
+ n_prev_actions: The number of previous actions to include in the output
79
+ data. Discrete actions are ont-hot'd. If > 1, will concatenate the
80
+ individual action tensors.
81
+ n_prev_rewards: The number of previous rewards to include in the output
82
+ data.
83
+ """
84
+ super().__init__(
85
+ input_observation_space=input_observation_space,
86
+ input_action_space=input_action_space,
87
+ **kwargs,
88
+ )
89
+
90
+ self._multi_agent = multi_agent
91
+ self.n_prev_actions = n_prev_actions
92
+ self.n_prev_rewards = n_prev_rewards
93
+
94
+ # TODO: Move into input_observation_space setter
95
+ # Thus far, this connector piece only operates on discrete action spaces.
96
+ # act_spaces = [self.input_action_space]
97
+ # if self._multi_agent:
98
+ # act_spaces = self.input_action_space.spaces.values()
99
+ # if not all(isinstance(s, gym.spaces.Discrete) for s in act_spaces):
100
+ # raise ValueError(
101
+ # f"{type(self).__name__} only works on Discrete action spaces "
102
+ # f"thus far (or, for multi-agent, on Dict spaces mapping AgentIDs to "
103
+ # f"the individual agents' Discrete action spaces)!"
104
+ # )
105
+
106
+ @override(ConnectorV2)
107
+ def __call__(
108
+ self,
109
+ *,
110
+ rl_module: RLModule,
111
+ batch: Optional[Dict[str, Any]],
112
+ episodes: List[EpisodeType],
113
+ explore: Optional[bool] = None,
114
+ shared_data: Optional[dict] = None,
115
+ **kwargs,
116
+ ) -> Any:
117
+ for sa_episode in self.single_agent_episode_iterator(
118
+ episodes, agents_that_stepped_only=True
119
+ ):
120
+ # Episode is not finalized yet and thus still operates on lists of items.
121
+ assert not sa_episode.is_finalized
122
+
123
+ augmented_obs = {self.ORIG_OBS_KEY: sa_episode.get_observations(-1)}
124
+
125
+ if self.n_prev_actions:
126
+ augmented_obs[self.PREV_ACTIONS_KEY] = flatten_to_single_ndarray(
127
+ batch_fn(
128
+ sa_episode.get_actions(
129
+ indices=slice(-self.n_prev_actions, None),
130
+ fill=0.0,
131
+ one_hot_discrete=True,
132
+ )
133
+ )
134
+ )
135
+
136
+ if self.n_prev_rewards:
137
+ augmented_obs[self.PREV_REWARDS_KEY] = np.array(
138
+ sa_episode.get_rewards(
139
+ indices=slice(-self.n_prev_rewards, None),
140
+ fill=0.0,
141
+ )
142
+ )
143
+
144
+ # Write new observation directly back into the episode.
145
+ sa_episode.set_observations(at_indices=-1, new_data=augmented_obs)
146
+ # We set the Episode's observation space to ours so that we can safely
147
+ # set the last obs to the new value (without causing a space mismatch
148
+ # error).
149
+ sa_episode.observation_space = self.observation_space
150
+
151
+ return batch
152
+
153
+ def _convert_individual_space(self, obs_space, act_space):
154
+ return gym.spaces.Dict(
155
+ {
156
+ self.ORIG_OBS_KEY: obs_space,
157
+ # Currently only works for Discrete action spaces.
158
+ self.PREV_ACTIONS_KEY: Box(
159
+ 0.0, 1.0, (act_space.n * self.n_prev_actions,), np.float32
160
+ ),
161
+ self.PREV_REWARDS_KEY: Box(
162
+ float("-inf"),
163
+ float("inf"),
164
+ (self.n_prev_rewards,),
165
+ np.float32,
166
+ ),
167
+ }
168
+ )
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/env_to_module/write_observations_to_episodes.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, List, Optional
2
+
3
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
4
+ from ray.rllib.core.columns import Columns
5
+ from ray.rllib.core.rl_module.rl_module import RLModule
6
+ from ray.rllib.utils.annotations import override
7
+ from ray.rllib.utils.typing import EpisodeType
8
+ from ray.util.annotations import PublicAPI
9
+
10
+
11
+ @PublicAPI(stability="alpha")
12
+ class WriteObservationsToEpisodes(ConnectorV2):
13
+ """Writes the observations from the batch into the running episodes.
14
+
15
+ Note: This is one of the default env-to-module ConnectorV2 pieces that are added
16
+ automatically by RLlib into every env-to-module connector pipelines, unless
17
+ `config.add_default_connectors_to_env_to_module_pipeline` is set to False.
18
+
19
+ The default env-to-module connector pipeline is:
20
+ [
21
+ [0 or more user defined ConnectorV2 pieces],
22
+ AddObservationsFromEpisodesToBatch,
23
+ AddStatesFromEpisodesToBatch,
24
+ AgentToModuleMapping, # only in multi-agent setups!
25
+ BatchIndividualItems,
26
+ NumpyToTensor,
27
+ ]
28
+
29
+ This ConnectorV2:
30
+ - Operates on a batch that already has observations in it and a list of Episode
31
+ objects.
32
+ - Writes the observation(s) from the batch to all the given episodes. Thereby
33
+ the number of observations in the batch must match the length of the list of
34
+ episodes given.
35
+ - Does NOT alter any observations (or other data) in the batch.
36
+ - Can only be used in an EnvToModule pipeline (writing into Episode objects in a
37
+ Learner pipeline does not make a lot of sense as - after the learner update - the
38
+ list of episodes is discarded).
39
+
40
+ .. testcode::
41
+
42
+ import gymnasium as gym
43
+ import numpy as np
44
+
45
+ from ray.rllib.connectors.env_to_module import WriteObservationsToEpisodes
46
+ from ray.rllib.env.single_agent_episode import SingleAgentEpisode
47
+ from ray.rllib.utils.test_utils import check
48
+
49
+ # Assume we have two episodes (vectorized), then our forward batch will carry
50
+ # two observation records (batch size = 2).
51
+ # The connector in this example will write these two (possibly transformed)
52
+ # observations back into the two respective SingleAgentEpisode objects.
53
+ batch = {
54
+ "obs": [np.array([0.0, 1.0], np.float32), np.array([2.0, 3.0], np.float32)],
55
+ }
56
+
57
+ # Our two episodes have one observation each (i.e. the reset one). This is the
58
+ # one that will be overwritten by the connector in this example.
59
+ obs_space = gym.spaces.Box(-10.0, 10.0, (2,), np.float32)
60
+ act_space = gym.spaces.Discrete(2)
61
+ episodes = [
62
+ SingleAgentEpisode(
63
+ observation_space=obs_space,
64
+ observations=[np.array([-10, -20], np.float32)],
65
+ len_lookback_buffer=0,
66
+ ) for _ in range(2)
67
+ ]
68
+ # Make sure everything is setup correctly.
69
+ check(episodes[0].get_observations(0), [-10.0, -20.0])
70
+ check(episodes[1].get_observations(-1), [-10.0, -20.0])
71
+
72
+ # Create our connector piece.
73
+ connector = WriteObservationsToEpisodes(obs_space, act_space)
74
+
75
+ # Call the connector (and thereby write the transformed observations back
76
+ # into the episodes).
77
+ output_batch = connector(
78
+ rl_module=None, # This particular connector works without an RLModule.
79
+ batch=batch,
80
+ episodes=episodes,
81
+ explore=True,
82
+ shared_data={},
83
+ )
84
+
85
+ # The connector does NOT change the data batch being passed through.
86
+ check(output_batch, batch)
87
+
88
+ # However, the connector has overwritten the last observations in the episodes.
89
+ check(episodes[0].get_observations(-1), [0.0, 1.0])
90
+ check(episodes[1].get_observations(0), [2.0, 3.0])
91
+ """
92
+
93
+ @override(ConnectorV2)
94
+ def __call__(
95
+ self,
96
+ *,
97
+ rl_module: RLModule,
98
+ batch: Optional[Dict[str, Any]],
99
+ episodes: List[EpisodeType],
100
+ explore: Optional[bool] = None,
101
+ shared_data: Optional[dict] = None,
102
+ **kwargs,
103
+ ) -> Any:
104
+ observations = batch.get(Columns.OBS)
105
+
106
+ if observations is None:
107
+ raise ValueError(
108
+ f"`batch` must already have a column named {Columns.OBS} in it "
109
+ f"for this connector to work!"
110
+ )
111
+
112
+ # Note that the following loop works with multi-agent as well as with
113
+ # single-agent episode, as long as the following conditions are met (these
114
+ # will be validated by `self.single_agent_episode_iterator()`):
115
+ # - Per single agent episode, one observation item is expected to exist in
116
+ # `data`, either in a list directly under the "obs" key OR for multi-agent:
117
+ # in a list sitting under a key `(agent_id, module_id)` of a dict sitting
118
+ # under the "obs" key.
119
+ for sa_episode, obs in self.single_agent_episode_iterator(
120
+ episodes=episodes, zip_with_batch_column=observations
121
+ ):
122
+ # Make sure episodes are NOT finalized yet (we are expecting to run in an
123
+ # env-to-module pipeline).
124
+ assert not sa_episode.is_finalized
125
+ # Write new information into the episode.
126
+ sa_episode.set_observations(at_indices=-1, new_data=obs)
127
+ # Change the observation space of the sa_episode.
128
+ sa_episode.observation_space = self.observation_space
129
+
130
+ # Return the unchanged `batch`.
131
+ return batch
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/__pycache__/add_one_ts_to_episodes_and_truncate.cpython-310.pyc ADDED
Binary file (4.8 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/__pycache__/frame_stacking.cpython-310.pyc ADDED
Binary file (375 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/__pycache__/general_advantage_estimation.cpython-310.pyc ADDED
Binary file (5.92 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/__pycache__/learner_connector_pipeline.cpython-310.pyc ADDED
Binary file (551 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/add_columns_from_episodes_to_train_batch.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, List, Optional
2
+
3
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
4
+ from ray.rllib.core.columns import Columns
5
+ from ray.rllib.core.rl_module.rl_module import RLModule
6
+ from ray.rllib.utils.annotations import override
7
+ from ray.rllib.utils.typing import EpisodeType
8
+ from ray.util.annotations import PublicAPI
9
+
10
+
11
@PublicAPI(stability="alpha")
class AddColumnsFromEpisodesToTrainBatch(ConnectorV2):
    """Adds infos/actions/rewards/terminateds/... to train batch.

    Note: This is one of the default Learner ConnectorV2 pieces that are added
    automatically by RLlib into every Learner connector pipeline, unless
    `config.add_default_connectors_to_learner_pipeline` is set to False.

    The default Learner connector pipeline is:
    [
        [0 or more user defined ConnectorV2 pieces],
        AddObservationsFromEpisodesToBatch,
        AddColumnsFromEpisodesToTrainBatch,
        AddStatesFromEpisodesToBatch,
        AgentToModuleMapping,  # only in multi-agent setups!
        BatchIndividualItems,
        NumpyToTensor,
    ]

    Does NOT add observations to train batch (these should have already been added
    by another ConnectorV2 piece: `AddObservationsToTrainBatch` in the same pipeline).

    If provided with `episodes` data, this connector piece makes sure that the final
    train batch going into the RLModule for updating (`forward_train()` call) contains
    at the minimum:
    - Observations: From all episodes under the Columns.OBS key.
    - Actions, rewards, terminal/truncation flags: From all episodes under the
    respective keys.
    - All data inside the episodes' `extra_model_outs` property, e.g. action logp and
    action probs under the respective keys.
    - Internal states: These will NOT be added to the batch by this connector piece
    as this functionality is handled by a different default connector piece:
    `AddStatesFromEpisodesToBatch`.

    If the user wants to customize their own data under the given keys (e.g. obs,
    actions, ...), they can extract from the episodes or recompute from `data`
    their own data and store it in `data` under those keys. In this case, the default
    connector will not change the data under these keys and simply act as a
    pass-through.
    """

    @override(ConnectorV2)
    def __call__(
        self,
        *,
        rl_module: RLModule,
        batch: Optional[Dict[str, Any]],
        episodes: List[EpisodeType],
        explore: Optional[bool] = None,
        shared_data: Optional[dict] = None,
        **kwargs,
    ) -> Any:
        """Copies per-timestep episode data into `batch`, one column at a time.

        Each column (infos, actions, rewards, terminateds, truncateds, and all
        extra-model-outputs) is only filled in if it is NOT already present in
        `batch`; user-provided custom data always wins and this piece then acts
        as a pass-through for that column.

        Returns:
            The (in-place) updated `batch` dict.
        """
        # Infos.
        if Columns.INFOS not in batch:
            for sa_episode in self.single_agent_episode_iterator(
                episodes,
                agents_that_stepped_only=False,
            ):
                self.add_n_batch_items(
                    batch,
                    Columns.INFOS,
                    # Slice (not per-ts indexing): infos can be fetched in bulk.
                    items_to_add=sa_episode.get_infos(slice(0, len(sa_episode))),
                    num_items=len(sa_episode),
                    single_agent_episode=sa_episode,
                )

        # Actions.
        if Columns.ACTIONS not in batch:
            for sa_episode in self.single_agent_episode_iterator(
                episodes,
                agents_that_stepped_only=False,
            ):
                self.add_n_batch_items(
                    batch,
                    Columns.ACTIONS,
                    items_to_add=[
                        sa_episode.get_actions(indices=ts)
                        for ts in range(len(sa_episode))
                    ],
                    num_items=len(sa_episode),
                    single_agent_episode=sa_episode,
                )
        # Rewards.
        if Columns.REWARDS not in batch:
            for sa_episode in self.single_agent_episode_iterator(
                episodes,
                agents_that_stepped_only=False,
            ):
                self.add_n_batch_items(
                    batch,
                    Columns.REWARDS,
                    items_to_add=[
                        sa_episode.get_rewards(indices=ts)
                        for ts in range(len(sa_episode))
                    ],
                    num_items=len(sa_episode),
                    single_agent_episode=sa_episode,
                )
        # Terminateds.
        if Columns.TERMINATEDS not in batch:
            for sa_episode in self.single_agent_episode_iterator(
                episodes,
                agents_that_stepped_only=False,
            ):
                self.add_n_batch_items(
                    batch,
                    Columns.TERMINATEDS,
                    # Only the very last timestep may carry the terminal flag.
                    items_to_add=(
                        [False] * (len(sa_episode) - 1) + [sa_episode.is_terminated]
                        if len(sa_episode) > 0
                        else []
                    ),
                    num_items=len(sa_episode),
                    single_agent_episode=sa_episode,
                )
        # Truncateds.
        if Columns.TRUNCATEDS not in batch:
            for sa_episode in self.single_agent_episode_iterator(
                episodes,
                agents_that_stepped_only=False,
            ):
                self.add_n_batch_items(
                    batch,
                    Columns.TRUNCATEDS,
                    # Only the very last timestep may carry the truncation flag.
                    items_to_add=(
                        [False] * (len(sa_episode) - 1) + [sa_episode.is_truncated]
                        if len(sa_episode) > 0
                        else []
                    ),
                    num_items=len(sa_episode),
                    single_agent_episode=sa_episode,
                )
        # Extra model outputs (except for STATE_OUT, which will be handled by another
        # default connector piece). Also, like with all the fields above, skip
        # those that the user already seemed to have populated via custom connector
        # pieces.
        skip_columns = set(batch.keys()) | {Columns.STATE_IN, Columns.STATE_OUT}
        for sa_episode in self.single_agent_episode_iterator(
            episodes,
            agents_that_stepped_only=False,
        ):
            for column in sa_episode.extra_model_outputs.keys():
                if column not in skip_columns:
                    self.add_n_batch_items(
                        batch,
                        column,
                        items_to_add=[
                            sa_episode.get_extra_model_outputs(key=column, indices=ts)
                            for ts in range(len(sa_episode))
                        ],
                        num_items=len(sa_episode),
                        single_agent_episode=sa_episode,
                    )

        return batch
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/add_next_observations_from_episodes_to_train_batch.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, List, Optional
2
+
3
+ from ray.rllib.core.columns import Columns
4
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
5
+ from ray.rllib.core.rl_module.rl_module import RLModule
6
+ from ray.rllib.utils.annotations import override
7
+ from ray.rllib.utils.typing import EpisodeType
8
+ from ray.util.annotations import PublicAPI
9
+
10
+
11
@PublicAPI(stability="alpha")
class AddNextObservationsFromEpisodesToTrainBatch(ConnectorV2):
    """Adds the NEXT_OBS column with the correct episode observations to train batch.

    - Operates on a list of Episode objects.
    - Gets all observation(s) from all the given episodes (except the very first ones)
    and adds them to the batch under construction in the NEXT_OBS column (as a list of
    individual observations).
    - Does NOT alter any observations (or other data) in the given episodes.
    - Can be used in Learner connector pipelines.

    .. testcode::

        import gymnasium as gym
        import numpy as np

        from ray.rllib.connectors.learner import (
            AddNextObservationsFromEpisodesToTrainBatch
        )
        from ray.rllib.core.columns import Columns
        from ray.rllib.env.single_agent_episode import SingleAgentEpisode
        from ray.rllib.utils.test_utils import check

        # Create two dummy SingleAgentEpisodes, each containing 3 observations,
        # 2 actions and 2 rewards (both episodes are length=2).
        obs_space = gym.spaces.Box(-1.0, 1.0, (2,), np.float32)
        act_space = gym.spaces.Discrete(2)

        episodes = [SingleAgentEpisode(
            observations=[obs_space.sample(), obs_space.sample(), obs_space.sample()],
            actions=[act_space.sample(), act_space.sample()],
            rewards=[1.0, 2.0],
            len_lookback_buffer=0,
        ) for _ in range(2)]
        eps_1_next_obses = episodes[0].get_observations([1, 2])
        eps_2_next_obses = episodes[1].get_observations([1, 2])
        print(f"1st Episode's next obses are {eps_1_next_obses}")
        print(f"2nd Episode's next obses are {eps_2_next_obses}")

        # Create an instance of this class.
        connector = AddNextObservationsFromEpisodesToTrainBatch()

        # Call the connector with the two created episodes.
        # Note that this particular connector works without an RLModule, so we
        # simplify here for the sake of this example.
        output_data = connector(
            rl_module=None,
            batch={},
            episodes=episodes,
            explore=True,
            shared_data={},
        )
        # The output data should now contain the last observations of both episodes,
        # in a "per-episode organized" fashion.
        check(
            output_data,
            {
                Columns.NEXT_OBS: {
                    (episodes[0].id_,): eps_1_next_obses,
                    (episodes[1].id_,): eps_2_next_obses,
                },
            },
        )
    """

    @override(ConnectorV2)
    def __call__(
        self,
        *,
        rl_module: RLModule,
        batch: Dict[str, Any],
        episodes: List[EpisodeType],
        explore: Optional[bool] = None,
        shared_data: Optional[dict] = None,
        **kwargs,
    ) -> Any:
        """Adds each episode's observations at t=1..T to `batch` as NEXT_OBS.

        Returns:
            The (in-place) updated `batch` dict.
        """
        # If NEXT_OBS is already in `batch` (user-provided data wins), early out.
        if Columns.NEXT_OBS in batch:
            return batch

        for sa_episode in self.single_agent_episode_iterator(
            # This is a Learner-only connector -> Get all episodes (for train batch).
            episodes,
            agents_that_stepped_only=False,
        ):
            self.add_n_batch_items(
                batch,
                Columns.NEXT_OBS,
                # Shift by one: obs at index t+1 is the "next obs" of timestep t.
                items_to_add=sa_episode.get_observations(slice(1, len(sa_episode) + 1)),
                num_items=len(sa_episode),
                single_agent_episode=sa_episode,
            )
        return batch
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/add_one_ts_to_episodes_and_truncate.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, List, Optional
2
+
3
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
4
+ from ray.rllib.core.columns import Columns
5
+ from ray.rllib.core.rl_module.rl_module import RLModule
6
+ from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
7
+ from ray.rllib.utils.annotations import override
8
+ from ray.rllib.utils.postprocessing.episodes import add_one_ts_to_episodes_and_truncate
9
+ from ray.rllib.utils.typing import EpisodeType
10
+ from ray.util.annotations import PublicAPI
11
+
12
+
13
@PublicAPI(stability="alpha")
class AddOneTsToEpisodesAndTruncate(ConnectorV2):
    """Adds an artificial timestep to all incoming episodes at the end.

    In detail: The last observations, infos, actions, and all `extra_model_outputs`
    will be duplicated and appended to each episode's data. An extra 0.0 reward
    will be appended to the episode's rewards. The episode's timestep will be
    increased by 1. Also, adds the truncated=True flag to each episode if the
    episode is not already done (terminated or truncated).

    Useful for value function bootstrapping, where it is required to compute a
    forward pass for the very last timestep within the episode,
    i.e. using the following input dict: {
        obs=[final obs],
        state=[final state output],
        prev. reward=[final reward],
        etc..
    }

    .. testcode::

        from ray.rllib.connectors.learner import AddOneTsToEpisodesAndTruncate
        from ray.rllib.env.single_agent_episode import SingleAgentEpisode
        from ray.rllib.utils.test_utils import check

        # Create 2 episodes (both to be extended by one timestep).
        episode1 = SingleAgentEpisode(
            observations=[0, 1, 2],
            actions=[0, 1],
            rewards=[0.0, 1.0],
            terminated=False,
            truncated=False,
            len_lookback_buffer=0,
        ).finalize()
        check(len(episode1), 2)
        check(episode1.is_truncated, False)

        episode2 = SingleAgentEpisode(
            observations=[0, 1, 2, 3, 4, 5],
            actions=[0, 1, 2, 3, 4],
            rewards=[0.0, 1.0, 2.0, 3.0, 4.0],
            terminated=True,  # a terminated episode
            truncated=False,
            len_lookback_buffer=0,
        ).finalize()
        check(len(episode2), 5)
        check(episode2.is_truncated, False)
        check(episode2.is_terminated, True)

        # Create an instance of this class.
        connector = AddOneTsToEpisodesAndTruncate()

        # Call the connector.
        shared_data = {}
        _ = connector(
            rl_module=None,  # Connector used here does not require RLModule.
            batch={},
            episodes=[episode1, episode2],
            shared_data=shared_data,
        )
        # Check on the episodes. Both of them should now be 1 timestep longer.
        check(len(episode1), 3)
        check(episode1.is_truncated, True)
        check(len(episode2), 6)
        check(episode2.is_truncated, False)
        check(episode2.is_terminated, True)
    """

    @override(ConnectorV2)
    def __call__(
        self,
        *,
        rl_module: RLModule,
        batch: Dict[str, Any],
        episodes: List[EpisodeType],
        explore: Optional[bool] = None,
        shared_data: Optional[dict] = None,
        **kwargs,
    ) -> Any:
        """Extends each episode by one ts and writes LOSS_MASK/TERMINATEDS to batch.

        Mutates the given `episodes` in place (extra final timestep, possibly
        setting truncated=True) and adds a loss mask that masks out the
        artificially added timestep.

        Returns:
            The (in-place) updated `batch` dict.
        """
        # Build the loss mask to make sure the extra added timesteps do not influence
        # the final loss and fix the terminateds and truncateds in the batch.

        # For proper v-trace execution, the rules must be as follows:
        # Legend:
        # T: terminal=True
        # R: truncated=True
        # B0: bootstrap with value 0 (also: terminal=True)
        # Bx: bootstrap with some vf-computed value (also: terminal=True)

        # batch: - - - - - - - T B0- - - - - R Bx- - - - R Bx
        # mask : t t t t t t t t f t t t t t t f t t t t t f

        # TODO (sven): Same situation as in TODO below, but for multi-agent episode.
        #  Maybe add a dedicated connector piece for this task?
        # We extend the MultiAgentEpisode's ID by a running number here to make sure
        # we treat each MAEpisode chunk as separate (for potentially upcoming v-trace
        # and LSTM zero-padding) and don't mix data from different chunks.
        if isinstance(episodes[0], MultiAgentEpisode):
            for i, ma_episode in enumerate(episodes):
                ma_episode.id_ += "_" + str(i)
                # Also change the underlying single-agent episode's
                # `multi_agent_episode_id` properties.
                for sa_episode in ma_episode.agent_episodes.values():
                    sa_episode.multi_agent_episode_id = ma_episode.id_

        for i, sa_episode in enumerate(
            self.single_agent_episode_iterator(episodes, agents_that_stepped_only=False)
        ):
            # TODO (sven): This is a little bit of a hack: By extending the Episode's
            #  ID, we make sure that each episode chunk in `episodes` is treated as a
            #  separate episode in the `self.add_n_batch_items` below. Some algos (e.g.
            #  APPO) may have >1 episode chunks from the same episode (same ID) in the
            #  training data, thus leading to a malformatted batch in case of
            #  RNN-triggered zero-padding of the train batch.
            #  For example, if e1 (id=a len=4) and e2 (id=a len=5) are two chunks of the
            #  same episode in `episodes`, the resulting batch would have an additional
            #  timestep in the middle of the episode's "row":
            #  { "obs": {
            #     ("a", <- eps ID): [0, 1, 2, 3 <- len=4, [additional 1 ts (bad)],
            #     0, 1, 2, 3, 4 <- len=5, [additional 1 ts]]
            #  }}
            sa_episode.id_ += "_" + str(i)

            len_ = len(sa_episode)

            # Extend all episodes by one ts.
            add_one_ts_to_episodes_and_truncate([sa_episode])

            # Mask out the artificially appended timestep in the loss.
            loss_mask = [True for _ in range(len_)] + [False]
            self.add_n_batch_items(
                batch,
                Columns.LOSS_MASK,
                loss_mask,
                len_ + 1,
                sa_episode,
            )

            # The extra timestep always counts as terminated (bootstrap point).
            terminateds = (
                [False for _ in range(len_ - 1)]
                + [bool(sa_episode.is_terminated)]
                + [True]  # extra timestep
            )
            self.add_n_batch_items(
                batch,
                Columns.TERMINATEDS,
                terminateds,
                len_ + 1,
                sa_episode,
            )

        # Signal to following connector pieces that the loss-mask which masks out
        # invalid episode ts (for the extra added ts at the end) has already been
        # added to `data`.
        # Fix: `shared_data` defaults to None; only write the flag if a dict was
        # actually passed in (previously this raised a TypeError on the default).
        if shared_data is not None:
            shared_data["_added_loss_mask_for_valid_episode_ts"] = True

        return batch
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/frame_stacking.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from functools import partial
2
+
3
+ from ray.rllib.connectors.common.frame_stacking import _FrameStacking
4
+
5
+
6
# Learner-side alias of the shared frame-stacking connector piece: identical to
# `_FrameStacking`, but pre-bound with `as_learner_connector=True`.
FrameStackingLearner = partial(_FrameStacking, as_learner_connector=True)
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/learner/learner_connector_pipeline.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from ray.rllib.connectors.connector_pipeline_v2 import ConnectorPipelineV2
2
+ from ray.util.annotations import PublicAPI
3
+
4
+
5
@PublicAPI(stability="alpha")
class LearnerConnectorPipeline(ConnectorPipelineV2):
    """Marker subclass for the Learner-side connector pipeline.

    Adds no behavior of its own; all functionality lives in
    `ConnectorPipelineV2`. The distinct type allows pipelines to be
    distinguished by role (env-to-module vs. module-to-env vs. learner).
    """

    pass
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/connectors/module_to_env/remove_single_ts_time_rank_from_batch.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, List, Optional
2
+
3
+ import numpy as np
4
+ import tree # pip install dm_tree
5
+
6
+ from ray.rllib.connectors.connector_v2 import ConnectorV2
7
+ from ray.rllib.core.columns import Columns
8
+ from ray.rllib.core.rl_module.rl_module import RLModule
9
+ from ray.rllib.utils.annotations import override
10
+ from ray.rllib.utils.typing import EpisodeType
11
+ from ray.util.annotations import PublicAPI
12
+
13
+
14
@PublicAPI(stability="alpha")
class RemoveSingleTsTimeRankFromBatch(ConnectorV2):
    """Squeezes the single-timestep time rank (axis 0) back out of batch items.

    Note: This is one of the default module-to-env ConnectorV2 pieces that
    are added automatically by RLlib into every module-to-env connector pipeline,
    unless `config.add_default_connectors_to_module_to_env_pipeline` is set to
    False.

    The default module-to-env connector pipeline is:
    [
        GetActions,
        TensorToNumpy,
        UnBatchToIndividualItems,
        ModuleToAgentUnmapping,  # only in multi-agent setups!
        RemoveSingleTsTimeRankFromBatch,

        [0 or more user defined ConnectorV2 pieces],

        NormalizeAndClipActions,
        ListifyDataForVectorEnv,
    ]

    """

    @override(ConnectorV2)
    def __call__(
        self,
        *,
        rl_module: RLModule,
        batch: Optional[Dict[str, Any]],
        episodes: List[EpisodeType],
        explore: Optional[bool] = None,
        shared_data: Optional[dict] = None,
        **kwargs,
    ) -> Any:
        """Removes the time rank added earlier for stateful modules, if any.

        Returns:
            The (in-place) updated `batch` dict.
        """
        # If single ts time-rank had not been added, early out.
        if shared_data is None or not shared_data.get("_added_single_ts_time_rank"):
            return batch

        def _remove_single_ts(item, eps_id, aid, mid):
            # Only remove time-rank for modules that are stateful (only for those
            # has a time-rank been added).
            if mid is None or rl_module[mid].is_stateful():
                return tree.map_structure(lambda s: np.squeeze(s, axis=0), item)
            return item

        # Iterate over a snapshot of the column names only; the values are not
        # needed here and `foreach_batch_item_change_in_place` mutates `batch`
        # while we iterate (previously this shallow-copied the whole dict and
        # bound an unused `column_data` variable).
        for column in list(batch.keys()):
            # Skip state_out (doesn't have a time rank).
            if column == Columns.STATE_OUT:
                continue
            self.foreach_batch_item_change_in_place(
                batch,
                column=column,
                func=_remove_single_ts,
            )

        return batch
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.42 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/actors.cpython-310.pyc ADDED
Binary file (8.03 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/deprecation.cpython-310.pyc ADDED
Binary file (4.02 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/error.cpython-310.pyc ADDED
Binary file (5.32 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/from_config.cpython-310.pyc ADDED
Binary file (7.5 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/tf_run_builder.cpython-310.pyc ADDED
Binary file (3.67 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__pycache__/torch_utils.cpython-310.pyc ADDED
Binary file (21.1 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/schedules/exponential_schedule.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from ray.rllib.utils.annotations import OldAPIStack, override
4
+ from ray.rllib.utils.framework import try_import_torch
5
+ from ray.rllib.utils.schedules.schedule import Schedule
6
+ from ray.rllib.utils.typing import TensorType
7
+
8
+ torch, _ = try_import_torch()
9
+
10
+
11
@OldAPIStack
class ExponentialSchedule(Schedule):
    """Exponential decay schedule, starting from `initial_p`.

    Returns `initial_p * decay_rate ** (t / schedule_timesteps)`, i.e. the
    output reaches `initial_p * decay_rate` exactly at t=`schedule_timesteps`.
    Note that (unlike e.g. PolynomialSchedule) `t` is NOT clamped, so the
    decay continues past `schedule_timesteps`.
    """

    def __init__(
        self,
        schedule_timesteps: int,
        framework: Optional[str] = None,
        initial_p: float = 1.0,
        decay_rate: float = 0.1,
    ):
        """Initializes a ExponentialSchedule instance.

        Args:
            schedule_timesteps: The number of time steps after which the output
                has decayed to `initial_p * decay_rate`.
            framework: The framework descriptor string, e.g. "tf",
                "torch", or None.
            initial_p: Initial output value (at t=0).
            decay_rate: The fraction of `initial_p` that remains once
                `schedule_timesteps` have passed, per the formula
                `initial_p * decay_rate ** (t / schedule_timesteps)`.
                >0.0: The smaller the decay-rate, the stronger the decay.
                1.0: No decay at all.
        """
        super().__init__(framework=framework)
        assert schedule_timesteps > 0
        self.schedule_timesteps = schedule_timesteps
        self.initial_p = initial_p
        self.decay_rate = decay_rate

    @override(Schedule)
    def _value(self, t: TensorType) -> TensorType:
        """Returns the result of: initial_p * decay_rate ** (`t`/t_max)."""
        # Torch integer tensors cannot be raised to fractional powers; cast.
        if self.framework == "torch" and torch and isinstance(t, torch.Tensor):
            t = t.float()
        return self.initial_p * self.decay_rate ** (t / self.schedule_timesteps)
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/schedules/polynomial_schedule.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from ray.rllib.utils.annotations import OldAPIStack, override
4
+ from ray.rllib.utils.framework import try_import_tf, try_import_torch
5
+ from ray.rllib.utils.schedules.schedule import Schedule
6
+ from ray.rllib.utils.typing import TensorType
7
+
8
+ tf1, tf, tfv = try_import_tf()
9
+ torch, _ = try_import_torch()
10
+
11
+
12
@OldAPIStack
class PolynomialSchedule(Schedule):
    """Polynomial interpolation between `initial_p` and `final_p`.

    Over `schedule_timesteps`. After this many time steps, always returns
    `final_p`.
    """

    def __init__(
        self,
        schedule_timesteps: int,
        final_p: float,
        framework: Optional[str],
        initial_p: float = 1.0,
        power: float = 2.0,
    ):
        """Initializes a PolynomialSchedule instance.

        Args:
            schedule_timesteps: Number of time steps over which to
                anneal `initial_p` to `final_p` (polynomially, per `power`).
            final_p: Final output value.
            framework: The framework descriptor string, e.g. "tf",
                "torch", or None.
            initial_p: Initial output value.
            power: The exponent to use (default: quadratic).
        """
        super().__init__(framework=framework)
        assert schedule_timesteps > 0
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p
        self.power = power

    @override(Schedule)
    def _value(self, t: TensorType) -> TensorType:
        """Returns the result of:
        final_p + (initial_p - final_p) * (1 - `t`/t_max) ** power
        """
        # Torch integer tensors cannot be raised to fractional powers; cast.
        if self.framework == "torch" and torch and isinstance(t, torch.Tensor):
            t = t.float()
        # Clamp t so that the output stays at `final_p` past t_max.
        t = min(t, self.schedule_timesteps)
        return (
            self.final_p
            + (self.initial_p - self.final_p)
            * (1.0 - (t / self.schedule_timesteps)) ** self.power
        )

    @override(Schedule)
    def _tf_value_op(self, t: TensorType) -> TensorType:
        # Graph-mode variant of `_value` (same clamping + formula).
        t = tf.math.minimum(t, self.schedule_timesteps)
        return (
            self.final_p
            + (self.initial_p - self.final_p)
            * (1.0 - (t / self.schedule_timesteps)) ** self.power
        )
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/schedules/scheduler.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from ray.rllib.utils.framework import try_import_tf, try_import_torch
4
+ from ray.rllib.utils.schedules.piecewise_schedule import PiecewiseSchedule
5
+ from ray.rllib.utils.typing import LearningRateOrSchedule, TensorType
6
+ from ray.util.annotations import DeveloperAPI
7
+
8
+
9
+ _, tf, _ = try_import_tf()
10
+ torch, _ = try_import_torch()
11
+
12
+
13
@DeveloperAPI
class Scheduler:
    """Class to manage a scheduled (framework-dependent) tensor variable.

    Uses the PiecewiseSchedule (for maximum configuration flexibility)
    """

    def __init__(
        self,
        *,
        fixed_value_or_schedule: LearningRateOrSchedule,
        framework: str = "torch",
        device: Optional[str] = None,
    ):
        """Initializes a Scheduler instance.

        Args:
            fixed_value_or_schedule: A fixed, constant value (in case no schedule should
                be used) or a schedule configuration in the format of
                [[timestep, value], [timestep, value], ...]
                Intermediary timesteps will be assigned to linearly interpolated
                values. A schedule config's first entry must
                start with timestep 0, i.e.: [[0, initial_value], [...]].
            framework: The framework string, for which to create the tensor variable
                that hold the current value. This is the variable that can be used in
                the graph, e.g. in a loss function.
            device: Optional device (for torch) to place the tensor variable on.
        """
        self.framework = framework
        self.device = device
        # A list/tuple config means "schedule"; any other value is a constant.
        self.use_schedule = isinstance(fixed_value_or_schedule, (list, tuple))

        if self.use_schedule:
            # Custom schedule, based on list of
            # ([ts], [value to be reached by ts])-tuples.
            self._schedule = PiecewiseSchedule(
                fixed_value_or_schedule,
                # Past the last configured timestep, keep the last value.
                outside_value=fixed_value_or_schedule[-1][-1],
                framework=None,
            )
            # As initial tensor value, use the first timestep's (must be 0) value.
            self._curr_value = self._create_tensor_variable(
                initial_value=fixed_value_or_schedule[0][1]
            )

        # If no schedule, pin (fix) given value.
        else:
            self._curr_value = fixed_value_or_schedule

    @staticmethod
    def validate(
        *,
        fixed_value_or_schedule: LearningRateOrSchedule,
        setting_name: str,
        description: str,
    ) -> None:
        """Performs checking of a certain schedule configuration.

        The first entry in `value_or_schedule` (if it's not a fixed value) must have a
        timestep of 0.

        Args:
            fixed_value_or_schedule: A fixed, constant value (in case no schedule should
                be used) or a schedule configuration in the format of
                [[timestep, value], [timestep, value], ...]
                Intermediary timesteps will be assigned to linearly interpolated
                values. A schedule config's first entry must
                start with timestep 0, i.e.: [[0, initial_value], [...]].
            setting_name: The property name of the schedule setting (within a config),
                e.g. `lr` or `entropy_coeff`.
            description: A full text description of the property that's being scheduled,
                e.g. `learning rate`.

        Raises:
            ValueError: In case, errors are found in the schedule's format.
        """
        # A plain number (or None) is a valid fixed value -> nothing to check.
        if (
            isinstance(fixed_value_or_schedule, (int, float))
            or fixed_value_or_schedule is None
        ):
            return

        if not isinstance(fixed_value_or_schedule, (list, tuple)) or (
            len(fixed_value_or_schedule) < 2
        ):
            raise ValueError(
                f"Invalid `{setting_name}` ({fixed_value_or_schedule}) specified! "
                f"Must be a list of at least 2 tuples, each of the form "
                f"(`timestep`, `{description} to reach`), e.g. "
                "`[(0, 0.001), (1e6, 0.0001), (2e6, 0.00005)]`."
            )
        elif fixed_value_or_schedule[0][0] != 0:
            raise ValueError(
                f"When providing a `{setting_name}`, the first timestep must be 0 "
                f"and the corresponding lr value is the initial {description}! You "
                f"provided ts={fixed_value_or_schedule[0][0]} {description}="
                f"{fixed_value_or_schedule[0][1]}."
            )

    def get_current_value(self) -> TensorType:
        """Returns the current value (as a tensor variable).

        This method should be used in loss functions of other (in-graph) places
        where the current value is needed.

        Returns:
            The tensor variable (holding the current value to be used).
        """
        return self._curr_value

    def update(self, timestep: int) -> float:
        """Updates the underlying (framework specific) tensor variable.

        In case of a fixed value, this method does nothing and only returns the fixed
        value as-is.

        Args:
            timestep: The current timestep that the update might depend on.

        Returns:
            The current value of the tensor variable as a python float.
        """
        if self.use_schedule:
            python_value = self._schedule.value(t=timestep)
            if self.framework == "torch":
                # NOTE(review): `torch.tensor(python_value)` is created on the
                # default (CPU) device, while `self._curr_value` may live on
                # `self.device` — presumably `.data = <cpu tensor>` is intended
                # here; confirm device handling.
                self._curr_value.data = torch.tensor(python_value)
            else:
                # tf.Variable path: assign in place.
                self._curr_value.assign(python_value)
        else:
            python_value = self._curr_value

        return python_value

    def _create_tensor_variable(self, initial_value: float) -> TensorType:
        """Creates a framework-specific tensor variable to be scheduled.

        Args:
            initial_value: The initial (float) value for the variable to hold.

        Returns:
            The created framework-specific tensor variable.
        """
        if self.framework == "torch":
            return torch.tensor(
                initial_value,
                requires_grad=False,
                dtype=torch.float32,
                device=self.device,
            )
        else:
            return tf.Variable(
                initial_value,
                trainable=False,
                dtype=tf.float32,
            )
janus/lib/python3.10/_compat_pickle.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This module is used to map the old Python 2 names to the new names used in
2
+ # Python 3 for the pickle module. This needed to make pickle streams
3
+ # generated with Python 2 loadable by Python 3.
4
+
5
+ # This is a copy of lib2to3.fixes.fix_imports.MAPPING. We cannot import
6
+ # lib2to3 and use the mapping defined there, because lib2to3 uses pickle.
7
+ # Thus, this could cause the module to be imported recursively.
8
+ IMPORT_MAPPING = {
9
+ '__builtin__' : 'builtins',
10
+ 'copy_reg': 'copyreg',
11
+ 'Queue': 'queue',
12
+ 'SocketServer': 'socketserver',
13
+ 'ConfigParser': 'configparser',
14
+ 'repr': 'reprlib',
15
+ 'tkFileDialog': 'tkinter.filedialog',
16
+ 'tkSimpleDialog': 'tkinter.simpledialog',
17
+ 'tkColorChooser': 'tkinter.colorchooser',
18
+ 'tkCommonDialog': 'tkinter.commondialog',
19
+ 'Dialog': 'tkinter.dialog',
20
+ 'Tkdnd': 'tkinter.dnd',
21
+ 'tkFont': 'tkinter.font',
22
+ 'tkMessageBox': 'tkinter.messagebox',
23
+ 'ScrolledText': 'tkinter.scrolledtext',
24
+ 'Tkconstants': 'tkinter.constants',
25
+ 'Tix': 'tkinter.tix',
26
+ 'ttk': 'tkinter.ttk',
27
+ 'Tkinter': 'tkinter',
28
+ 'markupbase': '_markupbase',
29
+ '_winreg': 'winreg',
30
+ 'thread': '_thread',
31
+ 'dummy_thread': '_dummy_thread',
32
+ 'dbhash': 'dbm.bsd',
33
+ 'dumbdbm': 'dbm.dumb',
34
+ 'dbm': 'dbm.ndbm',
35
+ 'gdbm': 'dbm.gnu',
36
+ 'xmlrpclib': 'xmlrpc.client',
37
+ 'SimpleXMLRPCServer': 'xmlrpc.server',
38
+ 'httplib': 'http.client',
39
+ 'htmlentitydefs' : 'html.entities',
40
+ 'HTMLParser' : 'html.parser',
41
+ 'Cookie': 'http.cookies',
42
+ 'cookielib': 'http.cookiejar',
43
+ 'BaseHTTPServer': 'http.server',
44
+ 'test.test_support': 'test.support',
45
+ 'commands': 'subprocess',
46
+ 'urlparse' : 'urllib.parse',
47
+ 'robotparser' : 'urllib.robotparser',
48
+ 'urllib2': 'urllib.request',
49
+ 'anydbm': 'dbm',
50
+ '_abcoll' : 'collections.abc',
51
+ }
52
+
53
+
54
+ # This contains rename rules that are easy to handle. We ignore the more
55
+ # complex stuff (e.g. mapping the names in the urllib and types modules).
56
+ # These rules should be run before import names are fixed.
57
+ NAME_MAPPING = {
58
+ ('__builtin__', 'xrange'): ('builtins', 'range'),
59
+ ('__builtin__', 'reduce'): ('functools', 'reduce'),
60
+ ('__builtin__', 'intern'): ('sys', 'intern'),
61
+ ('__builtin__', 'unichr'): ('builtins', 'chr'),
62
+ ('__builtin__', 'unicode'): ('builtins', 'str'),
63
+ ('__builtin__', 'long'): ('builtins', 'int'),
64
+ ('itertools', 'izip'): ('builtins', 'zip'),
65
+ ('itertools', 'imap'): ('builtins', 'map'),
66
+ ('itertools', 'ifilter'): ('builtins', 'filter'),
67
+ ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'),
68
+ ('itertools', 'izip_longest'): ('itertools', 'zip_longest'),
69
+ ('UserDict', 'IterableUserDict'): ('collections', 'UserDict'),
70
+ ('UserList', 'UserList'): ('collections', 'UserList'),
71
+ ('UserString', 'UserString'): ('collections', 'UserString'),
72
+ ('whichdb', 'whichdb'): ('dbm', 'whichdb'),
73
+ ('_socket', 'fromfd'): ('socket', 'fromfd'),
74
+ ('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'),
75
+ ('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'),
76
+ ('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'),
77
+ ('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'),
78
+ ('urllib', 'getproxies'): ('urllib.request', 'getproxies'),
79
+ ('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'),
80
+ ('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'),
81
+ ('urllib', 'quote'): ('urllib.parse', 'quote'),
82
+ ('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'),
83
+ ('urllib', 'unquote'): ('urllib.parse', 'unquote'),
84
+ ('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'),
85
+ ('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'),
86
+ ('urllib', 'urlencode'): ('urllib.parse', 'urlencode'),
87
+ ('urllib', 'urlopen'): ('urllib.request', 'urlopen'),
88
+ ('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'),
89
+ ('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'),
90
+ ('urllib2', 'URLError'): ('urllib.error', 'URLError'),
91
+ }
92
+
93
+ PYTHON2_EXCEPTIONS = (
94
+ "ArithmeticError",
95
+ "AssertionError",
96
+ "AttributeError",
97
+ "BaseException",
98
+ "BufferError",
99
+ "BytesWarning",
100
+ "DeprecationWarning",
101
+ "EOFError",
102
+ "EnvironmentError",
103
+ "Exception",
104
+ "FloatingPointError",
105
+ "FutureWarning",
106
+ "GeneratorExit",
107
+ "IOError",
108
+ "ImportError",
109
+ "ImportWarning",
110
+ "IndentationError",
111
+ "IndexError",
112
+ "KeyError",
113
+ "KeyboardInterrupt",
114
+ "LookupError",
115
+ "MemoryError",
116
+ "NameError",
117
+ "NotImplementedError",
118
+ "OSError",
119
+ "OverflowError",
120
+ "PendingDeprecationWarning",
121
+ "ReferenceError",
122
+ "RuntimeError",
123
+ "RuntimeWarning",
124
+ # StandardError is gone in Python 3, so we map it to Exception
125
+ "StopIteration",
126
+ "SyntaxError",
127
+ "SyntaxWarning",
128
+ "SystemError",
129
+ "SystemExit",
130
+ "TabError",
131
+ "TypeError",
132
+ "UnboundLocalError",
133
+ "UnicodeDecodeError",
134
+ "UnicodeEncodeError",
135
+ "UnicodeError",
136
+ "UnicodeTranslateError",
137
+ "UnicodeWarning",
138
+ "UserWarning",
139
+ "ValueError",
140
+ "Warning",
141
+ "ZeroDivisionError",
142
+ )
143
+
144
+ try:
145
+ WindowsError
146
+ except NameError:
147
+ pass
148
+ else:
149
+ PYTHON2_EXCEPTIONS += ("WindowsError",)
150
+
151
+ for excname in PYTHON2_EXCEPTIONS:
152
+ NAME_MAPPING[("exceptions", excname)] = ("builtins", excname)
153
+
154
+ MULTIPROCESSING_EXCEPTIONS = (
155
+ 'AuthenticationError',
156
+ 'BufferTooShort',
157
+ 'ProcessError',
158
+ 'TimeoutError',
159
+ )
160
+
161
+ for excname in MULTIPROCESSING_EXCEPTIONS:
162
+ NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname)
163
+
164
+ # Same, but for 3.x to 2.x
165
+ REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items())
166
+ assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING)
167
+ REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items())
168
+ assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING)
169
+
170
+ # Non-mutual mappings.
171
+
172
+ IMPORT_MAPPING.update({
173
+ 'cPickle': 'pickle',
174
+ '_elementtree': 'xml.etree.ElementTree',
175
+ 'FileDialog': 'tkinter.filedialog',
176
+ 'SimpleDialog': 'tkinter.simpledialog',
177
+ 'DocXMLRPCServer': 'xmlrpc.server',
178
+ 'SimpleHTTPServer': 'http.server',
179
+ 'CGIHTTPServer': 'http.server',
180
+ # For compatibility with broken pickles saved in old Python 3 versions
181
+ 'UserDict': 'collections',
182
+ 'UserList': 'collections',
183
+ 'UserString': 'collections',
184
+ 'whichdb': 'dbm',
185
+ 'StringIO': 'io',
186
+ 'cStringIO': 'io',
187
+ })
188
+
189
+ REVERSE_IMPORT_MAPPING.update({
190
+ '_bz2': 'bz2',
191
+ '_dbm': 'dbm',
192
+ '_functools': 'functools',
193
+ '_gdbm': 'gdbm',
194
+ '_pickle': 'pickle',
195
+ })
196
+
197
+ NAME_MAPPING.update({
198
+ ('__builtin__', 'basestring'): ('builtins', 'str'),
199
+ ('exceptions', 'StandardError'): ('builtins', 'Exception'),
200
+ ('UserDict', 'UserDict'): ('collections', 'UserDict'),
201
+ ('socket', '_socketobject'): ('socket', 'SocketType'),
202
+ })
203
+
204
+ REVERSE_NAME_MAPPING.update({
205
+ ('_functools', 'reduce'): ('__builtin__', 'reduce'),
206
+ ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'),
207
+ ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'),
208
+ ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'),
209
+ ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'),
210
+ ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'),
211
+ ('xmlrpc.server', 'XMLRPCDocGenerator'):
212
+ ('DocXMLRPCServer', 'XMLRPCDocGenerator'),
213
+ ('xmlrpc.server', 'DocXMLRPCRequestHandler'):
214
+ ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'),
215
+ ('xmlrpc.server', 'DocXMLRPCServer'):
216
+ ('DocXMLRPCServer', 'DocXMLRPCServer'),
217
+ ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'):
218
+ ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'),
219
+ ('http.server', 'SimpleHTTPRequestHandler'):
220
+ ('SimpleHTTPServer', 'SimpleHTTPRequestHandler'),
221
+ ('http.server', 'CGIHTTPRequestHandler'):
222
+ ('CGIHTTPServer', 'CGIHTTPRequestHandler'),
223
+ ('_socket', 'socket'): ('socket', '_socketobject'),
224
+ })
225
+
226
+ PYTHON3_OSERROR_EXCEPTIONS = (
227
+ 'BrokenPipeError',
228
+ 'ChildProcessError',
229
+ 'ConnectionAbortedError',
230
+ 'ConnectionError',
231
+ 'ConnectionRefusedError',
232
+ 'ConnectionResetError',
233
+ 'FileExistsError',
234
+ 'FileNotFoundError',
235
+ 'InterruptedError',
236
+ 'IsADirectoryError',
237
+ 'NotADirectoryError',
238
+ 'PermissionError',
239
+ 'ProcessLookupError',
240
+ 'TimeoutError',
241
+ )
242
+
243
+ for excname in PYTHON3_OSERROR_EXCEPTIONS:
244
+ REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError')
245
+
246
+ PYTHON3_IMPORTERROR_EXCEPTIONS = (
247
+ 'ModuleNotFoundError',
248
+ )
249
+
250
+ for excname in PYTHON3_IMPORTERROR_EXCEPTIONS:
251
+ REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError')
janus/lib/python3.10/contextvars.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from _contextvars import Context, ContextVar, Token, copy_context
2
+
3
+
4
+ __all__ = ('Context', 'ContextVar', 'Token', 'copy_context')
janus/lib/python3.10/copyreg.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Helper to provide extensibility for pickle.
2
+
3
+ This is only useful to add pickle support for extension types defined in
4
+ C, not for instances of user-defined classes.
5
+ """
6
+
7
+ __all__ = ["pickle", "constructor",
8
+ "add_extension", "remove_extension", "clear_extension_cache"]
9
+
10
+ dispatch_table = {}
11
+
12
+ def pickle(ob_type, pickle_function, constructor_ob=None):
13
+ if not callable(pickle_function):
14
+ raise TypeError("reduction functions must be callable")
15
+ dispatch_table[ob_type] = pickle_function
16
+
17
+ # The constructor_ob function is a vestige of safe for unpickling.
18
+ # There is no reason for the caller to pass it anymore.
19
+ if constructor_ob is not None:
20
+ constructor(constructor_ob)
21
+
22
+ def constructor(object):
23
+ if not callable(object):
24
+ raise TypeError("constructors must be callable")
25
+
26
+ # Example: provide pickling support for complex numbers.
27
+
28
+ try:
29
+ complex
30
+ except NameError:
31
+ pass
32
+ else:
33
+
34
+ def pickle_complex(c):
35
+ return complex, (c.real, c.imag)
36
+
37
+ pickle(complex, pickle_complex, complex)
38
+
39
+ def pickle_union(obj):
40
+ import functools, operator
41
+ return functools.reduce, (operator.or_, obj.__args__)
42
+
43
+ pickle(type(int | str), pickle_union)
44
+
45
+ # Support for pickling new-style objects
46
+
47
+ def _reconstructor(cls, base, state):
48
+ if base is object:
49
+ obj = object.__new__(cls)
50
+ else:
51
+ obj = base.__new__(cls, state)
52
+ if base.__init__ != object.__init__:
53
+ base.__init__(obj, state)
54
+ return obj
55
+
56
+ _HEAPTYPE = 1<<9
57
+ _new_type = type(int.__new__)
58
+
59
+ # Python code for object.__reduce_ex__ for protocols 0 and 1
60
+
61
+ def _reduce_ex(self, proto):
62
+ assert proto < 2
63
+ cls = self.__class__
64
+ for base in cls.__mro__:
65
+ if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
66
+ break
67
+ new = base.__new__
68
+ if isinstance(new, _new_type) and new.__self__ is base:
69
+ break
70
+ else:
71
+ base = object # not really reachable
72
+ if base is object:
73
+ state = None
74
+ else:
75
+ if base is cls:
76
+ raise TypeError(f"cannot pickle {cls.__name__!r} object")
77
+ state = base(self)
78
+ args = (cls, base, state)
79
+ try:
80
+ getstate = self.__getstate__
81
+ except AttributeError:
82
+ if getattr(self, "__slots__", None):
83
+ raise TypeError(f"cannot pickle {cls.__name__!r} object: "
84
+ f"a class that defines __slots__ without "
85
+ f"defining __getstate__ cannot be pickled "
86
+ f"with protocol {proto}") from None
87
+ try:
88
+ dict = self.__dict__
89
+ except AttributeError:
90
+ dict = None
91
+ else:
92
+ dict = getstate()
93
+ if dict:
94
+ return _reconstructor, args, dict
95
+ else:
96
+ return _reconstructor, args
97
+
98
+ # Helper for __reduce_ex__ protocol 2
99
+
100
+ def __newobj__(cls, *args):
101
+ return cls.__new__(cls, *args)
102
+
103
+ def __newobj_ex__(cls, args, kwargs):
104
+ """Used by pickle protocol 4, instead of __newobj__ to allow classes with
105
+ keyword-only arguments to be pickled correctly.
106
+ """
107
+ return cls.__new__(cls, *args, **kwargs)
108
+
109
+ def _slotnames(cls):
110
+ """Return a list of slot names for a given class.
111
+
112
+ This needs to find slots defined by the class and its bases, so we
113
+ can't simply return the __slots__ attribute. We must walk down
114
+ the Method Resolution Order and concatenate the __slots__ of each
115
+ class found there. (This assumes classes don't modify their
116
+ __slots__ attribute to misrepresent their slots after the class is
117
+ defined.)
118
+ """
119
+
120
+ # Get the value from a cache in the class if possible
121
+ names = cls.__dict__.get("__slotnames__")
122
+ if names is not None:
123
+ return names
124
+
125
+ # Not cached -- calculate the value
126
+ names = []
127
+ if not hasattr(cls, "__slots__"):
128
+ # This class has no slots
129
+ pass
130
+ else:
131
+ # Slots found -- gather slot names from all base classes
132
+ for c in cls.__mro__:
133
+ if "__slots__" in c.__dict__:
134
+ slots = c.__dict__['__slots__']
135
+ # if class has a single slot, it can be given as a string
136
+ if isinstance(slots, str):
137
+ slots = (slots,)
138
+ for name in slots:
139
+ # special descriptors
140
+ if name in ("__dict__", "__weakref__"):
141
+ continue
142
+ # mangled names
143
+ elif name.startswith('__') and not name.endswith('__'):
144
+ stripped = c.__name__.lstrip('_')
145
+ if stripped:
146
+ names.append('_%s%s' % (stripped, name))
147
+ else:
148
+ names.append(name)
149
+ else:
150
+ names.append(name)
151
+
152
+ # Cache the outcome in the class if at all possible
153
+ try:
154
+ cls.__slotnames__ = names
155
+ except:
156
+ pass # But don't die if we can't
157
+
158
+ return names
159
+
160
+ # A registry of extension codes. This is an ad-hoc compression
161
+ # mechanism. Whenever a global reference to <module>, <name> is about
162
+ # to be pickled, the (<module>, <name>) tuple is looked up here to see
163
+ # if it is a registered extension code for it. Extension codes are
164
+ # universal, so that the meaning of a pickle does not depend on
165
+ # context. (There are also some codes reserved for local use that
166
+ # don't have this restriction.) Codes are positive ints; 0 is
167
+ # reserved.
168
+
169
+ _extension_registry = {} # key -> code
170
+ _inverted_registry = {} # code -> key
171
+ _extension_cache = {} # code -> object
172
+ # Don't ever rebind those names: pickling grabs a reference to them when
173
+ # it's initialized, and won't see a rebinding.
174
+
175
+ def add_extension(module, name, code):
176
+ """Register an extension code."""
177
+ code = int(code)
178
+ if not 1 <= code <= 0x7fffffff:
179
+ raise ValueError("code out of range")
180
+ key = (module, name)
181
+ if (_extension_registry.get(key) == code and
182
+ _inverted_registry.get(code) == key):
183
+ return # Redundant registrations are benign
184
+ if key in _extension_registry:
185
+ raise ValueError("key %s is already registered with code %s" %
186
+ (key, _extension_registry[key]))
187
+ if code in _inverted_registry:
188
+ raise ValueError("code %s is already in use for key %s" %
189
+ (code, _inverted_registry[code]))
190
+ _extension_registry[key] = code
191
+ _inverted_registry[code] = key
192
+
193
+ def remove_extension(module, name, code):
194
+ """Unregister an extension code. For testing only."""
195
+ key = (module, name)
196
+ if (_extension_registry.get(key) != code or
197
+ _inverted_registry.get(code) != key):
198
+ raise ValueError("key %s is not registered with code %s" %
199
+ (key, code))
200
+ del _extension_registry[key]
201
+ del _inverted_registry[code]
202
+ if code in _extension_cache:
203
+ del _extension_cache[code]
204
+
205
+ def clear_extension_cache():
206
+ _extension_cache.clear()
207
+
208
+ # Standard extension code assignments
209
+
210
+ # Reserved ranges
211
+
212
+ # First Last Count Purpose
213
+ # 1 127 127 Reserved for Python standard library
214
+ # 128 191 64 Reserved for Zope
215
+ # 192 239 48 Reserved for 3rd parties
216
+ # 240 255 16 Reserved for private use (will never be assigned)
217
+ # 256 Inf Inf Reserved for future assignment
218
+
219
+ # Extension codes are assigned by the Python Software Foundation.
janus/lib/python3.10/fnmatch.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Filename matching with shell patterns.
2
+
3
+ fnmatch(FILENAME, PATTERN) matches according to the local convention.
4
+ fnmatchcase(FILENAME, PATTERN) always takes case in account.
5
+
6
+ The functions operate by translating the pattern into a regular
7
+ expression. They cache the compiled regular expressions for speed.
8
+
9
+ The function translate(PATTERN) returns a regular expression
10
+ corresponding to PATTERN. (It does not compile it.)
11
+ """
12
+ import os
13
+ import posixpath
14
+ import re
15
+ import functools
16
+
17
+ __all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
18
+
19
+ # Build a thread-safe incrementing counter to help create unique regexp group
20
+ # names across calls.
21
+ from itertools import count
22
+ _nextgroupnum = count().__next__
23
+ del count
24
+
25
+ def fnmatch(name, pat):
26
+ """Test whether FILENAME matches PATTERN.
27
+
28
+ Patterns are Unix shell style:
29
+
30
+ * matches everything
31
+ ? matches any single character
32
+ [seq] matches any character in seq
33
+ [!seq] matches any char not in seq
34
+
35
+ An initial period in FILENAME is not special.
36
+ Both FILENAME and PATTERN are first case-normalized
37
+ if the operating system requires it.
38
+ If you don't want this, use fnmatchcase(FILENAME, PATTERN).
39
+ """
40
+ name = os.path.normcase(name)
41
+ pat = os.path.normcase(pat)
42
+ return fnmatchcase(name, pat)
43
+
44
+ @functools.lru_cache(maxsize=256, typed=True)
45
+ def _compile_pattern(pat):
46
+ if isinstance(pat, bytes):
47
+ pat_str = str(pat, 'ISO-8859-1')
48
+ res_str = translate(pat_str)
49
+ res = bytes(res_str, 'ISO-8859-1')
50
+ else:
51
+ res = translate(pat)
52
+ return re.compile(res).match
53
+
54
+ def filter(names, pat):
55
+ """Construct a list from those elements of the iterable NAMES that match PAT."""
56
+ result = []
57
+ pat = os.path.normcase(pat)
58
+ match = _compile_pattern(pat)
59
+ if os.path is posixpath:
60
+ # normcase on posix is NOP. Optimize it away from the loop.
61
+ for name in names:
62
+ if match(name):
63
+ result.append(name)
64
+ else:
65
+ for name in names:
66
+ if match(os.path.normcase(name)):
67
+ result.append(name)
68
+ return result
69
+
70
+ def fnmatchcase(name, pat):
71
+ """Test whether FILENAME matches PATTERN, including case.
72
+
73
+ This is a version of fnmatch() which doesn't case-normalize
74
+ its arguments.
75
+ """
76
+ match = _compile_pattern(pat)
77
+ return match(name) is not None
78
+
79
+
80
+ def translate(pat):
81
+ """Translate a shell PATTERN to a regular expression.
82
+
83
+ There is no way to quote meta-characters.
84
+ """
85
+
86
+ STAR = object()
87
+ res = []
88
+ add = res.append
89
+ i, n = 0, len(pat)
90
+ while i < n:
91
+ c = pat[i]
92
+ i = i+1
93
+ if c == '*':
94
+ # compress consecutive `*` into one
95
+ if (not res) or res[-1] is not STAR:
96
+ add(STAR)
97
+ elif c == '?':
98
+ add('.')
99
+ elif c == '[':
100
+ j = i
101
+ if j < n and pat[j] == '!':
102
+ j = j+1
103
+ if j < n and pat[j] == ']':
104
+ j = j+1
105
+ while j < n and pat[j] != ']':
106
+ j = j+1
107
+ if j >= n:
108
+ add('\\[')
109
+ else:
110
+ stuff = pat[i:j]
111
+ if '-' not in stuff:
112
+ stuff = stuff.replace('\\', r'\\')
113
+ else:
114
+ chunks = []
115
+ k = i+2 if pat[i] == '!' else i+1
116
+ while True:
117
+ k = pat.find('-', k, j)
118
+ if k < 0:
119
+ break
120
+ chunks.append(pat[i:k])
121
+ i = k+1
122
+ k = k+3
123
+ chunk = pat[i:j]
124
+ if chunk:
125
+ chunks.append(chunk)
126
+ else:
127
+ chunks[-1] += '-'
128
+ # Remove empty ranges -- invalid in RE.
129
+ for k in range(len(chunks)-1, 0, -1):
130
+ if chunks[k-1][-1] > chunks[k][0]:
131
+ chunks[k-1] = chunks[k-1][:-1] + chunks[k][1:]
132
+ del chunks[k]
133
+ # Escape backslashes and hyphens for set difference (--).
134
+ # Hyphens that create ranges shouldn't be escaped.
135
+ stuff = '-'.join(s.replace('\\', r'\\').replace('-', r'\-')
136
+ for s in chunks)
137
+ # Escape set operations (&&, ~~ and ||).
138
+ stuff = re.sub(r'([&~|])', r'\\\1', stuff)
139
+ i = j+1
140
+ if not stuff:
141
+ # Empty range: never match.
142
+ add('(?!)')
143
+ elif stuff == '!':
144
+ # Negated empty range: match any character.
145
+ add('.')
146
+ else:
147
+ if stuff[0] == '!':
148
+ stuff = '^' + stuff[1:]
149
+ elif stuff[0] in ('^', '['):
150
+ stuff = '\\' + stuff
151
+ add(f'[{stuff}]')
152
+ else:
153
+ add(re.escape(c))
154
+ assert i == n
155
+
156
+ # Deal with STARs.
157
+ inp = res
158
+ res = []
159
+ add = res.append
160
+ i, n = 0, len(inp)
161
+ # Fixed pieces at the start?
162
+ while i < n and inp[i] is not STAR:
163
+ add(inp[i])
164
+ i += 1
165
+ # Now deal with STAR fixed STAR fixed ...
166
+ # For an interior `STAR fixed` pairing, we want to do a minimal
167
+ # .*? match followed by `fixed`, with no possibility of backtracking.
168
+ # We can't spell that directly, but can trick it into working by matching
169
+ # .*?fixed
170
+ # in a lookahead assertion, save the matched part in a group, then
171
+ # consume that group via a backreference. If the overall match fails,
172
+ # the lookahead assertion won't try alternatives. So the translation is:
173
+ # (?=(?P<name>.*?fixed))(?P=name)
174
+ # Group names are created as needed: g0, g1, g2, ...
175
+ # The numbers are obtained from _nextgroupnum() to ensure they're unique
176
+ # across calls and across threads. This is because people rely on the
177
+ # undocumented ability to join multiple translate() results together via
178
+ # "|" to build large regexps matching "one of many" shell patterns.
179
+ while i < n:
180
+ assert inp[i] is STAR
181
+ i += 1
182
+ if i == n:
183
+ add(".*")
184
+ break
185
+ assert inp[i] is not STAR
186
+ fixed = []
187
+ while i < n and inp[i] is not STAR:
188
+ fixed.append(inp[i])
189
+ i += 1
190
+ fixed = "".join(fixed)
191
+ if i == n:
192
+ add(".*")
193
+ add(fixed)
194
+ else:
195
+ groupnum = _nextgroupnum()
196
+ add(f"(?=(?P<g{groupnum}>.*?{fixed}))(?P=g{groupnum})")
197
+ assert i == n
198
+ res = "".join(res)
199
+ return fr'(?s:{res})\Z'
janus/lib/python3.10/lzma.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Interface to the liblzma compression library.
2
+
3
+ This module provides a class for reading and writing compressed files,
4
+ classes for incremental (de)compression, and convenience functions for
5
+ one-shot (de)compression.
6
+
7
+ These classes and functions support both the XZ and legacy LZMA
8
+ container formats, as well as raw compressed data streams.
9
+ """
10
+
11
+ __all__ = [
12
+ "CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256",
13
+ "CHECK_ID_MAX", "CHECK_UNKNOWN",
14
+ "FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64",
15
+ "FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC",
16
+ "FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW",
17
+ "MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4",
18
+ "MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME",
19
+
20
+ "LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError",
21
+ "open", "compress", "decompress", "is_check_supported",
22
+ ]
23
+
24
+ import builtins
25
+ import io
26
+ import os
27
+ from _lzma import *
28
+ from _lzma import _encode_filter_properties, _decode_filter_properties
29
+ import _compression
30
+
31
+
32
+ _MODE_CLOSED = 0
33
+ _MODE_READ = 1
34
+ # Value 2 no longer used
35
+ _MODE_WRITE = 3
36
+
37
+
38
+ class LZMAFile(_compression.BaseStream):
39
+
40
+ """A file object providing transparent LZMA (de)compression.
41
+
42
+ An LZMAFile can act as a wrapper for an existing file object, or
43
+ refer directly to a named file on disk.
44
+
45
+ Note that LZMAFile provides a *binary* file interface - data read
46
+ is returned as bytes, and data to be written must be given as bytes.
47
+ """
48
+
49
+ def __init__(self, filename=None, mode="r", *,
50
+ format=None, check=-1, preset=None, filters=None):
51
+ """Open an LZMA-compressed file in binary mode.
52
+
53
+ filename can be either an actual file name (given as a str,
54
+ bytes, or PathLike object), in which case the named file is
55
+ opened, or it can be an existing file object to read from or
56
+ write to.
57
+
58
+ mode can be "r" for reading (default), "w" for (over)writing,
59
+ "x" for creating exclusively, or "a" for appending. These can
60
+ equivalently be given as "rb", "wb", "xb" and "ab" respectively.
61
+
62
+ format specifies the container format to use for the file.
63
+ If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the
64
+ default is FORMAT_XZ.
65
+
66
+ check specifies the integrity check to use. This argument can
67
+ only be used when opening a file for writing. For FORMAT_XZ,
68
+ the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not
69
+ support integrity checks - for these formats, check must be
70
+ omitted, or be CHECK_NONE.
71
+
72
+ When opening a file for reading, the *preset* argument is not
73
+ meaningful, and should be omitted. The *filters* argument should
74
+ also be omitted, except when format is FORMAT_RAW (in which case
75
+ it is required).
76
+
77
+ When opening a file for writing, the settings used by the
78
+ compressor can be specified either as a preset compression
79
+ level (with the *preset* argument), or in detail as a custom
80
+ filter chain (with the *filters* argument). For FORMAT_XZ and
81
+ FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset
82
+ level. For FORMAT_RAW, the caller must always specify a filter
83
+ chain; the raw compressor does not support preset compression
84
+ levels.
85
+
86
+ preset (if provided) should be an integer in the range 0-9,
87
+ optionally OR-ed with the constant PRESET_EXTREME.
88
+
89
+ filters (if provided) should be a sequence of dicts. Each dict
90
+ should have an entry for "id" indicating ID of the filter, plus
91
+ additional entries for options to the filter.
92
+ """
93
+ self._fp = None
94
+ self._closefp = False
95
+ self._mode = _MODE_CLOSED
96
+
97
+ if mode in ("r", "rb"):
98
+ if check != -1:
99
+ raise ValueError("Cannot specify an integrity check "
100
+ "when opening a file for reading")
101
+ if preset is not None:
102
+ raise ValueError("Cannot specify a preset compression "
103
+ "level when opening a file for reading")
104
+ if format is None:
105
+ format = FORMAT_AUTO
106
+ mode_code = _MODE_READ
107
+ elif mode in ("w", "wb", "a", "ab", "x", "xb"):
108
+ if format is None:
109
+ format = FORMAT_XZ
110
+ mode_code = _MODE_WRITE
111
+ self._compressor = LZMACompressor(format=format, check=check,
112
+ preset=preset, filters=filters)
113
+ self._pos = 0
114
+ else:
115
+ raise ValueError("Invalid mode: {!r}".format(mode))
116
+
117
+ if isinstance(filename, (str, bytes, os.PathLike)):
118
+ if "b" not in mode:
119
+ mode += "b"
120
+ self._fp = builtins.open(filename, mode)
121
+ self._closefp = True
122
+ self._mode = mode_code
123
+ elif hasattr(filename, "read") or hasattr(filename, "write"):
124
+ self._fp = filename
125
+ self._mode = mode_code
126
+ else:
127
+ raise TypeError("filename must be a str, bytes, file or PathLike object")
128
+
129
+ if self._mode == _MODE_READ:
130
+ raw = _compression.DecompressReader(self._fp, LZMADecompressor,
131
+ trailing_error=LZMAError, format=format, filters=filters)
132
+ self._buffer = io.BufferedReader(raw)
133
+
134
+ def close(self):
135
+ """Flush and close the file.
136
+
137
+ May be called more than once without error. Once the file is
138
+ closed, any other operation on it will raise a ValueError.
139
+ """
140
+ if self._mode == _MODE_CLOSED:
141
+ return
142
+ try:
143
+ if self._mode == _MODE_READ:
144
+ self._buffer.close()
145
+ self._buffer = None
146
+ elif self._mode == _MODE_WRITE:
147
+ self._fp.write(self._compressor.flush())
148
+ self._compressor = None
149
+ finally:
150
+ try:
151
+ if self._closefp:
152
+ self._fp.close()
153
+ finally:
154
+ self._fp = None
155
+ self._closefp = False
156
+ self._mode = _MODE_CLOSED
157
+
158
+ @property
159
+ def closed(self):
160
+ """True if this file is closed."""
161
+ return self._mode == _MODE_CLOSED
162
+
163
+ def fileno(self):
164
+ """Return the file descriptor for the underlying file."""
165
+ self._check_not_closed()
166
+ return self._fp.fileno()
167
+
168
+ def seekable(self):
169
+ """Return whether the file supports seeking."""
170
+ return self.readable() and self._buffer.seekable()
171
+
172
+ def readable(self):
173
+ """Return whether the file was opened for reading."""
174
+ self._check_not_closed()
175
+ return self._mode == _MODE_READ
176
+
177
+ def writable(self):
178
+ """Return whether the file was opened for writing."""
179
+ self._check_not_closed()
180
+ return self._mode == _MODE_WRITE
181
+
182
+ def peek(self, size=-1):
183
+ """Return buffered data without advancing the file position.
184
+
185
+ Always returns at least one byte of data, unless at EOF.
186
+ The exact number of bytes returned is unspecified.
187
+ """
188
+ self._check_can_read()
189
+ # Relies on the undocumented fact that BufferedReader.peek() always
190
+ # returns at least one byte (except at EOF)
191
+ return self._buffer.peek(size)
192
+
193
+ def read(self, size=-1):
194
+ """Read up to size uncompressed bytes from the file.
195
+
196
+ If size is negative or omitted, read until EOF is reached.
197
+ Returns b"" if the file is already at EOF.
198
+ """
199
+ self._check_can_read()
200
+ return self._buffer.read(size)
201
+
202
+ def read1(self, size=-1):
203
+ """Read up to size uncompressed bytes, while trying to avoid
204
+ making multiple reads from the underlying stream. Reads up to a
205
+ buffer's worth of data if size is negative.
206
+
207
+ Returns b"" if the file is at EOF.
208
+ """
209
+ self._check_can_read()
210
+ if size < 0:
211
+ size = io.DEFAULT_BUFFER_SIZE
212
+ return self._buffer.read1(size)
213
+
214
+ def readline(self, size=-1):
215
+ """Read a line of uncompressed bytes from the file.
216
+
217
+ The terminating newline (if present) is retained. If size is
218
+ non-negative, no more than size bytes will be read (in which
219
+ case the line may be incomplete). Returns b'' if already at EOF.
220
+ """
221
+ self._check_can_read()
222
+ return self._buffer.readline(size)
223
+
224
+ def write(self, data):
225
+ """Write a bytes object to the file.
226
+
227
+ Returns the number of uncompressed bytes written, which is
228
+ always the length of data in bytes. Note that due to buffering,
229
+ the file on disk may not reflect the data written until close()
230
+ is called.
231
+ """
232
+ self._check_can_write()
233
+ if isinstance(data, (bytes, bytearray)):
234
+ length = len(data)
235
+ else:
236
+ # accept any data that supports the buffer protocol
237
+ data = memoryview(data)
238
+ length = data.nbytes
239
+
240
+ compressed = self._compressor.compress(data)
241
+ self._fp.write(compressed)
242
+ self._pos += length
243
+ return length
244
+
245
+ def seek(self, offset, whence=io.SEEK_SET):
246
+ """Change the file position.
247
+
248
+ The new position is specified by offset, relative to the
249
+ position indicated by whence. Possible values for whence are:
250
+
251
+ 0: start of stream (default): offset must not be negative
252
+ 1: current stream position
253
+ 2: end of stream; offset must not be positive
254
+
255
+ Returns the new file position.
256
+
257
+ Note that seeking is emulated, so depending on the parameters,
258
+ this operation may be extremely slow.
259
+ """
260
+ self._check_can_seek()
261
+ return self._buffer.seek(offset, whence)
262
+
263
+ def tell(self):
264
+ """Return the current file position."""
265
+ self._check_not_closed()
266
+ if self._mode == _MODE_READ:
267
+ return self._buffer.tell()
268
+ return self._pos
269
+
270
+
271
+ def open(filename, mode="rb", *,
272
+ format=None, check=-1, preset=None, filters=None,
273
+ encoding=None, errors=None, newline=None):
274
+ """Open an LZMA-compressed file in binary or text mode.
275
+
276
+ filename can be either an actual file name (given as a str, bytes,
277
+ or PathLike object), in which case the named file is opened, or it
278
+ can be an existing file object to read from or write to.
279
+
280
+ The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb",
281
+ "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text
282
+ mode.
283
+
284
+ The format, check, preset and filters arguments specify the
285
+ compression settings, as for LZMACompressor, LZMADecompressor and
286
+ LZMAFile.
287
+
288
+ For binary mode, this function is equivalent to the LZMAFile
289
+ constructor: LZMAFile(filename, mode, ...). In this case, the
290
+ encoding, errors and newline arguments must not be provided.
291
+
292
+ For text mode, an LZMAFile object is created, and wrapped in an
293
+ io.TextIOWrapper instance with the specified encoding, error
294
+ handling behavior, and line ending(s).
295
+
296
+ """
297
+ if "t" in mode:
298
+ if "b" in mode:
299
+ raise ValueError("Invalid mode: %r" % (mode,))
300
+ else:
301
+ if encoding is not None:
302
+ raise ValueError("Argument 'encoding' not supported in binary mode")
303
+ if errors is not None:
304
+ raise ValueError("Argument 'errors' not supported in binary mode")
305
+ if newline is not None:
306
+ raise ValueError("Argument 'newline' not supported in binary mode")
307
+
308
+ lz_mode = mode.replace("t", "")
309
+ binary_file = LZMAFile(filename, lz_mode, format=format, check=check,
310
+ preset=preset, filters=filters)
311
+
312
+ if "t" in mode:
313
+ encoding = io.text_encoding(encoding)
314
+ return io.TextIOWrapper(binary_file, encoding, errors, newline)
315
+ else:
316
+ return binary_file
317
+
318
+
319
def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None):
    """Compress *data* in one shot and return the compressed bytes.

    See LZMACompressor's docstring for the meaning of *format*,
    *check*, *preset* and *filters*. For incremental compression, use
    an LZMACompressor directly.
    """
    comp = LZMACompressor(format, check, preset, filters)
    body = comp.compress(data)
    # flush() emits the end-of-stream marker and any buffered output.
    return body + comp.flush()
329
+
330
+
331
def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None):
    """Decompress *data* in one shot, concatenating all streams found.

    See LZMADecompressor's docstring for the meaning of *format*,
    *memlimit* and *filters*. For incremental decompression, use an
    LZMADecompressor directly.
    """
    chunks = []
    while True:
        # Each concatenated stream needs a fresh decompressor.
        decomp = LZMADecompressor(format, memlimit, filters)
        try:
            chunk = decomp.decompress(data)
        except LZMAError:
            if not chunks:
                raise  # The very first stream is invalid: propagate.
            break      # Trailing junk after valid streams: ignore it.
        chunks.append(chunk)
        if not decomp.eof:
            raise LZMAError("Compressed data ended before the "
                            "end-of-stream marker was reached")
        data = decomp.unused_data
        if not data:
            break
    return b"".join(chunks)