Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +4 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/actor_manager.py +916 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/annotations.py +213 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/filter.py +420 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/framework.py +352 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/minibatch_utils.py +337 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/policy.py +303 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/__init__.py +44 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/__pycache__/multi_agent_prioritized_episode_buffer.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/__pycache__/simple_replay_buffer.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/fifo_replay_buffer.py +109 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/multi_agent_episode_buffer.py +1026 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/multi_agent_prioritized_episode_buffer.py +923 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/multi_agent_replay_buffer.py +392 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/prioritized_replay_buffer.py +240 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/replay_buffer.py +374 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/reservoir_replay_buffer.py +132 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/simple_replay_buffer.py +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/utils.py +440 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/sgd.py +136 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/tensor_dtype.py +65 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/test_utils.py +1817 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/tf_utils.py +812 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/threading.py +34 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/torch_utils.py +745 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/typing.py +310 -0
- janus/lib/libasan.so.6.0.0 +3 -0
- janus/lib/libform.so.6 +0 -0
- janus/lib/libgcc_s.so.1 +3 -0
- janus/lib/libhistory.so.8.2 +0 -0
- janus/lib/libpanelw.so.6 +0 -0
- janus/lib/libssl.so +3 -0
- janus/lib/libstdc++.so.6.0.29 +3 -0
- janus/lib/libuuid.so.1 +0 -0
- janus/lib/tcl8.6/encoding/cns11643.enc +1584 -0
- janus/lib/tcl8.6/encoding/cp1250.enc +20 -0
- janus/lib/tcl8.6/encoding/cp1251.enc +20 -0
- janus/lib/tcl8.6/encoding/cp1252.enc +20 -0
- janus/lib/tcl8.6/encoding/cp1254.enc +20 -0
- janus/lib/tcl8.6/encoding/cp1255.enc +20 -0
- janus/lib/tcl8.6/encoding/cp1257.enc +20 -0
- janus/lib/tcl8.6/encoding/cp737.enc +20 -0
- janus/lib/tcl8.6/encoding/cp775.enc +20 -0
- janus/lib/tcl8.6/encoding/cp861.enc +20 -0
- janus/lib/tcl8.6/encoding/cp866.enc +20 -0
- janus/lib/tcl8.6/encoding/cp936.enc +0 -0
- janus/lib/tcl8.6/encoding/dingbats.enc +20 -0
- janus/lib/tcl8.6/encoding/euc-cn.enc +1397 -0
- janus/lib/tcl8.6/encoding/gb1988.enc +20 -0
- janus/lib/tcl8.6/encoding/iso2022-kr.enc +7 -0
.gitattributes
CHANGED
|
@@ -1527,3 +1527,7 @@ janus/lib/liblsan.so filter=lfs diff=lfs merge=lfs -text
|
|
| 1527 |
janus/lib/libncursesw.so.6 filter=lfs diff=lfs merge=lfs -text
|
| 1528 |
janus/lib/libtsan.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
|
| 1529 |
janus/lib/libtsan.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1527 |
janus/lib/libncursesw.so.6 filter=lfs diff=lfs merge=lfs -text
|
| 1528 |
janus/lib/libtsan.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
|
| 1529 |
janus/lib/libtsan.so filter=lfs diff=lfs merge=lfs -text
|
| 1530 |
+
janus/lib/libstdc++.so.6.0.29 filter=lfs diff=lfs merge=lfs -text
|
| 1531 |
+
janus/lib/libasan.so.6.0.0 filter=lfs diff=lfs merge=lfs -text
|
| 1532 |
+
janus/lib/libssl.so filter=lfs diff=lfs merge=lfs -text
|
| 1533 |
+
janus/lib/libgcc_s.so.1 filter=lfs diff=lfs merge=lfs -text
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/actor_manager.py
ADDED
|
@@ -0,0 +1,916 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
import copy
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
import logging
|
| 5 |
+
import sys
|
| 6 |
+
import time
|
| 7 |
+
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import ray
|
| 10 |
+
from ray.actor import ActorHandle
|
| 11 |
+
from ray.exceptions import RayError, RayTaskError
|
| 12 |
+
from ray.rllib.utils.typing import T
|
| 13 |
+
from ray.util.annotations import DeveloperAPI
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@DeveloperAPI
|
| 20 |
+
class ResultOrError:
|
| 21 |
+
"""A wrapper around a result or a RayError thrown during remote task/actor calls.
|
| 22 |
+
|
| 23 |
+
This is used to return data from `FaultTolerantActorManager` that allows us to
|
| 24 |
+
distinguish between RayErrors (remote actor related) and valid results.
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
def __init__(self, result: Any = None, error: Exception = None):
|
| 28 |
+
"""One and only one of result or error should be set.
|
| 29 |
+
|
| 30 |
+
Args:
|
| 31 |
+
result: The result of the computation. Note that None is a valid result if
|
| 32 |
+
the remote function does not return anything.
|
| 33 |
+
error: Alternatively, the error that occurred during the computation.
|
| 34 |
+
"""
|
| 35 |
+
self._result = result
|
| 36 |
+
self._error = (
|
| 37 |
+
# Easier to handle if we show the user the original error.
|
| 38 |
+
error.as_instanceof_cause()
|
| 39 |
+
if isinstance(error, RayTaskError)
|
| 40 |
+
else error
|
| 41 |
+
)
|
| 42 |
+
|
| 43 |
+
@property
|
| 44 |
+
def ok(self):
|
| 45 |
+
return self._error is None
|
| 46 |
+
|
| 47 |
+
def get(self):
|
| 48 |
+
"""Returns the result or the error."""
|
| 49 |
+
if self._error:
|
| 50 |
+
return self._error
|
| 51 |
+
else:
|
| 52 |
+
return self._result
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@DeveloperAPI
|
| 56 |
+
@dataclass
|
| 57 |
+
class CallResult:
|
| 58 |
+
"""Represents a single result from a call to an actor.
|
| 59 |
+
|
| 60 |
+
Each CallResult contains the index of the actor that was called
|
| 61 |
+
plus the result or error from the call.
|
| 62 |
+
"""
|
| 63 |
+
|
| 64 |
+
actor_id: int
|
| 65 |
+
result_or_error: ResultOrError
|
| 66 |
+
tag: str
|
| 67 |
+
|
| 68 |
+
@property
|
| 69 |
+
def ok(self):
|
| 70 |
+
"""Passes through the ok property from the result_or_error."""
|
| 71 |
+
return self.result_or_error.ok
|
| 72 |
+
|
| 73 |
+
def get(self):
|
| 74 |
+
"""Passes through the get method from the result_or_error."""
|
| 75 |
+
return self.result_or_error.get()
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@DeveloperAPI
|
| 79 |
+
class RemoteCallResults:
|
| 80 |
+
"""Represents a list of results from calls to a set of actors.
|
| 81 |
+
|
| 82 |
+
CallResults provides convenient APIs to iterate over the results
|
| 83 |
+
while skipping errors, etc.
|
| 84 |
+
|
| 85 |
+
.. testcode::
|
| 86 |
+
:skipif: True
|
| 87 |
+
|
| 88 |
+
manager = FaultTolerantActorManager(
|
| 89 |
+
actors, max_remote_requests_in_flight_per_actor=2,
|
| 90 |
+
)
|
| 91 |
+
results = manager.foreach_actor(lambda w: w.call())
|
| 92 |
+
|
| 93 |
+
# Iterate over all results ignoring errors.
|
| 94 |
+
for result in results.ignore_errors():
|
| 95 |
+
print(result.get())
|
| 96 |
+
"""
|
| 97 |
+
|
| 98 |
+
class _Iterator:
|
| 99 |
+
"""An iterator over the results of a remote call."""
|
| 100 |
+
|
| 101 |
+
def __init__(self, call_results: List[CallResult]):
|
| 102 |
+
self._call_results = call_results
|
| 103 |
+
|
| 104 |
+
def __iter__(self) -> Iterator[CallResult]:
|
| 105 |
+
return self
|
| 106 |
+
|
| 107 |
+
def __next__(self) -> CallResult:
|
| 108 |
+
if not self._call_results:
|
| 109 |
+
raise StopIteration
|
| 110 |
+
return self._call_results.pop(0)
|
| 111 |
+
|
| 112 |
+
def __init__(self):
|
| 113 |
+
self.result_or_errors: List[CallResult] = []
|
| 114 |
+
|
| 115 |
+
def add_result(self, actor_id: int, result_or_error: ResultOrError, tag: str):
|
| 116 |
+
"""Add index of a remote actor plus the call result to the list.
|
| 117 |
+
|
| 118 |
+
Args:
|
| 119 |
+
actor_id: ID of the remote actor.
|
| 120 |
+
result_or_error: The result or error from the call.
|
| 121 |
+
tag: A description to identify the call.
|
| 122 |
+
"""
|
| 123 |
+
self.result_or_errors.append(CallResult(actor_id, result_or_error, tag))
|
| 124 |
+
|
| 125 |
+
def __iter__(self) -> Iterator[ResultOrError]:
|
| 126 |
+
"""Return an iterator over the results."""
|
| 127 |
+
# Shallow copy the list.
|
| 128 |
+
return self._Iterator(copy.copy(self.result_or_errors))
|
| 129 |
+
|
| 130 |
+
def __len__(self) -> int:
|
| 131 |
+
return len(self.result_or_errors)
|
| 132 |
+
|
| 133 |
+
def ignore_errors(self) -> Iterator[ResultOrError]:
|
| 134 |
+
"""Return an iterator over the results, skipping all errors."""
|
| 135 |
+
return self._Iterator([r for r in self.result_or_errors if r.ok])
|
| 136 |
+
|
| 137 |
+
def ignore_ray_errors(self) -> Iterator[ResultOrError]:
|
| 138 |
+
"""Return an iterator over the results, skipping only Ray errors.
|
| 139 |
+
|
| 140 |
+
Similar to ignore_errors, but only skips Errors raised because of
|
| 141 |
+
remote actor problems (often get restored automatcially).
|
| 142 |
+
This is useful for callers that want to handle application errors differently
|
| 143 |
+
from Ray errors.
|
| 144 |
+
"""
|
| 145 |
+
return self._Iterator(
|
| 146 |
+
[r for r in self.result_or_errors if not isinstance(r.get(), RayError)]
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
@DeveloperAPI
|
| 151 |
+
class FaultAwareApply:
|
| 152 |
+
@DeveloperAPI
|
| 153 |
+
def ping(self) -> str:
|
| 154 |
+
"""Ping the actor. Can be used as a health check.
|
| 155 |
+
|
| 156 |
+
Returns:
|
| 157 |
+
"pong" if actor is up and well.
|
| 158 |
+
"""
|
| 159 |
+
return "pong"
|
| 160 |
+
|
| 161 |
+
@DeveloperAPI
|
| 162 |
+
def apply(
|
| 163 |
+
self,
|
| 164 |
+
func: Callable[[Any, Optional[Any], Optional[Any]], T],
|
| 165 |
+
*args,
|
| 166 |
+
**kwargs,
|
| 167 |
+
) -> T:
|
| 168 |
+
"""Calls the given function with this Actor instance.
|
| 169 |
+
|
| 170 |
+
A generic interface for applying arbitrary member functions on a
|
| 171 |
+
remote actor.
|
| 172 |
+
|
| 173 |
+
Args:
|
| 174 |
+
func: The function to call, with this actor as first
|
| 175 |
+
argument, followed by args, and kwargs.
|
| 176 |
+
args: Optional additional args to pass to the function call.
|
| 177 |
+
kwargs: Optional additional kwargs to pass to the function call.
|
| 178 |
+
|
| 179 |
+
Returns:
|
| 180 |
+
The return value of the function call.
|
| 181 |
+
"""
|
| 182 |
+
try:
|
| 183 |
+
return func(self, *args, **kwargs)
|
| 184 |
+
except Exception as e:
|
| 185 |
+
# Actor should be recreated by Ray.
|
| 186 |
+
if self.config.restart_failed_env_runners:
|
| 187 |
+
logger.exception(f"Worker exception caught during `apply()`: {e}")
|
| 188 |
+
# Small delay to allow logs messages to propagate.
|
| 189 |
+
time.sleep(self.config.delay_between_env_runner_restarts_s)
|
| 190 |
+
# Kill this worker so Ray Core can restart it.
|
| 191 |
+
sys.exit(1)
|
| 192 |
+
# Actor should be left dead.
|
| 193 |
+
else:
|
| 194 |
+
raise e
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
@DeveloperAPI
|
| 198 |
+
class FaultTolerantActorManager:
|
| 199 |
+
"""A manager that is aware of the healthiness of remote actors.
|
| 200 |
+
|
| 201 |
+
.. testcode::
|
| 202 |
+
|
| 203 |
+
import time
|
| 204 |
+
import ray
|
| 205 |
+
from ray.rllib.utils.actor_manager import FaultTolerantActorManager
|
| 206 |
+
|
| 207 |
+
@ray.remote
|
| 208 |
+
class MyActor:
|
| 209 |
+
def apply(self, fn):
|
| 210 |
+
return fn(self)
|
| 211 |
+
|
| 212 |
+
def do_something(self):
|
| 213 |
+
return True
|
| 214 |
+
|
| 215 |
+
actors = [MyActor.remote() for _ in range(3)]
|
| 216 |
+
manager = FaultTolerantActorManager(
|
| 217 |
+
actors, max_remote_requests_in_flight_per_actor=2,
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
# Synchronous remote calls.
|
| 221 |
+
results = manager.foreach_actor(lambda actor: actor.do_something())
|
| 222 |
+
# Print results ignoring returned errors.
|
| 223 |
+
print([r.get() for r in results.ignore_errors()])
|
| 224 |
+
|
| 225 |
+
# Asynchronous remote calls.
|
| 226 |
+
manager.foreach_actor_async(lambda actor: actor.do_something())
|
| 227 |
+
time.sleep(2) # Wait for the tasks to finish.
|
| 228 |
+
for r in manager.fetch_ready_async_reqs():
|
| 229 |
+
# Handle result and errors.
|
| 230 |
+
if r.ok:
|
| 231 |
+
print(r.get())
|
| 232 |
+
else:
|
| 233 |
+
print("Error: {}".format(r.get()))
|
| 234 |
+
"""
|
| 235 |
+
|
| 236 |
+
@dataclass
|
| 237 |
+
class _ActorState:
|
| 238 |
+
"""State of a single actor."""
|
| 239 |
+
|
| 240 |
+
# Num of outstanding async requests for this actor.
|
| 241 |
+
num_in_flight_async_requests: int = 0
|
| 242 |
+
# Whether this actor is in a healthy state.
|
| 243 |
+
is_healthy: bool = True
|
| 244 |
+
|
| 245 |
+
def __init__(
|
| 246 |
+
self,
|
| 247 |
+
actors: Optional[List[ActorHandle]] = None,
|
| 248 |
+
max_remote_requests_in_flight_per_actor: int = 2,
|
| 249 |
+
init_id: int = 0,
|
| 250 |
+
):
|
| 251 |
+
"""Construct a FaultTolerantActorManager.
|
| 252 |
+
|
| 253 |
+
Args:
|
| 254 |
+
actors: A list of ray remote actors to manage on. These actors must have an
|
| 255 |
+
``apply`` method which takes a function with only one parameter (the
|
| 256 |
+
actor instance itself).
|
| 257 |
+
max_remote_requests_in_flight_per_actor: The maximum number of remote
|
| 258 |
+
requests that can be in flight per actor. Any requests made to the pool
|
| 259 |
+
that cannot be scheduled because the limit has been reached will be
|
| 260 |
+
dropped. This only applies to the asynchronous remote call mode.
|
| 261 |
+
init_id: The initial ID to use for the next remote actor. Default is 0.
|
| 262 |
+
"""
|
| 263 |
+
# For historic reasons, just start remote worker ID from 1, so they never
|
| 264 |
+
# collide with local worker ID (0).
|
| 265 |
+
self._next_id = init_id
|
| 266 |
+
|
| 267 |
+
# Actors are stored in a map and indexed by a unique (int) ID.
|
| 268 |
+
self._actors: Dict[int, ActorHandle] = {}
|
| 269 |
+
self._remote_actor_states: Dict[int, self._ActorState] = {}
|
| 270 |
+
self._restored_actors = set()
|
| 271 |
+
self.add_actors(actors or [])
|
| 272 |
+
|
| 273 |
+
# Maps outstanding async requests to the IDs of the actor IDs that
|
| 274 |
+
# are executing them.
|
| 275 |
+
self._in_flight_req_to_actor_id: Dict[ray.ObjectRef, int] = {}
|
| 276 |
+
|
| 277 |
+
self._max_remote_requests_in_flight_per_actor = (
|
| 278 |
+
max_remote_requests_in_flight_per_actor
|
| 279 |
+
)
|
| 280 |
+
|
| 281 |
+
# Useful metric.
|
| 282 |
+
self._num_actor_restarts = 0
|
| 283 |
+
|
| 284 |
+
@DeveloperAPI
|
| 285 |
+
def actor_ids(self) -> List[int]:
|
| 286 |
+
"""Returns a list of all worker IDs (healthy or not)."""
|
| 287 |
+
return list(self._actors.keys())
|
| 288 |
+
|
| 289 |
+
@DeveloperAPI
|
| 290 |
+
def healthy_actor_ids(self) -> List[int]:
|
| 291 |
+
"""Returns a list of worker IDs that are healthy."""
|
| 292 |
+
return [k for k, v in self._remote_actor_states.items() if v.is_healthy]
|
| 293 |
+
|
| 294 |
+
@DeveloperAPI
|
| 295 |
+
def add_actors(self, actors: List[ActorHandle]):
|
| 296 |
+
"""Add a list of actors to the pool.
|
| 297 |
+
|
| 298 |
+
Args:
|
| 299 |
+
actors: A list of ray remote actors to be added to the pool.
|
| 300 |
+
"""
|
| 301 |
+
for actor in actors:
|
| 302 |
+
self._actors[self._next_id] = actor
|
| 303 |
+
self._remote_actor_states[self._next_id] = self._ActorState()
|
| 304 |
+
self._next_id += 1
|
| 305 |
+
|
| 306 |
+
@DeveloperAPI
|
| 307 |
+
def remove_actor(self, actor_id: int) -> ActorHandle:
|
| 308 |
+
"""Remove an actor from the pool.
|
| 309 |
+
|
| 310 |
+
Args:
|
| 311 |
+
actor_id: ID of the actor to remove.
|
| 312 |
+
|
| 313 |
+
Returns:
|
| 314 |
+
Handle to the actor that was removed.
|
| 315 |
+
"""
|
| 316 |
+
actor = self._actors[actor_id]
|
| 317 |
+
|
| 318 |
+
# Remove the actor from the pool.
|
| 319 |
+
del self._actors[actor_id]
|
| 320 |
+
del self._remote_actor_states[actor_id]
|
| 321 |
+
self._restored_actors.discard(actor_id)
|
| 322 |
+
self._remove_async_state(actor_id)
|
| 323 |
+
|
| 324 |
+
return actor
|
| 325 |
+
|
| 326 |
+
@DeveloperAPI
|
| 327 |
+
def num_actors(self) -> int:
|
| 328 |
+
"""Return the total number of actors in the pool."""
|
| 329 |
+
return len(self._actors)
|
| 330 |
+
|
| 331 |
+
@DeveloperAPI
|
| 332 |
+
def num_healthy_actors(self) -> int:
|
| 333 |
+
"""Return the number of healthy remote actors."""
|
| 334 |
+
return sum(s.is_healthy for s in self._remote_actor_states.values())
|
| 335 |
+
|
| 336 |
+
@DeveloperAPI
|
| 337 |
+
def total_num_restarts(self) -> int:
|
| 338 |
+
"""Return the number of remote actors that have been restarted."""
|
| 339 |
+
return self._num_actor_restarts
|
| 340 |
+
|
| 341 |
+
@DeveloperAPI
|
| 342 |
+
def num_outstanding_async_reqs(self) -> int:
|
| 343 |
+
"""Return the number of outstanding async requests."""
|
| 344 |
+
return len(self._in_flight_req_to_actor_id)
|
| 345 |
+
|
| 346 |
+
@DeveloperAPI
|
| 347 |
+
def is_actor_healthy(self, actor_id: int) -> bool:
|
| 348 |
+
"""Whether a remote actor is in healthy state.
|
| 349 |
+
|
| 350 |
+
Args:
|
| 351 |
+
actor_id: ID of the remote actor.
|
| 352 |
+
|
| 353 |
+
Returns:
|
| 354 |
+
True if the actor is healthy, False otherwise.
|
| 355 |
+
"""
|
| 356 |
+
if actor_id not in self._remote_actor_states:
|
| 357 |
+
raise ValueError(f"Unknown actor id: {actor_id}")
|
| 358 |
+
return self._remote_actor_states[actor_id].is_healthy
|
| 359 |
+
|
| 360 |
+
@DeveloperAPI
|
| 361 |
+
def set_actor_state(self, actor_id: int, healthy: bool) -> None:
|
| 362 |
+
"""Update activate state for a specific remote actor.
|
| 363 |
+
|
| 364 |
+
Args:
|
| 365 |
+
actor_id: ID of the remote actor.
|
| 366 |
+
healthy: Whether the remote actor is healthy.
|
| 367 |
+
"""
|
| 368 |
+
if actor_id not in self._remote_actor_states:
|
| 369 |
+
raise ValueError(f"Unknown actor id: {actor_id}")
|
| 370 |
+
|
| 371 |
+
was_healthy = self._remote_actor_states[actor_id].is_healthy
|
| 372 |
+
# Set from unhealthy to healthy -> Add to restored set.
|
| 373 |
+
if not was_healthy and healthy:
|
| 374 |
+
self._restored_actors.add(actor_id)
|
| 375 |
+
# Set from healthy to unhealthy -> Remove from restored set.
|
| 376 |
+
elif was_healthy and not healthy:
|
| 377 |
+
self._restored_actors.discard(actor_id)
|
| 378 |
+
|
| 379 |
+
self._remote_actor_states[actor_id].is_healthy = healthy
|
| 380 |
+
|
| 381 |
+
if not healthy:
|
| 382 |
+
# Remove any async states.
|
| 383 |
+
self._remove_async_state(actor_id)
|
| 384 |
+
|
| 385 |
+
@DeveloperAPI
|
| 386 |
+
def clear(self):
|
| 387 |
+
"""Clean up managed actors."""
|
| 388 |
+
for actor in self._actors.values():
|
| 389 |
+
ray.kill(actor)
|
| 390 |
+
self._actors.clear()
|
| 391 |
+
self._remote_actor_states.clear()
|
| 392 |
+
self._restored_actors.clear()
|
| 393 |
+
self._in_flight_req_to_actor_id.clear()
|
| 394 |
+
|
| 395 |
+
@DeveloperAPI
|
| 396 |
+
def foreach_actor(
|
| 397 |
+
self,
|
| 398 |
+
func: Union[Callable[[Any], Any], List[Callable[[Any], Any]]],
|
| 399 |
+
*,
|
| 400 |
+
healthy_only: bool = True,
|
| 401 |
+
remote_actor_ids: Optional[List[int]] = None,
|
| 402 |
+
timeout_seconds: Optional[float] = None,
|
| 403 |
+
return_obj_refs: bool = False,
|
| 404 |
+
mark_healthy: bool = False,
|
| 405 |
+
) -> RemoteCallResults:
|
| 406 |
+
"""Calls the given function with each actor instance as arg.
|
| 407 |
+
|
| 408 |
+
Automatically marks actors unhealthy if they crash during the remote call.
|
| 409 |
+
|
| 410 |
+
Args:
|
| 411 |
+
func: A single, or a list of Callables, that get applied on the list
|
| 412 |
+
of specified remote actors.
|
| 413 |
+
healthy_only: If True, applies `func` only to actors currently tagged
|
| 414 |
+
"healthy", otherwise to all actors. If `healthy_only=False` and
|
| 415 |
+
`mark_healthy=True`, will send `func` to all actors and mark those
|
| 416 |
+
actors "healthy" that respond to the request within `timeout_seconds`
|
| 417 |
+
and are currently tagged as "unhealthy".
|
| 418 |
+
remote_actor_ids: Apply func on a selected set of remote actors. Use None
|
| 419 |
+
(default) for all actors.
|
| 420 |
+
timeout_seconds: Time to wait (in seconds) for results. Set this to 0.0 for
|
| 421 |
+
fire-and-forget. Set this to None (default) to wait infinitely (i.e. for
|
| 422 |
+
synchronous execution).
|
| 423 |
+
return_obj_refs: whether to return ObjectRef instead of actual results.
|
| 424 |
+
Note, for fault tolerance reasons, these returned ObjectRefs should
|
| 425 |
+
never be resolved with ray.get() outside of the context of this manager.
|
| 426 |
+
mark_healthy: Whether to mark all those actors healthy again that are
|
| 427 |
+
currently marked unhealthy AND that returned results from the remote
|
| 428 |
+
call (within the given `timeout_seconds`).
|
| 429 |
+
Note that actors are NOT set unhealthy, if they simply time out
|
| 430 |
+
(only if they return a RayActorError).
|
| 431 |
+
Also not that this setting is ignored if `healthy_only=True` (b/c this
|
| 432 |
+
setting only affects actors that are currently tagged as unhealthy).
|
| 433 |
+
|
| 434 |
+
Returns:
|
| 435 |
+
The list of return values of all calls to `func(actor)`. The values may be
|
| 436 |
+
actual data returned or exceptions raised during the remote call in the
|
| 437 |
+
format of RemoteCallResults.
|
| 438 |
+
"""
|
| 439 |
+
remote_actor_ids = remote_actor_ids or self.actor_ids()
|
| 440 |
+
if healthy_only:
|
| 441 |
+
func, remote_actor_ids = self._filter_func_and_remote_actor_id_by_state(
|
| 442 |
+
func, remote_actor_ids
|
| 443 |
+
)
|
| 444 |
+
|
| 445 |
+
# Send out remote requests.
|
| 446 |
+
remote_calls = self._call_actors(
|
| 447 |
+
func=func,
|
| 448 |
+
remote_actor_ids=remote_actor_ids,
|
| 449 |
+
)
|
| 450 |
+
|
| 451 |
+
# Collect remote request results (if available given timeout and/or errors).
|
| 452 |
+
_, remote_results = self._fetch_result(
|
| 453 |
+
remote_actor_ids=remote_actor_ids,
|
| 454 |
+
remote_calls=remote_calls,
|
| 455 |
+
tags=[None] * len(remote_calls),
|
| 456 |
+
timeout_seconds=timeout_seconds,
|
| 457 |
+
return_obj_refs=return_obj_refs,
|
| 458 |
+
mark_healthy=mark_healthy,
|
| 459 |
+
)
|
| 460 |
+
|
| 461 |
+
return remote_results
|
| 462 |
+
|
| 463 |
+
@DeveloperAPI
|
| 464 |
+
def foreach_actor_async(
|
| 465 |
+
self,
|
| 466 |
+
func: Union[Callable[[Any], Any], List[Callable[[Any], Any]]],
|
| 467 |
+
tag: str = None,
|
| 468 |
+
*,
|
| 469 |
+
healthy_only: bool = True,
|
| 470 |
+
remote_actor_ids: List[int] = None,
|
| 471 |
+
) -> int:
|
| 472 |
+
"""Calls given functions against each actors without waiting for results.
|
| 473 |
+
|
| 474 |
+
Args:
|
| 475 |
+
func: A single Callable applied to all specified remote actors or a list
|
| 476 |
+
of Callables, that get applied on the list of specified remote actors.
|
| 477 |
+
In the latter case, both list of Callables and list of specified actors
|
| 478 |
+
must have the same length.
|
| 479 |
+
tag: A tag to identify the results from this async call.
|
| 480 |
+
healthy_only: If True, applies `func` only to actors currently tagged
|
| 481 |
+
"healthy", otherwise to all actors. If `healthy_only=False` and
|
| 482 |
+
later, `self.fetch_ready_async_reqs()` is called with
|
| 483 |
+
`mark_healthy=True`, will send `func` to all actors and mark those
|
| 484 |
+
actors "healthy" that respond to the request within `timeout_seconds`
|
| 485 |
+
and are currently tagged as "unhealthy".
|
| 486 |
+
remote_actor_ids: Apply func on a selected set of remote actors.
|
| 487 |
+
Note, for fault tolerance reasons, these returned ObjectRefs should
|
| 488 |
+
never be resolved with ray.get() outside of the context of this manager.
|
| 489 |
+
|
| 490 |
+
Returns:
|
| 491 |
+
The number of async requests that are actually fired.
|
| 492 |
+
"""
|
| 493 |
+
# TODO(avnishn, jungong): so thinking about this a bit more, it would be the
|
| 494 |
+
# best if we can attach multiple tags to an async all, like basically this
|
| 495 |
+
# parameter should be tags:
|
| 496 |
+
# For sync calls, tags would be ().
|
| 497 |
+
# For async call users, they can attached multiple tags for a single call, like
|
| 498 |
+
# ("rollout_worker", "sync_weight").
|
| 499 |
+
# For async fetch result, we can also specify a single, or list of tags. For
|
| 500 |
+
# example, ("eval", "sample") will fetch all the sample() calls on eval
|
| 501 |
+
# workers.
|
| 502 |
+
remote_actor_ids = remote_actor_ids or self.actor_ids()
|
| 503 |
+
|
| 504 |
+
if healthy_only:
|
| 505 |
+
func, remote_actor_ids = self._filter_func_and_remote_actor_id_by_state(
|
| 506 |
+
func, remote_actor_ids
|
| 507 |
+
)
|
| 508 |
+
|
| 509 |
+
if isinstance(func, list) and len(func) != len(remote_actor_ids):
|
| 510 |
+
raise ValueError(
|
| 511 |
+
f"The number of functions specified {len(func)} must match "
|
| 512 |
+
f"the number of remote actor indices {len(remote_actor_ids)}."
|
| 513 |
+
)
|
| 514 |
+
|
| 515 |
+
num_calls_to_make: Dict[int, int] = defaultdict(lambda: 0)
|
| 516 |
+
# Drop calls to actors that are too busy.
|
| 517 |
+
if isinstance(func, list):
|
| 518 |
+
limited_func = []
|
| 519 |
+
limited_remote_actor_ids = []
|
| 520 |
+
for i, f in zip(remote_actor_ids, func):
|
| 521 |
+
num_outstanding_reqs = self._remote_actor_states[
|
| 522 |
+
i
|
| 523 |
+
].num_in_flight_async_requests
|
| 524 |
+
if (
|
| 525 |
+
num_outstanding_reqs + num_calls_to_make[i]
|
| 526 |
+
< self._max_remote_requests_in_flight_per_actor
|
| 527 |
+
):
|
| 528 |
+
num_calls_to_make[i] += 1
|
| 529 |
+
limited_func.append(f)
|
| 530 |
+
limited_remote_actor_ids.append(i)
|
| 531 |
+
else:
|
| 532 |
+
limited_func = func
|
| 533 |
+
limited_remote_actor_ids = []
|
| 534 |
+
for i in remote_actor_ids:
|
| 535 |
+
num_outstanding_reqs = self._remote_actor_states[
|
| 536 |
+
i
|
| 537 |
+
].num_in_flight_async_requests
|
| 538 |
+
if (
|
| 539 |
+
num_outstanding_reqs + num_calls_to_make[i]
|
| 540 |
+
< self._max_remote_requests_in_flight_per_actor
|
| 541 |
+
):
|
| 542 |
+
num_calls_to_make[i] += 1
|
| 543 |
+
limited_remote_actor_ids.append(i)
|
| 544 |
+
|
| 545 |
+
remote_calls = self._call_actors(
|
| 546 |
+
func=limited_func,
|
| 547 |
+
remote_actor_ids=limited_remote_actor_ids,
|
| 548 |
+
)
|
| 549 |
+
|
| 550 |
+
# Save these as outstanding requests.
|
| 551 |
+
for id, call in zip(limited_remote_actor_ids, remote_calls):
|
| 552 |
+
self._remote_actor_states[id].num_in_flight_async_requests += 1
|
| 553 |
+
self._in_flight_req_to_actor_id[call] = (tag, id)
|
| 554 |
+
|
| 555 |
+
return len(remote_calls)
|
| 556 |
+
|
| 557 |
+
@DeveloperAPI
def fetch_ready_async_reqs(
    self,
    *,
    tags: Union[str, List[str], Tuple[str]] = (),
    timeout_seconds: Optional[float] = 0.0,
    return_obj_refs: bool = False,
    mark_healthy: bool = False,
) -> RemoteCallResults:
    """Collects results of outstanding async requests that are ready.

    Actors that raise a RayActorError inside their remote call are
    automatically marked unhealthy. An empty `tags` tuple matches ALL
    outstanding async requests.

    Args:
        tags: A tag or a list of tags identifying which async calls' results
            to fetch.
        timeout_seconds: ray.get() timeout. The default of 0 only fetches
            results that are already available.
        return_obj_refs: Whether to return ObjectRefs instead of resolved
            results.
        mark_healthy: Whether to flip currently-unhealthy actors back to
            healthy if they do return a result (within `timeout_seconds`).
            Actors merely timing out are NOT marked unhealthy; only a
            RayActorError raised inside the remote request does that.
            This setting has no effect if the preceding
            `foreach_actor_async()` call used `healthy_only=True` (it only
            affects actors currently tagged unhealthy).

    Returns:
        A RemoteCallResults holding the values (or raised exceptions) of all
        ready calls.
    """
    # Gather the in-flight requests matching `tags` (all of them if empty).
    calls, actor_ids, call_tags = self._filter_calls_by_tag(tags)
    ready_refs, results = self._fetch_result(
        remote_actor_ids=actor_ids,
        remote_calls=calls,
        tags=call_tags,
        timeout_seconds=timeout_seconds,
        return_obj_refs=return_obj_refs,
        mark_healthy=mark_healthy,
    )

    for ref, res in zip(ready_refs, results):
        # One fewer request in flight on the actor that answered.
        self._remote_actor_states[res.actor_id].num_in_flight_async_requests -= 1
        # Forget this call; the ref may already be gone if the actor was
        # disabled in the meantime, hence the tolerant pop.
        self._in_flight_req_to_actor_id.pop(ref, None)

    return results
|
| 615 |
+
|
| 616 |
+
@staticmethod
def handle_remote_call_result_errors(
    results_or_errors: RemoteCallResults,
    *,
    ignore_ray_errors: bool,
) -> None:
    """Checks given results for application errors and raises them if necessary.

    Args:
        results_or_errors: The results or errors to check.
        ignore_ray_errors: Whether to ignore (but still log) RayErrors found
            within the elements of `results_or_errors`.
    """
    for entry in results_or_errors:
        # Successful call -> nothing to do.
        if entry.ok:
            continue
        if ignore_ray_errors:
            # Caller opted to tolerate remote errors: log and move on.
            logger.exception(entry.get())
        else:
            # Surface the remote error to the caller.
            raise entry.get()
|
| 639 |
+
|
| 640 |
+
@DeveloperAPI
def probe_unhealthy_actors(
    self,
    timeout_seconds: Optional[float] = None,
    mark_healthy: bool = False,
) -> List[int]:
    """Pings all unhealthy actors to try bringing them back.

    Args:
        timeout_seconds: Timeout in seconds (to avoid pinging hanging workers
            indefinitely).
        mark_healthy: Whether to mark actors healthy again if they respond to
            the `ping` within `timeout_seconds`. Actors are NOT marked
            unhealthy for merely timing out; only a RayActorError returned
            from the remote request does that.

    Returns:
        IDs of actors restored by this `ping.remote()` call PLUS those
        restored earlier via other remote requests. The cached set of
        previously restored actors is erased by this call.
    """
    # Drain the cache of actors restored by earlier `self._fetch_result`
    # calls (other than the ping triggered below).
    previously_restored = list(self._restored_actors)
    self._restored_actors.clear()

    # Which actors are currently flagged unhealthy?
    unhealthy_ids = [
        aid for aid in self.actor_ids() if not self.is_actor_healthy(aid)
    ]
    # Nothing to probe -> only report the previously restored ones.
    if not unhealthy_ids:
        return previously_restored

    # Ping every unhealthy actor; this triggers a fetch that may restore them.
    ping_results = self.foreach_actor(
        func=lambda actor: actor.ping(),
        remote_actor_ids=unhealthy_ids,
        healthy_only=False,  # We specifically want to ping unhealthy actors.
        timeout_seconds=timeout_seconds,
        mark_healthy=mark_healthy,
    )

    # Previously restored actors AND those that just answered the ping.
    newly_restored = [res.actor_id for res in ping_results if res.ok]
    return previously_restored + newly_restored
|
| 696 |
+
|
| 697 |
+
def _call_actors(
    self,
    func: Union[Callable[[Any], Any], List[Callable[[Any], Any]]],
    *,
    remote_actor_ids: List[int] = None,
) -> List[ray.ObjectRef]:
    """Applies callables on a selection of remote actors.

    Args:
        func: A single Callable applied to every selected actor, or a list of
            Callables (one per actor, matched positionally).
        remote_actor_ids: Apply func on this selected set of remote actors;
            defaults to all actors.

    Returns:
        The ObjectRefs returned by the remote `apply` calls.
    """
    if isinstance(func, list):
        assert len(remote_actor_ids) == len(
            func
        ), "Funcs must have the same number of callables as actor indices."

    # No explicit selection -> target every managed actor.
    if remote_actor_ids is None:
        remote_actor_ids = self.actor_ids()

    if isinstance(func, list):
        # Pair each callable with its actor positionally.
        return [
            self._actors[aid].apply.remote(fn)
            for aid, fn in zip(remote_actor_ids, func)
        ]
    # Same callable for every selected actor.
    return [self._actors[aid].apply.remote(func) for aid in remote_actor_ids]
|
| 729 |
+
|
| 730 |
+
@DeveloperAPI
def _fetch_result(
    self,
    *,
    remote_actor_ids: List[int],
    remote_calls: List[ray.ObjectRef],
    tags: List[str],
    timeout_seconds: Optional[float] = None,
    return_obj_refs: bool = False,
    mark_healthy: bool = False,
) -> Tuple[List[ray.ObjectRef], RemoteCallResults]:
    """Try fetching results from remote actor calls.

    Mark whether an actor is healthy or not accordingly.

    Args:
        remote_actor_ids: IDs of the actors these remote
            calls were fired against.
        remote_calls: List of remote calls to fetch.
        tags: List of tags used for identifying the remote calls.
        timeout_seconds: Timeout (in sec) for the ray.wait() call. Default is None,
            meaning wait indefinitely for all results.
        return_obj_refs: Whether to return ObjectRef instead of actual results.
        mark_healthy: Whether to mark certain actors healthy based on the results
            of these remote calls. Useful, for example, to make sure actors
            do not come back without proper state restoration.

    Returns:
        A tuple of (ready ObjectRefs, RemoteCallResults holding the resolved
        results or errors of those calls).
    """
    # Notice that we do not return the refs to any unfinished calls to the
    # user, since it is not safe to handle such remote actor calls outside the
    # context of this actor manager. These requests are simply dropped.
    timeout = float(timeout_seconds) if timeout_seconds is not None else None

    # This avoids calling ray.init() in the case of 0 remote calls.
    # This is useful if the number of remote workers is 0.
    if not remote_calls:
        return [], RemoteCallResults()

    readies, _ = ray.wait(
        remote_calls,
        num_returns=len(remote_calls),
        timeout=timeout,
        # Make sure remote results are fetched locally in parallel.
        fetch_local=not return_obj_refs,
    )

    # Build a call -> position map ONCE, instead of running two O(n)
    # `list.index()` scans per ready ref (previously O(n^2) overall).
    # `setdefault` keeps the FIRST occurrence, matching `list.index` semantics.
    call_to_index = {}
    for i, call in enumerate(remote_calls):
        call_to_index.setdefault(call, i)

    # Remote data should already be fetched to local object store at this point.
    remote_results = RemoteCallResults()
    for ready in readies:
        # Find the corresponding actor ID and tag for this remote call.
        index = call_to_index[ready]
        actor_id = remote_actor_ids[index]
        tag = tags[index]

        # If caller wants ObjectRefs, return directly without resolving.
        if return_obj_refs:
            remote_results.add_result(actor_id, ResultOrError(result=ready), tag)
            continue

        # Try getting the ready results.
        try:
            result = ray.get(ready)

        # Any error type other than `RayError` happening during ray.get() ->
        # Throw exception right here (we don't know how to handle these non-remote
        # worker issues and should therefore crash).
        except RayError as e:
            # Return error to the user.
            remote_results.add_result(actor_id, ResultOrError(error=e), tag)

            # Mark the actor as unhealthy, take it out of service, and wait for
            # Ray Core to restore it.
            if self.is_actor_healthy(actor_id):
                logger.error(
                    f"Ray error ({str(e)}), taking actor {actor_id} out of service."
                )
                self.set_actor_state(actor_id, healthy=False)

        # If no errors, add result to `RemoteCallResults` to be returned.
        else:
            # Return valid result to the user.
            remote_results.add_result(actor_id, ResultOrError(result=result), tag)

            # Actor came back from an unhealthy state. Mark this actor as healthy
            # and add it to our healthy set.
            if mark_healthy and not self.is_actor_healthy(actor_id):
                logger.warning(
                    f"Bringing previously unhealthy, now-healthy actor {actor_id} "
                    "back into service."
                )
                self.set_actor_state(actor_id, healthy=True)
                self._num_actor_restarts += 1

    # Make sure, to-be-returned results are sound.
    assert len(readies) == len(remote_results)

    return readies, remote_results
|
| 828 |
+
|
| 829 |
+
def _filter_func_and_remote_actor_id_by_state(
    self,
    func: Union[Callable[[Any], Any], List[Callable[[Any], Any]]],
    remote_actor_ids: List[int],
):
    """Filters out funcs and remote worker IDs whose actor is unhealthy.

    Args:
        func: A single, or a list of Callables.
        remote_actor_ids: IDs of potential remote workers to apply func on.

    Returns:
        A tuple of (filtered func, filtered remote worker ids).
    """
    if not isinstance(func, list):
        # A single shared callable: just drop IDs of unhealthy actors.
        healthy_ids = [
            aid for aid in remote_actor_ids if self.is_actor_healthy(aid)
        ]
        return func, healthy_ids

    assert len(remote_actor_ids) == len(
        func
    ), "Func must have the same number of callables as remote actor ids."
    # One callable per actor: keep only the (callable, ID) pairs whose actor
    # is healthy, so the positional pairing stays intact.
    healthy_pairs = [
        (f, aid)
        for f, aid in zip(func, remote_actor_ids)
        if self.is_actor_healthy(aid)
    ]
    filtered_funcs = [f for f, _ in healthy_pairs]
    filtered_ids = [aid for _, aid in healthy_pairs]
    return filtered_funcs, filtered_ids
|
| 862 |
+
|
| 863 |
+
def _filter_calls_by_tag(
    self, tags: Union[str, List[str], Tuple[str]]
) -> Tuple[List[ray.ObjectRef], List[ActorHandle], List[str]]:
    """Returns all in-flight requests that match the given tags, if any.

    Args:
        tags: A str or a list/tuple of str. If tags is empty, return all the in
            flight requests.

    Returns:
        A tuple of (matching remote calls, their remote actor IDs, their tags),
        all three lists of the same length.
    """
    # Normalize `tags` into a set for O(1) membership tests.
    if isinstance(tags, str):
        tag_set = {tags}
    elif isinstance(tags, (list, tuple)):
        tag_set = set(tags)
    else:
        raise ValueError(
            f"tags must be either a str or a list/tuple of str, got {type(tags)}."
        )

    matching_calls = []
    matching_actor_ids = []
    matching_tags = []
    for call, (tag, actor_id) in self._in_flight_req_to_actor_id.items():
        # An empty tag set means: match every in-flight request.
        if not tag_set or tag in tag_set:
            matching_calls.append(call)
            matching_actor_ids.append(actor_id)
            matching_tags.append(tag)

    return matching_calls, matching_actor_ids, matching_tags
|
| 896 |
+
|
| 897 |
+
def _remove_async_state(self, actor_id: int):
    """Removes internal async state for a given actor.

    This is called when an actor is removed from the pool or being marked
    unhealthy.

    Args:
        actor_id: The id of the actor.
    """
    # Remove any outstanding async requests for this actor.
    # `self._in_flight_req_to_actor_id` maps call ObjectRef -> (tag, actor_id)
    # (see where entries are added right after `_call_actors`). The previous
    # code unpacked `.items()` the wrong way around (`for id, req in ...`),
    # which compared an ObjectRef against an int (always False) and would
    # have deleted by *value* instead of key -- stale requests were never
    # actually removed. Unpack key and value correctly here.
    # Use `list` to avoid mutating the dict while iterating over it.
    for call, (_, aid) in list(self._in_flight_req_to_actor_id.items()):
        if aid == actor_id:
            del self._in_flight_req_to_actor_id[call]
|
| 912 |
+
|
| 913 |
+
def actors(self):
    """Returns the internal collection of managed actor handles.

    Handles are accessed by actor ID (see `_call_actors`); callers should
    treat the returned collection as read-only.
    """
    # TODO(jungong) : remove this API once EnvRunnerGroup.remote_workers()
    # and EnvRunnerGroup._remote_workers() are removed.
    return self._actors
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/annotations.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.rllib.utils.deprecation import Deprecated
|
| 2 |
+
from ray.util.annotations import _mark_annotated
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def override(parent_cls):
    """Decorator for documenting method overrides.

    Args:
        parent_cls: The superclass that provides the overridden method. If
            `parent_cls` does not actually have the method, or the class in which
            the method is defined is not a subclass of `parent_cls`, an error is
            raised.

    .. testcode::
        :skipif: True

        from ray.rllib.policy import Policy
        class TorchPolicy(Policy):
            ...
            # Indicates that `TorchPolicy.loss()` overrides the parent
            # Policy class' own `loss method. Leads to an error if Policy
            # does not have a `loss` method.

            @override(Policy)
            def loss(self, model, action_dist, train_batch):
                ...

    """

    # Descriptor intended to verify, at class-creation time, that the class
    # defining the decorated method is a subclass of `parent_cls`.
    class OverrideCheck:
        def __init__(self, func, expected_parent_cls):
            self.func = func
            self.expected_parent_cls = expected_parent_cls

        def __set_name__(self, owner, name):
            # Check if the owner (the class) is a subclass of the expected base class
            if not issubclass(owner, self.expected_parent_cls):
                raise TypeError(
                    f"When using the @override decorator, {owner.__name__} must be a "
                    f"subclass of {parent_cls.__name__}!"
                )
            # Set the function as a regular method on the class.
            setattr(owner, name, self.func)

    def decorator(method):
        # Check, whether `method` is actually defined by the parent class.
        if method.__name__ not in dir(parent_cls):
            raise NameError(
                f"When using the @override decorator, {method.__name__} must override "
                f"the respective method (with the same name) of {parent_cls.__name__}!"
            )

        # Check if the class is a subclass of the expected base class
        # NOTE(review): This `OverrideCheck` instance is created and immediately
        # discarded; `__set_name__` only fires when a descriptor instance is
        # assigned as a class attribute, so the subclass check above appears to
        # never actually run. Confirm whether this is intentional before
        # "fixing" it -- enabling the check could raise for existing callers.
        OverrideCheck(method, parent_cls)
        # The method itself is returned unchanged.
        return method

    return decorator
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def PublicAPI(obj):
    """Decorator for documenting public APIs.

    Public APIs are classes and methods exposed to end users of RLlib. You
    can expect these APIs to remain stable across RLlib releases.

    Subclasses that inherit from a ``@PublicAPI`` base class can be
    assumed part of the RLlib public API as well (e.g., all Algorithm classes
    are in public API because Algorithm is ``@PublicAPI``).

    In addition, you can assume all algo configurations are part of their
    public API as well.

    .. testcode::
        :skipif: True

        # Indicates that the `Algorithm` class is exposed to end users
        # of RLlib and will remain stable across RLlib releases.
        from ray import tune
        @PublicAPI
        class Algorithm(tune.Trainable):
            ...
    """

    # Tag the object for Ray's annotation tooling; the object itself is
    # returned unchanged.
    _mark_annotated(obj)
    return obj
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def DeveloperAPI(obj):
    """Decorator for documenting developer APIs.

    Developer APIs are classes and methods explicitly exposed to developers
    for the purposes of building custom algorithms or advanced training
    strategies on top of RLlib internals. You can generally expect these APIs
    to be stable sans minor changes (but less stable than public APIs).

    Subclasses that inherit from a ``@DeveloperAPI`` base class can be
    assumed part of the RLlib developer API as well.

    .. testcode::
        :skipif: True

        # Indicates that the `TorchPolicy` class is exposed to end users
        # of RLlib and will remain (relatively) stable across RLlib
        # releases.
        from ray.rllib.policy import Policy
        @DeveloperAPI
        class TorchPolicy(Policy):
            ...
    """

    # Tag the object for Ray's annotation tooling; the object itself is
    # returned unchanged.
    _mark_annotated(obj)
    return obj
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def ExperimentalAPI(obj):
    """Decorator for documenting experimental APIs.

    Experimental APIs are classes and methods that are in development and may
    change at any time in their development process. You should not expect
    these APIs to be stable until their tag is changed to `DeveloperAPI` or
    `PublicAPI`.

    Subclasses that inherit from a ``@ExperimentalAPI`` base class can be
    assumed experimental as well.

    .. testcode::
        :skipif: True

        from ray.rllib.policy import Policy
        class TorchPolicy(Policy):
            ...
            # Indicates that the `TorchPolicy.loss` method is a new and
            # experimental API and may change frequently in future
            # releases.
            @ExperimentalAPI
            def loss(self, model, action_dist, train_batch):
                ...
    """

    # Tag the object for Ray's annotation tooling; the object itself is
    # returned unchanged.
    _mark_annotated(obj)
    return obj
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def OldAPIStack(obj):
    """Decorator for classes/methods/functions belonging to the old API stack.

    These should be deprecated at some point after Ray 3.0 (RLlib GA).
    It is recommended for users to start exploring (and coding against) the new API
    stack instead.
    """
    # No effect yet.

    # Tag the object for Ray's annotation tooling; the object itself is
    # returned unchanged.
    _mark_annotated(obj)
    return obj
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def OverrideToImplementCustomLogic(obj):
    """Users should override this in their sub-classes to implement custom logic.

    Used in Algorithm and Policy to tag methods that need overriding, e.g.
    `Policy.loss()`.

    .. testcode::
        :skipif: True

        from ray.rllib.policy.torch_policy import TorchPolicy
        @overrides(TorchPolicy)
        @OverrideToImplementCustomLogic
        def loss(self, ...):
            # implement custom loss function here ...
            # ... w/o calling the corresponding `super().loss()` method.
            ...

    """
    # Mark the base implementation as "not overridden". User overrides won't
    # carry this attribute, so `is_overridden()` returns True for them.
    obj.__is_overridden__ = False
    return obj
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def OverrideToImplementCustomLogic_CallToSuperRecommended(obj):
    """Users should override this in their sub-classes to implement custom logic.

    Thereby, it is recommended (but not required) to call the super-class'
    corresponding method.

    Used in Algorithm and Policy to tag methods that need overriding, but the
    super class' method should still be called, e.g.
    `Algorithm.setup()`.

    .. testcode::
        :skipif: True

        from ray import tune
        @overrides(tune.Trainable)
        @OverrideToImplementCustomLogic_CallToSuperRecommended
        def setup(self, config):
            # implement custom setup logic here ...
            super().setup(config)
            # ... or here (after having called super()'s setup method.
    """
    # Mark the base implementation as "not overridden". User overrides won't
    # carry this attribute, so `is_overridden()` returns True for them.
    obj.__is_overridden__ = False
    return obj
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def is_overridden(obj):
    """Checks whether a function has been overridden.

    Note, this only works for API calls decorated with OverrideToImplementCustomLogic
    or OverrideToImplementCustomLogic_CallToSuperRecommended.
    """
    # Base implementations carry `__is_overridden__ = False` (set by the
    # decorators above); anything lacking the marker -- i.e. a user-supplied
    # override -- is reported as overridden.
    try:
        return obj.__is_overridden__
    except AttributeError:
        return True
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
# Backward compatibility.
|
| 213 |
+
Deprecated = Deprecated
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/filter.py
ADDED
|
@@ -0,0 +1,420 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import threading
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import tree # pip install dm_tree
|
| 6 |
+
|
| 7 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 8 |
+
from ray.rllib.utils.deprecation import Deprecated
|
| 9 |
+
from ray.rllib.utils.numpy import SMALL_NUMBER
|
| 10 |
+
from ray.rllib.utils.typing import TensorStructType
|
| 11 |
+
from ray.rllib.utils.serialization import _serialize_ndarray, _deserialize_ndarray
|
| 12 |
+
from ray.rllib.utils.deprecation import deprecation_warning
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@OldAPIStack
class Filter:
    """Processes input, possibly statefully.

    Abstract interface; concrete subclasses (e.g. NoFilter) implement the
    actual transformation and state handling.
    """

    def apply_changes(self, other: "Filter", *args, **kwargs) -> None:
        """Updates self with "new state" from other filter."""
        raise NotImplementedError

    def copy(self) -> "Filter":
        """Creates a new object with same state as self.

        Returns:
            A copy of self.
        """
        raise NotImplementedError

    def sync(self, other: "Filter") -> None:
        """Copies all state from other filter to self."""
        raise NotImplementedError

    def reset_buffer(self) -> None:
        """Creates copy of current state and resets accumulated state"""
        raise NotImplementedError

    def as_serializable(self) -> "Filter":
        # Presumably returns a version of this filter that is safe to ship
        # across processes (see concrete subclasses) -- TODO confirm.
        raise NotImplementedError

    # Kept for backward compatibility only; with `error=True`, calling this
    # raises a deprecation error pointing callers to `reset_buffer()`.
    @Deprecated(new="Filter.reset_buffer()", error=True)
    def clear_buffer(self):
        pass
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@OldAPIStack
class NoFilter(Filter):
    """Pass-through filter: returns inputs unchanged (converting to ndarray
    where necessary) and keeps no state."""

    is_concurrent = True

    def __call__(self, x: TensorStructType, update=True):
        """Returns `x` as-is (arrays/dicts/tuples) or as an ndarray."""
        # Arrays, dicts, and tuples pass through untouched.
        if isinstance(x, (np.ndarray, dict, tuple)):
            return x

        # Anything else must be convertible into an ndarray.
        try:
            return np.asarray(x)
        except Exception:
            raise ValueError("Failed to convert to array", x)

    def apply_changes(self, other: "NoFilter", *args, **kwargs) -> None:
        """No state to merge -> no-op."""
        pass

    def copy(self) -> "NoFilter":
        """Stateless, so self serves as its own copy."""
        return self

    def sync(self, other: "NoFilter") -> None:
        """No state to sync -> no-op."""
        pass

    def reset_buffer(self) -> None:
        """No buffer to reset -> no-op."""
        pass

    def as_serializable(self) -> "NoFilter":
        """Already serializable (no state)."""
        return self
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# http://www.johndcook.com/blog/standard_deviation/
|
| 80 |
+
@OldAPIStack
|
| 81 |
+
class RunningStat:
|
| 82 |
+
def __init__(self, shape=()):
    """Initializes running statistics for samples of the given shape."""
    # Number of samples pushed so far.
    self.num_pushes = 0
    # Running mean of all pushed samples.
    self.mean_array = np.zeros(shape)
    # Running sum of squared deviations from the mean (the "M2" accumulator
    # of Welford's algorithm, despite the name); see `var` for how it is
    # turned into a variance.
    self.std_array = np.zeros(shape)
|
| 86 |
+
|
| 87 |
+
def copy(self):
    """Returns a deep copy of this RunningStat (arrays are np.copy'd)."""
    other = RunningStat()
    # TODO: Remove these safe-guards if not needed anymore.
    # The `hasattr` fallbacks (`_n`, `_M`, `_S`) handle instances pickled
    # under older attribute names -- presumably from a previous version of
    # this class; confirm before removing.
    other.num_pushes = self.num_pushes if hasattr(self, "num_pushes") else self._n
    other.mean_array = (
        np.copy(self.mean_array)
        if hasattr(self, "mean_array")
        else np.copy(self._M)
    )
    other.std_array = (
        np.copy(self.std_array) if hasattr(self, "std_array") else np.copy(self._S)
    )
    return other
|
| 100 |
+
|
| 101 |
+
def push(self, x):
|
| 102 |
+
x = np.asarray(x)
|
| 103 |
+
# Unvectorized update of the running statistics.
|
| 104 |
+
if x.shape != self.mean_array.shape:
|
| 105 |
+
raise ValueError(
|
| 106 |
+
"Unexpected input shape {}, expected {}, value = {}".format(
|
| 107 |
+
x.shape, self.mean_array.shape, x
|
| 108 |
+
)
|
| 109 |
+
)
|
| 110 |
+
self.num_pushes += 1
|
| 111 |
+
if self.num_pushes == 1:
|
| 112 |
+
self.mean_array[...] = x
|
| 113 |
+
else:
|
| 114 |
+
delta = x - self.mean_array
|
| 115 |
+
self.mean_array[...] += delta / self.num_pushes
|
| 116 |
+
self.std_array[...] += (
|
| 117 |
+
(delta / self.num_pushes) * delta * (self.num_pushes - 1)
|
| 118 |
+
)
|
| 119 |
+
|
| 120 |
+
def update(self, other):
|
| 121 |
+
n1 = float(self.num_pushes)
|
| 122 |
+
n2 = float(other.num_pushes)
|
| 123 |
+
n = n1 + n2
|
| 124 |
+
if n == 0:
|
| 125 |
+
# Avoid divide by zero, which creates nans
|
| 126 |
+
return
|
| 127 |
+
delta = self.mean_array - other.mean_array
|
| 128 |
+
delta2 = delta * delta
|
| 129 |
+
m = (n1 * self.mean_array + n2 * other.mean_array) / n
|
| 130 |
+
s = self.std_array + other.std_array + (delta2 / n) * n1 * n2
|
| 131 |
+
self.num_pushes = n
|
| 132 |
+
self.mean_array = m
|
| 133 |
+
self.std_array = s
|
| 134 |
+
|
| 135 |
+
def __repr__(self):
|
| 136 |
+
return "(n={}, mean_mean={}, mean_std={})".format(
|
| 137 |
+
self.n, np.mean(self.mean), np.mean(self.std)
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
@property
|
| 141 |
+
def n(self):
|
| 142 |
+
return self.num_pushes
|
| 143 |
+
|
| 144 |
+
@property
|
| 145 |
+
def mean(self):
|
| 146 |
+
return self.mean_array
|
| 147 |
+
|
| 148 |
+
@property
|
| 149 |
+
def var(self):
|
| 150 |
+
return (
|
| 151 |
+
self.std_array / (self.num_pushes - 1)
|
| 152 |
+
if self.num_pushes > 1
|
| 153 |
+
else np.square(self.mean_array)
|
| 154 |
+
).astype(np.float32)
|
| 155 |
+
|
| 156 |
+
@property
|
| 157 |
+
def std(self):
|
| 158 |
+
return np.sqrt(self.var)
|
| 159 |
+
|
| 160 |
+
@property
|
| 161 |
+
def shape(self):
|
| 162 |
+
return self.mean_array.shape
|
| 163 |
+
|
| 164 |
+
def to_state(self):
|
| 165 |
+
return {
|
| 166 |
+
"num_pushes": self.num_pushes,
|
| 167 |
+
"mean_array": _serialize_ndarray(self.mean_array),
|
| 168 |
+
"std_array": _serialize_ndarray(self.std_array),
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
@staticmethod
|
| 172 |
+
def from_state(state):
|
| 173 |
+
running_stats = RunningStat()
|
| 174 |
+
running_stats.num_pushes = state["num_pushes"]
|
| 175 |
+
running_stats.mean_array = _deserialize_ndarray(state["mean_array"])
|
| 176 |
+
running_stats.std_array = _deserialize_ndarray(state["std_array"])
|
| 177 |
+
return running_stats
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
@OldAPIStack
class MeanStdFilter(Filter):
    """Keeps track of a running mean for seen states"""

    is_concurrent = False

    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        """Initializes a MeanStdFilter instance.

        Args:
            shape: Shape of the values to normalize. May be None (Discrete),
                a simple shape tuple, or a nested dict/tuple of np.ndarray
                shapes for complex input spaces.
            demean: Whether to subtract the running mean from inputs.
            destd: Whether to divide inputs by the running std.
            clip: Clip normalized values to [-clip, clip]; falsy to disable.
        """
        self.shape = shape
        # We don't have a preprocessor, if shape is None (Discrete) or
        # flat_shape is Tuple[np.ndarray] or Dict[str, np.ndarray]
        # (complex inputs).
        flat_shape = tree.flatten(self.shape)
        self.no_preprocessor = shape is None or (
            isinstance(self.shape, (dict, tuple))
            and len(flat_shape) > 0
            and isinstance(flat_shape[0], np.ndarray)
        )
        # If preprocessing (flattening dicts/tuples), make sure shape
        # is an np.ndarray, so we don't confuse it with a complex Tuple
        # space's shape structure (which is a Tuple[np.ndarray]).
        if not self.no_preprocessor:
            self.shape = np.array(self.shape)
        self.demean = demean
        self.destd = destd
        self.clip = clip
        # Running stats.
        self.running_stats = tree.map_structure(lambda s: RunningStat(s), self.shape)

        # In distributed rollouts, each worker sees different states.
        # The buffer is used to keep track of deltas amongst all the
        # observation filters.
        self.buffer = None
        self.reset_buffer()

    def reset_buffer(self) -> None:
        """Creates a fresh (empty) delta buffer matching `self.shape`."""
        self.buffer = tree.map_structure(lambda s: RunningStat(s), self.shape)

    def apply_changes(
        self, other: "MeanStdFilter", with_buffer: bool = False, *args, **kwargs
    ) -> None:
        """Applies updates from the buffer of another filter.

        Args:
            other: Other filter to apply info from
            with_buffer: Flag for specifying if the buffer should be
                copied from other.

        .. testcode::
            :skipif: True

            a = MeanStdFilter(())
            a(1)
            a(2)
            print([a.running_stats.n, a.running_stats.mean, a.buffer.n])

        .. testoutput::

            [2, 1.5, 2]

        .. testcode::
            :skipif: True

            b = MeanStdFilter(())
            b(10)
            a.apply_changes(b, with_buffer=False)
            print([a.running_stats.n, a.running_stats.mean, a.buffer.n])

        .. testoutput::

            [3, 4.333333333333333, 2]

        .. testcode::
            :skipif: True

            a.apply_changes(b, with_buffer=True)
            print([a.running_stats.n, a.running_stats.mean, a.buffer.n])

        .. testoutput::

            [4, 5.75, 1]
        """
        # Merge the other filter's buffered deltas into our running stats.
        tree.map_structure(
            lambda rs, other_rs: rs.update(other_rs), self.running_stats, other.buffer
        )
        if with_buffer:
            self.buffer = tree.map_structure(lambda b: b.copy(), other.buffer)

    def copy(self) -> "MeanStdFilter":
        """Returns a copy of `self`."""
        other = MeanStdFilter(self.shape)
        other.sync(self)
        return other

    def as_serializable(self) -> "MeanStdFilter":
        # This filter holds no locks, so a plain copy is serializable.
        return self.copy()

    def sync(self, other: "MeanStdFilter") -> None:
        """Syncs all fields together from other filter.

        .. testcode::
            :skipif: True

            a = MeanStdFilter(())
            a(1)
            a(2)
            print([a.running_stats.n, a.running_stats.mean, a.buffer.n])

        .. testoutput::

            [2, array(1.5), 2]

        .. testcode::
            :skipif: True

            b = MeanStdFilter(())
            b(10)
            print([b.running_stats.n, b.running_stats.mean, b.buffer.n])

        .. testoutput::

            [1, array(10.0), 1]

        .. testcode::
            :skipif: True

            a.sync(b)
            print([a.running_stats.n, a.running_stats.mean, a.buffer.n])

        .. testoutput::

            [1, array(10.0), 1]
        """
        self.demean = other.demean
        self.destd = other.destd
        self.clip = other.clip
        # Deep-copy running stats and buffer (don't alias `other`'s state).
        self.running_stats = tree.map_structure(
            lambda rs: rs.copy(), other.running_stats
        )
        self.buffer = tree.map_structure(lambda b: b.copy(), other.buffer)

    def __call__(self, x: TensorStructType, update: bool = True) -> TensorStructType:
        """Normalizes `x` (and, if `update`, pushes it into the running stats)."""
        if self.no_preprocessor:
            x = tree.map_structure(lambda x_: np.asarray(x_), x)
        else:
            x = np.asarray(x)

        def _helper(x, rs, buffer, shape):
            # Discrete|MultiDiscrete spaces -> No normalization.
            if shape is None:
                return x

            # Keep dtype as-is throughout this filter.
            orig_dtype = x.dtype

            if update:
                if len(x.shape) == len(rs.shape) + 1:
                    # The vectorized case: leading dim is a batch dim ->
                    # push each row individually.
                    for i in range(x.shape[0]):
                        rs.push(x[i])
                        buffer.push(x[i])
                else:
                    # The unvectorized case.
                    rs.push(x)
                    buffer.push(x)
            if self.demean:
                x = x - rs.mean
            if self.destd:
                # SMALL_NUMBER guards against division by zero std.
                x = x / (rs.std + SMALL_NUMBER)
            if self.clip:
                x = np.clip(x, -self.clip, self.clip)
            return x.astype(orig_dtype)

        if self.no_preprocessor:
            # Apply `_helper` leaf-wise over the (possibly nested) structure.
            return tree.map_structure_up_to(
                x, _helper, x, self.running_stats, self.buffer, self.shape
            )
        else:
            return _helper(x, self.running_stats, self.buffer, self.shape)
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
@OldAPIStack
class ConcurrentMeanStdFilter(MeanStdFilter):
    """A (deprecated) MeanStdFilter variant intended for concurrent use."""

    is_concurrent = True

    def __init__(self, *args, **kwargs):
        super(ConcurrentMeanStdFilter, self).__init__(*args, **kwargs)
        deprecation_warning(
            old="ConcurrentMeanStdFilter",
            error=False,
            help="ConcurrentMeanStd filters are only used for testing and will "
            "therefore be deprecated in the course of moving to the "
            "Connetors API, where testing of filters will be done by other "
            "means.",
        )

        self._lock = threading.RLock()

        # Wraps `func` so every call holds `self._lock`.
        def lock_wrap(func):
            def wrapper(*args, **kwargs):
                with self._lock:
                    return func(*args, **kwargs)

            return wrapper

        # NOTE(review): assigning `__getattribute__` on the *instance* has no
        # effect on implicit attribute access — Python resolves special
        # methods on the type, not the instance. The intended "lock around
        # every attribute access" is therefore likely a no-op; confirm before
        # relying on this class for thread-safety.
        self.__getattribute__ = lock_wrap(self.__getattribute__)

    def as_serializable(self) -> "MeanStdFilter":
        """Returns non-concurrent version of current class"""
        other = MeanStdFilter(self.shape)
        other.sync(self)
        return other

    def copy(self) -> "ConcurrentMeanStdFilter":
        """Returns a copy of Filter."""
        other = ConcurrentMeanStdFilter(self.shape)
        other.sync(self)
        return other

    def __repr__(self) -> str:
        return "ConcurrentMeanStdFilter({}, {}, {}, {}, {}, {})".format(
            self.shape,
            self.demean,
            self.destd,
            self.clip,
            self.running_stats,
            self.buffer,
        )
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
@OldAPIStack
def get_filter(filter_config, shape):
    """Returns a new Filter instance matching the given config.

    Args:
        filter_config: One of the strings "MeanStdFilter",
            "ConcurrentMeanStdFilter", or "NoFilter", or a callable taking
            `shape` and returning a Filter instance.
        shape: The (possibly nested) shape of the values to filter.

    Returns:
        The newly created Filter object.

    Raises:
        ValueError: If `filter_config` is neither a known string nor a
            callable.
    """
    if filter_config == "MeanStdFilter":
        return MeanStdFilter(shape, clip=None)
    elif filter_config == "ConcurrentMeanStdFilter":
        return ConcurrentMeanStdFilter(shape, clip=None)
    elif filter_config == "NoFilter":
        return NoFilter()
    elif callable(filter_config):
        return filter_config(shape)
    else:
        # ValueError is more precise than the bare `Exception` raised
        # previously (and still caught by any `except Exception` in callers).
        raise ValueError("Unknown observation_filter: " + str(filter_config))
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/framework.py
ADDED
|
@@ -0,0 +1,352 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import numpy as np
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
from typing import Any, Optional
|
| 6 |
+
|
| 7 |
+
import tree # pip install dm_tree
|
| 8 |
+
|
| 9 |
+
from ray.rllib.utils.annotations import DeveloperAPI, PublicAPI
|
| 10 |
+
from ray.rllib.utils.deprecation import Deprecated
|
| 11 |
+
from ray.rllib.utils.typing import (
|
| 12 |
+
TensorShape,
|
| 13 |
+
TensorStructType,
|
| 14 |
+
TensorType,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@PublicAPI
def convert_to_tensor(
    data: TensorStructType,
    framework: str,
    device: Optional[str] = None,
):
    """Converts any nested numpy struct into framework-specific tensors.

    Args:
        data: The input data (numpy) to convert to framework-specific tensors.
        framework: The framework to convert to. Only "torch" and "tf2" allowed.
        device: An optional device name (for torch only).

    Returns:
        The converted tensor struct matching the input data.

    Raises:
        NotImplementedError: If `framework` is neither "torch" nor "tf2".
    """
    if framework == "torch":
        from ray.rllib.utils.torch_utils import convert_to_torch_tensor

        return convert_to_torch_tensor(data, device=device)

    if framework == "tf2":
        _, tf, _ = try_import_tf()
        # Convert leaf-wise; nesting structure is preserved.
        return tree.map_structure(tf.convert_to_tensor, data)

    raise NotImplementedError(
        f"framework={framework} not supported in `convert_to_tensor()`!"
    )
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@PublicAPI
def try_import_jax(error: bool = False):
    """Tries importing JAX and FLAX and returns both modules (or Nones).

    Args:
        error: Whether to raise an error if JAX/FLAX cannot be imported.

    Returns:
        Tuple containing the jax- and the flax modules.

    Raises:
        ImportError: If error=True and JAX is not installed.
    """
    # Test-only escape hatch: pretend JAX is unavailable.
    if "RLLIB_TEST_NO_JAX_IMPORT" in os.environ:
        logger.warning("Not importing JAX for test purposes.")
        return None, None

    try:
        import jax
        import flax

        return jax, flax
    except ImportError:
        if error:
            raise ImportError(
                "Could not import JAX! RLlib requires you to "
                "install at least one deep-learning framework: "
                "`pip install [torch|tensorflow|jax]`."
            )
        return None, None
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@PublicAPI
def try_import_tf(error: bool = False):
    """Tries importing tf and returns the module (or None).

    Args:
        error: Whether to raise an error if tf cannot be imported.

    Returns:
        Tuple containing
        1) tf1.x module (either from tf2.x.compat.v1 OR as tf1.x).
        2) tf module (resulting from `import tensorflow`). Either tf1.x or
        2.x. 3) The actually installed tf version as int: 1 or 2.

    Raises:
        ImportError: If error=True and tf is not installed.
    """
    # Stub returned in place of tf when it is unavailable (truth-tests False).
    tf_stub = _TFStub()
    # Make sure, these are reset after each test case
    # that uses them: del os.environ["RLLIB_TEST_NO_TF_IMPORT"]
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow for test purposes")
        return None, tf_stub, None

    # Silence TF's C++-level logging unless the user configured it.
    if "TF_CPP_MIN_LOG_LEVEL" not in os.environ:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # Try to reuse already imported tf module. This will avoid going through
    # the initial import steps below and thereby switching off v2_behavior
    # (switching off v2 behavior twice breaks all-framework tests for eager).
    was_imported = False
    if "tensorflow" in sys.modules:
        tf_module = sys.modules["tensorflow"]
        was_imported = True

    else:
        try:
            import tensorflow as tf_module
        except ImportError:
            if error:
                raise ImportError(
                    "Could not import TensorFlow! RLlib requires you to "
                    "install at least one deep-learning framework: "
                    "`pip install [torch|tensorflow|jax]`."
                )
            return None, tf_stub, None

    # Try "reducing" tf to tf.compat.v1.
    try:
        tf1_module = tf_module.compat.v1
        tf1_module.logging.set_verbosity(tf1_module.logging.ERROR)
        # Only disable v2 behavior on the *first* import (see note above).
        if not was_imported:
            tf1_module.disable_v2_behavior()
            tf1_module.enable_resource_variables()
        tf1_module.logging.set_verbosity(tf1_module.logging.WARN)
    # No compat.v1 -> return tf as is.
    except AttributeError:
        tf1_module = tf_module

    if not hasattr(tf_module, "__version__"):
        version = 1  # sphinx doc gen
    else:
        # NOTE(review): checks only the first two chars of `__version__` for
        # "2." — fine for "1.x"/"2.x" version strings; confirm if versions
        # beyond single-digit majors ever need handling.
        version = 2 if "2." in tf_module.__version__[:2] else 1

    return tf1_module, tf_module, version
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
# Fake module for tf.
class _TFStub:
    """Placeholder standing in for the `tensorflow` module when it is absent."""

    def __init__(self) -> None:
        # Mirror the `tf.keras` sub-module with another stub.
        self.keras = _KerasStub()

    def __bool__(self):
        # Make truth-testing (`if tf:`) behave as if tf were None/missing.
        return False
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
# Fake module for tf.keras.
class _KerasStub:
    """Placeholder for `tf.keras` when TensorFlow is not installed."""

    def __init__(self) -> None:
        # Instantiating `Model` raises a helpful ImportError instead of
        # a confusing AttributeError.
        self.Model = _FakeTfClassStub
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
# Fake classes under keras (e.g for tf.keras.Model)
|
| 164 |
+
class _FakeTfClassStub:
|
| 165 |
+
def __init__(self, *a, **kw):
|
| 166 |
+
raise ImportError("Could not import `tensorflow`. Try pip install tensorflow.")
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
@DeveloperAPI
def tf_function(tf_module):
    """Conditional decorator for @tf.function.

    Use @tf_function(tf) instead to avoid errors if tf is not installed."""

    # Actual decorator; `tf_module` may be None when tf is not installed.
    def decorator(func):
        # With tf available and NOT in eager mode -> graph-compile the
        # function via @tf.function.
        if tf_module is not None and not tf_module.executing_eagerly():
            return tf_module.function(func)
        # Otherwise hand the function back untouched (it either won't be
        # used, or eager execution makes tracing unnecessary).
        return func

    return decorator
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@PublicAPI
def try_import_tfp(error: bool = False):
    """Tries importing tfp and returns the module (or None).

    Args:
        error: Whether to raise an error if tfp cannot be imported.

    Returns:
        The tfp module.

    Raises:
        ImportError: If error=True and tfp is not installed.
    """
    # Test-only escape hatch (shares the flag used for the tf import).
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow Probability for test purposes.")
        return None

    try:
        import tensorflow_probability as tfp
    except ImportError as e:
        if error:
            raise e
        return None
    return tfp
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
# Fake module for torch.nn.
class _NNStub:
    """Placeholder for `torch.nn` when PyTorch is not installed."""

    def __init__(self, *a, **kw):
        # Fake nn.functional module within torch.nn.
        self.functional = None
        # `nn.Module` raises ImportError on instantiation.
        self.Module = _FakeTorchClassStub
        self.parallel = _ParallelStub()
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
# Fake class for e.g. torch.nn.Module to allow it to be inherited from.
|
| 223 |
+
class _FakeTorchClassStub:
|
| 224 |
+
def __init__(self, *a, **kw):
|
| 225 |
+
raise ImportError("Could not import `torch`. Try pip install torch.")
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
class _ParallelStub:
    """Placeholder for `torch.nn.parallel` when PyTorch is not installed."""

    def __init__(self, *a, **kw):
        # Both wrapper classes raise ImportError on instantiation.
        self.DataParallel = _FakeTorchClassStub
        self.DistributedDataParallel = _FakeTorchClassStub
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
@PublicAPI
def try_import_torch(error: bool = False):
    """Tries importing torch and returns the module (or None).

    Args:
        error: Whether to raise an error if torch cannot be imported.

    Returns:
        Tuple consisting of the torch- AND torch.nn modules.

    Raises:
        ImportError: If error=True and PyTorch is not installed.
    """
    # Test-only escape hatch: behave as if torch were missing.
    if "RLLIB_TEST_NO_TORCH_IMPORT" in os.environ:
        logger.warning("Not importing PyTorch for test purposes.")
        return _torch_stubs()

    try:
        import torch
        import torch.nn as nn
    except ImportError:
        if error:
            raise ImportError(
                "Could not import PyTorch! RLlib requires you to "
                "install at least one deep-learning framework: "
                "`pip install [torch|tensorflow|jax]`."
            )
        return _torch_stubs()
    return torch, nn
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def _torch_stubs():
    """Returns the (None, nn-stub) pair used when torch is unavailable."""
    return None, _NNStub()
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
@DeveloperAPI
def get_variable(
    value: Any,
    framework: str = "tf",
    trainable: bool = False,
    tf_name: str = "unnamed-variable",
    torch_tensor: bool = False,
    device: Optional[str] = None,
    shape: Optional[TensorShape] = None,
    dtype: Optional[TensorType] = None,
) -> Any:
    """Creates a tf variable, a torch tensor, or a python primitive.

    Args:
        value: The initial value to use. In the non-tf case, this will
            be returned as is. In the tf case, this could be a tf-Initializer
            object.
        framework: One of "tf", "torch", or None.
        trainable: Whether the generated variable should be
            trainable (tf)/require_grad (torch) or not (default: False).
        tf_name: For framework="tf": An optional name for the
            tf.Variable.
        torch_tensor: For framework="torch": Whether to actually create
            a torch.tensor, or just a python value (default).
        device: An optional torch device to use for
            the created torch tensor.
        shape: An optional shape to use iff `value`
            does not have any (e.g. if it's an initializer w/o explicit value).
        dtype: An optional dtype to use iff `value` does
            not have any (e.g. if it's an initializer w/o explicit value).
            This should always be a numpy dtype (e.g. np.float32, np.int64).

    Returns:
        A framework-specific variable (tf.Variable, torch.tensor, or
        python primitive).
    """
    if framework in ["tf2", "tf"]:
        import tensorflow as tf

        # Infer the dtype: explicit arg > value's own dtype > python type of
        # `value` (float -> tf.float32, int -> tf.int32) > None.
        dtype = dtype or getattr(
            value,
            "dtype",
            tf.float32
            if isinstance(value, float)
            else tf.int32
            if isinstance(value, int)
            else None,
        )
        return tf.compat.v1.get_variable(
            tf_name,
            initializer=value,
            dtype=dtype,
            trainable=trainable,
            # Only pass `shape` through when explicitly given.
            **({} if shape is None else {"shape": shape}),
        )
    elif framework == "torch" and torch_tensor is True:
        torch, _ = try_import_torch()
        if not isinstance(value, np.ndarray):
            value = np.array(value)
        var_ = torch.from_numpy(value)
        # Cast to the requested dtype (accepts torch or numpy dtype aliases).
        if dtype in [torch.float32, np.float32]:
            var_ = var_.float()
        elif dtype in [torch.int32, np.int32]:
            var_ = var_.int()
        elif dtype in [torch.float64, np.float64]:
            var_ = var_.double()

        if device:
            var_ = var_.to(device)
        var_.requires_grad = trainable
        return var_
    # torch or None: Return python primitive.
    return value
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
@Deprecated(
    old="rllib/utils/framework.py::get_activation_fn",
    new="rllib/models/utils.py::get_activation_fn",
    error=True,
)
def get_activation_fn(name: Optional[str] = None, framework: str = "tf"):
    # Body intentionally empty: with `error=True`, the @Deprecated decorator
    # presumably raises on any call, so this body should never run — confirm
    # against ray.rllib.utils.deprecation.Deprecated.
    pass
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/minibatch_utils.py
ADDED
|
@@ -0,0 +1,337 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
|
| 4 |
+
from ray.rllib.policy.sample_batch import MultiAgentBatch, concat_samples
|
| 5 |
+
from ray.rllib.policy.sample_batch import SampleBatch
|
| 6 |
+
from ray.rllib.utils.annotations import DeveloperAPI
|
| 7 |
+
from ray.rllib.utils.typing import EpisodeType
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@DeveloperAPI
class MiniBatchIteratorBase:
    """The base class for all minibatch iterators."""

    def __init__(
        self,
        batch: MultiAgentBatch,
        *,
        num_epochs: int = 1,
        shuffle_batch_per_epoch: bool = True,
        minibatch_size: int,
        num_total_minibatches: int = 0,
    ) -> None:
        """Initializes a MiniBatchIteratorBase instance.

        Args:
            batch: The input multi-agent batch.
            num_epochs: The number of complete passes over the entire train batch. Each
                pass might be further split into n minibatches (if `minibatch_size`
                provided). The train batch is generated from the given `episodes`
                through the Learner connector pipeline.
            minibatch_size: The size of minibatches to use to further split the train
                batch into per epoch. The train batch is generated from the given
                `episodes` through the Learner connector pipeline.
            num_total_minibatches: The total number of minibatches to loop through
                (over all `num_epochs` epochs). It's only required to set this to != 0
                in multi-agent + multi-GPU situations, in which the MultiAgentEpisodes
                themselves are roughly sharded equally, however, they might contain
                SingleAgentEpisodes with very lopsided length distributions. Thus,
                without this fixed, pre-computed value, one Learner might go through a
                different number of minibatch passes than others causing a deadlock.
        """
        # Base class stores no state; subclasses handle all bookkeeping.
        pass
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@DeveloperAPI
|
| 46 |
+
class MiniBatchCyclicIterator(MiniBatchIteratorBase):
|
| 47 |
+
"""This implements a simple multi-agent minibatch iterator.
|
| 48 |
+
|
| 49 |
+
This iterator will split the input multi-agent batch into minibatches where the
|
| 50 |
+
size of batch for each module_id (aka policy_id) is equal to minibatch_size. If the
|
| 51 |
+
input batch is smaller than minibatch_size, then the iterator will cycle through
|
| 52 |
+
the batch until it has covered `num_epochs` epochs.
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
    def __init__(
        self,
        batch: MultiAgentBatch,
        *,
        num_epochs: int = 1,
        minibatch_size: int,
        shuffle_batch_per_epoch: bool = True,
        num_total_minibatches: int = 0,
        _uses_new_env_runners: bool = False,
    ) -> None:
        """Initializes a MiniBatchCyclicIterator instance."""
        super().__init__(
            batch,
            num_epochs=num_epochs,
            minibatch_size=minibatch_size,
            shuffle_batch_per_epoch=shuffle_batch_per_epoch,
        )

        self._batch = batch
        self._minibatch_size = minibatch_size
        self._num_epochs = num_epochs
        self._shuffle_batch_per_epoch = shuffle_batch_per_epoch

        # mapping from module_id to the start index of the batch
        self._start = {mid: 0 for mid in batch.policy_batches.keys()}
        # mapping from module_id to the number of epochs covered for each module_id
        self._num_covered_epochs = {mid: 0 for mid in batch.policy_batches.keys()}

        # NOTE(review): presumably controls the LSTM/time-rank workaround
        # referenced in `__iter__`'s TODO — confirm before removing.
        self._uses_new_env_runners = _uses_new_env_runners

        # Number of minibatches yielded so far (across all epochs).
        self._minibatch_count = 0
        self._num_total_minibatches = num_total_minibatches
|
| 87 |
+
|
| 88 |
+
def __iter__(self):
|
| 89 |
+
while (
|
| 90 |
+
# Make sure each item in the total batch gets at least iterated over
|
| 91 |
+
# `self._num_epochs` times.
|
| 92 |
+
(
|
| 93 |
+
self._num_total_minibatches == 0
|
| 94 |
+
and min(self._num_covered_epochs.values()) < self._num_epochs
|
| 95 |
+
)
|
| 96 |
+
# Make sure we reach at least the given minimum number of mini-batches.
|
| 97 |
+
or (
|
| 98 |
+
self._num_total_minibatches > 0
|
| 99 |
+
and self._minibatch_count < self._num_total_minibatches
|
| 100 |
+
)
|
| 101 |
+
):
|
| 102 |
+
minibatch = {}
|
| 103 |
+
for module_id, module_batch in self._batch.policy_batches.items():
|
| 104 |
+
|
| 105 |
+
if len(module_batch) == 0:
|
| 106 |
+
raise ValueError(
|
| 107 |
+
f"The batch for module_id {module_id} is empty! "
|
| 108 |
+
"This will create an infinite loop because we need to cover "
|
| 109 |
+
"the same number of samples for each module_id."
|
| 110 |
+
)
|
| 111 |
+
s = self._start[module_id] # start
|
| 112 |
+
|
| 113 |
+
# TODO (sven): Fix this bug for LSTMs:
|
| 114 |
+
# In an RNN-setting, the Learner connector already has zero-padded
|
| 115 |
+
# and added a timerank to the batch. Thus, n_step would still be based
|
| 116 |
+
# on the BxT dimension, rather than the new B dimension (excluding T),
|
| 117 |
+
# which then leads to minibatches way too large.
|
| 118 |
+
# However, changing this already would break APPO/IMPALA w/o LSTMs as
|
| 119 |
+
# these setups require sequencing, BUT their batches are not yet time-
|
| 120 |
+
# ranked (this is done only in their loss functions via the
|
| 121 |
+
# `make_time_major` utility).
|
| 122 |
+
# Get rid of the _uses_new_env_runners c'tor arg, once this work is
|
| 123 |
+
# done.
|
| 124 |
+
n_steps = self._minibatch_size
|
| 125 |
+
|
| 126 |
+
samples_to_concat = []
|
| 127 |
+
|
| 128 |
+
# get_len is a function that returns the length of a batch
|
| 129 |
+
# if we are not slicing the batch in the batch dimension B, then
|
| 130 |
+
# the length of the batch is simply the length of the batch
|
| 131 |
+
# o.w the length of the batch is the length list of seq_lens.
|
| 132 |
+
if module_batch._slice_seq_lens_in_B:
|
| 133 |
+
assert module_batch.get(SampleBatch.SEQ_LENS) is not None, (
|
| 134 |
+
"MiniBatchCyclicIterator requires SampleBatch.SEQ_LENS"
|
| 135 |
+
"to be present in the batch for slicing a batch in the batch "
|
| 136 |
+
"dimension B."
|
| 137 |
+
)
|
| 138 |
+
|
| 139 |
+
def get_len(b):
|
| 140 |
+
return len(b[SampleBatch.SEQ_LENS])
|
| 141 |
+
|
| 142 |
+
if self._uses_new_env_runners:
|
| 143 |
+
n_steps = int(
|
| 144 |
+
get_len(module_batch)
|
| 145 |
+
* (self._minibatch_size / len(module_batch))
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
else:
|
| 149 |
+
|
| 150 |
+
def get_len(b):
|
| 151 |
+
return len(b)
|
| 152 |
+
|
| 153 |
+
# Cycle through the batch until we have enough samples.
|
| 154 |
+
while s + n_steps >= get_len(module_batch):
|
| 155 |
+
sample = module_batch[s:]
|
| 156 |
+
samples_to_concat.append(sample)
|
| 157 |
+
len_sample = get_len(sample)
|
| 158 |
+
assert len_sample > 0, "Length of a sample must be > 0!"
|
| 159 |
+
n_steps -= len_sample
|
| 160 |
+
s = 0
|
| 161 |
+
self._num_covered_epochs[module_id] += 1
|
| 162 |
+
# Shuffle the individual single-agent batch, if required.
|
| 163 |
+
# This should happen once per minibatch iteration in order to make
|
| 164 |
+
# each iteration go through a different set of minibatches.
|
| 165 |
+
if self._shuffle_batch_per_epoch:
|
| 166 |
+
module_batch.shuffle()
|
| 167 |
+
|
| 168 |
+
e = s + n_steps # end
|
| 169 |
+
if e > s:
|
| 170 |
+
samples_to_concat.append(module_batch[s:e])
|
| 171 |
+
|
| 172 |
+
# concatenate all the samples, we should have minibatch_size of sample
|
| 173 |
+
# after this step
|
| 174 |
+
minibatch[module_id] = concat_samples(samples_to_concat)
|
| 175 |
+
# roll minibatch to zero when we reach the end of the batch
|
| 176 |
+
self._start[module_id] = e
|
| 177 |
+
|
| 178 |
+
# Note (Kourosh): env_steps is the total number of env_steps that this
|
| 179 |
+
# multi-agent batch is covering. It should be simply inherited from the
|
| 180 |
+
# original multi-agent batch.
|
| 181 |
+
minibatch = MultiAgentBatch(minibatch, len(self._batch))
|
| 182 |
+
yield minibatch
|
| 183 |
+
|
| 184 |
+
self._minibatch_count += 1
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
class MiniBatchDummyIterator(MiniBatchIteratorBase):
    """Iterator that yields the entire input batch exactly once (no splitting)."""

    def __init__(self, batch: MultiAgentBatch, **kwargs):
        """Initializes a MiniBatchDummyIterator instance.

        Args:
            batch: The multi-agent batch to be yielded as-is by `__iter__`.
            **kwargs: Forward-compatibility kwargs, passed through to the base
                class unchanged.
        """
        super().__init__(batch, **kwargs)
        self._batch = batch

    def __iter__(self):
        # A single "minibatch": the full, unsplit input batch.
        yield self._batch
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
@DeveloperAPI
class ShardBatchIterator:
    """Iterator for sharding batch into num_shards batches.

    Args:
        batch: The input multi-agent batch.
        num_shards: The number of shards to split the batch into.

    Yields:
        A MultiAgentBatch of size len(batch) / num_shards.
    """

    def __init__(self, batch: MultiAgentBatch, num_shards: int):
        self._batch = batch
        self._num_shards = num_shards

    def __iter__(self):
        # TODO (sven): The following way of sharding a multi-agent batch destroys
        #  the relationship of the different agents' timesteps to each other.
        #  Thus, in case the algorithm requires agent-synchronized data (aka.
        #  "lockstep"), the `ShardBatchIterator` cannot be used.
        for shard_idx in range(self._num_shards):
            policy_slices = {}
            for pid, sub_batch in self._batch.policy_batches.items():
                # Ceil so that the last shard may come out slightly smaller.
                per_shard = math.ceil(len(sub_batch) / self._num_shards)
                lo = per_shard * shard_idx
                hi = min(lo + per_shard, len(sub_batch))
                policy_slices[pid] = sub_batch[int(lo) : int(hi)]
            # TODO (Avnish): int(batch_size) ? How should we shard MA batches really?
            # NOTE(review): env_steps is taken from the *last* policy's per-shard
            # size (pre-existing quirk, preserved here).
            yield MultiAgentBatch(policy_slices, int(per_shard))
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
@DeveloperAPI
class ShardEpisodesIterator:
    """Iterator for sharding a list of Episodes into `num_shards` lists of Episodes."""

    def __init__(
        self,
        episodes: List[EpisodeType],
        num_shards: int,
        len_lookback_buffer: Optional[int] = None,
    ):
        """Initializes a ShardEpisodesIterator instance.

        Args:
            episodes: The input list of Episodes.
            num_shards: The number of shards to split the episodes into.
            len_lookback_buffer: An optional length of a lookback buffer to enforce
                on the returned shards. When splitting an episode, the second piece
                might need a lookback buffer (into the first piece) depending on the
                user's settings.
        """
        # Longest-first order makes the greedy fill below pack more evenly.
        self._episodes = sorted(episodes, key=len, reverse=True)
        self._num_shards = num_shards
        self._len_lookback_buffer = len_lookback_buffer
        self._total_length = sum(len(e) for e in episodes)
        # Per-shard target timestep counts; floor-divide the remainder so the
        # targets sum exactly to `self._total_length`.
        self._target_lengths = [0 for _ in range(self._num_shards)]
        remaining_length = self._total_length
        for s in range(self._num_shards):
            len_ = remaining_length // (num_shards - s)
            self._target_lengths[s] = len_
            remaining_length -= len_

    def __iter__(self) -> List[EpisodeType]:
        """Runs one iteration through this sharder.

        Yields:
            A sub-list of Episodes of size roughly `len(episodes) / num_shards`. The
            yielded sublists might have slightly different total sums of episode
            lengths, in order to not have to drop even a single timestep.
        """
        sublists = [[] for _ in range(self._num_shards)]
        lengths = [0 for _ in range(self._num_shards)]
        episode_index = 0

        while episode_index < len(self._episodes):
            episode = self._episodes[episode_index]
            # Greedy: always fill the currently shortest shard.
            min_index = lengths.index(min(lengths))

            # Add the whole episode if it fits within the target length
            if lengths[min_index] + len(episode) <= self._target_lengths[min_index]:
                sublists[min_index].append(episode)
                lengths[min_index] += len(episode)
                episode_index += 1
            # Otherwise, slice the episode
            else:
                remaining_length = self._target_lengths[min_index] - lengths[min_index]
                if remaining_length > 0:
                    slice_part, remaining_part = (
                        # Note that the first slice will automatically "inherit" the
                        # lookback buffer size of the episode.
                        episode[:remaining_length],
                        # However, the second slice might need a user defined lookback
                        # buffer (into the first slice).
                        episode.slice(
                            slice(remaining_length, None),
                            len_lookback_buffer=self._len_lookback_buffer,
                        ),
                    )
                    sublists[min_index].append(slice_part)
                    lengths[min_index] += len(slice_part)
                    # Re-queue the leftover piece at the same index; it will be
                    # placed into (one of) the next shard(s).
                    self._episodes[episode_index] = remaining_part
                else:
                    # Shard already exactly at its target; append the whole episode
                    # anyway rather than dropping timesteps.
                    # NOTE(review): `lengths[min_index]` is deliberately NOT bumped
                    # here — presumably to keep the greedy targets intact; confirm.
                    assert remaining_length == 0
                    sublists[min_index].append(episode)
                    episode_index += 1

        for sublist in sublists:
            yield sublist
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
@DeveloperAPI
class ShardObjectRefIterator:
    """Iterator for sharding a list of ray ObjectRefs into num_shards sub-lists.

    Args:
        object_refs: The input list of ray ObjectRefs.
        num_shards: The number of shards to split the references into.

    Yields:
        A sub-list of ray ObjectRefs with lengths as equal as possible.
    """

    def __init__(self, object_refs, num_shards: int):
        self._object_refs = object_refs
        self._num_shards = num_shards

    def __iter__(self):
        # Each shard gets `base` refs; the first `extra` shards get one more,
        # so that all items are distributed and shard sizes differ by <= 1.
        total = len(self._object_refs)
        base, extra = divmod(total, self._num_shards)

        offset = 0
        for shard_idx in range(self._num_shards):
            length = base + (1 if shard_idx < extra else 0)
            yield self._object_refs[offset : offset + length]
            offset += length
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/policy.py
ADDED
|
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gymnasium as gym
|
| 2 |
+
import logging
|
| 3 |
+
import numpy as np
|
| 4 |
+
from typing import (
|
| 5 |
+
Callable,
|
| 6 |
+
Dict,
|
| 7 |
+
List,
|
| 8 |
+
Optional,
|
| 9 |
+
Tuple,
|
| 10 |
+
Type,
|
| 11 |
+
Union,
|
| 12 |
+
TYPE_CHECKING,
|
| 13 |
+
)
|
| 14 |
+
import tree # pip install dm_tree
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
import ray.cloudpickle as pickle
|
| 18 |
+
from ray.rllib.core.rl_module import validate_module_id
|
| 19 |
+
from ray.rllib.models.preprocessors import ATARI_OBS_SHAPE
|
| 20 |
+
from ray.rllib.policy.policy import PolicySpec
|
| 21 |
+
from ray.rllib.policy.sample_batch import SampleBatch
|
| 22 |
+
from ray.rllib.utils.deprecation import Deprecated
|
| 23 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 24 |
+
from ray.rllib.utils.typing import (
|
| 25 |
+
ActionConnectorDataType,
|
| 26 |
+
AgentConnectorDataType,
|
| 27 |
+
AgentConnectorsOutput,
|
| 28 |
+
PartialAlgorithmConfigDict,
|
| 29 |
+
PolicyState,
|
| 30 |
+
TensorStructType,
|
| 31 |
+
TensorType,
|
| 32 |
+
)
|
| 33 |
+
from ray.util import log_once
|
| 34 |
+
from ray.util.annotations import PublicAPI
|
| 35 |
+
|
| 36 |
+
if TYPE_CHECKING:
|
| 37 |
+
from ray.rllib.policy.policy import Policy
|
| 38 |
+
|
| 39 |
+
logger = logging.getLogger(__name__)
|
| 40 |
+
|
| 41 |
+
tf1, tf, tfv = try_import_tf()
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@PublicAPI
def create_policy_for_framework(
    policy_id: str,
    policy_class: Type["Policy"],
    merged_config: PartialAlgorithmConfigDict,
    observation_space: gym.Space,
    action_space: gym.Space,
    worker_index: int = 0,
    session_creator: Optional[Callable[[], "tf1.Session"]] = None,
    seed: Optional[int] = None,
):
    """Framework-specific policy creation logics.

    Args:
        policy_id: Policy ID.
        policy_class: Policy class type.
        merged_config: Complete policy config.
        observation_space: Observation space of env.
        action_space: Action space of env.
        worker_index: Index of worker holding this policy. Default is 0.
        session_creator: An optional tf1.Session creation callable.
        seed: Optional random seed.

    Returns:
        The newly created `policy_class` instance (framework-specific setup
        already applied).
    """
    # Local import to avoid a circular dependency at module load time.
    from ray.rllib.algorithms.algorithm_config import AlgorithmConfig

    if isinstance(merged_config, AlgorithmConfig):
        merged_config = merged_config.to_dict()

    # add policy_id to merged_config
    merged_config["__policy_id"] = policy_id

    framework = merged_config.get("framework", "tf")
    # Tf.
    if framework in ["tf2", "tf"]:
        # Unique variable scope per policy (and per worker, if not the local one).
        var_scope = policy_id + (f"_wk{worker_index}" if worker_index else "")
        # For tf static graph, build every policy in its own graph
        # and create a new session for it.
        if framework == "tf":
            with tf1.Graph().as_default():
                # Session creator function provided manually -> Use this one to
                # create the tf1 session.
                if session_creator:
                    sess = session_creator()
                # Use a default session creator, based only on our `tf_session_args` in
                # the config.
                else:
                    sess = tf1.Session(
                        config=tf1.ConfigProto(**merged_config["tf_session_args"])
                    )

                with sess.as_default():
                    # Set graph-level seed.
                    if seed is not None:
                        tf1.set_random_seed(seed)
                    with tf1.variable_scope(var_scope):
                        return policy_class(
                            observation_space, action_space, merged_config
                        )
        # For tf-eager: no graph, no session.
        else:
            with tf1.variable_scope(var_scope):
                return policy_class(observation_space, action_space, merged_config)
    # Non-tf: No graph, no session.
    else:
        return policy_class(observation_space, action_space, merged_config)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@PublicAPI(stability="alpha")
def parse_policy_specs_from_checkpoint(
    path: str,
) -> Tuple[PartialAlgorithmConfigDict, Dict[str, PolicySpec], Dict[str, PolicyState]]:
    """Read and parse policy specifications from a checkpoint file.

    Args:
        path: Path to a policy checkpoint.

    Returns:
        A tuple of: base policy config, dictionary of policy specs, and
        dictionary of policy states.
    """
    with open(path, "rb") as f:
        checkpoint_dict = pickle.load(f)
    # The worker payload is itself a pickled binary blob inside the checkpoint,
    # keyed by "worker".
    worker_state = pickle.loads(checkpoint_dict["worker"])

    policy_config = worker_state["policy_config"]
    # Newer checkpoints store states under "policy_states"; older ones
    # under "state".
    policy_states = worker_state.get("policy_states", worker_state["state"])
    # Deserialize each policy spec from its serialized form.
    policy_specs = {
        pid: PolicySpec.deserialize(serialized)
        for pid, serialized in worker_state["policy_specs"].items()
    }

    return policy_config, policy_specs, policy_states
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
@PublicAPI(stability="alpha")
def local_policy_inference(
    policy: "Policy",
    env_id: str,
    agent_id: str,
    obs: TensorStructType,
    reward: Optional[float] = None,
    terminated: Optional[bool] = None,
    truncated: Optional[bool] = None,
    info: Optional[Dict] = None,
    explore: bool = None,
    timestep: Optional[int] = None,
) -> TensorStructType:
    """Run a connector enabled policy using environment observation.

    policy_inference manages policy and agent/action connectors,
    so the user does not have to care about RNN state buffering or
    extra fetch dictionaries.
    Note that connectors are intentionally run separately from
    compute_actions_from_input_dict(), so we can have the option
    of running per-user connectors on the client side in a
    server-client deployment.

    Args:
        policy: Policy object used in inference.
        env_id: Environment ID. RLlib builds environments' trajectories internally with
            connectors based on this, i.e. one trajectory per (env_id, agent_id) tuple.
        agent_id: Agent ID. RLlib builds agents' trajectories internally with connectors
            based on this, i.e. one trajectory per (env_id, agent_id) tuple.
        obs: Environment observation to base the action on.
        reward: Reward that is potentially used during inference. If not required,
            may be left empty. Some policies have ViewRequirements that require this.
            This can be set to zero at the first inference step - for example after
            calling gym.Env.reset.
        terminated: `Terminated` flag that is potentially used during inference. If not
            required, may be left None. Some policies have ViewRequirements that
            require this extra information.
        truncated: `Truncated` flag that is potentially used during inference. If not
            required, may be left None. Some policies have ViewRequirements that
            require this extra information.
        info: Info that is potentially used during inference. If not required,
            may be left empty. Some policies have ViewRequirements that require this.
        explore: Whether to pick an exploitation or exploration action
            (default: None -> use self.config["explore"]).
        timestep: The current (sampling) time step.

    Returns:
        List of outputs from policy forward pass.
    """
    assert (
        policy.agent_connectors
    ), "policy_inference only works with connector enabled policies."

    # Warn (once) if `obs` looks like a raw, un-wrapped Atari frame.
    __check_atari_obs_space(obs)

    # Put policy in inference mode, so we don't spend time on training
    # only transformations.
    policy.agent_connectors.in_eval()
    policy.action_connectors.in_eval()

    # TODO(jungong) : support multiple env, multiple agent inference.
    # Only keys whose values were actually provided are added, so connectors
    # see exactly the data the caller supplied.
    input_dict = {SampleBatch.NEXT_OBS: obs}
    if reward is not None:
        input_dict[SampleBatch.REWARDS] = reward
    if terminated is not None:
        input_dict[SampleBatch.TERMINATEDS] = terminated
    if truncated is not None:
        input_dict[SampleBatch.TRUNCATEDS] = truncated
    if info is not None:
        input_dict[SampleBatch.INFOS] = info

    acd_list: List[AgentConnectorDataType] = [
        AgentConnectorDataType(env_id, agent_id, input_dict)
    ]
    ac_outputs: List[AgentConnectorsOutput] = policy.agent_connectors(acd_list)
    outputs = []
    for ac in ac_outputs:
        policy_output = policy.compute_actions_from_input_dict(
            ac.data.sample_batch,
            explore=explore,
            timestep=timestep,
        )

        # Note (Kourosh): policy output is batched, the AgentConnectorDataType should
        # not be batched during inference. This is the assumption made in AgentCollector
        policy_output = tree.map_structure(lambda x: x[0], policy_output)

        action_connector_data = ActionConnectorDataType(
            env_id, agent_id, ac.data.raw_dict, policy_output
        )

        if policy.action_connectors:
            acd = policy.action_connectors(action_connector_data)
            actions = acd.output
        else:
            # No action connectors: the first element of the policy output
            # tuple holds the (unbatched) actions.
            actions = policy_output[0]

        outputs.append(actions)

        # Notify agent connectors with this new policy output.
        # Necessary for state buffering agent connectors, for example.
        policy.agent_connectors.on_policy_output(action_connector_data)
    return outputs
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
@PublicAPI
def compute_log_likelihoods_from_input_dict(
    policy: "Policy", batch: Union[SampleBatch, Dict[str, TensorStructType]]
):
    """Returns log likelihood for actions in given batch for policy.

    Computes likelihoods by passing the observations through the current
    policy's `compute_log_likelihoods()` method

    Args:
        batch: The SampleBatch or MultiAgentBatch to calculate action
            log likelihoods from. This batch/batches must contain OBS
            and ACTIONS keys.

    Returns:
        The probabilities of the actions in the batch, given the
        observations and the policy.
    """
    # Count RNN state inputs ("state_in_0", "state_in_1", ...) present in
    # the batch; they must be forwarded to the policy in order.
    num_state_inputs = sum(
        1 for key in batch.keys() if key.startswith("state_in_")
    )
    state_keys = [f"state_in_{i}" for i in range(num_state_inputs)]
    log_likelihoods: TensorType = policy.compute_log_likelihoods(
        actions=batch[SampleBatch.ACTIONS],
        obs_batch=batch[SampleBatch.OBS],
        state_batches=[batch[key] for key in state_keys],
        prev_action_batch=batch.get(SampleBatch.PREV_ACTIONS),
        prev_reward_batch=batch.get(SampleBatch.PREV_REWARDS),
        actions_normalized=policy.config.get("actions_in_input_normalized", False),
    )
    return log_likelihoods
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
@Deprecated(new="Policy.from_checkpoint([checkpoint path], [policy IDs]?)", error=True)
def load_policies_from_checkpoint(path, policy_ids=None):
    # Deprecated stub: the `@Deprecated(error=True)` decorator raises an error
    # directing users to `Policy.from_checkpoint()`; this body is never reached.
    pass
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def __check_atari_obs_space(obs):
    """Warns (once) if `obs` looks like a raw, non-deepmind-wrapped Atari frame.

    Args:
        obs: The observation (possibly a nested structure of arrays) that was
            fed into `local_policy_inference()`.
    """
    # TODO(Artur): Remove this after we have migrated deepmind style preprocessing into
    # connectors (and don't auto-wrap in RW anymore)
    if any(
        o.shape == ATARI_OBS_SHAPE if isinstance(o, np.ndarray) else False
        for o in tree.flatten(obs)
    ):
        # Only warn once per process to avoid log spam.
        if log_once("warn_about_possibly_non_wrapped_atari_env"):
            logger.warning(
                "The observation you fed into local_policy_inference() has "
                "dimensions (210, 160, 3), which is the standard for atari "
                "environments. If RLlib raises an error including a related "
                "dimensionality mismatch, you may need to use "
                "ray.rllib.env.wrappers.atari_wrappers.wrap_deepmind to wrap "
                # Fixed typo in user-facing message: "you environment" ->
                # "your environment".
                "your environment."
            )
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
# @OldAPIStack
|
| 303 |
+
validate_policy_id = validate_module_id
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/__init__.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.rllib.utils.replay_buffers.episode_replay_buffer import EpisodeReplayBuffer
|
| 2 |
+
from ray.rllib.utils.replay_buffers.fifo_replay_buffer import FifoReplayBuffer
|
| 3 |
+
from ray.rllib.utils.replay_buffers.multi_agent_mixin_replay_buffer import (
|
| 4 |
+
MultiAgentMixInReplayBuffer,
|
| 5 |
+
)
|
| 6 |
+
from ray.rllib.utils.replay_buffers.multi_agent_episode_buffer import (
|
| 7 |
+
MultiAgentEpisodeReplayBuffer,
|
| 8 |
+
)
|
| 9 |
+
from ray.rllib.utils.replay_buffers.multi_agent_prioritized_episode_buffer import (
|
| 10 |
+
MultiAgentPrioritizedEpisodeReplayBuffer,
|
| 11 |
+
)
|
| 12 |
+
from ray.rllib.utils.replay_buffers.multi_agent_prioritized_replay_buffer import (
|
| 13 |
+
MultiAgentPrioritizedReplayBuffer,
|
| 14 |
+
)
|
| 15 |
+
from ray.rllib.utils.replay_buffers.multi_agent_replay_buffer import (
|
| 16 |
+
MultiAgentReplayBuffer,
|
| 17 |
+
ReplayMode,
|
| 18 |
+
)
|
| 19 |
+
from ray.rllib.utils.replay_buffers.prioritized_episode_buffer import (
|
| 20 |
+
PrioritizedEpisodeReplayBuffer,
|
| 21 |
+
)
|
| 22 |
+
from ray.rllib.utils.replay_buffers.prioritized_replay_buffer import (
|
| 23 |
+
PrioritizedReplayBuffer,
|
| 24 |
+
)
|
| 25 |
+
from ray.rllib.utils.replay_buffers.replay_buffer import ReplayBuffer, StorageUnit
|
| 26 |
+
from ray.rllib.utils.replay_buffers.reservoir_replay_buffer import ReservoirReplayBuffer
|
| 27 |
+
from ray.rllib.utils.replay_buffers import utils
|
| 28 |
+
|
| 29 |
+
__all__ = [
|
| 30 |
+
"EpisodeReplayBuffer",
|
| 31 |
+
"FifoReplayBuffer",
|
| 32 |
+
"MultiAgentEpisodeReplayBuffer",
|
| 33 |
+
"MultiAgentMixInReplayBuffer",
|
| 34 |
+
"MultiAgentPrioritizedEpisodeReplayBuffer",
|
| 35 |
+
"MultiAgentPrioritizedReplayBuffer",
|
| 36 |
+
"MultiAgentReplayBuffer",
|
| 37 |
+
"PrioritizedEpisodeReplayBuffer",
|
| 38 |
+
"PrioritizedReplayBuffer",
|
| 39 |
+
"ReplayMode",
|
| 40 |
+
"ReplayBuffer",
|
| 41 |
+
"ReservoirReplayBuffer",
|
| 42 |
+
"StorageUnit",
|
| 43 |
+
"utils",
|
| 44 |
+
]
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/__pycache__/multi_agent_prioritized_episode_buffer.cpython-310.pyc
ADDED
|
Binary file (25 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/__pycache__/simple_replay_buffer.cpython-310.pyc
ADDED
|
Binary file (200 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/fifo_replay_buffer.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from typing import Any, Dict, Optional
|
| 3 |
+
|
| 4 |
+
from ray.rllib.policy.sample_batch import MultiAgentBatch
|
| 5 |
+
from ray.rllib.utils.annotations import override
|
| 6 |
+
from ray.rllib.utils.replay_buffers.replay_buffer import ReplayBuffer, StorageUnit
|
| 7 |
+
from ray.rllib.utils.typing import SampleBatchType
|
| 8 |
+
from ray.util.annotations import DeveloperAPI
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@DeveloperAPI
|
| 12 |
+
class FifoReplayBuffer(ReplayBuffer):
|
| 13 |
+
"""This replay buffer implements a FIFO queue.
|
| 14 |
+
|
| 15 |
+
Sometimes, e.g. for offline use cases, it may be desirable to use
|
| 16 |
+
off-policy algorithms without a Replay Buffer.
|
| 17 |
+
This FifoReplayBuffer can be used in-place to achieve the same effect
|
| 18 |
+
without having to introduce separate algorithm execution branches.
|
| 19 |
+
|
| 20 |
+
For simplicity and efficiency reasons, this replay buffer stores incoming
|
| 21 |
+
sample batches as-is, and returns them one at time.
|
| 22 |
+
This is to avoid any additional load when this replay buffer is used.
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
def __init__(self, *args, **kwargs):
|
| 26 |
+
"""Initializes a FifoReplayBuffer.
|
| 27 |
+
|
| 28 |
+
Args:
|
| 29 |
+
``*args`` : Forward compatibility args.
|
| 30 |
+
``**kwargs``: Forward compatibility kwargs.
|
| 31 |
+
"""
|
| 32 |
+
# Completely by-passing underlying ReplayBuffer by setting its
|
| 33 |
+
# capacity to 1 (lowest allowed capacity).
|
| 34 |
+
ReplayBuffer.__init__(self, 1, StorageUnit.FRAGMENTS, **kwargs)
|
| 35 |
+
|
| 36 |
+
self._queue = []
|
| 37 |
+
|
| 38 |
+
@DeveloperAPI
|
| 39 |
+
@override(ReplayBuffer)
|
| 40 |
+
def add(self, batch: SampleBatchType, **kwargs) -> None:
|
| 41 |
+
return self._queue.append(batch)
|
| 42 |
+
|
| 43 |
+
@DeveloperAPI
|
| 44 |
+
@override(ReplayBuffer)
|
| 45 |
+
def sample(self, *args, **kwargs) -> Optional[SampleBatchType]:
|
| 46 |
+
"""Sample a saved training batch from this buffer.
|
| 47 |
+
|
| 48 |
+
Args:
|
| 49 |
+
``*args`` : Forward compatibility args.
|
| 50 |
+
``**kwargs``: Forward compatibility kwargs.
|
| 51 |
+
|
| 52 |
+
Returns:
|
| 53 |
+
A single training batch from the queue.
|
| 54 |
+
"""
|
| 55 |
+
if len(self._queue) <= 0:
|
| 56 |
+
# Return empty SampleBatch if queue is empty.
|
| 57 |
+
return MultiAgentBatch({}, 0)
|
| 58 |
+
batch = self._queue.pop(0)
|
| 59 |
+
# Equal weights of 1.0.
|
| 60 |
+
batch["weights"] = np.ones(len(batch))
|
| 61 |
+
return batch
|
| 62 |
+
|
| 63 |
+
@DeveloperAPI
def update_priorities(self, *args, **kwargs) -> None:
    """Update priorities of items at given indices.

    No-op for this replay buffer, which does not perform any
    prioritization.

    Args:
        ``*args``: Forward compatibility args.
        ``**kwargs``: Forward compatibility kwargs.
    """
|
| 74 |
+
|
| 75 |
+
@DeveloperAPI
@override(ReplayBuffer)
def stats(self, debug: bool = False) -> Dict:
    """Returns the stats of this buffer.

    Args:
        debug: If true, would add sample eviction statistics to the returned
            stats dict; this buffer reports none either way.

    Returns:
        An (always empty) dictionary of stats about this buffer.
    """
    # Report nothing - as if this replay buffer has never existed.
    return {}
|
| 88 |
+
|
| 89 |
+
@DeveloperAPI
@override(ReplayBuffer)
def get_state(self) -> Dict[str, Any]:
    """Returns all local state.

    Returns:
        The serializable local state. Always empty: this pass-through
        replay buffer does not save any state.
    """
    return {}
|
| 99 |
+
|
| 100 |
+
@DeveloperAPI
@override(ReplayBuffer)
def set_state(self, state: Dict[str, Any]) -> None:
    """Restores all local state to the provided `state`.

    No-op: this pass-through buffer holds no restorable state (see
    `get_state`, which always returns an empty dict).

    Args:
        state: The new state to set this buffer. Can be obtained by calling
            `self.get_state()`.
    """
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/multi_agent_episode_buffer.py
ADDED
|
@@ -0,0 +1,1026 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
from collections import defaultdict, deque
|
| 3 |
+
from gymnasium.core import ActType, ObsType
|
| 4 |
+
import numpy as np
|
| 5 |
+
import scipy
|
| 6 |
+
from typing import Any, Dict, List, Optional, Set, Tuple, Union
|
| 7 |
+
|
| 8 |
+
from ray.rllib.core.columns import Columns
|
| 9 |
+
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
|
| 10 |
+
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
|
| 11 |
+
from ray.rllib.utils.replay_buffers.episode_replay_buffer import EpisodeReplayBuffer
|
| 12 |
+
from ray.rllib.utils import force_list
|
| 13 |
+
from ray.rllib.utils.annotations import override, DeveloperAPI
|
| 14 |
+
from ray.rllib.utils.spaces.space_utils import batch
|
| 15 |
+
from ray.rllib.utils.typing import AgentID, ModuleID, SampleBatchType
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@DeveloperAPI
|
| 19 |
+
class MultiAgentEpisodeReplayBuffer(EpisodeReplayBuffer):
|
| 20 |
+
"""Multi-agent episode replay buffer that stores episodes by their IDs.
|
| 21 |
+
|
| 22 |
+
This class implements a replay buffer as used in "playing Atari with Deep
|
| 23 |
+
Reinforcement Learning" (Mnih et al., 2013) for multi-agent reinforcement
|
| 24 |
+
learning.
|
| 25 |
+
|
| 26 |
+
Each "row" (a slot in a deque) in the buffer is occupied by one episode. If an
|
| 27 |
+
incomplete episode is added to the buffer and then another chunk of that episode is
|
| 28 |
+
added at a later time, the buffer will automatically concatenate the new fragment to
|
| 29 |
+
the original episode. This way, episodes can be completed via subsequent `add`
|
| 30 |
+
calls.
|
| 31 |
+
|
| 32 |
+
Sampling returns a size `B` episode list (number of 'rows'), where each episode
|
| 33 |
+
holds a tuple of the form
|
| 34 |
+
|
| 35 |
+
`(o_t, a_t, sum(r_t+1:t+n), o_t+n)`
|
| 36 |
+
|
| 37 |
+
where `o_t` is the observation in `t`, `a_t` the action chosen at observation `o_t`,
|
| 38 |
+
`o_t+n` is the observation `n` timesteps later and `sum(r_t+1:t+n)` is the sum of
|
| 39 |
+
all rewards collected over the time steps between `t+1` and `t+n`. The `n`-step can
|
| 40 |
+
be chosen freely when sampling and defaults to `1`. If `n_step` is a tuple it is
|
| 41 |
+
sampled uniformly across the interval defined by the tuple (for each row in the
|
| 42 |
+
batch).
|
| 43 |
+
|
| 44 |
+
Each episode contains - in addition to the data tuples presented above - two further
|
| 45 |
+
elements in its `extra_model_outputs`, namely `n_steps` and `weights`. The former
|
| 46 |
+
holds the `n_step` used for the sampled timesteps in the episode and the latter the
|
| 47 |
+
corresponding (importance sampling) weight for the transition.
|
| 48 |
+
|
| 49 |
+
.. testcode::
|
| 50 |
+
|
| 51 |
+
import gymnasium as gym
|
| 52 |
+
|
| 53 |
+
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
|
| 54 |
+
from ray.rllib.examples.envs.classes.multi_agent import MultiAgentCartPole
|
| 55 |
+
from ray.rllib.utils.replay_buffers import MultiAgentEpisodeReplayBuffer
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# Create the environment.
|
| 59 |
+
env = MultiAgentCartPole({"num_agents": 2})
|
| 60 |
+
|
| 61 |
+
# Set up the loop variables
|
| 62 |
+
agent_ids = env.agents
|
| 63 |
+
agent_ids.append("__all__")
|
| 64 |
+
terminateds = {aid: False for aid in agent_ids}
|
| 65 |
+
truncateds = {aid: False for aid in agent_ids}
|
| 66 |
+
num_timesteps = 10000
|
| 67 |
+
episodes = []
|
| 68 |
+
|
| 69 |
+
# Initialize the first episode entries.
|
| 70 |
+
eps = MultiAgentEpisode()
|
| 71 |
+
obs, infos = env.reset()
|
| 72 |
+
eps.add_env_reset(observations=obs, infos=infos)
|
| 73 |
+
|
| 74 |
+
# Sample 10,000 env timesteps.
|
| 75 |
+
for i in range(num_timesteps):
|
| 76 |
+
# If terminated we create a new episode.
|
| 77 |
+
if eps.is_done:
|
| 78 |
+
episodes.append(eps.finalize())
|
| 79 |
+
eps = MultiAgentEpisode()
|
| 80 |
+
terminateds = {aid: False for aid in agent_ids}
|
| 81 |
+
truncateds = {aid: False for aid in agent_ids}
|
| 82 |
+
obs, infos = env.reset()
|
| 83 |
+
eps.add_env_reset(observations=obs, infos=infos)
|
| 84 |
+
|
| 85 |
+
# Sample a random action for all agents that should step in the episode
|
| 86 |
+
# next.
|
| 87 |
+
actions = {
|
| 88 |
+
aid: env.get_action_space(aid).sample()
|
| 89 |
+
for aid in eps.get_agents_to_act()
|
| 90 |
+
}
|
| 91 |
+
obs, rewards, terminateds, truncateds, infos = env.step(actions)
|
| 92 |
+
eps.add_env_step(
|
| 93 |
+
obs,
|
| 94 |
+
actions,
|
| 95 |
+
rewards,
|
| 96 |
+
infos,
|
| 97 |
+
terminateds=terminateds,
|
| 98 |
+
truncateds=truncateds
|
| 99 |
+
)
|
| 100 |
+
|
| 101 |
+
# Add the last (truncated) episode to the list of episodes.
|
| 102 |
+
if not eps.is_done:
|
| 103 |
+
episodes.append(eps)
|
| 104 |
+
|
| 105 |
+
# Create the buffer.
|
| 106 |
+
buffer = MultiAgentEpisodeReplayBuffer()
|
| 107 |
+
# Add the list of episodes sampled.
|
| 108 |
+
buffer.add(episodes)
|
| 109 |
+
|
| 110 |
+
# Pull a sample from the buffer using an `n-step` of 3.
|
| 111 |
+
sample = buffer.sample(num_items=256, gamma=0.95, n_step=3)
|
| 112 |
+
"""
|
| 113 |
+
|
| 114 |
+
def __init__(
    self,
    capacity: int = 10000,
    *,
    batch_size_B: int = 16,
    batch_length_T: int = 1,
    **kwargs,
):
    """Initializes a multi-agent episode replay buffer.

    Args:
        capacity: The total number of timesteps to be storable in this buffer.
            Will start ejecting old episodes once this limit is reached.
        batch_size_B: The number of episodes returned from `sample()`.
        batch_length_T: The length of each episode in the episode list returned
            from `sample()`.
        **kwargs: Forwarded to the base `EpisodeReplayBuffer` constructor.
    """
    # Initialize the base episode replay buffer.
    super().__init__(
        capacity=capacity,
        batch_size_B=batch_size_B,
        batch_length_T=batch_length_T,
        **kwargs,
    )

    # Stores indices of module (single-agent) timesteps. Each index is a tuple
    # of the form:
    # `(ma_episode_idx, agent_id, timestep)`.
    # This information is stored for each timestep of an episode and is used in
    # the `"independent"` sampling process. The multi-agent episode index and
    # the agent ID are used to retrieve the single-agent episode. The timestep
    # is then needed to retrieve the corresponding timestep data from that
    # single-agent episode.
    self._module_to_indices: Dict[
        ModuleID, List[Tuple[int, AgentID, int]]
    ] = defaultdict(list)

    # Stores the number of single-agent timesteps in the buffer.
    self._num_agent_timesteps: int = 0
    # Stores the number of single-agent timesteps per module.
    self._num_module_timesteps: Dict[ModuleID, int] = defaultdict(int)

    # Stores the number of added single-agent timesteps over the
    # lifetime of the buffer.
    self._num_agent_timesteps_added: int = 0
    # Stores the number of added single-agent timesteps per module
    # over the lifetime of the buffer.
    self._num_module_timesteps_added: Dict[ModuleID, int] = defaultdict(int)

    # Stores the number of module episodes currently held in the buffer.
    self._num_module_episodes: Dict[ModuleID, int] = defaultdict(int)
    # Stores the number of module episodes evicted. Note, this is
    # important for indexing.
    self._num_module_episodes_evicted: Dict[ModuleID, int] = defaultdict(int)

    # Stores the number of module timesteps sampled.
    self.sampled_timesteps_per_module: Dict[ModuleID, int] = defaultdict(int)
|
| 170 |
+
|
| 171 |
+
@override(EpisodeReplayBuffer)
def add(
    self,
    episodes: Union[List["MultiAgentEpisode"], "MultiAgentEpisode"],
) -> None:
    """Adds episodes to the replay buffer.

    Note, if the incoming episodes' time steps cause the buffer to overflow,
    older episodes are evicted. Because episodes usually come in chunks and
    not complete, this could lead to edge cases (e.g. with very small capacity
    or very long episode length) where the first part of an episode is evicted
    while the next part just comes in.
    To defend against such case, the complete episode is evicted, including
    the new chunk, unless the episode is the only one in the buffer. In the
    latter case the buffer will be allowed to overflow in a temporary fashion,
    i.e. during the next addition of samples to the buffer an attempt is made
    to fall below capacity again.

    The user is advised to select a large enough buffer with regard to the
    maximum expected episode length.

    Args:
        episodes: The multi-agent episodes to add to the replay buffer. Can be
            a single episode or a list of episodes.
    """
    episodes: List["MultiAgentEpisode"] = force_list(episodes)

    # Account for the incoming env timesteps up front; the eviction loop
    # below corrects these counters if chunks must be dropped again.
    new_episode_ids: Set[str] = {eps.id_ for eps in episodes}
    total_env_timesteps = sum([eps.env_steps() for eps in episodes])
    self._num_timesteps += total_env_timesteps
    self._num_timesteps_added += total_env_timesteps

    # Evict old episodes until we fit capacity again (but never evict the
    # very last remaining episode - allow temporary overflow instead).
    eps_evicted_ids: Set[Union[str, int]] = set()
    eps_evicted_idxs: Set[int] = set()
    while (
        self._num_timesteps > self.capacity
        and self._num_remaining_episodes(new_episode_ids, eps_evicted_ids) != 1
    ):
        # Evict the oldest episode (leftmost in the deque).
        evicted_episode = self.episodes.popleft()
        eps_evicted_ids.add(evicted_episode.id_)
        eps_evicted_idxs.add(self.episode_id_to_index.pop(evicted_episode.id_))
        # If this episode has a new chunk in the new episodes added,
        # we subtract it again.
        # TODO (sven, simon): Should we just treat such an episode chunk
        # as a new episode?
        if evicted_episode.id_ in new_episode_ids:
            idx = next(
                i
                for i, eps in enumerate(episodes)
                if eps.id_ == evicted_episode.id_
            )
            new_eps_to_evict = episodes.pop(idx)
            self._num_timesteps -= new_eps_to_evict.env_steps()
            self._num_timesteps_added -= new_eps_to_evict.env_steps()
        # Remove the timesteps of the evicted episode from the counter.
        self._num_timesteps -= evicted_episode.env_steps()
        self._num_agent_timesteps -= evicted_episode.agent_steps()
        self._num_episodes_evicted += 1
        # Remove the module timesteps of the evicted episode from the counters.
        self._evict_module_episodes(evicted_episode)
        del evicted_episode

    # Add agent and module steps for the (remaining) new episodes.
    for eps in episodes:
        self._num_agent_timesteps += eps.agent_steps()
        self._num_agent_timesteps_added += eps.agent_steps()
        # Update the module counters by the module timesteps.
        self._update_module_counters(eps)

    # Remove corresponding indices, if episodes were evicted.
    if eps_evicted_idxs:
        # If the episode is not evicted, we keep the index.
        # Note, each index 2-tuple is of the form (ma_episode_idx, timestep)
        # and refers to a certain environment timestep in a certain
        # multi-agent episode.
        self._indices = [
            idx_tuple
            for idx_tuple in self._indices
            if idx_tuple[0] not in eps_evicted_idxs
        ]
        # Also remove corresponding module indices.
        for module_id, module_indices in self._module_to_indices.items():
            # Each index 3-tuple is of the form
            # (ma_episode_idx, agent_id, timestep) and refers to a certain
            # agent timestep in a certain multi-agent episode.
            self._module_to_indices[module_id] = [
                idx_triplet
                for idx_triplet in module_indices
                if idx_triplet[0] not in eps_evicted_idxs
            ]

    for eps in episodes:
        # Deep-copy so the buffer owns its episodes independently of callers.
        eps = copy.deepcopy(eps)
        # If the episode is part of an already existing episode, concatenate.
        if eps.id_ in self.episode_id_to_index:
            eps_idx = self.episode_id_to_index[eps.id_]
            existing_eps = self.episodes[eps_idx - self._num_episodes_evicted]
            existing_len = len(existing_eps)
            self._indices.extend(
                [
                    (
                        eps_idx,
                        existing_len + i,
                    )
                    for i in range(len(eps))
                ]
            )
            # Add new module indices.
            self._add_new_module_indices(eps, eps_idx, True)
            # Concatenate the episode chunk.
            existing_eps.concat_episode(eps)
        # Otherwise, create a new entry.
        else:
            # New episode.
            self.episodes.append(eps)
            # Global episode index = position in deque + number already
            # evicted (indices are monotone over the buffer's lifetime).
            eps_idx = len(self.episodes) - 1 + self._num_episodes_evicted
            self.episode_id_to_index[eps.id_] = eps_idx
            self._indices.extend([(eps_idx, i) for i in range(len(eps))])
            # Add new module indices.
            self._add_new_module_indices(eps, eps_idx, False)
|
| 293 |
+
|
| 294 |
+
@override(EpisodeReplayBuffer)
def sample(
    self,
    num_items: Optional[int] = None,
    *,
    batch_size_B: Optional[int] = None,
    batch_length_T: Optional[int] = None,
    n_step: Optional[Union[int, Tuple]] = 1,
    gamma: float = 0.99,
    include_infos: bool = False,
    include_extra_model_outputs: bool = False,
    replay_mode: str = "independent",
    modules_to_sample: Optional[List[ModuleID]] = None,
    **kwargs,
) -> Union[List["MultiAgentEpisode"], List["SingleAgentEpisode"]]:
    """Samples a batch of multi-agent transitions.

    Multi-agent transitions can be sampled either `"independent"` or
    `"synchronized"`, with the former sampling agent steps independently per
    module and the latter sampling agent transitions from the same
    environment step.

    The n-step parameter can be either a single integer or a tuple of two
    integers. In the former case, the n-step is fixed to the given integer
    and in the latter case, the n-step is sampled uniformly from the given
    range. Large n-steps could potentially lead to many retries because not
    all samples might have a full n-step transition.

    Sampling returns batches of size B (number of 'rows'), where each row is
    a tuple of the form

    `(o_t, a_t, sum(r_t+1:t+n), o_t+n)`

    where `o_t` is the observation in `t`, `a_t` the action chosen at
    observation `o_t`, `o_t+n` is the observation `n` timesteps later and
    `sum(r_t+1:t+n)` is the sum of all rewards collected over the time steps
    between `t+1` and `t+n`. The `n`-step can be chosen freely when sampling
    and defaults to `1`. If `n_step` is a tuple, it is sampled uniformly
    across the interval defined by the tuple (for each row in the batch).

    Each batch contains - in addition to the data tuples presented above -
    two further columns, namely `n_steps` and `weights`. The former holds
    the `n_step` used for each row in the batch and the latter a (default)
    weight of `1.0` for each row in the batch. This weight is used for
    weighted loss calculations in the training process.

    Args:
        num_items: The number of items to sample. If provided, `batch_size_B`
            should be `None`.
        batch_size_B: The batch size to sample. If provided, `num_items`
            should be `None`.
        batch_length_T: The length of the sampled batch. If not provided, the
            default batch length is used. This feature is not yet implemented.
        n_step: The n-step to sample. If the n-step is a tuple, the n-step is
            sampled uniformly from the given range. If not provided, the
            default n-step of `1` is used.
        gamma: The discount factor for the n-step reward calculation.
        include_infos: Whether to include the infos in the sampled batch.
        include_extra_model_outputs: Whether to include the extra model
            outputs in the sampled batch.
        replay_mode: The replay mode to use for sampling. Either
            `"independent"` or `"synchronized"`.
        modules_to_sample: A list of module IDs to sample from. If not
            provided, transitions for all modules are sampled.

    Returns:
        A dictionary of the form `ModuleID -> SampleBatchType` containing the
        sampled data for each module or each module in `modules_to_sample`,
        if provided.
    """
    # `num_items` is an alias for `batch_size_B`; only one may be given.
    if num_items is not None:
        assert batch_size_B is None, (
            "Cannot call `sample()` with both `num_items` and `batch_size_B` "
            "provided! Use either one."
        )
        batch_size_B = num_items

    # Fall back to our default values if no sizes/lengths provided.
    batch_size_B = batch_size_B or self.batch_size_B
    # TODO (simon): Implement trajectory sampling for RNNs.
    batch_length_T = batch_length_T or self.batch_length_T

    # Choose the sampling strategy, then invoke it with identical arguments.
    sampler = (
        self._sample_independent
        if replay_mode == "independent"
        else self._sample_synchonized
    )
    return sampler(
        batch_size_B=batch_size_B,
        batch_length_T=batch_length_T,
        n_step=n_step,
        gamma=gamma,
        include_infos=include_infos,
        include_extra_model_outputs=include_extra_model_outputs,
        modules_to_sample=modules_to_sample,
    )
|
| 396 |
+
|
| 397 |
+
def get_added_agent_timesteps(self) -> int:
|
| 398 |
+
"""Returns number of agent timesteps that have been added in buffer's lifetime.
|
| 399 |
+
|
| 400 |
+
Note, this could be more than the `get_added_timesteps` returns as an
|
| 401 |
+
environment timestep could contain multiple agent timesteps (for eaxch agent
|
| 402 |
+
one).
|
| 403 |
+
"""
|
| 404 |
+
return self._num_agent_timesteps_added
|
| 405 |
+
|
| 406 |
+
def get_module_ids(self) -> List[ModuleID]:
    """Returns a list of module IDs stored in the buffer."""
    # The per-module index mapping holds one entry per known module.
    return [module_id for module_id in self._module_to_indices]
|
| 409 |
+
|
| 410 |
+
def get_num_agent_timesteps(self) -> int:
|
| 411 |
+
"""Returns number of agent timesteps stored in the buffer.
|
| 412 |
+
|
| 413 |
+
Note, this could be more than the `num_timesteps` as an environment timestep
|
| 414 |
+
could contain multiple agent timesteps (for eaxch agent one).
|
| 415 |
+
"""
|
| 416 |
+
return self._num_agent_timesteps
|
| 417 |
+
|
| 418 |
+
@override(EpisodeReplayBuffer)
def get_num_episodes(self, module_id: Optional[ModuleID] = None) -> int:
    """Returns number of episodes stored for a module in the buffer.

    Note, episodes could be either complete or truncated.

    Args:
        module_id: The ID of the module to query. If not provided (None), the
            number of episodes for all modules is returned.

    Returns:
        The number of episodes stored for the module or all modules.
    """
    # Compare explicitly against None: a falsy-but-valid module ID (e.g. an
    # empty string) must not be silently routed to the global count.
    if module_id is not None:
        return self._num_module_episodes[module_id]
    return super().get_num_episodes()
|
| 436 |
+
|
| 437 |
+
@override(EpisodeReplayBuffer)
def get_num_episodes_evicted(self, module_id: Optional[ModuleID] = None) -> int:
    """Returns number of episodes evicted for a module in the buffer.

    Args:
        module_id: The ID of the module to query. If not provided (None), the
            number of episodes evicted over all modules is returned.

    Returns:
        The number of evicted episodes for the module or all modules.
    """
    # Compare explicitly against None: a falsy-but-valid module ID (e.g. an
    # empty string) must not be silently routed to the global count.
    if module_id is not None:
        return self._num_module_episodes_evicted[module_id]
    return super().get_num_episodes_evicted()
|
| 445 |
+
|
| 446 |
+
@override(EpisodeReplayBuffer)
def get_num_timesteps(self, module_id: Optional[ModuleID] = None) -> int:
    """Returns number of individual timesteps for a module stored in the buffer.

    Args:
        module_id: The ID of the module to query. If not provided (None), the
            number of timesteps for all modules are returned.

    Returns:
        The number of timesteps stored for the module or all modules.
    """
    # Compare explicitly against None: a falsy-but-valid module ID (e.g. an
    # empty string) must not be silently routed to the global count.
    if module_id is not None:
        return self._num_module_timesteps[module_id]
    return super().get_num_timesteps()
|
| 462 |
+
|
| 463 |
+
@override(EpisodeReplayBuffer)
def get_sampled_timesteps(self, module_id: Optional[ModuleID] = None) -> int:
    """Returns number of timesteps that have been sampled for a module.

    Args:
        module_id: The ID of the module to query. If not provided (None), the
            number of sampled timesteps for all modules are returned.

    Returns:
        The number of timesteps sampled for the module or all modules.
    """
    # Compare explicitly against None: a falsy-but-valid module ID (e.g. an
    # empty string) must not be silently routed to the global count.
    if module_id is not None:
        return self.sampled_timesteps_per_module[module_id]
    return super().get_sampled_timesteps()
|
| 479 |
+
|
| 480 |
+
@override(EpisodeReplayBuffer)
def get_added_timesteps(self, module_id: Optional[ModuleID] = None) -> int:
    """Returns the number of timesteps added in buffer's lifetime for given module.

    Args:
        module_id: The ID of the module to query. If not provided (None), the
            total number of timesteps ever added.

    Returns:
        The number of timesteps added for `module_id` (or all modules if
        `module_id` is None).
    """
    # Compare explicitly against None: a falsy-but-valid module ID (e.g. an
    # empty string) must not be silently routed to the global count.
    if module_id is not None:
        return self._num_module_timesteps_added[module_id]
    return super().get_added_timesteps()
|
| 497 |
+
|
| 498 |
+
@override(EpisodeReplayBuffer)
def get_state(self) -> Dict[str, Any]:
    """Gets a pickable state of the buffer.

    This is used for checkpointing the buffer's state. It is specifically
    helpful, for example, when a trial is paused and resumed later on. The
    buffer's state can be saved to disk and reloaded when the trial is
    resumed.

    Returns:
        A dict containing all necessary information to restore the buffer's
        state.
    """
    # Defaultdicts are serialized as item lists so they pickle cleanly and
    # can be rebuilt with their factories in `set_state`.
    multi_agent_state = {
        "_module_to_indices": list(self._module_to_indices.items()),
        "_num_agent_timesteps": self._num_agent_timesteps,
        "_num_agent_timesteps_added": self._num_agent_timesteps_added,
        "_num_module_timesteps": list(self._num_module_timesteps.items()),
        "_num_module_timesteps_added": list(
            self._num_module_timesteps_added.items()
        ),
        "_num_module_episodes": list(self._num_module_episodes.items()),
        "_num_module_episodes_evicted": list(
            self._num_module_episodes_evicted.items()
        ),
        "sampled_timesteps_per_module": list(
            self.sampled_timesteps_per_module.items()
        ),
    }
    # Merge the base-class state with the multi-agent additions.
    return {**super().get_state(), **multi_agent_state}
|
| 525 |
+
|
| 526 |
+
@override(EpisodeReplayBuffer)
def set_state(self, state) -> None:
    """Sets the state of a buffer from a previously stored state.

    See `get_state()` for more information on what is stored in the state. This
    method is used to restore the buffer's state from a previously stored state.
    It is specifically helpful, for example, when a trial is paused and resumed
    later on. The buffer's state can be saved to disk and reloaded when the
    trial is resumed.

    Args:
        state: The state to restore the buffer from.
    """
    # Episodes must be restored first, then the super's state on top.
    self._set_episodes(state)
    super().set_state(state)

    # Restore the plain (scalar) counters.
    self._num_agent_timesteps = state["_num_agent_timesteps"]
    self._num_agent_timesteps_added = state["_num_agent_timesteps_added"]

    # Restore all per-module mappings; they were serialized as item lists
    # (see `get_state()`), so rebuild each one as a `defaultdict` with its
    # appropriate default factory.
    self._module_to_indices = defaultdict(
        list, dict(state["_module_to_indices"])
    )
    for attr, factory in (
        ("_num_module_timesteps", int),
        ("_num_module_timesteps_added", int),
        ("_num_module_episodes", int),
        ("_num_module_episodes_evicted", int),
        ("sampled_timesteps_per_module", list),
    ):
        setattr(self, attr, defaultdict(factory, dict(state[attr])))
|
| 562 |
+
|
| 563 |
+
def _set_episodes(self, state: Dict[str, Any]) -> None:
|
| 564 |
+
"""Sets the episodes from the state."""
|
| 565 |
+
if not self.episodes:
|
| 566 |
+
self.episodes = deque(
|
| 567 |
+
[
|
| 568 |
+
MultiAgentEpisode.from_state(eps_data)
|
| 569 |
+
for eps_data in state["episodes"]
|
| 570 |
+
]
|
| 571 |
+
)
|
| 572 |
+
|
| 573 |
+
def _sample_independent(
    self,
    batch_size_B: Optional[int],
    batch_length_T: Optional[int],
    n_step: Optional[Union[int, Tuple[int, int]]],
    gamma: float,
    include_infos: bool,
    include_extra_model_outputs: bool,
    modules_to_sample: Optional[Set[ModuleID]],
) -> List["SingleAgentEpisode"]:
    """Samples a batch of independent multi-agent transitions.

    Draws `batch_size_B` single-agent transitions PER module (independently
    per module, from that module's own index list) and wraps each transition
    in a one-step `SingleAgentEpisode` of the form
    `(o_t, a_t, sum(r_(t:t+n_step)), o_(t+n_step))`.

    Args:
        batch_size_B: Number of transitions to sample per module.
        batch_length_T: Unused here; kept for signature symmetry with the
            synchronized sampling path.
        n_step: Fixed n-step (int) or an interval (tuple/list) from which the
            n-step is drawn uniformly per sampled item.
        gamma: Discount factor used for the n-step reward sum.
        include_infos: Whether to attach env infos to the sampled episodes.
        include_extra_model_outputs: Whether to attach the stored
            extra-model-outputs to the sampled episodes.
        modules_to_sample: Module IDs to sample for; defaults to all modules
            known to this buffer.

    Returns:
        A flat list of one-step `SingleAgentEpisode` objects (all modules
        concatenated).
    """

    actual_n_step = n_step or 1
    # Sample the n-step if necessary.
    random_n_step = isinstance(n_step, (tuple, list))

    sampled_episodes = []
    # TODO (simon): Ensure that the module has data and if not, skip it.
    # TODO (sven): Should we then error out or skip? I think the Learner
    # should handle this case when a module has no train data.
    modules_to_sample = modules_to_sample or set(self._module_to_indices.keys())
    for module_id in modules_to_sample:
        module_indices = self._module_to_indices[module_id]
        B = 0
        while B < batch_size_B:
            # Now sample from the single-agent timesteps.
            index_tuple = module_indices[self.rng.integers(len(module_indices))]

            # This will be an agent timestep (not env timestep).
            # TODO (simon, sven): Maybe deprecate sa_episode_idx (_) in the index
            # quads. Is there any need for it?
            # The stored episode index is absolute over the buffer's lifetime;
            # subtract the eviction count to get the deque position.
            ma_episode_idx, agent_id, sa_episode_ts = (
                index_tuple[0] - self._num_episodes_evicted,
                index_tuple[1],
                index_tuple[2],
            )

            # Get the multi-agent episode.
            ma_episode = self.episodes[ma_episode_idx]
            # Retrieve the single-agent episode for filtering.
            sa_episode = ma_episode.agent_episodes[agent_id]

            # If we use random n-step sampling, draw the n-step for this item.
            if random_n_step:
                actual_n_step = int(self.rng.integers(n_step[0], n_step[1]))
            # If we cannnot make the n-step, we resample.
            # (`continue` without incrementing `B` retries with a fresh draw.)
            if sa_episode_ts + actual_n_step > len(sa_episode):
                continue
            # Note, this will be the reward after executing action
            # `a_(episode_ts)`. For `n_step>1` this will be the discounted sum
            # of all rewards that were collected over the last n steps.
            sa_raw_rewards = sa_episode.get_rewards(
                slice(sa_episode_ts, sa_episode_ts + actual_n_step)
            )
            # lfilter on the reversed rewards computes the discounted sum
            # sum_i gamma^i * r_(t+i) in one C-level pass.
            sa_rewards = scipy.signal.lfilter(
                [1], [1, -gamma], sa_raw_rewards[::-1], axis=0
            )[-1]

            sampled_sa_episode = SingleAgentEpisode(
                id_=sa_episode.id_,
                # Provide the IDs for the learner connector.
                agent_id=sa_episode.agent_id,
                module_id=sa_episode.module_id,
                multi_agent_episode_id=ma_episode.id_,
                # Ensure that each episode contains a tuple of the form:
                #   (o_t, a_t, sum(r_(t:t+n_step)), o_(t+n_step))
                # Two observations (t and t+n).
                observations=[
                    sa_episode.get_observations(sa_episode_ts),
                    sa_episode.get_observations(sa_episode_ts + actual_n_step),
                ],
                observation_space=sa_episode.observation_space,
                infos=(
                    [
                        sa_episode.get_infos(sa_episode_ts),
                        sa_episode.get_infos(sa_episode_ts + actual_n_step),
                    ]
                    if include_infos
                    else None
                ),
                actions=[sa_episode.get_actions(sa_episode_ts)],
                action_space=sa_episode.action_space,
                rewards=[sa_rewards],
                # If the sampled single-agent episode is the single-agent episode's
                # last time step, check, if the single-agent episode is terminated
                # or truncated.
                terminated=(
                    sa_episode_ts + actual_n_step >= len(sa_episode)
                    and sa_episode.is_terminated
                ),
                truncated=(
                    sa_episode_ts + actual_n_step >= len(sa_episode)
                    and sa_episode.is_truncated
                ),
                extra_model_outputs={
                    # Uniform (non-prioritized) importance weight for this path.
                    "weights": [1.0],
                    "n_step": [actual_n_step],
                    **(
                        {
                            k: [
                                sa_episode.get_extra_model_outputs(k, sa_episode_ts)
                            ]
                            for k in sa_episode.extra_model_outputs.keys()
                        }
                        if include_extra_model_outputs
                        else {}
                    ),
                },
                # TODO (sven): Support lookback buffers.
                len_lookback_buffer=0,
                t_started=sa_episode_ts,
            )
            # Append single-agent episode to the list of sampled episodes.
            sampled_episodes.append(sampled_sa_episode)

            # Increase counter.
            B += 1

        # Increase the per module timesteps counter.
        self.sampled_timesteps_per_module[module_id] += B

    # Increase the counter for environment timesteps.
    # NOTE(review): this adds `batch_size_B` once, not once per module --
    # confirm whether the env-timestep counter is meant to be per-call.
    self.sampled_timesteps += batch_size_B
    # Return multi-agent dictionary.
    return sampled_episodes
|
| 698 |
+
|
| 699 |
+
def _sample_synchonized(
    self,
    batch_size_B: Optional[int],
    batch_length_T: Optional[int],
    n_step: Optional[Union[int, Tuple]],
    gamma: float,
    include_infos: bool,
    include_extra_model_outputs: bool,
    modules_to_sample: Optional[List[ModuleID]],
) -> SampleBatchType:
    """Samples a batch of synchronized multi-agent transitions.

    Unlike `_sample_independent`, a single env timestep is drawn and ALL
    agents with a complete n-step transition at that timestep contribute a
    row to their respective module's batch, keeping agents time-aligned.

    Returns:
        A nested dict `{module_id: {column: np.ndarray/batched data}}`,
        convertible to a `MultiAgentBatch`.
    """
    # Sample the n-step if necessary.
    if isinstance(n_step, tuple):
        # Use random n-step sampling.
        random_n_step = True
    else:
        actual_n_step = n_step or 1
        random_n_step = False

    # Containers for the sampled data.
    observations: Dict[ModuleID, List[ObsType]] = defaultdict(list)
    next_observations: Dict[ModuleID, List[ObsType]] = defaultdict(list)
    actions: Dict[ModuleID, List[ActType]] = defaultdict(list)
    rewards: Dict[ModuleID, List[float]] = defaultdict(list)
    is_terminated: Dict[ModuleID, List[bool]] = defaultdict(list)
    is_truncated: Dict[ModuleID, List[bool]] = defaultdict(list)
    # NOTE(review): `weights` and `n_steps` are never appended to anywhere in
    # this method, so the arrays placed into the return dict below are always
    # empty -- confirm whether this is intended.
    weights: Dict[ModuleID, List[float]] = defaultdict(list)
    n_steps: Dict[ModuleID, List[int]] = defaultdict(list)
    # If `info` should be included, construct also a container for them.
    if include_infos:
        infos: Dict[ModuleID, List[Dict[str, Any]]] = defaultdict(list)
    # If `extra_model_outputs` should be included, construct a container for them.
    if include_extra_model_outputs:
        extra_model_outputs: Dict[ModuleID, List[Dict[str, Any]]] = defaultdict(
            list
        )

    B = 0
    while B < batch_size_B:
        index_tuple = self._indices[self.rng.integers(len(self._indices))]

        # This will be an env timestep (not agent timestep)
        # Stored index is absolute; subtract evictions for the deque position.
        ma_episode_idx, ma_episode_ts = (
            index_tuple[0] - self._num_episodes_evicted,
            index_tuple[1],
        )
        # If we use random n-step sampling, draw the n-step for this item.
        if random_n_step:
            actual_n_step = int(self.rng.integers(n_step[0], n_step[1]))
        # If we are at the end of an episode, continue.
        # Note, priority sampling got us `o_(t+n)` and we need for the loss
        # calculation in addition `o_t`.
        # TODO (simon): Maybe introduce a variable `num_retries` until the
        # while loop should break when not enough samples have been collected
        # to make n-step possible.
        if ma_episode_ts - actual_n_step < 0:
            continue

        # Retrieve the multi-agent episode.
        ma_episode = self.episodes[ma_episode_idx]

        # Ensure that each row contains a tuple of the form:
        #   (o_t, a_t, sum(r_(t:t+n_step)), o_(t+n_step))
        # TODO (simon): Implement version for sequence sampling when using RNNs.
        eps_observation = ma_episode.get_observations(
            slice(ma_episode_ts - actual_n_step, ma_episode_ts + 1),
            return_list=True,
        )
        # Note, `MultiAgentEpisode` stores the action that followed
        # `o_t` with `o_(t+1)`, therefore, we need the next one.
        # TODO (simon): This gets the wrong action as long as the getters are not
        # fixed.
        eps_actions = ma_episode.get_actions(ma_episode_ts - actual_n_step)
        # Make sure that at least a single agent should have full transition.
        # TODO (simon): Filter for the `modules_to_sample`.
        agents_to_sample = self._agents_with_full_transitions(
            eps_observation,
            eps_actions,
        )
        # If not, we resample.
        if not agents_to_sample:
            continue
        # TODO (simon, sven): Do we need to include the common agent rewards?
        # Note, the reward that is collected by transitioning from `o_t` to
        # `o_(t+1)` is stored in the next transition in `MultiAgentEpisode`.
        eps_rewards = ma_episode.get_rewards(
            slice(ma_episode_ts - actual_n_step, ma_episode_ts),
            return_list=True,
        )
        # TODO (simon, sven): Do we need to include the common infos? And are
        # there common extra model outputs?
        if include_infos:
            # If infos are included we include the ones from the last timestep
            # as usually the info contains additional values about the last state.
            eps_infos = ma_episode.get_infos(ma_episode_ts)
        if include_extra_model_outputs:
            # If `extra_model_outputs` are included we include the ones from the
            # first timestep as usually the `extra_model_outputs` contain additional
            # values from the forward pass that produced the action at the first
            # timestep.
            # Note, we extract them into single row dictionaries similar to the
            # infos, in a connector we can then extract these into single batch
            # rows.
            eps_extra_model_outputs = {
                k: ma_episode.get_extra_model_outputs(
                    k, ma_episode_ts - actual_n_step
                )
                for k in ma_episode.extra_model_outputs.keys()
            }
        # If the sampled time step is the episode's last time step check, if
        # the episode is terminated or truncated.
        episode_terminated = False
        episode_truncated = False
        if ma_episode_ts == ma_episode.env_t:
            episode_terminated = ma_episode.is_terminated
            episode_truncated = ma_episode.is_truncated
        # TODO (simon): Filter for the `modules_to_sample`.
        # TODO (sven, simon): We could here also sample for all agents in the
        # `modules_to_sample` and then adapt the `n_step` for agents that
        # have not a full transition.
        for agent_id in agents_to_sample:
            # Map our agent to the corresponding module we want to
            # train.
            module_id = ma_episode._agent_to_module_mapping[agent_id]
            # Sample only for the modules in `modules_to_sample`.
            if module_id not in (
                modules_to_sample or self._module_to_indices.keys()
            ):
                continue
            # TODO (simon, sven): Here we could skip for modules not
            # to be sampled in `modules_to_sample`.
            observations[module_id].append(eps_observation[0][agent_id])
            next_observations[module_id].append(eps_observation[-1][agent_id])
            # Fill missing rewards with zeros.
            agent_rewards = [r[agent_id] or 0.0 for r in eps_rewards]
            # Discounted n-step reward sum via a single lfilter pass over
            # the reversed reward sequence.
            rewards[module_id].append(
                scipy.signal.lfilter([1], [1, -gamma], agent_rewards[::-1], axis=0)[
                    -1
                ]
            )
            # Note, this should exist, as we filtered for agents with full
            # transitions.
            actions[module_id].append(eps_actions[agent_id])
            if include_infos:
                infos[module_id].append(eps_infos[agent_id])
            if include_extra_model_outputs:
                extra_model_outputs[module_id].append(
                    {
                        k: eps_extra_model_outputs[agent_id][k]
                        for k in eps_extra_model_outputs[agent_id].keys()
                    }
                )
            # If sampled observation is terminal for the agent. Either MAE
            # episode is truncated/terminated or SAE episode is truncated/
            # terminated at this ts.
            # TODO (simon, sven): Add method agent_alive(ts) to MAE.
            # or add slicing to get_terminateds().
            agent_ts = ma_episode.env_t_to_agent_t[agent_id][ma_episode_ts]
            agent_eps = ma_episode.agent_episodes[agent_id]
            agent_terminated = agent_ts == agent_eps.t and agent_eps.is_terminated
            agent_truncated = (
                agent_ts == agent_eps.t
                and agent_eps.is_truncated
                and not agent_eps.is_terminated
            )
            if episode_terminated or agent_terminated:
                is_terminated[module_id].append(True)
                is_truncated[module_id].append(False)
            elif episode_truncated or agent_truncated:
                is_truncated[module_id].append(True)
                is_terminated[module_id].append(False)
            else:
                is_terminated[module_id].append(False)
                is_truncated[module_id].append(False)
            # Increase the per module counter.
            self.sampled_timesteps_per_module[module_id] += 1

        # Increase counter.
        B += 1
    # Increase the counter for environment timesteps.
    self.sampled_timesteps += batch_size_B

    # Should be convertible to MultiAgentBatch.
    ret = {
        **{
            module_id: {
                Columns.OBS: batch(observations[module_id]),
                Columns.ACTIONS: batch(actions[module_id]),
                Columns.REWARDS: np.array(rewards[module_id]),
                Columns.NEXT_OBS: batch(next_observations[module_id]),
                Columns.TERMINATEDS: np.array(is_terminated[module_id]),
                Columns.TRUNCATEDS: np.array(is_truncated[module_id]),
                "weights": np.array(weights[module_id]),
                "n_step": np.array(n_steps[module_id]),
            }
            for module_id in observations.keys()
        }
    }

    # Return multi-agent dictionary.
    return ret
|
| 900 |
+
|
| 901 |
+
def _num_remaining_episodes(self, new_eps, evicted_eps):
|
| 902 |
+
"""Calculates the number of remaining episodes.
|
| 903 |
+
|
| 904 |
+
When adding episodes and evicting them in the `add()` method
|
| 905 |
+
this function calculates iteratively the number of remaining
|
| 906 |
+
episodes.
|
| 907 |
+
|
| 908 |
+
Args:
|
| 909 |
+
new_eps: List of new episode IDs.
|
| 910 |
+
evicted_eps: List of evicted episode IDs.
|
| 911 |
+
|
| 912 |
+
Returns:
|
| 913 |
+
Number of episodes remaining after evicting the episodes in
|
| 914 |
+
`evicted_eps` and adding the episode in `new_eps`.
|
| 915 |
+
"""
|
| 916 |
+
return len(
|
| 917 |
+
set(self.episode_id_to_index.keys()).union(set(new_eps)) - set(evicted_eps)
|
| 918 |
+
)
|
| 919 |
+
|
| 920 |
+
def _evict_module_episodes(self, ma_episode: MultiAgentEpisode) -> None:
    """Evicts the module episodes from the buffer and updates all counters.

    Args:
        ma_episode: The multi-agent episode to evict from the buffer.
    """
    # Note we need to take the agent IDs from the evicted episode because
    # different episodes can have different agents and module mappings.
    for agent_id, module_eps in ma_episode.agent_episodes.items():
        # Resolve the module this agent maps to in THIS episode.
        module_id = ma_episode._agent_to_module_mapping[agent_id]
        # Update all counters.
        self._num_module_timesteps[module_id] -= module_eps.env_steps()
        self._num_module_episodes[module_id] -= 1
        self._num_module_episodes_evicted[module_id] += 1
|
| 937 |
+
|
| 938 |
+
def _update_module_counters(self, ma_episode: MultiAgentEpisode) -> None:
    """Updates the module counters after adding an episode.

    Args:
        ma_episode: The multi-agent episode to update the module counters
            for.
    """
    for agent_id in ma_episode.agent_ids:
        agent_steps = ma_episode.agent_episodes[agent_id].env_steps()
        # Skip agents that have not stepped in this episode (chunk).
        if agent_steps <= 0:
            continue
        # Receive the corresponding module ID.
        module_id = ma_episode.module_for(agent_id)
        self._num_module_timesteps[module_id] += agent_steps
        self._num_module_timesteps_added[module_id] += agent_steps
|
| 958 |
+
|
| 959 |
+
def _add_new_module_indices(
    self,
    ma_episode: MultiAgentEpisode,
    episode_idx: int,
    ma_episode_exists: bool = True,
) -> None:
    """Adds the module indices for new episode chunks.

    For each agent in `ma_episode`, appends one
    `(episode_idx, agent_id, sa_timestep)` triple per single-agent timestep
    to that agent's module index list, so the per-module sampling paths can
    draw from the new data.

    Args:
        ma_episode: The multi-agent episode to add the module indices for.
        episode_idx: The index of the episode in the `self.episodes`.
        ma_episode_exists: Whether `ma_episode` is already in this buffer (with a
            predecessor chunk to which we'll concatenate `ma_episode` later).
    """
    existing_ma_episode = None
    if ma_episode_exists:
        # Look up the predecessor chunk; the stored index is absolute, so
        # subtract the eviction count to get the deque position.
        existing_ma_episode = self.episodes[
            self.episode_id_to_index[ma_episode.id_] - self._num_episodes_evicted
        ]

    # Note, we iterate through the agent episodes b/c we want to store records
    # and some agents could not have entered the environment.
    for agent_id in ma_episode.agent_episodes:
        # Get the corresponding module id.
        module_id = ma_episode.module_for(agent_id)
        # Get the module episode.
        module_eps = ma_episode.agent_episodes[agent_id]

        # Is the agent episode already in the buffer's existing `ma_episode`?
        # If so, new timestep indices must continue after the existing length.
        if ma_episode_exists and agent_id in existing_ma_episode.agent_episodes:
            existing_sa_eps_len = len(existing_ma_episode.agent_episodes[agent_id])
        # Otherwise, it is a new single-agent episode and we increase the counter.
        else:
            existing_sa_eps_len = 0
            self._num_module_episodes[module_id] += 1

        # Add new module indices.
        self._module_to_indices[module_id].extend(
            [
                (
                    # Keep the MAE index for sampling
                    episode_idx,
                    agent_id,
                    existing_sa_eps_len + i,
                )
                for i in range(len(module_eps))
            ]
        )
|
| 1007 |
+
|
| 1008 |
+
def _agents_with_full_transitions(
    self, observations: Dict[AgentID, ObsType], actions: Dict[AgentID, ActType]
):
    """Filters for agents that have full transitions.

    Args:
        observations: The observations of the episode.
        actions: The actions of the episode.

    Returns:
        List of agent IDs that have full transitions.
    """
    # An agent is sampleable only if it has an action at the first timestep
    # and observations at both the first and last timestep of the n-step
    # transition.
    first_obs, last_obs = observations[0], observations[-1]
    return [
        agent_id
        for agent_id in first_obs.keys()
        if agent_id in actions and agent_id in last_obs
    ]
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/multi_agent_prioritized_episode_buffer.py
ADDED
|
@@ -0,0 +1,923 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import numpy as np
|
| 3 |
+
import scipy
|
| 4 |
+
|
| 5 |
+
from collections import defaultdict, deque
|
| 6 |
+
from numpy.typing import NDArray
|
| 7 |
+
from typing import Dict, List, Optional, Set, Tuple, Union
|
| 8 |
+
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
|
| 9 |
+
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
|
| 10 |
+
from ray.rllib.utils import force_list
|
| 11 |
+
from ray.rllib.utils.annotations import override
|
| 12 |
+
from ray.rllib.utils.replay_buffers.multi_agent_episode_buffer import (
|
| 13 |
+
MultiAgentEpisodeReplayBuffer,
|
| 14 |
+
)
|
| 15 |
+
from ray.rllib.utils.replay_buffers.prioritized_episode_buffer import (
|
| 16 |
+
PrioritizedEpisodeReplayBuffer,
|
| 17 |
+
)
|
| 18 |
+
from ray.rllib.utils.typing import ModuleID
|
| 19 |
+
from ray.rllib.execution.segment_tree import MinSegmentTree, SumSegmentTree
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class MultiAgentPrioritizedEpisodeReplayBuffer(
|
| 23 |
+
MultiAgentEpisodeReplayBuffer, PrioritizedEpisodeReplayBuffer
|
| 24 |
+
):
|
| 25 |
+
"""Multi-agent episode replay buffer that stores episodes by their IDs.
|
| 26 |
+
|
| 27 |
+
This class implements a replay buffer as used in "Prioritized Experience
|
| 28 |
+
Replay" (Schaul et al., 2016) for multi-agent reinforcement learning,
|
| 29 |
+
|
| 30 |
+
Each "row" (a slot in a deque) in the buffer is occupied by one episode. If an
|
| 31 |
+
incomplete episode is added to the buffer and then another chunk of that episode is
|
| 32 |
+
added at a later time, the buffer will automatically concatenate the new fragment to
|
| 33 |
+
the original episode. This way, episodes can be completed via subsequent `add`
|
| 34 |
+
calls.
|
| 35 |
+
|
| 36 |
+
Sampling returns a size `B` episode list (number of 'rows'), where each episode
|
| 37 |
+
holds a tuple tuple of the form
|
| 38 |
+
|
| 39 |
+
`(o_t, a_t, sum(r_t+1:t+n), o_t+n)`
|
| 40 |
+
|
| 41 |
+
where `o_t` is the observation in `t`, `a_t` the action chosen at observation `o_t`,
|
| 42 |
+
`o_t+n` is the observation `n` timesteps later and `sum(r_t+1:t+n)` is the sum of
|
| 43 |
+
all rewards collected over the time steps between `t+1` and `t+n`. The `n`-step can
|
| 44 |
+
be chosen freely when sampling and defaults to `1`. If `n_step` is a tuple it is
|
| 45 |
+
sampled uniformly across the interval defined by the tuple (for each row in the
|
| 46 |
+
batch).
|
| 47 |
+
|
| 48 |
+
Each episode contains - in addition to the data tuples presented above - two further
|
| 49 |
+
elements in its `extra_model_outputs`, namely `n_steps` and `weights`. The former
|
| 50 |
+
holds the `n_step` used for the sampled timesteps in the episode and the latter the
|
| 51 |
+
corresponding (importance sampling) weight for the transition.
|
| 52 |
+
|
| 53 |
+
After sampling priorities can be updated (for the last sampled episode list) with
|
| 54 |
+
`self.update_priorities`. This method assigns the new priorities automatically to
|
| 55 |
+
the last sampled timesteps. Note, this implies that sampling timesteps and updating
|
| 56 |
+
their corresponding priorities needs to alternate (e.g. sampling several times and
|
| 57 |
+
then updating the priorities would not work because the buffer caches the last
|
| 58 |
+
sampled timestep indices).
|
| 59 |
+
|
| 60 |
+
.. testcode::
|
| 61 |
+
|
| 62 |
+
import gymnasium as gym
|
| 63 |
+
|
| 64 |
+
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
|
| 65 |
+
from ray.rllib.examples.envs.classes.multi_agent import MultiAgentCartPole
|
| 66 |
+
from ray.rllib.utils.replay_buffers import (
|
| 67 |
+
MultiAgentPrioritizedEpisodeReplayBuffer,
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# Create the environment.
|
| 72 |
+
env = MultiAgentCartPole({"num_agents": 2})
|
| 73 |
+
|
| 74 |
+
# Set up the loop variables
|
| 75 |
+
agent_ids = env.agents
|
| 76 |
+
agent_ids.append("__all__")
|
| 77 |
+
terminateds = {aid: False for aid in agent_ids}
|
| 78 |
+
truncateds = {aid: False for aid in agent_ids}
|
| 79 |
+
num_timesteps = 10000
|
| 80 |
+
episodes = []
|
| 81 |
+
|
| 82 |
+
# Initialize the first episode entries.
|
| 83 |
+
eps = MultiAgentEpisode()
|
| 84 |
+
obs, infos = env.reset()
|
| 85 |
+
eps.add_env_reset(observations=obs, infos=infos)
|
| 86 |
+
|
| 87 |
+
# Sample 10,000 env timesteps.
|
| 88 |
+
for i in range(num_timesteps):
|
| 89 |
+
# If terminated we create a new episode.
|
| 90 |
+
if eps.is_done:
|
| 91 |
+
episodes.append(eps.finalize())
|
| 92 |
+
eps = MultiAgentEpisode()
|
| 93 |
+
terminateds = {aid: False for aid in agent_ids}
|
| 94 |
+
truncateds = {aid: False for aid in agent_ids}
|
| 95 |
+
obs, infos = env.reset()
|
| 96 |
+
eps.add_env_reset(observations=obs, infos=infos)
|
| 97 |
+
|
| 98 |
+
# Sample a random action for all agents that should step in the episode
|
| 99 |
+
# next.
|
| 100 |
+
actions = {
|
| 101 |
+
aid: env.get_action_space(aid).sample()
|
| 102 |
+
for aid in eps.get_agents_to_act()
|
| 103 |
+
}
|
| 104 |
+
obs, rewards, terminateds, truncateds, infos = env.step(actions)
|
| 105 |
+
eps.add_env_step(
|
| 106 |
+
obs,
|
| 107 |
+
actions,
|
| 108 |
+
rewards,
|
| 109 |
+
infos,
|
| 110 |
+
terminateds=terminateds,
|
| 111 |
+
truncateds=truncateds
|
| 112 |
+
)
|
| 113 |
+
|
| 114 |
+
# Add the last (truncated) episode to the list of episodes.
|
| 115 |
+
if not eps.is_done:
|
| 116 |
+
episodes.append(eps)
|
| 117 |
+
|
| 118 |
+
# Create the buffer.
|
| 119 |
+
buffer = MultiAgentPrioritizedEpisodeReplayBuffer()
|
| 120 |
+
# Add the list of episodes sampled.
|
| 121 |
+
buffer.add(episodes)
|
| 122 |
+
|
| 123 |
+
# Pull a sample from the buffer using an `n-step` of 3.
|
| 124 |
+
sample = buffer.sample(num_items=256, gamma=0.95, n_step=3, beta=0.5)
|
| 125 |
+
"""
|
| 126 |
+
|
| 127 |
+
def __init__(
    self,
    capacity: int = 10000,
    *,
    batch_size_B: int = 16,
    batch_length_T: int = 1,
    alpha: float = 1.0,
    **kwargs,
):
    """Initializes a `MultiAgentPrioritizedEpisodeReplayBuffer` object

    Args:
        capacity: The total number of timesteps to be storable in this buffer.
            Will start ejecting old episodes once this limit is reached.
        batch_size_B: The number of episodes returned from `sample()`.
        batch_length_T: The length of each episode in the episode list returned from
            `sample()`.
        alpha: The amount of prioritization to be used: `alpha=1.0` means full
            prioritization, `alpha=0.0` means no prioritization.
        **kwargs: Forwarded to both parent constructors.
    """
    # Initialize the parents. Both bases are called explicitly with their
    # respective argument sets (only the prioritized base takes `alpha`).
    MultiAgentEpisodeReplayBuffer.__init__(
        self,
        capacity=capacity,
        batch_size_B=batch_size_B,
        batch_length_T=batch_length_T,
        **kwargs,
    )
    PrioritizedEpisodeReplayBuffer.__init__(
        self,
        capacity=capacity,
        batch_size_B=batch_size_B,
        batch_length_T=batch_length_T,
        alpha=alpha,
        **kwargs,
    )

    # TODO (simon): If not needed in synchronized sampling, remove.
    # Maps indices from samples to their corresponding tree index.
    self._sample_idx_to_tree_idx = {}
    # Initialize segment trees for the priority weights per module. Note, b/c
    # the trees are binary we need for them a capacity that is an exponential
    # of 2. Double it to enable temporary buffer overflow (we need then free
    # nodes in the trees).
    tree_capacity = int(2 ** np.ceil(np.log2(self.capacity)))

    # Each module receives its own segment trees for independent sampling.
    # All of the following are defaultdicts so a new module ID gets fresh,
    # correctly-sized structures on first access.
    self._module_to_max_priority: Dict[ModuleID, float] = defaultdict(lambda: 1.0)
    self._module_to_sum_segment: Dict[ModuleID, "SumSegmentTree"] = defaultdict(
        lambda: SumSegmentTree(2 * tree_capacity)
    )
    self._module_to_min_segment: Dict[ModuleID, "MinSegmentTree"] = defaultdict(
        lambda: MinSegmentTree(2 * tree_capacity)
    )
    # At initialization all nodes are free.
    self._module_to_free_nodes: Dict[ModuleID, "deque"] = defaultdict(
        lambda: deque(list(range(2 * tree_capacity)), maxlen=2 * tree_capacity)
    )
    # Keep track of the maximum index used from the trees. This helps
    # to not traverse the complete trees.
    self._module_to_max_idx: Dict[ModuleID, int] = defaultdict(lambda: 0)
    # Map from tree indices to sample indices (i.e. `self._indices`).
    self._module_to_tree_idx_to_sample_idx: Dict[ModuleID, dict] = defaultdict(
        lambda: {}
    )
    # Map from module ID to the last sampled indices to update priorities.
    self._module_to_last_sampled_indices: Dict[ModuleID, list] = defaultdict(
        lambda: []
    )
@override(MultiAgentEpisodeReplayBuffer)
def add(
    self,
    episodes: Union[List["MultiAgentEpisode"], "MultiAgentEpisode"],
    weight: Optional[Union[float, Dict[ModuleID, float]]] = None,
) -> None:
    """Adds incoming episodes to the replay buffer.

    Note, if the incoming episodes' time steps cause the buffer to overflow,
    older episodes are evicted. Because episodes usually come in chunks and
    not complete, this could lead to edge cases (e.g. with very small capacity
    or very long episode length) where the first part of an episode is evicted
    while the next part just comes in.
    To defend against such case, the complete episode is evicted, including
    the new chunk, unless the episode is the only one in the buffer. In the
    latter case the buffer will be allowed to overflow in a temporary fashion,
    i.e. during the next addition of samples to the buffer an attempt is made
    to fall below capacity again.

    The user is advised to select a large enough buffer with regard to the maximum
    expected episode length.

    Args:
        episodes: A (list of) `MultiAgentEpisode`(s) that contain the episode
            data.
        weight: A starting priority for the time steps in `episodes`. If `None`
            the maximum priority is used, i.e. 1.0 (as suggested in the original
            paper we scale weights to the interval [0.0, 1.0]). If a dictionary
            is provided, it must contain the weights for each module.
    """
    # Define the weights.
    weight_per_module = {}
    # If no weight is provided, use the maximum priority.
    if weight is None:
        weight = self._max_priority
    # If `weight` is a dictionary, use the module weights.
    # NOTE: Bug fix - this previously read `isinstance(dict, weight)` (arguments
    # swapped), which could never be true, so per-module weights were silently
    # ignored (and a non-class `weight` would raise a `TypeError`).
    elif isinstance(weight, dict):
        weight_per_module = weight
        # Define the weight as the mean of the module weights.
        weight = np.mean(list(weight.values()))

    episodes: List["MultiAgentEpisode"] = force_list(episodes)

    new_episode_ids: List[str] = [eps.id_ for eps in episodes]
    # Calculate the total number of environment timesteps in the new episodes.
    # Note, we need the potential new sum of timesteps to decide whether to
    # evict episodes.
    total_env_timesteps = sum([eps.env_steps() for eps in episodes])
    self._num_timesteps += total_env_timesteps
    self._num_timesteps_added += total_env_timesteps

    # Evict old episodes.
    eps_evicted_ids: Set[Union[str, int]] = set()
    eps_evicted_idxs: Set[int] = set()
    # Only evict episodes if the buffer is full and there is more than one
    # episode left in the buffer.
    while (
        self._num_timesteps > self.capacity
        and self._num_remaining_episodes(new_episode_ids, eps_evicted_ids) != 1
    ):
        # Evict episode.
        evicted_episode = self.episodes.popleft()
        eps_evicted_ids.add(evicted_episode.id_)
        eps_evicted_idxs.add(self.episode_id_to_index.pop(evicted_episode.id_))
        # If this episode has a new chunk in the new episodes added,
        # we subtract it again.
        # TODO (sven, simon): Should we just treat such an episode chunk
        # as a new episode?
        if evicted_episode.id_ in new_episode_ids:
            idx = next(
                i
                for i, eps in enumerate(episodes)
                if eps.id_ == evicted_episode.id_
            )
            new_eps_to_evict = episodes.pop(idx)
            # Remove the timesteps of the evicted new episode from the counter.
            self._num_timesteps -= new_eps_to_evict.env_steps()
            self._num_timesteps_added -= new_eps_to_evict.env_steps()
        # Remove the timesteps of the evicted old episode from the counter.
        self._num_timesteps -= evicted_episode.env_steps()
        self._num_agent_timesteps -= evicted_episode.agent_steps()
        self._num_episodes_evicted += 1
        # Remove the module timesteps of the evicted episode from the counters.
        self._evict_module_episodes(evicted_episode)
        del evicted_episode

    # Add agent and module steps.
    for eps in episodes:
        self._num_agent_timesteps += eps.agent_steps()
        self._num_agent_timesteps_added += eps.agent_steps()
        # Update the module counters by the module timesteps.
        self._update_module_counters(eps)

    # Remove corresponding indices, if episodes were evicted.
    if eps_evicted_idxs:
        new_indices = []
        # Each index 2-tuple is of the form (ma_episode_idx, timestep) and
        # refers to a certain environment timestep in a certain multi-agent
        # episode.
        i = 0
        for idx_tuple in self._indices:
            # If episode index is from an evicted episode, remove it from the
            # indices and clean up.
            if idx_tuple[0] in eps_evicted_idxs:
                # Here we need the index of a multi-agent sample in the segment
                # tree.
                self._free_nodes.appendleft(idx_tuple[2])
                # Remove also the potentially maximum index.
                self._max_idx -= 1 if self._max_idx == idx_tuple[2] else 0
                # Reset to defaults.
                self._sum_segment[idx_tuple[2]] = 0.0
                self._min_segment[idx_tuple[2]] = float("inf")
                sample_idx = self._tree_idx_to_sample_idx[idx_tuple[2]]
                self._tree_idx_to_sample_idx.pop(idx_tuple[2])
                self._sample_idx_to_tree_idx.pop(sample_idx)
            # Otherwise, keep the index.
            else:
                new_indices.append(idx_tuple)
                self._tree_idx_to_sample_idx[idx_tuple[2]] = i
                self._sample_idx_to_tree_idx[i] = idx_tuple[2]
                i += 1
        # Assign the new list of indices.
        self._indices = new_indices
        # Also remove corresponding module indices.
        for module_id, module_indices in self._module_to_indices.items():
            new_module_indices = []
            # Each index 4-tuple is of the form
            # (ma_episode_idx, agent_id, timestep, segtree_idx) and refers to a
            # certain agent timestep in a certain multi-agent episode.
            i = 0
            for idx_quadlet in module_indices:
                # If episode index is from an evicted episode, remove it from the
                # indices and clean up.
                if idx_quadlet[0] in eps_evicted_idxs:
                    # Here we need the index of a multi-agent sample in the segment
                    # tree.
                    self._module_to_free_nodes[module_id].appendleft(idx_quadlet[3])
                    # Remove also the potentially maximum index per module.
                    self._module_to_max_idx[module_id] -= (
                        1
                        if self._module_to_max_idx[module_id] == idx_quadlet[3]
                        else 0
                    )
                    # Set to defaults.
                    self._module_to_sum_segment[module_id][idx_quadlet[3]] = 0.0
                    self._module_to_min_segment[module_id][idx_quadlet[3]] = float(
                        "inf"
                    )
                    self._module_to_tree_idx_to_sample_idx[module_id].pop(
                        idx_quadlet[3]
                    )
                # Otherwise, keep the index.
                else:
                    new_module_indices.append(idx_quadlet)
                    self._module_to_tree_idx_to_sample_idx[module_id][
                        idx_quadlet[3]
                    ] = i
                    i += 1
            # Assign the new list of indices for the module.
            self._module_to_indices[module_id] = new_module_indices

    j = len(self._indices)
    for eps in episodes:
        eps = copy.deepcopy(eps)
        # If the episode is part of an already existing episode, concatenate.
        if eps.id_ in self.episode_id_to_index:
            eps_idx = self.episode_id_to_index[eps.id_]
            existing_eps = self.episodes[eps_idx - self._num_episodes_evicted]
            existing_len = len(existing_eps)
            self._indices.extend(
                [
                    (
                        eps_idx,
                        existing_len + i,
                        # Get the index in the segment trees.
                        self._get_free_node_and_assign(j + i, weight),
                    )
                    for i in range(len(eps))
                ]
            )
            # Add new module indices.
            self._add_new_module_indices(eps, eps_idx, True, weight_per_module)
            # Concatenate the episode chunk.
            existing_eps.concat_episode(eps)
        # Otherwise, create a new entry.
        else:
            # New episode.
            self.episodes.append(eps)
            eps_idx = len(self.episodes) - 1 + self._num_episodes_evicted
            self.episode_id_to_index[eps.id_] = eps_idx
            self._indices.extend(
                [
                    (eps_idx, i, self._get_free_node_and_assign(j + i, weight))
                    for i in range(len(eps))
                ]
            )
            # Add new module indices.
            self._add_new_module_indices(eps, eps_idx, False, weight_per_module)
        # Increase index to the new length of `self._indices`.
        j = len(self._indices)
@override(MultiAgentEpisodeReplayBuffer)
def sample(
    self,
    num_items: Optional[int] = None,
    *,
    batch_size_B: Optional[int] = None,
    batch_length_T: Optional[int] = None,
    n_step: Optional[Union[int, Tuple]] = 1,
    gamma: float = 0.99,
    include_infos: bool = False,
    include_extra_model_outputs: bool = False,
    replay_mode: str = "independent",
    modules_to_sample: Optional[List[ModuleID]] = None,
    beta: float = 0.0,
    **kwargs,
) -> Union[List["MultiAgentEpisode"], List["SingleAgentEpisode"]]:
    """Samples a list of episodes with multi-agent transitions.

    This sampling method also adds (importance sampling) weights to the returned
    batch. See for prioritized sampling Schaul et al. (2016).

    Multi-agent transitions can be sampled either `"independent"` or
    `"synchronized"` with the former sampling for each module independent agent
    steps and the latter sampling agent transitions from the same environment step.

    The n-step parameter can be either a single integer or a tuple of two integers.
    In the former case, the n-step is fixed to the given integer and in the latter
    case, the n-step is sampled uniformly from the given range. Large n-steps could
    potentially lead to many retries because not all samples might have a full
    n-step transition.

    Sampling returns episode lists of size B (number of 'rows'), where each episode
    holds a transition of the form

    `(o_t, a_t, sum(r_t+1:t+n), o_t+n, terminated_t+n, truncated_t+n)`

    where `o_t` is the observation in `t`, `a_t` the action chosen at observation
    `o_t`, `o_t+n` is the observation `n` timesteps later and `sum(r_t+1:t+n)` is
    the sum of all rewards collected over the time steps between `t+1` and `t+n`.
    The `n`-step can be chosen freely when sampling and defaults to `1`. If `n_step`
    is a tuple it is sampled uniformly across the interval defined by the tuple (for
    each row in the batch), i.e. from the interval `[n_step[0], n_step[1]]`.

    If requested, `info`s of a transition's first and last timestep `t+n` and/or
    `extra_model_outputs` from the first timestep (e.g. log-probabilities, etc.) are
    added to the batch.

    Each episode contains - in addition to the data tuples presented above - two
    further entries in its `extra_model_outputs`, namely `n_steps` and `weights`.
    The former holds the `n_step` used for each transition and the latter the
    (importance sampling) weight of `1.0` for each row in the batch. This weight
    is used for weighted loss calculations in the training process.

    Args:
        num_items: The number of items to sample. If provided, `batch_size_B`
            should be `None`.
        batch_size_B: The batch size to sample. If provided, `num_items`
            should be `None`.
        batch_length_T: The length of the sampled batch. If not provided, the
            default batch length is used. This feature is not yet implemented.
        n_step: The n-step to sample. If the n-step is a tuple, the n-step is
            sampled uniformly from the given range. If not provided, the default
            n-step of `1` is used.
        gamma: The discount factor for the n-step reward calculation.
        include_infos: Whether to include the infos in the sampled episodes.
        include_extra_model_outputs: Whether to include the extra model outputs
            in the sampled episodes.
        replay_mode: The replay mode to use for sampling. Either `"independent"`
            or `"synchronized"`.
        modules_to_sample: A list of module IDs to sample from. If not provided,
            transitions for all modules are sampled.
        beta: The exponent of the importance sampling weight (see Schaul et
            al. (2016)). A `beta=0.0` does not correct for the bias introduced
            by prioritized replay and `beta=1.0` fully corrects for it.

    Returns:
        A list of 1-step long single-agent episodes containing all basic episode
        data and if requested infos and extra model outputs. In addition extra model
        outputs hold the (importance sampling) weights and the n-step used for each
        transition.
    """
    # Negative beta exponents are not meaningful for importance sampling.
    assert beta >= 0.0

    if num_items is not None:
        assert batch_size_B is None, (
            "Cannot call `sample()` with both `num_items` and `batch_size_B` "
            "provided! Use either one."
        )
        batch_size_B = num_items

    # Use our default values if no sizes/lengths provided.
    batch_size_B = batch_size_B or self.batch_size_B
    # TODO (simon): Implement trajectory sampling for RNNs.
    batch_length_T = batch_length_T or self.batch_length_T

    # Sample for each module independently.
    if replay_mode == "independent":
        return self._sample_independent(
            batch_size_B=batch_size_B,
            batch_length_T=batch_length_T,
            n_step=n_step,
            gamma=gamma,
            include_infos=include_infos,
            include_extra_model_outputs=include_extra_model_outputs,
            modules_to_sample=modules_to_sample,
            beta=beta,
        )
    # Otherwise, sample synchronized. Note, synchronized sampling does not
    # support `beta` (no per-module importance weights are passed through).
    else:
        return self._sample_synchonized(
            batch_size_B=batch_size_B,
            batch_length_T=batch_length_T,
            n_step=n_step,
            gamma=gamma,
            include_infos=include_infos,
            include_extra_model_outputs=include_extra_model_outputs,
            modules_to_sample=modules_to_sample,
        )
@override(PrioritizedEpisodeReplayBuffer)
def update_priorities(
    self,
    priorities: Union[NDArray, Dict[ModuleID, NDArray]],
    module_id: ModuleID,
) -> None:
    """Update the priorities of items at corresponding indices.

    Usually, incoming priorities are TD-errors.

    Note, this must be called after each `sample()` for the given module:
    the new priorities are assigned to the indices cached during the last
    sampling call and that cache is cleared afterwards.

    Args:
        priorities: Numpy array containing the new priorities to be used
            in sampling for the items in the last sampled batch.
        module_id: The module whose last-sampled transitions receive the
            new priorities.
    """

    # One priority per index cached by the last `sample()` call.
    assert len(priorities) == len(self._module_to_last_sampled_indices[module_id])

    for idx, priority in zip(
        self._module_to_last_sampled_indices[module_id], priorities
    ):
        # sample_idx = self._module_to_tree_idx_to_sample_idx[module_id][idx]
        # ma_episode_idx = (
        #     self._module_to_indices[module_id][sample_idx][0]
        #     - self._num_episodes_evicted
        # )

        # ma_episode_indices.append(ma_episode_idx)
        # Note, TD-errors come in as absolute values or results from
        # cross-entropy loss calculations.
        # assert priority > 0, f"priority was {priority}"
        # Clamp to a tiny positive value so zero TD-errors keep a non-zero
        # sampling probability.
        priority = max(priority, 1e-12)
        assert 0 <= idx < self._module_to_sum_segment[module_id].capacity
        # TODO (simon): Create metrics.
        # delta = priority**self._alpha - self._sum_segment[idx]
        # Update the priorities in the segment trees.
        self._module_to_sum_segment[module_id][idx] = priority**self._alpha
        self._module_to_min_segment[module_id][idx] = priority**self._alpha
        # Update the maximal priority.
        self._module_to_max_priority[module_id] = max(
            self._module_to_max_priority[module_id], priority
        )
    # Clear the corresponding index list for the module.
    self._module_to_last_sampled_indices[module_id].clear()

    # TODO (simon): Use this later for synchronized sampling.
    # for ma_episode_idx in ma_episode_indices:
    #     ma_episode_tree_idx = self._sample_idx_to_tree_idx(ma_episode_idx)
    #     ma_episode_idx =

    #     # Update the weights
    #     self._sum_segment[tree_idx] = sum(
    #         self._module_to_sum_segment[module_id][idx]
    #         for module_id, idx in self._tree_idx_to_sample_idx[tree_idx]
    #     )
    #     self._min_segment[tree_idx] = min(
    #         self._module_to_min_segment[module_id][idx]
    #         for module_id, idx in self._tree_idx_to_sample_idx[tree_idx]
    #     )
@override(MultiAgentEpisodeReplayBuffer)
def get_state(self):
    """Returns the pickable state of the buffer for checkpointing.

    Combines the states of both parent buffers and adds all per-module
    prioritization structures (serialized as item lists so the defaultdicts
    can be rebuilt in `set_state`).
    """
    state = MultiAgentEpisodeReplayBuffer.get_state(self)
    state.update(PrioritizedEpisodeReplayBuffer.get_state(self))
    # Serialize every per-module mapping as a list of (module_id, value) pairs.
    per_module_attrs = (
        "_module_to_max_priority",
        "_module_to_sum_segment",
        "_module_to_min_segment",
        "_module_to_free_nodes",
        "_module_to_max_idx",
        "_module_to_tree_idx_to_sample_idx",
        "_module_to_last_sampled_indices",
    )
    for attr in per_module_attrs:
        state[attr] = list(getattr(self, attr).items())
    return state
@override(MultiAgentEpisodeReplayBuffer)
def set_state(self, state) -> None:
    """Restores the buffer state from a `get_state()` dict.

    Restores both parent buffers first, then rebuilds all per-module
    prioritization structures as defaultdicts so that modules first seen
    after the restore still get correctly initialized structures.

    Args:
        state: A state dict as returned by `get_state()`.
    """
    MultiAgentEpisodeReplayBuffer.set_state(self, state)
    PrioritizedEpisodeReplayBuffer.set_state(self, state)
    self._module_to_max_priority = defaultdict(
        lambda: 1.0, dict(state["_module_to_max_priority"])
    )
    # Segment trees need a power-of-2 capacity; doubled for temporary overflow
    # (mirrors the construction in `__init__`).
    tree_capacity = int(2 ** np.ceil(np.log2(self.capacity)))
    self._module_to_sum_segment = defaultdict(
        lambda: SumSegmentTree(2 * tree_capacity),
        dict(state["_module_to_sum_segment"]),
    )
    # Bug fix: the default factory here previously created a `SumSegmentTree`;
    # the *min* mapping must create `MinSegmentTree`s, otherwise any module
    # first seen after a restore would compute sums where minima are required.
    self._module_to_min_segment = defaultdict(
        lambda: MinSegmentTree(2 * tree_capacity),
        dict(state["_module_to_min_segment"]),
    )
    self._module_to_free_nodes = defaultdict(
        lambda: deque(list(range(2 * tree_capacity)), maxlen=2 * tree_capacity),
        dict(state["_module_to_free_nodes"]),
    )
    self._module_to_max_idx = defaultdict(
        lambda: 0, dict(state["_module_to_max_idx"])
    )
    self._module_to_tree_idx_to_sample_idx = defaultdict(
        lambda: {}, dict(state["_module_to_tree_idx_to_sample_idx"])
    )
    self._module_to_last_sampled_indices = defaultdict(
        lambda: [], dict(state["_module_to_last_sampled_indices"])
    )
@override(MultiAgentEpisodeReplayBuffer)
def _add_new_module_indices(
    self,
    ma_episode: MultiAgentEpisode,
    ma_episode_idx: int,
    ma_episode_exists: bool = True,
    weight: Optional[Union[float, Dict[ModuleID, float]]] = None,
) -> None:
    """Adds the module indices for new episode chunks.

    Args:
        ma_episode: The multi-agent episode to add the module indices for.
        ma_episode_idx: The index of the episode in `self.episodes`.
        ma_episode_exists: Whether `ma_episode` continues an episode that is
            already stored in the buffer. If so, agent timesteps continue
            from the lengths of the existing single-agent episodes.
        weight: An optional initial priority for the new transitions. Either
            a mapping from module ID to weight, or a single scalar applied to
            all modules. Modules without an entry (or a `None` weight) fall
            back to the module's current maximum priority.
    """
    existing_ma_episode = None
    if ma_episode_exists:
        existing_ma_episode = self.episodes[
            self.episode_id_to_index[ma_episode.id_] - self._num_episodes_evicted
        ]

    for agent_id in ma_episode.agent_ids:
        # Get the corresponding module id.
        module_id = ma_episode.module_for(agent_id)
        # Get the module episode.
        module_eps = ma_episode.agent_episodes[agent_id]

        # Is the agent episode already in the buffer's existing `ma_episode`?
        if ma_episode_exists and agent_id in existing_ma_episode.agent_episodes:
            existing_sa_eps_len = len(existing_ma_episode.agent_episodes[agent_id])
        # Otherwise, it is a new single-agent episode and we increase the counter.
        else:
            existing_sa_eps_len = 0
            self._num_module_episodes[module_id] += 1

        # Resolve the initial priority weight for this module.
        # Bug fix: `weight` defaults to `None` but was previously dereferenced
        # via `weight.get(...)` unconditionally, raising an `AttributeError`
        # whenever the default was used. Scalar weights (allowed by the
        # signature) were also unsupported.
        if isinstance(weight, dict):
            module_weight = weight.get(
                module_id, self._module_to_max_priority[module_id]
            )
        elif weight is None:
            module_weight = self._module_to_max_priority[module_id]
        else:
            # A scalar weight applies uniformly to all modules.
            module_weight = weight

        # Add new module indices.
        j = len(self._module_to_indices[module_id])
        self._module_to_indices[module_id].extend(
            [
                (
                    # Keep the MAE index for sampling.
                    ma_episode_idx,
                    agent_id,
                    existing_sa_eps_len + i,
                    # Get the index in the segment trees.
                    self._get_free_node_per_module_and_assign(
                        module_id,
                        j + i,
                        module_weight,
                    ),
                )
                for i in range(len(module_eps))
            ]
        )
@override(PrioritizedEpisodeReplayBuffer)
def _get_free_node_and_assign(self, sample_index, weight: float = 1.0) -> int:
    """Claims the next free node in the (env-step) segment trees.

    The initial priority for the new transition is written into the sum-
    and min-segment trees and the tree index is recorded in the mapping
    between tree indices and sample indices.

    Args:
        sample_index: The index of the sample in the `self._indices` list.
        weight: The initial priority weight to be used in sampling for
            the item at index `sample_index`.

    Returns:
        The index in the segment trees `self._sum_segment` and
        `self._min_segment` for the item at index `sample_index` in
        `self._indices`.
    """
    # Claim the next free slot from the queue of unused tree nodes.
    node = self._free_nodes.popleft()
    # Track the highest tree index in use so tree traversals can stop early.
    if node > self._max_idx:
        self._max_idx = node
    # Write the alpha-scaled initial priority into both trees.
    scaled_priority = weight**self._alpha
    self._sum_segment[node] = scaled_priority
    self._min_segment[node] = scaled_priority
    # Maintain the bidirectional tree-index <-> sample-index mapping.
    self._tree_idx_to_sample_idx[node] = sample_index
    self._sample_idx_to_tree_idx[sample_index] = node
    return node
def _get_free_node_per_module_and_assign(
    self, module_id: ModuleID, sample_index, weight: float = 1.0
) -> int:
    """Claims the next free node in `module_id`'s segment trees.

    The initial priority for the new transition is written into the
    module's sum- and min-segment trees and the tree index is recorded
    in the module's tree-index-to-sample-index mapping.

    Args:
        module_id: The module whose segment trees are used.
        sample_index: The index of the sample in
            `self._module_to_indices[module_id]`.
        weight: The initial priority weight to be used in sampling for
            the item at index `sample_index`.

    Returns:
        The index in the module's segment trees for the item at index
        `sample_index`.
    """
    # Claim the next free slot from this module's queue of unused nodes.
    node = self._module_to_free_nodes[module_id].popleft()
    # Track the highest tree index in use per module so tree traversals
    # can stop early.
    if node > self._module_to_max_idx[module_id]:
        self._module_to_max_idx[module_id] = node
    # Write the alpha-scaled initial priority into both module trees.
    # TODO (simon): Allow alpha to be chosen per module.
    scaled_priority = weight**self._alpha
    self._module_to_sum_segment[module_id][node] = scaled_priority
    self._module_to_min_segment[module_id][node] = scaled_priority
    # Record the tree-index -> sample-index mapping for this module.
    self._module_to_tree_idx_to_sample_idx[module_id][node] = sample_index
    return node
@override(MultiAgentEpisodeReplayBuffer)
def _sample_independent(
    self,
    batch_size_B: Optional[int],
    batch_length_T: Optional[int],
    n_step: Optional[Union[int, Tuple]],
    gamma: float,
    include_infos: bool,
    include_extra_model_outputs: bool,
    modules_to_sample: Optional[List[ModuleID]],
    beta: Optional[float],
) -> List["SingleAgentEpisode"]:
    """Samples a single-agent episode list with independent transitions.

    Note, independent sampling means that each module samples its transitions
    independently from the replay buffer. This is the default sampling mode.
    In contrast, synchronized sampling samples transitions from the same
    environment step.
    """

    # Default to 1-step transitions when no n-step was given.
    actual_n_step = n_step or 1
    # Sample the n-step if necessary (a tuple means "draw n per item").
    random_n_step = isinstance(n_step, tuple)

    # Keep track of the indices that were sampled last for updating the
    # weights later (see `ray.rllib.utils.replay_buffer.utils.
    # update_priorities_in_episode_replay_buffer`).
    # self._last_sampled_indices = defaultdict(lambda: [])

    sampled_episodes = []
    # TODO (simon): Ensure that the module has data and if not, skip it.
    # TODO (sven): Should we then error out or skip? I think the Learner
    # should handle this case when a module has no train data.
    modules_to_sample = modules_to_sample or set(self._module_to_indices.keys())
    for module_id in modules_to_sample:
        # Sample proportionally from the replay buffer's module segments using the
        # respective weights.
        module_total_segment_sum = self._module_to_sum_segment[module_id].sum()
        # Minimum sampling probability over the module's stored items
        # (used to compute the maximum importance-sampling weight below).
        module_p_min = (
            self._module_to_min_segment[module_id].min() / module_total_segment_sum
        )
        # TODO (simon): Allow individual betas per module.
        module_max_weight = (module_p_min * self.get_num_timesteps(module_id)) ** (
            -beta
        )
        B = 0
        while B < batch_size_B:
            # First, draw a random sample from Uniform(0, sum over all weights).
            # Note, transitions with higher weight get sampled more often (as
            # more random draws fall into larger intervals).
            module_random_sum = (
                self.rng.random() * self._module_to_sum_segment[module_id].sum()
            )
            # Get the highest index in the sum-tree for which the sum is
            # smaller or equal the random sum sample.
            # Note, in contrast to Schaul et al. (2018) (who sample
            # `o_(t + n_step)`, Algorithm 1) we sample `o_t`.
            module_idx = self._module_to_sum_segment[module_id].find_prefixsum_idx(
                module_random_sum
            )
            # Get the theoretical probability mass for drawing this sample.
            module_p_sample = (
                self._module_to_sum_segment[module_id][module_idx]
                / module_total_segment_sum
            )
            # Compute the importance sampling weight.
            module_weight = (
                module_p_sample * self.get_num_timesteps(module_id)
            ) ** (-beta)
            # Now, get the transition stored at this index.
            index_quadlet = self._module_to_indices[module_id][
                self._module_to_tree_idx_to_sample_idx[module_id][module_idx]
            ]

            # This will be an agent timestep (not env timestep).
            # TODO (simon, sven): Maybe deprecate sa_episode_idx (_) in the index
            # quads. Is there any need for it?
            # NOTE(review): `_num_episodes_evicted` offsets stale indices after
            # episode eviction — verify index_quadlet layout against writer side.
            ma_episode_idx, agent_id, sa_episode_ts = (
                index_quadlet[0] - self._num_episodes_evicted,
                index_quadlet[1],
                index_quadlet[2],
            )

            # Get the multi-agent episode.
            ma_episode = self.episodes[ma_episode_idx]
            # Retrieve the single-agent episode for filtering.
            sa_episode = ma_episode.agent_episodes[agent_id]

            # If we use random n-step sampling, draw the n-step for this item.
            if random_n_step:
                actual_n_step = int(self.rng.integers(n_step[0], n_step[1]))
            # If we cannnot make the n-step, we resample.
            if sa_episode_ts + actual_n_step > len(sa_episode):
                continue
            # Note, this will be the reward after executing action
            # `a_(episode_ts)`. For `n_step>1` this will be the discounted sum
            # of all rewards that were collected over the last n steps.
            sa_raw_rewards = sa_episode.get_rewards(
                slice(sa_episode_ts, sa_episode_ts + actual_n_step)
            )
            # lfilter over the reversed rewards yields the gamma-discounted
            # n-step return at the last output position.
            sa_rewards = scipy.signal.lfilter(
                [1], [1, -gamma], sa_raw_rewards[::-1], axis=0
            )[-1]

            sampled_sa_episode = SingleAgentEpisode(
                id_=sa_episode.id_,
                # Provide the IDs for the learner connector.
                agent_id=sa_episode.agent_id,
                module_id=sa_episode.module_id,
                multi_agent_episode_id=ma_episode.id_,
                # Ensure that each episode contains a tuple of the form:
                # (o_t, a_t, sum(r_(t:t+n_step)), o_(t+n_step))
                # Two observations (t and t+n).
                observations=[
                    sa_episode.get_observations(sa_episode_ts),
                    sa_episode.get_observations(sa_episode_ts + actual_n_step),
                ],
                observation_space=sa_episode.observation_space,
                infos=(
                    [
                        sa_episode.get_infos(sa_episode_ts),
                        sa_episode.get_infos(sa_episode_ts + actual_n_step),
                    ]
                    if include_infos
                    else None
                ),
                actions=[sa_episode.get_actions(sa_episode_ts)],
                action_space=sa_episode.action_space,
                rewards=[sa_rewards],
                # If the sampled single-agent episode is the single-agent episode's
                # last time step, check, if the single-agent episode is terminated
                # or truncated.
                terminated=(
                    sa_episode_ts + actual_n_step >= len(sa_episode)
                    and sa_episode.is_terminated
                ),
                truncated=(
                    sa_episode_ts + actual_n_step >= len(sa_episode)
                    and sa_episode.is_truncated
                ),
                extra_model_outputs={
                    # Normalized importance-sampling weight for this item.
                    "weights": [
                        module_weight / module_max_weight * 1
                    ],  # actual_size=1
                    "n_step": [actual_n_step],
                    **(
                        {
                            k: [
                                sa_episode.get_extra_model_outputs(k, sa_episode_ts)
                            ]
                            for k in sa_episode.extra_model_outputs.keys()
                        }
                        if include_extra_model_outputs
                        else {}
                    ),
                },
                # TODO (sven): Support lookback buffers.
                len_lookback_buffer=0,
                t_started=sa_episode_ts,
            )
            # Append single-agent episode to the list of sampled episodes.
            sampled_episodes.append(sampled_sa_episode)

            # Increase counter.
            B += 1
            # Keep track of sampled indices for updating priorities later for each
            # module.
            self._module_to_last_sampled_indices[module_id].append(module_idx)

        # Increase the per module timesteps counter.
        self.sampled_timesteps_per_module[module_id] += B

    # Increase the counter for environment timesteps.
    self.sampled_timesteps += batch_size_B
    # Return multi-agent dictionary.
    return sampled_episodes
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/multi_agent_replay_buffer.py
ADDED
|
@@ -0,0 +1,392 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import logging
|
| 3 |
+
from enum import Enum
|
| 4 |
+
from typing import Any, Dict, Optional
|
| 5 |
+
|
| 6 |
+
from ray.util.timer import _Timer
|
| 7 |
+
from ray.rllib.policy.rnn_sequencing import timeslice_along_seq_lens_with_overlap
|
| 8 |
+
from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch
|
| 9 |
+
from ray.rllib.utils.annotations import override
|
| 10 |
+
from ray.rllib.utils.deprecation import Deprecated
|
| 11 |
+
from ray.rllib.utils.from_config import from_config
|
| 12 |
+
from ray.rllib.utils.replay_buffers.replay_buffer import (
|
| 13 |
+
_ALL_POLICIES,
|
| 14 |
+
ReplayBuffer,
|
| 15 |
+
StorageUnit,
|
| 16 |
+
)
|
| 17 |
+
from ray.rllib.utils.typing import PolicyID, SampleBatchType
|
| 18 |
+
from ray.util.annotations import DeveloperAPI
|
| 19 |
+
from ray.util.debug import log_once
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@DeveloperAPI
class ReplayMode(Enum):
    # Sample MultiAgentBatches from all policies at once (one shared buffer).
    LOCKSTEP = "lockstep"
    # Sample each policy's buffer independently of the others.
    INDEPENDENT = "independent"
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@DeveloperAPI
def merge_dicts_with_warning(args_on_init, args_on_call):
    """Merge argument dicts, overwriting args_on_call with warning.

    The MultiAgentReplayBuffer supports setting standard arguments for calls
    of methods of the underlying buffers. These arguments can be
    overwritten. Such overwrites trigger a warning to the user.
    """
    for arg_name, arg_value in args_on_call.items():
        # Only overlapping keys are worth warning about.
        if arg_name not in args_on_init:
            continue
        if log_once("overwrite_argument_{}".format(str(arg_name))):
            logger.warning(
                "Replay Buffer was initialized to have "
                "underlying buffers methods called with "
                "argument `{}={}`, but was subsequently called "
                "with `{}={}`.".format(
                    arg_name,
                    args_on_init[arg_name],
                    arg_name,
                    arg_value,
                )
            )
    # Call-time arguments take precedence over init-time defaults.
    merged = dict(args_on_init)
    merged.update(args_on_call)
    return merged
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@DeveloperAPI
class MultiAgentReplayBuffer(ReplayBuffer):
    """A replay buffer shard for multiagent setups.

    This buffer is meant to be run in parallel to distribute experiences
    across `num_shards` shards. Unlike simpler buffers, it holds a set of
    buffers - one for each policy ID.
    """

    def __init__(
        self,
        capacity: int = 10000,
        storage_unit: str = "timesteps",
        num_shards: int = 1,
        replay_mode: str = "independent",
        replay_sequence_override: bool = True,
        replay_sequence_length: int = 1,
        replay_burn_in: int = 0,
        replay_zero_init_states: bool = True,
        underlying_buffer_config: dict = None,
        **kwargs
    ):
        """Initializes a MultiAgentReplayBuffer instance.

        Args:
            capacity: The capacity of the buffer, measured in `storage_unit`.
            storage_unit: Either 'timesteps', 'sequences' or
                'episodes'. Specifies how experiences are stored. If they
                are stored in episodes, replay_sequence_length is ignored.
            num_shards: The number of buffer shards that exist in total
                (including this one).
            replay_mode: One of "independent" or "lockstep". Determines,
                whether batches are sampled independently or to an equal
                amount.
            replay_sequence_override: If True, ignore sequences found in incoming
                batches, slicing them into sequences as specified by
                `replay_sequence_length` and `replay_sequence_burn_in`. This only has
                an effect if storage_unit is `sequences`.
            replay_sequence_length: The sequence length (T) of a single
                sample. If > 1, we will sample B x T from this buffer. This
                only has an effect if storage_unit is 'timesteps'.
            replay_burn_in: This is the number of timesteps
                each sequence overlaps with the previous one to generate a
                better internal state (=state after the burn-in), instead of
                starting from 0.0 each RNN rollout. This only has an effect
                if storage_unit is `sequences`.
            replay_zero_init_states: Whether the initial states in the
                buffer (if replay_sequence_length > 0) are alwayas 0.0 or
                should be updated with the previous train_batch state outputs.
            underlying_buffer_config: A config that contains all necessary
                constructor arguments and arguments for methods to call on
                the underlying buffers.
            ``**kwargs``: Forward compatibility kwargs.
        """
        shard_capacity = capacity // num_shards
        ReplayBuffer.__init__(self, capacity, storage_unit)

        # If the user provides an underlying buffer config, we use it to
        # instantiate and interact with underlying buffers.
        self.underlying_buffer_config = underlying_buffer_config
        if self.underlying_buffer_config is not None:
            self.underlying_buffer_call_args = self.underlying_buffer_config
        else:
            self.underlying_buffer_call_args = {}
        self.replay_sequence_override = replay_sequence_override
        self.replay_mode = replay_mode
        self.replay_sequence_length = replay_sequence_length
        self.replay_burn_in = replay_burn_in
        self.replay_zero_init_states = replay_zero_init_states

        if (
            replay_sequence_length > 1
            and self.storage_unit is not StorageUnit.SEQUENCES
        ):
            logger.warning(
                "MultiAgentReplayBuffer configured with "
                "`replay_sequence_length={}`, but `storage_unit={}`. "
                "replay_sequence_length will be ignored and set to 1.".format(
                    replay_sequence_length, storage_unit
                )
            )
            self.replay_sequence_length = 1

        if replay_sequence_length == 1 and self.storage_unit is StorageUnit.SEQUENCES:
            logger.warning(
                "MultiAgentReplayBuffer configured with "
                "`replay_sequence_length={}`, but `storage_unit={}`. "
                "This will result in sequences equal to timesteps.".format(
                    replay_sequence_length, storage_unit
                )
            )

        if replay_mode in ["lockstep", ReplayMode.LOCKSTEP]:
            self.replay_mode = ReplayMode.LOCKSTEP
            if self.storage_unit in [StorageUnit.EPISODES, StorageUnit.SEQUENCES]:
                raise ValueError(
                    "MultiAgentReplayBuffer does not support "
                    "lockstep mode with storage unit `episodes`"
                    "or `sequences`."
                )
        elif replay_mode in ["independent", ReplayMode.INDEPENDENT]:
            self.replay_mode = ReplayMode.INDEPENDENT
        else:
            raise ValueError("Unsupported replay mode: {}".format(replay_mode))

        if self.underlying_buffer_config:
            ctor_args = {
                **{"capacity": shard_capacity, "storage_unit": StorageUnit.FRAGMENTS},
                **self.underlying_buffer_config,
            }

            def new_buffer():
                return from_config(self.underlying_buffer_config["type"], ctor_args)

        else:
            # Default case
            def new_buffer():
                self.underlying_buffer_call_args = {}
                # NOTE(review): this uses `self.capacity` (full, un-sharded
                # capacity) while the configured path above uses
                # `shard_capacity` — confirm whether that asymmetry is
                # intentional.
                return ReplayBuffer(
                    self.capacity,
                    storage_unit=StorageUnit.FRAGMENTS,
                )

        # One underlying buffer per policy ID, created lazily on first access.
        self.replay_buffers = collections.defaultdict(new_buffer)

        # Metrics.
        self.add_batch_timer = _Timer()
        self.replay_timer = _Timer()
        self._num_added = 0

    def __len__(self) -> int:
        """Returns the number of items currently stored in this buffer."""
        return sum(len(buffer._storage) for buffer in self.replay_buffers.values())

    @DeveloperAPI
    @Deprecated(
        old="ReplayBuffer.replay()",
        new="ReplayBuffer.sample(num_items)",
        error=True,
    )
    def replay(self, num_items: int = None, **kwargs) -> Optional[SampleBatchType]:
        """Deprecated in favor of new ReplayBuffer API."""
        pass

    @DeveloperAPI
    @override(ReplayBuffer)
    def add(self, batch: SampleBatchType, **kwargs) -> None:
        """Adds a batch to the appropriate policy's replay buffer.

        Turns the batch into a MultiAgentBatch of the DEFAULT_POLICY_ID if
        it is not a MultiAgentBatch. Subsequently, adds the individual policy
        batches to the storage.

        Args:
            batch: The batch to be added.
            ``**kwargs``: Forward compatibility kwargs.
        """
        if batch is None:
            if log_once("empty_batch_added_to_buffer"):
                logger.info(
                    "A batch that is `None` was added to {}. This can be "
                    "normal at the beginning of execution but might "
                    "indicate an issue.".format(type(self).__name__)
                )
            return
        # Make a copy so the replay buffer doesn't pin plasma memory.
        batch = batch.copy()
        # Handle everything as if multi-agent.
        batch = batch.as_multi_agent()

        with self.add_batch_timer:
            pids_and_batches = self._maybe_split_into_policy_batches(batch)
            for policy_id, sample_batch in pids_and_batches.items():
                self._add_to_underlying_buffer(policy_id, sample_batch, **kwargs)

        self._num_added += batch.count

    @DeveloperAPI
    def _add_to_underlying_buffer(
        self, policy_id: PolicyID, batch: SampleBatchType, **kwargs
    ) -> None:
        """Add a batch of experiences to the underlying buffer of a policy.

        If the storage unit is `timesteps`, cut the batch into timeslices
        before adding them to the appropriate buffer. Otherwise, let the
        underlying buffer decide how slice batches.

        Args:
            policy_id: ID of the policy that corresponds to the underlying
                buffer
            batch: SampleBatch to add to the underlying buffer
            ``**kwargs``: Forward compatibility kwargs.
        """
        # Merge kwargs, overwriting standard call arguments
        kwargs = merge_dicts_with_warning(self.underlying_buffer_call_args, kwargs)

        # For the storage unit `timesteps`, the underlying buffer will
        # simply store the samples how they arrive. For sequences and
        # episodes, the underlying buffer may split them itself.
        if self.storage_unit is StorageUnit.TIMESTEPS:
            timeslices = batch.timeslices(1)
        elif self.storage_unit is StorageUnit.SEQUENCES:
            timeslices = timeslice_along_seq_lens_with_overlap(
                sample_batch=batch,
                seq_lens=batch.get(SampleBatch.SEQ_LENS)
                if self.replay_sequence_override
                else None,
                zero_pad_max_seq_len=self.replay_sequence_length,
                pre_overlap=self.replay_burn_in,
                zero_init_states=self.replay_zero_init_states,
            )
        elif self.storage_unit == StorageUnit.EPISODES:
            timeslices = []
            for eps in batch.split_by_episode():
                if eps.get(SampleBatch.T)[0] == 0 and (
                    eps.get(SampleBatch.TERMINATEDS, [True])[-1]
                    or eps.get(SampleBatch.TRUNCATEDS, [False])[-1]
                ):
                    # Only add full episodes to the buffer
                    timeslices.append(eps)
                else:
                    if log_once("only_full_episodes"):
                        logger.info(
                            "This buffer uses episodes as a storage "
                            "unit and thus allows only full episodes "
                            "to be added to it. Some samples may be "
                            "dropped."
                        )
        elif self.storage_unit == StorageUnit.FRAGMENTS:
            timeslices = [batch]
        else:
            raise ValueError("Unknown `storage_unit={}`".format(self.storage_unit))

        # Renamed from `slice` to avoid shadowing the builtin.
        for time_slice in timeslices:
            self.replay_buffers[policy_id].add(time_slice, **kwargs)

    @DeveloperAPI
    @override(ReplayBuffer)
    def sample(
        self, num_items: int, policy_id: Optional[PolicyID] = None, **kwargs
    ) -> Optional[SampleBatchType]:
        """Samples a MultiAgentBatch of `num_items` per one policy's buffer.

        If less than `num_items` records are in the policy's buffer,
        some samples in the results may be repeated to fulfil the batch size
        `num_items` request. Returns an empty batch if there are no items in
        the buffer.

        Args:
            num_items: Number of items to sample from a policy's buffer.
            policy_id: ID of the policy that created the experiences we sample. If
                none is given, sample from all policies.
            ``**kwargs``: Forward compatibility kwargs.

        Returns:
            Concatenated MultiAgentBatch of items.
        """
        # Merge kwargs, overwriting standard call arguments
        kwargs = merge_dicts_with_warning(self.underlying_buffer_call_args, kwargs)

        with self.replay_timer:
            # Lockstep mode: Sample from all policies at the same time an
            # equal amount of steps.
            if self.replay_mode == ReplayMode.LOCKSTEP:
                assert (
                    policy_id is None
                ), "`policy_id` specifier not allowed in `lockstep` mode!"
                # In lockstep mode we sample MultiAgentBatches
                return self.replay_buffers[_ALL_POLICIES].sample(num_items, **kwargs)
            elif policy_id is not None:
                sample = self.replay_buffers[policy_id].sample(num_items, **kwargs)
                return MultiAgentBatch({policy_id: sample}, sample.count)
            else:
                samples = {}
                for policy_id, replay_buffer in self.replay_buffers.items():
                    samples[policy_id] = replay_buffer.sample(num_items, **kwargs)
                return MultiAgentBatch(samples, sum(s.count for s in samples.values()))

    @DeveloperAPI
    @override(ReplayBuffer)
    def stats(self, debug: bool = False) -> Dict:
        """Returns the stats of this buffer and all underlying buffers.

        Args:
            debug: If True, stats of underlying replay buffers will
                be fetched with debug=True.

        Returns:
            stat: Dictionary of buffer stats.
        """
        stat = {
            "add_batch_time_ms": round(1000 * self.add_batch_timer.mean, 3),
            "replay_time_ms": round(1000 * self.replay_timer.mean, 3),
        }
        for policy_id, replay_buffer in self.replay_buffers.items():
            stat.update(
                {"policy_{}".format(policy_id): replay_buffer.stats(debug=debug)}
            )
        return stat

    @DeveloperAPI
    @override(ReplayBuffer)
    def get_state(self) -> Dict[str, Any]:
        """Returns all local state.

        Returns:
            The serializable local state.
        """
        state = {"num_added": self._num_added, "replay_buffers": {}}
        for policy_id, replay_buffer in self.replay_buffers.items():
            state["replay_buffers"][policy_id] = replay_buffer.get_state()
        return state

    @DeveloperAPI
    @override(ReplayBuffer)
    def set_state(self, state: Dict[str, Any]) -> None:
        """Restores all local state to the provided `state`.

        Args:
            state: The new state to set this buffer. Can be obtained by
                calling `self.get_state()`.
        """
        self._num_added = state["num_added"]
        buffer_states = state["replay_buffers"]
        for policy_id in buffer_states.keys():
            self.replay_buffers[policy_id].set_state(buffer_states[policy_id])

    def _maybe_split_into_policy_batches(self, batch: SampleBatchType):
        """Returns a dict of policy IDs and batches, depending on our replay mode.

        This method helps with splitting up MultiAgentBatches only if the
        self.replay_mode requires it.
        """
        if self.replay_mode == ReplayMode.LOCKSTEP:
            return {_ALL_POLICIES: batch}
        else:
            return batch.policy_batches
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/prioritized_replay_buffer.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
from typing import Any, Dict, List, Optional
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
# Import ray before psutil will make sure we use psutil's bundled version
|
| 6 |
+
import ray # noqa F401
|
| 7 |
+
import psutil # noqa E402
|
| 8 |
+
|
| 9 |
+
from ray.rllib.execution.segment_tree import SumSegmentTree, MinSegmentTree
|
| 10 |
+
from ray.rllib.policy.sample_batch import SampleBatch
|
| 11 |
+
from ray.rllib.utils.annotations import override
|
| 12 |
+
from ray.rllib.utils.metrics.window_stat import WindowStat
|
| 13 |
+
from ray.rllib.utils.replay_buffers.replay_buffer import ReplayBuffer
|
| 14 |
+
from ray.rllib.utils.typing import SampleBatchType
|
| 15 |
+
from ray.util.annotations import DeveloperAPI
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@DeveloperAPI
class PrioritizedReplayBuffer(ReplayBuffer):
    """This buffer implements Prioritized Experience Replay.

    The algorithm has been described by Tom Schaul et. al. in "Prioritized
    Experience Replay". See https://arxiv.org/pdf/1511.05952.pdf for
    the full paper.

    Priorities are stored as ``priority**alpha`` in two segment trees: a sum
    tree (for proportional sampling via prefix sums) and a min tree (for
    computing the maximum importance-sampling weight used to normalize all
    other weights).
    """

    def __init__(
        self,
        capacity: int = 10000,
        storage_unit: str = "timesteps",
        alpha: float = 1.0,
        **kwargs
    ):
        """Initializes a PrioritizedReplayBuffer instance.

        Args:
            capacity: Max number of timesteps to store in the FIFO
                buffer. After reaching this number, older samples will be
                dropped to make space for new ones.
            storage_unit: Either 'timesteps', 'sequences' or
                'episodes'. Specifies how experiences are stored.
            alpha: How much prioritization is used
                (0.0=no prioritization, 1.0=full prioritization).
            ``**kwargs``: Forward compatibility kwargs.
        """
        ReplayBuffer.__init__(self, capacity, storage_unit, **kwargs)

        # alpha == 0 would make all priorities 1.0 (uniform); the assert
        # rules out non-positive values outright.
        assert alpha > 0
        self._alpha = alpha

        # Segment tree must have capacity that is a power of 2
        it_capacity = 1
        while it_capacity < self.capacity:
            it_capacity *= 2

        self._it_sum = SumSegmentTree(it_capacity)
        self._it_min = MinSegmentTree(it_capacity)
        # New items enter the buffer with the highest priority seen so far,
        # so they are sampled at least once before being re-prioritized.
        self._max_priority = 1.0
        # Rolling window of priority-update deltas (exposed via stats(debug=True)).
        self._prio_change_stats = WindowStat("reprio", 1000)

    @DeveloperAPI
    @override(ReplayBuffer)
    def _add_single_batch(self, item: SampleBatchType, **kwargs) -> None:
        """Add a batch of experiences to self._storage with weight.

        An item consists of either one or more timesteps, a sequence or an
        episode. Differs from add() in that it does not consider the storage
        unit or type of batch and simply stores it.

        Args:
            item: The item to be added.
            ``**kwargs``: Forward compatibility kwargs. A "weight" kwarg, if
                given, is used as the item's initial priority; otherwise the
                current max priority is used.
        """
        weight = kwargs.get("weight", None)

        if weight is None:
            weight = self._max_priority

        # Register the priority at the slot the parent class is about to
        # write to (self._next_idx), before the parent advances it.
        self._it_sum[self._next_idx] = weight**self._alpha
        self._it_min[self._next_idx] = weight**self._alpha

        ReplayBuffer._add_single_batch(self, item)

    def _sample_proportional(self, num_items: int) -> List[int]:
        """Draws `num_items` storage indices proportional to their priorities.

        Indices may repeat (sampling with replacement).
        """
        res = []
        for _ in range(num_items):
            # TODO(szymon): should we ensure no repeats?
            # Uniform mass in [0, total priority), mapped back to the index
            # whose cumulative priority covers it.
            mass = random.random() * self._it_sum.sum(0, len(self._storage))
            idx = self._it_sum.find_prefixsum_idx(mass)
            res.append(idx)
        return res

    @DeveloperAPI
    @override(ReplayBuffer)
    def sample(
        self, num_items: int, beta: float, **kwargs
    ) -> Optional[SampleBatchType]:
        """Sample `num_items` items from this buffer, including prio. weights.

        Samples in the results may be repeated.

        Examples for storage of SamplesBatches:
        - If storage unit `timesteps` has been chosen and batches of
        size 5 have been added, sample(5) will yield a concatenated batch of
        15 timesteps.
        - If storage unit 'sequences' has been chosen and sequences of
        different lengths have been added, sample(5) will yield a concatenated
        batch with a number of timesteps equal to the sum of timesteps in
        the 5 sampled sequences.
        - If storage unit 'episodes' has been chosen and episodes of
        different lengths have been added, sample(5) will yield a concatenated
        batch with a number of timesteps equal to the sum of timesteps in
        the 5 sampled episodes.

        Args:
            num_items: Number of items to sample from this buffer.
            beta: To what degree to use importance weights (0 - no corrections,
                1 - full correction).
            ``**kwargs``: Forward compatibility kwargs.

        Returns:
            Concatenated SampleBatch of items including "weights" and
            "batch_indexes" fields denoting IS of each sampled
            transition and original idxes in buffer of sampled experiences.

        Raises:
            ValueError: If the buffer is empty.
        """
        assert beta >= 0.0

        if len(self) == 0:
            raise ValueError("Trying to sample from an empty buffer.")

        idxes = self._sample_proportional(num_items)

        weights = []
        batch_indexes = []
        # Max possible IS weight (from the min-priority item); used to
        # normalize all weights into (0, 1].
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * len(self)) ** (-beta)

        for idx in idxes:
            p_sample = self._it_sum[idx] / self._it_sum.sum()
            weight = (p_sample * len(self)) ** (-beta)
            count = self._storage[idx].count
            # If zero-padded, count will not be the actual batch size of the
            # data.
            if (
                isinstance(self._storage[idx], SampleBatch)
                and self._storage[idx].zero_padded
            ):
                actual_size = self._storage[idx].max_seq_len
            else:
                actual_size = count
            # Repeat the per-item weight/index once per timestep so the
            # fields line up with the concatenated batch below.
            weights.extend([weight / max_weight] * actual_size)
            batch_indexes.extend([idx] * actual_size)
            self._num_timesteps_sampled += count
        batch = self._encode_sample(idxes)

        # Note: prioritization is not supported in multi agent lockstep
        if isinstance(batch, SampleBatch):
            batch["weights"] = np.array(weights)
            batch["batch_indexes"] = np.array(batch_indexes)

        return batch

    @DeveloperAPI
    def update_priorities(self, idxes: List[int], priorities: List[float]) -> None:
        """Update priorities of items at given indices.

        Sets priority of item at index idxes[i] in buffer
        to priorities[i].

        Args:
            idxes: List of indices of items
            priorities: List of updated priorities corresponding to items at the
                idxes denoted by variable `idxes`.
        """
        # Making sure we don't pass in e.g. a torch tensor.
        assert isinstance(
            idxes, (list, np.ndarray)
        ), "ERROR: `idxes` is not a list or np.ndarray, but {}!".format(
            type(idxes).__name__
        )
        assert len(idxes) == len(priorities)
        for idx, priority in zip(idxes, priorities):
            assert priority > 0
            assert 0 <= idx < len(self._storage)
            # Track how much the stored (alpha-scaled) priority changed.
            delta = priority**self._alpha - self._it_sum[idx]
            self._prio_change_stats.push(delta)
            self._it_sum[idx] = priority**self._alpha
            self._it_min[idx] = priority**self._alpha

            # Note: _max_priority tracks raw (un-scaled) priorities.
            self._max_priority = max(self._max_priority, priority)

    @DeveloperAPI
    @override(ReplayBuffer)
    def stats(self, debug: bool = False) -> Dict:
        """Returns the stats of this buffer.

        Args:
            debug: If true, adds sample eviction statistics to the returned stats dict.

        Returns:
            A dictionary of stats about this buffer.
        """
        parent = ReplayBuffer.stats(self, debug)
        if debug:
            parent.update(self._prio_change_stats.stats())
        return parent

    @DeveloperAPI
    @override(ReplayBuffer)
    def get_state(self) -> Dict[str, Any]:
        """Returns all local state.

        Returns:
            The serializable local state.
        """
        # Get parent state.
        state = super().get_state()
        # Add prio weights.
        state.update(
            {
                "sum_segment_tree": self._it_sum.get_state(),
                "min_segment_tree": self._it_min.get_state(),
                "max_priority": self._max_priority,
            }
        )
        return state

    @DeveloperAPI
    @override(ReplayBuffer)
    def set_state(self, state: Dict[str, Any]) -> None:
        """Restores all local state to the provided `state`.

        Args:
            state: The new state to set this buffer. Can be obtained by calling
                `self.get_state()`.
        """
        super().set_state(state)
        self._it_sum.set_state(state["sum_segment_tree"])
        self._it_min.set_state(state["min_segment_tree"])
        self._max_priority = state["max_priority"]
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/replay_buffer.py
ADDED
|
@@ -0,0 +1,374 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from enum import Enum
|
| 2 |
+
import logging
|
| 3 |
+
import numpy as np
|
| 4 |
+
import random
|
| 5 |
+
from typing import Any, Dict, List, Optional, Union
|
| 6 |
+
|
| 7 |
+
# Import ray before psutil will make sure we use psutil's bundled version
|
| 8 |
+
import ray # noqa F401
|
| 9 |
+
import psutil
|
| 10 |
+
|
| 11 |
+
from ray.rllib.policy.sample_batch import SampleBatch, concat_samples
|
| 12 |
+
from ray.rllib.utils.actor_manager import FaultAwareApply
|
| 13 |
+
from ray.rllib.utils.annotations import override
|
| 14 |
+
from ray.rllib.utils.metrics.window_stat import WindowStat
|
| 15 |
+
from ray.rllib.utils.replay_buffers.base import ReplayBufferInterface
|
| 16 |
+
from ray.rllib.utils.typing import SampleBatchType
|
| 17 |
+
from ray.util.annotations import DeveloperAPI
|
| 18 |
+
from ray.util.debug import log_once
|
| 19 |
+
|
| 20 |
+
# Constant that represents all policies in lockstep replay mode.
|
| 21 |
+
_ALL_POLICIES = "__all__"
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@DeveloperAPI
class StorageUnit(Enum):
    """Specifies how batches are structured in a ReplayBuffer.

    timesteps: One buffer slot per timestep.
    sequences: One buffer slot per sequence.
    episodes: One buffer slot per episode.
    fragments: One buffer slot per incoming batch.
    """

    TIMESTEPS = "timesteps"
    SEQUENCES = "sequences"
    EPISODES = "episodes"
    FRAGMENTS = "fragments"
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@DeveloperAPI
def warn_replay_capacity(*, item: SampleBatchType, num_items: int) -> None:
    """Warn if the configured replay buffer capacity is too large.

    Estimates the buffer's peak memory usage from the size of one item and
    the maximum number of items, compares it against total system memory,
    and raises / warns / logs accordingly. The message is emitted at most
    once per process.

    Args:
        item: A representative batch used to estimate per-item memory.
        num_items: Max number of such items the buffer may hold.

    Raises:
        ValueError: If the estimated usage exceeds total system memory.
    """
    # Only the very first call per process produces any output.
    if not log_once("replay_capacity"):
        return

    bytes_per_item = item.size_bytes()
    total_gb = psutil.virtual_memory().total / 1e9
    est_gb = num_items * bytes_per_item / 1e9
    msg = (
        "Estimated max memory usage for replay buffer is {} GB "
        "({} batches of size {}, {} bytes each), "
        "available system memory is {} GB".format(
            est_gb, num_items, item.count, bytes_per_item, total_gb
        )
    )

    # Hard-fail on clearly impossible configs; warn when the buffer would
    # occupy more than 20% of system memory; otherwise just log the estimate.
    if est_gb > total_gb:
        raise ValueError(msg)
    if est_gb > 0.2 * total_gb:
        logger.warning(msg)
    else:
        logger.info(msg)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@DeveloperAPI
class ReplayBuffer(ReplayBufferInterface, FaultAwareApply):
    """The lowest-level replay buffer interface used by RLlib.

    This class implements a basic ring-type of buffer with random sampling.
    ReplayBuffer is the base class for advanced types that add functionality while
    retaining compatibility through inheritance.

    The following examples show how buffers behave with different storage_units
    and capacities. This behaviour is generally similar for other buffers, although
    they might not implement all storage_units.

    Examples:

    .. testcode::

        from ray.rllib.utils.replay_buffers.replay_buffer import ReplayBuffer
        from ray.rllib.utils.replay_buffers.replay_buffer import StorageUnit
        from ray.rllib.policy.sample_batch import SampleBatch

        # Store any batch as a whole
        buffer = ReplayBuffer(capacity=10, storage_unit=StorageUnit.FRAGMENTS)
        buffer.add(SampleBatch({"a": [1], "b": [2, 3, 4]}))
        buffer.sample(1)

        # Store only complete episodes
        buffer = ReplayBuffer(capacity=10,
                              storage_unit=StorageUnit.EPISODES)
        buffer.add(SampleBatch({"c": [1, 2, 3, 4],
                                SampleBatch.T: [0, 1, 0, 1],
                                SampleBatch.TERMINATEDS: [False, True, False, True],
                                SampleBatch.EPS_ID: [0, 0, 1, 1]}))
        buffer.sample(1)

        # Store single timesteps
        buffer = ReplayBuffer(capacity=2, storage_unit=StorageUnit.TIMESTEPS)
        buffer.add(SampleBatch({"a": [1, 2], SampleBatch.T: [0, 1]}))
        buffer.sample(1)

        buffer.add(SampleBatch({"a": [3], SampleBatch.T: [2]}))
        print(buffer._eviction_started)
        buffer.sample(1)

        buffer = ReplayBuffer(capacity=10, storage_unit=StorageUnit.SEQUENCES)
        buffer.add(SampleBatch({"c": [1, 2, 3], SampleBatch.SEQ_LENS: [1, 2]}))
        buffer.sample(1)

    .. testoutput::

        True

    `True` is not the output of the above testcode, but an artifact of unexpected
    behaviour of sphinx doctests.
    (see https://github.com/ray-project/ray/pull/32477#discussion_r1106776101)
    """

    def __init__(
        self,
        capacity: int = 10000,
        storage_unit: Union[str, StorageUnit] = "timesteps",
        **kwargs,
    ):
        """Initializes a (FIFO) ReplayBuffer instance.

        Args:
            capacity: Max number of timesteps to store in this FIFO
                buffer. After reaching this number, older samples will be
                dropped to make space for new ones.
            storage_unit: If not a StorageUnit, either 'timesteps', 'sequences' or
                'episodes'. Specifies how experiences are stored.
            ``**kwargs``: Forward compatibility kwargs.

        Raises:
            ValueError: If `storage_unit` is not recognized or `capacity` <= 0.
        """

        # Normalize the storage unit (string or enum) to the enum value.
        if storage_unit in ["timesteps", StorageUnit.TIMESTEPS]:
            self.storage_unit = StorageUnit.TIMESTEPS
        elif storage_unit in ["sequences", StorageUnit.SEQUENCES]:
            self.storage_unit = StorageUnit.SEQUENCES
        elif storage_unit in ["episodes", StorageUnit.EPISODES]:
            self.storage_unit = StorageUnit.EPISODES
        elif storage_unit in ["fragments", StorageUnit.FRAGMENTS]:
            self.storage_unit = StorageUnit.FRAGMENTS
        else:
            raise ValueError(
                f"storage_unit must be either '{StorageUnit.TIMESTEPS}', "
                f"'{StorageUnit.SEQUENCES}', '{StorageUnit.EPISODES}' "
                f"or '{StorageUnit.FRAGMENTS}', but is {storage_unit}"
            )

        # The actual storage (list of SampleBatches or MultiAgentBatches).
        self._storage = []

        # Caps the number of timesteps stored in this buffer
        if capacity <= 0:
            raise ValueError(
                "Capacity of replay buffer has to be greater than zero "
                "but was set to {}.".format(capacity)
            )
        self.capacity = capacity
        # The next index to override in the buffer.
        self._next_idx = 0
        # len(self._hit_count) must always be less than len(capacity)
        # Per-slot counter of how often each stored item has been sampled.
        self._hit_count = np.zeros(self.capacity)

        # Whether we have already hit our capacity (and have therefore
        # started to evict older samples).
        self._eviction_started = False

        # Number of (single) timesteps that have been added to the buffer
        # over its lifetime. Note that each added item (batch) may contain
        # more than one timestep.
        self._num_timesteps_added = 0
        self._num_timesteps_added_wrap = 0

        # Number of (single) timesteps that have been sampled from the buffer
        # over its lifetime.
        self._num_timesteps_sampled = 0

        self._evicted_hit_stats = WindowStat("evicted_hit", 1000)
        # Running estimate of the storage's total memory footprint in bytes.
        self._est_size_bytes = 0

        self.batch_size = None

    @override(ReplayBufferInterface)
    def __len__(self) -> int:
        # Number of stored items (slots), NOT the number of timesteps.
        return len(self._storage)

    @override(ReplayBufferInterface)
    def add(self, batch: SampleBatchType, **kwargs) -> None:
        """Adds a batch of experiences or other data to this buffer.

        Splits batch into chunks of timesteps, sequences or episodes, depending on
        `self._storage_unit`. Calls `self._add_single_batch` to add resulting slices
        to the buffer storage.

        Args:
            batch: The batch to add.
            ``**kwargs``: Forward compatibility kwargs.
        """
        # Silently ignore empty batches.
        if not batch.count > 0:
            return

        warn_replay_capacity(item=batch, num_items=self.capacity / batch.count)

        if self.storage_unit == StorageUnit.TIMESTEPS:
            # One slot per single timestep.
            timeslices = batch.timeslices(1)
            for t in timeslices:
                self._add_single_batch(t, **kwargs)

        elif self.storage_unit == StorageUnit.SEQUENCES:
            # One slot per sequence, sliced out via SEQ_LENS.
            timestep_count = 0
            for seq_len in batch.get(SampleBatch.SEQ_LENS):
                start_seq = timestep_count
                end_seq = timestep_count + seq_len
                self._add_single_batch(batch[start_seq:end_seq], **kwargs)
                timestep_count = end_seq

        elif self.storage_unit == StorageUnit.EPISODES:
            for eps in batch.split_by_episode():
                # An episode is "complete" if it starts at T=0 and ends in a
                # terminated or truncated step; missing fields default to
                # "assume complete".
                if eps.get(SampleBatch.T, [0])[0] == 0 and (
                    eps.get(SampleBatch.TERMINATEDS, [True])[-1]
                    or eps.get(SampleBatch.TRUNCATEDS, [False])[-1]
                ):
                    # Only add full episodes to the buffer
                    # Check only if info is available
                    self._add_single_batch(eps, **kwargs)
                else:
                    if log_once("only_full_episodes"):
                        logger.info(
                            "This buffer uses episodes as a storage "
                            "unit and thus allows only full episodes "
                            "to be added to it (starting from T=0 and ending in "
                            "`terminateds=True` or `truncateds=True`. "
                            "Some samples may be dropped."
                        )

        elif self.storage_unit == StorageUnit.FRAGMENTS:
            # One slot per incoming batch, as-is.
            self._add_single_batch(batch, **kwargs)

    @DeveloperAPI
    def _add_single_batch(self, item: SampleBatchType, **kwargs) -> None:
        """Add a SampleBatch of experiences to self._storage.

        An item consists of either one or more timesteps, a sequence or an
        episode. Differs from add() in that it does not consider the storage
        unit or type of batch and simply stores it.

        Args:
            item: The batch to be added.
            ``**kwargs``: Forward compatibility kwargs.
        """
        self._num_timesteps_added += item.count
        self._num_timesteps_added_wrap += item.count

        if self._next_idx >= len(self._storage):
            # Still growing: append a new slot.
            self._storage.append(item)
            self._est_size_bytes += item.size_bytes()
        else:
            # Overwriting an existing slot: keep the size estimate in sync.
            item_to_be_removed = self._storage[self._next_idx]
            self._est_size_bytes -= item_to_be_removed.size_bytes()
            self._storage[self._next_idx] = item
            self._est_size_bytes += item.size_bytes()

        # Eviction of older samples has already started (buffer is "full").
        if self._eviction_started:
            self._evicted_hit_stats.push(self._hit_count[self._next_idx])
            self._hit_count[self._next_idx] = 0

        # Wrap around storage as a circular buffer once we hit capacity.
        # Note: capacity is counted in timesteps, while slots hold whole
        # items, so wrapping is triggered by the timestep counter.
        if self._num_timesteps_added_wrap >= self.capacity:
            self._eviction_started = True
            self._num_timesteps_added_wrap = 0
            self._next_idx = 0
        else:
            self._next_idx += 1

    @override(ReplayBufferInterface)
    def sample(
        self, num_items: Optional[int] = None, **kwargs
    ) -> Optional[SampleBatchType]:
        """Samples `num_items` items from this buffer.

        The items depend on the buffer's storage_unit.
        Samples in the results may be repeated.

        Examples for sampling results:

        1) If storage unit 'timesteps' has been chosen and batches of
        size 5 have been added, sample(5) will yield a concatenated batch of
        15 timesteps.

        2) If storage unit 'sequences' has been chosen and sequences of
        different lengths have been added, sample(5) will yield a concatenated
        batch with a number of timesteps equal to the sum of timesteps in
        the 5 sampled sequences.

        3) If storage unit 'episodes' has been chosen and episodes of
        different lengths have been added, sample(5) will yield a concatenated
        batch with a number of timesteps equal to the sum of timesteps in
        the 5 sampled episodes.

        Args:
            num_items: Number of items to sample from this buffer.
            ``**kwargs``: Forward compatibility kwargs.

        Returns:
            Concatenated batch of items.

        Raises:
            ValueError: If the buffer is empty.
        """
        if len(self) == 0:
            raise ValueError("Trying to sample from an empty buffer.")
        # NOTE(review): the default num_items=None would make range(num_items)
        # raise TypeError here; callers appear to always pass an int — the
        # Optional default likely only exists for subclass signature
        # compatibility. Confirm before relying on it.
        idxes = [random.randint(0, len(self) - 1) for _ in range(num_items)]
        sample = self._encode_sample(idxes)
        self._num_timesteps_sampled += sample.count
        return sample

    @DeveloperAPI
    def stats(self, debug: bool = False) -> dict:
        """Returns the stats of this buffer.

        Args:
            debug: If True, adds sample eviction statistics to the returned
                stats dict.

        Returns:
            A dictionary of stats about this buffer.
        """
        data = {
            "added_count": self._num_timesteps_added,
            "added_count_wrapped": self._num_timesteps_added_wrap,
            "eviction_started": self._eviction_started,
            "sampled_count": self._num_timesteps_sampled,
            "est_size_bytes": self._est_size_bytes,
            "num_entries": len(self._storage),
        }
        if debug:
            data.update(self._evicted_hit_stats.stats())
        return data

    @override(ReplayBufferInterface)
    def get_state(self) -> Dict[str, Any]:
        """Returns all local state (storage, write index and counters)."""
        state = {"_storage": self._storage, "_next_idx": self._next_idx}
        state.update(self.stats(debug=False))
        return state

    @override(ReplayBufferInterface)
    def set_state(self, state: Dict[str, Any]) -> None:
        """Restores local state from a dict produced by `get_state()`."""
        # The actual storage.
        self._storage = state["_storage"]
        self._next_idx = state["_next_idx"]
        # Stats and counts.
        self._num_timesteps_added = state["added_count"]
        self._num_timesteps_added_wrap = state["added_count_wrapped"]
        self._eviction_started = state["eviction_started"]
        self._num_timesteps_sampled = state["sampled_count"]
        self._est_size_bytes = state["est_size_bytes"]

    @DeveloperAPI
    def _encode_sample(self, idxes: List[int]) -> SampleBatchType:
        """Fetches concatenated samples at given indices from the storage."""
        samples = []
        for i in idxes:
            # Track per-slot sampling frequency for eviction stats.
            self._hit_count[i] += 1
            samples.append(self._storage[i])

        if samples:
            # We assume all samples are of same type
            out = concat_samples(samples)
        else:
            out = SampleBatch()
        out.decompress_if_needed()
        return out
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/reservoir_replay_buffer.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict
|
| 2 |
+
import random
|
| 3 |
+
|
| 4 |
+
# Import ray before psutil will make sure we use psutil's bundled version
|
| 5 |
+
import ray # noqa F401
|
| 6 |
+
import psutil # noqa E402
|
| 7 |
+
|
| 8 |
+
from ray.rllib.utils.annotations import ExperimentalAPI, override
|
| 9 |
+
from ray.rllib.utils.replay_buffers.replay_buffer import (
|
| 10 |
+
ReplayBuffer,
|
| 11 |
+
warn_replay_capacity,
|
| 12 |
+
)
|
| 13 |
+
from ray.rllib.utils.typing import SampleBatchType
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# __sphinx_doc_reservoir_buffer__begin__
@ExperimentalAPI
class ReservoirReplayBuffer(ReplayBuffer):
    """This buffer implements reservoir sampling.

    The algorithm has been described by Jeffrey S. Vitter in "Random sampling
    with a reservoir". Once the buffer is full, each newly added item replaces
    a uniformly chosen stored item with probability
    ``len(storage) / num_add_calls``, so every item ever added has an equal
    chance of residing in the buffer.
    """

    def __init__(
        self, capacity: int = 10000, storage_unit: str = "timesteps", **kwargs
    ):
        """Initializes a ReservoirBuffer instance.

        Args:
            capacity: Max number of timesteps to store in the FIFO
                buffer. After reaching this number, older samples will be
                dropped to make space for new ones.
            storage_unit: Either 'timesteps', 'sequences' or
                'episodes'. Specifies how experiences are stored.
        """
        ReplayBuffer.__init__(self, capacity, storage_unit)
        # Lifetime number of _add_single_batch() calls (drives the
        # reservoir acceptance probability).
        self._num_add_calls = 0
        # Number of stored items replaced by reservoir eviction.
        self._num_evicted = 0

    @ExperimentalAPI
    @override(ReplayBuffer)
    def _add_single_batch(self, item: SampleBatchType, **kwargs) -> None:
        """Add a SampleBatch of experiences to self._storage.

        An item consists of either one or more timesteps, a sequence or an
        episode. Differs from add() in that it does not consider the storage
        unit or type of batch and simply stores it.

        Args:
            item: The batch to be added.
            ``**kwargs``: Forward compatibility kwargs.
        """
        self._num_timesteps_added += item.count
        self._num_timesteps_added_wrap += item.count

        # Update add counts.
        self._num_add_calls += 1

        if self._num_timesteps_added < self.capacity:
            # Still filling up: simply append.
            self._storage.append(item)
            self._est_size_bytes += item.size_bytes()
        else:
            # Eviction of older samples has already started (buffer is "full")
            self._eviction_started = True
            # Reservoir step: draw a uniform slot in [0, num_add_calls);
            # replace only if it falls inside the current storage, which
            # keeps every added item equally likely to remain.
            idx = random.randint(0, self._num_add_calls - 1)
            if idx < len(self._storage):
                self._num_evicted += 1
                # Record the evicted slot's hit count once, then reset it.
                # (A previous version pushed the count a second time right
                # after zeroing it, always recording a bogus 0.)
                self._evicted_hit_stats.push(self._hit_count[idx])
                self._hit_count[idx] = 0
                # This is a bit of a hack: ReplayBuffer always inserts at
                # self._next_idx
                self._next_idx = idx

                item_to_be_removed = self._storage[idx]
                self._est_size_bytes -= item_to_be_removed.size_bytes()
                self._storage[idx] = item
                self._est_size_bytes += item.size_bytes()

        assert item.count > 0, item
        warn_replay_capacity(item=item, num_items=self.capacity / item.count)

    @ExperimentalAPI
    @override(ReplayBuffer)
    def stats(self, debug: bool = False) -> dict:
        """Returns the stats of this buffer.

        Args:
            debug: If True, adds sample eviction statistics to the returned
                stats dict.

        Returns:
            A dictionary of stats about this buffer.
        """
        data = {
            "num_evicted": self._num_evicted,
            "num_add_calls": self._num_add_calls,
        }
        parent = ReplayBuffer.stats(self, debug)
        parent.update(data)
        return parent

    @ExperimentalAPI
    @override(ReplayBuffer)
    def get_state(self) -> Dict[str, Any]:
        """Returns all local state.

        Returns:
            The serializable local state.
        """
        parent = ReplayBuffer.get_state(self)
        parent.update(self.stats())
        return parent

    @ExperimentalAPI
    @override(ReplayBuffer)
    def set_state(self, state: Dict[str, Any]) -> None:
        """Restores all local state to the provided `state`.

        Args:
            state: The new state to set this buffer. Can be
                obtained by calling `self.get_state()`.
        """
        self._num_evicted = state["num_evicted"]
        self._num_add_calls = state["num_add_calls"]
        ReplayBuffer.set_state(self, state)


# __sphinx_doc_reservoir_buffer__end__
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/simple_replay_buffer.py
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/replay_buffers/utils.py
ADDED
|
@@ -0,0 +1,440 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import psutil
|
| 3 |
+
from typing import Any, Dict, Optional
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from ray.rllib.utils import deprecation_warning
|
| 8 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 9 |
+
from ray.rllib.utils.deprecation import DEPRECATED_VALUE
|
| 10 |
+
from ray.rllib.utils.from_config import from_config
|
| 11 |
+
from ray.rllib.utils.metrics import ALL_MODULES, TD_ERROR_KEY
|
| 12 |
+
from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY
|
| 13 |
+
from ray.rllib.utils.replay_buffers import (
|
| 14 |
+
EpisodeReplayBuffer,
|
| 15 |
+
MultiAgentPrioritizedReplayBuffer,
|
| 16 |
+
PrioritizedEpisodeReplayBuffer,
|
| 17 |
+
ReplayBuffer,
|
| 18 |
+
MultiAgentReplayBuffer,
|
| 19 |
+
)
|
| 20 |
+
from ray.rllib.policy.sample_batch import concat_samples, MultiAgentBatch, SampleBatch
|
| 21 |
+
from ray.rllib.utils.typing import (
|
| 22 |
+
AlgorithmConfigDict,
|
| 23 |
+
ModuleID,
|
| 24 |
+
ResultDict,
|
| 25 |
+
SampleBatchType,
|
| 26 |
+
TensorType,
|
| 27 |
+
)
|
| 28 |
+
from ray.util import log_once
|
| 29 |
+
from ray.util.annotations import DeveloperAPI
|
| 30 |
+
|
| 31 |
+
logger = logging.getLogger(__name__)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@DeveloperAPI
def update_priorities_in_episode_replay_buffer(
    *,
    replay_buffer: EpisodeReplayBuffer,
    td_errors: Dict[ModuleID, TensorType],
) -> None:
    """Update per-module priorities in a prioritized episode replay buffer.

    Does nothing if `replay_buffer` is not a `PrioritizedEpisodeReplayBuffer`.

    Args:
        replay_buffer: The episode replay buffer whose priorities to update.
        td_errors: Per-module results dicts that (should) carry TD-errors
            under `TD_ERROR_KEY`.
    """
    # Buffers without priority support are silently skipped.
    if not isinstance(replay_buffer, PrioritizedEpisodeReplayBuffer):
        return

    # The `ResultDict` will be multi-agent (keyed by ModuleID).
    for module_id, module_td in td_errors.items():
        # Skip the aggregate (`"__all__"`) keys.
        if module_id in ["__all__", ALL_MODULES]:
            continue

        # Warn once per module if there are no TD-errors to update with.
        if TD_ERROR_KEY not in module_td or module_td[TD_ERROR_KEY] is None:
            if log_once(f"no_td_error_in_train_results_from_module_{module_id}"):
                logger.warning(
                    "Trying to update priorities for module with ID "
                    f"`{module_id}` in prioritized episode replay buffer without "
                    "providing `td_errors` in train_results. Priority update for "
                    "this policy is being skipped."
                )
            continue

        # TODO (simon): Implement multi-agent version. Remove, happens in buffer.
        # TODO (simon): Implement for stateful modules.
        replay_buffer.update_priorities(module_td[TD_ERROR_KEY], module_id)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@OldAPIStack
def update_priorities_in_replay_buffer(
    replay_buffer: ReplayBuffer,
    config: AlgorithmConfigDict,
    train_batch: SampleBatchType,
    train_results: ResultDict,
) -> None:
    """Updates the priorities in a prioritized replay buffer, given training results.

    The `abs(TD-error)` from the loss (inside `train_results`) is used as new
    priorities for the row-indices that were sampled for the train batch.

    Don't do anything if the given buffer does not support prioritized replay.

    Args:
        replay_buffer: The replay buffer, whose priority values to update. This may also
            be a buffer that does not support priorities.
        config: The Algorithm's config dict.
        train_batch: The batch used for the training update.
        train_results: A train results dict, generated by e.g. the `train_one_step()`
            utility.
    """
    # Only update priorities if buffer supports them.
    if isinstance(replay_buffer, MultiAgentPrioritizedReplayBuffer):
        # Go through training results for the different policies (maybe multi-agent).
        # Maps policy_id -> (batch_indices, td_error) for the final buffer call.
        prio_dict = {}
        for policy_id, info in train_results.items():
            # TODO(sven): This is currently structured differently for
            # torch/tf. Clean up these results/info dicts across
            # policies (note: fixing this in torch_policy.py will
            # break e.g. DDPPO!).
            td_error = info.get("td_error", info[LEARNER_STATS_KEY].get("td_error"))

            policy_batch = train_batch.policy_batches[policy_id]
            # Set the get_interceptor to None in order to be able to access the numpy
            # arrays directly (instead of e.g. a torch array).
            policy_batch.set_get_interceptor(None)
            # Get the replay buffer row indices that make up the `train_batch`.
            batch_indices = policy_batch.get("batch_indexes")

            if SampleBatch.SEQ_LENS in policy_batch:
                # NOTE(review): this branch calls len(td_error) before the
                # `td_error is None` guard below — presumably td_error is never
                # None when SEQ_LENS is present; confirm with callers.
                # Batch_indices are represented per column, in order to update
                # priorities, we need one index per td_error
                _batch_indices = []

                # Sequenced batches have been zero padded to max_seq_len.
                # Depending on how batches are split during learning, not all
                # sequences have an associated td_error (trailing ones missing).
                if policy_batch.zero_padded:
                    seq_lens = len(td_error) * [policy_batch.max_seq_len]
                else:
                    seq_lens = policy_batch[SampleBatch.SEQ_LENS][: len(td_error)]

                # Go through all indices by sequence that they represent and shrink
                # them to one index per sequences
                sequence_sum = 0
                for seq_len in seq_lens:
                    _batch_indices.append(batch_indices[sequence_sum])
                    sequence_sum += seq_len
                batch_indices = np.array(_batch_indices)

            # No TD-errors available for this policy -> warn once and skip it.
            if td_error is None:
                if log_once(
                    "no_td_error_in_train_results_from_policy_{}".format(policy_id)
                ):
                    logger.warning(
                        "Trying to update priorities for policy with id `{}` in "
                        "prioritized replay buffer without providing td_errors in "
                        "train_results. Priority update for this policy is being "
                        "skipped.".format(policy_id)
                    )
                continue

            # No buffer row indices in the batch -> warn once and skip it.
            if batch_indices is None:
                if log_once(
                    "no_batch_indices_in_train_result_for_policy_{}".format(policy_id)
                ):
                    logger.warning(
                        "Trying to update priorities for policy with id `{}` in "
                        "prioritized replay buffer without providing batch_indices in "
                        "train_batch. Priority update for this policy is being "
                        "skipped.".format(policy_id)
                    )
                continue

            # Try to transform batch_indices to td_error dimensions
            # (one index per replayed sequence of length T).
            if len(batch_indices) != len(td_error):
                T = replay_buffer.replay_sequence_length
                assert (
                    len(batch_indices) > len(td_error) and len(batch_indices) % T == 0
                )
                batch_indices = batch_indices.reshape([-1, T])[:, 0]
                assert len(batch_indices) == len(td_error)
            prio_dict[policy_id] = (batch_indices, td_error)

        # Make the actual buffer API call to update the priority weights on all
        # policies.
        replay_buffer.update_priorities(prio_dict)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
@DeveloperAPI
def sample_min_n_steps_from_buffer(
    replay_buffer: ReplayBuffer, min_steps: int, count_by_agent_steps: bool
) -> Optional[SampleBatchType]:
    """Samples a minimum of n timesteps from a given replay buffer.

    This utility method is primarily used by the QMIX algorithm and helps with
    sampling a given number of time steps which has stored samples in units
    of sequences or complete episodes. Samples batches from the replay buffer
    until the total number of timesteps reaches `min_steps`.

    Args:
        replay_buffer: The replay buffer to sample from.
        min_steps: The minimum number of timesteps to sample.
        count_by_agent_steps: Whether to count agent steps or env steps.

    Returns:
        A concatenated SampleBatch or MultiAgentBatch with samples from the
        buffer.
    """
    sampled_steps = 0
    sampled_batches = []
    while sampled_steps < min_steps:
        batch = replay_buffer.sample(num_items=1)
        batch_len = batch.agent_steps() if count_by_agent_steps else batch.env_steps()
        if batch_len == 0:
            # Replay has not started yet, so we can't accumulate timesteps
            # here; return the (empty) batch as-is.
            return batch
        sampled_batches.append(batch)
        sampled_steps += batch_len
    # All batch types are the same type, hence we can use any concat_samples().
    return concat_samples(sampled_batches)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@DeveloperAPI
def validate_buffer_config(config: dict) -> None:
    """Checks and fixes values in the replay buffer config.

    Checks the replay buffer config for common misconfigurations, warns or raises
    error in case validation fails. The type "key" is changed into the inferred
    replay buffer class. Note: mutates `config` in place.

    Args:
        config: The replay buffer config to be validated.

    Raises:
        ValueError: When detecting severe misconfiguration.
    """
    # Make sure a (possibly empty) replay_buffer_config sub-dict exists.
    if config.get("replay_buffer_config", None) is None:
        config["replay_buffer_config"] = {}

    # Hard-deprecated (error=True) old top-level keys follow.
    if config.get("worker_side_prioritization", DEPRECATED_VALUE) != DEPRECATED_VALUE:
        deprecation_warning(
            old="config['worker_side_prioritization']",
            new="config['replay_buffer_config']['worker_side_prioritization']",
            error=True,
        )

    prioritized_replay = config.get("prioritized_replay", DEPRECATED_VALUE)
    if prioritized_replay != DEPRECATED_VALUE:
        deprecation_warning(
            old="config['prioritized_replay'] or config['replay_buffer_config']["
            "'prioritized_replay']",
            help="Replay prioritization specified by config key. RLlib's new replay "
            "buffer API requires setting `config["
            "'replay_buffer_config']['type']`, e.g. `config["
            "'replay_buffer_config']['type'] = "
            "'MultiAgentPrioritizedReplayBuffer'` to change the default "
            "behaviour.",
            error=True,
        )

    # Old `buffer_size` (either location) was replaced by `capacity`.
    capacity = config.get("buffer_size", DEPRECATED_VALUE)
    if capacity == DEPRECATED_VALUE:
        capacity = config["replay_buffer_config"].get("buffer_size", DEPRECATED_VALUE)
    if capacity != DEPRECATED_VALUE:
        deprecation_warning(
            old="config['buffer_size'] or config['replay_buffer_config']["
            "'buffer_size']",
            new="config['replay_buffer_config']['capacity']",
            error=True,
        )

    # Soft-deprecated: `burn_in` is copied to the new location, then warned.
    replay_burn_in = config.get("burn_in", DEPRECATED_VALUE)
    if replay_burn_in != DEPRECATED_VALUE:
        config["replay_buffer_config"]["replay_burn_in"] = replay_burn_in
        deprecation_warning(
            old="config['burn_in']",
            help="config['replay_buffer_config']['replay_burn_in']",
        )

    replay_batch_size = config.get("replay_batch_size", DEPRECATED_VALUE)
    if replay_batch_size == DEPRECATED_VALUE:
        replay_batch_size = config["replay_buffer_config"].get(
            "replay_batch_size", DEPRECATED_VALUE
        )
    if replay_batch_size != DEPRECATED_VALUE:
        deprecation_warning(
            old="config['replay_batch_size'] or config['replay_buffer_config']["
            "'replay_batch_size']",
            help="Specification of replay_batch_size is not supported anymore but is "
            "derived from `train_batch_size`. Specify the number of "
            "items you want to replay upon calling the sample() method of replay "
            "buffers if this does not work for you.",
            error=True,
        )

    # Deprecation of old-style replay buffer args
    # Warnings before checking of we need local buffer so that algorithms
    # Without local buffer also get warned
    keys_with_deprecated_positions = [
        "prioritized_replay_alpha",
        "prioritized_replay_beta",
        "prioritized_replay_eps",
        "no_local_replay_buffer",
        "replay_zero_init_states",
        "replay_buffer_shards_colocated_with_driver",
    ]
    for k in keys_with_deprecated_positions:
        if config.get(k, DEPRECATED_VALUE) != DEPRECATED_VALUE:
            deprecation_warning(
                old="config['{}']".format(k),
                help="config['replay_buffer_config']['{}']" "".format(k),
                error=False,
            )
            # Copy values over to new location in config to support new
            # and old configuration style.
            if config.get("replay_buffer_config") is not None:
                config["replay_buffer_config"][k] = config[k]

    # `learning_starts` (either location) -> num_steps_sampled_before_learning_starts.
    learning_starts = config.get(
        "learning_starts",
        config.get("replay_buffer_config", {}).get("learning_starts", DEPRECATED_VALUE),
    )
    if learning_starts != DEPRECATED_VALUE:
        deprecation_warning(
            old="config['learning_starts'] or"
            "config['replay_buffer_config']['learning_starts']",
            help="config['num_steps_sampled_before_learning_starts']",
            error=True,
        )
        config["num_steps_sampled_before_learning_starts"] = learning_starts

    # Can't use DEPRECATED_VALUE here because this is also a deliberate
    # value set for some algorithms
    # TODO: (Artur): Compare to DEPRECATED_VALUE on deprecation
    replay_sequence_length = config.get("replay_sequence_length", None)
    if replay_sequence_length is not None:
        config["replay_buffer_config"][
            "replay_sequence_length"
        ] = replay_sequence_length
        deprecation_warning(
            old="config['replay_sequence_length']",
            help="Replay sequence length specified at new "
            "location config['replay_buffer_config']["
            "'replay_sequence_length'] will be overwritten.",
            error=True,
        )

    replay_buffer_config = config["replay_buffer_config"]
    assert (
        "type" in replay_buffer_config
    ), "Can not instantiate ReplayBuffer from config without 'type' key."

    # Check if old replay buffer should be instantiated
    buffer_type = config["replay_buffer_config"]["type"]

    if isinstance(buffer_type, str) and buffer_type.find(".") == -1:
        # Create valid full [module].[class] string for from_config
        config["replay_buffer_config"]["type"] = (
            "ray.rllib.utils.replay_buffers." + buffer_type
        )

    # Instantiate a dummy buffer to fail early on misconfiguration and find out about
    # inferred buffer class
    dummy_buffer = from_config(buffer_type, config["replay_buffer_config"])

    # Replace the string/class key with the actual inferred buffer class.
    config["replay_buffer_config"]["type"] = type(dummy_buffer)

    # Prioritized buffers (those exposing `update_priorities`) have extra
    # constraints; non-prioritized ones must not request worker-side prios.
    if hasattr(dummy_buffer, "update_priorities"):
        if (
            config["replay_buffer_config"].get("replay_mode", "independent")
            == "lockstep"
        ):
            raise ValueError(
                "Prioritized replay is not supported when replay_mode=lockstep."
            )
        elif config["replay_buffer_config"].get("replay_sequence_length", 0) > 1:
            raise ValueError(
                "Prioritized replay is not supported when "
                "replay_sequence_length > 1."
            )
    else:
        if config["replay_buffer_config"].get("worker_side_prioritization"):
            raise ValueError(
                "Worker side prioritization is not supported when "
                "prioritized_replay=False."
            )
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
@DeveloperAPI
def warn_replay_buffer_capacity(*, item: SampleBatchType, capacity: int) -> None:
    """Warn if the configured replay buffer capacity is too large for machine's memory.

    Args:
        item: A (example) item that's supposed to be added to the buffer.
            This is used to compute the overall memory footprint estimate for the
            buffer.
        capacity: The capacity value of the buffer. This is interpreted as the
            number of items (such as given `item`) that will eventually be stored in
            the buffer.

    Raises:
        ValueError: If computed memory footprint for the buffer exceeds the machine's
            RAM.
    """
    # Only perform (and log) this estimate once per process.
    if not log_once("warn_replay_buffer_capacity"):
        return

    bytes_per_item = item.size_bytes()
    available_gb = psutil.virtual_memory().total / 1e9
    estimated_gb = capacity * bytes_per_item / 1e9
    msg = (
        f"Estimated max memory usage for replay buffer is {estimated_gb} GB "
        f"({capacity} batches of size {item.count}, {bytes_per_item} bytes each), "
        f"available system memory is {available_gb} GB"
    )
    # Escalate based on how much of total RAM the buffer would consume.
    if estimated_gb > available_gb:
        raise ValueError(msg)
    if estimated_gb > 0.2 * available_gb:
        logger.warning(msg)
    else:
        logger.info(msg)
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def patch_buffer_with_fake_sampling_method(
    buffer: ReplayBuffer, fake_sample_output: SampleBatchType
) -> None:
    """Patch a ReplayBuffer such that we always sample fake_sample_output.

    Transforms fake_sample_output into a MultiAgentBatch if it is not a
    MultiAgentBatch and the buffer is a MultiAgentBuffer. This is useful for
    testing purposes if we need deterministic sampling.

    Args:
        buffer: The buffer to be patched.
        fake_sample_output: The output to be sampled.
    """
    needs_wrapping = isinstance(buffer, MultiAgentReplayBuffer) and not isinstance(
        fake_sample_output, MultiAgentBatch
    )
    if needs_wrapping:
        fake_sample_output = SampleBatch(fake_sample_output).as_multi_agent()

    def _fixed_sample(_: Any = None, **kwargs) -> Optional[SampleBatchType]:
        """Ignore all arguments and return the predefined batch."""
        return fake_sample_output

    # Monkey-patch the instance's sample() method.
    buffer.sample = _fixed_sample
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/sgd.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utils for minibatch SGD across multiple RLlib policies."""
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import numpy as np
|
| 5 |
+
import random
|
| 6 |
+
|
| 7 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 8 |
+
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
|
| 9 |
+
from ray.rllib.utils.metrics.learner_info import LearnerInfoBuilder
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@OldAPIStack
def standardized(array: np.ndarray):
    """Normalize the values in an array.

    Args:
        array (np.ndarray): Array of values to normalize.

    Returns:
        array with zero mean and unit standard deviation.
    """
    # Clamp the std at 1e-4 to avoid division by ~zero for constant arrays.
    centered = array - array.mean()
    return centered / max(1e-4, array.std())
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@OldAPIStack
def minibatches(samples: SampleBatch, sgd_minibatch_size: int, shuffle: bool = True):
    """Return a generator yielding minibatches from a sample batch.

    Args:
        samples: SampleBatch to split up.
        sgd_minibatch_size: Size of minibatches to return.
        shuffle: Whether to shuffle the order of the generated minibatches.
            Note that in case of a non-recurrent policy, the incoming batch
            is globally shuffled first regardless of this setting, before
            the minibatches are generated from it!

    Yields:
        SampleBatch: Each of size `sgd_minibatch_size`.
    """
    # A falsy minibatch size means: yield the whole batch unsplit.
    if not sgd_minibatch_size:
        yield samples
        return

    if isinstance(samples, MultiAgentBatch):
        raise NotImplementedError(
            "Minibatching not implemented for multi-agent in simple mode"
        )

    # Non-recurrent data (no RNN state columns) gets globally shuffled first.
    has_state = "state_in_0" in samples or "state_out_0" in samples
    if not has_state:
        samples.shuffle()

    data_slices, state_slices = samples._get_slice_indices(sgd_minibatch_size)

    if state_slices:
        # Make sure to shuffle data and states while linked together.
        paired_slices = list(zip(data_slices, state_slices))
        if shuffle:
            random.shuffle(paired_slices)
        for (start, stop), (s_start, s_stop) in paired_slices:
            yield samples.slice(start, stop, s_start, s_stop)
    else:
        if shuffle:
            random.shuffle(data_slices)
        for start, stop in data_slices:
            yield samples[start:stop]
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@OldAPIStack
def do_minibatch_sgd(
    samples,
    policies,
    local_worker,
    num_sgd_iter,
    sgd_minibatch_size,
    standardize_fields,
):
    """Execute minibatch SGD.

    Args:
        samples: Batch of samples to optimize.
        policies: Dictionary of policies to optimize.
        local_worker: Master rollout worker instance.
        num_sgd_iter: Number of epochs of optimization to take.
        sgd_minibatch_size: Size of minibatches to use for optimization.
        standardize_fields: List of sample field names that should be
            normalized prior to optimization.

    Returns:
        averaged info fetches over the last SGD epoch taken.
    """

    # Handle everything as if multi-agent.
    samples = samples.as_multi_agent()

    # Use LearnerInfoBuilder as a unified way to build the final
    # results dict from `learn_on_loaded_batch` call(s).
    # This makes sure results dicts always have the same structure
    # no matter the setup (multi-GPU, multi-agent, minibatch SGD,
    # tf vs torch).
    learner_info_builder = LearnerInfoBuilder(num_devices=1)
    for policy_id, policy in policies.items():
        # Skip policies that got no data in this batch.
        if policy_id not in samples.policy_batches:
            continue

        batch = samples.policy_batches[policy_id]
        # Normalize the requested fields (e.g. advantages) in place.
        for field in standardize_fields:
            batch[field] = standardized(batch[field])

        # Check to make sure that the sgd_minibatch_size is not smaller
        # than max_seq_len otherwise this will cause indexing errors while
        # performing sgd when using a RNN or Attention model
        if (
            policy.is_recurrent()
            and policy.config["model"]["max_seq_len"] > sgd_minibatch_size
        ):
            raise ValueError(
                "`sgd_minibatch_size` ({}) cannot be smaller than"
                "`max_seq_len` ({}).".format(
                    sgd_minibatch_size, policy.config["model"]["max_seq_len"]
                )
            )

        # Run `num_sgd_iter` epochs over this policy's data.
        for i in range(num_sgd_iter):
            for minibatch in minibatches(batch, sgd_minibatch_size):
                # learn_on_batch returns a per-policy results dict; pick out
                # this policy's entry.
                results = (
                    local_worker.learn_on_batch(
                        MultiAgentBatch({policy_id: minibatch}, minibatch.count)
                    )
                )[policy_id]
                learner_info_builder.add_learn_on_batch_results(results, policy_id)

    learner_info = learner_info_builder.finalize()
    return learner_info
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/tensor_dtype.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from ray.rllib.utils.typing import TensorType
|
| 4 |
+
from ray.rllib.utils.framework import try_import_torch, try_import_tf
|
| 5 |
+
from ray.util.annotations import PublicAPI
|
| 6 |
+
|
| 7 |
+
torch, _ = try_import_torch()
|
| 8 |
+
_, tf, _ = try_import_tf()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Dict of NumPy dtype -> torch dtype
# (empty if torch is not installed, see try_import_torch above).
if torch:
    numpy_to_torch_dtype_dict = {
        np.bool_: torch.bool,
        np.uint8: torch.uint8,
        np.int8: torch.int8,
        np.int16: torch.int16,
        np.int32: torch.int32,
        np.int64: torch.int64,
        np.float16: torch.float16,
        np.float32: torch.float32,
        np.float64: torch.float64,
        np.complex64: torch.complex64,
        np.complex128: torch.complex128,
    }
else:
    numpy_to_torch_dtype_dict = {}

# Dict of NumPy dtype -> tf dtype
# (empty if tensorflow is not installed, see try_import_tf above).
if tf:
    numpy_to_tf_dtype_dict = {
        np.bool_: tf.bool,
        np.uint8: tf.uint8,
        np.int8: tf.int8,
        np.int16: tf.int16,
        np.int32: tf.int32,
        np.int64: tf.int64,
        np.float16: tf.float16,
        np.float32: tf.float32,
        np.float64: tf.float64,
        np.complex64: tf.complex64,
        np.complex128: tf.complex128,
    }
else:
    numpy_to_tf_dtype_dict = {}

# Dict of torch dtype -> NumPy dtype (inverse of the mapping above).
torch_to_numpy_dtype_dict = {
    value: key for (key, value) in numpy_to_torch_dtype_dict.items()
}
# Dict of tf dtype -> NumPy dtype (inverse of the mapping above).
tf_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_tf_dtype_dict.items()}
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@PublicAPI(stability="alpha")
def get_np_dtype(x: TensorType) -> np.dtype:
    """Returns the NumPy dtype of the given tensor or array."""
    # Framework tensors are translated through the module-level mapping dicts;
    # NumPy arrays expose their dtype directly.
    if torch and isinstance(x, torch.Tensor):
        return torch_to_numpy_dtype_dict[x.dtype]
    if tf and isinstance(x, tf.Tensor):
        return tf_to_numpy_dtype_dict[x.dtype]
    if isinstance(x, np.ndarray):
        return x.dtype
    raise TypeError(f"Unsupported type: {type(x)}")
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/test_utils.py
ADDED
|
@@ -0,0 +1,1817 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import json
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
import pprint
|
| 6 |
+
import random
|
| 7 |
+
import re
|
| 8 |
+
import time
|
| 9 |
+
from typing import (
|
| 10 |
+
TYPE_CHECKING,
|
| 11 |
+
Any,
|
| 12 |
+
Dict,
|
| 13 |
+
List,
|
| 14 |
+
Optional,
|
| 15 |
+
Tuple,
|
| 16 |
+
Type,
|
| 17 |
+
Union,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
import gymnasium as gym
|
| 21 |
+
from gymnasium.spaces import Box, Discrete, MultiDiscrete, MultiBinary
|
| 22 |
+
from gymnasium.spaces import Dict as GymDict
|
| 23 |
+
from gymnasium.spaces import Tuple as GymTuple
|
| 24 |
+
import numpy as np
|
| 25 |
+
import tree # pip install dm_tree
|
| 26 |
+
|
| 27 |
+
import ray
|
| 28 |
+
from ray import air, tune
|
| 29 |
+
from ray.air.constants import TRAINING_ITERATION
|
| 30 |
+
from ray.air.integrations.wandb import WandbLoggerCallback, WANDB_ENV_VAR
|
| 31 |
+
from ray.rllib.core import DEFAULT_MODULE_ID, Columns
|
| 32 |
+
from ray.rllib.env.wrappers.atari_wrappers import is_atari, wrap_deepmind
|
| 33 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 34 |
+
from ray.rllib.utils.framework import try_import_jax, try_import_tf, try_import_torch
|
| 35 |
+
from ray.rllib.utils.metrics import (
|
| 36 |
+
DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY,
|
| 37 |
+
ENV_RUNNER_RESULTS,
|
| 38 |
+
EPISODE_RETURN_MEAN,
|
| 39 |
+
EVALUATION_RESULTS,
|
| 40 |
+
NUM_ENV_STEPS_TRAINED,
|
| 41 |
+
NUM_ENV_STEPS_SAMPLED_LIFETIME,
|
| 42 |
+
)
|
| 43 |
+
from ray.rllib.utils.typing import ResultDict
|
| 44 |
+
from ray.rllib.utils.error import UnsupportedSpaceException
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
from ray.tune import CLIReporter
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
if TYPE_CHECKING:
|
| 51 |
+
from ray.rllib.algorithms import Algorithm, AlgorithmConfig
|
| 52 |
+
from ray.rllib.offline.dataset_reader import DatasetReader
|
| 53 |
+
|
| 54 |
+
jax, _ = try_import_jax()
|
| 55 |
+
tf1, tf, tfv = try_import_tf()
|
| 56 |
+
torch, _ = try_import_torch()
|
| 57 |
+
|
| 58 |
+
logger = logging.getLogger(__name__)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def add_rllib_example_script_args(
|
| 62 |
+
parser: Optional[argparse.ArgumentParser] = None,
|
| 63 |
+
default_reward: float = 100.0,
|
| 64 |
+
default_iters: int = 200,
|
| 65 |
+
default_timesteps: int = 100000,
|
| 66 |
+
) -> argparse.ArgumentParser:
|
| 67 |
+
"""Adds RLlib-typical (and common) examples scripts command line args to a parser.
|
| 68 |
+
|
| 69 |
+
TODO (sven): This function should be used by most of our examples scripts, which
|
| 70 |
+
already mostly have this logic in them (but written out).
|
| 71 |
+
|
| 72 |
+
Args:
|
| 73 |
+
parser: The parser to add the arguments to. If None, create a new one.
|
| 74 |
+
default_reward: The default value for the --stop-reward option.
|
| 75 |
+
default_iters: The default value for the --stop-iters option.
|
| 76 |
+
default_timesteps: The default value for the --stop-timesteps option.
|
| 77 |
+
|
| 78 |
+
Returns:
|
| 79 |
+
The altered (or newly created) parser object.
|
| 80 |
+
"""
|
| 81 |
+
if parser is None:
|
| 82 |
+
parser = argparse.ArgumentParser()
|
| 83 |
+
|
| 84 |
+
# Algo and Algo config options.
|
| 85 |
+
parser.add_argument(
|
| 86 |
+
"--algo", type=str, default="PPO", help="The RLlib-registered algorithm to use."
|
| 87 |
+
)
|
| 88 |
+
parser.add_argument(
|
| 89 |
+
"--enable-new-api-stack",
|
| 90 |
+
action="store_true",
|
| 91 |
+
help="Whether to use the `enable_rl_module_and_learner` config setting.",
|
| 92 |
+
)
|
| 93 |
+
parser.add_argument(
|
| 94 |
+
"--framework",
|
| 95 |
+
choices=["tf", "tf2", "torch"],
|
| 96 |
+
default="torch",
|
| 97 |
+
help="The DL framework specifier.",
|
| 98 |
+
)
|
| 99 |
+
parser.add_argument(
|
| 100 |
+
"--env",
|
| 101 |
+
type=str,
|
| 102 |
+
default=None,
|
| 103 |
+
help="The gym.Env identifier to run the experiment with.",
|
| 104 |
+
)
|
| 105 |
+
parser.add_argument(
|
| 106 |
+
"--num-env-runners",
|
| 107 |
+
type=int,
|
| 108 |
+
default=None,
|
| 109 |
+
help="The number of (remote) EnvRunners to use for the experiment.",
|
| 110 |
+
)
|
| 111 |
+
parser.add_argument(
|
| 112 |
+
"--num-envs-per-env-runner",
|
| 113 |
+
type=int,
|
| 114 |
+
default=None,
|
| 115 |
+
help="The number of (vectorized) environments per EnvRunner. Note that "
|
| 116 |
+
"this is identical to the batch size for (inference) action computations.",
|
| 117 |
+
)
|
| 118 |
+
parser.add_argument(
|
| 119 |
+
"--num-agents",
|
| 120 |
+
type=int,
|
| 121 |
+
default=0,
|
| 122 |
+
help="If 0 (default), will run as single-agent. If > 0, will run as "
|
| 123 |
+
"multi-agent with the environment simply cloned n times and each agent acting "
|
| 124 |
+
"independently at every single timestep. The overall reward for this "
|
| 125 |
+
"experiment is then the sum over all individual agents' rewards.",
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
# Evaluation options.
|
| 129 |
+
parser.add_argument(
|
| 130 |
+
"--evaluation-num-env-runners",
|
| 131 |
+
type=int,
|
| 132 |
+
default=0,
|
| 133 |
+
help="The number of evaluation (remote) EnvRunners to use for the experiment.",
|
| 134 |
+
)
|
| 135 |
+
parser.add_argument(
|
| 136 |
+
"--evaluation-interval",
|
| 137 |
+
type=int,
|
| 138 |
+
default=0,
|
| 139 |
+
help="Every how many iterations to run one round of evaluation. "
|
| 140 |
+
"Use 0 (default) to disable evaluation.",
|
| 141 |
+
)
|
| 142 |
+
parser.add_argument(
|
| 143 |
+
"--evaluation-duration",
|
| 144 |
+
type=lambda v: v if v == "auto" else int(v),
|
| 145 |
+
default=10,
|
| 146 |
+
help="The number of evaluation units to run each evaluation round. "
|
| 147 |
+
"Use `--evaluation-duration-unit` to count either in 'episodes' "
|
| 148 |
+
"or 'timesteps'. If 'auto', will run as many as possible during train pass ("
|
| 149 |
+
"`--evaluation-parallel-to-training` must be set then).",
|
| 150 |
+
)
|
| 151 |
+
parser.add_argument(
|
| 152 |
+
"--evaluation-duration-unit",
|
| 153 |
+
type=str,
|
| 154 |
+
default="episodes",
|
| 155 |
+
choices=["episodes", "timesteps"],
|
| 156 |
+
help="The evaluation duration unit to count by. One of 'episodes' or "
|
| 157 |
+
"'timesteps'. This unit will be run `--evaluation-duration` times in each "
|
| 158 |
+
"evaluation round. If `--evaluation-duration=auto`, this setting does not "
|
| 159 |
+
"matter.",
|
| 160 |
+
)
|
| 161 |
+
parser.add_argument(
|
| 162 |
+
"--evaluation-parallel-to-training",
|
| 163 |
+
action="store_true",
|
| 164 |
+
help="Whether to run evaluation parallel to training. This might help speed up "
|
| 165 |
+
"your overall iteration time. Be aware that when using this option, your "
|
| 166 |
+
"reported evaluation results are referring to one iteration before the current "
|
| 167 |
+
"one.",
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
# RLlib logging options.
|
| 171 |
+
parser.add_argument(
|
| 172 |
+
"--output",
|
| 173 |
+
type=str,
|
| 174 |
+
default=None,
|
| 175 |
+
help="The output directory to write trajectories to, which are collected by "
|
| 176 |
+
"the algo's EnvRunners.",
|
| 177 |
+
)
|
| 178 |
+
parser.add_argument(
|
| 179 |
+
"--log-level",
|
| 180 |
+
type=str,
|
| 181 |
+
default=None, # None -> use default
|
| 182 |
+
choices=["INFO", "DEBUG", "WARN", "ERROR"],
|
| 183 |
+
help="The log-level to be used by the RLlib logger.",
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
# tune.Tuner options.
|
| 187 |
+
parser.add_argument(
|
| 188 |
+
"--no-tune",
|
| 189 |
+
action="store_true",
|
| 190 |
+
help="Whether to NOT use tune.Tuner(), but rather a simple for-loop calling "
|
| 191 |
+
"`algo.train()` repeatedly until one of the stop criteria is met.",
|
| 192 |
+
)
|
| 193 |
+
parser.add_argument(
|
| 194 |
+
"--num-samples",
|
| 195 |
+
type=int,
|
| 196 |
+
default=1,
|
| 197 |
+
help="How many (tune.Tuner.fit()) experiments to execute - if possible in "
|
| 198 |
+
"parallel.",
|
| 199 |
+
)
|
| 200 |
+
parser.add_argument(
|
| 201 |
+
"--max-concurrent-trials",
|
| 202 |
+
type=int,
|
| 203 |
+
default=None,
|
| 204 |
+
help="How many (tune.Tuner) trials to run concurrently.",
|
| 205 |
+
)
|
| 206 |
+
parser.add_argument(
|
| 207 |
+
"--verbose",
|
| 208 |
+
type=int,
|
| 209 |
+
default=2,
|
| 210 |
+
help="The verbosity level for the `tune.Tuner()` running the experiment.",
|
| 211 |
+
)
|
| 212 |
+
parser.add_argument(
|
| 213 |
+
"--checkpoint-freq",
|
| 214 |
+
type=int,
|
| 215 |
+
default=0,
|
| 216 |
+
help=(
|
| 217 |
+
"The frequency (in training iterations) with which to create checkpoints. "
|
| 218 |
+
"Note that if --wandb-key is provided, all checkpoints will "
|
| 219 |
+
"automatically be uploaded to WandB."
|
| 220 |
+
),
|
| 221 |
+
)
|
| 222 |
+
parser.add_argument(
|
| 223 |
+
"--checkpoint-at-end",
|
| 224 |
+
action="store_true",
|
| 225 |
+
help=(
|
| 226 |
+
"Whether to create a checkpoint at the very end of the experiment. "
|
| 227 |
+
"Note that if --wandb-key is provided, all checkpoints will "
|
| 228 |
+
"automatically be uploaded to WandB."
|
| 229 |
+
),
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
# WandB logging options.
|
| 233 |
+
parser.add_argument(
|
| 234 |
+
"--wandb-key",
|
| 235 |
+
type=str,
|
| 236 |
+
default=None,
|
| 237 |
+
help="The WandB API key to use for uploading results.",
|
| 238 |
+
)
|
| 239 |
+
parser.add_argument(
|
| 240 |
+
"--wandb-project",
|
| 241 |
+
type=str,
|
| 242 |
+
default=None,
|
| 243 |
+
help="The WandB project name to use.",
|
| 244 |
+
)
|
| 245 |
+
parser.add_argument(
|
| 246 |
+
"--wandb-run-name",
|
| 247 |
+
type=str,
|
| 248 |
+
default=None,
|
| 249 |
+
help="The WandB run name to use.",
|
| 250 |
+
)
|
| 251 |
+
|
| 252 |
+
# Experiment stopping and testing criteria.
|
| 253 |
+
parser.add_argument(
|
| 254 |
+
"--stop-reward",
|
| 255 |
+
type=float,
|
| 256 |
+
default=default_reward,
|
| 257 |
+
help="Reward at which the script should stop training.",
|
| 258 |
+
)
|
| 259 |
+
parser.add_argument(
|
| 260 |
+
"--stop-iters",
|
| 261 |
+
type=int,
|
| 262 |
+
default=default_iters,
|
| 263 |
+
help="The number of iterations to train.",
|
| 264 |
+
)
|
| 265 |
+
parser.add_argument(
|
| 266 |
+
"--stop-timesteps",
|
| 267 |
+
type=int,
|
| 268 |
+
default=default_timesteps,
|
| 269 |
+
help="The number of (environment sampling) timesteps to train.",
|
| 270 |
+
)
|
| 271 |
+
parser.add_argument(
|
| 272 |
+
"--as-test",
|
| 273 |
+
action="store_true",
|
| 274 |
+
help="Whether this script should be run as a test. If set, --stop-reward must "
|
| 275 |
+
"be achieved within --stop-timesteps AND --stop-iters, otherwise this "
|
| 276 |
+
"script will throw an exception at the end.",
|
| 277 |
+
)
|
| 278 |
+
parser.add_argument(
|
| 279 |
+
"--as-release-test",
|
| 280 |
+
action="store_true",
|
| 281 |
+
help="Whether this script should be run as a release test. If set, "
|
| 282 |
+
"all that applies to the --as-test option is true, plus, a short JSON summary "
|
| 283 |
+
"will be written into a results file whose location is given by the ENV "
|
| 284 |
+
"variable `TEST_OUTPUT_JSON`.",
|
| 285 |
+
)
|
| 286 |
+
|
| 287 |
+
# Learner scaling options.
|
| 288 |
+
parser.add_argument(
|
| 289 |
+
"--num-learners",
|
| 290 |
+
type=int,
|
| 291 |
+
default=None,
|
| 292 |
+
help="The number of Learners to use. If none, use the algorithm's default "
|
| 293 |
+
"value.",
|
| 294 |
+
)
|
| 295 |
+
parser.add_argument(
|
| 296 |
+
"--num-gpus-per-learner",
|
| 297 |
+
type=float,
|
| 298 |
+
default=None,
|
| 299 |
+
help="The number of GPUs per Learner to use. If none and there are enough GPUs "
|
| 300 |
+
"for all required Learners (--num-learners), use a value of 1, otherwise 0.",
|
| 301 |
+
)
|
| 302 |
+
|
| 303 |
+
# Ray init options.
|
| 304 |
+
parser.add_argument("--num-cpus", type=int, default=0)
|
| 305 |
+
parser.add_argument(
|
| 306 |
+
"--local-mode",
|
| 307 |
+
action="store_true",
|
| 308 |
+
help="Init Ray in local mode for easier debugging.",
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
# Old API stack: config.num_gpus.
|
| 312 |
+
parser.add_argument(
|
| 313 |
+
"--num-gpus",
|
| 314 |
+
type=int,
|
| 315 |
+
default=0,
|
| 316 |
+
help="The number of GPUs to use (if on the old API stack).",
|
| 317 |
+
)
|
| 318 |
+
|
| 319 |
+
return parser
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def check(x, y, decimals=5, atol=None, rtol=None, false=False):
|
| 323 |
+
"""
|
| 324 |
+
Checks two structures (dict, tuple, list,
|
| 325 |
+
np.array, float, int, etc..) for (almost) numeric identity.
|
| 326 |
+
All numbers in the two structures have to match up to `decimal` digits
|
| 327 |
+
after the floating point. Uses assertions.
|
| 328 |
+
|
| 329 |
+
Args:
|
| 330 |
+
x: The value to be compared (to the expectation: `y`). This
|
| 331 |
+
may be a Tensor.
|
| 332 |
+
y: The expected value to be compared to `x`. This must not
|
| 333 |
+
be a tf-Tensor, but may be a tf/torch-Tensor.
|
| 334 |
+
decimals: The number of digits after the floating point up to
|
| 335 |
+
which all numeric values have to match.
|
| 336 |
+
atol: Absolute tolerance of the difference between x and y
|
| 337 |
+
(overrides `decimals` if given).
|
| 338 |
+
rtol: Relative tolerance of the difference between x and y
|
| 339 |
+
(overrides `decimals` if given).
|
| 340 |
+
false: Whether to check that x and y are NOT the same.
|
| 341 |
+
"""
|
| 342 |
+
# A dict type.
|
| 343 |
+
if isinstance(x, dict):
|
| 344 |
+
assert isinstance(y, dict), "ERROR: If x is dict, y needs to be a dict as well!"
|
| 345 |
+
y_keys = set(x.keys())
|
| 346 |
+
for key, value in x.items():
|
| 347 |
+
assert key in y, f"ERROR: y does not have x's key='{key}'! y={y}"
|
| 348 |
+
check(value, y[key], decimals=decimals, atol=atol, rtol=rtol, false=false)
|
| 349 |
+
y_keys.remove(key)
|
| 350 |
+
assert not y_keys, "ERROR: y contains keys ({}) that are not in x! y={}".format(
|
| 351 |
+
list(y_keys), y
|
| 352 |
+
)
|
| 353 |
+
# A tuple type.
|
| 354 |
+
elif isinstance(x, (tuple, list)):
|
| 355 |
+
assert isinstance(
|
| 356 |
+
y, (tuple, list)
|
| 357 |
+
), "ERROR: If x is tuple/list, y needs to be a tuple/list as well!"
|
| 358 |
+
assert len(y) == len(
|
| 359 |
+
x
|
| 360 |
+
), "ERROR: y does not have the same length as x ({} vs {})!".format(
|
| 361 |
+
len(y), len(x)
|
| 362 |
+
)
|
| 363 |
+
for i, value in enumerate(x):
|
| 364 |
+
check(value, y[i], decimals=decimals, atol=atol, rtol=rtol, false=false)
|
| 365 |
+
# Boolean comparison.
|
| 366 |
+
elif isinstance(x, (np.bool_, bool)):
|
| 367 |
+
if false is True:
|
| 368 |
+
assert bool(x) is not bool(y), f"ERROR: x ({x}) is y ({y})!"
|
| 369 |
+
else:
|
| 370 |
+
assert bool(x) is bool(y), f"ERROR: x ({x}) is not y ({y})!"
|
| 371 |
+
# Nones or primitives (excluding int vs float, which should be compared with
|
| 372 |
+
# tolerance/decimals as well).
|
| 373 |
+
elif (
|
| 374 |
+
x is None
|
| 375 |
+
or y is None
|
| 376 |
+
or isinstance(x, str)
|
| 377 |
+
or (isinstance(x, int) and isinstance(y, int))
|
| 378 |
+
):
|
| 379 |
+
if false is True:
|
| 380 |
+
assert x != y, f"ERROR: x ({x}) is the same as y ({y})!"
|
| 381 |
+
else:
|
| 382 |
+
assert x == y, f"ERROR: x ({x}) is not the same as y ({y})!"
|
| 383 |
+
# String/byte comparisons.
|
| 384 |
+
elif (
|
| 385 |
+
hasattr(x, "dtype") and (x.dtype == object or str(x.dtype).startswith("<U"))
|
| 386 |
+
) or isinstance(x, bytes):
|
| 387 |
+
try:
|
| 388 |
+
np.testing.assert_array_equal(x, y)
|
| 389 |
+
if false is True:
|
| 390 |
+
assert False, f"ERROR: x ({x}) is the same as y ({y})!"
|
| 391 |
+
except AssertionError as e:
|
| 392 |
+
if false is False:
|
| 393 |
+
raise e
|
| 394 |
+
# Everything else (assume numeric or tf/torch.Tensor).
|
| 395 |
+
# Also includes int vs float comparison, which is performed with tolerance/decimals.
|
| 396 |
+
else:
|
| 397 |
+
if tf1 is not None:
|
| 398 |
+
# y should never be a Tensor (y=expected value).
|
| 399 |
+
if isinstance(y, (tf1.Tensor, tf1.Variable)):
|
| 400 |
+
# In eager mode, numpyize tensors.
|
| 401 |
+
if tf.executing_eagerly():
|
| 402 |
+
y = y.numpy()
|
| 403 |
+
else:
|
| 404 |
+
raise ValueError(
|
| 405 |
+
"`y` (expected value) must not be a Tensor. "
|
| 406 |
+
"Use numpy.ndarray instead"
|
| 407 |
+
)
|
| 408 |
+
if isinstance(x, (tf1.Tensor, tf1.Variable)):
|
| 409 |
+
# In eager mode, numpyize tensors.
|
| 410 |
+
if tf1.executing_eagerly():
|
| 411 |
+
x = x.numpy()
|
| 412 |
+
# Otherwise, use a new tf-session.
|
| 413 |
+
else:
|
| 414 |
+
with tf1.Session() as sess:
|
| 415 |
+
x = sess.run(x)
|
| 416 |
+
return check(
|
| 417 |
+
x, y, decimals=decimals, atol=atol, rtol=rtol, false=false
|
| 418 |
+
)
|
| 419 |
+
if torch is not None:
|
| 420 |
+
if isinstance(x, torch.Tensor):
|
| 421 |
+
x = x.detach().cpu().numpy()
|
| 422 |
+
if isinstance(y, torch.Tensor):
|
| 423 |
+
y = y.detach().cpu().numpy()
|
| 424 |
+
|
| 425 |
+
# Stats objects.
|
| 426 |
+
from ray.rllib.utils.metrics.stats import Stats
|
| 427 |
+
|
| 428 |
+
if isinstance(x, Stats):
|
| 429 |
+
x = x.peek()
|
| 430 |
+
if isinstance(y, Stats):
|
| 431 |
+
y = y.peek()
|
| 432 |
+
|
| 433 |
+
# Using decimals.
|
| 434 |
+
if atol is None and rtol is None:
|
| 435 |
+
# Assert equality of both values.
|
| 436 |
+
try:
|
| 437 |
+
np.testing.assert_almost_equal(x, y, decimal=decimals)
|
| 438 |
+
# Both values are not equal.
|
| 439 |
+
except AssertionError as e:
|
| 440 |
+
# Raise error in normal case.
|
| 441 |
+
if false is False:
|
| 442 |
+
raise e
|
| 443 |
+
# Both values are equal.
|
| 444 |
+
else:
|
| 445 |
+
# If false is set -> raise error (not expected to be equal).
|
| 446 |
+
if false is True:
|
| 447 |
+
assert False, f"ERROR: x ({x}) is the same as y ({y})!"
|
| 448 |
+
|
| 449 |
+
# Using atol/rtol.
|
| 450 |
+
else:
|
| 451 |
+
# Provide defaults for either one of atol/rtol.
|
| 452 |
+
if atol is None:
|
| 453 |
+
atol = 0
|
| 454 |
+
if rtol is None:
|
| 455 |
+
rtol = 1e-7
|
| 456 |
+
try:
|
| 457 |
+
np.testing.assert_allclose(x, y, atol=atol, rtol=rtol)
|
| 458 |
+
except AssertionError as e:
|
| 459 |
+
if false is False:
|
| 460 |
+
raise e
|
| 461 |
+
else:
|
| 462 |
+
if false is True:
|
| 463 |
+
assert False, f"ERROR: x ({x}) is the same as y ({y})!"
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
def check_compute_single_action(
    algorithm, include_state=False, include_prev_action_reward=False
):
    """Tests different combinations of args for algorithm.compute_single_action.

    Exercises `compute_single_action` (and `compute_actions_from_input_dict`)
    on both the Algorithm and its (first learnable) Policy, sweeping over
    explore/full_fetch/unsquash/clip combinations and validating returned
    actions against the action space.

    Args:
        algorithm: The Algorithm object to test.
        include_state: Whether to include the initial state of the Policy's
            Model in the `compute_single_action` call.
        include_prev_action_reward: Whether to include the prev-action and
            -reward in the `compute_single_action` call.

    Raises:
        ValueError: If anything unexpected happens.
    """
    # Have to import this here to avoid circular dependency.
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch

    # Some Algorithms may not abide to the standard API.
    pid = DEFAULT_POLICY_ID
    try:
        # Multi-agent: Pick any learnable policy (or DEFAULT_POLICY if it's the only
        # one).
        pid = next(iter(algorithm.env_runner.get_policies_to_train()))
        pol = algorithm.get_policy(pid)
    except AttributeError:
        # Algorithm has no env_runner/get_policy API -> fall back to its
        # single `policy` attribute.
        pol = algorithm.policy
    # Get the policy's model.
    model = pol.model

    action_space = pol.action_space

    def _test(
        what, method_to_test, obs_space, full_fetch, explore, timestep, unsquash, clip
    ):
        # Run one action-computation for `what` (Policy or Algorithm) with the
        # given combination of options and validate the result.
        call_kwargs = {}
        if what is algorithm:
            # Algorithm-level API takes these two extra kwargs.
            call_kwargs["full_fetch"] = full_fetch
            call_kwargs["policy_id"] = pid

        obs = obs_space.sample()
        if isinstance(obs_space, Box):
            obs = np.clip(obs, -1.0, 1.0)
        state_in = None
        if include_state:
            state_in = model.get_initial_state()
            if not state_in:
                # Model exposes its state only via view-requirements ->
                # sample each `state_in_{i}` space instead.
                state_in = []
                i = 0
                while f"state_in_{i}" in model.view_requirements:
                    state_in.append(
                        model.view_requirements[f"state_in_{i}"].space.sample()
                    )
                    i += 1
        action_in = action_space.sample() if include_prev_action_reward else None
        reward_in = 1.0 if include_prev_action_reward else None

        if method_to_test == "input_dict":
            # The input-dict API is only tested on the Policy level.
            assert what is pol

            input_dict = {SampleBatch.OBS: obs}
            if include_prev_action_reward:
                input_dict[SampleBatch.PREV_ACTIONS] = action_in
                input_dict[SampleBatch.PREV_REWARDS] = reward_in
            if state_in:
                if what.config.get("enable_rl_module_and_learner", False):
                    input_dict["state_in"] = state_in
                else:
                    for i, s in enumerate(state_in):
                        input_dict[f"state_in_{i}"] = s
            # Add a batch dim of 1 to every leaf for the batched API call.
            input_dict_batched = SampleBatch(
                tree.map_structure(lambda s: np.expand_dims(s, 0), input_dict)
            )
            action = pol.compute_actions_from_input_dict(
                input_dict=input_dict_batched,
                explore=explore,
                timestep=timestep,
                **call_kwargs,
            )
            # Unbatch everything to be able to compare against single
            # action below.
            # ARS and ES return action batches as lists.
            if isinstance(action[0], list):
                action = (np.array(action[0]), action[1], action[2])
            action = tree.map_structure(lambda s: s[0], action)

            try:
                action2 = pol.compute_single_action(
                    input_dict=input_dict,
                    explore=explore,
                    timestep=timestep,
                    **call_kwargs,
                )
                # Make sure these are the same, unless we have exploration
                # switched on (or noisy layers).
                if not explore and not pol.config.get("noisy"):
                    check(action, action2)
            except TypeError:
                # Policy's compute_single_action does not support the
                # input_dict kwarg -> skip the comparison.
                pass
        else:
            action = what.compute_single_action(
                obs,
                state_in,
                prev_action=action_in,
                prev_reward=reward_in,
                explore=explore,
                timestep=timestep,
                unsquash_action=unsquash,
                clip_action=clip,
                **call_kwargs,
            )

        state_out = None
        if state_in or full_fetch or what is pol:
            # In these cases the return value is a (action, state_out, extra)
            # triple rather than a bare action.
            action, state_out, _ = action
        if state_out:
            for si, so in zip(tree.flatten(state_in), tree.flatten(state_out)):
                if tf.is_tensor(si):
                    # If si is a tensor of Dimensions, we need to convert it
                    # We expect this to be the case for TF RLModules who's initial
                    # states are Tf Tensors.
                    si_shape = si.shape.as_list()
                else:
                    si_shape = list(si.shape)
                check(si_shape, so.shape)

        # None means "use the config's default" for both flags.
        if unsquash is None:
            unsquash = what.config["normalize_actions"]
        if clip is None:
            clip = what.config["clip_actions"]

        # Test whether unsquash/clipping works on the Algorithm's
        # compute_single_action method: Both flags should force the action
        # to be within the space's bounds.
        # NOTE(review): `==` is used here (vs `is` elsewhere in this function);
        # same effect unless Algorithm overrides `__eq__` -- confirm.
        if method_to_test == "single" and what == algorithm:
            if not action_space.contains(action) and (
                clip or unsquash or not isinstance(action_space, Box)
            ):
                raise ValueError(
                    f"Returned action ({action}) of algorithm/policy {what} "
                    f"not in Env's action_space {action_space}"
                )
            # We are operating in normalized space: Expect only smaller action
            # values.
            if (
                isinstance(action_space, Box)
                and not unsquash
                and what.config.get("normalize_actions")
                and np.any(np.abs(action) > 15.0)
            ):
                raise ValueError(
                    f"Returned action ({action}) of algorithm/policy {what} "
                    "should be in normalized space, but seems too large/small "
                    "for that!"
                )

    # Loop through: Policy vs Algorithm; Different API methods to calculate
    # actions; unsquash option; clip option; full fetch or not.
    for what in [pol, algorithm]:
        if what is algorithm:
            # Get the obs-space from Workers.env (not Policy) due to possible
            # pre-processor up front.
            worker_set = getattr(algorithm, "env_runner_group", None)
            assert worker_set
            if not worker_set.local_env_runner:
                obs_space = algorithm.get_policy(pid).observation_space
            else:
                obs_space = worker_set.local_env_runner.for_policy(
                    lambda p: p.observation_space, policy_id=pid
                )
            obs_space = getattr(obs_space, "original_space", obs_space)
        else:
            obs_space = pol.observation_space

        for method_to_test in ["single"] + (["input_dict"] if what is pol else []):
            for explore in [True, False]:
                for full_fetch in [False, True] if what is algorithm else [False]:
                    timestep = random.randint(0, 100000)
                    for unsquash in [True, False, None]:
                        # Clipping is only varied when unsquashing is off.
                        for clip in [False] if unsquash else [True, False, None]:
                            print("-" * 80)
                            print(f"what={what}")
                            print(f"method_to_test={method_to_test}")
                            print(f"explore={explore}")
                            print(f"full_fetch={full_fetch}")
                            print(f"unsquash={unsquash}")
                            print(f"clip={clip}")
                            _test(
                                what,
                                method_to_test,
                                obs_space,
                                full_fetch,
                                explore,
                                timestep,
                                unsquash,
                                clip,
                            )
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
def check_inference_w_connectors(policy, env_name, max_steps: int = 100):
    """Checks whether the given policy can infer actions from an env with connectors.

    Runs the environment for at most `max_steps` steps (or until the episode
    ends), feeding each observation through `local_policy_inference`.

    Args:
        policy: The policy to check.
        env_name: Name of the environment to check
        max_steps: The maximum number of steps to run the environment for.

    Raises:
        ValueError: If the policy cannot infer actions from the environment.
    """
    # Avoids circular import
    from ray.rllib.utils.policy import local_policy_inference

    env = gym.make(env_name)

    # Potentially wrap the env like we do in RolloutWorker
    if is_atari(env):
        model_cfg = policy.config["model"]
        env = wrap_deepmind(
            env,
            dim=model_cfg["dim"],
            framestack=model_cfg.get("framestack"),
        )

    obs, info = env.reset()
    reward = 0.0
    terminated = False
    truncated = False
    num_steps = 0
    while num_steps < max_steps and not (terminated or truncated):
        inference_results = local_policy_inference(
            policy,
            env_id=0,
            agent_id=0,
            obs=obs,
            reward=reward,
            terminated=terminated,
            truncated=truncated,
            info=info,
        )
        # `local_policy_inference` returns a list of (action, ...) tuples;
        # step the env with the first action of the first entry.
        obs, reward, terminated, truncated, info = env.step(
            inference_results[0][0]
        )
        num_steps += 1
|
| 706 |
+
|
| 707 |
+
|
| 708 |
+
def check_learning_achieved(
    tune_results: "tune.ResultGrid",
    min_value: float,
    evaluation: Optional[bool] = None,
    metric: str = f"{ENV_RUNNER_RESULTS}/episode_return_mean",
):
    """Throws an error if `min_value` is not reached within tune_results.

    Checks all result rows found in `tune_results` for their `metric` value
    and compares the best one to `min_value`.

    Args:
        tune_results: The tune.Tuner().fit() returned results object.
        min_value: The minimum `metric` value that must be reached.
        evaluation: If True, use `evaluation/env_runners/[metric]`, if False, use
            `env_runners/[metric]`, if None, use evaluation sampler results if
            available otherwise, use train sampler results.
        metric: The ("/"-nested) result key to check against `min_value`.

    Raises:
        ValueError: If `min_value` not reached or no results are present.
    """
    # Get maximum value of `metric` over all trials
    # (check if at least one trial achieved some learning, not just the final one).
    recorded_values = []
    for _, row in tune_results.get_dataframe().iterrows():
        if evaluation or (
            evaluation is None and f"{EVALUATION_RESULTS}/{metric}" in row
        ):
            recorded_values.append(row[f"{EVALUATION_RESULTS}/{metric}"])
        else:
            recorded_values.append(row[metric])
    # Guard: an empty results dataframe would otherwise fail inside `max()`
    # with an unhelpful "arg is an empty sequence" message.
    if not recorded_values:
        raise ValueError(f"No results found to check `{metric}` against!")
    best_value = max(recorded_values)
    if best_value < min_value:
        raise ValueError(f"`{metric}` of {min_value} not reached!")
    print(f"`{metric}` of {min_value} reached! ok")
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
def check_off_policyness(
    results: ResultDict,
    upper_limit: float,
    lower_limit: float = 0.0,
) -> Optional[float]:
    """Verifies that the off-policy'ness of some update is within some range.

    Off-policy'ness is defined as the average (across n workers) diff
    between the number of gradient updates performed on the policy used
    for sampling vs the number of gradient updates that have been performed
    on the trained policy (usually the one on the local worker).

    Uses the published DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY metric inside
    a training results dict and compares to the given bounds.

    Note: Only works with single-agent results thus far.

    Args:
        results: The training results dict.
        upper_limit: The upper limit to for the off_policy_ness value.
        lower_limit: The lower limit to for the off_policy_ness value.

    Returns:
        The off-policy'ness value (described above), or None if no stats for
        the default policy were found in `results`.

    Raises:
        AssertionError: If the value is out of bounds.
    """
    # Deferred imports to avoid circular dependencies.
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
    from ray.rllib.utils.metrics.learner_info import LEARNER_INFO

    learner_info = results["info"][LEARNER_INFO]
    # No learner stats for the default policy -> nothing to verify.
    if DEFAULT_POLICY_ID not in learner_info:
        return None

    default_policy_stats = learner_info[DEFAULT_POLICY_ID]
    off_policy_ness = default_policy_stats[DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY]

    # Roughly: Reaches up to 0.4 for 2 rollout workers and up to 0.2 for
    # 1 rollout worker.
    if not (lower_limit <= off_policy_ness <= upper_limit):
        raise AssertionError(
            f"`off_policy_ness` ({off_policy_ness}) is outside the given bounds "
            f"({lower_limit} - {upper_limit})!"
        )

    return off_policy_ness
|
| 794 |
+
|
| 795 |
+
|
| 796 |
+
def check_train_results_new_api_stack(train_results: ResultDict) -> ResultDict:
    """Checks proper structure of a Algorithm.train() returned dict.

    Args:
        train_results: The train results dict to check.

    Returns:
        The (unchanged) `train_results` dict, for convenient chaining.
        (Fixed: annotation previously claimed `-> None`.)

    Raises:
        AssertionError: If `train_results` doesn't have the proper structure or
            data in it.
    """
    # Import these here to avoid circular dependencies.
    from ray.rllib.utils.metrics import (
        ENV_RUNNER_RESULTS,
        FAULT_TOLERANCE_STATS,
        LEARNER_RESULTS,
        TIMERS,
    )

    # Assert that some keys are where we would expect them.
    for key in [
        ENV_RUNNER_RESULTS,
        FAULT_TOLERANCE_STATS,
        LEARNER_RESULTS,
        TIMERS,
        TRAINING_ITERATION,
        "config",
    ]:
        assert (
            key in train_results
        ), f"'{key}' not found in `train_results` ({train_results})!"

    # Make sure, `config` is an actual dict, not an AlgorithmConfig object.
    assert isinstance(
        train_results["config"], dict
    ), "`config` in results not a python dict!"

    from ray.rllib.algorithms.algorithm_config import AlgorithmConfig

    is_multi_agent = (
        AlgorithmConfig()
        .update_from_dict({"policies": train_results["config"]["policies"]})
        .is_multi_agent()
    )

    # Check in particular the learner results.
    learner_results = train_results[LEARNER_RESULTS]

    # Make sure we have a `DEFAULT_MODULE_ID` key if we are not in a
    # multi-agent setup.
    if not is_multi_agent:
        assert len(learner_results) == 0 or DEFAULT_MODULE_ID in learner_results, (
            f"'{DEFAULT_MODULE_ID}' not found in "
            f"train_results['{LEARNER_RESULTS}']!"
        )

    for module_id, module_metrics in learner_results.items():
        # The ModuleID can be __all_modules__ in multi-agent case when the new learner
        # stack is enabled.
        if module_id == "__all_modules__":
            continue

        # On the new API stack, policy has no LEARNER_STATS_KEY under it anymore.
        for key, value in module_metrics.items():
            # Min- and max-stats should be single values.
            # Fixed: interpolate the actual key into the message (was the
            # literal string "'key'").
            assert np.isscalar(value), f"'{key}' value not a scalar ({value})!"

    return train_results
|
| 864 |
+
|
| 865 |
+
|
| 866 |
+
@OldAPIStack
def check_train_results(train_results: ResultDict) -> ResultDict:
    """Checks proper structure of a Algorithm.train() returned dict.

    Args:
        train_results: The train results dict to check.

    Returns:
        The (unchanged) `train_results` dict, for convenient chaining.

    Raises:
        AssertionError: If `train_results` doesn't have the proper structure or
            data in it.
    """
    # Import these here to avoid circular dependencies.
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
    from ray.rllib.utils.metrics.learner_info import LEARNER_INFO, LEARNER_STATS_KEY

    # Assert that some keys are where we would expect them.
    for key in [
        "config",
        "custom_metrics",
        ENV_RUNNER_RESULTS,
        "info",
        "iterations_since_restore",
        "num_healthy_workers",
        "perf",
        "time_since_restore",
        "time_this_iter_s",
        "timers",
        "time_total_s",
        TRAINING_ITERATION,
    ]:
        assert (
            key in train_results
        ), f"'{key}' not found in `train_results` ({train_results})!"

    for key in [
        "episode_len_mean",
        "episode_reward_max",
        "episode_reward_mean",
        "episode_reward_min",
        "hist_stats",
        "policy_reward_max",
        "policy_reward_mean",
        "policy_reward_min",
        "sampler_perf",
    ]:
        assert key in train_results[ENV_RUNNER_RESULTS], (
            f"'{key}' not found in `train_results[ENV_RUNNER_RESULTS]` "
            f"({train_results[ENV_RUNNER_RESULTS]})!"
        )

    # Make sure, `config` is an actual dict, not an AlgorithmConfig object.
    assert isinstance(
        train_results["config"], dict
    ), "`config` in results not a python dict!"

    from ray.rllib.algorithms.algorithm_config import AlgorithmConfig

    is_multi_agent = (
        AlgorithmConfig()
        .update_from_dict({"policies": train_results["config"]["policies"]})
        .is_multi_agent()
    )

    # Check in particular the "info" dict.
    info = train_results["info"]
    assert LEARNER_INFO in info, f"'learner' not in train_results['infos'] ({info})!"
    assert (
        "num_steps_trained" in info or NUM_ENV_STEPS_TRAINED in info
    ), f"'num_(env_)?steps_trained' not in train_results['infos'] ({info})!"

    learner_info = info[LEARNER_INFO]

    # Make sure we have a default_policy key if we are not in a
    # multi-agent setup.
    if not is_multi_agent:
        # APEX algos sometimes have an empty learner info dict (no metrics
        # collected yet).
        assert len(learner_info) == 0 or DEFAULT_POLICY_ID in learner_info, (
            f"'{DEFAULT_POLICY_ID}' not found in "
            f"train_results['infos']['learner'] ({learner_info})!"
        )

    for pid, policy_stats in learner_info.items():
        if pid == "batch_count":
            continue

        # the pid can be __all__ in multi-agent case when the new learner stack is
        # enabled.
        if pid == "__all__":
            continue

        # On the new API stack, policy has no LEARNER_STATS_KEY under it anymore.
        if LEARNER_STATS_KEY in policy_stats:
            learner_stats = policy_stats[LEARNER_STATS_KEY]
        else:
            learner_stats = policy_stats
        for key, value in learner_stats.items():
            # Min- and max-stats should be single values.
            # Fixed: interpolate the actual key into the message (was the
            # literal string "'key'").
            assert np.isscalar(value), f"'{key}' value not a scalar ({value})!"

    return train_results
|
| 968 |
+
|
| 969 |
+
|
| 970 |
+
# TODO (sven): Make this the de-facto, well documented, and unified utility for most of
|
| 971 |
+
# our tests:
|
| 972 |
+
# - CI (label: "learning_tests")
|
| 973 |
+
# - release tests (benchmarks)
|
| 974 |
+
# - example scripts
|
| 975 |
+
def run_rllib_example_script_experiment(
    base_config: "AlgorithmConfig",
    args: Optional[argparse.Namespace] = None,
    *,
    stop: Optional[Dict] = None,
    success_metric: Optional[Dict] = None,
    trainable: Optional[Type] = None,
    tune_callbacks: Optional[List] = None,
    keep_config: bool = False,
    scheduler=None,
    progress_reporter=None,
) -> Union[ResultDict, tune.result_grid.ResultGrid]:
    """Given an algorithm config and some command line args, runs an experiment.

    There are some constraints on what properties must be defined in `args`.
    It should ideally be generated via calling
    `args = add_rllib_example_script_args()`, which can be found in this very module
    here.

    The function sets up an Algorithm object from the given config (altered by the
    contents of `args`), then runs the Algorithm via Tune (or manually, if
    `args.no_tune` is set to True) using the stopping criteria in `stop`.

    At the end of the experiment, if `args.as_test` is True, checks, whether the
    Algorithm reached the `success_metric` (if None, use `env_runners/
    episode_return_mean` with a minimum value of `args.stop_reward`).

    See https://github.com/ray-project/ray/tree/master/rllib/examples for an overview
    of all supported command line options.

    Args:
        base_config: The AlgorithmConfig object to use for this experiment. This base
            config will be automatically "extended" based on some of the provided
            `args`. For example, `args.num_env_runners` is used to set
            `config.num_env_runners`, etc..
        args: A argparse.Namespace object, ideally returned by calling
            `args = add_rllib_example_script_args()`. It must have the following
            properties defined: `stop_iters`, `stop_reward`, `stop_timesteps`,
            `no_tune`, `verbose`, `checkpoint_freq`, `as_test`. Optionally, for WandB
            logging: `wandb_key`, `wandb_project`, `wandb_run_name`.
        stop: An optional dict mapping ResultDict key strings (using "/" in case of
            nesting, e.g. "env_runners/episode_return_mean" for referring to
            `result_dict['env_runners']['episode_return_mean']` to minimum
            values, reaching of which will stop the experiment). Default is:
            {
            "env_runners/episode_return_mean": args.stop_reward,
            "training_iteration": args.stop_iters,
            "num_env_steps_sampled_lifetime": args.stop_timesteps,
            }
        success_metric: Only relevant if `args.as_test` is True.
            A dict mapping a single(!) ResultDict key string (using "/" in
            case of nesting, e.g. "env_runners/episode_return_mean" for referring
            to `result_dict['env_runners']['episode_return_mean']` to a single(!)
            minimum value to be reached in order for the experiment to count as
            successful. If `args.as_test` is True AND this `success_metric` is not
            reached with the bounds defined by `stop`, will raise an Exception.
        trainable: The Trainable sub-class to run in the tune.Tuner. If None (default),
            use the registered RLlib Algorithm class specified by args.algo.
        tune_callbacks: A list of Tune callbacks to configure with the tune.Tuner.
            In case `args.wandb_key` is provided, appends a WandB logger to this
            list.
        keep_config: Set this to True, if you don't want this utility to change the
            given `base_config` in any way and leave it as-is. This is helpful
            for those example scripts which demonstrate how to set config settings
            that are taken care of automatically in this function otherwise (e.g.
            `num_env_runners`).
        scheduler: An optional Tune scheduler, passed through to
            `tune.TuneConfig(scheduler=...)`.
        progress_reporter: An optional Tune progress reporter. If None and
            `args.num_agents > 0`, a multi-agent `CLIReporter` is auto-created.

    Returns:
        The last ResultDict from a --no-tune run OR the tune.Tuner.fit()
        results.
    """
    if args is None:
        parser = add_rllib_example_script_args()
        args = parser.parse_args()

    # If run --as-release-test, --as-test must also be set.
    if args.as_release_test:
        args.as_test = True

    # Initialize Ray.
    ray.init(
        num_cpus=args.num_cpus or None,
        local_mode=args.local_mode,
        ignore_reinit_error=True,
    )

    # Define one or more stopping criteria.
    if stop is None:
        stop = {
            f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": args.stop_reward,
            f"{ENV_RUNNER_RESULTS}/{NUM_ENV_STEPS_SAMPLED_LIFETIME}": (
                args.stop_timesteps
            ),
            TRAINING_ITERATION: args.stop_iters,
        }

    config = base_config

    # Enhance the `base_config`, based on provided `args`.
    if not keep_config:
        # Set the framework.
        config.framework(args.framework)

        # Add an env specifier (only if not already set in config)?
        if args.env is not None and config.env is None:
            config.environment(args.env)

        # Enable the new API stack?
        if args.enable_new_api_stack:
            config.api_stack(
                enable_rl_module_and_learner=True,
                enable_env_runner_and_connector_v2=True,
            )

        # Define EnvRunner/RolloutWorker scaling and behavior.
        if args.num_env_runners is not None:
            config.env_runners(num_env_runners=args.num_env_runners)

        # Define compute resources used automatically (only using the --num-learners
        # and --num-gpus-per-learner args).
        # New stack.
        if config.enable_rl_module_and_learner:
            if args.num_gpus > 0:
                raise ValueError(
                    "--num-gpus is not supported on the new API stack! To train on "
                    "GPUs, use the command line options `--num-gpus-per-learner=1` and "
                    "`--num-learners=[your number of available GPUs]`, instead."
                )

            # Do we have GPUs available in the cluster?
            num_gpus_available = ray.cluster_resources().get("GPU", 0)
            # Number of actual Learner instances (including the local Learner if
            # `num_learners=0`).
            num_actual_learners = (
                args.num_learners
                if args.num_learners is not None
                else config.num_learners
            ) or 1  # 1: There is always a local Learner, if num_learners=0.
            # How many were hard-requested by the user
            # (through explicit `--num-gpus-per-learner >= 1`).
            num_gpus_requested = (args.num_gpus_per_learner or 0) * num_actual_learners
            # Number of GPUs needed, if `num_gpus_per_learner=None` (auto).
            num_gpus_needed_if_available = (
                args.num_gpus_per_learner
                if args.num_gpus_per_learner is not None
                else 1
            ) * num_actual_learners
            # Define compute resources used.
            config.resources(num_gpus=0)  # old API stack setting
            if args.num_learners is not None:
                config.learners(num_learners=args.num_learners)

            # User wants to use GPUs if available, but doesn't hard-require them.
            if args.num_gpus_per_learner is None:
                if num_gpus_available >= num_gpus_needed_if_available:
                    config.learners(num_gpus_per_learner=1)
                else:
                    config.learners(num_gpus_per_learner=0, num_cpus_per_learner=1)

            # User hard-requires n GPUs, but they are not available -> Error.
            # NOTE(review): this raises, yet the message says "Will run with ...
            # CPU Learners instead" -- message and behavior disagree; confirm
            # which is intended.
            elif num_gpus_available < num_gpus_requested:
                raise ValueError(
                    "You are running your script with --num-learners="
                    f"{args.num_learners} and --num-gpus-per-learner="
                    f"{args.num_gpus_per_learner}, but your cluster only has "
                    f"{num_gpus_available} GPUs! Will run "
                    f"with {num_gpus_available} CPU Learners instead."
                )

            # All required GPUs are available -> Use them.
            else:
                config.learners(num_gpus_per_learner=args.num_gpus_per_learner)

        # Old stack.
        else:
            config.resources(num_gpus=args.num_gpus)

        # Evaluation setup.
        if args.evaluation_interval > 0:
            config.evaluation(
                evaluation_num_env_runners=args.evaluation_num_env_runners,
                evaluation_interval=args.evaluation_interval,
                evaluation_duration=args.evaluation_duration,
                evaluation_duration_unit=args.evaluation_duration_unit,
                evaluation_parallel_to_training=args.evaluation_parallel_to_training,
            )

        # Set the log-level (if applicable).
        if args.log_level is not None:
            config.debugging(log_level=args.log_level)

        # Set the output dir (if applicable).
        if args.output is not None:
            config.offline_data(output=args.output)

    # Run the experiment w/o Tune (directly operate on the RLlib Algorithm object).
    if args.no_tune:
        assert not args.as_test and not args.as_release_test
        algo = config.build()
        for i in range(stop.get(TRAINING_ITERATION, args.stop_iters)):
            results = algo.train()
            if ENV_RUNNER_RESULTS in results:
                mean_return = results[ENV_RUNNER_RESULTS].get(
                    EPISODE_RETURN_MEAN, np.nan
                )
                print(f"iter={i} R={mean_return}", end="")
            if EVALUATION_RESULTS in results:
                Reval = results[EVALUATION_RESULTS][ENV_RUNNER_RESULTS][
                    EPISODE_RETURN_MEAN
                ]
                print(f" R(eval)={Reval}", end="")
            print()
            # Check all stopping criteria; stop early if any one is reached.
            for key, threshold in stop.items():
                # Resolve the "/"-nested key within the results dict.
                val = results
                for k in key.split("/"):
                    try:
                        val = val[k]
                    except KeyError:
                        val = None
                        break
                if val is not None and not np.isnan(val) and val >= threshold:
                    print(f"Stop criterium ({key}={threshold}) fulfilled!")
                    ray.shutdown()
                    return results

        ray.shutdown()
        return results

    # Run the experiment using Ray Tune.

    # Log results using WandB.
    tune_callbacks = tune_callbacks or []
    if hasattr(args, "wandb_key") and (
        args.wandb_key is not None or WANDB_ENV_VAR in os.environ
    ):
        wandb_key = args.wandb_key or os.environ[WANDB_ENV_VAR]
        project = args.wandb_project or (
            args.algo.lower() + "-" + re.sub("\\W+", "-", str(config.env).lower())
        )
        tune_callbacks.append(
            WandbLoggerCallback(
                api_key=wandb_key,
                project=project,
                upload_checkpoints=True,
                **({"name": args.wandb_run_name} if args.wandb_run_name else {}),
            )
        )

    # Auto-configure a CLIReporter (to log the results to the console).
    # Use better ProgressReporter for multi-agent cases: List individual policy rewards.
    if progress_reporter is None and args.num_agents > 0:
        progress_reporter = CLIReporter(
            metric_columns={
                **{
                    TRAINING_ITERATION: "iter",
                    "time_total_s": "total time (s)",
                    NUM_ENV_STEPS_SAMPLED_LIFETIME: "ts",
                    f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": "combined return",
                },
                # One column per policy's mean episode return.
                **{
                    (
                        f"{ENV_RUNNER_RESULTS}/module_episode_returns_mean/" f"{pid}"
                    ): f"return {pid}"
                    for pid in config.policies
                },
            },
        )

    # Force Tuner to use old progress output as the new one silently ignores our custom
    # `CLIReporter`.
    os.environ["RAY_AIR_NEW_OUTPUT"] = "0"

    # Run the actual experiment (using Tune).
    start_time = time.time()
    results = tune.Tuner(
        trainable or config.algo_class,
        param_space=config,
        run_config=air.RunConfig(
            stop=stop,
            verbose=args.verbose,
            callbacks=tune_callbacks,
            checkpoint_config=air.CheckpointConfig(
                checkpoint_frequency=args.checkpoint_freq,
                checkpoint_at_end=args.checkpoint_at_end,
            ),
            progress_reporter=progress_reporter,
        ),
        tune_config=tune.TuneConfig(
            num_samples=args.num_samples,
            max_concurrent_trials=args.max_concurrent_trials,
            scheduler=scheduler,
        ),
    ).fit()
    time_taken = time.time() - start_time

    ray.shutdown()

    # If run as a test, check whether we reached the specified success criteria.
    test_passed = False
    if args.as_test:
        # Success metric not provided, try extracting it from `stop`.
        if success_metric is None:
            for try_it in [
                f"{EVALUATION_RESULTS}/{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}",
                f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}",
            ]:
                if try_it in stop:
                    success_metric = {try_it: stop[try_it]}
                    break
            if success_metric is None:
                success_metric = {
                    f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": args.stop_reward,
                }
        # TODO (sven): Make this work for more than one metric (AND-logic?).
        # Get maximum value of `metric` over all trials
        # (check if at least one trial achieved some learning, not just the final one).
        success_metric_key, success_metric_value = next(iter(success_metric.items()))
        best_value = max(
            row[success_metric_key] for _, row in results.get_dataframe().iterrows()
        )
        if best_value >= success_metric_value:
            test_passed = True
            print(f"`{success_metric_key}` of {success_metric_value} reached! ok")

        if args.as_release_test:
            # Write a JSON summary for the release-test harness.
            trial = results._experiment_analysis.trials[0]
            stats = trial.last_result
            stats.pop("config", None)
            json_summary = {
                "time_taken": float(time_taken),
                "trial_states": [trial.status],
                "last_update": float(time.time()),
                "stats": stats,
                "passed": [test_passed],
                "not_passed": [not test_passed],
                "failures": {str(trial): 1} if not test_passed else {},
            }
            with open(
                os.environ.get("TEST_OUTPUT_JSON", "/tmp/learning_test.json"),
                "wt",
            ) as f:
                try:
                    json.dump(json_summary, f)
                # Something went wrong writing json. Try again w/ simplified stats.
                except Exception:
                    from ray.rllib.algorithms.algorithm import Algorithm

                    simplified_stats = {
                        k: stats[k] for k in Algorithm._progress_metrics if k in stats
                    }
                    json_summary["stats"] = simplified_stats
                    json.dump(json_summary, f)

        if not test_passed:
            raise ValueError(
                f"`{success_metric_key}` of {success_metric_value} not reached!"
            )

    return results
|
| 1334 |
+
|
| 1335 |
+
|
| 1336 |
+
def check_same_batch(batch1, batch2) -> None:
    """Check if both batches are (almost) identical.

    For MultiAgentBatches, the step count and individual policy's
    SampleBatches are checked for identity. For SampleBatches, identity is
    checked as the almost numerical key-value-pair identity between batches
    with ray.rllib.utils.test_utils.check(). unroll_id is compared only if
    both batches have an unroll_id.

    Args:
        batch1: Batch to compare against batch2
        batch2: Batch to compare against batch1

    Raises:
        AssertionError: If the two batches differ in type, step count,
            contained keys/policy IDs, or (almost-)numerical values.
        ValueError: If `batch1` is neither a SampleBatch nor a MultiAgentBatch.
    """
    # Avoids circular import
    from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch

    # Exact type identity (not isinstance) — a SampleBatch must not be
    # compared against a MultiAgentBatch.
    assert type(batch1) is type(
        batch2
    ), "Input batches are of different types {} and {}".format(
        str(type(batch1)), str(type(batch2))
    )

    def check_sample_batches(_batch1, _batch2, _policy_id=None):
        # Compare two plain SampleBatches key by key; `_policy_id` is only
        # used to produce a more specific error message in the multi-agent
        # case.
        unroll_id_1 = _batch1.get("unroll_id", None)
        unroll_id_2 = _batch2.get("unroll_id", None)
        # unroll IDs only have to fit if both batches have them
        if unroll_id_1 is not None and unroll_id_2 is not None:
            assert unroll_id_1 == unroll_id_2

        # Collect batch1's keys while value-checking each against batch2.
        batch1_keys = set()
        for k, v in _batch1.items():
            # unroll_id is compared above already
            if k == "unroll_id":
                continue
            check(v, _batch2[k])
            batch1_keys.add(k)

        batch2_keys = set(_batch2.keys())
        # unroll_id is compared above already
        batch2_keys.discard("unroll_id")
        # Symmetric difference: keys present in exactly one of the batches.
        _difference = batch1_keys.symmetric_difference(batch2_keys)

        # Cases where one batch has info and the other has not
        if _policy_id:
            assert not _difference, (
                "SampleBatches for policy with ID {} "
                "don't share information on the "
                "following information: \n{}"
                "".format(_policy_id, _difference)
            )
        else:
            assert not _difference, (
                "SampleBatches don't share information "
                "on the following information: \n{}"
                "".format(_difference)
            )

    if type(batch1) is SampleBatch:
        check_sample_batches(batch1, batch2)
    elif type(batch1) is MultiAgentBatch:
        # Env-step counts must match before any per-policy comparison.
        assert batch1.count == batch2.count
        batch1_ids = set()
        for policy_id, policy_batch in batch1.policy_batches.items():
            check_sample_batches(
                policy_batch, batch2.policy_batches[policy_id], policy_id
            )
            batch1_ids.add(policy_id)

        # Case where one ma batch has info on a policy the other has not
        batch2_ids = set(batch2.policy_batches.keys())
        difference = batch1_ids.symmetric_difference(batch2_ids)
        assert (
            not difference
        ), f"MultiAgentBatches don't share the following information: \n{difference}."
    else:
        raise ValueError("Unsupported batch type " + str(type(batch1)))
|
| 1412 |
+
|
| 1413 |
+
|
| 1414 |
+
def check_reproducibilty(
    algo_class: Type["Algorithm"],
    algo_config: "AlgorithmConfig",
    *,
    fw_kwargs: Dict[str, Any],
    training_iteration: int = 1,
) -> None:
    # TODO @kourosh: we can get rid of examples/deterministic_training.py once
    # this is added to all algorithms
    """Check if the algorithm is reproducible across different testing conditions:

    frameworks: all input frameworks
    num_gpus: int(os.environ.get("RLLIB_NUM_GPUS", "0"))
    num_workers: 0 (only local workers) or
        4 ((1) local workers + (4) remote workers)
    num_envs_per_env_runner: 2

    Args:
        algo_class: Algorithm class to test.
        algo_config: Base config to use for the algorithm.
        fw_kwargs: Framework iterator keyword arguments.
        training_iteration: Number of training iterations to run.

    Returns:
        None

    Raises:
        It raises an AssertionError if the algorithm is not reproducible.
    """
    from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
    from ray.rllib.utils.metrics.learner_info import LEARNER_INFO

    # NOTE(review): `fw_kwargs` is accepted but never used anywhere in this
    # body — confirm whether it can be dropped or should be applied to the
    # config.
    stop_dict = {TRAINING_ITERATION: training_iteration}
    # Use 0 and 2 workers (for more than 4 workers we have to make sure the
    # instance type in the ci build has enough resources).
    for num_workers in [0, 2]:
        # Fixed seed so that two identical runs must produce identical results.
        algo_config = (
            algo_config.debugging(seed=42).env_runners(
                num_env_runners=num_workers, num_envs_per_env_runner=2
            )
            # new API
            .learners(
                num_gpus_per_learner=int(os.environ.get("RLLIB_NUM_GPUS", "0")),
            )
            # old API
            .resources(
                num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")),
            )
        )

        print(
            f"Testing reproducibility of {algo_class.__name__}"
            f" with {num_workers} workers"
        )
        print("/// config")
        pprint.pprint(algo_config.to_dict())
        # test tune.Tuner().fit() reproducibility: run the exact same
        # experiment twice and compare the resulting metrics.
        results1 = tune.Tuner(
            algo_class,
            param_space=algo_config.to_dict(),
            run_config=air.RunConfig(stop=stop_dict, verbose=1),
        ).fit()
        results1 = results1.get_best_result().metrics

        results2 = tune.Tuner(
            algo_class,
            param_space=algo_config.to_dict(),
            run_config=air.RunConfig(stop=stop_dict, verbose=1),
        ).fit()
        results2 = results2.get_best_result().metrics

        # Test rollout behavior.
        check(
            results1[ENV_RUNNER_RESULTS]["hist_stats"],
            results2[ENV_RUNNER_RESULTS]["hist_stats"],
        )
        # As well as training behavior (minibatch sequence during SGD
        # iterations).
        if algo_config.enable_rl_module_and_learner:
            check(
                results1["info"][LEARNER_INFO][DEFAULT_POLICY_ID],
                results2["info"][LEARNER_INFO][DEFAULT_POLICY_ID],
            )
        else:
            check(
                results1["info"][LEARNER_INFO][DEFAULT_POLICY_ID]["learner_stats"],
                results2["info"][LEARNER_INFO][DEFAULT_POLICY_ID]["learner_stats"],
            )
|
| 1504 |
+
|
| 1505 |
+
|
| 1506 |
+
def get_cartpole_dataset_reader(batch_size: int = 1) -> "DatasetReader":
    """Builds a DatasetReader over the bundled cartpole offline dataset.

    Args:
        batch_size: The batch size to use for the reader.

    Returns:
        A rllib DatasetReader for the cartpole dataset.
    """
    # Imported locally to avoid circular imports at module load time.
    from ray.rllib.algorithms import AlgorithmConfig
    from ray.rllib.offline import IOContext
    from ray.rllib.offline.dataset_reader import (
        DatasetReader,
        get_dataset_and_shards,
    )

    # Resolve the JSON-formatted offline data into a Ray dataset
    # (the shards part of the return value is not needed here).
    source_config = AlgorithmConfig().offline_data(
        input_="dataset",
        input_config={"format": "json", "paths": "tests/data/cartpole/large.json"},
    )
    dataset, _ = get_dataset_and_shards(source_config)

    # The IO context carries the batch size and normalization settings
    # that the reader consults when producing batches.
    reader_config = (
        AlgorithmConfig()
        .training(train_batch_size=batch_size)
        .offline_data(actions_in_input_normalized=True)
    )
    io_context = IOContext(config=reader_config, worker_index=0)
    return DatasetReader(dataset, io_context)
|
| 1535 |
+
|
| 1536 |
+
|
| 1537 |
+
class ModelChecker:
    """Helper class to compare architecturally identical Models across frameworks.

    Holds a ModelConfig, such that individual models can be added simply via their
    framework string (by building them with config.build(framework=...).
    A call to `check()` forces all added models to be compared in terms of their
    number of trainable and non-trainable parameters, as well as, their
    computation results given a common weights structure and values and identical
    inputs to the models.
    """

    def __init__(self, config):
        # The shared model config; each framework's model is built from it.
        self.config = config

        # To compare number of params between frameworks.
        self.param_counts = {}
        # To compare computed outputs from fixed-weights-nets between frameworks.
        self.output_values = {}

        # We will pass an observation filled with this one random value through
        # all DL networks (after they have been set to fixed-weights) to compare
        # the computed outputs.
        self.random_fill_input_value = np.random.uniform(-0.01, 0.01)

        # Dict of models to check against each other, keyed by framework str.
        self.models = {}

    def add(self, framework: str = "torch", obs=True, state=False) -> Any:
        """Builds a new Model for the given framework.

        NOTE(review): `state` doubles as both an on/off flag and the state
        structure itself (it is passed to `tree.map_structure` below) —
        confirm callers always pass a space struct, not just `True`.
        """
        model = self.models[framework] = self.config.build(framework=framework)

        # Pass a B=1 observation through the model.
        # If `state` is given, an extra T=1 axis is inserted after the batch
        # axis.
        inputs = np.full(
            [1] + ([1] if state else []) + list(self.config.input_dims),
            self.random_fill_input_value,
        )
        if obs:
            inputs = {Columns.OBS: inputs}
        if state:
            # Zero-filled initial state with a leading batch axis of 1.
            inputs[Columns.STATE_IN] = tree.map_structure(
                lambda s: np.zeros(shape=[1] + list(s)), state
            )
        if framework == "torch":
            from ray.rllib.utils.torch_utils import convert_to_torch_tensor

            inputs = convert_to_torch_tensor(inputs)
        # w/ old specs: inputs = model.input_specs.fill(self.random_fill_input_value)

        # First forward pass with the freshly built (randomly initialized)
        # weights; this is what gets returned to the caller.
        outputs = model(inputs)

        # Bring model into a reproducible, comparable state (so we can compare
        # computations across frameworks). Use only a value-sequence of len=1 here
        # as it could possibly be that the layers are stored in different order
        # across the different frameworks.
        model._set_to_dummy_weights(value_sequence=(self.random_fill_input_value,))

        # Perform another forward pass.
        comparable_outputs = model(inputs)

        # Store the number of parameters for this framework's net.
        self.param_counts[framework] = model.get_num_parameters()
        # Store the fixed-weights-net outputs for this framework's net.
        # Torch tensors must be detached before converting to numpy.
        if framework == "torch":
            self.output_values[framework] = tree.map_structure(
                lambda s: s.detach().numpy() if s is not None else None,
                comparable_outputs,
            )
        else:
            self.output_values[framework] = tree.map_structure(
                lambda s: s.numpy() if s is not None else None, comparable_outputs
            )
        return outputs

    def check(self):
        """Compares all added Models with each other and possibly raises errors."""

        # Use the first added framework as the reference point.
        main_key = next(iter(self.models.keys()))
        # Compare number of trainable and non-trainable params between all
        # frameworks.
        for c in self.param_counts.values():
            check(c, self.param_counts[main_key])

        # Compare dummy outputs by exact values given that all nets received the
        # same input and all nets have the same (dummy) weight values.
        for v in self.output_values.values():
            check(v, self.output_values[main_key], atol=0.0005)
|
| 1623 |
+
|
| 1624 |
+
|
| 1625 |
+
def _get_mean_action_from_algorithm(
    alg: "Algorithm", obs: np.ndarray, num_samples: int = 5000
) -> np.ndarray:
    """Returns the mean action computed by the given algorithm.

    Note: This makes calls to `Algorithm.compute_single_action`

    Args:
        alg: The constructed algorithm to run inference on.
        obs: The observation to compute the action for.
        num_samples: Number of actions to sample when computing the mean.
            Defaults to 5000, preserving the original behavior.

    Returns:
        The mean action computed by the algorithm over `num_samples` samples.
    """
    # Each sampled action is cast to float so np.mean reduces over plain
    # scalars (actions are assumed to be scalar-convertible here).
    return np.mean(
        [float(alg.compute_single_action(obs)) for _ in range(num_samples)]
    )
|
| 1642 |
+
|
| 1643 |
+
|
| 1644 |
+
def check_supported_spaces(
    alg: str,
    config: "AlgorithmConfig",
    train: bool = True,
    check_bounds: bool = False,
    frameworks: Optional[Tuple[str]] = None,
    use_gpu: bool = False,
):
    """Checks whether the given algorithm supports different action and obs spaces.

    Performs the checks by constructing an rllib algorithm from the config and
    checking to see that the model inside the policy is the correct one given
    the action and obs spaces. For example if the action space is discrete and
    the obs space is an image, then the model should be a vision network with
    a categorical action distribution.

    Args:
        alg: The name of the algorithm to test.
        config: The config to use for the algorithm.
        train: Whether to train the algorithm for a few iterations.
        check_bounds: Whether to check the bounds of the action space.
        frameworks: The frameworks to test the algorithm with.
        use_gpu: Whether to check support for training on a gpu.
    """
    # Do these imports here because otherwise we have circular imports.
    from ray.rllib.examples.envs.classes.random_env import RandomEnv
    from ray.rllib.models.torch.complex_input_net import (
        ComplexInputNetwork as TorchComplexNet,
    )
    from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFCNet
    from ray.rllib.models.torch.visionnet import VisionNetwork as TorchVisionNet

    action_spaces_to_test = {
        # Test discrete twice here until we support multi_binary action spaces
        "discrete": Discrete(5),
        "continuous": Box(-1.0, 1.0, (5,), dtype=np.float32),
        "int_actions": Box(0, 3, (2, 3), dtype=np.int32),
        "multidiscrete": MultiDiscrete([1, 2, 3, 4]),
        "tuple": GymTuple(
            [Discrete(2), Discrete(3), Box(-1.0, 1.0, (5,), dtype=np.float32)]
        ),
        "dict": GymDict(
            {
                "action_choice": Discrete(3),
                "parameters": Box(-1.0, 1.0, (1,), dtype=np.float32),
                "yet_another_nested_dict": GymDict(
                    {"a": GymTuple([Discrete(2), Discrete(3)])}
                ),
            }
        ),
    }

    observation_spaces_to_test = {
        "multi_binary": MultiBinary([3, 10, 10]),
        "discrete": Discrete(5),
        "continuous": Box(-1.0, 1.0, (5,), dtype=np.float32),
        "vector2d": Box(-1.0, 1.0, (5, 5), dtype=np.float32),
        "image": Box(-1.0, 1.0, (84, 84, 1), dtype=np.float32),
        "tuple": GymTuple([Discrete(10), Box(-1.0, 1.0, (5,), dtype=np.float32)]),
        "dict": GymDict(
            {
                "task": Discrete(10),
                "position": Box(-1.0, 1.0, (5,), dtype=np.float32),
            }
        ),
    }

    # The observation spaces that we test RLModules with
    rlmodule_supported_observation_spaces = [
        "multi_binary",
        "discrete",
        "continuous",
        "image",
        "tuple",
        "dict",
    ]

    # The action spaces that we test RLModules with
    rlmodule_supported_action_spaces = ["discrete", "continuous"]

    default_observation_space = default_action_space = "discrete"

    config["log_level"] = "ERROR"
    config["env"] = RandomEnv

    def _do_check(alg, config, a_name, o_name):
        # Runs one build/validate/(train) cycle for the given action/obs space
        # combination; executed as a remote task below.
        # We need to copy here so that this validation does not affect the actual
        # validation method call further down the line.
        config_copy = config.copy()
        config_copy.validate()
        # If RLModules are enabled, we need to skip a few tests for now:
        if config_copy.enable_rl_module_and_learner:
            # Skip PPO cases in which RLModules don't support the given spaces yet.
            if o_name not in rlmodule_supported_observation_spaces:
                logger.warning(
                    "Skipping PPO test with RLModules for obs space {}".format(o_name)
                )
                return
            if a_name not in rlmodule_supported_action_spaces:
                logger.warning(
                    "Skipping PPO test with RLModules for action space {}".format(
                        a_name
                    )
                )
                return

        fw = config["framework"]
        action_space = action_spaces_to_test[a_name]
        obs_space = observation_spaces_to_test[o_name]
        print(
            "=== Testing {} (fw={}) action_space={} obs_space={} ===".format(
                alg, fw, action_space, obs_space
            )
        )
        t0 = time.time()
        config.update_from_dict(
            dict(
                env_config=dict(
                    action_space=action_space,
                    observation_space=obs_space,
                    reward_space=Box(1.0, 1.0, shape=(), dtype=np.float32),
                    p_terminated=1.0,
                    check_action_bounds=check_bounds,
                )
            )
        )
        stat = "ok"

        try:
            algo = config.build()
        except ray.exceptions.RayActorError as e:
            # Fix: accessing `e.args[2]` requires at least 3 args; the old
            # guard (`len(e.args) >= 2`) allowed an IndexError when len == 2.
            if len(e.args) > 2 and isinstance(e.args[2], UnsupportedSpaceException):
                stat = "unsupported"
            elif isinstance(e.args[0].args[2], UnsupportedSpaceException):
                stat = "unsupported"
            else:
                raise
        except UnsupportedSpaceException:
            stat = "unsupported"
        else:
            # Sanity-check the auto-selected default model class for algos
            # that use the default catalog models.
            if alg not in ["SAC", "PPO"]:
                # 2D (image) input: Expect VisionNet.
                if o_name in ["atari", "image"]:
                    assert isinstance(algo.get_policy().model, TorchVisionNet)
                # 1D input: Expect FCNet.
                elif o_name == "continuous":
                    assert isinstance(algo.get_policy().model, TorchFCNet)
                # Could be either one: ComplexNet (if disabled Preprocessor)
                # or FCNet (w/ Preprocessor).
                elif o_name == "vector2d":
                    assert isinstance(
                        algo.get_policy().model, (TorchComplexNet, TorchFCNet)
                    )
            if train:
                algo.train()
            algo.stop()
        print("Test: {}, ran in {}s".format(stat, time.time() - t0))

    if not frameworks:
        frameworks = ("tf2", "tf", "torch")

    # Run each check as its own Ray task (optionally on a GPU).
    _do_check_remote = ray.remote(_do_check)
    _do_check_remote = _do_check_remote.options(num_gpus=1 if use_gpu else 0)
    # Test all action spaces first.
    for a_name in action_spaces_to_test.keys():
        o_name = default_observation_space
        ray.get(_do_check_remote.remote(alg, config, a_name, o_name))

    # Now test all observation spaces.
    for o_name in observation_spaces_to_test.keys():
        a_name = default_action_space
        ray.get(_do_check_remote.remote(alg, config, a_name, o_name))
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/tf_utils.py
ADDED
|
@@ -0,0 +1,812 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import Any, Callable, List, Optional, Type, TYPE_CHECKING, Union
|
| 3 |
+
|
| 4 |
+
import gymnasium as gym
|
| 5 |
+
import numpy as np
|
| 6 |
+
import tree # pip install dm_tree
|
| 7 |
+
from gymnasium.spaces import Discrete, MultiDiscrete
|
| 8 |
+
|
| 9 |
+
from ray.rllib.utils.annotations import PublicAPI, DeveloperAPI
|
| 10 |
+
from ray.rllib.utils.framework import try_import_tf
|
| 11 |
+
from ray.rllib.utils.numpy import SMALL_NUMBER
|
| 12 |
+
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
|
| 13 |
+
from ray.rllib.utils.typing import (
|
| 14 |
+
LocalOptimizer,
|
| 15 |
+
ModelGradients,
|
| 16 |
+
NetworkType,
|
| 17 |
+
PartialAlgorithmConfigDict,
|
| 18 |
+
SpaceStruct,
|
| 19 |
+
TensorStructType,
|
| 20 |
+
TensorType,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
if TYPE_CHECKING:
|
| 24 |
+
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
|
| 25 |
+
from ray.rllib.core.learner.learner import ParamDict
|
| 26 |
+
from ray.rllib.policy.eager_tf_policy import EagerTFPolicy
|
| 27 |
+
from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2
|
| 28 |
+
from ray.rllib.policy.tf_policy import TFPolicy
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger(__name__)
|
| 31 |
+
tf1, tf, tfv = try_import_tf()
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@PublicAPI
def clip_gradients(
    gradients_dict: "ParamDict",
    *,
    grad_clip: Optional[float] = None,
    grad_clip_by: str,
) -> Optional[float]:
    """Performs gradient clipping on a grad-dict based on a clip value and clip mode.

    Changes the provided gradient dict in place.

    Args:
        gradients_dict: The gradients dict, mapping str to gradient tensors.
        grad_clip: The value to clip with. The way gradients are clipped is defined
            by the `grad_clip_by` arg (see below).
        grad_clip_by: One of 'value', 'norm', or 'global_norm'.

    Returns:
        If `grad_clip_by`="global_norm" and `grad_clip` is not None, returns the global
        norm of all tensors, otherwise returns None.
    """
    # Without a clip value there is nothing to do.
    if grad_clip is None:
        return None

    # Snapshot the (key, gradient) pairs up front so we can safely write back
    # into the dict while iterating.
    entries = list(gradients_dict.items())

    # Mode 1: element-wise clipping of each gradient into [-clip, clip].
    if grad_clip_by == "value":
        for key, grad in entries:
            gradients_dict[key] = tf.clip_by_value(grad, -grad_clip, grad_clip)
        return None

    # Mode 2: per-tensor L2-norm clipping.
    if grad_clip_by == "norm":
        for key, grad in entries:
            gradients_dict[key] = tf.clip_by_norm(grad, grad_clip)
        return None

    # Mode 3: clip by the global L2-norm across all gradient tensors.
    assert grad_clip_by == "global_norm"
    clipped, global_norm = tf.clip_by_global_norm(
        [grad for _, grad in entries], grad_clip
    )
    for (key, _), new_grad in zip(entries, clipped):
        gradients_dict[key] = new_grad

    # Only this mode reports the (pre-clip) global norm scalar.
    return global_norm
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@PublicAPI
def explained_variance(y: TensorType, pred: TensorType) -> TensorType:
    """Computes the explained variance for a pair of labels and predictions.

    The formula used is:
    max(-1.0, 1.0 - (std(y - pred)^2 / std(y)^2))

    Args:
        y: The labels.
        pred: The predictions.

    Returns:
        The explained variance given a pair of labels and predictions.
    """
    # Variance of the labels along the batch axis (moments -> (mean, var)).
    label_variance = tf.nn.moments(y, axes=[0])[1]
    # Variance of the residuals along the batch axis.
    residual_variance = tf.nn.moments(y - pred, axes=[0])[1]
    # SMALL_NUMBER guards against division by zero for constant labels;
    # clamp at -1.0 so arbitrarily bad predictions don't blow up the metric.
    return tf.maximum(
        -1.0, 1 - (residual_variance / (label_variance + SMALL_NUMBER))
    )
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@PublicAPI
def flatten_inputs_to_1d_tensor(
    inputs: TensorStructType,
    spaces_struct: Optional[SpaceStruct] = None,
    time_axis: bool = False,
) -> TensorType:
    """Flattens arbitrary input structs according to the given spaces struct.

    Returns a single 1D tensor resulting from the different input
    components' values.

    Thereby:
    - Boxes (any shape) get flattened to (B, [T]?, -1). Note that image boxes
      are not treated differently from other types of Boxes and get
      flattened as well.
    - Discrete (int) values are one-hot'd, e.g. a batch of [1, 0, 3] (B=3 with
      Discrete(4) space) results in [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]].
    - MultiDiscrete values are multi-one-hot'd, e.g. a batch of
      [[0, 2], [1, 4]] (B=2 with MultiDiscrete([2, 5]) space) results in
      [[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 1]].

    Args:
        inputs: The inputs to be flattened.
        spaces_struct: The structure of the spaces behind the inputs. If None,
            no one-hot'ing is performed (components are only flattened).
        time_axis: Whether all inputs have a time-axis (after the batch axis).
            If True, will keep not only the batch axis (0th), but the time axis
            (1st) as-is and flatten everything from the 2nd axis up.

    Returns:
        A single tensor resulting from concatenating all flattened/one-hot'd
        input components. Depending on the time_axis flag, the shape is
        (B, n) or (B, T, n).
    """

    # Flatten the (possibly nested) input struct and the space struct into
    # parallel flat lists, so they can be zipped below.
    flat_inputs = tree.flatten(inputs)
    flat_spaces = (
        tree.flatten(spaces_struct)
        if spaces_struct is not None
        else [None] * len(flat_inputs)
    )

    # B/T are symbolic dims, read once from the first component's shape.
    B = None
    T = None
    out = []
    for input_, space in zip(flat_inputs, flat_spaces):
        input_ = tf.convert_to_tensor(input_)
        shape = tf.shape(input_)
        # Store batch and (if applicable) time dimension.
        if B is None:
            B = shape[0]
            if time_axis:
                T = shape[1]

        # One-hot encoding.
        if isinstance(space, Discrete):
            if time_axis:
                # Fold time into batch for the one-hot op.
                input_ = tf.reshape(input_, [B * T])
            out.append(tf.cast(one_hot(input_, space), tf.float32))
        elif isinstance(space, MultiDiscrete):
            if time_axis:
                input_ = tf.reshape(input_, [B * T, -1])
            out.append(tf.cast(one_hot(input_, space), tf.float32))
        # Flatten.
        else:
            if time_axis:
                input_ = tf.reshape(input_, [B * T, -1])
            else:
                input_ = tf.reshape(input_, [B, -1])
            out.append(tf.cast(input_, tf.float32))

    merged = tf.concat(out, axis=-1)
    # Restore the time-dimension, if applicable.
    if time_axis:
        merged = tf.reshape(merged, [B, T, -1])

    return merged
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
@PublicAPI
def get_gpu_devices() -> List[str]:
    """Returns the names of all GPU devices, e.g. ["/gpu:0", "/gpu:1"].

    Works under both tf1.x and tf2.x.

    Returns:
        The list of GPU device name strings.
    """
    if tfv == 1:
        from tensorflow.python.client import device_lib

        all_devices = device_lib.list_local_devices()
    else:
        try:
            all_devices = tf.config.list_physical_devices()
        except Exception:
            all_devices = tf.config.experimental.list_physical_devices()

    # Match "GPU", but also variants such as "XLA_GPU".
    gpu_names = []
    for dev in all_devices:
        if "GPU" in dev.device_type:
            gpu_names.append(dev.name)
    return gpu_names
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
@PublicAPI
def get_placeholder(
    *,
    space: Optional[gym.Space] = None,
    value: Optional[Any] = None,
    name: Optional[str] = None,
    time_axis: bool = False,
    flatten: bool = True,
) -> "tf1.placeholder":
    """Returns a tf1.placeholder object given optional hints, such as a space.

    Note that the returned placeholder will always have a leading batch
    dimension (None).

    Args:
        space: An optional gym.Space to hint the shape and dtype of the
            placeholder.
        value: An optional value to hint the shape and dtype of the
            placeholder (only used when `space` is None).
        name: An optional name for the placeholder.
        time_axis: Whether the placeholder should also receive a time
            dimension (None).
        flatten: Whether to flatten the given space into a plain Box space
            and then create the placeholder from the resulting space.

    Returns:
        The tf1 placeholder.
    """
    from ray.rllib.models.catalog import ModelCatalog

    if space is not None:
        if isinstance(space, (gym.spaces.Dict, gym.spaces.Tuple)):
            if flatten:
                # Let the catalog create one flat placeholder for the whole
                # complex space.
                return ModelCatalog.get_action_placeholder(space, None)
            else:
                # Recurse: build one placeholder per leaf space, naming each
                # after its path within the struct.
                return tree.map_structure_with_path(
                    lambda path, component: get_placeholder(
                        space=component,
                        name=name + "." + ".".join([str(p) for p in path]),
                    ),
                    get_base_struct_from_space(space),
                )
        return tf1.placeholder(
            shape=(None,) + ((None,) if time_axis else ()) + space.shape,
            # float64 spaces are downcast to float32.
            dtype=tf.float32 if space.dtype == np.float64 else space.dtype,
            name=name,
        )
    else:
        assert value is not None
        # Derive the per-item shape from the example value (drop batch dim).
        shape = value.shape[1:]
        return tf1.placeholder(
            shape=(None,)
            + ((None,) if time_axis else ())
            + (shape if isinstance(shape, tuple) else tuple(shape.as_list())),
            dtype=tf.float32 if value.dtype == np.float64 else value.dtype,
            name=name,
        )
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
@PublicAPI
def get_tf_eager_cls_if_necessary(
    orig_cls: Type["TFPolicy"],
    config: Union["AlgorithmConfig", PartialAlgorithmConfigDict],
) -> Type[Union["TFPolicy", "EagerTFPolicy", "EagerTFPolicyV2"]]:
    """Returns the corresponding tf-eager class for a given TFPolicy class.

    Args:
        orig_cls: The original TFPolicy class to get the corresponding tf-eager
            class for.
        config: The Algorithm config dict or AlgorithmConfig object.

    Returns:
        The tf eager policy class corresponding to the given TFPolicy class.
    """
    cls = orig_cls
    framework = config.get("framework", "tf")

    if not tf1 and framework in ["tf2", "tf"]:
        raise ImportError("Could not import tensorflow!")

    if framework == "tf2":
        if not tf1.executing_eagerly():
            tf1.enable_eager_execution()
        assert tf1.executing_eagerly()

        from ray.rllib.policy.tf_policy import TFPolicy
        from ray.rllib.policy.eager_tf_policy import EagerTFPolicy
        from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2

        # Convert to the eager version of the class (unless it already is one).
        if hasattr(orig_cls, "as_eager") and not issubclass(orig_cls, EagerTFPolicy):
            cls = orig_cls.as_eager()
        elif not issubclass(orig_cls, TFPolicy):
            # Some other type of policy, or already eager-ized: keep as-is.
            pass
        else:
            raise ValueError(
                "This policy does not support eager execution: {}".format(orig_cls)
            )

        # The policy is an eager one now -> Add tracing, if configured.
        if config.get("eager_tracing") and issubclass(
            cls, (EagerTFPolicy, EagerTFPolicyV2)
        ):
            cls = cls.with_tracing()
    return cls
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
@PublicAPI
def huber_loss(x: TensorType, delta: float = 1.0) -> TensorType:
    """Computes the Huber loss for an input term and a delta threshold.

    Reference: https://en.wikipedia.org/wiki/Huber_loss
    The factor of 0.5 is implicitly included:
        L = 0.5 * x^2                     for |x| < delta
        L = delta * (|x| - 0.5 * delta)   otherwise

    Args:
        x: The input term, e.g. a TD error.
        delta: The delta parameter in the above formula.

    Returns:
        The Huber loss resulting from `x` and `delta`.
    """
    abs_x = tf.abs(x)
    quadratic = 0.5 * tf.math.square(x)
    linear = delta * (abs_x - 0.5 * delta)
    # Quadratic for small |x|, linear beyond the delta threshold.
    return tf.where(abs_x < delta, quadratic, linear)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
@PublicAPI
def l2_loss(x: TensorType) -> TensorType:
    """Computes half the L2 norm over a tensor's values without the sqrt.

    output = 0.5 * sum(x ** 2)

    Args:
        x: The input tensor.

    Returns:
        0.5 times the L2 norm over the given tensor's values (w/o sqrt).
    """
    sum_of_squares = tf.reduce_sum(tf.math.square(x))
    return 0.5 * sum_of_squares
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
@PublicAPI
def make_tf_callable(
    session_or_none: Optional["tf1.Session"], dynamic_shape: bool = False
) -> Callable:
    """Returns a function that can be executed in either graph or eager mode.

    The function must take only positional args.

    If eager is enabled, this will act as just a function. Otherwise, it
    will build a function that executes a session run with placeholders
    internally.

    Args:
        session_or_none: tf.Session if in graph mode, else None.
        dynamic_shape: True if the placeholders should have a dynamic
            batch dimension. Otherwise they will be fixed shape.

    Returns:
        A function that can be called in either eager or static-graph mode.
    """

    if tf.executing_eagerly():
        assert session_or_none is None
    else:
        assert session_or_none is not None

    def make_wrapper(fn):
        # Static-graph mode: Create placeholders and make a session call each
        # time the wrapped function is called. Returns the output of this
        # session call.
        if session_or_none is not None:
            args_placeholders = []
            kwargs_placeholders = {}

            # Lazily built symbolic output of `fn`; also serves as the
            # "graph already built?" flag (a 1-list so the closure can mutate it).
            symbolic_out = [None]

            def call(*args, **kwargs):
                # Flatten one level of lists in `args` so every element gets
                # its own placeholder.
                args_flat = []
                for a in args:
                    if type(a) is list:
                        args_flat.extend(a)
                    else:
                        args_flat.append(a)
                args = args_flat

                # We have not built any placeholders yet: Do this once here,
                # then reuse the same placeholders each time we call this
                # function again.
                if symbolic_out[0] is None:
                    with session_or_none.graph.as_default():

                        def _create_placeholders(path, value):
                            if dynamic_shape:
                                if len(value.shape) > 0:
                                    # Only the batch dim is made dynamic.
                                    shape = (None,) + value.shape[1:]
                                else:
                                    shape = ()
                            else:
                                shape = value.shape
                            return tf1.placeholder(
                                dtype=value.dtype,
                                shape=shape,
                                name=".".join([str(p) for p in path]),
                            )

                        placeholders = tree.map_structure_with_path(
                            _create_placeholders, args
                        )
                        for ph in tree.flatten(placeholders):
                            args_placeholders.append(ph)

                        placeholders = tree.map_structure_with_path(
                            _create_placeholders, kwargs
                        )
                        for k, ph in placeholders.items():
                            kwargs_placeholders[k] = ph

                        # Build the symbolic graph once, on the placeholders.
                        symbolic_out[0] = fn(*args_placeholders, **kwargs_placeholders)
                # Feed the concrete values into the (cached) placeholders.
                feed_dict = dict(zip(args_placeholders, tree.flatten(args)))
                tree.map_structure(
                    lambda ph, v: feed_dict.__setitem__(ph, v),
                    kwargs_placeholders,
                    kwargs,
                )
                ret = session_or_none.run(symbolic_out[0], feed_dict)
                return ret

            return call
        # Eager mode (call function as is).
        else:
            return fn

    return make_wrapper
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
# TODO (sven): Deprecate this function once we have moved completely to the Learner API.
|
| 475 |
+
# Replaced with `clip_gradients()`.
|
| 476 |
+
@PublicAPI
def minimize_and_clip(
    optimizer: LocalOptimizer,
    objective: TensorType,
    var_list: List["tf.Variable"],
    clip_val: float = 10.0,
) -> ModelGradients:
    """Computes, then clips gradients using objective, optimizer and var list.

    Ensures the norm of the gradients for each variable is clipped to
    `clip_val`.

    Args:
        optimizer: Either a shim optimizer (tf eager) containing a
            tf.GradientTape under `self.tape` or a tf1 local optimizer
            object.
        objective: The loss tensor to calculate gradients on.
        var_list: The list of tf.Variables to compute gradients over.
        clip_val: The per-gradient norm clip value (None disables clipping).

    Returns:
        The resulting model gradients (list of grad/var tuples) corresponding
        to the input `var_list`; entries with a None gradient are dropped.
    """
    # Accidentally passing values < 0.0 will break all gradients.
    assert clip_val is None or clip_val > 0.0, clip_val

    if tf.executing_eagerly():
        raw_grads = optimizer.tape.gradient(objective, var_list)
        grads_and_vars = list(zip(list(raw_grads), var_list))
    else:
        grads_and_vars = optimizer.compute_gradients(objective, var_list=var_list)

    clipped = []
    for grad, var in grads_and_vars:
        if grad is None:
            continue
        if clip_val is not None:
            grad = tf.clip_by_norm(grad, clip_val)
        clipped.append((grad, var))
    return clipped
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
@PublicAPI
def one_hot(x: TensorType, space: gym.Space) -> TensorType:
    """Returns a one-hot tensor, given an int tensor and a space.

    Handles the MultiDiscrete case as well (multi-one-hot: one one-hot block
    per sub-space, concatenated along the last axis).

    Args:
        x: The input tensor.
        space: The space to use for generating the one-hot tensor.

    Returns:
        The resulting one-hot tensor.

    Raises:
        ValueError: If the given space is not a discrete one.
    """
    if isinstance(space, Discrete):
        return tf.one_hot(x, space.n, dtype=tf.float32)
    if isinstance(space, MultiDiscrete):
        if isinstance(space.nvec[0], np.ndarray):
            # Multi-dim MultiDiscrete: flatten both the nvec matrix and the
            # trailing dims of the input so they line up 1:1.
            nvec = np.ravel(space.nvec)
            x = tf.reshape(x, (x.shape[0], -1))
        else:
            nvec = space.nvec
        blocks = [
            tf.one_hot(x[:, i], n, dtype=tf.float32) for i, n in enumerate(nvec)
        ]
        return tf.concat(blocks, axis=-1)
    raise ValueError("Unsupported space for `one_hot`: {}".format(space))
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
@PublicAPI
def reduce_mean_ignore_inf(x: TensorType, axis: Optional[int] = None) -> TensorType:
    """Same as tf.reduce_mean() but ignores tf.float32.min ("-inf") values.

    Args:
        x: The input tensor to reduce mean over.
        axis: The axis over which to reduce. None for all axes.

    Returns:
        The mean-reduced inputs, ignoring entries equal to tf.float32.min.
    """
    # Mask marking valid (non "-inf") entries.
    valid = tf.not_equal(x, tf.float32.min)
    zeroed = tf.where(valid, x, tf.zeros_like(x))
    num_valid = tf.math.reduce_sum(tf.cast(valid, tf.float32), axis)
    return tf.math.reduce_sum(zeroed, axis) / num_valid
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
@PublicAPI
def scope_vars(
    scope: Union[str, "tf1.VariableScope"], trainable_only: bool = False
) -> List["tf.Variable"]:
    """Get variables inside a given scope.

    Args:
        scope: Scope in which the variables reside.
        trainable_only: Whether or not to return only the variables that were
            marked as trainable.

    Returns:
        The list of variables in the given `scope`.
    """
    if trainable_only:
        collection = tf1.GraphKeys.TRAINABLE_VARIABLES
    else:
        collection = tf1.GraphKeys.VARIABLES
    scope_name = scope if isinstance(scope, str) else scope.name
    return tf1.get_collection(collection, scope=scope_name)
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
@PublicAPI
def symlog(x: "tf.Tensor") -> "tf.Tensor":
    """The symlog function as described in [1]:

    symlog(x) = sign(x) * log(|x| + 1)

    [1] Mastering Diverse Domains through World Models - 2023
    D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
    https://arxiv.org/pdf/2301.04104v1.pdf
    """
    magnitude = tf.math.log(tf.math.abs(x) + 1)
    return tf.math.sign(x) * magnitude
|
| 626 |
+
|
| 627 |
+
|
| 628 |
+
@PublicAPI
def inverse_symlog(y: "tf.Tensor") -> "tf.Tensor":
    """Inverse of the `symlog` function as described in [1]:

    Solving y = sign(x) * log(|x| + 1) for x (noting that x and y always
    share the same sign) yields:

        x = sign(y) * (exp(|y|) - 1)

    [1] Mastering Diverse Domains through World Models - 2023
    D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
    https://arxiv.org/pdf/2301.04104v1.pdf
    """
    magnitude = tf.math.exp(tf.math.abs(y)) - 1
    return tf.math.sign(y) * magnitude
|
| 649 |
+
|
| 650 |
+
|
| 651 |
+
@PublicAPI
def two_hot(
    value: "tf.Tensor",
    num_buckets: int = 255,
    lower_bound: float = -20.0,
    upper_bound: float = 20.0,
    dtype=None,
):
    """Returns a two-hot vector of dim=num_buckets with two entries that are non-zero.

    See [1] for more details:
    [1] Mastering Diverse Domains through World Models - 2023
    D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
    https://arxiv.org/pdf/2301.04104v1.pdf

    Entries in the vector represent equally sized buckets within some fixed range
    (`lower_bound` to `upper_bound`).
    Those entries not 0.0 at positions k and k+1 encode the actual `value` and sum
    up to 1.0. They are the weights multiplied by the buckets values at k and k+1 for
    retrieving `value`.

    Example:
        num_buckets=11
        lower_bound=-5
        upper_bound=5
        value=2.5
        -> [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0]
        -> [-5   -4   -3   -2   -1   0    1    2    3    4    5] (0.5*2 + 0.5*3=2.5)

    Example:
        num_buckets=5
        lower_bound=-1
        upper_bound=1
        value=0.1
        -> [0.0, 0.0, 0.8, 0.2, 0.0]
        -> [-1  -0.5   0   0.5   1] (0.2*0.5 + 0.8*0=0.1)

    Args:
        value: The input tensor of shape (B,) to be two-hot encoded.
        num_buckets: The number of buckets to two-hot encode into.
        lower_bound: The lower bound value used for the encoding. If input values are
            lower than this boundary, they will be encoded as `lower_bound`.
        upper_bound: The upper bound value used for the encoding. If input values are
            higher than this boundary, they will be encoded as `upper_bound`.

    Returns:
        The two-hot encoded tensor of shape (B, num_buckets).
    """
    # First make sure, values are clipped.
    value = tf.clip_by_value(value, lower_bound, upper_bound)
    # Tensor of batch indices: [0, B=batch size).
    # NOTE: cast to float so it can be stacked with the (float) bucket
    # indices k/kp1 below; everything is cast back to int32 for scatter_nd.
    batch_indices = tf.cast(
        tf.range(0, tf.shape(value)[0]),
        dtype=dtype or tf.float32,
    )
    # Calculate the step deltas (how much space between each bucket's central value?).
    bucket_delta = (upper_bound - lower_bound) / (num_buckets - 1)
    # Compute the float indices (might be non-int numbers: sitting between two buckets).
    idx = (-lower_bound + value) / bucket_delta
    # k
    k = tf.math.floor(idx)
    # k+1
    kp1 = tf.math.ceil(idx)
    # In case k == kp1 (idx is exactly on the bucket boundary), move kp1 up by 1.0.
    # Otherwise, this would result in a NaN in the returned two-hot tensor.
    kp1 = tf.where(tf.equal(k, kp1), kp1 + 1.0, kp1)
    # Iff `kp1` is one beyond our last index (because incoming value is larger than
    # `upper_bound`), move it to one before k (kp1's weight is going to be 0.0 anyways,
    # so it doesn't matter where it points to; we are just avoiding an index error
    # with this).
    kp1 = tf.where(tf.equal(kp1, num_buckets), kp1 - 2.0, kp1)
    # The actual values found at k and k+1 inside the set of buckets.
    values_k = lower_bound + k * bucket_delta
    values_kp1 = lower_bound + kp1 * bucket_delta
    # Compute the two-hot weights (adding up to 1.0) to use at index k and k+1.
    weights_k = (value - values_kp1) / (values_k - values_kp1)
    weights_kp1 = 1.0 - weights_k
    # Compile a tensor of full paths (indices from batch index to feature index) to
    # use for the scatter_nd op.
    indices_k = tf.stack([batch_indices, k], -1)
    indices_kp1 = tf.stack([batch_indices, kp1], -1)
    indices = tf.concat([indices_k, indices_kp1], 0)
    # The actual values (weights adding up to 1.0) to place at the computed indices.
    updates = tf.concat([weights_k, weights_kp1], 0)
    # Call the actual scatter update op, returning a zero-filled tensor, only changed
    # at the given indices.
    return tf.scatter_nd(
        tf.cast(indices, tf.int32),
        updates,
        shape=(tf.shape(value)[0], num_buckets),
    )
|
| 742 |
+
|
| 743 |
+
|
| 744 |
+
@PublicAPI
def update_target_network(
    main_net: NetworkType,
    target_net: NetworkType,
    tau: float,
) -> None:
    """Updates a keras.Model target network using Polyak averaging.

    new_target_net_weight = (
        tau * main_net_weight + (1.0 - tau) * current_target_net_weight
    )

    Args:
        main_net: The keras.Model to update from.
        target_net: The target network to update.
        tau: The tau value to use in the Polyak averaging formula.
    """
    for target_var, main_var in zip(target_net.variables, main_net.variables):
        # Blend the main net's weight into the target net's weight in-place.
        blended = tau * main_var + (1.0 - tau) * target_var
        target_var.assign(blended)
|
| 764 |
+
|
| 765 |
+
|
| 766 |
+
@PublicAPI
def zero_logps_from_actions(actions: TensorStructType) -> TensorType:
    """Helper function useful for returning dummy logp's (0) for some actions.

    Args:
        actions: The input actions. This can be any struct
            of complex action components or a simple tensor of different
            dimensions, e.g. [B], [B, 2], or {"a": [B, 4, 5], "b": [B]}.

    Returns:
        A 1D float32 tensor of 0.0 (dummy logp's) matching the batch
        dim of `actions` (shape=[B]).
    """
    # Flatten `actions` (may be a complex struct); only the 0th component is
    # needed to read off the batch dimension.
    first_component = tree.flatten(actions)[0]
    zeros = tf.zeros_like(first_component, dtype=tf.float32)
    # Logp's must be single values per batch item. zeros_like already matches
    # [B]-shaped actions; for [B, ...] actions, repeatedly slice off trailing
    # dims until only [B] remains.
    while len(zeros.shape) > 1:
        zeros = zeros[:, 0]
    return zeros
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
@DeveloperAPI
def warn_if_infinite_kl_divergence(
    policy: Type["TFPolicy"], mean_kl: TensorType
) -> None:
    """Logs a warning when `mean_kl` is infinite (once the loss is initialized)."""

    def _warn_and_zero():
        logger.warning(
            "KL divergence is non-finite, this will likely destabilize your model and"
            " the training process. Action(s) in a specific state have near-zero"
            " probability. This can happen naturally in deterministic environments"
            " where the optimal policy has zero mass for a specific action. To fix this"
            " issue, consider setting the coefficient for the KL loss term to zero or"
            " increasing policy entropy."
        )
        # Both tf.cond branches must return matching tensors.
        return tf.constant(0.0)

    if policy.loss_initialized():
        tf.cond(
            tf.math.is_inf(mean_kl),
            true_fn=lambda: _warn_and_zero(),
            false_fn=lambda: tf.constant(0.0),
        )
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/threading.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Callable
|
| 2 |
+
|
| 3 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@OldAPIStack
def with_lock(func: Callable) -> Callable:
    """Use as decorator (@with_lock) around object methods that need locking.

    Note: The object must have a `self._lock` property (a threading lock).
    Locking thus works on the object level (no two locked methods of the same
    object can be called asynchronously).

    Args:
        func: The function to decorate/wrap.

    Returns:
        The wrapped (object-level locked) function.
    """

    def wrapper(self, *args, **kwargs):
        try:
            with self._lock:
                return func(self, *args, **kwargs)
        except AttributeError as e:
            # Turn a missing `_lock` into a helpful error message; re-raise
            # any other AttributeError unchanged.
            if "has no attribute '_lock'" in e.args[0]:
                raise AttributeError(
                    "Object {} must have a `self._lock` property (assigned "
                    "to a threading.RLock() object in its "
                    "constructor)!".format(self)
                )
            raise e

    return wrapper
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/torch_utils.py
ADDED
|
@@ -0,0 +1,745 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
import warnings
|
| 4 |
+
from typing import Dict, List, Optional, TYPE_CHECKING, Union
|
| 5 |
+
|
| 6 |
+
import gymnasium as gym
|
| 7 |
+
from gymnasium.spaces import Discrete, MultiDiscrete
|
| 8 |
+
import numpy as np
|
| 9 |
+
from packaging import version
|
| 10 |
+
import tree # pip install dm_tree
|
| 11 |
+
|
| 12 |
+
from ray.rllib.models.repeated_values import RepeatedValues
|
| 13 |
+
from ray.rllib.utils.annotations import PublicAPI, DeveloperAPI
|
| 14 |
+
from ray.rllib.utils.framework import try_import_torch
|
| 15 |
+
from ray.rllib.utils.numpy import SMALL_NUMBER
|
| 16 |
+
from ray.rllib.utils.typing import (
|
| 17 |
+
LocalOptimizer,
|
| 18 |
+
NetworkType,
|
| 19 |
+
SpaceStruct,
|
| 20 |
+
TensorStructType,
|
| 21 |
+
TensorType,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
if TYPE_CHECKING:
|
| 25 |
+
from ray.rllib.core.learner.learner import ParamDict, ParamList
|
| 26 |
+
from ray.rllib.policy.torch_policy import TorchPolicy
|
| 27 |
+
from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2
|
| 28 |
+
|
| 29 |
+
logger = logging.getLogger(__name__)
|
| 30 |
+
torch, nn = try_import_torch()
|
| 31 |
+
|
| 32 |
+
# Limit values suitable for use as close to a -inf logit. These are useful
|
| 33 |
+
# since -inf / inf cause NaNs during backprop.
|
| 34 |
+
FLOAT_MIN = -3.4e38
|
| 35 |
+
FLOAT_MAX = 3.4e38
|
| 36 |
+
|
| 37 |
+
if torch:
|
| 38 |
+
TORCH_COMPILE_REQUIRED_VERSION = version.parse("2.0.0")
|
| 39 |
+
else:
|
| 40 |
+
TORCH_COMPILE_REQUIRED_VERSION = ValueError(
|
| 41 |
+
"torch is not installed. " "TORCH_COMPILE_REQUIRED_VERSION is " "not defined."
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# TODO (sven): Deprecate this function once we have moved completely to the Learner API.
|
| 46 |
+
# Replaced with `clip_gradients()`.
|
| 47 |
+
@PublicAPI
|
| 48 |
+
def apply_grad_clipping(
    policy: "TorchPolicy", optimizer: LocalOptimizer, loss: TensorType
) -> Dict[str, TensorType]:
    """Clips already-computed gradients inside `optimizer` (per tensor).

    Note: This is NOT the same operation as tf.clip_by_global_norm. Each
    gradient tensor is clipped by its own norm; afterwards, the (clipped)
    per-group norms are summed up into the reported "grad_gnorm" value
    (no clipping by that global value happens).

    Args:
        policy: The TorchPolicy, which calculated `loss`.
        optimizer: A local torch optimizer object.
        loss: The torch loss tensor.

    Returns:
        An info dict containing the "grad_norm" key and the resulting clipped
        gradients. Empty dict if no param group had any gradients.
    """
    clip_value = policy.config["grad_clip"]
    if clip_value is None:
        # No clipping configured -> effectively clip by infinity.
        clip_value = np.inf

    grad_gnorm = 0
    groups_without_grads = 0
    for group in optimizer.param_groups:
        # torch's clip_grad_norm_ would fail on params whose .grad is None,
        # so filter those out first.
        params_with_grads = [p for p in group["params"] if p.grad is not None]
        if not params_with_grads:
            groups_without_grads += 1
            continue

        # PyTorch clips gradients inplace and returns the norm before
        # clipping, hence the min() with the clip value below (fixes #4965).
        norm = nn.utils.clip_grad_norm_(params_with_grads, clip_value)
        if isinstance(norm, torch.Tensor):
            norm = norm.cpu().numpy()
        grad_gnorm += min(norm, clip_value)

    # Note (Kourosh): grads could indeed be zero. This method should still
    # return grad_gnorm in that case.
    if groups_without_grads == len(optimizer.param_groups):
        # No grads available at all.
        return {}
    return {"grad_gnorm": grad_gnorm}
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@PublicAPI
|
| 99 |
+
def clip_gradients(
    gradients_dict: "ParamDict",
    *,
    grad_clip: Optional[float] = None,
    grad_clip_by: str = "value",
) -> TensorType:
    """Clips the gradients in `gradients_dict` in place, per the given mode.

    Args:
        gradients_dict: Maps str keys to gradient tensors (entries may be
            None).
        grad_clip: The clip value; None means "do not clip at all".
        grad_clip_by: The clipping mode; one of "value", "norm", or
            "global_norm".

    Returns:
        The global norm over all gradient tensors if `grad_clip_by` is
        "global_norm" (and `grad_clip` is not None), otherwise None.
    """
    if grad_clip is None:
        # Clipping disabled -> nothing to do.
        return

    if grad_clip_by == "value":
        # Clamp every single gradient entry into [-grad_clip, grad_clip].
        for key in list(gradients_dict.keys()):
            grad = gradients_dict[key]
            if grad is not None:
                grad = torch.clip(grad, -grad_clip, grad_clip)
            gradients_dict[key] = grad
        return

    if grad_clip_by == "norm":
        # Rescale each tensor individually, iff its own L2-norm exceeds the
        # clip value.
        for grad in gradients_dict.values():
            if grad is None:
                continue
            # Guard the norm against inf/nan before comparing.
            l2 = grad.norm(2).nan_to_num(neginf=-10e8, posinf=10e8)
            if l2 > grad_clip:
                grad.mul_(grad_clip / l2)
        return

    # Only remaining valid mode: clip by global L2-norm across all tensors.
    assert (
        grad_clip_by == "global_norm"
    ), f"`grad_clip_by` ({grad_clip_by}) must be one of [value|norm|global_norm]!"
    grads = list(gradients_dict.values())
    total_norm = compute_global_norm(grads)
    # Normalize by max(total_norm, grad_clip) so the scaling coefficient
    # stays within (0.0, 1.0].
    device = grads[0].device
    clip_coef = grad_clip / torch.maximum(
        torch.tensor(grad_clip).to(device), total_norm + 1e-6
    )
    # Multiplying by a coef clamped to 1.0 is redundant when no clipping is
    # needed, but avoids an `if clip_coef < 1:` check that could force a
    # CPU <=> device synchronization for device-resident gradients.
    clamped_coef = torch.clamp(clip_coef, max=1.0)
    for grad in grads:
        if grad is not None:
            grad.detach().mul_(clamped_coef.to(grad.device))
    return total_norm
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
@PublicAPI
|
| 165 |
+
def compute_global_norm(gradients_list: "ParamList") -> TensorType:
    """Returns the global L2-norm over all tensors in `gradients_list`.

    Args:
        gradients_list: The gradients list containing parameters (entries may
            be None; those are skipped).

    Returns:
        The global L2-norm as a tensor; 0.0 if the list is empty.

    Raises:
        RuntimeError: If the resulting global norm is NaN or infinite.
    """
    # Define the norm type to be L2.
    norm_type = 2.0
    # If we have no grads, return zero.
    if not gradients_list:
        return torch.tensor(0.0)
    device = gradients_list[0].device

    # Per-tensor L2 norms. Note: inf/nan values are clamped to huge (but
    # finite) numbers to avoid overflow in the norm computation; this does
    # not alter the gradients themselves (clipping multiplies, it does not
    # overwrite).
    per_tensor_norms = [
        torch.norm(g.detach(), norm_type)
        .nan_to_num(neginf=-10e8, posinf=10e8)
        .to(device)
        for g in gradients_list
        if g is not None
    ]
    total_norm = torch.norm(torch.stack(per_tensor_norms), norm_type).nan_to_num(
        neginf=-10e8, posinf=10e8
    )
    if torch.logical_or(total_norm.isnan(), total_norm.isinf()):
        raise RuntimeError(
            f"The total norm of order {norm_type} for gradients from "
            "`parameters` is non-finite, so it cannot be clipped. "
        )
    # Return the global norm.
    return total_norm
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@PublicAPI
|
| 206 |
+
def concat_multi_gpu_td_errors(
    policy: Union["TorchPolicy", "TorchPolicyV2"]
) -> Dict[str, TensorType]:
    """Concatenates the per-tower TD-error tensors of a (multi-GPU) TorchPolicy.

    The individual TD-error tensors are read from each model tower's
    `tower_stats` dict (falling back to a zero tensor if the key is missing),
    moved to the policy's device, and concatenated along axis 0.

    Also stores the concatenated tensor on `policy.td_error` as a side effect.

    Args:
        policy: The TorchPolicy to extract the TD-error values from.

    Returns:
        A dict mapping "td_error" to the concatenated tensor and
        "mean_td_error" to its mean-reduced value.
    """
    per_tower = [
        tower.tower_stats.get("td_error", torch.tensor([0.0])).to(policy.device)
        for tower in policy.model_gpu_towers
    ]
    td_error = torch.cat(per_tower, dim=0)
    # Keep the concatenated tensor around on the policy object as well.
    policy.td_error = td_error
    return {"td_error": td_error, "mean_td_error": torch.mean(td_error)}
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
@PublicAPI
|
| 235 |
+
def convert_to_torch_tensor(
    x: TensorStructType,
    device: Optional[str] = None,
    pin_memory: bool = False,
):
    """Converts any struct to torch.Tensors.

    Args:
        x: Any (possibly nested) struct, the values in which will be
            converted and returned as a new struct with all leaves converted
            to torch tensors.
        device: The device to create the tensor on.
        pin_memory: If True, will call the `pin_memory()` method on the created
            tensors.

    Returns:
        Any: A new struct with the same structure as `x`, but with all
            values converted to torch Tensor types. This does not convert possibly
            nested elements that are None because torch has no representation for
            that.
    """

    def mapping(item):
        if item is None:
            # Torch has no representation for `None`, so we return None.
            return item

        # Special handling of "Repeated" values.
        if isinstance(item, RepeatedValues):
            return RepeatedValues(
                tree.map_structure(mapping, item.values), item.lengths, item.max_len
            )

        # Already torch tensor -> make sure it's on right device (below).
        if torch.is_tensor(item):
            tensor = item
        # Numpy arrays.
        elif isinstance(item, np.ndarray):
            # Object type (e.g. info dicts in train batch): leave as-is.
            # str type (e.g. agent_id in train batch): leave as-is.
            if item.dtype == object or item.dtype.type is np.str_:
                return item
            # Non-writable numpy-arrays will cause PyTorch warning ->
            # suppress it while wrapping.
            elif item.flags.writeable is False:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    tensor = torch.from_numpy(item)
            # Already numpy: Wrap as torch tensor.
            else:
                tensor = torch.from_numpy(item)
        # Everything else: Convert to numpy, then wrap as torch tensor.
        else:
            tensor = torch.from_numpy(np.asarray(item))

        # Floatify all float64 tensors (but leave float16 as-is).
        if tensor.is_floating_point() and str(tensor.dtype) != "torch.float16":
            tensor = tensor.float()

        # Pin the tensor's memory (for faster transfer to GPU later).
        # Bugfix: `pin_memory()` is NOT in-place - it returns a pinned copy,
        # which must be re-assigned (previously, the result was discarded and
        # pinning silently never happened).
        if pin_memory and torch.cuda.is_available():
            tensor = tensor.pin_memory()

        return tensor if device is None else tensor.to(device)

    return tree.map_structure(mapping, x)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
@PublicAPI
|
| 301 |
+
def copy_torch_tensors(x: TensorStructType, device: Optional[str] = None):
    """Returns a copy of `x` in which all torch.Tensors are deep-copied.

    Tensors are detached first; if `device` is None they are cloned, otherwise
    the detached tensors are moved to `device` instead. Any non-tensor leaf is
    kept as-is (i.e. shallow-copied).

    Args:
        x : Any (possibly nested) struct possibly containing torch.Tensors.
        device : The device to move the tensors to.

    Returns:
        Any: A new struct with the same structure as `x`, but with all
            torch.Tensors deep-copied and moved to the specified device.
    """

    def mapping(leaf):
        if not isinstance(leaf, torch.Tensor):
            return leaf
        detached = leaf.detach()
        if device is None:
            return torch.clone(detached)
        return detached.to(device)

    return tree.map_structure(mapping, x)
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
@PublicAPI
|
| 332 |
+
def explained_variance(y: TensorType, pred: TensorType) -> TensorType:
    """Computes the explained variance for a pair of labels and predictions.

    The formula used is:
    max(-1.0, 1.0 - (std(y - pred)^2 / std(y)^2))

    Args:
        y: The labels.
        pred: The predictions.

    Returns:
        The explained variance given a pair of labels and predictions.
    """
    # Variance of the labels and of the residuals (both along axis 0).
    var_y = torch.var(y, dim=[0])
    var_residual = torch.var(y - pred, dim=[0])
    # Bound the result from below at -1.0 (SMALL_NUMBER guards against a
    # division by zero for constant labels).
    lower_bound = torch.tensor([-1.0]).to(pred.device)
    return torch.max(lower_bound, 1 - (var_residual / (var_y + SMALL_NUMBER)))[0]
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
@PublicAPI
|
| 352 |
+
def flatten_inputs_to_1d_tensor(
    inputs: TensorStructType,
    spaces_struct: Optional[SpaceStruct] = None,
    time_axis: bool = False,
) -> TensorType:
    """Flattens an arbitrary input struct into one (B, n) or (B, T, n) tensor.

    Per input component:
    - Box values (any shape) get flattened to (B, [T]?, -1). Note that image
      Boxes are NOT treated differently from other Boxes - they get flattened
      as well.
    - Discrete (int) values are one-hot encoded, e.g. a batch of [1, 0, 3]
      (B=3 with Discrete(4)) becomes [[0,1,0,0], [1,0,0,0], [0,0,0,1]].
    - MultiDiscrete values are multi-one-hot encoded, e.g. a batch of
      [[0, 2], [1, 4]] (B=2 with MultiDiscrete([2, 5])) becomes
      [[1,0,0,0,1,0,0], [0,1,0,0,0,0,1]].
    All per-component results are concatenated along the last axis.

    Args:
        inputs: The inputs to be flattened.
        spaces_struct: The structure of the spaces behind the inputs. If None,
            every component is treated like a Box (just flattened).
        time_axis: Whether all inputs have a time-axis (after the batch axis).
            If True, will keep not only the batch axis (0th), but the time
            axis (1st) as-is and flatten everything from the 2nd axis up.

    Returns:
        A single tensor of shape (B, n), or (B, T, n) if `time_axis` is True.
    """
    flat_inputs = tree.flatten(inputs)
    if spaces_struct is None:
        flat_spaces = [None] * len(flat_inputs)
    else:
        flat_spaces = tree.flatten(spaces_struct)

    # Batch (and time) dims are taken from the first component seen.
    batch_dim = None
    time_dim = None
    processed = []
    for tensor, space in zip(flat_inputs, flat_spaces):
        if batch_dim is None:
            batch_dim = tensor.shape[0]
            if time_axis:
                time_dim = tensor.shape[1]

        if isinstance(space, Discrete):
            # Fold time into the batch dim, then one-hot.
            if time_axis:
                tensor = torch.reshape(tensor, [batch_dim * time_dim])
            processed.append(one_hot(tensor, space).float())
        elif isinstance(space, MultiDiscrete):
            # Fold time into the batch dim, then multi-one-hot.
            if time_axis:
                tensor = torch.reshape(tensor, [batch_dim * time_dim, -1])
            processed.append(one_hot(tensor, space).float())
        else:
            # Box (or unknown space): plain flatten.
            if time_axis:
                tensor = torch.reshape(tensor, [batch_dim * time_dim, -1])
            else:
                tensor = torch.reshape(tensor, [batch_dim, -1])
            processed.append(tensor.float())

    merged = torch.cat(processed, dim=-1)
    # Restore the time-dimension, if applicable.
    if time_axis:
        merged = torch.reshape(merged, [batch_dim, time_dim, -1])
    return merged
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
@PublicAPI
|
| 467 |
+
def global_norm(tensors: List[TensorType]) -> TensorType:
    """Returns the global L2 norm over a list of tensors.

    output = sqrt(SUM(t ** 2 for t in tensors)),
    where SUM reduces over all tensors and over all elements in tensors.

    Args:
        tensors: The list of tensors to calculate the global norm over.

    Returns:
        The global L2 norm over the given tensor list.
    """
    # Each tensor's own L2 norm: SQRT(SUM(xi^2)) over all its elements ...
    per_tensor = (torch.pow(torch.sum(torch.pow(t, 2.0)), 0.5) for t in tensors)
    # ... combined into a single global L2 norm.
    return torch.pow(sum(torch.pow(n, 2.0) for n in per_tensor), 0.5)
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
@PublicAPI
|
| 486 |
+
def huber_loss(x: TensorType, delta: float = 1.0) -> TensorType:
    """Computes the huber loss for a given term and delta parameter.

    Reference: https://en.wikipedia.org/wiki/Huber_loss
    Note that the factor of 0.5 is implicitly included in the calculation.

    Formula:
    L = 0.5 * x^2 for small abs x (delta threshold)
    L = delta * (abs(x) - 0.5*delta) for larger abs x (delta threshold)

    Args:
        x: The input term, e.g. a TD error.
        delta: The delta parmameter in the above formula.

    Returns:
        The Huber loss resulting from `x` and `delta`.
    """
    abs_x = torch.abs(x)
    # Quadratic branch (|x| < delta) and linear branch (|x| >= delta).
    quadratic = torch.pow(x, 2.0) * 0.5
    linear = delta * (abs_x - 0.5 * delta)
    return torch.where(abs_x < delta, quadratic, linear)
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
@PublicAPI
|
| 511 |
+
def l2_loss(x: TensorType) -> TensorType:
    """Computes half the L2 norm over a tensor's values without the sqrt.

    output = 0.5 * sum(x ** 2)

    Args:
        x: The input tensor.

    Returns:
        0.5 times the L2 norm over the given tensor's values (w/o sqrt).
    """
    return torch.sum(torch.square(x)) * 0.5
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
@PublicAPI
|
| 526 |
+
def minimize_and_clip(
    optimizer: "torch.optim.Optimizer", clip_val: float = 10.0
) -> None:
    """Clips grads found in `optimizer.param_groups` to given value in place.

    Ensures the norm of the gradients for each variable is clipped to
    `clip_val`.

    Args:
        optimizer: The torch.optim.Optimizer to get the variables from.
        clip_val: The per-variable norm clip value. Each variable's gradient
            is rescaled in place so its L2-norm does not exceed this value.
    """
    # Loop through optimizer's variables and clip each variable's grad norm.
    for param_group in optimizer.param_groups:
        for p in param_group["params"]:
            if p.grad is not None:
                # Bugfix: `clip_grad_norm_` must be passed the parameter `p`
                # itself - it reads and clips `p.grad` internally. Passing
                # `p.grad` (as before) made this a silent no-op, because a
                # gradient tensor's own `.grad` attribute is None and thus
                # got filtered out by `clip_grad_norm_`.
                torch.nn.utils.clip_grad_norm_(p, clip_val)
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
@PublicAPI
|
| 547 |
+
def one_hot(x: TensorType, space: gym.Space) -> TensorType:
    """Returns a one-hot tensor, given an int tensor and a (Multi)Discrete space.

    For a Discrete(n) space, every entry of `x` becomes a length-n one-hot
    vector. For a MultiDiscrete space, each sub-coordinate is one-hot encoded
    separately and the encodings are concatenated along the last axis
    ("multi-one-hot").

    Args:
        x: The input tensor (int values).
        space: The space to use for generating the one-hot tensor.

    Returns:
        The resulting one-hot tensor.

    Raises:
        ValueError: If the given space is neither Discrete nor MultiDiscrete.
    """
    if isinstance(space, Discrete):
        return nn.functional.one_hot(x.long(), space.n)
    if isinstance(space, MultiDiscrete):
        # Nested nvec (stacked MultiDiscrete): flatten nvec and the
        # corresponding input coordinates first.
        if isinstance(space.nvec[0], np.ndarray):
            nvec = np.ravel(space.nvec)
            x = x.reshape(x.shape[0], -1)
        else:
            nvec = space.nvec
        encodings = [
            nn.functional.one_hot(x[:, i].long(), n) for i, n in enumerate(nvec)
        ]
        return torch.cat(encodings, dim=-1)
    raise ValueError("Unsupported space for `one_hot`: {}".format(space))
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
@PublicAPI
|
| 600 |
+
def reduce_mean_ignore_inf(x: TensorType, axis: Optional[int] = None) -> TensorType:
    """Same as torch.mean() but ignores -inf values.

    Args:
        x: The input tensor to reduce mean over.
        axis: The axis over which to reduce. None for all axes.

    Returns:
        The mean reduced inputs, ignoring inf values.
    """
    # True wherever the entry is NOT -inf.
    finite_mask = torch.ne(x, float("-inf"))
    # Zero out the -inf entries, then divide the sum by the count of the
    # remaining (finite) entries.
    zeroed = torch.where(finite_mask, x, torch.zeros_like(x))
    return torch.sum(zeroed, axis) / torch.sum(finite_mask.float(), axis)
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
@PublicAPI
|
| 616 |
+
def sequence_mask(
    lengths: TensorType,
    maxlen: Optional[int] = None,
    dtype=None,
    time_major: bool = False,
) -> TensorType:
    """Offers same behavior as tf.sequence_mask for torch.

    Thanks to Dimitris Papatheodorou
    (https://discuss.pytorch.org/t/pytorch-equivalent-for-tf-sequence-mask/
    39036).

    Args:
        lengths: The tensor of individual lengths to mask by.
        maxlen: The maximum length to use for the time axis. If None, use
            the max of `lengths`.
        dtype: The torch dtype to use for the resulting mask. Defaults to
            torch.bool.
        time_major: Whether to return the mask as [B, T] (False; default) or
            as [T, B] (True).

    Returns:
        The sequence mask resulting from the given input and parameters.
    """
    # If maxlen not given, use the longest lengths in the `lengths` tensor.
    if maxlen is None:
        maxlen = lengths.max()

    mask = torch.ones(tuple(lengths.shape) + (int(maxlen),))

    # Entry [t, b] is True iff t < lengths[b].
    mask = ~(mask.to(lengths.device).cumsum(dim=1).t() > lengths)
    # Time major transformation.
    if not time_major:
        mask = mask.t()

    # By default, set the mask to be boolean.
    # Bugfix: `Tensor.type()` is NOT in-place; its result must be
    # re-assigned (previously, the requested `dtype` was silently ignored
    # and a bool mask was always returned).
    mask = mask.type(dtype or torch.bool)

    return mask
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
@PublicAPI
|
| 657 |
+
def update_target_network(
    main_net: NetworkType,
    target_net: NetworkType,
    tau: float,
) -> None:
    """Updates a torch.nn.Module target network using Polyak averaging.

    new_target_net_weight = (
        tau * main_net_weight + (1.0 - tau) * current_target_net_weight
    )

    Args:
        main_net: The nn.Module to update from.
        target_net: The target network to update.
        tau: The tau value to use in the Polyak averaging formula.
    """
    # Current parameters (and buffers) of the main network.
    main_state = main_net.state_dict()
    # Blend each target entry with its main-net counterpart (Polyak).
    blended = {}
    for key, target_value in target_net.state_dict().items():
        blended[key] = tau * main_state[key] + (1 - tau) * target_value
    # Write the blended values back into the target network.
    target_net.load_state_dict(blended)
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
@DeveloperAPI
|
| 685 |
+
def warn_if_infinite_kl_divergence(
    policy: "TorchPolicy",
    kl_divergence: TensorType,
) -> None:
    """Logs a warning if the given KL-divergence is infinite.

    Only warns after the policy's loss has been initialized (so dummy
    initialization passes do not trigger it).

    Args:
        policy: The TorchPolicy whose loss produced `kl_divergence`.
        kl_divergence: The KL-divergence value to check.
    """
    if not policy.loss_initialized():
        return
    if not kl_divergence.isinf():
        return
    logger.warning(
        "KL divergence is non-finite, this will likely destabilize your model and"
        " the training process. Action(s) in a specific state have near-zero"
        " probability. This can happen naturally in deterministic environments"
        " where the optimal policy has zero mass for a specific action. To fix this"
        " issue, consider setting the coefficient for the KL loss term to zero or"
        " increasing policy entropy."
    )
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
@PublicAPI
|
| 701 |
+
def set_torch_seed(seed: Optional[int] = None) -> None:
    """Sets the torch random seed to the given value.

    Args:
        seed: The seed to use or None for no seeding.
    """
    if seed is None or not torch:
        # Nothing to do without a seed (or without torch installed).
        return

    torch.manual_seed(seed)
    # See https://github.com/pytorch/pytorch/issues/47672.
    cuda_version = torch.version.cuda
    if cuda_version is not None and float(torch.version.cuda) >= 10.2:
        # CUDA >= 10.2: request deterministic CuBLAS workspaces via env var.
        os.environ["CUBLAS_WORKSPACE_CONFIG"] = "4096:8"
    else:
        # Not all operations support this.
        torch.use_deterministic_algorithms(True)
        # This is only for convolution: no problem there.
        torch.backends.cudnn.deterministic = True
|
| 718 |
+
|
| 719 |
+
|
| 720 |
+
@PublicAPI
|
| 721 |
+
def softmax_cross_entropy_with_logits(
    logits: TensorType,
    labels: TensorType,
) -> TensorType:
    """Same behavior as tf.nn.softmax_cross_entropy_with_logits.

    Args:
        logits: The input predictions (raw, un-normalized scores).
        labels: The (soft) label distributions corresponding to `logits`.

    Returns:
        The resulting softmax cross-entropy given predictions and labels.
    """
    log_probs = nn.functional.log_softmax(logits, -1)
    # Cross entropy: negative label-weighted sum of log-probs (last axis).
    return torch.sum(-labels * log_probs, -1)
|
| 735 |
+
|
| 736 |
+
|
| 737 |
+
def _dynamo_is_available():
|
| 738 |
+
# This only works if torch._dynamo is available
|
| 739 |
+
try:
|
| 740 |
+
# TODO(Artur): Remove this once torch._dynamo is available on CI
|
| 741 |
+
import torch._dynamo as dynamo # noqa: F401
|
| 742 |
+
|
| 743 |
+
return True
|
| 744 |
+
except ImportError:
|
| 745 |
+
return False
|
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/typing.py
ADDED
|
@@ -0,0 +1,310 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import (
|
| 2 |
+
TYPE_CHECKING,
|
| 3 |
+
Any,
|
| 4 |
+
Callable,
|
| 5 |
+
Dict,
|
| 6 |
+
Hashable,
|
| 7 |
+
List,
|
| 8 |
+
Optional,
|
| 9 |
+
Sequence,
|
| 10 |
+
Tuple,
|
| 11 |
+
Type,
|
| 12 |
+
TypeVar,
|
| 13 |
+
Union,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
import gymnasium as gym
|
| 18 |
+
|
| 19 |
+
from ray.rllib.utils.annotations import OldAPIStack
|
| 20 |
+
|
| 21 |
+
if TYPE_CHECKING:
|
| 22 |
+
from ray.rllib.core.rl_module.rl_module import RLModuleSpec
|
| 23 |
+
from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec
|
| 24 |
+
from ray.rllib.env.env_context import EnvContext
|
| 25 |
+
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
|
| 26 |
+
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
|
| 27 |
+
from ray.rllib.policy.dynamic_tf_policy_v2 import DynamicTFPolicyV2
|
| 28 |
+
from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2
|
| 29 |
+
from ray.rllib.policy.policy import PolicySpec
|
| 30 |
+
from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch
|
| 31 |
+
from ray.rllib.policy.view_requirement import ViewRequirement
|
| 32 |
+
from ray.rllib.utils import try_import_jax, try_import_tf, try_import_torch
|
| 33 |
+
|
| 34 |
+
_, tf, _ = try_import_tf()
torch, _ = try_import_torch()
jax, _ = try_import_jax()
# `jnp` is only bound when JAX is actually installed; otherwise it stays None
# and the "jnp.ndarray" forward reference in `TensorType` is never resolved.
jnp = None
if jax is not None:
    jnp = jax.numpy

# Represents a generic tensor type.
# This could be an np.ndarray, tf.Tensor, or a torch.Tensor.
# NOTE: `np.ndarray` (the array *type*) is used here, not `np.array` (the
# array-construction *function*), matching the comment above and making
# introspection of this Union (e.g. via typing.get_args) meaningful.
TensorType = Union[np.ndarray, "jnp.ndarray", "tf.Tensor", "torch.Tensor"]

# Either a plain tensor, or a dict or tuple of tensors (or StructTensors).
TensorStructType = Union[TensorType, dict, tuple]

# A shape of a tensor.
TensorShape = Union[Tuple[int], List[int]]

# A neural network
NetworkType = Union["torch.nn.Module", "tf.keras.Model"]

# An RLModule spec (single-agent or multi-agent).
RLModuleSpecType = Union["RLModuleSpec", "MultiRLModuleSpec"]

# A state dict of an RLlib component (e.g. EnvRunner, Learner, RLModule).
StateDict = Dict[str, Any]

# Represents a fully filled out config of a Algorithm class.
# Note: Policy config dicts are usually the same as AlgorithmConfigDict, but
# parts of it may sometimes be altered in e.g. a multi-agent setup,
# where we have >1 Policies in the same Algorithm.
AlgorithmConfigDict = dict  # @OldAPIStack

# An algorithm config dict that only has overrides. It needs to be combined with
# the default algorithm config to be used.
PartialAlgorithmConfigDict = dict  # @OldAPIStack

# Represents the model config sub-dict of the algo config that is passed to
# the model catalog.
ModelConfigDict = dict  # @OldAPIStack

# Conv2D configuration format.
# Each entry in the outer list represents one Conv2D layer.
# Each inner list has the format: [num_output_filters, kernel, stride], where kernel
# and stride may be single ints (width and height are the same) or 2-tuples (int, int)
# for width and height (different values).
ConvFilterSpec = List[
    Tuple[int, Union[int, Tuple[int, int]], Union[int, Tuple[int, int]]]
]

# Objects that can be created through the `from_config()` util method
# need a config dict with a "type" key, a class path (str), or a type directly.
FromConfigSpec = Union[Dict[str, Any], type, str]

# Represents the env_config sub-dict of the algo config that is passed to
# the env constructor.
EnvConfigDict = dict

# Represents an environment id. These could be:
# - An int index for a sub-env within a vectorized env.
# - An external env ID (str), which changes(!) each episode.
EnvID = Union[int, str]

# Represents a BaseEnv, MultiAgentEnv, ExternalEnv, ExternalMultiAgentEnv,
# VectorEnv, gym.Env, or ActorHandle.
# TODO (sven): Specify this type more strictly (it should just be gym.Env).
EnvType = Union[Any, gym.Env]

# A callable, taking a EnvContext object
# (config dict + properties: `worker_index`, `vector_index`, `num_workers`,
# and `remote`) and returning an env object (or None if no env is used).
EnvCreator = Callable[["EnvContext"], Optional[EnvType]]

# Represents a generic identifier for an agent (e.g., "agent1").
AgentID = Any

# Represents a generic identifier for a policy (e.g., "pol1").
PolicyID = str  # @OldAPIStack
# Represents a generic identifier for a (single-agent) RLModule.
ModuleID = str

# Type of the config.policies dict for multi-agent training.
MultiAgentPolicyConfigDict = Dict[PolicyID, "PolicySpec"]  # @OldAPIStack

# A new stack Episode type: Either single-agent or multi-agent.
EpisodeType = Union["SingleAgentEpisode", "MultiAgentEpisode"]

# Is Policy to train callable.
# @OldAPIStack
IsPolicyToTrain = Callable[[PolicyID, Optional["MultiAgentBatch"]], bool]

# Agent to module mapping and should-module-be-updated.
AgentToModuleMappingFn = Callable[[AgentID, EpisodeType], ModuleID]
ShouldModuleBeUpdatedFn = Union[
    Sequence[ModuleID],
    Callable[[ModuleID, Optional["MultiAgentBatch"]], bool],
]

# State dict of a Policy, mapping strings (e.g. "weights") to some state
# data (TensorStructType).
PolicyState = Dict[str, TensorStructType]  # @OldAPIStack

# Any tf Policy type (static-graph or eager Policy).
TFPolicyV2Type = Type[Union["DynamicTFPolicyV2", "EagerTFPolicyV2"]]  # @OldAPIStack

# Represents an episode id (old and new API stack).
EpisodeID = Union[int, str]

# Represents an "unroll" (maybe across different sub-envs in a vector env).
UnrollID = int  # @OldAPIStack

# A dict keyed by agent ids, e.g. {"agent-1": value}.
MultiAgentDict = Dict[AgentID, Any]

# A dict keyed by env ids that contain further nested dictionaries keyed by
# agent ids. e.g., {"env-1": {"agent-1": value}}.
MultiEnvDict = Dict[EnvID, MultiAgentDict]

# Represents an observation returned from the env.
EnvObsType = Any

# Represents an action passed to the env.
EnvActionType = Any

# Info dictionary returned by calling `reset()` or `step()` on `gymnasium.Env`
# instances. Might be an empty dict.
EnvInfoDict = dict

# Represents a File object
FileType = Any

# Represents a ViewRequirements dict mapping column names (str) to
# ViewRequirement objects.
ViewRequirementsDict = Dict[str, "ViewRequirement"]  # @OldAPIStack

# Represents the result dict returned by Algorithm.train() and algorithm components,
# such as EnvRunners, LearnerGroup, etc.. Also, the MetricsLogger used by all these
# components returns this upon its `reduce()` method call, so a ResultDict can further
# be accumulated (and reduced again) by downstream components.
ResultDict = Dict

# A tf or torch local optimizer object.
LocalOptimizer = Union["torch.optim.Optimizer", "tf.keras.optimizers.Optimizer"]
Optimizer = LocalOptimizer
Param = Union["torch.Tensor", "tf.Variable"]
ParamRef = Hashable
ParamDict = Dict[ParamRef, Param]
ParamList = List[Param]

# A single learning rate or a learning rate schedule (list of sub-lists, each of
# the format: [ts (int), lr_to_reach_by_ts (float)]).
LearningRateOrSchedule = Union[
    float,
    List[List[Union[int, float]]],
    List[Tuple[int, Union[int, float]]],
]

# Dict of tensors returned by compute gradients on the policy, e.g.,
# {"td_error": [...], "learner_stats": {"vf_loss": ..., ...}}, for multi-agent,
# {"policy1": {"learner_stats": ..., }, "policy2": ...}.
GradInfoDict = dict

# Dict of learner stats returned by compute gradients on the policy, e.g.,
# {"vf_loss": ..., ...}. This will always be nested under the "learner_stats"
# key(s) of a GradInfoDict. In the multi-agent case, this will be keyed by
# policy id.
LearnerStatsDict = dict

# List of grads+var tuples (tf) or list of gradient tensors (torch)
# representing model gradients and returned by compute_gradients().
ModelGradients = Union[List[Tuple[TensorType, TensorType]], List[TensorType]]

# Type of dict returned by get_weights() representing model weights.
ModelWeights = dict

# An input dict used for direct ModelV2 calls.
ModelInputDict = Dict[str, TensorType]

# Some kind of sample batch.
SampleBatchType = Union["SampleBatch", "MultiAgentBatch", Dict[str, Any]]

# A (possibly nested) space struct: Either a gym.spaces.Space or a
# (possibly nested) dict|tuple of gym.space.Spaces.
SpaceStruct = Union[gym.spaces.Space, dict, tuple]

# A list of batches of RNN states.
# Each item in this list has dimension [B, S] (S=state vector size)
StateBatches = List[List[Any]]  # @OldAPIStack

# Format of data output from policy forward pass.
# __sphinx_doc_begin_policy_output_type__
PolicyOutputType = Tuple[TensorStructType, StateBatches, Dict]  # @OldAPIStack
# __sphinx_doc_end_policy_output_type__
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
# __sphinx_doc_begin_agent_connector_data_type__
@OldAPIStack
class AgentConnectorDataType:
    """Container for one unit of data flowing through agent connectors.

    Agent connectors both consume and yield instances of this type.

    Args:
        env_id: ID of the environment the data came from.
        agent_id: ID identifying the agent from which the data is received.
        data: The payload. With RLlib's default sampler, the payload is a
            dictionary of arbitrary data columns (obs, rewards, terminateds,
            truncateds, etc).
    """

    def __init__(self, env_id: str, agent_id: str, data: Any):
        # Plain value container: simply record the three fields.
        self.env_id = env_id
        self.agent_id = agent_id
        self.data = data


# __sphinx_doc_end_agent_connector_data_type__
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
# __sphinx_doc_begin_action_connector_output__
@OldAPIStack
class ActionConnectorDataType:
    """Data type that is fed into and yielded from action connectors.

    Args:
        env_id: ID of the environment.
        agent_id: ID to help identify the agent from which the data is received.
        input_dict: Input data that was passed into the policy.
            Sometimes output must be adapted based on the input, for example
            action masking. So the entire input data structure is provided here.
        output: An object of PolicyOutputType. It is composed of the
            action output, the internal state output, and additional data
            fetches.
    """

    def __init__(
        self,
        env_id: str,
        agent_id: str,
        input_dict: TensorStructType,
        output: PolicyOutputType,
    ):
        # Plain value container: record all four fields for downstream
        # action connectors.
        self.env_id = env_id
        self.agent_id = agent_id
        self.input_dict = input_dict
        self.output = output


# __sphinx_doc_end_action_connector_output__
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
# __sphinx_doc_begin_agent_connector_output__
@OldAPIStack
class AgentConnectorsOutput:
    """Final output data type of agent connectors.

    Which fields get populated depends on the AgentConnector settings;
    the branching happens in ViewRequirementAgentConnector.

    Args:
        raw_dict: The raw input dictionary that the sampler can use to
            build episodes and training batches. This raw dict is also
            passed into ActionConnectors in case it contains data useful
            for action adaptation (e.g. action masks).
        sample_batch: The SampleBatch that can be used immediately to
            query the policy for the next action.
    """

    def __init__(
        self, raw_dict: Dict[str, TensorStructType], sample_batch: "SampleBatch"
    ):
        # Plain value container: keep both representations side by side.
        self.raw_dict = raw_dict
        self.sample_batch = sample_batch


# __sphinx_doc_end_agent_connector_output__
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
# Generic type variable for annotating generic utilities in this package.
T = TypeVar("T")
|
janus/lib/libasan.so.6.0.0
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2a8a7995a4d84a8817af8d1604bef621e99d0622df4eda14f6fe5245735a952e
|
| 3 |
+
size 7575272
|
janus/lib/libform.so.6
ADDED
|
Binary file (91.4 kB). View file
|
|
|
janus/lib/libgcc_s.so.1
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d912bad5e511194c15b506fe7eafe4454bc2dc4073a0fdebc60e86af59a0f2bc
|
| 3 |
+
size 475272
|
janus/lib/libhistory.so.8.2
ADDED
|
Binary file (58.5 kB). View file
|
|
|
janus/lib/libpanelw.so.6
ADDED
|
Binary file (20.7 kB). View file
|
|
|
janus/lib/libssl.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3480c91df4e0c1a33514955568641405e37924f680e8ba42f494a209640516c6
|
| 3 |
+
size 775712
|
janus/lib/libstdc++.so.6.0.29
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4f045231ff3a95c2fbfde450575f0ef45d23e95be15193c8729b521fc363ece4
|
| 3 |
+
size 17981480
|
janus/lib/libuuid.so.1
ADDED
|
Binary file (35.9 kB). View file
|
|
|
janus/lib/tcl8.6/encoding/cns11643.enc
ADDED
|
@@ -0,0 +1,1584 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cns11643, double-byte
|
| 2 |
+
D
|
| 3 |
+
2134 0 93
|
| 4 |
+
21
|
| 5 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 6 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 7 |
+
00004E284E364E3F4E854E054E04518251965338536953B64E2A4E874E4951E2
|
| 8 |
+
4E464E8F4EBC4EBE516651E35204529C53B95902590A5B805DDB5E7A5E7F5EF4
|
| 9 |
+
5F505F515F61961D4E3C4E634E624EA351854EC54ECF4ECE4ECC518451865722
|
| 10 |
+
572351E45205529E529D52FD5300533A5C735346535D538653B7620953CC6C15
|
| 11 |
+
53CE57216C3F5E005F0C623762386534653565E04F0E738D4E974EE04F144EF1
|
| 12 |
+
4EE74EF74EE64F1D4F024F054F2256D8518B518C519951E55213520B52A60000
|
| 13 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 14 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 15 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 16 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 17 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 18 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 19 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 20 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 21 |
+
22
|
| 22 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 23 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 24 |
+
000053225304530353075407531E535F536D538953BA53D0598053F653F753F9
|
| 25 |
+
597E53F4597F5B565724590459185932593059345DDF59755E845B825BF95C14
|
| 26 |
+
5FD55FD45FCF625C625E626462615E815E835F0D5F52625A5FCA5FC7623965EE
|
| 27 |
+
624F65E7672F6B7A6C39673F673C6C376C446C45738C75927676909390926C4B
|
| 28 |
+
6C4C4E214E204E224E684E894E984EF94EEF7F5182784EF84F064F034EFC4EEE
|
| 29 |
+
4F1690994F284F1C4F074F1A4EFA4F17514A962351724F3B51B451B351B20000
|
| 30 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 31 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 32 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 33 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 34 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 35 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 36 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 37 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 38 |
+
23
|
| 39 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 40 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 41 |
+
00004F6451E84F675214520F5215521852A84F33534B534F518F5350521C538B
|
| 42 |
+
522153BE52AE53D2541653FF538E540054305405541354155445541956E35735
|
| 43 |
+
57365731573258EE59054E545447593656E756E55741597A574C5986574B5752
|
| 44 |
+
5B865F535C1859985C3D5C78598E59A25990598F5C8059A15E085B925C285C2A
|
| 45 |
+
5C8D5EF55F0E5C8B5C895C925FD35FDA5C935FDB5DE0620F625D625F62676257
|
| 46 |
+
9F505E8D65EB65EA5F7867375FD2673267366B226BCE5FEE6C586C516C770000
|
| 47 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 48 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 49 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 50 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 51 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 52 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 53 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 54 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 55 |
+
24
|
| 56 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 57 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 58 |
+
00006C3C5FFA6C5A5FF76C53706F7072706E6283628C707372B172B26287738F
|
| 59 |
+
627B627A6270793C6288808D808E6272827B65F08D718FB99096909A67454E24
|
| 60 |
+
4E7167554E9C4F454F4A4F394F37674B4F324F426C1A4F444F4B6C6B4F404F35
|
| 61 |
+
4F3151516C6F5150514E6C6D6C87519D6C9C51B551B851EC522352275226521F
|
| 62 |
+
522B522052B452B372C65325533B537473957397739373947392544D75397594
|
| 63 |
+
543A7681793D5444544C5423541A5432544B5421828F54345449545054220000
|
| 64 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 65 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 66 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 67 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 68 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 69 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 70 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 71 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 72 |
+
25
|
| 73 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 74 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 75 |
+
0000543F5451545A542F8FC956E956F256F356EF56ED56EC56E6574896285744
|
| 76 |
+
573F573C575357564F85575F5743575857574F744F894F8457464F4C573D4F6A
|
| 77 |
+
57425754575558F158F258F0590B9EA656F1593D4F955994598C519E599C51BE
|
| 78 |
+
5235599F5233599B52315989599A530B658853925B8D54875BFE5BFF5BFD5C2B
|
| 79 |
+
54885C845C8E5C9C5465546C5C855DF55E09546F54615E0B54985E925E905F03
|
| 80 |
+
56F75F1E5F6357725FE75FFE5FE65FDC5FCE57805FFC5FDF5FEC5FF657620000
|
| 81 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 82 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 83 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 84 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 85 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 86 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 87 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 88 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 89 |
+
26
|
| 90 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 91 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 92 |
+
00005FF25FF05FF95945621359BA59CF623B623C628259C159B659BC6278628B
|
| 93 |
+
59B1629E62A5629B629C6299628D6285629D62755C445C475CAE65F65CA05CB5
|
| 94 |
+
5CAF66F5675B5C9F675467525CA267586744674A67615CB66C7F6C916C9E5E14
|
| 95 |
+
6C6E6C7C6C9F6C755F246C566CA26C795F7D6CA15FE56CAA6CA0601970797077
|
| 96 |
+
707E600A7075707B7264601E72BB72BC72C772B972BE72B66011600C7398601C
|
| 97 |
+
6214623D62AD7593768062BE768376C076C162AE62B377F477F562A97ACC0000
|
| 98 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 99 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 100 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 101 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 102 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 103 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 104 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 105 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 106 |
+
27
|
| 107 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 108 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 109 |
+
00007ACD7CFA809F80918097809466048286828C65FB8295660B866C66058FB5
|
| 110 |
+
8FBE8FC766F68FC190A990A4678E6792677690A896279626962B963396349629
|
| 111 |
+
4E3D679F4E9D4F934F8A677D67814F6D4F8E4FA04FA24FA14F9F4FA36C1D4F72
|
| 112 |
+
6CEC4F8C51566CD96CB651906CAD6CE76CB751ED51FE522F6CC3523C52345239
|
| 113 |
+
52B952B552BF53556C9D5376537A53936D3053C153C253D554856CCF545F5493
|
| 114 |
+
548954799EFE548F5469546D70915494546A548A708356FD56FB56F872D80000
|
| 115 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 116 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 117 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 118 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 119 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 120 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 121 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 122 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 123 |
+
28
|
| 124 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 125 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 126 |
+
000056FC56F6576557815763576772D1576E5778577F73A673A258F3594B594C
|
| 127 |
+
74DD74E8753F59AD753E59C4759859C259B076F176F076F577F859BF77F959C9
|
| 128 |
+
59B859AC7942793F79C559B759D77AFB5B607CFD5B965B9E5B945B9F5B9D80B5
|
| 129 |
+
5C005C1982A082C05C495C4A82985CBB5CC182A782AE82BC5CB95C9E5CB45CBA
|
| 130 |
+
5DF65E135E125E7782C35E9882A25E995E9D5EF8866E5EF98FD25F065F218FCD
|
| 131 |
+
5F255F558FD790B290B45F845F8360306007963D6036963A96434FCD5FE90000
|
| 132 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 133 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 134 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 135 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 136 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 137 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 138 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 139 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 140 |
+
29
|
| 141 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 142 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 143 |
+
0000603D60084FC94FCB62BA62B24FDC62B762E462A74FDB4FC74FD662D562E1
|
| 144 |
+
62DD62A662C162C562C062DF62E062DE53976589539965A665BA54A165FF54A5
|
| 145 |
+
66176618660165FE54AE670C54B6676B67966782678A54BC67A354BE67A2678F
|
| 146 |
+
54B067F967806B266B276B686B69579D6B816BB46BD1578F57996C1C579A5795
|
| 147 |
+
58F4590D59536C976C6C6CDF5A006CEA59DD6CE46CD86CB26CCE6CC859F2708B
|
| 148 |
+
70887090708F59F570877089708D70815BA8708C5CD05CD872405CD75CCB0000
|
| 149 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 150 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 151 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 152 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 153 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 154 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 155 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 156 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 157 |
+
2A
|
| 158 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 159 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 160 |
+
00007265726672685CC95CC772CD72D372DB5CD472CF73A773A3739E5CDF73AF
|
| 161 |
+
5DF95E2173AA739C5E2075427544753B75415E9B759B759E5F0779C479C379C6
|
| 162 |
+
6037603979C7607279CA604560537ACF7C767C747CFF7CFC6042605F7F5980A8
|
| 163 |
+
6058606680B0624280B362CF80A480B680A780AC630380A65367820E82C4833E
|
| 164 |
+
829C63006313631462FA631582AA62F082C9654365AA82A682B2662166326635
|
| 165 |
+
8FCC8FD98FCA8FD88FCF90B7661D90AD90B99637670F9641963E96B697510000
|
| 166 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 167 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 168 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 169 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 170 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 171 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 172 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 173 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 174 |
+
2B
|
| 175 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 176 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 177 |
+
000097634E574E794EB24EB04EAF4EB14FD24FD567E44FBE4FB84FB04FB14FC8
|
| 178 |
+
67F667EE4FC64FCC4FE54FE34FB4516A67B2519F67C651C167CC51C251C35245
|
| 179 |
+
524867C967CA524F67EA67CB52C552CA52C453275358537D6BE053DD53DC53DA
|
| 180 |
+
53D954B96D1F54D054B454CA6D0A54A354DA54A46D1954B2549E549F54B56D1D
|
| 181 |
+
6D4254CD6D1854CC6D03570057AC5791578E578D579257A1579057A657A8709F
|
| 182 |
+
579C579657A770A170B470B570A958F572495909590872705952726E72CA0000
|
| 183 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 184 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 185 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 186 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 187 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 188 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 189 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 190 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 191 |
+
2C
|
| 192 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 193 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 194 |
+
000059DF72E859EB59EF59F059D55A0D5A0459F95A0259F859E259D959E75B6A
|
| 195 |
+
73B473EB5BAB73C75C1B5C2F73C6663C73CB74EC74EE5CD15CDC5CE65CE15CCD
|
| 196 |
+
76795CE25CDD5CE55DFB5DFA5E1E76F75EA176FA77E75EFC5EFB5F2F78127805
|
| 197 |
+
5F66780F780E7809605C7813604E6051794B794560236031607C605279D66060
|
| 198 |
+
604A60617AD162187B017C7A7C787C797C7F7C807C81631F631762EA63216304
|
| 199 |
+
63057FBE6531654465408014654265BE80C76629661B80C86623662C661A0000
|
| 200 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 201 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 202 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 203 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 204 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 205 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 206 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 207 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 208 |
+
2D
|
| 209 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 210 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 211 |
+
00006630663B661E6637663880C9670E80D780E667E867D6822167C767BC6852
|
| 212 |
+
67BF67D567FE836367FB833A67B168016805680067D782F26B2A6B6B82FB82F6
|
| 213 |
+
82F082EA6BE182E082FA6D236CFF6D146D056D136D066D21884E6D156CAF6CF4
|
| 214 |
+
6D026D458A076D268FE36D448FEE6D2470A590BD70A390D570A270BB70A070AA
|
| 215 |
+
90C891D470A870B670B270A79653964A70B9722E5005723C5013726D5030501B
|
| 216 |
+
72E772ED503372EC72E572E24FF773C473BD73CF73C973C173D0503173CE0000
|
| 217 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 218 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 219 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 220 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 221 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 222 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 223 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 224 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 225 |
+
2E
|
| 226 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 227 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 228 |
+
000074ED74EB519374EF754975507546754A5261754D75A6525E525F525575A8
|
| 229 |
+
52CD530E76C776FF54E276FD77E6780A54F37804780B78075504781578085511
|
| 230 |
+
79D379D479D079D77A7C54F854E07A7D7A837A8257017AD47AD57AD37AD07AD2
|
| 231 |
+
7AFE7AFC7C777C7C7C7B57B657BF57C757D057B957C1590E594A7F8F80D35A2D
|
| 232 |
+
80CB80D25A0F810980E280DF80C65B6C822482F782D882DD5C565C5482F882FC
|
| 233 |
+
5CEE5CF182E95D0082EE5E2982D0830E82E2830B82FD517986765F6786780000
|
| 234 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 235 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 236 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 237 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 238 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 239 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 240 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 241 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 242 |
+
2F
|
| 243 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 244 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 245 |
+
0000605A60678675867D6088884288666081898C8A0560958A0660978C9F609C
|
| 246 |
+
8FF18FE78FE98FEF90C290BC632C90C690C06336634390CD90C9634B90C4633C
|
| 247 |
+
958163419CEC50324FF9501D4FFF50044FF05003635150024FFC4FF250245008
|
| 248 |
+
5036502E65C35010503850394FFD50564FFB51A351A651A1681A684951C751C9
|
| 249 |
+
5260526452595265526752575263682B5253682F52CF684452CE52D052D152CC
|
| 250 |
+
68266828682E550D54F46825551354EF54F554F9550255006B6D808255180000
|
| 251 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 252 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 253 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 254 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 255 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 256 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 257 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 258 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 259 |
+
30
|
| 260 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 261 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 262 |
+
000054F054F66BE86BE355196BE7570557C96D6357B757CD6D0D6D616D9257BE
|
| 263 |
+
57BB6D6D57DB57C857C457C557D157CA57C06D676D605A215A2A6D7C5A1D6D82
|
| 264 |
+
5A0B6D2F6D686D8B6D7E5A226D846D165A246D7B5A145A316D905A2F5A1A5A12
|
| 265 |
+
70DD70CB5A2670E270D75BBC5BBB5BB75C055C065C525C5370C770DA5CFA5CEB
|
| 266 |
+
72425CF35CF55CE95CEF72FA5E2A5E305E2E5E2C5E2F5EAF5EA973D95EFD5F32
|
| 267 |
+
5F8E5F935F8F604F609973D2607E73D46074604B6073607573E874DE60560000
|
| 268 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 269 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 270 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 271 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 272 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 273 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 274 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 275 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 276 |
+
31
|
| 277 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 278 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 279 |
+
000060A9608B60A6755B609360AE609E60A7624575C075BF632E75BA63526330
|
| 280 |
+
635B771B6319631B77126331635D6337633563537722635C633F654B78227835
|
| 281 |
+
658B7828659A66506646664E6640782A664B6648795B66606644664D79526837
|
| 282 |
+
682479EC79E0681B683679EA682C681968566847683E681E7A8B681568226827
|
| 283 |
+
685968586855683068236B2E6B2B6B306B6C7B096B8B7C846BE96BEA6BE56D6B
|
| 284 |
+
7C8D7C856D736D577D117D0E6D5D6D566D8F6D5B6D1C6D9A6D9B6D997F610000
|
| 285 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 286 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 287 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 288 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 289 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 290 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 291 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 292 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 293 |
+
32
|
| 294 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 295 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 296 |
+
00006D816D717F5D7F5B6D726D5C6D9670C470DB70CC70D070E370DF80F270D6
|
| 297 |
+
70EE70D580FB81008201822F727A833372F573028319835173E273EC73D573F9
|
| 298 |
+
73DF73E683228342834E831B73E473E174F3834D831683248320755675557558
|
| 299 |
+
7557755E75C38353831E75B4834B75B18348865376CB76CC772A86967716770F
|
| 300 |
+
869E8687773F772B770E772486857721771877DD86A7869578247836869D7958
|
| 301 |
+
79598843796279DA79D9887679E179E579E879DB886F79E279F08874887C0000
|
| 302 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 303 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 304 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 305 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 306 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 307 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 308 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 309 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 310 |
+
33
|
| 311 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 312 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 313 |
+
00008A128C477ADA7ADD8CA47ADB7ADC8D788DB57B0D7B0B7B147C8E7C868FF5
|
| 314 |
+
7C877C837C8B90048FFC8FF690D67D2490D990DA90E37D257F627F937F997F97
|
| 315 |
+
90DC90E47FC47FC6800A91D591E28040803C803B80F680FF80EE810481038107
|
| 316 |
+
506A506180F750605053822D505D82278229831F8357505B504A506250158321
|
| 317 |
+
505F506983188358506450465040506E50738684869F869B868986A68692868F
|
| 318 |
+
86A0884F8878887A886E887B88848873555055348A0D8A0B8A19553655350000
|
| 319 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 320 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 321 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 322 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 323 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 324 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 325 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 326 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 327 |
+
34
|
| 328 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 329 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 330 |
+
0000553055525545550C8FF990099008553990DE9151553B554091DB91DF91DE
|
| 331 |
+
91D691E095859660965957F4965657ED57FD96BD57F8580B5042505958075044
|
| 332 |
+
50665052505450715050507B507C505857E758015079506C507851A851D151CF
|
| 333 |
+
5268527652D45A5553A053C45A385558554C55685A5F55495A6C5A53555D5529
|
| 334 |
+
5A43555455535A44555A5A48553A553F552B57EA5A4C57EF5A695A4757DD57FE
|
| 335 |
+
5A4257DE57E65B6E57E857FF580358F768A6591F5D1A595B595D595E5D0D0000
|
| 336 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 337 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 338 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 339 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 340 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 341 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 342 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 343 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 344 |
+
35
|
| 345 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 346 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 347 |
+
00005D265A2B5D0F5A3B5D125D235A615A3A5A6E5A4B5A6B5EB45EB95A455A4E
|
| 348 |
+
5A685A3D5A715A3F5A6F5A7560905A735A2C5A595A545A4F5A6360CF60E45BC8
|
| 349 |
+
60DD5BC360B15C5B5C6160CA5D215D0A5D0960C05D2C5D08638A63825D2A5D15
|
| 350 |
+
639E5D105D1363975D2F5D18636F5DE35E395E355E3A5E32639C636D63AE637C
|
| 351 |
+
5EBB5EBA5F345F39638563816391638D6098655360D066656661665B60D760AA
|
| 352 |
+
666260A160A4688760EE689C60E7686E68AE60DE6956686F637E638B68A90000
|
| 353 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 354 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 355 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 356 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 357 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 358 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 359 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 360 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 361 |
+
36
|
| 362 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 363 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 364 |
+
0000687563796386639368776373636A686B636C68AA637F687163B263BA6896
|
| 365 |
+
688B6366637468A4655A687B654E654D658D658E65AD6B3365C765CA6B9165C9
|
| 366 |
+
6B8D65E366576C2A66636667671A671967166DAC6DE9689E68B6689868736E00
|
| 367 |
+
689A688E68B768DB68A5686C68C168846DDB6DF46895687A68996DF068B868B9
|
| 368 |
+
68706DCF6B356DD06B906BBB6BED6DD76DCD6DE36DC16DC36DCE70F771176DAD
|
| 369 |
+
6E0470F06DB970F36DE770FC6E086E0671136E0A6DB070F66DF86E0C710E0000
|
| 370 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 371 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 372 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 373 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 374 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 375 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 376 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 377 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 378 |
+
37
|
| 379 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 380 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 381 |
+
00006DB1727B6E026E076E096E016E176DFF6E12730A730871037107710170F5
|
| 382 |
+
70F1710870F2710F740170FE7407740073FA731A7310730E740273F374087564
|
| 383 |
+
73FB75CE75D275CF751B752375617568768F756775D37739772F769077317732
|
| 384 |
+
76D576D776D67730773B7726784877407849771E784A784C782678477850784B
|
| 385 |
+
7851784F78427846796B796E796C79F279F879F179F579F379F97A907B357B3B
|
| 386 |
+
7A9A7A937A917AE17B247B337B217B1C7B167B177B367B1F7B2F7C937C990000
|
| 387 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 388 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 389 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 390 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 391 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 392 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 393 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 394 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 395 |
+
38
|
| 396 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 397 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 398 |
+
00007C9A7C9C7C947D497C967D347D377D3D7D2D7D367D4C7D457D2C7D487D41
|
| 399 |
+
7D477F3B7D3F7D4A7D3B7D288008801A7F9C801D7F9B8049804580447C9B7FD1
|
| 400 |
+
7FC7812A812E801F801E81318047811A8134811781258119811B831D83718384
|
| 401 |
+
8380837283A18127837983918211839F83AD823A8234832382748385839C83B7
|
| 402 |
+
8658865A8373865786B2838F86AE8395839983758845889C889488A3888F88A5
|
| 403 |
+
88A988A6888A88A0889089928991899483B08A268A328A2883AE83768A1C0000
|
| 404 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 405 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 406 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 407 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 408 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 409 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 410 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 411 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 412 |
+
39
|
| 413 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 414 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 415 |
+
000086568A2B8A2086C28A2986C586BA86B08A218C3A86B38C5B8C588C7C86BB
|
| 416 |
+
8CA68CAE8CAD8D6588528D7E88958D7C8D7F8D7A8DBD889188A18DC08DBB8EAD
|
| 417 |
+
8EAF8ED6889788A488AC888C88938ED9898289D69012900E90258A27901390EE
|
| 418 |
+
8C3990AB90F78C5D9159915491F291F091E591F68DC28DB995878DC1965A8EDE
|
| 419 |
+
8EDD966E8ED78EE08EE19679900B98E198E6900C9EC49ED24E8090F04E81508F
|
| 420 |
+
50975088508990EC90E950815160915A91535E4251D391F491F151D251D60000
|
| 421 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 422 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 423 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 424 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 425 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 426 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 427 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 428 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 429 |
+
3A
|
| 430 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 431 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 432 |
+
0000527391F9527091EB91F791E853A853A653C5559755DE966D966B559655B4
|
| 433 |
+
96BF55859804559B55A0509B555950945586508B50A355AF557A508E509D5068
|
| 434 |
+
559E509255A9570F570E581A5312581F53A4583C5818583E582655AD583A5645
|
| 435 |
+
5822559358FB5963596455815AA85AA35A825A885AA15A855A9855955A99558E
|
| 436 |
+
5A895A815A965A80581E58275A91582857F5584858255ACF581B5833583F5836
|
| 437 |
+
582E58395A875AA0582C5A7959615A865AAB5AAA5AA45A8D5A7E5A785BD50000
|
| 438 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 439 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 440 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 441 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 442 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 443 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 444 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 445 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 446 |
+
3B
|
| 447 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 448 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 449 |
+
00005A7C5AA55AAC5C1E5C5F5C5E5D445D3E5A975D485D1C5AA95D5B5D4D5A8C
|
| 450 |
+
5A9C5D575A935D535D4F5BCD5D3B5D465BD15BCA5E465E475C305E485EC05EBD
|
| 451 |
+
5EBF5D4B5F115D355F3E5F3B5D555F3A5D3A5D525D3D5FA75D5960EA5D396107
|
| 452 |
+
6122610C5D325D3660B360D660D25E4160E360E560E95FAB60C9611160FD60E2
|
| 453 |
+
60CE611E61206121621E611663E263DE63E660F860FC60FE60C163F8611863FE
|
| 454 |
+
63C163BF63F763D1655F6560656163B063CE65D163E863EF667D666B667F0000
|
| 455 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 456 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 457 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 458 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 459 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 460 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 461 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 462 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 463 |
+
3C
|
| 464 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 465 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 466 |
+
000063CA63E066736681666D6669646163DF671E68ED63DC63C463D863D36903
|
| 467 |
+
63C768FE68E5691E690263D763D9690968CA690065646901691868E268CF659D
|
| 468 |
+
692E68C568FF65D2691C68C3667B6B6F66716B6E666A6BBE67016BF46C2D6904
|
| 469 |
+
6DB66E756E1E68EA6E18690F6E4868F76E4F68E46E426E6A6E706DFE68E16907
|
| 470 |
+
6E6D69086E7B6E7E6E5968EF6E5769146E806E5068FD6E296E766E2A6E4C712A
|
| 471 |
+
68CE7135712C7137711D68F468D1713868D47134712B7133712771246B3B0000
|
| 472 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 473 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 474 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 475 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 476 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 477 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 478 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 479 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 480 |
+
3D
|
| 481 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 482 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 483 |
+
0000712D7232728372827287730673247338732A732C732B6DFC732F73287417
|
| 484 |
+
6E496E88741974386E45741F7414743C73F7741C74157418743974F975246E51
|
| 485 |
+
6E3B6E03756E756D7571758E6E6175E56E286E606E716E6B769476B36E3076D9
|
| 486 |
+
6E657748774977436E776E55774277DF6E66786378766E5A785F786679667971
|
| 487 |
+
712E713179767984797579FF7A0771287A0E7A09724B725A7288728972867285
|
| 488 |
+
7AE77AE27B55733073227B437B577B6C7B427B5373267B417335730C7CA70000
|
| 489 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 490 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 491 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 492 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 493 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 494 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 495 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 496 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 497 |
+
3E
|
| 498 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 499 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 500 |
+
00007CA07CA67CA47D74741A7D59742D7D607D577D6C7D7E7D6474207D5A7D5D
|
| 501 |
+
752F756F756C7D767D4D7D7575E67FD37FD675E475D78060804E8145813B7747
|
| 502 |
+
814881428149814081148141774C81EF81F68203786483ED785C83DA841883D2
|
| 503 |
+
8408787084007868785E786284178346841483D38405841F8402841683CD83E6
|
| 504 |
+
7AE6865D86D586E17B447B487B4C7B4E86EE884788467CA27C9E88BB7CA188BF
|
| 505 |
+
88B47D6388B57D56899A8A437D4F7D6D8A5A7D6B7D527D548A358A388A420000
|
| 506 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 507 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 508 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 509 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 510 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 511 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 512 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 513 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 514 |
+
3F
|
| 515 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 516 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 517 |
+
00008A498A5D8A4B8A3D7F667FA27FA07FA18C608C5E8C7F8C7E8C8380D48CB1
|
| 518 |
+
8D878152814F8D888D83814D813A8D868D8B8D828DCA8DD28204823C8DD48DC9
|
| 519 |
+
8EB0833B83CF83F98EF28EE48EF38EEA83E78EFD83FC8F9D902B902A83C89028
|
| 520 |
+
9029902C840183DD903A90309037903B83CB910A83D683F583C991FE922083DE
|
| 521 |
+
920B84069218922283D5921B920883D1920E9213839A83C3959583EE83C483FB
|
| 522 |
+
968C967B967F968183FE968286E286E686D386E386DA96EE96ED86EB96EC0000
|
| 523 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 524 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 525 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 526 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 527 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 528 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 529 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 530 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 531 |
+
40
|
| 532 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 533 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 534 |
+
0000975F976F86D7976D86D188488856885588BA88D798F088B888C088BE9AA9
|
| 535 |
+
88BC88B79AE04EB7890188C950CC50BC899750AA50B989DB50AB50C350CD517E
|
| 536 |
+
527E52798A588A4452E152E052E7538053AB53AA53A953E055EA8C8055D78CBE
|
| 537 |
+
8CB055C157158D84586C8D89585C58505861586A5869585658605866585F5923
|
| 538 |
+
596659688EEF8EF75ACE8EF95AC55AC38EE58EF55AD08EE88EF68EEB8EF18EEC
|
| 539 |
+
8EF45B745B765BDC5BD75BDA5BDB91045C205D6D5D6690F95D645D6E91000000
|
| 540 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 541 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 542 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 543 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 544 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 545 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 546 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 547 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 548 |
+
41
|
| 549 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 550 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 551 |
+
00005D605F425F5A5F6E9164915F6130613A612A614361196131921A613D920F
|
| 552 |
+
920C92006408643264389206643192276419921C6411921992176429641D957B
|
| 553 |
+
958D958C643C96876446644796899683643A640796C8656B96F16570656D9770
|
| 554 |
+
65E4669398A998EB9CE69EF9668F4E844EB6669250BF668E50AE694650CA50B4
|
| 555 |
+
50C850C250B050C150BA693150CB50C9693E50B8697C694352786973527C6955
|
| 556 |
+
55DB55CC6985694D69506947696769366964696155BF697D6B446B406B710000
|
| 557 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 558 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 559 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 560 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 561 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 562 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 563 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 564 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 565 |
+
42
|
| 566 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 567 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 568 |
+
00006B736B9C55C855F255CD6BC155C26BFA6C316C325864584F6EB86EA8586F
|
| 569 |
+
6E916EBB585D6E9A5865585B6EA9586358716EB56E6C6EE85ACB6EDD6EDA6EE6
|
| 570 |
+
6EAC5AB05ABF5AC86ED96EE36EE96EDB5ACA716F5AB65ACD71485A90714A716B
|
| 571 |
+
5BD9714F715771745D635D4A5D6571457151716D5D6872517250724E5E4F7341
|
| 572 |
+
5E4A732E73465EC574275EC674487453743D5FAF745D74566149741E74477443
|
| 573 |
+
74587449612E744C7445743E61297501751E91686223757A75EE760276970000
|
| 574 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 575 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 576 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 577 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 578 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 579 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 580 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 581 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 582 |
+
43
|
| 583 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 584 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 585 |
+
00007698641064126409775D77647753775878827890788A6439787A787D6423
|
| 586 |
+
788B787864306428788D788878927881797E798364256427640B7980641B642E
|
| 587 |
+
64217A0F656F65927A1D66867AA17AA466907AE97AEA66997B627B6B67207B5E
|
| 588 |
+
695F7B79694E69627B6F7B686945696A7CAE6942695769597CB069487D906935
|
| 589 |
+
7D8A69337D8B7D997D9569787D877D787D977D897D986976695869417FA3694C
|
| 590 |
+
693B694B7FDD8057694F8163816A816C692F697B693C815D81756B43815F0000
|
| 591 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 592 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 593 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 594 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 595 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 596 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 597 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 598 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 599 |
+
44
|
| 600 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 601 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 602 |
+
00006B48817D816D6BFB6BFC8241844F84846E9B847F6EC88448842A847B8472
|
| 603 |
+
8464842E845C84536EC6844184C86EC184628480843E848384716EA6844A8455
|
| 604 |
+
84586EC36EDC6ED886FC86FD87156E8D871686FF6EBF6EB36ED0885888CF88E0
|
| 605 |
+
6EA371477154715289E78A6A8A80715D8A6F8A6571788A788A7D8A8871587143
|
| 606 |
+
8A648A7E715F8A678C638C88714D8CCD724F8CC9728C8DED7290728E733C7342
|
| 607 |
+
733B733A73408EB1734974448F048F9E8FA090439046904890459040904C0000
|
| 608 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 609 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 610 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 611 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 612 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 613 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 614 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 615 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 616 |
+
45
|
| 617 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 618 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 619 |
+
000074427446910C9113911574FF916B9167925D9255923569839259922F923C
|
| 620 |
+
928F925C926A9262925F926B926E923B92449241959A7699959976DD7755775F
|
| 621 |
+
968F77529696775A7769776796F496FC776D9755788797797894788F788497EE
|
| 622 |
+
97F57886980B788398F37899788098F798FF98F5798298EC98F17A117A18999A
|
| 623 |
+
7A129AE29B3D9B5D9CE87A1B9CEB9CEF9CEE9E819F1450D050D950DC50D87B69
|
| 624 |
+
50E150EB7B737B7150F450E250DE7B767B637CB251F47CAF7D887D8652ED0000
|
| 625 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 626 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 627 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 628 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 629 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 630 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 631 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 632 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 633 |
+
46
|
| 634 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 635 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 636 |
+
000052EA7D7F53327D7A53AE53B07D8355FB5603560B7D8456077D9255F87F6B
|
| 637 |
+
5628561E7F6C5618561156515605571758928164588C817758785884587358AD
|
| 638 |
+
58975895587758725896588D59108161596C82495AE782405AE4824584F15AEF
|
| 639 |
+
5626847684795AF05D7B84655D83844084865D8B5D8C844D5D785E5284598474
|
| 640 |
+
5ED05ECF85075FB35FB4843A8434847A617B8478616F6181613C614261386133
|
| 641 |
+
844261606169617D6186622C62288452644C84C56457647C8447843664550000
|
| 642 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 643 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 644 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 645 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 646 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 647 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 648 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 649 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 650 |
+
47
|
| 651 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 652 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 653 |
+
000064626471646A6456643B6481846E644F647E646486F7870C86FA86D686F5
|
| 654 |
+
657186F8870E66A5669A669C870D66A688D666A4698F69C569C8699269B288CC
|
| 655 |
+
88D0898569E369C069D669D1699F69A269D289DC89E68A7669E169D5699D8A3F
|
| 656 |
+
8A7769988A846B746BA18A816EF06EF38C3C8C4B6F1B6F0C6F1D6F346F286F17
|
| 657 |
+
8C856F446F426F046F116EFA6F4A7191718E8D93718B718D717F718C717E717C
|
| 658 |
+
71838DEE71888DE98DE372948DE773557353734F7354746C7465746674610000
|
| 659 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 660 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 661 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 662 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 663 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 664 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 665 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 666 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 667 |
+
48
|
| 668 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 669 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 670 |
+
0000746B746874768F0B7460903F74747506760E91107607910F911176B99114
|
| 671 |
+
76B776E2916E7774777777767775923A777877719265777A715B777B78A678AE
|
| 672 |
+
78B8926C924F926078B178AF923679897987923192547A2992507A2A924E7A2D
|
| 673 |
+
7A2C92567A32959F7AEC7AF07B817B9E7B8396917B9296CE7BA37B9F7B9396F5
|
| 674 |
+
7B867CB87CB79772980F980D980E98AC7DC87DB699AF7DD199B07DA87DAB9AAB
|
| 675 |
+
7DB37DCD9CED7DCF7DA49EFD50E67F417F6F7F7150F350DB50EA50DD50E40000
|
| 676 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 677 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 678 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 679 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 680 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 681 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 682 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 683 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 684 |
+
49
|
| 685 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 686 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 687 |
+
000050D38023805B50EF8061805F818152805281818482135330824A824C5615
|
| 688 |
+
560C561284BD8495561C849284C35602849684A584B584B384A384E484D884D5
|
| 689 |
+
589884B784AD84DA84938736587A58875891873D872B87478739587B8745871D
|
| 690 |
+
58FE88FF88EA5AEE88F55AD5890088ED890388E95AF35AE289EA5ADB8A9B8A8E
|
| 691 |
+
8AA25AD98A9C8A948A908AA98AAC5C638A9F5D805D7D8A9D5D7A8C675D775D8A
|
| 692 |
+
8CD08CD68CD48D988D9A8D975D7F5E585E598E0B8E088E018EB48EB35EDC0000
|
| 693 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 694 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 695 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 696 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 697 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 698 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 699 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 700 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 701 |
+
4A
|
| 702 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 703 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 704 |
+
00008FA18FA25ED2905A5F449061905F5FB6612C9125917B9176917C61739289
|
| 705 |
+
92F692B192AD929292819284617A92AE9290929E616A6161615695A295A7622B
|
| 706 |
+
642B644D645B645D96A0969D969F96D0647D96D1646664A6975964829764645C
|
| 707 |
+
644B64539819645098149815981A646B645964656477990665A098F89901669F
|
| 708 |
+
99BE99BC99B799B699C069C999B869CE699669B099C469BC99BF69999ADA9AE4
|
| 709 |
+
9AE99AE89AEA9AE569BF9B2669BD69A49B4069B969CA699A69CF69B369930000
|
| 710 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 711 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 712 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 713 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 714 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 715 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 716 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 717 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 718 |
+
4B
|
| 719 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 720 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 721 |
+
000069AA9EBD699E69D969976990510E69B550F769C650FC510D510151DA51D9
|
| 722 |
+
51DB5286528E52EE533353B16EF15647562D56546F37564B5652563156445656
|
| 723 |
+
5650562B6F18564D5637564F58A258B76F7358B26EEE58AA58B558B06F3C58B4
|
| 724 |
+
58A458A76F0E59265AFE6EFD5B046F395AFC6EFC5B065B0A5AFA5B0D5B005B0E
|
| 725 |
+
7187719071895D9171855D8F5D905D985DA45D9B5DA35D965DE45E5A72957293
|
| 726 |
+
5E5E734D5FB86157615C61A661956188747261A3618F75006164750361590000
|
| 727 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 728 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 729 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 730 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 731 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 732 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 733 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 734 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 735 |
+
4C
|
| 736 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 737 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 738 |
+
00006178761661856187619E7611760A6198619C7781777C622F6480649B648E
|
| 739 |
+
648D649464C678B264A8648378AD64B9648664B464AF649178A064AA64A164A7
|
| 740 |
+
66B666B3798B66BC66AC799466AD6A0E79886A1C6A1A7A2B7A4A6A0B7A2F69EF
|
| 741 |
+
6A0C69F06A227AAC69D87B886A1269FA7B916A2A7B966A107B8C7B9B6A2969F9
|
| 742 |
+
69EA6A2C6A247BA469E96B526B4F6B537CBA7DA76F106F656F757DAA7DC17DC0
|
| 743 |
+
7DC56FD07DCE6F5C6F3D6F717DCC6F916F0B6F796F816F8F7DA66F596F740000
|
| 744 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 745 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 746 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 747 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 748 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 749 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 750 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 751 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 752 |
+
4D
|
| 753 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 754 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 755 |
+
00007DA171AE7F7371A371AD7FE57FDE71AB71A671A2818952F2725772557299
|
| 756 |
+
734B747A8215849784A4748C748484BA84CE74827493747B84AB750984B484C1
|
| 757 |
+
84CD84AA849A84B1778A849D779084BB78C678D378C078D278C778C284AF799F
|
| 758 |
+
799D799E84B67A4184A07A387A3A7A4284DB84B07A3E7AB07BAE7BB38728876B
|
| 759 |
+
7BBF872E871E7BCD87197BB28743872C8741873E8746872087327CC47CCD7CC2
|
| 760 |
+
7CC67CC37CC97CC787427DF887277DED7DE2871A873087117DDC7E027E010000
|
| 761 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 762 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 763 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 764 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 765 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 766 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 767 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 768 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 769 |
+
4E
|
| 770 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 771 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 772 |
+
000088F27DD688FE7DE47DFE88F67E007DFC7DFD88EB7DF57DFF899F7DEB7DE5
|
| 773 |
+
7F787FAE7FE78A998065806A80668068806B819481A18192819681938D968E09
|
| 774 |
+
85018DFF84F88DFD84F58E0385048E068E058DFE8E00851B85038533853484ED
|
| 775 |
+
9123911C853591228505911D911A91249121877D917A91729179877192A5885C
|
| 776 |
+
88E6890F891B92A089A989A589EE8AB1929A8ACC8ACE92978AB792A38AB58AE9
|
| 777 |
+
8AB492958AB38AC18AAF8ACA8AD09286928C92998C8E927E92878CE98CDB0000
|
| 778 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 779 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 780 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 781 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 782 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 783 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 784 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 785 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 786 |
+
4F
|
| 787 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 788 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 789 |
+
0000928B8CEB8DA496A18DA28D9D977D977A977E97838E2A8E28977B97848EB8
|
| 790 |
+
8EB68EB98EB78F228F2B8F278F198FA499078FB3999C9071906A99BB99BA9188
|
| 791 |
+
918C92BF92B892BE92DC92E59B3F9B6092D492D69CF192DA92ED92F392DB5103
|
| 792 |
+
92B992E292EB95AF50F695B295B3510C50FD510A96A396A552F152EF56485642
|
| 793 |
+
970A563597879789978C97EF982A98225640981F563D9919563E99CA99DA563A
|
| 794 |
+
571A58AB99DE99C899E058A39AB69AB558A59AF458FF9B6B9B699B729B630000
|
| 795 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 796 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 797 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 798 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 799 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 800 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 801 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 802 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 803 |
+
50
|
| 804 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 805 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 806 |
+
00005AF69D0D5AF89D019D0C5B019CF85B055B0F9CFE9D029E845D9F9EAB9EAA
|
| 807 |
+
511D51165DA0512B511E511B5290529453145E605E5C56675EDB567B5EE1565F
|
| 808 |
+
5661618B6183617961B161B061A2618958C358CA58BB58C058C459015B1F5B18
|
| 809 |
+
5B115B1561B35B125B1C64705B225B795DA664975DB35DAB5EEA648A5F5B64A3
|
| 810 |
+
649F61B761CE61B961BD61CF61C06199619765B361BB61D061C4623166B764D3
|
| 811 |
+
64C06A006A066A1769E564DC64D164C869E464D566C369EC69E266BF66C50000
|
| 812 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 813 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 814 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 815 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 816 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 817 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 818 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 819 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 820 |
+
51
|
| 821 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 822 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 823 |
+
000069FE66CD66C167066A1467246A636A426A5269E66A436A3369FC6A6C6A57
|
| 824 |
+
6A046A4C6A6E6A0F69F66A266A0769F46A376B516A716A4A6A366BA66A536C00
|
| 825 |
+
6A456A706F416F266A5C6B586B576F926F8D6F896F8C6F626F4F6FBB6F5A6F96
|
| 826 |
+
6FBE6F6C6F826F556FB56FD36F9F6F576FB76FF571B76F0071BB6F6B71D16F67
|
| 827 |
+
71BA6F5371B671CC6F7F6F9571D3749B6F6A6F7B749674A2749D750A750E719A
|
| 828 |
+
7581762C76377636763B71A476A171AA719C779871B37796729A735873520000
|
| 829 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 830 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 831 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 832 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 833 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 834 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 835 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 836 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 837 |
+
52
|
| 838 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 839 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 840 |
+
000078D678EB736078DC735B79A579A998347A537A4574897A4F74867ABD7ABB
|
| 841 |
+
7AF17488747C7BEC7BED7507757E7CD3761E7CE1761D7E197623761A76287E27
|
| 842 |
+
7E26769D769E806E81AF778F778981AD78CD81AA821878CC78D178CE78D4856F
|
| 843 |
+
854C78C48542799A855C8570855F79A2855A854B853F878A7AB4878B87A1878E
|
| 844 |
+
7BBE7BAC8799885E885F892489A78AEA8AFD8AF98AE38AE57DDB7DEA8AEC7DD7
|
| 845 |
+
7DE17E037DFA8CF27DF68CEF7DF08DA67DDF7F767FAC8E3B8E437FED8E320000
|
| 846 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 847 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 848 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 849 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 850 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 851 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 852 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 853 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 854 |
+
53
|
| 855 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 856 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 857 |
+
00008F318F307FE68F2D8F3C8FA78FA5819F819E819591379195918E82169196
|
| 858 |
+
82539345930A824E825192FD9317931C930793319332932C9330930393058527
|
| 859 |
+
95C284FB95B884FA95C1850C84F4852A96AB96B784F784EB97159714851284EA
|
| 860 |
+
970C971784FE9793851D97D2850284FD983698319833983C982E983A84F0983D
|
| 861 |
+
84F998B5992299239920991C991D866299A0876399EF99E899EB877387588754
|
| 862 |
+
99E199E68761875A9AF89AF5876D876A9B839B949B84875D9B8B9B8F877A0000
|
| 863 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 864 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 865 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 866 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 867 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 868 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 869 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 870 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 871 |
+
54
|
| 872 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 873 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 874 |
+
00009B8C875C9B89874F9B8E8775876287679D249D0F89059D139D0A890B8917
|
| 875 |
+
891889199D2A9D1A89119D279D169D2189A49E859EAC9EC69EC59ED79F538AB8
|
| 876 |
+
5128512751DF8AD5533553B38ABE568A567D56898AC358CD58D08AD95B2B5B33
|
| 877 |
+
5B295B355B315B375C365DBE8CDD5DB98DA05DBB8DA161E261DB61DD61DC61DA
|
| 878 |
+
8E2E61D98E1B8E1664DF8E198E2664E18E1464EE8E1865B566D466D58E1A66D0
|
| 879 |
+
66D166CE66D78F208F236A7D6A8A90736AA7906F6A996A826A88912B91290000
|
| 880 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 881 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 882 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 883 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 884 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 885 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 886 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 887 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 888 |
+
55
|
| 889 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 890 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 891 |
+
00006A8691326A986A9D918591866A8F91816AAA91846B5D92D06C0A92C46FD7
|
| 892 |
+
6FD66FE592CF92F192DF6FD96FDA6FEA92DD6FF692EF92C271E392CA71E992CE
|
| 893 |
+
71EB71EF71F371EA92E092DE92E792D192D3737192E174AE92C674B3957C74AC
|
| 894 |
+
95AB95AE75837645764E764476A376A577A677A4978A77A977AF97D097CF981E
|
| 895 |
+
78F078F878F198287A49981B982798B27AC27AF27AF37BFA99167BF67BFC7C18
|
| 896 |
+
7C087C1299D399D47CDB7CDA99D699D899CB7E2C7E4D9AB39AEC7F467FF60000
|
| 897 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 898 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 899 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 900 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 901 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 902 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 903 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 904 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 905 |
+
56
|
| 906 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 907 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 908 |
+
0000802B807481B881C89B679B749B71859285939B75857F85AB85979B6C9CFC
|
| 909 |
+
85AC9CFD9CFF9CF787CE9D0087CD9CFB9D0887C187B187C79ED389409F10893F
|
| 910 |
+
893951178943511151DE533489AB56708B1F8B098B0C566656638C4056728C96
|
| 911 |
+
56778CF68CF758C88E468E4F58BF58BA58C28F3D8F4193669378935D93699374
|
| 912 |
+
937D936E93729373936293489353935F93685DB1937F936B5DB595C45DAE96AF
|
| 913 |
+
96AD96B25DAD5DAF971A971B5E685E665E6F5EE9979B979F5EE85EE55F4B0000
|
| 914 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 915 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 916 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 917 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 918 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 919 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 920 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 921 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 922 |
+
57
|
| 923 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 924 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 925 |
+
00005FBC5FBB619D61A86196984061B4984761C198B761BA61BF61B8618C64D7
|
| 926 |
+
99A264D064CF9A0099F3648964C399F564F364D99ABD9B009B0265A29B349B49
|
| 927 |
+
9B9F66CA9BA39BCD9B999B9D66BA66CC9D396A349D446A496A679D356A686A3E
|
| 928 |
+
9EAF6A6D512F6A5B6A519F8E6A5A569F569B569E5696569456A06A4F5B3B6A6F
|
| 929 |
+
6A695B3A5DC15F4D5F5D61F36A4D6A4E6A466B5564F664E564EA64E765056BC8
|
| 930 |
+
64F96C046C036C066AAB6AED6AB26AB06AB56ABE6AC16AC86FC46AC06ABC0000
|
| 931 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 932 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 933 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 934 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 935 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 936 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 937 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 938 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 939 |
+
58
|
| 940 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 941 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 942 |
+
00006AB16AC46ABF6FA56FAE700870036FFD7010700270136FA271FA720074B9
|
| 943 |
+
74BC6FB2765B7651764F76EB77B871D677B977C177C077BE790B71C77907790A
|
| 944 |
+
790871BC790D7906791579AF729E736973667AF5736C73657C2E736A7C1B749A
|
| 945 |
+
7C1A7C24749274957CE67CE37580762F7E5D7E4F7E667E5B7F477FB476327630
|
| 946 |
+
76BB7FFA802E779D77A181CE779B77A282197795779985CC85B278E985BB85C1
|
| 947 |
+
78DE78E378DB87E987EE87F087D6880E87DA8948894A894E894D89B189B00000
|
| 948 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 949 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 950 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 951 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 952 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 953 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 954 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 955 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 956 |
+
59
|
| 957 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 958 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 959 |
+
000089B37AB78B388B327BE78B2D7BD58B347BDA8B298C747BD47BEA8D037BDC
|
| 960 |
+
7BEB8DA98E587CD27CD48EBF8EC18F4A8FAC7E219089913D913C91A993A07E0E
|
| 961 |
+
93907E159393938B93AD93BB93B87E0D7E14939C95D895D77F7B7F7C7F7A975D
|
| 962 |
+
97A997DA8029806C81B181A6985481B99855984B81B0983F98B981B281B781A7
|
| 963 |
+
81F29938993699408556993B993999A4855385619A089A0C85469A1085419B07
|
| 964 |
+
85449BD285479BC29BBB9BCC9BCB854E856E9D4D9D639D4E85609D509D550000
|
| 965 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 966 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 967 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 968 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 969 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 970 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 971 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 972 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 973 |
+
5A
|
| 974 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 975 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 976 |
+
0000855D9D5E85659E909EB29EB186649ECA9F029F279F26879356AF58E058DC
|
| 977 |
+
87965B39877987875B7C5BF3879087915C6B5DC4650B6508650A8789891E65DC
|
| 978 |
+
8930892D66E166DF6ACE6AD46AE36AD76AE2892C891F89F18AE06AD86AD56AD2
|
| 979 |
+
8AF58ADD701E702C70256FF37204720872158AE874C474C974C774C876A977C6
|
| 980 |
+
77C57918791A79208CF37A667A647A6A8DA78E338E3E8E388E408E457C357C34
|
| 981 |
+
8E3D8E417E6C8E3F7E6E7E718F2E81D481D6821A82628265827685DB85D60000
|
| 982 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 983 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 984 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 985 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 986 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 987 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 988 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 989 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 990 |
+
5B
|
| 991 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 992 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 993 |
+
0000908685E79133913585F4919387FD87D58807918F880F87F89308931F8987
|
| 994 |
+
930F89B589F5933C8B3F8B438B4C93018D0B8E6B8E688E708E758E7792FA8EC3
|
| 995 |
+
92F993E993EA93CB93C593C6932993ED93D3932A93E5930C930B93DB93EB93E0
|
| 996 |
+
93C1931695BC95DD95BE95B995BA95B695BF95B595BD96A996D497B297B497B1
|
| 997 |
+
97B597F2979497F097F89856982F98329924994499279A269A1F9A189A219A17
|
| 998 |
+
99E49B0999E399EA9BC59BDF9AB99BE39AB49BE99BEE9AFA9AF99D669D7A0000
|
| 999 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1000 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1001 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1002 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1003 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1004 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1005 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1006 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1007 |
+
5C
|
| 1008 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1009 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1010 |
+
00009B809D6E9D919D839D769D7E9D6D9B939E959EE39B7A9B959F039F049D25
|
| 1011 |
+
9F179D2051369D1453369D1D5B429D229D105B445B465B7E5DCA5DC85DCC5EF0
|
| 1012 |
+
9ED5658566E566E79F3D512651256AF451246AE9512952F45693568C568D703D
|
| 1013 |
+
56847036567E7216567F7212720F72177211720B5B2D5B2574CD74D074CC74CE
|
| 1014 |
+
74D15B2F75895B7B7A6F7C4B7C445E6C5E6A5FBE61C361B57E7F8B7161E0802F
|
| 1015 |
+
807A807B807C64EF64E964E385FC861086026581658085EE860366D2860D0000
|
| 1016 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1017 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1018 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1019 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1020 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1021 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1022 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1023 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1024 |
+
5D
|
| 1025 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1026 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1027 |
+
000086138608860F881888126A9B6AA18967896589BB8B698B626A838B6E6AA4
|
| 1028 |
+
8B616A7F8B648B4D8C516A8C6A928E838EC66C09941F6FA99404941794089405
|
| 1029 |
+
6FED93F3941E9402941A941B9427941C71E196B571E871F2973371F097349731
|
| 1030 |
+
97B897BA749797FC74AB749098C374AD994D74A59A2F7510751175129AC97584
|
| 1031 |
+
9AC89AC49B2A9B389B5076E99C0A9BFB9C049BFC9BFE77B477B177A89C029BF6
|
| 1032 |
+
9C1B9BF99C159C109BFF9C009C0C78F978FE9D959DA579A87A5C7A5B7A560000
|
| 1033 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1034 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1035 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1036 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1037 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1038 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1039 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1040 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1041 |
+
5E
|
| 1042 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1043 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1044 |
+
00009E989EC17A5A9F5A516456BB7C0558E65B495BF77BFF7BFB5DD07BF45FC2
|
| 1045 |
+
7BF365117C096AFF6AFE6AFD7BFD6B017BF07BF1704B704D704774D376687667
|
| 1046 |
+
7E33984877D179307932792E7E479F9D7AC97AC87E3B7C567C517E3A7F457F7F
|
| 1047 |
+
7E857E897E8E7E84802C826A862B862F862881C586168615861D881A825A825C
|
| 1048 |
+
858389BC8B758B7C85958D118D128F5C91BB85A493F4859E8577942D858985A1
|
| 1049 |
+
96E497379736976797BE97BD97E29868986698C898CA98C798DC8585994F0000
|
| 1050 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1051 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1052 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1053 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1054 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1055 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1056 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1057 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1058 |
+
5F
|
| 1059 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1060 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1061 |
+
000099A99A3C85909A3B9ACE87BE9B149B5387C59C2E87AC9C1F87B587BC87AE
|
| 1062 |
+
87C99DB09DBD87CC87B79DAE9DC49E7B87B487B69E9E87B89F0587DE9F699FA1
|
| 1063 |
+
56C7571D5B4A5DD389525F72620289AD62356527651E651F8B1E8B186B076B06
|
| 1064 |
+
8B058B0B7054721C72207AF88B077C5D7C588B067E927F4E8B1A8C4F8C708827
|
| 1065 |
+
8C718B818B838C948C448D6F8E4E8E4D8E539442944D9454944E8F409443907E
|
| 1066 |
+
9138973C974097C09199919F91A1919D995A9A5193839ADD936493569C380000
|
| 1067 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1068 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1069 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1070 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1071 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1072 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1073 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1074 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1075 |
+
60
|
| 1076 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1077 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1078 |
+
0000937C9C459C3A93769C359350935193609EF1938F9F93529A937993578641
|
| 1079 |
+
5DD7934F65289377937B936170537059936772219359766F793779B57C627C5E
|
| 1080 |
+
7CF596AE96B0863D9720882D89898B8D8B878B908D1A8E99979E979D97D5945F
|
| 1081 |
+
97F1984194569461945B945A945C9465992B9741992A9933986E986C986D9931
|
| 1082 |
+
99AA9A5C9A589ADE9A029C4F9C5199F79C5399F899F699FB9DFC9F3999FC513E
|
| 1083 |
+
9ABE56D29AFD5B4F6B149B487A727A739B9E9B9B9BA68B919BA59BA491BF0000
|
| 1084 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1085 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1086 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1087 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1088 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1089 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1090 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1091 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1092 |
+
61
|
| 1093 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1094 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1095 |
+
00009BA2946C9BAF9D3396E697459D3697C897E4995D9D389B219D459B2C9B57
|
| 1096 |
+
9D3E9D379C5D9C619C659E089E8A9E899E8D9EB09EC89F459EFB9EFF620566EF
|
| 1097 |
+
6B1B6B1D722572247C6D512E8642864956978978898A8B9759708C9B8D1C5C6A
|
| 1098 |
+
8EA25E6D5E6E61D861DF61ED61EE61F161EA9C6C61EB9C6F61E99E0E65049F08
|
| 1099 |
+
9F1D9FA3650364FC5F606B1C66DA66DB66D87CF36AB98B9B8EA791C46ABA947A
|
| 1100 |
+
6AB76AC79A619A639AD79C766C0B9FA5700C7067700172AB864A897D8B9D0000
|
| 1101 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1102 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1103 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1104 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1105 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1106 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1107 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1108 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1109 |
+
62
|
| 1110 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1111 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1112 |
+
00008C538F65947B6FFC98CD98DD72019B309E16720371FD737674B874C096E7
|
| 1113 |
+
9E189EA274B69F7C74C27E9E9484765C9E1C76597C7197CA7657765A76A69EA3
|
| 1114 |
+
76EC9C7B9F97790C7913975079097910791257275C1379AC7A5F7C1C7C297C19
|
| 1115 |
+
7C205FC87C2D7C1D7C267C287C2267657C307E5C52BD7E565B667E5865F96788
|
| 1116 |
+
6CE66CCB7E574FBD5F8D7FB36018604880756B2970A681D07706825E85B485C6
|
| 1117 |
+
5A105CFC5CFE85B385B585BD85C785C485BF70C985CE85C885C585B185B60000
|
| 1118 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1119 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1120 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1121 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1122 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1123 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1124 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1125 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1126 |
+
63
|
| 1127 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1128 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1129 |
+
000085D28624957985B796BA866987E787E687E287DB87EB87EA7B29812887F3
|
| 1130 |
+
8A2E87D487DC87D39AD987D8582B584587D963FA87F487E887DD6E86894B894F
|
| 1131 |
+
894C89468950586789495BDD656E8B238B338B308C878B4750D250DF8B3E8B31
|
| 1132 |
+
8B258B3769BA8B366B9D8B2480598B3D8B3A8C428C758C998C988C978CFE8D04
|
| 1133 |
+
8D028D008E5C6F8A8E608E577BC37BC28E658E678E5B8E5A90F68E5D98238E54
|
| 1134 |
+
8F468F478F488F4B71CD7499913B913E91A891A591A7984291AA93B5938C0000
|
| 1135 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1136 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1137 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1138 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1139 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1140 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1141 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1142 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1143 |
+
64
|
| 1144 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1145 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1146 |
+
000093927F84939B939D938993A7938E8D0E939E9861939593888B73939F9C27
|
| 1147 |
+
938D945877D69B2D93A493A893B493A395D295D395D196B396D796DA5DC296DF
|
| 1148 |
+
96D896DD97239722972597AC97AE97A84F664F684FE7503F97A550A6510F523E
|
| 1149 |
+
53245365539B517F54CB55735571556B55F456225620569256BA569156B05759
|
| 1150 |
+
578A580F581258135847589B5900594D5AD15AD35B675C575C775CD55D755D8E
|
| 1151 |
+
5DA55DB65DBF5E655ECD5EED5F945F9A5FBA6125615062A36360636463B60000
|
| 1152 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1153 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1154 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1155 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1156 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1157 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1158 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1159 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1160 |
+
65
|
| 1161 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1162 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1163 |
+
0000640364B6651A7A255C2166E2670267A467AC68106806685E685A692C6929
|
| 1164 |
+
6A2D6A776A7A6ACA6AE66AF56B0D6B0E6BDC6BDD6BF66C1E6C636DA56E0F6E8A
|
| 1165 |
+
6E846E8B6E7C6F4C6F486F496F9D6F996FF8702E702D705C79CC70BF70EA70E5
|
| 1166 |
+
71117112713F7139713B713D71777175717671717196719371B471DD71DE720E
|
| 1167 |
+
591172187347734873EF7412743B74A4748D74B47673767776BC7819781B783D
|
| 1168 |
+
78537854785878B778D878EE7922794D7986799979A379BC7AA77B377B590000
|
| 1169 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1170 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1171 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1172 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1173 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1174 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1175 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1176 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1177 |
+
66
|
| 1178 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1179 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1180 |
+
00007BD07C2F7C327C427C4E7C687CA97CED7DD07E077DD37E647F40791E8041
|
| 1181 |
+
806380BB6711672582488310836283128421841E84E284DE84E1857385D485F5
|
| 1182 |
+
863786458672874A87A987A587F5883488508887895489848B038C528CD88D0C
|
| 1183 |
+
8D188DB08EBC8ED58FAA909C85E8915C922B9221927392F492F5933F93429386
|
| 1184 |
+
93BE93BC93BD93F193F293EF94229423942494679466959795CE95E7973B974D
|
| 1185 |
+
98E499429B1D9B9889629D4964495E715E8561D3990E8002781E898889B70000
|
| 1186 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1187 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1188 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1189 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1190 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1191 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1192 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1193 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1194 |
+
67
|
| 1195 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1196 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1197 |
+
00005528557255BA55F055EE56B856B956C4805392B08B558B518B428B528B57
|
| 1198 |
+
8C438C778C768C9A8D068D078D098DAC8DAA8DAD8DAB8E6D8E788E738E6A8E6F
|
| 1199 |
+
8E7B8EC28F528F518F4F8F508F538FB49140913F91B091AD93DE93C793CF93C2
|
| 1200 |
+
93DA93D093F993EC93CC93D993A993E693CA93D493EE93E393D593C493CE93C0
|
| 1201 |
+
93D293A593E7957D95DA95DB96E19729972B972C9728972697B397B797B697DD
|
| 1202 |
+
97DE97DF985C9859985D985798BF98BD98BB98BE99489947994399A699A70000
|
| 1203 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1204 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1205 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1206 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1207 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1208 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1209 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1210 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1211 |
+
68
|
| 1212 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1213 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1214 |
+
00009A1A9A159A259A1D9A249A1B9A229A209A279A239A1E9A1C9A149AC29B0B
|
| 1215 |
+
9B0A9B0E9B0C9B379BEA9BEB9BE09BDE9BE49BE69BE29BF09BD49BD79BEC9BDC
|
| 1216 |
+
9BD99BE59BD59BE19BDA9D779D819D8A9D849D889D719D809D789D869D8B9D8C
|
| 1217 |
+
9D7D9D6B9D749D759D709D699D859D739D7B9D829D6F9D799D7F9D879D689E94
|
| 1218 |
+
9E919EC09EFC9F2D9F409F419F4D9F569F579F58533756B256B556B358E35B45
|
| 1219 |
+
5DC65DC75EEE5EEF5FC05FC161F9651765166515651365DF66E866E366E40000
|
| 1220 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1221 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1222 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1223 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1224 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1225 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1226 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1227 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1228 |
+
69
|
| 1229 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1230 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1231 |
+
00006AF36AF06AEA6AE86AF96AF16AEE6AEF703C7035702F7037703470317042
|
| 1232 |
+
7038703F703A7039702A7040703B703370417213721472A8737D737C74BA76AB
|
| 1233 |
+
76AA76BE76ED77CC77CE77CF77CD77F279257923792779287924792979B27A6E
|
| 1234 |
+
7A6C7A6D7AF77C497C487C4A7C477C457CEE7E7B7E7E7E817E807FBA7FFF8079
|
| 1235 |
+
81DB81D982688269862285FF860185FE861B860085F6860486098605860C85FD
|
| 1236 |
+
8819881088118817881388168963896689B989F78B608B6A8B5D8B688B630000
|
| 1237 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1238 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1239 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1240 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1241 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1242 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1243 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1244 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1245 |
+
6A
|
| 1246 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1247 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1248 |
+
00008B658B678B6D8DAE8E868E888E848F598F568F578F558F588F5A908D9143
|
| 1249 |
+
914191B791B591B291B3940B941393FB9420940F941493FE9415941094289419
|
| 1250 |
+
940D93F5940093F79407940E9416941293FA940993F8943C940A93FF93FC940C
|
| 1251 |
+
93F69411940695DE95E095DF972E972F97B997BB97FD97FE986098629863985F
|
| 1252 |
+
98C198C29950994E9959994C994B99539A329A349A319A2C9A2A9A369A299A2E
|
| 1253 |
+
9A389A2D9AC79ACA9AC69B109B129B119C0B9C089BF79C059C129BF89C400000
|
| 1254 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1255 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1256 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1257 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1258 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1259 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1260 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1261 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1262 |
+
6B
|
| 1263 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1264 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1265 |
+
00009C079C0E9C069C179C149C099D9F9D999DA49D9D9D929D989D909D9B9DA0
|
| 1266 |
+
9D949D9C9DAA9D979DA19D9A9DA29DA89D9E9DA39DBF9DA99D969DA69DA79E99
|
| 1267 |
+
9E9B9E9A9EE59EE49EE79EE69F309F2E9F5B9F609F5E9F5D9F599F91513A5139
|
| 1268 |
+
5298529756C356BD56BE5B485B475DCB5DCF5EF161FD651B6B026AFC6B036AF8
|
| 1269 |
+
6B0070437044704A7048704970457046721D721A7219737E7517766A77D0792D
|
| 1270 |
+
7931792F7C547C537CF27E8A7E877E887E8B7E867E8D7F4D7FBB803081DD0000
|
| 1271 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1272 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1273 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1274 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1275 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1276 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1277 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1278 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1279 |
+
6C
|
| 1280 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1281 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1282 |
+
00008618862A8626861F8623861C86198627862E862186208629861E86258829
|
| 1283 |
+
881D881B88208824881C882B884A896D8969896E896B89FA8B798B788B458B7A
|
| 1284 |
+
8B7B8D108D148DAF8E8E8E8C8F5E8F5B8F5D91469144914591B9943F943B9436
|
| 1285 |
+
9429943D94309439942A9437942C9440943195E595E495E39735973A97BF97E1
|
| 1286 |
+
986498C998C698C0995899569A399A3D9A469A449A429A419A3A9A3F9ACD9B15
|
| 1287 |
+
9B179B189B169B3A9B529C2B9C1D9C1C9C2C9C239C289C299C249C219DB70000
|
| 1288 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1289 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1290 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1291 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1292 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1293 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1294 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1295 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1296 |
+
6D
|
| 1297 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1298 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1299 |
+
00009DB69DBC9DC19DC79DCA9DCF9DBE9DC59DC39DBB9DB59DCE9DB99DBA9DAC
|
| 1300 |
+
9DC89DB19DAD9DCC9DB39DCD9DB29E7A9E9C9EEB9EEE9EED9F1B9F189F1A9F31
|
| 1301 |
+
9F4E9F659F649F924EB956C656C556CB59715B4B5B4C5DD55DD15EF265216520
|
| 1302 |
+
652665226B0B6B086B096C0D7055705670577052721E721F72A9737F74D874D5
|
| 1303 |
+
74D974D7766D76AD793579B47A707A717C577C5C7C597C5B7C5A7CF47CF17E91
|
| 1304 |
+
7F4F7F8781DE826B863486358633862C86328636882C88288826882A88250000
|
| 1305 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1306 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1307 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1308 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1309 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1310 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1311 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1312 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1313 |
+
6E
|
| 1314 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1315 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1316 |
+
0000897189BF89BE89FB8B7E8B848B828B868B858B7F8D158E958E948E9A8E92
|
| 1317 |
+
8E908E968E978F608F629147944C9450944A944B944F94479445944894499446
|
| 1318 |
+
973F97E3986A986998CB9954995B9A4E9A539A549A4C9A4F9A489A4A9A499A52
|
| 1319 |
+
9A509AD09B199B2B9B3B9B569B559C469C489C3F9C449C399C339C419C3C9C37
|
| 1320 |
+
9C349C329C3D9C369DDB9DD29DDE9DDA9DCB9DD09DDC9DD19DDF9DE99DD99DD8
|
| 1321 |
+
9DD69DF59DD59DDD9EB69EF09F359F339F329F429F6B9F959FA2513D52990000
|
| 1322 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1323 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1324 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1325 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1326 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1327 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1328 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1329 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1330 |
+
6F
|
| 1331 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1332 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1333 |
+
000058E858E759725B4D5DD8882F5F4F62016203620465296525659666EB6B11
|
| 1334 |
+
6B126B0F6BCA705B705A7222738273817383767077D47C677C667E95826C863A
|
| 1335 |
+
86408639863C8631863B863E88308832882E883389768974897389FE8B8C8B8E
|
| 1336 |
+
8B8B8B888C458D198E988F648F6391BC94629455945D9457945E97C497C59800
|
| 1337 |
+
9A569A599B1E9B1F9B209C529C589C509C4A9C4D9C4B9C559C599C4C9C4E9DFB
|
| 1338 |
+
9DF79DEF9DE39DEB9DF89DE49DF69DE19DEE9DE69DF29DF09DE29DEC9DF40000
|
| 1339 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1340 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1341 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1342 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1343 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1344 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1345 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1346 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1347 |
+
70
|
| 1348 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1349 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1350 |
+
00009DF39DE89DED9EC29ED09EF29EF39F069F1C9F389F379F369F439F4F9F71
|
| 1351 |
+
9F709F6E9F6F56D356CD5B4E5C6D652D66ED66EE6B13705F7061705D70607223
|
| 1352 |
+
74DB74E577D5793879B779B67C6A7E977F89826D8643883888378835884B8B94
|
| 1353 |
+
8B958E9E8E9F8EA08E9D91BE91BD91C2946B9468946996E597469743974797C7
|
| 1354 |
+
97E59A5E9AD59B599C639C679C669C629C5E9C609E029DFE9E079E039E069E05
|
| 1355 |
+
9E009E019E099DFF9DFD9E049EA09F1E9F469F749F759F7656D4652E65B80000
|
| 1356 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1357 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1358 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1359 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1360 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1361 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1362 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1363 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1364 |
+
71
|
| 1365 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1366 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1367 |
+
00006B186B196B176B1A7062722672AA77D877D979397C697C6B7CF67E9A7E98
|
| 1368 |
+
7E9B7E9981E081E18646864786488979897A897C897B89FF8B988B998EA58EA4
|
| 1369 |
+
8EA3946E946D946F9471947397499872995F9C689C6E9C6D9E0B9E0D9E109E0F
|
| 1370 |
+
9E129E119EA19EF59F099F479F789F7B9F7A9F79571E70667C6F883C8DB28EA6
|
| 1371 |
+
91C394749478947694759A609B2E9C749C739C719C759E149E139EF69F0A9FA4
|
| 1372 |
+
706870657CF7866A883E883D883F8B9E8C9C8EA98EC9974B9873987498CC0000
|
| 1373 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1374 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1375 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1376 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1377 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1378 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1379 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1380 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1381 |
+
72
|
| 1382 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1383 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1384 |
+
0000996199AB9A649A669A679B249E159E179F4862076B1E7227864C8EA89482
|
| 1385 |
+
948094819A699A689E19864B8B9F94839C799EB776759A6B9C7A9E1D7069706A
|
| 1386 |
+
72299EA49F7E9F499F988AF68AFC8C6B8C6D8C938CF48E448E318E348E428E39
|
| 1387 |
+
8E358F3B8F2F8F388F338FA88FA69075907490789072907C907A913491929320
|
| 1388 |
+
933692F89333932F932292FC932B9304931A9310932693219315932E931995BB
|
| 1389 |
+
96A796A896AA96D5970E97119716970D9713970F975B975C9766979898300000
|
| 1390 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1391 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1392 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1393 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1394 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1395 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1396 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1397 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1398 |
+
73
|
| 1399 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1400 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1401 |
+
00009838983B9837982D9839982499109928991E991B9921991A99ED99E299F1
|
| 1402 |
+
9AB89ABC9AFB9AED9B289B919D159D239D269D289D129D1B9ED89ED49F8D9F9C
|
| 1403 |
+
512A511F5121513252F5568E5680569056855687568F58D558D358D158CE5B30
|
| 1404 |
+
5B2A5B245B7A5C375C685DBC5DBA5DBD5DB85E6B5F4C5FBD61C961C261C761E6
|
| 1405 |
+
61CB6232623464CE64CA64D864E064F064E664EC64F164E264ED6582658366D9
|
| 1406 |
+
66D66A806A946A846AA26A9C6ADB6AA36A7E6A976A906AA06B5C6BAE6BDA0000
|
| 1407 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1408 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1409 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1410 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1411 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1412 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1413 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1414 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1415 |
+
74
|
| 1416 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1417 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1418 |
+
00006C086FD86FF16FDF6FE06FDB6FE46FEB6FEF6F806FEC6FE16FE96FD56FEE
|
| 1419 |
+
6FF071E771DF71EE71E671E571ED71EC71F471E0723572467370737274A974B0
|
| 1420 |
+
74A674A876467642764C76EA77B377AA77B077AC77A777AD77EF78F778FA78F4
|
| 1421 |
+
78EF790179A779AA7A577ABF7C077C0D7BFE7BF77C0C7BE07CE07CDC7CDE7CE2
|
| 1422 |
+
7CDF7CD97CDD7E2E7E3E7E467E377E327E437E2B7E3D7E317E457E417E347E39
|
| 1423 |
+
7E487E357E3F7E2F7F447FF37FFC807180728070806F807381C681C381BA0000
|
| 1424 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1425 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1426 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1427 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1428 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1429 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1430 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1431 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1432 |
+
75
|
| 1433 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1434 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1435 |
+
000081C281C081BF81BD81C981BE81E88209827185AA8584857E859C85918594
|
| 1436 |
+
85AF859B858785A8858A85A6866787C087D187B387D287C687AB87BB87BA87C8
|
| 1437 |
+
87CB893B893689448938893D89AC8B0E8B178B198B1B8B0A8B208B1D8B048B10
|
| 1438 |
+
8C418C3F8C738CFA8CFD8CFC8CF88CFB8DA88E498E4B8E488E4A8F448F3E8F42
|
| 1439 |
+
8F458F3F907F907D9084908190829080913991A3919E919C934D938293289375
|
| 1440 |
+
934A9365934B9318937E936C935B9370935A935495CA95CB95CC95C895C60000
|
| 1441 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1442 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1443 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1444 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1445 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1446 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1447 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1448 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1449 |
+
76
|
| 1450 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1451 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1452 |
+
000096B196B896D6971C971E97A097D3984698B699359A0199FF9BAE9BAB9BAA
|
| 1453 |
+
9BAD9D3B9D3F9E8B9ECF9EDE9EDC9EDD9EDB9F3E9F4B53E2569556AE58D958D8
|
| 1454 |
+
5B385F5E61E3623364F464F264FE650664FA64FB64F765B766DC67266AB36AAC
|
| 1455 |
+
6AC36ABB6AB86AC26AAE6AAF6B5F6B786BAF7009700B6FFE70066FFA7011700F
|
| 1456 |
+
71FB71FC71FE71F87377737574A774BF751576567658765277BD77BF77BB77BC
|
| 1457 |
+
790E79AE7A617A627A607AC47AC57C2B7C277C2A7C1E7C237C217CE77E540000
|
| 1458 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1459 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1460 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1461 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1462 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1463 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1464 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1465 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1466 |
+
77
|
| 1467 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1468 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1469 |
+
00007E557E5E7E5A7E617E527E597F487FF97FFB8077807681CD81CF820A85CF
|
| 1470 |
+
85A985CD85D085C985B085BA85B987EF87EC87F287E0898689B289F48B288B39
|
| 1471 |
+
8B2C8B2B8C508D058E598E638E668E648E5F8E558EC08F498F4D908790839088
|
| 1472 |
+
91AB91AC91D09394938A939693A293B393AE93AC93B09398939A939795D495D6
|
| 1473 |
+
95D095D596E296DC96D996DB96DE972497A397A697AD97F9984D984F984C984E
|
| 1474 |
+
985398BA993E993F993D992E99A59A0E9AC19B039B069B4F9B4E9B4D9BCA0000
|
| 1475 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1476 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1477 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1478 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1479 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1480 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1481 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1482 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1483 |
+
78
|
| 1484 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1485 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1486 |
+
00009BC99BFD9BC89BC09D519D5D9D609EE09F159F2C513356A556A858DE58DF
|
| 1487 |
+
58E25BF59F905EEC61F261F761F661F56500650F66E066DD6AE56ADD6ADA6AD3
|
| 1488 |
+
701B701F7028701A701D701570187206720D725872A27378737A74BD74CA74E3
|
| 1489 |
+
75877586765F766177C7791979B17A6B7A697C3E7C3F7C387C3D7C377C407E6B
|
| 1490 |
+
7E6D7E797E697E6A7E737F857FB67FB97FB881D885E985DD85EA85D585E485E5
|
| 1491 |
+
85F787FB8805880D87F987FE8960895F8956895E8B418B5C8B588B498B5A0000
|
| 1492 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1493 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1494 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1495 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1496 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1497 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1498 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1499 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1500 |
+
79
|
| 1501 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1502 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1503 |
+
00008B4E8B4F8B468B598D088D0A8E7C8E728E878E768E6C8E7A8E748F548F4E
|
| 1504 |
+
8FAD908A908B91B191AE93E193D193DF93C393C893DC93DD93D693E293CD93D8
|
| 1505 |
+
93E493D793E895DC96B496E3972A9727976197DC97FB985E9858985B98BC9945
|
| 1506 |
+
99499A169A199B0D9BE89BE79BD69BDB9D899D619D729D6A9D6C9E929E979E93
|
| 1507 |
+
9EB452F856B756B656B456BC58E45B405B435B7D5BF65DC961F861FA65186514
|
| 1508 |
+
651966E667276AEC703E703070327210737B74CF766276657926792A792C0000
|
| 1509 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1510 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1511 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1512 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1513 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1514 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1515 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1516 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1517 |
+
7A
|
| 1518 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1519 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1520 |
+
0000792B7AC77AF67C4C7C437C4D7CEF7CF08FAE7E7D7E7C7E827F4C800081DA
|
| 1521 |
+
826685FB85F9861185FA8606860B8607860A88148815896489BA89F88B708B6C
|
| 1522 |
+
8B668B6F8B5F8B6B8D0F8D0D8E898E818E858E8291B491CB9418940393FD95E1
|
| 1523 |
+
973098C49952995199A89A2B9A309A379A359C139C0D9E799EB59EE89F2F9F5F
|
| 1524 |
+
9F639F615137513856C156C056C259145C6C5DCD61FC61FE651D651C659566E9
|
| 1525 |
+
6AFB6B046AFA6BB2704C721B72A774D674D4766977D37C507E8F7E8C7FBC0000
|
| 1526 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1527 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1528 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1529 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1530 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1531 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1532 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1533 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1534 |
+
7B
|
| 1535 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1536 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1537 |
+
00008617862D861A882388228821881F896A896C89BD8B748B778B7D8D138E8A
|
| 1538 |
+
8E8D8E8B8F5F8FAF91BA942E94339435943A94389432942B95E2973897399732
|
| 1539 |
+
97FF9867986599579A459A439A409A3E9ACF9B549B519C2D9C259DAF9DB49DC2
|
| 1540 |
+
9DB89E9D9EEF9F199F5C9F669F67513C513B56C856CA56C95B7F5DD45DD25F4E
|
| 1541 |
+
61FF65246B0A6B6170517058738074E4758A766E766C79B37C607C5F807E807D
|
| 1542 |
+
81DF8972896F89FC8B808D168D178E918E938F619148944494519452973D0000
|
| 1543 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1544 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1545 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1546 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1547 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1548 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1549 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1550 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1551 |
+
7C
|
| 1552 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1553 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1554 |
+
0000973E97C397C1986B99559A559A4D9AD29B1A9C499C319C3E9C3B9DD39DD7
|
| 1555 |
+
9F349F6C9F6A9F9456CC5DD662006523652B652A66EC6B1074DA7ACA7C647C63
|
| 1556 |
+
7C657E937E967E9481E28638863F88318B8A9090908F9463946094649768986F
|
| 1557 |
+
995C9A5A9A5B9A579AD39AD49AD19C549C579C569DE59E9F9EF456D158E9652C
|
| 1558 |
+
705E7671767277D77F507F888836883988628B938B928B9682778D1B91C0946A
|
| 1559 |
+
97429748974497C698709A5F9B229B589C5F9DF99DFA9E7C9E7D9F079F770000
|
| 1560 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1561 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1562 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1563 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1564 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1565 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1566 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1567 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1568 |
+
7D
|
| 1569 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1570 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1571 |
+
00009F725EF36B1670637C6C7C6E883B89C08EA191C1947294709871995E9AD6
|
| 1572 |
+
9B239ECC706477DA8B9A947797C99A629A657E9C8B9C8EAA91C5947D947E947C
|
| 1573 |
+
9C779C789EF78C54947F9E1A72289A6A9B319E1B9E1E7C720000000000000000
|
| 1574 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1575 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1576 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1577 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1578 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1579 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1580 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1581 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1582 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1583 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1584 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
janus/lib/tcl8.6/encoding/cp1250.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cp1250, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
20AC0081201A0083201E2026202020210088203001602039015A0164017D0179
|
| 14 |
+
009020182019201C201D202220132014009821220161203A015B0165017E017A
|
| 15 |
+
00A002C702D8014100A4010400A600A700A800A9015E00AB00AC00AD00AE017B
|
| 16 |
+
00B000B102DB014200B400B500B600B700B80105015F00BB013D02DD013E017C
|
| 17 |
+
015400C100C2010200C40139010600C7010C00C9011800CB011A00CD00CE010E
|
| 18 |
+
01100143014700D300D4015000D600D70158016E00DA017000DC00DD016200DF
|
| 19 |
+
015500E100E2010300E4013A010700E7010D00E9011900EB011B00ED00EE010F
|
| 20 |
+
01110144014800F300F4015100F600F70159016F00FA017100FC00FD016302D9
|
janus/lib/tcl8.6/encoding/cp1251.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cp1251, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
04020403201A0453201E20262020202120AC203004092039040A040C040B040F
|
| 14 |
+
045220182019201C201D202220132014009821220459203A045A045C045B045F
|
| 15 |
+
00A0040E045E040800A4049000A600A7040100A9040400AB00AC00AD00AE0407
|
| 16 |
+
00B000B104060456049100B500B600B704512116045400BB0458040504550457
|
| 17 |
+
0410041104120413041404150416041704180419041A041B041C041D041E041F
|
| 18 |
+
0420042104220423042404250426042704280429042A042B042C042D042E042F
|
| 19 |
+
0430043104320433043404350436043704380439043A043B043C043D043E043F
|
| 20 |
+
0440044104420443044404450446044704480449044A044B044C044D044E044F
|
janus/lib/tcl8.6/encoding/cp1252.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cp1252, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
20AC0081201A0192201E20262020202102C62030016020390152008D017D008F
|
| 14 |
+
009020182019201C201D20222013201402DC21220161203A0153009D017E0178
|
| 15 |
+
00A000A100A200A300A400A500A600A700A800A900AA00AB00AC00AD00AE00AF
|
| 16 |
+
00B000B100B200B300B400B500B600B700B800B900BA00BB00BC00BD00BE00BF
|
| 17 |
+
00C000C100C200C300C400C500C600C700C800C900CA00CB00CC00CD00CE00CF
|
| 18 |
+
00D000D100D200D300D400D500D600D700D800D900DA00DB00DC00DD00DE00DF
|
| 19 |
+
00E000E100E200E300E400E500E600E700E800E900EA00EB00EC00ED00EE00EF
|
| 20 |
+
00F000F100F200F300F400F500F600F700F800F900FA00FB00FC00FD00FE00FF
|
janus/lib/tcl8.6/encoding/cp1254.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cp1254, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
20AC0081201A0192201E20262020202102C62030016020390152008D008E008F
|
| 14 |
+
009020182019201C201D20222013201402DC21220161203A0153009D009E0178
|
| 15 |
+
00A000A100A200A300A400A500A600A700A800A900AA00AB00AC00AD00AE00AF
|
| 16 |
+
00B000B100B200B300B400B500B600B700B800B900BA00BB00BC00BD00BE00BF
|
| 17 |
+
00C000C100C200C300C400C500C600C700C800C900CA00CB00CC00CD00CE00CF
|
| 18 |
+
011E00D100D200D300D400D500D600D700D800D900DA00DB00DC0130015E00DF
|
| 19 |
+
00E000E100E200E300E400E500E600E700E800E900EA00EB00EC00ED00EE00EF
|
| 20 |
+
011F00F100F200F300F400F500F600F700F800F900FA00FB00FC0131015F00FF
|
janus/lib/tcl8.6/encoding/cp1255.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cp1255, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
20AC0081201A0192201E20262020202102C62030008A2039008C008D008E008F
|
| 14 |
+
009020182019201C201D20222013201402DC2122009A203A009C009D009E009F
|
| 15 |
+
00A000A100A200A320AA00A500A600A700A800A900D700AB00AC00AD00AE00AF
|
| 16 |
+
00B000B100B200B300B400B500B600B700B800B900F700BB00BC00BD00BE00BF
|
| 17 |
+
05B005B105B205B305B405B505B605B705B805B9000005BB05BC05BD05BE05BF
|
| 18 |
+
05C005C105C205C305F005F105F205F305F40000000000000000000000000000
|
| 19 |
+
05D005D105D205D305D405D505D605D705D805D905DA05DB05DC05DD05DE05DF
|
| 20 |
+
05E005E105E205E305E405E505E605E705E805E905EA00000000200E200F0000
|
janus/lib/tcl8.6/encoding/cp1257.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cp1257, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
20AC0081201A0083201E20262020202100882030008A2039008C00A802C700B8
|
| 14 |
+
009020182019201C201D20222013201400982122009A203A009C00AF02DB009F
|
| 15 |
+
00A0000000A200A300A4000000A600A700D800A9015600AB00AC00AD00AE00C6
|
| 16 |
+
00B000B100B200B300B400B500B600B700F800B9015700BB00BC00BD00BE00E6
|
| 17 |
+
0104012E0100010600C400C501180112010C00C90179011601220136012A013B
|
| 18 |
+
01600143014500D3014C00D500D600D701720141015A016A00DC017B017D00DF
|
| 19 |
+
0105012F0101010700E400E501190113010D00E9017A011701230137012B013C
|
| 20 |
+
01610144014600F3014D00F500F600F701730142015B016B00FC017C017E02D9
|
janus/lib/tcl8.6/encoding/cp737.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cp737, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
039103920393039403950396039703980399039A039B039C039D039E039F03A0
|
| 14 |
+
03A103A303A403A503A603A703A803A903B103B203B303B403B503B603B703B8
|
| 15 |
+
03B903BA03BB03BC03BD03BE03BF03C003C103C303C203C403C503C603C703C8
|
| 16 |
+
259125922593250225242561256225562555256325512557255D255C255B2510
|
| 17 |
+
25142534252C251C2500253C255E255F255A25542569256625602550256C2567
|
| 18 |
+
2568256425652559255825522553256B256A2518250C25882584258C25902580
|
| 19 |
+
03C903AC03AD03AE03CA03AF03CC03CD03CB03CE038603880389038A038C038E
|
| 20 |
+
038F00B12265226403AA03AB00F7224800B0221900B7221A207F00B225A000A0
|
janus/lib/tcl8.6/encoding/cp775.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cp775, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
010600FC00E9010100E4012300E501070142011301560157012B017900C400C5
|
| 14 |
+
00C900E600C6014D00F6012200A2015A015B00D600DC00F800A300D800D700A4
|
| 15 |
+
0100012A00F3017B017C017A201D00A600A900AE00AC00BD00BC014100AB00BB
|
| 16 |
+
259125922593250225240104010C01180116256325512557255D012E01602510
|
| 17 |
+
25142534252C251C2500253C0172016A255A25542569256625602550256C017D
|
| 18 |
+
0105010D01190117012F01610173016B017E2518250C25882584258C25902580
|
| 19 |
+
00D300DF014C014300F500D500B5014401360137013B013C0146011201452019
|
| 20 |
+
00AD00B1201C00BE00B600A700F7201E00B0221900B700B900B300B225A000A0
|
janus/lib/tcl8.6/encoding/cp861.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cp861, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
00C700FC00E900E200E400E000E500E700EA00EB00E800D000F000DE00C400C5
|
| 14 |
+
00C900E600C600F400F600FE00FB00DD00FD00D600DC00F800A300D820A70192
|
| 15 |
+
00E100ED00F300FA00C100CD00D300DA00BF231000AC00BD00BC00A100AB00BB
|
| 16 |
+
259125922593250225242561256225562555256325512557255D255C255B2510
|
| 17 |
+
25142534252C251C2500253C255E255F255A25542569256625602550256C2567
|
| 18 |
+
2568256425652559255825522553256B256A2518250C25882584258C25902580
|
| 19 |
+
03B100DF039303C003A303C300B503C403A6039803A903B4221E03C603B52229
|
| 20 |
+
226100B1226522642320232100F7224800B0221900B7221A207F00B225A000A0
|
janus/lib/tcl8.6/encoding/cp866.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: cp866, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
0410041104120413041404150416041704180419041A041B041C041D041E041F
|
| 14 |
+
0420042104220423042404250426042704280429042A042B042C042D042E042F
|
| 15 |
+
0430043104320433043404350436043704380439043A043B043C043D043E043F
|
| 16 |
+
259125922593250225242561256225562555256325512557255D255C255B2510
|
| 17 |
+
25142534252C251C2500253C255E255F255A25542569256625602550256C2567
|
| 18 |
+
2568256425652559255825522553256B256A2518250C25882584258C25902580
|
| 19 |
+
0440044104420443044404450446044704480449044A044B044C044D044E044F
|
| 20 |
+
040104510404045404070457040E045E00B0221900B7221A211600A425A000A0
|
janus/lib/tcl8.6/encoding/cp936.enc
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
janus/lib/tcl8.6/encoding/dingbats.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: dingbats, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 1 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
00202701270227032704260E2706270727082709261B261E270C270D270E270F
|
| 8 |
+
2710271127122713271427152716271727182719271A271B271C271D271E271F
|
| 9 |
+
2720272127222723272427252726272726052729272A272B272C272D272E272F
|
| 10 |
+
2730273127322733273427352736273727382739273A273B273C273D273E273F
|
| 11 |
+
2740274127422743274427452746274727482749274A274B25CF274D25A0274F
|
| 12 |
+
27502751275225B225BC25C6275625D727582759275A275B275C275D275E007F
|
| 13 |
+
0080008100820083008400850086008700880089008A008B008C008D008E008F
|
| 14 |
+
0090009100920093009400950096009700980099009A009B009C009D009E009F
|
| 15 |
+
0000276127622763276427652766276726632666266526602460246124622463
|
| 16 |
+
2464246524662467246824692776277727782779277A277B277C277D277E277F
|
| 17 |
+
2780278127822783278427852786278727882789278A278B278C278D278E278F
|
| 18 |
+
2790279127922793279421922194219527982799279A279B279C279D279E279F
|
| 19 |
+
27A027A127A227A327A427A527A627A727A827A927AA27AB27AC27AD27AE27AF
|
| 20 |
+
000027B127B227B327B427B527B627B727B827B927BA27BB27BC27BD27BE0000
|
janus/lib/tcl8.6/encoding/euc-cn.enc
ADDED
|
@@ -0,0 +1,1397 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: euc-cn, multi-byte
|
| 2 |
+
M
|
| 3 |
+
003F 0 82
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
0020002100220023002400250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D007E007F
|
| 13 |
+
0080008100820083008400850086008700880089008A008B008C008D008E008F
|
| 14 |
+
0090009100920093009400950096009700980099009A009B009C009D009E009F
|
| 15 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 16 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 17 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 18 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 19 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 20 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 21 |
+
A1
|
| 22 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 23 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 24 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 25 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 26 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 27 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 28 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 29 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 30 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 31 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 32 |
+
000030003001300230FB02C902C700A8300330052015FF5E2225202620182019
|
| 33 |
+
201C201D3014301530083009300A300B300C300D300E300F3016301730103011
|
| 34 |
+
00B100D700F72236222722282211220F222A222922082237221A22A522252220
|
| 35 |
+
23122299222B222E2261224C2248223D221D2260226E226F22642265221E2235
|
| 36 |
+
22342642264000B0203220332103FF0400A4FFE0FFE1203000A7211626062605
|
| 37 |
+
25CB25CF25CE25C725C625A125A025B325B2203B219221902191219330130000
|
| 38 |
+
A2
|
| 39 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 40 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 41 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 42 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 43 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 44 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 45 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 46 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 47 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 48 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 49 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 50 |
+
000024882489248A248B248C248D248E248F2490249124922493249424952496
|
| 51 |
+
249724982499249A249B247424752476247724782479247A247B247C247D247E
|
| 52 |
+
247F248024812482248324842485248624872460246124622463246424652466
|
| 53 |
+
2467246824690000000032203221322232233224322532263227322832290000
|
| 54 |
+
00002160216121622163216421652166216721682169216A216B000000000000
|
| 55 |
+
A3
|
| 56 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 57 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 58 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 59 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 60 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 61 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 62 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 63 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 64 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 65 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 66 |
+
0000FF01FF02FF03FFE5FF05FF06FF07FF08FF09FF0AFF0BFF0CFF0DFF0EFF0F
|
| 67 |
+
FF10FF11FF12FF13FF14FF15FF16FF17FF18FF19FF1AFF1BFF1CFF1DFF1EFF1F
|
| 68 |
+
FF20FF21FF22FF23FF24FF25FF26FF27FF28FF29FF2AFF2BFF2CFF2DFF2EFF2F
|
| 69 |
+
FF30FF31FF32FF33FF34FF35FF36FF37FF38FF39FF3AFF3BFF3CFF3DFF3EFF3F
|
| 70 |
+
FF40FF41FF42FF43FF44FF45FF46FF47FF48FF49FF4AFF4BFF4CFF4DFF4EFF4F
|
| 71 |
+
FF50FF51FF52FF53FF54FF55FF56FF57FF58FF59FF5AFF5BFF5CFF5DFFE30000
|
| 72 |
+
A4
|
| 73 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 74 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 75 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 76 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 77 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 78 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 79 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 80 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 81 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 82 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 83 |
+
0000304130423043304430453046304730483049304A304B304C304D304E304F
|
| 84 |
+
3050305130523053305430553056305730583059305A305B305C305D305E305F
|
| 85 |
+
3060306130623063306430653066306730683069306A306B306C306D306E306F
|
| 86 |
+
3070307130723073307430753076307730783079307A307B307C307D307E307F
|
| 87 |
+
3080308130823083308430853086308730883089308A308B308C308D308E308F
|
| 88 |
+
3090309130923093000000000000000000000000000000000000000000000000
|
| 89 |
+
A5
|
| 90 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 91 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 92 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 93 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 94 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 95 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 96 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 97 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 98 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 99 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 100 |
+
000030A130A230A330A430A530A630A730A830A930AA30AB30AC30AD30AE30AF
|
| 101 |
+
30B030B130B230B330B430B530B630B730B830B930BA30BB30BC30BD30BE30BF
|
| 102 |
+
30C030C130C230C330C430C530C630C730C830C930CA30CB30CC30CD30CE30CF
|
| 103 |
+
30D030D130D230D330D430D530D630D730D830D930DA30DB30DC30DD30DE30DF
|
| 104 |
+
30E030E130E230E330E430E530E630E730E830E930EA30EB30EC30ED30EE30EF
|
| 105 |
+
30F030F130F230F330F430F530F6000000000000000000000000000000000000
|
| 106 |
+
A6
|
| 107 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 108 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 109 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 110 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 111 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 112 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 113 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 114 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 115 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 116 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 117 |
+
0000039103920393039403950396039703980399039A039B039C039D039E039F
|
| 118 |
+
03A003A103A303A403A503A603A703A803A90000000000000000000000000000
|
| 119 |
+
000003B103B203B303B403B503B603B703B803B903BA03BB03BC03BD03BE03BF
|
| 120 |
+
03C003C103C303C403C503C603C703C803C90000000000000000000000000000
|
| 121 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 122 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 123 |
+
A7
|
| 124 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 125 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 126 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 127 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 128 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 129 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 130 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 131 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 132 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 133 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 134 |
+
000004100411041204130414041504010416041704180419041A041B041C041D
|
| 135 |
+
041E041F0420042104220423042404250426042704280429042A042B042C042D
|
| 136 |
+
042E042F00000000000000000000000000000000000000000000000000000000
|
| 137 |
+
000004300431043204330434043504510436043704380439043A043B043C043D
|
| 138 |
+
043E043F0440044104420443044404450446044704480449044A044B044C044D
|
| 139 |
+
044E044F00000000000000000000000000000000000000000000000000000000
|
| 140 |
+
A8
|
| 141 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 142 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 143 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 144 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 145 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 146 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 147 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 148 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 149 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 150 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 151 |
+
0000010100E101CE00E0011300E9011B00E8012B00ED01D000EC014D00F301D2
|
| 152 |
+
00F2016B00FA01D400F901D601D801DA01DC00FC00EA00000000000000000000
|
| 153 |
+
0000000000000000000031053106310731083109310A310B310C310D310E310F
|
| 154 |
+
3110311131123113311431153116311731183119311A311B311C311D311E311F
|
| 155 |
+
3120312131223123312431253126312731283129000000000000000000000000
|
| 156 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 157 |
+
A9
|
| 158 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 159 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 160 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 161 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 162 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 163 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 164 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 165 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 166 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 167 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 168 |
+
00000000000000002500250125022503250425052506250725082509250A250B
|
| 169 |
+
250C250D250E250F2510251125122513251425152516251725182519251A251B
|
| 170 |
+
251C251D251E251F2520252125222523252425252526252725282529252A252B
|
| 171 |
+
252C252D252E252F2530253125322533253425352536253725382539253A253B
|
| 172 |
+
253C253D253E253F2540254125422543254425452546254725482549254A254B
|
| 173 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 174 |
+
B0
|
| 175 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 176 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 177 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 178 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 179 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 180 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 181 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 182 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 183 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 184 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 185 |
+
0000554A963F57C3632854CE550954C07691764C853C77EE827E788D72319698
|
| 186 |
+
978D6C285B894FFA630966975CB880FA684880AE660276CE51F9655671AC7FF1
|
| 187 |
+
888450B2596561CA6FB382AD634C625253ED54277B06516B75A45DF462D48DCB
|
| 188 |
+
9776628A8019575D97387F627238767D67CF767E64464F708D2562DC7A176591
|
| 189 |
+
73ED642C6273822C9881677F7248626E62CC4F3474E3534A529E7ECA90A65E2E
|
| 190 |
+
6886699C81807ED168D278C5868C9551508D8C2482DE80DE5305891252650000
|
| 191 |
+
B1
|
| 192 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 193 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 194 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 195 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 196 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 197 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 198 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 199 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 200 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 201 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 202 |
+
0000858496F94FDD582199715B9D62B162A566B48C799C8D7206676F789160B2
|
| 203 |
+
535153178F8880CC8D1D94A1500D72C8590760EB711988AB595482EF672C7B28
|
| 204 |
+
5D297EF7752D6CF58E668FF8903C9F3B6BD491197B145F7C78A784D6853D6BD5
|
| 205 |
+
6BD96BD65E015E8775F995ED655D5F0A5FC58F9F58C181C2907F965B97AD8FB9
|
| 206 |
+
7F168D2C62414FBF53D8535E8FA88FA98FAB904D68075F6A819888689CD6618B
|
| 207 |
+
522B762A5F6C658C6FD26EE85BBE6448517551B067C44E1979C9997C70B30000
|
| 208 |
+
B2
|
| 209 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 210 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 211 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 212 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 213 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 214 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 215 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 216 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 217 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 218 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 219 |
+
000075C55E7673BB83E064AD62E894B56CE2535A52C3640F94C27B944F2F5E1B
|
| 220 |
+
82368116818A6E246CCA9A736355535C54FA886557E04E0D5E036B657C3F90E8
|
| 221 |
+
601664E6731C88C16750624D8D22776C8E2991C75F6983DC8521991053C28695
|
| 222 |
+
6B8B60ED60E8707F82CD82314ED36CA785CF64CD7CD969FD66F9834953957B56
|
| 223 |
+
4FA7518C6D4B5C428E6D63D253C9832C833667E578B4643D5BDF5C945DEE8BE7
|
| 224 |
+
62C667F48C7A640063BA8749998B8C177F2094F24EA7961098A4660C73160000
|
| 225 |
+
B3
|
| 226 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 227 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 228 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 229 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 230 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 231 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 232 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 233 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 234 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 235 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 236 |
+
0000573A5C1D5E38957F507F80A05382655E7545553150218D856284949E671D
|
| 237 |
+
56326F6E5DE2543570928F66626F64A463A35F7B6F8890F481E38FB05C186668
|
| 238 |
+
5FF16C8996488D81886C649179F057CE6A59621054484E587A0B60E96F848BDA
|
| 239 |
+
627F901E9A8B79E4540375F4630153196C608FDF5F1B9A70803B9F7F4F885C3A
|
| 240 |
+
8D647FC565A570BD514551B2866B5D075BA062BD916C75748E0C7A2061017B79
|
| 241 |
+
4EC77EF877854E1181ED521D51FA6A7153A88E87950496CF6EC19664695A0000
|
| 242 |
+
B4
|
| 243 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 244 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 245 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 246 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 247 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 248 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 249 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 250 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 251 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 252 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 253 |
+
0000784050A877D7641089E6590463E35DDD7A7F693D4F20823955984E3275AE
|
| 254 |
+
7A975E625E8A95EF521B5439708A6376952457826625693F918755076DF37EAF
|
| 255 |
+
882262337EF075B5832878C196CC8F9E614874F78BCD6B64523A8D506B21806A
|
| 256 |
+
847156F153064ECE4E1B51D17C97918B7C074FC38E7F7BE17A9C64675D1450AC
|
| 257 |
+
810676017CB96DEC7FE067515B585BF878CB64AE641363AA632B9519642D8FBE
|
| 258 |
+
7B5476296253592754466B7950A362345E266B864EE38D37888B5F85902E0000
|
| 259 |
+
B5
|
| 260 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 261 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 262 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 263 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 264 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 265 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 266 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 267 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 268 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 269 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 270 |
+
00006020803D62C54E39535590F863B880C665E66C2E4F4660EE6DE18BDE5F39
|
| 271 |
+
86CB5F536321515A83616863520063638E4850125C9B79775BFC52307A3B60BC
|
| 272 |
+
905376D75FB75F9776848E6C706F767B7B4977AA51F3909358244F4E6EF48FEA
|
| 273 |
+
654C7B1B72C46DA47FDF5AE162B55E95573084827B2C5E1D5F1F90127F1498A0
|
| 274 |
+
63826EC7789870B95178975B57AB75354F4375385E9760E659606DC06BBF7889
|
| 275 |
+
53FC96D551CB52016389540A94938C038DCC7239789F87768FED8C0D53E00000
|
| 276 |
+
B6
|
| 277 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 278 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 279 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 280 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 281 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 282 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 283 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 284 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 285 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 286 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 287 |
+
00004E0176EF53EE948998769F0E952D5B9A8BA24E224E1C51AC846361C252A8
|
| 288 |
+
680B4F97606B51BB6D1E515C6296659796618C46901775D890FD77636BD2728A
|
| 289 |
+
72EC8BFB583577798D4C675C9540809A5EA66E2159927AEF77ED953B6BB565AD
|
| 290 |
+
7F0E58065151961F5BF958A954288E726566987F56E4949D76FE9041638754C6
|
| 291 |
+
591A593A579B8EB267358DFA8235524160F0581586FE5CE89E454FC4989D8BB9
|
| 292 |
+
5A2560765384627C904F9102997F6069800C513F80335C1499756D314E8C0000
|
| 293 |
+
B7
|
| 294 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 295 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 296 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 297 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 298 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 299 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 300 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 301 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 302 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 303 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 304 |
+
00008D3053D17F5A7B4F4F104E4F96006CD573D085E95E06756A7FFB6A0A77FE
|
| 305 |
+
94927E4151E170E653CD8FD483038D2972AF996D6CDB574A82B365B980AA623F
|
| 306 |
+
963259A84EFF8BBF7EBA653E83F2975E556198DE80A5532A8BFD542080BA5E9F
|
| 307 |
+
6CB88D3982AC915A54296C1B52067EB7575F711A6C7E7C89594B4EFD5FFF6124
|
| 308 |
+
7CAA4E305C0167AB87025CF0950B98CE75AF70FD902251AF7F1D8BBD594951E4
|
| 309 |
+
4F5B5426592B657780A45B75627662C28F905E456C1F7B264F0F4FD8670D0000
|
| 310 |
+
B8
|
| 311 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 312 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 313 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 314 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 315 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 316 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 317 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 318 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 319 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 320 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 321 |
+
00006D6E6DAA798F88B15F17752B629A8F854FEF91DC65A7812F81515E9C8150
|
| 322 |
+
8D74526F89868D4B590D50854ED8961C723681798D1F5BCC8BA3964459877F1A
|
| 323 |
+
54905676560E8BE565396982949976D66E895E727518674667D17AFF809D8D76
|
| 324 |
+
611F79C665628D635188521A94A27F38809B7EB25C976E2F67607BD9768B9AD8
|
| 325 |
+
818F7F947CD5641E95507A3F544A54E56B4C640162089E3D80F3759952729769
|
| 326 |
+
845B683C86E49601969494EC4E2A54047ED968398DDF801566F45E9A7FB90000
|
| 327 |
+
B9
|
| 328 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 329 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 330 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 331 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 332 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 333 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 334 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 335 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 336 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 337 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 338 |
+
000057C2803F68975DE5653B529F606D9F9A4F9B8EAC516C5BAB5F135DE96C5E
|
| 339 |
+
62F18D21517194A952FE6C9F82DF72D757A267848D2D591F8F9C83C754957B8D
|
| 340 |
+
4F306CBD5B6459D19F1353E486CA9AA88C3780A16545987E56FA96C7522E74DC
|
| 341 |
+
52505BE1630289024E5662D0602A68FA51735B9851A089C27BA199867F5060EF
|
| 342 |
+
704C8D2F51495E7F901B747089C4572D78455F529F9F95FA8F689B3C8BE17678
|
| 343 |
+
684267DC8DEA8D35523D8F8A6EDA68CD950590ED56FD679C88F98FC754C80000
|
| 344 |
+
BA
|
| 345 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 346 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 347 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 348 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 349 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 350 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 351 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 352 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 353 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 354 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 355 |
+
00009AB85B696D776C264EA55BB39A87916361A890AF97E9542B6DB55BD251FD
|
| 356 |
+
558A7F557FF064BC634D65F161BE608D710A6C576C49592F676D822A58D5568E
|
| 357 |
+
8C6A6BEB90DD597D801753F76D695475559D837783CF683879BE548C4F555408
|
| 358 |
+
76D28C8996026CB36DB88D6B89109E648D3A563F9ED175D55F8872E0606854FC
|
| 359 |
+
4EA86A2A886160528F7054C470D886799E3F6D2A5B8F5F187EA255894FAF7334
|
| 360 |
+
543C539A5019540E547C4E4E5FFD745A58F6846B80E1877472D07CCA6E560000
|
| 361 |
+
BB
|
| 362 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 363 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 364 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 365 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 366 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 367 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 368 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 369 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 370 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 371 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 372 |
+
00005F27864E552C62A44E926CAA623782B154D7534E733E6ED1753B52125316
|
| 373 |
+
8BDD69D05F8A60006DEE574F6B2273AF68538FD87F13636260A3552475EA8C62
|
| 374 |
+
71156DA35BA65E7B8352614C9EC478FA87577C27768751F060F6714C66435E4C
|
| 375 |
+
604D8C0E707063258F895FBD606286D456DE6BC160946167534960E066668D3F
|
| 376 |
+
79FD4F1A70E96C478BB38BF27ED88364660F5A5A9B426D516DF78C416D3B4F19
|
| 377 |
+
706B83B7621660D1970D8D27797851FB573E57FA673A75787A3D79EF7B950000
|
| 378 |
+
BC
|
| 379 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 380 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 381 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 382 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 383 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 384 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 385 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 386 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 387 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 388 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 389 |
+
0000808C99658FF96FC08BA59E2159EC7EE97F095409678168D88F917C4D96C6
|
| 390 |
+
53CA602575BE6C7253735AC97EA7632451E0810A5DF184DF628051805B634F0E
|
| 391 |
+
796D524260B86D4E5BC45BC28BA18BB065E25FCC964559937EE77EAA560967B7
|
| 392 |
+
59394F735BB652A0835A988A8D3E753294BE50477A3C4EF767B69A7E5AC16B7C
|
| 393 |
+
76D1575A5C167B3A95F4714E517C80A9827059787F04832768C067EC78B17877
|
| 394 |
+
62E363617B804FED526A51CF835069DB92748DF58D3189C1952E7BAD4EF60000
|
| 395 |
+
BD
|
| 396 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 397 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 398 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 399 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 400 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 401 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 402 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 403 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 404 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 405 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 406 |
+
0000506582305251996F6E106E856DA75EFA50F559DC5C066D466C5F7586848B
|
| 407 |
+
686859568BB253209171964D854969127901712680F64EA490CA6D479A845A07
|
| 408 |
+
56BC640594F077EB4FA5811A72E189D2997A7F347EDE527F655991758F7F8F83
|
| 409 |
+
53EB7A9663ED63A5768679F888579636622A52AB8282685467706377776B7AED
|
| 410 |
+
6D017ED389E359D0621285C982A5754C501F4ECB75A58BEB5C4A5DFE7B4B65A4
|
| 411 |
+
91D14ECA6D25895F7D2795264EC58C288FDB9773664B79818FD170EC6D780000
|
| 412 |
+
BE
|
| 413 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 414 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 415 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 416 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 417 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 418 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 419 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 420 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 421 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 422 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 423 |
+
00005C3D52B283465162830E775B66769CB84EAC60CA7CBE7CB37ECF4E958B66
|
| 424 |
+
666F988897595883656C955C5F8475C997567ADF7ADE51C070AF7A9863EA7A76
|
| 425 |
+
7EA0739697ED4E4570784E5D915253A9655165E781FC8205548E5C31759A97A0
|
| 426 |
+
62D872D975BD5C459A7983CA5C40548077E94E3E6CAE805A62D2636E5DE85177
|
| 427 |
+
8DDD8E1E952F4FF153E560E770AC526763509E435A1F5026773753777EE26485
|
| 428 |
+
652B628963985014723589C951B38BC07EDD574783CC94A7519B541B5CFB0000
|
| 429 |
+
BF
|
| 430 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 431 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 432 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 433 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 434 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 435 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 436 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 437 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 438 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 439 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 440 |
+
00004FCA7AE36D5A90E19A8F55805496536154AF5F0063E9697751EF6168520A
|
| 441 |
+
582A52D8574E780D770B5EB761777CE0625B62974EA27095800362F770E49760
|
| 442 |
+
577782DB67EF68F578D5989779D158F354B353EF6E34514B523B5BA28BFE80AF
|
| 443 |
+
554357A660735751542D7A7A60505B5463A762A053E362635BC767AF54ED7A9F
|
| 444 |
+
82E691775E9388E4593857AE630E8DE880EF57577B774FA95FEB5BBD6B3E5321
|
| 445 |
+
7B5072C2684677FF773665F751B54E8F76D45CBF7AA58475594E9B4150800000
|
| 446 |
+
C0
|
| 447 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 448 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 449 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 450 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 451 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 452 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 453 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 454 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 455 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 456 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 457 |
+
0000998861276E8357646606634656F062EC62695ED39614578362C955878721
|
| 458 |
+
814A8FA3556683B167658D5684DD5A6A680F62E67BEE961151706F9C8C3063FD
|
| 459 |
+
89C861D27F0670C26EE57405699472FC5ECA90CE67176D6A635E52B372628001
|
| 460 |
+
4F6C59E5916A70D96D9D52D24E5096F7956D857E78CA7D2F5121579264C2808B
|
| 461 |
+
7C7B6CEA68F1695E51B7539868A872819ECE7BF172F879BB6F137406674E91CC
|
| 462 |
+
9CA4793C83898354540F68174E3D538952B1783E5386522950884F8B4FD00000
|
| 463 |
+
C1
|
| 464 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 465 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 466 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 467 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 468 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 469 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 470 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 471 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 472 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 473 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 474 |
+
000075E27ACB7C926CA596B6529B748354E94FE9805483B28FDE95705EC9601C
|
| 475 |
+
6D9F5E18655B813894FE604B70BC7EC37CAE51C968817CB1826F4E248F8691CF
|
| 476 |
+
667E4EAE8C0564A9804A50DA759771CE5BE58FBD6F664E86648295635ED66599
|
| 477 |
+
521788C270C852A3730E7433679778F797164E3490BB9CDE6DCB51DB8D41541D
|
| 478 |
+
62CE73B283F196F69F8494C34F367F9A51CC707596755CAD988653E64EE46E9C
|
| 479 |
+
740969B4786B998F7559521876246D4167F3516D9F99804B54997B3C7ABF0000
|
| 480 |
+
C2
|
| 481 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 482 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 483 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 484 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 485 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 486 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 487 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 488 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 489 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 490 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 491 |
+
00009686578462E29647697C5A0464027BD36F0F964B82A6536298855E907089
|
| 492 |
+
63B35364864F9C819E93788C97328DEF8D429E7F6F5E79845F559646622E9A74
|
| 493 |
+
541594DD4FA365C55C655C617F1586516C2F5F8B73876EE47EFF5CE6631B5B6A
|
| 494 |
+
6EE653754E7163A0756562A18F6E4F264ED16CA67EB68BBA841D87BA7F57903B
|
| 495 |
+
95237BA99AA188F8843D6D1B9A867EDC59889EBB739B780186829A6C9A82561B
|
| 496 |
+
541757CB4E709EA653568FC881097792999286EE6EE1851366FC61626F2B0000
|
| 497 |
+
C3
|
| 498 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 499 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 500 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 501 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 502 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 503 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 504 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 505 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 506 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 507 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 508 |
+
00008C298292832B76F26C135FD983BD732B8305951A6BDB77DB94C6536F8302
|
| 509 |
+
51925E3D8C8C8D384E4873AB679A68859176970971646CA177095A9295416BCF
|
| 510 |
+
7F8E66275BD059B95A9A95E895F74EEC840C84996AAC76DF9530731B68A65B5F
|
| 511 |
+
772F919A97617CDC8FF78C1C5F257C7379D889C56CCC871C5BC65E4268C97720
|
| 512 |
+
7EF55195514D52C95A297F05976282D763CF778485D079D26E3A5E9959998511
|
| 513 |
+
706D6C1162BF76BF654F60AF95FD660E879F9E2394ED540D547D8C2C64780000
|
| 514 |
+
C4
|
| 515 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 516 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 517 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 518 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 519 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 520 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 521 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 522 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 523 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 524 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 525 |
+
0000647986116A21819C78E864699B5462B9672B83AB58A89ED86CAB6F205BDE
|
| 526 |
+
964C8C0B725F67D062C772614EA959C66BCD589366AE5E5552DF6155672876EE
|
| 527 |
+
776672677A4662FF54EA545094A090A35A1C7EB36C164E435976801059485357
|
| 528 |
+
753796BE56CA63208111607C95F96DD65462998151855AE980FD59AE9713502A
|
| 529 |
+
6CE55C3C62DF4F60533F817B90066EBA852B62C85E7478BE64B5637B5FF55A18
|
| 530 |
+
917F9E1F5C3F634F80425B7D556E954A954D6D8560A867E072DE51DD5B810000
|
| 531 |
+
C5
|
| 532 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 533 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 534 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 535 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 536 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 537 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 538 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 539 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 540 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 541 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 542 |
+
000062E76CDE725B626D94AE7EBD81136D53519C5F04597452AA601259736696
|
| 543 |
+
8650759F632A61E67CEF8BFA54E66B279E256BB485D5545550766CA4556A8DB4
|
| 544 |
+
722C5E156015743662CD6392724C5F986E436D3E65006F5876D878D076FC7554
|
| 545 |
+
522453DB4E535E9E65C1802A80D6629B5486522870AE888D8DD16CE1547880DA
|
| 546 |
+
57F988F48D54966A914D4F696C9B55B776C6783062A870F96F8E5F6D84EC68DA
|
| 547 |
+
787C7BF781A8670B9E4F636778B0576F78129739627962AB528874356BD70000
|
| 548 |
+
C6
|
| 549 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 550 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 551 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 552 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 553 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 554 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 555 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 556 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 557 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 558 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 559 |
+
00005564813E75B276AE533975DE50FB5C418B6C7BC7504F72479A9798D86F02
|
| 560 |
+
74E27968648777A562FC98918D2B54C180584E52576A82F9840D5E7351ED74F6
|
| 561 |
+
8BC45C4F57616CFC98875A4678349B448FEB7C955256625194FA4EC683868461
|
| 562 |
+
83E984B257D467345703666E6D668C3166DD7011671F6B3A6816621A59BB4E03
|
| 563 |
+
51C46F0667D26C8F517668CB59476B6775665D0E81109F5065D7794879419A91
|
| 564 |
+
8D775C824E5E4F01542F5951780C56686C148FC45F036C7D6CE38BAB63900000
|
| 565 |
+
C7
|
| 566 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 567 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 568 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 569 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 570 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 571 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 572 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 573 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 574 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 575 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 576 |
+
000060706D3D72756266948E94C553438FC17B7E4EDF8C264E7E9ED494B194B3
|
| 577 |
+
524D6F5C90636D458C3458115D4C6B206B4967AA545B81547F8C589985375F3A
|
| 578 |
+
62A26A47953965726084686577A74E544FA85DE7979864AC7FD85CED4FCF7A8D
|
| 579 |
+
520783044E14602F7A8394A64FB54EB279E6743452E482B964D279BD5BDD6C81
|
| 580 |
+
97528F7B6C22503E537F6E0564CE66746C3060C598778BF75E86743C7A7779CB
|
| 581 |
+
4E1890B174036C4256DA914B6CC58D8B533A86C666F28EAF5C489A716E200000
|
| 582 |
+
C8
|
| 583 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 584 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 585 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 586 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 587 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 588 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 589 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 590 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 591 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 592 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 593 |
+
000053D65A369F8B8DA353BB570898A76743919B6CC9516875CA62F372AC5238
|
| 594 |
+
529D7F3A7094763853749E4A69B7786E96C088D97FA4713671C3518967D374E4
|
| 595 |
+
58E4651856B78BA9997662707ED560F970ED58EC4EC14EBA5FCD97E74EFB8BA4
|
| 596 |
+
5203598A7EAB62544ECD65E5620E833884C98363878D71946EB65BB97ED25197
|
| 597 |
+
63C967D480898339881551125B7A59828FB14E736C5D516589258F6F962E854A
|
| 598 |
+
745E951095F06DA682E55F3164926D128428816E9CC3585E8D5B4E0953C10000
|
| 599 |
+
C9
|
| 600 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 601 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 602 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 603 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 604 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 605 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 606 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 607 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 608 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 609 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 610 |
+
00004F1E6563685155D34E2764149A9A626B5AC2745F82726DA968EE50E7838E
|
| 611 |
+
7802674052396C997EB150BB5565715E7B5B665273CA82EB67495C715220717D
|
| 612 |
+
886B95EA965564C58D6181B355846C5562477F2E58924F2455468D4F664C4E0A
|
| 613 |
+
5C1A88F368A2634E7A0D70E7828D52FA97F65C1154E890B57ECD59628D4A86C7
|
| 614 |
+
820C820D8D6664445C0461516D89793E8BBE78377533547B4F388EAB6DF15A20
|
| 615 |
+
7EC5795E6C885BA15A76751A80BE614E6E1758F0751F7525727253477EF30000
|
| 616 |
+
CA
|
| 617 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 618 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 619 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 620 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 621 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 622 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 623 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 624 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 625 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 626 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 627 |
+
0000770176DB526980DC57235E08593172EE65BD6E7F8BD75C388671534177F3
|
| 628 |
+
62FE65F64EC098DF86805B9E8BC653F277E24F7F5C4E9A7659CB5F0F793A58EB
|
| 629 |
+
4E1667FF4E8B62ED8A93901D52BF662F55DC566C90024ED54F8D91CA99706C0F
|
| 630 |
+
5E0260435BA489C68BD56536624B99965B885BFF6388552E53D77626517D852C
|
| 631 |
+
67A268B36B8A62928F9353D482126DD1758F4E668D4E5B70719F85AF669166D9
|
| 632 |
+
7F7287009ECD9F205C5E672F8FF06811675F620D7AD658855EB665706F310000
|
| 633 |
+
CB
|
| 634 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 635 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 636 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 637 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 638 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 639 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 640 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 641 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 642 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 643 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 644 |
+
000060555237800D6454887075295E05681362F4971C53CC723D8C016C347761
|
| 645 |
+
7A0E542E77AC987A821C8BF47855671470C165AF64955636601D79C153F84E1D
|
| 646 |
+
6B7B80865BFA55E356DB4F3A4F3C99725DF3677E80386002988290015B8B8BBC
|
| 647 |
+
8BF5641C825864DE55FD82CF91654FD77D20901F7C9F50F358516EAF5BBF8BC9
|
| 648 |
+
80839178849C7B97867D968B968F7EE59AD3788E5C817A57904296A7795F5B59
|
| 649 |
+
635F7B0B84D168AD55067F2974107D2295016240584C4ED65B83597958540000
|
| 650 |
+
CC
|
| 651 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 652 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 653 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 654 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 655 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 656 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 657 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 658 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 659 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 660 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 661 |
+
0000736D631E8E4B8E0F80CE82D462AC53F06CF0915E592A60016C70574D644A
|
| 662 |
+
8D2A762B6EE9575B6A8075F06F6D8C2D8C0857666BEF889278B363A253F970AD
|
| 663 |
+
6C645858642A580268E0819B55107CD650188EBA6DCC8D9F70EB638F6D9B6ED4
|
| 664 |
+
7EE68404684390036DD896768BA85957727985E4817E75BC8A8A68AF52548E22
|
| 665 |
+
951163D098988E44557C4F5366FF568F60D56D9552435C4959296DFB586B7530
|
| 666 |
+
751C606C82148146631167618FE2773A8DF38D3494C15E165385542C70C30000
|
| 667 |
+
CD
|
| 668 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 669 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 670 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 671 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 672 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 673 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 674 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 675 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 676 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 677 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 678 |
+
00006C405EF7505C4EAD5EAD633A8247901A6850916E77B3540C94DC5F647AE5
|
| 679 |
+
687663457B527EDF75DB507762955934900F51F879C37A8156FE5F9290146D82
|
| 680 |
+
5C60571F541051546E4D56E263A89893817F8715892A9000541E5C6F81C062D6
|
| 681 |
+
625881319E3596409A6E9A7C692D59A562D3553E631654C786D96D3C5A0374E6
|
| 682 |
+
889C6B6A59168C4C5F2F6E7E73A9987D4E3870F75B8C7897633D665A769660CB
|
| 683 |
+
5B9B5A494E0781556C6A738B4EA167897F515F8065FA671B5FD859845A010000
|
| 684 |
+
CE
|
| 685 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 686 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 687 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 688 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 689 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 690 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 691 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 692 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 693 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 694 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 695 |
+
00005DCD5FAE537197E68FDD684556F4552F60DF4E3A6F4D7EF482C7840E59D4
|
| 696 |
+
4F1F4F2A5C3E7EAC672A851A5473754F80C355829B4F4F4D6E2D8C135C096170
|
| 697 |
+
536B761F6E29868A658795FB7EB9543B7A337D0A95EE55E17FC174EE631D8717
|
| 698 |
+
6DA17A9D621165A1536763E16C835DEB545C94A84E4C6C618BEC5C4B65E0829C
|
| 699 |
+
68A7543E54346BCB6B664E9463425348821E4F0D4FAE575E620A96FE66647269
|
| 700 |
+
52FF52A1609F8BEF661471996790897F785277FD6670563B54389521727A0000
|
| 701 |
+
CF
|
| 702 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 703 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 704 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 705 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 706 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 707 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 708 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 709 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 710 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 711 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 712 |
+
00007A00606F5E0C6089819D591560DC718470EF6EAA6C5072806A8488AD5E2D
|
| 713 |
+
4E605AB3559C94E36D177CFB9699620F7EC6778E867E5323971E8F9666875CE1
|
| 714 |
+
4FA072ED4E0B53A6590F54136380952851484ED99C9C7EA454B88D2488548237
|
| 715 |
+
95F26D8E5F265ACC663E966973B0732E53BF817A99857FA15BAA967796507EBF
|
| 716 |
+
76F853A2957699997BB189446E584E617FD479658BE660F354CD4EAB98795DF7
|
| 717 |
+
6A6150CF54118C618427785D9704524A54EE56A395006D885BB56DC666530000
|
| 718 |
+
D0
|
| 719 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 720 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 721 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 722 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 723 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 724 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 725 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 726 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 727 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 728 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 729 |
+
00005C0F5B5D6821809655787B11654869544E9B6B47874E978B534F631F643A
|
| 730 |
+
90AA659C80C18C10519968B0537887F961C86CC46CFB8C225C5185AA82AF950C
|
| 731 |
+
6B238F9B65B05FFB5FC34FE18845661F8165732960FA51745211578B5F6290A2
|
| 732 |
+
884C91925E78674F602759D3514451F680F853086C7996C4718A4F114FEE7F9E
|
| 733 |
+
673D55C5950879C088967EE3589F620C9700865A5618987B5F908BB884C49157
|
| 734 |
+
53D965ED5E8F755C60647D6E5A7F7EEA7EED8F6955A75BA360AC65CB73840000
|
| 735 |
+
D1
|
| 736 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 737 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 738 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 739 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 740 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 741 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 742 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 743 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 744 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 745 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 746 |
+
00009009766377297EDA9774859B5B667A7496EA884052CB718F5FAA65EC8BE2
|
| 747 |
+
5BFB9A6F5DE16B896C5B8BAD8BAF900A8FC5538B62BC9E269E2D54404E2B82BD
|
| 748 |
+
7259869C5D1688596DAF96C554D14E9A8BB6710954BD960970DF6DF976D04E25
|
| 749 |
+
781487125CA95EF68A00989C960E708E6CBF594463A9773C884D6F1482735830
|
| 750 |
+
71D5538C781A96C155015F6671305BB48C1A9A8C6B83592E9E2F79E76768626C
|
| 751 |
+
4F6F75A17F8A6D0B96336C274EF075D2517B68376F3E90808170599674760000
|
| 752 |
+
D2
|
| 753 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 754 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 755 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 756 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 757 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 758 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 759 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 760 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 761 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 762 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 763 |
+
000064475C2790657A918C2359DA54AC8200836F898180006930564E80367237
|
| 764 |
+
91CE51B64E5F987563964E1A53F666F3814B591C6DB24E0058F9533B63D694F1
|
| 765 |
+
4F9D4F0A886398905937905779FB4EEA80F075916C825B9C59E85F5D69058681
|
| 766 |
+
501A5DF24E5977E34EE5827A6291661390915C794EBF5F7981C69038808475AB
|
| 767 |
+
4EA688D4610F6BC55FC64E4976CA6EA28BE38BAE8C0A8BD15F027FFC7FCC7ECE
|
| 768 |
+
8335836B56E06BB797F3963459FB541F94F66DEB5BC5996E5C395F1596900000
|
| 769 |
+
D3
|
| 770 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 771 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 772 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 773 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 774 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 775 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 776 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 777 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 778 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 779 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 780 |
+
0000537082F16A315A749E705E947F2883B984248425836787478FCE8D6276C8
|
| 781 |
+
5F719896786C662054DF62E54F6381C375C85EB896CD8E0A86F9548F6CF36D8C
|
| 782 |
+
6C38607F52C775285E7D4F1860A05FE75C24753190AE94C072B96CB96E389149
|
| 783 |
+
670953CB53F34F5191C98BF153C85E7C8FC26DE44E8E76C26986865E611A8206
|
| 784 |
+
4F594FDE903E9C7C61096E1D6E1496854E885A3196E84E0E5C7F79B95B878BED
|
| 785 |
+
7FBD738957DF828B90C15401904755BB5CEA5FA161086B3272F180B28A890000
|
| 786 |
+
D4
|
| 787 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 788 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 789 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 790 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 791 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 792 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 793 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 794 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 795 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 796 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 797 |
+
00006D745BD388D598848C6B9A6D9E336E0A51A4514357A38881539F63F48F95
|
| 798 |
+
56ED54585706733F6E907F188FDC82D1613F6028966266F07EA68D8A8DC394A5
|
| 799 |
+
5CB37CA4670860A6960580184E9190E75300966851418FD08574915D665597F5
|
| 800 |
+
5B55531D78386742683D54C9707E5BB08F7D518D572854B1651266828D5E8D43
|
| 801 |
+
810F846C906D7CDF51FF85FB67A365E96FA186A48E81566A90207682707671E5
|
| 802 |
+
8D2362E952196CFD8D3C600E589E618E66FE8D60624E55B36E23672D8F670000
|
| 803 |
+
D5
|
| 804 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 805 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 806 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 807 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 808 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 809 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 810 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 811 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 812 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 813 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 814 |
+
000094E195F87728680569A8548B4E4D70B88BC86458658B5B857A84503A5BE8
|
| 815 |
+
77BB6BE18A797C986CBE76CF65A98F975D2D5C5586386808536062187AD96E5B
|
| 816 |
+
7EFD6A1F7AE05F706F335F20638C6DA867564E085E108D264ED780C07634969C
|
| 817 |
+
62DB662D627E6CBC8D7571677F695146808753EC906E629854F286F08F998005
|
| 818 |
+
951785178FD96D5973CD659F771F7504782781FB8D1E94884FA6679575B98BCA
|
| 819 |
+
9707632F9547963584B8632377415F8172F04E896014657462EF6B63653F0000
|
| 820 |
+
D6
|
| 821 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 822 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 823 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 824 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 825 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 826 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 827 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 828 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 829 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 830 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 831 |
+
00005E2775C790D18BC1829D679D652F5431871877E580A281026C414E4B7EC7
|
| 832 |
+
804C76F4690D6B966267503C4F84574063076B628DBE53EA65E87EB85FD7631A
|
| 833 |
+
63B781F381F47F6E5E1C5CD95236667A79E97A1A8D28709975D46EDE6CBB7A92
|
| 834 |
+
4E2D76C55FE0949F88777EC879CD80BF91CD4EF24F17821F54685DDE6D328BCC
|
| 835 |
+
7CA58F7480985E1A549276B15B99663C9AA473E0682A86DB6731732A8BF88BDB
|
| 836 |
+
90107AF970DB716E62C477A956314E3B845767F152A986C08D2E94F87B510000
|
| 837 |
+
D7
|
| 838 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 839 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 840 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 841 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 842 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 843 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 844 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 845 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 846 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 847 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 848 |
+
00004F4F6CE8795D9A7B6293722A62FD4E1378168F6C64B08D5A7BC668695E84
|
| 849 |
+
88C55986649E58EE72B6690E95258FFD8D5857607F008C0651C6634962D95353
|
| 850 |
+
684C74228301914C55447740707C6D4A517954A88D4459FF6ECB6DC45B5C7D2B
|
| 851 |
+
4ED47C7D6ED35B5081EA6E0D5B579B0368D58E2A5B977EFC603B7EB590B98D70
|
| 852 |
+
594F63CD79DF8DB3535265CF79568BC5963B7EC494BB7E825634918967007F6A
|
| 853 |
+
5C0A907566285DE64F5067DE505A4F5C57505EA7000000000000000000000000
|
| 854 |
+
D8
|
| 855 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 856 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 857 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 858 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 859 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 860 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 861 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 862 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 863 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 864 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 865 |
+
00004E8D4E0C51404E105EFF53454E154E984E1E9B325B6C56694E2879BA4E3F
|
| 866 |
+
53154E47592D723B536E6C1056DF80E499976BD3777E9F174E364E9F9F104E5C
|
| 867 |
+
4E694E9382885B5B556C560F4EC4538D539D53A353A553AE97658D5D531A53F5
|
| 868 |
+
5326532E533E8D5C5366536352025208520E522D5233523F5240524C525E5261
|
| 869 |
+
525C84AF527D528252815290529351827F544EBB4EC34EC94EC24EE84EE14EEB
|
| 870 |
+
4EDE4F1B4EF34F224F644EF54F254F274F094F2B4F5E4F6765384F5A4F5D0000
|
| 871 |
+
D9
|
| 872 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 873 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 874 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 875 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 876 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 877 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 878 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 879 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 880 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 881 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 882 |
+
00004F5F4F574F324F3D4F764F744F914F894F834F8F4F7E4F7B4FAA4F7C4FAC
|
| 883 |
+
4F944FE64FE84FEA4FC54FDA4FE34FDC4FD14FDF4FF85029504C4FF3502C500F
|
| 884 |
+
502E502D4FFE501C500C50255028507E504350555048504E506C507B50A550A7
|
| 885 |
+
50A950BA50D6510650ED50EC50E650EE5107510B4EDD6C3D4F584F654FCE9FA0
|
| 886 |
+
6C467C74516E5DFD9EC999985181591452F9530D8A07531051EB591951554EA0
|
| 887 |
+
51564EB3886E88A44EB5811488D279805B3488037FB851AB51B151BD51BC0000
|
| 888 |
+
DA
|
| 889 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 890 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 891 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 892 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 893 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 894 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 895 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 896 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 897 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 898 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 899 |
+
000051C7519651A251A58BA08BA68BA78BAA8BB48BB58BB78BC28BC38BCB8BCF
|
| 900 |
+
8BCE8BD28BD38BD48BD68BD88BD98BDC8BDF8BE08BE48BE88BE98BEE8BF08BF3
|
| 901 |
+
8BF68BF98BFC8BFF8C008C028C048C078C0C8C0F8C118C128C148C158C168C19
|
| 902 |
+
8C1B8C188C1D8C1F8C208C218C258C278C2A8C2B8C2E8C2F8C328C338C358C36
|
| 903 |
+
5369537A961D962296219631962A963D963C964296499654965F9667966C9672
|
| 904 |
+
96749688968D969796B09097909B909D909990AC90A190B490B390B690BA0000
|
| 905 |
+
DB
|
| 906 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 907 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 908 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 909 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 910 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 911 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 912 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 913 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 914 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 915 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 916 |
+
000090B890B090CF90C590BE90D090C490C790D390E690E290DC90D790DB90EB
|
| 917 |
+
90EF90FE91049122911E91239131912F913991439146520D594252A252AC52AD
|
| 918 |
+
52BE54FF52D052D652F053DF71EE77CD5EF451F551FC9B2F53B65F01755A5DEF
|
| 919 |
+
574C57A957A1587E58BC58C558D15729572C572A57335739572E572F575C573B
|
| 920 |
+
574257695785576B5786577C577B5768576D5776577357AD57A4578C57B257CF
|
| 921 |
+
57A757B4579357A057D557D857DA57D957D257B857F457EF57F857E457DD0000
|
| 922 |
+
DC
|
| 923 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 924 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 925 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 926 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 927 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 928 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 929 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 930 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 931 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 932 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 933 |
+
0000580B580D57FD57ED5800581E5819584458205865586C58815889589A5880
|
| 934 |
+
99A89F1961FF8279827D827F828F828A82A88284828E82918297829982AB82B8
|
| 935 |
+
82BE82B082C882CA82E3829882B782AE82CB82CC82C182A982B482A182AA829F
|
| 936 |
+
82C482CE82A482E1830982F782E4830F830782DC82F482D282D8830C82FB82D3
|
| 937 |
+
8311831A83068314831582E082D5831C8351835B835C83088392833C83348331
|
| 938 |
+
839B835E832F834F83478343835F834083178360832D833A8333836683650000
|
| 939 |
+
DD
|
| 940 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 941 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 942 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 943 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 944 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 945 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 946 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 947 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 948 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 949 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 950 |
+
00008368831B8369836C836A836D836E83B0837883B383B483A083AA8393839C
|
| 951 |
+
8385837C83B683A9837D83B8837B8398839E83A883BA83BC83C1840183E583D8
|
| 952 |
+
58078418840B83DD83FD83D6841C84388411840683D483DF840F840383F883F9
|
| 953 |
+
83EA83C583C0842683F083E1845C8451845A8459847384878488847A84898478
|
| 954 |
+
843C844684698476848C848E8431846D84C184CD84D084E684BD84D384CA84BF
|
| 955 |
+
84BA84E084A184B984B4849784E584E3850C750D853884F08539851F853A0000
|
| 956 |
+
DE
|
| 957 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 958 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 959 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 960 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 961 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 962 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 963 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 964 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 965 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 966 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 967 |
+
00008556853B84FF84FC8559854885688564855E857A77A285438572857B85A4
|
| 968 |
+
85A88587858F857985AE859C858585B985B785B085D385C185DC85FF86278605
|
| 969 |
+
86298616863C5EFE5F08593C594180375955595A5958530F5C225C255C2C5C34
|
| 970 |
+
624C626A629F62BB62CA62DA62D762EE632262F66339634B634363AD63F66371
|
| 971 |
+
637A638E63B4636D63AC638A636963AE63BC63F263F863E063FF63C463DE63CE
|
| 972 |
+
645263C663BE64456441640B641B6420640C64266421645E6484646D64960000
|
| 973 |
+
DF
|
| 974 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 975 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 976 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 977 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 978 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 979 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 980 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 981 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 982 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 983 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 984 |
+
0000647A64B764B8649964BA64C064D064D764E464E265096525652E5F0B5FD2
|
| 985 |
+
75195F11535F53F153FD53E953E853FB541254165406544B5452545354545456
|
| 986 |
+
54435421545754595423543254825494547754715464549A549B548454765466
|
| 987 |
+
549D54D054AD54C254B454D254A754A654D354D4547254A354D554BB54BF54CC
|
| 988 |
+
54D954DA54DC54A954AA54A454DD54CF54DE551B54E7552054FD551454F35522
|
| 989 |
+
5523550F55115527552A5567558F55B55549556D55415555553F5550553C0000
|
| 990 |
+
E0
|
| 991 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 992 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 993 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 994 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 995 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 996 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 997 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 998 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 999 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1000 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1001 |
+
00005537555655755576557755335530555C558B55D2558355B155B955885581
|
| 1002 |
+
559F557E55D65591557B55DF55BD55BE5594559955EA55F755C9561F55D155EB
|
| 1003 |
+
55EC55D455E655DD55C455EF55E555F255F355CC55CD55E855F555E48F94561E
|
| 1004 |
+
5608560C56015624562355FE56005627562D565856395657562C564D56625659
|
| 1005 |
+
565C564C5654568656645671566B567B567C5685569356AF56D456D756DD56E1
|
| 1006 |
+
56F556EB56F956FF5704570A5709571C5E0F5E195E145E115E315E3B5E3C0000
|
| 1007 |
+
E1
|
| 1008 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1009 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1010 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1011 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1012 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1013 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1014 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1015 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1016 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1017 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1018 |
+
00005E375E445E545E5B5E5E5E615C8C5C7A5C8D5C905C965C885C985C995C91
|
| 1019 |
+
5C9A5C9C5CB55CA25CBD5CAC5CAB5CB15CA35CC15CB75CC45CD25CE45CCB5CE5
|
| 1020 |
+
5D025D035D275D265D2E5D245D1E5D065D1B5D585D3E5D345D3D5D6C5D5B5D6F
|
| 1021 |
+
5D5D5D6B5D4B5D4A5D695D745D825D995D9D8C735DB75DC55F735F775F825F87
|
| 1022 |
+
5F895F8C5F955F995F9C5FA85FAD5FB55FBC88625F6172AD72B072B472B772B8
|
| 1023 |
+
72C372C172CE72CD72D272E872EF72E972F272F472F7730172F3730372FA0000
|
| 1024 |
+
E2
|
| 1025 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1026 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1027 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1028 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1029 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1030 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1031 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1032 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1033 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1034 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1035 |
+
000072FB731773137321730A731E731D7315732273397325732C733873317350
|
| 1036 |
+
734D73577360736C736F737E821B592598E7592459029963996799689969996A
|
| 1037 |
+
996B996C99749977997D998099849987998A998D999099919993999499955E80
|
| 1038 |
+
5E915E8B5E965EA55EA05EB95EB55EBE5EB38D535ED25ED15EDB5EE85EEA81BA
|
| 1039 |
+
5FC45FC95FD65FCF60035FEE60045FE15FE45FFE600560065FEA5FED5FF86019
|
| 1040 |
+
60356026601B600F600D6029602B600A603F602160786079607B607A60420000
|
| 1041 |
+
E3
|
| 1042 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1043 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1044 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1045 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1046 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1047 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1048 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1049 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1050 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1051 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1052 |
+
0000606A607D6096609A60AD609D60836092608C609B60EC60BB60B160DD60D8
|
| 1053 |
+
60C660DA60B4612061266115612360F46100610E612B614A617561AC619461A7
|
| 1054 |
+
61B761D461F55FDD96B395E995EB95F195F395F595F695FC95FE960396049606
|
| 1055 |
+
9608960A960B960C960D960F96129615961696179619961A4E2C723F62156C35
|
| 1056 |
+
6C546C5C6C4A6CA36C856C906C946C8C6C686C696C746C766C866CA96CD06CD4
|
| 1057 |
+
6CAD6CF76CF86CF16CD76CB26CE06CD66CFA6CEB6CEE6CB16CD36CEF6CFE0000
|
| 1058 |
+
E4
|
| 1059 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1060 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1061 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1062 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1063 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1064 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1065 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1066 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1067 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1068 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1069 |
+
00006D396D276D0C6D436D486D076D046D196D0E6D2B6D4D6D2E6D356D1A6D4F
|
| 1070 |
+
6D526D546D336D916D6F6D9E6DA06D5E6D936D946D5C6D606D7C6D636E1A6DC7
|
| 1071 |
+
6DC56DDE6E0E6DBF6DE06E116DE66DDD6DD96E166DAB6E0C6DAE6E2B6E6E6E4E
|
| 1072 |
+
6E6B6EB26E5F6E866E536E546E326E256E446EDF6EB16E986EE06F2D6EE26EA5
|
| 1073 |
+
6EA76EBD6EBB6EB76ED76EB46ECF6E8F6EC26E9F6F626F466F476F246F156EF9
|
| 1074 |
+
6F2F6F366F4B6F746F2A6F096F296F896F8D6F8C6F786F726F7C6F7A6FD10000
|
| 1075 |
+
E5
|
| 1076 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1077 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1078 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1079 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1080 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1081 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1082 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1083 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1084 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1085 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1086 |
+
00006FC96FA76FB96FB66FC26FE16FEE6FDE6FE06FEF701A7023701B70397035
|
| 1087 |
+
704F705E5B805B845B955B935BA55BB8752F9A9E64345BE45BEE89305BF08E47
|
| 1088 |
+
8B078FB68FD38FD58FE58FEE8FE48FE98FE68FF38FE890059004900B90269011
|
| 1089 |
+
900D9016902190359036902D902F9044905190529050906890589062905B66B9
|
| 1090 |
+
9074907D908290889083908B5F505F575F565F585C3B54AB5C505C595B715C63
|
| 1091 |
+
5C667FBC5F2A5F295F2D82745F3C9B3B5C6E59815983598D59A959AA59A30000
|
| 1092 |
+
E6
|
| 1093 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1094 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1095 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1096 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1097 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1098 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1099 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1100 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1101 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1102 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1103 |
+
0000599759CA59AB599E59A459D259B259AF59D759BE5A055A0659DD5A0859E3
|
| 1104 |
+
59D859F95A0C5A095A325A345A115A235A135A405A675A4A5A555A3C5A625A75
|
| 1105 |
+
80EC5AAA5A9B5A775A7A5ABE5AEB5AB25AD25AD45AB85AE05AE35AF15AD65AE6
|
| 1106 |
+
5AD85ADC5B095B175B165B325B375B405C155C1C5B5A5B655B735B515B535B62
|
| 1107 |
+
9A759A779A789A7A9A7F9A7D9A809A819A859A889A8A9A909A929A939A969A98
|
| 1108 |
+
9A9B9A9C9A9D9A9F9AA09AA29AA39AA59AA77E9F7EA17EA37EA57EA87EA90000
|
| 1109 |
+
E7
|
| 1110 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1111 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1112 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1113 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1114 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1115 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1116 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1117 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1118 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1119 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1120 |
+
00007EAD7EB07EBE7EC07EC17EC27EC97ECB7ECC7ED07ED47ED77EDB7EE07EE1
|
| 1121 |
+
7EE87EEB7EEE7EEF7EF17EF27F0D7EF67EFA7EFB7EFE7F017F027F037F077F08
|
| 1122 |
+
7F0B7F0C7F0F7F117F127F177F197F1C7F1B7F1F7F217F227F237F247F257F26
|
| 1123 |
+
7F277F2A7F2B7F2C7F2D7F2F7F307F317F327F337F355E7A757F5DDB753E9095
|
| 1124 |
+
738E739173AE73A2739F73CF73C273D173B773B373C073C973C873E573D9987C
|
| 1125 |
+
740A73E973E773DE73BA73F2740F742A745B7426742574287430742E742C0000
|
| 1126 |
+
E8
|
| 1127 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1128 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1129 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1130 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1131 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1132 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1133 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1134 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1135 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1136 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1137 |
+
0000741B741A7441745C7457745574597477746D747E749C748E748074817487
|
| 1138 |
+
748B749E74A874A9749074A774D274BA97EA97EB97EC674C6753675E67486769
|
| 1139 |
+
67A56787676A6773679867A7677567A8679E67AD678B6777677C67F0680967D8
|
| 1140 |
+
680A67E967B0680C67D967B567DA67B367DD680067C367B867E2680E67C167FD
|
| 1141 |
+
6832683368606861684E6862684468646883681D68556866684168676840683E
|
| 1142 |
+
684A6849682968B5688F687468776893686B68C2696E68FC691F692068F90000
|
| 1143 |
+
E9
|
| 1144 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1145 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1146 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1147 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1148 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1149 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1150 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1151 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1152 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1153 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1154 |
+
0000692468F0690B6901695768E369106971693969606942695D6984696B6980
|
| 1155 |
+
69986978693469CC6987698869CE6989696669636979699B69A769BB69AB69AD
|
| 1156 |
+
69D469B169C169CA69DF699569E0698D69FF6A2F69ED6A176A186A6569F26A44
|
| 1157 |
+
6A3E6AA06A506A5B6A356A8E6A796A3D6A286A586A7C6A916A906AA96A976AAB
|
| 1158 |
+
733773526B816B826B876B846B926B936B8D6B9A6B9B6BA16BAA8F6B8F6D8F71
|
| 1159 |
+
8F728F738F758F768F788F778F798F7A8F7C8F7E8F818F828F848F878F8B0000
|
| 1160 |
+
EA
|
| 1161 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1162 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1163 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1164 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1165 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1166 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1167 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1168 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1169 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1170 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1171 |
+
00008F8D8F8E8F8F8F988F9A8ECE620B6217621B621F6222622162256224622C
|
| 1172 |
+
81E774EF74F474FF750F75117513653465EE65EF65F0660A6619677266036615
|
| 1173 |
+
6600708566F7661D66346631663666358006665F66546641664F665666616657
|
| 1174 |
+
66776684668C66A7669D66BE66DB66DC66E666E98D328D338D368D3B8D3D8D40
|
| 1175 |
+
8D458D468D488D498D478D4D8D558D5989C789CA89CB89CC89CE89CF89D089D1
|
| 1176 |
+
726E729F725D7266726F727E727F7284728B728D728F72926308633263B00000
|
| 1177 |
+
EB
|
| 1178 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1179 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1180 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1181 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1182 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1183 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1184 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1185 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1186 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1187 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1188 |
+
0000643F64D880046BEA6BF36BFD6BF56BF96C056C076C066C0D6C156C186C19
|
| 1189 |
+
6C1A6C216C296C246C2A6C3265356555656B724D72527256723086625216809F
|
| 1190 |
+
809C809380BC670A80BD80B180AB80AD80B480B780E780E880E980EA80DB80C2
|
| 1191 |
+
80C480D980CD80D7671080DD80EB80F180F480ED810D810E80F280FC67158112
|
| 1192 |
+
8C5A8136811E812C811881328148814C815381748159815A817181608169817C
|
| 1193 |
+
817D816D8167584D5AB58188818281916ED581A381AA81CC672681CA81BB0000
|
| 1194 |
+
EC
|
| 1195 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1196 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1197 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1198 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1199 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1200 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1201 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1202 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1203 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1204 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1205 |
+
000081C181A66B246B376B396B436B466B5998D198D298D398D598D998DA6BB3
|
| 1206 |
+
5F406BC289F365909F51659365BC65C665C465C365CC65CE65D265D67080709C
|
| 1207 |
+
7096709D70BB70C070B770AB70B170E870CA711071137116712F71317173715C
|
| 1208 |
+
716871457172714A7178717A719871B371B571A871A071E071D471E771F9721D
|
| 1209 |
+
7228706C7118716671B9623E623D624362486249793B794079467949795B795C
|
| 1210 |
+
7953795A796279577960796F7967797A7985798A799A79A779B35FD15FD00000
|
| 1211 |
+
ED
|
| 1212 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1213 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1214 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1215 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1216 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1217 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1218 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1219 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1220 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1221 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1222 |
+
0000603C605D605A606760416059606360AB6106610D615D61A9619D61CB61D1
|
| 1223 |
+
62068080807F6C936CF66DFC77F677F87800780978177818781165AB782D781C
|
| 1224 |
+
781D7839783A783B781F783C7825782C78237829784E786D7856785778267850
|
| 1225 |
+
7847784C786A789B7893789A7887789C78A178A378B278B978A578D478D978C9
|
| 1226 |
+
78EC78F2790578F479137924791E79349F9B9EF99EFB9EFC76F17704770D76F9
|
| 1227 |
+
77077708771A77227719772D7726773577387750775177477743775A77680000
|
| 1228 |
+
EE
|
| 1229 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1230 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1231 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1232 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1233 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1234 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1235 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1236 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1237 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1238 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1239 |
+
000077627765777F778D777D7780778C7791779F77A077B077B577BD753A7540
|
| 1240 |
+
754E754B7548755B7572757975837F587F617F5F8A487F687F747F717F797F81
|
| 1241 |
+
7F7E76CD76E58832948594869487948B948A948C948D948F9490949494979495
|
| 1242 |
+
949A949B949C94A394A494AB94AA94AD94AC94AF94B094B294B494B694B794B8
|
| 1243 |
+
94B994BA94BC94BD94BF94C494C894C994CA94CB94CC94CD94CE94D094D194D2
|
| 1244 |
+
94D594D694D794D994D894DB94DE94DF94E094E294E494E594E794E894EA0000
|
| 1245 |
+
EF
|
| 1246 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1247 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1248 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1249 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1250 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1251 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1252 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1253 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1254 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1255 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1256 |
+
000094E994EB94EE94EF94F394F494F594F794F994FC94FD94FF950395029506
|
| 1257 |
+
95079509950A950D950E950F951295139514951595169518951B951D951E951F
|
| 1258 |
+
9522952A952B9529952C953195329534953695379538953C953E953F95429535
|
| 1259 |
+
9544954595469549954C954E954F9552955395549556955795589559955B955E
|
| 1260 |
+
955F955D95619562956495659566956795689569956A956B956C956F95719572
|
| 1261 |
+
9573953A77E777EC96C979D579ED79E379EB7A065D477A037A027A1E7A140000
|
| 1262 |
+
F0
|
| 1263 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1264 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1265 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1266 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1267 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1268 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1269 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1270 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1271 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1272 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1273 |
+
00007A397A377A519ECF99A57A707688768E7693769976A474DE74E0752C9E20
|
| 1274 |
+
9E229E289E299E2A9E2B9E2C9E329E319E369E389E379E399E3A9E3E9E419E42
|
| 1275 |
+
9E449E469E479E489E499E4B9E4C9E4E9E519E559E579E5A9E5B9E5C9E5E9E63
|
| 1276 |
+
9E669E679E689E699E6A9E6B9E6C9E719E6D9E7375927594759675A0759D75AC
|
| 1277 |
+
75A375B375B475B875C475B175B075C375C275D675CD75E375E875E675E475EB
|
| 1278 |
+
75E7760375F175FC75FF761076007605760C7617760A76257618761576190000
|
| 1279 |
+
F1
|
| 1280 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1281 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1282 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1283 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1284 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1285 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1286 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1287 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1288 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1289 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1290 |
+
0000761B763C762276207640762D7630763F76357643763E7633764D765E7654
|
| 1291 |
+
765C7656766B766F7FCA7AE67A787A797A807A867A887A957AA67AA07AAC7AA8
|
| 1292 |
+
7AAD7AB3886488698872887D887F888288A288C688B788BC88C988E288CE88E3
|
| 1293 |
+
88E588F1891A88FC88E888FE88F0892189198913891B890A8934892B89368941
|
| 1294 |
+
8966897B758B80E576B276B477DC801280148016801C80208022802580268027
|
| 1295 |
+
802980288031800B803580438046804D80528069807189839878988098830000
|
| 1296 |
+
F2
|
| 1297 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1298 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1299 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1300 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1301 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1302 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1303 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1304 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1305 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1306 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1307 |
+
00009889988C988D988F9894989A989B989E989F98A198A298A598A6864D8654
|
| 1308 |
+
866C866E867F867A867C867B86A8868D868B86AC869D86A786A386AA869386A9
|
| 1309 |
+
86B686C486B586CE86B086BA86B186AF86C986CF86B486E986F186F286ED86F3
|
| 1310 |
+
86D0871386DE86F486DF86D886D18703870786F88708870A870D87098723873B
|
| 1311 |
+
871E8725872E871A873E87488734873187298737873F87828722877D877E877B
|
| 1312 |
+
87608770874C876E878B87538763877C876487598765879387AF87A887D20000
|
| 1313 |
+
F3
|
| 1314 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1315 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1316 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1317 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1318 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1319 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1320 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1321 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1322 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1323 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1324 |
+
000087C68788878587AD8797878387AB87E587AC87B587B387CB87D387BD87D1
|
| 1325 |
+
87C087CA87DB87EA87E087EE8816881387FE880A881B88218839883C7F367F42
|
| 1326 |
+
7F447F4582107AFA7AFD7B087B037B047B157B0A7B2B7B0F7B477B387B2A7B19
|
| 1327 |
+
7B2E7B317B207B257B247B337B3E7B1E7B587B5A7B457B757B4C7B5D7B607B6E
|
| 1328 |
+
7B7B7B627B727B717B907BA67BA77BB87BAC7B9D7BA87B857BAA7B9C7BA27BAB
|
| 1329 |
+
7BB47BD17BC17BCC7BDD7BDA7BE57BE67BEA7C0C7BFE7BFC7C0F7C167C0B0000
|
| 1330 |
+
F4
|
| 1331 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1332 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1333 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1334 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1335 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1336 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1337 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1338 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1339 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1340 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1341 |
+
00007C1F7C2A7C267C387C417C4081FE82018202820481EC8844822182228223
|
| 1342 |
+
822D822F8228822B8238823B82338234823E82448249824B824F825A825F8268
|
| 1343 |
+
887E8885888888D888DF895E7F9D7F9F7FA77FAF7FB07FB27C7C65497C917C9D
|
| 1344 |
+
7C9C7C9E7CA27CB27CBC7CBD7CC17CC77CCC7CCD7CC87CC57CD77CE8826E66A8
|
| 1345 |
+
7FBF7FCE7FD57FE57FE17FE67FE97FEE7FF37CF87D777DA67DAE7E477E9B9EB8
|
| 1346 |
+
9EB48D738D848D948D918DB18D678D6D8C478C49914A9150914E914F91640000
|
| 1347 |
+
F5
|
| 1348 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1349 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1350 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1351 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1352 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1353 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1354 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1355 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1356 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1357 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1358 |
+
00009162916191709169916F917D917E917291749179918C91859190918D9191
|
| 1359 |
+
91A291A391AA91AD91AE91AF91B591B491BA8C559E7E8DB88DEB8E058E598E69
|
| 1360 |
+
8DB58DBF8DBC8DBA8DC48DD68DD78DDA8DDE8DCE8DCF8DDB8DC68DEC8DF78DF8
|
| 1361 |
+
8DE38DF98DFB8DE48E098DFD8E148E1D8E1F8E2C8E2E8E238E2F8E3A8E408E39
|
| 1362 |
+
8E358E3D8E318E498E418E428E518E528E4A8E708E768E7C8E6F8E748E858E8F
|
| 1363 |
+
8E948E908E9C8E9E8C788C828C8A8C858C988C94659B89D689DE89DA89DC0000
|
| 1364 |
+
F6
|
| 1365 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1366 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1367 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1368 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1369 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1370 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1371 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1372 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1373 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1374 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1375 |
+
000089E589EB89EF8A3E8B26975396E996F396EF970697019708970F970E972A
|
| 1376 |
+
972D9730973E9F809F839F859F869F879F889F899F8A9F8C9EFE9F0B9F0D96B9
|
| 1377 |
+
96BC96BD96CE96D277BF96E0928E92AE92C8933E936A93CA938F943E946B9C7F
|
| 1378 |
+
9C829C859C869C879C887A239C8B9C8E9C909C919C929C949C959C9A9C9B9C9E
|
| 1379 |
+
9C9F9CA09CA19CA29CA39CA59CA69CA79CA89CA99CAB9CAD9CAE9CB09CB19CB2
|
| 1380 |
+
9CB39CB49CB59CB69CB79CBA9CBB9CBC9CBD9CC49CC59CC69CC79CCA9CCB0000
|
| 1381 |
+
F7
|
| 1382 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1383 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1384 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1385 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1386 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1387 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1388 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1389 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1390 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1391 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 1392 |
+
00009CCC9CCD9CCE9CCF9CD09CD39CD49CD59CD79CD89CD99CDC9CDD9CDF9CE2
|
| 1393 |
+
977C978597919792979497AF97AB97A397B297B49AB19AB09AB79E589AB69ABA
|
| 1394 |
+
9ABC9AC19AC09AC59AC29ACB9ACC9AD19B459B439B479B499B489B4D9B5198E8
|
| 1395 |
+
990D992E995599549ADF9AE19AE69AEF9AEB9AFB9AED9AF99B089B0F9B139B1F
|
| 1396 |
+
9B239EBD9EBE7E3B9E829E879E889E8B9E9293D69E9D9E9F9EDB9EDC9EDD9EE0
|
| 1397 |
+
9EDF9EE29EE99EE79EE59EEA9EEF9F229F2C9F2F9F399F379F3D9F3E9F440000
|
janus/lib/tcl8.6/encoding/gb1988.enc
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: gb1988, single-byte
|
| 2 |
+
S
|
| 3 |
+
003F 0 1
|
| 4 |
+
00
|
| 5 |
+
0000000100020003000400050006000700080009000A000B000C000D000E000F
|
| 6 |
+
0010001100120013001400150016001700180019001A001B001C001D001E001F
|
| 7 |
+
002000210022002300A500250026002700280029002A002B002C002D002E002F
|
| 8 |
+
0030003100320033003400350036003700380039003A003B003C003D003E003F
|
| 9 |
+
0040004100420043004400450046004700480049004A004B004C004D004E004F
|
| 10 |
+
0050005100520053005400550056005700580059005A005B005C005D005E005F
|
| 11 |
+
0060006100620063006400650066006700680069006A006B006C006D006E006F
|
| 12 |
+
0070007100720073007400750076007700780079007A007B007C007D203E007F
|
| 13 |
+
0080008100820083008400850086008700880089008A008B008C008D008E008F
|
| 14 |
+
0090009100920093009400950096009700980099009A009B009C009D009E009F
|
| 15 |
+
0000FF61FF62FF63FF64FF65FF66FF67FF68FF69FF6AFF6BFF6CFF6DFF6EFF6F
|
| 16 |
+
FF70FF71FF72FF73FF74FF75FF76FF77FF78FF79FF7AFF7BFF7CFF7DFF7EFF7F
|
| 17 |
+
FF80FF81FF82FF83FF84FF85FF86FF87FF88FF89FF8AFF8BFF8CFF8DFF8EFF8F
|
| 18 |
+
FF90FF91FF92FF93FF94FF95FF96FF97FF98FF99FF9AFF9BFF9CFF9DFF9EFF9F
|
| 19 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
| 20 |
+
0000000000000000000000000000000000000000000000000000000000000000
|
janus/lib/tcl8.6/encoding/iso2022-kr.enc
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Encoding file: iso2022-kr, escape-driven
|
| 2 |
+
E
|
| 3 |
+
name iso2022-kr
|
| 4 |
+
init \x1b$)C
|
| 5 |
+
final {}
|
| 6 |
+
iso8859-1 \x0f
|
| 7 |
+
ksc5601 \x0e
|