Datasets:
Add embodied module back
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- models/embodied/__init__.py +13 -0
- models/embodied/core/__init__.py +14 -0
- models/embodied/core/base.py +73 -0
- models/embodied/core/chunk.py +99 -0
- models/embodied/core/clock.py +118 -0
- models/embodied/core/driver.py +138 -0
- models/embodied/core/limiters.py +80 -0
- models/embodied/core/random.py +39 -0
- models/embodied/core/replay.py +394 -0
- models/embodied/core/selectors.py +354 -0
- models/embodied/core/streams.py +241 -0
- models/embodied/core/wrappers.py +418 -0
- models/embodied/envs/atari.py +177 -0
- models/embodied/envs/bsuite.py +51 -0
- models/embodied/envs/crafter.py +93 -0
- models/embodied/envs/dmc.py +76 -0
- models/embodied/envs/dmlab.py +147 -0
- models/embodied/envs/dummy.py +59 -0
- models/embodied/envs/from_dm.py +89 -0
- models/embodied/envs/from_gym.py +123 -0
- models/embodied/envs/loconav.py +228 -0
- models/embodied/envs/loconav_quadruped.py +132 -0
- models/embodied/envs/loconav_quadruped.xml +311 -0
- models/embodied/envs/minecraft.py +17 -0
- models/embodied/envs/minecraft_flat.py +438 -0
- models/embodied/envs/pinpad.py +225 -0
- models/embodied/envs/procgen.py +67 -0
- models/embodied/jax/__init__.py +15 -0
- models/embodied/jax/agent.py +502 -0
- models/embodied/jax/heads.py +162 -0
- models/embodied/jax/internal.py +304 -0
- models/embodied/jax/nets.py +670 -0
- models/embodied/jax/opt.py +164 -0
- models/embodied/jax/outs.py +330 -0
- models/embodied/jax/transform.py +185 -0
- models/embodied/jax/utils.py +233 -0
- models/embodied/perf/test_bandwidth.py +60 -0
- models/embodied/perf/test_distr.py +142 -0
- models/embodied/perf/test_driver.py +39 -0
- models/embodied/perf/test_replay.py +140 -0
- models/embodied/run/__init__.py +5 -0
- models/embodied/run/eval_only.py +76 -0
- models/embodied/run/parallel.py +481 -0
- models/embodied/run/train.py +119 -0
- models/embodied/run/train_eval.py +158 -0
- models/embodied/tests/test_driver.py +122 -0
- models/embodied/tests/test_layer_scan.py +102 -0
- models/embodied/tests/test_parallel.py +106 -0
- models/embodied/tests/test_replay.py +357 -0
- models/embodied/tests/test_sampletree.py +195 -0
models/embodied/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__version__ = '2.0.0'
|
| 2 |
+
|
| 3 |
+
try:
|
| 4 |
+
import colored_traceback
|
| 5 |
+
colored_traceback.add_hook(colors='terminal')
|
| 6 |
+
except ImportError:
|
| 7 |
+
pass
|
| 8 |
+
|
| 9 |
+
from .core import *
|
| 10 |
+
|
| 11 |
+
from . import envs
|
| 12 |
+
from . import jax
|
| 13 |
+
from . import run
|
models/embodied/core/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .base import Agent, Env
|
| 2 |
+
|
| 3 |
+
from .clock import GlobalClock
|
| 4 |
+
from .clock import LocalClock
|
| 5 |
+
from .driver import Driver
|
| 6 |
+
from .random import RandomAgent
|
| 7 |
+
from .replay import Replay
|
| 8 |
+
from .wrappers import Wrapper
|
| 9 |
+
|
| 10 |
+
from . import clock
|
| 11 |
+
from . import limiters
|
| 12 |
+
from . import selectors
|
| 13 |
+
from . import streams
|
| 14 |
+
from . import wrappers
|
models/embodied/core/base.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class Agent:
  """Abstract interface for learning agents.

  All functions operate on batches. The init_* methods return an initial
  carry (recurrent state) for the given batch size, and train/report/policy
  thread that carry through successive calls, as spelled out in the
  NotImplementedError messages below.
  """

  def __init__(self, obs_space, act_space, config):
    # Documents the expected constructor signature; the base class itself
    # stores nothing.
    pass

  def init_train(self, batch_size):
    raise NotImplementedError('init_train(batch_size) -> carry')

  def init_report(self, batch_size):
    raise NotImplementedError('init_report(batch_size) -> carry')

  def init_policy(self, batch_size):
    raise NotImplementedError('init_policy(batch_size) -> carry')

  def train(self, carry, data):
    raise NotImplementedError('train(carry, data) -> carry, out, metrics')

  def report(self, carry, data):
    raise NotImplementedError('report(carry, data) -> carry, metrics')

  def policy(self, carry, obs, mode):
    raise NotImplementedError('policy(carry, obs, mode) -> carry, act, out')

  def stream(self, st):
    # Hook that lets the agent transform its input data stream.
    raise NotImplementedError('stream(st) -> st')

  def save(self):
    raise NotImplementedError('save() -> data')

  def load(self, data):
    raise NotImplementedError('load(data) -> None')
| 34 |
+
class Env:
  """Abstract interface for environments with dict observation/action spaces."""

  def __repr__(self):
    return (
        f'{self.__class__.__name__}('
        f'obs_space={self.obs_space}, '
        f'act_space={self.act_space})')

  @property
  def obs_space(self):
    # The observation space must contain the keys is_first, is_last, and
    # is_terminal. Commonly, it also contains the keys reward and image. By
    # convention, keys starting with 'log/' are not consumed by the agent.
    raise NotImplementedError('Returns: dict of spaces')

  @property
  def act_space(self):
    # The action space must contain the reset key as well as any actions.
    raise NotImplementedError('Returns: dict of spaces')

  def step(self, action):
    raise NotImplementedError('Returns: dict')

  def close(self):
    # Optional cleanup hook; environments without resources can ignore it.
    pass
|
| 61 |
+
class Stream:
  """Abstract interface for checkpointable iterators over data."""

  def __iter__(self):
    return self

  def __next__(self):
    raise NotImplementedError

  def save(self):
    # Returns a state object that can later be passed back to load().
    raise NotImplementedError

  def load(self, state):
    raise NotImplementedError
models/embodied/core/chunk.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import io
|
| 2 |
+
import sys
|
| 3 |
+
import traceback
|
| 4 |
+
|
| 5 |
+
import elements
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Chunk:
  """Fixed-capacity buffer of steps stored as stacked per-key arrays.

  Chunks form linked streams via `succ`, the UUID of the following chunk,
  and are persisted as compressed .npz files whose filename encodes the
  creation time, identity, successor, and filled length.
  """

  __slots__ = ('time', 'uuid', 'succ', 'length', 'size', 'data', 'saved')

  def __init__(self, size=1024):
    self.time = elements.timestamp(millis=True)
    self.uuid = elements.UUID()
    self.succ = elements.UUID(0)
    # self.uuid = int(np.random.randint(1, 2 * 63))
    # self.succ = 0
    self.length = 0  # Number of steps appended so far.
    self.size = size  # Maximum number of steps this chunk can hold.
    self.data = None  # Buffers are allocated lazily on the first append().
    self.saved = False

  def __repr__(self):
    return f'Chunk({self.filename})'

  def __lt__(self, other):
    # Order chunks by creation time, e.g. for sorting directory listings.
    return self.time < other.time

  @property
  def filename(self):
    # `succ` may hold either a UUID or the successor chunk object itself.
    succ = self.succ.uuid if isinstance(self.succ, type(self)) else self.succ
    return f'{self.time}-{str(self.uuid)}-{str(succ)}-{self.length}.npz'

  @property
  def nbytes(self):
    # Size of the preallocated buffers, regardless of how full the chunk is.
    if not self.data:
      return 0
    return sum(x.nbytes for x in self.data.values())

  def append(self, step):
    """Append one step (a dict of arrays), allocating buffers on first use."""
    assert self.length < self.size
    if not self.data:
      # Allocate one (size, *shape) buffer per key from the first example.
      example = step
      self.data = {
          k: np.empty((self.size, *v.shape), v.dtype)
          for k, v in example.items()}
    for key, value in step.items():
      self.data[key][self.length] = value
    self.length += 1
    # if self.length == self.size:
    #   [x.setflags(write=False) for x in self.data.values()]

  def update(self, index, length, mapping):
    """Overwrite `length` already-written steps starting at `index`."""
    assert 0 <= index <= self.length, (index, self.length)
    assert 0 <= index + length <= self.length, (index, length, self.length)
    for key, value in mapping.items():
      self.data[key][index: index + length] = value

  def slice(self, index, length):
    """Return per-key array slices of `length` steps starting at `index`."""
    assert 0 <= index and index + length <= self.length
    return {k: v[index: index + length] for k, v in self.data.items()}

  @elements.timer.section('chunk_save')
  def save(self, directory, log=False):
    """Write the filled portion of the chunk to `directory` as an .npz file.

    A chunk can be saved at most once; `saved` is flipped up front.
    """
    assert not self.saved
    self.saved = True
    filename = elements.Path(directory) / self.filename
    data = {k: v[:self.length] for k, v in self.data.items()}
    with io.BytesIO() as stream:
      # Compress into memory first, then write in a single call.
      np.savez_compressed(stream, **data)
      stream.seek(0)
      filename.write(stream.read(), mode='wb')
    log and print(f'Saved chunk: {filename.name}')

  @classmethod
  def load(cls, filename, error='raise'):
    """Load a chunk from disk.

    With error='none', failures are printed and None is returned instead of
    re-raising.
    """
    assert error in ('raise', 'none')
    # The metadata is recovered from the filename, not the file contents.
    time, uuid, succ, length = filename.stem.split('-')
    length = int(length)
    try:
      with elements.Path(filename).open('rb') as f:
        data = np.load(f)
        data = {k: data[k] for k in data.keys()}
    except Exception:
      # NOTE(review): sys.exception() requires Python 3.11+ — confirm the
      # project's minimum supported version.
      tb = ''.join(traceback.format_exception(sys.exception()))
      print(f'Error loading chunk (unknown):\n{tb}')
      if error == 'raise':
        raise
      else:
        return None
    chunk = cls(length)
    chunk.time = time
    chunk.uuid = elements.UUID(uuid)
    chunk.succ = elements.UUID(succ)
    chunk.length = length
    chunk.data = data
    chunk.saved = True
    return chunk
|
models/embodied/core/clock.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
import time
|
| 3 |
+
|
| 4 |
+
import portal
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
CLIENT = None
|
| 8 |
+
REPLICA = None
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def setup(is_server, replica, replicas, port, addr):
  """Configure cross-replica clock synchronization for this process.

  A no-op for single-replica runs. Otherwise the designated server replica
  starts the clock server on `port`, and every replica connects a client to
  `addr`, stored in the module globals CLIENT and REPLICA. Must be called
  at most once per process.
  """
  global CLIENT, REPLICA
  assert CLIENT is None
  if replicas <= 1:
    return
  print('CLOCK PORT:', port)
  print('CLOCK ADDR:', addr)
  if is_server:
    _start_server(port, replicas)
  client = portal.Client(addr, 'ClockClient')
  client.connect()
  CLIENT = client
  REPLICA = replica
+
|
| 26 |
+
def _start_server(port, replicas):
  """Start the clock server that makes shared fire/skip decisions.

  Each bound handler runs once per replica and blocks on barriers until all
  `replicas` calls of a round have arrived; the handler invoked with
  replica == 0 then computes the shared result that every call returns.
  """

  clocks = []  # Per-clock [every, last_fire_time] entries.
  requests = []  # Arguments gathered from all replicas for the round.
  result = [None]  # Shared result slot, written by replica 0.
  receive = threading.Barrier(replicas)
  respond = threading.Barrier(replicas)

  def create(replica, every):
    # Register a new clock; all replicas must request the same interval.
    requests.append(every)
    receive.wait()
    if replica == 0:
      assert len(requests) == replicas, (len(requests), replicas)
      assert all(x == every for x in requests)
      clockid = len(clocks)
      clocks.append([float(every), time.time()])
      result[0] = clockid
      requests.clear()
    respond.wait()
    return result[0]

  def should(replica, clockid, skip):
    # Jointly decide whether the clock fires; any replica's skip vetoes.
    requests.append((clockid, skip))
    receive.wait()
    if replica == 0:
      assert len(requests) == replicas, (len(requests), replicas)
      clockids, skips = zip(*requests)
      assert all(x == clockid for x in clockids)
      every, prev = clocks[clockid]
      now = time.time()
      if every == 0:
        # Zero interval means the clock is switched off.
        decision = False
      elif every < 0:
        # Negative interval means the clock always fires.
        decision = True
      elif now >= prev + every:
        clocks[clockid][1] = now
        decision = True
      else:
        decision = False
      decision = decision and not any(skips)
      result[0] = decision
      requests.clear()
    respond.wait()
    return result[0]

  server = portal.Server(port, 'ClockServer')
  server.bind('create', create, workers=replicas)
  server.bind('should', should, workers=replicas)
  server.start(block=False)
+
|
| 76 |
+
|
| 77 |
+
class GlobalClock:
  """Clock whose decisions agree across replicas when setup() ran.

  Falls back to a purely local clock when no clock client is configured.
  """

  def __init__(self, every, first=False):
    self.multihost = bool(CLIENT)
    if self.multihost:
      self.clockid = CLIENT.create(REPLICA, every).result()
      # In multi-host mode, `first` is emulated by skipping the first tick.
      self.skip_next = (not first)
    else:
      self.clock = LocalClock(every, first)

  def __call__(self, step=None, skip=None):
    if self.multihost:
      if self.skip_next:
        self.skip_next = False
        skip = True
      return CLIENT.should(REPLICA, self.clockid, bool(skip)).result()
    else:
      return self.clock(step, skip)
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class LocalClock:
  """Rate limiter that fires at most once per `every` seconds.

  An interval of zero disables the clock entirely and a negative interval
  makes it fire on each call. `first` controls whether the very first call
  fires.
  """

  def __init__(self, every, first=False):
    self.every = every
    self.first = first
    self.prev = None

  def __call__(self, step=None, skip=None):
    # An explicit skip request always wins.
    if skip:
      return False
    # Zero means off.
    if self.every == 0:
      return False
    # Negative means always.
    if self.every < 0:
      return True
    current = time.time()
    if self.prev is None:
      # First observation: remember the time and fire only if requested.
      self.prev = current
      return self.first
    due = self.prev + self.every
    if current < due:
      return False
    self.prev = current
    return True
|
models/embodied/core/driver.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
|
| 3 |
+
import cloudpickle
|
| 4 |
+
import elements
|
| 5 |
+
import numpy as np
|
| 6 |
+
import portal
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Driver:
  """Steps a batch of environments and feeds transitions to callbacks.

  Environments either run in subprocesses connected by pipes
  (parallel=True) or inline in the calling thread. Extra keyword arguments
  are forwarded to both the policy and the registered callbacks.
  """

  def __init__(self, make_env_fns, parallel=True, **kwargs):
    assert len(make_env_fns) >= 1
    self.parallel = parallel
    self.kwargs = kwargs
    self.length = len(make_env_fns)
    if parallel:
      import multiprocessing as mp
      context = mp.get_context()
      self.pipes, pipes = zip(*[context.Pipe() for _ in range(self.length)])
      self.stop = context.Event()
      # Constructors are serialized with cloudpickle so closures survive
      # the trip into the worker processes.
      fns = [cloudpickle.dumps(fn) for fn in make_env_fns]
      self.procs = [
          portal.Process(self._env_server, self.stop, i, pipe, fn, start=True)
          for i, (fn, pipe) in enumerate(zip(fns, pipes))]
      # The action space is queried from the first worker only.
      self.pipes[0].send(('act_space',))
      self.act_space = self._receive(self.pipes[0])
    else:
      self.envs = [fn() for fn in make_env_fns]
      self.act_space = self.envs[0].act_space
    self.callbacks = []
    self.acts = None
    self.carry = None
    self.reset()

  def reset(self, init_policy=None):
    """Zero out actions, set all reset flags, and reinitialize the carry."""
    self.acts = {
        k: np.zeros((self.length,) + v.shape, v.dtype)
        for k, v in self.act_space.items()}
    self.acts['reset'] = np.ones(self.length, bool)
    self.carry = init_policy and init_policy(self.length)

  def close(self):
    if self.parallel:
      [proc.kill() for proc in self.procs]
    else:
      [env.close() for env in self.envs]

  def on_step(self, callback):
    # Callbacks receive (transition, env_index, **kwargs) once per step.
    self.callbacks.append(callback)

  def __call__(self, policy, steps=0, episodes=0):
    """Run until both the step and episode thresholds are reached."""
    step, episode = 0, 0
    while step < steps or episode < episodes:
      step, episode = self._step(policy, step, episode)

  def _step(self, policy, step, episode):
    """Send actions, gather observations, query the policy, fire callbacks."""
    acts = self.acts
    assert all(len(x) == self.length for x in acts.values())
    assert all(isinstance(v, np.ndarray) for v in acts.values())
    # Unbatch the actions into one dict per environment.
    acts = [{k: v[i] for k, v in acts.items()} for i in range(self.length)]
    if self.parallel:
      [pipe.send(('step', act)) for pipe, act in zip(self.pipes, acts)]
      obs = [self._receive(pipe) for pipe in self.pipes]
    else:
      obs = [env.step(act) for env, act in zip(self.envs, acts)]
    obs = {k: np.stack([x[k] for x in obs]) for k in obs[0].keys()}
    # Keys prefixed 'log/' bypass the policy but still reach the callbacks.
    logs = {k: v for k, v in obs.items() if k.startswith('log/')}
    obs = {k: v for k, v in obs.items() if not k.startswith('log/')}
    assert all(len(x) == self.length for x in obs.values()), obs
    self.carry, acts, outs = policy(self.carry, obs, **self.kwargs)
    assert all(k not in acts for k in outs), (
        list(outs.keys()), list(acts.keys()))
    if obs['is_last'].any():
      # Zero out actions of finished episodes so they reset cleanly.
      mask = ~obs['is_last']
      acts = {k: self._mask(v, mask) for k, v in acts.items()}
    self.acts = {**acts, 'reset': obs['is_last'].copy()}
    trans = {**obs, **acts, **outs, **logs}
    for i in range(self.length):
      trn = elements.tree.map(lambda x: x[i], trans)
      [fn(trn, i, **self.kwargs) for fn in self.callbacks]
    step += len(obs['is_first'])
    episode += obs['is_last'].sum()
    return step, episode

  def _mask(self, value, mask):
    # Broadcast the boolean mask across trailing action dimensions.
    while mask.ndim < value.ndim:
      mask = mask[..., None]
    return value * mask.astype(value.dtype)

  def _receive(self, pipe):
    """Receive one worker reply, killing all workers on any failure."""
    try:
      msg, arg = pipe.recv()
      if msg == 'error':
        raise RuntimeError(arg)
      assert msg == 'result'
      return arg
    except Exception:
      print('Terminating workers due to an exception.')
      [proc.kill() for proc in self.procs]
      raise

  @staticmethod
  def _env_server(stop, envid, pipe, ctor):
    """Subprocess loop that owns one environment and serves pipe requests."""
    try:
      ctor = cloudpickle.loads(ctor)
      env = ctor()
      while not stop.is_set():
        # Poll with a timeout so the stop event is checked periodically.
        if not pipe.poll(0.1):
          time.sleep(0.1)
          continue
        try:
          msg, *args = pipe.recv()
        except EOFError:
          return
        if msg == 'step':
          assert len(args) == 1
          act = args[0]
          obs = env.step(act)
          pipe.send(('result', obs))
        elif msg == 'obs_space':
          assert len(args) == 0
          pipe.send(('result', env.obs_space))
        elif msg == 'act_space':
          assert len(args) == 0
          pipe.send(('result', env.act_space))
        else:
          raise ValueError(f'Invalid message {msg}')
    except ConnectionResetError:
      print('Connection to driver lost')
    except Exception as e:
      # Forward the exception to the driver before re-raising locally.
      pipe.send(('error', e))
      raise
    finally:
      try:
        env.close()
      except Exception:
        pass
      pipe.close()
|
models/embodied/core/limiters.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
import time
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def wait(predicate, message, info=None, sleep=0.01, notify=60):
  """Block until `predicate()` holds and return the seconds spent waiting.

  Returns 0 immediately when the predicate already holds. While waiting,
  prints `message` together with `info` roughly every `notify` seconds.
  """
  if predicate():
    return 0
  begin = time.time()
  announced = begin
  while True:
    if predicate():
      return time.time() - begin
    moment = time.time()
    if moment - announced > notify:
      elapsed = moment - begin
      print(f'{message} {elapsed:.1f}s: {info}')
      announced = time.time()
    time.sleep(sleep)
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class SamplesPerInsert:
  """Rate limiter coupling replay sampling to insertion.

  Maintains a budget `avail` of permitted samples that grows by
  `samples_per_insert` for each inserted step (once the buffer holds at
  least `minsize` items) and shrinks by one per sample. `tolerance` bounds
  how far sampling and insertion may run ahead of one another. A
  non-positive `samples_per_insert` disables rate limiting.
  """

  def __init__(self, samples_per_insert, tolerance, minsize):
    assert 1 <= minsize
    self.samples_per_insert = samples_per_insert
    self.minsize = minsize
    self.size = 0
    self.avail = -minsize
    self.min_avail = -tolerance
    self.max_avail = tolerance * samples_per_insert
    self.lock = threading.Lock()

  def save(self):
    # Only the counters are checkpointed; the lock is recreated fresh.
    return {'size': self.size, 'avail': self.avail}

  def load(self, data):
    self.size = data['size']
    self.avail = data['avail']

  def want_insert(self):
    # Always admit inserts while the buffer is below its minimum size or
    # when rate limiting is disabled; otherwise only while the sample
    # budget has not grown past the tolerance window.
    if self.size < self.minsize or self.samples_per_insert <= 0:
      return True
    return self.avail < self.max_avail

  def want_sample(self):
    # Never sample from a buffer below its minimum size.
    if self.size < self.minsize:
      return False
    if self.samples_per_insert <= 0:
      return True
    return self.avail > self.min_avail

  def insert(self):
    with self.lock:
      self.size += 1
      # The sample budget only starts accumulating once the buffer is warm.
      if self.size >= self.minsize:
        self.avail += self.samples_per_insert

  def sample(self):
    with self.lock:
      self.avail -= 1
|
models/embodied/core/random.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class RandomAgent:
  """Agent that ignores observations and samples uniformly random actions.

  Implements the full Agent interface with trivial carries and metrics so
  it can stand in for a learned agent, e.g. to prefill a replay buffer.
  """

  def __init__(self, obs_space, act_space):
    self.obs_space = obs_space
    self.act_space = act_space

  def init_policy(self, batch_size):
    return ()

  def init_train(self, batch_size):
    return ()

  def init_report(self, batch_size):
    return ()

  def policy(self, carry, obs, mode='train'):
    # Draw one independent sample per batch element for every action key,
    # leaving the 'reset' key for the driver to manage.
    count = len(obs['is_first'])
    actions = {}
    for name, space in self.act_space.items():
      if name == 'reset':
        continue
      actions[name] = np.stack([space.sample() for _ in range(count)])
    return carry, actions, {}

  def train(self, carry, data):
    return carry, {}, {}

  def report(self, carry, data):
    return carry, {}

  def stream(self, st):
    return st

  def save(self):
    return None

  def load(self, data=None):
    pass
|
models/embodied/core/replay.py
ADDED
|
@@ -0,0 +1,394 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
from collections import defaultdict, deque
|
| 3 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 4 |
+
from functools import partial as bind
|
| 5 |
+
|
| 6 |
+
import elements
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
from . import chunk as chunklib
|
| 10 |
+
from . import limiters
|
| 11 |
+
from . import selectors
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Replay:
|
| 15 |
+
|
| 16 |
+
def __init__(
|
| 17 |
+
self, length, capacity=None, directory=None, chunksize=1024,
|
| 18 |
+
online=False, selector=None, save_wait=False, name='unnamed', seed=0):
|
| 19 |
+
|
| 20 |
+
self.length = length
|
| 21 |
+
self.capacity = capacity
|
| 22 |
+
self.chunksize = chunksize
|
| 23 |
+
self.name = name
|
| 24 |
+
|
| 25 |
+
self.sampler = selector or selectors.Uniform(seed)
|
| 26 |
+
|
| 27 |
+
self.chunks = {}
|
| 28 |
+
self.refs = {}
|
| 29 |
+
self.refs_lock = threading.RLock()
|
| 30 |
+
|
| 31 |
+
self.items = {}
|
| 32 |
+
self.fifo = deque()
|
| 33 |
+
self.itemid = 0
|
| 34 |
+
|
| 35 |
+
self.current = {}
|
| 36 |
+
self.streams = defaultdict(deque)
|
| 37 |
+
self.rwlock = elements.RWLock()
|
| 38 |
+
|
| 39 |
+
self.online = online
|
| 40 |
+
if online:
|
| 41 |
+
self.lengths = defaultdict(int)
|
| 42 |
+
self.queue = deque()
|
| 43 |
+
|
| 44 |
+
if directory:
|
| 45 |
+
self.directory = elements.Path(directory)
|
| 46 |
+
self.directory.mkdir()
|
| 47 |
+
self.workers = ThreadPoolExecutor(16, 'replay_saver')
|
| 48 |
+
self.saved = set()
|
| 49 |
+
else:
|
| 50 |
+
self.directory = None
|
| 51 |
+
self.save_wait = save_wait
|
| 52 |
+
|
| 53 |
+
self.metrics = {'samples': 0, 'inserts': 0, 'updates': 0}
|
| 54 |
+
|
| 55 |
+
def __len__(self):
|
| 56 |
+
return len(self.items)
|
| 57 |
+
|
| 58 |
+
def stats(self):
|
| 59 |
+
ratio = lambda x, y: x / y if y else np.nan
|
| 60 |
+
m = self.metrics
|
| 61 |
+
chunk_nbytes = sum(x.nbytes for x in list(self.chunks.values()))
|
| 62 |
+
stats = {
|
| 63 |
+
'items': len(self.items),
|
| 64 |
+
'chunks': len(self.chunks),
|
| 65 |
+
'streams': len(self.streams),
|
| 66 |
+
'ram_gb': chunk_nbytes / (1024 ** 3),
|
| 67 |
+
'inserts': m['inserts'],
|
| 68 |
+
'samples': m['samples'],
|
| 69 |
+
'updates': m['updates'],
|
| 70 |
+
'replay_ratio': ratio(self.length * m['samples'], m['inserts']),
|
| 71 |
+
}
|
| 72 |
+
for key in self.metrics:
|
| 73 |
+
self.metrics[key] = 0
|
| 74 |
+
return stats
|
| 75 |
+
|
| 76 |
+
@elements.timer.section('replay_add')
|
| 77 |
+
def add(self, step, worker=0):
|
| 78 |
+
step = {k: v for k, v in step.items() if not k.startswith('log/')}
|
| 79 |
+
with self.rwlock.reading:
|
| 80 |
+
step = {k: np.asarray(v) for k, v in step.items()}
|
| 81 |
+
|
| 82 |
+
if worker not in self.current:
|
| 83 |
+
chunk = chunklib.Chunk(self.chunksize)
|
| 84 |
+
with self.refs_lock:
|
| 85 |
+
self.refs[chunk.uuid] = 1
|
| 86 |
+
self.chunks[chunk.uuid] = chunk
|
| 87 |
+
self.current[worker] = (chunk.uuid, 0)
|
| 88 |
+
|
| 89 |
+
chunkid, index = self.current[worker]
|
| 90 |
+
step['stepid'] = np.frombuffer(
|
| 91 |
+
bytes(chunkid) + index.to_bytes(4, 'big'), np.uint8)
|
| 92 |
+
stream = self.streams[worker]
|
| 93 |
+
chunk = self.chunks[chunkid]
|
| 94 |
+
assert chunk.length == index, (chunk.length, index)
|
| 95 |
+
chunk.append(step)
|
| 96 |
+
assert chunk.length == index + 1, (chunk.length, index + 1)
|
| 97 |
+
stream.append((chunkid, index))
|
| 98 |
+
with self.refs_lock:
|
| 99 |
+
self.refs[chunkid] += 1
|
| 100 |
+
|
| 101 |
+
index += 1
|
| 102 |
+
if index < chunk.size:
|
| 103 |
+
self.current[worker] = (chunkid, index)
|
| 104 |
+
else:
|
| 105 |
+
self._complete(chunk, worker)
|
| 106 |
+
assert len(self.streams) == len(self.current)
|
| 107 |
+
|
| 108 |
+
if len(stream) >= self.length:
|
| 109 |
+
# Increment is not thread safe thus inaccurate but faster than locking.
|
| 110 |
+
self.metrics['inserts'] += 1
|
| 111 |
+
chunkid, index = stream.popleft()
|
| 112 |
+
self._insert(chunkid, index)
|
| 113 |
+
|
| 114 |
+
if self.online and self.lengths[worker] % self.length == 0:
|
| 115 |
+
self.queue.append((chunkid, index))
|
| 116 |
+
|
| 117 |
+
if self.online:
|
| 118 |
+
self.lengths[worker] += 1
|
| 119 |
+
|
| 120 |
+
@elements.timer.section('replay_sample')
def sample(self, batch, mode='train'):
  """Sample `batch` sequences of length `self.length` as a batched dict."""
  message = f'Replay buffer {self.name} is empty'
  # Block until at least one item is available to sample.
  limiters.wait(lambda: len(self.sampler), message)
  seqs, is_online = zip(*[self._sample(mode) for _ in range(batch)])
  data = self._assemble_batch(seqs, 0, self.length)
  data = self._annotate_batch(data, is_online, True)
  return data
|
| 128 |
+
|
| 129 |
+
@elements.timer.section('replay_update')
def update(self, data):
  """Write back priorities and/or value updates for sampled sequences.

  `data['stepid']` has shape (batch, time, idbytes). An optional
  `data['priority']` of shape (batch, time) updates the prioritized
  sampler. Any remaining entries in `data` are written back into the
  stored sequences; updates for evicted sequences are silently dropped.
  """
  stepids = data.pop('stepid')
  priority = data.pop('priority', None)
  assert stepids.ndim == 3, stepids.shape
  self.metrics['updates'] += int(np.prod(stepids.shape[:-1]))
  if priority is not None:
    assert priority.ndim == 2, priority.shape
    flat_ids = stepids.reshape((-1, stepids.shape[-1]))
    self.sampler.prioritize(flat_ids, priority.flatten())
  if not data:
    return
  for row, ids in enumerate(stepids):
    # The first step id of the row encodes (chunk uuid, start index).
    raw = ids[0].tobytes()
    chunkid = elements.UUID(raw[:-4])
    start = int.from_bytes(raw[-4:], 'big')
    values = {k: v[row] for k, v in data.items()}
    try:
      self._setseq(chunkid, start, values)
    except KeyError:
      # Sequence was evicted in the meantime; drop the update.
      pass
|
| 150 |
+
|
| 151 |
+
def _sample(self, mode):
  """Pick one sequence; returns ({key: [parts]}, is_online)."""
  assert mode in ('train', 'report', 'eval'), mode
  if mode == 'train':
    # Increment is not thread safe thus inaccurate but faster than locking.
    self.metrics['samples'] += 1
  while True:
    try:
      if self.online and self.queue and mode == 'train':
        # Prefer fresh, never-sampled sequences from the online queue.
        chunkid, index = self.queue.popleft()
        is_online = True
      else:
        with elements.timer.section('sample'):
          itemid = self.sampler()
        chunkid, index = self.items[itemid]
        is_online = False
      seq = self._getseq(chunkid, index, concat=False)
      return seq, is_online
    except KeyError:
      # Item or chunk was evicted between selection and lookup; retry.
      continue
|
| 170 |
+
|
| 171 |
+
def _insert(self, chunkid, index):
  """Register the sequence starting at (chunkid, index) as sampleable."""
  # Evict oldest items first to stay within capacity.
  while self.capacity and len(self.items) >= self.capacity:
    self._remove()
  itemid = self.itemid  # Monotonically increasing item handle.
  self.itemid += 1
  self.items[itemid] = (chunkid, index)
  # The sampler gets the step ids so prioritized selectors can map
  # per-step priority updates back to this item.
  stepids = self._getseq(chunkid, index, ['stepid'])['stepid']
  self.sampler[itemid] = stepids
  self.fifo.append(itemid)
|
| 180 |
+
|
| 181 |
+
def _remove(self):
  """Evict the oldest item; drop chunks that no longer back any item."""
  itemid = self.fifo.popleft()
  del self.sampler[itemid]
  chunkid, index = self.items.pop(itemid)
  with self.refs_lock:
    self.refs[chunkid] -= 1
    if self.refs[chunkid] < 1:
      del self.refs[chunkid]
      chunk = self.chunks.pop(chunkid)
      # Dropping a chunk releases its reference on the successor chunk.
      if chunk.succ in self.refs:
        self.refs[chunk.succ] -= 1
|
| 192 |
+
|
| 193 |
+
def _getseq(self, chunkid, index, keys=None, concat=True):
  """Read `self.length` steps starting at (chunkid, index).

  Returns {key: array} when `concat`, else {key: [part, ...]}. Follows
  successor chunks when the sequence spans a chunk boundary; raises
  KeyError if a needed chunk has been evicted. Note: `keys` only filters
  the multi-chunk path; the single-chunk fast path returns all keys.
  """
  chunk = self.chunks[chunkid]
  available = chunk.length - index
  if available >= self.length:
    # Fast path: the whole sequence lives inside one chunk.
    with elements.timer.section('get_slice'):
      seq = chunk.slice(index, self.length)
      if not concat:
        seq = {k: [v] for k, v in seq.items()}
      return seq
  else:
    # Slow path: stitch the sequence together across successor chunks.
    with elements.timer.section('get_compose'):
      parts = [chunk.slice(index, available)]
      remaining = self.length - available
      while remaining > 0:
        chunk = self.chunks[chunk.succ]
        used = min(remaining, chunk.length)
        parts.append(chunk.slice(0, used))
        remaining -= used
      seq = {k: [p[k] for p in parts] for k in keys or parts[0].keys()}
      if concat:
        seq = {k: np.concatenate(v, 0) for k, v in seq.items()}
      return seq
|
| 215 |
+
|
| 216 |
+
def _setseq(self, chunkid, index, values):
  """Write `values` (dict of equal-length arrays) into the stored sequence
  starting at (chunkid, index), spanning successor chunks if needed.

  Raises KeyError if a required chunk has been evicted.
  """
  length = len(next(iter(values.values())))
  chunk = self.chunks[chunkid]
  available = chunk.length - index
  if available >= length:
    # Fast path: the whole update fits inside one chunk.
    with elements.timer.section('set_slice'):
      return chunk.update(index, length, values)
  else:
    # Slow path: split the update across successor chunks.
    with elements.timer.section('set_compose'):
      part = {k: v[:available] for k, v in values.items()}
      values = {k: v[available:] for k, v in values.items()}
      chunk.update(index, available, part)
      remaining = length - available
      while remaining > 0:
        chunk = self.chunks[chunk.succ]
        used = min(remaining, chunk.length)
        part = {k: v[:used] for k, v in values.items()}
        values = {k: v[used:] for k, v in values.items()}
        chunk.update(0, used, part)
        remaining -= used
|
| 236 |
+
|
| 237 |
+
# def dataset(self, batch, length=None, consec=None, prefix=0, report=False):
|
| 238 |
+
# length = length or self.length
|
| 239 |
+
# consec = consec or (self.length - prefix) // length
|
| 240 |
+
# assert consec <= (self.length - prefix) // length, (
|
| 241 |
+
# self.length, length, consec, prefix)
|
| 242 |
+
# limiters.wait(lambda: len(self.sampler), 'Replay buffer is empty')
|
| 243 |
+
# # For performance, each batch should be consecutive in memory, rather than
|
| 244 |
+
# # a non-consecutive view into a longer batch. For example, this allows
|
| 245 |
+
# # near-instant serialization when sending over the network.
|
| 246 |
+
# while True:
|
| 247 |
+
# seqs, is_online = zip(*[self._sample(report) for _ in range(batch)])
|
| 248 |
+
# for i in range(consec):
|
| 249 |
+
# offset = i * length
|
| 250 |
+
# data = self._assemble_batch(seqs, offset, offset + length + prefix)
|
| 251 |
+
# data = self._annotate_batch(data, is_online, is_first=(i == 0))
|
| 252 |
+
# data['consec'] = np.full(data['is_first'].shape, i, np.int32)
|
| 253 |
+
# yield data
|
| 254 |
+
|
| 255 |
+
@elements.timer.section('assemble_batch')
def _assemble_batch(self, seqs, start, stop):
  """Copy the [start, stop) time window of each part-list sequence into
  one contiguous {key: (batch, time, ...)} batch of freshly allocated
  arrays."""
  shape = (len(seqs), stop - start)
  data = {
      key: np.empty((*shape, *parts[0].shape[1:]), parts[0].dtype)
      for key, parts in seqs[0].items()}
  for n, seq in enumerate(seqs):
    st, dt = 0, 0  # Source and destination time index.
    for p in range(len(seq['stepid'])):
      partlen = len(seq['stepid'][p])
      if start < st + partlen:
        # Part overlaps the requested window; copy the overlapping slice.
        part_start = max(0, start - st)
        part_stop = min(stop - st, partlen)
        num = part_stop - part_start
        for k in data.keys():
          data[k][n, dt: dt + num] = seq[k][p][part_start: part_stop]
        dt += num
      st += partlen
      if st >= stop:
        break
  return data
|
| 276 |
+
|
| 277 |
+
@elements.timer.section('annotate_batch')
def _annotate_batch(self, data, is_online, is_first):
  """Fix up episode-boundary flags on an assembled batch.

  Does not mutate the arrays inside the input dict; modified keys are
  rebound to fresh arrays.
  """
  data = data.copy()
  # if self.online:
  #   broadcasted = [[x] for x in is_online]
  #   data['is_online'] = np.full(data['is_first'].shape, broadcasted, bool)
  if 'is_first' in data:
    if is_first:
      # The first step of a sampled sequence acts as an episode start.
      data['is_first'] = data['is_first'].copy()
      data['is_first'][:, 0] = True
  if 'is_last' in data:
    # Make sure that abandoned episodes have is_last set.
    next_is_first = np.roll(data['is_first'], shift=-1, axis=1)
    next_is_first[:, -1] = False
    data['is_last'] = data['is_last'] | next_is_first
  return data
|
| 293 |
+
|
| 294 |
+
@elements.timer.section('replay_save')
def save(self):
  """Seal in-progress chunks and persist all unsaved chunks to disk.

  No-op when no directory is configured. Returns None (the buffer keeps
  no extra checkpoint state beyond the chunk files).
  """
  if self.directory:
    with self.rwlock.writing:
      # Seal every worker's partially filled chunk so it can be written.
      for worker, (chunkid, _) in self.current.items():
        chunk = self.chunks[chunkid]
        if chunk.length > 0:
          self._complete(chunk, worker)
      promises = []
      for chunk in self.chunks.values():
        if chunk.length > 0 and chunk.uuid not in self.saved:
          self.saved.add(chunk.uuid)
          promises.append(self.workers.submit(chunk.save, self.directory))
      if self.save_wait:
        # Optionally block until all chunk writes have finished.
        [promise.result() for promise in promises]
  return None
|
| 310 |
+
|
| 311 |
+
@elements.timer.section('replay_load')
def load(self, data=None, directory=None, amount=None):
  """Load saved chunks from disk and re-insert their sampleable items.

  Loads newest chunks first until roughly `amount` items (default: the
  buffer capacity) are restored. Corrupted chunk files are skipped.
  `data` is unused here; presumably kept for checkpoint-interface
  compatibility — confirm against the checkpoint caller.
  """
  directory = directory or self.directory
  amount = amount or self.capacity or np.inf
  if not directory:
    return
  revsorted = lambda x: list(reversed(sorted(list(x))))
  directory = elements.Path(directory)
  # NOTE(review): compares chunk.filename entries against Path.name
  # strings — assumes both are bare file names; confirm.
  names_loaded = revsorted(x.filename for x in list(self.chunks.values()))
  names_ondisk = revsorted(x.name for x in directory.glob('*.npz'))
  names_ondisk = [x for x in names_ondisk if x not in names_loaded]
  if not names_ondisk:
    return

  # Estimate the item count per on-disk chunk and keep only as many of the
  # newest chunks as needed to reach `amount` items.
  numitems = self._numitems(names_loaded + names_ondisk)
  uuids = [elements.UUID(x.split('-')[1]) for x in names_ondisk]
  total = 0
  numchunks = 0
  for uuid in uuids:
    numchunks += 1
    total += numitems[uuid]
    if total >= amount:
      break

  load = bind(chunklib.Chunk.load, error='none')  # Corrupt file -> None.
  filenames = [directory / x for x in names_ondisk[:numchunks]]

  with ThreadPoolExecutor(16, 'replay_loader') as pool:
    chunks = [x for x in pool.map(load, filenames) if x]

  # We need to recompute the number of items per chunk now because some
  # chunks may be corrupted and thus not available.
  # numitems = self._numitems(chunks + list(self.chunks.values()))
  numitems = self._numitems(chunks)

  with self.rwlock.writing:
    self.saved.update([chunk.uuid for chunk in chunks])
    with self.refs_lock:
      for chunk in chunks:
        self.chunks[chunk.uuid] = chunk
        self.refs[chunk.uuid] = 0
      # Oldest first: each chunk gains one reference per item it backs,
      # plus one for its predecessor link.
      for chunk in reversed(chunks):
        amount = numitems[chunk.uuid]
        self.refs[chunk.uuid] += amount
        if chunk.succ in self.refs:
          self.refs[chunk.succ] += 1
        for index in range(amount):
          self._insert(chunk.uuid, index)
|
| 360 |
+
|
| 361 |
+
@elements.timer.section('complete_chunk')
def _complete(self, chunk, worker):
  """Seal `chunk` and point `worker` at a fresh successor chunk."""
  succ = chunklib.Chunk(self.chunksize)
  with self.refs_lock:
    # The sealed chunk loses the worker's reference; the successor starts
    # with two: one from the worker and one from the predecessor link.
    self.refs[chunk.uuid] -= 1
    self.refs[succ.uuid] = 2
  self.chunks[succ.uuid] = succ
  self.current[worker] = (succ.uuid, 0)
  chunk.succ = succ.uuid
  return succ
|
| 371 |
+
|
| 372 |
+
def _numitems(self, chunks):
  """Compute how many sampleable items each chunk contributes.

  Accepts chunk objects or filenames of the form time-uuid-succ-length.
  Returns {uuid: count}. NOTE(review): returns the int 0 (not a dict) for
  empty input — callers in this file never index the result in that case,
  but the inconsistent type is worth confirming/cleaning up.
  """
  chunks = [x.filename if hasattr(x, 'filename') else x for x in chunks]
  if not chunks:
    return 0
  # Newest first, so each chunk's successors are processed before it.
  chunks = list(reversed(sorted([elements.Path(x).stem for x in chunks])))
  times, uuids, succs, lengths = zip(*[x.split('-') for x in chunks])
  uuids = [elements.UUID(x) for x in uuids]
  succs = [elements.UUID(x) for x in succs]
  lengths = {k: int(v) for k, v in zip(uuids, lengths)}
  # future[uuid]: total steps reachable from uuid through successor links.
  future = {}
  for uuid, succ in zip(uuids, succs):
    future[uuid] = lengths[uuid] + future.get(succ, 0)
  numitems = {}
  for uuid, succ in zip(uuids, succs):
    # A start index is an item iff `self.length` steps fit within this
    # chunk plus its available successors.
    numitems[uuid] = lengths[uuid] + 1 - self.length + future.get(succ, 0)
  numitems = {k: np.clip(v, 0, lengths[k]) for k, v in numitems.items()}
  return numitems
|
| 389 |
+
|
| 390 |
+
def _notempty(self, reason=False):
|
| 391 |
+
if reason:
|
| 392 |
+
return (True, 'ok') if len(self.sampler) else (False, 'empty buffer')
|
| 393 |
+
else:
|
| 394 |
+
return bool(len(self.sampler))
|
models/embodied/core/selectors.py
ADDED
|
@@ -0,0 +1,354 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import threading
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Fifo:
  """Selector that always returns the oldest key still present."""

  def __init__(self):
    self.queue = collections.deque()

  def __call__(self):
    # Peek at the front; callers delete keys explicitly when done.
    return self.queue[0]

  def __len__(self):
    return len(self.queue)

  def __setitem__(self, key, stepids):
    # Step ids are irrelevant for FIFO ordering.
    self.queue.append(key)

  def __delitem__(self, key):
    if self.queue[0] == key:
      self.queue.popleft()
      return
    # Rare path: removing from the middle of a deque is O(n).
    self.queue.remove(key)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class Uniform:
  """Uniform-random selector over the current set of keys.

  Keys live in a list for O(1) sampling; a dict maps key -> position so
  deletion can swap-remove in O(1). All operations hold a single lock.
  """

  def __init__(self, seed=0):
    self.indices = {}
    self.keys = []
    self.rng = np.random.default_rng(seed)
    self.lock = threading.Lock()

  def __len__(self):
    return len(self.keys)

  def __call__(self):
    with self.lock:
      pos = self.rng.integers(0, len(self.keys)).item()
      return self.keys[pos]

  def __setitem__(self, key, stepids):
    with self.lock:
      self.indices[key] = len(self.keys)
      self.keys.append(key)

  def __delitem__(self, key):
    with self.lock:
      assert 2 <= len(self), len(self)
      pos = self.indices.pop(key)
      moved = self.keys.pop()
      if pos != len(self.keys):
        # Fill the vacated slot with the former last element.
        self.keys[pos] = moved
        self.indices[moved] = pos
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class Recency:
  """Selector that samples keys with probability depending on their age.

  `uprobs` is an array of unnormalized probabilities indexed by item age
  (0 = most recently inserted); only its endpoints are checked for the
  expected non-increasing shape. Sampling descends a normalized tree of
  branching factor 16 built over the age distribution.
  """

  def __init__(self, uprobs, seed=0):
    assert uprobs[0] >= uprobs[-1], uprobs
    self.uprobs = uprobs
    self.tree = self._build(uprobs)
    self.rng = np.random.default_rng(seed)
    self.step = 0   # Total number of insertions so far.
    self.steps = {}  # key -> insertion step
    self.items = {}  # insertion step -> key

  def __len__(self):
    return len(self.items)

  def __call__(self):
    for retry in range(10):
      try:
        age = self._sample(self.tree, self.rng)
        if len(self.items) < len(self.uprobs):
          # Fewer items than table entries: rescale age into valid range.
          age = int(age / len(self.uprobs) * len(self.items))
        return self.items[self.step - 1 - age]
      except KeyError:
        # Item might have been deleted very recently.
        if retry < 9:
          import time
          time.sleep(0.01)
        else:
          raise

  def __setitem__(self, key, stepids):
    self.steps[key] = self.step
    self.items[self.step] = key
    self.step += 1

  def __delitem__(self, key):
    step = self.steps.pop(key)
    del self.items[step]

  def _sample(self, tree, rng, bfactor=16):
    """Sample an age index by descending the per-level probability tree."""
    path = []
    for level, prob in enumerate(tree):
      p = prob
      for segment in path:
        p = p[segment]
      # BUG FIX: the original called rng.choice(len(segment), ...), but
      # `segment` is undefined at the root level (empty path) and an int
      # below it. The number of choices is the fan-out of `p`.
      index = rng.choice(len(p), p=p)
      path.append(index)
    # Combine the per-level branch choices into a flat leaf index.
    index = sum(
        index * bfactor ** (len(tree) - level - 1)
        for level, index in enumerate(path))
    return index

  def _build(self, uprobs, bfactor=16):
    """Build per-level normalized branch probabilities over `uprobs`."""
    assert np.isfinite(uprobs).all(), uprobs
    assert (uprobs >= 0).all(), uprobs
    depth = int(np.ceil(np.log(len(uprobs)) / np.log(bfactor)))
    size = bfactor ** depth
    # Pad to a full tree with zero-probability leaves.
    uprobs = np.concatenate([uprobs, np.zeros(size - len(uprobs))])
    tree = [uprobs]
    for level in reversed(range(depth - 1)):
      tree.insert(0, tree[0].reshape((-1, bfactor)).sum(-1))
    for level, prob in enumerate(tree):
      prob = prob.reshape([bfactor] * (1 + level))
      total = prob.sum(-1, keepdims=True)
      # Zero-total branches keep their (all-zero) values instead of NaN.
      with np.errstate(divide='ignore', invalid='ignore'):
        tree[level] = np.where(total, prob / total, prob)
    return tree
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class Prioritized:
  """Prioritized selector: items are sampled proportional to an aggregate
  (mean, optionally blended with max) of their steps' priorities."""

  def __init__(
      self, exponent=1.0, initial=1.0, zero_on_sample=False,
      maxfrac=0.0, branching=16, seed=0):
    assert 0 <= maxfrac <= 1, maxfrac
    self.exponent = float(exponent)
    self.initial = float(initial)  # Priority for never-updated steps.
    self.zero_on_sample = zero_on_sample
    self.maxfrac = maxfrac  # Blend weight between max and mean aggregate.
    self.tree = SampleTree(branching, seed)
    self.prios = collections.defaultdict(lambda: self.initial)
    self.stepitems = collections.defaultdict(list)  # stepid -> item keys
    self.items = {}  # item key -> list of stepids

  def prioritize(self, stepids, priorities):
    """Update step priorities and refresh the affected items' tree weights."""
    if not isinstance(stepids[0], bytes):
      stepids = [x.tobytes() for x in stepids]
    for stepid, priority in zip(stepids, priorities):
      # BUG FIX: the original wrapped this assignment in try/except
      # KeyError, but assigning to a defaultdict never raises, so removed
      # steps were silently re-added to `prios` and leaked. Skip them
      # explicitly instead.
      if stepid in self.prios:
        self.prios[stepid] = priority
      else:
        print('Ignoring priority update for removed time step.')
    items = []
    for stepid in stepids:
      # .get avoids creating empty defaultdict entries for removed steps.
      items += self.stepitems.get(stepid, [])
    for key in list(set(items)):
      try:
        self.tree.update(key, self._aggregate(key))
      except KeyError:
        print('Ignoring tree update for removed time step.')

  def __len__(self):
    return len(self.items)

  def __call__(self):
    key = self.tree.sample()
    if self.zero_on_sample:
      # Deprioritize sampled items until their next priority update.
      zeros = [0.0] * len(self.items[key])
      self.prioritize(self.items[key], zeros)
    return key

  def __setitem__(self, key, stepids):
    if not isinstance(stepids[0], bytes):
      stepids = [x.tobytes() for x in stepids]
    self.items[key] = stepids
    for stepid in stepids:
      self.stepitems[stepid].append(key)
    self.tree.insert(key, self._aggregate(key))

  def __delitem__(self, key):
    self.tree.remove(key)
    stepids = self.items.pop(key)
    for stepid in stepids:
      stepitems = self.stepitems[stepid]
      stepitems.remove(key)
      if not stepitems:
        # Last item referencing this step: drop all its bookkeeping.
        del self.stepitems[stepid]
        del self.prios[stepid]

  def _aggregate(self, key):
    # Both list comprehensions in this function are a performance bottleneck
    # because they are called very often.
    prios = [self.prios[stepid] for stepid in self.items[key]]
    if self.exponent != 1.0:
      prios = [x ** self.exponent for x in prios]
    mean = sum(prios) / len(prios)
    if self.maxfrac:
      return self.maxfrac * max(prios) + (1 - self.maxfrac) * mean
    return mean
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
class Mixture:
  """Combines several selectors, drawing from each with fixed probability.

  Insertions, deletions, and priority updates fan out to every
  sub-selector; zero-fraction sub-selectors are dropped at construction.
  """

  def __init__(self, selectors, fractions, seed=0):
    assert set(selectors.keys()) == set(fractions.keys())
    assert sum(fractions.values()) == 1, fractions
    # Drop sub-selectors that would never be sampled.
    for key, frac in list(fractions.items()):
      if not frac:
        selectors.pop(key)
        fractions.pop(key)
    ordered = sorted(selectors.keys())
    self.selectors = [selectors[key] for key in ordered]
    self.fractions = np.array([fractions[key] for key in ordered], np.float32)
    self.rng = np.random.default_rng(seed)

  def __call__(self):
    chosen = self.rng.choice(self.selectors, p=self.fractions)
    return chosen()

  def __setitem__(self, key, stepids):
    for sel in self.selectors:
      sel[key] = stepids

  def __delitem__(self, key):
    for sel in self.selectors:
      del sel[key]

  def prioritize(self, stepids, priorities):
    for sel in self.selectors:
      if hasattr(sel, 'prioritize'):
        sel.prioritize(stepids, priorities)
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
class SampleTree:
  """Shallow n-ary tree over (key, uprob) entries supporting weighted
  sampling, insertion, update, and removal, each in O(branching * depth)."""

  def __init__(self, branching=16, seed=0):
    assert 2 <= branching
    self.branching = branching
    self.root = SampleTreeNode()
    self.last = None  # Most recently inserted entry; the tree's fill point.
    self.entries = {}
    self.rng = np.random.default_rng(seed)

  def __len__(self):
    return len(self.entries)

  def insert(self, key, uprob):
    """Insert an entry next to the previous one, growing the tree as needed."""
    if not self.last:
      node = self.root
    else:
      ups = 0
      node = self.last.parent
      # Walk up until a node has room for another child.
      while node and len(node) >= self.branching:
        node = node.parent
        ups += 1
      if not node:
        # Every level is full: grow a new root above the old one.
        node = SampleTreeNode()
        node.append(self.root)
        self.root = node
      for _ in range(ups):
        # Walk back down, creating empty nodes along the new rightmost path.
        below = SampleTreeNode()
        node.append(below)
        node = below
    entry = SampleTreeEntry(key, uprob)
    node.append(entry)
    self.entries[key] = entry
    self.last = entry

  def remove(self, key):
    """Remove an entry, moving the last entry into its slot to stay compact."""
    entry = self.entries.pop(key)
    entry_parent = entry.parent
    last_parent = self.last.parent
    entry.parent.remove(entry)
    if entry is not self.last:
      # Fill the vacated slot with the last entry.
      entry_parent.append(self.last)
    node = last_parent
    ups = 0
    # Prune now-empty nodes along the old rightmost path.
    while node.parent and not len(node):
      above = node.parent
      above.remove(node)
      node = above
      ups += 1
    if not len(node):
      self.last = None
      return
    # The new last entry is the rightmost leaf under the surviving node.
    while isinstance(node, SampleTreeNode):
      node = node.children[-1]
    self.last = node

  def update(self, key, uprob):
    """Change an entry's unnormalized probability; ancestor sums refresh."""
    entry = self.entries[key]
    entry.uprob = uprob
    entry.parent.recompute()

  def sample(self):
    """Sample a key with probability proportional to its uprob."""
    node = self.root
    while isinstance(node, SampleTreeNode):
      uprobs = np.array([x.uprob for x in node.children])
      total = uprobs.sum()
      if not np.isfinite(total):
        # Infinite uprobs dominate: sample uniformly among the infinite
        # children. NOTE(review): the name `finite` is misleading — isinf
        # selects the *infinite* entries.
        finite = np.isinf(uprobs)
        probs = finite / finite.sum()
      elif total == 0:
        # All-zero weights: fall back to uniform.
        probs = np.ones(len(uprobs)) / len(uprobs)
      else:
        probs = uprobs / total
      choice = self.rng.choice(np.arange(len(uprobs)), p=probs)
      node = node.children[choice.item()]
    return node.key
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
class SampleTreeNode:
  """Inner node of a SampleTree; `uprob` caches the sum of child uprobs."""

  __slots__ = ('parent', 'children', 'uprob')

  def __init__(self, parent=None):
    self.parent = parent
    self.children = []
    self.uprob = 0

  def __repr__(self):
    child_uprobs = [child.uprob for child in self.children]
    return f'SampleTreeNode(uprob={self.uprob}, children={child_uprobs})'

  def __len__(self):
    return len(self.children)

  def __bool__(self):
    # Nodes are always truthy; __len__ would otherwise make empty nodes
    # falsy in `while node` walks.
    return True

  def append(self, child):
    # Detach from any previous parent so the child lives in one place.
    if child.parent:
      child.parent.remove(child)
    child.parent = self
    self.children.append(child)
    self.recompute()

  def remove(self, child):
    child.parent = None
    self.children.remove(child)
    self.recompute()

  def recompute(self):
    # Refresh the cached sum and propagate it up to the root.
    self.uprob = sum(child.uprob for child in self.children)
    if self.parent:
      self.parent.recompute()
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
class SampleTreeEntry:
  """Leaf of a SampleTree: a sampleable key and its unnormalized probability."""

  __slots__ = ('parent', 'key', 'uprob')

  def __init__(self, key=None, uprob=None):
    self.parent = None  # Assigned by SampleTreeNode.append().
    self.key = key
    self.uprob = uprob
|
models/embodied/core/streams.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import queue
|
| 3 |
+
import threading
|
| 4 |
+
|
| 5 |
+
import elements
|
| 6 |
+
import numpy as np
|
| 7 |
+
import portal
|
| 8 |
+
|
| 9 |
+
from . import base
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class Stateless(base.Stream):
  """Stream adapter around a callable or iterator with no checkpoint state."""

  def __init__(self, nextfn, *args, **kwargs):
    # Allow passing an iterator directly; use its __next__ as the factory.
    if not callable(nextfn) and hasattr(nextfn, '__next__'):
      nextfn = nextfn.__next__
    self.nextfn = functools.partial(nextfn, *args, **kwargs)

  def __iter__(self):
    return self

  def __next__(self):
    return self.nextfn()

  def save(self):
    # Stateless by definition: nothing to checkpoint.
    return None

  def load(self, data):
    pass
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class Prefetch(base.Stream):
  """Prefetches up to `amount` transformed elements from `source` on a
  background thread.

  The worker captures the source state *after* producing each element, so
  save() returns the state matching the last element handed out by
  __next__ rather than the worker's read-ahead position.
  """

  def __init__(self, source, transform=None, amount=1):
    self.source = iter(source) if hasattr(source, '__iter__') else source()
    self.transform = transform or (lambda x: x)
    self.state = self._getstate()
    # Bounds how many elements may be in flight or queued at once.
    self.requests = threading.Semaphore(amount)
    self.amount = amount
    self.queue = queue.Queue()
    self.worker = portal.Thread(self._worker)
    self.started = False

  def __iter__(self):
    assert not self.started
    self.worker.start()
    self.started = True
    return self

  def __next__(self):
    assert self.started
    result = self.queue.get()
    self.requests.release()  # Let the worker produce the next element.
    if isinstance(result, str):
      # The worker thread failed and forwarded its error message.
      raise RuntimeError(result)
    data, self.state = result
    return data

  def save(self):
    return self.state

  def load(self, state):
    if self.started:
      # Drain in-flight elements so none produced from the old state
      # survive the restore.
      for _ in range(self.amount):
        self.queue.get()
    self.source.load(state)
    if self.started:
      self.requests.release(self.amount)

  def _worker(self):
    try:
      while True:
        self.requests.acquire()
        data = next(self.source)
        data = self.transform(data)
        state = self._getstate()
        self.queue.put((data, state))
    except Exception as e:
      # Forward the failure to the consumer as a string sentinel.
      self.queue.put(str(e))
      raise

  def _getstate(self):
    if hasattr(self.source, 'save'):
      return self.source.save()
    else:
      return None
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class Consec(base.Stream):

  """Splits each source batch into `consec` consecutive chunks of `length`
  time steps, each preceded by `prefix` overlapping steps.

  Example:

    length = 3
    consec = 3
    prefix = 2

    source:  0 1 2 3 4 5 6 7 8 9 10
    chunk 1: p-p-#-#-#
    chunk 2:       p-p-#-#-#
    chunk 3:             p-p-#-#-#
  """

  def __init__(
      self, source, length, consec, prefix=0, strict=True, contiguous=False):
    self.source = source
    self.length = length  # Steps per yielded chunk (excluding prefix).
    self.consec = consec  # Number of consecutive chunks per source batch.
    self.prefix = prefix  # Overlapping steps prepended to each chunk.
    self.strict = strict  # Require the source batch to be exactly consumed.
    self.contiguous = contiguous
    self.index = 0  # Next chunk index within the current source batch.
    self.current = None
    self.it = None

  def __iter__(self):
    self.it = iter(self.source)
    return self

  def __next__(self):
    if self.index >= self.consec:
      self.index = 0
    if self.index == 0:
      # Fetch the next long batch from the source.
      self.current = next(self.it)
      available = self.current['is_first'].shape[-1]
      assert self.length * self.consec + self.prefix <= available, (
          self.length, self.consec, self.prefix, available)
      if self.strict:
        assert self.consec * self.length + self.prefix == available, (
            self.consec, self.length, self.prefix, available)
    start = self.index * self.length
    stop = start + (self.length + self.prefix)
    chunk = {k: v[:, start: stop] for k, v in self.current.items()}
    # Tag each chunk with its position so consumers can reset state at 0.
    chunk['consec'] = np.full(chunk['is_first'].shape, self.index, np.int32)
    if self.contiguous:
      # This is expensive but can speed up following operations, such as
      # sending arrays via networking.
      chunk = {k: np.ascontiguousarray(v) for k, v in chunk.items()}
    self.index += 1
    return chunk

  def save(self):
    return {
        'source': self.source.save(),
        'index': self.index,
    }

  def load(self, data):
    self.source.load(data['source'])
    self.index = data['index']
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class Zip(base.Stream):

  """Merges several streams by concatenating one batch from each of them."""

  def __init__(self, sources):
    assert len(sources) > 1, len(sources)
    self.sources = sources
    self.iterators = None
    self.started = False

  def __iter__(self):
    assert not self.started
    self.started = True
    self.iterators = [iter(source) for source in self.sources]
    return self

  def __next__(self):
    # Pull one batch from every stream and concatenate them leaf-wise.
    batches = [next(iterator) for iterator in self.iterators]
    return elements.tree.map(lambda *leaves: np.concatenate(leaves), *batches)

  def save(self):
    return [iterator.save() for iterator in self.iterators]

  def load(self, data):
    assert len(data) == len(self.iterators)
    for iterator, entry in zip(self.iterators, data):
      iterator.load(entry)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
class Map(base.Stream):

  """Applies a transformation function to every element of a stream."""

  def __init__(self, source, fn, *args, **kwargs):
    self.source = source
    # Bind the extra arguments once so __next__ only passes the element.
    self.fn = lambda element: fn(element, *args, **kwargs)
    self.iterator = None
    self.started = False

  def __iter__(self):
    assert not self.started
    self.started = True
    self.iterator = iter(self.source)
    return self

  def __next__(self):
    assert self.started
    element = next(self.iterator)
    return self.fn(element)

  def save(self):
    return self.iterator.save()

  def load(self, data):
    self.iterator.load(data)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
class Mixer(base.Stream):

  """Samples each batch from one of several streams with fixed probabilities.

  Sampling is counter-based (seeded by (seed, step)), so the sequence of
  stream choices is reproducible and survives save/load.
  """

  def __init__(self, sources, weights, seed=0):
    assert sources.keys() == weights.keys(), (sources, weights)
    # Sort keys so the probability vector and checkpoints are deterministic.
    self.keys = sorted(sources.keys())
    self.iterators = [iter(sources[k]) for k in self.keys]
    weights = np.array([weights[k] for k in self.keys], np.float32)
    self.probs = weights / weights.sum()
    self.seed = seed
    self.started = False
    self.step = 0

  def __iter__(self):
    assert not self.started
    # Fixed: the flag was never set, so __next__'s assertion always failed.
    self.started = True
    return self

  def __next__(self):
    assert self.started
    # Fixed typo: np.ranodm -> np.random.
    rng = np.random.default_rng(seed=[self.seed, self.step])
    self.step += 1
    index = rng.choice(len(self.keys), p=self.probs)
    return next(self.iterators[index])

  def save(self):
    return {
        'step': self.step,
        'seed': self.seed,
        'sources': {k: it.save() for k, it in zip(self.keys, self.iterators)},
    }

  def load(self, data):
    self.step = data['step']
    self.seed = data['seed']
    assert sorted(data['sources'].keys()) == self.keys, (
        data['sources'], self.keys)
    # Fixed: self.iterators is a list, so index by position, not by the
    # string key (which raised TypeError before).
    for index, key in enumerate(self.keys):
      self.iterators[index].load(data['sources'][key])
|
models/embodied/core/wrappers.py
ADDED
|
@@ -0,0 +1,418 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import time
|
| 3 |
+
|
| 4 |
+
import elements
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Wrapper:

  """Base class that forwards everything to a wrapped environment.

  Subclasses override only the methods they change. Unknown attribute
  lookups fall through to the inner env; dunder names are excluded so that
  Python protocol lookups are never silently forwarded.
  """

  def __init__(self, env):
    self.env = env

  def __len__(self):
    return len(self.env)

  def __bool__(self):
    return bool(self.env)

  def __getattr__(self, name):
    # Only invoked for names not found on the wrapper itself.
    if name.startswith('__'):
      raise AttributeError(name)
    if not hasattr(self.env, name):
      # Raise a distinct type so a missing attribute deep in a wrapper stack
      # is easy to tell apart from an ordinary AttributeError.
      raise ValueError(name)
    return getattr(self.env, name)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class TimeLimit(Wrapper):

  """Ends episodes after a fixed number of steps.

  With reset=True the underlying env is reset when the limit is reached.
  With reset=False the env keeps running and only the episode-boundary
  flags are rewritten, slicing one long env episode into fixed-size
  logical episodes.
  """

  def __init__(self, env, duration, reset=True):
    super().__init__(env)
    self._duration = duration  # Max steps per episode; falsy disables the limit.
    self._reset = reset
    self._step = 0
    self._done = False

  def step(self, action):
    if action['reset'] or self._done:
      self._step = 0
      self._done = False
      if self._reset:
        action.update(reset=True)
        return self.env.step(action)
      else:
        # Keep the underlying env running, but mark this step as the start
        # of a new logical episode.
        action.update(reset=False)
        obs = self.env.step(action)
        obs['is_first'] = True
        return obs
    self._step += 1
    obs = self.env.step(action)
    if self._duration and self._step >= self._duration:
      obs['is_last'] = True
    # Remember episode end so the next call triggers the reset path.
    self._done = obs['is_last']
    return obs
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class ActionRepeat(Wrapper):

  """Repeats each action for a fixed number of env steps, summing rewards."""

  def __init__(self, env, repeat):
    super().__init__(env)
    self._repeat = repeat

  def step(self, action):
    if action['reset']:
      return self.env.step(action)
    total = 0.0
    for _ in range(self._repeat):
      obs = self.env.step(action)
      total += obs['reward']
      if obs['is_last'] or obs['is_terminal']:
        # Stop early at episode boundaries.
        break
    obs['reward'] = np.float32(total)
    return obs
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class ClipAction(Wrapper):

  """Clips one action entry into [low, high] before forwarding it."""

  def __init__(self, env, key='action', low=-1, high=1):
    super().__init__(env)
    self._key = key
    self._low = low
    self._high = high

  def step(self, action):
    bounded = dict(action)
    bounded[self._key] = np.clip(action[self._key], self._low, self._high)
    return self.env.step(bounded)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class NormalizeAction(Wrapper):

  """Rescales a bounded continuous action entry to the range [-1, 1].

  Only dimensions with finite bounds are rescaled; unbounded dimensions
  pass through unchanged.
  """

  def __init__(self, env, key='action'):
    super().__init__(env)
    self._key = key
    self._space = env.act_space[key]
    # Per-dimension mask of which bounds are finite and can be rescaled.
    self._mask = np.isfinite(self._space.low) & np.isfinite(self._space.high)
    self._low = np.where(self._mask, self._space.low, -1)
    self._high = np.where(self._mask, self._space.high, 1)

  @functools.cached_property
  def act_space(self):
    # Advertise [-1, 1] for finite dimensions; keep original bounds elsewhere.
    low = np.where(self._mask, -np.ones_like(self._low), self._low)
    high = np.where(self._mask, np.ones_like(self._low), self._high)
    space = elements.Space(np.float32, self._space.shape, low, high)
    return {**self.env.act_space, self._key: space}

  def step(self, action):
    # Map [-1, 1] back onto [low, high] for the finite dimensions.
    orig = (action[self._key] + 1) / 2 * (self._high - self._low) + self._low
    orig = np.where(self._mask, orig, action[self._key])
    return self.env.step({**action, self._key: orig})
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
# class ExpandScalars(Wrapper):
|
| 113 |
+
#
|
| 114 |
+
# def __init__(self, env):
|
| 115 |
+
# super().__init__(env)
|
| 116 |
+
# self._obs_expanded = []
|
| 117 |
+
# self._obs_space = {}
|
| 118 |
+
# for key, space in self.env.obs_space.items():
|
| 119 |
+
# if space.shape == () and key != 'reward' and not space.discrete:
|
| 120 |
+
# space = elements.Space(space.dtype, (1,), space.low, space.high)
|
| 121 |
+
# self._obs_expanded.append(key)
|
| 122 |
+
# self._obs_space[key] = space
|
| 123 |
+
# self._act_expanded = []
|
| 124 |
+
# self._act_space = {}
|
| 125 |
+
# for key, space in self.env.act_space.items():
|
| 126 |
+
# if space.shape == () and not space.discrete:
|
| 127 |
+
# space = elements.Space(space.dtype, (1,), space.low, space.high)
|
| 128 |
+
# self._act_expanded.append(key)
|
| 129 |
+
# self._act_space[key] = space
|
| 130 |
+
#
|
| 131 |
+
# @functools.cached_property
|
| 132 |
+
# def obs_space(self):
|
| 133 |
+
# return self._obs_space
|
| 134 |
+
#
|
| 135 |
+
# @functools.cached_property
|
| 136 |
+
# def act_space(self):
|
| 137 |
+
# return self._act_space
|
| 138 |
+
#
|
| 139 |
+
# def step(self, action):
|
| 140 |
+
# action = {
|
| 141 |
+
# key: np.squeeze(value, 0) if key in self._act_expanded else value
|
| 142 |
+
# for key, value in action.items()}
|
| 143 |
+
# obs = self.env.step(action)
|
| 144 |
+
# obs = {
|
| 145 |
+
# key: np.expand_dims(value, 0) if key in self._obs_expanded else value
|
| 146 |
+
# for key, value in obs.items()}
|
| 147 |
+
# return obs
|
| 148 |
+
#
|
| 149 |
+
#
|
| 150 |
+
# class FlattenTwoDimObs(Wrapper):
|
| 151 |
+
#
|
| 152 |
+
# def __init__(self, env):
|
| 153 |
+
# super().__init__(env)
|
| 154 |
+
# self._keys = []
|
| 155 |
+
# self._obs_space = {}
|
| 156 |
+
# for key, space in self.env.obs_space.items():
|
| 157 |
+
# if len(space.shape) == 2:
|
| 158 |
+
# space = elements.Space(
|
| 159 |
+
# space.dtype,
|
| 160 |
+
# (int(np.prod(space.shape)),),
|
| 161 |
+
# space.low.flatten(),
|
| 162 |
+
# space.high.flatten())
|
| 163 |
+
# self._keys.append(key)
|
| 164 |
+
# self._obs_space[key] = space
|
| 165 |
+
#
|
| 166 |
+
# @functools.cached_property
|
| 167 |
+
# def obs_space(self):
|
| 168 |
+
# return self._obs_space
|
| 169 |
+
#
|
| 170 |
+
# def step(self, action):
|
| 171 |
+
# obs = self.env.step(action).copy()
|
| 172 |
+
# for key in self._keys:
|
| 173 |
+
# obs[key] = obs[key].flatten()
|
| 174 |
+
# return obs
|
| 175 |
+
#
|
| 176 |
+
#
|
| 177 |
+
# class FlattenTwoDimActions(Wrapper):
|
| 178 |
+
#
|
| 179 |
+
# def __init__(self, env):
|
| 180 |
+
# super().__init__(env)
|
| 181 |
+
# self._origs = {}
|
| 182 |
+
# self._act_space = {}
|
| 183 |
+
# for key, space in self.env.act_space.items():
|
| 184 |
+
# if len(space.shape) == 2:
|
| 185 |
+
# space = elements.Space(
|
| 186 |
+
# space.dtype,
|
| 187 |
+
# (int(np.prod(space.shape)),),
|
| 188 |
+
# space.low.flatten(),
|
| 189 |
+
# space.high.flatten())
|
| 190 |
+
# self._origs[key] = space.shape
|
| 191 |
+
# self._act_space[key] = space
|
| 192 |
+
#
|
| 193 |
+
# @functools.cached_property
|
| 194 |
+
# def act_space(self):
|
| 195 |
+
# return self._act_space
|
| 196 |
+
#
|
| 197 |
+
# def step(self, action):
|
| 198 |
+
# action = action.copy()
|
| 199 |
+
# for key, shape in self._origs.items():
|
| 200 |
+
# action[key] = action[key].reshape(shape)
|
| 201 |
+
# return self.env.step(action)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
class UnifyDtypes(Wrapper):

  """Normalizes observation and action dtypes to float32 / int32 / uint8.

  Floating-point spaces become float32 and integer spaces become int32,
  except uint8 which is preserved (typically images). Actions are cast back
  to the inner env's original dtypes before stepping; observations are cast
  to the unified dtypes after stepping.
  """

  def __init__(self, env):
    super().__init__(env)
    # _convert returns (spaces, original dtypes, unified dtypes). For obs we
    # need the unified (outer) dtypes to cast outputs; for actions we need
    # the original (inner) dtypes to cast inputs.
    self._obs_space, _, self._obs_outer = self._convert(env.obs_space)
    self._act_space, self._act_inner, _ = self._convert(env.act_space)

  @property
  def obs_space(self):
    return self._obs_space

  @property
  def act_space(self):
    return self._act_space

  def step(self, action):
    action = action.copy()
    for key, dtype in self._act_inner.items():
      action[key] = np.asarray(action[key], dtype)
    obs = self.env.step(action)
    for key, dtype in self._obs_outer.items():
      obs[key] = np.asarray(obs[key], dtype)
    return obs

  def _convert(self, spaces):
    # Returns (converted spaces, per-key original dtype, per-key new dtype).
    results, befores, afters = {}, {}, {}
    for key, space in spaces.items():
      before = after = space.dtype
      if np.issubdtype(before, np.floating):
        after = np.float32
      elif np.issubdtype(before, np.uint8):
        # Keep uint8 (images) untouched.
        after = np.uint8
      elif np.issubdtype(before, np.integer):
        after = np.int32
      befores[key] = before
      afters[key] = after
      results[key] = elements.Space(after, space.shape, space.low, space.high)
    return results, befores, afters
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class CheckSpaces(Wrapper):

  """Validates actions and observations against their declared spaces.

  Failing fast with a descriptive message here is much easier to debug than
  a shape or dtype error deep inside the model.
  """

  def __init__(self, env):
    # Observation and action keys must be disjoint because downstream code
    # merges them into a single dict.
    assert not (env.obs_space.keys() & env.act_space.keys()), (
        env.obs_space.keys(), env.act_space.keys())
    super().__init__(env)

  def step(self, action):
    for key, value in action.items():
      self._check(value, self.env.act_space[key], key)
    obs = self.env.step(action)
    for key, value in obs.items():
      self._check(value, self.env.obs_space[key], key)
    return obs

  def _check(self, value, space, key):
    # Reject values that cannot even be converted to an array sensibly.
    if not isinstance(value, (
        np.ndarray, np.generic, list, tuple, int, float, bool)):
      raise TypeError(f'Invalid type {type(value)} for key {key}.')
    if value in space:
      return
    # Collect diagnostics for the error message.
    dtype = np.array(value).dtype
    shape = np.array(value).shape
    lowest, highest = np.min(value), np.max(value)
    raise ValueError(
        f"Value for '{key}' with dtype {dtype}, shape {shape}, "
        f"lowest {lowest}, highest {highest} is not in {space}.")
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
class DiscretizeAction(Wrapper):

  """Exposes a continuous action entry as per-dimension discrete bins.

  The agent picks one of `bins` indices per action dimension; indices map
  onto evenly spaced values in [-1, 1] before stepping the env.
  """

  def __init__(self, env, key='action', bins=5):
    super().__init__(env)
    # Assumes the underlying action space is one-dimensional: shape (dims,).
    self._dims = np.squeeze(env.act_space[key].shape, 0).item()
    self._values = np.linspace(-1, 1, bins)
    self._key = key

  @functools.cached_property
  def act_space(self):
    space = elements.Space(np.int32, self._dims, 0, len(self._values))
    return {**self.env.act_space, self._key: space}

  def step(self, action):
    # Look up the continuous value for each chosen bin index.
    continuous = np.take(self._values, action[self._key])
    return self.env.step({**action, self._key: continuous})
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
class ResizeImage(Wrapper):

  """Resizes image observations to a fixed size using nearest neighbor.

  Only keys whose values have more than one dimension and a different
  leading (height, width) are resized; all other entries pass through.
  """

  def __init__(self, env, size=(64, 64)):
    super().__init__(env)
    self._size = size  # Assumes a (height, width) tuple.
    self._keys = [
        k for k, v in env.obs_space.items()
        if len(v.shape) > 1 and v.shape[:2] != size]
    if self._keys:
      # Fixed: only announce and import PIL when there is actual work to do;
      # previously an empty "Resizing keys  to ..." line was always printed.
      print(f'Resizing keys {",".join(self._keys)} to {self._size}.')
      from PIL import Image
      self._Image = Image

  @functools.cached_property
  def obs_space(self):
    # Fixed: copy the dict so we never mutate a mapping the inner env may
    # hold onto (e.g. its own cached obs_space).
    spaces = dict(self.env.obs_space)
    for key in self._keys:
      shape = self._size + spaces[key].shape[2:]
      spaces[key] = elements.Space(np.uint8, shape)
    return spaces

  def step(self, action):
    obs = self.env.step(action)
    for key in self._keys:
      obs[key] = self._resize(obs[key])
    return obs

  def _resize(self, image):
    image = self._Image.fromarray(image)
    image = image.resize(self._size, self._Image.NEAREST)
    image = np.array(image)
    return image
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
# class RenderImage(Wrapper):
|
| 326 |
+
#
|
| 327 |
+
# def __init__(self, env, key='image'):
|
| 328 |
+
# super().__init__(env)
|
| 329 |
+
# self._key = key
|
| 330 |
+
# self._shape = self.env.render().shape
|
| 331 |
+
#
|
| 332 |
+
# @functools.cached_property
|
| 333 |
+
# def obs_space(self):
|
| 334 |
+
# spaces = self.env.obs_space
|
| 335 |
+
# spaces[self._key] = elements.Space(np.uint8, self._shape)
|
| 336 |
+
# return spaces
|
| 337 |
+
#
|
| 338 |
+
# def step(self, action):
|
| 339 |
+
# obs = self.env.step(action)
|
| 340 |
+
# obs[self._key] = self.env.render()
|
| 341 |
+
# return obs
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
class BackwardReturn(Wrapper):

  """Adds a discounted sum of past rewards to each observation.

  The 'bwreturn' entry accumulates rewards backward in time with discount
  factor 1 - 1/horizon and resets at episode starts.
  """

  def __init__(self, env, horizon):
    super().__init__(env)
    self._discount = 1 - 1 / horizon
    self._bwreturn = 0.0

  @functools.cached_property
  def obs_space(self):
    return {
        **self.env.obs_space,
        'bwreturn': elements.Space(np.float32),
    }

  def step(self, action):
    obs = self.env.step(action)
    # Zero the accumulator on episode start (is_first), otherwise decay it,
    # then add the current reward.
    self._bwreturn *= (1 - obs['is_first']) * self._discount
    self._bwreturn += obs['reward']
    obs['bwreturn'] = np.float32(self._bwreturn)
    return obs
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
class AddObs(Wrapper):

  """Injects a constant entry into every observation."""

  def __init__(self, env, key, value, space):
    super().__init__(env)
    self._key = key
    self._value = value
    self._space = space

  @functools.cached_property
  def obs_space(self):
    spaces = dict(self.env.obs_space)
    spaces[self._key] = self._space
    return spaces

  def step(self, action):
    obs = self.env.step(action)
    obs[self._key] = self._value
    return obs
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
class RestartOnException(Wrapper):

  """Recreates a crashing environment instead of propagating the error.

  Up to `maxfails` crashes within a sliding `window` of seconds are
  tolerated; each restart waits `wait` seconds, reconstructs the env via
  `ctor`, and re-issues the step as a forced reset.
  """

  def __init__(
      self, ctor, exceptions=(Exception,), window=300, maxfails=2, wait=20):
    if not isinstance(exceptions, (tuple, list)):
      exceptions = [exceptions]
    self._ctor = ctor
    self._exceptions = tuple(exceptions)
    self._window = window
    self._maxfails = maxfails
    self._wait = wait
    self._last = time.time()  # Start time of the current failure window.
    self._fails = 0
    super().__init__(self._ctor())

  def step(self, action):
    try:
      return self.env.step(action)
    except self._exceptions as e:
      if time.time() > self._last + self._window:
        # The previous window expired; count failures from scratch.
        self._last = time.time()
        self._fails = 1
      else:
        self._fails += 1
      if self._fails > self._maxfails:
        raise RuntimeError('The env crashed too many times.')
      message = f'Restarting env after crash with {type(e).__name__}: {e}'
      print(message, flush=True)
      time.sleep(self._wait)
      self.env = self._ctor()
      # Force a reset so the freshly created env starts a new episode.
      action['reset'] = np.ones_like(action['reset'])
      return self.env.step(action)
|
models/embodied/envs/atari.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import threading
|
| 3 |
+
import collections
|
| 4 |
+
|
| 5 |
+
import ale_py
|
| 6 |
+
import ale_py.roms as roms
|
| 7 |
+
import elements
|
| 8 |
+
import embodied
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from PIL import Image
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Atari(embodied.Env):

  """Atari 2600 environment based on the Arcade Learning Environment.

  Implements the standard preprocessing stack: action repeat with frame
  pooling, resizing, optional grayscale, noop starts, sticky actions,
  life-loss handling, and optional reward clipping.
  """

  LOCK = threading.Lock()
  # Standard luma weights for grayscale conversion.
  WEIGHTS = np.array([0.299, 0.587, 1 - (0.299 + 0.587)])
  ACTION_MEANING = (
      'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',
      'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',
      'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE')

  def __init__(
      self, name, repeat=4, size=(84, 84), gray=True, noops=0, lives='unused',
      sticky=True, actions='all', length=108000, pooling=2, aggregate='max',
      resize='pillow', autostart=False, clip_reward=False, seed=None):

    assert lives in ('unused', 'discount', 'reset'), lives
    assert actions in ('all', 'needed'), actions
    assert resize in ('opencv', 'pillow'), resize
    assert aggregate in ('max', 'mean'), aggregate
    assert pooling >= 1, pooling
    assert repeat >= 1, repeat
    if name == 'james_bond':
      name = 'jamesbond'

    self.repeat = repeat
    self.size = size
    self.gray = gray
    self.noops = noops
    self.lives = lives
    self.sticky = sticky
    self.length = length
    self.pooling = pooling
    self.aggregate = aggregate
    self.resize = resize
    self.autostart = autostart
    self.clip_reward = clip_reward
    self.rng = np.random.default_rng(seed)

    # ALE construction and ROM loading are not thread-safe.
    with self.LOCK:
      self.ale = ale_py.ALEInterface()
      self.ale.setLoggerMode(ale_py.LoggerMode.Error)
      self.ale.setInt(b'random_seed', self.rng.integers(0, 2 ** 31))
      path = os.environ.get('ALE_ROM_PATH', None)
      if path:
        self.ale.loadROM(os.path.join(path, f'{name}.bin'))
      else:
        self.ale.loadROM(roms.get_rom_path(name))

    self.ale.setFloat('repeat_action_probability', 0.25 if sticky else 0.0)
    self.actionset = {
        'all': self.ale.getLegalActionSet,
        'needed': self.ale.getMinimalActionSet,
    }[actions]()

    # Ring of raw frames used for max/mean pooling over the repeat window.
    # NOTE(review): assumes getScreenDims order matches the buffer layout
    # expected by getScreenRGB — confirm against the ale-py docs.
    W, H = self.ale.getScreenDims()
    self.buffers = collections.deque(
        [np.zeros((W, H, 3), np.uint8) for _ in range(self.pooling)],
        maxlen=self.pooling)
    self.prevlives = None
    self.duration = None
    self.done = True  # Forces a reset on the first step() call.

  @property
  def obs_space(self):
    return {
        'image': elements.Space(np.uint8, (*self.size, 1 if self.gray else 3)),
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }

  @property
  def act_space(self):
    return {
        'action': elements.Space(np.int32, (), 0, len(self.actionset)),
        'reset': elements.Space(bool),
    }

  def step(self, action):
    if action['reset'] or self.done:
      self._reset()
      self.prevlives = self.ale.lives()
      self.duration = 0
      self.done = False
      return self._obs(0.0, is_first=True)
    reward = 0.0
    terminal = False
    last = False
    assert 0 <= action['action'] < len(self.actionset), action['action']
    act = self.actionset[action['action']]
    for repeat in range(self.repeat):
      reward += self.ale.act(act)
      self.duration += 1
      if repeat >= self.repeat - self.pooling:
        # Only the final `pooling` frames of the repeat window are rendered
        # and pooled into the observation.
        self._render()
      if self.ale.game_over():
        terminal = True
        last = True
      if self.duration >= self.length:
        last = True
      lives = self.ale.lives()
      # Losing a life (without game over) can be treated as a terminal
      # and/or an episode boundary, depending on the `lives` setting.
      if self.lives == 'discount' and 0 < lives < self.prevlives:
        terminal = True
      if self.lives == 'reset' and 0 < lives < self.prevlives:
        terminal = True
        last = True
      self.prevlives = lives
      if terminal or last:
        break
    self.done = last
    obs = self._obs(reward, is_last=last, is_terminal=terminal)
    return obs

  def _reset(self):
    with self.LOCK:
      self.ale.reset_game()
    # Random number of noop steps to randomize the initial state.
    for _ in range(self.rng.integers(self.noops + 1)):
      self.ale.act(self.ACTION_MEANING.index('NOOP'))
      if self.ale.game_over():
        with self.LOCK:
          self.ale.reset_game()
    if self.autostart and self.ACTION_MEANING.index('FIRE') in self.actionset:
      # Some games require FIRE (and UP) to actually start playing.
      self.ale.act(self.ACTION_MEANING.index('FIRE'))
      if self.ale.game_over():
        with self.LOCK:
          self.ale.reset_game()
      self.ale.act(self.ACTION_MEANING.index('UP'))
      if self.ale.game_over():
        with self.LOCK:
          self.ale.reset_game()
    self._render()
    # Fixed: np.copyto takes (dst, src); the original copied the stale
    # buffers into the freshly rendered frame. Duplicate the fresh frame
    # into all pooling buffers instead.
    for i, dst in enumerate(self.buffers):
      if i > 0:
        np.copyto(dst, self.buffers[0])

  def _render(self, reset=False):
    # Rotate the ring and render the current screen into the front buffer.
    self.buffers.appendleft(self.buffers.pop())
    self.ale.getScreenRGB(self.buffers[0])

  def _obs(self, reward, is_first=False, is_last=False, is_terminal=False):
    if self.clip_reward:
      reward = np.sign(reward)
    if self.aggregate == 'max':
      image = np.amax(self.buffers, 0)
    elif self.aggregate == 'mean':
      image = np.mean(self.buffers, 0).astype(np.uint8)
    if self.resize == 'opencv':
      import cv2
      image = cv2.resize(image, self.size, interpolation=cv2.INTER_AREA)
    elif self.resize == 'pillow':
      image = Image.fromarray(image)
      image = image.resize(self.size, Image.BILINEAR)
      image = np.array(image)
    if self.gray:
      # Averaging channels equally would not work. For example, a fully red
      # object on a fully green background would average to the same color.
      image = (image * self.WEIGHTS).sum(-1).astype(image.dtype)[:, :, None]
    return dict(
        image=image,
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        # Fixed: was is_terminal=is_last, which discarded the life-loss
        # terminal signal computed in step() under lives='discount'.
        is_terminal=is_terminal,
    )
|
models/embodied/envs/bsuite.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
|
| 3 |
+
import embodied
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class BSuite(embodied.Env):

  """Behaviour Suite environment with episode-count based termination."""

  def __init__(self, task):
    print(
        'Warning: BSuite result logging is stateful and therefore training ' +
        'runs cannot be interrupted or restarted.')
    np.int = int  # Patch deprecated Numpy alias used inside BSuite.
    from . import from_dm
    if '/' not in task:
      task = f'{task}/0'
    import bsuite
    env = bsuite.from_checkpoint_id(task)
    self.num_episodes = 0
    self.max_episodes = env.bsuite_num_episodes
    self.exit_after = None  # Deadline after which the run is terminated.
    # Fixed: optional experiment-manager handle checked in step(); it was
    # never assigned, so reaching the deadline raised AttributeError instead
    # of terminating cleanly. None means we terminate by raising.
    self.xm = None
    env = from_dm.FromDM(env)
    # NOTE(review): assumes embodied.wrappers provides ForceDtypes and
    # FlattenTwoDimObs — confirm against the wrappers module.
    env = embodied.wrappers.ForceDtypes(env)
    env = embodied.wrappers.FlattenTwoDimObs(env)
    self.env = env

  @property
  def obs_space(self):
    return self.env.obs_space

  @property
  def act_space(self):
    return self.env.act_space

  def step(self, action):
    obs = self.env.step(action)
    if obs['is_last']:
      self.num_episodes += 1
    if self.num_episodes >= self.max_episodes:
      # After reaching the target number of episodes, continue running for 10
      # minutes to make sure logs are flushed and then raise an exception to
      # terminate the program.
      if not self.exit_after:
        self.exit_after = time.time() + 600
      if time.time() > self.exit_after:
        if self.xm:
          wu = self.xm.get_current_work_unit()
          wu.stop(mark_as_completed=True, message='BSuite run complete')
        else:
          raise RuntimeError('BSuite run complete')
    return obs
|
models/embodied/envs/crafter.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
import crafter
|
| 4 |
+
import elements
|
| 5 |
+
import embodied
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Crafter(embodied.Env):
  """Crafter open-world survival benchmark.

  Wraps `crafter.Env` behind the embodied interface and optionally records
  per-episode statistics (length, reward, achievements) to a JSONL file.

  Args:
    task: 'reward' enables the environment reward, 'noreward' disables it.
    size: Image observation resolution.
    logs: Whether to expose per-achievement counters as log observations.
    logdir: Optional directory for per-episode stats.jsonl output.
    seed: Optional environment seed.
  """

  def __init__(self, task, size=(64, 64), logs=False, logdir=None, seed=None):
    assert task in ('reward', 'noreward')
    self._env = crafter.Env(size=size, reward=(task == 'reward'), seed=seed)
    self._logs = logs
    self._logdir = logdir and elements.Path(logdir)
    if self._logdir:
      self._logdir.mkdir()
    self._episode = 0
    self._length = None
    self._reward = None
    self._achievements = crafter.constants.achievements.copy()
    self._done = True  # Forces a reset on the first call to step().

  @property
  def obs_space(self):
    spaces = {
        'image': elements.Space(np.uint8, self._env.observation_space.shape),
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
        'log/reward': elements.Space(np.float32),
    }
    if self._logs:
      spaces.update({
          f'log/achievement_{k}': elements.Space(np.int32)
          for k in self._achievements})
    return spaces

  @property
  def act_space(self):
    return {
        'action': elements.Space(np.int32, (), 0, self._env.action_space.n),
        'reset': elements.Space(bool),
    }

  def step(self, action):
    if action['reset'] or self._done:
      self._episode += 1
      self._length = 0
      self._reward = 0
      self._done = False
      image = self._env.reset()
      return self._obs(image, 0.0, {}, is_first=True)
    image, reward, self._done, info = self._env.step(action['action'])
    self._reward += reward
    self._length += 1
    if self._done and self._logdir:
      self._write_stats(self._length, self._reward, info)
    return self._obs(
        image, reward, info,
        is_last=self._done,
        is_terminal=info['discount'] == 0)

  def _obs(
      self, image, reward, info,
      is_first=False, is_last=False, is_terminal=False):
    # info is {} on reset steps, in which case log values default to zero.
    obs = dict(
        image=image,
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        is_terminal=is_terminal,
        **{'log/reward': np.float32(info['reward'] if info else 0.0)},
    )
    if self._logs:
      log_achievements = {
          f'log/achievement_{k}': info['achievements'][k] if info else 0
          for k in self._achievements}
      obs.update({k: np.int32(v) for k, v in log_achievements.items()})
    return obs

  def _write_stats(self, length, reward, info):
    """Append this episode's stats as one JSON line to logdir/stats.jsonl."""
    stats = {
        'episode': self._episode,
        'length': length,
        'reward': round(reward, 1),
        **{f'achievement_{k}': v for k, v in info['achievements'].items()},
    }
    filename = self._logdir / 'stats.jsonl'
    lines = filename.read() if filename.exists() else ''
    lines += json.dumps(stats) + '\n'
    filename.write(lines, mode='w')
    # Fix: previously printed the literal placeholder text '(unknown)'
    # instead of the path of the file that was written.
    print(f'Wrote stats: {filename}')
|
models/embodied/envs/dmc.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
import elements
|
| 5 |
+
import embodied
|
| 6 |
+
import numpy as np
|
| 7 |
+
from dm_control import manipulation
|
| 8 |
+
from dm_control import suite
|
| 9 |
+
from dm_control.locomotion.examples import basic_rodent_2020
|
| 10 |
+
|
| 11 |
+
from . import from_dm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class DMC(embodied.Env):
  """DeepMind Control Suite environment with rendered image observations.

  Accepts either a '<domain>_<task>' string or an already constructed
  dm_control environment. Proprioceptive observations can be disabled and
  the rendered camera image is exposed either as the main 'image' key or as
  a 'log/image' key for logging only.
  """

  DEFAULT_CAMERAS = dict(
      quadruped=2,
      rodent=4,
  )

  def __init__(
      self, env, repeat=1, size=(64, 64), proprio=True, image=True, camera=-1):
    # Default to headless EGL rendering unless the user chose a backend.
    os.environ.setdefault('MUJOCO_GL', 'egl')
    if isinstance(env, str):
      domain, task = env.split('_', 1)
      if camera == -1:
        camera = self.DEFAULT_CAMERAS.get(domain, 0)
      if domain == 'cup':  # Only domain with multiple words.
        domain = 'ball_in_cup'
      if domain == 'manip':
        env = manipulation.load(task + '_vision')
      elif domain == 'rodent':
        # camera 0: topdown map
        # camera 2: shoulder
        # camera 4: topdown tracking
        # camera 5: eyes
        env = getattr(basic_rodent_2020, task)()
      else:
        env = suite.load(domain, task)
    self._dmenv = env
    self._env = embodied.wrappers.ActionRepeat(from_dm.FromDM(env), repeat)
    self._size = size
    self._proprio = proprio
    self._image = image
    self._camera = camera

  @functools.cached_property
  def obs_space(self):
    spaces = dict(self._env.obs_space)
    if not self._proprio:
      # Strip proprioceptive keys, keeping only the bookkeeping entries.
      spaces = {
          k: spaces[k]
          for k in ('is_first', 'is_last', 'is_terminal', 'reward')}
    image_key = 'image' if self._image else 'log/image'
    spaces[image_key] = elements.Space(np.uint8, self._size + (3,))
    return spaces

  @functools.cached_property
  def act_space(self):
    return self._env.act_space

  def step(self, action):
    # Guard against NaN/Inf actions before they reach the physics engine.
    for name, space in self.act_space.items():
      if not space.discrete:
        assert np.isfinite(action[name]).all(), (name, action[name])
    obs = self._env.step(action)
    if not self._proprio:
      obs = {
          k: obs[k]
          for k in ('is_first', 'is_last', 'is_terminal', 'reward')}
    image_key = 'image' if self._image else 'log/image'
    obs[image_key] = self._dmenv.physics.render(
        *self._size, camera_id=self._camera)
    # Sanity-check that the simulation did not diverge.
    for name, space in self.obs_space.items():
      if np.issubdtype(space.dtype, np.floating):
        assert np.isfinite(obs[name]).all(), (name, obs[name])
    return obs
|
models/embodied/envs/dmlab.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import re
|
| 3 |
+
import zlib
|
| 4 |
+
|
| 5 |
+
import deepmind_lab
|
| 6 |
+
import elements
|
| 7 |
+
import embodied
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class DMLab(embodied.Env):
  """DeepMind Lab environment with a fixed discrete action set.

  Wraps `deepmind_lab.Lab` with action repeat, an IMPALA- or PopArt-style
  discrete action set, and an optional hashed bag-of-words embedding of the
  instruction string for language levels.
  """

  # Splits instruction text into alphabetic words and punctuation runs.
  TOKENIZER = re.compile(r'([A-Za-z_]+|[^A-Za-z_ ]+)')

  def __init__(
      self, level, repeat=4, size=(64, 64), mode='train',
      actions='popart', episodic=True, text=None, seed=None):
    if level == 'goals':  # Shortcut for convenience
      level = 'dmlab_explore_goal_locations_small'
    self._size = size
    self._repeat = repeat
    self._actions = {
        'impala': IMPALA_ACTION_SET,
        'popart': POPART_ACTION_SET,
    }[actions]
    if text is None:
      # Language levels provide an INSTR observation by default.
      text = bool(level.startswith('language'))
    self._episodic = episodic
    self._text = text
    self._random = np.random.RandomState(seed)
    config = dict(height=size[0], width=size[1], logLevel='WARN')
    if mode == 'train':
      if level.endswith('_test'):
        level = level.replace('_test', '_train')
    elif mode == 'eval':
      config.update(allowHoldOutLevels='true', mixerSeed=0x600D5EED)
    else:
      raise NotImplementedError(mode)
    config = {k: str(v) for k, v in config.items()}
    obs = ['RGB_INTERLEAVED', 'INSTR'] if text else ['RGB_INTERLEAVED']
    self._env = deepmind_lab.Lab(
        level='contributed/dmlab30/' + level,
        observations=obs, config=config)
    self._current_image = None
    if self._text:
      self._current_instr = None
      self._instr_length = 32
      self._embed_size = 32
      self._vocab_buckets = 64 * 1024
      # Fixed random projection table; seed 0 makes it identical across
      # instances and processes.
      self._embeddings = np.random.default_rng(seed=0).normal(
          0.0, 1.0, (self._vocab_buckets, self._embed_size)).astype(np.float32)
      # Per-instance memo for _hash(). Fix: replaces @functools.cache on a
      # bound method, which keys on `self` and keeps every instance alive
      # for the lifetime of the process (leak).
      self._hash_cache = {}
    self._done = True

  @property
  def obs_space(self):
    spaces = {
        'image': elements.Space(np.uint8, self._size + (3,)),
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }
    if self._text:
      spaces['instr'] = elements.Space(
          np.float32, self._instr_length * self._embed_size)
    return spaces

  @property
  def act_space(self):
    return {
        'action': elements.Space(np.int32, (), 0, len(self._actions)),
        'reset': elements.Space(bool),
    }

  def step(self, action):
    if action['reset'] or self._done:
      self._env.reset(seed=self._random.randint(0, 2 ** 31 - 1))
      self._done = False
      return self._obs(0.0, is_first=True)
    raw_action = np.array(self._actions[action['action']], np.intc)
    reward = self._env.step(raw_action, num_steps=self._repeat)
    self._done = not self._env.is_running()
    return self._obs(reward, is_last=self._done)

  def _obs(self, reward, is_first=False, is_last=False):
    # After the episode ends the raw observations are unavailable, so the
    # most recently seen image (and instruction) is repeated for the final
    # step of the episode.
    if not self._done:
      self._current_image = self._env.observations()['RGB_INTERLEAVED']
      if self._text:
        self._current_instr = self._embed(self._env.observations()['INSTR'])
    obs = dict(
        image=self._current_image,
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        is_terminal=is_last if self._episodic else False,
    )
    if self._text:
      obs['instr'] = self._current_instr
    return obs

  def _embed(self, text):
    """Embed an instruction string into a flat float32 vector."""
    tokens = self.TOKENIZER.findall(text.lower())
    indices = [self._hash(token) for token in tokens]
    # Truncate long instructions and zero-pad short ones so the result
    # always has instr_length * embed_size entries. Fix: previously an
    # instruction with more than instr_length tokens produced an oversized
    # vector (the pad count went negative), violating the declared
    # observation space.
    indices = indices[:self._instr_length]
    indices = indices + [0] * (self._instr_length - len(indices))
    embeddings = [self._embeddings[i] for i in indices]
    return np.concatenate(embeddings)

  def _hash(self, token):
    """Map a token to a vocabulary bucket, memoized per instance."""
    index = self._hash_cache.get(token)
    if index is None:
      index = zlib.crc32(token.encode('utf-8')) % self._vocab_buckets
      self._hash_cache[token] = index
    return index

  def close(self):
    self._env.close()
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
# Small action set used by IMPALA. Each entry is a raw 7-dimensional DMLab
# action: (look_lr, look_ud, strafe, move, fire, jump, crouch).
IMPALA_ACTION_SET = (
    (  0, 0,  0,  1, 0, 0, 0),  # Forward
    (  0, 0,  0, -1, 0, 0, 0),  # Backward
    (  0, 0, -1,  0, 0, 0, 0),  # Strafe Left
    (  0, 0,  1,  0, 0, 0, 0),  # Strafe Right
    (-20, 0,  0,  0, 0, 0, 0),  # Look Left
    ( 20, 0,  0,  0, 0, 0, 0),  # Look Right
    (-20, 0,  0,  1, 0, 0, 0),  # Look Left + Forward
    ( 20, 0,  0,  1, 0, 0, 0),  # Look Right + Forward
    (  0, 0,  0,  0, 1, 0, 0),  # Fire
)

# Large action set used by PopArt and R2D2. Defined as a tuple (previously a
# list) so both action sets are immutable and share the same type.
POPART_ACTION_SET = (
    (  0,   0,  0,  1, 0, 0, 0),  # FW
    (  0,   0,  0, -1, 0, 0, 0),  # BW
    (  0,   0, -1,  0, 0, 0, 0),  # Strafe Left
    (  0,   0,  1,  0, 0, 0, 0),  # Strafe Right
    (-10,   0,  0,  0, 0, 0, 0),  # Small LL
    ( 10,   0,  0,  0, 0, 0, 0),  # Small LR
    (-60,   0,  0,  0, 0, 0, 0),  # Large LL
    ( 60,   0,  0,  0, 0, 0, 0),  # Large LR
    (  0,  10,  0,  0, 0, 0, 0),  # Look Down
    (  0, -10,  0,  0, 0, 0, 0),  # Look Up
    (-10,   0,  0,  1, 0, 0, 0),  # FW + Small LL
    ( 10,   0,  0,  1, 0, 0, 0),  # FW + Small LR
    (-60,   0,  0,  1, 0, 0, 0),  # FW + Large LL
    ( 60,   0,  0,  1, 0, 0, 0),  # FW + Large LR
    (  0,   0,  0,  0, 1, 0, 0),  # Fire
)
|
models/embodied/envs/dummy.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import elements
|
| 2 |
+
import embodied
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class Dummy(embodied.Env):
  """Deterministic stand-in environment for smoke tests and benchmarks.

  Produces constant observations of several shapes and dtypes, a reward of
  zero on the first step and one afterwards, and episodes of a fixed length.
  """

  def __init__(self, task, size=(64, 64), length=100):
    del task  # All tasks behave identically.
    self.size = size
    self.length = length
    self.count = 0
    self.done = False

  @property
  def obs_space(self):
    return dict(
        image=elements.Space(np.uint8, self.size + (3,)),
        vector=elements.Space(np.float32, (7,)),
        token=elements.Space(np.int32, (), 0, 256),
        count=elements.Space(np.float32, (), 0, self.length),
        float2d=elements.Space(np.float32, (4, 5)),
        int2d=elements.Space(np.int32, (2, 3), 0, 4),
        reward=elements.Space(np.float32),
        is_first=elements.Space(bool),
        is_last=elements.Space(bool),
        is_terminal=elements.Space(bool),
    )

  @property
  def act_space(self):
    return dict(
        reset=elements.Space(bool),
        act_disc=elements.Space(np.int32, (), 0, 5),
        act_cont=elements.Space(np.float32, (6,)),
    )

  def step(self, action):
    reset = action.pop('reset')
    if reset or self.done:
      self.count = 0
      self.done = False
      return self._obs(0, is_first=True)
    self.count += 1
    self.done = self.count >= self.length
    return self._obs(1, is_last=self.done, is_terminal=self.done)

  def _obs(self, reward, is_first=False, is_last=False, is_terminal=False):
    # Observation values are constant except for the step counter.
    return {
        'image': np.full(self.size + (3,), 255, np.uint8),
        'vector': np.zeros(7, np.float32),
        'token': np.zeros((), np.int32),
        'count': np.float32(self.count),
        'float2d': np.ones((4, 5), np.float32),
        'int2d': np.ones((2, 3), np.int32),
        'reward': np.float32(reward),
        'is_first': is_first,
        'is_last': is_last,
        'is_terminal': is_terminal,
    }
|
models/embodied/envs/from_dm.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
import elements
|
| 4 |
+
import embodied
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class FromDM(embodied.Env):
  """Adapter from a dm_env-style environment to the embodied interface.

  Flattens TimeStep objects into observation dicts with `reward`,
  `is_first`, `is_last`, and `is_terminal` keys, and adds a boolean
  `reset` action.
  """

  def __init__(self, env, obs_key='observation', act_key='action'):
    # obs_key/act_key name the single entry when the wrapped environment
    # uses plain (non-dict) observation or action specs.
    self._env = env
    obs_spec = self._env.observation_spec()
    act_spec = self._env.action_spec()
    self._obs_dict = isinstance(obs_spec, dict)
    self._act_dict = isinstance(act_spec, dict)
    # Falsy when the spec is already a dict; otherwise the fallback key.
    self._obs_key = not self._obs_dict and obs_key
    self._act_key = not self._act_dict and act_key
    # Names of zero-sized observation entries; populated lazily below.
    self._obs_empty = []
    self._done = True  # Forces a reset on the first step() call.

  @functools.cached_property
  def obs_space(self):
    spec = self._env.observation_spec()
    spec = spec if self._obs_dict else {self._obs_key: spec}
    if 'reward' in spec:
      # Rename to avoid clashing with the wrapper's own 'reward' key.
      spec['obs_reward'] = spec.pop('reward')
    for key, value in spec.copy().items():
      if int(np.prod(value.shape)) == 0:
        # NOTE(review): _obs_empty is filled here as a side effect, so
        # _obs() implicitly relies on obs_space being accessed at least
        # once before the environment is stepped — confirm callers do so.
        self._obs_empty.append(key)
        del spec[key]
    spaces = {
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }
    for key, value in spec.items():
      key = key.replace('/', '_')  # Slashes are reserved for log keys.
      spaces[key] = self._convert(value)
    return spaces

  @functools.cached_property
  def act_space(self):
    spec = self._env.action_spec()
    spec = spec if self._act_dict else {self._act_key: spec}
    return {
        'reset': elements.Space(bool),
        **{k or self._act_key: self._convert(v) for k, v in spec.items()},
    }

  def step(self, action):
    action = action.copy()
    reset = action.pop('reset')
    if reset or self._done:
      time_step = self._env.reset()
    else:
      # Unwrap the single action entry when the spec is not a dict.
      action = action if self._act_dict else action[self._act_key]
      time_step = self._env.step(action)
    self._done = time_step.last()
    return self._obs(time_step)

  def _obs(self, time_step):
    # dm_env uses discount == 0 to mark terminal (as opposed to truncated)
    # steps; anything else would indicate a non-standard environment.
    if not time_step.first():
      assert time_step.discount in (0, 1), time_step.discount
    obs = time_step.observation
    obs = dict(obs) if self._obs_dict else {self._obs_key: obs}
    if 'reward' in obs:
      obs['obs_reward'] = obs.pop('reward')
    # Drop zero-sized entries recorded by obs_space.
    for key in self._obs_empty:
      del obs[key]
    obs = {k.replace('/', '_'): v for k, v in obs.items()}
    return dict(
        reward=np.float32(0.0 if time_step.first() else time_step.reward),
        is_first=time_step.first(),
        is_last=time_step.last(),
        is_terminal=False if time_step.first() else time_step.discount == 0,
        **obs,
    )

  def _convert(self, space):
    """Convert a dm_env spec into an elements.Space."""
    if hasattr(space, 'num_values'):
      # Discrete spec.
      return elements.Space(space.dtype, (), 0, space.num_values)
    elif hasattr(space, 'minimum'):
      # Bounded array spec; bounds must be finite to be representable.
      assert np.isfinite(space.minimum).all(), space.minimum
      assert np.isfinite(space.maximum).all(), space.maximum
      return elements.Space(
          space.dtype, space.shape, space.minimum, space.maximum)
    else:
      # Unbounded array spec.
      return elements.Space(space.dtype, space.shape, None, None)
|
models/embodied/envs/from_gym.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
import elements
|
| 4 |
+
import embodied
|
| 5 |
+
import gym
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class FromGym(embodied.Env):
  """Adapter from a Gym environment to the embodied interface.

  Nested Dict spaces are flattened into '/'-joined keys. Uses the legacy
  Gym API: reset() returns only the observation and step() returns a
  (obs, reward, done, info) 4-tuple, as seen in step() below.
  """

  def __init__(self, env, obs_key='image', act_key='action', **kwargs):
    # Accept either an environment id string or a pre-built environment;
    # kwargs are only meaningful for gym.make.
    if isinstance(env, str):
      self._env = gym.make(env, **kwargs)
    else:
      assert not kwargs, kwargs
      self._env = env
    # Dict spaces expose a .spaces attribute; plain spaces do not.
    self._obs_dict = hasattr(self._env.observation_space, 'spaces')
    self._act_dict = hasattr(self._env.action_space, 'spaces')
    self._obs_key = obs_key
    self._act_key = act_key
    self._done = True  # Forces a reset on the first step() call.
    self._info = None

  @property
  def env(self):
    # The wrapped Gym environment, for callers needing direct access.
    return self._env

  @property
  def info(self):
    # Info dict from the most recent non-reset step, or None.
    return self._info

  @functools.cached_property
  def obs_space(self):
    if self._obs_dict:
      spaces = self._flatten(self._env.observation_space.spaces)
    else:
      spaces = {self._obs_key: self._env.observation_space}
    spaces = {k: self._convert(v) for k, v in spaces.items()}
    return {
        **spaces,
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }

  @functools.cached_property
  def act_space(self):
    if self._act_dict:
      spaces = self._flatten(self._env.action_space.spaces)
    else:
      spaces = {self._act_key: self._env.action_space}
    spaces = {k: self._convert(v) for k, v in spaces.items()}
    spaces['reset'] = elements.Space(bool)
    return spaces

  def step(self, action):
    if action['reset'] or self._done:
      self._done = False
      obs = self._env.reset()
      return self._obs(obs, 0.0, is_first=True)
    if self._act_dict:
      # NOTE(review): the 'reset' key is not removed before unflattening,
      # so dict-action environments receive it — confirm they ignore it.
      action = self._unflatten(action)
    else:
      action = action[self._act_key]
    obs, reward, self._done, self._info = self._env.step(action)
    return self._obs(
        obs, reward,
        is_last=bool(self._done),
        # Environments may distinguish termination from truncation via an
        # 'is_terminal' info entry; otherwise done implies terminal.
        is_terminal=bool(self._info.get('is_terminal', self._done)))

  def _obs(
      self, obs, reward, is_first=False, is_last=False, is_terminal=False):
    """Normalize a raw Gym observation into the embodied dict format."""
    if not self._obs_dict:
      obs = {self._obs_key: obs}
    obs = self._flatten(obs)
    obs = {k: np.asarray(v) for k, v in obs.items()}
    obs.update(
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        is_terminal=is_terminal)
    return obs

  def render(self):
    image = self._env.render('rgb_array')
    assert image is not None
    return image

  def close(self):
    # Best effort: some Gym environments raise when closed twice or were
    # never fully initialized.
    try:
      self._env.close()
    except Exception:
      pass

  def _flatten(self, nest, prefix=None):
    """Flatten nested dicts/Dict spaces into '/'-joined keys."""
    result = {}
    for key, value in nest.items():
      key = prefix + '/' + key if prefix else key
      if isinstance(value, gym.spaces.Dict):
        value = value.spaces
      if isinstance(value, dict):
        result.update(self._flatten(value, key))
      else:
        result[key] = value
    return result

  def _unflatten(self, flat):
    """Invert _flatten: rebuild nested dicts from '/'-joined keys."""
    result = {}
    for key, value in flat.items():
      parts = key.split('/')
      node = result
      for part in parts[:-1]:
        if part not in node:
          node[part] = {}
        node = node[part]
      node[parts[-1]] = value
    return result

  def _convert(self, space):
    """Convert a Gym space into an elements.Space."""
    if hasattr(space, 'n'):
      # Discrete space.
      return elements.Space(np.int32, (), 0, space.n)
    return elements.Space(space.dtype, space.shape, space.low, space.high)
|
models/embodied/envs/loconav.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import os
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
import elements
|
| 6 |
+
import embodied
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class LocoNav(embodied.Env):
  """Locomotion-and-navigation maze task built on dm_control composer.

  A walker (ant or quadruped) navigates a fixed maze toward a goal sphere.
  Tracks the set of visited integer grid cells and exposes its size as a
  'log/coverage' observation.
  """

  # Default render camera index per walker type.
  DEFAULT_CAMERAS = dict(
      ant=4,
      quadruped=5,
  )

  def __init__(
      self, name, repeat=1, size=(64, 64), camera=-1, again=False,
      termination=False, weaker=1.0):
    # name: '<walker>_<arena>' with an optional '_<N>hz' control-rate
    # suffix, e.g. 'ant_maze_s_100hz'.
    if name.endswith('hz'):
      name, freq = name.rsplit('_', 1)
      freq = int(freq.strip('hz'))
    else:
      freq = 50
    if 'MUJOCO_GL' not in os.environ:
      os.environ['MUJOCO_GL'] = 'egl'
    from dm_control import composer
    from dm_control.locomotion.props import target_sphere
    from dm_control.locomotion.tasks import random_goal_maze
    walker, arena = name.split('_', 1)
    if camera == -1:
      camera = self.DEFAULT_CAMERAS.get(walker, 0)
    self._walker = self._make_walker(walker)
    arena = self._make_arena(arena)
    target = target_sphere.TargetSphere(radius=1.2, height_above_ground=0.0)
    task = random_goal_maze.RepeatSingleGoalMaze(
        walker=self._walker, maze_arena=arena, target=target,
        # again=True respawns the goal so it can be reached repeatedly.
        max_repeats=1000 if again else 1,
        randomize_spawn_rotation=True,
        target_reward_scale=1.0,
        aliveness_threshold=-0.5 if termination else -1.0,
        contact_termination=False,
        physics_timestep=min(1 / freq / 4, 0.02),
        control_timestep=1 / freq)
    if not again:
      # Monkey-patch after_step so reaching the goal gives reward without
      # ending the episode: the reward/target counters are overwritten
      # after the base class (skipping RepeatSingleGoalMaze) has run.
      def after_step(self, physics, random_state):
        super(random_goal_maze.RepeatSingleGoalMaze, self).after_step(
            physics, random_state)
        self._rewarded_this_step = self._target.activated
        self._targets_obtained = int(self._target.activated)
      task.after_step = functools.partial(after_step, task)
    env = composer.Environment(
        time_limit=60, task=task, random_state=None,
        strip_singleton_obs_buffer_dim=True)
    from . import dmc
    # image=False keeps the render available as 'log/image' only.
    self._env = dmc.DMC(env, repeat, size=size, camera=camera, image=False)
    self._visited = None  # Set of visited (x, y) grid cells per episode.
    self._weaker = weaker  # Scale factor applied to actions (<1 weakens).

  @property
  def obs_space(self):
    spaces = self._env.obs_space.copy()
    spaces['log/coverage'] = elements.Space(np.int32, low=-1)
    return spaces

  @property
  def act_space(self):
    return self._env.act_space

  def step(self, action):
    with warnings.catch_warnings():
      warnings.filterwarnings('ignore', '.*is a deprecated alias for.*')
      action = action.copy()
      action['action'] *= self._weaker
      obs = self._env.step(action)
      if obs['is_first']:
        self._visited = set()
      # NOTE(review): reaches into DMC's private _dmenv._physics to read
      # the walker's global position; get_pose()[0] is presumed to be the
      # position component — confirm against the dm_control walker API.
      global_pos = self._walker.get_pose(
          self._env._dmenv._physics)[0].reshape(-1)
      self._visited.add(tuple(np.round(global_pos[:2]).astype(int).tolist()))
      obs['log/coverage'] = np.int32(len(self._visited))
      return obs

  def _make_walker(self, name):
    """Construct the walker body by name ('ant' or 'quadruped')."""
    if name == 'ant':
      from dm_control.locomotion.walkers import ant
      return ant.Ant()
    elif name == 'quadruped':
      from . import loconav_quadruped
      return loconav_quadruped.Quadruped()
    else:
      raise NotImplementedError(name)

  def _make_arena(self, name):
    """Build a fixed maze arena from the ASCII layout in MAPS[name]."""
    import labmaze
    from dm_control import mjcf
    from dm_control.locomotion.arenas import labmaze_textures
    from dm_control.locomotion.arenas import mazes
    import matplotlib.pyplot as plt
    # Flat-color wall texture; digits 1-9 in the map get tab10 colors so
    # different wall segments are visually distinguishable.
    class WallTexture(labmaze_textures.WallTextures):
      def _build(self, color=[0.8, 0.8, 0.8], model='labmaze_style_01'):
        self._mjcf_root = mjcf.RootElement(model=model)
        self._textures = [self._mjcf_root.asset.add(
            'texture', type='2d', name='wall', builtin='flat',
            rgb1=color, width=100, height=100)]
    wall_textures = {'*': WallTexture([0.8, 0.8, 0.8])}
    cmap = plt.get_cmap('tab10')
    for index in range(9):
      wall_textures[str(index + 1)] = WallTexture(cmap(index)[:3])
    # The MAPS strings space-pad every cell; drop the padding columns and
    # turn '.' floor markers into labmaze's blank floor character.
    layout = ''.join([
        line[::2].replace('.', ' ') + '\n' for line in MAPS[name]])
    maze = labmaze.FixedMazeWithRandomGoals(
        entity_layer=layout,
        num_spawns=1, num_objects=1, random_state=None)
    arena = mazes.MazeWithTargets(
        maze, xy_scale=1.2, z_height=2.0, aesthetic='default',
        wall_textures=wall_textures, name='maze')
    return arena
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
MAPS = {
|
| 122 |
+
|
| 123 |
+
'maze_s': (
|
| 124 |
+
' 6 6 6 6 6',
|
| 125 |
+
' 6 . . . 6',
|
| 126 |
+
' 6 . G . 6',
|
| 127 |
+
' 6 . . . 6',
|
| 128 |
+
' 5 . . . 4',
|
| 129 |
+
' 5 . . . 4',
|
| 130 |
+
'1 1 1 1 5 5 5 . . . 4',
|
| 131 |
+
'1 . . . . . . . . . 3',
|
| 132 |
+
'1 . P . . . . . . . 3',
|
| 133 |
+
'1 . . . . . . . . . 3',
|
| 134 |
+
'1 1 1 1 2 2 2 3 3 3 3',
|
| 135 |
+
),
|
| 136 |
+
|
| 137 |
+
'maze_m': (
|
| 138 |
+
'6 6 6 6 8 8 8 7 7 7 7',
|
| 139 |
+
'6 . . . . . . . . . 7',
|
| 140 |
+
'6 . G . . . . . . . 7',
|
| 141 |
+
'6 . . . . . . . . . 7',
|
| 142 |
+
'6 6 6 5 5 5 5 . . . 4',
|
| 143 |
+
' 5 . . . 4',
|
| 144 |
+
'1 1 1 1 5 5 5 . . . 4',
|
| 145 |
+
'1 . . . . . . . . . 3',
|
| 146 |
+
'1 . P . . . . . . . 3',
|
| 147 |
+
'1 . . . . . . . . . 3',
|
| 148 |
+
'1 1 1 1 2 2 2 3 3 3 3',
|
| 149 |
+
),
|
| 150 |
+
|
| 151 |
+
'maze_l': (
|
| 152 |
+
'8 8 8 8 7 7 7 6 6 6 6 . . .',
|
| 153 |
+
'8 . . . . . . . . . 6 . . .',
|
| 154 |
+
'8 . G . . . . . . . 6 . . .',
|
| 155 |
+
'8 . . . . . . . . . 6 5 5 5',
|
| 156 |
+
'8 8 8 8 7 7 7 . . . . . . 5',
|
| 157 |
+
'. . . . . . 7 . . . . . . 5',
|
| 158 |
+
'1 1 1 1 1 . 7 . . . . . . 5',
|
| 159 |
+
'1 . . . 1 . 7 9 9 9 . . . 5',
|
| 160 |
+
'1 . . . 1 . . . . 9 . . . 5',
|
| 161 |
+
'1 . . . 1 1 1 9 9 9 . . . 5',
|
| 162 |
+
'2 . . . . . . . . . . . . 4',
|
| 163 |
+
'2 . . . . P . . . . . . . 4',
|
| 164 |
+
'2 . . . . . . . . . . . . 4',
|
| 165 |
+
'2 2 2 2 3 3 3 3 3 3 4 4 4 4',
|
| 166 |
+
),
|
| 167 |
+
|
| 168 |
+
'maze_xl': (
|
| 169 |
+
'9 9 9 9 9 9 9 8 8 8 8 . 4 4 4 4 4',
|
| 170 |
+
'9 . . . . . . . . . 8 . 4 . . . 4',
|
| 171 |
+
'9 . . . . . . . G . 8 . 4 . . . 4',
|
| 172 |
+
'9 . . . . . . . . . 8 . 4 . . . 4',
|
| 173 |
+
'6 . . . 7 7 7 8 8 8 8 . 5 . . . 3',
|
| 174 |
+
'6 . . . 7 . . . . . . . 5 . . . 3',
|
| 175 |
+
'6 . . . 7 7 7 5 5 5 5 5 5 . . . 3',
|
| 176 |
+
'5 . . . . . . . . . . . . . . . 3',
|
| 177 |
+
'5 . . . . . . . . . . . . . . . 3',
|
| 178 |
+
'5 . . . . . . . . . . . . . . . 3',
|
| 179 |
+
'5 5 5 5 4 4 4 . . . 6 6 6 . . . 3',
|
| 180 |
+
'. . . . . . 4 . . . 6 . 6 . . . 3',
|
| 181 |
+
'1 1 1 1 4 4 4 . . . 6 . 6 . . . 3',
|
| 182 |
+
'1 . . . . . . . . . 2 . 1 . . . 1',
|
| 183 |
+
'1 . P . . . . . . . 2 . 1 . . . 1',
|
| 184 |
+
'1 . . . . . . . . . 2 . 1 . . . 1',
|
| 185 |
+
'1 1 1 1 1 1 1 2 2 2 2 . 1 1 1 1 1',
|
| 186 |
+
),
|
| 187 |
+
|
| 188 |
+
'maze_xxl': (
|
| 189 |
+
'7 7 7 7 * * * 6 6 6 * * * 9 9 9 9',
|
| 190 |
+
'7 . . . . . . . . . . . . . . . 9',
|
| 191 |
+
'7 . . . . . . . . . . . . . G . 9',
|
| 192 |
+
'7 . . . . . . . . . . . . . . . 9',
|
| 193 |
+
'* . . . 5 5 5 * * * * * * 9 9 9 9',
|
| 194 |
+
'* . . . 5 . . . . . . . . . . . .',
|
| 195 |
+
'* . . . 5 5 5 * * * * * * 3 3 3 3',
|
| 196 |
+
'8 . . . . . . . . . . . . . . . 3',
|
| 197 |
+
'8 . . . . . . . . . . . . . . . 3',
|
| 198 |
+
'8 . . . . . . . . . . . . . . . 3',
|
| 199 |
+
'8 8 8 8 * * * * * * 4 4 4 . . . *',
|
| 200 |
+
'. . . . . . . . . . . . 4 . . . *',
|
| 201 |
+
'1 1 1 1 * * * * * * 4 4 4 . . . *',
|
| 202 |
+
'1 . . . . . . . . . . . . . . . 2',
|
| 203 |
+
'1 . P . . . . . . . . . . . . . 2',
|
| 204 |
+
'1 . . . . . . . . . . . . . . . 2',
|
| 205 |
+
'1 1 1 1 * * * 6 6 6 * * * 2 2 2 2',
|
| 206 |
+
),
|
| 207 |
+
|
| 208 |
+
'empty': (
|
| 209 |
+
'. . . . . . . . . . . . . . . . .',
|
| 210 |
+
'. . . . . . . . . . . . . . . . .',
|
| 211 |
+
'. . . . . . . . . . . . . . . . .',
|
| 212 |
+
'. . . . . . . . . . . . . . . . .',
|
| 213 |
+
'. . . . . . . . . . . . . . . . .',
|
| 214 |
+
'. . . . . . . . . . . . . . . . .',
|
| 215 |
+
'. . . . . . . . . . . . . . . . .',
|
| 216 |
+
'. . . . . . . . . . . . . . . . .',
|
| 217 |
+
'. . . . . . . . . . . . . . . . .',
|
| 218 |
+
'. . . . . . . . . . . . . . . . .',
|
| 219 |
+
'. . . . . . . . . . . . . . . . .',
|
| 220 |
+
'. . . . . . . . . . . . . . . . .',
|
| 221 |
+
'. . . . . . . . . . . . . . . . .',
|
| 222 |
+
'. . . . . . . . . . . . . . . . .',
|
| 223 |
+
'. . . . . . . . . . . . . . . . .',
|
| 224 |
+
'. . . . . . . . . . . . . . . . .',
|
| 225 |
+
'. . . . . . . . . . . . . . . . .',
|
| 226 |
+
),
|
| 227 |
+
|
| 228 |
+
}
|
models/embodied/envs/loconav_quadruped.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
from dm_control import composer
|
| 4 |
+
from dm_control import mjcf
|
| 5 |
+
from dm_control.composer.observation import observable
|
| 6 |
+
from dm_control.locomotion.walkers import base
|
| 7 |
+
from dm_control.locomotion.walkers import legacy_base
|
| 8 |
+
from dm_control.mujoco.wrapper import mjbindings
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
enums = mjbindings.enums
|
| 12 |
+
mjlib = mjbindings.mjlib
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class Quadruped(legacy_base.Walker):
  """Four-legged dm_control composer walker loaded from loconav_quadruped.xml.

  Implements the `legacy_base.Walker` interface (MJCF model, actuators,
  bodies, cameras, observables) and additionally keeps a copy of the most
  recent action so it can be exposed via `prev_action`.
  """

  def _build(self, name='walker', initializer=None):
    super()._build(initializer=initializer)
    # Load the MJCF model that ships alongside this module.
    self._mjcf_root = mjcf.from_path(
        os.path.join(os.path.dirname(__file__), 'loconav_quadruped.xml'))
    if name:
      self._mjcf_root.model = name
    # Buffer for the most recent action, shaped like the action spec.
    self._prev_action = np.zeros(
        self.action_spec.shape, self.action_spec.dtype)

  def initialize_episode(self, physics, random_state):
    # Clear the previous-action buffer at episode start.
    self._prev_action = np.zeros_like(self._prev_action)

  def apply_action(self, physics, action, random_state):
    super().apply_action(physics, action, random_state)
    # Remember the action so `prev_action` reflects what was just applied.
    self._prev_action[:] = action

  def _build_observables(self):
    return QuadrupedObservables(self)

  @property
  def mjcf_model(self):
    return self._mjcf_root

  @property
  def upright_pose(self):
    # Default pose: the model's canonical orientation is used as-is.
    return base.WalkerPose()

  @composer.cached_property
  def actuators(self):
    return self._mjcf_root.find_all('actuator')

  @composer.cached_property
  def root_body(self):
    return self._mjcf_root.find('body', 'torso')

  @composer.cached_property
  def bodies(self):
    return tuple(self.mjcf_model.find_all('body'))

  @composer.cached_property
  def mocap_tracking_bodies(self):
    # All bodies participate in mocap tracking.
    return tuple(self.mjcf_model.find_all('body'))

  @property
  def mocap_joints(self):
    return self.mjcf_model.find_all('joint')

  @property
  def _foot_bodies(self):
    # Toe bodies in front-left, front-right, back-right, back-left order.
    return (
        self._mjcf_root.find('body', 'toe_front_left'),
        self._mjcf_root.find('body', 'toe_front_right'),
        self._mjcf_root.find('body', 'toe_back_right'),
        self._mjcf_root.find('body', 'toe_back_left'),
    )

  @composer.cached_property
  def end_effectors(self):
    return self._foot_bodies

  @composer.cached_property
  def observable_joints(self):
    return self._mjcf_root.find_all('joint')

  @composer.cached_property
  def egocentric_camera(self):
    return self._mjcf_root.find('camera', 'egocentric')

  def aliveness(self, physics):
    # xmat[-1] is the z-z entry of the torso rotation matrix: 1 when
    # upright, -1 when flipped. Mapped to [-1, 0] so upright yields 0.
    return (physics.bind(self.root_body).xmat[-1] - 1.) / 2.

  @composer.cached_property
  def ground_contact_geoms(self):
    # Geoms of all four toes; used to detect foot-ground contact.
    foot_geoms = []
    for foot in self._foot_bodies:
      foot_geoms.extend(foot.find_all('geom'))
    return tuple(foot_geoms)

  @property
  def prev_action(self):
    return self._prev_action
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class QuadrupedObservables(legacy_base.WalkerObservables):
  """Observables for `Quadruped`, extending the legacy walker set."""

  @composer.observable
  def actuator_activations(self):
    # Raw actuator activation states (physics.data.act).
    def actuator_activations_in_egocentric_frame(physics):
      return physics.data.act
    return observable.Generic(actuator_activations_in_egocentric_frame)

  @composer.observable
  def root_global_pos(self):
    # Global (world-frame) position of the walker's root.
    def root_pos(physics):
      root_xpos, _ = self._entity.get_pose(physics)
      return np.reshape(root_xpos, -1)
    return observable.Generic(root_pos)

  @composer.observable
  def torso_global_pos(self):
    # Global (world-frame) position of the torso body.
    def torso_pos(physics):
      root_body = self._entity.root_body
      root_body_xpos = physics.bind(root_body).xpos
      return np.reshape(root_body_xpos, -1)
    return observable.Generic(torso_pos)

  @property
  def proprioception(self):
    # Base proprioceptive observables plus actuator activations and the
    # global positions above; attachments may contribute more.
    return ([
        self.joints_pos, self.joints_vel, self.actuator_activations,
        self.sensors_accelerometer, self.sensors_gyro,
        self.sensors_velocimeter,
        self.sensors_force, self.sensors_torque,
        self.world_zaxis,
        self.root_global_pos, self.torso_global_pos,
    ] + self._collect_from_attachments('proprioception'))
|
models/embodied/envs/loconav_quadruped.xml
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<mujoco model="quadruped">
|
| 2 |
+
|
| 3 |
+
<visual>
|
| 4 |
+
<quality shadowsize="2048"/>
|
| 5 |
+
<rgba rangefinder="1 1 0.1 0.1"/>
|
| 6 |
+
</visual>
|
| 7 |
+
|
| 8 |
+
<asset>
|
| 9 |
+
<texture name="grid" type="2d" builtin="checker" rgb1=".1 .2 .3" rgb2=".2 .3 .4" width="300" height="300" mark="edge" markrgb=".2 .3 .4"/>
|
| 10 |
+
<material name="grid" texture="grid" texrepeat="1 1" texuniform="true" reflectance=".2"/>
|
| 11 |
+
<material name="self" rgba=".7 .5 .3 1"/>
|
| 12 |
+
<material name="self_default" rgba=".7 .5 .3 1"/>
|
| 13 |
+
<material name="self_highlight" rgba="0 .5 .3 1"/>
|
| 14 |
+
<material name="effector" rgba=".7 .4 .2 1"/>
|
| 15 |
+
<material name="effector_default" rgba=".7 .4 .2 1"/>
|
| 16 |
+
<material name="effector_highlight" rgba="0 .5 .3 1"/>
|
| 17 |
+
<material name="decoration" rgba=".3 .5 .7 1"/>
|
| 18 |
+
<material name="eye" rgba="0 .2 1 1"/>
|
| 19 |
+
<material name="target" rgba=".6 .3 .3 1"/>
|
| 20 |
+
<material name="target_default" rgba=".6 .3 .3 1"/>
|
| 21 |
+
<material name="target_highlight" rgba=".6 .3 .3 .4"/>
|
| 22 |
+
<material name="site" rgba=".5 .5 .5 .3"/>
|
| 23 |
+
|
| 24 |
+
<hfield name="terrain" ncol="201" nrow="201" size="30 30 5 .1"/>
|
| 25 |
+
</asset>
|
| 26 |
+
|
| 27 |
+
<option timestep=".005"/>
|
| 28 |
+
|
| 29 |
+
<default>
|
| 30 |
+
<geom solimp=".9 .99 .003" solref=".01 1"/>
|
| 31 |
+
<default class="body">
|
| 32 |
+
<geom type="capsule" size=".08" condim="1" material="self" density="500"/>
|
| 33 |
+
<joint type="hinge" damping="30" armature=".01"
|
| 34 |
+
limited="true" solimplimit="0 .99 .01"/>
|
| 35 |
+
<default class="hip">
|
| 36 |
+
<default class="yaw">
|
| 37 |
+
<joint axis="0 0 1" range="-50 50"/>
|
| 38 |
+
</default>
|
| 39 |
+
<default class="pitch">
|
| 40 |
+
<joint axis="0 1 0" range="-20 60"/>
|
| 41 |
+
</default>
|
| 42 |
+
<geom fromto="0 0 0 .3 0 .11"/>
|
| 43 |
+
</default>
|
| 44 |
+
<default class="knee">
|
| 45 |
+
<joint axis="0 1 0" range="-60 50"/>
|
| 46 |
+
<geom size=".065" fromto="0 0 0 .25 0 -.25"/>
|
| 47 |
+
</default>
|
| 48 |
+
<default class="ankle">
|
| 49 |
+
<joint axis="0 1 0" range="-45 55"/>
|
| 50 |
+
<geom size=".055" fromto="0 0 0 0 0 -.25"/>
|
| 51 |
+
</default>
|
| 52 |
+
<default class="toe">
|
| 53 |
+
<geom type="sphere" size=".08" material="effector" friction="1.5"/>
|
| 54 |
+
<site type="sphere" size=".084" material="site" group="4"/>
|
| 55 |
+
</default>
|
| 56 |
+
</default>
|
| 57 |
+
<default class="rangefinder">
|
| 58 |
+
<site type="capsule" size=".005 .1" material="site" group="4"/>
|
| 59 |
+
</default>
|
| 60 |
+
|
| 61 |
+
<default class="coupling">
|
| 62 |
+
<equality solimp="0.95 0.99 0.01" solref=".005 .5"/>
|
| 63 |
+
</default>
|
| 64 |
+
|
| 65 |
+
<general ctrllimited="true" gainprm="1000" biasprm="0 -1000" biastype="affine" dyntype="filter" dynprm=".1"/>
|
| 66 |
+
<default class="yaw_act">
|
| 67 |
+
<general ctrlrange="-1 1"/>
|
| 68 |
+
</default>
|
| 69 |
+
<default class="lift_act">
|
| 70 |
+
<general ctrlrange="-1 1.1"/>
|
| 71 |
+
</default>
|
| 72 |
+
<default class="extend_act">
|
| 73 |
+
<general ctrlrange="-.8 .8"/>
|
| 74 |
+
</default>
|
| 75 |
+
</default>
|
| 76 |
+
|
| 77 |
+
<worldbody>
|
| 78 |
+
<camera name="sideon" pos="0 -10 5" fovy="45" mode="targetbody" target="torso" />
|
| 79 |
+
<camera name="float_far" pos="-4 0 2" xyaxes="0 -1 0 .5 0 1" mode="trackcom" fovy="90"/>
|
| 80 |
+
<body name="torso" childclass="body" pos="0 0 .57">
|
| 81 |
+
|
| 82 |
+
<camera name="x" pos="-1.7 0 1" xyaxes="0 -1 0 .75 0 1" mode="trackcom"/>
|
| 83 |
+
<camera name="y" pos="0 4 2" xyaxes="-1 0 0 0 -.5 1" mode="trackcom"/>
|
| 84 |
+
<camera name="egocentric" pos=".3 0 .11" xyaxes="0 -1 0 .4 0 1" fovy="60"/>
|
| 85 |
+
<light name="light" pos="0 0 4" mode="trackcom"/>
|
| 86 |
+
|
| 87 |
+
<geom name="eye_r" type="cylinder" size=".05" fromto=".1 -.07 .12 .31 -.07 .08" mass="0"/>
|
| 88 |
+
<site name="pupil_r" type="sphere" size=".033" pos=".3 -.07 .08" zaxis="1 0 0" material="eye"/>
|
| 89 |
+
<geom name="eye_l" type="cylinder" size=".05" fromto=".1 .07 .12 .31 .07 .08" mass="0"/>
|
| 90 |
+
<site name="pupil_l" type="sphere" size=".033" pos=".3 .07 .08" zaxis="1 0 0" material="eye"/>
|
| 91 |
+
<site name="workspace" type="sphere" size=".3 .3 .3" material="site" pos=".8 0 -.2" group="3"/>
|
| 92 |
+
|
| 93 |
+
<!-- Forward-looking rangefinder ray fan: four rows rf_0x..rf_3x of five
     rays each, spread across y in .01 steps; rows step down in z
     (.11, .1, .09, .08). -->
<site name="rf_00" class="rangefinder" fromto=".41 -.02 .11 .34 0 .115"/>
<site name="rf_01" class="rangefinder" fromto=".41 -.01 .11 .34 0 .115"/>
<site name="rf_02" class="rangefinder" fromto=".41 0 .11 .34 0 .115"/>
<site name="rf_03" class="rangefinder" fromto=".41 .01 .11 .34 0 .115"/>
<site name="rf_04" class="rangefinder" fromto=".41 .02 .11 .34 0 .115"/>
<site name="rf_10" class="rangefinder" fromto=".41 -.02 .1 .36 0 .11"/>
<!-- NOTE(review): rf_11 repeats rf_10's y offset (-.02); every other row
     uses -.01 in this position, so this looks like a copy-paste slip.
     Confirm the intended ray directions before changing. -->
<site name="rf_11" class="rangefinder" fromto=".41 -.02 .1 .36 0 .11"/>
<site name="rf_12" class="rangefinder" fromto=".41 0 .1 .36 0 .11"/>
<site name="rf_13" class="rangefinder" fromto=".41 .01 .1 .36 0 .11"/>
<site name="rf_14" class="rangefinder" fromto=".41 .02 .1 .36 0 .11"/>
<site name="rf_20" class="rangefinder" fromto=".41 -.02 .09 .38 0 .105"/>
<site name="rf_21" class="rangefinder" fromto=".41 -.01 .09 .38 0 .105"/>
<site name="rf_22" class="rangefinder" fromto=".41 0 .09 .38 0 .105"/>
<site name="rf_23" class="rangefinder" fromto=".41 .01 .09 .38 0 .105"/>
<site name="rf_24" class="rangefinder" fromto=".41 .02 .09 .38 0 .105"/>
<site name="rf_30" class="rangefinder" fromto=".41 -.02 .08 .4 0 .1"/>
<site name="rf_31" class="rangefinder" fromto=".41 -.01 .08 .4 0 .1"/>
<site name="rf_32" class="rangefinder" fromto=".41 0 .08 .4 0 .1"/>
<site name="rf_33" class="rangefinder" fromto=".41 .01 .08 .4 0 .1"/>
<site name="rf_34" class="rangefinder" fromto=".41 .02 .08 .4 0 .1"/>
|
| 113 |
+
|
| 114 |
+
<geom name="torso" type="ellipsoid" size=".3 .27 .2" density="1000"/>
|
| 115 |
+
<site name="torso_touch" type="box" size=".26 .26 .26" rgba="0 0 1 0"/>
|
| 116 |
+
<site name="torso" size=".05" rgba="1 0 0 1" />
|
| 117 |
+
|
| 118 |
+
<body name="hip_front_left" pos=".2 .2 0" euler="0 0 45" childclass="hip">
|
| 119 |
+
<joint name="yaw_front_left" class="yaw"/>
|
| 120 |
+
<joint name="pitch_front_left" class="pitch"/>
|
| 121 |
+
<geom name="thigh_front_left"/>
|
| 122 |
+
<body name="knee_front_left" pos=".3 0 .11" childclass="knee">
|
| 123 |
+
<joint name="knee_front_left"/>
|
| 124 |
+
<geom name="shin_front_left"/>
|
| 125 |
+
<body name="ankle_front_left" pos=".25 0 -.25" childclass="ankle">
|
| 126 |
+
<joint name="ankle_front_left"/>
|
| 127 |
+
<geom name="foot_front_left"/>
|
| 128 |
+
<body name="toe_front_left" pos="0 0 -.3" childclass="toe">
|
| 129 |
+
<geom name="toe_front_left"/>
|
| 130 |
+
<site name="toe_front_left"/>
|
| 131 |
+
</body>
|
| 132 |
+
</body>
|
| 133 |
+
</body>
|
| 134 |
+
</body>
|
| 135 |
+
|
| 136 |
+
<body name="hip_front_right" pos=".2 -.2 0" euler="0 0 -45" childclass="hip">
|
| 137 |
+
<joint name="yaw_front_right" class="yaw"/>
|
| 138 |
+
<joint name="pitch_front_right" class="pitch"/>
|
| 139 |
+
<geom name="thigh_front_right"/>
|
| 140 |
+
<body name="knee_front_right" pos=".3 0 .11" childclass="knee">
|
| 141 |
+
<joint name="knee_front_right"/>
|
| 142 |
+
<geom name="shin_front_right"/>
|
| 143 |
+
<body name="ankle_front_right" pos=".25 0 -.25" childclass="ankle">
|
| 144 |
+
<joint name="ankle_front_right"/>
|
| 145 |
+
<geom name="foot_front_right"/>
|
| 146 |
+
<body name="toe_front_right" pos="0 0 -.3" childclass="toe">
|
| 147 |
+
<geom name="toe_front_right"/>
|
| 148 |
+
<site name="toe_front_right"/>
|
| 149 |
+
</body>
|
| 150 |
+
</body>
|
| 151 |
+
</body>
|
| 152 |
+
</body>
|
| 153 |
+
|
| 154 |
+
<body name="hip_back_right" pos="-.2 -.2 0" euler="0 0 -135" childclass="hip">
|
| 155 |
+
<joint name="yaw_back_right" class="yaw"/>
|
| 156 |
+
<joint name="pitch_back_right" class="pitch"/>
|
| 157 |
+
<geom name="thigh_back_right"/>
|
| 158 |
+
<body name="knee_back_right" pos=".3 0 .11" childclass="knee">
|
| 159 |
+
<joint name="knee_back_right"/>
|
| 160 |
+
<geom name="shin_back_right"/>
|
| 161 |
+
<body name="ankle_back_right" pos=".25 0 -.25" childclass="ankle">
|
| 162 |
+
<joint name="ankle_back_right"/>
|
| 163 |
+
<geom name="foot_back_right"/>
|
| 164 |
+
<body name="toe_back_right" pos="0 0 -.3" childclass="toe">
|
| 165 |
+
<geom name="toe_back_right"/>
|
| 166 |
+
<site name="toe_back_right"/>
|
| 167 |
+
</body>
|
| 168 |
+
</body>
|
| 169 |
+
</body>
|
| 170 |
+
</body>
|
| 171 |
+
|
| 172 |
+
<body name="hip_back_left" pos="-.2 .2 0" euler="0 0 135" childclass="hip">
|
| 173 |
+
<joint name="yaw_back_left" class="yaw"/>
|
| 174 |
+
<joint name="pitch_back_left" class="pitch"/>
|
| 175 |
+
<geom name="thigh_back_left"/>
|
| 176 |
+
<body name="knee_back_left" pos=".3 0 .11" childclass="knee">
|
| 177 |
+
<joint name="knee_back_left"/>
|
| 178 |
+
<geom name="shin_back_left"/>
|
| 179 |
+
<body name="ankle_back_left" pos=".25 0 -.25" childclass="ankle">
|
| 180 |
+
<joint name="ankle_back_left"/>
|
| 181 |
+
<geom name="foot_back_left"/>
|
| 182 |
+
<body name="toe_back_left" pos="0 0 -.3" childclass="toe">
|
| 183 |
+
<geom name="toe_back_left"/>
|
| 184 |
+
<site name="toe_back_left"/>
|
| 185 |
+
</body>
|
| 186 |
+
</body>
|
| 187 |
+
</body>
|
| 188 |
+
</body>
|
| 189 |
+
</body>
|
| 190 |
+
</worldbody>
|
| 191 |
+
|
| 192 |
+
<tendon>
|
| 193 |
+
<fixed name="coupling_front_left">
|
| 194 |
+
<joint joint="pitch_front_left" coef=".333"/>
|
| 195 |
+
<joint joint="knee_front_left" coef=".333"/>
|
| 196 |
+
<joint joint="ankle_front_left" coef=".333"/>
|
| 197 |
+
</fixed>
|
| 198 |
+
<fixed name="coupling_front_right">
|
| 199 |
+
<joint joint="pitch_front_right" coef=".333"/>
|
| 200 |
+
<joint joint="knee_front_right" coef=".333"/>
|
| 201 |
+
<joint joint="ankle_front_right" coef=".333"/>
|
| 202 |
+
</fixed>
|
| 203 |
+
<fixed name="coupling_back_right">
|
| 204 |
+
<joint joint="pitch_back_right" coef=".333"/>
|
| 205 |
+
<joint joint="knee_back_right" coef=".333"/>
|
| 206 |
+
<joint joint="ankle_back_right" coef=".333"/>
|
| 207 |
+
</fixed>
|
| 208 |
+
<fixed name="coupling_back_left">
|
| 209 |
+
<joint joint="pitch_back_left" coef=".333"/>
|
| 210 |
+
<joint joint="knee_back_left" coef=".333"/>
|
| 211 |
+
<joint joint="ankle_back_left" coef=".333"/>
|
| 212 |
+
</fixed>
|
| 213 |
+
|
| 214 |
+
<fixed name="extend_front_left">
|
| 215 |
+
<joint joint="pitch_front_left" coef=".25"/>
|
| 216 |
+
<joint joint="knee_front_left" coef="-.5"/>
|
| 217 |
+
<joint joint="ankle_front_left" coef=".25"/>
|
| 218 |
+
</fixed>
|
| 219 |
+
<fixed name="lift_front_left">
|
| 220 |
+
<joint joint="pitch_front_left" coef=".5"/>
|
| 221 |
+
<joint joint="ankle_front_left" coef="-.5"/>
|
| 222 |
+
</fixed>
|
| 223 |
+
|
| 224 |
+
<fixed name="extend_front_right">
|
| 225 |
+
<joint joint="pitch_front_right" coef=".25"/>
|
| 226 |
+
<joint joint="knee_front_right" coef="-.5"/>
|
| 227 |
+
<joint joint="ankle_front_right" coef=".25"/>
|
| 228 |
+
</fixed>
|
| 229 |
+
<fixed name="lift_front_right">
|
| 230 |
+
<joint joint="pitch_front_right" coef=".5"/>
|
| 231 |
+
<joint joint="ankle_front_right" coef="-.5"/>
|
| 232 |
+
</fixed>
|
| 233 |
+
|
| 234 |
+
<fixed name="extend_back_right">
|
| 235 |
+
<joint joint="pitch_back_right" coef=".25"/>
|
| 236 |
+
<joint joint="knee_back_right" coef="-.5"/>
|
| 237 |
+
<joint joint="ankle_back_right" coef=".25"/>
|
| 238 |
+
</fixed>
|
| 239 |
+
<fixed name="lift_back_right">
|
| 240 |
+
<joint joint="pitch_back_right" coef=".5"/>
|
| 241 |
+
<joint joint="ankle_back_right" coef="-.5"/>
|
| 242 |
+
</fixed>
|
| 243 |
+
|
| 244 |
+
<fixed name="extend_back_left">
|
| 245 |
+
<joint joint="pitch_back_left" coef=".25"/>
|
| 246 |
+
<joint joint="knee_back_left" coef="-.5"/>
|
| 247 |
+
<joint joint="ankle_back_left" coef=".25"/>
|
| 248 |
+
</fixed>
|
| 249 |
+
<fixed name="lift_back_left">
|
| 250 |
+
<joint joint="pitch_back_left" coef=".5"/>
|
| 251 |
+
<joint joint="ankle_back_left" coef="-.5"/>
|
| 252 |
+
</fixed>
|
| 253 |
+
</tendon>
|
| 254 |
+
|
| 255 |
+
<equality>
|
| 256 |
+
<tendon name="coupling_front_left" tendon1="coupling_front_left" class="coupling"/>
|
| 257 |
+
<tendon name="coupling_front_right" tendon1="coupling_front_right" class="coupling"/>
|
| 258 |
+
<tendon name="coupling_back_right" tendon1="coupling_back_right" class="coupling"/>
|
| 259 |
+
<tendon name="coupling_back_left" tendon1="coupling_back_left" class="coupling"/>
|
| 260 |
+
</equality>
|
| 261 |
+
|
| 262 |
+
<actuator>
|
| 263 |
+
<general name="yaw_front_left" class="yaw_act" joint="yaw_front_left"/>
|
| 264 |
+
<general name="lift_front_left" class="lift_act" tendon="lift_front_left"/>
|
| 265 |
+
<general name="extend_front_left" class="extend_act" tendon="extend_front_left"/>
|
| 266 |
+
<general name="yaw_front_right" class="yaw_act" joint="yaw_front_right"/>
|
| 267 |
+
<general name="lift_front_right" class="lift_act" tendon="lift_front_right"/>
|
| 268 |
+
<general name="extend_front_right" class="extend_act" tendon="extend_front_right"/>
|
| 269 |
+
<general name="yaw_back_right" class="yaw_act" joint="yaw_back_right"/>
|
| 270 |
+
<general name="lift_back_right" class="lift_act" tendon="lift_back_right"/>
|
| 271 |
+
<general name="extend_back_right" class="extend_act" tendon="extend_back_right"/>
|
| 272 |
+
<general name="yaw_back_left" class="yaw_act" joint="yaw_back_left"/>
|
| 273 |
+
<general name="lift_back_left" class="lift_act" tendon="lift_back_left"/>
|
| 274 |
+
<general name="extend_back_left" class="extend_act" tendon="extend_back_left"/>
|
| 275 |
+
</actuator>
|
| 276 |
+
|
| 277 |
+
<sensor>
|
| 278 |
+
<accelerometer name="imu_accel" site="torso"/>
|
| 279 |
+
<gyro name="imu_gyro" site="torso"/>
|
| 280 |
+
<velocimeter name="velocimeter" site="torso"/>
|
| 281 |
+
<force name="force_toe_front_left" site="toe_front_left"/>
|
| 282 |
+
<force name="force_toe_front_right" site="toe_front_right"/>
|
| 283 |
+
<force name="force_toe_back_right" site="toe_back_right"/>
|
| 284 |
+
<force name="force_toe_back_left" site="toe_back_left"/>
|
| 285 |
+
<torque name="torque_toe_front_left" site="toe_front_left"/>
|
| 286 |
+
<torque name="torque_toe_front_right" site="toe_front_right"/>
|
| 287 |
+
<torque name="torque_toe_back_right" site="toe_back_right"/>
|
| 288 |
+
<torque name="torque_toe_back_left" site="toe_back_left"/>
|
| 289 |
+
<subtreecom name="center_of_mass" body="torso"/>
|
| 290 |
+
<rangefinder name="rf_00" site="rf_00"/>
|
| 291 |
+
<rangefinder name="rf_01" site="rf_01"/>
|
| 292 |
+
<rangefinder name="rf_02" site="rf_02"/>
|
| 293 |
+
<rangefinder name="rf_03" site="rf_03"/>
|
| 294 |
+
<rangefinder name="rf_04" site="rf_04"/>
|
| 295 |
+
<rangefinder name="rf_10" site="rf_10"/>
|
| 296 |
+
<rangefinder name="rf_11" site="rf_11"/>
|
| 297 |
+
<rangefinder name="rf_12" site="rf_12"/>
|
| 298 |
+
<rangefinder name="rf_13" site="rf_13"/>
|
| 299 |
+
<rangefinder name="rf_14" site="rf_14"/>
|
| 300 |
+
<rangefinder name="rf_20" site="rf_20"/>
|
| 301 |
+
<rangefinder name="rf_21" site="rf_21"/>
|
| 302 |
+
<rangefinder name="rf_22" site="rf_22"/>
|
| 303 |
+
<rangefinder name="rf_23" site="rf_23"/>
|
| 304 |
+
<rangefinder name="rf_24" site="rf_24"/>
|
| 305 |
+
<rangefinder name="rf_30" site="rf_30"/>
|
| 306 |
+
<rangefinder name="rf_31" site="rf_31"/>
|
| 307 |
+
<rangefinder name="rf_32" site="rf_32"/>
|
| 308 |
+
<rangefinder name="rf_33" site="rf_33"/>
|
| 309 |
+
<rangefinder name="rf_34" site="rf_34"/>
|
| 310 |
+
</sensor>
|
| 311 |
+
</mujoco>
|
models/embodied/envs/minecraft.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib
|
| 2 |
+
|
| 3 |
+
import embodied
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class Minecraft(embodied.Wrapper):
  """Entry point that maps a task name to its minecraft_flat env class."""

  def __init__(self, task, *args, **kwargs):
    # Task registry: 'module:ClassName' specs, resolved lazily so the
    # heavy minerl import only happens when a task is constructed.
    spec = {
        'wood': 'minecraft_flat:Wood',
        'climb': 'minecraft_flat:Climb',
        'diamond': 'minecraft_flat:Diamond',
    }[task]
    modname, clsname = spec.split(':')
    mod = importlib.import_module(f'.{modname}', __package__)
    ctor = getattr(mod, clsname)
    super().__init__(ctor(*args, **kwargs))
|
models/embodied/envs/minecraft_flat.py
ADDED
|
@@ -0,0 +1,438 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import threading
|
| 3 |
+
|
| 4 |
+
import elements
|
| 5 |
+
import embodied
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
np.float = float
|
| 9 |
+
np.int = int
|
| 10 |
+
np.bool = bool
|
| 11 |
+
|
| 12 |
+
from minerl.herobraine.env_spec import EnvSpec
|
| 13 |
+
from minerl.herobraine.hero import handler
|
| 14 |
+
from minerl.herobraine.hero import handlers
|
| 15 |
+
from minerl.herobraine.hero import mc
|
| 16 |
+
from minerl.herobraine.hero.mc import INVERSE_KEYMAP
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class Wood(embodied.Wrapper):
  """Wood-chopping task: dense reward per log collected plus health changes."""

  def __init__(self, *args, **kwargs):
    # Episode length defaults to 36000 env steps unless overridden.
    limit = kwargs.pop('length', 36000)
    # Reward components are summed every step by step() below.
    self.rewards = [
        CollectReward('log', repeated=1),
        HealthReward(),
    ]
    base = MinecraftBase(BASIC_ACTIONS, *args, **kwargs)
    super().__init__(embodied.wrappers.TimeLimit(base, limit))

  def step(self, action):
    obs = self.env.step(action)
    # Accumulate all reward components for this transition.
    total = 0.0
    for fn in self.rewards:
      total += fn(obs, self.env.inventory)
    obs['reward'] = np.float32(total)
    return obs
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class Climb(embodied.Wrapper):
  """Climbing task: reward is the per-step gain in player height (y
  coordinate) plus the health-change reward."""

  def __init__(self, *args, **kwargs):
    actions = BASIC_ACTIONS
    # Episode length defaults to 36000 env steps unless overridden.
    length = kwargs.pop('length', 36000)
    env = MinecraftBase(actions, *args, **kwargs)
    env = embodied.wrappers.TimeLimit(env, length)
    super().__init__(env)
    # Height observed on the previous step; initialized on is_first.
    self._previous = None
    self._health_reward = HealthReward()

  def step(self, action):
    obs = self.env.step(action)
    # NOTE(review): 'log/player_pos' is commented out in
    # MinecraftBase.obs_space and MinecraftBase._obs in this file, so this
    # lookup would raise KeyError unless that observation is re-enabled
    # there — confirm before using this task.
    x, y, z = obs['log/player_pos']
    height = np.float32(y)
    if obs['is_first']:
      # First step of an episode: no height delta yet.
      self._previous = height
    reward = (height - self._previous) + self._health_reward(obs)
    obs['reward'] = np.float32(reward)
    self._previous = height
    return obs
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class Diamond(embodied.Wrapper):
  """Diamond progression task.

  Extends the basic action set with the crafting/smelting/equipping actions
  needed to progress from logs to a diamond, and pays a one-time reward for
  each milestone along the tech tree plus a dense health-change reward.
  """

  def __init__(self, *args, **kwargs):
    actions = {
        **BASIC_ACTIONS,
        'craft_planks': dict(craft='planks'),
        'craft_stick': dict(craft='stick'),
        'craft_crafting_table': dict(craft='crafting_table'),
        'place_crafting_table': dict(place='crafting_table'),
        'craft_wooden_pickaxe': dict(nearbyCraft='wooden_pickaxe'),
        'craft_stone_pickaxe': dict(nearbyCraft='stone_pickaxe'),
        'craft_iron_pickaxe': dict(nearbyCraft='iron_pickaxe'),
        'equip_stone_pickaxe': dict(equip='stone_pickaxe'),
        'equip_wooden_pickaxe': dict(equip='wooden_pickaxe'),
        'equip_iron_pickaxe': dict(equip='iron_pickaxe'),
        'craft_furnace': dict(nearbyCraft='furnace'),
        'place_furnace': dict(place='furnace'),
        'smelt_iron_ingot': dict(nearbySmelt='iron_ingot'),
    }
    # One-time milestone rewards on the path to a diamond, plus a dense
    # health-change reward.
    self.rewards = [
        CollectReward('log', once=1),
        CollectReward('planks', once=1),
        CollectReward('stick', once=1),
        CollectReward('crafting_table', once=1),
        CollectReward('wooden_pickaxe', once=1),
        CollectReward('cobblestone', once=1),
        CollectReward('stone_pickaxe', once=1),
        CollectReward('iron_ore', once=1),
        CollectReward('furnace', once=1),
        CollectReward('iron_ingot', once=1),
        CollectReward('iron_pickaxe', once=1),
        CollectReward('diamond', once=1),
        HealthReward(),
    ]
    # Episode length defaults to 36000 env steps unless overridden.
    length = kwargs.pop('length', 36000)
    env = MinecraftBase(actions, *args, **kwargs)
    env = embodied.wrappers.TimeLimit(env, length)
    super().__init__(env)

  def step(self, action):
    obs = self.env.step(action)
    # Generator expression avoids materializing an intermediate list.
    reward = sum(fn(obs, self.env.inventory) for fn in self.rewards)
    obs['reward'] = np.float32(reward)
    return obs
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
# Discrete action set shared by all tasks in this file. Each entry maps an
# action name to the MineRL command overrides that are merged on top of
# MinecraftBase.NOOP (see MinecraftBase._insert_defaults).
BASIC_ACTIONS = {
    'noop': dict(),
    'attack': dict(attack=1),
    # Camera deltas are (pitch, yaw); 15 units per turn action.
    'turn_up': dict(camera=(-15, 0)),
    'turn_down': dict(camera=(15, 0)),
    'turn_left': dict(camera=(0, -15)),
    'turn_right': dict(camera=(0, 15)),
    'forward': dict(forward=1),
    'back': dict(back=1),
    'left': dict(left=1),
    'right': dict(right=1),
    # Jumping also presses forward so the agent can clear blocks.
    'jump': dict(jump=1, forward=1),
    'place_dirt': dict(place='dirt'),
}
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class CollectReward:
  """Reward for gathering an inventory item.

  Pays `repeated` per unit of the item gained on a step, plus a one-time
  `once` bonus the first time the item is ever held within an episode.
  """

  def __init__(self, item, once=0, repeated=0):
    self.item = item
    self.once = once
    self.repeated = repeated
    # Item count on the previous step and the episode-wide maximum so far.
    self.previous = 0
    self.maximum = 0

  def __call__(self, obs, inventory):
    count = inventory[self.item]
    if obs['is_first']:
      # New episode: reset the trackers and pay nothing this step.
      self.previous = count
      self.maximum = count
      return 0
    gained = count - self.previous
    reward = self.repeated * (gained if gained > 0 else 0)
    first_pickup = (self.maximum == 0) and (count > 0)
    if first_pickup:
      reward += self.once
    self.previous = count
    if count > self.maximum:
      self.maximum = count
    return reward
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class HealthReward:
  """Reward proportional to the change in player health between steps."""

  def __init__(self, scale=0.01):
    # Scale factor applied to the per-step health delta.
    self.scale = scale
    # Health observed on the previous step; None before the first call.
    self.previous = None

  def __call__(self, obs, inventory=None):
    """Return `scale * (health - previous_health)` as np.float32.

    The unused `inventory` argument keeps the signature compatible with
    CollectReward, so both can be summed uniformly by the task wrappers.
    """
    health = obs['health']
    if obs['is_first']:
      self.previous = health
      # Fixed: previously returned a plain int 0 here while every other
      # path returned np.float32; keep the return dtype consistent.
      return np.float32(0.0)
    reward = self.scale * (health - self.previous)
    self.previous = health
    return np.float32(reward)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
class MinecraftBase(embodied.Env):
  """Low-level MineRL environment behind the embodied.Env interface.

  Maps a discrete action index to a dict of MineRL commands, applies action
  repeat, sticky attack/jump behavior, and a camera pitch limit, and
  flattens the nested MineRL observations into flat arrays and scalars.
  """

  # Serializes env construction and reset across threads.
  # NOTE(review): presumably because Malmo/MineRL instance startup is not
  # thread-safe — confirm.
  LOCK = threading.Lock()
  # Template action with every MineRL command at its neutral value.
  NOOP = dict(
      camera=(0, 0), forward=0, back=0, left=0, right=0, attack=0, sprint=0,
      jump=0, sneak=0, craft='none', nearbyCraft='none', nearbySmelt='none',
      place='none', equip='none')

  def __init__(
      self, actions,
      repeat=1,
      size=(64, 64),
      break_speed=100.0,
      gamma=10.0,
      sticky_attack=30,
      sticky_jump=10,
      pitch_limit=(-60, 60),
      log_inv_keys=('log', 'cobblestone', 'iron_ingot', 'diamond'),
      logs=False,
  ):
    """Create the wrapped MineRL environment.

    Args:
      actions: Dict mapping action names to MineRL command overrides.
      repeat: Env steps per agent step; held keys continue during repeats.
      size: Image observation resolution.
      break_speed: Block breaking speed multiplier; values != 1.0 disable
        sticky attack (see below).
      gamma: Accepted but never read in this class — NOTE(review): confirm
        whether any caller relies on it.
      sticky_attack: Steps to keep attacking after an attack action.
      sticky_jump: Steps to keep jumping (and moving forward) after a jump.
      pitch_limit: (lo, hi) bounds on accumulated camera pitch.
      log_inv_keys: Inventory items also exposed as scalar log/ entries.
      logs: Enable debug-level logging globally.
    """
    if logs:
      logging.basicConfig(level=logging.DEBUG)
    self._repeat = repeat
    self._size = size
    if break_speed != 1.0:
      # With faster breaking, holding attack for many steps is unnecessary.
      sticky_attack = 0

    # Make env
    with self.LOCK:
      self._gymenv = MineRLEnv(size, break_speed).make()
    from . import from_gym
    self._env = from_gym.FromGym(self._gymenv)
    self._inventory = {}

    # Observations
    # All inventory counters except the redundant 'log2' slot, which is
    # merged into 'log' in _obs().
    self._inv_keys = [
        k for k in self._env.obs_space if k.startswith('inventory/')
        if k != 'inventory/log2']
    self._inv_log_keys = [f'inventory/{k}' for k in log_inv_keys]
    assert all(k in self._inv_keys for k in self._inv_log_keys), (
        self._inv_keys, self._inv_log_keys)
    self._step = 0
    # Per-episode elementwise maximum of the inventory vector.
    self._max_inventory = None
    # Enumeration of possible main-hand items, used for one-hot encoding.
    self._equip_enum = self._gymenv.observation_space[
        'equipped_items']['mainhand']['type'].values.tolist()
    self._obs_space = self.obs_space

    # Actions
    actions = self._insert_defaults(actions)
    self._action_names = tuple(actions.keys())
    self._action_values = tuple(actions.values())
    message = f'Minecraft action space ({len(self._action_values)}):'
    print(message, ', '.join(self._action_names))
    self._sticky_attack_length = sticky_attack
    self._sticky_attack_counter = 0
    self._sticky_jump_length = sticky_jump
    self._sticky_jump_counter = 0
    self._pitch_limit = pitch_limit
    # Accumulated camera pitch, used to enforce pitch_limit.
    self._pitch = 0

  @property
  def obs_space(self):
    # Note: the log/ keys are built from self._inv_log_keys, which already
    # carry an 'inventory/' prefix, yielding keys like 'log/inventory/log'.
    return {
        'image': elements.Space(np.uint8, self._size + (3,)),
        'inventory': elements.Space(np.float32, len(self._inv_keys), 0),
        'inventory_max': elements.Space(np.float32, len(self._inv_keys), 0),
        'equipped': elements.Space(np.float32, len(self._equip_enum), 0, 1),
        'reward': elements.Space(np.float32),
        'health': elements.Space(np.float32),
        'hunger': elements.Space(np.float32),
        'breath': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
        **{f'log/{k}': elements.Space(np.int64) for k in self._inv_log_keys},
        # 'log/player_pos': elements.Space(np.float32, 3),
    }

  @property
  def act_space(self):
    return {
        'action': elements.Space(np.int32, (), 0, len(self._action_values)),
        'reset': elements.Space(bool),
    }

  def step(self, action):
    action = action.copy()
    index = action.pop('action')
    # Expand the discrete index into concrete MineRL command overrides.
    action.update(self._action_values[index])
    action = self._action(action)
    if action['reset']:
      obs = self._reset()
    else:
      # During action repeat, only held keys continue; one-shot commands
      # (craft, place, camera, ...) apply on the first inner step only.
      following = self.NOOP.copy()
      for key in ('attack', 'forward', 'back', 'left', 'right'):
        following[key] = action[key]
      for act in [action] + ([following] * (self._repeat - 1)):
        obs = self._env.step(act)
        if self._env.info and 'error' in self._env.info:
          # The underlying env reported a failure; start a fresh episode.
          obs = self._reset()
          break
    obs = self._obs(obs)
    self._step += 1
    assert 'pov' not in obs, list(obs.keys())
    return obs

  @property
  def inventory(self):
    # Flattened item -> count mapping from the most recent observation;
    # consumed by the reward functions in this file.
    return self._inventory

  def _reset(self):
    with self.LOCK:
      obs = self._env.step({'reset': True})
    # Clear all per-episode state.
    self._step = 0
    self._max_inventory = None
    self._sticky_attack_counter = 0
    self._sticky_jump_counter = 0
    self._pitch = 0
    self._inventory = {}
    return obs

  def _obs(self, obs):
    # Merge the duplicate 'log2' counter into 'log' before flattening.
    obs['inventory/log'] += obs.pop('inventory/log2')
    self._inventory = {
        k.split('/', 1)[1]: obs[k] for k in self._inv_keys
        if k != 'inventory/air'}
    inventory = np.array([obs[k] for k in self._inv_keys], np.float32)
    if self._max_inventory is None:
      self._max_inventory = inventory
    else:
      self._max_inventory = np.maximum(self._max_inventory, inventory)
    # One-hot encode the currently equipped main-hand item.
    index = self._equip_enum.index(obs['equipped_items/mainhand/type'])
    equipped = np.zeros(len(self._equip_enum), np.float32)
    equipped[index] = 1.0
    # player_x = obs['location_stats/xpos']
    # player_y = obs['location_stats/ypos']
    # player_z = obs['location_stats/zpos']
    obs = {
        'image': obs['pov'],
        'inventory': inventory,
        'inventory_max': self._max_inventory.copy(),
        # Life stats are normalized by their MineRL maxima (20/20/300).
        'health': np.float32(obs['life_stats/life'] / 20),
        'hunger': np.float32(obs['life_stats/food'] / 20),
        'breath': np.float32(obs['life_stats/air'] / 300),
        'equipped': equipped,
        'reward': np.float32(0.0),
        'is_first': obs['is_first'],
        'is_last': obs['is_last'],
        'is_terminal': obs['is_terminal'],
        **{f'log/{k}': np.int64(obs[k]) for k in self._inv_log_keys},
        # 'log/player_pos': np.array([player_x, player_y, player_z], np.float32),
    }
    # Validate every value against the declared observation space.
    for key, value in obs.items():
      space = self._obs_space[key]
      if not isinstance(value, np.ndarray):
        value = np.array(value)
      assert value in space, (key, value, value.dtype, value.shape, space)
    return obs

  def _action(self, action):
    # Sticky attack: keep attacking (and suppress jumping) for a fixed
    # number of steps after an attack action, so blocks finish breaking.
    if self._sticky_attack_length:
      if action['attack']:
        self._sticky_attack_counter = self._sticky_attack_length
      if self._sticky_attack_counter > 0:
        action['attack'] = 1
        action['jump'] = 0
        self._sticky_attack_counter -= 1
    # Sticky jump: keep jumping and moving forward after a jump action.
    if self._sticky_jump_length:
      if action['jump']:
        self._sticky_jump_counter = self._sticky_jump_length
      if self._sticky_jump_counter > 0:
        action['jump'] = 1
        action['forward'] = 1
        self._sticky_jump_counter -= 1
    # Clamp accumulated pitch by zeroing the pitch delta when it would
    # leave the allowed range.
    if self._pitch_limit and action['camera'][0]:
      lo, hi = self._pitch_limit
      if not (lo <= self._pitch + action['camera'][0] <= hi):
        action['camera'] = (0, action['camera'][1])
      self._pitch += action['camera'][0]
    return action

  def _insert_defaults(self, actions):
    # Fill each action dict with NOOP values for unspecified commands.
    actions = {name: action.copy() for name, action in actions.items()}
    for key, default in self.NOOP.items():
      for action in actions.values():
        if key not in action:
          action[key] = default
    return actions
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
class MineRLEnv(EnvSpec):
  """Custom MineRL environment spec: default survival world, full item set
  for observations and actions, and a configurable block break speed."""

  def __init__(self, resolution=(64, 64), break_speed=50):
    self.resolution = resolution
    self.break_speed = break_speed
    super().__init__(name='MineRLEnv-v1')

  def create_agent_start(self):
    # Apply the break speed multiplier when the agent spawns.
    return [BreakSpeedMultiplier(self.break_speed)]

  def create_agent_handlers(self):
    return []

  def create_server_world_generators(self):
    # Fresh default (procedurally generated) world every episode.
    return [handlers.DefaultWorldGenerator(force_reset=True)]

  def create_server_quit_producers(self):
    return [handlers.ServerQuitWhenAnyAgentFinishes()]

  def create_server_initial_conditions(self):
    return [
        # Start at dawn with the day/night cycle running.
        handlers.TimeInitialCondition(
            allow_passage_of_time=True, start_time=0),
        handlers.SpawningInitialCondition(allow_spawning=True),
    ]

  def create_observables(self):
    return [
        handlers.POVObservation(self.resolution),
        # Expose counts for every known item plus equipment, location, and
        # life stats; MinecraftBase flattens these.
        handlers.FlatInventoryObservation(mc.ALL_ITEMS),
        handlers.EquippedItemObservation(
            mc.ALL_ITEMS, _default='air', _other='other'),
        handlers.ObservationFromCurrentLocation(),
        handlers.ObservationFromLifeStats(),
    ]

  def create_actionables(self):
    # 'none' is the neutral value for all item-valued commands.
    kw = dict(_other='none', _default='none')
    return [
        handlers.KeybasedCommandAction('forward', INVERSE_KEYMAP['forward']),
        handlers.KeybasedCommandAction('back', INVERSE_KEYMAP['back']),
        handlers.KeybasedCommandAction('left', INVERSE_KEYMAP['left']),
        handlers.KeybasedCommandAction('right', INVERSE_KEYMAP['right']),
        handlers.KeybasedCommandAction('jump', INVERSE_KEYMAP['jump']),
        handlers.KeybasedCommandAction('sneak', INVERSE_KEYMAP['sneak']),
        handlers.KeybasedCommandAction('attack', INVERSE_KEYMAP['attack']),
        handlers.CameraAction(),
        handlers.PlaceBlock(['none'] + mc.ALL_ITEMS, **kw),
        handlers.EquipAction(['none'] + mc.ALL_ITEMS, **kw),
        handlers.CraftAction(['none'] + mc.ALL_ITEMS, **kw),
        handlers.CraftNearbyAction(['none'] + mc.ALL_ITEMS, **kw),
        handlers.SmeltItemNearby(['none'] + mc.ALL_ITEMS, **kw),
    ]

  def is_from_folder(self, folder):
    # No demonstration data is associated with this spec.
    return folder == 'none'

  def get_docstring(self):
    return ''

  def determine_success_from_rewards(self, rewards):
    return True

  def create_rewardables(self):
    # Rewards are computed outside MineRL, by the wrappers in this file.
    return []

  def create_server_decorators(self):
    return []

  def create_mission_handlers(self):
    return []

  def create_monitors(self):
    return []
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
class BreakSpeedMultiplier(handler.Handler):
  """Mission handler that injects a block break speed multiplier into the
  mission XML."""

  def __init__(self, multiplier=1.0):
    # The attribute must be named 'multiplier' to match the template key
    # in xml_template() below.
    self.multiplier = multiplier

  def to_string(self):
    return 'break_speed({})'.format(self.multiplier)

  def xml_template(self):
    return '<BreakSpeedMultiplier>{{multiplier}}</BreakSpeedMultiplier>'
|
models/embodied/envs/pinpad.py
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
|
| 3 |
+
import elements
|
| 4 |
+
import embodied
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class PinPad(embodied.Env):
  """Grid-world memory task: step on colored pads in a fixed order.

  The agent moves on a 16x14 grid. Touching a pad appends it to the
  current sequence; completing the full target sequence pays +10 reward,
  then a short countdown shows success before the agent is respawned.
  """

  # RGB colors for each pad character in the layout strings.
  COLORS = {
      '1': (255,   0,   0),
      '2': (  0, 255,   0),
      '3': (  0,   0, 255),
      '4': (255, 255,   0),
      '5': (255,   0, 255),
      '6': (  0, 255, 255),
      '7': (128,   0, 128),
      '8': (  0, 128, 128),
  }

  def __init__(self, task, length=10000):
    """Create the task.

    Args:
      task: One of 'three' ... 'eight', selecting how many pads to press.
      length: Episode length in steps; must be positive.
    """
    assert length > 0
    layout = {
        'three': LAYOUT_THREE,
        'four': LAYOUT_FOUR,
        'five': LAYOUT_FIVE,
        'six': LAYOUT_SIX,
        'seven': LAYOUT_SEVEN,
        'eight': LAYOUT_EIGHT,
    }[task]
    # Transposed so the layout is indexed as [x][y].
    self.layout = np.array([list(line) for line in layout.split('\n')]).T
    assert self.layout.shape == (16, 14), self.layout.shape
    self.length = length
    self.random = np.random.RandomState()
    # Pad characters are everything except walls, floor, and separators.
    self.pads = set(self.layout.flatten().tolist()) - set('* #\n')
    # Target order is the sorted pad characters, e.g. ('1', '2', '3').
    self.target = tuple(sorted(self.pads))
    # All non-wall cells are valid spawn positions.
    self.spawns = []
    for (x, y), char in np.ndenumerate(self.layout):
      if char != '#':
        self.spawns.append((x, y))
    print(f'Created PinPad env with sequence: {"->".join(self.target)}')
    # Rolling window of the most recently touched pads.
    self.sequence = collections.deque(maxlen=len(self.target))
    # Per-episode state, initialized on the first reset.
    self.player = None
    self.steps = None
    self.done = None
    self.countdown = None

  @property
  def act_space(self):
    # Actions: 0 = noop, then down/up/right/left (see move table in step).
    return {
        'action': elements.Space(np.int32, (), 0, 5),
        'reset': elements.Space(bool),
    }

  @property
  def obs_space(self):
    return {
        'image': elements.Space(np.uint8, (64, 64, 3)),
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }

  def step(self, action):
    if self.done or action['reset']:
      # Start a new episode at a random spawn position.
      self.player = self.spawns[self.random.randint(len(self.spawns))]
      self.sequence.clear()
      self.steps = 0
      self.done = False
      self.countdown = 0
      return self._obs(reward=0.0, is_first=True)
    if self.countdown:
      # Success animation in progress; respawn when it runs out.
      self.countdown -= 1
      if self.countdown == 0:
        self.player = self.spawns[self.random.randint(len(self.spawns))]
        self.sequence.clear()
    reward = 0.0
    move = [(0, 0), (0, 1), (0, -1), (1, 0), (-1, 0)][action['action']]
    x = np.clip(self.player[0] + move[0], 0, 15)
    y = np.clip(self.player[1] + move[1], 0, 13)
    tile = self.layout[x][y]
    if tile != '#':
      # Walls block movement; otherwise move to the new cell.
      self.player = (x, y)
    if tile in self.pads:
      # Record a pad only when it differs from the last touched pad.
      if not self.sequence or self.sequence[-1] != tile:
        self.sequence.append(tile)
    if tuple(self.sequence) == self.target and not self.countdown:
      reward += 10.0
      self.countdown = 10
    self.steps += 1
    self.done = self.done or (self.steps >= self.length)
    return self._obs(reward=reward, is_last=self.done)

  def _obs(self, reward, is_first=False, is_last=False, is_terminal=False):
    return dict(
        image=self._render(),
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        is_terminal=is_terminal,
    )

  def _render(self):
    """Render the grid as a 64x64x3 uint8 image."""
    grid = np.zeros((16, 16, 3), np.uint8) + 255
    white = np.array([255, 255, 255])
    if self.countdown:
      # Light green background signals a just-completed sequence.
      grid[:] = (223, 255, 223)
    current = self.layout[self.player[0]][self.player[1]]
    for (x, y), char in np.ndenumerate(self.layout):
      if char == '#':
        grid[x, y] = (192, 192, 192)
      elif char in self.pads:
        color = np.array(self.COLORS[char])
        # Pads are drawn washed-out unless the player stands on them.
        color = color if char == current else (10 * color + 90 * white) / 100
        grid[x, y] = color
    grid[self.player] = (0, 0, 0)
    # The two rightmost columns display the touched-pad history.
    grid[:, -2:] = (192, 192, 192)
    for i, char in enumerate(self.sequence):
      grid[2 * i + 1, -2] = self.COLORS[char]
    # Upscale 16x16 -> 64x64 and swap to (height, width, channel) order.
    image = np.repeat(np.repeat(grid, 4, 0), 4, 1)
    return image.transpose((1, 0, 2))
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
# Task layouts parsed by PinPad.__init__. Each layout is 14 rows of exactly
# 16 characters: '#' walls, ' ' floor, and digit characters marking pads.
# NOTE(review): the interior spacing below was reconstructed to satisfy the
# (16, 14) shape assertion in PinPad — confirm pad positions against the
# original file, since whitespace was lost in transit.
LAYOUT_THREE = """
################
#1111      3333#
#1111      3333#
#1111      3333#
#1111      3333#
#              #
#              #
#              #
#              #
#     2222     #
#     2222     #
#     2222     #
#     2222     #
################
""".strip('\n')

LAYOUT_FOUR = """
################
#1111      4444#
#1111      4444#
#1111      4444#
#1111      4444#
#              #
#              #
#              #
#              #
#3333      2222#
#3333      2222#
#3333      2222#
#3333      2222#
################
""".strip('\n')

LAYOUT_FIVE = """
################
#          4444#
#111       4444#
#111       4444#
#111           #
#111        555#
#           555#
#           555#
#333        555#
#333           #
#333       2222#
#333       2222#
#          2222#
################
""".strip('\n')

LAYOUT_SIX = """
################
#111        555#
#111        555#
#111        555#
#              #
#33          66#
#33          66#
#33          66#
#33          66#
#              #
#444        222#
#444        222#
#444        222#
################
""".strip('\n')

LAYOUT_SEVEN = """
################
#111        444#
#111        444#
#11          44#
#              #
#33          55#
#33          55#
#33          55#
#33          55#
#              #
#66          22#
#666  7777  222#
#666  7777  222#
################
""".strip('\n')

LAYOUT_EIGHT = """
################
#111  8888  444#
#111  8888  444#
#11          44#
#              #
#33          55#
#33          55#
#33          55#
#33          55#
#              #
#66          22#
#666  7777  222#
#666  7777  222#
################
""".strip('\n')
|
models/embodied/envs/procgen.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import elements
|
| 2 |
+
import embodied
|
| 3 |
+
import numpy as np
|
| 4 |
+
import procgen # noqa
|
| 5 |
+
|
| 6 |
+
from PIL import Image
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class ProcGen(embodied.Env):
  """ProcGen environment with an optionally resized image observation.

  At the native 64x64 resolution the image from env.step is used directly
  ('step' source); for any other size the raw render is fetched from the
  env info dict ('info' source) and downscaled with OpenCV or Pillow.
  """

  def __init__(self, task, size=(64, 64), resize='pillow', **kwargs):
    assert resize in ('opencv', 'pillow'), resize
    from . import from_gym
    self.size = size
    self.resize = resize
    if self.size == (64, 64):
      self.source = 'step'
    else:
      self.source = 'info'
    if self.source == 'info':
      # Needed so the env renders full-resolution frames into the info.
      kwargs['render_mode'] = 'rgb_array'
    # The gym registration id differs across procgen versions; try the
    # namespaced id first and fall back to the plain one.
    try:
      self.env = from_gym.FromGym(f'procgen:procgen-{task}-v0', **kwargs)
    except Exception:
      self.env = from_gym.FromGym(f'procgen-{task}-v0', **kwargs)
    if self.source == 'info':
      # Unwrap the wrapper chain until the layer exposing get_info().
      self.inner = self.env
      while not hasattr(self.inner, 'get_info'):
        self.inner = self.inner.env

  @property
  def obs_space(self):
    # Override the image space to reflect the requested output size.
    spaces = self.env.obs_space.copy()
    spaces['image'] = elements.Space(np.uint8, (*self.size, 3))
    return spaces

  @property
  def act_space(self):
    return self.env.act_space

  def step(self, action):
    obs = self.env.step(action)
    if self.source == 'step':
      # Native 64x64 observation; nothing to do.
      pass
    elif self.source == 'info':
      info = self.inner.get_info()
      assert len(info) == 1
      obs['image'] = self._resize(info[0]['rgb'], self.size, self.resize)
    elif self.source == 'render':
      # Never selected by __init__ (source is only 'step' or 'info');
      # kept as an alternative frame source.
      obs['image'] = self._resize(
          self.env.env.render(mode='rgb_array'), self.size, self.resize)
    else:
      raise NotImplementedError(self.source)
    return obs

  def _resize(self, image, size, method):
    """Downscale `image` to `size` using the configured backend."""
    if method == 'opencv':
      import cv2
      # NOTE(review): cv2.resize takes (width, height); `size` is passed
      # unswapped, which only matches for square sizes — confirm for
      # non-square targets.
      image = cv2.resize(image, size, interpolation=cv2.INTER_AREA)
      return image
    elif method == 'pillow':
      image = Image.fromarray(image)
      # PIL expects (width, height), hence the swapped tuple.
      image = image.resize((size[1], size[0]), Image.BILINEAR)
      image = np.array(image)
      return image
    else:
      raise NotImplementedError(method)
|
models/embodied/jax/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .agent import Agent
|
| 2 |
+
|
| 3 |
+
from .heads import DictHead
|
| 4 |
+
from .heads import Head
|
| 5 |
+
from .heads import MLPHead
|
| 6 |
+
|
| 7 |
+
from .utils import LayerScan
|
| 8 |
+
from .utils import Normalize
|
| 9 |
+
from .utils import SlowModel
|
| 10 |
+
|
| 11 |
+
from .opt import Optimizer
|
| 12 |
+
|
| 13 |
+
from . import nets
|
| 14 |
+
from . import outs
|
| 15 |
+
from . import opt
|
models/embodied/jax/agent.py
ADDED
|
@@ -0,0 +1,502 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import dataclasses
|
| 3 |
+
import re
|
| 4 |
+
import threading
|
| 5 |
+
import time
|
| 6 |
+
|
| 7 |
+
import chex
|
| 8 |
+
import elements
|
| 9 |
+
import embodied
|
| 10 |
+
import jax
|
| 11 |
+
import jax.experimental.multihost_utils
|
| 12 |
+
import jax.numpy as jnp
|
| 13 |
+
import ninjax as nj
|
| 14 |
+
import numpy as np
|
| 15 |
+
P = jax.sharding.PartitionSpec
|
| 16 |
+
|
| 17 |
+
from . import internal
|
| 18 |
+
from . import transform
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@dataclasses.dataclass
class Options:
  """JAX-specific agent options, split out of `config.jax` by `Agent.__new__`.

  Any key of `config.jax` that matches a field name here is consumed as an
  option; the remaining keys are forwarded to `internal.setup()`.
  """

  # Local device indices (per process) used for the policy / train meshes.
  policy_devices: tuple = (0,)
  train_devices: tuple = (0,)
  # Mesh shapes as 'd,f,t' strings (data, fsdp, tensor axes); -1 means "fill
  # with the remaining devices" — parsed by internal.mesh().
  policy_mesh: str = '-1,1,1'
  train_mesh: str = '-1,1,1'
  # Capture a JAX profiler trace between update 100 and 120 (see Agent.train).
  profiler: bool = True
  # If > 0, hang forever when jax.devices() reports a different count.
  expect_devices: int = 0
  # Use shard_map instead of jit-based auto partitioning in transform.apply.
  use_shardmap: bool = False
  # Disable to skip keeping a second parameter copy on the policy mesh.
  enable_policy: bool = True
  # Chunk size for grouped checkpoint gather/shard functions; -1 = unchunked.
  ckpt_chunksize: int = -1
  # Lower and compile train/report ahead of time during __init__.
  precompile: bool = True
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class Agent(embodied.Agent):
  """JAX wrapper around a model implementing init/train/report/policy.

  Handles device meshes and sharding, thread locks around jitted calls,
  asynchronous result fetching, checkpointing, and optional profiling.

  NOTE(review): this block was reconstructed from a flattened diff with the
  original indentation stripped; block nesting (lock scopes, if-bodies) was
  inferred from the code's semantics — verify against the upstream file.
  """

  def __new__(subcls, obs_space, act_space, config):
    # Split config.jax into dataclass options vs. internal.setup() kwargs.
    keys = Options.__dataclass_fields__
    options = {k: v for k, v in config.jax.items() if k in keys}
    setup = {k: v for k, v in config.jax.items() if k not in keys}
    jaxcfg = Options(**options)
    internal.setup(**setup)
    # Build the user's model subclass instance, then wrap it in an Agent
    # instance so all subclasses share this JAX plumbing transparently.
    model = super().__new__(subcls)
    model.__init__(obs_space, act_space, config)
    outer = super().__new__(Agent)
    outer.__init__(model, obs_space, act_space, config, jaxcfg)
    return outer

  def __init__(self, model, obs_space, act_space, config, jaxcfg):
    # 'log/...' keys and 'reset' are reserved by the surrounding framework.
    assert not any(k.startswith('log/') for k in obs_space)
    assert 'reset' not in act_space

    self.model = model
    self.obs_space = obs_space
    self.act_space = act_space
    self.config = config
    self.jaxcfg = jaxcfg
    self.logdir = elements.Path(config.logdir)

    ext_space = self.model.ext_space  # Extra inputs to train and report.
    elements.print('Observations', color='cyan')
    [elements.print(f' {k:<16} {v}') for k, v in obs_space.items()]
    elements.print('Actions', color='cyan')
    [elements.print(f' {k:<16} {v}') for k, v in act_space.items()]
    elements.print('Extras', color='cyan')
    [elements.print(f' {k:<16} {v}') for k, v in ext_space.items()]
    self.spaces = dict(**obs_space, **act_space, **ext_space)
    # Key collisions would silently overwrite entries in self.spaces.
    assert not (obs_space.keys() & ext_space.keys()), (obs_space, ext_space)
    assert not (act_space.keys() & ext_space.keys()), (act_space, ext_space)

    available = jax.devices()
    elements.print(f'JAX devices ({jax.device_count()}):', available)
    if self.jaxcfg.expect_devices > 0:
      if len(available) != self.jaxcfg.expect_devices:
        # Deliberately hang instead of crashing, so a cluster scheduler can
        # notice the stuck job rather than restart it in a tight loop.
        print('ALERT: Wrong number of devices')
        while True:
          time.sleep(1)
    assert len(available) == jax.process_count() * jax.local_device_count()
    flatten = lambda x: x.reshape(-1).tolist()
    # Select the configured local-device columns across all processes.
    devices = np.array(available).reshape(
        jax.process_count(), jax.local_device_count())
    self.policy_devices = flatten(devices[:, self.jaxcfg.policy_devices])
    self.train_devices = flatten(devices[:, self.jaxcfg.train_devices])
    print('Policy devices:', ', '.join([str(x) for x in self.policy_devices]))
    print('Train devices: ', ', '.join([str(x) for x in self.train_devices]))

    # d = DP, f = FSDP, t = TP
    self.policy_mesh = internal.mesh(
        self.policy_devices, self.jaxcfg.policy_mesh, ('d', 'f', 't'))
    # 'sharded' splits the batch axis over (d, f); 'mirrored' replicates.
    self.policy_sharded = jax.sharding.NamedSharding(
        self.policy_mesh, P(('d', 'f')))
    self.policy_mirrored = jax.sharding.NamedSharding(self.policy_mesh, P())
    self.train_mesh = internal.mesh(
        self.train_devices, self.jaxcfg.train_mesh, ('d', 'f', 't'))
    self.train_sharded = jax.sharding.NamedSharding(
        self.train_mesh, P(('d', 'f')))
    self.train_mirrored = jax.sharding.NamedSharding(self.train_mesh, P())
    # The tensor-parallel axis must fit within one process's local devices.
    if self.train_mesh.shape['t'] > len(self.jaxcfg.train_devices) or (
        self.policy_mesh.shape['t'] > len(self.jaxcfg.policy_devices)):
      raise NotImplementedError('Inter-node TP is not supported!')
    if self.jaxcfg.use_shardmap:
      # shard_map mode only supports pure data parallelism here.
      assert self.train_mesh.shape['d'] == self.train_mesh.size
      assert self.policy_mesh.shape['d'] == self.policy_mesh.size

    # self.train_node_mesh = internal.node_mesh(self.train_mesh, mp_dims=('t',))
    # print('Train Node mesh:',self.train_node_mesh)

    # Default: replicate every parameter, no activation rules.
    self.partition_rules = getattr(
        self.model, 'partition_rules', ([('.*', P())], []))
    elements.print('Initializing parameters...', color='yellow')
    with self.train_mesh:
      self.params, self.train_params_sharding = self._init_params()
    elements.print('Done initializing!', color='yellow')
    # The model declares which parameters the policy needs via a regex.
    pattern = re.compile(self.model.policy_keys)
    self.policy_keys = [k for k in self.params.keys() if pattern.search(k)]
    assert self.policy_keys, (list(self.params.keys()), self.model.policy_keys)

    # Mirror the train-mesh partition specs onto the policy mesh.
    self.policy_params_sharding = {
        k: jax.sharding.NamedSharding(self.policy_mesh, v.spec)
        for k, v in self.train_params_sharding.items()
        if k in self.policy_keys}

    shared_kwargs = {'use_shardmap': jaxcfg.use_shardmap}
    tm, ts = self.train_mirrored, self.train_sharded
    pm, ps = self.policy_mirrored, self.policy_sharded
    tp, pp = self.train_params_sharding, self.policy_params_sharding
    _, ar = self.partition_rules
    # Wrap the model's pure functions with input/output shardings.
    self._init_train = transform.apply(
        nj.pure(self.model.init_train), self.train_mesh,
        (tp, tm), (ts,), ar, single_output=True, static_argnums=(2,),
        **shared_kwargs)
    self._init_report = transform.apply(
        nj.pure(self.model.init_report), self.train_mesh,
        (tp, tm), (ts,), ar, single_output=True, static_argnums=(2,),
        **shared_kwargs)
    self._init_policy = transform.apply(
        nj.pure(self.model.init_policy), self.policy_mesh,
        (pp, pm), (ps,), ar, single_output=True, static_argnums=(2,),
        **shared_kwargs)
    # Train donates the params the policy does not need ('dona') and keeps
    # the policy params ('allo') alive so they can be synced to the policy.
    allo_sharding = {k: v for k, v in tp.items() if k in self.policy_keys}
    dona_sharding = {k: v for k, v in tp.items() if k not in self.policy_keys}
    self._train = transform.apply(
        nj.pure(self.model.train), self.train_mesh,
        (dona_sharding, allo_sharding, tm, ts, ts), (tp, ts, ts, tm), ar,
        return_params=True, donate_params=True, first_outnums=(3,),
        **shared_kwargs)
    self._report = transform.apply(
        nj.pure(self.model.report), self.train_mesh,
        (tp, tm, ts, ts), (ts, tm), ar,
        first_outnums=(1,), **shared_kwargs)
    self._policy = transform.apply(
        nj.pure(self.model.policy), self.policy_mesh,
        (pp, pm, ps, ps), (ps, ps, ps), ar,
        static_argnums=(4,), **shared_kwargs)

    # Locks serialize access to the jitted calls and the parameter copies.
    self.policy_lock = threading.Lock()
    self.train_lock = threading.Lock()
    self.n_updates = elements.Counter()
    self.n_batches = elements.Counter()
    self.n_actions = elements.Counter()

    # Results fetched asynchronously; returned one call later (see train()).
    self.pending_outs = None
    self.pending_mets = None
    self.pending_sync = None

    if self.jaxcfg.enable_policy:
      # Keep a separate parameter copy on the policy mesh.
      policy_params = {
          k: self.params[k].copy() for k in self.policy_keys}
      self.policy_params = internal.move(
          policy_params, self.policy_params_sharding)

      # Convert between stacked global arrays and per-example lists of the
      # policy carry (the driver passes the carry around per environment).
      self._split = jax.jit(
          lambda xs: jax.tree.map(lambda x: list(x), xs),
          internal.local_sharding(self.policy_sharded),
          internal.local_sharding(self.policy_mirrored))
      self._stack = jax.jit(
          lambda xs: jax.tree.map(
              jnp.stack, xs, is_leaf=lambda x: isinstance(x, list)),
          internal.local_sharding(self.policy_mirrored),
          internal.local_sharding(self.policy_sharded))

    self._ckpt_groups = internal.grouped_ckpt_fns(
        self.params, self.jaxcfg.ckpt_chunksize)
    if self.jaxcfg.precompile:
      elements.print('Compiling train and report...', color='yellow')
      with self.train_mesh:
        self._compile_train()
        print('Train cost analysis:')
        print(self._format_jit_stats(self._train))
        self._compile_report()
        print('Report cost analysis:')
        print(self._format_jit_stats(self._report))
      elements.print('Done compiling!', color='yellow')

  def init_policy(self, batch_size):
    """Return the initial per-example policy carry for `batch_size` envs."""
    if not self.jaxcfg.enable_policy:
      raise Exception('Policy not available when enable_policy=False')
    # The jitted init sees the global batch across all processes.
    batch_size = batch_size * jax.process_count()
    if self.jaxcfg.use_shardmap:
      batch_size = batch_size // self.policy_mesh.size
    return self._split(internal.to_local(self._init_policy(
        self.policy_params, self._seeds(0, self.policy_mirrored), batch_size)))

  def init_train(self, batch_size):
    """Return the initial train carry for the global batch."""
    batch_size = batch_size * jax.process_count()
    if self.jaxcfg.use_shardmap:
      batch_size = batch_size // self.train_mesh.size
    return self._init_train(
        self.params, self._seeds(0, self.train_mirrored), batch_size)

  def init_report(self, batch_size):
    """Return the initial report carry for the global batch."""
    batch_size = batch_size * jax.process_count()
    if self.jaxcfg.use_shardmap:
      batch_size = batch_size // self.train_mesh.size
    return self._init_report(
        self.params, self._seeds(0, self.train_mirrored), batch_size)

  @elements.timer.section('jaxagent_policy')
  def policy(self, carry, obs, mode='train'):
    """Compute actions for a batch of observations.

    Takes and returns the carry as per-example lists; validates inputs and
    outputs, and swaps in freshly synced policy params when available.
    """
    if not self.jaxcfg.enable_policy:
      raise Exception('Policy not available when enable_policy=False')
    assert not any(k.startswith('log/') for k in obs), obs.keys()
    assert sorted(obs.keys()) == sorted(self.obs_space.keys()), (
        sorted(obs.keys()), sorted(self.obs_space.keys()))
    for key, space in self.obs_space.items():
      assert np.isfinite(obs[key]).all(), (obs[key], key, space)

    with self.policy_lock:
      obs = internal.device_put(obs, self.policy_sharded)
      # Counter lock gives each concurrent policy call a unique seed index.
      with self.n_actions.lock:
        counter = self.n_actions.value
        self.n_actions.value += 1
      seed = self._seeds(counter, self.policy_mirrored)
      carry = internal.to_global(self._stack(carry), self.policy_sharded)

    with self.policy_lock:
      carry, acts, outs = self._policy(
          self.policy_params, seed, carry, obs, mode)

    if self.jaxcfg.enable_policy:
      with self.policy_lock:
        # Adopt parameters synced over from the train step, freeing the old
        # copy explicitly to bound device memory.
        if self.pending_sync:
          old = self.policy_params
          self.policy_params = self.pending_sync
          jax.tree.map(lambda x: x.delete(), old)
          self.pending_sync = None

    acts, outs = self._take_outs(internal.fetch_async((acts, outs)))
    carry = self._split(internal.to_local(carry))

    # 'finite' carries finiteness flags computed on device by the model.
    finite = outs.pop('finite', {})
    for key, fin in finite.items():
      assert all(x.all() for x in jax.tree.leaves(fin)), str(finite)
    for key, space in self.act_space.items():
      if space.discrete:
        assert (acts[key] >= 0).all(), (acts[key], key, space)
      else:
        assert np.isfinite(acts[key]).all(), (acts[key], key, space)

    return carry, acts, outs

  @elements.timer.section('jaxagent_train')
  def train(self, carry, data):
    """Run one training step; returns outs/mets of the PREVIOUS step.

    Results are fetched asynchronously so the host never blocks on device
    work; callers therefore see metrics delayed by one call.
    """
    seed = data.pop('seed')
    assert sorted(data.keys()) == sorted(self.spaces.keys()), (
        sorted(data.keys()), sorted(self.spaces.keys()))
    # Donate non-policy params to the jitted step; keep policy params alive
    # so they can be synced to the policy mesh below.
    allo = {k: v for k, v in self.params.items() if k in self.policy_keys}
    dona = {k: v for k, v in self.params.items() if k not in self.policy_keys}
    with self.train_lock:
      with elements.timer.section('jit_train'):
        with jax.profiler.StepTraceAnnotation(
            'train', step_num=int(self.n_updates)):
          self.params, carry, outs, mets = self._train(
              dona, allo, seed, carry, data)
    self.n_updates.increment()

    if self.jaxcfg.enable_policy:
      # Hand the pre-update policy params to the policy thread; if a sync is
      # already pending, just free this copy.
      if not self.pending_sync:
        self.pending_sync = internal.move(
            {k: allo[k] for k in self.policy_keys},
            self.policy_params_sharding)
      else:
        jax.tree.map(lambda x: x.delete(), allo)

    return_outs = {}
    if self.pending_outs:
      return_outs = self._take_outs(self.pending_outs)
    self.pending_outs = internal.fetch_async(outs)

    return_mets = {}
    if self.pending_mets:
      return_mets = self._take_outs(self.pending_mets)
    self.pending_mets = internal.fetch_async(mets)

    if self.jaxcfg.profiler:
      outdir, copyto = self.logdir, None
      # Remote filesystems: trace locally, then copy the result over.
      if str(outdir).startswith(('gs://', '/gcs/', '/cns/')):
        copyto = outdir
        outdir = elements.Path('/tmp/profiler')
        outdir.mkdir()
      if self.n_updates == 100:
        elements.print(f'Start JAX profiler: {str(outdir)}', color='yellow')
        jax.profiler.start_trace(str(outdir))
      if self.n_updates == 120:
        elements.print('Stop JAX profiler', color='yellow')
        jax.profiler.stop_trace()
        if copyto:
          for subdir in elements.Path(outdir).glob('*'):
            subdir.copy(copyto, recursive=True)
          print(f'Copied profiler result {outdir} to {copyto}')

    return carry, return_outs, return_mets

  @elements.timer.section('jaxagent_report')
  def report(self, carry, data):
    """Run the model's report function and return host-side metrics."""
    seed = data.pop('seed')
    assert sorted(data.keys()) == sorted(self.spaces.keys()), (
        sorted(data.keys()), sorted(self.spaces.keys()))
    with self.train_lock:
      carry, mets = self._report(self.params, seed, carry, data)
    mets = self._take_outs(internal.fetch_async(mets))
    mets['params/summary'] = self._summary()
    return carry, mets

  def stream(self, st):
    """Wrap a data stream to validate, device-put, and seed each batch."""
    def fn(data):
      for key, value in data.items():
        if np.issubdtype(value.dtype, np.floating):
          assert not np.isnan(value).any(), (key, value)
      data = internal.device_put(data, self.train_sharded)
      with self.n_batches.lock:
        counter = self.n_batches.value
        self.n_batches.value += 1
      seed = self._seeds(counter, self.train_mirrored)
      return {**data, 'seed': seed}
    return embodied.streams.Prefetch(st, fn)

  @elements.timer.section('jaxagent_save')
  def save(self):
    """Gather all parameters to host memory and return a checkpoint dict."""
    with self.train_lock:
      params = {}
      # Gather in groups to bound peak host/device memory.
      for keys, gather_fn, _ in self._ckpt_groups:
        group = {k: self.params[k] for k in keys}
        params.update(jax.device_get(gather_fn(group)))
    assert params
    counters = {
        'updates': int(self.n_updates),
        'batches': int(self.n_batches),
        'actions': int(self.n_actions),
    }
    data = {'params': params, 'counters': counters}
    return data

  @elements.timer.section('jaxagent_load')
  def load(self, data, regex=None):
    """Restore a checkpoint; `regex` loads only a matching parameter subset."""
    params = data['params']
    assert params

    with contextlib.ExitStack() as stack:
      # Hold both locks while replacing parameter trees.
      stack.enter_context(self.train_lock)
      stack.enter_context(self.policy_lock)

      with self.n_updates.lock:
        self.n_updates.value = int(data['counters']['updates'])
      with self.n_batches.lock:
        # We restore n_batches to the checkpointed update counter, so the
        # prefetched batches that were not trained on get repeated.
        self.n_batches.value = int(data['counters']['updates'])
      with self.n_actions.lock:
        self.n_actions.value = int(data['counters']['actions'])

      if regex:
        # Partial load (e.g. pretrained weights): only replace matches.
        params = {k: v for k, v in params.items() if re.match(regex, k)}
        keys = params.keys()
        jax.tree.map(lambda x: x.delete(), [self.params[k] for k in keys])
        params = internal.ckpt_fn({k: self.params[k] for k in keys})[1](
            internal.device_put(params, self.train_mirrored))
        print('Loaded pretrained checkpoint with keys:', list(params.keys()))
        self.params.update(params)
      else:
        # Full load: shapes must match exactly; free the old tree first.
        chex.assert_trees_all_equal_shapes(self.params, params)
        jax.tree.map(lambda x: x.delete(), self.params)

        loaded = {}
        for keys, _, shard_fn in self._ckpt_groups:
          group = {k: params[k] for k in keys}
          group = shard_fn(internal.device_put(group, self.train_mirrored))
          loaded.update(group)
        self.params = loaded

      if self.jaxcfg.enable_policy:
        # Rebuild the policy-mesh copy from the freshly loaded params.
        jax.tree.map(lambda x: x.delete(), self.policy_params)
        policy_params = {
            k: self.params[k].copy() for k in self.policy_keys}
        self.policy_params = internal.move(
            policy_params, self.policy_params_sharding)

  def _take_outs(self, outs):
    # Materialize device arrays as numpy; numpy has no bfloat16, so upcast.
    outs = jax.tree.map(lambda x: x.__array__(), outs)
    outs = jax.tree.map(
        lambda x: np.float32(x) if x.dtype == jnp.bfloat16 else x, outs)
    return outs

  def _seeds(self, counter, sharding):
    # Deterministic per-call seeds derived from (config.seed, counter).
    rng = np.random.default_rng(seed=[self.config.seed, int(counter)])
    seeds = rng.integers(0, np.iinfo(np.uint32).max, (2,), np.uint32)
    return internal.device_put(seeds, sharding)

  def _init_params(self):
    """Initialize params by tracing init_train then train on dummy inputs."""
    B = min(self.config.batch_size, len(self.jaxcfg.train_devices))
    GB = B * jax.process_count()  # Global batch across processes.
    T = self.config.batch_length
    C = self.config.replay_context
    tm, ts = self.train_mirrored, self.train_sharded
    us = self.jaxcfg.use_shardmap

    with jax._src.config.explicit_device_get_scope():
      seed = jax.device_put(np.array([self.config.seed, 0], np.uint32), tm)
      data = internal.device_put(self._zeros(self.spaces, (B, T + C)), ts)
    pr, ar = self.partition_rules

    # First create the params touched by init_train...
    params, params_sharding = transform.init(
        self.model.init_train, self.train_mesh,
        ({}, self.train_mirrored),
        param_partition_rules=pr,
        act_partition_rules=ar,
        static_argnums=(2,),
        dummy_inputs=({}, seed, GB),
        print_partition=(len(pr) >= 2),
    )
    # ...then obtain a real carry, and finally let train create the rest.
    carry = transform.apply(
        nj.pure(self.model.init_train), self.train_mesh,
        (params_sharding, tm), (ts,), single_output=True,
        static_argnums=(2,), use_shardmap=us)(
            params, seed, GB // self.train_mesh.size if us else GB)
    params, params_sharding = transform.init(
        self.model.train, self.train_mesh,
        (params_sharding, tm, ts, ts),
        param_partition_rules=pr,
        act_partition_rules=ar,
        dummy_inputs=(params, seed, carry, data),
        print_partition=(len(pr) >= 2),
    )
    return params, params_sharding

  def _compile_train(self):
    # Lower and compile the train step on representative dummy inputs.
    B = self.config.batch_size
    T = self.config.batch_length
    C = self.config.replay_context
    data = self._zeros(self.spaces, (B, T + C))
    data = internal.device_put(data, self.train_sharded)
    seed = self._seeds(0, self.train_mirrored)
    carry = self.init_train(B)
    allo = {k: v for k, v in self.params.items() if k in self.policy_keys}
    dona = {k: v for k, v in self.params.items() if k not in self.policy_keys}
    self._train = self._train.lower(dona, allo, seed, carry, data).compile()

  def _compile_report(self):
    # Lower and compile the report step on representative dummy inputs.
    B = self.config.batch_size
    T = self.config.report_length
    C = self.config.replay_context
    data = self._zeros(self.spaces, (B, T + C))
    data = internal.device_put(data, self.train_sharded)
    seed = self._seeds(0, self.train_mirrored)
    carry = self.init_report(B)
    self._report = self._report.lower(self.params, seed, carry, data).compile()

  def _summary(self):
    # Human-readable table of parameter dtypes, sizes, and shapes.
    lines = []
    for k, v in self.params.items():
      lines.append(f'{k:<40} {v.dtype} {v.size} {v.shape}')
    return '\n'.join(lines)

  def _zeros(self, spaces, batch_shape):
    # Build zero-filled dummy inputs with the given leading batch dims.
    data = {k: np.zeros(v.shape, v.dtype) for k, v in spaces.items()}
    for dim in reversed(batch_shape):
      data = {k: np.repeat(v[None], dim, axis=0) for k, v in data.items()}
    return data

  def _format_jit_stats(self, compiled):
    # Best effort: cost/memory analysis is not available on all backends.
    try:
      cost = compiled.cost_analysis()
      mem = compiled.memory_analysis()
      lines = []
      lines.append(f"FLOPS: {cost[0]['flops']:.1e}")
      lines.append(f"Memory (temp): {mem.temp_size_in_bytes:.1e}")
      lines.append(f"Memory (inputs): {mem.argument_size_in_bytes:.1e}")
      lines.append(f"Memory (outputs): {mem.output_size_in_bytes:.1e}")
      lines.append(f"Memory (code): {mem.generated_code_size_in_bytes:.1e}")
      return ''.join(f' {line}\n' for line in lines)
    except (TypeError, AttributeError, KeyError):
      return 'No available'
|
| 494 |
+
|
| 495 |
+
def init(fun, **jit_kwargs):
  """Build an initializer that runs `fun` once just to create its state.

  `fun` is made pure via `nj.pure` unless it already is. The returned
  callable invokes it with create/modify/ignore enabled, discards the
  function's actual output, and returns `(state, ())`.
  """
  pure_fn = fun if getattr(fun, '_is_pure', False) else nj.pure(fun)

  def initializer(*args, **kwargs):
    state, _ = pure_fn(
        *args, create=True, modify=True, ignore=True, **kwargs)
    return state, ()

  return initializer
|
models/embodied/jax/heads.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Callable
|
| 2 |
+
|
| 3 |
+
import elements
|
| 4 |
+
import jax
|
| 5 |
+
import jax.numpy as jnp
|
| 6 |
+
import ninjax as nj
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
from . import nets
|
| 10 |
+
from . import outs
|
| 11 |
+
|
| 12 |
+
i32 = jnp.int32
|
| 13 |
+
f32 = jnp.float32
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class MLPHead(nj.Module):
  """MLP trunk followed by a distribution head (or dict of heads).

  Class-level annotated fields act as configurable defaults under the
  ninjax module system.
  """

  units: int = 1024
  layers: int = 5
  act: str = 'silu'
  norm: str = 'rms'
  bias: bool = True
  winit: str | Callable = nets.Initializer('trunc_normal')
  binit: str | Callable = nets.Initializer('zeros')

  def __init__(self, space, output, **hkw):
    # Linear-layer options shared by both the MLP trunk and the head.
    shared = dict(bias=self.bias, winit=self.winit, binit=self.binit)
    mkw = dict(**shared, act=self.act, norm=self.norm)
    hkw = dict(**shared, **hkw)
    self.mlp = nets.MLP(self.layers, self.units, **mkw, name='mlp')
    # A dict of spaces gets one Head per key; a single space gets one Head.
    if isinstance(space, dict):
      self.head = DictHead(space, output, **hkw, name='head')
    else:
      self.head = Head(space, output, **hkw, name='head')

  def __call__(self, x, bdims):
    # Flatten all non-batch dims into one feature axis before the MLP.
    # NOTE(review): bshape is read via jax.tree.leaves(x) but x is then
    # reshaped directly — presumably x is always a single array here; the
    # tree access looks vestigial. Confirm against callers.
    bshape = jax.tree.leaves(x)[0].shape[:bdims]
    x = x.reshape((*bshape, -1))
    x = self.mlp(x)
    x = self.head(x)
    return x
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class DictHead(nj.Module):
  """Applies one `Head` per key of a dict of spaces/output types.

  Scalars are promoted to single-entry dicts under the key 'output', so a
  plain space/output pair behaves like a one-key dict.
  """

  def __init__(self, spaces, outputs, **kw):
    assert spaces, spaces
    spaces = spaces if isinstance(spaces, dict) else {'output': spaces}
    outputs = outputs if isinstance(outputs, dict) else {'output': outputs}
    assert spaces.keys() == outputs.keys(), (spaces, outputs)
    self.spaces = spaces
    self.outputs = outputs
    self.kw = kw

  def __call__(self, x):
    # One sub-module per key, each producing its own output distribution.
    return {
        key: self.sub(key, Head, self.spaces[key], impl, **self.kw)(x)
        for key, impl in self.outputs.items()}
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class Head(nj.Module):
  """Maps features to an output distribution named by `output`.

  The `output` string selects one of the methods below (binary,
  categorical, mse, symexp_twohot, ...), each of which builds the required
  linear projections and returns an `outs` distribution object.
  """

  minstd: float = 1.0  # Lower stddev bound for bounded_normal.
  maxstd: float = 1.0  # Upper stddev bound for bounded_normal.
  unimix: float = 0.0  # Uniform mixture weight for onehot outputs.
  bins: int = 255      # Number of bins for symexp_twohot.
  outscale: float = 1.0  # Output-layer weight scale.

  def __init__(self, space, output, **kw):
    # Accept a bare shape tuple as shorthand for a float32 space.
    if isinstance(space, tuple):
      space = elements.Space(np.float32, space)
    if output == 'onehot':
      # Onehot turns a discrete space into a continuous simplex space with
      # an extra trailing class dimension.
      classes = np.asarray(space.classes).flatten()
      assert (classes == classes[0]).all(), classes
      shape = (*space.shape, classes[0].item())
      space = elements.Space(f32, shape, 0.0, 1.0)
    self.space = space
    self.impl = output
    self.kw = {**kw, 'outscale': self.outscale}

  def __call__(self, x):
    # Dispatch to the method named by self.impl.
    if not hasattr(self, self.impl):
      raise NotImplementedError(self.impl)
    x = nets.ensure_dtypes(x)
    output = getattr(self, self.impl)(x)
    # Aggregate (sum) log-probs etc. over the event dimensions.
    if self.space.shape:
      output = outs.Agg(output, len(self.space.shape), jnp.sum)
    assert output.pred().shape[x.ndim - 1:] == self.space.shape, (
        self.space, self.impl, x.shape, output.pred().shape)
    return output

  def binary(self, x):
    # Bernoulli over {0, 1} via a single logit.
    assert np.all(self.space.classes == 2), self.space
    logit = self.sub('logit', nets.Linear, self.space.shape, **self.kw)(x)
    return outs.Binary(logit)

  def categorical(self, x):
    # Categorical over a uniform number of classes per entry.
    assert self.space.discrete
    classes = np.asarray(self.space.classes).flatten()
    assert (classes == classes[0]).all(), classes
    shape = (*self.space.shape, classes[0].item())
    logits = self.sub('logits', nets.Linear, shape, **self.kw)(x)
    output = outs.Categorical(logits)
    # Entropy bounds used by entropy regularizers downstream.
    output.minent = 0
    output.maxent = np.log(logits.shape[-1])
    return output

  def onehot(self, x):
    # One-hot distribution over the (already expanded) class dimension,
    # optionally mixed with a uniform for exploration (unimix).
    assert not self.space.discrete
    logits = self.sub('logits', nets.Linear, self.space.shape, **self.kw)(x)
    return outs.OneHot(logits, self.unimix)

  def mse(self, x):
    # Deterministic prediction trained with mean squared error.
    assert not self.space.discrete
    pred = self.sub('pred', nets.Linear, self.space.shape, **self.kw)(x)
    return outs.MSE(pred)

  def huber(self, x):
    # Deterministic prediction trained with Huber loss.
    assert not self.space.discrete
    pred = self.sub('pred', nets.Linear, self.space.shape, **self.kw)(x)
    return outs.Huber(pred)

  def symlog_mse(self, x):
    # MSE in symlog space, for targets with large dynamic range.
    assert not self.space.discrete
    pred = self.sub('pred', nets.Linear, self.space.shape, **self.kw)(x)
    return outs.MSE(pred, nets.symlog)

  def symexp_twohot(self, x):
    # Two-hot classification over exponentially spaced bins spanning
    # roughly [-symexp(20), symexp(20)], symmetric around zero.
    assert not self.space.discrete
    shape = (*self.space.shape, self.bins)
    logits = self.sub('logits', nets.Linear, shape, **self.kw)(x)
    if self.bins % 2 == 1:
      # Odd bin count: include a bin exactly at zero (shared midpoint).
      half = jnp.linspace(-20, 0, (self.bins - 1) // 2 + 1, dtype=f32)
      half = nets.symexp(half)
      bins = jnp.concatenate([half, -half[:-1][::-1]], 0)
    else:
      half = jnp.linspace(-20, 0, self.bins // 2, dtype=f32)
      half = nets.symexp(half)
      bins = jnp.concatenate([half, -half[::-1]], 0)
    return outs.TwoHot(logits, bins)

  def bounded_normal(self, x):
    # Normal with tanh-bounded mean and sigmoid-bounded stddev in
    # [minstd, maxstd]; the +2.0 shifts the initial stddev toward maxstd.
    assert not self.space.discrete
    mean = self.sub('mean', nets.Linear, self.space.shape, **self.kw)(x)
    stddev = self.sub('stddev', nets.Linear, self.space.shape, **self.kw)(x)
    lo, hi = self.minstd, self.maxstd
    stddev = (hi - lo) * jax.nn.sigmoid(stddev + 2.0) + lo
    output = outs.Normal(jnp.tanh(mean), stddev)
    output.minent = outs.Normal(jnp.zeros_like(mean), self.minstd).entropy()
    output.maxent = outs.Normal(jnp.zeros_like(mean), self.maxstd).entropy()
    return output

  def normal_logstd(self, x):
    # Normal with unbounded mean and log-parameterized stddev.
    assert not self.space.discrete
    mean = self.sub('mean', nets.Linear, self.space.shape, **self.kw)(x)
    stddev = self.sub('stddev', nets.Linear, self.space.shape, **self.kw)(x)
    output = outs.Normal(mean, jnp.exp(stddev))
    return output
|
models/embodied/jax/internal.py
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import concurrent.futures
|
| 2 |
+
import math
|
| 3 |
+
import os
|
| 4 |
+
import string
|
| 5 |
+
|
| 6 |
+
import elements
|
| 7 |
+
import jax
|
| 8 |
+
import jax.numpy as jnp
|
| 9 |
+
import numpy as np
|
| 10 |
+
from jax.sharding import PartitionSpec as P
|
| 11 |
+
|
| 12 |
+
from . import nets
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def setup(
    platform=None,
    compute_dtype=jnp.bfloat16,
    debug=False,
    jit=True,
    prealloc=False,
    mock_devices=0,
    transfer_guard=True,
    deterministic=True,
    autotune=1,
    gpuflags=True,
    tpuflags=False,
    xladump=None,
    debug_nans=False,
    process_id=-1,
    num_processes=1,
    coordinator_address=None,
    compilation_cache=True,
):
  """Configure JAX, XLA flags, and optional multi-host setup.

  Must run before the first JAX operation: it mutates global jax.config
  entries and environment variables (notably XLA_FLAGS) that the backend
  reads only once at initialization. As a side effect it also publishes
  the chosen compute precision to nets.COMPUTE_DTYPE.
  """
  # Restrict JAX to a single platform ('cpu'/'gpu'/'tpu') when requested.
  platform and jax.config.update('jax_platforms', platform)
  jax.config.update('jax_disable_most_optimizations', debug)
  jax.config.update('jax_disable_jit', not jit)
  # The transfer guard catches accidental host<->device copies; it is only
  # enabled when jitting and not inspecting runtime values for NaNs.
  if transfer_guard and jit and not debug_nans:
    jax.config.update('jax_transfer_guard', 'disallow')
  os.environ['XLA_PYTHON_CLIENT_PREALLOCATE'] = str(bool(prealloc)).lower()
  jax.config.update('jax_debug_nans', debug_nans)
  jax.config.update('jax_enable_compilation_cache', compilation_cache)

  xlaflags = []
  xlaflags.append(f'--xla_gpu_autotune_level={autotune}')
  if deterministic:
    os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
    xlaflags.append('--xla_gpu_deterministic_ops=true')
  if mock_devices:
    # Emulate multiple devices on the host platform, e.g. for tests.
    xlaflags.append(f'--xla_force_host_platform_device_count={mock_devices}')
  if xladump:
    elements.Path(xladump).mkdir()
    xlaflags.append(f'--xla_dump_to={xladump}')
    xlaflags.append('--xla_dump_hlo_as_long_text')
  if gpuflags and platform == 'gpu':
    # xla_flags.append('--xla_gpu_enable_latency_hiding_scheduler=true')
    # xla_flags.append('--xla_gpu_enable_async_all_gather=true')
    # xla_flags.append('--xla_gpu_enable_async_reduce_scatter=true')
    # xla_flags.append('--xla_gpu_enable_triton_gemm=false')
    # os.environ['CUDA_DEVICE_MAX_CONNECTIONS'] = '1'
    # os.environ['NCCL_IB_SL'] = '1'
    # os.environ['NCCL_NVLS_ENABLE'] = '0'
    # os.environ['CUDA_MODULE_LOADING'] = 'EAGER'
    xlaflags += [
        '--xla_disable_hlo_passes=rematerialization',
        '--xla_gpu_all_gather_combine_threshold_bytes=134217728',
        '--xla_gpu_all_reduce_combine_threshold_bytes=134217728',
        '--xla_gpu_enable_all_gather_combine_by_dim=false',
        '--xla_gpu_enable_highest_priority_async_stream=true',
        '--xla_gpu_enable_latency_hiding_scheduler=true',
        '--xla_gpu_enable_pipelined_all_gather=true',
        '--xla_gpu_enable_pipelined_all_reduce=true',
        '--xla_gpu_enable_pipelined_reduce_scatter=true',
        '--xla_gpu_enable_reduce_scatter_combine_by_dim=false',
        '--xla_gpu_enable_triton_gemm=false',
        '--xla_gpu_enable_triton_softmax_fusion=false',
        '--xla_gpu_enable_while_loop_double_buffering=true',
        '--xla_gpu_graph_level=0',
        '--xla_gpu_reduce_scatter_combine_threshold_bytes=67108864',
    ]
  if tpuflags and platform == 'tpu':
    xlaflags += [
        '--xla_disable_hlo_passes=rematerialization',
        '--xla_tpu_megacore_fusion_allow_ags=false',
        '--xla_enable_async_collective_permute=true',
        '--xla_tpu_enable_ag_backward_pipelining=true',
        '--xla_tpu_enable_data_parallel_all_reduce_opt=true',
        '--xla_tpu_data_parallel_opt_different_sized_ops=true',
        '--xla_tpu_enable_async_collective_fusion=true',
        '--xla_tpu_enable_async_collective_fusion_multiple_steps=true',
        '--xla_tpu_overlap_compute_collective_tc=true',
        '--xla_enable_async_all_gather=true',
    ]
  if xlaflags:
    os.environ['XLA_FLAGS'] = ' '.join(xlaflags)

  if num_processes > 1 and platform != 'tpu':
    # Note that the process_id is unrelated to the jax.process_index() that JAX
    # will assign later. It is only used to establish initial communication and
    # for error handling, whereas jax.process_index() depends on the underlying
    # hardware mesh.
    assert process_id >= 0
    assert coordinator_address
    jax.distributed.initialize(coordinator_address, num_processes, process_id)
    index, count = jax.process_index(), jax.process_count()
    print(f'JAX multi-host initialized: ({process_id}) {index} / {count}')

  if isinstance(compute_dtype, str):
    compute_dtype = getattr(jnp, compute_dtype)
  # Publish the compute precision globally for all network modules.
  nets.COMPUTE_DTYPE = compute_dtype
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def get_named_axes():
  """Return all single-letter mapped axis names bound in the current context.

  Probes every lowercase letter with jax.lax.axis_index, which raises
  NameError for unbound axis names; bound names are collected in order.
  """
  def bound(letter):
    try:
      jax.lax.axis_index(letter)
      return True
    except NameError:
      return False
  return [letter for letter in string.ascii_lowercase if bound(letter)]
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def get_data_axes():
  """Return the data-parallel mesh axes ('d', 'f') if bound, else ()."""
  names = ('d', 'f')
  def bound(name):
    try:
      jax.lax.axis_index(name)
      return True
    except NameError:
      return False
  # all() short-circuits on the first unbound axis, like the original probe.
  return names if all(bound(name) for name in names) else ()
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def fetch_async(value):
  """Kick off asynchronous device-to-host copies and return immediately.

  Under multi-host, first restricts arrays to their locally addressable
  shards. The returned arrays still live on device; the copies complete in
  the background and only block when the values are actually read.
  """
  if is_multihost():
    value = to_local(value)
  # Private JAX scope so the explicit fetch passes the transfer guard.
  with jax._src.config.explicit_device_get_scope():
    [x.copy_to_host_async() for x in jax.tree.leaves(value)]
  return value
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def is_multihost():
  """Whether this run spans more than one JAX process."""
  return 1 < jax.process_count()
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def device_put(value, sharding):
  """Place host values onto devices according to `sharding`.

  Under multi-host, each process contributes only its local slice of the
  data to the resulting global arrays; otherwise this is a plain
  jax.device_put.
  """
  if is_multihost():
    # Private JAX scope so the explicit put passes the transfer guard.
    with jax._src.config.explicit_device_put_scope():
      value = jax.tree.map(
          lambda x: jax.make_array_from_process_local_data(sharding, x), value)
  else:
    value = jax.device_put(value, sharding)
  return value
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def local_sharding(sharding):
  """Map each NamedSharding onto this process' local submesh, keeping specs."""
  return jax.tree.map(lambda s: jax.sharding.NamedSharding(
      s.mesh.local_mesh, s.spec), sharding)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def to_local(x):
  """Convert every globally sharded array in a pytree to a local view."""
  return jax.tree.map(_to_local, x)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def _to_local(x):
  """Rewrap a globally sharded array as an array over the local submesh.

  Reuses the existing per-device shards without copying data; only the
  logical (global) shape shrinks to the portion addressable by this process.
  """
  shape, sharding = x.shape, x.sharding
  spec, mesh = sharding.spec, sharding.mesh
  # Pad the partition spec with None so it covers every array dimension.
  fullspec = [*spec, *([None] * (len(shape) - len(spec)))]
  assert len(shape) == len(fullspec)
  shard_shape = []
  for d, s in zip(shape, fullspec):
    if s is None:
      ms, lms = 1, 1
    else:
      if not isinstance(s, tuple):
        s = (s,)
      # Device counts this dimension is split over: global vs. local mesh.
      ms = math.prod(mesh.shape[si] for si in s)
      lms = math.prod(mesh.local_mesh.shape[si] for si in s)
    # Shrink the dimension to the locally addressable fraction.
    shard_shape.append(d // ms * lms)
  shard_shape = tuple(shard_shape)
  arrs = [arr.data for arr in x.addressable_shards]
  sharding_local = jax.sharding.NamedSharding(mesh.local_mesh, spec)
  x = jax.make_array_from_single_device_arrays(
      shard_shape, sharding_local, arrs)
  return x
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def to_global(x, global_sharding):
  """Inverse of to_local: rewrap local arrays under a global sharding.

  `global_sharding` may be a single NamedSharding applied to every leaf or
  a pytree of shardings matching `x`.
  """
  if isinstance(global_sharding, jax.sharding.NamedSharding):
    return jax.tree.map(lambda xi: _to_global(xi, global_sharding), x)
  else:
    return jax.tree.map(lambda xi, gs: _to_global(xi, gs), x, global_sharding)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def _to_global(x, global_sharding):
  """Rewrap a locally sharded array as a global array on the full mesh.

  Reuses the existing per-device shards without copying data; the logical
  shape grows from the local portion back to the global extent.
  """
  shape, sharding = x.shape, x.sharding
  spec = sharding.spec
  # Pad the partition spec with None so it covers every array dimension.
  fullspec = [*spec, *([None] * (len(shape) - len(spec)))]
  assert len(shape) == len(fullspec)
  shard_shape = []
  for d, s in zip(shape, fullspec):
    if s is None:
      ms, lms = 1, 1
    else:
      if not isinstance(s, tuple):
        s = (s,)
      # Device counts this dimension is split over: global vs. local mesh.
      ms = math.prod(global_sharding.mesh.shape[si] for si in s)
      lms = math.prod(sharding.mesh.shape[si] for si in s)
    # Grow the dimension from the local fraction back to the global size.
    shard_shape.append(d // lms * ms)
  shard_shape = tuple(shard_shape)
  arrs = [arr.data for arr in x.addressable_shards]
  x = jax.make_array_from_single_device_arrays(
      shard_shape, global_sharding, arrs)
  return x
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def move(xs, dst_sharding):
  """Resharding copy of a pytree onto `dst_sharding`.

  Under multi-host the transfer happens on the local submesh and the result
  is rewrapped as global arrays, since a plain jax.device_put cannot
  reshard across processes.
  """
  if is_multihost():
    xs = to_local(xs)
    xs = jax.device_put(xs, local_sharding(dst_sharding))
    xs = to_global(xs, dst_sharding)
  else:
    xs = jax.device_put(xs, dst_sharding)
  return xs
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def mesh(devices, shape, names):
  """Build a jax.sharding.Mesh from a comma-separated shape string.

  At most one dimension may be -1; it is inferred so the mesh covers all
  given devices exactly.
  """
  dims = [int(part) for part in shape.split(',')]
  # At most a single wildcard dimension is allowed.
  assert sum(dim == -1 for dim in dims) <= 1
  total = len(devices)
  known = math.prod(dim for dim in dims if dim != -1)
  assert total % known == 0
  dims = [total // known if dim == -1 else dim for dim in dims]
  assert math.prod(dims) == total
  grid = np.array(devices).reshape(dims)
  return jax.sharding.Mesh(grid, names)
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def grouped_ckpt_fns(params, chunksize):
  """Split params into size-bounded groups and compile ckpt fns per group.

  Greedily packs parameters (in dict order) into groups of at most
  `chunksize` bytes each; a single parameter larger than `chunksize` gets a
  group of its own. With `chunksize <= 0` everything goes into one group.
  The gather/shard functions of all groups are compiled concurrently.

  Returns:
    List of (keys, gather_fn, shard_fn) triples, one per group.
  """
  if chunksize <= 0:
    groups = [list(params.keys())]
  else:
    groups = []
    keys, size = [], 0
    for k, v in params.items():
      if size + v.nbytes <= chunksize:
        keys.append(k)
        size += v.nbytes
      else:
        # Flush the current group. Guard against appending an empty group
        # when the very first parameter already exceeds the chunk size,
        # which would trip the non-empty assertion below.
        if keys:
          groups.append(keys)
        keys, size = [k], v.nbytes
    keys and groups.append(keys)
  assert sum(len(keys) for keys in groups) == len(params)
  assert all(len(keys) for keys in groups)
  msg = f'Compiling {len(groups)} checkpoint groups...'
  elements.print(msg, color='yellow')
  maxsize = max(sum(params[k].nbytes for k in g) for g in groups)
  print(f'Largest checkpoint group: {maxsize / (1024 ** 3):.0f} GB')

  gather_fns, shard_fns = [], []
  with concurrent.futures.ThreadPoolExecutor(64) as pool:
    for keys in groups:
      gather_fn, shard_fn = ckpt_fn(
          {k: params[k] for k in keys}, compile=False)
      gather_fns.append(pool.submit(gather_fn.compile))
      shard_fns.append(pool.submit(shard_fn.compile))
    gather_fns = [future.result() for future in gather_fns]
    shard_fns = [future.result() for future in shard_fns]

  return list(zip(groups, gather_fns, shard_fns))
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def ckpt_fn(params, compile=True):
  """Build jitted gather/shard functions for checkpointing `params`.

  gather_fn replicates every parameter across the mesh (for saving);
  shard_fn scatters replicated parameters back to their original
  shardings (for loading). Both are identity functions whose in/out
  shardings make XLA perform the resharding.
  """
  # All parameters are assumed to live on the same mesh.
  mesh = params[list(params.keys())[0]].sharding.mesh
  mirrored = jax.sharding.NamedSharding(mesh, P())
  struct = lambda x, s: jax.ShapeDtypeStruct(x.shape, x.dtype, sharding=s)
  keys = params.keys()
  original = {k: params[k].sharding for k in keys}
  inspec = {k: struct(params[k], original[k]) for k in keys}
  gather_fn = jax.jit(lambda x: x, (original,), mirrored).lower(inspec)
  inspec = {k: struct(params[k], mirrored) for k in keys}
  shard_fn = jax.jit(lambda x: x, (mirrored,), original).lower(inspec)
  if compile:
    gather_fn = gather_fn.compile()
    shard_fn = shard_fn.compile()
  return gather_fn, shard_fn
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
# def node_mesh(mesh, mp_dims=('t',)):
|
| 290 |
+
# n_mp = math.prod(mesh.shape[d] for d in mp_dims)
|
| 291 |
+
# n_local = mesh.local_mesh.size
|
| 292 |
+
# n_mp_nodes = max(1, n_mp // n_local)
|
| 293 |
+
# total_nodes = mesh.size // n_local
|
| 294 |
+
# n_data_nodes = total_nodes // n_mp_nodes
|
| 295 |
+
# assert n_data_nodes * n_mp_nodes == total_nodes
|
| 296 |
+
# data_node_rank, model_node_rank = divmod(jax.process_index(), n_mp_nodes)
|
| 297 |
+
# data_node_size, model_node_size = n_data_nodes, n_mp_nodes
|
| 298 |
+
# return {
|
| 299 |
+
# 'data_node_rank': data_node_rank,
|
| 300 |
+
# 'data_node_size': data_node_size,
|
| 301 |
+
# 'model_node_rank': model_node_rank,
|
| 302 |
+
# 'model_node_size': model_node_size,
|
| 303 |
+
# }
|
| 304 |
+
|
models/embodied/jax/nets.py
ADDED
|
@@ -0,0 +1,670 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import math
|
| 3 |
+
from typing import Callable
|
| 4 |
+
|
| 5 |
+
import einops
|
| 6 |
+
import jax
|
| 7 |
+
import jax.ad_checkpoint as adc
|
| 8 |
+
import jax.numpy as jnp
|
| 9 |
+
import ninjax as nj
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
COMPUTE_DTYPE = jnp.bfloat16
|
| 13 |
+
LAYER_CALLBACK = lambda tensor, name: tensor
|
| 14 |
+
|
| 15 |
+
f32 = jnp.float32
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def cast(xs, force=False):
  """Cast floating-point leaves of a pytree to COMPUTE_DTYPE.

  With force=True, every leaf is converted regardless of its dtype.
  """
  def convert(leaf):
    if force or jnp.issubdtype(leaf.dtype, jnp.floating):
      return COMPUTE_DTYPE(leaf)
    return leaf
  return jax.tree.map(convert, xs)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def act(name):
  """Look up an activation function by name.

  Recognizes 'none', 'mish', 'relu2', and 'swiglu'; anything else resolves
  to the function of the same name in jax.nn.
  """
  if name == 'none':
    return lambda x: x
  if name == 'mish':
    return lambda x: x * jnp.tanh(jax.nn.softplus(x))
  if name == 'relu2':
    return lambda x: jnp.square(jax.nn.relu(x))
  if name == 'swiglu':
    def swiglu(x):
      # Gate half the features with SiLU and multiply into the other half.
      gate, value = jnp.split(x, 2, -1)
      return jax.nn.silu(gate) * value
    return swiglu
  return getattr(jax.nn, name)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def init(name):
  """Resolve an initializer spec to a callable.

  Callables pass through unchanged. Strings like 'uniform_out' split into
  distribution and fan mode; a bare distribution name defaults to fan 'in'.
  """
  if callable(name):
    return name
  if name.endswith(('_in', '_out', '_avg')):
    dist, _, fan = name.rpartition('_')
  else:
    dist, fan = name, 'in'
  return Initializer(dist, fan, 1.0)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def dropout(x, prob, training):
  """Randomly zero elements with probability `prob` during training.

  Uses inverted scaling so the expected activation magnitude is unchanged.
  Identity when `prob` is falsy or when not training.
  """
  if not training or not prob:
    return x
  keep_mask = jax.random.bernoulli(nj.seed(), 1.0 - prob, x.shape)
  return x * keep_mask / (1.0 - prob)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def symlog(x):
  """Symmetric log transform: sign(x) * log(1 + |x|)."""
  magnitude = jnp.log1p(jnp.abs(x))
  return jnp.sign(x) * magnitude
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def symexp(x):
  """Inverse of symlog: sign(x) * (exp(|x|) - 1)."""
  magnitude = jnp.expm1(jnp.abs(x))
  return jnp.sign(x) * magnitude
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def where(condition, xs, ys):
  """Tree-mapped select between two pytrees of equal structure.

  The boolean `condition` is broadcast over any trailing dimensions of
  each leaf; leaves of xs and ys must have matching shapes.
  """
  assert condition.dtype == bool, condition.dtype
  def select(x, y):
    assert x.shape == y.shape, (x.shape, y.shape)
    extra = list(range(condition.ndim, x.ndim))
    broadcastable = jnp.expand_dims(condition, extra)
    return jnp.where(broadcastable, x, y)
  return jax.tree.map(select, xs, ys)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def mask(xs, mask):
  """Zero out tree leaves where `mask` is False.

  NOTE: the parameter intentionally shadows this function's name; inside
  the body, `mask` refers to the boolean argument.
  """
  return where(mask, xs, jax.tree.map(jnp.zeros_like, xs))
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def available(*trees, bdims=None):
  """Compute a validity mask from sentinel values across pytrees.

  Floats use -inf and signed ints use -1 as the 'missing' sentinel;
  unsigned ints and bools have no sentinel and count as always present.
  With `bdims` set, each leaf's mask is reduced (all) over the non-batch
  dimensions. Masks of corresponding leaves across trees are ANDed.
  """
  def fn(*xs):
    masks = []
    for x in xs:
      if jnp.issubdtype(x.dtype, jnp.floating):
        mask = (x != -jnp.inf)
      elif jnp.issubdtype(x.dtype, jnp.signedinteger):
        mask = (x != -1)
      elif (
          jnp.issubdtype(x.dtype, jnp.unsignedinteger) or
          jnp.issubdtype(x.dtype, bool)):
        # No sentinel representable; treat every entry as available.
        shape = x.shape if bdims is None else x.shape[:bdims]
        mask = jnp.full(shape, True, bool)
      else:
        raise NotImplementedError(x.dtype)
      if bdims is not None:
        # Collapse event dims so the mask covers only the batch dims.
        mask = mask.all(tuple(range(bdims, mask.ndim)))
      masks.append(mask)
    return jnp.stack(masks, 0).all(0)
  return jax.tree.map(fn, *trees)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@functools.partial(jax.custom_vjp, nondiff_argnums=[1, 2])
def ensure_dtypes(x, fwd=None, bwd=None):
  """Identity op asserting dtypes on both passes of autodiff.

  Asserts that `x` has dtype `fwd` on the forward pass and that the
  incoming cotangent has dtype `bwd` on the backward pass. Both default to
  the global COMPUTE_DTYPE when None.
  """
  fwd = fwd or COMPUTE_DTYPE
  bwd = bwd or COMPUTE_DTYPE
  assert x.dtype == fwd, (x.dtype, fwd)
  return x
def ensure_dtypes_fwd(x, fwd=None, bwd=None):
  # Forward rule: run the assertion; no residuals needed for the backward.
  fwd = fwd or COMPUTE_DTYPE
  bwd = bwd or COMPUTE_DTYPE
  return ensure_dtypes(x, fwd, bwd), ()
def ensure_dtypes_bwd(fwd, bwd, cache, dx):
  # Backward rule: non-diff args (fwd, bwd) come first, then residuals.
  fwd = fwd or COMPUTE_DTYPE
  bwd = bwd or COMPUTE_DTYPE
  assert dx.dtype == bwd, (dx.dtype, bwd)
  return (dx,)
ensure_dtypes.defvjp(ensure_dtypes_fwd, ensure_dtypes_bwd)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def rms(xs):
  """Root-mean-square over all elements of all leaves in a pytree."""
  xs = jax.tree.leaves(xs)
  count = sum(x.size for x in xs)
  # Accumulate per-leaf sums of squares in float32 for numerical stability.
  sumsq = jnp.stack([f32(jnp.square(x).sum()) for x in xs]).sum()
  return jnp.sqrt(sumsq / f32(count))
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def rope(x, ts=None, inverse=False, maxlen=4096):
  """Apply rotary position embeddings (RoPE) to `x` of shape [B, T, H, D].

  ts: optional [B, T] integer positions; defaults to 0..T-1 per batch row.
  inverse: rotate by negated positions (undoes a prior rope application).
  maxlen: base of the geometric frequency schedule.
  """
  B, T, _, D = x.shape
  if ts is None:
    ts = jnp.ones(B, jnp.int32)[:, None] * jnp.arange(T)[None, :]  # [B, T]
  assert ts.shape == (B, T), (ts.shape, (B, T))
  if inverse:
    ts = -ts
  freq_exponents = (2.0 / D) * jnp.arange(D // 2)  # [D/2]
  timescale = maxlen ** freq_exponents
  radians = ts[:, :, None] / timescale[None, None, :]  # [B, T, D/2]
  # Insert a singleton head axis so the rotation broadcasts over heads.
  radians = radians[..., None, :].astype(x.dtype)  # [B, T, 1, D/2]
  sin, cos = jnp.sin(radians), jnp.cos(radians)
  x1, x2 = jnp.split(x, 2, axis=-1)  # [B, T, H, D/2]
  # 2D rotation of each (x1_i, x2_i) feature pair by its angle.
  res = jnp.concatenate([x1 * cos - x2 * sin, x2 * cos + x1 * sin], axis=-1)
  return res
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class Initializer:
  """Weight initializer configurable by distribution, fan mode, and scale."""

  def __init__(self, dist='trunc_normal', fan='in', scale=1.0):
    self.dist = dist
    self.fan = fan
    self.scale = scale

  def __call__(self, shape, dtype=jnp.float32, fshape=None):
    """Sample an array of `shape`; `fshape` overrides the fan computation."""
    shape = (shape,) if isinstance(shape, int) else tuple(shape)
    assert all(isinstance(x, int) for x in shape), (
        shape, [type(x) for x in shape])
    assert all(x > 0 for x in shape), shape
    fanin, fanout = self.compute_fans(shape if fshape is None else fshape)
    fan = {
        'avg': (fanin + fanout) / 2, 'in': fanin, 'out': fanout, 'none': 1,
    }[self.fan]
    if self.dist == 'zeros':
      x = jnp.zeros(shape, dtype)
    elif self.dist == 'uniform':
      limit = np.sqrt(1 / fan)
      x = jax.random.uniform(nj.seed(), shape, dtype, -limit, limit)
    elif self.dist == 'normal':
      x = jax.random.normal(nj.seed(), shape)
      x *= np.sqrt(1 / fan)
    elif self.dist == 'trunc_normal':
      x = jax.random.truncated_normal(nj.seed(), -2, 2, shape)
      # 1.1368 compensates the variance lost by truncating at 2 stddev.
      x *= 1.1368 * np.sqrt(1 / fan)
    elif self.dist == 'normed':
      x = jax.random.uniform(nj.seed(), shape, dtype, -1, 1)
      # Normalize columns to unit L2 norm.
      x *= (1 / jnp.linalg.norm(x.reshape((-1, shape[-1])), 2, 0))
    else:
      raise NotImplementedError(self.dist)
    x *= self.scale
    x = x.astype(dtype)
    return x

  def __repr__(self):
    return f'Initializer({self.dist}, {self.fan}, {self.scale})'

  def __eq__(self, other):
    attributes = ('dist', 'fan', 'scale')
    return all(getattr(self, k) == getattr(other, k) for k in attributes)

  @staticmethod
  def compute_fans(shape):
    """Return (fan_in, fan_out) for dense and convolutional kernel shapes."""
    if len(shape) == 0:
      return (1, 1)
    elif len(shape) == 1:
      return (1, shape[0])
    elif len(shape) == 2:
      return shape
    else:
      # Conv kernels: spatial dims multiply into both fans.
      space = math.prod(shape[:-2])
      return (shape[-2] * space, shape[-1] * space)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
class Embed(nj.Module):
  """Embedding lookup with a separate table per event position."""

  # Initializer for the embedding table.
  einit: str | Callable = Initializer('trunc_normal', 'out')
  # Sum the embeddings over the event dims instead of keeping them separate.
  combine: bool = False

  def __init__(self, classes, units, shape=()):
    self.classes = classes
    self.units = units
    self.shape = shape

  def __call__(self, x):
    # Split the input into leading batch dims and the declared event shape.
    batch_shape = x.shape[:x.ndim - len(self.shape)]
    event_shape = x.shape[x.ndim - len(self.shape):]
    assert event_shape == self.shape, (self.shape, x.shape)
    N = math.prod(self.shape)  # number of embedding slots per example
    K = self.classes
    D = self.units
    shape = (*self.shape, self.classes, self.units)
    table = self.value('table', init(self.einit), shape)
    table = table.reshape(N, K, D)
    table = table.astype(COMPUTE_DTYPE)
    index = x.reshape(-1, N)
    # Each slot i looks its class index up in its own table row.
    embed = table[jnp.arange(N), index]
    if self.combine:
      embed = embed.sum(-2).reshape(*batch_shape, self.units)
    else:
      embed = embed.reshape(*batch_shape, *self.shape, self.units)
    return embed
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
class Linear(nj.Module):
  """Dense layer that can produce a multi-dimensional output shape."""

  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  # Extra multiplier applied to the kernel at initialization time.
  outscale: float = 1.0

  def __init__(self, units):
    self.units = (units,) if isinstance(units, int) else tuple(units)

  def __call__(self, x):
    ensure_dtypes(x)
    # Compute as a flat matmul and reshape to the requested output shape.
    size = math.prod(self.units)
    shape = (x.shape[-1], size)
    x = x @ self.value('kernel', self._scaled_winit, shape).astype(x.dtype)
    if self.bias:
      x += self.value('bias', init(self.binit), size).astype(x.dtype)
    x = x.reshape((*x.shape[:-1], *self.units))
    return x

  def _scaled_winit(self, *args, **kwargs):
    # Apply outscale once at creation rather than on every forward pass.
    return init(self.winit)(*args, **kwargs) * self.outscale
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
class BlockLinear(nj.Module):
  """Block-diagonal dense layer: independent linear maps per feature block."""

  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  # Extra multiplier applied to the kernel at initialization time.
  outscale: float = 1.0

  def __init__(self, units, blocks):
    assert isinstance(units, int), (units, type(units))
    assert blocks <= units and units % blocks == 0, (blocks, units)
    self.units = units
    self.blocks = blocks

  def __call__(self, x):
    ensure_dtypes(x)
    assert x.shape[-1] % self.blocks == 0, (x.shape, self.blocks)
    insize = x.shape[-1]
    # One kernel per block: [blocks, in_per_block, out_per_block].
    shape = (self.blocks, insize // self.blocks, self.units // self.blocks)
    kernel = self.value('kernel', self._scaled_winit, shape).astype(x.dtype)
    x = x.reshape((*x.shape[:-1], self.blocks, insize // self.blocks))
    # Batched per-block matmul over the block axis k.
    x = jnp.einsum('...ki,kio->...ko', x, kernel)
    x = x.reshape((*x.shape[:-2], self.units))
    if self.bias:
      x += self.value('bias', init(self.binit), self.units).astype(x.dtype)
    return x

  def _scaled_winit(self, *args, **kwargs):
    return init(self.winit)(*args, **kwargs) * self.outscale
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
class Conv2D(nj.Module):
  """2D convolution in NHWC layout; transposed mode via input dilation."""

  transp: bool = False  # fractionally strided (upsampling) convolution
  groups: int = 1
  pad: str = 'same'
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  # Extra multiplier applied to the kernel at initialization time.
  outscale: float = 1.0

  def __init__(self, depth, kernel, stride=1):
    self.depth = depth
    self.kernel = (kernel,) * 2 if isinstance(kernel, int) else kernel
    self.stride = stride

  def __call__(self, x):
    ensure_dtypes(x)
    shape = (*self.kernel, x.shape[-1] // self.groups, self.depth)
    kernel = self.value('kernel', self._scaled_winit, shape).astype(x.dtype)
    if self.transp:
      assert self.pad == 'same', self.pad
      # Manual implementation of fractionally strided convolution because the
      # cuDNN implementation used by XLA has bugs and performance issues.
      # Upsample by repetition, then zero out all but every stride-th pixel.
      x = x.repeat(self.stride, -2).repeat(self.stride, -3)
      maskh = ((jnp.arange(x.shape[-3]) - 1) % self.stride == 0)[:, None]
      maskw = ((jnp.arange(x.shape[-2]) - 1) % self.stride == 0)[None, :]
      x *= (maskh * maskw)[:, :, None]
      stride = (1, 1)
    else:
      stride = (self.stride, self.stride)
    x = jax.lax.conv_general_dilated(
        x, kernel, stride, self.pad.upper(),
        feature_group_count=self.groups,
        dimension_numbers=('NHWC', 'HWIO', 'NHWC'))
    if self.bias:
      x += self.value('bias', init(self.binit), self.depth).astype(x.dtype)
    return x

  def _scaled_winit(self, *args, **kwargs):
    return init(self.winit)(*args, **kwargs) * self.outscale
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
class Conv3D(nj.Module):
  """3D convolution in NTHWC layout, with optional transposed mode."""

  transp: bool = False
  groups: int = 1
  pad: str = 'same'
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')

  def __init__(self, depth, kernel, stride=1):
    self.depth = depth
    self.kernel = (kernel,) * 3 if isinstance(kernel, int) else kernel
    self.stride = (stride,) * 3 if isinstance(stride, int) else stride

  def __call__(self, x):
    ensure_dtypes(x)
    if self.transp:
      # conv_transpose does not support grouped convolutions here.
      assert self.groups == 1, self.groups
      shape = (*self.kernel, x.shape[-1], self.depth)
      kernel = self.value('kernel', init(self.winit), shape).astype(x.dtype)
      x = jax.lax.conv_transpose(
          x, kernel, self.stride, self.pad.upper(),
          dimension_numbers=('NTHWC', 'THWIO', 'NTHWC'))
    else:
      shape = (*self.kernel, x.shape[-1] // self.groups, self.depth)
      kernel = self.value('kernel', init(self.winit), shape).astype(x.dtype)
      x = jax.lax.conv_general_dilated(
          x, kernel, self.stride, self.pad.upper(),
          feature_group_count=self.groups,
          dimension_numbers=('NTHWC', 'THWIO', 'NTHWC'))
    if self.bias:
      x += self.value('bias', init(self.binit), self.depth).astype(x.dtype)
    return x
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
class Norm(nj.Module):
  """RMS or layer normalization, computed in float32 for stability."""

  axis: tuple = (-1,)
  eps: float = 1e-4
  scale: bool = True  # learn a multiplicative parameter
  shift: bool = True  # learn an additive parameter (layer norm only)

  def __init__(self, impl):
    # Impl strings like 'rms1em8' encode the epsilon as 10 ** -8.
    if '1em' in impl:
      impl, exp = impl.split('1em')
      self._fields['eps'] = 10 ** -int(exp)
    self.impl = impl

  def __call__(self, x):
    ensure_dtypes(x)
    dtype = x.dtype
    x = f32(x)  # normalize in float32 regardless of the compute dtype
    axis = [a % x.ndim for a in self.axis]
    # Parameter shape: normalized dims keep their size, others are 1.
    shape = [x.shape[i] if i in axis else 1 for i in range(min(axis), x.ndim)]
    if self.impl == 'none':
      pass
    elif self.impl == 'rms':
      mean2 = jnp.square(x).mean(axis, keepdims=True)
      mean2 = adc.checkpoint_name(mean2, 'small')  # cheap to keep for remat
      scale = self._scale(shape, x.dtype)
      x = x * (jax.lax.rsqrt(mean2 + self.eps) * scale)
    elif self.impl == 'layer':
      mean = x.mean(axis, keepdims=True)
      mean2 = jnp.square(x).mean(axis, keepdims=True)
      mean2 = adc.checkpoint_name(mean2, 'small')
      # Clamp to avoid tiny negative variance from floating-point error.
      var = jnp.maximum(0, mean2 - jnp.square(mean))
      var = adc.checkpoint_name(var, 'small')
      scale = self._scale(shape, x.dtype)
      shift = self._shift(shape, x.dtype)
      x = (x - mean) * (jax.lax.rsqrt(var + self.eps) * scale) + shift
    else:
      raise NotImplementedError(self.impl)
    x = x.astype(dtype)
    return x

  def _scale(self, shape, dtype):
    # Constant ones when the learned scale is disabled.
    if not self.scale:
      return jnp.ones(shape, dtype)
    return self.value('scale', jnp.ones, shape, f32).astype(dtype)

  def _shift(self, shape, dtype):
    # Constant zeros when the learned shift is disabled.
    if not self.shift:
      return jnp.zeros(shape, dtype)
    return self.value('shift', jnp.zeros, shape, f32).astype(dtype)
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
class Attention(nj.Module):
  """Multi-head attention with optional grouped-query attention (GQA).

  When kv_heads < heads, several query heads share each key/value head.
  Softmax logits are computed in float32 for stability.
  """

  heads: int = 8
  # Number of key/value heads; 0 means same as heads (regular MHA).
  kv_heads: int = 0
  dropout: float = 0.0
  # Apply rotary position embeddings to queries and keys.
  rope: bool = True
  # Norm impl applied to queries and keys (e.g. 'rms'); 'none' disables.
  qknorm: str = 'none'
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  outscale: float = 1.0

  def __call__(self, x, mask=None, ts=None, training=True):
    kw = dict(bias=self.bias, winit=self.winit, binit=self.binit)
    B, T, D = x.shape
    kv_heads = self.kv_heads or self.heads
    assert self.heads % kv_heads == 0
    # Number of query heads per key/value head.
    head_ratio = self.heads // kv_heads
    if head_ratio == 1:
      # Regular attention: fused QKV projection.
      qkv = self.sub('qkv', Linear, 3 * D, **kw)(x)
      q, k, v = jnp.split(qkv, 3, -1)
    else:
      # GQA: keys/values are projected to fewer heads than queries.
      q = self.sub('q', Linear, D, **kw)(x)
      k = self.sub('k', Linear, D // head_ratio, **kw)(x)
      v = self.sub('v', Linear, D // head_ratio, **kw)(x)
    q = einops.rearrange(q, 'b t (h d) -> b t h d', h=self.heads)
    k = einops.rearrange(k, 'b t (h d) -> b t h d', h=kv_heads)
    v = einops.rearrange(v, 'b t (h d) -> b t h d', h=kv_heads)

    if self.qknorm != 'none':
      q = self.sub('normq', Norm, self.qknorm)(q)
      k = self.sub('normk', Norm, self.qknorm)(k)

    if self.rope:
      # ts carries the (optional) timestep indices for rotary embeddings.
      q = rope(q, ts)
      k = rope(k, ts)

    # Group query heads: h kv-heads, g query heads per group.
    q = einops.rearrange(q, 'b t (h g) d -> b t h g d', h=kv_heads)
    logits = einops.einsum(q, k, 'b tq h g d, b tk h d -> b h g tq tk')
    logits = logits * (1.0 / np.sqrt(k.shape[-1]))
    logits = f32(logits)
    if mask is not None:
      Tq, Tk = q.shape[1], k.shape[1]
      assert mask.shape == (B, Tq, Tk), (mask.shape, (B, Tq, Tk))
      mask = einops.rearrange(mask, 'b tq tk -> b 1 1 tq tk')
      # Large negative value instead of -inf avoids NaNs in the softmax.
      logits = jnp.where(mask, logits, -1e30)
    weights = jax.nn.softmax(logits)
    weights = weights.astype(x.dtype)
    weights = dropout(weights, self.dropout, training)
    x = einops.einsum(weights, v, 'b h g tq tk, b tk h d -> b tq h g d')
    x = einops.rearrange(x, 'b t h g d -> b t (h g d)')
    x = self.sub('proj', Linear, D, **kw, outscale=self.outscale)(x)
    return x
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
class DictConcat:
  """Encodes a dict of flat inputs into one concatenated feature vector.

  Discrete spaces are one-hot encoded; continuous spaces are squished
  and cast to the compute dtype. Keys are processed in sorted order so
  the feature layout is deterministic.
  """

  def __init__(self, spaces, fdims, squish=lambda x: x):
    # fdims is the number of trailing feature dims kept before flattening.
    assert 1 <= fdims, fdims
    self.keys = sorted(spaces.keys())
    self.spaces = spaces
    self.fdims = fdims
    self.squish = squish

  def __call__(self, xs):
    assert all(k in xs for k in self.spaces), (self.spaces, xs.keys())
    # Infer the number of leading batch dims from the first input.
    bdims = xs[self.keys[0]].ndim - len(self.spaces[self.keys[0]].shape)
    ys = []
    for key in self.keys:
      space = self.spaces[key]
      x = xs[key]
      # Zero out unavailable (e.g. padded) entries before and after encoding.
      m = available(x, bdims=bdims)
      x = mask(x, m)
      assert x.shape[bdims:] == space.shape, (key, bdims, space.shape, x.shape)
      if space.dtype == jnp.uint8 and len(space.shape) in (2, 3):
        raise NotImplementedError('Images are not supported.')
      elif space.discrete:
        # Requires a uniform class count across all event dimensions.
        classes = np.asarray(space.classes).flatten()
        assert (classes == classes[0]).all(), classes
        classes = classes[0].item()
        x = x.astype(jnp.int32)
        x = jax.nn.one_hot(x, classes, dtype=COMPUTE_DTYPE)
      else:
        x = self.squish(x)
        x = x.astype(COMPUTE_DTYPE)
      x = mask(x, m)
      # Flatten everything beyond the batch and leading feature dims.
      x = x.reshape((*x.shape[:bdims + self.fdims - 1], -1))
      ys.append(x)
    return jnp.concatenate(ys, -1)
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
class DictEmbed(nj.Module):
  """Embeds a dict of inputs into one vector of size units by summation.

  Discrete inputs use either an embedding lookup or one-hot + linear;
  continuous inputs are squished, flattened, and linearly projected. A
  learned initial embedding is always included in the sum, so the output
  is well-defined even for an empty spaces dict.
  """

  squish: Callable = lambda x: x
  padone: bool = True
  bias: bool = True
  einit: str | Callable = Initializer('trunc_normal', 'out')
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  # Either 'lookup' (embedding table) or 'onehot' (one-hot + linear).
  impl: str = 'onehot'

  def __init__(self, spaces, units):
    self.keys = sorted(spaces.keys())
    self.spaces = spaces
    self.units = units
    self.ekw = dict(einit=self.einit)
    self.lkw = dict(bias=self.bias, winit=self.winit, binit=self.binit)

  def __call__(self, xs, bshape):
    assert isinstance(bshape, tuple), bshape
    assert all(k in xs for k in self.spaces), (self.spaces, xs.keys())
    ys = []
    # Learned base embedding shared across all inputs.
    init = self.value('init', self.einit, (self.units,))
    init = jnp.broadcast_to(init, (*bshape, self.units))
    init = COMPUTE_DTYPE(init)
    ys.append(init)
    for key in self.keys:
      try:
        space = self.spaces[key]
        x = xs[key]
        assert x.dtype == space.dtype, (key, space.dtype, x.dtype, x.shape)
        # Zero out unavailable (e.g. padded) entries before and after.
        m = available(x, bdims=len(bshape))
        x = mask(x, m)
        if space.discrete:
          if space.dtype == jnp.uint8 and len(space.shape) in (2, 3):
            raise NotImplementedError('Images are not supported.')
          classes = int(np.asarray(space.classes).max())
          assert classes <= 256, (key, space, classes)
          if self.impl == 'lookup':
            x = self.sub(
                key, Embed, classes, self.units, space.shape,
                combine=True, **self.ekw)(x)
            # x = x.reshape((*x.shape[:len(bshape)], -1))
          elif self.impl == 'onehot':
            x = jax.nn.one_hot(x, classes, dtype=COMPUTE_DTYPE)
            x = x.reshape((*x.shape[:len(bshape)], -1))
            x = self.sub(key, Linear, self.units, **self.lkw)(x)
          else:
            raise NotImplementedError(self.impl)
        else:
          x = self.squish(x)
          x = x.astype(COMPUTE_DTYPE)
          x = x.reshape((*x.shape[:len(bshape)], -1))
          x = self.sub(key, Linear, self.units, **self.lkw)(x)
        x = mask(x, m)
        ys.append(x)
      except Exception:
        # Attach the offending key before re-raising, since errors inside
        # the encoders are otherwise hard to attribute.
        print(f"Error encoding key '{key}' with space {space}.")
        raise
    # Sum rather than concatenate so the output width is independent of
    # the number of inputs.
    x = sum(ys)
    return x
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
class MLP(nj.Module):
  """Multi-layer perceptron of repeated Linear -> Norm -> activation blocks.

  Leading dims are flattened into a single batch dim for the dense
  layers and restored on the way out.
  """

  act: str = 'silu'
  norm: str = 'rms'
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')

  def __init__(self, layers=5, units=1024):
    self.layers = layers
    self.units = units
    self.kw = dict(bias=self.bias, winit=self.winit, binit=self.binit)

  def __call__(self, x):
    batch_shape = x.shape[:-1]
    # Collapse all leading dims into one batch dimension.
    feat = x.astype(COMPUTE_DTYPE).reshape([-1, x.shape[-1]])
    actfn = act(self.act)
    for index in range(self.layers):
      feat = self.sub(f'linear{index}', Linear, self.units, **self.kw)(feat)
      feat = self.sub(f'norm{index}', Norm, self.norm)(feat)
      feat = actfn(feat)
    return feat.reshape((*batch_shape, feat.shape[-1]))
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
class Transformer(nj.Module):
  """Pre-norm transformer encoder stack with a final output norm.

  Each layer applies attention and a feed-forward block, both with
  residual connections. The feed-forward block optionally uses a gated
  linear unit (GLU).
  """

  units: int = 1024
  layers: int = 12
  heads: int = 8
  # Feed-forward expansion factor relative to the model width.
  ffup: int = 4
  act: str = 'silu'
  norm: str = 'rms'
  # Use a gated feed-forward (SwiGLU-style) instead of a plain FF block.
  glu: bool = False
  rope: bool = True
  qknorm: str = 'none'
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  outscale: float = 1.0

  def __call__(self, x, mask=None, ts=None, training=True):
    kw = {k: getattr(self, k) for k in ('bias', 'winit', 'binit')}
    ak = {k: getattr(self, k) for k in ('heads', 'rope', 'qknorm', 'outscale')}
    D = x.shape[-1]
    assert D == self.units, (D, self.units)
    for i in range(self.layers):
      with nj.scope(f'layer{i}'):
        skip = x
        x = self.sub('norm1', Norm, self.norm)(x)
        x = self.sub('mha', Attention, **kw, **ak)(x, mask, ts, training)
        x += skip
        skip = x
        x = self.sub('norm2', Norm, self.norm)(x)
        if self.glu:
          # 2/3 reduction (rounded down to a multiple of 32) keeps the
          # gated block's parameter count comparable to the ungated one.
          U = max(D, int((D * self.ffup * 2 / 3) // 32 * 32))
          ff1 = self.sub('ff1', Linear, U, **kw)
          ff2 = self.sub('ff2', Linear, U, **kw)
          ff3 = self.sub('ff3', Linear, D, **kw, outscale=self.outscale)
          x = ff3(act(self.act)(ff1(x)) * ff2(x))
        else:
          ff1 = self.sub('ff1', Linear, D * self.ffup, **kw)
          ff2 = self.sub('ff2', Linear, D, **kw, outscale=self.outscale)
          x = ff2(act(self.act)(ff1(x)))
        x += skip
    x = self.sub('outnorm', Norm, self.norm)(x)
    return x
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
class GRU(nj.Module):
  """Gated recurrent unit with input normalization and episode resets."""

  units: int = 1024
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  norm: str = 'rms'
  # Bias added to the update-gate logit; negative values bias the cell
  # towards retaining its previous state early in training.
  update_bias: float = -1.0

  def initial(self, batch_size):
    """Returns the zero-initialized recurrent state."""
    return jnp.zeros((batch_size, self.units), COMPUTE_DTYPE)

  def __call__(self, carry, inputs, resets, single=False):
    """Runs the GRU over a sequence, or one step when single=True.

    Returns (carry, outputs); outputs are the carries at each timestep.
    """
    assert carry.dtype == COMPUTE_DTYPE, carry.dtype
    assert inputs.dtype == COMPUTE_DTYPE, inputs.dtype
    assert resets.dtype == bool, resets.dtype
    if single:
      return self.step(carry, inputs, resets)
    carry, outputs = nj.scan(
        lambda carry, args: self.step(carry, *args),
        carry, (inputs, resets), axis=1)
    return carry, outputs

  def step(self, carry, inp, reset):
    """Single GRU step; zeroes the carry where reset is True."""
    # NOTE: When passing previous actions as input, ensure to zero out past
    # actions on is_first and clip actions to bounds if needed.
    kw = dict(bias=self.bias, winit=self.winit, binit=self.binit)
    carry = mask(carry, ~reset)
    x = jnp.concatenate([carry, inp], -1)
    x = self.sub('norm', Norm, self.norm)(x)
    # Fused projection for the reset, candidate, and update gates.
    x = self.sub('linear', Linear, 3 * self.units, **kw)(x)
    res, cand, update = jnp.split(x, 3, -1)
    cand = jnp.tanh(jax.nn.sigmoid(res) * cand)
    update = jax.nn.sigmoid(update + self.update_bias)
    carry = output = update * cand + (1 - update) * carry
    return carry, output
|
| 670 |
+
|
models/embodied/jax/opt.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import jax
|
| 4 |
+
import jax.numpy as jnp
|
| 5 |
+
import ninjax as nj
|
| 6 |
+
import optax
|
| 7 |
+
|
| 8 |
+
from . import internal
|
| 9 |
+
from . import nets
|
| 10 |
+
|
| 11 |
+
f32 = jnp.float32
|
| 12 |
+
i32 = jnp.int32
|
| 13 |
+
sg = jax.lax.stop_gradient
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Optimizer(nj.Module):
  """Applies an optax optimizer to the parameters of a set of modules.

  Handles gradient computation, cross-device gradient averaging, and
  dynamic loss scaling when computing in float16.
  """

  # Scope depth at which the parameter-count summary is aggregated.
  summary_depth: int = 2

  def __init__(self, modules, opt):
    modules = modules if isinstance(modules, (list, tuple)) else (modules,)
    self.modules = modules
    self.opt = opt
    self.step = nj.Variable(jnp.array, 0, i32, name='step')
    # Dynamic loss scaling is only needed for float16; bfloat16/float32
    # have sufficient range.
    self.scaling = (nets.COMPUTE_DTYPE == jnp.float16)
    if self.scaling:
      self.opt = optax.apply_if_finite(self.opt, max_consecutive_errors=1000)
      self.grad_scale = nj.Variable(jnp.array, 1e4, f32, name='grad_scale')
      self.good_steps = nj.Variable(jnp.array, 0, i32, name='good_steps')

  def __call__(self, lossfn, *args, has_aux=False, **kwargs):
    """Computes gradients of lossfn and applies one optimizer update.

    Returns a metrics dict, plus the loss function's aux output when
    has_aux is True.
    """
    metrics = {}

    def lossfn2(*args, **kwargs):
      outs = lossfn(*args, **kwargs)
      loss, aux = outs if has_aux else (outs, None)
      assert loss.dtype == f32, (self.name, loss.dtype)
      assert loss.shape == (), (self.name, loss.shape)
      if self.scaling:
        # Scale the loss up so float16 gradients do not underflow.
        loss *= sg(self.grad_scale.read())
      return loss, aux

    loss, params, grads, aux = nj.grad(
        lossfn2, self.modules, has_aux=True)(*args, **kwargs)
    if self.scaling:
      # Report the unscaled loss to the caller.
      loss *= 1 / self.grad_scale.read()

    counts = {k: math.prod(v.shape) for k, v in params.items()}
    if nj.creating():
      print(self._summarize_params(counts, self.summary_depth))

    # Average gradients across data-parallel devices if present.
    axes = internal.get_data_axes()
    if axes:
      grads = jax.tree.map(lambda x: jax.lax.pmean(x, axes), grads)

    if self.scaling:
      # Undo the loss scaling on the gradients before the update.
      invscale = 1 / self.grad_scale.read()
      grads = jax.tree.map(lambda x: x * invscale, grads)

    state = self.sub('state', nj.Tree, self.opt.init, params)
    updates, new_state = self.opt.update(grads, state.read(), params)
    nj.context().update(optax.apply_updates(params, updates))
    state.write(new_state)
    grad_norm = optax.global_norm(grads)
    if self.scaling:
      self._update_scale(grads, jnp.isfinite(grad_norm))
      grad_norm = jnp.where(jnp.isfinite(grad_norm), grad_norm, jnp.nan)
      # Only count steps whose gradients were finite and thus applied.
      self.step.write(self.step.read() + i32(jnp.isfinite(grad_norm)))
      metrics['grad_scale'] = self.grad_scale.read()
      metrics['grad_overflow'] = f32(~jnp.isfinite(grad_norm))
    else:
      self.step.write(self.step.read() + 1)
    metrics['loss'] = loss.mean()
    metrics['updates'] = self.step.read()
    metrics['grad_norm'] = grad_norm
    metrics['grad_rms'] = nets.rms(grads)
    metrics['update_rms'] = nets.rms(updates)
    metrics['param_rms'] = nets.rms([x.values for x in self.modules])
    metrics['param_count'] = jnp.array(list(counts.values()), f32).sum()
    metrics = {f'{self.name}/{k}': v for k, v in metrics.items()}
    return (metrics, aux) if has_aux else metrics

  def _update_scale(self, grads, finite):
    """Adjusts the loss scale: halve on overflow, double after 1000
    consecutive finite steps, clipped to [1e-4, 1e5]."""
    keep = (finite & (self.good_steps.read() < 1000))
    incr = (finite & (self.good_steps.read() >= 1000))
    decr = ~finite
    self.good_steps.write(i32(keep) * (self.good_steps.read() + 1))
    self.grad_scale.write(jnp.clip(
        f32(keep) * self.grad_scale.read() +
        f32(incr) * self.grad_scale.read() * 2 +
        f32(decr) * self.grad_scale.read() / 2, 1e-4, 1e5))
    return finite

  def _summarize_params(self, counts, depth):
    """Formats a parameter-count summary grouped by scope prefix."""
    lines = []
    pfxs = []
    for key in counts:
      parts = key.split('/')
      pfxs += ['/'.join(parts[: i + 1]) for i in range(min(len(parts), depth))]
    subcounts = {
        prefix: sum(v for k, v in counts.items() if k.startswith(prefix))
        for prefix in set(pfxs)}
    lines = [f'Optimizer {self.name} has {sum(counts.values()):,} params:']
    for prefix, count in sorted(subcounts.items(), key=lambda x: -x[1]):
      lines.append(f'{count:>14,} {prefix}')
    return '\n'.join(lines)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def clip_by_agc(clip=0.3, pmin=1e-3):
  """Adaptive gradient clipping (AGC).

  Scales each update down so its L2 norm does not exceed `clip` times
  the L2 norm of the corresponding parameter, with `pmin` as a lower
  bound on the parameter norm. A falsy `clip` disables clipping.
  """

  def init_fn(params):
    # Stateless transformation.
    return ()

  def update_fn(updates, state, params=None):
    if not clip:
      return updates, ()

    def rescale(param, update):
      update_norm = jnp.linalg.norm(update.flatten(), 2)
      param_norm = jnp.linalg.norm(param.flatten(), 2)
      limit = clip * jnp.maximum(pmin, param_norm)
      # Shrink only when the update norm exceeds the limit.
      return update * (1 / jnp.maximum(1.0, update_norm / limit))

    return jax.tree.map(rescale, params, updates), ()

  return optax.GradientTransformation(init_fn, update_fn)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def scale_by_rms(beta=0.999, eps=1e-8):
  """RMSProp-style scaling with bias correction (Adam without momentum)."""

  def init_fn(params):
    # Second-moment accumulator in float32 plus a step counter.
    second_moment = jax.tree.map(lambda t: jnp.zeros_like(t, f32), params)
    return (jnp.zeros((), i32), second_moment)

  def update_fn(updates, state, params=None):
    count, second_moment = state
    count = optax.safe_int32_increment(count)
    second_moment = jax.tree.map(
        lambda v, u: beta * v + (1 - beta) * (u * u), second_moment, updates)
    corrected = optax.bias_correction(second_moment, beta, count)
    scaled = jax.tree.map(
        lambda u, v: u / (jnp.sqrt(v) + eps), updates, corrected)
    return scaled, (count, second_moment)

  return optax.GradientTransformation(init_fn, update_fn)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def scale_by_momentum(beta=0.9, nesterov=False):
  """Bias-corrected (optionally Nesterov) momentum scaling."""

  def init_fn(params):
    momentum = jax.tree.map(lambda t: jnp.zeros_like(t, f32), params)
    return (jnp.zeros((), i32), momentum)

  def update_fn(updates, state, params=None):
    count, momentum = state
    count = optax.safe_int32_increment(count)
    momentum = optax.update_moment(updates, momentum, beta, 1)
    if nesterov:
      # Nesterov look-ahead: apply the momentum update one extra time
      # before bias correction, without writing it back to the state.
      lookahead = optax.update_moment(updates, momentum, beta, 1)
      corrected = optax.bias_correction(lookahead, beta, count)
    else:
      corrected = optax.bias_correction(momentum, beta, count)
    return corrected, (count, momentum)

  return optax.GradientTransformation(init_fn, update_fn)
|
models/embodied/jax/outs.py
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
import jax
|
| 4 |
+
import jax.numpy as jnp
|
| 5 |
+
|
| 6 |
+
i32 = jnp.int32
|
| 7 |
+
f32 = jnp.float32
|
| 8 |
+
sg = jax.lax.stop_gradient
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Output:
  """Abstract base for predictive outputs (distribution-like objects).

  Subclasses implement pred/sample/logp/entropy/kl; loss and prob are
  derived from logp.
  """

  def __repr__(self):
    prediction = self.pred()
    return (
        f'{type(self).__name__}'
        f'({prediction.dtype}, shape={prediction.shape})')

  def pred(self):
    """Returns the point prediction of the output."""
    raise NotImplementedError

  def sample(self, seed, shape=()):
    """Draws samples with the given leading sample shape."""
    raise NotImplementedError

  def logp(self, event):
    """Returns the log-probability (or log-density) of the event."""
    raise NotImplementedError

  def entropy(self):
    raise NotImplementedError

  def kl(self, other):
    raise NotImplementedError

  def loss(self, target):
    # Negative log-likelihood; gradients do not flow into the target.
    return -self.logp(sg(target))

  def prob(self, event):
    return jnp.exp(self.logp(event))
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class Agg(Output):
  """Wraps an output, aggregating the trailing `dims` event dimensions.

  Losses, entropies, and KLs are reduced with `agg` (sum by default);
  log-probabilities always sum, treating the dimensions as independent.
  """

  def __init__(self, output, dims, agg=jnp.sum):
    self.output = output
    self.axes = [-i for i in range(1, dims + 1)]
    self.agg = agg

  def __repr__(self):
    prediction = self.pred()
    inner = type(self.output).__name__
    return (
        f'{inner}({prediction.dtype}, shape={prediction.shape}, '
        f'agg={len(self.axes)})')

  def pred(self):
    return self.output.pred()

  def sample(self, seed, shape=()):
    return self.output.sample(seed, shape)

  def loss(self, target):
    return self.agg(self.output.loss(target), self.axes)

  def logp(self, event):
    return self.output.logp(event).sum(self.axes)

  def prob(self, event):
    # NOTE(review): summing probabilities over event dims mirrors logp's
    # sum, but for independent dims the joint probability would be a
    # product — confirm this is intended before relying on prob here.
    return self.output.prob(event).sum(self.axes)

  def entropy(self):
    return self.agg(self.output.entropy(), self.axes)

  def kl(self, other):
    assert isinstance(other, Agg), other
    return self.agg(self.output.kl(other.output), self.axes)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class Frozen:
  """Proxy that stops gradients flowing out of the wrapped output.

  Attribute access returns the wrapped output's method with its result
  passed through stop_gradient. Dunder lookups raise AttributeError so
  Python protocol probing keeps working; other missing attributes raise
  ValueError.
  """

  def __init__(self, output):
    self.output = output

  def __getattr__(self, name):
    if name.startswith('__'):
      raise AttributeError(name)
    try:
      method = getattr(self.output, name)
    except AttributeError:
      raise ValueError(name)

    def stopped(*args, **kwargs):
      return sg(method(*args, **kwargs))

    return stopped
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class Concat:
  """Combines several outputs that each cover a slice of one event axis.

  Method calls are forwarded to every sub-output with the array
  arguments sliced to that output's segment of `axis`, and the
  per-output results are concatenated back along the same axis.
  """

  def __init__(self, outputs, midpoints, axis):
    # midpoints are the split indices between consecutive outputs.
    assert len(midpoints) == len(outputs) - 1
    self.outputs = outputs
    self.midpoints = tuple(midpoints)
    self.axis = axis

  def __getattr__(self, name):
    # Dunder lookups fall through; other missing names raise ValueError.
    if name.startswith('__'):
      raise AttributeError(name)
    try:
      fns = [getattr(x, name) for x in self.outputs]
    except AttributeError:
      raise ValueError(name)
    return functools.partial(self._wrapper, fns)

  def _wrapper(self, fns, *args, **kwargs):
    # Segment boundaries: [None:m0], [m0:m1], ..., [mk:None].
    los = (None,) + self.midpoints
    his = self.midpoints + (None,)
    results = []
    for fn, lo, hi in zip(fns, los, his):
      # Build a slice tuple selecting [lo:hi] on self.axis only.
      # NOTE(review): assumes a non-negative axis index, since the slice
      # tuple has axis + 1 entries — confirm with callers.
      segment = [slice(None, None, None)] * (self.axis + 1)
      segment[self.axis] = slice(lo, hi, None)
      segment = tuple(segment)
      a, kw = jax.tree.map(lambda x: x[segment], (args, kwargs))
      results.append(fn(*a, **kw))
    return jax.tree.map(lambda *xs: jnp.concatenate(xs, self.axis), *results)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class MSE(Output):
  """Squared-error output; the loss compares mean to squash(target)."""

  def __init__(self, mean, squash=None):
    self.mean = f32(mean)
    # Identity squash by default.
    self.squash = squash or (lambda x: x)

  def pred(self):
    return self.mean

  def loss(self, target):
    assert jnp.issubdtype(target.dtype, jnp.floating), target.dtype
    assert self.mean.shape == target.shape, (self.mean.shape, target.shape)
    squashed = sg(self.squash(f32(target)))
    return jnp.square(self.mean - squashed)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class Huber(Output):
  """Charbonnier (soft Huber) output: sqrt(d^2 + eps^2) - eps.

  Quadratic near zero residual, asymptotically linear for large ones.
  """

  def __init__(self, mean, eps=1.0):
    self.mean = f32(mean)
    self.eps = eps

  def pred(self):
    return self.mean

  def loss(self, target):
    assert jnp.issubdtype(target.dtype, jnp.floating), target.dtype
    assert self.mean.shape == target.shape, (self.mean.shape, target.shape)
    residual = self.mean - sg(f32(target))
    return jnp.sqrt(jnp.square(residual) + jnp.square(self.eps)) - self.eps
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
class Normal(Output):
  """Diagonal Gaussian with fixed (non-learned) standard deviation."""

  def __init__(self, mean, stddev=1.0):
    self.mean = f32(mean)
    self.stddev = jnp.broadcast_to(f32(stddev), self.mean.shape)

  def pred(self):
    return self.mean

  def sample(self, seed, shape=()):
    noise = jax.random.normal(seed, shape + self.mean.shape, f32)
    return self.mean + noise * self.stddev

  def logp(self, event):
    assert jnp.issubdtype(event.dtype, jnp.floating), event.dtype
    return jax.scipy.stats.norm.logpdf(f32(event), self.mean, self.stddev)

  def entropy(self):
    # Differential entropy: 0.5 * log(2*pi*sigma^2) + 0.5.
    return 0.5 + 0.5 * jnp.log(2 * jnp.pi * jnp.square(self.stddev))

  def kl(self, other):
    assert isinstance(other, type(self)), (self, other)
    # Closed-form KL divergence between two Gaussians.
    ratio = jnp.square(self.stddev / other.stddev)
    shift = jnp.square(other.mean - self.mean) / jnp.square(other.stddev)
    logterm = 2 * (jnp.log(other.stddev) - jnp.log(self.stddev))
    return 0.5 * (ratio + shift + logterm - 1)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
class Binary(Output):
  """Bernoulli output over {0, 1} events, parameterized by a logit."""

  def __init__(self, logit):
    self.logit = f32(logit)

  def pred(self):
    # Most likely outcome: True iff probability exceeds 0.5.
    return (self.logit > 0)

  def logp(self, event):
    event = f32(event)
    # log p and log(1 - p) via log-sigmoid for numerical stability.
    logp = jax.nn.log_sigmoid(self.logit)
    lognotp = jax.nn.log_sigmoid(-self.logit)
    return event * logp + (1 - event) * lognotp

  def sample(self, seed, shape=()):
    prob = jax.nn.sigmoid(self.logit)
    # Fix: jax.random.bernoulli(key, p, shape) has no axis parameter
    # (unlike jax.random.categorical). The previous call passed -1 as
    # the shape and the real shape as a fourth positional argument,
    # which raises a TypeError at runtime.
    return jax.random.bernoulli(seed, prob, shape + self.logit.shape)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
class Categorical(Output):
  """Categorical distribution over the last axis of `logits`.

  With unimix > 0, the probabilities are mixed with a uniform
  distribution, lower-bounding every class probability.
  """

  def __init__(self, logits, unimix=0.0):
    logits = f32(logits)
    if unimix:
      probs = jax.nn.softmax(logits, -1)
      uniform = jnp.ones_like(probs) / probs.shape[-1]
      probs = (1 - unimix) * probs + unimix * uniform
      logits = jnp.log(probs)
    self.logits = logits

  def pred(self):
    return jnp.argmax(self.logits, -1)

  def sample(self, seed, shape=()):
    batch = shape + self.logits.shape[:-1]
    return jax.random.categorical(seed, self.logits, -1, batch)

  def logp(self, event):
    onehot = jax.nn.one_hot(event, self.logits.shape[-1])
    logprobs = jax.nn.log_softmax(self.logits, -1)
    return (logprobs * onehot).sum(-1)

  def entropy(self):
    logprobs = jax.nn.log_softmax(self.logits, -1)
    probs = jax.nn.softmax(self.logits, -1)
    return -(probs * logprobs).sum(-1)

  def kl(self, other):
    logprobs = jax.nn.log_softmax(self.logits, -1)
    other_logprobs = jax.nn.log_softmax(other.logits, -1)
    probs = jax.nn.softmax(self.logits, -1)
    return (probs * (logprobs - other_logprobs)).sum(-1)
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
class OneHot(Output):
  """One-hot categorical with straight-through gradient estimation.

  Samples and predictions are hard one-hot vectors whose gradients flow
  through the underlying softmax probabilities.
  """

  def __init__(self, logits, unimix=0.0):
    self.dist = Categorical(logits, unimix)

  def pred(self):
    return self._onehot_with_grad(self.dist.pred())

  def sample(self, seed, shape=()):
    return self._onehot_with_grad(self.dist.sample(seed, shape))

  def logp(self, event):
    logprobs = jax.nn.log_softmax(self.dist.logits, -1)
    return (logprobs * event).sum(-1)

  def entropy(self):
    return self.dist.entropy()

  def kl(self, other):
    return self.dist.kl(other.dist)

  def _onehot_with_grad(self, index):
    # Straight-through estimator: forward pass yields the hard one-hot,
    # backward pass uses the softmax probabilities.
    hard = jax.nn.one_hot(index, self.dist.logits.shape[-1], dtype=f32)
    soft = jax.nn.softmax(self.dist.logits, -1)
    return sg(hard) + (soft - sg(soft))
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
class TwoHot(Output):
  """Distribution over scalars represented as a softmax over fixed bins.

  Targets are encoded as "twohot" vectors: probability mass split between
  the two bins adjacent to the target value, inversely proportional to the
  distance to each. An optional squash/unsquash pair (e.g. symlog/symexp)
  transforms targets before encoding and predictions after decoding.
  """

  def __init__(self, logits, bins, squash=None, unsquash=None):
    # logits: unnormalized log probabilities over bins (last axis).
    # bins: float32 array of bin locations; assumed ascending — TODO confirm.
    logits = f32(logits)
    assert logits.shape[-1] == len(bins), (logits.shape, len(bins))
    assert bins.dtype == f32, bins.dtype
    self.logits = logits
    self.probs = jax.nn.softmax(logits)
    self.bins = jnp.array(bins)
    # Identity transforms when no squash pair is provided.
    self.squash = squash or (lambda x: x)
    self.unsquash = unsquash or (lambda x: x)

  def pred(self):
    # The naive implementation results in a non-zero result even if the bins
    # are symmetric and the probabilities uniform, because the sum operation
    # goes left to right, accumulating numerical errors. Instead, we use a
    # symmetric sum to ensure that the predicted rewards and values are
    # actually zero at initialization.
    # return self.unsquash((self.probs * self.bins).sum(-1))
    n = self.logits.shape[-1]
    if n % 2 == 1:
      # Odd bin count: add mirror-image terms around the middle bin so the
      # accumulated rounding errors of symmetric pairs cancel.
      m = (n - 1) // 2
      p1 = self.probs[..., :m]
      p2 = self.probs[..., m: m + 1]
      p3 = self.probs[..., m + 1:]
      b1 = self.bins[..., :m]
      b2 = self.bins[..., m: m + 1]
      b3 = self.bins[..., m + 1:]
      wavg = (p2 * b2).sum(-1) + ((p1 * b1)[..., ::-1] + (p3 * b3)).sum(-1)
      return self.unsquash(wavg)
    else:
      # Even bin count: pair the reversed lower half with the upper half.
      p1 = self.probs[..., :n // 2]
      p2 = self.probs[..., n // 2:]
      b1 = self.bins[..., :n // 2]
      b2 = self.bins[..., n // 2:]
      wavg = ((p1 * b1)[..., ::-1] + (p2 * b2)).sum(-1)
      return self.unsquash(wavg)

  def loss(self, target):
    """Cross entropy between the twohot-encoded target and the softmax."""
    assert target.dtype == f32, target.dtype
    target = sg(self.squash(target))
    # Indices of the nearest bins below and above each target value,
    # computed by counting bins on either side and clipping to valid range.
    below = (self.bins <= target[..., None]).astype(i32).sum(-1) - 1
    above = len(self.bins) - (
        self.bins > target[..., None]).astype(i32).sum(-1)
    below = jnp.clip(below, 0, len(self.bins) - 1)
    above = jnp.clip(above, 0, len(self.bins) - 1)
    # When both indices coincide (target on or outside a bin), use weight 1
    # on that single bin; the distances are replaced by 1 to avoid 0/0.
    equal = (below == above)
    dist_to_below = jnp.where(equal, 1, jnp.abs(self.bins[below] - target))
    dist_to_above = jnp.where(equal, 1, jnp.abs(self.bins[above] - target))
    total = dist_to_below + dist_to_above
    weight_below = dist_to_above / total
    weight_above = dist_to_below / total
    # Twohot target: mass split between the two neighboring bins.
    target = (
        jax.nn.one_hot(below, len(self.bins)) * weight_below[..., None] +
        jax.nn.one_hot(above, len(self.bins)) * weight_above[..., None])
    # Manual log_softmax of the logits.
    log_pred = self.logits - jax.scipy.special.logsumexp(
        self.logits, -1, keepdims=True)
    return -(target * log_pred).sum(-1)
|
models/embodied/jax/transform.py
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
import re
|
| 3 |
+
from collections import Counter
|
| 4 |
+
|
| 5 |
+
import jax
|
| 6 |
+
from jax.sharding import PartitionSpec as P
|
| 7 |
+
import ninjax as nj
|
| 8 |
+
|
| 9 |
+
from . import nets as nn
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
LOCK = threading.Lock()
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Add tracer_sharding attribute to abstract values. This allows us to use
|
| 16 |
+
# shard_map based on layer callback shardings, even though JAX does not
|
| 17 |
+
# currently expose the shardings of tracer objects.
|
| 18 |
+
TRACER_SHARDINGS = {}
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def init(
    fn, mesh, arg_shardings,
    param_partition_rules=(),
    act_partition_rules=(),
    static_argnums=(),
    dummy_inputs=(),
    print_partition=False,
):
  """Create sharded parameters for a ninjax function on a device mesh.

  First evaluates `fn` abstractly on `dummy_inputs` to learn the parameter
  shapes, resolves per-parameter shardings from `param_partition_rules`,
  then runs the jitted initializer for real and returns
  (params, params_sharding).
  """

  # Wrap the user function into a pure ninjax call that creates all of its
  # parameters and returns only the resulting state (outputs are dropped).
  def init(fun, **jit_kwargs):  # NOTE(review): jit_kwargs looks unused here.
    if not getattr(fun, '_is_pure', False):
      fun = nj.pure(fun)
    def wrapper(*args, **kwargs):
      state, out = fun(*args, create=True, modify=True, ignore=True, **kwargs)
      del out
      return state, ()
    return wrapper
  fn = init(fn)

  # Install the activation-sharding layer callback around the inner call so
  # initialization sees the same constraints as later apply calls.
  def fn(*args, inner=fn):
    params, seed, *args = args
    old = nn.LAYER_CALLBACK
    nn.LAYER_CALLBACK = create_layer_callback(mesh, act_partition_rules)
    params, _ = inner(params, *args, seed=seed)
    nn.LAYER_CALLBACK = old
    return params

  fn = jax.jit(fn, static_argnums=static_argnums)

  # Abstract evaluation yields parameter shapes without running any compute.
  params_shapes = fn.eval_shape(*dummy_inputs)
  params_sharding, grouping = resolve_rules(
      params_shapes, param_partition_rules, mesh)
  if print_partition:
    print_grouping(grouping)

  # Re-jit with explicit shardings (positional: in_shardings, out_shardings,
  # static_argnums, static_argnames — presumably; verify against jax.jit).
  fn = jax.jit(fn, arg_shardings, params_sharding, static_argnums, None)
  params = fn(*dummy_inputs)

  return params, params_sharding
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def apply(
    fn, mesh, in_shardings, out_shardings,
    partition_rules=(),
    static_argnums=(),
    single_output=False,
    return_params=False,
    donate_params=False,
    # shard_map specific
    split_rng=True,
    use_shardmap=False,
    first_outnums=(),
):
  """Build a jitted (optionally shard_mapped) apply function.

  Wraps a pure ninjax function `fn(params, *args, seed=...)` in a chain of
  adapters that handle parameter donation, RNG splitting per device,
  single-output unwrapping, activation partitioning, and returns the final
  jax.jit-compiled callable. Each `def fn(..., inner=fn)` below captures the
  previous layer via the default argument.
  """

  if single_output:
    assert len(out_shardings) == 1

  # Innermost adapter: unpack (params, seed, *args), optionally fold the
  # device index into the seed under shard_map, and call the pure function.
  def fn(*args, inner=fn):
    if donate_params:
      # Donated and non-donated parameters arrive as two separate dicts.
      donated, allocated, seed, *args = args
      params = {**donated, **allocated}
    else:
      params, seed, *args = args
    if use_shardmap and len(mesh.devices) > 1 and split_rng:
      # Give each device along mesh axis 'd' an independent random seed.
      seed = jax.random.fold_in(seed, jax.lax.axis_index('d'))
    params, outs = inner(params, *args, seed=seed)
    outs = (outs,) if single_output else outs
    assert isinstance(outs, tuple)
    return (params, *outs) if return_params else outs

  if use_shardmap and len(mesh.devices) > 1:

    # Inside shard_map: re-add a leading axis to outputs listed in
    # first_outnums so the per-device shards concatenate along it.
    def fn(*args, inner=fn):
      outs = list(inner(*args))
      for i in first_outnums:
        outs[i] = jax.tree.map(lambda x: x[None], outs[i])
      return tuple(outs)

    from jax.experimental.shard_map import shard_map
    ispecs = list(jax.tree.map(lambda s: s.spec, in_shardings))
    # shard_map expects specs for every positional argument, including the
    # static ones, so insert placeholders at the static positions.
    for i in sorted(static_argnums):
      ispecs.insert(i, None)
    ispecs = tuple(ispecs)
    ospecs = jax.tree.map(lambda s: s.spec, out_shardings)
    fn = shard_map(fn, mesh, ispecs, ospecs, check_rep=False)

    # Outside shard_map: strip the leading axis again so callers see the
    # same output shapes with and without shard_map.
    def fn(*args, inner=fn):
      outs = list(inner(*args))
      for i in first_outnums:
        outs[i] = jax.tree.map(lambda x: x[0], outs[i])
      return tuple(outs)

  if single_output:
    # Unwrap the length-one output tuple for convenience.
    def fn(*args, inner=fn):
      outs = inner(*args)
      assert len(outs) == 1
      return outs[0]

  if single_output:
    out_shardings = out_shardings[0]
  donate = [0] if donate_params else []

  if not use_shardmap:
    # Without shard_map, activation shardings come from the layer callback;
    # the lock serializes the swap of the global nn.LAYER_CALLBACK.
    def fn(*args, inner=fn):
      with LOCK:
        old = nn.LAYER_CALLBACK
        nn.LAYER_CALLBACK = create_layer_callback(mesh, partition_rules)
        outs = inner(*args)
        nn.LAYER_CALLBACK = old
      return outs

  # Positional jit args: in_shardings, out_shardings, static_argnums,
  # static_argnames, donate_argnums — presumably; verify against jax.jit.
  fn = jax.jit(fn, in_shardings, out_shardings, static_argnums, None, donate)

  return fn
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def create_layer_callback(mesh, partition_rules):
  """Return a callback that applies sharding constraints to activations.

  The returned callback receives an activation pytree and its layer name,
  finds the first partition rule whose regex matches the scoped name, and
  constrains every leaf to the corresponding NamedSharding. The chosen
  sharding is also recorded in TRACER_SHARDINGS and exposed through a
  `tracer_sharding` property, because JAX does not expose the shardings of
  tracer objects directly. Raises if no rule matches the name.
  """
  def layer_callback(y, name):
    name = f'{nj.ninjax.SCOPE}/{name}'
    for rule, spec in partition_rules:
      if re.search(rule, name):
        sharding = jax.sharding.NamedSharding(mesh, spec)
        def apply(y):
          y = jax.lax.with_sharding_constraint(y, sharding)
          # Fixed: check for 'tracer_sharding' (singular) — the attribute we
          # actually install below. The old check looked for the plural name
          # 'tracer_shardings', which never exists, so the property was
          # redundantly reinstalled on every single call.
          if not hasattr(type(y), 'tracer_sharding'):
            type(y).tracer_sharding = property(
                lambda self: TRACER_SHARDINGS[id(self)])
          TRACER_SHARDINGS[id(y)] = sharding
          return y
        return jax.tree.map(apply, y)
    # No rule matched any part of the scoped name: fail loudly, mirroring
    # the behavior of resolve_rules for parameter keys.
    raise Exception(f'No matching rule found for activation key: {name}')
  return layer_callback
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def resolve_rules(params, partition_rules, mesh):
  """Map each parameter key to a NamedSharding via regex partition rules.

  Every key in `params` is matched against the rules in order; the first
  matching rule supplies the PartitionSpec. Returns a dict of NamedShardings
  (same keys as `params`) and a grouping dict from rule pattern to the list
  of keys it matched. Raises if any key matches no rule.
  """
  if not partition_rules:
    # Default: fully replicate all parameters.
    partition_rules = [('.*', P())]
  params_spec = {}
  grouping = {}
  for key in params.keys():
    matched = next(
        ((rule, spec) for rule, spec in partition_rules
         if re.search(rule, key)),
        None)
    if matched is None:
      raise Exception(f'No matching rule found for param key: {key}')
    rule, spec = matched
    params_spec[key] = spec
    grouping.setdefault(rule, []).append(key)
  assert set(params.keys()) == set(params_spec.keys())
  sharding = jax.tree.map(
      lambda spec: jax.sharding.NamedSharding(mesh, spec), params_spec)
  return sharding, grouping
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def print_grouping(grouping):
  """Print a human-readable summary of rule-to-parameter matches.

  For every rule that matched at least one parameter, prints the match
  count followed by the distinct two-component key suffixes, most frequent
  first.
  """
  for rule, keys in grouping.items():
    if not keys:
      # Rules that matched nothing produce no output.
      continue
    print(f'Partition rule "{rule}" matches {len(keys)} param tensors')
    # Collapse full parameter paths to their last two path components.
    suffixes = Counter('/'.join(key.split('/')[-2:]) for key in keys)
    lines = [
        f'- .../{suffix}: {count}'
        for suffix, count in suffixes.most_common(len(suffixes))]
    print('\n'.join(lines))
|
models/embodied/jax/utils.py
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
import jax
|
| 4 |
+
import jax.numpy as jnp
|
| 5 |
+
import ninjax as nj
|
| 6 |
+
|
| 7 |
+
from . import internal
|
| 8 |
+
|
| 9 |
+
sg = jax.lax.stop_gradient
|
| 10 |
+
f32 = jnp.float32
|
| 11 |
+
i32 = jnp.int32
|
| 12 |
+
|
| 13 |
+
COMPUTE_DTYPE = jnp.bfloat16
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Normalize(nj.Module):
  """Running input normalizer with EMA statistics.

  Tracks either mean/std or percentile statistics of its inputs through an
  exponential moving average and returns (offset, scale) pairs for
  normalizing values. Statistics are aggregated across data-parallel
  devices when a data axis is active (see _mean/_perc).
  """

  rate: float = 0.01    # EMA update rate for the tracked statistics.
  limit: float = 1e-8   # Lower bound on the returned scale.
  perclo: float = 5.0   # Lower percentile for impl='perc'.
  perchi: float = 95.0  # Upper percentile for impl='perc'.
  debias: bool = True   # Correct for the zero initialization of the EMAs.

  def __init__(self, impl):
    # impl: one of 'none', 'meanstd', or 'perc'.
    self.impl = impl
    if self.debias and self.impl != 'none':
      # EMA of the constant 1.0, used to debias the zero-initialized stats.
      self.corr = nj.Variable(jnp.zeros, (), f32, name='corr')
    if self.impl == 'none':
      pass
    elif self.impl == 'meanstd':
      self.mean = nj.Variable(jnp.zeros, (), f32, name='mean')
      self.sqrs = nj.Variable(jnp.zeros, (), f32, name='sqrs')
    elif self.impl == 'perc':
      self.lo = nj.Variable(jnp.zeros, (), f32, name='lo')
      self.hi = nj.Variable(jnp.zeros, (), f32, name='hi')
    else:
      raise NotImplementedError(self.impl)

  def __call__(self, x, update):
    # Optionally fold x into the statistics, then return (offset, scale).
    if update:
      self.update(x)
    return self.stats()

  def update(self, x):
    """Fold a new batch of values into the EMA statistics."""
    x = sg(f32(x))
    if self.impl == 'none':
      pass
    elif self.impl == 'meanstd':
      self._update(self.mean, self._mean(x))
      self._update(self.sqrs, self._mean(jnp.square(x)))
    elif self.impl == 'perc':
      self._update(self.lo, self._perc(x, self.perclo))
      self._update(self.hi, self._perc(x, self.perchi))
    else:
      raise NotImplementedError(self.impl)
    if self.debias and self.impl != 'none':
      self._update(self.corr, 1.0)

  def stats(self):
    """Return the current (offset, scale) normalization statistics."""
    corr = 1.0
    if self.debias and self.impl != 'none':
      # Dividing by the EMA of 1.0 compensates for the zero initialization;
      # clamping at `rate` avoids huge corrections on the first updates.
      corr /= jnp.maximum(self.rate, self.corr.read())
    if self.impl == 'none':
      return 0.0, 1.0
    elif self.impl == 'meanstd':
      mean = self.mean.read() * corr
      # relu guards against tiny negative variances from rounding.
      std = jnp.sqrt(jax.nn.relu(self.sqrs.read() * corr - mean ** 2))
      std = jnp.maximum(self.limit, std)
      return mean, std
    elif self.impl == 'perc':
      # Offset is the low percentile; scale is the percentile range.
      lo, hi = self.lo.read() * corr, self.hi.read() * corr
      return sg(lo), sg(jnp.maximum(self.limit, hi - lo))
    else:
      raise NotImplementedError(self.impl)

  def _mean(self, x):
    # Mean over all elements, averaged across data-parallel devices.
    x = x.mean()
    axes = internal.get_data_axes()
    if axes:
      x = jax.lax.pmean(x, axes)
    return x

  def _perc(self, x, q):
    # q-th percentile, computed over values gathered from all devices.
    axes = internal.get_data_axes()
    if axes:
      x = jax.lax.all_gather(x, axes)
    x = jnp.percentile(x, q)
    return x

  def _update(self, var, x):
    # Standard EMA write: var <- (1 - rate) * var + rate * x.
    var.write((1 - self.rate) * var.read() + self.rate * sg(x))
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class SlowModel:
  """EMA (target-network) copy of a source model.

  Wraps `model` so that its parameters track `source`'s parameters: every
  `every`-th call to update(), the wrapped parameters move toward the
  source parameters by `rate` (rate == 1 means hard copies). Attribute
  access and calls are forwarded to the wrapped model, lazily initializing
  its parameters from the source on first use.
  """

  def __init__(self, model, *, source, rate=1.0, every=1):
    # Either hard copies (rate == 1) or a small EMA rate are expected.
    assert rate == 1 or rate < 0.5, rate
    self.source = source
    self.model = model
    self.rate = rate
    self.every = every
    name = self.model.path + '_count'
    # Counts update() calls to decide when to blend (see update()).
    self.count = nj.Variable(jnp.zeros, (), i32, name=name)

  def __getattr__(self, name):
    # Forward attribute access to the wrapped model after lazy init.
    self._initonce()
    return getattr(self.model, name)

  def __call__(self, *args, **kwargs):
    self._initonce()
    return self.model(*args, **kwargs)

  def update(self):
    """Move the wrapped parameters toward the source parameters."""
    self._initonce()
    # Blend only on every `self.every`-th call; mix == 0 keeps the values.
    mix = jnp.where(self.count.read() % self.every == 0, self.rate, 0)
    fn = lambda src, dst: mix * src + (1 - mix) * dst
    values = jax.tree.map(fn, self.source.values, self.model.values)
    [self.model.write(k, v) for k, v in values.items()]
    self.count.write(self.count.read() + 1)

  def _initonce(self):
    # Lazily copy the source parameters into the wrapped model's scope on
    # first use. (Cleanup: removed the unused *args/method/**kwargs
    # parameters — this private helper is always called with no arguments.)
    assert self.source.values, 'no parameters to track'
    if not self.model.values:
      p = self.model.path + '/'
      nj.context().update({p + k: v for k, v in self.source.values.items()})
    assert self.model.values.keys() == self.source.values.keys(), (
        self.model.values.keys(), self.source.values.keys())
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class LayerScan:
  """Wrap a module so selected methods run via layer_scan.

  Accessing any method named in `names` returns a version of it that is
  made pure and dispatched through layer_scan with this module's scope and
  the given layer count; all other attributes are forwarded unchanged.
  """

  def __init__(self, module, count, names=('__call__',)):
    self.module = module
    self.count = count
    self.names = names

  def __call__(self, *args, **kwargs):
    # Magic methods bypass __getattr__, so forward the call explicitly.
    return self.__getattr__('__call__')(*args, **kwargs)

  def __getattr__(self, name):
    attr = getattr(self.module, name)
    if name not in self.names:
      return attr
    assert callable(attr)
    pure = nj.pure(attr, nested=True)
    return functools.partial(layer_scan, pure, self.module.path, self.count)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def layer_scan(fn, scope, count, inp, *args, **kwargs):
  """Run a pure ninjax function over `count` stacked layers via lax.scan.

  `fn` is a pure function whose parameters under `scope` carry a leading
  layer axis of size `count`. A single abstract call discovers which state
  keys are accessed, modified, and created; the state is then partitioned
  into inner (under `scope`) and outer groups and threaded through
  jax.lax.scan accordingly. `inp` is the carry input; extra `args` are
  sliced per layer along their leading axis.
  """
  isinner = lambda k: k.startswith(scope + '/')

  # Trace one layer's worth of inputs/state to learn the access pattern.
  args_ = jax.tree.map(lambda x: x[0], args)  # Copy structure
  kwargs_ = jax.tree.map(lambda x: x, kwargs)  # Copy structure
  state_ = {k: v[0] if isinner(k) else v for k, v in nj.context().items()}
  state, _, accessed, modified, created = fn(
      state_, inp, *args_, ignore=True, track=True,
      seed=nj.seed(None, True), **kwargs_)

  # print('-' * 79)
  # print('accessed:', accessed)
  # print('modified:', modified)
  # print('created:', created)

  # Split any state dict into keys under `scope` (inner) and the rest.
  inner = lambda xs: {k: v for k, v in xs.items() if isinner(k)}
  outer = lambda xs: {k: v for k, v in xs.items() if not isinner(k)}

  # Read-only state: accessed but never written or created during the call.
  unchanging = {
      k: v for k, v in nj.context().items()
      if k in accessed and k not in modified and k not in created}
  unchanging_inner = inner(unchanging)
  unchanging_outer = outer(unchanging)

  creations = {k: v for k, v in state.items() if k in created}
  creations_inner = inner(creations)
  creations_outer = outer(creations)
  nj.context().update(creations_outer)
  del creations_inner  # Will be created inside the scan.

  # Inner values do not exist yet, so we only keep them in the creations. This
  # is fine, because inner values cannot change across scan iterations anyways.
  # Outer values can change over iterations, so we need to thread them even
  # during creation.
  changing_inner = inner({
      # k: v for k, v in state.items()
      k: v for k, v in nj.context().items()
      if k in modified and k not in created})
  changing_outer = outer({
      k: v for k, v in state.items()
      if k in modified})

  # f = lambda x: {k: v.shape for k, v in x.items()}
  # print('-' * 79)
  # print('unchanging_inner', f(unchanging_inner))
  # print('unchanging_outer', f(unchanging_outer))
  # print('creations_inner', f(inner(creations)))
  # print('creations_outer', f(creations_outer))
  # print('changing_inner', f(changing_inner))
  # print('changing_outer', f(changing_outer))

  def body(carry, x):
    # carry threads the layer output and the mutable outer state; x carries
    # the per-layer argument slices, seed, and per-layer inner state.
    inp, changing_outer = carry
    arg, seed, unchanging_inner, changing_inner = x
    state = {
        **unchanging_inner, **unchanging_outer,
        **changing_inner, **changing_outer}
    state, out = fn(state, inp, *arg, **kwargs, seed=seed)
    # First output becomes the next layer's input; extras are stacked.
    out, *other = out if isinstance(out, tuple) else (out,)
    changing = {k: v for k, v in state.items() if k in modified}
    changing_inner = inner(changing)
    changing_outer = outer(changing)
    creations = {k: v for k, v in state.items() if k in created}
    creations_inner = inner(creations)
    carry = (out, changing_outer)
    y = (other, creations_inner, changing_inner)
    return carry, y

  # One independent seed per layer.
  seeds = nj.seed(count, True)
  carry, ys = jax.lax.scan(
      f=body,
      init=(inp, changing_outer),
      xs=(args, seeds, unchanging_inner, changing_inner),
      length=count)
  out, changing_outer = carry
  other, creations_inner, changing_inner = ys

  # Write back created and modified state if the context allows mutation.
  if nj.context().modify:
    nj.context().update(creations_inner)
    nj.context().update(changing_inner)
    nj.context().update(changing_outer)

  return (out, *other) if len(other) else out
|
models/embodied/perf/test_bandwidth.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pathlib
|
| 2 |
+
import sys
|
| 3 |
+
import time
|
| 4 |
+
|
| 5 |
+
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
|
| 6 |
+
|
| 7 |
+
import elements
|
| 8 |
+
import zerofun
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TestBandwidth:
  """Micro-benchmarks for in-memory and socket data throughput."""

  def test_numpy_read(self):
    # Time reductions over 1 GiB of int64s to estimate read bandwidth.
    data = np.ones((128, 1024, 1024), np.int64)  # 1 GiB
    gib = data.nbytes / (1024 ** 3)
    for _ in range(10):
      with elements.timer.section('step'):
        data.sum()
    avg = elements.timer.stats()['step/avg']
    print(f'numpy_read: {avg:.3f} avg | {gib / avg:.2f} gib/s')

  def test_numpy_copy(self):
    # Time full copies of a 1 GiB uint8 array.
    data = np.ones((1024, 1024, 1024), np.uint8)
    gib = data.nbytes / (1024 ** 3)
    for _ in range(10):
      with elements.timer.section('step'):
        data.copy()
    avg = elements.timer.stats()['step/avg']
    print(f'numpy_copy: {avg:.3f} avg | {gib / avg:.2f} gib/s')

  def test_socket_send(self):
    # Time fetching a 1 GiB payload from a server in another process.
    shape, dtype, gib = (1024, 1024, 1024), np.uint8, 1.00

    def serve(context, addr):
      host = zerofun.Server(addr)
      payload = {'foo': np.ones(shape, dtype)}
      host.bind('function', lambda _: payload)
      with host:
        while context.running:
          time.sleep(0.01)

    addr = f'tcp://localhost:{zerofun.get_free_port()}'
    proc = zerofun.StoppableProcess(serve, addr, start=True)
    client = zerofun.Client(addr)
    client.connect()
    for _ in range(10):
      with elements.timer.section('step'):
        client.function({}).result()
    proc.stop()
    avg = elements.timer.stats()['step/avg']
    print(f'socket_send: {avg:.3f} avg | {gib / avg:.2f} gib/s')
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
if __name__ == '__main__':
  # Run the benchmarks directly; trailing comments record reference numbers
  # measured on one machine.
  TestBandwidth().test_numpy_read()  # 21 gib/s
  TestBandwidth().test_numpy_copy()  # 7 gib/s
  TestBandwidth().test_socket_send()  # 4 gib/s
|
models/embodied/perf/test_distr.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pathlib
|
| 3 |
+
import sys
|
| 4 |
+
import time
|
| 5 |
+
from collections import defaultdict
|
| 6 |
+
|
| 7 |
+
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
|
| 8 |
+
|
| 9 |
+
import zerofun
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class TestDistr:
  """Throughput benchmarks for the zerofun client/server stack.

  Both benchmarks spawn many client processes that call a batched server in
  a loop; the server (or backend, in the proxy setup) prints aggregate
  batch/frame/byte throughput once per second. They run until interrupted.
  """

  def test_throughput(self, clients=32, batch=16, workers=4):
    # Each client keeps a pending request at all times; the server batches
    # them and sleeps briefly per batch to simulate work.
    # Many concurrent sockets need a raised file-descriptor limit.
    assert int(os.popen('ulimit -n').read()) > 1024

    addr = f'tcp://localhost:{zerofun.get_free_port()}'
    stats = defaultdict(int)
    # One slot per client plus one for this process, so the measurement
    # starts only after every client has connected.
    barrier = zerofun.mp.Barrier(1 + clients)

    def client(context, addr, barrier):
      data = {
          'foo': np.zeros((64, 64, 3,), np.uint8),
          'bar': np.zeros((1024,), np.float32),
          'baz': np.zeros((), bool),
      }
      client = zerofun.Client(addr)
      client.connect()
      barrier.wait()
      while context.running:
        client.function(data).result()

    def workfn(data):
      # Simulated per-batch compute.
      time.sleep(0.002)
      return data, data

    def donefn(data):
      # Tally throughput counters after each completed batch.
      stats['batches'] += 1
      stats['frames'] += len(data['foo'])
      stats['nbytes'] += sum(x.nbytes for x in data.values())

    procs = [
        zerofun.StoppableProcess(client, addr, barrier, start=True)
        for _ in range(clients)]

    server = zerofun.Server(addr)
    # server = zerofun.Server2(addr)

    server.bind('function', workfn, donefn, batch=batch, workers=workers)
    with server:
      barrier.wait()
      start = time.time()
      while True:
        server.check()
        now = time.time()
        dur = now - start
        print(
            f'{stats["batches"] / dur:.2f} bat/s ' +
            f'{stats["frames"] / dur:.2f} frm/s ' +
            f'{stats["nbytes"] / dur / (1024 ** 3):.2f} gib/s')
        stats.clear()
        start = now
        time.sleep(1)
    [x.stop() for x in procs]

  #############################################################################

  def test_proxy_throughput(self, clients=32, batch=16, workers=4):
    # Same workload, but routed through a proxy process that batches
    # client requests before forwarding them to a backend server.
    assert int(os.popen('ulimit -n').read()) > 1024

    def client(context, outer_addr, barrier):
      data = {
          'foo': np.zeros((64, 64, 3,), np.uint8),
          'bar': np.zeros((1024,), np.float32),
          'baz': np.zeros((), bool),
      }
      client = zerofun.Client(outer_addr)
      client.connect()
      barrier.wait()
      while context.running:
        client.function(data).result()

    def proxy(context, outer_addr, inner_addr, barrier):
      # Forward batched outer requests to the inner backend.
      client = zerofun.Client(
          inner_addr, pings=0, maxage=0, name='ProxyInner')
      client.connect()
      server = zerofun.Server(
          outer_addr, errors=True, name='ProxyOuter')
      def function(data):
        return client.function(data).result()
      server.bind('function', function, batch=batch, workers=workers)
      with server:
        barrier.wait()
        while context.running:
          server.check()
          time.sleep(0.1)

    def backend(context, inner_addr, barrier):
      # Terminal server: does the simulated work and prints throughput.
      stats = defaultdict(int)
      def workfn(data):
        time.sleep(0.002)
        return data, data
      def donefn(data):
        stats['batches'] += 1
        stats['frames'] += len(data['foo'])
        stats['nbytes'] += sum(x.nbytes for x in data.values())
      server = zerofun.Server(
          inner_addr, errors=True, name='Backend')
      server.bind('function', workfn, donefn, workers=workers)
      with server:
        barrier.wait()
        start = time.time()
        while context.running:
          server.check()
          now = time.time()
          dur = now - start
          print(
              f'{stats["batches"] / dur:.2f} bat/s ' +
              f'{stats["frames"] / dur:.2f} frm/s ' +
              f'{stats["nbytes"] / dur / (1024**3):.2f} gib/s')
          stats.clear()
          start = now
          time.sleep(1)

    # IPC sockets avoid TCP overhead for the local proxy chain.
    inner_addr = 'ipc:///tmp/test-inner'
    outer_addr = 'ipc:///tmp/test-outer'
    barrier = zerofun.mp.Barrier(2 + clients)
    procs = [
        zerofun.StoppableProcess(client, outer_addr, barrier)
        for _ in range(clients)]
    procs.append(zerofun.StoppableProcess(
        proxy, outer_addr, inner_addr, barrier))
    procs.append(zerofun.StoppableProcess(
        backend, inner_addr, barrier))
    zerofun.run(procs)
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
if __name__ == '__main__':
  # Run the benchmarks directly; trailing comments record reference
  # frames-per-second numbers for each server flavor.
  TestDistr().test_batched_throughput()  # 4100 frm/s Server
  # TestDistr().test_batched_throughput()  # 4200 frm/s Server2
  TestDistr().test_proxy_throughput()  # 3000 frm/s
|
models/embodied/perf/test_driver.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pathlib
|
| 2 |
+
import sys
|
| 3 |
+
from functools import partial as bind
|
| 4 |
+
|
| 5 |
+
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
|
| 6 |
+
|
| 7 |
+
import elements
|
| 8 |
+
import embodied
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TestDriver:
  """Throughput benchmarks for the embodied Driver with a random agent.

  Both tests build 32 environment constructors and drive them forever with
  random actions, printing steps per second. They differ only in the
  environment used, so the shared loop lives in _measure_throughput.
  """

  def test_throughput_dummy(self, parallel=True):
    from embodied.envs import dummy
    make_env_fns = [bind(dummy.Dummy, 'disc') for _ in range(32)]
    self._measure_throughput(make_env_fns, parallel)

  def test_throughput_crafter(self, parallel=True):
    from embodied.envs import crafter
    make_env_fns = [bind(crafter.Crafter, 'reward') for _ in range(32)]
    self._measure_throughput(make_env_fns, parallel)

  def _measure_throughput(self, make_env_fns, parallel):
    # Shared benchmark loop, extracted from the two previously duplicated
    # test bodies. Instantiates one env to derive the agent's spaces, then
    # steps the driver forever, printing the running FPS.
    example = make_env_fns[0]()
    agent = embodied.RandomAgent(example.obs_space, example.act_space)
    example.close()
    driver = embodied.Driver(make_env_fns, parallel)
    driver.reset(agent.init_policy)
    fps = elements.FPS()
    while True:
      driver(agent.policy, steps=100)
      fps.step(100 * len(make_env_fns))
      print(f'FPS: {fps.result():.0f}')
|
models/embodied/perf/test_replay.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pathlib
|
| 2 |
+
import sys
|
| 3 |
+
import threading
|
| 4 |
+
import time
|
| 5 |
+
from collections import defaultdict
|
| 6 |
+
|
| 7 |
+
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
|
| 8 |
+
|
| 9 |
+
import embodied
|
| 10 |
+
import numpy as np
|
| 11 |
+
import pytest
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Replay buffer implementations to benchmark, as (display name, class) pairs;
# consumed by pytest.mark.parametrize in TestReplay below.
REPLAYS = [
    ('Replay', embodied.replay.Replay),
]

# A representative transition used for every insert in the benchmarks:
# an image observation, a flat vector observation, a continuous action,
# and the standard episode-boundary flags.
STEP = {
    'image': np.zeros((64, 64, 3), np.uint8),
    'vector': np.zeros(1024, np.float32),
    'action': np.zeros(12, np.float32),
    'is_first': np.array(False),
    'is_last': np.array(False),
    'is_terminal': np.array(False),
}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class TestReplay:
  """Performance and stress benchmarks for the replay buffer.

  These print throughput numbers rather than asserting on them; the only
  hard failure mode is an exception raised inside the replay itself.
  """

  @pytest.mark.parametrize('name,Replay', REPLAYS)
  def test_speed(self, name, Replay, inserts=2e5, workers=8, samples=1e5):
    """Benchmark insert and sample throughput of a replay implementation."""
    print('')
    initial = time.time()
    replay = Replay(length=32, capacity=1e5, chunksize=1024)
    # Phase 1: interleave inserts across `workers` distinct worker keys.
    start = time.time()
    for step in range(int(inserts / workers)):
      for worker in range(workers):
        replay.add(STEP, worker)
    duration = time.time() - start
    print(name, 'inserts/sec:', int(inserts / duration))
    # Phase 2: draw `samples` batches of size 1 from the dataset iterator.
    start = time.time()
    dataset = iter(replay.dataset(1))
    for _ in range(int(samples)):
      next(dataset)
    duration = time.time() - start
    print(name, 'samples/sec:', int(samples / duration))
    print(name, 'total duration:', time.time() - initial)

  @pytest.mark.parametrize('chunksize', [64, 128, 256, 512, 1024, 2048, 4096])
  def test_chunk_size(self, chunksize, inserts=2e5, workers=8, samples=2e5):
    """Benchmark how the replay chunk size affects insert/sample speed."""
    print('')
    initial = time.time()
    # No capacity bound here; only the chunk size varies across runs.
    replay = embodied.replay.Replay(length=64, chunksize=chunksize)
    start = time.time()
    for step in range(int(inserts / workers)):
      for worker in range(workers):
        replay.add(STEP, worker)
    duration = time.time() - start
    print('chunksize', chunksize, 'inserts/sec:', int(inserts / duration))
    start = time.time()
    dataset = iter(replay.dataset(1))
    for _ in range(int(samples)):
      next(dataset)
    duration = time.time() - start
    print('chunksize', chunksize, 'samples/sec:', int(samples / duration))
    print('chunksize', chunksize, 'total duration:', time.time() - initial)

  @pytest.mark.parametrize('name,Replay', REPLAYS)
  def test_removal(self, name, Replay, inserts=1e6, workers=1):
    """Benchmark insert speed when capacity forces continual eviction.

    Inserts 10x the buffer capacity from a single worker, so most inserts
    trigger removal of old data.
    """
    print('')
    replay = Replay(length=32, capacity=1e5, chunksize=1024)
    start = time.time()
    for step in range(int(inserts)):
      replay.add(STEP)
    duration = time.time() - start
    print(name, 'inserts/sec:', int(inserts / duration))

  @pytest.mark.parametrize('name,Replay', REPLAYS)
  def test_parallel(self, tmpdir, name, Replay, duration=5):
    """Stress test concurrent adds, samples, and save/load cycles.

    Runs 32 adder threads, 8 sampler threads, and 1 saver thread for
    `duration` seconds; any exception raised in a worker fails the test.
    """
    print('')
    replay = Replay(length=16, capacity=1e4, chunksize=32, directory=tmpdir)

    # `running` is read by the worker threads as a stop flag; per-thread
    # operation counts are keyed by thread ident.
    running = True
    adds = defaultdict(int)
    samples = defaultdict(int)
    saves = defaultdict(int)
    errors = []

    def adder():
      # Continuously insert transitions under this thread's own worker key.
      try:
        ident = threading.get_ident()
        step = {'foo': np.zeros((64, 64, 3))}
        while running:
          replay.add(step, threading.get_ident())
          adds[ident] += 1
      except Exception as e:
        errors.append(e)
        raise

    def sampler():
      # Continuously draw batches of size 1 from a dataset iterator.
      try:
        ident = threading.get_ident()
        dataset = iter(replay.dataset(1))
        while running:
          next(dataset)
          samples[ident] += 1
      except Exception as e:
        errors.append(e)
        raise

    def saver():
      # Periodically round-trip the replay state through save() and load().
      try:
        ident = threading.get_ident()
        while running:
          data = replay.save()
          time.sleep(0.1)
          replay.load(data)
          time.sleep(0.1)
          saves[ident] += 1
      except Exception as e:
        errors.append(e)
        raise

    workers = [threading.Thread(target=saver)]
    for _ in range(32):
      workers.append(threading.Thread(target=adder))
    for _ in range(8):
      workers.append(threading.Thread(target=sampler))

    print(f'Starting {len(workers)} threads')
    [x.start() for x in workers]
    time.sleep(duration)
    running = False
    [x.join() for x in workers]
    if errors:
      print(f'Found {len(errors)} errors: {errors}')
      raise errors[0]
    print('adds/sec:', sum(adds.values()) / duration)
    print('samples/sec:', sum(samples.values()) / duration)
    print('save_load/sec:', sum(saves.values()) / duration)
models/embodied/run/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .eval_only import eval_only
|
| 2 |
+
from .train import train
|
| 3 |
+
from .train_eval import train_eval
|
| 4 |
+
|
| 5 |
+
from . import parallel
|
models/embodied/run/eval_only.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
from functools import partial as bind
|
| 3 |
+
|
| 4 |
+
import elements
|
| 5 |
+
import embodied
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def eval_only(make_agent, make_env, make_logger, args):
  """Evaluate a checkpointed agent without any training.

  Loads the agent from ``args.from_checkpoint``, drives ``args.envs``
  environments with the agent's eval-mode policy until ``args.steps``
  environment steps have been logged, and periodically writes episode and
  usage statistics to the logger.

  Args:
    make_agent: Zero-argument factory returning the agent.
    make_env: Factory called as ``make_env(i)`` for each env index.
    make_logger: Zero-argument factory returning the logger.
    args: Config with at least from_checkpoint, logdir, usage, log_every,
      envs, debug, and steps fields.
  """
  assert args.from_checkpoint

  agent = make_agent()
  logger = make_logger()

  logdir = elements.Path(args.logdir)
  logdir.mkdir()
  print('Logdir', logdir)
  step = logger.step
  usage = elements.Usage(**args.usage)
  agg = elements.Agg()
  epstats = elements.Agg()
  # One aggregator per driver worker, so concurrent episodes don't mix.
  episodes = defaultdict(elements.Agg)
  should_log = elements.when.Clock(args.log_every)
  policy_fps = elements.FPS()

  @elements.timer.section('logfn')
  def logfn(tran, worker):
    # Per-step callback: accumulate episode statistics for this worker and
    # flush them into `epstats` when the episode ends.
    episode = episodes[worker]
    tran['is_first'] and episode.reset()
    episode.add('score', tran['reward'], agg='sum')
    episode.add('length', 1, agg='sum')
    episode.add('rewards', tran['reward'], agg='stack')
    for key, value in tran.items():
      isimage = (value.dtype == np.uint8) and (value.ndim == 3)
      if isimage and worker == 0:
        # Only record policy videos from the first worker to bound cost.
        episode.add(f'policy_{key}', value, agg='stack')
      elif key.startswith('log/'):
        assert value.ndim == 0, (key, value.shape, value.dtype)
        episode.add(key + '/avg', value, agg='avg')
        episode.add(key + '/max', value, agg='max')
        episode.add(key + '/sum', value, agg='sum')
    if tran['is_last']:
      result = episode.result()
      logger.add({
          'score': result.pop('score'),
          'length': result.pop('length'),
      }, prefix='episode')
      rew = result.pop('rewards')
      if len(rew) > 1:
        # Fraction of steps where the reward changed noticeably.
        result['reward_rate'] = (np.abs(rew[1:] - rew[:-1]) >= 0.01).mean()
      epstats.add(result)

  fns = [bind(make_env, i) for i in range(args.envs)]
  # In debug mode, run envs in-process for easier stepping/inspection.
  driver = embodied.Driver(fns, parallel=(not args.debug))
  driver.on_step(lambda tran, _: step.increment())
  driver.on_step(lambda tran, _: policy_fps.step())
  driver.on_step(logfn)

  cp = elements.Checkpoint()
  cp.agent = agent
  cp.load(args.from_checkpoint, keys=['agent'])

  print('Start evaluation')
  policy = lambda *args: agent.policy(*args, mode='eval')
  driver.reset(agent.init_policy)
  while step < args.steps:
    driver(policy, steps=10)
    if should_log(step):
      logger.add(agg.result())
      logger.add(epstats.result(), prefix='epstats')
      logger.add(usage.stats(), prefix='usage')
      logger.add({'fps/policy': policy_fps.result()})
      logger.add({'timer': elements.timer.stats()['summary']})
      logger.write()

  logger.close()
models/embodied/run/parallel.py
ADDED
|
@@ -0,0 +1,481 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import threading
|
| 3 |
+
import time
|
| 4 |
+
from functools import partial as bind
|
| 5 |
+
|
| 6 |
+
import cloudpickle
|
| 7 |
+
import elements
|
| 8 |
+
import embodied
|
| 9 |
+
import numpy as np
|
| 10 |
+
import portal
|
| 11 |
+
|
| 12 |
+
def prefix(d, p):
  """Return a copy of dict ``d`` with every key prefixed by ``'<p>/'``.

  Replaces the previous lambda assignment (PEP 8 discourages binding
  lambdas to names); callers are unaffected.
  """
  return {f'{p}/{k}': v for k, v in d.items()}
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def combined(
    make_agent,
    make_replay_train,
    make_replay_eval,
    make_env_train,
    make_env_eval,
    make_stream,
    make_logger,
    args):
  """Launch all roles of a parallel training run on this machine.

  Starts the agent (actor + learner), the logger, and — unless marked as
  remote in ``args`` — the environment and replay workers, then blocks in
  ``portal.run`` until they finish.

  All factory callables are serialized with cloudpickle so they can cross
  process boundaries; the worker entry points deserialize them again.
  """

  # Derive a default actor batch from the env count, then resolve any
  # '{auto}' placeholders in server addresses to free ports.
  if args.actor_batch <= 0:
    args = args.update(actor_batch=max(1, args.envs // 2))
  assert args.actor_batch <= args.envs, (args.actor_batch, args.envs)
  for key in ('actor_addr', 'replay_addr', 'logger_addr'):
    if '{auto}' in args[key]:
      args = args.update({key: args[key].format(auto=portal.free_port())})

  make_agent = cloudpickle.dumps(make_agent)
  make_replay_train = cloudpickle.dumps(make_replay_train)
  make_replay_eval = cloudpickle.dumps(make_replay_eval)
  make_env_train = cloudpickle.dumps(make_env_train)
  make_env_eval = cloudpickle.dumps(make_env_eval)
  make_stream = cloudpickle.dumps(make_stream)
  make_logger = cloudpickle.dumps(make_logger)

  workers = []
  # The agent can run in its own process or share this one via a thread.
  if args.agent_process:
    workers.append(portal.Process(parallel_agent, make_agent, args))
  else:
    workers.append(portal.Thread(parallel_agent, make_agent, args))
  workers.append(portal.Process(parallel_logger, make_logger, args))

  if not args.remote_envs:
    # Train envs occupy ids [0, envs); eval envs continue the id range and
    # are flagged with is_eval=True.
    for i in range(args.envs):
      workers.append(portal.Process(parallel_env, make_env_train, i, args))
    for i in range(args.envs, args.envs + args.eval_envs):
      workers.append(portal.Process(
          parallel_env, make_env_eval, i, args, True))

  if not args.remote_replay:
    workers.append(portal.Process(
        parallel_replay, make_replay_train, make_replay_eval,
        make_stream, args))

  portal.run(workers)
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def parallel_agent(make_agent, args):
  """Instantiate the agent and run its actor and learner threads.

  Accepts either the factory itself or its cloudpickle serialization,
  which is how `combined` passes it across process boundaries.
  """
  if isinstance(make_agent, bytes):
    make_agent = cloudpickle.loads(make_agent)
  agent = make_agent()
  # The two threads rendezvous once so that data collection only begins
  # after the learner has restored its checkpoint.
  barrier = threading.Barrier(2)
  threads = [
      portal.Thread(parallel_actor, agent, barrier, args),
      portal.Thread(parallel_learner, agent, barrier, args),
  ]
  portal.run(threads)
| 71 |
+
|
| 72 |
+
|
| 73 |
+
@elements.timer.section('actor')
def parallel_actor(agent, barrier, args):
  """Serve batched policy requests from environment workers.

  Runs a portal.BatchServer that collects observations from env processes
  into batches of ``args.actor_batch``, runs the agent policy, returns
  actions, and forwards the resulting transitions to the replay and logger
  servers.
  """

  islist = lambda x: isinstance(x, list)
  # Build a per-env initial policy state by slicing out the first element of
  # the batched initial state; new env ids default to this value.
  initial = agent.init_policy(args.actor_batch)
  initial = elements.tree.map(lambda x: x[0], initial, isleaf=islist)
  carries = collections.defaultdict(lambda: initial)
  barrier.wait()  # Do not collect data before learner restored checkpoint.
  fps = elements.FPS()

  should_log = embodied.LocalClock(args.log_every)
  # Cap in-flight requests so a slow replay/logger applies backpressure
  # instead of letting queues grow without bound.
  backlog = 8 * args.actor_threads
  logger = portal.Client(args.logger_addr, 'ActorLogger', maxinflight=backlog)
  replay = portal.Client(args.replay_addr, 'ActorReplay', maxinflight=backlog)

  @elements.timer.section('workfn')
  def workfn(obs):
    # Batched request handler: obs is a dict of stacked arrays plus the
    # 'envid' and 'is_eval' bookkeeping keys added by the env workers.
    envid = obs.pop('envid')
    assert envid.shape == (args.actor_batch,)
    is_eval = obs.pop('is_eval')
    fps.step(obs['is_first'].size)
    with elements.timer.section('get_states'):
      # Gather each env's carried policy state and re-batch it.
      carry = [carries[a] for a in envid]
      carry = elements.tree.map(lambda *xs: list(xs), *carry)
    # 'log/...' keys are metrics only; keep them out of the policy input.
    logs = {k: v for k, v in obs.items() if k.startswith('log/')}
    obs = {k: v for k, v in obs.items() if not k.startswith('log/')}
    carry, acts, outs = agent.policy(carry, obs)
    assert all(k not in acts for k in outs), (
        list(outs.keys()), list(acts.keys()))
    with elements.timer.section('put_states'):
      # Scatter the updated batched state back to per-env slots.
      for i, a in enumerate(envid):
        carries[a] = elements.tree.map(lambda x: x[i], carry, isleaf=islist)
    trans = {'envid': envid, 'is_eval': is_eval, **obs, **acts, **outs, **logs}
    # Mark arrays read-only; they are shared with postfn and the clients.
    [x.setflags(write=False) for x in trans.values()]
    acts = {**acts, 'reset': obs['is_last'].copy()}
    return acts, trans

  @elements.timer.section('donefn')
  def postfn(trans):
    # Runs after the response is sent: ship transitions to replay/logger
    # and periodically emit actor-side throughput and client stats.
    logs = {k: v for k, v in trans.items() if k.startswith('log/')}
    trans = {k: v for k, v in trans.items() if not k.startswith('log/')}
    replay.add_batch(trans)
    logger.tran({**trans, **logs})
    if should_log():
      stats = {}
      stats['fps/policy'] = fps.result()
      stats['parallel/ep_states'] = len(carries)
      stats.update(prefix(server.stats(), 'server/actor'))
      stats.update(prefix(logger.stats(), 'client/actor_logger'))
      stats.update(prefix(replay.stats(), 'client/actor_replay'))
      logger.add(stats)

  server = portal.BatchServer(args.actor_addr, name='Actor')
  server.bind('act', workfn, postfn, args.actor_batch, args.actor_threads)
  server.start()
| 128 |
+
|
| 129 |
+
|
| 130 |
+
@elements.timer.section('learner')
def parallel_learner(agent, barrier, args):
  """Train the agent forever on batches streamed from the replay server.

  Restores or creates the agent checkpoint, then loops: fetch a batch,
  run a train step, and on their respective clocks emit reports, metrics,
  and checkpoint saves.
  """

  agg = elements.Agg()
  usage = elements.Usage(**args.usage)
  should_log = embodied.GlobalClock(args.log_every)
  should_report = embodied.GlobalClock(args.report_every)
  should_save = embodied.GlobalClock(args.save_every)
  fps = elements.FPS()
  batch_steps = args.batch_size * args.batch_length

  cp = elements.Checkpoint(elements.Path(args.logdir) / 'ckpt/agent')
  cp.agent = agent
  if args.from_checkpoint:
    # Optionally warm-start agent weights (filtered by regex) from another
    # run before the regular load-or-save of this run's checkpoint.
    elements.checkpoint.load(args.from_checkpoint, dict(
        agent=bind(agent.load, regex=args.from_checkpoint_regex)))
  cp.load_or_save()
  logger = portal.Client(args.logger_addr, 'LearnerLogger', maxinflight=1)
  updater = portal.Client(
      args.replay_addr, 'LearnerReplayUpdater', maxinflight=8)
  barrier.wait()  # Release the actor thread now that weights are loaded.

  replays = {}
  received = collections.defaultdict(int)
  def parallel_stream(source, prefetch=2):
    # Generator that keeps `prefetch` sample requests in flight against the
    # replay server so the learner rarely waits on the network.
    replay = portal.Client(args.replay_addr, f'LearnerReplay{source.title()}')
    replays[source] = replay
    call = getattr(replay, f'sample_batch_{source}')
    futures = collections.deque([call() for _ in range(prefetch)])
    while True:
      futures.append(call())
      with elements.timer.section(f'stream_{source}_response'):
        data = futures.popleft().result()
      received[source] += 1
      yield data

  def evaluate(stream):
    # Run the agent's report pass over a fixed number of batches and
    # return the aggregated metrics.
    carry = agent.init_report(args.batch_size)
    agg = elements.Agg()
    for _ in range(args.consec_report * args.report_batches):
      batch = next(stream)
      carry, metrics = agent.report(carry, batch)
      agg.add(metrics)
    return agg.result()

  stream_train = iter(agent.stream(
      embodied.streams.Stateless(parallel_stream('train'))))
  stream_report = iter(agent.stream(
      embodied.streams.Stateless(parallel_stream('report'))))
  stream_eval = iter(agent.stream(
      embodied.streams.Stateless(parallel_stream('eval'))))
  carry = agent.init_train(args.batch_size)

  while True:

    with elements.timer.section('batch_next'):
      batch = next(stream_train)
    with elements.timer.section('train_step'):
      carry, outs, mets = agent.train(carry, batch)
    if 'replay' in outs:
      # Agents may return per-item replay updates (e.g. priorities).
      with elements.timer.section('replay_update'):
        updater.update(outs['replay'])

    # Brief yield so background threads (clients, clocks) get scheduled.
    time.sleep(0.0001)
    agg.add(mets)
    fps.step(batch_steps)

    # Skip reporting until at least one report batch has arrived.
    if should_report(skip=not received['report']):
      print('Report started...')
      with elements.timer.section('report'):
        logger.add(prefix(evaluate(stream_report), 'report'))
        if args.eval_envs and received['eval']:
          logger.add(prefix(evaluate(stream_eval), 'eval'))
      print('Report finished!')

    if should_log():
      with elements.timer.section('metrics'):
        stats = {}
        stats['fps/train'] = fps.result()
        stats['timer/agent'] = elements.timer.stats()['summary']
        stats.update(prefix(agg.result(), 'train'))
        stats.update(prefix(usage.stats(), 'usage/agent'))
        stats.update(prefix(logger.stats(), 'client/learner_logger'))
        for source, client in replays.items():
          stats.update(prefix(client.stats(), f'client/replay_{source}'))
        logger.add(stats)

    if should_save():
      cp.save()
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def parallel_replay(make_replay_train, make_replay_eval, make_stream, args):
  """Serve the train and eval replay buffers over the network.

  Binds add/sample/update endpoints on ``args.replay_addr``, enforces the
  train ratio with a samples-per-insert limiter, and periodically saves a
  checkpoint and emits replay statistics.

  The factories may arrive as cloudpickle bytes when launched by
  ``combined``.
  """
  if isinstance(make_replay_train, bytes):
    make_replay_train = cloudpickle.loads(make_replay_train)
  if isinstance(make_replay_eval, bytes):
    make_replay_eval = cloudpickle.loads(make_replay_eval)
  if isinstance(make_stream, bytes):
    make_stream = cloudpickle.loads(make_stream)

  replay_train = make_replay_train()
  replay_eval = make_replay_eval()

  # Train and report both sample from the train buffer; eval has its own.
  stream_train = iter(make_stream(replay_train, 'train'))
  stream_report = iter(make_stream(replay_train, 'report'))
  stream_eval = iter(make_stream(replay_eval, 'eval'))

  should_log = embodied.LocalClock(args.log_every)
  logger = portal.Client(args.logger_addr, 'ReplayLogger', maxinflight=1)
  usage = elements.Usage(**args.usage.update(nvsmi=False))
  limit_agg = elements.Agg()
  # `active` counts requests since the last checkpoint; saving is skipped
  # while the server is idle.
  active = elements.Counter()

  # Couples insert and sample rates to maintain args.train_ratio samples
  # per inserted frame (the ratio is per-step, hence / batch_length).
  limiter = embodied.limiters.SamplesPerInsert(
      args.train_ratio / args.batch_length,
      tolerance=4 * args.batch_size,
      minsize=args.batch_size * replay_train.length)

  def add_batch(data):
    # Unstack the batched transitions and route each to the train or eval
    # buffer; only train inserts are throttled by the limiter.
    active.increment()
    for i, envid in enumerate(data.pop('envid')):
      tran = {k: v[i] for k, v in data.items()}
      if tran.pop('is_eval', False):
        replay_eval.add(tran, envid)
        continue
      with elements.timer.section('replay_insert_wait'):
        dur = embodied.limiters.wait(
            limiter.want_insert, 'Replay insert waiting',
            limiter.__dict__)
      limit_agg.add('insert_wait_dur', dur, agg='sum')
      limit_agg.add('insert_wait_count', dur > 0, agg='sum')
      limit_agg.add('insert_wait_frac', dur > 0, agg='avg')
      limiter.insert()
      replay_train.add(tran, envid)
    return {}

  def sample_batch_train():
    # Account for one limiter sample per item in the batch before handing
    # out the next training batch.
    active.increment()
    with elements.timer.section('replay_sample_wait'):
      for _ in range(args.batch_size):
        dur = embodied.limiters.wait(
            limiter.want_sample, 'Replay sample waiting',
            limiter.__dict__)
        limit_agg.add('sample_wait_dur', dur, agg='sum')
        limit_agg.add('sample_wait_count', dur > 0, agg='sum')
        limit_agg.add('sample_wait_frac', dur > 0, agg='avg')
        limiter.sample()
    return next(stream_train)

  def sample_batch_report():
    # Report batches are not limited; they reuse the train buffer.
    active.increment()
    return next(stream_report)

  def sample_batch_eval():
    active.increment()
    return next(stream_eval)

  should_save = embodied.LocalClock(args.save_every)
  cp = elements.Checkpoint(elements.Path(args.logdir) / 'ckpt/replay')
  cp.replay_train = replay_train
  cp.replay_eval = replay_eval
  cp.limiter = limiter
  cp.load_or_save()

  server = portal.Server(args.replay_addr, name='Replay')
  # Single-worker endpoints: the buffers and streams are used serially.
  server.bind('add_batch', add_batch, workers=1)
  server.bind('sample_batch_train', sample_batch_train, workers=1)
  server.bind('sample_batch_report', sample_batch_report, workers=1)
  server.bind('sample_batch_eval', sample_batch_eval, workers=1)
  server.bind('update', lambda data: replay_train.update(data), workers=1)
  server.start(block=False)
  # Main-thread housekeeping loop: checkpointing and stats logging.
  while True:
    if should_save() and active > 0:
      active.reset()
      cp.save()
    if should_log():
      stats = {}
      stats['timer/replay'] = elements.timer.stats()['summary']
      stats.update(prefix(limit_agg.result(), 'limiter'))
      stats.update(prefix(replay_train.stats(), 'replay'))
      stats.update(prefix(replay_eval.stats(), 'replay_eval'))
      stats.update(prefix(usage.stats(), 'usage/replay'))
      stats.update(prefix(logger.stats(), 'client/replay_logger'))
      stats.update(prefix(server.stats(), 'server/replay'))
      logger.add(stats)
    time.sleep(1)
| 315 |
+
|
| 316 |
+
|
| 317 |
+
@elements.timer.section('logger')
def parallel_logger(make_logger, args):
  """Run the central logging server.

  Receives metric dicts ('add') and batched transitions ('tran') from the
  other workers, maintains per-env episode statistics, advances the global
  step counter, and periodically writes logs and checkpoints the step.
  """
  if isinstance(make_logger, bytes):
    make_logger = cloudpickle.loads(make_logger)

  logger = make_logger()
  should_log = embodied.LocalClock(args.log_every)
  usage = elements.Usage(**args.usage.update(nvsmi=False))

  # `active` tracks whether any requests arrived since the last log cycle.
  active = elements.Counter()
  should_save = embodied.LocalClock(args.save_every)
  cp = elements.Checkpoint(elements.Path(args.logdir) / 'ckpt/logger')
  cp.step = logger.step
  cp.load_or_save()

  parallel = elements.Agg()
  epstats = elements.Agg()
  # Per-env episode state, keyed by env id; `updated` records the last time
  # each env was heard from, `dones` whether its last episode completed.
  episodes = collections.defaultdict(elements.Agg)
  updated = collections.defaultdict(lambda: None)
  dones = collections.defaultdict(lambda: True)

  @elements.timer.section('addfn')
  def addfn(metrics):
    active.increment()
    logger.add(metrics)

  @elements.timer.section('tranfn')
  def tranfn(trans):
    # Handle a batch of transitions: bump the step counter by the number of
    # non-eval steps and update each env's episode aggregator.
    active.increment()
    now = time.time()
    envid = trans.pop('envid')
    logger.step.increment((~trans['is_eval']).sum())
    parallel.add('ep_starts', trans['is_first'].sum(), agg='sum')
    parallel.add('ep_ends', trans['is_last'].sum(), agg='sum')

    for i, addr in enumerate(envid):
      tran = {k: v[i] for k, v in trans.items()}

      updated[addr] = now
      episode = episodes[addr]
      if tran['is_first']:
        episode.reset()
        # A new episode starting before the last one ended means the
        # previous episode was abandoned (e.g. after a disconnect).
        parallel.add('ep_abandoned', int(not dones[addr]), agg='sum')
      dones[addr] = tran['is_last']

      episode.add('score', tran['reward'], agg='sum')
      episode.add('length', 1, agg='sum')
      episode.add('rewards', tran['reward'], agg='stack')

      # Only record policy videos from one env to bound memory and log size.
      first_addr = next(iter(episodes.keys()))
      for key, value in tran.items():
        if value.dtype == np.uint8 and value.ndim == 3:
          if addr == first_addr:
            episode.add(f'policy_{key}', value, agg='stack')
        elif key.startswith('log/'):
          assert value.ndim == 0, (key, value.shape, value.dtype)
          episode.add(key + '/avg', value, agg='avg')
          episode.add(key + '/max', value, agg='max')
          episode.add(key + '/sum', value, agg='sum')
      if tran['is_last']:
        result = episode.result()
        logger.add({
            'score': result.pop('score'),
            # NOTE(review): length is reported minus one here, unlike
            # eval_only's logfn — presumably excluding the reset step.
            'length': result.pop('length') - 1,
        }, prefix='episode')
        rew = result.pop('rewards')
        if len(rew) > 1:
          # Fraction of steps where the reward changed noticeably.
          result['reward_rate'] = (np.abs(rew[1:] - rew[:-1]) >= 0.01).mean()
        epstats.add(result)

    # Drop state for envs that have gone silent past the timeout.
    for addr, last in list(updated.items()):
      if now - last >= args.episode_timeout:
        print('Dropping episode statistics due to timeout.')
        del episodes[addr]
        del updated[addr]

  server = portal.Server(args.logger_addr, 'Logger')
  server.bind('add', addfn)
  server.bind('tran', tranfn)
  server.start(block=False)
  last_step = int(logger.step)
  # Main-thread loop: periodic log writing and step checkpointing.
  while True:
    time.sleep(1)
    if should_log() and active > 0:
      active.reset()
      with elements.timer.section('metrics'):
        logger.add({'timer/logger': elements.timer.stats()['summary']})
        logger.add(parallel.result(), prefix='parallel')
        logger.add(epstats.result(), prefix='epstats')
        logger.add(usage.stats(), prefix='usage/logger')
        logger.add(server.stats(), prefix='server/logger')
      # Avoid writing duplicate log entries when no steps were taken.
      if logger.step == last_step:
        continue
      logger.write()
      last_step = int(logger.step)
    if should_save():
      cp.save()
| 414 |
+
|
| 415 |
+
|
| 416 |
+
@elements.timer.section('env')
def parallel_env(make_env, envid, args, is_eval=False):
  """Worker loop for one environment process.

  Repeatedly steps the environment, ships each observation to the remote
  actor server at ``args.actor_addr``, and applies the action it sends back.
  Worker 0 additionally reports FPS, usage, and timer statistics to the
  logger server. Runs forever; the hosting process is expected to kill it.

  Args:
    make_env: Callable building the env, or its cloudpickled bytes when the
      worker was spawned in a subprocess.
    envid: Non-negative index of this environment worker.
    args: Config providing actor_addr, logger_addr, log_every, usage, etc.
    is_eval: Whether outgoing transitions are tagged as evaluation data.
  """
  if isinstance(make_env, bytes):
    # The constructor may arrive serialized across a process boundary.
    make_env = cloudpickle.loads(make_env)
  assert envid >= 0, envid
  name = f'Env{envid:05}'
  # Deliberately shadows the builtin so all output is tagged with the worker.
  print = lambda x: elements.print(f'[{name}] {x}', flush=True)

  should_log = embodied.LocalClock(args.log_every)
  fps = elements.FPS()
  if envid == 0:
    # Only worker 0 creates a logger/usage tracker, avoiding duplicate stats.
    logger = portal.Client(args.logger_addr, f'{name}Logger', maxinflight=1)
    usage = elements.Usage(**args.usage.update(nvsmi=False))

  print('Make env')
  env = make_env(envid)
  actor = portal.Client(args.actor_addr, name, autoconn=False)
  actor.connect()

  done = True
  while True:

    if done:
      # Begin a new episode: random action with the reset flag raised.
      act = {k: v.sample() for k, v in env.act_space.items()}
      act['reset'] = True
      score, length = 0, 0

    scope_name = 'reset' if act['reset'] else 'step'
    with elements.timer.section(scope_name):
      obs = env.step(act)
    # C-contiguous copies so the arrays can be serialized for the request.
    obs = {k: np.asarray(v, order='C') for k, v in obs.items()}
    obs['is_eval'] = is_eval
    score += obs['reward']
    length += 1
    fps.step(1)
    done = obs['is_last']
    if done and envid == 0:
      print(f'Episode of length {length} with score {score:.2f}')

    try:
      with elements.timer.section('request'):
        future = actor.act({'envid': envid, **obs})
      with elements.timer.section('response'):
        act = future.result()
    except portal.Disconnected:
      # Actor server went away; reconnect and restart the episode so the
      # agent's recurrent state and ours stay consistent.
      print('Env lost connection to agent')
      actor.connect()
      done = True

    if should_log() and envid == 0:
      stats = {}
      stats['fps/env'] = fps.result()
      stats['timer/env'] = elements.timer.stats()['summary']
      stats.update(prefix(usage.stats(), 'usage/env'))
      stats.update(prefix(logger.stats(), 'client/env_logger'))
      stats.update(prefix(actor.stats(), 'client/env_actor'))
      logger.add(stats)
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
def parallel_envs(make_env, make_env_eval, args):
  """Spawn one `parallel_env` process per train and eval environment.

  Train workers get ids ``0..envs-1`` and eval workers continue the id range
  at ``envs..envs+eval_envs-1`` with ``is_eval=True``. Blocks until the
  worker processes finish via ``portal.run``.
  """
  train_ids = range(args.envs)
  eval_ids = range(args.envs, args.envs + args.eval_envs)
  workers = [
      portal.Process(parallel_env, make_env, i, args) for i in train_ids]
  workers += [
      portal.Process(parallel_env, make_env_eval, i, args, True)
      for i in eval_ids]
  portal.run(workers)
|
models/embodied/run/train.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
from functools import partial as bind
|
| 3 |
+
|
| 4 |
+
import elements
|
| 5 |
+
import embodied
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def train(make_agent, make_replay, make_env, make_stream, make_logger, args):
  """Single-process training loop.

  Drives the environments with the agent's train-mode policy, stores
  transitions in the replay buffer, and interleaves gradient updates at a
  rate of ``args.train_ratio`` replayed steps per environment step.
  Periodically reports agent metrics, writes logs, and checkpoints.

  Args:
    make_agent: Factory for the agent (policy/train/report interface).
    make_replay: Factory for the replay buffer.
    make_env: Factory taking an env index and returning an environment.
    make_stream: Factory taking (replay, mode) and returning a data stream.
    make_logger: Factory for the metrics logger; its counter is the global
      step.
    args: Config with logdir, steps, batch sizes, schedule intervals, etc.
  """

  agent = make_agent()
  replay = make_replay()
  logger = make_logger()

  logdir = elements.Path(args.logdir)
  step = logger.step
  usage = elements.Usage(**args.usage)
  train_agg = elements.Agg()
  epstats = elements.Agg()
  # Per-worker episode aggregators, keyed by the driver's worker index.
  episodes = collections.defaultdict(elements.Agg)
  policy_fps = elements.FPS()
  train_fps = elements.FPS()

  batch_steps = args.batch_size * args.batch_length
  # Number of train calls per env step, so that each env step is replayed
  # train_ratio times on average.
  should_train = elements.when.Ratio(args.train_ratio / batch_steps)
  should_log = embodied.LocalClock(args.log_every)
  should_report = embodied.LocalClock(args.report_every)
  should_save = embodied.LocalClock(args.save_every)

  @elements.timer.section('logfn')
  def logfn(tran, worker):
    # Accumulate per-episode statistics and flush them when an episode ends.
    episode = episodes[worker]
    tran['is_first'] and episode.reset()
    episode.add('score', tran['reward'], agg='sum')
    episode.add('length', 1, agg='sum')
    episode.add('rewards', tran['reward'], agg='stack')
    for key, value in tran.items():
      if value.dtype == np.uint8 and value.ndim == 3:
        # Image observation; record a video strip for worker 0 only.
        if worker == 0:
          episode.add(f'policy_{key}', value, agg='stack')
      elif key.startswith('log/'):
        assert value.ndim == 0, (key, value.shape, value.dtype)
        episode.add(key + '/avg', value, agg='avg')
        episode.add(key + '/max', value, agg='max')
        episode.add(key + '/sum', value, agg='sum')
    if tran['is_last']:
      result = episode.result()
      logger.add({
          'score': result.pop('score'),
          'length': result.pop('length'),
      }, prefix='episode')
      rew = result.pop('rewards')
      if len(rew) > 1:
        # Fraction of steps whose reward changed notably from the previous
        # step; a cheap density measure of the reward signal.
        result['reward_rate'] = (np.abs(rew[1:] - rew[:-1]) >= 0.01).mean()
      epstats.add(result)

  fns = [bind(make_env, i) for i in range(args.envs)]
  driver = embodied.Driver(fns, parallel=not args.debug)
  driver.on_step(lambda tran, _: step.increment())
  driver.on_step(lambda tran, _: policy_fps.step())
  driver.on_step(replay.add)
  driver.on_step(logfn)

  stream_train = iter(agent.stream(make_stream(replay, 'train')))
  stream_report = iter(agent.stream(make_stream(replay, 'report')))

  # Single-element list so the closure below can rebind the carry.
  carry_train = [agent.init_train(args.batch_size)]
  carry_report = agent.init_report(args.batch_size)

  def trainfn(tran, worker):
    # Wait for at least one full batch of data before training.
    if len(replay) < args.batch_size * args.batch_length:
      return
    for _ in range(should_train(step)):
      with elements.timer.section('stream_next'):
        batch = next(stream_train)
      carry_train[0], outs, mets = agent.train(carry_train[0], batch)
      train_fps.step(batch_steps)
      if 'replay' in outs:
        # Agent may return per-item data (e.g. priorities) for the replay.
        replay.update(outs['replay'])
      train_agg.add(mets, prefix='train')
  driver.on_step(trainfn)

  cp = elements.Checkpoint(logdir / 'ckpt')
  cp.step = step
  cp.agent = agent
  cp.replay = replay
  if args.from_checkpoint:
    # Optionally warm-start only the agent weights from another run.
    elements.checkpoint.load(args.from_checkpoint, dict(
        agent=bind(agent.load, regex=args.from_checkpoint_regex)))
  cp.load_or_save()

  print('Start training loop')
  policy = lambda *args: agent.policy(*args, mode='train')
  driver.reset(agent.init_policy)
  while step < args.steps:

    # Interleave short bursts of env interaction with the periodic tasks.
    driver(policy, steps=10)

    if should_report(step) and len(replay):
      agg = elements.Agg()
      for _ in range(args.consec_report * args.report_batches):
        carry_report, mets = agent.report(carry_report, next(stream_report))
        agg.add(mets)
      logger.add(agg.result(), prefix='report')

    if should_log(step):
      logger.add(train_agg.result())
      logger.add(epstats.result(), prefix='epstats')
      logger.add(replay.stats(), prefix='replay')
      logger.add(usage.stats(), prefix='usage')
      logger.add({'fps/policy': policy_fps.result()})
      logger.add({'fps/train': train_fps.result()})
      logger.add({'timer': elements.timer.stats()['summary']})
      logger.write()

    if should_save(step):
      cp.save()

  logger.close()
|
models/embodied/run/train_eval.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
from functools import partial as bind
|
| 3 |
+
|
| 4 |
+
import elements
|
| 5 |
+
import embodied
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def train_eval(
    make_agent,
    make_replay_train,
    make_replay_eval,
    make_env_train,
    make_env_eval,
    make_stream,
    make_logger,
    args):
  """Training loop with periodic evaluation episodes.

  Like the plain train loop, but maintains a second driver and replay buffer
  for evaluation environments. At every report interval it runs
  ``args.eval_eps`` episodes with the eval-mode policy and reports metrics
  on both train and eval replay data.

  Args:
    make_agent: Factory for the agent.
    make_replay_train: Factory for the training replay buffer.
    make_replay_eval: Factory for the evaluation replay buffer.
    make_env_train: Factory taking an index, returning a training env.
    make_env_eval: Factory taking an index, returning an evaluation env.
    make_stream: Factory taking (replay, mode) and returning a data stream.
    make_logger: Factory for the metrics logger.
    args: Config with logdir, steps, batch sizes, schedule intervals, etc.
  """

  agent = make_agent()
  replay_train = make_replay_train()
  replay_eval = make_replay_eval()
  logger = make_logger()

  logdir = elements.Path(args.logdir)
  logdir.mkdir()
  print('Logdir', logdir)
  step = logger.step
  usage = elements.Usage(**args.usage)
  agg = elements.Agg()
  # Separate per-worker episode aggregators for the two drivers.
  train_episodes = collections.defaultdict(elements.Agg)
  train_epstats = elements.Agg()
  eval_episodes = collections.defaultdict(elements.Agg)
  eval_epstats = elements.Agg()
  policy_fps = elements.FPS()
  train_fps = elements.FPS()

  batch_steps = args.batch_size * args.batch_length
  should_train = elements.when.Ratio(args.train_ratio / batch_steps)
  should_log = elements.when.Clock(args.log_every)
  should_report = elements.when.Clock(args.report_every)
  should_save = elements.when.Clock(args.save_every)

  @elements.timer.section('logfn')
  def logfn(tran, worker, mode):
    # Shared episode-statistics callback; `mode` selects the aggregators.
    episodes = dict(train=train_episodes, eval=eval_episodes)[mode]
    epstats = dict(train=train_epstats, eval=eval_epstats)[mode]
    episode = episodes[worker]
    tran['is_first'] and episode.reset()
    episode.add('score', tran['reward'], agg='sum')
    episode.add('length', 1, agg='sum')
    episode.add('rewards', tran['reward'], agg='stack')
    for key, value in tran.items():
      if value.dtype == np.uint8 and value.ndim == 3:
        # Image observation; record a video strip for worker 0 only.
        if worker == 0:
          episode.add(f'policy_{key}', value, agg='stack')
      elif key.startswith('log/'):
        assert value.ndim == 0, (key, value.shape, value.dtype)
        episode.add(key + '/avg', value, agg='avg')
        episode.add(key + '/max', value, agg='max')
        episode.add(key + '/sum', value, agg='sum')
    if tran['is_last']:
      result = episode.result()
      logger.add({
          'score': result.pop('score'),
          'length': result.pop('length'),
      }, prefix='episode')
      rew = result.pop('rewards')
      if len(rew) > 1:
        # Fraction of steps with a notable reward change.
        result['reward_rate'] = (np.abs(rew[1:] - rew[:-1]) >= 0.01).mean()
      epstats.add(result)

  fns = [bind(make_env_train, i) for i in range(args.envs)]
  driver_train = embodied.Driver(fns, parallel=(not args.debug))
  driver_train.on_step(lambda tran, _: step.increment())
  driver_train.on_step(lambda tran, _: policy_fps.step())
  driver_train.on_step(replay_train.add)
  driver_train.on_step(bind(logfn, mode='train'))

  fns = [bind(make_env_eval, i) for i in range(args.eval_envs)]
  driver_eval = embodied.Driver(fns, parallel=(not args.debug))
  driver_eval.on_step(replay_eval.add)
  driver_eval.on_step(bind(logfn, mode='eval'))
  driver_eval.on_step(lambda tran, _: policy_fps.step())

  stream_train = iter(agent.stream(make_stream(replay_train, 'train')))
  stream_report = iter(agent.stream(make_stream(replay_train, 'report')))
  stream_eval = iter(agent.stream(make_stream(replay_eval, 'eval')))

  # Single-element list so the closure below can rebind the carry.
  carry_train = [agent.init_train(args.batch_size)]
  carry_report = agent.init_report(args.batch_size)
  carry_eval = agent.init_report(args.batch_size)

  def trainfn(tran, worker):
    # Wait for at least one full batch of data before training.
    if len(replay_train) < args.batch_size * args.batch_length:
      return
    for _ in range(should_train(step)):
      with elements.timer.section('stream_next'):
        batch = next(stream_train)
      carry_train[0], outs, mets = agent.train(carry_train[0], batch)
      train_fps.step(batch_steps)
      if 'replay' in outs:
        # Agent may return per-item data (e.g. priorities) for the replay.
        replay_train.update(outs['replay'])
      agg.add(mets, prefix='train')
  driver_train.on_step(trainfn)

  def reportfn(carry, stream):
    # Aggregate agent report metrics over a fixed number of batches.
    agg = elements.Agg()
    for _ in range(args.report_batches):
      batch = next(stream)
      carry, mets = agent.report(carry, batch)
      agg.add(mets)
    return carry, agg.result()

  cp = elements.Checkpoint(logdir / 'ckpt')
  cp.step = step
  cp.agent = agent
  cp.replay_train = replay_train
  cp.replay_eval = replay_eval
  if args.from_checkpoint:
    # Optionally warm-start only the agent weights from another run.
    elements.checkpoint.load(args.from_checkpoint, dict(
        agent=bind(agent.load, regex=args.from_checkpoint_regex)))
  cp.load_or_save()
  should_save(step)  # Register that we just saved.

  print('Start training loop')
  train_policy = lambda *args: agent.policy(*args, mode='train')
  eval_policy = lambda *args: agent.policy(*args, mode='eval')
  driver_train.reset(agent.init_policy)
  while step < args.steps:

    if should_report(step):
      print('Evaluation')
      # Fresh policy state so eval episodes do not inherit train state.
      driver_eval.reset(agent.init_policy)
      driver_eval(eval_policy, episodes=args.eval_eps)
      logger.add(eval_epstats.result(), prefix='epstats')
      if len(replay_train):
        carry_report, mets = reportfn(carry_report, stream_report)
        logger.add(mets, prefix='report')
      if len(replay_eval):
        carry_eval, mets = reportfn(carry_eval, stream_eval)
        logger.add(mets, prefix='eval')

    driver_train(train_policy, steps=10)

    if should_log(step):
      logger.add(agg.result())
      logger.add(train_epstats.result(), prefix='epstats')
      logger.add(replay_train.stats(), prefix='replay')
      logger.add(usage.stats(), prefix='usage')
      logger.add({'fps/policy': policy_fps.result()})
      logger.add({'fps/train': train_fps.result()})
      logger.add({'timer': elements.timer.stats()['summary']})
      logger.write()

    if should_save(step):
      cp.save()

  logger.close()
|
models/embodied/tests/test_driver.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from functools import partial as bind
|
| 2 |
+
|
| 3 |
+
import embodied
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestDriver:
  """Behavioral tests for embodied.Driver using the Dummy environment.

  The Dummy env with length=N terminates after N steps, so one episode
  yields N + 1 transitions (the reset transition plus N steps).
  """

  def test_episode_length(self):
    agent = self._make_agent()
    driver = embodied.Driver([self._make_env])
    driver.reset(agent.init_policy)
    seq = []
    driver.on_step(lambda tran, _: seq.append(tran))
    driver(agent.policy, episodes=1)
    # length=10 env: 1 reset transition + 10 steps.
    assert len(seq) == 11

  def test_first_step(self):
    agent = self._make_agent()
    driver = embodied.Driver([self._make_env])
    driver.reset(agent.init_policy)
    seq = []
    driver.on_step(lambda tran, _: seq.append(tran))
    driver(agent.policy, episodes=2)
    # is_first marks exactly the first transition of each 11-step episode.
    for index in [0, 11]:
      assert seq[index]['is_first'].item() is True
      assert seq[index]['is_last'].item() is False
    for index in [1, 10, 12]:
      assert seq[index]['is_first'].item() is False

  def test_last_step(self):
    agent = self._make_agent()
    driver = embodied.Driver([self._make_env])
    driver.reset(agent.init_policy)
    seq = []
    driver.on_step(lambda tran, _: seq.append(tran))
    driver(agent.policy, episodes=2)
    # is_last marks exactly the final transition of each episode.
    for index in [10, 21]:
      assert seq[index]['is_last'].item() is True
      assert seq[index]['is_first'].item() is False
    for index in [0, 1, 9, 11, 20]:
      assert seq[index]['is_last'].item() is False

  def test_env_reset(self):
    agent = self._make_agent()
    driver = embodied.Driver([bind(self._make_env, length=5)])
    driver.reset(agent.init_policy)
    seq = []
    driver.on_step(lambda tran, _: seq.append(tran))
    # Constant policy so the recorded actions are predictable.
    action = {'act_disc': np.ones(1, int), 'act_cont': np.zeros((1, 6), float)}
    policy = lambda carry, obs: (carry, action, {})
    driver(policy, episodes=2)
    assert len(seq) == 12
    # Transpose list-of-dicts into dict-of-arrays for whole-sequence checks.
    seq = {k: np.array([seq[i][k] for i in range(len(seq))]) for k in seq[0]}
    assert (seq['is_first'] == [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]).all()
    assert (seq['is_last'] == [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1]).all()
    # The driver overrides the action with a reset on is_last steps, which
    # also zeroes the recorded discrete action.
    assert (seq['reset'] == [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1]).all()
    assert (seq['act_disc'] == [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0]).all()

  def test_agent_inputs(self):
    agent = self._make_agent()
    driver = embodied.Driver([self._make_env])
    driver.reset(agent.init_policy)
    inputs = []
    states = []
    def policy(carry, obs, mode='train'):
      # Record what the driver feeds the policy and thread a sentinel carry.
      inputs.append(obs)
      states.append(carry)
      _, act, _ = agent.policy(carry, obs, mode)
      return 'carry', act, {}
    seq = []
    driver.on_step(lambda tran, _: seq.append(tran))
    driver(policy, episodes=2)
    assert len(seq) == 22
    # First call receives the initial (empty) carry; later calls receive
    # whatever the previous call returned.
    assert states == ([()] + ['carry'] * 21)
    for index in [0, 11]:
      assert inputs[index]['is_first'].item() is True
    for index in [1, 10, 12, 21]:
      assert inputs[index]['is_first'].item() is False
    for index in [10, 21]:
      assert inputs[index]['is_last'].item() is True
    for index in [0, 1, 9, 11, 20]:
      assert inputs[index]['is_last'].item() is False

  def test_unexpected_reset(self):

    class UnexpectedReset(embodied.Wrapper):
      """Send is_first without preceeding is_last."""
      def __init__(self, env, when):
        super().__init__(env)
        self._when = when
        self._step = 0
      def step(self, action):
        # Force a reset on step `when`, simulating an env restarting early.
        if self._step == self._when:
          action = action.copy()
          action['reset'] = np.ones_like(action['reset'])
        self._step += 1
        return self.env.step(action)

    env = self._make_env(length=4)
    env = UnexpectedReset(env, when=3)
    agent = self._make_agent()
    driver = embodied.Driver([lambda: env])
    driver.reset(agent.init_policy)
    steps = []
    driver.on_step(lambda tran, _: steps.append(tran))
    driver(agent.policy, episodes=1)
    # The truncated first episode (3 steps) is followed by a full one.
    assert len(steps) == 8
    steps = {k: np.array([x[k] for x in steps]) for k in steps[0]}
    assert (steps['reset'] == [0, 0, 0, 0, 0, 0, 0, 1]).all()
    assert (steps['is_first'] == [1, 0, 0, 1, 0, 0, 0, 0]).all()
    assert (steps['is_last'] == [0, 0, 0, 0, 0, 0, 0, 1]).all()

  def _make_env(self, length=10):
    # Discrete-action dummy env that ends episodes after `length` steps.
    from embodied.envs import dummy
    return dummy.Dummy('disc', length=length)

  def _make_agent(self):
    env = self._make_env()
    agent = embodied.RandomAgent(env.obs_space, env.act_space)
    env.close()
    return agent
|
models/embodied/tests/test_layer_scan.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import jax
|
| 2 |
+
import jax.numpy as jnp
|
| 3 |
+
import ninjax as nj
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from embodied.jax import utils
|
| 7 |
+
|
| 8 |
+
# Short dtype aliases used throughout these tests.
f32 = jnp.float32
i32 = jnp.int32
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class Layer(nj.Module):
  """Linear layer with deliberate ninjax-context side effects.

  Used to verify that LayerScan correctly handles per-layer parameters,
  per-layer state writes, and reads/writes of global context entries.
  """

  units: int = 8

  def __call__(self, x, c, k):
    # `c` is the per-layer scanned input; `k` is broadcast to all layers.
    assert x.shape[1:] == (self.units,)
    assert c.shape == (7,)
    assert k.shape == (13, 7)
    shape = (x.shape[-1], self.units)
    winit = lambda: jax.random.normal(nj.seed(), shape, f32)
    x = x @ self.value('kernel', winit)
    # Create a global counter lazily inside the scanned body and bump it;
    # also bump a counter created outside the scan (in Net).
    if 'outer3' not in nj.context():
      nj.context()['outer3'] = jnp.zeros((), i32)
    nj.context()['outer3'] += 1
    nj.context()['outer1'] += 1
    # Per-layer state: incremented by a global value on every call.
    inner = self.value('inner', jnp.array(0))
    self.write('inner', inner + nj.context()['outer2'])
    return x
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class Net(nj.Module):
  """Stack of `layers` identical Layer modules applied via LayerScan."""

  layers: int = 4
  units: int = 8

  def __call__(self, x):
    # Global counters created outside the scan; outer1 is also bumped
    # inside each scanned layer, outer2 is only read there.
    if 'outer1' not in nj.context():
      nj.context()['outer1'] = jnp.ones((), i32)
    if 'outer2' not in nj.context():
      nj.context()['outer2'] = jnp.ones((), i32)
    nj.context()['outer1'] += 1

    module = self.sub('linear', Layer, units=self.units)
    # `c` has a leading layer axis (scanned); `k` is shared by all layers.
    c = jnp.zeros((self.layers, 7))
    k = jnp.zeros((13, 7))
    x = utils.LayerScan(module, self.layers)(x, c, k=k)

    return x

  def loss(self, x):
    # Scalar loss for the gradient test.
    return self(x).mean()
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class TestLayerScan:
  """Tests for embodied.jax.utils.LayerScan parameter/state semantics."""

  def test_init(self, L=4, B=2, D=8):
    x = np.random.normal(0, 1, (B, D))
    net = Net(layers=L, units=D, name='net')
    params = nj.init(net)({}, x, seed=0)
    assert set(params.keys()) == {
        'outer1', 'outer2', 'outer3',
        'net/linear/kernel', 'net/linear/inner'}
    # Scanned layer parameters gain a leading layer axis.
    assert params['net/linear/kernel'].shape == (L, D, D)
    # During init, context increments are not carried into the result.
    assert params['outer1'] == 1
    assert params['outer2'] == 1
    assert params['outer3'] == 0
    assert params['net/linear/inner'].shape == (L,)
    assert (params['net/linear/inner'] == 0).all()
    # Each layer must get an independent random initialization.
    for i in range(1, L):
      assert not jnp.allclose(
          params['net/linear/kernel'][0],
          params['net/linear/kernel'][i])

  def test_apply(self, L=4, B=2, D=8):
    x = np.random.normal(0, 1, (B, D))
    net = Net(layers=L, units=D, name='net')
    params = nj.init(net)({}, x, seed=0)
    params, out = nj.pure(net)(params, x)
    assert out.shape == (B, D)
    # outer1: +1 in Net plus +1 per scanned layer, starting from 1.
    assert params['outer1'] == L + 2
    # outer2 is only read inside the scan.
    assert params['outer2'] == 1
    # outer3: created inside the scan, bumped once per layer.
    assert params['outer3'] == L
    # Per-layer state advanced by outer2 (== 1) in every layer.
    assert params['net/linear/inner'].shape == (L,)
    assert (params['net/linear/inner'] == 1).all()

  def test_grad(self, L=4, B=2, D=8):
    x = np.random.normal(0, 1, (B, D))
    net = Net(layers=L, units=D, name='net')
    def fn(x):
      if nj.creating():
        # Build parameters on the tracing pass before taking gradients.
        net(x)
      # Select only the float parameters (the kernel) for the update.
      params = {k: v for k, v in net.values.items() if v.dtype == f32}
      params = {net.path + '/' + k: v for k, v in params.items()}
      loss, _, grads = nj.grad(lambda x: net(x).mean(), params.keys())(x)
      # One SGD step written back into the context.
      params = {k: v - 0.1 * grads[k] for k, v in params.items()}
      nj.context().update(params)
      return loss
    params = nj.init(net)({}, x, seed=0)
    params, loss = nj.pure(fn)(params, x)
    assert loss.shape == ()
|
models/embodied/tests/test_parallel.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import deque
|
| 2 |
+
from functools import partial as bind
|
| 3 |
+
|
| 4 |
+
import elements
|
| 5 |
+
import embodied
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pytest
|
| 8 |
+
import zerofun
|
| 9 |
+
from embodied.envs import dummy
|
| 10 |
+
|
| 11 |
+
import utils
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestParallel:
  """End-to-end test of the parallel run loop.

  The test agent reports counters (env steps, replay steps, reports, saves,
  loads) to a local stats server, which this test inspects after the run.
  """

  @pytest.mark.parametrize('train_ratio, eval_envs', (
      (-1, 2), (1, 2), (1, 0), (32, 2),
  ))
  def test_run_loop(self, tmpdir, train_ratio, eval_envs):
    addr = 'ipc:///tmp/teststats'
    # Keep only the most recent stats report from the agent.
    received = deque(maxlen=1)
    server = zerofun.Server(addr, name='TestStats')
    server.bind('report', lambda stats: received.append(stats))
    server.start()

    args = self._make_args(tmpdir, train_ratio, eval_envs)

    embodied.run.parallel.combined(
        bind(self._make_agent, addr),
        bind(self._make_replay, args),
        bind(self._make_replay, args),
        self._make_env,
        self._make_env,
        self._make_logger, args)

    stats = received[0]
    print('Stats:', stats)
    assert stats['env_steps'] > 400
    if args.train_ratio > -1:
      # Replay throughput should track env throughput times the ratio
      # (loose tolerances: timing-dependent).
      replay_steps = stats['env_steps'] * args.train_ratio
      assert np.allclose(stats['replay_steps'], replay_steps, 100, 0.1)
    else:
      assert stats['replay_steps'] > 100
    assert stats['reports'] >= 1
    assert stats['saves'] >= 2
    assert stats['loads'] == 0

    # Second run from the same logdir must restore the saved checkpoint.
    embodied.run.parallel.combined(
        bind(self._make_agent, addr),
        bind(self._make_replay, args),
        bind(self._make_replay, args),
        self._make_env,
        self._make_env,
        self._make_logger, args)
    stats = received[0]
    assert stats['loads'] == 1

  def _make_agent(self, queue):
    env = self._make_env(0)
    agent = utils.TestAgent(env.obs_space, env.act_space, queue)
    env.close()
    return agent

  def _make_env(self, index):
    return dummy.Dummy('disc', size=(64, 64), length=100)

  def _make_replay(self, args, train_ratio=None):
    kwargs = {'length': args.batch_length, 'capacity': 1e4}
    if train_ratio:
      # Rate-limit sampling so training tracks insertion at the ratio.
      kwargs['samples_per_insert'] = train_ratio / args.batch_length
    return embodied.replay.Replay(**kwargs)

  def _make_logger(self):
    return elements.Logger(elements.Counter(), [
        elements.logger.TerminalOutput(),
    ])

  def _make_args(self, logdir, train_ratio, eval_envs):
    # Short-duration config exercising all schedules within ~5 seconds.
    return elements.Config(
        duration=5.0,
        train_ratio=float(train_ratio),
        log_every=0.1,
        report_every=0.2,
        save_every=0.2,
        envs=4,
        eval_envs=int(eval_envs),
        report_batches=1,
        from_checkpoint='',
        episode_timeout=10,
        actor_addr='tcp://localhost:{auto}',
        replay_addr='tcp://localhost:{auto}',
        logger_addr='tcp://localhost:{auto}',
        ipv6=False,
        actor_batch=-1,
        actor_threads=2,
        agent_process=False,
        remote_replay=False,
        remote_envs=False,
        usage=dict(psutil=True, nvsmi=False),
        debug=False,
        logdir=str(logdir),
        batch_size=8,
        batch_length=16,
        replay_context=0,
        report_length=8,
    )
|
models/embodied/tests/test_replay.py
ADDED
|
@@ -0,0 +1,357 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import threading
|
| 3 |
+
import time
|
| 4 |
+
|
| 5 |
+
import elements
|
| 6 |
+
import embodied
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pytest
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Replay implementations exercised by each group of tests below.
REPLAYS_UNLIMITED = [
    embodied.replay.Replay,
    # embodied.replay.Reverb,
]

# Implementations that persist their data as chunk files on disk.
REPLAYS_SAVECHUNKS = [
    embodied.replay.Replay,
]

# Implementations that sample stored sequences uniformly.
REPLAYS_UNIFORM = [
    embodied.replay.Replay,
]
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def unbatched(dataset):
  """Strip the leading batch dimension, yielding one sequence per batch."""
  for batch in dataset:
    sequence = {}
    for key, value in batch.items():
      sequence[key] = value[0]
    yield sequence
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@pytest.mark.filterwarnings('ignore:.*Pillow.*')
@pytest.mark.filterwarnings('ignore:.*the imp module.*')
@pytest.mark.filterwarnings('ignore:.*distutils.*')
class TestReplay:
  """Behavioral tests for replay buffer implementations.

  Each test is parametrized over the replay classes declared at module
  level, so alternative implementations can be checked against the same
  contract.
  """

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  def test_multiple_keys(self, Replay):
    """Sampled sequences contain every inserted key plus 'stepid'."""
    replay = Replay(length=5, capacity=10)
    for step in range(30):
      replay.add({'image': np.zeros((64, 64, 3)), 'action': np.zeros(12)})
    seq = next(unbatched(replay.dataset(1)))
    assert set(seq.keys()) == {'stepid', 'image', 'action'}
    # stepid entries are presumably 20-byte unique step ids -- TODO confirm.
    assert seq['stepid'].shape == (5, 20)
    assert seq['image'].shape == (5, 64, 64, 3)
    assert seq['action'].shape == (5, 12)

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  @pytest.mark.parametrize(
      'length,workers,capacity',
      [(1, 1, 1), (2, 1, 2), (5, 1, 10), (1, 2, 2), (5, 3, 15), (2, 7, 20)])
  def test_capacity_exact(self, Replay, length, workers, capacity):
    """The item count matches overlapping windows per worker, capped at capacity."""
    replay = Replay(length, capacity)
    for step in range(30):
      for worker in range(workers):
        replay.add({'step': step}, worker)
      # Each worker contributes one item per full length-sized window.
      target = min(workers * max(0, (step + 1) - length + 1), capacity)
      assert len(replay) == target

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  @pytest.mark.parametrize(
      'length,workers,capacity,chunksize',
      [(1, 1, 1, 128), (2, 1, 2, 128), (5, 1, 10, 128), (1, 2, 2, 128),
       (5, 3, 15, 128), (2, 7, 20, 128), (7, 2, 27, 4)])
  def test_sample_sequences(
      self, Replay, length, workers, capacity, chunksize):
    """Sampled sequences are consecutive steps from a single worker."""
    replay = Replay(length, capacity, chunksize=chunksize)
    for step in range(30):
      for worker in range(workers):
        replay.add({'step': step, 'worker': worker}, worker)
    dataset = unbatched(replay.dataset(1))
    for _ in range(10):
      seq = next(dataset)
      assert (seq['step'] - seq['step'][0] == np.arange(length)).all()
      assert (seq['worker'] == seq['worker'][0]).all()

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  @pytest.mark.parametrize(
      'length,capacity', [(1, 1), (2, 2), (5, 10), (1, 2), (5, 15), (2, 20)])
  def test_sample_single(self, Replay, length, capacity):
    """With exactly one stored sequence, sampling always returns it."""
    replay = Replay(length, capacity)
    for step in range(length):
      replay.add({'step': step})
    dataset = unbatched(replay.dataset(1))
    for _ in range(10):
      seq = next(dataset)
      assert (seq['step'] == np.arange(length)).all()

  @pytest.mark.parametrize('Replay', REPLAYS_UNIFORM)
  def test_sample_uniform(self, Replay):
    """All stored sequences are sampled with roughly equal frequency."""
    replay = Replay(capacity=20, length=5, seed=0)
    for step in range(7):
      replay.add({'step': step})
    assert len(replay) == 3  # 7 steps yield 3 overlapping length-5 windows.
    histogram = collections.defaultdict(int)
    dataset = unbatched(replay.dataset(1))
    for _ in range(100):
      seq = next(dataset)
      histogram[seq['step'][0]] += 1
    assert len(histogram) == 3, histogram
    histogram = tuple(histogram.values())
    # Uniform sampling would give ~33 each; allow generous statistical slack.
    assert histogram[0] > 20
    assert histogram[1] > 20
    assert histogram[2] > 20

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  def test_workers_simple(self, Replay):
    """Steps from interleaved workers never mix within one sequence."""
    replay = Replay(length=2, capacity=20)
    replay.add({'step': 0}, worker=0)
    replay.add({'step': 1}, worker=1)
    replay.add({'step': 2}, worker=0)
    replay.add({'step': 3}, worker=1)
    dataset = unbatched(replay.dataset(1))
    for _ in range(10):
      seq = next(dataset)
      assert tuple(seq['step']) in ((0, 2), (1, 3))

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  def test_workers_random(self, Replay, length=4, capacity=30):
    """Randomly interleaved worker streams still yield per-stream sequences."""
    rng = np.random.default_rng(seed=0)
    replay = Replay(length, capacity)
    streams = {i: iter(range(10)) for i in range(3)}
    for _ in range(40):
      worker = int(rng.integers(0, 3, ()))
      try:
        step = {'step': next(streams[worker]), 'stream': worker}
        replay.add(step, worker=worker)
      except StopIteration:
        pass  # Exhausted streams simply stop contributing steps.
    histogram = collections.defaultdict(int)
    dataset = unbatched(replay.dataset(1))
    for _ in range(10):
      seq = next(dataset)
      assert (seq['step'] - seq['step'][0] == np.arange(length)).all()
      assert (seq['stream'] == seq['stream'][0]).all()
      histogram[int(seq['stream'][0])] += 1
    assert all(count > 0 for count in histogram.values())

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  @pytest.mark.parametrize(
      'length,workers,capacity',
      [(1, 1, 1), (2, 1, 2), (5, 1, 10), (1, 2, 2), (5, 3, 15), (2, 7, 20)])
  def test_worker_delay(self, Replay, length, workers, capacity):
    """Smoke test: interleaved adds with workers finishing at random times."""
    replay = Replay(length, capacity)
    rng = np.random.default_rng(seed=0)
    streams = [iter(range(10)) for _ in range(workers)]
    while streams:
      try:
        worker = rng.integers(0, len(streams))
        replay.add({'step': next(streams[worker])}, worker)
      except StopIteration:
        del streams[worker]  # Stream exhausted; retire the worker slot.

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  @pytest.mark.parametrize(
      'length,capacity,chunksize',
      [(1, 1, 128), (3, 10, 128), (5, 100, 128), (5, 25, 2)])
  def test_restore_exact(self, tmpdir, Replay, length, capacity, chunksize):
    """Saving and loading restores the exact number of stored sequences."""
    elements.UUID.reset(debug=True)  # Deterministic ids for reproducibility.
    replay = Replay(
        length, capacity, directory=tmpdir, chunksize=chunksize,
        save_wait=True)
    for step in range(30):
      replay.add({'step': step})
    num_items = np.clip(30 - length + 1, 0, capacity)
    assert len(replay) == num_items
    data = replay.save()
    replay = Replay(length, capacity, directory=tmpdir)
    replay.load(data)
    assert len(replay) == num_items
    dataset = unbatched(replay.dataset(1))
    for _ in range(len(replay)):
      assert len(next(dataset)['step']) == length

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  @pytest.mark.parametrize(
      'length,capacity,chunksize',
      [(1, 1, 128), (3, 10, 128), (5, 100, 128), (5, 25, 2)])
  def test_restore_noclear(self, tmpdir, Replay, length, capacity, chunksize):
    """Loading a checkpoint restores old data over steps added since saving."""
    elements.UUID.reset(debug=True)
    replay = Replay(
        length, capacity, directory=tmpdir, chunksize=chunksize,
        save_wait=True)
    for _ in range(30):
      replay.add({'foo': 13})
    num_items = np.clip(30 - length + 1, 0, capacity)
    assert len(replay) == num_items
    data = replay.save()
    for _ in range(30):
      replay.add({'foo': 42})
    replay.load(data)
    dataset = unbatched(replay.dataset(1))
    # NOTE(review): num_items is clipped to capacity above, so this condition
    # can never be true and the loop below is dead code -- confirm whether
    # `capacity <= num_items` (buffer full of old data) was intended.
    if capacity < num_items:
      for _ in range(len(replay)):
        assert next(dataset)['foo'] == 13

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  @pytest.mark.parametrize('workers', [1, 2, 5])
  @pytest.mark.parametrize('length,capacity', [(1, 1), (3, 10), (5, 100)])
  def test_restore_workers(self, tmpdir, Replay, workers, length, capacity):
    """Saving and loading preserves data across multiple worker streams."""
    capacity *= workers  # Scale capacity so each worker gets the same share.
    replay = Replay(
        length, capacity, directory=tmpdir, save_wait=True)
    for step in range(50):
      for worker in range(workers):
        replay.add({'step': step}, worker)
    num_items = np.clip((50 - length + 1) * workers, 0, capacity)
    assert len(replay) == num_items
    data = replay.save()
    replay = Replay(length, capacity, directory=tmpdir)
    replay.load(data)
    assert len(replay) == num_items
    dataset = unbatched(replay.dataset(1))
    for _ in range(len(replay)):
      assert len(next(dataset)['step']) == length

  @pytest.mark.parametrize('Replay', REPLAYS_SAVECHUNKS)
  @pytest.mark.parametrize(
      'length,capacity,chunksize', [(1, 1, 1), (3, 10, 5), (5, 100, 12)])
  def test_restore_chunks_exact(
      self, tmpdir, Replay, length, capacity, chunksize):
    """The saved chunk files exactly account for the retained steps."""
    elements.UUID.reset(debug=True)
    assert len(list(elements.Path(tmpdir).glob('*.npz'))) == 0
    replay = Replay(
        length, capacity, directory=tmpdir, chunksize=chunksize,
        save_wait=True)
    for step in range(30):
      replay.add({'step': step})
    num_items = np.clip(30 - length + 1, 0, capacity)
    assert len(replay) == num_items
    data = replay.save()
    filenames = list(elements.Path(tmpdir).glob('*.npz'))
    # The fourth dash-separated filename field encodes the chunk length.
    lengths = [int(x.stem.split('-')[3]) for x in filenames]
    stored_steps = min(capacity + length - 1, 30)
    total_chunks = int(np.ceil(30 / chunksize))
    pruned_chunks = int(np.floor((30 - stored_steps) / chunksize))
    assert len(filenames) == total_chunks - pruned_chunks
    last_chunk_empty = total_chunks * chunksize - 30
    saved_steps = (total_chunks - pruned_chunks) * chunksize - last_chunk_empty
    assert sum(lengths) == saved_steps
    assert all(1 <= x <= chunksize for x in lengths)
    replay = Replay(length, capacity, directory=tmpdir, chunksize=chunksize)
    replay.load(data)
    # Loading must not create or delete any chunk files.
    assert sorted(elements.Path(tmpdir).glob('*.npz')) == sorted(filenames)
    assert len(replay) == num_items
    dataset = unbatched(replay.dataset(1))
    for _ in range(len(replay)):
      assert len(next(dataset)['step']) == length

  @pytest.mark.parametrize('Replay', REPLAYS_SAVECHUNKS)
  @pytest.mark.parametrize('workers', [1, 2, 5])
  @pytest.mark.parametrize(
      'length,capacity,chunksize', [(1, 1, 1), (3, 10, 5), (5, 100, 12)])
  def test_restore_chunks_workers(
      self, tmpdir, Replay, workers, length, capacity, chunksize):
    """Chunk accounting holds when multiple workers write in parallel."""
    capacity *= workers
    replay = Replay(
        length, capacity, directory=tmpdir, chunksize=chunksize,
        save_wait=True)
    for step in range(50):
      for worker in range(workers):
        replay.add({'step': step}, worker)
    num_items = np.clip((50 - length + 1) * workers, 0, capacity)
    assert len(replay) == num_items
    data = replay.save()
    filenames = list(elements.Path(tmpdir).glob('*.npz'))
    lengths = [int(x.stem.split('-')[3]) for x in filenames]
    stored_steps = min(capacity // workers + length - 1, 50)
    total_chunks = int(np.ceil(50 / chunksize))
    pruned_chunks = int(np.floor((50 - stored_steps) / chunksize))
    assert len(filenames) == (total_chunks - pruned_chunks) * workers
    last_chunk_empty = total_chunks * chunksize - 50
    saved_steps = (total_chunks - pruned_chunks) * chunksize - last_chunk_empty
    assert sum(lengths) == saved_steps * workers
    replay = Replay(length, capacity, directory=tmpdir, chunksize=chunksize)
    replay.load(data)
    assert len(replay) == num_items
    dataset = unbatched(replay.dataset(1))
    for _ in range(len(replay)):
      assert len(next(dataset)['step']) == length

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  @pytest.mark.parametrize(
      'length,capacity,chunksize',
      [(1, 1, 128), (3, 10, 128), (5, 100, 128), (5, 25, 2)])
  def test_restore_insert(self, tmpdir, Replay, length, capacity, chunksize):
    """New steps can be inserted after restoring from a checkpoint."""
    elements.UUID.reset(debug=True)
    replay = Replay(
        length, capacity, directory=tmpdir, chunksize=chunksize,
        save_wait=True)
    inserts = int(1.5 * chunksize)  # Leave the last chunk partially filled.
    for step in range(inserts):
      replay.add({'step': step})
    num_items = np.clip(inserts - length + 1, 0, capacity)
    assert len(replay) == num_items
    data = replay.save()
    replay = Replay(length, capacity, directory=tmpdir)
    replay.load(data)
    assert len(replay) == num_items
    dataset = unbatched(replay.dataset(1))
    for _ in range(len(replay)):
      assert len(next(dataset)['step']) == length
    for step in range(inserts):
      replay.add({'step': step})
    num_items = np.clip(2 * (inserts - length + 1), 0, capacity)
    assert len(replay) == num_items

  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
  def test_threading(
      self, tmpdir, Replay, length=5, capacity=128, chunksize=32,
      adders=8, samplers=4):
    """Concurrent adders and samplers with a save/load in between."""
    elements.UUID.reset(debug=True)
    replay = Replay(
        length, capacity, directory=tmpdir, chunksize=chunksize,
        save_wait=True)
    running = [True]  # Mutable flag shared with the worker threads.

    def adder():
      # Each thread uses its own thread id as the replay worker key.
      ident = threading.get_ident()
      step = 0
      while running[0]:
        replay.add({'step': step}, worker=ident)
        step += 1
        time.sleep(0.001)

    def sampler():
      dataset = unbatched(replay.dataset(1))
      while running[0]:
        seq = next(dataset)
        assert (seq['step'] - seq['step'][0] == np.arange(length)).all()
        time.sleep(0.001)

    workers = []
    for _ in range(adders):
      workers.append(threading.Thread(target=adder))
    for _ in range(samplers):
      workers.append(threading.Thread(target=sampler))

    try:
      [worker.start() for worker in workers]
      for _ in range(4):

        time.sleep(0.1)
        stats = replay.stats()
        assert stats['inserts'] > 0
        assert stats['samples'] > 0

      print('SAVING')
      data = replay.save()
      time.sleep(0.1)

      print('LOADING')
      replay.load(data)

    finally:
      running[0] = False
      [worker.join() for worker in workers]

    assert len(replay) == capacity
|
models/embodied/tests/test_sampletree.py
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
from embodied.core import selectors
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class TestSampleTree:
  """Tests for the prioritized sample tree used by replay selectors."""

  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  def test_root_sum(self, branching):
    """The root's unnormalized probability tracks the sum over all leaves."""
    tree = selectors.SampleTree(branching)
    entries = range(50)
    for index, uprob in enumerate(entries):
      assert tree.root.uprob == sum(entries[:index])
      tree.insert(index, uprob)

  @pytest.mark.parametrize('inserts', [1, 2, 10, 100])
  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  def test_depth_inserts(self, inserts, branching):
    """All leaves sit at the minimal depth for the number of inserts."""
    tree = selectors.SampleTree(branching)
    for index in range(inserts):
      tree.insert(index, 1)
    assert len(tree) == inserts
    depths = self._find_leave_depths(tree)
    # Minimal balanced depth: ceil(log_branching(inserts)), at least 1.
    target = max(1, int(np.ceil(np.log(inserts) / np.log(branching))))
    assert all(x == target for x in depths)

  @pytest.mark.parametrize('inserts', [2, 10, 100])
  @pytest.mark.parametrize('remove_every', [2, 3, 4])
  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  def test_depth_removals(self, inserts, remove_every, branching):
    """Removing entries does not change the depth of the remaining leaves."""
    tree = selectors.SampleTree(branching)
    for index in range(0, inserts, 1):
      tree.insert(index, 1)
    removals = list(range(0, inserts, remove_every))
    for index in removals:
      tree.remove(index)
    assert len(tree) == inserts - len(removals)
    depths = self._find_leave_depths(tree)
    target = max(1, int(np.ceil(np.log(inserts) / np.log(branching))))
    assert all(x == target for x in depths)

  @pytest.mark.parametrize('inserts', [2, 10, 100])
  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  def test_removal_num_nodes(self, inserts, branching):
    """Internal nodes are reclaimed on removal and reused on reinsert."""
    tree = selectors.SampleTree(branching)
    assert len(self._get_flat_nodes(tree)) == 1  # Only the root exists.
    rng = np.random.default_rng(seed=0)
    for key in rng.permutation(np.arange(inserts)):
      tree.insert(key, 1)
    num_nodes = len(self._get_flat_nodes(tree))
    for key in rng.permutation(np.arange(inserts)):
      tree.remove(key)
    assert len(self._get_flat_nodes(tree)) == 1
    for key in rng.permutation(np.arange(inserts)):
      tree.insert(key, 1)
    assert len(self._get_flat_nodes(tree)) == num_nodes

  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  def test_sample_single(self, branching):
    """With one remaining entry, sampling always returns its key."""
    tree = selectors.SampleTree(branching)
    tree.insert(12, 1.0)
    tree.insert(123, 1.0)
    tree.insert(42, 1.0)
    tree.remove(12)
    tree.remove(42)
    for _ in range(10):
      assert tree.sample() == 123

  @pytest.mark.parametrize('inserts', [2, 10])
  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  @pytest.mark.parametrize('uprob', [1e-5, 1.0, 1e5])
  def test_sample_uniform(self, inserts, branching, uprob):
    """Equal priorities give each remaining key a similar sample rate."""
    # NOTE(review): the `uprob` parameter is never used below; inserts always
    # use 1.0 -- presumably `tree.insert(key, uprob)` was intended. Confirm.
    tree = selectors.SampleTree(branching, seed=0)
    keys = list(range(inserts))
    for key in keys:
      tree.insert(key, 1.0)
    for key in keys[::3]:  # The slice copies, so removing while looping is safe.
      tree.remove(key)
      keys.remove(key)
    histogram = collections.defaultdict(int)
    for _ in range(100 * len(keys)):
      key = tree.sample()
      histogram[key] += 1
    assert len(histogram) > 0
    assert len(histogram) == len(keys)
    assert all(k in histogram for k in keys)
    for key, count in histogram.items():
      prob = count / (100 * len(keys))
      assert prob > 0.5 * (1 / len(keys))

  @pytest.mark.parametrize('scale', [1e-5, 1, 1e5])
  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  def test_sample_frequencies(self, scale, branching):
    """Sample rates are proportional to priorities across magnitude scales."""
    tree = selectors.SampleTree(branching, seed=0)
    keys = [0, 1, 2, 3, 4, 5]
    uprobs = [0, 3, 1, 1, 2, 2]
    entries = dict(zip(keys, uprobs))
    for key, uprob in entries.items():
      tree.insert(key, scale * uprob)
    histogram = collections.defaultdict(int)
    for _ in range(100 * len(entries)):
      key = tree.sample()
      histogram[key] += 1
    assert len(histogram) > 0
    total = sum(entries.values())
    for key, uprob in entries.items():
      if uprob == 0:
        assert key not in histogram  # Zero priority is never sampled.
    for key, count in histogram.items():
      prob = count / (100 * len(entries))
      target = entries[key] / total
      assert 0.7 * target < prob < 1.3 * target

  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  def test_update_frequencies(self, branching):
    """Updating priorities in place changes sample rates accordingly."""
    tree = selectors.SampleTree(branching, seed=0)
    keys = [0, 1, 2, 3, 4, 5]
    uprobs = [0, 3, 1, 1, 2, 2]
    entries = dict(zip(keys, uprobs))
    for key in entries.keys():
      tree.insert(key, 100)  # Initial priority is overwritten below.
    for key, uprob in entries.items():
      tree.update(key, uprob)
    histogram = collections.defaultdict(int)
    for _ in range(100 * len(entries)):
      key = tree.sample()
      histogram[key] += 1
    assert len(histogram) > 0
    total = sum(entries.values())
    for key, uprob in entries.items():
      if uprob == 0:
        assert key not in histogram
    for key, count in histogram.items():
      prob = count / (100 * len(entries))
      target = entries[key] / total
      assert 0.7 * target < prob < 1.3 * target

  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  def test_zero_probs_mixed(self, branching):
    """Zero-priority entries are never sampled when positive ones exist."""
    tree = selectors.SampleTree(branching, seed=0)
    impossible = []
    for index in range(100):
      if index % 3 == 0:
        tree.insert(index, 1.0)
      else:
        tree.insert(index, 0.0)
        impossible.append(index)
    for _ in range(1000):
      assert tree.sample() not in impossible

  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  def test_zero_probs_only(self, branching):
    """If every priority is zero, sampling still returns some valid key."""
    tree = selectors.SampleTree(branching, seed=0)
    for index in range(100):
      tree.insert(index, 0.0)
    for _ in range(1000):
      assert tree.sample() in range(100)

  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
  def test_infinity_probs(self, branching):
    """Infinite priorities dominate all finite ones."""
    tree = selectors.SampleTree(branching, seed=0)
    possible = []
    for index in range(100):
      if index % 3 == 0:
        tree.insert(index, np.inf)
        possible.append(index)
      else:
        tree.insert(index, 1.0)
    for _ in range(1000):
      assert tree.sample() in possible

  def _find_leave_depths(self, tree):
    # Depth-first walk collecting the depth of every leaf node.
    depths = []
    queue = [(tree.root, 0)]
    while queue:
      node, depth = queue.pop()
      if hasattr(node, 'children'):
        for child in node.children:
          queue.append((child, depth + 1))
      else:
        depths.append(depth)
    assert len(depths) > 0
    return depths

  def _get_flat_nodes(self, tree):
    # Flatten the tree into a list of all nodes, including internal ones.
    nodes = []
    queue = [tree.root]
    while queue:
      node = queue.pop()
      nodes.append(node)
      if hasattr(node, 'children'):
        queue += node.children
    return nodes
|