diff --git a/models/embodied/__init__.py b/models/embodied/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4118d7f9192d566b4528ec0173b183ac8885108
--- /dev/null
+++ b/models/embodied/__init__.py
@@ -0,0 +1,13 @@
+__version__ = '2.0.0'
+
+try:
+ import colored_traceback
+ colored_traceback.add_hook(colors='terminal')
+except ImportError:
+ pass
+
+from .core import *
+
+from . import envs
+from . import jax
+from . import run
diff --git a/models/embodied/core/__init__.py b/models/embodied/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e126ab435d481da64ad04a441025f989120ee55c
--- /dev/null
+++ b/models/embodied/core/__init__.py
@@ -0,0 +1,14 @@
+from .base import Agent, Env
+
+from .clock import GlobalClock
+from .clock import LocalClock
+from .driver import Driver
+from .random import RandomAgent
+from .replay import Replay
+from .wrappers import Wrapper
+
+from . import clock
+from . import limiters
+from . import selectors
+from . import streams
+from . import wrappers
diff --git a/models/embodied/core/base.py b/models/embodied/core/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f1d09284832869fa5bc01f0525e526eaf4ed55f
--- /dev/null
+++ b/models/embodied/core/base.py
@@ -0,0 +1,73 @@
class Agent:
  """Interface for learning agents.

  All stateful methods thread an explicit `carry` through calls, and each
  init method returns the initial carry for the corresponding batch size.
  Checkpointing happens through save() and load().
  """

  def __init__(self, obs_space, act_space, config):
    # Subclasses receive the environment spaces and a config object; the
    # base class keeps no state.
    pass

  def init_train(self, batch_size):
    # Returns the initial carry for train().
    raise NotImplementedError('init_train(batch_size) -> carry')

  def init_report(self, batch_size):
    # Returns the initial carry for report().
    raise NotImplementedError('init_report(batch_size) -> carry')

  def init_policy(self, batch_size):
    # Returns the initial carry for policy().
    raise NotImplementedError('init_policy(batch_size) -> carry')

  def train(self, carry, data):
    # Performs one training step on a batch of data.
    raise NotImplementedError('train(carry, data) -> carry, out, metrics')

  def report(self, carry, data):
    # Computes evaluation metrics on a batch of data without training.
    raise NotImplementedError('report(carry, data) -> carry, metrics')

  def policy(self, carry, obs, mode):
    # Computes actions for a batch of observations.
    raise NotImplementedError('policy(carry, obs, mode) -> carry, act, out')

  def stream(self, st):
    # Allows the agent to transform its input data stream.
    raise NotImplementedError('stream(st) -> st')

  def save(self):
    # Returns a checkpointable representation of the agent state.
    raise NotImplementedError('save() -> data')

  def load(self, data):
    # Restores agent state from the output of save().
    raise NotImplementedError('load(data) -> None')
+
+
class Env:
  """Interface for environments with dict-valued observation and action
  spaces that are stepped one transition at a time."""

  def __repr__(self):
    return (
        f'{self.__class__.__name__}('
        f'obs_space={self.obs_space}, '
        f'act_space={self.act_space})')

  @property
  def obs_space(self):
    # The observation space must contain the keys is_first, is_last, and
    # is_terminal. Commonly, it also contains the keys reward and image. By
    # convention, keys starting with 'log/' are not consumed by the agent.
    raise NotImplementedError('Returns: dict of spaces')

  @property
  def act_space(self):
    # The action space must contain the reset key as well as any actions.
    raise NotImplementedError('Returns: dict of spaces')

  def step(self, action):
    # Applies one action (dict keyed like act_space) and returns the next
    # observation (dict keyed like obs_space).
    raise NotImplementedError('Returns: dict')

  def close(self):
    # Optional cleanup hook; safe to call on environments without resources.
    pass
+
+
class Stream:
  """Interface for checkpointable, possibly infinite data streams.

  A stream is its own iterator; save() and load() allow resuming iteration
  from a checkpointed position.
  """

  def __iter__(self):
    return self

  def __next__(self):
    # Returns the next batch of data.
    raise NotImplementedError

  def save(self):
    # Returns the stream state for checkpointing.
    raise NotImplementedError

  def load(self, state):
    # Restores the stream position from the output of save().
    raise NotImplementedError
diff --git a/models/embodied/core/chunk.py b/models/embodied/core/chunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..426dbe225446be262fda026928df5560d2f60c92
--- /dev/null
+++ b/models/embodied/core/chunk.py
@@ -0,0 +1,99 @@
+import io
+import sys
+import traceback
+
+import elements
+import numpy as np
+
+
class Chunk:
  """Append-only buffer of consecutive steps, stored as one array per key.

  Chunks form linked lists via `succ` (the UUID of the following chunk) and
  are serialized as compressed .npz files whose filenames encode creation
  time, uuid, successor uuid, and length.
  """

  __slots__ = ('time', 'uuid', 'succ', 'length', 'size', 'data', 'saved')

  def __init__(self, size=1024):
    self.time = elements.timestamp(millis=True)  # Creation timestamp.
    self.uuid = elements.UUID()
    self.succ = elements.UUID(0)  # Zero UUID marks 'no successor yet'.
    # self.uuid = int(np.random.randint(1, 2 * 63))
    # self.succ = 0
    self.length = 0  # Number of steps appended so far.
    self.size = size  # Capacity in steps.
    self.data = None  # Per-key arrays, allocated lazily on first append.
    self.saved = False  # Guards against writing the same chunk twice.

  def __repr__(self):
    return f'Chunk({self.filename})'

  def __lt__(self, other):
    # Orders chunks by creation time, e.g. for sorted loading.
    return self.time < other.time

  @property
  def filename(self):
    # NOTE(review): succ is normally a UUID; the isinstance check suggests
    # it may also be set to another Chunk — confirm against callers.
    succ = self.succ.uuid if isinstance(self.succ, type(self)) else self.succ
    return f'{self.time}-{str(self.uuid)}-{str(succ)}-{self.length}.npz'

  @property
  def nbytes(self):
    # Total size of the allocated arrays; zero before the first append.
    if not self.data:
      return 0
    return sum(x.nbytes for x in self.data.values())

  def append(self, step):
    # Appends one step (dict of arrays). The first step defines the schema
    # and triggers allocation of full-capacity buffers.
    assert self.length < self.size
    if not self.data:
      example = step
      self.data = {
          k: np.empty((self.size, *v.shape), v.dtype)
          for k, v in example.items()}
    for key, value in step.items():
      self.data[key][self.length] = value
    self.length += 1
    # if self.length == self.size:
    #   [x.setflags(write=False) for x in self.data.values()]

  def update(self, index, length, mapping):
    # Overwrites a range of already-appended steps; only the provided keys
    # are touched.
    assert 0 <= index <= self.length, (index, self.length)
    assert 0 <= index + length <= self.length, (index, length, self.length)
    for key, value in mapping.items():
      self.data[key][index: index + length] = value

  def slice(self, index, length):
    # Returns views (not copies) into the underlying arrays.
    assert 0 <= index and index + length <= self.length
    return {k: v[index: index + length] for k, v in self.data.items()}

  @elements.timer.section('chunk_save')
  def save(self, directory, log=False):
    # Writes only the filled prefix of the buffers as a compressed npz file.
    assert not self.saved
    self.saved = True
    filename = elements.Path(directory) / self.filename
    data = {k: v[:self.length] for k, v in self.data.items()}
    with io.BytesIO() as stream:
      np.savez_compressed(stream, **data)
      stream.seek(0)
      filename.write(stream.read(), mode='wb')
    log and print(f'Saved chunk: {filename.name}')

  @classmethod
  def load(cls, filename, error='raise'):
    """Reconstructs a chunk from disk.

    With error='none', corrupted files yield None instead of raising.
    """
    assert error in ('raise', 'none')
    time, uuid, succ, length = filename.stem.split('-')
    length = int(length)
    try:
      with elements.Path(filename).open('rb') as f:
        data = np.load(f)
        data = {k: data[k] for k in data.keys()}
    except Exception:
      # NOTE(review): sys.exception() requires Python 3.11+.
      tb = ''.join(traceback.format_exception(sys.exception()))
      print(f'Error loading chunk (unknown):\n{tb}')
      if error == 'raise':
        raise
      else:
        return None
    chunk = cls(length)
    chunk.time = time
    chunk.uuid = elements.UUID(uuid)
    chunk.succ = elements.UUID(succ)
    chunk.length = length
    chunk.data = data
    chunk.saved = True  # Already on disk; prevents a redundant save.
    return chunk
diff --git a/models/embodied/core/clock.py b/models/embodied/core/clock.py
new file mode 100644
index 0000000000000000000000000000000000000000..a424310c9d82bfe9c86ed58735c1e2345fc8fd9e
--- /dev/null
+++ b/models/embodied/core/clock.py
@@ -0,0 +1,118 @@
+import threading
+import time
+
+import portal
+
+
+CLIENT = None
+REPLICA = None
+
+
def setup(is_server, replica, replicas, port, addr):
  """Initializes the process-global clock client for multi-replica runs.

  No-op when there is only one replica. Exactly one replica should pass
  is_server=True to host the coordination server; all replicas then connect
  as clients. May only be called once per process.
  """
  global CLIENT, REPLICA
  assert CLIENT is None
  if replicas <= 1:
    return
  print('CLOCK PORT:', port)
  print('CLOCK ADDR:', addr)
  if is_server:
    _start_server(port, replicas)
  client = portal.Client(addr, 'ClockClient')
  client.connect()
  CLIENT = client
  REPLICA = replica
+
+
def _start_server(port, replicas):
  """Starts the clock coordination server in the background.

  Each RPC acts as a collective: every replica must call it, the requests
  are gathered into a shared list, replica 0 computes the result between
  the two barriers, and all callers return the same value.
  """

  clocks = []  # Per-clock state: [interval, time of last trigger].
  requests = []  # Arguments collected from all replicas for the current call.
  result = [None]  # Shared result, written by replica 0 only.
  receive = threading.Barrier(replicas)  # All requests have arrived.
  respond = threading.Barrier(replicas)  # The shared result is ready.

  def create(replica, every):
    # Registers a new clock; all replicas must request the same interval.
    requests.append(every)
    receive.wait()
    if replica == 0:
      assert len(requests) == replicas, (len(requests), replicas)
      assert all(x == every for x in requests)
      clockid = len(clocks)
      clocks.append([float(every), time.time()])
      result[0] = clockid
      requests.clear()
    respond.wait()
    return result[0]

  def should(replica, clockid, skip):
    # Decides whether the clock fires now: True only if the interval has
    # elapsed and no replica asked to skip this round.
    requests.append((clockid, skip))
    receive.wait()
    if replica == 0:
      assert len(requests) == replicas, (len(requests), replicas)
      clockids, skips = zip(*requests)
      assert all(x == clockid for x in clockids)
      every, prev = clocks[clockid]
      now = time.time()
      if every == 0:  # Zero means off.
        decision = False
      elif every < 0:  # Negative means always.
        decision = True
      elif now >= prev + every:
        clocks[clockid][1] = now
        decision = True
      else:
        decision = False
      decision = decision and not any(skips)
      result[0] = decision
      requests.clear()
    respond.wait()
    return result[0]

  server = portal.Server(port, 'ClockServer')
  # One worker per replica so all collective calls can block on the
  # barriers at the same time without deadlocking the server.
  server.bind('create', create, workers=replicas)
  server.bind('should', should, workers=replicas)
  server.start(block=False)
+
+
class GlobalClock:
  """Clock that triggers at most once per interval, synchronized across
  replicas when the module-level clock client has been set up via setup()."""

  def __init__(self, every, first=False):
    self.multihost = bool(CLIENT)
    if self.multihost:
      self.clockid = CLIENT.create(REPLICA, every).result()
      # The server has no notion of `first`, so the first trigger is
      # suppressed locally unless first=True.
      self.skip_next = (not first)
    else:
      self.clock = LocalClock(every, first)

  def __call__(self, step=None, skip=None):
    # Returns whether the clock fires now; `step` is accepted for interface
    # compatibility but unused.
    if self.multihost:
      if self.skip_next:
        self.skip_next = False
        skip = True
      return CLIENT.should(REPLICA, self.clockid, bool(skip)).result()
    else:
      return self.clock(step, skip)
+
+
class LocalClock:
  """Rate limiter that triggers at most once every `every` seconds.

  An interval of zero never triggers and a negative interval always
  triggers. The very first call returns `first`.
  """

  def __init__(self, every, first=False):
    self.every = every
    self.first = first
    self.prev = None  # Time of the last trigger; None before the first call.

  def __call__(self, step=None, skip=None):
    # `step` is accepted for interface compatibility but unused.
    if skip:
      return False
    if self.every == 0:  # Zero means off.
      return False
    if self.every < 0:  # Negative means always.
      return True
    now = time.time()
    if self.prev is None:
      self.prev = now
      return self.first
    due = now >= self.prev + self.every
    if due:
      self.prev = now
    return due
diff --git a/models/embodied/core/driver.py b/models/embodied/core/driver.py
new file mode 100644
index 0000000000000000000000000000000000000000..b24797d702afc767ae3a6da4be68cca1bbabac6d
--- /dev/null
+++ b/models/embodied/core/driver.py
@@ -0,0 +1,138 @@
+import time
+
+import cloudpickle
+import elements
+import numpy as np
+import portal
+
+
class Driver:
  """Steps a batch of environments with a policy and fans out transitions.

  Environments either run in-process or each in their own subprocess,
  communicating over pipes. Registered callbacks receive each transition
  together with its environment index.
  """

  def __init__(self, make_env_fns, parallel=True, **kwargs):
    assert len(make_env_fns) >= 1
    self.parallel = parallel
    self.kwargs = kwargs  # Forwarded to the policy and to callbacks.
    self.length = len(make_env_fns)  # Number of parallel environments.
    if parallel:
      import multiprocessing as mp
      context = mp.get_context()
      self.pipes, pipes = zip(*[context.Pipe() for _ in range(self.length)])
      self.stop = context.Event()
      # Constructors are serialized with cloudpickle so closures survive
      # the trip into the worker processes.
      fns = [cloudpickle.dumps(fn) for fn in make_env_fns]
      self.procs = [
          portal.Process(self._env_server, self.stop, i, pipe, fn, start=True)
          for i, (fn, pipe) in enumerate(zip(fns, pipes))]
      # All envs are assumed to share the same spaces, so query only the
      # first one.
      self.pipes[0].send(('act_space',))
      self.act_space = self._receive(self.pipes[0])
    else:
      self.envs = [fn() for fn in make_env_fns]
      self.act_space = self.envs[0].act_space
    self.callbacks = []
    self.acts = None
    self.carry = None
    self.reset()

  def reset(self, init_policy=None):
    # Zero actions with reset=True trigger episode resets on the next step.
    self.acts = {
        k: np.zeros((self.length,) + v.shape, v.dtype)
        for k, v in self.act_space.items()}
    self.acts['reset'] = np.ones(self.length, bool)
    self.carry = init_policy and init_policy(self.length)

  def close(self):
    if self.parallel:
      [proc.kill() for proc in self.procs]
    else:
      [env.close() for env in self.envs]

  def on_step(self, callback):
    # Callback signature: callback(transition, env_index, **kwargs).
    self.callbacks.append(callback)

  def __call__(self, policy, steps=0, episodes=0):
    # Runs until both the requested number of steps and episodes have been
    # reached.
    step, episode = 0, 0
    while step < steps or episode < episodes:
      step, episode = self._step(policy, step, episode)

  def _step(self, policy, step, episode):
    acts = self.acts
    assert all(len(x) == self.length for x in acts.values())
    assert all(isinstance(v, np.ndarray) for v in acts.values())
    # Split the batched actions into one dict per environment.
    acts = [{k: v[i] for k, v in acts.items()} for i in range(self.length)]
    if self.parallel:
      [pipe.send(('step', act)) for pipe, act in zip(self.pipes, acts)]
      obs = [self._receive(pipe) for pipe in self.pipes]
    else:
      obs = [env.step(act) for env, act in zip(self.envs, acts)]
    obs = {k: np.stack([x[k] for x in obs]) for k in obs[0].keys()}
    # Keys starting with 'log/' bypass the agent but still reach callbacks.
    logs = {k: v for k, v in obs.items() if k.startswith('log/')}
    obs = {k: v for k, v in obs.items() if not k.startswith('log/')}
    assert all(len(x) == self.length for x in obs.values()), obs
    self.carry, acts, outs = policy(self.carry, obs, **self.kwargs)
    assert all(k not in acts for k in outs), (
        list(outs.keys()), list(acts.keys()))
    if obs['is_last'].any():
      # Zero out the actions of finished episodes; those environments will
      # be reset on the next step instead.
      mask = ~obs['is_last']
      acts = {k: self._mask(v, mask) for k, v in acts.items()}
    self.acts = {**acts, 'reset': obs['is_last'].copy()}
    trans = {**obs, **acts, **outs, **logs}
    for i in range(self.length):
      trn = elements.tree.map(lambda x: x[i], trans)
      [fn(trn, i, **self.kwargs) for fn in self.callbacks]
    step += len(obs['is_first'])
    episode += obs['is_last'].sum()
    return step, episode

  def _mask(self, value, mask):
    # Broadcasts the boolean mask across trailing dims and zeroes out
    # masked entries while preserving the value's dtype.
    while mask.ndim < value.ndim:
      mask = mask[..., None]
    return value * mask.astype(value.dtype)

  def _receive(self, pipe):
    # Receives a (msg, arg) tuple from a worker; re-raises worker errors
    # and tears down all workers on any failure.
    try:
      msg, arg = pipe.recv()
      if msg == 'error':
        raise RuntimeError(arg)
      assert msg == 'result'
      return arg
    except Exception:
      print('Terminating workers due to an exception.')
      [proc.kill() for proc in self.procs]
      raise

  @staticmethod
  def _env_server(stop, envid, pipe, ctor):
    # Worker process loop: constructs the environment and answers
    # step/space requests until the stop event is set or the pipe closes.
    try:
      ctor = cloudpickle.loads(ctor)
      env = ctor()
      while not stop.is_set():
        if not pipe.poll(0.1):
          time.sleep(0.1)
          continue
        try:
          msg, *args = pipe.recv()
        except EOFError:
          return
        if msg == 'step':
          assert len(args) == 1
          act = args[0]
          obs = env.step(act)
          pipe.send(('result', obs))
        elif msg == 'obs_space':
          assert len(args) == 0
          pipe.send(('result', env.obs_space))
        elif msg == 'act_space':
          assert len(args) == 0
          pipe.send(('result', env.act_space))
        else:
          raise ValueError(f'Invalid message {msg}')
    except ConnectionResetError:
      print('Connection to driver lost')
    except Exception as e:
      # Forward the error to the driver before crashing the worker.
      pipe.send(('error', e))
      raise
    finally:
      try:
        env.close()
      except Exception:
        pass
      pipe.close()
diff --git a/models/embodied/core/limiters.py b/models/embodied/core/limiters.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0bd5b342651fb767fc72e9b6513f54cd215485e
--- /dev/null
+++ b/models/embodied/core/limiters.py
@@ -0,0 +1,80 @@
+import threading
+import time
+
+
def wait(predicate, message, info=None, sleep=0.01, notify=60):
  """Blocks until `predicate()` becomes true.

  Prints a progress message every `notify` seconds while waiting. Returns
  the number of seconds waited, or 0 if the predicate was already true.
  """
  if predicate():
    return 0
  begin = time.time()
  announced = begin
  while True:
    if predicate():
      return time.time() - begin
    now = time.time()
    if now - announced > notify:
      dur = now - begin
      print(f'{message} {dur:.1f}s: {info}')
      announced = time.time()
    time.sleep(sleep)
+
+
class SamplesPerInsert:
  """Rate limiter that couples replay sampling to inserting.

  Maintains an availability budget: each insert (once the buffer holds
  `minsize` items) grants `samples_per_insert` samples, and each sample
  consumes one. `tolerance` bounds how far either side may run ahead.
  A non-positive samples_per_insert disables rate limiting.
  """

  def __init__(self, samples_per_insert, tolerance, minsize):
    assert 1 <= minsize
    self.samples_per_insert = samples_per_insert
    self.minsize = minsize
    self.avail = -minsize  # Budget starts negative until minsize is reached.
    self.min_avail = -tolerance
    self.max_avail = tolerance * samples_per_insert
    self.size = 0
    self.lock = threading.Lock()

  def save(self):
    return {'size': self.size, 'avail': self.avail}

  def load(self, data):
    self.size = data['size']
    self.avail = data['avail']

  def want_insert(self):
    # Inserts are always allowed while filling up or when unlimited;
    # afterwards only while the sample budget has not run too far ahead.
    if self.size < self.minsize or self.samples_per_insert <= 0:
      return True
    return self.avail < self.max_avail

  def want_sample(self):
    # Never sample from a buffer that is still below its minimum size.
    if self.size < self.minsize:
      return False
    if self.samples_per_insert <= 0:
      return True
    return self.min_avail < self.avail

  def insert(self):
    with self.lock:
      self.size += 1
      if self.size >= self.minsize:
        self.avail += self.samples_per_insert

  def sample(self):
    with self.lock:
      self.avail -= 1
diff --git a/models/embodied/core/random.py b/models/embodied/core/random.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e8cb3645034329993a3745c075ac34347802408
--- /dev/null
+++ b/models/embodied/core/random.py
@@ -0,0 +1,39 @@
+import numpy as np
+
+
class RandomAgent:
  """Agent that ignores observations and samples uniformly random actions.

  Useful as a baseline and for prefilling replay buffers. Training and
  reporting are no-ops, and the carry is an empty tuple throughout.
  """

  def __init__(self, obs_space, act_space):
    self.obs_space = obs_space
    self.act_space = act_space

  def init_policy(self, batch_size):
    return ()

  def init_train(self, batch_size):
    return ()

  def init_report(self, batch_size):
    return ()

  def policy(self, carry, obs, mode='train'):
    # One independent sample per batch element for every key but 'reset'.
    count = len(obs['is_first'])
    act = {}
    for key, space in self.act_space.items():
      if key == 'reset':
        continue
      act[key] = np.stack([space.sample() for _ in range(count)])
    return carry, act, {}

  def train(self, carry, data):
    return carry, {}, {}

  def report(self, carry, data):
    return carry, {}

  def stream(self, st):
    return st

  def save(self):
    return None

  def load(self, data=None):
    pass
diff --git a/models/embodied/core/replay.py b/models/embodied/core/replay.py
new file mode 100644
index 0000000000000000000000000000000000000000..09a68fbc3638c5bd1fd6f9dbd93e59d09dba9523
--- /dev/null
+++ b/models/embodied/core/replay.py
@@ -0,0 +1,394 @@
+import threading
+from collections import defaultdict, deque
+from concurrent.futures import ThreadPoolExecutor
+from functools import partial as bind
+
+import elements
+import numpy as np
+
+from . import chunk as chunklib
+from . import limiters
+from . import selectors
+
+
class Replay:
  """In-memory replay buffer of fixed-length sequences, backed by chunks.

  Steps are appended per worker into linked Chunk objects; every step
  starts one sampleable item of `length` consecutive steps, tracked by a
  pluggable selector. Chunks are reference counted so they stay in memory
  while any item or successor link still needs them, and can be persisted
  to and restored from a directory of .npz files.
  """

  def __init__(
      self, length, capacity=None, directory=None, chunksize=1024,
      online=False, selector=None, save_wait=False, name='unnamed', seed=0):

    self.length = length  # Steps per sampled sequence.
    self.capacity = capacity  # Max number of items; None means unbounded.
    self.chunksize = chunksize
    self.name = name

    self.sampler = selector or selectors.Uniform(seed)

    # Chunk storage and reference counts, guarded by refs_lock.
    self.chunks = {}
    self.refs = {}
    self.refs_lock = threading.RLock()

    # Sampleable items: itemid -> (chunkid, start index).
    self.items = {}
    self.fifo = deque()  # Insertion order, for capacity eviction.
    self.itemid = 0

    # Per-worker write position and pending (not yet sampleable) steps.
    self.current = {}
    self.streams = defaultdict(deque)
    self.rwlock = elements.RWLock()

    # Online mode additionally queues fresh sequences so that training can
    # prioritize data that was just collected.
    self.online = online
    if online:
      self.lengths = defaultdict(int)
      self.queue = deque()

    if directory:
      self.directory = elements.Path(directory)
      self.directory.mkdir()
      self.workers = ThreadPoolExecutor(16, 'replay_saver')
      self.saved = set()  # UUIDs of chunks already written to disk.
    else:
      self.directory = None
    self.save_wait = save_wait

    self.metrics = {'samples': 0, 'inserts': 0, 'updates': 0}

  def __len__(self):
    return len(self.items)

  def stats(self):
    # Returns and resets the running counters.
    ratio = lambda x, y: x / y if y else np.nan
    m = self.metrics
    chunk_nbytes = sum(x.nbytes for x in list(self.chunks.values()))
    stats = {
        'items': len(self.items),
        'chunks': len(self.chunks),
        'streams': len(self.streams),
        'ram_gb': chunk_nbytes / (1024 ** 3),
        'inserts': m['inserts'],
        'samples': m['samples'],
        'updates': m['updates'],
        'replay_ratio': ratio(self.length * m['samples'], m['inserts']),
    }
    for key in self.metrics:
      self.metrics[key] = 0
    return stats

  @elements.timer.section('replay_add')
  def add(self, step, worker=0):
    """Appends one step for the given worker's stream."""
    step = {k: v for k, v in step.items() if not k.startswith('log/')}
    with self.rwlock.reading:
      step = {k: np.asarray(v) for k, v in step.items()}

      # Lazily start a chunk for workers we have not seen before.
      if worker not in self.current:
        chunk = chunklib.Chunk(self.chunksize)
        with self.refs_lock:
          self.refs[chunk.uuid] = 1
        self.chunks[chunk.uuid] = chunk
        self.current[worker] = (chunk.uuid, 0)

      # Tag the step with a unique id encoding its chunk and position.
      chunkid, index = self.current[worker]
      step['stepid'] = np.frombuffer(
          bytes(chunkid) + index.to_bytes(4, 'big'), np.uint8)
      stream = self.streams[worker]
      chunk = self.chunks[chunkid]
      assert chunk.length == index, (chunk.length, index)
      chunk.append(step)
      assert chunk.length == index + 1, (chunk.length, index + 1)
      stream.append((chunkid, index))
      with self.refs_lock:
        self.refs[chunkid] += 1

      # Advance the write position, rolling over to a fresh chunk when full.
      index += 1
      if index < chunk.size:
        self.current[worker] = (chunkid, index)
      else:
        self._complete(chunk, worker)
      assert len(self.streams) == len(self.current)

      # Once enough steps are pending, the oldest becomes a sampleable item.
      if len(stream) >= self.length:
        # Increment is not thread safe thus inaccurate but faster than locking.
        self.metrics['inserts'] += 1
        chunkid, index = stream.popleft()
        self._insert(chunkid, index)
        if self.online and self.lengths[worker] % self.length == 0:
          self.queue.append((chunkid, index))

      if self.online:
        self.lengths[worker] += 1

  @elements.timer.section('replay_sample')
  def sample(self, batch, mode='train'):
    """Samples a batch of sequences of shape (batch, length, ...)."""
    message = f'Replay buffer {self.name} is empty'
    limiters.wait(lambda: len(self.sampler), message)
    seqs, is_online = zip(*[self._sample(mode) for _ in range(batch)])
    data = self._assemble_batch(seqs, 0, self.length)
    data = self._annotate_batch(data, is_online, True)
    return data

  @elements.timer.section('replay_update')
  def update(self, data):
    """Writes back per-step values (and priorities) keyed by stepid."""
    stepid = data.pop('stepid')
    priority = data.pop('priority', None)
    assert stepid.ndim == 3, stepid.shape
    self.metrics['updates'] += int(np.prod(stepid.shape[:-1]))
    if priority is not None:
      assert priority.ndim == 2, priority.shape
      self.sampler.prioritize(
          stepid.reshape((-1, stepid.shape[-1])),
          priority.flatten())
    if data:
      # NOTE(review): the loop variable shadows the batched `stepid` array;
      # behavior is correct but renaming would aid readability.
      for i, stepid in enumerate(stepid):
        # Decode chunk uuid and start index from the first step's id.
        stepid = stepid[0].tobytes()
        chunkid = elements.UUID(stepid[:-4])
        index = int.from_bytes(stepid[-4:], 'big')
        values = {k: v[i] for k, v in data.items()}
        try:
          self._setseq(chunkid, index, values)
        except KeyError:
          # The chunk may have been evicted in the meantime; skip silently.
          pass

  def _sample(self, mode):
    # Draws one sequence, preferring the online queue in train mode.
    assert mode in ('train', 'report', 'eval'), mode
    if mode == 'train':
      # Increment is not thread safe thus inaccurate but faster than locking.
      self.metrics['samples'] += 1
    while True:
      try:
        if self.online and self.queue and mode == 'train':
          chunkid, index = self.queue.popleft()
          is_online = True
        else:
          with elements.timer.section('sample'):
            itemid = self.sampler()
            chunkid, index = self.items[itemid]
          is_online = False
        seq = self._getseq(chunkid, index, concat=False)
        return seq, is_online
      except KeyError:
        # The item may have been evicted concurrently; retry.
        continue

  def _insert(self, chunkid, index):
    # Registers a new item, evicting the oldest ones if over capacity.
    while self.capacity and len(self.items) >= self.capacity:
      self._remove()
    itemid = self.itemid
    self.itemid += 1
    self.items[itemid] = (chunkid, index)
    stepids = self._getseq(chunkid, index, ['stepid'])['stepid']
    self.sampler[itemid] = stepids
    self.fifo.append(itemid)

  def _remove(self):
    # Evicts the oldest item and drops chunk references it held.
    itemid = self.fifo.popleft()
    del self.sampler[itemid]
    chunkid, index = self.items.pop(itemid)
    with self.refs_lock:
      self.refs[chunkid] -= 1
      if self.refs[chunkid] < 1:
        del self.refs[chunkid]
        chunk = self.chunks.pop(chunkid)
        # Freeing a chunk also releases its hold on the successor.
        if chunk.succ in self.refs:
          self.refs[chunk.succ] -= 1

  def _getseq(self, chunkid, index, keys=None, concat=True):
    # Reads `length` steps starting at (chunkid, index), following succ
    # links across chunk boundaries when needed.
    chunk = self.chunks[chunkid]
    available = chunk.length - index
    if available >= self.length:
      with elements.timer.section('get_slice'):
        seq = chunk.slice(index, self.length)
        if not concat:
          seq = {k: [v] for k, v in seq.items()}
        return seq
    else:
      with elements.timer.section('get_compose'):
        parts = [chunk.slice(index, available)]
        remaining = self.length - available
        while remaining > 0:
          chunk = self.chunks[chunk.succ]
          used = min(remaining, chunk.length)
          parts.append(chunk.slice(0, used))
          remaining -= used
        seq = {k: [p[k] for p in parts] for k in keys or parts[0].keys()}
        if concat:
          seq = {k: np.concatenate(v, 0) for k, v in seq.items()}
        return seq

  def _setseq(self, chunkid, index, values):
    # Writes a sequence of values starting at (chunkid, index), following
    # succ links across chunk boundaries when needed.
    length = len(next(iter(values.values())))
    chunk = self.chunks[chunkid]
    available = chunk.length - index
    if available >= length:
      with elements.timer.section('set_slice'):
        return chunk.update(index, length, values)
    else:
      with elements.timer.section('set_compose'):
        part = {k: v[:available] for k, v in values.items()}
        values = {k: v[available:] for k, v in values.items()}
        chunk.update(index, available, part)
        remaining = length - available
        while remaining > 0:
          chunk = self.chunks[chunk.succ]
          used = min(remaining, chunk.length)
          part = {k: v[:used] for k, v in values.items()}
          values = {k: v[used:] for k, v in values.items()}
          chunk.update(0, used, part)
          remaining -= used

  # def dataset(self, batch, length=None, consec=None, prefix=0, report=False):
  #   length = length or self.length
  #   consec = consec or (self.length - prefix) // length
  #   assert consec <= (self.length - prefix) // length, (
  #       self.length, length, consec, prefix)
  #   limiters.wait(lambda: len(self.sampler), 'Replay buffer is empty')
  #   # For performance, each batch should be consecutive in memory, rather
  #   # than a non-consecutive view into a longer batch. For example, this
  #   # allows near-instant serialization when sending over the network.
  #   while True:
  #     seqs, is_online = zip(*[self._sample(report) for _ in range(batch)])
  #     for i in range(consec):
  #       offset = i * length
  #       data = self._assemble_batch(seqs, offset, offset + length + prefix)
  #       data = self._annotate_batch(data, is_online, is_first=(i == 0))
  #       data['consec'] = np.full(data['is_first'].shape, i, np.int32)
  #       yield data

  @elements.timer.section('assemble_batch')
  def _assemble_batch(self, seqs, start, stop):
    # Copies the [start, stop) window of each (possibly multi-part)
    # sequence into one contiguous batch array per key.
    shape = (len(seqs), stop - start)
    data = {
        key: np.empty((*shape, *parts[0].shape[1:]), parts[0].dtype)
        for key, parts in seqs[0].items()}
    for n, seq in enumerate(seqs):
      st, dt = 0, 0  # Source and destination time index.
      for p in range(len(seq['stepid'])):
        partlen = len(seq['stepid'][p])
        if start < st + partlen:
          part_start = max(0, start - st)
          part_stop = min(stop - st, partlen)
          num = part_stop - part_start
          for k in data.keys():
            data[k][n, dt: dt + num] = seq[k][p][part_start: part_stop]
          dt += num
        st += partlen
        if st >= stop:
          break
    return data

  @elements.timer.section('annotate_batch')
  def _annotate_batch(self, data, is_online, is_first):
    # Fixes up episode boundary flags of the assembled batch.
    data = data.copy()
    # if self.online:
    #   broadcasted = [[x] for x in is_online]
    #   data['is_online'] = np.full(data['is_first'].shape, broadcasted, bool)
    if 'is_first' in data:
      if is_first:
        data['is_first'] = data['is_first'].copy()
        data['is_first'][:, 0] = True
    if 'is_last' in data:
      # Make sure that abandoned episodes have is_last set.
      next_is_first = np.roll(data['is_first'], shift=-1, axis=1)
      next_is_first[:, -1] = False
      data['is_last'] = data['is_last'] | next_is_first
    return data

  @elements.timer.section('replay_save')
  def save(self):
    """Persists all unsaved non-empty chunks to the directory."""
    if self.directory:
      with self.rwlock.writing:
        # Seal the in-progress chunks so they can be written out.
        for worker, (chunkid, _) in self.current.items():
          chunk = self.chunks[chunkid]
          if chunk.length > 0:
            self._complete(chunk, worker)
        promises = []
        for chunk in self.chunks.values():
          if chunk.length > 0 and chunk.uuid not in self.saved:
            self.saved.add(chunk.uuid)
            promises.append(self.workers.submit(chunk.save, self.directory))
        if self.save_wait:
          [promise.result() for promise in promises]
    return None

  @elements.timer.section('replay_load')
  def load(self, data=None, directory=None, amount=None):
    """Restores up to `amount` items from on-disk chunks, newest first."""

    directory = directory or self.directory
    amount = amount or self.capacity or np.inf
    if not directory:
      return
    revsorted = lambda x: list(reversed(sorted(list(x))))
    directory = elements.Path(directory)
    names_loaded = revsorted(x.filename for x in list(self.chunks.values()))
    names_ondisk = revsorted(x.name for x in directory.glob('*.npz'))
    names_ondisk = [x for x in names_ondisk if x not in names_loaded]
    if not names_ondisk:
      return

    # Decide from filenames alone how many chunks are needed to cover the
    # requested number of items.
    numitems = self._numitems(names_loaded + names_ondisk)
    uuids = [elements.UUID(x.split('-')[1]) for x in names_ondisk]
    total = 0
    numchunks = 0
    for uuid in uuids:
      numchunks += 1
      total += numitems[uuid]
      if total >= amount:
        break

    load = bind(chunklib.Chunk.load, error='none')
    filenames = [directory / x for x in names_ondisk[:numchunks]]

    with ThreadPoolExecutor(16, 'replay_loader') as pool:
      chunks = [x for x in pool.map(load, filenames) if x]

    # We need to recompute the number of items per chunk now because some
    # chunks may be corrupted and thus not available.
    # numitems = self._numitems(chunks + list(self.chunks.values()))
    numitems = self._numitems(chunks)

    with self.rwlock.writing:
      self.saved.update([chunk.uuid for chunk in chunks])
      with self.refs_lock:
        for chunk in chunks:
          self.chunks[chunk.uuid] = chunk
          self.refs[chunk.uuid] = 0
        for chunk in reversed(chunks):
          # NOTE(review): this rebinds `amount` (the capacity argument); it
          # is no longer needed at this point but renaming would be clearer.
          amount = numitems[chunk.uuid]
          self.refs[chunk.uuid] += amount
          if chunk.succ in self.refs:
            self.refs[chunk.succ] += 1
          for index in range(amount):
            self._insert(chunk.uuid, index)

  @elements.timer.section('complete_chunk')
  def _complete(self, chunk, worker):
    # Seals a full (or final) chunk and points the worker at a fresh
    # successor chunk; reference counts move accordingly.
    succ = chunklib.Chunk(self.chunksize)
    with self.refs_lock:
      self.refs[chunk.uuid] -= 1
      self.refs[succ.uuid] = 2
      self.chunks[succ.uuid] = succ
      self.current[worker] = (succ.uuid, 0)
      chunk.succ = succ.uuid
    return succ

  def _numitems(self, chunks):
    # Computes, from chunk filenames alone, how many sequence items start
    # in each chunk, accounting for steps continuing into successors.
    chunks = [x.filename if hasattr(x, 'filename') else x for x in chunks]
    if not chunks:
      return 0
    chunks = list(reversed(sorted([elements.Path(x).stem for x in chunks])))
    times, uuids, succs, lengths = zip(*[x.split('-') for x in chunks])
    uuids = [elements.UUID(x) for x in uuids]
    succs = [elements.UUID(x) for x in succs]
    lengths = {k: int(v) for k, v in zip(uuids, lengths)}
    future = {}  # Steps available in a chunk plus all of its successors.
    for uuid, succ in zip(uuids, succs):
      future[uuid] = lengths[uuid] + future.get(succ, 0)
    numitems = {}
    for uuid, succ in zip(uuids, succs):
      numitems[uuid] = lengths[uuid] + 1 - self.length + future.get(succ, 0)
    numitems = {k: np.clip(v, 0, lengths[k]) for k, v in numitems.items()}
    return numitems

  def _notempty(self, reason=False):
    # Predicate for limiters; optionally returns a human-readable reason.
    if reason:
      return (True, 'ok') if len(self.sampler) else (False, 'empty buffer')
    else:
      return bool(len(self.sampler))
diff --git a/models/embodied/core/selectors.py b/models/embodied/core/selectors.py
new file mode 100644
index 0000000000000000000000000000000000000000..63eed6a2338949a3ed1328ecaebbcb201d56da87
--- /dev/null
+++ b/models/embodied/core/selectors.py
@@ -0,0 +1,354 @@
import collections
import threading
import time

import numpy as np
+
+
class Fifo:
  """Selector that returns keys in first-in-first-out order."""

  def __init__(self):
    self.queue = collections.deque()

  def __call__(self):
    # Peek at the oldest key without removing it.
    return self.queue[0]

  def __len__(self):
    return len(self.queue)

  def __setitem__(self, key, stepids):
    self.queue.append(key)

  def __delitem__(self, key):
    if self.queue[0] == key:
      self.queue.popleft()
      return
    # Removing from the middle is O(n) but typically not exercised.
    self.queue.remove(key)
+
+
class Uniform:
  """Selector that samples keys uniformly at random.

  Insertion and deletion are O(1) via an index map plus swap-with-last
  removal. Access is serialized through a single lock.
  """

  def __init__(self, seed=0):
    self.indices = {}
    self.keys = []
    self.rng = np.random.default_rng(seed)
    self.lock = threading.Lock()

  def __len__(self):
    return len(self.keys)

  def __call__(self):
    with self.lock:
      chosen = self.rng.integers(0, len(self.keys)).item()
      return self.keys[chosen]

  def __setitem__(self, key, stepids):
    with self.lock:
      self.indices[key] = len(self.keys)
      self.keys.append(key)

  def __delitem__(self, key):
    with self.lock:
      assert 2 <= len(self), len(self)
      pos = self.indices.pop(key)
      moved = self.keys.pop()
      if pos != len(self.keys):
        # Fill the hole with the previous last key to keep the list dense.
        self.keys[pos] = moved
        self.indices[moved] = pos
+
+
class Recency:
  """Selector that samples recent keys more often than old ones.

  `uprobs` gives unnormalized probabilities by age rank, newest first.
  Ages are drawn from a shallow probability tree with branching factor 16,
  so sampling is O(log n) in the length of `uprobs`.
  """

  def __init__(self, uprobs, seed=0):
    assert uprobs[0] >= uprobs[-1], uprobs
    self.uprobs = uprobs
    self.tree = self._build(uprobs)
    self.rng = np.random.default_rng(seed)
    self.step = 0
    self.steps = {}  # key -> insertion step
    self.items = {}  # insertion step -> key

  def __len__(self):
    return len(self.items)

  def __call__(self):
    for retry in range(10):
      try:
        age = self._sample(self.tree, self.rng)
        if len(self.items) < len(self.uprobs):
          # Fewer items than age slots: rescale the age into range.
          age = int(age / len(self.uprobs) * len(self.items))
        return self.items[self.step - 1 - age]
      except KeyError:
        # Item might have been deleted very recently.
        if retry < 9:
          time.sleep(0.01)
        else:
          raise

  def __setitem__(self, key, stepids):
    self.steps[key] = self.step
    self.items[self.step] = key
    self.step += 1

  def __delitem__(self, key):
    step = self.steps.pop(key)
    del self.items[step]

  def _sample(self, tree, rng, bfactor=16):
    """Sample an age index by walking down the probability tree."""
    path = []
    for level, prob in enumerate(tree):
      p = prob
      for segment in path:
        p = p[segment]
      # Fix: choose among the current probability vector `p`. The previous
      # `len(segment)` raised NameError at the root level, where `path` is
      # still empty and `segment` is unbound.
      index = rng.choice(len(p), p=p)
      path.append(index)
    index = sum(
        index * bfactor ** (len(tree) - level - 1)
        for level, index in enumerate(path))
    return index

  def _build(self, uprobs, bfactor=16):
    """Build per-level normalized probability tables over age slots."""
    assert np.isfinite(uprobs).all(), uprobs
    assert (uprobs >= 0).all(), uprobs
    depth = int(np.ceil(np.log(len(uprobs)) / np.log(bfactor)))
    size = bfactor ** depth
    # Pad to a complete tree with zero-probability slots.
    uprobs = np.concatenate([uprobs, np.zeros(size - len(uprobs))])
    tree = [uprobs]
    for level in reversed(range(depth - 1)):
      tree.insert(0, tree[0].reshape((-1, bfactor)).sum(-1))
    for level, prob in enumerate(tree):
      prob = prob.reshape([bfactor] * (1 + level))
      total = prob.sum(-1, keepdims=True)
      with np.errstate(divide='ignore', invalid='ignore'):
        # Zero-total nodes keep their (all-zero) raw values.
        tree[level] = np.where(total, prob / total, prob)
    return tree
+
+
class Prioritized:
  """Selector that samples keys proportionally to aggregated priorities.

  Each key maps to a list of step ids. A key's unnormalized sampling
  probability aggregates the priorities of its steps and is maintained
  inside a SampleTree for O(log n) sampling and updates.
  """

  def __init__(
      self, exponent=1.0, initial=1.0, zero_on_sample=False,
      maxfrac=0.0, branching=16, seed=0):
    # exponent: power applied to each step priority before aggregation.
    # initial: default priority for steps that were never prioritized.
    # zero_on_sample: zero out a key's priorities right after sampling it.
    # maxfrac: weight of the max (vs. mean) of step priorities per key.
    assert 0 <= maxfrac <= 1, maxfrac
    self.exponent = float(exponent)
    self.initial = float(initial)
    self.zero_on_sample = zero_on_sample
    self.maxfrac = maxfrac
    self.tree = SampleTree(branching, seed)
    self.prios = collections.defaultdict(lambda: self.initial)
    self.stepitems = collections.defaultdict(list)  # stepid -> list of keys
    self.items = {}  # key -> list of stepids

  def prioritize(self, stepids, priorities):
    """Set new priorities for steps and refresh all affected tree keys."""
    if not isinstance(stepids[0], bytes):
      stepids = [x.tobytes() for x in stepids]
    for stepid, priority in zip(stepids, priorities):
      try:
        self.prios[stepid] = priority
      except KeyError:
        # NOTE(review): assigning into a defaultdict does not raise
        # KeyError, so this branch looks unreachable — confirm intent.
        print('Ignoring priority update for removed time step.')
    items = []
    for stepid in stepids:
      items += self.stepitems[stepid]
    # Recompute the aggregate once per affected key.
    for key in list(set(items)):
      try:
        self.tree.update(key, self._aggregate(key))
      except KeyError:
        print('Ignoring tree update for removed time step.')

  def __len__(self):
    return len(self.items)

  def __call__(self):
    key = self.tree.sample()
    if self.zero_on_sample:
      # Prevent immediate resampling until new priorities arrive.
      zeros = [0.0] * len(self.items[key])
      self.prioritize(self.items[key], zeros)
    return key

  def __setitem__(self, key, stepids):
    if not isinstance(stepids[0], bytes):
      stepids = [x.tobytes() for x in stepids]
    self.items[key] = stepids
    [self.stepitems[stepid].append(key) for stepid in stepids]
    self.tree.insert(key, self._aggregate(key))

  def __delitem__(self, key):
    self.tree.remove(key)
    stepids = self.items.pop(key)
    for stepid in stepids:
      stepitems = self.stepitems[stepid]
      stepitems.remove(key)
      if not stepitems:
        # Last key referencing this step; drop its bookkeeping entirely.
        del self.stepitems[stepid]
        del self.prios[stepid]

  def _aggregate(self, key):
    # Both list comprehensions in this function are a performance bottleneck
    # because they are called very often.
    prios = [self.prios[stepid] for stepid in self.items[key]]
    if self.exponent != 1.0:
      prios = [x ** self.exponent for x in prios]
    mean = sum(prios) / len(prios)
    if self.maxfrac:
      # Interpolate between max and mean of the step priorities.
      return self.maxfrac * max(prios) + (1 - self.maxfrac) * mean
    else:
      return mean
+
+
class Mixture:
  """Selector that delegates to several sub-selectors with fixed weights."""

  def __init__(self, selectors, fractions, seed=0):
    assert set(selectors.keys()) == set(fractions.keys())
    assert sum(fractions.values()) == 1, fractions
    # Drop zero-weight sub-selectors so they never receive items.
    for key, frac in list(fractions.items()):
      if not frac:
        selectors.pop(key)
        fractions.pop(key)
    keys = sorted(selectors.keys())
    self.selectors = [selectors[key] for key in keys]
    self.fractions = np.array([fractions[key] for key in keys], np.float32)
    self.rng = np.random.default_rng(seed)

  def __call__(self):
    selector = self.rng.choice(self.selectors, p=self.fractions)
    return selector()

  def __setitem__(self, key, stepids):
    for selector in self.selectors:
      selector[key] = stepids

  def __delitem__(self, key):
    for selector in self.selectors:
      del selector[key]

  def prioritize(self, stepids, priorities):
    # Forward only to sub-selectors that support priorities.
    for selector in self.selectors:
      if hasattr(selector, 'prioritize'):
        selector.prioritize(stepids, priorities)
+
+
class SampleTree:
  """Complete n-ary tree for sampling keys by unnormalized probability.

  Leaves are SampleTreeEntry objects; inner SampleTreeNode objects cache
  the sum of their children's uprobs, so sampling and updates cost
  O(branching * depth).
  """

  def __init__(self, branching=16, seed=0):
    assert 2 <= branching
    self.branching = branching
    self.root = SampleTreeNode()
    self.last = None  # Most recently inserted entry; keeps the tree compact.
    self.entries = {}
    self.rng = np.random.default_rng(seed)

  def __len__(self):
    return len(self.entries)

  def insert(self, key, uprob):
    """Add a new leaf next to the most recently inserted one."""
    if not self.last:
      node = self.root
    else:
      ups = 0
      node = self.last.parent
      # Walk up until an ancestor with free capacity is found.
      while node and len(node) >= self.branching:
        node = node.parent
        ups += 1
      if not node:
        # All levels are full: grow the tree by one level via a new root.
        node = SampleTreeNode()
        node.append(self.root)
        self.root = node
      # Walk back down, creating empty nodes to restore the previous depth.
      for _ in range(ups):
        below = SampleTreeNode()
        node.append(below)
        node = below
    entry = SampleTreeEntry(key, uprob)
    node.append(entry)
    self.entries[key] = entry
    self.last = entry

  def remove(self, key):
    """Delete a leaf, relocating the last leaf to keep the tree complete."""
    entry = self.entries.pop(key)
    entry_parent = entry.parent
    last_parent = self.last.parent
    entry.parent.remove(entry)
    if entry is not self.last:
      # Move the last entry into the removed entry's slot.
      entry_parent.append(self.last)
    # Prune nodes that became empty, bottom-up.
    node = last_parent
    ups = 0
    while node.parent and not len(node):
      above = node.parent
      above.remove(node)
      node = above
      ups += 1
    if not len(node):
      self.last = None
      return
    # The new last entry is the right-most leaf of the remaining tree.
    while isinstance(node, SampleTreeNode):
      node = node.children[-1]
    self.last = node

  def update(self, key, uprob):
    """Change the uprob of an existing leaf and refresh ancestor sums."""
    entry = self.entries[key]
    entry.uprob = uprob
    entry.parent.recompute()

  def sample(self):
    """Draw a key with probability proportional to its uprob."""
    node = self.root
    while isinstance(node, SampleTreeNode):
      uprobs = np.array([x.uprob for x in node.children])
      total = uprobs.sum()
      if not np.isfinite(total):
        # Infinite uprobs dominate: pick uniformly among the infinite ones.
        finite = np.isinf(uprobs)
        probs = finite / finite.sum()
      elif total == 0:
        # All-zero subtree: fall back to a uniform choice.
        probs = np.ones(len(uprobs)) / len(uprobs)
      else:
        probs = uprobs / total
      choice = self.rng.choice(np.arange(len(uprobs)), p=probs)
      node = node.children[choice.item()]
    return node.key
+
+
class SampleTreeNode:
  """Inner tree node whose uprob caches the sum of its children's uprobs."""

  __slots__ = ('parent', 'children', 'uprob')

  def __init__(self, parent=None):
    self.parent = parent
    self.children = []
    self.uprob = 0

  def __repr__(self):
    child_uprobs = [x.uprob for x in self.children]
    return f'SampleTreeNode(uprob={self.uprob}, children={child_uprobs})'

  def __len__(self):
    return len(self.children)

  def __bool__(self):
    # Nodes are always truthy, even when empty; only None means "missing".
    return True

  def append(self, child):
    if child.parent:
      child.parent.remove(child)
    child.parent = self
    self.children.append(child)
    self.recompute()

  def remove(self, child):
    child.parent = None
    self.children.remove(child)
    self.recompute()

  def recompute(self):
    # Refresh the cached sum and propagate the change to the root.
    self.uprob = sum(x.uprob for x in self.children)
    if self.parent:
      self.parent.recompute()
+
+
class SampleTreeEntry:
  """Leaf of the sample tree, holding a key and its unnormalized prob."""

  __slots__ = ('parent', 'key', 'uprob')

  def __init__(self, key=None, uprob=None):
    self.parent = None
    self.key = key
    self.uprob = uprob
diff --git a/models/embodied/core/streams.py b/models/embodied/core/streams.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0d836bb78bb7ca202392d4524fcde4e12d2e53d
--- /dev/null
+++ b/models/embodied/core/streams.py
@@ -0,0 +1,241 @@
+import functools
+import queue
+import threading
+
+import elements
+import numpy as np
+import portal
+
+from . import base
+
+
class Stateless(base.Stream):
  """Stream over a stateless next-function; save/load are no-ops."""

  def __init__(self, nextfn, *args, **kwargs):
    # Accept plain iterators as well as callables by binding __next__.
    if not callable(nextfn) and hasattr(nextfn, '__next__'):
      nextfn = nextfn.__next__
    self.nextfn = functools.partial(nextfn, *args, **kwargs)

  def __iter__(self):
    return self

  def __next__(self):
    return self.nextfn()

  def save(self):
    # Nothing to checkpoint.
    return None

  def load(self, data):
    pass
+
+
class Prefetch(base.Stream):
  """Stream that prefetches elements from a source on a background thread.

  Up to `amount` elements are computed ahead of time. The source's state
  (via save/load) is captured after producing each element so the stream
  can be checkpointed consistently with the data already handed out.
  """

  def __init__(self, source, transform=None, amount=1):
    self.source = iter(source) if hasattr(source, '__iter__') else source()
    self.transform = transform or (lambda x: x)
    self.state = self._getstate()
    # Counts how many more elements the worker is allowed to produce.
    self.requests = threading.Semaphore(amount)
    self.amount = amount
    self.queue = queue.Queue()
    self.worker = portal.Thread(self._worker)
    self.started = False

  def __iter__(self):
    assert not self.started
    self.worker.start()
    self.started = True
    return self

  def __next__(self):
    assert self.started
    result = self.queue.get()
    self.requests.release()
    # The worker enqueues an error message string when it crashes.
    if isinstance(result, str):
      raise RuntimeError(result)
    data, self.state = result
    return data

  def save(self):
    # State corresponding to the last element handed out, not the last
    # element produced by the worker.
    return self.state

  def load(self, state):
    if self.started:
      # Drain prefetched elements so the worker is idle before reloading.
      for _ in range(self.amount):
        self.queue.get()
    self.source.load(state)
    if self.started:
      self.requests.release(self.amount)

  def _worker(self):
    try:
      while True:
        self.requests.acquire()
        data = next(self.source)
        data = self.transform(data)
        state = self._getstate()
        self.queue.put((data, state))
    except Exception as e:
      # Forward the failure to the consumer thread via the queue.
      self.queue.put(str(e))
      raise

  def _getstate(self):
    if hasattr(self.source, 'save'):
      return self.source.save()
    else:
      return None
+
+
class Consec(base.Stream):
  """Split source batches into consecutive chunks along the time axis.

  Each source batch provides `consec * length + prefix` time steps (at
  least that many; exactly that many when `strict`). It is emitted as
  `consec` chunks of `length` steps, each including `prefix` preceding
  context steps, e.g. with length=3, consec=3, prefix=2:

    source:  0 1 2 3 4 5 6 7 8 9 10
    chunk 1: p-p-#-#-#
    chunk 2:       p-p-#-#-#
    chunk 3:             p-p-#-#-#
  """

  def __init__(
      self, source, length, consec, prefix=0, strict=True, contiguous=False):
    self.source = source
    self.length = length
    self.consec = consec
    self.prefix = prefix
    self.strict = strict
    self.contiguous = contiguous
    self.index = 0
    self.current = None
    self.it = None

  def __iter__(self):
    self.it = iter(self.source)
    return self

  def __next__(self):
    if self.index >= self.consec:
      self.index = 0
    if self.index == 0:
      # Fetch the next source batch and validate its time dimension.
      self.current = next(self.it)
      available = self.current['is_first'].shape[-1]
      assert self.length * self.consec + self.prefix <= available, (
          self.length, self.consec, self.prefix, available)
      if self.strict:
        assert self.consec * self.length + self.prefix == available, (
            self.consec, self.length, self.prefix, available)
    begin = self.index * self.length
    end = begin + (self.length + self.prefix)
    chunk = {k: v[:, begin: end] for k, v in self.current.items()}
    chunk['consec'] = np.full(chunk['is_first'].shape, self.index, np.int32)
    if self.contiguous:
      # This is expensive but can speed up following operations, such as
      # sending arrays via networking.
      chunk = {k: np.ascontiguousarray(v) for k, v in chunk.items()}
    self.index += 1
    return chunk

  def save(self):
    return {
        'source': self.source.save(),
        'index': self.index,
    }

  def load(self, data):
    self.source.load(data['source'])
    self.index = data['index']
+
+
class Zip(base.Stream):
  """Iterate multiple streams in lockstep and concatenate their batches."""

  def __init__(self, sources):
    assert len(sources) > 1, len(sources)
    self.sources = sources
    self.iterators = None
    self.started = False

  def __iter__(self):
    assert not self.started
    self.started = True
    self.iterators = [iter(source) for source in self.sources]
    return self

  def __next__(self):
    batches = [next(iterator) for iterator in self.iterators]
    # Concatenate corresponding tree leaves along the leading axis.
    return elements.tree.map(lambda *xs: np.concatenate(xs), *batches)

  def save(self):
    return [iterator.save() for iterator in self.iterators]

  def load(self, data):
    assert len(data) == len(self.iterators)
    for iterator, state in zip(self.iterators, data):
      iterator.load(state)
+
+
class Map(base.Stream):
  """Apply a function to every element of a source stream."""

  def __init__(self, source, fn, *args, **kwargs):
    self.source = source
    self.fn = lambda x: fn(x, *args, **kwargs)
    self.iterator = None
    self.started = False

  def __iter__(self):
    assert not self.started
    self.started = True
    self.iterator = iter(self.source)
    return self

  def __next__(self):
    assert self.started
    element = next(self.iterator)
    return self.fn(element)

  def save(self):
    return self.iterator.save()

  def load(self, data):
    self.iterator.load(data)
+
+
class Mixer(base.Stream):
  """Sample each element from one of several streams with fixed weights.

  The stream choice is derived deterministically from (seed, step), so
  iteration order is reproducible after save/load of the step counter.
  """

  def __init__(self, sources, weights, seed=0):
    assert sources.keys() == weights.keys(), (sources, weights)
    self.keys = sorted(sources.keys())
    self.iterators = [iter(sources[k]) for k in self.keys]
    weights = np.array([weights[k] for k in self.keys], np.float32)
    self.probs = weights / weights.sum()
    self.seed = seed
    self.started = False
    self.step = 0

  def __iter__(self):
    assert not self.started
    # Fix: mark the stream as started; otherwise the assertion in
    # __next__ always failed.
    self.started = True
    return self

  def __next__(self):
    assert self.started
    # Fix: np.random was misspelled as np.ranodm.
    rng = np.random.default_rng(seed=[self.seed, self.step])
    self.step += 1
    index = rng.choice(len(self.keys), p=self.probs)
    return next(self.iterators[index])

  def save(self):
    return {
        'step': self.step,
        'seed': self.seed,
        'sources': {k: it.save() for k, it in zip(self.keys, self.iterators)},
    }

  def load(self, data):
    self.step = data['step']
    self.seed = data['seed']
    assert sorted(data['sources'].keys()) == self.keys, (
        data['sources'], self.keys)
    # Fix: self.iterators is a list, so pair it with the sorted keys
    # instead of indexing it with string keys.
    for key, iterator in zip(self.keys, self.iterators):
      iterator.load(data['sources'][key])
diff --git a/models/embodied/core/wrappers.py b/models/embodied/core/wrappers.py
new file mode 100644
index 0000000000000000000000000000000000000000..63a783253ee5009e29fc2012d52f1ae5b1a4e5e6
--- /dev/null
+++ b/models/embodied/core/wrappers.py
@@ -0,0 +1,418 @@
+import functools
+import time
+
+import elements
+import numpy as np
+
+
class Wrapper:
  """Base class that forwards attribute access to a wrapped environment."""

  def __init__(self, env):
    self.env = env

  def __len__(self):
    return len(self.env)

  def __bool__(self):
    return bool(self.env)

  def __getattr__(self, name):
    # Do not forward dunder lookups; that confuses copy/pickle machinery.
    if name.startswith('__'):
      raise AttributeError(name)
    try:
      return getattr(self.env, name)
    except AttributeError:
      # Surface missing attributes as ValueError so typos on wrapper
      # chains fail loudly.
      raise ValueError(name)
+
+
class TimeLimit(Wrapper):
  """End episodes after a fixed number of steps.

  With `reset=True` the wrapped env is reset when the limit triggers;
  otherwise it keeps running and only `is_first` is marked on the
  following observation.
  """

  def __init__(self, env, duration, reset=True):
    super().__init__(env)
    self._duration = duration
    self._reset = reset
    self._step = 0
    self._done = False

  def step(self, action):
    if action['reset'] or self._done:
      self._step = 0
      self._done = False
      if not self._reset:
        # Continue the underlying episode but mark a logical episode start.
        action.update(reset=False)
        obs = self.env.step(action)
        obs['is_first'] = True
        return obs
      action.update(reset=True)
      return self.env.step(action)
    self._step += 1
    obs = self.env.step(action)
    if self._duration and self._step >= self._duration:
      obs['is_last'] = True
    self._done = obs['is_last']
    return obs
+
+
class ActionRepeat(Wrapper):
  """Repeat each action a fixed number of times and sum the rewards."""

  def __init__(self, env, repeat):
    super().__init__(env)
    self._repeat = repeat

  def step(self, action):
    if action['reset']:
      return self.env.step(action)
    total = 0.0
    obs = None
    for _ in range(self._repeat):
      obs = self.env.step(action)
      total += obs['reward']
      # Stop early at episode boundaries.
      if obs['is_last'] or obs['is_terminal']:
        break
    obs['reward'] = np.float32(total)
    return obs
+
+
class ClipAction(Wrapper):
  """Clip one action entry into a fixed range before stepping the env."""

  def __init__(self, env, key='action', low=-1, high=1):
    super().__init__(env)
    self._key = key
    self._low = low
    self._high = high

  def step(self, action):
    value = np.clip(action[self._key], self._low, self._high)
    return self.env.step({**action, self._key: value})
+
+
class NormalizeAction(Wrapper):
  """Map actions from [-1, 1] to the env's finite action bounds.

  Dimensions with infinite bounds are passed through unchanged.
  """

  def __init__(self, env, key='action'):
    super().__init__(env)
    self._key = key
    self._space = env.act_space[key]
    # Only dimensions with finite bounds on both sides get rescaled.
    self._mask = np.isfinite(self._space.low) & np.isfinite(self._space.high)
    self._low = np.where(self._mask, self._space.low, -1)
    self._high = np.where(self._mask, self._space.high, 1)

  @functools.cached_property
  def act_space(self):
    # Advertise unit bounds for the rescaled (finite) dimensions.
    low = np.where(self._mask, -np.ones_like(self._low), self._low)
    high = np.where(self._mask, np.ones_like(self._low), self._high)
    space = elements.Space(np.float32, self._space.shape, low, high)
    return {**self.env.act_space, self._key: space}

  def step(self, action):
    unit = action[self._key]
    rescaled = (unit + 1) / 2 * (self._high - self._low) + self._low
    value = np.where(self._mask, rescaled, unit)
    return self.env.step({**action, self._key: value})
+
+
+# class ExpandScalars(Wrapper):
+#
+# def __init__(self, env):
+# super().__init__(env)
+# self._obs_expanded = []
+# self._obs_space = {}
+# for key, space in self.env.obs_space.items():
+# if space.shape == () and key != 'reward' and not space.discrete:
+# space = elements.Space(space.dtype, (1,), space.low, space.high)
+# self._obs_expanded.append(key)
+# self._obs_space[key] = space
+# self._act_expanded = []
+# self._act_space = {}
+# for key, space in self.env.act_space.items():
+# if space.shape == () and not space.discrete:
+# space = elements.Space(space.dtype, (1,), space.low, space.high)
+# self._act_expanded.append(key)
+# self._act_space[key] = space
+#
+# @functools.cached_property
+# def obs_space(self):
+# return self._obs_space
+#
+# @functools.cached_property
+# def act_space(self):
+# return self._act_space
+#
+# def step(self, action):
+# action = {
+# key: np.squeeze(value, 0) if key in self._act_expanded else value
+# for key, value in action.items()}
+# obs = self.env.step(action)
+# obs = {
+# key: np.expand_dims(value, 0) if key in self._obs_expanded else value
+# for key, value in obs.items()}
+# return obs
+#
+#
+# class FlattenTwoDimObs(Wrapper):
+#
+# def __init__(self, env):
+# super().__init__(env)
+# self._keys = []
+# self._obs_space = {}
+# for key, space in self.env.obs_space.items():
+# if len(space.shape) == 2:
+# space = elements.Space(
+# space.dtype,
+# (int(np.prod(space.shape)),),
+# space.low.flatten(),
+# space.high.flatten())
+# self._keys.append(key)
+# self._obs_space[key] = space
+#
+# @functools.cached_property
+# def obs_space(self):
+# return self._obs_space
+#
+# def step(self, action):
+# obs = self.env.step(action).copy()
+# for key in self._keys:
+# obs[key] = obs[key].flatten()
+# return obs
+#
+#
+# class FlattenTwoDimActions(Wrapper):
+#
+# def __init__(self, env):
+# super().__init__(env)
+# self._origs = {}
+# self._act_space = {}
+# for key, space in self.env.act_space.items():
+# if len(space.shape) == 2:
+# space = elements.Space(
+# space.dtype,
+# (int(np.prod(space.shape)),),
+# space.low.flatten(),
+# space.high.flatten())
+# self._origs[key] = space.shape
+# self._act_space[key] = space
+#
+# @functools.cached_property
+# def act_space(self):
+# return self._act_space
+#
+# def step(self, action):
+# action = action.copy()
+# for key, shape in self._origs.items():
+# action[key] = action[key].reshape(shape)
+# return self.env.step(action)
+
+
class UnifyDtypes(Wrapper):
  """Normalize observation and action dtypes to float32/int32/uint8."""

  def __init__(self, env):
    super().__init__(env)
    # Per key: the unified outer dtypes for observations and the original
    # inner dtypes expected by the wrapped env for actions.
    self._obs_space, _, self._obs_outer = self._convert(env.obs_space)
    self._act_space, self._act_inner, _ = self._convert(env.act_space)

  @property
  def obs_space(self):
    return self._obs_space

  @property
  def act_space(self):
    return self._act_space

  def step(self, action):
    action = action.copy()
    # Cast incoming actions back to the dtypes the inner env expects.
    for key, dtype in self._act_inner.items():
      action[key] = np.asarray(action[key], dtype)
    obs = self.env.step(action)
    # Cast outgoing observations to the unified dtypes.
    for key, dtype in self._obs_outer.items():
      obs[key] = np.asarray(obs[key], dtype)
    return obs

  def _convert(self, spaces):
    """Return (converted spaces, original dtypes, unified dtypes) per key."""
    results, befores, afters = {}, {}, {}
    for key, space in spaces.items():
      before = after = space.dtype
      if np.issubdtype(before, np.floating):
        after = np.float32
      elif np.issubdtype(before, np.uint8):
        after = np.uint8
      elif np.issubdtype(before, np.integer):
        after = np.int32
      befores[key] = before
      afters[key] = after
      results[key] = elements.Space(after, space.shape, space.low, space.high)
    return results, befores, afters
+
+
class CheckSpaces(Wrapper):
  """Validate all actions and observations against the declared spaces."""

  def __init__(self, env):
    # Obs and act keys must be disjoint so merged dicts stay unambiguous.
    assert not (env.obs_space.keys() & env.act_space.keys()), (
        env.obs_space.keys(), env.act_space.keys())
    super().__init__(env)

  def step(self, action):
    for key, value in action.items():
      self._check(value, self.env.act_space[key], key)
    obs = self.env.step(action)
    for key, value in obs.items():
      self._check(value, self.env.obs_space[key], key)
    return obs

  def _check(self, value, space, key):
    allowed = (np.ndarray, np.generic, list, tuple, int, float, bool)
    if not isinstance(value, allowed):
      raise TypeError(f'Invalid type {type(value)} for key {key}.')
    if value in space:
      return
    # Build a descriptive error with the offending value's statistics.
    dtype = np.array(value).dtype
    shape = np.array(value).shape
    lowest, highest = np.min(value), np.max(value)
    raise ValueError(
        f"Value for '{key}' with dtype {dtype}, shape {shape}, "
        f"lowest {lowest}, highest {highest} is not in {space}.")
+
+
class DiscretizeAction(Wrapper):
  """Expose a continuous action entry as evenly spaced discrete bins."""

  def __init__(self, env, key='action', bins=5):
    super().__init__(env)
    self._key = key
    self._dims = np.squeeze(env.act_space[key].shape, 0).item()
    # Bin centers span the unit interval [-1, 1].
    self._values = np.linspace(-1, 1, bins)

  @functools.cached_property
  def act_space(self):
    space = elements.Space(np.int32, self._dims, 0, len(self._values))
    return {**self.env.act_space, self._key: space}

  def step(self, action):
    # Look up the continuous value for each chosen bin index.
    continuous = np.take(self._values, action[self._key])
    return self.env.step({**action, self._key: continuous})
+
+
class ResizeImage(Wrapper):
  """Resize image observations whose leading dims differ from `size`."""

  def __init__(self, env, size=(64, 64)):
    super().__init__(env)
    self._size = size
    self._keys = [
        key for key, space in env.obs_space.items()
        if len(space.shape) > 1 and space.shape[:2] != size]
    print(f'Resizing keys {",".join(self._keys)} to {self._size}.')
    if self._keys:
      # Import lazily so Pillow is only required when resizing happens.
      from PIL import Image
      self._Image = Image

  @functools.cached_property
  def obs_space(self):
    spaces = self.env.obs_space
    for key in self._keys:
      shape = self._size + spaces[key].shape[2:]
      spaces[key] = elements.Space(np.uint8, shape)
    return spaces

  def step(self, action):
    obs = self.env.step(action)
    for key in self._keys:
      obs[key] = self._resize(obs[key])
    return obs

  def _resize(self, image):
    # Nearest-neighbor keeps original pixel values intact.
    resized = self._Image.fromarray(image)
    resized = resized.resize(self._size, self._Image.NEAREST)
    return np.array(resized)
+
+
+# class RenderImage(Wrapper):
+#
+# def __init__(self, env, key='image'):
+# super().__init__(env)
+# self._key = key
+# self._shape = self.env.render().shape
+#
+# @functools.cached_property
+# def obs_space(self):
+# spaces = self.env.obs_space
+# spaces[self._key] = elements.Space(np.uint8, self._shape)
+# return spaces
+#
+# def step(self, action):
+# obs = self.env.step(action)
+# obs[self._key] = self.env.render()
+# return obs
+
+
class BackwardReturn(Wrapper):
  """Add an observation with the discounted return accumulated so far."""

  def __init__(self, env, horizon):
    super().__init__(env)
    # Discount chosen so `horizon` is the effective averaging window.
    self._discount = 1 - 1 / horizon
    self._bwreturn = 0.0

  @functools.cached_property
  def obs_space(self):
    return {
        **self.env.obs_space,
        'bwreturn': elements.Space(np.float32),
    }

  def step(self, action):
    obs = self.env.step(action)
    # Zero the accumulator at episode starts, otherwise decay it.
    self._bwreturn *= (1 - obs['is_first']) * self._discount
    self._bwreturn += obs['reward']
    obs['bwreturn'] = np.float32(self._bwreturn)
    return obs
+
+
class AddObs(Wrapper):
  """Inject a constant entry into every observation."""

  def __init__(self, env, key, value, space):
    super().__init__(env)
    self._key = key
    self._value = value
    self._space = space

  @functools.cached_property
  def obs_space(self):
    return {**self.env.obs_space, self._key: self._space}

  def step(self, action):
    obs = self.env.step(action)
    obs[self._key] = self._value
    return obs
+
+
class RestartOnException(Wrapper):
  """Recreate the wrapped env when stepping raises an exception.

  Allows up to `maxfails` crashes within a sliding `window` of seconds;
  further failures escalate as RuntimeError. After a crash, waits `wait`
  seconds, rebuilds the env via `ctor`, and forces a reset step.
  """

  def __init__(
      self, ctor, exceptions=(Exception,), window=300, maxfails=2, wait=20):
    if not isinstance(exceptions, (tuple, list)):
      exceptions = [exceptions]
    self._ctor = ctor
    self._exceptions = tuple(exceptions)
    self._window = window
    self._maxfails = maxfails
    self._wait = wait
    self._last = time.time()  # Start of the current failure window.
    self._fails = 0
    super().__init__(self._ctor())

  def step(self, action):
    try:
      return self.env.step(action)
    except self._exceptions as e:
      if time.time() > self._last + self._window:
        # Window expired; start counting failures afresh.
        self._last = time.time()
        self._fails = 1
      else:
        self._fails += 1
      if self._fails > self._maxfails:
        raise RuntimeError('The env crashed too many times.')
      message = f'Restarting env after crash with {type(e).__name__}: {e}'
      print(message, flush=True)
      time.sleep(self._wait)
      self.env = self._ctor()
      # Force a reset so the rebuilt env starts a fresh episode.
      action['reset'] = np.ones_like(action['reset'])
      return self.env.step(action)
diff --git a/models/embodied/envs/atari.py b/models/embodied/envs/atari.py
new file mode 100644
index 0000000000000000000000000000000000000000..d40a56f67274f7277ec4f66cf15128299be51160
--- /dev/null
+++ b/models/embodied/envs/atari.py
@@ -0,0 +1,177 @@
+import os
+import threading
+import collections
+
+import ale_py
+import ale_py.roms as roms
+import elements
+import embodied
+import numpy as np
+
+from PIL import Image
+
+
class Atari(embodied.Env):
  """Atari 2600 environment via the Arcade Learning Environment (ALE).

  Implements the common preprocessing pipeline: action repeat with frame
  pooling, resizing, optional grayscale conversion, sticky actions, noop
  starts, a step limit, and configurable life-loss handling.
  """

  LOCK = threading.Lock()  # Guards ALE construction, ROM loading, resets.
  # Rec. 601 luma weights used for grayscale conversion.
  WEIGHTS = np.array([0.299, 0.587, 1 - (0.299 + 0.587)])
  ACTION_MEANING = (
      'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',
      'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',
      'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE')

  def __init__(
      self, name, repeat=4, size=(84, 84), gray=True, noops=0, lives='unused',
      sticky=True, actions='all', length=108000, pooling=2, aggregate='max',
      resize='pillow', autostart=False, clip_reward=False, seed=None):
    # lives: how to treat life loss ('unused', 'discount', or 'reset').
    # pooling: number of final frames per repeat aggregated into one image.
    # autostart: press FIRE (and UP as fallback) after reset if available.

    assert lives in ('unused', 'discount', 'reset'), lives
    assert actions in ('all', 'needed'), actions
    assert resize in ('opencv', 'pillow'), resize
    assert aggregate in ('max', 'mean'), aggregate
    assert pooling >= 1, pooling
    assert repeat >= 1, repeat
    if name == 'james_bond':
      name = 'jamesbond'  # Historical naming difference between benchmarks.

    self.repeat = repeat
    self.size = size
    self.gray = gray
    self.noops = noops
    self.lives = lives
    self.sticky = sticky
    self.length = length
    self.pooling = pooling
    self.aggregate = aggregate
    self.resize = resize
    self.autostart = autostart
    self.clip_reward = clip_reward
    self.rng = np.random.default_rng(seed)

    with self.LOCK:
      self.ale = ale_py.ALEInterface()
      self.ale.setLoggerMode(ale_py.LoggerMode.Error)
      self.ale.setInt(b'random_seed', self.rng.integers(0, 2 ** 31))
      path = os.environ.get('ALE_ROM_PATH', None)
      if path:
        self.ale.loadROM(os.path.join(path, f'{name}.bin'))
      else:
        self.ale.loadROM(roms.get_rom_path(name))

    # Sticky actions repeat the previous action with this probability.
    self.ale.setFloat('repeat_action_probability', 0.25 if sticky else 0.0)
    self.actionset = {
        'all': self.ale.getLegalActionSet,
        'needed': self.ale.getMinimalActionSet,
    }[actions]()

    # Ring of raw frames used for max/mean pooling across repeated steps.
    W, H = self.ale.getScreenDims()
    self.buffers = collections.deque(
        [np.zeros((W, H, 3), np.uint8) for _ in range(self.pooling)],
        maxlen=self.pooling)
    self.prevlives = None
    self.duration = None
    self.done = True

  @property
  def obs_space(self):
    return {
        'image': elements.Space(np.uint8, (*self.size, 1 if self.gray else 3)),
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }

  @property
  def act_space(self):
    return {
        'action': elements.Space(np.int32, (), 0, len(self.actionset)),
        'reset': elements.Space(bool),
    }

  def step(self, action):
    if action['reset'] or self.done:
      self._reset()
      self.prevlives = self.ale.lives()
      self.duration = 0
      self.done = False
      return self._obs(0.0, is_first=True)
    reward = 0.0
    terminal = False
    last = False
    assert 0 <= action['action'] < len(self.actionset), action['action']
    act = self.actionset[action['action']]
    for repeat in range(self.repeat):
      reward += self.ale.act(act)
      self.duration += 1
      # Only render the final `pooling` frames of the repeat window.
      if repeat >= self.repeat - self.pooling:
        self._render()
      if self.ale.game_over():
        terminal = True
        last = True
      if self.duration >= self.length:
        # Step limit reached: episode ends without a terminal state.
        last = True
      lives = self.ale.lives()
      if self.lives == 'discount' and 0 < lives < self.prevlives:
        terminal = True
      if self.lives == 'reset' and 0 < lives < self.prevlives:
        terminal = True
        last = True
      self.prevlives = lives
      if terminal or last:
        break
    self.done = last
    obs = self._obs(reward, is_last=last, is_terminal=terminal)
    return obs

  def _reset(self):
    with self.LOCK:
      self.ale.reset_game()
    # Randomized noop starts decorrelate initial states across episodes.
    for _ in range(self.rng.integers(self.noops + 1)):
      self.ale.act(self.ACTION_MEANING.index('NOOP'))
      if self.ale.game_over():
        with self.LOCK:
          self.ale.reset_game()
    if self.autostart and self.ACTION_MEANING.index('FIRE') in self.actionset:
      self.ale.act(self.ACTION_MEANING.index('FIRE'))
      if self.ale.game_over():
        with self.LOCK:
          self.ale.reset_game()
      self.ale.act(self.ACTION_MEANING.index('UP'))
      if self.ale.game_over():
        with self.LOCK:
          self.ale.reset_game()
    self._render()
    # Fill all pooling buffers with the freshly rendered frame. Fix: the
    # np.copyto arguments were swapped, which copied stale frames over the
    # current one instead of the other way around.
    for i, dst in enumerate(self.buffers):
      if i > 0:
        np.copyto(dst, self.buffers[0])

  def _render(self, reset=False):
    # Recycle the oldest buffer as the new front and render into it.
    self.buffers.appendleft(self.buffers.pop())
    self.ale.getScreenRGB(self.buffers[0])

  def _obs(self, reward, is_first=False, is_last=False, is_terminal=False):
    if self.clip_reward:
      reward = np.sign(reward)
    if self.aggregate == 'max':
      image = np.amax(self.buffers, 0)
    elif self.aggregate == 'mean':
      image = np.mean(self.buffers, 0).astype(np.uint8)
    if self.resize == 'opencv':
      import cv2
      image = cv2.resize(image, self.size, interpolation=cv2.INTER_AREA)
    elif self.resize == 'pillow':
      image = Image.fromarray(image)
      image = image.resize(self.size, Image.BILINEAR)
      image = np.array(image)
    if self.gray:
      # Averaging channels equally would not work. For example, a fully red
      # object on a fully green background would average to the same color.
      image = (image * self.WEIGHTS).sum(-1).astype(image.dtype)[:, :, None]
    return dict(
        image=image,
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        # Fix: previously returned is_last here, which conflated time-limit
        # episode ends with true terminal states.
        is_terminal=is_terminal,
    )
diff --git a/models/embodied/envs/bsuite.py b/models/embodied/envs/bsuite.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4e31043756627dc3267254a87729aace517d25a
--- /dev/null
+++ b/models/embodied/envs/bsuite.py
@@ -0,0 +1,51 @@
+import time
+
+import embodied
+import numpy as np
+
+
class BSuite(embodied.Env):
  """BSuite environment that ends the run after the target episode budget.

  BSuite writes its own result logs; after the required number of episodes
  the run keeps going for 10 minutes (to let logs flush) and then raises.
  """

  def __init__(self, task):
    print(
        'Warning: BSuite result logging is stateful and therefore training ' +
        'runs cannot be interrupted or restarted.')
    np.int = int  # Patch deprecated Numpy alias used inside BSuite.
    from . import from_dm
    if '/' not in task:
      task = f'{task}/0'
    import bsuite
    env = bsuite.from_checkpoint_id(task)
    self.num_episodes = 0
    self.max_episodes = env.bsuite_num_episodes
    # Wall-clock deadline, set once the episode budget has been reached.
    self.exit_after = None
    env = from_dm.FromDM(env)
    env = embodied.wrappers.ForceDtypes(env)
    env = embodied.wrappers.FlattenTwoDimObs(env)
    self.env = env

  @property
  def obs_space(self):
    return self.env.obs_space

  @property
  def act_space(self):
    return self.env.act_space

  def step(self, action):
    obs = self.env.step(action)
    if obs['is_last']:
      self.num_episodes += 1
    if self.num_episodes >= self.max_episodes:
      # After reaching the target number of episodes, continue running for 10
      # minutes to make sure logs are flushed and then raise an exception to
      # terminate the program.
      if not self.exit_after:
        self.exit_after = time.time() + 600
      if time.time() > self.exit_after:
        # Fix: the previous code branched on self.xm, an attribute that is
        # never defined on this class and would raise AttributeError here.
        raise RuntimeError('BSuite run complete')
    return obs
diff --git a/models/embodied/envs/crafter.py b/models/embodied/envs/crafter.py
new file mode 100644
index 0000000000000000000000000000000000000000..affdc8b52055a00a0849023d1f2f7aafe79c127f
--- /dev/null
+++ b/models/embodied/envs/crafter.py
@@ -0,0 +1,93 @@
+import json
+
+import crafter
+import elements
+import embodied
+import numpy as np
+
+
class Crafter(embodied.Env):
  """Crafter survival environment with optional achievement logging.

  When a log directory is given, per-episode statistics are appended to a
  stats.jsonl file at the end of every episode.
  """

  def __init__(self, task, size=(64, 64), logs=False, logdir=None, seed=None):
    assert task in ('reward', 'noreward')
    self._env = crafter.Env(size=size, reward=(task == 'reward'), seed=seed)
    self._logs = logs
    self._logdir = logdir and elements.Path(logdir)
    self._logdir and self._logdir.mkdir()
    self._episode = 0
    self._length = None
    self._reward = None
    self._achievements = crafter.constants.achievements.copy()
    self._done = True

  @property
  def obs_space(self):
    spaces = {
        'image': elements.Space(np.uint8, self._env.observation_space.shape),
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
        'log/reward': elements.Space(np.float32),
    }
    if self._logs:
      spaces.update({
          f'log/achievement_{k}': elements.Space(np.int32)
          for k in self._achievements})
    return spaces

  @property
  def act_space(self):
    return {
        'action': elements.Space(np.int32, (), 0, self._env.action_space.n),
        'reset': elements.Space(bool),
    }

  def step(self, action):
    if action['reset'] or self._done:
      self._episode += 1
      self._length = 0
      self._reward = 0
      self._done = False
      image = self._env.reset()
      return self._obs(image, 0.0, {}, is_first=True)
    image, reward, self._done, info = self._env.step(action['action'])
    self._reward += reward
    self._length += 1
    if self._done and self._logdir:
      self._write_stats(self._length, self._reward, info)
    return self._obs(
        image, reward, info,
        is_last=self._done,
        # discount == 0 marks a true terminal state rather than a time limit.
        is_terminal=info['discount'] == 0)

  def _obs(
      self, image, reward, info,
      is_first=False, is_last=False, is_terminal=False):
    obs = dict(
        image=image,
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        is_terminal=is_terminal,
        # info is empty ({}) on reset steps, so fall back to zero reward.
        **{'log/reward': np.float32(info['reward'] if info else 0.0)},
    )
    if self._logs:
      log_achievements = {
          f'log/achievement_{k}': info['achievements'][k] if info else 0
          for k in self._achievements}
      obs.update({k: np.int32(v) for k, v in log_achievements.items()})
    return obs

  def _write_stats(self, length, reward, info):
    """Append one episode record to stats.jsonl in the log directory."""
    stats = {
        'episode': self._episode,
        'length': length,
        'reward': round(reward, 1),
        **{f'achievement_{k}': v for k, v in info['achievements'].items()},
    }
    filename = self._logdir / 'stats.jsonl'
    # NOTE(review): elements.Path may not support append mode, so the file
    # is read and rewritten whole; O(episodes^2) over a long run.
    lines = filename.read() if filename.exists() else ''
    lines += json.dumps(stats) + '\n'
    filename.write(lines, mode='w')
    # Fix: the message previously printed a literal placeholder instead of
    # the path that was written.
    print(f'Wrote stats: {filename}')
diff --git a/models/embodied/envs/dmc.py b/models/embodied/envs/dmc.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a6ec3991cf7b83c90a9e35b524a13fc3b31474f
--- /dev/null
+++ b/models/embodied/envs/dmc.py
@@ -0,0 +1,76 @@
+import functools
+import os
+
+import elements
+import embodied
+import numpy as np
+from dm_control import manipulation
+from dm_control import suite
+from dm_control.locomotion.examples import basic_rodent_2020
+
+from . import from_dm
+
+
class DMC(embodied.Env):
  """DeepMind Control Suite adapter.

  Accepts either a task string like 'walker_walk' (split into domain and
  task) or an already-constructed dm_control environment.  Renders image
  observations directly from the physics engine and can strip the
  proprioceptive observations.
  """

  # Per-domain default camera ids, used when camera=-1 is passed.
  DEFAULT_CAMERAS = dict(
      quadruped=2,
      rodent=4,
  )

  def __init__(
      self, env, repeat=1, size=(64, 64), proprio=True, image=True, camera=-1):
    # Headless rendering backend; only set if the user has not chosen one.
    if 'MUJOCO_GL' not in os.environ:
      os.environ['MUJOCO_GL'] = 'egl'
    if isinstance(env, str):
      domain, task = env.split('_', 1)
      if camera == -1:
        camera = self.DEFAULT_CAMERAS.get(domain, 0)
      if domain == 'cup':  # Only domain with multiple words.
        domain = 'ball_in_cup'
      if domain == 'manip':
        env = manipulation.load(task + '_vision')
      elif domain == 'rodent':
        # camera 0: topdown map
        # camera 2: shoulder
        # camera 4: topdown tracking
        # camera 5: eyes
        env = getattr(basic_rodent_2020, task)()
      else:
        env = suite.load(domain, task)
    self._dmenv = env
    self._env = from_dm.FromDM(self._dmenv)
    self._env = embodied.wrappers.ActionRepeat(self._env, repeat)
    self._size = size
    self._proprio = proprio
    self._image = image
    self._camera = camera

  @functools.cached_property
  def obs_space(self):
    basic = ('is_first', 'is_last', 'is_terminal', 'reward')
    spaces = self._env.obs_space.copy()
    if not self._proprio:
      # Keep only the bookkeeping entries; drop all proprioceptive keys.
      spaces = {k: spaces[k] for k in basic}
    # With image=False the render is still exposed under a log/ key, for
    # visualization rather than training input.
    key = 'image' if self._image else 'log/image'
    spaces[key] = elements.Space(np.uint8, self._size + (3,))
    return spaces

  @functools.cached_property
  def act_space(self):
    return self._env.act_space

  def step(self, action):
    # Guard against NaN/inf leaking into the simulator via continuous actions.
    for key, space in self.act_space.items():
      if not space.discrete:
        assert np.isfinite(action[key]).all(), (key, action[key])
    obs = self._env.step(action)
    basic = ('is_first', 'is_last', 'is_terminal', 'reward')
    if not self._proprio:
      obs = {k: obs[k] for k in basic}
    key = 'image' if self._image else 'log/image'
    # Render from the underlying physics so the frame reflects the state
    # after action repeat.
    obs[key] = self._dmenv.physics.render(*self._size, camera_id=self._camera)
    for key, space in self.obs_space.items():
      if np.issubdtype(space.dtype, np.floating):
        assert np.isfinite(obs[key]).all(), (key, obs[key])
    return obs
diff --git a/models/embodied/envs/dmlab.py b/models/embodied/envs/dmlab.py
new file mode 100644
index 0000000000000000000000000000000000000000..f46dff53b717d5b13a5b79df65b3a7331a605cc5
--- /dev/null
+++ b/models/embodied/envs/dmlab.py
@@ -0,0 +1,147 @@
+import functools
+import re
+import zlib
+
+import deepmind_lab
+import elements
+import embodied
+import numpy as np
+
+
class DMLab(embodied.Env):
  """DeepMind Lab environment with a discrete, curated action set.

  Optionally embeds the language instruction channel into a fixed-size
  float vector using hashed per-token lookup embeddings.
  """

  TOKENIZER = re.compile(r'([A-Za-z_]+|[^A-Za-z_ ]+)')

  def __init__(
      self, level, repeat=4, size=(64, 64), mode='train',
      actions='popart', episodic=True, text=None, seed=None):
    if level == 'goals':  # Shortcut for convenience
      level = 'dmlab_explore_goal_locations_small'
    self._size = size
    self._repeat = repeat
    self._actions = {
        'impala': IMPALA_ACTION_SET,
        'popart': POPART_ACTION_SET,
    }[actions]
    if text is None:
      text = bool(level.startswith('language'))
    self._episodic = episodic
    self._text = text
    self._random = np.random.RandomState(seed)
    config = dict(height=size[0], width=size[1], logLevel='WARN')
    if mode == 'train':
      if level.endswith('_test'):
        level = level.replace('_test', '_train')
    elif mode == 'eval':
      config.update(allowHoldOutLevels='true', mixerSeed=0x600D5EED)
    else:
      raise NotImplementedError(mode)
    config = {k: str(v) for k, v in config.items()}
    obs = ['RGB_INTERLEAVED', 'INSTR'] if text else ['RGB_INTERLEAVED']
    self._env = deepmind_lab.Lab(
        level='contributed/dmlab30/' + level,
        observations=obs, config=config)
    self._current_image = None
    if self._text:
      self._current_instr = None
      self._instr_length = 32
      self._embed_size = 32
      self._vocab_buckets = 64 * 1024
      # Fixed random embedding table, shared across instances via seed=0.
      self._embeddings = np.random.default_rng(seed=0).normal(
          0.0, 1.0, (self._vocab_buckets, self._embed_size)).astype(np.float32)
      # Fix: memoize token hashes in a per-instance dict instead of
      # functools.cache on the method, which keys on self and keeps the
      # instance alive for the lifetime of the cache.
      self._hash_cache = {}
    self._done = True

  @property
  def obs_space(self):
    spaces = {
        'image': elements.Space(np.uint8, self._size + (3,)),
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }
    if self._text:
      # Flat vector of concatenated per-token embeddings.
      spaces['instr'] = elements.Space(
          np.float32, self._instr_length * self._embed_size)
    return spaces

  @property
  def act_space(self):
    return {
        'action': elements.Space(np.int32, (), 0, len(self._actions)),
        'reset': elements.Space(bool),
    }

  def step(self, action):
    if action['reset'] or self._done:
      self._env.reset(seed=self._random.randint(0, 2 ** 31 - 1))
      self._done = False
      return self._obs(0.0, is_first=True)
    raw_action = np.array(self._actions[action['action']], np.intc)
    reward = self._env.step(raw_action, num_steps=self._repeat)
    self._done = not self._env.is_running()
    return self._obs(reward, is_last=self._done)

  def _obs(self, reward, is_first=False, is_last=False):
    if not self._done:
      # Observations are only available while the episode is running; after
      # termination the last cached frame (and instruction) is repeated.
      self._current_image = self._env.observations()['RGB_INTERLEAVED']
      if self._text:
        self._current_instr = self._embed(self._env.observations()['INSTR'])
    obs = dict(
        image=self._current_image,
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        is_terminal=is_last if self._episodic else False,
    )
    if self._text:
      obs['instr'] = self._current_instr
    return obs

  def _embed(self, text):
    """Embed an instruction string into a flat fixed-length float vector."""
    tokens = self.TOKENIZER.findall(text.lower())
    indices = [self._hash(token) for token in tokens]
    # Fix: truncate before padding so the output always matches the declared
    # observation space, even for instructions longer than _instr_length.
    indices = indices[:self._instr_length]
    indices = indices + [0] * (self._instr_length - len(indices))
    embeddings = [self._embeddings[i] for i in indices]
    return np.concatenate(embeddings)

  def _hash(self, token):
    """Map a token to an embedding bucket; memoized per instance."""
    try:
      return self._hash_cache[token]
    except KeyError:
      index = zlib.crc32(token.encode('utf-8')) % self._vocab_buckets
      self._hash_cache[token] = index
      return index

  def close(self):
    self._env.close()
+
+
# Small action set used by IMPALA.
# NOTE(review): each entry has 7 raw DMLab action components; presumably
# (look horizontal, look vertical, strafe, move, fire, jump, crouch) —
# confirm against the DMLab action spec.
IMPALA_ACTION_SET = (
    (  0, 0,  0,  1, 0, 0, 0),  # Forward
    (  0, 0,  0, -1, 0, 0, 0),  # Backward
    (  0, 0, -1,  0, 0, 0, 0),  # Strafe Left
    (  0, 0,  1,  0, 0, 0, 0),  # Strafe Right
    (-20, 0,  0,  0, 0, 0, 0),  # Look Left
    ( 20, 0,  0,  0, 0, 0, 0),  # Look Right
    (-20, 0,  0,  1, 0, 0, 0),  # Look Left + Forward
    ( 20, 0,  0,  1, 0, 0, 0),  # Look Right + Forward
    (  0, 0,  0,  0, 1, 0, 0),  # Fire
)

# Large action set used by PopArt and R2D2.
# A tuple (not a list) for immutability and consistency with
# IMPALA_ACTION_SET above.
POPART_ACTION_SET = (
    (  0,   0,  0,  1, 0, 0, 0),  # FW
    (  0,   0,  0, -1, 0, 0, 0),  # BW
    (  0,   0, -1,  0, 0, 0, 0),  # Strafe Left
    (  0,   0,  1,  0, 0, 0, 0),  # Strafe Right
    (-10,   0,  0,  0, 0, 0, 0),  # Small LL
    ( 10,   0,  0,  0, 0, 0, 0),  # Small LR
    (-60,   0,  0,  0, 0, 0, 0),  # Large LL
    ( 60,   0,  0,  0, 0, 0, 0),  # Large LR
    (  0,  10,  0,  0, 0, 0, 0),  # Look Down
    (  0, -10,  0,  0, 0, 0, 0),  # Look Up
    (-10,   0,  0,  1, 0, 0, 0),  # FW + Small LL
    ( 10,   0,  0,  1, 0, 0, 0),  # FW + Small LR
    (-60,   0,  0,  1, 0, 0, 0),  # FW + Large LL
    ( 60,   0,  0,  1, 0, 0, 0),  # FW + Large LR
    (  0,   0,  0,  0, 1, 0, 0),  # Fire
)
diff --git a/models/embodied/envs/dummy.py b/models/embodied/envs/dummy.py
new file mode 100644
index 0000000000000000000000000000000000000000..2875f98ff8aadf0bac3f90c273795d5d78b738fa
--- /dev/null
+++ b/models/embodied/envs/dummy.py
@@ -0,0 +1,59 @@
+import elements
+import embodied
+import numpy as np
+
+
class Dummy(embodied.Env):
  """Test environment emitting constant observations of assorted dtypes.

  Episodes last a fixed number of steps; rewards are 0 on the first step
  and 1 afterwards.
  """

  def __init__(self, task, size=(64, 64), length=100):
    del task  # Unused; accepted for a uniform env constructor signature.
    self.size = size
    self.length = length
    self.count = 0
    self.done = False

  @property
  def obs_space(self):
    return {
        'image': elements.Space(np.uint8, self.size + (3,)),
        'vector': elements.Space(np.float32, (7,)),
        'token': elements.Space(np.int32, (), 0, 256),
        'count': elements.Space(np.float32, (), 0, self.length),
        'float2d': elements.Space(np.float32, (4, 5)),
        'int2d': elements.Space(np.int32, (2, 3), 0, 4),
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }

  @property
  def act_space(self):
    return {
        'reset': elements.Space(bool),
        'act_disc': elements.Space(np.int32, (), 0, 5),
        'act_cont': elements.Space(np.float32, (6,)),
    }

  def step(self, action):
    # Fix: read the reset flag instead of popping it, so the caller's action
    # dict is not mutated (matches the other env adapters in this package).
    if action['reset'] or self.done:
      self.count = 0
      self.done = False
      return self._obs(0, is_first=True)
    self.count += 1
    self.done = (self.count >= self.length)
    return self._obs(1, is_last=self.done, is_terminal=self.done)

  def _obs(self, reward, is_first=False, is_last=False, is_terminal=False):
    """Return a constant observation dict matching obs_space."""
    return dict(
        image=np.full(self.size + (3,), 255, np.uint8),
        vector=np.zeros(7, np.float32),
        token=np.zeros((), np.int32),
        count=np.float32(self.count),
        float2d=np.ones((4, 5), np.float32),
        int2d=np.ones((2, 3), np.int32),
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        is_terminal=is_terminal,
    )
diff --git a/models/embodied/envs/from_dm.py b/models/embodied/envs/from_dm.py
new file mode 100644
index 0000000000000000000000000000000000000000..226ab1baa6f67b0d5f805e42623715a6df413bd9
--- /dev/null
+++ b/models/embodied/envs/from_dm.py
@@ -0,0 +1,89 @@
+import functools
+
+import elements
+import embodied
+import numpy as np
+
+
class FromDM(embodied.Env):
  """Adapter from a dm_env-style environment to the embodied.Env interface.

  Flattens single or dict specs into the obs/act dict convention and adds
  the reset/is_first/is_last/is_terminal bookkeeping keys.
  """

  def __init__(self, env, obs_key='observation', act_key='action'):
    self._env = env
    obs_spec = self._env.observation_spec()
    act_spec = self._env.action_spec()
    # dm_env specs can be a single spec or a dict of specs; remember which.
    self._obs_dict = isinstance(obs_spec, dict)
    self._act_dict = isinstance(act_spec, dict)
    # Fallback key used when the spec is a single (non-dict) spec.
    self._obs_key = not self._obs_dict and obs_key
    self._act_key = not self._act_dict and act_key
    # Names of zero-sized observation entries; populated by obs_space.
    self._obs_empty = []
    self._done = True

  @functools.cached_property
  def obs_space(self):
    spec = self._env.observation_spec()
    spec = spec if self._obs_dict else {self._obs_key: spec}
    # 'reward' is reserved for the env reward signal, so rename a colliding
    # observation key.
    if 'reward' in spec:
      spec['obs_reward'] = spec.pop('reward')
    for key, value in spec.copy().items():
      if int(np.prod(value.shape)) == 0:
        # Zero-sized entries are dropped from the space; the keys are kept so
        # _obs() can drop them from observations too.
        # NOTE(review): this mutation happens inside a cached property, so
        # obs_space must be accessed before the first step — confirm callers.
        self._obs_empty.append(key)
        del spec[key]
    spaces = {
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }
    for key, value in spec.items():
      key = key.replace('/', '_')  # Slashes are reserved for key nesting.
      spaces[key] = self._convert(value)
    return spaces

  @functools.cached_property
  def act_space(self):
    spec = self._env.action_spec()
    spec = spec if self._act_dict else {self._act_key: spec}
    return {
        'reset': elements.Space(bool),
        **{k or self._act_key: self._convert(v) for k, v in spec.items()},
    }

  def step(self, action):
    action = action.copy()  # Avoid mutating the caller's dict below.
    reset = action.pop('reset')
    if reset or self._done:
      time_step = self._env.reset()
    else:
      action = action if self._act_dict else action[self._act_key]
      time_step = self._env.step(action)
    self._done = time_step.last()
    return self._obs(time_step)

  def _obs(self, time_step):
    if not time_step.first():
      assert time_step.discount in (0, 1), time_step.discount
    obs = time_step.observation
    obs = dict(obs) if self._obs_dict else {self._obs_key: obs}
    if 'reward' in obs:
      obs['obs_reward'] = obs.pop('reward')
    for key in self._obs_empty:
      del obs[key]
    obs = {k.replace('/', '_'): v for k, v in obs.items()}
    return dict(
        # dm_env reports reward=None on the first step of an episode.
        reward=np.float32(0.0 if time_step.first() else time_step.reward),
        is_first=time_step.first(),
        is_last=time_step.last(),
        # discount == 0 marks a true terminal state (vs. a time limit).
        is_terminal=False if time_step.first() else time_step.discount == 0,
        **obs,
    )

  def _convert(self, space):
    """Convert a dm_env spec into an elements.Space."""
    if hasattr(space, 'num_values'):  # Discrete spec.
      return elements.Space(space.dtype, (), 0, space.num_values)
    elif hasattr(space, 'minimum'):  # Bounded array spec.
      assert np.isfinite(space.minimum).all(), space.minimum
      assert np.isfinite(space.maximum).all(), space.maximum
      return elements.Space(
          space.dtype, space.shape, space.minimum, space.maximum)
    else:  # Unbounded array spec.
      return elements.Space(space.dtype, space.shape, None, None)
diff --git a/models/embodied/envs/from_gym.py b/models/embodied/envs/from_gym.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1aef0486d41f72945d9e4b04f0ec76414e57b72
--- /dev/null
+++ b/models/embodied/envs/from_gym.py
@@ -0,0 +1,123 @@
+import functools
+
+import elements
+import embodied
+import gym
+import numpy as np
+
+
class FromGym(embodied.Env):
  """Adapter from a Gym environment to the embodied.Env interface.

  Uses the classic Gym API: reset() -> obs and step() -> (obs, reward,
  done, info).  Dict spaces are flattened into '/'-joined keys.
  """

  def __init__(self, env, obs_key='image', act_key='action', **kwargs):
    if isinstance(env, str):
      self._env = gym.make(env, **kwargs)
    else:
      # kwargs only make sense when constructing from an env id string.
      assert not kwargs, kwargs
      self._env = env
    # Dict spaces expose a .spaces attribute; remember whether obs/act do.
    self._obs_dict = hasattr(self._env.observation_space, 'spaces')
    self._act_dict = hasattr(self._env.action_space, 'spaces')
    self._obs_key = obs_key
    self._act_key = act_key
    self._done = True
    self._info = None

  @property
  def env(self):
    # The wrapped Gym environment, exposed for direct access.
    return self._env

  @property
  def info(self):
    # Info dict from the most recent step, or None before the first step.
    return self._info

  @functools.cached_property
  def obs_space(self):
    if self._obs_dict:
      spaces = self._flatten(self._env.observation_space.spaces)
    else:
      spaces = {self._obs_key: self._env.observation_space}
    spaces = {k: self._convert(v) for k, v in spaces.items()}
    return {
        **spaces,
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }

  @functools.cached_property
  def act_space(self):
    if self._act_dict:
      spaces = self._flatten(self._env.action_space.spaces)
    else:
      spaces = {self._act_key: self._env.action_space}
    spaces = {k: self._convert(v) for k, v in spaces.items()}
    spaces['reset'] = elements.Space(bool)
    return spaces

  def step(self, action):
    if action['reset'] or self._done:
      self._done = False
      obs = self._env.reset()
      return self._obs(obs, 0.0, is_first=True)
    if self._act_dict:
      action = self._unflatten(action)
    else:
      action = action[self._act_key]
    obs, reward, self._done, self._info = self._env.step(action)
    return self._obs(
        obs, reward,
        is_last=bool(self._done),
        # Envs that distinguish terminal states from time limits can report
        # it via info; otherwise done is treated as terminal.
        is_terminal=bool(self._info.get('is_terminal', self._done)))

  def _obs(
      self, obs, reward, is_first=False, is_last=False, is_terminal=False):
    if not self._obs_dict:
      obs = {self._obs_key: obs}
    obs = self._flatten(obs)
    obs = {k: np.asarray(v) for k, v in obs.items()}
    obs.update(
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        is_terminal=is_terminal)
    return obs

  def render(self):
    # Old-style Gym render call returning an RGB array.
    image = self._env.render('rgb_array')
    assert image is not None
    return image

  def close(self):
    # Best effort; some envs raise when closed twice or were never started.
    try:
      self._env.close()
    except Exception:
      pass

  def _flatten(self, nest, prefix=None):
    """Flatten nested dicts (and gym Dict spaces) into '/'-joined keys."""
    result = {}
    for key, value in nest.items():
      key = prefix + '/' + key if prefix else key
      if isinstance(value, gym.spaces.Dict):
        value = value.spaces
      if isinstance(value, dict):
        result.update(self._flatten(value, key))
      else:
        result[key] = value
    return result

  def _unflatten(self, flat):
    """Inverse of _flatten: rebuild nested dicts from '/'-joined keys."""
    result = {}
    for key, value in flat.items():
      parts = key.split('/')
      node = result
      for part in parts[:-1]:
        if part not in node:
          node[part] = {}
        node = node[part]
      node[parts[-1]] = value
    return result

  def _convert(self, space):
    """Convert a Gym space into an elements.Space."""
    if hasattr(space, 'n'):  # Discrete space.
      return elements.Space(np.int32, (), 0, space.n)
    return elements.Space(space.dtype, space.shape, space.low, space.high)
diff --git a/models/embodied/envs/loconav.py b/models/embodied/envs/loconav.py
new file mode 100644
index 0000000000000000000000000000000000000000..51572d77a95ab8e91620c70f63b74623fe793994
--- /dev/null
+++ b/models/embodied/envs/loconav.py
@@ -0,0 +1,228 @@
+import functools
+import os
+import warnings
+
+import elements
+import embodied
+import numpy as np
+
+
class LocoNav(embodied.Env):
  """Locomotion maze-navigation task built on dm_control composer.

  A walker (ant or quadruped) must reach a target sphere inside a fixed
  maze layout.  Logs a coarse coverage metric of visited grid cells.
  """

  # Per-walker default camera ids, used when camera=-1 is passed.
  DEFAULT_CAMERAS = dict(
      ant=4,
      quadruped=5,
  )

  def __init__(
      self, name, repeat=1, size=(64, 64), camera=-1, again=False,
      termination=False, weaker=1.0):
    # Optional control-frequency suffix, e.g. 'ant_maze_s_20hz'.
    if name.endswith('hz'):
      name, freq = name.rsplit('_', 1)
      freq = int(freq.strip('hz'))
    else:
      freq = 50
    if 'MUJOCO_GL' not in os.environ:
      os.environ['MUJOCO_GL'] = 'egl'
    from dm_control import composer
    from dm_control.locomotion.props import target_sphere
    from dm_control.locomotion.tasks import random_goal_maze
    walker, arena = name.split('_', 1)
    if camera == -1:
      camera = self.DEFAULT_CAMERAS.get(walker, 0)
    self._walker = self._make_walker(walker)
    arena = self._make_arena(arena)
    target = target_sphere.TargetSphere(radius=1.2, height_above_ground=0.0)
    task = random_goal_maze.RepeatSingleGoalMaze(
        walker=self._walker, maze_arena=arena, target=target,
        max_repeats=1000 if again else 1,
        randomize_spawn_rotation=True,
        target_reward_scale=1.0,
        aliveness_threshold=-0.5 if termination else -1.0,
        contact_termination=False,
        physics_timestep=min(1 / freq / 4, 0.02),
        control_timestep=1 / freq)
    if not again:
      # Monkey-patch the task so reward bookkeeping comes directly from
      # target activation, bypassing RepeatSingleGoalMaze's own after_step.
      def after_step(self, physics, random_state):
        super(random_goal_maze.RepeatSingleGoalMaze, self).after_step(
            physics, random_state)
        self._rewarded_this_step = self._target.activated
        self._targets_obtained = int(self._target.activated)
      task.after_step = functools.partial(after_step, task)
    env = composer.Environment(
        time_limit=60, task=task, random_state=None,
        strip_singleton_obs_buffer_dim=True)
    from . import dmc
    # Reuse the DMC adapter; keep proprio obs and expose the render as a
    # log/image observation (image=False).
    self._env = dmc.DMC(env, repeat, size=size, camera=camera, image=False)
    self._visited = None
    # Multiplier applied to actions to weaken the walker's actuators.
    self._weaker = weaker

  @property
  def obs_space(self):
    spaces = self._env.obs_space.copy()
    # Number of distinct integer grid cells visited in the current episode.
    spaces['log/coverage'] = elements.Space(np.int32, low=-1)
    return spaces

  @property
  def act_space(self):
    return self._env.act_space

  def step(self, action):
    with warnings.catch_warnings():
      warnings.filterwarnings('ignore', '.*is a deprecated alias for.*')
      action = action.copy()
      action['action'] *= self._weaker
      obs = self._env.step(action)
      if obs['is_first']:
        self._visited = set()
      # Track coverage by rounding the walker's global XY position to the
      # nearest integer cell.
      global_pos = self._walker.get_pose(
          self._env._dmenv._physics)[0].reshape(-1)
      self._visited.add(tuple(np.round(global_pos[:2]).astype(int).tolist()))
      obs['log/coverage'] = np.int32(len(self._visited))
      return obs

  def _make_walker(self, name):
    """Construct the walker entity by name."""
    if name == 'ant':
      from dm_control.locomotion.walkers import ant
      return ant.Ant()
    elif name == 'quadruped':
      from . import loconav_quadruped
      return loconav_quadruped.Quadruped()
    else:
      raise NotImplementedError(name)

  def _make_arena(self, name):
    """Build a fixed maze arena with per-digit colored wall textures."""
    import labmaze
    from dm_control import mjcf
    from dm_control.locomotion.arenas import labmaze_textures
    from dm_control.locomotion.arenas import mazes
    import matplotlib.pyplot as plt
    class WallTexture(labmaze_textures.WallTextures):
      # Flat single-color wall texture instead of the default image styles.
      def _build(self, color=[0.8, 0.8, 0.8], model='labmaze_style_01'):
        self._mjcf_root = mjcf.RootElement(model=model)
        self._textures = [self._mjcf_root.asset.add(
            'texture', type='2d', name='wall', builtin='flat',
            rgb1=color, width=100, height=100)]
    wall_textures = {'*': WallTexture([0.8, 0.8, 0.8])}
    # Digits 1..9 in the map layouts select walls colored from tab10.
    cmap = plt.get_cmap('tab10')
    for index in range(9):
      wall_textures[str(index + 1)] = WallTexture(cmap(index)[:3])
    # Strip the interleaved spaces from the ASCII maps and turn '.' cells
    # into labmaze floor (' ').
    layout = ''.join([
        line[::2].replace('.', ' ') + '\n' for line in MAPS[name]])
    maze = labmaze.FixedMazeWithRandomGoals(
        entity_layer=layout,
        num_spawns=1, num_objects=1, random_state=None)
    arena = mazes.MazeWithTargets(
        maze, xy_scale=1.2, z_height=2.0, aesthetic='default',
        wall_textures=wall_textures, name='maze')
    return arena
+
+
# Fixed maze layouts.  Each entry is a tuple of rows where every second
# character is a cell (the interleaved spaces are removed by
# LocoNav._make_arena).  '.' becomes floor, digits pick per-color wall
# textures, '*' uses the default wall texture.  NOTE(review): 'P' and 'G'
# appear to mark the spawn and goal cells consumed by labmaze — confirm
# against the labmaze text-grid documentation.
MAPS = {

  'maze_s': (
      ' 6 6 6 6 6',
      ' 6 . . . 6',
      ' 6 . G . 6',
      ' 6 . . . 6',
      ' 5 . . . 4',
      ' 5 . . . 4',
      '1 1 1 1 5 5 5 . . . 4',
      '1 . . . . . . . . . 3',
      '1 . P . . . . . . . 3',
      '1 . . . . . . . . . 3',
      '1 1 1 1 2 2 2 3 3 3 3',
  ),

  'maze_m': (
      '6 6 6 6 8 8 8 7 7 7 7',
      '6 . . . . . . . . . 7',
      '6 . G . . . . . . . 7',
      '6 . . . . . . . . . 7',
      '6 6 6 5 5 5 5 . . . 4',
      ' 5 . . . 4',
      '1 1 1 1 5 5 5 . . . 4',
      '1 . . . . . . . . . 3',
      '1 . P . . . . . . . 3',
      '1 . . . . . . . . . 3',
      '1 1 1 1 2 2 2 3 3 3 3',
  ),

  'maze_l': (
      '8 8 8 8 7 7 7 6 6 6 6 . . .',
      '8 . . . . . . . . . 6 . . .',
      '8 . G . . . . . . . 6 . . .',
      '8 . . . . . . . . . 6 5 5 5',
      '8 8 8 8 7 7 7 . . . . . . 5',
      '. . . . . . 7 . . . . . . 5',
      '1 1 1 1 1 . 7 . . . . . . 5',
      '1 . . . 1 . 7 9 9 9 . . . 5',
      '1 . . . 1 . . . . 9 . . . 5',
      '1 . . . 1 1 1 9 9 9 . . . 5',
      '2 . . . . . . . . . . . . 4',
      '2 . . . . P . . . . . . . 4',
      '2 . . . . . . . . . . . . 4',
      '2 2 2 2 3 3 3 3 3 3 4 4 4 4',
  ),

  'maze_xl': (
      '9 9 9 9 9 9 9 8 8 8 8 . 4 4 4 4 4',
      '9 . . . . . . . . . 8 . 4 . . . 4',
      '9 . . . . . . . G . 8 . 4 . . . 4',
      '9 . . . . . . . . . 8 . 4 . . . 4',
      '6 . . . 7 7 7 8 8 8 8 . 5 . . . 3',
      '6 . . . 7 . . . . . . . 5 . . . 3',
      '6 . . . 7 7 7 5 5 5 5 5 5 . . . 3',
      '5 . . . . . . . . . . . . . . . 3',
      '5 . . . . . . . . . . . . . . . 3',
      '5 . . . . . . . . . . . . . . . 3',
      '5 5 5 5 4 4 4 . . . 6 6 6 . . . 3',
      '. . . . . . 4 . . . 6 . 6 . . . 3',
      '1 1 1 1 4 4 4 . . . 6 . 6 . . . 3',
      '1 . . . . . . . . . 2 . 1 . . . 1',
      '1 . P . . . . . . . 2 . 1 . . . 1',
      '1 . . . . . . . . . 2 . 1 . . . 1',
      '1 1 1 1 1 1 1 2 2 2 2 . 1 1 1 1 1',
  ),

  'maze_xxl': (
      '7 7 7 7 * * * 6 6 6 * * * 9 9 9 9',
      '7 . . . . . . . . . . . . . . . 9',
      '7 . . . . . . . . . . . . . G . 9',
      '7 . . . . . . . . . . . . . . . 9',
      '* . . . 5 5 5 * * * * * * 9 9 9 9',
      '* . . . 5 . . . . . . . . . . . .',
      '* . . . 5 5 5 * * * * * * 3 3 3 3',
      '8 . . . . . . . . . . . . . . . 3',
      '8 . . . . . . . . . . . . . . . 3',
      '8 . . . . . . . . . . . . . . . 3',
      '8 8 8 8 * * * * * * 4 4 4 . . . *',
      '. . . . . . . . . . . . 4 . . . *',
      '1 1 1 1 * * * * * * 4 4 4 . . . *',
      '1 . . . . . . . . . . . . . . . 2',
      '1 . P . . . . . . . . . . . . . 2',
      '1 . . . . . . . . . . . . . . . 2',
      '1 1 1 1 * * * 6 6 6 * * * 2 2 2 2',
  ),

  'empty': (
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
      '. . . . . . . . . . . . . . . . .',
  ),

}
diff --git a/models/embodied/envs/loconav_quadruped.py b/models/embodied/envs/loconav_quadruped.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c5701d1e920d0f748d6d7f840af3a50176be820
--- /dev/null
+++ b/models/embodied/envs/loconav_quadruped.py
@@ -0,0 +1,132 @@
+import os
+
+from dm_control import composer
+from dm_control import mjcf
+from dm_control.composer.observation import observable
+from dm_control.locomotion.walkers import base
+from dm_control.locomotion.walkers import legacy_base
+from dm_control.mujoco.wrapper import mjbindings
+import numpy as np
+
# Convenience aliases for the MuJoCo binding namespaces.
enums = mjbindings.enums
mjlib = mjbindings.mjlib
+
+
class Quadruped(legacy_base.Walker):
  """Quadruped walker entity loaded from the bundled MJCF model."""

  def _build(self, name='walker', initializer=None):
    super()._build(initializer=initializer)
    # The MJCF model file ships alongside this module.
    self._mjcf_root = mjcf.from_path(
        os.path.join(os.path.dirname(__file__), 'loconav_quadruped.xml'))
    if name:
      self._mjcf_root.model = name
    # Last applied action, exposed via the prev_action property.
    self._prev_action = np.zeros(
        self.action_spec.shape, self.action_spec.dtype)

  def initialize_episode(self, physics, random_state):
    self._prev_action = np.zeros_like(self._prev_action)

  def apply_action(self, physics, action, random_state):
    super().apply_action(physics, action, random_state)
    self._prev_action[:] = action

  def _build_observables(self):
    return QuadrupedObservables(self)

  @property
  def mjcf_model(self):
    return self._mjcf_root

  @property
  def upright_pose(self):
    # Default pose; no special orientation required to stand upright.
    return base.WalkerPose()

  @composer.cached_property
  def actuators(self):
    return self._mjcf_root.find_all('actuator')

  @composer.cached_property
  def root_body(self):
    return self._mjcf_root.find('body', 'torso')

  @composer.cached_property
  def bodies(self):
    return tuple(self.mjcf_model.find_all('body'))

  @composer.cached_property
  def mocap_tracking_bodies(self):
    return tuple(self.mjcf_model.find_all('body'))

  @property
  def mocap_joints(self):
    return self.mjcf_model.find_all('joint')

  @property
  def _foot_bodies(self):
    return (
        self._mjcf_root.find('body', 'toe_front_left'),
        self._mjcf_root.find('body', 'toe_front_right'),
        self._mjcf_root.find('body', 'toe_back_right'),
        self._mjcf_root.find('body', 'toe_back_left'),
    )

  @composer.cached_property
  def end_effectors(self):
    return self._foot_bodies

  @composer.cached_property
  def observable_joints(self):
    return self._mjcf_root.find_all('joint')

  @composer.cached_property
  def egocentric_camera(self):
    return self._mjcf_root.find('camera', 'egocentric')

  def aliveness(self, physics):
    # Maps torso z-axis alignment (xmat[-1] in [-1, 1]) to [-1, 0];
    # 0 when fully upright, -1 when upside down.
    return (physics.bind(self.root_body).xmat[-1] - 1.) / 2.

  @composer.cached_property
  def ground_contact_geoms(self):
    foot_geoms = []
    for foot in self._foot_bodies:
      foot_geoms.extend(foot.find_all('geom'))
    return tuple(foot_geoms)

  @property
  def prev_action(self):
    return self._prev_action
+
+
class QuadrupedObservables(legacy_base.WalkerObservables):
  """Walker observables extended with activations and global positions."""

  @composer.observable
  def actuator_activations(self):
    def actuator_activations_in_egocentric_frame(physics):
      return physics.data.act
    return observable.Generic(actuator_activations_in_egocentric_frame)

  @composer.observable
  def root_global_pos(self):
    # Global position of the walker root, taken from the entity pose.
    def root_pos(physics):
      root_xpos, _ = self._entity.get_pose(physics)
      return np.reshape(root_xpos, -1)
    return observable.Generic(root_pos)

  @composer.observable
  def torso_global_pos(self):
    # Global position of the torso body itself.
    def torso_pos(physics):
      root_body = self._entity.root_body
      root_body_xpos = physics.bind(root_body).xpos
      return np.reshape(root_body_xpos, -1)
    return observable.Generic(torso_pos)

  @property
  def proprioception(self):
    # Proprioceptive observation group: base walker sensors plus actuator
    # activations and the global position observables defined above.
    return ([
        self.joints_pos, self.joints_vel, self.actuator_activations,
        self.sensors_accelerometer, self.sensors_gyro,
        self.sensors_velocimeter,
        self.sensors_force, self.sensors_torque,
        self.world_zaxis,
        self.root_global_pos, self.torso_global_pos,
    ] + self._collect_from_attachments('proprioception'))
diff --git a/models/embodied/envs/loconav_quadruped.xml b/models/embodied/envs/loconav_quadruped.xml
new file mode 100644
index 0000000000000000000000000000000000000000..c556dae489938598e1f55dd1b05de735f6fb2b30
--- /dev/null
+++ b/models/embodied/envs/loconav_quadruped.xml
@@ -0,0 +1,311 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/models/embodied/envs/minecraft.py b/models/embodied/envs/minecraft.py
new file mode 100644
index 0000000000000000000000000000000000000000..312e512aafb43fa33d3cd79d6b66f95a7b10217f
--- /dev/null
+++ b/models/embodied/envs/minecraft.py
@@ -0,0 +1,17 @@
+import importlib
+
+import embodied
+
+
class Minecraft(embodied.Wrapper):
  """Dispatches a task name to the matching Minecraft environment class."""

  def __init__(self, task, *args, **kwargs):
    registry = {
        'wood': 'minecraft_flat:Wood',
        'climb': 'minecraft_flat:Climb',
        'diamond': 'minecraft_flat:Diamond',
    }
    modname, clsname = registry[task].split(':')
    modref = importlib.import_module(f'.{modname}', __package__)
    env = getattr(modref, clsname)(*args, **kwargs)
    super().__init__(env)
diff --git a/models/embodied/envs/minecraft_flat.py b/models/embodied/envs/minecraft_flat.py
new file mode 100644
index 0000000000000000000000000000000000000000..28c2fcfe0e802d67dd2c54b03073a4c742bbbc32
--- /dev/null
+++ b/models/embodied/envs/minecraft_flat.py
@@ -0,0 +1,438 @@
+import logging
+import threading
+
+import elements
+import embodied
+import numpy as np
+
+np.float = float
+np.int = int
+np.bool = bool
+
+from minerl.herobraine.env_spec import EnvSpec
+from minerl.herobraine.hero import handler
+from minerl.herobraine.hero import handlers
+from minerl.herobraine.hero import mc
+from minerl.herobraine.hero.mc import INVERSE_KEYMAP
+
+
class Wood(embodied.Wrapper):
  """Flat-world task that rewards collecting logs while staying healthy."""

  def __init__(self, *args, **kwargs):
    self.rewards = [
        CollectReward('log', repeated=1),
        HealthReward(),
    ]
    limit = kwargs.pop('length', 36000)
    base = MinecraftBase(BASIC_ACTIONS, *args, **kwargs)
    super().__init__(embodied.wrappers.TimeLimit(base, limit))

  def step(self, action):
    obs = self.env.step(action)
    # Accumulate all reward terms for this step.
    total = 0
    for reward_fn in self.rewards:
      total += reward_fn(obs, self.env.inventory)
    obs['reward'] = np.float32(total)
    return obs
+
+
class Climb(embodied.Wrapper):
  # Task that rewards per-step gains in altitude plus a health change term.

  def __init__(self, *args, **kwargs):
    actions = BASIC_ACTIONS
    length = kwargs.pop('length', 36000)
    env = MinecraftBase(actions, *args, **kwargs)
    env = embodied.wrappers.TimeLimit(env, length)
    super().__init__(env)
    self._previous = None  # Player height at the previous step.
    self._health_reward = HealthReward()

  def step(self, action):
    obs = self.env.step(action)
    # NOTE(review): relies on 'log/player_pos', which is commented out in
    # MinecraftBase.obs_space/_obs — confirm it is enabled for this task.
    x, y, z = obs['log/player_pos']
    height = np.float32(y)
    if obs['is_first']:
      # No altitude delta on the first step of an episode.
      self._previous = height
    reward = (height - self._previous) + self._health_reward(obs)
    obs['reward'] = np.float32(reward)
    self._previous = height
    return obs
+
+
class Diamond(embodied.Wrapper):
  # Full tech-tree task: collect and craft every item on the path from logs
  # to a diamond, with a one-time milestone reward per item.

  def __init__(self, *args, **kwargs):
    # Basic movement plus the crafting, placing, equipping, and smelting
    # actions required to progress through the tech tree.
    actions = {
        **BASIC_ACTIONS,
        'craft_planks': dict(craft='planks'),
        'craft_stick': dict(craft='stick'),
        'craft_crafting_table': dict(craft='crafting_table'),
        'place_crafting_table': dict(place='crafting_table'),
        'craft_wooden_pickaxe': dict(nearbyCraft='wooden_pickaxe'),
        'craft_stone_pickaxe': dict(nearbyCraft='stone_pickaxe'),
        'craft_iron_pickaxe': dict(nearbyCraft='iron_pickaxe'),
        'equip_stone_pickaxe': dict(equip='stone_pickaxe'),
        'equip_wooden_pickaxe': dict(equip='wooden_pickaxe'),
        'equip_iron_pickaxe': dict(equip='iron_pickaxe'),
        'craft_furnace': dict(nearbyCraft='furnace'),
        'place_furnace': dict(place='furnace'),
        'smelt_iron_ingot': dict(nearbySmelt='iron_ingot'),
    }
    # Each milestone pays out once per episode; health changes add shaping.
    self.rewards = [
        CollectReward('log', once=1),
        CollectReward('planks', once=1),
        CollectReward('stick', once=1),
        CollectReward('crafting_table', once=1),
        CollectReward('wooden_pickaxe', once=1),
        CollectReward('cobblestone', once=1),
        CollectReward('stone_pickaxe', once=1),
        CollectReward('iron_ore', once=1),
        CollectReward('furnace', once=1),
        CollectReward('iron_ingot', once=1),
        CollectReward('iron_pickaxe', once=1),
        CollectReward('diamond', once=1),
        HealthReward(),
    ]
    length = kwargs.pop('length', 36000)
    env = MinecraftBase(actions, *args, **kwargs)
    env = embodied.wrappers.TimeLimit(env, length)
    super().__init__(env)

  def step(self, action):
    obs = self.env.step(action)
    # Sum milestone and health reward terms for this step.
    reward = sum([fn(obs, self.env.inventory) for fn in self.rewards])
    obs['reward'] = np.float32(reward)
    return obs
+
+
# Discrete action vocabulary shared by all tasks. Each entry maps an action
# name to the MineRL action-dict overrides applied on top of
# MinecraftBase.NOOP. Camera deltas are (pitch, yaw) in degrees.
BASIC_ACTIONS = {
    'noop': dict(),
    'attack': dict(attack=1),
    'turn_up': dict(camera=(-15, 0)),
    'turn_down': dict(camera=(15, 0)),
    'turn_left': dict(camera=(0, -15)),
    'turn_right': dict(camera=(0, 15)),
    'forward': dict(forward=1),
    'back': dict(back=1),
    'left': dict(left=1),
    'right': dict(right=1),
    # Jumping also moves forward, so single jumps can clear blocks and gaps.
    'jump': dict(jump=1, forward=1),
    'place_dirt': dict(place='dirt'),
}
+
+
class CollectReward:
  """Reward for gathering an inventory item.

  Pays `once` the first time the item count rises above zero within an
  episode, and `repeated` per unit for every increase of the count.
  """

  def __init__(self, item, once=0, repeated=0):
    self.item = item
    self.once = once
    self.repeated = repeated
    self.previous = 0  # Item count at the previous step.
    self.maximum = 0   # Highest item count seen this episode.

  def __call__(self, obs, inventory):
    count = inventory[self.item]
    if obs['is_first']:
      # Reset bookkeeping at episode boundaries; items the player spawns
      # with do not generate reward.
      self.previous = count
      self.maximum = count
      return 0
    gained = count - self.previous
    reward = self.repeated * gained if gained > 0 else 0
    if count > 0 and self.maximum == 0:
      reward += self.once
    self.previous = count
    self.maximum = max(self.maximum, count)
    return reward
+
+
class HealthReward:
  """Reward proportional to the change in player health between steps."""

  def __init__(self, scale=0.01):
    self.scale = scale
    self.previous = None  # Health observed at the previous step.

  def __call__(self, obs, inventory=None):
    current = obs['health']
    if obs['is_first']:
      # No delta available on the first step of an episode.
      self.previous = current
      return 0
    delta = current - self.previous
    self.previous = current
    return np.float32(self.scale * delta)
+
+
class MinecraftBase(embodied.Env):
  """Shared MineRL-backed Minecraft environment with a discrete action set.

  Handles environment construction, flattened observations, sticky
  attack/jump actions, camera pitch limiting, and action repeat.
  """

  # Only one MineRL instance may be created or reset at a time.
  LOCK = threading.Lock()
  # Default value for every key in the MineRL action dict.
  NOOP = dict(
      camera=(0, 0), forward=0, back=0, left=0, right=0, attack=0, sprint=0,
      jump=0, sneak=0, craft='none', nearbyCraft='none', nearbySmelt='none',
      place='none', equip='none')

  def __init__(
      self, actions,
      repeat=1,
      size=(64, 64),
      break_speed=100.0,
      gamma=10.0,  # NOTE(review): currently unused in this class — confirm.
      sticky_attack=30,
      sticky_jump=10,
      pitch_limit=(-60, 60),
      log_inv_keys=('log', 'cobblestone', 'iron_ingot', 'diamond'),
      logs=False,
  ):
    if logs:
      logging.basicConfig(level=logging.DEBUG)
    self._repeat = repeat
    self._size = size
    # With a boosted break speed, blocks break fast enough that holding the
    # attack button via stickiness is unnecessary.
    if break_speed != 1.0:
      sticky_attack = 0

    # Make env
    with self.LOCK:
      self._gymenv = MineRLEnv(size, break_speed).make()
    from . import from_gym
    self._env = from_gym.FromGym(self._gymenv)
    self._inventory = {}

    # Observations
    # MineRL exposes 'log' and 'log2' item types; log2 is merged into log
    # in _obs, so it is excluded from the inventory keys here.
    self._inv_keys = [
        k for k in self._env.obs_space if k.startswith('inventory/')
        if k != 'inventory/log2']
    self._inv_log_keys = [f'inventory/{k}' for k in log_inv_keys]
    assert all(k in self._inv_keys for k in self._inv_log_keys), (
        self._inv_keys, self._inv_log_keys)
    self._step = 0
    self._max_inventory = None
    # Enumeration of items that can appear in the main hand, used for the
    # one-hot 'equipped' observation.
    self._equip_enum = self._gymenv.observation_space[
        'equipped_items']['mainhand']['type'].values.tolist()
    self._obs_space = self.obs_space

    # Actions
    actions = self._insert_defaults(actions)
    self._action_names = tuple(actions.keys())
    self._action_values = tuple(actions.values())
    message = f'Minecraft action space ({len(self._action_values)}):'
    print(message, ', '.join(self._action_names))
    self._sticky_attack_length = sticky_attack
    self._sticky_attack_counter = 0
    self._sticky_jump_length = sticky_jump
    self._sticky_jump_counter = 0
    self._pitch_limit = pitch_limit
    self._pitch = 0

  @property
  def obs_space(self):
    return {
        'image': elements.Space(np.uint8, self._size + (3,)),
        'inventory': elements.Space(np.float32, len(self._inv_keys), 0),
        'inventory_max': elements.Space(np.float32, len(self._inv_keys), 0),
        'equipped': elements.Space(np.float32, len(self._equip_enum), 0, 1),
        'reward': elements.Space(np.float32),
        'health': elements.Space(np.float32),
        'hunger': elements.Space(np.float32),
        'breath': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
        **{f'log/{k}': elements.Space(np.int64) for k in self._inv_log_keys},
        # 'log/player_pos': elements.Space(np.float32, 3),
    }

  @property
  def act_space(self):
    return {
        'action': elements.Space(np.int32, (), 0, len(self._action_values)),
        'reset': elements.Space(bool),
    }

  def step(self, action):
    """Apply one discrete action index with repeat and sticky-action logic."""
    action = action.copy()
    index = action.pop('action')
    action.update(self._action_values[index])
    action = self._action(action)
    if action['reset']:
      obs = self._reset()
    else:
      # During repeated frames, only the held movement/attack keys carry
      # over; one-shot actions like crafting run on the first frame only.
      following = self.NOOP.copy()
      for key in ('attack', 'forward', 'back', 'left', 'right'):
        following[key] = action[key]
      for act in [action] + ([following] * (self._repeat - 1)):
        obs = self._env.step(act)
        if self._env.info and 'error' in self._env.info:
          # The MineRL backend reported an error; restart the episode.
          obs = self._reset()
          break
    obs = self._obs(obs)
    self._step += 1
    assert 'pov' not in obs, list(obs.keys())
    return obs

  @property
  def inventory(self):
    # Mapping from item name to count, refreshed on every _obs call.
    return self._inventory

  def _reset(self):
    with self.LOCK:
      obs = self._env.step({'reset': True})
    self._step = 0
    self._max_inventory = None
    self._sticky_attack_counter = 0
    self._sticky_jump_counter = 0
    self._pitch = 0
    self._inventory = {}
    return obs

  def _obs(self, obs):
    """Flatten the raw MineRL observation into the declared obs_space."""
    # MineRL splits logs into two item types; merge them into one count.
    obs['inventory/log'] += obs.pop('inventory/log2')
    self._inventory = {
        k.split('/', 1)[1]: obs[k] for k in self._inv_keys
        if k != 'inventory/air'}
    inventory = np.array([obs[k] for k in self._inv_keys], np.float32)
    if self._max_inventory is None:
      self._max_inventory = inventory
    else:
      self._max_inventory = np.maximum(self._max_inventory, inventory)
    # One-hot encoding of the currently equipped main-hand item.
    index = self._equip_enum.index(obs['equipped_items/mainhand/type'])
    equipped = np.zeros(len(self._equip_enum), np.float32)
    equipped[index] = 1.0
    # player_x = obs['location_stats/xpos']
    # player_y = obs['location_stats/ypos']
    # player_z = obs['location_stats/zpos']
    obs = {
        'image': obs['pov'],
        'inventory': inventory,
        'inventory_max': self._max_inventory.copy(),
        'equipped': equipped,
        # Life stats are normalized by their maximum raw values.
        'health': np.float32(obs['life_stats/life'] / 20),
        'hunger': np.float32(obs['life_stats/food'] / 20),
        'breath': np.float32(obs['life_stats/air'] / 300),
        'reward': np.float32(0.0),
        'is_first': obs['is_first'],
        'is_last': obs['is_last'],
        'is_terminal': obs['is_terminal'],
        **{f'log/{k}': np.int64(obs[k]) for k in self._inv_log_keys},
        # 'log/player_pos': np.array([player_x, player_y, player_z], np.float32),
    }
    for key, value in obs.items():
      space = self._obs_space[key]
      if not isinstance(value, np.ndarray):
        value = np.array(value)
      assert value in space, (key, value, value.dtype, value.shape, space)
    return obs

  def _action(self, action):
    """Apply sticky attack/jump and camera pitch limiting to an action."""
    # Sticky attack: keep attacking for several steps after a press so that
    # blocks requiring sustained mining can be broken.
    if self._sticky_attack_length:
      if action['attack']:
        self._sticky_attack_counter = self._sticky_attack_length
      if self._sticky_attack_counter > 0:
        action['attack'] = 1
        action['jump'] = 0
        self._sticky_attack_counter -= 1
    # Sticky jump: hold jump (and forward) for several steps so that a
    # single jump action reliably clears obstacles.
    if self._sticky_jump_length:
      if action['jump']:
        self._sticky_jump_counter = self._sticky_jump_length
      if self._sticky_jump_counter > 0:
        action['jump'] = 1
        action['forward'] = 1
        self._sticky_jump_counter -= 1
    # Zero out camera pitch deltas that would exceed the allowed range.
    if self._pitch_limit and action['camera'][0]:
      lo, hi = self._pitch_limit
      if not (lo <= self._pitch + action['camera'][0] <= hi):
        action['camera'] = (0, action['camera'][1])
      self._pitch += action['camera'][0]
    return action

  def _insert_defaults(self, actions):
    # Fill in NOOP defaults for every action key each action leaves unset.
    actions = {name: action.copy() for name, action in actions.items()}
    for key, default in self.NOOP.items():
      for action in actions.values():
        if key not in action:
          action[key] = default
    return actions
+
+
class MineRLEnv(EnvSpec):
  """Custom MineRL environment spec: default survival world with full
  inventory/equipment observations and keyboard, camera, and craft actions."""

  def __init__(self, resolution=(64, 64), break_speed=50):
    self.resolution = resolution
    self.break_speed = break_speed
    super().__init__(name='MineRLEnv-v1')

  def create_agent_start(self):
    # Injects the block break speed multiplier into the mission XML.
    return [BreakSpeedMultiplier(self.break_speed)]

  def create_agent_handlers(self):
    return []

  def create_server_world_generators(self):
    # Regenerate a fresh default world on every reset.
    return [handlers.DefaultWorldGenerator(force_reset=True)]

  def create_server_quit_producers(self):
    return [handlers.ServerQuitWhenAnyAgentFinishes()]

  def create_server_initial_conditions(self):
    return [
        handlers.TimeInitialCondition(
            allow_passage_of_time=True, start_time=0),
        handlers.SpawningInitialCondition(allow_spawning=True),
    ]

  def create_observables(self):
    # POV image plus flat inventory, equipped item, location, and life stats.
    return [
        handlers.POVObservation(self.resolution),
        handlers.FlatInventoryObservation(mc.ALL_ITEMS),
        handlers.EquippedItemObservation(
            mc.ALL_ITEMS, _default='air', _other='other'),
        handlers.ObservationFromCurrentLocation(),
        handlers.ObservationFromLifeStats(),
    ]

  def create_actionables(self):
    # Keyboard/camera controls plus place/equip/craft/smelt over all items.
    kw = dict(_other='none', _default='none')
    return [
        handlers.KeybasedCommandAction('forward', INVERSE_KEYMAP['forward']),
        handlers.KeybasedCommandAction('back', INVERSE_KEYMAP['back']),
        handlers.KeybasedCommandAction('left', INVERSE_KEYMAP['left']),
        handlers.KeybasedCommandAction('right', INVERSE_KEYMAP['right']),
        handlers.KeybasedCommandAction('jump', INVERSE_KEYMAP['jump']),
        handlers.KeybasedCommandAction('sneak', INVERSE_KEYMAP['sneak']),
        handlers.KeybasedCommandAction('attack', INVERSE_KEYMAP['attack']),
        handlers.CameraAction(),
        handlers.PlaceBlock(['none'] + mc.ALL_ITEMS, **kw),
        handlers.EquipAction(['none'] + mc.ALL_ITEMS, **kw),
        handlers.CraftAction(['none'] + mc.ALL_ITEMS, **kw),
        handlers.CraftNearbyAction(['none'] + mc.ALL_ITEMS, **kw),
        handlers.SmeltItemNearby(['none'] + mc.ALL_ITEMS, **kw),
    ]

  def is_from_folder(self, folder):
    return folder == 'none'

  def get_docstring(self):
    return ''

  def determine_success_from_rewards(self, rewards):
    # Rewards are computed by the wrappers, not by the spec.
    return True

  def create_rewardables(self):
    return []

  def create_server_decorators(self):
    return []

  def create_mission_handlers(self):
    return []

  def create_monitors(self):
    return []
+
+
class BreakSpeedMultiplier(handler.Handler):
  """Mission handler that injects a block break speed multiplier."""

  def __init__(self, multiplier=1.0):
    self.multiplier = multiplier

  def to_string(self):
    return f'break_speed({self.multiplier})'

  def xml_template(self):
    # Template placeholder expanded by minerl's mission-XML templating —
    # presumably substituted with self.multiplier; confirm against the
    # handler base class.
    return '{{multiplier}}'
diff --git a/models/embodied/envs/pinpad.py b/models/embodied/envs/pinpad.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bb8db205da5dcb12bf2279f53acc99c575925a4
--- /dev/null
+++ b/models/embodied/envs/pinpad.py
@@ -0,0 +1,225 @@
+import collections
+
+import elements
+import embodied
+import numpy as np
+
+
class PinPad(embodied.Env):
  """Grid world where the agent must visit colored pads in a fixed order.

  The agent walks on a 16x14 grid. Touching a pad appends it to the current
  sequence (shown at the right edge of the rendered image). Completing the
  full target sequence yields a reward of 10 and respawns the agent after a
  short countdown, within the same episode.
  """

  # RGB color for each pad character appearing in the layouts.
  COLORS = {
      '1': (255, 0, 0),
      '2': ( 0, 255, 0),
      '3': ( 0, 0, 255),
      '4': (255, 255, 0),
      '5': (255, 0, 255),
      '6': ( 0, 255, 255),
      '7': (128, 0, 128),
      '8': ( 0, 128, 128),
  }

  def __init__(self, task, length=10000):
    assert length > 0
    layout = {
        'three': LAYOUT_THREE,
        'four': LAYOUT_FOUR,
        'five': LAYOUT_FIVE,
        'six': LAYOUT_SIX,
        'seven': LAYOUT_SEVEN,
        'eight': LAYOUT_EIGHT,
    }[task]
    # Transposed so the grid is indexed as layout[x][y].
    self.layout = np.array([list(line) for line in layout.split('\n')]).T
    assert self.layout.shape == (16, 14), self.layout.shape
    self.length = length
    self.random = np.random.RandomState()
    # Pad characters are everything except walls and whitespace markers.
    self.pads = set(self.layout.flatten().tolist()) - set('* #\n')
    # Target sequence is the pads in sorted order.
    self.target = tuple(sorted(self.pads))
    self.spawns = []
    for (x, y), char in np.ndenumerate(self.layout):
      if char != '#':
        self.spawns.append((x, y))
    print(f'Created PinPad env with sequence: {"->".join(self.target)}')
    self.sequence = collections.deque(maxlen=len(self.target))
    self.player = None
    self.steps = None
    self.done = None
    self.countdown = None  # Steps remaining until respawn after a solve.

  @property
  def act_space(self):
    return {
        'action': elements.Space(np.int32, (), 0, 5),
        'reset': elements.Space(bool),
    }

  @property
  def obs_space(self):
    return {
        'image': elements.Space(np.uint8, (64, 64, 3)),
        'reward': elements.Space(np.float32),
        'is_first': elements.Space(bool),
        'is_last': elements.Space(bool),
        'is_terminal': elements.Space(bool),
    }

  def step(self, action):
    if self.done or action['reset']:
      # Start a new episode at a random spawn position.
      self.player = self.spawns[self.random.randint(len(self.spawns))]
      self.sequence.clear()
      self.steps = 0
      self.done = False
      self.countdown = 0
      return self._obs(reward=0.0, is_first=True)
    if self.countdown:
      self.countdown -= 1
      if self.countdown == 0:
        # Respawn after a completed sequence, within the same episode.
        self.player = self.spawns[self.random.randint(len(self.spawns))]
        self.sequence.clear()
    reward = 0.0
    # Action 0 is a noop; the rest move one cell along the grid axes.
    move = [(0, 0), (0, 1), (0, -1), (1, 0), (-1, 0)][action['action']]
    x = np.clip(self.player[0] + move[0], 0, 15)
    y = np.clip(self.player[1] + move[1], 0, 13)
    tile = self.layout[x][y]
    if tile != '#':
      self.player = (x, y)
    if tile in self.pads:
      # Only append a pad if it differs from the last one recorded, so
      # standing on a pad does not add it repeatedly.
      if not self.sequence or self.sequence[-1] != tile:
        self.sequence.append(tile)
    if tuple(self.sequence) == self.target and not self.countdown:
      reward += 10.0
      self.countdown = 10
    self.steps += 1
    self.done = self.done or (self.steps >= self.length)
    return self._obs(reward=reward, is_last=self.done)

  def _obs(self, reward, is_first=False, is_last=False, is_terminal=False):
    return dict(
        image=self._render(),
        reward=np.float32(reward),
        is_first=is_first,
        is_last=is_last,
        is_terminal=is_terminal,
    )

  def _render(self):
    # Draw on a 16x16 grid; the two rightmost columns show the sequence.
    grid = np.zeros((16, 16, 3), np.uint8) + 255
    white = np.array([255, 255, 255])
    if self.countdown:
      # Tint the scene green briefly after the sequence was completed.
      grid[:] = (223, 255, 223)
    current = self.layout[self.player[0]][self.player[1]]
    for (x, y), char in np.ndenumerate(self.layout):
      if char == '#':
        grid[x, y] = (192, 192, 192)
      elif char in self.pads:
        color = np.array(self.COLORS[char])
        # Pads are drawn washed out unless the player stands on them.
        color = color if char == current else (10 * color + 90 * white) / 100
        grid[x, y] = color
    grid[self.player] = (0, 0, 0)
    grid[:, -2:] = (192, 192, 192)
    for i, char in enumerate(self.sequence):
      grid[2 * i + 1, -2] = self.COLORS[char]
    # Upscale 4x and swap axes to (height, width, channels).
    image = np.repeat(np.repeat(grid, 4, 0), 4, 1)
    return image.transpose((1, 0, 2))
+
+
+LAYOUT_THREE = """
+################
+#1111 3333#
+#1111 3333#
+#1111 3333#
+#1111 3333#
+# #
+# #
+# #
+# #
+# 2222 #
+# 2222 #
+# 2222 #
+# 2222 #
+################
+""".strip('\n')
+
+LAYOUT_FOUR = """
+################
+#1111 4444#
+#1111 4444#
+#1111 4444#
+#1111 4444#
+# #
+# #
+# #
+# #
+#3333 2222#
+#3333 2222#
+#3333 2222#
+#3333 2222#
+################
+""".strip('\n')
+
+LAYOUT_FIVE = """
+################
+# 4444#
+#111 4444#
+#111 4444#
+#111 #
+#111 555#
+# 555#
+# 555#
+#333 555#
+#333 #
+#333 2222#
+#333 2222#
+# 2222#
+################
+""".strip('\n')
+
+LAYOUT_SIX = """
+################
+#111 555#
+#111 555#
+#111 555#
+# #
+#33 66#
+#33 66#
+#33 66#
+#33 66#
+# #
+#444 222#
+#444 222#
+#444 222#
+################
+""".strip('\n')
+
+LAYOUT_SEVEN = """
+################
+#111 444#
+#111 444#
+#11 44#
+# #
+#33 55#
+#33 55#
+#33 55#
+#33 55#
+# #
+#66 22#
+#666 7777 222#
+#666 7777 222#
+################
+""".strip('\n')
+
+LAYOUT_EIGHT = """
+################
+#111 8888 444#
+#111 8888 444#
+#11 44#
+# #
+#33 55#
+#33 55#
+#33 55#
+#33 55#
+# #
+#66 22#
+#666 7777 222#
+#666 7777 222#
+################
+""".strip('\n')
diff --git a/models/embodied/envs/procgen.py b/models/embodied/envs/procgen.py
new file mode 100644
index 0000000000000000000000000000000000000000..55dd1314cd7115f40b93b6f90d03fca6c027c91c
--- /dev/null
+++ b/models/embodied/envs/procgen.py
@@ -0,0 +1,67 @@
+import elements
+import embodied
+import numpy as np
+import procgen # noqa
+
+from PIL import Image
+
+
class ProcGen(embodied.Env):
  """ProcGen environment wrapper with optional frame resizing.

  Frames come directly from the step observation when the requested size is
  the native 64x64; otherwise they are taken from the environment info dict
  and resized.
  """

  def __init__(self, task, size=(64, 64), resize='pillow', **kwargs):
    """Create a ProcGen environment.

    Args:
      task: ProcGen game name, e.g. 'coinrun'.
      size: Output image size as (height, width).
      resize: Resizing backend, either 'opencv' or 'pillow'.
      **kwargs: Forwarded to the underlying gym environment.
    """
    assert resize in ('opencv', 'pillow'), resize
    from . import from_gym
    self.size = size
    self.resize = resize
    # Native frames are 64x64; any other size requires rendering through
    # the info dict and resizing manually.
    if self.size == (64, 64):
      self.source = 'step'
    else:
      self.source = 'info'
    if self.source == 'info':
      kwargs['render_mode'] = 'rgb_array'
    try:
      self.env = from_gym.FromGym(f'procgen:procgen-{task}-v0', **kwargs)
    except Exception:
      # Fall back to the registration name without the module prefix.
      self.env = from_gym.FromGym(f'procgen-{task}-v0', **kwargs)
    if self.source == 'info':
      # Unwrap until we reach the layer that exposes get_info().
      self.inner = self.env
      while not hasattr(self.inner, 'get_info'):
        self.inner = self.inner.env

  @property
  def obs_space(self):
    spaces = self.env.obs_space.copy()
    spaces['image'] = elements.Space(np.uint8, (*self.size, 3))
    return spaces

  @property
  def act_space(self):
    return self.env.act_space

  def step(self, action):
    obs = self.env.step(action)
    if self.source == 'step':
      pass  # The native frame already has the requested size.
    elif self.source == 'info':
      info = self.inner.get_info()
      assert len(info) == 1
      obs['image'] = self._resize(info[0]['rgb'], self.size, self.resize)
    elif self.source == 'render':
      obs['image'] = self._resize(
          self.env.env.render(mode='rgb_array'), self.size, self.resize)
    else:
      raise NotImplementedError(self.source)
    return obs

  def _resize(self, image, size, method):
    """Resize `image` to `size`, given as (height, width)."""
    if method == 'opencv':
      import cv2
      # cv2.resize expects dsize as (width, height), so swap the axes to
      # match the (height, width) convention declared in obs_space. The
      # size was previously passed through unswapped, which transposed the
      # output for non-square sizes (the pillow branch already swapped).
      image = cv2.resize(
          image, (size[1], size[0]), interpolation=cv2.INTER_AREA)
      return image
    elif method == 'pillow':
      image = Image.fromarray(image)
      # PIL's resize also takes (width, height).
      image = image.resize((size[1], size[0]), Image.BILINEAR)
      image = np.array(image)
      return image
    else:
      raise NotImplementedError(method)
diff --git a/models/embodied/jax/__init__.py b/models/embodied/jax/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ff49668837471ca1b8f54680197f4cea4034643
--- /dev/null
+++ b/models/embodied/jax/__init__.py
@@ -0,0 +1,15 @@
+from .agent import Agent
+
+from .heads import DictHead
+from .heads import Head
+from .heads import MLPHead
+
+from .utils import LayerScan
+from .utils import Normalize
+from .utils import SlowModel
+
+from .opt import Optimizer
+
+from . import nets
+from . import outs
+from . import opt
diff --git a/models/embodied/jax/agent.py b/models/embodied/jax/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba9042371f69290972e601751d93488c31f96464
--- /dev/null
+++ b/models/embodied/jax/agent.py
@@ -0,0 +1,502 @@
+import contextlib
+import dataclasses
+import re
+import threading
+import time
+
+import chex
+import elements
+import embodied
+import jax
+import jax.experimental.multihost_utils
+import jax.numpy as jnp
+import ninjax as nj
+import numpy as np
+P = jax.sharding.PartitionSpec
+
+from . import internal
+from . import transform
+
+
@dataclasses.dataclass
class Options:
  """JAX wrapper options, parsed from the matching keys of config.jax."""

  policy_devices: tuple = (0,)  # Local device indices used for the policy.
  train_devices: tuple = (0,)   # Local device indices used for training.
  policy_mesh: str = '-1,1,1'   # Policy mesh sizes for the (d, f, t) axes.
  train_mesh: str = '-1,1,1'    # Train mesh sizes for the (d, f, t) axes.
  profiler: bool = True         # Trace updates 100-120 with the JAX profiler.
  expect_devices: int = 0       # If >0, alert and hang on a device mismatch.
  use_shardmap: bool = False    # Shard batches across the full mesh.
  enable_policy: bool = True    # Keep a parameter copy for acting.
  ckpt_chunksize: int = -1      # Parameter group size for checkpointing.
  precompile: bool = True       # Compile train/report eagerly at startup.
+
+
+class Agent(embodied.Agent):
+
  def __new__(subcls, obs_space, act_space, config):
    # Construct the user's model subclass, then wrap it in a plain Agent
    # instance that owns the JAX transforms and device placement. Keys of
    # config.jax that match Options fields configure the wrapper; all
    # remaining keys are passed to internal.setup().
    keys = Options.__dataclass_fields__
    options = {k: v for k, v in config.jax.items() if k in keys}
    setup = {k: v for k, v in config.jax.items() if k not in keys}
    jaxcfg = Options(**options)
    internal.setup(**setup)
    model = super().__new__(subcls)
    model.__init__(obs_space, act_space, config)
    # The returned object is an Agent (not subcls) wrapping the model.
    outer = super().__new__(Agent)
    outer.__init__(model, obs_space, act_space, config, jaxcfg)
    return outer
+
  def __init__(self, model, obs_space, act_space, config, jaxcfg):
    """Set up meshes, shardings, transformed model functions, and params."""
    assert not any(k.startswith('log/') for k in obs_space)
    assert 'reset' not in act_space

    self.model = model
    self.obs_space = obs_space
    self.act_space = act_space
    self.config = config
    self.jaxcfg = jaxcfg
    self.logdir = elements.Path(config.logdir)

    ext_space = self.model.ext_space  # Extra inputs to train and report.
    elements.print('Observations', color='cyan')
    [elements.print(f'  {k:<16} {v}') for k, v in obs_space.items()]
    elements.print('Actions', color='cyan')
    [elements.print(f'  {k:<16} {v}') for k, v in act_space.items()]
    elements.print('Extras', color='cyan')
    [elements.print(f'  {k:<16} {v}') for k, v in ext_space.items()]
    self.spaces = dict(**obs_space, **act_space, **ext_space)
    assert not (obs_space.keys() & ext_space.keys()), (obs_space, ext_space)
    assert not (act_space.keys() & ext_space.keys()), (act_space, ext_space)

    available = jax.devices()
    elements.print(f'JAX devices ({jax.device_count()}):', available)
    if self.jaxcfg.expect_devices > 0:
      # Alert and hang rather than training on the wrong device topology.
      if len(available) != self.jaxcfg.expect_devices:
        print('ALERT: Wrong number of devices')
        while True:
          time.sleep(1)
    assert len(available) == jax.process_count() * jax.local_device_count()
    flatten = lambda x: x.reshape(-1).tolist()
    devices = np.array(available).reshape(
        jax.process_count(), jax.local_device_count())
    self.policy_devices = flatten(devices[:, self.jaxcfg.policy_devices])
    self.train_devices = flatten(devices[:, self.jaxcfg.train_devices])
    print('Policy devices:', ', '.join([str(x) for x in self.policy_devices]))
    print('Train devices: ', ', '.join([str(x) for x in self.train_devices]))

    # d = DP, f = FSDP, t = TP
    self.policy_mesh = internal.mesh(
        self.policy_devices, self.jaxcfg.policy_mesh, ('d', 'f', 't'))
    self.policy_sharded = jax.sharding.NamedSharding(
        self.policy_mesh, P(('d', 'f')))
    self.policy_mirrored = jax.sharding.NamedSharding(self.policy_mesh, P())
    self.train_mesh = internal.mesh(
        self.train_devices, self.jaxcfg.train_mesh, ('d', 'f', 't'))
    self.train_sharded = jax.sharding.NamedSharding(
        self.train_mesh, P(('d', 'f')))
    self.train_mirrored = jax.sharding.NamedSharding(self.train_mesh, P())
    if self.train_mesh.shape['t'] > len(self.jaxcfg.train_devices) or (
        self.policy_mesh.shape['t'] > len(self.jaxcfg.policy_devices)):
      raise NotImplementedError('Inter-node TP is not supported!')
    if self.jaxcfg.use_shardmap:
      # Shard map mode requires pure data parallelism across the mesh.
      assert self.train_mesh.shape['d'] == self.train_mesh.size
      assert self.policy_mesh.shape['d'] == self.policy_mesh.size

    # self.train_node_mesh = internal.node_mesh(self.train_mesh, mp_dims=('t',))
    # print('Train Node mesh:',self.train_node_mesh)

    self.partition_rules = getattr(
        self.model, 'partition_rules', ([('.*', P())], []))
    elements.print('Initializing parameters...', color='yellow')
    with self.train_mesh:
      self.params, self.train_params_sharding = self._init_params()
    elements.print('Done initializing!', color='yellow')
    # Parameters whose names match model.policy_keys are needed for acting.
    pattern = re.compile(self.model.policy_keys)
    self.policy_keys = [k for k in self.params.keys() if pattern.search(k)]
    assert self.policy_keys, (list(self.params.keys()), self.model.policy_keys)

    self.policy_params_sharding = {
        k: jax.sharding.NamedSharding(self.policy_mesh, v.spec)
        for k, v in self.train_params_sharding.items()
        if k in self.policy_keys}

    # Sharding-aware jitted wrappers around the model's pure functions.
    # The tuples give the input and output shardings in argument order.
    shared_kwargs = {'use_shardmap': jaxcfg.use_shardmap}
    tm, ts = self.train_mirrored, self.train_sharded
    pm, ps = self.policy_mirrored, self.policy_sharded
    tp, pp = self.train_params_sharding, self.policy_params_sharding
    _, ar = self.partition_rules
    self._init_train = transform.apply(
        nj.pure(self.model.init_train), self.train_mesh,
        (tp, tm), (ts,), ar, single_output=True, static_argnums=(2,),
        **shared_kwargs)
    self._init_report = transform.apply(
        nj.pure(self.model.init_report), self.train_mesh,
        (tp, tm), (ts,), ar, single_output=True, static_argnums=(2,),
        **shared_kwargs)
    self._init_policy = transform.apply(
        nj.pure(self.model.init_policy), self.policy_mesh,
        (pp, pm), (ps,), ar, single_output=True, static_argnums=(2,),
        **shared_kwargs)
    # Params needed by the policy are kept alive (allocated); the rest are
    # donated to the train step so their buffers can be reused.
    allo_sharding = {k: v for k, v in tp.items() if k in self.policy_keys}
    dona_sharding = {k: v for k, v in tp.items() if k not in self.policy_keys}
    self._train = transform.apply(
        nj.pure(self.model.train), self.train_mesh,
        (dona_sharding, allo_sharding, tm, ts, ts), (tp, ts, ts, tm), ar,
        return_params=True, donate_params=True, first_outnums=(3,),
        **shared_kwargs)
    self._report = transform.apply(
        nj.pure(self.model.report), self.train_mesh,
        (tp, tm, ts, ts), (ts, tm), ar,
        first_outnums=(1,), **shared_kwargs)
    self._policy = transform.apply(
        nj.pure(self.model.policy), self.policy_mesh,
        (pp, pm, ps, ps), (ps, ps, ps), ar,
        static_argnums=(4,), **shared_kwargs)

    self.policy_lock = threading.Lock()
    self.train_lock = threading.Lock()
    self.n_updates = elements.Counter()
    self.n_batches = elements.Counter()
    self.n_actions = elements.Counter()

    # Double-buffered async outputs from train, returned one step late.
    self.pending_outs = None
    self.pending_mets = None
    self.pending_sync = None

    if self.jaxcfg.enable_policy:
      # Separate copy of the acting parameters, placed on the policy mesh.
      policy_params = {
          k: self.params[k].copy() for k in self.policy_keys}
      self.policy_params = internal.move(
          policy_params, self.policy_params_sharding)

      # Convert between stacked batch carries and per-entry lists.
      self._split = jax.jit(
          lambda xs: jax.tree.map(lambda x: list(x), xs),
          internal.local_sharding(self.policy_sharded),
          internal.local_sharding(self.policy_mirrored))
      self._stack = jax.jit(
          lambda xs: jax.tree.map(
              jnp.stack, xs, is_leaf=lambda x: isinstance(x, list)),
          internal.local_sharding(self.policy_mirrored),
          internal.local_sharding(self.policy_sharded))

    self._ckpt_groups = internal.grouped_ckpt_fns(
        self.params, self.jaxcfg.ckpt_chunksize)
    if self.jaxcfg.precompile:
      elements.print('Compiling train and report...', color='yellow')
      with self.train_mesh:
        self._compile_train()
        print('Train cost analysis:')
        print(self._format_jit_stats(self._train))
        self._compile_report()
        print('Report cost analysis:')
        print(self._format_jit_stats(self._report))
      elements.print('Done compiling!', color='yellow')
+
+ def init_policy(self, batch_size):
+ if not self.jaxcfg.enable_policy:
+ raise Exception('Policy not available when enable_policy=False')
+ batch_size = batch_size * jax.process_count()
+ if self.jaxcfg.use_shardmap:
+ batch_size = batch_size // self.policy_mesh.size
+ return self._split(internal.to_local(self._init_policy(
+ self.policy_params, self._seeds(0, self.policy_mirrored), batch_size)))
+
+ def init_train(self, batch_size):
+ batch_size = batch_size * jax.process_count()
+ if self.jaxcfg.use_shardmap:
+ batch_size = batch_size // self.train_mesh.size
+ return self._init_train(
+ self.params, self._seeds(0, self.train_mirrored), batch_size)
+
+ def init_report(self, batch_size):
+ batch_size = batch_size * jax.process_count()
+ if self.jaxcfg.use_shardmap:
+ batch_size = batch_size // self.train_mesh.size
+ return self._init_report(
+ self.params, self._seeds(0, self.train_mirrored), batch_size)
+
  @elements.timer.section('jaxagent_policy')
  def policy(self, carry, obs, mode='train'):
    """Compute actions for a batch of observations.

    Returns the updated carry, the action dict, and auxiliary outputs.
    """
    if not self.jaxcfg.enable_policy:
      raise Exception('Policy not available when enable_policy=False')
    assert not any(k.startswith('log/') for k in obs), obs.keys()
    assert sorted(obs.keys()) == sorted(self.obs_space.keys()), (
        sorted(obs.keys()), sorted(self.obs_space.keys()))
    for key, space in self.obs_space.items():
      assert np.isfinite(obs[key]).all(), (obs[key], key, space)

    with self.policy_lock:
      obs = internal.device_put(obs, self.policy_sharded)
      # Draw a unique seed per policy call.
      with self.n_actions.lock:
        counter = self.n_actions.value
        self.n_actions.value += 1
      seed = self._seeds(counter, self.policy_mirrored)
      carry = internal.to_global(self._stack(carry), self.policy_sharded)

    with self.policy_lock:
      carry, acts, outs = self._policy(
          self.policy_params, seed, carry, obs, mode)

    if self.jaxcfg.enable_policy:
      with self.policy_lock:
        # Swap in parameters synced by the most recent train step, if any,
        # and free the outdated copy.
        if self.pending_sync:
          old = self.policy_params
          self.policy_params = self.pending_sync
          jax.tree.map(lambda x: x.delete(), old)
          self.pending_sync = None

    acts, outs = self._take_outs(internal.fetch_async((acts, outs)))
    carry = self._split(internal.to_local(carry))

    # Sanity checks: any 'finite' flags emitted by the model, plus valid
    # ranges for the actions themselves.
    finite = outs.pop('finite', {})
    for key, fin in finite.items():
      assert all(x.all() for x in jax.tree.leaves(fin)), str(finite)
    for key, space in self.act_space.items():
      if space.discrete:
        assert (acts[key] >= 0).all(), (acts[key], key, space)
      else:
        assert np.isfinite(acts[key]).all(), (acts[key], key, space)

    return carry, acts, outs
+
  @elements.timer.section('jaxagent_train')
  def train(self, carry, data):
    """Run one update step; outputs and metrics are returned one step late."""
    seed = data.pop('seed')
    assert sorted(data.keys()) == sorted(self.spaces.keys()), (
        sorted(data.keys()), sorted(self.spaces.keys()))
    # Parameters needed by the policy are kept alive (allocated) so they can
    # be synced below; the rest are donated to the jitted train step.
    allo = {k: v for k, v in self.params.items() if k in self.policy_keys}
    dona = {k: v for k, v in self.params.items() if k not in self.policy_keys}
    with self.train_lock:
      with elements.timer.section('jit_train'):
        with jax.profiler.StepTraceAnnotation(
            'train', step_num=int(self.n_updates)):
          self.params, carry, outs, mets = self._train(
              dona, allo, seed, carry, data)
          self.n_updates.increment()

    if self.jaxcfg.enable_policy:
      # Hand the pre-update policy params to the next policy() call, or
      # free them if a sync is already pending.
      if not self.pending_sync:
        self.pending_sync = internal.move(
            {k: allo[k] for k in self.policy_keys},
            self.policy_params_sharding)
      else:
        jax.tree.map(lambda x: x.delete(), allo)

    # Double buffering: return the previous step's outputs while fetching
    # this step's asynchronously, avoiding a blocking device transfer.
    return_outs = {}
    if self.pending_outs:
      return_outs = self._take_outs(self.pending_outs)
    self.pending_outs = internal.fetch_async(outs)

    return_mets = {}
    if self.pending_mets:
      return_mets = self._take_outs(self.pending_mets)
    self.pending_mets = internal.fetch_async(mets)

    if self.jaxcfg.profiler:
      outdir, copyto = self.logdir, None
      if str(outdir).startswith(('gs://', '/gcs/', '/cns/')):
        # Profile into a local directory and copy to remote storage after.
        copyto = outdir
        outdir = elements.Path('/tmp/profiler')
        outdir.mkdir()
      if self.n_updates == 100:
        elements.print(f'Start JAX profiler: {str(outdir)}', color='yellow')
        jax.profiler.start_trace(str(outdir))
      if self.n_updates == 120:
        elements.print('Stop JAX profiler', color='yellow')
        jax.profiler.stop_trace()
        if copyto:
          for subdir in elements.Path(outdir).glob('*'):
            subdir.copy(copyto, recursive=True)
          print(f'Copied profiler result {outdir} to {copyto}')

    return carry, return_outs, return_mets
+
+ @elements.timer.section('jaxagent_report')
+ def report(self, carry, data):
+ seed = data.pop('seed')
+ assert sorted(data.keys()) == sorted(self.spaces.keys()), (
+ sorted(data.keys()), sorted(self.spaces.keys()))
+ with self.train_lock:
+ carry, mets = self._report(self.params, seed, carry, data)
+ mets = self._take_outs(internal.fetch_async(mets))
+ mets['params/summary'] = self._summary()
+ return carry, mets
+
+ def stream(self, st):
+ def fn(data):
+ for key, value in data.items():
+ if np.issubdtype(value.dtype, np.floating):
+ assert not np.isnan(value).any(), (key, value)
+ data = internal.device_put(data, self.train_sharded)
+ with self.n_batches.lock:
+ counter = self.n_batches.value
+ self.n_batches.value += 1
+ seed = self._seeds(counter, self.train_mirrored)
+ return {**data, 'seed': seed}
+ return embodied.streams.Prefetch(st, fn)
+
+ @elements.timer.section('jaxagent_save')
+ def save(self):
+ with self.train_lock:
+ params = {}
+ for keys, gather_fn, _ in self._ckpt_groups:
+ group = {k: self.params[k] for k in keys}
+ params.update(jax.device_get(gather_fn(group)))
+ assert params
+ counters = {
+ 'updates': int(self.n_updates),
+ 'batches': int(self.n_batches),
+ 'actions': int(self.n_actions),
+ }
+ data = {'params': params, 'counters': counters}
+ return data
+
  @elements.timer.section('jaxagent_load')
  def load(self, data, regex=None):
    """Restore parameters and counters from a checkpoint dict.

    Args:
      data: Dict with 'params' (host arrays) and 'counters', as produced by
        save().
      regex: Optional pattern; when given, only matching parameter keys are
        loaded (partial restore from a pretrained checkpoint).
    """
    params = data['params']
    assert params

    with contextlib.ExitStack() as stack:
      # Hold both locks so neither training nor the policy can observe a
      # half-updated parameter set.
      stack.enter_context(self.train_lock)
      stack.enter_context(self.policy_lock)

      with self.n_updates.lock:
        self.n_updates.value = int(data['counters']['updates'])
      with self.n_batches.lock:
        # We restore n_batches to the checkpointed update counter, so the
        # prefetched batches that were not trained on get repeated.
        self.n_batches.value = int(data['counters']['updates'])
      with self.n_actions.lock:
        self.n_actions.value = int(data['counters']['actions'])

      if regex:
        # Partial restore: free and overwrite only the matching keys.
        params = {k: v for k, v in params.items() if re.match(regex, k)}
        keys = params.keys()
        jax.tree.map(lambda x: x.delete(), [self.params[k] for k in keys])
        params = internal.ckpt_fn({k: self.params[k] for k in keys})[1](
            internal.device_put(params, self.train_mirrored))
        print('Loaded pretrained checkpoint with keys:', list(params.keys()))
        self.params.update(params)
      else:
        # Full restore: free all existing buffers, then shard the loaded
        # arrays group by group using the precompiled checkpoint functions.
        chex.assert_trees_all_equal_shapes(self.params, params)
        jax.tree.map(lambda x: x.delete(), self.params)

        loaded = {}
        for keys, _, shard_fn in self._ckpt_groups:
          group = {k: params[k] for k in keys}
          group = shard_fn(internal.device_put(group, self.train_mirrored))
          loaded.update(group)
        self.params = loaded

      if self.jaxcfg.enable_policy:
        # Refresh the policy's private copy of the restored parameters.
        jax.tree.map(lambda x: x.delete(), self.policy_params)
        policy_params = {
            k: self.params[k].copy() for k in self.policy_keys}
        self.policy_params = internal.move(
            policy_params, self.policy_params_sharding)
+
+ def _take_outs(self, outs):
+ outs = jax.tree.map(lambda x: x.__array__(), outs)
+ outs = jax.tree.map(
+ lambda x: np.float32(x) if x.dtype == jnp.bfloat16 else x, outs)
+ return outs
+
+ def _seeds(self, counter, sharding):
+ rng = np.random.default_rng(seed=[self.config.seed, int(counter)])
+ seeds = rng.integers(0, np.iinfo(np.uint32).max, (2,), np.uint32)
+ return internal.device_put(seeds, sharding)
+
  def _init_params(self):
    """Initialize model parameters and their shardings on the train mesh.

    Runs the ninjax init/apply transforms for init_train and train to
    discover the full parameter tree, partitioned according to the
    configured partition rules.
    """
    # Per-process batch size; GB is the global batch across all processes.
    B = min(self.config.batch_size, len(self.jaxcfg.train_devices))
    GB = B * jax.process_count()
    T = self.config.batch_length
    C = self.config.replay_context
    tm, ts = self.train_mirrored, self.train_sharded
    us = self.jaxcfg.use_shardmap

    with jax._src.config.explicit_device_get_scope():
      seed = jax.device_put(np.array([self.config.seed, 0], np.uint32), tm)
      data = internal.device_put(self._zeros(self.spaces, (B, T + C)), ts)
    pr, ar = self.partition_rules

    # First discover the parameters created by init_train itself.
    params, params_sharding = transform.init(
        self.model.init_train, self.train_mesh,
        ({}, self.train_mirrored),
        param_partition_rules=pr,
        act_partition_rules=ar,
        static_argnums=(2,),
        dummy_inputs=({}, seed, GB),
        print_partition=(len(pr) >= 2),
    )
    # Obtain an initial carry; under shard_map each shard only receives its
    # slice of the global batch, hence the division by the mesh size.
    carry = transform.apply(
        nj.pure(self.model.init_train), self.train_mesh,
        (params_sharding, tm), (ts,), single_output=True,
        static_argnums=(2,), use_shardmap=us)(
            params, seed, GB // self.train_mesh.size if us else GB)
    # Then extend the parameter tree with everything train() creates.
    params, params_sharding = transform.init(
        self.model.train, self.train_mesh,
        (params_sharding, tm, ts, ts),
        param_partition_rules=pr,
        act_partition_rules=ar,
        dummy_inputs=(params, seed, carry, data),
        print_partition=(len(pr) >= 2),
    )
    return params, params_sharding
+
+ def _compile_train(self):
+ B = self.config.batch_size
+ T = self.config.batch_length
+ C = self.config.replay_context
+ data = self._zeros(self.spaces, (B, T + C))
+ data = internal.device_put(data, self.train_sharded)
+ seed = self._seeds(0, self.train_mirrored)
+ carry = self.init_train(B)
+ allo = {k: v for k, v in self.params.items() if k in self.policy_keys}
+ dona = {k: v for k, v in self.params.items() if k not in self.policy_keys}
+ self._train = self._train.lower(dona, allo, seed, carry, data).compile()
+
+ def _compile_report(self):
+ B = self.config.batch_size
+ T = self.config.report_length
+ C = self.config.replay_context
+ data = self._zeros(self.spaces, (B, T + C))
+ data = internal.device_put(data, self.train_sharded)
+ seed = self._seeds(0, self.train_mirrored)
+ carry = self.init_report(B)
+ self._report = self._report.lower(self.params, seed, carry, data).compile()
+
+ def _summary(self):
+ lines = []
+ for k, v in self.params.items():
+ lines.append(f'{k:<40} {v.dtype} {v.size} {v.shape}')
+ return '\n'.join(lines)
+
+ def _zeros(self, spaces, batch_shape):
+ data = {k: np.zeros(v.shape, v.dtype) for k, v in spaces.items()}
+ for dim in reversed(batch_shape):
+ data = {k: np.repeat(v[None], dim, axis=0) for k, v in data.items()}
+ return data
+
+ def _format_jit_stats(self, compiled):
+ try:
+ cost = compiled.cost_analysis()
+ mem = compiled.memory_analysis()
+ lines = []
+ lines.append(f"FLOPS: {cost[0]['flops']:.1e}")
+ lines.append(f"Memory (temp): {mem.temp_size_in_bytes:.1e}")
+ lines.append(f"Memory (inputs): {mem.argument_size_in_bytes:.1e}")
+ lines.append(f"Memory (outputs): {mem.output_size_in_bytes:.1e}")
+ lines.append(f"Memory (code): {mem.generated_code_size_in_bytes:.1e}")
+ return ''.join(f' {line}\n' for line in lines)
+ except (TypeError, AttributeError, KeyError):
+ return 'No available'
+
def init(fun, **jit_kwargs):
  """Wrap a (possibly impure) function so it returns only its ninjax state."""
  pure = fun if getattr(fun, '_is_pure', False) else nj.pure(fun)
  def wrapper(*args, **kwargs):
    state, _ = pure(*args, create=True, modify=True, ignore=True, **kwargs)
    return state, ()
  return wrapper
diff --git a/models/embodied/jax/heads.py b/models/embodied/jax/heads.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec887d31ad5f86481b089c66e9096bb23faddaf2
--- /dev/null
+++ b/models/embodied/jax/heads.py
@@ -0,0 +1,162 @@
+from typing import Callable
+
+import elements
+import jax
+import jax.numpy as jnp
+import ninjax as nj
+import numpy as np
+
+from . import nets
+from . import outs
+
+i32 = jnp.int32
+f32 = jnp.float32
+
+
class MLPHead(nj.Module):
  """MLP trunk followed by one or more distribution heads."""

  units: int = 1024
  layers: int = 5
  act: str = 'silu'
  norm: str = 'rms'
  bias: bool = True
  winit: str | Callable = nets.Initializer('trunc_normal')
  binit: str | Callable = nets.Initializer('zeros')

  def __init__(self, space, output, **hkw):
    common = dict(bias=self.bias, winit=self.winit, binit=self.binit)
    self.mlp = nets.MLP(
        self.layers, self.units, **common, act=self.act, norm=self.norm,
        name='mlp')
    # A dict of spaces gets one sub-head per key, otherwise a single head.
    head_cls = DictHead if isinstance(space, dict) else Head
    self.head = head_cls(space, output, **common, **hkw, name='head')

  def __call__(self, x, bdims):
    bshape = jax.tree.leaves(x)[0].shape[:bdims]
    flat = x.reshape((*bshape, -1))
    return self.head(self.mlp(flat))
+
+
class DictHead(nj.Module):
  """Applies a separate Head per key of a dict of spaces and outputs."""

  def __init__(self, spaces, outputs, **kw):
    assert spaces, spaces
    if not isinstance(spaces, dict):
      spaces = {'output': spaces}
    if not isinstance(outputs, dict):
      outputs = {'output': outputs}
    assert spaces.keys() == outputs.keys(), (spaces, outputs)
    self.spaces = spaces
    self.outputs = outputs
    self.kw = kw

  def __call__(self, x):
    return {
        key: self.sub(key, Head, self.spaces[key], impl, **self.kw)(x)
        for key, impl in self.outputs.items()}
+
+
class Head(nj.Module):
  """Maps features to a distribution-like output over a space.

  The `output` string selects one of the methods below by name (e.g. 'mse',
  'categorical'); each returns an outs.* object wrapping predictions.
  """

  minstd: float = 1.0
  maxstd: float = 1.0
  unimix: float = 0.0
  bins: int = 255
  outscale: float = 1.0

  def __init__(self, space, output, **kw):
    if isinstance(space, tuple):
      # Bare shape tuples are treated as float spaces.
      space = elements.Space(np.float32, space)
    if output == 'onehot':
      # Replace the discrete space by the continuous space of its one-hot
      # encodings; all positions must share the same number of classes.
      classes = np.asarray(space.classes).flatten()
      assert (classes == classes[0]).all(), classes
      shape = (*space.shape, classes[0].item())
      space = elements.Space(f32, shape, 0.0, 1.0)
    self.space = space
    self.impl = output
    self.kw = {**kw, 'outscale': self.outscale}

  def __call__(self, x):
    # Dispatch to the method named by self.impl.
    if not hasattr(self, self.impl):
      raise NotImplementedError(self.impl)
    x = nets.ensure_dtypes(x)
    output = getattr(self, self.impl)(x)
    if self.space.shape:
      # Aggregate over the event dimensions of the space.
      output = outs.Agg(output, len(self.space.shape), jnp.sum)
    assert output.pred().shape[x.ndim - 1:] == self.space.shape, (
        self.space, self.impl, x.shape, output.pred().shape)
    return output

  def binary(self, x):
    # Bernoulli output over a binary space.
    assert np.all(self.space.classes == 2), self.space
    logit = self.sub('logit', nets.Linear, self.space.shape, **self.kw)(x)
    return outs.Binary(logit)

  def categorical(self, x):
    # Categorical output; all positions must share one class count.
    assert self.space.discrete
    classes = np.asarray(self.space.classes).flatten()
    assert (classes == classes[0]).all(), classes
    shape = (*self.space.shape, classes[0].item())
    logits = self.sub('logits', nets.Linear, shape, **self.kw)(x)
    output = outs.Categorical(logits)
    output.minent = 0
    output.maxent = np.log(logits.shape[-1])
    return output

  def onehot(self, x):
    # One-hot output over the (already converted, continuous) space, with
    # optional uniform mixing for exploration.
    assert not self.space.discrete
    logits = self.sub('logits', nets.Linear, self.space.shape, **self.kw)(x)
    return outs.OneHot(logits, self.unimix)

  def mse(self, x):
    assert not self.space.discrete
    pred = self.sub('pred', nets.Linear, self.space.shape, **self.kw)(x)
    return outs.MSE(pred)

  def huber(self, x):
    assert not self.space.discrete
    pred = self.sub('pred', nets.Linear, self.space.shape, **self.kw)(x)
    return outs.Huber(pred)

  def symlog_mse(self, x):
    # MSE in symlog space, for targets with large dynamic range.
    assert not self.space.discrete
    pred = self.sub('pred', nets.Linear, self.space.shape, **self.kw)(x)
    return outs.MSE(pred, nets.symlog)

  def symexp_twohot(self, x):
    # Two-hot regression over symexp-spaced bins: covers a huge value range
    # with most resolution near zero.
    assert not self.space.discrete
    shape = (*self.space.shape, self.bins)
    logits = self.sub('logits', nets.Linear, shape, **self.kw)(x)
    if self.bins % 2 == 1:
      # Odd bin count: share the central zero bin between both halves.
      half = jnp.linspace(-20, 0, (self.bins - 1) // 2 + 1, dtype=f32)
      half = nets.symexp(half)
      bins = jnp.concatenate([half, -half[:-1][::-1]], 0)
    else:
      half = jnp.linspace(-20, 0, self.bins // 2, dtype=f32)
      half = nets.symexp(half)
      bins = jnp.concatenate([half, -half[::-1]], 0)
    return outs.TwoHot(logits, bins)

  def bounded_normal(self, x):
    # Tanh-squashed mean with sigmoid-bounded stddev in [minstd, maxstd].
    assert not self.space.discrete
    mean = self.sub('mean', nets.Linear, self.space.shape, **self.kw)(x)
    stddev = self.sub('stddev', nets.Linear, self.space.shape, **self.kw)(x)
    lo, hi = self.minstd, self.maxstd
    stddev = (hi - lo) * jax.nn.sigmoid(stddev + 2.0) + lo
    output = outs.Normal(jnp.tanh(mean), stddev)
    output.minent = outs.Normal(jnp.zeros_like(mean), self.minstd).entropy()
    output.maxent = outs.Normal(jnp.zeros_like(mean), self.maxstd).entropy()
    return output

  def normal_logstd(self, x):
    # Normal with unconstrained mean and log-parameterized stddev.
    assert not self.space.discrete
    mean = self.sub('mean', nets.Linear, self.space.shape, **self.kw)(x)
    stddev = self.sub('stddev', nets.Linear, self.space.shape, **self.kw)(x)
    output = outs.Normal(mean, jnp.exp(stddev))
    return output
diff --git a/models/embodied/jax/internal.py b/models/embodied/jax/internal.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae9047b4061f0696b4c24a051865a191c7fdc654
--- /dev/null
+++ b/models/embodied/jax/internal.py
@@ -0,0 +1,304 @@
+import concurrent.futures
+import math
+import os
+import string
+
+import elements
+import jax
+import jax.numpy as jnp
+import numpy as np
+from jax.sharding import PartitionSpec as P
+
+from . import nets
+
+
def setup(
    platform=None,
    compute_dtype=jnp.bfloat16,
    debug=False,
    jit=True,
    prealloc=False,
    mock_devices=0,
    transfer_guard=True,
    deterministic=True,
    autotune=1,
    gpuflags=True,
    tpuflags=False,
    xladump=None,
    debug_nans=False,
    process_id=-1,
    num_processes=1,
    coordinator_address=None,
    compilation_cache=True,
):
  """Configure JAX, XLA flags, and multi-host setup for this process.

  NOTE(review): must run before the JAX backend initializes for XLA_FLAGS
  to take effect, and it overwrites any pre-existing XLA_FLAGS value —
  confirm both are intended.
  """
  platform and jax.config.update('jax_platforms', platform)
  jax.config.update('jax_disable_most_optimizations', debug)
  jax.config.update('jax_disable_jit', not jit)
  if transfer_guard and jit and not debug_nans:
    # Fail loudly on implicit host<->device transfers; the helpers in this
    # module opt back in via explicit transfer scopes where intended.
    jax.config.update('jax_transfer_guard', 'disallow')
  os.environ['XLA_PYTHON_CLIENT_PREALLOCATE'] = str(bool(prealloc)).lower()
  jax.config.update('jax_debug_nans', debug_nans)
  jax.config.update('jax_enable_compilation_cache', compilation_cache)

  xlaflags = []
  xlaflags.append(f'--xla_gpu_autotune_level={autotune}')
  if deterministic:
    os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
    xlaflags.append('--xla_gpu_deterministic_ops=true')
  if mock_devices:
    # Emulate multiple devices on the host platform, for testing.
    xlaflags.append(f'--xla_force_host_platform_device_count={mock_devices}')
  if xladump:
    elements.Path(xladump).mkdir()
    xlaflags.append(f'--xla_dump_to={xladump}')
    xlaflags.append('--xla_dump_hlo_as_long_text')
  if gpuflags and platform == 'gpu':
    # xla_flags.append('--xla_gpu_enable_latency_hiding_scheduler=true')
    # xla_flags.append('--xla_gpu_enable_async_all_gather=true')
    # xla_flags.append('--xla_gpu_enable_async_reduce_scatter=true')
    # xla_flags.append('--xla_gpu_enable_triton_gemm=false')
    # os.environ['CUDA_DEVICE_MAX_CONNECTIONS'] = '1'
    # os.environ['NCCL_IB_SL'] = '1'
    # os.environ['NCCL_NVLS_ENABLE'] = '0'
    # os.environ['CUDA_MODULE_LOADING'] = 'EAGER'
    xlaflags += [
        '--xla_disable_hlo_passes=rematerialization',
        '--xla_gpu_all_gather_combine_threshold_bytes=134217728',
        '--xla_gpu_all_reduce_combine_threshold_bytes=134217728',
        '--xla_gpu_enable_all_gather_combine_by_dim=false',
        '--xla_gpu_enable_highest_priority_async_stream=true',
        '--xla_gpu_enable_latency_hiding_scheduler=true',
        '--xla_gpu_enable_pipelined_all_gather=true',
        '--xla_gpu_enable_pipelined_all_reduce=true',
        '--xla_gpu_enable_pipelined_reduce_scatter=true',
        '--xla_gpu_enable_reduce_scatter_combine_by_dim=false',
        '--xla_gpu_enable_triton_gemm=false',
        '--xla_gpu_enable_triton_softmax_fusion=false',
        '--xla_gpu_enable_while_loop_double_buffering=true',
        '--xla_gpu_graph_level=0',
        '--xla_gpu_reduce_scatter_combine_threshold_bytes=67108864',
    ]
  if tpuflags and platform == 'tpu':
    xlaflags += [
        '--xla_disable_hlo_passes=rematerialization',
        '--xla_tpu_megacore_fusion_allow_ags=false',
        '--xla_enable_async_collective_permute=true',
        '--xla_tpu_enable_ag_backward_pipelining=true',
        '--xla_tpu_enable_data_parallel_all_reduce_opt=true',
        '--xla_tpu_data_parallel_opt_different_sized_ops=true',
        '--xla_tpu_enable_async_collective_fusion=true',
        '--xla_tpu_enable_async_collective_fusion_multiple_steps=true',
        '--xla_tpu_overlap_compute_collective_tc=true',
        '--xla_enable_async_all_gather=true',
    ]
  if xlaflags:
    os.environ['XLA_FLAGS'] = ' '.join(xlaflags)

  if num_processes > 1 and platform != 'tpu':
    # Note that the process_id is unrelated to the jax.process_index() that JAX
    # will assign later. It is only used to establish initial communication and
    # for error handling, whereas jax.process_index() depends on the underlying
    # hardware mesh.
    assert process_id >= 0
    assert coordinator_address
    jax.distributed.initialize(coordinator_address, num_processes, process_id)
    index, count = jax.process_index(), jax.process_count()
    print(f'JAX multi-host initialized: ({process_id}) {index} / {count}')

  # Allow the compute dtype to be given by name, e.g. 'bfloat16'.
  if isinstance(compute_dtype, str):
    compute_dtype = getattr(jnp, compute_dtype)
  nets.COMPUTE_DTYPE = compute_dtype
+
+
def get_named_axes():
  """Return the single-letter mapped axis names bound in the current context."""
  def bound(name):
    try:
      jax.lax.axis_index(name)
      return True
    except NameError:
      return False
  return [name for name in string.ascii_lowercase if bound(name)]
+
+
def get_data_axes():
  """Return ('d', 'f') if both data axes are bound, else an empty tuple."""
  names = ('d', 'f')
  try:
    for name in names:
      jax.lax.axis_index(name)
  except NameError:
    return ()
  return names
+
+
def fetch_async(value):
  """Kick off async device-to-host copies of all arrays and return them."""
  if is_multihost():
    value = to_local(value)
  with jax._src.config.explicit_device_get_scope():
    for leaf in jax.tree.leaves(value):
      leaf.copy_to_host_async()
  return value
+
+
def is_multihost():
  """True when this JAX program spans more than one host process."""
  count = jax.process_count()
  return count > 1
+
+
def device_put(value, sharding):
  """Place host arrays on device under a sharding, handling multi-host."""
  if not is_multihost():
    return jax.device_put(value, sharding)
  # Each process contributes its local data slice of the global array.
  with jax._src.config.explicit_device_put_scope():
    build = lambda x: jax.make_array_from_process_local_data(sharding, x)
    return jax.tree.map(build, value)
+
+
def local_sharding(sharding):
  """Map global NamedShardings in a tree to shardings over the local mesh."""
  def localize(s):
    return jax.sharding.NamedSharding(s.mesh.local_mesh, s.spec)
  return jax.tree.map(localize, sharding)
+
+
def to_local(x):
  """Convert each globally-sharded array in the tree to a process-local one."""
  leaves, treedef = jax.tree.flatten(x)
  return jax.tree.unflatten(treedef, [_to_local(leaf) for leaf in leaves])
+
+
def _to_local(x):
  """Reinterpret one globally-sharded array as an array over the local mesh.

  The per-device shard buffers are reused untouched; only the logical shape
  is rescaled from the global mesh extent to the local mesh extent along
  each sharded dimension.
  """
  shape, sharding = x.shape, x.sharding
  spec, mesh = sharding.spec, sharding.mesh
  # Pad the partition spec with None for trailing unsharded dimensions.
  fullspec = [*spec, *([None] * (len(shape) - len(spec)))]
  assert len(shape) == len(fullspec)
  shard_shape = []
  for d, s in zip(shape, fullspec):
    if s is None:
      ms, lms = 1, 1
    else:
      if not isinstance(s, tuple):
        s = (s,)
      # Number of shards along this dim: globally (ms) and on this host (lms).
      ms = math.prod(mesh.shape[si] for si in s)
      lms = math.prod(mesh.local_mesh.shape[si] for si in s)
    shard_shape.append(d // ms * lms)
  shard_shape = tuple(shard_shape)
  arrs = [arr.data for arr in x.addressable_shards]
  sharding_local = jax.sharding.NamedSharding(mesh.local_mesh, spec)
  x = jax.make_array_from_single_device_arrays(
      shard_shape, sharding_local, arrs)
  return x
+
+
def to_global(x, global_sharding):
  """Reassemble process-local arrays into global arrays under a sharding."""
  if isinstance(global_sharding, jax.sharding.NamedSharding):
    # A single sharding applies uniformly to every leaf of the tree.
    return jax.tree.map(lambda leaf: _to_global(leaf, global_sharding), x)
  # Otherwise a matching tree of per-leaf shardings was given.
  return jax.tree.map(_to_global, x, global_sharding)
+
+
def _to_global(x, global_sharding):
  """Reinterpret one process-local array as a globally-sharded array.

  Inverse of _to_local: the local shard buffers are reused and the logical
  shape is rescaled from the local mesh extent to the global mesh extent
  along each sharded dimension.
  """
  shape, sharding = x.shape, x.sharding
  spec = sharding.spec
  # Pad the partition spec with None for trailing unsharded dimensions.
  fullspec = [*spec, *([None] * (len(shape) - len(spec)))]
  assert len(shape) == len(fullspec)
  shard_shape = []
  for d, s in zip(shape, fullspec):
    if s is None:
      ms, lms = 1, 1
    else:
      if not isinstance(s, tuple):
        s = (s,)
      # Number of shards along this dim: globally (ms) and locally (lms).
      ms = math.prod(global_sharding.mesh.shape[si] for si in s)
      lms = math.prod(sharding.mesh.shape[si] for si in s)
    shard_shape.append(d // lms * ms)
  shard_shape = tuple(shard_shape)
  arrs = [arr.data for arr in x.addressable_shards]
  x = jax.make_array_from_single_device_arrays(
      shard_shape, global_sharding, arrs)
  return x
+
+
def move(xs, dst_sharding):
  """Transfer arrays to a new sharding, via host-local views when multi-host."""
  if not is_multihost():
    return jax.device_put(xs, dst_sharding)
  local = jax.device_put(to_local(xs), local_sharding(dst_sharding))
  return to_global(local, dst_sharding)
+
+
def mesh(devices, shape, names):
  """Build a device mesh from a comma-separated shape string.

  A single -1 entry is inferred from the device count, like numpy reshape.
  """
  dims = [int(part) for part in shape.split(',')]
  # At most a single -1 is allowed.
  assert sum(dim == -1 for dim in dims) <= 1
  total = len(devices)
  known = math.prod(dim for dim in dims if dim != -1)
  assert total % known == 0
  dims = [total // known if dim == -1 else dim for dim in dims]
  assert math.prod(dims) == total
  grid = np.array(devices).reshape(dims)
  return jax.sharding.Mesh(grid, names)
+
+
def grouped_ckpt_fns(params, chunksize):
  """Split params into size-bounded groups and compile gather/shard fns.

  Args:
    params: Dict of named device arrays.
    chunksize: Maximum total bytes per group; <= 0 means a single group.

  Returns:
    List of (keys, gather_fn, shard_fn) tuples, one per group.
  """
  if chunksize <= 0:
    groups = [list(params.keys())]
  else:
    # Greedily pack keys into groups without exceeding chunksize. A single
    # parameter larger than chunksize gets its own group; the previous
    # greedy loop emitted an empty group in that case, which then failed
    # the sanity assertions below.
    groups = []
    keys, size = [], 0
    for k, v in params.items():
      if keys and size + v.nbytes > chunksize:
        groups.append(keys)
        keys, size = [], 0
      keys.append(k)
      size += v.nbytes
    keys and groups.append(keys)
  assert sum(len(keys) for keys in groups) == len(params)
  assert all(len(keys) for keys in groups)
  msg = f'Compiling {len(groups)} checkpoint groups...'
  elements.print(msg, color='yellow')
  maxsize = max(sum(params[k].nbytes for k in g) for g in groups)
  print(f'Largest checkpoint group: {maxsize / (1024 ** 3):.0f} GB')

  # Compile all group functions concurrently, since compilation dominates.
  gather_fns, shard_fns = [], []
  with concurrent.futures.ThreadPoolExecutor(64) as pool:
    for keys in groups:
      gather_fn, shard_fn = ckpt_fn(
          {k: params[k] for k in keys}, compile=False)
      gather_fns.append(pool.submit(gather_fn.compile))
      shard_fns.append(pool.submit(shard_fn.compile))
    gather_fns = [future.result() for future in gather_fns]
    shard_fns = [future.result() for future in shard_fns]

  return list(zip(groups, gather_fns, shard_fns))
+
+
def ckpt_fn(params, compile=True):
  """Build jitted gather/shard functions for checkpointing a param group.

  gather_fn reshards params from their training sharding to fully
  replicated (mirrored) form for host transfer; shard_fn is the inverse.

  Args:
    params: Dict of device arrays that all live on one mesh.
    compile: Whether to compile the lowered functions immediately; pass
      False to compile later (e.g. concurrently, see grouped_ckpt_fns).
  """
  mesh = params[list(params.keys())[0]].sharding.mesh
  mirrored = jax.sharding.NamedSharding(mesh, P())
  struct = lambda x, s: jax.ShapeDtypeStruct(x.shape, x.dtype, sharding=s)
  keys = params.keys()
  original = {k: params[k].sharding for k in keys}
  inspec = {k: struct(params[k], original[k]) for k in keys}
  # Identity functions jitted with different input/output shardings; XLA
  # inserts the collectives required for the resharding.
  gather_fn = jax.jit(lambda x: x, (original,), mirrored).lower(inspec)
  inspec = {k: struct(params[k], mirrored) for k in keys}
  shard_fn = jax.jit(lambda x: x, (mirrored,), original).lower(inspec)
  if compile:
    gather_fn = gather_fn.compile()
    shard_fn = shard_fn.compile()
  return gather_fn, shard_fn
+
+
+# def node_mesh(mesh, mp_dims=('t',)):
+# n_mp = math.prod(mesh.shape[d] for d in mp_dims)
+# n_local = mesh.local_mesh.size
+# n_mp_nodes = max(1, n_mp // n_local)
+# total_nodes = mesh.size // n_local
+# n_data_nodes = total_nodes // n_mp_nodes
+# assert n_data_nodes * n_mp_nodes == total_nodes
+# data_node_rank, model_node_rank = divmod(jax.process_index(), n_mp_nodes)
+# data_node_size, model_node_size = n_data_nodes, n_mp_nodes
+# return {
+# 'data_node_rank': data_node_rank,
+# 'data_node_size': data_node_size,
+# 'model_node_rank': model_node_rank,
+# 'model_node_size': model_node_size,
+# }
+
diff --git a/models/embodied/jax/nets.py b/models/embodied/jax/nets.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfe7c23705bb6e485503fdc4571c85b46833a017
--- /dev/null
+++ b/models/embodied/jax/nets.py
@@ -0,0 +1,670 @@
+import functools
+import math
+from typing import Callable
+
+import einops
+import jax
+import jax.ad_checkpoint as adc
+import jax.numpy as jnp
+import ninjax as nj
+import numpy as np
+
+COMPUTE_DTYPE = jnp.bfloat16
+LAYER_CALLBACK = lambda tensor, name: tensor
+
+f32 = jnp.float32
+
+
def cast(xs, force=False):
  """Cast tree leaves to COMPUTE_DTYPE; only floating leaves unless force."""
  def convert(x):
    if force or jnp.issubdtype(x.dtype, jnp.floating):
      return COMPUTE_DTYPE(x)
    return x
  return jax.tree.map(convert, xs)
+
+
def act(name):
  """Look up an activation function by name."""
  if name == 'none':
    return lambda x: x
  if name == 'mish':
    return lambda x: x * jnp.tanh(jax.nn.softplus(x))
  if name == 'relu2':
    return lambda x: jnp.square(jax.nn.relu(x))
  if name == 'swiglu':
    # Gated activation that halves the feature dimension.
    def swiglu(x):
      gate, value = jnp.split(x, 2, -1)
      return jax.nn.silu(gate) * value
    return swiglu
  # Fall back to the activations provided by jax.nn (silu, relu, gelu, ...).
  return getattr(jax.nn, name)
+
+
def init(name):
  """Resolve an initializer spec (callable or name string) to a callable."""
  if callable(name):
    return name
  # Names may carry a fan suffix, e.g. 'trunc_normal_out'; default fan 'in'.
  if name.endswith(('_in', '_out', '_avg')):
    dist, fan = name.rsplit('_', 1)
  else:
    dist, fan = name, 'in'
  return Initializer(dist, fan, 1.0)
+
+
def dropout(x, prob, training):
  """Randomly zero entries with probability prob during training."""
  if not (prob and training):
    return x
  keep = jax.random.bernoulli(nj.seed(), 1.0 - prob, x.shape)
  # Inverted dropout: rescale so the expected activation is unchanged.
  return x * keep / (1.0 - prob)
+
+
def symlog(x):
  """Symmetric log transform: sign(x) * log(1 + |x|)."""
  magnitude = jnp.log1p(jnp.abs(x))
  return jnp.where(x >= 0, magnitude, -magnitude)
+
+
def symexp(x):
  """Inverse of symlog: sign(x) * (exp(|x|) - 1)."""
  magnitude = jnp.expm1(jnp.abs(x))
  return jnp.where(x >= 0, magnitude, -magnitude)
+
+
def where(condition, xs, ys):
  """Tree-wise select; condition broadcasts over trailing dims of each leaf."""
  assert condition.dtype == bool, condition.dtype
  def select(x, y):
    assert x.shape == y.shape, (x.shape, y.shape)
    extra = list(range(condition.ndim, x.ndim))
    return jnp.where(jnp.expand_dims(condition, extra), x, y)
  return jax.tree.map(select, xs, ys)
+
+
def mask(xs, mask):
  """Zero out tree leaves wherever mask is False."""
  zeros = jax.tree.map(jnp.zeros_like, xs)
  return where(mask, xs, zeros)
+
+
def available(*trees, bdims=None):
  """Elementwise mask of valid entries across matching leaves of the trees.

  Floats are invalid at -inf, signed ints at -1; unsigned ints and bools are
  always valid. With bdims set, validity is reduced over the event dims.
  """
  def leaf_mask(x):
    if jnp.issubdtype(x.dtype, jnp.floating):
      valid = (x != -jnp.inf)
    elif jnp.issubdtype(x.dtype, jnp.signedinteger):
      valid = (x != -1)
    elif (
        jnp.issubdtype(x.dtype, jnp.unsignedinteger) or
        jnp.issubdtype(x.dtype, bool)):
      shape = x.shape if bdims is None else x.shape[:bdims]
      valid = jnp.full(shape, True, bool)
    else:
      raise NotImplementedError(x.dtype)
    if bdims is not None:
      valid = valid.all(tuple(range(bdims, valid.ndim)))
    return valid
  def combine(*xs):
    return jnp.stack([leaf_mask(x) for x in xs], 0).all(0)
  return jax.tree.map(combine, *trees)
+
+
# Identity function that asserts the forward activation dtype and the
# backward gradient dtype, both defaulting to COMPUTE_DTYPE. Implemented as
# a custom-vjp identity so the dtype check also runs on the cotangent
# during backpropagation.
@functools.partial(jax.custom_vjp, nondiff_argnums=[1, 2])
def ensure_dtypes(x, fwd=None, bwd=None):
  fwd = fwd or COMPUTE_DTYPE
  bwd = bwd or COMPUTE_DTYPE
  assert x.dtype == fwd, (x.dtype, fwd)
  return x
def ensure_dtypes_fwd(x, fwd=None, bwd=None):
  fwd = fwd or COMPUTE_DTYPE
  bwd = bwd or COMPUTE_DTYPE
  # No residuals are needed for the backward pass.
  return ensure_dtypes(x, fwd, bwd), ()
def ensure_dtypes_bwd(fwd, bwd, cache, dx):
  fwd = fwd or COMPUTE_DTYPE
  bwd = bwd or COMPUTE_DTYPE
  assert dx.dtype == bwd, (dx.dtype, bwd)
  return (dx,)
ensure_dtypes.defvjp(ensure_dtypes_fwd, ensure_dtypes_bwd)
+
+
def rms(xs):
  """Root-mean-square over all elements of all leaves in the tree."""
  leaves = jax.tree.leaves(xs)
  total = sum(leaf.size for leaf in leaves)
  sumsq = jnp.stack(
      [jnp.float32(jnp.square(leaf).sum()) for leaf in leaves]).sum()
  return jnp.sqrt(sumsq / jnp.float32(total))
+
+
def rope(x, ts=None, inverse=False, maxlen=4096):
  """Apply rotary position embeddings (RoPE) to a [B, T, H, D] tensor.

  Args:
    x: Features of shape [batch, time, heads, depth]; depth must be even.
    ts: Optional [B, T] integer positions; defaults to 0..T-1 per sequence.
    inverse: Rotate in the opposite direction (undoes a forward rope).
    maxlen: Base of the geometric frequency spacing.
  """
  B, T, _, D = x.shape
  if ts is None:
    ts = jnp.ones(B, jnp.int32)[:, None] * jnp.arange(T)[None, :]  # [B, T]
  assert ts.shape == (B, T), (ts.shape, (B, T))
  if inverse:
    # Negating the positions reverses the rotation angles.
    ts = -ts
  freq_exponents = (2.0 / D) * jnp.arange(D // 2)  # [D/2]
  timescale = maxlen ** freq_exponents
  radians = ts[:, :, None] / timescale[None, None, :]  # [B, T, D/2]
  radians = radians[..., None, :].astype(x.dtype)  # [B, T, 1, D/2]
  sin, cos = jnp.sin(radians), jnp.cos(radians)
  x1, x2 = jnp.split(x, 2, axis=-1)  # [B, T, H, D/2]
  # Rotate each (x1, x2) feature pair by its position-dependent angle.
  res = jnp.concatenate([x1 * cos - x2 * sin, x2 * cos + x1 * sin], axis=-1)
  return res
+
+
class Initializer:
  """Weight initializer configured by distribution, fan mode, and scale."""

  def __init__(self, dist='trunc_normal', fan='in', scale=1.0):
    self.dist = dist
    self.fan = fan
    self.scale = scale

  def __call__(self, shape, dtype=jnp.float32, fshape=None):
    """Sample an array of the given shape.

    Args:
      shape: Output shape (int or tuple of positive ints).
      dtype: Output dtype.
      fshape: Optional shape to compute fans from, when it differs from the
        output shape (e.g. block-structured kernels).
    """
    shape = (shape,) if isinstance(shape, int) else tuple(shape)
    assert all(isinstance(x, int) for x in shape), (
        shape, [type(x) for x in shape])
    assert all(x > 0 for x in shape), shape
    fanin, fanout = self.compute_fans(shape if fshape is None else fshape)
    fan = {
        'avg': (fanin + fanout) / 2, 'in': fanin, 'out': fanout, 'none': 1,
    }[self.fan]
    if self.dist == 'zeros':
      x = jnp.zeros(shape, dtype)
    elif self.dist == 'uniform':
      limit = np.sqrt(1 / fan)
      x = jax.random.uniform(nj.seed(), shape, dtype, -limit, limit)
    elif self.dist == 'normal':
      x = jax.random.normal(nj.seed(), shape)
      x *= np.sqrt(1 / fan)
    elif self.dist == 'trunc_normal':
      # 1.1368 compensates the variance lost by truncating at two stddevs.
      x = jax.random.truncated_normal(nj.seed(), -2, 2, shape)
      x *= 1.1368 * np.sqrt(1 / fan)
    elif self.dist == 'normed':
      # Random directions with columns normalized to unit L2 norm.
      x = jax.random.uniform(nj.seed(), shape, dtype, -1, 1)
      x *= (1 / jnp.linalg.norm(x.reshape((-1, shape[-1])), 2, 0))
    else:
      raise NotImplementedError(self.dist)
    x *= self.scale
    x = x.astype(dtype)
    return x

  def __repr__(self):
    return f'Initializer({self.dist}, {self.fan}, {self.scale})'

  def __eq__(self, other):
    # Comparing against non-Initializer objects (e.g. the string spec form
    # accepted by layer configs) previously raised AttributeError; return
    # NotImplemented instead so `==` falls back to False.
    if not isinstance(other, Initializer):
      return NotImplemented
    attributes = ('dist', 'fan', 'scale')
    return all(getattr(self, k) == getattr(other, k) for k in attributes)

  @staticmethod
  def compute_fans(shape):
    """Return (fan_in, fan_out) using the usual linear/conv convention."""
    if len(shape) == 0:
      return (1, 1)
    elif len(shape) == 1:
      return (1, shape[0])
    elif len(shape) == 2:
      return shape
    else:
      # Conv kernels: spatial dims contribute multiplicatively to both fans.
      space = math.prod(shape[:-2])
      return (shape[-2] * space, shape[-1] * space)
+
+
class Embed(nj.Module):
  """Learned embedding lookup for (possibly multi-dimensional) integer input.

  With combine=True the embeddings of all event positions are summed into a
  single vector; otherwise one embedding per event position is returned.
  """

  einit: str | Callable = Initializer('trunc_normal', 'out')
  combine: bool = False

  def __init__(self, classes, units, shape=()):
    # shape is the event shape of the integer input, () for scalar inputs.
    self.classes = classes
    self.units = units
    self.shape = shape

  def __call__(self, x):
    batch_shape = x.shape[:x.ndim - len(self.shape)]
    event_shape = x.shape[x.ndim - len(self.shape):]
    assert event_shape == self.shape, (self.shape, x.shape)
    N = math.prod(self.shape)
    K = self.classes
    D = self.units
    # Each event position has its own embedding table of K classes.
    shape = (*self.shape, self.classes, self.units)
    table = self.value('table', init(self.einit), shape)
    table = table.reshape(N, K, D)
    table = table.astype(COMPUTE_DTYPE)
    index = x.reshape(-1, N)
    # Advanced-indexing gather: for every flattened batch entry, pick the
    # per-position embedding row selected by the integer index.
    embed = table[jnp.arange(N), index]
    if self.combine:
      embed = embed.sum(-2).reshape(*batch_shape, self.units)
    else:
      embed = embed.reshape(*batch_shape, *self.shape, self.units)
    return embed
+
+
class Linear(nj.Module):
  """Dense layer whose output can be reshaped to a multi-dimensional shape."""

  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  outscale: float = 1.0

  def __init__(self, units):
    self.units = (units,) if isinstance(units, int) else tuple(units)

  def __call__(self, x):
    ensure_dtypes(x)
    size = math.prod(self.units)
    kernel = self.value(
        'kernel', self._scaled_winit, (x.shape[-1], size)).astype(x.dtype)
    y = x @ kernel
    if self.bias:
      y = y + self.value('bias', init(self.binit), size).astype(x.dtype)
    return y.reshape((*y.shape[:-1], *self.units))

  def _scaled_winit(self, *args, **kwargs):
    # Fold the output scale into the sampled initial weights.
    return init(self.winit)(*args, **kwargs) * self.outscale
+
+
class BlockLinear(nj.Module):
  """Block-diagonal dense layer: independent linear maps per feature block."""

  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  outscale: float = 1.0

  def __init__(self, units, blocks):
    assert isinstance(units, int), (units, type(units))
    assert blocks <= units and units % blocks == 0, (blocks, units)
    self.units = units
    self.blocks = blocks

  def __call__(self, x):
    ensure_dtypes(x)
    assert x.shape[-1] % self.blocks == 0, (x.shape, self.blocks)
    insize = x.shape[-1]
    bin_, bout = insize // self.blocks, self.units // self.blocks
    kernel = self.value(
        'kernel', self._scaled_winit,
        (self.blocks, bin_, bout)).astype(x.dtype)
    blocked = x.reshape((*x.shape[:-1], self.blocks, bin_))
    y = jnp.einsum('...ki,kio->...ko', blocked, kernel)
    y = y.reshape((*y.shape[:-2], self.units))
    if self.bias:
      y = y + self.value('bias', init(self.binit), self.units).astype(x.dtype)
    return y

  def _scaled_winit(self, *args, **kwargs):
    # Fold the output scale into the sampled initial weights.
    return init(self.winit)(*args, **kwargs) * self.outscale
+
+
class Conv2D(nj.Module):
  """2D convolution, optionally transposed for decoding/upsampling."""

  transp: bool = False
  groups: int = 1
  pad: str = 'same'
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  outscale: float = 1.0

  def __init__(self, depth, kernel, stride=1):
    self.depth = depth
    self.kernel = (kernel,) * 2 if isinstance(kernel, int) else kernel
    self.stride = stride

  def __call__(self, x):
    # Expects channels-last input (NHWC).
    ensure_dtypes(x)
    shape = (*self.kernel, x.shape[-1] // self.groups, self.depth)
    kernel = self.value('kernel', self._scaled_winit, shape).astype(x.dtype)
    if self.transp:
      assert self.pad == 'same', self.pad
      # Manual implementation of fractionally strided convolution because the
      # cuDNN implementation used by XLA has bugs and performance issues.
      # Upsample by repetition, then zero all but one entry per stride window
      # so the following unit-stride conv acts as a transposed convolution.
      x = x.repeat(self.stride, -2).repeat(self.stride, -3)
      maskh = ((jnp.arange(x.shape[-3]) - 1) % self.stride == 0)[:, None]
      maskw = ((jnp.arange(x.shape[-2]) - 1) % self.stride == 0)[None, :]
      x *= (maskh * maskw)[:, :, None]
      stride = (1, 1)
    else:
      stride = (self.stride, self.stride)
    x = jax.lax.conv_general_dilated(
        x, kernel, stride, self.pad.upper(),
        feature_group_count=self.groups,
        dimension_numbers=('NHWC', 'HWIO', 'NHWC'))
    if self.bias:
      x += self.value('bias', init(self.binit), self.depth).astype(x.dtype)
    return x

  def _scaled_winit(self, *args, **kwargs):
    # Fold the output scale into the sampled initial weights.
    return init(self.winit)(*args, **kwargs) * self.outscale
+
+
class Conv3D(nj.Module):
  """3D convolution over (..., T, H, W, C) inputs, optionally transposed."""

  transp: bool = False  # Transposed convolution for upsampling.
  groups: int = 1
  pad: str = 'same'
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')

  def __init__(self, depth, kernel, stride=1):
    self.depth = depth
    # Ints are shorthand for cubic kernels and uniform strides.
    self.kernel = (kernel,) * 3 if isinstance(kernel, int) else kernel
    self.stride = (stride,) * 3 if isinstance(stride, int) else stride

  def __call__(self, x):
    ensure_dtypes(x)
    if self.transp:
      # Grouped transposed convolution is not supported.
      assert self.groups == 1, self.groups
      shape = (*self.kernel, x.shape[-1], self.depth)
      kernel = self.value('kernel', init(self.winit), shape).astype(x.dtype)
      x = jax.lax.conv_transpose(
          x, kernel, self.stride, self.pad.upper(),
          dimension_numbers=('NTHWC', 'THWIO', 'NTHWC'))
    else:
      shape = (*self.kernel, x.shape[-1] // self.groups, self.depth)
      kernel = self.value('kernel', init(self.winit), shape).astype(x.dtype)
      x = jax.lax.conv_general_dilated(
          x, kernel, self.stride, self.pad.upper(),
          feature_group_count=self.groups,
          dimension_numbers=('NTHWC', 'THWIO', 'NTHWC'))
    if self.bias:
      x += self.value('bias', init(self.binit), self.depth).astype(x.dtype)
    return x
+
+
class Norm(nj.Module):
  """Normalization layer supporting 'none', 'rms', and 'layer' variants.

  Statistics are computed in float32 for stability and the result is cast
  back to the input dtype. An epsilon can be encoded in the impl string,
  e.g. 'rms1em8' selects RMS norm with eps=1e-8.
  """

  axis: tuple = (-1,)  # Axes to normalize over.
  eps: float = 1e-4
  scale: bool = True  # Learnable multiplicative parameter.
  shift: bool = True  # Learnable additive parameter (layer norm only).

  def __init__(self, impl):
    if '1em' in impl:
      # Parse eps from names like 'rms1em8' -> impl='rms', eps=1e-8.
      impl, exp = impl.split('1em')
      self._fields['eps'] = 10 ** -int(exp)
    self.impl = impl

  def __call__(self, x):
    ensure_dtypes(x)
    dtype = x.dtype
    x = f32(x)
    axis = [a % x.ndim for a in self.axis]
    # Parameter shape spans from the first normalized axis to the end,
    # with size one on non-normalized axes so it broadcasts.
    shape = [x.shape[i] if i in axis else 1 for i in range(min(axis), x.ndim)]
    if self.impl == 'none':
      pass
    elif self.impl == 'rms':
      mean2 = jnp.square(x).mean(axis, keepdims=True)
      mean2 = adc.checkpoint_name(mean2, 'small')
      scale = self._scale(shape, x.dtype)
      x = x * (jax.lax.rsqrt(mean2 + self.eps) * scale)
    elif self.impl == 'layer':
      mean = x.mean(axis, keepdims=True)
      mean2 = jnp.square(x).mean(axis, keepdims=True)
      mean2 = adc.checkpoint_name(mean2, 'small')
      # Variance via E[x^2] - E[x]^2, clamped against negative rounding.
      var = jnp.maximum(0, mean2 - jnp.square(mean))
      var = adc.checkpoint_name(var, 'small')
      scale = self._scale(shape, x.dtype)
      shift = self._shift(shape, x.dtype)
      x = (x - mean) * (jax.lax.rsqrt(var + self.eps) * scale) + shift
    else:
      raise NotImplementedError(self.impl)
    x = x.astype(dtype)
    return x

  def _scale(self, shape, dtype):
    # Constant one when disabled, otherwise a learned float32 parameter.
    if not self.scale:
      return jnp.ones(shape, dtype)
    return self.value('scale', jnp.ones, shape, f32).astype(dtype)

  def _shift(self, shape, dtype):
    # Constant zero when disabled, otherwise a learned float32 parameter.
    if not self.shift:
      return jnp.zeros(shape, dtype)
    return self.value('shift', jnp.zeros, shape, f32).astype(dtype)
+
+
class Attention(nj.Module):
  """Multi-head attention with optional grouped/multi-query KV heads.

  Setting kv_heads < heads shares each key/value head across a group of
  query heads. Supports RoPE position embeddings, optional QK
  normalization, attention dropout, and an optional boolean mask.
  """

  heads: int = 8
  kv_heads: int = 0  # 0 means one KV head per query head.
  dropout: float = 0.0
  rope: bool = True
  qknorm: str = 'none'
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  outscale: float = 1.0

  def __call__(self, x, mask=None, ts=None, training=True):
    kw = dict(bias=self.bias, winit=self.winit, binit=self.binit)
    B, T, D = x.shape
    kv_heads = self.kv_heads or self.heads
    assert self.heads % kv_heads == 0
    head_ratio = self.heads // kv_heads
    if head_ratio == 1:
      # Fused projection when queries, keys, and values are equal sized.
      qkv = self.sub('qkv', Linear, 3 * D, **kw)(x)
      q, k, v = jnp.split(qkv, 3, -1)
    else:
      # Smaller K/V projections when KV heads are shared across queries.
      q = self.sub('q', Linear, D, **kw)(x)
      k = self.sub('k', Linear, D // head_ratio, **kw)(x)
      v = self.sub('v', Linear, D // head_ratio, **kw)(x)
    q = einops.rearrange(q, 'b t (h d) -> b t h d', h=self.heads)
    k = einops.rearrange(k, 'b t (h d) -> b t h d', h=kv_heads)
    v = einops.rearrange(v, 'b t (h d) -> b t h d', h=kv_heads)

    if self.qknorm != 'none':
      q = self.sub('normq', Norm, self.qknorm)(q)
      k = self.sub('normk', Norm, self.qknorm)(k)

    if self.rope:
      q = rope(q, ts)
      k = rope(k, ts)

    # Group query heads so each KV head serves `head_ratio` query heads.
    q = einops.rearrange(q, 'b t (h g) d -> b t h g d', h=kv_heads)
    logits = einops.einsum(q, k, 'b tq h g d, b tk h d -> b h g tq tk')
    logits = logits * (1.0 / np.sqrt(k.shape[-1]))
    # Softmax in float32 for numerical stability.
    logits = f32(logits)
    if mask is not None:
      Tq, Tk = q.shape[1], k.shape[1]
      assert mask.shape == (B, Tq, Tk), (mask.shape, (B, Tq, Tk))
      mask = einops.rearrange(mask, 'b tq tk -> b 1 1 tq tk')
      logits = jnp.where(mask, logits, -1e30)
    weights = jax.nn.softmax(logits)
    weights = weights.astype(x.dtype)
    weights = dropout(weights, self.dropout, training)
    x = einops.einsum(weights, v, 'b h g tq tk, b tk h d -> b tq h g d')
    x = einops.rearrange(x, 'b t h g d -> b t (h g d)')
    x = self.sub('proj', Linear, D, **kw, outscale=self.outscale)(x)
    return x
+
+
class DictConcat:
  """Encodes a dict of spaces into a single flat feature vector.

  Discrete entries are one-hot encoded; continuous entries are squished
  and cast. Unavailable values are masked to zero. Each entry is
  flattened after the first `fdims - 1` feature dims and all entries are
  concatenated along the last axis in sorted key order.
  """

  def __init__(self, spaces, fdims, squish=lambda x: x):
    assert 1 <= fdims, fdims
    # Sorted keys give a deterministic concatenation order.
    self.keys = sorted(spaces.keys())
    self.spaces = spaces
    self.fdims = fdims
    self.squish = squish

  def __call__(self, xs):
    assert all(k in xs for k in self.spaces), (self.spaces, xs.keys())
    # Infer the number of leading batch dims from the first entry.
    bdims = xs[self.keys[0]].ndim - len(self.spaces[self.keys[0]].shape)
    ys = []
    for key in self.keys:
      space = self.spaces[key]
      x = xs[key]
      m = available(x, bdims=bdims)
      x = mask(x, m)
      assert x.shape[bdims:] == space.shape, (key, bdims, space.shape, x.shape)
      if space.dtype == jnp.uint8 and len(space.shape) in (2, 3):
        raise NotImplementedError('Images are not supported.')
      elif space.discrete:
        # One-hot requires the same number of classes across all dims.
        classes = np.asarray(space.classes).flatten()
        assert (classes == classes[0]).all(), classes
        classes = classes[0].item()
        x = x.astype(jnp.int32)
        x = jax.nn.one_hot(x, classes, dtype=COMPUTE_DTYPE)
      else:
        x = self.squish(x)
        x = x.astype(COMPUTE_DTYPE)
      # Re-apply the mask after encoding, then flatten trailing dims.
      x = mask(x, m)
      x = x.reshape((*x.shape[:bdims + self.fdims - 1], -1))
      ys.append(x)
    return jnp.concatenate(ys, -1)
+
+
class DictEmbed(nj.Module):
  """Embeds a dict of spaces into a shared feature space by summation.

  Each entry is embedded into `units` features (lookup table or one-hot
  plus linear, depending on `impl`) and all embeddings are summed
  together with a learned base embedding.
  """

  squish: Callable = lambda x: x  # Applied to continuous values first.
  padone: bool = True
  bias: bool = True
  einit: str | Callable = Initializer('trunc_normal', 'out')
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  impl: str = 'onehot'  # 'lookup' or 'onehot'.

  def __init__(self, spaces, units):
    self.keys = sorted(spaces.keys())
    self.spaces = spaces
    self.units = units
    self.ekw = dict(einit=self.einit)
    self.lkw = dict(bias=self.bias, winit=self.winit, binit=self.binit)

  def __call__(self, xs, bshape):
    assert isinstance(bshape, tuple), bshape
    assert all(k in xs for k in self.spaces), (self.spaces, xs.keys())
    ys = []
    # Learned base embedding, broadcast over the batch shape.
    init = self.value('init', self.einit, (self.units,))
    init = jnp.broadcast_to(init, (*bshape, self.units))
    init = COMPUTE_DTYPE(init)
    ys.append(init)
    for key in self.keys:
      try:
        space = self.spaces[key]
        x = xs[key]
        assert x.dtype == space.dtype, (key, space.dtype, x.dtype, x.shape)
        # Zero out unavailable entries so they do not affect the sum.
        m = available(x, bdims=len(bshape))
        x = mask(x, m)
        if space.discrete:
          if space.dtype == jnp.uint8 and len(space.shape) in (2, 3):
            raise NotImplementedError('Images are not supported.')
          classes = int(np.asarray(space.classes).max())
          assert classes <= 256, (key, space, classes)
          if self.impl == 'lookup':
            x = self.sub(
                key, Embed, classes, self.units, space.shape,
                combine=True, **self.ekw)(x)
            # x = x.reshape((*x.shape[:len(bshape)], -1))
          elif self.impl == 'onehot':
            x = jax.nn.one_hot(x, classes, dtype=COMPUTE_DTYPE)
            x = x.reshape((*x.shape[:len(bshape)], -1))
            x = self.sub(key, Linear, self.units, **self.lkw)(x)
          else:
            raise NotImplementedError(self.impl)
        else:
          x = self.squish(x)
          x = x.astype(COMPUTE_DTYPE)
          x = x.reshape((*x.shape[:len(bshape)], -1))
          x = self.sub(key, Linear, self.units, **self.lkw)(x)
        x = mask(x, m)
        ys.append(x)
      except Exception:
        # Identify the failing entry before re-raising.
        print(f"Error encoding key '{key}' with space {space}.")
        raise
    x = sum(ys)
    return x
+
+
class MLP(nj.Module):
  """Multi-layer perceptron of repeated Linear -> Norm -> activation."""

  act: str = 'silu'
  norm: str = 'rms'
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')

  def __init__(self, layers=5, units=1024):
    self.layers = layers
    self.units = units
    self.kw = dict(bias=self.bias, winit=self.winit, binit=self.binit)

  def __call__(self, x):
    # Flatten all leading dims into one batch dim for the layer stack.
    batch_shape = x.shape[:-1]
    x = x.astype(COMPUTE_DTYPE)
    x = x.reshape((-1, x.shape[-1]))
    for index in range(self.layers):
      x = self.sub(f'linear{index}', Linear, self.units, **self.kw)(x)
      x = self.sub(f'norm{index}', Norm, self.norm)(x)
      x = act(self.act)(x)
    # Restore the original leading dims.
    return x.reshape((*batch_shape, x.shape[-1]))
+
+
class Transformer(nj.Module):
  """Pre-norm transformer encoder stack with a final output norm.

  Each layer applies attention and a feed-forward block (optionally
  gated), both with residual connections.
  """

  units: int = 1024
  layers: int = 12
  heads: int = 8
  ffup: int = 4  # Feed-forward expansion factor.
  act: str = 'silu'
  norm: str = 'rms'
  glu: bool = False  # Use a gated (GLU-style) feed-forward block.
  rope: bool = True
  qknorm: str = 'none'
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  outscale: float = 1.0  # Init scale of residual-writing projections.

  def __call__(self, x, mask=None, ts=None, training=True):
    kw = {k: getattr(self, k) for k in ('bias', 'winit', 'binit')}
    ak = {k: getattr(self, k) for k in ('heads', 'rope', 'qknorm', 'outscale')}
    D = x.shape[-1]
    assert D == self.units, (D, self.units)
    for i in range(self.layers):
      with nj.scope(f'layer{i}'):
        # Attention block: pre-norm plus residual.
        skip = x
        x = self.sub('norm1', Norm, self.norm)(x)
        x = self.sub('mha', Attention, **kw, **ak)(x, mask, ts, training)
        x += skip
        # Feed-forward block: pre-norm plus residual.
        skip = x
        x = self.sub('norm2', Norm, self.norm)(x)
        if self.glu:
          # Shrink the hidden size by 2/3 (rounded down to a multiple of
          # 32) to keep parameters comparable to the non-gated variant.
          U = max(D, int((D * self.ffup * 2 / 3) // 32 * 32))
          ff1 = self.sub('ff1', Linear, U, **kw)
          ff2 = self.sub('ff2', Linear, U, **kw)
          ff3 = self.sub('ff3', Linear, D, **kw, outscale=self.outscale)
          x = ff3(act(self.act)(ff1(x)) * ff2(x))
        else:
          ff1 = self.sub('ff1', Linear, D * self.ffup, **kw)
          ff2 = self.sub('ff2', Linear, D, **kw, outscale=self.outscale)
          x = ff2(act(self.act)(ff1(x)))
        x += skip
    x = self.sub('outnorm', Norm, self.norm)(x)
    return x
+
+
class GRU(nj.Module):
  """Minimal GRU with input normalization and episode-reset handling."""

  units: int = 1024
  bias: bool = True
  winit: str | Callable = Initializer('trunc_normal')
  binit: str | Callable = Initializer('zeros')
  norm: str = 'rms'
  update_bias: float = -1.0  # Biases the update gate towards keeping state.

  def initial(self, batch_size):
    # Zero-initialized recurrent state.
    return jnp.zeros((batch_size, self.units), COMPUTE_DTYPE)

  def __call__(self, carry, inputs, resets, single=False):
    """Processes one step (single=True) or scans over time axis 1."""
    assert carry.dtype == COMPUTE_DTYPE, carry.dtype
    assert inputs.dtype == COMPUTE_DTYPE, inputs.dtype
    assert resets.dtype == bool, resets.dtype
    if single:
      return self.step(carry, inputs, resets)
    carry, outputs = nj.scan(
        lambda carry, args: self.step(carry, *args),
        carry, (inputs, resets), axis=1)
    return carry, outputs

  def step(self, carry, inp, reset):
    # NOTE: When passing previous actions as input, ensure to zero out past
    # actions on is_first and clip actions to bounds if needed.
    kw = dict(bias=self.bias, winit=self.winit, binit=self.binit)
    # Zero the recurrent state at episode boundaries.
    carry = mask(carry, ~reset)
    x = jnp.concatenate([carry, inp], -1)
    x = self.sub('norm', Norm, self.norm)(x)
    # One fused projection for reset gate, candidate, and update gate.
    x = self.sub('linear', Linear, 3 * self.units, **kw)(x)
    res, cand, update = jnp.split(x, 3, -1)
    cand = jnp.tanh(jax.nn.sigmoid(res) * cand)
    update = jax.nn.sigmoid(update + self.update_bias)
    carry = output = update * cand + (1 - update) * carry
    return carry, output
+
diff --git a/models/embodied/jax/opt.py b/models/embodied/jax/opt.py
new file mode 100644
index 0000000000000000000000000000000000000000..d11b1465b660bd34fe9497eac1128ee2c4e28749
--- /dev/null
+++ b/models/embodied/jax/opt.py
@@ -0,0 +1,164 @@
+import math
+
+import jax
+import jax.numpy as jnp
+import ninjax as nj
+import optax
+
+from . import internal
+from . import nets
+
+f32 = jnp.float32
+i32 = jnp.int32
+sg = jax.lax.stop_gradient
+
+
class Optimizer(nj.Module):
  """Applies an optax optimizer to the parameters of ninjax modules.

  Handles gradient computation, cross-device gradient averaging, dynamic
  float16 loss scaling, parameter updates, and diagnostic metrics.
  """

  summary_depth: int = 2  # Name-tree depth for the parameter summary.

  def __init__(self, modules, opt):
    modules = modules if isinstance(modules, (list, tuple)) else (modules,)
    self.modules = modules
    self.opt = opt
    self.step = nj.Variable(jnp.array, 0, i32, name='step')
    # Loss scaling is only needed for float16 compute; bfloat16 has the
    # same exponent range as float32.
    self.scaling = (nets.COMPUTE_DTYPE == jnp.float16)
    if self.scaling:
      # Skip updates with non-finite grads instead of corrupting params.
      self.opt = optax.apply_if_finite(self.opt, max_consecutive_errors=1000)
      self.grad_scale = nj.Variable(jnp.array, 1e4, f32, name='grad_scale')
      self.good_steps = nj.Variable(jnp.array, 0, i32, name='good_steps')

  def __call__(self, lossfn, *args, has_aux=False, **kwargs):
    """Runs lossfn, applies one optimizer step, and returns metrics.

    The loss must be a float32 scalar. Returns (metrics, aux) when
    has_aux is true, otherwise just metrics.
    """
    metrics = {}

    def lossfn2(*args, **kwargs):
      outs = lossfn(*args, **kwargs)
      loss, aux = outs if has_aux else (outs, None)
      assert loss.dtype == f32, (self.name, loss.dtype)
      assert loss.shape == (), (self.name, loss.shape)
      if self.scaling:
        # Scale the loss up so float16 gradients do not underflow.
        loss *= sg(self.grad_scale.read())
      return loss, aux

    loss, params, grads, aux = nj.grad(
        lossfn2, self.modules, has_aux=True)(*args, **kwargs)
    if self.scaling:
      # Report the unscaled loss to the user.
      loss *= 1 / self.grad_scale.read()

    counts = {k: math.prod(v.shape) for k, v in params.items()}
    if nj.creating():
      print(self._summarize_params(counts, self.summary_depth))

    # Average gradients across data-parallel devices if applicable.
    axes = internal.get_data_axes()
    if axes:
      grads = jax.tree.map(lambda x: jax.lax.pmean(x, axes), grads)

    if self.scaling:
      # Undo the loss scaling on the gradients before the update.
      invscale = 1 / self.grad_scale.read()
      grads = jax.tree.map(lambda x: x * invscale, grads)

    state = self.sub('state', nj.Tree, self.opt.init, params)
    updates, new_state = self.opt.update(grads, state.read(), params)
    nj.context().update(optax.apply_updates(params, updates))
    state.write(new_state)
    grad_norm = optax.global_norm(grads)
    if self.scaling:
      self._update_scale(grads, jnp.isfinite(grad_norm))
      grad_norm = jnp.where(jnp.isfinite(grad_norm), grad_norm, jnp.nan)
      # Only count steps whose gradients were finite.
      self.step.write(self.step.read() + i32(jnp.isfinite(grad_norm)))
      metrics['grad_scale'] = self.grad_scale.read()
      metrics['grad_overflow'] = f32(~jnp.isfinite(grad_norm))
    else:
      self.step.write(self.step.read() + 1)
    metrics['loss'] = loss.mean()
    metrics['updates'] = self.step.read()
    metrics['grad_norm'] = grad_norm
    metrics['grad_rms'] = nets.rms(grads)
    metrics['update_rms'] = nets.rms(updates)
    metrics['param_rms'] = nets.rms([x.values for x in self.modules])
    metrics['param_count'] = jnp.array(list(counts.values()), f32).sum()
    metrics = {f'{self.name}/{k}': v for k, v in metrics.items()}
    return (metrics, aux) if has_aux else metrics

  def _update_scale(self, grads, finite):
    # Dynamic loss scaling: halve on overflow, double after 1000
    # consecutive finite steps, otherwise keep the current scale.
    keep = (finite & (self.good_steps.read() < 1000))
    incr = (finite & (self.good_steps.read() >= 1000))
    decr = ~finite
    self.good_steps.write(i32(keep) * (self.good_steps.read() + 1))
    self.grad_scale.write(jnp.clip(
        f32(keep) * self.grad_scale.read() +
        f32(incr) * self.grad_scale.read() * 2 +
        f32(decr) * self.grad_scale.read() / 2, 1e-4, 1e5))
    return finite

  def _summarize_params(self, counts, depth):
    # Group parameter counts by name prefix up to the given depth.
    lines = []
    pfxs = []
    for key in counts:
      parts = key.split('/')
      pfxs += ['/'.join(parts[: i + 1]) for i in range(min(len(parts), depth))]
    subcounts = {
        prefix: sum(v for k, v in counts.items() if k.startswith(prefix))
        for prefix in set(pfxs)}
    lines = [f'Optimizer {self.name} has {sum(counts.values()):,} params:']
    for prefix, count in sorted(subcounts.items(), key=lambda x: -x[1]):
      lines.append(f'{count:>14,} {prefix}')
    return '\n'.join(lines)
+
+
def clip_by_agc(clip=0.3, pmin=1e-3):
  """Adaptive gradient clipping as an optax transformation.

  Limits each update's norm relative to the norm of its parameter, with
  `pmin` as a lower bound on the parameter norm so that small parameters
  can still receive updates. A falsy `clip` disables the transform.
  """

  def init_fn(params):
    return ()

  def update_fn(updates, state, params=None):
    def rescale(param, update):
      update_norm = jnp.linalg.norm(update.flatten(), 2)
      param_norm = jnp.linalg.norm(param.flatten(), 2)
      limit = clip * jnp.maximum(pmin, param_norm)
      # Shrink the update only when it exceeds its allowed norm.
      return update * (1 / jnp.maximum(1.0, update_norm / limit))
    if clip:
      updates = jax.tree.map(rescale, params, updates)
    return updates, ()

  return optax.GradientTransformation(init_fn, update_fn)
+
+
def scale_by_rms(beta=0.999, eps=1e-8):
  """Optax transformation dividing updates by a debiased running RMS."""

  def init_fn(params):
    # Running second moment in float32, plus a step counter for debiasing.
    nu = jax.tree.map(lambda t: jnp.zeros_like(t, f32), params)
    return (jnp.zeros((), i32), nu)

  def update_fn(updates, state, params=None):
    step, nu = state
    step = optax.safe_int32_increment(step)
    # EMA of squared updates.
    nu = jax.tree.map(
        lambda v, u: beta * v + (1 - beta) * (u * u), nu, updates)
    nu_hat = optax.bias_correction(nu, beta, step)
    scaled = jax.tree.map(
        lambda u, v: u / (jnp.sqrt(v) + eps), updates, nu_hat)
    return scaled, (step, nu)

  return optax.GradientTransformation(init_fn, update_fn)
+
+
def scale_by_momentum(beta=0.9, nesterov=False):
  """Optax transformation applying debiased, optionally Nesterov, momentum."""

  def init_fn(params):
    # First moment in float32, plus a step counter for debiasing.
    mu = jax.tree.map(lambda t: jnp.zeros_like(t, f32), params)
    return (jnp.zeros((), i32), mu)

  def update_fn(updates, state, params=None):
    step, mu = state
    step = optax.safe_int32_increment(step)
    mu = optax.update_moment(updates, mu, beta, 1)
    if nesterov:
      # Look-ahead: apply the moment update once more before debiasing.
      mu_hat = optax.bias_correction(
          optax.update_moment(updates, mu, beta, 1), beta, step)
    else:
      mu_hat = optax.bias_correction(mu, beta, step)
    return mu_hat, (step, mu)

  return optax.GradientTransformation(init_fn, update_fn)
diff --git a/models/embodied/jax/outs.py b/models/embodied/jax/outs.py
new file mode 100644
index 0000000000000000000000000000000000000000..c659e1e9024a794967529f6a61b7ff323f5174c7
--- /dev/null
+++ b/models/embodied/jax/outs.py
@@ -0,0 +1,330 @@
+import functools
+
+import jax
+import jax.numpy as jnp
+
+i32 = jnp.int32
+f32 = jnp.float32
+sg = jax.lax.stop_gradient
+
+
class Output:
  """Interface for prediction heads that behave like distributions.

  Subclasses implement a subset of pred/sample/logp/entropy/kl; `loss`
  defaults to the negative log-likelihood of the (detached) target and
  `prob` to the exponentiated log-probability.
  """

  def __repr__(self):
    name = type(self).__name__
    pred = self.pred()
    return f'{name}({pred.dtype}, shape={pred.shape})'

  def pred(self):
    # Deterministic point prediction.
    raise NotImplementedError

  def loss(self, target):
    # Default loss is the NLL; targets do not receive gradients.
    return -self.logp(sg(target))

  def sample(self, seed, shape=()):
    raise NotImplementedError

  def logp(self, event):
    raise NotImplementedError

  def prob(self, event):
    return jnp.exp(self.logp(event))

  def entropy(self):
    raise NotImplementedError

  def kl(self, other):
    raise NotImplementedError
+
+
class Agg(Output):
  """Treats the trailing `dims` axes of a wrapped output as event dims.

  Losses, entropies, and KLs are aggregated over those axes with `agg`
  (sum by default); log-probabilities are always summed, matching the
  joint log-likelihood of independent event dimensions.
  """

  def __init__(self, output, dims, agg=jnp.sum):
    self.output = output
    # The last `dims` axes are treated as event dimensions.
    self.axes = [-i for i in range(1, dims + 1)]
    self.agg = agg

  def __repr__(self):
    name = type(self.output).__name__
    pred = self.pred()
    dims = len(self.axes)
    return f'{name}({pred.dtype}, shape={pred.shape}, agg={dims})'

  def pred(self):
    return self.output.pred()

  def loss(self, target):
    loss = self.output.loss(target)
    return self.agg(loss, self.axes)

  def sample(self, seed, shape=()):
    return self.output.sample(seed, shape)

  def logp(self, event):
    # Independent event dims: joint log-prob is the sum of log-probs.
    return self.output.logp(event).sum(self.axes)

  def prob(self, event):
    # The joint probability is the product over event dims, i.e. the exp
    # of the summed log-prob. (Previously this summed per-dim
    # probabilities, which is not a probability of anything.)
    return jnp.exp(self.logp(event))

  def entropy(self):
    entropy = self.output.entropy()
    return self.agg(entropy, self.axes)

  def kl(self, other):
    assert isinstance(other, Agg), other
    kl = self.output.kl(other.output)
    return self.agg(kl, self.axes)
+
+
class Frozen:
  """Wraps an output so all method results are detached from autodiff."""

  def __init__(self, output):
    self.output = output

  def __getattr__(self, name):
    # Let dunder lookups fail fast so Python protocols keep working.
    if name.startswith('__'):
      raise AttributeError(name)
    try:
      method = getattr(self.output, name)
    except AttributeError:
      raise ValueError(name)
    return functools.partial(self._wrapper, method)

  def _wrapper(self, method, *args, **kwargs):
    # Call through to the wrapped output and stop gradients on the result.
    return sg(method(*args, **kwargs))
+
+
class Concat:
  """Presents several outputs as one, split along a given axis.

  Method arguments are sliced at `midpoints` along `axis`, dispatched to
  the corresponding wrapped output, and the results are concatenated
  back together along the same axis.
  """

  def __init__(self, outputs, midpoints, axis):
    assert len(midpoints) == len(outputs) - 1
    self.outputs = outputs
    self.midpoints = tuple(midpoints)
    self.axis = axis

  def __getattr__(self, name):
    # Let dunder lookups fail fast so Python protocols keep working.
    if name.startswith('__'):
      raise AttributeError(name)
    try:
      fns = [getattr(output, name) for output in self.outputs]
    except AttributeError:
      raise ValueError(name)
    return functools.partial(self._wrapper, fns)

  def _wrapper(self, fns, *args, **kwargs):
    starts = (None,) + self.midpoints
    stops = self.midpoints + (None,)
    pieces = []
    for fn, start, stop in zip(fns, starts, stops):
      # Build an index that slices only the target axis.
      index = [slice(None, None, None)] * (self.axis + 1)
      index[self.axis] = slice(start, stop, None)
      index = tuple(index)
      sub_args, sub_kwargs = jax.tree.map(lambda x: x[index], (args, kwargs))
      pieces.append(fn(*sub_args, **sub_kwargs))
    return jax.tree.map(lambda *xs: jnp.concatenate(xs, self.axis), *pieces)
+
+
class MSE(Output):
  """Squared-error head: pred is the mean, loss is the squared distance
  to the (optionally squashed) target."""

  def __init__(self, mean, squash=None):
    self.mean = f32(mean)
    self.squash = squash or (lambda x: x)

  def pred(self):
    return self.mean

  def loss(self, target):
    assert jnp.issubdtype(target.dtype, jnp.floating), target.dtype
    assert self.mean.shape == target.shape, (self.mean.shape, target.shape)
    # Targets are squashed into the prediction space and detached.
    squashed = sg(self.squash(f32(target)))
    return jnp.square(self.mean - squashed)
+
+
class Huber(Output):
  """Soft Huber (Charbonnier) head: quadratic near zero and linear for
  large errors, controlled by `eps`."""

  def __init__(self, mean, eps=1.0):
    self.mean = f32(mean)
    self.eps = eps

  def pred(self):
    return self.mean

  def loss(self, target):
    assert jnp.issubdtype(target.dtype, jnp.floating), target.dtype
    assert self.mean.shape == target.shape, (self.mean.shape, target.shape)
    error = self.mean - sg(f32(target))
    # sqrt(e^2 + eps^2) - eps is zero at e=0 and ~|e| for large errors.
    return jnp.sqrt(jnp.square(error) + jnp.square(self.eps)) - self.eps
+
+
class Normal(Output):
  """Diagonal Gaussian with a fixed or provided standard deviation."""

  def __init__(self, mean, stddev=1.0):
    self.mean = f32(mean)
    self.stddev = jnp.broadcast_to(f32(stddev), self.mean.shape)

  def pred(self):
    return self.mean

  def sample(self, seed, shape=()):
    # Reparameterized sampling from standard normal noise.
    noise = jax.random.normal(seed, shape + self.mean.shape, f32)
    return noise * self.stddev + self.mean

  def logp(self, event):
    assert jnp.issubdtype(event.dtype, jnp.floating), event.dtype
    return jax.scipy.stats.norm.logpdf(f32(event), self.mean, self.stddev)

  def entropy(self):
    # Differential entropy of a Gaussian.
    return 0.5 * jnp.log(2 * jnp.pi * jnp.square(self.stddev)) + 0.5

  def kl(self, other):
    # Closed-form KL between two diagonal Gaussians.
    assert isinstance(other, type(self)), (self, other)
    return 0.5 * (
        jnp.square(self.stddev / other.stddev) +
        jnp.square(other.mean - self.mean) / jnp.square(other.stddev) +
        2 * jnp.log(other.stddev) - 2 * jnp.log(self.stddev) - 1)
+
+
class Binary(Output):
  """Bernoulli distribution parameterized by a single logit."""

  def __init__(self, logit):
    self.logit = f32(logit)

  def pred(self):
    # Deterministic prediction: True iff probability exceeds 0.5.
    return (self.logit > 0)

  def logp(self, event):
    event = f32(event)
    # Numerically stable log(p) and log(1 - p) via log-sigmoid.
    logp = jax.nn.log_sigmoid(self.logit)
    lognotp = jax.nn.log_sigmoid(-self.logit)
    return event * logp + (1 - event) * lognotp

  def sample(self, seed, shape=()):
    prob = jax.nn.sigmoid(self.logit)
    # jax.random.bernoulli takes (key, p, shape); the previous extra `-1`
    # positional argument did not match the API and raised a TypeError.
    return jax.random.bernoulli(seed, prob, shape + self.logit.shape)
+
+
class Categorical(Output):
  """Categorical distribution over the last axis of `logits`.

  A nonzero `unimix` mixes the softmax probabilities with a uniform
  distribution, keeping every class probability strictly positive.
  """

  def __init__(self, logits, unimix=0.0):
    logits = f32(logits)
    if unimix:
      probs = jax.nn.softmax(logits, -1)
      uniform = jnp.ones_like(probs) / probs.shape[-1]
      probs = (1 - unimix) * probs + unimix * uniform
      logits = jnp.log(probs)
    self.logits = logits

  def pred(self):
    # Mode of the distribution.
    return jnp.argmax(self.logits, -1)

  def sample(self, seed, shape=()):
    return jax.random.categorical(
        seed, self.logits, -1, shape + self.logits.shape[:-1])

  def logp(self, event):
    # Select the log-probability of the given integer class indices.
    onehot = jax.nn.one_hot(event, self.logits.shape[-1])
    return (jax.nn.log_softmax(self.logits, -1) * onehot).sum(-1)

  def entropy(self):
    logprob = jax.nn.log_softmax(self.logits, -1)
    prob = jax.nn.softmax(self.logits, -1)
    return -(prob * logprob).sum(-1)

  def kl(self, other):
    logprob = jax.nn.log_softmax(self.logits, -1)
    logother = jax.nn.log_softmax(other.logits, -1)
    prob = jax.nn.softmax(self.logits, -1)
    return (prob * (logprob - logother)).sum(-1)
+
+
class OneHot(Output):
  """Categorical over one-hot vectors with straight-through gradients."""

  def __init__(self, logits, unimix=0.0):
    self.dist = Categorical(logits, unimix)

  def pred(self):
    return self._straight_through(self.dist.pred())

  def sample(self, seed, shape=()):
    return self._straight_through(self.dist.sample(seed, shape))

  def logp(self, event):
    # Events are already one-hot, so no index lookup is needed.
    return (jax.nn.log_softmax(self.dist.logits, -1) * event).sum(-1)

  def entropy(self):
    return self.dist.entropy()

  def kl(self, other):
    return self.dist.kl(other.dist)

  def _straight_through(self, index):
    # Hard one-hot forward value whose gradient flows through the probs.
    hard = jax.nn.one_hot(index, self.dist.logits.shape[-1], dtype=f32)
    soft = jax.nn.softmax(self.dist.logits, -1)
    return sg(hard) + (soft - sg(soft))
+
+
class TwoHot(Output):
  """Discretized regression head over a fixed set of `bins`.

  Scalar targets are encoded as a categorical that splits probability
  mass between the two neighboring bins; `squash`/`unsquash` allow
  learning in a transformed space.
  """

  def __init__(self, logits, bins, squash=None, unsquash=None):
    logits = f32(logits)
    assert logits.shape[-1] == len(bins), (logits.shape, len(bins))
    assert bins.dtype == f32, bins.dtype
    self.logits = logits
    self.probs = jax.nn.softmax(logits)
    self.bins = jnp.array(bins)
    self.squash = squash or (lambda x: x)
    self.unsquash = unsquash or (lambda x: x)

  def pred(self):
    # The naive implementation results in a non-zero result even if the bins
    # are symmetric and the probabilities uniform, because the sum operation
    # goes left to right, accumulating numerical errors. Instead, we use a
    # symmetric sum to ensure that the predicted rewards and values are
    # actually zero at initialization.
    # return self.unsquash((self.probs * self.bins).sum(-1))
    n = self.logits.shape[-1]
    if n % 2 == 1:
      # Odd bin count: pair the mirrored halves around the middle bin.
      m = (n - 1) // 2
      p1 = self.probs[..., :m]
      p2 = self.probs[..., m: m + 1]
      p3 = self.probs[..., m + 1:]
      b1 = self.bins[..., :m]
      b2 = self.bins[..., m: m + 1]
      b3 = self.bins[..., m + 1:]
      wavg = (p2 * b2).sum(-1) + ((p1 * b1)[..., ::-1] + (p3 * b3)).sum(-1)
      return self.unsquash(wavg)
    else:
      # Even bin count: pair the mirrored halves directly.
      p1 = self.probs[..., :n // 2]
      p2 = self.probs[..., n // 2:]
      b1 = self.bins[..., :n // 2]
      b2 = self.bins[..., n // 2:]
      wavg = ((p1 * b1)[..., ::-1] + (p2 * b2)).sum(-1)
      return self.unsquash(wavg)

  def loss(self, target):
    assert target.dtype == f32, target.dtype
    target = sg(self.squash(target))
    # Indices of the bins just below and just above each target value,
    # clipped to the valid range for out-of-range targets.
    below = (self.bins <= target[..., None]).astype(i32).sum(-1) - 1
    above = len(self.bins) - (
        self.bins > target[..., None]).astype(i32).sum(-1)
    below = jnp.clip(below, 0, len(self.bins) - 1)
    above = jnp.clip(above, 0, len(self.bins) - 1)
    equal = (below == above)
    # Split probability mass inversely proportional to the distance to
    # each neighboring bin; exact hits place all mass on one bin.
    dist_to_below = jnp.where(equal, 1, jnp.abs(self.bins[below] - target))
    dist_to_above = jnp.where(equal, 1, jnp.abs(self.bins[above] - target))
    total = dist_to_below + dist_to_above
    weight_below = dist_to_above / total
    weight_above = dist_to_below / total
    target = (
        jax.nn.one_hot(below, len(self.bins)) * weight_below[..., None] +
        jax.nn.one_hot(above, len(self.bins)) * weight_above[..., None])
    # Cross entropy between the two-hot target and the predicted logits.
    log_pred = self.logits - jax.scipy.special.logsumexp(
        self.logits, -1, keepdims=True)
    return -(target * log_pred).sum(-1)
diff --git a/models/embodied/jax/transform.py b/models/embodied/jax/transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc1b27ec634299de0f16ccb6bcda418dbd46e646
--- /dev/null
+++ b/models/embodied/jax/transform.py
@@ -0,0 +1,185 @@
+import threading
+import re
+from collections import Counter
+
+import jax
+from jax.sharding import PartitionSpec as P
+import ninjax as nj
+
+from . import nets as nn
+
+
+LOCK = threading.Lock()
+
+
+# Add tracer_sharding attribute to abstract values. This allows us to use
+# shard_map based on layer callback shardings, even though JAX does not
+# currently expose the shardings of tracer objects.
+TRACER_SHARDINGS = {}
+
+
def init(
    fn, mesh, arg_shardings,
    param_partition_rules=(),
    act_partition_rules=(),
    static_argnums=(),
    dummy_inputs=(),
    print_partition=False,
):
  """Creates and shards the parameters of a ninjax function.

  Traces `fn` on `dummy_inputs` to discover its parameters, resolves a
  sharding for every parameter from `param_partition_rules`, and returns
  the created params together with their shardings.
  """

  def init(fun, **jit_kwargs):
    # Wrap into a pure function that returns only the created state.
    if not getattr(fun, '_is_pure', False):
      fun = nj.pure(fun)
    def wrapper(*args, **kwargs):
      state, out = fun(*args, create=True, modify=True, ignore=True, **kwargs)
      del out
      return state, ()
    return wrapper
  fn = init(fn)

  def fn(*args, inner=fn):
    # Install the activation sharding callback while tracing runs.
    params, seed, *args = args
    old = nn.LAYER_CALLBACK
    nn.LAYER_CALLBACK = create_layer_callback(mesh, act_partition_rules)
    params, _ = inner(params, *args, seed=seed)
    nn.LAYER_CALLBACK = old
    return params

  fn = jax.jit(fn, static_argnums=static_argnums)

  # First trace only for shapes, to resolve the parameter shardings.
  params_shapes = fn.eval_shape(*dummy_inputs)
  params_sharding, grouping = resolve_rules(
      params_shapes, param_partition_rules, mesh)
  if print_partition:
    print_grouping(grouping)

  # Re-jit with explicit output shardings and actually create the params.
  fn = jax.jit(fn, arg_shardings, params_sharding, static_argnums, None)
  params = fn(*dummy_inputs)

  return params, params_sharding
+
+
def apply(
    fn, mesh, in_shardings, out_shardings,
    partition_rules=(),
    static_argnums=(),
    single_output=False,
    return_params=False,
    donate_params=False,
    # shard_map specific
    split_rng=True,
    use_shardmap=False,
    first_outnums=(),
):
  """Compiles a ninjax function for execution with the given shardings.

  Builds a chain of wrappers around `fn` that unpack params and seed,
  optionally run inside shard_map with per-device RNG streams, and
  finally jit-compiles with the requested in/out shardings. Output
  indices in `first_outnums` are taken from the first device only.
  """

  if single_output:
    assert len(out_shardings) == 1

  def fn(*args, inner=fn):
    # Unpack (params, seed, *rest) and call the pure inner function.
    if donate_params:
      donated, allocated, seed, *args = args
      params = {**donated, **allocated}
    else:
      params, seed, *args = args
    if use_shardmap and len(mesh.devices) > 1 and split_rng:
      # Give every device its own RNG stream inside shard_map.
      seed = jax.random.fold_in(seed, jax.lax.axis_index('d'))
    params, outs = inner(params, *args, seed=seed)
    outs = (outs,) if single_output else outs
    assert isinstance(outs, tuple)
    return (params, *outs) if return_params else outs

  if use_shardmap and len(mesh.devices) > 1:

    def fn(*args, inner=fn):
      # Add a leading per-device axis to selected outputs inside the map.
      outs = list(inner(*args))
      for i in first_outnums:
        outs[i] = jax.tree.map(lambda x: x[None], outs[i])
      return tuple(outs)

    from jax.experimental.shard_map import shard_map
    ispecs = list(jax.tree.map(lambda s: s.spec, in_shardings))
    # static_argnums have no shardings; insert placeholder specs.
    for i in sorted(static_argnums):
      ispecs.insert(i, None)
    ispecs = tuple(ispecs)
    ospecs = jax.tree.map(lambda s: s.spec, out_shardings)
    fn = shard_map(fn, mesh, ispecs, ospecs, check_rep=False)

    def fn(*args, inner=fn):
      # Strip the leading device axis again outside of shard_map.
      outs = list(inner(*args))
      for i in first_outnums:
        outs[i] = jax.tree.map(lambda x: x[0], outs[i])
      return tuple(outs)

  if single_output:
    def fn(*args, inner=fn):
      outs = inner(*args)
      assert len(outs) == 1
      return outs[0]

  if single_output:
    out_shardings = out_shardings[0]
  donate = [0] if donate_params else []

  if not use_shardmap:
    def fn(*args, inner=fn):
      # Install the activation sharding callback; the lock guards the
      # module-level callback against concurrent tracing.
      with LOCK:
        old = nn.LAYER_CALLBACK
        nn.LAYER_CALLBACK = create_layer_callback(mesh, partition_rules)
        outs = inner(*args)
        nn.LAYER_CALLBACK = old
      return outs

  fn = jax.jit(fn, in_shardings, out_shardings, static_argnums, None, donate)

  return fn
+
+
def create_layer_callback(mesh, partition_rules):
  """Returns a layer callback that constrains activation shardings.

  The callback matches the scoped activation name against the partition
  rules and applies the first matching spec as a sharding constraint.
  Raises if no rule matches, to surface incomplete rule sets early.
  """
  def layer_callback(y, name):
    name = f'{nj.ninjax.SCOPE}/{name}'
    for rule, spec in partition_rules:
      if re.search(rule, name):
        sharding = jax.sharding.NamedSharding(mesh, spec)
        def apply(y):
          y = jax.lax.with_sharding_constraint(y, sharding)
          # Expose the sharding on the tracer type, since JAX does not
          # expose shardings of tracers directly. The attribute name must
          # match the one installed below ('tracer_sharding'); the old
          # check used the misspelled 'tracer_shardings', so the property
          # was re-installed on every call.
          if not hasattr(type(y), 'tracer_sharding'):
            type(y).tracer_sharding = property(
                lambda self: TRACER_SHARDINGS[id(self)])
          TRACER_SHARDINGS[id(y)] = sharding
          return y
        return jax.tree.map(apply, y)
    raise Exception(f'No matching rule found for activation key: {name}')
  return layer_callback
+
+
def resolve_rules(params, partition_rules, mesh):
  """Maps each param key to the spec of the first matching rule.

  Returns per-key NamedShardings and a rule -> matched-keys grouping.
  Raises if a key matches no rule; with no rules at all, every param is
  fully replicated.
  """
  if not len(partition_rules):
    partition_rules = [('.*', P())]
  specs = dict()
  grouping = dict()
  for key in params.keys():
    for rule, spec in partition_rules:
      if re.search(rule, key):
        specs[key] = spec
        grouping.setdefault(rule, []).append(key)
        break
    else:
      raise Exception(f'No matching rule found for param key: {key}')
  assert set(params.keys()) == set(specs.keys())
  shardings = jax.tree.map(
      lambda spec: jax.sharding.NamedSharding(mesh, spec), specs)
  return shardings, grouping
+
+
def print_grouping(grouping):
  """Prints a human-readable summary of the params each rule matched."""
  for rule, keys in grouping.items():
    if not keys:
      continue
    print(f'Partition rule "{rule}" matches {len(keys)} param tensors')
    # Group by the last two name components to keep the summary compact.
    suffixes = Counter('/'.join(key.split('/')[-2:]) for key in keys)
    lines = [
        f'- .../{suffix}: {count}'
        for suffix, count in suffixes.most_common(len(suffixes))]
    print('\n'.join(lines))
diff --git a/models/embodied/jax/utils.py b/models/embodied/jax/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f56e47f141963fbddf707d202321550cc897e1fd
--- /dev/null
+++ b/models/embodied/jax/utils.py
@@ -0,0 +1,233 @@
+import functools
+
+import jax
+import jax.numpy as jnp
+import ninjax as nj
+
+from . import internal
+
sg = jax.lax.stop_gradient  # Shorthand: block gradient flow through a value.
f32 = jnp.float32
i32 = jnp.int32

# Default dtype for network computation in this module.
COMPUTE_DTYPE = jnp.bfloat16
+
+
class Normalize(nj.Module):
  """Tracks running statistics of its inputs for normalization.

  Depending on `impl`, maintains either an EMA of the mean and mean
  square ('meanstd') or an EMA of two percentiles ('perc'); 'none'
  disables tracking and always yields offset 0 and scale 1.
  """

  # EMA update rate for all tracked statistics.
  rate: float = 0.01
  # Lower bound on the returned scale, to keep division safe.
  limit: float = 1e-8
  # Percentiles tracked by the 'perc' implementation.
  perclo: float = 5.0
  perchi: float = 95.0
  # Whether to correct for the zero initialization of the EMAs.
  debias: bool = True

  def __init__(self, impl):
    self.impl = impl
    if self.debias and self.impl != 'none':
      # Debiasing EMA: updated towards 1, so it converges from 0 to 1.
      self.corr = nj.Variable(jnp.zeros, (), f32, name='corr')
    if self.impl == 'none':
      pass
    elif self.impl == 'meanstd':
      self.mean = nj.Variable(jnp.zeros, (), f32, name='mean')
      self.sqrs = nj.Variable(jnp.zeros, (), f32, name='sqrs')
    elif self.impl == 'perc':
      self.lo = nj.Variable(jnp.zeros, (), f32, name='lo')
      self.hi = nj.Variable(jnp.zeros, (), f32, name='hi')
    else:
      raise NotImplementedError(self.impl)

  def __call__(self, x, update):
    """Optionally fold `x` into the statistics, then return (offset, scale)."""
    if update:
      self.update(x)
    return self.stats()

  def update(self, x):
    """Update the EMA statistics with a new batch of values (no gradients)."""
    x = sg(f32(x))
    if self.impl == 'none':
      pass
    elif self.impl == 'meanstd':
      self._update(self.mean, self._mean(x))
      self._update(self.sqrs, self._mean(jnp.square(x)))
    elif self.impl == 'perc':
      self._update(self.lo, self._perc(x, self.perclo))
      self._update(self.hi, self._perc(x, self.perchi))
    else:
      raise NotImplementedError(self.impl)
    if self.debias and self.impl != 'none':
      self._update(self.corr, 1.0)

  def stats(self):
    """Return (offset, scale) computed from the current statistics."""
    corr = 1.0
    if self.debias and self.impl != 'none':
      # Debias the zero-initialized EMAs; floor avoids division by ~0
      # right after initialization.
      corr /= jnp.maximum(self.rate, self.corr.read())
    if self.impl == 'none':
      return 0.0, 1.0
    elif self.impl == 'meanstd':
      mean = self.mean.read() * corr
      # relu guards against tiny negative variances from rounding.
      std = jnp.sqrt(jax.nn.relu(self.sqrs.read() * corr - mean ** 2))
      std = jnp.maximum(self.limit, std)
      return mean, std
    elif self.impl == 'perc':
      lo, hi = self.lo.read() * corr, self.hi.read() * corr
      return sg(lo), sg(jnp.maximum(self.limit, hi - lo))
    else:
      raise NotImplementedError(self.impl)

  def _mean(self, x):
    # Mean over all elements, averaged across data-parallel axes if any.
    x = x.mean()
    axes = internal.get_data_axes()
    if axes:
      x = jax.lax.pmean(x, axes)
    return x

  def _perc(self, x, q):
    # Percentile over all elements; gathers across data-parallel axes
    # first so every device computes the same percentile.
    axes = internal.get_data_axes()
    if axes:
      x = jax.lax.all_gather(x, axes)
    x = jnp.percentile(x, q)
    return x

  def _update(self, var, x):
    # Exponential moving average step with this module's rate.
    var.write((1 - self.rate) * var.read() + self.rate * sg(x))
+
+
class SlowModel:
  """Lazily-initialized copy of a model whose parameters track a source.

  Typical use is a slowly-updated target network: `update()` moves the
  wrapped model's parameters towards `source`'s by `rate`, once every
  `every` calls (rate=1 copies them outright). The wrapped model's
  parameters are created on first use by copying the source's.
  """

  def __init__(self, model, *, source, rate=1.0, every=1):
    # Accepts hard updates (rate == 1) or slow EMA rates (rate < 0.5).
    assert rate == 1 or rate < 0.5, rate
    self.source = source
    self.model = model
    self.rate = rate
    self.every = every
    # Counts update() calls, used to gate the periodic interpolation.
    name = self.model.path + '_count'
    self.count = nj.Variable(jnp.zeros, (), i32, name=name)

  def __getattr__(self, name):
    # Forward attribute access to the wrapped model, initializing it first.
    self._initonce()
    return getattr(self.model, name)

  def __call__(self, *args, **kwargs):
    self._initonce()
    return self.model(*args, **kwargs)

  def update(self):
    """Move the wrapped model's parameters towards the source parameters."""
    self._initonce()
    # Interpolation weight is zero except on every `every`-th call.
    mix = jnp.where(self.count.read() % self.every == 0, self.rate, 0)
    fn = lambda src, dst: mix * src + (1 - mix) * dst
    values = jax.tree.map(fn, self.source.values, self.model.values)
    [self.model.write(k, v) for k, v in values.items()]
    self.count.write(self.count.read() + 1)

  def _initonce(self, *args, method=None, **kwargs):
    # On first use, copy the source parameters into this model's scope.
    # NOTE(review): the extra arguments are accepted but unused — confirm
    # whether a caller requires this signature.
    assert self.source.values, 'no parameters to track'
    if not self.model.values:
      p = self.model.path + '/'
      nj.context().update({p + k: v for k, v in self.source.values.items()})
    assert self.model.values.keys() == self.source.values.keys(), (
        self.model.values.keys(), self.source.values.keys())
+
+
class LayerScan:
  """Wraps a module so that selected methods run as a scanned layer stack.

  Calls to any method listed in `names` are converted into a pure
  function and dispatched through `layer_scan` with `count` iterations;
  all other attributes are forwarded to the wrapped module unchanged.
  """

  def __init__(self, module, count, names=('__call__',)):
    self.module = module
    self.count = count
    self.names = names

  def __call__(self, *args, **kwargs):
    # Dunder lookups bypass __getattr__, so forward the call explicitly.
    return self.__getattr__('__call__')(*args, **kwargs)

  def __getattr__(self, name):
    attr = getattr(self.module, name)
    if name not in self.names:
      return attr
    assert callable(attr)
    pure = nj.pure(attr, nested=True)
    return functools.partial(layer_scan, pure, self.module.path, self.count)
+
+
def layer_scan(fn, scope, count, inp, *args, **kwargs):
  """Run pure function `fn` as `count` stacked layers via jax.lax.scan.

  State entries under `scope` ("inner") are assumed to be stacked with a
  leading layer dimension and one slice is fed to each iteration; state
  outside the scope ("outer") is threaded through the scan carry.
  `inp` is the per-layer input that flows from one iteration to the next.
  """
  isinner = lambda k: k.startswith(scope + '/')

  # Trace one iteration first (ignore=True) to discover which state keys
  # are accessed, modified, and created by a single layer.
  args_ = jax.tree.map(lambda x: x[0], args) # Copy structure
  kwargs_ = jax.tree.map(lambda x: x, kwargs) # Copy structure
  state_ = {k: v[0] if isinner(k) else v for k, v in nj.context().items()}
  state, _, accessed, modified, created = fn(
      state_, inp, *args_, ignore=True, track=True,
      seed=nj.seed(None, True), **kwargs_)

  # print('-' * 79)
  # print('accessed:', accessed)
  # print('modified:', modified)
  # print('created:', created)

  inner = lambda xs: {k: v for k, v in xs.items() if isinner(k)}
  outer = lambda xs: {k: v for k, v in xs.items() if not isinner(k)}

  # Read-only state: passed into the scan but never threaded back out.
  unchanging = {
      k: v for k, v in nj.context().items()
      if k in accessed and k not in modified and k not in created}
  unchanging_inner = inner(unchanging)
  unchanging_outer = outer(unchanging)

  creations = {k: v for k, v in state.items() if k in created}
  creations_inner = inner(creations)
  creations_outer = outer(creations)
  nj.context().update(creations_outer)
  del creations_inner # Will be created inside the scan.

  # Inner values do not exist yet, so we only keep them in the creations. This
  # is fine, because inner values cannot change across scan iterations anyways.
  # Outer values can change over iterations, so we need to thread them even
  # during creation.
  changing_inner = inner({
      # k: v for k, v in state.items()
      k: v for k, v in nj.context().items()
      if k in modified and k not in created})
  changing_outer = outer({
      k: v for k, v in state.items()
      if k in modified})

  # f = lambda x: {k: v.shape for k, v in x.items()}
  # print('-' * 79)
  # print('unchanging_inner', f(unchanging_inner))
  # print('unchanging_outer', f(unchanging_outer))
  # print('creations_inner', f(inner(creations)))
  # print('creations_outer', f(creations_outer))
  # print('changing_inner', f(changing_inner))
  # print('changing_outer', f(changing_outer))

  def body(carry, x):
    # carry: layer input plus outer state threaded across iterations.
    # x: per-layer args, seed, and the per-layer slices of inner state.
    inp, changing_outer = carry
    arg, seed, unchanging_inner, changing_inner = x
    state = {
        **unchanging_inner, **unchanging_outer,
        **changing_inner, **changing_outer}
    state, out = fn(state, inp, *arg, **kwargs, seed=seed)
    # First output element feeds the next layer; extras are stacked.
    out, *other = out if isinstance(out, tuple) else (out,)
    changing = {k: v for k, v in state.items() if k in modified}
    changing_inner = inner(changing)
    changing_outer = outer(changing)
    creations = {k: v for k, v in state.items() if k in created}
    creations_inner = inner(creations)
    carry = (out, changing_outer)
    y = (other, creations_inner, changing_inner)
    return carry, y

  seeds = nj.seed(count, True)
  carry, ys = jax.lax.scan(
      f=body,
      init=(inp, changing_outer),
      xs=(args, seeds, unchanging_inner, changing_inner),
      length=count)
  out, changing_outer = carry
  other, creations_inner, changing_inner = ys

  # Write stacked creations and modifications back into the global state,
  # but only when the surrounding ninjax call allows mutation.
  if nj.context().modify:
    nj.context().update(creations_inner)
    nj.context().update(changing_inner)
    nj.context().update(changing_outer)

  return (out, *other) if len(other) else out
diff --git a/models/embodied/perf/test_bandwidth.py b/models/embodied/perf/test_bandwidth.py
new file mode 100644
index 0000000000000000000000000000000000000000..f924c5858c523c16bb039215b75d2285bf6cf578
--- /dev/null
+++ b/models/embodied/perf/test_bandwidth.py
@@ -0,0 +1,60 @@
+import pathlib
+import sys
+import time
+
+sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+
+import elements
+import zerofun
+import numpy as np
+
+
class TestBandwidth:
  """Micro-benchmarks for memory and socket throughput (run via __main__)."""

  def test_numpy_read(self):
    """Measure sequential read bandwidth by summing a 1 GiB numpy array."""
    arr = np.ones((128, 1024, 1024), np.int64) # 1 GiB
    size = arr.nbytes / (1024 ** 3)
    for _ in range(10):
      with elements.timer.section('step'):
        arr.sum()
    dt = elements.timer.stats()['step/avg']
    print(f'numpy_read: {dt:.3f} avg | {size / dt:.2f} gib/s')

  def test_numpy_copy(self):
    """Measure copy bandwidth of a 1 GiB numpy array."""
    arr = np.ones((1024, 1024, 1024), np.uint8)
    size = arr.nbytes / (1024 ** 3)
    for _ in range(10):
      with elements.timer.section('step'):
        arr.copy()
    dt = elements.timer.stats()['step/avg']
    print(f'numpy_copy: {dt:.3f} avg | {size / dt:.2f} gib/s')

  def test_socket_send(self):
    """Measure throughput of fetching a 1 GiB array over a zerofun socket."""
    shape, dtype, gib = (1024, 1024, 1024), np.uint8, 1.00

    def server(context, addr):
      # Serves the same 1 GiB payload for every request until stopped.
      server = zerofun.Server(addr)
      data = {'foo': np.ones(shape, dtype)}
      server.bind('function', lambda _: data)
      with server:
        while context.running:
          time.sleep(0.01)

    addr = f'tcp://localhost:{zerofun.get_free_port()}'
    proc = zerofun.StoppableProcess(server, addr, start=True)

    client = zerofun.Client(addr)
    client.connect()
    for _ in range(10):
      with elements.timer.section('step'):
        client.function({}).result()
    proc.stop()

    dt = elements.timer.stats()['step/avg']
    print(f'socket_send: {dt:.3f} avg | {gib / dt:.2f} gib/s')
+
+
if __name__ == '__main__':
  # Rough reference numbers from a previous run are noted per benchmark.
  bench = TestBandwidth()
  bench.test_numpy_read() # 21 gib/s
  bench.test_numpy_copy() # 7 gib/s
  bench.test_socket_send() # 4 gib/s
diff --git a/models/embodied/perf/test_distr.py b/models/embodied/perf/test_distr.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f5457870ee1322177def695bdc555716970f011
--- /dev/null
+++ b/models/embodied/perf/test_distr.py
@@ -0,0 +1,142 @@
+import os
+import pathlib
+import sys
+import time
+from collections import defaultdict
+
+sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+
+import zerofun
+import numpy as np
+
+
class TestDistr:
  """Throughput benchmarks for the zerofun client/server stack."""

  def test_batched_throughput(self, clients=32, batch=16, workers=4):
    """Many clients hitting one batching server; prints rates every second."""
    # Many sockets are opened, so a small file-handle limit would skew results.
    assert int(os.popen('ulimit -n').read()) > 1024

    addr = f'tcp://localhost:{zerofun.get_free_port()}'
    stats = defaultdict(int)
    # All clients plus the server synchronize once before timing starts.
    barrier = zerofun.mp.Barrier(1 + clients)

    def client(context, addr, barrier):
      # Repeatedly sends a fixed payload and waits for the echo.
      data = {
          'foo': np.zeros((64, 64, 3,), np.uint8),
          'bar': np.zeros((1024,), np.float32),
          'baz': np.zeros((), bool),
      }
      client = zerofun.Client(addr)
      client.connect()
      barrier.wait()
      while context.running:
        client.function(data).result()

    def workfn(data):
      # Simulate a small amount of server-side work per batch.
      time.sleep(0.002)
      return data, data

    def donefn(data):
      stats['batches'] += 1
      stats['frames'] += len(data['foo'])
      stats['nbytes'] += sum(x.nbytes for x in data.values())

    procs = [
        zerofun.StoppableProcess(client, addr, barrier, start=True)
        for _ in range(clients)]

    server = zerofun.Server(addr)
    # server = zerofun.Server2(addr)

    server.bind('function', workfn, donefn, batch=batch, workers=workers)
    with server:
      barrier.wait()
      start = time.time()
      while True:
        server.check()
        now = time.time()
        dur = now - start
        print(
            f'{stats["batches"] / dur:.2f} bat/s ' +
            f'{stats["frames"] / dur:.2f} frm/s ' +
            f'{stats["nbytes"] / dur / (1024 ** 3):.2f} gib/s')
        stats.clear()
        start = now
        time.sleep(1)
    [x.stop() for x in procs]

  #############################################################################

  def test_proxy_throughput(self, clients=32, batch=16, workers=4):
    """Clients -> batching proxy -> backend; prints backend rates every second."""
    assert int(os.popen('ulimit -n').read()) > 1024

    def client(context, outer_addr, barrier):
      data = {
          'foo': np.zeros((64, 64, 3,), np.uint8),
          'bar': np.zeros((1024,), np.float32),
          'baz': np.zeros((), bool),
      }
      client = zerofun.Client(outer_addr)
      client.connect()
      barrier.wait()
      while context.running:
        client.function(data).result()

    def proxy(context, outer_addr, inner_addr, barrier):
      # Batches outer requests and forwards them to the backend server.
      client = zerofun.Client(
          inner_addr, pings=0, maxage=0, name='ProxyInner')
      client.connect()
      server = zerofun.Server(
          outer_addr, errors=True, name='ProxyOuter')
      def function(data):
        return client.function(data).result()
      server.bind('function', function, batch=batch, workers=workers)
      with server:
        barrier.wait()
        while context.running:
          server.check()
          time.sleep(0.1)

    def backend(context, inner_addr, barrier):
      # Terminal server that does the (simulated) work and tracks stats.
      stats = defaultdict(int)
      def workfn(data):
        time.sleep(0.002)
        return data, data
      def donefn(data):
        stats['batches'] += 1
        stats['frames'] += len(data['foo'])
        stats['nbytes'] += sum(x.nbytes for x in data.values())
      server = zerofun.Server(
          inner_addr, errors=True, name='Backend')
      server.bind('function', workfn, donefn, workers=workers)
      with server:
        barrier.wait()
        start = time.time()
        while context.running:
          server.check()
          now = time.time()
          dur = now - start
          print(
              f'{stats["batches"] / dur:.2f} bat/s ' +
              f'{stats["frames"] / dur:.2f} frm/s ' +
              f'{stats["nbytes"] / dur / (1024**3):.2f} gib/s')
          stats.clear()
          start = now
          time.sleep(1)

    inner_addr = 'ipc:///tmp/test-inner'
    outer_addr = 'ipc:///tmp/test-outer'
    barrier = zerofun.mp.Barrier(2 + clients)
    procs = [
        zerofun.StoppableProcess(client, outer_addr, barrier)
        for _ in range(clients)]
    procs.append(zerofun.StoppableProcess(
        proxy, outer_addr, inner_addr, barrier))
    procs.append(zerofun.StoppableProcess(
        backend, inner_addr, barrier))
    zerofun.run(procs)
+
+
if __name__ == '__main__':
  # Rough reference numbers from a previous run are noted per benchmark.
  suite = TestDistr()
  suite.test_batched_throughput() # 4100 frm/s Server
  # suite.test_batched_throughput() # 4200 frm/s Server2
  suite.test_proxy_throughput() # 3000 frm/s
diff --git a/models/embodied/perf/test_driver.py b/models/embodied/perf/test_driver.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c90dd212bbdf2e03b0c4804329529a09f336b53
--- /dev/null
+++ b/models/embodied/perf/test_driver.py
@@ -0,0 +1,39 @@
+import pathlib
+import sys
+from functools import partial as bind
+
+sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+
+import elements
+import embodied
+
+
+class TestDriver:
+
+ def test_throughput_dummy(self, parallel=True):
+ from embodied.envs import dummy
+ make_env_fns = [bind(dummy.Dummy, 'disc') for _ in range(32)]
+ example = make_env_fns[0]()
+ agent = embodied.RandomAgent(example.obs_space, example.act_space)
+ example.close()
+ driver = embodied.Driver(make_env_fns, parallel)
+ driver.reset(agent.init_policy)
+ fps = elements.FPS()
+ while True:
+ driver(agent.policy, steps=100)
+ fps.step(100 * len(make_env_fns))
+ print(f'FPS: {fps.result():.0f}')
+
+ def test_throughput_crafter(self, parallel=True):
+ from embodied.envs import crafter
+ make_env_fns = [bind(crafter.Crafter, 'reward') for _ in range(32)]
+ example = make_env_fns[0]()
+ agent = embodied.RandomAgent(example.obs_space, example.act_space)
+ example.close()
+ driver = embodied.Driver(make_env_fns, parallel)
+ driver.reset(agent.init_policy)
+ fps = elements.FPS()
+ while True:
+ driver(agent.policy, steps=100)
+ fps.step(100 * len(make_env_fns))
+ print(f'FPS: {fps.result():.0f}')
diff --git a/models/embodied/perf/test_replay.py b/models/embodied/perf/test_replay.py
new file mode 100644
index 0000000000000000000000000000000000000000..459b872370da9cf4377e5e7b7a938d76977fb16b
--- /dev/null
+++ b/models/embodied/perf/test_replay.py
@@ -0,0 +1,140 @@
+import pathlib
+import sys
+import threading
+import time
+from collections import defaultdict
+
+sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+
+import embodied
+import numpy as np
+import pytest
+
+
# (name, constructor) pairs of replay implementations under test.
REPLAYS = [
    ('Replay', embodied.replay.Replay),
]

# Synthetic transition used for all inserts: image and vector
# observations, an action, and episode-boundary flags.
STEP = {
    'image': np.zeros((64, 64, 3), np.uint8),
    'vector': np.zeros(1024, np.float32),
    'action': np.zeros(12, np.float32),
    'is_first': np.array(False),
    'is_last': np.array(False),
    'is_terminal': np.array(False),
}
+
+
class TestReplay:
  """Benchmarks and stress tests for replay buffer implementations."""

  @pytest.mark.parametrize('name,Replay', REPLAYS)
  def test_speed(self, name, Replay, inserts=2e5, workers=8, samples=1e5):
    """Measure raw insert and sample throughput."""
    print('')
    initial = time.time()
    replay = Replay(length=32, capacity=1e5, chunksize=1024)
    start = time.time()
    # Round-robin inserts over several worker slots, like parallel envs.
    for step in range(int(inserts / workers)):
      for worker in range(workers):
        replay.add(STEP, worker)
    duration = time.time() - start
    print(name, 'inserts/sec:', int(inserts / duration))
    start = time.time()
    dataset = iter(replay.dataset(1))
    for _ in range(int(samples)):
      next(dataset)
    duration = time.time() - start
    print(name, 'samples/sec:', int(samples / duration))
    print(name, 'total duration:', time.time() - initial)

  @pytest.mark.parametrize('chunksize', [64, 128, 256, 512, 1024, 2048, 4096])
  def test_chunk_size(self, chunksize, inserts=2e5, workers=8, samples=2e5):
    """Measure how the chunk size affects insert and sample throughput."""
    print('')
    initial = time.time()
    replay = embodied.replay.Replay(length=64, chunksize=chunksize)
    start = time.time()
    for step in range(int(inserts / workers)):
      for worker in range(workers):
        replay.add(STEP, worker)
    duration = time.time() - start
    print('chunksize', chunksize, 'inserts/sec:', int(inserts / duration))
    start = time.time()
    dataset = iter(replay.dataset(1))
    for _ in range(int(samples)):
      next(dataset)
    duration = time.time() - start
    print('chunksize', chunksize, 'samples/sec:', int(samples / duration))
    print('chunksize', chunksize, 'total duration:', time.time() - initial)

  @pytest.mark.parametrize('name,Replay', REPLAYS)
  def test_removal(self, name, Replay, inserts=1e6, workers=1):
    """Measure insert throughput when inserts exceed capacity (eviction)."""
    print('')
    replay = Replay(length=32, capacity=1e5, chunksize=1024)
    start = time.time()
    for step in range(int(inserts)):
      replay.add(STEP)
    duration = time.time() - start
    print(name, 'inserts/sec:', int(inserts / duration))

  @pytest.mark.parametrize('name,Replay', REPLAYS)
  def test_parallel(self, tmpdir, name, Replay, duration=5):
    """Stress adders, samplers, and save/load from many threads at once."""
    print('')
    replay = Replay(length=16, capacity=1e4, chunksize=32, directory=tmpdir)

    running = True
    adds = defaultdict(int)
    samples = defaultdict(int)
    saves = defaultdict(int)
    errors = []

    def adder():
      try:
        ident = threading.get_ident()
        step = {'foo': np.zeros((64, 64, 3))}
        while running:
          replay.add(step, threading.get_ident())
          adds[ident] += 1
      except Exception as e:
        # Collect errors so the main thread can re-raise after joining.
        errors.append(e)
        raise

    def sampler():
      try:
        ident = threading.get_ident()
        dataset = iter(replay.dataset(1))
        while running:
          next(dataset)
          samples[ident] += 1
      except Exception as e:
        errors.append(e)
        raise

    def saver():
      try:
        ident = threading.get_ident()
        while running:
          data = replay.save()
          time.sleep(0.1)
          replay.load(data)
          time.sleep(0.1)
          saves[ident] += 1
      except Exception as e:
        errors.append(e)
        raise

    workers = [threading.Thread(target=saver)]
    for _ in range(32):
      workers.append(threading.Thread(target=adder))
    for _ in range(8):
      workers.append(threading.Thread(target=sampler))

    print(f'Starting {len(workers)} threads')
    [x.start() for x in workers]
    time.sleep(duration)
    running = False
    [x.join() for x in workers]
    if errors:
      print(f'Found {len(errors)} errors: {errors}')
      raise errors[0]
    print('adds/sec:', sum(adds.values()) / duration)
    print('samples/sec:', sum(samples.values()) / duration)
    print('save_load/sec:', sum(saves.values()) / duration)
diff --git a/models/embodied/run/__init__.py b/models/embodied/run/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c022a84eacf7563a0bdc0709af48cadf242f028d
--- /dev/null
+++ b/models/embodied/run/__init__.py
@@ -0,0 +1,5 @@
+from .eval_only import eval_only
+from .train import train
+from .train_eval import train_eval
+
+from . import parallel
diff --git a/models/embodied/run/eval_only.py b/models/embodied/run/eval_only.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b71b55da702a1050fe7a7e489b547fdba2effd5
--- /dev/null
+++ b/models/embodied/run/eval_only.py
@@ -0,0 +1,76 @@
+from collections import defaultdict
+from functools import partial as bind
+
+import elements
+import embodied
+import numpy as np
+
+
def eval_only(make_agent, make_env, make_logger, args):
  """Run a checkpointed agent in eval mode and log episode statistics."""
  assert args.from_checkpoint

  agent = make_agent()
  logger = make_logger()

  logdir = elements.Path(args.logdir)
  logdir.mkdir()
  print('Logdir', logdir)
  step = logger.step
  usage = elements.Usage(**args.usage)
  # NOTE(review): nothing visible here adds to `agg`; its result below is
  # likely empty — confirm whether it is needed.
  agg = elements.Agg()
  epstats = elements.Agg()
  # One aggregator per environment worker, keyed by worker index.
  episodes = defaultdict(elements.Agg)
  should_log = elements.when.Clock(args.log_every)
  policy_fps = elements.FPS()

  @elements.timer.section('logfn')
  def logfn(tran, worker):
    # Accumulate per-episode statistics for each environment worker.
    episode = episodes[worker]
    # Short-circuit: reset the aggregator at episode starts.
    tran['is_first'] and episode.reset()
    episode.add('score', tran['reward'], agg='sum')
    episode.add('length', 1, agg='sum')
    episode.add('rewards', tran['reward'], agg='stack')
    for key, value in tran.items():
      isimage = (value.dtype == np.uint8) and (value.ndim == 3)
      if isimage and worker == 0:
        # Only record video frames from the first worker.
        episode.add(f'policy_{key}', value, agg='stack')
      elif key.startswith('log/'):
        assert value.ndim == 0, (key, value.shape, value.dtype)
        episode.add(key + '/avg', value, agg='avg')
        episode.add(key + '/max', value, agg='max')
        episode.add(key + '/sum', value, agg='sum')
    if tran['is_last']:
      result = episode.result()
      logger.add({
          'score': result.pop('score'),
          'length': result.pop('length'),
      }, prefix='episode')
      rew = result.pop('rewards')
      if len(rew) > 1:
        # Fraction of steps where the reward changed by at least 0.01.
        result['reward_rate'] = (np.abs(rew[1:] - rew[:-1]) >= 0.01).mean()
      epstats.add(result)

  fns = [bind(make_env, i) for i in range(args.envs)]
  driver = embodied.Driver(fns, parallel=(not args.debug))
  driver.on_step(lambda tran, _: step.increment())
  driver.on_step(lambda tran, _: policy_fps.step())
  driver.on_step(logfn)

  cp = elements.Checkpoint()
  cp.agent = agent
  cp.load(args.from_checkpoint, keys=['agent'])

  print('Start evaluation')
  policy = lambda *args: agent.policy(*args, mode='eval')
  driver.reset(agent.init_policy)
  while step < args.steps:
    driver(policy, steps=10)
    if should_log(step):
      logger.add(agg.result())
      logger.add(epstats.result(), prefix='epstats')
      logger.add(usage.stats(), prefix='usage')
      logger.add({'fps/policy': policy_fps.result()})
      logger.add({'timer': elements.timer.stats()['summary']})
      logger.write()

  logger.close()
diff --git a/models/embodied/run/parallel.py b/models/embodied/run/parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2738a2c6c4211e299168366ebd01ff6adcbf99f
--- /dev/null
+++ b/models/embodied/run/parallel.py
@@ -0,0 +1,481 @@
+import collections
+import threading
+import time
+from functools import partial as bind
+
+import cloudpickle
+import elements
+import embodied
+import numpy as np
+import portal
+
def prefix(d, p):
  """Return a copy of dict `d` with every key prefixed by `p` and a slash."""
  return {f'{p}/{k}': v for k, v in d.items()}
+
+
def combined(
    make_agent,
    make_replay_train,
    make_replay_eval,
    make_env_train,
    make_env_eval,
    make_stream,
    make_logger,
    args):
  """Launch agent, logger, env, and replay workers as one process tree.

  The factory callables are cloudpickled so they can be shipped to
  subprocesses; remote envs/replay can be excluded via args.
  """

  if args.actor_batch <= 0:
    # Default the actor batch size to half the number of environments.
    args = args.update(actor_batch=max(1, args.envs // 2))
  assert args.actor_batch <= args.envs, (args.actor_batch, args.envs)
  for key in ('actor_addr', 'replay_addr', 'logger_addr'):
    if '{auto}' in args[key]:
      # Substitute a free port for the '{auto}' placeholder in addresses.
      args = args.update({key: args[key].format(auto=portal.free_port())})

  make_agent = cloudpickle.dumps(make_agent)
  make_replay_train = cloudpickle.dumps(make_replay_train)
  make_replay_eval = cloudpickle.dumps(make_replay_eval)
  make_env_train = cloudpickle.dumps(make_env_train)
  make_env_eval = cloudpickle.dumps(make_env_eval)
  make_stream = cloudpickle.dumps(make_stream)
  make_logger = cloudpickle.dumps(make_logger)

  workers = []
  # The agent can run in-process (thread) or as its own process.
  if args.agent_process:
    workers.append(portal.Process(parallel_agent, make_agent, args))
  else:
    workers.append(portal.Thread(parallel_agent, make_agent, args))
  workers.append(portal.Process(parallel_logger, make_logger, args))

  if not args.remote_envs:
    # Train envs get ids [0, envs); eval envs continue the id range.
    for i in range(args.envs):
      workers.append(portal.Process(parallel_env, make_env_train, i, args))
    for i in range(args.envs, args.envs + args.eval_envs):
      workers.append(portal.Process(
          parallel_env, make_env_eval, i, args, True))

  if not args.remote_replay:
    workers.append(portal.Process(
        parallel_replay, make_replay_train, make_replay_eval,
        make_stream, args))

  portal.run(workers)
+
+
def parallel_agent(make_agent, args):
  """Run the actor and learner threads for a single agent instance."""
  # The factory arrives cloudpickled when this runs as a subprocess.
  if isinstance(make_agent, bytes):
    make_agent = cloudpickle.loads(make_agent)
  agent = make_agent()
  # Actor and learner synchronize once on this barrier.
  barrier = threading.Barrier(2)
  workers = [
      portal.Thread(parallel_actor, agent, barrier, args),
      portal.Thread(parallel_learner, agent, barrier, args),
  ]
  portal.run(workers)
+
+
@elements.timer.section('actor')
def parallel_actor(agent, barrier, args):
  """Serve batched policy requests from env workers; forward transitions
  to the replay and logger services."""

  islist = lambda x: isinstance(x, list)
  # One shared initial policy state, tracked per environment id below.
  initial = agent.init_policy(args.actor_batch)
  initial = elements.tree.map(lambda x: x[0], initial, isleaf=islist)
  carries = collections.defaultdict(lambda: initial)
  barrier.wait() # Do not collect data before learner restored checkpoint.
  fps = elements.FPS()

  should_log = embodied.LocalClock(args.log_every)
  # Allow a bounded number of in-flight requests per worker thread.
  backlog = 8 * args.actor_threads
  logger = portal.Client(args.logger_addr, 'ActorLogger', maxinflight=backlog)
  replay = portal.Client(args.replay_addr, 'ActorReplay', maxinflight=backlog)

  @elements.timer.section('workfn')
  def workfn(obs):
    # Batched policy step: gather per-env carries, run the policy, and
    # scatter the updated carries back by environment id.
    envid = obs.pop('envid')
    assert envid.shape == (args.actor_batch,)
    is_eval = obs.pop('is_eval')
    fps.step(obs['is_first'].size)
    with elements.timer.section('get_states'):
      carry = [carries[a] for a in envid]
      carry = elements.tree.map(lambda *xs: list(xs), *carry)
    # 'log/...' keys bypass the policy and go straight to the logger.
    logs = {k: v for k, v in obs.items() if k.startswith('log/')}
    obs = {k: v for k, v in obs.items() if not k.startswith('log/')}
    carry, acts, outs = agent.policy(carry, obs)
    assert all(k not in acts for k in outs), (
        list(outs.keys()), list(acts.keys()))
    with elements.timer.section('put_states'):
      for i, a in enumerate(envid):
        carries[a] = elements.tree.map(lambda x: x[i], carry, isleaf=islist)
    trans = {'envid': envid, 'is_eval': is_eval, **obs, **acts, **outs, **logs}
    # Arrays are frozen, presumably to guard against accidental mutation
    # once they are shared with the done callback below.
    [x.setflags(write=False) for x in trans.values()]
    acts = {**acts, 'reset': obs['is_last'].copy()}
    return acts, trans

  @elements.timer.section('donefn')
  def postfn(trans):
    # Done callback: ship the transition to replay and logger, and
    # periodically report actor-side stats.
    logs = {k: v for k, v in trans.items() if k.startswith('log/')}
    trans = {k: v for k, v in trans.items() if not k.startswith('log/')}
    replay.add_batch(trans)
    logger.tran({**trans, **logs})
    if should_log():
      stats = {}
      stats['fps/policy'] = fps.result()
      stats['parallel/ep_states'] = len(carries)
      # `server` is created below; postfn only runs once it is serving.
      stats.update(prefix(server.stats(), 'server/actor'))
      stats.update(prefix(logger.stats(), 'client/actor_logger'))
      stats.update(prefix(replay.stats(), 'client/actor_replay'))
      logger.add(stats)

  server = portal.BatchServer(args.actor_addr, name='Actor')
  server.bind('act', workfn, postfn, args.actor_batch, args.actor_threads)
  server.start()
+
+
@elements.timer.section('learner')
def parallel_learner(agent, barrier, args):
  """Train the agent on batches streamed from the replay server, with
  periodic reporting, logging, and checkpointing."""

  agg = elements.Agg()
  usage = elements.Usage(**args.usage)
  should_log = embodied.GlobalClock(args.log_every)
  should_report = embodied.GlobalClock(args.report_every)
  should_save = embodied.GlobalClock(args.save_every)
  fps = elements.FPS()
  batch_steps = args.batch_size * args.batch_length

  cp = elements.Checkpoint(elements.Path(args.logdir) / 'ckpt/agent')
  cp.agent = agent
  if args.from_checkpoint:
    # Optionally pre-load (a regex-selected subset of) agent weights
    # from another run before the regular checkpoint logic.
    elements.checkpoint.load(args.from_checkpoint, dict(
        agent=bind(agent.load, regex=args.from_checkpoint_regex)))
  cp.load_or_save()
  logger = portal.Client(args.logger_addr, 'LearnerLogger', maxinflight=1)
  updater = portal.Client(
      args.replay_addr, 'LearnerReplayUpdater', maxinflight=8)
  # Unblock the actor now that the checkpoint has been restored.
  barrier.wait()

  replays = {}
  received = collections.defaultdict(int)
  def parallel_stream(source, prefetch=2):
    # Generator that samples batches from the replay server, keeping
    # `prefetch` requests in flight to hide request latency.
    replay = portal.Client(args.replay_addr, f'LearnerReplay{source.title()}')
    replays[source] = replay
    call = getattr(replay, f'sample_batch_{source}')
    futures = collections.deque([call() for _ in range(prefetch)])
    while True:
      futures.append(call())
      with elements.timer.section(f'stream_{source}_response'):
        data = futures.popleft().result()
      received[source] += 1
      yield data

  def evaluate(stream):
    # Aggregate report metrics over a fixed number of batches.
    carry = agent.init_report(args.batch_size)
    agg = elements.Agg()
    for _ in range(args.consec_report * args.report_batches):
      batch = next(stream)
      carry, metrics = agent.report(carry, batch)
      agg.add(metrics)
    return agg.result()

  stream_train = iter(agent.stream(
      embodied.streams.Stateless(parallel_stream('train'))))
  stream_report = iter(agent.stream(
      embodied.streams.Stateless(parallel_stream('report'))))
  stream_eval = iter(agent.stream(
      embodied.streams.Stateless(parallel_stream('eval'))))
  carry = agent.init_train(args.batch_size)

  while True:

    with elements.timer.section('batch_next'):
      batch = next(stream_train)
    with elements.timer.section('train_step'):
      carry, outs, mets = agent.train(carry, batch)
    if 'replay' in outs:
      with elements.timer.section('replay_update'):
        # Forward the agent's replay output to the replay server.
        updater.update(outs['replay'])

    # Brief sleep, presumably to yield to the other threads in this
    # process — confirm before removing.
    time.sleep(0.0001)
    agg.add(mets)
    fps.step(batch_steps)

    # Skip reporting until at least one report batch has arrived.
    if should_report(skip=not received['report']):
      print('Report started...')
      with elements.timer.section('report'):
        logger.add(prefix(evaluate(stream_report), 'report'))
        if args.eval_envs and received['eval']:
          logger.add(prefix(evaluate(stream_eval), 'eval'))
      print('Report finished!')

    if should_log():
      with elements.timer.section('metrics'):
        stats = {}
        stats['fps/train'] = fps.result()
        stats['timer/agent'] = elements.timer.stats()['summary']
        stats.update(prefix(agg.result(), 'train'))
        stats.update(prefix(usage.stats(), 'usage/agent'))
        stats.update(prefix(logger.stats(), 'client/learner_logger'))
        for source, client in replays.items():
          stats.update(prefix(client.stats(), f'client/replay_{source}'))
        logger.add(stats)

    if should_save():
      cp.save()
+
+
def parallel_replay(make_replay_train, make_replay_eval, make_stream, args):
  """Serve replay inserts and batch sampling, rate-limited by a
  samples-per-insert limiter, with periodic checkpointing and logging."""
  # Factories arrive cloudpickled when this runs as a subprocess.
  if isinstance(make_replay_train, bytes):
    make_replay_train = cloudpickle.loads(make_replay_train)
  if isinstance(make_replay_eval, bytes):
    make_replay_eval = cloudpickle.loads(make_replay_eval)
  if isinstance(make_stream, bytes):
    make_stream = cloudpickle.loads(make_stream)

  replay_train = make_replay_train()
  replay_eval = make_replay_eval()

  # Report batches come from the train replay; eval has its own buffer.
  stream_train = iter(make_stream(replay_train, 'train'))
  stream_report = iter(make_stream(replay_train, 'report'))
  stream_eval = iter(make_stream(replay_eval, 'eval'))

  should_log = embodied.LocalClock(args.log_every)
  logger = portal.Client(args.logger_addr, 'ReplayLogger', maxinflight=1)
  usage = elements.Usage(**args.usage.update(nvsmi=False))
  limit_agg = elements.Agg()
  # Counts requests since the last checkpoint; used to skip idle saves.
  active = elements.Counter()

  # Couple training speed to data collection speed.
  limiter = embodied.limiters.SamplesPerInsert(
      args.train_ratio / args.batch_length,
      tolerance=4 * args.batch_size,
      minsize=args.batch_size * replay_train.length)

  def add_batch(data):
    active.increment()
    for i, envid in enumerate(data.pop('envid')):
      tran = {k: v[i] for k, v in data.items()}
      # Eval transitions bypass the rate limiter.
      if tran.pop('is_eval', False):
        replay_eval.add(tran, envid)
        continue
      with elements.timer.section('replay_insert_wait'):
        # limiter.__dict__ is passed along as debug info for the wait.
        dur = embodied.limiters.wait(
            limiter.want_insert, 'Replay insert waiting',
            limiter.__dict__)
      limit_agg.add('insert_wait_dur', dur, agg='sum')
      limit_agg.add('insert_wait_count', dur > 0, agg='sum')
      limit_agg.add('insert_wait_frac', dur > 0, agg='avg')
      limiter.insert()
      replay_train.add(tran, envid)
    return {}

  def sample_batch_train():
    active.increment()
    with elements.timer.section('replay_sample_wait'):
      # The limiter accounts per transition, so pay once per batch item.
      for _ in range(args.batch_size):
        dur = embodied.limiters.wait(
            limiter.want_sample, 'Replay sample waiting',
            limiter.__dict__)
        limit_agg.add('sample_wait_dur', dur, agg='sum')
        limit_agg.add('sample_wait_count', dur > 0, agg='sum')
        limit_agg.add('sample_wait_frac', dur > 0, agg='avg')
        limiter.sample()
    return next(stream_train)

  def sample_batch_report():
    active.increment()
    return next(stream_report)

  def sample_batch_eval():
    active.increment()
    return next(stream_eval)

  should_save = embodied.LocalClock(args.save_every)
  cp = elements.Checkpoint(elements.Path(args.logdir) / 'ckpt/replay')
  cp.replay_train = replay_train
  cp.replay_eval = replay_eval
  cp.limiter = limiter
  cp.load_or_save()

  server = portal.Server(args.replay_addr, name='Replay')
  server.bind('add_batch', add_batch, workers=1)
  server.bind('sample_batch_train', sample_batch_train, workers=1)
  server.bind('sample_batch_report', sample_batch_report, workers=1)
  server.bind('sample_batch_eval', sample_batch_eval, workers=1)
  server.bind('update', lambda data: replay_train.update(data), workers=1)
  server.start(block=False)
  while True:
    # Only checkpoint when there was activity since the last save.
    # NOTE(review): should_save() fires even when skipped here — confirm
    # the clock semantics make this intentional.
    if should_save() and active > 0:
      active.reset()
      cp.save()
    if should_log():
      stats = {}
      stats['timer/replay'] = elements.timer.stats()['summary']
      stats.update(prefix(limit_agg.result(), 'limiter'))
      stats.update(prefix(replay_train.stats(), 'replay'))
      stats.update(prefix(replay_eval.stats(), 'replay_eval'))
      stats.update(prefix(usage.stats(), 'usage/replay'))
      stats.update(prefix(logger.stats(), 'client/replay_logger'))
      stats.update(prefix(server.stats(), 'server/replay'))
      logger.add(stats)
    time.sleep(1)
+
+
+@elements.timer.section('logger')
+def parallel_logger(make_logger, args):
+ if isinstance(make_logger, bytes):
+ make_logger = cloudpickle.loads(make_logger)
+
+ logger = make_logger()
+ should_log = embodied.LocalClock(args.log_every)
+ usage = elements.Usage(**args.usage.update(nvsmi=False))
+
+ active = elements.Counter()
+ should_save = embodied.LocalClock(args.save_every)
+ cp = elements.Checkpoint(elements.Path(args.logdir) / 'ckpt/logger')
+ cp.step = logger.step
+ cp.load_or_save()
+
+ parallel = elements.Agg()
+ epstats = elements.Agg()
+ episodes = collections.defaultdict(elements.Agg)
+ updated = collections.defaultdict(lambda: None)
+ dones = collections.defaultdict(lambda: True)
+
+ @elements.timer.section('addfn')
+ def addfn(metrics):
+ active.increment()
+ logger.add(metrics)
+
+ @elements.timer.section('tranfn')
+ def tranfn(trans):
+ active.increment()
+ now = time.time()
+ envid = trans.pop('envid')
+ logger.step.increment((~trans['is_eval']).sum())
+ parallel.add('ep_starts', trans['is_first'].sum(), agg='sum')
+ parallel.add('ep_ends', trans['is_last'].sum(), agg='sum')
+
+ for i, addr in enumerate(envid):
+ tran = {k: v[i] for k, v in trans.items()}
+
+ updated[addr] = now
+ episode = episodes[addr]
+ if tran['is_first']:
+ episode.reset()
+ parallel.add('ep_abandoned', int(not dones[addr]), agg='sum')
+ dones[addr] = tran['is_last']
+
+ episode.add('score', tran['reward'], agg='sum')
+ episode.add('length', 1, agg='sum')
+ episode.add('rewards', tran['reward'], agg='stack')
+
+ first_addr = next(iter(episodes.keys()))
+ for key, value in tran.items():
+ if value.dtype == np.uint8 and value.ndim == 3:
+ if addr == first_addr:
+ episode.add(f'policy_{key}', value, agg='stack')
+ elif key.startswith('log/'):
+ assert value.ndim == 0, (key, value.shape, value.dtype)
+ episode.add(key + '/avg', value, agg='avg')
+ episode.add(key + '/max', value, agg='max')
+ episode.add(key + '/sum', value, agg='sum')
+ if tran['is_last']:
+ result = episode.result()
+ logger.add({
+ 'score': result.pop('score'),
+ 'length': result.pop('length') - 1,
+ }, prefix='episode')
+ rew = result.pop('rewards')
+ if len(rew) > 1:
+ result['reward_rate'] = (np.abs(rew[1:] - rew[:-1]) >= 0.01).mean()
+ epstats.add(result)
+
+ for addr, last in list(updated.items()):
+ if now - last >= args.episode_timeout:
+ print('Dropping episode statistics due to timeout.')
+ del episodes[addr]
+ del updated[addr]
+
+ server = portal.Server(args.logger_addr, 'Logger')
+ server.bind('add', addfn)
+ server.bind('tran', tranfn)
+ server.start(block=False)
+ last_step = int(logger.step)
+ while True:
+ time.sleep(1)
+ if should_log() and active > 0:
+ active.reset()
+ with elements.timer.section('metrics'):
+ logger.add({'timer/logger': elements.timer.stats()['summary']})
+ logger.add(parallel.result(), prefix='parallel')
+ logger.add(epstats.result(), prefix='epstats')
+ logger.add(usage.stats(), prefix='usage/logger')
+ logger.add(server.stats(), prefix='server/logger')
+ if logger.step == last_step:
+ continue
+ logger.write()
+ last_step = int(logger.step)
+ if should_save():
+ cp.save()
+
+
+@elements.timer.section('env')
+def parallel_env(make_env, envid, args, is_eval=False):
+ if isinstance(make_env, bytes):
+ make_env = cloudpickle.loads(make_env)
+ assert envid >= 0, envid
+ name = f'Env{envid:05}'
+ print = lambda x: elements.print(f'[{name}] {x}', flush=True)
+
+ should_log = embodied.LocalClock(args.log_every)
+ fps = elements.FPS()
+ if envid == 0:
+ logger = portal.Client(args.logger_addr, f'{name}Logger', maxinflight=1)
+ usage = elements.Usage(**args.usage.update(nvsmi=False))
+
+ print('Make env')
+ env = make_env(envid)
+ actor = portal.Client(args.actor_addr, name, autoconn=False)
+ actor.connect()
+
+ done = True
+ while True:
+
+ if done:
+ act = {k: v.sample() for k, v in env.act_space.items()}
+ act['reset'] = True
+ score, length = 0, 0
+
+ scope_name = 'reset' if act['reset'] else 'step'
+ with elements.timer.section(scope_name):
+ obs = env.step(act)
+ obs = {k: np.asarray(v, order='C') for k, v in obs.items()}
+ obs['is_eval'] = is_eval
+ score += obs['reward']
+ length += 1
+ fps.step(1)
+ done = obs['is_last']
+ if done and envid == 0:
+ print(f'Episode of length {length} with score {score:.2f}')
+
+ try:
+ with elements.timer.section('request'):
+ future = actor.act({'envid': envid, **obs})
+ with elements.timer.section('response'):
+ act = future.result()
+ except portal.Disconnected:
+ print('Env lost connection to agent')
+ actor.connect()
+ done = True
+
+ if should_log() and envid == 0:
+ stats = {}
+ stats['fps/env'] = fps.result()
+ stats['timer/env'] = elements.timer.stats()['summary']
+ stats.update(prefix(usage.stats(), 'usage/env'))
+ stats.update(prefix(logger.stats(), 'client/env_logger'))
+ stats.update(prefix(actor.stats(), 'client/env_actor'))
+ logger.add(stats)
+
+
+def parallel_envs(make_env, make_env_eval, args):
+ workers = []
+ for i in range(args.envs):
+ workers.append(portal.Process(parallel_env, make_env, i, args))
+ for i in range(args.envs, args.envs + args.eval_envs):
+ workers.append(portal.Process(parallel_env, make_env_eval, i, args, True))
+ portal.run(workers)
diff --git a/models/embodied/run/train.py b/models/embodied/run/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc791ec79f39640ebf5ff4a634500e98b279e088
--- /dev/null
+++ b/models/embodied/run/train.py
@@ -0,0 +1,119 @@
+import collections
+from functools import partial as bind
+
+import elements
+import embodied
+import numpy as np
+
+
+def train(make_agent, make_replay, make_env, make_stream, make_logger, args):
+
+ agent = make_agent()
+ replay = make_replay()
+ logger = make_logger()
+
+ logdir = elements.Path(args.logdir)
+ step = logger.step
+ usage = elements.Usage(**args.usage)
+ train_agg = elements.Agg()
+ epstats = elements.Agg()
+ episodes = collections.defaultdict(elements.Agg)
+ policy_fps = elements.FPS()
+ train_fps = elements.FPS()
+
+ batch_steps = args.batch_size * args.batch_length
+ should_train = elements.when.Ratio(args.train_ratio / batch_steps)
+ should_log = embodied.LocalClock(args.log_every)
+ should_report = embodied.LocalClock(args.report_every)
+ should_save = embodied.LocalClock(args.save_every)
+
+ @elements.timer.section('logfn')
+ def logfn(tran, worker):
+ episode = episodes[worker]
+ tran['is_first'] and episode.reset()
+ episode.add('score', tran['reward'], agg='sum')
+ episode.add('length', 1, agg='sum')
+ episode.add('rewards', tran['reward'], agg='stack')
+ for key, value in tran.items():
+ if value.dtype == np.uint8 and value.ndim == 3:
+ if worker == 0:
+ episode.add(f'policy_{key}', value, agg='stack')
+ elif key.startswith('log/'):
+ assert value.ndim == 0, (key, value.shape, value.dtype)
+ episode.add(key + '/avg', value, agg='avg')
+ episode.add(key + '/max', value, agg='max')
+ episode.add(key + '/sum', value, agg='sum')
+ if tran['is_last']:
+ result = episode.result()
+ logger.add({
+ 'score': result.pop('score'),
+ 'length': result.pop('length'),
+ }, prefix='episode')
+ rew = result.pop('rewards')
+ if len(rew) > 1:
+ result['reward_rate'] = (np.abs(rew[1:] - rew[:-1]) >= 0.01).mean()
+ epstats.add(result)
+
+ fns = [bind(make_env, i) for i in range(args.envs)]
+ driver = embodied.Driver(fns, parallel=not args.debug)
+ driver.on_step(lambda tran, _: step.increment())
+ driver.on_step(lambda tran, _: policy_fps.step())
+ driver.on_step(replay.add)
+ driver.on_step(logfn)
+
+ stream_train = iter(agent.stream(make_stream(replay, 'train')))
+ stream_report = iter(agent.stream(make_stream(replay, 'report')))
+
+ carry_train = [agent.init_train(args.batch_size)]
+ carry_report = agent.init_report(args.batch_size)
+
+ def trainfn(tran, worker):
+ if len(replay) < args.batch_size * args.batch_length:
+ return
+ for _ in range(should_train(step)):
+ with elements.timer.section('stream_next'):
+ batch = next(stream_train)
+ carry_train[0], outs, mets = agent.train(carry_train[0], batch)
+ train_fps.step(batch_steps)
+ if 'replay' in outs:
+ replay.update(outs['replay'])
+ train_agg.add(mets, prefix='train')
+ driver.on_step(trainfn)
+
+ cp = elements.Checkpoint(logdir / 'ckpt')
+ cp.step = step
+ cp.agent = agent
+ cp.replay = replay
+ if args.from_checkpoint:
+ elements.checkpoint.load(args.from_checkpoint, dict(
+ agent=bind(agent.load, regex=args.from_checkpoint_regex)))
+ cp.load_or_save()
+
+ print('Start training loop')
+ policy = lambda *args: agent.policy(*args, mode='train')
+ driver.reset(agent.init_policy)
+ while step < args.steps:
+
+ driver(policy, steps=10)
+
+ if should_report(step) and len(replay):
+ agg = elements.Agg()
+ for _ in range(args.consec_report * args.report_batches):
+ carry_report, mets = agent.report(carry_report, next(stream_report))
+ agg.add(mets)
+ logger.add(agg.result(), prefix='report')
+
+ if should_log(step):
+ logger.add(train_agg.result())
+ logger.add(epstats.result(), prefix='epstats')
+ logger.add(replay.stats(), prefix='replay')
+ logger.add(usage.stats(), prefix='usage')
+ logger.add({'fps/policy': policy_fps.result()})
+ logger.add({'fps/train': train_fps.result()})
+ logger.add({'timer': elements.timer.stats()['summary']})
+ logger.write()
+
+ if should_save(step):
+ cp.save()
+
+ logger.close()
diff --git a/models/embodied/run/train_eval.py b/models/embodied/run/train_eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..5730ca296ced4b60a4efae0caff9d8cc4e9dec54
--- /dev/null
+++ b/models/embodied/run/train_eval.py
@@ -0,0 +1,158 @@
+import collections
+from functools import partial as bind
+
+import elements
+import embodied
+import numpy as np
+
+
+def train_eval(
+ make_agent,
+ make_replay_train,
+ make_replay_eval,
+ make_env_train,
+ make_env_eval,
+ make_stream,
+ make_logger,
+ args):
+
+ agent = make_agent()
+ replay_train = make_replay_train()
+ replay_eval = make_replay_eval()
+ logger = make_logger()
+
+ logdir = elements.Path(args.logdir)
+ logdir.mkdir()
+ print('Logdir', logdir)
+ step = logger.step
+ usage = elements.Usage(**args.usage)
+ agg = elements.Agg()
+ train_episodes = collections.defaultdict(elements.Agg)
+ train_epstats = elements.Agg()
+ eval_episodes = collections.defaultdict(elements.Agg)
+ eval_epstats = elements.Agg()
+ policy_fps = elements.FPS()
+ train_fps = elements.FPS()
+
+ batch_steps = args.batch_size * args.batch_length
+ should_train = elements.when.Ratio(args.train_ratio / batch_steps)
+ should_log = elements.when.Clock(args.log_every)
+ should_report = elements.when.Clock(args.report_every)
+ should_save = elements.when.Clock(args.save_every)
+
+ @elements.timer.section('logfn')
+ def logfn(tran, worker, mode):
+ episodes = dict(train=train_episodes, eval=eval_episodes)[mode]
+ epstats = dict(train=train_epstats, eval=eval_epstats)[mode]
+ episode = episodes[worker]
+ tran['is_first'] and episode.reset()
+ episode.add('score', tran['reward'], agg='sum')
+ episode.add('length', 1, agg='sum')
+ episode.add('rewards', tran['reward'], agg='stack')
+ for key, value in tran.items():
+ if value.dtype == np.uint8 and value.ndim == 3:
+ if worker == 0:
+ episode.add(f'policy_{key}', value, agg='stack')
+ elif key.startswith('log/'):
+ assert value.ndim == 0, (key, value.shape, value.dtype)
+ episode.add(key + '/avg', value, agg='avg')
+ episode.add(key + '/max', value, agg='max')
+ episode.add(key + '/sum', value, agg='sum')
+ if tran['is_last']:
+ result = episode.result()
+ logger.add({
+ 'score': result.pop('score'),
+ 'length': result.pop('length'),
+ }, prefix='episode')
+ rew = result.pop('rewards')
+ if len(rew) > 1:
+ result['reward_rate'] = (np.abs(rew[1:] - rew[:-1]) >= 0.01).mean()
+ epstats.add(result)
+
+ fns = [bind(make_env_train, i) for i in range(args.envs)]
+ driver_train = embodied.Driver(fns, parallel=(not args.debug))
+ driver_train.on_step(lambda tran, _: step.increment())
+ driver_train.on_step(lambda tran, _: policy_fps.step())
+ driver_train.on_step(replay_train.add)
+ driver_train.on_step(bind(logfn, mode='train'))
+
+ fns = [bind(make_env_eval, i) for i in range(args.eval_envs)]
+ driver_eval = embodied.Driver(fns, parallel=(not args.debug))
+ driver_eval.on_step(replay_eval.add)
+ driver_eval.on_step(bind(logfn, mode='eval'))
+ driver_eval.on_step(lambda tran, _: policy_fps.step())
+
+ stream_train = iter(agent.stream(make_stream(replay_train, 'train')))
+ stream_report = iter(agent.stream(make_stream(replay_train, 'report')))
+ stream_eval = iter(agent.stream(make_stream(replay_eval, 'eval')))
+
+ carry_train = [agent.init_train(args.batch_size)]
+ carry_report = agent.init_report(args.batch_size)
+ carry_eval = agent.init_report(args.batch_size)
+
+ def trainfn(tran, worker):
+ if len(replay_train) < args.batch_size * args.batch_length:
+ return
+ for _ in range(should_train(step)):
+ with elements.timer.section('stream_next'):
+ batch = next(stream_train)
+ carry_train[0], outs, mets = agent.train(carry_train[0], batch)
+ train_fps.step(batch_steps)
+ if 'replay' in outs:
+ replay_train.update(outs['replay'])
+ agg.add(mets, prefix='train')
+ driver_train.on_step(trainfn)
+
+ def reportfn(carry, stream):
+ agg = elements.Agg()
+ for _ in range(args.report_batches):
+ batch = next(stream)
+ carry, mets = agent.report(carry, batch)
+ agg.add(mets)
+ return carry, agg.result()
+
+ cp = elements.Checkpoint(logdir / 'ckpt')
+ cp.step = step
+ cp.agent = agent
+ cp.replay_train = replay_train
+ cp.replay_eval = replay_eval
+ if args.from_checkpoint:
+ elements.checkpoint.load(args.from_checkpoint, dict(
+ agent=bind(agent.load, regex=args.from_checkpoint_regex)))
+ cp.load_or_save()
+ should_save(step) # Register that we just saved.
+
+ print('Start training loop')
+ train_policy = lambda *args: agent.policy(*args, mode='train')
+ eval_policy = lambda *args: agent.policy(*args, mode='eval')
+ driver_train.reset(agent.init_policy)
+ while step < args.steps:
+
+ if should_report(step):
+ print('Evaluation')
+ driver_eval.reset(agent.init_policy)
+ driver_eval(eval_policy, episodes=args.eval_eps)
+ logger.add(eval_epstats.result(), prefix='epstats')
+ if len(replay_train):
+ carry_report, mets = reportfn(carry_report, stream_report)
+ logger.add(mets, prefix='report')
+ if len(replay_eval):
+ carry_eval, mets = reportfn(carry_eval, stream_eval)
+ logger.add(mets, prefix='eval')
+
+ driver_train(train_policy, steps=10)
+
+ if should_log(step):
+ logger.add(agg.result())
+ logger.add(train_epstats.result(), prefix='epstats')
+ logger.add(replay_train.stats(), prefix='replay')
+ logger.add(usage.stats(), prefix='usage')
+ logger.add({'fps/policy': policy_fps.result()})
+ logger.add({'fps/train': train_fps.result()})
+ logger.add({'timer': elements.timer.stats()['summary']})
+ logger.write()
+
+ if should_save(step):
+ cp.save()
+
+ logger.close()
diff --git a/models/embodied/tests/test_driver.py b/models/embodied/tests/test_driver.py
new file mode 100644
index 0000000000000000000000000000000000000000..00150e107e7c72e6507115d367ce2ac1c11667d6
--- /dev/null
+++ b/models/embodied/tests/test_driver.py
@@ -0,0 +1,122 @@
+from functools import partial as bind
+
+import embodied
+import numpy as np
+
+
+class TestDriver:
+
+ def test_episode_length(self):
+ agent = self._make_agent()
+ driver = embodied.Driver([self._make_env])
+ driver.reset(agent.init_policy)
+ seq = []
+ driver.on_step(lambda tran, _: seq.append(tran))
+ driver(agent.policy, episodes=1)
+ assert len(seq) == 11
+
+ def test_first_step(self):
+ agent = self._make_agent()
+ driver = embodied.Driver([self._make_env])
+ driver.reset(agent.init_policy)
+ seq = []
+ driver.on_step(lambda tran, _: seq.append(tran))
+ driver(agent.policy, episodes=2)
+ for index in [0, 11]:
+ assert seq[index]['is_first'].item() is True
+ assert seq[index]['is_last'].item() is False
+ for index in [1, 10, 12]:
+ assert seq[index]['is_first'].item() is False
+
+ def test_last_step(self):
+ agent = self._make_agent()
+ driver = embodied.Driver([self._make_env])
+ driver.reset(agent.init_policy)
+ seq = []
+ driver.on_step(lambda tran, _: seq.append(tran))
+ driver(agent.policy, episodes=2)
+ for index in [10, 21]:
+ assert seq[index]['is_last'].item() is True
+ assert seq[index]['is_first'].item() is False
+ for index in [0, 1, 9, 11, 20]:
+ assert seq[index]['is_last'].item() is False
+
+ def test_env_reset(self):
+ agent = self._make_agent()
+ driver = embodied.Driver([bind(self._make_env, length=5)])
+ driver.reset(agent.init_policy)
+ seq = []
+ driver.on_step(lambda tran, _: seq.append(tran))
+ action = {'act_disc': np.ones(1, int), 'act_cont': np.zeros((1, 6), float)}
+ policy = lambda carry, obs: (carry, action, {})
+ driver(policy, episodes=2)
+ assert len(seq) == 12
+ seq = {k: np.array([seq[i][k] for i in range(len(seq))]) for k in seq[0]}
+ assert (seq['is_first'] == [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]).all()
+ assert (seq['is_last'] == [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1]).all()
+ assert (seq['reset'] == [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1]).all()
+ assert (seq['act_disc'] == [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0]).all()
+
+ def test_agent_inputs(self):
+ agent = self._make_agent()
+ driver = embodied.Driver([self._make_env])
+ driver.reset(agent.init_policy)
+ inputs = []
+ states = []
+ def policy(carry, obs, mode='train'):
+ inputs.append(obs)
+ states.append(carry)
+ _, act, _ = agent.policy(carry, obs, mode)
+ return 'carry', act, {}
+ seq = []
+ driver.on_step(lambda tran, _: seq.append(tran))
+ driver(policy, episodes=2)
+ assert len(seq) == 22
+ assert states == ([()] + ['carry'] * 21)
+ for index in [0, 11]:
+ assert inputs[index]['is_first'].item() is True
+ for index in [1, 10, 12, 21]:
+ assert inputs[index]['is_first'].item() is False
+ for index in [10, 21]:
+ assert inputs[index]['is_last'].item() is True
+ for index in [0, 1, 9, 11, 20]:
+ assert inputs[index]['is_last'].item() is False
+
+ def test_unexpected_reset(self):
+
+ class UnexpectedReset(embodied.Wrapper):
+    """Send is_first without preceding is_last."""
+ def __init__(self, env, when):
+ super().__init__(env)
+ self._when = when
+ self._step = 0
+ def step(self, action):
+ if self._step == self._when:
+ action = action.copy()
+ action['reset'] = np.ones_like(action['reset'])
+ self._step += 1
+ return self.env.step(action)
+
+ env = self._make_env(length=4)
+ env = UnexpectedReset(env, when=3)
+ agent = self._make_agent()
+ driver = embodied.Driver([lambda: env])
+ driver.reset(agent.init_policy)
+ steps = []
+ driver.on_step(lambda tran, _: steps.append(tran))
+ driver(agent.policy, episodes=1)
+ assert len(steps) == 8
+ steps = {k: np.array([x[k] for x in steps]) for k in steps[0]}
+ assert (steps['reset'] == [0, 0, 0, 0, 0, 0, 0, 1]).all()
+ assert (steps['is_first'] == [1, 0, 0, 1, 0, 0, 0, 0]).all()
+ assert (steps['is_last'] == [0, 0, 0, 0, 0, 0, 0, 1]).all()
+
+ def _make_env(self, length=10):
+ from embodied.envs import dummy
+ return dummy.Dummy('disc', length=length)
+
+ def _make_agent(self):
+ env = self._make_env()
+ agent = embodied.RandomAgent(env.obs_space, env.act_space)
+ env.close()
+ return agent
diff --git a/models/embodied/tests/test_layer_scan.py b/models/embodied/tests/test_layer_scan.py
new file mode 100644
index 0000000000000000000000000000000000000000..db607b4eb23a1e8dfd700fb82aac9cc398c3449f
--- /dev/null
+++ b/models/embodied/tests/test_layer_scan.py
@@ -0,0 +1,102 @@
+import jax
+import jax.numpy as jnp
+import ninjax as nj
+import numpy as np
+
+from embodied.jax import utils
+
+f32 = jnp.float32
+i32 = jnp.int32
+
+
+
+class Layer(nj.Module):
+
+ units: int = 8
+
+ def __call__(self, x, c, k):
+ assert x.shape[1:] == (self.units,)
+ assert c.shape == (7,)
+ assert k.shape == (13, 7)
+ shape = (x.shape[-1], self.units)
+ winit = lambda: jax.random.normal(nj.seed(), shape, f32)
+ x = x @ self.value('kernel', winit)
+ if 'outer3' not in nj.context():
+ nj.context()['outer3'] = jnp.zeros((), i32)
+ nj.context()['outer3'] += 1
+ nj.context()['outer1'] += 1
+ inner = self.value('inner', jnp.array(0))
+ self.write('inner', inner + nj.context()['outer2'])
+ return x
+
+
+class Net(nj.Module):
+
+ layers: int = 4
+ units: int = 8
+
+ def __call__(self, x):
+ if 'outer1' not in nj.context():
+ nj.context()['outer1'] = jnp.ones((), i32)
+ if 'outer2' not in nj.context():
+ nj.context()['outer2'] = jnp.ones((), i32)
+ nj.context()['outer1'] += 1
+
+ module = self.sub('linear', Layer, units=self.units)
+ c = jnp.zeros((self.layers, 7))
+ k = jnp.zeros((13, 7))
+ x = utils.LayerScan(module, self.layers)(x, c, k=k)
+
+ return x
+
+ def loss(self, x):
+ return self(x).mean()
+
+
+class TestLayerScan:
+
+ def test_init(self, L=4, B=2, D=8):
+ x = np.random.normal(0, 1, (B, D))
+ net = Net(layers=L, units=D, name='net')
+ params = nj.init(net)({}, x, seed=0)
+ assert set(params.keys()) == {
+ 'outer1', 'outer2', 'outer3',
+ 'net/linear/kernel', 'net/linear/inner'}
+ assert params['net/linear/kernel'].shape == (L, D, D)
+ assert params['outer1'] == 1
+ assert params['outer2'] == 1
+ assert params['outer3'] == 0
+ assert params['net/linear/inner'].shape == (L,)
+ assert (params['net/linear/inner'] == 0).all()
+ for i in range(1, L):
+ assert not jnp.allclose(
+ params['net/linear/kernel'][0],
+ params['net/linear/kernel'][i])
+
+ def test_apply(self, L=4, B=2, D=8):
+ x = np.random.normal(0, 1, (B, D))
+ net = Net(layers=L, units=D, name='net')
+ params = nj.init(net)({}, x, seed=0)
+ params, out = nj.pure(net)(params, x)
+ assert out.shape == (B, D)
+ assert params['outer1'] == L + 2
+ assert params['outer2'] == 1
+ assert params['outer3'] == L
+ assert params['net/linear/inner'].shape == (L,)
+ assert (params['net/linear/inner'] == 1).all()
+
+ def test_grad(self, L=4, B=2, D=8):
+ x = np.random.normal(0, 1, (B, D))
+ net = Net(layers=L, units=D, name='net')
+ def fn(x):
+ if nj.creating():
+ net(x)
+ params = {k: v for k, v in net.values.items() if v.dtype == f32}
+ params = {net.path + '/' + k: v for k, v in params.items()}
+ loss, _, grads = nj.grad(lambda x: net(x).mean(), params.keys())(x)
+ params = {k: v - 0.1 * grads[k] for k, v in params.items()}
+ nj.context().update(params)
+ return loss
+ params = nj.init(net)({}, x, seed=0)
+ params, loss = nj.pure(fn)(params, x)
+ assert loss.shape == ()
diff --git a/models/embodied/tests/test_parallel.py b/models/embodied/tests/test_parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b5fa9a98dd4ebe93151319dab0ad8e5e1a252fd
--- /dev/null
+++ b/models/embodied/tests/test_parallel.py
@@ -0,0 +1,106 @@
+from collections import deque
+from functools import partial as bind
+
+import elements
+import embodied
+import numpy as np
+import pytest
+import zerofun
+from embodied.envs import dummy
+
+import utils
+
+
+class TestParallel:
+
+ @pytest.mark.parametrize('train_ratio, eval_envs', (
+ (-1, 2), (1, 2), (1, 0), (32, 2),
+ ))
+ def test_run_loop(self, tmpdir, train_ratio, eval_envs):
+ addr = 'ipc:///tmp/teststats'
+ received = deque(maxlen=1)
+ server = zerofun.Server(addr, name='TestStats')
+ server.bind('report', lambda stats: received.append(stats))
+ server.start()
+
+ args = self._make_args(tmpdir, train_ratio, eval_envs)
+
+ embodied.run.parallel.combined(
+ bind(self._make_agent, addr),
+ bind(self._make_replay, args),
+ bind(self._make_replay, args),
+ self._make_env,
+ self._make_env,
+ self._make_logger, args)
+
+ stats = received[0]
+ print('Stats:', stats)
+ assert stats['env_steps'] > 400
+ if args.train_ratio > -1:
+ replay_steps = stats['env_steps'] * args.train_ratio
+ assert np.allclose(stats['replay_steps'], replay_steps, 100, 0.1)
+ else:
+ assert stats['replay_steps'] > 100
+ assert stats['reports'] >= 1
+ assert stats['saves'] >= 2
+ assert stats['loads'] == 0
+
+ embodied.run.parallel.combined(
+ bind(self._make_agent, addr),
+ bind(self._make_replay, args),
+ bind(self._make_replay, args),
+ self._make_env,
+ self._make_env,
+ self._make_logger, args)
+ stats = received[0]
+ assert stats['loads'] == 1
+
+ def _make_agent(self, queue):
+ env = self._make_env(0)
+ agent = utils.TestAgent(env.obs_space, env.act_space, queue)
+ env.close()
+ return agent
+
+ def _make_env(self, index):
+ return dummy.Dummy('disc', size=(64, 64), length=100)
+
+ def _make_replay(self, args, train_ratio=None):
+ kwargs = {'length': args.batch_length, 'capacity': 1e4}
+ if train_ratio:
+ kwargs['samples_per_insert'] = train_ratio / args.batch_length
+ return embodied.replay.Replay(**kwargs)
+
+ def _make_logger(self):
+ return elements.Logger(elements.Counter(), [
+ elements.logger.TerminalOutput(),
+ ])
+
+ def _make_args(self, logdir, train_ratio, eval_envs):
+ return elements.Config(
+ duration=5.0,
+ train_ratio=float(train_ratio),
+ log_every=0.1,
+ report_every=0.2,
+ save_every=0.2,
+ envs=4,
+ eval_envs=int(eval_envs),
+ report_batches=1,
+ from_checkpoint='',
+ episode_timeout=10,
+ actor_addr='tcp://localhost:{auto}',
+ replay_addr='tcp://localhost:{auto}',
+ logger_addr='tcp://localhost:{auto}',
+ ipv6=False,
+ actor_batch=-1,
+ actor_threads=2,
+ agent_process=False,
+ remote_replay=False,
+ remote_envs=False,
+ usage=dict(psutil=True, nvsmi=False),
+ debug=False,
+ logdir=str(logdir),
+ batch_size=8,
+ batch_length=16,
+ replay_context=0,
+ report_length=8,
+ )
diff --git a/models/embodied/tests/test_replay.py b/models/embodied/tests/test_replay.py
new file mode 100644
index 0000000000000000000000000000000000000000..89ba251a8750426fae6c7bdf88c44e136c67bcda
--- /dev/null
+++ b/models/embodied/tests/test_replay.py
@@ -0,0 +1,357 @@
+import collections
+import threading
+import time
+
+import elements
+import embodied
+import numpy as np
+import pytest
+
+
+REPLAYS_UNLIMITED = [  # Default parametrization used by most replay tests below.
+    embodied.replay.Replay,
+    # embodied.replay.Reverb,
+]
+
+REPLAYS_SAVECHUNKS = [  # Used by the chunk-file save/restore tests.
+    embodied.replay.Replay,
+]
+
+REPLAYS_UNIFORM = [  # Used by the uniform-sampling frequency test.
+    embodied.replay.Replay,
+]
+
+
+def unbatched(dataset):  # Strip the leading batch dimension from each (batch-size-1) batch.
+  for batch in dataset:
+    yield {k: v[0] for k, v in batch.items()}
+
+
+@pytest.mark.filterwarnings('ignore:.*Pillow.*')
+@pytest.mark.filterwarnings('ignore:.*the imp module.*')
+@pytest.mark.filterwarnings('ignore:.*distutils.*')
+class TestReplay:  # Behavioral tests for embodied replay buffers (insert, sample, save/restore, threading).
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  def test_multiple_keys(self, Replay):  # Sampled sequences carry every added key plus 'stepid'.
+    replay = Replay(length=5, capacity=10)
+    for step in range(30):
+      replay.add({'image': np.zeros((64, 64, 3)), 'action': np.zeros(12)})
+    seq = next(unbatched(replay.dataset(1)))
+    assert set(seq.keys()) == {'stepid', 'image', 'action'}
+    assert seq['stepid'].shape == (5, 20)  # NOTE(review): presumably 20-byte step UUIDs; confirm.
+    assert seq['image'].shape == (5, 64, 64, 3)
+    assert seq['action'].shape == (5, 12)
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  @pytest.mark.parametrize(
+      'length,workers,capacity',
+      [(1, 1, 1), (2, 1, 2), (5, 1, 10), (1, 2, 2), (5, 3, 15), (2, 7, 20)])
+  def test_capacity_exact(self, Replay, length, workers, capacity):  # len() tracks inserts exactly, capped at capacity.
+    replay = Replay(length, capacity)
+    for step in range(30):
+      for worker in range(workers):
+        replay.add({'step': step}, worker)
+      target = min(workers * max(0, (step + 1) - length + 1), capacity)  # One item per full window per worker.
+      assert len(replay) == target
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  @pytest.mark.parametrize(
+      'length,workers,capacity,chunksize',
+      [(1, 1, 1, 128), (2, 1, 2, 128), (5, 1, 10, 128), (1, 2, 2, 128),
+       (5, 3, 15, 128), (2, 7, 20, 128), (7, 2, 27, 4)])
+  def test_sample_sequences(
+      self, Replay, length, workers, capacity, chunksize):  # Samples are contiguous and come from a single worker.
+    replay = Replay(length, capacity, chunksize=chunksize)
+    for step in range(30):
+      for worker in range(workers):
+        replay.add({'step': step, 'worker': worker}, worker)
+    dataset = unbatched(replay.dataset(1))
+    for _ in range(10):
+      seq = next(dataset)
+      assert (seq['step'] - seq['step'][0] == np.arange(length)).all()  # Consecutive steps.
+      assert (seq['worker'] == seq['worker'][0]).all()  # No mixing across workers.
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  @pytest.mark.parametrize(
+      'length,capacity', [(1, 1), (2, 2), (5, 10), (1, 2), (5, 15), (2, 20)])
+  def test_sample_single(self, Replay, length, capacity):  # With exactly one stored sequence, sampling always returns it.
+    replay = Replay(length, capacity)
+    for step in range(length):
+      replay.add({'step': step})
+    dataset = unbatched(replay.dataset(1))
+    for _ in range(10):
+      seq = next(dataset)
+      assert (seq['step'] == np.arange(length)).all()
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNIFORM)
+  def test_sample_uniform(self, Replay):  # All stored items are sampled roughly equally often.
+    replay = Replay(capacity=20, length=5, seed=0)
+    for step in range(7):
+      replay.add({'step': step})
+    assert len(replay) == 3  # 7 - 5 + 1 overlapping windows.
+    histogram = collections.defaultdict(int)
+    dataset = unbatched(replay.dataset(1))
+    for _ in range(100):
+      seq = next(dataset)
+      histogram[seq['step'][0]] += 1
+    assert len(histogram) == 3, histogram
+    histogram = tuple(histogram.values())
+    assert histogram[0] > 20  # Expected roughly 33 each out of 100 draws.
+    assert histogram[1] > 20
+    assert histogram[2] > 20
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  def test_workers_simple(self, Replay):  # Steps interleaved across workers stay per-worker contiguous.
+    replay = Replay(length=2, capacity=20)
+    replay.add({'step': 0}, worker=0)
+    replay.add({'step': 1}, worker=1)
+    replay.add({'step': 2}, worker=0)
+    replay.add({'step': 3}, worker=1)
+    dataset = unbatched(replay.dataset(1))
+    for _ in range(10):
+      seq = next(dataset)
+      assert tuple(seq['step']) in ((0, 2), (1, 3))
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  def test_workers_random(self, Replay, length=4, capacity=30):  # Random worker interleaving preserves per-stream order.
+    rng = np.random.default_rng(seed=0)
+    replay = Replay(length, capacity)
+    streams = {i: iter(range(10)) for i in range(3)}
+    for _ in range(40):
+      worker = int(rng.integers(0, 3, ()))
+      try:
+        step = {'step': next(streams[worker]), 'stream': worker}
+        replay.add(step, worker=worker)
+      except StopIteration:
+        pass  # Stream exhausted; keep drawing from the other workers.
+    histogram = collections.defaultdict(int)
+    dataset = unbatched(replay.dataset(1))
+    for _ in range(10):
+      seq = next(dataset)
+      assert (seq['step'] - seq['step'][0] == np.arange(length)).all()
+      assert (seq['stream'] == seq['stream'][0]).all()
+      histogram[int(seq['stream'][0])] += 1
+    assert all(count > 0 for count in histogram.values())
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  @pytest.mark.parametrize(
+      'length,workers,capacity',
+      [(1, 1, 1), (2, 1, 2), (5, 1, 10), (1, 2, 2), (5, 3, 15), (2, 7, 20)])
+  def test_worker_delay(self, Replay, length, workers, capacity):  # Adding from a shrinking worker set must not crash.
+    replay = Replay(length, capacity)
+    rng = np.random.default_rng(seed=0)
+    streams = [iter(range(10)) for _ in range(workers)]
+    while streams:
+      try:
+        worker = rng.integers(0, len(streams))
+        replay.add({'step': next(streams[worker])}, worker)
+      except StopIteration:
+        del streams[worker]  # Retire the exhausted stream; indices shift for the rest.
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  @pytest.mark.parametrize(
+      'length,capacity,chunksize',
+      [(1, 1, 128), (3, 10, 128), (5, 100, 128), (5, 25, 2)])
+  def test_restore_exact(self, tmpdir, Replay, length, capacity, chunksize):  # save() then load() restores the item count.
+    elements.UUID.reset(debug=True)  # Deterministic IDs so runs are reproducible.
+    replay = Replay(
+        length, capacity, directory=tmpdir, chunksize=chunksize,
+        save_wait=True)
+    for step in range(30):
+      replay.add({'step': step})
+    num_items = np.clip(30 - length + 1, 0, capacity)
+    assert len(replay) == num_items
+    data = replay.save()
+    replay = Replay(length, capacity, directory=tmpdir)
+    replay.load(data)
+    assert len(replay) == num_items
+    dataset = unbatched(replay.dataset(1))
+    for _ in range(len(replay)):
+      assert len(next(dataset)['step']) == length
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  @pytest.mark.parametrize(
+      'length,capacity,chunksize',
+      [(1, 1, 128), (3, 10, 128), (5, 100, 128), (5, 25, 2)])
+  def test_restore_noclear(self, tmpdir, Replay, length, capacity, chunksize):  # load() after further inserts restores old data.
+    elements.UUID.reset(debug=True)
+    replay = Replay(
+        length, capacity, directory=tmpdir, chunksize=chunksize,
+        save_wait=True)
+    for _ in range(30):
+      replay.add({'foo': 13})
+    num_items = np.clip(30 - length + 1, 0, capacity)
+    assert len(replay) == num_items
+    data = replay.save()
+    for _ in range(30):
+      replay.add({'foo': 42})
+    replay.load(data)
+    dataset = unbatched(replay.dataset(1))
+    if capacity < num_items:  # NOTE(review): num_items is clipped to capacity above, so this branch can never run; condition looks inverted.
+      for _ in range(len(replay)):
+        assert next(dataset)['foo'] == 13
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  @pytest.mark.parametrize('workers', [1, 2, 5])
+  @pytest.mark.parametrize('length,capacity', [(1, 1), (3, 10), (5, 100)])
+  def test_restore_workers(self, tmpdir, Replay, workers, length, capacity):  # Restore preserves items across multiple workers.
+    capacity *= workers
+    replay = Replay(
+        length, capacity, directory=tmpdir, save_wait=True)
+    for step in range(50):
+      for worker in range(workers):
+        replay.add({'step': step}, worker)
+    num_items = np.clip((50 - length + 1) * workers, 0, capacity)
+    assert len(replay) == num_items
+    data = replay.save()
+    replay = Replay(length, capacity, directory=tmpdir)
+    replay.load(data)
+    assert len(replay) == num_items
+    dataset = unbatched(replay.dataset(1))
+    for _ in range(len(replay)):
+      assert len(next(dataset)['step']) == length
+
+  @pytest.mark.parametrize('Replay', REPLAYS_SAVECHUNKS)
+  @pytest.mark.parametrize(
+      'length,capacity,chunksize', [(1, 1, 1), (3, 10, 5), (5, 100, 12)])
+  def test_restore_chunks_exact(
+      self, tmpdir, Replay, length, capacity, chunksize):  # Chunk files on disk match the retained steps exactly.
+    elements.UUID.reset(debug=True)
+    assert len(list(elements.Path(tmpdir).glob('*.npz'))) == 0
+    replay = Replay(
+        length, capacity, directory=tmpdir, chunksize=chunksize,
+        save_wait=True)
+    for step in range(30):
+      replay.add({'step': step})
+    num_items = np.clip(30 - length + 1, 0, capacity)
+    assert len(replay) == num_items
+    data = replay.save()
+    filenames = list(elements.Path(tmpdir).glob('*.npz'))
+    lengths = [int(x.stem.split('-')[3]) for x in filenames]  # NOTE(review): presumably filename field 3 is the chunk's step count; confirm naming scheme.
+    stored_steps = min(capacity + length - 1, 30)  # Items plus the length-1 overlap tail.
+    total_chunks = int(np.ceil(30 / chunksize))
+    pruned_chunks = int(np.floor((30 - stored_steps) / chunksize))  # Fully evicted chunks get deleted.
+    assert len(filenames) == total_chunks - pruned_chunks
+    last_chunk_empty = total_chunks * chunksize - 30  # Unused slots in the final partial chunk.
+    saved_steps = (total_chunks - pruned_chunks) * chunksize - last_chunk_empty
+    assert sum(lengths) == saved_steps
+    assert all(1 <= x <= chunksize for x in lengths)
+    replay = Replay(length, capacity, directory=tmpdir, chunksize=chunksize)
+    replay.load(data)
+    assert sorted(elements.Path(tmpdir).glob('*.npz')) == sorted(filenames)  # Loading must not rewrite chunk files.
+    assert len(replay) == num_items
+    dataset = unbatched(replay.dataset(1))
+    for _ in range(len(replay)):
+      assert len(next(dataset)['step']) == length
+
+  @pytest.mark.parametrize('Replay', REPLAYS_SAVECHUNKS)
+  @pytest.mark.parametrize('workers', [1, 2, 5])
+  @pytest.mark.parametrize(
+      'length,capacity,chunksize', [(1, 1, 1), (3, 10, 5), (5, 100, 12)])
+  def test_restore_chunks_workers(
+      self, tmpdir, Replay, workers, length, capacity, chunksize):  # Per-worker chunk accounting after save/load.
+    capacity *= workers
+    replay = Replay(
+        length, capacity, directory=tmpdir, chunksize=chunksize,
+        save_wait=True)
+    for step in range(50):
+      for worker in range(workers):
+        replay.add({'step': step}, worker)
+    num_items = np.clip((50 - length + 1) * workers, 0, capacity)
+    assert len(replay) == num_items
+    data = replay.save()
+    filenames = list(elements.Path(tmpdir).glob('*.npz'))
+    lengths = [int(x.stem.split('-')[3]) for x in filenames]
+    stored_steps = min(capacity // workers + length - 1, 50)  # Retained steps per worker.
+    total_chunks = int(np.ceil(50 / chunksize))
+    pruned_chunks = int(np.floor((50 - stored_steps) / chunksize))
+    assert len(filenames) == (total_chunks - pruned_chunks) * workers
+    last_chunk_empty = total_chunks * chunksize - 50
+    saved_steps = (total_chunks - pruned_chunks) * chunksize - last_chunk_empty
+    assert sum(lengths) == saved_steps * workers
+    replay = Replay(length, capacity, directory=tmpdir, chunksize=chunksize)
+    replay.load(data)
+    assert len(replay) == num_items
+    dataset = unbatched(replay.dataset(1))
+    for _ in range(len(replay)):
+      assert len(next(dataset)['step']) == length
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  @pytest.mark.parametrize(
+      'length,capacity,chunksize',
+      [(1, 1, 128), (3, 10, 128), (5, 100, 128), (5, 25, 2)])
+  def test_restore_insert(self, tmpdir, Replay, length, capacity, chunksize):  # Inserting continues cleanly after a restore.
+    elements.UUID.reset(debug=True)
+    replay = Replay(
+        length, capacity, directory=tmpdir, chunksize=chunksize,
+        save_wait=True)
+    inserts = int(1.5 * chunksize)  # Ends mid-chunk so load must resume a partial chunk.
+    for step in range(inserts):
+      replay.add({'step': step})
+    num_items = np.clip(inserts - length + 1, 0, capacity)
+    assert len(replay) == num_items
+    data = replay.save()
+    replay = Replay(length, capacity, directory=tmpdir)
+    replay.load(data)
+    assert len(replay) == num_items
+    dataset = unbatched(replay.dataset(1))
+    for _ in range(len(replay)):
+      assert len(next(dataset)['step']) == length
+    for step in range(inserts):
+      replay.add({'step': step})
+    num_items = np.clip(2 * (inserts - length + 1), 0, capacity)
+    assert len(replay) == num_items
+
+  @pytest.mark.parametrize('Replay', REPLAYS_UNLIMITED)
+  def test_threading(
+      self, tmpdir, Replay, length=5, capacity=128, chunksize=32,
+      adders=8, samplers=4):  # Concurrent add/sample/save/load smoke test.
+    elements.UUID.reset(debug=True)
+    replay = Replay(
+        length, capacity, directory=tmpdir, chunksize=chunksize,
+        save_wait=True)
+    running = [True]  # Mutable stop flag shared with the worker threads.
+
+    def adder():
+      ident = threading.get_ident()  # Thread id doubles as the replay worker key.
+      step = 0
+      while running[0]:
+        replay.add({'step': step}, worker=ident)
+        step += 1
+        time.sleep(0.001)
+
+    def sampler():
+      dataset = unbatched(replay.dataset(1))
+      while running[0]:
+        seq = next(dataset)
+        assert (seq['step'] - seq['step'][0] == np.arange(length)).all()  # NOTE(review): assertion failures inside threads are not reported by pytest.
+        time.sleep(0.001)
+
+    workers = []
+    for _ in range(adders):
+      workers.append(threading.Thread(target=adder))
+    for _ in range(samplers):
+      workers.append(threading.Thread(target=sampler))
+
+    try:
+      [worker.start() for worker in workers]
+      for _ in range(4):
+
+        time.sleep(0.1)
+        stats = replay.stats()
+        assert stats['inserts'] > 0
+        assert stats['samples'] > 0
+
+        print('SAVING')
+        data = replay.save()
+        time.sleep(0.1)
+
+        print('LOADING')
+        replay.load(data)
+
+    finally:
+      running[0] = False  # Stop threads even if an assertion failed above.
+      [worker.join() for worker in workers]
+
+    assert len(replay) == capacity  # Enough inserts happened to fill the buffer.
diff --git a/models/embodied/tests/test_sampletree.py b/models/embodied/tests/test_sampletree.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d865eced4207d6928d32383b6cc229b238b1f15
--- /dev/null
+++ b/models/embodied/tests/test_sampletree.py
@@ -0,0 +1,195 @@
+import collections
+
+import numpy as np
+import pytest
+from embodied.core import selectors
+
+
+class TestSampleTree:  # Tests for the priority-sampling tree in embodied.core.selectors.
+
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  def test_root_sum(self, branching):  # Root priority equals the sum of inserted priorities.
+    tree = selectors.SampleTree(branching)
+    entries = range(50)
+    for index, uprob in enumerate(entries):
+      assert tree.root.uprob == sum(entries[:index])  # Checked before the insert below.
+      tree.insert(index, uprob)
+
+  @pytest.mark.parametrize('inserts', [1, 2, 10, 100])
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  def test_depth_inserts(self, inserts, branching):  # Tree stays balanced: every leaf at ceil(log_b(n)).
+    tree = selectors.SampleTree(branching)
+    for index in range(inserts):
+      tree.insert(index, 1)
+    assert len(tree) == inserts
+    depths = self._find_leave_depths(tree)
+    target = max(1, int(np.ceil(np.log(inserts) / np.log(branching))))
+    assert all(x == target for x in depths)
+
+  @pytest.mark.parametrize('inserts', [2, 10, 100])
+  @pytest.mark.parametrize('remove_every', [2, 3, 4])
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  def test_depth_removals(self, inserts, remove_every, branching):  # Removals keep remaining leaves at the insert-time depth.
+    tree = selectors.SampleTree(branching)
+    for index in range(0, inserts, 1):
+      tree.insert(index, 1)
+    removals = list(range(0, inserts, remove_every))
+    for index in removals:
+      tree.remove(index)
+    assert len(tree) == inserts - len(removals)
+    depths = self._find_leave_depths(tree)
+    target = max(1, int(np.ceil(np.log(inserts) / np.log(branching))))
+    assert all(x == target for x in depths)
+
+  @pytest.mark.parametrize('inserts', [2, 10, 100])
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  def test_removal_num_nodes(self, inserts, branching):  # Node count returns to baseline after full removal and is reproducible.
+    tree = selectors.SampleTree(branching)
+    assert len(self._get_flat_nodes(tree)) == 1  # Empty tree is just the root.
+    rng = np.random.default_rng(seed=0)
+    for key in rng.permutation(np.arange(inserts)):
+      tree.insert(key, 1)
+    num_nodes = len(self._get_flat_nodes(tree))
+    for key in rng.permutation(np.arange(inserts)):
+      tree.remove(key)
+    assert len(self._get_flat_nodes(tree)) == 1
+    for key in rng.permutation(np.arange(inserts)):
+      tree.insert(key, 1)
+    assert len(self._get_flat_nodes(tree)) == num_nodes
+
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  def test_sample_single(self, branching):  # The only remaining key is always sampled.
+    tree = selectors.SampleTree(branching)
+    tree.insert(12, 1.0)
+    tree.insert(123, 1.0)
+    tree.insert(42, 1.0)
+    tree.remove(12)
+    tree.remove(42)
+    for _ in range(10):
+      assert tree.sample() == 123
+
+  @pytest.mark.parametrize('inserts', [2, 10])
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  @pytest.mark.parametrize('uprob', [1e-5, 1.0, 1e5])
+  def test_sample_uniform(self, inserts, branching, uprob):  # NOTE(review): parametrized 'uprob' is unused below — presumably insert should use it.
+    tree = selectors.SampleTree(branching, seed=0)
+    keys = list(range(inserts))
+    for key in keys:
+      tree.insert(key, 1.0)
+    for key in keys[::3]:  # Slice copy, so removing from 'keys' during the loop is safe.
+      tree.remove(key)
+      keys.remove(key)
+    histogram = collections.defaultdict(int)
+    for _ in range(100 * len(keys)):
+      key = tree.sample()
+      histogram[key] += 1
+    assert len(histogram) > 0
+    assert len(histogram) == len(keys)
+    assert all(k in histogram for k in keys)
+    for key, count in histogram.items():
+      prob = count / (100 * len(keys))
+      assert prob > 0.5 * (1 / len(keys))  # At least half the uniform rate.
+
+  @pytest.mark.parametrize('scale', [1e-5, 1, 1e5])
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  def test_sample_frequencies(self, scale, branching):  # Sampling frequency tracks relative priority, invariant to scale.
+    tree = selectors.SampleTree(branching, seed=0)
+    keys = [0, 1, 2, 3, 4, 5]
+    uprobs = [0, 3, 1, 1, 2, 2]
+    entries = dict(zip(keys, uprobs))
+    for key, uprob in entries.items():
+      tree.insert(key, scale * uprob)
+    histogram = collections.defaultdict(int)
+    for _ in range(100 * len(entries)):
+      key = tree.sample()
+      histogram[key] += 1
+    assert len(histogram) > 0
+    total = sum(entries.values())
+    for key, uprob in entries.items():
+      if uprob == 0:
+        assert key not in histogram  # Zero priority must never be drawn.
+    for key, count in histogram.items():
+      prob = count / (100 * len(entries))
+      target = entries[key] / total
+      assert 0.7 * target < prob < 1.3 * target  # Within 30% of expectation.
+
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  def test_update_frequencies(self, branching):  # update() overrides the priority given at insert time.
+    tree = selectors.SampleTree(branching, seed=0)
+    keys = [0, 1, 2, 3, 4, 5]
+    uprobs = [0, 3, 1, 1, 2, 2]
+    entries = dict(zip(keys, uprobs))
+    for key in entries.keys():
+      tree.insert(key, 100)
+    for key, uprob in entries.items():
+      tree.update(key, uprob)
+    histogram = collections.defaultdict(int)
+    for _ in range(100 * len(entries)):
+      key = tree.sample()
+      histogram[key] += 1
+    assert len(histogram) > 0
+    total = sum(entries.values())
+    for key, uprob in entries.items():
+      if uprob == 0:
+        assert key not in histogram
+    for key, count in histogram.items():
+      prob = count / (100 * len(entries))
+      target = entries[key] / total
+      assert 0.7 * target < prob < 1.3 * target
+
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  def test_zero_probs_mixed(self, branching):  # Zero-priority keys are never drawn while positive ones exist.
+    tree = selectors.SampleTree(branching, seed=0)
+    impossible = []
+    for index in range(100):
+      if index % 3 == 0:
+        tree.insert(index, 1.0)
+      else:
+        tree.insert(index, 0.0)
+        impossible.append(index)
+    for _ in range(1000):
+      assert tree.sample() not in impossible
+
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  def test_zero_probs_only(self, branching):  # All-zero priorities still yield some valid key.
+    tree = selectors.SampleTree(branching, seed=0)
+    for index in range(100):
+      tree.insert(index, 0.0)
+    for _ in range(1000):
+      assert tree.sample() in range(100)
+
+  @pytest.mark.parametrize('branching', [2, 3, 5, 10])
+  def test_infinity_probs(self, branching):  # Infinite priorities dominate finite ones.
+    tree = selectors.SampleTree(branching, seed=0)
+    possible = []
+    for index in range(100):
+      if index % 3 == 0:
+        tree.insert(index, np.inf)
+        possible.append(index)
+      else:
+        tree.insert(index, 1.0)
+    for _ in range(1000):
+      assert tree.sample() in possible
+
+  def _find_leave_depths(self, tree):  # Depth of every leaf via DFS; leaves are nodes without 'children'.
+    depths = []
+    queue = [(tree.root, 0)]
+    while queue:
+      node, depth = queue.pop()
+      if hasattr(node, 'children'):
+        for child in node.children:
+          queue.append((child, depth + 1))
+      else:
+        depths.append(depth)
+    assert len(depths) > 0
+    return depths
+
+  def _get_flat_nodes(self, tree):  # All internal and leaf nodes, collected via DFS.
+    nodes = []
+    queue = [tree.root]
+    while queue:
+      node = queue.pop()
+      nodes.append(node)
+      if hasattr(node, 'children'):
+        queue += node.children
+    return nodes
diff --git a/models/embodied/tests/test_train.py b/models/embodied/tests/test_train.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f76fc12ff297d2cf28be02ee85883a7348eac62
--- /dev/null
+++ b/models/embodied/tests/test_train.py
@@ -0,0 +1,71 @@
+from functools import partial as bind
+
+import elements
+import embodied
+import numpy as np
+
+import utils
+
+
+class TestTrain:  # End-to-end smoke test for the embodied.run.train loop.
+
+  def test_run_loop(self, tmpdir):  # Train, then resume from the checkpoint and train again.
+    args = self._make_args(tmpdir)
+    agent = self._make_agent()
+    embodied.run.train(
+        lambda: agent, bind(self._make_replay, args),
+        self._make_env, self._make_logger, args)
+    stats = agent.stats()
+    print('Stats:', stats)
+    replay_steps = args.steps * args.train_ratio
+    assert stats['lifetime'] >= 1  # Otherwise decrease log and ckpt interval.
+    assert np.allclose(stats['env_steps'], args.steps, 100, 0.1)  # NOTE(review): positional args are rtol=100, atol=0.1 — rtol=100 is near-vacuous; likely meant atol=100.
+    assert np.allclose(stats['replay_steps'], replay_steps, 100, 0.1)
+    assert stats['reports'] >= 1
+    assert stats['saves'] >= 2
+    assert stats['loads'] == 0  # Fresh run: nothing to resume from.
+    args = args.update(steps=2 * args.steps)
+    embodied.run.train(
+        lambda: agent, bind(self._make_replay, args),
+        self._make_env, self._make_logger, args)
+    stats = agent.stats()
+    assert stats['loads'] == 1  # Second run resumed from the saved checkpoint.
+    assert np.allclose(stats['env_steps'], args.steps, 100, 0.1)
+
+  def _make_agent(self):  # TestAgent built from the dummy env's spaces.
+    env = self._make_env(0)
+    agent = utils.TestAgent(env.obs_space, env.act_space)
+    env.close()
+    return agent
+
+  def _make_env(self, index):  # Env factory; 'index' is unused.
+    from embodied.envs import dummy
+    return dummy.Dummy('disc', size=(64, 64), length=100)
+
+  def _make_replay(self, args):  # Small replay buffer sized for the test.
+    kwargs = {'length': args.batch_length, 'capacity': 1e4}
+    return embodied.replay.Replay(**kwargs)
+
+  def _make_logger(self):  # Terminal-only logger.
+    return elements.Logger(elements.Counter(), [
+        elements.logger.TerminalOutput(),
+    ])
+
+  def _make_args(self, logdir):  # Config consumed by embodied.run.train.
+    return elements.Config(
+        steps=1000,
+        train_ratio=32.0,  # Replay steps trained per env step (see replay_steps above).
+        log_every=0.1,  # NOTE(review): presumably seconds; confirm units.
+        report_every=0.2,
+        save_every=0.2,
+        report_batches=1,
+        from_checkpoint='',
+        usage=dict(psutil=True),
+        debug=False,
+        logdir=str(logdir),
+        envs=4,
+        batch_size=8,
+        batch_length=16,
+        replay_context=0,
+        report_length=8,
+    )
diff --git a/models/embodied/tests/utils.py b/models/embodied/tests/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c455b9d5caf9bc66e319a1fd2a7e96a29c6f4e0
--- /dev/null
+++ b/models/embodied/tests/utils.py
@@ -0,0 +1,104 @@
+import time
+
+import elements
+import zerofun
+import numpy as np
+
+
+class TestAgent:  # Scripted agent that verifies data plumbing and counts loop events.
+
+  def __init__(self, obs_space, act_space, addr=None):
+    self.obs_space = obs_space
+    self.act_space = act_space
+    if addr:
+      self.client = zerofun.Client(addr, connect=True)  # Used by policy() to report stats remotely.
+      self.should_stats = elements.when.Clock(1)  # Rate-limits stats reports.
+    else:
+      self.client = None
+    self._stats = {
+        'env_steps': 0, 'replay_steps': 0, 'reports': 0,
+        'saves': 0, 'loads': 0, 'created': time.time(),
+    }
+
+  def _watcher(self):  # NOTE(review): self.queue is never assigned in this class; this method looks stale/unused — verify before relying on it.
+    while True:
+      if self.queue.empty():
+        self.queue.put(self.stats())
+      else:
+        time.sleep(0.01)
+
+  def stats(self):  # Snapshot of counters with 'created' converted into 'lifetime' seconds.
+    stats = self._stats.copy()
+    stats['lifetime'] = time.time() - stats.pop('created')
+    return stats
+
+  def init_policy(self, batch_size):
+    return (np.zeros(batch_size),)  # Carry holds the expected 'count' per env.
+
+  def init_train(self, batch_size):
+    return (np.zeros(batch_size),)
+
+  def init_report(self, batch_size):
+    return ()
+
+  def policy(self, carry, obs, mode='train'):  # Verify obs contents and return random actions.
+    assert set(obs.keys()) == set(self.obs_space.keys())
+    B = len(obs['is_first'])
+    self._stats['env_steps'] += B
+    carry, = carry
+    carry = np.asarray(carry)
+
+    assert carry.shape == (B,)
+    assert not any(k.startswith('log/') for k in obs.keys())
+
+    target = (carry + 1) * (1 - obs['is_first'])  # Expected count: +1 per step, reset to 0 on episode start.
+    assert (obs['count'] == target).all()
+    carry = target
+
+    if self.client and self.should_stats():
+      self.client.report(self.stats())
+
+    act = {
+        k: np.stack([v.sample() for _ in range(B)])
+        for k, v in self.act_space.items() if k != 'reset'}
+    return (carry,), act, {}
+
+  def train(self, carry, data):  # Verify batch keys and per-step count continuity across batches.
+    expected = sorted(set(self.obs_space | self.act_space) | {'stepid'})
+    assert sorted(data.keys()) == expected, (sorted(data.keys()), expected)
+    B, T = data['count'].shape
+    carry, = carry
+    assert carry.shape == (B,)
+    assert not any(k.startswith('log/') for k in data.keys())
+    self._stats['replay_steps'] += B * T
+    for t in range(T):
+      current = data['count'][:, t]
+      reset = data['is_first'][:, t]
+      target = (1 - reset) * (carry + 1) + reset * current  # On reset, accept whatever count the new episode starts with.
+      assert (current == target).all()
+      carry = current
+
+    outs = {}
+    metrics = {}
+    return (carry,), outs, metrics
+
+  def report(self, carry, data):  # Fixed metrics payload covering every supported summary type.
+    self._stats['reports'] += 1
+    return carry, {
+        'scalar': np.float32(0),
+        'vector': np.zeros(10),
+        'image1': np.zeros((64, 64, 1)),
+        'image3': np.zeros((64, 64, 3)),
+        'video': np.zeros((10, 64, 64, 3)),
+    }
+
+  def dataset(self, generator):
+    return generator()
+
+  def save(self):  # NOTE(review): returns the live self._stats dict, not a copy — callers share mutable state.
+    self._stats['saves'] += 1
+    return self._stats
+
+  def load(self, data):  # NOTE(review): aliases and mutates the caller's dict in place.
+    self._stats = data
+    self._stats['loads'] += 1