Datasets:
File size: 3,014 Bytes
faa3682 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 |
from collections import deque
from functools import partial as bind
import elements
import embodied
import numpy as np
import pytest
import zerofun
from embodied.envs import dummy
import utils
class TestParallel:
  """End-to-end tests for the parallel run loop in embodied.run.parallel.

  The test agent pushes its runtime statistics to a local zerofun server,
  so the test can inspect counters (env steps, replay steps, reports,
  saves, checkpoint loads) after each run of the loop finishes.
  """

  @pytest.mark.parametrize('train_ratio, eval_envs', (
      (-1, 2), (1, 2), (1, 0), (32, 2),
  ))
  def test_run_loop(self, tmpdir, train_ratio, eval_envs):
    """Run the combined loop twice; check stats and checkpoint resumption."""
    addr = 'ipc:///tmp/teststats'
    # maxlen=1 keeps only the most recent stats payload from the agent.
    received = deque(maxlen=1)
    server = zerofun.Server(addr, name='TestStats')
    server.bind('report', lambda stats: received.append(stats))
    server.start()
    args = self._make_args(tmpdir, train_ratio, eval_envs)
    embodied.run.parallel.combined(
        bind(self._make_agent, addr),
        bind(self._make_replay, args),
        bind(self._make_replay, args),
        self._make_env,
        self._make_env,
        self._make_logger, args)
    stats = received[0]
    print('Stats:', stats)
    assert stats['env_steps'] > 400
    if args.train_ratio > -1:
      replay_steps = stats['env_steps'] * args.train_ratio
      # Allow 10% relative plus 100 steps absolute slack. NumPy's
      # signature is allclose(a, b, rtol, atol); the previous positional
      # call (100, 0.1) set rtol=100, which made this check vacuous.
      assert np.allclose(stats['replay_steps'], replay_steps, rtol=0.1, atol=100)
    else:
      # train_ratio=-1 disables throttling; only require nontrivial sampling.
      assert stats['replay_steps'] > 100
    assert stats['reports'] >= 1
    assert stats['saves'] >= 2
    assert stats['loads'] == 0
    # Second run over the same logdir must resume from the checkpoint
    # written by the first run.
    embodied.run.parallel.combined(
        bind(self._make_agent, addr),
        bind(self._make_replay, args),
        bind(self._make_replay, args),
        self._make_env,
        self._make_env,
        self._make_logger, args)
    stats = received[0]
    assert stats['loads'] == 1

  def _make_agent(self, queue):
    """Build a TestAgent that reports stats to `queue` (the server addr)."""
    # A throwaway env is created only to obtain the obs/act spaces.
    env = self._make_env(0)
    agent = utils.TestAgent(env.obs_space, env.act_space, queue)
    env.close()
    return agent

  def _make_env(self, index):
    """Return a dummy discrete env; `index` is ignored (all envs identical)."""
    return dummy.Dummy('disc', size=(64, 64), length=100)

  def _make_replay(self, args, train_ratio=None):
    """Create a replay buffer, optionally throttled to `train_ratio`."""
    kwargs = {'length': args.batch_length, 'capacity': 1e4}
    if train_ratio:
      # Limit sampling relative to inserts to hit the requested ratio.
      kwargs['samples_per_insert'] = train_ratio / args.batch_length
    return embodied.replay.Replay(**kwargs)

  def _make_logger(self):
    """Return a minimal logger that only prints to the terminal."""
    return elements.Logger(elements.Counter(), [
        elements.logger.TerminalOutput(),
    ])

  def _make_args(self, logdir, train_ratio, eval_envs):
    """Config for a short (5 s) run with frequent logging/saving."""
    return elements.Config(
        duration=5.0,
        train_ratio=float(train_ratio),
        log_every=0.1,
        report_every=0.2,
        save_every=0.2,
        envs=4,
        eval_envs=int(eval_envs),
        report_batches=1,
        from_checkpoint='',
        episode_timeout=10,
        actor_addr='tcp://localhost:{auto}',
        replay_addr='tcp://localhost:{auto}',
        logger_addr='tcp://localhost:{auto}',
        ipv6=False,
        actor_batch=-1,
        actor_threads=2,
        agent_process=False,
        remote_replay=False,
        remote_envs=False,
        usage=dict(psutil=True, nvsmi=False),
        debug=False,
        logdir=str(logdir),
        batch_size=8,
        batch_length=16,
        replay_context=0,
        report_length=8,
    )
|