# (extraction residue removed: a markdown table header "code stringlengths 17 6.64M | / |---|"
#  was prepended to this file by a dataset export and is not valid Python)
def test_stochastic_network_5(net):
    """A connection with sample rate 1.0 is present both before and after resampling."""
    edge = ('A', 'B')
    net.add_connection(*edge, rate=MockComparableSampler(1.0))
    assert net.graph.has_edge(*edge)
    net.resample_connectivity()
    assert net.graph.has_edge(*edge)
|
def test_stochastic_network_6(net):
    """A connection with sample rate 0.0 is never present, even after resampling."""
    edge = ('A', 'B')
    net.add_connection(*edge, rate=MockComparableSampler(0.0))
    assert not net.graph.has_edge(*edge)
    net.resample_connectivity()
    assert not net.graph.has_edge(*edge)
|
class MockEnv:
    """Minimal environment stand-in whose view exposes no state."""

    def view(self):
        # This mock deliberately has nothing observable.
        return None
|
@msg_payload()
class _TestMessage():
    """Payload carrying a single integer between the test actors below."""
    value: int
|
class _TestActor(Agent):
    """Agent that replies with half the received value until it reaches 1."""

    @msg_handler(_TestMessage)
    def handle_request(self, _: Context, message: _TestMessage) -> List[Tuple[(AgentID, Message)]]:
        # Values of 1 or less end the exchange (implicit None, i.e. no reply).
        if message.payload.value <= 1:
            return
        reply = _TestMessage(message.payload.value // 2)
        return [(message.sender_id, reply)]
|
def test_tracking():
    """BatchResolver with tracking enabled records every delivered message in order."""
    resolver = BatchResolver(enable_tracking=True)
    net = Network([_TestActor('A'), _TestActor('B'), _TestActor('C')], resolver)
    net.add_connection('A', 'B')
    net.add_connection('A', 'C')
    net.send('A', 'B', _TestMessage(4))
    net.send('A', 'C', _TestMessage(4))
    contexts = {aid: net.context_for(aid, EnvView(0, 0.0)) for aid in net.agents}
    net.resolve(contexts)
    # Actors halve and echo: 4 -> 2 -> 1, interleaved across both connections.
    expected = [
        Message('A', 'B', _TestMessage(4)),
        Message('A', 'C', _TestMessage(4)),
        Message('B', 'A', _TestMessage(2)),
        Message('C', 'A', _TestMessage(2)),
        Message('A', 'B', _TestMessage(1)),
        Message('A', 'C', _TestMessage(1)),
    ]
    assert resolver.tracked_messages == expected
    net.resolver.clear_tracked_messages()
    assert resolver.tracked_messages == []
|
def test_repr():
    """An agent's string form is '[ClassName AgentID]'."""
    agent = MockAgent('AgentID')
    assert str(agent) == '[MockAgent AgentID]'
|
def test_reset():
    """reset() samples a fresh type from the agent's supertype."""
    supertype = MockStrategicAgent.Supertype(MockSampler(1))
    agent = MockStrategicAgent('Agent', supertype=supertype)
    assert agent.supertype == supertype
    agent.reset()
    # MockSampler advances its value on each sample, so the type becomes 2.
    assert agent.type == MockStrategicAgent.Supertype(2)

    class MockAgent2(ph.StrategicAgent):
        @dataclass
        class Supertype(ph.Supertype):
            type_value: float

    # Resetting with a supertype from a different agent class must still work.
    agent = MockAgent2('Agent', supertype=MockStrategicAgent.Supertype(0))
    agent.reset()
|
@ph.msg_payload()
class MockPayload1():
    """Payload type with a registered handler on MockAgent3 below."""
    value: float = 0.0
|
@ph.msg_payload()
class MockPayload2():
    """Payload type deliberately left without any handler (used to trigger errors)."""
    value: float = 0.0
|
class MockAgent3(ph.StrategicAgent):
    """Agent that counts received MockPayload1 messages; MockPayload2 is unhandled."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Number of MockPayload1 messages handled so far.
        self.mock_msg_1_recv = 0
    @ph.agents.msg_handler(MockPayload1)
    def handle_mock_message_1(self, ctx, message):
        self.mock_msg_1_recv += 1
|
def test_message_handling():
    """Handlers fire for single and batched messages; unhandled payloads raise."""
    agent = MockAgent3('Agent')
    agent.reset()
    agent.handle_message(None, ph.Message('', 'Agent', MockPayload1()))
    assert agent.mock_msg_1_recv == 1
    agent.handle_batch(None, [ph.Message('', 'Agent', MockPayload1())])
    assert agent.mock_msg_1_recv == 2
    # MockPayload2 has no registered handler on MockAgent3.
    with pytest.raises(ValueError):
        agent.handle_message(None, ph.Message('', 'Agent', MockPayload2()))
|
@pytest.fixture
def phantom_env():
    """Two-step env: strategic 'A' (lives 1 step), strategic 'B', non-strategic 'C'."""
    agents = [MockStrategicAgent('A', num_steps=1), MockStrategicAgent('B'), MockAgent('C')]
    return ph.PhantomEnv(num_steps=2, network=ph.Network(agents))
|
def test_n_agents(phantom_env):
    """n_agents counts strategic and non-strategic agents alike."""
    assert phantom_env.n_agents == 3
|
def test_agent_ids(phantom_env):
    """Agent id lists are partitioned into strategic and non-strategic subsets."""
    assert phantom_env.agent_ids == ['A', 'B', 'C']
    assert phantom_env.strategic_agent_ids == ['A', 'B']
    assert phantom_env.non_strategic_agent_ids == ['C']
|
def test_get_agents(phantom_env):
    """Agent object lists mirror the id partition."""
    assert phantom_env.strategic_agents == [phantom_env.agents['A'], phantom_env.agents['B']]
    assert phantom_env.non_strategic_agents == [phantom_env.agents['C']]
|
def test__get_item__(phantom_env):
    """env[agent_id] resolves to the agent object itself."""
    agent = phantom_env['A']
    assert isinstance(agent, MockStrategicAgent)
    assert agent.id == 'A'
|
def test_is_terminated(phantom_env):
    """The env terminates only once every strategic agent has terminated."""
    phantom_env._terminations = set()
    assert not phantom_env.is_terminated()
    phantom_env._terminations = {'A'}
    assert not phantom_env.is_terminated()
    phantom_env._terminations = {'A', 'B'}
    assert phantom_env.is_terminated()
|
def test_is_truncated(phantom_env):
    """Truncation requires all strategic agents truncated, or the step limit reached."""
    phantom_env._truncations = set()
    assert not phantom_env.is_truncated()
    phantom_env._truncations = {'A'}
    assert not phantom_env.is_truncated()
    phantom_env._truncations = {'A', 'B'}
    assert phantom_env.is_truncated()
    # Reaching num_steps also truncates, independent of per-agent state.
    phantom_env._truncations = set()
    phantom_env._current_step = phantom_env.num_steps
    assert phantom_env.is_truncated()
|
def test_reset(phantom_env):
    """reset() yields observations for strategic agents only, with empty infos."""
    obs, infos = phantom_env.reset()
    assert phantom_env.current_step == 0
    assert list(obs.keys()) == ['A', 'B']
    assert infos == {}
|
def test_step(phantom_env):
    """Agent 'A' (num_steps=1) drops out after step one; 'B' runs to the limit."""
    before = phantom_env.current_step
    step = phantom_env.step({'A': 0, 'B': 0})
    assert phantom_env.current_step == before + 1
    assert list(step.observations.keys()) == ['A', 'B']
    assert list(step.rewards.keys()) == ['A', 'B']
    assert list(step.infos.keys()) == ['A', 'B']
    assert step.terminations == {'A': True, 'B': False, '__all__': False}
    assert step.truncations == {'A': True, 'B': False, '__all__': False}
    # Second step: 'A' is gone; the env reaches num_steps and truncates overall.
    before = phantom_env.current_step
    step = phantom_env.step({'A': 0, 'B': 0})
    assert phantom_env.current_step == before + 1
    assert list(step.observations.keys()) == ['B']
    assert list(step.rewards.keys()) == ['B']
    assert list(step.infos.keys()) == ['B']
    assert step.terminations == {'B': False, '__all__': False}
    assert step.truncations == {'B': False, '__all__': True}
|
def test_payload_1():
    """Without sender/receiver types a payload carries no routing restrictions."""
    @ph.msg_payload()
    class MockPayload():
        value: float = 0.0

    assert MockPayload._sender_types is None
    assert MockPayload._receiver_types is None
|
def test_payload_2():
    """Scalar sender/receiver types are normalised to single-element name lists."""
    @ph.msg_payload(sender_type=ph.Agent, receiver_type='OtherAgent')
    class MockPayload():
        value: float = 0.0

    # A class is recorded by its name; a string is kept verbatim.
    assert MockPayload._sender_types == ['Agent']
    assert MockPayload._receiver_types == ['OtherAgent']
|
def test_payload_3():
    """List-valued sender/receiver types are stored as given."""
    @ph.msg_payload(sender_type=['AgentA', 'AgentB'], receiver_type=['AgentC', 'AgentD'])
    class MockPayload():
        value: float = 0.0

    assert MockPayload._sender_types == ['AgentA', 'AgentB']
    assert MockPayload._receiver_types == ['AgentC', 'AgentD']
|
def test_old_payload():
    """Old-style MsgPayload subclasses warn once (and only once) when checks are on."""
    @dataclass(frozen=True)
    class MockPayload(ph.MsgPayload):
        value: float = 0.0

    net = ph.Network([ph.Agent('a'), ph.Agent('b')], connections=[('a', 'b')])
    net.enforce_msg_payload_checks = True
    with warnings.catch_warnings(record=True) as caught:
        net.send('a', 'b', MockPayload(1.0))
        assert len(caught) == 1
        assert isinstance(caught[0].message, DeprecationWarning)
    # The deprecation warning is only emitted on first use.
    with warnings.catch_warnings(record=True) as caught:
        net.send('a', 'b', MockPayload(1.0))
        assert len(caught) == 0
    # With checks disabled, sending must succeed silently.
    net.enforce_msg_payload_checks = False
    net.send('a', 'b', MockPayload(1.0))
|
class MockAgent(ph.StrategicAgent):
    """Strategic agent with scalar spaces; the agent with id 'B' terminates at once."""

    def is_terminated(self, ctx: Context) -> bool:
        return self.id == 'B'

    def compute_reward(self, ctx: Context) -> float:
        return 0.0

    def encode_observation(self, ctx: Context):
        # Constant observation; tests only check plumbing, not content.
        return 1.0

    def decode_action(self, ctx: Context, action: np.ndarray):
        print(self.id, action)
        # Remember the last action so tests can inspect which policy acted.
        self.last_action = action
        return []

    @property
    def observation_space(self):
        return gym.spaces.Box(-np.inf, np.inf, (1,))

    @property
    def action_space(self):
        return gym.spaces.Box(-np.inf, np.inf, (1,))
|
class MockPolicy(Policy):
    """Fixed policy: always emits the action 2.0 regardless of observation."""

    def compute_action(self, observation):
        return 2.0
|
@pytest.fixture
def gym_env():
    """Single-agent adapter controlling 'A'; 'B' is driven by MockPolicy."""
    env_config = {'network': Network([MockAgent('A'), MockAgent('B')]), 'num_steps': 2}
    return SingleAgentEnvAdapter(
        env_class=PhantomEnv,
        agent_id='A',
        other_policies={'B': (MockPolicy, {})},
        env_config=env_config,
    )
|
def test_agent_ids(gym_env):
    """The adapter still exposes every underlying agent id."""
    assert list(gym_env.agent_ids) == ['A', 'B']
|
def test_n_agents(gym_env):
    """Both underlying agents are counted, not just the adapted one."""
    assert gym_env.n_agents == 2
|
def test_reset(gym_env):
    """reset() returns only agent 'A''s observation but caches all observations."""
    obs, info = gym_env.reset()
    assert gym_env.current_step == 0
    assert obs == 1.0
    assert info == {}
    assert gym_env._observations == {'A': 1.0, 'B': 1.0}
|
def test_step(gym_env):
    """step() advances the env; 'B' acts via MockPolicy (always 2.0)."""
    gym_env.reset()
    before = gym_env.current_step
    result = gym_env.step(3.0)
    assert gym_env.current_step == before + 1
    # (obs, reward, terminated, truncated, info) in Gym API order.
    assert result == (1.0, 0.0, False, False, {})
    assert gym_env.agents['A'].last_action == 3.0
    assert gym_env.agents['B'].last_action == 2.0
    before = gym_env.current_step
    result = gym_env.step({'A': 0, 'B': 0})
    assert gym_env.current_step == before + 1
|
def test_bad_env():
    """SingleAgentEnvAdapter rejects inconsistent agent/policy configurations."""
    def make_config():
        # Fresh network per construction attempt, matching the original test.
        return {'network': Network([MockAgent('A'), MockAgent('B')]), 'num_steps': 2}

    # Chosen agent id does not exist in the network.
    with pytest.raises(ValueError):
        SingleAgentEnvAdapter(env_class=PhantomEnv, agent_id='X', other_policies={'B': (MockPolicy, {})}, env_config=make_config())
    # Chosen agent also appears in other_policies.
    with pytest.raises(ValueError):
        SingleAgentEnvAdapter(env_class=PhantomEnv, agent_id='A', other_policies={'A': (MockPolicy, {}), 'B': (MockPolicy, {})}, env_config=make_config())
    # other_policies references an unknown agent id.
    with pytest.raises(ValueError):
        SingleAgentEnvAdapter(env_class=PhantomEnv, agent_id='A', other_policies={'X': (MockPolicy, {})}, env_config=make_config())
    # Remaining agent 'B' is left without a policy.
    with pytest.raises(ValueError):
        SingleAgentEnvAdapter(env_class=PhantomEnv, agent_id='A', other_policies={}, env_config=make_config())
|
def test_stackelberg_env():
    """Leader and follower act on alternating steps; rewards arrive one turn late."""
    agents = [MockStrategicAgent('leader'), MockStrategicAgent('follower')]
    env = ph.StackelbergEnv(3, ph.Network(agents), ['leader'], ['follower'])

    def check_counts(aid, rewards, obs, actions):
        # Verify the per-agent lifecycle call counters.
        agent = env.agents[aid]
        assert agent.compute_reward_count == rewards
        assert agent.encode_obs_count == obs
        assert agent.decode_action_count == actions

    assert env.reset() == ({'leader': np.array([0])}, {})
    check_counts('leader', 0, 1, 0)
    check_counts('follower', 0, 0, 0)

    # Turn 1: the leader acts; the follower is observed next.
    step = env.step({'leader': np.array([0])})
    assert step.observations == {'follower': np.array([1 / 3])}
    assert step.rewards == {}
    assert step.terminations == {'leader': False, 'follower': False, '__all__': False}
    assert step.truncations == {'leader': False, 'follower': False, '__all__': False}
    assert step.infos == {'follower': {}}
    check_counts('leader', 1, 1, 1)
    check_counts('follower', 0, 1, 0)

    # Turn 2: the follower acts; the leader gets its delayed reward.
    step = env.step({'follower': np.array([0])})
    assert step.observations == {'leader': np.array([2 / 3])}
    assert step.rewards == {'leader': 0.0}
    assert step.terminations == {'leader': False, 'follower': False, '__all__': False}
    assert step.truncations == {'leader': False, 'follower': False, '__all__': False}
    assert step.infos == {'leader': {}}
    check_counts('leader', 1, 2, 1)
    check_counts('follower', 1, 1, 1)

    # Turn 3: final step; the episode truncates at the step limit.
    step = env.step({'leader': np.array([0])})
    assert step.observations == {'follower': np.array([1])}
    assert step.rewards == {'leader': 0.0, 'follower': 0.0}
    assert step.terminations == {'leader': False, 'follower': False, '__all__': False}
    assert step.truncations == {'leader': False, 'follower': False, '__all__': True}
    assert step.infos == {'follower': {}}
    check_counts('leader', 2, 2, 2)
    check_counts('follower', 1, 2, 1)
|
def test_base_supertype_sample():
    """sample() resolves sampler fields and passes plain values through untouched."""
    @dataclass
    class TestSupertype(Supertype):
        a: float
        b: float

    plain = TestSupertype(1.0, 'string')
    sampled = plain.sample()
    assert isinstance(sampled, TestSupertype)
    assert sampled.__dict__ == {'a': 1.0, 'b': 'string'}

    # A sampler field is replaced by its sampled value.
    with_sampler = TestSupertype(MockSampler(0), 'string')
    assert with_sampler.sample().__dict__ == {'a': 1, 'b': 'string'}
|
def test_base_type_utilities():
    """Typed Supertypes convert to obs-space-compatible dicts and Gym spaces."""
    @dataclass
    class Type(Supertype):
        a: int
        b: float
        c: List[int]
        d: Tuple[int]
        e: np.ndarray
        f: Dict[(str, int)]

    t = Type(a=1, b=2.0, c=[6, 7, 8], d=(9, 10, 11), e=np.array([15, 16, 17], dtype=np.float32), f={'x': 12, 'y': 13, 'z': 14})
    t_compat = t.to_obs_space_compatible_type()
    assert len(t_compat) == 6
    assert t_compat['a'] == t.a
    assert t_compat['b'] == t.b
    assert t_compat['c'] == t.c
    assert t_compat['d'] == t.d
    assert np.all(t_compat['e'] == t.e)
    assert t_compat['f'] == t.f

    def scalar_box():
        # A single unbounded float32 component.
        return gym.spaces.Box(-np.inf, np.inf, (1,), np.float32)

    expected_space = gym.spaces.Dict({
        'a': scalar_box(),
        'b': scalar_box(),
        'c': gym.spaces.Tuple([scalar_box(), scalar_box(), scalar_box()]),
        'd': gym.spaces.Tuple([scalar_box(), scalar_box(), scalar_box()]),
        'e': gym.spaces.Box(-np.inf, np.inf, (3,), np.float32),
        'f': gym.spaces.Dict({'x': scalar_box(), 'y': scalar_box(), 'z': scalar_box()}),
    })
    t_space = t.to_obs_space()
    assert t_space == expected_space
    assert t_space.contains(t_compat)

    # String fields cannot be represented in an observation space.
    @dataclass
    class Type(Supertype):
        s: str = 's'

    t = Type()
    with pytest.raises(ValueError):
        t.to_obs_space_compatible_type()
    with pytest.raises(ValueError):
        t.to_obs_space()
|
def test_agent_supertypes_in_env_1():
    """Supertype instances passed per agent are registered and sampled on reset."""
    network = ph.Network([MockStrategicAgent('a1'), MockStrategicAgent('a2')])
    s1 = MockSampler(0)
    s2 = MockSampler(10)
    supertypes = {
        'a1': MockStrategicAgent.Supertype(type_value=s1),
        'a2': MockStrategicAgent.Supertype(type_value=s2),
    }
    env = ph.PhantomEnv(1, network, agent_supertypes=supertypes)
    assert set(env._samplers) == {s1, s2}
    assert env.agents['a1'].supertype == supertypes['a1']
    assert env.agents['a1'].type == MockStrategicAgent.Supertype(1)
    assert env.agents['a2'].supertype == supertypes['a2']
    assert env.agents['a2'].type == MockStrategicAgent.Supertype(11)
    assert env.agents['a1'].supertype.type_value == s1
    assert env.agents['a2'].supertype.type_value == s2
    # Each reset draws the next sampled values.
    env.reset()
    assert env.agents['a1'].type == MockStrategicAgent.Supertype(2)
    assert env.agents['a2'].type == MockStrategicAgent.Supertype(12)
|
def test_agent_supertypes_in_env_2():
    """Supertypes may also be passed as plain dicts of field values."""
    network = ph.Network([MockStrategicAgent('a1'), MockStrategicAgent('a2')])
    s1 = MockSampler(0)
    s2 = MockSampler(10)
    env = ph.PhantomEnv(1, network, agent_supertypes={'a1': {'type_value': s1}, 'a2': {'type_value': s2}})
    assert set(env._samplers) == {s1, s2}
    assert env.agents['a1'].type == MockStrategicAgent.Supertype(1)
    assert env.agents['a1'].supertype == MockStrategicAgent.Supertype(type_value=s1)
    assert env.agents['a2'].type == MockStrategicAgent.Supertype(11)
    assert env.agents['a2'].supertype == MockStrategicAgent.Supertype(type_value=s2)
    assert env.agents['a1'].supertype.type_value == s1
    assert env.agents['a2'].supertype.type_value == s2
    # Each reset draws the next sampled values.
    env.reset()
    assert env.agents['a1'].type == MockStrategicAgent.Supertype(2)
    assert env.agents['a2'].type == MockStrategicAgent.Supertype(12)
|
def test_agent_supertypes_in_env_bad():
    """Supertype dicts with unknown or missing fields fail at env construction."""
    network = ph.Network([MockStrategicAgent('a1'), MockStrategicAgent('a2')])
    with pytest.raises(Exception):
        ph.PhantomEnv(1, network, agent_supertypes={'a1': {'wrong': 1.0}, 'a2': {}})
|
def test_env_supertype_in_env_1():
    """An env supertype instance is registered and sampled into env_type on reset."""
    sampler = MockSampler(0)
    env = MockEnv(env_supertype=MockEnv.Supertype(type_value=sampler))
    assert set(env._samplers) == {sampler}
    # No type is drawn until the first reset.
    assert env.env_type is None
    assert env.env_supertype == MockEnv.Supertype(sampler)
    env.reset()
    assert env.env_type == MockEnv.Supertype(2)
|
def test_env_supertype_in_env_2():
    """The env supertype may also be passed as a plain dict of field values."""
    sampler = MockSampler(0)
    env = MockEnv(env_supertype={'type_value': sampler})
    assert set(env._samplers) == {sampler}
    # No type is drawn until the first reset.
    assert env.env_type is None
    assert env.env_supertype == MockEnv.Supertype(type_value=sampler)
    env.reset()
    assert env.env_type == MockEnv.Supertype(2)
|
def test_env_supertype_in_env_bad():
    """An env supertype dict with an unknown field is rejected."""
    with pytest.raises(Exception):
        MockEnv(env_supertype={'xxx': 0.0})
|
def test_env_type_passed_to_agent():
    # Verifies a sampled env-supertype value reaches agents through the env view.
    class MockAgent(ph.Agent):
        def __init__(self, *args, num_steps=None, **kwargs):
            super().__init__(*args, **kwargs)
            self.num_steps = num_steps
            # Last supertype parameter seen via the env view.
            self.param = 0.0
        def generate_messages(self, ctx):
            # Record the env-level parameter exposed on the view at message time.
            self.param = ctx.env_view.supertype_param
    class MockEnv(ph.PhantomEnv):
        @dataclass
        class Supertype(ph.Supertype):
            param: float = 0.0
        @dataclass(frozen=True)
        class View(ph.EnvView):
            supertype_param: float
        def view(self, agent_views):
            # Expose the sampled env type's param alongside step/progress info.
            return self.View(self.current_step, (self.current_step / self.num_steps), self.env_type.param)
        def __init__(self, **kwargs):
            network = ph.StochasticNetwork([MockAgent('a1')])
            super().__init__(num_steps=10, network=network, **kwargs)
    env = MockEnv(env_supertype=MockEnv.Supertype(MockSampler(0.0)))
    env.reset()
    env.step({})
    # NOTE(review): MockSampler appears to advance its value by 1 on each sample,
    # giving 2.0 after the first reset and 3.0 after the second — confirm against
    # the MockSampler implementation.
    assert (env['a1'].param == 2.0)
    env.reset()
    env.step({})
    assert (env['a1'].param == 3.0)
|
def test_telemetry(tmpdir):
    """Print logging leaves no file behind; file logging writes a JSON episode log.

    Fix: the original read the log with ``json.load(open(...))``, leaking the
    file handle; the file is now opened via a ``with`` block.
    """
    ph.telemetry.logger.configure_print_logging(
        print_actions=True,
        print_observations=True,
        print_rewards=True,
        print_terminations=True,
        print_truncations=True,
        print_infos=True,
        print_messages=True,
        metrics={'step': ph.metrics.SimpleEnvMetric('current_step')},
    )
    env = MockEnv()
    env.reset()
    for _ in range(5):
        env.step({})
    assert ph.telemetry.logger._current_episode is None
    # Print logging must not create a log file.
    assert not os.path.isfile(tmpdir.join('log.json'))

    ph.telemetry.logger.configure_print_logging(enable=False)
    ph.telemetry.logger.configure_file_logging(
        file_path=tmpdir.join('log.json'),
        metrics={'step': ph.metrics.SimpleEnvMetric('current_step')},
    )
    env = MockEnv()
    env.reset()
    for _ in range(5):
        env.step({})
    assert os.path.isfile(tmpdir.join('log.json'))
    with open(tmpdir.join('log.json'), 'r') as f:
        data = json.load(f)
    assert set(data.keys()) == {'start', 'steps'}
    # One entry for the reset plus one per step.
    assert len(data['steps']) == 6
    assert set(data['steps'][0]) == {'messages', 'metrics', 'observations'}
    assert set(data['steps'][1]) == {'actions', 'terminations', 'truncations', 'infos', 'messages', 'metrics', 'observations', 'rewards'}
    # Disable file logging so later tests are unaffected.
    ph.telemetry.logger.configure_file_logging(file_path=None)
|
def test_uniform_range():
    """UniformRange covers [start, end) with the given step."""
    range_ = ph.utils.ranges.UniformRange(start=0.0, end=10.0, step=1.0)
    assert (range_.values() == np.arange(10)).all()
|
def test_linspace_range():
    """LinspaceRange covers [start, end] inclusive with n points."""
    range_ = ph.utils.ranges.LinspaceRange(start=0.0, end=10.0, n=11)
    assert (range_.values() == np.arange(11)).all()
|
def test_unit_array_uniform_range():
    """The unit-array variant wraps each uniform value in a 1-element array."""
    range_ = ph.utils.ranges.UnitArrayUniformRange(start=0.0, end=10.0, step=1.0)
    expected = [np.array([i]) for i in range(10)]
    assert range_.values() == expected
|
def test_unit_array_linspace_range():
    """The unit-array variant wraps each linspace value in a 1-element array."""
    range_ = ph.utils.ranges.UnitArrayLinspaceRange(start=0.0, end=10.0, n=11)
    expected = [np.array([i]) for i in range(11)]
    assert range_.values() == expected
|
def test_rllib_train_rollout(tmpdir):
    # End-to-end: train PPO for 2 iterations, then exercise rollout, rollout
    # serialisation, custom policy mapping, and policy evaluation against the
    # latest checkpoint.
    ph.utils.rllib.train(algorithm='PPO', env_class=MockEnv, policies={'mock_policy': MockStrategicAgent}, rllib_config={'disable_env_checking': True, 'num_rollout_workers': 1}, iterations=2, checkpoint_freq=2, results_dir=tmpdir)
    # Rollout with env class restored from checkpoint metadata, in-process.
    results = ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', num_repeats=3, num_workers=1)
    assert (len(list(results)) == 3)
    # Rollout with an explicit env class on a worker.
    results = ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', env_class=MockEnv, num_repeats=3, num_workers=1)
    results = list(results)
    assert (len(results) == 3)
    # Repeats share seeding, so action sequences are identical across repeats.
    assert np.all((results[0].actions_for_agent('a1') == results[1].actions_for_agent('a1') == results[2].actions_for_agent('a1')))
    ph.utils.rollout.rollouts_to_dataframe(results, avg_over_repeats=False)
    with open(f'{tmpdir}/rollouts.json', 'w') as f:
        ph.utils.rollout.rollouts_to_jsonl(results, f)
    # A custom policy mapping overrides the trained policy for agent 'a1'.
    results = ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', env_class=MockEnv, custom_policy_mapping={'a1': MockPolicy}, num_repeats=1, num_workers=1)
    assert (list(results)[0].actions_for_agent('a1') == [1, 1, 1, 1, 1])
    # Evaluate the trained policy over a sweep of observations (deterministic).
    results = ph.utils.rllib.evaluate_policy(directory=f'{tmpdir}/LATEST', obs=[ph.utils.ranges.LinspaceRange(0.0, 1.0, 3, name='r')], policy_id='mock_policy', explore=False)
    results = list(results)
    assert (results[0][0] == {'r': 0.0})
    assert (results[1][0] == {'r': 0.5})
    assert (results[2][0] == {'r': 1.0})
    assert (results[0][1][0] == 0.0)
    assert (results[1][1][0] == 0.5)
    assert (results[2][1][0] == 1.0)
    # NOTE(review): with explore=True the exact same outputs are asserted —
    # presumably exploration has no effect for this policy/config; confirm intent.
    results = ph.utils.rllib.evaluate_policy(directory=f'{tmpdir}/LATEST', obs=[ph.utils.ranges.LinspaceRange(0.0, 1.0, 3, name='r')], policy_id='mock_policy', explore=True)
    results = list(results)
    assert (results[0][0] == {'r': 0.0})
    assert (results[1][0] == {'r': 0.5})
    assert (results[2][0] == {'r': 1.0})
    assert (results[0][1][0] == 0.0)
    assert (results[1][1][0] == 0.5)
    assert (results[2][1][0] == 1.0)
|
def test_rllib_rollout_vectorized_fsm_env(tmpdir):
    # Vectorized (batched) policy inference must match unbatched results for a
    # single-stage FSM env, and be rejected for a multi-stage FSM env.
    class Env(ph.FiniteStateMachineEnv):
        # Single stage that loops back to itself.
        def __init__(self):
            agents = [MockStrategicAgent('A')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        @ph.FSMStage(stage_id='StageA', acting_agents=['A'], next_stages=['StageA'])
        def handle(self):
            return 'StageA'
    ph.utils.rllib.train(algorithm='PPO', env_class=Env, policies={'mock_policy': MockStrategicAgent}, rllib_config={'disable_env_checking': True, 'num_rollout_workers': 1}, iterations=2, checkpoint_freq=2, results_dir=tmpdir)
    # Batch sizes 1 and 3 must yield identical rollouts.
    results1 = ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', num_repeats=3, num_workers=1, policy_inference_batch_size=1)
    results2 = ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', num_repeats=3, num_workers=1, policy_inference_batch_size=3)
    assert (list(results1) == list(results2))
    class Env2(ph.FiniteStateMachineEnv):
        # Two stages that alternate — envs in a batch can diverge in stage.
        def __init__(self):
            agents = [MockStrategicAgent('A')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        @ph.FSMStage(stage_id='StageA', acting_agents=['A'], next_stages=['StageA', 'StageB'])
        def handleA(self):
            return 'StageB'
        @ph.FSMStage(stage_id='StageB', acting_agents=['A'], next_stages=['StageA', 'StageB'])
        def handleB(self):
            return 'StageA'
    ph.utils.rllib.train(algorithm='PPO', env_class=Env2, policies={'mock_policy': MockStrategicAgent}, rllib_config={'disable_env_checking': True, 'num_rollout_workers': 1}, iterations=2, checkpoint_freq=1, results_dir=tmpdir)
    # Batched inference is not supported for multi-stage FSM envs.
    with pytest.raises(ValueError):
        list(ph.utils.rllib.rollout(directory=f'{tmpdir}/LATEST', num_repeats=3, num_workers=1, policy_inference_batch_size=3))
|
def test_rllib_rollout_bad():
    """rollout() validates num_repeats and num_workers up front."""
    with pytest.raises(AssertionError):
        list(ph.utils.rllib.rollout(directory='', env_class=MockEnv, num_repeats=0))
    with pytest.raises(AssertionError):
        list(ph.utils.rllib.rollout(directory='', env_class=MockEnv, num_workers=-1))
|
def test_rllib_train_no_checkpoint(tmpdir):
    """checkpoint_freq=0 disables checkpointing entirely."""
    algo = ph.utils.rllib.train(
        algorithm='PPO',
        env_class=MockEnv,
        policies={'mock_policy': MockStrategicAgent},
        rllib_config={'disable_env_checking': True, 'num_rollout_workers': 1},
        iterations=1,
        checkpoint_freq=0,
        results_dir=tmpdir,
    )
    assert not Path(algo.logdir, f'checkpoint_{str(1).zfill(6)}').exists()
|
def test_rllib_train_not_set_checkpoint_freq(tmpdir):
    """checkpoint_freq=None still produces a final checkpoint."""
    algo = ph.utils.rllib.train(
        algorithm='PPO',
        env_class=MockEnv,
        policies={'mock_policy': MockStrategicAgent},
        rllib_config={'disable_env_checking': True, 'num_rollout_workers': 1},
        iterations=2,
        checkpoint_freq=None,
        results_dir=tmpdir,
    )
    assert Path(algo.logdir, f'checkpoint_{str(2).zfill(6)}').exists()
|
def test_rollout_class():
    # Exercises Rollout's per-agent accessors with and without dropping None
    # entries: step 0 has full data, step 1 has an explicit None reward, step 2
    # has no data at all for the agent.
    rollout = ph.utils.rollout.Rollout(rollout_id=0, repeat_id=0, env_config={}, rollout_params={}, steps=[ph.utils.rollout.Step(i=0, observations={'agent': {'obs': 1}}, rewards={'agent': 1.0}, terminations={'agent': False}, truncations={'agent': False}, infos={'agent': {'info': 1}}, actions={'agent': {'action': 1}}, messages=None, stage=None), ph.utils.rollout.Step(i=0, observations={}, rewards={'agent': None}, terminations={}, truncations={}, infos={}, actions={}, messages=None, stage=None), ph.utils.rollout.Step(i=0, observations={}, rewards={}, terminations={}, truncations={}, infos={}, actions={}, messages=None, stage=None)], metrics={})
    obs = rollout.observations_for_agent('agent', drop_nones=False)
    assert (obs == [{'obs': 1}, None, None])
    obs = rollout.observations_for_agent('agent', drop_nones=True)
    assert (obs == [{'obs': 1}])
    rewards = rollout.rewards_for_agent('agent', drop_nones=False)
    assert (rewards == [1.0, None, None])
    rewards = rollout.rewards_for_agent('agent', drop_nones=True)
    assert (rewards == [1.0])
    # NOTE(review): every block below assigns terminations/truncations/infos/
    # actions but calls rewards_for_agent — this looks like a copy-paste slip.
    # They should presumably call terminations_for_agent, truncations_for_agent,
    # infos_for_agent and actions_for_agent with correspondingly different
    # expected values; as written these only re-test the rewards accessor.
    terminations = rollout.rewards_for_agent('agent', drop_nones=False)
    assert (terminations == [1.0, None, None])
    terminations = rollout.rewards_for_agent('agent', drop_nones=True)
    assert (terminations == [1.0])
    truncations = rollout.rewards_for_agent('agent', drop_nones=False)
    assert (truncations == [1.0, None, None])
    truncations = rollout.rewards_for_agent('agent', drop_nones=True)
    assert (truncations == [1.0])
    infos = rollout.rewards_for_agent('agent', drop_nones=False)
    assert (infos == [1.0, None, None])
    infos = rollout.rewards_for_agent('agent', drop_nones=True)
    assert (infos == [1.0])
    actions = rollout.rewards_for_agent('agent', drop_nones=False)
    assert (actions == [1.0, None, None])
    actions = rollout.rewards_for_agent('agent', drop_nones=True)
    assert (actions == [1.0])
|
@pytest.fixture
def float_sampler():
    """A fresh UniformFloatSampler instance per test."""
    return UniformFloatSampler()
|
@pytest.fixture
def int_sampler():
    """A fresh UniformIntSampler instance per test."""
    return UniformIntSampler()
|
def test_comparison_with_float(float_sampler):
    """A sampler compares against plain floats via its last-sampled value."""
    float_sampler._value = float_sampler.sample()
    assert float_sampler <= 1.0
    assert float_sampler >= 0.0
    assert float_sampler == float_sampler._value
    assert float_sampler != float_sampler._value + 0.1
|
def test_comparison_with_int(int_sampler):
    """A sampler compares against plain ints via its last-sampled value."""
    int_sampler._value = int_sampler.sample()
    assert int_sampler == 0 or int_sampler == 1
    assert int_sampler == int_sampler._value
    assert int_sampler != int_sampler._value + 1
|
def test_comparison_with_sampler(float_sampler):
    """Two distinct sampler instances never compare equal, even with equal values."""
    float_sampler._value = 0.5
    other = UniformFloatSampler()
    other._value = 0.5
    assert not (float_sampler == other)
    assert float_sampler != other
|
def test_iterable():
    """A sampler is not contained in a list of other samplers with the same value."""
    samplers = []
    for _ in range(3):
        s = UniformFloatSampler()
        s._value = 0.5
        samplers.append(s)
    assert samplers[2] not in samplers[:2]
|
def test_lambda_sampler():
    """LambdaSampler re-invokes the wrapped function with stored args each sample."""
    def add(a_, b_=0):
        return a_ + b_

    sampler = LambdaSampler(add, 5, b_=1)
    assert sampler.sample() == 6
    assert sampler.sample() == 6
    # Keyword default applies when the kwarg is omitted.
    sampler = LambdaSampler(add, 5)
    assert sampler.sample() == 5
    assert sampler.sample() == 5
|
def test_asserts():
    """Every sampler class rejects low > high bounds."""
    cases = [
        (UniformFloatSampler, 1.0, 0.0),
        (UniformIntSampler, 1, 0),
        (UniformArraySampler, 1.0, 0.0),
    ]
    for sampler_cls, low, high in cases:
        with pytest.raises(AssertionError):
            sampler_cls(high=high, low=low)
|
@ex.config
def cfg_base():
    # Base experiment configuration (presumably a Sacred `@ex.config` — applied
    # to every run unless overridden by a named config below).
    uuid = 'basic'
    cfg = {}
    # PPO learner hyper-parameters; replay enabled with on- and off-policy epochs.
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 512, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network': 'AtariNet', 'perception_network_kwargs': {}, 'test': False, 'use_replay': True, 'replay_buffer_size': 10000, 'ppo_epoch': 8, 'on_policy_epoch': 8, 'off_policy_epoch': 8}
    image_dim = 84
    # Gibson visual-exploration env, single process, no observation transforms.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Gibson_HuskyVisualExplorationEnv', 'env_specific_kwargs': {}, 'sensors': {}, 'num_processes': 1, 'num_val_processes': 0, 'additional_repeat_count': 0, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None}
    # Checkpoint/log destinations under LOG_DIR (tensorboard + visdom outputs).
    cfg['saving'] = {'autofix_log_dir': False, 'checkpoint': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 10, 'logging_type': 'tensorboard', 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_port': '8097', 'visdom_server': 'localhost'}
    # 500k frames, fixed seed for reproducibility.
    cfg['training'] = {'cuda': True, 'num_frames': 500000.0, 'resumable': True, 'seed': 42}
|
@ex.named_config
def cfg_doom_navigation():
    # Named config: ViZDoom multi-goal navigation task (overrides cfg_base).
    uuid = 'doom_visualnavigation'
    cfg = {}
    # Smaller rollouts than the base config; no replay buffer.
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.01, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 200, 'num_mini_batch': 16, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 4, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.0001, 'perception_network': 'AtariNet', 'test': False, 'use_replay': False, 'replay_buffer_size': 1000, 'on_policy_epoch': 4, 'off_policy_epoch': 0}
    image_dim = 84
    # Doom env with clutter and goal objects; note 'env_specific_args' here vs
    # 'env_specific_kwargs' in cfg_base — presumably intentional, worth confirming.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Doom_VizdoomMultiGoalWithClutterEnv.room-v0', 'env_specific_args': {'episode_timeout': 100, 'n_clutter_objects': 8, 'n_goal_objects': 1}, 'sensors': {'rgb_filled': None, 'taskonomy': None, 'map': None, 'target': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'additional_repeat_count': 3}
    cfg['saving'] = {'port': 8097, 'log_dir': LOG_DIR, 'log_interval': 1, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis': False, 'vis_interval': 200, 'launcher_script': None, 'visdom_server': 'localhost', 'visdom_port': '8097', 'checkpoint': None, 'checkpoint_configs': False}
    # Random seed per run; 5M frames.
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 5000000.0, 'resumable': True}
|
@ex.named_config
def scratch_doom():
    # Named config: train-from-scratch variant for the Doom navigation task.
    uuid = 'doom_scratch'
    cfg = {}
    # No map channel and no target input for the perception network.
    cfg['learner'] = {'perception_network': 'AtariNet', 'perception_network_kwargs': {'n_map_channels': 0, 'use_target': False}}
    # The pre-aggregation transform is stored as source text (whitespace stripped
    # via str.translate) — presumably eval'd downstream; confirm before editing.
    cfg['env'] = {'env_specific_kwargs': {'episode_timeout': 1000, 'n_clutter_objects': 8, 'n_goal_objects': 1}, 'transform_fn_pre_aggregation': "\n        TransformFactory.splitting(\n            {\n            'color': {\n                'rgb_filled':rescale_centercrop_resize((3,84,84)) }\n            },\n            keep_unnamed=False)\n        ".translate(remove_whitespace), 'transform_fn_post_aggregation': None}
|
@ex.named_config
def cfg_doom_exploration():
    # Named config: ViZDoom exploration task (overrides cfg_base).
    uuid = 'doom_myopicexploration'
    cfg = {}
    # Same learner settings as cfg_doom_navigation.
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.01, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 200, 'num_mini_batch': 16, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 4, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.0001, 'perception_network': 'AtariNet', 'test': False, 'use_replay': False, 'replay_buffer_size': 1000, 'on_policy_epoch': 4, 'off_policy_epoch': 0}
    image_dim = 84
    # Exploration env with an occupancy sensor instead of a target sensor.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Doom_VizdoomExplorationEnv.room-v0', 'env_specific_args': {'episode_timeout': 2000}, 'sensors': {'rgb_filled': None, 'taskonomy': None, 'map': None, 'occupancy': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'additional_repeat_count': 3}
    cfg['saving'] = {'port': 8097, 'log_dir': LOG_DIR, 'log_interval': 1, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis': False, 'vis_interval': 200, 'launcher_script': None, 'visdom_server': 'localhost', 'visdom_port': '8097', 'checkpoint': None, 'checkpoint_configs': False}
    # Random seed per run; 500k frames (shorter than the navigation config).
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 500000.0, 'resumable': True}
|
@ex.named_config
def scratch_doom_exploration():
    # Named config: train-from-scratch variant for the Doom exploration task.
    uuid = 'doom_scratch_exploration'
    cfg = {}
    # One map channel (occupancy) feeds the perception network; no target input.
    cfg['learner'] = {'perception_network': 'AtariNet', 'perception_network_kwargs': {'n_map_channels': 1, 'use_target': False}}
    # Transform source text splits RGB and occupancy-map inputs; whitespace is
    # stripped via str.translate — presumably eval'd downstream.
    cfg['env'] = {'env_specific_kwargs': {}, 'transform_fn_pre_aggregation': "\n        TransformFactory.splitting(\n            {\n            'color': {\n                'rgb_filled':rescale_centercrop_resize((3,84,84)) },\n            'occupancy': {\n                'map': rescale_centercrop_resize((1,84,84))}\n            },\n            keep_unnamed=False)\n        ".translate(remove_whitespace), 'transform_fn_post_aggregation': None}
|
@ex.named_config
def cfg_exploration():
    """Sacred named config: PPO with replay for Gibson Husky visual
    exploration (headless config, Beechwood start poses)."""
    uuid = 'gibson_exploration'
    cfg = {}
    # PPO + replay buffer; equal on-/off-policy epochs.
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 512, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 8, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network': 'AtariNet', 'perception_network_kwargs': {}, 'test': False, 'use_replay': True, 'replay_buffer_size': 10000, 'on_policy_epoch': 8, 'off_policy_epoch': 8}
    # NOTE(review): image_dim is captured as a config entry but unused here.
    image_dim = 84
    cfg['env'] = {'add_timestep': False, 'env_name': 'Gibson_HuskyVisualExplorationEnv', 'env_specific_kwargs': {'target_dim': 16, 'gibson_config': '/root/perception_module/evkit/env/gibson/husky_visual_explore_train_noX.yaml', 'start_locations_file': os.path.join(get_model_path('Beechwood'), 'first_floor_poses.csv'), 'blind': False, 'blank_sensor': True}, 'sensors': {'rgb_filled': None, 'features': None, 'taskonomy': None, 'map': None, 'target': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'num_val_processes': 0, 'additional_repeat_count': 0}
    cfg['saving'] = {'checkpoint': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 10, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_server': 'localhost', 'visdom_port': '8097'}
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 500000.0, 'resumable': True}
|
@ex.named_config
def cfg_navigation():
    """Sacred named config: PPO with replay for Gibson Husky visual
    navigation (same learner hyperparameters as cfg_exploration)."""
    uuid = 'gibson_visualnavigation'
    cfg = {}
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 512, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 8, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network': 'AtariNet', 'perception_network_kwargs': {}, 'test': False, 'use_replay': True, 'replay_buffer_size': 10000, 'on_policy_epoch': 8, 'off_policy_epoch': 8}
    # NOTE(review): image_dim is captured as a config entry but unused here.
    image_dim = 84
    # NOTE(review): this env dict uses the key 'repeat_count' while
    # cfg_exploration/cfg_habitat use 'additional_repeat_count' — confirm
    # which key the env wrapper actually reads.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Gibson_HuskyVisualNavigateEnv', 'env_specific_kwargs': {'blind': False, 'blank_sensor': True, 'gibson_config': '/root/perception_module/evkit/env/gibson/husky_visual_navigate.yaml', 'start_locations_file': os.path.join(get_model_path('Beechwood'), 'first_floor_poses.csv')}, 'sensors': {'rgb_filled': None, 'features': None, 'taskonomy': None, 'map': None, 'target': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'num_val_processes': 0, 'repeat_count': 0}
    cfg['saving'] = {'checkpoint': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 1, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_server': 'localhost', 'visdom_port': '8097'}
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 500000.0, 'resumable': True}
|
@ex.named_config
def cfg_planning():
    """Sacred named config: PPO with replay for Gibson Husky coordinate
    (point-goal) navigation."""
    uuid = 'gibson_coordinatenavigation'
    cfg = {}
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 512, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 8, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network': 'AtariNet', 'perception_network_kwargs': {}, 'test': False, 'use_replay': True, 'replay_buffer_size': 10000, 'on_policy_epoch': 8, 'off_policy_epoch': 8}
    # NOTE(review): image_dim is captured as a config entry but unused here.
    image_dim = 84
    # NOTE(review): uses 'repeat_count' where sibling configs use
    # 'additional_repeat_count' — verify against the env wrapper.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Gibson_HuskyCoordinateNavigateEnv', 'env_specific_kwargs': {'blind': False, 'blank_sensor': True, 'start_locations_file': os.path.join(get_model_path('Beechwood'), 'first_floor_poses.csv'), 'gibson_config': '/root/perception_module/evkit/env/gibson/husky_coordinate_navigate.yaml', 'target_dim': 16}, 'sensors': {'rgb_filled': None, 'features': None, 'taskonomy': None, 'map': None, 'target': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'num_val_processes': 0, 'repeat_count': 0}
    cfg['saving'] = {'checkpoint': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 10, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_server': 'localhost', 'visdom_port': '8097'}
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 500000.0, 'resumable': True}
|
@ex.named_config
def cfg_habitat():
    """Sacred named config: core Habitat PointNav setup — the richest learner
    dict (SLAM hooks, intrinsic-loss hooks, optimizer selection) plus the
    full sensor suite and fixed seed for reproducibility."""
    uuid = 'habitat_core'
    cfg = {}
    # Learner exposes extension points (slam_class, loss_kwargs, cache_kwargs,
    # optimizer_class) that other named configs override.
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 1000, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 8, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network_reinit': False, 'perception_network': 'AtariNet', 'perception_network_kwargs': {'extra_kwargs': {'normalize_taskonomy': True}}, 'test': False, 'use_replay': True, 'replay_buffer_size': 3000, 'on_policy_epoch': 8, 'off_policy_epoch': 8, 'slam_class': None, 'slam_kwargs': {}, 'loss_kwargs': {'intrinsic_loss_coefs': [], 'intrinsic_loss_types': []}, 'deterministic': False, 'rollout_value_batch_multiplier': 2, 'cache_kwargs': {}, 'optimizer_class': 'optim.Adam', 'optimizer_kwargs': {}}
    # Transforms are configured via *_fn/*_kwargs pairs here (resolved later),
    # unlike the older string-based transform_fn_pre_aggregation configs.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Habitat_PointNav', 'env_specific_kwargs': {'swap_building_k_episodes': 10, 'gpu_devices': [0], 'scenario_kwargs': {'use_depth': False, 'max_geodesic_dist': 99999}, 'map_kwargs': {'map_building_size': 22, 'map_max_pool': False, 'use_cuda': False, 'history_size': None}, 'target_dim': 16, 'val_scenes': None, 'train_scenes': None}, 'sensors': {'features': None, 'taskonomy': None, 'rgb_filled': None, 'map': None, 'target': None, 'depth': None, 'global_pos': None, 'pointgoal': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_pre_aggregation_fn': None, 'transform_fn_pre_aggregation_kwargs': {}, 'transform_fn_post_aggregation': None, 'transform_fn_post_aggregation_fn': None, 'transform_fn_post_aggregation_kwargs': {}, 'num_processes': 8, 'num_val_processes': 1, 'additional_repeat_count': 0}
    cfg['saving'] = {'checkpoint': None, 'checkpoint_num': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 10, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_server': 'localhost', 'visdom_port': '8097', 'obliterate_logs': False}
    # Fixed seed (42), 100M frames, not resumable by default.
    cfg['training'] = {'cuda': True, 'gpu_devices': None, 'seed': 42, 'num_frames': 100000000.0, 'resumable': False}
|
@ex.named_config
def cfg_test():
    """Sacred named config: evaluation mode — the `override` dict forces
    test settings (994 episodes, 10 train/10 val processes) on top of a
    restored checkpoint config."""
    cfg = {}
    # NOTE(review): 'resumable' sits under 'saving' here but under 'training'
    # in the other configs — confirm which section the trainer reads.
    cfg['saving'] = {'resumable': True, 'checkpoint_configs': True}
    override = {}
    override['saving'] = {'visdom_server': 'localhost'}
    override['env'] = {'num_processes': 10, 'num_val_processes': 10, 'env_specific_kwargs': {'test_mode': True, 'scenario_kwargs': {'max_geodesic_dist': 99999}}}
    override['learner'] = {'test_k_episodes': 994, 'test': True}
|
@ex.named_config
def planning():
    """Sacred named config: Habitat PointNav task head — 3 map channels with
    a target vector; map/global_pos/target pass through pre-aggregation and
    the map is pooled to (3,84,84) post-aggregation."""
    uuid = 'habitat_planning'
    cfg = {}
    cfg['learner'] = {'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True}}
    # Transform values are code strings resolved later by the factory loader.
    cfg['env'] = {'env_name': 'Habitat_PointNav', 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': {'names_to_transforms': {'map': 'identity_transform()', 'global_pos': 'identity_transform()', 'target': 'identity_transform()'}, 'keep_unnamed': False}, 'transform_fn_post_aggregation_fn': 'TransformFactory.independent', 'transform_fn_post_aggregation_kwargs': {'names_to_transforms': {'map': 'map_pool_collated((3,84,84))'}, 'keep_unnamed': True}}
|
@ex.named_config
def exploration():
    """Sacred named config: Habitat exploration task head — single map
    channel, no target, higher lr (1e-3), occupancy-map kwargs with a
    90-degree FOV and symmetric +/-11 map range."""
    uuid = 'habitat_exploration'
    cfg = {}
    cfg['learner'] = {'lr': 0.001, 'perception_network_kwargs': {'n_map_channels': 1, 'use_target': False}}
    cfg['env'] = {'env_name': 'Habitat_Exploration', 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': {'names_to_transforms': {'map': 'identity_transform()', 'global_pos': 'identity_transform()'}, 'keep_unnamed': False}, 'transform_fn_post_aggregation_fn': 'TransformFactory.independent', 'transform_fn_post_aggregation_kwargs': {'names_to_transforms': {'map': 'map_pool_collated((1,84,84))'}, 'keep_unnamed': True}, 'env_specific_kwargs': {'scenario_kwargs': {'max_episode_steps': 1000}, 'map_kwargs': {'map_size': 84, 'fov': (np.pi / 2), 'min_depth': 0, 'max_depth': 1.5, 'relative_range': True, 'map_x_range': [(- 11), 11], 'map_y_range': [(- 11), 11], 'fullvision': False}, 'reward_kwargs': {'slack_reward': 0}}}
|
@ex.named_config
def small_settings5():
    """Sacred named config: reduced-footprint training — shorter rollouts,
    small replay buffer, 6 train + 1 val processes."""
    uuid = 'habitat_small_settings5'
    cfg = {}
    cfg['learner'] = {'num_steps': 512, 'replay_buffer_size': 1024, 'on_policy_epoch': 5, 'off_policy_epoch': 10, 'num_mini_batch': 24, 'rollout_value_batch_multiplier': 1}
    cfg['env'] = {'num_processes': 6, 'num_val_processes': 1}
|
@ex.named_config
def cvpr_settings():
    """Sacred named config: the settings used for the CVPR experiments —
    512-step rollouts, 4096-sample replay, 6 train + 1 val processes."""
    uuid = 'habitat_cvpr_settings'
    cfg = {}
    cfg['learner'] = {'num_steps': 512, 'replay_buffer_size': 4096, 'on_policy_epoch': 8, 'off_policy_epoch': 8, 'num_mini_batch': 8, 'rollout_value_batch_multiplier': 1}
    cfg['env'] = {'num_processes': 6, 'num_val_processes': 1}
|
@ex.named_config
def prototype():
    """Sacred named config: quick prototyping — one train scene (Adrian),
    one val scene (Denmark), frequent logging/visualization."""
    uuid = 'test'
    cfg = {}
    cfg['env'] = {'num_processes': 2, 'num_val_processes': 1, 'env_specific_kwargs': {'train_scenes': ['Adrian'], 'val_scenes': ['Denmark']}}
    cfg['saving'] = {'log_interval': 2, 'vis_interval': 1}
|
@ex.named_config
def debug():
    """Sacred named config: single-process deterministic debug run; the
    `override` dict additionally forces debug_mode when resuming from a
    checkpointed config."""
    uuid = 'test'
    cfg = {}
    override = {}
    # Deterministic policy and tiny buffers make failures reproducible.
    cfg['learner'] = {'num_steps': 100, 'replay_buffer_size': 300, 'deterministic': True}
    cfg['env'] = {'num_processes': 1, 'num_val_processes': 0, 'env_specific_kwargs': {'train_scenes': ['Adrian'], 'debug_mode': True}}
    cfg['saving'] = {'log_interval': 2, 'vis_interval': 1}
    override['env'] = {'num_processes': 1, 'num_val_processes': 0, 'env_specific_kwargs': {'debug_mode': True}}
|
@ex.config
def cfg_base():
    """Sacred base config (always applied): evaluation/benchmark defaults
    against the Gibson PointNav validation task."""
    cfg = {}
    uuid = ''
    # Habitat benchmark task definition, resolved relative to the CWD.
    config_file = os.path.join(os.getcwd(), 'habitat-api/configs/tasks/pointnav_gibson_val.yaml')
    cfg['eval_kwargs'] = {'exp_path': '/mnt/logdir/keypoints3d_encoding_restart1', 'weights_only_path': None, 'challenge': True, 'debug': False, 'overwrite_configs': True, 'benchmark_episodes': 10, 'benchmark_config': config_file}
|
@ex.named_config
def weights_only():
    """Sacred named config: evaluate from a bare weights file instead of a
    full experiment directory (exp_path cleared)."""
    cfg = {}
    cfg['eval_kwargs'] = {'exp_path': None, 'weights_only_path': '/mnt/eval_runs/curvature_encoding_moresteps_collate5/checkpoints/weights_and_more-latest.dat'}
|
@ex.named_config
def cfg_overwrite():
    """Sacred named config: overwrite a restored config with a taskonomy
    feature encoder, backout/validator takeover policies, and string-built
    observation transforms. `image_dim` is a scratch variable interpolated
    into the transform string and then `del`eted so Sacred does not record it."""
    cfg = {}
    uuid = '_overwrite'
    # Backout: hardcoded recovery policy kicks in after `patience` stuck steps.
    cfg['learner'] = {'taskonomy_encoder': '/mnt/models/keypoints3d_encoder.dat', 'perception_network': 'features_only', 'encoder_type': 'taskonomy', 'backout': {'use_backout': True, 'patience': 80, 'unstuck_dist': 0.3, 'randomize_actions': True, 'backout_type': 'hardcoded', 'backout_ckpt_path': '/mnt/logdir/curvature_encoding_moresteps_collate/checkpoints/ckpt-latest.dat', 'num_takeover_steps': 8}, 'validator': {'use_validator': True, 'validator_type': 'jerk'}}
    image_dim = 84
    # NOTE(review): 'transform_fn' interpolates the keypoints3d encoder set above,
    # while 'transform_fn_post_aggregation' hardcodes the normal encoder — confirm
    # which transform the eval path actually uses.
    cfg['env'] = {'sensors': {'features': None, 'taskonomy': None, 'map': None, 'target': None, 'global_pos': None}, 'collate_env_obs': False, 'env_gpus': [0], 'transform_fn': "TransformFactory.independent({{'taskonomy':taskonomy_features_transform('{taskonomy_encoder}', encoder_type='{encoder_type}'), 'map':image_to_input_pool((3,{image_dim},{image_dim})), 'target':identity_transform(), 'global_pos':identity_transform()}}, keep_unnamed=False)".format(encoder_type=cfg['learner']['encoder_type'], taskonomy_encoder=cfg['learner']['taskonomy_encoder'], image_dim=image_dim), 'use_target': True, 'use_map': True, 'habitat_map_kwargs': {'map_building_size': 22, 'map_max_pool': False, 'use_cuda': False, 'history_size': None}, 'env_specific_kwargs': {'target_dim': 16}, 'transform_fn_pre_aggregation': "\n TransformFactory.independent(\n {\n 'taskonomy':rescale_centercrop_resize((3,256,256)),\n },\n keep_unnamed=True)\n ".translate(remove_whitespace), 'transform_fn_post_aggregation': "\n TransformFactory.independent(\n {{\n 'taskonomy':taskonomy_features_transform('{taskonomy_encoder}'),\n 'target':identity_transform(),\n 'map':map_pool_collated((3,84,84)),\n 'global_pos':identity_transform(),\n }},\n keep_unnamed=False)\n ".translate(remove_whitespace).format(taskonomy_encoder='/mnt/models/normal_encoder.dat')}
    cfg['training'] = {'seed': 42}
    # Remove the scratch variable so it is not captured as a config entry.
    del image_dim
|
@ex.named_config
def cifar10_data():
    """Sacred named config: supervised CIFAR-10 classification — SGD with
    momentum, MultiStepLR at epochs 100/150, softmax cross-entropy."""
    cfg = {'learner': {'lr': 0.1, 'optimizer_class': 'optim.SGD', 'optimizer_kwargs': {'momentum': 0.9, 'weight_decay': 0.0001}, 'lr_scheduler_method': 'optim.lr_scheduler.MultiStepLR', 'lr_scheduler_method_kwargs': {'milestones': [100, 150]}, 'max_grad_norm': None, 'use_feedback': False}, 'training': {'dataloader_fn': 'icifar_dataset.get_cifar_dataloaders', 'dataloader_fn_kwargs': {'data_path': '/mnt/data/cifar10', 'num_workers': 8, 'pin_memory': True, 'epochlength': 20000, 'batch_size': 128, 'batch_size_val': 256}, 'loss_fn': 'softmax_cross_entropy', 'loss_kwargs': {}, 'use_masks': False, 'sources': [['rgb']], 'targets': [['cifar10']], 'masks': None, 'task_is_classification': [True], 'num_epochs': 1000}, 'saving': {'ticks_per_epoch': 5, 'log_interval': 1, 'save_interval': 200}}
|
@ex.named_config
def icifar_data():
    """Sacred named config: incremental CIFAR-100 (iCIFAR) — splits the 100
    classes into N_TASKS contiguous chunks and builds one task per chunk
    for the LifelongSidetuneNetwork. Scratch variables (including the loop
    index `i`) are `del`eted at the end so Sacred does not record them."""
    n_epochs = 4
    n_classes = 100
    n_tasks = N_TASKS
    # Classes per task (integer division; assumes n_tasks divides evenly).
    n = (100 // n_tasks)
    chunked_classes = []
    # Ceiling division gives the number of chunks covering all classes.
    for i in range((((n_classes + n) - 1) // n)):
        chunked_classes.append(np.arange((i * n), ((i + 1) * n)))
    # Task names like 'cifar0-9', 'cifar10-19', ... derived from chunk bounds.
    chunked_names = [[f'cifar{cs.min()}-{cs.max()}'] for cs in chunked_classes]
    cfg = {'training': {'dataloader_fn': 'icifar_dataset.get_dataloaders', 'dataloader_fn_kwargs': {'data_path': '/mnt/data/cifar100', 'load_to_mem': False, 'num_workers': 8, 'pin_memory': True, 'epochlength': (5000 * n_epochs), 'epochs_until_cycle': 0, 'batch_size': 128, 'batch_size_val': 256}, 'loss_fn': 'softmax_cross_entropy', 'loss_kwargs': {}, 'use_masks': False, 'sources': ([['rgb']] * len(chunked_classes)), 'targets': chunked_names, 'masks': None, 'task_is_classification': ([True] * len(chunked_classes)), 'num_epochs': N_TASKS}, 'saving': {'ticks_per_epoch': 1, 'log_interval': 1, 'save_interval': 10}, 'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'dataset': 'icifar'}, 'use_feedback': False}}
    # Drop every scratch local so only `cfg` is captured as config.
    del n, n_tasks, n_classes, chunked_classes, i, chunked_names, n_epochs
|
@ex.named_config
def icifar0_10_data():
    """Sacred named config: repeat the first iCIFAR task ('cifar0-9')
    N_TASKS times — a control where every task is identical."""
    cfg = {'training': {'dataloader_fn': 'icifar_dataset.get_dataloaders', 'dataloader_fn_kwargs': {'data_path': '/mnt/data/cifar100', 'load_to_mem': False, 'num_workers': 8, 'pin_memory': True, 'epochlength': 20000, 'batch_size': 128, 'batch_size_val': 256}, 'loss_fn': 'softmax_cross_entropy', 'loss_kwargs': {}, 'use_masks': False, 'sources': ([['rgb']] * N_TASKS), 'targets': ([['cifar0-9']] * N_TASKS), 'masks': None, 'task_is_classification': ([True] * N_TASKS)}}
|
@ex.named_config
def cifar_hp():
    """Sacred named config: hyperparameter tweak — lower lr, no weight decay."""
    uuid = 'no_uuid'
    cfg = {}
    cfg['learner'] = {'lr': 0.001, 'optimizer_kwargs': {'weight_decay': 0.0}}
|
@ex.named_config
def debug_cifar100():
    """Sacred named config: debug CIFAR-100 — one pass over the 50k training
    set per epoch (50000 / batch 128) with a 100-class head."""
    cfg = {'training': {'dataloader_fn_kwargs': {'epochlength': (50000 // 128)}}, 'learner': {'model_kwargs': {'num_classes': 100}}}
|
@ex.named_config
def model_resnet_cifar():
    """Sacred named config: ResNet-44 (no final linear) for CIFAR, resumed
    from a pretrained checkpoint."""
    cfg = {'learner': {'model': 'ResnetiCifar44'}, 'training': {'resume_from_checkpoint_path': '/mnt/models/resnet44-nolinear-cifar.pth', 'resume_training': True}}
|
@ex.named_config
def init_lowenergy_cifar():
    """Sacred named config: initialize the sidetuning side network from the
    low-energy FCN4 weights distilled from ResNet-44."""
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'side_class': 'FCN4Reshaped', 'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar-lowenergy.pth'}}}
|
@ex.named_config
def init_xavier():
    """Sacred named config: no side-network weights file, so the side network
    falls back to its default (Xavier) initialization."""
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'side_weights_path': None}}}
|
@ex.named_config
def bsp_cifar():
    """Sacred named config: binary superposition (bsp, period 10) applied to
    both base and side of a nested GenericSidetuneNetwork base."""
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'GenericSidetuneNetwork', 'base_kwargs': {'base_weights_path': '/mnt/models/resnet44-nolinear-cifar-bsp.pth', 'base_kwargs': {'bsp': True, 'period': 10}, 'side_kwargs': {'bsp': True, 'period': 10}}}}}
|
@ex.named_config
def bsp_norecurse_cifar():
    """Sacred named config: bsp on the base network only (no nested
    sidetune recursion, no bsp on the side network)."""
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_weights_path': '/mnt/models/resnet44-nolinear-cifar-bsp.pth', 'base_kwargs': {'bsp': True, 'period': 10}}}}
|
@ex.named_config
def bsp_debug():
    """Sacred named config: enable the bsp debug flag on the base network."""
    cfg = {'learner': {'model_kwargs': {'base_kwargs': {'bsp': True, 'debug': True}}}}
|
@ex.named_config
def model_boosted_cifar():
    """Sacred named config: BoostedNetwork with no base — FCN4 side network
    plus a 64->10 linear transfer head."""
    # NOTE(review): n_channels_out is assigned then deleted without being read —
    # appears to be leftover scaffolding kept for symmetry with sibling configs.
    n_channels_out = 3
    cfg = {'learner': {'model': 'BoostedNetwork', 'model_kwargs': {'base_class': None, 'use_baked_encoding': False, 'side_class': 'FCN4Reshaped', 'side_kwargs': {}, 'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth', 'transfer_class': 'nn.Linear', 'transfer_kwargs': {'in_features': 64, 'out_features': 10}, 'transfer_weights_path': None, 'decoder_class': None, 'decoder_weights_path': None, 'decoder_kwargs': {}}}}
    del n_channels_out
|
@ex.named_config
def model_boosted_wbase_cifar():
    """Sacred named config: BoostedNetwork with a frozen (eval_only)
    pretrained ResNet-44 base plus the FCN4 side network."""
    # Scratch local, deleted below so Sacred does not record it.
    n_channels_out = 3
    cfg = {'learner': {'model': 'BoostedNetwork', 'model_kwargs': {'base_class': 'ResnetiCifar44NoLinear', 'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', 'base_kwargs': {'eval_only': True}, 'use_baked_encoding': False, 'side_class': 'FCN4Reshaped', 'side_kwargs': {}, 'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth', 'transfer_class': 'nn.Linear', 'transfer_kwargs': {'in_features': 64, 'out_features': 10}, 'transfer_weights_path': None, 'decoder_class': None, 'decoder_weights_path': None, 'decoder_kwargs': {}}}}
    del n_channels_out
|
@ex.named_config
def model_resnet_icifar0_10():
    """Sacred named config: trainable ResNet-44 base, no side network —
    plain finetuning baseline for iCIFAR task 0."""
    # Scratch local, deleted below so Sacred does not record it.
    n_channels_out = 3
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'ResnetiCifar44NoLinear', 'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', 'base_kwargs': {'eval_only': False}, 'use_baked_encoding': False, 'side_class': None, 'transfer_class': 'nn.Linear', 'transfer_kwargs': {'in_features': 64, 'out_features': 10}, 'transfer_weights_path': None, 'decoder_class': None, 'decoder_weights_path': None, 'decoder_kwargs': {}}}}
    del n_channels_out
|
@ex.named_config
def model_lifelong_independent_cifar():
    """Sacred named config: independent-per-task baseline — each task gets
    its own GenericSidetuneNetwork (ResNet-44 base + FCN4 side) as the side
    network, with a fresh linear head."""
    # Scratch local, deleted below so Sacred does not record it.
    n_channels_out = 3
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'side_class': 'GenericSidetuneNetwork', 'side_kwargs': {'n_channels_in': 3, 'n_channels_out': 8, 'base_class': 'ResnetiCifar44NoLinear', 'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', 'base_kwargs': {'eval_only': False}, 'use_baked_encoding': False, 'side_class': 'FCN4Reshaped', 'side_kwargs': {'eval_only': False}, 'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth'}, 'transfer_class': 'nn.Linear', 'transfer_kwargs': {'in_features': 64, 'out_features': 10}}}}
    del n_channels_out
|
@ex.named_config
def model_lifelong_independent_resnet_cifar():
    """Sacred named config: independent baseline where each task trains its
    own ResNet-44 as the side network (no shared base)."""
    # Scratch local, deleted below so Sacred does not record it.
    n_channels_out = 3
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': None, 'base_weights_path': None, 'base_kwargs': {}, 'use_baked_encoding': False, 'side_class': 'ResnetiCifar44NoLinear', 'side_kwargs': {'eval_only': False}, 'side_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', 'transfer_class': 'nn.Linear', 'transfer_kwargs': {'in_features': 64, 'out_features': 10}, 'transfer_weights_path': None, 'decoder_class': None, 'decoder_weights_path': None, 'decoder_kwargs': {}}}}
    del n_channels_out
|
@ex.named_config
def model_lifelong_independent_fcn4_cifar():
    """Sacred named config: independent baseline where each task trains its
    own FCN4 side network (no shared base)."""
    # Scratch local, deleted below so Sacred does not record it.
    n_channels_out = 3
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': None, 'base_weights_path': None, 'base_kwargs': {}, 'use_baked_encoding': False, 'side_class': 'FCN4Reshaped', 'side_kwargs': {'eval_only': False}, 'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth', 'transfer_class': 'nn.Linear', 'transfer_kwargs': {'in_features': 64, 'out_features': 10}, 'transfer_weights_path': None, 'decoder_class': None, 'decoder_weights_path': None, 'decoder_kwargs': {}}}}
    del n_channels_out
|
@ex.named_config
def model_lifelong_finetune_cifar():
    """Sacred named config: finetuning baseline — a trainable
    GenericSidetuneNetwork (ResNet-44 + FCN4) as the shared base, updated
    across tasks, with a linear head."""
    # Scratch local, deleted below so Sacred does not record it.
    n_channels_out = 3
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'GenericSidetuneNetwork', 'base_kwargs': {'n_channels_in': 3, 'n_channels_out': 8, 'base_class': 'ResnetiCifar44NoLinear', 'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', 'base_kwargs': {'eval_only': False}, 'use_baked_encoding': False, 'side_class': 'FCN4Reshaped', 'side_kwargs': {'eval_only': False}, 'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth'}, 'use_baked_encoding': False, 'transfer_class': 'nn.Linear', 'transfer_kwargs': {'in_features': 64, 'out_features': 10}}}}
    del n_channels_out
|
@ex.named_config
def model_lifelong_finetune_resnet44_cifar():
    """Sacred named config: finetuning baseline with a plain trainable
    ResNet-44 base (no side network) and a 64->10 linear head."""
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'ResnetiCifar44NoLinear', 'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', 'base_kwargs': {'eval_only': False}, 'use_baked_encoding': False, 'transfer_class': 'nn.Linear', 'transfer_kwargs': {'in_features': 64, 'out_features': 10}, 'transfer_weights_path': None}}}
|
# NOTE(review): the three lines below are non-Python scrape residue (a dataset-viewer
# footer) and would be a SyntaxError if left bare; commented out to keep the file valid.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.