# NOTE(review): dataset-viewer header residue ("code stringlengths 17 6.64M | / |---|")
# neutralized into a comment — it was not valid Python and carried no content.
def test_state_definition_list_style():
    """FSM stages can be registered by passing a list to the ``stages`` constructor argument."""
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockAgent('A')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA', stages=[ph.FSMStage(stage_id='StageA', acting_agents=['A'], next_stages=['StageA'], handler=self.handle)])
        def handle(self):
            # Loop back to the same (only) stage.
            return 'StageA'
    env = Env()
    env.reset()
    env.step({})
|
def test_no_stages_registered():
    """All FSM envs must have at least one state registered using the FSMStage decorator."""
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage=None)
    with pytest.raises(ph.fsm.FSMValidationError):
        Env()
|
def test_duplicate_stages():
    """All FSM envs must not have more than one state registered with the same ID."""
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        # Both handlers register the same stage ID -> validation error.
        @ph.FSMStage(stage_id='StageA', acting_agents=['agent'], next_stages=['StageA'])
        def handle_1(self):
            pass
        @ph.FSMStage(stage_id='StageA', acting_agents=['agent'], next_stages=['StageA'])
        def handle_2(self):
            pass
    with pytest.raises(ph.fsm.FSMValidationError):
        Env()
|
def test_invalid_initial_stage():
    """All FSM envs must have an initial state that is a valid registered state."""
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            # 'StageB' is never registered as a stage.
            super().__init__(num_steps=1, network=network, initial_stage='StageB')
        @ph.FSMStage(stage_id='StageA', acting_agents=['agent'], next_stages=['StageA'])
        def handle(self):
            pass
    with pytest.raises(ph.fsm.FSMValidationError):
        Env()
|
def test_invalid_next_state():
    """All next states passed into the FSMStage decorator must be valid states."""
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        # 'StageB' is declared as a next stage but never registered.
        @ph.FSMStage(stage_id='StageA', acting_agents=['agent'], next_stages=['StageB'])
        def handle_1(self):
            pass
    with pytest.raises(ph.fsm.FSMValidationError):
        Env()
|
def test_invalid_no_handler_stage_next_stages():
    """All stages without a provided handler must have exactly one next stage."""
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            # No handler and an empty next_stages list -> validation error.
            super().__init__(num_steps=1, network=network, initial_stage='StageA', stages=[ph.FSMStage('StageA', acting_agents=['agent'], next_stages=[])])
    with pytest.raises(ph.fsm.FSMValidationError):
        Env()
|
def test_invalid_next_state_runtime():
    """A valid registered next state must be returned by the state handler functions."""
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        @ph.FSMStage(stage_id='StageA', acting_agents=['agent'], next_stages=['StageA'])
        def handle_1(self):
            # Returns a stage that was not declared in next_stages.
            return 'StageB'
    env = Env()
    env.reset()
    with pytest.raises(ph.fsm.FSMRuntimeError):
        env.step(actions={'agent': 0})
|
def test_is_fsm_deterministic():
    """An FSM with a single possible next stage per stage is deterministic; one whose handlers choose between several is not."""
    # Single self-looping stage: deterministic.
    env = ph.FiniteStateMachineEnv(num_steps=1, network=ph.Network([]), initial_stage='A', stages=[ph.FSMStage(stage_id='A', acting_agents=[], next_stages=['A'])])
    assert env.is_fsm_deterministic()
    # Fixed three-stage cycle A->B->C->A: still deterministic.
    env = ph.FiniteStateMachineEnv(num_steps=1, network=ph.Network([]), initial_stage='A', stages=[ph.FSMStage(stage_id='A', acting_agents=[], next_stages=['B']), ph.FSMStage(stage_id='B', acting_agents=[], next_stages=['C']), ph.FSMStage(stage_id='C', acting_agents=[], next_stages=['A'])])
    assert env.is_fsm_deterministic()
    # Each stage has two possible next stages chosen by a handler: non-deterministic.
    env = ph.FiniteStateMachineEnv(num_steps=1, network=ph.Network([]), initial_stage='A', stages=[ph.FSMStage(stage_id='A', acting_agents=[], next_stages=['A', 'B'], handler=(lambda x: 'B')), ph.FSMStage(stage_id='B', acting_agents=[], next_stages=['A', 'B'], handler=(lambda x: 'A'))])
    assert (not env.is_fsm_deterministic())
|
class MockFSMEnv(ph.FiniteStateMachineEnv):
    """Two-stage FSM env alternating ODD -> EVEN -> ODD with a single acting agent."""
    def __init__(self):
        agents = [MockStrategicAgent('agent')]
        network = ph.Network(agents)
        super().__init__(num_steps=3, network=network, initial_stage='ODD', stages=[ph.FSMStage(stage_id='ODD', acting_agents=['agent'], next_stages=['EVEN']), ph.FSMStage(stage_id='EVEN', acting_agents=['agent'], next_stages=['ODD'])])
|
def test_odd_even_one_agent():
    """One agent acting in both stages: stage alternates per step and agent hooks are called once per step."""
    env = MockFSMEnv()
    assert (env.reset() == ({'agent': np.array([0])}, {}))
    assert (env.current_stage == 'ODD')
    # reset() encodes an observation but computes no reward / decodes no action.
    assert (env.agents['agent'].compute_reward_count == 0)
    assert (env.agents['agent'].encode_obs_count == 1)
    assert (env.agents['agent'].decode_action_count == 0)
    step = env.step({'agent': np.array([0])})
    assert (env.current_stage == 'EVEN')
    assert (step.observations == {'agent': np.array([(1.0 / 3.0)])})
    assert (step.rewards == {'agent': 0.0})
    assert (step.terminations == {'agent': False, '__all__': False})
    assert (step.truncations == {'agent': False, '__all__': False})
    assert (step.infos == {'agent': {}})
    # One full step: one reward, one action decode, one more observation.
    assert (env.agents['agent'].compute_reward_count == 1)
    assert (env.agents['agent'].encode_obs_count == 2)
    assert (env.agents['agent'].decode_action_count == 1)
|
class MockFSMEnv(ph.FiniteStateMachineEnv):
    """Two-stage FSM env where a different agent acts (and is rewarded) in each stage.

    NOTE(review): redefines the earlier MockFSMEnv — this file is a
    concatenation of test modules, so the later definition shadows the first.
    """
    def __init__(self):
        agents = [MockStrategicAgent('odd_agent'), MockStrategicAgent('even_agent')]
        network = ph.Network(agents)
        super().__init__(num_steps=3, network=network, initial_stage='ODD', stages=[ph.FSMStage(stage_id='ODD', next_stages=['EVEN'], acting_agents=['odd_agent'], rewarded_agents=['odd_agent']), ph.FSMStage(stage_id='EVEN', next_stages=['ODD'], acting_agents=['even_agent'], rewarded_agents=['even_agent'])])
|
def test_odd_even_two_agents():
    """With per-stage acting agents, only the acting agent observes/acts each step and rewards arrive one stage late."""
    env = MockFSMEnv()
    # Only the ODD-stage agent observes at reset.
    assert (env.reset() == ({'odd_agent': np.array([0])}, {}))
    assert (env.current_stage == 'ODD')
    assert (env.agents['odd_agent'].compute_reward_count == 0)
    assert (env.agents['odd_agent'].encode_obs_count == 1)
    assert (env.agents['odd_agent'].decode_action_count == 0)
    assert (env.agents['even_agent'].compute_reward_count == 0)
    assert (env.agents['even_agent'].encode_obs_count == 0)
    assert (env.agents['even_agent'].decode_action_count == 0)
    step = env.step({'odd_agent': np.array([1])})
    assert (env.current_stage == 'EVEN')
    assert (step.observations == {'even_agent': np.array([(1.0 / 3.0)])})
    # even_agent has not been rewarded yet — placeholder None.
    assert (step.rewards == {'even_agent': None})
    assert (step.terminations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.truncations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.infos == {'even_agent': {}})
    assert (env.agents['odd_agent'].compute_reward_count == 1)
    assert (env.agents['odd_agent'].encode_obs_count == 1)
    assert (env.agents['odd_agent'].decode_action_count == 1)
    assert (env.agents['even_agent'].compute_reward_count == 0)
    assert (env.agents['even_agent'].encode_obs_count == 1)
    assert (env.agents['even_agent'].decode_action_count == 0)
    step = env.step({'even_agent': np.array([0])})
    assert (env.current_stage == 'ODD')
    assert (step.observations == {'odd_agent': np.array([(2.0 / 3.0)])})
    # odd_agent's reward from the first step is delivered now.
    assert (step.rewards == {'odd_agent': 0.0})
    assert (step.terminations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.truncations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.infos == {'odd_agent': {}})
    assert (env.agents['odd_agent'].compute_reward_count == 1)
    assert (env.agents['odd_agent'].encode_obs_count == 2)
    assert (env.agents['odd_agent'].decode_action_count == 1)
    assert (env.agents['even_agent'].compute_reward_count == 1)
    assert (env.agents['even_agent'].encode_obs_count == 1)
    assert (env.agents['even_agent'].decode_action_count == 1)
|
class OneStateFSMEnvWithHandler(ph.FiniteStateMachineEnv):
    """Single self-looping stage registered via the FSMStage decorator with an explicit handler."""
    def __init__(self):
        agents = [MockStrategicAgent('agent')]
        network = ph.Network(agents)
        network.add_connection('agent', 'agent')
        super().__init__(num_steps=2, network=network, initial_stage='UNIT')
    @ph.FSMStage(stage_id='UNIT', acting_agents=['agent'], next_stages=['UNIT'])
    def handle(self):
        # Always loop back to the single stage.
        return 'UNIT'
|
def test_one_state_with_handler():
    """A one-stage FSM with a handler behaves like a plain env and truncates at num_steps."""
    env = OneStateFSMEnvWithHandler()
    assert (env.reset() == ({'agent': np.array([0.0])}, {}))
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 0)
    assert (env.agents['agent'].encode_obs_count == 1)
    assert (env.agents['agent'].decode_action_count == 0)
    step = env.step({'agent': np.array([0])})
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 1)
    assert (env.agents['agent'].encode_obs_count == 2)
    assert (env.agents['agent'].decode_action_count == 1)
    assert (step.observations == {'agent': np.array([0.5])})
    assert (step.rewards == {'agent': 0})
    assert (step.terminations == {'agent': False, '__all__': False})
    assert (step.truncations == {'agent': False, '__all__': False})
    assert (step.infos == {'agent': {}})
    step = env.step({'agent': np.array([0])})
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 2)
    assert (env.agents['agent'].encode_obs_count == 3)
    assert (env.agents['agent'].decode_action_count == 2)
    assert (step.observations == {'agent': np.array([1.0])})
    assert (step.rewards == {'agent': 0})
    assert (step.terminations == {'agent': False, '__all__': False})
    # Step 2 of num_steps=2: episode truncates globally.
    assert (step.truncations == {'agent': False, '__all__': True})
    assert (step.infos == {'agent': {}})
|
class OneStateFSMEnvWithoutHandler(ph.FiniteStateMachineEnv):
    """Single self-looping stage registered via the stages list with handler=None."""
    def __init__(self):
        agents = [MockStrategicAgent('agent')]
        network = ph.Network(agents)
        network.add_connection('agent', 'agent')
        super().__init__(num_steps=2, network=network, initial_stage='UNIT', stages=[ph.FSMStage(stage_id='UNIT', acting_agents=['agent'], next_stages=['UNIT'], handler=None)])
|
def test_one_state_without_handler():
    """A one-stage FSM without a handler matches the with-handler behavior exactly."""
    env = OneStateFSMEnvWithoutHandler()
    assert (env.reset() == ({'agent': np.array([0.0])}, {}))
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 0)
    assert (env.agents['agent'].encode_obs_count == 1)
    assert (env.agents['agent'].decode_action_count == 0)
    step = env.step({'agent': np.array([0])})
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 1)
    assert (env.agents['agent'].encode_obs_count == 2)
    assert (env.agents['agent'].decode_action_count == 1)
    assert (step.observations == {'agent': np.array([0.5])})
    assert (step.rewards == {'agent': 0})
    assert (step.terminations == {'agent': False, '__all__': False})
    assert (step.truncations == {'agent': False, '__all__': False})
    assert (step.infos == {'agent': {}})
    step = env.step({'agent': np.array([0])})
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 2)
    assert (env.agents['agent'].encode_obs_count == 3)
    assert (env.agents['agent'].decode_action_count == 2)
    assert (step.observations == {'agent': np.array([1.0])})
    assert (step.rewards == {'agent': 0})
    assert (step.terminations == {'agent': False, '__all__': False})
    # Final step of num_steps=2: global truncation.
    assert (step.truncations == {'agent': False, '__all__': True})
    assert (step.infos == {'agent': {}})
|
@dataclass
class MockEpisode():
    """Stand-in for an RLlib episode object; the metric-logger callback fills these dicts."""
    # Per-step metric values recorded during the episode.
    user_data: dict = field(default_factory=dict)
    # Reduced metric values written at episode end.
    custom_metrics: dict = field(default_factory=dict)
    media: dict = field(default_factory=dict)
|
class MockMetric(SimpleMetric):
    """Metric that always extracts the same fixed value, ignoring the env."""
    def __init__(self, value: int, train_reduce_action='last', fsm_stages=None):
        super().__init__(train_reduce_action, fsm_stages=fsm_stages)
        self.value = value
    def extract(self, _env) -> int:
        # Constant metric — the env argument is intentionally unused.
        return self.value
|
class MockBaseEnv():
    """Minimal stand-in for RLlib's BaseEnv: wraps one env in an ``envs`` list.

    NOTE(review): despite the parameter name, callers pass an env *instance*
    (e.g. ``MockBaseEnv(env)``), not a class.
    """
    def __init__(self, env_class):
        # RLlib's BaseEnv exposes its sub-environments as a list.
        self.envs = [env_class]
    def step(self, actions=None):
        """Step every wrapped env with the given action dict.

        Fix: the original used a mutable default argument (``actions={}``),
        which is shared across calls; a ``None`` sentinel avoids that pitfall.
        """
        if actions is None:
            actions = {}
        for env in self.envs:
            env.step(actions)
|
def test_fsm_logging():
    """Metrics restricted to specific FSM stages record NotRecorded() outside their stage and still reduce correctly."""
    # Two stages (0 and 1) alternating each step.
    env = FiniteStateMachineEnv(num_steps=2, network=Network(), initial_stage=0, stages=[FSMStage(0, [], None, [1]), FSMStage(1, [], None, [0])])
    episode = MockEpisode()
    base_env = MockBaseEnv(env)
    callback = RLlibMetricLogger({'stage_0_metric': MockMetric(0, 'sum', fsm_stages=[0]), 'stage_1_metric': MockMetric(1, 'sum', fsm_stages=[1])})()
    callback.on_episode_start(worker=None, base_env=base_env, policies=None, episode=episode, env_index=0)
    callback.on_episode_step(worker=None, base_env=base_env, episode=episode, env_index=0)
    # Stage 0 active: only the stage-0 metric records a value.
    assert (episode.user_data == {'stage_0_metric': [0], 'stage_1_metric': [NotRecorded()]})
    base_env.step()
    callback.on_episode_step(worker=None, base_env=base_env, episode=episode, env_index=0)
    assert (episode.user_data == {'stage_0_metric': [0, NotRecorded()], 'stage_1_metric': [NotRecorded(), 1]})
    callback.on_episode_end(worker=None, base_env=base_env, policies=None, episode=episode, env_index=0)
    # Reduction ignores the NotRecorded placeholders.
    assert (episode.custom_metrics == {'stage_0_metric': 0, 'stage_1_metric': 1})
|
class MockAgent():
    """Toy agent whose ``test_property`` grows by a fixed increment per step."""

    def __init__(self, inc):
        # Per-step increment and the accumulated value it drives.
        self.inc = inc
        self.test_property = 0.0

    def step(self):
        """Advance the agent by one step, accumulating the increment."""
        self.test_property = self.test_property + self.inc
|
class MockProperty():
    """Holder exposing one nested attribute, used to test dotted metric paths."""

    def __init__(self):
        self.sub_property = 1.0
|
class MockEnv():
    """Toy env with a step counter, a nested property, and two mock agents."""

    def __init__(self):
        self.test_property = 0.0
        self.nested_property = MockProperty()
        self.agents = {'agent1': MockAgent(1.0), 'agent2': MockAgent(2.0)}

    def step(self):
        """Advance the env counter by one and step every agent."""
        self.test_property = self.test_property + 1.0
        for member in self.agents.values():
            member.step()
|
def test_simple_env_metric_1():
    """'last' train reduce keeps the final value; 'none' eval reduce returns all values."""
    env = MockEnv()
    metric = ph.metrics.SimpleEnvMetric(env_property='test_property', train_reduce_action='last', eval_reduce_action='none')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    assert (metric.reduce(values, mode='train') == 5.0)
    assert np.all((metric.reduce(values, mode='evaluate') == np.arange(1.0, 6.0)))
|

def test_simple_env_metric_2():
    """'mean' train reduce averages the recorded values (mean of 1..5 == 3)."""
    env = MockEnv()
    metric = ph.metrics.SimpleEnvMetric(env_property='test_property', train_reduce_action='mean')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    assert (metric.reduce(values, mode='train') == 3.0)
|

def test_simple_env_metric_3():
    """'sum' train reduce totals the recorded values (sum of 1..5 == 15)."""
    env = MockEnv()
    metric = ph.metrics.SimpleEnvMetric(env_property='test_property', train_reduce_action='sum')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    assert (metric.reduce(values, mode='train') == 15.0)
|
def test_simple_agent_metric_1():
    """'last' reduce on a per-agent property (agent1 increments by 1 per step)."""
    env = MockEnv()
    metric = ph.metrics.SimpleAgentMetric(agent_id='agent1', agent_property='test_property', train_reduce_action='last')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    assert (metric.reduce(values, mode='train') == 5.0)
|

def test_simple_agent_metric_2():
    """'mean' reduce on a per-agent property."""
    env = MockEnv()
    metric = ph.metrics.SimpleAgentMetric(agent_id='agent1', agent_property='test_property', train_reduce_action='mean')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    assert (metric.reduce(values, mode='train') == 3.0)
|

def test_simple_agent_metric_3():
    """'sum' reduce on a per-agent property."""
    env = MockEnv()
    metric = ph.metrics.SimpleAgentMetric(agent_id='agent1', agent_property='test_property', train_reduce_action='sum')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    assert (metric.reduce(values, mode='train') == 15.0)
|
def test_aggregated_agent_metric_1():
    """'min' group reduce: agent1 ends at 5.0, agent2 at 10.0 -> min is 5.0."""
    env = MockEnv()
    metric = ph.metrics.AggregatedAgentMetric(agent_ids=['agent1', 'agent2'], agent_property='test_property', group_reduce_action='min', train_reduce_action='last')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    assert (metric.reduce(values, mode='train') == 5.0)
|

def test_aggregated_agent_metric_2():
    """'max' group reduce -> 10.0."""
    env = MockEnv()
    metric = ph.metrics.AggregatedAgentMetric(agent_ids=['agent1', 'agent2'], agent_property='test_property', group_reduce_action='max', train_reduce_action='last')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    assert (metric.reduce(values, mode='train') == 10.0)
|

def test_aggregated_agent_metric_3():
    """'mean' group reduce -> (5.0 + 10.0) / 2 == 7.5."""
    env = MockEnv()
    metric = ph.metrics.AggregatedAgentMetric(agent_ids=['agent1', 'agent2'], agent_property='test_property', group_reduce_action='mean', train_reduce_action='last')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    assert (metric.reduce(values, mode='train') == 7.5)
|

def test_aggregated_agent_metric_4():
    """'sum' group reduce -> 5.0 + 10.0 == 15.0."""
    env = MockEnv()
    metric = ph.metrics.AggregatedAgentMetric(agent_ids=['agent1', 'agent2'], agent_property='test_property', group_reduce_action='sum', train_reduce_action='last')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    assert (metric.reduce(values, mode='train') == 15.0)
|
def test_nested_metric():
    """Dotted env_property paths resolve through nested attributes."""
    env = MockEnv()
    metric = ph.metrics.SimpleEnvMetric(env_property='nested_property.sub_property', train_reduce_action='last')
    env.step()
    assert (metric.extract(env) == 1.0)
|
def test_lambda_metric():
    """LambdaMetric uses user-supplied extract and per-mode reduce functions."""
    env = MockEnv()
    metric = ph.metrics.LambdaMetric(extract_fn=(lambda env: env.test_property), train_reduce_fn=(lambda values: np.sum(values)), eval_reduce_fn=(lambda values: (np.sum(values) * 2)))
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    # train: sum(1..5) == 15; evaluate: doubled.
    assert (metric.reduce(values, mode='train') == 15.0)
    assert (metric.reduce(values, mode='evaluate') == 30.0)
|
def test_RLlibMetricLogger():
    """The callback records per-step metric values and reduces them at episode end."""
    episode = MockEpisode()
    base_env = MockBaseEnv(PhantomEnv(Network()))
    callback = RLlibMetricLogger({'test_metric': MockMetric(1)})()
    callback.on_episode_start(worker=None, base_env=base_env, policies=None, episode=episode, env_index=0)
    callback.on_episode_step(worker=None, base_env=base_env, episode=episode, env_index=0)
    assert (episode.user_data['test_metric'] == [1])
    callback.on_episode_end(worker=None, base_env=base_env, policies=None, episode=episode, env_index=0)
    assert (episode.custom_metrics['test_metric'] == 1)
|
@msg_payload()
class MockMessage():
    """Payload carrying a single cash amount."""
    cash: float
|
class MockAgent(Agent):
    """Agent that banks half of any cash message over 25 and forwards the other half back."""
    def __init__(self, aid: AgentID) -> None:
        super().__init__(aid)
        self.total_cash = 0.0
    def reset(self) -> None:
        self.total_cash = 0.0
    @msg_handler(MockMessage)
    def handle_message(self, _: Context, message: MockMessage) -> List[Tuple[(AgentID, MsgPayload)]]:
        # Only amounts above 25 are processed; smaller messages die out,
        # which bounds the message ping-pong between agents.
        if (message.payload.cash > 25):
            self.total_cash += (message.payload.cash / 2.0)
            return [(message.sender_id, MockMessage((message.payload.cash / 2.0)))]
|
def test_init():
    """Connections can be added one at a time or passed to the constructor."""
    network = Network([MockAgent('a1'), MockAgent('a2')])
    network.add_connection('a1', 'a2')
    # Equivalent construction with the connections argument.
    Network([MockAgent('a1'), MockAgent('a2')], connections=[('a1', 'a2')])
|
def test_bad_init():
    """Duplicate agent IDs and connections to unknown agents are rejected."""
    with pytest.raises(ValueError):
        Network([MockAgent('a1'), MockAgent('a1')])
    with pytest.raises(ValueError):
        Network([MockAgent('a1')], connections=[('a1', 'a2')])
|
@pytest.fixture
def net() -> Network:
    """Three-agent network with a single mm<->inv connection ('inv2' is isolated)."""
    network = Network([MockAgent('mm'), MockAgent('inv'), MockAgent('inv2')])
    network.add_connection('mm', 'inv')
    return network
|
def test_getters(net):
    """Agent lookup by ID, predicate, and type all behave as expected."""
    assert ('mm' in net.agent_ids)
    assert ('inv' in net.agent_ids)
    agents = net.get_agents_where((lambda a: (a.id == 'mm')))
    assert (len(agents) == 1)
    assert (list(agents.keys())[0] == 'mm')
    # Every agent is an Agent, so type filters return all or none.
    assert (net.get_agents_with_type(Agent) == net.agents)
    assert (net.get_agents_without_type(Agent) == {})
|
def test_call_response(net):
    """A 100 message: inv banks 50 and replies with 50; mm banks 25; the 25 reply dies (<= 25)."""
    net.send('mm', 'inv', MockMessage(100.0))
    net.resolve({aid: net.context_for(aid, EnvView(0, 0.0)) for aid in net.agents})
    assert (net.agents['mm'].total_cash == 25.0)
    assert (net.agents['inv'].total_cash == 50.0)
|
def test_send_many(net):
    """Two identical messages double the banked totals of the single-message case."""
    net.send('mm', 'inv', MockMessage(100.0))
    net.send('mm', 'inv', MockMessage(100.0))
    net.resolve({aid: net.context_for(aid, EnvView(0, 0.0)) for aid in net.agents})
    assert (net.agents['mm'].total_cash == 50.0)
    assert (net.agents['inv'].total_cash == 100.0)
|
def test_invalid_send(net):
    """Sending along a connection that does not exist raises NetworkError."""
    with pytest.raises(NetworkError):
        # 'inv2' is not connected to 'mm' in this fixture.
        net.send('mm', 'inv2', MockMessage(100.0))
|
def test_context_existence(net):
    """Each connected agent appears in the other's context."""
    assert 'inv' in net.context_for('mm', EnvView(0, 0.0))
    assert 'mm' in net.context_for('inv', EnvView(0, 0.0))
|
def test_reset(net):
    """Network reset clears the agents' accumulated state."""
    net.send('mm', 'inv', MockMessage(100.0))
    net.send('mm', 'inv', MockMessage(100.0))
    net.resolve({aid: net.context_for(aid, EnvView(0, 0.0)) for aid in net.agents})
    net.reset()
    assert (net.agents['mm'].total_cash == 0.0)
    assert (net.agents['inv'].total_cash == 0.0)
|
@pytest.fixture
def net2() -> Network:
    """Fully disconnected network of three agents."""
    agents = [MockAgent('a'), MockAgent('b'), MockAgent('c')]
    return Network(agents)
|
def test_adjacency_matrix(net2):
    """Adjacency-matrix connections require a square, symmetric, hollow matrix matching the agent list."""
    net2.add_connections_with_adjmat(['a', 'b'], np.array([[0, 1], [1, 0]]))
    # Wrong number of agent IDs for the matrix size.
    with pytest.raises(ValueError) as e:
        net2.add_connections_with_adjmat(['a', 'b', 'c'], np.array([[0, 1], [1, 0]]))
    assert (str(e.value) == "Number of agent IDs doesn't match adjacency matrix dimensions.")
    # Non-square matrix.
    with pytest.raises(ValueError) as e:
        net2.add_connections_with_adjmat(['a', 'b'], np.array([[0, 0, 0], [0, 0, 0]]))
    assert (str(e.value) == 'Adjacency matrix must be square.')
    # Asymmetric matrix.
    with pytest.raises(ValueError) as e:
        net2.add_connections_with_adjmat(['a', 'b'], np.array([[0, 0], [1, 0]]))
    assert (str(e.value) == 'Adjacency matrix must be symmetric.')
    # Non-zero diagonal (self-connections).
    with pytest.raises(ValueError) as e:
        net2.add_connections_with_adjmat(['a', 'b'], np.array([[1, 1], [1, 1]]))
    assert (str(e.value) == 'Adjacency matrix must be hollow.')
|
class AgentA(ph.Agent):
    """Marker agent type for payload sender/receiver type checks."""
    pass
|

class AgentB(ph.Agent):
    """Second marker agent type for payload sender/receiver type checks."""
    pass
|
def test_payload_checks():
    """msg_payload sender/receiver type restrictions are enforced only when the network enables checks."""
    agents = [AgentA('A'), AgentB('B')]
    net = ph.Network(agents, enforce_msg_payload_checks=True)
    net.add_connection('A', 'B')
    # Unrestricted payload: any direction allowed.
    @ph.msg_payload()
    class Payload1():
        pass
    net.send('A', 'B', Payload1())
    net.send('B', 'A', Payload1())
    # Restricted to AgentA -> AgentB.
    @ph.msg_payload(AgentA, AgentB)
    class Payload2():
        pass
    net.send('A', 'B', Payload2())
    # Restricted by lists of allowed types on both sides.
    @ph.msg_payload([AgentA, AgentB], [AgentA, AgentB])
    class Payload3():
        pass
    net.send('A', 'B', Payload3())
    net.send('B', 'A', Payload3())
    # One-sided restrictions (only sender or only receiver constrained).
    @ph.msg_payload(sender_type=AgentA, receiver_type=None)
    class Payload4():
        pass
    @ph.msg_payload(sender_type=None, receiver_type=AgentA)
    class Payload5():
        pass
    net.send('A', 'B', Payload4())
    net.send('B', 'A', Payload5())
    # B (AgentB) is not an allowed sender for Payload2.
    with pytest.raises(ph.network.NetworkError):
        net.send('B', 'A', Payload2())
    # With checks disabled, the same send succeeds.
    net = ph.Network(agents, enforce_msg_payload_checks=False)
    net.add_connection('A', 'B')
    net.send('A', 'B', Payload2())
    net.send('B', 'A', Payload2())
|
@msg_payload()
class Request():
    """Request payload carrying a cash amount."""
    cash: float
|

@msg_payload()
class Response():
    """Response payload carrying a cash amount."""
    cash: float
|
class _TestAgent(Agent):
    """Agent that timestamps request/response handling, used to verify resolver ordering."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Wall-clock timestamps of the last handled request/response.
        self.req_time = time.time()
        self.res_time = time.time()
    @msg_handler(Request)
    def handle_request(self, _: Context, message: Request) -> List[Tuple[(AgentID, MsgPayload)]]:
        self.req_time = time.time()
        # Reply with half the requested cash.
        return [(message.sender_id, Response((message.payload.cash / 2.0)))]
    @msg_handler(Response)
    def handle_response(self, _: Context, message: Response) -> List[Tuple[(AgentID, MsgPayload)]]:
        self.res_time = time.time()
        return []
|
def test_ordering():
    """BatchResolver delivers messages in a stable order, verified via handler timestamps."""
    n = Network([_TestAgent('A'), _TestAgent('B'), _TestAgent('C')], BatchResolver())
    n.add_connection('A', 'B')
    n.add_connection('A', 'C')
    n.add_connection('B', 'C')
    n.send('A', 'B', Request(100.0))
    n.send('A', 'C', Request(100.0))
    n.send('B', 'C', Request(100.0))
    n.resolve({aid: n.context_for(aid, EnvView(0, 0.0)) for aid in n.agents})
    # Requests handled in send order; responses in the following batch.
    assert (n['A'].req_time <= n['B'].req_time)
    assert (n['B'].req_time <= n['C'].req_time)
    assert (n['C'].res_time <= n['A'].res_time)
    assert (n['A'].res_time <= n['B'].res_time)
|
def test_batch_resolver_round_limit():
    """A round_limit of 0 means no message rounds are allowed, so resolving a pending message raises."""
    n = Network([_TestAgent('A'), _TestAgent('B')], BatchResolver(round_limit=0))
    n.add_connection('A', 'B')
    n.send('A', 'B', Request(0))
    with pytest.raises(Exception):
        n.resolve({aid: n.context_for(aid, EnvView(0, 0.0)) for aid in n.agents})
|
class _TestAgent2(Agent):
    """Agent whose handler replies to 'C' even though no B-C connection exists."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def handle_message(self, _: Context, message: bool) -> List[Tuple[(AgentID, MsgPayload)]]:
        # Only 'B' responds, deliberately targeting an unconnected agent.
        return ([('C', True)] if (self.id == 'B') else [])
|
def test_invalid_response_connection():
    """A handler response along a non-existent connection raises NetworkError during resolve."""
    n = Network([_TestAgent2('A'), _TestAgent2('B'), _TestAgent2('C')], BatchResolver())
    n.add_connection('A', 'B')
    n.send('A', 'B', Request(0.0))
    with pytest.raises(NetworkError):
        n.resolve({aid: n.context_for(aid, EnvView(0, 0.0)) for aid in n.agents})
|
@pytest.fixture
def net():
    """Stochastic two-agent network with a two-round batch resolver."""
    agents = [Agent('A'), Agent('B')]
    return StochasticNetwork(agents, BatchResolver(2))
|
def test_stochastic_network_1(net):
    """Connection rate 1.0: the edge always exists, including after resampling."""
    net.add_connection('A', 'B', 1.0)
    assert net.graph.has_edge('A', 'B')
    assert net.graph.has_edge('B', 'A')
    net.resample_connectivity()
    assert net.graph.has_edge('A', 'B')
    assert net.graph.has_edge('B', 'A')
|

def test_stochastic_network_2(net):
    """Connection rate 0.0: the edge never exists."""
    net.add_connection('A', 'B', 0.0)
    assert (not net.graph.has_edge('A', 'B'))
    net.resample_connectivity()
    assert (not net.graph.has_edge('A', 'B'))
|

def test_stochastic_network_3(net):
    """Rates are also honored when connections are added via add_connections_from."""
    net.add_connections_from([('A', 'B', 0.0)])
    assert (not net.graph.has_edge('A', 'B'))
    net.resample_connectivity()
    assert (not net.graph.has_edge('A', 'B'))
|

def test_stochastic_network_4(net):
    """Rates are also honored when connections are added via add_connections_between."""
    net.add_connections_between(['A'], ['B'], rate=0.0)
    assert (not net.graph.has_edge('A', 'B'))
    net.resample_connectivity()
    assert (not net.graph.has_edge('A', 'B'))
|

def test_stochastic_network_5(net):
    """A sampler producing 1.0 behaves like a fixed rate of 1.0."""
    net.add_connection('A', 'B', rate=MockComparableSampler(1.0))
    assert net.graph.has_edge('A', 'B')
    net.resample_connectivity()
    assert net.graph.has_edge('A', 'B')
|

def test_stochastic_network_6(net):
    """A sampler producing 0.0 behaves like a fixed rate of 0.0."""
    net.add_connection('A', 'B', rate=MockComparableSampler(0.0))
    assert (not net.graph.has_edge('A', 'B'))
    net.resample_connectivity()
    assert (not net.graph.has_edge('A', 'B'))
|
class MockEnv():
    """Env stub whose view is intentionally empty.

    NOTE(review): shadows the earlier MockEnv — this file concatenates
    several test modules.
    """

    def view(self):
        """Return no view data."""
        return None
|
@msg_payload()
class _TestMessage():
    """Payload carrying an integer value that gets halved on each hop."""
    value: int
|
class _TestActor(Agent):
    """Agent that echoes back half the received value until it drops to 1 or below."""
    @msg_handler(_TestMessage)
    def handle_request(self, _: Context, message: _TestMessage) -> List[Tuple[(AgentID, Message)]]:
        if (message.payload.value > 1):
            # Integer-halve and bounce back; terminates the exchange at value 1.
            return [(message.sender_id, _TestMessage((message.payload.value // 2)))]
|
def test_tracking():
    """With tracking enabled, the resolver records every delivered message in order, and can clear them."""
    resolver = BatchResolver(enable_tracking=True)
    n = Network([_TestActor('A'), _TestActor('B'), _TestActor('C')], resolver)
    n.add_connection('A', 'B')
    n.add_connection('A', 'C')
    n.send('A', 'B', _TestMessage(4))
    n.send('A', 'C', _TestMessage(4))
    n.resolve({aid: n.context_for(aid, EnvView(0, 0.0)) for aid in n.agents})
    # 4 -> 2 -> 1 halving chain, per recipient, batch by batch.
    assert (resolver.tracked_messages == [Message('A', 'B', _TestMessage(4)), Message('A', 'C', _TestMessage(4)), Message('B', 'A', _TestMessage(2)), Message('C', 'A', _TestMessage(2)), Message('A', 'B', _TestMessage(1)), Message('A', 'C', _TestMessage(1))])
    n.resolver.clear_tracked_messages()
    assert (resolver.tracked_messages == [])
|
def test_repr():
    """Agents render as ``[ClassName agent_id]``."""
    assert str(MockAgent('AgentID')) == '[MockAgent AgentID]'
|
def test_reset():
    """Agent reset resamples the supertype into a concrete type."""
    st = MockStrategicAgent.Supertype(MockSampler(1))
    agent = MockStrategicAgent('Agent', supertype=st)
    assert (agent.supertype == st)
    agent.reset()
    # The sampler produced 2 on reset.
    assert (agent.type == MockStrategicAgent.Supertype(2))
    class MockAgent2(ph.StrategicAgent):
        @dataclass
        class Supertype(ph.Supertype):
            type_value: float
    # NOTE(review): intentionally(?) passes MockStrategicAgent.Supertype, not
    # MockAgent2.Supertype — presumably exercising reset with a foreign
    # supertype; confirm against the framework's reset semantics.
    agent = MockAgent2('Agent', supertype=MockStrategicAgent.Supertype(0))
    agent.reset()
|
@ph.msg_payload()
class MockPayload1():
    """Payload type with a registered handler on MockAgent3."""
    value: float = 0.0
|

@ph.msg_payload()
class MockPayload2():
    """Payload type with no registered handler, used to trigger errors."""
    value: float = 0.0
|
class MockAgent3(ph.StrategicAgent):
    """Agent that counts how many MockPayload1 messages it receives."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.mock_msg_1_recv = 0
    @ph.agents.msg_handler(MockPayload1)
    def handle_mock_message_1(self, ctx, message):
        self.mock_msg_1_recv += 1
|
def test_message_handling():
    """Registered handlers fire for single and batched messages; unhandled payload types raise."""
    agent = MockAgent3('Agent')
    agent.reset()
    agent.handle_message(None, ph.Message('', 'Agent', MockPayload1()))
    assert (agent.mock_msg_1_recv == 1)
    agent.handle_batch(None, [ph.Message('', 'Agent', MockPayload1())])
    assert (agent.mock_msg_1_recv == 2)
    # MockPayload2 has no handler on MockAgent3.
    with pytest.raises(ValueError):
        agent.handle_message(None, ph.Message('', 'Agent', MockPayload2()))
|
@pytest.fixture
def phantom_env():
    """Two-step env: strategic agents A (terminates after 1 step) and B, plus non-strategic C."""
    agents = [
        MockStrategicAgent('A', num_steps=1),
        MockStrategicAgent('B'),
        MockAgent('C'),
    ]
    return ph.PhantomEnv(num_steps=2, network=ph.Network(agents))
|
def test_n_agents(phantom_env):
    """The env counts all agents, strategic or not."""
    assert (phantom_env.n_agents == 3)
|

def test_agent_ids(phantom_env):
    """Agent ID listings split into strategic and non-strategic."""
    assert (phantom_env.agent_ids == ['A', 'B', 'C'])
    assert (phantom_env.strategic_agent_ids == ['A', 'B'])
    assert (phantom_env.non_strategic_agent_ids == ['C'])
|

def test_get_agents(phantom_env):
    """Agent object listings split into strategic and non-strategic."""
    assert (phantom_env.strategic_agents == [phantom_env.agents['A'], phantom_env.agents['B']])
    assert (phantom_env.non_strategic_agents == [phantom_env.agents['C']])
|

def test__get_item__(phantom_env):
    """Indexing the env by agent ID returns the agent object."""
    assert isinstance(phantom_env['A'], MockStrategicAgent)
    assert (phantom_env['A'].id == 'A')
|
def test_is_terminated(phantom_env):
    """The env terminates only when every strategic agent ('A' and 'B') has terminated."""
    phantom_env._terminations = set()
    assert (not phantom_env.is_terminated())
    phantom_env._terminations = set(['A'])
    assert (not phantom_env.is_terminated())
    phantom_env._terminations = set(['A', 'B'])
    assert phantom_env.is_terminated()
|

def test_is_truncated(phantom_env):
    """The env truncates when all strategic agents truncate OR the step limit is reached."""
    phantom_env._truncations = set()
    assert (not phantom_env.is_truncated())
    phantom_env._truncations = set(['A'])
    assert (not phantom_env.is_truncated())
    phantom_env._truncations = set(['A', 'B'])
    assert phantom_env.is_truncated()
    # Step-limit truncation is independent of per-agent truncations.
    phantom_env._truncations = set()
    phantom_env._current_step = phantom_env.num_steps
    assert phantom_env.is_truncated()
|
def test_reset(phantom_env):
    """reset() zeroes the step counter and returns observations only for strategic agents."""
    (obs, infos) = phantom_env.reset()
    assert (phantom_env.current_step == 0)
    assert (list(obs.keys()) == ['A', 'B'])
    assert (infos == {})
|
def test_step(phantom_env):
    """Agent 'A' (num_steps=1) drops out after one step; the env truncates at step 2."""
    current_time = phantom_env.current_step
    actions = {'A': 0, 'B': 0}
    step = phantom_env.step(actions)
    assert (phantom_env.current_step == (current_time + 1))
    assert (list(step.observations.keys()) == ['A', 'B'])
    assert (list(step.rewards.keys()) == ['A', 'B'])
    assert (list(step.infos.keys()) == ['A', 'B'])
    # 'A' both terminates and truncates after its single step.
    assert (step.terminations == {'A': True, 'B': False, '__all__': False})
    assert (step.truncations == {'A': True, 'B': False, '__all__': False})
    current_time = phantom_env.current_step
    actions = {'A': 0, 'B': 0}
    step = phantom_env.step(actions)
    assert (phantom_env.current_step == (current_time + 1))
    # Only 'B' remains active.
    assert (list(step.observations.keys()) == ['B'])
    assert (list(step.rewards.keys()) == ['B'])
    assert (list(step.infos.keys()) == ['B'])
    assert (step.terminations == {'B': False, '__all__': False})
    assert (step.truncations == {'B': False, '__all__': True})
|
def test_payload_1():
    """With no arguments, msg_payload leaves sender/receiver types unrestricted."""
    @ph.msg_payload()
    class MockPayload():
        value: float = 0.0
    assert (MockPayload._sender_types is None)
    assert (MockPayload._receiver_types is None)
|

def test_payload_2():
    """Single types (class or string) are normalized to one-element name lists."""
    @ph.msg_payload(sender_type=ph.Agent, receiver_type='OtherAgent')
    class MockPayload():
        value: float = 0.0
    assert (MockPayload._sender_types == ['Agent'])
    assert (MockPayload._receiver_types == ['OtherAgent'])
|

def test_payload_3():
    """Lists of type names are kept as-is."""
    @ph.msg_payload(sender_type=['AgentA', 'AgentB'], receiver_type=['AgentC', 'AgentD'])
    class MockPayload():
        value: float = 0.0
    assert (MockPayload._sender_types == ['AgentA', 'AgentB'])
    assert (MockPayload._receiver_types == ['AgentC', 'AgentD'])
|
def test_old_payload():
    """Subclassing MsgPayload directly emits a DeprecationWarning once, and only when checks are enabled."""
    @dataclass(frozen=True)
    class MockPayload(ph.MsgPayload):
        value: float = 0.0
    net = ph.Network([ph.Agent('a'), ph.Agent('b')], connections=[('a', 'b')])
    net.enforce_msg_payload_checks = True
    with warnings.catch_warnings(record=True) as w:
        net.send('a', 'b', MockPayload(1.0))
        assert (len(w) == 1)
        assert isinstance(w[0].message, DeprecationWarning)
    # Second send of the same payload type: warning is not repeated.
    with warnings.catch_warnings(record=True) as w:
        net.send('a', 'b', MockPayload(1.0))
        assert (len(w) == 0)
    # With checks off, no warning machinery is involved.
    net.enforce_msg_payload_checks = False
    net.send('a', 'b', MockPayload(1.0))
|
class MockAgent(ph.StrategicAgent):
    """Strategic agent with scalar Box spaces; records its last decoded action; 'B' always terminates."""
    def is_terminated(self, ctx: Context) -> bool:
        # Only the agent with ID 'B' reports termination.
        return (self.id == 'B')
    def compute_reward(self, ctx: Context) -> float:
        return 0.0
    def encode_observation(self, ctx: Context):
        return 1.0
    def decode_action(self, ctx: Context, action: np.ndarray):
        print(self.id, action)
        # Stash the action so tests can assert which policy acted.
        self.last_action = action
        return []
    @property
    def observation_space(self):
        return gym.spaces.Box((- np.inf), np.inf, (1,))
    @property
    def action_space(self):
        return gym.spaces.Box((- np.inf), np.inf, (1,))
|
class MockPolicy(Policy):
    """Fixed policy that always returns action 2.0."""
    def compute_action(self, observation):
        return 2.0
|
@pytest.fixture
def gym_env():
    """Single-agent adapter controlling 'A'; 'B' is driven internally by MockPolicy."""
    return SingleAgentEnvAdapter(env_class=PhantomEnv, agent_id='A', other_policies={'B': (MockPolicy, {})}, env_config={'network': Network([MockAgent('A'), MockAgent('B')]), 'num_steps': 2})
|
def test_agent_ids(gym_env):
    """The adapter exposes the ids of both underlying agents."""
    assert list(gym_env.agent_ids) == ['A', 'B']
|
def test_n_agents(gym_env):
    """Both underlying agents are counted, not just the adapted one."""
    assert gym_env.n_agents == 2
|
def test_reset(gym_env):
    """reset() returns the selected agent's observation plus an empty info dict."""
    obs, info = gym_env.reset()

    assert gym_env.current_step == 0
    assert obs == 1.0
    assert info == {}
    # Observations are cached for every agent, not only the adapted one.
    assert gym_env._observations == {'A': 1.0, 'B': 1.0}
|
def test_step(gym_env):
    """step() advances time, forwards our action and queries the fixed policy."""
    gym_env.reset()

    # First step: scalar action for the adapted agent.
    before = gym_env.current_step
    result = gym_env.step(3.0)
    assert gym_env.current_step == before + 1
    # (obs, reward, terminated, truncated, info)
    assert result == (1.0, 0.0, False, False, {})
    # 'A' received the action we supplied; 'B' received MockPolicy's fixed 2.0.
    assert gym_env.agents['A'].last_action == 3.0
    assert gym_env.agents['B'].last_action == 2.0

    # Second step advances the clock again.
    before = gym_env.current_step
    result = gym_env.step({'A': 0, 'B': 0})
    assert gym_env.current_step == before + 1
|
def test_bad_env():
    """Invalid adapter configurations are rejected with ValueError at construction."""

    def make(agent_id, other_policies):
        # Shared construction path; only the varying arguments differ per case.
        return SingleAgentEnvAdapter(
            env_class=PhantomEnv,
            agent_id=agent_id,
            other_policies=other_policies,
            env_config={
                'network': Network([MockAgent('A'), MockAgent('B')]),
                'num_steps': 2,
            },
        )

    # Selected agent id does not exist in the env.
    with pytest.raises(ValueError):
        make('X', {'B': (MockPolicy, {})})

    # Selected agent must not also be given a fixed policy.
    with pytest.raises(ValueError):
        make('A', {'A': (MockPolicy, {}), 'B': (MockPolicy, {})})

    # A policy is supplied for an unknown agent id.
    with pytest.raises(ValueError):
        make('A', {'X': (MockPolicy, {})})

    # Every non-adapted agent needs a policy.
    with pytest.raises(ValueError):
        make('A', {})
|
def test_stackelberg_env():
    """Leader and follower act on alternating steps of a 3-step StackelbergEnv."""
    env = ph.StackelbergEnv(
        3,
        ph.Network([MockStrategicAgent('leader'), MockStrategicAgent('follower')]),
        ['leader'],
        ['follower'],
    )

    def counts(agent_id):
        # (compute_reward, encode_obs, decode_action) call counters for an agent.
        agent = env.agents[agent_id]
        return (
            agent.compute_reward_count,
            agent.encode_obs_count,
            agent.decode_action_count,
        )

    # Reset: only the leader observes; nobody has acted or been rewarded.
    assert env.reset() == ({'leader': np.array([0])}, {})
    assert counts('leader') == (0, 1, 0)
    assert counts('follower') == (0, 0, 0)

    # Step 1: leader acts, follower observes; no rewards yet.
    step = env.step({'leader': np.array([0])})
    assert step.observations == {'follower': np.array([1 / 3])}
    assert step.rewards == {}
    assert step.terminations == {'leader': False, 'follower': False, '__all__': False}
    assert step.truncations == {'leader': False, 'follower': False, '__all__': False}
    assert step.infos == {'follower': {}}
    assert counts('leader') == (1, 1, 1)
    assert counts('follower') == (0, 1, 0)

    # Step 2: follower acts; leader observes and receives its reward.
    step = env.step({'follower': np.array([0])})
    assert step.observations == {'leader': np.array([2 / 3])}
    assert step.rewards == {'leader': 0.0}
    assert step.terminations == {'leader': False, 'follower': False, '__all__': False}
    assert step.truncations == {'leader': False, 'follower': False, '__all__': False}
    assert step.infos == {'leader': {}}
    assert counts('leader') == (1, 2, 1)
    assert counts('follower') == (1, 1, 1)

    # Step 3: final step of the horizon -> episode truncates for all agents.
    step = env.step({'leader': np.array([0])})
    assert step.observations == {'follower': np.array([1])}
    assert step.rewards == {'leader': 0.0, 'follower': 0.0}
    assert step.terminations == {'leader': False, 'follower': False, '__all__': False}
    assert step.truncations == {'leader': False, 'follower': False, '__all__': True}
    assert step.infos == {'follower': {}}
    assert counts('leader') == (2, 2, 2)
    assert counts('follower') == (1, 2, 1)
|
def test_base_supertype_sample():
    """Supertype.sample() draws sampler fields and passes plain values through."""
    @dataclass
    class TestSupertype(Supertype):
        a: float
        b: float

    # Plain values are copied through unchanged.
    sampled = TestSupertype(1.0, 'string').sample()
    assert isinstance(sampled, TestSupertype)
    assert sampled.__dict__ == {'a': 1.0, 'b': 'string'}

    # A sampler field is replaced by a value drawn from it.
    sampled = TestSupertype(MockSampler(0), 'string').sample()
    assert sampled.__dict__ == {'a': 1, 'b': 'string'}
|
def test_base_type_utilities():
    """Supertype conversion to obs-space-compatible values and gym spaces."""

    def box(shape=(1,)):
        # Every numeric leaf maps to an unbounded float32 Box of this shape.
        return gym.spaces.Box(-np.inf, np.inf, shape, np.float32)

    @dataclass
    class Type(Supertype):
        a: int
        b: float
        c: List[int]
        d: Tuple[int]
        e: np.ndarray
        f: Dict[str, int]

    t = Type(
        a=1,
        b=2.0,
        c=[6, 7, 8],
        d=(9, 10, 11),
        e=np.array([15, 16, 17], dtype=np.float32),
        f={'x': 12, 'y': 13, 'z': 14},
    )

    # Compatible-type conversion keeps every field's value intact.
    compat = t.to_obs_space_compatible_type()
    assert len(compat) == 6
    assert compat['a'] == t.a
    assert compat['b'] == t.b
    assert compat['c'] == t.c
    assert compat['d'] == t.d
    assert np.all(compat['e'] == t.e)
    assert compat['f'] == t.f

    # The generated observation space mirrors the field structure.
    expected = gym.spaces.Dict({
        'a': box(),
        'b': box(),
        'c': gym.spaces.Tuple([box(), box(), box()]),
        'd': gym.spaces.Tuple([box(), box(), box()]),
        'e': box((3,)),
        'f': gym.spaces.Dict({'x': box(), 'y': box(), 'z': box()}),
    })
    space = t.to_obs_space()
    assert space == expected
    assert space.contains(compat)

    # Unsupported field types (here: str) raise at conversion time.
    @dataclass
    class Type(Supertype):
        s: str = 's'

    t = Type()
    with pytest.raises(ValueError):
        t.to_obs_space_compatible_type()
    with pytest.raises(ValueError):
        t.to_obs_space()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.