code stringlengths 17 6.64M |
|---|
class ComparableType(Generic[T], ABC):
    """Interface for Types that can be compared.

    Subclasses must implement the full set of rich-comparison operators
    against values of type ``T``. The equality operators take ``object``
    to stay compatible with :meth:`object.__eq__`'s signature.
    """
    @abstractmethod
    def __lt__(self, other: T) -> bool:
        raise NotImplementedError
    @abstractmethod
    def __le__(self, other: T) -> bool:
        raise NotImplementedError
    @abstractmethod
    def __gt__(self, other: T) -> bool:
        raise NotImplementedError
    @abstractmethod
    def __ge__(self, other: T) -> bool:
        raise NotImplementedError
    @abstractmethod
    def __eq__(self, other: object) -> bool:
        raise NotImplementedError
    @abstractmethod
    def __ne__(self, other: object) -> bool:
        raise NotImplementedError
|
class Sampler(ABC, Generic[T]):
    """Samplers are used in Agent/Environment Supertypes to define how they are sampled.

    Samplers are designed to be used when training policies and a stochastic
    distribution of values is required for the Supertype sampling.

    Samplers return an unbounded number of total values with one value being
    returned at a time with the :meth:`sample` method.
    """
    def __init__(self):
        # Most recently sampled value; None until sample() is first called.
        self._value: Optional[T] = None
        # Unique identifier for this sampler instance.
        self._id = uuid4()
    @property
    def value(self) -> Optional[T]:
        """The most recently sampled value (None before the first sample)."""
        return self._value
    @abstractmethod
    def sample(self) -> T:
        """
        Returns a single value defined by the Sampler's internal distribution.

        Implementations of this function should also update the instance's
        :attr:`_value` property.
        """
        raise NotImplementedError
|
class ComparableSampler(Sampler[ComparableT], Generic[ComparableT]):
    """
    Extension of the :class:`Sampler` for ComparableTypes in order to treat the
    :class:`ComparableSampler` like its actual internal value.

    Example:
        >>> s = UniformFloatSampler()
        >>> s.value = s.sample()
        >>> s <= 1.0
        # True
        >>> s == 1.5
        # False
    """
    def __lt__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool:
        if isinstance(other, ComparableSampler):
            # NOTE(review): Sampler does not define __lt__, so this resolves to
            # object's default comparison — confirm sampler-vs-sampler '<' is
            # intended to fall back that way.
            return super().__lt__(other)
        if (self.value is None):
            raise ValueError('`self.value` is None')
        # Compare the wrapped value directly against the raw operand.
        return (self.value < other)
    def __eq__(self, other: object) -> bool:
        # Sampler-to-sampler equality falls back to identity (object.__eq__);
        # otherwise the wrapped value is compared against the raw operand.
        if isinstance(other, ComparableSampler):
            return object.__eq__(self, other)
        return (self.value == other)
    def __ne__(self, other: object) -> bool:
        if isinstance(other, ComparableSampler):
            return object.__ne__(self, other)
        return (self.value != other)
    def __le__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool:
        # Derived from __lt__/__eq__ so subclasses only need to supply those.
        return (self.__lt__(other) or self.__eq__(other))
    def __gt__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool:
        return (not self.__le__(other))
    def __ge__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool:
        return (self.__gt__(other) or self.__eq__(other))
|
class UniformFloatSampler(ComparableSampler[float]):
    """Samples a single float value from a uniform distribution.

    Uses :func:`np.random.uniform` internally, optionally clipping the result
    to the ``[clip_low, clip_high]`` interval.
    """
    def __init__(self, low: float=0.0, high: float=1.0, clip_low: Optional[float]=None, clip_high: Optional[float]=None) -> None:
        assert high >= low
        self.low = low
        self.high = high
        self.clip_low = clip_low
        self.clip_high = clip_high
        super().__init__()
    def sample(self) -> float:
        drawn = np.random.uniform(self.low, self.high)
        # Only clip when at least one bound was supplied.
        if not (self.clip_low is None and self.clip_high is None):
            drawn = np.clip(drawn, self.clip_low, self.clip_high)
        self._value = drawn
        return self._value
|
class UniformIntSampler(ComparableSampler[int]):
    """Samples a single int value from a uniform distribution.

    Uses :func:`np.random.randint` internally (``high`` is exclusive),
    optionally clipping the result to the ``[clip_low, clip_high]`` interval.
    """
    def __init__(self, low: int=0, high: int=1, clip_low: Optional[int]=None, clip_high: Optional[int]=None) -> None:
        assert high >= low
        self.low = low
        self.high = high
        self.clip_low = clip_low
        self.clip_high = clip_high
        super().__init__()
    def sample(self) -> int:
        # Fix: the return annotation previously said `-> float` for this int sampler.
        self._value = np.random.randint(self.low, self.high)
        if (self.clip_low is not None) or (self.clip_high is not None):
            self._value = np.clip(self._value, self.clip_low, self.clip_high)
        return self._value
|
class UniformArraySampler(ComparableSampler[np.ndarray]):
    """Samples an array of float values from a uniform distribution.

    Uses :func:`np.random.uniform()` internally; the output has the shape
    supplied at construction time and may optionally be clipped.
    """
    def __init__(self, low: float=0.0, high: float=1.0, shape: Iterable[int]=(1,), clip_low: Optional[float]=None, clip_high: Optional[float]=None) -> None:
        assert high >= low
        self.low = low
        self.high = high
        self.shape = shape
        self.clip_low = clip_low
        self.clip_high = clip_high
        super().__init__()
    def sample(self) -> np.ndarray:
        drawn = np.random.uniform(self.low, self.high, self.shape)
        # Only clip when at least one bound was supplied.
        if not (self.clip_low is None and self.clip_high is None):
            drawn = np.clip(drawn, self.clip_low, self.clip_high)
        self._value = drawn
        return self._value
|
class NormalSampler(ComparableSampler[float]):
    """Samples a single float value from a normal distribution.

    Uses :func:`np.random.normal()` internally, optionally clipping the result
    to the ``[clip_low, clip_high]`` interval.
    """
    def __init__(self, mu: float, sigma: float, clip_low: Optional[float]=None, clip_high: Optional[float]=None) -> None:
        self.mu = mu
        self.sigma = sigma
        self.clip_low = clip_low
        self.clip_high = clip_high
        super().__init__()
    def sample(self) -> float:
        drawn = np.random.normal(self.mu, self.sigma)
        # Only clip when at least one bound was supplied.
        if not (self.clip_low is None and self.clip_high is None):
            drawn = np.clip(drawn, self.clip_low, self.clip_high)
        self._value = drawn
        return self._value
|
class NormalArraySampler(ComparableSampler[np.ndarray]):
    """Samples an array of float values from a normal distribution.

    Uses :func:`np.random.normal()` internally; the output has the shape
    supplied at construction time and may optionally be clipped.
    """
    def __init__(self, mu: float, sigma: float, shape: Tuple[int, ...]=(1,), clip_low: Optional[float]=None, clip_high: Optional[float]=None) -> None:
        # Fix: the annotation was `Tuple[int]`, which means a 1-tuple only;
        # `Tuple[int, ...]` matches any-rank shapes (and the sibling
        # UniformArraySampler, which accepts Iterable[int]).
        self.mu = mu
        self.sigma = sigma
        self.shape = shape
        self.clip_low = clip_low
        self.clip_high = clip_high
        super().__init__()
    def sample(self) -> np.ndarray:
        self._value = np.random.normal(self.mu, self.sigma, self.shape)
        if (self.clip_low is not None) or (self.clip_high is not None):
            self._value = np.clip(self._value, self.clip_low, self.clip_high)
        return self._value
|
class LambdaSampler(Sampler[T]):
    """Sampler that delegates to an arbitrary callable.

    The callable (and the positional/keyword arguments captured at
    construction time) is invoked on every :meth:`sample` call.
    """
    def __init__(self, func: Callable[..., T], *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        super().__init__()
    def sample(self) -> T:
        result = self.func(*self.args, **self.kwargs)
        self._value = result
        return result
|
@dataclass(frozen=True)
class View(ABC):
    """
    Base class for the View class hierarchy. Implementations should subclass either
    :class:`AgentView`, :class:`EnvView` or :class:`FSMEnvView`.

    Views are used to share state between agents (and the Env) in a formalised manner
    and in a way that is easier than using request and response messages.

    Views should be created via the calling of the agent/env's :meth:`view()` method.
    Views can be tailored to particular agents, i.e. the view given can depend on the
    agent that the view is being given to.
    """
|
@dataclass(frozen=True)
class AgentView(View):
    """
    Immutable references to public :class:`phantom.Agent` state.
    """
|
@dataclass(frozen=True)
class EnvView(View):
    """
    Immutable references to public :class:`phantom.PhantomEnv` state.
    """
    # Index of the env's current step.
    current_step: int
    # Fraction of the episode elapsed so far (0.0 at reset).
    proportion_time_elapsed: float
|
def parse_concatenated_json(json_str: str):
    """Parse a string holding zero or more JSON documents laid end-to-end.

    Whitespace between documents is ignored. Returns the decoded objects as a
    list, in order of appearance; raises ``json.JSONDecodeError`` on malformed
    input, exactly as :meth:`json.JSONDecoder.raw_decode` would.
    """
    decoder = json.JSONDecoder()
    parsed = []
    remaining = json_str.strip()
    while remaining:
        # raw_decode returns the object plus the index where it ended,
        # letting us walk through the concatenated documents one by one.
        obj, end = decoder.raw_decode(remaining)
        parsed.append(obj)
        remaining = remaining[end:].strip()
    return parsed
|
@st.cache
def load_data(file: str):
    """Read *file* and return its concatenated-JSON contents (cached by Streamlit).

    Fix: use a context manager so the file handle is closed deterministically;
    the original `open(file).read()` left closing to the garbage collector.
    """
    with open(file, 'r') as fp:
        return parse_concatenated_json(fp.read())
|
def _get_version():
    """Extract the ``__version__`` string from the package's ``__init__.py``.

    Fixes: the character class used to contain a stray comma (``[\",\']``),
    and ``re.match`` required the assignment to be the very first bytes of the
    file — ``re.search`` finds it anywhere.
    """
    with open(os.path.join(NAME, '__init__.py')) as fp:
        match = re.search(r"__version__\s*=\s*[\"'](?P<version>.*)[\"']", fp.read())
        # AttributeError here means no __version__ assignment was found.
        return match.group('version')
|
def _get_long_description():
    """Return the contents of the README.md that sits next to this file."""
    readme_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.md')
    with open(readme_path, encoding='utf-8') as readme_file:
        return readme_file.read()
|
def _get_requirements():
    """Return the package's requirement strings parsed from requirements.txt.

    Skips comment lines (``#``), pip option lines (``-``) and — fixing the
    original, which let them through as empty strings — blank lines.
    """
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'requirements.txt')
    with open(path, encoding='utf-8') as requirements_file:
        requirements = [
            line.strip()
            for line in requirements_file
            # startswith accepts a tuple: one call covers both prefixes.
            if line.strip() and not line.strip().startswith(('#', '-'))
        ]
    return requirements
|
class MockSampler(ph.utils.samplers.Sampler[float]):
    """Deterministic test sampler: each sample() call returns the previous value plus one."""
    def __init__(self, value: float) -> None:
        self._value = value
    def sample(self) -> float:
        self._value = self._value + 1
        return self._value
|
class MockComparableSampler(ph.utils.samplers.ComparableSampler[float]):
    """Deterministic comparable test sampler: each sample() call increments the value by one."""
    def __init__(self, value: float) -> None:
        self._value = value
    def sample(self) -> float:
        self._value = self._value + 1
        return self._value
|
class MockAgent(ph.Agent):
    """Non-strategic test agent that records an optional step limit."""
    def __init__(self, *args, num_steps: Optional[int]=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_steps = num_steps
|
class MockStrategicAgent(ph.StrategicAgent):
    """Learning test agent that counts how often each RL hook is invoked."""
    @dataclass
    class Supertype(ph.Supertype):
        # Type parameter used by supertype-sampling tests.
        type_value: float = 0.0
    def __init__(self, *args, num_steps: Optional[int]=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.action_space = gym.spaces.Box(0, 1, (1,))
        self.observation_space = gym.spaces.Box(0, 1, (1,))
        # Counters let tests assert how many times each hook ran.
        self.encode_obs_count = 0
        self.decode_action_count = 0
        self.compute_reward_count = 0
        self.num_steps = num_steps
    def encode_observation(self, ctx: ph.Context):
        self.encode_obs_count += 1
        # Observation is simply the fraction of the episode elapsed.
        return np.array([ctx.env_view.proportion_time_elapsed])
    def decode_action(self, ctx: ph.Context, action: np.ndarray):
        self.decode_action_count += 1
        # This mock emits no messages.
        return []
    def compute_reward(self, ctx: ph.Context) -> float:
        self.compute_reward_count += 1
        return 0.0
    def is_terminated(self, ctx: ph.Context) -> bool:
        return (ctx.env_view.current_step == self.num_steps)
    def is_truncated(self, ctx: ph.Context) -> bool:
        return (ctx.env_view.current_step == self.num_steps)
|
class MockPolicy(ph.Policy):
    """Fixed policy for tests: always returns action 1 regardless of observation."""
    def compute_action(self, obs) -> int:
        return 1
|
class MockEnv(ph.PhantomEnv):
    """Five-step test env: two strategic agents plus one plain agent on a ring network."""
    @dataclass
    class Supertype(ph.Supertype):
        # Required (no default) type parameter used by supertype tests.
        type_value: float
    def __init__(self, env_supertype=None):
        agents = [MockStrategicAgent('a1'), MockStrategicAgent('a2'), MockAgent('a3')]
        network = ph.network.Network(agents)
        # Connect the agents in a ring: a1 -> a2 -> a3 -> a1.
        network.add_connection('a1', 'a2')
        network.add_connection('a2', 'a3')
        network.add_connection('a3', 'a1')
        super().__init__(num_steps=5, network=network, env_supertype=env_supertype)
|
class SimpleDecoder(Decoder):
    """Test decoder that emits one fixed message tagged with its own id."""
    def __init__(self, id: int):
        self.id = id
    @property
    def action_space(self) -> gym.Space:
        return gym.spaces.Box((- np.inf), np.inf, (1,))
    def decode(self, ctx: Context, action) -> List[Tuple[(AgentID, Message)]]:
        # The action is ignored; the payload identifies which decoder ran.
        return [('RECIPIENT', f'FROM {self.id}')]
    def reset(self):
        # Clearing the id lets tests observe that reset() was called.
        self.id = None
|
def test_chained_decoder():
    """A ChainedDecoder concatenates its sub-decoders' messages, however it was built."""
    d1 = SimpleDecoder(1)
    d2 = SimpleDecoder(2)
    cd1 = ChainedDecoder([d1, d2])
    messages = cd1.decode(None, [None, None])
    assert messages == [('RECIPIENT', 'FROM 1'), ('RECIPIENT', 'FROM 2')]
    cd2 = d1.chain(d2)
    # Fix: the original discarded this decode's result, so the assertion below
    # merely re-checked the first chain's (stale) output.
    messages = cd2.decode(None, [None, None])
    assert messages == [('RECIPIENT', 'FROM 1'), ('RECIPIENT', 'FROM 2')]
|
def test_chained_decoder_reset():
    """Resetting a ChainedDecoder resets each of its sub-decoders."""
    d1 = SimpleDecoder(1)
    d2 = SimpleDecoder(2)
    cd = ChainedDecoder([d1, d2])
    cd.reset()
    # SimpleDecoder.reset() clears its id to None.
    assert (d1.id is None)
    assert (d2.id is None)
|
class MockDecoder(Decoder):
    """Test decoder that asserts it received its own id as the action."""
    def __init__(self, id: int):
        self.id = id
    @property
    def action_space(self) -> gym.Space:
        return gym.spaces.Box((- np.inf), np.inf, (1,))
    def decode(self, ctx: Context, action) -> List[Tuple[(AgentID, Message)]]:
        # Verifies that the containing decoder routed the right sub-action here.
        assert (action == self.id)
        return [('RECIPIENT', f'FROM {self.id}')]
    def reset(self):
        # Clearing the id lets tests observe that reset() was called.
        self.id = None
|
def test_dict_decoder():
    """A DictDecoder exposes a Dict action space and fans sub-actions out by key."""
    d1 = MockDecoder(1)
    d2 = MockDecoder(2)
    dd = DictDecoder({'d1': d1, 'd2': d2})
    assert (dd.action_space == gym.spaces.Dict({'d1': gym.spaces.Box((- np.inf), np.inf, (1,)), 'd2': gym.spaces.Box((- np.inf), np.inf, (1,))}))
    messages = dd.decode(None, {'d1': 1, 'd2': 2})
    assert (messages == [('RECIPIENT', 'FROM 1'), ('RECIPIENT', 'FROM 2')])
|
def test_dict_decoder_reset():
    """Resetting a DictDecoder resets each wrapped decoder.

    Renamed from ``test_chained_decoder_reset``: that name duplicated an earlier
    test in this module, so this one silently shadowed it and the earlier test
    never ran. The new name also mirrors ``test_dict_encoder_reset``.
    """
    d1 = MockDecoder(1)
    d2 = MockDecoder(2)
    dd = DictDecoder({'d1': d1, 'd2': d2})
    dd.reset()
    assert d1.id is None
    assert d2.id is None
|
class SimpleEncoder(Encoder):
    """Test encoder that always encodes its own id as a one-element array."""
    def __init__(self, id: int):
        self.id = id
    @property
    def observation_space(self) -> gym.Space:
        return gym.spaces.Box((- np.inf), np.inf, (1,))
    def encode(self, ctx: Context) -> np.ndarray:
        return np.array([self.id])
    def reset(self):
        # Clearing the id lets tests observe that reset() was called.
        self.id = None
|
def test_chained_encoder():
    """A ChainedEncoder concatenates its sub-encoders' observations in order."""
    e1 = SimpleEncoder(1)
    e2 = SimpleEncoder(2)
    ce1 = ChainedEncoder([e1, e2])
    obs = ce1.encode(None)
    assert (obs == (np.array([1]), np.array([2])))
|
def test_chained_encoder_reset():
    """Resetting a ChainedEncoder resets each of its sub-encoders."""
    e1 = SimpleEncoder(1)
    e2 = SimpleEncoder(2)
    cd = ChainedEncoder([e1, e2])
    cd.reset()
    # SimpleEncoder.reset() clears its id to None.
    assert (e1.id is None)
    assert (e2.id is None)
|
class MockEncoder(Encoder):
    """Test encoder that always encodes its own id as a one-element array."""
    def __init__(self, id: int):
        self.id = id
    @property
    def observation_space(self) -> gym.Space:
        return gym.spaces.Box((- np.inf), np.inf, (1,))
    def encode(self, ctx: Context) -> np.ndarray:
        return np.array([self.id])
    def reset(self):
        # Clearing the id lets tests observe that reset() was called.
        self.id = None
|
def test_dict_encoder():
    """A DictEncoder exposes a Dict observation space and encodes per key."""
    e1 = MockEncoder(1)
    e2 = MockEncoder(2)
    de = DictEncoder({'e1': e1, 'e2': e2})
    assert (de.observation_space == gym.spaces.Dict({'e1': gym.spaces.Box((- np.inf), np.inf, (1,)), 'e2': gym.spaces.Box((- np.inf), np.inf, (1,))}))
    obs = de.encode(None)
    assert (obs == {'e1': np.array([1]), 'e2': np.array([2])})
|
def test_dict_encoder_reset():
    """Resetting a DictEncoder resets each wrapped encoder."""
    e1 = MockEncoder(1)
    e2 = MockEncoder(2)
    de = DictEncoder({'e1': e1, 'e2': e2})
    de.reset()
    # MockEncoder.reset() clears its id to None.
    assert (e1.id is None)
    assert (e2.id is None)
|
def test_decorator_style():
    """An FSM env can register stages via the @FSMStage method decorator."""
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockAgent('A')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        @ph.FSMStage(stage_id='StageA', acting_agents=['A'], next_stages=['StageA'])
        def handle(self):
            # Self-loop: the FSM stays in StageA.
            return 'StageA'
    env = Env()
    env.reset()
    env.step({})
|
def test_state_definition_list_style():
    """An FSM env can register stages via the `stages=` constructor argument."""
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockAgent('A')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA', stages=[ph.FSMStage(stage_id='StageA', acting_agents=['A'], next_stages=['StageA'], handler=self.handle)])
        def handle(self):
            # Self-loop: the FSM stays in StageA.
            return 'StageA'
    env = Env()
    env.reset()
    env.step({})
|
def test_no_stages_registered():
    """
    All FSM envs must have at least one state registered using the FSMStage decorator.
    """
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            # No stages registered and no initial stage given.
            super().__init__(num_steps=1, network=network, initial_stage=None)
    with pytest.raises(ph.fsm.FSMValidationError):
        Env()
|
def test_duplicate_stages():
    """
    All FSM envs must not have more than one state registered with the same ID.
    """
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        # Both handlers claim stage_id='StageA' — an invalid duplicate.
        @ph.FSMStage(stage_id='StageA', acting_agents=['agent'], next_stages=['StageA'])
        def handle_1(self):
            pass
        @ph.FSMStage(stage_id='StageA', acting_agents=['agent'], next_stages=['StageA'])
        def handle_2(self):
            pass
    with pytest.raises(ph.fsm.FSMValidationError):
        Env()
|
def test_invalid_initial_stage():
    """
    All FSM envs must have an initial state that is a valid registered state.
    """
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            # 'StageB' is never registered — only 'StageA' is.
            super().__init__(num_steps=1, network=network, initial_stage='StageB')
        @ph.FSMStage(stage_id='StageA', acting_agents=['agent'], next_stages=['StageA'])
        def handle(self):
            pass
    with pytest.raises(ph.fsm.FSMValidationError):
        Env()
|
def test_invalid_next_state():
    """
    All next states passed into the FSMStage decorator must be valid states.
    """
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        # 'StageB' is listed as a next stage but never registered.
        @ph.FSMStage(stage_id='StageA', acting_agents=['agent'], next_stages=['StageB'])
        def handle_1(self):
            pass
    with pytest.raises(ph.fsm.FSMValidationError):
        Env()
|
def test_invalid_no_handler_stage_next_stages():
    """
    All stages without a provided handler must have exactly one next stage
    """
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            # No handler and an empty next_stages list — invalid combination.
            super().__init__(num_steps=1, network=network, initial_stage='StageA', stages=[ph.FSMStage('StageA', acting_agents=['agent'], next_stages=[])])
    with pytest.raises(ph.fsm.FSMValidationError):
        Env()
|
def test_invalid_next_state_runtime():
    """
    A valid registered next state must be returned by the state handler functions.
    """
    class Env(ph.FiniteStateMachineEnv):
        def __init__(self):
            agents = [MockStrategicAgent('agent')]
            network = ph.Network(agents)
            super().__init__(num_steps=1, network=network, initial_stage='StageA')
        @ph.FSMStage(stage_id='StageA', acting_agents=['agent'], next_stages=['StageA'])
        def handle_1(self):
            # Returns a stage that was never registered nor declared as a next stage.
            return 'StageB'
    env = Env()
    env.reset()
    with pytest.raises(ph.fsm.FSMRuntimeError):
        env.step(actions={'agent': 0})
|
def test_is_fsm_deterministic():
    """An FSM is deterministic iff every stage has exactly one possible next stage."""
    # Single self-looping stage: deterministic.
    env = ph.FiniteStateMachineEnv(num_steps=1, network=ph.Network([]), initial_stage='A', stages=[ph.FSMStage(stage_id='A', acting_agents=[], next_stages=['A'])])
    assert env.is_fsm_deterministic()
    # A -> B -> C -> A cycle with one next stage each: still deterministic.
    env = ph.FiniteStateMachineEnv(num_steps=1, network=ph.Network([]), initial_stage='A', stages=[ph.FSMStage(stage_id='A', acting_agents=[], next_stages=['B']), ph.FSMStage(stage_id='B', acting_agents=[], next_stages=['C']), ph.FSMStage(stage_id='C', acting_agents=[], next_stages=['A'])])
    assert env.is_fsm_deterministic()
    # Stages with two possible next stages (chosen by a handler): not deterministic.
    env = ph.FiniteStateMachineEnv(num_steps=1, network=ph.Network([]), initial_stage='A', stages=[ph.FSMStage(stage_id='A', acting_agents=[], next_stages=['A', 'B'], handler=(lambda x: 'B')), ph.FSMStage(stage_id='B', acting_agents=[], next_stages=['A', 'B'], handler=(lambda x: 'A'))])
    assert (not env.is_fsm_deterministic())
|
class MockFSMEnv(ph.FiniteStateMachineEnv):
    """Single-agent FSM env alternating ODD -> EVEN -> ODD over three steps.

    NOTE(review): a second ``MockFSMEnv`` is defined later in this file; if both
    live in one module the later definition shadows this one — these appear to
    originate from separate test files and should keep distinct names.
    """
    def __init__(self):
        agents = [MockStrategicAgent('agent')]
        network = ph.Network(agents)
        super().__init__(num_steps=3, network=network, initial_stage='ODD', stages=[ph.FSMStage(stage_id='ODD', acting_agents=['agent'], next_stages=['EVEN']), ph.FSMStage(stage_id='EVEN', acting_agents=['agent'], next_stages=['ODD'])])
|
def test_odd_even_one_agent():
    """One agent acts in both stages; hooks fire once per reset/step as expected."""
    env = MockFSMEnv()
    # reset() returns (observations, infos); only the acting agent is observed.
    assert (env.reset() == ({'agent': np.array([0])}, {}))
    assert (env.current_stage == 'ODD')
    assert (env.agents['agent'].compute_reward_count == 0)
    assert (env.agents['agent'].encode_obs_count == 1)
    assert (env.agents['agent'].decode_action_count == 0)
    step = env.step({'agent': np.array([0])})
    # After one step the FSM has moved ODD -> EVEN.
    assert (env.current_stage == 'EVEN')
    # Observation is the elapsed-time fraction: step 1 of 3.
    assert (step.observations == {'agent': np.array([(1.0 / 3.0)])})
    assert (step.rewards == {'agent': 0.0})
    assert (step.terminations == {'agent': False, '__all__': False})
    assert (step.truncations == {'agent': False, '__all__': False})
    assert (step.infos == {'agent': {}})
    assert (env.agents['agent'].compute_reward_count == 1)
    assert (env.agents['agent'].encode_obs_count == 2)
    assert (env.agents['agent'].decode_action_count == 1)
|
class MockFSMEnv(ph.FiniteStateMachineEnv):
    """Two-agent FSM env: 'odd_agent' acts/earns in ODD, 'even_agent' in EVEN.

    NOTE(review): this redefines the earlier ``MockFSMEnv``; in a single module
    the earlier class is shadowed, so ``test_odd_even_one_agent`` would resolve
    to this class at call time. Likely concatenated from separate test files.
    """
    def __init__(self):
        agents = [MockStrategicAgent('odd_agent'), MockStrategicAgent('even_agent')]
        network = ph.Network(agents)
        super().__init__(num_steps=3, network=network, initial_stage='ODD', stages=[ph.FSMStage(stage_id='ODD', next_stages=['EVEN'], acting_agents=['odd_agent'], rewarded_agents=['odd_agent']), ph.FSMStage(stage_id='EVEN', next_stages=['ODD'], acting_agents=['even_agent'], rewarded_agents=['even_agent'])])
|
def test_odd_even_two_agents():
    """Agents alternate acting per stage; only the acting agent is observed/rewarded."""
    env = MockFSMEnv()
    # Initial stage is ODD, so only odd_agent receives an observation.
    assert (env.reset() == ({'odd_agent': np.array([0])}, {}))
    assert (env.current_stage == 'ODD')
    assert (env.agents['odd_agent'].compute_reward_count == 0)
    assert (env.agents['odd_agent'].encode_obs_count == 1)
    assert (env.agents['odd_agent'].decode_action_count == 0)
    assert (env.agents['even_agent'].compute_reward_count == 0)
    assert (env.agents['even_agent'].encode_obs_count == 0)
    assert (env.agents['even_agent'].decode_action_count == 0)
    step = env.step({'odd_agent': np.array([1])})
    # Stage flips to EVEN; even_agent is now the acting agent.
    assert (env.current_stage == 'EVEN')
    assert (step.observations == {'even_agent': np.array([(1.0 / 3.0)])})
    # even_agent has not been rewarded yet this episode.
    assert (step.rewards == {'even_agent': None})
    assert (step.terminations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.truncations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.infos == {'even_agent': {}})
    assert (env.agents['odd_agent'].compute_reward_count == 1)
    assert (env.agents['odd_agent'].encode_obs_count == 1)
    assert (env.agents['odd_agent'].decode_action_count == 1)
    assert (env.agents['even_agent'].compute_reward_count == 0)
    assert (env.agents['even_agent'].encode_obs_count == 1)
    assert (env.agents['even_agent'].decode_action_count == 0)
    step = env.step({'even_agent': np.array([0])})
    # Stage flips back to ODD; odd_agent's deferred reward is now delivered.
    assert (env.current_stage == 'ODD')
    assert (step.observations == {'odd_agent': np.array([(2.0 / 3.0)])})
    assert (step.rewards == {'odd_agent': 0.0})
    assert (step.terminations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.truncations == {'even_agent': False, 'odd_agent': False, '__all__': False})
    assert (step.infos == {'odd_agent': {}})
    assert (env.agents['odd_agent'].compute_reward_count == 1)
    assert (env.agents['odd_agent'].encode_obs_count == 2)
    assert (env.agents['odd_agent'].decode_action_count == 1)
    assert (env.agents['even_agent'].compute_reward_count == 1)
    assert (env.agents['even_agent'].encode_obs_count == 1)
    assert (env.agents['even_agent'].decode_action_count == 1)
|
class OneStateFSMEnvWithHandler(ph.FiniteStateMachineEnv):
    """Two-step FSM env with a single self-looping stage driven by a handler."""
    def __init__(self):
        agents = [MockStrategicAgent('agent')]
        network = ph.Network(agents)
        # The agent is connected to itself so it can message itself.
        network.add_connection('agent', 'agent')
        super().__init__(num_steps=2, network=network, initial_stage='UNIT')
    @ph.FSMStage(stage_id='UNIT', acting_agents=['agent'], next_stages=['UNIT'])
    def handle(self):
        # Self-loop: the FSM always stays in UNIT.
        return 'UNIT'
|
def test_one_state_with_handler():
    """A one-stage FSM env with a handler steps to completion and truncates at num_steps."""
    env = OneStateFSMEnvWithHandler()
    assert (env.reset() == ({'agent': np.array([0.0])}, {}))
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 0)
    assert (env.agents['agent'].encode_obs_count == 1)
    assert (env.agents['agent'].decode_action_count == 0)
    # Step 1 of 2: elapsed fraction is 0.5; episode still running.
    step = env.step({'agent': np.array([0])})
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 1)
    assert (env.agents['agent'].encode_obs_count == 2)
    assert (env.agents['agent'].decode_action_count == 1)
    assert (step.observations == {'agent': np.array([0.5])})
    assert (step.rewards == {'agent': 0})
    assert (step.terminations == {'agent': False, '__all__': False})
    assert (step.truncations == {'agent': False, '__all__': False})
    assert (step.infos == {'agent': {}})
    # Step 2 of 2: the step limit is hit, so the episode truncates for all.
    step = env.step({'agent': np.array([0])})
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 2)
    assert (env.agents['agent'].encode_obs_count == 3)
    assert (env.agents['agent'].decode_action_count == 2)
    assert (step.observations == {'agent': np.array([1.0])})
    assert (step.rewards == {'agent': 0})
    assert (step.terminations == {'agent': False, '__all__': False})
    assert (step.truncations == {'agent': False, '__all__': True})
    assert (step.infos == {'agent': {}})
|
class OneStateFSMEnvWithoutHandler(ph.FiniteStateMachineEnv):
    """Two-step FSM env with a single handler-less self-looping stage."""
    def __init__(self):
        agents = [MockStrategicAgent('agent')]
        network = ph.Network(agents)
        # The agent is connected to itself so it can message itself.
        network.add_connection('agent', 'agent')
        super().__init__(num_steps=2, network=network, initial_stage='UNIT', stages=[ph.FSMStage(stage_id='UNIT', acting_agents=['agent'], next_stages=['UNIT'], handler=None)])
|
def test_one_state_without_handler():
    """A handler-less one-stage FSM env behaves identically to the handler variant."""
    env = OneStateFSMEnvWithoutHandler()
    assert (env.reset() == ({'agent': np.array([0.0])}, {}))
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 0)
    assert (env.agents['agent'].encode_obs_count == 1)
    assert (env.agents['agent'].decode_action_count == 0)
    # Step 1 of 2: elapsed fraction is 0.5; episode still running.
    step = env.step({'agent': np.array([0])})
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 1)
    assert (env.agents['agent'].encode_obs_count == 2)
    assert (env.agents['agent'].decode_action_count == 1)
    assert (step.observations == {'agent': np.array([0.5])})
    assert (step.rewards == {'agent': 0})
    assert (step.terminations == {'agent': False, '__all__': False})
    assert (step.truncations == {'agent': False, '__all__': False})
    assert (step.infos == {'agent': {}})
    # Step 2 of 2: the step limit is hit, so the episode truncates for all.
    step = env.step({'agent': np.array([0])})
    assert (env.current_stage == 'UNIT')
    assert (env.agents['agent'].compute_reward_count == 2)
    assert (env.agents['agent'].encode_obs_count == 3)
    assert (env.agents['agent'].decode_action_count == 2)
    assert (step.observations == {'agent': np.array([1.0])})
    assert (step.rewards == {'agent': 0})
    assert (step.terminations == {'agent': False, '__all__': False})
    assert (step.truncations == {'agent': False, '__all__': True})
    assert (step.infos == {'agent': {}})
|
@dataclass
class MockEpisode():
    """Stand-in for an RLlib Episode exposing the dicts the metric callback writes to."""
    user_data: dict = field(default_factory=dict)
    custom_metrics: dict = field(default_factory=dict)
    media: dict = field(default_factory=dict)
|
class MockMetric(SimpleMetric):
    """Test metric that always extracts a fixed value, regardless of the env."""
    def __init__(self, value: int, train_reduce_action='last', fsm_stages=None):
        super().__init__(train_reduce_action, fsm_stages=fsm_stages)
        self.value = value
    def extract(self, _env) -> int:
        return self.value
|
class MockBaseEnv():
    """Stand-in for an RLlib BaseEnv wrapping a single sub-env.

    Fix: ``step`` previously used a mutable default argument (``actions={}``);
    it now defaults to None and substitutes a fresh dict per call.
    """
    def __init__(self, env_class):
        # RLlib exposes sub-envs via a list; tests only ever use one.
        self.envs = [env_class]
    def step(self, actions=None):
        if actions is None:
            actions = {}
        for env in self.envs:
            env.step(actions)
|
def test_fsm_logging():
    """Stage-scoped metrics record NotRecorded outside their stage and reduce over recorded values only."""
    env = FiniteStateMachineEnv(num_steps=2, network=Network(), initial_stage=0, stages=[FSMStage(0, [], None, [1]), FSMStage(1, [], None, [0])])
    episode = MockEpisode()
    base_env = MockBaseEnv(env)
    # Each metric only records while the FSM is in its declared stage.
    callback = RLlibMetricLogger({'stage_0_metric': MockMetric(0, 'sum', fsm_stages=[0]), 'stage_1_metric': MockMetric(1, 'sum', fsm_stages=[1])})()
    callback.on_episode_start(worker=None, base_env=base_env, policies=None, episode=episode, env_index=0)
    callback.on_episode_step(worker=None, base_env=base_env, episode=episode, env_index=0)
    assert (episode.user_data == {'stage_0_metric': [0], 'stage_1_metric': [NotRecorded()]})
    # Advance the FSM into stage 1 and log again.
    base_env.step()
    callback.on_episode_step(worker=None, base_env=base_env, episode=episode, env_index=0)
    assert (episode.user_data == {'stage_0_metric': [0, NotRecorded()], 'stage_1_metric': [NotRecorded(), 1]})
    callback.on_episode_end(worker=None, base_env=base_env, policies=None, episode=episode, env_index=0)
    assert (episode.custom_metrics == {'stage_0_metric': 0, 'stage_1_metric': 1})
|
class MockAgent():
    """Minimal agent stub: step() adds a fixed increment to test_property."""
    def __init__(self, inc):
        self.inc = inc
        self.test_property = 0.0
    def step(self):
        self.test_property = self.test_property + self.inc
|
class MockProperty():
    """Holder for a single nested attribute, used by the nested-metric test."""
    def __init__(self):
        self.sub_property = 1.0
|
class MockEnv():
    """Env stub: one float property, a nested property and two MockAgents."""
    def __init__(self):
        self.test_property = 0.0
        self.nested_property = MockProperty()
        self.agents = {'agent1': MockAgent(1.0), 'agent2': MockAgent(2.0)}
    def step(self):
        # Advance the env's own property, then every agent's.
        self.test_property = self.test_property + 1.0
        for member in self.agents.values():
            member.step()
|
def test_simple_env_metric_1():
    """'last'/'none' reduce actions: train keeps the final value, evaluate keeps them all."""
    mock_env = MockEnv()
    metric = ph.metrics.SimpleEnvMetric(env_property='test_property', train_reduce_action='last', eval_reduce_action='none')
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    assert metric.reduce(recorded, mode='train') == 5.0
    assert np.all(metric.reduce(recorded, mode='evaluate') == np.arange(1.0, 6.0))
|
def test_simple_env_metric_2():
    """The 'mean' train reduce action averages the extracted values."""
    mock_env = MockEnv()
    metric = ph.metrics.SimpleEnvMetric(env_property='test_property', train_reduce_action='mean')
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    # mean of 1..5 == 3.0
    assert metric.reduce(recorded, mode='train') == 3.0
|
def test_simple_env_metric_3():
    """The 'sum' train reduce action totals the extracted values."""
    mock_env = MockEnv()
    metric = ph.metrics.SimpleEnvMetric(env_property='test_property', train_reduce_action='sum')
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    # sum of 1..5 == 15.0
    assert metric.reduce(recorded, mode='train') == 15.0
|
def test_simple_agent_metric_1():
    """SimpleAgentMetric with 'last' keeps agent1's final property value."""
    mock_env = MockEnv()
    metric = ph.metrics.SimpleAgentMetric(agent_id='agent1', agent_property='test_property', train_reduce_action='last')
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    assert metric.reduce(recorded, mode='train') == 5.0
|
def test_simple_agent_metric_2():
    """SimpleAgentMetric with 'mean' averages agent1's property values."""
    mock_env = MockEnv()
    metric = ph.metrics.SimpleAgentMetric(agent_id='agent1', agent_property='test_property', train_reduce_action='mean')
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    # mean of 1..5 == 3.0
    assert metric.reduce(recorded, mode='train') == 3.0
|
def test_simple_agent_metric_3():
    """SimpleAgentMetric with 'sum' totals agent1's property values."""
    mock_env = MockEnv()
    metric = ph.metrics.SimpleAgentMetric(agent_id='agent1', agent_property='test_property', train_reduce_action='sum')
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    # sum of 1..5 == 15.0
    assert metric.reduce(recorded, mode='train') == 15.0
|
def test_aggregated_agent_metric_1():
    """AggregatedAgentMetric with 'min' keeps the slower agent's value each step."""
    mock_env = MockEnv()
    metric = ph.metrics.AggregatedAgentMetric(agent_ids=['agent1', 'agent2'], agent_property='test_property', group_reduce_action='min', train_reduce_action='last')
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    # agent1 increments by 1.0 per step -> min after 5 steps is 5.0.
    assert metric.reduce(recorded, mode='train') == 5.0
|
def test_aggregated_agent_metric_2():
    """AggregatedAgentMetric with 'max' keeps the faster agent's value each step."""
    mock_env = MockEnv()
    metric = ph.metrics.AggregatedAgentMetric(agent_ids=['agent1', 'agent2'], agent_property='test_property', group_reduce_action='max', train_reduce_action='last')
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    # agent2 increments by 2.0 per step -> max after 5 steps is 10.0.
    assert metric.reduce(recorded, mode='train') == 10.0
|
def test_aggregated_agent_metric_3():
    """AggregatedAgentMetric with 'mean' averages the agents' values each step."""
    mock_env = MockEnv()
    metric = ph.metrics.AggregatedAgentMetric(agent_ids=['agent1', 'agent2'], agent_property='test_property', group_reduce_action='mean', train_reduce_action='last')
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    # mean of (5.0, 10.0) after 5 steps is 7.5.
    assert metric.reduce(recorded, mode='train') == 7.5
|
def test_aggregated_agent_metric_4():
    """AggregatedAgentMetric with 'sum' totals the agents' values each step."""
    mock_env = MockEnv()
    metric = ph.metrics.AggregatedAgentMetric(agent_ids=['agent1', 'agent2'], agent_property='test_property', group_reduce_action='sum', train_reduce_action='last')
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    # sum of (5.0, 10.0) after 5 steps is 15.0.
    assert metric.reduce(recorded, mode='train') == 15.0
|
def test_nested_metric():
    """Dotted env_property paths traverse nested attributes."""
    mock_env = MockEnv()
    metric = ph.metrics.SimpleEnvMetric(env_property='nested_property.sub_property', train_reduce_action='last')
    mock_env.step()
    assert metric.extract(mock_env) == 1.0
|
def test_lambda_metric():
    """LambdaMetric applies user-supplied extract and reduce callables."""
    mock_env = MockEnv()
    metric = ph.metrics.LambdaMetric(
        extract_fn=(lambda env: env.test_property),
        train_reduce_fn=(lambda values: np.sum(values)),
        eval_reduce_fn=(lambda values: (np.sum(values) * 2)),
    )
    recorded = []
    for _step in range(5):
        mock_env.step()
        recorded.append(metric.extract(mock_env))
    assert metric.reduce(recorded, mode='train') == 15.0
    assert metric.reduce(recorded, mode='evaluate') == 30.0
|
def test_RLlibMetricLogger():
    """The callback records per-step metric values and reduces them at episode end."""
    episode = MockEpisode()
    base_env = MockBaseEnv(PhantomEnv(Network()))
    # RLlibMetricLogger is a factory: calling it produces the callback instance.
    callback = RLlibMetricLogger({'test_metric': MockMetric(1)})()
    callback.on_episode_start(worker=None, base_env=base_env, policies=None, episode=episode, env_index=0)
    callback.on_episode_step(worker=None, base_env=base_env, episode=episode, env_index=0)
    assert (episode.user_data['test_metric'] == [1])
    callback.on_episode_end(worker=None, base_env=base_env, policies=None, episode=episode, env_index=0)
    assert (episode.custom_metrics['test_metric'] == 1)
|
@msg_payload()
class MockMessage():
    """Test message payload carrying a single cash amount."""
    # Amount of cash transferred by this message.
    cash: float
|
class MockAgent(Agent):
    """Agent that banks half of any large-enough payment and returns the other half."""
    def __init__(self, aid: AgentID) -> None:
        super().__init__(aid)
        self.total_cash = 0.0
    def reset(self) -> None:
        self.total_cash = 0.0
    @msg_handler(MockMessage)
    def handle_message(self, _: Context, message: MockMessage) -> List[Tuple[(AgentID, MsgPayload)]]:
        # Payments of 25 or less are ignored (the handler then implicitly
        # returns None rather than an empty list — intentional for these tests).
        if (message.payload.cash > 25):
            self.total_cash += (message.payload.cash / 2.0)
            # Bounce the other half back to the sender.
            return [(message.sender_id, MockMessage((message.payload.cash / 2.0)))]
|
def test_init():
    """A Network can be built via add_connection or the connections= argument."""
    net = Network([MockAgent('a1'), MockAgent('a2')])
    net.add_connection('a1', 'a2')
    Network([MockAgent('a1'), MockAgent('a2')], connections=[('a1', 'a2')])
|
def test_bad_init():
    """Duplicate agent IDs and connections to unknown agents are rejected."""
    with pytest.raises(ValueError):
        Network([MockAgent('a1'), MockAgent('a1')])
    with pytest.raises(ValueError):
        # 'a2' is referenced in a connection but not in the agent list.
        Network([MockAgent('a1')], connections=[('a1', 'a2')])
|
@pytest.fixture
def net() -> Network:
    """Three MockAgents with a single 'mm' <-> 'inv' connection ('inv2' is isolated)."""
    network = Network([MockAgent('mm'), MockAgent('inv'), MockAgent('inv2')])
    network.add_connection('mm', 'inv')
    return network
|
def test_getters(net):
    """Network lookup helpers find agents by id, predicate, and type."""
    assert 'mm' in net.agent_ids
    assert 'inv' in net.agent_ids

    matched = net.get_agents_where(lambda agent: agent.id == 'mm')
    assert len(matched) == 1
    assert next(iter(matched)) == 'mm'

    # Every agent is an Agent, so type filters are all-or-nothing here.
    assert net.get_agents_with_type(Agent) == net.agents
    assert net.get_agents_without_type(Agent) == {}
|
def test_call_response(net):
    """A 100.0 payment splits: receiver keeps 50, sender keeps 25 of the bounce."""
    net.send('mm', 'inv', MockMessage(100.0))
    contexts = {aid: net.context_for(aid, EnvView(0, 0.0)) for aid in net.agents}
    net.resolve(contexts)
    assert net.agents['mm'].total_cash == 25.0
    assert net.agents['inv'].total_cash == 50.0
|
def test_send_many(net):
    """Two identical payments accumulate independently through the resolver."""
    for _ in range(2):
        net.send('mm', 'inv', MockMessage(100.0))
    contexts = {aid: net.context_for(aid, EnvView(0, 0.0)) for aid in net.agents}
    net.resolve(contexts)
    assert net.agents['mm'].total_cash == 50.0
    assert net.agents['inv'].total_cash == 100.0
|
def test_invalid_send(net):
    """Sending along a connection that was never added raises NetworkError."""
    with pytest.raises(NetworkError):
        net.send('mm', 'inv2', MockMessage(100.0))
|
def test_context_existence(net):
    """Connected agents appear in each other's contexts."""
    view = EnvView(0, 0.0)
    assert 'inv' in net.context_for('mm', view)
    assert 'mm' in net.context_for('inv', view)
|
def test_reset(net):
    """reset() zeroes agent state accumulated from resolved messages."""
    for _ in range(2):
        net.send('mm', 'inv', MockMessage(100.0))
    net.resolve({aid: net.context_for(aid, EnvView(0, 0.0)) for aid in net.agents})

    net.reset()
    for aid in ('mm', 'inv'):
        assert net.agents[aid].total_cash == 0.0
|
@pytest.fixture
def net2() -> Network:
    """Three fully disconnected agents, for adjacency-matrix tests."""
    agents = [MockAgent(aid) for aid in ('a', 'b', 'c')]
    return Network(agents)
|
def test_adjacency_matrix(net2):
    """Valid symmetric hollow matrices are accepted; malformed ones raise ValueError."""
    # A correct 2x2 symmetric hollow matrix succeeds.
    net2.add_connections_with_adjmat(['a', 'b'], np.array([[0, 1], [1, 0]]))

    # (agent ids, matrix, expected error message) for each invalid case.
    bad_cases = [
        (['a', 'b', 'c'], [[0, 1], [1, 0]],
         "Number of agent IDs doesn't match adjacency matrix dimensions."),
        (['a', 'b'], [[0, 0, 0], [0, 0, 0]], 'Adjacency matrix must be square.'),
        (['a', 'b'], [[0, 0], [1, 0]], 'Adjacency matrix must be symmetric.'),
        (['a', 'b'], [[1, 1], [1, 1]], 'Adjacency matrix must be hollow.'),
    ]
    for ids, matrix, expected in bad_cases:
        with pytest.raises(ValueError) as excinfo:
            net2.add_connections_with_adjmat(ids, np.array(matrix))
        assert str(excinfo.value) == expected
|
class AgentA(ph.Agent):
    """Bare Agent subtype used to exercise payload sender/receiver type checks."""
    pass
|
class AgentB(ph.Agent):
    """Second bare Agent subtype, distinct from AgentA for type-check tests."""
    pass
|
def test_payload_checks():
    """Payload sender/receiver type constraints are enforced only when enabled."""
    agents = [AgentA('A'), AgentB('B')]
    checked_net = ph.Network(agents, enforce_msg_payload_checks=True)
    checked_net.add_connection('A', 'B')

    # No constraints: both directions are allowed.
    @ph.msg_payload()
    class Payload1:
        pass

    checked_net.send('A', 'B', Payload1())
    checked_net.send('B', 'A', Payload1())

    # Positional sender/receiver types: only A -> B is valid.
    @ph.msg_payload(AgentA, AgentB)
    class Payload2:
        pass

    checked_net.send('A', 'B', Payload2())

    # Lists of permitted types on both ends: both directions valid.
    @ph.msg_payload([AgentA, AgentB], [AgentA, AgentB])
    class Payload3:
        pass

    checked_net.send('A', 'B', Payload3())
    checked_net.send('B', 'A', Payload3())

    # One-sided constraints (only the sender or only the receiver is restricted).
    @ph.msg_payload(sender_type=AgentA, receiver_type=None)
    class Payload4:
        pass

    @ph.msg_payload(sender_type=None, receiver_type=AgentA)
    class Payload5:
        pass

    checked_net.send('A', 'B', Payload4())
    checked_net.send('B', 'A', Payload5())

    # Sending Payload2 in the wrong direction is rejected.
    with pytest.raises(ph.network.NetworkError):
        checked_net.send('B', 'A', Payload2())

    # With checks disabled the same sends succeed in both directions.
    unchecked_net = ph.Network(agents, enforce_msg_payload_checks=False)
    unchecked_net.add_connection('A', 'B')
    unchecked_net.send('A', 'B', Payload2())
    unchecked_net.send('B', 'A', Payload2())
|
@msg_payload()
class Request():
    """Payload asking the receiver to process a cash amount."""

    # Amount of cash sent with the request.
    cash: float
|
@msg_payload()
class Response():
    """Payload returning a cash amount in reply to a Request."""

    # Amount of cash returned with the response.
    cash: float
|
class _TestAgent(Agent):
    """Agent that timestamps when it handles each payload type, so tests can
    assert the order in which the resolver delivers messages."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.req_time = time.time()
        self.res_time = time.time()

    @msg_handler(Request)
    def handle_request(self, _: Context, message: Request) -> List[Tuple[AgentID, MsgPayload]]:
        # Record arrival time and reply to the sender with half the cash.
        self.req_time = time.time()
        half = message.payload.cash / 2.0
        return [(message.sender_id, Response(half))]

    @msg_handler(Response)
    def handle_response(self, _: Context, message: Response) -> List[Tuple[AgentID, MsgPayload]]:
        # Record arrival time; responses terminate the exchange.
        self.res_time = time.time()
        return []
|
def test_ordering():
    """BatchResolver delivers all requests (in agent order) before any responses."""
    edges = (('A', 'B'), ('A', 'C'), ('B', 'C'))
    network = Network([_TestAgent('A'), _TestAgent('B'), _TestAgent('C')], BatchResolver())
    for src, dst in edges:
        network.add_connection(src, dst)
    for src, dst in edges:
        network.send(src, dst, Request(100.0))

    contexts = {aid: network.context_for(aid, EnvView(0, 0.0)) for aid in network.agents}
    network.resolve(contexts)

    # Requests are handled in agent order; the resulting responses come after.
    assert network['A'].req_time <= network['B'].req_time
    assert network['B'].req_time <= network['C'].req_time
    assert network['C'].res_time <= network['A'].res_time
    assert network['A'].res_time <= network['B'].res_time
|
def test_batch_resolver_round_limit():
    """With a round limit of zero, any queued message overflows the resolver."""
    network = Network([_TestAgent('A'), _TestAgent('B')], BatchResolver(round_limit=0))
    network.add_connection('A', 'B')
    network.send('A', 'B', Request(0))

    with pytest.raises(Exception):
        network.resolve({aid: network.context_for(aid, EnvView(0, 0.0)) for aid in network.agents})
|
class _TestAgent2(Agent):
    """Agent whose 'B' instance answers any message by messaging 'C'.

    'B' has no connection to 'C' in the test network, so resolving its reply
    should raise a NetworkError.
    """
    # NOTE: the original class carried a redundant __init__ that only forwarded
    # to super().__init__ — removed, as Python does this implicitly.

    def handle_message(self, _: Context, message) -> List[Tuple[AgentID, MsgPayload]]:
        # Only 'B' replies, deliberately targeting 'C' (no connection exists).
        # `message` is the incoming message object; the original annotated it
        # as `bool`, which was misleading.
        return [('C', True)] if self.id == 'B' else []
|
def test_invalid_response_connection():
    """A response sent along a non-existent connection raises NetworkError."""
    network = Network([_TestAgent2('A'), _TestAgent2('B'), _TestAgent2('C')], BatchResolver())
    network.add_connection('A', 'B')
    network.send('A', 'B', Request(0.0))

    # B's handler replies to C, but no B <-> C connection exists.
    with pytest.raises(NetworkError):
        network.resolve({aid: network.context_for(aid, EnvView(0, 0.0)) for aid in network.agents})
|
@pytest.fixture
def net():
    """Two-agent StochasticNetwork backed by a two-round BatchResolver."""
    agents = [Agent('A'), Agent('B')]
    return StochasticNetwork(agents, BatchResolver(2))
|
def test_stochastic_network_1(net):
    """A connection with rate 1.0 is always present, including after resampling."""
    net.add_connection('A', 'B', 1.0)
    assert net.graph.has_edge('A', 'B') and net.graph.has_edge('B', 'A')

    net.resample_connectivity()
    assert net.graph.has_edge('A', 'B') and net.graph.has_edge('B', 'A')
|
def test_stochastic_network_2(net):
    """A connection added with rate 0.0 never materialises."""
    net.add_connection('A', 'B', 0.0)
    assert not net.graph.has_edge('A', 'B')

    net.resample_connectivity()
    assert not net.graph.has_edge('A', 'B')
|
def test_stochastic_network_3(net):
    """add_connections_from honours per-connection rates (here 0.0)."""
    net.add_connections_from([('A', 'B', 0.0)])
    assert not net.graph.has_edge('A', 'B')

    net.resample_connectivity()
    assert not net.graph.has_edge('A', 'B')
|
def test_stochastic_network_4(net):
    """add_connections_between honours the shared rate keyword (here 0.0)."""
    net.add_connections_between(['A'], ['B'], rate=0.0)
    assert not net.graph.has_edge('A', 'B')

    net.resample_connectivity()
    assert not net.graph.has_edge('A', 'B')
|
# NOTE(review): the lines below are scraping residue (dataset-viewer UI text),
# not part of the test suite — commented out so the file stays valid Python:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.