code
stringlengths
17
6.64M
class FSMRuntimeError(Exception):
    """Raised when an FSM stage transition fails validation while running an
    episode with the :class:`FiniteStateMachineEnv`.
    """
class FSMStage:
    """Decorator used in the :class:`FiniteStateMachineEnv` to declare the finite state
    machine structure and assign handler functions to stages.

    A 'stage' corresponds to a state in the finite state machine, however to avoid any
    confusion with Environment states we refer to them as stages.

    Attributes:
        id: The name of this stage.
        acting_agents: The agents that will take an action at the end of the steps that
            belong to this stage.
        rewarded_agents: If provided, only the given agents will calculate and return a
            reward at the end of the step for this stage. If not provided, a reward will
            be computed for all acting agents for the current stage.
        next_stages: The stages that this stage can transition to.
        handler: Environment class method to be called when the FSM enters this stage.
    """

    def __init__(
        self,
        stage_id: StageID,
        acting_agents: Sequence[AgentID],
        rewarded_agents: Optional[Sequence[AgentID]] = None,
        next_stages: Optional[Sequence[StageID]] = None,
        handler: Optional[Callable[[], StageID]] = None,
    ) -> None:
        self.id = stage_id
        self.acting_agents = acting_agents
        self.rewarded_agents = rewarded_agents
        # Normalize to an empty list so downstream validation can always iterate.
        self.next_stages = next_stages or []
        self.handler = handler

    def __call__(self, handler_fn: Callable[..., Optional[StageID]]):
        # Tag the wrapped method so FiniteStateMachineEnv.__init__ can discover
        # it when it scans the subclass for decorated stage handlers.
        setattr(handler_fn, "_decorator", self)
        self.handler = handler_fn
        return handler_fn
@dataclass(frozen=True)
class FSMEnvView(EnvView):
    """Immutable view of an FSM environment's public state, extending
    :class:`EnvView` with the stage the environment is currently in.
    """

    stage: StageID
class FiniteStateMachineEnv(PhantomEnv):
    """Base environment class that allows implementation of a finite state machine to
    handle complex environment multi-step setups.
    This class should not be used directly and instead should be subclassed.
    Use the :class:`FSMStage` decorator on handler methods within subclasses of this
    class to register stages to the FSM.

    A 'stage' corresponds to a state in the finite state machine, however to avoid any
    confusion with Environment states we refer to them as stages.
    Stage IDs can be any type that is hashable, eg. strings, ints, enums.

    Arguments:
        num_steps: The maximum number of steps the environment allows per episode.
        network: A Network class or derived class describing the connections between
            agents and agents in the environment.
        initial_stage: The initial starting stage of the FSM. When the reset() method is
            called the environment is initialised into this stage.
        env_supertype: Optional Supertype class instance for the environment. If this is
            set, it will be sampled from and the :attr:`env_type` property set on the
            class with every call to :meth:`reset()`.
        agent_supertypes: Optional mapping of agent IDs to Supertype class instances. If
            these are set, each supertype will be sampled from and the :attr:`type`
            property set on the related agent with every call to :meth:`reset()`.
        stages: List of FSM stages. FSM stages can be defined via this list or
            alternatively via the :class:`FSMStage` decorator.
    """

    def __init__(
        self,
        num_steps: int,
        network: Network,
        initial_stage: StageID,
        env_supertype: Optional[Supertype] = None,
        agent_supertypes: Optional[Mapping[AgentID, Supertype]] = None,
        stages: Optional[Sequence[FSMStage]] = None,
    ) -> None:
        super().__init__(num_steps, network, env_supertype, agent_supertypes)

        self._initial_stage = initial_stage

        # Per-episode caches of the most recent obs/reward/info emitted for each
        # agent; step() returns these accumulated values when the episode ends.
        self._rewards: Dict[AgentID, Optional[float]] = {}
        self._observations: Dict[AgentID, Any] = {}
        self._infos: Dict[AgentID, Dict[str, Any]] = {}

        # Registry of all stages, keyed by stage ID.
        self._stages: Dict[StageID, FSMStage] = {}

        self._current_stage = self.initial_stage
        self.previous_stage: Optional[StageID] = None

        # Register stages supplied via the ``stages`` init parameter first.
        for stage in (stages or []):
            if (stage.id not in self._stages):
                self._stages[stage.id] = stage

        # Then scan this (sub)class for methods tagged by the @FSMStage decorator.
        # NOTE(review): a stage present in both ``stages`` and as a decorated
        # handler triggers the duplicate-ID error below — presumably intended.
        for attr_name in dir(self):
            attr = getattr(self, attr_name)
            if callable(attr):
                handler_fn = attr
                if hasattr(handler_fn, '_decorator'):
                    if (handler_fn._decorator.id in self._stages):
                        raise FSMValidationError(f"Found multiple stages with ID '{handler_fn._decorator.id}'")
                    self._stages[handler_fn._decorator.id] = handler_fn._decorator

        # Validate the assembled state machine before the env can be used.
        if (len(self._stages) == 0):
            raise FSMValidationError("No registered stages. Please use the 'FSMStage' decorator or the stage_definitions init parameter")
        if (self.initial_stage not in self._stages):
            raise FSMValidationError(f"Initial stage '{self.initial_stage}' is not a valid stage")
        for stage in self._stages.values():
            for next_stage in stage.next_stages:
                if (next_stage not in self._stages):
                    raise FSMValidationError(f"Next stage '{next_stage}' given in stage '{stage.id}' is not a valid stage")
        # A stage without a handler cannot choose its successor, so it must have
        # exactly one next stage to transition to deterministically.
        for stage in self._stages.values():
            if ((len(stage.next_stages) != 1) and (stage.handler is None)):
                raise FSMValidationError(f"Stage '{stage.id}' without handler must have exactly one next stage (got {len(stage.next_stages)})")

    @property
    def initial_stage(self) -> StageID:
        'Returns the initial stage of the FSM Env.'
        return self._initial_stage

    @property
    def current_stage(self) -> StageID:
        'Returns the current stage of the FSM Env.'
        return self._current_stage

    def is_fsm_deterministic(self) -> bool:
        'Returns true if all stages are followed by exactly one stage.'
        return all(((len(s.next_stages) == 1) for s in self._stages.values()))

    def view(self, agent_views: Dict[AgentID, AgentView]) -> FSMEnvView:
        "Return an immutable view to the FSM environment's public state."
        return FSMEnvView(self.current_step, (self.current_step / self.num_steps), self.current_stage)

    def reset(
        self,
        seed: Optional[int] = None,
        options: Optional[Dict[str, Any]] = None,
    ) -> Tuple[Dict[AgentID, Any], Dict[str, Any]]:
        '''
        Reset the environment and return an initial observation.

        This method resets the step count and the :attr:`network`. This includes all the
        agents in the network.

        Args:
            seed: An optional seed to use for the new episode.
            options: Additional information to specify how the environment is reset.

        Returns:
            - A dictionary mapping Agent IDs to observations made by the respective
              agents. It is not required for all agents to make an initial observation.
            - An optional dictionary with auxillary information, equivalent to the info
              dictionary in `env.step()`.
        '''
        logger.log_reset()
        self._current_step = 0
        self._current_stage = self.initial_stage
        # Re-sample all supertype samplers so each episode gets fresh type values.
        for sampler in self._samplers:
            sampler.sample()
        if (self.env_supertype is not None):
            self.env_type = self.env_supertype.sample()
        self.network.reset()
        self._terminations = set()
        self._truncations = set()
        self._rewards = {aid: None for aid in self.strategic_agent_ids}
        # Only the initial stage's acting agents produce an initial observation.
        acting_agents = self._stages[self.current_stage].acting_agents
        self._make_ctxs([aid for aid in acting_agents if (aid in self.strategic_agent_ids)])
        obs = {ctx.agent.id: ctx.agent.encode_observation(ctx) for ctx in self._ctxs.values()}
        logger.log_observations(obs)
        # Agents may opt out of an initial observation by returning None.
        return ({k: v for (k, v) in obs.items() if (v is not None)}, {})

    def step(self, actions: Mapping[AgentID, Any]) -> PhantomEnv.Step:
        '''
        Step the simulation forward one step given some set of agent actions.

        Arguments:
            actions: Actions output by the agent policies to be translated into
                messages and passed throughout the network.

        Returns:
            A :class:`PhantomEnv.Step` object containing observations, rewards,
            terminations, truncations and infos.
        '''
        self._current_step += 1
        logger.log_step(self.current_step, self.num_steps)
        logger.log_actions(actions)
        logger.log_start_decoding_actions()
        self._make_ctxs(self.agent_ids)
        # Decode and apply the actions of the current stage's acting agents.
        acting_agents = self._stages[self.current_stage].acting_agents
        self._handle_acting_agents(acting_agents, actions)
        # Determine the next stage: either via the stage's handler function or,
        # when there is no handler, via the single statically-declared successor.
        env_handler = self._stages[self.current_stage].handler
        if (env_handler is None):
            self.resolve_network()
            next_stages = self._stages[self.current_stage].next_stages
            if (len(next_stages) == 0):
                raise ValueError(f"Current stage '{self.current_stage}' does not have an env handler or a next stage defined")
            next_stage = next_stages[0]
        elif hasattr(env_handler, '__self__'):
            # Bound method (registered via the @FSMStage decorator on this env).
            next_stage = env_handler()
        else:
            # Plain function supplied via the FSMStage init parameter.
            next_stage = env_handler(self)
        if (next_stage not in self._stages[self.current_stage].next_stages):
            raise FSMRuntimeError(f"FiniteStateMachineEnv attempted invalid transition from '{self.current_stage}' to {next_stage}")
        observations: Dict[AgentID, Any] = {}
        rewards: Dict[AgentID, float] = {}
        terminations: Dict[AgentID, bool] = {}
        truncations: Dict[AgentID, bool] = {}
        infos: Dict[AgentID, Dict[str, Any]] = {}
        # NOTE(review): when rewarded_agents is unset, ALL strategic agents are
        # treated as next-acting (not just the next stage's acting agents) —
        # looks deliberate but asymmetric with the else-branch; confirm.
        if (self._stages[self.current_stage].rewarded_agents is None):
            rewarded_agents = self.strategic_agent_ids
            next_acting_agents = self.strategic_agent_ids
        else:
            rewarded_agents = self._stages[self.current_stage].rewarded_agents
            next_acting_agents = self._stages[next_stage].acting_agents
        for aid in self.strategic_agent_ids:
            # Agents already done in a previous step emit nothing further.
            if ((aid in self._terminations) or (aid in self._truncations)):
                continue
            ctx = self._ctxs[aid]
            if (aid in next_acting_agents):
                obs = ctx.agent.encode_observation(ctx)
                if (obs is not None):
                    observations[aid] = obs
                    infos[aid] = ctx.agent.collect_infos(ctx)
            if (aid in rewarded_agents):
                rewards[aid] = ctx.agent.compute_reward(ctx)
            terminations[aid] = ctx.agent.is_terminated(ctx)
            truncations[aid] = ctx.agent.is_truncated(ctx)
            if terminations[aid]:
                self._terminations.add(aid)
            if truncations[aid]:
                self._truncations.add(aid)
        logger.log_step_values(observations, rewards, terminations, truncations, infos)
        logger.log_metrics(self)
        # Accumulate the latest values so the episode-end return is complete.
        self._observations.update(observations)
        self._rewards.update(rewards)
        self._infos.update(infos)
        logger.log_fsm_transition(self.current_stage, next_stage)
        (self.previous_stage, self._current_stage) = (self.current_stage, next_stage)
        terminations['__all__'] = self.is_terminated()
        truncations['__all__'] = self.is_truncated()
        if ((self.current_stage is None) or terminations['__all__'] or truncations['__all__']):
            # Episode is over: return the accumulated values for all agents.
            logger.log_episode_done()
            return self.Step(observations=self._observations, rewards=self._rewards, terminations=terminations, truncations=truncations, infos=self._infos)
        # Mid-episode: only return rewards for agents that observed this step.
        rewards = {aid: self._rewards[aid] for aid in observations}
        return self.Step(observations, rewards, terminations, truncations, infos)
@dataclass(frozen=True)
class MsgPayload:
    """Base structure for the payload carried by a network message."""
def msg_payload(sender_type: AgentTypeArg = None, receiver_type: AgentTypeArg = None):
    """Decorator that turns a class into a frozen-dataclass message payload and
    records which agent types are allowed to send and receive it.
    """

    def _normalize(types):
        # Accept None, a single type/name, or a list of them; store class
        # names as strings so checks compare against __name__ at send time.
        if types is None:
            return None
        type_list = types if isinstance(types, list) else [types]
        return [(t.__name__ if isinstance(t, type) else t) for t in type_list]

    def wrap(message_class: Type) -> Type:
        message_class._sender_types = _normalize(sender_type)
        message_class._receiver_types = _normalize(receiver_type)
        return dataclass(frozen=True)(message_class)

    return wrap
@dataclass(frozen=True)
class Message(Generic[MsgPayloadType]):
    """A single message in transit on the network: the sending agent's ID, the
    receiving agent's ID, and the payload being carried.
    """

    sender_id: AgentID
    receiver_id: AgentID
    payload: MsgPayload
class NotRecorded:
    """Singleton placeholder marking metric slots that were not recorded."""

    def __new__(cls):
        # Lazily create the single shared instance on first construction;
        # every later call returns the same object.
        if not hasattr(cls, 'instance'):
            cls.instance = super().__new__(cls)
        return cls.instance

    def __repr__(self) -> str:
        return '<NotRecorded>'
class Metric(Generic[MetricValue], ABC):
    """Class for extracting metrics from a :class:`phantom.PhantomEnv` instance.

    Arguments:
        fsm_stages: Optional list of FSM stages to filter metric recording on. If None
            is given metrics will be recorded on all stages when used with an FSM Env.
            If a list of FSM stages is given, the metric will only be recorded when the
            Env is in these stages, otherwise a None value will be recorded.
        description: Optional description string for use in data exploration tools.
    """

    def __init__(
        self,
        fsm_stages: Optional[Sequence[FSMStage]] = None,
        description: Optional[str] = None,
    ) -> None:
        self.fsm_stages = fsm_stages
        self.description = description

    @abstractmethod
    def extract(self, env: PhantomEnv) -> MetricValue:
        """Extract and return the current metric value from `env`.

        Arguments:
            env: The environment instance.
        """
        raise NotImplementedError

    def reduce(self, values: Sequence[MetricValue], mode: Literal['train', 'evaluate']) -> MetricValue:
        """Reduce a set of observations into a single representative value.

        The default implementation is to return the latest observation.

        Arguments:
            values: Set of observations to reduce.
            mode: Whether the metric is being recorded during training or evaluation.
        """
        return values[-1]
class LambdaMetric(Metric, Generic[MetricValue]):
    """Class for extracting metrics from a :class:`phantom.PhantomEnv` instance with a
    provided extraction function.

    Arguments:
        extract_fn: Function to extract the metric value from the environment.
        train_reduce_fn: Function to reduce a set of observations into a single
            representative value during training.
        eval_reduce_fn: Function to reduce a set of observations into a single
            representative value during evaluation.
        fsm_stages: Optional list of FSM stages to filter metric recording on. If None
            is given metrics will be recorded on all stages when used with an FSM Env.
            If a list of FSM stages is given, the metric will only be recorded when the
            Env is in these stages, otherwise a None value will be recorded.
        description: Optional description string for use in data exploration tools.
    """

    def __init__(
        self,
        extract_fn: Callable[[PhantomEnv], MetricValue],
        train_reduce_fn: Callable[[Sequence[MetricValue]], MetricValue],
        eval_reduce_fn: Callable[[Sequence[MetricValue]], MetricValue],
        fsm_stages: Optional[Sequence[FSMStage]] = None,
        description: Optional[str] = None,
    ) -> None:
        self.extract_fn = extract_fn
        self.train_reduce_fn = train_reduce_fn
        self.eval_reduce_fn = eval_reduce_fn
        self.fsm_stages = fsm_stages
        self.description = description

    def extract(self, env: PhantomEnv) -> MetricValue:
        """Extract and return the current metric value from `env` via `extract_fn`.

        Arguments:
            env: The environment instance.
        """
        return self.extract_fn(env)

    def reduce(self, values: Sequence[MetricValue], mode: Literal['train', 'evaluate']) -> MetricValue:
        """Reduce a set of observations into a single representative value using
        the reduction function matching the given mode.

        Arguments:
            values: Set of observations to reduce.
            mode: Whether the metric is being recorded during training or evaluation.
        """
        if mode == 'train':
            return self.train_reduce_fn(values)
        if mode == 'evaluate':
            return self.eval_reduce_fn(values)
        raise ValueError(f'Unknown mode: {mode}')
class SimpleMetric(Metric, Generic[SimpleMetricValue], ABC):
    """Base class of a set of helper metric classes."""

    def __init__(
        self,
        train_reduce_action: Literal['last', 'mean', 'sum'] = 'mean',
        eval_reduce_action: Literal['last', 'mean', 'sum', 'none'] = 'none',
        fsm_stages: Optional[Sequence[FSMStage]] = None,
        description: Optional[str] = None,
    ) -> None:
        # Validate the reduce actions eagerly so misconfiguration fails at
        # construction time rather than at the end of the first episode.
        if train_reduce_action not in ('last', 'mean', 'sum'):
            raise ValueError(f"train_reduce_action field of {self.__class__} metric must be one of: 'last', 'mean' or 'sum'. Got '{train_reduce_action}'.")
        if eval_reduce_action not in ('last', 'mean', 'sum', 'none'):
            raise ValueError(f"eval_reduce_action field of {self.__class__} metric class must be one of: 'last', 'mean', 'sum' or 'none'. Got '{eval_reduce_action}'.")
        self.train_reduce_action = train_reduce_action
        self.eval_reduce_action = eval_reduce_action
        super().__init__(fsm_stages, description)

    def reduce(self, values: Sequence[SimpleMetricValue], mode: Literal['train', 'evaluate']) -> SimpleMetricValue:
        """Reduce the per-step values according to the configured action for `mode`."""
        action = self.train_reduce_action if mode == 'train' else self.eval_reduce_action
        if action == 'none':
            return np.array(values)
        if self.fsm_stages is not None:
            # Drop placeholder entries from steps where the metric was filtered out.
            values = [v for v in values if v is not not_recorded]
        if action == 'last':
            return values[-1] if len(values) > 0 else None
        if action == 'mean':
            return np.mean(values)
        if action == 'sum':
            return np.sum(values)
        raise ValueError
class SimpleAgentMetric(SimpleMetric, Generic[SimpleMetricValue]):
    """Simple helper class for extracting single ints or floats from the state of a
    given agent.

    Three options are available for summarizing the values at the end of each episode
    during training or evaluation:

    - 'last' - takes the value from the last step
    - 'mean' - takes the mean of all the per-step values
    - 'sum' - takes the sum of all the per-step values

    During evaluation there is also the option for no value summarizing by using 'none'.

    Arguments:
        agent_id: The ID of the agent to record the metric for.
        agent_property: The property existing on the agent to record, can be nested
            (e.g. ``Agent.property.sub_property``).
        train_reduce_action: The operation to perform on all the per-step recorded
            values at the end of the episode ('last', 'mean' or 'sum').
        eval_reduce_action: The operation to perform on all the per-step recorded
            values at the end of the episode ('last', 'mean', 'sum' or 'none').
        description: Optional description string for use in data exploration tools.
    """

    def __init__(
        self,
        agent_id: str,
        agent_property: str,
        train_reduce_action: Literal['last', 'mean', 'sum'] = 'mean',
        eval_reduce_action: Literal['last', 'mean', 'sum', 'none'] = 'none',
        fsm_stages: Optional[Sequence[FSMStage]] = None,
        description: Optional[str] = None,
    ) -> None:
        self.agent_id = agent_id
        self.agent_property = agent_property
        super().__init__(train_reduce_action, eval_reduce_action, fsm_stages, description)

    def extract(self, env: PhantomEnv) -> SimpleMetricValue:
        # Resolve the (possibly dotted) property path on the target agent.
        return _rgetattr(env.agents[self.agent_id], self.agent_property)
class SimpleEnvMetric(SimpleMetric, Generic[SimpleMetricValue]):
    """Simple helper class for extracting single ints or floats from the state of
    the env.

    Three options are available for summarizing the values at the end of each episode
    during training or evaluation:

    - 'last' - takes the value from the last step
    - 'mean' - takes the mean of all the per-step values
    - 'sum' - takes the sum of all the per-step values

    During evaluation there is also the option for no value summarizing by using 'none'.

    Arguments:
        env_property: The property existing on the environment to record, can be nested
            (e.g. ``Agent.property.sub_property``).
        train_reduce_action: The operation to perform on all the per-step recorded
            values at the end of the episode ('last', 'mean' or 'sum').
        eval_reduce_action: The operation to perform on all the per-step recorded
            values at the end of the episode ('last', 'mean', 'sum' or 'none').
        description: Optional description string for use in data exploration tools.
    """

    def __init__(
        self,
        env_property: str,
        train_reduce_action: Literal['last', 'mean', 'sum'] = 'mean',
        eval_reduce_action: Literal['last', 'mean', 'sum', 'none'] = 'none',
        fsm_stages: Optional[Sequence[FSMStage]] = None,
        description: Optional[str] = None,
    ) -> None:
        self.env_property = env_property
        super().__init__(train_reduce_action, eval_reduce_action, fsm_stages, description)

    def extract(self, env: PhantomEnv) -> SimpleMetricValue:
        # Resolve the (possibly dotted) property path on the environment itself.
        return _rgetattr(env, self.env_property)
class AggregatedAgentMetric(SimpleMetric, Generic[SimpleMetricValue]):
    """Simple helper class for extracting single ints or floats from the states of a
    group of agents and performing a reduction operation on the values.

    Four options are available for reducing the values from the group of agents:

    - 'min' - takes the minimum of all the agents' values
    - 'max' - takes the maximum of all the agents' values
    - 'mean' - takes the mean of all the agents' values
    - 'sum' - takes the sum of all the agents' values

    Three options are available for summarizing the values at the end of each episode
    during training or evaluation:

    - 'last' - takes the value from the last step
    - 'mean' - takes the mean of all the per-step values
    - 'sum' - takes the sum of all the per-step values

    During evaluation there is also the option for no value summarizing by using 'none'.

    Arguments:
        agent_ids: The ID's of the agents to record the metric for.
        agent_property: The property existing on the agent to record, can be nested
            (e.g. ``Agent.property.sub_property``).
        group_reduce_action: The operation used to reduce the group of agents' values
            into a single value each step ('min', 'max', 'mean' or 'sum').
        train_reduce_action: The operation to perform on all the per-step recorded
            values at the end of the episode ('last', 'mean' or 'sum').
        eval_reduce_action: The operation to perform on all the per-step recorded
            values at the end of the episode ('last', 'mean', 'sum' or 'none').
        description: Optional description string for use in data exploration tools.
    """

    # Maps each group_reduce_action to the NumPy reduction it applies.
    _GROUP_REDUCERS = {'min': np.min, 'max': np.max, 'mean': np.mean, 'sum': np.sum}

    def __init__(
        self,
        agent_ids: Iterable[str],
        agent_property: str,
        group_reduce_action: Literal['min', 'max', 'mean', 'sum'] = 'mean',
        train_reduce_action: Literal['last', 'mean', 'sum'] = 'mean',
        eval_reduce_action: Literal['last', 'mean', 'sum', 'none'] = 'none',
        fsm_stages: Optional[Sequence[FSMStage]] = None,
        description: Optional[str] = None,
    ) -> None:
        # Validate eagerly; the original message blamed the wrong class
        # ("SimpleMetric") and omitted the offending value.
        if group_reduce_action not in ('min', 'max', 'mean', 'sum'):
            raise ValueError(f"group_reduce_action field of {self.__class__} metric class must be one of: 'min', 'max', 'mean' or 'sum'. Got '{group_reduce_action}'.")
        self.agent_ids = agent_ids
        self.agent_property = agent_property
        self.group_reduce_action = group_reduce_action
        super().__init__(train_reduce_action, eval_reduce_action, fsm_stages, description)

    def extract(self, env: PhantomEnv) -> SimpleMetricValue:
        """Collect the property from each agent in the group and reduce to one value."""
        values = [
            _rgetattr(env.agents[agent_id], self.agent_property)
            for agent_id in self.agent_ids
        ]
        return self._GROUP_REDUCERS[self.group_reduce_action](values)
def _rgetattr(obj, attr, *args): def _getattr(obj, attr): return getattr(obj, attr, *args) return reduce(_getattr, ([obj] + attr.split('.')))
def logging_helper(
    env: PhantomEnv,
    metrics: Dict[str, Metric],
    metric_values: DefaultDict[str, List[float]],
) -> None:
    """Append each metric's current value (or the not-recorded placeholder) to
    its running per-episode list, honouring any FSM-stage filters.
    """
    for metric_id, metric in metrics.items():
        should_record = (
            not isinstance(env, FiniteStateMachineEnv)
            or metric.fsm_stages is None
            or env.current_stage in metric.fsm_stages
        )
        value = metric.extract(env) if should_record else not_recorded
        metric_values[metric_id].append(value)
class NetworkError(Exception):
    """Raised for invalid messaging operations on a :class:`Network`."""
class Network():
    """P2P messaging network.

    This class is responsible for monitoring connections and tracking
    state/flow between adjacent agents in a peer-to-peer network. The
    underlying representation is based on dictionaries via the NetworkX
    library.

    Arguments:
        agents: Optional list of agents to add to the network.
        resolver: Optional custom resolver to use, by default will use the BatchResolver
            with a `round_limit` of 2.
        connections: Optional initial list of connections to create in the network.
        ignore_connection_errors: If True will not raise errors if an attempt is made
            to send a message along an non-existant connection.
        enforce_msg_payload_checks: If True will ensure that accepted agent types given
            with the `@msg_payload` decorator are enforced.

    Attributes:
        agents: Mapping between IDs and the corresponding agents in the
            network.
        graph: Directed graph modelling the connections between agents.
    """

    def __init__(
        self,
        agents: Optional[Iterable[Agent]] = None,
        resolver: Optional[Resolver] = None,
        connections: Optional[Iterable[Tuple[AgentID, AgentID]]] = None,
        ignore_connection_errors: bool = False,
        enforce_msg_payload_checks: bool = True,
    ) -> None:
        self.graph = nx.DiGraph()
        self.agents: Dict[AgentID, Agent] = {}
        self.resolver = (resolver or BatchResolver())
        self.ignore_connection_errors = ignore_connection_errors
        self.enforce_msg_payload_checks = enforce_msg_payload_checks
        # Ensures the MsgPayload deprecation warning is emitted at most once.
        self._has_raised_msg_payload_deprecation_warning = False
        if (agents is not None):
            self.add_agents(agents)
        if (connections is not None):
            for connection in connections:
                self.add_connection(*connection)

    @property
    def agent_ids(self) -> KeysView[AgentID]:
        'Iterator over the IDs of active agents in the network.'
        return self.agents.keys()

    def add_agent(self, agent: Agent) -> None:
        """Add a new agent node to the network.

        Arguments:
            agent: The new agent instance type to be added.

        Raises:
            ValueError: If an agent with the same ID is already registered.
        """
        if (agent.id in self.agent_ids):
            raise ValueError(f"Agent with ID = '{agent.id}' already exists.")
        self.agents[agent.id] = agent
        self.graph.add_node(agent.id)

    def add_agents(self, agents: Iterable[Agent]) -> None:
        """Add new agent nodes to the network.

        Arguments:
            agents: An iterable object over the agents to be added.
        """
        for agent in agents:
            self.add_agent(agent)

    def add_connection(self, u: AgentID, v: AgentID) -> None:
        """Connect the agents with IDs :code:`u` and :code:`v`.

        Connections are bidirectional: an edge is added in both directions.

        Arguments:
            u: One agent's ID.
            v: The other agent's ID.
        """
        if (u not in self.agent_ids):
            raise ValueError(f"Agent with ID = '{u}' does not exist.")
        if (v not in self.agent_ids):
            raise ValueError(f"Agent with ID = '{v}' does not exist.")
        self.graph.add_edge(u, v)
        self.graph.add_edge(v, u)

    def add_connections_from(self, ebunch: Iterable[Tuple[AgentID, AgentID]]) -> None:
        """Connect all agent ID pairs in :code:`ebunch`.

        Arguments:
            ebunch: Pairs of vertices to be connected.
        """
        for (u, v) in ebunch:
            self.add_connection(u, v)

    def add_connections_between(self, us: Iterable[AgentID], vs: Iterable[AgentID]) -> None:
        """Connect all agents in :code:`us` to all agents in :code:`vs`.

        Arguments:
            us: Collection of nodes.
            vs: Collection of nodes.
        """
        self.add_connections_from(product(us, vs))

    def add_connections_with_adjmat(self, agent_ids: Sequence[AgentID], adjacency_matrix: np.ndarray) -> None:
        """Connect a subset of agents to one another via an adjacency matrix.

        Arguments:
            agent_ids: Sequence of agent IDs that correspond to each dimension of
                the adjacency matrix.
            adjacency_matrix: A square, symmetric, hollow matrix with entries
                in {0, 1}. A value of 1 indicates a connection between two
                agents.
        """
        num_nodes = adjacency_matrix.shape[0]
        if (len(agent_ids) != num_nodes):
            raise ValueError("Number of agent IDs doesn't match adjacency matrix dimensions.")
        if (len(set(adjacency_matrix.shape)) != 1):
            raise ValueError('Adjacency matrix must be square.')
        if (not (adjacency_matrix.transpose() == adjacency_matrix).all()):
            raise ValueError('Adjacency matrix must be symmetric.')
        # "Hollow" = zero diagonal, i.e. no self-connections.
        if (not (np.abs((adjacency_matrix.diagonal() - 0.0)) < 1e-05).all()):
            raise ValueError('Adjacency matrix must be hollow.')
        for (i, agent_id) in enumerate(agent_ids):
            self.add_connections_between([agent_id], [agent_ids[j] for j in range(num_nodes) if (adjacency_matrix[(i, j)] > 0)])

    def reset(self) -> None:
        'Reset the message queues along each edge and all agents.'
        self.resolver.reset()
        for agent in self.agents.values():
            agent.reset()

    def subnet_for(self, agent_id: AgentID) -> 'Network':
        """Returns a Sub Network associated with a given agent.

        The sub network contains the focal agent plus its direct successors and
        predecessors, and a reset deep copy of the resolver.

        NOTE(review): the new instance is built via ``__new__`` and does not
        copy ``ignore_connection_errors`` / ``enforce_msg_payload_checks`` /
        ``_has_raised_msg_payload_deprecation_warning`` — confirm callers never
        send() through a sub network.

        Arguments:
            agent_id: The ID of the focal agent
        """
        network = Network.__new__(Network)
        network.graph = self.graph.subgraph(chain(iter((agent_id,)), self.graph.successors(agent_id), self.graph.predecessors(agent_id)))
        network.agents = {aid: self.agents[aid] for aid in network.graph.nodes}
        network.resolver = deepcopy(self.resolver)
        network.resolver.reset()
        return network

    def context_for(self, agent_id: AgentID, env_view: EnvView) -> Context:
        """Returns the local context for agent :code:`agent_id`.

        Here we define a neighbourhood as being the first-order ego-graph with
        :code:`agent_id` set as the focal node.

        Arguments:
            agent_id: The ID of the focal agent.
            env_view: The current public view of the environment.
        """
        agent_views = {neighbour_id: self.agents[neighbour_id].view(agent_id) for neighbour_id in self.graph.neighbors(agent_id)}
        return Context(self.agents[agent_id], agent_views, env_view)

    def has_edge(self, sender_id: AgentID, receiver_id: AgentID) -> bool:
        """Returns whether two agents are connected.

        Arguments:
            sender_id: The sender ID.
            receiver_id: The receiver ID.
        """
        return ((sender_id, receiver_id) in self.graph.edges)

    def send(self, sender_id: AgentID, receiver_id: AgentID, payload: MsgPayload) -> None:
        """Send a message across the network.

        Arguments:
            sender_id: The sender ID.
            receiver_id: The receiver ID.
            payload: The contents of the message.

        Raises:
            NetworkError: If no connection exists (unless
                ``ignore_connection_errors`` is set) or the payload fails the
                sender/receiver type checks.
        """
        if ((not self.ignore_connection_errors) and (not self.has_edge(sender_id, receiver_id))):
            raise NetworkError(f'No connection between {sender_id} and {receiver_id}.')
        if self.enforce_msg_payload_checks:
            self._enforce_payload_checks(sender_id, receiver_id, payload)
        self.resolver.push(Message(sender_id, receiver_id, payload))

    def resolve(self, contexts: Mapping[AgentID, Context]) -> None:
        """Resolve all messages in the network and clear volatile memory.

        Arguments:
            contexts: The current contexts for all agents for the current step.
        """
        logger.log_start_resolving_msgs()
        self.resolver.resolve(self, contexts)
        self.resolver.reset()

    def get_agents_where(self, pred: Callable[[Agent], bool]) -> Dict[AgentID, Agent]:
        """Returns the set of agents in the network that satisfy a predicate.

        Arguments:
            pred: The filter predicate; should return :code:`True` iff the
                agent **should** be included in the set. This method is
                akin to the standard Python function :code:`filter`.
        """
        return {agent_id: self.agents[agent_id] for agent_id in self.graph.nodes if pred(self.agents[agent_id])}

    def get_agents_with_type(self, agent_type: Type) -> Dict[AgentID, Agent]:
        """Returns a collection of agents in the network with a given type.

        Arguments:
            agent_type: The class type of agents to include in the set.
        """
        return self.get_agents_where((lambda a: isinstance(a, agent_type)))

    def get_agents_without_type(self, agent_type: Type) -> Dict[AgentID, Agent]:
        """Returns a collection of agents in the network without a given type.

        Arguments:
            agent_type: The class type of agents you want to exclude.
        """
        return self.get_agents_where((lambda a: (not isinstance(a, agent_type))))

    def _enforce_payload_checks(self, sender_id, receiver_id, payload):
        """Internal method: validate a payload's declared sender/receiver types.

        Payloads without the ``@msg_payload`` markers are allowed through (with
        a one-time deprecation warning) only if they subclass ``MsgPayload``.
        """
        if ((not hasattr(payload, '_sender_types')) or (not hasattr(payload, '_receiver_types'))):
            if isinstance(payload, MsgPayload):
                if (not self._has_raised_msg_payload_deprecation_warning):
                    warnings.warn('MsgPayload type is deprecated. In future, use the @msg_payload decorator', DeprecationWarning)
                    self._has_raised_msg_payload_deprecation_warning = True
                return
            raise NetworkError(f"Message payloads sent across the network must use the 'msg_payload' decorator (bad payload = '{payload}')")
        (sender, receiver) = (self.agents[sender_id], self.agents[receiver_id])
        # A None type list means "any agent type allowed" for that direction.
        if ((payload._sender_types is not None) and (sender.__class__.__name__ not in payload._sender_types)):
            raise NetworkError(f"Message payload of type '{payload.__class__.__name__}' cannot be sent by agent with type '{sender.__class__.__name__:}' (expected one of {payload._sender_types})")
        if ((payload._receiver_types is not None) and (receiver.__class__.__name__ not in payload._receiver_types)):
            raise NetworkError(f"Message payload of type '{payload.__class__.__name__}' cannot be received by agent with type '{receiver.__class__.__name__:}' (expected one of {payload._receiver_types})")

    def __getitem__(self, agent_id: AgentID) -> Agent:
        return self.agents[agent_id]

    def __len__(self) -> int:
        return len(self.graph)
class StochasticNetwork(Network):
    """Stochastic P2P messaging network.

    Extends the base :class:`Network` with probabilistic connectivity: every
    requested connection carries a ``rate`` and is (re)created with that
    probability, both when first added and on each call to
    :meth:`resample_connectivity`.

    Arguments:
        agents: Optional list of agents to add to the network.
        resolver: Optional custom resolver to use, by default will use the
            BatchResolver with a `round_limit` of 2.
        connections: Optional initial list of connections to create in the network.
        ignore_connection_errors: If True will not raise errors if an attempt is
            made to send a message along a non-existent connection.
        enforce_msg_payload_checks: If True will ensure that accepted agent types
            given with the `@msg_payload` decorator are enforced.

    Attributes:
        agents: Mapping between IDs and the corresponding agents in the network.
        graph: Directed graph modelling the connections between agents.
    """

    def __init__(
        self,
        agents: Optional[Iterable[Agent]] = None,
        resolver: Optional[Resolver] = None,
        connections: Optional[Iterable[Tuple[AgentID, AgentID]]] = None,
        ignore_connection_errors: bool = False,
        enforce_msg_payload_checks: bool = True,
    ) -> None:
        super().__init__(
            agents,
            resolver,
            connections,
            ignore_connection_errors,
            enforce_msg_payload_checks,
        )
        # Every requested connection is remembered (with its rate) so the whole
        # topology can be re-drawn by resample_connectivity().
        self._base_connections: List[Tuple[AgentID, AgentID, float]] = []

    def add_connection(self, u: AgentID, v: AgentID, rate: float = 1.0) -> None:
        """Connect the agents with IDs ``u`` and ``v`` with probability ``rate``.

        Arguments:
            u: One agent's ID.
            v: The other agent's ID.
            rate: The connectivity of this connection.
        """
        # The edge pair is only materialised if the random draw succeeds, but
        # the connection is always recorded for future resampling.
        if np.random.random() < rate:
            self.graph.add_edge(u, v)
            self.graph.add_edge(v, u)
        self._base_connections.append((u, v, rate))

    def add_connections_from(
        self,
        ebunch: Iterable[
            Union[Tuple[AgentID, AgentID], Tuple[AgentID, AgentID, float]]
        ],
    ) -> None:
        """Connect all agent ID pairs in ``ebunch``.

        Arguments:
            ebunch: Pairs (or rate-annotated triples) of vertices to be connected.
        """
        for connection in ebunch:
            if len(connection) == 2:
                u, v = connection
                self.add_connection(u, v)
            elif len(connection) == 3:
                u, v, rate = connection
                self.add_connection(u, v, rate)
            else:
                raise ValueError(f'Ill-formatted connection tuple {connection}.')

    def add_connections_between(
        self, us: Iterable[AgentID], vs: Iterable[AgentID], rate: float = 1.0
    ) -> None:
        """Connect all agents in ``us`` to all agents in ``vs``.

        Arguments:
            us: Collection of nodes.
            vs: Collection of nodes.
            rate: The connectivity given to all connections.
        """
        for u, v in product(us, vs):
            self.add_connection(u, v, rate)

    def resample_connectivity(self) -> None:
        """Rebuild the graph, re-drawing every recorded connection at its rate."""
        fresh = nx.DiGraph()
        for agent in self.agents.values():
            fresh.add_node(agent.id)
        for u, v, rate in self._base_connections:
            if np.random.random() < rate:
                fresh.add_edge(u, v)
                fresh.add_edge(v, u)
        self.graph = fresh

    def reset(self) -> None:
        self.resample_connectivity()
        Network.reset(self)
class Policy(ABC): '\n Base Policy class for defining custom policies.\n\n Arguments:\n observation_space: Observation space of the policy.\n action_space: Action space of the policy.\n ' def __init__(self, observation_space: gym.Space, action_space: gym.Space) -> None: self.observation_space = observation_space self.action_space = action_space @abstractmethod def compute_action(self, observation: Any) -> Any: '\n Arguments:\n observation: A single observation for the policy to act on.\n\n Returns:\n The action taken by the policy based on the given observation.\n ' raise NotImplementedError
class Resolver(ABC):
    """Network message resolver.

    A resolver decides how (and in what order) queued messages in a Network are
    processed. For path-independent problems the ordering is arbitrary; for
    order-sensitive ones (e.g. incoming market orders in an LOB) a custom
    implementation encodes the required sequencing.

    Subclasses must implement :meth:`handle_push`, :meth:`resolve` and
    :meth:`reset`.

    Arguments:
        enable_tracking: If True, every pushed message is saved in a
            time-ordered list accessible via :attr:`tracked_messages`.
    """

    def __init__(self, enable_tracking: bool = False) -> None:
        self.enable_tracking = enable_tracking
        self._tracked_messages: List[Message] = []

    def push(self, message: Message) -> None:
        """Entry point used by the Network to hand a message to the resolver."""
        if self.enable_tracking:
            self._tracked_messages.append(message)
        logger.log_msg_send(message)
        self.handle_push(message)

    def clear_tracked_messages(self) -> None:
        """Drop all stored messages.

        Useful when incrementally processing/storing batches of tracked messages.
        """
        del self._tracked_messages[:]

    @property
    def tracked_messages(self) -> List[Message]:
        """All messages seen by the resolver, in order, when tracking is enabled."""
        return self._tracked_messages

    @abstractmethod
    def handle_push(self, message: Message) -> None:
        """Handle a single incoming message.

        Any further messages created during handling (e.g. responses from
        agents) must be routed through :meth:`push`, not this method.
        """
        raise NotImplementedError

    @abstractmethod
    def resolve(self, network: 'Network', contexts: Mapping[AgentID, Context]) -> None:
        """Process queued messages for a (sub)set of network contexts.

        Arguments:
            network: An instance of the Network class to resolve.
            contexts: The contexts for all agents for the current step.
        """
        raise NotImplementedError

    @abstractmethod
    def reset(self) -> None:
        """Reset the resolver, clearing any pending message queues.

        Note:
            Does not clear any tracked messages.
        """
        raise NotImplementedError
class BatchResolver(Resolver):
    """Resolver that delivers messages in discrete rounds.

    In each round every recipient receives all messages queued for it as a
    single batch; any responses are queued for the next round. Resolution
    continues until no messages remain or, if ``round_limit`` is set, until
    that many rounds have run (with messages still queued, an error is raised).

    Arguments:
        enable_tracking: If True, the resolver will save all messages in a
            time-ordered list that can be accessed with :attr:`tracked_messages`.
        round_limit: The maximum number of rounds of messages to resolve. If the
            limit is reached an exception will be thrown. By default the
            resolver will keep resolving until no more messages are sent.
        shuffle_batches: If True, the order in which messages for a particular
            recipient are sent to the recipient will be randomised.
    """

    def __init__(
        self,
        enable_tracking: bool = False,
        round_limit: Optional[int] = None,
        shuffle_batches: bool = False,
    ) -> None:
        super().__init__(enable_tracking)
        self.round_limit = round_limit
        self.shuffle_batches = shuffle_batches
        # Pending messages awaiting the next round, grouped by recipient ID.
        self.messages: DefaultDict[AgentID, List[Message]] = defaultdict(list)

    def reset(self) -> None:
        self.messages.clear()

    def handle_push(self, message: Message) -> None:
        self.messages[message.receiver_id].append(message)

    def resolve(self, network: 'Network', contexts: Mapping[AgentID, Context]) -> None:
        """Deliver queued messages round-by-round until the queue drains.

        Arguments:
            network: An instance of the Network class to resolve.
            contexts: The contexts for all agents for the current step.
        """
        round_num = 0
        while self.messages:
            if self.round_limit is not None and round_num >= self.round_limit:
                raise RuntimeError(
                    f'{len(self.messages)} message(s) still in queue after BatchResolver round limit reached.'
                )
            logger.log_resolver_round(round_num, self.round_limit)
            # Swap the live queue for a fresh one so responses generated this
            # round are collected for the next round.
            batches, self.messages = self.messages, defaultdict(list)
            for receiver_id, batch in batches.items():
                if receiver_id not in contexts:
                    # Recipient is not part of this resolution pass; its
                    # messages are dropped.
                    continue
                deliverable = [
                    m for m in batch if network.has_edge(m.sender_id, m.receiver_id)
                ]
                if self.shuffle_batches:
                    np.random.shuffle(deliverable)
                ctx = contexts[receiver_id]
                responses = ctx.agent.handle_batch(ctx, deliverable)
                if responses is not None:
                    for response_receiver_id, response_payload in responses:
                        network.send(receiver_id, response_receiver_id, response_payload)
            round_num += 1
class RewardFunction(ABC): '\n A trait for types that can compute rewards from a local context.\n\n Note: this trait only support scalar rewards for the time being.\n ' @abstractmethod def reward(self, ctx: Context) -> float: 'Compute the reward from context.\n\n Arguments:\n ctx: The local network context.\n ' raise NotImplementedError def reset(self): 'Resets the reward function.'
class Constant(RewardFunction): '\n A reward function that always returns a given constant.\n\n Attributes:\n value: The reward to be returned in any state.\n ' def __init__(self, value: float=0.0) -> None: self.value = value def reward(self, _: Context) -> float: return self.value
class StackelbergEnv(PhantomEnv):
    """An environment modelling a Stackelberg game/competition.

    Leaders act on odd steps and followers on even steps; the agents that acted
    in a step are rewarded, while the agents acting next observe.

    Arguments:
        num_steps: The maximum number of steps the environment allows per episode.
        network: A Network class or derived class describing the connections
            between agents and agents in the environment.
        leader_agents: A list of Agent IDs to use as 'leaders'.
        follower_agents: A list of Agent IDs to use as 'followers'.
        env_supertype: Optional Supertype class instance for the environment. If
            this is set, it will be sampled from and the :attr:`env_type`
            property set on the class with every call to :meth:`reset()`.
        agent_supertypes: Optional mapping of agent IDs to Supertype class
            instances. If these are set, each supertype will be sampled from and
            the :attr:`type` property set on the related agent with every call
            to :meth:`reset()`.
    """

    def __init__(
        self,
        num_steps: int,
        network: Network,
        leader_agents: Sequence[AgentID],
        follower_agents: Sequence[AgentID],
        env_supertype: Optional[Supertype] = None,
        agent_supertypes: Optional[Mapping[AgentID, Supertype]] = None,
    ) -> None:
        super().__init__(num_steps, network, env_supertype, agent_supertypes)

        for aid in (leader_agents + follower_agents):
            assert aid in network.agent_ids, f"Agent '{aid}' not in network"

        for aid in leader_agents:
            # BUGFIX: this assert previously reused the "not in network" message;
            # it actually checks that the leader/follower sets are disjoint.
            assert (
                aid not in follower_agents
            ), f"Agent '{aid}' cannot be both a leader and a follower"

        self.leader_agents = leader_agents
        self.follower_agents = follower_agents

        # Rewards are buffered here because an agent's reward is computed on the
        # step it acts but returned alongside its next observation.
        self._rewards: Dict[AgentID, Optional[float]] = {}

    def reset(
        self,
        seed: Optional[int] = None,
        options: Optional[Dict[str, Any]] = None,
    ) -> Tuple[Dict[AgentID, Any], Dict[str, Any]]:
        """Reset the environment and return initial observations from the leader agents.

        This method resets the step count and the :attr:`network`. This includes
        all the agents in the network.

        Args:
            seed: An optional seed to use for the new episode.
            options: Additional information to specify how the environment is reset.

        Returns:
            - A dictionary mapping Agent IDs to observations made by the
              respective agents. It is not required for all agents to make an
              initial observation.
            - A dictionary with auxillary information, equivalent to the info
              dictionary in `env.step()`.
        """
        logger.log_reset()

        # Seeding is handled by the base gym.Env reset.
        gym.Env.reset(self, seed=seed, options=options)

        self._current_step = 0

        for sampler in self._samplers:
            sampler.sample()

        if self.env_supertype is not None:
            self.env_type = self.env_supertype.sample()

        self.network.reset()

        self._terminations = set()
        self._truncations = set()
        self._rewards = {aid: None for aid in self.strategic_agent_ids}

        # Only the leaders observe at the start of the episode (they act first).
        self._make_ctxs(
            [aid for aid in self.leader_agents if aid in self.strategic_agent_ids]
        )

        obs = {
            ctx.agent.id: ctx.agent.encode_observation(ctx)
            for ctx in self._ctxs.values()
        }

        logger.log_observations(obs)

        return ({k: v for k, v in obs.items() if v is not None}, {})

    def step(self, actions: Mapping[AgentID, Any]) -> PhantomEnv.Step:
        """Step the simulation forward one step given some set of agent actions.

        Arguments:
            actions: Actions output by the agent policies to be translated into
                messages and passed throughout the network.

        Returns:
            A :class:`PhantomEnv.Step` object containing observations, rewards,
            terminations, truncations and infos.
        """
        self._current_step += 1

        logger.log_step(self.current_step, self.num_steps)
        logger.log_actions(actions)
        logger.log_start_decoding_actions()

        self._make_ctxs(self.agent_ids)

        # Leaders act on odd steps, followers on even steps.
        if self.current_step % 2 == 1:
            acting_agents, next_acting_agents = self.leader_agents, self.follower_agents
        else:
            acting_agents, next_acting_agents = self.follower_agents, self.leader_agents

        self._handle_acting_agents(acting_agents, actions)

        self.resolve_network()

        observations: Dict[AgentID, Any] = {}
        rewards: Dict[AgentID, float] = {}
        terminations: Dict[AgentID, bool] = {}
        truncations: Dict[AgentID, bool] = {}
        infos: Dict[AgentID, Dict[str, Any]] = {}

        for aid in self.strategic_agent_ids:
            if aid in self._terminations or aid in self._truncations:
                continue

            ctx = self._ctxs[aid]

            if aid in next_acting_agents:
                obs = ctx.agent.encode_observation(ctx)
                if obs is not None:
                    observations[aid] = obs
                    infos[aid] = ctx.agent.collect_infos(ctx)

            if aid in acting_agents:
                # Buffered until this agent next observes.
                self._rewards[aid] = ctx.agent.compute_reward(ctx)

            terminations[aid] = ctx.agent.is_terminated(ctx)
            truncations[aid] = ctx.agent.is_truncated(ctx)

            if terminations[aid]:
                self._terminations.add(aid)
            if truncations[aid]:
                self._truncations.add(aid)

        # NOTE(review): `rewards` is still empty at this point; the values just
        # computed live in self._rewards — confirm the intended logging payload.
        logger.log_step_values(observations, rewards, terminations, truncations, infos)
        logger.log_metrics(self)

        terminations['__all__'] = self.is_terminated()
        truncations['__all__'] = self.is_truncated()

        if terminations['__all__'] or truncations['__all__']:
            logger.log_episode_done()
            # On episode end all buffered rewards are flushed.
            return self.Step(observations, self._rewards, terminations, truncations, infos)

        # Otherwise only agents that observed this step receive their buffered reward.
        rewards = {
            aid: self._rewards[aid]
            for aid in observations
            if self._rewards[aid] is not None
        }

        return self.Step(observations, rewards, terminations, truncations, infos)
@dataclass class Supertype(ABC): def sample(self) -> 'Supertype': sampled_fields = {} for field_name in self.__dataclass_fields__: field = getattr(self, field_name) if isinstance(field, Sampler): if hasattr(self, '_managed'): sampled_fields[field_name] = field.value else: sampled_fields[field_name] = field.sample() else: sampled_fields[field_name] = field return self.__class__(**sampled_fields) def to_obs_space_compatible_type(self) -> Dict[(str, ObsSpaceCompatibleTypes)]: '\n Converts the parameters of the Supertype into a dict for use in observation\n spaces.\n ' return {name: _to_compatible_type(name, getattr(self, name)) for name in self.__dataclass_fields__} def to_obs_space(self, low=(- np.inf), high=np.inf) -> gym.Space: "\n Converts the parameters of the Supertype into a `gym.Space` representing\n the space.\n\n All elements of the space span the same range given by the `low` and `high`\n arguments.\n\n Arguments:\n low: Optional 'low' bound for the space (default is -∞)\n high: Optional 'high' bound for the space (default is ∞)\n " return gym.spaces.Dict({name: _to_obs_space(name, getattr(self, name), low, high) for name in self.__dataclass_fields__})
def _to_compatible_type(field: str, obj: Any) -> ObsSpaceCompatibleTypes: 'Internal function.' if isinstance(obj, dict): return {key: _to_compatible_type(key, value) for (key, value) in obj.items()} if isinstance(obj, (float, int)): return np.array([obj], dtype=np.float32) if isinstance(obj, list): return [_to_compatible_type(f'{field}[{i}]', value) for (i, value) in enumerate(obj)] if isinstance(obj, tuple): return tuple((_to_compatible_type(f'{field}[{i}]', value) for (i, value) in enumerate(obj))) if isinstance(obj, np.ndarray): return obj raise ValueError(f"Can't encode field '{field}' with type '{type(obj)}' into obs space compatible type")
def _to_obs_space(field: str, obj: Any, low: float, high: float) -> gym.Space: 'Internal function.' if isinstance(obj, dict): return gym.spaces.Dict({key: _to_obs_space(key, value, low, high) for (key, value) in obj.items()}) if isinstance(obj, float): return gym.spaces.Box(low, high, (1,), np.float32) if isinstance(obj, int): return gym.spaces.Box(low, high, (1,), np.float32) if isinstance(obj, (list, tuple)): return gym.spaces.Tuple([_to_obs_space(f'{field}[{i}]', value, low, high) for (i, value) in enumerate(obj)]) if isinstance(obj, np.ndarray): return gym.spaces.Box(low, high, obj.shape, np.float32) raise ValueError(f"Can't encode field '{field}' with type '{type(obj)}' into gym.Space")
class TelemetryLogger():
    """Logs episodes either to the terminal or to a JSON stream file.

    An instance of this class is automatically initialised when the Phantom
    library is imported. It should be configured by the user using the
    :meth:`configure_print_logging` and :meth:`configure_file_logging` methods.
    Both print and file logging are turned off by default.

    .. warning::
        This feature will not produce desired results when using any form of
        multiprocessing / multiple workers! This feature is intended for
        debugging and testing purposes when using manual episode invocation.

    .. note::
        Any custom derived environments that modify the :meth:`reset` and
        :meth:`step` methods should take care to call the required class
        methods to enable telemetry logging.
    """

    def __init__(self) -> None:
        self._enable_print = False

        # Each _print_* field is False (off), True (all agents) or a sequence
        # of agent IDs to restrict output to.
        self._print_actions: Union[bool, Sequence[AgentID]] = False
        self._print_observations: Union[bool, Sequence[AgentID]] = False
        self._print_rewards: Union[bool, Sequence[AgentID]] = False
        self._print_terminations: Union[bool, Sequence[AgentID]] = False
        self._print_truncations: Union[bool, Sequence[AgentID]] = False
        self._print_infos: Union[bool, Sequence[AgentID]] = False
        self._print_messages: Union[bool, Sequence[AgentID]] = False
        self._print_metrics: Optional[Mapping[str, 'Metric']] = None

        self._file_metrics: Optional[Mapping[str, 'Metric']] = None
        self._output_file: Optional[io.TextIOBase] = None
        self._human_readable: bool = False

        # In-progress episode dict; non-None only while file logging is active.
        self._current_episode = None

    def configure_print_logging(
        self,
        enable: Union[bool, None] = None,
        print_actions: Union[bool, Sequence[AgentID], None] = None,
        print_observations: Union[bool, Sequence[AgentID], None] = None,
        print_rewards: Union[bool, Sequence[AgentID], None] = None,
        print_terminations: Union[bool, Sequence[AgentID], None] = None,
        print_truncations: Union[bool, Sequence[AgentID], None] = None,
        print_infos: Union[bool, Sequence[AgentID], None] = None,
        print_messages: Union[bool, Sequence[AgentID], None] = None,
        metrics: Optional[Mapping[str, 'Metric']] = None,
    ) -> None:
        """Configures logging to the terminal/stdout.

        All options except :attr:`metrics` will log for:

        - All agents if True is given.
        - No agents if False is given.
        - A subset of agents if a list of :type:`AgentID`s is given.
        - The pre-existing choice if None is given.

        Arguments:
            enable: If False, nothing will be logged to the terminal.
            print_actions: Updates whether and what action data will be logged.
            print_observations: Updates whether and what observation data will be logged.
            print_rewards: Updates whether and what reward data will be logged.
            print_terminations: Updates whether and what termination data will be logged.
            print_truncations: Updates whether and what truncation data will be logged.
            print_infos: Updates whether and what info data will be logged.
            print_messages: Updates whether and what message data will be logged.
            metrics: Sets which metrics will be logged.
        """
        if enable is not None:
            self._enable_print = enable
        if print_actions is not None:
            self._print_actions = print_actions
        if print_observations is not None:
            self._print_observations = print_observations
        if print_rewards is not None:
            self._print_rewards = print_rewards
        if print_terminations is not None:
            self._print_terminations = print_terminations
        if print_truncations is not None:
            self._print_truncations = print_truncations
        if print_infos is not None:
            self._print_infos = print_infos
        if print_messages is not None:
            self._print_messages = print_messages
        if metrics is not None:
            self._print_metrics = metrics

    def configure_file_logging(
        self,
        file_path: Union[str, Path, None],
        append: bool = True,
        human_readable: Optional[bool] = None,
        metrics: Optional[Mapping[str, 'Metric']] = None,
    ) -> None:
        """Configures logging to a file in the JSON stream format (each episode
        is a JSON object separated by a newline).

        Arguments:
            file_path: The path to the file to save telemetry to.
            append: If True will append to the file if it already exists, if
                False will overwrite any existing data.
            human_readable: If True will save the data in a human readable format.
            metrics: Sets which metrics will be logged.
        """
        # BUGFIX: close any previously opened output file so reconfiguring does
        # not leak the old handle.
        if self._output_file is not None:
            self._output_file.close()

        if file_path is None:
            self._output_file = None
        else:
            self._output_file = open(file_path, 'a' if append else 'w')

        if human_readable is not None:
            self._human_readable = human_readable

        if metrics is not None:
            self._file_metrics = metrics

    def log_reset(self) -> None:
        if self._output_file is not None:
            self._current_episode = {
                'start': str(datetime.now()),
                'steps': [{'messages': [], 'metrics': []}],
            }

        if self._enable_print:
            print(colored('=' * 80, attrs=['dark']))
            print(colored('ENV RESET', attrs=['bold']))
            print(colored('-' * 80, attrs=['dark']))

    def log_step(self, current_step: int, num_steps: int) -> None:
        if self._current_episode is not None:
            self._current_episode['steps'].append({'messages': []})

        if self._enable_print:
            print(colored('-' * 80, attrs=['dark']))
            print(colored(f'STEP {current_step}/{num_steps}:', attrs=['bold']))

    def log_start_decoding_actions(self) -> None:
        if self._enable_print:
            print(_t(1) + colored('DECODING ACTIONS:', color='cyan'))

    def log_actions(self, actions: Mapping[AgentID, Action]) -> None:
        if self._current_episode is not None:
            self._current_episode['steps'][-1]['actions'] = actions

        if self._enable_print and self._print_actions:
            print(_t(1) + colored('ACTIONS:', color='cyan'))
            if not isinstance(self._print_actions, bool):
                actions = {
                    a: act for a, act in actions.items() if a in self._print_actions
                }
            if len(actions) > 0:
                for agent, action in actions.items():
                    print(_t(2) + f'{agent}: {_pretty_format_space(action)}')
            else:
                print(_t(2) + 'None')

    def log_observations(self, observations: Mapping[AgentID, Observation]) -> None:
        if self._current_episode is not None:
            self._current_episode['steps'][-1]['observations'] = observations

        if self._enable_print and self._print_observations:
            print(_t(1) + colored('OBSERVATIONS:', color='cyan'))
            if not isinstance(self._print_observations, bool):
                observations = {
                    a: obs
                    for a, obs in observations.items()
                    if a in self._print_observations
                }
            if len(observations) > 0:
                for agent, observation in observations.items():
                    print(_t(2) + f'{agent}: {_pretty_format_space(observation)}')
            else:
                print(_t(2) + 'None')

    def log_rewards(self, rewards: Mapping[AgentID, float]) -> None:
        if self._current_episode is not None:
            self._current_episode['steps'][-1]['rewards'] = rewards

        if self._enable_print and self._print_rewards:
            print(_t(1) + colored('REWARDS:', color='cyan'))
            if not isinstance(self._print_rewards, bool):
                rewards = {
                    a: rew for a, rew in rewards.items() if a in self._print_rewards
                }
            if len(rewards) > 0:
                for agent, reward in rewards.items():
                    print(_t(2) + f'{agent}: {reward:.2f}')
            else:
                print(_t(2) + 'None')

    def log_terminations(self, terminations: Mapping[AgentID, bool]) -> None:
        # Only the IDs of terminated agents are recorded/printed.
        terminations = [a for a, done in terminations.items() if done]

        if self._current_episode is not None:
            self._current_episode['steps'][-1]['terminations'] = terminations

        if self._enable_print and self._print_terminations:
            print(_t(1) + colored('TERMINATIONS:', color='cyan'))
            if not isinstance(self._print_terminations, bool):
                terminations = [
                    a for a in terminations if a in self._print_terminations
                ]
            if len(terminations) > 0:
                print(_t(2) + ', '.join(terminations))
            else:
                print(_t(2) + 'None')

    def log_truncations(self, truncations: Mapping[AgentID, bool]) -> None:
        # Only the IDs of truncated agents are recorded/printed.
        truncations = [a for a, done in truncations.items() if done]

        if self._current_episode is not None:
            self._current_episode['steps'][-1]['truncations'] = truncations

        if self._enable_print and self._print_truncations:
            print(_t(1) + colored('TRUNCATIONS:', color='cyan'))
            if not isinstance(self._print_truncations, bool):
                truncations = [a for a in truncations if a in self._print_truncations]
            if len(truncations) > 0:
                print(_t(2) + ', '.join(truncations))
            else:
                print(_t(2) + 'None')

    def log_infos(self, infos: Mapping[AgentID, Any]) -> None:
        if self._current_episode is not None:
            self._current_episode['steps'][-1]['infos'] = infos

        if self._enable_print and self._print_infos:
            print(_t(1) + colored('INFOS:', color='cyan'))
            if not isinstance(self._print_infos, bool):
                infos = {a: info for a, info in infos.items() if a in self._print_infos}
            # Empty/None infos are never printed.
            infos = {
                a: info
                for a, info in infos.items()
                if info is not None and info != {}
            }
            if len(infos) > 0:
                for agent, info in infos.items():
                    print(_t(2) + f'{agent}: {info}')
            else:
                print(_t(2) + 'None')

    def log_step_values(
        self,
        observations: Mapping[AgentID, Observation],
        rewards: Mapping[AgentID, float],
        terminations: Mapping[AgentID, bool],
        truncations: Mapping[AgentID, bool],
        infos: Mapping[AgentID, Any],
    ) -> None:
        self.log_observations(observations)
        self.log_rewards(rewards)
        self.log_terminations(terminations)
        self.log_truncations(truncations)
        self.log_infos(infos)

    def log_fsm_transition(self, current_stage: StageID, next_stage: StageID) -> None:
        if self._current_episode is not None:
            self._current_episode['steps'][-1]['fsm_current_stage'] = current_stage
            self._current_episode['steps'][-1]['fsm_next_stage'] = next_stage

        if self._enable_print:
            print(
                _t(1)
                + colored(f'FSM TRANSITION: {current_stage} --> {next_stage}', 'magenta')
            )

    def log_start_resolving_msgs(self) -> None:
        if self._enable_print:
            print(_t(1) + colored('RESOLVING MESSAGES:', color='cyan'))

    # NOTE: parameter names `round` and `max` shadow builtins but are kept for
    # backwards compatibility with existing keyword callers.
    def log_resolver_round(self, round: int, max: Optional[int]) -> None:
        if self._enable_print and self._print_messages:
            print(
                _t(1)
                + colored(
                    f"Batch Resolver round {round + 1}/{max or 'Inf'}:", color='grey'
                )
            )

    def log_msg_send(self, message: Message) -> None:
        self._print_msg(message, 'SEND')

    def log_msg_recv(self, message: Message) -> None:
        if self._current_episode is not None:
            self._current_episode['steps'][-1]['messages'].append(asdict(message))

        self._print_msg(message, 'RECV')

    def log_metrics(self, env: 'PhantomEnv') -> None:
        # BUGFIX: _file_metrics is None unless configure_file_logging() was
        # given metrics; previously this raised AttributeError when file
        # logging was enabled without any metrics configured.
        if self._current_episode is not None and self._file_metrics is not None:
            self._current_episode['steps'][-1]['metrics'] = {
                name: metric.extract(env)
                for name, metric in self._file_metrics.items()
            }

        if self._enable_print and self._print_metrics is not None:
            print(_t(1) + colored('METRICS:', color='cyan'))
            if len(self._print_metrics) > 0:
                for name, metric in self._print_metrics.items():
                    print(_t(2) + f'{name: <30} : {metric.extract(env)}')
            else:
                print(_t(2) + 'None')

    def log_episode_done(self) -> None:
        self._write_episode_to_file()

        if self._enable_print:
            print(_t(1) + colored('EPISODE DONE', color='green', attrs=['bold']))

    def _print_msg(self, message: Message, string: str) -> None:
        if self._should_print_msg(message):
            route_str = f'{message.sender_id: >10} --> {message.receiver_id: <10}'
            msg_name = f'({message.payload.__class__.__name__})'
            fields = ', '.join(
                f'{k}: {v}' for k, v in message.payload.__dict__.items()
            )
            print(
                _t(2)
                + f'MSG {string}: {route_str} {msg_name: <20}'
                + colored(fields, attrs=['dark'])
            )

    def _write_episode_to_file(self) -> None:
        if self._output_file is not None and self._current_episode is not None:
            json.dump(
                self._current_episode,
                self._output_file,
                indent=2 if self._human_readable else None,
                cls=NumpyArrayEncoder,
            )
            self._output_file.write('\n')
            self._output_file.flush()
            self._current_episode = None

    def _should_print_msg(self, message: Message) -> bool:
        # Prints when message printing is globally on, or when either endpoint
        # of the message is in the configured agent subset.
        return (
            self._enable_print
            and self._print_messages
            and (
                isinstance(self._print_messages, bool)
                or message.sender_id in self._print_messages
                or message.receiver_id in self._print_messages
            )
        )
def _t(n: int) -> str: return ((' ' * n) * TAB_SIZE)
def _pretty_format_space(space) -> str: if isinstance(space, tuple): return (('(' + ', '.join((_pretty_format_space(x) for x in space))) + ')') if isinstance(space, dict): return (('{' + ', '.join((((k + ': ') + _pretty_format_space(v)) for (k, v) in space.items()))) + '}') if isinstance(space, np.ndarray): return (str(space[0]) if (space.shape == (1,)) else str(space)) if isinstance(space, (int, float, np.number, np.floating)): return str(space) raise NotImplementedError(type(space))
class NumpyArrayEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, np.ndarray): return o.tolist() return json.JSONEncoder.default(self, o)
@dataclass(frozen=True) class TrainingResults(): '\n Returned when :func:`Trainer.train` is run. By default, only contains all policy\n objects. Can be extended by :class:`Trainer` subclasses to return additional info.\n\n Attributes:\n policies: A mapping of policy IDs to policy objects for all policies, not just\n trained policies.\n ' policies: Dict[(PolicyID, Policy)]
class Trainer(ABC):
    """
    Base Trainer class providing interfaces and common functions for subclassed trainers.

    Some basic tensorboard logging via tensorboardX is included.

    Subclasses must set the :attr:`policy_class` class property and implement either the
    :meth:`train` or :meth:`training_step` methods.

    Arguments:
        tensorboard_log_dir: If provided, will save metrics to the given directory
            in a format that can be viewed with tensorboard.

    Note: These classes and interfaces are new in Phantom and are subject to change in
    the future.
    """

    # Must be set by subclasses: the Policy type this trainer trains.
    policy_class: Type[Policy]

    def __init__(self, tensorboard_log_dir: Optional[str]=None) -> None:
        # Guard against subclasses that forgot to set policy_class.
        if (self.policy_class is None):
            raise ValueError
        self.tensorboard_log_dir = tensorboard_log_dir
        # Metrics to record each iteration (populated in train()).
        self.metrics: Mapping[str, Metric] = {}
        # Per-iteration caches; flushed and cleared by tbx_write_values().
        self.logged_metrics: DefaultDict[str, List[float]] = defaultdict(list)
        self.logged_rewards: DefaultDict[AgentID, List[float]] = defaultdict(list)
        if (tensorboard_log_dir is not None):
            # Each run gets its own timestamped sub-directory.
            current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
            tb_dir = Path(tensorboard_log_dir, current_time)
            self.tbx_writer = tbx.SummaryWriter(tb_dir)

    def train(self, env_class: Type[PhantomEnv], num_iterations: int, policies: PolicyMapping, policies_to_train: Sequence[PolicyID], env_config: Optional[Mapping[str, Any]]=None, metrics: Optional[Mapping[str, Metric]]=None) -> TrainingResults:
        """
        Entry point to training.

        For some algorithms this implementation is sufficient and only the
        :meth:`training_step` method needs to be implemented by the sub-class (for
        example, see the Q-Learning Trainer). For other algorithms it may be necessary
        to override this implementation (for example, see the PPO Trainer).

        Arguments:
            env_class: The environment class to train the policy/policies with.
            num_iterations: The number of units of training, defined by each algorithm,
                to perform.
            policies: A mapping of policy IDs to the agents to use them along with any
                configuration options.
            policies_to_train: A list of IDs of policies to train (must be of the Policy
                type related to the Trainer).
            env_config: Configuration parameters to pass to the environment init method.
            metrics: Optional set of metrics to record and log.

        Returns:
            A :class:`TrainingResults` object containing all policies (including those
            not trained with the Trainer).

        Policy Mapping Usage:
            .. code-block:: python

                policies = {
                    # Type[Agent] -- all agents of this class use the trainer's
                    # default policy:
                    "PolicyID1": SomeAgentClass,

                    # List[AgentID] -- all agents with these IDs use the trainer's
                    # default policy:
                    "PolicyID2": ["Agent1", "Agent2"],

                    # Tuple of a custom policy class plus either an agent class or
                    # a list of agent IDs, optionally followed by a config mapping:
                    "PolicyID3": (CustomPolicyClass1, SomeAgentClass),
                    "PolicyID4": (CustomPolicyClass1, SomeAgentClass, {...}),
                    "PolicyID5": (CustomPolicyClass1, ["Agent3", "Agent4"]),
                    "PolicyID6": (CustomPolicyClass1, ["Agent5", "Agent6"], {...}),
                }
        """
        env_config = (env_config or {})
        self.metrics = (metrics or {})
        # Start each training run with empty logging caches.
        self.logged_metrics = defaultdict(list)
        self.logged_rewards = defaultdict(list)
        if (len(policies_to_train) == 0):
            raise ValueError(f'Must provide at least one policy to train to {self.__class__.__name__}')
        # Rejects Ranges anywhere, and Samplers outside of supertype entries.
        check_env_config(env_config)
        env = env_class(**env_config)
        # NOTE(review): reset before building policy specs -- presumably required
        # so agent observation/action spaces are initialised; confirm.
        env.reset()
        (policy_mapping, policy_instances) = self.setup_policy_specs_and_mapping(env, policies)
        # Every policy selected for training must be an instance of this
        # trainer's policy type.
        for policy_to_train in policies_to_train:
            if (not isinstance(policy_instances[policy_to_train], self.policy_class)):
                raise ValueError(f"Policy ID '{policy_to_train}' in 'policies_to_train' must be of trainer policy type '{self.policy_class.__name__}'")
        for i in rich.progress.track(range(num_iterations), description='Training...'):
            self.training_step(env, policy_mapping, policy_instances, policies_to_train)
            # Flush this iteration's logged metrics/rewards to tensorboard.
            self.tbx_write_values(i)
        return TrainingResults(policy_instances)

    def training_step(self, env: PhantomEnv, policy_mapping: Mapping[AgentID, PolicyID], policies: Mapping[PolicyID, Policy], policies_to_train: Sequence[PolicyID]) -> None:
        """
        Performs one unit of policy training.

        Arguments:
            env: The environment instance to use.
            policy_mapping: A mapping of agent IDs to policy IDs.
            policies: A mapping of policy IDs to policy class instances.
            policies_to_train: A list of IDs of policies to train.
        """
        raise NotImplementedError

    def log_metrics(self, env: PhantomEnv) -> None:
        """Logs the trainer's set metrics from a provided env."""
        for (name, metric) in self.metrics.items():
            self.logged_metrics[name].append(metric.extract(env))

    def log_vec_metrics(self, envs: Sequence[PhantomEnv]) -> None:
        """Logs the trainer's set metrics from a provided list of envs.

        Each metric is averaged across the given envs before being logged.
        """
        for (name, metric) in self.metrics.items():
            self.logged_metrics[name].append(np.mean([metric.extract(env) for env in envs]))

    def log_rewards(self, rewards: Mapping[AgentID, float]) -> None:
        """Logs the rewards from a provided env."""
        for (agent_id, reward) in rewards.items():
            self.logged_rewards[agent_id].append(reward)

    def log_vec_rewards(self, rewards: Sequence[Mapping[AgentID, float]]) -> None:
        """Logs the rewards from a provided list of envs."""
        for sub_rewards in rewards:
            for (agent_id, reward) in sub_rewards.items():
                self.logged_rewards[agent_id].append(reward)

    def tbx_write_values(self, step: int) -> None:
        """Writes logged metrics and rewards to tensorboardX and flushes the cache.

        Writes are no-ops when no tensorboard_log_dir was given (see
        :meth:`tbx_write_scalar`), but the caches are cleared regardless.
        """
        for (name, metric) in self.metrics.items():
            self.tbx_write_scalar(name, metric.reduce(self.logged_metrics[name]), step)
        # Collect all agents' rewards so a combined mean can also be written.
        group_reward_count = []
        for (agent_id, rewards) in self.logged_rewards.items():
            self.tbx_write_scalar(f'rewards/{agent_id}', np.mean(rewards), step)
            group_reward_count += rewards
        self.tbx_write_scalar('rewards/group', np.mean(group_reward_count), step)
        # Reset the per-iteration caches.
        self.logged_metrics = defaultdict(list)
        self.logged_rewards = defaultdict(list)

    def tbx_write_scalar(self, name: str, value: float, step: int) -> None:
        """Writes a custom scalar value to tensorboard (no-op if logging disabled)."""
        if (self.tensorboard_log_dir is not None):
            self.tbx_writer.add_scalar(name, value, global_step=step)

    def setup_policy_specs_and_mapping(self, env: PhantomEnv, policies: PolicyMapping) -> Tuple[Dict[AgentID, PolicyID], Dict[PolicyID, Policy]]:
        """
        Parses a policy mapping object, validates it against an env instance and returns
        mappings of AgentID -> PolicyID and PolicyID -> Policy.

        Useful for when defining custom :meth:`train` methods.
        """

        @dataclass(frozen=True)
        class PolicySpec():
            """Defines a policy that has been reduced to a single obs and action space."""

            observation_space: gym.Space
            action_space: gym.Space
            # None means "use the trainer's default policy_class".
            policy_class: Optional[Type[Policy]] = None
            config: Mapping[str, Any] = field(default_factory=dict)

        policy_specs: Dict[PolicyID, PolicySpec] = {}
        policy_mapping: Dict[AgentID, PolicyID] = {}
        agents_with_policies: List[AgentID] = []
        for (policy_name, policy_config) in policies.items():
            # Form 1: Type[Agent] -- all agents of the class share the default policy.
            if (isclass(policy_config) and issubclass(policy_config, Agent)):
                agent_class = policy_config
                agent_ids = list(env.network.get_agents_with_type(agent_class).keys())
                # NOTE(review): spaces are taken from the first matching agent;
                # assumes all agents sharing a policy have identical spaces and
                # that at least one agent matches -- confirm.
                policy_specs[policy_name] = PolicySpec(action_space=env.agents[agent_ids[0]].action_space, observation_space=env.agents[agent_ids[0]].observation_space)
                for agent_id in agent_ids:
                    policy_mapping[agent_id] = policy_name
            # Form 2: List[AgentID] -- the listed agents share the default policy.
            elif isinstance(policy_config, list):
                agent_ids = policy_config
                policy_specs[policy_name] = PolicySpec(action_space=env.agents[agent_ids[0]].action_space, observation_space=env.agents[agent_ids[0]].observation_space)
                for agent_id in agent_ids:
                    policy_mapping[agent_id] = policy_name
            # Forms 3-6: (PolicyClass, agents[, config]) -- custom policy class.
            elif isinstance(policy_config, tuple):
                if (len(policy_config) == 2):
                    (policy_class, agents_param) = policy_config
                    config: Mapping[str, Any] = {}
                else:
                    (policy_class, agents_param, config) = policy_config
                if (isclass(agents_param) and issubclass(agents_param, Agent)):
                    agent_ids = list(env.network.get_agents_with_type(agents_param).keys())
                elif isinstance(agents_param, list):
                    agent_ids = agents_param
                else:
                    raise ValueError
                policy_specs[policy_name] = PolicySpec(policy_class=policy_class, action_space=env.agents[agent_ids[0]].action_space, observation_space=env.agents[agent_ids[0]].observation_space, config=config)
                for agent_id in agent_ids:
                    policy_mapping[agent_id] = policy_name
            else:
                raise TypeError(type(policy_config))
            agents_with_policies += agent_ids
        # Every acting agent (one with an action space) must be covered.
        for agent in env.agents.values():
            if ((agent.action_space is not None) and (agent.id not in agents_with_policies)):
                raise ValueError(f"Agent '{agent.id}' takes actions but is not assigned a policy.")
        # Instantiate each policy, falling back to the trainer's default class.
        policy_instances = {name: (self.policy_class if (spec.policy_class is None) else spec.policy_class)(spec.observation_space, spec.action_space, **spec.config) for (name, spec) in policy_specs.items()}
        return (policy_mapping, policy_instances)
def flatten(xs: Iterable[Any]) -> List[Any]:
    """Recursively flatten an iterable object into a list.

    Strings and bytes are treated as atomic values rather than as iterables of
    characters. (Recursing into them would never terminate: a one-character
    string is itself an iterable that yields that same string.)

    Arguments:
        xs: The iterable object.

    Returns:
        A flat list of all non-iterable leaf values in ``xs``.
    """
    flattened: List[Any] = []
    for x in xs:
        if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
            # extend() avoids the quadratic behaviour of repeated list addition.
            flattened.extend(flatten(x))
        else:
            flattened.append(x)
    return flattened
def contains_type(value: Any, type_: Type) -> bool:
    """Return True if ``value`` is, or recursively contains, an instance of ``type_``.

    Recurses through dataclass fields, mapping values and iterable elements.
    Strings are treated as leaves so their characters are never iterated.
    """
    if isinstance(value, type_):
        return True
    if isinstance(value, str):
        return False
    if hasattr(value, '__dataclass_fields__'):
        return any(
            contains_type(getattr(value, field_name), type_)
            for field_name in value.__dataclass_fields__
        )
    if isinstance(value, collections.abc.Mapping):
        return any(contains_type(item, type_) for item in value.values())
    if isinstance(value, collections.abc.Iterable):
        return any(contains_type(item, type_) for item in value)
    return False
def collect_instances_of_type(type_: Type[CollectedType], obj: Any, collection: Optional[List[CollectedType]]=None) -> List[CollectedType]:
    """Recursively gather every distinct instance of ``type_`` reachable from ``obj``.

    Walks mapping values, iterable elements and dataclass fields. Strings are
    treated as leaves. Matches are de-duplicated by equality (``==``), not
    identity, and the accumulated list is returned.
    """
    collection = collection or []
    if isinstance(obj, type_) and obj not in collection:
        collection.append(obj)
    elif isinstance(obj, str):
        # Strings are leaves: never recurse into their characters.
        pass
    elif isinstance(obj, collections.abc.Mapping):
        for item in obj.values():
            collection = collect_instances_of_type(type_, item, collection)
    elif isinstance(obj, collections.abc.Iterable):
        for item in obj:
            collection = collect_instances_of_type(type_, item, collection)
    elif hasattr(obj, '__dataclass_fields__'):
        for field_name in obj.__dataclass_fields__:
            collection = collect_instances_of_type(type_, getattr(obj, field_name), collection)
    return collection
def collect_instances_of_type_with_paths(type_: Type[CollectedType], obj: Any, collection: Optional[List[Tuple[CollectedType, List[ObjPath]]]]=None, current_path: Optional[ObjPath]=None) -> List[Tuple[CollectedType, List[ObjPath]]]:
    """Recursively gather instances of ``type_`` along with the paths to reach them.

    Path elements are: an ``int`` for a sequence index, ``(True, key)`` for a
    mapping key, and ``(False, name)`` for an attribute name (see
    :func:`update_val`, which consumes these paths). Matches are de-duplicated
    by equality: a repeated value gains an extra path rather than a new entry.
    """
    collection = collection or []
    current_path = current_path or []
    if isinstance(obj, type_):
        # De-duplicate by equality; record an additional path for a value
        # that has already been seen.
        for (other, paths) in collection:
            if obj == other:
                paths.append(current_path)
                break
        else:
            collection.append((obj, [current_path]))
    elif isinstance(obj, str):
        # Strings are leaves: never recurse into their characters.
        pass
    elif isinstance(obj, collections.abc.Mapping):
        for (key, val) in obj.items():
            collection = collect_instances_of_type_with_paths(type_, val, collection, current_path + [(True, key)])
    elif isinstance(obj, collections.abc.Iterable):
        for (i, val) in enumerate(obj):
            collection = collect_instances_of_type_with_paths(type_, val, collection, current_path + [i])
    elif hasattr(obj, '__dataclass_fields__'):
        for field_name in obj.__dataclass_fields__:
            collection = collect_instances_of_type_with_paths(type_, getattr(obj, field_name), collection, current_path + [(False, field_name)])
    elif inspect.isclass(obj):
        # BUG FIX: this branch previously recursed via collect_instances_of_type,
        # which appends bare instances (not (instance, paths) tuples) into
        # `collection` and drops the paths entirely, corrupting the result.
        # Recurse with the path-aware function instead, recording class
        # attributes as attribute-access path elements.
        for attr_name in obj.__dict__:
            collection = collect_instances_of_type_with_paths(type_, getattr(obj, attr_name), collection, current_path + [(False, attr_name)])
    return collection
def update_val(obj: Any, path: ObjPath, new_val: Any) -> None:
    """Set the value at ``path`` within ``obj`` to ``new_val``.

    ``path`` is a path as produced by :func:`collect_instances_of_type_with_paths`:
    an ``int`` element indexes a sequence, ``(True, key)`` indexes a mapping and
    ``(False, name)`` accesses an attribute.
    """
    # Walk down to the direct parent of the target value.
    for step in path[:(- 1)]:
        if isinstance(step, int):
            obj = obj[step]
        elif step[0] is True:
            obj = obj[step[1]]
        else:
            obj = getattr(obj, step[1])
    # Apply the final path element as a write instead of a read.
    last = path[(- 1)]
    if isinstance(last, int):
        obj[last] = new_val
    elif last[0] is True:
        obj[last[1]] = new_val
    else:
        setattr(obj, last[1], new_val)
def check_env_config(env_config: Mapping[str, Any]) -> None:
    """Validate an env_config mapping before it is passed to an env constructor.

    Raises:
        TypeError: If the config contains a :class:`Range` anywhere, or a
            :class:`Sampler` outside of the 'env_supertype'/'agent_supertypes'
            entries (samplers are only resolved via supertypes).
    """
    # Previously these raised a bare `Exception` with no message; TypeError (a
    # subclass of Exception, so existing handlers still match) with a message
    # matches the equivalent check in rollout().
    if contains_type(env_config, Range):
        raise TypeError(
            "env_config should not contain instances of classes inheriting from Range"
        )
    for (name, value) in env_config.items():
        if name not in ('env_supertype', 'agent_supertypes'):
            if contains_type(value, Sampler):
                raise TypeError(
                    f"env_config entry '{name}' should not contain instances of "
                    "classes inheriting from Sampler"
                )
def show_pythonhashseed_warning() -> None:
    """Print a reproducibility warning if the $PYTHONHASHSEED env var is unset."""
    if 'PYTHONHASHSEED' in os.environ:
        return
    lines = [
        '================================================================',
        'WARNING: The $PYTHONHASHSEED environment variable is not set!',
        'Please set this before using Phantom to improve reproducibility.',
        '================================================================',
    ]
    print(colored('\n'.join(lines), 'yellow'))
def rich_progress(text: str) -> rich.progress.Progress:
    """Build a rich progress bar with an iterations-per-second rate column.

    Arguments:
        text: Label displayed to the left of the bar.
    """

    class RateColumn(rich.progress.ProgressColumn):
        """Renders human readable processing rate."""

        def render(self, task):
            """Render the speed in iterations per second."""
            # Prefer the final measured speed once the task has finished.
            speed = task.finished_speed or task.speed
            if speed is None:
                return rich.text.Text('', style='progress.percentage')
            # Scale large rates into x10^3 / x10^6 / ... so they stay readable.
            unit, suffix = rich.progress.filesize.pick_unit_and_suffix(
                int(speed), ['', '×10³', '×10⁶', '×10⁹', '×10¹²'], 1000
            )
            return rich.text.Text(
                f'{speed / unit:.1f}{suffix} it/s', style='progress.percentage'
            )

    columns = (
        rich.progress.TextColumn(text),
        rich.progress.BarColumn(),
        RateColumn(),
        rich.progress.MofNCompleteColumn(),
        rich.progress.TimeElapsedColumn(),
        rich.progress.TimeRemainingColumn(),
    )
    return rich.progress.Progress(*columns)
class Range(ABC, Generic[T]):
    """Ranges are used in Agent/Environment Supertypes to define how they are sampled.

    Ranges are designed to be used when generating rollouts post-training and a
    non-stochastic distribution of values is required for the Supertype sampling.

    Ranges return a fixed number of total values and as such all values must be returned
    in one go with the :meth:`values` method.
    """

    def __init__(self, name: Optional[str]=None) -> None:
        # Optional label used when reporting the range's values in rollouts.
        self.name = name

    @abstractmethod
    def values(self) -> Sequence[T]:
        """
        Returns the complete set of values defined by the Range.
        """
        raise NotImplementedError

    def __repr__(self) -> str:
        qualifier = '' if self.name is None else f" name='{self.name}'"
        return f'<{self.__class__.__name__}{qualifier}>'
class UniformRange(Range[float]):
    """Returns an array of values spaced by a step between a start and end value.

    Uses :func:`np.arange` internally.
    """

    def __init__(self, start: float, end: float, step: float=1.0, name: Optional[str]=None, dtype=None) -> None:
        super().__init__(name)
        self.start = start
        self.end = end
        self.step = step
        self.dtype = dtype

    def values(self) -> np.ndarray:
        """Return ``np.arange(start, end, step)`` with the configured dtype."""
        return np.arange(self.start, self.end, self.step, dtype=self.dtype)
class LinspaceRange(Range[float]):
    """Returns an array of n values evenly distributed between a start and end value.

    Uses :func:`np.linspace` internally.
    """

    def __init__(self, start: float, end: float, n: int, name: Optional[str]=None, dtype=None) -> None:
        super().__init__(name)
        self.start = start
        self.end = end
        self.n = n
        self.dtype = dtype

    def values(self) -> np.ndarray:
        """Return ``np.linspace(start, end, n)`` with the configured dtype."""
        return np.linspace(self.start, self.end, self.n, dtype=self.dtype)
class UnitArrayUniformRange(UniformRange, Range[np.ndarray]):
    """
    Returns a list of n shape (1,) numpy arrays with values spaced by a step between a
    start and end value. Useful for encoding observation spaces with single element
    boxes.

    Uses :func:`np.arange` internally.
    """

    def values(self) -> List[np.ndarray]:
        # Wrap each scalar in a shape-(1,) array for single-element Box spaces.
        scalars = np.arange(self.start, self.end, self.step, dtype=self.dtype)
        return [np.array([scalar]) for scalar in scalars]
class UnitArrayLinspaceRange(LinspaceRange, Range[np.ndarray]):
    """
    Returns a list of n shape (1,) numpy arrays with values evenly distributed between a
    start and end value. Useful for encoding observation spaces with single element
    boxes.

    Uses :func:`np.linspace` internally.
    """

    def values(self) -> List[np.ndarray]:
        # Wrap each scalar in a shape-(1,) array for single-element Box spaces.
        scalars = np.linspace(self.start, self.end, self.n, dtype=self.dtype)
        return [np.array([scalar]) for scalar in scalars]
def find_most_recent_results_dir(base_path: Union[Path, str]) -> Path:
    """
    Scans a directory containing ray experiment results and returns the path of
    the most recent experiment.

    Ray names run directories '<prefix>_<%Y-%m-%d_%H-%M-%S><8-char suffix>', so
    the timestamp occupies the ``[-27:-8]`` slice of the directory path string;
    directories that do not parse under that scheme are ignored.

    Arguments:
        base_path: The directory to search in.

    Raises:
        ValueError: If no directory with a parseable timestamp is found.
    """
    base_path = Path(os.path.expanduser(base_path))

    def _timestamp(directory: Path) -> Optional[datetime]:
        # Returns None when the name does not follow ray's naming scheme.
        try:
            return datetime.strptime(str(directory)[(- 27):(- 8)], '%Y-%m-%d_%H-%M-%S')
        except ValueError:
            return None

    # Parse each candidate's timestamp exactly once (previously this parsed
    # every name twice: once to filter and once more as the sort key).
    stamped = []
    for directory in base_path.iterdir():
        if not directory.is_dir():
            continue
        timestamp = _timestamp(directory)
        if timestamp is not None:
            stamped.append((timestamp, directory))

    if not stamped:
        raise ValueError(f"No experiment directories found in '{base_path}'")

    # Stable sort on the timestamp only; the most recent run ends up last.
    stamped.sort(key=lambda pair: pair[0])
    return stamped[(- 1)][1]
def get_checkpoints(results_dir: Union[Path, str]) -> List[int]:
    """
    Scans a directory containing an experiment's results and returns a list of all the
    checkpoints in that directory.

    Arguments:
        results_dir: The directory to search in.

    Raises:
        FileNotFoundError: If the directory contains no 'checkpoint_*' entries.
    """
    checkpoint_dirs = list(Path(results_dir).glob('checkpoint_*'))
    if not checkpoint_dirs:
        raise FileNotFoundError(f"No checkpoints found in directory '{results_dir}'")
    # e.g. '.../checkpoint_000010' -> 10
    numbers = [
        int(str(path).rsplit('_', maxsplit=1)[(- 1)]) for path in checkpoint_dirs
    ]
    return sorted(numbers)
def construct_results_paths(directory: Union[str, Path], checkpoint: Optional[int]=None) -> Tuple[Path, Path]:
    """Resolve a results directory and a checkpoint directory within it.

    The directory is tried as given (with '~' expanded) and then relative to
    '~/ray_results'. A trailing 'LATEST' component selects the most recent
    experiment inside the parent directory. When no checkpoint number is given
    the highest-numbered checkpoint found in the directory is used.

    Returns:
        A tuple of (results directory, checkpoint directory).
    """
    if checkpoint is not None:
        assert isinstance(checkpoint, int)

    ray_dir = os.path.expanduser('~/ray_results')
    directory = Path(directory)

    if directory.stem == 'LATEST':
        # Resolve the parent, then pick the newest experiment inside it.
        parent_dir = Path(os.path.expanduser(directory.parent))
        if not parent_dir.exists():
            parent_dir = Path(ray_dir, parent_dir)
        if not parent_dir.exists():
            raise FileNotFoundError(
                f"Base results directory '{parent_dir}' does not exist"
            )
        directory = find_most_recent_results_dir(parent_dir)
    else:
        directory = Path(os.path.expanduser(directory))
        if not directory.exists():
            directory = Path(ray_dir, directory)
        if not directory.exists():
            raise FileNotFoundError(f"Results directory '{directory}' does not exist")

    if checkpoint is None:
        # Default to the most recent checkpoint in the directory.
        checkpoint = get_checkpoints(directory)[(- 1)]

    checkpoint_path = Path(directory, f'checkpoint_{str(checkpoint).zfill(6)}')
    return (directory, checkpoint_path)
def evaluate_policy(directory: Union[(str, Path)], policy_id: str, obs: Any, explore: bool, batch_size: int=100, checkpoint: Optional[int]=None, show_progress_bar: bool=True) -> Generator[(Tuple[(Dict[(str, Any)], Any, Any)], None, None)]:
    """
    Evaluates a given pre-trained RLlib policy over a one or more dimensional
    observation space.

    Arguments:
        directory: Results directory containing trained policies. By default, this is
            located within `~/ray_results/`. If LATEST is given as the last element of
            the path, the parent directory will be scanned for the most recent run and
            this will be used.
        policy_id: The ID of the trained policy to evaluate.
        obs: The observation space to evaluate the policy with, of which can include
            :class:`Range` class instances to evaluate the policy over multiple
            dimensions in a similar fashion to the :func:`ph.utils.rllib.rollout`
            function.
        explore: Parameter passed to the policy.
        batch_size: Number of observations to evaluate at a time.
        checkpoint: Checkpoint to use (defaults to most recent).
        show_progress_bar: If True shows a progress bar in the terminal output.

    Returns:
        A generator of tuples of the form (params, obs, action), where params maps
        each Range's name to the concrete value used for that evaluation.
    """
    (directory, checkpoint_path) = construct_results_paths(directory, checkpoint)
    # Load the trained RLlib policy and the Phantom-side training parameters.
    policy = Policy.from_checkpoint(((checkpoint_path / 'policies') / policy_id))
    with open(Path(directory, 'phantom-training-params.pkl'), 'rb') as params_file:
        ph_config = cloudpickle.load(params_file)
    policy_specs = ph_config['policy_specs']
    obs_s = policy_specs[policy_id].observation_space
    # RLlib preprocessor that flattens observations into the policy's input format.
    preprocessor = get_preprocessor(obs_s)(obs_s)
    # Find every Range inside `obs` (wrapped in a ({}, obs) tuple so paths line
    # up with the [params, obs] "variation" pairs built below).
    ranges = collect_instances_of_type_with_paths(Range, ({}, obs))
    # Each variation is a [params-dict, concrete-obs] pair; expand the
    # cartesian product of all Range values one range at a time.
    variations: List[List[Any]] = [[{}, deepcopy(obs)]]
    unamed_range_count = 0
    for (range_obj, paths) in reversed(ranges):
        values = range_obj.values()
        name = range_obj.name
        if (name is None):
            # Unnamed ranges get generated keys in the params dict.
            name = f'range-{unamed_range_count}'
            unamed_range_count += 1
        variations2 = []
        for value in values:
            for variation in variations:
                variation = deepcopy(variation)
                # Record the chosen value and substitute it at every path where
                # this Range appeared.
                variation[0][name] = value
                for path in paths:
                    update_val(variation, path, value)
                variations2.append(variation)
        variations = variations2

    def chunker(seq, size):
        # Yield successive `size`-length slices of `seq`.
        return (seq[pos:(pos + size)] for pos in range(0, len(seq), size))

    batched_variations = chunker(variations, batch_size)
    if show_progress_bar:
        batched_variations = rich.progress.track(batched_variations)
    for variation_batch in batched_variations:
        # NOTE: `obs` is intentionally rebound here, shadowing the parameter.
        (params, obs) = zip(*variation_batch)
        processed_obs = [preprocessor.transform(ob) for ob in obs]
        # compute_actions returns (actions, state_outs, info); take the actions.
        squashed_actions = policy.compute_actions(processed_obs, explore=explore)[0]
        # Map actions back from the policy's normalised space to the env space.
        actions = [unsquash_action(action, policy.action_space_struct) for action in squashed_actions]
        for (p, o, a) in zip(params, obs, actions):
            (yield (p, o, a))
def rollout(directory: Union[(str, Path)], env_class: Optional[Type[PhantomEnv]]=None, env_config: Optional[Dict[(str, Any)]]=None, custom_policy_mapping: Optional[CustomPolicyMapping]=None, num_repeats: int=1, num_workers: Optional[int]=None, checkpoint: Optional[int]=None, metrics: Optional[Mapping[(str, Metric)]]=None, record_messages: bool=False, show_progress_bar: bool=True, policy_inference_batch_size: int=1, explore: bool=False) -> Generator[(Rollout, None, None)]:
    """Performs rollouts for a previously trained Phantom experiment.

    Any objects that inherit from the Range class in the env_config parameter will be
    expanded out into a multidimensional space of rollouts.

    For example, if two distinct UniformRanges are used, one with a length of 10 and one
    with a length of 5, 10 * 5 = 50 rollouts will be performed.

    If num_repeats is also given, say with a value of 2, then each of the 50 rollouts
    will be repeated twice, each time with a different random seed.

    Arguments:
        directory: Results directory containing trained policies. By default, this is
            located within `~/ray_results/`. If LATEST is given as the last element of
            the path, the parent directory will be scanned for the most recent run and
            this will be used.
        env_class: Optionally pass the Environment class to use. If not given will
            fallback to the copy of the environment class saved during training.
        env_config: Configuration parameters to pass to the environment init method.
        custom_policy_mapping: Optionally replace agent policies with custom fixed
            policies.
        num_workers: Number of rollout worker processes to initialise
            (defaults to 'NUM CPU - 1').
        num_repeats: Number of rollout repeats to perform, distributed over all workers.
        checkpoint: Checkpoint to use (defaults to most recent).
        metrics: Optional set of metrics to record and log.
        record_messages: If True the full list of episode messages for each of the
            rollouts will be recorded. Only applies if `save_trajectories` is also True.
        show_progress_bar: If True shows a progress bar in the terminal output.
        policy_inference_batch_size: Number of policy inferences to perform in one go.
        explore: If True, trained policies will be sampled from in exploration mode.

    Returns:
        A Generator of Rollouts.

    .. note::
        It is the user's responsibility to invoke rollouts via the provided ``phantom``
        command or ensure the ``PYTHONHASHSEED`` environment variable is set before
        starting the Python interpreter to run this code. Not setting this may lead to
        reproducibility issues.
    """
    assert (num_repeats > 0), 'num_repeats must be at least 1'
    assert (policy_inference_batch_size > 0), 'policy_inference_batch_size must be at least 1'
    if (num_workers is not None):
        assert (num_workers >= 0), 'num_workers must be at least 0'
    show_pythonhashseed_warning()
    metrics = (metrics or {})
    env_config = (env_config or {})
    custom_policy_mapping = (custom_policy_mapping or {})
    # Samplers are only valid during training; rollouts require deterministic
    # Ranges so the variation space can be enumerated.
    if contains_type(env_config, Sampler):
        raise TypeError('env_config should not contain instances of classes inheriting from BaseSampler')
    (directory, checkpoint_path) = construct_results_paths(directory, checkpoint)
    # Find every Range inside env_config (wrapped in ({}, env_config) so the
    # recorded paths line up with the [params, env_config] pairs below).
    ranges = collect_instances_of_type_with_paths(Range, ({}, env_config))
    # Each variation is a [rollout-params-dict, concrete-env-config] pair;
    # expand the cartesian product of all Range values one range at a time.
    variations: List[List[Any]] = [deepcopy([{}, env_config])]
    unamed_range_count = 0
    for (range_obj, paths) in reversed(ranges):
        values = range_obj.values()
        name = range_obj.name
        if (name is None):
            # Unnamed ranges get generated keys in the params dict.
            name = f'range-{unamed_range_count}'
            unamed_range_count += 1
        variations2 = []
        for value in values:
            for variation in variations:
                variation = deepcopy(variation)
                variation[0][name] = value
                for path in paths:
                    update_val(variation, path, value)
                variations2.append(variation)
        variations = variations2
    # One config per (variation, repeat); rollout_id is globally unique.
    rollout_configs = [_RolloutConfig(((i * num_repeats) + j), j, env_config, rollout_params) for (i, (rollout_params, env_config)) in enumerate(variations) for j in range(num_repeats)]
    # Load the RLlib algorithm config and the Phantom-side training parameters.
    with open(Path(directory, 'params.pkl'), 'rb') as params_file:
        config = cloudpickle.load(params_file)
    with open(Path(directory, 'phantom-training-params.pkl'), 'rb') as params_file:
        ph_config = cloudpickle.load(params_file)
    if (env_class is None):
        # Fall back to the env class that was pickled at training time.
        env_class = ph_config['env_class']
    env = env_class(**rollout_configs[0].env_config)
    # Batched inference assumes all envs in a batch follow the same stage
    # sequence, which only holds for deterministic FSMs.
    if ((policy_inference_batch_size > 1) and issubclass(env_class, FiniteStateMachineEnv) and (not env.is_fsm_deterministic())):
        raise ValueError('Cannot use non-determinisic FSM when policy_inference_batch_size > 1')
    num_workers_ = ((os.cpu_count() - 1) if (num_workers is None) else num_workers)
    print(f'Starting {len(rollout_configs):,} rollout(s) using {num_workers_} worker process(es)')
    if (num_workers_ == 0):
        # Run everything in-process and yield results directly.
        rollouts = _rollout_task_fn(config, checkpoint_path, rollout_configs, env_class, ph_config['policy_specs'], custom_policy_mapping, policy_inference_batch_size, explore, metrics, record_messages)
        if show_progress_bar:
            with rich_progress('Rollouts...') as progress:
                (yield from progress.track(rollouts, total=len(rollout_configs)))
        else:
            (yield from rollouts)
    else:
        # Distribute the configs across ray workers; results are streamed back
        # through a shared queue as they complete.
        # NOTE(review): Queue here must be a ray-compatible queue (imported
        # elsewhere in this module) for cross-worker puts to work -- confirm.
        q = Queue()
        rollouts_per_worker = int(math.ceil((len(rollout_configs) / max(num_workers_, 1))))

        @ray.remote
        def remote_rollout_task_fn(*args):
            # Drain the generator on the worker, pushing results to the queue.
            for x in _rollout_task_fn(*args):
                q.put(x)

        worker_payloads = [(config, checkpoint_path, rollout_configs[i:(i + rollouts_per_worker)], env_class, ph_config['policy_specs'], custom_policy_mapping, policy_inference_batch_size, explore, metrics, record_messages) for i in range(0, len(rollout_configs), rollouts_per_worker)]
        for payload in worker_payloads:
            remote_rollout_task_fn.remote(*payload)
        # Exactly one queue item is produced per rollout config.
        if show_progress_bar:
            with rich_progress('Rollouts...') as progress:
                for _ in progress.track(range(len(rollout_configs))):
                    (yield q.get())
        else:
            for _ in range(len(rollout_configs)):
                (yield q.get())
def _rollout_task_fn(config, checkpoint_path: Path, all_configs: List['_RolloutConfig'], env_class: Type[PhantomEnv], policy_specs, custom_policy_mapping: CustomPolicyMapping, policy_inference_batch_size: int, explore: bool, metric_objects: Optional[Mapping[(str, Metric)]]=None, record_messages: bool=False) -> Generator[(Rollout, None, None)]:
    """Internal function: run a list of rollout configs and yield a Rollout for each.

    Episodes are executed in batches of up to ``policy_inference_batch_size``
    envs in lock-step so that policy inference can be vectorised across the
    batch. Assumes all envs in a batch take the same number of steps and (for
    FSM envs) follow the same stage sequence -- enforced by the caller.
    """

    def chunker(seq, size):
        # Yield successive `size`-length slices of `seq`.
        return (seq[pos:(pos + size)] for pos in range(0, len(seq), size))

    # Cache of lazily-loaded trained policies and their obs preprocessors.
    saved_policies: Dict[str, Tuple[RLlibPolicy, Preprocessor]] = {}
    ray.rllib.utils.debug.update_global_seed_if_necessary(config.framework_str, all_configs[0].rollout_id)
    for configs in chunker(all_configs, policy_inference_batch_size):
        batch_size = len(configs)
        # One env per rollout config in this batch, stepped in lock-step.
        vec_envs = [env_class(**rollout_config.env_config) for rollout_config in configs]
        if record_messages:
            for env in vec_envs:
                env.network.resolver.enable_tracking = True
        # Per-env metric values and per-env recorded steps.
        vec_metrics = [defaultdict(list) for _ in range(batch_size)]
        vec_all_steps = [[] for _ in range(batch_size)]
        # Seed each env with its unique rollout_id for reproducibility.
        vec_observations = [env.reset(seed=config.rollout_id)[0] for (env, config) in zip(vec_envs, configs)]
        # Instantiate any user-supplied fixed policies, using the first env's
        # spaces (all envs in the batch share the same class/config shape).
        initted_policy_mapping = {agent_id: policy(vec_envs[0][agent_id].observation_space, vec_envs[0][agent_id].action_space) for (agent_id, policy) in custom_policy_mapping.items()}
        for i in range(vec_envs[0].num_steps):
            actions = {}
            # Transpose: per-env obs dicts -> per-agent lists of obs across envs.
            dict_observations = {k: [dic[k] for dic in vec_observations] for k in vec_observations[0]}
            for (agent_id, vec_agent_obs) in dict_observations.items():
                if (agent_id in initted_policy_mapping):
                    # Custom fixed policies are applied one observation at a time.
                    actions[agent_id] = [initted_policy_mapping[agent_id].compute_action(agent_obs) for agent_obs in vec_agent_obs]
                else:
                    # NOTE(review): `config` here is the loaded RLlib config (the
                    # function parameter); the (agent_id, 0, 0) arguments stand in
                    # for the episode/worker params of policy_mapping_fn -- confirm.
                    policy_id = config.policy_mapping_fn(agent_id, 0, 0)
                    if (policy_id not in saved_policies):
                        # Load the trained policy and its preprocessor on first use.
                        policy = RLlibPolicy.from_checkpoint(((checkpoint_path / 'policies') / policy_id))
                        obs_s = policy_specs[policy_id].observation_space
                        preprocessor = get_preprocessor(obs_s)(obs_s)
                        saved_policies[policy_id] = (policy, preprocessor)
                    else:
                        (policy, preprocessor) = saved_policies[policy_id]
                    processed_obs = [preprocessor.transform(ob) for ob in vec_agent_obs]
                    # compute_actions returns (actions, state_outs, info).
                    squashed_actions = policy.compute_actions(processed_obs, explore=explore)[0]
                    # Map actions back from the normalised space to the env space.
                    actions[agent_id] = [unsquash_action(action, policy.action_space_struct) for action in squashed_actions]
            if (len(dict_observations) == 0):
                # No agent observed anything this step: empty action dicts.
                vec_actions = ([{}] * batch_size)
            else:
                # Transpose back: per-agent action lists -> per-env action dicts.
                vec_actions = [dict(zip(actions, t)) for t in zip(*actions.values())]
            vec_steps = [env.step(actions) for (env, actions) in zip(vec_envs, vec_actions)]
            for j in range(batch_size):
                if (metric_objects is not None):
                    logging_helper(vec_envs[j], metric_objects, vec_metrics[j])
                if record_messages:
                    # Snapshot and clear tracked messages so each Step only
                    # carries the messages from its own env step.
                    messages = deepcopy(vec_envs[j].network.resolver.tracked_messages)
                    vec_envs[j].network.resolver.clear_tracked_messages()
                else:
                    messages = None
                vec_all_steps[j].append(Step(i, vec_observations[j], vec_steps[j].rewards, vec_steps[j].terminations, vec_steps[j].truncations, vec_steps[j].infos, vec_actions[j], messages, (vec_envs[j].previous_stage if isinstance(vec_envs[j], FiniteStateMachineEnv) else None)))
            vec_observations = [step.observations for step in vec_steps]
        for j in range(batch_size):
            # NOTE(review): this iterates metric_objects unconditionally --
            # assumes callers never pass metric_objects=None (rollout() always
            # passes a dict); confirm before reusing elsewhere.
            reduced_metrics = {metric_id: metric_objects[metric_id].reduce(vec_metrics[j][metric_id], 'evaluate') for metric_id in metric_objects}
            (yield Rollout(configs[j].rollout_id, configs[j].repeat_id, configs[j].env_config, configs[j].rollout_params, vec_all_steps[j], reduced_metrics))
@dataclass(frozen=True)
class _RolloutConfig():
    """Internal class: per-rollout bundle of IDs and resolved configuration."""

    # Globally unique index of this rollout across all variations and repeats.
    rollout_id: int
    # Index of this rollout within the repeats of a single variation.
    repeat_id: int
    # Environment constructor kwargs with Range values already substituted.
    env_config: Mapping[str, Any]
    # The concrete Range values (keyed by range name) used for this rollout.
    rollout_params: Mapping[str, Any]
def train(
    algorithm: str,
    env_class: Type[PhantomEnv],
    policies: PolicyMapping,
    iterations: int,
    checkpoint_freq: Optional[int] = None,
    num_workers: Optional[int] = None,
    env_config: Optional[Mapping[str, Any]] = None,
    rllib_config: Optional[Mapping[str, Any]] = None,
    ray_config: Optional[Mapping[str, Any]] = None,
    metrics: Optional[Mapping[str, Metric]] = None,
    results_dir: str = ray.tune.result.DEFAULT_RESULTS_DIR,
    show_training_metrics: bool = False,
) -> Algorithm:
    """Performs training of a Phantom experiment using the RLlib library.

    Any objects that inherit from BaseSampler in the env_supertype or
    agent_supertypes parameters will be automatically sampled from and fed back
    into the environment at the start of each episode.

    Arguments:
        algorithm: RL algorithm to use (an RLlib registered trainable, e.g. 'PPO').
        env_class: A PhantomEnv subclass.
        policies: A mapping of policy IDs to policy configurations.
        iterations: Number of training iterations to perform.
        checkpoint_freq: The iteration frequency to save policy checkpoints at
            (defaults to taking one checkpoint at the end of training).
        num_workers: Number of Ray rollout workers to use (defaults to 'NUM CPU - 1').
        env_config: Configuration parameters to pass to the environment init method.
        rllib_config: Optional algorithm parameters dictionary to pass to RLlib.
        ray_config: Optional parameters dictionary to pass to ``ray.init()``.
        metrics: Optional set of metrics to record and log.
        results_dir: A custom results directory, default is ~/ray_results/
        show_training_metrics: Set to True to print training metrics every iteration.

    The ``policies`` parameter defines which agents will use which policy. This
    is key to performing shared policy learning. The function expects a mapping
    of ``{<policy_id> : <policy_setup>}``. The policy setup values can take one
    of the following forms:

    * ``Type[Agent]``: All agents that are an instance of this class will learn
      the same RLlib policy.
    * ``List[AgentID]``: All agents that have IDs in this list will learn the
      same RLlib policy.
    * ``Tuple[PolicyClass, Type[Agent]]``: All agents that are an instance of
      this class will use the same fixed/learnt policy.
    * ``Tuple[PolicyClass, Type[Agent], Mapping[str, Any]]``: As above, with the
      policy configured with the given options.
    * ``Tuple[PolicyClass, List[AgentID]]``: All agents that have IDs in this
      list use the same fixed/learnt policy.
    * ``Tuple[PolicyClass, List[AgentID], Mapping[str, Any]]``: As above, with
      the policy configured with the given options.

    Returns:
        The built RLlib Algorithm object after training.

    .. note::
        It is the users responsibility to invoke training via the provided
        ``phantom`` command or ensure the ``PYTHONHASHSEED`` environment
        variable is set before starting the Python interpreter to run this
        code. Not setting this may lead to reproducibility issues.
    """
    show_pythonhashseed_warning()

    iterations = int(iterations)
    assert (iterations > 0), "'iterations' parameter must be > 0"

    if (num_workers is not None):
        assert (num_workers >= 0), "'num_workers' parameter must be >= 0"

    # Default: a single checkpoint at the very end of training.
    if (checkpoint_freq is None):
        checkpoint_freq = iterations

    env_config = (env_config or {})
    rllib_config = (rllib_config or {})
    metrics = (metrics or {})

    check_env_config(env_config)

    ray.init(ignore_reinit_error=True, **(ray_config or {}))

    # A throwaway env instance used only to introspect agents and spaces.
    env = env_class(**env_config)
    env.reset()

    policy_specs: Dict[str, rllib.policy.policy.PolicySpec] = {}
    policy_mapping: Dict[AgentID, str] = {}
    policies_to_train: List[str] = []

    # Translate each Phantom policy setup into an RLlib PolicySpec and record
    # which agents map to it (see the docstring for the accepted forms).
    for (policy_name, params) in policies.items():
        policy_class = None
        config = None

        if isinstance(params, list):
            # List[AgentID] -> learnt shared policy.
            agent_ids = params
            policies_to_train.append(policy_name)
        elif (isclass(params) and issubclass(params, Agent)):
            # Type[Agent] -> learnt shared policy for all agents of that type.
            agent_ids = list(env.network.get_agents_with_type(params).keys())
            policies_to_train.append(policy_name)
        elif isinstance(params, tuple):
            # (PolicyClass, agents[, config]) -> fixed (non-trained) policy.
            if (len(params) == 2):
                (policy_class, agent_ids) = params
            else:
                (policy_class, agent_ids, config) = params

            if issubclass(policy_class, Policy):
                # Phantom policies must be adapted to the RLlib interface.
                policy_class = make_rllib_wrapped_policy_class(policy_class)

            if (isclass(agent_ids) and issubclass(agent_ids, Agent)):
                agent_ids = list(env.network.get_agents_with_type(agent_ids).keys())
        else:
            raise TypeError(type(params))

        # Spaces are taken from the first mapped agent; all agents sharing a
        # policy are assumed to share spaces.
        policy_specs[policy_name] = rllib.policy.policy.PolicySpec(
            policy_class=policy_class,
            action_space=env.agents[agent_ids[0]].action_space,
            observation_space=env.agents[agent_ids[0]].observation_space,
            config=config,
        )

        for agent_id in agent_ids:
            policy_mapping[agent_id] = policy_name

    def policy_mapping_fn(agent_id, *args, **kwargs):
        # Static agent -> policy lookup built above.
        return policy_mapping[agent_id]

    ray.tune.registry.register_env(
        env_class.__name__, (lambda config: RLlibEnvWrapper(env_class(**config)))
    )

    num_workers_ = ((os.cpu_count() - 1) if (num_workers is None) else num_workers)

    timestr = datetime.today().strftime('%Y-%m-%d_%H-%M-%S')
    logdir_prefix = f'{algorithm}_{env.__class__.__name__}_{timestr}'
    results_dir = os.path.expanduser(results_dir)

    def logger_creator(config):
        # Each run gets its own uniquely named directory under results_dir.
        os.makedirs(results_dir, exist_ok=True)
        logdir = tempfile.mkdtemp(prefix=logdir_prefix, dir=results_dir)
        return ray.tune.logger.UnifiedLogger(config, logdir, loggers=None)

    # Base RLlib config; user-provided rllib_config overrides any of these.
    config = {
        'callbacks': RLlibMetricLogger(metrics),
        'enable_connectors': True,
        'env': env_class.__name__,
        'env_config': env_config,
        'framework': 'torch',
        'logger_creator': logger_creator,
        'num_sgd_iter': 10,
        'num_rollout_workers': num_workers_,
        'rollout_fragment_length': env.num_steps,
        'seed': 0,
        'train_batch_size': (env.num_steps * max(1, num_workers_)),
    }

    config.update(rllib_config)

    config['multiagent'] = {
        'policies': policy_specs,
        'policy_mapping_fn': policy_mapping_fn,
        'policies_to_train': policies_to_train,
    }
    config['multiagent'].update(rllib_config.get('multiagent', {}))

    if (algorithm == 'PPO'):
        # Keep the SGD minibatch size proportional to the train batch size.
        config['sgd_minibatch_size'] = max(int((config['train_batch_size'] / 10)), 1)

    config_obj = ray.tune.registry.get_trainable_cls(algorithm).get_default_config().from_dict(config)
    # Disable the new RLModule/Learner APIs so the legacy policy classes above
    # are used as-is.
    config_obj.rl_module(_enable_rl_module_api=False)
    config_obj.training(_enable_learner_api=False)

    algo = config_obj.build()

    with rich_progress('Training...') as progress:
        for i in progress.track(range(1, (iterations + 1))):
            result = algo.train()

            if show_training_metrics:
                rich.pretty.pprint(
                    {
                        'iteration': i,
                        'metrics': result['custom_metrics'],
                        'rewards': {
                            'policy_reward_min': result['policy_reward_min'],
                            'policy_reward_max': result['policy_reward_max'],
                            'policy_reward_mean': result['policy_reward_mean'],
                        },
                    }
                )

            if (i == 1):
                # NOTE: rebinds `config` (previously the RLlib config dict) to
                # the training-parameters record saved alongside checkpoints.
                config = {
                    'algorithm': algorithm,
                    'env_class': env_class,
                    'iterations': iterations,
                    'checkpoint_freq': checkpoint_freq,
                    'policy_specs': policy_specs,
                    'policy_mapping': policy_mapping,
                    'policies_to_train': policies_to_train,
                    'env_config': env_config,
                    'rllib_config': rllib_config,
                    'metrics': metrics,
                }

                with open(Path(algo.logdir, 'phantom-training-params.pkl'), 'wb') as f:
                    cloudpickle.dump(config, f)

            if (checkpoint_freq and (((i % checkpoint_freq) == 0) or (i == iterations))):
                checkpoint_path = Path(algo.logdir, f'checkpoint_{str(i).zfill(6)}')
                algo.save(checkpoint_path)

    print(f'Logs & checkpoints saved to: {algo.logdir}')

    return algo
class RLlibMetricLogger(DefaultCallbacks): 'RLlib callback that logs Phantom metrics.' def __init__(self, metrics: Mapping[(str, 'Metric')]) -> None: super().__init__() self.metrics = metrics def on_episode_start(self, *, episode, **kwargs) -> None: for metric_id in self.metrics.keys(): episode.user_data[metric_id] = [] def on_episode_step(self, *, base_env, episode, **kwargs) -> None: env = base_env.envs[0] logging_helper(env, self.metrics, episode.user_data) def on_episode_end(self, *, episode, **kwargs) -> None: for (metric_id, metric) in self.metrics.items(): episode.custom_metrics[metric_id] = metric.reduce(episode.user_data[metric_id], mode='train') def __call__(self) -> 'RLlibMetricLogger': return self
def make_rllib_wrapped_policy_class(policy_class: Type[Policy]) -> Type[rllib.Policy]: class RLlibPolicyWrapper(rllib.Policy): def __init__(self, observation_space: gym.Space, action_space: gym.Space, config: Mapping[(str, Any)], **kwargs): self.policy = policy_class(observation_space, action_space, **kwargs) super().__init__(observation_space, action_space, config) def get_weights(self): return None def set_weights(self, weights): pass def learn_on_batch(self, samples): return {} def compute_single_action(self, obs: Optional[TensorStructType]=None, state: Optional[List[TensorType]]=None, *, prev_action: Optional[TensorStructType]=None, prev_reward: Optional[TensorStructType]=None, info: dict=None, input_dict: Optional[SampleBatch]=None, episode: Optional[Episode]=None, explore: Optional[bool]=None, timestep: Optional[int]=None, **kwargs) -> Tuple[(TensorStructType, List[TensorType], Dict[(str, TensorType)])]: return (self.policy.compute_action(obs), [], {}) def compute_actions(self, obs_batch: Union[(List[TensorStructType], TensorStructType)], state_batches: Optional[List[TensorType]]=None, prev_action_batch: Union[(List[TensorStructType], TensorStructType)]=None, prev_reward_batch: Union[(List[TensorStructType], TensorStructType)]=None, info_batch: Optional[Dict[(str, list)]]=None, episodes: Optional[List[MultiAgentEpisode]]=None, explore: Optional[bool]=None, timestep: Optional[int]=None, **kwargs) -> Tuple[(TensorType, List[TensorType], Dict[(str, TensorType)])]: if isinstance(self.action_space, gym.spaces.Tuple): unbatched = [self.policy.compute_action(obs) for obs in obs_batch] actions = tuple((np.array([unbatched[j][i] for j in range(len(unbatched))]) for i in range(len(unbatched[0])))) else: actions = [self.policy.compute_action(obs) for obs in obs_batch] return (actions, [], {}) return RLlibPolicyWrapper
class RLlibEnvWrapper(rllib.MultiAgentEnv): '\n Wrapper around a :class:`PhantomEnv` that provides compatibility with the RLlib\n :class:`MultiAgentEnv` interface.\n ' def __init__(self, env: PhantomEnv) -> None: self.env = env self.env.reset() self._agent_ids = self.env.strategic_agent_ids self.action_space = gym.spaces.Dict({agent_id: env.agents[agent_id].action_space for agent_id in self._agent_ids}) self.observation_space = gym.spaces.Dict({agent_id: env.agents[agent_id].observation_space for agent_id in self._agent_ids}) super().__init__() def step(self, action_dict: Mapping[(AgentID, Any)]) -> PhantomEnv.Step: return self.env.step(action_dict) def reset(self, seed: Optional[int]=None, options: Optional[Dict[(str, Any)]]=None) -> Tuple[(Dict[(AgentID, Any)], Dict[(str, Any)])]: return self.env.reset(seed, options) def is_terminated(self) -> bool: return self.env.is_terminated() def __getattr__(self, name: str) -> Any: return getattr(self.env, name) def __getitem__(self, agent_id: AgentID) -> AgentID: return self.env.__getitem__(agent_id) def __str__(self): return f'<{type(self).__name__}{self.env}>'
@dataclass(frozen=True) class AgentStep(): 'Describes a step taken by a single agent in an episode.' i: int observation: Optional[Any] reward: Optional[float] done: bool info: Optional[Dict[(str, Any)]] action: Optional[Any] stage: Optional[StageID] = None
@dataclass(frozen=True) class Step(): 'Describes a step taken in an episode.' i: int observations: Dict[(AgentID, Any)] rewards: Dict[(AgentID, float)] terminations: Dict[(AgentID, bool)] truncations: Dict[(AgentID, bool)] infos: Dict[(AgentID, Dict[(str, Any)])] actions: Dict[(AgentID, Any)] messages: Optional[List[Message]] = None stage: Optional[StageID] = None
@dataclass(frozen=True)
class Rollout():
    """A single completed rollout episode along with its reduced metrics.

    Provides helper methods for filtering the per-step data down to a single
    agent and/or a set of FSM stages.
    """

    # Unique ID of this episode.
    rollout_id: int
    # Index of this episode within its group of repeats.
    repeat_id: int
    # Env constructor kwargs the episode was run with.
    env_config: Mapping[str, Any]
    # The sampled/varied parameter values this episode was generated from.
    rollout_params: Dict[str, Any]
    # The full per-step record of the episode.
    steps: List["Step"]
    # Reduced metric values for the episode.
    metrics: Dict[str, np.ndarray]

    def _filter_for_agent(
        self,
        field: str,
        agent_id: "AgentID",
        drop_nones: bool,
        stages: Optional[Iterable["StageID"]],
        check_none: bool = False,
    ) -> List[Optional[Any]]:
        """Shared per-agent filter used by the public accessor methods.

        Arguments:
            field: Name of the per-step dict attribute to read (e.g. 'rewards').
            agent_id: The agent to filter for.
            drop_nones: If True, steps where the agent has no entry are skipped.
            stages: Optional stage filter.
            check_none: If True (with drop_nones), entries whose value is None
                are also skipped (used for rewards).
        """
        values = []
        for step in self.steps:
            if stages is not None and step.stage not in stages:
                continue
            mapping = getattr(step, field)
            if drop_nones and agent_id not in mapping:
                continue
            if drop_nones and check_none and mapping[agent_id] is None:
                continue
            values.append(mapping.get(agent_id, None))
        return values

    def observations_for_agent(
        self,
        agent_id: "AgentID",
        drop_nones: bool = False,
        stages: Optional[Iterable["StageID"]] = None,
    ) -> List[Optional[Any]]:
        """Helper method to filter all observations for a single agent.

        Arguments:
            agent_id: The ID of the agent to filter observations for.
            drop_nones: Drops any None values if True.
            stages: Optionally also filter by multiple stages.
        """
        return self._filter_for_agent('observations', agent_id, drop_nones, stages)

    def rewards_for_agent(
        self,
        agent_id: "AgentID",
        drop_nones: bool = False,
        stages: Optional[Iterable["StageID"]] = None,
    ) -> List[Optional[float]]:
        """Helper method to filter all rewards for a single agent.

        Arguments:
            agent_id: The ID of the agent to filter rewards for.
            drop_nones: Drops any None values if True.
            stages: Optionally also filter by multiple stages.
        """
        return self._filter_for_agent('rewards', agent_id, drop_nones, stages, check_none=True)

    def terminations_for_agent(
        self,
        agent_id: "AgentID",
        drop_nones: bool = False,
        stages: Optional[Iterable["StageID"]] = None,
    ) -> List[Optional[bool]]:
        """Helper method to filter all 'terminations' for a single agent.

        Arguments:
            agent_id: The ID of the agent to filter 'terminations' for.
            drop_nones: Drops any None values if True.
            stages: Optionally also filter by multiple stages.
        """
        return self._filter_for_agent('terminations', agent_id, drop_nones, stages)

    def truncations_for_agent(
        self,
        agent_id: "AgentID",
        drop_nones: bool = False,
        stages: Optional[Iterable["StageID"]] = None,
    ) -> List[Optional[bool]]:
        """Helper method to filter all 'truncations' for a single agent.

        Arguments:
            agent_id: The ID of the agent to filter 'truncations' for.
            drop_nones: Drops any None values if True.
            stages: Optionally also filter by multiple stages.
        """
        return self._filter_for_agent('truncations', agent_id, drop_nones, stages)

    def infos_for_agent(
        self,
        agent_id: "AgentID",
        drop_nones: bool = False,
        stages: Optional[Iterable["StageID"]] = None,
    ) -> List[Optional[Dict[str, Any]]]:
        """Helper method to filter all 'infos' for a single agent.

        Arguments:
            agent_id: The ID of the agent to filter 'infos' for.
            drop_nones: Drops any None values if True.
            stages: Optionally also filter by multiple stages.
        """
        return self._filter_for_agent('infos', agent_id, drop_nones, stages)

    def actions_for_agent(
        self,
        agent_id: "AgentID",
        drop_nones: bool = False,
        stages: Optional[Iterable["StageID"]] = None,
    ) -> List[Optional[Any]]:
        """Helper method to filter all actions for a single agent.

        Arguments:
            agent_id: The ID of the agent to filter actions for.
            drop_nones: Drops any None values if True.
            stages: Optionally also filter by multiple stages.
        """
        return self._filter_for_agent('actions', agent_id, drop_nones, stages)

    @staticmethod
    def _done_for_agent(step: "Step", agent_id: "AgentID") -> Optional[bool]:
        """Combine a step's termination and truncation flags into one 'done' flag.

        Returns None when the agent has neither flag recorded for the step.
        """
        terminated = step.terminations.get(agent_id, None)
        truncated = step.truncations.get(agent_id, None)
        if terminated is None and truncated is None:
            return None
        return bool(terminated) or bool(truncated)

    def steps_for_agent(
        self,
        agent_id: "AgentID",
        stages: Optional[Iterable["StageID"]] = None,
    ) -> List["AgentStep"]:
        """Helper method to filter all steps for a single agent.

        Arguments:
            agent_id: The ID of the agent to filter steps for.
            stages: Optionally also filter by multiple stages.
        """
        if stages is None:
            steps = self.steps
        else:
            steps = [step for step in self.steps if step.stage in stages]

        # BUG FIX: previously both the termination and truncation flags were
        # passed positionally, giving AgentStep (7 fields) 8 arguments and
        # raising a TypeError; they are now folded into the single 'done' field.
        return [
            AgentStep(
                step.i,
                step.observations.get(agent_id, None),
                step.rewards.get(agent_id, None),
                self._done_for_agent(step, agent_id),
                step.infos.get(agent_id, None),
                step.actions.get(agent_id, None),
                step.stage,
            )
            for step in steps
        ]

    def count_actions(
        self, stages: Optional[Iterable["StageID"]] = None
    ) -> List[Tuple[Any, int]]:
        """Helper method to count the occurances of all actions for all agents.

        Arguments:
            stages: Optionally filter by multiple stages.
        """
        if stages is None:
            filtered_actions = (
                action for step in self.steps for action in step.actions.values()
            )
        else:
            filtered_actions = (
                action
                for step in self.steps
                for action in step.actions.values()
                if step.stage in stages
            )

        return Counter(filtered_actions).most_common()

    def count_agent_actions(
        self,
        agent_id: "AgentID",
        stages: Optional[Iterable["StageID"]] = None,
    ) -> List[Tuple[Any, int]]:
        """Helper method to count the occurances of all actions for a single agents.

        Arguments:
            agent_id: The ID of the agent to count actions for.
            stages: Optionally also filter by multiple stages.
        """
        if stages is None:
            filtered_actions = (step.actions.get(agent_id, None) for step in self.steps)
        else:
            filtered_actions = (
                step.actions.get(agent_id, None)
                for step in self.steps
                if step.stage in stages
            )

        return Counter(filtered_actions).most_common()

    def __getitem__(self, index: int):
        """Returns a step for a given index in the episode."""
        try:
            return self.steps[index]
        # BUG FIX: list indexing raises IndexError, not KeyError, so the
        # friendly error below was previously unreachable.
        except IndexError:
            raise KeyError(f'Index {index} not valid for trajectory')
def rollouts_to_dataframe(rollouts: Iterable[Rollout], avg_over_repeats: bool=True, index_value_precision: Optional[int]=None) -> pd.DataFrame: '\n Converts a list of Rollouts into a MultiIndex DataFrame with rollout params as the\n indexes and metrics as the columns.\n\n Arguments:\n rollouts: The list/iterator of Phantom Rollout objects to use.\n avg_over_repeats: If True will average all metric values over each set of\n repeats. This is very useful for reducing the overall data size if\n individual rollouts are not required.\n index_value_precision: If given will round the index values to the given\n precision and convert to strings. This can be useful for avoiding floating\n point inaccuracies when indexing (eg. 2.0 != 2.000000001).\n\n Returns:\n A Pandas DataFrame containing the results.\n ' rollouts = [(rollout.rollout_params, rollout.metrics) for rollout in rollouts] index_cols = list(rollouts[0][0].keys()) df = pd.DataFrame([{**params, **metrics} for (params, metrics) in rollouts]) if (index_value_precision is not None): for col in index_cols: df[col] = df[col].round(index_value_precision).astype(str) if (len(index_cols) > 0): if avg_over_repeats: df = df.groupby(index_cols).mean().reset_index() df = df.set_index(index_cols) return df
def rollouts_to_jsonl(rollouts: Iterable[Rollout], file_obj: io.TextIOBase, human_readable: bool=False) -> None: "\n Writes multiple rollouts to a file using the JSONL (JSON Lines) format.\n\n Arguments:\n rollouts: The list/iterator of Phantom Rollout objects to use.\n file_obj: A writable file object to output to.\n human_readable: If True the output will be 'pretty printed'.\n " for rollout in rollouts: json.dump(rollout, file_obj, indent=(2 if human_readable else None), cls=RolloutJSONEncoder) file_obj.write('\n') file_obj.flush()
class RolloutJSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, np.ndarray): return o.tolist() if isinstance(o, np.bool_): return bool(o) if isinstance(o, np.floating): return float(o) if isinstance(o, np.number): return int(o) if isinstance(o, Rollout): return asdict(o) if isinstance(o, Step): return asdict(o) return json.JSONEncoder.default(self, o)
class ComparableType(Generic[T], ABC): 'Interface for Types that can be compared.' @abstractmethod def __lt__(self, other: T) -> bool: raise NotImplementedError @abstractmethod def __le__(self, other: T) -> bool: raise NotImplementedError @abstractmethod def __gt__(self, other: T) -> bool: raise NotImplementedError @abstractmethod def __ge__(self, other: T) -> bool: raise NotImplementedError @abstractmethod def __eq__(self, other: object) -> bool: raise NotImplementedError @abstractmethod def __ne__(self, other: object) -> bool: raise NotImplementedError
class Sampler(ABC, Generic[T]): 'Samplers are used in Agent/Environment Supertypes to define how they are sampled.\n\n Samplers are designed to be used when training policies and a stochastic\n distribution of values is required for the Supertype sampling.\n\n Samplers return an unbounded number of total values with one value being returned at\n a time with the :meth:`sample` method.\n ' def __init__(self): self._value: Optional[T] = None self._id = uuid4() @property def value(self) -> Optional[T]: return self._value @abstractmethod def sample(self) -> T: "\n Returns a single value defined by the Sampler's internal distribution.\n\n Implementations of this function should also update the instance's\n :attr:`_value` property.\n " raise NotImplementedError
class ComparableSampler(Sampler[ComparableT], Generic[ComparableT]): '\n Extension of the :class:`Sampler` for ComparableTypes in order to treat the\n :class:`ComparableSampler` like its actual internal value.\n\n Example:\n >>> s = UniformFloatSampler()\n >>> s.value = s.sample()\n >>> s <= 1.0\n # True\n >>> s == 1.5\n # False\n ' def __lt__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool: if isinstance(other, ComparableSampler): return super().__lt__(other) if (self.value is None): raise ValueError('`self.value` is None') return (self.value < other) def __eq__(self, other: object) -> bool: if isinstance(other, ComparableSampler): return object.__eq__(self, other) return (self.value == other) def __ne__(self, other: object) -> bool: if isinstance(other, ComparableSampler): return object.__ne__(self, other) return (self.value != other) def __le__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool: return (self.__lt__(other) or self.__eq__(other)) def __gt__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool: return (not self.__le__(other)) def __ge__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool: return (self.__gt__(other) or self.__eq__(other))
class UniformFloatSampler(ComparableSampler[float]): 'Samples a single float value from a uniform distribution.\n\n Uses :func:`np.random.uniform` internally.\n ' def __init__(self, low: float=0.0, high: float=1.0, clip_low: Optional[float]=None, clip_high: Optional[float]=None) -> None: assert (high >= low) self.low = low self.high = high self.clip_low = clip_low self.clip_high = clip_high super().__init__() def sample(self) -> float: self._value = np.random.uniform(self.low, self.high) if ((self.clip_low is not None) or (self.clip_high is not None)): self._value = np.clip(self._value, self.clip_low, self.clip_high) return self._value
class UniformIntSampler(ComparableSampler[int]): 'Samples a single int value from a uniform distribution.\n\n Uses :func:`np.random.randint` internally.\n ' def __init__(self, low: int=0, high: int=1, clip_low: Optional[int]=None, clip_high: Optional[int]=None) -> None: assert (high >= low) self.low = low self.high = high self.clip_low = clip_low self.clip_high = clip_high super().__init__() def sample(self) -> float: self._value = np.random.randint(self.low, self.high) if ((self.clip_low is not None) or (self.clip_high is not None)): self._value = np.clip(self._value, self.clip_low, self.clip_high) return self._value
class UniformArraySampler(ComparableSampler[np.ndarray]): 'Samples an array of float values from a uniform distribution.\n\n Uses :func:`np.random.uniform()` internally.\n ' def __init__(self, low: float=0.0, high: float=1.0, shape: Iterable[int]=(1,), clip_low: Optional[float]=None, clip_high: Optional[float]=None) -> None: assert (high >= low) self.low = low self.high = high self.shape = shape self.clip_low = clip_low self.clip_high = clip_high super().__init__() def sample(self) -> np.ndarray: self._value = np.random.uniform(self.low, self.high, self.shape) if ((self.clip_low is not None) or (self.clip_high is not None)): self._value = np.clip(self._value, self.clip_low, self.clip_high) return self._value
class NormalSampler(ComparableSampler[float]): 'Samples a single float value from a normal distribution.\n\n Uses :func:`np.random.normal()` internally.\n ' def __init__(self, mu: float, sigma: float, clip_low: Optional[float]=None, clip_high: Optional[float]=None) -> None: self.mu = mu self.sigma = sigma self.clip_low = clip_low self.clip_high = clip_high super().__init__() def sample(self) -> float: self._value = np.random.normal(self.mu, self.sigma) if ((self.clip_low is not None) or (self.clip_high is not None)): self._value = np.clip(self._value, self.clip_low, self.clip_high) return self._value
class NormalArraySampler(ComparableSampler[np.ndarray]): 'Samples an array of float values from a normal distribution.\n\n Uses :func:`np.random.normal()` internally.\n ' def __init__(self, mu: float, sigma: float, shape: Tuple[int]=(1,), clip_low: Optional[float]=None, clip_high: Optional[float]=None) -> None: self.mu = mu self.sigma = sigma self.shape = shape self.clip_low = clip_low self.clip_high = clip_high super().__init__() def sample(self) -> np.ndarray: self._value = np.random.normal(self.mu, self.sigma, self.shape) if ((self.clip_low is not None) or (self.clip_high is not None)): self._value = np.clip(self._value, self.clip_low, self.clip_high) return self._value
class LambdaSampler(Sampler[T]): 'Samples using an arbitrary lambda function.' def __init__(self, func: Callable[(..., T)], *args, **kwargs): self.func = func self.args = args self.kwargs = kwargs super().__init__() def sample(self) -> T: self._value = self.func(*self.args, **self.kwargs) return self._value
@dataclass(frozen=True) class View(ABC): "\n Base class for the View class hierarchy. Implementations should subclass either\n :class:`AgentView`, :class:`EnvView` or :class:`FSMEnvView`.\n\n Views are used to share state between agents (and the Env) in a formalised manner\n and in a way that is easier than using request and response messages.\n\n Views should be created via the calling of the agent/env's :meth:`view()` method.\n Views can be tailored to particular agents, i.e. the view given can depend on the\n agent that the view is being given to.\n "
@dataclass(frozen=True) class AgentView(View): '\n Immutable references to public :class:`phantom.Agent` state.\n '
@dataclass(frozen=True) class EnvView(View): '\n Immutable references to public :class:`phantom.PhantomEnv` state.\n ' current_step: int proportion_time_elapsed: float
def parse_concatenated_json(json_str: str): decoder = json.JSONDecoder() pos = 0 objs = [] while (pos < len(json_str)): json_str = json_str[pos:].strip() if (not json_str): break (obj, pos) = decoder.raw_decode(json_str) objs.append(obj) return objs
@st.cache def load_data(file: str): return parse_concatenated_json(open(file, 'r').read())
def _get_version(): with open(os.path.join(NAME, '__init__.py')) as fp: return re.match('__version__\\s*=\\s*[\\"\\\'](?P<version>.*)[\\",\\\']', fp.read()).group('version')
def _get_long_description(): with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.md'), encoding='utf-8') as readme_file: long_description = readme_file.read() return long_description
def _get_requirements(): with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'requirements.txt'), encoding='utf-8') as requirements_file: requirements = [line.strip() for line in requirements_file.readlines() if (not (line.strip().startswith('#') or line.strip().startswith('-')))] return requirements
class MockSampler(ph.utils.samplers.Sampler[float]): def __init__(self, value: float) -> None: self._value = value def sample(self) -> float: self._value += 1 return self._value
class MockComparableSampler(ph.utils.samplers.ComparableSampler[float]): def __init__(self, value: float) -> None: self._value = value def sample(self) -> float: self._value += 1 return self._value
class MockAgent(ph.Agent): def __init__(self, *args, num_steps: Optional[int]=None, **kwargs): super().__init__(*args, **kwargs) self.num_steps = num_steps
class MockStrategicAgent(ph.StrategicAgent): @dataclass class Supertype(ph.Supertype): type_value: float = 0.0 def __init__(self, *args, num_steps: Optional[int]=None, **kwargs): super().__init__(*args, **kwargs) self.action_space = gym.spaces.Box(0, 1, (1,)) self.observation_space = gym.spaces.Box(0, 1, (1,)) self.encode_obs_count = 0 self.decode_action_count = 0 self.compute_reward_count = 0 self.num_steps = num_steps def encode_observation(self, ctx: ph.Context): self.encode_obs_count += 1 return np.array([ctx.env_view.proportion_time_elapsed]) def decode_action(self, ctx: ph.Context, action: np.ndarray): self.decode_action_count += 1 return [] def compute_reward(self, ctx: ph.Context) -> float: self.compute_reward_count += 1 return 0.0 def is_terminated(self, ctx: ph.Context) -> bool: return (ctx.env_view.current_step == self.num_steps) def is_truncated(self, ctx: ph.Context) -> bool: return (ctx.env_view.current_step == self.num_steps)
class MockPolicy(ph.Policy): def compute_action(self, obs) -> int: return 1
class MockEnv(ph.PhantomEnv): @dataclass class Supertype(ph.Supertype): type_value: float def __init__(self, env_supertype=None): agents = [MockStrategicAgent('a1'), MockStrategicAgent('a2'), MockAgent('a3')] network = ph.network.Network(agents) network.add_connection('a1', 'a2') network.add_connection('a2', 'a3') network.add_connection('a3', 'a1') super().__init__(num_steps=5, network=network, env_supertype=env_supertype)
class SimpleDecoder(Decoder): def __init__(self, id: int): self.id = id @property def action_space(self) -> gym.Space: return gym.spaces.Box((- np.inf), np.inf, (1,)) def decode(self, ctx: Context, action) -> List[Tuple[(AgentID, Message)]]: return [('RECIPIENT', f'FROM {self.id}')] def reset(self): self.id = None
def test_chained_decoder(): d1 = SimpleDecoder(1) d2 = SimpleDecoder(2) cd1 = ChainedDecoder([d1, d2]) messages = cd1.decode(None, [None, None]) assert (messages == [('RECIPIENT', 'FROM 1'), ('RECIPIENT', 'FROM 2')]) cd2 = d1.chain(d2) cd2.decode(None, [None, None]) assert (messages == [('RECIPIENT', 'FROM 1'), ('RECIPIENT', 'FROM 2')])
def test_chained_decoder_reset(): d1 = SimpleDecoder(1) d2 = SimpleDecoder(2) cd = ChainedDecoder([d1, d2]) cd.reset() assert (d1.id is None) assert (d2.id is None)
class MockDecoder(Decoder): def __init__(self, id: int): self.id = id @property def action_space(self) -> gym.Space: return gym.spaces.Box((- np.inf), np.inf, (1,)) def decode(self, ctx: Context, action) -> List[Tuple[(AgentID, Message)]]: assert (action == self.id) return [('RECIPIENT', f'FROM {self.id}')] def reset(self): self.id = None
def test_dict_decoder(): d1 = MockDecoder(1) d2 = MockDecoder(2) dd = DictDecoder({'d1': d1, 'd2': d2}) assert (dd.action_space == gym.spaces.Dict({'d1': gym.spaces.Box((- np.inf), np.inf, (1,)), 'd2': gym.spaces.Box((- np.inf), np.inf, (1,))})) messages = dd.decode(None, {'d1': 1, 'd2': 2}) assert (messages == [('RECIPIENT', 'FROM 1'), ('RECIPIENT', 'FROM 2')])
def test_chained_decoder_reset(): d1 = MockDecoder(1) d2 = MockDecoder(2) dd = DictDecoder({'d1': d1, 'd2': d2}) dd.reset() assert (d1.id is None) assert (d2.id is None)
class SimpleEncoder(Encoder): def __init__(self, id: int): self.id = id @property def observation_space(self) -> gym.Space: return gym.spaces.Box((- np.inf), np.inf, (1,)) def encode(self, ctx: Context) -> np.ndarray: return np.array([self.id]) def reset(self): self.id = None
def test_chained_encoder(): e1 = SimpleEncoder(1) e2 = SimpleEncoder(2) ce1 = ChainedEncoder([e1, e2]) obs = ce1.encode(None) assert (obs == (np.array([1]), np.array([2])))
def test_chained_encoder_reset(): e1 = SimpleEncoder(1) e2 = SimpleEncoder(2) cd = ChainedEncoder([e1, e2]) cd.reset() assert (e1.id is None) assert (e2.id is None)
class MockEncoder(Encoder): def __init__(self, id: int): self.id = id @property def observation_space(self) -> gym.Space: return gym.spaces.Box((- np.inf), np.inf, (1,)) def encode(self, ctx: Context) -> np.ndarray: return np.array([self.id]) def reset(self): self.id = None
def test_dict_encoder(): e1 = MockEncoder(1) e2 = MockEncoder(2) de = DictEncoder({'e1': e1, 'e2': e2}) assert (de.observation_space == gym.spaces.Dict({'e1': gym.spaces.Box((- np.inf), np.inf, (1,)), 'e2': gym.spaces.Box((- np.inf), np.inf, (1,))})) obs = de.encode(None) assert (obs == {'e1': np.array([1]), 'e2': np.array([2])})
def test_dict_encoder_reset(): e1 = MockEncoder(1) e2 = MockEncoder(2) de = DictEncoder({'e1': e1, 'e2': e2}) de.reset() assert (e1.id is None) assert (e2.id is None)
def test_decorator_style(): class Env(ph.FiniteStateMachineEnv): def __init__(self): agents = [MockAgent('A')] network = ph.Network(agents) super().__init__(num_steps=1, network=network, initial_stage='StageA') @ph.FSMStage(stage_id='StageA', acting_agents=['A'], next_stages=['StageA']) def handle(self): return 'StageA' env = Env() env.reset() env.step({})