code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
import dataclasses
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Sequence, TypeVar, Union

import numpy as np
from torch.utils.tensorboard.writer import SummaryWriter


@dataclass
class Episode:
    """Running score/length for one episode, plus optional per-episode info."""

    score: float = 0
    length: int = 0
    # Extra values captured at episode end (e.g. from the env's info dict).
    info: Dict[str, Dict[str, Any]] = dataclasses.field(default_factory=dict)


StatisticSelf = TypeVar("StatisticSelf", bound="Statistic")


@dataclass
class Statistic:
    """Summary statistics over an array of values.

    `score_function` controls `score()`: "mean-std" (mean minus std, a
    risk-penalized score) or "mean". Comparisons (`>`, `>=`) compare scores.
    """

    values: np.ndarray
    round_digits: int = 2
    score_function: str = "mean-std"

    @property
    def mean(self) -> float:
        return np.mean(self.values).item()

    @property
    def std(self) -> float:
        return np.std(self.values).item()

    @property
    def min(self) -> float:
        return np.min(self.values).item()

    @property
    def max(self) -> float:
        return np.max(self.values).item()

    def sum(self) -> float:
        return np.sum(self.values).item()

    def __len__(self) -> int:
        return len(self.values)

    def score(self) -> float:
        """Scalar score per `score_function`; raises for unsupported names."""
        if self.score_function == "mean-std":
            return self.mean - self.std
        elif self.score_function == "mean":
            return self.mean
        else:
            # FIX: was `raise NotImplemented(...)`. NotImplemented is a
            # sentinel value, not an exception class; calling it raises a
            # confusing TypeError. NotImplementedError is the correct exception.
            raise NotImplementedError(
                f"Only mean-std and mean score_functions supported ({self.score_function})"
            )

    def _diff(self: StatisticSelf, o: StatisticSelf) -> float:
        return self.score() - o.score()

    def __gt__(self: StatisticSelf, o: StatisticSelf) -> bool:
        return self._diff(o) > 0

    def __ge__(self: StatisticSelf, o: StatisticSelf) -> bool:
        return self._diff(o) >= 0

    def __repr__(self) -> str:
        mean = round(self.mean, self.round_digits)
        if self.round_digits == 0:
            mean = int(mean)
        if self.score_function == "mean":
            return f"{mean}"
        std = round(self.std, self.round_digits)
        if self.round_digits == 0:
            std = int(std)
        return f"{mean} +/- {std}"

    def to_dict(self) -> Dict[str, float]:
        return {
            "mean": self.mean,
            "std": self.std,
            "min": self.min,
            "max": self.max,
        }


EpisodesStatsSelf = TypeVar("EpisodesStatsSelf", bound="EpisodesStats")


class EpisodesStats:
    """Aggregates a batch of `Episode`s into score/length `Statistic`s.

    Nested episode-info dicts are flattened into `additional_stats` with
    keys joined as "{outer}_{inner}".
    """

    def __init__(
        self,
        episodes: Sequence[Episode],
        simple: bool = False,
        score_function: str = "mean-std",
    ) -> None:
        self.episodes = episodes
        # simple=True limits what write_to_tensorboard emits (mean only).
        self.simple = simple
        self.score = Statistic(
            np.array([e.score for e in episodes]), score_function=score_function
        )
        # Lengths are integral, so render them without decimals.
        self.length = Statistic(np.array([e.length for e in episodes]), round_digits=0)
        additional_values = defaultdict(list)
        for e in self.episodes:
            if e.info:
                for k, v in e.info.items():
                    if isinstance(v, dict):
                        for k2, v2 in v.items():
                            additional_values[f"{k}_{k2}"].append(v2)
                    else:
                        additional_values[k].append(v)
        self.additional_stats = {
            k: Statistic(np.array(values)) for k, values in additional_values.items()
        }
        self.score_function = score_function

    def __gt__(self: EpisodesStatsSelf, o: EpisodesStatsSelf) -> bool:
        return self.score > o.score

    def __ge__(self: EpisodesStatsSelf, o: EpisodesStatsSelf) -> bool:
        return self.score >= o.score

    def __repr__(self) -> str:
        mean = self.score.mean
        score = self.score.score()
        # Only show the separate score when it differs from the mean
        # (i.e. score_function is not plain "mean").
        if mean != score:
            return f"Score: {self.score} ({round(score)}) | Length: {self.length}"
        else:
            return f"Score: {self.score} | Length: {self.length}"

    def __len__(self) -> int:
        return len(self.episodes)

    def _asdict(self) -> dict:
        return {
            "n_episodes": len(self.episodes),
            "score": self.score.to_dict(),
            "length": self.length.to_dict(),
        }

    def write_to_tensorboard(
        self, tb_writer: SummaryWriter, main_tag: str, global_step: Optional[int] = None
    ) -> None:
        """Write scalar stats under `main_tag/`; full detail unless `simple`."""
        stats = {"mean": self.score.mean}
        if not self.simple:
            stats.update(
                {
                    "min": self.score.min,
                    "max": self.score.max,
                    "result": self.score.score(),
                    "n_episodes": len(self.episodes),
                    "length": self.length.mean,
                }
            )
            for k, addl_stats in self.additional_stats.items():
                stats[k] = addl_stats.mean
        for name, value in stats.items():
            tb_writer.add_scalar(f"{main_tag}/{name}", value, global_step=global_step)


class EpisodeAccumulator:
    """Tracks per-env in-progress episodes; moves them to `episodes` on done."""

    def __init__(self, num_envs: int):
        self._episodes = []
        self.current_episodes = [Episode() for _ in range(num_envs)]

    @property
    def episodes(self) -> List[Episode]:
        return self._episodes

    def step(self, reward: np.ndarray, done: np.ndarray, info: List[Dict]) -> None:
        """Advance every env by one step; finalize episodes where done."""
        for idx, current in enumerate(self.current_episodes):
            current.score += reward[idx]
            current.length += 1
            if done[idx]:
                self._episodes.append(current)
                self.current_episodes[idx] = Episode()
                # Hook runs after the fresh Episode is swapped in, with the
                # completed episode and its final info dict.
                self.on_done(idx, current, info[idx])

    def __len__(self) -> int:
        return len(self.episodes)

    def on_done(self, ep_idx: int, episode: Episode, info: Dict) -> None:
        """Subclass hook invoked once per completed episode."""
        pass

    def stats(self) -> EpisodesStats:
        return EpisodesStats(self.episodes)


def log_scalars(
    tb_writer: SummaryWriter,
    main_tag: str,
    tag_scalar_dict: Dict[str, Union[int, float]],
    global_step: int,
) -> None:
    """Write each tag/value pair to tensorboard under `main_tag/`."""
    for tag, value in tag_scalar_dict.items():
        tb_writer.add_scalar(f"{main_tag}/{tag}", value, global_step)
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/stats.py
0.900283
0.452596
stats.py
pypi
import numpy as np
import torch
from typing import NamedTuple, Sequence

from rl_algo_impls.shared.policy.actor_critic import OnPolicy
from rl_algo_impls.shared.trajectory import Trajectory
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnvObs


class RtgAdvantage(NamedTuple):
    # Paired value targets (rewards-to-go) and GAE advantages, concatenated
    # over all trajectories.
    rewards_to_go: torch.Tensor
    advantage: torch.Tensor


def discounted_cumsum(x: np.ndarray, gamma: float) -> np.ndarray:
    """Reverse discounted cumulative sum: dc[i] = x[i] + gamma * dc[i+1].

    Returns a new array; `x` is not modified.
    """
    dc = x.copy()
    for i in reversed(range(len(x) - 1)):
        dc[i] += gamma * dc[i + 1]
    return dc


def compute_advantage_from_trajectories(
    trajectories: Sequence[Trajectory],
    policy: OnPolicy,
    gamma: float,
    gae_lambda: float,
    device: torch.device,
) -> torch.Tensor:
    """GAE advantages for each trajectory, concatenated into one tensor.

    For trajectories cut off before termination (truncated), the critic's
    value of `next_obs` bootstraps the tail; terminated trajectories use 0.
    """
    advantage = []
    for traj in trajectories:
        last_val = 0
        if not traj.terminated and traj.next_obs is not None:
            last_val = policy.value(traj.next_obs)
        # Append the bootstrap value so deltas can look one step ahead.
        rew = np.append(np.array(traj.rew), last_val)
        v = np.append(np.array(traj.v), last_val)
        # TD residuals: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
        deltas = rew[:-1] + gamma * v[1:] - v[:-1]
        # GAE: discounted sum of deltas with factor gamma * lambda.
        advantage.append(discounted_cumsum(deltas, gamma * gae_lambda))
    return torch.as_tensor(
        np.concatenate(advantage), dtype=torch.float32, device=device
    )


def compute_rtg_and_advantage_from_trajectories(
    trajectories: Sequence[Trajectory],
    policy: OnPolicy,
    gamma: float,
    gae_lambda: float,
    device: torch.device,
) -> RtgAdvantage:
    """GAE advantages plus value targets (V(s_t) + advantage) per step.

    Same bootstrapping rules as compute_advantage_from_trajectories.
    """
    rewards_to_go = []
    advantages = []
    for traj in trajectories:
        last_val = 0
        if not traj.terminated and traj.next_obs is not None:
            last_val = policy.value(traj.next_obs)
        rew = np.append(np.array(traj.rew), last_val)
        v = np.append(np.array(traj.v), last_val)
        deltas = rew[:-1] + gamma * v[1:] - v[:-1]
        adv = discounted_cumsum(deltas, gamma * gae_lambda)
        advantages.append(adv)
        # TD(lambda) return: value estimate plus advantage.
        rewards_to_go.append(v[:-1] + adv)
    return RtgAdvantage(
        torch.as_tensor(
            np.concatenate(rewards_to_go), dtype=torch.float32, device=device
        ),
        torch.as_tensor(np.concatenate(advantages), dtype=torch.float32, device=device),
    )


def compute_advantages(
    rewards: np.ndarray,
    values: np.ndarray,
    episode_starts: np.ndarray,
    next_episode_starts: np.ndarray,
    next_obs: VecEnvObs,
    policy: OnPolicy,
    gamma: float,
    gae_lambda: float,
) -> np.ndarray:
    """GAE over a (n_steps, n_envs) rollout buffer, iterating backwards.

    `episode_starts[t]` marks steps that begin a new episode; a start at
    t+1 zeroes the bootstrap from step t (next_nonterminal = 0).
    """
    advantages = np.zeros_like(rewards)
    last_gae_lam = 0
    n_steps = advantages.shape[0]
    for t in reversed(range(n_steps)):
        if t == n_steps - 1:
            # Final step bootstraps from the critic's value of next_obs.
            next_nonterminal = 1.0 - next_episode_starts
            next_value = policy.value(next_obs)
        else:
            next_nonterminal = 1.0 - episode_starts[t + 1]
            next_value = values[t + 1]
        delta = rewards[t] + gamma * next_value * next_nonterminal - values[t]
        # Recurrence: A_t = delta_t + gamma * lambda * A_{t+1} (masked at
        # episode boundaries).
        last_gae_lam = delta + gamma * gae_lambda * next_nonterminal * last_gae_lam
        advantages[t] = last_gae_lam
    return advantages
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/gae.py
0.740456
0.55926
gae.py
pypi
import numpy as np
from dataclasses import dataclass, field
from typing import Generic, List, Optional, Type, TypeVar

from rl_algo_impls.wrappers.vectorable_wrapper import VecEnvObs


@dataclass
class Trajectory:
    """Per-step storage for one environment's episode segment."""

    obs: List[np.ndarray] = field(default_factory=list)
    act: List[np.ndarray] = field(default_factory=list)
    # Last observed next_obs; None once the trajectory terminates.
    next_obs: Optional[np.ndarray] = None
    rew: List[float] = field(default_factory=list)
    terminated: bool = False
    v: List[float] = field(default_factory=list)

    def add(
        self,
        obs: np.ndarray,
        act: np.ndarray,
        next_obs: np.ndarray,
        rew: float,
        terminated: bool,
        v: float,
    ):
        """Append one transition; clears next_obs when terminated."""
        self.obs.append(obs)
        self.act.append(act)
        self.next_obs = next_obs if not terminated else None
        self.rew.append(rew)
        self.terminated = terminated
        self.v.append(v)

    def __len__(self) -> int:
        return len(self.obs)


T = TypeVar("T", bound=Trajectory)


class TrajectoryAccumulator(Generic[T]):
    """Collects per-env trajectories from vectorized env steps."""

    def __init__(self, num_envs: int, trajectory_class: Type[T] = Trajectory) -> None:
        self.num_envs = num_envs
        self.trajectory_class = trajectory_class
        self._trajectories = []
        self._current_trajectories = [trajectory_class() for _ in range(num_envs)]

    def step(
        self,
        obs: VecEnvObs,
        action: np.ndarray,
        next_obs: VecEnvObs,
        reward: np.ndarray,
        done: np.ndarray,
        val: np.ndarray,
        *args,
    ) -> None:
        """Record one vectorized step; finalize any env whose episode ended.

        Extra positional arrays in `*args` are forwarded per-env to
        `trajectory.add` after the standard fields.
        """
        assert isinstance(obs, np.ndarray)
        assert isinstance(next_obs, np.ndarray)
        # FIX: the loop variable was named `args`, shadowing the *args
        # parameter. Behavior happened to be correct only because zip(...)
        # evaluates its arguments once up front; the rename removes the trap.
        for i, step_values in enumerate(
            zip(obs, action, next_obs, reward, done, val, *args)
        ):
            trajectory = self._current_trajectories[i]
            # TODO: Eventually take advantage of terminated/truncated differentiation in
            # later versions of gym.
            trajectory.add(*step_values)
            if done[i]:
                self._trajectories.append(trajectory)
                self._current_trajectories[i] = self.trajectory_class()
                self.on_done(i, trajectory)

    @property
    def all_trajectories(self) -> List[T]:
        """Completed trajectories plus any non-empty in-progress ones."""
        return self._trajectories + list(
            filter(lambda t: len(t), self._current_trajectories)
        )

    def n_timesteps(self) -> int:
        return sum(len(t) for t in self.all_trajectories)

    def on_done(self, env_idx: int, trajectory: T) -> None:
        """Subclass hook invoked once per completed trajectory."""
        pass
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/trajectory.py
0.73307
0.479138
trajectory.py
pypi
import itertools
import os
import shutil
from time import perf_counter
from typing import Dict, List, Optional, Union

import numpy as np
from torch.utils.tensorboard.writer import SummaryWriter

from rl_algo_impls.shared.callbacks import Callback
from rl_algo_impls.shared.policy.policy import Policy
from rl_algo_impls.shared.stats import Episode, EpisodeAccumulator, EpisodesStats
from rl_algo_impls.wrappers.action_mask_wrapper import find_action_masker
from rl_algo_impls.wrappers.vec_episode_recorder import VecEpisodeRecorder
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv


class EvaluateAccumulator(EpisodeAccumulator):
    """EpisodeAccumulator that stops after a per-env episode quota is met."""

    def __init__(
        self,
        num_envs: int,
        goal_episodes: int,
        print_returns: bool = True,
        ignore_first_episode: bool = False,
        additional_keys_to_log: Optional[List[str]] = None,
    ):
        super().__init__(num_envs)
        self.completed_episodes_by_env_idx = [[] for _ in range(num_envs)]
        # Quota is split evenly (rounded up) across envs, so the total
        # collected can exceed goal_episodes.
        self.goal_episodes_per_env = int(np.ceil(goal_episodes / num_envs))
        self.print_returns = print_returns
        if ignore_first_episode:
            first_done = set()

            # Returns False the first time an env finishes an episode,
            # True afterwards (closure over first_done).
            def should_record_done(idx: int) -> bool:
                has_done_first_episode = idx in first_done
                first_done.add(idx)
                return has_done_first_episode

            self.should_record_done = should_record_done
        else:
            self.should_record_done = lambda idx: True
        self.additional_keys_to_log = additional_keys_to_log

    def on_done(self, ep_idx: int, episode: Episode, info: Dict) -> None:
        if self.additional_keys_to_log:
            episode.info = {k: info[k] for k in self.additional_keys_to_log}
        # NOTE(review): this skips only when should_record_done is True AND
        # the quota is met; a first episode that should_record_done flags
        # False still falls through and is recorded. If ignore_first_episode
        # is meant to drop that episode, the condition may need to be
        # `not should_record(...) or quota met` — confirm intended semantics.
        if (
            self.should_record_done(ep_idx)
            and len(self.completed_episodes_by_env_idx[ep_idx])
            >= self.goal_episodes_per_env
        ):
            return
        self.completed_episodes_by_env_idx[ep_idx].append(episode)
        if self.print_returns:
            print(
                f"Episode {len(self)} | "
                f"Score {episode.score} | "
                f"Length {episode.length}"
            )

    def __len__(self) -> int:
        return sum(len(ce) for ce in self.completed_episodes_by_env_idx)

    @property
    def episodes(self) -> List[Episode]:
        # Flatten per-env lists into a single list.
        return list(itertools.chain(*self.completed_episodes_by_env_idx))

    def is_done(self) -> bool:
        return all(
            len(ce) == self.goal_episodes_per_env
            for ce in self.completed_episodes_by_env_idx
        )


def evaluate(
    env: VecEnv,
    policy: Policy,
    n_episodes: int,
    render: bool = False,
    deterministic: bool = True,
    print_returns: bool = True,
    ignore_first_episode: bool = False,
    additional_keys_to_log: Optional[List[str]] = None,
    score_function: str = "mean-std",
) -> EpisodesStats:
    """Run the policy in eval mode until n_episodes complete; return stats.

    Leaves the policy in eval mode — callers that keep training must call
    policy.train(...) afterwards.
    """
    policy.sync_normalization(env)
    policy.eval()
    episodes = EvaluateAccumulator(
        env.num_envs,
        n_episodes,
        print_returns,
        ignore_first_episode,
        additional_keys_to_log=additional_keys_to_log,
    )

    obs = env.reset()
    # Duck-typed action masking: use env.get_action_mask when present.
    get_action_mask = getattr(env, "get_action_mask", None)
    while not episodes.is_done():
        act = policy.act(
            obs,
            deterministic=deterministic,
            action_masks=get_action_mask() if get_action_mask else None,
        )
        obs, rew, done, info = env.step(act)
        episodes.step(rew, done, info)
        if render:
            env.render()
    stats = EpisodesStats(
        episodes.episodes,
        score_function=score_function,
    )
    if print_returns:
        print(stats)
    return stats


class EvalCallback(Callback):
    """Periodic evaluation callback: tracks best stats, saves models/videos."""

    def __init__(
        self,
        policy: Policy,
        env: VecEnv,
        tb_writer: SummaryWriter,
        best_model_path: Optional[str] = None,
        step_freq: Union[int, float] = 50_000,
        n_episodes: int = 10,
        save_best: bool = True,
        deterministic: bool = True,
        only_record_video_on_best: bool = True,
        video_env: Optional[VecEnv] = None,
        video_dir: Optional[str] = None,
        max_video_length: int = 3600,
        ignore_first_episode: bool = False,
        additional_keys_to_log: Optional[List[str]] = None,
        score_function: str = "mean-std",
        wandb_enabled: bool = False,
    ) -> None:
        super().__init__()
        self.policy = policy
        self.env = env
        self.tb_writer = tb_writer
        self.best_model_path = best_model_path
        self.step_freq = int(step_freq)
        self.n_episodes = n_episodes
        self.save_best = save_best
        self.deterministic = deterministic
        self.stats: List[EpisodesStats] = []
        # Best EpisodesStats seen so far; None until first evaluation.
        self.best = None

        self.only_record_video_on_best = only_record_video_on_best
        # video_env and video_dir must be provided together.
        assert (video_env is not None) == (video_dir is not None)
        self.video_env = video_env
        self.video_dir = video_dir
        if video_dir:
            os.makedirs(video_dir, exist_ok=True)
        self.max_video_length = max_video_length
        self.ignore_first_episode = ignore_first_episode
        self.additional_keys_to_log = additional_keys_to_log
        self.score_function = score_function
        self.wandb_enabled = wandb_enabled

    def on_step(self, timesteps_elapsed: int = 1) -> bool:
        super().on_step(timesteps_elapsed)
        # Evaluate once per step_freq window, keyed off how many evals
        # have already run.
        if self.timesteps_elapsed // self.step_freq >= len(self.stats):
            self.evaluate()
        return True

    def evaluate(
        self, n_episodes: Optional[int] = None, print_returns: Optional[bool] = None
    ) -> EpisodesStats:
        """Evaluate, log, and save best model/video per configuration."""
        start_time = perf_counter()
        # NOTE(review): `print_returns or False` coerces both None and False
        # to False, so per-episode printing is effectively opt-in here even
        # though evaluate()'s own default is True — confirm intent.
        eval_stat = evaluate(
            self.env,
            self.policy,
            n_episodes or self.n_episodes,
            deterministic=self.deterministic,
            print_returns=print_returns or False,
            ignore_first_episode=self.ignore_first_episode,
            additional_keys_to_log=self.additional_keys_to_log,
            score_function=self.score_function,
        )
        end_time = perf_counter()
        self.tb_writer.add_scalar(
            "eval/steps_per_second",
            eval_stat.length.sum() / (end_time - start_time),
            self.timesteps_elapsed,
        )
        # evaluate() left the policy in eval mode; restore training mode.
        self.policy.train(True)
        print(f"Eval Timesteps: {self.timesteps_elapsed} | {eval_stat}")

        self.stats.append(eval_stat)

        if not self.best or eval_stat >= self.best:
            # Ties update self.best but don't count as "strictly better"
            # for video recording.
            strictly_better = not self.best or eval_stat > self.best
            self.best = eval_stat
            if self.save_best:
                assert self.best_model_path
                self.policy.save(self.best_model_path)
                print("Saved best model")
                if self.wandb_enabled:
                    import wandb

                    best_model_name = os.path.split(self.best_model_path)[-1]
                    shutil.make_archive(
                        os.path.join(wandb.run.dir, best_model_name),  # type: ignore
                        "zip",
                        self.best_model_path,
                    )
            self.best.write_to_tensorboard(
                self.tb_writer, "best_eval", self.timesteps_elapsed
            )
        else:
            strictly_better = False

        if self.video_env and (not self.only_record_video_on_best or strictly_better):
            assert self.video_env and self.video_dir
            best_video_base_path = os.path.join(
                self.video_dir, str(self.timesteps_elapsed)
            )
            video_wrapped = VecEpisodeRecorder(
                self.video_env,
                best_video_base_path,
                max_video_length=self.max_video_length,
            )
            video_stats = evaluate(
                video_wrapped,
                self.policy,
                1,
                deterministic=self.deterministic,
                print_returns=False,
                score_function=self.score_function,
            )
            print(f"Saved video: {video_stats}")

        eval_stat.write_to_tensorboard(self.tb_writer, "eval", self.timesteps_elapsed)

        return eval_stat
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/callbacks/eval_callback.py
0.791015
0.26699
eval_callback.py
pypi
from typing import Any, Dict, List, Optional

import numpy as np

from rl_algo_impls.runner.config import Config
from rl_algo_impls.shared.algorithm import Algorithm
from rl_algo_impls.shared.callbacks.callback import Callback
from rl_algo_impls.shared.schedule import constant_schedule, lerp
from rl_algo_impls.wrappers.lux_env_gridnet import LuxRewardWeights
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv

# Hyperparameter names supported by phase overrides.
GAMMA_NAME = "gamma"
GAE_LAMBDA_NAME = "gae_lambda"
REWARD_WEIGHTS_NAME = "reward_weights"


class LuxHyperparamTransitions(Callback):
    """Steps hyperparameters through phases with lerped transitions between.

    `durations` (fractions of total training summing to 1) alternate
    phase / transition / phase / ... — hence len == 2 * len(phases) - 1.
    """

    def __init__(
        self,
        config: Config,
        env: VecEnv,
        algo: Algorithm,
        phases: List[Dict[str, Any]],
        durations: List[float],
        start_timesteps: int = 0,
    ) -> None:
        super().__init__()
        self.env = env
        self.algo = algo
        self.phases = phases
        assert (
            len(durations) == len(phases) * 2 - 1
        ), f"Durations expected to be 2*len(phases)-1 to account for transitions between phases"
        assert np.isclose(np.sum(durations), 1)
        self.durations = durations

        self.total_train_timesteps = config.n_timesteps
        self.timesteps_elapsed = start_timesteps
        # None while inside a transition; otherwise index of active phase.
        self.current_phase_idx: Optional[int] = None

        self.update()

    def on_step(self, timesteps_elapsed: int = 1) -> bool:
        super().on_step(timesteps_elapsed)
        self.update()
        return True

    def update(self) -> None:
        """Map training progress onto the phase/transition schedule."""
        progress = self.timesteps_elapsed / self.total_train_timesteps
        prior_duration_accumulation = 0
        current_duration_accumulation = 0
        for idx, d in enumerate(self.durations):
            current_duration_accumulation += d
            if progress < current_duration_accumulation:
                # Even indexes are phases, odd indexes are transitions.
                current_or_prior_phase = idx // 2
                if idx % 2 == 0:
                    self.maybe_update_phase(current_or_prior_phase)
                else:
                    self.update_phase_transition(
                        current_or_prior_phase,
                        # Fraction of the way through this transition.
                        (progress - prior_duration_accumulation) / d,
                    )
                break
            prior_duration_accumulation = current_duration_accumulation
        else:
            # Progress past the schedule end: stay in the final phase.
            self.maybe_update_phase(len(self.phases) - 1)

    def maybe_update_phase(self, phase_idx: int) -> None:
        """Apply a phase's hyperparameter overrides (idempotent per phase)."""
        if phase_idx == self.current_phase_idx:
            return
        self.current_phase_idx = phase_idx

        phase = self.phases[phase_idx]
        print(f"{self.timesteps_elapsed}: Entering phase {phase_idx}: {phase}")
        for k, v in phase.items():
            if k == GAMMA_NAME:
                # gamma is stored as a schedule on the algorithm.
                name = f"{k}_schedule"
                assert hasattr(self.algo, name)
                setattr(self.algo, name, constant_schedule(v))
            elif k == GAE_LAMBDA_NAME:
                assert hasattr(self.algo, k)
                setattr(self.algo, k, v)
            elif k == REWARD_WEIGHTS_NAME:
                # NOTE(review): asserts on env.unwrapped but sets on env,
                # while update_phase_transition does the reverse — one of
                # the two targets is likely unintended; confirm which object
                # actually owns reward_weights.
                assert hasattr(self.env.unwrapped, k)
                setattr(self.env, k, LuxRewardWeights(**v))
            else:
                raise ValueError(f"{k} not supported in {self.__class__.__name__}")
        if REWARD_WEIGHTS_NAME in phase and hasattr(self.env, REWARD_WEIGHTS_NAME):
            print(f"Current reward weights: {getattr(self.env, 'reward_weights')}")

    def update_phase_transition(
        self, prior_phase_idx: int, transition_progress: float
    ) -> None:
        """Linearly interpolate overrides between two adjacent phases."""
        if self.current_phase_idx is not None:
            print(f"{self.timesteps_elapsed}: Exiting phase {self.current_phase_idx}")
        self.current_phase_idx = None
        prior_phase = self.phases[prior_phase_idx]
        next_phase = self.phases[prior_phase_idx + 1]
        assert set(prior_phase.keys()) == set(
            next_phase.keys()
        ), f"An override has to be specified in every phase"
        for k, next_v in next_phase.items():
            old_v = prior_phase[k]
            if k == GAMMA_NAME:
                name = f"{k}_schedule"
                assert hasattr(self.algo, name)
                setattr(
                    self.algo,
                    name,
                    constant_schedule(lerp(old_v, next_v, transition_progress)),
                )
            elif k == GAE_LAMBDA_NAME:
                assert hasattr(self.algo, k)
                setattr(self.algo, k, lerp(old_v, next_v, transition_progress))
            elif k == REWARD_WEIGHTS_NAME:
                # NOTE(review): see maybe_update_phase — env vs env.unwrapped
                # asymmetry between the two methods.
                assert hasattr(self.env, k)
                setattr(
                    self.env.unwrapped,
                    k,
                    LuxRewardWeights.lerp(old_v, next_v, transition_progress),
                )
            else:
                raise ValueError(f"{k} not supported in {self.__class__.__name__}")
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/callbacks/lux_hyperparam_transitions.py
0.873815
0.337395
lux_hyperparam_transitions.py
pypi
import numpy as np
import optuna

from time import perf_counter
from torch.utils.tensorboard.writer import SummaryWriter
from typing import NamedTuple, Union

from rl_algo_impls.shared.callbacks import Callback
from rl_algo_impls.shared.callbacks.eval_callback import evaluate
from rl_algo_impls.shared.policy.policy import Policy
from rl_algo_impls.shared.stats import EpisodesStats
from rl_algo_impls.wrappers.episode_stats_writer import EpisodeStatsWriter
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv, find_wrapper


class Evaluation(NamedTuple):
    # Eval-env stats, recent training stats, and their combined score.
    eval_stat: EpisodesStats
    train_stat: EpisodesStats
    score: float


class OptimizeCallback(Callback):
    """Periodic evaluation callback that reports to an optuna trial.

    Sets `is_pruned` and returns False from on_step when optuna decides
    to prune the trial, so the training loop can stop early.
    """

    def __init__(
        self,
        policy: Policy,
        env: VecEnv,
        trial: optuna.Trial,
        tb_writer: SummaryWriter,
        step_freq: Union[int, float] = 50_000,
        n_episodes: int = 10,
        deterministic: bool = True,
    ) -> None:
        super().__init__()
        self.policy = policy
        self.env = env
        self.trial = trial
        self.tb_writer = tb_writer
        self.step_freq = step_freq
        self.n_episodes = n_episodes
        self.deterministic = deterministic

        # Training stats come from the EpisodeStatsWriter wrapper on the
        # policy's own (training) env.
        stats_writer = find_wrapper(policy.env, EpisodeStatsWriter)
        assert stats_writer
        self.stats_writer = stats_writer

        self.eval_step = 1
        self.is_pruned = False
        self.last_eval_stat = None
        self.last_train_stat = None
        self.last_score = -np.inf

    def on_step(self, timesteps_elapsed: int = 1) -> bool:
        super().on_step(timesteps_elapsed)
        if self.timesteps_elapsed >= self.eval_step * self.step_freq:
            self.evaluate()
            # False signals the training loop to stop when pruned.
            return not self.is_pruned
        return True

    def evaluate(self) -> None:
        """Run one evaluation, report the score to optuna, check pruning."""
        self.last_eval_stat, self.last_train_stat, score = evaluation(
            self.policy,
            self.env,
            self.tb_writer,
            self.n_episodes,
            self.deterministic,
            self.timesteps_elapsed,
        )
        self.last_score = score

        self.trial.report(score, self.eval_step)
        if self.trial.should_prune():
            self.is_pruned = True

        self.eval_step += 1


def evaluation(
    policy: Policy,
    env: VecEnv,
    tb_writer: SummaryWriter,
    n_episodes: int,
    deterministic: bool,
    timesteps_elapsed: int,
) -> Evaluation:
    """Evaluate the policy and combine eval/train mean scores 50/50.

    Logs steps/sec, eval stats, and the combined score to tensorboard,
    and restores the policy to training mode afterwards.
    """
    start_time = perf_counter()
    eval_stat = evaluate(
        env,
        policy,
        n_episodes,
        deterministic=deterministic,
        print_returns=False,
    )
    end_time = perf_counter()
    tb_writer.add_scalar(
        "eval/steps_per_second",
        eval_stat.length.sum() / (end_time - start_time),
        timesteps_elapsed,
    )
    # evaluate() left the policy in eval mode; restore training mode.
    policy.train()
    print(f"Eval Timesteps: {timesteps_elapsed} | {eval_stat}")
    eval_stat.write_to_tensorboard(tb_writer, "eval", timesteps_elapsed)

    stats_writer = find_wrapper(policy.env, EpisodeStatsWriter)
    assert stats_writer

    train_stat = EpisodesStats(stats_writer.episodes)
    print(f"  Train Stat: {train_stat}")

    score = (eval_stat.score.mean + train_stat.score.mean) / 2
    print(f"  Score: {round(score, 2)}")
    tb_writer.add_scalar(
        "eval/score",
        score,
        timesteps_elapsed,
    )

    return Evaluation(eval_stat, train_stat, score)
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/callbacks/optimize_callback.py
0.902138
0.295662
optimize_callback.py
pypi
from abc import abstractmethod
from typing import NamedTuple, Optional, Sequence, Tuple, TypeVar

import gym
import numpy as np
import torch

from gym.spaces import Box

from rl_algo_impls.shared.policy.actor_critic_network import (
    ConnectedTrioActorCriticNetwork,
    SeparateActorCriticNetwork,
    UNetActorCriticNetwork,
)
from rl_algo_impls.shared.policy.policy import Policy

from rl_algo_impls.wrappers.vectorable_wrapper import (
    VecEnv,
    VecEnvObs,
    single_action_space,
    single_observation_space,
)


class Step(NamedTuple):
    # Sampled action, value estimate, log-prob, and the action after
    # clamping/rescaling to the action space bounds.
    a: np.ndarray
    v: np.ndarray
    logp_a: np.ndarray
    clamped_a: np.ndarray


class ACForward(NamedTuple):
    # Outputs of a training forward pass.
    logp_a: torch.Tensor
    entropy: torch.Tensor
    v: torch.Tensor


FEAT_EXT_FILE_NAME = "feat_ext.pt"
V_FEAT_EXT_FILE_NAME = "v_feat_ext.pt"
PI_FILE_NAME = "pi.pt"
V_FILE_NAME = "v.pt"
ActorCriticSelf = TypeVar("ActorCriticSelf", bound="ActorCritic")


def clamp_actions(
    actions: np.ndarray, action_space: gym.Space, squash_output: bool
) -> np.ndarray:
    """Map raw actions into the Box action-space bounds.

    Squashed (tanh) outputs in [-1, 1] are rescaled to [low, high];
    unsquashed outputs are clipped. Non-Box spaces pass through unchanged.
    """
    if isinstance(action_space, Box):
        low, high = action_space.low, action_space.high  # type: ignore
        if squash_output:
            # Squashed output is already between -1 and 1. Rescale if the actual
            # output needs to something other than -1 and 1
            return low + 0.5 * (actions + 1) * (high - low)
        else:
            return np.clip(actions, low, high)
    return actions


class OnPolicy(Policy):
    """Interface for policies usable by on-policy algorithms (value + step)."""

    @abstractmethod
    def value(self, obs: VecEnvObs) -> np.ndarray:
        ...

    @abstractmethod
    def step(self, obs: VecEnvObs, action_masks: Optional[np.ndarray] = None) -> Step:
        ...

    @property
    @abstractmethod
    def action_shape(self) -> Tuple[int, ...]:
        ...


class ActorCritic(OnPolicy):
    """Actor-critic policy that dispatches to one of three network layouts.

    "unet" head -> UNetActorCriticNetwork; otherwise shared features ->
    ConnectedTrio, separate features -> SeparateActorCriticNetwork.
    """

    def __init__(
        self,
        env: VecEnv,
        pi_hidden_sizes: Optional[Sequence[int]] = None,
        v_hidden_sizes: Optional[Sequence[int]] = None,
        init_layers_orthogonal: bool = True,
        activation_fn: str = "tanh",
        log_std_init: float = -0.5,
        use_sde: bool = False,
        full_std: bool = True,
        squash_output: bool = False,
        share_features_extractor: bool = True,
        cnn_flatten_dim: int = 512,
        cnn_style: str = "nature",
        cnn_layers_init_orthogonal: Optional[bool] = None,
        impala_channels: Sequence[int] = (16, 32, 32),
        actor_head_style: str = "single",
        **kwargs,
    ) -> None:
        super().__init__(env, **kwargs)

        observation_space = single_observation_space(env)
        action_space = single_action_space(env)
        # Present only on gridnet-style envs; None otherwise.
        action_plane_space = getattr(env, "action_plane_space", None)

        self.action_space = action_space
        self.squash_output = squash_output

        if actor_head_style == "unet":
            self.network = UNetActorCriticNetwork(
                observation_space,
                action_space,
                action_plane_space,
                v_hidden_sizes=v_hidden_sizes,
                init_layers_orthogonal=init_layers_orthogonal,
                activation_fn=activation_fn,
                cnn_layers_init_orthogonal=cnn_layers_init_orthogonal,
            )
        elif share_features_extractor:
            self.network = ConnectedTrioActorCriticNetwork(
                observation_space,
                action_space,
                pi_hidden_sizes=pi_hidden_sizes,
                v_hidden_sizes=v_hidden_sizes,
                init_layers_orthogonal=init_layers_orthogonal,
                activation_fn=activation_fn,
                log_std_init=log_std_init,
                use_sde=use_sde,
                full_std=full_std,
                squash_output=squash_output,
                cnn_flatten_dim=cnn_flatten_dim,
                cnn_style=cnn_style,
                cnn_layers_init_orthogonal=cnn_layers_init_orthogonal,
                impala_channels=impala_channels,
                actor_head_style=actor_head_style,
                action_plane_space=action_plane_space,
            )
        else:
            self.network = SeparateActorCriticNetwork(
                observation_space,
                action_space,
                pi_hidden_sizes=pi_hidden_sizes,
                v_hidden_sizes=v_hidden_sizes,
                init_layers_orthogonal=init_layers_orthogonal,
                activation_fn=activation_fn,
                log_std_init=log_std_init,
                use_sde=use_sde,
                full_std=full_std,
                squash_output=squash_output,
                cnn_flatten_dim=cnn_flatten_dim,
                cnn_style=cnn_style,
                cnn_layers_init_orthogonal=cnn_layers_init_orthogonal,
                impala_channels=impala_channels,
                actor_head_style=actor_head_style,
                action_plane_space=action_plane_space,
            )

    def forward(
        self,
        obs: torch.Tensor,
        action: torch.Tensor,
        action_masks: Optional[torch.Tensor] = None,
    ) -> ACForward:
        """Training forward pass: log-prob and entropy of `action`, value."""
        (_, logp_a, entropy), v = self.network(obs, action, action_masks=action_masks)

        assert logp_a is not None
        assert entropy is not None
        return ACForward(logp_a, entropy, v)

    def value(self, obs: VecEnvObs) -> np.ndarray:
        """Critic value estimate for `obs` (no grad)."""
        o = self._as_tensor(obs)
        with torch.no_grad():
            v = self.network.value(o)
        return v.cpu().numpy()

    def step(self, obs: VecEnvObs, action_masks: Optional[np.ndarray] = None) -> Step:
        """Sample an action; return it with value, log-prob, clamped action."""
        o = self._as_tensor(obs)
        a_masks = self._as_tensor(action_masks) if action_masks is not None else None
        with torch.no_grad():
            (pi, _, _), v = self.network.distribution_and_value(o, action_masks=a_masks)
            a = pi.sample()
            logp_a = pi.log_prob(a)

        a_np = a.cpu().numpy()
        clamped_a_np = clamp_actions(a_np, self.action_space, self.squash_output)
        return Step(a_np, v.cpu().numpy(), logp_a.cpu().numpy(), clamped_a_np)

    def act(
        self,
        obs: np.ndarray,
        deterministic: bool = True,
        action_masks: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """Select an action: sampled (non-deterministic) or distribution mode."""
        if not deterministic:
            return self.step(obs, action_masks=action_masks).clamped_a
        else:
            o = self._as_tensor(obs)
            a_masks = (
                self._as_tensor(action_masks) if action_masks is not None else None
            )
            with torch.no_grad():
                (pi, _, _), _ = self.network.distribution_and_value(
                    o, action_masks=a_masks
                )
                a = pi.mode
            return clamp_actions(
                a.cpu().numpy(), self.action_space, self.squash_output
            )

    def load(self, path: str) -> None:
        super().load(path)
        # Refresh gSDE exploration noise after loading weights.
        self.reset_noise()

    def load_from(self: ActorCriticSelf, policy: ActorCriticSelf) -> ActorCriticSelf:
        super().load_from(policy)
        self.reset_noise()
        return self

    def reset_noise(self, batch_size: Optional[int] = None) -> None:
        self.network.reset_noise(
            batch_size=batch_size if batch_size else self.env.num_envs
        )

    @property
    def action_shape(self) -> Tuple[int, ...]:
        return self.network.action_shape
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/policy/actor_critic.py
0.939178
0.328934
actor_critic.py
pypi
import os

from abc import ABC, abstractmethod
from copy import deepcopy
from typing import Dict, Optional, Type, TypeVar, Union

import numpy as np
import torch
import torch.nn as nn

from stable_baselines3.common.vec_env import unwrap_vec_normalize
from stable_baselines3.common.vec_env.vec_normalize import VecNormalize

from rl_algo_impls.wrappers.normalize import NormalizeObservation, NormalizeReward
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv, VecEnvObs, find_wrapper

# Activation-function name -> module class, for config-driven construction.
ACTIVATION: Dict[str, Type[nn.Module]] = {
    "tanh": nn.Tanh,
    "relu": nn.ReLU,
}

VEC_NORMALIZE_FILENAME = "vecnormalize.pkl"
MODEL_FILENAME = "model.pth"
NORMALIZE_OBSERVATION_FILENAME = "norm_obs.npz"
NORMALIZE_REWARD_FILENAME = "norm_reward.npz"

PolicySelf = TypeVar("PolicySelf", bound="Policy")


class Policy(nn.Module, ABC):
    """Base policy: an nn.Module bound to a VecEnv, with save/load support.

    Locates any normalization wrappers on the env at construction time so
    their statistics can be saved, loaded, and synced to other envs.
    """

    @abstractmethod
    def __init__(self, env: VecEnv, **kwargs) -> None:
        super().__init__()
        self.env = env
        self.vec_normalize = unwrap_vec_normalize(env)
        self.norm_observation = find_wrapper(env, NormalizeObservation)
        self.norm_reward = find_wrapper(env, NormalizeReward)
        # Set by to(); used for tensor placement and checkpoint loading.
        self.device = None

    def to(
        self: PolicySelf,
        device: Optional[torch.device] = None,
        dtype: Optional[Union[torch.dtype, str]] = None,
        non_blocking: bool = False,
    ) -> PolicySelf:
        """nn.Module.to, additionally remembering the target device."""
        super().to(device, dtype, non_blocking)
        self.device = device
        return self

    @abstractmethod
    def act(
        self,
        obs: VecEnvObs,
        deterministic: bool = True,
        action_masks: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        ...

    def save(self, path: str) -> None:
        """Save model weights and any normalization statistics into `path`."""
        os.makedirs(path, exist_ok=True)

        if self.vec_normalize:
            self.vec_normalize.save(os.path.join(path, VEC_NORMALIZE_FILENAME))
        if self.norm_observation:
            self.norm_observation.save(
                os.path.join(path, NORMALIZE_OBSERVATION_FILENAME)
            )
        if self.norm_reward:
            self.norm_reward.save(os.path.join(path, NORMALIZE_REWARD_FILENAME))
        torch.save(
            self.state_dict(),
            os.path.join(path, MODEL_FILENAME),
        )

    def load(self, path: str) -> None:
        # VecNormalize load occurs in env.py
        self.load_state_dict(
            torch.load(os.path.join(path, MODEL_FILENAME), map_location=self.device)
        )
        if self.norm_observation:
            self.norm_observation.load(
                os.path.join(path, NORMALIZE_OBSERVATION_FILENAME)
            )
        if self.norm_reward:
            self.norm_reward.load(os.path.join(path, NORMALIZE_REWARD_FILENAME))

    def load_from(self: PolicySelf, policy: PolicySelf) -> PolicySelf:
        """Copy weights and normalization state from another policy in-memory."""
        self.load_state_dict(policy.state_dict())
        if self.norm_observation:
            assert policy.norm_observation
            self.norm_observation.load_from(policy.norm_observation)
        if self.norm_reward:
            assert policy.norm_reward
            self.norm_reward.load_from(policy.norm_reward)
        return self

    def reset_noise(self) -> None:
        """Hook for exploration-noise policies; no-op by default."""
        pass

    def _as_tensor(self, obs: VecEnvObs) -> torch.Tensor:
        assert isinstance(obs, np.ndarray)
        o = torch.as_tensor(obs)
        if self.device is not None:
            o = o.to(self.device)
        return o

    def num_trainable_parameters(self) -> int:
        return sum(p.numel() for p in self.parameters() if p.requires_grad)

    def num_parameters(self) -> int:
        return sum(p.numel() for p in self.parameters())

    def sync_normalization(self, destination_env) -> None:
        """Copy this policy's normalization stats into `destination_env`'s
        wrapper chain, walking wrappers via .venv/.env until the base env.
        """
        current = destination_env
        while current != current.unwrapped:
            if isinstance(current, VecNormalize):
                assert self.vec_normalize
                current.ret_rms = deepcopy(self.vec_normalize.ret_rms)
                if hasattr(self.vec_normalize, "obs_rms"):
                    current.obs_rms = deepcopy(self.vec_normalize.obs_rms)
            elif isinstance(current, NormalizeObservation):
                assert self.norm_observation
                current.rms = deepcopy(self.norm_observation.rms)
            elif isinstance(current, NormalizeReward):
                assert self.norm_reward
                current.rms = deepcopy(self.norm_reward.rms)
            # NOTE(review): the getattr default is `current` itself, so if a
            # wrapper has neither venv nor env this loop never advances (and
            # the `if not current` guard below can never fire, since current
            # is never falsy) — potential infinite loop; confirm all wrappers
            # expose venv or env.
            current = getattr(current, "venv", getattr(current, "env", current))
            if not current:
                raise AttributeError(
                    f"{type(current)} doesn't include env or venv attribute"
                )
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/policy/policy.py
0.897339
0.273529
policy.py
pypi
from typing import Optional, Sequence, Tuple, Type import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from gym.spaces import MultiDiscrete, Space from rl_algo_impls.shared.actor import pi_forward from rl_algo_impls.shared.actor.gridnet import GridnetDistribution from rl_algo_impls.shared.actor.gridnet_decoder import Transpose from rl_algo_impls.shared.module.utils import layer_init from rl_algo_impls.shared.policy.actor_critic_network.network import ( ACNForward, ActorCriticNetwork, default_hidden_sizes, ) from rl_algo_impls.shared.policy.critic import CriticHead from rl_algo_impls.shared.policy.policy import ACTIVATION class UNetActorCriticNetwork(ActorCriticNetwork): def __init__( self, observation_space: Space, action_space: Space, action_plane_space: Space, v_hidden_sizes: Optional[Sequence[int]] = None, init_layers_orthogonal: bool = True, activation_fn: str = "tanh", cnn_layers_init_orthogonal: Optional[bool] = None, ) -> None: if cnn_layers_init_orthogonal is None: cnn_layers_init_orthogonal = True super().__init__() assert isinstance(action_space, MultiDiscrete) assert isinstance(action_plane_space, MultiDiscrete) self.range_size = np.max(observation_space.high) - np.min(observation_space.low) # type: ignore self.map_size = len(action_space.nvec) // len(action_plane_space.nvec) # type: ignore self.action_vec = action_plane_space.nvec # type: ignore activation = ACTIVATION[activation_fn] def conv_relu( in_channels: int, out_channels: int, kernel_size: int = 3, padding: int = 1 ) -> nn.Module: return nn.Sequential( layer_init( nn.Conv2d( in_channels, out_channels, kernel_size=kernel_size, padding=padding, ), cnn_layers_init_orthogonal, ), activation(), ) def up_conv_relu(in_channels: int, out_channels: int) -> nn.Module: return nn.Sequential( layer_init( nn.ConvTranspose2d( in_channels, out_channels, kernel_size=3, stride=2, padding=1, output_padding=1, ), cnn_layers_init_orthogonal, ), activation(), ) in_channels = 
observation_space.shape[0] # type: ignore self.enc1 = conv_relu(in_channels, 32) self.enc2 = nn.Sequential(max_pool(), conv_relu(32, 64)) self.enc3 = nn.Sequential(max_pool(), conv_relu(64, 128)) self.enc4 = nn.Sequential(max_pool(), conv_relu(128, 256)) self.enc5 = nn.Sequential( max_pool(), conv_relu(256, 512, kernel_size=1, padding=0) ) self.dec4 = up_conv_relu(512, 256) self.dec3 = nn.Sequential(conv_relu(512, 256), up_conv_relu(256, 128)) self.dec2 = nn.Sequential(conv_relu(256, 128), up_conv_relu(128, 64)) self.dec1 = nn.Sequential(conv_relu(128, 64), up_conv_relu(64, 32)) self.out = nn.Sequential( conv_relu(64, 32), layer_init( nn.Conv2d(32, self.action_vec.sum(), kernel_size=1, padding=0), cnn_layers_init_orthogonal, std=0.01, ), Transpose((0, 2, 3, 1)), ) with torch.no_grad(): cnn_out = torch.flatten( F.adaptive_avg_pool2d( self.enc5( self.enc4( self.enc3( self.enc2( self.enc1( self._preprocess( torch.as_tensor(observation_space.sample()) ) ) ) ) ) ), output_size=1, ), start_dim=1, ) v_hidden_sizes = ( v_hidden_sizes if v_hidden_sizes is not None else default_hidden_sizes(observation_space) ) self.critic_head = CriticHead( in_dim=cnn_out.shape[1:], hidden_sizes=v_hidden_sizes, activation=activation, init_layers_orthogonal=init_layers_orthogonal, ) def _preprocess(self, obs: torch.Tensor) -> torch.Tensor: if len(obs.shape) == 3: obs = obs.unsqueeze(0) return obs.float() / self.range_size def forward( self, obs: torch.Tensor, action: torch.Tensor, action_masks: Optional[torch.Tensor] = None, ) -> ACNForward: return self._distribution_and_value( obs, action=action, action_masks=action_masks ) def distribution_and_value( self, obs: torch.Tensor, action_masks: Optional[torch.Tensor] = None ) -> ACNForward: return self._distribution_and_value(obs, action_masks=action_masks) def _distribution_and_value( self, obs: torch.Tensor, action: Optional[torch.Tensor] = None, action_masks: Optional[torch.Tensor] = None, ) -> ACNForward: assert ( action_masks is not None ), 
f"No mask case unhandled in {self.__class__.__name__}" obs = self._preprocess(obs) e1 = self.enc1(obs) e2 = self.enc2(e1) e3 = self.enc3(e2) e4 = self.enc4(e3) e5 = self.enc5(e4) v = self.critic_head(F.adaptive_avg_pool2d(e5, output_size=1)) d4 = self.dec4(e5) d3 = self.dec3(torch.cat((d4, e4), dim=1)) d2 = self.dec2(torch.cat((d3, e3), dim=1)) d1 = self.dec1(torch.cat((d2, e2), dim=1)) logits = self.out(torch.cat((d1, e1), dim=1)) pi = GridnetDistribution( int(np.prod(obs.shape[-2:])), self.action_vec, logits, action_masks ) return ACNForward(pi_forward(pi, action), v) def value(self, obs: torch.Tensor) -> torch.Tensor: obs = self._preprocess(obs) e1 = self.enc1(obs) e2 = self.enc2(e1) e3 = self.enc3(e2) e4 = self.enc4(e3) e5 = self.enc5(e4) return self.critic_head(F.adaptive_avg_pool2d(e5, output_size=1)) def reset_noise(self, batch_size: Optional[int] = None) -> None: pass @property def action_shape(self) -> Tuple[int, ...]: return (self.map_size, len(self.action_vec)) def max_pool() -> nn.MaxPool2d: return nn.MaxPool2d(3, stride=2, padding=1)
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/policy/actor_critic_network/unet.py
0.955089
0.368463
unet.py
pypi
from typing import Optional, Sequence, Tuple import torch import torch.nn as nn from gym.spaces import Space from rl_algo_impls.shared.actor import actor_head from rl_algo_impls.shared.encoder import Encoder from rl_algo_impls.shared.policy.actor_critic_network.network import ( ACNForward, ActorCriticNetwork, default_hidden_sizes, ) from rl_algo_impls.shared.policy.critic import CriticHead from rl_algo_impls.shared.policy.policy import ACTIVATION class SeparateActorCriticNetwork(ActorCriticNetwork): def __init__( self, observation_space: Space, action_space: Space, pi_hidden_sizes: Optional[Sequence[int]] = None, v_hidden_sizes: Optional[Sequence[int]] = None, init_layers_orthogonal: bool = True, activation_fn: str = "tanh", log_std_init: float = -0.5, use_sde: bool = False, full_std: bool = True, squash_output: bool = False, cnn_flatten_dim: int = 512, cnn_style: str = "nature", cnn_layers_init_orthogonal: Optional[bool] = None, impala_channels: Sequence[int] = (16, 32, 32), actor_head_style: str = "single", action_plane_space: Optional[Space] = None, ) -> None: super().__init__() pi_hidden_sizes = ( pi_hidden_sizes if pi_hidden_sizes is not None else default_hidden_sizes(observation_space) ) v_hidden_sizes = ( v_hidden_sizes if v_hidden_sizes is not None else default_hidden_sizes(observation_space) ) activation = ACTIVATION[activation_fn] self._feature_extractor = Encoder( observation_space, activation, init_layers_orthogonal=init_layers_orthogonal, cnn_flatten_dim=cnn_flatten_dim, cnn_style=cnn_style, cnn_layers_init_orthogonal=cnn_layers_init_orthogonal, impala_channels=impala_channels, ) self._pi = actor_head( action_space, self._feature_extractor.out_dim, tuple(pi_hidden_sizes), init_layers_orthogonal, activation, log_std_init=log_std_init, use_sde=use_sde, full_std=full_std, squash_output=squash_output, actor_head_style=actor_head_style, action_plane_space=action_plane_space, ) v_encoder = Encoder( observation_space, activation, 
init_layers_orthogonal=init_layers_orthogonal, cnn_flatten_dim=cnn_flatten_dim, cnn_style=cnn_style, cnn_layers_init_orthogonal=cnn_layers_init_orthogonal, ) self._v = nn.Sequential( v_encoder, CriticHead( in_dim=v_encoder.out_dim, hidden_sizes=v_hidden_sizes, activation=activation, init_layers_orthogonal=init_layers_orthogonal, ), ) def forward( self, obs: torch.Tensor, action: torch.Tensor, action_masks: Optional[torch.Tensor] = None, ) -> ACNForward: return self._distribution_and_value( obs, action=action, action_masks=action_masks ) def distribution_and_value( self, obs: torch.Tensor, action_masks: Optional[torch.Tensor] = None ) -> ACNForward: return self._distribution_and_value(obs, action_masks=action_masks) def _distribution_and_value( self, obs: torch.Tensor, action: Optional[torch.Tensor] = None, action_masks: Optional[torch.Tensor] = None, ) -> ACNForward: pi_forward = self._pi( self._feature_extractor(obs), actions=action, action_masks=action_masks ) v = self._v(obs) return ACNForward(pi_forward, v) def value(self, obs: torch.Tensor) -> torch.Tensor: return self._v(obs) def reset_noise(self, batch_size: int) -> None: self._pi.sample_weights(batch_size=batch_size) @property def action_shape(self) -> Tuple[int, ...]: return self._pi.action_shape
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/policy/actor_critic_network/separate_actor_critic.py
0.956513
0.266006
separate_actor_critic.py
pypi
from typing import Optional, Sequence, Tuple import torch from gym.spaces import Space from rl_algo_impls.shared.actor import actor_head from rl_algo_impls.shared.encoder import Encoder from rl_algo_impls.shared.policy.actor_critic_network.network import ( ACNForward, ActorCriticNetwork, default_hidden_sizes, ) from rl_algo_impls.shared.policy.critic import CriticHead from rl_algo_impls.shared.policy.policy import ACTIVATION class ConnectedTrioActorCriticNetwork(ActorCriticNetwork): """Encode (feature extractor), decoder (actor head), critic head networks""" def __init__( self, observation_space: Space, action_space: Space, pi_hidden_sizes: Optional[Sequence[int]] = None, v_hidden_sizes: Optional[Sequence[int]] = None, init_layers_orthogonal: bool = True, activation_fn: str = "tanh", log_std_init: float = -0.5, use_sde: bool = False, full_std: bool = True, squash_output: bool = False, cnn_flatten_dim: int = 512, cnn_style: str = "nature", cnn_layers_init_orthogonal: Optional[bool] = None, impala_channels: Sequence[int] = (16, 32, 32), actor_head_style: str = "single", action_plane_space: Optional[Space] = None, ) -> None: super().__init__() pi_hidden_sizes = ( pi_hidden_sizes if pi_hidden_sizes is not None else default_hidden_sizes(observation_space) ) v_hidden_sizes = ( v_hidden_sizes if v_hidden_sizes is not None else default_hidden_sizes(observation_space) ) activation = ACTIVATION[activation_fn] self._feature_extractor = Encoder( observation_space, activation, init_layers_orthogonal=init_layers_orthogonal, cnn_flatten_dim=cnn_flatten_dim, cnn_style=cnn_style, cnn_layers_init_orthogonal=cnn_layers_init_orthogonal, impala_channels=impala_channels, ) self._pi = actor_head( action_space, self._feature_extractor.out_dim, tuple(pi_hidden_sizes), init_layers_orthogonal, activation, log_std_init=log_std_init, use_sde=use_sde, full_std=full_std, squash_output=squash_output, actor_head_style=actor_head_style, action_plane_space=action_plane_space, ) self._v = CriticHead( 
in_dim=self._feature_extractor.out_dim, hidden_sizes=v_hidden_sizes, activation=activation, init_layers_orthogonal=init_layers_orthogonal, ) def forward( self, obs: torch.Tensor, action: torch.Tensor, action_masks: Optional[torch.Tensor] = None, ) -> ACNForward: return self._distribution_and_value( obs, action=action, action_masks=action_masks ) def distribution_and_value( self, obs: torch.Tensor, action_masks: Optional[torch.Tensor] = None ) -> ACNForward: return self._distribution_and_value(obs, action_masks=action_masks) def _distribution_and_value( self, obs: torch.Tensor, action: Optional[torch.Tensor] = None, action_masks: Optional[torch.Tensor] = None, ) -> ACNForward: encoded = self._feature_extractor(obs) pi_forward = self._pi(encoded, actions=action, action_masks=action_masks) v = self._v(encoded) return ACNForward(pi_forward, v) def value(self, obs: torch.Tensor) -> torch.Tensor: encoded = self._feature_extractor(obs) return self._v(encoded) def reset_noise(self, batch_size: int) -> None: self._pi.sample_weights(batch_size=batch_size) @property def action_shape(self) -> Tuple[int, ...]: return self._pi.action_shape
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/policy/actor_critic_network/connected_trio.py
0.955183
0.283465
connected_trio.py
pypi
from dataclasses import astuple from typing import Any, Dict, Optional, Tuple import numpy as np from luxai_s2.actions import move_deltas from rl_algo_impls.shared.lux.actions import FACTORY_ACTION_ENCODED_SIZE, pos_to_idx from rl_algo_impls.shared.lux.shared import ( LuxEnvConfig, LuxFactory, LuxGameState, LuxUnit, agent_id, factory_water_cost, move_power_cost, pos_to_numpy, ) def get_action_mask( player: str, state: LuxGameState, action_mask_shape: Tuple[int, int], enqueued_actions: Dict[str, Optional[np.ndarray]], move_masks: Dict[str, Any], move_validity_map: np.ndarray, ) -> np.ndarray: action_mask = np.full( action_mask_shape, False, dtype=np.bool_, ) config = state.env_cfg for f in state.factories[player].values(): action_mask[ pos_to_idx(f.pos, config.map_size), :FACTORY_ACTION_ENCODED_SIZE ] = np.array( [ is_build_light_valid(f, config), is_build_heavy_valid(f, config), is_water_action_valid(f, state, config), True, # Do nothing is always valid ] ) for u_id, u in state.units[player].items(): enqueued_action = enqueued_actions.get(u_id) move_mask = move_masks[u_id] transfer_direction_mask = valid_transfer_direction_mask( u, state, config, move_mask, move_validity_map, enqueued_action ) transfer_resource_mask = ( valid_transfer_resource_mask(u) if np.any(transfer_direction_mask) else np.zeros(5) ) pickup_resource_mask = valid_pickup_resource_mask(u, state, enqueued_action) valid_action_types = np.array( [ np.any(move_mask), np.any(transfer_direction_mask), np.any(pickup_resource_mask), is_dig_valid(u, state, enqueued_action), if_self_destruct_valid(u, state, enqueued_action), is_recharge_valid(u, enqueued_action), ] ) action_mask[ pos_to_idx(u.pos, config.map_size), FACTORY_ACTION_ENCODED_SIZE: ] = np.concatenate( [ valid_action_types, move_mask, transfer_direction_mask, transfer_resource_mask, pickup_resource_mask, ] ) return action_mask # Factory validity checks def is_build_light_valid(factory: LuxFactory, config: LuxEnvConfig) -> bool: LIGHT_ROBOT = 
config.ROBOTS["LIGHT"] return ( factory.cargo.metal >= LIGHT_ROBOT.METAL_COST and factory.power >= LIGHT_ROBOT.POWER_COST ) def is_build_heavy_valid(factory: LuxFactory, config: LuxEnvConfig) -> bool: HEAVY_ROBOT = config.ROBOTS["HEAVY"] return ( factory.cargo.metal >= HEAVY_ROBOT.METAL_COST and factory.power >= HEAVY_ROBOT.POWER_COST ) def is_water_action_valid( factory: LuxFactory, state: LuxGameState, config: LuxEnvConfig ) -> bool: water_cost = factory_water_cost(factory, state, config) return factory.cargo.water >= water_cost # Unit validity checks def agent_move_masks( state: LuxGameState, player: str, enqueued_actions: Dict[str, Optional[np.ndarray]] ) -> Dict[str, np.ndarray]: return { u_id: valid_move_mask(u, state, enqueued_actions.get(u_id)) for u_id, u in state.units[player].items() } def valid_destination_map( state: LuxGameState, player: str, agent_move_masks: Dict[str, np.ndarray] ) -> np.ndarray: map_size = state.env_cfg.map_size move_validity_map = np.zeros((map_size, map_size), dtype=np.int8) for u_id, valid_moves_mask in agent_move_masks.items(): u = state.units[player][u_id] pos = pos_to_numpy(u.pos) for direction_idx, move_delta in enumerate(move_deltas): if valid_moves_mask[direction_idx] or direction_idx == 0: move_validity_map[pos[0] + move_delta[0], pos[1] + move_delta[1]] += 1 return move_validity_map def is_position_in_map(pos: np.ndarray, config: LuxEnvConfig) -> bool: return bool(np.all(pos >= 0) and np.all(pos < config.map_size)) def valid_move_mask( unit: LuxUnit, state: LuxGameState, enqueued_action: Optional[np.ndarray], ) -> np.ndarray: config = state.env_cfg def is_valid_target(pos: np.ndarray, move_direction: int) -> bool: if not is_position_in_map(pos, config): return False factory_num_id = state.board.factory_occupancy_map[pos[0], pos[1]] if ( factory_num_id != -1 and f"factory_{factory_num_id}" not in state.factories[agent_id(unit)] ): return False rubble = int(state.board.rubble[pos[0], pos[1]]) power_cost = 
move_power_cost(unit, rubble) if ( enqueued_action is None or enqueued_action[0] != 0 or enqueued_action[1] != move_direction ): power_cost += unit.unit_cfg.ACTION_QUEUE_POWER_COST if unit.power < power_cost: return False return True return np.array( [False] + [ is_valid_target(pos_to_numpy(unit.pos) + move_delta, idx + 1) for idx, move_delta in enumerate(move_deltas[1:]) ] ) def valid_transfer_direction_mask( unit: LuxUnit, state: LuxGameState, config: LuxEnvConfig, move_mask: np.ndarray, move_validity_map: np.ndarray, enqueued_action: Optional[np.ndarray], ) -> np.ndarray: if ( enqueued_action is None or enqueued_action[0] != 1 ) and unit.power < unit.unit_cfg.ACTION_QUEUE_POWER_COST: return np.full(5, False) def is_valid_target(pos: np.ndarray, move_direction: int) -> bool: if ( enqueued_action is None or enqueued_action[2] != move_direction ) and unit.power < unit.unit_cfg.ACTION_QUEUE_POWER_COST: return False if not is_position_in_map(pos, config): return False factory_at_target = state.board.factory_occupancy_map[pos[0], pos[1]] if ( factory_at_target != -1 and f"factory_{factory_at_target}" in state.factories[agent_id(unit)] ): return True if move_direction == 0: # Center drop-off is just for factory return False return bool( move_validity_map[pos[0], pos[1]] - (1 if move_mask[move_direction] else 0) > 0 ) return np.array( [ is_valid_target(pos_to_numpy(unit.pos) + move_delta, idx) for idx, move_delta in enumerate(move_deltas) ] ) def valid_transfer_resource_mask(unit: LuxUnit) -> np.ndarray: return np.array(astuple(unit.cargo) + (unit.power,)) > 0 def valid_pickup_resource_mask( unit: LuxUnit, state: LuxGameState, enqueued_action: Optional[np.ndarray] ) -> np.ndarray: has_power_to_change = unit.power >= unit.unit_cfg.ACTION_QUEUE_POWER_COST if (enqueued_action is None or enqueued_action[0] != 2) and not has_power_to_change: return np.zeros(5) pos = pos_to_numpy(unit.pos) factory_id = state.board.factory_occupancy_map[pos[0], pos[1]] if factory_id == -1: 
return np.zeros(5) factory = state.factories[agent_id(unit)][f"factory_{factory_id}"] has_resource = np.array(astuple(factory.cargo) + (factory.power,)) > 0 has_capacity = np.concatenate( [ np.array(astuple(unit.cargo)) < unit.cargo_space, np.array([unit.power < unit.battery_capacity]), ] ) has_power = np.array( [ has_power_to_change or (enqueued_action is not None and enqueued_action[4] == idx) for idx in range(5) ] ) return has_resource * has_capacity * has_power def is_dig_valid( unit: LuxUnit, state: LuxGameState, enqueued_action: Optional[np.ndarray] ) -> bool: power_cost = unit.unit_cfg.DIG_COST if enqueued_action is None or enqueued_action[0] != 3: power_cost += unit.unit_cfg.ACTION_QUEUE_POWER_COST if unit.power < power_cost: return False pos = pos_to_numpy(unit.pos) if ( state.board.rubble[pos[0], pos[1]] or state.board.ice[pos[0], pos[1]] or state.board.ore[pos[0], pos[1]] ): return True lichen_strain = state.board.lichen_strains[pos[0], pos[1]] if ( lichen_strain != -1 and f"factory_{lichen_strain}" not in state.factories[agent_id(unit)] ): return True return False def if_self_destruct_valid( unit: LuxUnit, state: LuxGameState, enqueued_action: Optional[np.ndarray] ) -> bool: pos = pos_to_numpy(unit.pos) factory_id = state.board.factory_occupancy_map[pos[0], pos[1]] if factory_id != -1: return False power_cost = unit.unit_cfg.SELF_DESTRUCT_COST if enqueued_action is None or enqueued_action[0] != 4: power_cost += unit.unit_cfg.ACTION_QUEUE_POWER_COST if unit.power < power_cost: return False return True def is_recharge_valid(unit: LuxUnit, enqueued_action: Optional[np.ndarray]) -> bool: if ( enqueued_action is None or enqueued_action[0] != 5 ) and unit.power < unit.unit_cfg.ACTION_QUEUE_POWER_COST: return False return True
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/lux/action_mask.py
0.836521
0.339609
action_mask.py
pypi
import dataclasses from dataclasses import dataclass from typing import Dict, List, Tuple import numpy as np from luxai_s2.env import LuxAI_S2 from luxai_s2.unit import UnitType class AgentRunningStats: stats: np.ndarray NAMES = ( # Change in value stats "ice_generation", "ore_generation", "water_generation", "metal_generation", "power_generation", "lichen", "built_light", "built_heavy", "lost_factory", # Current value stats "factories_alive", "heavies_alive", "lights_alive", ) def __init__(self) -> None: self.stats = np.zeros(len(self.NAMES), dtype=np.int32) def update(self, env: LuxAI_S2, agent: str) -> np.ndarray: generation = env.state.stats[agent]["generation"] strain_ids = env.state.teams[agent].factory_strains agent_lichen_mask = np.isin(env.state.board.lichen_strains, strain_ids) lichen = env.state.board.lichen[agent_lichen_mask].sum() new_delta_stats = np.array( [ sum(generation["ice"].values()), sum(generation["ore"].values()), generation["water"], generation["metal"], sum(generation["power"].values()), lichen, generation["built"]["LIGHT"], generation["built"]["HEAVY"], env.state.stats[agent]["destroyed"]["FACTORY"], ] ) delta = new_delta_stats - self.stats[: len(new_delta_stats)] agent_units = env.state.units[agent] new_current_stats = np.array( [ len(env.state.factories[agent]), len([u for u in agent_units.values() if u.unit_type == UnitType.HEAVY]), len([u for u in agent_units.values() if u.unit_type == UnitType.LIGHT]), ] ) self.stats = np.concatenate((new_delta_stats, new_current_stats)) return np.concatenate((delta, new_current_stats)) @dataclass class ActionStats: ACTION_NAMES = ("move", "transfer", "pickup", "dig", "self_destruct", "recharge") action_type: np.ndarray = dataclasses.field( default_factory=lambda: np.zeros(6, dtype=np.int32) ) no_valid_action = 0 repeat_action = 0 def stats_dict(self, prefix: str) -> Dict[str, int]: _dict = { f"{prefix}{name}": cnt for name, cnt in zip(self.ACTION_NAMES, self.action_type.tolist()) } 
_dict[f"{prefix}no_valid"] = self.no_valid_action _dict[f"{prefix}repeat"] = self.repeat_action return _dict class StatsTracking: env: LuxAI_S2 agents: List[str] agent_stats: Tuple[AgentRunningStats, AgentRunningStats] action_stats: Tuple[ActionStats, ActionStats] def update(self) -> np.ndarray: per_agent_updates = np.stack( [ self.agent_stats[idx].update(self.env, agent) for idx, agent in enumerate(self.agents) ] ) lichen_idx = AgentRunningStats.NAMES.index("lichen") delta_vs_opponent = np.expand_dims( np.array( [ per_agent_updates[p_idx, lichen_idx] - per_agent_updates[o_idx, lichen_idx] for p_idx, o_idx in zip( range(len(self.agents)), reversed(range(len(self.agents))), ) ] ), axis=-1, ) return np.concatenate([per_agent_updates, delta_vs_opponent], axis=-1) def reset(self, env: LuxAI_S2) -> None: self.env = env self.agents = env.agents self.agent_stats = (AgentRunningStats(), AgentRunningStats()) self.action_stats = (ActionStats(), ActionStats()) self.update()
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/lux/stats.py
0.794225
0.391406
stats.py
pypi
from typing import Dict, NamedTuple, Optional, Tuple, Type

import numpy as np
from luxai_s2.factory import FactoryStateDict
from luxai_s2.state import ObservationStateDict
from luxai_s2.unit import UnitStateDict

from rl_algo_impls.shared.lux.action_mask import (
    agent_move_masks,
    get_action_mask,
    is_build_heavy_valid,
    is_build_light_valid,
    valid_destination_map,
)
from rl_algo_impls.shared.lux.actions import UNIT_ACTION_ENCODED_SIZE, UNIT_ACTION_SIZES
from rl_algo_impls.shared.lux.shared import LuxGameState, factory_water_cost

# Normalization caps used to squash raw factory cargo/power and lichen values
# into roughly [0, 1] observation planes.
ICE_FACTORY_MAX = 100_000
ORE_FACTORY_MAX = 50_000
WATER_FACTORY_MAX = 25_000
METAL_FACTORY_MAX = 10_000
POWER_FACTORY_MAX = 50_000
LICHEN_TILES_FACTORY_MAX = 128
LICHEN_FACTORY_MAX = 128_000


class ObservationAndActionMask(NamedTuple):
    """Bundle of an observation tensor and its matching invalid-action mask."""

    observation: np.ndarray
    action_mask: np.ndarray


def observation_and_action_mask(
    player: str,
    lux_obs: ObservationStateDict,
    state: LuxGameState,
    action_mask_shape: Tuple[int, int],
    enqueued_actions: Dict[str, Optional[np.ndarray]],
) -> ObservationAndActionMask:
    """Compute the observation and action mask for one player.

    Move masks / the move-validity map are computed once here and shared by
    both the action mask and the observation builder.
    """
    move_masks = agent_move_masks(state, player, enqueued_actions)
    move_validity_map = valid_destination_map(state, player, move_masks)
    action_mask = get_action_mask(
        player,
        state,
        action_mask_shape,
        enqueued_actions,
        move_masks,
        move_validity_map,
    )
    observation = from_lux_observation(
        player, lux_obs, state, enqueued_actions, move_validity_map
    )
    return ObservationAndActionMask(observation, action_mask)


def from_lux_observation(
    player: str,
    lux_obs: ObservationStateDict,
    state: LuxGameState,
    enqueued_actions: Dict[str, Optional[np.ndarray]],
    move_validity_map: np.ndarray,
) -> np.ndarray:
    """Convert a raw Lux observation into a (map_size, map_size, C) float32
    tensor of stacked feature planes for ``player``.

    Planes cover board terrain, lichen ownership, factory state, unit state,
    enqueued actions, and global time features; the concatenation order at the
    end of this function defines the channel layout.
    """
    env_cfg = state.env_cfg
    map_size = env_cfg.map_size
    LIGHT_ROBOT = env_cfg.ROBOTS["LIGHT"]
    HEAVY_ROBOT = env_cfg.ROBOTS["HEAVY"]

    p1 = player
    # The (single) other team key is the opponent.
    p2 = [p for p in state.teams if p != player][0]

    # Coordinate planes in [-1, 1]; x varies along axis 0, y along axis 1.
    x = np.transpose(np.tile(np.linspace(-1, 1, num=map_size), (map_size, 1)))
    y = np.tile(np.linspace(-1, 1, num=map_size), (map_size, 1))

    ore = lux_obs["board"]["ore"]
    ice = lux_obs["board"]["ice"]

    _rubble = lux_obs["board"]["rubble"]
    non_zero_rubble = _rubble > 0
    rubble = _rubble / env_cfg.MAX_RUBBLE

    _lichen = lux_obs["board"]["lichen"]
    non_zero_lichen = _lichen > 0
    lichen = _lichen / env_cfg.MAX_LICHEN_PER_TILE
    spreadable_lichen = _lichen >= env_cfg.MIN_LICHEN_TO_SPREAD
    _lichen_strains = lux_obs["board"]["lichen_strains"]
    _own_lichen_strains = state.teams[p1].factory_strains
    _opponent_lichen_strains = state.teams[p2].factory_strains
    own_lichen = np.isin(_lichen_strains, _own_lichen_strains)
    opponent_lichen = np.isin(_lichen_strains, _opponent_lichen_strains)
    # strain_id -> number of board tiles carrying that strain.
    _lichen_counts = {
        k: v for k, v in zip(*np.unique(_lichen_strains, return_counts=True))
    }

    def zeros(dtype: Type) -> np.ndarray:
        # Fresh (map_size, map_size) plane of the given dtype.
        return np.zeros((map_size, map_size), dtype=dtype)

    factory = zeros(np.bool_)
    own_factory = zeros(np.bool_)
    opponent_factory = zeros(np.bool_)
    ice_factory = zeros(np.float32)
    water_factory = zeros(np.float32)
    ore_factory = zeros(np.float32)
    metal_factory = zeros(np.float32)
    power_factory = zeros(np.float32)
    can_build_light_robot = zeros(np.bool_)  # Handled by invalid action mask?
    can_build_heavy_robot = zeros(np.bool_)  # Handled by invalid action mask?
    can_water_lichen = zeros(np.bool_)  # Handled by invalid action mask?
    day_survive_factory = zeros(np.float32)
    over_day_survive_factory = zeros(np.bool_)
    day_survive_water_factory = zeros(np.float32)
    connected_lichen_tiles = zeros(np.float32)
    connected_lichen = zeros(np.float32)

    def add_factory(f: FactoryStateDict, p_id: str, is_own: bool) -> None:
        # Writes one factory's features at its center tile. NOTE: x, y here
        # shadow the outer coordinate planes — they are this factory's position.
        f_state = state.factories[p_id][f["unit_id"]]
        x, y = f["pos"]
        factory[x, y] = True
        if is_own:
            own_factory[x, y] = True
        else:
            opponent_factory[x, y] = True
        _cargo = f["cargo"]
        _ice = _cargo["ice"]
        _water = _cargo["water"]
        _metal = _cargo["metal"]
        _power = f["power"]
        ice_factory[x, y] = _ice / ICE_FACTORY_MAX
        water_factory[x, y] = _water / WATER_FACTORY_MAX
        ore_factory[x, y] = _cargo["ore"] / ORE_FACTORY_MAX
        metal_factory[x, y] = _metal / METAL_FACTORY_MAX
        power_factory[x, y] = _power / POWER_FACTORY_MAX
        can_build_light_robot[x, y] = is_build_light_valid(f_state, env_cfg)
        can_build_heavy_robot[x, y] = is_build_heavy_valid(f_state, env_cfg)
        _water_lichen_cost = factory_water_cost(f_state, state, env_cfg)
        can_water_lichen[x, y] = _water > _water_lichen_cost
        # Water supply counts stored ice at the ice->water conversion ratio.
        _water_supply = _water + _ice / env_cfg.ICE_WATER_RATIO
        _day_water_consumption = (
            env_cfg.FACTORY_WATER_CONSUMPTION * env_cfg.CYCLE_LENGTH
        )
        # NOTE(review): max(..., 1) floors this plane at 1, so it never
        # distinguishes factories with *less* than a full day-cycle of water;
        # min(..., 1) (a capped ratio) looks intended — confirm.
        day_survive_factory[x, y] = max(_water_supply / _day_water_consumption, 1)
        over_day_survive_factory[x, y] = _water_supply > _day_water_consumption
        # NOTE(review): a boolean is stored into this float32 plane (0.0/1.0),
        # though the name suggests a ratio like day_survive_factory — confirm.
        day_survive_water_factory[x, y] = (
            _water_supply - _water_lichen_cost > _day_water_consumption
        )
        connected_lichen_tiles[x, y] = (
            _lichen_counts.get(f["strain_id"], 0) / LICHEN_TILES_FACTORY_MAX
        )
        # Total lichen mass belonging to this factory's strain.
        connected_lichen[x, y] = (
            np.sum(np.where(_lichen_strains == f["strain_id"], _lichen, 0))
            / LICHEN_FACTORY_MAX
        )

    for f in lux_obs["factories"][p1].values():
        add_factory(f, p1, True)
    for f in lux_obs["factories"][p2].values():
        add_factory(f, p2, False)

    # Factory footprint tiles (3x3 area), not just center tiles as above.
    is_factory_tile = state.board.factory_occupancy_map != -1
    is_own_factory_tile = np.isin(
        state.board.factory_occupancy_map, _own_lichen_strains
    )
    is_opponent_factory_tile = np.isin(
        state.board.factory_occupancy_map, _opponent_lichen_strains
    )

    # cargo (fraction of heavy capacity), cargo capacity, exceeds light cap, full
    unit_cargo_init = lambda: np.zeros((map_size, map_size, 4), dtype=np.float32)
    unit = zeros(np.bool_)
    own_unit = zeros(np.bool_)
    opponent_unit = zeros(np.bool_)
    unit_is_heavy = zeros(np.bool_)
    ice_unit = unit_cargo_init()
    ore_unit = unit_cargo_init()
    water_unit = unit_cargo_init()
    metal_unit = unit_cargo_init()
    power_unit = unit_cargo_init()
    enqueued_action = np.full(
        (map_size, map_size, UNIT_ACTION_ENCODED_SIZE), False, dtype=np.bool_
    )

    def add_unit(u: UnitStateDict, is_own: bool) -> None:
        # Writes one unit's features at its tile (x, y shadow the outer planes).
        _u_id = u["unit_id"]
        x, y = u["pos"]
        unit[x, y] = True
        if is_own:
            own_unit[x, y] = True
        else:
            opponent_unit[x, y] = True
        _is_heavy = u["unit_type"] == "HEAVY"
        unit_is_heavy[x, y] = _is_heavy
        _cargo_space = HEAVY_ROBOT.CARGO_SPACE if _is_heavy else LIGHT_ROBOT.CARGO_SPACE

        def add_cargo(v: int) -> np.ndarray:
            # 4-vector: fraction of heavy capacity, fraction of own capacity,
            # exceeds-light-capacity flag, at-own-capacity flag.
            return np.array(
                (
                    v / HEAVY_ROBOT.CARGO_SPACE,
                    v / _cargo_space,
                    v > LIGHT_ROBOT.CARGO_SPACE,
                    v == _cargo_space,
                )
            )

        _cargo = u["cargo"]
        ice_unit[x, y] = add_cargo(_cargo["ice"])
        ore_unit[x, y] = add_cargo(_cargo["ore"])
        water_unit[x, y] = add_cargo(_cargo["water"])
        metal_unit[x, y] = add_cargo(_cargo["metal"])
        _power = u["power"]
        _h_bat_cap = HEAVY_ROBOT.BATTERY_CAPACITY
        _l_bat_cap = LIGHT_ROBOT.BATTERY_CAPACITY
        _bat_cap = _h_bat_cap if _is_heavy else _l_bat_cap
        # Same 4-vector encoding as add_cargo, but for battery power.
        power_unit[x, y] = np.array(
            (
                _power / _h_bat_cap,
                _power / _bat_cap,
                _power > _l_bat_cap,
                _power == _bat_cap,
            )
        )
        _enqueued_action = enqueued_actions.get(_u_id)
        if _enqueued_action is not None:
            enqueued_action[x, y] = unit_action_to_obs(_enqueued_action)

    # Tiles more than one own unit could move to (see valid_destination_map).
    can_collide_with_friendly_unit = move_validity_map > 1
    for u in lux_obs["units"][p1].values():
        add_unit(u, True)
    for u in lux_obs["units"][p2].values():
        add_unit(u, False)

    # Episode progress in [0, 1].
    turn = (
        np.ones((map_size, map_size), dtype=np.float32)
        * lux_obs["real_env_steps"]
        / env_cfg.max_episode_length
    )

    _day_fraction = (
        lux_obs["real_env_steps"] % env_cfg.CYCLE_LENGTH / env_cfg.CYCLE_LENGTH
    )
    # Fraction of daylight remaining in the current cycle; presumably goes
    # negative during night (when _day_fraction > DAY_LENGTH/CYCLE_LENGTH) —
    # TODO confirm that is the intended encoding.
    _day_remaining = 1 - _day_fraction * env_cfg.CYCLE_LENGTH / env_cfg.DAY_LENGTH
    day_cycle = np.ones((map_size, map_size), dtype=np.float32) * _day_remaining

    # Channel layout of the returned observation tensor is defined here.
    return np.concatenate(
        (
            np.expand_dims(x, axis=-1),
            np.expand_dims(y, axis=-1),
            np.expand_dims(ore, axis=-1),
            np.expand_dims(ice, axis=-1),
            np.expand_dims(non_zero_rubble, axis=-1),
            np.expand_dims(rubble, axis=-1),
            np.expand_dims(non_zero_lichen, axis=-1),
            np.expand_dims(lichen, axis=-1),
            np.expand_dims(spreadable_lichen, axis=-1),
            np.expand_dims(own_lichen, axis=-1),
            np.expand_dims(opponent_lichen, axis=-1),
            np.expand_dims(factory, axis=-1),
            np.expand_dims(own_factory, axis=-1),
            np.expand_dims(opponent_factory, axis=-1),
            np.expand_dims(ice_factory, axis=-1),
            np.expand_dims(water_factory, axis=-1),
            np.expand_dims(ore_factory, axis=-1),
            np.expand_dims(metal_factory, axis=-1),
            np.expand_dims(power_factory, axis=-1),
            np.expand_dims(can_build_light_robot, axis=-1),
            np.expand_dims(can_build_heavy_robot, axis=-1),
            np.expand_dims(can_water_lichen, axis=-1),
            np.expand_dims(day_survive_factory, axis=-1),
            np.expand_dims(over_day_survive_factory, axis=-1),
            np.expand_dims(day_survive_water_factory, axis=-1),
            np.expand_dims(connected_lichen_tiles, axis=-1),
            np.expand_dims(connected_lichen, axis=-1),
            np.expand_dims(is_factory_tile, axis=-1),
            np.expand_dims(is_own_factory_tile, axis=-1),
            np.expand_dims(is_opponent_factory_tile, axis=-1),
            np.expand_dims(unit, axis=-1),
            np.expand_dims(own_unit, axis=-1),
            np.expand_dims(opponent_unit, axis=-1),
            np.expand_dims(unit_is_heavy, axis=-1),
            ice_unit,
            ore_unit,
            water_unit,
            metal_unit,
            power_unit,
            enqueued_action,
            np.expand_dims(can_collide_with_friendly_unit, axis=-1),
            np.expand_dims(turn, axis=-1),
            np.expand_dims(day_cycle, axis=-1),
        ),
        axis=-1,
        dtype=np.float32,
    )


def unit_action_to_obs(action: np.ndarray) -> np.ndarray:
    """One-hot encode a 5-component unit action; -1 components stay all-zero."""
    encoded = [np.zeros(sz, dtype=np.bool_) for sz in UNIT_ACTION_SIZES]
    for e, a in zip(encoded, action):
        if a < 0:
            continue
        e[a] = True
    return np.concatenate(encoded)
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/lux/observation.py
0.787605
0.380701
observation.py
pypi
from typing import Any, Dict, List import numpy as np from luxai_s2.utils import my_turn_to_place_factory from rl_algo_impls.shared.lux.shared import LuxGameState, pos_to_numpy def bid_action(bid_std_dev: float, faction: str) -> Dict[str, Any]: return {"bid": int(np.random.normal(scale=5)), "faction": faction} def place_factory_action( state: LuxGameState, agents: List[str], player_idx: int ) -> Dict[str, Any]: env_cfg = state.env_cfg player_idx_to_place = int( my_turn_to_place_factory( state.teams[agents[0]].place_first, state.real_env_steps ) ) if player_idx_to_place != player_idx: return {} p1 = agents[player_idx_to_place] p2 = agents[(player_idx_to_place + 1) % 2] own_factories = np.array( [pos_to_numpy(f.pos) for f in state.factories[p1].values()] ) opp_factories = np.array( [pos_to_numpy(f.pos) for f in state.factories[p2].values()] ) water_left = state.teams[p1].init_water metal_left = state.teams[p1].init_metal potential_spawns = np.argwhere(state.board.valid_spawns_mask) ice_tile_locations = np.argwhere(state.board.ice) ore_tile_locations = np.argwhere(state.board.ore) if env_cfg.verbose > 2 and ( len(ice_tile_locations) == 0 or len(ore_tile_locations) == 0 ): print( f"Map missing ice ({len(ice_tile_locations)}) or ore ({len(ore_tile_locations)})" ) best_score = -1e6 best_loc = potential_spawns[0] _rubble = state.board.rubble d_rubble = 10 for loc in potential_spawns: ice_distances = np.linalg.norm(ice_tile_locations - loc, ord=1, axis=1) ore_distances = np.linalg.norm(ore_tile_locations - loc, ord=1, axis=1) closest_ice = np.min(ice_distances) if len(ice_distances) else 0 closest_ore = np.min(ore_distances) if len(ore_distances) else 0 min_loc = np.clip(loc - d_rubble, 0, env_cfg.map_size - 1) max_loc = np.clip(loc + d_rubble, 0, env_cfg.map_size - 1) _rubble_neighbors = _rubble[min_loc[0] : max_loc[0], min_loc[1] : max_loc[1]] density_rubble = np.mean(_rubble_neighbors) if len(own_factories): own_factory_distances = np.linalg.norm(own_factories - loc, 
ord=1, axis=1) closest_own_factory = np.min(own_factory_distances) else: closest_own_factory = 0 if len(opp_factories): opp_factory_distances = np.linalg.norm(opp_factories - loc, ord=1, axis=1) closest_opp_factory = np.min(opp_factory_distances) else: closest_opp_factory = 0 score = ( -10 * closest_ice - 0.01 * closest_ore - 10 * density_rubble / d_rubble + 0.01 * closest_opp_factory - 0.01 * closest_own_factory ) if score > best_score: best_score = score best_loc = loc return { "metal": min(env_cfg.INIT_WATER_METAL_PER_FACTORY, metal_left), "water": min(env_cfg.INIT_WATER_METAL_PER_FACTORY, water_left), "spawn": best_loc.tolist(), }
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/lux/early.py
0.672977
0.399929
early.py
pypi
import logging
from dataclasses import astuple
from typing import Any, Dict, List, Optional, Union

import numpy as np
from luxai_s2.actions import move_deltas
from luxai_s2.map.position import Position

from rl_algo_impls.shared.lux.shared import (
    LuxEnvConfig,
    LuxGameState,
    LuxUnit,
    pos_to_numpy,
)
from rl_algo_impls.shared.lux.stats import ActionStats

FACTORY_ACTION_SIZES = (
    4,  # build light robot, build heavy robot, water lichen, do nothing
)
FACTORY_ACTION_ENCODED_SIZE = sum(FACTORY_ACTION_SIZES)
FACTORY_DO_NOTHING_ACTION = 3

UNIT_ACTION_SIZES = (
    6,  # action type
    5,  # move direction
    5,  # transfer direction
    5,  # transfer resource
    5,  # pickup resource
)
UNIT_ACTION_ENCODED_SIZE = sum(UNIT_ACTION_SIZES)

ACTION_SIZES = FACTORY_ACTION_SIZES + UNIT_ACTION_SIZES


def pos_to_idx(pos: Union[Position, np.ndarray], map_size: int) -> int:
    """Flatten an (x, y) position into a row-major index into the action grid."""
    pos = pos_to_numpy(pos)
    return pos[0] * map_size + pos[1]


def to_lux_actions(
    p: str,
    state: LuxGameState,
    actions: np.ndarray,
    action_mask: np.ndarray,
    enqueued_actions: Dict[str, Optional[np.ndarray]],
    action_stats: ActionStats,
) -> Dict[str, Any]:
    """Translate per-tile gridnet actions into the LuxAI_S2 action-dict format.

    Factory actions pass through unless "do nothing". Unit actions are skipped
    (and counted in ``action_stats``) when no action is valid or when the
    chosen action matches the unit's already-enqueued action.
    """
    cfg = state.env_cfg

    lux_actions = {}
    for f in state.factories[p].values():
        a = actions[pos_to_idx(f.pos, cfg.map_size), 0]
        if a != FACTORY_DO_NOTHING_ACTION:
            lux_actions[f.unit_id] = a
    for u in state.units[p].values():
        a = actions[pos_to_idx(u.pos, cfg.map_size), 1:]
        if no_valid_unit_actions(u, action_mask, cfg.map_size):
            if cfg.verbose > 1:
                # Fix: logging.warn is a deprecated alias of logging.warning.
                logging.warning(f"No valid action for unit {u}")
            action_stats.no_valid_action += 1
            continue
        action_stats.action_type[a[0]] += 1
        # Re-issuing the same enqueued action would waste power updating the
        # queue; skip it.
        if actions_equal(a, enqueued_actions.get(u.unit_id)):
            action_stats.repeat_action += 1
            continue

        def resource_amount(unit: LuxUnit, idx: int) -> int:
            # idx 0-3 -> cargo tuple (ice, ore, water, metal); idx 4 -> power.
            if idx == 4:
                return unit.power
            return astuple(unit.cargo)[idx]

        repeat = cfg.max_episode_length
        if a[0] == 0:  # move
            direction = a[1]
            resource = 0
            amount = 0
            # Repeat only as far as the map edge in the chosen direction.
            repeat = max_move_repeats(u, direction, cfg)
        elif a[0] == 1:  # transfer
            direction = a[2]
            resource = a[3]
            amount = resource_amount(
                u, resource
            )  # TODO: This can lead to waste (especially for light robots)
        elif a[0] == 2:  # pickup
            direction = 0
            resource = a[4]
            capacity = u.cargo_space if resource < 4 else u.battery_capacity
            # Top off to capacity.
            amount = capacity - resource_amount(u, resource)
        elif a[0] == 3:  # dig
            direction = 0
            resource = 0
            amount = 0
        elif a[0] == 4:  # self-destruct
            direction = 0
            resource = 0
            amount = 0
        elif a[0] == 5:  # recharge
            direction = 0
            resource = 0
            amount = u.battery_capacity
        else:
            # Fix: the message had a stray "f" before the value
            # ("Unrecognized action f4" for action 4).
            raise ValueError(f"Unrecognized action {a[0]}")
        lux_actions[u.unit_id] = [
            np.array([a[0], direction, resource, amount, 0, repeat])
        ]
    return lux_actions


def max_move_repeats(unit: LuxUnit, direction_idx: int, config: LuxEnvConfig) -> int:
    """Number of times a move can repeat before the unit hits the map edge."""

    def steps_til_edge(p: int, delta: int) -> int:
        if delta < 0:
            return p
        else:
            return config.map_size - p - 1

    move_delta = move_deltas[direction_idx]
    pos = pos_to_numpy(unit.pos)
    if move_delta[0]:
        return steps_til_edge(pos[0], move_delta[0])
    else:
        return steps_til_edge(pos[1], move_delta[1])


def enqueued_action_from_obs(action_queue: List[np.ndarray]) -> Optional[np.ndarray]:
    """Re-encode the head of a raw Lux action queue into the 5-component
    internal form; unused components are -1. Returns None for an empty queue."""
    if len(action_queue) == 0:
        return None
    action = action_queue[0]
    action_type = action[0]
    if action_type == 0:  # move: keep direction
        return np.array((action_type, action[1], -1, -1, -1))
    elif action_type == 1:  # transfer: keep direction and resource
        return np.array((action_type, -1, action[1], action[2], -1))
    elif action_type == 2:  # pickup: keep resource
        return np.array((action_type, -1, -1, -1, action[2]))
    elif 3 <= action_type <= 5:  # dig/self-destruct/recharge: type only
        return np.array((action_type, -1, -1, -1, -1))
    else:
        raise ValueError(f"action_type {action_type} not supported: {action}")


def actions_equal(action: np.ndarray, enqueued: Optional[np.ndarray]) -> bool:
    """True if ``action`` matches ``enqueued``; -1 in enqueued is a wildcard."""
    if enqueued is None:
        return False
    return bool(np.all(np.where(enqueued == -1, True, action == enqueued)))


def no_valid_unit_actions(
    unit: LuxUnit, action_mask: np.ndarray, map_size: int
) -> bool:
    """True when all 6 unit action-type entries are masked off at unit's tile."""
    return not np.any(
        action_mask[
            pos_to_idx(unit.pos, map_size),
            FACTORY_ACTION_ENCODED_SIZE : FACTORY_ACTION_ENCODED_SIZE + 6,
        ]
    )
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/lux/actions.py
0.630002
0.38168
actions.py
pypi
from dataclasses import astuple
from typing import Optional

import gym
import numpy as np
from torch.utils.tensorboard.writer import SummaryWriter

from rl_algo_impls.runner.config import Config, EnvHyperparams
from rl_algo_impls.wrappers.action_mask_wrapper import MicrortsMaskWrapper
from rl_algo_impls.wrappers.episode_stats_writer import EpisodeStatsWriter
from rl_algo_impls.wrappers.hwc_to_chw_observation import HwcToChwObservation
from rl_algo_impls.wrappers.is_vector_env import IsVectorEnv
from rl_algo_impls.wrappers.microrts_stats_recorder import MicrortsStatsRecorder
from rl_algo_impls.wrappers.self_play_wrapper import SelfPlayWrapper
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv


def make_microrts_env(
    config: Config,
    hparams: EnvHyperparams,
    training: bool = True,
    render: bool = False,
    normalize_load_path: Optional[str] = None,
    tb_writer: Optional[SummaryWriter] = None,
) -> VecEnv:
    """Build a MicroRTS vectorized env with bot opponents and/or self-play.

    ``render`` and ``normalize_load_path`` are accepted for signature
    compatibility with the other make_*_env factories but unused here.
    """
    # Imported lazily so the microrts dependency is only needed when used.
    import gym_microrts
    from gym_microrts import microrts_ai

    from rl_algo_impls.shared.vec_env.microrts_compat import (
        MicroRTSGridModeSharedMemVecEnvCompat,
        MicroRTSGridModeVecEnvCompat,
    )

    # Positional unpack of EnvHyperparams; underscores mark unused slots.
    (
        _,  # env_type
        n_envs,
        _,  # frame_stack
        make_kwargs,
        _,  # no_reward_timeout_steps
        _,  # no_reward_fire_steps
        _,  # vec_env_class
        _,  # normalize
        _,  # normalize_kwargs,
        rolling_length,
        _,  # train_record_video
        _,  # video_step_interval
        _,  # initial_steps_to_truncate
        _,  # clip_atari_rewards
        _,  # normalize_type
        _,  # mask_actions
        bots,
        self_play_kwargs,
        selfplay_bots,
    ) = astuple(hparams)

    seed = config.seed(training=training)

    make_kwargs = make_kwargs or {}
    self_play_kwargs = self_play_kwargs or {}
    if "num_selfplay_envs" not in make_kwargs:
        make_kwargs["num_selfplay_envs"] = 0
    if "num_bot_envs" not in make_kwargs:
        num_selfplay_envs = make_kwargs["num_selfplay_envs"]
        if num_selfplay_envs:
            # Self-play slots beyond old-policy/selfplay-bot reservations come
            # out of the n_envs budget; the rest are bot envs.
            num_bot_envs = (
                n_envs
                - make_kwargs["num_selfplay_envs"]
                + self_play_kwargs.get("num_old_policies", 0)
                + (len(selfplay_bots) if selfplay_bots else 0)
            )
        else:
            num_bot_envs = n_envs
        make_kwargs["num_bot_envs"] = num_bot_envs
    if "reward_weight" in make_kwargs:
        # Reward Weights:
        # WinLossRewardFunction
        # ResourceGatherRewardFunction
        # ProduceWorkerRewardFunction
        # ProduceBuildingRewardFunction
        # AttackRewardFunction
        # ProduceCombatUnitRewardFunction
        make_kwargs["reward_weight"] = np.array(make_kwargs["reward_weight"])
    if bots:
        ai2s = []
        for ai_name, n in bots.items():
            for _ in range(n):
                # NOTE(review): this break only exits the inner loop; the outer
                # loop keeps iterating remaining bot names (each breaking
                # immediately). Net result is still capped at num_bot_envs.
                if len(ai2s) >= make_kwargs["num_bot_envs"]:
                    break
                ai = getattr(microrts_ai, ai_name)
                assert ai, f"{ai_name} not in microrts_ai"
                ai2s.append(ai)
    else:
        ai2s = [microrts_ai.randomAI for _ in range(make_kwargs["num_bot_envs"])]
    make_kwargs["ai2s"] = ai2s
    # Shared-memory env only supports a single map.
    if len(make_kwargs.get("map_paths", [])) < 2:
        EnvClass = MicroRTSGridModeSharedMemVecEnvCompat
    else:
        EnvClass = MicroRTSGridModeVecEnvCompat
    envs = EnvClass(**make_kwargs)
    envs = HwcToChwObservation(envs)
    envs = IsVectorEnv(envs)
    envs = MicrortsMaskWrapper(envs)

    if self_play_kwargs:
        if selfplay_bots:
            self_play_kwargs["selfplay_bots"] = selfplay_bots
        envs = SelfPlayWrapper(envs, config, **self_play_kwargs)

    if seed is not None:
        envs.action_space.seed(seed)
        envs.observation_space.seed(seed)

    envs = gym.wrappers.RecordEpisodeStatistics(envs)
    envs = MicrortsStatsRecorder(envs, config.algo_hyperparams.get("gamma", 0.99), bots)
    if training:
        assert tb_writer
        envs = EpisodeStatsWriter(
            envs,
            tb_writer,
            training=training,
            rolling_length=rolling_length,
            additional_keys_to_log=config.additional_keys_to_log,
        )
    return envs
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/vec_env/microrts.py
0.792344
0.289833
microrts.py
pypi
from typing import Dict, List, Optional, TypeVar

import gym
import numpy as np
from gym.vector.vector_env import VectorEnv
from stable_baselines3.common.vec_env.base_vec_env import tile_images

from rl_algo_impls.wrappers.lux_env_gridnet import LuxEnvGridnet, LuxRewardWeights
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnvObs, VecEnvStepReturn

VecLuxEnvSelf = TypeVar("VecLuxEnvSelf", bound="VecLuxEnv")


class VecLuxEnv(VectorEnv):
    """Synchronous vector env over LuxAI_S2 matches.

    Each underlying LuxEnvGridnet hosts one 2-player match, so num_envs
    (which must be even) maps to num_envs // 2 matches; step/reset interleave
    the two players' data per match.
    """

    def __init__(
        self,
        num_envs: int,
        bid_std_dev: float = 5,
        reward_weights: Optional[Dict[str, float]] = None,
        **kwargs,
    ) -> None:
        assert num_envs % 2 == 0, f"{num_envs} must be even"
        self.envs = [
            LuxEnvGridnet(
                gym.make("LuxAI_S2-v0", collect_stats=True, **kwargs),
                bid_std_dev=bid_std_dev,
                reward_weights=reward_weights,
            )
            for _ in range(num_envs // 2)
        ]
        single_env = self.envs[0]
        map_dim = single_env.unwrapped.env_cfg.map_size
        self.num_map_tiles = map_dim * map_dim
        single_observation_space = single_env.single_observation_space
        self.action_plane_space = single_env.action_plane_space
        single_action_space = single_env.single_action_space
        self.metadata = single_env.metadata
        super().__init__(num_envs, single_observation_space, single_action_space)

    def step(self, action: np.ndarray) -> VecEnvStepReturn:
        """Route consecutive action pairs to each match and concatenate results."""
        step_returns = [
            env.step(action[2 * idx : 2 * idx + 2]) for idx, env in enumerate(self.envs)
        ]
        obs = np.concatenate([sr[0] for sr in step_returns])
        rewards = np.concatenate([sr[1] for sr in step_returns])
        dones = np.concatenate([sr[2] for sr in step_returns])
        infos = [info for sr in step_returns for info in sr[3]]
        return obs, rewards, dones, infos

    def reset(self) -> VecEnvObs:
        env_obervations = [env.reset() for env in self.envs]
        return np.concatenate(env_obervations)

    def seed(self, seeds=None):
        # TODO: Seeds aren't supported in LuxAI_S2
        pass

    def close_extras(self, **kwargs):
        for env in self.envs:
            env.close()

    @property
    def unwrapped(self: VecLuxEnvSelf) -> VecLuxEnvSelf:
        return self

    def render(self, mode="human", **kwargs):
        """Render all matches; tiles images for rgb_array mode.

        Returns None for "human" mode (and any mode other than "rgb_array"
        with more than one env).
        """
        if self.num_envs == 1:
            return self.envs[0].render(mode=mode, **kwargs)
        if mode == "human":
            for env in self.envs:
                env.render(mode=mode, **kwargs)
        elif mode == "rgb_array":
            imgs = self.get_images()
            bigimg = tile_images(imgs)
            return bigimg

    def get_images(self) -> List[np.ndarray]:
        return [env.render(mode="rgb_array") for env in self.envs]

    def get_action_mask(self) -> np.ndarray:
        """Concatenated per-player invalid-action masks across all matches."""
        return np.concatenate([env.get_action_mask() for env in self.envs])

    @property
    def reward_weights(self) -> LuxRewardWeights:
        # All sub-envs share the same weights; read from the first.
        return self.envs[0].reward_weights

    @reward_weights.setter
    def reward_weights(self, reward_weights: LuxRewardWeights) -> None:
        for env in self.envs:
            env.reward_weights = reward_weights
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/vec_env/vec_lux_env.py
0.803482
0.495361
vec_lux_env.py
pypi
import os
from dataclasses import astuple
from typing import Callable, Optional

import gym
from gym.vector.async_vector_env import AsyncVectorEnv
from gym.vector.sync_vector_env import SyncVectorEnv
from gym.wrappers.frame_stack import FrameStack
from gym.wrappers.gray_scale_observation import GrayScaleObservation
from gym.wrappers.resize_observation import ResizeObservation
from stable_baselines3.common.atari_wrappers import MaxAndSkipEnv, NoopResetEnv
from stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv
from stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv
from stable_baselines3.common.vec_env.vec_normalize import VecNormalize
from torch.utils.tensorboard.writer import SummaryWriter

from rl_algo_impls.runner.config import Config, EnvHyperparams
from rl_algo_impls.shared.policy.policy import VEC_NORMALIZE_FILENAME
from rl_algo_impls.shared.vec_env.utils import (
    import_for_env_id,
    is_atari,
    is_bullet_env,
    is_car_racing,
    is_gym_procgen,
    is_lux,
    is_microrts,
)
from rl_algo_impls.wrappers.action_mask_wrapper import SingleActionMaskWrapper
from rl_algo_impls.wrappers.atari_wrappers import (
    ClipRewardEnv,
    EpisodicLifeEnv,
    FireOnLifeStarttEnv,
)
from rl_algo_impls.wrappers.episode_record_video import EpisodeRecordVideo
from rl_algo_impls.wrappers.episode_stats_writer import EpisodeStatsWriter
from rl_algo_impls.wrappers.hwc_to_chw_observation import HwcToChwObservation
from rl_algo_impls.wrappers.initial_step_truncate_wrapper import (
    InitialStepTruncateWrapper,
)
from rl_algo_impls.wrappers.is_vector_env import IsVectorEnv
from rl_algo_impls.wrappers.lux_env_gridnet import LuxEnvGridnet
from rl_algo_impls.wrappers.no_reward_timeout import NoRewardTimeout
from rl_algo_impls.wrappers.noop_env_seed import NoopEnvSeed
from rl_algo_impls.wrappers.normalize import NormalizeObservation, NormalizeReward
from rl_algo_impls.wrappers.self_play_wrapper import SelfPlayWrapper
from rl_algo_impls.wrappers.sync_vector_env_render_compat import (
    SyncVectorEnvRenderCompat,
)
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv
from rl_algo_impls.wrappers.video_compat_wrapper import VideoCompatWrapper


def make_vec_env(
    config: Config,
    hparams: EnvHyperparams,
    training: bool = True,
    render: bool = False,
    normalize_load_path: Optional[str] = None,
    tb_writer: Optional[SummaryWriter] = None,
) -> VecEnv:
    """Generic vectorized-env factory for gym-style environments.

    Builds a per-index env thunk (with family-specific wrappers for Atari,
    CarRacing, procgen, microrts), vectorizes via the configured backend
    (sb3 or gym, sync or async), then applies self-play, stats-writing, and
    observation/reward normalization wrappers as configured.
    """
    # Positional unpack of EnvHyperparams; underscores mark unused slots.
    (
        env_type,
        n_envs,
        frame_stack,
        make_kwargs,
        no_reward_timeout_steps,
        no_reward_fire_steps,
        vec_env_class,
        normalize,
        normalize_kwargs,
        rolling_length,
        train_record_video,
        video_step_interval,
        initial_steps_to_truncate,
        clip_atari_rewards,
        normalize_type,
        mask_actions,
        _,  # bots
        self_play_kwargs,
        selfplay_bots,
    ) = astuple(hparams)

    # Registers any env packages needed for this env id.
    import_for_env_id(config.env_id)

    seed = config.seed(training=training)

    # Copy so family-specific tweaks below don't mutate the caller's kwargs.
    make_kwargs = make_kwargs.copy() if make_kwargs is not None else {}
    if is_bullet_env(config) and render:
        make_kwargs["render"] = True
    if is_car_racing(config):
        make_kwargs["verbose"] = 0
    if is_gym_procgen(config) and not render:
        make_kwargs["render_mode"] = "rgb_array"

    def make(idx: int) -> Callable[[], gym.Env]:
        # Returns a thunk building env #idx with all single-env wrappers.
        def _make() -> gym.Env:
            env = gym.make(config.env_id, **make_kwargs)
            env = gym.wrappers.RecordEpisodeStatistics(env)
            env = VideoCompatWrapper(env)
            # Only env 0 records video during training.
            if training and train_record_video and idx == 0:
                env = EpisodeRecordVideo(
                    env,
                    config.video_prefix,
                    step_increment=n_envs,
                    video_step_interval=int(video_step_interval),
                )
            if training and initial_steps_to_truncate:
                # Stagger truncation points across envs to decorrelate episodes.
                env = InitialStepTruncateWrapper(
                    env, idx * initial_steps_to_truncate // n_envs
                )
            if is_atari(config):  # type: ignore
                # Standard Atari preprocessing stack.
                env = NoopResetEnv(env, noop_max=30)
                env = MaxAndSkipEnv(env, skip=4)
                env = EpisodicLifeEnv(env, training=training)
                action_meanings = env.unwrapped.get_action_meanings()
                if "FIRE" in action_meanings:  # type: ignore
                    env = FireOnLifeStarttEnv(env, action_meanings.index("FIRE"))
                if clip_atari_rewards:
                    env = ClipRewardEnv(env, training=training)
                env = ResizeObservation(env, (84, 84))
                env = GrayScaleObservation(env, keep_dim=False)
                env = FrameStack(env, frame_stack)
            elif is_car_racing(config):
                env = ResizeObservation(env, (64, 64))
                env = GrayScaleObservation(env, keep_dim=False)
                env = FrameStack(env, frame_stack)
            elif is_gym_procgen(config):
                # env = GrayScaleObservation(env, keep_dim=False)
                env = NoopEnvSeed(env)
                env = HwcToChwObservation(env)
                if frame_stack > 1:
                    env = FrameStack(env, frame_stack)
            elif is_microrts(config):
                env = HwcToChwObservation(env)

            if no_reward_timeout_steps:
                env = NoRewardTimeout(
                    env, no_reward_timeout_steps, n_fire_steps=no_reward_fire_steps
                )

            if seed is not None:
                # Offset by idx so each env gets a distinct seed.
                env.seed(seed + idx)
                env.action_space.seed(seed + idx)
                env.observation_space.seed(seed + idx)

            return env

        return _make

    if env_type == "sb3vec":
        VecEnvClass = {"sync": DummyVecEnv, "async": SubprocVecEnv}[vec_env_class]
    elif env_type == "gymvec":
        VecEnvClass = {"sync": SyncVectorEnv, "async": AsyncVectorEnv}[vec_env_class]
    else:
        raise ValueError(f"env_type {env_type} unsupported")
    envs = VecEnvClass([make(i) for i in range(n_envs)])
    if env_type == "gymvec" and vec_env_class == "sync":
        envs = SyncVectorEnvRenderCompat(envs)
    if env_type == "sb3vec":
        envs = IsVectorEnv(envs)
    if mask_actions:
        envs = SingleActionMaskWrapper(envs)

    if self_play_kwargs:
        if selfplay_bots:
            self_play_kwargs["selfplay_bots"] = selfplay_bots
        envs = SelfPlayWrapper(envs, config, **self_play_kwargs)

    if training:
        assert tb_writer
        envs = EpisodeStatsWriter(
            envs, tb_writer, training=training, rolling_length=rolling_length
        )
    if normalize:
        if normalize_type is None:
            # sb3 vec envs default to sb3's VecNormalize; otherwise gym-style.
            normalize_type = "sb3" if env_type == "sb3vec" else "gymlike"
        normalize_kwargs = normalize_kwargs or {}
        if normalize_type == "sb3":
            if normalize_load_path:
                # Resume with previously saved normalization statistics.
                envs = VecNormalize.load(
                    os.path.join(normalize_load_path, VEC_NORMALIZE_FILENAME),
                    envs,  # type: ignore
                )
            else:
                envs = VecNormalize(
                    envs,  # type: ignore
                    training=training,
                    **normalize_kwargs,
                )
            if not training:
                envs.norm_reward = False
        elif normalize_type == "gymlike":
            if normalize_kwargs.get("norm_obs", True):
                envs = NormalizeObservation(
                    envs, training=training, clip=normalize_kwargs.get("clip_obs", 10.0)
                )
            # Reward normalization only applies during training.
            if training and normalize_kwargs.get("norm_reward", True):
                envs = NormalizeReward(
                    envs,
                    training=training,
                    clip=normalize_kwargs.get("clip_reward", 10.0),
                )
        else:
            raise ValueError(
                f"normalize_type {normalize_type} not supported (sb3 or gymlike)"
            )

    return envs
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/vec_env/vec_env.py
0.739422
0.356783
vec_env.py
pypi
from dataclasses import astuple
from typing import Callable, Dict, Optional

import gym
from torch.utils.tensorboard.writer import SummaryWriter

from rl_algo_impls.runner.config import Config, EnvHyperparams
from rl_algo_impls.shared.vec_env.lux_async_vector_env import LuxAsyncVectorEnv
from rl_algo_impls.shared.vec_env.vec_lux_env import VecLuxEnv
from rl_algo_impls.wrappers.episode_stats_writer import EpisodeStatsWriter
from rl_algo_impls.wrappers.hwc_to_chw_observation import HwcToChwObservation
from rl_algo_impls.wrappers.lux_env_gridnet import LuxEnvGridnet
from rl_algo_impls.wrappers.self_play_eval_wrapper import SelfPlayEvalWrapper
from rl_algo_impls.wrappers.self_play_wrapper import SelfPlayWrapper
from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv


def make_lux_env(
    config: Config,
    hparams: EnvHyperparams,
    training: bool = True,
    render: bool = False,
    normalize_load_path: Optional[str] = None,
    tb_writer: Optional[SummaryWriter] = None,
) -> VecEnv:
    """Build a vectorized LuxAI_S2 env (sync VecLuxEnv or async), with
    optional self-play and episode-stats writing.

    ``render`` and ``normalize_load_path`` are accepted for signature
    compatibility with the other make_*_env factories but unused here.
    """
    # Positional unpack of EnvHyperparams; underscores mark unused slots.
    (
        _,  # env_type,
        n_envs,
        _,  # frame_stack
        make_kwargs,
        _,  # no_reward_timeout_steps
        _,  # no_reward_fire_steps
        vec_env_class,
        _,  # normalize
        _,  # normalize_kwargs,
        rolling_length,
        _,  # train_record_video
        _,  # video_step_interval
        _,  # initial_steps_to_truncate
        _,  # clip_atari_rewards
        _,  # normalize_type
        _,  # mask_actions
        _,  # bots
        self_play_kwargs,
        selfplay_bots,
    ) = astuple(hparams)
    seed = config.seed(training=training)
    make_kwargs = make_kwargs or {}
    self_play_kwargs = self_play_kwargs or {}
    # Total env slots include self-play old-policy and selfplay-bot slots.
    num_envs = (
        n_envs + self_play_kwargs.get("num_old_policies", 0) + len(selfplay_bots or [])
    )
    if num_envs == 1 and not training:
        # Workaround for supporting the video env
        num_envs = 2

    def make(idx: int) -> Callable[[], gym.Env]:
        # Thunk for one LuxEnvGridnet match (used by the async branch).
        def _make() -> gym.Env:
            def _gridnet(
                bid_std_dev=5,
                reward_weights: Optional[Dict[str, float]] = None,
                **kwargs,
            ) -> LuxEnvGridnet:
                return LuxEnvGridnet(
                    gym.make("LuxAI_S2-v0", collect_stats=True, **kwargs),
                    bid_std_dev=bid_std_dev,
                    reward_weights=reward_weights,
                )

            return _gridnet(**make_kwargs)

        return _make

    if vec_env_class == "sync":
        envs = VecLuxEnv(num_envs, **make_kwargs)
    else:
        # NOTE(review): this branch uses n_envs while the sync branch uses
        # num_envs (which includes self-play extras) — confirm the asymmetry
        # is intended.
        envs = LuxAsyncVectorEnv([make(i) for i in range(n_envs)], copy=False)
        envs = HwcToChwObservation(envs)

    if self_play_kwargs:
        if not training and self_play_kwargs.get("eval_use_training_cache", False):
            envs = SelfPlayEvalWrapper(envs)
        else:
            if selfplay_bots:
                self_play_kwargs["selfplay_bots"] = selfplay_bots
            envs = SelfPlayWrapper(envs, config, **self_play_kwargs)

    if seed is not None:
        envs.action_space.seed(seed)
        envs.observation_space.seed(seed)

    envs = gym.wrappers.RecordEpisodeStatistics(envs)
    if training:
        assert tb_writer
        envs = EpisodeStatsWriter(
            envs,
            tb_writer,
            training=training,
            rolling_length=rolling_length,
            additional_keys_to_log=config.additional_keys_to_log,
        )
    return envs
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/vec_env/lux.py
0.859015
0.295538
lux.py
pypi
from dataclasses import astuple from typing import Optional import gym import numpy as np from torch.utils.tensorboard.writer import SummaryWriter from rl_algo_impls.runner.config import Config, EnvHyperparams from rl_algo_impls.wrappers.episode_stats_writer import EpisodeStatsWriter from rl_algo_impls.wrappers.hwc_to_chw_observation import HwcToChwObservation from rl_algo_impls.wrappers.is_vector_env import IsVectorEnv from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv def make_procgen_env( config: Config, hparams: EnvHyperparams, training: bool = True, render: bool = False, normalize_load_path: Optional[str] = None, tb_writer: Optional[SummaryWriter] = None, ) -> VecEnv: from gym3 import ExtractDictObWrapper, ViewerWrapper from procgen.env import ProcgenGym3Env, ToBaselinesVecEnv ( _, # env_type n_envs, _, # frame_stack make_kwargs, _, # no_reward_timeout_steps _, # no_reward_fire_steps _, # vec_env_class normalize, normalize_kwargs, rolling_length, _, # train_record_video _, # video_step_interval _, # initial_steps_to_truncate _, # clip_atari_rewards _, # normalize_type _, # mask_actions _, # bots _, # self_play_kwargs _, # selfplay_bots ) = astuple(hparams) seed = config.seed(training=training) make_kwargs = make_kwargs or {} make_kwargs["render_mode"] = "rgb_array" if seed is not None: make_kwargs["rand_seed"] = seed envs = ProcgenGym3Env(n_envs, config.env_id, **make_kwargs) envs = ExtractDictObWrapper(envs, key="rgb") if render: envs = ViewerWrapper(envs, info_key="rgb") envs = ToBaselinesVecEnv(envs) envs = IsVectorEnv(envs) # TODO: Handle Grayscale and/or FrameStack envs = HwcToChwObservation(envs) envs = gym.wrappers.RecordEpisodeStatistics(envs) if seed is not None: envs.action_space.seed(seed) envs.observation_space.seed(seed) if training: assert tb_writer envs = EpisodeStatsWriter( envs, tb_writer, training=training, rolling_length=rolling_length ) if normalize and training: normalize_kwargs = normalize_kwargs or {} envs = 
gym.wrappers.NormalizeReward(envs) clip_obs = normalize_kwargs.get("clip_reward", 10.0) envs = gym.wrappers.TransformReward( envs, lambda r: np.clip(r, -clip_obs, clip_obs) ) return envs # type: ignore
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/vec_env/procgen.py
0.732305
0.359561
procgen.py
pypi
import multiprocessing as mp import sys import time from copy import deepcopy from ctypes import c_bool from enum import Enum from typing import Any, List, Optional, Union import numpy as np from gym import logger from gym.error import ( AlreadyPendingCallError, ClosedEnvironmentError, CustomSpaceError, NoAsyncCallError, ) from gym.vector.utils import ( CloudpickleWrapper, clear_mpi_env_vars, concatenate, create_empty_array, create_shared_memory, read_from_shared_memory, write_to_shared_memory, ) from gym.vector.vector_env import VectorEnv from rl_algo_impls.wrappers.lux_env_gridnet import LuxEnvGridnet, LuxRewardWeights __all__ = ["LuxAsyncVectorEnv"] class AsyncState(Enum): DEFAULT = "default" WAITING_RESET = "reset" WAITING_STEP = "step" WAITING_CALL = "call" class LuxAsyncVectorEnv(VectorEnv): """Vectorized environment that runs multiple environments in parallel. It uses `multiprocessing` processes, and pipes for communication. Parameters ---------- env_fns : iterable of callable Functions that create the environments. observation_space : `gym.spaces.Space` instance, optional Observation space of a single environment. If `None`, then the observation space of the first environment is taken. action_space : `gym.spaces.Space` instance, optional Action space of a single environment. If `None`, then the action space of the first environment is taken. shared_memory : bool (default: `True`) If `True`, then the observations from the worker processes are communicated back through shared variables. This can improve the efficiency if the observations are large (e.g. images). copy : bool (default: `True`) If `True`, then the `reset` and `step` methods return a copy of the observations. context : str, optional Context for multiprocessing. If `None`, then the default context is used. Only available in Python 3. daemon : bool (default: `True`) If `True`, then subprocesses have `daemon` flag turned on; that is, they will quit if the head process quits. 
However, `daemon=True` prevents subprocesses to spawn children, so for some environments you may want to have it set to `False` worker : function, optional WARNING - advanced mode option! If set, then use that worker in a subprocess instead of a default one. Can be useful to override some inner vector env logic, for instance, how resets on done are handled. Provides high degree of flexibility and a high chance to shoot yourself in the foot; thus, if you are writing your own worker, it is recommended to start from the code for `_worker` (or `_worker_shared_memory`) method below, and add changes """ def __init__( self, env_fns, observation_space=None, action_space=None, shared_memory=True, copy=True, context=None, daemon=True, worker=None, ): ctx = mp.get_context(context) self.env_fns = env_fns self.shared_memory = shared_memory self.copy = copy dummy_env = env_fns[0]() assert isinstance(dummy_env, LuxEnvGridnet) self.metadata = dummy_env.metadata if (observation_space is None) or (action_space is None): observation_space = observation_space or dummy_env.single_observation_space action_space = action_space or dummy_env.single_action_space self._reward_weights = dummy_env.reward_weights super(LuxAsyncVectorEnv, self).__init__( num_envs=len(env_fns) * 2, observation_space=observation_space, action_space=action_space, ) self.action_plane_space = dummy_env.action_plane_space if self.shared_memory: try: _obs_buffer = ctx.Array( self.single_observation_space.dtype.char, self.num_envs * int(np.prod(self.single_observation_space.shape)), ) self.observations = np.frombuffer( _obs_buffer.get_obj(), dtype=self.single_observation_space.dtype ).reshape((self.num_envs,) + self.single_observation_space.shape) _action_masks_buffer = ctx.Array( c_bool, self.num_envs * int(np.prod(dummy_env.action_mask_shape)), ) self.action_masks = np.frombuffer( _action_masks_buffer.get_obj(), dtype=np.bool_ ).reshape((self.num_envs,) + dummy_env.action_mask_shape) except CustomSpaceError: raise 
ValueError( "Using `shared_memory=True` in `AsyncVectorEnv` " "is incompatible with non-standard Gym observation spaces " "(i.e. custom spaces inheriting from `gym.Space`), and is " "only compatible with default Gym spaces (e.g. `Box`, " "`Tuple`, `Dict`) for batching. Set `shared_memory=False` " "if you use custom observation spaces." ) else: _obs_buffer = None self.observations = create_empty_array( self.single_observation_space, n=self.num_envs, fn=np.zeros ) self.observations = np.zeros( (self.num_envs,) + self.single_observation_space.shape, dtype=self.single_observation_space.dtype, ) _action_masks_buffer = None self.action_masks = np.full( (self.num_envs,) + dummy_env.action_mask_shape, False, dtype=np.bool_, ) dummy_env.close() del dummy_env self.parent_pipes, self.processes = [], [] self.error_queue = ctx.Queue() target = _worker_shared_memory if self.shared_memory else _worker target = worker or target with clear_mpi_env_vars(): for idx, env_fn in enumerate(self.env_fns): parent_pipe, child_pipe = ctx.Pipe() process = ctx.Process( target=target, name="Worker<{0}>-{1}".format(type(self).__name__, idx), args=( idx, CloudpickleWrapper(env_fn), child_pipe, parent_pipe, _obs_buffer, self.error_queue, _action_masks_buffer, ), ) self.parent_pipes.append(parent_pipe) self.processes.append(process) process.daemon = daemon process.start() child_pipe.close() self._state = AsyncState.DEFAULT self._check_observation_spaces() def seed(self, seeds=None): self._assert_is_running() if seeds is None: seeds = [None for _ in range(self.num_envs)] if isinstance(seeds, int): seeds = [seeds + i for i in range(self.num_envs)] assert len(seeds) == self.num_envs if self._state != AsyncState.DEFAULT: raise AlreadyPendingCallError( "Calling `seed` while waiting " "for a pending call to `{0}` to complete.".format(self._state.value), self._state.value, ) for pipe, seed in zip(self.parent_pipes, seeds): pipe.send(("seed", seed)) _, successes = zip(*[pipe.recv() for pipe in 
self.parent_pipes]) self._raise_if_errors(successes) def reset_async(self): self._assert_is_running() if self._state != AsyncState.DEFAULT: raise AlreadyPendingCallError( "Calling `reset_async` while waiting " "for a pending call to `{0}` to complete".format(self._state.value), self._state.value, ) for pipe in self.parent_pipes: pipe.send(("reset", None)) self._state = AsyncState.WAITING_RESET def reset_wait(self, timeout=None): """ Parameters ---------- timeout : int or float, optional Number of seconds before the call to `reset_wait` times out. If `None`, the call to `reset_wait` never times out. Returns ------- observations : sample from `observation_space` A batch of observations from the vectorized environment. """ self._assert_is_running() if self._state != AsyncState.WAITING_RESET: raise NoAsyncCallError( "Calling `reset_wait` without any prior " "call to `reset_async`.", AsyncState.WAITING_RESET.value, ) if not self._poll(timeout): self._state = AsyncState.DEFAULT raise mp.TimeoutError( "The call to `reset_wait` has timed out after " "{0} second{1}.".format(timeout, "s" if timeout > 1 else "") ) results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes]) self._raise_if_errors(successes) self._state = AsyncState.DEFAULT if not self.shared_memory: obs, action_masks = zip(*results) self.observations = np.concatenate(obs) self.action_masks = np.concatenate(action_masks) return deepcopy(self.observations) if self.copy else self.observations def step_async(self, actions): """ Parameters ---------- actions : iterable of samples from `action_space` List of actions. 
""" self._assert_is_running() if self._state != AsyncState.DEFAULT: raise AlreadyPendingCallError( "Calling `step_async` while waiting " "for a pending call to `{0}` to complete.".format(self._state.value), self._state.value, ) paired_actions = np.array(np.split(actions, len(actions) // 2, axis=0)) for pipe, action in zip(self.parent_pipes, paired_actions): pipe.send(("step", action)) self._state = AsyncState.WAITING_STEP def step_wait(self, timeout=None): """ Parameters ---------- timeout : int or float, optional Number of seconds before the call to `step_wait` times out. If `None`, the call to `step_wait` never times out. Returns ------- observations : sample from `observation_space` A batch of observations from the vectorized environment. rewards : `np.ndarray` instance (dtype `np.float_`) A vector of rewards from the vectorized environment. dones : `np.ndarray` instance (dtype `np.bool_`) A vector whose entries indicate whether the episode has ended. infos : list of dict A list of auxiliary diagnostic information. 
""" self._assert_is_running() if self._state != AsyncState.WAITING_STEP: raise NoAsyncCallError( "Calling `step_wait` without any prior call " "to `step_async`.", AsyncState.WAITING_STEP.value, ) if not self._poll(timeout): self._state = AsyncState.DEFAULT raise mp.TimeoutError( "The call to `step_wait` has timed out after " "{0} second{1}.".format(timeout, "s" if timeout > 1 else "") ) results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes]) self._raise_if_errors(successes) self._state = AsyncState.DEFAULT observations_list, rewards, dones, infos, action_masks = zip(*results) if not self.shared_memory: self.observations = np.concatenate(observations_list) self.action_masks = np.concatenate(action_masks) return ( deepcopy(self.observations) if self.copy else self.observations, np.concatenate(rewards), np.concatenate(dones, dtype=np.bool_), [info for pair in infos for info in pair], ) def call_async(self, name: str, *args, **kwargs): """Calls the method with name asynchronously and apply args and kwargs to the method. Args: name: Name of the method or property to call. *args: Arguments to apply to the method call. **kwargs: Keyword arguments to apply to the method call. Raises: ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called). AlreadyPendingCallError: Calling `call_async` while waiting for a pending call to complete """ self._assert_is_running() if self._state != AsyncState.DEFAULT: raise AlreadyPendingCallError( "Calling `call_async` while waiting " f"for a pending call to `{self._state.value}` to complete.", self._state.value, ) for pipe in self.parent_pipes: pipe.send(("_call", (name, args, kwargs))) self._state = AsyncState.WAITING_CALL def call_wait(self, timeout: Optional[Union[int, float]] = None) -> list: """Calls all parent pipes and waits for the results. Args: timeout: Number of seconds before the call to `step_wait` times out. If `None` (default), the call to `step_wait` never times out. 
Returns: List of the results of the individual calls to the method or property for each environment. Raises: NoAsyncCallError: Calling `call_wait` without any prior call to `call_async`. TimeoutError: The call to `call_wait` has timed out after timeout second(s). """ self._assert_is_running() if self._state != AsyncState.WAITING_CALL: raise NoAsyncCallError( "Calling `call_wait` without any prior call to `call_async`.", AsyncState.WAITING_CALL.value, ) if not self._poll(timeout): self._state = AsyncState.DEFAULT raise mp.TimeoutError( f"The call to `call_wait` has timed out after {timeout} second(s)." ) results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes]) self._raise_if_errors(successes) self._state = AsyncState.DEFAULT return results def call(self, name: str, *args, **kwargs) -> List[Any]: self.call_async(name, *args, **kwargs) return self.call_wait() def get_attr(self, name: str): return self.call(name) def set_attr(self, name: str, values: Union[list, tuple, object]): """Sets an attribute of the sub-environments. Args: name: Name of the property to be set in each individual environment. values: Values of the property to be set to. If ``values`` is a list or tuple, then it corresponds to the values for each individual environment, otherwise a single value is set for all environments. Raises: ValueError: Values must be a list or tuple with length equal to the number of environments. AlreadyPendingCallError: Calling `set_attr` while waiting for a pending call to complete. """ self._assert_is_running() if not isinstance(values, (list, tuple)): values = [values for _ in range(self.num_envs)] if len(values) != self.num_envs: raise ValueError( "Values must be a list or tuple with length equal to the " f"number of environments. Got `{len(values)}` values for " f"{self.num_envs} environments." 
) if self._state != AsyncState.DEFAULT: raise AlreadyPendingCallError( "Calling `set_attr` while waiting " f"for a pending call to `{self._state.value}` to complete.", self._state.value, ) for pipe, value in zip(self.parent_pipes, values): pipe.send(("_setattr", (name, value))) _, successes = zip(*[pipe.recv() for pipe in self.parent_pipes]) self._raise_if_errors(successes) @property def reward_weights(self) -> LuxRewardWeights: assert self._reward_weights is not None return self._reward_weights @reward_weights.setter def reward_weights(self, reward_weights: LuxRewardWeights) -> None: self._reward_weights = reward_weights self.set_attr("reward_weights", reward_weights) def get_action_mask(self) -> np.ndarray: return self.action_masks def close_extras(self, timeout=None, terminate=False): """ Parameters ---------- timeout : int or float, optional Number of seconds before the call to `close` times out. If `None`, the call to `close` never times out. If the call to `close` times out, then all processes are terminated. terminate : bool (default: `False`) If `True`, then the `close` operation is forced and all processes are terminated. 
""" timeout = 0 if terminate else timeout try: if self._state != AsyncState.DEFAULT: logger.warn( "Calling `close` while waiting for a pending " "call to `{0}` to complete.".format(self._state.value) ) function = getattr(self, "{0}_wait".format(self._state.value)) function(timeout) except mp.TimeoutError: terminate = True if terminate: for process in self.processes: if process.is_alive(): process.terminate() else: for pipe in self.parent_pipes: if (pipe is not None) and (not pipe.closed): pipe.send(("close", None)) for pipe in self.parent_pipes: if (pipe is not None) and (not pipe.closed): pipe.recv() for pipe in self.parent_pipes: if pipe is not None: pipe.close() for process in self.processes: process.join() def _poll(self, timeout=None): self._assert_is_running() if timeout is None: return True end_time = time.perf_counter() + timeout delta = None for pipe in self.parent_pipes: delta = max(end_time - time.perf_counter(), 0) if pipe is None: return False if pipe.closed or (not pipe.poll(delta)): return False return True def _check_observation_spaces(self): self._assert_is_running() for pipe in self.parent_pipes: pipe.send(("_check_observation_space", self.single_observation_space)) same_spaces, successes = zip(*[pipe.recv() for pipe in self.parent_pipes]) self._raise_if_errors(successes) if not all(same_spaces): raise RuntimeError( "Some environments have an observation space " "different from `{0}`. 
In order to batch observations, the " "observation spaces from all environments must be " "equal.".format(self.single_observation_space) ) def _assert_is_running(self): if self.closed: raise ClosedEnvironmentError( "Trying to operate on `{0}`, after a " "call to `close()`.".format(type(self).__name__) ) def _raise_if_errors(self, successes): if all(successes): return num_errors = self.num_envs - sum(successes) assert num_errors > 0 for _ in range(num_errors): index, exctype, value = self.error_queue.get() logger.error( "Received the following error from Worker-{0}: " "{1}: {2}".format(index, exctype.__name__, value) ) logger.error("Shutting down Worker-{0}.".format(index)) self.parent_pipes[index].close() self.parent_pipes[index] = None logger.error("Raising the last exception back to the main process.") raise exctype(value) def _worker( index, env_fn, pipe, parent_pipe, shared_memory, error_queue, action_masks_buffer ): assert shared_memory is None assert action_masks_buffer is None env = env_fn() parent_pipe.close() try: while True: command, data = pipe.recv() if command == "reset": observation = env.reset() action_mask = env.get_action_mask() pipe.send(((observation, action_mask), True)) elif command == "step": observation, reward, done, info = env.step(data) if all(done): observation = env.reset() action_mask = env.get_action_mask() pipe.send(((observation, reward, done, info, action_mask), True)) elif command == "seed": env.seed(data) pipe.send((None, True)) elif command == "close": pipe.send((None, True)) break elif command == "_check_observation_space": pipe.send((data == env.single_observation_space, True)) elif command == "_call": name, args, kwargs = data if name in ["reset", "step", "seed", "close"]: raise ValueError( f"Trying to call function `{name}` with " f"`_call`. Use `{name}` directly instead." 
) function = getattr(env, name) if callable(function): pipe.send((function(*args, **kwargs), True)) else: pipe.send((function, True)) elif command == "_setattr": name, value = data setattr(env, name, value) pipe.send((None, True)) else: raise RuntimeError( "Received unknown command `{0}`. Must " "be one of {`reset`, `step`, `seed`, `close`, " "`_check_observation_space`}.".format(command) ) except (KeyboardInterrupt, Exception): error_queue.put((index,) + sys.exc_info()[:2]) pipe.send((None, False)) finally: env.close() def np_array_to_shared_memory(index: int, shared_memory, np_array: np.ndarray) -> None: size = int(np.prod(np_array.shape)) destination = np.frombuffer(shared_memory.get_obj(), dtype=np_array.dtype) np.copyto( destination[index * size : (index + 1) * size], np.asarray(np_array, dtype=np_array.dtype).flatten(), ) def _worker_shared_memory( index, env_fn, pipe, parent_pipe, shared_memory, error_queue, action_masks_buffer ): assert shared_memory is not None assert action_masks_buffer is not None env = env_fn() parent_pipe.close() try: while True: command, data = pipe.recv() if command == "reset": observation = env.reset() np_array_to_shared_memory(index, shared_memory, observation) action_mask = env.get_action_mask() np_array_to_shared_memory(index, action_masks_buffer, action_mask) pipe.send(((None, None), True)) elif command == "step": observation, reward, done, info = env.step(data) if all(done): observation = env.reset() np_array_to_shared_memory(index, shared_memory, observation) action_mask = env.get_action_mask() np_array_to_shared_memory(index, action_masks_buffer, action_mask) pipe.send(((None, reward, done, info, None), True)) elif command == "seed": env.seed(data) pipe.send((None, True)) elif command == "close": pipe.send((None, True)) break elif command == "_call": name, args, kwargs = data if name in ["reset", "step", "seed", "close"]: raise ValueError( f"Trying to call function `{name}` with " f"`_call`. Use `{name}` directly instead." 
) function = getattr(env, name) if callable(function): pipe.send((function(*args, **kwargs), True)) else: pipe.send((function, True)) elif command == "_setattr": name, value = data setattr(env, name, value) pipe.send((None, True)) elif command == "_check_observation_space": pipe.send((data == env.single_observation_space, True)) else: raise RuntimeError( "Received unknown command `{0}`. Must " "be one of {`reset`, `step`, `seed`, `close`, " "`_check_observation_space`}.".format(command) ) except (KeyboardInterrupt, Exception): error_queue.put((index,) + sys.exc_info()[:2]) pipe.send((None, False)) finally: env.close()
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/vec_env/lux_async_vector_env.py
0.774541
0.404566
lux_async_vector_env.py
pypi
from dataclasses import asdict from typing import Any, Dict, Optional from torch.utils.tensorboard.writer import SummaryWriter from rl_algo_impls.runner.config import Config, EnvHyperparams from rl_algo_impls.shared.vec_env.lux import make_lux_env from rl_algo_impls.shared.vec_env.microrts import make_microrts_env from rl_algo_impls.shared.vec_env.procgen import make_procgen_env from rl_algo_impls.shared.vec_env.vec_env import make_vec_env from rl_algo_impls.wrappers.self_play_eval_wrapper import SelfPlayEvalWrapper from rl_algo_impls.wrappers.self_play_wrapper import SelfPlayWrapper from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv, find_wrapper def make_env( config: Config, hparams: EnvHyperparams, training: bool = True, render: bool = False, normalize_load_path: Optional[str] = None, tb_writer: Optional[SummaryWriter] = None, ) -> VecEnv: if hparams.env_type == "procgen": make_env_fn = make_procgen_env elif hparams.env_type in {"sb3vec", "gymvec"}: make_env_fn = make_vec_env elif hparams.env_type == "microrts": make_env_fn = make_microrts_env elif hparams.env_type == "lux": make_env_fn = make_lux_env else: raise ValueError(f"env_type {hparams.env_type} not supported") return make_env_fn( config, hparams, training=training, render=render, normalize_load_path=normalize_load_path, tb_writer=tb_writer, ) def make_eval_env( config: Config, hparams: EnvHyperparams, override_hparams: Optional[Dict[str, Any]] = None, self_play_wrapper: Optional[SelfPlayWrapper] = None, **kwargs, ) -> VecEnv: kwargs = kwargs.copy() kwargs["training"] = False env_overrides = config.eval_hyperparams.get("env_overrides") if env_overrides: hparams_kwargs = asdict(hparams) hparams_kwargs.update(env_overrides) hparams = EnvHyperparams(**hparams_kwargs) if override_hparams: hparams_kwargs = asdict(hparams) for k, v in override_hparams.items(): hparams_kwargs[k] = v if k == "n_envs" and v == 1: hparams_kwargs["vec_env_class"] = "sync" hparams = EnvHyperparams(**hparams_kwargs) env = 
make_env(config, hparams, **kwargs) eval_self_play_wrapper = find_wrapper(env, SelfPlayEvalWrapper) if eval_self_play_wrapper: assert self_play_wrapper eval_self_play_wrapper.train_wrapper = self_play_wrapper return env
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/vec_env/make_env.py
0.819893
0.171096
make_env.py
pypi
from typing import Optional, Sequence, Type import gym import torch import torch.nn as nn from rl_algo_impls.shared.encoder.cnn import FlattenedCnnEncoder from rl_algo_impls.shared.module.utils import layer_init class ResidualBlock(nn.Module): def __init__( self, channels: int, activation: Type[nn.Module] = nn.ReLU, init_layers_orthogonal: bool = False, ) -> None: super().__init__() self.residual = nn.Sequential( activation(), layer_init( nn.Conv2d(channels, channels, 3, padding=1), init_layers_orthogonal ), activation(), layer_init( nn.Conv2d(channels, channels, 3, padding=1), init_layers_orthogonal ), ) def forward(self, x: torch.Tensor) -> torch.Tensor: return x + self.residual(x) class ConvSequence(nn.Module): def __init__( self, in_channels: int, out_channels: int, activation: Type[nn.Module] = nn.ReLU, init_layers_orthogonal: bool = False, ) -> None: super().__init__() self.seq = nn.Sequential( layer_init( nn.Conv2d(in_channels, out_channels, 3, padding=1), init_layers_orthogonal, ), nn.MaxPool2d(3, stride=2, padding=1), ResidualBlock(out_channels, activation, init_layers_orthogonal), ResidualBlock(out_channels, activation, init_layers_orthogonal), ) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.seq(x) class ImpalaCnn(FlattenedCnnEncoder): """ IMPALA-style CNN architecture """ def __init__( self, obs_space: gym.Space, activation: Type[nn.Module], cnn_init_layers_orthogonal: Optional[bool], linear_init_layers_orthogonal: bool, cnn_flatten_dim: int, impala_channels: Sequence[int] = (16, 32, 32), **kwargs, ) -> None: if cnn_init_layers_orthogonal is None: cnn_init_layers_orthogonal = False in_channels = obs_space.shape[0] # type: ignore sequences = [] for out_channels in impala_channels: sequences.append( ConvSequence( in_channels, out_channels, activation, cnn_init_layers_orthogonal ) ) in_channels = out_channels sequences.append(activation()) cnn = nn.Sequential(*sequences) super().__init__( obs_space, activation, 
linear_init_layers_orthogonal, cnn_flatten_dim, cnn, **kwargs, )
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/encoder/impala_cnn.py
0.969252
0.355775
impala_cnn.py
pypi
from typing import Dict, Optional, Sequence, Type import gym import torch import torch.nn as nn import torch.nn.functional as F from gym.spaces import Box, Discrete from stable_baselines3.common.preprocessing import get_flattened_obs_dim from rl_algo_impls.shared.encoder.cnn import CnnEncoder from rl_algo_impls.shared.encoder.gridnet_encoder import GridnetEncoder from rl_algo_impls.shared.encoder.impala_cnn import ImpalaCnn from rl_algo_impls.shared.encoder.microrts_cnn import MicrortsCnn from rl_algo_impls.shared.encoder.nature_cnn import NatureCnn from rl_algo_impls.shared.module.utils import layer_init CNN_EXTRACTORS_BY_STYLE: Dict[str, Type[CnnEncoder]] = { "nature": NatureCnn, "impala": ImpalaCnn, "microrts": MicrortsCnn, "gridnet_encoder": GridnetEncoder, } class Encoder(nn.Module): def __init__( self, obs_space: gym.Space, activation: Type[nn.Module], init_layers_orthogonal: bool = False, cnn_flatten_dim: int = 512, cnn_style: str = "nature", cnn_layers_init_orthogonal: Optional[bool] = None, impala_channels: Sequence[int] = (16, 32, 32), ) -> None: super().__init__() if isinstance(obs_space, Box): # Conv2D: (channels, height, width) if len(obs_space.shape) == 3: # type: ignore self.preprocess = None cnn = CNN_EXTRACTORS_BY_STYLE[cnn_style]( obs_space, activation=activation, cnn_init_layers_orthogonal=cnn_layers_init_orthogonal, linear_init_layers_orthogonal=init_layers_orthogonal, cnn_flatten_dim=cnn_flatten_dim, impala_channels=impala_channels, ) self.feature_extractor = cnn self.out_dim = cnn.out_dim elif len(obs_space.shape) == 1: # type: ignore def preprocess(obs: torch.Tensor) -> torch.Tensor: if len(obs.shape) == 1: obs = obs.unsqueeze(0) return obs.float() self.preprocess = preprocess self.feature_extractor = nn.Flatten() self.out_dim = get_flattened_obs_dim(obs_space) else: raise ValueError(f"Unsupported observation space: {obs_space}") elif isinstance(obs_space, Discrete): self.preprocess = lambda x: F.one_hot(x, obs_space.n).float() 
self.feature_extractor = nn.Flatten() self.out_dim = obs_space.n # type: ignore else: raise NotImplementedError def forward(self, obs: torch.Tensor) -> torch.Tensor: if self.preprocess: obs = self.preprocess(obs) return self.feature_extractor(obs)
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/encoder/encoder.py
0.951063
0.258823
encoder.py
pypi
from typing import Optional, Tuple, Type, Union import gym import torch import torch.nn as nn from rl_algo_impls.shared.encoder.cnn import CnnEncoder, EncoderOutDim from rl_algo_impls.shared.module.utils import layer_init class GridnetEncoder(CnnEncoder): """ Encoder for encoder-decoder for Gym-MicroRTS """ def __init__( self, obs_space: gym.Space, activation: Type[nn.Module] = nn.ReLU, cnn_init_layers_orthogonal: Optional[bool] = None, **kwargs ) -> None: if cnn_init_layers_orthogonal is None: cnn_init_layers_orthogonal = True super().__init__(obs_space, **kwargs) in_channels = obs_space.shape[0] # type: ignore self.encoder = nn.Sequential( layer_init( nn.Conv2d(in_channels, 32, kernel_size=3, padding=1), cnn_init_layers_orthogonal, ), nn.MaxPool2d(3, stride=2, padding=1), activation(), layer_init( nn.Conv2d(32, 64, kernel_size=3, padding=1), cnn_init_layers_orthogonal, ), nn.MaxPool2d(3, stride=2, padding=1), activation(), layer_init( nn.Conv2d(64, 128, kernel_size=3, padding=1), cnn_init_layers_orthogonal, ), nn.MaxPool2d(3, stride=2, padding=1), activation(), layer_init( nn.Conv2d(128, 256, kernel_size=3, padding=1), cnn_init_layers_orthogonal, ), nn.MaxPool2d(3, stride=2, padding=1), activation(), ) with torch.no_grad(): encoder_out = self.encoder( self.preprocess(torch.as_tensor(obs_space.sample())) # type: ignore ) self._out_dim = encoder_out.shape[1:] def forward(self, obs: torch.Tensor) -> torch.Tensor: return self.encoder(super().forward(obs)) @property def out_dim(self) -> EncoderOutDim: return self._out_dim
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/encoder/gridnet_encoder.py
0.958148
0.280051
gridnet_encoder.py
pypi
from typing import Optional, Tuple, Type import gym import torch.nn as nn from gym.spaces import Box, Discrete, MultiDiscrete from rl_algo_impls.shared.actor.actor import Actor from rl_algo_impls.shared.actor.categorical import CategoricalActorHead from rl_algo_impls.shared.actor.gaussian import GaussianActorHead from rl_algo_impls.shared.actor.gridnet import GridnetActorHead from rl_algo_impls.shared.actor.gridnet_decoder import GridnetDecoder from rl_algo_impls.shared.actor.multi_discrete import MultiDiscreteActorHead from rl_algo_impls.shared.actor.state_dependent_noise import ( StateDependentNoiseActorHead, ) from rl_algo_impls.shared.encoder import EncoderOutDim def actor_head( action_space: gym.Space, in_dim: EncoderOutDim, hidden_sizes: Tuple[int, ...], init_layers_orthogonal: bool, activation: Type[nn.Module], log_std_init: float = -0.5, use_sde: bool = False, full_std: bool = True, squash_output: bool = False, actor_head_style: str = "single", action_plane_space: Optional[bool] = None, ) -> Actor: assert not use_sde or isinstance( action_space, Box ), "use_sde only valid if Box action_space" assert not squash_output or use_sde, "squash_output only valid if use_sde" if isinstance(action_space, Discrete): assert isinstance(in_dim, int) return CategoricalActorHead( action_space.n, # type: ignore in_dim=in_dim, hidden_sizes=hidden_sizes, activation=activation, init_layers_orthogonal=init_layers_orthogonal, ) elif isinstance(action_space, Box): assert isinstance(in_dim, int) if use_sde: return StateDependentNoiseActorHead( action_space.shape[0], # type: ignore in_dim=in_dim, hidden_sizes=hidden_sizes, activation=activation, init_layers_orthogonal=init_layers_orthogonal, log_std_init=log_std_init, full_std=full_std, squash_output=squash_output, ) else: return GaussianActorHead( action_space.shape[0], # type: ignore in_dim=in_dim, hidden_sizes=hidden_sizes, activation=activation, init_layers_orthogonal=init_layers_orthogonal, log_std_init=log_std_init, ) elif 
isinstance(action_space, MultiDiscrete): if actor_head_style == "single": return MultiDiscreteActorHead( action_space.nvec, # type: ignore in_dim=in_dim, hidden_sizes=hidden_sizes, activation=activation, init_layers_orthogonal=init_layers_orthogonal, ) elif actor_head_style == "gridnet": assert isinstance(action_plane_space, MultiDiscrete) return GridnetActorHead( len(action_space.nvec) // len(action_plane_space.nvec), # type: ignore action_plane_space.nvec, # type: ignore in_dim=in_dim, hidden_sizes=hidden_sizes, activation=activation, init_layers_orthogonal=init_layers_orthogonal, ) elif actor_head_style == "gridnet_decoder": assert isinstance(action_plane_space, MultiDiscrete) return GridnetDecoder( len(action_space.nvec) // len(action_plane_space.nvec), # type: ignore action_plane_space.nvec, # type: ignore in_dim=in_dim, activation=activation, init_layers_orthogonal=init_layers_orthogonal, ) else: raise ValueError(f"Doesn't support actor_head_style {actor_head_style}") else: raise ValueError(f"Unsupported action space: {action_space}")
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/actor/make_actor.py
0.910764
0.33066
make_actor.py
pypi
from typing import Dict, Optional, Tuple, Type import numpy as np import torch import torch.nn as nn from numpy.typing import NDArray from torch.distributions import Distribution, constraints from rl_algo_impls.shared.actor import Actor, PiForward, pi_forward from rl_algo_impls.shared.actor.categorical import MaskedCategorical from rl_algo_impls.shared.encoder import EncoderOutDim from rl_algo_impls.shared.module.utils import mlp class GridnetDistribution(Distribution): def __init__( self, map_size: int, action_vec: NDArray[np.int64], logits: torch.Tensor, masks: torch.Tensor, validate_args: Optional[bool] = None, ) -> None: self.map_size = map_size self.action_vec = action_vec masks = masks.view(-1, masks.shape[-1]) split_masks = torch.split(masks, action_vec.tolist(), dim=1) grid_logits = logits.reshape(-1, action_vec.sum()) split_logits = torch.split(grid_logits, action_vec.tolist(), dim=1) self.categoricals = [ MaskedCategorical(logits=lg, validate_args=validate_args, mask=m) for lg, m in zip(split_logits, split_masks) ] batch_shape = logits.size()[:-1] if logits.ndimension() > 1 else torch.Size() super().__init__(batch_shape=batch_shape, validate_args=validate_args) def log_prob(self, action: torch.Tensor) -> torch.Tensor: prob_stack = torch.stack( [ c.log_prob(a) for a, c in zip(action.view(-1, action.shape[-1]).T, self.categoricals) ], dim=-1, ) logprob = prob_stack.view(-1, self.map_size, len(self.action_vec)) return logprob.sum(dim=(1, 2)) def entropy(self) -> torch.Tensor: ent = torch.stack([c.entropy() for c in self.categoricals], dim=-1) ent = ent.view(-1, self.map_size, len(self.action_vec)) return ent.sum(dim=(1, 2)) def sample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor: s = torch.stack([c.sample(sample_shape) for c in self.categoricals], dim=-1) return s.view(-1, self.map_size, len(self.action_vec)) @property def mode(self) -> torch.Tensor: m = torch.stack([c.mode for c in self.categoricals], dim=-1) return m.view(-1, 
self.map_size, len(self.action_vec)) @property def arg_constraints(self) -> Dict[str, constraints.Constraint]: # Constraints handled by child distributions in dist return {} class GridnetActorHead(Actor): def __init__( self, map_size: int, action_vec: NDArray[np.int64], in_dim: EncoderOutDim, hidden_sizes: Tuple[int, ...] = (32,), activation: Type[nn.Module] = nn.ReLU, init_layers_orthogonal: bool = True, ) -> None: super().__init__() self.map_size = map_size self.action_vec = action_vec assert isinstance(in_dim, int) layer_sizes = (in_dim,) + hidden_sizes + (map_size * action_vec.sum(),) self._fc = mlp( layer_sizes, activation, init_layers_orthogonal=init_layers_orthogonal, final_layer_gain=0.01, ) def forward( self, obs: torch.Tensor, actions: Optional[torch.Tensor] = None, action_masks: Optional[torch.Tensor] = None, ) -> PiForward: assert ( action_masks is not None ), f"No mask case unhandled in {self.__class__.__name__}" logits = self._fc(obs) pi = GridnetDistribution(self.map_size, self.action_vec, logits, action_masks) return pi_forward(pi, actions) @property def action_shape(self) -> Tuple[int, ...]: return (self.map_size, len(self.action_vec))
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/actor/gridnet.py
0.954563
0.561515
gridnet.py
pypi
from typing import Dict, Optional, Tuple, Type import numpy as np import torch import torch.nn as nn from numpy.typing import NDArray from torch.distributions import Distribution, constraints from rl_algo_impls.shared.actor.actor import Actor, PiForward, pi_forward from rl_algo_impls.shared.actor.categorical import MaskedCategorical from rl_algo_impls.shared.encoder import EncoderOutDim from rl_algo_impls.shared.module.utils import mlp class MultiCategorical(Distribution): def __init__( self, nvec: NDArray[np.int64], probs=None, logits=None, validate_args=None, masks: Optional[torch.Tensor] = None, ): # Either probs or logits should be set assert (probs is None) != (logits is None) masks_split = ( torch.split(masks, nvec.tolist(), dim=1) if masks is not None else [None] * len(nvec) ) if probs: self.dists = [ MaskedCategorical(probs=p, validate_args=validate_args, mask=m) for p, m in zip(torch.split(probs, nvec.tolist(), dim=1), masks_split) ] param = probs else: assert logits is not None self.dists = [ MaskedCategorical(logits=lg, validate_args=validate_args, mask=m) for lg, m in zip(torch.split(logits, nvec.tolist(), dim=1), masks_split) ] param = logits batch_shape = param.size()[:-1] if param.ndimension() > 1 else torch.Size() super().__init__(batch_shape=batch_shape, validate_args=validate_args) def log_prob(self, action: torch.Tensor) -> torch.Tensor: prob_stack = torch.stack( [c.log_prob(a) for a, c in zip(action.T, self.dists)], dim=-1 ) return prob_stack.sum(dim=-1) def entropy(self) -> torch.Tensor: return torch.stack([c.entropy() for c in self.dists], dim=-1).sum(dim=-1) def sample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor: return torch.stack([c.sample(sample_shape) for c in self.dists], dim=-1) @property def mode(self) -> torch.Tensor: return torch.stack([c.mode for c in self.dists], dim=-1) @property def arg_constraints(self) -> Dict[str, constraints.Constraint]: # Constraints handled by child distributions in dist return {} class 
MultiDiscreteActorHead(Actor): def __init__( self, nvec: NDArray[np.int64], in_dim: EncoderOutDim, hidden_sizes: Tuple[int, ...] = (32,), activation: Type[nn.Module] = nn.ReLU, init_layers_orthogonal: bool = True, ) -> None: super().__init__() self.nvec = nvec assert isinstance(in_dim, int) layer_sizes = (in_dim,) + hidden_sizes + (nvec.sum(),) self._fc = mlp( layer_sizes, activation, init_layers_orthogonal=init_layers_orthogonal, final_layer_gain=0.01, ) def forward( self, obs: torch.Tensor, actions: Optional[torch.Tensor] = None, action_masks: Optional[torch.Tensor] = None, ) -> PiForward: logits = self._fc(obs) pi = MultiCategorical(self.nvec, logits=logits, masks=action_masks) return pi_forward(pi, actions) @property def action_shape(self) -> Tuple[int, ...]: return (len(self.nvec),)
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/actor/multi_discrete.py
0.942599
0.489381
multi_discrete.py
pypi
from typing import Optional, Tuple, Type import numpy as np import torch import torch.nn as nn from numpy.typing import NDArray from rl_algo_impls.shared.actor import Actor, PiForward, pi_forward from rl_algo_impls.shared.actor.gridnet import GridnetDistribution from rl_algo_impls.shared.encoder import EncoderOutDim from rl_algo_impls.shared.module.utils import layer_init class Transpose(nn.Module): def __init__(self, permutation: Tuple[int, ...]) -> None: super().__init__() self.permutation = permutation def forward(self, x: torch.Tensor) -> torch.Tensor: return x.permute(self.permutation) class GridnetDecoder(Actor): def __init__( self, map_size: int, action_vec: NDArray[np.int64], in_dim: EncoderOutDim, activation: Type[nn.Module] = nn.ReLU, init_layers_orthogonal: bool = True, ) -> None: super().__init__() self.map_size = map_size self.action_vec = action_vec assert isinstance(in_dim, tuple) self.deconv = nn.Sequential( layer_init( nn.ConvTranspose2d( in_dim[0], 128, 3, stride=2, padding=1, output_padding=1 ), init_layers_orthogonal=init_layers_orthogonal, ), activation(), layer_init( nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=1), init_layers_orthogonal=init_layers_orthogonal, ), activation(), layer_init( nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1, output_padding=1), init_layers_orthogonal=init_layers_orthogonal, ), activation(), layer_init( nn.ConvTranspose2d( 32, action_vec.sum(), 3, stride=2, padding=1, output_padding=1 ), init_layers_orthogonal=init_layers_orthogonal, std=0.01, ), Transpose((0, 2, 3, 1)), ) def forward( self, obs: torch.Tensor, actions: Optional[torch.Tensor] = None, action_masks: Optional[torch.Tensor] = None, ) -> PiForward: assert ( action_masks is not None ), f"No mask case unhandled in {self.__class__.__name__}" logits = self.deconv(obs) pi = GridnetDistribution(self.map_size, self.action_vec, logits, action_masks) return pi_forward(pi, actions) @property def action_shape(self) -> Tuple[int, ...]: return 
(self.map_size, len(self.action_vec))
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/actor/gridnet_decoder.py
0.954223
0.415195
gridnet_decoder.py
pypi
from typing import Optional, Tuple, Type, TypeVar, Union import torch import torch.nn as nn from torch.distributions import Distribution, Normal from rl_algo_impls.shared.actor.actor import Actor, PiForward from rl_algo_impls.shared.module.utils import mlp class TanhBijector: def __init__(self, epsilon: float = 1e-6) -> None: self.epsilon = epsilon @staticmethod def forward(x: torch.Tensor) -> torch.Tensor: return torch.tanh(x) @staticmethod def inverse(y: torch.Tensor) -> torch.Tensor: eps = torch.finfo(y.dtype).eps clamped_y = y.clamp(min=-1.0 + eps, max=1.0 - eps) return torch.atanh(clamped_y) def log_prob_correction(self, x: torch.Tensor) -> torch.Tensor: return torch.log(1.0 - torch.tanh(x) ** 2 + self.epsilon) def sum_independent_dims(tensor: torch.Tensor) -> torch.Tensor: if len(tensor.shape) > 1: return tensor.sum(dim=1) return tensor.sum() class StateDependentNoiseDistribution(Normal): def __init__( self, loc, scale, latent_sde: torch.Tensor, exploration_mat: torch.Tensor, exploration_matrices: torch.Tensor, bijector: Optional[TanhBijector] = None, validate_args=None, ): super().__init__(loc, scale, validate_args) self.latent_sde = latent_sde self.exploration_mat = exploration_mat self.exploration_matrices = exploration_matrices self.bijector = bijector def log_prob(self, a: torch.Tensor) -> torch.Tensor: gaussian_a = self.bijector.inverse(a) if self.bijector else a log_prob = sum_independent_dims(super().log_prob(gaussian_a)) if self.bijector: log_prob -= torch.sum(self.bijector.log_prob_correction(gaussian_a), dim=1) return log_prob def sample(self) -> torch.Tensor: noise = self._get_noise() actions = self.mean + noise return self.bijector.forward(actions) if self.bijector else actions def _get_noise(self) -> torch.Tensor: if len(self.latent_sde) == 1 or len(self.latent_sde) != len( self.exploration_matrices ): return torch.mm(self.latent_sde, self.exploration_mat) # (batch_size, n_features) -> (batch_size, 1, n_features) latent_sde = 
self.latent_sde.unsqueeze(dim=1) # (batch_size, 1, n_actions) noise = torch.bmm(latent_sde, self.exploration_matrices) return noise.squeeze(dim=1) @property def mode(self) -> torch.Tensor: mean = super().mode return self.bijector.forward(mean) if self.bijector else mean StateDependentNoiseActorHeadSelf = TypeVar( "StateDependentNoiseActorHeadSelf", bound="StateDependentNoiseActorHead" ) class StateDependentNoiseActorHead(Actor): def __init__( self, act_dim: int, in_dim: int, hidden_sizes: Tuple[int, ...] = (32,), activation: Type[nn.Module] = nn.Tanh, init_layers_orthogonal: bool = True, log_std_init: float = -0.5, full_std: bool = True, squash_output: bool = False, learn_std: bool = False, ) -> None: super().__init__() self.act_dim = act_dim layer_sizes = (in_dim,) + hidden_sizes + (act_dim,) if len(layer_sizes) == 2: self.latent_net = nn.Identity() elif len(layer_sizes) > 2: self.latent_net = mlp( layer_sizes[:-1], activation, output_activation=activation, init_layers_orthogonal=init_layers_orthogonal, ) self.mu_net = mlp( layer_sizes[-2:], activation, init_layers_orthogonal=init_layers_orthogonal, final_layer_gain=0.01, ) self.full_std = full_std std_dim = (layer_sizes[-2], act_dim if self.full_std else 1) self.log_std = nn.Parameter( torch.ones(std_dim, dtype=torch.float32) * log_std_init ) self.bijector = TanhBijector() if squash_output else None self.learn_std = learn_std self.device = None self.exploration_mat = None self.exploration_matrices = None self.sample_weights() def to( self: StateDependentNoiseActorHeadSelf, device: Optional[torch.device] = None, dtype: Optional[Union[torch.dtype, str]] = None, non_blocking: bool = False, ) -> StateDependentNoiseActorHeadSelf: super().to(device, dtype, non_blocking) self.device = device return self def _distribution(self, obs: torch.Tensor) -> Distribution: latent = self.latent_net(obs) mu = self.mu_net(latent) latent_sde = latent if self.learn_std else latent.detach() variance = torch.mm(latent_sde**2, 
self._get_std() ** 2) assert self.exploration_mat is not None assert self.exploration_matrices is not None return StateDependentNoiseDistribution( mu, torch.sqrt(variance + 1e-6), latent_sde, self.exploration_mat, self.exploration_matrices, self.bijector, ) def _get_std(self) -> torch.Tensor: std = torch.exp(self.log_std) if self.full_std: return std ones = torch.ones(self.log_std.shape[0], self.act_dim) if self.device: ones = ones.to(self.device) return ones * std def forward( self, obs: torch.Tensor, actions: Optional[torch.Tensor] = None, action_masks: Optional[torch.Tensor] = None, ) -> PiForward: assert ( not action_masks ), f"{self.__class__.__name__} does not support action_masks" pi = self._distribution(obs) return pi_forward(pi, actions, self.bijector) def sample_weights(self, batch_size: int = 1) -> None: std = self._get_std() weights_dist = Normal(torch.zeros_like(std), std) # Reparametrization trick to pass gradients self.exploration_mat = weights_dist.rsample() self.exploration_matrices = weights_dist.rsample(torch.Size((batch_size,))) @property def action_shape(self) -> Tuple[int, ...]: return (self.act_dim,) def pi_forward( distribution: Distribution, actions: Optional[torch.Tensor] = None, bijector: Optional[TanhBijector] = None, ) -> PiForward: logp_a = None entropy = None if actions is not None: logp_a = distribution.log_prob(actions) entropy = -logp_a if bijector else sum_independent_dims(distribution.entropy()) return PiForward(distribution, logp_a, entropy)
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/shared/actor/state_dependent_noise.py
0.969252
0.694076
state_dependent_noise.py
pypi
import logging from collections import defaultdict from dataclasses import asdict, dataclass from typing import List, Optional, Sequence, TypeVar import numpy as np import torch import torch.nn as nn from torch.optim import Adam from torch.utils.tensorboard.writer import SummaryWriter from rl_algo_impls.shared.algorithm import Algorithm from rl_algo_impls.shared.callbacks import Callback from rl_algo_impls.shared.gae import compute_rtg_and_advantage_from_trajectories from rl_algo_impls.shared.trajectory import Trajectory, TrajectoryAccumulator from rl_algo_impls.vpg.policy import VPGActorCritic from rl_algo_impls.wrappers.vectorable_wrapper import VecEnv @dataclass class TrainEpochStats: pi_loss: float entropy_loss: float v_loss: float envs_with_done: int = 0 episodes_done: int = 0 def write_to_tensorboard(self, tb_writer: SummaryWriter, global_step: int) -> None: for name, value in asdict(self).items(): tb_writer.add_scalar(f"losses/{name}", value, global_step=global_step) class VPGTrajectoryAccumulator(TrajectoryAccumulator): def __init__(self, num_envs: int) -> None: super().__init__(num_envs, trajectory_class=Trajectory) self.completed_per_env: defaultdict[int, int] = defaultdict(int) def on_done(self, env_idx: int, trajectory: Trajectory) -> None: self.completed_per_env[env_idx] += 1 VanillaPolicyGradientSelf = TypeVar( "VanillaPolicyGradientSelf", bound="VanillaPolicyGradient" ) class VanillaPolicyGradient(Algorithm): def __init__( self, policy: VPGActorCritic, env: VecEnv, device: torch.device, tb_writer: SummaryWriter, gamma: float = 0.99, pi_lr: float = 3e-4, val_lr: float = 1e-3, train_v_iters: int = 80, gae_lambda: float = 0.97, max_grad_norm: float = 10.0, n_steps: int = 4_000, sde_sample_freq: int = -1, ent_coef: float = 0.0, ) -> None: super().__init__(policy, env, device, tb_writer) self.policy = policy self.gamma = gamma self.gae_lambda = gae_lambda self.pi_optim = Adam(self.policy.pi.parameters(), lr=pi_lr) self.val_optim = 
Adam(self.policy.v.parameters(), lr=val_lr) self.max_grad_norm = max_grad_norm self.n_steps = n_steps self.train_v_iters = train_v_iters self.sde_sample_freq = sde_sample_freq self.ent_coef = ent_coef def learn( self: VanillaPolicyGradientSelf, total_timesteps: int, callbacks: Optional[List[Callback]] = None, ) -> VanillaPolicyGradientSelf: timesteps_elapsed = 0 epoch_cnt = 0 while timesteps_elapsed < total_timesteps: epoch_cnt += 1 accumulator = self._collect_trajectories() epoch_stats = self.train(accumulator.all_trajectories) epoch_stats.envs_with_done = len(accumulator.completed_per_env) epoch_stats.episodes_done = sum(accumulator.completed_per_env.values()) epoch_steps = accumulator.n_timesteps() timesteps_elapsed += epoch_steps epoch_stats.write_to_tensorboard( self.tb_writer, global_step=timesteps_elapsed ) print( " | ".join( [ f"Epoch: {epoch_cnt}", f"Pi Loss: {round(epoch_stats.pi_loss, 2)}", f"Epoch Loss: {round(epoch_stats.entropy_loss, 2)}", f"V Loss: {round(epoch_stats.v_loss, 2)}", f"Total Steps: {timesteps_elapsed}", ] ) ) if callbacks: if not all(c.on_step(timesteps_elapsed=epoch_steps) for c in callbacks): logging.info( f"Callback terminated training at {timesteps_elapsed} timesteps" ) break return self def train(self, trajectories: Sequence[Trajectory]) -> TrainEpochStats: self.policy.train() obs = torch.as_tensor( np.concatenate([np.array(t.obs) for t in trajectories]), device=self.device ) act = torch.as_tensor( np.concatenate([np.array(t.act) for t in trajectories]), device=self.device ) rtg, adv = compute_rtg_and_advantage_from_trajectories( trajectories, self.policy, self.gamma, self.gae_lambda, self.device ) _, logp, entropy = self.policy.pi(obs, act) pi_loss = -(logp * adv).mean() entropy_loss = entropy.mean() actor_loss = pi_loss - self.ent_coef * entropy_loss self.pi_optim.zero_grad() actor_loss.backward() nn.utils.clip_grad_norm_(self.policy.pi.parameters(), self.max_grad_norm) self.pi_optim.step() v_loss = 0 for _ in 
range(self.train_v_iters): v = self.policy.v(obs) v_loss = ((v - rtg) ** 2).mean() self.val_optim.zero_grad() v_loss.backward() nn.utils.clip_grad_norm_(self.policy.v.parameters(), self.max_grad_norm) self.val_optim.step() return TrainEpochStats( pi_loss.item(), entropy_loss.item(), v_loss.item(), # type: ignore ) def _collect_trajectories(self) -> VPGTrajectoryAccumulator: self.policy.eval() obs = self.env.reset() accumulator = VPGTrajectoryAccumulator(self.env.num_envs) self.policy.reset_noise() for i in range(self.n_steps): if self.sde_sample_freq > 0 and i > 0 and i % self.sde_sample_freq == 0: self.policy.reset_noise() action, value, _, clamped_action = self.policy.step(obs) next_obs, reward, done, _ = self.env.step(clamped_action) accumulator.step(obs, action, next_obs, reward, done, value) obs = next_obs return accumulator
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/vpg/vpg.py
0.89945
0.328907
vpg.py
pypi
from typing import Optional, Sequence, Tuple import numpy as np import torch import torch.nn as nn from rl_algo_impls.shared.actor import Actor, PiForward, actor_head from rl_algo_impls.shared.encoder import Encoder from rl_algo_impls.shared.policy.actor_critic import OnPolicy, Step, clamp_actions from rl_algo_impls.shared.policy.actor_critic_network import default_hidden_sizes from rl_algo_impls.shared.policy.critic import CriticHead from rl_algo_impls.shared.policy.policy import ACTIVATION from rl_algo_impls.wrappers.vectorable_wrapper import ( VecEnv, VecEnvObs, single_action_space, single_observation_space, ) PI_FILE_NAME = "pi.pt" V_FILE_NAME = "v.pt" class VPGActor(Actor): def __init__(self, feature_extractor: Encoder, head: Actor) -> None: super().__init__() self.feature_extractor = feature_extractor self.head = head def forward(self, obs: torch.Tensor, a: Optional[torch.Tensor] = None) -> PiForward: fe = self.feature_extractor(obs) return self.head(fe, a) def sample_weights(self, batch_size: int = 1) -> None: self.head.sample_weights(batch_size=batch_size) @property def action_shape(self) -> Tuple[int, ...]: return self.head.action_shape class VPGActorCritic(OnPolicy): def __init__( self, env: VecEnv, hidden_sizes: Optional[Sequence[int]] = None, init_layers_orthogonal: bool = True, activation_fn: str = "tanh", log_std_init: float = -0.5, use_sde: bool = False, full_std: bool = True, squash_output: bool = False, **kwargs, ) -> None: super().__init__(env, **kwargs) activation = ACTIVATION[activation_fn] obs_space = single_observation_space(env) self.action_space = single_action_space(env) self.use_sde = use_sde self.squash_output = squash_output hidden_sizes = ( hidden_sizes if hidden_sizes is not None else default_hidden_sizes(obs_space) ) pi_feature_extractor = Encoder( obs_space, activation, init_layers_orthogonal=init_layers_orthogonal ) pi_head = actor_head( self.action_space, pi_feature_extractor.out_dim, tuple(hidden_sizes), init_layers_orthogonal, 
activation, log_std_init=log_std_init, use_sde=use_sde, full_std=full_std, squash_output=squash_output, ) self.pi = VPGActor(pi_feature_extractor, pi_head) v_feature_extractor = Encoder( obs_space, activation, init_layers_orthogonal=init_layers_orthogonal ) v_head = CriticHead( v_feature_extractor.out_dim, tuple(hidden_sizes), activation=activation, init_layers_orthogonal=init_layers_orthogonal, ) self.v = nn.Sequential(v_feature_extractor, v_head) def value(self, obs: VecEnvObs) -> np.ndarray: o = self._as_tensor(obs) with torch.no_grad(): v = self.v(o) return v.cpu().numpy() def step(self, obs: VecEnvObs, action_masks: Optional[np.ndarray] = None) -> Step: assert ( action_masks is None ), f"action_masks not currently supported in {self.__class__.__name__}" o = self._as_tensor(obs) with torch.no_grad(): pi, _, _ = self.pi(o) a = pi.sample() logp_a = pi.log_prob(a) v = self.v(o) a_np = a.cpu().numpy() clamped_a_np = clamp_actions(a_np, self.action_space, self.squash_output) return Step(a_np, v.cpu().numpy(), logp_a.cpu().numpy(), clamped_a_np) def act( self, obs: np.ndarray, deterministic: bool = True, action_masks: Optional[np.ndarray] = None, ) -> np.ndarray: assert ( action_masks is None ), f"action_masks not currently supported in {self.__class__.__name__}" if not deterministic: return self.step(obs).clamped_a else: o = self._as_tensor(obs) with torch.no_grad(): pi, _, _ = self.pi(o) a = pi.mode return clamp_actions(a.cpu().numpy(), self.action_space, self.squash_output) def load(self, path: str) -> None: super().load(path) self.reset_noise() def reset_noise(self, batch_size: Optional[int] = None) -> None: self.pi.sample_weights( batch_size=batch_size if batch_size else self.env.num_envs ) @property def action_shape(self) -> Tuple[int, ...]: return self.pi.action_shape
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/vpg/policy.py
0.953837
0.277742
policy.py
pypi
import os
from collections import defaultdict
from dataclasses import asdict, dataclass
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, TypeVar
from urllib.parse import urlparse

import pandas as pd
import wandb.apis.public
import yaml

from rl_algo_impls.runner.evaluate import Evaluation

EvaluationRowSelf = TypeVar("EvaluationRowSelf", bound="EvaluationRow")


@dataclass
class EvaluationRow:
    """One row of the model-card results table."""

    algo: str
    env: str
    seed: Optional[int]
    reward_mean: float
    reward_std: float
    eval_episodes: int
    best: str  # "*" for the best run, "" otherwise
    wandb_url: str

    @staticmethod
    def data_frame(rows: List[EvaluationRowSelf]) -> pd.DataFrame:
        """Build a DataFrame with one column per dataclass field."""
        results = defaultdict(list)
        for row in rows:
            for name, value in asdict(row).items():
                results[name].append(value)
        return pd.DataFrame(results)


class EvalTableData(NamedTuple):
    run: wandb.apis.public.Run
    evaluation: Evaluation


def evaluation_table(table_data: Iterable[EvalTableData]) -> str:
    """Render the evaluations as a markdown table, starring the best run.

    ``table_data`` is materialized up front: the previous implementation
    iterated the argument twice, so passing a generator consumed it in the
    first pass and produced an empty table.
    """
    table_data = list(table_data)
    # First maximal element by score, matching sorted(reverse=True)[0].
    best_stats = max((d.evaluation.stats for d in table_data), key=lambda s: s.score)
    table_data = sorted(table_data, key=lambda d: d.evaluation.config.seed() or 0)
    rows = [
        EvaluationRow(
            config.algo,
            config.env_id,
            config.seed(),
            stats.score.mean,
            stats.score.std,
            len(stats),
            "*" if stats == best_stats else "",
            f"[wandb]({r.url})",
        )
        for (r, (_, stats, config)) in table_data
    ]
    df = EvaluationRow.data_frame(rows)
    return df.to_markdown(index=False)


def github_project_link(github_url: str) -> str:
    """Markdown link whose text is the repo path (e.g. /user/repo)."""
    return f"[{urlparse(github_url).path}]({github_url})"


def header_section(algo: str, env: str, github_url: str, wandb_report_url: str) -> str:
    """Title and intro paragraphs for the model card."""
    algo_caps = algo.upper()
    lines = [
        f"# **{algo_caps}** Agent playing **{env}**",
        f"This is a trained model of a **{algo_caps}** agent playing **{env}** using "
        f"the {github_project_link(github_url)} repo.",
        f"All models trained at this commit can be found at {wandb_report_url}.",
    ]
    return "\n\n".join(lines)


def github_tree_link(github_url: str, commit_hash: Optional[str]) -> str:
    """Link to the repo at a specific commit; plain project link if no hash."""
    if not commit_hash:
        return github_project_link(github_url)
    return f"[{commit_hash[:7]}]({github_url}/tree/{commit_hash})"


def results_section(
    table_data: List[EvalTableData], algo: str, github_url: str, commit_hash: str
) -> str:
    """Training-results section: explanatory paragraph plus the results table."""
    lines = [
        "## Training Results",
        f"This model was trained from {len(table_data)} trainings of **{algo.upper()}** "
        + "agents using different initial seeds. "
        + f"These agents were trained by checking out "
        + f"{github_tree_link(github_url, commit_hash)}. "
        + "The best and last models were kept from each training. "
        + "This submission has loaded the best models from each training, reevaluates "
        + "them, and selects the best model from these latest evaluations (mean - std).",
    ]
    lines.append(evaluation_table(table_data))
    return "\n\n".join(lines)


def prerequisites_section() -> str:
    """Static WandB-prerequisites section."""
    return """
### Prerequisites: Weights & Biases (WandB)
Training and benchmarking assumes you have a Weights & Biases project to upload runs to.
By default training goes to a rl-algo-impls project while benchmarks go to
rl-algo-impls-benchmarks. During training and benchmarking runs, videos of the best
models and the model weights are uploaded to WandB.

Before doing anything below, you'll need to create a wandb account and run `wandb
login`.
"""


def usage_section(github_url: str, run_path: str, commit_hash: str) -> str:
    """Usage section with the enjoy.py command for this run.

    Fixed typo in the rendered text ("hyperaparameters" -> "hyperparameters").
    """
    return f"""
## Usage
{urlparse(github_url).path}: {github_url}

Note: While the model state dictionary and hyperparameters are saved, the latest
implementation could be sufficiently different to not be able to reproduce similar
results. You might need to checkout the commit the agent was trained on:
{github_tree_link(github_url, commit_hash)}.
```
# Downloads the model, sets hyperparameters, and runs agent for 3 episodes
python enjoy.py --wandb-run-path={run_path}
```

Setup hasn't been completely worked out yet, so you might be best served by using Google
Colab starting from the
[colab_enjoy.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_enjoy.ipynb)
notebook.
"""


# NOTE: function name keeps its historical typo ("setion") because external
# callers may import it by this name.
def training_setion(
    github_url: str, commit_hash: str, algo: str, env: str, seed: Optional[int]
) -> str:
    """Training section with the exact train.py command (including seed)."""
    return f"""
## Training
If you want the highest chance to reproduce these results, you'll want to checkout the
commit the agent was trained on: {github_tree_link(github_url, commit_hash)}. While
training is deterministic, different hardware will give different results.

```
python train.py --algo {algo} --env {env} {'--seed ' + str(seed) if seed is not None else ''}
```

Setup hasn't been completely worked out yet, so you might be best served by using Google
Colab starting from the
[colab_train.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_train.ipynb)
notebook.
"""


def benchmarking_section(report_url: str) -> str:
    """Benchmarking section describing the Lambda Labs / Colab workflow."""
    return f"""
## Benchmarking (with Lambda Labs instance)
This and other models from {report_url} were generated by running a script on a Lambda
Labs instance. In a Lambda Labs instance terminal:
```
git clone git@github.com:sgoodfriend/rl-algo-impls.git
cd rl-algo-impls
bash ./lambda_labs/setup.sh
wandb login
bash ./lambda_labs/benchmark.sh [-a {{"ppo a2c dqn vpg"}}] [-e ENVS] [-j {{6}}] [-p {{rl-algo-impls-benchmarks}}] [-s {{"1 2 3"}}]
```

### Alternative: Google Colab Pro+
As an alternative,
[colab_benchmark.ipynb](https://github.com/sgoodfriend/rl-algo-impls/tree/main/benchmarks#:~:text=colab_benchmark.ipynb),
can be used. However, this requires a Google Colab Pro+ subscription and running across
4 separate instances because otherwise running all jobs will exceed the 24-hour limit.
"""


def hyperparams_section(run_config: Dict[str, Any]) -> str:
    """Hyperparameters section: the WandB run config dumped as YAML."""
    return f"""
## Hyperparameters
This isn't exactly the format of hyperparams in {os.path.join("hyperparams",
run_config["algo"] + ".yml")}, but instead the Wandb Run Config. However, it's very
close and has some additional data:
```
{yaml.dump(run_config)}
```
"""


def model_card_text(
    algo: str,
    env: str,
    github_url: str,
    commit_hash: str,
    wandb_report_url: str,
    table_data: List[EvalTableData],
    best_eval: EvalTableData,
) -> str:
    """Assemble the full model-card markdown from all sections."""
    run, (_, _, config) = best_eval
    run_path = "/".join(run.path)
    return "\n\n".join(
        [
            header_section(algo, env, github_url, wandb_report_url),
            results_section(table_data, algo, github_url, commit_hash),
            prerequisites_section(),
            usage_section(github_url, run_path, commit_hash),
            training_setion(github_url, commit_hash, algo, env, config.seed()),
            benchmarking_section(wandb_report_url),
            hyperparams_section(run.config),
        ]
    )
/rl_algo_impls-0.0.13.tar.gz/rl_algo_impls-0.0.13/rl_algo_impls/publish/markdown_format.py
0.4231
0.677741
markdown_format.py
pypi
from typing import List

from ..Agent import Agent
import numpy as np


class MonteCarlo(Agent):
    """Monte Carlo control agent with an eps-greedy behavior policy.

    Q-values are stored in ``self.q_n`` as ``(state, action) -> (q, n)``,
    where ``n`` counts how many returns have been averaged into ``q``.
    """

    def __init__(self, actions: List, gamma: float, eps: float):
        super().__init__()
        self.actions = actions
        self.gamma = gamma
        self.eps = eps
        self.q_n = {}  # q-value & number of prior episodes
        self.state_action_reward = []

    def _action_value(self, state, action):
        """Return Q(state, action); 0 for unseen pairs."""
        return self.q_n.get((state, action), (0, 0))[0]

    def _get_action(self, state, eps):
        """Return an eps-greedy action for this state."""
        if np.random.rand() < eps:
            return np.random.choice(self.actions)
        return max(
            self.actions,
            key=lambda action: self._action_value(state=state, action=action),
        )

    def update(self, reward):
        """Store reward assigned to previous state-action pair.

        Args:
            reward (float): Reward received upon the transition to the current
                state.
        """
        self.state_action_reward.append([self._state, self._action, reward])

    def take_action(self, state):
        """Choose an eps-greedy action to be taken from this state.

        Args:
            state (Any): The current state representation. It should be an
                immutable type since it's used as a key.
        """
        state = self.decode_state(state)
        chosen = self._get_action(state, self.eps)
        self._action = chosen
        self._state = state
        return chosen

    def end_episode(self):
        """Walk the episode backwards, folding each return into the average."""
        ret = 0
        for state, action, reward in reversed(self.state_action_reward):
            ret = ret * self.gamma + reward
            count = self.q_n.get((state, action), (0, 0))[1]
            old_q = self._action_value(state, action)
            # Incremental mean over count+1 observed returns.
            self.q_n[state, action] = (
                ret / (count + 1) + count / (count + 1) * old_q,
                count + 1,
            )
        self.state_action_reward = []

    def save(self, path: str):
        """Save state-action value table in `path`.npy

        Args:
            path (str): The location of where to store the state-action value
                table.
        """
        super().save(path)
        np.save(path + '.npy', self.q_n)

    def load(self, path):
        """Load state-action value table.

        If it doesn't exist, an empty table is used instead.

        Args:
            path (str): The location of where the state-action value table
                resides.
        """
        try:
            self.q_n = np.load(path + '.npy', allow_pickle='TRUE').item()
        except:
            self.q_n = {}
            print("No file is found in:", path)
        self.state_action_reward = []
/rl_algorithms-0.0.4.tar.gz/rl_algorithms-0.0.4/rl_algorithms/montecarlo/MonteCarlo.py
0.937204
0.514156
MonteCarlo.py
pypi
from typing import List

from ..Agent import Agent
import numpy as np


class Sarsa(Agent):
    """One-step SARSA agent with an eps-greedy policy.

    ``self.sar`` holds at most two ``[state, action, reward]`` triples: the
    previous transition (whose reward is known) and the current one.
    """

    def __init__(self, actions: List, alpha: float, gamma: float, eps: float):
        super().__init__()
        self.actions = actions
        self.alpha = alpha
        self.gamma = gamma
        self.eps = eps
        self.q = {}
        self.sar = []

    def _action_value(self, state, action):
        """Return Q(state, action); a small random value for unseen pairs."""
        return self.q.get((state, action), 1e-3 * np.random.randn())

    def _get_action(self, state, eps):
        """Return an eps-greedy action for this state."""
        if np.random.rand() < eps:
            return np.random.choice(self.actions)
        return max(
            self.actions,
            key=lambda action: self._action_value(state=state, action=action),
        )

    def update(self, reward):
        """Update state-action value of previous (state, action).

        Args:
            reward (float): Reward received upon the transition to the current
                state.
        """
        self.sar[-1][-1] = reward
        if len(self.sar) < 2:
            # Need a completed (s, a, r) plus the successor (s', a').
            return
        prev_state, prev_action, prev_reward = self.sar[0]
        next_state, next_action, _ = self.sar[1]
        q = self._action_value(state=prev_state, action=prev_action)
        # TD error: r + gamma * Q(s', a') - Q(s, a)  (kept in the original
        # evaluation order to preserve float behavior exactly).
        td_error = (prev_reward - q) + self.gamma * self._action_value(
            next_state, next_action
        )
        self.q[(prev_state, prev_action)] = q + self.alpha * td_error
        del self.sar[0]

    def take_action(self, state):
        """Choose an eps-greedy action to be taken from this state.

        Args:
            state (Any): The current state representation. It should be an
                immutable type since it's used as a key.
        """
        state = self.decode_state(state)
        chosen = self._get_action(state, self.eps)
        self.sar.append([state, chosen, 0])
        return chosen

    def end_episode(self):
        """Terminal update for the last (state, action): no bootstrap term."""
        last_state, last_action, last_reward = self.sar[0]
        q = self._action_value(state=last_state, action=last_action)
        self.q[(last_state, last_action)] = q + self.alpha * (last_reward - q)
        self.sar = []

    def save(self, path: str):
        """Save state-action value table in `path`.npy

        Args:
            path (str): The location of where to store the state-action value
                table.
        """
        super().save(path)
        np.save(path + '.npy', self.q)

    def load(self, path):
        """Load state-action value table.

        If it doesn't exist, an empty table is used instead.

        Args:
            path (str): The location of where the state-action value table
                resides.
        """
        try:
            self.q = np.load(path + '.npy', allow_pickle='TRUE').item()
        except:
            self.q = {}
            print("No file is found in:", path)
/rl_algorithms-0.0.4.tar.gz/rl_algorithms-0.0.4/rl_algorithms/sarsa/Sarsa.py
0.918068
0.474449
Sarsa.py
pypi
from typing import List

from ..Agent import Agent
import numpy as np


class NStepSarsa(Agent):
    """n-step SARSA agent with an eps-greedy policy.

    ``self.sar`` buffers up to ``n + 1`` ``[state, action, reward]`` triples.
    Once full, the oldest pair is updated toward the n-step return

        G = sum_{i=0}^{n-1} gamma^i * r_i  +  gamma^n * Q(s_n, a_n)

    FIX: the previous implementation summed all n+1 buffered rewards and had
    no ``gamma^n * Q`` bootstrap term at all, making mid-episode updates a
    truncated return rather than n-step SARSA (Sutton & Barto, Ch. 7).
    """

    def __init__(self, actions: List, alpha: float, gamma: float, eps: float, n: int):
        super().__init__()
        self.actions = actions
        self.alpha = alpha
        self.gamma = gamma
        self.eps = eps
        self.n = n
        self.q = {}
        self.sar = []

    def _action_value(self, state, action):
        """Return Q(state, action); a random value for unseen pairs."""
        return self.q.get((state, action), np.random.randn())

    def _get_action(self, state, eps):
        """Return an eps-greedy action to be taken from this state."""
        if np.random.rand() < eps:
            return np.random.choice(self.actions)
        return max(
            self.actions,
            key=lambda action: self._action_value(state=state, action=action),
        )

    def _full_return(self):
        """Discounted sum of ALL buffered rewards (no bootstrap).

        Only valid at episode end, when every remaining reward is real and the
        terminal value is 0.
        """
        G = 0
        for i, (*_, reward) in enumerate(self.sar):
            G += (self.gamma ** i) * reward
        return G

    def update(self, reward):
        """Update the oldest buffered (state, action) with the n-step return.

        Args:
            reward (float): Reward received upon the transition to `state`.
        """
        self.sar[-1][-1] = reward
        if len(self.sar) < self.n + 1:
            return
        q = self._action_value(state=self.sar[0][0], action=self.sar[0][1])
        # n real rewards ...
        G = 0
        for i, (*_, r) in enumerate(self.sar[: self.n]):
            G += (self.gamma ** i) * r
        # ... plus the gamma^n * Q(s_n, a_n) bootstrap term.
        boot_state, boot_action, _ = self.sar[self.n]
        G += (self.gamma ** self.n) * self._action_value(boot_state, boot_action)
        self.q[self.sar[0][0], self.sar[0][1]] = q + self.alpha * (G - q)
        del self.sar[0]

    def take_action(self, state):
        """Choose an eps-greedy action to be taken from this state.

        Args:
            state (Any): The current state representation. It should be an
                immutable type since it's used as a key.
        """
        state = self.decode_state(state)
        action = self._get_action(state, self.eps)
        self.sar.append([state, action, 0])
        return action

    def end_episode(self):
        """Flush the buffer: each remaining pair gets its true (full) return."""
        while self.sar:
            q = self._action_value(state=self.sar[0][0], action=self.sar[0][1])
            G = self._full_return()
            self.q[self.sar[0][0], self.sar[0][1]] = q + self.alpha * (G - q)
            del self.sar[0]

    def save(self, path: str):
        """Save state-action value table in `path`.npy

        Args:
            path (str): The location of where to store the state-action value
                table.
        """
        super().save(path)
        np.save(path + '.npy', self.q)

    def load(self, path):
        """Load state-action value table.

        If it doesn't exist, an empty table is used instead.

        Args:
            path (str): The location of where the state-action value table
                resides.
        """
        try:
            self.q = np.load(path + '.npy', allow_pickle='TRUE').item()
        except:
            self.q = {}
            print("No file is found in:", path)
/rl_algorithms-0.0.4.tar.gz/rl_algorithms-0.0.4/rl_algorithms/sarsa/NStepSarsa.py
0.933628
0.47384
NStepSarsa.py
pypi
import collections
from copy import deepcopy
from typing import List

import torch
from ..Agent import Agent
import numpy as np
from torch import nn


class DSN(Agent):
    """Deep SARSA Network agent.

    Like DQN but with a SARSA target: the TD target bootstraps from
    Q'(s', a') using the action actually taken in s' (see ``update``),
    rather than max_a Q'(s', a). Uses an experience-replay buffer and a
    periodically-cloned target network ``Q_prime``.
    """

    def __init__(self, network: nn.Module, actions: List, alpha: float, gamma: float,
                 eps: float, c: int = 128, t: int = 1024, capacity: int = 1024,
                 bs: int = 32, device='cpu'):
        super().__init__()
        # Map contiguous indices -> caller-provided action objects; the
        # network's output dimension is indexed by these integers.
        self.actions = {i: action for i, action in enumerate(actions)}
        self.alpha = alpha  # SGD learning rate
        self.gamma = gamma  # discount factor
        self.eps = eps      # exploration rate
        self.bs = bs        # replay minibatch size
        self.c = c          # target-network refresh period (in updates)
        self.t = t          # NOTE(review): only referenced by the commented-out
                            # cosine exploration schedule below — confirm intent
        self.device = device
        self.buffer = ExperienceReplay(capacity, device)
        self.Q = network.to(device)
        # Frozen-ish copy used for targets; only refreshed every ``c`` updates.
        self.Q_prime = deepcopy(self.Q).to(device).eval()
        self.loss = nn.MSELoss()
        self.opt = torch.optim.SGD(self.Q.parameters(), lr=self.alpha)
        self.i = 0  # counter used to trigger the update of Q_prime with Q
        # Rolling [state, action, reward] triples awaiting their successor.
        self.sar = []

    def _action_value(self, state, action=None, clone: bool = False):
        """Q-values for ``state`` (all actions, or gathered at ``action``).

        If clone is False, the `self.Q` network is used, otherwise,
        `self.Q_prime` is used.
        """
        Q = self.Q if not clone else self.Q_prime
        n = state.shape[0]
        state = state.to(self.device)
        if action is not None:
            # Gather each row's Q-value at its own action index.
            value = Q(state)[list(range(n)), action]
        else:
            value = Q(state)
        return value

    def _get_action(self, state, eps):
        """ Return an eps-greedy action to be taken from this state. """
        with torch.no_grad():
            if np.random.rand() < eps:  # * 0.5*(np.cos(2 * np.pi * self.i/self.t)+1):
                return torch.from_numpy(np.random.choice(list(self.actions.keys()),
                                                         size=(state.shape[0],)))
            actions = self._action_value(state=state, clone=True).argmax(dim=1)
            return actions

    def update(self, reward: float):
        """ Update state-action value of previous (state, action).

        Args:
            reward (float): Reward received upon the transaction to `state`.

        Returns:
            The minibatch loss as a float, or None when no update happened
            (fewer than two buffered triples).
        """
        self.sar[-1][-1] = reward
        if len(self.sar) < 2:
            return
        # register history: (s, a, r, s', a') — the SARSA quintuple.
        state, action, reward, next_state, next_action = (
            *self.sar[0], *self.sar[1][:-1])
        self.buffer.append((state, action, torch.tensor(
            reward).unsqueeze(0).float(), next_state, next_action))
        # sample batch_size
        states, actions, rewards, next_states, next_actions = self.buffer.sample(
            self.bs)
        # compute loss — SARSA target: r + gamma * Q'(s', a').
        # NOTE(review): the target is not detached, so backward() also walks
        # Q_prime's graph; harmless since only self.Q is in the optimizer,
        # but wasted computation — confirm.
        gt = rewards + self.gamma * \
            self._action_value(next_states, next_actions, clone=True)
        pred = self._action_value(states, actions, clone=False)
        loss = self.loss(pred, gt)
        # update Q
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()
        if self.i == self.c:
            # update Q_prim
            self.i = 0
            self.Q_prime = deepcopy(self.Q).eval()
        self.i += 1
        del self.sar[0]
        try:
            return loss.item()
        except:
            return None

    def take_action(self, state):
        """ Choose an eps-greedy action to be taken from this state.

        Args:
            state (Any): The current state representation. After fed to
                ``decode_state``, the output should be eligible to be a
                network input.
        """
        state = self.decode_state(state)
        assert state.shape[0] == 1
        action = self._get_action(state, self.eps).cpu()
        self.sar.append([state, action, 0])
        return self.actions[action.item()]

    def save(self, path: str):
        """ Save the Q-network weights in `path`.pth

        Args:
            path (str): The location of where to store the network weights.
        """
        super().save(path)
        torch.save(self.Q.state_dict(), path + '.pth')

    def load(self, path):
        """ Load the Q-network weights. If the file doesn't exist, the
        randomly-initialized network is kept.

        Args:
            path (str): The location of where the network weights reside.
        """
        try:
            self.Q.load_state_dict(torch.load(path + '.pth'))
            self.Q = self.Q.to(self.device)
            self.Q_prime = deepcopy(self.Q).to(self.device).eval()
        except:
            print("No file is found in:", path)


class ExperienceReplay:
    """Fixed-capacity FIFO buffer of (s, a, r, s', a') transition tuples."""

    def __init__(self, capacity, device):
        self.buffer = collections.deque(maxlen=capacity)
        self.device = device

    def __len__(self):
        return len(self.buffer)

    def append(self, experience):
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Sample a minibatch and stack each component into a batch tensor."""
        try:
            indices = np.random.choice(
                len(self.buffer), batch_size, replace=False)
        except:
            # Fewer than batch_size transitions buffered: fall back to
            # sampling with replacement.
            indices = np.random.choice(
                len(self.buffer), batch_size, replace=True)
        states, actions, rewards, next_states, next_actions = map(lambda x: torch.cat(
            x, dim=0).to(self.device), zip(*(self.buffer[idx] for idx in indices)))
        return states, actions, rewards, next_states, next_actions
/rl_algorithms-0.0.4.tar.gz/rl_algorithms-0.0.4/rl_algorithms/sarsa/DSN.py
0.926145
0.520557
DSN.py
pypi
from typing import List

from ..Agent import Agent
import numpy as np


class QLearning(Agent):
    """Tabular Q-learning agent with an eps-greedy behavior policy."""

    def __init__(self, actions: List, alpha: float, gamma: float, eps: float):
        super().__init__()
        self.actions = actions
        self.alpha = alpha
        self.gamma = gamma
        self.eps = eps
        self.q = {}
        self.prev_state = None
        self.prev_action = None

    def _action_value(self, state, action):
        """Return Q(state, action); a small random value for unseen pairs.

        NOTE: the default is re-drawn on every miss, so repeated lookups of an
        unseen pair return different values.
        """
        return self.q.get((state, action), 1e-2*np.random.randn())

    def _get_action(self, state, eps):
        """Return an eps-greedy action for this state (greedy when eps=0)."""
        if np.random.rand() < eps:
            return np.random.choice(self.actions)
        return max(
            self.actions,
            key=lambda action: self._action_value(state=state, action=action),
        )

    def update(self, state, reward):
        """Q-learning backup for the previous (state, action) pair.

        Args:
            state (Any): The new state representation.
            reward (float): Reward received upon the transition to `state`.

        Note:
            - The parameter ``state`` should be an immutable type since it's
              used as a key.
        """
        state = self.decode_state(state)
        prev_q = self._action_value(state=self.prev_state, action=self.prev_action)
        greedy_next = self._get_action(state, 0)  # eps=0 -> greedy action
        # TD error: r + gamma * max_a Q(s', a) - Q(s, a), evaluated in the
        # original order to preserve float behavior exactly.
        td_error = (reward - prev_q) + self.gamma * self._action_value(
            state, greedy_next
        )
        self.q[(self.prev_state, self.prev_action)] = prev_q + self.alpha * td_error

    def take_action(self, state):
        """Choose an eps-greedy action to be taken from this state.

        Args:
            state (Any): The current state representation. It should be an
                immutable type since it's used as a key.
        """
        state = self.decode_state(state)
        chosen = self._get_action(state, self.eps)
        self.prev_action = chosen
        self.prev_state = state
        return chosen

    def save(self, path: str):
        """Save state-action value table in `path`.npy

        Args:
            path (str): The location of where to store the state-action value
                table.
        """
        super().save(path)
        np.save(path + '.npy', self.q)

    def load(self, path):
        """Load state-action value table.

        If it doesn't exist, an empty table is used instead.

        Args:
            path (str): The location of where the state-action value table
                resides.
        """
        try:
            self.q = np.load(path + '.npy', allow_pickle='TRUE').item()
        except:
            self.q = {}
            print("No file is found in:", path)
/rl_algorithms-0.0.4.tar.gz/rl_algorithms-0.0.4/rl_algorithms/qlearning/QLearning.py
0.924227
0.474266
QLearning.py
pypi
import collections
from copy import deepcopy
from typing import List

import torch
from ..Agent import Agent
import numpy as np
from torch import nn


class DQN(Agent):
    """Deep Q-Network agent.

    Standard DQN: experience replay plus a target network ``Q_prime``
    (refreshed every ``c`` updates) providing the bootstrapped target
    r + gamma * max_a Q'(s', a).
    """

    def __init__(self, network: nn.Module, actions: List, alpha: float, gamma: float,
                 eps: float, c: int = 128, t: int = 1024, capacity: int = 1024,
                 bs: int = 32, device='cpu'):
        super().__init__()
        # Map contiguous indices -> caller-provided action objects; the
        # network's output dimension is indexed by these integers.
        self.actions = {i: action for i, action in enumerate(actions)}
        self.alpha = alpha  # SGD learning rate
        self.gamma = gamma  # discount factor
        self.eps = eps      # exploration rate
        self.bs = bs        # replay minibatch size
        self.c = c          # target-network refresh period (in updates)
        self.t = t          # NOTE(review): only referenced by the commented-out
                            # cosine exploration schedule below — confirm intent
        self.device = device
        self.buffer = ExperienceReplay(capacity, device)
        self.Q = network.to(device)
        # Target network; only refreshed every ``c`` updates.
        self.Q_prime = deepcopy(self.Q).to(device).eval()
        self.loss = nn.MSELoss()
        self.opt = torch.optim.SGD(self.Q.parameters(), lr=self.alpha)
        self.i = 0  # counter used to trigger the update of Q_prime with Q
        self.prev_state = None
        self.prev_action = None

    def _action_value(self, state, action=None, clone: bool = False):
        """Q-values for ``state`` (all actions, or gathered at ``action``).

        If clone is False, the `self.Q` network is used, otherwise,
        `self.Q_prime` is used.
        """
        Q = self.Q if not clone else self.Q_prime
        n = state.shape[0]
        state = state.to(self.device)
        if action is not None:
            # Gather each row's Q-value at its own action index.
            value = Q(state)[list(range(n)), action]
        else:
            value = Q(state)
        return value

    def _get_action(self, state, eps):
        """ Return an eps-greedy action to be taken from this state. """
        with torch.no_grad():
            if np.random.rand() < eps:  # * 0.5*(np.cos(2 * np.pi * self.i/self.t)+1):
                return torch.from_numpy(np.random.choice(list(self.actions.keys()),
                                                         size=(state.shape[0],)))
            actions = self._action_value(state=state, clone=True).argmax(dim=1)
            return actions

    def update(self, state: torch.Tensor, reward: float):
        """ Update state-action value of previous (state, action).

        Args:
            state (Any): The new state representation.
            reward (float): Reward received upon the transaction to `state`.

        Note:
            - The parameter ``state`` should be a tensor with the leading
              batch dimension.

        Returns:
            The minibatch loss as a float, or None if .item() fails.
        """
        state = self.decode_state(state).cpu()
        # register history: (s, a, r, s').
        self.buffer.append((self.prev_state, self.prev_action,
                            torch.tensor(reward).unsqueeze(0).float(), state))
        # sample batch_size
        states, actions, rewards, next_states = self.buffer.sample(self.bs)
        # DQN target: r + gamma * max_a Q'(s', a).
        # NOTE(review): the target is not detached, so backward() also walks
        # Q_prime's graph; harmless since only self.Q is in the optimizer,
        # but wasted computation — confirm.
        gt = rewards + self.gamma * \
            self._action_value(next_states, clone=True).max(dim=1)[0]
        pred = self._action_value(states, actions, clone=False)
        loss = self.loss(pred, gt)
        # update Q
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()
        if self.i == self.c:
            # update Q_prim
            self.i = 0
            self.Q_prime = deepcopy(self.Q).eval()
        self.i += 1
        try:
            return loss.item()
        except:
            return None

    def take_action(self, state):
        """ Choose an eps-greedy action to be taken from this state.

        Args:
            state (Any): The current state representation. After fed to
                ``decode_state``, the output should be eligible to be a
                network input.
        """
        state = self.decode_state(state)
        assert state.shape[0] == 1
        action = self._get_action(state, self.eps).cpu()
        self.prev_action = action
        self.prev_state = state
        return self.actions[action.item()]

    def save(self, path: str):
        """ Save the Q-network weights in `path`.pth

        Args:
            path (str): The location of where to store the network weights.
        """
        super().save(path)
        torch.save(self.Q.state_dict(), path + '.pth')

    def load(self, path):
        """ Load the Q-network weights. If the file doesn't exist, the
        randomly-initialized network is kept.

        Args:
            path (str): The location of where the network weights reside.
        """
        try:
            self.Q.load_state_dict(torch.load(path + '.pth'))
            self.Q = self.Q.to(self.device)
            self.Q_prime = deepcopy(self.Q).to(self.device).eval()
        except:
            print("No file is found in:", path)


class ExperienceReplay:
    """Fixed-capacity FIFO buffer of (s, a, r, s') transition tuples."""

    def __init__(self, capacity, device):
        self.buffer = collections.deque(maxlen=capacity)
        self.device = device

    def __len__(self):
        return len(self.buffer)

    def append(self, experience):
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Sample a minibatch and stack each component into a batch tensor."""
        try:
            indices = np.random.choice(
                len(self.buffer), batch_size, replace=False)
        except:
            # Fewer than batch_size transitions buffered: fall back to
            # sampling with replacement.
            indices = np.random.choice(
                len(self.buffer), batch_size, replace=True)
        states, actions, rewards, next_states = map(lambda x: torch.cat(x, dim=0).to(self.device),
                                                    zip(*(self.buffer[idx] for idx in indices)))
        return states, actions, rewards, next_states
/rl_algorithms-0.0.4.tar.gz/rl_algorithms-0.0.4/rl_algorithms/qlearning/DQN.py
0.919575
0.511473
DQN.py
pypi
from copy import deepcopy
from typing import List

from torch.nn.functional import cross_entropy
from ..Agent import Agent
import numpy as np
import torch
from torch import nn
from torch.distributions.categorical import Categorical


class OptionCritic(Agent):
    """Option-critic agent (Bacon et al., AAAI 2017).

    Networks (all operate on a batch of states):
      * ``option_net``:      Q_Omega(s, .) — one value per option.
      * ``action_net``:      intra-option policy logits, indexed as
                             ``out[:, option, action]``.
      * ``termination_net``: per-option termination probabilities beta(s, .)
                             — assumed to already lie in [0, 1]; TODO confirm
                             the supplied network ends in a sigmoid.

    NOTE(review): the previous version of this class could not even be
    instantiated — ``__init__`` referenced undefined names (``alpha_policy``,
    ``alpha_q``, ``self.policy_net``, ``self.v_net``) and methods referenced
    ``self.beta_net``, ``self.eps`` and ``self._policy``, none of which
    existed. This rewrite keeps the constructor signature unchanged and wires
    the actual arguments into a minimal option-critic update:
      * alpha1 — learning rate for the option-value critic,
      * alpha2 — learning rate for the intra-option policy and terminations,
      * eps1   — exploration over options, eps2 — exploration over actions.
    """

    def __init__(self, option_net: nn.Module, action_net: nn.Module,
                 termination_net: nn.Module, number_options: int, actions: List,
                 alpha1: float, alpha2: float, gamma: float, eps1: float,
                 eps2: float, device='cpu'):
        super().__init__()
        self.actions = {i: action for i, action in enumerate(actions)}
        self.number_options = number_options
        self.gamma = gamma
        self.eps1 = eps1  # exploration rate when (re)choosing an option
        self.eps2 = eps2  # exploration rate over primitive actions
        self.device = device
        self.option_net = option_net.to(device)
        self.action_net = action_net.to(device)
        self.termination_net = termination_net.to(device)
        self.q_loss = nn.MSELoss(reduction='sum')
        self.policy_loss = nn.CrossEntropyLoss(reduction='none')
        self.q_optim = torch.optim.SGD(self.option_net.parameters(), lr=alpha1)
        self.actor_optim = torch.optim.SGD(
            list(self.action_net.parameters())
            + list(self.termination_net.parameters()),
            lr=alpha2,
        )
        self.softmax = nn.Softmax(dim=1)
        self.prev_state = None
        self.prev_action = None
        self.option = None  # index of the currently-executing option
        self.I = 1  # discount accumulator for the policy-gradient term

    def _termination_prob(self, state):
        """beta(s, .) — per-option termination probabilities."""
        return self.termination_net(state)

    def _policy_over_options(self, state: torch.Tensor, option: int = None):
        """Q_Omega(s, .) or, if ``option`` given, Q_Omega(s, option)."""
        out = self.option_net(state)
        if option is None:
            return out
        return out[:, option]

    def _intra_option_policy(self, state: torch.Tensor, option: int = None,
                             action: int = None):
        """Intra-option policy logits, optionally sliced by option/action."""
        out = self.action_net(state)
        if option is None:
            return out
        if action is None:
            return out[:, option]
        return out[:, option, action]

    def _choose_option(self, q_options) -> int:
        """eps1-greedy option choice over the option values (batch of 1)."""
        if np.random.rand() < self.eps1:
            return int(np.random.randint(self.number_options))
        return int(q_options.argmax(dim=1).item())

    def _should_terminate(self, state) -> bool:
        """Sample whether the current option terminates in ``state``."""
        beta = self._termination_prob(state)[:, self.option]
        return bool(torch.rand(1).item() < beta.item())

    def _get_action(self, state):
        """(Re)select the current option if needed, then sample an action."""
        with torch.no_grad():
            q_options = self._policy_over_options(state)
            if self.option is None or self._should_terminate(state):
                self.option = self._choose_option(q_options)
            if np.random.rand() < self.eps2:
                return torch.from_numpy(np.random.choice(
                    list(self.actions.keys()), size=(state.shape[0],)))
            probs = self.softmax(self._intra_option_policy(state, self.option))
            return Categorical(probs=probs).sample()

    def start_episode(self):
        """Reset per-episode bookkeeping (discount, history, active option)."""
        self.I = 1
        self.prev_action = None
        self.prev_state = None
        self.option = None

    def update(self, state: torch.Tensor, reward: float):
        """One option-critic step for the previous (state, option, action).

        Args:
            state (torch.Tensor): The new state, batch dimension leading.
            reward (float): Reward received upon the transition to `state`.

        Returns:
            (critic_loss, policy_objective) as floats, or (None, None) when no
            update happened (start of episode).
        """
        state = self.decode_state(state).to(self.device)
        if self.prev_state is not None and self.option is not None:
            w = self.option
            q_next = self._policy_over_options(state).detach()
            # beta keeps its graph: it feeds the termination loss below.
            beta = self._termination_prob(state)[:, w]
            beta_d = beta.detach()
            # U(s', w) = (1 - beta) * Q(s', w) + beta * max_w' Q(s', w')
            u = (1 - beta_d) * q_next[:, w] + beta_d * q_next.max(dim=1)[0]
            target = reward + self.gamma * u
            pred = self._policy_over_options(self.prev_state, w)
            delta = (target - pred).detach()
            q_loss = self.q_loss(pred, target)
            # Intra-option policy gradient, discounted by I as in actor-critic.
            logits = self._intra_option_policy(self.prev_state, w)
            policy_loss = (self.policy_loss(logits, self.prev_action)
                           * self.I * delta).sum()
            # Termination gradient: increase beta where the option's advantage
            # over V(s') = max Q(s', .) is negative.
            advantage = (q_next[:, w] - q_next.max(dim=1)[0])
            termination_loss = (beta * advantage).sum()
            self.q_optim.zero_grad()
            self.actor_optim.zero_grad()
            (q_loss + policy_loss + termination_loss).backward()
            self.q_optim.step()
            self.actor_optim.step()
        self.prev_state = state
        self.I = self.gamma * self.I
        try:
            return q_loss.item(), -policy_loss.item()
        except:
            # First call of the episode: nothing was updated.
            return None, None

    def take_action(self, state):
        """Choose an action from this state via the current option's policy.

        Args:
            state (Any): The current state representation. After fed to
                ``decode_state``, the output should be eligible to be a
                network input.
        """
        state = self.decode_state(state).to(self.device)
        assert state.shape[0] == 1
        action = self._get_action(state)
        self.prev_action = action
        return self.actions[action.item()]

    def save(self, path: str):
        """Save the three networks' weights next to ``path``.

        Args:
            path (str): Base path; suffixes _option/_actor/_termination.pth
                are appended per network.
        """
        super().save(path)
        torch.save(self.option_net.state_dict(), path + '_option.pth')
        torch.save(self.action_net.state_dict(), path + '_actor.pth')
        torch.save(self.termination_net.state_dict(), path + '_termination.pth')

    def load(self, path):
        """Load the three networks' weights; keep random init if absent.

        Args:
            path (str): Base path the networks were saved under.
        """
        try:
            self.option_net.load_state_dict(torch.load(path + '_option.pth'))
            self.action_net.load_state_dict(torch.load(path + '_actor.pth'))
            self.termination_net.load_state_dict(
                torch.load(path + '_termination.pth'))
        except:
            print("No file is found in:", path)
/rl_algorithms-0.0.4.tar.gz/rl_algorithms-0.0.4/rl_algorithms/policy/OptionCritic.py
0.957308
0.401013
OptionCritic.py
pypi
from copy import deepcopy from typing import List from torch.nn.functional import cross_entropy from ..Agent import Agent import numpy as np import torch from torch import nn from torch.distributions.categorical import Categorical class ActorCritic(Agent): def __init__(self, policy_net: nn.Module, v_net: nn.Module, actions: List, alpha_policy: float, alpha_q: float, gamma: float, eps: float, device='cpu'): super().__init__() self.actions = {i: action for i, action in enumerate(actions)} self.gamma = gamma self.eps = eps self.device = device self.policy_net = policy_net.to(device) self.v_net = v_net.to(device) self.v_loss = nn.MSELoss(reduction='sum') self.policy_loss = nn.CrossEntropyLoss(reduction='none') self.policy_optim = torch.optim.SGD( self.policy_net.parameters(), lr=alpha_policy) self.v_optim = torch.optim.SGD(self.v_net.parameters(), lr=alpha_q) self.softmax = nn.Softmax(dim=1) self.prev_state = None self.prev_action = None def _policy(self, state: torch.Tensor): return self.policy_net(state) def _state_value(self, state): value = self.v_net(state) return value def _get_action(self, state): """ Return an action to be taken from this state based on the policy. """ if np.random.rand() < self.eps: return torch.from_numpy(np.random.choice(list(self.actions.keys()), size=(state.shape[0],))) with torch.no_grad(): x = self.softmax(self._policy(state=state)) action = Categorical(probs=x).sample() return action def start_episode(self): self.I = 1 self.prev_action = None self.prev_state = None def update(self, state: torch.Tensor, reward: float): """ Update state-action value of previous (state, action). Args: state (Any): The new state representation. reward (float): Reward received upon the transaction to `state`. Note: - The parameter ``state`` should be a tensor with the leading batch dimension. 
""" state = self.decode_state(state).to(self.device) if self.prev_state is not None: gt = reward + self.gamma * self._state_value(state) pred = self._state_value(self.prev_state) logits = self._policy(self.prev_state) delta = (gt-pred).detach() v_loss = self.v_loss(pred, gt) policy_loss = (self.policy_loss(logits, self.prev_action)*self.I*delta).sum() # update weights self.v_optim.zero_grad() self.policy_optim.zero_grad() v_loss.backward() self.v_optim.step() policy_loss.backward() self.policy_optim.step() self.prev_state = state self.I = self.gamma * self.I try: return v_loss.item(), -policy_loss.item() except: return None, None def take_action(self, state): """ Choose an eps-greedy action to be taken from this state. Args: state (Any): The current state representation. After fed to ``decode_state``, the output should be eligible to be a network input. """ state = self.decode_state(state).to(self.device) assert state.shape[0] == 1 action = self._get_action(state) self.prev_action = action return self.actions[action.item()] def save(self, path: str): """ Save state-action value table in `path`.npy Args: path (str): The location of where to store the state-action value table. """ super().save(path) torch.save(self.policy_net.state_dict(), path + '_actor.pth') torch.save(self.v_net.state_dict(), path + '_critic.pth') def load(self, path): """ Load state-action value table. If it doesn't exist, a randomly-initialized table is used. Args: path (str): The location of where the state-action value table resides. """ try: self.policy_net.load_state_dict(torch.load(path + '_actor.pth')) self.v_net.load_state_dict(torch.load(path + '_critic.pth')) except: print("No file is found in:", path)
/rl_algorithms-0.0.4.tar.gz/rl_algorithms-0.0.4/rl_algorithms/policy/ActorCritic.py
0.955703
0.449453
ActorCritic.py
pypi
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import pandas as pd from rl_benchmark.util import n_step_average def rewards_by_episode(rewards, cut_x=1e12, *args, **kwargs): episodes = np.arange(len(rewards)) episodes, rewards = episodes[episodes < cut_x], rewards[episodes < cut_x] if cut_x > 200: episodes = np.linspace(0, cut_x, 200) rewards = n_step_average(rewards, 200) return episodes, rewards def rewards_by_timestep(rewards, timesteps, cut_x=1e12, *args, **kwargs): timesteps, rewards = timesteps[timesteps < cut_x], rewards[timesteps < cut_x] if cut_x > 200: timesteps = np.linspace(0, cut_x, 200) rewards = n_step_average(rewards, 200) return timesteps, rewards def rewards_by_second(rewards, seconds=None, cut_x=1e12, *args, **kwargs): cut_x = int(cut_x) seconds, rewards = seconds[seconds < cut_x], rewards[seconds < cut_x] if cut_x > len(rewards): seconds = np.linspace(0, cut_x, 200) rewards = n_step_average(rewards, 200) else: seconds = np.linspace(0, cut_x, cut_x) rewards = n_step_average(rewards, cut_x) return seconds, rewards def to_timeseries(benchmark_data, x_label='Episode', y_label='Average Episode Reward', target=rewards_by_episode, cut_x=1e12, smooth=0): """ Convert benchmark data to timeseries data, plottable my mathplotlib. 
Args: benchmark_data: BenchmarkData object x_label: label for the x axis (time) y_label: label for the y axis (values) target: callback returning processed x and y values cut_x: maximum x value to cut (passed to target) smooth: used to np.ewm(span=smooth) (smooth curve) Returns: pd.DataFrame """ data_experiments, data_times, data_values = [], [], [] for experiment_id, experiment_data in enumerate(benchmark_data): extended_results = experiment_data.extended_results() if smooth > 0: extended_results['rewards'] = np.array(pd.Series(extended_results['rewards']).ewm(span=smooth).mean()) x, y = target(cut_x=cut_x, **extended_results) data_times.extend(x) data_values.extend(y) data_experiments.extend([experiment_id] * len(x)) return pd.DataFrame({'experiment': data_experiments, x_label: data_times, y_label: data_values})
/rl-benchmark-0.0.4.tar.gz/rl-benchmark-0.0.4/rl_benchmark/analyze/transform.py
0.874265
0.262771
transform.py
pypi
from __future__ import absolute_import from __future__ import division from __future__ import print_function import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from rl_benchmark.analyze.transform import rewards_by_episode, rewards_by_timestep, rewards_by_second, \ to_timeseries class ResultPlotter(object): def __init__(self): self.benchmarks = list() self.palette = None def make_palette(self): if not self.palette: self.palette = sns.color_palette("husl", len(self.benchmarks)) def add_benchmark(self, benchmark_data, name): self.benchmarks.append((benchmark_data, name)) def plot_reward_by_episode(self, ax=None): self.make_palette() full_data = pd.DataFrame() for idx, (benchmark_data, name) in enumerate(self.benchmarks): plot_data = to_timeseries(benchmark_data, x_label="Episode", y_label="Average Episode Reward", target=rewards_by_episode, cut_x=benchmark_data.min_x('episodes'), smooth=10) plot_data['Benchmark'] = name full_data = full_data.append(plot_data) plot = sns.tsplot(data=full_data, time="Episode", value="Average Episode Reward", unit="experiment", condition='Benchmark', ax=ax, ci=[68, 95], color=self.palette) return plot def plot_reward_by_timestep(self, ax=None): self.make_palette() full_data = pd.DataFrame() for idx, (benchmark_data, name) in enumerate(self.benchmarks): plot_data = to_timeseries(benchmark_data, x_label="Time step", y_label="Average Episode Reward", target=rewards_by_timestep, cut_x=benchmark_data.min_x('timesteps'), smooth=10) plot_data['Benchmark'] = name full_data = full_data.append(plot_data) plot = sns.tsplot(data=full_data, time="Time step", value="Average Episode Reward", unit="experiment", condition='Benchmark', ax=ax, ci=[68, 95], color=self.palette) return plot def plot_reward_by_second(self, ax=None): self.make_palette() full_data = pd.DataFrame() for idx, (benchmark_data, name) in enumerate(self.benchmarks): plot_data = to_timeseries(benchmark_data, x_label="Second", y_label="Average Episode Reward", 
target=rewards_by_second, cut_x=benchmark_data.min_x('seconds'), smooth=10) plot_data['Benchmark'] = name full_data = full_data.append(plot_data) plot = sns.tsplot(data=full_data, time="Second", value="Average Episode Reward", unit="experiment", condition='Benchmark', ax=ax, ci=[68, 95], color=self.palette) return plot
/rl-benchmark-0.0.4.tar.gz/rl-benchmark-0.0.4/rl_benchmark/analyze/plotter/result_plotter.py
0.850282
0.199737
result_plotter.py
pypi
# Coach [![CI](https://img.shields.io/circleci/project/github/NervanaSystems/coach/master.svg)](https://circleci.com/gh/NervanaSystems/workflows/coach/tree/master) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/NervanaSystems/coach/blob/master/LICENSE) [![Docs](https://readthedocs.org/projects/carla/badge/?version=latest)](https://nervanasystems.github.io/coach/) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1134898.svg)](https://doi.org/10.5281/zenodo.1134898) <p align="center"><img src="img/coach_logo.png" alt="Coach Logo" width="200"/></p> Coach is a python reinforcement learning framework containing implementation of many state-of-the-art algorithms. It exposes a set of easy-to-use APIs for experimenting with new RL algorithms, and allows simple integration of new environments to solve. Basic RL components (algorithms, environments, neural network architectures, exploration policies, ...) are well decoupled, so that extending and reusing existing components is fairly painless. 
Training an agent to solve an environment is as easy as running: ```bash coach -p CartPole_DQN -r ``` <img src="img/fetch_slide.gif" alt="Fetch Slide"/> <img src="img/pendulum.gif" alt="Pendulum"/> <img src="img/starcraft.gif" width = "281" height ="200" alt="Starcraft"/> <br> <img src="img/doom_deathmatch.gif" alt="Doom Deathmatch"/> <img src="img/carla.gif" alt="CARLA"/> <img src="img/montezuma.gif" alt="MontezumaRevenge" width = "164" height ="200"/> <br> <img src="img/doom_health.gif" alt="Doom Health Gathering"/> <img src="img/minitaur.gif" alt="PyBullet Minitaur" width = "249" height ="200"/> <img src="img/ant.gif" alt="Gym Extensions Ant"/> <br><br> * [Release 0.8.0](https://ai.intel.com/reinforcement-learning-coach-intel/) (initial release) * [Release 0.9.0](https://ai.intel.com/reinforcement-learning-coach-carla-qr-dqn/) * [Release 0.10.0](https://ai.intel.com/introducing-reinforcement-learning-coach-0-10-0/) * [Release 0.11.0](https://ai.intel.com/rl-coach-data-science-at-scale) * [Release 0.12.0](https://github.com/NervanaSystems/coach/releases/tag/v0.12.0) * [Release 1.0.0](https://www.intel.ai/rl-coach-new-release) (current release) ## Table of Contents - [Benchmarks](#benchmarks) - [Installation](#installation) - [Getting Started](#getting-started) * [Tutorials and Documentation](#tutorials-and-documentation) * [Basic Usage](#basic-usage) * [Running Coach](#running-coach) * [Running Coach Dashboard (Visualization)](#running-coach-dashboard-visualization) * [Distributed Multi-Node Coach](#distributed-multi-node-coach) * [Batch Reinforcement Learning](#batch-reinforcement-learning) - [Supported Environments](#supported-environments) - [Supported Algorithms](#supported-algorithms) - [Citation](#citation) - [Contact](#contact) - [Disclaimer](#disclaimer) ## Benchmarks One of the main challenges when building a research project, or a solution based on a published algorithm, is getting a concrete and reliable baseline that reproduces the algorithm's 
results, as reported by its authors. To address this problem, we are releasing a set of [benchmarks](benchmarks) that shows Coach reliably reproduces many state of the art algorithm results. ## Installation Note: Coach has only been tested on Ubuntu 16.04 LTS, and with Python 3.5. For some information on installing on Ubuntu 17.10 with Python 3.6.3, please refer to the following issue: https://github.com/NervanaSystems/coach/issues/54 In order to install coach, there are a few prerequisites required. This will setup all the basics needed to get the user going with running Coach on top of [OpenAI Gym](https://github.com/openai/gym) environments: ``` # General sudo -E apt-get install python3-pip cmake zlib1g-dev python3-tk python-opencv -y # Boost libraries sudo -E apt-get install libboost-all-dev -y # Scipy requirements sudo -E apt-get install libblas-dev liblapack-dev libatlas-base-dev gfortran -y # PyGame sudo -E apt-get install libsdl-dev libsdl-image1.2-dev libsdl-mixer1.2-dev libsdl-ttf2.0-dev libsmpeg-dev libportmidi-dev libavformat-dev libswscale-dev -y # Dashboard sudo -E apt-get install dpkg-dev build-essential python3.5-dev libjpeg-dev libtiff-dev libsdl1.2-dev libnotify-dev freeglut3 freeglut3-dev libsm-dev libgtk2.0-dev libgtk-3-dev libwebkitgtk-dev libgtk-3-dev libwebkitgtk-3.0-dev libgstreamer-plugins-base1.0-dev -y # Gym sudo -E apt-get install libav-tools libsdl2-dev swig cmake -y ``` We recommend installing coach in a virtualenv: ``` sudo -E pip3 install virtualenv virtualenv -p python3 coach_env . coach_env/bin/activate ``` Finally, install coach using pip: ``` pip3 install rl_coach ``` Or alternatively, for a development environment, install coach from the cloned repository: ``` cd coach pip3 install -e . ``` If a GPU is present, Coach's pip package will install tensorflow-gpu, by default. 
If a GPU is not present, an [Intel-Optimized TensorFlow](https://software.intel.com/en-us/articles/intel-optimized-tensorflow-wheel-now-available), will be installed. In addition to OpenAI Gym, several other environments were tested and are supported. Please follow the instructions in the Supported Environments section below in order to install more environments. ## Getting Started ### Tutorials and Documentation [Jupyter notebooks demonstrating how to run Coach from command line or as a library, implement an algorithm, or integrate an environment](https://github.com/NervanaSystems/coach/tree/master/tutorials). [Framework documentation, algorithm description and instructions on how to contribute a new agent/environment](https://nervanasystems.github.io/coach/). ### Basic Usage #### Running Coach To allow reproducing results in Coach, we defined a mechanism called _preset_. There are several available presets under the `presets` directory. To list all the available presets use the `-l` flag. To run a preset, use: ```bash coach -r -p <preset_name> ``` For example: * CartPole environment using Policy Gradients (PG): ```bash coach -r -p CartPole_PG ``` * Basic level of Doom using Dueling network and Double DQN (DDQN) algorithm: ```bash coach -r -p Doom_Basic_Dueling_DDQN ``` Some presets apply to a group of environment levels, like the entire Atari or Mujoco suites for example. To use these presets, the requeseted level should be defined using the `-lvl` flag. For example: * Pong using the Nerual Episodic Control (NEC) algorithm: ```bash coach -r -p Atari_NEC -lvl pong ``` There are several types of agents that can benefit from running them in a distributed fashion with multiple workers in parallel. Each worker interacts with its own copy of the environment but updates a shared network, which improves the data collection speed and the stability of the learning process. To specify the number of workers to run, use the `-n` flag. 
For example: * Breakout using Asynchronous Advantage Actor-Critic (A3C) with 8 workers: ```bash coach -r -p Atari_A3C -lvl breakout -n 8 ``` It is easy to create new presets for different levels or environments by following the same pattern as in presets.py More usage examples can be found [here](https://github.com/NervanaSystems/coach/blob/master/tutorials/0.%20Quick%20Start%20Guide.ipynb). #### Running Coach Dashboard (Visualization) Training an agent to solve an environment can be tricky, at times. In order to debug the training process, Coach outputs several signals, per trained algorithm, in order to track algorithmic performance. While Coach trains an agent, a csv file containing the relevant training signals will be saved to the 'experiments' directory. Coach's dashboard can then be used to dynamically visualize the training signals, and track algorithmic behavior. To use it, run: ```bash dashboard ``` <img src="img/dashboard.gif" alt="Coach Design" style="width: 800px;"/> ### Distributed Multi-Node Coach As of release 0.11.0, Coach supports horizontal scaling for training RL agents on multiple nodes. In release 0.11.0 this was tested on the ClippedPPO and DQN agents. For usage instructions please refer to the documentation [here](https://nervanasystems.github.io/coach/dist_usage.html). ### Batch Reinforcement Learning Training and evaluating an agent from a dataset of experience, where no simulator is available, is supported in Coach. There are [example](https://github.com/NervanaSystems/coach/blob/master/rl_coach/presets/CartPole_DDQN_BatchRL.py) [presets](https://github.com/NervanaSystems/coach/blob/master/rl_coach/presets/Acrobot_DDQN_BCQ_BatchRL.py) and a [tutorial](https://github.com/NervanaSystems/coach/blob/master/tutorials/4.%20Batch%20Reinforcement%20Learning.ipynb). 
## Supported Environments * *OpenAI Gym:* Installed by default by Coach's installer * *ViZDoom:* Follow the instructions described in the ViZDoom repository - https://github.com/mwydmuch/ViZDoom Additionally, Coach assumes that the environment variable VIZDOOM_ROOT points to the ViZDoom installation directory. * *Roboschool:* Follow the instructions described in the roboschool repository - https://github.com/openai/roboschool * *GymExtensions:* Follow the instructions described in the GymExtensions repository - https://github.com/Breakend/gym-extensions Additionally, add the installation directory to the PYTHONPATH environment variable. * *PyBullet:* Follow the instructions described in the [Quick Start Guide](https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA) (basically just - 'pip install pybullet') * *CARLA:* Download release 0.8.4 from the CARLA repository - https://github.com/carla-simulator/carla/releases Install the python client and dependencies from the release tarball: ``` pip3 install -r PythonClient/requirements.txt pip3 install PythonClient ``` Create a new CARLA_ROOT environment variable pointing to CARLA's installation directory. A simple CARLA settings file (```CarlaSettings.ini```) is supplied with Coach, and is located in the ```environments``` directory. 
* *Starcraft:* Follow the instructions described in the PySC2 repository - https://github.com/deepmind/pysc2 * *DeepMind Control Suite:* Follow the instructions described in the DeepMind Control Suite repository - https://github.com/deepmind/dm_control ## Supported Algorithms <img src="docs_raw/source/_static/img/algorithms.png" alt="Coach Design" style="width: 800px;"/> ### Value Optimization Agents * [Deep Q Network (DQN)](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf) ([code](rl_coach/agents/dqn_agent.py)) * [Double Deep Q Network (DDQN)](https://arxiv.org/pdf/1509.06461.pdf) ([code](rl_coach/agents/ddqn_agent.py)) * [Dueling Q Network](https://arxiv.org/abs/1511.06581) * [Mixed Monte Carlo (MMC)](https://arxiv.org/abs/1703.01310) ([code](rl_coach/agents/mmc_agent.py)) * [Persistent Advantage Learning (PAL)](https://arxiv.org/abs/1512.04860) ([code](rl_coach/agents/pal_agent.py)) * [Categorical Deep Q Network (C51)](https://arxiv.org/abs/1707.06887) ([code](rl_coach/agents/categorical_dqn_agent.py)) * [Quantile Regression Deep Q Network (QR-DQN)](https://arxiv.org/pdf/1710.10044v1.pdf) ([code](rl_coach/agents/qr_dqn_agent.py)) * [N-Step Q Learning](https://arxiv.org/abs/1602.01783) | **Multi Worker Single Node** ([code](rl_coach/agents/n_step_q_agent.py)) * [Neural Episodic Control (NEC)](https://arxiv.org/abs/1703.01988) ([code](rl_coach/agents/nec_agent.py)) * [Normalized Advantage Functions (NAF)](https://arxiv.org/abs/1603.00748.pdf) | **Multi Worker Single Node** ([code](rl_coach/agents/naf_agent.py)) * [Rainbow](https://arxiv.org/abs/1710.02298) ([code](rl_coach/agents/rainbow_dqn_agent.py)) ### Policy Optimization Agents * [Policy Gradients (PG)](http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf) | **Multi Worker Single Node** ([code](rl_coach/agents/policy_gradients_agent.py)) * [Asynchronous Advantage Actor-Critic (A3C)](https://arxiv.org/abs/1602.01783) | **Multi Worker Single Node** ([code](rl_coach/agents/actor_critic_agent.py)) 
* [Deep Deterministic Policy Gradients (DDPG)](https://arxiv.org/abs/1509.02971) | **Multi Worker Single Node** ([code](rl_coach/agents/ddpg_agent.py)) * [Proximal Policy Optimization (PPO)](https://arxiv.org/pdf/1707.06347.pdf) ([code](rl_coach/agents/ppo_agent.py)) * [Clipped Proximal Policy Optimization (CPPO)](https://arxiv.org/pdf/1707.06347.pdf) | **Multi Worker Single Node** ([code](rl_coach/agents/clipped_ppo_agent.py)) * [Generalized Advantage Estimation (GAE)](https://arxiv.org/abs/1506.02438) ([code](rl_coach/agents/actor_critic_agent.py#L86)) * [Sample Efficient Actor-Critic with Experience Replay (ACER)](https://arxiv.org/abs/1611.01224) | **Multi Worker Single Node** ([code](rl_coach/agents/acer_agent.py)) * [Soft Actor-Critic (SAC)](https://arxiv.org/abs/1801.01290) ([code](rl_coach/agents/soft_actor_critic_agent.py)) * [Twin Delayed Deep Deterministic Policy Gradient (TD3)](https://arxiv.org/pdf/1802.09477.pdf) ([code](rl_coach/agents/td3_agent.py)) ### General Agents * [Direct Future Prediction (DFP)](https://arxiv.org/abs/1611.01779) | **Multi Worker Single Node** ([code](rl_coach/agents/dfp_agent.py)) ### Imitation Learning Agents * Behavioral Cloning (BC) ([code](rl_coach/agents/bc_agent.py)) * [Conditional Imitation Learning](https://arxiv.org/abs/1710.02410) ([code](rl_coach/agents/cil_agent.py)) ### Hierarchical Reinforcement Learning Agents * [Hierarchical Actor Critic (HAC)](https://arxiv.org/abs/1712.00948.pdf) ([code](rl_coach/agents/hac_ddpg_agent.py)) ### Memory Types * [Hindsight Experience Replay (HER)](https://arxiv.org/abs/1707.01495.pdf) ([code](rl_coach/memories/episodic/episodic_hindsight_experience_replay.py)) * [Prioritized Experience Replay (PER)](https://arxiv.org/abs/1511.05952) ([code](rl_coach/memories/non_episodic/prioritized_experience_replay.py)) ### Exploration Techniques * E-Greedy ([code](rl_coach/exploration_policies/e_greedy.py)) * Boltzmann ([code](rl_coach/exploration_policies/boltzmann.py)) * Ornstein–Uhlenbeck 
process ([code](rl_coach/exploration_policies/ou_process.py)) * Normal Noise ([code](rl_coach/exploration_policies/additive_noise.py)) * Truncated Normal Noise ([code](rl_coach/exploration_policies/truncated_normal.py)) * [Bootstrapped Deep Q Network](https://arxiv.org/abs/1602.04621) ([code](rl_coach/agents/bootstrapped_dqn_agent.py)) * [UCB Exploration via Q-Ensembles (UCB)](https://arxiv.org/abs/1706.01502) ([code](rl_coach/exploration_policies/ucb.py)) * [Noisy Networks for Exploration](https://arxiv.org/abs/1706.10295) ([code](rl_coach/exploration_policies/parameter_noise.py)) ## Citation If you used Coach for your work, please use the following citation: ``` @misc{caspi_itai_2017_1134899, author = {Caspi, Itai and Leibovich, Gal and Novik, Gal and Endrawis, Shadi}, title = {Reinforcement Learning Coach}, month = dec, year = 2017, doi = {10.5281/zenodo.1134899}, url = {https://doi.org/10.5281/zenodo.1134899} } ``` ## Contact We'd be happy to get any questions or contributions through GitHub issues and PRs. Please make sure to take a look [here](CONTRIBUTING.md) before filing an issue or proposing a PR. The Coach development team can also be contacted over [email](mailto:coach@intel.com) ## Disclaimer Coach is released as a reference code for research purposes. It is not an official Intel product, and the level of quality and support may not be as expected from an official product. Additional algorithms and environments are planned to be added to the framework. Feedback and contributions from the open source and RL research communities are more than welcome.
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/README.md
0.777215
0.964556
README.md
pypi
from typing import Any, Dict, List class Saver(object): """ ABC for saver objects that implement saving/restoring to/from path, and merging two savers. """ @property def path(self): """ Relative path for save/load. If two saver objects return the same path, they must be merge-able. """ raise NotImplementedError def save(self, sess: Any, save_path: str) -> List[str]: """ Save to save_path :param sess: active session for session-based frameworks (e.g. TF) :param save_path: full path to save checkpoint (typically directory plus self.path plus checkpoint count). :return: list of all saved paths """ raise NotImplementedError def restore(self, sess: Any, restore_path: str) -> None: """ Restore from restore_path :param sess: active session for session-based frameworks (e.g. TF) :param restore_path: full path to load checkpoint from. """ raise NotImplementedError def merge(self, other: 'Saver') -> None: """ Merge other saver into this saver :param other: saver to be merged into self """ raise NotImplementedError class SaverCollection(object): """ Object for storing a collection of saver objects. It takes care of ensuring uniqueness of saver paths and merging savers if they have the same path. For example, if a saver handles saving a generic key/value file for all networks in a single file, it can use a more generic path and all savers of all networks would be merged into a single saver that saves/restores parameters for all networks. NOTE: If two savers have the same path, the respective saver class must support merging them into a single saver that saves/restores all merged parameters. """ def __init__(self, saver: Saver = None): """ :param saver: optional initial saver for the collection """ self._saver_dict = dict() # type: Dict[str, Saver] if saver is not None: self._saver_dict[saver.path] = saver def add(self, saver: Saver): """ Add a new saver to the collection. If saver.path is already in the collection, merge the new saver with the existing saver. 
:param saver: new saver to be added to collection """ if saver.path in self._saver_dict: self._saver_dict[saver.path].merge(saver) else: self._saver_dict[saver.path] = saver def update(self, other: 'SaverCollection'): """ Merge savers from other collection into self :param other: saver collection to update self with. """ for c in other: self.add(c) def save(self, sess: Any, save_path: str) -> List[str]: """ Call save on all savers in the collection :param sess: active session for session-based frameworks (e.g. TF) :param save_path: path for saving checkpoints using savers. All saved file paths must start with this path in their full path. For example if save_path is '/home/checkpoints/checkpoint-01', then saved file paths can be '/home/checkpoints/checkpoint-01.main-network' but not '/home/checkpoints/main-network' :return: list of all saved paths """ paths = list() for saver in self: paths.extend(saver.save(sess, self._full_path(save_path, saver))) return paths def restore(self, sess: Any, restore_path: str) -> None: """ Call restore on all savers in the collection :param sess: active session for session-based frameworks (e.g. TF) :param restore_path: path for restoring checkpoint using savers. """ for saver in self: saver.restore(sess, self._full_path(restore_path, saver)) def __iter__(self): """ Return an iterator for savers in the collection :return: saver iterator """ return (v for v in self._saver_dict.values()) @staticmethod def _full_path(path_prefix: str, saver: Saver) -> str: """ Concatenates path of the saver to parent prefix to create full save path :param path_prefix: prefix of the path :param saver: saver object to get unique path extension from :return: full path """ if saver.path == "": return path_prefix return "{}.{}".format(path_prefix, saver.path)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/saver.py
0.875268
0.393414
saver.py
pypi
import argparse import os import matplotlib import matplotlib.pyplot as plt from rl_coach.dashboard_components.signals_file import SignalsFile class FigureMaker(object): def __init__(self, path, cols, smoothness, signal_to_plot, x_axis, color): self.experiments_path = path self.environments = self.list_environments() self.cols = cols self.rows = int((len(self.environments) + cols - 1) / cols) self.smoothness = smoothness self.signal_to_plot = signal_to_plot self.x_axis = x_axis self.color = color params = { 'axes.labelsize': 8, 'font.size': 10, 'legend.fontsize': 14, 'xtick.labelsize': 8, 'ytick.labelsize': 8, 'text.usetex': False, 'figure.figsize': [16, 30] } matplotlib.rcParams.update(params) def list_environments(self): environments = sorted([e.name for e in os.scandir(self.experiments_path) if e.is_dir()]) filtered_environments = self.filter_environments(environments) return filtered_environments def filter_environments(self, environments): filtered_environments = [] for idx, environment in enumerate(environments): path = os.path.join(self.experiments_path, environment) experiments = [e.name for e in os.scandir(path) if e.is_dir()] # take only the last updated experiment directory last_experiment_dir = max([os.path.join(path, root) for root in experiments], key=os.path.getctime) # make sure there is a csv file inside it for file_path in os.listdir(last_experiment_dir): full_file_path = os.path.join(last_experiment_dir, file_path) if os.path.isfile(full_file_path) and file_path.endswith('.csv'): filtered_environments.append((environment, full_file_path)) return filtered_environments def plot_figures(self, prev_subplot_map=None): subplot_map = {} for idx, (environment, full_file_path) in enumerate(self.environments): environment = environment.split('level')[1].split('-')[1].split('Deterministic')[0][1:] if prev_subplot_map: # skip on environments which were not plotted before if environment not in prev_subplot_map.keys(): continue subplot_idx = 
prev_subplot_map[environment] else: subplot_idx = idx + 1 print(environment) axis = plt.subplot(self.rows, self.cols, subplot_idx) subplot_map[environment] = subplot_idx signals = SignalsFile(full_file_path) signals.change_averaging_window(self.smoothness, force=True, signals=[self.signal_to_plot]) steps = signals.bokeh_source.data[self.x_axis] rewards = signals.bokeh_source.data[self.signal_to_plot] yloc = plt.MaxNLocator(4) axis.yaxis.set_major_locator(yloc) axis.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) plt.title(environment, fontsize=10, y=1.08) plt.plot(steps, rewards, self.color, linewidth=0.8) plt.subplots_adjust(hspace=2.0, wspace=0.4) return subplot_map def save_pdf(self, name): plt.savefig(name + ".pdf", bbox_inches='tight') def show_figures(self): plt.show() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-p', '--paths', help="(string) Root directory of the experiments", default=None, type=str) parser.add_argument('-c', '--cols', help="(int) Number of plot columns", default=6, type=int) parser.add_argument('-s', '--smoothness', help="(int) Number of consequent episodes to average over", default=100, type=int) parser.add_argument('-sig', '--signal', help="(str) The name of the signal to plot", default='Evaluation Reward', type=str) parser.add_argument('-x', '--x_axis', help="(str) The meaning of the x axis", default='Total steps', type=str) parser.add_argument('-pdf', '--pdf', help="(str) A name of a pdf to save to", default='atari', type=str) args = parser.parse_args() paths = args.paths.split(",") subplot_map = None for idx, path in enumerate(paths): maker = FigureMaker(path, cols=args.cols, smoothness=args.smoothness, signal_to_plot=args.signal, x_axis=args.x_axis, color='C{}'.format(idx)) subplot_map = maker.plot_figures(subplot_map) plt.legend(paths) maker.save_pdf(args.pdf) maker.show_figures()
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/plot_atari.py
0.639961
0.397588
plot_atari.py
pypi
import inspect import json import os import sys import types from collections import OrderedDict from enum import Enum from typing import Dict, List, Union from rl_coach.core_types import TrainingSteps, EnvironmentSteps, GradientClippingMethod, RunPhase, \ SelectedPhaseOnlyDumpFilter, MaxDumpFilter from rl_coach.filters.filter import NoInputFilter from rl_coach.logger import screen class Frameworks(Enum): tensorflow = "TensorFlow" mxnet = "MXNet" class EmbedderScheme(Enum): Empty = "Empty" Shallow = "Shallow" Medium = "Medium" Deep = "Deep" class MiddlewareScheme(Enum): Empty = "Empty" Shallow = "Shallow" Medium = "Medium" Deep = "Deep" class EmbeddingMergerType(Enum): Concat = 0 Sum = 1 #ConcatDepthWise = 2 #Multiply = 3 class RunType(Enum): ORCHESTRATOR = "orchestrator" TRAINER = "trainer" ROLLOUT_WORKER = "rollout-worker" def __str__(self): return self.value class DeviceType(Enum): CPU = 'cpu' GPU = 'gpu' class Device(object): def __init__(self, device_type: DeviceType, index: int=0): """ :param device_type: type of device (CPU/GPU) :param index: index of device (only used if device type is GPU) """ self._device_type = device_type self._index = index @property def device_type(self): return self._device_type @property def index(self): return self._index def __str__(self): return "{}{}".format(self._device_type, self._index) def __repr__(self): return str(self) # DistributedCoachSynchronizationType provides the synchronization type for distributed Coach. # The default value is None, which means the algorithm or preset cannot be used with distributed Coach. class DistributedCoachSynchronizationType(Enum): # In SYNC mode, the trainer waits for all the experiences to be gathered from distributed rollout workers before # training a new policy and the rollout workers wait for a new policy before gathering experiences. 
SYNC = "sync" # In ASYNC mode, the trainer doesn't wait for any set of experiences to be gathered from distributed rollout workers # and the rollout workers continously gather experiences loading new policies, whenever they become available. ASYNC = "async" def iterable_to_items(obj): if isinstance(obj, dict) or isinstance(obj, OrderedDict) or isinstance(obj, types.MappingProxyType): items = obj.items() elif isinstance(obj, list): items = enumerate(obj) else: raise ValueError("The given object is not a dict or a list") return items def unfold_dict_or_list(obj: Union[Dict, List, OrderedDict]): """ Recursively unfolds all the parameters in dictionaries and lists :param obj: a dictionary or list to unfold :return: the unfolded parameters dictionary """ parameters = OrderedDict() items = iterable_to_items(obj) for k, v in items: if isinstance(v, dict) or isinstance(v, list) or isinstance(v, OrderedDict): if 'tensorflow.' not in str(v.__class__): parameters[k] = unfold_dict_or_list(v) elif 'tensorflow.' in str(v.__class__): parameters[k] = v elif hasattr(v, '__dict__'): sub_params = v.__dict__ if '__objclass__' not in sub_params.keys(): try: parameters[k] = unfold_dict_or_list(sub_params) except RecursionError: parameters[k] = sub_params parameters[k]['__class__'] = v.__class__.__name__ else: # unfolding this type of object will result in infinite recursion parameters[k] = sub_params else: parameters[k] = v if not isinstance(obj, OrderedDict) and not isinstance(obj, list): parameters = OrderedDict(sorted(parameters.items())) return parameters class Parameters(object): def __setattr__(self, key, value): caller_name = sys._getframe(1).f_code.co_name if caller_name != '__init__' and not hasattr(self, key): raise TypeError("Parameter '{}' does not exist in {}. Parameters are only to be defined in a constructor of" " a class inheriting from Parameters. In order to explicitly register a new parameter " "outside of a constructor use register_var().". 
format(key, self.__class__)) object.__setattr__(self, key, value) @property def path(self): if hasattr(self, 'parameterized_class_name'): module_path = os.path.relpath(inspect.getfile(self.__class__), os.getcwd())[:-3] + '.py' return ':'.join([module_path, self.parameterized_class_name]) else: raise ValueError("The parameters class does not have an attached class it parameterizes. " "The self.parameterized_class_name should be set to the parameterized class.") def register_var(self, key, value): if hasattr(self, key): raise TypeError("Cannot register an already existing parameter '{}'. ".format(key)) object.__setattr__(self, key, value) def __str__(self): result = "\"{}\" {}\n".format(self.__class__.__name__, json.dumps(unfold_dict_or_list(self.__dict__), indent=4, default=repr)) return result class AlgorithmParameters(Parameters): def __init__(self): # Architecture parameters self.use_accumulated_reward_as_measurement = False # Agent parameters self.num_consecutive_playing_steps = EnvironmentSteps(1) self.num_consecutive_training_steps = 1 # TODO: update this to TrainingSteps self.heatup_using_network_decisions = False self.discount = 0.99 self.apply_gradients_every_x_episodes = 5 self.num_steps_between_copying_online_weights_to_target = TrainingSteps(0) self.rate_for_copying_weights_to_target = 1.0 self.load_memory_from_file_path = None self.store_transitions_only_when_episodes_are_terminated = False # HRL / HER related params self.in_action_space = None # distributed agents params self.share_statistics_between_workers = True # n-step returns self.n_step = -1 # calculate the total return (no bootstrap, by default) # Distributed Coach params self.distributed_coach_synchronization_type = None # Should the workers wait for full episode self.act_for_full_episodes = False # Support for parameter noise self.supports_parameter_noise = False # Override, in retrospective, all the episode rewards with the last reward in the episode # (sometimes useful for sparse, end of 
the episode, rewards problems) self.override_episode_rewards_with_the_last_transition_reward = False # Filters - TODO consider creating a FilterParameters class and initialize the filters with it self.update_pre_network_filters_state_on_train = False self.update_pre_network_filters_state_on_inference = True class PresetValidationParameters(Parameters): def __init__(self, test=False, min_reward_threshold=0, max_episodes_to_achieve_reward=1, num_workers=1, reward_test_level=None, test_using_a_trace_test=True, trace_test_levels=None, trace_max_env_steps=5000, read_csv_tries=200): """ :param test: A flag which specifies if the preset should be tested as part of the validation process. :param min_reward_threshold: The minimum reward that the agent should pass after max_episodes_to_achieve_reward episodes when the preset is run. :param max_episodes_to_achieve_reward: The maximum number of episodes that the agent should train using the preset in order to achieve the reward specified by min_reward_threshold. :param num_workers: The number of workers that should be used when running this preset in the test suite for validation. :param reward_test_level: The environment level or levels, given by a list of strings, that should be tested as part of the reward tests suite. :param test_using_a_trace_test: A flag that specifies if the preset should be run as part of the trace tests suite. :param trace_test_levels: The environment level or levels, given by a list of strings, that should be tested as part of the trace tests suite. :param trace_max_env_steps: An integer representing the maximum number of environment steps to run when running this preset as part of the trace tests suite. :param read_csv_tries: The number of retries to attempt for reading the experiment csv file, before declaring failure. """ super().__init__() # setting a seed will only work for non-parallel algorithms. 
Parallel algorithms add uncontrollable noise in # the form of different workers starting at different times, and getting different assignments of CPU # time from the OS. # Testing parameters self.test = test self.min_reward_threshold = min_reward_threshold self.max_episodes_to_achieve_reward = max_episodes_to_achieve_reward self.num_workers = num_workers self.reward_test_level = reward_test_level self.test_using_a_trace_test = test_using_a_trace_test self.trace_test_levels = trace_test_levels self.trace_max_env_steps = trace_max_env_steps self.read_csv_tries = read_csv_tries class NetworkParameters(Parameters): def __init__(self, force_cpu=False, async_training=False, shared_optimizer=True, scale_down_gradients_by_number_of_workers_for_sync_training=True, clip_gradients=None, gradients_clipping_method=GradientClippingMethod.ClipByGlobalNorm, l2_regularization=0, learning_rate=0.00025, learning_rate_decay_rate=0, learning_rate_decay_steps=0, input_embedders_parameters={}, embedding_merger_type=EmbeddingMergerType.Concat, middleware_parameters=None, heads_parameters=[], use_separate_networks_per_head=False, optimizer_type='Adam', optimizer_epsilon=0.0001, adam_optimizer_beta1=0.9, adam_optimizer_beta2=0.99, rms_prop_optimizer_decay=0.9, batch_size=32, replace_mse_with_huber_loss=False, create_target_network=False, tensorflow_support=True, softmax_temperature=1): """ :param force_cpu: Force the neural networks to run on the CPU even if a GPU is available :param async_training: If set to True, asynchronous training will be used, meaning that each workers will progress in its own speed, while not waiting for the rest of the workers to calculate their gradients. :param shared_optimizer: If set to True, a central optimizer which will be shared with all the workers will be used for applying gradients to the network. 
Otherwise, each worker will have its own optimizer with its own internal parameters that will only be affected by the gradients calculated by that worker :param scale_down_gradients_by_number_of_workers_for_sync_training: If set to True, in synchronous training, the gradients of each worker will be scaled down by the number of workers. This essentially means that the gradients applied to the network are the average of the gradients over all the workers. :param clip_gradients: A value that will be used for clipping the gradients of the network. If set to None, no gradient clipping will be applied. Otherwise, the gradients will be clipped according to the gradients_clipping_method. :param gradients_clipping_method: A gradient clipping method, defined by a GradientClippingMethod enum, and that will be used to clip the gradients of the network. This will only be used if the clip_gradients value is defined as a value other than None. :param l2_regularization: A L2 regularization weight that will be applied to the network weights while calculating the loss function :param learning_rate: The learning rate for the network :param learning_rate_decay_rate: If this value is larger than 0, an exponential decay will be applied to the network learning rate. The rate of the decay is defined by this parameter, and the number of training steps the decay will be applied is defined by learning_rate_decay_steps. Notice that both parameters should be defined in order for this to work correctly. :param learning_rate_decay_steps: If the learning_rate_decay_rate of the network is larger than 0, an exponential decay will be applied to the network learning rate. The number of steps the decay will be applied is defined by this parameter. Notice that both this parameter, as well as learning_rate_decay_rate should be defined in order for the learning rate decay to work correctly. 
:param input_embedders_parameters: A dictionary mapping between input names and input embedders (InputEmbedderParameters) to use for the network. Each of the keys is an input name as returned from the environment in the state. For example, if the environment returns a state containing 'observation' and 'measurements', then the keys for the input embedders dictionary can be either 'observation' to use the observation as input, 'measurements' to use the measurements as input, or both. The embedder type will be automatically selected according to the input type. Vector inputs will produce a fully connected embedder, and image inputs will produce a convolutional embedder. :param embedding_merger_type: The type of embedding merging to use, given by one of the EmbeddingMergerType enum values. This will be used to merge the outputs of all the input embedders into a single embbeding. :param middleware_parameters: The parameters of the middleware to use, given by a MiddlewareParameters object. Each network will have only a single middleware embedder which will take the merged embeddings from the input embedders and pass them through more neural network layers. :param heads_parameters: A list of heads for the network given by their corresponding HeadParameters. Each network can have one or multiple network heads, where each one will take the output of the middleware and make some additional computation on top of it. Additionally, each head calculates a weighted loss value, and the loss values from all the heads will be summed later on. :param use_separate_networks_per_head: A flag that allows using different copies of the input embedders and middleware for each one of the heads. Regularly, the heads will have a shared input, but in the case where use_separate_networks_per_head is set to True, each one of the heads will get a different input. :param optimizer_type: A string specifying the optimizer type to use for updating the network. 
The available optimizers are Adam, RMSProp and LBFGS. :param optimizer_epsilon: An internal optimizer parameter used for Adam and RMSProp. :param adam_optimizer_beta1: An beta1 internal optimizer parameter used for Adam. It will be used only if Adam was selected as the optimizer for the network. :param adam_optimizer_beta2: An beta2 internal optimizer parameter used for Adam. It will be used only if Adam was selected as the optimizer for the network. :param rms_prop_optimizer_decay: The decay value for the RMSProp optimizer, which will be used only in case the RMSProp optimizer was selected for this network. :param batch_size: The batch size to use when updating the network. :param replace_mse_with_huber_loss: :param create_target_network: If this flag is set to True, an additional copy of the network will be created and initialized with the same weights as the online network. It can then be queried, and its weights can be synced from the online network at will. :param tensorflow_support: A flag which specifies if the network is supported by the TensorFlow framework. 
:param softmax_temperature: If a softmax is present in the network head output, use this temperature """ super().__init__() self.framework = Frameworks.tensorflow self.sess = None # hardware parameters self.force_cpu = force_cpu # distributed training options self.async_training = async_training self.shared_optimizer = shared_optimizer self.scale_down_gradients_by_number_of_workers_for_sync_training = scale_down_gradients_by_number_of_workers_for_sync_training # regularization self.clip_gradients = clip_gradients self.gradients_clipping_method = gradients_clipping_method self.l2_regularization = l2_regularization # learning rate self.learning_rate = learning_rate self.learning_rate_decay_rate = learning_rate_decay_rate self.learning_rate_decay_steps = learning_rate_decay_steps # structure self.input_embedders_parameters = input_embedders_parameters self.embedding_merger_type = embedding_merger_type self.middleware_parameters = middleware_parameters self.heads_parameters = heads_parameters self.use_separate_networks_per_head = use_separate_networks_per_head self.optimizer_type = optimizer_type self.replace_mse_with_huber_loss = replace_mse_with_huber_loss self.create_target_network = create_target_network # Framework support self.tensorflow_support = tensorflow_support # Hyper-Parameter values self.optimizer_epsilon = optimizer_epsilon self.adam_optimizer_beta1 = adam_optimizer_beta1 self.adam_optimizer_beta2 = adam_optimizer_beta2 self.rms_prop_optimizer_decay = rms_prop_optimizer_decay self.batch_size = batch_size self.softmax_temperature = softmax_temperature class NetworkComponentParameters(Parameters): def __init__(self, dense_layer): self.dense_layer = dense_layer class VisualizationParameters(Parameters): def __init__(self, print_networks_summary=False, dump_csv=True, dump_signals_to_csv_every_x_episodes=5, dump_gifs=False, dump_mp4=False, video_dump_methods=None, dump_in_episode_signals=False, dump_parameters_documentation=True, render=False, 
native_rendering=False, max_fps_for_human_control=10, tensorboard=False, add_rendered_image_to_env_response=False): """ :param print_networks_summary: If set to True, a summary of all the networks structure will be printed at the beginning of the experiment :param dump_csv: If set to True, the logger will dump logs to a csv file once in every dump_signals_to_csv_every_x_episodes episodes. The logs can be later used to visualize the training process using Coach Dashboard. :param dump_signals_to_csv_every_x_episodes: Defines the number of episodes between writing new data to the csv log files. Lower values can affect performance, as writing to disk may take time, and it is done synchronously. :param dump_gifs: If set to True, GIF videos of the environment will be stored into the experiment directory according to the filters defined in video_dump_methods. :param dump_mp4: If set to True, MP4 videos of the environment will be stored into the experiment directory according to the filters defined in video_dump_methods. :param dump_in_episode_signals: If set to True, csv files will be dumped for each episode for inspecting different metrics within the episode. This means that for each step in each episode, different metrics such as the reward, the future return, etc. will be saved. Setting this to True may affect performance severely, and therefore this should be used only for debugging purposes. :param dump_parameters_documentation: If set to True, a json file containing all the agent parameters will be saved in the experiment directory. This may be very useful for inspecting the values defined for each parameters and making sure that all the parameters are defined as expected. :param render: If set to True, the environment render function will be called for each step, rendering the image of the environment. This may affect the performance of training, and is highly dependent on the environment. 
By default, Coach uses PyGame to render the environment image instead of the environment specific rendered. To change this, use the native_rendering flag. :param native_rendering: If set to True, the environment native renderer will be used for rendering the environment image. In some cases this can be slower than rendering using PyGame through Coach, but in other cases the environment opens its native renderer by default, so rendering with PyGame is an unnecessary overhead. :param max_fps_for_human_control: The maximum number of frames per second used while playing the environment as a human. This only has effect while using the --play flag for Coach. :param tensorboard: If set to True, TensorBoard summaries will be stored in the experiment directory. This can later be loaded in TensorBoard in order to visualize the training process. :param video_dump_methods: A list of dump methods that will be used as filters for deciding when to save videos. The filters in the list will be checked one after the other until the first dump method that returns false for should_dump() in the environment class. This list will only be used if dump_mp4 or dump_gif are set to True. :param add_rendered_image_to_env_response: Some environments have a different observation compared to the one displayed while rendering. For some cases it can be useful to pass the rendered image to the agent for visualization purposes. If this flag is set to True, the rendered image will be added to the environment EnvResponse object, which will be passed to the agent and allow using those images. 
""" super().__init__() if video_dump_methods is None: video_dump_methods = [SelectedPhaseOnlyDumpFilter(RunPhase.TEST), MaxDumpFilter()] self.print_networks_summary = print_networks_summary self.dump_csv = dump_csv self.dump_gifs = dump_gifs self.dump_mp4 = dump_mp4 self.dump_signals_to_csv_every_x_episodes = dump_signals_to_csv_every_x_episodes self.dump_in_episode_signals = dump_in_episode_signals self.dump_parameters_documentation = dump_parameters_documentation self.render = render self.native_rendering = native_rendering self.max_fps_for_human_control = max_fps_for_human_control self.tensorboard = tensorboard self.video_dump_filters = video_dump_methods self.add_rendered_image_to_env_response = add_rendered_image_to_env_response class AgentParameters(Parameters): def __init__(self, algorithm: AlgorithmParameters, exploration: 'ExplorationParameters', memory: 'MemoryParameters', networks: Dict[str, NetworkParameters], visualization: VisualizationParameters=VisualizationParameters()): """ :param algorithm: A class inheriting AlgorithmParameters. The parameters used for the specific algorithm used by the agent. These parameters can be later referenced in the agent implementation through self.ap.algorithm. :param exploration: Either a class inheriting ExplorationParameters or a dictionary mapping between action space types and their corresponding ExplorationParameters. If a dictionary was used, when the agent will be instantiated, the correct exploration policy parameters will be used according to the real type of the environment action space. These parameters will be used to instantiate the exporation policy. :param memory: A class inheriting MemoryParameters. It defines all the parameters used by the memory module. :param networks: A dictionary mapping between network names and their corresponding network parmeters, defined as a class inheriting NetworkParameters. 
Each element will be used in order to instantiate a NetworkWrapper class, and all the network wrappers will be stored in the agent under self.network_wrappers. self.network_wrappers is a dict mapping between the network name that was given in the networks dict, and the instantiated network wrapper. :param visualization: A class inheriting VisualizationParameters and defining various parameters that can be used for visualization purposes, such as printing to the screen, rendering, and saving videos. """ super().__init__() self.visualization = visualization self.algorithm = algorithm self.exploration = exploration self.memory = memory self.network_wrappers = networks self.input_filter = None self.output_filter = None self.pre_network_filter = NoInputFilter() self.full_name_id = None self.name = None self.is_a_highest_level_agent = True self.is_a_lowest_level_agent = True self.task_parameters = None self.is_batch_rl_training = False @property def path(self): return 'rl_coach.agents.agent:Agent' class TaskParameters(Parameters): def __init__(self, framework_type: Frameworks=Frameworks.tensorflow, evaluate_only: int=None, use_cpu: bool=False, experiment_path='/tmp', seed=None, checkpoint_save_secs=None, checkpoint_restore_dir=None, checkpoint_restore_path=None, checkpoint_save_dir=None, export_onnx_graph: bool=False, apply_stop_condition: bool=False, num_gpu: int=1): """ :param framework_type: deep learning framework type. currently only tensorflow is supported :param evaluate_only: if not None, the task will be used only for evaluating the model for the given number of steps. A value of 0 means that task will be evaluated for an infinite number of steps. 
:param use_cpu: use the cpu for this task :param experiment_path: the path to the directory which will store all the experiment outputs :param seed: a seed to use for the random numbers generator :param checkpoint_save_secs: the number of seconds between each checkpoint saving :param checkpoint_restore_dir: [DEPECRATED - will be removed in one of the next releases - switch to checkpoint_restore_path] the dir to restore the checkpoints from :param checkpoint_restore_path: the path to restore the checkpoints from :param checkpoint_save_dir: the directory to store the checkpoints in :param export_onnx_graph: If set to True, this will export an onnx graph each time a checkpoint is saved :param apply_stop_condition: If set to True, this will apply the stop condition defined by reaching a target success rate :param num_gpu: number of GPUs to use """ self.framework_type = framework_type self.task_index = 0 # TODO: not really needed self.evaluate_only = evaluate_only self.use_cpu = use_cpu self.experiment_path = experiment_path self.checkpoint_save_secs = checkpoint_save_secs if checkpoint_restore_dir: screen.warning('TaskParameters.checkpoint_restore_dir is DEPECRATED and will be removed in one of the next ' 'releases. Please switch to using TaskParameters.checkpoint_restore_path, with your ' 'directory path. 
') self.checkpoint_restore_path = checkpoint_restore_dir else: self.checkpoint_restore_path = checkpoint_restore_path self.checkpoint_save_dir = checkpoint_save_dir self.seed = seed self.export_onnx_graph = export_onnx_graph self.apply_stop_condition = apply_stop_condition self.num_gpu = num_gpu class DistributedTaskParameters(TaskParameters): def __init__(self, framework_type: Frameworks, parameters_server_hosts: str, worker_hosts: str, job_type: str, task_index: int, evaluate_only: int=None, num_tasks: int=None, num_training_tasks: int=None, use_cpu: bool=False, experiment_path=None, dnd=None, shared_memory_scratchpad=None, seed=None, checkpoint_save_secs=None, checkpoint_restore_path=None, checkpoint_save_dir=None, export_onnx_graph: bool=False, apply_stop_condition: bool=False): """ :param framework_type: deep learning framework type. currently only tensorflow is supported :param evaluate_only: if not None, the task will be used only for evaluating the model for the given number of steps. A value of 0 means that task will be evaluated for an infinite number of steps. :param parameters_server_hosts: comma-separated list of hostname:port pairs to which the parameter servers are assigned :param worker_hosts: comma-separated list of hostname:port pairs to which the workers are assigned :param job_type: the job type - either ps (short for parameters server) or worker :param task_index: the index of the process :param num_tasks: the number of total tasks that are running (not including the parameters server) :param num_training_tasks: the number of tasks that are training (not including the parameters server) :param use_cpu: use the cpu for this task :param experiment_path: the path to the directory which will store all the experiment outputs :param dnd: an external DND to use for NEC. This is a workaround needed for a shared DND not using the scratchpad. 
:param seed: a seed to use for the random numbers generator :param checkpoint_save_secs: the number of seconds between each checkpoint saving :param checkpoint_restore_path: the path to restore the checkpoints from :param checkpoint_save_dir: the directory to store the checkpoints in :param export_onnx_graph: If set to True, this will export an onnx graph each time a checkpoint is saved :param apply_stop_condition: If set to True, this will apply the stop condition defined by reaching a target success rate """ super().__init__(framework_type=framework_type, evaluate_only=evaluate_only, use_cpu=use_cpu, experiment_path=experiment_path, seed=seed, checkpoint_save_secs=checkpoint_save_secs, checkpoint_restore_path=checkpoint_restore_path, checkpoint_save_dir=checkpoint_save_dir, export_onnx_graph=export_onnx_graph, apply_stop_condition=apply_stop_condition) self.parameters_server_hosts = parameters_server_hosts self.worker_hosts = worker_hosts self.job_type = job_type self.task_index = task_index self.num_tasks = num_tasks self.num_training_tasks = num_training_tasks self.device = None # the replicated device which will be used for the global parameters self.worker_target = None self.dnd = dnd self.shared_memory_scratchpad = shared_memory_scratchpad
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/base_parameters.py
0.590425
0.199932
base_parameters.py
pypi
import numpy as np import contextlib with contextlib.redirect_stdout(None): import pygame from pygame.locals import HWSURFACE, DOUBLEBUF class Renderer(object): def __init__(self): self.size = (1, 1) self.screen = None self.clock = pygame.time.Clock() self.display = pygame.display self.fps = 30 self.pressed_keys = [] self.is_open = False def create_screen(self, width, height): """ Creates a pygame window :param width: the width of the window :param height: the height of the window :return: None """ self.size = (width, height) self.screen = self.display.set_mode(self.size, HWSURFACE | DOUBLEBUF) self.display.set_caption("Coach") self.is_open = True def normalize_image(self, image): """ Normalize image values to be between 0 and 255 :param image: 2D/3D array containing an image with arbitrary values :return: the input image with values rescaled to 0-255 """ image_min, image_max = image.min(), image.max() return 255.0 * (image - image_min) / (image_max - image_min) def render_image(self, image): """ Render the given image to the pygame window :param image: a grayscale or color image in an arbitrary size. 
assumes that the channels are the last axis :return: None """ if self.is_open: if len(image.shape) == 2: image = np.stack([image] * 3) if len(image.shape) == 3: if image.shape[0] == 3 or image.shape[0] == 1: image = np.transpose(image, (1, 2, 0)) surface = pygame.surfarray.make_surface(image.swapaxes(0, 1)) surface = pygame.transform.scale(surface, self.size) self.screen.blit(surface, (0, 0)) self.display.flip() self.clock.tick() self.get_events() def get_events(self): """ Get all the window events in the last tick and reponse accordingly :return: None """ for event in pygame.event.get(): if event.type == pygame.KEYDOWN: self.pressed_keys.append(event.key) # esc pressed if event.key == pygame.K_ESCAPE: self.close() elif event.type == pygame.KEYUP: if event.key in self.pressed_keys: self.pressed_keys.remove(event.key) elif event.type == pygame.QUIT: self.close() def get_key_names(self, key_ids): """ Get the key name for each key index in the list :param key_ids: a list of key id's :return: a list of key names corresponding to the key id's """ return [pygame.key.name(key_id) for key_id in key_ids] def close(self): """ Close the pygame window :return: None """ self.is_open = False pygame.quit()
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/renderer.py
0.689096
0.4575
renderer.py
pypi
import time
import os

from rl_coach.base_parameters import TaskParameters, DistributedCoachSynchronizationType
from rl_coach.checkpoint import CheckpointStateFile, CheckpointStateReader
from rl_coach.core_types import RunPhase
from rl_coach.data_stores.data_store import SyncFiles


def wait_for(wait_func, data_store=None, timeout=10):
    """
    Block until wait_func() returns True, polling every 10 seconds.

    :param wait_func: zero-argument callable returning True once the condition is met
    :param data_store: optional data store to refresh from the backing store before each poll
    :param timeout: number of 10-second poll attempts before giving up
    :raises ValueError: if the condition is still unmet after all attempts
    """
    for i in range(timeout):
        if data_store:
            data_store.load_from_store()

        if wait_func():
            return

        time.sleep(10)

    # one last time
    if wait_func():
        return

    # each of the `timeout` attempts sleeps 10 seconds, so report the actual wall time
    # waited (the original message claimed `timeout` seconds, which was misleading)
    raise ValueError((
        'Waited {timeout} seconds, but condition timed out'
    ).format(
        timeout=timeout * 10,
    ))


def wait_for_trainer_ready(checkpoint_dir, data_store=None, timeout=10):
    """
    Block until the trainer has signalled readiness by writing the TRAINER_READY sync file.

    :param checkpoint_dir: directory in which the trainer writes the sync file
    :param data_store: optional data store to refresh before each poll
    :param timeout: number of 10-second poll attempts (see wait_for)
    """
    def wait():
        return os.path.exists(os.path.join(checkpoint_dir, SyncFiles.TRAINER_READY.value))

    wait_for(wait, data_store, timeout)


def rollout_worker(graph_manager, data_store, num_workers, task_parameters):
    """
    wait for first checkpoint then perform rollouts using the model

    :param graph_manager: the graph manager driving acting and policy loading
    :param data_store: data store used to fetch policies and checkpoints
    :param num_workers: total number of rollout workers sharing the playing steps
    :param task_parameters: task parameters for this worker, including the checkpoint location
    """
    # the original referenced an undefined `checkpoint_dir`; derive it from the task
    # parameters, which carry the checkpoint location for this worker
    # NOTE(review): assumes the attribute is `checkpoint_restore_path` - confirm against
    # the TaskParameters definition in base_parameters
    checkpoint_dir = task_parameters.checkpoint_restore_path
    wait_for_trainer_ready(checkpoint_dir, data_store)

    if (
        graph_manager.agent_params.algorithm.distributed_coach_synchronization_type
        == DistributedCoachSynchronizationType.SYNC
    ):
        # in SYNC mode, wait indefinitely for each new policy
        timeout = float("inf")
    else:
        timeout = None

    # this could probably be moved up into coach.py
    graph_manager.create_graph(task_parameters)

    data_store.load_policy(graph_manager, require_new_policy=False, timeout=60)

    with graph_manager.phase_context(RunPhase.TRAIN):
        # this worker should play a fraction of the total playing steps per rollout
        act_steps = (
            graph_manager.agent_params.algorithm.num_consecutive_playing_steps
            / num_workers
        )

        # range() requires an int; the original passed the float result of the
        # division and would raise TypeError
        for i in range(int(graph_manager.improve_steps / act_steps)):
            if data_store.end_of_policies():
                break

            graph_manager.act(
                act_steps,
                wait_for_full_episodes=graph_manager.agent_params.algorithm.act_for_full_episodes,
            )
            data_store.load_policy(graph_manager, require_new_policy=True, timeout=timeout)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/rollout_worker.py
0.422624
0.166303
rollout_worker.py
pypi
from typing import List, Tuple

import numpy as np

from rl_coach.core_types import EnvironmentSteps


class Schedule(object):
    """Base class for value schedules: tracks an initial and a current value."""
    def __init__(self, initial_value: float):
        self.initial_value = initial_value
        self.current_value = initial_value

    def step(self):
        # subclasses must advance current_value by one step
        raise NotImplementedError("")


class ConstantSchedule(Schedule):
    """A schedule whose value never changes."""
    def __init__(self, initial_value: float):
        super().__init__(initial_value)

    def step(self):
        pass


class LinearSchedule(Schedule):
    """
    A simple linear schedule which decreases or increases over time from an initial
    to a final value
    """
    def __init__(self, initial_value: float, final_value: float, decay_steps: int):
        """
        :param initial_value: the initial value
        :param final_value: the final value
        :param decay_steps: the number of steps that are required to decay the initial
                            value to the final value
        """
        super().__init__(initial_value)
        self.final_value = final_value
        self.decay_steps = decay_steps
        # per-step delta; positive when decreasing, negative when increasing
        self.decay_delta = (initial_value - final_value) / float(decay_steps)

    def step(self):
        self.current_value -= self.decay_delta

        # decreasing schedule - clamp so we never overshoot past final_value
        if self.final_value < self.initial_value:
            self.current_value = np.clip(self.current_value, self.final_value, self.initial_value)

        # increasing schedule
        if self.final_value > self.initial_value:
            self.current_value = np.clip(self.current_value, self.initial_value, self.final_value)


class PieceWiseSchedule(Schedule):
    """
    A schedule which consists of multiple sub-schedules, where each one is used for a
    defined number of steps
    """
    def __init__(self, schedules: List[Tuple[Schedule, EnvironmentSteps]]):
        """
        :param schedules: a list of schedules to apply serially. Each element of the list
                          should be a tuple of 2 elements - a schedule and the number of
                          steps to run it in terms of EnvironmentSteps
        """
        super().__init__(schedules[0][0].initial_value)
        self.schedules = schedules
        self.current_schedule = schedules[0]      # (schedule, EnvironmentSteps) tuple
        self.current_schedule_idx = 0
        self.current_schedule_step_count = 0      # steps taken within the current sub-schedule

    def step(self):
        self.current_schedule[0].step()
        # advance to the next sub-schedule once the current one has run its allotted steps
        if self.current_schedule_idx < len(self.schedules) - 1 \
                and self.current_schedule_step_count >= self.current_schedule[1].num_steps:
            self.current_schedule_idx += 1
            self.current_schedule = self.schedules[self.current_schedule_idx]
            self.current_schedule_step_count = 0
        self.current_value = self.current_schedule[0].current_value
        self.current_schedule_step_count += 1


class ExponentialSchedule(Schedule):
    """
    A simple exponential schedule which decreases or increases over time from an initial
    to a final value
    """
    def __init__(self, initial_value: float, final_value: float, decay_coefficient: float):
        """
        :param initial_value: the initial value
        :param final_value: the final value
        :param decay_coefficient: the exponential decay coefficient
        """
        super().__init__(initial_value)
        self.initial_value = initial_value
        self.final_value = final_value
        self.decay_coefficient = decay_coefficient
        self.current_step = 0
        self.current_value = self.initial_value

        # a coefficient < 1 shrinks the value each step, so it can only reach a lower
        # final value; a coefficient > 1 grows it, so it can only reach a higher one
        if decay_coefficient < 1 and final_value > initial_value:
            raise ValueError("The final value should be lower than the initial value when the decay coefficient < 1")
        if decay_coefficient > 1 and initial_value > final_value:
            raise ValueError("The final value should be higher than the initial value when the decay coefficient > 1")

    def step(self):
        self.current_value *= self.decay_coefficient

        # decreasing schedule - clamp so we never overshoot past final_value
        if self.final_value < self.initial_value:
            self.current_value = np.clip(self.current_value, self.final_value, self.initial_value)

        # increasing schedule
        if self.final_value > self.initial_value:
            self.current_value = np.clip(self.current_value, self.initial_value, self.final_value)

        self.current_step += 1
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/schedules.py
0.938365
0.623893
schedules.py
pypi
import copy
import datetime
import os
import sys
import time
from itertools import cycle
from os import listdir
from os.path import isfile, join, isdir

from bokeh.layouts import row, column, Spacer, ToolbarBox
from bokeh.models import ColumnDataSource, Range1d, LinearAxis, Legend, \
    WheelZoomTool, CrosshairTool, ResetTool, SaveTool, Toolbar, PanTool, BoxZoomTool, \
    Toggle
from bokeh.models.callbacks import CustomJS
from bokeh.models.widgets import RadioButtonGroup, MultiSelect, Button, Select, Slider, Div, CheckboxGroup
from bokeh.plotting import figure

from rl_coach.dashboard_components.globals import signals_files, x_axis_labels, x_axis_options, show_spinner, hide_spinner, \
    dialog, FolderType, RunType, add_directory_csv_files, doc, display_boards, layouts, \
    crcolor, crx, cry, color_resolution, crRGBs, rgb_to_hex, x_axis


from rl_coach.dashboard_components.signals_files_group import SignalsFilesGroup
from rl_coach.dashboard_components.signals_file import SignalsFile


def update_axis_range(name, range_placeholder):
    """Rescale the given axis range to fit all selected signals, with 10% padding."""
    max_val = -float('inf')
    min_val = float('inf')
    selected_signal = None
    if name in x_axis_options:
        selected_signal = name
    for signals_file in signals_files.values():
        curr_min_val, curr_max_val = signals_file.get_range_of_selected_signals_on_axis(name, selected_signal)
        max_val = max(max_val, curr_max_val)
        min_val = min(min_val, curr_min_val)
    # only update when at least one file reported a finite range
    if min_val != float('inf'):
        # NOTE: `range` shadows the builtin here; kept as-is
        if min_val == max_val:
            range = 5
        else:
            range = max_val - min_val
        range_placeholder.start = min_val - 0.1 * range
        range_placeholder.end = max_val + 0.1 * range


# update axes ranges
def update_y_axis_ranges():
    update_axis_range('default', plot.y_range)
    update_axis_range('secondary', plot.extra_y_ranges['secondary'])


def update_x_axis_ranges():
    update_axis_range(x_axis[0], plot.x_range)


def get_all_selected_signals():
    """Collect the selected signals across all loaded files."""
    signals = []
    for signals_file in signals_files.values():
        signals += signals_file.get_selected_signals()
    return signals


# update legend using the legend text dictionary
def update_legend():
    selected_signals = get_all_selected_signals()
    max_line_length = 50
    items = []
    for signal in selected_signals:
        # marker showing which Y axis the signal is plotted against
        side_sign = "◀" if signal.axis == 'default' else "▶"
        signal_name = side_sign + " " + signal.full_name
        # bokeh legend does not respect a max_width parameter so we split the text manually to lines of constant width
        signal_name = [signal_name[n:n + max_line_length] for n in range(0, len(signal_name), max_line_length)]
        for idx, substr in enumerate(signal_name):
            if idx == 0:
                lines = [signal.line]
                if signal.show_bollinger_bands:
                    lines.append(signal.bands)
                items.append((substr, lines))
            else:
                # continuation lines carry no renderers
                items.append((substr, []))
    # only touch the legend when its content actually changed (rebuilding is slow)
    if bokeh_legend.items == [] or items == [] or \
            any([legend_item.renderers != item[1] for legend_item, item in zip(bokeh_legend.items, items)])\
            or any([legend_item.label != item[0] for legend_item, item in zip(bokeh_legend.items, items)]):
        bokeh_legend.items = items

        # this step takes a long time because it is redrawing the plot
        # the visible=false => visible=true is a hack to make the legend render again
        bokeh_legend.visible = False
        bokeh_legend.visible = True


# select lines to display
def select_data(args, old, new):
    # NOTE(review): `selected_file` is only ever created via `global` assignment in the
    # loader callbacks below; if this callback fires first it would raise NameError - verify
    if selected_file is None:
        return
    show_spinner("Updating the signal selection...")
    selected_signals = new
    for signal_name in selected_file.signals.keys():
        is_selected = signal_name in selected_signals
        selected_file.set_signal_selection(signal_name, is_selected)

    # update axes ranges
    update_y_axis_ranges()
    update_x_axis_ranges()

    # update the legend
    update_legend()
    hide_spinner()


# add new lines to the plot
def plot_signals(signals_file, signals):
    for idx, signal in enumerate(signals):
        signal.line = plot.line('index', signal.name, source=signals_file.bokeh_source,
                                line_color=signal.color, line_width=2)


def open_file_dialog():
    return dialog.getFileDialog()


def open_directory_dialog():
    return dialog.getDirDialog()


# will create a group from the files
def create_files_group_signal(files):
    global selected_file
    signals_file = SignalsFilesGroup(files, plot)
    signals_files[signals_file.filename] = signals_file
    filenames = [signals_file.filename]
    # replace the empty placeholder option, otherwise append
    if files_selector.options[0] == "":
        files_selector.options = filenames
    else:
        files_selector.options = files_selector.options + filenames
    files_selector.value = filenames[0]
    selected_file = signals_file


# load files from disk as a group
def load_file():
    file = open_file_dialog()
    show_spinner("Loading file...")
    # no file selected
    if not file:
        hide_spinner()
        return
    display_boards()
    create_files_signal([file])
    change_selected_signals_in_data_selector([""])
    hide_spinner()


# classify the folder as containing a single file, multiple files or only folders
def classify_folder(dir_path):
    files = [f for f in listdir(dir_path) if isfile(join(dir_path, f)) and f.endswith('.csv')]
    # only count sub-folders that contain at least one csv directly inside them
    folders = [d for d in listdir(dir_path) if isdir(join(dir_path, d))
               and any(f.endswith(".csv") for f in os.listdir(join(dir_path, d)))]
    if len(files) == 1:
        return FolderType.SINGLE_FILE
    elif len(files) > 1:
        return FolderType.MULTIPLE_FILES
    elif len(folders) == 1:
        # descend into the single sub-folder and classify it instead
        return classify_folder(join(dir_path, folders[0]))
    elif len(folders) > 1:
        return FolderType.MULTIPLE_FOLDERS
    else:
        return FolderType.EMPTY


# finds if this is single-threaded or multi-threaded
def get_run_type(dir_path):
    folder_type = classify_folder(dir_path)
    if folder_type == FolderType.SINGLE_FILE:
        folder_type = RunType.SINGLE_FOLDER_SINGLE_FILE
    elif folder_type == FolderType.MULTIPLE_FILES:
        folder_type = RunType.SINGLE_FOLDER_MULTIPLE_FILES
    elif folder_type == FolderType.MULTIPLE_FOLDERS:
        # folder contains sub dirs -> we assume we can classify the folder using only the first sub dir
        sub_dirs = [d for d in listdir(dir_path) if isdir(join(dir_path, d))]
        # checking only the first folder in the root dir for its type, since we assume that all sub dirs will share the
        # same structure (i.e. if one is a result of multi-threaded run, so will all the other).
        folder_type = classify_folder(os.path.join(dir_path, sub_dirs[0]))
        if folder_type == FolderType.SINGLE_FILE:
            folder_type = RunType.MULTIPLE_FOLDERS_SINGLE_FILES
        elif folder_type == FolderType.MULTIPLE_FILES:
            folder_type = RunType.MULTIPLE_FOLDERS_MULTIPLE_FILES
    return folder_type


# create a signal file from the directory path according to the directory underlying structure
def handle_dir(dir_path, run_type):
    paths = add_directory_csv_files(dir_path)
    if run_type in [RunType.SINGLE_FOLDER_MULTIPLE_FILES, RunType.MULTIPLE_FOLDERS_SINGLE_FILES]:
        create_files_group_signal(paths)
    elif run_type == RunType.SINGLE_FOLDER_SINGLE_FILE:
        create_files_signal(paths, use_dir_name=True)
    elif run_type == RunType.MULTIPLE_FOLDERS_MULTIPLE_FILES:
        sub_dirs = [d for d in listdir(dir_path) if isdir(join(dir_path, d))]
        create_files_group_signal([os.path.join(dir_path, d) for d in sub_dirs])


# load directory from disk as a group
def load_directory_group():
    directory = open_directory_dialog()
    show_spinner("Loading directories group...")
    # no files selected
    if not directory:
        hide_spinner()
        return
    display_directory_group(directory)


def display_directory_group(directory):
    """Load a directory of experiment csv files, waiting for it to be populated if empty."""
    pause_auto_update()
    display_boards()
    show_spinner("Loading directories group...")
    # poll until the experiment directory contains csv files
    while get_run_type(directory) == FolderType.EMPTY:
        show_spinner("Waiting for experiment directory to get populated...")
        sys.stdout.write("Waiting for experiment directory to get populated...\r")
        time.sleep(10)
    handle_dir(directory, get_run_type(directory))
    change_selected_signals_in_data_selector([""])
    resume_auto_update_according_to_toggle()
    hide_spinner()


def create_files_signal(files, use_dir_name=False):
    global selected_file
    new_signal_files = []
    for idx, file_path in enumerate(files):
        signals_file = SignalsFile(str(file_path), plot=plot, use_dir_name=use_dir_name)
        signals_files[signals_file.filename] = signals_file
        new_signal_files.append(signals_file)
    filenames = [f.filename for f in new_signal_files]
    # replace the empty placeholder option, otherwise append
    if files_selector.options[0] == "":
        files_selector.options = filenames
    else:
        files_selector.options = files_selector.options + filenames
    files_selector.value = filenames[0]
    selected_file = new_signal_files[0]

    # update x axis according to the file's default x-axis (which is the index, and thus the first column)
    idx = x_axis_options.index(new_signal_files[0].csv.columns[0])
    change_x_axis(idx)
    x_axis_selector.active = idx


def display_files(files):
    pause_auto_update()
    display_boards()
    show_spinner("Loading files...")
    create_files_signal(files)
    change_selected_signals_in_data_selector([""])
    resume_auto_update_according_to_toggle()
    hide_spinner()


def unload_file():
    """Remove the currently selected file from the dashboard and its selectors."""
    global selected_file
    if selected_file is None:
        return
    selected_file.hide_all_signals()
    del signals_files[selected_file.filename]
    data_selector.options = [""]
    filenames_list = copy.copy(files_selector.options)
    filenames_list.remove(selected_file.filename)
    if len(filenames_list) == 0:
        filenames_list = [""]
    files_selector.options = filenames_list
    filenames = cycle(filenames_list)
    # select the next remaining file, if any
    if files_selector.options[0] != "":
        files_selector.value = next(filenames)
    else:
        files_selector.value = None
    update_legend()
    refresh_info.text = ""
    if len(signals_files) == 0:
        selected_file = None


# reload the selected csv file
def reload_all_files(force=False):
    pause_auto_update()
    for file_to_load in signals_files.values():
        if force or file_to_load.file_was_modified_on_disk():
            show_spinner("Updating files from the disk...")
            file_to_load.load()
            hide_spinner()
    # drop sub-second precision from the displayed timestamp
    refresh_info.text = "Last Update: " + str(datetime.datetime.now()).split(".")[0]
    resume_auto_update_according_to_toggle()


# unselect the currently selected signals and then select the requested signals in the data selector
def change_selected_signals_in_data_selector(selected_signals):
    # the default bokeh way is not working due to a bug since Bokeh 0.12.6 (https://github.com/bokeh/bokeh/issues/6501)

    # remove the data selection callback before updating the selector
    data_selector.remove_on_change('value', select_data)
    # workaround: cycle each selected option out of and back into the options list
    for value in list(data_selector.value):
        if value in data_selector.options:
            index = data_selector.options.index(value)
            data_selector.options.remove(value)
            data_selector.value.remove(value)
            data_selector.options.insert(index, value)
    data_selector.value = selected_signals
    # add back the data selection callback
    data_selector.on_change('value', select_data)


# change data options according to the selected file
def change_data_selector(args, old, new):
    global selected_file
    if new is None:
        selected_file = None
        return
    show_spinner("Updating selection...")
    selected_file = signals_files[new]
    # group-only controls are disabled for single files
    if isinstance(selected_file, SignalsFile):
        group_cb.disabled = True
    elif isinstance(selected_file, SignalsFilesGroup):
        group_cb.disabled = False
    data_selector.remove_on_change('value', select_data)
    data_selector.options = sorted(list(selected_file.signals.keys()))
    data_selector.on_change('value', select_data)
    selected_signal_names = [s.name for s in selected_file.signals.values() if s.selected]
    if not selected_signal_names:
        selected_signal_names = [""]
    change_selected_signals_in_data_selector(selected_signal_names)
    averaging_slider.value = selected_file.signals_averaging_window
    if len(averaging_slider_dummy_source.data['value']) > 0:
        averaging_slider_dummy_source.data['value'][0] = selected_file.signals_averaging_window
    # NOTE(review): this puts None into the `active` list when a flag is off - looks
    # intentional as a placeholder, but verify against CheckboxGroup's expectations
    group_cb.active = [0 if selected_file.show_bollinger_bands else None]
    group_cb.active += [1 if selected_file.separate_files else None]
    hide_spinner()


# smooth all the signals of the selected file
def update_averaging(args, old, new):
    show_spinner("Smoothing the signals...")
    # get the actual value from the dummy source
    new = averaging_slider_dummy_source.data['value'][0]
    selected_file.change_averaging_window(new)
    hide_spinner()


def change_x_axis(val):
    """Switch the plot's X axis to the option at index `val` and reload affected files."""
    global x_axis
    show_spinner("Updating the X axis...")
    x_axis[0] = x_axis_options[val]
    plot.xaxis.axis_label = x_axis_labels[val]
    for file_to_load in signals_files.values():
        file_to_load.update_x_axis_index()

        # this is needed in order to recalculate the mean of all the files
        if isinstance(file_to_load, SignalsFilesGroup):
            file_to_load.load()
    update_axis_range(x_axis[0], plot.x_range)
    hide_spinner()


# move the signal between the main and secondary Y axes
def toggle_second_axis():
    show_spinner("Switching the Y axis...")
    plot.yaxis[-1].visible = True
    selected_file.toggle_y_axis()

    # this is just for redrawing the signals
    selected_file.reload_data()

    update_y_axis_ranges()
    update_legend()
    hide_spinner()


def toggle_group_property(new):
    show_spinner("Loading...")
    # toggle show / hide Bollinger bands
    selected_file.change_bollinger_bands_state(0 in new)
    # show a separate signal for each file in a group
    selected_file.show_files_separately(1 in new)
    update_legend()
    hide_spinner()


# Color selection - most of these functions are taken from bokeh examples (plotting/color_sliders.py)
def select_color(attr, old, new):
    show_spinner("Changing signal color...")
    signals = selected_file.get_selected_signals()
    for signal in signals:
        # NOTE(review): `new['1d']['indices']` is the pre-1.0 Bokeh selection format -
        # confirm the pinned Bokeh version still delivers it
        signal.set_color(rgb_to_hex(crRGBs[new['1d']['indices'][0]]))
    hide_spinner()


def pause_auto_update():
    toggle_auto_update(False)


def resume_auto_update_according_to_toggle():
    toggle_auto_update(auto_update_toggle_button.active)


def toggle_auto_update(new):
    global file_update_callback
    # NOTE(review): relies on the private `doc._session_callbacks` attribute - may break
    # across Bokeh versions
    if new is False and file_update_callback in doc._session_callbacks:
        doc.remove_periodic_callback(file_update_callback)
    elif file_update_callback not in doc._session_callbacks:
        file_update_callback = doc.add_periodic_callback(reload_all_files, 30000)


# refresh the loaded files every 30 seconds by default
file_update_callback = doc.add_periodic_callback(reload_all_files, 30000)

# ---------------- Build Website Layout -------------------

# file refresh time placeholder
refresh_info = Div(text="""""", width=210)

# create figures
plot = figure(plot_width=1200, plot_height=800,
              # tools='pan,box_zoom,wheel_zoom,crosshair,undo,redo,reset,save',
              toolbar_location=None,
              x_axis_label='Episodes', x_range=Range1d(0, 10000), y_range=Range1d(0, 100000),
              lod_factor=1000)
plot.extra_y_ranges = {"secondary": Range1d(start=-100, end=200)}
plot.add_layout(LinearAxis(y_range_name="secondary"), 'right')
toolbar = Toolbar(tools=[PanTool(), BoxZoomTool(), WheelZoomTool(), CrosshairTool(), ResetTool(), SaveTool()])
# plot.toolbar = toolbar
plot.add_tools(*toolbar.tools)
# hide the secondary axis until a signal is moved onto it
plot.yaxis[-1].visible = False

bokeh_legend = Legend(
    items=[("", [])],
    orientation="vertical",
    border_line_color="black",
    label_text_font_size={'value': '9pt'},
    click_policy='hide',
    visible=False
)
bokeh_legend.label_width = 100
plot.add_layout(bokeh_legend, "right")

plot.y_range = Range1d(0, 100)
plot.extra_y_ranges['secondary'] = Range1d(0, 100)

# select file
file_selection_button = Button(label="Select File", button_type="success", width=120)
file_selection_button.on_click(load_file)

files_selector_spacer = Spacer(width=10)

group_selection_button = Button(label="Select Directory", button_type="primary", width=140)
group_selection_button.on_click(load_directory_group)

update_files_button = Button(label="Update Files", button_type="default", width=50)
update_files_button.on_click(reload_all_files)

auto_update_toggle_button = Toggle(label="Auto Update", button_type="default", width=50, active=True)
auto_update_toggle_button.on_click(toggle_auto_update)

unload_file_button = Button(label="Unload", button_type="danger", width=50)
unload_file_button.on_click(unload_file)

# files selection box
files_selector = Select(title="Files:", options=[""])
files_selector.on_change('value', change_data_selector)

# data selection box
data_selector = MultiSelect(title="Data:", options=[], size=12)
data_selector.on_change('value', select_data)

# x axis selection box
x_axis_selector_title = Div(text="""X Axis:""", height=10)
x_axis_selector = RadioButtonGroup(labels=x_axis_options, active=0)
x_axis_selector.on_click(change_x_axis)

# toggle second axis button
toggle_second_axis_button = Button(label="Toggle Second Axis", button_type="success")
toggle_second_axis_button.on_click(toggle_second_axis)

# averaging slider
# This data source is just used to communicate / trigger the real callback
averaging_slider_dummy_source = ColumnDataSource(data=dict(value=[]))
averaging_slider_dummy_source.on_change('data', update_averaging)
averaging_slider = Slider(title="Averaging window", start=1, end=101, step=10,
                          callback_policy='mouseup')
averaging_slider.callback = CustomJS(args=dict(source=averaging_slider_dummy_source), code="""
    source.data = { value: [cb_obj.value] }
""")

# group properties checkbox
group_cb = CheckboxGroup(labels=["Show statistics bands", "Ungroup signals"], active=[])
group_cb.on_click(toggle_group_property)

# color selector
color_selector_title = Div(text="""Select Color:""")
crsource = ColumnDataSource(data=dict(x=crx, y=cry, crcolor=crcolor, RGBs=crRGBs))
color_selector = figure(x_range=(0, color_resolution), y_range=(0, 10),
                        plot_width=300, plot_height=40,
                        tools='tap')
color_selector.axis.visible = False
color_range = color_selector.rect(x='x', y='y', width=1, height=10, color='crcolor', source=crsource)
crsource.on_change('selected', select_color)
# keep unselected swatches fully drawn instead of faded
color_range.nonselection_glyph = color_range.glyph
color_selector.toolbar.logo = None
color_selector.toolbar_location = None

# main layout of the document
layout = row(file_selection_button, files_selector_spacer, group_selection_button, width=300)
layout = column(layout, files_selector)
layout = column(layout, row(update_files_button, Spacer(width=50), auto_update_toggle_button, Spacer(width=50),
                            unload_file_button))
layout = column(layout, row(refresh_info))
layout = column(layout, data_selector)
layout = column(layout, color_selector_title)
layout = column(layout, color_selector)
layout = column(layout, x_axis_selector_title)
layout = column(layout, x_axis_selector)
layout = column(layout, group_cb)
layout = column(layout, toggle_second_axis_button)
layout = column(layout, averaging_slider)
toolbox = ToolbarBox(toolbar=toolbar, toolbar_location='above')
panel = column(toolbox, plot)
layout = row(layout, panel)

experiment_board_layout = layout
layouts["experiment_board"] = experiment_board_layout
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/dashboard_components/experiment_board.py
0.40486
0.214527
experiment_board.py
pypi
from bokeh.layouts import row, column, widgetbox, Spacer
from bokeh.models import ColumnDataSource, Range1d, LinearAxis, Legend
from bokeh.models.widgets import RadioButtonGroup, MultiSelect, Button, Select, Slider, Div, CheckboxGroup, Toggle
from bokeh.plotting import figure

from rl_coach.dashboard_components.globals import layouts, crcolor, crx, cry, color_resolution, crRGBs
# NOTE(review): these imported widgets are all re-assigned locally below, so the imports
# only force experiment_board's module-level setup to run first - confirm that is intended
from rl_coach.dashboard_components.experiment_board import file_selection_button, files_selector_spacer, \
    group_selection_button, unload_file_button, files_selector

# ---------------- Build Website Layout -------------------

# file refresh time placeholder
refresh_info = Div(text="""""", width=210)

# create figures
plot = figure(plot_width=1200, plot_height=800,
              tools='pan,box_zoom,wheel_zoom,crosshair,undo,redo,reset,save',
              toolbar_location='above', x_axis_label='Episodes',
              x_range=Range1d(0, 10000), y_range=Range1d(0, 100000))
plot.extra_y_ranges = {"secondary": Range1d(start=-100, end=200)}
plot.add_layout(LinearAxis(y_range_name="secondary"), 'right')
# hide the secondary axis by default
plot.yaxis[-1].visible = False

# legend
div = Div(text="""""")
legend = widgetbox([div])
bokeh_legend = Legend(
    # items=[("12345678901234567890123456789012345678901234567890", [])],  # 50 letters
    items=[("__________________________________________________", [])],  # 50 letters
    location=(0, 0),
    orientation="vertical",
    border_line_color="black",
    label_text_font_size={'value': '9pt'},
    margin=30
)
plot.add_layout(bokeh_legend, "right")

# select file
# NOTE: the callbacks are commented out - this board's interactivity is not wired up yet
file_selection_button = Button(label="Select Files", button_type="success", width=120)
# file_selection_button.on_click(load_files_group)

files_selector_spacer = Spacer(width=10)

group_selection_button = Button(label="Select Directory", button_type="primary", width=140)
# group_selection_button.on_click(load_directory_group)

unload_file_button = Button(label="Unload", button_type="danger", width=50)
# unload_file_button.on_click(unload_file)

# files selection box
files_selector = Select(title="Files:", options=[])
# files_selector.on_change('value', change_data_selector)

# data selection box
data_selector = MultiSelect(title="Data:", options=[], size=12)
# data_selector.on_change('value', select_data)

# toggle second axis button
toggle_second_axis_button = Button(label="Toggle Second Axis", button_type="success")
# toggle_second_axis_button.on_click(toggle_second_axis)

# averaging slider
averaging_slider = Slider(title="Averaging window", start=1, end=101, step=10)
# averaging_slider.on_change('value', update_averaging)

# color selector
color_selector_title = Div(text="""Select Color:""")
crsource = ColumnDataSource(data=dict(x=crx, y=cry, crcolor=crcolor, RGBs=crRGBs))
color_selector = figure(x_range=(0, color_resolution), y_range=(0, 10),
                        plot_width=300, plot_height=40,
                        tools='tap')
color_selector.axis.visible = False
color_range = color_selector.rect(x='x', y='y', width=1, height=10, color='crcolor', source=crsource)
# crsource.on_change('selected', select_color)
# keep unselected swatches fully drawn instead of faded
color_range.nonselection_glyph = color_range.glyph
color_selector.toolbar.logo = None
color_selector.toolbar_location = None

episode_selector = MultiSelect(title="Episode:", options=['0', '1', '2', '3', '4'], size=1)

online_toggle = Toggle(label="Online", button_type="success")

# main layout of the document
layout = row(file_selection_button, files_selector_spacer, group_selection_button, width=300)
layout = column(layout, files_selector)
layout = column(layout, row(refresh_info, unload_file_button))
layout = column(layout, data_selector)
layout = column(layout, color_selector_title)
layout = column(layout, color_selector)
layout = column(layout, toggle_second_axis_button)
layout = column(layout, averaging_slider)
layout = column(layout, episode_selector)
layout = column(layout, online_toggle)
layout = row(layout, plot)

episodic_board_layout = layout
layouts["episodic_board"] = episodic_board_layout
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/dashboard_components/episodic_board.py
0.669205
0.369287
episodic_board.py
pypi
from collections import OrderedDict
import os
from genericpath import isdir, isfile
from os import listdir
from os.path import join
from enum import Enum

from bokeh.models import Div
from bokeh.plotting import curdoc
import tkinter as tk
from tkinter import filedialog
import colorsys

from rl_coach.core_types import TimeTypes

# shared mutable state for the dashboard modules
patches = {}
signals_files = {}      # filename -> SignalsFile / SignalsFilesGroup
selected_file = None
x_axis = ['Episode #']  # single-element list so other modules can mutate the current x axis
x_axis_options = [time_type.value.name for time_type in TimeTypes]
x_axis_labels = [time_type.value.label for time_type in TimeTypes]
current_color = 0

# spinner
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
with open(os.path.join(root_dir, 'dashboard_components/spinner.css'), 'r') as f:
    spinner_style = """<style>{}</style>""".format(f.read())
spinner_html = """<ul class="spinner"><li></li><li></li><li></li><li></li>
<li>
<br>
<span style="font-size: 24px; font-weight: bold; margin-left: -175px; width: 400px; position: absolute; text-align: center;">
{}
</span>
</li></ul>"""
spinner = Div(text="""""")
displayed_doc = "landing_page"
layouts = {}


def generate_color_range(N, I):
    """Build N evenly spaced hues at saturation 0.5 and intensity I; returns (hex list, RGB-tuple list)."""
    HSV_tuples = [(x*1.0/N, 0.5, I) for x in range(N)]
    RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)
    for_conversion = []
    for RGB_tuple in RGB_tuples:
        for_conversion.append((int(RGB_tuple[0]*255), int(RGB_tuple[1]*255), int(RGB_tuple[2]*255)))
    hex_colors = [rgb_to_hex(RGB_tuple) for RGB_tuple in for_conversion]
    return hex_colors, for_conversion


# convert RGB tuple to hexadecimal code
def rgb_to_hex(rgb):
    return '#%02x%02x%02x' % rgb


# convert hexadecimal to RGB tuple
def hex_to_dec(hex):
    # NOTE: parameter name shadows the builtin `hex`; kept as-is
    red = ''.join(hex.strip('#')[0:2])
    green = ''.join(hex.strip('#')[2:4])
    blue = ''.join(hex.strip('#')[4:6])
    return int(red, 16), int(green, 16), int(blue,16)


color_resolution = 1000
brightness = 0.75  # change to have brighter/darker colors
crx = list(range(1, color_resolution+1))  # the resolution is 1000 colors
cry = [5 for i in range(len(crx))]
crcolor, crRGBs = generate_color_range(color_resolution, brightness)  # produce spectrum


def display_boards():
    """Swap the landing page root for the boards layout (runs only once)."""
    global displayed_doc
    if displayed_doc == "landing_page":
        # `doc` and layouts["boards"] are defined elsewhere by the time this is called
        doc.remove_root(doc.roots[0])
        doc.add_root(layouts["boards"])
        displayed_doc = "boards"


def show_spinner(text="Loading..."):
    spinner.text = spinner_style + spinner_html.format(text)


def hide_spinner():
    spinner.text = ""


# takes path to dir and recursively adds all its files to paths
def add_directory_csv_files(dir_path, paths=None):
    if not paths:
        paths = []

    for p in listdir(dir_path):
        path = join(dir_path, p)
        if isdir(path):
            # call recursively for each dir
            paths = add_directory_csv_files(path, paths)
        elif isfile(path) and path.endswith('.csv'):
            # add every file to the list
            paths.append(path)

    return paths


class DialogApp():
    """Thin wrapper around tkinter file/directory pickers (creates a throwaway root window per call)."""

    def getFileDialog(self):
        application_window = tk.Tk()

        # Build a list of tuples for each file type the file dialog should display
        my_filetypes = [('csv files', '.csv')]

        # Ask the user to select a one or more file names.
        answer = filedialog.askopenfilename(parent=application_window,
                                            initialdir=os.getcwd(),
                                            title="Please select a file",
                                            filetypes=my_filetypes)
        application_window.destroy()
        return answer

    def getDirDialog(self):
        application_window = tk.Tk()

        # Ask the user to select a folder.
        answer = filedialog.askdirectory(parent=application_window,
                                         initialdir=os.getcwd(),
                                         title="Please select a folder")
        application_window.destroy()
        return answer


class RunType(Enum):
    SINGLE_FOLDER_SINGLE_FILE = 1
    SINGLE_FOLDER_MULTIPLE_FILES = 2
    MULTIPLE_FOLDERS_SINGLE_FILES = 3
    MULTIPLE_FOLDERS_MULTIPLE_FILES = 4
    UNKNOWN = 0


class FolderType(Enum):
    SINGLE_FILE = 1
    MULTIPLE_FILES = 2
    MULTIPLE_FOLDERS = 3
    EMPTY = 4


dialog = DialogApp()
doc = curdoc()
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/dashboard_components/globals.py
0.449151
0.179387
globals.py
pypi
import random import numpy as np from bokeh.models import ColumnDataSource from bokeh.palettes import Dark2 from rl_coach.dashboard_components.globals import show_spinner, hide_spinner, current_color from rl_coach.utils import squeeze_list class Signal: def __init__(self, name, parent, plot): self.name = name self.full_name = "{}/{}".format(parent.filename, self.name) self.plot = plot self.selected = False self.color = random.choice(Dark2[8]) self.line = None self.scatter = None self.bands = None self.bokeh_source = parent.bokeh_source self.min_val = 0 self.max_val = 0 self.axis = 'default' self.sub_signals = [] for name in self.bokeh_source.data.keys(): if (len(name.split('/')) == 1 and name == self.name) or '/'.join(name.split('/')[:-1]) == self.name: self.sub_signals.append(name) if len(self.sub_signals) > 1: self.mean_signal = squeeze_list([name for name in self.sub_signals if 'Mean' in name.split('/')[-1]]) self.stdev_signal = squeeze_list([name for name in self.sub_signals if 'Stdev' in name.split('/')[-1]]) self.min_signal = squeeze_list([name for name in self.sub_signals if 'Min' in name.split('/')[-1]]) self.max_signal = squeeze_list([name for name in self.sub_signals if 'Max' in name.split('/')[-1]]) else: self.mean_signal = squeeze_list(self.name) self.stdev_signal = None self.min_signal = None self.max_signal = None self.has_bollinger_bands = False if self.mean_signal and self.stdev_signal and self.min_signal and self.max_signal: self.has_bollinger_bands = True self.show_bollinger_bands = False self.bollinger_bands_source = None self.update_range() def set_color(self, color): self.color = color if self.line: self.line.glyph.line_color = color if self.bands: self.bands.glyph.fill_color = color def plot_line(self): global current_color self.set_color(Dark2[8][current_color]) current_color = (current_color + 1) % len(Dark2[8]) if self.has_bollinger_bands: self.set_bands_source() self.create_bands() self.line = self.plot.line('index', self.mean_signal, 
source=self.bokeh_source, line_color=self.color, line_width=2) # self.scatter = self.plot.scatter('index', self.mean_signal, source=self.bokeh_source) self.line.visible = True def set_selected(self, val): if self.selected != val: self.selected = val if self.line: # self.set_color(Dark2[8][current_color]) # current_color = (current_color + 1) % len(Dark2[8]) self.line.visible = self.selected if self.bands: self.bands.visible = self.selected and self.show_bollinger_bands elif self.selected: # lazy plotting - plot only when selected for the first time self.plot_line() def set_dash(self, dash): self.line.glyph.line_dash = dash def create_bands(self): self.bands = self.plot.patch(x='band_x', y='band_y', source=self.bollinger_bands_source, color=self.color, fill_alpha=0.4, alpha=0.1, line_width=0) self.bands.visible = self.show_bollinger_bands # self.min_line = plot.line('index', self.min_signal, source=self.bokeh_source, # line_color=self.color, line_width=3, line_dash="4 4") # self.max_line = plot.line('index', self.max_signal, source=self.bokeh_source, # line_color=self.color, line_width=3, line_dash="4 4") # self.min_line.visible = self.show_bollinger_bands # self.max_line.visible = self.show_bollinger_bands def set_bands_source(self): x_ticks = self.bokeh_source.data['index'] mean_values = self.bokeh_source.data[self.mean_signal] stdev_values = self.bokeh_source.data[self.stdev_signal] band_x = np.append(x_ticks, x_ticks[::-1]) band_y = np.append(mean_values - stdev_values, mean_values[::-1] + stdev_values[::-1]) source_data = {'band_x': band_x, 'band_y': band_y} if self.bollinger_bands_source: self.bollinger_bands_source.data = source_data else: self.bollinger_bands_source = ColumnDataSource(source_data) def change_bollinger_bands_state(self, new_state): self.show_bollinger_bands = new_state if self.bands and self.selected: self.bands.visible = new_state # self.min_line.visible = new_state # self.max_line.visible = new_state def update_range(self): self.min_val = 
np.min(self.bokeh_source.data[self.mean_signal]) self.max_val = np.max(self.bokeh_source.data[self.mean_signal]) def set_axis(self, axis): self.axis = axis if not self.line: self.plot_line() self.line.visible = False self.line.y_range_name = axis def toggle_axis(self): if self.axis == 'default': self.set_axis('secondary') else: self.set_axis('default')
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/dashboard_components/signals.py
0.507812
0.202778
signals.py
pypi
from typing import List import numpy as np from rl_coach.core_types import RunPhase, ActionType, EnvironmentSteps from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters from rl_coach.exploration_policies.e_greedy import EGreedy, EGreedyParameters from rl_coach.exploration_policies.exploration_policy import ExplorationParameters from rl_coach.schedules import Schedule, LinearSchedule, PieceWiseSchedule from rl_coach.spaces import ActionSpace class UCBParameters(EGreedyParameters): def __init__(self): super().__init__() self.architecture_num_q_heads = 10 self.bootstrapped_data_sharing_probability = 1.0 self.epsilon_schedule = PieceWiseSchedule([ (LinearSchedule(1, 0.1, 1000000), EnvironmentSteps(1000000)), (LinearSchedule(0.1, 0.01, 4000000), EnvironmentSteps(4000000)) ]) self.lamb = 0.1 @property def path(self): return 'rl_coach.exploration_policies.ucb:UCB' class UCB(EGreedy): """ UCB exploration policy is following the upper confidence bound heuristic to sample actions in discrete action spaces. It assumes that there are multiple network heads that are predicting action values, and that the standard deviation between the heads predictions represents the uncertainty of the agent in each of the actions. It then updates the action value estimates to by mean(actions)+lambda*stdev(actions), where lambda is given by the user. This exploration policy aims to take advantage of the uncertainty of the agent in its predictions, and select the action according to the tradeoff between how uncertain the agent is, and how large it predicts the outcome from those actions to be. 
""" def __init__(self, action_space: ActionSpace, epsilon_schedule: Schedule, evaluation_epsilon: float, architecture_num_q_heads: int, lamb: int, continuous_exploration_policy_parameters: ExplorationParameters = AdditiveNoiseParameters()): """ :param action_space: the action space used by the environment :param epsilon_schedule: a schedule for the epsilon values :param evaluation_epsilon: the epsilon value to use for evaluation phases :param architecture_num_q_heads: the number of q heads to select from :param lamb: lambda coefficient for taking the standard deviation into account :param continuous_exploration_policy_parameters: the parameters of the continuous exploration policy to use if the e-greedy is used for a continuous policy """ super().__init__(action_space, epsilon_schedule, evaluation_epsilon, continuous_exploration_policy_parameters) self.num_heads = architecture_num_q_heads self.lamb = lamb self.std = 0 self.last_action_values = 0 def select_head(self): pass def get_action(self, action_values: List[ActionType]) -> ActionType: # action values are none in case the exploration policy is going to select a random action if action_values is not None: if self.requires_action_values(): mean = np.mean(action_values, axis=0) if self.phase == RunPhase.TRAIN: self.std = np.std(action_values, axis=0) self.last_action_values = mean + self.lamb * self.std else: self.last_action_values = mean return super().get_action(self.last_action_values) def get_control_param(self): if self.phase == RunPhase.TRAIN: return np.mean(self.std) else: return 0
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/exploration_policies/ucb.py
0.940599
0.547162
ucb.py
pypi
from typing import List import numpy as np from rl_coach.core_types import RunPhase, ActionType from rl_coach.exploration_policies.exploration_policy import DiscreteActionExplorationPolicy, ExplorationParameters from rl_coach.schedules import Schedule from rl_coach.spaces import ActionSpace class BoltzmannParameters(ExplorationParameters): def __init__(self): super().__init__() self.temperature_schedule = None @property def path(self): return 'rl_coach.exploration_policies.boltzmann:Boltzmann' class Boltzmann(DiscreteActionExplorationPolicy): """ The Boltzmann exploration policy is intended for discrete action spaces. It assumes that each of the possible actions has some value assigned to it (such as the Q value), and uses a softmax function to convert these values into a distribution over the actions. It then samples the action for playing out of the calculated distribution. An additional temperature schedule can be given by the user, and will control the steepness of the softmax function. """ def __init__(self, action_space: ActionSpace, temperature_schedule: Schedule): """ :param action_space: the action space used by the environment :param temperature_schedule: the schedule for the temperature parameter of the softmax """ super().__init__(action_space) self.temperature_schedule = temperature_schedule def get_action(self, action_values: List[ActionType]) -> (ActionType, List[float]): if self.phase == RunPhase.TRAIN: self.temperature_schedule.step() # softmax calculation exp_probabilities = np.exp(action_values / self.temperature_schedule.current_value) probabilities = exp_probabilities / np.sum(exp_probabilities) # make sure probs sum to 1 probabilities[-1] = 1 - np.sum(probabilities[:-1]) # choose actions according to the probabilities action = np.random.choice(range(self.action_space.shape), p=probabilities) return action, probabilities def get_control_param(self): return self.temperature_schedule.current_value
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/exploration_policies/boltzmann.py
0.918521
0.525978
boltzmann.py
pypi
from typing import List import numpy as np from rl_coach.core_types import RunPhase, ActionType from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters from rl_coach.exploration_policies.e_greedy import EGreedy, EGreedyParameters from rl_coach.exploration_policies.exploration_policy import ExplorationParameters from rl_coach.schedules import Schedule, LinearSchedule from rl_coach.spaces import ActionSpace class BootstrappedParameters(EGreedyParameters): def __init__(self): super().__init__() self.architecture_num_q_heads = 10 self.bootstrapped_data_sharing_probability = 1.0 self.epsilon_schedule = LinearSchedule(1, 0.01, 1000000) @property def path(self): return 'rl_coach.exploration_policies.bootstrapped:Bootstrapped' class Bootstrapped(EGreedy): """ Bootstrapped exploration policy is currently only used for discrete action spaces along with the Bootstrapped DQN agent. It assumes that there is an ensemble of network heads, where each one predicts the values for all the possible actions. For each episode, a single head is selected to lead the agent, according to its value predictions. In evaluation, the action is selected using a majority vote over all the heads predictions. .. note:: This exploration policy will only work for Discrete action spaces with Bootstrapped DQN style agents, since it requires the agent to have a network with multiple heads. 
""" def __init__(self, action_space: ActionSpace, epsilon_schedule: Schedule, evaluation_epsilon: float, architecture_num_q_heads: int, continuous_exploration_policy_parameters: ExplorationParameters = AdditiveNoiseParameters(),): """ :param action_space: the action space used by the environment :param epsilon_schedule: a schedule for the epsilon values :param evaluation_epsilon: the epsilon value to use for evaluation phases :param continuous_exploration_policy_parameters: the parameters of the continuous exploration policy to use if the e-greedy is used for a continuous policy :param architecture_num_q_heads: the number of q heads to select from """ super().__init__(action_space, epsilon_schedule, evaluation_epsilon, continuous_exploration_policy_parameters) self.num_heads = architecture_num_q_heads self.selected_head = 0 self.last_action_values = 0 def select_head(self): self.selected_head = np.random.randint(self.num_heads) def get_action(self, action_values: List[ActionType]) -> ActionType: # action values are none in case the exploration policy is going to select a random action if action_values is not None: if self.phase == RunPhase.TRAIN: action_values = action_values[self.selected_head] else: # ensemble voting for evaluation top_action_votings = np.argmax(action_values, axis=-1) counts = np.bincount(top_action_votings.squeeze()) top_action = np.argmax(counts) # convert the top action to a one hot vector and pass it to e-greedy action_values = np.eye(len(self.action_space.actions))[[top_action]] self.last_action_values = action_values return super().get_action(action_values) def get_control_param(self): return self.selected_head
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/exploration_policies/bootstrapped.py
0.932584
0.577972
bootstrapped.py
pypi
from typing import List from rl_coach.base_parameters import Parameters from rl_coach.core_types import RunPhase, ActionType from rl_coach.spaces import ActionSpace, DiscreteActionSpace, BoxActionSpace, GoalsSpace class ExplorationParameters(Parameters): def __init__(self): self.action_space = None @property def path(self): return 'rl_coach.exploration_policies.exploration_policy:ExplorationPolicy' class ExplorationPolicy(object): """ An exploration policy takes the predicted actions or action values from the agent, and selects the action to actually apply to the environment using some predefined algorithm. """ def __init__(self, action_space: ActionSpace): """ :param action_space: the action space used by the environment """ self.phase = RunPhase.HEATUP self.action_space = action_space def reset(self): """ Used for resetting the exploration policy parameters when needed :return: None """ pass def get_action(self, action_values: List[ActionType]) -> ActionType: """ Given a list of values corresponding to each action, choose one actions according to the exploration policy :param action_values: A list of action values :return: The chosen action, The probability of the action (if available, otherwise 1 for absolute certainty in the action) """ raise NotImplementedError() def change_phase(self, phase): """ Change between running phases of the algorithm :param phase: Either Heatup or Train :return: none """ self.phase = phase def requires_action_values(self) -> bool: """ Allows exploration policies to define if they require the action values for the current step. This can save up a lot of computation. For example in e-greedy, if the random value generated is smaller than epsilon, the action is completely random, and the action values don't need to be calculated :return: True if the action values are required. 
False otherwise """ return True def get_control_param(self): return 0 class DiscreteActionExplorationPolicy(ExplorationPolicy): """ A discrete action exploration policy. """ def __init__(self, action_space: ActionSpace): """ :param action_space: the action space used by the environment """ assert isinstance(action_space, DiscreteActionSpace) super().__init__(action_space) def get_action(self, action_values: List[ActionType]) -> (ActionType, List): """ Given a list of values corresponding to each action, choose one actions according to the exploration policy :param action_values: A list of action values :return: The chosen action, The probabilities of actions to select from (if not available a one-hot vector) """ if self.__class__ == ExplorationPolicy: raise ValueError("The ExplorationPolicy class is an abstract class and should not be used directly. " "Please set the exploration parameters to point to an inheriting class like EGreedy or " "AdditiveNoise") else: raise ValueError("The get_action function should be overridden in the inheriting exploration class") class ContinuousActionExplorationPolicy(ExplorationPolicy): """ A continuous action exploration policy. """ def __init__(self, action_space: ActionSpace): """ :param action_space: the action space used by the environment """ assert isinstance(action_space, BoxActionSpace) or \ (hasattr(action_space, 'filtered_action_space') and isinstance(action_space.filtered_action_space, BoxActionSpace)) or \ isinstance(action_space, GoalsSpace) super().__init__(action_space)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/exploration_policies/exploration_policy.py
0.91633
0.568655
exploration_policy.py
pypi
from typing import List import numpy as np from rl_coach.core_types import RunPhase, ActionType from rl_coach.exploration_policies.exploration_policy import ContinuousActionExplorationPolicy, ExplorationParameters from rl_coach.spaces import ActionSpace, BoxActionSpace, GoalsSpace # Based on on the description in: # https://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab class OUProcessParameters(ExplorationParameters): def __init__(self): super().__init__() self.mu = 0 self.theta = 0.15 self.sigma = 0.2 self.dt = 0.01 @property def path(self): return 'rl_coach.exploration_policies.ou_process:OUProcess' # Ornstein-Uhlenbeck process class OUProcess(ContinuousActionExplorationPolicy): """ OUProcess exploration policy is intended for continuous action spaces, and selects the action according to an Ornstein-Uhlenbeck process. The Ornstein-Uhlenbeck process implements the action as a Gaussian process, where the samples are correlated between consequent time steps. 
""" def __init__(self, action_space: ActionSpace, mu: float=0, theta: float=0.15, sigma: float=0.2, dt: float=0.01): """ :param action_space: the action space used by the environment """ super().__init__(action_space) self.mu = float(mu) * np.ones(self.action_space.shape) self.theta = float(theta) self.sigma = float(sigma) * np.ones(self.action_space.shape) self.state = np.zeros(self.action_space.shape) self.dt = dt def reset(self): self.state = np.zeros(self.action_space.shape) def noise(self): x = self.state dx = self.theta * (self.mu - x) * self.dt + self.sigma * np.random.randn(len(x)) * np.sqrt(self.dt) self.state = x + dx return self.state def get_action(self, action_values: List[ActionType]) -> ActionType: if self.phase == RunPhase.TRAIN: noise = self.noise() else: noise = np.zeros(self.action_space.shape) action = action_values.squeeze() + noise return action def get_control_param(self): if self.phase == RunPhase.TRAIN: return self.state else: return np.zeros(self.action_space.shape)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/exploration_policies/ou_process.py
0.905989
0.403332
ou_process.py
pypi
from typing import List import numpy as np from rl_coach.core_types import RunPhase, ActionType from rl_coach.exploration_policies.exploration_policy import ContinuousActionExplorationPolicy, ExplorationParameters from rl_coach.schedules import Schedule, LinearSchedule from rl_coach.spaces import ActionSpace, BoxActionSpace # TODO: consider renaming to gaussian sampling class AdditiveNoiseParameters(ExplorationParameters): def __init__(self): super().__init__() self.noise_schedule = LinearSchedule(0.1, 0.1, 50000) self.evaluation_noise = 0.05 self.noise_as_percentage_from_action_space = True @property def path(self): return 'rl_coach.exploration_policies.additive_noise:AdditiveNoise' class AdditiveNoise(ContinuousActionExplorationPolicy): """ AdditiveNoise is an exploration policy intended for continuous action spaces. It takes the action from the agent and adds a Gaussian distributed noise to it. The amount of noise added to the action follows the noise amount that can be given in two different ways: 1. Specified by the user as a noise schedule which is taken in percentiles out of the action space size 2. Specified by the agents action. In case the agents action is a list with 2 values, the 1st one is assumed to be the mean of the action, and 2nd is assumed to be its standard deviation. 
""" def __init__(self, action_space: ActionSpace, noise_schedule: Schedule, evaluation_noise: float, noise_as_percentage_from_action_space: bool = True): """ :param action_space: the action space used by the environment :param noise_schedule: the schedule for the noise :param evaluation_noise: the noise variance that will be used during evaluation phases :param noise_as_percentage_from_action_space: a bool deciding whether the noise is absolute or as a percentage from the action space """ super().__init__(action_space) self.noise_schedule = noise_schedule self.evaluation_noise = evaluation_noise self.noise_as_percentage_from_action_space = noise_as_percentage_from_action_space if not isinstance(action_space, BoxActionSpace) and \ (hasattr(action_space, 'filtered_action_space') and not isinstance(action_space.filtered_action_space, BoxActionSpace)): raise ValueError("Additive noise exploration works only for continuous controls." "The given action space is of type: {}".format(action_space.__class__.__name__)) if not np.all(-np.inf < action_space.high) or not np.all(action_space.high < np.inf)\ or not np.all(-np.inf < action_space.low) or not np.all(action_space.low < np.inf): raise ValueError("Additive noise exploration requires bounded actions") def get_action(self, action_values: List[ActionType]) -> ActionType: # TODO-potential-bug consider separating internally defined stdev and externally defined stdev into 2 policies # set the current noise if self.phase == RunPhase.TEST: current_noise = self.evaluation_noise else: current_noise = self.noise_schedule.current_value # scale the noise to the action space range if self.noise_as_percentage_from_action_space: action_values_std = current_noise * (self.action_space.high - self.action_space.low) else: action_values_std = current_noise # extract the mean values if isinstance(action_values, list): # the action values are expected to be a list with the action mean and optionally the action stdev action_values_mean = 
action_values[0].squeeze() else: # the action values are expected to be a numpy array representing the action mean action_values_mean = action_values.squeeze() # step the noise schedule if self.phase is not RunPhase.TEST: self.noise_schedule.step() # the second element of the list is assumed to be the standard deviation if isinstance(action_values, list) and len(action_values) > 1: action_values_std = action_values[1].squeeze() # add noise to the action means if self.phase is not RunPhase.TEST: action = np.random.normal(action_values_mean, action_values_std) else: action = action_values_mean return np.atleast_1d(action) def get_control_param(self): return np.ones(self.action_space.shape)*self.noise_schedule.current_value
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/exploration_policies/additive_noise.py
0.6508
0.612049
additive_noise.py
pypi
from typing import List, Dict import numpy as np from rl_coach.architectures.layers import NoisyNetDense from rl_coach.base_parameters import AgentParameters, NetworkParameters from rl_coach.spaces import ActionSpace, BoxActionSpace, DiscreteActionSpace from rl_coach.core_types import ActionType from rl_coach.exploration_policies.exploration_policy import ExplorationPolicy, ExplorationParameters class ParameterNoiseParameters(ExplorationParameters): def __init__(self, agent_params: AgentParameters): super().__init__() if not agent_params.algorithm.supports_parameter_noise: raise ValueError("Currently only DQN variants are supported for using an exploration type of " "ParameterNoise.") self.network_params = agent_params.network_wrappers @property def path(self): return 'rl_coach.exploration_policies.parameter_noise:ParameterNoise' class ParameterNoise(ExplorationPolicy): """ The ParameterNoise exploration policy is intended for both discrete and continuous action spaces. It applies the exploration policy by replacing all the dense network layers with noisy layers. The noisy layers have both weight means and weight standard deviations, and for each forward pass of the network the weights are sampled from a normal distribution that follows the learned weights mean and standard deviation values. 
Warning: currently supported only by DQN variants """ def __init__(self, network_params: Dict[str, NetworkParameters], action_space: ActionSpace): """ :param action_space: the action space used by the environment """ super().__init__(action_space) self.network_params = network_params self._replace_network_dense_layers() def get_action(self, action_values: List[ActionType]): if type(self.action_space) == DiscreteActionSpace: action = np.argmax(action_values) one_hot_action_probabilities = np.zeros(len(self.action_space.actions)) one_hot_action_probabilities[action] = 1 return action, one_hot_action_probabilities elif type(self.action_space) == BoxActionSpace: action_values_mean = action_values[0].squeeze() action_values_std = action_values[1].squeeze() return np.random.normal(action_values_mean, action_values_std) else: raise ValueError("ActionSpace type {} is not supported for ParameterNoise.".format(type(self.action_space))) def get_control_param(self): return 0 def _replace_network_dense_layers(self): # replace the dense type for all the networks components (embedders, mw, heads) with a NoisyNetDense # NOTE: we are changing network params in a non-params class (an already instantiated class), this could have # been prone to a bug, but since the networks are created very late in the game # (after agent.init_environment_dependent()_modules is called) - then we are fine. for network_wrapper_params in self.network_params.values(): for component_params in list(network_wrapper_params.input_embedders_parameters.values()) + \ [network_wrapper_params.middleware_parameters] + \ network_wrapper_params.heads_parameters: component_params.dense_layer = NoisyNetDense
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/exploration_policies/parameter_noise.py
0.946886
0.576125
parameter_noise.py
pypi
from typing import List import numpy as np from scipy.stats import truncnorm from rl_coach.core_types import RunPhase, ActionType from rl_coach.exploration_policies.exploration_policy import ExplorationParameters, ContinuousActionExplorationPolicy from rl_coach.schedules import Schedule, LinearSchedule from rl_coach.spaces import ActionSpace, BoxActionSpace class TruncatedNormalParameters(ExplorationParameters): def __init__(self): super().__init__() self.noise_schedule = LinearSchedule(0.1, 0.1, 50000) self.evaluation_noise = 0.05 self.clip_low = 0 self.clip_high = 1 self.noise_as_percentage_from_action_space = True @property def path(self): return 'rl_coach.exploration_policies.truncated_normal:TruncatedNormal' class TruncatedNormal(ContinuousActionExplorationPolicy): """ The TruncatedNormal exploration policy is intended for continuous action spaces. It samples the action from a normal distribution, where the mean action is given by the agent, and the standard deviation can be given in t wo different ways: 1. Specified by the user as a noise schedule which is taken in percentiles out of the action space size 2. Specified by the agents action. In case the agents action is a list with 2 values, the 1st one is assumed to be the mean of the action, and 2nd is assumed to be its standard deviation. When the sampled action is outside of the action bounds given by the user, it is sampled again and again, until it is within the bounds. 
""" def __init__(self, action_space: ActionSpace, noise_schedule: Schedule, evaluation_noise: float, clip_low: float, clip_high: float, noise_as_percentage_from_action_space: bool = True): """ :param action_space: the action space used by the environment :param noise_schedule: the schedule for the noise variance :param evaluation_noise: the noise variance that will be used during evaluation phases :param noise_as_percentage_from_action_space: whether to consider the noise as a percentage of the action space or absolute value """ super().__init__(action_space) self.noise_schedule = noise_schedule self.evaluation_noise = evaluation_noise self.noise_as_percentage_from_action_space = noise_as_percentage_from_action_space self.clip_low = clip_low self.clip_high = clip_high if not isinstance(action_space, BoxActionSpace): raise ValueError("Truncated normal exploration works only for continuous controls." "The given action space is of type: {}".format(action_space.__class__.__name__)) if not np.all(-np.inf < action_space.high) or not np.all(action_space.high < np.inf)\ or not np.all(-np.inf < action_space.low) or not np.all(action_space.low < np.inf): raise ValueError("Additive noise exploration requires bounded actions") def get_action(self, action_values: List[ActionType]) -> ActionType: # set the current noise if self.phase == RunPhase.TEST: current_noise = self.evaluation_noise else: current_noise = self.noise_schedule.current_value # scale the noise to the action space range if self.noise_as_percentage_from_action_space: action_values_std = current_noise * (self.action_space.high - self.action_space.low) else: action_values_std = current_noise # extract the mean values if isinstance(action_values, list): # the action values are expected to be a list with the action mean and optionally the action stdev action_values_mean = action_values[0].squeeze() else: # the action values are expected to be a numpy array representing the action mean action_values_mean = 
action_values.squeeze() # step the noise schedule if self.phase is not RunPhase.TEST: self.noise_schedule.step() # the second element of the list is assumed to be the standard deviation if isinstance(action_values, list) and len(action_values) > 1: action_values_std = action_values[1].squeeze() # sample from truncated normal distribution normalized_low = (self.clip_low - action_values_mean) / action_values_std normalized_high = (self.clip_high - action_values_mean) / action_values_std distribution = truncnorm(normalized_low, normalized_high, loc=action_values_mean, scale=action_values_std) action = distribution.rvs(1) return action def get_control_param(self): return np.ones(self.action_space.shape)*self.noise_schedule.current_value
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/exploration_policies/truncated_normal.py
0.927593
0.637313
truncated_normal.py
pypi
from enum import Enum from typing import Union, List import numpy as np from rl_coach.filters.observation.observation_move_axis_filter import ObservationMoveAxisFilter try: from pysc2 import maps from pysc2.env import sc2_env from pysc2.env import available_actions_printer from pysc2.lib import actions from pysc2.lib import features from pysc2.env import environment from absl import app from absl import flags except ImportError: from rl_coach.logger import failed_imports failed_imports.append("PySc2") from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection from rl_coach.base_parameters import VisualizationParameters from rl_coach.spaces import BoxActionSpace, VectorObservationSpace, PlanarMapsObservationSpace, StateSpace, CompoundActionSpace, \ DiscreteActionSpace from rl_coach.filters.filter import InputFilter, OutputFilter from rl_coach.filters.observation.observation_rescale_to_size_filter import ObservationRescaleToSizeFilter from rl_coach.filters.action.linear_box_to_box_map import LinearBoxToBoxMap from rl_coach.filters.observation.observation_to_uint8_filter import ObservationToUInt8Filter FLAGS = flags.FLAGS FLAGS(['coach.py']) SCREEN_SIZE = 84 # will also impact the action space size # Starcraft Constants _NOOP = actions.FUNCTIONS.no_op.id _MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id _SELECT_ARMY = actions.FUNCTIONS.select_army.id _PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index _NOT_QUEUED = [0] _SELECT_ALL = [0] class StarcraftObservationType(Enum): Features = 0 RGB = 1 StarcraftInputFilter = InputFilter(is_a_reference_filter=True) StarcraftInputFilter.add_observation_filter('screen', 'move_axis', ObservationMoveAxisFilter(0, -1)) StarcraftInputFilter.add_observation_filter('screen', 'rescaling', ObservationRescaleToSizeFilter( PlanarMapsObservationSpace(np.array([84, 84, 1]), low=0, high=255, channels_axis=-1))) StarcraftInputFilter.add_observation_filter('screen', 'to_uint8', 
ObservationToUInt8Filter(0, 255)) StarcraftInputFilter.add_observation_filter('minimap', 'move_axis', ObservationMoveAxisFilter(0, -1)) StarcraftInputFilter.add_observation_filter('minimap', 'rescaling', ObservationRescaleToSizeFilter( PlanarMapsObservationSpace(np.array([64, 64, 1]), low=0, high=255, channels_axis=-1))) StarcraftInputFilter.add_observation_filter('minimap', 'to_uint8', ObservationToUInt8Filter(0, 255)) StarcraftNormalizingOutputFilter = OutputFilter(is_a_reference_filter=True) StarcraftNormalizingOutputFilter.add_action_filter( 'normalization', LinearBoxToBoxMap(input_space_low=-SCREEN_SIZE / 2, input_space_high=SCREEN_SIZE / 2 - 1)) class StarCraft2EnvironmentParameters(EnvironmentParameters): def __init__(self, level=None): super().__init__(level=level) self.screen_size = 84 self.minimap_size = 64 self.feature_minimap_maps_to_use = range(7) self.feature_screen_maps_to_use = range(17) self.observation_type = StarcraftObservationType.Features self.disable_fog = False self.auto_select_all_army = True self.default_input_filter = StarcraftInputFilter self.default_output_filter = StarcraftNormalizingOutputFilter self.use_full_action_space = False @property def path(self): return 'rl_coach.environments.starcraft2_environment:StarCraft2Environment' # Environment class StarCraft2Environment(Environment): def __init__(self, level: LevelSelection, frame_skip: int, visualization_parameters: VisualizationParameters, target_success_rate: float=1.0, seed: Union[None, int]=None, human_control: bool=False, custom_reward_threshold: Union[int, float]=None, screen_size: int=84, minimap_size: int=64, feature_minimap_maps_to_use: List=range(7), feature_screen_maps_to_use: List=range(17), observation_type: StarcraftObservationType=StarcraftObservationType.Features, disable_fog: bool=False, auto_select_all_army: bool=True, use_full_action_space: bool=False, **kwargs): super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, 
visualization_parameters, target_success_rate) self.screen_size = screen_size self.minimap_size = minimap_size self.feature_minimap_maps_to_use = feature_minimap_maps_to_use self.feature_screen_maps_to_use = feature_screen_maps_to_use self.observation_type = observation_type self.features_screen_size = None self.feature_minimap_size = None self.rgb_screen_size = None self.rgb_minimap_size = None if self.observation_type == StarcraftObservationType.Features: self.features_screen_size = screen_size self.feature_minimap_size = minimap_size elif self.observation_type == StarcraftObservationType.RGB: self.rgb_screen_size = screen_size self.rgb_minimap_size = minimap_size self.disable_fog = disable_fog self.auto_select_all_army = auto_select_all_army self.use_full_action_space = use_full_action_space # step_mul is the equivalent to frame skipping. Not sure if it repeats actions in between or not though. self.env = sc2_env.SC2Env(map_name=self.env_id, step_mul=frame_skip, visualize=self.is_rendered, agent_interface_format=sc2_env.AgentInterfaceFormat( feature_dimensions=sc2_env.Dimensions( screen=self.features_screen_size, minimap=self.feature_minimap_size ) # rgb_dimensions=sc2_env.Dimensions( # screen=self.rgb_screen_size, # minimap=self.rgb_screen_size # ) ), # feature_screen_size=self.features_screen_size, # feature_minimap_size=self.feature_minimap_size, # rgb_screen_size=self.rgb_screen_size, # rgb_minimap_size=self.rgb_screen_size, disable_fog=disable_fog, random_seed=self.seed ) # print all the available actions # self.env = available_actions_printer.AvailableActionsPrinter(self.env) self.reset_internal_state(True) """ feature_screen: [height_map, visibility_map, creep, power, player_id, player_relative, unit_type, selected, unit_hit_points, unit_hit_points_ratio, unit_energy, unit_energy_ratio, unit_shields, unit_shields_ratio, unit_density, unit_density_aa, effects] feature_minimap: [height_map, visibility_map, creep, camera, player_id, player_relative, selecte 
d] player: [player_id, minerals, vespene, food_cap, food_army, food_workers, idle_worker_dount, army_count, warp_gate_count, larva_count] """ self.screen_shape = np.array(self.env.observation_spec()[0]['feature_screen']) self.screen_shape[0] = len(self.feature_screen_maps_to_use) self.minimap_shape = np.array(self.env.observation_spec()[0]['feature_minimap']) self.minimap_shape[0] = len(self.feature_minimap_maps_to_use) self.state_space = StateSpace({ "screen": PlanarMapsObservationSpace(shape=self.screen_shape, low=0, high=255, channels_axis=0), "minimap": PlanarMapsObservationSpace(shape=self.minimap_shape, low=0, high=255, channels_axis=0), "measurements": VectorObservationSpace(self.env.observation_spec()[0]["player"][0]) }) if self.use_full_action_space: action_identifiers = list(self.env.action_spec()[0].functions) num_action_identifiers = len(action_identifiers) action_arguments = [(arg.name, arg.sizes) for arg in self.env.action_spec()[0].types] sub_action_spaces = [DiscreteActionSpace(num_action_identifiers)] for argument in action_arguments: for dimension in argument[1]: sub_action_spaces.append(DiscreteActionSpace(dimension)) self.action_space = CompoundActionSpace(sub_action_spaces) else: self.action_space = BoxActionSpace(2, 0, self.screen_size - 1, ["X-Axis, Y-Axis"], default_action=np.array([self.screen_size/2, self.screen_size/2])) self.target_success_rate = target_success_rate def _update_state(self): timestep = 0 self.screen = self.last_result[timestep].observation.feature_screen # extract only the requested segmentation maps from the observation self.screen = np.take(self.screen, self.feature_screen_maps_to_use, axis=0) self.minimap = self.last_result[timestep].observation.feature_minimap self.measurements = self.last_result[timestep].observation.player self.reward = self.last_result[timestep].reward self.done = self.last_result[timestep].step_type == environment.StepType.LAST self.state = { 'screen': self.screen, 'minimap': self.minimap, 
'measurements': self.measurements } def _take_action(self, action): if self.use_full_action_space: action_identifier = action[0] action_arguments = action[1:] action = actions.FunctionCall(action_identifier, action_arguments) else: coord = np.array(action[0:2]) noop = False coord = coord.round() coord = np.clip(coord, 0, SCREEN_SIZE - 1) self.last_action_idx = coord if noop: action = actions.FunctionCall(_NOOP, []) else: action = actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord]) self.last_result = self.env.step(actions=[action]) def _restart_environment_episode(self, force_environment_reset=False): # reset the environment self.last_result = self.env.reset() # select all the units on the screen if self.auto_select_all_army: self.env.step(actions=[actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) def get_rendered_image(self): screen = np.squeeze(np.tile(np.expand_dims(self.screen, -1), (1, 1, 3))) screen = screen / np.max(screen) * 255 return screen.astype('uint8') def dump_video_of_last_episode(self): from rl_coach.logger import experiment_path self.env._run_config.replay_dir = experiment_path self.env.save_replay('replays') super().dump_video_of_last_episode() def get_target_success_rate(self): return self.target_success_rate
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/environments/starcraft2_environment.py
0.810629
0.387111
starcraft2_environment.py
pypi
import random from enum import Enum from typing import Union import numpy as np try: from dm_control import suite from dm_control.suite.wrappers import pixels except ImportError: from rl_coach.logger import failed_imports failed_imports.append("DeepMind Control Suite") from rl_coach.base_parameters import VisualizationParameters from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection from rl_coach.filters.filter import NoInputFilter, NoOutputFilter from rl_coach.spaces import BoxActionSpace, ImageObservationSpace, VectorObservationSpace, StateSpace class ObservationType(Enum): Measurements = 1 Image = 2 Image_and_Measurements = 3 # Parameters class ControlSuiteEnvironmentParameters(EnvironmentParameters): def __init__(self, level=None): super().__init__(level=level) self.observation_type = ObservationType.Measurements self.default_input_filter = ControlSuiteInputFilter self.default_output_filter = ControlSuiteOutputFilter @property def path(self): return 'rl_coach.environments.control_suite_environment:ControlSuiteEnvironment' """ ControlSuite Environment Components """ ControlSuiteInputFilter = NoInputFilter() ControlSuiteOutputFilter = NoOutputFilter() control_suite_envs = {':'.join(env): ':'.join(env) for env in suite.BENCHMARKING} # Environment class ControlSuiteEnvironment(Environment): def __init__(self, level: LevelSelection, frame_skip: int, visualization_parameters: VisualizationParameters, target_success_rate: float=1.0, seed: Union[None, int]=None, human_control: bool=False, observation_type: ObservationType=ObservationType.Measurements, custom_reward_threshold: Union[int, float]=None, **kwargs): """ :param level: (str) A string representing the control suite level to run. This can also be a LevelSelection object. For example, cartpole:swingup. :param frame_skip: (int) The number of frames to skip between any two actions given by the agent. The action will be repeated for all the skipped frames. 
:param visualization_parameters: (VisualizationParameters) The parameters used for visualizing the environment, such as the render flag, storing videos etc. :param target_success_rate: (float) Stop experiment if given target success rate was achieved. :param seed: (int) A seed to use for the random number generator when running the environment. :param human_control: (bool) A flag that allows controlling the environment using the keyboard keys. :param observation_type: (ObservationType) An enum which defines which observation to use. The current options are to use: * Measurements only - a vector of joint torques and similar measurements * Image only - an image of the environment as seen by a camera attached to the simulator * Measurements & Image - both type of observations will be returned in the state using the keys 'measurements' and 'pixels' respectively. :param custom_reward_threshold: (float) Allows defining a custom reward that will be used to decide when the agent succeeded in passing the environment. 
""" super().__init__(level, seed, frame_skip, human_control, custom_reward_threshold, visualization_parameters, target_success_rate) self.observation_type = observation_type # load and initialize environment domain_name, task_name = self.env_id.split(":") self.env = suite.load(domain_name=domain_name, task_name=task_name, task_kwargs={'random': seed}) if observation_type != ObservationType.Measurements: self.env = pixels.Wrapper(self.env, pixels_only=observation_type == ObservationType.Image) # seed if self.seed is not None: np.random.seed(self.seed) random.seed(self.seed) self.state_space = StateSpace({}) # image observations if observation_type != ObservationType.Measurements: self.state_space['pixels'] = ImageObservationSpace(shape=self.env.observation_spec()['pixels'].shape, high=255) # measurements observations if observation_type != ObservationType.Image: measurements_space_size = 0 measurements_names = [] for observation_space_name, observation_space in self.env.observation_spec().items(): if len(observation_space.shape) == 0: measurements_space_size += 1 measurements_names.append(observation_space_name) elif len(observation_space.shape) == 1: measurements_space_size += observation_space.shape[0] measurements_names.extend(["{}_{}".format(observation_space_name, i) for i in range(observation_space.shape[0])]) self.state_space['measurements'] = VectorObservationSpace(shape=measurements_space_size, measurements_names=measurements_names) # actions self.action_space = BoxActionSpace( shape=self.env.action_spec().shape[0], low=self.env.action_spec().minimum, high=self.env.action_spec().maximum ) # initialize the state by getting a new state from the environment self.reset_internal_state(True) # render if self.is_rendered: image = self.get_rendered_image() scale = 1 if self.human_control: scale = 2 if not self.native_rendering: self.renderer.create_screen(image.shape[1]*scale, image.shape[0]*scale) self.target_success_rate = target_success_rate def 
_update_state(self): self.state = {} if self.observation_type != ObservationType.Measurements: self.pixels = self.last_result.observation['pixels'] self.state['pixels'] = self.pixels if self.observation_type != ObservationType.Image: self.measurements = np.array([]) for sub_observation in self.last_result.observation.values(): if isinstance(sub_observation, np.ndarray) and len(sub_observation.shape) == 1: self.measurements = np.concatenate((self.measurements, sub_observation)) else: self.measurements = np.concatenate((self.measurements, np.array([sub_observation]))) self.state['measurements'] = self.measurements self.reward = self.last_result.reward if self.last_result.reward is not None else 0 self.done = self.last_result.last() def _take_action(self, action): if type(self.action_space) == BoxActionSpace: action = self.action_space.clip_action_to_space(action) self.last_result = self.env.step(action) def _restart_environment_episode(self, force_environment_reset=False): self.last_result = self.env.reset() def _render(self): pass def get_rendered_image(self): return self.env.physics.render(camera_id=0) def get_target_success_rate(self) -> float: return self.target_success_rate
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/environments/control_suite_environment.py
0.892217
0.541833
control_suite_environment.py
pypi
from typing import Union, Dict from rl_coach.core_types import ActionType, EnvResponse, RunPhase from rl_coach.spaces import ActionSpace class EnvironmentInterface(object): def __init__(self): self._phase = RunPhase.UNDEFINED @property def phase(self) -> RunPhase: """ Get the phase of the environment :return: the current phase """ return self._phase @phase.setter def phase(self, val: RunPhase): """ Change the phase of the environment :param val: the new phase :return: None """ self._phase = val @property def action_space(self) -> Union[Dict[str, ActionSpace], ActionSpace]: """ Get the action space of the environment (or of each of the agents wrapped in this environment. i.e. in the LevelManager case") :return: the action space """ raise NotImplementedError("") def get_random_action(self) -> ActionType: """ Get a random action from the environment action space :return: An action that follows the definition of the action space. """ raise NotImplementedError("") def step(self, action: ActionType) -> Union[None, EnvResponse]: """ Make a single step in the environment using the given action :param action: an action to use for stepping the environment. Should follow the definition of the action space. :return: the environment response as returned in get_last_env_response or None for LevelManager """ raise NotImplementedError("") def reset_internal_state(self, force_environment_reset: bool=False) -> Union[None, EnvResponse]: """ Reset the environment episode :param force_environment_reset: in some cases, resetting the environment can be suppressed by the environment itself. This flag allows force the reset. :return: the environment response as returned in get_last_env_response or None for LevelManager """ raise NotImplementedError("")
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/environments/environment_interface.py
0.926968
0.414899
environment_interface.py
pypi
import os import gym import numpy as np from gym import spaces from gym.envs.registration import EnvSpec from mujoco_py import load_model_from_path, MjSim, MjViewer, MjRenderContextOffscreen class PendulumWithGoals(gym.Env): metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30 } def __init__(self, goal_reaching_thresholds=np.array([0.075, 0.075, 0.75]), goal_not_reached_penalty=-1, goal_reached_reward=0, terminate_on_goal_reaching=True, time_limit=1000, frameskip=1, random_goals_instead_of_standing_goal=False, polar_coordinates: bool=False): super().__init__() dir = os.path.dirname(__file__) model = load_model_from_path(dir + "/pendulum_with_goals.xml") self.sim = MjSim(model) self.viewer = None self.rgb_viewer = None self.frameskip = frameskip self.goal = None self.goal_reaching_thresholds = goal_reaching_thresholds self.goal_not_reached_penalty = goal_not_reached_penalty self.goal_reached_reward = goal_reached_reward self.terminate_on_goal_reaching = terminate_on_goal_reaching self.time_limit = time_limit self.current_episode_steps_counter = 0 self.random_goals_instead_of_standing_goal = random_goals_instead_of_standing_goal self.polar_coordinates = polar_coordinates # spaces definition self.action_space = spaces.Box(low=-self.sim.model.actuator_ctrlrange[:, 1], high=self.sim.model.actuator_ctrlrange[:, 1], dtype=np.float32) if self.polar_coordinates: self.observation_space = spaces.Dict({ "observation": spaces.Box(low=np.array([-np.pi, -15]), high=np.array([np.pi, 15]), dtype=np.float32), "desired_goal": spaces.Box(low=np.array([-np.pi, -15]), high=np.array([np.pi, 15]), dtype=np.float32), "achieved_goal": spaces.Box(low=np.array([-np.pi, -15]), high=np.array([np.pi, 15]), dtype=np.float32) }) else: self.observation_space = spaces.Dict({ "observation": spaces.Box(low=np.array([-1, -1, -15]), high=np.array([1, 1, 15]), dtype=np.float32), "desired_goal": spaces.Box(low=np.array([-1, -1, -15]), high=np.array([1, 1, 15]), 
dtype=np.float32), "achieved_goal": spaces.Box(low=np.array([-1, -1, -15]), high=np.array([1, 1, 15]), dtype=np.float32) }) self.spec = EnvSpec('PendulumWithGoals-v0') self.spec.reward_threshold = self.goal_not_reached_penalty * self.time_limit self.reset() def _goal_reached(self): observation = self._get_obs() if np.any(np.abs(observation['achieved_goal'] - observation['desired_goal']) > self.goal_reaching_thresholds): return False else: return True def _terminate(self): if (self._goal_reached() and self.terminate_on_goal_reaching) or \ self.current_episode_steps_counter >= self.time_limit: return True else: return False def _reward(self): if self._goal_reached(): return self.goal_reached_reward else: return self.goal_not_reached_penalty def step(self, action): self.sim.data.ctrl[:] = action for _ in range(self.frameskip): self.sim.step() self.current_episode_steps_counter += 1 state = self._get_obs() # visualize the angular velocities state_velocity = np.copy(state['observation'][-1] / 20) goal_velocity = self.goal[-1] / 20 self.sim.model.site_size[2] = np.array([0.01, 0.01, state_velocity]) self.sim.data.mocap_pos[2] = np.array([0.85, 0, 0.75 + state_velocity]) self.sim.model.site_size[3] = np.array([0.01, 0.01, goal_velocity]) self.sim.data.mocap_pos[3] = np.array([1.15, 0, 0.75 + goal_velocity]) return state, self._reward(), self._terminate(), {} def _get_obs(self): """ y ^ |____ | / | / |~/ |/ --------> x """ # observation angle = self.sim.data.qpos angular_velocity = self.sim.data.qvel if self.polar_coordinates: observation = np.concatenate([angle - np.pi, angular_velocity]) else: x = np.sin(angle) y = np.cos(angle) # qpos is the angle relative to a standing pole observation = np.concatenate([x, y, angular_velocity]) return { "observation": observation, "desired_goal": self.goal, "achieved_goal": observation } def reset(self): self.current_episode_steps_counter = 0 # set initial state angle = np.random.uniform(np.pi / 4, 7 * np.pi / 4) angular_velocity = 
np.random.uniform(-0.05, 0.05) self.sim.data.qpos[0] = angle self.sim.data.qvel[0] = angular_velocity self.sim.step() # goal if self.random_goals_instead_of_standing_goal: angle_target = np.random.uniform(-np.pi / 8, np.pi / 8) angular_velocity_target = np.random.uniform(-0.2, 0.2) else: angle_target = 0 angular_velocity_target = 0 # convert target values to goal x_target = np.sin(angle_target) y_target = np.cos(angle_target) if self.polar_coordinates: self.goal = np.array([angle_target - np.pi, angular_velocity_target]) else: self.goal = np.array([x_target, y_target, angular_velocity_target]) # visualize the goal self.sim.data.mocap_pos[0] = [x_target, 0, y_target] return self._get_obs() def render(self, mode='human', close=False): if mode == 'human': if self.viewer is None: self.viewer = MjViewer(self.sim) self.viewer.render() elif mode == 'rgb_array': if self.rgb_viewer is None: self.rgb_viewer = MjRenderContextOffscreen(self.sim, 0) self.rgb_viewer.render(500, 500) # window size used for old mujoco-py: data = self.rgb_viewer.read_pixels(500, 500, depth=False) # original image is upside-down, so flip it return data[::-1, :, :]
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/environments/mujoco/pendulum_with_goals.py
0.641198
0.317413
pendulum_with_goals.py
pypi
import random import gym import numpy as np from gym import spaces class BitFlip(gym.Env): metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30 } def __init__(self, bit_length=16, max_steps=None, mean_zero=False): super(BitFlip, self).__init__() if bit_length < 1: raise ValueError('bit_length must be >= 1, found {}'.format(bit_length)) self.bit_length = bit_length self.mean_zero = mean_zero if max_steps is None: # default to bit_length self.max_steps = bit_length elif max_steps == 0: self.max_steps = None else: self.max_steps = max_steps # spaces documentation: https://gym.openai.com/docs/ self.action_space = spaces.Discrete(bit_length) self.observation_space = spaces.Dict({ 'state': spaces.Box(low=0, high=1, shape=(bit_length, )), 'desired_goal': spaces.Box(low=0, high=1, shape=(bit_length, )), 'achieved_goal': spaces.Box(low=0, high=1, shape=(bit_length, )) }) self.reset() def _terminate(self): return (self.state == self.goal).all() or self.steps >= self.max_steps def _reward(self): return -1 if (self.state != self.goal).any() else 0 def step(self, action): # action is an int in the range [0, self.bit_length) self.state[action] = int(not self.state[action]) self.steps += 1 return (self._get_obs(), self._reward(), self._terminate(), {}) def reset(self): self.steps = 0 self.state = np.array([random.choice([1, 0]) for _ in range(self.bit_length)]) # make sure goal is not the initial state self.goal = self.state while (self.goal == self.state).all(): self.goal = np.array([random.choice([1, 0]) for _ in range(self.bit_length)]) return self._get_obs() def _mean_zero(self, x): if self.mean_zero: return (x - 0.5) / 0.5 else: return x def _get_obs(self): return { 'state': self._mean_zero(self.state), 'desired_goal': self._mean_zero(self.goal), 'achieved_goal': self._mean_zero(self.state) } def render(self, mode='human', close=False): observation = np.zeros((20, 20 * self.bit_length, 3)) for bit_idx, (state_bit, goal_bit) in 
enumerate(zip(self.state, self.goal)): # green if the bit matches observation[:, bit_idx * 20:(bit_idx + 1) * 20, 1] = (state_bit == goal_bit) * 255 # red if the bit doesn't match observation[:, bit_idx * 20:(bit_idx + 1) * 20, 0] = (state_bit != goal_bit) * 255 return observation
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/environments/toy_problems/bit_flip.py
0.712232
0.38604
bit_flip.py
pypi
from enum import Enum import gym import numpy as np from gym import spaces class ExplorationChain(gym.Env): metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30 } class ObservationType(Enum): OneHot = 0 Therm = 1 def __init__(self, chain_length=16, start_state=1, max_steps=None, observation_type=ObservationType.Therm, left_state_reward=1/1000, right_state_reward=1, simple_render=True): super().__init__() if chain_length <= 3: raise ValueError('Chain length must be > 3, found {}'.format(chain_length)) if not 0 <= start_state < chain_length: raise ValueError('The start state should be within the chain bounds, found {}'.format(start_state)) self.chain_length = chain_length self.start_state = start_state self.max_steps = max_steps self.observation_type = observation_type self.left_state_reward = left_state_reward self.right_state_reward = right_state_reward self.simple_render = simple_render # spaces documentation: https://gym.openai.com/docs/ self.action_space = spaces.Discrete(2) # 0 -> Go left, 1 -> Go right self.observation_space = spaces.Box(0, 1, shape=(chain_length,))#spaces.MultiBinary(chain_length) self.reset() def _terminate(self): return self.steps >= self.max_steps def _reward(self): if self.state == 0: return self.left_state_reward elif self.state == self.chain_length - 1: return self.right_state_reward else: return 0 def step(self, action): # action is 0 or 1 if action == 0: if 0 < self.state: self.state -= 1 elif action == 1: if self.state < self.chain_length - 1: self.state += 1 else: raise ValueError("An invalid action was given. 
The available actions are - 0 or 1, found {}".format(action)) self.steps += 1 return self._get_obs(), self._reward(), self._terminate(), {} def reset(self): self.steps = 0 self.state = self.start_state return self._get_obs() def _get_obs(self): self.observation = np.zeros((self.chain_length,)) if self.observation_type == self.ObservationType.OneHot: self.observation[self.state] = 1 elif self.observation_type == self.ObservationType.Therm: self.observation[:(self.state+1)] = 1 return self.observation def render(self, mode='human', close=False): if self.simple_render: observation = np.zeros((20, 20*self.chain_length)) observation[:, self.state*20:(self.state+1)*20] = 255 return observation else: # lazy loading of networkx and matplotlib to allow using the environment without installing them if # necessary import networkx as nx from networkx.drawing.nx_agraph import graphviz_layout import matplotlib.pyplot as plt if not hasattr(self, 'G'): self.states = list(range(self.chain_length)) self.G = nx.DiGraph(directed=True) for i, origin_state in enumerate(self.states): if i < self.chain_length - 1: self.G.add_edge(origin_state, origin_state + 1, weight=0.5) if i > 0: self.G.add_edge(origin_state, origin_state - 1, weight=0.5, ) if i == 0 or i < self.chain_length - 1: self.G.add_edge(origin_state, origin_state, weight=0.5, ) fig = plt.gcf() if np.all(fig.get_size_inches() != [10, 2]): fig.set_size_inches(5, 1) color = ['y']*(len(self.G)) color[self.state] = 'r' options = { 'node_color': color, 'node_size': 50, 'width': 1, 'arrowstyle': '-|>', 'arrowsize': 5, 'font_size': 6 } pos = graphviz_layout(self.G, prog='dot', args='-Grankdir=LR') nx.draw_networkx(self.G, pos, arrows=True, **options) fig.canvas.draw() data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) return data
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/environments/toy_problems/exploration_chain.py
0.868172
0.521837
exploration_chain.py
pypi
from typing import Tuple, List from rl_coach.base_parameters import AgentParameters, VisualizationParameters, TaskParameters, \ PresetValidationParameters from rl_coach.environments.environment import EnvironmentParameters, Environment from rl_coach.filters.filter import NoInputFilter, NoOutputFilter from rl_coach.graph_managers.graph_manager import GraphManager, ScheduleParameters from rl_coach.level_manager import LevelManager from rl_coach.utils import short_dynamic_import class BasicRLGraphManager(GraphManager): """ A basic RL graph manager creates the common scheme of RL where there is a single agent which interacts with a single environment. """ def __init__(self, agent_params: AgentParameters, env_params: EnvironmentParameters, schedule_params: ScheduleParameters, vis_params: VisualizationParameters=VisualizationParameters(), preset_validation_params: PresetValidationParameters = PresetValidationParameters(), name='simple_rl_graph'): super().__init__(name, schedule_params, vis_params) self.agent_params = agent_params self.env_params = env_params self.preset_validation_params = preset_validation_params self.agent_params.visualization = vis_params if self.agent_params.input_filter is None: if env_params is not None: self.agent_params.input_filter = env_params.default_input_filter() else: # In cases where there is no environment (e.g. batch-rl and imitation learning), there is nowhere to get # a default filter from. So using a default no-filter. # When there is no environment, the user is expected to define input/output filters (if required) using # the preset. 
self.agent_params.input_filter = NoInputFilter() if self.agent_params.output_filter is None: if env_params is not None: self.agent_params.output_filter = env_params.default_output_filter() else: self.agent_params.output_filter = NoOutputFilter() def _create_graph(self, task_parameters: TaskParameters) -> Tuple[List[LevelManager], List[Environment]]: # environment loading self.env_params.seed = task_parameters.seed self.env_params.experiment_path = task_parameters.experiment_path env = short_dynamic_import(self.env_params.path)(**self.env_params.__dict__, visualization_parameters=self.visualization_parameters) # agent loading self.agent_params.task_parameters = task_parameters # TODO: this should probably be passed in a different way self.agent_params.name = "agent" agent = short_dynamic_import(self.agent_params.path)(self.agent_params) # set level manager level_manager = LevelManager(agents=agent, environment=env, name="main_level") return [level_manager], [env] def log_signal(self, signal_name, value): self.level_managers[0].agents['agent'].agent_logger.create_signal_value(signal_name, value) def get_signal_value(self, signal_name): return self.level_managers[0].agents['agent'].agent_logger.get_signal_value(signal_name) def get_agent(self): return self.level_managers[0].agents['agent']
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/graph_managers/basic_rl_graph_manager.py
0.698535
0.343424
basic_rl_graph_manager.py
pypi
from typing import List, Union, Tuple from rl_coach.base_parameters import AgentParameters, VisualizationParameters, TaskParameters, \ PresetValidationParameters from rl_coach.core_types import EnvironmentSteps from rl_coach.environments.environment import EnvironmentParameters, Environment from rl_coach.graph_managers.graph_manager import GraphManager, ScheduleParameters from rl_coach.level_manager import LevelManager from rl_coach.utils import short_dynamic_import class HACGraphManager(GraphManager): """ A simple HAC graph manager creates a deep hierarchy with a single agent per hierarchy level, and a single environment (on the bottom layer) which is interacted with. """ def __init__(self, agents_params: List[AgentParameters], env_params: EnvironmentParameters, schedule_params: ScheduleParameters, vis_params: VisualizationParameters, consecutive_steps_to_run_non_top_levels: Union[EnvironmentSteps, List[EnvironmentSteps]], preset_validation_params: PresetValidationParameters = PresetValidationParameters()): """ :param agents_params: the parameters of all the agents in the hierarchy starting from the top level of the hierarchy to the bottom level :param env_params: the parameters of the environment :param schedule_params: the parameters for scheduling the graph :param vis_params: the visualization parameters :param consecutive_steps_to_run_non_top_levels: the number of time steps that each level is ran. for example, when the top level gives the bottom level a goal, the bottom level can act for consecutive_steps_to_run_each_level steps and try to reach that goal. This is expected to be either an EnvironmentSteps which will be used for all levels, or an EnvironmentSteps for each level as a list. 
""" super().__init__('hac_graph', schedule_params, vis_params) self.agents_params = agents_params self.env_params = env_params self.preset_validation_params = preset_validation_params self.should_test_current_sub_goal = None # will be filled by the top level agent, and is used by all levels if isinstance(consecutive_steps_to_run_non_top_levels, list): if len(consecutive_steps_to_run_non_top_levels) != len(self.agents_params): raise ValueError("If the consecutive_steps_to_run_each_level is given as a list, it should match " "the number of levels in the hierarchy. Alternatively, it is possible to use a single " "value for all the levels, by passing an EnvironmentSteps") elif isinstance(consecutive_steps_to_run_non_top_levels, EnvironmentSteps): self.consecutive_steps_to_run_non_top_levels = consecutive_steps_to_run_non_top_levels for agent_params in agents_params: agent_params.visualization = self.visualization_parameters if agent_params.input_filter is None: agent_params.input_filter = self.env_params.default_input_filter() if agent_params.output_filter is None: agent_params.output_filter = self.env_params.default_output_filter() if len(self.agents_params) < 2: raise ValueError("The HAC graph manager must receive the agent parameters for at least two levels of the " "hierarchy. 
Otherwise, use the basic RL graph manager.") def _create_graph(self, task_parameters: TaskParameters) -> Tuple[List[LevelManager], List[Environment]]: env = short_dynamic_import(self.env_params.path)(**self.env_params.__dict__, visualization_parameters=self.visualization_parameters) for agent_params in self.agents_params: agent_params.task_parameters = task_parameters # we need to build the hierarchy in reverse order (from the bottom up) in order for the spaces of each level # to be known level_managers = [] current_env = env # out_action_space = env.action_space for level_idx, agent_params in reversed(list(enumerate(self.agents_params))): agent_params.name = "agent_{}".format(level_idx) agent_params.is_a_highest_level_agent = level_idx == 0 agent_params.is_a_lowest_level_agent = level_idx == len(self.agents_params) - 1 agent = short_dynamic_import(agent_params.path)(agent_params) level_manager = LevelManager( agents=agent, environment=current_env, real_environment=env, steps_limit=EnvironmentSteps(1) if level_idx == 0 else self.consecutive_steps_to_run_non_top_levels, should_reset_agent_state_after_time_limit_passes=level_idx > 0, name="level_{}".format(level_idx) ) current_env = level_manager level_managers.insert(0, level_manager) return level_managers, [env]
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/graph_managers/hac_graph_manager.py
0.893491
0.590366
hac_graph_manager.py
pypi
from typing import List, Type, Union from rl_coach.base_parameters import MiddlewareScheme, NetworkComponentParameters class MiddlewareParameters(NetworkComponentParameters): def __init__(self, parameterized_class_name: str, activation_function: str='relu', scheme: Union[List, MiddlewareScheme]=MiddlewareScheme.Medium, batchnorm: bool=False, dropout_rate: float=0.0, name='middleware', dense_layer=None, is_training=False): super().__init__(dense_layer=dense_layer) self.activation_function = activation_function self.scheme = scheme self.batchnorm = batchnorm self.dropout_rate = dropout_rate self.name = name self.is_training = is_training self.parameterized_class_name = parameterized_class_name @property def path(self): return 'rl_coach.architectures.tensorflow_components.middlewares:' + self.parameterized_class_name class FCMiddlewareParameters(MiddlewareParameters): def __init__(self, activation_function='relu', scheme: Union[List, MiddlewareScheme] = MiddlewareScheme.Medium, batchnorm: bool = False, dropout_rate: float = 0.0, name="middleware_fc_embedder", dense_layer=None, is_training=False, num_streams=1): super().__init__(parameterized_class_name="FCMiddleware", activation_function=activation_function, scheme=scheme, batchnorm=batchnorm, dropout_rate=dropout_rate, name=name, dense_layer=dense_layer, is_training=is_training) self.num_streams = num_streams class LSTMMiddlewareParameters(MiddlewareParameters): def __init__(self, activation_function='relu', number_of_lstm_cells=256, scheme: MiddlewareScheme = MiddlewareScheme.Medium, batchnorm: bool = False, dropout_rate: float = 0.0, name="middleware_lstm_embedder", dense_layer=None, is_training=False): super().__init__(parameterized_class_name="LSTMMiddleware", activation_function=activation_function, scheme=scheme, batchnorm=batchnorm, dropout_rate=dropout_rate, name=name, dense_layer=dense_layer, is_training=is_training) self.number_of_lstm_cells = number_of_lstm_cells
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/middleware_parameters.py
0.917168
0.209066
middleware_parameters.py
pypi
from typing import Any, Dict, List, Tuple import numpy as np from rl_coach.base_parameters import AgentParameters from rl_coach.saver import SaverCollection from rl_coach.spaces import SpacesDefinition class Architecture(object): @staticmethod def construct(variable_scope: str, devices: List[str], *args, **kwargs) -> 'Architecture': """ Construct a network class using the provided variable scope and on requested devices :param variable_scope: string specifying variable scope under which to create network variables :param devices: list of devices (can be list of Device objects, or string for TF distributed) :param args: all other arguments for class initializer :param kwargs: all other keyword arguments for class initializer :return: an object which is a child of Architecture """ raise NotImplementedError def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, name: str= ""): """ Creates a neural network 'architecture', that can be trained and used for inference. :param agent_parameters: the agent parameters :param spaces: the spaces (observation, action, etc.) definition of the agent :param name: the name of the network """ self.spaces = spaces self.name = name self.network_wrapper_name = self.name.split('/')[0] # e.g. 'main/online' --> 'main' self.full_name = "{}/{}".format(agent_parameters.full_name_id, name) self.network_parameters = agent_parameters.network_wrappers[self.network_wrapper_name] self.batch_size = self.network_parameters.batch_size self.learning_rate = self.network_parameters.learning_rate self.optimizer = None self.ap = agent_parameters def predict(self, inputs: Dict[str, np.ndarray], outputs: List[Any] = None, squeeze_output: bool = True, initial_feed_dict: Dict[Any, np.ndarray] = None) -> Tuple[np.ndarray, ...]: """ Given input observations, use the model to make predictions (e.g. action or value). :param inputs: current state (i.e. observations, measurements, goals, etc.) (e.g. 
`{'observation': numpy.ndarray}` of shape (batch_size, observation_space_size)) :param outputs: list of outputs to return. Return all outputs if unspecified. Type of the list elements depends on the framework backend. :param squeeze_output: call squeeze_list on output before returning if True :param initial_feed_dict: a dictionary of extra inputs for forward pass. :return: predictions of action or value of shape (batch_size, action_space_size) for action predictions) """ raise NotImplementedError @staticmethod def parallel_predict(sess: Any, network_input_tuples: List[Tuple['Architecture', Dict[str, np.ndarray]]]) -> \ Tuple[np.ndarray, ...]: """ :param sess: active session to use for prediction :param network_input_tuples: tuple of network and corresponding input :return: list or tuple of outputs from all networks """ raise NotImplementedError def train_on_batch(self, inputs: Dict[str, np.ndarray], targets: List[np.ndarray], scaler: float=1., additional_fetches: list=None, importance_weights: np.ndarray=None) -> Tuple[float, List[float], float, list]: """ Given a batch of inputs (e.g. states) and targets (e.g. discounted rewards), takes a training step: i.e. runs a forward pass and backward pass of the network, accumulates the gradients and applies an optimization step to update the weights. Calls `accumulate_gradients` followed by `apply_and_reset_gradients`. Note: Currently an unused method. :param inputs: typically the environment states (but can also contain other data necessary for loss). (e.g. `{'observation': numpy.ndarray}` with `observation` of shape (batch_size, observation_space_size) or (batch_size, observation_space_size, stack_size) or `{'observation': numpy.ndarray, 'output_0_0': numpy.ndarray}` with `output_0_0` of shape (batch_size,)) :param targets: target values of shape (batch_size, ). For example discounted rewards for value network for calculating the value-network loss would be a target. 
Length of list and order of arrays in the list matches that of network losses which are defined by network parameters :param scaler: value to scale gradients by before optimizing network weights :param additional_fetches: list of additional values to fetch and return. The type of each list element is framework dependent. :param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss. :return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors total_loss (float): sum of all head losses losses (list of float): list of all losses. The order is list of target losses followed by list of regularization losses. The specifics of losses is dependant on the network parameters (number of heads, etc.) norm_unclippsed_grads (float): global norm of all gradients before any gradient clipping is applied fetched_tensors: all values for additional_fetches """ raise NotImplementedError def get_weights(self) -> List[np.ndarray]: """ Gets model weights as a list of ndarrays. It is used for synchronizing weight between two identical networks. :return: list weights as ndarray """ raise NotImplementedError def set_weights(self, weights: List[np.ndarray], rate: float=1.0) -> None: """ Sets model weights for provided layer parameters. :param weights: list of model weights in the same order as received in get_weights :param rate: controls the mixture of given weight values versus old weight values. i.e. new_weight = rate * given_weight + (1 - rate) * old_weight :return: None """ raise NotImplementedError def reset_accumulated_gradients(self) -> None: """ Sets gradient of all parameters to 0. Once gradients are reset, they must be accessible by `accumulated_gradients` property of this class, which must return a list of numpy ndarrays. Child class must ensure that `accumulated_gradients` is set. 
""" raise NotImplementedError def accumulate_gradients(self, inputs: Dict[str, np.ndarray], targets: List[np.ndarray], additional_fetches: list=None, importance_weights: np.ndarray=None, no_accumulation: bool=False) -> Tuple[float, List[float], float, list]: """ Given a batch of inputs (i.e. states) and targets (e.g. discounted rewards), computes and accumulates the gradients for model parameters. Will run forward and backward pass to compute gradients, clip the gradient values if required and then accumulate gradients from all learners. It does not update the model weights, that's performed in `apply_and_reset_gradients` method. Once gradients are accumulated, they are accessed by `accumulated_gradients` property of this class.å :param inputs: typically the environment states (but can also contain other data for loss) (e.g. `{'observation': numpy.ndarray}` with `observation` of shape (batch_size, observation_space_size) or (batch_size, observation_space_size, stack_size) or `{'observation': numpy.ndarray, 'output_0_0': numpy.ndarray}` with `output_0_0` of shape (batch_size,)) :param targets: targets for calculating loss. For example discounted rewards for value network for calculating the value-network loss would be a target. Length of list and order of arrays in the list matches that of network losses which are defined by network parameters :param additional_fetches: list of additional values to fetch and return. The type of each list element is framework dependent. :param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss. :param no_accumulation: if True, set gradient values to the new gradients, otherwise sum with previously calculated gradients :return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors total_loss (float): sum of all head losses losses (list of float): list of all losses. The order is list of target losses followed by list of regularization losses. 
The specifics of losses is dependant on the network parameters (number of heads, etc.) norm_unclippsed_grads (float): global norm of all gradients before any gradient clipping is applied fetched_tensors: all values for additional_fetches """ raise NotImplementedError def apply_and_reset_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None: """ Applies the given gradients to the network weights and resets the gradient accumulations. Has the same impact as calling `apply_gradients`, then `reset_accumulated_gradients`. :param gradients: gradients for the parameter weights, taken from `accumulated_gradients` property of an identical network (either self or another identical network) :param scaler: A scaling factor that allows rescaling the gradients before applying them """ raise NotImplementedError def apply_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None: """ Applies the given gradients to the network weights. Will be performed sync or async depending on `network_parameters.async_training` :param gradients: gradients for the parameter weights, taken from `accumulated_gradients` property of an identical network (either self or another identical network) :param scaler: A scaling factor that allows rescaling the gradients before applying them """ raise NotImplementedError def get_variable_value(self, variable: Any) -> np.ndarray: """ Gets value of a specified variable. Type of variable is dependant on the framework. Example of a variable is head.kl_coefficient, which could be a symbol for evaluation or could be a string representing the value. :param variable: variable of interest :return: value of the specified variable """ raise NotImplementedError def set_variable_value(self, assign_op: Any, value: np.ndarray, placeholder: Any): """ Updates the value of a specified variable. Type of assign_op is dependant on the framework and is a unique identifier for assigning value to a variable. 
For example an agent may use head.assign_kl_coefficient. There is a one to one mapping between assign_op and placeholder (in the example above, placeholder would be head.kl_coefficient_ph). :param assign_op: a parameter representing the operation for assigning value to a specific variable :param value: value of the specified variable used for update :param placeholder: a placeholder for binding the value to assign_op. """ raise NotImplementedError def collect_savers(self, parent_path_suffix: str) -> SaverCollection: """ Collection of all savers for the network (typically only one saver for network and one for ONNX export) :param parent_path_suffix: path suffix of the parent of the network (e.g. could be name of level manager plus name of agent) :return: saver collection for the network """ raise NotImplementedError
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/architecture.py
0.96606
0.664282
architecture.py
pypi
from typing import Type from rl_coach.base_parameters import NetworkComponentParameters class HeadParameters(NetworkComponentParameters): def __init__(self, parameterized_class_name: str, activation_function: str = 'relu', name: str= 'head', num_output_head_copies: int=1, rescale_gradient_from_head_by_factor: float=1.0, loss_weight: float=1.0, dense_layer=None, is_training=False): super().__init__(dense_layer=dense_layer) self.activation_function = activation_function self.name = name self.num_output_head_copies = num_output_head_copies self.rescale_gradient_from_head_by_factor = rescale_gradient_from_head_by_factor self.loss_weight = loss_weight self.parameterized_class_name = parameterized_class_name self.is_training = is_training @property def path(self): return 'rl_coach.architectures.tensorflow_components.heads:' + self.parameterized_class_name class PPOHeadParameters(HeadParameters): def __init__(self, activation_function: str ='tanh', name: str='ppo_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="PPOHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) class VHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='v_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None, initializer='normalized_columns', output_bias_initializer=None): super().__init__(parameterized_class_name="VHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) self.initializer = 
initializer self.output_bias_initializer = output_bias_initializer class DDPGVHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='ddpg_v_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None, initializer='normalized_columns', output_bias_initializer=None): super().__init__(parameterized_class_name="DDPGVHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) self.initializer = initializer self.output_bias_initializer = output_bias_initializer class CategoricalQHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='categorical_q_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None, output_bias_initializer=None): super().__init__(parameterized_class_name="CategoricalQHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) self.output_bias_initializer = output_bias_initializer class RegressionHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='q_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None, scheme=None, output_bias_initializer=None): super().__init__(parameterized_class_name="RegressionHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) 
self.output_bias_initializer = output_bias_initializer class DDPGActorHeadParameters(HeadParameters): def __init__(self, activation_function: str ='tanh', name: str='policy_head_params', batchnorm: bool=True, num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="DDPGActor", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) self.batchnorm = batchnorm class WolpertingerActorHeadParameters(HeadParameters): def __init__(self, activation_function: str ='tanh', name: str='policy_head_params', batchnorm: bool=True, num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="WolpertingerActorHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) self.batchnorm = batchnorm class DNDQHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='dnd_q_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="DNDQHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) class DuelingQHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='dueling_q_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 
1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="DuelingQHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) class MeasurementsPredictionHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='measurements_prediction_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="MeasurementsPredictionHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) class NAFHeadParameters(HeadParameters): def __init__(self, activation_function: str ='tanh', name: str='naf_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="NAFHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) class PolicyHeadParameters(HeadParameters): def __init__(self, activation_function: str ='tanh', name: str='policy_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="PolicyHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) class 
PPOVHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='ppo_v_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None, output_bias_initializer=None): super().__init__(parameterized_class_name="PPOVHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) self.output_bias_initializer = output_bias_initializer class QHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='q_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None, output_bias_initializer=None): super().__init__(parameterized_class_name="QHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) self.output_bias_initializer = output_bias_initializer class ClassificationHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='classification_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="ClassificationHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) class QuantileRegressionQHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='quantile_regression_q_head_params', num_output_head_copies: int = 1, 
rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None, output_bias_initializer=None): super().__init__(parameterized_class_name="QuantileRegressionQHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) self.output_bias_initializer = output_bias_initializer class RainbowQHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='rainbow_q_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="RainbowQHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) class ACERPolicyHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='acer_policy_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None): super().__init__(parameterized_class_name="ACERPolicyHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) class SACPolicyHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='sac_policy_head_params', dense_layer=None): super().__init__(parameterized_class_name='SACPolicyHead', activation_function=activation_function, name=name, dense_layer=dense_layer) class SACQHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='sac_q_head_params', 
dense_layer=None, layers_sizes: tuple = (256, 256), output_bias_initializer=None): super().__init__(parameterized_class_name='SACQHead', activation_function=activation_function, name=name, dense_layer=dense_layer) self.network_layers_sizes = layers_sizes self.output_bias_initializer = output_bias_initializer class TD3VHeadParameters(HeadParameters): def __init__(self, activation_function: str ='relu', name: str='td3_v_head_params', num_output_head_copies: int = 1, rescale_gradient_from_head_by_factor: float = 1.0, loss_weight: float = 1.0, dense_layer=None, initializer='xavier', output_bias_initializer=None): super().__init__(parameterized_class_name="TD3VHead", activation_function=activation_function, name=name, dense_layer=dense_layer, num_output_head_copies=num_output_head_copies, rescale_gradient_from_head_by_factor=rescale_gradient_from_head_by_factor, loss_weight=loss_weight) self.initializer = initializer self.output_bias_initializer = output_bias_initializer
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/head_parameters.py
0.947308
0.25303
head_parameters.py
pypi
from typing import List, Tuple from rl_coach.base_parameters import Frameworks, AgentParameters from rl_coach.logger import failed_imports from rl_coach.saver import SaverCollection from rl_coach.spaces import SpacesDefinition from rl_coach.utils import force_list class NetworkWrapper(object): """ The network wrapper contains multiple copies of the same network, each one with a different set of weights which is updating in a different time scale. The network wrapper will always contain an online network. It will contain an additional slow updating target network if it was requested by the user, and it will contain a global network shared between different workers, if Coach is run in a single-node multi-process distributed mode. The network wrapper contains functionality for managing these networks and syncing between them. """ def __init__(self, agent_parameters: AgentParameters, has_target: bool, has_global: bool, name: str, spaces: SpacesDefinition, replicated_device=None, worker_device=None): self.ap = agent_parameters self.network_parameters = self.ap.network_wrappers[name] self.has_target = has_target self.has_global = has_global self.name = name self.sess = None if self.network_parameters.framework == Frameworks.tensorflow: try: import tensorflow as tf except ImportError: raise Exception('Install tensorflow before using it as framework') from rl_coach.architectures.tensorflow_components.general_network import GeneralTensorFlowNetwork general_network = GeneralTensorFlowNetwork.construct elif self.network_parameters.framework == Frameworks.mxnet: try: import mxnet as mx except ImportError: raise Exception('Install mxnet before using it as framework') from rl_coach.architectures.mxnet_components.general_network import GeneralMxnetNetwork general_network = GeneralMxnetNetwork.construct else: raise Exception("{} Framework is not supported" .format(Frameworks().to_string(self.network_parameters.framework))) variable_scope = "{}/{}".format(self.ap.full_name_id, 
name) # Global network - the main network shared between threads self.global_network = None if self.has_global: # we assign the parameters of this network on the parameters server self.global_network = general_network(variable_scope=variable_scope, devices=force_list(replicated_device), agent_parameters=agent_parameters, name='{}/global'.format(name), global_network=None, network_is_local=False, spaces=spaces, network_is_trainable=True) # Online network - local copy of the main network used for playing self.online_network = None self.online_network = general_network(variable_scope=variable_scope, devices=force_list(worker_device), agent_parameters=agent_parameters, name='{}/online'.format(name), global_network=self.global_network, network_is_local=True, spaces=spaces, network_is_trainable=True) # Target network - a local, slow updating network used for stabilizing the learning self.target_network = None if self.has_target: self.target_network = general_network(variable_scope=variable_scope, devices=force_list(worker_device), agent_parameters=agent_parameters, name='{}/target'.format(name), global_network=self.global_network, network_is_local=True, spaces=spaces, network_is_trainable=False) def sync(self): """ Initializes the weights of the networks to match each other :return: """ self.update_online_network() self.update_target_network() def update_target_network(self, rate=1.0): """ Copy weights: online network >>> target network :param rate: the rate of copying the weights - 1 for copying exactly """ if self.target_network: self.target_network.set_weights(self.online_network.get_weights(), rate) def update_online_network(self, rate=1.0): """ Copy weights: global network >>> online network :param rate: the rate of copying the weights - 1 for copying exactly """ if self.global_network: self.online_network.set_weights(self.global_network.get_weights(), rate) def apply_gradients_to_global_network(self, gradients=None, additional_inputs=None): """ Apply gradients from 
the online network on the global network :param gradients: optional gradients that will be used instead of teh accumulated gradients :param additional_inputs: optional additional inputs required for when applying the gradients (e.g. batchnorm's update ops also requires the inputs) :return: """ if gradients is None: gradients = self.online_network.accumulated_gradients if self.network_parameters.shared_optimizer: self.global_network.apply_gradients(gradients, additional_inputs=additional_inputs) else: self.online_network.apply_gradients(gradients, additional_inputs=additional_inputs) def apply_gradients_to_online_network(self, gradients=None, additional_inputs=None): """ Apply gradients from the online network on itself :param gradients: optional gradients that will be used instead of teh accumulated gradients :param additional_inputs: optional additional inputs required for when applying the gradients (e.g. batchnorm's update ops also requires the inputs) :return: """ if gradients is None: gradients = self.online_network.accumulated_gradients self.online_network.apply_gradients(gradients, additional_inputs=additional_inputs) def train_and_sync_networks(self, inputs, targets, additional_fetches=[], importance_weights=None, use_inputs_for_apply_gradients=False): """ A generic training function that enables multi-threading training using a global network if necessary. :param inputs: The inputs for the network. :param targets: The targets corresponding to the given inputs :param additional_fetches: Any additional tensor the user wants to fetch :param importance_weights: A coefficient for each sample in the batch, which will be used to rescale the loss error of this sample. If it is not given, the samples losses won't be scaled :param use_inputs_for_apply_gradients: Add the inputs also for when applying gradients (e.g. 
for incorporating batchnorm update ops) :return: The loss of the training iteration """ result = self.online_network.accumulate_gradients(inputs, targets, additional_fetches=additional_fetches, importance_weights=importance_weights, no_accumulation=True) if use_inputs_for_apply_gradients: self.apply_gradients_and_sync_networks(reset_gradients=False, additional_inputs=inputs) else: self.apply_gradients_and_sync_networks(reset_gradients=False) return result def apply_gradients_and_sync_networks(self, reset_gradients=True, additional_inputs=None): """ Applies the gradients accumulated in the online network to the global network or to itself and syncs the networks if necessary :param reset_gradients: If set to True, the accumulated gradients wont be reset to 0 after applying them to the network. this is useful when the accumulated gradients are overwritten instead if accumulated by the accumulate_gradients function. this allows reducing time complexity for this function by around 10% :param additional_inputs: optional additional inputs required for when applying the gradients (e.g. batchnorm's update ops also requires the inputs) """ if self.global_network: self.apply_gradients_to_global_network(additional_inputs=additional_inputs) if reset_gradients: self.online_network.reset_accumulated_gradients() self.update_online_network() else: if reset_gradients: self.online_network.apply_and_reset_gradients(self.online_network.accumulated_gradients, additional_inputs=additional_inputs) else: self.online_network.apply_gradients(self.online_network.accumulated_gradients, additional_inputs=additional_inputs) def parallel_prediction(self, network_input_tuples: List[Tuple]): """ Run several network prediction in parallel. Currently this only supports running each of the network once. 
:param network_input_tuples: a list of tuples where the first element is the network (online_network, target_network or global_network) and the second element is the inputs :return: the outputs of all the networks in the same order as the inputs were given """ return type(self.online_network).parallel_predict(self.sess, network_input_tuples) def set_is_training(self, state: bool): """ Set the phase of the network between training and testing :param state: The current state (True = Training, False = Testing) :return: None """ self.online_network.set_is_training(state) if self.has_target: self.target_network.set_is_training(state) def set_session(self, sess): self.sess = sess self.online_network.set_session(sess) if self.global_network: self.global_network.set_session(sess) if self.target_network: self.target_network.set_session(sess) def __str__(self): sub_networks = [] if self.global_network: sub_networks.append("global network") if self.online_network: sub_networks.append("online network") if self.target_network: sub_networks.append("target network") result = [] result.append("Network: {}, Copies: {} ({})".format(self.name, len(sub_networks), ' | '.join(sub_networks))) result.append("-"*len(result[-1])) result.append(str(self.online_network)) result.append("") return '\n'.join(result) def collect_savers(self, parent_path_suffix: str) -> SaverCollection: """ Collect all of network's savers for global or online network Note: global, online, and target network are all copies fo the same network which parameters that are updated at different rates. So we only need to save one of the networks; the one that holds the most recent parameters. target network is created for some agents and used for stabilizing training by updating parameters from online network at a slower rate. As a result, target network never contains the most recent set of parameters. In single-worker training, no global network is created and online network contains the most recent parameters. 
In vertical distributed training with more than one worker, global network is updated by all workers and contains the most recent parameters. Therefore preference is given to global network if it exists, otherwise online network is used for saving. :param parent_path_suffix: path suffix of the parent of the network wrapper (e.g. could be name of level manager plus name of agent) :return: collection of all checkpoint objects """ if self.global_network: savers = self.global_network.collect_savers(parent_path_suffix) else: savers = self.online_network.collect_savers(parent_path_suffix) return savers
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/network_wrapper.py
0.934492
0.364297
network_wrapper.py
pypi
from types import FunctionType from mxnet.gluon import nn from rl_coach.architectures import layers from rl_coach.architectures.mxnet_components import utils # define global dictionary for storing layer type to layer implementation mapping mx_layer_dict = dict() def reg_to_mx(layer_type) -> FunctionType: """ function decorator that registers layer implementation :return: decorated function """ def reg_impl_decorator(func): assert layer_type not in mx_layer_dict mx_layer_dict[layer_type] = func return func return reg_impl_decorator def convert_layer(layer): """ If layer is callable, return layer, otherwise convert to MX type :param layer: layer to be converted :return: converted layer if not callable, otherwise layer itself """ if callable(layer): return layer return mx_layer_dict[type(layer)](layer) class Conv2d(layers.Conv2d): def __init__(self, num_filters: int, kernel_size: int, strides: int): super(Conv2d, self).__init__(num_filters=num_filters, kernel_size=kernel_size, strides=strides) def __call__(self) -> nn.Conv2D: """ returns a conv2d block :return: conv2d block """ return nn.Conv2D(channels=self.num_filters, kernel_size=self.kernel_size, strides=self.strides) @staticmethod @reg_to_mx(layers.Conv2d) def to_mx(base: layers.Conv2d): return Conv2d(num_filters=base.num_filters, kernel_size=base.kernel_size, strides=base.strides) class BatchnormActivationDropout(layers.BatchnormActivationDropout): def __init__(self, batchnorm: bool=False, activation_function=None, dropout_rate: float=0): super(BatchnormActivationDropout, self).__init__( batchnorm=batchnorm, activation_function=activation_function, dropout_rate=dropout_rate) def __call__(self): """ returns a list of mxnet batchnorm, activation and dropout layers :return: batchnorm, activation and dropout layers """ block = nn.HybridSequential() if self.batchnorm: block.add(nn.BatchNorm()) if self.activation_function: block.add(nn.Activation(activation=utils.get_mxnet_activation_name(self.activation_function))) 
if self.dropout_rate: block.add(nn.Dropout(self.dropout_rate)) return block @staticmethod @reg_to_mx(layers.BatchnormActivationDropout) def to_mx(base: layers.BatchnormActivationDropout): return BatchnormActivationDropout( batchnorm=base.batchnorm, activation_function=base.activation_function, dropout_rate=base.dropout_rate) class Dense(layers.Dense): def __init__(self, units: int): super(Dense, self).__init__(units=units) def __call__(self): """ returns a mxnet dense layer :return: dense layer """ # Set flatten to False for consistent behavior with tf.layers.dense return nn.Dense(self.units, flatten=False) @staticmethod @reg_to_mx(layers.Dense) def to_mx(base: layers.Dense): return Dense(units=base.units)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/layers.py
0.946057
0.473657
layers.py
pypi
import copy from typing import Any, Dict, Generator, List, Tuple, Union import numpy as np import mxnet as mx from mxnet import autograd, gluon, nd from mxnet.ndarray import NDArray from rl_coach.architectures.architecture import Architecture from rl_coach.architectures.mxnet_components.heads.head import LOSS_OUT_TYPE_LOSS, LOSS_OUT_TYPE_REGULARIZATION from rl_coach.architectures.mxnet_components import utils from rl_coach.architectures.mxnet_components.savers import ParameterDictSaver, OnnxSaver from rl_coach.base_parameters import AgentParameters from rl_coach.logger import screen from rl_coach.saver import SaverCollection from rl_coach.spaces import SpacesDefinition from rl_coach.utils import force_list, squeeze_list class MxnetArchitecture(Architecture): def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, devices: List[mx.Context], name: str = "", global_network=None, network_is_local: bool=True, network_is_trainable: bool=False): """ :param agent_parameters: the agent parameters :param spaces: the spaces definition of the agent :param name: the name of the network :param global_network: the global network replica that is shared between all the workers :param network_is_local: is the network global (shared between workers) or local (dedicated to the worker) :param network_is_trainable: is the network trainable (we can apply gradients on it) """ super().__init__(agent_parameters, spaces, name) self.middleware = None self.network_is_local = network_is_local self.global_network = global_network if not self.network_parameters.tensorflow_support: raise ValueError('TensorFlow is not supported for this agent') self.losses = [] # type: List[HeadLoss] self.shared_accumulated_gradients = [] self.curr_rnn_c_in = None self.curr_rnn_h_in = None self.gradients_wrt_inputs = [] self.train_writer = None self.accumulated_gradients = None self.network_is_trainable = network_is_trainable self.is_training = False self.model = None # type: GeneralModel 
self._devices = self._sanitize_device_list(devices) self.is_chief = self.ap.task_parameters.task_index == 0 self.network_is_global = not self.network_is_local and global_network is None self.distributed_training = self.network_is_global or self.network_is_local and global_network is not None self.optimizer_type = self.network_parameters.optimizer_type if self.ap.task_parameters.seed is not None: mx.random.seed(self.ap.task_parameters.seed) # Call to child class to create the model self.construct_model() self.trainer = None # type: gluon.Trainer def __str__(self): return self.model.summary(*self._dummy_model_inputs()) @staticmethod def _sanitize_device_list(devices: List[mx.Context]) -> List[mx.Context]: """ Returns intersection of devices with available devices. If no intersection, returns mx.cpu() :param devices: list of requested devices :return: list of devices that are actually available """ actual_device = [mx.cpu()] + [mx.gpu(i) for i in mx.test_utils.list_gpus()] intersection = [dev for dev in devices if dev in actual_device] if len(intersection) == 0: intersection = [mx.cpu()] screen.log('Requested devices {} not available. Default to CPU context.'.format(devices)) elif len(intersection) < len(devices): screen.log('{} not available, using {}.'.format( [dev for dev in devices if dev not in intersection], intersection)) return intersection def _model_grads(self, index: int=0) ->\ Union[Generator[NDArray, NDArray, Any], Generator[List[NDArray], List[NDArray], Any]]: """ Creates a copy of model gradients and returns them in a list, in the same order as collect_params() :param index: device index. 
Set to -1 to get a tuple of list of NDArrays for all devices :return: a generator for model gradient values """ if index < 0: return (p.list_grad() for p in self.model.collect_params().values() if p.grad_req != 'null') else: return (p.list_grad()[index] for p in self.model.collect_params().values() if p.grad_req != 'null') def _model_input_shapes(self) -> List[List[int]]: """ Create a list of input array shapes :return: type of input shapes """ allowed_inputs = copy.copy(self.spaces.state.sub_spaces) allowed_inputs["action"] = copy.copy(self.spaces.action) allowed_inputs["goal"] = copy.copy(self.spaces.goal) embedders = self.model.nets[0].input_embedders return list([1] + allowed_inputs[emb.embedder_name].shape.tolist() for emb in embedders) def _dummy_model_inputs(self) -> Tuple[NDArray, ...]: """ Creates a tuple of input arrays with correct shapes that can be used for shape inference of the model weights and for printing the summary :return: tuple of inputs for model forward pass """ input_shapes = self._model_input_shapes() inputs = tuple(nd.zeros(tuple(shape), ctx=self._devices[0]) for shape in input_shapes) return inputs def construct_model(self) -> None: """ Construct network model. Implemented by child class. """ raise NotImplementedError def set_session(self, sess) -> None: """ Initializes the model parameters and creates the model trainer. NOTEL Session for mxnet backend must be None. 
:param sess: must be None """ assert sess is None # FIXME Add initializer self.model.collect_params().initialize(ctx=self._devices) # Hybridize model and losses self.model.hybridize() for l in self.losses: l.hybridize() # Pass dummy data with correct shape to trigger shape inference and full parameter initialization self.model(*self._dummy_model_inputs()) if self.network_is_trainable: self.trainer = gluon.Trainer( self.model.collect_params(), optimizer=self.optimizer, update_on_kvstore=False) def reset_accumulated_gradients(self) -> None: """ Reset model gradients as well as accumulated gradients to zero. If accumulated gradients have not been created yet, it constructs them on CPU. """ # Set model gradients to zero for p in self.model.collect_params().values(): p.zero_grad() # Set accumulated gradients to zero if already initialized, otherwise create a copy if self.accumulated_gradients: for a in self.accumulated_gradients: a *= 0 else: self.accumulated_gradients = [g.copy() for g in self._model_grads()] def accumulate_gradients(self, inputs: Dict[str, np.ndarray], targets: List[np.ndarray], additional_fetches: List[Tuple[int, str]] = None, importance_weights: np.ndarray = None, no_accumulation: bool = False) -> Tuple[float, List[float], float, list]: """ Runs a forward & backward pass, clips gradients if needed and accumulates them into the accumulation :param inputs: environment states (observation, etc.) as well extra inputs required by loss. Shape of ndarray is (batch_size, observation_space_size) or (batch_size, observation_space_size, stack_size) :param targets: targets required by loss (e.g. sum of discounted rewards) :param additional_fetches: additional fetches to calculate and return. Each fetch is specified as (int, str) tuple of head-type-index and fetch-name. The tuple is obtained from each head. :param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss. 
:param no_accumulation: if True, set gradient values to the new gradients, otherwise sum with previously calculated gradients :return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors total_loss (float): sum of all head losses losses (list of float): list of all losses. The order is list of target losses followed by list of regularization losses. The specifics of losses is dependant on the network parameters (number of heads, etc.) norm_unclippsed_grads (float): global norm of all gradients before any gradient clipping is applied fetched_tensors: all values for additional_fetches """ if self.accumulated_gradients is None: self.reset_accumulated_gradients() embedders = [emb.embedder_name for emb in self.model.nets[0].input_embedders] nd_inputs = tuple(nd.array(inputs[emb], ctx=self._devices[0]) for emb in embedders) assert self.middleware.__class__.__name__ != 'LSTMMiddleware', "LSTM middleware not supported" targets = force_list(targets) with autograd.record(): out_per_head = utils.split_outputs_per_head(self.model(*nd_inputs), self.model.output_heads) tgt_per_loss = utils.split_targets_per_loss(targets, self.losses) losses = list() regularizations = list() additional_fetches = [(k, None) for k in additional_fetches] for h, h_loss, h_out, l_tgt in zip(self.model.output_heads, self.losses, out_per_head, tgt_per_loss): l_in = utils.get_loss_agent_inputs(inputs, head_type_idx=h.head_type_idx, loss=h_loss) # Align arguments with loss.loss_forward and convert to NDArray l_args = utils.to_mx_ndarray(utils.align_loss_args(h_out, l_in, l_tgt, h_loss), h_out[0].context) # Calculate loss and all auxiliary outputs loss_outputs = utils.loss_output_dict(utils.to_list(h_loss(*l_args)), h_loss.output_schema) if LOSS_OUT_TYPE_LOSS in loss_outputs: losses.extend(loss_outputs[LOSS_OUT_TYPE_LOSS]) if LOSS_OUT_TYPE_REGULARIZATION in loss_outputs: regularizations.extend(loss_outputs[LOSS_OUT_TYPE_REGULARIZATION]) # Set additional fetches for i, fetch in 
enumerate(additional_fetches): head_type_idx, fetch_name = fetch[0] # fetch key is a tuple of (head_type_index, fetch_name) if head_type_idx == h.head_type_idx: assert fetch[1] is None # sanity check that fetch is None additional_fetches[i] = (fetch[0], loss_outputs[fetch_name]) # Total loss is losses and regularization (NOTE: order is important) total_loss_list = losses + regularizations total_loss = nd.add_n(*total_loss_list) # Calculate gradients total_loss.backward() assert self.optimizer_type != 'LBFGS', 'LBFGS not supported' # allreduce gradients from all contexts self.trainer.allreduce_grads() model_grads_cpy = [g.copy() for g in self._model_grads()] # Calculate global norm of gradients # FIXME global norm is returned even when not used for clipping! Is this necessary? # FIXME global norm might be calculated twice if clipping method is global norm norm_unclipped_grads = utils.global_norm(model_grads_cpy) # Clip gradients if self.network_parameters.clip_gradients: utils.clip_grad( model_grads_cpy, clip_method=self.network_parameters.gradients_clipping_method, clip_val=self.network_parameters.clip_gradients, inplace=True) # Update self.accumulated_gradients depending on no_accumulation flag if no_accumulation: for acc_grad, model_grad in zip(self.accumulated_gradients, model_grads_cpy): acc_grad[:] = model_grad else: for acc_grad, model_grad in zip(self.accumulated_gradients, model_grads_cpy): acc_grad += model_grad # result of of additional fetches fetched_tensors = [fetch[1] for fetch in additional_fetches] # convert everything to numpy or scalar before returning result = utils.asnumpy_or_asscalar((total_loss, total_loss_list, norm_unclipped_grads, fetched_tensors)) return result def apply_and_reset_gradients(self, gradients: List[np.ndarray], scaler: float=1.) 
-> None: """ Applies the given gradients to the network weights and resets accumulated gradients to zero :param gradients: The gradients to use for the update :param scaler: A scaling factor that allows rescaling the gradients before applying them """ self.apply_gradients(gradients, scaler) self.reset_accumulated_gradients() def apply_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None: """ Applies the given gradients to the network weights :param gradients: The gradients to use for the update :param scaler: A scaling factor that allows rescaling the gradients before applying them. The gradients will be MULTIPLIED by this factor """ assert self.optimizer_type != 'LBFGS' batch_size = 1 if self.distributed_training and not self.network_parameters.async_training: # rescale the gradients so that they average out with the gradients from the other workers if self.network_parameters.scale_down_gradients_by_number_of_workers_for_sync_training: batch_size = self.ap.task_parameters.num_training_tasks # set parameter gradients to gradients passed in for param_grad, gradient in zip(self._model_grads(-1), gradients): for pg in param_grad: pg[:] = gradient # update gradients self.trainer.update(batch_size=batch_size) def _predict(self, inputs: Dict[str, np.ndarray]) -> Tuple[NDArray, ...]: """ Run a forward pass of the network using the given input :param inputs: The input dictionary for the network. Key is name of the embedder. :return: The network output WARNING: must only call once per state since each call is assumed by LSTM to be a new time step. 
""" embedders = [emb.embedder_name for emb in self.model.nets[0].input_embedders] nd_inputs = tuple(nd.array(inputs[emb], ctx=self._devices[0]) for emb in embedders) assert self.middleware.__class__.__name__ != 'LSTMMiddleware' output = self.model(*nd_inputs) return output def predict(self, inputs: Dict[str, np.ndarray], outputs: List[str]=None, squeeze_output: bool=True, initial_feed_dict: Dict[str, np.ndarray]=None) -> Tuple[np.ndarray, ...]: """ Run a forward pass of the network using the given input :param inputs: The input dictionary for the network. Key is name of the embedder. :param outputs: list of outputs to return. Return all outputs if unspecified (currently not supported) :param squeeze_output: call squeeze_list on output if True :param initial_feed_dict: a dictionary of extra inputs for forward pass (currently not supported) :return: The network output WARNING: must only call once per state since each call is assumed by LSTM to be a new time step. """ assert initial_feed_dict is None, "initial_feed_dict must be None" assert outputs is None, "outputs must be None" output = self._predict(inputs) output = list(o.asnumpy() for o in output) if squeeze_output: output = squeeze_list(output) return output @staticmethod def parallel_predict(sess: Any, network_input_tuples: List[Tuple['MxnetArchitecture', Dict[str, np.ndarray]]]) -> \ Tuple[np.ndarray, ...]: """ :param sess: active session to use for prediction (must be None for MXNet) :param network_input_tuples: tuple of network and corresponding input :return: tuple of outputs from all networks """ assert sess is None output = list() for net, inputs in network_input_tuples: output += net._predict(inputs) return tuple(o.asnumpy() for o in output) def train_on_batch(self, inputs: Dict[str, np.ndarray], targets: List[np.ndarray], scaler: float = 1., additional_fetches: list = None, importance_weights: np.ndarray = None) -> Tuple[float, List[float], float, list]: """ Given a batch of inputs (e.g. 
states) and targets (e.g. discounted rewards), takes a training step: i.e. runs a forward pass and backward pass of the network, accumulates the gradients and applies an optimization step to update the weights. :param inputs: environment states (observation, etc.) as well extra inputs required by loss. Shape of ndarray is (batch_size, observation_space_size) or (batch_size, observation_space_size, stack_size) :param targets: targets required by loss (e.g. sum of discounted rewards) :param scaler: value to scale gradients by before optimizing network weights :param additional_fetches: additional fetches to calculate and return. Each fetch is specified as (int, str) tuple of head-type-index and fetch-name. The tuple is obtained from each head. :param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss. :return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors total_loss (float): sum of all head losses losses (list of float): list of all losses. The order is list of target losses followed by list of regularization losses. The specifics of losses is dependant on the network parameters (number of heads, etc.) 
norm_unclippsed_grads (float): global norm of all gradients before any gradient clipping is applied fetched_tensors: all values for additional_fetches """ loss = self.accumulate_gradients(inputs, targets, additional_fetches=additional_fetches, importance_weights=importance_weights) self.apply_and_reset_gradients(self.accumulated_gradients, scaler) return loss def get_weights(self) -> gluon.ParameterDict: """ :return: a ParameterDict containing all network weights """ return self.model.collect_params() def set_weights(self, weights: gluon.ParameterDict, new_rate: float=1.0) -> None: """ Sets the network weights from the given ParameterDict :param new_rate: ratio for adding new and old weight values: val=rate*weights + (1-rate)*old_weights """ old_weights = self.model.collect_params() for name, p in weights.items(): name = name[len(weights.prefix):] # Strip prefix old_p = old_weights[old_weights.prefix + name] # Add prefix old_p.set_data(new_rate * p._reduce() + (1 - new_rate) * old_p._reduce()) def get_variable_value(self, variable: Union[gluon.Parameter, NDArray]) -> np.ndarray: """ Get the value of a variable :param variable: the variable :return: the value of the variable """ if isinstance(variable, gluon.Parameter): variable = variable._reduce().asnumpy() if isinstance(variable, NDArray): return variable.asnumpy() return variable def set_variable_value(self, assign_op: callable, value: Any, placeholder=None) -> None: """ Updates value of a variable. 
:param assign_op: a callable assign function for setting the variable :param value: a value to set the variable to :param placeholder: unused (placeholder in symbolic framework backends) """ assert callable(assign_op) assign_op(value) def set_is_training(self, state: bool) -> None: """ Set the phase of the network between training and testing :param state: The current state (True = Training, False = Testing) :return: None """ self.is_training = state def reset_internal_memory(self) -> None: """ Reset any internal memory used by the network. For example, an LSTM internal state :return: None """ assert self.middleware.__class__.__name__ != 'LSTMMiddleware', 'LSTM middleware not supported' def collect_savers(self, parent_path_suffix: str) -> SaverCollection: """ Collection of all checkpoints for the network (typically only one checkpoint) :param parent_path_suffix: path suffix of the parent of the network (e.g. could be name of level manager plus name of agent) :return: checkpoint collection for the network """ name = self.name.replace('/', '.') savers = SaverCollection(ParameterDictSaver( name="{}.{}".format(parent_path_suffix, name), param_dict=self.model.collect_params())) if self.ap.task_parameters.export_onnx_graph: savers.add(OnnxSaver( name="{}.{}.onnx".format(parent_path_suffix, name), model=self.model, input_shapes=self._model_input_shapes())) return savers
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/architecture.py
0.858748
0.272617
architecture.py
pypi
from typing import Any, List, Tuple

from mxnet import gluon, sym
from mxnet.contrib import onnx as onnx_mxnet
import numpy as np

from rl_coach.architectures.mxnet_components.utils import ScopedOnnxEnable
from rl_coach.saver import Saver


class ParameterDictSaver(Saver):
    """
    Child class that implements saver for mxnet gluon parameter dictionary
    """
    def __init__(self, name: str, param_dict: gluon.ParameterDict):
        # name: checkpoint path/identity; param_dict: weights to save/restore
        self._name = name
        self._param_dict = param_dict

    @property
    def path(self):
        """
        Relative path for save/load. If two checkpoint objects return the same path, they must be merge-able.
        """
        return self._name

    def save(self, sess: None, save_path: str) -> List[str]:
        """
        Save to save_path
        :param sess: active session for session-based frameworks (e.g. TF). Must be None here.
        :param save_path: full path to save checkpoint (typically directory plus
            self.path plus checkpoint count).
        :return: list of all saved paths
        """
        assert sess is None
        self._param_dict.save(save_path)
        return [save_path]

    def restore(self, sess: Any, restore_path: str):
        """
        Restore from restore_path
        :param sess: active session for session-based frameworks (e.g. TF). Must be None here.
        :param restore_path: full path to load checkpoint from.
        """
        assert sess is None
        self._param_dict.load(restore_path)

    def merge(self, other: 'Saver'):
        """
        Merge other saver into this saver
        :param other: saver to be merged into self
        :raises TypeError: if other is not a ParameterDictSaver
        """
        if not isinstance(other, ParameterDictSaver):
            raise TypeError('merging only supported with ParameterDictSaver (type:{})'.format(type(other)))
        self._param_dict.update(other._param_dict)


class OnnxSaver(Saver):
    """
    Child class that implements saver for exporting gluon HybridBlock to ONNX
    """
    def __init__(self, name: str, model: gluon.HybridBlock, input_shapes: List[List[int]]):
        self._name = name
        # Symbolic graph is traced once at construction time.
        self._sym = self._get_onnx_sym(model, len(input_shapes))
        self._param_dict = model.collect_params()
        self._input_shapes = input_shapes

    @staticmethod
    def _get_onnx_sym(model: gluon.HybridBlock, num_inputs: int) -> sym.Symbol:
        """
        Returns a symbolic graph for the model
        :param model: gluon HybridBlock that constructs the symbolic graph
        :param num_inputs: number of inputs to the graph
        :return: symbol for the network
        """
        var_args = [sym.Variable('Data{}'.format(i)) for i in range(num_inputs)]
        # ScopedOnnxEnable switches all OnnxHandlerBlock children into their
        # ONNX-compatible forward path while tracing.
        with ScopedOnnxEnable(model):
            return sym.Group(gluon.block._flatten(model(*var_args), "output")[0])

    @property
    def path(self):
        """
        Relative path for save/load. If two checkpoint objects return the same path, they must be merge-able.
        """
        return self._name

    def save(self, sess: None, save_path: str) -> List[str]:
        """
        Save to save_path
        :param sess: active session for session-based frameworks (e.g. TF). Must be None.
        :param save_path: full path to save checkpoint (typically directory plus
            self.path plus checkpoint count).
        :return: list of all saved paths
        """
        assert sess is None
        # Parameters must be pulled onto a single context before export.
        params = {name:param._reduce() for name, param in self._param_dict.items()}
        export_path = onnx_mxnet.export_model(self._sym, params, self._input_shapes, np.float32, save_path)
        return [export_path]

    def restore(self, sess: Any, restore_path: str):
        """
        Restore from restore_path
        :param sess: active session for session-based frameworks (e.g. TF). Must be None.
        :param restore_path: full path to load checkpoint from.
        """
        assert sess is None
        # Nothing to restore for ONNX

    def merge(self, other: 'Saver'):
        """
        Merge other saver into this saver
        :param other: saver to be merged into self
        :raises RuntimeError: always — ONNX exports are never merged
        """
        # No merging is supported for ONNX. self.path must be unique
        raise RuntimeError('merging not supported for ONNX exporter')
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/savers.py
0.908303
0.350199
savers.py
pypi
import copy
from itertools import chain
from typing import List, Tuple, Union
from types import ModuleType

import numpy as np
import mxnet as mx
from mxnet import nd, sym
from mxnet.gluon import HybridBlock
from mxnet.ndarray import NDArray
from mxnet.symbol import Symbol

from rl_coach.base_parameters import NetworkParameters
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import HeadParameters, PPOHeadParameters
from rl_coach.architectures.head_parameters import PPOVHeadParameters, VHeadParameters, QHeadParameters
from rl_coach.architectures.middleware_parameters import MiddlewareParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters, LSTMMiddlewareParameters
from rl_coach.architectures.mxnet_components.architecture import MxnetArchitecture
from rl_coach.architectures.mxnet_components.embedders import ImageEmbedder, TensorEmbedder, VectorEmbedder
from rl_coach.architectures.mxnet_components.heads import Head, HeadLoss, PPOHead, PPOVHead, VHead, QHead
from rl_coach.architectures.mxnet_components.middlewares import FCMiddleware, LSTMMiddleware
from rl_coach.architectures.mxnet_components import utils
from rl_coach.base_parameters import AgentParameters, Device, DeviceType, EmbeddingMergerType
from rl_coach.spaces import SpacesDefinition, PlanarMapsObservationSpace, TensorObservationSpace


class GeneralMxnetNetwork(MxnetArchitecture):
    """
    A generalized version of all possible networks implemented using mxnet.
    """
    @staticmethod
    def construct(variable_scope: str, devices: List[str], *args, **kwargs) -> 'GeneralMxnetNetwork':
        """
        Construct a network class using the provided variable scope and on requested devices
        :param variable_scope: string specifying variable scope under which to create network variables
            (unused by the mxnet backend; kept for interface parity with the TF backend)
        :param devices: list of devices (can be list of Device objects, or string for TF distributed)
        :param args: all other arguments for class initializer
        :param kwargs: all other keyword arguments for class initializer
        :return: a GeneralMxnetNetwork object
        """
        return GeneralMxnetNetwork(*args, devices=[GeneralMxnetNetwork._mx_device(d) for d in devices], **kwargs)

    @staticmethod
    def _mx_device(device: Union[str, Device]) -> mx.Context:
        """
        Convert device to mxnet-specific device representation
        :param device: either a specific string (used in distributed mode) which is returned without
            any change or a Device type
        :return: mxnet context for the device
        """
        if isinstance(device, Device):
            if device.device_type == DeviceType.CPU:
                return mx.cpu()
            elif device.device_type == DeviceType.GPU:
                return mx.gpu(device.index)
            else:
                raise ValueError("Invalid device_type: {}".format(device.device_type))
        else:
            raise ValueError("Invalid device instance type: {}".format(type(device)))

    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, devices: List[mx.Context],
                 name: str, global_network=None, network_is_local: bool=True, network_is_trainable: bool=False):
        """
        :param agent_parameters: the agent parameters
        :param spaces: the spaces definition of the agent
        :param devices: list of devices to run the network on
        :param name: the name of the network
        :param global_network: the global network replica that is shared between all the workers
        :param network_is_local: is the network global (shared between workers) or local (dedicated to the worker)
        :param network_is_trainable: is the network trainable (we can apply gradients on it)
        """
        self.network_wrapper_name = name.split('/')[0]
        self.network_parameters = agent_parameters.network_wrappers[self.network_wrapper_name]
        # Either one network per head, or one network carrying all heads.
        if self.network_parameters.use_separate_networks_per_head:
            self.num_heads_per_network = 1
            self.num_networks = len(self.network_parameters.heads_parameters)
        else:
            self.num_heads_per_network = len(self.network_parameters.heads_parameters)
            self.num_networks = 1
        super().__init__(agent_parameters, spaces, devices, name, global_network,
                         network_is_local, network_is_trainable)

    def construct_model(self):
        """
        Build the gluon model, per-head losses, learning-rate schedule and optimizer.
        Called by the base class during initialization.
        """
        # validate the configuration
        if len(self.network_parameters.input_embedders_parameters) == 0:
            raise ValueError("At least one input type should be defined")
        if len(self.network_parameters.heads_parameters) == 0:
            raise ValueError("At least one output type should be defined")
        if self.network_parameters.middleware_parameters is None:
            raise ValueError("Exactly one middleware type should be defined")

        self.model = GeneralModel(
            num_networks=self.num_networks,
            num_heads_per_network=self.num_heads_per_network,
            network_is_local=self.network_is_local,
            network_name=self.network_wrapper_name,
            agent_parameters=self.ap,
            network_parameters=self.network_parameters,
            spaces=self.spaces)
        self.losses = self.model.losses()

        # Learning rate
        lr_scheduler = None
        if self.network_parameters.learning_rate_decay_rate != 0:
            lr_scheduler = mx.lr_scheduler.FactorScheduler(
                step=self.network_parameters.learning_rate_decay_steps,
                factor=self.network_parameters.learning_rate_decay_rate)

        # Optimizer
        # FIXME Does this code for distributed training make sense?
        if self.distributed_training and self.network_is_local and self.network_parameters.shared_optimizer:
            # distributed training + is a local network + optimizer shared -> take the global optimizer
            self.optimizer = self.global_network.optimizer
        elif (self.distributed_training and self.network_is_local and not self.network_parameters.shared_optimizer)\
                or self.network_parameters.shared_optimizer or not self.distributed_training:
            if self.network_parameters.optimizer_type == 'Adam':
                self.optimizer = mx.optimizer.Adam(
                    learning_rate=self.network_parameters.learning_rate,
                    beta1=self.network_parameters.adam_optimizer_beta1,
                    beta2=self.network_parameters.adam_optimizer_beta2,
                    epsilon=self.network_parameters.optimizer_epsilon,
                    lr_scheduler=lr_scheduler)
            elif self.network_parameters.optimizer_type == 'RMSProp':
                self.optimizer = mx.optimizer.RMSProp(
                    learning_rate=self.network_parameters.learning_rate,
                    gamma1=self.network_parameters.rms_prop_optimizer_decay,
                    epsilon=self.network_parameters.optimizer_epsilon,
                    lr_scheduler=lr_scheduler)
            elif self.network_parameters.optimizer_type == 'LBFGS':
                raise NotImplementedError('LBFGS optimizer not implemented')
            else:
                raise Exception("{} is not a valid optimizer type".format(self.network_parameters.optimizer_type))

    @property
    def output_heads(self):
        # Delegates to the underlying GeneralModel.
        return self.model.output_heads


def _get_activation(activation_function_string: str):
    """
    Map the activation function from a string to the mxnet framework equivalent
    :param activation_function_string: the type of the activation function
    :return: mxnet activation function string
    """
    return utils.get_mxnet_activation_name(activation_function_string)


def _sanitize_activation(params: Union[InputEmbedderParameters, MiddlewareParameters, HeadParameters]) ->\
        Union[InputEmbedderParameters, MiddlewareParameters, HeadParameters]:
    """
    Change activation function to the mxnet specific value
    :param params: any parameter that has activation_function property
    :return: copy of params with activation function correctly set
    """
    # Shallow copy so the caller's parameters object is left untouched.
    params_copy = copy.copy(params)
    params_copy.activation_function = _get_activation(params.activation_function)
    return params_copy


def _get_input_embedder(spaces: SpacesDefinition,
                        input_name: str,
                        embedder_params: InputEmbedderParameters) -> ModuleType:
    """
    Given an input embedder parameters class, creates the input embedder and returns it
    :param spaces: state and action space definitions used to resolve the input's shape
    :param input_name: the name of the input to the embedder (used for retrieving the shape).
        The input should be a value within the state or the action.
    :param embedder_params: the parameters of the class of the embedder
    :return: the embedder instance
    """
    allowed_inputs = copy.copy(spaces.state.sub_spaces)
    allowed_inputs["action"] = copy.copy(spaces.action)
    allowed_inputs["goal"] = copy.copy(spaces.goal)

    if input_name not in allowed_inputs.keys():
        raise ValueError("The key for the input embedder ({}) must match one of the following keys: {}"
                         .format(input_name, allowed_inputs.keys()))

    # NOTE(review): `type` here shadows the builtin; kept as-is for fidelity.
    type = "vector"
    if isinstance(allowed_inputs[input_name], TensorObservationSpace):
        type = "tensor"
    elif isinstance(allowed_inputs[input_name], PlanarMapsObservationSpace):
        type = "image"

    def sanitize_params(params: InputEmbedderParameters):
        # Fix activation and attach the input name; rescaling/offset lookup is
        # intentionally disabled (see commented lines).
        params_copy = _sanitize_activation(params)
        # params_copy.input_rescaling = params_copy.input_rescaling[type]
        # params_copy.input_offset = params_copy.input_offset[type]
        params_copy.name = input_name
        return params_copy

    embedder_params = sanitize_params(embedder_params)
    if type == 'vector':
        module = VectorEmbedder(embedder_params)
    elif type == 'image':
        module = ImageEmbedder(embedder_params)
    elif type == 'tensor':
        module = TensorEmbedder(embedder_params)
    else:
        raise KeyError('Unsupported embedder type: {}'.format(type))
    return module


def _get_middleware(middleware_params: MiddlewareParameters) -> ModuleType:
    """
    Given a middleware type, creates the middleware and returns it
    :param middleware_params: the parameters of the middleware class
    :return: the middleware instance
    """
    middleware_params = _sanitize_activation(middleware_params)
    if isinstance(middleware_params, FCMiddlewareParameters):
        module = FCMiddleware(middleware_params)
    elif isinstance(middleware_params, LSTMMiddlewareParameters):
        module = LSTMMiddleware(middleware_params)
    else:
        raise KeyError('Unsupported middleware type: {}'.format(type(middleware_params)))
    return module


def _get_output_head(
        head_params: HeadParameters,
        head_idx: int,
        head_type_index: int,
        agent_params: AgentParameters,
        spaces: SpacesDefinition,
        network_name: str,
        is_local: bool) -> Head:
    """
    Given a head type, creates the head and returns it
    :param head_params: the parameters of the head to create
    :param head_idx: the head index
    :param head_type_index: the head type index (same index if head_param.num_output_head_copies>0)
    :param agent_params: agent parameters
    :param spaces: state and action space definitions
    :param network_name: name of the network
    :param is_local: whether the network this head belongs to is local
    :return: head block
    """
    head_params = _sanitize_activation(head_params)
    if isinstance(head_params, PPOHeadParameters):
        module = PPOHead(
            agent_parameters=agent_params,
            spaces=spaces,
            network_name=network_name,
            head_type_idx=head_type_index,
            loss_weight=head_params.loss_weight,
            is_local=is_local,
            activation_function=head_params.activation_function,
            dense_layer=head_params.dense_layer)
    elif isinstance(head_params, VHeadParameters):
        module = VHead(
            agent_parameters=agent_params,
            spaces=spaces,
            network_name=network_name,
            head_type_idx=head_type_index,
            loss_weight=head_params.loss_weight,
            is_local=is_local,
            activation_function=head_params.activation_function,
            dense_layer=head_params.dense_layer)
    elif isinstance(head_params, PPOVHeadParameters):
        module = PPOVHead(
            agent_parameters=agent_params,
            spaces=spaces,
            network_name=network_name,
            head_type_idx=head_type_index,
            loss_weight=head_params.loss_weight,
            is_local=is_local,
            activation_function=head_params.activation_function,
            dense_layer=head_params.dense_layer)
    elif isinstance(head_params, QHeadParameters):
        module = QHead(
            agent_parameters=agent_params,
            spaces=spaces,
            network_name=network_name,
            head_type_idx=head_type_index,
            loss_weight=head_params.loss_weight,
            is_local=is_local,
            activation_function=head_params.activation_function,
            dense_layer=head_params.dense_layer)
    else:
        raise KeyError('Unsupported head type: {}'.format(type(head_params)))
    return module


class ScaledGradHead(HybridBlock, utils.OnnxHandlerBlock):
    """
    Wrapper block for applying gradient scaling to input before feeding the head network
    """
    def __init__(self,
                 head_index: int,
                 head_type_index: int,
                 network_name: str,
                 spaces: SpacesDefinition,
                 network_is_local: bool,
                 agent_params: AgentParameters,
                 head_params: HeadParameters) -> None:
        """
        :param head_index: the head index
        :param head_type_index: the head type index (same index if head_param.num_output_head_copies>0)
        :param network_name: name of the network
        :param spaces: state and action space definitions
        :param network_is_local: whether network is local
        :param agent_params: agent parameters
        :param head_params: head parameters
        """
        super(ScaledGradHead, self).__init__()
        utils.OnnxHandlerBlock.__init__(self)
        head_params = _sanitize_activation(head_params)
        with self.name_scope():
            self.head = _get_output_head(
                head_params=head_params,
                head_idx=head_index,
                head_type_index=head_type_index,
                agent_params=agent_params,
                spaces=spaces,
                network_name=network_name,
                is_local=network_is_local)
            # Constant scale used to attenuate the gradient flowing from this
            # head back into the shared middleware.
            self.gradient_rescaler = self.params.get_constant(
                name='gradient_rescaler',
                value=np.array([float(head_params.rescale_gradient_from_head_by_factor)]))
            # self.gradient_rescaler = self.params.get(
            #     name='gradient_rescaler',
            #     shape=(1,),
            #     init=mx.init.Constant(float(head_params.rescale_gradient_from_head_by_factor)))

    def hybrid_forward(self,
                       F: ModuleType,
                       x: Union[NDArray, Symbol],
                       gradient_rescaler: Union[NDArray, Symbol]) -> Tuple[Union[NDArray, Symbol], ...]:
        """
        Overrides gluon.HybridBlock.hybrid_forward
        :param nd or sym F: ndarray or symbol module
        :param x: head input
        :param gradient_rescaler: gradient rescaler for partial blocking of gradient
        :return: head output
        """
        if self._onnx:
            # ONNX doesn't support BlockGrad() operator, but it's not typically needed for
            # ONNX because mostly forward calls are performed using ONNX exported network.
            grad_scaled_x = x
        else:
            # Forward value is unchanged; backward gradient is scaled by
            # gradient_rescaler (BlockGrad zeroes the gradient of its term).
            grad_scaled_x = (F.broadcast_mul((1 - gradient_rescaler), F.BlockGrad(x)) +
                             F.broadcast_mul(gradient_rescaler, x))
        out = self.head(grad_scaled_x)
        return out


class SingleModel(HybridBlock):
    """
    Block that connects a single embedder, with middleware and one to multiple heads
    """
    def __init__(self,
                 network_is_local: bool,
                 network_name: str,
                 agent_parameters: AgentParameters,
                 in_emb_param_dict: {str: InputEmbedderParameters},
                 embedding_merger_type: EmbeddingMergerType,
                 middleware_param: MiddlewareParameters,
                 head_param_list: [HeadParameters],
                 head_type_idx_start: int,
                 spaces: SpacesDefinition,
                 *args, **kwargs):
        """
        :param network_is_local: True if network is local
        :param network_name: name of the network
        :param agent_parameters: agent parameters
        :param in_emb_param_dict: dictionary of embedder name to embedding parameters
        :param embedding_merger_type: type of merging output of embedders: concatenate or sum
        :param middleware_param: middleware parameters
        :param head_param_list: list of head parameters, one per head type
        :param head_type_idx_start: start index for head type index counting
        :param spaces: state and action space definition
        """
        super(SingleModel, self).__init__(*args, **kwargs)

        self._embedding_merger_type = embedding_merger_type
        self._input_embedders = list()  # type: List[HybridBlock]
        self._output_heads = list()  # type: List[ScaledGradHead]

        with self.name_scope():
            # Embedders are created in sorted key order so that the positional
            # inputs of hybrid_forward line up deterministically.
            for input_name in sorted(in_emb_param_dict):
                input_type = in_emb_param_dict[input_name]
                input_embedder = _get_input_embedder(spaces, input_name, input_type)
                self.register_child(input_embedder)
                self._input_embedders.append(input_embedder)

            self.middleware = _get_middleware(middleware_param)

            for i, head_param in enumerate(head_param_list):
                for head_copy_idx in range(head_param.num_output_head_copies):
                    # create output head and add it to the output heads list
                    output_head = ScaledGradHead(
                        head_index=(head_type_idx_start + i) * head_param.num_output_head_copies + head_copy_idx,
                        head_type_index=head_type_idx_start + i,
                        network_name=network_name,
                        spaces=spaces,
                        network_is_local=network_is_local,
                        agent_params=agent_parameters,
                        head_params=head_param)
                    self.register_child(output_head)
                    self._output_heads.append(output_head)

    def hybrid_forward(self, F, *inputs: Union[NDArray, Symbol]) -> Tuple[Union[NDArray, Symbol], ...]:
        """
        Overrides gluon.HybridBlock.hybrid_forward
        :param nd or sym F: ndarray or symbol block
        :param inputs: model inputs, one for each embedder
        :return: head outputs in a tuple
        """
        # Input Embeddings
        state_embedding = list()
        for input, embedder in zip(inputs, self._input_embedders):
            state_embedding.append(embedder(input))

        # Merger
        if len(state_embedding) == 1:
            state_embedding = state_embedding[0]
        else:
            if self._embedding_merger_type == EmbeddingMergerType.Concat:
                state_embedding = F.concat(*state_embedding, dim=1, name='merger')  # NC or NCHW layout
            elif self._embedding_merger_type == EmbeddingMergerType.Sum:
                state_embedding = F.add_n(*state_embedding, name='merger')

        # Middleware
        state_embedding = self.middleware(state_embedding)

        # Head
        outputs = tuple()
        for head in self._output_heads:
            out = head(state_embedding)
            if not isinstance(out, tuple):
                out = (out,)
            outputs += out
        return outputs

    @property
    def input_embedders(self) -> List[HybridBlock]:
        """
        :return: list of input embedders
        """
        return self._input_embedders

    @property
    def output_heads(self) -> List[Head]:
        """
        :return: list of output heads
        """
        return [h.head for h in self._output_heads]


class GeneralModel(HybridBlock):
    """
    Block that creates multiple single models
    """
    def __init__(self,
                 num_networks: int,
                 num_heads_per_network: int,
                 network_is_local: bool,
                 network_name: str,
                 agent_parameters: AgentParameters,
                 network_parameters: NetworkParameters,
                 spaces: SpacesDefinition,
                 *args, **kwargs):
        """
        :param num_networks: number of networks to create
        :param num_heads_per_network: number of heads per network to create
        :param network_is_local: True if network is local
        :param network_name: name of the network
        :param agent_parameters: agent parameters
        :param network_parameters: network parameters
        :param spaces: state and action space definitions
        """
        super(GeneralModel, self).__init__(*args, **kwargs)

        with self.name_scope():
            self.nets = list()
            for network_idx in range(num_networks):
                # Each sub-network receives a contiguous slice of the head parameters.
                head_type_idx_start = network_idx * num_heads_per_network
                head_type_idx_end = head_type_idx_start + num_heads_per_network
                net = SingleModel(
                    head_type_idx_start=head_type_idx_start,
                    network_name=network_name,
                    network_is_local=network_is_local,
                    agent_parameters=agent_parameters,
                    in_emb_param_dict=network_parameters.input_embedders_parameters,
                    embedding_merger_type=network_parameters.embedding_merger_type,
                    middleware_param=network_parameters.middleware_parameters,
                    head_param_list=network_parameters.heads_parameters[head_type_idx_start:head_type_idx_end],
                    spaces=spaces)
                self.register_child(net)
                self.nets.append(net)

    def hybrid_forward(self, F, *inputs):
        """
        Overrides gluon.HybridBlock.hybrid_forward
        :param nd or sym F: ndarray or symbol block
        :param inputs: model inputs, one for each embedder. Passed to all networks.
        :return: head outputs in a tuple
        """
        outputs = tuple()
        for net in self.nets:
            out = net(*inputs)
            outputs += out
        return outputs

    @property
    def output_heads(self) -> List[Head]:
        """
        Return all heads in a single list
        Note: There is a one-to-one mapping between output_heads and losses
        :return: list of heads
        """
        return list(chain.from_iterable(net.output_heads for net in self.nets))

    def losses(self) -> List[HeadLoss]:
        """
        Construct loss blocks for network training
        Note: There is a one-to-one mapping between output_heads and losses
        :return: list of loss blocks
        """
        return [h.loss() for net in self.nets for h in net.output_heads]
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/general_network.py
0.86421
0.246851
general_network.py
pypi
import inspect
from typing import Any, Dict, Generator, Iterable, List, Tuple, Union
from types import ModuleType

import mxnet as mx
from mxnet import gluon, nd
from mxnet.ndarray import NDArray
import numpy as np

from rl_coach.core_types import GradientClippingMethod

# Type alias for values that may be either imperative NDArrays or symbolic Symbols.
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]


def to_mx_ndarray(data: Union[list, tuple, np.ndarray, NDArray, int, float], ctx: mx.Context=None) ->\
        Union[List[NDArray], Tuple[NDArray], NDArray]:
    """
    Convert data to mx.nd.NDArray. Data can be a list or tuple of np.ndarray, int, or float or
    it can be np.ndarray, int, or float
    :param data: input data to be converted
    :param ctx: context of the data (CPU, GPU0, GPU1, etc.)
    :return: converted output data
    """
    if isinstance(data, list):
        data = [to_mx_ndarray(d, ctx=ctx) for d in data]
    elif isinstance(data, tuple):
        data = tuple(to_mx_ndarray(d, ctx=ctx) for d in data)
    elif isinstance(data, np.ndarray):
        data = nd.array(data, ctx=ctx)
    elif isinstance(data, NDArray):
        # Already an NDArray; just verify it lives on the expected context.
        assert data.context == ctx
        pass
    elif isinstance(data, int) or isinstance(data, float):
        # Scalars become shape-(1,) arrays.
        data = nd.array([data], ctx=ctx)
    else:
        raise TypeError('Unsupported data type: {}'.format(type(data)))
    return data


def asnumpy_or_asscalar(data: Union[NDArray, list, tuple]) -> Union[np.ndarray, np.number, list, tuple]:
    """
    Convert NDArray (or list or tuple of NDArray) to numpy. If shape is (1,), then convert to scalar instead.
    NOTE: This behavior is consistent with tensorflow
    :param data: NDArray or list or tuple of NDArray
    :return: data converted to numpy ndarray or to numpy scalar
    """
    if isinstance(data, list):
        data = [asnumpy_or_asscalar(d) for d in data]
    elif isinstance(data, tuple):
        data = tuple(asnumpy_or_asscalar(d) for d in data)
    elif isinstance(data, NDArray):
        data = data.asscalar() if data.shape == (1,) else data.asnumpy()
    else:
        raise TypeError('Unsupported data type: {}'.format(type(data)))
    return data


def global_norm(arrays: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]]) -> NDArray:
    """
    Calculate global norm on list or tuple of NDArrays using this formula:
        `global_norm = sqrt(sum([l2norm(p)**2 for p in parameters]))`
    :param arrays: list or tuple of parameters to calculate global norm on
    :return: single-value NDArray
    """
    def _norm(array):
        # Squared L2 norm of one array; dense arrays use a flat dot product.
        if array.stype == 'default':
            x = array.reshape((-1,))
            return nd.dot(x, x)
        return array.norm().square()

    total_norm = nd.add_n(*[_norm(arr) for arr in arrays])
    total_norm = nd.sqrt(total_norm)
    return total_norm


def split_outputs_per_head(outputs: Tuple[NDArray], heads: list) -> List[List[NDArray]]:
    """
    Split outputs into outputs per head
    :param outputs: list of all outputs
    :param heads: list of all heads
    :return: list of outputs for each head
    """
    head_outputs = []
    for h in heads:
        head_outputs.append(list(outputs[:h.num_outputs]))
        outputs = outputs[h.num_outputs:]
    # All outputs must be consumed exactly.
    assert len(outputs) == 0
    return head_outputs


def split_targets_per_loss(targets: list, losses: list) -> List[list]:
    """
    Splits targets into targets per loss
    :param targets: list of all targets (typically numpy ndarray)
    :param losses: list of all losses
    :return: list of targets for each loss
    """
    loss_targets = list()
    for l in losses:
        loss_data_len = len(l.input_schema.targets)
        assert len(targets) >= loss_data_len, "Data length doesn't match schema"
        loss_targets.append(targets[:loss_data_len])
        targets = targets[loss_data_len:]
    assert len(targets) == 0
    return loss_targets


def get_loss_agent_inputs(inputs: Dict[str, np.ndarray], head_type_idx: int, loss: Any) -> List[np.ndarray]:
    """
    Collects all inputs with prefix 'output_<head_idx>_' and matches them against agent_inputs
    in loss input schema.
    :param inputs: list of all agent inputs
    :param head_type_idx: head-type index of the corresponding head
    :param loss: corresponding loss
    :return: list of agent inputs for this loss. This list matches the length in loss input schema.
    """
    loss_inputs = list()
    # sorted() keeps the suffix order deterministic ('output_0_0', 'output_0_1', ...).
    for k in sorted(inputs.keys()):
        if k.startswith('output_{}_'.format(head_type_idx)):
            loss_inputs.append(inputs[k])
    # Enforce that number of inputs for head_type are the same as agent_inputs specified by loss input_schema
    assert len(loss_inputs) == len(loss.input_schema.agent_inputs), "agent_input length doesn't match schema"
    return loss_inputs


def align_loss_args(
        head_outputs: List[NDArray],
        agent_inputs: List[np.ndarray],
        targets: List[np.ndarray],
        loss: Any) -> List[np.ndarray]:
    """
    Creates a list of arguments from head_outputs, agent_inputs, and targets aligned with parameters of
    loss.loss_forward() based on their name in loss input_schema
    :param head_outputs: list of all head_outputs for this loss
    :param agent_inputs: list of all agent_inputs for this loss
    :param targets: list of all targets for this loss
    :param loss: corresponding loss
    :return: list of arguments in correct order to be passed to loss
    """
    arg_list = list()
    schema = loss.input_schema
    assert len(schema.head_outputs) == len(head_outputs)
    assert len(schema.agent_inputs) == len(agent_inputs)
    assert len(schema.targets) == len(targets)

    prev_found = True
    for arg_name in inspect.getfullargspec(loss.loss_forward).args[2:]:  # First two args are self and F
        found = False
        for schema_list, data in [(schema.head_outputs, head_outputs),
                                  (schema.agent_inputs, agent_inputs),
                                  (schema.targets, targets)]:
            try:
                arg_list.append(data[schema_list.index(arg_name)])
                found = True
                break
            except ValueError:
                continue
        # Once an argument is missing, no later argument may be present
        # (i.e. missing args are only allowed as a trailing suffix).
        assert not found or prev_found, "missing arguments detected!"
        prev_found = found
    return arg_list


def to_tuple(data: Union[tuple, list, Any]):
    """
    If input is list, it is converted to tuple. If it's tuple, it is returned untouched. Otherwise
    returns a single-element tuple of the data.
    :return: tuple-ified data
    """
    if isinstance(data, tuple):
        pass
    elif isinstance(data, list):
        data = tuple(data)
    else:
        data = (data,)
    return data


def to_list(data: Union[tuple, list, Any]):
    """
    If input is tuple, it is converted to list. If it's list, it is returned untouched. Otherwise
    returns a single-element list of the data.
    :return: list-ified data
    """
    if isinstance(data, list):
        pass
    elif isinstance(data, tuple):
        data = list(data)
    else:
        data = [data]
    return data


def loss_output_dict(output: List[NDArray], schema: List[str]) -> Dict[str, List[NDArray]]:
    """
    Creates a dictionary for loss output based on the output schema. If two output values have the same
    type string in the schema they are concatenated in the same dictionary item.
    :param output: list of output values
    :param schema: list of type-strings for output values
    :return: dictionary of keyword to list of NDArrays
    """
    assert len(output) == len(schema)
    output_dict = dict()
    for name, val in zip(schema, output):
        if name in output_dict:
            output_dict[name].append(val)
        else:
            output_dict[name] = [val]
    return output_dict


def clip_grad(
        grads: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]],
        clip_method: GradientClippingMethod,
        clip_val: float,
        inplace=True) -> List[NDArray]:
    """
    Clip gradient values inplace
    :param grads: gradients to be clipped
    :param clip_method: clipping method
    :param clip_val: clipping value. Interpreted differently depending on clipping method.
    :param inplace: modify grads if True, otherwise create NDArrays
    :return: clipped gradients
    """
    output = list(grads) if inplace else list(nd.empty(g.shape) for g in grads)
    if clip_method == GradientClippingMethod.ClipByGlobalNorm:
        norm_unclipped_grads = global_norm(grads)
        scale = clip_val / (norm_unclipped_grads.asscalar() + 1e-8)  # todo: use branching operators?
        if scale < 1.0:
            for g, o in zip(grads, output):
                nd.broadcast_mul(g, nd.array([scale]), out=o)
    elif clip_method == GradientClippingMethod.ClipByValue:
        for g, o in zip(grads, output):
            g.clip(-clip_val, clip_val, out=o)
    elif clip_method == GradientClippingMethod.ClipByNorm:
        for g, o in zip(grads, output):
            nd.broadcast_mul(g, nd.minimum(1.0, clip_val / (g.norm() + 1e-8)), out=o)
    else:
        raise KeyError('Unsupported gradient clipping method')
    # NOTE(review): with inplace=False and ClipByGlobalNorm under the clip
    # threshold, `output` holds uninitialized nd.empty buffers — presumably
    # callers only use the inplace path in that mode; verify.
    return output


def hybrid_clip(F: ModuleType, x: nd_sym_type, clip_lower: nd_sym_type, clip_upper: nd_sym_type) -> nd_sym_type:
    """
    Apply clipping to input x between clip_lower and clip_upper. Added because F.clip doesn't support
    clipping bounds that are mx.nd.NDArray or mx.sym.Symbol.
    :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
    :param x: input data
    :param clip_lower: lower bound used for clipping, should be of shape (1,)
    :param clip_upper: upper bound used for clipping, should be of shape (1,)
    :return: clipped data
    """
    x_clip_lower = broadcast_like(F, clip_lower, x)
    x_clip_upper = broadcast_like(F, clip_upper, x)
    x_clipped = F.minimum(F.maximum(x, x_clip_lower), x_clip_upper)
    return x_clipped


def broadcast_like(F: ModuleType, x: nd_sym_type, y: nd_sym_type) -> nd_sym_type:
    """
    Implementation of broadcast_like using broadcast_add and broadcast_mul because ONNX doesn't
    support broadcast_like.
    :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized).
    :param x: input to be broadcast
    :param y: tensor to broadcast x like
    :return: broadcast x
    """
    # (y * 0) + 1 is a ones-tensor shaped like y; multiplying broadcasts x to it.
    return F.broadcast_mul(x, (y * 0) + 1)


def get_mxnet_activation_name(activation_name: str):
    """
    Convert coach activation name to mxnet specific activation name
    :param activation_name: name of the activation in coach
    :return: name of the activation in mxnet
    """
    activation_functions = {
        'relu': 'relu',
        'tanh': 'tanh',
        'sigmoid': 'sigmoid',
        # FIXME Add other activations
        # 'elu': tf.nn.elu,
        'selu': 'softrelu',
        # 'leaky_relu': tf.nn.leaky_relu,
        'none': None
    }
    assert activation_name in activation_functions, \
        "Activation function must be one of the following {}. instead it was: {}".format(
            activation_functions.keys(), activation_name)
    return activation_functions[activation_name]


class OnnxHandlerBlock(object):
    """
    Helper base class for gluon blocks that must behave differently for ONNX export forward pass
    """
    def __init__(self):
        # When True, hybrid_forward should take its ONNX-compatible code path.
        self._onnx = False

    def enable_onnx(self):
        self._onnx = True

    def disable_onnx(self):
        self._onnx = False


class ScopedOnnxEnable(object):
    """
    Helper scoped ONNX enable class
    """
    def __init__(self, net: gluon.HybridBlock):
        self._onnx_handlers = self._get_onnx_handlers(net)

    def __enter__(self):
        for b in self._onnx_handlers:
            b.enable_onnx()

    def __exit__(self, exc_type, exc_val, exc_tb):
        for b in self._onnx_handlers:
            b.disable_onnx()

    @staticmethod
    def _get_onnx_handlers(block: gluon.HybridBlock) -> List[OnnxHandlerBlock]:
        """
        Iterates through all child blocks and return all of them that are instance of OnnxHandlerBlock
        :return: list of OnnxHandlerBlock child blocks
        """
        handlers = list()
        if isinstance(block, OnnxHandlerBlock):
            handlers.append(block)
        for child_block in block._children.values():
            handlers += ScopedOnnxEnable._get_onnx_handlers(child_block)
        return handlers
0.924628
0.547827
utils.py
pypi
from typing import Union from types import ModuleType import mxnet as mx from mxnet.gluon import rnn from rl_coach.architectures.mxnet_components.layers import Dense from rl_coach.architectures.mxnet_components.middlewares.middleware import Middleware from rl_coach.architectures.middleware_parameters import LSTMMiddlewareParameters from rl_coach.base_parameters import MiddlewareScheme nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol] class LSTMMiddleware(Middleware): def __init__(self, params: LSTMMiddlewareParameters): """ LSTMMiddleware or Long Short Term Memory Middleware can be used in the middle part of the network. It takes the embeddings from the input embedders, after they were aggregated in some method (for example, concatenation) and passes it through a neural network which can be customizable but shared between the heads of the network. :param params: parameters object containing batchnorm, activation_function, dropout and number_of_lstm_cells properties. """ super(LSTMMiddleware, self).__init__(params) self.number_of_lstm_cells = params.number_of_lstm_cells with self.name_scope(): self.lstm = rnn.LSTM(hidden_size=self.number_of_lstm_cells) @property def schemes(self) -> dict: """ Schemes are the pre-defined network architectures of various depths and complexities that can be used for the Middleware. Are used to create Block when LSTMMiddleware is initialised, and are applied before the LSTM. :return: dictionary of schemes, with key of type MiddlewareScheme enum and value being list of mxnet.gluon.Block. """ return { MiddlewareScheme.Empty: [], # Use for PPO MiddlewareScheme.Shallow: [ Dense(units=64) ], # Use for DQN MiddlewareScheme.Medium: [ Dense(units=512) ], MiddlewareScheme.Deep: [ Dense(units=128), Dense(units=128), Dense(units=128) ] } def hybrid_forward(self, F: ModuleType, x: nd_sym_type, *args, **kwargs) -> nd_sym_type: """ Used for forward pass through LSTM middleware network. 
Applies dense layers from selected scheme before passing result to LSTM layer. :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized). :param x: state embedding, of shape (batch_size, in_channels). :return: state middleware embedding, where shape is (batch_size, channels). """ x_ntc = x.reshape(shape=(0, 0, -1)) emb_ntc = super(LSTMMiddleware, self).hybrid_forward(F, x_ntc, *args, **kwargs) emb_tnc = emb_ntc.transpose(axes=(1, 0, 2)) return self.lstm(emb_tnc)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/middlewares/lstm_middleware.py
0.948131
0.350477
lstm_middleware.py
pypi
from typing import Union from types import ModuleType import mxnet as mx from rl_coach.architectures.embedder_parameters import InputEmbedderParameters from rl_coach.architectures.mxnet_components.embedders.embedder import InputEmbedder nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol] class TensorEmbedder(InputEmbedder): def __init__(self, params: InputEmbedderParameters): """ A tensor embedder is an input embedder that takes a tensor with arbitrary dimension and produces a vector embedding by passing it through a neural network. An example is video data or 3D image data (i.e. 4D tensors) or other type of data that is more than 1 dimension (i.e. not vector) but is not an image. NOTE: There are no pre-defined schemes for tensor embedder. User must define a custom scheme by passing a callable object as InputEmbedderParameters.scheme when defining the respective preset. This callable object must return a Gluon HybridBlock. The hybrid_forward() of this block must accept a single input, normalized observation, and return an embedding vector for each sample in the batch. Keep in mind that the scheme is a list of blocks, which are stacked by optional batchnorm, activation, and dropout in between as specified in InputEmbedderParameters. :param params: parameters object containing input_clipping, input_rescaling, batchnorm, activation_function and dropout properties. """ super(TensorEmbedder, self).__init__(params) self.input_rescaling = params.input_rescaling['tensor'] self.input_offset = params.input_offset['tensor'] @property def schemes(self) -> dict: """ Schemes are the pre-defined network architectures of various depths and complexities that can be used. Are used to create Block when InputEmbedder is initialised. Note: Tensor embedder doesn't define any pre-defined scheme. User must provide custom scheme in preset. :return: dictionary of schemes, with key of type EmbedderScheme enum and value being list of mxnet.gluon.Block. 
For tensor embedder, this is an empty dictionary. """ return {} def hybrid_forward(self, F: ModuleType, x: nd_sym_type, *args, **kwargs) -> nd_sym_type: """ Used for forward pass through embedder network. :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized). :param x: image representing environment state, of shape (batch_size, in_channels, height, width). :return: embedding of environment state, of shape (batch_size, channels). """ return super(TensorEmbedder, self).hybrid_forward(F, x, *args, **kwargs)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/embedders/tensor_embedder.py
0.9504
0.507019
tensor_embedder.py
pypi
from typing import Union from types import ModuleType import mxnet as mx from mxnet import nd, sym from rl_coach.architectures.embedder_parameters import InputEmbedderParameters from rl_coach.architectures.mxnet_components.embedders.embedder import InputEmbedder from rl_coach.architectures.mxnet_components.layers import Dense from rl_coach.base_parameters import EmbedderScheme nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol] class VectorEmbedder(InputEmbedder): def __init__(self, params: InputEmbedderParameters): """ An vector embedder is an input embedder that takes an vector input from the state and produces a vector embedding by passing it through a neural network. :param params: parameters object containing input_clipping, input_rescaling, batchnorm, activation_function and dropout properties. """ super(VectorEmbedder, self).__init__(params) self.input_rescaling = params.input_rescaling['vector'] self.input_offset = params.input_offset['vector'] @property def schemes(self): """ Schemes are the pre-defined network architectures of various depths and complexities that can be used. Are used to create Block when VectorEmbedder is initialised. :return: dictionary of schemes, with key of type EmbedderScheme enum and value being list of mxnet.gluon.Block. """ return { EmbedderScheme.Empty: [], EmbedderScheme.Shallow: [ Dense(units=128) ], # Use for DQN EmbedderScheme.Medium: [ Dense(units=256) ], # Use for Carla EmbedderScheme.Deep: [ Dense(units=128), Dense(units=128), Dense(units=128) ] } def hybrid_forward(self, F: ModuleType, x: nd_sym_type, *args, **kwargs) -> nd_sym_type: """ Used for forward pass through embedder network. :param F: backend api, either `nd` or `sym` (if block has been hybridized). :type F: nd or sym :param x: vector representing environment state, of shape (batch_size, in_channels). :return: embedding of environment state, of shape (batch_size, channels). 
""" if isinstance(x, nd.NDArray) and len(x.shape) != 2 and self.scheme != EmbedderScheme.Empty: raise ValueError("Vector embedders expect the input size to have 2 dimensions. The given size is: {}" .format(x.shape)) return super(VectorEmbedder, self).hybrid_forward(F, x, *args, **kwargs)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/embedders/vector_embedder.py
0.94388
0.418756
vector_embedder.py
pypi
from typing import Union from types import ModuleType import mxnet as mx from rl_coach.architectures.embedder_parameters import InputEmbedderParameters from rl_coach.architectures.mxnet_components.embedders.embedder import InputEmbedder from rl_coach.architectures.mxnet_components.layers import Conv2d from rl_coach.base_parameters import EmbedderScheme nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol] class ImageEmbedder(InputEmbedder): def __init__(self, params: InputEmbedderParameters): """ An image embedder is an input embedder that takes an image input from the state and produces a vector embedding by passing it through a neural network. :param params: parameters object containing input_clipping, input_rescaling, batchnorm, activation_function and dropout properties. """ super(ImageEmbedder, self).__init__(params) self.input_rescaling = params.input_rescaling['image'] self.input_offset = params.input_offset['image'] @property def schemes(self) -> dict: """ Schemes are the pre-defined network architectures of various depths and complexities that can be used. Are used to create Block when ImageEmbedder is initialised. :return: dictionary of schemes, with key of type EmbedderScheme enum and value being list of mxnet.gluon.Block. 
""" return { EmbedderScheme.Empty: [], EmbedderScheme.Shallow: [ Conv2d(num_filters=32, kernel_size=8, strides=4) ], # Use for Atari DQN EmbedderScheme.Medium: [ Conv2d(num_filters=32, kernel_size=8, strides=4), Conv2d(num_filters=64, kernel_size=4, strides=2), Conv2d(num_filters=64, kernel_size=3, strides=1) ], # Use for Carla EmbedderScheme.Deep: [ Conv2d(num_filters=32, kernel_size=5, strides=2), Conv2d(num_filters=32, kernel_size=3, strides=1), Conv2d(num_filters=64, kernel_size=3, strides=2), Conv2d(num_filters=64, kernel_size=3, strides=1), Conv2d(num_filters=128, kernel_size=3, strides=2), Conv2d(num_filters=128, kernel_size=3, strides=1), Conv2d(num_filters=256, kernel_size=3, strides=2), Conv2d(num_filters=256, kernel_size=3, strides=1) ] } def hybrid_forward(self, F: ModuleType, x: nd_sym_type, *args, **kwargs) -> nd_sym_type: """ Used for forward pass through embedder network. :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized). :param x: image representing environment state, of shape (batch_size, in_channels, height, width). :return: embedding of environment state, of shape (batch_size, channels). """ # convert from NHWC to NCHW (default for MXNet Convolutions) x = x.transpose((0,3,1,2)) return super(ImageEmbedder, self).hybrid_forward(F, x, *args, **kwargs)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/embedders/image_embedder.py
0.96738
0.46308
image_embedder.py
pypi
from typing import Union, List, Tuple from types import ModuleType import mxnet as mx from mxnet.gluon.loss import Loss, HuberLoss, L2Loss from mxnet.gluon import nn from rl_coach.architectures.mxnet_components.heads.head import Head, HeadLoss, LossInputSchema,\ NormalizedRSSInitializer from rl_coach.architectures.mxnet_components.heads.head import LOSS_OUT_TYPE_LOSS from rl_coach.base_parameters import AgentParameters from rl_coach.core_types import VStateValue from rl_coach.spaces import SpacesDefinition nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol] class VHeadLoss(HeadLoss): def __init__(self, loss_type: Loss=L2Loss, weight: float=1, batch_axis: int=0) -> None: """ Loss for Value Head. :param loss_type: loss function with default of mean squared error (i.e. L2Loss). :param weight: scalar used to adjust relative weight of loss (if using this loss with others). :param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation. """ super(VHeadLoss, self).__init__(weight=weight, batch_axis=batch_axis) with self.name_scope(): self.loss_fn = loss_type(weight=weight, batch_axis=batch_axis) @property def input_schema(self) -> LossInputSchema: return LossInputSchema( head_outputs=['pred'], agent_inputs=[], targets=['target'] ) def loss_forward(self, F: ModuleType, pred: nd_sym_type, target: nd_sym_type) -> List[Tuple[nd_sym_type, str]]: """ Used for forward pass through loss computations. :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized). :param pred: state values predicted by VHead network, of shape (batch_size). :param target: actual state values, of shape (batch_size). :return: loss, of shape (batch_size). 
""" loss = self.loss_fn(pred, target).mean() return [(loss, LOSS_OUT_TYPE_LOSS)] class VHead(Head): def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str, head_type_idx: int=0, loss_weight: float=1., is_local: bool=True, activation_function: str='relu', dense_layer: None=None, loss_type: Union[HuberLoss, L2Loss]=L2Loss): """ Value Head for predicting state values. :param agent_parameters: containing algorithm parameters, but currently unused. :param spaces: containing action spaces, but currently unused. :param network_name: name of head network. currently unused. :param head_type_idx: index of head network. currently unused. :param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others). :param is_local: flag to denote if network is local. currently unused. :param activation_function: activation function to use between layers. currently unused. :param dense_layer: type of dense layer to use in network. currently unused. :param loss_type: loss function with default of mean squared error (i.e. L2Loss), or alternatively HuberLoss. """ super(VHead, self).__init__(agent_parameters, spaces, network_name, head_type_idx, loss_weight, is_local, activation_function, dense_layer) assert (loss_type == L2Loss) or (loss_type == HuberLoss), "Only expecting L2Loss or HuberLoss." self.loss_type = loss_type self.return_type = VStateValue with self.name_scope(): self.dense = nn.Dense(units=1, weight_initializer=NormalizedRSSInitializer(1.0)) def loss(self) -> Loss: """ Specifies loss block to be used for specific value head implementation. :return: loss block (can be called as function) for outputs returned by the head network. """ return VHeadLoss(loss_type=self.loss_type, weight=self.loss_weight) def hybrid_forward(self, F: ModuleType, x: nd_sym_type) -> nd_sym_type: """ Used for forward pass through value head network. 
:param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized). :param x: middleware state representation, of shape (batch_size, in_channels). :return: final output of value network, of shape (batch_size). """ return self.dense(x).squeeze(axis=1)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/heads/v_head.py
0.976389
0.445288
v_head.py
pypi
from typing import Dict, List, Union, Tuple import mxnet as mx from mxnet.initializer import Initializer, register from mxnet.gluon import nn, loss from mxnet.ndarray import NDArray from mxnet.symbol import Symbol from rl_coach.base_parameters import AgentParameters from rl_coach.spaces import SpacesDefinition LOSS_OUT_TYPE_LOSS = 'loss' LOSS_OUT_TYPE_REGULARIZATION = 'regularization' @register class NormalizedRSSInitializer(Initializer): """ Standardizes Root Sum of Squares along the input channel dimension. Used for Dense layer weight matrices only (ie. do not use on Convolution kernels). MXNet Dense layer weight matrix is of shape (out_ch, in_ch), so standardize across axis 1. Root Sum of Squares set to `rss`, which is 1.0 by default. Called `normalized_columns_initializer` in TensorFlow backend (but we work with rows instead of columns for MXNet). """ def __init__(self, rss=1.0): super(NormalizedRSSInitializer, self).__init__(rss=rss) self.rss = float(rss) def _init_weight(self, name, arr): mx.nd.random.normal(0, 1, out=arr) sample_rss = arr.square().sum(axis=1).sqrt() scalers = self.rss / sample_rss arr *= scalers.expand_dims(1) class LossInputSchema(object): """ Helper class to contain schema for loss hybrid_forward input """ def __init__(self, head_outputs: List[str], agent_inputs: List[str], targets: List[str]): """ :param head_outputs: list of argument names in hybrid_forward that are outputs of the head. The order and number MUST MATCH the output from the head. :param agent_inputs: list of argument names in hybrid_forward that are inputs from the agent. The order and number MUST MATCH `output_<head_type_idx>_<order>` for this head. :param targets: list of argument names in hybrid_forward that are targets for the loss. The order and number MUST MATCH targets passed from the agent. 
""" self._head_outputs = head_outputs self._agent_inputs = agent_inputs self._targets = targets @property def head_outputs(self): return self._head_outputs @property def agent_inputs(self): return self._agent_inputs @property def targets(self): return self._targets class HeadLoss(loss.Loss): """ ABC for loss functions of each head. Child class must implement input_schema() and loss_forward() """ def __init__(self, *args, **kwargs): super(HeadLoss, self).__init__(*args, **kwargs) self._output_schema = None # type: List[str] @property def input_schema(self) -> LossInputSchema: """ :return: schema for input of hybrid_forward. Read docstring for LossInputSchema for details. """ raise NotImplementedError @property def output_schema(self) -> List[str]: """ :return: schema for output of hybrid_forward. Must contain 'loss' and 'regularization' keys at least once. The order and total number must match that of returned values from the loss. 'loss' and 'regularization' are special keys. Any other string is treated as auxiliary outputs and must include match auxiliary fetch names returned by the head. """ return self._output_schema def forward(self, *args): """ Override forward() so that number of outputs can be checked against the schema """ outputs = super(HeadLoss, self).forward(*args) if isinstance(outputs, tuple) or isinstance(outputs, list): num_outputs = len(outputs) else: assert isinstance(outputs, NDArray) or isinstance(outputs, Symbol) num_outputs = 1 assert num_outputs == len(self.output_schema), "Number of outputs don't match schema ({} != {})".format( num_outputs, len(self.output_schema)) return outputs def _loss_output(self, outputs: List[Tuple[Union[NDArray, Symbol], str]]): """ Must be called on the output from hybrid_forward(). 
Saves the returned output as the schema and returns output values in a list :return: list of output values """ output_schema = [o[1] for o in outputs] assert self._output_schema is None or self._output_schema == output_schema self._output_schema = output_schema return tuple(o[0] for o in outputs) def hybrid_forward(self, F, x, *args, **kwargs): """ Passes the cal to loss_forward() and constructs output schema from its output by calling loss_output() """ return self._loss_output(self.loss_forward(F, x, *args, **kwargs)) def loss_forward(self, F, x, *args, **kwargs) -> List[Tuple[Union[NDArray, Symbol], str]]: """ Similar to hybrid_forward, but returns list of (NDArray, type_str) """ raise NotImplementedError class Head(nn.HybridBlock): def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str, head_type_idx: int=0, loss_weight: float=1., is_local: bool=True, activation_function: str='relu', dense_layer: None=None): """ A head is the final part of the network. It takes the embedding from the middleware embedder and passes it through a neural network to produce the output of the network. There can be multiple heads in a network, and each one has an assigned loss function. The heads are algorithm dependent. :param agent_parameters: containing algorithm parameters such as clip_likelihood_ratio_using_epsilon and beta_entropy. :param spaces: containing action spaces used for defining size of network output. :param network_name: name of head network. currently unused. :param head_type_idx: index of head network. currently unused. :param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others). :param is_local: flag to denote if network is local. currently unused. :param activation_function: activation function to use between layers. currently unused. :param dense_layer: type of dense layer to use in network. currently unused. 
""" super(Head, self).__init__() self.head_type_idx = head_type_idx self.network_name = network_name self.loss_weight = loss_weight self.is_local = is_local self.ap = agent_parameters self.spaces = spaces self.return_type = None self.activation_function = activation_function self.dense_layer = dense_layer self._num_outputs = None def loss(self) -> HeadLoss: """ Returns loss block to be used for specific head implementation. :return: loss block (can be called as function) for outputs returned by the head network. """ raise NotImplementedError() @property def num_outputs(self): """ Returns number of outputs that forward() call will return :return: """ assert self._num_outputs is not None, 'must call forward() once to configure number of outputs' return self._num_outputs def forward(self, *args): """ Override forward() so that number of outputs can be automatically set """ outputs = super(Head, self).forward(*args) if isinstance(outputs, tuple): num_outputs = len(outputs) else: assert isinstance(outputs, NDArray) or isinstance(outputs, Symbol) num_outputs = 1 if self._num_outputs is None: self._num_outputs = num_outputs else: assert self._num_outputs == num_outputs, 'Number of outputs cannot change ({} != {})'.format( self._num_outputs, num_outputs) assert self._num_outputs == len(self.loss().input_schema.head_outputs) return outputs def hybrid_forward(self, F, x, *args, **kwargs): """ Used for forward pass through head network. :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized). :param x: middleware state representation, of shape (batch_size, in_channels). :return: final output of network, that will be used in loss calculations. """ raise NotImplementedError()
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/heads/head.py
0.960915
0.573081
head.py
pypi
from typing import List, Tuple, Union from types import ModuleType import mxnet as mx from mxnet.gluon import nn from rl_coach.architectures.mxnet_components.heads.head import Head, HeadLoss, LossInputSchema,\ NormalizedRSSInitializer from rl_coach.architectures.mxnet_components.heads.head import LOSS_OUT_TYPE_LOSS from rl_coach.base_parameters import AgentParameters from rl_coach.core_types import ActionProbabilities from rl_coach.spaces import SpacesDefinition nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol] class PPOVHeadLoss(HeadLoss): def __init__(self, clip_likelihood_ratio_using_epsilon: float, weight: float=1, batch_axis: int=0) -> None: """ Loss for PPO Value network. Schulman implemented this extension in OpenAI baselines for PPO2 See https://github.com/openai/baselines/blob/master/baselines/ppo2/ppo2.py#L72 :param clip_likelihood_ratio_using_epsilon: epsilon to use for likelihood ratio clipping. :param weight: scalar used to adjust relative weight of loss (if using this loss with others). :param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation. """ super(PPOVHeadLoss, self).__init__(weight=weight, batch_axis=batch_axis) self.weight = weight self.clip_likelihood_ratio_using_epsilon = clip_likelihood_ratio_using_epsilon @property def input_schema(self) -> LossInputSchema: return LossInputSchema( head_outputs=['new_policy_values'], agent_inputs=['old_policy_values'], targets=['target_values'] ) def loss_forward(self, F: ModuleType, new_policy_values: nd_sym_type, old_policy_values: nd_sym_type, target_values: nd_sym_type) -> List[Tuple[nd_sym_type, str]]: """ Used for forward pass through loss computations. Calculates two losses (L2 and a clipped difference L2 loss) and takes the maximum of the two. Works with batches of data, and optionally time_steps, but be consistent in usage: i.e. if using time_step, new_policy_values, old_policy_values and target_values all must include a time_step dimension. 
:param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized). :param new_policy_values: values predicted by PPOVHead network, of shape (batch_size) or of shape (batch_size, time_step). :param old_policy_values: values predicted by old value network, of shape (batch_size) or of shape (batch_size, time_step). :param target_values: actual state values, of shape (batch_size) or of shape (batch_size, time_step). :return: loss, of shape (batch_size). """ # L2 loss value_loss_1 = (new_policy_values - target_values).square() # Clipped difference L2 loss diff = new_policy_values - old_policy_values clipped_diff = diff.clip(a_min=-self.clip_likelihood_ratio_using_epsilon, a_max=self.clip_likelihood_ratio_using_epsilon) value_loss_2 = (old_policy_values + clipped_diff - target_values).square() # Maximum of the two losses, element-wise maximum. value_loss_max = mx.nd.stack(value_loss_1, value_loss_2).max(axis=0) # Aggregate over temporal axis, adding if doesn't exist (hense reshape) value_loss_max_w_time = value_loss_max.reshape(shape=(0, -1)) value_loss = value_loss_max_w_time.mean(axis=1) # Weight the loss (and average over samples of batch) value_loss_weighted = value_loss.mean() * self.weight return [(value_loss_weighted, LOSS_OUT_TYPE_LOSS)] class PPOVHead(Head): def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str, head_type_idx: int=0, loss_weight: float=1., is_local: bool = True, activation_function: str='relu', dense_layer: None=None) -> None: """ PPO Value Head for predicting state values. :param agent_parameters: containing algorithm parameters, but currently unused. :param spaces: containing action spaces, but currently unused. :param network_name: name of head network. currently unused. :param head_type_idx: index of head network. currently unused. :param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others). :param is_local: flag to denote if network is local. 
currently unused. :param activation_function: activation function to use between layers. currently unused. :param dense_layer: type of dense layer to use in network. currently unused. """ super(PPOVHead, self).__init__(agent_parameters, spaces, network_name, head_type_idx, loss_weight, is_local, activation_function, dense_layer=dense_layer) self.clip_likelihood_ratio_using_epsilon = agent_parameters.algorithm.clip_likelihood_ratio_using_epsilon self.return_type = ActionProbabilities with self.name_scope(): self.dense = nn.Dense(units=1, weight_initializer=NormalizedRSSInitializer(1.0)) def hybrid_forward(self, F: ModuleType, x: nd_sym_type) -> nd_sym_type: """ Used for forward pass through value head network. :param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized). :param x: middleware state representation, of shape (batch_size, in_channels). :return: final value output of network, of shape (batch_size). """ return self.dense(x).squeeze(axis=1) def loss(self) -> mx.gluon.loss.Loss: """ Specifies loss block to be used for specific value head implementation. :return: loss block (can be called as function) for outputs returned by the value head network. """ return PPOVHeadLoss(self.clip_likelihood_ratio_using_epsilon, weight=self.loss_weight)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/heads/ppo_v_head.py
0.971252
0.489015
ppo_v_head.py
pypi
from typing import Union, List, Tuple from types import ModuleType import mxnet as mx from mxnet.gluon.loss import Loss, HuberLoss, L2Loss from mxnet.gluon import nn from rl_coach.architectures.mxnet_components.heads.head import Head, HeadLoss, LossInputSchema from rl_coach.architectures.mxnet_components.heads.head import LOSS_OUT_TYPE_LOSS from rl_coach.base_parameters import AgentParameters from rl_coach.core_types import QActionStateValue from rl_coach.spaces import SpacesDefinition, BoxActionSpace, DiscreteActionSpace nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol] class QHeadLoss(HeadLoss): def __init__(self, loss_type: Loss=L2Loss, weight: float=1, batch_axis: int=0) -> None: """ Loss for Q-Value Head. :param loss_type: loss function with default of mean squared error (i.e. L2Loss). :param weight: scalar used to adjust relative weight of loss (if using this loss with others). :param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation. """ super(QHeadLoss, self).__init__(weight=weight, batch_axis=batch_axis) with self.name_scope(): self.loss_fn = loss_type(weight=weight, batch_axis=batch_axis) @property def input_schema(self) -> LossInputSchema: return LossInputSchema( head_outputs=['pred'], agent_inputs=[], targets=['target'] ) def loss_forward(self, F: ModuleType, pred: nd_sym_type, target: nd_sym_type) -> List[Tuple[nd_sym_type, str]]: """ Used for forward pass through loss computations. :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized). :param pred: state-action q-values predicted by QHead network, of shape (batch_size, num_actions). :param target: actual state-action q-values, of shape (batch_size, num_actions). :return: loss, of shape (batch_size). 
""" loss = self.loss_fn(pred, target).mean() return [(loss, LOSS_OUT_TYPE_LOSS)] class QHead(Head): def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str, head_type_idx: int=0, loss_weight: float=1., is_local: bool=True, activation_function: str='relu', dense_layer: None=None, loss_type: Union[HuberLoss, L2Loss]=L2Loss) -> None: """ Q-Value Head for predicting state-action Q-Values. :param agent_parameters: containing algorithm parameters, but currently unused. :param spaces: containing action spaces used for defining size of network output. :param network_name: name of head network. currently unused. :param head_type_idx: index of head network. currently unused. :param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others). :param is_local: flag to denote if network is local. currently unused. :param activation_function: activation function to use between layers. currently unused. :param dense_layer: type of dense layer to use in network. currently unused. :param loss_type: loss function to use. """ super(QHead, self).__init__(agent_parameters, spaces, network_name, head_type_idx, loss_weight, is_local, activation_function, dense_layer) if isinstance(self.spaces.action, BoxActionSpace): self.num_actions = 1 elif isinstance(self.spaces.action, DiscreteActionSpace): self.num_actions = len(self.spaces.action.actions) self.return_type = QActionStateValue assert (loss_type == L2Loss) or (loss_type == HuberLoss), "Only expecting L2Loss or HuberLoss." self.loss_type = loss_type with self.name_scope(): self.dense = nn.Dense(units=self.num_actions) def loss(self) -> Loss: """ Specifies loss block to be used for specific value head implementation. :return: loss block (can be called as function) for outputs returned by the head network. 
""" return QHeadLoss(loss_type=self.loss_type, weight=self.loss_weight) def hybrid_forward(self, F: ModuleType, x: nd_sym_type) -> nd_sym_type: """ Used for forward pass through Q-Value head network. :param F: backend api, either `mxnet.nd` or `mxnet.sym` (if block has been hybridized). :param x: middleware state representation, of shape (batch_size, in_channels). :return: predicted state-action q-values, of shape (batch_size, num_actions). """ return self.dense(x)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/mxnet_components/heads/q_head.py
0.974893
0.45308
q_head.py
pypi
from typing import Tuple

import tensorflow as tf


def create_cluster_spec(parameters_server: str, workers: str) -> tf.train.ClusterSpec:
    """
    Build a ClusterSpec object describing the distributed cluster.

    :param parameters_server: comma-separated list of hostname:port pairs to which the parameter
        servers are assigned
    :param workers: comma-separated list of hostname:port pairs to which the workers are assigned
    :return: a ClusterSpec object representing the cluster
    """
    # map each job name to the host list parsed from its comma-separated string
    jobs = {
        "ps": parameters_server.split(","),
        "worker": workers.split(","),
    }
    return tf.train.ClusterSpec(jobs)


def create_and_start_parameters_server(cluster_spec: tf.train.ClusterSpec,
                                       config: tf.ConfigProto=None) -> None:
    """
    Create a parameter server and block until it terminates.

    :param cluster_spec: the ClusterSpec object representing the cluster
    :param config: the tensorflow config to use
    :return: None
    """
    ps_server = tf.train.Server(cluster_spec, job_name="ps", task_index=0, config=config)
    # join() blocks forever; the parameter server only serves variable ops
    ps_server.join()


def create_worker_server_and_device(cluster_spec: tf.train.ClusterSpec, task_index: int,
                                    use_cpu: bool=True,
                                    config: tf.ConfigProto=None) -> Tuple[str, tf.device]:
    """
    Create a worker server and the device setter used to place the worker's ops.

    :param cluster_spec: a ClusterSpec object representing the cluster
    :param task_index: the index of the worker task
    :param use_cpu: if use_cpu=True, all the agent operations will be assigned to a CPU
        instead of a GPU
    :param config: the tensorflow config to use
    :return: the target string for the tf.Session and the worker device setter object
    """
    worker = tf.train.Server(cluster_spec, job_name="worker", task_index=task_index, config=config)

    # pin ops either to this worker's CPU or to its first GPU
    device_suffix = "/cpu:0" if use_cpu else "/device:GPU:0"
    worker_device = "/job:worker/task:{}".format(task_index) + device_suffix
    device_setter = tf.train.replica_device_setter(worker_device=worker_device, cluster=cluster_spec)

    return worker.target, device_setter


def create_monitored_session(target: tf.train.Server, task_index: int,
                             checkpoint_dir: str, checkpoint_save_secs: int,
                             config: tf.ConfigProto=None) -> tf.Session:
    """
    Create a monitored session for a worker.

    :param target: the target string for the tf.Session
    :param task_index: the task index of the worker
    :param checkpoint_dir: a directory path where the checkpoints will be stored
    :param checkpoint_save_secs: number of seconds between checkpoints storing
    :param config: the tensorflow configuration (optional)
    :return: the session to use for the run
    """
    # by convention, task 0 acts as the chief which handles checkpointing
    is_chief = (task_index == 0)

    return tf.train.MonitoredTrainingSession(
        master=target,
        is_chief=is_chief,
        hooks=[],
        checkpoint_dir=checkpoint_dir,
        save_checkpoint_secs=checkpoint_save_secs,
        config=config,
        log_step_count_steps=0  # disable logging of steps to avoid TF warning during inference
    )
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/distributed_tf_utils.py
0.93739
0.62134
distributed_tf_utils.py
pypi
import math
from types import FunctionType

import tensorflow as tf

from rl_coach.architectures import layers
from rl_coach.architectures.tensorflow_components import utils


def batchnorm_activation_dropout(input_layer, batchnorm, activation_function, dropout_rate, is_training, name):
    """
    Stack optional batchnorm, activation and dropout ops on top of ``input_layer``.

    :param input_layer: the tensor to build on top of
    :param batchnorm: if True, add a batch normalization layer
    :param activation_function: activation to apply (callable, or a name string resolved
        via utils.get_activation_function); falsy to skip
    :param dropout_rate: dropout rate; 0 disables dropout
    :param is_training: a tf tensor/placeholder (NOT a python bool) switching train/eval behavior
    :param name: base name for the created ops
    :return: list of the ops that were added (the input layer is excluded)
    """
    # renamed from `layers` so this local does not shadow the imported
    # `rl_coach.architectures.layers` module
    stacked = [input_layer]

    # Rationale: passing a bool here will mean that batchnorm and or activation will never activate
    assert not isinstance(is_training, bool)

    # batchnorm
    if batchnorm:
        stacked.append(
            tf.layers.batch_normalization(stacked[-1], name="{}_batchnorm".format(name), training=is_training)
        )

    # activation
    if activation_function:
        if isinstance(activation_function, str):
            activation_function = utils.get_activation_function(activation_function)
        stacked.append(
            activation_function(stacked[-1], name="{}_activation".format(name))
        )

    # dropout
    if dropout_rate > 0:
        stacked.append(
            tf.layers.dropout(stacked[-1], dropout_rate, name="{}_dropout".format(name), training=is_training)
        )

    # remove the input layer from the layers list
    del stacked[0]

    return stacked


# define global dictionary for storing layer type to layer implementation mapping
tf_layer_dict = dict()
tf_layer_class_dict = dict()


def reg_to_tf_instance(layer_type) -> FunctionType:
    """
    function decorator that registers a layer-instance converter for ``layer_type``
    :return: decorated function
    """
    def reg_impl_decorator(func):
        assert layer_type not in tf_layer_dict
        tf_layer_dict[layer_type] = func
        return func
    return reg_impl_decorator


def reg_to_tf_class(layer_type) -> FunctionType:
    """
    function decorator that registers a layer-class converter for ``layer_type``
    :return: decorated function
    """
    def reg_impl_decorator(func):
        assert layer_type not in tf_layer_class_dict
        tf_layer_class_dict[layer_type] = func
        return func
    return reg_impl_decorator


def convert_layer(layer):
    """
    If layer instance is callable (meaning this is already a concrete TF class), return layer,
    otherwise convert to TF type using the registered converter.
    :param layer: layer to be converted
    :return: converted layer if not callable, otherwise layer itself
    """
    if callable(layer):
        return layer
    return tf_layer_dict[type(layer)](layer)


def convert_layer_class(layer_class):
    """
    If the layer class already provides a TF implementation (has ``to_tf_instance``),
    return it as-is, otherwise look up the registered TF class for it.
    :param layer_class: layer class to be converted
    :return: the TF layer class
    """
    if hasattr(layer_class, 'to_tf_instance'):
        return layer_class
    else:
        return tf_layer_class_dict[layer_class]()


class Conv2d(layers.Conv2d):
    def __init__(self, num_filters: int, kernel_size: int, strides: int):
        super(Conv2d, self).__init__(num_filters=num_filters, kernel_size=kernel_size, strides=strides)

    def __call__(self, input_layer, name: str=None, is_training=None):
        """
        returns a tensorflow conv2d layer
        :param input_layer: previous layer
        :param name: layer name
        :return: conv2d layer
        """
        return tf.layers.conv2d(input_layer, filters=self.num_filters, kernel_size=self.kernel_size,
                                strides=self.strides, data_format='channels_last', name=name)

    @staticmethod
    @reg_to_tf_instance(layers.Conv2d)
    def to_tf_instance(base: layers.Conv2d):
        return Conv2d(
            num_filters=base.num_filters,
            kernel_size=base.kernel_size,
            strides=base.strides)

    @staticmethod
    @reg_to_tf_class(layers.Conv2d)
    def to_tf_class():
        return Conv2d


class BatchnormActivationDropout(layers.BatchnormActivationDropout):
    def __init__(self, batchnorm: bool=False, activation_function=None, dropout_rate: float=0):
        super(BatchnormActivationDropout, self).__init__(
            batchnorm=batchnorm, activation_function=activation_function, dropout_rate=dropout_rate)

    def __call__(self, input_layer, name: str=None, is_training=None):
        """
        returns a list of tensorflow batchnorm, activation and dropout layers
        :param input_layer: previous layer
        :param name: layer name
        :return: batchnorm, activation and dropout layers
        """
        return batchnorm_activation_dropout(input_layer, batchnorm=self.batchnorm,
                                            activation_function=self.activation_function,
                                            dropout_rate=self.dropout_rate,
                                            is_training=is_training, name=name)

    @staticmethod
    @reg_to_tf_instance(layers.BatchnormActivationDropout)
    def to_tf_instance(base: layers.BatchnormActivationDropout):
        # BUGFIX: previously returned a (class, instance) 2-tuple, unlike every other
        # to_tf_instance converter. convert_layer() returns this value as the layer and
        # then calls it; a tuple is not callable. Return only the converted instance.
        return BatchnormActivationDropout(
            batchnorm=base.batchnorm,
            activation_function=base.activation_function,
            dropout_rate=base.dropout_rate)

    @staticmethod
    @reg_to_tf_class(layers.BatchnormActivationDropout)
    def to_tf_class():
        return BatchnormActivationDropout


class Dense(layers.Dense):
    def __init__(self, units: int):
        super(Dense, self).__init__(units=units)

    def __call__(self, input_layer, name: str=None, kernel_initializer=None, bias_initializer=None,
                 activation=None, is_training=None):
        """
        returns a tensorflow dense layer
        :param input_layer: previous layer
        :param name: layer name
        :return: dense layer
        """
        if bias_initializer is None:
            bias_initializer = tf.zeros_initializer()
        return tf.layers.dense(input_layer, self.units, name=name, kernel_initializer=kernel_initializer,
                               activation=activation, bias_initializer=bias_initializer)

    @staticmethod
    @reg_to_tf_instance(layers.Dense)
    def to_tf_instance(base: layers.Dense):
        return Dense(units=base.units)

    @staticmethod
    @reg_to_tf_class(layers.Dense)
    def to_tf_class():
        return Dense


class NoisyNetDense(layers.NoisyNetDense):
    """
    A factorized Noisy Net layer

    https://arxiv.org/abs/1706.10295.
    """

    def __init__(self, units: int):
        super(NoisyNetDense, self).__init__(units=units)

    def __call__(self, input_layer, name: str, kernel_initializer=None, activation=None, is_training=None,
                 bias_initializer=None):
        """
        returns a NoisyNet dense layer
        :param input_layer: previous layer
        :param name: layer name
        :param kernel_initializer: initializer for kernels. Default is to use Gaussian noise that preserves stddev.
        :param activation: the activation function
        :return: dense layer
        """
        # TODO: noise sampling should be externally controlled. DQN is fine with sampling noise for every
        #  forward (either act or train, both for online and target networks).
        #  A3C, on the other hand, should sample noise only when policy changes (i.e. after every t_max steps)

        def _f(values):
            # signed square-root transform used by factorized noise (eq. 11 in the paper)
            return tf.sqrt(tf.abs(values)) * tf.sign(values)

        def _factorized_noise(inputs, outputs):
            # TODO: use factorized noise only for compute intensive algos (e.g. DQN).
            #  lighter algos (e.g. DQN) should not use it
            noise1 = _f(tf.random_normal((inputs, 1)))
            noise2 = _f(tf.random_normal((1, outputs)))
            return tf.matmul(noise1, noise2)

        num_inputs = input_layer.get_shape()[-1].value
        num_outputs = self.units

        stddev = 1 / math.sqrt(num_inputs)
        activation = activation if activation is not None else (lambda x: x)

        if kernel_initializer is None:
            kernel_mean_initializer = tf.random_uniform_initializer(-stddev, stddev)
            # self.sigma0 is presumably defined on the base layers.NoisyNetDense class -- TODO confirm
            kernel_stddev_initializer = tf.random_uniform_initializer(-stddev * self.sigma0,
                                                                      stddev * self.sigma0)
        else:
            kernel_mean_initializer = kernel_stddev_initializer = kernel_initializer
        if bias_initializer is None:
            bias_initializer = tf.zeros_initializer()

        with tf.variable_scope(None, default_name=name):
            weight_mean = tf.get_variable('weight_mean', shape=(num_inputs, num_outputs),
                                          initializer=kernel_mean_initializer)
            bias_mean = tf.get_variable('bias_mean', shape=(num_outputs,), initializer=bias_initializer)

            weight_stddev = tf.get_variable('weight_stddev', shape=(num_inputs, num_outputs),
                                            initializer=kernel_stddev_initializer)
            bias_stddev = tf.get_variable('bias_stddev', shape=(num_outputs,),
                                          initializer=kernel_stddev_initializer)
            bias_noise = _f(tf.random_normal((num_outputs,)))
            weight_noise = _factorized_noise(num_inputs, num_outputs)

            # perturbed parameters: mean + stddev * noise
            bias = bias_mean + bias_stddev * bias_noise
            weight = weight_mean + weight_stddev * weight_noise
            return activation(tf.matmul(input_layer, weight) + bias)

    @staticmethod
    @reg_to_tf_instance(layers.NoisyNetDense)
    def to_tf_instance(base: layers.NoisyNetDense):
        return NoisyNetDense(units=base.units)

    @staticmethod
    @reg_to_tf_class(layers.NoisyNetDense)
    def to_tf_class():
        return NoisyNetDense
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/layers.py
0.843315
0.475605
layers.py
pypi
from typing import Union, List

import tensorflow as tf

from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.middlewares.middleware import Middleware
from rl_coach.base_parameters import MiddlewareScheme
from rl_coach.core_types import Middleware_FC_Embedding
from rl_coach.utils import force_list


class FCMiddleware(Middleware):
    """
    Fully-connected middleware: stacks the dense layers described by ``scheme`` on top of
    the embedder output, optionally duplicating the stack into several independent streams
    (one output tensor per stream).
    """

    def __init__(self, activation_function=tf.nn.relu,
                 scheme: MiddlewareScheme = MiddlewareScheme.Medium,
                 batchnorm: bool = False, dropout_rate: float = 0.0,
                 name="middleware_fc_embedder", dense_layer=Dense, is_training=False, num_streams: int = 1):
        """
        :param activation_function: activation applied between the dense layers
        :param scheme: one of the pre-defined MiddlewareScheme layer stacks (see ``schemes``)
        :param batchnorm: if True, add batchnorm between layers
        :param dropout_rate: dropout rate between layers; 0 disables dropout
        :param name: name scope for the middleware
        :param dense_layer: dense layer class to use when building the scheme
        :param is_training: tensor/flag forwarded to batchnorm/dropout layers
        :param num_streams: number of independent copies of the layer stack to build
        """
        super().__init__(activation_function=activation_function, batchnorm=batchnorm,
                         dropout_rate=dropout_rate, scheme=scheme, name=name, dense_layer=dense_layer,
                         is_training=is_training)
        self.return_type = Middleware_FC_Embedding

        # each stream is an independent copy of the scheme's layer stack
        assert(isinstance(num_streams, int) and num_streams >= 1)
        self.num_streams = num_streams

    def _build_module(self):
        # self.output collects one tensor per stream; every stream starts from self.input
        self.output = []

        for stream_idx in range(self.num_streams):
            layers = [self.input]
            for idx, layer_params in enumerate(self.layers_params):
                # the name index is offset per stream so variable names stay unique across streams
                layers.extend(force_list(
                    layer_params(layers[-1], name='{}_{}'.format(layer_params.__class__.__name__,
                                                                 idx + stream_idx * len(self.layers_params)),
                                 is_training=self.is_training)
                ))
            self.output.append((layers[-1]))

    @property
    def schemes(self):
        # pre-defined dense stacks, from empty to deep
        return {
            MiddlewareScheme.Empty:
                [],

            # ppo
            MiddlewareScheme.Shallow:
                [
                    self.dense_layer(64)
                ],

            # dqn
            MiddlewareScheme.Medium:
                [
                    self.dense_layer(512)
                ],

            MiddlewareScheme.Deep: \
                [
                    self.dense_layer(128),
                    self.dense_layer(128),
                    self.dense_layer(128)
                ]
        }

    def __str__(self):
        stream = [str(l) for l in self.layers_params]
        if self.layers_params:
            if self.num_streams > 1:
                # prepend a blank slot per stream and indent the layer lines, then replace
                # every stream's leading slot (step = len(stream)) with a "Stream i" header
                stream = [''] + ['\t' + l for l in stream]
                result = stream * self.num_streams
                result[0::len(stream)] = ['Stream {}'.format(i) for i in range(self.num_streams)]
            else:
                result = stream
            return '\n'.join(result)
        else:
            return 'No layers'
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/middlewares/fc_middleware.py
0.879406
0.262254
fc_middleware.py
pypi
import numpy as np
import tensorflow as tf

from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.middlewares.middleware import Middleware
from rl_coach.base_parameters import MiddlewareScheme
from rl_coach.core_types import Middleware_LSTM_Embedding
from rl_coach.utils import force_list


class LSTMMiddleware(Middleware):
    """
    Middleware that feeds the embedder output through the scheme's dense layers and then a
    single-layer LSTM. The LSTM state is exposed through placeholders (``state_in``) and
    output tensors (``state_out``) so the agent can carry recurrent state across steps.
    """

    def __init__(self, activation_function=tf.nn.relu, number_of_lstm_cells: int=256,
                 scheme: MiddlewareScheme = MiddlewareScheme.Medium,
                 batchnorm: bool = False, dropout_rate: float = 0.0,
                 name="middleware_lstm_embedder", dense_layer=Dense, is_training=False):
        """
        :param activation_function: activation applied between the dense layers
        :param number_of_lstm_cells: number of hidden units in the LSTM cell
        :param scheme: pre-defined dense stack built before the LSTM (see ``schemes``)
        :param batchnorm: if True, add batchnorm between the dense layers
        :param dropout_rate: dropout rate between the dense layers; 0 disables dropout
        :param name: name scope for the middleware
        :param dense_layer: dense layer class to use when building the scheme
        :param is_training: tensor/flag forwarded to batchnorm/dropout layers
        """
        super().__init__(activation_function=activation_function, batchnorm=batchnorm,
                         dropout_rate=dropout_rate, scheme=scheme, name=name, dense_layer=dense_layer,
                         is_training=is_training)
        self.return_type = Middleware_LSTM_Embedding
        self.number_of_lstm_cells = number_of_lstm_cells
        # populated in _build_module with the input, the dense layers, then the LSTM output
        self.layers = []

    def _build_module(self):
        """
        self.state_in: tuple of placeholders containing the initial state
        self.state_out: tuple of output state

        todo: it appears that the shape of the output is batch, feature.
        the code here seems to be slicing off the first element in the batch
        which would definitely be wrong. need to double check the shape
        """

        self.layers.append(self.input)

        # optionally insert some layers before the LSTM
        for idx, layer_params in enumerate(self.layers_params):
            self.layers.extend(force_list(
                layer_params(self.layers[-1], name='fc{}'.format(idx), is_training=self.is_training)
            ))

        # add the LSTM layer
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.number_of_lstm_cells, state_is_tuple=True)
        # zero initial state for a batch of 1 (numpy arrays fed through the placeholders below)
        self.c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
        self.h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
        self.state_init = [self.c_init, self.h_init]
        self.c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
        self.h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
        self.state_in = (self.c_in, self.h_in)

        # treat the batch dimension as time: wrap the (batch, features) tensor into a
        # single sequence of shape (1, batch, features) for dynamic_rnn
        rnn_in = tf.expand_dims(self.layers[-1], [0])
        step_size = tf.shape(self.layers[-1])[:1]
        state_in = tf.nn.rnn_cell.LSTMStateTuple(self.c_in, self.h_in)

        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
            time_major=False)
        lstm_c, lstm_h = lstm_state
        # keep only the first row of the state (see the todo in the docstring above)
        self.state_out = (lstm_c[:1, :], lstm_h[:1, :])
        # flatten back to (batch, number_of_lstm_cells)
        self.output = tf.reshape(lstm_outputs, [-1, self.number_of_lstm_cells])

    @property
    def schemes(self):
        # pre-defined dense stacks built before the LSTM, from empty to deep
        return {
            MiddlewareScheme.Empty:
                [],

            # ppo
            MiddlewareScheme.Shallow:
                [
                    self.dense_layer(64)
                ],

            # dqn
            MiddlewareScheme.Medium:
                [
                    self.dense_layer(512)
                ],

            MiddlewareScheme.Deep: \
                [
                    self.dense_layer(128),
                    self.dense_layer(128),
                    self.dense_layer(128)
                ]
        }
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/middlewares/lstm_middleware.py
0.881793
0.242935
lstm_middleware.py
pypi
from typing import List

import tensorflow as tf

from rl_coach.architectures.tensorflow_components.layers import Conv2d, Dense
from rl_coach.architectures.tensorflow_components.embedders.embedder import InputEmbedder
from rl_coach.base_parameters import EmbedderScheme
from rl_coach.core_types import InputTensorEmbedding


class TensorEmbedder(InputEmbedder):
    """
    Input embedder for tensor observations of arbitrary rank, producing a vector embedding
    by passing the input through a neural network. Typical uses are video or 3D image data
    (4D tensors), or any other observation that is more than one dimension (i.e. not a
    vector) but is not an image.

    NOTE: there are no pre-defined schemes for this embedder. The user must supply a custom
    scheme -- a list of callables -- via InputEmbedderParameters.scheme in the respective
    preset. Each callable receives a single input, the normalized observation, and returns
    a Tensorflow symbol computing an embedding vector for each sample in the batch. The
    scheme's symbols are stacked with optional batchnorm, activation and dropout in between,
    as configured in InputEmbedderParameters.
    """

    def __init__(self,
                 input_size: List[int],
                 activation_function=tf.nn.relu,
                 scheme: EmbedderScheme=None,
                 batchnorm: bool=False,
                 dropout_rate: float=0.0,
                 name: str= "embedder",
                 input_rescaling: float=1.0,
                 input_offset: float=0.0,
                 input_clipping=None,
                 dense_layer=Dense,
                 is_training=False):
        super().__init__(input_size, activation_function, scheme, batchnorm, dropout_rate,
                         name, input_rescaling, input_offset, input_clipping,
                         dense_layer=dense_layer, is_training=is_training)
        self.return_type = InputTensorEmbedding
        # a custom scheme is mandatory -- there are no built-in schemes for tensor inputs
        assert scheme is not None, "Custom scheme (a list of callables) must be specified for TensorEmbedder"

    @property
    def schemes(self):
        # intentionally empty: the scheme must be user-supplied (asserted in __init__)
        return {}
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/embedders/tensor_embedder.py
0.953177
0.591989
tensor_embedder.py
pypi
from typing import List

import tensorflow as tf

from rl_coach.architectures.tensorflow_components.layers import Conv2d, Dense
from rl_coach.architectures.tensorflow_components.embedders.embedder import InputEmbedder
from rl_coach.base_parameters import EmbedderScheme
from rl_coach.core_types import InputImageEmbedding


class ImageEmbedder(InputEmbedder):
    """
    Input embedder for image-like observations: runs convolutions over the input and
    flattens the result. Channels are expected on the last axis. Custom rescaling of the
    input before the network is supported via the input_rescaling / input_offset /
    input_clipping parameters.
    """

    def __init__(self,
                 input_size: List[int],
                 activation_function=tf.nn.relu,
                 scheme: EmbedderScheme=EmbedderScheme.Medium,
                 batchnorm: bool=False,
                 dropout_rate: float=0.0,
                 name: str= "embedder",
                 input_rescaling: float=255.0,
                 input_offset: float=0.0,
                 input_clipping=None,
                 dense_layer=Dense,
                 is_training=False):
        super().__init__(input_size, activation_function, scheme, batchnorm, dropout_rate,
                         name, input_rescaling, input_offset, input_clipping,
                         dense_layer=dense_layer, is_training=is_training)
        self.return_type = InputImageEmbedding

        # a real (non-empty) scheme requires a rank-3 input: height x width x channels
        if scheme != EmbedderScheme.Empty and len(input_size) != 3:
            raise ValueError("Image embedders expect the input size to have 3 dimensions. The given size is: {}"
                             .format(input_size))

    @property
    def schemes(self):
        # pre-defined convolution stacks, from empty to deep
        return {
            EmbedderScheme.Empty:
                [],

            EmbedderScheme.Shallow:
                [
                    Conv2d(32, 3, 1)
                ],

            # atari dqn
            EmbedderScheme.Medium:
                [
                    Conv2d(32, 8, 4),
                    Conv2d(64, 4, 2),
                    Conv2d(64, 3, 1)
                ],

            # carla
            EmbedderScheme.Deep:
                [
                    Conv2d(32, 5, 2),
                    Conv2d(32, 3, 1),
                    Conv2d(64, 3, 2),
                    Conv2d(64, 3, 1),
                    Conv2d(128, 3, 2),
                    Conv2d(128, 3, 1),
                    Conv2d(256, 3, 2),
                    Conv2d(256, 3, 1)
                ]
        }
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/embedders/image_embedder.py
0.947805
0.500183
image_embedder.py
pypi
import tensorflow as tf

from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.q_head import QHead
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import SpacesDefinition


class DuelingQHead(QHead):
    """
    Dueling Q head: splits the Q estimate into a state-value stream V(s) and an
    action-advantage stream A(s, a), then combines them as
    Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)).
    """

    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str='relu', dense_layer=Dense):
        """
        :param agent_parameters: algorithm parameters (forwarded to QHead)
        :param spaces: action/observation space definitions; determines self.num_actions
        :param network_name: name of the enclosing network
        :param head_idx: index of this head within the network
        :param loss_weight: relative weight of this head's loss
        :param is_local: whether this head belongs to the local (vs. global) network
        :param activation_function: activation used inside the two streams
        :param dense_layer: dense layer class to use
        """
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer)
        self.name = 'dueling_q_values_head'

    def _build_module(self, input_layer):
        # state value tower - V
        with tf.variable_scope("state_value"):
            self.state_value = self.dense_layer(512)(input_layer, activation=self.activation_function, name='fc1')
            self.state_value = self.dense_layer(1)(self.state_value, name='fc2')

        # action advantage tower - A
        with tf.variable_scope("action_advantage"):
            self.action_advantage = self.dense_layer(512)(input_layer, activation=self.activation_function,
                                                          name='fc1')
            self.action_advantage = self.dense_layer(self.num_actions)(self.action_advantage, name='fc2')
            # subtracting the mean advantage makes V and A identifiable
            self.action_mean = tf.reduce_mean(self.action_advantage, axis=1, keepdims=True)
            self.action_advantage = self.action_advantage - self.action_mean

        # merge to state-action value function Q
        self.q_values = self.output = tf.add(self.state_value, self.action_advantage, name='output')

        # used in batch-rl to estimate a probablity distribution over actions
        self.softmax = self.add_softmax_with_temperature()

    def __str__(self):
        # BUGFIX: removed a spurious ``.format(self.num_actions)`` on the
        # "Subtract(A, Mean(A))" line -- that string has no placeholder, so the call was a no-op.
        result = [
            "State Value Stream - V",
            "\tDense (num outputs = 512)",
            "\tDense (num outputs = 1)",
            "Action Advantage Stream - A",
            "\tDense (num outputs = 512)",
            "\tDense (num outputs = {})".format(self.num_actions),
            "\tSubtract(A, Mean(A))",
            "Add (V, A)"
        ]
        return '\n'.join(result)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/heads/dueling_q_head.py
0.820254
0.296024
dueling_q_head.py
pypi
import tensorflow as tf

from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import eps

# clipping bounds for the predicted log standard deviation
LOG_SIG_CAP_MAX = 2
LOG_SIG_CAP_MIN = -20


class SACPolicyHead(Head):
    """
    Soft Actor-Critic policy head for continuous actions: predicts a diagonal Gaussian
    (mean and clipped log-std), samples actions with the reparameterization trick, and
    optionally squashes them with tanh, correcting the log-probabilities accordingly
    (appendix C of the SAC paper).
    """

    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str='relu', squash: bool = True, dense_layer=Dense):
        """
        :param agent_parameters: algorithm parameters (forwarded to Head)
        :param spaces: action/observation space definitions
        :param network_name: name of the enclosing network
        :param head_idx: index of this head within the network
        :param loss_weight: relative weight of this head's loss
        :param is_local: whether this head belongs to the local (vs. global) network
        :param activation_function: activation used in the head
        :param squash: if True, squash sampled actions with tanh and correct the log-probs
        :param dense_layer: dense layer class to use
        """
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer)
        self.name = 'sac_policy_head'
        self.return_type = ActionProbabilities
        self.num_actions = self.spaces.action.shape      # continuous actions
        self.squash = squash                             # squashing using tanh

    def _build_module(self, input_layer):
        self.given_raw_actions = tf.placeholder(tf.float32, [None, self.num_actions], name="actions")
        self.input = [self.given_raw_actions]
        self.output = []

        # build the network
        self._build_continuous_net(input_layer, self.spaces.action)

    def _squash_correction(self, actions):
        '''
        correct squash operation (in case of bounded actions) according to appendix C in the paper.
        NOTE : this correction assume the squash is done with tanh.
        :param actions: unbounded actions
        :return: the correction to be applied to the log_prob of the actions, assuming tanh squash
        '''
        if not self.squash:
            return 0
        # eps guards the log against tanh(a)^2 == 1
        return tf.reduce_sum(tf.log(1 - tf.tanh(actions) ** 2 + eps), axis=1)

    def _build_continuous_net(self, input_layer, action_space):
        num_actions = action_space.shape[0]

        # a single dense layer predicts both the mean and the log-std, concatenated
        self.policy_mu_and_logsig = self.dense_layer(2*num_actions)(input_layer, name='policy_mu_logsig')
        self.policy_mean = tf.identity(self.policy_mu_and_logsig[..., :num_actions], name='policy_mean')
        self.policy_log_std = tf.clip_by_value(self.policy_mu_and_logsig[..., num_actions:],
                                               LOG_SIG_CAP_MIN, LOG_SIG_CAP_MAX, name='policy_log_std')

        self.output.append(self.policy_mean)         # output[0]
        self.output.append(self.policy_log_std)      # output[1]

        # define the distributions for the policy
        # Tensorflow's multivariate normal distribution supports reparameterization
        tfd = tf.contrib.distributions
        self.policy_distribution = tfd.MultivariateNormalDiag(loc=self.policy_mean,
                                                              scale_diag=tf.exp(self.policy_log_std))

        # define network outputs
        # note that tensorflow supports reparametrization.
        # i.e. policy_action_sample is a tensor through which gradients can flow
        self.raw_actions = self.policy_distribution.sample()
        if self.squash:
            self.actions = tf.tanh(self.raw_actions)
            # correct log_prob in case of squash (see appendix C in the paper)
            squash_correction = self._squash_correction(self.raw_actions)
        else:
            self.actions = self.raw_actions
            squash_correction = 0

        # policy_action_logprob is a tensor through which gradients can flow
        self.sampled_actions_logprob = self.policy_distribution.log_prob(self.raw_actions) - squash_correction
        self.sampled_actions_logprob_mean = tf.reduce_mean(self.sampled_actions_logprob)

        self.output.append(self.raw_actions)                   # output[2] : sampled raw action (before squash)
        self.output.append(self.actions)                       # output[3] : squashed (if needed) version of sampled raw_actions
        self.output.append(self.sampled_actions_logprob)       # output[4] : log prob of sampled action (squash corrected)
        self.output.append(self.sampled_actions_logprob_mean)  # output[5] : mean of log prob of sampled actions (squash corrected)

    def __str__(self):
        # BUGFIX: the first two entries were previously fused into a single string by a
        # missing comma (implicit literal concatenation), dropping a newline from the output.
        result = [
            "policy head:",
            "\t\tDense (num outputs = 256)",
            "\t\tDense (num outputs = 256)",
            "\t\tDense (num outputs = {0})".format(2*self.num_actions),
            "policy_mu = output[:num_actions], policy_std = output[num_actions:]"
        ]
        return '\n'.join(result)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/heads/sac_head.py
0.847274
0.292829
sac_head.py
pypi
import tensorflow as tf

from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import QActionStateValue
from rl_coach.spaces import BoxActionSpace
from rl_coach.spaces import SpacesDefinition


class NAFHead(Head):
    """
    Normalized Advantage Functions head (Gu et al., https://arxiv.org/abs/1603.00748) for
    continuous control: predicts V(s), an action mean mu(s), and a lower-triangular matrix L
    defining P = L*L^T, from which Q(s, u) = V(s) - 1/2 (u - mu)^T P (u - mu).
    """

    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str='relu', dense_layer=Dense):
        """
        :param agent_parameters: algorithm parameters; replace_mse_with_huber_loss selects the loss
        :param spaces: action space definitions; must contain a BoxActionSpace action space
        :param network_name: name of the enclosing network
        :param head_idx: index of this head within the network
        :param loss_weight: relative weight of this head's loss
        :param is_local: whether this head belongs to the local (vs. global) network
        :param activation_function: activation used for the mu stream
        :param dense_layer: dense layer class to use
        """
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local,
                         activation_function, dense_layer=dense_layer)
        if not isinstance(self.spaces.action, BoxActionSpace):
            raise ValueError("NAF works only for continuous action spaces (BoxActionSpace)")

        self.name = 'naf_q_values_head'
        self.num_actions = self.spaces.action.shape[0]
        self.output_scale = self.spaces.action.max_abs_range
        self.return_type = QActionStateValue
        if agent_parameters.network_wrappers[self.network_name].replace_mse_with_huber_loss:
            self.loss_type = tf.losses.huber_loss
        else:
            self.loss_type = tf.losses.mean_squared_error

    def _build_module(self, input_layer):
        # NAF
        self.action = tf.placeholder(tf.float32, [None, self.num_actions], name="action")
        self.input = self.action

        # V Head
        self.V = self.dense_layer(1)(input_layer, name='V')

        # mu Head
        mu_unscaled = self.dense_layer(self.num_actions)(input_layer, activation=self.activation_function,
                                                         name='mu_unscaled')
        self.mu = tf.multiply(mu_unscaled, self.output_scale, name='mu')

        # A Head
        # l_vector is a vector that includes a lower-triangular matrix values
        # BUGFIX: use integer division -- the unit count n*(n+1)/2 must be an int, and true
        # division yields a float (the product of consecutive integers is always even, so
        # floor division is exact).
        self.l_vector = self.dense_layer((self.num_actions * (self.num_actions + 1)) // 2)(input_layer,
                                                                                           name='l_vector')

        # Convert l to a lower triangular matrix and exponentiate its diagonal
        i = 0
        columns = []
        for col in range(self.num_actions):
            start_row = col
            num_non_zero_elements = self.num_actions - start_row

            # column layout: zeros above the diagonal, exp() of the diagonal element,
            # then the raw sub-diagonal elements
            zeros_column_part = tf.zeros_like(self.l_vector[:, 0:start_row])
            diag_element = tf.expand_dims(tf.exp(self.l_vector[:, i]), 1)
            non_zeros_non_diag_column_part = self.l_vector[:, (i + 1):(i + num_non_zero_elements)]

            columns.append(tf.concat([zeros_column_part, diag_element, non_zeros_non_diag_column_part], axis=1))
            i += num_non_zero_elements
        self.L = tf.transpose(tf.stack(columns, axis=1), (0, 2, 1))

        # P = L*L^T
        self.P = tf.matmul(self.L, tf.transpose(self.L, (0, 2, 1)))

        # A = -1/2 * (u - mu)^T * P * (u - mu)
        action_diff = tf.expand_dims(self.action - self.mu, -1)
        a_matrix_form = -0.5 * tf.matmul(tf.transpose(action_diff, (0, 2, 1)),
                                         tf.matmul(self.P, action_diff))
        self.A = tf.reshape(a_matrix_form, [-1, 1])

        # Q Head
        self.Q = tf.add(self.V, self.A, name='Q')

        self.output = self.Q

    def __str__(self):
        result = [
            "State Value Stream - V",
            "\tDense (num outputs = 1)",
            "Action Advantage Stream - A",
            # integer division to match the actual (integer) layer width
            "\tDense (num outputs = {})".format((self.num_actions * (self.num_actions + 1)) // 2),
            "\tReshape to lower triangular matrix L (new size = {} x {})".format(self.num_actions,
                                                                                 self.num_actions),
            "\tP = L*L^T",
            "\tA = -1/2 * (u - mu)^T * P * (u - mu)",
            "Action Stream - mu",
            "\tDense (num outputs = {})".format(self.num_actions),
            "\tActivation (type = {})".format(self.activation_function.__name__),
            "\tMultiply (factor = {})".format(self.output_scale),
            "State-Action Value Stream - Q",
            "\tAdd (V, A)"
        ]
        return '\n'.join(result)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/heads/naf_head.py
0.894011
0.314011
naf_head.py
pypi
import numpy as np
import tensorflow as tf

from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head, normalized_columns_initializer
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.exploration_policies.continuous_entropy import ContinuousEntropyParameters
from rl_coach.spaces import DiscreteActionSpace, BoxActionSpace, CompoundActionSpace
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import eps, indent_string


class PolicyHead(Head):
    """Policy gradient head.

    Builds a stochastic policy on top of the middleware output: softmax
    probabilities for discrete action spaces, and a Gaussian (mean, stdev)
    for continuous (box) action spaces. A CompoundActionSpace gets one
    sub-head per sub-action space. On local networks the loss is the vanilla
    policy gradient -E[log pi(a|s) * advantage], optionally regularized with
    an entropy bonus (beta) and a pre-activation penalty (action_penalty).
    """

    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='tanh',
                 dense_layer=Dense):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'policy_values_head'
        self.return_type = ActionProbabilities
        self.beta = None  # entropy regularization coefficient (tf.Variable), set below if enabled
        self.action_penalty = None  # pre-activation penalty coefficient, set below if enabled
        self.exploration_policy = agent_parameters.exploration

        # a scalar weight that penalizes low entropy values to encourage exploration
        if hasattr(agent_parameters.algorithm, 'beta_entropy'):
            # we set the beta value as a tf variable so it can be updated later if needed
            # (placed in the LOCAL_VARIABLES collection so it is not shared across workers)
            self.beta = tf.Variable(float(agent_parameters.algorithm.beta_entropy),
                                    trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
            self.beta_placeholder = tf.placeholder('float')
            self.set_beta = tf.assign(self.beta, self.beta_placeholder)

        # a scalar weight that penalizes high activation values (before the activation function) for the final layer
        if hasattr(agent_parameters.algorithm, 'action_penalty'):
            self.action_penalty = agent_parameters.algorithm.action_penalty

    def _build_module(self, input_layer):
        """Build one policy sub-head per (sub-)action space, plus the PG loss on local networks."""
        # these lists are appended to by _build_discrete_net / _build_continuous_net
        self.actions = []
        self.input = self.actions
        self.policy_distributions = []
        self.output = []
        action_spaces = [self.spaces.action]
        if isinstance(self.spaces.action, CompoundActionSpace):
            action_spaces = self.spaces.action.sub_action_spaces

        # create a compound action network
        for action_space_idx, action_space in enumerate(action_spaces):
            with tf.variable_scope("sub_action_{}".format(action_space_idx)):
                if isinstance(action_space, DiscreteActionSpace):
                    # create a discrete action network (softmax probabilities output)
                    self._build_discrete_net(input_layer, action_space)
                elif isinstance(action_space, BoxActionSpace):
                    # create a continuous action network (bounded mean and stdev outputs)
                    self._build_continuous_net(input_layer, action_space)

        if self.is_local:
            # add entropy regularization
            # NOTE(review): this relies on the truthiness of a tf.Variable to mean
            # "beta_entropy was configured"; `self.beta is not None` would be the
            # explicit form - confirm before changing.
            if self.beta:
                self.entropy = tf.add_n([tf.reduce_mean(dist.entropy()) for dist in self.policy_distributions])
                self.regularizations += [-tf.multiply(self.beta, self.entropy, name='entropy_regularization')]

            # calculate loss: joint log-prob over all sub-heads, weighted by the advantages
            self.action_log_probs_wrt_policy = \
                tf.add_n([dist.log_prob(action) for dist, action in zip(self.policy_distributions, self.actions)])
            self.advantages = tf.placeholder(tf.float32, [None], name="advantages")
            self.target = self.advantages
            self.loss = -tf.reduce_mean(self.action_log_probs_wrt_policy * self.advantages)
            tf.losses.add_loss(self.loss_weight[0] * self.loss)

    def _build_discrete_net(self, input_layer, action_space):
        """Build a softmax policy sub-head for a discrete action space."""
        num_actions = len(action_space.actions)
        self.actions.append(tf.placeholder(tf.int32, [None], name="actions"))

        policy_values = self.dense_layer(num_actions)(input_layer, name='fc')
        self.policy_probs = tf.nn.softmax(policy_values, name="policy")

        # define the distributions for the policy and the old policy
        # (the + eps is to prevent probability 0 which will cause the log later on to be -inf)
        policy_distribution = tf.contrib.distributions.Categorical(probs=(self.policy_probs + eps))
        self.policy_distributions.append(policy_distribution)
        self.output.append(self.policy_probs)

    def _build_continuous_net(self, input_layer, action_space):
        """Build a Gaussian policy sub-head (mean + stdev) for a box action space."""
        num_actions = action_space.shape
        self.actions.append(tf.placeholder(tf.float32, [None, num_actions], name="actions"))

        # output activation function
        if np.all(action_space.max_abs_range < np.inf):
            # bounded actions: squash with the head activation, then rescale to the range
            self.output_scale = action_space.max_abs_range
            self.continuous_output_activation = self.activation_function
        else:
            # unbounded actions
            self.output_scale = 1
            self.continuous_output_activation = None

        # mean
        pre_activation_policy_values_mean = self.dense_layer(num_actions)(input_layer, name='fc_mean')
        policy_values_mean = self.continuous_output_activation(pre_activation_policy_values_mean)
        self.policy_mean = tf.multiply(policy_values_mean, self.output_scale, name='output_mean')

        self.output.append(self.policy_mean)

        # standard deviation
        if isinstance(self.exploration_policy, ContinuousEntropyParameters):
            # the stdev is an output of the network and uses a softplus activation as defined in A3C
            # (+ eps keeps the stdev strictly positive)
            policy_values_std = self.dense_layer(num_actions)(input_layer,
                                                              kernel_initializer=normalized_columns_initializer(0.01),
                                                              name='fc_std')
            self.policy_std = tf.nn.softplus(policy_values_std, name='output_variance') + eps

            self.output.append(self.policy_std)
        else:
            # the stdev is an externally given value
            # Warning: we need to explicitly put this variable in the local variables collections, since defining
            # it as not trainable puts it for some reason in the global variables collections. If this is not done,
            # the variable won't be initialized and when working with multiple workers they will get stuck.
            self.policy_std = tf.Variable(np.ones(num_actions), dtype='float32', trainable=False,
                                          name='policy_stdev', collections=[tf.GraphKeys.LOCAL_VARIABLES])

            # assign op for the policy std
            self.policy_std_placeholder = tf.placeholder('float32', (num_actions,))
            self.assign_policy_std = tf.assign(self.policy_std, self.policy_std_placeholder)

        # define the distributions for the policy and the old policy
        policy_distribution = tf.contrib.distributions.MultivariateNormalDiag(self.policy_mean, self.policy_std)
        self.policy_distributions.append(policy_distribution)

        if self.is_local:
            # add a squared penalty on the squared pre-activation features of the action
            if self.action_penalty and self.action_penalty != 0:
                self.regularizations += [
                    self.action_penalty * tf.reduce_mean(tf.square(pre_activation_policy_values_mean))]

    def __str__(self):
        """Return a human-readable description of the head's topology."""
        action_spaces = [self.spaces.action]
        if isinstance(self.spaces.action, CompoundActionSpace):
            action_spaces = self.spaces.action.sub_action_spaces
        result = []
        for action_space_idx, action_space in enumerate(action_spaces):
            action_head_mean_result = []
            if isinstance(action_space, DiscreteActionSpace):
                # create a discrete action network (softmax probabilities output)
                action_head_mean_result.append("Dense (num outputs = {})".format(len(action_space.actions)))
                action_head_mean_result.append("Softmax")
            elif isinstance(action_space, BoxActionSpace):
                # create a continuous action network (bounded mean and stdev outputs)
                action_head_mean_result.append("Dense (num outputs = {})".format(action_space.shape))
                if np.all(action_space.max_abs_range < np.inf):
                    # bounded actions
                    action_head_mean_result.append("Activation (type = {})".format(self.activation_function.__name__))
                    action_head_mean_result.append("Multiply (factor = {})".format(action_space.max_abs_range))

            action_head_stdev_result = []
            if isinstance(self.exploration_policy, ContinuousEntropyParameters):
                action_head_stdev_result.append("Dense (num outputs = {})".format(action_space.shape))
                action_head_stdev_result.append("Softplus")

            action_head_result = []
            if action_head_stdev_result:
                action_head_result.append("Mean Stream")
                action_head_result.append(indent_string('\n'.join(action_head_mean_result)))
                action_head_result.append("Stdev Stream")
                action_head_result.append(indent_string('\n'.join(action_head_stdev_result)))
            else:
                action_head_result.append('\n'.join(action_head_mean_result))

            if len(action_spaces) > 1:
                result.append("Action head {}".format(action_space_idx))
                result.append(indent_string('\n'.join(action_head_result)))
            else:
                result.append('\n'.join(action_head_result))

        return '\n'.join(result)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/heads/policy_head.py
0.855987
0.361052
policy_head.py
pypi
import tensorflow as tf

from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import QActionStateValue
from rl_coach.spaces import SpacesDefinition, BoxActionSpace


class SACQHead(Head):
    """Soft Actor-Critic Q head.

    Builds two independent Q networks (q1, q2) over the same (state, action)
    input and outputs their element-wise minimum, the clipped double-Q trick
    used by SAC. The loss is the sum of the two networks' MSE losses against
    a shared target placeholder.
    """

    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense, output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'q_values_head'
        if isinstance(self.spaces.action, BoxActionSpace):
            self.num_actions = self.spaces.action.shape  # continuous actions
        else:
            raise ValueError(
                'SACQHead does not support action spaces of type: {class_name}'.format(
                    class_name=self.spaces.action.__class__.__name__,
                )
            )
        self.return_type = QActionStateValue
        # extract the topology from the SACQHeadParameters
        self.network_layers_sizes = agent_parameters.network_wrappers['q'].heads_parameters[0].network_layers_sizes
        self.output_bias_initializer = output_bias_initializer

    def _build_q_subnet(self, scope, input_layer, output_name):
        """Build one Q sub-network under `scope` and return its scalar output tensor.

        Note (1): as in the authors' rllab implementation of SAC, the first-layer
        embeddings of the observation and the action are merged by summation
        (broadcasting the bias).
        """
        with tf.variable_scope(scope):
            first_layer_size = self.network_layers_sizes[0]
            obs_emb = self.dense_layer(first_layer_size)(input_layer, activation=self.activation_function)
            act_emb = self.dense_layer(first_layer_size)(self.actions, activation=self.activation_function)
            hidden = obs_emb + act_emb  # merging the inputs by summing them (see Note (1))
            for layer_size in self.network_layers_sizes[1:]:
                hidden = self.dense_layer(layer_size)(hidden, activation=self.activation_function)
            # the output layer
            return self.dense_layer(1)(hidden, name=output_name,
                                       bias_initializer=self.output_bias_initializer)

    def _build_module(self, input_layer):
        # SAC Q head is basically 2 networks running in parallel on the same input (state, action).
        # The state is the observation fed through input_layer; the action is fed through a
        # placeholder directly to the head. Each computes a q value: q1(s,a) and q2(s,a);
        # the head output is min(q1, q2).
        self.actions = tf.placeholder(tf.float32, [None, self.num_actions], name="actions")
        self.target = tf.placeholder(tf.float32, [None, 1], name="q_targets")
        self.input = [self.actions]
        self.output = []

        # the two sub-networks are structurally identical but share no parameters
        self.q1_output = self._build_q_subnet("q1_head", input_layer, 'q1_output')
        self.q2_output = self._build_q_subnet("q2_head", input_layer, 'q2_output')

        # take the minimum as the network's output. this is the log_target (in the original implementation)
        self.q_output = tf.minimum(self.q1_output, self.q2_output, name='q_output')

        # mean q value used for the policy gradients
        # self.q_output_mean = tf.reduce_mean(self.q1_output)  # option 1: use q1
        self.q_output_mean = tf.reduce_mean(self.q_output)  # option 2: use min(q1, q2)
        self.output.append(self.q_output)
        self.output.append(self.q_output_mean)

        # defining the loss: since the two losses depend on disjoint parameters,
        # summing them optimizes each network independently
        self.q1_loss = 0.5 * tf.reduce_mean(tf.square(self.q1_output - self.target))
        self.q2_loss = 0.5 * tf.reduce_mean(tf.square(self.q2_output - self.target))
        self.loss = self.q1_loss + self.q2_loss
        tf.losses.add_loss(self.loss)

    def __str__(self):
        # Bug fix: the original list was missing commas after "q1 output" and
        # "q2 output", so implicit string concatenation fused each title with the
        # following line. Also report the actual configured layer sizes instead of
        # hard-coded 256s.
        hidden_lines = ["\t\tDense (num outputs = {})".format(size) for size in self.network_layers_sizes]
        result = (
            ["q1 output"] + hidden_lines + ["\t\tDense (num outputs = 1)"] +
            ["q2 output"] + hidden_lines + ["\t\tDense (num outputs = 1)"] +
            ["min(Q1,Q2)"]
        )
        return '\n'.join(result)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/heads/sac_q_head.py
0.877857
0.369599
sac_q_head.py
pypi
import tensorflow as tf

from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.architectures.tensorflow_components.heads.head import Head, normalized_columns_initializer
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import VStateValue
from rl_coach.spaces import SpacesDefinition


class VHead(Head):
    """State value (V) head: a single linear unit predicting V(s).

    The loss is MSE, or Huber loss when the owning network wrapper sets
    `replace_mse_with_huber_loss`. The output kernel initializer is selectable
    via `initializer` ('normalized_columns', 'xavier', or None for the layer
    default).
    """

    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
                 head_idx: int = 0, loss_weight: float = 1., is_local: bool = True,
                 activation_function: str = 'relu', dense_layer=Dense, initializer='normalized_columns',
                 output_bias_initializer=None):
        super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.name = 'v_values_head'
        self.return_type = VStateValue
        # network_name may be scoped ('<wrapper>/<scope>'); the wrapper key is the first component
        if agent_parameters.network_wrappers[self.network_name.split('/')[0]].replace_mse_with_huber_loss:
            self.loss_type = tf.losses.huber_loss
        else:
            self.loss_type = tf.losses.mean_squared_error
        self.initializer = initializer
        self.output_bias_initializer = output_bias_initializer

    def _build_module(self, input_layer):
        # Standard V Network - a single linear unit on top of the middleware output
        if self.initializer == 'normalized_columns':
            self.output = self.dense_layer(1)(input_layer, name='output',
                                              kernel_initializer=normalized_columns_initializer(1.0),
                                              bias_initializer=self.output_bias_initializer)
        elif self.initializer == 'xavier' or self.initializer is None:
            self.output = self.dense_layer(1)(input_layer, name='output',
                                              bias_initializer=self.output_bias_initializer)
        else:
            # previously an unrecognized initializer silently left self.output unset,
            # causing an obscure failure downstream; fail fast with a clear message instead
            raise ValueError("Unknown initializer: {}. Supported initializers are "
                             "'normalized_columns', 'xavier' or None.".format(self.initializer))

    def __str__(self):
        result = [
            "Dense (num outputs = 1)"
        ]
        return '\n'.join(result)
/rl-coach-slim-1.0.1.tar.gz/rl-coach-slim-1.0.1/rl_coach/architectures/tensorflow_components/heads/v_head.py
0.870515
0.251165
v_head.py
pypi