class InnerProductParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INNERPRODUCTPARAMETER

class LRNParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LRNPARAMETER

class MemoryDataParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MEMORYDATAPARAMETER

class MVNParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MVNPARAMETER

class PoolingParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POOLINGPARAMETER

class PowerParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POWERPARAMETER

class PythonParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PYTHONPARAMETER

class ReLUParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _RELUPARAMETER

class ROIPoolingParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ROIPOOLINGPARAMETER

class SigmoidParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SIGMOIDPARAMETER

class SliceParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SLICEPARAMETER

class SoftmaxParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOFTMAXPARAMETER

class TanHParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _TANHPARAMETER

class ThresholdParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _THRESHOLDPARAMETER

class WindowDataParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _WINDOWDATAPARAMETER

class V1LayerParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _V1LAYERPARAMETER

class V0LayerParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _V0LAYERPARAMETER

class PReLUParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PRELUPARAMETER
class BlobProto(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO

class BlobProtoVector(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR

class Datum(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATUM

class FillerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FILLERPARAMETER

class LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERPARAMETER

class LayerConnection(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERCONNECTION

class NetParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER

class SolverParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERPARAMETER

class SolverState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERSTATE

class BlobProto(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO

class BlobProtoVector(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR

class Datum(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATUM

class FillerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FILLERPARAMETER

class NetParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER

class SolverParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERPARAMETER

class SolverState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERSTATE

class LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERPARAMETER

class ConcatParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONCATPARAMETER

class ConvolutionParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONVOLUTIONPARAMETER

class DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATAPARAMETER

class DropoutParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DROPOUTPARAMETER

class HDF5DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5DATAPARAMETER

class HDF5OutputParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5OUTPUTPARAMETER

class ImageDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _IMAGEDATAPARAMETER

class InfogainLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INFOGAINLOSSPARAMETER

class InnerProductParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INNERPRODUCTPARAMETER

class LRNParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LRNPARAMETER

class MemoryDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MEMORYDATAPARAMETER

class PoolingParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POOLINGPARAMETER

class PowerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POWERPARAMETER

class WindowDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _WINDOWDATAPARAMETER

class V0LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _V0LAYERPARAMETER

class BlobProto(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO

class BlobProtoVector(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR

class Datum(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATUM

class FillerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FILLERPARAMETER

class LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERPARAMETER

class LayerConnection(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERCONNECTION

class NetParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER

class SolverParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERPARAMETER

class EvalHistoryIter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EVALHISTORYITER

class EvalHistory(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EVALHISTORY

class SolverState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERSTATE
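These generated message classes are normally used through the protobuf runtime rather than instantiated by hand. A minimal usage sketch, assuming the generated module is importable as caffe_pb2 and a text-format net.prototxt file exists (both names are illustrative):

from google.protobuf import text_format
import caffe_pb2  # assumed import path for the generated module

# Parse a text-format network definition into a NetParameter message.
net = caffe_pb2.NetParameter()
with open('net.prototxt') as f:  # hypothetical network definition file
    text_format.Merge(f.read(), net)
print(net.name)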
def kernel(r):
    return (prior_std ** 2) * np.exp(-r)

def forward_model(s, parallelization, ncores=None):
    model = ert.Model(forward_params)
    if parallelization:
        simul_obs = model.run(s, parallelization, ncores)
    else:
        simul_obs = model.run(s, parallelization)
    return simul_obs
def kernel(r):
    return (prior_std ** 2) * np.exp(-r)

def forward_model(s, parallelization, ncores=None):
    model = Model(forward_params)
    if parallelization:
        simul_obs = model.run(s, parallelization, ncores)
    else:
        simul_obs = model.run(s, parallelization)
    return simul_obs
def kernel(r):
    return (prior_std ** 2) * np.exp(-(r ** 2))

def forward_model(s, parallelization, ncores=None):
    params = {'nx': nx, 'ny': ny}
    model = mare2dem.Model(params)
    if parallelization:
        simul_obs = model.run(s, parallelization, ncores)
    else:
        simul_obs = model.run(s, parallelization)
    return simul_obs
def kernel(r):
    return (prior_std ** 2) * np.exp(-r)

def forward_model(s, parallelization, ncores=None):
    params = {}
    model = dd.Model(params)
    if parallelization:
        simul_obs = model.run(s, parallelization, ncores)
    else:
        simul_obs = model.run(s, parallelization)
    return simul_obs
def kernel(r):
    return (prior_std ** 2) * np.exp(-r)

def forward_model(s, parallelization, ncores=None):
    params = {'log': True}
    model = dd.Model(params)
    if parallelization:
        simul_obs = model.run(s, parallelization, ncores)
    else:
        simul_obs = model.run(s, parallelization)
    return simul_obs
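Each kernel above defines a prior covariance as a function of separation distance r. A self-contained sketch of the covariance matrix such a kernel implies (prior_std and the 2-D points are illustrative stand-ins, not values from any of the scripts):

import numpy as np

prior_std = 1.0                                 # illustrative prior standard deviation
pts = np.random.rand(50, 2)                     # 50 random 2-D locations
# Pairwise distances, then the prior covariance implied by kernel(r).
r = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
C = (prior_std ** 2) * np.exp(-r)               # same form as kernel(r) above
assert np.allclose(C, C.T)                      # a covariance matrix is symmetric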
# Shared imports for the agent, environment wrappers, and utilities below.
import copy
import time
from collections import deque
from multiprocessing import Pipe, Process

import cv2
import gym
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from gym import spaces
from torch.utils.tensorboard import SummaryWriter

class BaseAgent(object):
    """
    Class for the basic agent objects.
    To define your own agent, subclass this class and implement the functions below.
    """

    def __init__(self, env, policy, logger, storage, device, num_checkpoints):
        """
        env: (gym.Env) environment following the OpenAI Gym API
        """
        self.env = env
        self.policy = policy
        self.logger = logger
        self.storage = storage
        self.device = device
        self.num_checkpoints = num_checkpoints
        self.t = 0

    def predict(self, obs):
        """Predict the action for the given input."""
        pass

    def update_policy(self):
        """Train the neural network model."""
        pass

    def train(self, num_timesteps):
        """Train the agent while collecting trajectories."""
        pass

    def evaluate(self):
        """Evaluate the agent."""
        pass
class PPO(BaseAgent):
    def __init__(self, env, policy, logger, storage, device, n_checkpoints,
                 n_steps=128, n_envs=8, epoch=3, mini_batch_per_epoch=8,
                 mini_batch_size=32 * 8, gamma=0.99, lmbda=0.95,
                 learning_rate=0.00025, grad_clip_norm=0.5, eps_clip=0.2,
                 value_coef=0.5, entropy_coef=0.01, normalize_adv=True,
                 normalize_rew=True, use_gae=True, **kwargs):
        super(PPO, self).__init__(env, policy, logger, storage, device, n_checkpoints)
        self.n_steps = n_steps
        self.n_envs = n_envs
        self.epoch = epoch
        self.mini_batch_per_epoch = mini_batch_per_epoch
        self.mini_batch_size = mini_batch_size
        self.gamma = gamma
        self.lmbda = lmbda
        self.learning_rate = learning_rate
        self.optimizer = optim.Adam(self.policy.parameters(), lr=learning_rate, eps=1e-05)
        self.grad_clip_norm = grad_clip_norm
        self.eps_clip = eps_clip
        self.value_coef = value_coef
        self.entropy_coef = entropy_coef
        self.normalize_adv = normalize_adv
        self.normalize_rew = normalize_rew
        self.use_gae = use_gae

    def predict(self, obs, hidden_state, done):
        with torch.no_grad():
            obs = torch.FloatTensor(obs).to(device=self.device)
            hidden_state = torch.FloatTensor(hidden_state).to(device=self.device)
            mask = torch.FloatTensor(1 - done).to(device=self.device)
            dist, value, hidden_state = self.policy(obs, hidden_state, mask)
            act = dist.sample()
            log_prob_act = dist.log_prob(act)
        return (act.cpu().numpy(), log_prob_act.cpu().numpy(),
                value.cpu().numpy(), hidden_state.cpu().numpy())

    def optimize(self):
        pi_loss_list, value_loss_list, entropy_loss_list = [], [], []
        batch_size = (self.n_steps * self.n_envs) // self.mini_batch_per_epoch
        if batch_size < self.mini_batch_size:
            self.mini_batch_size = batch_size
        grad_accumulation_steps = batch_size / self.mini_batch_size
        grad_accumulation_cnt = 1

        self.policy.train()
        for e in range(self.epoch):
            recurrent = self.policy.is_recurrent()
            generator = self.storage.fetch_train_generator(
                mini_batch_size=self.mini_batch_size, recurrent=recurrent)
            for sample in generator:
                (obs_batch, hidden_state_batch, act_batch, done_batch,
                 old_log_prob_act_batch, old_value_batch, return_batch,
                 adv_batch) = sample
                mask_batch = 1 - done_batch
                dist_batch, value_batch, _ = self.policy(obs_batch, hidden_state_batch, mask_batch)

                # Clipped surrogate policy objective.
                log_prob_act_batch = dist_batch.log_prob(act_batch)
                ratio = torch.exp(log_prob_act_batch - old_log_prob_act_batch)
                surr1 = ratio * adv_batch
                surr2 = torch.clamp(ratio, 1.0 - self.eps_clip, 1.0 + self.eps_clip) * adv_batch
                pi_loss = -torch.min(surr1, surr2).mean()

                # Clipped value objective.
                clipped_value_batch = old_value_batch + (value_batch - old_value_batch).clamp(-self.eps_clip, self.eps_clip)
                v_surr1 = (value_batch - return_batch).pow(2)
                v_surr2 = (clipped_value_batch - return_batch).pow(2)
                value_loss = 0.5 * torch.max(v_surr1, v_surr2).mean()

                entropy_loss = dist_batch.entropy().mean()
                loss = pi_loss + self.value_coef * value_loss - self.entropy_coef * entropy_loss
                loss.backward()

                # Accumulate gradients until a full batch has been processed.
                if grad_accumulation_cnt % grad_accumulation_steps == 0:
                    torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.grad_clip_norm)
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                grad_accumulation_cnt += 1

                pi_loss_list.append(pi_loss.item())
                value_loss_list.append(value_loss.item())
                entropy_loss_list.append(entropy_loss.item())

        summary = {'Loss/pi': np.mean(pi_loss_list),
                   'Loss/v': np.mean(value_loss_list),
                   'Loss/entropy': np.mean(entropy_loss_list)}
        return summary

    def train(self, num_timesteps):
        save_every = num_timesteps // self.num_checkpoints
        checkpoint_cnt = 0
        obs = self.env.reset()
        hidden_state = np.zeros((self.n_envs, self.storage.hidden_state_size))
        done = np.zeros(self.n_envs)

        while self.t < num_timesteps:
            # Collect a rollout of n_steps transitions from each environment.
            self.policy.eval()
            for _ in range(self.n_steps):
                act, log_prob_act, value, next_hidden_state = self.predict(obs, hidden_state, done)
                next_obs, rew, done, info = self.env.step(act)
                self.storage.store(obs, hidden_state, act, rew, done, info, log_prob_act, value)
                obs = next_obs
                hidden_state = next_hidden_state
            _, _, last_val, hidden_state = self.predict(obs, hidden_state, done)
            self.storage.store_last(obs, hidden_state, last_val)
            self.storage.compute_estimates(self.gamma, self.lmbda, self.use_gae, self.normalize_adv)

            summary = self.optimize()
            self.t += self.n_steps * self.n_envs
            rew_batch, done_batch = self.storage.fetch_log_data()
            self.logger.feed(rew_batch, done_batch)
            self.logger.write_summary(summary)
            self.logger.dump()
            self.optimizer = adjust_lr(self.optimizer, self.learning_rate, self.t, num_timesteps)
            if self.t > (checkpoint_cnt + 1) * save_every:
                torch.save({'state_dict': self.policy.state_dict()},
                           self.logger.logdir + '/model_' + str(self.t) + '.pth')
                checkpoint_cnt += 1
        self.env.close()
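The core of PPO.optimize is the clipped surrogate objective. A self-contained sketch of that computation on synthetic tensors (the names here are illustrative, not part of the class):

import torch

eps_clip = 0.2
adv = torch.randn(6)                                # synthetic advantages
old_log_prob = torch.randn(6)                       # log-probs from the rollout policy
new_log_prob = old_log_prob + 0.1 * torch.randn(6)  # log-probs from the updated policy

ratio = torch.exp(new_log_prob - old_log_prob)
surr1 = ratio * adv
surr2 = torch.clamp(ratio, 1.0 - eps_clip, 1.0 + eps_clip) * adv
pi_loss = -torch.min(surr1, surr2).mean()           # same form as in PPO.optimize
print(float(pi_loss))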
class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking a random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def reset(self, **kwargs):
        """Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        self.was_real_done = True

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # A life was lost but lives remain: signal an episode end to the
            # learner while the emulator keeps running.
            done = True
        self.lives = lives
        info['env_done'] = self.was_real_done
        return obs, reward, done, info

    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # No-op step to advance from the lost-life state.
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame."""
        gym.Wrapper.__init__(self, env)
        # Buffer for the two most recent raw observations (for max pooling).
        self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
        self._skip = skip

    def step(self, action):
        """Repeat action, sum reward, and max over the last two observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            if i == self._skip - 2:
                self._obs_buffer[0] = obs
            if i == self._skip - 1:
                self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)

    def step(self, act):
        """Step the environment, recording the unclipped reward in info."""
        s, rew, done, info = self.env.step(act)
        info['env_reward'] = rew
        return s, rew, done, info
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
        """
        Warp frames to 84x84 as done in the Nature paper and later work.
        If the environment uses dictionary observations, `dict_space_key` can be
        specified to indicate which observation should be warped.
        """
        super().__init__(env)
        self._width = width
        self._height = height
        self._grayscale = grayscale
        self._key = dict_space_key
        if self._grayscale:
            num_colors = 1
        else:
            num_colors = 3
        new_space = gym.spaces.Box(low=0, high=255,
                                   shape=(self._height, self._width, num_colors),
                                   dtype=np.uint8)
        if self._key is None:
            original_space = self.observation_space
            self.observation_space = new_space
        else:
            original_space = self.observation_space.spaces[self._key]
            self.observation_space.spaces[self._key] = new_space
        assert original_space.dtype == np.uint8 and len(original_space.shape) == 3

    def observation(self, obs):
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]
        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self._width, self._height), interpolation=cv2.INTER_AREA)
        if self._grayscale:
            frame = np.expand_dims(frame, -1)
        if self._key is None:
            obs = frame
        else:
            obs = obs.copy()
            obs[self._key] = frame
        return obs
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack the k last frames.
        Returns a lazy array, which is much more memory efficient.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        self.observation_space = spaces.Box(low=0, high=255,
                                            shape=(shp[:-1] + (shp[-1] * k,)),
                                            dtype=env.observation_space.dtype)

    def reset(self):
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(low=0, high=1,
                                                shape=env.observation_space.shape,
                                                dtype=np.float32)

    def observation(self, observation):
        return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
    def __init__(self, frames):
        """This object ensures that common frames between the observations are only stored once.
        It exists purely to optimize memory usage, which can be huge for DQN's 1M-frame replay
        buffers.
        This object should only be converted to a numpy array before being passed to the model.
        """
        self._frames = frames
        self._out = None

    def _force(self):
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=-1)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        out = self._force()
        if dtype is not None:
            out = out.astype(dtype)
        return out

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]

    def count(self):
        frames = self._force()
        return frames.shape[frames.ndim - 1]

    def frame(self, i):
        return self._force()[..., i]
class TransposeFrame(gym.ObservationWrapper):
    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        obs_shape = self.observation_space.shape
        # HWC -> CHW, the layout PyTorch convolutions expect.
        self.observation_space = gym.spaces.Box(low=0, high=1,
                                                shape=(obs_shape[2], obs_shape[0], obs_shape[1]),
                                                dtype=np.float32)

    def observation(self, observation):
        return observation.transpose(2, 0, 1)
def wrap_deepmind(env, episode_life=True, preprocess=True, max_and_skip=True,
                  clip_rewards=True, no_op_reset=True, history_length=4,
                  scale=True, transpose=True):
    """Configure an environment for DeepMind-style Atari."""
    if no_op_reset:
        env = NoopResetEnv(env, noop_max=30)
    if max_and_skip:
        env = MaxAndSkipEnv(env, skip=4)
    if episode_life:
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    if preprocess:
        env = WarpFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)
    if history_length > 1:
        env = FrameStack(env, history_length)
    if scale:
        env = ScaledFloatFrame(env)
    if transpose:
        env = TransposeFrame(env)
    return env
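A usage sketch for the wrapper stack, assuming an Atari-enabled gym install and the old 4-tuple step API these wrappers target (the game id is illustrative):

env = wrap_deepmind(gym.make('BreakoutNoFrameskip-v4'))
obs = env.reset()
print(env.observation_space.shape)  # (4, 84, 84): warped, stacked, scaled, CHW frames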
def worker(worker_id, env, master_end, worker_end):
    master_end.close()  # only the parent process uses the master end
    while True:
        cmd, data = worker_end.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                ob = env.reset()
            worker_end.send((ob, reward, done, info))
        elif cmd == 'seed':
            worker_end.send(env.seed(data))
        elif cmd == 'reset':
            ob = env.reset()
            worker_end.send(ob)
        elif cmd == 'close':
            worker_end.close()
            break
        else:
            raise NotImplementedError
class ParallelEnv(object):
    """
    Runs copies of an environment in separate worker processes and steps them in lockstep.
    """

    def __init__(self, num_processes, env):
        self.nenvs = num_processes
        self.waiting = False
        self.closed = False
        self.workers = []
        self.observation_space = env.observation_space
        self.action_space = env.action_space
        self.master_ends, self.send_ends = zip(*[Pipe() for _ in range(self.nenvs)])
        for worker_id, (master_end, send_end) in enumerate(zip(self.master_ends, self.send_ends)):
            p = Process(target=worker, args=(worker_id, copy.deepcopy(env), master_end, send_end))
            p.start()
            self.workers.append(p)

    def step(self, actions):
        """
        Perform a step in each environment and return the stacked transitions.
        """
        for master_end, action in zip(self.master_ends, actions):
            master_end.send(('step', action))
        self.waiting = True
        results = [master_end.recv() for master_end in self.master_ends]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos

    def seed(self, seed=None):
        for idx, master_end in enumerate(self.master_ends):
            master_end.send(('seed', seed + idx))
        return [master_end.recv() for master_end in self.master_ends]

    def reset(self):
        for master_end in self.master_ends:
            master_end.send(('reset', None))
        results = [master_end.recv() for master_end in self.master_ends]
        return np.stack(results)

    def close(self):
        if self.closed:
            return
        if self.waiting:
            [master_end.recv() for master_end in self.master_ends]
        for master_end in self.master_ends:
            master_end.send(('close', None))
        for worker_proc in self.workers:
            worker_proc.join()
        self.closed = True
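A minimal sketch of driving ParallelEnv, assuming a deepcopy-able environment (CartPole here, purely for illustration) and the old 4-tuple step API:

if __name__ == '__main__':  # required by multiprocessing on spawn platforms
    venv = ParallelEnv(4, gym.make('CartPole-v1'))
    obs = venv.reset()                          # stacked observations, shape (4, 4)
    actions = np.zeros(4, dtype=np.int64)       # one action per worker
    obs, rews, dones, infos = venv.step(actions)
    venv.close()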
class Logger(object):
    def __init__(self, n_envs, logdir):
        self.start_time = time.time()
        self.n_envs = n_envs
        self.logdir = logdir
        self.episode_rewards = []
        for _ in range(n_envs):
            self.episode_rewards.append([])
        self.episode_len_buffer = deque(maxlen=40)
        self.episode_reward_buffer = deque(maxlen=40)
        self.log = pd.DataFrame(columns=['timesteps', 'wall_time', 'num_episodes',
                                         'max_episode_rewards', 'mean_episode_rewards',
                                         'min_episode_rewards', 'max_episode_len',
                                         'mean_episode_len', 'min_episode_len'])
        self.writer = SummaryWriter(logdir)
        self.timesteps = 0
        self.num_episodes = 0

    def feed(self, rew_batch, done_batch):
        steps = rew_batch.shape[0]
        rew_batch = rew_batch.T
        done_batch = done_batch.T
        for i in range(self.n_envs):
            for j in range(steps):
                self.episode_rewards[i].append(rew_batch[i][j])
                if done_batch[i][j]:
                    self.episode_len_buffer.append(len(self.episode_rewards[i]))
                    self.episode_reward_buffer.append(np.sum(self.episode_rewards[i]))
                    self.episode_rewards[i] = []
                    self.num_episodes += 1
        self.timesteps += self.n_envs * steps

    def write_summary(self, summary):
        for key, value in summary.items():
            self.writer.add_scalar(key, value, self.timesteps)

    def dump(self):
        wall_time = time.time() - self.start_time
        if self.num_episodes > 0:
            episode_statistics = self._get_episode_statistics()
            episode_statistics_list = list(episode_statistics.values())
            for key, value in episode_statistics.items():
                self.writer.add_scalar(key, value, self.timesteps)
        else:
            episode_statistics_list = [None] * 6
        log = [self.timesteps] + [wall_time] + [self.num_episodes] + episode_statistics_list
        self.log.loc[len(self.log)] = log
        with open(self.logdir + '/log.csv', 'w') as f:
            self.log.to_csv(f, index=False)
        print(self.log.loc[len(self.log) - 1])

    def _get_episode_statistics(self):
        episode_statistics = {}
        episode_statistics['Rewards/max_episodes'] = np.max(self.episode_reward_buffer)
        episode_statistics['Rewards/mean_episodes'] = np.mean(self.episode_reward_buffer)
        episode_statistics['Rewards/min_episodes'] = np.min(self.episode_reward_buffer)
        episode_statistics['Len/max_episodes'] = np.max(self.episode_len_buffer)
        episode_statistics['Len/mean_episodes'] = np.mean(self.episode_len_buffer)
        episode_statistics['Len/min_episodes'] = np.min(self.episode_len_buffer)
        return episode_statistics
def set_global_seeds(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

def set_global_log_levels(level):
    gym.logger.set_level(level)

def orthogonal_init(module, gain=nn.init.calculate_gain('relu')):
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        nn.init.orthogonal_(module.weight.data, gain)
        nn.init.constant_(module.bias.data, 0)
    return module

def xavier_uniform_init(module, gain=1.0):
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        nn.init.xavier_uniform_(module.weight.data, gain)
        nn.init.constant_(module.bias.data, 0)
    return module
def adjust_lr(optimizer, init_lr, timesteps, max_timesteps):
    # Linear decay from init_lr at t=0 to 0 at t=max_timesteps.
    lr = init_lr * (1 - timesteps / max_timesteps)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return optimizer
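adjust_lr anneals the learning rate linearly to zero over training. A quick check of the schedule on a throwaway optimizer (the values are illustrative):

opt = optim.Adam(nn.Linear(2, 2).parameters(), lr=0.00025)
for t in (0, 500000, 1000000):
    opt = adjust_lr(opt, 0.00025, t, 1000000)
    print(t, opt.param_groups[0]['lr'])  # 0.00025, 0.000125, 0.0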
def get_n_params(model):
    n = np.array([p.numel() for p in model.parameters()]).sum()
    return str(np.round(n / 1000000.0, 3)) + ' M params'
class Flatten(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)
class MlpModel(nn.Module):
    def __init__(self, input_dims=4, hidden_dims=[64, 64], **kwargs):
        """
        input_dims: (int) number of input dimensions
        hidden_dims: (list) dimensions of the hidden layers
        """
        super(MlpModel, self).__init__()
        hidden_dims = [input_dims] + hidden_dims
        layers = []
        for i in range(len(hidden_dims) - 1):
            in_features = hidden_dims[i]
            out_features = hidden_dims[i + 1]
            layers.append(nn.Linear(in_features, out_features))
            layers.append(nn.ReLU())
        self.layers = nn.Sequential(*layers)
        self.output_dim = hidden_dims[-1]
        self.apply(orthogonal_init)

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x
class NatureModel(nn.Module):
    def __init__(self, in_channels, **kwargs):
        """
        in_channels: (int) number of input channels (e.g. stacked frames)
        """
        super(NatureModel, self).__init__()
        self.layers = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=8, stride=4), nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2), nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1), nn.ReLU(),
            Flatten(),
            nn.Linear(in_features=64 * 7 * 7, out_features=512), nn.ReLU())
        self.output_dim = 512
        self.apply(orthogonal_init)

    def forward(self, x):
        x = self.layers(x)
        return x
class ResidualBlock(nn.Module):
    def __init__(self, in_channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels,
                               kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels,
                               kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        out = nn.ReLU()(x)
        out = self.conv1(out)
        out = nn.ReLU()(out)
        out = self.conv2(out)
        return out + x
class ImpalaBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(ImpalaBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=3, stride=1, padding=1)
        self.res1 = ResidualBlock(out_channels)
        self.res2 = ResidualBlock(out_channels)

    def forward(self, x):
        x = self.conv(x)
        x = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)(x)
        x = self.res1(x)
        x = self.res2(x)
        return x
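ImpalaBlock halves the spatial resolution (stride-2 max pool) and applies two residual blocks. A sketch of the usual three-block IMPALA encoder composed from it; this composition is an assumption for illustration, not part of the file above:

class ImpalaEncoder(nn.Module):  # hypothetical composition for illustration
    def __init__(self, in_channels=3):
        super(ImpalaEncoder, self).__init__()
        self.blocks = nn.Sequential(
            ImpalaBlock(in_channels, 16),  # 64x64 -> 32x32
            ImpalaBlock(16, 32),           # 32x32 -> 16x16
            ImpalaBlock(32, 32))           # 16x16 -> 8x8
        self.fc = nn.Linear(32 * 8 * 8, 256)
        self.output_dim = 256

    def forward(self, x):
        x = nn.ReLU()(self.blocks(x))
        x = Flatten()(x)
        return nn.ReLU()(self.fc(x))

print(ImpalaEncoder()(torch.zeros(1, 3, 64, 64)).shape)  # torch.Size([1, 256])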