def prepare_params(kwargs):
ddpg_params = dict()
env_name = kwargs['env_name']
def make_env():
return gym.make(env_name)
kwargs['make_env'] = make_env
tmp_env = cached_make_env(kwargs['make_env'])
assert hasattr(tmp_env, '_max_episode_steps')
kwargs['T'] = tmp_env._max_episode_steps
tmp_env.reset()
kwargs['max_u'] = (np.array(kwargs['max_u']) if (type(kwargs['max_u']) == list) else kwargs['max_u'])
kwargs['gamma'] = (1.0 - (1.0 / kwargs['T']))
if ('lr' in kwargs):
kwargs['pi_lr'] = kwargs['lr']
kwargs['Q_lr'] = kwargs['lr']
del kwargs['lr']
for name in ['buffer_size', 'hidden', 'layers', 'network_class', 'polyak', 'batch_size', 'Q_lr', 'pi_lr', 'norm_eps', 'norm_clip', 'max_u', 'action_l2', 'clip_obs', 'scope', 'relative_goals', 'alpha', 'beta0', 'beta_iters', 'eps']:
ddpg_params[name] = kwargs[name]
kwargs[('_' + name)] = kwargs[name]
del kwargs[name]
kwargs['ddpg_params'] = ddpg_params
return kwargs
|
def log_params(params, logger=logger):
for key in sorted(params.keys()):
logger.info('{}: {}'.format(key, params[key]))
|
def goal_distance(goal_a, goal_b):
assert (goal_a.shape == goal_b.shape)
return np.linalg.norm((np.abs(goal_a) - np.abs(goal_b)), axis=(- 1))
|
def compute_reward(achieved_goal, desired_goal, info):
(distance_threshold, reward_type) = ((0.05 * 6), 'sparse')
d = goal_distance(achieved_goal, desired_goal)
if (reward_type == 'sparse'):
return (- (d > distance_threshold).astype(np.float32))
else:
return (- d)
|
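# Minimal usage sketch (illustrative addition, assuming numpy is imported as np):
# with the sparse setting, the reward is -0.0 within the 0.3 threshold and -1.0 outside it.
achieved = np.array([[0.1], [1.0]])
desired = np.array([[0.0], [0.0]])
print(compute_reward(achieved, desired, info=None))  # [-0. -1.]
|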
def configure_her(params):
env = cached_make_env(params['make_env'])
env.reset()
if ('Pendulum' in str(env)):
def reward_fun(ag_2, g, info):
return compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)
else:
def reward_fun(ag_2, g, info):
return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)
her_params = {'reward_fun': reward_fun}
for name in ['replay_strategy', 'replay_k']:
her_params[name] = params[name]
params[('_' + name)] = her_params[name]
del params[name]
if (params['prioritization'] == 'entropy'):
sample_her_transitions = make_sample_her_transitions_entropy(**her_params)
elif (params['prioritization'] == 'tderror'):
sample_her_transitions = make_sample_her_transitions_prioritized_replay(**her_params)
else:
sample_her_transitions = make_sample_her_transitions(**her_params)
return sample_her_transitions
|
def simple_goal_subtract(a, b):
assert (a.shape == b.shape)
return (a - b)
|
def configure_ddpg(dims, params, reuse=False, use_mpi=True, clip_return=True):
sample_her_transitions = configure_her(params)
gamma = params['gamma']
rollout_batch_size = params['rollout_batch_size']
ddpg_params = params['ddpg_params']
temperature = params['temperature']
prioritization = params['prioritization']
env_name = params['env_name']
max_timesteps = params['max_timesteps']
rank_method = params['rank_method']
input_dims = dims.copy()
env = cached_make_env(params['make_env'])
env.reset()
ddpg_params.update({'input_dims': input_dims, 'T': params['T'], 'clip_pos_returns': True, 'clip_return': ((1.0 / (1.0 - gamma)) if clip_return else np.inf), 'rollout_batch_size': rollout_batch_size, 'subtract_goals': simple_goal_subtract, 'sample_transitions': sample_her_transitions, 'gamma': gamma, 'temperature': temperature, 'prioritization': prioritization, 'env_name': env_name, 'max_timesteps': max_timesteps, 'rank_method': rank_method})
ddpg_params['info'] = {'env_name': params['env_name']}
policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=use_mpi)
return policy
|
def configure_dims(params):
env = cached_make_env(params['make_env'])
env.reset()
(obs, _, _, info) = env.step(env.action_space.sample())
if ('Pendulum' in str(env)):
(obs, info) = wrap_pendulum_obs(obs)
dims = {'o': obs['observation'].shape[0], 'u': env.action_space.shape[0], 'g': obs['desired_goal'].shape[0]}
for (key, value) in info.items():
value = np.array(value)
if (value.ndim == 0):
value = value.reshape(1)
dims['info_{}'.format(key)] = value.shape[0]
return dims
|
@click.command()
@click.argument('policy_file', type=str)
@click.option('--seed', type=int, default=0)
@click.option('--n_test_rollouts', type=int, default=20)
@click.option('--render', type=int, default=1)
def main(policy_file, seed, n_test_rollouts, render):
set_global_seeds(seed)
with open(policy_file, 'rb') as f:
policy = pickle.load(f)
env_name = policy.info['env_name']
params = config.DEFAULT_PARAMS
if (env_name in config.DEFAULT_ENV_PARAMS):
params.update(config.DEFAULT_ENV_PARAMS[env_name])
params['env_name'] = env_name
params = config.prepare_params(params)
config.log_params(params, logger=logger)
dims = config.configure_dims(params)
eval_params = {'exploit': True, 'use_target_net': params['test_with_polyak'], 'compute_Q': True, 'rollout_batch_size': 1, 'render': bool(render)}
for name in ['T', 'gamma', 'noise_eps', 'random_eps']:
eval_params[name] = params[name]
evaluator = RolloutWorker(params['make_env'], policy, dims, logger, **eval_params)
evaluator.seed(seed)
evaluator.clear_history()
for _ in range(n_test_rollouts):
evaluator.generate_rollouts()
for (key, val) in evaluator.logs('test'):
logger.record_tabular(key, np.mean(val))
logger.dump_tabular()
|
def mpi_average(value):
if (value == []):
value = [0.0]
if (not isinstance(value, list)):
value = [value]
return mpi_moments(np.array(value))[0]
|
def train(policy, rollout_worker, evaluator, n_epochs, n_test_rollouts, n_cycles, n_batches, policy_save_interval, save_policies, num_cpu, dump_buffer, rank_method, fit_interval, prioritization, **kwargs):
rank = MPI.COMM_WORLD.Get_rank()
latest_policy_path = os.path.join(logger.get_dir(), 'policy_latest.pkl')
best_policy_path = os.path.join(logger.get_dir(), 'policy_best.pkl')
periodic_policy_path = os.path.join(logger.get_dir(), 'policy_{}.pkl')
logger.info('Training...')
best_success_rate = (- 1)
t = 1
for epoch in range(n_epochs):
rollout_worker.clear_history()
for cycle in range(n_cycles):
episode = rollout_worker.generate_rollouts()
if ((((cycle % fit_interval) == 0) and (not (cycle == 0))) or (cycle == (n_cycles - 1))):
if (prioritization == 'entropy'):
policy.fit_density_model()
policy.store_episode(episode, dump_buffer, rank_method, epoch)
for batch in range(n_batches):
t = (((((epoch * n_cycles) * n_batches) + (cycle * n_batches)) + batch) * num_cpu)
policy.train(t, dump_buffer)
policy.update_target_net()
evaluator.clear_history()
for _ in range(n_test_rollouts):
evaluator.generate_rollouts()
logger.record_tabular('epoch', epoch)
for (key, val) in evaluator.logs('test'):
logger.record_tabular(key, mpi_average(val))
for (key, val) in rollout_worker.logs('train'):
logger.record_tabular(key, mpi_average(val))
for (key, val) in policy.logs():
logger.record_tabular(key, mpi_average(val))
if (rank == 0):
logger.dump_tabular()
if dump_buffer:
policy.dump_buffer(epoch)
success_rate = mpi_average(evaluator.current_success_rate())
if ((rank == 0) and (success_rate >= best_success_rate) and save_policies):
best_success_rate = success_rate
logger.info('New best success rate: {}. Saving policy to {} ...'.format(best_success_rate, best_policy_path))
evaluator.save_policy(best_policy_path)
evaluator.save_policy(latest_policy_path)
if ((rank == 0) and (policy_save_interval > 0) and ((epoch % policy_save_interval) == 0) and save_policies):
policy_path = periodic_policy_path.format(epoch)
logger.info('Saving periodic policy to {} ...'.format(policy_path))
evaluator.save_policy(policy_path)
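# sanity check: every MPI rank must have drawn different random numbers, i.e. the per-rank seeds differ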
local_uniform = np.random.uniform(size=(1,))
root_uniform = local_uniform.copy()
MPI.COMM_WORLD.Bcast(root_uniform, root=0)
if (rank != 0):
assert (local_uniform[0] != root_uniform[0])
|
def launch(env_name, n_epochs, num_cpu, seed, replay_strategy, policy_save_interval, clip_return, temperature, prioritization, binding, logging, version, dump_buffer, n_cycles, rank_method, fit_interval, override_params={}, save_policies=True):
if (num_cpu > 1):
whoami = mpi_fork(num_cpu, binding)
if (whoami == 'parent'):
sys.exit(0)
import baselines.common.tf_util as U
U.single_threaded_session().__enter__()
rank = MPI.COMM_WORLD.Get_rank()
if logging:
logdir = ('logs/{}-prioritization{}-replay_strategy{}-n_epochs{}-num_cpu{}-seed{}-version{}'.format(env_name, prioritization, replay_strategy, n_epochs, num_cpu, seed, version))
else:
logdir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f'))
if (rank == 0):
if (logdir or (logger.get_dir() is None)):
logger.configure(dir=logdir)
else:
logger.configure()
logdir = logger.get_dir()
assert (logdir is not None)
os.makedirs(logdir, exist_ok=True)
rank_seed = (seed + (1000000 * rank))
set_global_seeds(rank_seed)
params = config.DEFAULT_PARAMS
params['env_name'] = env_name
params['replay_strategy'] = replay_strategy
params['temperature'] = temperature
params['prioritization'] = prioritization
params['binding'] = binding
params['version'] = version
params['dump_buffer'] = dump_buffer
params['n_cycles'] = n_cycles
params['max_timesteps'] = (((n_epochs * params['n_cycles']) * params['n_batches']) * num_cpu)  # n_cycles is set first so max_timesteps is derived from the value actually used
params['rank_method'] = rank_method
params['fit_interval'] = fit_interval
params['n_epochs'] = n_epochs
params['num_cpu'] = num_cpu
if params['dump_buffer']:
params['alpha'] = 0
if (env_name in config.DEFAULT_ENV_PARAMS):
params.update(config.DEFAULT_ENV_PARAMS[env_name])
params.update(**override_params)
with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as f:
json.dump(params, f)
params = config.prepare_params(params)
config.log_params(params, logger=logger)
dims = config.configure_dims(params)
policy = config.configure_ddpg(dims=dims, params=params, clip_return=clip_return)
rollout_params = {'exploit': False, 'use_target_net': False, 'use_demo_states': True, 'compute_Q': False, 'T': params['T']}
eval_params = {'exploit': True, 'use_target_net': params['test_with_polyak'], 'use_demo_states': False, 'compute_Q': True, 'T': params['T']}
for name in ['T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps']:
rollout_params[name] = params[name]
eval_params[name] = params[name]
rollout_worker = RolloutWorker(params['make_env'], policy, dims, logger, **rollout_params)
rollout_worker.seed(rank_seed)
evaluator = RolloutWorker(params['make_env'], policy, dims, logger, **eval_params)
evaluator.seed(rank_seed)
train(logdir=logdir, policy=policy, rollout_worker=rollout_worker, evaluator=evaluator, n_epochs=n_epochs, n_test_rollouts=params['n_test_rollouts'], n_cycles=params['n_cycles'], n_batches=params['n_batches'], policy_save_interval=policy_save_interval, save_policies=save_policies, num_cpu=num_cpu, dump_buffer=dump_buffer, rank_method=rank_method, fit_interval=fit_interval, prioritization=prioritization)
|
@click.command()
@click.option('--env_name', type=click.Choice(['FetchPickAndPlace-v0', 'FetchSlide-v0', 'FetchPush-v0', 'HandManipulateBlockFull-v0', 'HandManipulateEggFull-v0', 'HandManipulatePenRotate-v0']), default='FetchPickAndPlace-v0', help='the name of the OpenAI Gym environment that you want to train on.')
@click.option('--n_epochs', type=int, default=200, help='the number of training epochs to run')
@click.option('--num_cpu', type=int, default=1, help='the number of CPU cores to use (using MPI)')
@click.option('--seed', type=int, default=0, help='the random seed used to seed both the environment and the training code')
@click.option('--policy_save_interval', type=int, default=5, help='the interval with which policy pickles are saved. If set to 0, only the best and latest policy will be pickled.')
@click.option('--replay_strategy', type=click.Choice(['future', 'final', 'none']), default='future', help='the HER replay strategy to be used. "future" uses HER, "none" disables HER.')
@click.option('--clip_return', type=int, default=1, help='whether or not returns should be clipped')
@click.option('--temperature', type=float, default=1.0, help='temperature value for prioritization')
@click.option('--prioritization', type=click.Choice(['none', 'entropy', 'tderror']), default='entropy', help='the prioritization strategy to be used.')
@click.option('--binding', type=click.Choice(['none', 'core']), default='core', help='configure mpi using bind-to none or core.')
@click.option('--logging', type=bool, default=False, help='whether to log to a descriptively named logs/ directory (otherwise a temporary directory is used)')
@click.option('--version', type=int, default=0, help='version')
@click.option('--dump_buffer', type=bool, default=False, help='whether to dump the replay buffer (achieved goals, energy values, TD errors) for analysis')
@click.option('--n_cycles', type=int, default=50, help='n_cycles')
@click.option('--rank_method', type=click.Choice(['none', 'min', 'dense', 'average']), default='dense', help='ranking for prioritization')
@click.option('--fit_interval', type=int, default=50, help='fit_interval')
def main(**kwargs):
launch(**kwargs)
|
def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):
"Creates a sample function that can be used for HER experience replay.\n\n Args:\n replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',\n regular DDPG experience replay is used\n replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times\n as many HER replays as regular replays are used)\n reward_fun (function): function to re-compute the reward with substituted goals\n "
if ((replay_strategy == 'future') or (replay_strategy == 'final')):
future_p = (1 - (1.0 / (1 + replay_k)))
else:
future_p = 0
def _sample_her_transitions(episode_batch, batch_size_in_transitions):
'episode_batch is {key: array(buffer_size x T x dim_key)}\n '
T = episode_batch['u'].shape[1]
rollout_batch_size = episode_batch['u'].shape[0]
batch_size = batch_size_in_transitions
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)
transitions = {key: episode_batch[key][(episode_idxs, t_samples)].copy() for key in episode_batch.keys()}
her_indexes = np.where((np.random.uniform(size=batch_size) < future_p))
future_offset = (np.random.uniform(size=batch_size) * (T - t_samples))
future_offset = future_offset.astype(int)
future_t = ((t_samples + 1) + future_offset)[her_indexes]
if (replay_strategy == 'final'):
future_t[:] = T
future_ag = episode_batch['ag'][(episode_idxs[her_indexes], future_t)]
transitions['g'][her_indexes] = future_ag
info = {}
for (key, value) in transitions.items():
if key.startswith('info_'):
info[key.replace('info_', '')] = value
reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
reward_params['info'] = info
transitions['r'] = reward_fun(**reward_params)
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()}
assert (transitions['u'].shape[0] == batch_size_in_transitions)
return transitions
return _sample_her_transitions
|
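# Minimal usage sketch (illustrative addition, assuming numpy as np): a toy batch-major
# store with 10 episodes of T=5 steps; 'ag' holds T+1 achieved goals per episode.
# A zero reward function stands in for an environment's compute_reward here.
sample = make_sample_her_transitions('future', replay_k=4, reward_fun=lambda ag_2, g, info: np.zeros(len(g)))
episode_batch = {'o': np.random.randn(10, 5, 3), 'u': np.random.randn(10, 5, 2), 'g': np.random.randn(10, 5, 1), 'ag': np.random.randn(10, 6, 1), 'ag_2': np.random.randn(10, 5, 1)}
transitions = sample(episode_batch, batch_size_in_transitions=32)
print(transitions['u'].shape)  # (32, 2); ~80% of the sampled goals were relabeled with future achieved goals
|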
def make_sample_her_transitions_entropy(replay_strategy, replay_k, reward_fun):
if ((replay_strategy == 'future') or (replay_strategy == 'final')):
future_p = (1 - (1.0 / (1 + replay_k)))
else:
future_p = 0
def _sample_her_transitions(episode_batch, batch_size_in_transitions, rank_method, temperature, update_stats=False):
T = episode_batch['u'].shape[1]
rollout_batch_size = episode_batch['u'].shape[0]
batch_size = batch_size_in_transitions
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)
if (not update_stats):
if (rank_method == 'none'):
entropy_trajectory = episode_batch['e']
else:
entropy_trajectory = episode_batch['p']
p_trajectory = np.power(entropy_trajectory, (1 / (temperature + 0.01)))
p_trajectory = (p_trajectory / p_trajectory.sum())
episode_idxs_entropy = np.random.choice(rollout_batch_size, size=batch_size, replace=True, p=p_trajectory.flatten())
episode_idxs = episode_idxs_entropy
transitions = {}
for key in episode_batch.keys():
if ((not (key == 'p')) and (not (key == 's')) and (not (key == 'e'))):
transitions[key] = episode_batch[key][(episode_idxs, t_samples)].copy()
her_indexes = np.where((np.random.uniform(size=batch_size) < future_p))
future_offset = (np.random.uniform(size=batch_size) * (T - t_samples))
future_offset = future_offset.astype(int)
future_t = ((t_samples + 1) + future_offset)[her_indexes]
if (replay_strategy == 'final'):
future_t[:] = T
future_ag = episode_batch['ag'][(episode_idxs[her_indexes], future_t)]
transitions['g'][her_indexes] = future_ag
info = {}
for (key, value) in transitions.items():
if key.startswith('info_'):
info[key.replace('info_', '')] = value
reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
reward_params['info'] = info
transitions['r'] = reward_fun(**reward_params)
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()}
assert (transitions['u'].shape[0] == batch_size_in_transitions)
return transitions
return _sample_her_transitions
|
def make_sample_her_transitions_prioritized_replay(replay_strategy, replay_k, reward_fun):
if ((replay_strategy == 'future') or (replay_strategy == 'final')):
future_p = (1 - (1.0 / (1 + replay_k)))
else:
future_p = 0
def _sample_proportional(self, rollout_batch_size, batch_size, T):
episode_idxs = []
t_samples = []
for _ in range(batch_size):
self.n_transitions_stored = min(self.n_transitions_stored, self.size_in_transitions)
mass = (random.random() * self._it_sum.sum(0, (self.n_transitions_stored - 1)))
idx = self._it_sum.find_prefixsum_idx(mass)
assert (idx < self.n_transitions_stored)
episode_idx = (idx // T)
assert (episode_idx < rollout_batch_size)
t_sample = (idx % T)
episode_idxs.append(episode_idx)
t_samples.append(t_sample)
return (episode_idxs, t_samples)
def _sample_her_transitions(self, episode_batch, batch_size_in_transitions, beta):
'episode_batch is {key: array(buffer_size x T x dim_key)}\n '
T = episode_batch['u'].shape[1]
rollout_batch_size = episode_batch['u'].shape[0]
batch_size = batch_size_in_transitions
if (rollout_batch_size < self.current_size):
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)
else:
assert (beta >= 0)
(episode_idxs, t_samples) = _sample_proportional(self, rollout_batch_size, batch_size, T)
episode_idxs = np.array(episode_idxs)
t_samples = np.array(t_samples)
weights = []
p_min = (self._it_min.min() / self._it_sum.sum())
max_weight = ((p_min * self.n_transitions_stored) ** (- beta))
for (episode_idx, t_sample) in zip(episode_idxs, t_samples):
p_sample = (self._it_sum[((episode_idx * T) + t_sample)] / self._it_sum.sum())
weight = ((p_sample * self.n_transitions_stored) ** (- beta))
weights.append((weight / max_weight))
weights = np.array(weights)
transitions = {}
for key in episode_batch.keys():
if ((not (key == 'td')) and (not (key == 'e'))):
episode_batch_key = episode_batch[key].copy()
transitions[key] = episode_batch_key[(episode_idxs, t_samples)].copy()
her_indexes = np.where((np.random.uniform(size=batch_size) < future_p))
future_offset = (np.random.uniform(size=batch_size) * (T - t_samples))
future_offset = future_offset.astype(int)
future_t = ((t_samples + 1) + future_offset)[her_indexes]
if (replay_strategy == 'final'):
future_t[:] = T
future_ag = episode_batch['ag'][(episode_idxs[her_indexes], future_t)]
info = {}
for (key, value) in transitions.items():
if key.startswith('info_'):
info[key.replace('info_', '')] = value
transitions['g'][her_indexes] = future_ag
reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
reward_params['info'] = info
transitions['r'] = reward_fun(**reward_params)
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()}
assert (transitions['u'].shape[0] == batch_size_in_transitions)
idxs = ((episode_idxs * T) + t_samples)
return (transitions, weights, idxs)
return _sample_her_transitions
|
class Normalizer():
def __init__(self, size, eps=0.01, default_clip_range=np.inf, sess=None):
'A normalizer that ensures that observations are approximately distributed according to\n a standard Normal distribution (i.e. have mean zero and variance one).\n\n Args:\n size (int): the size of the observation to be normalized\n eps (float): a small constant that avoids underflows\n default_clip_range (float): normalized observations are clipped to be in\n [-default_clip_range, default_clip_range]\n sess (object): the TensorFlow session to be used\n '
self.size = size
self.eps = eps
self.default_clip_range = default_clip_range
self.sess = (sess if (sess is not None) else tf.get_default_session())
self.local_sum = np.zeros(self.size, np.float32)
self.local_sumsq = np.zeros(self.size, np.float32)
self.local_count = np.zeros(1, np.float32)
self.sum_tf = tf.get_variable(initializer=tf.zeros_initializer(), shape=self.local_sum.shape, name='sum', trainable=False, dtype=tf.float32)
self.sumsq_tf = tf.get_variable(initializer=tf.zeros_initializer(), shape=self.local_sumsq.shape, name='sumsq', trainable=False, dtype=tf.float32)
self.count_tf = tf.get_variable(initializer=tf.ones_initializer(), shape=self.local_count.shape, name='count', trainable=False, dtype=tf.float32)
self.mean = tf.get_variable(initializer=tf.zeros_initializer(), shape=(self.size,), name='mean', trainable=False, dtype=tf.float32)
self.std = tf.get_variable(initializer=tf.ones_initializer(), shape=(self.size,), name='std', trainable=False, dtype=tf.float32)
self.count_pl = tf.placeholder(name='count_pl', shape=(1,), dtype=tf.float32)
self.sum_pl = tf.placeholder(name='sum_pl', shape=(self.size,), dtype=tf.float32)
self.sumsq_pl = tf.placeholder(name='sumsq_pl', shape=(self.size,), dtype=tf.float32)
self.update_op = tf.group(self.count_tf.assign_add(self.count_pl), self.sum_tf.assign_add(self.sum_pl), self.sumsq_tf.assign_add(self.sumsq_pl))
self.recompute_op = tf.group(tf.assign(self.mean, (self.sum_tf / self.count_tf)), tf.assign(self.std, tf.sqrt(tf.maximum(tf.square(self.eps), ((self.sumsq_tf / self.count_tf) - tf.square((self.sum_tf / self.count_tf)))))))
self.lock = threading.Lock()
def update(self, v):
v = v.reshape((- 1), self.size)
with self.lock:
self.local_sum += v.sum(axis=0)
self.local_sumsq += np.square(v).sum(axis=0)
self.local_count[0] += v.shape[0]
def normalize(self, v, clip_range=None):
if (clip_range is None):
clip_range = self.default_clip_range
mean = reshape_for_broadcasting(self.mean, v)
std = reshape_for_broadcasting(self.std, v)
return tf.clip_by_value(((v - mean) / std), (- clip_range), clip_range)
def denormalize(self, v):
mean = reshape_for_broadcasting(self.mean, v)
std = reshape_for_broadcasting(self.std, v)
return (mean + (v * std))
def _mpi_average(self, x):
buf = np.zeros_like(x)
MPI.COMM_WORLD.Allreduce(x, buf, op=MPI.SUM)
buf /= MPI.COMM_WORLD.Get_size()
return buf
def synchronize(self, local_sum, local_sumsq, local_count, root=None):
local_sum[...] = self._mpi_average(local_sum)
local_sumsq[...] = self._mpi_average(local_sumsq)
local_count[...] = self._mpi_average(local_count)
return (local_sum, local_sumsq, local_count)
def recompute_stats(self):
with self.lock:
local_count = self.local_count.copy()
local_sum = self.local_sum.copy()
local_sumsq = self.local_sumsq.copy()
self.local_count[...] = 0
self.local_sum[...] = 0
self.local_sumsq[...] = 0
(synced_sum, synced_sumsq, synced_count) = self.synchronize(local_sum=local_sum, local_sumsq=local_sumsq, local_count=local_count)
self.sess.run(self.update_op, feed_dict={self.count_pl: synced_count, self.sum_pl: synced_sum, self.sumsq_pl: synced_sumsq})
self.sess.run(self.recompute_op)
|
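# Minimal usage sketch (illustrative addition, assuming a TF1-style session and a single
# MPI process, in which case synchronize() averages over just this rank):
sess = tf.InteractiveSession()
norm = Normalizer(size=3)
sess.run(tf.global_variables_initializer())
norm.update(np.random.randn(1000, 3).astype(np.float32))
norm.recompute_stats()
x = tf.constant(np.random.randn(4, 3).astype(np.float32))
print(sess.run(norm.normalize(x)))  # approximately zero-mean, unit-variance values, clipped to the default range
|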
class IdentityNormalizer():
def __init__(self, size, std=1.0):
self.size = size
self.mean = tf.zeros(self.size, tf.float32)
self.std = (std * tf.ones(self.size, tf.float32))
def update(self, x):
pass
def normalize(self, x, clip_range=None):
return (x / self.std)
def denormalize(self, x):
return (self.std * x)
def synchronize(self):
pass
def recompute_stats(self):
pass
|
def store_args(method):
'Stores provided method args as instance attributes.\n '
argspec = inspect.getfullargspec(method)
defaults = {}
if (argspec.defaults is not None):
defaults = dict(zip(argspec.args[(- len(argspec.defaults)):], argspec.defaults))
if (argspec.kwonlydefaults is not None):
defaults.update(argspec.kwonlydefaults)
arg_names = argspec.args[1:]
@functools.wraps(method)
def wrapper(*positional_args, **keyword_args):
self = positional_args[0]
args = defaults.copy()
for (name, value) in zip(arg_names, positional_args[1:]):
args[name] = value
args.update(keyword_args)
self.__dict__.update(args)
return method(*positional_args, **keyword_args)
return wrapper
|
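# Minimal usage sketch (illustrative addition): store_args copies positional, default,
# and keyword-only constructor arguments onto the instance before __init__ runs.
class _Agent:
    @store_args
    def __init__(self, lr, gamma=0.98, *, hidden=256):
        pass
a = _Agent(0.001, hidden=64)
print(a.lr, a.gamma, a.hidden)  # 0.001 0.98 64
|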
def import_function(spec):
'Import a function identified by a string like "pkg.module:fn_name".\n '
(mod_name, fn_name) = spec.split(':')
module = importlib.import_module(mod_name)
fn = getattr(module, fn_name)
return fn
|
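# Minimal usage sketch (illustrative addition):
join = import_function('os.path:join')
print(join('logs', 'run0'))  # logs/run0
|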
def flatten_grads(var_list, grads):
'Flattens a variables and their gradients.\n '
return tf.concat([tf.reshape(grad, [U.numel(v)]) for (v, grad) in zip(var_list, grads)], 0)
|
def nn(input, layers_sizes, reuse=None, flatten=False, name=''):
'Creates a simple neural network\n '
for (i, size) in enumerate(layers_sizes):
activation = (tf.nn.relu if (i < (len(layers_sizes) - 1)) else None)
input = tf.layers.dense(inputs=input, units=size, kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=reuse, name=((name + '_') + str(i)))
if activation:
input = activation(input)
if flatten:
assert (layers_sizes[(- 1)] == 1)
input = tf.reshape(input, [(- 1)])
return input
|
def install_mpi_excepthook():
import sys
from mpi4py import MPI
old_hook = sys.excepthook
def new_hook(a, b, c):
old_hook(a, b, c)
sys.stdout.flush()
sys.stderr.flush()
MPI.COMM_WORLD.Abort()
sys.excepthook = new_hook
|
def mpi_fork(n, binding='core'):
'Re-launches the current script with workers\n Returns "parent" for original parent, "child" for MPI children\n '
if (n <= 1):
return 'child'
if (os.getenv('IN_MPI') is None):
env = os.environ.copy()
env.update(MKL_NUM_THREADS='1', OMP_NUM_THREADS='1', IN_MPI='1')
if (platform.system() == 'Darwin'):
args = ['mpirun', '-np', str(n), '-allow-run-as-root', sys.executable]
else:
args = ['mpirun', '-np', str(n), '-bind-to', binding, '-allow-run-as-root', sys.executable]
args += sys.argv
subprocess.check_call(args, env=env)
return 'parent'
else:
install_mpi_excepthook()
return 'child'
|
def convert_episode_to_batch_major(episode):
'Converts an episode to have the batch dimension in the major (first)\n dimension.\n '
episode_batch = {}
for key in episode.keys():
val = np.array(episode[key]).copy()
episode_batch[key] = val.swapaxes(0, 1)
return episode_batch
|
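# Minimal usage sketch (illustrative addition, assuming numpy as np): rollout workers
# collect time-major lists (T x n_envs x dim); this yields batch-major arrays.
episode = {'o': [np.zeros((2, 3)) for _ in range(5)]}  # 5 steps, 2 parallel envs, dim 3
batch = convert_episode_to_batch_major(episode)
print(batch['o'].shape)  # (2, 5, 3)
|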
def transitions_in_episode_batch(episode_batch):
'Number of transitions in a given episode batch.\n '
shape = episode_batch['u'].shape
return (shape[0] * shape[1])
|
def reshape_for_broadcasting(source, target):
'Reshapes a tensor (source) to have the correct shape and dtype of the target\n before broadcasting it against that target.\n '
dim = len(target.get_shape())
shape = (([1] * (dim - 1)) + [(- 1)])
return tf.reshape(tf.cast(source, target.dtype), shape)
|
def wrap_pendulum_obs(obs):
distance_threshold = (0.05 * 6)
obs_dict = {}
observation = obs.copy()
costh = observation[0]
sinth = observation[1]
theta = math.atan2(sinth, costh)
theta = (((theta + np.pi) % (2 * np.pi)) - np.pi)
observation = np.array([theta, obs[2]])
desired_goal = np.array([0.0])
achieved_goal = observation[0:1]
assert (achieved_goal.shape == desired_goal.shape)
d = np.linalg.norm((np.abs(achieved_goal) - np.abs(desired_goal)), axis=(- 1))
is_success = (d < distance_threshold).astype(np.float32)
info = {'is_success': is_success}
obs_dict['observation'] = observation
obs_dict['desired_goal'] = desired_goal
obs_dict['achieved_goal'] = achieved_goal
obs = obs_dict
return (obs, info)
|
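# Minimal usage sketch (illustrative addition, assuming the classic-control Pendulum
# environment and the old Gym API where reset() returns a flat [cos(theta), sin(theta), theta_dot] array):
env = gym.make('Pendulum-v0')
obs = env.reset()
obs_dict, info = wrap_pendulum_obs(obs)
print(obs_dict['achieved_goal'], obs_dict['desired_goal'], info['is_success'])
|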
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
|
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
|
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), ('expected file or str, got %s' % filename_or_file)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = ('%-8.3g' % (val,))
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
if (len(key2str) == 0):
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
dashes = ('-' * ((keywidth + valwidth) + 7))
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append(('| %s%s | %s%s |' % (key, (' ' * (keywidth - len(key))), val, (' ' * (valwidth - len(val))))))
lines.append(dashes)
self.file.write(('\n'.join(lines) + '\n'))
self.file.flush()
def _truncate(self, s):
return ((s[:20] + '...') if (len(s) > 23) else s)
def writeseq(self, seq):
for arg in seq:
self.file.write(arg)
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
|
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for (k, v) in sorted(kvs.items()):
if hasattr(v, 'dtype'):
v = v.tolist()
kvs[k] = float(v)
self.file.write((json.dumps(kvs) + '\n'))
self.file.flush()
def close(self):
self.file.close()
|
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
extra_keys = (kvs.keys() - self.keys)
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if (i > 0):
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:(- 1)])
self.file.write((self.sep * len(extra_keys)))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if (i > 0):
self.file.write(',')
v = kvs.get(k)
if (v is not None):
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
|
class TensorBoardOutputFormat(KVWriter):
"\n Dumps key/value pairs into TensorBoard's numeric format.\n "
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {'tag': k, 'simple_value': float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for (k, v) in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
|
def make_output_format(format, ev_dir, log_suffix=''):
os.makedirs(ev_dir, exist_ok=True)
if (format == 'stdout'):
return HumanOutputFormat(sys.stdout)
elif (format == 'log'):
return HumanOutputFormat(osp.join(ev_dir, ('log%s.txt' % log_suffix)))
elif (format == 'json'):
return JSONOutputFormat(osp.join(ev_dir, ('progress%s.json' % log_suffix)))
elif (format == 'csv'):
return CSVOutputFormat(osp.join(ev_dir, ('progress%s.csv' % log_suffix)))
elif (format == 'tensorboard'):
return TensorBoardOutputFormat(osp.join(ev_dir, ('tb%s' % log_suffix)))
else:
raise ValueError(('Unknown format specified: %s' % (format,)))
|
def logkv(key, val):
'\n Log a value of some diagnostic\n Call this once for each diagnostic quantity, each iteration\n If called many times, last value will be used.\n '
Logger.CURRENT.logkv(key, val)
|
def logkv_mean(key, val):
'\n The same as logkv(), but if called many times, values averaged.\n '
Logger.CURRENT.logkv_mean(key, val)
|
def logkvs(d):
'\n Log a dictionary of key-value pairs\n '
for (k, v) in d.items():
logkv(k, v)
|
def dumpkvs():
"\n Write all of the diagnostics from the current iteration\n\n level: int. (see logger.py docs) If the global logger level is higher than\n the level argument here, don't print to stdout.\n "
Logger.CURRENT.dumpkvs()
|
def getkvs():
return Logger.CURRENT.name2val
|
def log(*args, level=INFO):
"\n Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).\n "
Logger.CURRENT.log(*args, level=level)
|
def debug(*args):
log(*args, level=DEBUG)
|
def info(*args):
log(*args, level=INFO)
|
def warn(*args):
log(*args, level=WARN)
|
def error(*args):
log(*args, level=ERROR)
|
def set_level(level):
'\n Set logging threshold on current logger.\n '
Logger.CURRENT.set_level(level)
|
def get_dir():
"\n Get directory that log files are being written to.\n will be None if there is no output directory (i.e., if you didn't call start)\n "
return Logger.CURRENT.get_dir()
|
class ProfileKV():
'\n Usage:\n with logger.ProfileKV("interesting_scope"):\n code\n '
def __init__(self, n):
self.n = ('wait_' + n)
def __enter__(self):
self.t1 = time.time()
def __exit__(self, type, value, traceback):
Logger.CURRENT.name2val[self.n] += (time.time() - self.t1)
|
def profile(n):
'\n Usage:\n @profile("my_func")\n def my_func(): code\n '
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with ProfileKV(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
|
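# Minimal usage sketch (illustrative addition, assuming the time module is imported):
# the decorator accumulates wall-clock time under 'wait_<name>' in the current logger,
# so configure() must have been called first.
configure(dir='/tmp/profile_demo')
@profile('step')
def _step():
    time.sleep(0.01)
for _ in range(3):
    _step()
dumpkvs()  # the dumped table includes wait_step ~= 0.03
|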
class Logger(object):
DEFAULT = None
CURRENT = None
def __init__(self, dir, output_formats):
self.name2val = defaultdict(float)
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
if (val is None):
self.name2val[key] = None
return
(oldval, cnt) = (self.name2val[key], self.name2cnt[key])
self.name2val[key] = (((oldval * cnt) / (cnt + 1)) + (val / (cnt + 1)))
self.name2cnt[key] = (cnt + 1)
def dumpkvs(self):
if (self.level == DISABLED):
return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
self.name2cnt.clear()
def log(self, *args, level=INFO):
if (self.level <= level):
self._do_log(args)
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
|
def configure(dir=None, format_strs=None):
if (dir is None):
dir = os.getenv('OPENAI_LOGDIR')
if (dir is None):
dir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f'))
assert isinstance(dir, str)
os.makedirs(dir, exist_ok=True)
log_suffix = ''
from mpi4py import MPI
rank = MPI.COMM_WORLD.Get_rank()
if (rank > 0):
log_suffix = ('-rank%03i' % rank)
if (format_strs is None):
(strs, strs_mpi) = (os.getenv('OPENAI_LOG_FORMAT'), os.getenv('OPENAI_LOG_FORMAT_MPI'))
format_strs = (strs_mpi if (rank > 0) else strs)
if (format_strs is not None):
format_strs = format_strs.split(',')
else:
format_strs = (LOG_OUTPUT_FORMATS_MPI if (rank > 0) else LOG_OUTPUT_FORMATS)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log(('Logging to %s' % dir))
|
def reset():
if (Logger.CURRENT is not Logger.DEFAULT):
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
|
class scoped_configure(object):
def __init__(self, dir=None, format_strs=None):
self.dir = dir
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(dir=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
|
def _demo():
info('hi')
debug("shouldn't appear")
set_level(DEBUG)
debug('should appear')
dir = '/tmp/testlogging'
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv('a', 3)
logkv('b', 2.5)
dumpkvs()
logkv('b', (- 2.5))
logkv('a', 5.5)
dumpkvs()
info('^^^ should see a = 5.5')
logkv_mean('b', (- 22.5))
logkv_mean('b', (- 44.4))
logkv('a', 5.5)
dumpkvs()
info('^^^ should see b = -33.45')
logkv('b', (- 2.5))
dumpkvs()
logkv('a', 'longasslongasslongasslongasslongasslongassvalue')
dumpkvs()
|
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
|
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
|
def read_tb(path):
'\n path : a tensorboard file OR a directory, where we will find all TB files\n of the form events.*\n '
import pandas
import numpy as np
from glob import glob
from collections import defaultdict
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, 'events.*'))
elif osp.basename(path).startswith('events.'):
fnames = [path]
else:
raise NotImplementedError(('Expected tensorboard file or directory containing them. Got %s' % path))
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if (summary.step > 0):
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx, tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[((step - 1), colidx)] = value
return pandas.DataFrame(data, columns=tags)
|
def rolling_window(a, window):
shape = (a.shape[:(- 1)] + (((a.shape[(- 1)] - window) + 1), window))
strides = (a.strides + (a.strides[(- 1)],))
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
|
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=(- 1))
return (x[(window - 1):], yw_func)
|
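# Minimal usage sketch (illustrative addition, assuming numpy as np): rolling_window
# is a zero-copy strided view, and window_func applies a reduction over each window.
a = np.arange(6)
print(rolling_window(a, 3))  # [[0 1 2] [1 2 3] [2 3 4] [3 4 5]]
print(window_func(a, a.astype(float), 3, np.mean))  # (array([2, 3, 4, 5]), array([1., 2., 3., 4.]))
|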
def ts2xy(ts, xaxis):
if (xaxis == X_TIMESTEPS):
x = np.cumsum(ts.l.values)
y = ts.r.values
elif (xaxis == X_EPISODES):
x = np.arange(len(ts))
y = ts.r.values
elif (xaxis == X_WALLTIME):
x = (ts.t.values / 3600.0)
y = ts.r.values
else:
raise NotImplementedError
return (x, y)
|
def plot_curves(xy_list, xaxis, title):
plt.figure(figsize=(8, 2))
maxx = max((xy[0][(- 1)] for xy in xy_list))
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i]
plt.scatter(x, y, s=2)
(x, y_mean) = window_func(x, y, EPISODES_WINDOW, np.mean)
plt.plot(x, y_mean, color=color)
plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel('Episode Rewards')
plt.tight_layout()
|
def plot_results(dirs, num_timesteps, xaxis, task_name):
tslist = []
for dir in dirs:
ts = load_results(dir)
ts = ts[(ts.l.cumsum() <= num_timesteps)]
tslist.append(ts)
xy_list = [ts2xy(ts, xaxis) for ts in tslist]
plot_curves(xy_list, xaxis, task_name)
|
def main():
import argparse
import os
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dirs', help='List of log directories', nargs='*', default=['./log'])
parser.add_argument('--num_timesteps', type=int, default=int(10000000.0))
parser.add_argument('--xaxis', help='Variable on X-axis', default=X_TIMESTEPS)
parser.add_argument('--task_name', help='Title of plot', default='Breakout')
args = parser.parse_args()
args.dirs = [os.path.abspath(dir) for dir in args.dirs]
plot_results(args.dirs, args.num_timesteps, args.xaxis, args.task_name)
plt.show()
|
class AlexNet(nn.Module):
def __init__(self, num_classes=1000, ratioInfl=1):
super(AlexNet, self).__init__()
self.ratioInfl = ratioInfl
self.activation_func = HardTanh_bin
self.channels = [3, int((96 * self.ratioInfl)), int((256 * self.ratioInfl)), int((384 * self.ratioInfl)), int((384 * self.ratioInfl)), 256]
self.features0 = nn.Sequential(nn.Conv2d(self.channels[0], self.channels[1], kernel_size=11, stride=4, padding=2), nn.MaxPool2d(kernel_size=3, stride=2), nn.BatchNorm2d(self.channels[1], momentum=None))
self.features1 = nn.Sequential(nn.LeakyReLU(inplace=True), BinarizeConv2d(self.channels[1], self.channels[2], kernel_size=5, padding=2), nn.MaxPool2d(kernel_size=3, stride=2), nn.BatchNorm2d(self.channels[2], momentum=None))
self.features2 = nn.Sequential(self.activation_func(), BinarizeConv2d(self.channels[2], self.channels[3], kernel_size=3, padding=1), nn.BatchNorm2d(self.channels[3], momentum=None))
self.features3 = nn.Sequential(self.activation_func(), BinarizeConv2d(self.channels[3], self.channels[4], kernel_size=3, padding=1), nn.BatchNorm2d(self.channels[4], momentum=None))
self.features4 = nn.Sequential(self.activation_func(), BinarizeConv2d(self.channels[4], self.channels[5], kernel_size=3, padding=1), nn.MaxPool2d(kernel_size=3, stride=2), nn.BatchNorm2d(self.channels[5], momentum=None))
self.features5 = nn.Sequential(self.activation_func())
self.neurons = [((self.channels[5] * 6) * 6), 4096, 4096, num_classes]
self.classifier0 = nn.Sequential(BinarizeLinear(self.neurons[0], self.neurons[1]), nn.BatchNorm1d(self.neurons[1], momentum=None))
self.classifier1 = nn.Sequential(self.activation_func(), BinarizeLinear(self.neurons[1], self.neurons[2]), nn.BatchNorm1d(self.neurons[2], momentum=None))
self.classifier2 = nn.Sequential(self.activation_func(), nn.Linear(self.neurons[2], self.neurons[3]), nn.BatchNorm1d(self.neurons[3], momentum=None), nn.LogSoftmax())
self.distrloss_layers = []
for i in range(2, 6):
self.distrloss_layers.append(Distrloss_layer(self.channels[i]))
for i in range(1, 3):
self.distrloss_layers.append(Distrloss_layer(self.neurons[i]))
self.regime = {0: {'optimizer': 'Adam', 'lr': 0.01}, 20: {'lr': 0.001}, 40: {'lr': 0.0001}, 50: {'lr': 1e-05}, 60: {'lr': 1e-06}, 64: {'lr': 0}}
self.input_transform = {'train': transforms.Compose([transforms.Scale(256), transforms.RandomCrop(224), transforms.RandomHorizontalFlip()]), 'eval': transforms.Compose([transforms.Scale(256), transforms.CenterCrop(224)])}
def forward(self, x):
loss = []
x = self.features0(x)
x = self.features1(x)
loss.append(self.distrloss_layers[0](x))
x = self.features2(x)
loss.append(self.distrloss_layers[1](x))
x = self.features3(x)
loss.append(self.distrloss_layers[2](x))
x = self.features4(x)
loss.append(self.distrloss_layers[3](x))
x = self.features5(x)
x = x.view((- 1), ((256 * 6) * 6))
x = self.classifier0(x)
loss.append(self.distrloss_layers[4](x))
x = self.classifier1(x)
loss.append(self.distrloss_layers[5](x))
x = self.classifier2(x)
distrloss1 = (sum([ele[0] for ele in loss]) / len(loss))
distrloss2 = (sum([ele[1] for ele in loss]) / len(loss))
return (x, distrloss1.view(1, 1), distrloss2.view(1, 1))
|
def alexnet_binary_vs_xnor(**kwargs):
num_classes = kwargs.get('num_classes', 1000)
infl_ratio = kwargs.get('infl_ratio', 1.0)
return AlexNet(num_classes, infl_ratio)
|
class BinarizeF(Function):
@staticmethod
def forward(ctx, input):
return torch.sign(input)
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input
|
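# Minimal usage sketch (illustrative addition, assuming torch is imported): BinarizeF
# is a straight-through estimator -- forward is sign(x), backward copies the gradient.
x = torch.randn(4, requires_grad=True)
y = BinarizeF.apply(x)
y.sum().backward()
print(y)       # entries in {-1, +1}
print(x.grad)  # all ones: the gradient passes straight through
|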
class HardTanh_bin(nn.Module):
def __init__(self):
super(HardTanh_bin, self).__init__()
self.hardtanh = nn.Hardtanh(inplace=False)
self.binarize = BinarizeF.apply
def forward(self, input):
output = self.hardtanh(input)
output = self.binarize(output)
return output
|
class BinarizeLinear(nn.Linear):
def __init__(self, *kargs, **kwargs):
super(BinarizeLinear, self).__init__(*kargs, **kwargs)
def forward(self, input):
if (not hasattr(self.weight, 'org')):
self.weight.org = self.weight.data.clone()
self.weight.data = self.weight.org.sign()
out = nn.functional.linear(input, self.weight)
if (not (self.bias is None)):
self.bias.org = self.bias.data.clone()
out = (out + self.bias.view(1, (- 1)).expand_as(out))
return out
|
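# Minimal usage sketch (illustrative addition): during forward() the layer computes
# with sign(weights) while keeping the full-precision copy in weight.org for updates.
layer = BinarizeLinear(4, 2)
out = layer(torch.randn(3, 4))
print(layer.weight.data.unique())    # tensor([-1., 1.])
print(hasattr(layer.weight, 'org'))  # True: full-precision weights preserved
|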
class BinarizeConv2d(nn.Conv2d):
def __init__(self, *kargs, **kwargs):
super(BinarizeConv2d, self).__init__(*kargs, **kwargs)
def forward(self, input):
if (not hasattr(self.weight, 'org')):
self.weight.org = self.weight.data.clone()
self.weight.data = self.weight.org.sign()
out = nn.functional.conv2d(input, self.weight, None, self.stride, self.padding, self.dilation, self.groups)
if (not (self.bias is None)):
self.bias.org = self.bias.data.clone()
out = (out + self.bias.view(1, (- 1), 1, 1).expand_as(out))
return out
|
class Distrloss_layer(nn.Module):
def __init__(self, channels):
super(Distrloss_layer, self).__init__()
self._channels = channels
def forward(self, input):
if ((input.dim() != 4) and (input.dim() != 2)):
raise ValueError('expected 4D or 2D input (got {}D input)'.format(input.dim()))
if (input.size()[1] != self._channels):
raise ValueError('expected {} channels (got {} channels)'.format(self._channels, input.size()[1]))
if (input.dim() == 4):
mean = input.mean(dim=(- 1)).mean(dim=(- 1)).mean(dim=0)
var = ((input - mean.unsqueeze(0).unsqueeze(2).unsqueeze(3)) ** 2).mean(dim=(- 1)).mean(dim=(- 1)).mean(dim=0)
elif (input.dim() == 2):
mean = input.mean(dim=0)
var = ((input - mean.unsqueeze(0)) ** 2).mean(dim=0)
var = (var + 1e-10)
std = var.abs().sqrt()
distrloss1 = ((torch.min(((2 - mean) - std), ((2 + mean) - std)).clamp(min=0) ** 2).mean() + ((std - 4).clamp(min=0) ** 2).mean())
distrloss2 = ((mean ** 2) - (std ** 2)).clamp(min=0).mean()
return [distrloss1, distrloss2]
|
def setup_logging(log_file='log.txt'):
'Setup logging configuration\n '
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S', filename=log_file, filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
|
def save_checkpoint(state, is_best, path='.', filename='checkpoint.pth.tar', save_all=False):
filename = os.path.join(path, filename)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(path, 'model_best.pth.tar'))
if save_all:
shutil.copyfile(filename, os.path.join(path, ('checkpoint_epoch_%s.pth.tar' % state['epoch'])))
|
class AverageMeter(object):
'Computes and stores the average and current value'
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += (val * n)
self.count += n
self.avg = (self.sum / self.count)
|
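# Minimal usage sketch (illustrative addition): update() takes a batch-average value
# and the batch size n, so avg stays a correct running mean over uneven batches.
meter = AverageMeter()
meter.update(0.5, n=10)
meter.update(1.0, n=30)
print(meter.avg)  # 0.875
|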
def adjust_optimizer(optimizer, epoch, config):
'Reconfigures the optimizer according to epoch and config dict'
def modify_optimizer(optimizer, setting):
if ('optimizer' in setting):
optimizer = __optimizers[setting['optimizer']](optimizer.param_groups)
logging.debug(('OPTIMIZER - setting method = %s' % setting['optimizer']))
for param_group in optimizer.param_groups:
for key in param_group.keys():
if (key in setting):
logging.debug(('OPTIMIZER - setting %s = %s' % (key, setting[key])))
param_group[key] = setting[key]
return optimizer
if callable(config):
optimizer = modify_optimizer(optimizer, config(epoch))
else:
for e in range((epoch + 1)):
if (e in config):
optimizer = modify_optimizer(optimizer, config[e])
return optimizer
|
def accuracy(output, target, topk=(1,)):
'Computes the precision@k for the specified values of k'
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.float().topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0)
res.append(correct_k.mul_((100.0 / batch_size)))
return res
|
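# Minimal usage sketch (illustrative addition, assuming torch is imported):
output = torch.randn(8, 10)      # a batch of logits over 10 classes
target = torch.randint(0, 10, (8,))
top1, top5 = accuracy(output, target, topk=(1, 5))
print(top1.item(), top5.item())  # percentages in [0, 100]
|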
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
'\n Demmel p 312\n '
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
fmtstr = '%10i %10.3g %10.3g'
titlestr = '%10s %10s %10s'
if verbose:
print((titlestr % ('iter', 'residual norm', 'soln norm')))
for i in range(cg_iters):
if (callback is not None):
callback(x)
if verbose:
print((fmtstr % (i, rdotr, np.linalg.norm(x))))
z = f_Ax(p)
v = (rdotr / p.dot(z))
x += (v * p)
r -= (v * z)
newrdotr = r.dot(r)
mu = (newrdotr / rdotr)
p = (r + (mu * p))
rdotr = newrdotr
if (rdotr < residual_tol):
break
if (callback is not None):
callback(x)
if verbose:
print((fmtstr % ((i + 1), rdotr, np.linalg.norm(x))))
return x
|
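# Minimal usage sketch (illustrative addition, assuming numpy as np): cg only needs the
# matrix-vector product f_Ax, which is the point when A is a Hessian too large to form.
A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = cg(lambda v: A.dot(v), b, cg_iters=10)
print(x, np.allclose(A.dot(x), b))  # [0.0909... 0.6363...] True
|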
def make_atari_env(env_id, num_env, seed, wrapper_kwargs=None, start_index=0):
'\n Create a wrapped, monitored SubprocVecEnv for Atari.\n '
if (wrapper_kwargs is None):
wrapper_kwargs = {}
def make_env(rank):
def _thunk():
env = make_atari(env_id)
env.seed((seed + rank))
env = Monitor(env, (logger.get_dir() and os.path.join(logger.get_dir(), str(rank))))
return wrap_deepmind(env, **wrapper_kwargs)
return _thunk
set_global_seeds(seed)
return SubprocVecEnv([make_env((i + start_index)) for i in range(num_env)])
|
def make_mujoco_env(env_id, seed):
'\n Create a wrapped, monitored gym.Env for MuJoCo.\n '
set_global_seeds(seed)
env = gym.make(env_id)
env = Monitor(env, logger.get_dir())
env.seed(seed)
return env
|
def make_robotics_env(env_id, seed, rank=0):
'\n Create a wrapped, monitored gym.Env for MuJoCo.\n '
set_global_seeds(seed)
env = gym.make(env_id)
env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
env = Monitor(env, (logger.get_dir() and os.path.join(logger.get_dir(), str(rank))), info_keywords=('is_success',))
env.seed(seed)
return env
|
def arg_parser():
'\n Create an empty argparse.ArgumentParser.\n '
import argparse
return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
def atari_arg_parser():
'\n Create an argparse.ArgumentParser for run_atari.py.\n '
parser = arg_parser()
parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(10000000.0))
return parser
|
def mujoco_arg_parser():
'\n Create an argparse.ArgumentParser for run_mujoco.py.\n '
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(1000000.0))
return parser
|
def robotics_arg_parser():
'\n Create an argparse.ArgumentParser for run_mujoco.py.\n '
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(1000000.0))
return parser
|
def fmt_row(width, row, header=False):
out = ' | '.join((fmt_item(x, width) for x in row))
if header:
out = ((out + '\n') + ('-' * len(out)))
return out
|
def fmt_item(x, l):
if isinstance(x, np.ndarray):
assert (x.ndim == 0)
x = x.item()
if isinstance(x, (float, np.float32, np.float64)):
v = abs(x)
if (((v < 0.0001) or (v > 10000.0)) and (v > 0)):
rep = ('%7.2e' % x)
else:
rep = ('%7.5f' % x)
else:
rep = str(x)
return ((' ' * (l - len(rep))) + rep)
|
def colorize(string, color, bold=False, highlight=False):
attr = []
num = color2num[color]
if highlight:
num += 10
attr.append(str(num))
if bold:
attr.append('1')
return ('\x1b[%sm%s\x1b[0m' % (';'.join(attr), string))
|
@contextmanager
def timed(msg):
global MESSAGE_DEPTH
print(colorize(((('\t' * MESSAGE_DEPTH) + '=: ') + msg), color='magenta'))
tstart = time.time()
MESSAGE_DEPTH += 1
(yield)
MESSAGE_DEPTH -= 1
print(colorize((('\t' * MESSAGE_DEPTH) + ('done in %.3f seconds' % (time.time() - tstart))), color='magenta'))
|
class Dataset(object):
def __init__(self, data_map, deterministic=False, shuffle=True):
self.data_map = data_map
self.deterministic = deterministic
self.enable_shuffle = shuffle
self.n = next(iter(data_map.values())).shape[0]
self._next_id = 0
self.shuffle()
def shuffle(self):
if self.deterministic:
return
perm = np.arange(self.n)
np.random.shuffle(perm)
for key in self.data_map:
self.data_map[key] = self.data_map[key][perm]
self._next_id = 0
def next_batch(self, batch_size):
if ((self._next_id >= self.n) and self.enable_shuffle):
self.shuffle()
cur_id = self._next_id
cur_batch_size = min(batch_size, (self.n - self._next_id))
self._next_id += cur_batch_size
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][cur_id:(cur_id + cur_batch_size)]
return data_map
def iterate_once(self, batch_size):
if self.enable_shuffle:
self.shuffle()
while (self._next_id <= (self.n - batch_size)):
(yield self.next_batch(batch_size))
self._next_id = 0
def subset(self, num_elements, deterministic=True):
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][:num_elements]
return Dataset(data_map, deterministic)
|
def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True, include_final_partial_batch=True):
assert ((num_batches is None) != (batch_size is None)), 'Provide num_batches or batch_size, but not both'
arrays = tuple(map(np.asarray, arrays))
n = arrays[0].shape[0]
assert all(((a.shape[0] == n) for a in arrays[1:]))
inds = np.arange(n)
if shuffle:
np.random.shuffle(inds)
sections = (np.arange(0, n, batch_size)[1:] if (num_batches is None) else num_batches)
for batch_inds in np.array_split(inds, sections):
if (include_final_partial_batch or (len(batch_inds) == batch_size)):
(yield tuple((a[batch_inds] for a in arrays)))
|
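# Minimal usage sketch (illustrative addition, assuming numpy as np): iterate two
# aligned arrays in mini-batches, keeping the final partial batch.
xs = np.arange(10)
ys = xs * 2
for bx, by in iterbatches((xs, ys), batch_size=4, shuffle=False):
    print(bx, by)  # batches of size 4, 4, 2, in order since shuffle=False
|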
class Filter(object):
def __call__(self, x, update=True):
raise NotImplementedError
def reset(self):
pass
|
class IdentityFilter(Filter):
def __call__(self, x, update=True):
return x
|
class CompositionFilter(Filter):
def __init__(self, fs):
self.fs = fs
def __call__(self, x, update=True):
for f in self.fs:
x = f(x)
return x
def output_shape(self, input_space):
out = input_space.shape
for f in self.fs:
out = f.output_shape(out)
return out
|
class ZFilter(Filter):
'\n y = (x-mean)/std\n using running estimates of mean,std\n '
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
def __call__(self, x, update=True):
if update:
self.rs.push(x)
if self.demean:
x = (x - self.rs.mean)
if self.destd:
x = (x / (self.rs.std + 1e-08))
if self.clip:
x = np.clip(x, (- self.clip), self.clip)
return x
def output_shape(self, input_space):
return input_space.shape
|
class AddClock(Filter):
def __init__(self):
self.count = 0
def reset(self):
self.count = 0
def __call__(self, x, update=True):
out = np.append(x, (self.count / 100.0))
self.count += 1
return out
def output_shape(self, input_space):
return ((input_space.shape[0] + 1),)
|
class FlattenFilter(Filter):
def __call__(self, x, update=True):
return x.ravel()
def output_shape(self, input_space):
return (int(np.prod(input_space.shape)),)
|
class Ind2OneHotFilter(Filter):
def __init__(self, n):
self.n = n
def __call__(self, x, update=True):
out = np.zeros(self.n)
out[x] = 1
return out
def output_shape(self, input_space):
return (input_space.n,)
|
class DivFilter(Filter):
def __init__(self, divisor):
self.divisor = divisor
def __call__(self, x, update=True):
return (x / self.divisor)
def output_shape(self, input_space):
return input_space.shape
|
class StackFilter(Filter):
def __init__(self, length):
self.stack = deque(maxlen=length)
def reset(self):
self.stack.clear()
def __call__(self, x, update=True):
self.stack.append(x)
while (len(self.stack) < self.stack.maxlen):
self.stack.append(x)
return np.concatenate(self.stack, axis=(- 1))
def output_shape(self, input_space):
return (input_space.shape[:(- 1)] + ((input_space.shape[(- 1)] * self.stack.maxlen),))
|
def discount(x, gamma):
'\n computes discounted sums along 0th dimension of x.\n\n inputs\n ------\n x: ndarray\n gamma: float\n\n outputs\n -------\n y: ndarray with same shape as x, satisfying\n\n y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],\n where k = len(x) - t - 1\n\n '
assert (x.ndim >= 1)
return scipy.signal.lfilter([1], [1, (- gamma)], x[::(- 1)], axis=0)[::(- 1)]
|
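# Minimal usage sketch (illustrative addition, assuming scipy and numpy are imported):
# with gamma=0.9, ones discount to [1 + 0.9 + 0.81, 1 + 0.9, 1].
print(discount(np.ones(3), 0.9))  # [2.71 1.9  1.  ]
|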
def explained_variance(ypred, y):
'\n Computes fraction of variance that ypred explains about y.\n Returns 1 - Var[y-ypred] / Var[y]\n\n interpretation:\n ev=0 => might as well have predicted zero\n ev=1 => perfect prediction\n ev<0 => worse than just predicting zero\n\n '
assert ((y.ndim == 1) and (ypred.ndim == 1))
vary = np.var(y)
return (np.nan if (vary == 0) else (1 - (np.var((y - ypred)) / vary)))
|