code stringlengths 17 6.64M |
|---|
class Box(Space):
    """
    A box in R^n, i.e. each coordinate is bounded.
    """

    def __init__(self, low, high, shape=None):
        """
        Two kinds of valid input:
            Box(-1.0, 1.0, (3,4))  # low and high are scalars, and shape is provided
            Box(np.array([-1.0,-2.0]), np.array([2.0,4.0]))  # low and high are arrays of the same shape

        :param low: scalar lower bound (requires shape) or array of per-coordinate lower bounds
        :param high: scalar upper bound (requires shape) or array of per-coordinate upper bounds
        :param shape: shape of the box when low/high are scalars
        """
        if shape is None:
            assert low.shape == high.shape
            self.low = low
            self.high = high
        else:
            assert np.isscalar(low) and np.isscalar(high)
            # Broadcast the scalar bounds up to the requested shape.
            self.low = low + np.zeros(shape)
            self.high = high + np.zeros(shape)

    def sample(self):
        # Uniform sample within the per-coordinate bounds.
        return np.random.uniform(low=self.low, high=self.high, size=self.low.shape)

    def contains(self, x):
        return x.shape == self.shape and (x >= self.low).all() and (x <= self.high).all()

    @property
    def shape(self):
        return self.low.shape

    @property
    def flat_dim(self):
        return np.prod(self.low.shape)

    @property
    def bounds(self):
        return self.low, self.high

    def flatten(self, x):
        return np.asarray(x).flatten()

    def unflatten(self, x):
        return np.asarray(x).reshape(self.shape)

    def flatten_n(self, xs):
        xs = np.asarray(xs)
        return xs.reshape((xs.shape[0], -1))

    def unflatten_n(self, xs):
        xs = np.asarray(xs)
        return xs.reshape((xs.shape[0],) + self.shape)

    def __repr__(self):
        return 'Box' + str(self.shape)

    def __eq__(self, other):
        return (isinstance(other, Box)
                and np.allclose(self.low, other.low)
                and np.allclose(self.high, other.high))

    def __hash__(self):
        # BUG FIX: self.low/self.high are numpy arrays, which are unhashable,
        # so the previous hash((self.low, self.high)) raised TypeError.
        # Hash immutable flattened copies of the bounds instead, keeping
        # __hash__ consistent with __eq__ for identical bounds.
        return hash((tuple(np.asarray(self.low).ravel()),
                     tuple(np.asarray(self.high).ravel())))

    def new_tensor_variable(self, name, extra_dims):
        return ext.new_tensor(name=name, ndim=extra_dims + 1, dtype=theano.config.floatX)
|
class Discrete(Space):
    """
    The discrete space {0, 1, ..., n-1}.
    """

    def __init__(self, n):
        self._n = n

    @property
    def n(self):
        return self._n

    def sample(self):
        return np.random.randint(self.n)

    def contains(self, x):
        # Accept only scalar signed-integer values in [0, n).
        x = np.asarray(x)
        return x.shape == () and x.dtype.kind == 'i' and x >= 0 and x < self.n

    def __repr__(self):
        return 'Discrete(%d)' % self.n

    def flatten(self, x):
        return special.to_onehot(x, self.n)

    def unflatten(self, x):
        return special.from_onehot(x)

    def flatten_n(self, x):
        return special.to_onehot_n(x, self.n)

    def unflatten_n(self, x):
        return special.from_onehot_n(x)

    @property
    def flat_dim(self):
        return self.n

    def weighted_sample(self, weights):
        return special.weighted_sample(weights, range(self.n))

    @property
    def default_value(self):
        return 0

    def new_tensor_variable(self, name, extra_dims):
        # Pick the narrowest unsigned integer dtype that can index n values.
        if self.n <= 2 ** 8:
            return ext.new_tensor(name=name, ndim=extra_dims + 1, dtype='uint8')
        elif self.n <= 2 ** 16:
            return ext.new_tensor(name=name, ndim=extra_dims + 1, dtype='uint16')
        else:
            return ext.new_tensor(name=name, ndim=extra_dims + 1, dtype='uint32')

    def __eq__(self, other):
        # BUG FIX: the class previously defined __eq__ twice; the first
        # definition (no isinstance check, would raise AttributeError for
        # objects without `n`) was dead code shadowed by this one. Keep only
        # the type-checked version.
        if not isinstance(other, Discrete):
            return False
        return self.n == other.n

    def __hash__(self):
        return hash(self.n)
|
class Product(Space):
    """A cartesian product (tuple) of component spaces."""

    def __init__(self, *components):
        # Allow either Product(a, b, c) or Product([a, b, c]).
        if isinstance(components[0], (list, tuple)):
            assert len(components) == 1
            components = components[0]
        self._components = tuple(components)
        dtypes = []
        for component in self._components:
            dtypes.append(component.new_tensor_variable('tmp', extra_dims=0).dtype)
        if len(dtypes) > 0 and hasattr(dtypes[0], 'as_numpy_dtype'):
            # TensorFlow dtypes expose their numpy equivalent this way.
            dtypes = [dtype.as_numpy_dtype for dtype in dtypes]
        self._common_dtype = np.core.numerictypes.find_common_type([], dtypes)

    def sample(self):
        return tuple(component.sample() for component in self._components)

    @property
    def components(self):
        return self._components

    def contains(self, x):
        if not isinstance(x, tuple):
            return False
        return all(component.contains(part)
                   for component, part in zip(self._components, x))

    def new_tensor_variable(self, name, extra_dims):
        return ext.new_tensor(name=name, ndim=extra_dims + 1, dtype=self._common_dtype)

    @property
    def flat_dim(self):
        return np.sum([component.flat_dim for component in self._components])

    def flatten(self, x):
        flat_parts = [component.flatten(part)
                      for component, part in zip(self._components, x)]
        return np.concatenate(flat_parts)

    def flatten_n(self, xs):
        # Regroup from per-sample tuples into per-component batches, flatten
        # each component batch, then concatenate along the feature axis.
        regrouped = [[sample[idx] for sample in xs] for idx in range(len(xs[0]))]
        flattened = [component.flatten_n(group)
                     for component, group in zip(self.components, regrouped)]
        return np.concatenate(flattened, axis=-1)

    def unflatten(self, x):
        dims = [component.flat_dim for component in self._components]
        pieces = np.split(x, np.cumsum(dims)[:-1])
        return tuple(component.unflatten(piece)
                     for component, piece in zip(self._components, pieces))

    def unflatten_n(self, xs):
        dims = [component.flat_dim for component in self._components]
        pieces = np.split(xs, np.cumsum(dims)[:-1], axis=-1)
        unflattened = [component.unflatten_n(piece)
                       for component, piece in zip(self.components, pieces)]
        # Transpose back to a list of per-sample tuples.
        return list(zip(*unflattened))

    def __eq__(self, other):
        if not isinstance(other, Product):
            return False
        return tuple(self.components) == tuple(other.components)

    def __hash__(self):
        return hash(tuple(self.components))
|
def unique(l):
    """Return the distinct elements of *l* as a list.

    Uses dict.fromkeys so the result preserves first-occurrence order; the
    previous list(set(l)) returned elements in arbitrary (hash-dependent)
    order, making downstream output nondeterministic. Elements must be
    hashable, as before.
    """
    return list(dict.fromkeys(l))
|
def flatten(l):
    """Concatenate the sub-iterables of *l* into a single flat list."""
    result = []
    for sublist in l:
        result.extend(sublist)
    return result
|
def load_progress(progress_csv_path):
    """Read a progress.csv file into a dict of column name -> float np.array.

    Cells that cannot be parsed as a float (non-numeric text or missing
    values) are recorded as 0.0.

    :param progress_csv_path: path to the CSV file
    :return: dict mapping each column header to a numpy array of floats
    """
    print('Reading %s' % progress_csv_path)
    entries = dict()
    with open(progress_csv_path, 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            for k, v in row.items():
                if k not in entries:
                    entries[k] = []
                try:
                    entries[k].append(float(v))
                except (TypeError, ValueError):
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit. float() raises ValueError
                    # for bad text and TypeError for None (missing cell).
                    entries[k].append(0.0)
    entries = dict([(k, np.array(v)) for (k, v) in entries.items()])
    return entries
|
def to_json(stub_object):
    """Recursively convert a stubbed rllab object into JSON-serializable data.

    StubObject instances become dicts of their kwargs (plus a '_name' entry
    with the fully-qualified class name), StubAttr instances become
    {'obj': ..., 'attr': ...} dicts, and anything else is returned unchanged.
    """
    from rllab.misc.instrument import StubObject
    from rllab.misc.instrument import StubAttr
    if isinstance(stub_object, StubObject):
        # Positional args are not supported for serialization.
        assert len(stub_object.args) == 0
        data = {k: to_json(v) for k, v in stub_object.kwargs.items()}
        data['_name'] = stub_object.proxy_class.__module__ + '.' + stub_object.proxy_class.__name__
        return data
    if isinstance(stub_object, StubAttr):
        return dict(obj=to_json(stub_object.obj), attr=to_json(stub_object.attr_name))
    return stub_object
|
def flatten_dict(d):
    """Flatten a nested dict into a single level, joining keys with '.'.

    E.g. {'a': {'b': 1}, 'c': 2} -> {'a.b': 1, 'c': 2}.
    """
    flat_params = dict()
    for k, v in d.items():
        if isinstance(v, dict):
            # BUG FIX: the original flattened v twice (once into a temporary,
            # then again when iterating it), doing redundant recursive work on
            # every nested dict. Flatten each sub-dict exactly once.
            for subk, subv in flatten_dict(v).items():
                flat_params[k + '.' + subk] = subv
        else:
            flat_params[k] = v
    return flat_params
|
def load_params(params_json_path):
    """Load an experiment's JSON parameter file.

    Drops the bulky 'args_data' entry (pickled launcher state) and defaults
    'exp_name' to the name of the directory containing the file.

    :param params_json_path: path to params.json (or variant.json)
    :return: dict of parameters
    """
    with open(params_json_path, 'r') as f:
        # Idiom: parse straight from the file object rather than
        # json.loads(f.read()).
        data = json.load(f)
    if 'args_data' in data:
        del data['args_data']
    if 'exp_name' not in data:
        # NOTE(review): assumes '/'-separated (POSIX-style) paths — the parent
        # directory name is taken as the experiment name.
        data['exp_name'] = params_json_path.split('/')[-2]
    return data
|
def lookup(d, keys):
    """Walk nested subscriptable containers by a key path.

    :param d: nested container (dicts, etc.)
    :param keys: list of keys, or a single dotted string like 'a.b.c'
    :return: the value at the path, or None if any key is missing or an
        intermediate value is not subscriptable
    """
    if not isinstance(keys, list):
        keys = keys.split('.')
    for key in keys:
        # Guard clauses: bail out as soon as the walk cannot continue.
        if not hasattr(d, '__getitem__'):
            return None
        if key not in d:
            return None
        d = d[key]
    return d
|
def load_exps_data(exp_folder_paths, disable_variant=False):
    """Recursively collect experiment data from the given folders.

    Walks every directory under each root, loading progress.csv plus either
    variant.json (preferred) or params.json for each experiment found.
    Directories missing the expected files are skipped with a printed error.

    :param exp_folder_paths: list of root directories to walk
    :param disable_variant: when True, always read params.json even if a
        variant.json exists
    :return: list of AttrDicts with 'progress', 'params' and 'flat_params'
    """
    candidate_dirs = []
    for root in exp_folder_paths:
        candidate_dirs.extend(walked[0] for walked in os.walk(root))
    exps_data = []
    for exp_path in candidate_dirs:
        try:
            params_json_path = os.path.join(exp_path, 'params.json')
            variant_json_path = os.path.join(exp_path, 'variant.json')
            progress_csv_path = os.path.join(exp_path, 'progress.csv')
            progress = load_progress(progress_csv_path)
            if disable_variant:
                params = load_params(params_json_path)
            else:
                try:
                    params = load_params(variant_json_path)
                except IOError:
                    # No variant.json — fall back to the full params file.
                    params = load_params(params_json_path)
            exps_data.append(ext.AttrDict(
                progress=progress, params=params, flat_params=flatten_dict(params)))
        except IOError as e:
            print(e)
    return exps_data
|
def smart_repr(x):
    """Return a repr of *x* designed to round-trip through eval().

    Tuples are rendered element-wise recursively, callables become a pydoc
    lookup expression (so functions/classes can be recovered by eval), and
    everything else falls back to repr().
    """
    if isinstance(x, tuple):
        if len(x) == 0:
            return 'tuple()'
        elif len(x) == 1:
            # Keep the trailing comma so eval() yields a tuple, not a scalar.
            return '(%s,)' % smart_repr(x[0])
        else:
            return '(' + ','.join(map(smart_repr, x)) + ')'
    elif callable(x):
        # Idiom: callable(x) instead of hasattr(x, '__call__').
        return "__import__('pydoc').locate('%s')" % (x.__module__ + '.' + x.__name__)
    else:
        return repr(x)
|
def extract_distinct_params(exps_data, excluded_params=('exp_name', 'seed', 'log_dir'), l=1):
    """Find hyper-parameters whose values differ across experiments.

    :param exps_data: experiment records, each exposing a .flat_params dict
    :param excluded_params: parameter-name prefixes to ignore
    :param l: minimum number of values a parameter must have to be reported
    :return: list of (param_name, [values]) pairs
    """
    try:
        # Stringify every (key, value) pair with smart_repr so unhashable
        # values can be de-duplicated, then eval them back into objects.
        # None values sort as 0.0 to keep the sort key comparable.
        stringified_pairs = sorted(
            map(eval, unique(flatten([list(map(smart_repr, list(d.flat_params.items()))) for d in exps_data]))),
            key=(lambda x: (tuple(((0.0 if (it is None) else it) for it in x)),)))
    except Exception as e:
        # BUG FIX: previously dropped into an ipdb debugger here, which hangs
        # any non-interactive run; log the error and re-raise instead.
        print(e)
        raise
    proposals = [(k, [x[1] for x in v])
                 for (k, v) in itertools.groupby(stringified_pairs, (lambda x: x[0]))]
    # Keep parameters with more than `l` values whose name does not start
    # with any excluded prefix.
    filtered = [(k, v) for (k, v) in proposals
                if len(v) > l and all(not k.startswith(excluded_param)
                                      for excluded_param in excluded_params)]
    return filtered
|
class Selector(object):
    """Immutable query object for filtering a list of experiment records.

    Each where()/custom_filter() call returns a *new* Selector carrying the
    extra condition; extract()/iextract() apply all accumulated conditions.
    """

    def __init__(self, exps_data, filters=None, custom_filters=None):
        self._exps_data = exps_data
        self._filters = tuple() if filters is None else tuple(filters)
        self._custom_filters = [] if custom_filters is None else custom_filters

    def where(self, k, v):
        # Equality filter (compared via str()) on flat parameter k.
        return Selector(self._exps_data, self._filters + ((k, v),), self._custom_filters)

    def custom_filter(self, filter):
        return Selector(self._exps_data, self._filters, self._custom_filters + [filter])

    def _check_exp(self, exp):
        # An experiment matches when every (k, v) filter is either satisfied
        # (string-equal) or the key is absent entirely, and every custom
        # filter predicate passes.
        for k, v in self._filters:
            if str(exp.flat_params.get(k, None)) != str(v) and k in exp.flat_params:
                return False
        for custom_filter in self._custom_filters:
            if not custom_filter(exp):
                return False
        return True

    def extract(self):
        return list(filter(self._check_exp, self._exps_data))

    def iextract(self):
        # Lazy variant of extract().
        return filter(self._check_exp, self._exps_data)
|
def hex_to_rgb(hex, opacity=1.0):
    """Convert a hex color string ('#rrggbb' or 'rrggbb') to a CSS rgba() string.

    :param hex: six-hex-digit color, optionally prefixed with '#'
    :param opacity: alpha value, included verbatim in the output
    :raises ValueError: if the color is not exactly six hex digits long
    """
    if hex[0] == '#':
        hex = hex[1:]
    if len(hex) != 6:
        # Raise instead of assert: asserts are stripped under `python -O`,
        # which would let malformed input through silently.
        raise ValueError('expected a 6-digit hex color, got %r' % hex)
    return 'rgba({0},{1},{2},{3})'.format(
        int(hex[:2], 16), int(hex[2:4], 16), int(hex[4:6], 16), opacity)
|
class BatchPolopt(RLAlgorithm):
    '\n    Base class for batch sampling-based policy optimization methods.\n    This includes various policy gradient methods like vpg, npg, ppo, trpo, etc.\n    '
    # MAESN variant of the batch policy-optimization base class: besides the
    # standard rllab knobs it carries per-task latent variables (latent_dim
    # per task, num_total_tasks tasks) modeled by a diagonal Gaussian, plus
    # flags selecting which data the policy optimizer receives.
    # NOTE(review): a second BatchPolopt definition appears later in this
    # file and shadows this one at import time — confirm which is intended.

    def __init__(self, env, policy, baseline, scope=None, n_itr=500, start_itr=0, batch_size=5000, max_path_length=500, discount=0.99, gae_lambda=1, plot=False, pause_for_plot=False, center_adv=True, positive_adv=False, store_paths=False, whole_paths=True, fixed_horizon=False, sampler_cls=None, sampler_args=None, force_batch_sampler=False, load_policy=None, reset_arg=None, latent_dim=4, num_total_tasks=10, noise_opt=False, joint_opt=False, improve=False, **kwargs):
        '\n        :param env: Environment\n        :param policy: Policy\n        :type policy: Policy\n        :param baseline: Baseline\n        :param scope: Scope for identifying the algorithm. Must be specified if running multiple algorithms\n        simultaneously, each using different environments and policies\n        :param n_itr: Number of iterations.\n        :param start_itr: Starting iteration.\n        :param batch_size: Number of samples per iteration.\n        :param max_path_length: Maximum length of a single rollout.\n        :param discount: Discount.\n        :param gae_lambda: Lambda used for generalized advantage estimation.\n        :param plot: Plot evaluation run after each iteration.\n        :param pause_for_plot: Whether to pause before contiuing when plotting.\n        :param center_adv: Whether to rescale the advantages so that they have mean 0 and standard deviation 1.\n        :param positive_adv: Whether to shift the advantages so that they are always positive. When used in\n        conjunction with center_adv the advantages will be standardized before shifting.\n        :param store_paths: Whether to save all paths data to the snapshot.\n        :return:\n        '
        self.env = env
        # MAESN-specific options (see train() for how they select the
        # optimize_policy / snapshot inputs).
        self.noise_opt = noise_opt
        self.joint_opt = joint_opt
        self.latent_dim = latent_dim
        self.num_total_tasks = num_total_tasks
        self.policy = policy
        # Optional path to a joblib snapshot to resume policy/baseline from.
        self.load_policy = load_policy
        self.baseline = baseline
        self.scope = scope
        self.n_itr = n_itr
        self.start_itr = start_itr
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.plot = plot
        self.pause_for_plot = pause_for_plot
        self.center_adv = center_adv
        self.positive_adv = positive_adv
        self.store_paths = store_paths
        self.whole_paths = whole_paths
        self.fixed_horizon = fixed_horizon
        # When True, train() optimizes/snapshots with the plain (non-latent)
        # samples_data only.
        self.improve = improve
        if (sampler_cls is None):
            sampler_cls = BatchSampler
        if (sampler_args is None):
            sampler_args = dict()
        self.sampler = sampler_cls(self, **sampler_args)
        # Forwarded to the sampler as reset_args on every obtain_samples call.
        self.reset_arg = reset_arg
        # Distribution over the per-task latent variables.
        self.latent_dist = DiagonalGaussian(self.latent_dim)

    def start_worker(self):
        # Spin up sampler workers (and the plotter when plotting is enabled).
        self.sampler.start_worker()
        if self.plot:
            plotter.init_plot(self.env, self.policy)

    def shutdown_worker(self):
        self.sampler.shutdown_worker()

    def obtain_samples(self, itr):
        return self.sampler.obtain_samples(itr, reset_args=self.reset_arg)

    def process_samples(self, itr, paths, noise_opt=False, joint_opt=False):
        # NOTE(review): task_idx is hard-coded to 0 — confirm this is the
        # intended behavior in the multi-task setting.
        return self.sampler.process_samples(itr, paths, task_idx=0, noise_opt=noise_opt, joint_opt=joint_opt)

    def train(self):
        # Main training loop: sample -> process -> optimize -> snapshot.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            # Optionally resume the policy/baseline from a joblib snapshot.
            if (self.load_policy is not None):
                import joblib
                loaded = joblib.load(self.load_policy)
                self.policy = loaded['policy']
                self.baseline = loaded['baseline']
            self.init_opt()
            # Initialize only the variables that are not yet initialized, so
            # anything restored above keeps its loaded values.
            uninit_vars = []
            for var in tf.all_variables():
                try:
                    sess.run(var)
                except tf.errors.FailedPreconditionError:
                    uninit_vars.append(var)
            sess.run(tf.initialize_variables(uninit_vars))
            self.start_worker()
            start_time = time.time()
            for itr in range(self.start_itr, self.n_itr):
                itr_start_time = time.time()
                with logger.prefix(('itr #%d | ' % itr)):
                    logger.log('Obtaining samples...')
                    paths = self.obtain_samples(itr)
                    logger.log('Processing samples...')
                    # NOTE(review): joint_opt is forced to True here even when
                    # self.joint_opt is False — confirm this is deliberate.
                    (samples_data, samples_data_latent) = self.process_samples(itr, paths, noise_opt=self.noise_opt, joint_opt=True)
                    logger.log('Logging diagnostics...')
                    self.log_diagnostics(paths)
                    logger.log('Optimizing policy...')
                    # Three modes: plain improvement, joint policy+latent
                    # optimization, or latent-only optimization.
                    if self.improve:
                        self.optimize_policy(itr, samples_data)
                    elif self.joint_opt:
                        self.optimize_policy(itr, samples_data, samples_data_latent)
                    else:
                        self.optimize_policy(itr, samples_data_latent)
                    logger.log('Saving snapshot...')
                    if (self.improve or self.joint_opt):
                        params = self.get_itr_snapshot(itr, samples_data)
                    else:
                        params = self.get_itr_snapshot(itr, samples_data_latent)
                    if self.store_paths:
                        # NOTE(review): always stores samples_data['paths'],
                        # even when the snapshot itself came from
                        # samples_data_latent — verify this is intended.
                        params['paths'] = samples_data['paths']
                    logger.save_itr_params(itr, params)
                    logger.log('Saved')
                    logger.record_tabular('Time', (time.time() - start_time))
                    logger.record_tabular('ItrTime', (time.time() - itr_start_time))
                    logger.dump_tabular(with_prefix=False)
                    if self.plot:
                        self.update_plot()
                        if self.pause_for_plot:
                            input('Plotting evaluation run: Press Enter to continue...')
            self.shutdown_worker()

    def log_diagnostics(self, paths):
        self.env.log_diagnostics(paths)
        self.policy.log_diagnostics(paths)
        self.baseline.log_diagnostics(paths)

    def init_opt(self):
        '\n        Initialize the optimization procedure. If using tensorflow, this may\n        include declaring all the variables and compiling functions\n        '
        raise NotImplementedError

    def get_itr_snapshot(self, itr, samples_data):
        '\n        Returns all the data that should be saved in the snapshot for this\n        iteration.\n        '
        raise NotImplementedError

    def optimize_policy(self, itr, samples_data):
        raise NotImplementedError

    def update_plot(self):
        if self.plot:
            plotter.update_plot(self.policy, self.max_path_length)
|
class BatchPolopt(RLAlgorithm):
    '\n    Base class for batch sampling-based policy optimization methods.\n    This includes various policy gradient methods like vpg, npg, ppo, trpo, etc.\n    '
    # Second (near-duplicate) definition of BatchPolopt in this file; at
    # import time it shadows the earlier one. Differences from the earlier
    # version: no `improve` flag, and train() zeroes the per-task latent
    # means/stds before the training loop starts.

    def __init__(self, env, policy, baseline, scope=None, n_itr=500, start_itr=0, batch_size=5000, max_path_length=500, discount=0.99, gae_lambda=1, plot=False, pause_for_plot=False, center_adv=True, positive_adv=False, store_paths=False, whole_paths=True, fixed_horizon=False, sampler_cls=None, sampler_args=None, force_batch_sampler=False, load_policy=None, reset_arg=None, latent_dim=4, num_total_tasks=10, noise_opt=False, joint_opt=False, **kwargs):
        '\n        :param env: Environment\n        :param policy: Policy\n        :type policy: Policy\n        :param baseline: Baseline\n        :param scope: Scope for identifying the algorithm. Must be specified if running multiple algorithms\n        simultaneously, each using different environments and policies\n        :param n_itr: Number of iterations.\n        :param start_itr: Starting iteration.\n        :param batch_size: Number of samples per iteration.\n        :param max_path_length: Maximum length of a single rollout.\n        :param discount: Discount.\n        :param gae_lambda: Lambda used for generalized advantage estimation.\n        :param plot: Plot evaluation run after each iteration.\n        :param pause_for_plot: Whether to pause before contiuing when plotting.\n        :param center_adv: Whether to rescale the advantages so that they have mean 0 and standard deviation 1.\n        :param positive_adv: Whether to shift the advantages so that they are always positive. When used in\n        conjunction with center_adv the advantages will be standardized before shifting.\n        :param store_paths: Whether to save all paths data to the snapshot.\n        :return:\n        '
        self.env = env
        # MAESN-specific options (see train() for how they select the
        # optimize_policy / snapshot inputs).
        self.noise_opt = noise_opt
        self.joint_opt = joint_opt
        self.latent_dim = latent_dim
        self.num_total_tasks = num_total_tasks
        self.policy = policy
        # Optional path to a joblib snapshot to resume policy/baseline from.
        self.load_policy = load_policy
        self.baseline = baseline
        self.scope = scope
        self.n_itr = n_itr
        self.start_itr = start_itr
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.plot = plot
        self.pause_for_plot = pause_for_plot
        self.center_adv = center_adv
        self.positive_adv = positive_adv
        self.store_paths = store_paths
        self.whole_paths = whole_paths
        self.fixed_horizon = fixed_horizon
        if (sampler_cls is None):
            sampler_cls = BatchSampler
        if (sampler_args is None):
            sampler_args = dict()
        self.sampler = sampler_cls(self, **sampler_args)
        # Forwarded to the sampler as reset_args on every obtain_samples call.
        self.reset_arg = reset_arg
        # Distribution over the per-task latent variables.
        self.latent_dist = DiagonalGaussian(self.latent_dim)

    def start_worker(self):
        # Spin up sampler workers (and the plotter when plotting is enabled).
        self.sampler.start_worker()
        if self.plot:
            plotter.init_plot(self.env, self.policy)

    def shutdown_worker(self):
        self.sampler.shutdown_worker()

    def obtain_samples(self, itr):
        return self.sampler.obtain_samples(itr, reset_args=self.reset_arg)

    def process_samples(self, itr, paths, noise_opt=False, joint_opt=False):
        # NOTE(review): task_idx is hard-coded to 0 — confirm this is the
        # intended behavior in the multi-task setting.
        return self.sampler.process_samples(itr, paths, task_idx=0, noise_opt=noise_opt, joint_opt=joint_opt)

    def train(self):
        # Main training loop: sample -> process -> optimize -> snapshot.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            # Optionally resume the policy/baseline from a joblib snapshot.
            if (self.load_policy is not None):
                import joblib
                loaded = joblib.load(self.load_policy)
                self.policy = loaded['policy']
                self.baseline = loaded['baseline']
            self.init_opt()
            # Initialize only the variables that are not yet initialized, so
            # anything restored above keeps its loaded values.
            uninit_vars = []
            for var in tf.all_variables():
                try:
                    sess.run(var)
                except tf.errors.FailedPreconditionError:
                    uninit_vars.append(var)
            sess.run(tf.initialize_variables(uninit_vars))
            self.start_worker()
            start_time = time.time()
            # Reset the per-task latent parameters to zero before training.
            # NOTE(review): zeroing 'latent_stds' suggests these are log-stds
            # (zero log-std == unit std) — confirm against the policy.
            assign_op_mean = self.policy.all_params['latent_means'].assign(np.zeros((self.num_total_tasks, self.latent_dim)))
            assign_op_std = self.policy.all_params['latent_stds'].assign(np.zeros((self.num_total_tasks, self.latent_dim)))
            sess.run(assign_op_mean)
            sess.run(assign_op_std)
            for itr in range(self.start_itr, self.n_itr):
                itr_start_time = time.time()
                with logger.prefix(('itr #%d | ' % itr)):
                    logger.log('Obtaining samples...')
                    paths = self.obtain_samples(itr)
                    logger.log('Processing samples...')
                    # NOTE(review): joint_opt is forced to True here even when
                    # self.joint_opt is False — confirm this is deliberate.
                    (samples_data, samples_data_latent) = self.process_samples(itr, paths, noise_opt=self.noise_opt, joint_opt=True)
                    logger.log('Logging diagnostics...')
                    self.log_diagnostics(paths)
                    logger.log('Optimizing policy...')
                    # Either optimize policy and latents jointly, or the
                    # latents alone.
                    if self.joint_opt:
                        self.optimize_policy(itr, samples_data, samples_data_latent)
                    else:
                        self.optimize_policy(itr, samples_data_latent)
                    logger.log('Saving snapshot...')
                    if self.joint_opt:
                        params = self.get_itr_snapshot(itr, samples_data)
                    else:
                        params = self.get_itr_snapshot(itr, samples_data_latent)
                    if self.store_paths:
                        # NOTE(review): always stores samples_data['paths'],
                        # even when the snapshot itself came from
                        # samples_data_latent — verify this is intended.
                        params['paths'] = samples_data['paths']
                    logger.save_itr_params(itr, params)
                    logger.log('Saved')
                    logger.record_tabular('Time', (time.time() - start_time))
                    logger.record_tabular('ItrTime', (time.time() - itr_start_time))
                    logger.dump_tabular(with_prefix=False)
                    if self.plot:
                        self.update_plot()
                        if self.pause_for_plot:
                            input('Plotting evaluation run: Press Enter to continue...')
            self.shutdown_worker()

    def log_diagnostics(self, paths):
        self.env.log_diagnostics(paths)
        self.policy.log_diagnostics(paths)
        self.baseline.log_diagnostics(paths)

    def init_opt(self):
        '\n        Initialize the optimization procedure. If using tensorflow, this may\n        include declaring all the variables and compiling functions\n        '
        raise NotImplementedError

    def get_itr_snapshot(self, itr, samples_data):
        '\n        Returns all the data that should be saved in the snapshot for this\n        iteration.\n        '
        raise NotImplementedError

    def optimize_policy(self, itr, samples_data):
        raise NotImplementedError

    def update_plot(self):
        if self.plot:
            plotter.update_plot(self.policy, self.max_path_length)
|
class MAESN_NPO(BatchMAESNPolopt):
    '\n    Natural Policy Optimization.\n    '
    # MAESN natural-policy-optimization algorithm: builds a MAML-style
    # meta-objective over meta_batch_size tasks, with per-task latent
    # variables regularized toward a unit Gaussian.

    def __init__(self, optimizer=None, optimizer_args=None, step_size=0.01, use_maml=True, **kwargs):
        # NOTE(review): this assert makes the `optimizer is None` branch
        # below unreachable — callers must always supply an optimizer.
        assert (optimizer is not None)
        if (optimizer is None):
            if (optimizer_args is None):
                optimizer_args = dict()
            optimizer = PenaltyLbfgsOptimizer(**optimizer_args)
        if (not use_maml):
            # Without the MAML meta-objective, fall back to a plain
            # first-order optimizer over the pre-update objective.
            default_args = dict(batch_size=None, max_epochs=1)
            optimizer = FirstOrderOptimizer(**default_args)
        self.optimizer = optimizer
        # KL trust-region size used by the constrained optimizer.
        self.step_size = step_size
        self.use_maml = use_maml
        # -1 selects the last (post-update) step for the KL constraint.
        self.kl_constrain_step = (- 1)
        super(MAESN_NPO, self).__init__(**kwargs)

    def make_vars(self, stepnum='0'):
        # Create per-task placeholders for one gradient-update step:
        # observations, actions, advantages, latent noise, and task indices.
        (obs_vars, action_vars, adv_vars, noise_vars, task_idx_vars) = ([], [], [], [], [])
        for i in range(self.meta_batch_size):
            obs_vars.append(self.env.observation_space.new_tensor_variable(((('obs' + stepnum) + '_') + str(i)), extra_dims=1))
            action_vars.append(self.env.action_space.new_tensor_variable(((('action' + stepnum) + '_') + str(i)), extra_dims=1))
            adv_vars.append(tensor_utils.new_tensor(name=((('advantage' + stepnum) + '_') + str(i)), ndim=1, dtype=tf.float32))
            noise_vars.append(tf.placeholder(dtype=tf.float32, shape=[None, self.latent_dim], name=((('noise' + stepnum) + '_') + str(i))))
            task_idx_vars.append(tensor_utils.new_tensor(name=((('task_idx' + stepnum) + '_') + str(i)), ndim=1, dtype=tf.int32))
        return (obs_vars, action_vars, adv_vars, noise_vars, task_idx_vars)

    def make_vars_latent(self, stepnum='0'):
        # Create per-task placeholders for the latent-space objective:
        # advantages, sampled latents z, and task indices.
        (adv_vars, z_vars, task_idx_vars) = ([], [], [])
        for i in range(self.meta_batch_size):
            adv_vars.append(tensor_utils.new_tensor(name=((('advantage_latent' + stepnum) + '_') + str(i)), ndim=1, dtype=tf.float32))
            z_vars.append(tf.placeholder(dtype=tf.float32, shape=[None, self.latent_dim], name=((('zs_latent' + stepnum) + '_') + str(i))))
            task_idx_vars.append(tensor_utils.new_tensor(name=((('task_idx_latents' + stepnum) + '_') + str(i)), ndim=1, dtype=tf.int32))
        return (adv_vars, z_vars, task_idx_vars)

    @overrides
    def init_opt(self):
        # Build the symbolic meta-objective. Recurrent policies are not
        # supported.
        is_recurrent = int(self.policy.recurrent)
        assert (not is_recurrent)
        dist = self.policy.distribution
        # Weight on the KL(latent || unit Gaussian) regularizer, fed at
        # optimization time (see optimize_policy's kl_scheme schedule).
        self.kl_weighting_ph = tf.placeholder(dtype=tf.float32, shape=[1], name='kl_weighting_ph')
        # Placeholders for the old policy distribution, one dict per task.
        (old_dist_info_vars, old_dist_info_vars_list) = ([], [])
        for i in range(self.meta_batch_size):
            old_dist_info_vars.append({k: tf.placeholder(tf.float32, shape=([None] + list(shape)), name=('old_%s_%s' % (i, k))) for (k, shape) in dist.dist_info_specs})
            old_dist_info_vars_list += [old_dist_info_vars[i][k] for k in dist.dist_info_keys]
        (state_info_vars, state_info_vars_list) = ({}, [])
        (all_surr_objs, input_list) = ([], [])
        all_surr_objs_latent = []
        new_params = None
        new_params_latent = None
        # Inner (pre-update) surrogate objectives, one set per gradient step.
        for j in range(self.num_grad_updates):
            (obs_vars, action_vars, adv_vars, noise_vars, task_idx_vars) = self.make_vars(str(j))
            (adv_vars_latent, z_vars_latent, task_idx_vars_latent) = self.make_vars_latent(str(j))
            surr_objs = []
            surr_objs_latent = []
            cur_params = new_params
            new_params = []
            new_params_latent = []
            kls = []
            for i in range(self.meta_batch_size):
                (dist_info_vars, params_temp) = self.policy.dist_info_sym(obs_vars[i], task_idx_vars[i], noise_vars[i], state_info_vars, all_params=self.policy.all_params)
                # Split the policy parameters into network weights (params)
                # and the per-task latent parameters (params_latent).
                params = OrderedDict()
                for param_key in params_temp.keys():
                    if ('latent' not in param_key):
                        params[param_key] = params_temp[param_key]
                means = tf.gather(self.policy.all_params['latent_means'], task_idx_vars_latent[i])
                log_stds = tf.gather(self.policy.all_params['latent_stds'], task_idx_vars_latent[i])
                dist_info_vars_latent = {'mean': means, 'log_std': log_stds}
                params_latent = OrderedDict()
                params_latent['latent_means'] = self.policy.all_params['latent_means']
                params_latent['latent_stds'] = self.policy.all_params['latent_stds']
                new_params.append(params)
                new_params_latent.append(params_latent)
                # Vanilla policy-gradient surrogates for the action and
                # latent distributions.
                logli = dist.log_likelihood_sym(action_vars[i], dist_info_vars)
                logli_latent = self.latent_dist.log_likelihood_sym(z_vars_latent[i], dist_info_vars_latent)
                surr_objs.append((- tf.reduce_mean((logli * adv_vars[i]))))
                surr_objs_latent.append((- tf.reduce_mean((logli_latent * adv_vars_latent[i]))))
            input_list += (((((obs_vars + action_vars) + adv_vars) + noise_vars) + task_idx_vars) + state_info_vars_list)
            input_list += ((adv_vars_latent + z_vars_latent) + task_idx_vars_latent)
            if (j == 0):
                # Register the step-0 objectives with the policy so it can
                # build its own fast-adaptation graph.
                self.policy.set_init_surr_obj(input_list, surr_objs, surr_objs_latent)
                init_input_list = input_list
            all_surr_objs.append(surr_objs)
            all_surr_objs_latent.append(surr_objs_latent)
        # Outer (post-update / meta) objective, evaluated on fresh 'test'
        # placeholders using the adapted parameters.
        (obs_vars, action_vars, adv_vars, noise_vars, task_idx_vars) = self.make_vars('test')
        surr_objs = []
        for i in range(self.meta_batch_size):
            (dist_info_vars, _) = self.policy.updated_dist_info_sym(i, all_surr_objs[(- 1)][i], all_surr_objs_latent[(- 1)][i], obs_vars[i], task_idx_vars[i], noise_vars[i], params_dict=new_params[i], params_dict_latent=new_params_latent[i])
            if (self.kl_constrain_step == (- 1)):
                # Constrain the KL between old and adapted policies.
                kl = dist.kl_sym(old_dist_info_vars[i], dist_info_vars)
                kls.append(kl)
            # Importance-weighted surrogate loss for this task.
            lr = dist.likelihood_ratio_sym(action_vars[i], old_dist_info_vars[i], dist_info_vars)
            curr_obj = (- tf.reduce_mean((lr * adv_vars[i])))
            # Regularize this task's latent distribution toward N(0, I).
            curr_mean = tf.gather(self.policy.all_params['latent_means'], task_idx_vars[i])
            curr_logstd = tf.gather(self.policy.all_params['latent_stds'], task_idx_vars[i])
            curr_latent_dist = {'mean': curr_mean, 'log_std': curr_logstd}
            unit_gaussian_dist = {'mean': tf.zeros_like(curr_mean), 'log_std': tf.zeros_like(curr_logstd)}
            kl_regularization = tf.reduce_mean(self.latent_dist.kl_sym(curr_latent_dist, unit_gaussian_dist))
            curr_obj += (self.kl_weighting_ph[0] * kl_regularization)
            surr_objs.append(curr_obj)
        if self.use_maml:
            # Average the per-task meta objectives.
            surr_obj = tf.reduce_mean(tf.stack(surr_objs, 0))
            input_list += (((((obs_vars + action_vars) + adv_vars) + noise_vars) + task_idx_vars) + old_dist_info_vars_list)
        else:
            # Non-MAML fallback: optimize the step-0 objective directly.
            surr_obj = tf.reduce_mean(tf.stack(all_surr_objs[0], 0))
            input_list = init_input_list
        input_list += [self.kl_weighting_ph]
        if self.use_maml:
            mean_kl = tf.reduce_mean(tf.concat(kls, 0))
            max_kl = tf.reduce_max(tf.concat(kls, 0))
            self.optimizer.update_opt(loss=surr_obj, target=self.policy, leq_constraint=(mean_kl, self.step_size), inputs=input_list, constraint_name='mean_kl')
        else:
            self.optimizer.update_opt(loss=surr_obj, target=self.policy, inputs=input_list)
        return dict()

    @overrides
    def optimize_policy(self, itr, all_samples_data, all_samples_data_latent):
        # Assemble the flat input list expected by init_opt's graph, then run
        # one constrained optimization step.
        assert (len(all_samples_data) == (self.num_grad_updates + 1))
        sess = tf.get_default_session()
        if (not self.use_maml):
            all_samples_data = [all_samples_data[0]]
        input_list = []
        for step in range(len(all_samples_data)):
            (obs_list, action_list, adv_list, noise_list, task_idx_list) = ([], [], [], [], [])
            for i in range(self.meta_batch_size):
                inputs = ext.extract(all_samples_data[step][i], 'observations', 'actions', 'advantages', 'noises', 'task_idxs')
                obs_list.append(inputs[0])
                action_list.append(inputs[1])
                adv_list.append(inputs[2])
                noise_list.append(inputs[3])
                task_idx_list.append(inputs[4])
            input_list += ((((obs_list + action_list) + adv_list) + noise_list) + task_idx_list)
            if (step == 0):
                # Latent-space inputs are only fed for the first step.
                (adv_list_latent, z_list_latent, task_idx_list_latent) = ([], [], [])
                for i in range(self.meta_batch_size):
                    inputs = ext.extract(all_samples_data_latent[step][i], 'advantages', 'noises', 'task_idxs')
                    # Reconstruct the sampled latents z = mean + noise * std.
                    # NOTE(review): builds new tf.gather ops per call —
                    # this grows the graph every iteration.
                    means = tf.gather(self.policy.all_params['latent_means'], inputs[(- 1)])
                    stds = tf.gather(self.policy.all_params['latent_stds'], inputs[(- 1)])
                    zs = sess.run((means + (inputs[(- 2)] * tf.exp(stds))))
                    adv_list_latent.append(inputs[0])
                    z_list_latent.append(zs)
                    task_idx_list_latent.append(inputs[2])
                input_list += ((adv_list_latent + z_list_latent) + task_idx_list_latent)
            if (step == 0):
                init_inputs = input_list
        if self.use_maml:
            # Feed the pre-update agent distribution infos for the KL
            # constraint (kl_constrain_step == -1 selects the last step).
            dist_info_list = []
            for i in range(self.meta_batch_size):
                agent_infos = all_samples_data[self.kl_constrain_step][i]['agent_infos']
                dist_info_list += [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
            input_list += tuple(dist_info_list)
            logger.log('Computing KL before')
            mean_kl_before = self.optimizer.constraint_val(input_list)
        # Schedule for the latent-KL regularizer weight; kl_scheme /
        # kl_weighting are expected to be set by the base class from kwargs.
        if (self.kl_scheme is None):
            curr_kl_weighting = self.kl_weighting
        elif (self.kl_scheme == '0.01step4to0.05'):
            curr_kl_weighting = min(0.05, (0.01 + ((itr // 10) * 0.01)))
        elif (self.kl_scheme == '0.01step8to0.1'):
            curr_kl_weighting = min(0.1, (0.01 + ((itr // 5) * 0.001)))
        elif (self.kl_scheme == '0.01step8to0.05'):
            curr_kl_weighting = min(0.05, (0.01 + ((itr // 5) * 0.0005)))
        elif (self.kl_scheme == '0.01step8to0.2'):
            curr_kl_weighting = min(0.2, (0.01 + ((itr // 5) * 0.002)))
        elif (self.kl_scheme == '0.002step100to0.1'):
            curr_kl_weighting = min(0.1, (0.002 + ((itr // 5) * 0.001)))
        elif (self.kl_scheme == '0.002step100to0.02'):
            curr_kl_weighting = min(0.02, (0.002 + ((itr // 5) * 0.0002)))
        elif (self.kl_scheme == '0.002step100to0.05'):
            curr_kl_weighting = min(0.05, (0.002 + ((itr // 5) * 0.0005)))
        elif (self.kl_scheme == '0.01stepcontto0.05'):
            curr_kl_weighting = min(0.05, (0.01 + (itr * 0.001)))
        elif (self.kl_scheme == '0.01stepcontto0.1'):
            curr_kl_weighting = min(0.1, (0.01 + (itr * 0.001)))
        elif (self.kl_scheme == '0.01step8to0.3'):
            curr_kl_weighting = min(0.3, (0.01 + (itr * 0.003)))
        else:
            # NOTE(review): unknown schemes drop into an interactive IPython
            # shell — this will hang unattended runs.
            print('ERROR')
            import IPython
            IPython.embed()
        # Value fed into kl_weighting_ph (shape [1]).
        input_list += ([curr_kl_weighting],)
        logger.log('Computing loss before')
        loss_before = self.optimizer.loss(input_list)
        logger.log('Optimizing')
        self.optimizer.optimize(input_list)
        logger.log('Computing loss after')
        loss_after = self.optimizer.loss(input_list)
        if self.use_maml:
            logger.log('Computing KL after')
            mean_kl = self.optimizer.constraint_val(input_list)
            logger.record_tabular('MeanKLBefore', mean_kl_before)
            logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('LossBefore', loss_before)
        logger.record_tabular('LossAfter', loss_after)
        logger.record_tabular('dLoss', (loss_before - loss_after))
        return dict()

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        # Snapshot only the long-lived objects; paths are added by train().
        return dict(itr=itr, policy=self.policy, baseline=self.baseline, env=self.env)
|
class MAESN_TRPO(MAESN_NPO):
    """Trust Region Policy Optimization (MAESN variant).

    Identical to MAESN_NPO except that the optimizer defaults to conjugate
    gradient when none is supplied.
    """

    def __init__(self, optimizer=None, optimizer_args=None, **kwargs):
        if optimizer is None:
            optimizer = ConjugateGradientOptimizer(**(optimizer_args or dict()))
        super(MAESN_TRPO, self).__init__(optimizer=optimizer, **kwargs)
|
class NPO(BatchPolopt):
    '\n    Natural Policy Optimization.\n    '
    # Single-task NPO built on the (latent-aware) BatchPolopt base class.
    # NOTE: this class continues past this point in the file; only the
    # fully visible methods are annotated here.

    def __init__(self, optimizer=None, optimizer_args=None, step_size=0.01, **kwargs):
        # Default to a penalized L-BFGS optimizer when none is supplied.
        if (optimizer is None):
            if (optimizer_args is None):
                optimizer_args = dict()
            optimizer = PenaltyLbfgsOptimizer(**optimizer_args)
        self.optimizer = optimizer
        # KL trust-region size for the constrained optimization step.
        self.step_size = step_size
        super(NPO, self).__init__(**kwargs)

    @overrides
    def init_opt(self):
        # Build the surrogate loss and KL constraint; supports both
        # feed-forward and recurrent policies (extra time dimension).
        is_recurrent = int(self.policy.recurrent)
        obs_var = self.env.observation_space.new_tensor_variable('obs', extra_dims=(1 + is_recurrent))
        action_var = self.env.action_space.new_tensor_variable('action', extra_dims=(1 + is_recurrent))
        advantage_var = tensor_utils.new_tensor('advantage', ndim=(1 + is_recurrent), dtype=tf.float32)
        # Latent noise and task index inputs for the MAESN-style policy.
        noise_var = tf.placeholder(dtype=tf.float32, shape=[None, self.latent_dim], name='noise')
        task_idx_var = tensor_utils.new_tensor(name='task_idx', ndim=1, dtype=tf.int32)
        dist = self.policy.distribution
        # Placeholders for the pre-update ("old") policy distribution.
        old_dist_info_vars = {k: tf.placeholder(tf.float32, shape=(([None] * (1 + is_recurrent)) + list(shape)), name=('old_%s' % k)) for (k, shape) in dist.dist_info_specs}
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]
        state_info_vars = {k: tf.placeholder(tf.float32, shape=(([None] * (1 + is_recurrent)) + list(shape)), name=k) for (k, shape) in self.policy.state_info_specs}
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]
        if is_recurrent:
            # Mask marking valid (non-padding) timesteps per trajectory.
            valid_var = tf.placeholder(tf.float32, shape=[None, None], name='valid')
        else:
            valid_var = None
        dist_info_vars = self.policy.dist_info_sym(obs_var, task_idx_var, noise_var, state_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        lr = dist.likelihood_ratio_sym(action_var, old_dist_info_vars, dist_info_vars)
        if is_recurrent:
            # Average only over valid timesteps.
            mean_kl = (tf.reduce_sum((kl * valid_var)) / tf.reduce_sum(valid_var))
            surr_loss = ((- tf.reduce_sum(((lr * advantage_var) * valid_var))) / tf.reduce_sum(valid_var))
        else:
            mean_kl = tf.reduce_mean(kl)
            surr_loss = (- tf.reduce_mean((lr * advantage_var)))
        input_list = (([obs_var, action_var, advantage_var, noise_var, task_idx_var] + state_info_vars_list) + old_dist_info_vars_list)
        if is_recurrent:
            input_list.append(valid_var)
        self.optimizer.update_opt(loss=surr_loss, target=self.policy, leq_constraint=(mean_kl, self.step_size), inputs=input_list, constraint_name='mean_kl')
        return dict()
@overrides
def optimize_policy(self, itr, samples_data):
all_input_values = tuple(ext.extract(samples_data, 'observations', 'actions', 'advantages', 'noises', 'task_idxs'))
agent_infos = samples_data['agent_infos']
state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
all_input_values += (tuple(state_info_list) + tuple(dist_info_list))
if self.policy.recurrent:
all_input_values += (samples_data['valids'],)
logger.log('Computing loss before')
loss_before = self.optimizer.loss(all_input_values)
logger.log('Computing KL before')
mean_kl_before = self.optimizer.constraint_val(all_input_values)
logger.log('Optimizing')
self.optimizer.optimize(all_input_values)
logger.log('Computing KL after')
mean_kl = self.optimizer.constraint_val(all_input_values)
logger.log('Computing loss after')
loss_after = self.optimizer.loss(all_input_values)
logger.record_tabular('LossBefore', loss_before)
logger.record_tabular('LossAfter', loss_after)
logger.record_tabular('MeanKLBefore', mean_kl_before)
logger.record_tabular('MeanKL', mean_kl)
logger.record_tabular('dLoss', (loss_before - loss_after))
return dict()
@overrides
def get_itr_snapshot(self, itr, samples_data):
return dict(itr=itr, policy=self.policy, baseline=self.baseline, env=self.env)
|
class TRPO(NPO):
    """Trust Region Policy Optimization.

    NPO specialized to use a conjugate-gradient optimizer by default.
    """

    def __init__(self, optimizer=None, optimizer_args=None, **kwargs):
        if optimizer is None:
            # Build a CG optimizer from the supplied args (or none).
            optimizer = ConjugateGradientOptimizer(**(optimizer_args or dict()))
        super(TRPO, self).__init__(optimizer=optimizer, **kwargs)
|
class VPG(BatchPolopt, Serializable):
    """
    Vanilla Policy Gradient.

    Fix: removed leftover `import ipdb; ipdb.set_trace()` debugger
    breakpoints (which would hang any non-interactive run) and the broken
    debug snippets that called `sess.run(...)` on an undefined name `sess`
    (NameError at runtime; the results were never used).

    NOTE(review): this file defines `VPG` three times; later definitions
    shadow this one at import time -- confirm which version is intended.
    """

    def __init__(self, env, policy, baseline, optimizer=None, optimizer_args=None, **kwargs):
        Serializable.quick_init(self, locals())
        if optimizer is None:
            # Single-epoch full-batch first-order optimization by default.
            default_args = dict(batch_size=None, max_epochs=1)
            if optimizer_args is None:
                optimizer_args = default_args
            else:
                optimizer_args = dict(default_args, **optimizer_args)
            optimizer = FirstOrderOptimizer(**optimizer_args)
        self.optimizer = optimizer
        self.opt_info = None
        super(VPG, self).__init__(env=env, policy=policy, baseline=baseline, **kwargs)

    @overrides
    def init_opt(self):
        """Build the REINFORCE surrogate loss and a compiled KL diagnostic."""
        is_recurrent = int(self.policy.recurrent)
        # Recurrent policies carry an extra leading (time) dimension.
        obs_var = self.env.observation_space.new_tensor_variable('obs', extra_dims=(1 + is_recurrent))
        action_var = self.env.action_space.new_tensor_variable('action', extra_dims=(1 + is_recurrent))
        advantage_var = tensor_utils.new_tensor(name='advantage', ndim=(1 + is_recurrent), dtype=tf.float32)
        # Per-sample latent noise; consumed by the policy's dist_info_sym.
        noise_var = tf.placeholder(dtype=tf.float32, shape=[None, self.latent_dim], name='noise')
        task_idx_var = tensor_utils.new_tensor(name='task_idx', ndim=1, dtype=tf.int32)
        dist = self.policy.distribution
        # Placeholders for the pre-update ("old") distribution parameters,
        # used only for the KL diagnostics below.
        old_dist_info_vars = {k: tf.placeholder(tf.float32, shape=(([None] * (1 + is_recurrent)) + list(shape)), name=('old_%s' % k)) for (k, shape) in dist.dist_info_specs}
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]
        state_info_vars = {k: tf.placeholder(tf.float32, shape=(([None] * (1 + is_recurrent)) + list(shape)), name=k) for (k, shape) in self.policy.state_info_specs}
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]
        if is_recurrent:
            # Mask marking valid (non-padding) timesteps per trajectory.
            valid_var = tf.placeholder(tf.float32, shape=[None, None], name='valid')
        else:
            valid_var = None
        dist_info_vars = self.policy.dist_info_sym(obs_var, task_idx_var, noise_var, state_info_vars)
        logli = dist.log_likelihood_sym(action_var, dist_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        if is_recurrent:
            # Average only over valid timesteps; negated for minimization.
            surr_obj = ((- tf.reduce_sum(((logli * advantage_var) * valid_var))) / tf.reduce_sum(valid_var))
            mean_kl = (tf.reduce_sum((kl * valid_var)) / tf.reduce_sum(valid_var))
            max_kl = tf.reduce_max((kl * valid_var))
        else:
            surr_obj = (- tf.reduce_mean((logli * advantage_var)))
            mean_kl = tf.reduce_mean(kl)
            max_kl = tf.reduce_max(kl)
        # Order matters: optimize_policy feeds values in this same order.
        input_list = ([obs_var, action_var, advantage_var, noise_var, task_idx_var] + state_info_vars_list)
        if is_recurrent:
            input_list.append(valid_var)
        self.optimizer.update_opt(loss=surr_obj, target=self.policy, inputs=input_list)
        f_kl = tensor_utils.compile_function(inputs=(input_list + old_dist_info_vars_list), outputs=[mean_kl, max_kl])
        self.opt_info = dict(f_kl=f_kl)

    @overrides
    def optimize_policy(self, itr, samples_data):
        """Run one first-order policy update and log loss/KL diagnostics."""
        logger.log('optimizing policy')
        inputs = ext.extract(samples_data, 'observations', 'actions', 'advantages', 'noises', 'task_idxs')
        agent_infos = samples_data['agent_infos']
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        inputs += tuple(state_info_list)
        if self.policy.recurrent:
            inputs += (samples_data['valids'],)
        dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
        loss_before = self.optimizer.loss(inputs)
        self.optimizer.optimize(inputs)
        loss_after = self.optimizer.loss(inputs)
        logger.record_tabular('LossBefore', loss_before)
        logger.record_tabular('LossAfter', loss_after)
        # KL between pre-update dist params (from sampling time) and current policy.
        (mean_kl, max_kl) = self.opt_info['f_kl'](*(list(inputs) + dist_info_list))
        logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('MaxKL', max_kl)

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        # Everything needed to resume / evaluate training at iteration `itr`.
        return dict(itr=itr, policy=self.policy, baseline=self.baseline, env=self.env)
|
class VPG(BatchPolopt, Serializable):
    """
    Vanilla Policy Gradient.

    This variant only updates the per-task latent distribution parameters
    ('latent_means' / 'latent_stds') with plain SGD steps of size
    `default_step_size`; the policy network weights are left untouched.

    NOTE(review): the file defines `VPG` multiple times; this definition is
    shadowed by a later one -- confirm which is intended to be used.
    """

    def __init__(self, env, policy, baseline, default_step, **kwargs):
        Serializable.quick_init(self, locals())
        # SGD step size used for the latent-parameter updates in optimize().
        self.default_step_size = default_step
        self.opt_info = None
        super(VPG, self).__init__(env=env, policy=policy, baseline=baseline, **kwargs)

    def make_vars(self):
        # Placeholders for a batch of (obs, action, advantage, noise, task index).
        obs_var = self.env.observation_space.new_tensor_variable('obs', extra_dims=1)
        action_var = self.env.action_space.new_tensor_variable('action', extra_dims=1)
        adv_var = tensor_utils.new_tensor(name='advantage', ndim=1, dtype=tf.float32)
        noise_var = tf.placeholder(dtype=tf.float32, shape=[None, self.latent_dim], name='noise')
        task_idx_var = tensor_utils.new_tensor(name='task_idx', ndim=1, dtype=tf.int32)
        return (obs_var, action_var, adv_var, noise_var, task_idx_var)

    def make_vars_latent(self):
        # Placeholders for the latent-space objective: advantages, sampled
        # latents z, and the task index of each sample.
        adv_var = tensor_utils.new_tensor(name='advantage_latent', ndim=1, dtype=tf.float32)
        z_var = tf.placeholder(dtype=tf.float32, shape=[None, self.latent_dim], name='zs_latent')
        task_idx_var = tensor_utils.new_tensor(name='task_idx_latent', ndim=1, dtype=tf.int32)
        return (adv_var, z_var, task_idx_var)

    @overrides
    def init_opt(self):
        """Build the latent-space REINFORCE objective and cache param values."""
        is_recurrent = int(self.policy.recurrent)
        assert (not is_recurrent)  # recurrent policies are not supported here
        (adv_var_latent, z_var_latent, task_idx_var_latent) = self.make_vars_latent()
        self.input_list_for_grad = [adv_var_latent, z_var_latent, task_idx_var_latent]
        # Per-task latent Gaussian parameters, selected per sample by task index.
        means = tf.gather(self.policy.all_params['latent_means'], task_idx_var_latent)
        log_stds = tf.gather(self.policy.all_params['latent_stds'], task_idx_var_latent)
        dist_info_vars_latent = {'mean': means, 'log_std': log_stds}
        logli_latent = self.latent_dist.log_likelihood_sym(z_var_latent, dist_info_vars_latent)
        # REINFORCE-style surrogate on the latent distribution (negated for minimization).
        self.surr_obj_latent = (- tf.reduce_mean((logli_latent * adv_var_latent)))
        all_keys = list(self.policy.all_params.keys())
        sess = tf.get_default_session()
        # Snapshot current parameter values; optimize() writes updates back here.
        self.policy.all_param_vals = OrderedDict()
        for key in all_keys:
            self.policy.all_param_vals[key] = sess.run(self.policy.all_params[key])

    @overrides
    def optimize_policy(self, itr, samples_latent):
        """Reconstruct sampled latents from noise and take one latent SGD step."""
        logger.log('optimizing policy')
        latent_inputs = ext.extract(samples_latent, 'advantages', 'noises', 'task_idxs')
        latent_advantages = latent_inputs[0]
        latent_noises = latent_inputs[1]
        latent_task_idxs = latent_inputs[2]
        sess = tf.get_default_session()
        means = sess.run(tf.gather(self.policy.all_params['latent_means'], latent_task_idxs))
        logstds = sess.run(tf.gather(self.policy.all_params['latent_stds'], latent_task_idxs))
        # Reparameterized latent samples: z = mu + eps * sigma.
        zs = (means + (latent_noises * np.exp(logstds)))
        inputs = [latent_advantages, zs, latent_task_idxs]
        self.optimize(inputs, sess, itr)

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        # Everything needed to resume / evaluate training at iteration `itr`.
        return dict(itr=itr, policy=self.policy, baseline=self.baseline, env=self.env)

    def optimize(self, inputs, sess, itr):
        """One SGD step on the latent parameters only.

        NOTE(review): tf.gradients and the update tensors are rebuilt on every
        call, which grows the TF graph each iteration -- consider building
        them once in init_opt.
        """
        param_keys = []
        param_keys_latent = []
        all_keys = list(self.policy.all_params.keys())
        # Split parameter keys into network weights vs latent-distribution params.
        for key in all_keys:
            if ('latent' not in key):
                param_keys.append(key)
            else:
                param_keys_latent.append(key)
        update_param_keys = param_keys  # unused: network weights are not updated here
        update_param_keys_latent = param_keys_latent
        step_sizes_sym = {}
        for key in all_keys:
            step_sizes_sym[key] = self.default_step_size
        gradients_latent = dict(zip(update_param_keys_latent, tf.gradients(self.surr_obj_latent, [self.policy.all_params[key] for key in update_param_keys_latent])))
        # theta' = theta - step * grad, evaluated with the current feed inputs.
        update_tensor = OrderedDict(zip(update_param_keys_latent, [(self.policy.all_params[key] - (step_sizes_sym[key] * tf.convert_to_tensor(gradients_latent[key]))) for key in update_param_keys_latent]))
        result = sess.run(update_tensor, feed_dict=dict(list(zip(self.input_list_for_grad, inputs))))
        self.policy.all_param_vals['latent_means'] = result['latent_means']
        self.policy.all_param_vals['latent_stds'] = result['latent_stds']
        # Crude step-size decay: halve from the second iteration onward.
        if (itr >= 2):
            self.default_step_size /= 2
        self.policy.assign_params(self.policy.all_params, self.policy.all_param_vals)
|
class VPG(BatchPolopt, Serializable):
    """
    Vanilla Policy Gradient.

    Latent-only variant in which the SGD step sizes for the latent parameters
    are themselves policy parameters ('latent_means_stepsize' /
    'latent_stds_stepsize'); those step sizes are halved after every
    iteration past the first. Being the last `VPG` defined in this file, this
    definition shadows the earlier ones at import time.
    """

    def __init__(self, env, policy, baseline, default_step, **kwargs):
        Serializable.quick_init(self, locals())
        # Fallback SGD step size (overridden per-parameter in optimize()).
        self.default_step_size = default_step
        self.opt_info = None
        super(VPG, self).__init__(env=env, policy=policy, baseline=baseline, **kwargs)

    def make_vars(self):
        # Placeholders for a batch of (obs, action, advantage, noise, task index).
        obs_var = self.env.observation_space.new_tensor_variable('obs', extra_dims=1)
        action_var = self.env.action_space.new_tensor_variable('action', extra_dims=1)
        adv_var = tensor_utils.new_tensor(name='advantage', ndim=1, dtype=tf.float32)
        noise_var = tf.placeholder(dtype=tf.float32, shape=[None, self.latent_dim], name='noise')
        task_idx_var = tensor_utils.new_tensor(name='task_idx', ndim=1, dtype=tf.int32)
        return (obs_var, action_var, adv_var, noise_var, task_idx_var)

    def make_vars_latent(self):
        # Placeholders for the latent-space objective: advantages, sampled
        # latents z, and the task index of each sample.
        adv_var = tensor_utils.new_tensor(name='advantage_latent', ndim=1, dtype=tf.float32)
        z_var = tf.placeholder(dtype=tf.float32, shape=[None, self.latent_dim], name='zs_latent')
        task_idx_var = tensor_utils.new_tensor(name='task_idx_latent', ndim=1, dtype=tf.int32)
        return (adv_var, z_var, task_idx_var)

    @overrides
    def init_opt(self):
        """Build the latent-space REINFORCE objective and cache param values."""
        is_recurrent = int(self.policy.recurrent)
        assert (not is_recurrent)  # recurrent policies are not supported here
        (adv_var_latent, z_var_latent, task_idx_var_latent) = self.make_vars_latent()
        self.input_list_for_grad = [adv_var_latent, z_var_latent, task_idx_var_latent]
        # Per-task latent Gaussian parameters, selected per sample by task index.
        means = tf.gather(self.policy.all_params['latent_means'], task_idx_var_latent)
        log_stds = tf.gather(self.policy.all_params['latent_stds'], task_idx_var_latent)
        dist_info_vars_latent = {'mean': means, 'log_std': log_stds}
        logli_latent = self.latent_dist.log_likelihood_sym(z_var_latent, dist_info_vars_latent)
        # REINFORCE-style surrogate on the latent distribution (negated for minimization).
        self.surr_obj_latent = (- tf.reduce_mean((logli_latent * adv_var_latent)))
        all_keys = list(self.policy.all_params.keys())
        sess = tf.get_default_session()
        # Snapshot current parameter values; optimize() writes updates back here.
        self.policy.all_param_vals = OrderedDict()
        for key in all_keys:
            self.policy.all_param_vals[key] = sess.run(self.policy.all_params[key])

    @overrides
    def optimize_policy(self, itr, samples_latent):
        """Reconstruct sampled latents from noise and take one latent SGD step."""
        logger.log('optimizing policy')
        latent_inputs = ext.extract(samples_latent, 'advantages', 'noises', 'task_idxs')
        latent_advantages = latent_inputs[0]
        latent_noises = latent_inputs[1]
        latent_task_idxs = latent_inputs[2]
        sess = tf.get_default_session()
        means = sess.run(tf.gather(self.policy.all_params['latent_means'], latent_task_idxs))
        logstds = sess.run(tf.gather(self.policy.all_params['latent_stds'], latent_task_idxs))
        # Reparameterized latent samples: z = mu + eps * sigma.
        zs = (means + (latent_noises * np.exp(logstds)))
        inputs = [latent_advantages, zs, latent_task_idxs]
        self.optimize(inputs, sess, itr)

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        # Everything needed to resume / evaluate training at iteration `itr`.
        return dict(itr=itr, policy=self.policy, baseline=self.baseline, env=self.env)

    def optimize(self, inputs, sess, itr):
        """One SGD step on the latent parameters, using learned step sizes.

        NOTE(review): tf.gradients and the update tensors are rebuilt on every
        call, which grows the TF graph each iteration -- consider building
        them once in init_opt.
        """
        param_keys = []
        param_keys_latent = []
        all_keys = list(self.policy.all_params.keys())
        # The step-size parameters themselves are not updated by gradient descent.
        all_keys.remove('latent_means_stepsize')
        all_keys.remove('latent_stds_stepsize')
        # Split parameter keys into network weights vs latent-distribution params.
        for key in all_keys:
            if ('latent' not in key):
                param_keys.append(key)
            else:
                param_keys_latent.append(key)
        update_param_keys = param_keys  # unused: network weights are not updated here
        update_param_keys_latent = param_keys_latent
        step_sizes_sym = {}
        for key in all_keys:
            step_sizes_sym[key] = self.default_step_size
        # Latent params use their own learned per-parameter step sizes.
        step_sizes_sym['latent_means'] = self.policy.all_params['latent_means_stepsize']
        step_sizes_sym['latent_stds'] = self.policy.all_params['latent_stds_stepsize']
        gradients_latent = dict(zip(update_param_keys_latent, tf.gradients(self.surr_obj_latent, [self.policy.all_params[key] for key in update_param_keys_latent])))
        # theta' = theta - step * grad, evaluated with the current feed inputs.
        update_tensor = OrderedDict(zip(update_param_keys_latent, [(self.policy.all_params[key] - (step_sizes_sym[key] * tf.convert_to_tensor(gradients_latent[key]))) for key in update_param_keys_latent]))
        result = sess.run(update_tensor, feed_dict=dict(list(zip(self.input_list_for_grad, inputs))))
        self.policy.all_param_vals['latent_means'] = result['latent_means']
        self.policy.all_param_vals['latent_stds'] = result['latent_stds']
        # Decay the learned step sizes after the first iteration.
        if (itr >= 1):
            self.policy.all_param_vals['latent_means_stepsize'] /= 2
            self.policy.all_param_vals['latent_stds_stepsize'] /= 2
        self.policy.assign_params(self.policy.all_params, self.policy.all_param_vals)
|
class LayersPowered(Parameterized):
    """Parameterized mixin whose parameters come from a layer graph."""

    def __init__(self, output_layers, input_layers=None):
        self._output_layers = output_layers
        self._input_layers = input_layers
        Parameterized.__init__(self)

    def get_params_internal(self, **tags):
        """Collect deduplicated params from every layer feeding the outputs."""
        all_layers = L.get_all_layers(self._output_layers, treat_as_input=self._input_layers)
        collected = [p for layer in all_layers for p in layer.get_params(**tags)]
        return L.unique(collected)
|
class MLP(LayersPowered, Serializable):
    """Fully connected feed-forward network built from `L` layers.

    Builds `input -> [dense hidden]* -> dense output`, optionally wrapping
    each stage in batch normalization.
    """

    def __init__(self, name, output_dim, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer, output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer, input_var=None, input_layer=None, input_shape=None, batch_normalization=False, weight_normalization=False):
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            # Reuse the caller's input layer if one was given.
            if input_layer is not None:
                in_layer = input_layer
            else:
                in_layer = L.InputLayer(shape=((None,) + input_shape), input_var=input_var, name='input')
            self._layers = [in_layer]
            current = in_layer
            if batch_normalization:
                current = L.batch_norm(current)
            for idx, hidden_size in enumerate(hidden_sizes):
                current = L.DenseLayer(
                    current, num_units=hidden_size, nonlinearity=hidden_nonlinearity,
                    name=('hidden_%d' % idx), W=hidden_W_init, b=hidden_b_init,
                    weight_normalization=weight_normalization)
                if batch_normalization:
                    current = L.batch_norm(current)
                self._layers.append(current)
            out_layer = L.DenseLayer(
                current, num_units=output_dim, nonlinearity=output_nonlinearity,
                name='output', W=output_W_init, b=output_b_init,
                weight_normalization=weight_normalization)
            if batch_normalization:
                out_layer = L.batch_norm(out_layer)
            self._layers.append(out_layer)
            self._l_in = in_layer
            self._l_out = out_layer
            self._output = L.get_output(out_layer)
            LayersPowered.__init__(self, out_layer)

    @property
    def input_layer(self):
        """The network's input layer."""
        return self._l_in

    @property
    def output_layer(self):
        """The network's final (output) layer."""
        return self._l_out

    @property
    def input_var(self):
        """The TF variable feeding the input layer."""
        return self._l_in.input_var

    @property
    def layers(self):
        """All layers, input first."""
        return self._layers

    @property
    def output(self):
        """Symbolic output of the final layer."""
        return self._output
|
class ConvNetwork(LayersPowered, Serializable):
    """Convolutional network: a conv stack followed by fully connected layers.

    Inputs are assumed flattened and are reshaped back to image form before
    the convolutions (see the usage-notes string inside __init__).
    """

    def __init__(self, name, input_shape, output_dim, conv_filters, conv_filter_sizes, conv_strides, conv_pads, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer, output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer, input_var=None, input_layer=None, batch_normalization=False, weight_normalization=False):
        Serializable.quick_init(self, locals())
        # NOTE(review): this string sits after quick_init, so it is a no-op
        # expression statement rather than the method's docstring.
        "\n A network composed of several convolution layers followed by some fc layers.\n input_shape: (width,height,channel)\n HOWEVER, network inputs are assumed flattened. This network will first unflatten the inputs and then apply the standard convolutions and so on.\n conv_filters: a list of numbers of convolution kernel\n conv_filter_sizes: a list of sizes (int) of the convolution kernels\n conv_strides: a list of strides (int) of the conv kernels\n conv_pads: a list of pad formats (either 'SAME' or 'VALID')\n hidden_nonlinearity: a nonlinearity from tf.nn, shared by all conv and fc layers\n hidden_sizes: a list of numbers of hidden units for all fc layers\n "
        with tf.variable_scope(name):
            if (input_layer is not None):
                l_in = input_layer
                l_hid = l_in
            elif (len(input_shape) == 3):
                # Flattened (W*H*C) input; reshape back to image shape.
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var, name='input')
                l_hid = L.reshape(l_in, (([0],) + input_shape), name='reshape_input')
            elif (len(input_shape) == 2):
                # 2-D input: add a singleton leading dimension before reshaping.
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var, name='input')
                input_shape = ((1,) + input_shape)
                l_hid = L.reshape(l_in, (([0],) + input_shape), name='reshape_input')
            else:
                l_in = L.InputLayer(shape=((None,) + input_shape), input_var=input_var, name='input')
                l_hid = l_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for (idx, conv_filter, filter_size, stride, pad) in zip(range(len(conv_filters)), conv_filters, conv_filter_sizes, conv_strides, conv_pads):
                l_hid = L.Conv2DLayer(l_hid, num_filters=conv_filter, filter_size=filter_size, stride=(stride, stride), pad=pad, nonlinearity=hidden_nonlinearity, name=('conv_hidden_%d' % idx), weight_normalization=weight_normalization)
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
            if (output_nonlinearity == L.spatial_expected_softmax):
                # Spatial-softmax head: 2 coordinates per conv filter, no fc layers.
                assert (len(hidden_sizes) == 0)
                assert (output_dim == (conv_filters[(- 1)] * 2))
                l_hid.nonlinearity = tf.identity
                l_out = L.SpatialExpectedSoftmaxLayer(l_hid)
            else:
                l_hid = L.flatten(l_hid, name='conv_flatten')
                for (idx, hidden_size) in enumerate(hidden_sizes):
                    l_hid = L.DenseLayer(l_hid, num_units=hidden_size, nonlinearity=hidden_nonlinearity, name=('hidden_%d' % idx), W=hidden_W_init, b=hidden_b_init, weight_normalization=weight_normalization)
                    if batch_normalization:
                        l_hid = L.batch_norm(l_hid)
                l_out = L.DenseLayer(l_hid, num_units=output_dim, nonlinearity=output_nonlinearity, name='output', W=output_W_init, b=output_b_init, weight_normalization=weight_normalization)
                if batch_normalization:
                    l_out = L.batch_norm(l_out)
            self._l_in = l_in
            self._l_out = l_out
            LayersPowered.__init__(self, l_out)

    @property
    def input_layer(self):
        return self._l_in

    @property
    def output_layer(self):
        return self._l_out

    @property
    def input_var(self):
        return self._l_in.input_var
|
class GRUNetwork(object):
    """GRU recurrent network exposing both sequence-mode and step-mode outputs.

    The "step" layers apply a single recurrent transition given the previous
    hidden state; the step output layer shares its weights (W, b) with the
    sequence-mode output layer.
    """

    def __init__(self, name, input_shape, output_dim, hidden_dim, hidden_nonlinearity=tf.nn.relu, gru_layer_cls=L.GRULayer, output_nonlinearity=None, input_var=None, input_layer=None, layer_args=None):
        with tf.variable_scope(name):
            if (input_layer is None):
                # Sequence input: (batch, time) + input_shape.
                l_in = L.InputLayer(shape=((None, None) + input_shape), input_var=input_var, name='input')
            else:
                l_in = input_layer
            # Single-step inputs: one observation plus the previous hidden state.
            l_step_input = L.InputLayer(shape=((None,) + input_shape), name='step_input')
            l_step_prev_state = L.InputLayer(shape=(None, hidden_dim), name='step_prev_state')
            if (layer_args is None):
                layer_args = dict()
            l_gru = gru_layer_cls(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity, hidden_init_trainable=False, name='gru', **layer_args)
            # Flatten (batch, time, hidden) -> (batch*time, hidden) for the dense output.
            l_gru_flat = L.ReshapeLayer(l_gru, shape=((- 1), hidden_dim), name='gru_flat')
            l_output_flat = L.DenseLayer(l_gru_flat, num_units=output_dim, nonlinearity=output_nonlinearity, name='output_flat')
            # Un-flatten back to (batch, time, output_dim).
            # NOTE(review): tf.pack was renamed tf.stack in TF >= 1.0; this code
            # targets an older TensorFlow release.
            l_output = L.OpLayer(l_output_flat, op=(lambda flat_output, l_input: tf.reshape(flat_output, tf.pack((tf.shape(l_input)[0], tf.shape(l_input)[1], (- 1))))), shape_op=(lambda flat_output_shape, l_input_shape: (l_input_shape[0], l_input_shape[1], flat_output_shape[(- 1)])), extras=[l_in], name='output')
            l_step_state = l_gru.get_step_layer(l_step_input, l_step_prev_state, name='step_state')
            # For a GRU the full recurrent state is just the hidden vector.
            l_step_hidden = l_step_state
            # Shares W and b with the sequence-mode output layer.
            l_step_output = L.DenseLayer(l_step_hidden, num_units=output_dim, nonlinearity=output_nonlinearity, W=l_output_flat.W, b=l_output_flat.b, name='step_output')
            self._l_in = l_in
            self._hid_init_param = l_gru.h0
            self._l_gru = l_gru
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim

    @property
    def state_dim(self):
        # GRU state is just the hidden vector, so state_dim == hidden_dim.
        return self._hidden_dim

    @property
    def hidden_dim(self):
        return self._hidden_dim

    @property
    def input_layer(self):
        return self._l_in

    @property
    def input_var(self):
        return self._l_in.input_var

    @property
    def output_layer(self):
        return self._l_out

    @property
    def recurrent_layer(self):
        return self._l_gru

    @property
    def step_input_layer(self):
        return self._l_step_input

    @property
    def step_prev_state_layer(self):
        return self._l_step_prev_state

    @property
    def step_hidden_layer(self):
        return self._l_step_hidden

    @property
    def step_state_layer(self):
        return self._l_step_state

    @property
    def step_output_layer(self):
        return self._l_step_output

    @property
    def hid_init_param(self):
        return self._hid_init_param

    @property
    def state_init_param(self):
        return self._hid_init_param
|
class LSTMNetwork(object):
    """LSTM recurrent network exposing both sequence-mode and step-mode outputs.

    The recurrent state is the concatenation [hidden, cell] (width
    2 * hidden_dim); the step layers slice it back apart. The step output
    layer shares its weights (W, b) with the sequence-mode output layer.
    """

    def __init__(self, name, input_shape, output_dim, hidden_dim, hidden_nonlinearity=tf.nn.relu, lstm_layer_cls=L.LSTMLayer, output_nonlinearity=None, input_var=None, input_layer=None, forget_bias=1.0, use_peepholes=False, layer_args=None):
        with tf.variable_scope(name):
            if (input_layer is None):
                # Sequence input: (batch, time) + input_shape.
                l_in = L.InputLayer(shape=((None, None) + input_shape), input_var=input_var, name='input')
            else:
                l_in = input_layer
            # Single-step inputs: one observation plus the previous [hidden, cell] state.
            l_step_input = L.InputLayer(shape=((None,) + input_shape), name='step_input')
            l_step_prev_state = L.InputLayer(shape=(None, (hidden_dim * 2)), name='step_prev_state')
            if (layer_args is None):
                layer_args = dict()
            l_lstm = lstm_layer_cls(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity, hidden_init_trainable=False, name='lstm', forget_bias=forget_bias, cell_init_trainable=False, use_peepholes=use_peepholes, **layer_args)
            # Flatten (batch, time, hidden) -> (batch*time, hidden) for the dense output.
            l_lstm_flat = L.ReshapeLayer(l_lstm, shape=((- 1), hidden_dim), name='lstm_flat')
            l_output_flat = L.DenseLayer(l_lstm_flat, num_units=output_dim, nonlinearity=output_nonlinearity, name='output_flat')
            # Un-flatten back to (batch, time, output_dim).
            # NOTE(review): tf.pack was renamed tf.stack in TF >= 1.0; this code
            # targets an older TensorFlow release.
            l_output = L.OpLayer(l_output_flat, op=(lambda flat_output, l_input: tf.reshape(flat_output, tf.pack((tf.shape(l_input)[0], tf.shape(l_input)[1], (- 1))))), shape_op=(lambda flat_output_shape, l_input_shape: (l_input_shape[0], l_input_shape[1], flat_output_shape[(- 1)])), extras=[l_in], name='output')
            l_step_state = l_lstm.get_step_layer(l_step_input, l_step_prev_state, name='step_state')
            # Split the packed state back into hidden and cell halves.
            l_step_hidden = L.SliceLayer(l_step_state, indices=slice(hidden_dim), name='step_hidden')
            l_step_cell = L.SliceLayer(l_step_state, indices=slice(hidden_dim, None), name='step_cell')
            # Shares W and b with the sequence-mode output layer.
            l_step_output = L.DenseLayer(l_step_hidden, num_units=output_dim, nonlinearity=output_nonlinearity, W=l_output_flat.W, b=l_output_flat.b, name='step_output')
            self._l_in = l_in
            self._hid_init_param = l_lstm.h0
            self._cell_init_param = l_lstm.c0
            self._l_lstm = l_lstm
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_cell = l_step_cell
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim

    @property
    def state_dim(self):
        # Packed state is [hidden, cell], hence twice the hidden width.
        return (self._hidden_dim * 2)

    @property
    def input_layer(self):
        return self._l_in

    @property
    def input_var(self):
        return self._l_in.input_var

    @property
    def output_layer(self):
        return self._l_out

    @property
    def recurrent_layer(self):
        return self._l_lstm

    @property
    def step_input_layer(self):
        return self._l_step_input

    @property
    def step_prev_state_layer(self):
        return self._l_step_prev_state

    @property
    def step_hidden_layer(self):
        return self._l_step_hidden

    @property
    def step_state_layer(self):
        return self._l_step_state

    @property
    def step_cell_layer(self):
        return self._l_step_cell

    @property
    def step_output_layer(self):
        return self._l_step_output

    @property
    def hid_init_param(self):
        return self._hid_init_param

    @property
    def cell_init_param(self):
        return self._cell_init_param

    @property
    def state_init_param(self):
        # Initial packed state: hidden init followed by cell init.
        return tf.concat(axis=0, values=[self._hid_init_param, self._cell_init_param])
|
class ConvMergeNetwork(LayersPowered, Serializable):
    '\n    This network allows the input to consist of a convolution-friendly component, plus a non-convolution-friendly\n    component. These two components will be concatenated in the fully connected layers. There can also be a list of\n    optional layers for the non-convolution-friendly component alone.\n\n\n    The input to the network should be a matrix where each row is a single input entry, with both the aforementioned\n    components flattened out and then concatenated together\n    '

    def __init__(self, name, input_shape, extra_input_shape, output_dim, hidden_sizes, conv_filters, conv_filter_sizes, conv_strides, conv_pads, extra_hidden_sizes=None, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer, output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer, hidden_nonlinearity=tf.nn.relu, output_nonlinearity=None, input_var=None, input_layer=None):
        Serializable.quick_init(self, locals())
        if (extra_hidden_sizes is None):
            extra_hidden_sizes = []
        with tf.variable_scope(name):
            # Each input row is [flattened conv input | flattened extra input].
            input_flat_dim = np.prod(input_shape)
            extra_input_flat_dim = np.prod(extra_input_shape)
            total_input_flat_dim = (input_flat_dim + extra_input_flat_dim)
            if (input_layer is None):
                l_in = L.InputLayer(shape=(None, total_input_flat_dim), input_var=input_var, name='input')
            else:
                l_in = input_layer
            # Slice the row back into its two components and restore their shapes.
            l_conv_in = L.reshape(L.SliceLayer(l_in, indices=slice(input_flat_dim), name='conv_slice'), (([0],) + input_shape), name='conv_reshaped')
            l_extra_in = L.reshape(L.SliceLayer(l_in, indices=slice(input_flat_dim, None), name='extra_slice'), (([0],) + extra_input_shape), name='extra_reshaped')
            # Convolution stack over the image-like component.
            l_conv_hid = l_conv_in
            for (idx, conv_filter, filter_size, stride, pad) in zip(range(len(conv_filters)), conv_filters, conv_filter_sizes, conv_strides, conv_pads):
                l_conv_hid = L.Conv2DLayer(l_conv_hid, num_filters=conv_filter, filter_size=filter_size, stride=(stride, stride), pad=pad, nonlinearity=hidden_nonlinearity, name=('conv_hidden_%d' % idx))
            # Optional dense stack over the extra (non-conv) component.
            l_extra_hid = l_extra_in
            for (idx, hidden_size) in enumerate(extra_hidden_sizes):
                l_extra_hid = L.DenseLayer(l_extra_hid, num_units=hidden_size, nonlinearity=hidden_nonlinearity, name=('extra_hidden_%d' % idx), W=hidden_W_init, b=hidden_b_init)
            # Merge both branches, then run the joint fully connected layers.
            l_joint_hid = L.concat([L.flatten(l_conv_hid, name='conv_hidden_flat'), l_extra_hid], name='joint_hidden')
            for (idx, hidden_size) in enumerate(hidden_sizes):
                l_joint_hid = L.DenseLayer(l_joint_hid, num_units=hidden_size, nonlinearity=hidden_nonlinearity, name=('joint_hidden_%d' % idx), W=hidden_W_init, b=hidden_b_init)
            l_out = L.DenseLayer(l_joint_hid, num_units=output_dim, nonlinearity=output_nonlinearity, name='output', W=output_W_init, b=output_b_init)
            self._l_in = l_in
            self._l_out = l_out
            LayersPowered.__init__(self, [l_out], input_layers=[l_in])

    @property
    def input_layer(self):
        return self._l_in

    @property
    def output_layer(self):
        return self._l_out

    @property
    def input_var(self):
        return self._l_in.input_var
|
@contextmanager
def suppress_params_loading():
    """Temporarily disable parameter loading (`load_params` flag).

    Fix: the flag is now restored in a ``finally`` clause, so an exception
    raised inside the ``with`` body no longer leaves ``load_params`` stuck
    at ``False``.
    """
    global load_params
    load_params = False
    try:
        yield
    finally:
        load_params = True
|
class Parameterized(object):
def __init__(self):
self._cached_params = {}
self._cached_param_dtypes = {}
self._cached_param_shapes = {}
self._cached_assign_ops = {}
self._cached_assign_placeholders = {}
def get_params_internal(self, **tags):
'\n Internal method to be implemented which does not perform caching\n '
raise NotImplementedError
def get_params(self, **tags):
"\n Get the list of parameters, filtered by the provided tags.\n Some common tags include 'regularizable' and 'trainable'\n "
tag_tuple = tuple(sorted(list(tags.items()), key=(lambda x: x[0])))
if (tag_tuple not in self._cached_params):
self._cached_params[tag_tuple] = self.get_params_internal(**tags)
return self._cached_params[tag_tuple]
def get_param_dtypes(self, **tags):
tag_tuple = tuple(sorted(list(tags.items()), key=(lambda x: x[0])))
if (tag_tuple not in self._cached_param_dtypes):
params = self.get_params(**tags)
param_values = tf.get_default_session().run(params)
self._cached_param_dtypes[tag_tuple] = [val.dtype for val in param_values]
return self._cached_param_dtypes[tag_tuple]
def get_param_shapes(self, **tags):
tag_tuple = tuple(sorted(list(tags.items()), key=(lambda x: x[0])))
if (tag_tuple not in self._cached_param_shapes):
params = self.get_params(**tags)
param_values = tf.get_default_session().run(params)
self._cached_param_shapes[tag_tuple] = [val.shape for val in param_values]
return self._cached_param_shapes[tag_tuple]
def get_param_values(self, **tags):
params = self.get_params(**tags)
param_values = tf.get_default_session().run(params)
return flatten_tensors(param_values)
def set_param_values(self, flattened_params, **tags):
debug = tags.pop('debug', False)
param_values = unflatten_tensors(flattened_params, self.get_param_shapes(**tags))
ops = []
feed_dict = dict()
for (param, dtype, value) in zip(self.get_params(**tags), self.get_param_dtypes(**tags), param_values):
if (param not in self._cached_assign_ops):
assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype)
assign_op = tf.assign(param, assign_placeholder)
self._cached_assign_ops[param] = assign_op
self._cached_assign_placeholders[param] = assign_placeholder
ops.append(self._cached_assign_ops[param])
feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
if debug:
print(('setting value of %s' % param.name))
tf.get_default_session().run(ops, feed_dict=feed_dict)
def flat_to_params(self, flattened_params, **tags):
    """Split a flat parameter vector back into per-parameter arrays."""
    shapes = self.get_param_shapes(**tags)
    return unflatten_tensors(flattened_params, shapes)
def __getstate__(self):
    """Pickle constructor args (via Serializable) plus, optionally, parameter values.

    The module-level ``load_params`` flag gates whether the (possibly large)
    flat parameter vector is embedded in the pickle.
    """
    d = Serializable.__getstate__(self)
    global load_params
    if load_params:
        d['params'] = self.get_param_values()
    return d
def __setstate__(self, d):
    """Restore constructor state, then (if ``load_params``) re-init and load values.

    Variables are initialized first so the subsequent assignment runs against
    live TF variables.
    NOTE(review): tf.initialize_variables is the pre-1.0 name of
    tf.variables_initializer — confirm the pinned TF version supports it.
    """
    Serializable.__setstate__(self, d)
    global load_params
    if load_params:
        tf.get_default_session().run(tf.initialize_variables(self.get_params()))
        self.set_param_values(d['params'])
|
class JointParameterized(Parameterized):
    """Aggregate several Parameterized components into a single parameter set."""

    def __init__(self, components):
        super(JointParameterized, self).__init__()
        self.components = components

    def get_params_internal(self, **tags):
        """Collect the union of all components' parameters, de-duplicated.

        Sorting by hash keeps the ordering deterministic within a process,
        matching the original behavior.
        """
        unique = set()
        for component in self.components:
            unique.update(component.get_params_internal(**tags))
        return sorted(unique, key=hash)
|
def make_input(shape, input_var=None, name='input', **kwargs):
    """Return ``input_var`` if provided, else a new float32 placeholder of ``shape``.

    When ``name`` is given, the placeholder is created inside that variable scope.
    """
    if input_var is not None:
        return input_var
    if name is not None:
        with tf.variable_scope(name):
            return tf.placeholder(tf.float32, shape=shape, name='input')
    return tf.placeholder(tf.float32, shape=shape, name='input')
|
def _create_param(spec, shape, name, trainable=True, regularizable=True):
    """Materialize a parameter from ``spec``.

    ``spec`` is either an existing tensor/variable (returned unchanged) or a
    callable initializer used to create a fresh ``tf.get_variable``.
    """
    if not callable(spec):
        assert isinstance(spec, (tf.Tensor, tf.Variable))
        return spec
    # A constant-zero regularizer marks the variable as non-regularizable
    # without attaching any actual penalty.
    regularizer = None if regularizable else (lambda _: tf.constant(0.0))
    return tf.get_variable(name=name, shape=shape, initializer=spec,
                           trainable=trainable, regularizer=regularizer,
                           dtype=tf.float32)
|
def add_param(spec, shape, layer_name, name, weight_norm=None, variable_reuse=None, **tags):
    """Create (or reuse) a parameter inside ``layer_name``'s variable scope.

    Remaining ``tags`` are forwarded to ``_create_param``; trainable and
    regularizable default to True.
    """
    with tf.variable_scope(layer_name, reuse=variable_reuse):
        tags.setdefault('trainable', True)
        tags.setdefault('regularizable', True)
        param = _create_param(spec, shape, name, **tags)
        if weight_norm:
            raise NotImplementedError('Not supported.')
        return param
|
def make_dense_layer(input_shape, num_units, name='fc', W=L.XavierUniformInitializer(), b=tf.zeros_initializer, weight_norm=False, **kwargs):
    """Create the weights (and optional bias) of a fully-connected layer.

    Returns ``(W, b, output_shape)``; ``b`` is None when no bias was requested.
    All trailing input dimensions are flattened into the fan-in.
    """
    fan_in = int(np.prod(input_shape[1:]))
    W = add_param(W, (fan_in, num_units), layer_name=name, name='W', weight_norm=weight_norm)
    if b is not None:
        b = add_param(b, (num_units,), layer_name=name, name='b',
                      regularizable=False, weight_norm=weight_norm)
    return (W, b, (input_shape[0], num_units))
|
def forward_dense_layer(input, W, b, nonlinearity=tf.identity, batch_norm=False, scope='', reuse=True, is_training=False):
    """Apply ``nonlinearity(input @ W + b)``.

    Inputs with rank > 2 are flattened to (batch, -1) first. ``batch_norm``
    is not implemented; ``scope``/``reuse``/``is_training`` are accepted for
    API compatibility.
    """
    x = input
    if x.get_shape().ndims > 2:
        x = tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))
    pre_activation = tf.matmul(x, W)
    if b is not None:
        pre_activation = pre_activation + tf.expand_dims(b, 0)
    if batch_norm:
        raise NotImplementedError('not supported')
    return nonlinearity(pre_activation)
|
def make_param_layer(num_units, name='', param=tf.zeros_initializer(), trainable=True):
    """Create a free parameter vector of length ``num_units``."""
    return add_param(param, (num_units,), layer_name=name, name='param', trainable=trainable)
|
def forward_param_layer(input, param):
    """Tile ``param`` so it matches every leading dimension of ``input``."""
    ndim = input.get_shape().ndims
    param = tf.convert_to_tensor(param)
    num_units = int(param.get_shape()[0])
    # Reshape to (1, ..., 1, num_units), then repeat along each leading axis.
    broadcastable = tf.reshape(param, ((1,) * (ndim - 1)) + (num_units,))
    reps = tf.concat([tf.shape(input)[:ndim - 1], [1]], 0)
    return tf.tile(broadcastable, reps)
|
class Distribution(object):
    """Abstract interface for the distributions used by policies.

    ``*_sym`` methods build symbolic (TF) expressions; their plain
    counterparts operate on concrete numpy values.
    """
    @property
    def dim(self):
        """Dimensionality of a single event of the distribution."""
        raise NotImplementedError
    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Compute the symbolic KL divergence of two distributions."""
        raise NotImplementedError
    def kl(self, old_dist_info, new_dist_info):
        """Compute the KL divergence of two distributions (numpy)."""
        raise NotImplementedError
    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic p_new(x) / p_old(x)."""
        raise NotImplementedError
    def entropy(self, dist_info):
        """Entropy of the distribution (numpy)."""
        raise NotImplementedError
    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log p(x)."""
        raise NotImplementedError
    def log_likelihood(self, xs, dist_info):
        """log p(x) for concrete samples (numpy)."""
        raise NotImplementedError
    @property
    def dist_info_specs(self):
        """List of (key, shape) pairs describing the distribution parameters."""
        raise NotImplementedError
    @property
    def dist_info_keys(self):
        # Derived from dist_info_specs; subclasses may override directly.
        return [k for (k, _) in self.dist_info_specs]
|
class Bernoulli(Distribution):
    """Vector of independent Bernoulli variables, parameterized by dist_info['p'].

    TINY (module-level constant) pads logs/denominators for numerical safety.
    NOTE(review): dist_info_specs is not overridden here, only dist_info_keys.
    """
    def __init__(self, dim):
        self._dim = dim
    @property
    def dim(self):
        return self._dim
    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL(old || new), summed over the last axis."""
        old_p = old_dist_info_vars['p']
        new_p = new_dist_info_vars['p']
        kl = ((old_p * (tf.log((old_p + TINY)) - tf.log((new_p + TINY)))) + ((1 - old_p) * (tf.log(((1 - old_p) + TINY)) - tf.log(((1 - new_p) + TINY)))))
        ndims = kl.get_shape().ndims
        return tf.reduce_sum(kl, reduction_indices=(ndims - 1))
    def kl(self, old_dist_info, new_dist_info):
        """Numpy KL(old || new), summed over the last axis."""
        old_p = old_dist_info['p']
        new_p = new_dist_info['p']
        kl = ((old_p * (np.log((old_p + TINY)) - np.log((new_p + TINY)))) + ((1 - old_p) * (np.log(((1 - old_p) + TINY)) - np.log(((1 - new_p) + TINY)))))
        return np.sum(kl, axis=(- 1))
    def sample(self, dist_info):
        """Draw 0/1 samples with success probability p (numpy)."""
        p = np.asarray(dist_info['p'])
        return np.cast['int']((np.random.uniform(low=0.0, high=1.0, size=p.shape) < p))
    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic p_new(x)/p_old(x), multiplied across the last axis."""
        old_p = old_dist_info_vars['p']
        new_p = new_dist_info_vars['p']
        ndims = old_p.get_shape().ndims
        return tf.reduce_prod((((x_var * new_p) / (old_p + TINY)) + (((1 - x_var) * (1 - new_p)) / ((1 - old_p) + TINY))), reduction_indices=(ndims - 1))
    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log p(x), summed over the last axis."""
        p = dist_info_vars['p']
        ndims = p.get_shape().ndims
        return tf.reduce_sum(((x_var * tf.log((p + TINY))) + ((1 - x_var) * tf.log(((1 - p) + TINY)))), reduction_indices=(ndims - 1))
    def log_likelihood(self, xs, dist_info):
        """Numpy log p(x), summed over the last axis."""
        p = dist_info['p']
        return np.sum(((xs * np.log((p + TINY))) + ((1 - xs) * np.log(((1 - p) + TINY)))), axis=(- 1))
    def entropy(self, dist_info):
        """Numpy entropy, summed over the last axis."""
        p = dist_info['p']
        return np.sum((((- p) * np.log((p + TINY))) - ((1 - p) * np.log(((1 - p) + TINY)))), axis=(- 1))
    @property
    def dist_info_keys(self):
        return ['p']
|
class DiagonalGaussian(Distribution):
    """Multivariate Gaussian with diagonal covariance.

    Parameterized by dist_info keys 'mean' and 'log_std' (per-dimension).
    """
    def __init__(self, dim):
        self._dim = dim
    @property
    def dim(self):
        return self._dim
    def kl(self, old_dist_info, new_dist_info):
        """Numpy KL(old || new) for diagonal Gaussians, summed over dimensions."""
        old_means = old_dist_info['mean']
        old_log_stds = old_dist_info['log_std']
        new_means = new_dist_info['mean']
        new_log_stds = new_dist_info['log_std']
        '\n        Compute the KL divergence of two multivariate Gaussian distribution with\n        diagonal covariance matrices\n        '
        old_std = np.exp(old_log_stds)
        new_std = np.exp(new_log_stds)
        # KL per dim: ((mu1-mu2)^2 + s1^2 - s2^2) / (2 s2^2) + log(s2/s1)
        numerator = ((np.square((old_means - new_means)) + np.square(old_std)) - np.square(new_std))
        denominator = ((2 * np.square(new_std)) + 1e-08)
        return np.sum((((numerator / denominator) + new_log_stds) - old_log_stds), axis=(- 1))
    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL(old || new); mirrors kl() with TF ops."""
        old_means = old_dist_info_vars['mean']
        old_log_stds = old_dist_info_vars['log_std']
        new_means = new_dist_info_vars['mean']
        new_log_stds = new_dist_info_vars['log_std']
        '\n        Compute the KL divergence of two multivariate Gaussian distribution with\n        diagonal covariance matrices\n        '
        old_std = tf.exp(old_log_stds)
        new_std = tf.exp(new_log_stds)
        numerator = ((tf.square((old_means - new_means)) + tf.square(old_std)) - tf.square(new_std))
        denominator = ((2 * tf.square(new_std)) + 1e-08)
        return tf.reduce_sum((((numerator / denominator) + new_log_stds) - old_log_stds), reduction_indices=(- 1))
    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic p_new(x)/p_old(x), via the difference of log-likelihoods."""
        logli_new = self.log_likelihood_sym(x_var, new_dist_info_vars)
        logli_old = self.log_likelihood_sym(x_var, old_dist_info_vars)
        return tf.exp((logli_new - logli_old))
    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic Gaussian log-density, summed over dimensions."""
        means = dist_info_vars['mean']
        log_stds = dist_info_vars['log_std']
        zs = ((x_var - means) / tf.exp(log_stds))
        return (((- tf.reduce_sum(log_stds, reduction_indices=(- 1))) - (0.5 * tf.reduce_sum(tf.square(zs), reduction_indices=(- 1)))) - ((0.5 * self.dim) * np.log((2 * np.pi))))
    def sample(self, dist_info):
        """Draw one sample per row via the reparameterization mean + std * N(0, I)."""
        means = dist_info['mean']
        log_stds = dist_info['log_std']
        rnd = np.random.normal(size=means.shape)
        return ((rnd * np.exp(log_stds)) + means)
    def log_likelihood(self, xs, dist_info):
        """Numpy Gaussian log-density, summed over dimensions."""
        means = dist_info['mean']
        log_stds = dist_info['log_std']
        zs = ((xs - means) / np.exp(log_stds))
        return (((- np.sum(log_stds, axis=(- 1))) - (0.5 * np.sum(np.square(zs), axis=(- 1)))) - ((0.5 * self.dim) * np.log((2 * np.pi))))
    def entropy(self, dist_info):
        """Numpy differential entropy: sum_i (log_std_i + log sqrt(2*pi*e))."""
        log_stds = dist_info['log_std']
        return np.sum((log_stds + np.log(np.sqrt(((2 * np.pi) * np.e)))), axis=(- 1))
    @property
    def dist_info_specs(self):
        return [('mean', (self.dim,)), ('log_std', (self.dim,))]
|
class RecurrentCategorical(Distribution):
    """Categorical distribution over time series: dist params are (batch, time, dim).

    Per-timestep computations are delegated to a flat Categorical by reshaping
    (batch, time, dim) tensors to (batch * time, dim).
    """
    def __init__(self, dim):
        self._cat = Categorical(dim)
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL(old || new) per (batch, time) entry (summed over axis 2)."""
        old_prob_var = old_dist_info_vars['prob']
        new_prob_var = new_dist_info_vars['prob']
        return tf.reduce_sum((old_prob_var * (tf.log((old_prob_var + TINY)) - tf.log((new_prob_var + TINY)))), reduction_indices=2)

    def kl(self, old_dist_info, new_dist_info):
        """Numpy KL(old || new) per (batch, time) entry."""
        old_prob = old_dist_info['prob']
        new_prob = new_dist_info['prob']
        return np.sum((old_prob * (np.log((old_prob + TINY)) - np.log((new_prob + TINY)))), axis=2)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic per-timestep likelihood ratio, reshaped back to (batch, time)."""
        old_prob_var = old_dist_info_vars['prob']
        new_prob_var = new_dist_info_vars['prob']
        a_dim = tf.shape(x_var)[2]
        # tf.pack was removed in TF 1.0; use tf.stack (consistent with the rest
        # of this file).
        flat_ratios = self._cat.likelihood_ratio_sym(
            tf.reshape(x_var, tf.stack([(- 1), a_dim])),
            dict(prob=tf.reshape(old_prob_var, tf.stack([(- 1), a_dim]))),
            dict(prob=tf.reshape(new_prob_var, tf.stack([(- 1), a_dim]))))
        return tf.reshape(flat_ratios, tf.shape(old_prob_var)[:2])

    def entropy(self, dist_info):
        """Numpy entropy per (batch, time) entry."""
        probs = dist_info['prob']
        return (- np.sum((probs * np.log((probs + TINY))), axis=2))

    def entropy_sym(self, dist_info_vars):
        """Symbolic entropy per (batch, time) entry."""
        probs = dist_info_vars['prob']
        return (- tf.reduce_sum((probs * tf.log((probs + TINY))), 2))

    def log_likelihood_sym(self, xs, dist_info_vars):
        """Symbolic log-likelihood per (batch, time) entry."""
        probs = dist_info_vars['prob']
        a_dim = tf.shape(probs)[2]
        flat_logli = self._cat.log_likelihood_sym(
            tf.reshape(xs, tf.stack([(- 1), a_dim])),
            dict(prob=tf.reshape(probs, tf.stack([(- 1), a_dim]))))
        return tf.reshape(flat_logli, tf.shape(probs)[:2])

    def log_likelihood(self, xs, dist_info):
        """Numpy log-likelihood per (batch, time) entry.

        BUG FIX: this numpy path previously used tf.shape on a numpy array and
        called the symbolic log_likelihood_sym (then .reshape on a TF tensor);
        it now stays entirely in numpy.
        """
        probs = dist_info['prob']
        a_dim = probs.shape[2]
        flat_logli = self._cat.log_likelihood(xs.reshape(((- 1), a_dim)), dict(prob=probs.reshape(((- 1), a_dim))))
        return flat_logli.reshape(probs.shape[:2])

    @property
    def dist_info_specs(self):
        return [('prob', (self.dim,))]
|
def to_tf_space(space):
    """Convert a Theano-based rllab space into its TF-space equivalent.

    Product spaces are converted recursively; unknown space types raise
    NotImplementedError.
    """
    if isinstance(space, TheanoBox):
        return Box(low=space.low, high=space.high)
    if isinstance(space, TheanoDiscrete):
        return Discrete(space.n)
    if isinstance(space, TheanoProduct):
        return Product([to_tf_space(component) for component in space.components])
    raise NotImplementedError
|
class WrappedCls(object):
    """Picklable factory that builds ``cls(env_cls(*args, **merged_kwargs))``.

    ``extra_kwargs`` are merged with (and overridden by) call-time kwargs.
    """

    def __init__(self, cls, env_cls, extra_kwargs):
        self.cls = cls
        self.env_cls = env_cls
        self.extra_kwargs = extra_kwargs

    def __call__(self, *args, **kwargs):
        merged = dict(self.extra_kwargs, **kwargs)
        return self.cls(self.env_cls(*args, **merged))
|
class TfEnv(ProxyEnv):
    """ProxyEnv that exposes the wrapped env through TF-based space objects."""
    @cached_property
    def observation_space(self):
        # Converted once and memoized by cached_property.
        return to_tf_space(self.wrapped_env.observation_space)
    @cached_property
    def action_space(self):
        return to_tf_space(self.wrapped_env.action_space)
    @cached_property
    def spec(self):
        """EnvSpec built from the converted observation/action spaces."""
        return EnvSpec(observation_space=self.observation_space, action_space=self.action_space)
    @property
    def vectorized(self):
        # False unless the wrapped env advertises vectorized execution.
        return getattr(self.wrapped_env, 'vectorized', False)
    def vec_env_executor(self, n_envs, max_path_length):
        """Delegate to the wrapped env's vectorized executor, wrapped in VecTfEnv."""
        return VecTfEnv(self.wrapped_env.vec_env_executor(n_envs=n_envs, max_path_length=max_path_length))
    @classmethod
    def wrap(cls, env_cls, **extra_kwargs):
        """Return a picklable factory producing TfEnv(env_cls(...))."""
        return WrappedCls(cls, env_cls, extra_kwargs)
|
class VecTfEnv(object):
    """Thin pass-through wrapper around a vectorized env executor."""
    def __init__(self, vec_env):
        self.vec_env = vec_env
    def reset(self, reset_args=None):
        # reset_args is forwarded unchanged to the underlying executor.
        return self.vec_env.reset(reset_args=reset_args)
    @property
    def num_envs(self):
        return self.vec_env.num_envs
    def step(self, action_n):
        return self.vec_env.step(action_n)
    def terminate(self):
        self.vec_env.terminate()
|
def worker_init_envs(G, alloc, scope, env):
    """Instantiate one env copy per allocated global index on this worker.

    ``G`` is the per-worker globals object; envs are stored under ``scope``
    as (global_index, env) pairs, plus one template env for space queries.
    """
    logger.log('initializing environment on worker %d' % G.worker_id)
    if not hasattr(G, 'parallel_vec_envs'):
        G.parallel_vec_envs = dict()
        G.parallel_vec_env_template = dict()
    # Deep-copy via a pickle round-trip so each slot owns an independent env.
    G.parallel_vec_envs[scope] = [(idx, pickle.loads(pickle.dumps(env))) for idx in alloc]
    G.parallel_vec_env_template[scope] = env
|
def worker_run_reset(G, flags, scope):
    """On this worker, reset every allocated env whose ``flags[idx]`` is truthy.

    Returns (ids, ret_arr): ``ids`` are this worker's global env indices and
    ``ret_arr`` is an (N, obs_dim) array of flattened reset observations
    (rows of non-reset envs remain zero).
    """
    # Defensive debug dump: only triggers if worker_init_envs never ran here.
    if (not hasattr(G, 'parallel_vec_envs')):
        logger.log(('on worker %d' % G.worker_id))
        import traceback
        for line in traceback.format_stack():
            logger.log(line)
        logger.log('oops')
        for (k, v) in G.__dict__.items():
            logger.log(((str(k) + ' : ') + str(v)))
    assert hasattr(G, 'parallel_vec_envs')
    assert (scope in G.parallel_vec_envs)
    N = len(G.parallel_vec_envs[scope])
    env_template = G.parallel_vec_env_template[scope]
    obs_dim = env_template.observation_space.flat_dim
    ret_arr = np.zeros((N, obs_dim))
    ids = []
    flat_obs = []
    reset_ids = []
    for (itr_idx, (idx, env)) in enumerate(G.parallel_vec_envs[scope]):
        # flags is indexed by global env id; itr_idx is the local row in ret_arr.
        flag = flags[idx]
        if flag:
            flat_obs.append(env.reset())
            reset_ids.append(itr_idx)
        ids.append(idx)
    if (len(reset_ids) > 0):
        ret_arr[reset_ids] = env_template.observation_space.flatten_n(flat_obs)
    return (ids, ret_arr)
|
def worker_run_step(G, action_n, scope):
    """Step every env allocated to this worker with its action from ``action_n``.

    Returns (ids, flat_obs, rewards, dones, env_infos) for this worker's envs,
    or None when no envs are allocated here.
    """
    assert hasattr(G, 'parallel_vec_envs')
    assert (scope in G.parallel_vec_envs)
    env_template = G.parallel_vec_env_template[scope]
    ids = []
    step_results = []
    for (idx, env) in G.parallel_vec_envs[scope]:
        # action_n is indexed by global env id.
        action = action_n[idx]
        ids.append(idx)
        step_results.append(tuple(env.step(action)))
    if (len(step_results) == 0):
        return None
    # Transpose list-of-(o, r, d, info) into parallel lists.
    (obs, rewards, dones, env_infos) = list(map(list, list(zip(*step_results))))
    obs = env_template.observation_space.flatten_n(obs)
    rewards = np.asarray(rewards)
    dones = np.asarray(dones)
    env_infos = tensor_utils.stack_tensor_dict_list(env_infos)
    return (ids, obs, rewards, dones, env_infos)
|
def worker_collect_env_time(G):
    """Report this worker's accumulated environment time."""
    return G.env_time
|
class ParallelVecEnvExecutor(object):
    """Vectorized env executor that distributes envs across worker processes.

    Envs are sharded over singleton_pool workers; step/reset fan out RPCs and
    re-assemble results in global env-id order.
    """
    def __init__(self, env, n, max_path_length, scope=None):
        # A unique scope isolates this executor's envs in each worker's globals.
        if (scope is None):
            scope = str(uuid.uuid4())
        envs_per_worker = int(np.ceil(((n * 1.0) / singleton_pool.n_parallel)))
        alloc_env_ids = []
        rest_alloc = n
        start_id = 0
        # Contiguous blocks of env ids per worker; later workers may get fewer.
        for _ in range(singleton_pool.n_parallel):
            n_allocs = min(envs_per_worker, rest_alloc)
            alloc_env_ids.append(list(range(start_id, (start_id + n_allocs))))
            start_id += n_allocs
            rest_alloc = max(0, (rest_alloc - envs_per_worker))
        singleton_pool.run_each(worker_init_envs, [(alloc, scope, env) for alloc in alloc_env_ids])
        self._alloc_env_ids = alloc_env_ids
        self._action_space = env.action_space
        self._observation_space = env.observation_space
        self._num_envs = n
        self.scope = scope
        # Per-env step counters, used to enforce max_path_length truncation.
        self.ts = np.zeros(n, dtype='int')
        self.max_path_length = max_path_length
    def step(self, action_n):
        """Step all envs; auto-reset finished ones and return their fresh obs."""
        results = singleton_pool.run_each(worker_run_step, [(action_n, self.scope) for _ in self._alloc_env_ids])
        results = [x for x in results if (x is not None)]
        (ids, obs, rewards, dones, env_infos) = list(zip(*results))
        ids = np.concatenate(ids)
        obs = self.observation_space.unflatten_n(np.concatenate(obs))
        rewards = np.concatenate(rewards)
        dones = np.concatenate(dones)
        env_infos = tensor_utils.split_tensor_dict_list(tensor_utils.concat_tensor_dict_list(env_infos))
        if (env_infos is None):
            env_infos = [dict() for _ in range(self.num_envs)]
        # Workers return results in arbitrary shard order; sort by global id.
        items = list(zip(ids, obs, rewards, dones, env_infos))
        items = sorted(items, key=(lambda x: x[0]))
        (ids, obs, rewards, dones, env_infos) = list(zip(*items))
        obs = list(obs)
        rewards = np.asarray(rewards)
        dones = np.asarray(dones)
        self.ts += 1
        # Force termination for envs that hit the horizon.
        dones[(self.ts >= self.max_path_length)] = True
        reset_obs = self._run_reset(dones)
        for (i, done) in enumerate(dones):
            if done:
                obs[i] = reset_obs[i]
                self.ts[i] = 0
        return (obs, rewards, dones, tensor_utils.stack_tensor_dict_list(list(env_infos)))
    def _run_reset(self, dones):
        """Reset the envs flagged in ``dones``; return per-env obs (None if not reset)."""
        dones = np.asarray(dones)
        results = singleton_pool.run_each(worker_run_reset, [(dones, self.scope) for _ in self._alloc_env_ids])
        (ids, flat_obs) = list(map(np.concatenate, list(zip(*results))))
        # Restore global id order before selecting the rows that were reset.
        zipped = list(zip(ids, flat_obs))
        sorted_obs = np.asarray([x[1] for x in sorted(zipped, key=(lambda x: x[0]))])
        (done_ids,) = np.where(dones)
        done_flat_obs = sorted_obs[done_ids]
        done_unflat_obs = self.observation_space.unflatten_n(done_flat_obs)
        all_obs = ([None] * self.num_envs)
        done_cursor = 0
        for (idx, done) in enumerate(dones):
            if done:
                all_obs[idx] = done_unflat_obs[done_cursor]
                done_cursor += 1
        return all_obs
    def reset(self):
        """Reset every env and return the list of initial observations."""
        dones = np.asarray(([True] * self.num_envs))
        return self._run_reset(dones)
    @property
    def num_envs(self):
        return self._num_envs
    @property
    def action_space(self):
        return self._action_space
    @property
    def observation_space(self):
        return self._observation_space
    def terminate(self):
        # Worker envs are cleaned up with the pool; nothing to do here.
        pass
|
class VecEnvExecutor(object):
    """Serial (in-process) vectorized env executor over a list of envs."""
    def __init__(self, envs, max_path_length):
        self.envs = envs
        self._action_space = envs[0].action_space
        self._observation_space = envs[0].observation_space
        # Per-env step counters for max_path_length truncation.
        self.ts = np.zeros(len(self.envs), dtype='int')
        self.max_path_length = max_path_length
    def step(self, action_n, reset_args=None):
        """Step each env with its action; auto-reset done envs (with reset_args)."""
        if (reset_args is None):
            reset_args = ([None] * len(self.envs))
        all_results = [env.step(a) for (a, env) in zip(action_n, self.envs)]
        (obs, rewards, dones, env_infos) = list(map(list, list(zip(*all_results))))
        dones = np.asarray(dones)
        rewards = np.asarray(rewards)
        self.ts += 1
        # max_path_length=None disables horizon truncation.
        if (self.max_path_length is not None):
            dones[(self.ts >= self.max_path_length)] = True
        for (i, done) in enumerate(dones):
            if done:
                obs[i] = self.envs[i].reset(reset_args=reset_args[i])
                self.ts[i] = 0
        return (obs, rewards, dones, tensor_utils.stack_tensor_dict_list(env_infos))
    def reset(self, reset_args=None):
        """Reset every env (optionally with per-env reset_args) and zero counters."""
        if (reset_args is not None):
            results = [env.reset(reset_args=arg) for (env, arg) in zip(self.envs, reset_args)]
        else:
            results = [env.reset() for env in self.envs]
        self.ts[:] = 0
        return results
    @property
    def num_envs(self):
        return len(self.envs)
    @property
    def action_space(self):
        return self._action_space
    @property
    def observation_space(self):
        return self._observation_space
    def terminate(self):
        # Envs are owned by the caller; nothing to release.
        pass
|
def compile_function(inputs, outputs, log_name=None):
    """Return a callable evaluating ``outputs`` with ``inputs`` fed positionally.

    ``log_name`` is accepted for API compatibility but unused.
    """
    def run(*input_vals):
        feed = dict(zip(inputs, input_vals))
        return tf.get_default_session().run(outputs, feed_dict=feed)
    return run
|
def flatten_tensor_variables(ts):
    """Concatenate the flattened views of all tensors in ``ts`` into one vector."""
    flat_parts = [tf.reshape(t, [-1]) for t in ts]
    return tf.concat(axis=0, values=flat_parts)
|
def unflatten_tensor_variables(flatarr, shapes, symb_arrs):
    """Slice the flat vector ``flatarr`` back into tensors of the given ``shapes``.

    ``symb_arrs`` is only used to bound the iteration (API compatibility);
    ``shapes`` alone determines the split points.
    """
    arrs = []
    n = 0
    for (shape, symb_arr) in zip(shapes, symb_arrs):
        # int() guards against np.prod returning a float (e.g. 1.0 for an
        # empty/scalar shape), which would break the slice below.
        size = int(np.prod(list(shape)))
        arr = tf.reshape(flatarr[n:(n + size)], shape)
        arrs.append(arr)
        n += size
    return arrs
|
def new_tensor(name, ndim, dtype):
    """Create a placeholder with ``ndim`` dimensions, all of unknown size."""
    unknown_shape = [None] * ndim
    return tf.placeholder(dtype=dtype, shape=unknown_shape, name=name)
|
def new_tensor_like(name, arr_like):
    """Create a placeholder matching the rank and base dtype of ``arr_like``."""
    ndim = arr_like.get_shape().ndims
    return new_tensor(name, ndim, arr_like.dtype.base_dtype)
|
def concat_tensor_list(tensor_list):
    """Concatenate a list of arrays along the leading (batch) axis."""
    return np.concatenate(tensor_list, axis=0)
|
def concat_tensor_dict_list(tensor_dict_list):
    """Recursively concatenate a list of (possibly nested) dicts of arrays.

    Every dict is assumed to share the key structure of the first one; leaves
    are concatenated along axis 0.
    """
    result = dict()
    for key, first_val in tensor_dict_list[0].items():
        column = [d[key] for d in tensor_dict_list]
        if isinstance(first_val, dict):
            result[key] = concat_tensor_dict_list(column)
        else:
            result[key] = np.concatenate(column, axis=0)
    return result
|
def stack_tensor_list(tensor_list):
    """Stack a list of equally-shaped arrays into one array with a new leading axis."""
    return np.array(tensor_list)
|
def stack_tensor_dict_list(tensor_dict_list):
    """Recursively stack a list of (possibly nested) dicts of tensors.

    Returns a dict with the same (nested) keys where every leaf is the stack
    of the corresponding leaves, with a new leading axis.
    """
    result = dict()
    for key, first_val in tensor_dict_list[0].items():
        column = [d[key] for d in tensor_dict_list]
        if isinstance(first_val, dict):
            result[key] = stack_tensor_dict_list(column)
        else:
            result[key] = np.array(column)
    return result
|
def split_tensor_dict_list(tensor_dict):
    """Invert stack/concat: turn a dict of sequences into a list of dicts.

    Nested dicts are split recursively. Returns None for an empty dict.
    """
    result = None
    for key, vals in tensor_dict.items():
        if isinstance(vals, dict):
            vals = split_tensor_dict_list(vals)
        if result is None:
            result = [{key: v} for v in vals]
        else:
            for v, entry in zip(vals, result):
                entry[key] = v
    return result
|
def to_onehot_sym(inds, dim):
    """Symbolic one-hot encoding of integer indices ``inds`` with depth ``dim``."""
    return tf.one_hot(inds, depth=dim, on_value=1, off_value=0)
|
def pad_tensor(x, max_len):
    """Zero-pad ``x`` along axis 0 up to length ``max_len``."""
    pad_count = max_len - len(x)
    # Repeat a zero element pad_count times; trailing 1s keep non-leading dims.
    pad_block = np.tile(np.zeros_like(x[0]), (pad_count,) + (1,) * np.ndim(x[0]))
    return np.concatenate([x, pad_block])
|
def pad_tensor_n(xs, max_len):
    """Stack variable-length arrays into a zero-padded (len(xs), max_len, ...) array.

    The dtype and trailing shape are taken from the first element.
    """
    out = np.zeros(((len(xs), max_len) + xs[0].shape[1:]), dtype=xs[0].dtype)
    for row, x in zip(out, xs):
        # row is a view into out, so in-place assignment fills the output.
        row[:len(x)] = x
    return out
|
def pad_tensor_dict(tensor_dict, max_len):
    """Recursively zero-pad every array in a (possibly nested) dict to ``max_len``."""
    padded = dict()
    for key, val in tensor_dict.items():
        if isinstance(val, dict):
            padded[key] = pad_tensor_dict(val, max_len)
        else:
            padded[key] = pad_tensor(val, max_len)
    return padded
|
class PerlmutterHvp(object):
    """Hessian-vector products via Pearlmutter's trick (double backprop)."""

    def __init__(self, num_slices=1):
        self.target = None
        self.reg_coeff = None
        self.opt_fun = None
        # Inputs are evaluated in this many slices to bound memory use.
        self._num_slices = num_slices

    def update_opt(self, f, target, inputs, reg_coeff):
        """Build the symbolic Hx function for constraint ``f`` w.r.t. ``target``'s params."""
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = tf.gradients(f, xs=params)
        # Params unused by f get zero gradients instead of None.
        for (idx, (grad, param)) in enumerate(zip(constraint_grads, params)):
            if (grad is None):
                constraint_grads[idx] = tf.zeros_like(param)
        xs = tuple([tensor_utils.new_tensor_like(p.name.split(':')[0], p) for p in params])

        def Hx_plain():
            # H x = d/dparams (grad(f) . x), differentiated a second time.
            # BUG FIX: tf.pack was removed in TF 1.0; use tf.stack, consistent
            # with the rest of this file.
            Hx_plain_splits = tf.gradients(
                tf.reduce_sum(tf.stack([tf.reduce_sum((g * x)) for (g, x) in zip(constraint_grads, xs)])),
                params)
            for (idx, (Hx, param)) in enumerate(zip(Hx_plain_splits, params)):
                if (Hx is None):
                    Hx_plain_splits[idx] = tf.zeros_like(param)
            return tensor_utils.flatten_tensor_variables(Hx_plain_splits)

        self.opt_fun = ext.lazydict(
            f_Hx_plain=(lambda: tensor_utils.compile_function(
                inputs=(inputs + xs), outputs=Hx_plain(), log_name='f_Hx_plain')))

    def build_eval(self, inputs):
        """Return eval(x) computing (H + reg_coeff * I) x for a flat vector x."""
        def eval(x):
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = (sliced_fun(self.opt_fun['f_Hx_plain'], self._num_slices)(inputs, xs) + (self.reg_coeff * x))
            return ret
        return eval
|
class FiniteDifferenceHvp(object):
    """Hessian-vector products approximated by finite differences of the gradient."""
    def __init__(self, base_eps=1e-08, symmetric=True, grad_clip=None, num_slices=1):
        # base_eps is rescaled by the parameter norm at evaluation time.
        self.base_eps = base_eps
        # symmetric=True uses the central difference (two gradient evals).
        self.symmetric = symmetric
        # NOTE(review): grad_clip is stored but never used below — confirm intent.
        self.grad_clip = grad_clip
        self._num_slices = num_slices
    def update_opt(self, f, target, inputs, reg_coeff):
        """Compile the flat gradient of ``f`` and the finite-difference Hx function."""
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = tf.gradients(f, xs=params)
        # Params unused by f get zero gradients instead of None.
        for (idx, (grad, param)) in enumerate(zip(constraint_grads, params)):
            if (grad is None):
                constraint_grads[idx] = tf.zeros_like(param)
        flat_grad = tensor_utils.flatten_tensor_variables(constraint_grads)
        def f_Hx_plain(*args):
            # args = inputs followed by the per-parameter direction arrays.
            inputs_ = args[:len(inputs)]
            xs = args[len(inputs):]
            flat_xs = np.concatenate([np.reshape(x, ((- 1),)) for x in xs])
            param_val = self.target.get_param_values(trainable=True)
            # Scale the step size relative to the current parameter norm.
            eps = np.cast['float32']((self.base_eps / (np.linalg.norm(param_val) + 1e-08)))
            self.target.set_param_values((param_val + (eps * flat_xs)), trainable=True)
            flat_grad_dvplus = self.opt_fun['f_grad'](*inputs_)
            self.target.set_param_values(param_val, trainable=True)
            if self.symmetric:
                # Central difference: (g(p + eps x) - g(p - eps x)) / (2 eps).
                self.target.set_param_values((param_val - (eps * flat_xs)), trainable=True)
                flat_grad_dvminus = self.opt_fun['f_grad'](*inputs_)
                hx = ((flat_grad_dvplus - flat_grad_dvminus) / (2 * eps))
                # Always restore the original parameters before returning.
                self.target.set_param_values(param_val, trainable=True)
            else:
                # Forward difference: (g(p + eps x) - g(p)) / eps.
                flat_grad = self.opt_fun['f_grad'](*inputs_)
                hx = ((flat_grad_dvplus - flat_grad) / eps)
            return hx
        self.opt_fun = ext.lazydict(f_grad=(lambda : tensor_utils.compile_function(inputs=inputs, outputs=flat_grad, log_name='f_grad')), f_Hx_plain=(lambda : f_Hx_plain))
    def build_eval(self, inputs):
        """Return eval(x) computing (H + reg_coeff * I) x for a flat vector x."""
        def eval(x):
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = (sliced_fun(self.opt_fun['f_Hx_plain'], self._num_slices)(inputs, xs) + (self.reg_coeff * x))
            return ret
        return eval
|
class ConjugateGradientOptimizer(Serializable):
    """
    Performs constrained optimization via line search. The search direction is computed using a conjugate gradient
    algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient
    of the loss function.
    """

    def __init__(self, cg_iters=10, reg_coeff=1e-05, subsample_factor=1.0, backtrack_ratio=0.8, max_backtracks=15, debug_nan=False, accept_violation=False, hvp_approach=None, num_slices=1):
        """
        :param cg_iters: The number of CG iterations used to calculate A^-1 g
        :param reg_coeff: A small value so that A -> A + reg*I
        :param subsample_factor: Subsampling factor to reduce samples when using "conjugate gradient. Since the
            computation time for the descent direction dominates, this can greatly reduce the overall computation time.
        :param debug_nan: if set to True, NanGuard will be added to the compilation, and ipdb will be invoked when
            nan is detected
        :param accept_violation: whether to accept the descent step if it violates the line search condition after
            exhausting all backtracking budgets
        :param num_slices: evaluate compiled functions in this many slices to bound memory
        :return: No return value.
        """
        Serializable.quick_init(self, locals())
        self._cg_iters = cg_iters
        self._reg_coeff = reg_coeff
        self._subsample_factor = subsample_factor
        self._backtrack_ratio = backtrack_ratio
        self._max_backtracks = max_backtracks
        self._num_slices = num_slices
        self._opt_fun = None
        self._target = None
        self._max_constraint_val = None
        self._constraint_name = None
        self._debug_nan = debug_nan
        self._accept_violation = accept_violation
        if (hvp_approach is None):
            # BUG FIX: FiniteDifferenceHvp(num_slices) passed num_slices
            # positionally as base_eps (signature: base_eps, symmetric,
            # grad_clip, num_slices); pass it by keyword instead.
            hvp_approach = FiniteDifferenceHvp(num_slices=num_slices)
        self._hvp_approach = hvp_approach

    def update_opt(self, loss, target, leq_constraint, inputs, extra_inputs=None, constraint_name='constraint', *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
            :class:`rllab.core.paramerized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs, which could be subsampled if needed. It is assumed
            that the first dimension of these inputs should correspond to the number of data points
        :param extra_inputs: A list of symbolic variables as extra inputs which should not be subsampled
        :return: No return value.
        """
        inputs = tuple(inputs)
        if (extra_inputs is None):
            extra_inputs = tuple()
        else:
            extra_inputs = tuple(extra_inputs)
        (constraint_term, constraint_value) = leq_constraint
        params = target.get_params(trainable=True)
        grads = tf.gradients(loss, xs=params)
        # Params unused by the loss get zero gradients instead of None.
        for (idx, (grad, param)) in enumerate(zip(grads, params)):
            if (grad is None):
                grads[idx] = tf.zeros_like(param)
        flat_grad = tensor_utils.flatten_tensor_variables(grads)
        self._hvp_approach.update_opt(f=constraint_term, target=target, inputs=(inputs + extra_inputs), reg_coeff=self._reg_coeff)
        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name
        self._opt_fun = ext.lazydict(f_loss=(lambda : tensor_utils.compile_function(inputs=(inputs + extra_inputs), outputs=loss, log_name='f_loss')), f_grad=(lambda : tensor_utils.compile_function(inputs=(inputs + extra_inputs), outputs=flat_grad, log_name='f_grad')), f_constraint=(lambda : tensor_utils.compile_function(inputs=(inputs + extra_inputs), outputs=constraint_term, log_name='constraint')), f_loss_constraint=(lambda : tensor_utils.compile_function(inputs=(inputs + extra_inputs), outputs=[loss, constraint_term], log_name='f_loss_constraint')))

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the loss (sliced over inputs to bound memory)."""
        inputs = tuple(inputs)
        if (extra_inputs is None):
            extra_inputs = tuple()
        return sliced_fun(self._opt_fun['f_loss'], self._num_slices)(inputs, extra_inputs)

    def constraint_val(self, inputs, extra_inputs=None):
        """Evaluate the constraint term (sliced over inputs to bound memory)."""
        inputs = tuple(inputs)
        if (extra_inputs is None):
            extra_inputs = tuple()
        return sliced_fun(self._opt_fun['f_constraint'], self._num_slices)(inputs, extra_inputs)

    def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None):
        """Run one constrained update: CG for the direction, backtracking line search for the step."""
        prev_param = np.copy(self._target.get_param_values(trainable=True))
        inputs = tuple(inputs)
        if (extra_inputs is None):
            extra_inputs = tuple()
        # Optionally subsample inputs for the (expensive) Hessian-vector products.
        if (self._subsample_factor < 1):
            if (subsample_grouped_inputs is None):
                subsample_grouped_inputs = [inputs]
            subsample_inputs = tuple()
            for inputs_grouped in subsample_grouped_inputs:
                n_samples = len(inputs_grouped[0])
                inds = np.random.choice(n_samples, int((n_samples * self._subsample_factor)), replace=False)
                subsample_inputs += tuple([x[inds] for x in inputs_grouped])
        else:
            subsample_inputs = inputs
        logger.log(('Start CG optimization: #parameters: %d, #inputs: %d, #subsample_inputs: %d' % (len(prev_param), len(inputs[0]), len(subsample_inputs[0]))))
        logger.log('computing loss before')
        loss_before = sliced_fun(self._opt_fun['f_loss'], self._num_slices)(inputs, extra_inputs)
        logger.log('performing update')
        logger.log('computing gradient')
        flat_g = sliced_fun(self._opt_fun['f_grad'], self._num_slices)(inputs, extra_inputs)
        logger.log('gradient computed')
        logger.log('computing descent direction')
        Hx = self._hvp_approach.build_eval((subsample_inputs + extra_inputs))
        descent_direction = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)
        # Scale the step so the quadratic constraint model equals its bound.
        initial_step_size = np.sqrt(((2.0 * self._max_constraint_val) * (1.0 / (descent_direction.dot(Hx(descent_direction)) + 1e-08))))
        if np.isnan(initial_step_size):
            initial_step_size = 1.0
        flat_descent_step = (initial_step_size * descent_direction)
        logger.log('descent direction computed')
        n_iter = 0
        # Backtracking line search: shrink the step until loss improves and the
        # constraint is satisfied.
        for (n_iter, ratio) in enumerate((self._backtrack_ratio ** np.arange(self._max_backtracks))):
            cur_step = (ratio * flat_descent_step)
            cur_param = (prev_param - cur_step)
            self._target.set_param_values(cur_param, trainable=True)
            (loss, constraint_val) = sliced_fun(self._opt_fun['f_loss_constraint'], self._num_slices)(inputs, extra_inputs)
            if (self._debug_nan and np.isnan(constraint_val)):
                import ipdb
                ipdb.set_trace()
            if ((loss < loss_before) and (constraint_val <= self._max_constraint_val)):
                break
        # If the search failed and violations are not accepted, revert the params.
        if ((np.isnan(loss) or np.isnan(constraint_val) or (loss >= loss_before) or (constraint_val >= self._max_constraint_val)) and (not self._accept_violation)):
            logger.log('Line search condition violated. Rejecting the step!')
            if np.isnan(loss):
                logger.log('Violated because loss is NaN')
            if np.isnan(constraint_val):
                logger.log(('Violated because constraint %s is NaN' % self._constraint_name))
            if (loss >= loss_before):
                logger.log('Violated because loss not improving')
            if (constraint_val >= self._max_constraint_val):
                logger.log(('Violated because constraint %s is violated' % self._constraint_name))
            self._target.set_param_values(prev_param, trainable=True)
        logger.log(('backtrack iters: %d' % n_iter))
        logger.log('computing loss after')
        logger.log('optimization finished')
|
class FirstOrderOptimizer(Serializable):
'\n Performs (stochastic) gradient descent, possibly using fancier methods like adam etc.\n '
def __init__(self, tf_optimizer_cls=None, tf_optimizer_args=None, max_epochs=1000, tolerance=1e-06, batch_size=32, callback=None, verbose=False, init_learning_rate=None, **kwargs):
    """
    :param tf_optimizer_cls: TF optimizer class; defaults to tf.train.AdamOptimizer.
    :param tf_optimizer_args: kwargs for the optimizer; defaults to learning_rate=0.001.
        NOTE(review): a custom dict without a 'learning_rate' key raises
        KeyError below — confirm whether that is intended.
    :param max_epochs: maximum number of passes over the data.
    :param tolerance: relative-improvement threshold for early stopping.
    :param batch_size: None or an integer. If None the whole dataset will be used.
    :param callback: optional per-epoch callback.
    :param init_learning_rate: if given, a second optimizer with this rate is
        built for a separate initialization phase.
    :return:
    """
    Serializable.quick_init(self, locals())
    self._opt_fun = None
    self._target = None
    self._callback = callback
    if (tf_optimizer_cls is None):
        tf_optimizer_cls = tf.train.AdamOptimizer
    if (tf_optimizer_args is None):
        tf_optimizer_args = dict(learning_rate=0.001)
    self.learning_rate = tf_optimizer_args['learning_rate']
    self._tf_optimizer = tf_optimizer_cls(**tf_optimizer_args)
    self._init_tf_optimizer = None
    if (init_learning_rate is not None):
        init_tf_optimizer_args = dict(learning_rate=init_learning_rate)
        self._init_tf_optimizer = tf_optimizer_cls(**init_tf_optimizer_args)
    self._max_epochs = max_epochs
    self._tolerance = tolerance
    self._batch_size = batch_size
    self._verbose = verbose
    self._input_vars = None
    self._train_op = None
    self._init_train_op = None
def update_opt(self, loss, target, inputs, extra_inputs=None, vars_to_optimize=None, **kwargs):
'\n :param loss: Symbolic expression for the loss function.\n :param target: A parameterized object to optimize over. It should implement methods of the\n :class:`rllab.core.paramerized.Parameterized` class.\n :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.\n :param inputs: A list of symbolic variables as inputs\n :return: No return value.\n '
self._target = target
if (vars_to_optimize is None):
vars_to_optimize = target.get_params(trainable=True)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
updates = tf.group(*update_ops)
with tf.control_dependencies([updates]):
self._train_op = self._tf_optimizer.minimize(loss, var_list=vars_to_optimize)
if (self._init_tf_optimizer is not None):
self._init_train_op = self._init_tf_optimizer.minimize(loss, var_list=vars_to_optimize)
else:
self._train_op = self._tf_optimizer.minimize(loss, var_list=vars_to_optimize)
if (self._init_tf_optimizer is not None):
self._init_train_op = self._init_tf_optimizer.minimize(loss, var_list=vars_to_optimize)
if (extra_inputs is None):
extra_inputs = list()
self._input_vars = (inputs + extra_inputs)
self._opt_fun = ext.lazydict(f_loss=(lambda : tensor_utils.compile_function((inputs + extra_inputs), loss)))
self.debug_loss = loss
self.debug_vars = target.get_params(trainable=True)
self.debug_target = target
def loss(self, inputs, extra_inputs=None):
if (extra_inputs is None):
extra_inputs = tuple()
return self._opt_fun['f_loss'](*(tuple(inputs) + extra_inputs))
def optimize(self, inputs, extra_inputs=None, callback=None):
if (len(inputs) == 0):
raise NotImplementedError
f_loss = self._opt_fun['f_loss']
if (extra_inputs is None):
extra_inputs = tuple()
last_loss = f_loss(*(tuple(inputs) + extra_inputs))
start_time = time.time()
dataset = BatchDataset(inputs, self._batch_size, extra_inputs=extra_inputs)
sess = tf.get_default_session()
for epoch in range(self._max_epochs):
if self._verbose:
logger.log(('Epoch %d' % epoch))
progbar = pyprind.ProgBar(len(inputs[0]))
for batch in dataset.iterate(update=True):
if (self._init_train_op is not None):
sess.run(self._init_train_op, dict(list(zip(self._input_vars, batch))))
self._init_train_op = None
else:
sess.run(self._train_op, dict(list(zip(self._input_vars, batch))))
if self._verbose:
progbar.update(len(batch[0]))
if self._verbose:
if progbar.active:
progbar.stop()
new_loss = f_loss(*(tuple(inputs) + extra_inputs))
if self._verbose:
logger.log(('Epoch: %d | Loss: %f' % (epoch, new_loss)))
if (self._callback or callback):
elapsed = (time.time() - start_time)
callback_args = dict(loss=new_loss, params=(self._target.get_param_values(trainable=True) if self._target else None), itr=epoch, elapsed=elapsed)
if self._callback:
self._callback(callback_args)
if callback:
callback(**callback_args)
if (abs((last_loss - new_loss)) < self._tolerance):
break
last_loss = new_loss
|
class LbfgsOptimizer(Serializable):
    """
    Performs unconstrained optimization via L-BFGS (scipy.optimize.fmin_l_bfgs_b).
    """

    def __init__(self, name, max_opt_itr=20, callback=None):
        """
        :param name: identifier for this optimizer instance.
        :param max_opt_itr: maximum number of L-BFGS iterations per optimize() call.
        :param callback: optional callable invoked after each L-BFGS iteration with a
            dict containing loss, params, itr and elapsed time.
        """
        Serializable.quick_init(self, locals())
        self._name = name
        self._max_opt_itr = max_opt_itr
        self._opt_fun = None
        self._target = None
        self._callback = callback

    def update_opt(self, loss, target, inputs, extra_inputs=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
            :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs.
        :param extra_inputs: optional list of additional symbolic inputs, appended after `inputs`.
        :return: No return value.
        """
        self._target = target

        def get_opt_output():
            # scipy's L-BFGS expects a float64 scalar loss and a flat float64 gradient.
            flat_grad = tensor_utils.flatten_tensor_variables(tf.gradients(loss, target.get_params(trainable=True)))
            return [tf.cast(loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        if (extra_inputs is None):
            extra_inputs = list()
        self._opt_fun = ext.lazydict(f_loss=(lambda : tensor_utils.compile_function((inputs + extra_inputs), loss)), f_opt=(lambda : tensor_utils.compile_function(inputs=(inputs + extra_inputs), outputs=get_opt_output())))

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the loss on concrete input values."""
        if (extra_inputs is None):
            extra_inputs = list()
        return self._opt_fun['f_loss'](*(list(inputs) + list(extra_inputs)))

    def optimize(self, inputs, extra_inputs=None):
        """
        Minimize the configured loss starting from the target's current parameters.
        The optimized flat parameter vector is written back into the target by the
        last objective evaluation performed by L-BFGS.

        :param inputs: concrete values for the symbolic inputs given to update_opt().
        :param extra_inputs: concrete values for the extra symbolic inputs, if any.
        """
        f_opt = self._opt_fun['f_opt']
        if (extra_inputs is None):
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            # Write the candidate parameters into the target, then evaluate
            # (loss, gradient) at those parameters.
            self._target.set_param_values(flat_params, trainable=True)
            # Bug fix: extra_inputs were previously dropped here, so f_opt
            # (compiled over inputs + extra_inputs) was called with too few
            # arguments whenever extra inputs were supplied.
            return f_opt(*(list(inputs) + list(extra_inputs)))

        itr = [0]
        start_time = time.time()
        if self._callback:
            def opt_callback(params):
                # Coerce both to lists so tuple/list input mixes concatenate
                # (the loss helper above does the same).
                loss = self._opt_fun['f_loss'](*(list(inputs) + list(extra_inputs)))
                elapsed = (time.time() - start_time)
                self._callback(dict(loss=loss, params=params, itr=itr[0], elapsed=elapsed))
                itr[0] += 1
        else:
            opt_callback = None
        scipy.optimize.fmin_l_bfgs_b(func=f_opt_wrapper, x0=self._target.get_param_values(trainable=True), maxiter=self._max_opt_itr, callback=opt_callback)
|
class PenaltyLbfgsOptimizer(Serializable):
    """
    Performs constrained optimization via penalized L-BFGS. The penalty term is adaptively adjusted to make sure that
    the constraint is satisfied.
    """

    def __init__(self, name, max_opt_itr=20, initial_penalty=1.0, min_penalty=0.01, max_penalty=1000000.0, increase_penalty_factor=2, decrease_penalty_factor=0.5, max_penalty_itr=10, adapt_penalty=True):
        """
        :param name: variable-scope name used when building the penalty placeholder.
        :param max_opt_itr: L-BFGS iterations per penalty trial.
        :param initial_penalty: starting penalty coefficient.
        :param min_penalty: lower clip bound for the penalty coefficient.
        :param max_penalty: upper clip bound for the penalty coefficient.
        :param increase_penalty_factor: multiplier applied while the constraint is violated.
        :param decrease_penalty_factor: multiplier applied while the constraint is satisfied.
        :param max_penalty_itr: maximum number of penalty trials per optimize() call.
        :param adapt_penalty: if False, a single trial is run with the current penalty.
        """
        Serializable.quick_init(self, locals())
        self._name = name
        self._max_opt_itr = max_opt_itr
        self._penalty = initial_penalty
        self._initial_penalty = initial_penalty
        self._min_penalty = min_penalty
        self._max_penalty = max_penalty
        self._increase_penalty_factor = increase_penalty_factor
        self._decrease_penalty_factor = decrease_penalty_factor
        self._max_penalty_itr = max_penalty_itr
        self._adapt_penalty = adapt_penalty
        self._opt_fun = None
        self._target = None
        self._max_constraint_val = None
        self._constraint_name = None

    def update_opt(self, loss, target, leq_constraint, inputs, constraint_name='constraint', *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
            :class:`rllab.core.paramerized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        (constraint_term, constraint_value) = leq_constraint
        with tf.variable_scope(self._name):
            penalty_var = tf.placeholder(tf.float32, tuple(), name='penalty')
            # Scalarized objective: loss + penalty * constraint.
            penalized_loss = (loss + (penalty_var * constraint_term))
        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            params = target.get_params(trainable=True)
            grads = tf.gradients(penalized_loss, params)
            # Parameters unreachable from the penalized loss get zero gradients
            # so the flattened gradient keeps a fixed layout.
            for (idx, (grad, param)) in enumerate(zip(grads, params)):
                if (grad is None):
                    grads[idx] = tf.zeros_like(param)
            flat_grad = tensor_utils.flatten_tensor_variables(grads)
            # scipy's L-BFGS expects float64 outputs.
            return [tf.cast(penalized_loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        self._opt_fun = ext.lazydict(f_loss=(lambda : tensor_utils.compile_function(inputs, loss, log_name='f_loss')), f_constraint=(lambda : tensor_utils.compile_function(inputs, constraint_term, log_name='f_constraint')), f_penalized_loss=(lambda : tensor_utils.compile_function(inputs=(inputs + [penalty_var]), outputs=[penalized_loss, loss, constraint_term], log_name='f_penalized_loss')), f_opt=(lambda : tensor_utils.compile_function(inputs=(inputs + [penalty_var]), outputs=get_opt_output())))

    def loss(self, inputs):
        """Evaluate the unpenalized loss on concrete input values."""
        return self._opt_fun['f_loss'](*inputs)

    def constraint_val(self, inputs):
        """Evaluate the constraint term on concrete input values."""
        return self._opt_fun['f_constraint'](*inputs)

    def optimize(self, inputs):
        """
        Run up to `max_penalty_itr` penalized L-BFGS solves, scaling the penalty
        up while the constraint is violated (or NaN) and down while it is
        satisfied, then install the best accepted parameters into the target.
        """
        inputs = tuple(inputs)
        try_penalty = np.clip(self._penalty, self._min_penalty, self._max_penalty)
        penalty_scale_factor = None
        f_opt = self._opt_fun['f_opt']
        f_penalized_loss = self._opt_fun['f_penalized_loss']

        def gen_f_opt(penalty):
            # Build the scipy objective for a fixed penalty coefficient.
            def f(flat_params):
                self._target.set_param_values(flat_params, trainable=True)
                return f_opt(*(inputs + (penalty,)))
            return f

        cur_params = self._target.get_param_values(trainable=True).astype('float64')
        opt_params = cur_params
        for penalty_itr in range(self._max_penalty_itr):
            logger.log(('trying penalty=%.3f...' % try_penalty))
            (itr_opt_params, _, _) = scipy.optimize.fmin_l_bfgs_b(func=gen_f_opt(try_penalty), x0=cur_params, maxiter=self._max_opt_itr)
            (_, try_loss, try_constraint_val) = f_penalized_loss(*(inputs + (try_penalty,)))
            logger.log(('penalty %f => loss %f, %s %f' % (try_penalty, try_loss, self._constraint_name, try_constraint_val)))
            # Accept this trial's parameters if the constraint is satisfied.
            # NOTE(review): opt_params is initialized to cur_params above, so the
            # `opt_params is None` last-resort fallback can never trigger here --
            # confirm whether the initialization or the check is the intended one.
            if ((try_constraint_val < self._max_constraint_val) or ((penalty_itr == (self._max_penalty_itr - 1)) and (opt_params is None))):
                opt_params = itr_opt_params
            if (not self._adapt_penalty):
                break
            # First trial (or NaN constraint): choose the scaling direction.
            if ((penalty_scale_factor is None) or np.isnan(try_constraint_val)):
                if ((try_constraint_val > self._max_constraint_val) or np.isnan(try_constraint_val)):
                    penalty_scale_factor = self._increase_penalty_factor
                else:
                    penalty_scale_factor = self._decrease_penalty_factor
                    opt_params = itr_opt_params
            # Stop once the penalty has crossed the feasibility boundary.
            elif ((penalty_scale_factor > 1) and (try_constraint_val <= self._max_constraint_val)):
                break
            elif ((penalty_scale_factor < 1) and (try_constraint_val >= self._max_constraint_val)):
                break
            try_penalty *= penalty_scale_factor
            try_penalty = np.clip(try_penalty, self._min_penalty, self._max_penalty)
            self._penalty = try_penalty
        self._target.set_param_values(opt_params, trainable=True)
|
def optimize(surr_obj, surr_obj_latent, inputs):
    """
    Apply one manual gradient-descent update to the policy parameters, using
    separate objectives (and step sizes) for latent vs. non-latent parameters.

    NOTE(review): this function references `self`, `step_size`, `sess` and
    `self.input_list_for_grad` as free variables -- it only works as a closure
    defined inside a method where those names are bound; confirm intended scope.

    :param surr_obj: surrogate objective differentiated w.r.t. non-latent params.
    :param surr_obj_latent: surrogate objective differentiated w.r.t. latent params.
    :param inputs: concrete values fed for `self.input_list_for_grad`.
    """
    # Split parameter keys into latent and non-latent groups; the per-group
    # step-size parameters themselves are excluded from the update.
    param_keys = []
    param_keys_latent = []
    all_keys = list(self.policy.all_params.keys())
    all_keys.remove('latent_means_stepsize')
    all_keys.remove('latent_stds_stepsize')
    for key in all_keys:
        if ('latent' not in key):
            param_keys.append(key)
        else:
            param_keys_latent.append(key)
    update_param_keys = param_keys
    update_param_keys_latent = param_keys_latent
    # Default step size for every key; latent means/stds use their own
    # (learned) step-size parameters instead.
    step_sizes_sym = {}
    for key in all_keys:
        step_sizes_sym[key] = step_size
    step_sizes_sym['latent_means'] = self.policy.all_params['latent_means_stepsize']
    step_sizes_sym['latent_stds'] = self.policy.all_params['latent_stds_stepsize']
    # Gradients of each objective w.r.t. its parameter group.
    gradients = dict(zip(update_param_keys, tf.gradients((self.policy.only_latents * surr_obj), [self.policy.all_params[key] for key in update_param_keys])))
    gradients_latent = dict(zip(update_param_keys_latent, tf.gradients(surr_obj_latent, [self.policy.all_params[key] for key in update_param_keys_latent])))
    gradients.update(gradients_latent)
    # One SGD step: theta <- theta - step_size * grad, evaluated in-session.
    update_tensor = OrderedDict(zip(all_keys, [(self.policy.all_params[key] - (step_sizes_sym[key] * tf.convert_to_tensor(gradients[key]))) for key in all_keys]))
    self.policy.all_param_vals = sess.run(update_tensor, feed_dict=dict(list(zip(self.input_list_for_grad, inputs))))
|
class Policy(Parameterized):
    """Base class for policies: maps observations from an environment spec to actions."""

    def __init__(self, env_spec):
        Parameterized.__init__(self)
        self._env_spec = env_spec

    # ------------------------------------------------------------------ #
    # Core interface (to be implemented by subclasses)                   #
    # ------------------------------------------------------------------ #

    def get_action(self, observation):
        """Return a single action (plus agent info) for one observation."""
        raise NotImplementedError

    def get_actions(self, observations):
        """Return actions (plus agent info) for a batch of observations."""
        raise NotImplementedError

    def reset(self, dones=None):
        """Reset any internal state; `dones` selects which parallel states reset."""
        pass

    def log_diagnostics(self, paths):
        """
        Log extra information per iteration based on the collected paths
        """
        pass

    def terminate(self):
        """
        Clean up operation
        """
        pass

    # ------------------------------------------------------------------ #
    # Descriptive properties                                             #
    # ------------------------------------------------------------------ #

    @property
    def vectorized(self):
        """
        Indicates whether the policy is vectorized. If True, it should implement get_actions(), and support resetting
        with multiple simultaneous states.
        """
        return False

    @property
    def recurrent(self):
        """
        Indicates whether the policy is recurrent.
        :return:
        """
        return False

    @property
    def observation_space(self):
        return self._env_spec.observation_space

    @property
    def action_space(self):
        return self._env_spec.action_space

    @property
    def env_spec(self):
        return self._env_spec

    @property
    def state_info_keys(self):
        """
        Return keys for the information related to the policy's state when taking an action.
        :return:
        """
        return [spec[0] for spec in self.state_info_specs]

    @property
    def state_info_specs(self):
        """
        Return keys and shapes for the information related to the policy's state when taking an action.
        :return:
        """
        return []
|
class StochasticPolicy(Policy):
    """A policy that defines a probability distribution over actions."""

    @property
    def distribution(self):
        """
        :rtype Distribution
        """
        raise NotImplementedError

    def dist_info_sym(self, obs_var, state_info_vars):
        """
        Return the symbolic distribution information about the actions.
        :param obs_var: symbolic variable for observations
        :param state_info_vars: a dictionary whose values should contain information about the state of the policy at
            the time it received the observation
        :return:
        """
        raise NotImplementedError

    def dist_info(self, obs, state_infos):
        """
        Return the distribution information about the actions.
        :param obs: observation values
        :param state_infos: a dictionary whose values should contain information about the state of the policy at
            the time it received the observation
        :return:
        """
        raise NotImplementedError
|
class CategoricalGRUPolicy(StochasticPolicy, LayersPowered, Serializable):
    """
    Recurrent (GRU) categorical policy for discrete action spaces. Optionally
    concatenates the previous (one-hot) action to the observation, and
    optionally runs inputs through a feature network before the GRU.
    """

    def __init__(self, name, env_spec, hidden_dim=32, feature_network=None, state_include_action=True, hidden_nonlinearity=tf.tanh, gru_layer_cls=L.GRULayer):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param feature_network: optional network mapping flat (obs[, prev action]) inputs
            to features fed into the GRU.
        :param state_include_action: if True, the previous one-hot action is appended
            to the observation as part of the input.
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        with tf.variable_scope(name):
            assert isinstance(env_spec.action_space, Discrete)
            Serializable.quick_init(self, locals())
            super(CategoricalGRUPolicy, self).__init__(env_spec)
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim
            if state_include_action:
                input_dim = (obs_dim + action_dim)
            else:
                input_dim = obs_dim
            # Input shape: (batch, time, input_dim).
            l_input = L.InputLayer(shape=(None, None, input_dim), name='input')
            if (feature_network is None):
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[(- 1)]
                l_flat_feature = feature_network.output_layer
                # Reshape flat (batch*time, feature) output back to (batch, time, feature).
                # NOTE: tf.pack is the pre-1.0 name of tf.stack.
                l_feature = L.OpLayer(l_flat_feature, extras=[l_input], name='reshape_feature', op=(lambda flat_feature, input: tf.reshape(flat_feature, tf.pack([tf.shape(input)[0], tf.shape(input)[1], feature_dim]))), shape_op=(lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)))
            prob_network = GRUNetwork(input_shape=(feature_dim,), input_layer=l_feature, output_dim=env_spec.action_space.n, hidden_dim=hidden_dim, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=tf.nn.softmax, gru_layer_cls=gru_layer_cls, name='prob_network')
            self.prob_network = prob_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action
            flat_input_var = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name='flat_input')
            if (feature_network is None):
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})
            # Single-timestep step function: (flat input, prev hidden) ->
            # (action probabilities, new hidden state).
            self.f_step_prob = tensor_utils.compile_function([flat_input_var, prob_network.step_prev_hidden_layer.input_var], L.get_output([prob_network.step_output_layer, prob_network.step_hidden_layer], {prob_network.step_input_layer: feature_var}))
            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim
            # Per-environment recurrent state, initialized by reset().
            self.prev_actions = None
            self.prev_hiddens = None
            self.dist = RecurrentCategorical(env_spec.action_space.n)
            out_layers = [prob_network.output_layer]
            if (feature_network is not None):
                out_layers.append(feature_network.output_layer)
            LayersPowered.__init__(self, out_layers)

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic action probabilities for a (batch, time, obs) observation tensor."""
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.pack([n_batches, n_steps, (- 1)]))
        obs_var = tf.cast(obs_var, tf.float32)
        if self.state_include_action:
            prev_action_var = tf.cast(state_info_vars['prev_action'], tf.float32)
            all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        if (self.feature_network is None):
            return dict(prob=L.get_output(self.prob_network.output_layer, {self.l_input: all_input_var}))
        else:
            flat_input_var = tf.reshape(all_input_var, ((- 1), self.input_dim))
            return dict(prob=L.get_output(self.prob_network.output_layer, {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}))

    @property
    def vectorized(self):
        return True

    def reset(self, dones=None):
        """Reset recurrent state for the environments flagged done (all by default)."""
        if (dones is None):
            dones = [True]
        dones = np.asarray(dones)
        # (Re)allocate state buffers when the number of parallel envs changes.
        if ((self.prev_actions is None) or (len(dones) != len(self.prev_actions))):
            self.prev_actions = np.zeros((len(dones), self.action_space.flat_dim))
            self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))
        self.prev_actions[dones] = 0.0
        self.prev_hiddens[dones] = self.prob_network.hid_init_param.eval()

    @overrides
    def get_action(self, observation):
        """Single-observation convenience wrapper around get_actions()."""
        (actions, agent_infos) = self.get_actions([observation])
        return (actions[0], {k: v[0] for (k, v) in agent_infos.items()})

    @overrides
    def get_actions(self, observations):
        """Sample one action per parallel environment and advance recurrent state."""
        flat_obs = self.observation_space.flatten_n(observations)
        if self.state_include_action:
            assert (self.prev_actions is not None)
            all_input = np.concatenate([flat_obs, self.prev_actions], axis=(- 1))
        else:
            all_input = flat_obs
        (probs, hidden_vec) = self.f_step_prob(all_input, self.prev_hiddens)
        # Sample actions from the per-row categorical distributions.
        actions = special.weighted_sample_n(probs, np.arange(self.action_space.n))
        prev_actions = self.prev_actions
        self.prev_actions = self.action_space.flatten_n(actions)
        self.prev_hiddens = hidden_vec
        agent_info = dict(prob=probs)
        if self.state_include_action:
            # Report the action fed as input this step (i.e. the previous one).
            agent_info['prev_action'] = np.copy(prev_actions)
        return (actions, agent_info)

    @property
    @overrides
    def recurrent(self):
        return True

    @property
    def distribution(self):
        return self.dist

    @property
    def state_info_specs(self):
        if self.state_include_action:
            return [('prev_action', (self.action_dim,))]
        else:
            return []
|
class CategoricalLSTMPolicy(StochasticPolicy, LayersPowered, Serializable):
    """
    Recurrent (LSTM) categorical policy for discrete action spaces. Like the GRU
    variant but additionally tracks a per-environment LSTM cell state.
    """

    def __init__(self, name, env_spec, hidden_dim=32, feature_network=None, prob_network=None, state_include_action=True, hidden_nonlinearity=tf.tanh, forget_bias=1.0, use_peepholes=False, lstm_layer_cls=L.LSTMLayer):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param feature_network: optional network mapping flat (obs[, prev action]) inputs
            to features fed into the LSTM.
        :param prob_network: manually specified LSTM network; built here when None.
        :param state_include_action: if True, the previous one-hot action is appended
            to the observation as part of the input.
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        with tf.variable_scope(name):
            assert isinstance(env_spec.action_space, Discrete)
            Serializable.quick_init(self, locals())
            super(CategoricalLSTMPolicy, self).__init__(env_spec)
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim
            if state_include_action:
                input_dim = (obs_dim + action_dim)
            else:
                input_dim = obs_dim
            # Input shape: (batch, time, input_dim).
            l_input = L.InputLayer(shape=(None, None, input_dim), name='input')
            if (feature_network is None):
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[(- 1)]
                l_flat_feature = feature_network.output_layer
                # Reshape flat (batch*time, feature) output back to (batch, time, feature).
                # NOTE: tf.pack is the pre-1.0 name of tf.stack.
                l_feature = L.OpLayer(l_flat_feature, extras=[l_input], name='reshape_feature', op=(lambda flat_feature, input: tf.reshape(flat_feature, tf.pack([tf.shape(input)[0], tf.shape(input)[1], feature_dim]))), shape_op=(lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)))
            if (prob_network is None):
                prob_network = LSTMNetwork(input_shape=(feature_dim,), input_layer=l_feature, output_dim=env_spec.action_space.n, hidden_dim=hidden_dim, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=tf.nn.softmax, forget_bias=forget_bias, use_peepholes=use_peepholes, lstm_layer_cls=lstm_layer_cls, name='prob_network')
            self.prob_network = prob_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action
            flat_input_var = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name='flat_input')
            if (feature_network is None):
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})
            # Single-timestep step function: (flat input, prev hidden, prev cell) ->
            # (action probabilities, new hidden state, new cell state).
            self.f_step_prob = tensor_utils.compile_function([flat_input_var, prob_network.step_prev_hidden_layer.input_var, prob_network.step_prev_cell_layer.input_var], L.get_output([prob_network.step_output_layer, prob_network.step_hidden_layer, prob_network.step_cell_layer], {prob_network.step_input_layer: feature_var}))
            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim
            # Per-environment recurrent state, initialized by reset().
            self.prev_actions = None
            self.prev_hiddens = None
            self.prev_cells = None
            self.dist = RecurrentCategorical(env_spec.action_space.n)
            out_layers = [prob_network.output_layer]
            if (feature_network is not None):
                out_layers.append(feature_network.output_layer)
            LayersPowered.__init__(self, out_layers)

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic action probabilities for a (batch, time, obs) observation tensor."""
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.pack([n_batches, n_steps, (- 1)]))
        obs_var = tf.cast(obs_var, tf.float32)
        if self.state_include_action:
            prev_action_var = state_info_vars['prev_action']
            prev_action_var = tf.cast(prev_action_var, tf.float32)
            all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        if (self.feature_network is None):
            return dict(prob=L.get_output(self.prob_network.output_layer, {self.l_input: all_input_var}))
        else:
            flat_input_var = tf.reshape(all_input_var, ((- 1), self.input_dim))
            return dict(prob=L.get_output(self.prob_network.output_layer, {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}))

    @property
    def vectorized(self):
        return True

    def reset(self, dones=None):
        """Reset recurrent state for the environments flagged done (all by default)."""
        if (dones is None):
            dones = [True]
        dones = np.asarray(dones)
        # (Re)allocate state buffers when the number of parallel envs changes.
        if ((self.prev_actions is None) or (len(dones) != len(self.prev_actions))):
            self.prev_actions = np.zeros((len(dones), self.action_space.flat_dim))
            self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))
            self.prev_cells = np.zeros((len(dones), self.hidden_dim))
        self.prev_actions[dones] = 0.0
        self.prev_hiddens[dones] = self.prob_network.hid_init_param.eval()
        self.prev_cells[dones] = self.prob_network.cell_init_param.eval()

    @overrides
    def get_action(self, observation):
        """Single-observation convenience wrapper around get_actions()."""
        (actions, agent_infos) = self.get_actions([observation])
        return (actions[0], {k: v[0] for (k, v) in agent_infos.items()})

    @overrides
    def get_actions(self, observations):
        """Sample one action per parallel environment and advance recurrent state."""
        flat_obs = self.observation_space.flatten_n(observations)
        if self.state_include_action:
            assert (self.prev_actions is not None)
            all_input = np.concatenate([flat_obs, self.prev_actions], axis=(- 1))
        else:
            all_input = flat_obs
        (probs, hidden_vec, cell_vec) = self.f_step_prob(all_input, self.prev_hiddens, self.prev_cells)
        # Sample actions from the per-row categorical distributions.
        actions = special.weighted_sample_n(probs, np.arange(self.action_space.n))
        prev_actions = self.prev_actions
        self.prev_actions = self.action_space.flatten_n(actions)
        self.prev_hiddens = hidden_vec
        self.prev_cells = cell_vec
        agent_info = dict(prob=probs)
        if self.state_include_action:
            # Report the action fed as input this step (i.e. the previous one).
            agent_info['prev_action'] = np.copy(prev_actions)
        return (actions, agent_info)

    @property
    @overrides
    def recurrent(self):
        return True

    @property
    def distribution(self):
        return self.dist

    @property
    def state_info_specs(self):
        if self.state_include_action:
            return [('prev_action', (self.action_dim,))]
        else:
            return []
|
class CategoricalMLPPolicy(StochasticPolicy, LayersPowered, Serializable):
    """
    Feed-forward (MLP) categorical policy for discrete action spaces: maps a
    flat observation to a softmax distribution over actions.
    """

    def __init__(self, name, env_spec, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, prob_network=None):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
            are ignored
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Discrete)
        with tf.variable_scope(name):
            if (prob_network is None):
                prob_network = MLP(input_shape=(env_spec.observation_space.flat_dim,), output_dim=env_spec.action_space.n, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=tf.nn.softmax, name='prob_network')
            self._l_prob = prob_network.output_layer
            self._l_obs = prob_network.input_layer
            # Compiled function: flat observations -> action probabilities.
            self._f_prob = tensor_utils.compile_function([prob_network.input_layer.input_var], L.get_output(prob_network.output_layer))
            self._dist = Categorical(env_spec.action_space.n)
            super(CategoricalMLPPolicy, self).__init__(env_spec)
            LayersPowered.__init__(self, [prob_network.output_layer])

    @property
    def vectorized(self):
        return True

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic action probabilities for an observation tensor."""
        return dict(prob=L.get_output(self._l_prob, {self._l_obs: tf.cast(obs_var, tf.float32)}))

    @overrides
    def dist_info(self, obs, state_infos=None):
        """Numeric action probabilities for flat observation values."""
        return dict(prob=self._f_prob(obs))

    @overrides
    def get_action(self, observation):
        """Sample a single action from the policy's softmax output."""
        flat_obs = self.observation_space.flatten(observation)
        prob = self._f_prob([flat_obs])[0]
        action = self.action_space.weighted_sample(prob)
        return (action, dict(prob=prob))

    def get_actions(self, observations):
        """Sample one action per observation in a batch."""
        flat_obs = self.observation_space.flatten_n(observations)
        probs = self._f_prob(flat_obs)
        actions = list(map(self.action_space.weighted_sample, probs))
        return (actions, dict(prob=probs))

    @property
    def distribution(self):
        return self._dist
|
class GaussianGRUPolicy(StochasticPolicy, LayersPowered, Serializable):
    """
    Recurrent (GRU) Gaussian policy for continuous action spaces: the GRU
    outputs per-dimension action means, with a (learnable) state-independent
    log standard deviation.
    """

    def __init__(self, name, env_spec, hidden_dim=32, feature_network=None, state_include_action=True, hidden_nonlinearity=tf.tanh, gru_layer_cls=L.GRULayer, learn_std=True, init_std=1.0, output_nonlinearity=None):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param feature_network: optional network mapping flat (obs[, prev action]) inputs
            to features fed into the GRU.
        :param state_include_action: if True, the previous action is appended to the
            observation as part of the input.
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param learn_std: whether the log-std parameter is trainable.
        :param init_std: initial standard deviation of the action distribution.
        :param output_nonlinearity: nonlinearity applied to the mean outputs.
        :return:
        """
        with tf.variable_scope(name):
            Serializable.quick_init(self, locals())
            super(GaussianGRUPolicy, self).__init__(env_spec)
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim
            if state_include_action:
                input_dim = (obs_dim + action_dim)
            else:
                input_dim = obs_dim
            # Input shape: (batch, time, input_dim).
            l_input = L.InputLayer(shape=(None, None, input_dim), name='input')
            if (feature_network is None):
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[(- 1)]
                l_flat_feature = feature_network.output_layer
                # Reshape flat (batch*time, feature) output back to (batch, time, feature).
                # NOTE: tf.pack is the pre-1.0 name of tf.stack.
                l_feature = L.OpLayer(l_flat_feature, extras=[l_input], name='reshape_feature', op=(lambda flat_feature, input: tf.reshape(flat_feature, tf.pack([tf.shape(input)[0], tf.shape(input)[1], feature_dim]))), shape_op=(lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)))
            mean_network = GRUNetwork(input_shape=(feature_dim,), input_layer=l_feature, output_dim=action_dim, hidden_dim=hidden_dim, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity, gru_layer_cls=gru_layer_cls, name='mean_network')
            # State-independent log-std; the step-mode layer shares the same parameter.
            l_log_std = L.ParamLayer(mean_network.input_layer, num_units=action_dim, param=tf.constant_initializer(np.log(init_std)), name='output_log_std', trainable=learn_std)
            l_step_log_std = L.ParamLayer(mean_network.step_input_layer, num_units=action_dim, param=l_log_std.param, name='step_output_log_std', trainable=learn_std)
            self.mean_network = mean_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action
            flat_input_var = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name='flat_input')
            if (feature_network is None):
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})
            # Single-timestep step function: (flat input, prev hidden) ->
            # (action means, log stds, new hidden state).
            self.f_step_mean_std = tensor_utils.compile_function([flat_input_var, mean_network.step_prev_hidden_layer.input_var], L.get_output([mean_network.step_output_layer, l_step_log_std, mean_network.step_hidden_layer], {mean_network.step_input_layer: feature_var}))
            self.l_log_std = l_log_std
            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim
            # Per-environment recurrent state, initialized by reset().
            self.prev_actions = None
            self.prev_hiddens = None
            self.dist = RecurrentDiagonalGaussian(action_dim)
            out_layers = [mean_network.output_layer, l_log_std, l_step_log_std]
            if (feature_network is not None):
                out_layers.append(feature_network.output_layer)
            LayersPowered.__init__(self, out_layers)

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic (mean, log_std) for a (batch, time, obs) observation tensor."""
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.pack([n_batches, n_steps, (- 1)]))
        if self.state_include_action:
            prev_action_var = state_info_vars['prev_action']
            all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        if (self.feature_network is None):
            (means, log_stds) = L.get_output([self.mean_network.output_layer, self.l_log_std], {self.l_input: all_input_var})
        else:
            flat_input_var = tf.reshape(all_input_var, ((- 1), self.input_dim))
            (means, log_stds) = L.get_output([self.mean_network.output_layer, self.l_log_std], {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var})
        return dict(mean=means, log_std=log_stds)

    @property
    def vectorized(self):
        return True

    def reset(self, dones=None):
        """Reset recurrent state for the environments flagged done (all by default)."""
        if (dones is None):
            dones = [True]
        dones = np.asarray(dones)
        # (Re)allocate state buffers when the number of parallel envs changes.
        if ((self.prev_actions is None) or (len(dones) != len(self.prev_actions))):
            self.prev_actions = np.zeros((len(dones), self.action_space.flat_dim))
            self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))
        self.prev_actions[dones] = 0.0
        self.prev_hiddens[dones] = self.mean_network.hid_init_param.eval()

    @overrides
    def get_action(self, observation):
        """Single-observation convenience wrapper around get_actions()."""
        (actions, agent_infos) = self.get_actions([observation])
        return (actions[0], {k: v[0] for (k, v) in agent_infos.items()})

    @overrides
    def get_actions(self, observations):
        """Sample one action per parallel environment and advance recurrent state."""
        flat_obs = self.observation_space.flatten_n(observations)
        if self.state_include_action:
            assert (self.prev_actions is not None)
            all_input = np.concatenate([flat_obs, self.prev_actions], axis=(- 1))
        else:
            all_input = flat_obs
        (means, log_stds, hidden_vec) = self.f_step_mean_std(all_input, self.prev_hiddens)
        # Reparameterized sample: mean + std * standard normal noise.
        rnd = np.random.normal(size=means.shape)
        actions = ((rnd * np.exp(log_stds)) + means)
        prev_actions = self.prev_actions
        self.prev_actions = self.action_space.flatten_n(actions)
        self.prev_hiddens = hidden_vec
        agent_info = dict(mean=means, log_std=log_stds)
        if self.state_include_action:
            # Report the action fed as input this step (i.e. the previous one).
            agent_info['prev_action'] = np.copy(prev_actions)
        return (actions, agent_info)

    @property
    @overrides
    def recurrent(self):
        return True

    @property
    def distribution(self):
        return self.dist

    @property
    def state_info_specs(self):
        if self.state_include_action:
            return [('prev_action', (self.action_dim,))]
        else:
            return []

    def log_diagnostics(self, paths):
        """Record the average policy standard deviation over the sampled paths."""
        log_stds = np.vstack([path['agent_infos']['log_std'] for path in paths])
        logger.record_tabular('AveragePolicyStd', np.mean(np.exp(log_stds)))
|
class GaussianLSTMPolicy(StochasticPolicy, LayersPowered, Serializable):
    """Recurrent Gaussian policy: an LSTM produces the action mean at each
    time step; the log-std is a (optionally trainable) parameter layer
    shared across time steps."""

    def __init__(self, name, env_spec, hidden_dim=32, feature_network=None, state_include_action=True, hidden_nonlinearity=tf.tanh, learn_std=True, init_std=1.0, output_nonlinearity=None, lstm_layer_cls=L.LSTMLayer):
        """
        :param name: variable-scope name under which all parameters are created
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of the LSTM hidden/cell state
        :param feature_network: optional network applied to the flattened
            input before the LSTM; if None the raw input is used directly
        :param state_include_action: if True, the previous action is
            concatenated to the observation as the step input
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param learn_std: whether the log-std parameter is trainable
        :param init_std: initial standard deviation of the action distribution
        :return:
        """
        with tf.variable_scope(name):
            Serializable.quick_init(self, locals())
            super(GaussianLSTMPolicy, self).__init__(env_spec)
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim
            if state_include_action:
                input_dim = (obs_dim + action_dim)
            else:
                input_dim = obs_dim
            # Batched sequence input: (batch, time, input_dim).
            l_input = L.InputLayer(shape=(None, None, input_dim), name='input')
            if (feature_network is None):
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[(- 1)]
                l_flat_feature = feature_network.output_layer
                # The feature network consumes flattened (batch*time, dim)
                # input; reshape its output back to (batch, time, feature_dim).
                l_feature = L.OpLayer(l_flat_feature, extras=[l_input], name='reshape_feature', op=(lambda flat_feature, input: tf.reshape(flat_feature, tf.pack([tf.shape(input)[0], tf.shape(input)[1], feature_dim]))), shape_op=(lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)))
            mean_network = LSTMNetwork(input_shape=(feature_dim,), input_layer=l_feature, output_dim=action_dim, hidden_dim=hidden_dim, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity, lstm_layer_cls=lstm_layer_cls, name='mean_network')
            # Log-std parameter layer broadcast over batch/time; the step
            # variant below shares the same underlying parameter tensor.
            l_log_std = L.ParamLayer(mean_network.input_layer, num_units=action_dim, param=tf.constant_initializer(np.log(init_std)), name='output_log_std', trainable=learn_std)
            l_step_log_std = L.ParamLayer(mean_network.step_input_layer, num_units=action_dim, param=l_log_std.param, name='step_output_log_std', trainable=learn_std)
            self.mean_network = mean_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action
            flat_input_var = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name='flat_input')
            if (feature_network is None):
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})
            # Single-step function used for sampling:
            # (input, prev_hidden, prev_cell) -> (mean, log_std, hidden, cell).
            self.f_step_mean_std = tensor_utils.compile_function([flat_input_var, mean_network.step_prev_hidden_layer.input_var, mean_network.step_prev_cell_layer.input_var], L.get_output([mean_network.step_output_layer, l_step_log_std, mean_network.step_hidden_layer, mean_network.step_cell_layer], {mean_network.step_input_layer: feature_var}))
            self.l_log_std = l_log_std
            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim
            # Per-environment recurrent state; allocated lazily in reset().
            self.prev_actions = None
            self.prev_hiddens = None
            self.prev_cells = None
            self.dist = RecurrentDiagonalGaussian(action_dim)
            out_layers = [mean_network.output_layer, l_log_std]
            if (feature_network is not None):
                out_layers.append(feature_network.output_layer)
            LayersPowered.__init__(self, out_layers)

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic (mean, log_std) over whole trajectories.

        obs_var is reshaped to (batch, time, -1); when state_include_action
        is set, state_info_vars must carry 'prev_action'.
        """
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.pack([n_batches, n_steps, (- 1)]))
        if self.state_include_action:
            prev_action_var = state_info_vars['prev_action']
            all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        if (self.feature_network is None):
            (means, log_stds) = L.get_output([self.mean_network.output_layer, self.l_log_std], {self.l_input: all_input_var})
        else:
            # Feed the feature network its flattened (batch*time, dim) view.
            flat_input_var = tf.reshape(all_input_var, ((- 1), self.input_dim))
            (means, log_stds) = L.get_output([self.mean_network.output_layer, self.l_log_std], {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var})
        return dict(mean=means, log_std=log_stds)

    @property
    def vectorized(self):
        # Supports batched rollouts.
        return True

    def reset(self, dones=None):
        """Reset recurrent state for the environments flagged in `dones`."""
        if (dones is None):
            dones = [True]
        dones = np.asarray(dones)
        if ((self.prev_actions is None) or (len(dones) != len(self.prev_actions))):
            # (Re)allocate state buffers when the batch size changes.
            self.prev_actions = np.zeros((len(dones), self.action_space.flat_dim))
            self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))
            self.prev_cells = np.zeros((len(dones), self.hidden_dim))
        self.prev_actions[dones] = 0.0
        # Initialize hidden/cell state from the network's learned init params.
        self.prev_hiddens[dones] = self.mean_network.hid_init_param.eval()
        self.prev_cells[dones] = self.mean_network.cell_init_param.eval()

    @overrides
    def get_action(self, observation):
        """Sample one action by delegating to the batched get_actions."""
        (actions, agent_infos) = self.get_actions([observation])
        return (actions[0], {k: v[0] for (k, v) in agent_infos.items()})

    @overrides
    def get_actions(self, observations):
        """Sample actions for a batch of observations, advancing LSTM state."""
        flat_obs = self.observation_space.flatten_n(observations)
        if self.state_include_action:
            assert (self.prev_actions is not None)
            all_input = np.concatenate([flat_obs, self.prev_actions], axis=(- 1))
        else:
            all_input = flat_obs
        (means, log_stds, hidden_vec, cell_vec) = self.f_step_mean_std(all_input, self.prev_hiddens, self.prev_cells)
        # Reparameterized sample: mean + std * N(0, 1).
        rnd = np.random.normal(size=means.shape)
        actions = ((rnd * np.exp(log_stds)) + means)
        prev_actions = self.prev_actions
        self.prev_actions = self.action_space.flatten_n(actions)
        self.prev_hiddens = hidden_vec
        self.prev_cells = cell_vec
        agent_info = dict(mean=means, log_std=log_stds)
        if self.state_include_action:
            # Report the action that was fed as input for this step.
            agent_info['prev_action'] = np.copy(prev_actions)
        return (actions, agent_info)

    @property
    @overrides
    def recurrent(self):
        return True

    @property
    def distribution(self):
        return self.dist

    @property
    def state_info_specs(self):
        # Extra per-step state the sampler must record.
        if self.state_include_action:
            return [('prev_action', (self.action_dim,))]
        else:
            return []
|
class GaussianMLPPolicy(StochasticPolicy, LayersPowered, Serializable):
    """Feed-forward Gaussian policy: an MLP outputs the action mean; the
    std is a shared parameter layer or, with adaptive_std, a second MLP.

    NOTE(review): a second class with the same name is defined later in this
    module and shadows this one at import time — confirm which is intended.
    """

    def __init__(self, name, env_spec, hidden_sizes=(32, 32), learn_std=True, init_std=1.0, adaptive_std=False, std_share_network=False, std_hidden_sizes=(32, 32), min_std=1e-06, std_hidden_nonlinearity=tf.nn.tanh, hidden_nonlinearity=tf.nn.tanh, output_nonlinearity=None, mean_network=None, std_network=None, std_parametrization='exp'):
        """
        :param env_spec: environment spec; the action space must be a Box
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: Is std trainable
        :param init_std: Initial std
        :param adaptive_std: if True, the std is an MLP of the observation
        :param std_share_network:
        :param std_hidden_sizes: list of sizes for the fully-connected layers for std
        :param min_std: lower bound on the std, to avoid numerical issues
        :param std_hidden_nonlinearity:
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :param std_parametrization: how the std should be parametrized:
            - exp: std = exp(param) (the log of the std is stored)
            - softplus: std = log(1 + exp(param))
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)
        with tf.variable_scope(name):
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim
            if (mean_network is None):
                mean_network = MLP(name='mean_network', input_shape=(obs_dim,), output_dim=action_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity)
            self._mean_network = mean_network
            l_mean = mean_network.output_layer
            obs_var = mean_network.input_layer.input_var
            if (std_network is not None):
                l_std_param = std_network.output_layer
            elif adaptive_std:
                std_network = MLP(name='std_network', input_shape=(obs_dim,), input_layer=mean_network.input_layer, output_dim=action_dim, hidden_sizes=std_hidden_sizes, hidden_nonlinearity=std_hidden_nonlinearity, output_nonlinearity=None)
                l_std_param = std_network.output_layer
            else:
                # Shared parameter layer; initialize so that std == init_std.
                if (std_parametrization == 'exp'):
                    init_std_param = np.log(init_std)
                elif (std_parametrization == 'softplus'):
                    init_std_param = np.log((np.exp(init_std) - 1))
                else:
                    raise NotImplementedError
                l_std_param = L.ParamLayer(mean_network.input_layer, num_units=action_dim, param=tf.constant_initializer(init_std_param), name='output_std_param', trainable=learn_std)
            self.std_parametrization = std_parametrization
            # Clamp the raw parameter at the value that yields min_std.
            if (std_parametrization == 'exp'):
                min_std_param = np.log(min_std)
            elif (std_parametrization == 'softplus'):
                min_std_param = np.log((np.exp(min_std) - 1))
            else:
                raise NotImplementedError
            self.min_std_param = min_std_param
            self._l_mean = l_mean
            self._l_std_param = l_std_param
            self._dist = DiagonalGaussian(action_dim)
            LayersPowered.__init__(self, [l_mean, l_std_param])
            super(GaussianMLPPolicy, self).__init__(env_spec)
            dist_info_sym = self.dist_info_sym(mean_network.input_layer.input_var, dict())
            mean_var = dist_info_sym['mean']
            log_std_var = dist_info_sym['log_std']
            # Numeric (mean, log_std) for a batch of flattened observations.
            self._f_dist = tensor_utils.compile_function(inputs=[obs_var], outputs=[mean_var, log_std_var])

    @property
    def vectorized(self):
        # Supports batched rollouts.
        return True

    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic (mean, log_std) of the action distribution for obs_var."""
        (mean_var, std_param_var) = L.get_output([self._l_mean, self._l_std_param], obs_var)
        if (self.min_std_param is not None):
            std_param_var = tf.maximum(std_param_var, self.min_std_param)
        if (self.std_parametrization == 'exp'):
            log_std_var = std_param_var
        elif (self.std_parametrization == 'softplus'):
            # log_std = log(softplus(param)) = log(log(1 + exp(param)))
            log_std_var = tf.log(tf.log((1.0 + tf.exp(std_param_var))))
        else:
            raise NotImplementedError
        return dict(mean=mean_var, log_std=log_std_var)

    @overrides
    def get_action(self, observation):
        """Sample one action via the reparameterization mean + std * N(0,1)."""
        flat_obs = self.observation_space.flatten(observation)
        (mean, log_std) = [x[0] for x in self._f_dist([flat_obs])]
        rnd = np.random.normal(size=mean.shape)
        action = ((rnd * np.exp(log_std)) + mean)
        return (action, dict(mean=mean, log_std=log_std))

    def get_actions(self, observations):
        """Batched version of get_action."""
        flat_obs = self.observation_space.flatten_n(observations)
        (means, log_stds) = self._f_dist(flat_obs)
        rnd = np.random.normal(size=means.shape)
        actions = ((rnd * np.exp(log_stds)) + means)
        return (actions, dict(mean=means, log_std=log_stds))

    def get_reparam_action_sym(self, obs_var, action_var, old_dist_info_vars):
        """
        Given observations, old actions, and distribution of old actions, return a symbolically reparameterized
        representation of the actions in terms of the policy parameters
        :param obs_var:
        :param action_var:
        :param old_dist_info_vars:
        :return:
        """
        # BUG FIX: removed a leftover `import pdb; pdb.set_trace()` debugger
        # breakpoint that halted any caller of this method.
        # NOTE(review): action_var is passed as the (unused) state_info_vars
        # argument of dist_info_sym — confirm this is intentional.
        new_dist_info_vars = self.dist_info_sym(obs_var, action_var)
        (new_mean_var, new_log_std_var) = (new_dist_info_vars['mean'], new_dist_info_vars['log_std'])
        (old_mean_var, old_log_std_var) = (old_dist_info_vars['mean'], old_dist_info_vars['log_std'])
        # Recover the noise that generated action_var under the old
        # distribution, then re-apply it through the new parameters.
        epsilon_var = ((action_var - old_mean_var) / (tf.exp(old_log_std_var) + 1e-08))
        new_action_var = (new_mean_var + (epsilon_var * tf.exp(new_log_std_var)))
        return new_action_var

    def log_diagnostics(self, paths):
        """Record the average policy standard deviation over sampled paths."""
        log_stds = np.vstack([path['agent_infos']['log_std'] for path in paths])
        logger.record_tabular('AveragePolicyStd', np.mean(np.exp(log_stds)))

    @property
    def distribution(self):
        return self._dist
|
@contextmanager
def suppress_params_loading():
global load_params
load_params = False
(yield)
load_params = True
|
class CategoricalMLPPolicy(StochasticPolicy, Serializable):
    """MLP policy over a Discrete action space: the network outputs a
    softmax probability vector and actions are sampled from it."""

    def __init__(self, name, env_spec, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, prob_network=None):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Discrete)
        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim
        with tf.variable_scope(name):
            if (prob_network is None):
                prob_network = self.create_MLP(input_shape=(obs_dim,), output_dim=env_spec.action_space.n, hidden_sizes=hidden_sizes, name='prob_network')
            (self._l_obs, self._l_prob) = self.forward_MLP('prob_network', prob_network, n_hidden=len(hidden_sizes), input_shape=(obs_dim,), hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=tf.nn.softmax, reuse=None)
            # BUG FIX: the lambda previously referenced an undefined name
            # `output_nonlinearity` (NameError whenever _forward_out was
            # called); the output layer is a softmax, matching the
            # forward_MLP call above.
            self._forward_out = (lambda x, is_train: self.forward_MLP('prob_network', prob_network, n_hidden=len(hidden_sizes), hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=tf.nn.softmax, input_tensor=x, is_training=is_train)[1])
            self._f_prob = tensor_utils.compile_function([self._l_obs], L.get_output(self._l_prob))
            self._dist = Categorical(env_spec.action_space.n)

    @property
    def vectorized(self):
        # Supports batched rollouts.
        return True

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars=None, is_training=True):
        """Symbolic action probabilities for obs_var."""
        output = self._forward_out(tf.cast(obs_var, tf.float32), is_training)
        return dict(prob=output)

    @overrides
    def dist_info(self, obs, state_infos=None):
        """Numeric action probabilities for a batch of flat observations."""
        return dict(prob=self._f_prob(obs))

    @overrides
    def get_action(self, observation):
        """Sample one action from the categorical distribution."""
        flat_obs = self.observation_space.flatten(observation)
        prob = self._f_prob([flat_obs])[0]
        action = self.action_space.weighted_sample(prob)
        return (action, dict(prob=prob))

    def get_actions(self, observations):
        """Batched version of get_action."""
        flat_obs = self.observation_space.flatten_n(observations)
        probs = self._f_prob(flat_obs)
        actions = list(map(self.action_space.weighted_sample, probs))
        return (actions, dict(prob=probs))

    @property
    def distribution(self):
        return self._dist

    def create_MLP(self, name, output_dim, hidden_sizes, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer, output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer, input_shape=None, weight_normalization=False):
        """Create the raw W/b parameters of an MLP; returns a dict keyed
        'W0', 'b0', ..., with the output layer at index len(hidden_sizes)."""
        assert (input_shape is not None)
        cur_shape = input_shape
        with tf.variable_scope(name):
            all_params = {}
            for (idx, hidden_size) in enumerate(hidden_sizes):
                (W, b, cur_shape) = make_dense_layer(cur_shape, num_units=hidden_size, name=('hidden_%d' % idx), W=hidden_W_init, b=hidden_b_init, weight_norm=weight_normalization)
                all_params[('W' + str(idx))] = W
                all_params[('b' + str(idx))] = b
            (W, b, _) = make_dense_layer(cur_shape, num_units=output_dim, name='output', W=output_W_init, b=output_b_init, weight_norm=weight_normalization)
            all_params[('W' + str(len(hidden_sizes)))] = W
            all_params[('b' + str(len(hidden_sizes)))] = b
            return all_params

    def forward_MLP(self, name, all_params, input_tensor=None, input_shape=None, n_hidden=(- 1), hidden_nonlinearity=tf.identity, output_nonlinearity=tf.identity, batch_normalization=False, reuse=True, is_training=False):
        """Build the forward pass of an MLP from a create_MLP params dict.
        Returns (input_tensor_or_layer, output_tensor)."""
        with tf.variable_scope(name):
            if (input_tensor is None):
                assert (input_shape is not None)
                l_in = make_input(shape=((None,) + input_shape), input_var=None, name='input')
            else:
                l_in = input_tensor
            l_hid = l_in
            for idx in range(n_hidden):
                l_hid = forward_dense_layer(l_hid, all_params[('W' + str(idx))], all_params[('b' + str(idx))], batch_norm=batch_normalization, nonlinearity=hidden_nonlinearity, scope=str(idx), reuse=reuse, is_training=is_training)
            output = forward_dense_layer(l_hid, all_params[('W' + str(n_hidden))], all_params[('b' + str(n_hidden))], batch_norm=False, nonlinearity=output_nonlinearity)
            return (l_in, output)
|
@contextmanager
def suppress_params_loading():
global load_params
load_params = False
(yield)
load_params = True
|
class GaussianMLPPolicy(StochasticPolicy, Serializable):
    """'Minimal' Gaussian MLP policy that builds its networks from raw
    parameter dicts (create_MLP / forward_MLP) rather than layer objects.

    NOTE(review): this shadows the earlier class of the same name in this
    module — confirm which definition callers should get.
    """

    def __init__(self, name, env_spec, hidden_sizes=(32, 32), learn_std=True, init_std=1.0, adaptive_std=False, std_share_network=False, std_hidden_sizes=(32, 32), min_std=1e-06, std_hidden_nonlinearity=tf.nn.tanh, hidden_nonlinearity=tf.nn.tanh, output_nonlinearity=tf.identity, mean_network=None, std_network=None, std_parametrization='exp'):
        """
        :param env_spec: environment spec; the action space must be a Box
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: Is std trainable
        :param init_std: Initial std
        :param adaptive_std: unsupported in this minimal implementation
        :param std_share_network:
        :param std_hidden_sizes: list of sizes for the fully-connected layers for std
        :param min_std: lower bound on the std, to avoid numerical issues
        :param std_hidden_nonlinearity:
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean (unsupported)
        :param std_network: custom network for the output log std (unsupported)
        :param std_parametrization: how the std should be parametrized:
            - exp: std = exp(param) (the log of the std is stored)
            - softplus: std = log(1 + exp(param))
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)
        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim
        if (mean_network is None):
            self.mean_params = mean_params = self.create_MLP(name='mean_network', input_shape=(None, obs_dim), output_dim=action_dim, hidden_sizes=hidden_sizes)
            (input_tensor, mean_tensor) = self.forward_MLP('mean_network', mean_params, n_hidden=len(hidden_sizes), input_shape=(obs_dim,), hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity, reuse=None)
            self._forward_mean = (lambda x, is_train: self.forward_MLP('mean_network', mean_params, n_hidden=len(hidden_sizes), hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity, input_tensor=x, is_training=is_train)[1])
        else:
            raise NotImplementedError('Chelsea does not support this.')
        if (std_network is not None):
            raise NotImplementedError('Minimal Gaussian MLP does not support this.')
        elif adaptive_std:
            raise NotImplementedError('Minimal Gaussian MLP doesnt have a tested version of this.')
            # NOTE(review): unreachable after the raise above; kept as a
            # sketch of the untested adaptive-std path.
            self.std_params = std_params = self.create_MLP(name='std_network', input_shape=(None, obs_dim), output_dim=action_dim, hidden_sizes=std_hidden_sizes)
            self._forward_std = (lambda x: self.forward_MLP('std_network', std_params, n_hidden=len(hidden_sizes), hidden_nonlinearity=std_hidden_nonlinearity, output_nonlinearity=tf.identity, input_tensor=x)[1])
        else:
            # Shared parameter layer; initialize so that std == init_std.
            if (std_parametrization == 'exp'):
                init_std_param = np.log(init_std)
            elif (std_parametrization == 'softplus'):
                init_std_param = np.log((np.exp(init_std) - 1))
            else:
                raise NotImplementedError
            self.std_params = make_param_layer(num_units=action_dim, param=tf.constant_initializer(init_std_param), name='output_std_param', trainable=learn_std)
            self._forward_std = (lambda x: forward_param_layer(x, self.std_params))
        self.std_parametrization = std_parametrization
        # Clamp the raw parameter at the value that yields min_std.
        if (std_parametrization == 'exp'):
            min_std_param = np.log(min_std)
        elif (std_parametrization == 'softplus'):
            min_std_param = np.log((np.exp(min_std) - 1))
        else:
            raise NotImplementedError
        self.min_std_param = min_std_param
        self._dist = DiagonalGaussian(action_dim)
        # BUG FIX: the dtype/shape/assign-op caches were read by
        # get_param_dtypes, get_param_shapes and set_param_values but never
        # initialized, raising AttributeError on first use.
        self._cached_params = {}
        self._cached_param_dtypes = {}
        self._cached_param_shapes = {}
        self._cached_assign_ops = {}
        self._cached_assign_placeholders = {}
        super(GaussianMLPPolicy, self).__init__(env_spec)
        dist_info_sym = self.dist_info_sym(input_tensor, dict(), is_training=False)
        mean_var = dist_info_sym['mean']
        log_std_var = dist_info_sym['log_std']
        # Numeric (mean, log_std) for a batch of flattened observations.
        self._f_dist = tensor_utils.compile_function(inputs=[input_tensor], outputs=[mean_var, log_std_var])

    @property
    def vectorized(self):
        # Supports batched rollouts.
        return True

    def dist_info_sym(self, obs_var, state_info_vars=None, is_training=True):
        """Symbolic (mean, log_std) of the action distribution for obs_var."""
        mean_var = self._forward_mean(obs_var, is_training)
        std_param_var = self._forward_std(obs_var)
        if (self.min_std_param is not None):
            std_param_var = tf.maximum(std_param_var, self.min_std_param)
        if (self.std_parametrization == 'exp'):
            log_std_var = std_param_var
        elif (self.std_parametrization == 'softplus'):
            # log_std = log(softplus(param)) = log(log(1 + exp(param)))
            log_std_var = tf.log(tf.log((1.0 + tf.exp(std_param_var))))
        else:
            raise NotImplementedError
        return dict(mean=mean_var, log_std=log_std_var)

    @overrides
    def get_action(self, observation):
        """Sample one action via the reparameterization mean + std * N(0,1)."""
        flat_obs = self.observation_space.flatten(observation)
        (mean, log_std) = [x[0] for x in self._f_dist([flat_obs])]
        rnd = np.random.normal(size=mean.shape)
        action = ((rnd * np.exp(log_std)) + mean)
        return (action, dict(mean=mean, log_std=log_std))

    def get_actions(self, observations):
        """Batched version of get_action."""
        flat_obs = self.observation_space.flatten_n(observations)
        (means, log_stds) = self._f_dist(flat_obs)
        rnd = np.random.normal(size=means.shape)
        actions = ((rnd * np.exp(log_stds)) + means)
        return (actions, dict(mean=means, log_std=log_stds))

    @property
    def distribution(self):
        return self._dist

    def get_params_internal(self, **tags):
        """Collect this policy's TF variables by name prefix."""
        if tags.get('trainable', False):
            params = tf.trainable_variables()
        else:
            params = tf.all_variables()
        params = [p for p in params if (p.name.startswith('mean_network') or p.name.startswith('output_std_param'))]
        # Exclude optimizer slot variables.
        params = [p for p in params if ('Adam' not in p.name)]
        return params

    def create_MLP(self, name, output_dim, hidden_sizes, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer, output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer, input_shape=None, weight_normalization=False):
        """Create the raw W/b parameters of an MLP; returns a dict keyed
        'W0', 'b0', ..., with the output layer at index len(hidden_sizes)."""
        assert (input_shape is not None)
        cur_shape = input_shape
        with tf.variable_scope(name):
            all_params = {}
            for (idx, hidden_size) in enumerate(hidden_sizes):
                (W, b, cur_shape) = make_dense_layer(cur_shape, num_units=hidden_size, name=('hidden_%d' % idx), W=hidden_W_init, b=hidden_b_init, weight_norm=weight_normalization)
                all_params[('W' + str(idx))] = W
                all_params[('b' + str(idx))] = b
            (W, b, _) = make_dense_layer(cur_shape, num_units=output_dim, name='output', W=output_W_init, b=output_b_init, weight_norm=weight_normalization)
            all_params[('W' + str(len(hidden_sizes)))] = W
            all_params[('b' + str(len(hidden_sizes)))] = b
            return all_params

    def forward_MLP(self, name, all_params, input_tensor=None, input_shape=None, n_hidden=(- 1), hidden_nonlinearity=tf.identity, output_nonlinearity=tf.identity, batch_normalization=False, reuse=True, is_training=False):
        """Build the forward pass of an MLP from a create_MLP params dict.
        Returns (input_tensor_or_layer, output_tensor)."""
        with tf.variable_scope(name):
            if (input_tensor is None):
                assert (input_shape is not None)
                l_in = make_input(shape=((None,) + input_shape), input_var=None, name='input')
            else:
                l_in = input_tensor
            l_hid = l_in
            for idx in range(n_hidden):
                l_hid = forward_dense_layer(l_hid, all_params[('W' + str(idx))], all_params[('b' + str(idx))], batch_norm=batch_normalization, nonlinearity=hidden_nonlinearity, scope=str(idx), reuse=reuse, is_training=is_training)
            output = forward_dense_layer(l_hid, all_params[('W' + str(n_hidden))], all_params[('b' + str(n_hidden))], batch_norm=False, nonlinearity=output_nonlinearity)
            return (l_in, output)

    def get_params(self, **tags):
        """
        Get the list of parameters (symbolically), filtered by the provided tags.
        Some common tags include 'regularizable' and 'trainable'
        """
        tag_tuple = tuple(sorted(list(tags.items()), key=(lambda x: x[0])))
        if (tag_tuple not in self._cached_params):
            self._cached_params[tag_tuple] = self.get_params_internal(**tags)
        return self._cached_params[tag_tuple]

    def get_param_values(self, **tags):
        """Fetch all parameter values as a single flat numpy vector."""
        params = self.get_params(**tags)
        param_values = tf.get_default_session().run(params)
        return flatten_tensors(param_values)

    def log_diagnostics(self, paths):
        """Record the average policy standard deviation over sampled paths."""
        log_stds = np.vstack([path['agent_infos']['log_std'] for path in paths])
        logger.record_tabular('AveragePolicyStd', np.mean(np.exp(log_stds)))

    def get_reparam_action_sym(self, obs_var, action_var, old_dist_info_vars):
        """
        Given observations, old actions, and distribution of old actions, return a symbolically reparameterized
        representation of the actions in terms of the policy parameters
        :param obs_var:
        :param action_var:
        :param old_dist_info_vars:
        :return:
        """
        print('--this really shouldnt be used, not updated from non-minimal policy--')
        # NOTE(review): action_var is passed as the (unused) state_info_vars
        # argument of dist_info_sym — confirm this is intentional.
        new_dist_info_vars = self.dist_info_sym(obs_var, action_var)
        (new_mean_var, new_log_std_var) = (new_dist_info_vars['mean'], new_dist_info_vars['log_std'])
        (old_mean_var, old_log_std_var) = (old_dist_info_vars['mean'], old_dist_info_vars['log_std'])
        # Recover the noise that generated action_var, then re-apply it
        # through the new distribution's parameters.
        epsilon_var = ((action_var - old_mean_var) / (tf.exp(old_log_std_var) + 1e-08))
        new_action_var = (new_mean_var + (epsilon_var * tf.exp(new_log_std_var)))
        return new_action_var

    def get_param_dtypes(self, **tags):
        """Dtypes of the parameters selected by `tags` (cached)."""
        tag_tuple = tuple(sorted(list(tags.items()), key=(lambda x: x[0])))
        if (tag_tuple not in self._cached_param_dtypes):
            params = self.get_params(**tags)
            param_values = tf.get_default_session().run(params)
            self._cached_param_dtypes[tag_tuple] = [val.dtype for val in param_values]
        return self._cached_param_dtypes[tag_tuple]

    def get_param_shapes(self, **tags):
        """Shapes of the parameters selected by `tags` (cached)."""
        tag_tuple = tuple(sorted(list(tags.items()), key=(lambda x: x[0])))
        if (tag_tuple not in self._cached_param_shapes):
            params = self.get_params(**tags)
            param_values = tf.get_default_session().run(params)
            self._cached_param_shapes[tag_tuple] = [val.shape for val in param_values]
        return self._cached_param_shapes[tag_tuple]

    def set_param_values(self, flattened_params, **tags):
        """Assign parameter values from a flat vector, reusing cached
        assign ops/placeholders to avoid growing the graph on every call."""
        debug = tags.pop('debug', False)
        param_values = unflatten_tensors(flattened_params, self.get_param_shapes(**tags))
        ops = []
        feed_dict = dict()
        for (param, dtype, value) in zip(self.get_params(**tags), self.get_param_dtypes(**tags), param_values):
            if (param not in self._cached_assign_ops):
                assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype)
                assign_op = tf.assign(param, assign_placeholder)
                self._cached_assign_ops[param] = assign_op
                self._cached_assign_placeholders[param] = assign_placeholder
            ops.append(self._cached_assign_ops[param])
            feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
            if debug:
                print(('setting value of %s' % param.name))
        tf.get_default_session().run(ops, feed_dict=feed_dict)

    def flat_to_params(self, flattened_params, **tags):
        """Split a flat vector into per-parameter arrays (no assignment)."""
        return unflatten_tensors(flattened_params, self.get_param_shapes(**tags))

    def __getstate__(self):
        d = Serializable.__getstate__(self)
        global load_params
        if load_params:
            d['params'] = self.get_param_values()
        return d

    def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        if load_params:
            tf.get_default_session().run(tf.initialize_variables(self.get_params()))
            self.set_param_values(d['params'])
|
class UniformControlPolicy(Policy, Serializable):
    """Baseline policy that ignores observations and draws every action
    uniformly at random from the action space."""

    def __init__(self, env_spec):
        Serializable.quick_init(self, locals())
        super(UniformControlPolicy, self).__init__(env_spec=env_spec)

    @property
    def vectorized(self):
        """Batched sampling is supported."""
        return True

    def get_action(self, observation):
        """Return a uniformly random action; the observation is ignored."""
        return self.action_space.sample(), dict()

    def get_actions(self, observations):
        """Return one uniformly random action per observation."""
        return self.action_space.sample_n(len(observations)), dict()

    def get_params_internal(self, **tags):
        """This policy has no parameters."""
        return []
|
class BernoulliMLPRegressor(LayersPowered, Serializable):
    """
    A class for performing regression (or classification, really) by fitting a bernoulli distribution to each of the
    output units.
    """

    def __init__(self, input_shape, output_dim, name, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, optimizer=None, tr_optimizer=None, use_trust_region=True, step_size=0.01, normalize_inputs=True, no_initial_trust_region=True):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param tr_optimizer: Optimizer used when the trust-region constraint is active.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param normalize_inputs: whether to standardize inputs with a running mean/std.
        :param no_initial_trust_region: if True, the first fit() uses the
            unconstrained optimizer even when use_trust_region is set.
        """
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            if (optimizer is None):
                optimizer = LbfgsOptimizer(name='optimizer')
            if (tr_optimizer is None):
                tr_optimizer = ConjugateGradientOptimizer()
            self.output_dim = output_dim
            self.optimizer = optimizer
            self.tr_optimizer = tr_optimizer
            # Sigmoid output: per-unit Bernoulli success probabilities.
            p_network = MLP(input_shape=input_shape, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=tf.nn.sigmoid, name='p_network')
            l_p = p_network.output_layer
            LayersPowered.__init__(self, [l_p])
            xs_var = p_network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32, shape=(None, output_dim), name='ys')
            old_p_var = tf.placeholder(dtype=tf.float32, shape=(None, output_dim), name='old_p')
            # Input-normalization statistics, updated in fit().
            x_mean_var = tf.get_variable(name='x_mean', initializer=tf.zeros_initializer, shape=((1,) + input_shape))
            x_std_var = tf.get_variable(name='x_std', initializer=tf.ones_initializer, shape=((1,) + input_shape))
            normalized_xs_var = ((xs_var - x_mean_var) / x_std_var)
            p_var = L.get_output(l_p, {p_network.input_layer: normalized_xs_var})
            old_info_vars = dict(p=old_p_var)
            info_vars = dict(p=p_var)
            dist = self._dist = Bernoulli(output_dim)
            # Mean KL between the previous and current predictive
            # distributions (trust-region constraint).
            mean_kl = tf.reduce_mean(dist.kl_sym(old_info_vars, info_vars))
            # Negative log-likelihood objective.
            loss = (- tf.reduce_mean(dist.log_likelihood_sym(ys_var, info_vars)))
            # Hard 0/1 predictions by thresholding at 0.5.
            predicted = (p_var >= 0.5)
            self.f_predict = tensor_utils.compile_function([xs_var], predicted)
            self.f_p = tensor_utils.compile_function([xs_var], p_var)
            self.l_p = l_p
            self.optimizer.update_opt(loss=loss, target=self, network_outputs=[p_var], inputs=[xs_var, ys_var])
            self.tr_optimizer.update_opt(loss=loss, target=self, network_outputs=[p_var], inputs=[xs_var, ys_var, old_p_var], leq_constraint=(mean_kl, step_size))
            self.use_trust_region = use_trust_region
            self.name = name
            self.normalize_inputs = normalize_inputs
            self.x_mean_var = x_mean_var
            self.x_std_var = x_std_var
            self.first_optimized = (not no_initial_trust_region)

    def fit(self, xs, ys):
        """Fit the regressor to inputs xs and binary targets ys."""
        if self.normalize_inputs:
            # Recompute normalization stats from the new batch.
            new_mean = np.mean(xs, axis=0, keepdims=True)
            new_std = (np.std(xs, axis=0, keepdims=True) + 1e-08)
            tf.get_default_session().run(tf.group(tf.assign(self.x_mean_var, new_mean), tf.assign(self.x_std_var, new_std)))
        if (self.use_trust_region and self.first_optimized):
            # Constrain the KL to the previous fit's predictions.
            old_p = self.f_p(xs)
            inputs = [xs, ys, old_p]
            optimizer = self.tr_optimizer
        else:
            inputs = [xs, ys]
            optimizer = self.optimizer
        loss_before = optimizer.loss(inputs)
        if self.name:
            prefix = (self.name + '_')
        else:
            prefix = ''
        logger.record_tabular((prefix + 'LossBefore'), loss_before)
        optimizer.optimize(inputs)
        loss_after = optimizer.loss(inputs)
        logger.record_tabular((prefix + 'LossAfter'), loss_after)
        logger.record_tabular((prefix + 'dLoss'), (loss_before - loss_after))
        self.first_optimized = True

    def predict(self, xs):
        """Hard 0/1 predictions for xs (threshold 0.5)."""
        return self.f_predict(np.asarray(xs))

    def sample_predict(self, xs):
        """Sample binary outputs from the predicted Bernoulli probabilities."""
        p = self.f_p(xs)
        return self._dist.sample(dict(p=p))

    def predict_log_likelihood(self, xs, ys):
        """Log-likelihood of targets ys under the predicted distribution."""
        p = self.f_p(np.asarray(xs))
        return self._dist.log_likelihood(np.asarray(ys), dict(p=p))

    def get_param_values(self, **tags):
        return LayersPowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        return LayersPowered.set_param_values(self, flattened_params, **tags)
|
class CategoricalMLPRegressor(LayersPowered, Serializable):
    """
    A class for performing regression (or classification, really) by fitting a categorical distribution to the outputs.
    Assumes that the outputs will be always a one hot vector.
    """

    def __init__(self, name, input_shape, output_dim, prob_network=None, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, optimizer=None, tr_optimizer=None, use_trust_region=True, step_size=0.01, normalize_inputs=True, no_initial_trust_region=True):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param tr_optimizer: Optimizer used when the trust-region (KL) constraint is active.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param normalize_inputs: Whether to normalize inputs with running mean/std variables.
        :param no_initial_trust_region: If True, the first call to fit() skips the trust region.
        """
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            # Unconstrained optimizer for plain maximum-likelihood fitting.
            if (optimizer is None):
                optimizer = LbfgsOptimizer(name='optimizer')
            # KL-constrained (trust-region) optimizer.
            if (tr_optimizer is None):
                tr_optimizer = ConjugateGradientOptimizer()
            self.output_dim = output_dim
            self.optimizer = optimizer
            self.tr_optimizer = tr_optimizer
            if (prob_network is None):
                # Softmax MLP producing the categorical probabilities.
                prob_network = MLP(input_shape=input_shape, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=tf.nn.softmax, name='prob_network')
            l_prob = prob_network.output_layer
            LayersPowered.__init__(self, [l_prob])
            xs_var = prob_network.input_layer.input_var
            # One-hot targets.
            ys_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name='ys')
            # Probabilities from before the update, fed in for the KL constraint.
            old_prob_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name='old_prob')
            # Input normalization statistics; updated in fit().
            x_mean_var = tf.get_variable(name='x_mean', shape=((1,) + input_shape), initializer=tf.constant_initializer(0.0, dtype=tf.float32))
            x_std_var = tf.get_variable(name='x_std', shape=((1,) + input_shape), initializer=tf.constant_initializer(1.0, dtype=tf.float32))
            normalized_xs_var = ((xs_var - x_mean_var) / x_std_var)
            prob_var = L.get_output(l_prob, {prob_network.input_layer: normalized_xs_var})
            old_info_vars = dict(prob=old_prob_var)
            info_vars = dict(prob=prob_var)
            dist = self._dist = Categorical(output_dim)
            # Mean KL between old and new predictive distributions (trust-region constraint).
            mean_kl = tf.reduce_mean(dist.kl_sym(old_info_vars, info_vars))
            # Negative mean log-likelihood of the one-hot targets.
            loss = (- tf.reduce_mean(dist.log_likelihood_sym(ys_var, info_vars)))
            # Hard predictions: argmax re-encoded as a one-hot vector.
            predicted = tensor_utils.to_onehot_sym(tf.argmax(prob_var, dimension=1), output_dim)
            self.prob_network = prob_network
            self.f_predict = tensor_utils.compile_function([xs_var], predicted)
            self.f_prob = tensor_utils.compile_function([xs_var], prob_var)
            self.l_prob = l_prob
            self.optimizer.update_opt(loss=loss, target=self, network_outputs=[prob_var], inputs=[xs_var, ys_var])
            self.tr_optimizer.update_opt(loss=loss, target=self, network_outputs=[prob_var], inputs=[xs_var, ys_var, old_prob_var], leq_constraint=(mean_kl, step_size))
            self.use_trust_region = use_trust_region
            self.name = name
            self.normalize_inputs = normalize_inputs
            self.x_mean_var = x_mean_var
            self.x_std_var = x_std_var
            # When True, the next fit() may use the trust-region optimizer.
            self.first_optimized = (not no_initial_trust_region)

    def fit(self, xs, ys):
        """Fit the categorical distribution to inputs xs and one-hot targets ys."""
        if self.normalize_inputs:
            # Recompute normalization statistics from the current batch.
            new_mean = np.mean(xs, axis=0, keepdims=True)
            new_std = (np.std(xs, axis=0, keepdims=True) + 1e-08)
            tf.get_default_session().run(tf.group(tf.assign(self.x_mean_var, new_mean), tf.assign(self.x_std_var, new_std)))
        if (self.use_trust_region and self.first_optimized):
            # Constrain KL against the pre-update predictions.
            old_prob = self.f_prob(xs)
            inputs = [xs, ys, old_prob]
            optimizer = self.tr_optimizer
        else:
            inputs = [xs, ys]
            optimizer = self.optimizer
        loss_before = optimizer.loss(inputs)
        if self.name:
            prefix = (self.name + '_')
        else:
            prefix = ''
        logger.record_tabular((prefix + 'LossBefore'), loss_before)
        optimizer.optimize(inputs)
        loss_after = optimizer.loss(inputs)
        logger.record_tabular((prefix + 'LossAfter'), loss_after)
        logger.record_tabular((prefix + 'dLoss'), (loss_before - loss_after))
        self.first_optimized = True

    def predict(self, xs):
        """Return one-hot argmax predictions for a batch of inputs."""
        return self.f_predict(np.asarray(xs))

    def predict_log_likelihood(self, xs, ys):
        """Log-likelihood of one-hot ys under the predicted categorical distribution."""
        prob = self.f_prob(np.asarray(xs))
        return self._dist.log_likelihood(np.asarray(ys), dict(prob=prob))

    def dist_info_sym(self, x_var):
        """Symbolic categorical probabilities for input tensor x_var."""
        normalized_xs_var = ((x_var - self.x_mean_var) / self.x_std_var)
        prob = L.get_output(self.l_prob, {self.prob_network.input_layer: normalized_xs_var})
        return dict(prob=prob)

    def log_likelihood_sym(self, x_var, y_var):
        """Symbolic log-likelihood of y_var given input tensor x_var."""
        normalized_xs_var = ((x_var - self.x_mean_var) / self.x_std_var)
        prob = L.get_output(self.l_prob, {self.prob_network.input_layer: normalized_xs_var})
        return self._dist.log_likelihood_sym(y_var, dict(prob=prob))

    def get_param_values(self, **tags):
        return LayersPowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        return LayersPowered.set_param_values(self, flattened_params, **tags)
|
class DeterministicMLPRegressor(LayersPowered, Serializable):
    """
    A class for performing nonlinear regression.
    """

    def __init__(self, name, input_shape, output_dim, network=None, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, output_nonlinearity=None, optimizer=None, normalize_inputs=True):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param output_nonlinearity: Non-linearity applied to the network output.
        :param optimizer: Optimizer for minimizing the loss (mean squared error).
        :param normalize_inputs: Whether to normalize inputs with running mean/std variables.
        """
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            if (optimizer is None):
                optimizer = LbfgsOptimizer(name='optimizer')
            self.output_dim = output_dim
            self.optimizer = optimizer
            if (network is None):
                network = MLP(input_shape=input_shape, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity, name='network')
            l_out = network.output_layer
            LayersPowered.__init__(self, [l_out])
            xs_var = network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name='ys')
            # Input normalization statistics (constant unless updated externally).
            x_mean_var = tf.get_variable(name='x_mean', shape=((1,) + input_shape), initializer=tf.constant_initializer(0.0, dtype=tf.float32))
            x_std_var = tf.get_variable(name='x_std', shape=((1,) + input_shape), initializer=tf.constant_initializer(1.0, dtype=tf.float32))
            normalized_xs_var = ((xs_var - x_mean_var) / x_std_var)
            fit_ys_var = L.get_output(l_out, {network.input_layer: normalized_xs_var})
            # BUG FIX: the loss was negated (-tf.reduce_mean(tf.square(...))),
            # which would make the optimizer MAXIMIZE the squared error.
            # Minimize the plain mean squared error instead.
            loss = tf.reduce_mean(tf.square((fit_ys_var - ys_var)))
            self.f_predict = tensor_utils.compile_function([xs_var], fit_ys_var)
            optimizer_args = dict(loss=loss, target=self, network_outputs=[fit_ys_var])
            optimizer_args['inputs'] = [xs_var, ys_var]
            self.optimizer.update_opt(**optimizer_args)
            self.name = name
            self.l_out = l_out
            self.normalize_inputs = normalize_inputs
            self.x_mean_var = x_mean_var
            self.x_std_var = x_std_var

    def predict_sym(self, xs):
        """Symbolic prediction for the given input tensor.

        NOTE(review): unlike predict(), this bypasses the input-normalization
        variables — confirm callers pre-normalize if that matters.
        """
        return L.get_output(self.l_out, xs)

    def predict(self, xs):
        """Numeric predictions for a batch of inputs."""
        return self.f_predict(np.asarray(xs))

    def get_param_values(self, **tags):
        return LayersPowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        return LayersPowered.set_param_values(self, flattened_params, **tags)
|
class GaussianMLPRegressor(LayersPowered, Serializable):
    """
    A class for performing regression by fitting a Gaussian distribution to the outputs.
    """

    def __init__(self, name, input_shape, output_dim, mean_network=None, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, optimizer=None, use_trust_region=True, step_size=0.01, learn_std=True, init_std=1.0, adaptive_std=False, std_share_network=False, std_hidden_sizes=(32, 32), std_nonlinearity=None, normalize_inputs=True, normalize_outputs=True, subsample_factor=1.0):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param learn_std: Whether to learn the standard deviations. Only effective if adaptive_std is False. If
        adaptive_std is True, this parameter is ignored, and the weights for the std network are always learned.
        :param adaptive_std: Whether to make the std a function of the states.
        :param std_share_network: Whether to use the same network as the mean.
        :param std_hidden_sizes: Number of hidden units of each layer of the std network. Only used if
        `std_share_network` is False. It defaults to the same architecture as the mean.
        :param std_nonlinearity: Non-linearity used for each layer of the std network. Only used if `std_share_network`
        is False. It defaults to the same non-linearity as the mean.
        """
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            if (optimizer is None):
                if use_trust_region:
                    optimizer = PenaltyLbfgsOptimizer('optimizer')
                else:
                    optimizer = LbfgsOptimizer('optimizer')
            self._optimizer = optimizer
            self._subsample_factor = subsample_factor
            if (mean_network is None):
                mean_network = MLP(name='mean_network', input_shape=input_shape, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=None)
            l_mean = mean_network.output_layer
            if adaptive_std:
                # State-dependent log-std: a second MLP sharing the mean network's input.
                l_log_std = MLP(name='log_std_network', input_shape=input_shape, input_var=mean_network.input_layer.input_var, output_dim=output_dim, hidden_sizes=std_hidden_sizes, hidden_nonlinearity=std_nonlinearity, output_nonlinearity=None).output_layer
            else:
                # Single (optionally learned) log-std parameter per output dim.
                l_log_std = L.ParamLayer(mean_network.input_layer, num_units=output_dim, param=tf.constant_initializer(np.log(init_std)), name='output_log_std', trainable=learn_std)
            LayersPowered.__init__(self, [l_mean, l_log_std])
            xs_var = mean_network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32, name='ys', shape=(None, output_dim))
            # BUG FIX: this placeholder was created with name='ys', duplicating the
            # targets placeholder's name in the graph. Give it its own name.
            old_means_var = tf.placeholder(dtype=tf.float32, name='old_means', shape=(None, output_dim))
            old_log_stds_var = tf.placeholder(dtype=tf.float32, name='old_log_stds', shape=(None, output_dim))
            # Input/output normalization statistics, updated in fit().
            x_mean_var = tf.Variable(np.zeros(((1,) + input_shape), dtype=np.float32), name='x_mean')
            x_std_var = tf.Variable(np.ones(((1,) + input_shape), dtype=np.float32), name='x_std')
            y_mean_var = tf.Variable(np.zeros((1, output_dim), dtype=np.float32), name='y_mean')
            y_std_var = tf.Variable(np.ones((1, output_dim), dtype=np.float32), name='y_std')
            normalized_xs_var = ((xs_var - x_mean_var) / x_std_var)
            normalized_ys_var = ((ys_var - y_mean_var) / y_std_var)
            normalized_means_var = L.get_output(l_mean, {mean_network.input_layer: normalized_xs_var})
            normalized_log_stds_var = L.get_output(l_log_std, {mean_network.input_layer: normalized_xs_var})
            # De-normalized (original-scale) distribution parameters.
            means_var = ((normalized_means_var * y_std_var) + y_mean_var)
            log_stds_var = (normalized_log_stds_var + tf.log(y_std_var))
            # Old parameters mapped into normalized space for the KL constraint.
            normalized_old_means_var = ((old_means_var - y_mean_var) / y_std_var)
            normalized_old_log_stds_var = (old_log_stds_var - tf.log(y_std_var))
            dist = self._dist = DiagonalGaussian(output_dim)
            normalized_dist_info_vars = dict(mean=normalized_means_var, log_std=normalized_log_stds_var)
            mean_kl = tf.reduce_mean(dist.kl_sym(dict(mean=normalized_old_means_var, log_std=normalized_old_log_stds_var), normalized_dist_info_vars))
            # Negative mean log-likelihood, computed in normalized space.
            loss = (- tf.reduce_mean(dist.log_likelihood_sym(normalized_ys_var, normalized_dist_info_vars)))
            self._f_predict = tensor_utils.compile_function([xs_var], means_var)
            self._f_pdists = tensor_utils.compile_function([xs_var], [means_var, log_stds_var])
            self._l_mean = l_mean
            self._l_log_std = l_log_std
            optimizer_args = dict(loss=loss, target=self, network_outputs=[normalized_means_var, normalized_log_stds_var])
            if use_trust_region:
                optimizer_args['leq_constraint'] = (mean_kl, step_size)
                optimizer_args['inputs'] = [xs_var, ys_var, old_means_var, old_log_stds_var]
            else:
                optimizer_args['inputs'] = [xs_var, ys_var]
            self._optimizer.update_opt(**optimizer_args)
            self._use_trust_region = use_trust_region
            self._name = name
            self._normalize_inputs = normalize_inputs
            self._normalize_outputs = normalize_outputs
            self._mean_network = mean_network
            self._x_mean_var = x_mean_var
            self._x_std_var = x_std_var
            self._y_mean_var = y_mean_var
            self._y_std_var = y_std_var

    def fit(self, xs, ys):
        """Fit the Gaussian output distribution to inputs xs and targets ys."""
        if (self._subsample_factor < 1):
            # Fit on a random subsample of the batch (sampled with replacement).
            num_samples_tot = xs.shape[0]
            idx = np.random.randint(0, num_samples_tot, int((num_samples_tot * self._subsample_factor)))
            (xs, ys) = (xs[idx], ys[idx])
        sess = tf.get_default_session()
        if self._normalize_inputs:
            # Recompute input normalization statistics from this batch.
            sess.run([tf.assign(self._x_mean_var, np.mean(xs, axis=0, keepdims=True)), tf.assign(self._x_std_var, (np.std(xs, axis=0, keepdims=True) + 1e-08))])
        if self._normalize_outputs:
            # Recompute output normalization statistics from this batch.
            sess.run([tf.assign(self._y_mean_var, np.mean(ys, axis=0, keepdims=True)), tf.assign(self._y_std_var, (np.std(ys, axis=0, keepdims=True) + 1e-08))])
        if self._use_trust_region:
            # Constrain KL against the pre-update predictive distribution.
            (old_means, old_log_stds) = self._f_pdists(xs)
            inputs = [xs, ys, old_means, old_log_stds]
        else:
            inputs = [xs, ys]
        loss_before = self._optimizer.loss(inputs)
        if self._name:
            prefix = (self._name + '_')
        else:
            prefix = ''
        logger.record_tabular((prefix + 'LossBefore'), loss_before)
        self._optimizer.optimize(inputs)
        loss_after = self._optimizer.loss(inputs)
        logger.record_tabular((prefix + 'LossAfter'), loss_after)
        if self._use_trust_region:
            logger.record_tabular((prefix + 'MeanKL'), self._optimizer.constraint_val(inputs))
        logger.record_tabular((prefix + 'dLoss'), (loss_before - loss_after))

    def predict(self, xs):
        """
        Return the maximum likelihood estimate of the predicted y.
        :param xs:
        :return:
        """
        return self._f_predict(xs)

    def sample_predict(self, xs):
        """
        Sample one possible output from the prediction distribution.
        :param xs:
        :return:
        """
        (means, log_stds) = self._f_pdists(xs)
        return self._dist.sample(dict(mean=means, log_std=log_stds))

    def predict_log_likelihood(self, xs, ys):
        """Log-likelihood of targets ys under the distribution predicted at xs."""
        (means, log_stds) = self._f_pdists(xs)
        return self._dist.log_likelihood(ys, dict(mean=means, log_std=log_stds))

    def log_likelihood_sym(self, x_var, y_var):
        """Symbolic log-likelihood of y_var given input tensor x_var."""
        normalized_xs_var = ((x_var - self._x_mean_var) / self._x_std_var)
        (normalized_means_var, normalized_log_stds_var) = L.get_output([self._l_mean, self._l_log_std], {self._mean_network.input_layer: normalized_xs_var})
        means_var = ((normalized_means_var * self._y_std_var) + self._y_mean_var)
        # BUG FIX: this used Theano's TT.log on a TensorFlow variable inside a
        # TF symbolic graph; use tf.log, matching the construction in __init__.
        log_stds_var = (normalized_log_stds_var + tf.log(self._y_std_var))
        return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))

    def get_param_values(self, **tags):
        return LayersPowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        LayersPowered.set_param_values(self, flattened_params, **tags)
|
def worker_init_tf(G):
    """Create a TF session on a pool worker and make it the default session."""
    session = tf.Session()
    G.sess = session
    # Enter the session's context so it becomes the worker's default session.
    session.__enter__()
|
def worker_init_tf_vars(G):
    """Initialize all TF global variables in the worker's session."""
    G.sess.run(tf.global_variables_initializer())
|
class BatchSampler(BaseSampler):
    """Sampler that collects whole trajectories through the parallel worker pool."""

    def __init__(self, algo, n_envs=1):
        super(BatchSampler, self).__init__(algo)
        # Number of distinct reset-arg / parameter slots to sample for.
        self.n_envs = n_envs

    def start_worker(self):
        # With multiple workers, each needs its own TF session before the task
        # (env/policy) is shipped over, and variable initialization afterwards.
        if (singleton_pool.n_parallel > 1):
            singleton_pool.run_each(worker_init_tf)
        parallel_sampler.populate_task(self.algo.env, self.algo.policy)
        if (singleton_pool.n_parallel > 1):
            singleton_pool.run_each(worker_init_tf_vars)

    def shutdown_worker(self):
        parallel_sampler.terminate_task(scope=self.algo.scope)

    def obtain_samples(self, itr, reset_args=None, return_dict=False, log_prefix=''):
        """Collect paths for iteration `itr`, one batch slice per env slot.

        :param reset_args: a single reset argument (broadcast to all slots) or a
            list/array with one entry per slot.
        :param return_dict: if True, return {slot_index: [paths]}; otherwise a flat list.
        :param log_prefix: prefix for the tabular timing log key.
        """
        init_policy_params = cur_policy_params = self.algo.policy.get_param_values()
        if hasattr(self.algo.env, 'get_param_values'):
            try:
                cur_env_params = self.algo.env.get_param_values()
            # BUG FIX: narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit. This remains a best-effort fetch.
            except Exception:
                cur_env_params = None
        else:
            cur_env_params = None
        import time
        start = time.time()
        # Broadcast a single reset arg to every env slot.
        if ((type(reset_args) != list) and (type(reset_args) != np.ndarray)):
            reset_args = ([reset_args] * self.n_envs)
        if (hasattr(self.algo.policy, 'all_param_vals') and self.algo.policy.all_param_vals):
            cur_policy_params = [flatten_tensors(self.algo.policy.all_param_vals.values())]
        else:
            cur_policy_params = ([cur_policy_params] * self.n_envs)
        paths = {}
        for i in range(self.n_envs):
            # Each slot gets an equal share of the total batch size.
            paths[i] = parallel_sampler.sample_paths(policy_params=cur_policy_params[i], env_params=cur_env_params, max_samples=(self.algo.batch_size / self.n_envs), max_path_length=self.algo.max_path_length, scope=self.algo.scope, reset_arg=reset_args[i], show_prog_bar=False)
        total_time = (time.time() - start)
        logger.record_tabular((log_prefix + 'TotalExecTime'), total_time)
        if (not return_dict):
            flatten_list = (lambda l: [item for sublist in l for item in sublist])
            paths = flatten_list(paths.values())
            # Restore the policy parameters saved before sampling.
            self.algo.policy.set_param_values(init_policy_params)
        assert self.algo.whole_paths
        return paths
|
class VectorizedSampler(BaseSampler):
    """Sampler that steps many env copies in lockstep through a vectorized
    executor, drawing a fresh standard-normal latent noise vector per episode."""

    def __init__(self, algo, n_envs=None, latent_dim=None):
        # n_envs: number of parallel env copies (derived from batch size if None).
        # latent_dim: dimension of the per-episode noise passed to the policy.
        super(VectorizedSampler, self).__init__(algo)
        self.n_envs = n_envs
        self.latent_dim = latent_dim

    def start_worker(self):
        """Build the vectorized environment executor."""
        n_envs = self.n_envs
        if (n_envs is None):
            # Aim for roughly one full-length path per env, clipped to [1, 100].
            n_envs = int((self.algo.batch_size / self.algo.max_path_length))
            n_envs = max(1, min(n_envs, 100))
        if getattr(self.algo.env, 'vectorized', False):
            # The env provides its own (native) vectorized executor.
            self.vec_env = self.algo.env.vec_env_executor(n_envs=n_envs, max_path_length=self.algo.max_path_length)
        else:
            # Fall back to stepping independent deep copies of the env.
            envs = [pickle.loads(pickle.dumps(self.algo.env)) for _ in range(n_envs)]
            self.vec_env = VecEnvExecutor(envs=envs, max_path_length=self.algo.max_path_length)
        self.env_spec = self.algo.env.spec

    def shutdown_worker(self):
        self.vec_env.terminate()

    def flatten_n(self, xs):
        # Flatten each row of a batch into a 1-D vector.
        xs = np.asarray(xs)
        return xs.reshape((xs.shape[0], (- 1)))

    def obtain_samples(self, itr, reset_args=None, task_idxs=None, return_dict=False, log_prefix=''):
        """Roll out until at least algo.batch_size transitions are collected.

        Returns either {env_index: [paths]} (return_dict=True) or a flat list.
        """
        logger.log(('Obtaining samples for iteration %d...' % itr))
        paths = {}
        for i in range(self.vec_env.num_envs):
            paths[i] = []
        # Broadcast a single reset arg to every env copy.
        if ((reset_args is not None) and ((type(reset_args) != list) and (type(reset_args) != np.ndarray))):
            reset_args = ([reset_args] * self.vec_env.num_envs)
        n_samples = 0
        # One standard-normal latent noise vector per env, resampled per episode.
        curr_noises = [np.random.normal(0, 1, size=(self.latent_dim,)) for _ in range(self.vec_env.num_envs)]
        obses = self.vec_env.reset(reset_args)
        dones = np.asarray(([True] * self.vec_env.num_envs))
        running_paths = ([None] * self.vec_env.num_envs)
        pbar = ProgBarCounter(self.algo.batch_size)
        policy_time = 0
        env_time = 0
        process_time = 0
        policy = self.algo.policy
        import time
        while (n_samples < self.algo.batch_size):
            t = time.time()
            policy.reset(dones)
            (actions, agent_infos) = policy.get_actions(obses, task_idxs, curr_noises)
            policy_time += (time.time() - t)
            t = time.time()
            (next_obses, rewards, dones, env_infos) = self.vec_env.step(actions, reset_args)
            env_time += (time.time() - t)
            t = time.time()
            # Convert dict-of-batches into a per-env list of dicts.
            agent_infos = tensor_utils.split_tensor_dict_list(agent_infos)
            env_infos = tensor_utils.split_tensor_dict_list(env_infos)
            if (env_infos is None):
                env_infos = [dict() for _ in range(self.vec_env.num_envs)]
            if (agent_infos is None):
                agent_infos = [dict() for _ in range(self.vec_env.num_envs)]
            for (idx, observation, action, reward, env_info, agent_info, done, noise) in zip(itertools.count(), obses, actions, rewards, env_infos, agent_infos, dones, curr_noises):
                if (running_paths[idx] is None):
                    running_paths[idx] = dict(observations=[], actions=[], rewards=[], env_infos=[], agent_infos=[], noises=[])
                running_paths[idx]['observations'].append(observation)
                running_paths[idx]['actions'].append(action)
                running_paths[idx]['rewards'].append(reward)
                running_paths[idx]['env_infos'].append(env_info)
                running_paths[idx]['agent_infos'].append(agent_info)
                running_paths[idx]['noises'].append(noise)
                if done:
                    # Episode finished: pack the accumulated transitions into a path.
                    paths[idx].append(dict(observations=self.env_spec.observation_space.flatten_n(running_paths[idx]['observations']), noises=self.flatten_n(running_paths[idx]['noises']), actions=self.env_spec.action_space.flatten_n(running_paths[idx]['actions']), rewards=tensor_utils.stack_tensor_list(running_paths[idx]['rewards']), env_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]['env_infos']), agent_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]['agent_infos'])))
                    n_samples += len(running_paths[idx]['rewards'])
                    running_paths[idx] = None
                    # Resample the latent noise for this env's next episode.
                    curr_noises[idx] = np.random.normal(0, 1, size=(self.latent_dim,))
            process_time += (time.time() - t)
            pbar.inc(len(obses))
            obses = next_obses
        pbar.stop()
        logger.record_tabular((log_prefix + 'PolicyExecTime'), policy_time)
        logger.record_tabular((log_prefix + 'EnvExecTime'), env_time)
        logger.record_tabular((log_prefix + 'ProcessExecTime'), process_time)
        if (not return_dict):
            flatten_list = (lambda l: [item for sublist in l for item in sublist])
            paths = flatten_list(paths.values())
        return paths
|
class VectorizedSamplerNoNoise(BaseSampler):
    """Variant of VectorizedSampler that feeds the policy all-zero latent noise
    vectors instead of sampling them, so rollouts are noise-free."""

    def __init__(self, algo, n_envs=None, latent_dim=None):
        # n_envs: number of parallel env copies (derived from batch size if None).
        # latent_dim: dimension of the (zero) noise vectors passed to the policy.
        super(VectorizedSamplerNoNoise, self).__init__(algo)
        self.n_envs = n_envs
        self.latent_dim = latent_dim

    def start_worker(self):
        """Build the vectorized environment executor."""
        n_envs = self.n_envs
        if (n_envs is None):
            # Aim for roughly one full-length path per env, clipped to [1, 100].
            n_envs = int((self.algo.batch_size / self.algo.max_path_length))
            n_envs = max(1, min(n_envs, 100))
        if getattr(self.algo.env, 'vectorized', False):
            # The env provides its own (native) vectorized executor.
            self.vec_env = self.algo.env.vec_env_executor(n_envs=n_envs, max_path_length=self.algo.max_path_length)
        else:
            # Fall back to stepping independent deep copies of the env.
            envs = [pickle.loads(pickle.dumps(self.algo.env)) for _ in range(n_envs)]
            self.vec_env = VecEnvExecutor(envs=envs, max_path_length=self.algo.max_path_length)
        self.env_spec = self.algo.env.spec

    def shutdown_worker(self):
        self.vec_env.terminate()

    def flatten_n(self, xs):
        # Flatten each row of a batch into a 1-D vector.
        xs = np.asarray(xs)
        return xs.reshape((xs.shape[0], (- 1)))

    def obtain_samples(self, itr, reset_args=None, task_idxs=None, return_dict=False, log_prefix=''):
        """Roll out until at least algo.batch_size transitions are collected.

        Returns either {env_index: [paths]} (return_dict=True) or a flat list.
        """
        logger.log(('Obtaining samples for iteration %d...' % itr))
        paths = {}
        for i in range(self.vec_env.num_envs):
            paths[i] = []
        # Broadcast a single reset arg to every env copy.
        if ((reset_args is not None) and ((type(reset_args) != list) and (type(reset_args) != np.ndarray))):
            reset_args = ([reset_args] * self.vec_env.num_envs)
        n_samples = 0
        # All-zero latent "noise" — the policy receives a deterministic latent.
        curr_noises = [np.zeros(shape=self.latent_dim) for _ in range(self.vec_env.num_envs)]
        obses = self.vec_env.reset(reset_args)
        dones = np.asarray(([True] * self.vec_env.num_envs))
        running_paths = ([None] * self.vec_env.num_envs)
        pbar = ProgBarCounter(self.algo.batch_size)
        policy_time = 0
        env_time = 0
        process_time = 0
        policy = self.algo.policy
        import time
        while (n_samples < self.algo.batch_size):
            t = time.time()
            policy.reset(dones)
            (actions, agent_infos) = policy.get_actions(obses, task_idxs, curr_noises)
            policy_time += (time.time() - t)
            t = time.time()
            (next_obses, rewards, dones, env_infos) = self.vec_env.step(actions, reset_args)
            env_time += (time.time() - t)
            t = time.time()
            # Convert dict-of-batches into a per-env list of dicts.
            agent_infos = tensor_utils.split_tensor_dict_list(agent_infos)
            env_infos = tensor_utils.split_tensor_dict_list(env_infos)
            if (env_infos is None):
                env_infos = [dict() for _ in range(self.vec_env.num_envs)]
            if (agent_infos is None):
                agent_infos = [dict() for _ in range(self.vec_env.num_envs)]
            for (idx, observation, action, reward, env_info, agent_info, done, noise) in zip(itertools.count(), obses, actions, rewards, env_infos, agent_infos, dones, curr_noises):
                if (running_paths[idx] is None):
                    running_paths[idx] = dict(observations=[], actions=[], rewards=[], env_infos=[], agent_infos=[], noises=[])
                running_paths[idx]['observations'].append(observation)
                running_paths[idx]['actions'].append(action)
                running_paths[idx]['rewards'].append(reward)
                running_paths[idx]['env_infos'].append(env_info)
                running_paths[idx]['agent_infos'].append(agent_info)
                running_paths[idx]['noises'].append(noise)
                if done:
                    # Episode finished: pack the accumulated transitions into a path.
                    paths[idx].append(dict(observations=self.env_spec.observation_space.flatten_n(running_paths[idx]['observations']), noises=self.flatten_n(running_paths[idx]['noises']), actions=self.env_spec.action_space.flatten_n(running_paths[idx]['actions']), rewards=tensor_utils.stack_tensor_list(running_paths[idx]['rewards']), env_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]['env_infos']), agent_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]['agent_infos'])))
                    n_samples += len(running_paths[idx]['rewards'])
                    running_paths[idx] = None
                    # Reset to a fresh zero noise vector for the next episode.
                    curr_noises[idx] = np.zeros(shape=self.latent_dim)
            process_time += (time.time() - t)
            pbar.inc(len(obses))
            obses = next_obses
        pbar.stop()
        logger.record_tabular((log_prefix + 'PolicyExecTime'), policy_time)
        logger.record_tabular((log_prefix + 'EnvExecTime'), env_time)
        logger.record_tabular((log_prefix + 'ProcessExecTime'), process_time)
        if (not return_dict):
            flatten_list = (lambda l: [item for sublist in l for item in sublist])
            paths = flatten_list(paths.values())
        return paths
|
class Box(TheanoBox):
    """Theano Box space adapted to produce TensorFlow tensor variables."""

    def new_tensor_variable(self, name, extra_dims):
        """Create a float32 placeholder with `extra_dims` leading batch dims."""
        shape = [None] * extra_dims + [self.flat_dim]
        return tf.placeholder(tf.float32, shape=shape, name=name)
|
class Discrete(Space):
    """
    {0,1,...,n-1}
    """

    def __init__(self, n):
        self._n = n

    @property
    def n(self):
        # Number of categories in the space.
        return self._n

    def sample(self):
        """Draw one category uniformly at random."""
        return np.random.randint(self.n)

    def sample_n(self, n):
        """Draw `n` categories uniformly at random."""
        return np.random.randint(low=0, high=self.n, size=n)

    def contains(self, x):
        """True iff x is a scalar integer in [0, n)."""
        x = np.asarray(x)
        return ((x.shape == ()) and (x.dtype.kind == 'i') and (x >= 0) and (x < self.n))

    def __repr__(self):
        return ('Discrete(%d)' % self.n)

    def flatten(self, x):
        # Flat representation is a one-hot vector of length n.
        return special.to_onehot(x, self.n)

    def unflatten(self, x):
        return special.from_onehot(x)

    def flatten_n(self, x):
        return special.to_onehot_n(x, self.n)

    def unflatten_n(self, x):
        return special.from_onehot_n(x)

    @property
    def default_value(self):
        return 0

    @property
    def flat_dim(self):
        return self.n

    def weighted_sample(self, weights):
        """Draw one category with the given per-category probabilities."""
        return special.weighted_sample(weights, range(self.n))

    def new_tensor_variable(self, name, extra_dims):
        """Placeholder over the one-hot representation, with leading batch dims."""
        return tf.placeholder(dtype=tf.uint8, shape=(([None] * extra_dims) + [self.flat_dim]), name=name)

    # BUG FIX: __eq__ was defined twice; the earlier definition (a bare
    # `self.n == other.n`, which breaks on non-Discrete operands) was dead
    # code shadowed by this one. Keep only the robust version.
    def __eq__(self, other):
        if (not isinstance(other, Discrete)):
            return False
        return (self.n == other.n)

    def __hash__(self):
        return hash(self.n)
|
class Product(Space):
    """A space formed as the Cartesian product of several component spaces."""

    def __init__(self, *components):
        """Accept either Product(a, b, c) or Product([a, b, c])."""
        if isinstance(components[0], (list, tuple)):
            assert (len(components) == 1)
            components = components[0]
        self._components = tuple(components)
        # Probe each component for the dtype of its tensor variables, then
        # record a common dtype they can all be promoted to.
        probed = [c.new_tensor_variable('tmp', extra_dims=0).dtype for c in components]
        if len(probed) > 0 and hasattr(probed[0], 'as_numpy_dtype'):
            # TF DType objects -> their numpy equivalents.
            probed = [d.as_numpy_dtype for d in probed]
        self._common_dtype = np.core.numerictypes.find_common_type([], probed)

    def sample(self):
        """Draw one sample from every component; returns a tuple."""
        return tuple(c.sample() for c in self._components)

    @property
    def components(self):
        return self._components

    def contains(self, x):
        """True iff x is a tuple whose elements each belong to their component."""
        if not isinstance(x, tuple):
            return False
        return all(c.contains(xi) for (c, xi) in zip(self._components, x))

    def new_tensor_variable(self, name, extra_dims):
        """Placeholder over the flattened product representation."""
        shape = [None] * extra_dims + [self.flat_dim]
        return tf.placeholder(dtype=self._common_dtype, shape=shape, name=name)

    @property
    def flat_dim(self):
        # Total flat dimension is the sum over components.
        return np.sum([c.flat_dim for c in self._components])

    def flatten(self, x):
        """Concatenate the per-component flattened representations."""
        pieces = [c.flatten(xi) for (c, xi) in zip(self._components, x)]
        return np.concatenate(pieces)

    def flatten_n(self, xs):
        """Flatten a batch of product samples along the last axis."""
        # Transpose [sample][component] -> [component][sample] first.
        per_component = [[x[i] for x in xs] for i in range(len(xs[0]))]
        flat_pieces = [c.flatten_n(xi) for (c, xi) in zip(self.components, per_component)]
        return np.concatenate(flat_pieces, axis=-1)

    def unflatten(self, x):
        """Split a flat vector back into a tuple of per-component values."""
        dims = [c.flat_dim for c in self._components]
        splits = np.split(x, np.cumsum(dims)[:-1])
        return tuple(c.unflatten(xi) for (c, xi) in zip(self._components, splits))

    def unflatten_n(self, xs):
        """Split a batch of flat vectors into a list of per-sample tuples."""
        dims = [c.flat_dim for c in self._components]
        splits = np.split(xs, np.cumsum(dims)[:-1], axis=-1)
        per_component = [c.unflatten_n(xi) for (c, xi) in zip(self.components, splits)]
        # Regroup [component][sample] -> [sample][component].
        return list(zip(*per_component))

    def __eq__(self, other):
        return isinstance(other, Product) and (tuple(self.components) == tuple(other.components))

    def __hash__(self):
        return hash(tuple(self.components))
|
# NOTE(review): the following lines are non-Python artifact text that was
# appended to the file (dataset-viewer boilerplate); commented out so the
# module remains importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.