code stringlengths 17 6.64M |
|---|
@U.in_session
def test_dist():
    """Check RunningMeanStd aggregates moments correctly across two MPI ranks."""
    np.random.seed(0)
    p1, p2, p3 = np.random.randn(3, 1), np.random.randn(4, 1), np.random.randn(5, 1)
    q1, q2, q3 = np.random.randn(6, 1), np.random.randn(7, 1), np.random.randn(8, 1)
    comm = MPI.COMM_WORLD
    assert comm.Get_size() == 2
    rank = comm.Get_rank()
    if rank == 0:
        x1, x2, x3 = p1, p2, p3
    elif rank == 1:
        x1, x2, x3 = q1, q2, q3
    else:
        assert False
    rms = RunningMeanStd(epsilon=0.0, shape=(1,))
    U.initialize()
    for chunk in (x1, x2, x3):
        rms.update(chunk)
    # Both ranks together have seen all six arrays.
    bigvec = np.concatenate([p1, p2, p3, q1, q2, q3])

    def checkallclose(x, y):
        print(x, y)
        return np.allclose(x, y)

    assert checkallclose(bigvec.mean(axis=0), rms.mean.eval())
    assert checkallclose(bigvec.std(axis=0), rms.std.eval())
|
class AbstractEnvRunner(ABC):
    """Base class for runners that collect `nsteps` of experience from a vectorized env."""

    def __init__(self, *, env, model, nsteps):
        self.env = env
        self.model = model
        nenv = env.num_envs
        self.batch_ob_shape = (nenv * nsteps,) + env.observation_space.shape
        # Observation buffer; dtype matched to the model's input placeholder.
        self.obs = np.zeros((nenv,) + env.observation_space.shape,
                            dtype=model.train_model.X.dtype.name)
        self.obs[:] = env.reset()
        self.nsteps = nsteps
        self.states = model.initial_state
        self.dones = [False] * nenv

    @abstractmethod
    def run(self):
        raise NotImplementedError
|
class RunningMeanStd(object):
    """Tracks the running mean and variance of a data stream.

    Batches are folded in one at a time with the parallel-moments merge
    formula, so no past data needs to be stored.
    """

    def __init__(self, epsilon=0.0001, shape=()):
        self.mean = np.zeros(shape, 'float64')
        self.var = np.ones(shape, 'float64')
        self.count = epsilon  # small pseudo-count avoids division by zero

    def update(self, x):
        """Fold a batch `x` (first axis = samples) into the running moments."""
        self.update_from_moments(np.mean(x, axis=0), np.var(x, axis=0), x.shape[0])

    def update_from_moments(self, batch_mean, batch_var, batch_count):
        """Merge precomputed batch moments into the running moments."""
        delta = batch_mean - self.mean
        tot_count = self.count + batch_count
        m_a = self.var * self.count
        m_b = batch_var * batch_count
        # Combined second moment: within-group plus between-group contribution.
        m2 = m_a + m_b + np.square(delta) * self.count * batch_count / tot_count
        self.mean = self.mean + delta * batch_count / tot_count
        self.var = m2 / tot_count
        self.count = tot_count
|
def test_runningmeanstd():
    """RunningMeanStd must match numpy's batch mean/var for 1-D and 2-D streams."""
    cases = [
        (np.random.randn(3), np.random.randn(4), np.random.randn(5)),
        (np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)),
    ]
    for x1, x2, x3 in cases:
        rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
        x = np.concatenate([x1, x2, x3], axis=0)
        expected = [x.mean(axis=0), x.var(axis=0)]
        for chunk in (x1, x2, x3):
            rms.update(chunk)
        assert np.allclose(expected, [rms.mean, rms.var])
|
class RunningStat(object):
    """Welford-style running mean/variance accumulator for fixed-shape samples."""

    def __init__(self, shape):
        self._n = 0                # number of samples pushed so far
        self._M = np.zeros(shape)  # running mean
        self._S = np.zeros(shape)  # running sum of squared deviations

    def push(self, x):
        """Incorporate one sample `x`; its shape must match the configured shape."""
        x = np.asarray(x)
        assert x.shape == self._M.shape
        self._n += 1
        if self._n == 1:
            self._M[...] = x
        else:
            prev_mean = self._M.copy()
            self._M[...] = prev_mean + (x - prev_mean) / self._n
            self._S[...] = self._S + (x - prev_mean) * (x - self._M)

    @property
    def n(self):
        return self._n

    @property
    def mean(self):
        return self._M

    @property
    def var(self):
        # Sample variance (ddof=1); with a single sample fall back to mean**2.
        if self._n > 1:
            return self._S / (self._n - 1)
        return np.square(self._M)

    @property
    def std(self):
        return np.sqrt(self.var)

    @property
    def shape(self):
        return self._M.shape
|
def test_running_stat():
    """RunningStat must agree with numpy's mean and ddof=1 variance at every step."""
    for shp in ((), (3,), (3, 4)):
        samples = []
        rs = RunningStat(shp)
        for _ in range(5):
            val = np.random.randn(*shp)
            rs.push(val)
            samples.append(val)
            m = np.mean(samples, axis=0)
            assert np.allclose(rs.mean, m)
            expected_var = (np.square(m) if len(samples) == 1
                            else np.var(samples, ddof=1, axis=0))
            assert np.allclose(rs.var, expected_var)
|
class Schedule(object):
    """Interface for time-dependent scalar schedules."""

    def value(self, t):
        """Return the schedule's value at time `t`."""
        raise NotImplementedError()
|
class ConstantSchedule(object):
    """Schedule whose value is the same constant at every timestep."""

    def __init__(self, value):
        """
        Parameters
        ----------
        value: float
            Constant value of the schedule
        """
        self._v = value

    def value(self, t):
        """See Schedule.value"""
        return self._v
|
def linear_interpolation(l, r, alpha):
    """Linearly blend between `l` (alpha=0) and `r` (alpha=1)."""
    return l + alpha * (r - l)
|
class PiecewiseSchedule(object):
    """Schedule that interpolates between a sorted list of (time, value) endpoints."""

    def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):
        """Piecewise schedule.

        Parameters
        ----------
        endpoints: [(int, float)]
            Pairs `(time, value)`: the schedule outputs `value` when `t == time`.
            Times must be sorted in increasing order.  For `time_a <= t < time_b`
            the output is `interpolation(value_a, value_b, alpha)`, where `alpha`
            is the fraction of the interval that `t` has covered.
        interpolation: lambda float, float, float: float
            Combines the values left and right of `t` given `alpha`.  See
            `linear_interpolation` for an example.
        outside_value: float
            Returned when `t` lies outside every interval.  If None, an
            AssertionError is raised instead.
        """
        times = [e[0] for e in endpoints]
        assert times == sorted(times)
        self._interpolation = interpolation
        self._outside_value = outside_value
        self._endpoints = endpoints

    def value(self, t):
        """See Schedule.value"""
        for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
            if l_t <= t < r_t:
                alpha = float(t - l_t) / (r_t - l_t)
                return self._interpolation(l, r, alpha)
        # t fell outside every segment.
        assert self._outside_value is not None
        return self._outside_value
|
class LinearSchedule(object):
    """Anneals linearly from `initial_p` to `final_p`, then holds `final_p`."""

    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        """Linear interpolation between initial_p and final_p over
        schedule_timesteps.  After that many timesteps, final_p is returned.

        Parameters
        ----------
        schedule_timesteps: int
            Number of timesteps over which to anneal initial_p to final_p
        initial_p: float
            initial output value
        final_p: float
            final output value
        """
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, t):
        """See Schedule.value"""
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)
|
class SegmentTree(object):
    def __init__(self, capacity, operation, neutral_element):
        """Build a Segment Tree data structure.

        https://en.wikipedia.org/wiki/Segment_tree

        Can be used as a regular array, but with two important differences:

          a) setting an item's value is slightly slower: O(lg capacity)
             instead of O(1).
          b) user has access to an efficient ( O(log segment size) )
             `reduce` operation which reduces `operation` over a
             contiguous subsequence of items in the array.

        Parameters
        ----------
        capacity: int
            Total size of the array - must be a power of two.
        operation: lambda obj, obj -> obj
            Operation for combining elements (eg. sum, max); must be
            associative over the set of possible array values.
        neutral_element: obj
            Neutral element for the operation above, eg. float('-inf')
            for max and 0 for sum.
        """
        assert ((capacity > 0) and ((capacity & (capacity - 1)) == 0)), 'capacity must be positive and a power of 2.'
        self._capacity = capacity
        # Implicit 1-indexed binary tree: node i has children 2*i and 2*i+1;
        # the leaves occupy indices [capacity, 2*capacity).
        self._value = [neutral_element for _ in range((2 * capacity))]
        self._operation = operation

    def _reduce_helper(self, start, end, node, node_start, node_end):
        # Recursively reduce over the INCLUSIVE leaf range [start, end];
        # `node` covers the inclusive leaf range [node_start, node_end].
        if ((start == node_start) and (end == node_end)):
            return self._value[node]
        mid = ((node_start + node_end) // 2)
        if (end <= mid):
            # Query lies entirely inside the left child.
            return self._reduce_helper(start, end, (2 * node), node_start, mid)
        elif ((mid + 1) <= start):
            # Query lies entirely inside the right child.
            return self._reduce_helper(start, end, ((2 * node) + 1), (mid + 1), node_end)
        else:
            # Query straddles both children: combine the two partial results.
            return self._operation(self._reduce_helper(start, mid, (2 * node), node_start, mid), self._reduce_helper((mid + 1), end, ((2 * node) + 1), (mid + 1), node_end))

    def reduce(self, start=0, end=None):
        """Return the result of reducing `self._operation` over arr[start:end].

        NOTE: `end` is EXCLUSIVE (it is decremented below before calling the
        inclusive-range helper); `end=None` means the whole array, and a
        negative `end` counts from the back.  This matches the accompanying
        tests (e.g. sum(0, 2) covers indices 0 and 1 only).

        Parameters
        ----------
        start: int
            beginning of the subsequence
        end: int
            end of the subsequence (exclusive)

        Returns
        -------
        reduced: obj
            result of reducing self._operation over the specified range.
        """
        if (end is None):
            end = self._capacity
        if (end < 0):
            end += self._capacity
        end -= 1
        return self._reduce_helper(start, end, 1, 0, (self._capacity - 1))

    def __setitem__(self, idx, val):
        # Set leaf `idx` and recompute every ancestor: O(log capacity).
        idx += self._capacity
        self._value[idx] = val
        idx //= 2
        while (idx >= 1):
            self._value[idx] = self._operation(self._value[(2 * idx)], self._value[((2 * idx) + 1)])
            idx //= 2

    def __getitem__(self, idx):
        # Direct O(1) leaf access.
        assert (0 <= idx < self._capacity)
        return self._value[(self._capacity + idx)]
|
class SumSegmentTree(SegmentTree):
    """Segment tree specialised to addition; supports prefix-sum sampling."""

    def __init__(self, capacity):
        super(SumSegmentTree, self).__init__(
            capacity=capacity, operation=operator.add, neutral_element=0.0)

    def sum(self, start=0, end=None):
        """Return the sum of the array elements in the range [start, end)."""
        return super(SumSegmentTree, self).reduce(start, end)

    def find_prefixsum_idx(self, prefixsum):
        """Return the highest index `i` such that sum(arr[:i]) <= prefixsum.

        With non-negative array values treated as unnormalized probabilities,
        this samples indexes proportionally to their value.
        """
        assert 0 <= prefixsum <= self.sum() + 1e-05
        # Walk down from the root: descend left when the prefix sum fits in
        # the left subtree, otherwise subtract it and descend right.
        node = 1
        while node < self._capacity:
            left = 2 * node
            if self._value[left] > prefixsum:
                node = left
            else:
                prefixsum -= self._value[left]
                node = left + 1
        return node - self._capacity
|
class MinSegmentTree(SegmentTree):
    """Segment tree specialised to the `min` operation."""

    def __init__(self, capacity):
        super(MinSegmentTree, self).__init__(
            capacity=capacity, operation=min, neutral_element=float('inf'))

    def min(self, start=0, end=None):
        """Return the minimum of the array elements in the range [start, end)."""
        return super(MinSegmentTree, self).reduce(start, end)
|
def test_piecewise_schedule():
    """PiecewiseSchedule interpolates inside segments and falls back outside."""
    ps = PiecewiseSchedule([(-5, 100), (5, 200), (10, 50), (100, 50), (200, -50)],
                           outside_value=500)
    expectations = [
        (-10, 500),
        (0, 150),
        (5, 200),
        (9, 80),
        (50, 50),
        (80, 50),
        (150, 0),
        (175, -25),
        (201, 500),
        (500, 500),
        (200 - 1e-10, -50),
    ]
    for t, expected in expectations:
        assert np.isclose(ps.value(t), expected)
|
def test_constant_schedule():
    """ConstantSchedule returns its constant for every timestep."""
    cs = ConstantSchedule(5)
    assert all(np.isclose(cs.value(i), 5) for i in range(-100, 100))
|
def test_tree_set():
    """Range sums over a freshly populated SumSegmentTree."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[3] = 3.0
    for args, expected in [((), 4.0), ((0, 2), 0.0), ((0, 3), 1.0),
                           ((2, 3), 1.0), ((2, -1), 1.0), ((2, 4), 4.0)]:
        assert np.isclose(tree.sum(*args), expected)
|
def test_tree_set_overlap():
    """Re-assigning the same index replaces (not accumulates) the value."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[2] = 3.0
    for args, expected in [((), 3.0), ((2, 3), 3.0), ((2, -1), 3.0),
                           ((2, 4), 3.0), ((1, 2), 0.0)]:
        assert np.isclose(tree.sum(*args), expected)
|
def test_prefixsum_idx():
    """find_prefixsum_idx locates the index covering each prefix sum."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[3] = 3.0
    for prefixsum, idx in [(0.0, 2), (0.5, 2), (0.99, 2),
                           (1.01, 3), (3.0, 3), (4.0, 3)]:
        assert tree.find_prefixsum_idx(prefixsum) == idx
|
def test_prefixsum_idx2():
    """find_prefixsum_idx with every leaf populated."""
    tree = SumSegmentTree(4)
    tree[0] = 0.5
    tree[1] = 1.0
    tree[2] = 1.0
    tree[3] = 3.0
    for prefixsum, idx in [(0.0, 0), (0.55, 1), (0.99, 1),
                           (1.51, 2), (3.0, 3), (5.5, 3)]:
        assert tree.find_prefixsum_idx(prefixsum) == idx
|
def test_max_interval_tree():
    """MinSegmentTree range minima update correctly after point assignments."""
    tree = MinSegmentTree(4)
    tree[0] = 1.0
    tree[2] = 0.5
    tree[3] = 3.0
    for args, expected in [((), 0.5), ((0, 2), 1.0), ((0, 3), 0.5),
                           ((0, -1), 0.5), ((2, 4), 0.5), ((3, 4), 3.0)]:
        assert np.isclose(tree.min(*args), expected)
    tree[2] = 0.7
    for args, expected in [((), 0.7), ((0, 2), 1.0), ((0, 3), 0.7),
                           ((0, -1), 0.7), ((2, 4), 0.7), ((3, 4), 3.0)]:
        assert np.isclose(tree.min(*args), expected)
    tree[2] = 4.0
    for args, expected in [((), 1.0), ((0, 2), 1.0), ((0, 3), 1.0),
                           ((0, -1), 1.0), ((2, 4), 3.0), ((2, 3), 4.0),
                           ((2, -1), 4.0), ((3, 4), 3.0)]:
        assert np.isclose(tree.min(*args), expected)
|
def test_function():
    """`function` substitutes givens for inputs that are not fed explicitly."""
    with tf.Graph().as_default():
        x = tf.placeholder(tf.int32, (), name='x')
        y = tf.placeholder(tf.int32, (), name='y')
        z = 3 * x + 2 * y
        lin = function([x, y], z, givens={y: 0})
        with single_threaded_session():
            initialize()
            # y defaults to 0 via givens unless explicitly supplied.
            assert lin(2) == 6
            assert lin(2, 2) == 10
|
def test_multikwargs():
    """Placeholders with the same short name in different scopes stay distinct."""
    with tf.Graph().as_default():
        x = tf.placeholder(tf.int32, (), name='x')
        with tf.variable_scope('other'):
            x2 = tf.placeholder(tf.int32, (), name='x')
        z = 3 * x + 2 * x2
        lin = function([x, x2], z, givens={x2: 0})
        with single_threaded_session():
            initialize()
            assert lin(2) == 6
            assert lin(2, 2) == 10
|
def switch(condition, then_expression, else_expression):
    """Select between two ops depending on a scalar value (int or bool).

    Both `then_expression` and `else_expression` must be symbolic tensors of
    the *same shape*.

    # Arguments
        condition: scalar tensor.
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    """
    static_shape = copy.copy(then_expression.get_shape())
    chosen = tf.cond(tf.cast(condition, 'bool'),
                     lambda: then_expression,
                     lambda: else_expression)
    # tf.cond loses static shape information; restore it from the then-branch.
    chosen.set_shape(static_shape)
    return chosen
|
def lrelu(x, leak=0.2):
    """Leaky ReLU written as f1*x + f2*|x| so it also works on symbolic tensors."""
    pos_coef = 0.5 * (1 + leak)
    neg_coef = 0.5 * (1 - leak)
    return pos_coef * x + neg_coef * abs(x)
|
def huber_loss(x, delta=1.0):
    """Reference: https://en.wikipedia.org/wiki/Huber_loss"""
    abs_x = tf.abs(x)
    quadratic = 0.5 * tf.square(x)          # used where |x| < delta
    linear = delta * (abs_x - 0.5 * delta)  # used where |x| >= delta
    return tf.where(abs_x < delta, quadratic, linear)
|
def make_session(num_cpu=None, make_default=False, graph=None):
    """Returns a session that will use <num_cpu> CPU's only"""
    if num_cpu is None:
        # Allow an environment override, else use all available cores.
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=num_cpu,
                               intra_op_parallelism_threads=num_cpu)
    tf_config.gpu_options.allocator_type = 'BFC'
    # InteractiveSession installs itself as the default session.
    session_cls = tf.InteractiveSession if make_default else tf.Session
    return session_cls(config=tf_config, graph=graph)
|
def single_threaded_session():
    """Convenience wrapper: a session restricted to a single CPU thread."""
    return make_session(num_cpu=1)
|
def in_session(f):
    """Decorator that runs `f` inside a fresh default TensorFlow session.

    Fix: the wrapper now propagates `f`'s return value; the original called
    `f` but discarded its result, so decorated functions always returned None.
    Returning the value is backward-compatible for callers that ignored it.
    """
    @functools.wraps(f)
    def newfunc(*args, **kwargs):
        with tf.Session():
            return f(*args, **kwargs)
    return newfunc
|
def initialize():
    """Initialize all the uninitialized variables in the global scope.

    Variables already initialized (tracked in ALREADY_INITIALIZED) are left
    untouched, so repeated calls only affect newly created variables.
    """
    fresh = set(tf.global_variables()) - ALREADY_INITIALIZED
    tf.get_default_session().run(tf.variables_initializer(fresh))
    ALREADY_INITIALIZED.update(fresh)
|
def normc_initializer(std=1.0, axis=0):
    """Initializer drawing Gaussian samples rescaled to norm `std` along `axis`."""
    def _initializer(shape, dtype=None, partition_info=None):
        sample = np.random.randn(*shape).astype(np.float32)
        sample *= std / np.sqrt(np.square(sample).sum(axis=axis, keepdims=True))
        return tf.constant(sample)
    return _initializer
|
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad='SAME', dtype=tf.float32, collections=None, summary_tag=None):
    """2-D convolution layer (NHWC) with Glorot-uniform weight init.

    Args:
        x: input tensor, shape [batch, height, width, channels].
        num_filters: number of output channels.
        name: variable scope name.
        filter_size: (height, width) of the kernel.
        stride: (vertical, horizontal) stride.
        pad: padding mode, 'SAME' or 'VALID'.
        dtype: dtype of the filter variable.
        collections: optional variable collections for W and b.
        summary_tag: if not None, emit an image summary of the filters.
    """
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
        # Glorot/Xavier uniform bound from the kernel's fan-in and fan-out.
        fan_in = intprod(filter_shape[:3])
        fan_out = (intprod(filter_shape[:2]) * num_filters)
        w_bound = np.sqrt((6.0 / (fan_in + fan_out)))
        w = tf.get_variable('W', filter_shape, dtype, tf.random_uniform_initializer((- w_bound), w_bound), collections=collections)
        b = tf.get_variable('b', [1, 1, 1, num_filters], initializer=tf.zeros_initializer(), collections=collections)
        if (summary_tag is not None):
            # One grayscale image per (input-channel, filter) slice of the kernel.
            # NOTE(review): `max_images` is the old TF summary kwarg (newer
            # tf.summary.image uses `max_outputs`) — confirm against the
            # pinned TF version.
            tf.summary.image(summary_tag, tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], (- 1), 1]), [2, 0, 1, 3]), max_images=10)
        return (tf.nn.conv2d(x, w, stride_shape, pad) + b)
|
def function(inputs, outputs, updates=None, givens=None):
    """Theano-style function builder on top of TensorFlow.

    Takes placeholders (`inputs`) and expressions computed from them
    (`outputs`) and returns a callable f such that f(*values) feeds the
    placeholders and evaluates the outputs.  Input values can be passed in
    the same order as `inputs` or as kwargs keyed by placeholder name.

    Example:
        x = tf.placeholder(tf.int32, (), name="x")
        y = tf.placeholder(tf.int32, (), name="y")
        z = 3 * x + 2 * y
        lin = function([x, y], z, givens={y: 0})

        with single_threaded_session():
            initialize()
            assert lin(2) == 6
            assert lin(2, 2) == 10

    Parameters
    ----------
    inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
        list of input arguments
    outputs: [tf.Variable] or tf.Variable
        list of outputs or a single output; the return value mirrors this
        structure (list -> list, dict -> same dict type, scalar -> scalar).
    """
    if isinstance(outputs, list):
        return _Function(inputs, outputs, updates, givens=givens)
    if isinstance(outputs, (dict, collections.OrderedDict)):
        f = _Function(inputs, outputs.values(), updates, givens=givens)
        return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
    f = _Function(inputs, [outputs], updates, givens=givens)
    return lambda *args, **kwargs: f(*args, **kwargs)[0]
|
class _Function(object):
    # Callable wrapping a session.run over `outputs`, fed from positional
    # arguments matched against `inputs`, with optional default feeds
    # (`givens`) and grouped update ops executed on every call.
    def __init__(self, inputs, outputs, updates, givens):
        for inpt in inputs:
            # Inputs must be feedable: either expose make_feed_dict, or be a
            # zero-input tf.Tensor (i.e. a placeholder or constant).
            if ((not hasattr(inpt, 'make_feed_dict')) and (not ((type(inpt) is tf.Tensor) and (len(inpt.op.inputs) == 0)))):
                assert False, 'inputs should all be placeholders, constants, or have a make_feed_dict method'
        self.inputs = inputs
        updates = (updates or [])
        self.update_group = tf.group(*updates)
        # The update group is appended so it runs alongside the outputs;
        # its (None) result is stripped off again in __call__.
        self.outputs_update = (list(outputs) + [self.update_group])
        self.givens = ({} if (givens is None) else givens)

    def _feed_input(self, feed_dict, inpt, value):
        # Route a value into the feed dict, honoring a custom make_feed_dict.
        if hasattr(inpt, 'make_feed_dict'):
            feed_dict.update(inpt.make_feed_dict(value))
        else:
            feed_dict[inpt] = value

    def __call__(self, *args):
        assert (len(args) <= len(self.inputs)), 'Too many arguments provided'
        feed_dict = {}
        for (inpt, value) in zip(self.inputs, args):
            self._feed_input(feed_dict, inpt, value)
        # Givens only fill in inputs that were not fed explicitly above.
        for inpt in self.givens:
            feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
        results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:(- 1)]
        return results
|
def var_shape(x):
    """Return `x`'s static shape as a list of ints; all dims must be known."""
    shape = x.get_shape().as_list()
    assert all(isinstance(dim, int) for dim in shape), \
        'shape function assumes that shape is fully known'
    return shape
|
def numel(x):
    """Total number of elements of tensor `x` (product of its static shape)."""
    return intprod(var_shape(x))
|
def intprod(x):
    """Product of the entries of `x`, cast to a plain Python int."""
    return int(np.prod(x))
|
def flatgrad(loss, var_list, clip_norm=None):
    """Gradient of `loss` w.r.t. `var_list`, flattened into a single 1-D tensor.

    Variables with no gradient contribute zeros.  When `clip_norm` is given,
    each per-variable gradient is clipped to that norm individually.

    Fix: skip None entries when clipping — tf.clip_by_norm fails on None,
    while the concat below already substitutes zeros for missing gradients.
    """
    grads = tf.gradients(loss, var_list)
    if clip_norm is not None:
        grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) if grad is not None else None
                 for grad in grads]
    return tf.concat(axis=0, values=[
        tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
        for (v, grad) in zip(var_list, grads)])
|
class SetFromFlat(object):
    """Op that writes a flat parameter vector back into a list of tf variables.

    Fix: the original initialized `assigns = []` twice; the duplicate dead
    assignment is removed.
    """

    def __init__(self, var_list, dtype=tf.float32):
        shapes = list(map(var_shape, var_list))
        total_size = np.sum([intprod(shape) for shape in shapes])
        # Single flat placeholder holding all parameters back to back.
        self.theta = theta = tf.placeholder(dtype, [total_size])
        start = 0
        assigns = []
        for (shape, v) in zip(shapes, var_list):
            size = intprod(shape)
            # Slice this variable's segment out of theta and reshape it back.
            assigns.append(tf.assign(v, tf.reshape(theta[start:(start + size)], shape)))
            start += size
        self.op = tf.group(*assigns)

    def __call__(self, theta):
        tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
|
class GetFlat(object):
    """Op that reads all variables in `var_list` as one flat vector."""

    def __init__(self, var_list):
        flat_pieces = [tf.reshape(v, [numel(v)]) for v in var_list]
        self.op = tf.concat(axis=0, values=flat_pieces)

    def __call__(self):
        return tf.get_default_session().run(self.op)
|
def get_placeholder(name, dtype, shape):
    """Return a cached placeholder by name, creating it on first use.

    Repeated requests for the same name must use the same dtype and shape.
    """
    if name in _PLACEHOLDER_CACHE:
        cached, cached_dtype, cached_shape = _PLACEHOLDER_CACHE[name]
        assert cached_dtype == dtype and cached_shape == shape
        return cached
    out = tf.placeholder(dtype=dtype, shape=shape, name=name)
    _PLACEHOLDER_CACHE[name] = (out, dtype, shape)
    return out
|
def get_placeholder_cached(name):
    """Return the placeholder previously registered under `name`."""
    return _PLACEHOLDER_CACHE[name][0]
|
def flattenallbut0(x):
    """Reshape `x` to 2-D, keeping the batch axis and flattening all the rest."""
    trailing = intprod(x.get_shape().as_list()[1:])
    return tf.reshape(x, [-1, trailing])
|
def display_var_info(vars):
    """Log each variable's name, parameter count and shape, plus the total.

    Optimizer slot variables (Adam moments, beta powers) are skipped
    entirely; biases are counted in the total but not printed.
    """
    from baselines import logger
    count_params = 0
    for v in vars:
        name = v.name
        if '/Adam' in name or 'beta1_power' in name or 'beta2_power' in name:
            continue
        v_params = np.prod(v.shape.as_list())
        count_params += v_params
        if '/b:' in name or '/biases' in name:
            continue
        logger.info(' %s%s %i params %s' % (name, ' ' * (55 - len(name)), v_params, str(v.shape)))
    logger.info('Total model parameters: %0.2f million' % (count_params * 1e-06))
|
class AlreadySteppingError(Exception):
    """
    Raised when an asynchronous step is running while
    step_async() is called again.
    """

    def __init__(self):
        super().__init__('already running an async step')
|
class NotSteppingError(Exception):
    """
    Raised when an asynchronous step is not running but
    step_wait() is called.
    """

    def __init__(self):
        super().__init__('not running an async step')
|
class VecEnv(ABC):
    """An abstract asynchronous, vectorized environment."""

    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space

    @abstractmethod
    def reset(self):
        """Reset all environments and return an array of observations, or a
        tuple of observation arrays.

        If step_async is still doing work, that work is cancelled and
        step_wait() should not be called until step_async() is invoked again.
        """
        pass

    @abstractmethod
    def step_async(self, actions):
        """Start stepping every environment with the given actions.

        Call step_wait() to collect the results.  Must not be called while a
        previous step_async run is still pending.
        """
        pass

    @abstractmethod
    def step_wait(self):
        """Block until the step launched with step_async() completes.

        Returns (obs, rews, dones, infos):
         - obs: an array of observations, or a tuple of observation arrays
         - rews: an array of rewards
         - dones: an array of "episode done" booleans
         - infos: a sequence of info objects
        """
        pass

    @abstractmethod
    def close(self):
        """Clean up the environments' resources."""
        pass

    def step(self, actions):
        """Synchronous convenience wrapper: step_async then step_wait."""
        self.step_async(actions)
        return self.step_wait()

    def render(self):
        logger.warn('Render not defined for %s' % self)

    @property
    def unwrapped(self):
        # Peel off wrappers until the innermost VecEnv is reached.
        return self.venv.unwrapped if isinstance(self, VecEnvWrapper) else self
|
class VecEnvWrapper(VecEnv):
    """Base class for VecEnv wrappers that delegate to an inner VecEnv."""

    def __init__(self, venv, observation_space=None, action_space=None):
        self.venv = venv
        VecEnv.__init__(self,
                        num_envs=venv.num_envs,
                        observation_space=observation_space or venv.observation_space,
                        action_space=action_space or venv.action_space)

    def step_async(self, actions):
        self.venv.step_async(actions)

    @abstractmethod
    def reset(self):
        pass

    @abstractmethod
    def step_wait(self):
        pass

    def close(self):
        return self.venv.close()

    def render(self):
        self.venv.render()
|
class CloudpickleWrapper(object):
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """
    def __init__(self, x):
        # x: arbitrary payload, typically an env-constructor callable.
        self.x = x
    def __getstate__(self):
        # Serialize with cloudpickle so lambdas/closures survive pickling.
        import cloudpickle
        return cloudpickle.dumps(self.x)
    def __setstate__(self, ob):
        # cloudpickle output is ordinary pickle data, so plain pickle loads it.
        import pickle
        self.x = pickle.loads(ob)
|
class DummyVecEnv(VecEnv):
    """VecEnv that steps its environments sequentially in the current process.

    Supports Box observation spaces and Dict spaces whose values are Boxes.
    Fix: `dtype=np.bool` (a deprecated alias removed in NumPy >= 1.24) is
    replaced with the builtin `bool` — behavior is identical.
    """

    def __init__(self, env_fns):
        self.envs = [fn() for fn in env_fns]
        env = self.envs[0]
        VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
        shapes, dtypes = {}, {}
        self.keys = []
        obs_space = env.observation_space
        if isinstance(obs_space, spaces.Dict):
            assert isinstance(obs_space.spaces, OrderedDict)
            for key, box in obs_space.spaces.items():
                assert isinstance(box, spaces.Box)
                shapes[key] = box.shape
                dtypes[key] = box.dtype
                self.keys.append(key)
        else:
            box = obs_space
            assert isinstance(box, spaces.Box)
            # A plain Box space is stored under the single pseudo-key None.
            self.keys = [None]
            shapes, dtypes = {None: box.shape}, {None: box.dtype}
        self.buf_obs = {k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k])
                        for k in self.keys}
        self.buf_dones = np.zeros((self.num_envs,), dtype=bool)
        self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
        self.buf_infos = [{} for _ in range(self.num_envs)]
        self.actions = None

    def step_async(self, actions):
        self.actions = actions

    def step_wait(self):
        """Step every env with the stored actions; auto-reset finished envs."""
        for e in range(self.num_envs):
            obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = \
                self.envs[e].step(self.actions[e])
            if self.buf_dones[e]:
                obs = self.envs[e].reset()
            self._save_obs(e, obs)
        return (self._obs_from_buf(), np.copy(self.buf_rews),
                np.copy(self.buf_dones), self.buf_infos.copy())

    def reset(self):
        for e in range(self.num_envs):
            self._save_obs(e, self.envs[e].reset())
        return self._obs_from_buf()

    def close(self):
        return

    def _save_obs(self, e, obs):
        # Write one env's observation into the shared buffers.
        for k in self.keys:
            if k is None:
                self.buf_obs[k][e] = obs
            else:
                self.buf_obs[k][e] = obs[k]

    def _obs_from_buf(self):
        # Unwrap the pseudo-key for plain Box spaces.
        if self.keys == [None]:
            return self.buf_obs[None]
        else:
            return self.buf_obs
|
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop: serve env commands received over `remote` until 'close'."""
    parent_remote.close()  # the child does not use the parent's end of the pipe
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                # Auto-reset so the batch never contains finished episodes.
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'reset_task':
            remote.send(env.reset_task())
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        else:
            raise NotImplementedError
|
class SubprocVecEnv(VecEnv):
    # VecEnv that runs each environment in its own subprocess, talking to the
    # `worker` loop over multiprocessing pipes.
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False  # True while a step_async is in flight
        self.closed = False
        nenvs = len(env_fns)
        (self.remotes, self.work_remotes) = zip(*[Pipe() for _ in range(nenvs)])
        # env fns are wrapped with CloudpickleWrapper so closures survive pickling.
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            # Daemonize so children don't outlive a crashed parent.
            p.daemon = True
            p.start()
        for remote in self.work_remotes:
            # Parent closes its copies of the worker ends of the pipes.
            remote.close()
        # Query spaces from the first worker; all envs are assumed identical.
        self.remotes[0].send(('get_spaces', None))
        (observation_space, action_space) = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)

    def step_async(self, actions):
        # Dispatch one action to each worker without blocking.
        for (remote, action) in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True

    def step_wait(self):
        # Blocks until every worker has replied.
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        (obs, rews, dones, infos) = zip(*results)
        return (np.stack(obs), np.stack(rews), np.stack(dones), infos)

    def reset(self):
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def close(self):
        if self.closed:
            return
        if self.waiting:
            # Drain any pending step results before shutting workers down.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
|
class VecNormalize(VecEnvWrapper):
    """VecEnv wrapper that normalizes observations and rewards online.

    Observations are standardized with running statistics; rewards are scaled
    by the running std of the discounted return.  Both are clipped.
    """

    def __init__(self, venv, ob=True, ret=True, clipob=10.0, cliprew=10.0, gamma=0.99, epsilon=1e-08):
        VecEnvWrapper.__init__(self, venv)
        self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
        self.ret_rms = RunningMeanStd(shape=()) if ret else None
        self.clipob = clipob
        self.cliprew = cliprew
        self.ret = np.zeros(self.num_envs)  # per-env running discounted return
        self.gamma = gamma
        self.epsilon = epsilon

    def step_wait(self):
        """Step the wrapped envs, then normalize observations and rewards.

        Returns (observations, rewards, news, infos), where 'news' is a
        boolean vector indicating whether each element is new.
        """
        obs, rews, news, infos = self.venv.step_wait()
        self.ret = self.ret * self.gamma + rews
        obs = self._obfilt(obs)
        if self.ret_rms:
            self.ret_rms.update(self.ret)
            rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon),
                           -self.cliprew, self.cliprew)
        return obs, rews, news, infos

    def _obfilt(self, obs):
        # Standardize with running obs statistics, then clip; pass through
        # unchanged when observation normalization is disabled.
        if not self.ob_rms:
            return obs
        self.ob_rms.update(obs)
        return np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon),
                       -self.clipob, self.clipob)

    def reset(self):
        """Reset all environments and return normalized observations."""
        obs = self.venv.reset()
        return self._obfilt(obs)
|
class ActorCritic():
    @store_args
    def __init__(self, inputs_tf, dimo, dimg, dimu, max_u, o_stats, g_stats, hidden, layers, **kwargs):
        """The actor-critic network and related training code.

        Args:
            inputs_tf (dict of tensors): all necessary inputs for the network: the
                observation (o), the goal (g), and the action (u)
            dimo (int): the dimension of the observations
            dimg (int): the dimension of the goals
            dimu (int): the dimension of the actions
            max_u (float): the maximum magnitude of actions; action outputs will be scaled
                accordingly
            o_stats (baselines.her.Normalizer): normalizer for observations
            g_stats (baselines.her.Normalizer): normalizer for goals
            hidden (int): number of hidden units that should be used in hidden layers
            layers (int): number of hidden layers
        """
        # NOTE(review): @store_args presumably copies the ctor args onto self
        # (self.o_stats, self.max_u, ...), since the attributes used below are
        # never assigned explicitly here — confirm against the decorator.
        self.o_tf = inputs_tf['o']
        self.g_tf = inputs_tf['g']
        self.u_tf = inputs_tf['u']
        # Normalize observation and goal inputs before feeding the networks.
        o = self.o_stats.normalize(self.o_tf)
        g = self.g_stats.normalize(self.g_tf)
        input_pi = tf.concat(axis=1, values=[o, g])
        with tf.variable_scope('pi'):
            # Actor: tanh-squashed MLP scaled to the action bound max_u.
            self.pi_tf = (self.max_u * tf.tanh(nn(input_pi, (([self.hidden] * self.layers) + [self.dimu]))))
        with tf.variable_scope('Q'):
            # Critic evaluated at the actor's own action (policy loss path).
            input_Q = tf.concat(axis=1, values=[o, g, (self.pi_tf / self.max_u)])
            self.Q_pi_tf = nn(input_Q, (([self.hidden] * self.layers) + [1]))
            # Critic evaluated at the replayed action (TD loss path);
            # reuse=True shares weights with the network above.
            input_Q = tf.concat(axis=1, values=[o, g, (self.u_tf / self.max_u)])
            self._input_Q = input_Q
            self.Q_tf = nn(input_Q, (([self.hidden] * self.layers) + [1]), reuse=True)
|
def cached_make_env(make_env):
    """Create the env from `make_env` only once, then reuse the cached instance.

    Useful for inferring env properties (e.g. observation and action spaces)
    without constructing a fresh environment every time.
    """
    if make_env not in CACHED_ENVS:
        CACHED_ENVS[make_env] = make_env()
    return CACHED_ENVS[make_env]
|
def prepare_params(kwargs):
    """Normalize an experiment config dict in place and split out DDPG params.

    Mutates `kwargs`: extracted keys are re-inserted with a leading underscore
    (for logging) and the DDPG subset is stored under 'ddpg_params'.
    Returns the mutated `kwargs`.
    """
    ddpg_params = dict()
    env_name = kwargs['env_name']
    def make_env():
        return gym.make(env_name)
    kwargs['make_env'] = make_env
    tmp_env = cached_make_env(kwargs['make_env'])
    assert hasattr(tmp_env, '_max_episode_steps')
    # Episode horizon T comes from gym's TimeLimit wrapper.
    kwargs['T'] = tmp_env._max_episode_steps
    tmp_env.reset()
    kwargs['max_u'] = (np.array(kwargs['max_u']) if (type(kwargs['max_u']) == list) else kwargs['max_u'])
    # Discount chosen so that the effective horizon 1/(1-gamma) equals T.
    kwargs['gamma'] = (1.0 - (1.0 / kwargs['T']))
    if ('lr' in kwargs):
        # A single 'lr' expands into separate actor and critic learning rates.
        kwargs['pi_lr'] = kwargs['lr']
        kwargs['Q_lr'] = kwargs['lr']
        del kwargs['lr']
    for name in ['buffer_size', 'hidden', 'layers', 'network_class', 'polyak', 'batch_size', 'Q_lr', 'pi_lr', 'norm_eps', 'norm_clip', 'max_u', 'action_l2', 'clip_obs', 'scope', 'relative_goals', 'alpha', 'beta0', 'beta_iters', 'eps']:
        # Move DDPG-specific keys out of kwargs, keeping an underscored copy.
        ddpg_params[name] = kwargs[name]
        kwargs[('_' + name)] = kwargs[name]
        del kwargs[name]
    kwargs['ddpg_params'] = ddpg_params
    return kwargs
|
def log_params(params, logger=logger):
    """Log every key/value pair in `params`, sorted by key."""
    for key in sorted(params.keys()):
        logger.info('{}: {}'.format(key, params[key]))
|
def configure_her(params):
    """Configure the HER transition-sampling function.

    Selects between energy-based prioritization ('energy'), TD-error
    prioritized replay ('tderror') and vanilla HER based on
    ``params['prioritization']``.  Consumes ``replay_strategy``/``replay_k``
    from ``params`` (moved to '_'-prefixed keys) and returns the sampler.
    """
    env = cached_make_env(params['make_env'])
    env.reset()
    def reward_fun(ag_2, g, info):
        # Re-compute the environment reward for substituted (HER) goals.
        return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)
    her_params = {'reward_fun': reward_fun}
    for name in ['replay_strategy', 'replay_k']:
        her_params[name] = params[name]
        params[('_' + name)] = her_params[name]
        del params[name]
    if (params['prioritization'] == 'energy'):
        sample_her_transitions = make_sample_her_transitions_energy(**her_params)
    elif (params['prioritization'] == 'tderror'):
        sample_her_transitions = make_sample_her_transitions_prioritized_replay(**her_params)
    else:
        sample_her_transitions = make_sample_her_transitions(**her_params)
    return sample_her_transitions
|
def simple_goal_subtract(a, b):
    """Element-wise goal difference ``a - b``; both arrays must share a shape."""
    assert a.shape == b.shape
    return a - b
|
def configure_ddpg(dims, params, reuse=False, use_mpi=True, clip_return=True):
    """Instantiate the DDPG policy from the prepared ``params``.

    Wires in the HER sampler, return clipping (returns are clipped to
    1 / (1 - gamma) when ``clip_return`` is set) and the prioritization
    settings, then constructs and returns the DDPG agent.
    """
    sample_her_transitions = configure_her(params)
    gamma = params['gamma']
    rollout_batch_size = params['rollout_batch_size']
    ddpg_params = params['ddpg_params']
    temperature = params['temperature']
    prioritization = params['prioritization']
    env_name = params['env_name']
    max_timesteps = params['max_timesteps']
    rank_method = params['rank_method']
    input_dims = dims.copy()
    # DDPG agent
    env = cached_make_env(params['make_env'])
    env.reset()
    ddpg_params.update({'input_dims': input_dims, 'T': params['T'], 'clip_pos_returns': True, 'clip_return': ((1.0 / (1.0 - gamma)) if clip_return else np.inf), 'rollout_batch_size': rollout_batch_size, 'subtract_goals': simple_goal_subtract, 'sample_transitions': sample_her_transitions, 'gamma': gamma, 'temperature': temperature, 'prioritization': prioritization, 'env_name': env_name, 'max_timesteps': max_timesteps, 'rank_method': rank_method})
    ddpg_params['info'] = {'env_name': params['env_name']}
    policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=use_mpi)
    return policy
|
def configure_dims(params):
    """Infer the dimensionalities of observations ('o'), actions ('u'),
    goals ('g') and any ``info_*`` fields by stepping the environment once."""
    env = cached_make_env(params['make_env'])
    env.reset()
    obs, _, _, info = env.step(env.action_space.sample())
    dims = {
        'o': obs['observation'].shape[0],
        'u': env.action_space.shape[0],
        'g': obs['desired_goal'].shape[0],
    }
    for key, value in info.items():
        arr = np.array(value)
        if arr.ndim == 0:
            # Promote scalars to 1-element vectors so everything has a length.
            arr = arr.reshape(1)
        dims['info_{}'.format(key)] = arr.shape[0]
    return dims
|
@click.command()
@click.argument('policy_file', type=str)
@click.option('--seed', type=int, default=0)
@click.option('--n_test_rollouts', type=int, default=20)
@click.option('--render', type=int, default=1)
def main(policy_file, seed, n_test_rollouts, render):
    """Load a pickled policy and evaluate it for ``n_test_rollouts`` episodes,
    dumping the resulting statistics through the logger."""
    set_global_seeds(seed)
    # Load policy.
    # NOTE: pickle.load executes arbitrary code; only open trusted policy files.
    with open(policy_file, 'rb') as f:
        policy = pickle.load(f)
    env_name = policy.info['env_name']
    # Prepare params.
    params = config.DEFAULT_PARAMS
    if (env_name in config.DEFAULT_ENV_PARAMS):
        params.update(config.DEFAULT_ENV_PARAMS[env_name])
    params['env_name'] = env_name
    params = config.prepare_params(params)
    config.log_params(params, logger=logger)
    dims = config.configure_dims(params)
    eval_params = {'exploit': True, 'use_target_net': params['test_with_polyak'], 'compute_Q': True, 'rollout_batch_size': 1, 'render': bool(render)}
    for name in ['T', 'gamma', 'noise_eps', 'random_eps']:
        eval_params[name] = params[name]
    evaluator = RolloutWorker(params['make_env'], policy, dims, logger, **eval_params)
    evaluator.seed(seed)
    # Run evaluation.
    evaluator.clear_history()
    for _ in range(n_test_rollouts):
        evaluator.generate_rollouts()
    # Record logs.
    for (key, val) in evaluator.logs('test'):
        logger.record_tabular(key, np.mean(val))
    logger.dump_tabular()
|
def mpi_average(value):
    """Average ``value`` across MPI workers.

    Scalars are wrapped in a list; an empty list is treated as a single zero.
    """
    values = [0.0] if value == [] else value
    if not isinstance(values, list):
        values = [values]
    return mpi_moments(np.array(values))[0]
|
def train(policy, rollout_worker, evaluator, n_epochs, n_test_rollouts, n_cycles, n_batches, policy_save_interval, save_policies, num_cpu, dump_buffer, w_potential, w_linear, w_rotational, rank_method, clip_energy, **kwargs):
    """Main training loop.

    For each epoch: collect ``n_cycles`` rollouts, run ``n_batches``
    optimization steps per cycle, evaluate for ``n_test_rollouts`` episodes,
    log MPI-averaged statistics, and save best/latest/periodic policies from
    rank 0.
    """
    rank = MPI.COMM_WORLD.Get_rank()
    latest_policy_path = os.path.join(logger.get_dir(), 'policy_latest.pkl')
    best_policy_path = os.path.join(logger.get_dir(), 'policy_best.pkl')
    periodic_policy_path = os.path.join(logger.get_dir(), 'policy_{}.pkl')
    logger.info('Training...')
    best_success_rate = (- 1)
    t = 1
    for epoch in range(n_epochs):
        # Collect experience.
        rollout_worker.clear_history()
        for cycle in range(n_cycles):
            episode = rollout_worker.generate_rollouts()
            policy.store_episode(episode, dump_buffer, w_potential, w_linear, w_rotational, rank_method, clip_energy)
            for batch in range(n_batches):
                # Global step counter, scaled by the number of parallel workers.
                t = (((((epoch * n_cycles) * n_batches) + (cycle * n_batches)) + batch) * num_cpu)
                policy.train(t, dump_buffer)
            policy.update_target_net()
        # Evaluate the current policy.
        evaluator.clear_history()
        for _ in range(n_test_rollouts):
            evaluator.generate_rollouts()
        # Record logs (values averaged over MPI workers).
        logger.record_tabular('epoch', epoch)
        for (key, val) in evaluator.logs('test'):
            logger.record_tabular(key, mpi_average(val))
        for (key, val) in rollout_worker.logs('train'):
            logger.record_tabular(key, mpi_average(val))
        for (key, val) in policy.logs():
            logger.record_tabular(key, mpi_average(val))
        if (rank == 0):
            logger.dump_tabular()
        if dump_buffer:
            # NOTE(review): every rank appears to dump its buffer here —
            # confirm this is intended rather than a rank-0-only dump.
            policy.dump_buffer(epoch)
        # Save the policy if it's at least as good as the previous best.
        success_rate = mpi_average(evaluator.current_success_rate())
        if ((rank == 0) and (success_rate >= best_success_rate) and save_policies):
            best_success_rate = success_rate
            logger.info('New best success rate: {}. Saving policy to {} ...'.format(best_success_rate, best_policy_path))
            evaluator.save_policy(best_policy_path)
            evaluator.save_policy(latest_policy_path)
        if ((rank == 0) and (policy_save_interval > 0) and ((epoch % policy_save_interval) == 0) and save_policies):
            policy_path = periodic_policy_path.format(epoch)
            logger.info('Saving periodic policy to {} ...'.format(policy_path))
            evaluator.save_policy(policy_path)
        # Sanity check: different ranks must have different RNG states.
        local_uniform = np.random.uniform(size=(1,))
        root_uniform = local_uniform.copy()
        MPI.COMM_WORLD.Bcast(root_uniform, root=0)
        if (rank != 0):
            assert (local_uniform[0] != root_uniform[0])
|
def launch(env_name, n_epochs, num_cpu, seed, replay_strategy, policy_save_interval, clip_return, temperature, prioritization, binding, logging, version, dump_buffer, n_cycles, rank_method, w_potential, w_linear, w_rotational, clip_energy, override_params={}, save_policies=True):
    """Fork MPI workers, configure logging/seeds/params, build the DDPG+HER
    policy and rollout workers, then run training.

    NOTE(review): ``override_params={}`` is a mutable default; it is only
    read here, but confirm no caller mutates it.
    """
    # Fork for multi-CPU MPI implementation.
    if (num_cpu > 1):
        whoami = mpi_fork(num_cpu, binding)
        if (whoami == 'parent'):
            sys.exit(0)
        import baselines.common.tf_util as U
        U.single_threaded_session().__enter__()
    rank = MPI.COMM_WORLD.Get_rank()
    # Configure logging.
    if logging:
        # Encode the full hyper-parameter configuration in the log dir name.
        logdir = ((((((((((((((((((((((((((('logs/' + str(env_name)) + '-temperature') + str(temperature)) + '-prioritization') + str(prioritization)) + '-replay_strategy') + str(replay_strategy)) + '-n_epochs') + str(n_epochs)) + '-num_cpu') + str(num_cpu)) + '-seed') + str(seed)) + '-n_cycles') + str(n_cycles)) + '-rank_method') + str(rank_method)) + '-w_potential') + str(w_potential)) + '-w_linear') + str(w_linear)) + '-w_rotational') + str(w_rotational)) + '-clip_energy') + str(clip_energy)) + '-version') + str(version))
    else:
        logdir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f'))
    if (rank == 0):
        if (logdir or (logger.get_dir() is None)):
            logger.configure(dir=logdir)
    else:
        logger.configure()
    logdir = logger.get_dir()
    assert (logdir is not None)
    os.makedirs(logdir, exist_ok=True)
    # Seed everything; each rank gets a distinct seed.
    rank_seed = (seed + (1000000 * rank))
    set_global_seeds(rank_seed)
    # Prepare params.
    params = config.DEFAULT_PARAMS
    params['env_name'] = env_name
    params['replay_strategy'] = replay_strategy
    params['temperature'] = temperature
    params['prioritization'] = prioritization
    params['binding'] = binding
    # NOTE(review): max_timesteps is computed from the *default* n_cycles;
    # the --n_cycles override is applied below, so the two can disagree.
    params['max_timesteps'] = (((n_epochs * params['n_cycles']) * params['n_batches']) * num_cpu)
    params['version'] = version
    params['dump_buffer'] = dump_buffer
    params['n_cycles'] = n_cycles
    params['rank_method'] = rank_method
    params['w_potential'] = w_potential
    params['w_linear'] = w_linear
    params['w_rotational'] = w_rotational
    params['clip_energy'] = clip_energy
    params['n_epochs'] = n_epochs
    params['num_cpu'] = num_cpu
    if params['dump_buffer']:
        # presumably disables the PER priority exponent while dumping — confirm
        params['alpha'] = 0
    if (env_name in config.DEFAULT_ENV_PARAMS):
        params.update(config.DEFAULT_ENV_PARAMS[env_name])
    params.update(**override_params)
    with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as f:
        json.dump(params, f)
    params = config.prepare_params(params)
    config.log_params(params, logger=logger)
    dims = config.configure_dims(params)
    policy = config.configure_ddpg(dims=dims, params=params, clip_return=clip_return)
    rollout_params = {'exploit': False, 'use_target_net': False, 'use_demo_states': True, 'compute_Q': False, 'T': params['T']}
    eval_params = {'exploit': True, 'use_target_net': params['test_with_polyak'], 'use_demo_states': False, 'compute_Q': True, 'T': params['T']}
    for name in ['T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps']:
        rollout_params[name] = params[name]
        eval_params[name] = params[name]
    rollout_worker = RolloutWorker(params['make_env'], policy, dims, logger, **rollout_params)
    rollout_worker.seed(rank_seed)
    evaluator = RolloutWorker(params['make_env'], policy, dims, logger, **eval_params)
    evaluator.seed(rank_seed)
    train(logdir=logdir, policy=policy, rollout_worker=rollout_worker, evaluator=evaluator, n_epochs=n_epochs, n_test_rollouts=params['n_test_rollouts'], n_cycles=params['n_cycles'], n_batches=params['n_batches'], policy_save_interval=policy_save_interval, save_policies=save_policies, num_cpu=num_cpu, dump_buffer=dump_buffer, w_potential=params['w_potential'], w_linear=params['w_linear'], w_rotational=params['w_rotational'], rank_method=rank_method, clip_energy=clip_energy)
|
@click.command()
@click.option('--env_name', type=click.Choice(['FetchPickAndPlace-v0', 'HandManipulateBlockFull-v0', 'HandManipulateEggFull-v0', 'HandManipulatePenRotate-v0']), default='FetchPickAndPlace-v0', help='the name of the OpenAI Gym environment that you want to train on. We tested EBP on four challenging robotic manipulation tasks, including: FetchPickAndPlace-v0, HandManipulateBlockFull-v0, HandManipulateEggFull-v0, HandManipulatePenRotate-v0')
@click.option('--n_epochs', type=int, default=50, help='the number of training epochs to run')
@click.option('--num_cpu', type=int, default=1, help='the number of CPU cores to use (using MPI)')
@click.option('--seed', type=int, default=0, help='the random seed used to seed both the environment and the training code')
@click.option('--policy_save_interval', type=int, default=5, help='the interval with which policy pickles are saved. If set to 0, only the best and latest policy will be pickled.')
@click.option('--replay_strategy', type=click.Choice(['future', 'final', 'none']), default='future', help='the HER replay strategy to be used. "future" uses HER, "none" disables HER.')
@click.option('--clip_return', type=int, default=1, help='whether or not returns should be clipped')
@click.option('--temperature', type=float, default=1.0, help='temperature value for Enery-Based Prioritization (EBP)')
@click.option('--prioritization', type=click.Choice(['none', 'energy', 'tderror']), default='energy', help='the prioritization strategy to be used. "energy" uses EBP; "none" is vanilla HER; tderror is Prioritized Experience Replay.')
@click.option('--binding', type=click.Choice(['none', 'core']), default='core', help='configure mpi using bind-to none or core.')
@click.option('--logging', type=bool, default=False, help='whether or not logging')
@click.option('--version', type=int, default=0, help='version')
@click.option('--dump_buffer', type=bool, default=False, help='dump buffer contains achieved goals, energy, tderrors for analysis')
@click.option('--n_cycles', type=int, default=50, help='n_cycles')
@click.option('--rank_method', type=click.Choice(['none', 'min', 'dense', 'average']), default='none', help='energy ranking method')
@click.option('--w_potential', type=float, default=1.0, help='w_potential')
@click.option('--w_linear', type=float, default=1.0, help='w_linear')
@click.option('--w_rotational', type=float, default=1.0, help='w_rotational')
@click.option('--clip_energy', type=float, default=999, help='clip_energy')
def main(**kwargs):
    """CLI entry point for training: forwards all parsed options to ``launch``."""
    launch(**kwargs)
|
def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):
    """Creates a sample function that can be used for HER experience replay.

    Args:
        replay_strategy (in ['future', 'none']): the HER replay strategy; if set
            to 'none', regular DDPG experience replay is used
        replay_k (int): the ratio between HER replays and regular replays
            (e.g. k = 4 -> 4 times as many HER replays as regular replays)
        reward_fun (function): function to re-compute the reward with
            substituted goals
    """
    if replay_strategy in ('future', 'final'):
        future_p = 1 - (1.0 / (1 + replay_k))
    else:
        future_p = 0

    def _sample_her_transitions(episode_batch, batch_size_in_transitions):
        """episode_batch is {key: array(buffer_size x T x dim_key)}"""
        T = episode_batch['u'].shape[1]
        rollout_batch_size = episode_batch['u'].shape[0]
        batch_size = batch_size_in_transitions
        # Draw random (episode, timestep) pairs.
        episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
        t_samples = np.random.randint(T, size=batch_size)
        transitions = {
            key: episode_batch[key][episode_idxs, t_samples].copy()
            for key in episode_batch.keys()
        }
        # Choose which transitions get their goal substituted (HER).
        her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
        future_offset = (np.random.uniform(size=batch_size) * (T - t_samples)).astype(int)
        future_t = (t_samples + 1 + future_offset)[her_indexes]
        if replay_strategy == 'final':
            future_t[:] = T
        # Replace goals with achieved goals from the future of the same episode.
        transitions['g'][her_indexes] = episode_batch['ag'][episode_idxs[her_indexes], future_t]
        # Re-compute rewards for the substituted goals.
        info = {key.replace('info_', ''): value
                for key, value in transitions.items() if key.startswith('info_')}
        reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
        reward_params['info'] = info
        transitions['r'] = reward_fun(**reward_params)
        transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])
                       for k in transitions.keys()}
        assert transitions['u'].shape[0] == batch_size_in_transitions
        return transitions

    return _sample_her_transitions
|
def make_sample_her_transitions_energy(replay_strategy, replay_k, reward_fun):
    """Creates a HER sampler with Energy-Based Prioritization (EBP).

    Episodes are drawn with probability proportional to their trajectory
    energy ('e', or rank-transformed 'p') raised to 1 / (temperature + 0.01);
    the rest mirrors standard HER goal substitution.
    """
    if replay_strategy in ('future', 'final'):
        future_p = 1 - (1.0 / (1 + replay_k))
    else:
        future_p = 0

    def _sample_her_transitions(episode_batch, batch_size_in_transitions, rank_method, temperature, update_stats=False):
        """episode_batch is {key: array(buffer_size x T x dim_key)}"""
        T = episode_batch['u'].shape[1]
        rollout_batch_size = episode_batch['u'].shape[0]
        batch_size = batch_size_in_transitions
        episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
        t_samples = np.random.randint(T, size=batch_size)
        if not update_stats:
            # Prioritized episode selection: raw energies ('e') or ranked ('p').
            source = 'e' if rank_method == 'none' else 'p'
            energy_trajectory = episode_batch[source]
            p_trajectory = np.power(energy_trajectory, 1 / (temperature + 0.01))
            p_trajectory = p_trajectory / p_trajectory.sum()
            episode_idxs = np.random.choice(rollout_batch_size, size=batch_size,
                                            replace=True, p=p_trajectory.flatten())
        transitions = {}
        for key in episode_batch.keys():
            # 'p', 's' and 'e' are per-episode bookkeeping, not transition data.
            if key not in ('p', 's', 'e'):
                transitions[key] = episode_batch[key][(episode_idxs, t_samples)].copy()
        # HER goal substitution.
        her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
        future_offset = (np.random.uniform(size=batch_size) * (T - t_samples)).astype(int)
        future_t = (t_samples + 1 + future_offset)[her_indexes]
        if replay_strategy == 'final':
            future_t[:] = T
        future_ag = episode_batch['ag'][(episode_idxs[her_indexes], future_t)]
        transitions['g'][her_indexes] = future_ag
        # Re-compute rewards for the substituted goals.
        info = {key.replace('info_', ''): value
                for key, value in transitions.items() if key.startswith('info_')}
        reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
        reward_params['info'] = info
        transitions['r'] = reward_fun(**reward_params)
        transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])
                       for k in transitions.keys()}
        assert transitions['u'].shape[0] == batch_size_in_transitions
        return transitions

    return _sample_her_transitions
|
def make_sample_her_transitions_prioritized_replay(replay_strategy, replay_k, reward_fun):
    """Creates a HER sampler combined with Prioritized Experience Replay (PER).

    The returned closure has signature
    ``(self, episode_batch, batch_size_in_transitions, beta)`` where ``self``
    is the replay buffer carrying the priority sum/min trees, and returns
    ``(transitions, weights, idxs)``: sampled transitions, importance-sampling
    weights and the flat transition indices used for priority updates.
    """
    if (replay_strategy == 'future') or (replay_strategy == 'final'):
        future_p = 1 - (1.0 / (1 + replay_k))
    else:
        future_p = 0

    def _sample_proportional(self, rollout_batch_size, batch_size, T):
        # Draw (episode, timestep) indices with probability proportional to the
        # priorities stored in the segment tree `self._it_sum`.
        episode_idxs = []
        t_samples = []
        for _ in range(batch_size):
            self.n_transitions_stored = min(self.n_transitions_stored, self.size_in_transitions)
            mass = random.random() * self._it_sum.sum(0, self.n_transitions_stored - 1)
            idx = self._it_sum.find_prefixsum_idx(mass)
            assert idx < self.n_transitions_stored
            episode_idx = idx // T
            assert episode_idx < rollout_batch_size
            t_sample = idx % T
            episode_idxs.append(episode_idx)
            t_samples.append(t_sample)
        return (episode_idxs, t_samples)

    def _sample_her_transitions(self, episode_batch, batch_size_in_transitions, beta):
        """episode_batch is {key: array(buffer_size x T x dim_key)}"""
        T = episode_batch['u'].shape[1]
        rollout_batch_size = episode_batch['u'].shape[0]
        batch_size = batch_size_in_transitions
        if rollout_batch_size < self.current_size:
            # Sampling over a subset of the buffer: fall back to uniform.
            episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
            t_samples = np.random.randint(T, size=batch_size)
        else:
            assert beta >= 0
            (episode_idxs, t_samples) = _sample_proportional(self, rollout_batch_size, batch_size, T)
            episode_idxs = np.array(episode_idxs)
            t_samples = np.array(t_samples)
        # Importance-sampling weights, normalized by the maximum weight so
        # they only scale updates down.
        weights = []
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * self.n_transitions_stored) ** (- beta)
        for (episode_idx, t_sample) in zip(episode_idxs, t_samples):
            p_sample = self._it_sum[(episode_idx * T) + t_sample] / self._it_sum.sum()
            weight = (p_sample * self.n_transitions_stored) ** (- beta)
            weights.append(weight / max_weight)
        weights = np.array(weights)
        transitions = {}
        for key in episode_batch.keys():
            # 'td' (TD errors) and 'e' (energies) are bookkeeping, not data.
            if key not in ('td', 'e'):
                episode_batch_key = episode_batch[key].copy()
                transitions[key] = episode_batch_key[(episode_idxs, t_samples)].copy()
        # HER goal substitution.
        her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
        future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
        future_offset = future_offset.astype(int)
        future_t = ((t_samples + 1) + future_offset)[her_indexes]
        if replay_strategy == 'final':
            future_t[:] = T
        future_ag = episode_batch['ag'][(episode_idxs[her_indexes], future_t)]
        # Substitute goals first, then compute rewards exactly once.
        # (The original computed `info`/`reward_params` twice, once before the
        # goal substitution; that first computation was dead code and has been
        # removed — the second computation always overwrote it.)
        transitions['g'][her_indexes] = future_ag
        info = {}
        for (key, value) in transitions.items():
            if key.startswith('info_'):
                info[key.replace('info_', '')] = value
        reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
        reward_params['info'] = info
        transitions['r'] = reward_fun(**reward_params)
        transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()}
        assert transitions['u'].shape[0] == batch_size_in_transitions
        idxs = (episode_idxs * T) + t_samples
        return (transitions, weights, idxs)

    return _sample_her_transitions
|
class Normalizer():
    """Maintains running mean/std statistics (synchronized across MPI workers)
    and normalizes inputs to be approximately zero-mean, unit-variance."""

    def __init__(self, size, eps=0.01, default_clip_range=np.inf, sess=None):
        """A normalizer that ensures that observations are approximately
        distributed according to a standard Normal distribution (i.e. have
        mean zero and variance one).

        Args:
            size (int): the size of the observation to be normalized
            eps (float): a small constant that avoids underflows
            default_clip_range (float): normalized observations are clipped to be in
                [-default_clip_range, default_clip_range]
            sess (object): the TensorFlow session to be used
        """
        self.size = size
        self.eps = eps
        self.default_clip_range = default_clip_range
        self.sess = (sess if (sess is not None) else tf.get_default_session())
        # Local, per-worker accumulators; folded into the shared TF variables
        # by recompute_stats().
        self.local_sum = np.zeros(self.size, np.float32)
        self.local_sumsq = np.zeros(self.size, np.float32)
        self.local_count = np.zeros(1, np.float32)
        self.sum_tf = tf.get_variable(initializer=tf.zeros_initializer(), shape=self.local_sum.shape, name='sum', trainable=False, dtype=tf.float32)
        self.sumsq_tf = tf.get_variable(initializer=tf.zeros_initializer(), shape=self.local_sumsq.shape, name='sumsq', trainable=False, dtype=tf.float32)
        self.count_tf = tf.get_variable(initializer=tf.ones_initializer(), shape=self.local_count.shape, name='count', trainable=False, dtype=tf.float32)
        self.mean = tf.get_variable(initializer=tf.zeros_initializer(), shape=(self.size,), name='mean', trainable=False, dtype=tf.float32)
        self.std = tf.get_variable(initializer=tf.ones_initializer(), shape=(self.size,), name='std', trainable=False, dtype=tf.float32)
        self.count_pl = tf.placeholder(name='count_pl', shape=(1,), dtype=tf.float32)
        self.sum_pl = tf.placeholder(name='sum_pl', shape=(self.size,), dtype=tf.float32)
        self.sumsq_pl = tf.placeholder(name='sumsq_pl', shape=(self.size,), dtype=tf.float32)
        self.update_op = tf.group(self.count_tf.assign_add(self.count_pl), self.sum_tf.assign_add(self.sum_pl), self.sumsq_tf.assign_add(self.sumsq_pl))
        # std = sqrt(max(eps^2, E[x^2] - E[x]^2)); the eps floor prevents a
        # near-zero std from blowing up normalize().
        self.recompute_op = tf.group(tf.assign(self.mean, (self.sum_tf / self.count_tf)), tf.assign(self.std, tf.sqrt(tf.maximum(tf.square(self.eps), ((self.sumsq_tf / self.count_tf) - tf.square((self.sum_tf / self.count_tf)))))))
        self.lock = threading.Lock()

    def update(self, v):
        """Accumulate a batch ``v`` into the local (un-synchronized) statistics."""
        v = v.reshape((- 1), self.size)
        with self.lock:
            self.local_sum += v.sum(axis=0)
            self.local_sumsq += np.square(v).sum(axis=0)
            self.local_count[0] += v.shape[0]

    def normalize(self, v, clip_range=None):
        """Return ``(v - mean) / std`` clipped to [-clip_range, clip_range]."""
        if (clip_range is None):
            clip_range = self.default_clip_range
        mean = reshape_for_broadcasting(self.mean, v)
        std = reshape_for_broadcasting(self.std, v)
        return tf.clip_by_value(((v - mean) / std), (- clip_range), clip_range)

    def denormalize(self, v):
        """Inverse of normalize (without clipping): ``mean + v * std``."""
        mean = reshape_for_broadcasting(self.mean, v)
        std = reshape_for_broadcasting(self.std, v)
        return (mean + (v * std))

    def _mpi_average(self, x):
        # Allreduce-sum across workers, then divide by the world size.
        buf = np.zeros_like(x)
        MPI.COMM_WORLD.Allreduce(x, buf, op=MPI.SUM)
        buf /= MPI.COMM_WORLD.Get_size()
        return buf

    def synchronize(self, local_sum, local_sumsq, local_count, root=None):
        """Average the local statistics across all MPI workers (in place)."""
        local_sum[...] = self._mpi_average(local_sum)
        local_sumsq[...] = self._mpi_average(local_sumsq)
        local_count[...] = self._mpi_average(local_count)
        return (local_sum, local_sumsq, local_count)

    def recompute_stats(self):
        """Fold local accumulators into the shared TF statistics and recompute
        mean/std."""
        with self.lock:
            # Copy and reset the local buffers atomically; the MPI exchange
            # happens outside the lock to keep the critical section short.
            local_count = self.local_count.copy()
            local_sum = self.local_sum.copy()
            local_sumsq = self.local_sumsq.copy()
            self.local_count[...] = 0
            self.local_sum[...] = 0
            self.local_sumsq[...] = 0
        (synced_sum, synced_sumsq, synced_count) = self.synchronize(local_sum=local_sum, local_sumsq=local_sumsq, local_count=local_count)
        self.sess.run(self.update_op, feed_dict={self.count_pl: synced_count, self.sum_pl: synced_sum, self.sumsq_pl: synced_sumsq})
        self.sess.run(self.recompute_op)
|
class IdentityNormalizer():
    """Normalizer-compatible stub that only rescales by a fixed std."""

    def __init__(self, size, std=1.0):
        self.size = size
        self.mean = tf.zeros(self.size, tf.float32)
        self.std = (std * tf.ones(self.size, tf.float32))

    def update(self, x):
        # Stateless: nothing to accumulate.
        pass

    def normalize(self, x, clip_range=None):
        # `clip_range` is accepted for interface compatibility but ignored.
        return (x / self.std)

    def denormalize(self, x):
        return (self.std * x)

    def synchronize(self):
        pass

    def recompute_stats(self):
        pass
|
def store_args(method):
    """Decorator that stores the wrapped method's arguments as instance
    attributes (defaults included) before invoking it."""
    argspec = inspect.getfullargspec(method)
    defaults = {}
    if argspec.defaults is not None:
        defaults = dict(zip(argspec.args[-len(argspec.defaults):], argspec.defaults))
    if argspec.kwonlydefaults is not None:
        defaults.update(argspec.kwonlydefaults)
    arg_names = argspec.args[1:]

    @functools.wraps(method)
    def wrapper(*positional_args, **keyword_args):
        self = positional_args[0]
        # Start from the defaults, then overlay positional and keyword args.
        args = defaults.copy()
        args.update(zip(arg_names, positional_args[1:]))
        args.update(keyword_args)
        self.__dict__.update(args)
        return method(*positional_args, **keyword_args)

    return wrapper
|
def import_function(spec):
    """Import a function identified by a string like "pkg.module:fn_name"."""
    mod_name, fn_name = spec.split(':')
    return getattr(importlib.import_module(mod_name), fn_name)
|
def flatten_grads(var_list, grads):
    """Concatenate all gradients into a single flat (1-D) tensor."""
    flat_pieces = [tf.reshape(grad, [U.numel(v)]) for v, grad in zip(var_list, grads)]
    return tf.concat(flat_pieces, 0)
|
def nn(input, layers_sizes, reuse=None, flatten=False, name=''):
    """Builds a fully-connected network: ReLU on hidden layers, linear output."""
    last = len(layers_sizes) - 1
    for i, size in enumerate(layers_sizes):
        input = tf.layers.dense(inputs=input, units=size,
                                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                reuse=reuse, name=name + '_' + str(i))
        if i < last:
            input = tf.nn.relu(input)
    if flatten:
        assert layers_sizes[-1] == 1
        input = tf.reshape(input, [-1])
    return input
|
def install_mpi_excepthook():
    """Install a sys.excepthook that aborts the whole MPI job on any uncaught
    exception, so one crashed worker does not leave the rest hanging."""
    import sys
    from mpi4py import MPI
    old_hook = sys.excepthook
    def new_hook(a, b, c):
        # Run the original hook first so the traceback is printed and flushed
        # before the job is torn down.
        old_hook(a, b, c)
        sys.stdout.flush()
        sys.stderr.flush()
        MPI.COMM_WORLD.Abort()
    sys.excepthook = new_hook
|
def mpi_fork(n, binding='core'):
    """Re-launches the current script with workers.

    Returns "parent" for the original parent process and "child" for MPI
    children (including the trivial n <= 1 case).
    """
    if n <= 1:
        return 'child'
    if os.getenv('IN_MPI') is not None:
        # We are already inside an mpirun launch: register the abort hook.
        install_mpi_excepthook()
        return 'child'
    env = os.environ.copy()
    env.update(MKL_NUM_THREADS='1', OMP_NUM_THREADS='1', IN_MPI='1')
    args = ['mpirun', '-np', str(n)]
    if platform.system() != 'Darwin':
        # The '-bind-to' flag is only passed on non-macOS platforms.
        args += ['-bind-to', binding]
    args += ['-allow-run-as-root', sys.executable]
    args += sys.argv
    subprocess.check_call(args, env=env)
    return 'parent'
|
def convert_episode_to_batch_major(episode):
    """Converts an episode to have the batch dimension in the major (first)
    dimension."""
    return {key: np.array(value).copy().swapaxes(0, 1)
            for key, value in episode.items()}
|
def transitions_in_episode_batch(episode_batch):
    """Number of transitions in a given episode batch (batch * horizon)."""
    num_episodes, horizon = episode_batch['u'].shape[:2]
    return num_episodes * horizon
|
def reshape_for_broadcasting(source, target):
    """Reshape and cast ``source`` to shape [1, ..., 1, -1] so that it
    broadcasts against ``target``."""
    rank = len(target.get_shape())
    broadcast_shape = [1] * (rank - 1) + [-1]
    return tf.reshape(tf.cast(source, target.dtype), broadcast_shape)
|
class KVWriter(object):
    """Interface for writers that consume key/value diagnostic dicts."""

    def writekvs(self, kvs):
        """Write one dict of key/value pairs.  Must be overridden."""
        raise NotImplementedError
|
class SeqWriter(object):
    """Interface for writers that consume plain sequences of strings."""

    def writeseq(self, seq):
        """Write one sequence of strings.  Must be overridden."""
        raise NotImplementedError
|
class HumanOutputFormat(KVWriter, SeqWriter):
    """Pretty-prints key/value tables and raw sequences to a text stream."""

    def __init__(self, filename_or_file):
        if isinstance(filename_or_file, str):
            self.file = open(filename_or_file, 'wt')
            self.own_file = True
        else:
            assert hasattr(filename_or_file, 'read'), ('expected file or str, got %s' % filename_or_file)
            self.file = filename_or_file
            self.own_file = False

    def writekvs(self, kvs):
        # Render each value: floats get compact %g formatting, the rest str().
        key2str = {}
        for key, val in sorted(kvs.items()):
            rendered = ('%-8.3g' % (val,)) if isinstance(val, float) else str(val)
            key2str[self._truncate(key)] = self._truncate(rendered)
        if not key2str:
            print('WARNING: tried to write empty key-value dict')
            return
        # Lay out a box-drawn table padded to the widest key/value.
        keywidth = max(map(len, key2str.keys()))
        valwidth = max(map(len, key2str.values()))
        dashes = '-' * (keywidth + valwidth + 7)
        lines = [dashes]
        for key, val in sorted(key2str.items()):
            lines.append('| %s%s | %s%s |' % (key, ' ' * (keywidth - len(key)), val, ' ' * (valwidth - len(val))))
        lines.append(dashes)
        self.file.write('\n'.join(lines) + '\n')
        self.file.flush()

    def _truncate(self, s):
        # Keep cell contents at most 23 characters wide.
        return (s[:20] + '...') if len(s) > 23 else s

    def writeseq(self, seq):
        for arg in seq:
            self.file.write(arg)
        self.file.write('\n')
        self.file.flush()

    def close(self):
        # Only close streams this object opened itself.
        if self.own_file:
            self.file.close()
|
class JSONOutputFormat(KVWriter):
    """Writes each key/value dict as one JSON object per line."""

    def __init__(self, filename):
        self.file = open(filename, 'wt')

    def writekvs(self, kvs):
        for key, val in sorted(kvs.items()):
            if hasattr(val, 'dtype'):
                # Numpy scalars are not JSON-serializable; unwrap to a float.
                val = val.tolist()
                kvs[key] = float(val)
        self.file.write(json.dumps(kvs) + '\n')
        self.file.flush()

    def close(self):
        self.file.close()
|
class CSVOutputFormat(KVWriter):
    """Appends key/value dicts as CSV rows, rewriting the entire file whenever
    a previously-unseen key widens the header."""

    def __init__(self, filename):
        # 'w+t' so the file can be re-read and rewritten when columns change.
        self.file = open(filename, 'w+t')
        self.keys = []
        self.sep = ','

    def writekvs(self, kvs):
        # NOTE(review): extra_keys is a set; with more than one new key the
        # resulting column order is arbitrary.
        extra_keys = (kvs.keys() - self.keys)
        if extra_keys:
            # New columns appeared: extend the header, then rewrite the file,
            # padding every existing row with empty cells for the new columns.
            self.keys.extend(extra_keys)
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            for (i, k) in enumerate(self.keys):
                if (i > 0):
                    self.file.write(',')
                self.file.write(k)
            self.file.write('\n')
            for line in lines[1:]:
                self.file.write(line[:(- 1)])
                self.file.write((self.sep * len(extra_keys)))
                self.file.write('\n')
        # Append the current row; missing keys become empty cells.
        for (i, k) in enumerate(self.keys):
            if (i > 0):
                self.file.write(',')
            v = kvs.get(k)
            if (v is not None):
                self.file.write(str(v))
        self.file.write('\n')
        self.file.flush()

    def close(self):
        self.file.close()
|
class TensorBoardOutputFormat(KVWriter):
    """Dumps key/value pairs into TensorBoard's numeric format."""

    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        # Monotonically increasing global step attached to each event.
        self.step = 1
        prefix = 'events'
        path = osp.join(osp.abspath(dir), prefix)
        # Imports are local so TensorFlow is only required when this output
        # format is actually used.
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat
        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))

    def writekvs(self, kvs):
        def summary_val(k, v):
            # Each key becomes a scalar summary value.
            kwargs = {'tag': k, 'simple_value': float(v)}
            return self.tf.Summary.Value(**kwargs)
        summary = self.tf.Summary(value=[summary_val(k, v) for (k, v) in kvs.items()])
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = self.step
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1

    def close(self):
        if self.writer:
            self.writer.Close()
            self.writer = None
|
def make_output_format(format, ev_dir, log_suffix=''):
    """Build the writer for ``format``, creating ``ev_dir`` if needed.

    Raises ValueError for unknown format names.
    """
    os.makedirs(ev_dir, exist_ok=True)
    if format == 'stdout':
        return HumanOutputFormat(sys.stdout)
    if format == 'log':
        return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
    if format == 'json':
        return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
    if format == 'csv':
        return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
    if format == 'tensorboard':
        return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
    raise ValueError('Unknown format specified: %s' % (format,))
|
def logkv(key, val):
    """Log a value of some diagnostic.

    Call this once for each diagnostic quantity, each iteration.
    If called many times, the last value will be used.
    """
    Logger.CURRENT.logkv(key, val)
|
def logkv_mean(key, val):
    """The same as logkv(), but if called many times, values are averaged."""
    Logger.CURRENT.logkv_mean(key, val)
|
def logkvs(d):
    """Log every key/value pair of the dictionary ``d``."""
    for key, value in d.items():
        logkv(key, value)
|
def dumpkvs():
    """Write all of the diagnostics from the current iteration.

    level: int. (see logger.py docs) If the global logger level is higher than
    the level argument here, don't print to stdout.
    """
    Logger.CURRENT.dumpkvs()
|
def getkvs():
    """Return the current logger's pending key/value dict."""
    return Logger.CURRENT.name2val
|
def log(*args, level=INFO):
    """Write the sequence of args, with no separators, to the console and
    output files (if you've configured an output file)."""
    Logger.CURRENT.log(*args, level=level)
|
def debug(*args):
    """Log `args` at DEBUG level."""
    log(*args, level=DEBUG)

def info(*args):
    """Log `args` at INFO level."""
    log(*args, level=INFO)

def warn(*args):
    """Log `args` at WARN level."""
    log(*args, level=WARN)

def error(*args):
    """Log `args` at ERROR level."""
    log(*args, level=ERROR)
|
def set_level(level):
    """Set the logging threshold on the current logger."""
    Logger.CURRENT.set_level(level)
|
def get_dir():
    """Get the directory that log files are being written to.

    Will be None if there is no output directory (i.e., if you didn't call
    start).
    """
    return Logger.CURRENT.get_dir()
|
class ProfileKV():
    """Context manager that accumulates the wall-clock time spent in its body
    under the key 'wait_<n>' of the current logger.

    Usage:
        with logger.ProfileKV("interesting_scope"):
            code
    """

    def __init__(self, n):
        self.n = ('wait_' + n)

    def __enter__(self):
        self.t1 = time.time()

    def __exit__(self, type, value, traceback):
        # Relies on Logger.CURRENT.name2val being a defaultdict-like mapping.
        Logger.CURRENT.name2val[self.n] += (time.time() - self.t1)
|
# NOTE: the lines below are non-code residue from the page this file was
# extracted from (dataset-viewer boilerplate); commented out so the module
# remains importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.