code stringlengths 17 6.64M |
|---|
def atari_arg_parser():
    """Build the argparse.ArgumentParser used by run_atari.py."""
    p = arg_parser()
    p.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')
    p.add_argument('--seed', help='RNG seed', type=int, default=0)
    p.add_argument('--num-timesteps', type=int, default=int(1e7))
    return p
|
def mujoco_arg_parser():
    """Build the argparse.ArgumentParser used by run_mujoco.py."""
    p = arg_parser()
    p.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
    p.add_argument('--seed', help='RNG seed', type=int, default=0)
    p.add_argument('--num-timesteps', type=int, default=int(1e6))
    return p
|
def robotics_arg_parser():
    """Build the argparse.ArgumentParser for the robotics (Fetch) environments."""
    p = arg_parser()
    p.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
    p.add_argument('--seed', help='RNG seed', type=int, default=0)
    p.add_argument('--num-timesteps', type=int, default=int(1e6))
    return p
|
def fmt_row(width, row, header=False):
    """Format `row` as ' | '-separated cells of `width`; a header row gets a dashed underline."""
    formatted = ' | '.join(fmt_item(cell, width) for cell in row)
    if not header:
        return formatted
    return formatted + '\n' + '-' * len(formatted)
|
def fmt_item(x, l):
    """Render `x` right-justified in a field of width `l`.

    Floats use scientific notation when their magnitude is outside
    [1e-4, 1e4] (and nonzero), fixed-point otherwise; 0-d numpy arrays
    are unwrapped to their scalar first.
    """
    if isinstance(x, np.ndarray):
        assert x.ndim == 0
        x = x.item()
    if isinstance(x, (float, np.float32, np.float64)):
        magnitude = abs(x)
        needs_exp = magnitude > 0 and (magnitude < 0.0001 or magnitude > 10000.0)
        rep = ('%7.2e' % x) if needs_exp else ('%7.5f' % x)
    else:
        rep = str(x)
    return ' ' * (l - len(rep)) + rep
|
def colorize(string, color, bold=False, highlight=False):
    """Wrap `string` in ANSI escape codes for `color` (looked up in color2num).

    `highlight` shifts to the background-color code range (+10); `bold`
    adds the bold attribute.
    """
    codes = []
    base = color2num[color]
    if highlight:
        base += 10
    codes.append(str(base))
    if bold:
        codes.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(codes), string)
|
@contextmanager
def timed(msg):
    """Context manager that prints `msg`, times the enclosed block, and prints
    the elapsed seconds on exit. Nested uses indent via the global MESSAGE_DEPTH.

    Fix: the depth decrement now sits in a `finally` clause, so an exception
    inside the managed block no longer leaves MESSAGE_DEPTH permanently
    incremented (which would mis-indent every later timer). The "done in"
    line is still only printed on a successful exit, as before.
    """
    global MESSAGE_DEPTH
    print(colorize('\t' * MESSAGE_DEPTH + '=: ' + msg, color='magenta'))
    tstart = time.time()
    MESSAGE_DEPTH += 1
    try:
        yield
    finally:
        MESSAGE_DEPTH -= 1
    print(colorize('\t' * MESSAGE_DEPTH + ('done in %.3f seconds' % (time.time() - tstart)), color='magenta'))
|
class Dataset(object):
    """Minibatch iterator over a dict of arrays that share a leading axis.

    With deterministic=True, shuffling is disabled and batches come out in
    array order; otherwise every pass applies a fresh random permutation
    (the same permutation to every array, keeping rows aligned).
    """
    def __init__(self, data_map, deterministic=False, shuffle=True):
        self.data_map = data_map
        self.deterministic = deterministic
        self.enable_shuffle = shuffle
        # Leading-axis length of any array defines the dataset size.
        self.n = next(iter(data_map.values())).shape[0]
        self._next_id = 0
        self.shuffle()
    def shuffle(self):
        """Permute all arrays in lockstep; no-op in deterministic mode.

        NOTE: in deterministic mode _next_id is deliberately left untouched,
        matching the non-deterministic reset only happening after a permute.
        """
        if self.deterministic:
            return
        perm = np.arange(self.n)
        np.random.shuffle(perm)
        for key in self.data_map:
            self.data_map[key] = self.data_map[key][perm]
        self._next_id = 0
    def next_batch(self, batch_size):
        """Return the next (possibly short, at the end) batch as a dict of slices."""
        if self._next_id >= self.n and self.enable_shuffle:
            self.shuffle()
        start = self._next_id
        take = min(batch_size, self.n - start)
        self._next_id += take
        return {key: arr[start:start + take] for key, arr in self.data_map.items()}
    def iterate_once(self, batch_size):
        """Yield full-size batches covering one pass, then reset the cursor."""
        if self.enable_shuffle:
            self.shuffle()
        while self._next_id <= self.n - batch_size:
            yield self.next_batch(batch_size)
        self._next_id = 0
    def subset(self, num_elements, deterministic=True):
        """Return a new Dataset over the first `num_elements` rows of every array."""
        sliced = {key: arr[:num_elements] for key, arr in self.data_map.items()}
        return Dataset(sliced, deterministic)
|
def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True, include_final_partial_batch=True):
    """Yield aligned minibatch tuples from several same-length arrays.

    Exactly one of `num_batches` / `batch_size` must be given. With
    `batch_size`, the last batch may be short unless
    `include_final_partial_batch` is False.
    """
    assert (num_batches is None) != (batch_size is None), 'Provide num_batches or batch_size, but not both'
    arrays = tuple(np.asarray(a) for a in arrays)
    n = arrays[0].shape[0]
    assert all(a.shape[0] == n for a in arrays[1:])
    inds = np.arange(n)
    if shuffle:
        np.random.shuffle(inds)
    if num_batches is None:
        # Split points every batch_size rows (np.array_split takes boundaries).
        sections = np.arange(0, n, batch_size)[1:]
    else:
        sections = num_batches
    for batch_inds in np.array_split(inds, sections):
        if include_final_partial_batch or len(batch_inds) == batch_size:
            yield tuple(a[batch_inds] for a in arrays)
|
class Filter(object):
    """Base class for observation filters: callable, optionally stateful."""
    def __call__(self, x, update=True):
        # Subclasses must implement the actual transformation.
        raise NotImplementedError
    def reset(self):
        """Clear internal state; default filters are stateless, so no-op."""
        pass
|
class IdentityFilter(Filter):
    """Filter that passes its input through unchanged."""
    def __call__(self, x, update=True):
        return x
|
class CompositionFilter(Filter):
    """Apply a sequence of filters left-to-right."""
    def __init__(self, fs):
        self.fs = fs
    def __call__(self, x, update=True):
        result = x
        for f in self.fs:
            result = f(result)
        return result
    def output_shape(self, input_space):
        """Chain each filter's output_shape to get the final shape."""
        shape = input_space.shape
        for f in self.fs:
            shape = f.output_shape(shape)
        return shape
|
class ZFilter(Filter):
    """Normalize observations to y = (x - mean) / std using running estimates.

    Optionally skips demeaning/destd, and clips the result to [-clip, clip].
    """
    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        self.demean = demean
        self.destd = destd
        self.clip = clip
        self.rs = RunningStat(shape)
    def __call__(self, x, update=True):
        # Push into the running stats first so the sample itself contributes.
        if update:
            self.rs.push(x)
        out = x
        if self.demean:
            out = out - self.rs.mean
        if self.destd:
            out = out / (self.rs.std + 1e-08)
        if self.clip:
            out = np.clip(out, -self.clip, self.clip)
        return out
    def output_shape(self, input_space):
        return input_space.shape
|
class AddClock(Filter):
    """Append a scaled step counter (count / 100) as an extra observation dim.

    NOTE(review): `count` is reset but never incremented here — presumably a
    caller advances it externally; confirm before relying on the clock value.
    """
    def __init__(self):
        self.count = 0
    def reset(self):
        self.count = 0
    def __call__(self, x, update=True):
        return np.append(x, self.count / 100.0)
    def output_shape(self, input_space):
        # One extra scalar appended to a 1-D observation.
        return (input_space.shape[0] + 1,)
|
class FlattenFilter(Filter):
    """Flatten any observation array to 1-D."""
    def __call__(self, x, update=True):
        return x.ravel()
    def output_shape(self, input_space):
        return (int(np.prod(input_space.shape)),)
|
class Ind2OneHotFilter(Filter):
    """Convert an integer index into a length-n one-hot vector."""
    def __init__(self, n):
        self.n = n
    def __call__(self, x, update=True):
        onehot = np.zeros(self.n)
        onehot[x] = 1
        return onehot
    def output_shape(self, input_space):
        return (input_space.n,)
|
class DivFilter(Filter):
    """Scale observations by a fixed divisor (e.g. pixel values / 255)."""
    def __init__(self, divisor):
        self.divisor = divisor
    def __call__(self, x, update=True):
        return x / self.divisor
    def output_shape(self, input_space):
        return input_space.shape
|
class StackFilter(Filter):
    """Concatenate the most recent `length` observations along the last axis."""
    def __init__(self, length):
        self.stack = deque(maxlen=length)
    def reset(self):
        self.stack.clear()
    def __call__(self, x, update=True):
        self.stack.append(x)
        # Right after reset the deque is short: pad with copies of x so the
        # output always has the full stacked width.
        while len(self.stack) < self.stack.maxlen:
            self.stack.append(x)
        return np.concatenate(self.stack, axis=-1)
    def output_shape(self, input_space):
        stacked_width = input_space.shape[-1] * self.stack.maxlen
        return input_space.shape[:-1] + (stacked_width,)
|
def discount(x, gamma):
    """Compute discounted sums along the 0th dimension of x.

    Returns y with the same shape as x, satisfying
        y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k*x[t+k],
    where k = len(x) - t - 1. Implemented as a reversed linear filter.
    """
    assert x.ndim >= 1
    reversed_x = x[::-1]
    return scipy.signal.lfilter([1], [1, -gamma], reversed_x, axis=0)[::-1]
|
def explained_variance(ypred, y):
    """Fraction of y's variance explained by ypred: 1 - Var[y-ypred] / Var[y].

    Interpretation: ev=1 → perfect prediction; ev=0 → no better than
    predicting zero; ev<0 → worse than predicting zero. Returns NaN when
    Var[y] is zero (constant targets).
    """
    assert y.ndim == 1 and ypred.ndim == 1
    vary = np.var(y)
    if vary == 0:
        return np.nan
    return 1 - np.var(y - ypred) / vary
|
def explained_variance_2d(ypred, y):
    """Column-wise explained variance: 1 - Var[y-ypred] / Var[y] per column.

    Columns whose target variance is (near) zero get 0 instead of a
    division blow-up.

    Fix: the residual variance is now computed per column (`axis=0`);
    previously `np.var(y - ypred)` collapsed over ALL elements, so every
    column was compared against a single pooled residual variance,
    producing wrong per-column scores.
    """
    assert y.ndim == 2 and ypred.ndim == 2
    vary = np.var(y, axis=0)
    out = 1 - np.var(y - ypred, axis=0) / vary
    out[vary < 1e-10] = 0
    return out
|
def ncc(ypred, y):
    """Pearson correlation coefficient between ypred and y (off-diagonal of corrcoef)."""
    corr_matrix = np.corrcoef(ypred, y)
    return corr_matrix[1, 0]
|
def flatten_arrays(arrs):
    """Flatten each array and concatenate them into one 1-D array."""
    flat_pieces = [np.ravel(a) for a in arrs]
    return np.concatenate(flat_pieces)
|
def unflatten_vector(vec, shapes):
    """Inverse of flatten_arrays: slice `vec` into arrays of the given shapes."""
    arrs = []
    offset = 0
    for shape in shapes:
        size = int(np.prod(shape))
        arrs.append(vec[offset:offset + size].reshape(shape))
        offset += size
    return arrs
|
def discount_with_boundaries(X, New, gamma):
    """Discounted sums of X along axis 0, zeroing the carry at episode starts.

    X: 2d array of floats, time x features
    New: array of 0/1 flags, 1 where a new episode begins (discount chain is
         cut at those timesteps).
    """
    Y = np.zeros_like(X)
    T = X.shape[0]
    Y[T - 1] = X[T - 1]
    # Backward pass: drop the discounted tail whenever the next step starts a new episode.
    for t in reversed(range(T - 1)):
        Y[t] = X[t] + gamma * Y[t + 1] * (1 - New[t + 1])
    return Y
|
def test_discount_with_boundaries():
    """Hand-computed check: discounting must stop at the episode boundary."""
    gamma = 0.9
    x = np.array([1.0, 2.0, 3.0, 4.0], 'float32')
    episode_starts = [1.0, 0.0, 0.0, 1.0]
    y = discount_with_boundaries(x, episode_starts, gamma)
    expected = [1 + gamma * 2 + gamma ** 2 * 3, 2 + gamma * 3, 3, 4]
    assert np.allclose(y, expected)
|
def zipsame(*seqs):
    """zip() that asserts every sequence has the same length as the first."""
    expected_len = len(seqs[0])
    assert all(len(seq) == expected_len for seq in seqs[1:])
    return zip(*seqs)
|
def unpack(seq, sizes):
    """Unpack 'seq' into a sequence of lists, with lengths specified by 'sizes'.
    None = just one bare element, not a list

    Example:
    unpack([1,2,3,4,5,6], [3,None,2]) -> ([1,2,3], 4, [5,6])
    """
    seq = list(seq)
    total = sum(1 if s is None else s for s in sizes)
    assert total == len(seq), 'Trying to unpack %s into %s' % (seq, sizes)
    it = iter(seq)
    for size in sizes:
        if size is None:
            yield next(it)
        else:
            yield [next(it) for _ in range(size)]
|
class EzPickle(object):
    """Objects that are pickled and unpickled via their constructor arguments.

    Example usage:

        class Dog(Animal, EzPickle):
            def __init__(self, furcolor, tailkind="bushy"):
                Animal.__init__()
                EzPickle.__init__(furcolor, tailkind)

    On unpickle, a new instance is constructed by re-calling the constructor
    with the stored args/kwargs. Mainly needed for environments wrapping
    C/C++ state (e.g. MuJoCo, Atari) that cannot be pickled directly.
    """
    def __init__(self, *args, **kwargs):
        self._ezpickle_args = args
        self._ezpickle_kwargs = kwargs
    def __getstate__(self):
        return {'_ezpickle_args': self._ezpickle_args, '_ezpickle_kwargs': self._ezpickle_kwargs}
    def __setstate__(self, d):
        # Rebuild via the constructor, then adopt the fresh object's state.
        rebuilt = type(self)(*d['_ezpickle_args'], **d['_ezpickle_kwargs'])
        self.__dict__.update(rebuilt.__dict__)
|
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
|
def pretty_eta(seconds_left):
    """Convert a number of seconds to a human readable ETA string.

    Examples: "2 days", "2 hours and 37 minutes", "less than a minute".
    Only the two most significant units are shown; anything under a
    minute collapses to "less than a minute".

    Parameters
    ----------
    seconds_left: int
        Number of seconds to be converted to the ETA
    Returns
    -------
    eta: str
        String representing the pretty ETA.
    """
    minutes_left, seconds_left = divmod(seconds_left, 60)
    hours_left, minutes_left = divmod(minutes_left, 60)
    days_left, hours_left = divmod(hours_left, 24)
    def fmt(cnt, name):
        suffix = 's' if cnt > 1 else ''
        return '{} {}{}'.format(str(cnt), name, suffix)
    if days_left > 0:
        msg = fmt(days_left, 'day')
        if hours_left > 0:
            msg += ' and ' + fmt(hours_left, 'hour')
        return msg
    if hours_left > 0:
        msg = fmt(hours_left, 'hour')
        if minutes_left > 0:
            msg += ' and ' + fmt(minutes_left, 'minute')
        return msg
    if minutes_left > 0:
        return fmt(minutes_left, 'minute')
    return 'less than a minute'
|
class RunningAvg(object):
    """Exponentially-weighted running estimate of a scalar quantity.

    Like a mean, but more sensitive to recent updates; gamma close to 0
    is the most sensitive, close to 1 the most sluggish.
    """
    def __init__(self, gamma, init_value=None):
        """
        Parameters
        ----------
        gamma: float
            Smoothing factor in [0, 1]; 0 is most sensitive to recent changes.
        init_value: float or None
            Initial estimate; if None, the first update sets it.
        """
        self._value = init_value
        self._gamma = gamma
    def update(self, new_val):
        """Fold a newly observed value into the estimate."""
        if self._value is None:
            self._value = new_val
        else:
            g = self._gamma
            self._value = g * self._value + (1.0 - g) * new_val
    def __float__(self):
        """Current estimate as a float."""
        return self._value
|
def boolean_flag(parser, name, default=False, help=None):
    """Add a paired boolean flag to an argparse parser.

    `--<name>` sets the destination True, `--no-<name>` sets it False;
    dashes in `name` become underscores in the attribute name.

    Parameters
    ----------
    parser: argparse.Parser
        parser to add the flag to
    name: str
        flag name (without leading dashes)
    default: bool or None
        value used when neither flag is passed
    help: str
        help string for the positive flag
    """
    dest = name.replace('-', '_')
    parser.add_argument('--' + name, action='store_true', default=default, dest=dest, help=help)
    parser.add_argument('--no-' + name, action='store_false', dest=dest)
|
def get_wrapper_by_name(env, classname):
    """Walk a (possibly multiply-wrapped) gym env and return the wrapper whose
    class_name() equals `classname`; raise ValueError if none is found.

    Parameters
    ----------
    env: gym.Env or gym.Wrapper
        gym environment (wrapped any number of times)
    classname: str
        name of the wrapper to find

    Returns
    -------
    wrapper: gym.Wrapper
        the matching wrapper
    """
    current = env
    while True:
        if classname == current.class_name():
            return current
        if not isinstance(current, gym.Wrapper):
            raise ValueError("Couldn't find wrapper named %s" % classname)
        current = current.env
|
def relatively_safe_pickle_dump(obj, path, compression=False):
    """Pickle `obj` to `path` without ever leaving a corrupted pickle behind.

    The data is written to a temporary sibling file first and then renamed
    into place, so (given atomic filesystem rename) a pre-existing file at
    `path` survives any mid-write failure. A stale `.relatively_safe` temp
    file may be left over; it is overwritten on the next call.

    Parameters
    ----------
    obj: object
        object to pickle
    path: str
        path to the output file
    compression: bool
        if true the pickle is stored zip-deflated under entry name 'data'
    """
    tmp_path = path + '.relatively_safe'
    if compression:
        # Pickle to a scratch file, then zip that file into the temp target.
        with tempfile.NamedTemporaryFile() as uncompressed_file:
            pickle.dump(obj, uncompressed_file)
            uncompressed_file.file.flush()
            with zipfile.ZipFile(tmp_path, 'w', compression=zipfile.ZIP_DEFLATED) as archive:
                archive.write(uncompressed_file.name, 'data')
    else:
        with open(tmp_path, 'wb') as f:
            pickle.dump(obj, f)
    os.rename(tmp_path, path)
|
def pickle_load(path, compression=False):
    """Unpickle a file written by relatively_safe_pickle_dump.

    Parameters
    ----------
    path: str
        path to the pickle file
    compression: bool
        if true, treat the file as a zip archive with the pickle under 'data'

    Returns
    -------
    obj: object
        the unpickled object
    """
    if not compression:
        with open(path, 'rb') as f:
            return pickle.load(f)
    with zipfile.ZipFile(path, 'r', compression=zipfile.ZIP_DEFLATED) as archive:
        with archive.open('data') as f:
            return pickle.load(f)
|
class MpiAdam(object):
    """Adam optimizer whose gradients are summed/averaged across MPI workers.

    Each worker supplies a flat local gradient; `update` Allreduce-sums the
    gradients (optionally dividing by the worker count) and applies one
    standard Adam step to a flat view of `var_list` on every worker, so all
    workers take identical steps.
    """
    def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None):
        self.var_list = var_list
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.scale_grad_by_procs = scale_grad_by_procs
        # Total number of scalar parameters across all variables.
        size = sum((U.numel(v) for v in var_list))
        # First (m) and second (v) moment estimates, flat across all variables.
        self.m = np.zeros(size, 'float32')
        self.v = np.zeros(size, 'float32')
        # Step counter used for Adam bias correction.
        self.t = 0
        self.setfromflat = U.SetFromFlat(var_list)
        self.getflat = U.GetFlat(var_list)
        self.comm = (MPI.COMM_WORLD if (comm is None) else comm)
    def update(self, localg, stepsize):
        """Apply one Adam step using this worker's flat gradient `localg`."""
        # Periodically verify all workers still hold identical parameters.
        if ((self.t % 100) == 0):
            self.check_synced()
        localg = localg.astype('float32')
        globalg = np.zeros_like(localg)
        # Sum gradients from every worker; optionally average them.
        self.comm.Allreduce(localg, globalg, op=MPI.SUM)
        if self.scale_grad_by_procs:
            globalg /= self.comm.Get_size()
        self.t += 1
        # Bias-corrected effective step size (standard Adam).
        a = ((stepsize * np.sqrt((1 - (self.beta2 ** self.t)))) / (1 - (self.beta1 ** self.t)))
        self.m = ((self.beta1 * self.m) + ((1 - self.beta1) * globalg))
        self.v = ((self.beta2 * self.v) + ((1 - self.beta2) * (globalg * globalg)))
        step = (((- a) * self.m) / (np.sqrt(self.v) + self.epsilon))
        self.setfromflat((self.getflat() + step))
    def sync(self):
        """Broadcast rank 0's parameters so all workers start identical."""
        theta = self.getflat()
        self.comm.Bcast(theta, root=0)
        self.setfromflat(theta)
    def check_synced(self):
        """Assert every worker's parameters match rank 0's (rank 0 broadcasts)."""
        if (self.comm.Get_rank() == 0):
            theta = self.getflat()
            self.comm.Bcast(theta, root=0)
        else:
            thetalocal = self.getflat()
            thetaroot = np.empty_like(thetalocal)
            self.comm.Bcast(thetaroot, root=0)
            assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)
|
@U.in_session
def test_MpiAdam():
    """Compare MpiAdam against tf.train.AdamOptimizer on a small toy loss.

    Runs 10 steps with TF's Adam, re-initializes the variables under the same
    seed, then runs 10 steps with MpiAdam; the printed per-step losses should
    agree between the two runs.
    """
    np.random.seed(0)
    tf.set_random_seed(0)
    a = tf.Variable(np.random.randn(3).astype('float32'))
    b = tf.Variable(np.random.randn(2, 5).astype('float32'))
    loss = (tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b)))
    stepsize = 0.01
    update_op = tf.train.AdamOptimizer(stepsize).minimize(loss)
    do_update = U.function([], loss, updates=[update_op])
    tf.get_default_session().run(tf.global_variables_initializer())
    # Reference run: TF's built-in Adam.
    for i in range(10):
        print(i, do_update())
    # Re-initialize variables to the same values for the MpiAdam run.
    tf.set_random_seed(0)
    tf.get_default_session().run(tf.global_variables_initializer())
    var_list = [a, b]
    # NOTE(review): `updates=[update_op]` here also applies TF's Adam step on
    # every call, in addition to adam.update below — confirm against upstream
    # whether that double update is intended.
    lossandgrad = U.function([], [loss, U.flatgrad(loss, var_list)], updates=[update_op])
    adam = MpiAdam(var_list)
    for i in range(10):
        (l, g) = lossandgrad()
        adam.update(g, stepsize)
        print(i, l)
|
def mpi_fork(n, bind_to_core=False):
    """Re-launches the current script under mpirun with `n` workers.

    Returns "parent" in the original launching process (after the workers
    finish) and "child" in each MPI worker; with n <= 1 no relaunch happens
    and "child" is returned directly. The IN_MPI env var marks relaunched
    processes so they don't fork again.
    """
    if n <= 1:
        return 'child'
    if os.getenv('IN_MPI') is not None:
        return 'child'
    env = os.environ.copy()
    # Pin BLAS/OpenMP to one thread per worker and tag children via IN_MPI.
    env.update(MKL_NUM_THREADS='1', OMP_NUM_THREADS='1', IN_MPI='1')
    cmd = ['mpirun', '-np', str(n)]
    if bind_to_core:
        cmd += ['-bind-to', 'core']
    cmd += [sys.executable] + sys.argv
    subprocess.check_call(cmd, env=env)
    return 'parent'
|
def mpi_mean(x, axis=0, comm=None, keepdims=False):
    """Mean of `x` along `axis` pooled across all MPI workers.

    Returns (mean, total_count) where total_count is the summed length of
    `axis` over all workers.
    """
    x = np.asarray(x)
    assert x.ndim > 0
    if comm is None:
        comm = MPI.COMM_WORLD
    xsum = x.sum(axis=axis, keepdims=keepdims)
    n = xsum.size
    # Pack the local sum and the local count into one buffer so a single
    # Allreduce moves both.
    localsum = np.zeros(n + 1, x.dtype)
    localsum[:n] = xsum.ravel()
    localsum[n] = x.shape[axis]
    globalsum = np.zeros_like(localsum)
    comm.Allreduce(localsum, globalsum, op=MPI.SUM)
    pooled_mean = globalsum[:n].reshape(xsum.shape) / globalsum[n]
    return pooled_mean, globalsum[n]
|
def mpi_moments(x, axis=0, comm=None, keepdims=False):
    """Mean, std, and total count of `x` along `axis` pooled across MPI workers."""
    x = np.asarray(x)
    assert x.ndim > 0
    mean, count = mpi_mean(x, axis=axis, comm=comm, keepdims=True)
    # Std from the pooled mean of squared deviations.
    sqdiffs = np.square(x - mean)
    meansqdiff, count1 = mpi_mean(sqdiffs, axis=axis, comm=comm, keepdims=True)
    assert count1 == count
    std = np.sqrt(meansqdiff)
    if not keepdims:
        squeezed_shape = mean.shape[:axis] + mean.shape[axis + 1:]
        mean = mean.reshape(squeezed_shape)
        std = std.reshape(squeezed_shape)
    return mean, std, count
|
def test_runningmeanstd():
    """Run the MPI running-mean-std helper across 3 mpirun workers."""
    import subprocess
    cmd = ['mpirun', '-np', '3', 'python', '-c', 'from baselines.common.mpi_moments import _helper_runningmeanstd; _helper_runningmeanstd()']
    subprocess.check_call(cmd)
|
def _helper_runningmeanstd():
    """Worker body: mpi_moments over this rank's chunk must match numpy on the
    concatenation of all ranks' chunks (run under mpirun -np 3)."""
    comm = MPI.COMM_WORLD
    np.random.seed(0)
    cases = [
        ((np.random.randn(3), np.random.randn(4), np.random.randn(5)), 0),
        ((np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)), 0),
        ((np.random.randn(2, 3), np.random.randn(2, 4), np.random.randn(2, 4)), 1),
    ]
    for triple, axis in cases:
        x = np.concatenate(triple, axis=axis)
        ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]]
        ms2 = mpi_moments(triple[comm.Get_rank()], axis=axis)
        for a1, a2 in zipsame(ms1, ms2):
            print(a1, a2)
            assert np.allclose(a1, a2)
        print('ok!')
|
class RunningMeanStd(object):
    """MPI-aware running mean/std tracked in TensorFlow variables.

    Keeps running sums (`_sum`, `_sumsq`, `_count`) as non-trainable float64
    TF variables. `update` Allreduce-sums batch statistics over
    MPI.COMM_WORLD before folding them in, so every worker shares the same
    normalization statistics.
    """
    def __init__(self, epsilon=0.01, shape=()):
        self._sum = tf.get_variable(dtype=tf.float64, shape=shape, initializer=tf.constant_initializer(0.0), name='runningsum', trainable=False)
        self._sumsq = tf.get_variable(dtype=tf.float64, shape=shape, initializer=tf.constant_initializer(epsilon), name='runningsumsq', trainable=False)
        self._count = tf.get_variable(dtype=tf.float64, shape=(), initializer=tf.constant_initializer(epsilon), name='count', trainable=False)
        self.shape = shape
        self.mean = tf.to_float((self._sum / self._count))
        # std = sqrt(E[x^2] - E[x]^2), with the variance estimate floored at
        # 0.01 before the sqrt to avoid degenerate/negative values.
        self.std = tf.sqrt(tf.maximum((tf.to_float((self._sumsq / self._count)) - tf.square(self.mean)), 0.01))
        newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
        newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
        newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
        # Single op that increments all three accumulators at once.
        self.incfiltparams = U.function([newsum, newsumsq, newcount], [], updates=[tf.assign_add(self._sum, newsum), tf.assign_add(self._sumsq, newsumsq), tf.assign_add(self._count, newcount)])
    def update(self, x):
        """Fold a batch `x` (leading axis = batch) into the running statistics,
        summed across all MPI workers."""
        x = x.astype('float64')
        n = int(np.prod(self.shape))
        # Pack [sum, sum-of-squares, count] into one vector -> one Allreduce.
        totalvec = np.zeros(((n * 2) + 1), 'float64')
        addvec = np.concatenate([x.sum(axis=0).ravel(), np.square(x).sum(axis=0).ravel(), np.array([len(x)], dtype='float64')])
        MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
        self.incfiltparams(totalvec[0:n].reshape(self.shape), totalvec[n:(2 * n)].reshape(self.shape), totalvec[(2 * n)])
|
@U.in_session
def test_runningmeanstd():
    """The TF RunningMeanStd must match numpy mean/std of the pooled data."""
    cases = [
        (np.random.randn(3), np.random.randn(4), np.random.randn(5)),
        (np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)),
    ]
    for x1, x2, x3 in cases:
        rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
        U.initialize()
        combined = np.concatenate([x1, x2, x3], axis=0)
        expected = [combined.mean(axis=0), combined.std(axis=0)]
        for chunk in (x1, x2, x3):
            rms.update(chunk)
        actual = [rms.mean.eval(), rms.std.eval()]
        assert np.allclose(expected, actual)
|
@U.in_session
def test_dist():
    """Two-worker MPI check: RunningMeanStd must equal pooled numpy stats."""
    np.random.seed(0)
    p1, p2, p3 = (np.random.randn(3, 1), np.random.randn(4, 1), np.random.randn(5, 1))
    q1, q2, q3 = (np.random.randn(6, 1), np.random.randn(7, 1), np.random.randn(8, 1))
    comm = MPI.COMM_WORLD
    assert comm.Get_size() == 2
    # Rank 0 gets the p-chunks, rank 1 the q-chunks.
    if comm.Get_rank() == 0:
        x1, x2, x3 = p1, p2, p3
    elif comm.Get_rank() == 1:
        x1, x2, x3 = q1, q2, q3
    else:
        assert False
    rms = RunningMeanStd(epsilon=0.0, shape=(1,))
    U.initialize()
    for chunk in (x1, x2, x3):
        rms.update(chunk)
    bigvec = np.concatenate([p1, p2, p3, q1, q2, q3])
    def checkallclose(x, y):
        print(x, y)
        return np.allclose(x, y)
    assert checkallclose(bigvec.mean(axis=0), rms.mean.eval())
    assert checkallclose(bigvec.std(axis=0), rms.std.eval())
|
class AbstractEnvRunner(ABC):
    """Base for runners that step a vectorized env with a model for nsteps.

    Holds the current observations, recurrent states, and done flags;
    subclasses implement `run` to collect a rollout batch.
    """
    def __init__(self, *, env, model, nsteps):
        self.env = env
        self.model = model
        nenv = env.num_envs
        obs_shape = env.observation_space.shape
        # Shape of a flattened rollout batch: (nenv * nsteps, *obs_shape).
        self.batch_ob_shape = (nenv * nsteps,) + obs_shape
        self.obs = np.zeros((nenv,) + obs_shape, dtype=model.train_model.X.dtype.name)
        self.obs[:] = env.reset()
        self.nsteps = nsteps
        self.states = model.initial_state
        self.dones = [False] * nenv
    @abstractmethod
    def run(self):
        raise NotImplementedError
|
class RunningMeanStd(object):
    """Running mean/variance over a stream of batches (parallel-variance merge).

    Uses Chan et al.'s pairwise update to combine the stored moments with
    each incoming batch's moments; `count` starts at `epsilon` to avoid
    division by zero before the first update.
    """
    def __init__(self, epsilon=0.0001, shape=()):
        self.mean = np.zeros(shape, 'float64')
        self.var = np.ones(shape, 'float64')
        self.count = epsilon
    def update(self, x):
        """Fold a batch `x` (leading axis = batch) into the running moments."""
        self.update_from_moments(np.mean(x, axis=0), np.var(x, axis=0), x.shape[0])
    def update_from_moments(self, batch_mean, batch_var, batch_count):
        """Merge precomputed batch moments into the running moments."""
        delta = batch_mean - self.mean
        total = self.count + batch_count
        # Combine second moments about the merged mean.
        m_a = self.var * self.count
        m_b = batch_var * batch_count
        m2 = m_a + m_b + np.square(delta) * self.count * batch_count / total
        self.mean = self.mean + delta * batch_count / total
        self.var = m2 / total
        self.count = total
|
def test_runningmeanstd():
    """Incremental RunningMeanStd must match numpy over the concatenated data."""
    cases = [
        (np.random.randn(3), np.random.randn(4), np.random.randn(5)),
        (np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)),
    ]
    for x1, x2, x3 in cases:
        rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
        combined = np.concatenate([x1, x2, x3], axis=0)
        expected = [combined.mean(axis=0), combined.var(axis=0)]
        for chunk in (x1, x2, x3):
            rms.update(chunk)
        assert np.allclose(expected, [rms.mean, rms.var])
|
class RunningStat(object):
    """Welford-style running mean/variance over samples pushed one at a time.

    `var` uses the unbiased (n-1) estimator once n > 1; with a single sample
    it falls back to mean**2, matching the original baselines behavior.
    """
    def __init__(self, shape):
        self._n = 0
        self._M = np.zeros(shape)  # running mean
        self._S = np.zeros(shape)  # running sum of squared deviations
    def push(self, x):
        """Incorporate one sample of the configured shape."""
        x = np.asarray(x)
        assert x.shape == self._M.shape
        self._n += 1
        if self._n == 1:
            self._M[...] = x
            return
        prev_mean = self._M.copy()
        self._M[...] = prev_mean + (x - prev_mean) / self._n
        self._S[...] = self._S + (x - prev_mean) * (x - self._M)
    @property
    def n(self):
        return self._n
    @property
    def mean(self):
        return self._M
    @property
    def var(self):
        if self._n > 1:
            return self._S / (self._n - 1)
        return np.square(self._M)
    @property
    def std(self):
        return np.sqrt(self.var)
    @property
    def shape(self):
        return self._M.shape
|
def test_running_stat():
    """After each push, RunningStat must agree with numpy over the samples so far."""
    for shp in ((), (3,), (3, 4)):
        pushed = []
        rs = RunningStat(shp)
        for _ in range(5):
            val = np.random.randn(*shp)
            rs.push(val)
            pushed.append(val)
            m = np.mean(pushed, axis=0)
            assert np.allclose(rs.mean, m)
            v = np.square(m) if len(pushed) == 1 else np.var(pushed, ddof=1, axis=0)
            assert np.allclose(rs.var, v)
|
class Schedule(object):
    """Interface: map a timestep t to a scalar schedule value."""
    def value(self, t):
        """Value of the schedule at time t."""
        raise NotImplementedError()
|
class ConstantSchedule(object):
    """Schedule whose value never changes.

    Parameters
    ----------
    value: float
        Constant value returned for every t.
    """
    def __init__(self, value):
        self._v = value
    def value(self, t):
        """See Schedule.value."""
        return self._v
|
def linear_interpolation(l, r, alpha):
    """Interpolate from l toward r by fraction alpha (alpha=0 -> l, alpha=1 -> r)."""
    return l + alpha * (r - l)
|
class PiecewiseSchedule(object):
    """Schedule defined by (time, value) endpoints with interpolation between them."""
    def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):
        """
        endpoints: [(int, int)]
            (time, value) pairs, times sorted increasing. For time_a <= t < time_b
            the schedule returns interpolation(value_a, value_b, alpha), where
            alpha is the fraction of the way from time_a to time_b.
        interpolation: lambda float, float, float: float
            interpolator taking (left value, right value, alpha); see
            linear_interpolation.
        outside_value: float
            value returned for t outside all intervals; if None an
            AssertionError is raised instead.
        """
        times = [e[0] for e in endpoints]
        assert times == sorted(times)
        self._interpolation = interpolation
        self._outside_value = outside_value
        self._endpoints = endpoints
    def value(self, t):
        """See Schedule.value."""
        for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
            if l_t <= t < r_t:
                alpha = float(t - l_t) / (r_t - l_t)
                return self._interpolation(l, r, alpha)
        # t falls outside every segment.
        assert self._outside_value is not None
        return self._outside_value
|
class LinearSchedule(object):
    """Linear anneal from initial_p to final_p over schedule_timesteps.

    After schedule_timesteps have passed, final_p is returned forever.

    Parameters
    ----------
    schedule_timesteps: int
        Number of timesteps over which to anneal initial_p to final_p.
    final_p: float
        final output value
    initial_p: float
        initial output value
    """
    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p
    def value(self, t):
        """See Schedule.value."""
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)
|
class SegmentTree(object):
    """Array-backed segment tree supporting O(log n) range reduction.

    https://en.wikipedia.org/wiki/Segment_tree

    Behaves like a fixed-size array whose `__setitem__` is O(log capacity),
    in exchange for an O(log n) `reduce` of an associative `operation`
    (e.g. sum, min) over any contiguous index range.
    """
    def __init__(self, capacity, operation, neutral_element):
        """
        capacity: int
            total array size; must be a positive power of two.
        operation: lambda obj, obj -> obj
            associative combiner (e.g. operator.add, min).
        neutral_element: obj
            identity of `operation` (0 for sum, float('inf') for min, ...).
        """
        assert capacity > 0 and capacity & (capacity - 1) == 0, 'capacity must be positive and a power of 2.'
        self._capacity = capacity
        # 1-indexed heap layout: node i's children are 2i and 2i+1; leaves
        # live at [capacity, 2*capacity).
        self._value = [neutral_element for _ in range(2 * capacity)]
        self._operation = operation
    def _reduce_helper(self, start, end, node, node_start, node_end):
        # Recursive reduction over the inclusive query range [start, end].
        if start == node_start and end == node_end:
            return self._value[node]
        mid = (node_start + node_end) // 2
        if end <= mid:
            return self._reduce_helper(start, end, 2 * node, node_start, mid)
        if start >= mid + 1:
            return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
        # Query straddles the midpoint: combine both halves.
        left = self._reduce_helper(start, mid, 2 * node, node_start, mid)
        right = self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
        return self._operation(left, right)
    def reduce(self, start=0, end=None):
        """Reduce `operation` over arr[start:end] (end exclusive; None = capacity,
        negative values wrap). Returns the combined value."""
        if end is None:
            end = self._capacity
        if end < 0:
            end += self._capacity
        end -= 1
        return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
    def __setitem__(self, idx, val):
        # Write the leaf, then recombine every ancestor up to the root.
        leaf = idx + self._capacity
        self._value[leaf] = val
        node = leaf // 2
        while node >= 1:
            self._value[node] = self._operation(self._value[2 * node], self._value[2 * node + 1])
            node //= 2
    def __getitem__(self, idx):
        assert 0 <= idx < self._capacity
        return self._value[self._capacity + idx]
|
class SumSegmentTree(SegmentTree):
    """Segment tree specialized to addition, with prefix-sum index search."""
    def __init__(self, capacity):
        super(SumSegmentTree, self).__init__(capacity=capacity, operation=operator.add, neutral_element=0.0)
    def sum(self, start=0, end=None):
        """Returns arr[start] + ... + arr[end]."""
        return super(SumSegmentTree, self).reduce(start, end)
    def find_prefixsum_idx(self, prefixsum):
        """Highest index i such that arr[0] + ... + arr[i-1] <= prefixsum.

        With probabilities stored in the leaves this samples an index in
        O(log capacity).

        Parameters
        ----------
        prefixsum: float
            upper bound on the prefix sum (0 <= prefixsum <= total sum).

        Returns
        -------
        idx: int
            highest index satisfying the prefixsum constraint.
        """
        assert 0 <= prefixsum <= self.sum() + 1e-05
        # Walk down from the root, descending left when the left subtree's
        # mass exceeds the remaining prefixsum, else subtracting it and
        # descending right.
        idx = 1
        while idx < self._capacity:
            left = 2 * idx
            if self._value[left] > prefixsum:
                idx = left
            else:
                prefixsum -= self._value[left]
                idx = left + 1
        return idx - self._capacity
|
class MinSegmentTree(SegmentTree):
    """Segment tree specialized to the min operation."""
    def __init__(self, capacity):
        super(MinSegmentTree, self).__init__(capacity=capacity, operation=min, neutral_element=float('inf'))
    def min(self, start=0, end=None):
        """Returns min(arr[start], ..., arr[end])."""
        return super(MinSegmentTree, self).reduce(start, end)
|
def test_piecewise_schedule():
    """Check interpolated, exact-endpoint, and outside values of a piecewise schedule."""
    ps = PiecewiseSchedule([(-5, 100), (5, 200), (10, 50), (100, 50), (200, -50)], outside_value=500)
    expectations = [
        (-10, 500), (0, 150), (5, 200), (9, 80), (50, 50), (80, 50),
        (150, 0), (175, -25), (201, 500), (500, 500), (200 - 1e-10, -50),
    ]
    for t, expected in expectations:
        assert np.isclose(ps.value(t), expected)
|
def test_constant_schedule():
    """A ConstantSchedule returns its value for every timestep."""
    cs = ConstantSchedule(5)
    for t in range(-100, 100):
        assert np.isclose(cs.value(t), 5)
|
def test_tree_set():
    """Interval sums over a SumSegmentTree with two leaves set."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[3] = 3.0
    for args, expected in [((), 4.0), ((0, 2), 0.0), ((0, 3), 1.0), ((2, 3), 1.0), ((2, -1), 1.0), ((2, 4), 4.0)]:
        assert np.isclose(tree.sum(*args), expected)
|
def test_tree_set_overlap():
    """Writing the same leaf twice must keep only the latest value."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[2] = 3.0
    for args, expected in [((), 3.0), ((2, 3), 3.0), ((2, -1), 3.0), ((2, 4), 3.0), ((1, 2), 0.0)]:
        assert np.isclose(tree.sum(*args), expected)
|
def test_prefixsum_idx():
    """find_prefixsum_idx must return the highest index within the prefix sum."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[3] = 3.0
    for prefixsum, idx in [(0.0, 2), (0.5, 2), (0.99, 2), (1.01, 3), (3.0, 3), (4.0, 3)]:
        assert tree.find_prefixsum_idx(prefixsum) == idx
|
def test_prefixsum_idx2():
    """Prefix-sum lookup over a tree where every leaf is populated."""
    tree = SumSegmentTree(4)
    for i, value in enumerate([0.5, 1.0, 1.0, 3.0]):
        tree[i] = value
    for prefixsum, idx in [(0.0, 0), (0.55, 1), (0.99, 1), (1.51, 2), (3.0, 3), (5.5, 3)]:
        assert tree.find_prefixsum_idx(prefixsum) == idx
|
def test_max_interval_tree():
    """MinSegmentTree must track range minima through successive point updates."""
    tree = MinSegmentTree(4)
    tree[0] = 1.0
    tree[2] = 0.5
    tree[3] = 3.0
    # Each phase overwrites index 2 and re-checks a batch of range queries.
    phases = [
        (0.5, [((), 0.5), ((0, 2), 1.0), ((0, 3), 0.5), ((0, (- 1)), 0.5),
               ((2, 4), 0.5), ((3, 4), 3.0)]),
        (0.7, [((), 0.7), ((0, 2), 1.0), ((0, 3), 0.7), ((0, (- 1)), 0.7),
               ((2, 4), 0.7), ((3, 4), 3.0)]),
        (4.0, [((), 1.0), ((0, 2), 1.0), ((0, 3), 1.0), ((0, (- 1)), 1.0),
               ((2, 4), 3.0), ((2, 3), 4.0), ((2, (- 1)), 4.0), ((3, 4), 3.0)]),
    ]
    for value, queries in phases:
        tree[2] = value
        for args, expected in queries:
            assert np.isclose(tree.min(*args), expected)
|
def test_function():
    """function() must honor positional args and fall back to `givens` defaults."""
    with tf.Graph().as_default():
        x = tf.placeholder(tf.int32, (), name='x')
        y = tf.placeholder(tf.int32, (), name='y')
        z = 3 * x + 2 * y
        lin = function([x, y], z, givens={y: 0})
        with single_threaded_session():
            initialize()
            assert lin(2) == 6      # y falls back to 0 via givens
            assert lin(2, 2) == 10  # explicit y overrides the default
|
def test_multikwargs():
    """Placeholders sharing a name across variable scopes must still be usable."""
    with tf.Graph().as_default():
        x = tf.placeholder(tf.int32, (), name='x')
        with tf.variable_scope('other'):
            x2 = tf.placeholder(tf.int32, (), name='x')
        z = 3 * x + 2 * x2
        lin = function([x, x2], z, givens={x2: 0})
        with single_threaded_session():
            initialize()
            assert lin(2) == 6      # x2 falls back to 0 via givens
            assert lin(2, 2) == 10
|
def switch(condition, then_expression, else_expression):
    """Select between two tensor operations based on a scalar condition.

    Both `then_expression` and `else_expression` must be symbolic tensors of
    the same shape; the result is given that common static shape.

    # Arguments
        condition: scalar tensor (int or bool).
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    """
    # tf.cond drops the static shape, so remember it and restore it afterwards.
    original_shape = copy.copy(then_expression.get_shape())
    result = tf.cond(tf.cast(condition, 'bool'),
                     lambda: then_expression,
                     lambda: else_expression)
    result.set_shape(original_shape)
    return result
|
def lrelu(x, leak=0.2):
    """Leaky ReLU: identity for positive inputs, `leak * x` for negative ones.

    Implemented branch-free as max(x, leak*x) via the abs() identity:
    0.5*(1+leak)*x + 0.5*(1-leak)*|x|.
    """
    pos_coef = 0.5 * (1 + leak)
    neg_coef = 0.5 * (1 - leak)
    return pos_coef * x + neg_coef * abs(x)
|
def huber_loss(x, delta=1.0):
    """Huber loss: quadratic within `delta` of zero, linear beyond.

    Reference: https://en.wikipedia.org/wiki/Huber_loss
    """
    abs_x = tf.abs(x)
    quadratic = tf.square(x) * 0.5
    linear = delta * (abs_x - 0.5 * delta)
    return tf.where(abs_x < delta, quadratic, linear)
|
def make_session(num_cpu=None, make_default=False, graph=None):
    """Return a TF session restricted to `num_cpu` CPU threads.

    Args:
        num_cpu: thread count; defaults to $RCALL_NUM_CPU or the machine's
            CPU count when not given.
        make_default: if True, return an InteractiveSession (which installs
            itself as the default session).
        graph: optional tf.Graph to attach the session to.
    """
    if num_cpu is None:
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    tf_config = tf.ConfigProto(
        inter_op_parallelism_threads=num_cpu,
        intra_op_parallelism_threads=num_cpu)
    tf_config.gpu_options.allocator_type = 'BFC'
    session_cls = tf.InteractiveSession if make_default else tf.Session
    return session_cls(config=tf_config, graph=graph)
|
def single_threaded_session():
    """Return a session that runs on a single CPU thread."""
    return make_session(num_cpu=1)
|
def in_session(f):
    """Decorator that runs the wrapped function inside a fresh tf.Session."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        with tf.Session():
            f(*args, **kwargs)
    return wrapped
|
def initialize():
    """Initialize every global variable not yet initialized by this helper.

    Only variables absent from ALREADY_INITIALIZED are touched, so repeated
    calls never reset previously initialized (e.g. trained) variables.
    """
    uninitialized = set(tf.global_variables()) - ALREADY_INITIALIZED
    tf.get_default_session().run(tf.variables_initializer(uninitialized))
    ALREADY_INITIALIZED.update(uninitialized)
|
def normc_initializer(std=1.0, axis=0):
    """Initializer drawing Gaussian samples rescaled to norm `std` along `axis`."""
    def _initializer(shape, dtype=None, partition_info=None):
        sample = np.random.randn(*shape).astype(np.float32)
        norms = np.sqrt(np.square(sample).sum(axis=axis, keepdims=True))
        return tf.constant(sample * (std / norms))
    return _initializer
|
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad='SAME', dtype=tf.float32, collections=None, summary_tag=None):
    """2-D convolution layer with Glorot-uniform weight initialization.

    Args:
        x: input tensor; channels are read from x.get_shape()[3], so NHWC
           layout is assumed -- TODO confirm against callers.
        num_filters: number of output channels.
        name: variable scope holding the layer's 'W' and 'b' variables.
        filter_size: (height, width) of the convolution kernel.
        stride: (vertical, horizontal) stride.
        pad: padding mode, 'SAME' or 'VALID'.
        dtype: dtype of the weight variable.
        collections: optional variable collections for W and b.
        summary_tag: if given, also log the filters as an image summary.
    """
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
        # Glorot/Xavier-uniform bound: sqrt(6 / (fan_in + fan_out)).
        fan_in = intprod(filter_shape[:3])
        fan_out = (intprod(filter_shape[:2]) * num_filters)
        w_bound = np.sqrt((6.0 / (fan_in + fan_out)))
        w = tf.get_variable('W', filter_shape, dtype, tf.random_uniform_initializer((- w_bound), w_bound), collections=collections)
        # Bias is shaped for broadcasting over the NHWC output.
        b = tf.get_variable('b', [1, 1, 1, num_filters], initializer=tf.zeros_initializer(), collections=collections)
        if (summary_tag is not None):
            # Reshape so each input-channel/filter pair becomes one grayscale image.
            tf.summary.image(summary_tag, tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], (- 1), 1]), [2, 0, 1, 3]), max_images=10)
        return (tf.nn.conv2d(x, w, stride_shape, pad) + b)
|
def function(inputs, outputs, updates=None, givens=None):
    """Theano-style function builder: bind placeholders to output expressions.

    Produces a callable f such that f(*values) feeds `values` to the
    placeholders in `inputs` (matched in order), runs any `updates` ops, and
    returns the evaluated `outputs`. Placeholders listed in `givens` receive
    default values when the caller does not feed them.

    Example:
        x = tf.placeholder(tf.int32, (), name="x")
        y = tf.placeholder(tf.int32, (), name="y")
        z = 3 * x + 2 * y
        lin = function([x, y], z, givens={y: 0})

        with single_threaded_session():
            initialize()
            assert lin(2) == 6
            assert lin(2, 2) == 10

    Parameters
    ----------
    inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
        list of input arguments
    outputs: a single expression, a list of them, or a dict mapping names to
        expressions; the returned callable mirrors this structure.
    updates: optional ops to run on every call.
    givens: dict of default feed values for unfed placeholders.
    """
    if isinstance(outputs, list):
        return _Function(inputs, outputs, updates, givens=givens)
    if isinstance(outputs, (dict, collections.OrderedDict)):
        fn = _Function(inputs, outputs.values(), updates, givens=givens)
        return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), fn(*args, **kwargs)))
    fn = _Function(inputs, [outputs], updates, givens=givens)
    return lambda *args, **kwargs: fn(*args, **kwargs)[0]
|
class _Function(object):
    """Callable wrapping a session.run over fixed inputs and outputs.

    Built by `function()`. Positional call arguments are matched to the input
    placeholders in order; `givens` supplies defaults for any placeholder the
    caller did not feed.
    """

    def __init__(self, inputs, outputs, updates, givens):
        for inpt in inputs:
            placeholder_like = (type(inpt) is tf.Tensor) and (len(inpt.op.inputs) == 0)
            if not (hasattr(inpt, 'make_feed_dict') or placeholder_like):
                assert False, 'inputs should all be placeholders, constants, or have a make_feed_dict method'
        self.inputs = inputs
        # Group update ops so a single extra fetch runs them all.
        self.update_group = tf.group(*(updates or []))
        self.outputs_update = list(outputs) + [self.update_group]
        self.givens = {} if givens is None else givens

    def _feed_input(self, feed_dict, inpt, value):
        if hasattr(inpt, 'make_feed_dict'):
            feed_dict.update(inpt.make_feed_dict(value))
        else:
            feed_dict[inpt] = value

    def __call__(self, *args):
        assert len(args) <= len(self.inputs), 'Too many arguments provided'
        feed_dict = {}
        for inpt, value in zip(self.inputs, args):
            self._feed_input(feed_dict, inpt, value)
        # Fill in defaults for anything the caller did not feed.
        for inpt in self.givens:
            feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
        # Drop the trailing update-group fetch from the returned results.
        return tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:(- 1)]
|
def var_shape(x):
    """Return x's static shape as a list of ints; fails if any dim is unknown."""
    shape = x.get_shape().as_list()
    assert all(isinstance(dim, int) for dim in shape), 'shape function assumes that shape is fully known'
    return shape
|
def numel(x):
    """Total number of elements in tensor x (product of its static dims)."""
    return intprod(var_shape(x))
|
def intprod(x):
    """Product of a sequence of numbers, coerced to a Python int."""
    return int(np.prod(x))
|
def flatgrad(loss, var_list, clip_norm=None):
    """Gradient of `loss` w.r.t. `var_list`, flattened into one 1-D tensor.

    Variables with no gradient contribute zeros, so the output length always
    equals the total parameter count of `var_list`.

    Args:
        loss: scalar loss tensor.
        var_list: variables to differentiate with respect to.
        clip_norm: if given, clip each per-variable gradient to this L2 norm.
    """
    grads = tf.gradients(loss, var_list)
    if clip_norm is not None:
        # Bug fix: skip None entries -- tf.gradients returns None for variables
        # the loss does not depend on, and tf.clip_by_norm raises on None.
        grads = [grad if grad is None else tf.clip_by_norm(grad, clip_norm=clip_norm)
                 for grad in grads]
    return tf.concat(axis=0, values=[
        tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
        for (v, grad) in zip(var_list, grads)])
|
class SetFromFlat(object):
    """Op that writes a flat parameter vector back into a list of variables.

    Builds one grouped assign op fed by a placeholder `theta`; calling the
    instance with a 1-D array scatters consecutive slices of it into the
    variables, reshaped to each variable's static shape.
    """

    def __init__(self, var_list, dtype=tf.float32):
        # Fix: the original initialized `assigns = []` twice; the first
        # assignment was dead code and has been removed.
        shapes = list(map(var_shape, var_list))
        total_size = np.sum([intprod(shape) for shape in shapes])
        self.theta = theta = tf.placeholder(dtype, [total_size])
        start = 0
        assigns = []
        for (shape, v) in zip(shapes, var_list):
            size = intprod(shape)
            assigns.append(tf.assign(v, tf.reshape(theta[start:(start + size)], shape)))
            start += size
        self.op = tf.group(*assigns)

    def __call__(self, theta):
        """Run the assignment, feeding `theta` as the flat parameter vector."""
        tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
|
class GetFlat(object):
    """Op that reads all variables in `var_list` as one flat 1-D vector."""

    def __init__(self, var_list):
        flattened = [tf.reshape(v, [numel(v)]) for v in var_list]
        self.op = tf.concat(axis=0, values=flattened)

    def __call__(self):
        return tf.get_default_session().run(self.op)
|
def get_placeholder(name, dtype, shape):
    """Create a placeholder, or return the one already cached under `name`.

    A cached placeholder's dtype and shape must match the request exactly.
    """
    if name in _PLACEHOLDER_CACHE:
        out, cached_dtype, cached_shape = _PLACEHOLDER_CACHE[name]
        assert (cached_dtype == dtype) and (cached_shape == shape)
        return out
    out = tf.placeholder(dtype=dtype, shape=shape, name=name)
    _PLACEHOLDER_CACHE[name] = (out, dtype, shape)
    return out
|
def get_placeholder_cached(name):
    """Return the placeholder previously registered under `name` (KeyError if absent)."""
    placeholder, _dtype, _shape = _PLACEHOLDER_CACHE[name]
    return placeholder
|
def flattenallbut0(x):
    """Flatten x to 2-D, preserving the leading (batch) dimension."""
    trailing = intprod(x.get_shape().as_list()[1:])
    return tf.reshape(x, [(- 1), trailing])
|
def display_var_info(vars):
    """Log each variable's name, parameter count, and shape, plus a total.

    Optimizer slot variables (Adam moments, beta power accumulators) are
    excluded entirely; bias variables count toward the total but are not
    printed individually.
    """
    from baselines import logger
    count_params = 0
    for v in vars:
        name = v.name
        if ('/Adam' in name) or ('beta1_power' in name) or ('beta2_power' in name):
            continue
        v_params = np.prod(v.shape.as_list())
        count_params += v_params
        if ('/b:' in name) or ('/biases' in name):
            continue  # counted above, but too noisy to list
        logger.info(' %s%s %i params %s' % (name, ' ' * (55 - len(name)), v_params, str(v.shape)))
    logger.info('Total model parameters: %0.2f million' % (count_params * 1e-06))
|
class AlreadySteppingError(Exception):
    """Raised when step_async() is called while an async step is already running."""

    def __init__(self):
        Exception.__init__(self, 'already running an async step')
|
class NotSteppingError(Exception):
    """Raised when step_wait() is called but no async step is in progress."""

    def __init__(self):
        Exception.__init__(self, 'not running an async step')
|
class VecEnv(ABC):
    """Abstract base class for vectorized (batched, possibly asynchronous) environments."""

    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space

    @abstractmethod
    def reset(self):
        """Reset every environment and return the batched observation(s).

        Any in-flight step_async work is cancelled; step_wait() must not be
        called until step_async() is invoked again.
        """
        pass

    @abstractmethod
    def step_async(self, actions):
        """Begin stepping all environments with `actions` without blocking.

        Results are collected via step_wait(); do not call this while a
        previous async step is still pending.
        """
        pass

    @abstractmethod
    def step_wait(self):
        """Block until the pending step_async() finishes.

        Returns (obs, rews, dones, infos): batched observations, an array of
        rewards, an array of "episode done" booleans, and per-env info objects.
        """
        pass

    @abstractmethod
    def close(self):
        """Release every environment's resources."""
        pass

    def step(self, actions):
        """Synchronous convenience wrapper: submit the step and wait for it."""
        self.step_async(actions)
        return self.step_wait()

    def render(self):
        logger.warn('Render not defined for %s' % self)

    @property
    def unwrapped(self):
        # Walk down through wrapper layers to the base vectorized env.
        if isinstance(self, VecEnvWrapper):
            return self.venv.unwrapped
        return self
|
class VecEnvWrapper(VecEnv):
    """Base class for wrappers that delegate to an underlying VecEnv."""

    def __init__(self, venv, observation_space=None, action_space=None):
        self.venv = venv
        # Spaces default to the wrapped env's unless explicitly overridden.
        VecEnv.__init__(
            self,
            num_envs=venv.num_envs,
            observation_space=observation_space or venv.observation_space,
            action_space=action_space or venv.action_space)

    def step_async(self, actions):
        self.venv.step_async(actions)

    @abstractmethod
    def reset(self):
        pass

    @abstractmethod
    def step_wait(self):
        pass

    def close(self):
        return self.venv.close()

    def render(self):
        self.venv.render()
|
class CloudpickleWrapper(object):
    """Serialize the payload with cloudpickle instead of plain pickle.

    multiprocessing pickles arguments with the stdlib pickler, which rejects
    lambdas and closures; cloudpickle handles them.
    """

    def __init__(self, x):
        self.x = x

    def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, ob):
        # cloudpickle output is loadable by the stdlib unpickler.
        import pickle
        self.x = pickle.loads(ob)
|
class DummyVecEnv(VecEnv):
    """In-process VecEnv that steps each wrapped environment sequentially.

    Useful for debugging, or when the envs are cheap enough that subprocess
    overhead (see SubprocVecEnv) is not worth it.
    """

    def __init__(self, env_fns):
        """
        Args:
            env_fns: list of no-argument callables, each creating one env.
        """
        self.envs = [fn() for fn in env_fns]
        env = self.envs[0]
        VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
        (shapes, dtypes) = ({}, {})
        self.keys = []
        obs_space = env.observation_space
        if isinstance(obs_space, spaces.Dict):
            assert isinstance(obs_space.spaces, OrderedDict)
            for (key, box) in obs_space.spaces.items():
                assert isinstance(box, spaces.Box)
                shapes[key] = box.shape
                dtypes[key] = box.dtype
                self.keys.append(key)
        else:
            box = obs_space
            assert isinstance(box, spaces.Box)
            # Non-dict observation spaces are stored under the single key None.
            self.keys = [None]
            (shapes, dtypes) = ({None: box.shape}, {None: box.dtype})
        self.buf_obs = {k: np.zeros(((self.num_envs,) + tuple(shapes[k])), dtype=dtypes[k]) for k in self.keys}
        # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin bool is the documented drop-in replacement.
        self.buf_dones = np.zeros((self.num_envs,), dtype=bool)
        self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
        self.buf_infos = [{} for _ in range(self.num_envs)]
        self.actions = None

    def step_async(self, actions):
        self.actions = actions

    def step_wait(self):
        for e in range(self.num_envs):
            (obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e]) = self.envs[e].step(self.actions[e])
            if self.buf_dones[e]:
                # Auto-reset finished episodes so the buffer always holds a
                # valid starting observation.
                obs = self.envs[e].reset()
            self._save_obs(e, obs)
        return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones), self.buf_infos.copy())

    def reset(self):
        for e in range(self.num_envs):
            obs = self.envs[e].reset()
            self._save_obs(e, obs)
        return self._obs_from_buf()

    def close(self):
        return

    def _save_obs(self, e, obs):
        # Copy one env's observation into the shared per-key buffers.
        for k in self.keys:
            if (k is None):
                self.buf_obs[k][e] = obs
            else:
                self.buf_obs[k][e] = obs[k]

    def _obs_from_buf(self):
        if (self.keys == [None]):
            return self.buf_obs[None]
        return self.buf_obs
|
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop serving commands for one environment over a Pipe.

    Recognized commands: 'step', 'reset', 'reset_task', 'close', 'get_spaces'.
    """
    # The parent's pipe end is unused in the child; close it so EOF propagates.
    parent_remote.close()
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                # Auto-reset so the parent always receives a fresh observation.
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'reset_task':
            remote.send(env.reset_task())
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        else:
            raise NotImplementedError
|
class SubprocVecEnv(VecEnv):
    """VecEnv that runs each environment in its own subprocess (see `worker`)."""
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        # NOTE(review): `spaces` is accepted but never read here -- confirm callers.
        self.waiting = False  # True while a step_async is outstanding
        self.closed = False
        nenvs = len(env_fns)
        # One Pipe per env: `remotes` stay in the parent, `work_remotes` go to workers.
        (self.remotes, self.work_remotes) = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            # Daemonize so workers die if the parent process exits.
            p.daemon = True
            p.start()
        for remote in self.work_remotes:
            # Parent closes its copies of the worker ends so EOF propagates.
            remote.close()
        # Ask one worker for the observation/action spaces shared by all envs.
        self.remotes[0].send(('get_spaces', None))
        (observation_space, action_space) = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    def step_async(self, actions):
        """Fan the actions out to the workers; results are gathered in step_wait()."""
        for (remote, action) in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        """Collect (obs, rews, dones, infos) from all workers, stacking arrays."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        (obs, rews, dones, infos) = zip(*results)
        return (np.stack(obs), np.stack(rews), np.stack(dones), infos)
    def reset(self):
        """Reset every worker env and return the stacked observations."""
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def reset_task(self):
        """Invoke reset_task() on every worker env and stack the results."""
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        """Shut the workers down cleanly; safe to call more than once."""
        if self.closed:
            return
        if self.waiting:
            # Drain in-flight step results before asking workers to exit.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
|
class VecNormalize(VecEnvWrapper):
    """VecEnv wrapper that normalizes observations and rewards with running stats.

    Observations are whitened by a running mean/std estimate; rewards are
    scaled by the running std of the discounted return. Both are clipped.
    """

    def __init__(self, venv, ob=True, ret=True, clipob=10.0, cliprew=10.0, gamma=0.99, epsilon=1e-08):
        """
        Args:
            venv: the vectorized environment to wrap.
            ob: if True, normalize observations.
            ret: if True, normalize rewards by the discounted-return std.
            clipob: clip normalized observations to [-clipob, clipob].
            cliprew: clip normalized rewards to [-cliprew, cliprew].
            gamma: discount used for the running return estimate.
            epsilon: numerical-stability constant inside the sqrt.
        """
        VecEnvWrapper.__init__(self, venv)
        self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
        self.ret_rms = RunningMeanStd(shape=()) if ret else None
        self.clipob = clipob
        self.cliprew = cliprew
        self.ret = np.zeros(self.num_envs)  # per-env discounted return accumulator
        self.gamma = gamma
        self.epsilon = epsilon

    def step_wait(self):
        """
        Apply sequence of actions to sequence of environments
        actions -> (observations, rewards, news)

        where 'news' is a boolean vector indicating whether each element is new.
        """
        (obs, rews, news, infos) = self.venv.step_wait()
        self.ret = self.ret * self.gamma + rews
        obs = self._obfilt(obs)
        if self.ret_rms:
            self.ret_rms.update(self.ret)
            rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
        # Bug fix: zero the return accumulator for finished episodes so stale
        # returns do not leak across episode boundaries into ret_rms.
        self.ret[news] = 0.0
        return (obs, rews, news, infos)

    def _obfilt(self, obs):
        # Whiten and clip observations using the running statistics.
        if self.ob_rms:
            self.ob_rms.update(obs)
            return np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
        return obs

    def reset(self):
        """Reset all environments (and the per-env return accumulators)."""
        # Bug fix: the accumulated returns belong to the episodes being
        # discarded, so they must restart from zero.
        self.ret = np.zeros(self.num_envs)
        obs = self.venv.reset()
        return self._obfilt(obs)
|
class ActorCritic():
    @store_args
    def __init__(self, inputs_tf, dimo, dimg, dimu, max_u, o_stats, g_stats, hidden, layers, **kwargs):
        """The actor-critic network and related training code.

        Args:
            inputs_tf (dict of tensors): all necessary inputs for the network: the
                observation (o), the goal (g), and the action (u)
            dimo (int): the dimension of the observations
            dimg (int): the dimension of the goals
            dimu (int): the dimension of the actions
            max_u (float): the maximum magnitude of actions; action outputs will be scaled
                accordingly
            o_stats (baselines.her.Normalizer): normalizer for observations
            g_stats (baselines.her.Normalizer): normalizer for goals
            hidden (int): number of hidden units that should be used in hidden layers
            layers (int): number of hidden layers
        """
        # NOTE(review): @store_args appears to bind every ctor argument onto self
        # (self.o_stats, self.max_u, ...) -- confirm against its definition.
        self.o_tf = inputs_tf['o']
        self.g_tf = inputs_tf['g']
        self.u_tf = inputs_tf['u']
        # Normalize observation and goal before feeding the networks.
        o = self.o_stats.normalize(self.o_tf)
        g = self.g_stats.normalize(self.g_tf)
        input_pi = tf.concat(axis=1, values=[o, g])
        with tf.variable_scope('pi'):
            # tanh squashes to [-1, 1]; scaling by max_u maps to the action range.
            self.pi_tf = (self.max_u * tf.tanh(nn(input_pi, (([self.hidden] * self.layers) + [self.dimu]))))
        with tf.variable_scope('Q'):
            # Q(o, g, pi(o, g)): critic evaluated at the policy's own action.
            input_Q = tf.concat(axis=1, values=[o, g, (self.pi_tf / self.max_u)])
            self.Q_pi_tf = nn(input_Q, (([self.hidden] * self.layers) + [1]))
            # Q(o, g, u) for externally supplied actions; reuses the same weights.
            input_Q = tf.concat(axis=1, values=[o, g, (self.u_tf / self.max_u)])
            self._input_Q = input_Q
            self.Q_tf = nn(input_Q, (([self.hidden] * self.layers) + [1]), reuse=True)
|
def cached_make_env(make_env):
    """Memoize environment construction, keyed on the factory function.

    Creating an env can be expensive, and callers often only need it to
    inspect properties such as the observation and action spaces, so each
    factory is invoked at most once.
    """
    if make_env not in CACHED_ENVS:
        CACHED_ENVS[make_env] = make_env()
    return CACHED_ENVS[make_env]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.