def profile(n):
    """
    Usage:
    @profile("my_func")
    def my_func(): code
    """
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with ProfileKV(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
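
# Minimal usage sketch for the decorator above (hedged: ProfileKV is assumed to be
# the timing context manager defined elsewhere in this module; it is only needed
# when the decorated function is actually called).
@profile("heavy_step")
def heavy_step(batch):
    # wall-clock time of each call is accumulated under the "heavy_step" key
    return sum(batch)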
|
class Logger(object):
DEFAULT = None
CURRENT = None
def __init__(self, dir, output_formats):
self.name2val = defaultdict(float)
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
if (val is None):
self.name2val[key] = None
return
(oldval, cnt) = (self.name2val[key], self.name2cnt[key])
self.name2val[key] = (((oldval * cnt) / (cnt + 1)) + (val / (cnt + 1)))
self.name2cnt[key] = (cnt + 1)
def dumpkvs(self):
if (self.level == DISABLED):
return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
self.name2cnt.clear()
def log(self, *args, level=INFO):
if (self.level <= level):
self._do_log(args)
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
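
# Worked example (hedged sketch, standalone): logkv_mean above keeps a running mean
# without storing samples, via new_mean = old_mean * n / (n + 1) + value / (n + 1).
def running_mean(values):
    mean, cnt = 0.0, 0
    for v in values:
        mean = mean * cnt / (cnt + 1) + v / (cnt + 1)
        cnt += 1
    return mean

assert abs(running_mean([-22.5, -44.4]) - (-33.45)) < 1e-9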
|
def configure(dir=None, format_strs=None):
if (dir is None):
dir = os.getenv('OPENAI_LOGDIR')
if (dir is None):
dir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f'))
assert isinstance(dir, str)
os.makedirs(dir, exist_ok=True)
log_suffix = ''
from mpi4py import MPI
rank = MPI.COMM_WORLD.Get_rank()
if (rank > 0):
log_suffix = ('-rank%03i' % rank)
if (format_strs is None):
(strs, strs_mpi) = (os.getenv('OPENAI_LOG_FORMAT'), os.getenv('OPENAI_LOG_FORMAT_MPI'))
format_strs = (strs_mpi if (rank > 0) else strs)
if (format_strs is not None):
format_strs = format_strs.split(',')
else:
format_strs = (LOG_OUTPUT_FORMATS_MPI if (rank > 0) else LOG_OUTPUT_FORMATS)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log(('Logging to %s' % dir))
|
def reset():
if (Logger.CURRENT is not Logger.DEFAULT):
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
|
class scoped_configure(object):
def __init__(self, dir=None, format_strs=None):
self.dir = dir
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(dir=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
|
def _demo():
info('hi')
debug("shouldn't appear")
set_level(DEBUG)
debug('should appear')
dir = '/tmp/testlogging'
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv('a', 3)
logkv('b', 2.5)
dumpkvs()
logkv('b', (- 2.5))
logkv('a', 5.5)
dumpkvs()
info('^^^ should see a = 5.5')
logkv_mean('b', (- 22.5))
logkv_mean('b', (- 44.4))
logkv('a', 5.5)
dumpkvs()
    info('^^^ should see b = -33.45')
logkv('b', (- 2.5))
dumpkvs()
logkv('a', 'longasslongasslongasslongasslongasslongassvalue')
dumpkvs()
|
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
|
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
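
# Usage sketch (hedged: the path below is illustrative). read_json expects a
# JSON-lines file (one dict per line), which is the layout the key/value logger
# writes for its progress file; read_csv expects the comma-separated variant.
import os
if os.path.exists('/tmp/testlogging/progress.json'):
    df = read_json('/tmp/testlogging/progress.json')
    print(df.columns.tolist(), len(df))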
|
def read_tb(path):
    """
    path : a tensorboard file OR a directory, where we will find all TB files
           of the form events.*
    """
import pandas
import numpy as np
from glob import glob
from collections import defaultdict
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, 'events.*'))
elif osp.basename(path).startswith('events.'):
fnames = [path]
else:
raise NotImplementedError(('Expected tensorboard file or directory containing them. Got %s' % path))
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if (summary.step > 0):
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx, tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[((step - 1), colidx)] = value
return pandas.DataFrame(data, columns=tags)
|
def rolling_window(a, window):
shape = (a.shape[:(- 1)] + (((a.shape[(- 1)] - window) + 1), window))
strides = (a.strides + (a.strides[(- 1)],))
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
|
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=(- 1))
return (x[(window - 1):], yw_func)
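
# Usage sketch (hedged): smooth a noisy reward curve with a trailing moving
# average built from rolling_window/window_func above.
import numpy as np

steps = np.arange(100)
rewards = np.sin(steps / 10.0) + 0.1 * np.random.randn(100)
xs, smoothed = window_func(steps, rewards, 10, np.mean)
# the first window - 1 points are dropped; smoothed[i] is the mean of rewards[i:i + 10]
assert len(xs) == len(smoothed) == 91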
|
def ts2xy(ts, xaxis):
if (xaxis == X_TIMESTEPS):
x = np.cumsum(ts.l.values)
y = ts.r.values
elif (xaxis == X_EPISODES):
x = np.arange(len(ts))
y = ts.r.values
elif (xaxis == X_WALLTIME):
x = (ts.t.values / 3600.0)
y = ts.r.values
else:
raise NotImplementedError
return (x, y)
|
def plot_curves(xy_list, xaxis, title):
plt.figure(figsize=(8, 2))
maxx = max((xy[0][(- 1)] for xy in xy_list))
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i]
plt.scatter(x, y, s=2)
(x, y_mean) = window_func(x, y, EPISODES_WINDOW, np.mean)
plt.plot(x, y_mean, color=color)
plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel('Episode Rewards')
plt.tight_layout()
|
def plot_results(dirs, num_timesteps, xaxis, task_name):
tslist = []
for dir in dirs:
ts = load_results(dir)
ts = ts[(ts.l.cumsum() <= num_timesteps)]
tslist.append(ts)
xy_list = [ts2xy(ts, xaxis) for ts in tslist]
plot_curves(xy_list, xaxis, task_name)
|
def main():
import argparse
import os
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dirs', help='List of log directories', nargs='*', default=['./log'])
parser.add_argument('--num_timesteps', type=int, default=int(10000000.0))
    parser.add_argument('--xaxis', help='Variable on X-axis', default=X_TIMESTEPS)
parser.add_argument('--task_name', help='Title of plot', default='Breakout')
args = parser.parse_args()
args.dirs = [os.path.abspath(dir) for dir in args.dirs]
plot_results(args.dirs, args.num_timesteps, args.xaxis, args.task_name)
plt.show()
|
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(3, 8, 5, 1, 2), nn.ReLU())
self.conv2 = nn.Sequential(nn.Conv2d(8, 16, 5, 1, 2), nn.ReLU())
self.conv3 = nn.Sequential(nn.Conv2d(16, 64, 5, 1, 2), nn.ReLU())
self.conv4 = nn.Sequential(nn.Conv2d(64, 128, 5, 1, 2), nn.ReLU())
self.conv5 = nn.Sequential(nn.Conv2d(128, 128, 5, 1, 2), nn.ReLU())
self.conv6 = nn.Sequential(nn.Conv2d(128, 128, 5, 1, 2), nn.ReLU())
self.conv_mask = nn.Sequential(nn.Conv2d(128, 1, 5, 1, 2))
self.conv7 = nn.Sequential(nn.Conv2d(128, 64, 5, 4, 1), nn.ReLU())
self.conv8 = nn.Sequential(nn.Conv2d(64, 32, 5, 4, 1), nn.ReLU())
self.fc = nn.Sequential(nn.Linear(((32 * 14) * 14), 1024), nn.Linear(1024, 1), nn.Sigmoid())
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.conv6(x)
mask = self.conv_mask(x)
        x = self.conv7(x * mask)  # attention-masked features, downsampled (128 -> 64 channels)
x = self.conv8(x)
x = x.view(x.size(0), (- 1))
return (mask, self.fc(x))
|
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.det_conv0 = nn.Sequential(nn.Conv2d(4, 32, 3, 1, 1), nn.ReLU())
self.det_conv1 = nn.Sequential(nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(), nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU())
self.det_conv2 = nn.Sequential(nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(), nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU())
self.det_conv3 = nn.Sequential(nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(), nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU())
self.det_conv4 = nn.Sequential(nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(), nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU())
self.det_conv5 = nn.Sequential(nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(), nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU())
self.conv_i = nn.Sequential(nn.Conv2d((32 + 32), 32, 3, 1, 1), nn.Sigmoid())
self.conv_f = nn.Sequential(nn.Conv2d((32 + 32), 32, 3, 1, 1), nn.Sigmoid())
self.conv_g = nn.Sequential(nn.Conv2d((32 + 32), 32, 3, 1, 1), nn.Tanh())
self.conv_o = nn.Sequential(nn.Conv2d((32 + 32), 32, 3, 1, 1), nn.Sigmoid())
self.det_conv_mask = nn.Sequential(nn.Conv2d(32, 1, 3, 1, 1))
self.conv1 = nn.Sequential(nn.Conv2d(4, 64, 5, 1, 2), nn.ReLU())
self.conv2 = nn.Sequential(nn.Conv2d(64, 128, 3, 2, 1), nn.ReLU())
self.conv3 = nn.Sequential(nn.Conv2d(128, 128, 3, 1, 1), nn.ReLU())
self.conv4 = nn.Sequential(nn.Conv2d(128, 256, 3, 2, 1), nn.ReLU())
self.conv5 = nn.Sequential(nn.Conv2d(256, 256, 3, 1, 1), nn.ReLU())
self.conv6 = nn.Sequential(nn.Conv2d(256, 256, 3, 1, 1), nn.ReLU())
self.diconv1 = nn.Sequential(nn.Conv2d(256, 256, 3, 1, 2, dilation=2), nn.ReLU())
self.diconv2 = nn.Sequential(nn.Conv2d(256, 256, 3, 1, 4, dilation=4), nn.ReLU())
self.diconv3 = nn.Sequential(nn.Conv2d(256, 256, 3, 1, 8, dilation=8), nn.ReLU())
self.diconv4 = nn.Sequential(nn.Conv2d(256, 256, 3, 1, 16, dilation=16), nn.ReLU())
self.conv7 = nn.Sequential(nn.Conv2d(256, 256, 3, 1, 1), nn.ReLU())
self.conv8 = nn.Sequential(nn.Conv2d(256, 256, 3, 1, 1), nn.ReLU())
self.deconv1 = nn.Sequential(nn.ConvTranspose2d(256, 128, 4, 2, 1), nn.ReflectionPad2d((1, 0, 1, 0)), nn.AvgPool2d(2, stride=1), nn.ReLU())
self.conv9 = nn.Sequential(nn.Conv2d(128, 128, 3, 1, 1), nn.ReLU())
self.deconv2 = nn.Sequential(nn.ConvTranspose2d(128, 64, 4, 2, 1), nn.ReflectionPad2d((1, 0, 1, 0)), nn.AvgPool2d(2, stride=1), nn.ReLU())
self.conv10 = nn.Sequential(nn.Conv2d(64, 32, 3, 1, 1), nn.ReLU())
self.outframe1 = nn.Sequential(nn.Conv2d(256, 3, 3, 1, 1), nn.ReLU())
self.outframe2 = nn.Sequential(nn.Conv2d(128, 3, 3, 1, 1), nn.ReLU())
self.output = nn.Sequential(nn.Conv2d(32, 3, 3, 1, 1))
def forward(self, input):
(batch_size, row, col) = (input.size(0), input.size(2), input.size(3))
mask = (Variable(torch.ones(batch_size, 1, row, col)).cuda() / 2.0)
h = Variable(torch.zeros(batch_size, 32, row, col)).cuda()
c = Variable(torch.zeros(batch_size, 32, row, col)).cuda()
mask_list = []
for i in range(ITERATION):
x = torch.cat((input, mask), 1)
x = self.det_conv0(x)
resx = x
x = F.relu((self.det_conv1(x) + resx))
resx = x
x = F.relu((self.det_conv2(x) + resx))
resx = x
x = F.relu((self.det_conv3(x) + resx))
resx = x
x = F.relu((self.det_conv4(x) + resx))
resx = x
x = F.relu((self.det_conv5(x) + resx))
x = torch.cat((x, h), 1)
i = self.conv_i(x)
f = self.conv_f(x)
g = self.conv_g(x)
o = self.conv_o(x)
c = ((f * c) + (i * g))
h = (o * F.tanh(c))
mask = self.det_conv_mask(h)
mask_list.append(mask)
x = torch.cat((input, mask), 1)
x = self.conv1(x)
res1 = x
x = self.conv2(x)
x = self.conv3(x)
res2 = x
x = self.conv4(x)
x = self.conv5(x)
x = self.conv6(x)
x = self.diconv1(x)
x = self.diconv2(x)
x = self.diconv3(x)
x = self.diconv4(x)
x = self.conv7(x)
x = self.conv8(x)
frame1 = self.outframe1(x)
x = self.deconv1(x)
x = (x + res2)
x = self.conv9(x)
frame2 = self.outframe2(x)
x = self.deconv2(x)
x = (x + res1)
x = self.conv10(x)
x = self.output(x)
return (mask_list, frame1, frame2, x)
|
def trainable(net, trainable):
for para in net.parameters():
para.requires_grad = trainable
|
def vgg_init():
vgg_model = torchvision.models.vgg16(pretrained=True).cuda()
trainable(vgg_model, False)
return vgg_model
|
class vgg(nn.Module):
def __init__(self, vgg_model):
super(vgg, self).__init__()
self.vgg_layers = vgg_model.features
self.layer_name_mapping = {'1': 'relu1_1', '3': 'relu1_2', '6': 'relu2_1', '8': 'relu2_2'}
def forward(self, x):
output = []
for (name, module) in self.vgg_layers._modules.items():
x = module(x)
if (name in self.layer_name_mapping):
output.append(x)
return output
|
def switch(condition, then_expression, else_expression):
    """Switches between two operations depending on a scalar value (int or bool).
    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    # Arguments
        condition: scalar tensor.
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    """
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'), (lambda : then_expression), (lambda : else_expression))
x.set_shape(x_shape)
return x
|
def lrelu(x, leak=0.2):
f1 = (0.5 * (1 + leak))
f2 = (0.5 * (1 - leak))
return ((f1 * x) + (f2 * abs(x)))
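
# Quick check (hedged sketch): the form above is algebraically identical to the
# usual leaky ReLU max(x, leak * x) for leak in [0, 1].
for _x in (-3.0, -0.5, 0.0, 2.0):
    assert abs(lrelu(_x) - max(_x, 0.2 * _x)) < 1e-12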
|
def huber_loss(x, delta=1.0):
'Reference: https://en.wikipedia.org/wiki/Huber_loss'
return tf.where((tf.abs(x) < delta), (tf.square(x) * 0.5), (delta * (tf.abs(x) - (0.5 * delta))))
|
def get_session(config=None):
'Get default session or create one with a given config'
sess = tf.get_default_session()
if (sess is None):
sess = make_session(config=config, make_default=True)
return sess
|
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"Returns a session that will use <num_cpu> CPU's only"
if (num_cpu is None):
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if (config is None):
config = tf.ConfigProto(allow_soft_placement=True, inter_op_parallelism_threads=num_cpu, intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
|
def single_threaded_session():
'Returns a session which will only use a single CPU'
return make_session(num_cpu=1)
|
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
|
def initialize():
'Initialize all the uninitialized variables in the global scope.'
new_variables = (set(tf.global_variables()) - ALREADY_INITIALIZED)
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
|
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= (std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True)))
return tf.constant(out)
return _initializer
|
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad='SAME', dtype=tf.float32, collections=None, summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
fan_in = intprod(filter_shape[:3])
fan_out = (intprod(filter_shape[:2]) * num_filters)
w_bound = np.sqrt((6.0 / (fan_in + fan_out)))
w = tf.get_variable('W', filter_shape, dtype, tf.random_uniform_initializer((- w_bound), w_bound), collections=collections)
b = tf.get_variable('b', [1, 1, 1, num_filters], initializer=tf.zeros_initializer(), collections=collections)
if (summary_tag is not None):
            tf.summary.image(summary_tag, tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]), [2, 0, 1, 3]), max_outputs=10)
return (tf.nn.conv2d(x, w, stride_shape, pad) + b)
|
def function(inputs, outputs, updates=None, givens=None):
    """Just like Theano function. Take a bunch of tensorflow placeholders and expressions
    computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
    values to be fed to the input's placeholders and produces the values of the expressions
    in outputs.

    Input values can be passed in the same order as inputs or can be provided as kwargs based
    on placeholder name (passed to constructor or accessible via placeholder.op.name).

    Example:
        x = tf.placeholder(tf.int32, (), name="x")
        y = tf.placeholder(tf.int32, (), name="y")
        z = 3 * x + 2 * y
        lin = function([x, y], z, givens={y: 0})

        with single_threaded_session():
            initialize()

            assert lin(2) == 6
            assert lin(x=3) == 9
            assert lin(2, 2) == 10
            assert lin(x=2, y=3) == 12

    Parameters
    ----------
    inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
        list of input arguments
    outputs: [tf.Variable] or tf.Variable
        list of outputs or a single output to be returned from function. Returned
        value will also have the same shape.
    updates: [tf.Operation] or tf.Operation
        list of update functions or single update function that will be run whenever
        the function is called. The return is ignored.
    """
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return (lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs))))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return (lambda *args, **kwargs: f(*args, **kwargs)[0])
|
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if ((not hasattr(inpt, 'make_feed_dict')) and (not ((type(inpt) is tf.Tensor) and (len(inpt.op.inputs) == 0)))):
assert False, 'inputs should all be placeholders, constants, or have a make_feed_dict method'
self.inputs = inputs
self.input_names = {inp.name.split('/')[(- 1)].split(':')[0]: inp for inp in inputs}
updates = (updates or [])
self.update_group = tf.group(*updates)
self.outputs_update = (list(outputs) + [self.update_group])
self.givens = ({} if (givens is None) else givens)
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = adjust_shape(inpt, value)
def __call__(self, *args, **kwargs):
assert ((len(args) + len(kwargs)) <= len(self.inputs)), 'Too many arguments provided'
feed_dict = {}
for inpt in self.givens:
feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
for (inpt, value) in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
for (inpt_name, value) in kwargs.items():
self._feed_input(feed_dict, self.input_names[inpt_name], value)
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:(- 1)]
return results
|
def var_shape(x):
out = x.get_shape().as_list()
assert all((isinstance(a, int) for a in out)), 'shape function assumes that shape is fully known'
return out
|
def numel(x):
return intprod(var_shape(x))
|
def intprod(x):
return int(np.prod(x))
|
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if (clip_norm is not None):
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[tf.reshape((grad if (grad is not None) else tf.zeros_like(v)), [numel(v)]) for (v, grad) in zip(var_list, grads)])
|
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:(start + size)], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
|
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
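
# Usage sketch (hedged, TF1 graph/session API assumed): round-trip all variables
# through a single flat parameter vector with GetFlat / SetFromFlat above.
def flat_param_roundtrip(var_list):
    get_flat = GetFlat(var_list)
    set_from_flat = SetFromFlat(var_list)
    theta = get_flat()          # 1-D numpy vector of all concatenated parameters
    set_from_flat(theta * 0.0)  # write a modified vector back into the variables
    return theta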
|
def flattenallbut0(x):
return tf.reshape(x, [(- 1), intprod(x.get_shape().as_list()[1:])])
|
def get_placeholder(name, dtype, shape):
if (name in _PLACEHOLDER_CACHE):
(out, dtype1, shape1) = _PLACEHOLDER_CACHE[name]
if (out.graph == tf.get_default_graph()):
assert ((dtype1 == dtype) and (shape1 == shape)), 'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
|
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
|
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if (('/Adam' in name) or ('beta1_power' in name) or ('beta2_power' in name)):
continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if (('/b:' in name) or ('/bias' in name)):
continue
logger.info((' %s%s %i params %s' % (name, (' ' * (55 - len(name))), v_params, str(v.shape))))
logger.info(('Total model parameters: %0.2f million' % (count_params * 1e-06)))
|
def get_available_gpus(session_config=None):
if (session_config is None):
session_config = get_session()._config
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices(session_config)
return [x.name for x in local_device_protos if (x.device_type == 'GPU')]
|
def load_state(fname, sess=None):
from baselines import logger
logger.warn('load_state method is deprecated, please use load_variables instead')
sess = (sess or get_session())
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
|
def save_state(fname, sess=None):
from baselines import logger
logger.warn('save_state method is deprecated, please use save_variables instead')
sess = (sess or get_session())
dirname = os.path.dirname(fname)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
|
def save_variables(save_path, variables=None, sess=None):
import joblib
sess = (sess or get_session())
variables = (variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))
ps = sess.run(variables)
save_dict = {v.name: value for (v, value) in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
|
def load_variables(load_path, variables=None, sess=None, scope=None):
import joblib
sess = (sess or get_session())
variables = (variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
if isinstance(loaded_params, list):
assert (len(loaded_params) == len(variables)), 'number of variables loaded mismatches len(variables)'
for (d, v) in zip(loaded_params, variables):
restores.append(v.assign(d))
else:
        print(f'load_variables in scope: {scope}')
        if not scope:
            for v in variables:
                restores.append(v.assign(loaded_params[v.name]))
else:
for v in variables:
if (v.name.split('/')[0] == scope):
v_name_list = v.name.split('/')
v_name_new = 'agent0'
for name in v_name_list[1:]:
v_name_new = ((v_name_new + '/') + name)
restores.append(v.assign(loaded_params[v_name_new]))
sess.run(restores)
|
def adjust_shape(placeholder, data):
    """
    adjust shape of the data to the shape of the placeholder if possible.
    If shape is incompatible, AssertionError is thrown

    Parameters:
        placeholder     tensorflow input placeholder

        data            input data to be (potentially) reshaped to be fed into placeholder

    Returns:
        reshaped data
    """
if ((not isinstance(data, np.ndarray)) and (not isinstance(data, list))):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [(x or (- 1)) for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), 'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape)
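
# Quick check (hedged sketch): adjust_shape treats None dimensions as wildcards.
# A tiny stand-in object is used here instead of a real tf.placeholder.
class _FakeShape:
    def __init__(self, dims):
        self._dims = dims
    def as_list(self):
        return self._dims

class _FakePlaceholder:
    def __init__(self, dims):
        self.shape = _FakeShape(dims)

_out = adjust_shape(_FakePlaceholder([None, 4]), [[1, 2, 3, 4], [5, 6, 7, 8]])
assert _out.shape == (2, 4)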
|
def _check_shape(placeholder_shape, data_shape):
' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'
    return True  # NOTE: the shape check is currently disabled; the code below never runs
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for (i, s_data) in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if ((s_placeholder != (- 1)) and (s_data != s_placeholder)):
return False
return True
|
def _squeeze_shape(shape):
return [x for x in shape if (x != 1)]
|
def launch_tensorboard_in_background(log_dir):
"\n To log the Tensorflow graph when using rl-algs\n algorithms, you can run the following code\n in your main script:\n import threading, time\n def start_tensorboard(session):\n time.sleep(10) # Wait until graph is setup\n tb_path = osp.join(logger.get_dir(), 'tb')\n summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)\n summary_op = tf.summary.merge_all()\n launch_tensorboard_in_background(tb_path)\n session = tf.get_default_session()\n t = threading.Thread(target=start_tensorboard, args=([session]))\n t.start()\n "
import subprocess
subprocess.Popen(['tensorboard', '--logdir', log_dir])
|
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
try:
while True:
(cmd, data) = remote.recv()
if (cmd == 'step'):
(ob, reward, done, info) = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif (cmd == 'reset'):
ob = env.reset()
remote.send(ob)
elif (cmd == 'render'):
remote.send(env.render(mode='rgb_array'))
elif (cmd == 'close'):
remote.close()
break
elif (cmd == 'get_spaces_spec'):
remote.send((env.observation_space, env.action_space, env.spec))
elif (cmd == 'set_agent_idx'):
env.set_agent_idx(data)
elif (cmd == 'get_agent_idx'):
remote.send(env.get_agent_idx())
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
|
class SubprocVecEnv(VecEnv):
    """
    VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
    Recommended to use when num_envs > 1 and step() can be a bottleneck.
    """
def __init__(self, env_fns, spaces=None, context='spawn'):
        """
        Arguments:

        env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
        """
self.waiting = False
self.closed = False
self.nenvs = len(env_fns)
ctx = mp.get_context(context)
(self.remotes, self.work_remotes) = zip(*[ctx.Pipe() for _ in range(self.nenvs)])
self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True
with clear_mpi_env_vars():
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces_spec', None))
(observation_space, action_space, self.spec) = self.remotes[0].recv()
self.viewer = None
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
self._assert_not_closed()
for (remote, action) in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
self.waiting = False
(obs, rews, dones, infos) = zip(*results)
return (_flatten_obs(obs), np.stack(rews), np.stack(dones), infos)
def remote_set_agent_idx(self, agent_idx):
for remote in self.remotes:
remote.send(('set_agent_idx', agent_idx))
def remote_get_agent_idx(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('get_agent_idx', None))
return [remote.recv() for remote in self.remotes]
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
return _flatten_obs([remote.recv() for remote in self.remotes])
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def get_images(self):
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def _assert_not_closed(self):
assert (not self.closed), 'Trying to operate on a SubprocVecEnv after calling close()'
def __del__(self):
if (not self.closed):
self.close()
|
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert (len(obs) > 0)
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
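
# Quick check (hedged sketch): _flatten_obs stacks per-env observations and
# supports both plain arrays and dict observation spaces.
import numpy as np

assert _flatten_obs([np.zeros(3), np.ones(3)]).shape == (2, 3)
assert _flatten_obs([{'pos': np.zeros(2)}, {'pos': np.ones(2)}])['pos'].shape == (2, 2)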
|
class Model(object):
    """
    We use this object to:
    __init__:
        - Creates the step_model
        - Creates the train_model

    train():
        - Make the training part (feedforward and backpropagation of gradients)

    save/load():
        - Save/load the model
    """
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train, nsteps, ent_coef, vf_coef, max_grad_norm, scope, load_path=None, microbatch_size=None):
self.sess = sess = get_session()
self.scope = scope
with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE):
act_model = policy(nbatch_act, 1, sess)
if (microbatch_size is None):
train_model = policy(nbatch_train, nsteps, sess)
else:
train_model = policy(microbatch_size, nsteps, sess)
self.A = A = train_model.pdtype.sample_placeholder([None])
self.ADV = ADV = tf.placeholder(tf.float32, [None])
self.R = R = tf.placeholder(tf.float32, [None])
self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None])
self.LR = LR = tf.placeholder(tf.float32, [])
self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
entropy = tf.reduce_mean(train_model.pd.entropy())
vpred = train_model.vf
vpredclipped = (OLDVPRED + tf.clip_by_value((train_model.vf - OLDVPRED), (- CLIPRANGE), CLIPRANGE))
vf_losses1 = tf.square((vpred - R))
vf_losses2 = tf.square((vpredclipped - R))
vf_loss = (0.5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2)))
ratio = tf.exp((OLDNEGLOGPAC - neglogpac))
pg_losses = ((- ADV) * ratio)
pg_losses2 = ((- ADV) * tf.clip_by_value(ratio, (1.0 - CLIPRANGE), (1.0 + CLIPRANGE)))
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
approxkl = (0.5 * tf.reduce_mean(tf.square((neglogpac - OLDNEGLOGPAC))))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs((ratio - 1.0)), CLIPRANGE)))
loss = ((pg_loss - (entropy * ent_coef)) + (vf_loss * vf_coef))
params = tf.trainable_variables((self.scope + '/ppo2_model'))
if (MPI is not None):
self.trainer = MpiAdamOptimizer(MPI.COMM_WORLD, learning_rate=LR, epsilon=1e-05)
else:
self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-05)
if (load_path is not None):
self.trainer = tf.train.GradientDescentOptimizer(learning_rate=LR)
grads_and_var = self.trainer.compute_gradients(loss, params)
(grads, var) = zip(*grads_and_var)
if (max_grad_norm is not None):
(grads, _grad_norm) = tf.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
self.grads = grads
self.var = var
self._train_op = self.trainer.apply_gradients(grads_and_var)
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac]
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess, scope=scope)
initialize()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='')
if (MPI is not None):
sync_from_root(sess, global_variables)
def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
advs = (returns - values)
advs = ((advs - advs.mean()) / (advs.std() + 1e-08))
td_map = {self.train_model.X: obs, self.A: actions, self.ADV: advs, self.R: returns, self.LR: lr, self.CLIPRANGE: cliprange, self.OLDNEGLOGPAC: neglogpacs, self.OLDVPRED: values}
if (states is not None):
td_map[self.train_model.S] = states
td_map[self.train_model.M] = masks
return self.sess.run((self.stats_list + [self._train_op]), td_map)[:(- 1)]
|
def constfn(val):
def f(_):
return val
return f
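
# Usage sketch (hedged): learn() accepts lr/cliprange as a float or as a schedule
# f(frac), where frac anneals from 1 (start of training) to 0 (end of training).
lr_const = constfn(3e-4)
lr_linear = lambda frac: 3e-4 * frac  # linear decay to zero
assert lr_const(0.5) == 3e-4
assert abs(lr_linear(0.5) - 1.5e-4) < 1e-12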
|
def learn(*, network, env, total_timesteps, early_stopping=False, eval_env=None, seed=None, nsteps=2048, ent_coef=0.0, ent_pool_coef=0.0, ent_version=1, lr=0.0003, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, save_interval=0, load_path=None, model_fn=None, population=None, metric_np=None, scope='', **network_kwargs):
"\n Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)\n\n Parameters:\n ----------\n\n network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)\n specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns\n tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward\n neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.\n See common/models.py/lstm for more details on using recurrent nets in policies\n\n env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.\n The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.\n\n\n nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where\n nenv is number of environment copies simulated in parallel)\n\n total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)\n\n ent_coef: float policy entropy coefficient in the optimization objective\n\n lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the\n training and 0 is the end of the training.\n\n vf_coef: float value function loss coefficient in the optimization objective\n\n max_grad_norm: float or None gradient norm clipping coefficient\n\n gamma: float discounting factor\n\n lam: float advantage estimation discounting factor (lambda in the paper)\n\n log_interval: int number of timesteps between logging events\n\n nminibatches: int number of training minibatches per update. For recurrent policies,\n should be smaller or equal than number of environments run in parallel.\n\n noptepochs: int number of training epochs per update\n\n cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training\n and 0 is the end of the training\n\n save_interval: int number of timesteps between saving events\n\n load_path: str path to load the model from\n\n **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network\n For instance, 'mlp' network architecture has arguments num_hidden and num_layers.\n\n\n\n "
additional_params = network_kwargs['network_kwargs']
from baselines import logger
if ('LR_ANNEALING' in additional_params.keys()):
lr_reduction_factor = additional_params['LR_ANNEALING']
start_lr = lr
lr = (lambda prop: ((start_lr / lr_reduction_factor) + ((start_lr - (start_lr / lr_reduction_factor)) * prop)))
if isinstance(lr, float):
lr = constfn(lr)
else:
assert callable(lr)
if isinstance(cliprange, float):
cliprange = constfn(cliprange)
else:
assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
bestrew = 0
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
nbatch = (nenvs * nsteps)
nbatch_train = (nbatch // nminibatches)
if (model_fn is None):
from baselines.ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, scope=scope, load_path=load_path)
if (load_path is not None):
model.load(load_path)
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam, population=population, ent_pool_coef=ent_pool_coef, ent_version=ent_version)
if (eval_env is not None):
eval_runner = Runner(env=eval_env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
epinfobuf = deque(maxlen=100)
if (eval_env is not None):
eval_epinfobuf = deque(maxlen=100)
tfirststart = time.perf_counter()
best_rew_per_step = 0
run_info = defaultdict(list)
nupdates = (total_timesteps // nbatch)
print('TOT NUM UPDATES', nupdates)
for update in range(1, (nupdates + 1)):
assert ((nbatch % nminibatches) == 0), "Have {} total batch size and want {} minibatches, can't split evenly".format(nbatch, nminibatches)
tstart = time.perf_counter()
frac = (1.0 - ((update - 1.0) / nupdates))
lrnow = lr(frac)
cliprangenow = cliprange(frac)
(obs, returns, masks, actions, values, neglogpacs, states, epinfos) = runner.run()
if (eval_env is not None):
(eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos) = eval_runner.run()
eplenmean = safemean([epinfo['ep_length'] for epinfo in epinfos])
eprewmean = safemean([epinfo['r'] for epinfo in epinfos])
rew_per_step = (eprewmean / eplenmean)
print('Curr learning rate {} \t Curr reward per step {}'.format(lrnow, rew_per_step))
if ((rew_per_step > best_rew_per_step) and early_stopping):
best_rew_per_step = (eprewmean / eplenmean)
checkdir = osp.join(logger.get_dir(), 'checkpoints')
model.save((checkdir + '.temp_best_model'))
print('Saved model as best', best_rew_per_step, 'avg rew/step')
epinfobuf.extend(epinfos)
if (eval_env is not None):
eval_epinfobuf.extend(eval_epinfos)
mblossvals = []
if (states is None):
inds = np.arange(nbatch)
for _ in range(noptepochs):
np.random.shuffle(inds)
for start in tqdm.trange(0, nbatch, nbatch_train, desc='{}/{}'.format(_, noptepochs)):
end = (start + nbatch_train)
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else:
assert ((nenvs % nminibatches) == 0)
envsperbatch = (nenvs // nminibatches)
envinds = np.arange(nenvs)
flatinds = np.arange((nenvs * nsteps)).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = (start + envsperbatch)
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
lossvals = np.mean(mblossvals, axis=0)
tnow = time.perf_counter()
fps = int((nbatch / (tnow - tstart)))
if (((update % log_interval) == 0) or (update == 1)):
ev = explained_variance(values, returns)
logger.logkv('ent_pool_coef', runner.ent_pool_coef)
logger.logkv('entropy_pop_delta', runner.entropy_pop_delta_mean)
logger.logkv('entropy_pop_new', runner.entropy_pop_new_mean)
logger.logkv('neg_logp_pop_new', runner.neg_logp_pop_new_mean)
logger.logkv('neg_logp_pop_delta', runner.neg_logp_pop_delta_mean)
logger.logkv('serial_timesteps', (update * nsteps))
logger.logkv('nupdates', update)
logger.logkv('total_timesteps', (update * nbatch))
logger.logkv('fps', fps)
logger.logkv('explained_variance', float(ev))
eprewmean = safemean([epinfo['r'] for epinfo in epinfobuf])
ep_dense_rew_mean = safemean([epinfo['ep_shaped_r'] for epinfo in epinfobuf])
ep_sparse_rew_mean = safemean([epinfo['ep_sparse_r'] for epinfo in epinfobuf])
eplenmean = safemean([epinfo['ep_length'] for epinfo in epinfobuf])
run_info['ent_pool_coef'].append(runner.ent_pool_coef)
run_info['entropy_pop_delta'].append(runner.entropy_pop_delta_mean)
run_info['entropy_pop_new'].append(runner.entropy_pop_new_mean)
run_info['neg_logp_pop_new'].append(runner.neg_logp_pop_new_mean)
run_info['neg_logp_pop_delta'].append(runner.neg_logp_pop_delta_mean)
run_info['eprewmean'].append(eprewmean)
run_info['ep_dense_rew_mean'].append(ep_dense_rew_mean)
run_info['ep_sparse_rew_mean'].append(ep_sparse_rew_mean)
run_info['eplenmean'].append(eplenmean)
run_info['explained_variance'].append(float(ev))
logger.logkv('true_eprew', safemean([epinfo['ep_sparse_r'] for epinfo in epinfobuf]))
logger.logkv('eprewmean', eprewmean)
logger.logkv('eplenmean', eplenmean)
if (eval_env is not None):
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]))
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]))
if (metric_np is not None):
for i in range(metric_np.shape[0]):
logger.logkv((('metric_np[' + '{:02d}'.format(i)) + ']'), safemean([metric_np[i]]))
time_elapsed = (tnow - tfirststart)
logger.logkv('time_elapsed', time_elapsed)
time_per_update = (time_elapsed / update)
time_remaining = ((nupdates - update) * time_per_update)
logger.logkv('time_remaining', (time_remaining / 60))
for (lossval, lossname) in zip(lossvals, model.loss_names):
run_info[lossname].append(lossval)
logger.logkv(lossname, lossval)
if ((MPI is None) or (MPI.COMM_WORLD.Get_rank() == 0)):
logger.dumpkvs()
if (additional_params['RUN_TYPE'] in ['ppo', 'joint_ppo']):
from overcooked_ai_py.utils import save_dict_to_file
save_dict_to_file(run_info, (additional_params['SAVE_DIR'] + 'logs'))
if (additional_params['REW_SHAPING_HORIZON'] != 0):
annealing_horizon = additional_params['REW_SHAPING_HORIZON']
annealing_thresh = 0
def fn(x):
if ((annealing_thresh != 0) and ((annealing_thresh - ((annealing_horizon / annealing_thresh) * x)) > 1)):
return 1
else:
fn = (lambda x: (((((- 1) * (x - annealing_thresh)) * 1) / (annealing_horizon - annealing_thresh)) + 1))
return max(fn(x), 0)
curr_timestep = (update * nbatch)
curr_reward_shaping = fn(curr_timestep)
env.update_reward_shaping_param(curr_reward_shaping)
print('Current reward shaping', curr_reward_shaping)
sp_horizon = additional_params['SELF_PLAY_HORIZON']
if ((ep_sparse_rew_mean > bestrew) and (ep_sparse_rew_mean > additional_params['SAVE_BEST_THRESH'])):
if ((additional_params['OTHER_AGENT_TYPE'][:2] == 'bc') and (sp_horizon != 0) and (env.self_play_randomization > 0)):
pass
else:
from human_aware_rl.ppo.ppo import save_ppo_model
print('BEST REW', ep_sparse_rew_mean, 'overwriting previous model with', bestrew)
save_ppo_model(model, '{}seed{}/best'.format(additional_params['SAVE_DIR'], additional_params['CURR_SEED']))
bestrew = max(ep_sparse_rew_mean, bestrew)
if ((additional_params['OTHER_AGENT_TYPE'] != 'sp') and (sp_horizon is not None)):
if (type(sp_horizon) is not list):
curr_reward = ep_sparse_rew_mean
rew_target = sp_horizon
shift = (rew_target / 2)
t = ((1 / rew_target) * 10)
fn = (lambda x: (((- 1) * (np.exp((t * (x - shift))) / (1 + np.exp((t * (x - shift)))))) + 1))
env.self_play_randomization = fn(curr_reward)
print('Current self-play randomization', env.self_play_randomization)
else:
assert (len(sp_horizon) == 2)
(self_play_thresh, self_play_timeline) = sp_horizon
def fn(x):
if ((self_play_thresh != 0) and ((self_play_timeline - ((self_play_timeline / self_play_thresh) * x)) > 1)):
return 1
else:
fn = (lambda x: (((((- 1) * (x - self_play_thresh)) * 1) / (self_play_timeline - self_play_thresh)) + 1))
return max(fn(x), 0)
curr_timestep = (update * nbatch)
env.self_play_randomization = fn(curr_timestep)
print('Current self-play randomization', env.self_play_randomization)
if (save_interval and (((update % save_interval) == 0) or (update == 1)) and logger.get_dir() and ((MPI is None) or (MPI.COMM_WORLD.Get_rank() == 0))):
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, ('%.5i' % update))
print('Saving to', savepath)
model.save(savepath)
run_type = additional_params['RUN_TYPE']
if ((run_type in ['ppo', 'joint_ppo']) and ((update % additional_params['VIZ_FREQUENCY']) == 0)):
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld
from overcooked_ai_py.agents.agent import AgentPair
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from human_aware_rl.baselines_utils import get_agent_from_model
print(additional_params['SAVE_DIR'])
mdp = OvercookedGridworld.from_layout_name(**additional_params['mdp_params'])
overcooked_env = OvercookedEnv(mdp, **additional_params['env_params'])
agent = get_agent_from_model(model, additional_params['sim_threads'], is_joint_action=(run_type == 'joint_ppo'))
agent.set_mdp(mdp)
if (run_type == 'ppo'):
if (additional_params['OTHER_AGENT_TYPE'] == 'sp'):
agent_pair = AgentPair(agent, agent, allow_duplicate_agents=True)
else:
print('PPO agent on index 0:')
env.other_agent.set_mdp(mdp)
agent_pair = AgentPair(agent, env.other_agent)
(trajectory, time_taken, tot_rewards, tot_shaped_rewards) = overcooked_env.run_agents(agent_pair, display=True, display_until=100)
overcooked_env.reset()
agent_pair.reset()
print('tot rew', tot_rewards, 'tot rew shaped', tot_shaped_rewards)
print('PPO agent on index 1:')
agent_pair = AgentPair(env.other_agent, agent)
else:
agent_pair = AgentPair(agent)
(trajectory, time_taken, tot_rewards, tot_shaped_rewards) = overcooked_env.run_agents(agent_pair, display=True, display_until=100)
overcooked_env.reset()
agent_pair.reset()
print('tot rew', tot_rewards, 'tot rew shaped', tot_shaped_rewards)
print(additional_params['SAVE_DIR'])
if ((nupdates > 0) and early_stopping):
checkdir = osp.join(logger.get_dir(), 'checkpoints')
print('Loaded best model', best_rew_per_step)
model.load((checkdir + '.temp_best_model'))
return (model, run_info)
|
def safemean(xs):
return (np.nan if (len(xs) == 0) else np.mean(xs))
|
class RewardShapingEnv(VecEnvWrapper):
    """
    Wrapper for the Baselines vectorized environment, which
    modifies the reward obtained to be a combination of intrinsic
    (dense, shaped) and extrinsic (sparse, from environment) reward
    """
def __init__(self, env, reward_shaping_factor=0.0):
super().__init__(env)
self.reward_shaping_factor = reward_shaping_factor
self.env_name = 'Overcooked-v0'
self.use_action_method = False
self.self_play_randomization = 0.0
self.trajectory_sp = False
self.joint_action_model = False
def reset(self):
return self.venv.reset()
def step_wait(self):
(obs, rew, done, infos) = self.venv.step_wait()
for env_num in range(self.num_envs):
dense_reward = infos[env_num]['shaped_r']
rew = list(rew)
shaped_rew = (rew[env_num] + (float(dense_reward) * self.reward_shaping_factor))
rew[env_num] = shaped_rew
if done[env_num]:
sparse_ep_rew = infos[env_num]['episode']['ep_sparse_r']
dense_ep_rew = infos[env_num]['episode']['ep_shaped_r']
infos[env_num]['episode']['r'] = (sparse_ep_rew + (dense_ep_rew * self.reward_shaping_factor))
return (obs, rew, done, infos)
def update_reward_shaping_param(self, reward_shaping_factor):
'Takes in what fraction of the run we are at, and determines the reward shaping coefficient'
self.reward_shaping_factor = reward_shaping_factor
|
class LinearAnnealer():
    """Anneals a parameter from 1 to 0 over the course of training,
    over a specified horizon."""
def __init__(self, horizon):
self.horizon = horizon
def param_value(self, timestep):
if (self.horizon == 0):
return 0
curr_value = max((1 - (timestep / self.horizon)), 0)
assert (0 <= curr_value <= 1)
return curr_value
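
# Usage sketch (hedged): linearly anneal a reward-shaping coefficient over 1000 timesteps.
annealer = LinearAnnealer(horizon=1000)
assert annealer.param_value(0) == 1
assert annealer.param_value(500) == 0.5
assert annealer.param_value(2000) == 0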
|
class DummyEnv(object):
    """
    Class used to save number of envs, observation space and action
    space data, when loading and saving baselines models
    """
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
pass
|
@register('conv_and_mlp')
def conv_network_fn(**kwargs):
'Used to register custom network type used by Baselines for Overcooked'
if ('network_kwargs' in kwargs.keys()):
params = kwargs['network_kwargs']
else:
params = kwargs
num_hidden_layers = params['NUM_HIDDEN_LAYERS']
size_hidden_layers = params['SIZE_HIDDEN_LAYERS']
num_filters = params['NUM_FILTERS']
num_convs = params['NUM_CONV_LAYERS']
def network_fn(X):
print(X.shape)
conv_out = tf.layers.conv2d(inputs=X, filters=num_filters, kernel_size=[5, 5], padding='same', activation=tf.nn.leaky_relu, name='conv_initial')
for i in range(0, (num_convs - 1)):
padding = ('same' if (i < (num_convs - 2)) else 'valid')
conv_out = tf.layers.conv2d(inputs=conv_out, filters=num_filters, kernel_size=[3, 3], padding=padding, activation=tf.nn.leaky_relu, name='conv_{}'.format(i))
out = tf.layers.flatten(conv_out)
for _ in range(num_hidden_layers):
out = tf.layers.dense(out, size_hidden_layers, activation=tf.nn.leaky_relu)
print('Last layer conv network output shape', out.shape)
return out
return network_fn
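
# Usage sketch (hedged: parameter values are purely illustrative). The builder
# registered above expects these keys, either directly or nested under 'network_kwargs'.
example_params = {
    'NUM_HIDDEN_LAYERS': 2,
    'SIZE_HIDDEN_LAYERS': 64,
    'NUM_FILTERS': 25,
    'NUM_CONV_LAYERS': 3,
}
# network_fn = conv_network_fn(**example_params)  # maps an observation tensor to a feature vector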
|
def get_vectorized_gym_env(base_env, gym_env_name, agent_idx, featurize_fn=None, **kwargs):
    """
    Create a one-player overcooked gym environment in which the other player is fixed (embedded in the environment)

    base_env: A OvercookedEnv instance (fixed or variable map)
    sim_threads: number of threads used during simulation, that corresponds to the number of parallel
                 environments used
    """
def gym_env_fn():
gym_env = gym.make(gym_env_name)
if (kwargs['RUN_TYPE'] == 'joint_ppo'):
gym_env.custom_init(base_env, joint_actions=True, featurize_fn=featurize_fn, baselines=True, agent_idx=agent_idx)
else:
gym_env.custom_init(base_env, featurize_fn=featurize_fn, baselines=True, agent_idx=agent_idx)
return gym_env
vectorized_gym_env = RewardShapingEnv(SubprocVecEnv(([gym_env_fn] * kwargs['sim_threads'])))
return vectorized_gym_env
|
def get_pbt_agent_from_config(save_dir=None, sim_threads=0, seed=0, agent_idx=0, best=False, agent_to_load_path=None):
if (agent_to_load_path is None):
agent_folder = (save_dir + 'seed_{}/agent{}'.format(seed, agent_idx))
if best:
agent_to_load_path = (agent_folder + '/best')
else:
agent_to_load_path = ((agent_folder + '/pbt_iter') + str(get_max_iter(agent_folder)))
agent = get_agent_from_saved_model(agent_to_load_path, sim_threads)
return agent
|
def get_agent_from_saved_model(save_dir, sim_threads):
'Get Agent corresponding to a saved model'
(state_policy, processed_obs_policy) = get_model_policy_from_saved_model(save_dir, sim_threads)
return AgentFromPolicy(state_policy, processed_obs_policy)
|
def get_agent_from_model(model, sim_threads, is_joint_action=False):
'Get Agent corresponding to a loaded model'
(state_policy, processed_obs_policy) = get_model_policy_from_model(model, sim_threads, is_joint_action=is_joint_action)
return AgentFromPolicy(state_policy, processed_obs_policy)
|
def get_random_agent_model(sim_threads):
'Get RandomAgent'
return RandomAgent(sim_threads)
|
def get_model_policy_from_saved_model(save_dir, sim_threads):
'Get a policy function from a saved model'
predictor = tf.contrib.predictor.from_saved_model(save_dir)
step_fn = (lambda obs: predictor({'obs': obs})['action_probs'])
return get_model_policy(step_fn, sim_threads)
|
def get_model_policy_from_model(model, sim_threads, is_joint_action=False):
def step_fn(obs):
action_probs = model.act_model.step(obs, return_action_probs=True)
return action_probs
return get_model_policy(step_fn, sim_threads, is_joint_action=is_joint_action)
|
def get_model_policy(step_fn, sim_threads, is_joint_action=False):
    """
    Returns the policy function `p(s, index)` from a saved model at `save_dir`.

    step_fn: a function that takes in observations and returns the corresponding
             action probabilities of the agent
    """
def encoded_state_policy(observations, stochastic=True, return_action_probs=False):
'Takes in SIM_THREADS many losslessly encoded states and returns corresponding actions'
action_probs_n = step_fn(observations)
if return_action_probs:
return action_probs_n
if stochastic:
action_idxs = [np.random.choice(len(Action.ALL_ACTIONS), p=action_probs) for action_probs in action_probs_n]
else:
action_idxs = [np.argmax(action_probs) for action_probs in action_probs_n]
return np.array(action_idxs)
def state_policy(mdp_state, mdp, agent_index, stochastic=True, return_action_probs=False):
'Takes in a Overcooked state object and returns the corresponding action'
obs = mdp.lossless_state_encoding(mdp_state)[agent_index]
padded_obs = np.array(([obs] + ([np.zeros(obs.shape)] * (sim_threads - 1))))
action_probs = step_fn(padded_obs)[0]
if return_action_probs:
return action_probs
if stochastic:
action_idx = np.random.choice(len(action_probs), p=action_probs)
else:
action_idx = np.argmax(action_probs)
if is_joint_action:
action_idxs = Action.INDEX_TO_ACTION_INDEX_PAIRS[action_idx]
joint_action = [Action.INDEX_TO_ACTION[i] for i in action_idxs]
return joint_action
return Action.INDEX_TO_ACTION[action_idx]
return (state_policy, encoded_state_policy)
|
def create_model(env, agent_name, use_pretrained_weights=False, **kwargs):
    """Creates a model and saves it at a location

    env: a dummy environment that is used to determine observation and action spaces
    agent_name: the scope under which the weights of the agent are saved
    """
(model, _) = learn(network=kwargs['NETWORK_TYPE'], env=env, total_timesteps=1, save_interval=0, nsteps=kwargs['BATCH_SIZE'], nminibatches=kwargs['MINIBATCHES'], noptepochs=kwargs['STEPS_PER_UPDATE'], scope=agent_name, network_kwargs=kwargs)
model.agent_name = agent_name
model.dummy_env = env
return model
|
def save_baselines_model(model, save_dir):
    """
    Saves Model (from baselines) into `path/model` file,
    and saves the tensorflow graph in the `path` directory

    NOTE: Overwrites previously saved models at the location
    """
create_dir_if_not_exists(save_dir)
model.save((save_dir + '/model'))
dummy_env = DummyEnv(model.dummy_env.num_envs, model.dummy_env.observation_space, model.dummy_env.action_space)
save_pickle(dummy_env, (save_dir + '/dummy_env'))
|
def load_baselines_model(save_dir, agent_name, config):
    """
    NOTE: Before using load it might be necessary to clear the tensorflow graph
    if there are already other variables defined
    """
dummy_env = load_pickle((save_dir + '/dummy_env'))
(model, _) = learn(network='conv_and_mlp', env=dummy_env, total_timesteps=0, load_path=(save_dir + '/model'), scope=agent_name, network_kwargs=config)
model.dummy_env = dummy_env
return model
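
# Round-trip sketch (hedged: paths and the agent name are illustrative, and the
# helper functions come from the project's utility modules).
# save_baselines_model(model, '/tmp/overcooked_agent')
# restored = load_baselines_model('/tmp/overcooked_agent', agent_name='agent0', config=params)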
|
def update_model(env, model, population=None, ent_version=1, metric_np=None, **kwargs):
    """
    Train agent defined by a model using the specified environment.

    The idea is that one can update model on a different environment than the one
    that was used to create the model (vs a different agent for example, where the
    agent is embedded within the environment)
    """
def model_fn(**kwargs):
return model
(updated_model, run_info) = learn(network=kwargs['NETWORK_TYPE'], env=env, total_timesteps=kwargs['PPO_RUN_TOT_TIMESTEPS'], nsteps=kwargs['BATCH_SIZE'], ent_coef=kwargs['ENTROPY'], ent_pool_coef=kwargs['ENTROPY_POOL'], lr=kwargs['LR'], vf_coef=kwargs['VF_COEF'], max_grad_norm=kwargs['MAX_GRAD_NORM'], gamma=kwargs['GAMMA'], lam=kwargs['LAM'], nminibatches=kwargs['MINIBATCHES'], noptepochs=kwargs['STEPS_PER_UPDATE'], cliprange=kwargs['CLIPPING'], model_fn=model_fn, population=population, ent_version=ent_version, metric_np=metric_np, save_interval=0, log_interval=1, network_kwargs=kwargs)
return run_info
|
def overwrite_model(model_from, model_to):
model_from_vars = tf.trainable_variables(model_from.scope)
model_to_vars = tf.trainable_variables(model_to.scope)
overwrite_variables(model_from_vars, model_to_vars)
|
def overwrite_variables(variables_to_copy, variables_to_overwrite):
sess = tf.get_default_session()
restores = []
assert (len(variables_to_copy) == len(variables_to_overwrite)), 'number of variables loaded mismatches len(variables)'
for (d, v) in zip(variables_to_copy, variables_to_overwrite):
restores.append(v.assign(d))
sess.run(restores)
|
def get_model_value_fn(model, sim_threads, debug=False):
'Returns the estimated value function `V(s, index)` from a saved model at `save_dir`.'
print(model)
def value_fn(mdp_state, mdp, agent_index):
obs = mdp.lossless_state_encoding(mdp_state, debug=debug)[agent_index]
padded_obs = np.array(([obs] + ([np.zeros(obs.shape)] * (sim_threads - 1))))
(a, v, state, neglogp) = model.act_model.step(padded_obs)
return v[0]
return value_fn
|
def get_model_value_fn_policy(model, sim_threads, boltzmann_rationality=1):
'Returns a policy based on the value function approximation of the model'
v_fn = get_model_value_fn(model, sim_threads)
def v_policy(mdp_state, mdp, agent_index):
successor_vals = []
for a in Action.INDEX_TO_ACTION:
joint_action = ((a, Direction.STAY) if (agent_index == 0) else (Direction.STAY, a))
s_prime = mdp.get_state_transition(mdp_state, joint_action)[0][0][0]
s_prime_val = v_fn(s_prime, mdp, agent_index)
successor_vals.append(s_prime_val)
        numerator = np.exp(boltzmann_rationality * np.array(successor_vals))  # rationality as inverse temperature; scaling outside the exp would cancel in the normalization
normalizer = sum(numerator)
num_actions = len(Action.INDEX_TO_ACTION)
if (normalizer != 0):
probability_distribution = (numerator / normalizer)
else:
probability_distribution = (np.ones(num_actions) / num_actions)
action_idx_array = list(range(num_actions))
sampled_action_idx = np.random.choice(action_idx_array, p=probability_distribution)
return Action.INDEX_TO_ACTION[sampled_action_idx]
return v_policy
|
def get_boltzmann_rational_agent_from_model(model, sim_threads, boltzmann_rationality):
p = get_model_value_fn_policy(model, sim_threads, boltzmann_rationality=boltzmann_rationality)
trained_agent = AgentFromPolicy(p, None)
return trained_agent
|
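The policy above is a softmax over successor-state values. A tiny numpy sketch with made-up values shows how the rationality parameter controls how sharply the distribution concentrates on the best successor (at rationality 1 it is an ordinary softmax).
import numpy as np

successor_vals = np.array([1.0, 2.0, 0.5])   # hypothetical V(s') for each action
for beta in (0.1, 1.0, 10.0):                # boltzmann_rationality
    probs = np.exp(beta * successor_vals)
    probs = probs / probs.sum()
    print(beta, np.round(probs, 3))
# Low beta -> near-uniform; high beta -> nearly greedy on the highest-value successor.
|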
class PBTAgent(object):
'An agent that can be saved and loaded; the main data it contains is self.model.\n    \n    Goal is to be able to pass in save_locations or PBTAgents to workers that will load such agents\n    and train them together.\n    '
def __init__(self, agent_name, start_params, start_logs=None, model=None, gym_env=None):
self.params = start_params
self.logs = (start_logs if (start_logs is not None) else {'agent_name': agent_name, 'avg_rew_per_step': [], 'avg_rew_per_step_2': [], 'params_hist': defaultdict(list), 'num_ppo_runs': 0, 'reward_shaping': []})
with tf.device('/device:GPU:{}'.format(self.params['GPU_ID'])):
self.model = (model if (model is not None) else create_model(gym_env, agent_name, **start_params))
@property
def num_ppo_runs(self):
return self.logs['num_ppo_runs']
@property
def agent_name(self):
return self.logs['agent_name']
def get_agent(self):
return get_agent_from_model(self.model, self.params['sim_threads'])
def update(self, gym_env, metric_np=None):
with tf.device('/device:GPU:{}'.format(self.params['GPU_ID'])):
train_info = update_model(gym_env, self.model, metric_np=metric_np, **self.params)
for (k, v) in train_info.items():
if (k not in self.logs.keys()):
self.logs[k] = []
self.logs[k].extend(v)
self.logs['num_ppo_runs'] += 1
def update_avg_rew_per_step_logs(self, avg_rew_per_step_stats):
self.logs['avg_rew_per_step'] = avg_rew_per_step_stats
def update_avg_rew_per_step_logs_2(self, avg_rew_per_step_stats):
self.logs['avg_rew_per_step_2'] = avg_rew_per_step_stats
def save(self, save_folder):
'Save agent model, logs, and parameters'
create_dir_if_not_exists(save_folder)
save_baselines_model(self.model, save_folder)
save_dict_to_file(dict(self.logs), (save_folder + 'logs'))
save_dict_to_file(self.params, (save_folder + 'params'))
@staticmethod
def from_dir(load_folder, agent_name):
logs = load_dict_from_file((load_folder + 'logs.txt'))
params = load_dict_from_file((load_folder + 'params.txt'))
model = load_baselines_model(load_folder[0:(- 1)], agent_name, params)
return PBTAgent(agent_name, params, start_logs=logs, model=model)
@staticmethod
def update_from_files(file0, file1, gym_env, save_dir):
# NOTE: from_dir requires an agent name; 'agent0'/'agent1' are placeholder names here.
pbt_agent0 = PBTAgent.from_dir(file0, 'agent0')
pbt_agent1 = PBTAgent.from_dir(file1, 'agent1')
gym_env.other_agent = pbt_agent1.get_agent()
pbt_agent0.update(gym_env)
return pbt_agent0
def save_predictor(self, save_folder):
'Saves easy-to-load simple_save tensorflow predictor for agent'
simple_save(tf.get_default_session(), save_folder, inputs={'obs': self.model.act_model.X}, outputs={'action': self.model.act_model.action, 'value': self.model.act_model.vf, 'action_probs': self.model.act_model.action_probs})
def update_pbt_iter_logs(self):
for (k, v) in self.params.items():
self.logs['params_hist'][k].append(v)
self.logs['params_hist'] = dict(self.logs['params_hist'])
def explore_from(self, best_training_agent):
overwrite_model(best_training_agent.model, self.model)
self.logs['num_ppo_runs'] = best_training_agent.num_ppo_runs
self.params = best_training_agent.params.copy()
|
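A hedged sketch of how a PBT exploit/explore step could combine `explore_from` with the mutation settings defined in the config below (`RESAMPLE_PROB`, `MUTATION_FACTORS`, `HYPERPARAMS_TO_MUTATE`); the helper functions here are illustrative, not the exact selection code used in training.
import numpy as np

def mutate_params(params, hyperparams_to_mutate, mutation_factors, resample_prob):
    # Illustrative "explore" step: with probability resample_prob, scale each chosen
    # hyperparameter by a randomly picked mutation factor.
    new_params = params.copy()
    for k in hyperparams_to_mutate:
        if np.random.rand() < resample_prob:
            new_params[k] = params[k] * np.random.choice(mutation_factors)
    return new_params

def exploit_and_explore(worst_agent, best_agent, params):
    # "Exploit": copy the better agent's weights, run counter and hyperparameters.
    worst_agent.explore_from(best_agent)
    # "Explore": perturb a subset of the copied hyperparameters.
    worst_agent.params = mutate_params(worst_agent.params, params['HYPERPARAMS_TO_MUTATE'],
                                       params['MUTATION_FACTORS'], params['RESAMPLE_PROB'])
|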
@ex.config
def my_config():
TIMESTAMP_DIR = True
EX_NAME = 'undefined_name'
if TIMESTAMP_DIR:
SAVE_DIR = (((PBT_DATA_DIR + time.strftime('%Y_%m_%d-%H_%M_%S_')) + EX_NAME) + '/')
else:
SAVE_DIR = ((PBT_DATA_DIR + EX_NAME) + '/')
print('Saving data to ', SAVE_DIR)
RUN_TYPE = 'pbt'
LOCAL_TESTING = False
GPU_ID = 1
SEEDS = [0]
sim_threads = (50 if (not LOCAL_TESTING) else 2)
TOTAL_STEPS_PER_AGENT = (15000000.0 if (not LOCAL_TESTING) else 10000.0)
POPULATION_SIZE = 4
ITER_PER_SELECTION = POPULATION_SIZE
RESAMPLE_PROB = 0.33
MUTATION_FACTORS = [0.75, 1.25]
HYPERPARAMS_TO_MUTATE = ['LAM', 'CLIPPING', 'LR', 'STEPS_PER_UPDATE', 'ENTROPY', 'VF_COEF']
NUM_SELECTION_GAMES = (10 if (not LOCAL_TESTING) else 2)
PPO_RUN_TOT_TIMESTEPS = (40000 if (not LOCAL_TESTING) else 1000)
NUM_PBT_ITER = int(((TOTAL_STEPS_PER_AGENT * math.sqrt(POPULATION_SIZE)) // (ITER_PER_SELECTION * PPO_RUN_TOT_TIMESTEPS)))
TOTAL_BATCH_SIZE = (20000 if (not LOCAL_TESTING) else 1000)
MINIBATCHES = (5 if (not LOCAL_TESTING) else 1)
BATCH_SIZE = (TOTAL_BATCH_SIZE // sim_threads)
STEPS_PER_UPDATE = (8 if (not LOCAL_TESTING) else 1)
LR = 0.005
ENTROPY = 0.5
ENTROPY_POOL = 0.0
EPSILON = 1e-06
PRIORITIZED_SAMPLING = False
ALPHA = 1.0
METRIC = 1.0
LOAD_FOLDER_LST = ''
VF_COEF = 0.1
GAMMA = 0.99
LAM = 0.98
MAX_GRAD_NORM = 0.1
CLIPPING = 0.05
REW_SHAPING_HORIZON = 0
NETWORK_TYPE = 'conv_and_mlp'
NUM_HIDDEN_LAYERS = 3
SIZE_HIDDEN_LAYERS = 64
NUM_FILTERS = 25
NUM_CONV_LAYERS = 3
layout_name = None
start_order_list = None
rew_shaping_params = {'PLACEMENT_IN_POT_REW': 3, 'DISH_PICKUP_REWARD': 3, 'SOUP_PICKUP_REWARD': 5, 'DISH_DISP_DISTANCE_REW': 0.015, 'POT_DISTANCE_REW': 0.03, 'SOUP_DISTANCE_REW': 0.1}
horizon = 400
mdp_generation_params = {'padded_mdp_shape': (11, 7), 'mdp_shape_fn': ([5, 11], [5, 7]), 'prop_empty_fn': [0.6, 1], 'prop_feats_fn': [0, 0.6]}
GRAD_UPDATES_PER_AGENT = (((((STEPS_PER_UPDATE * MINIBATCHES) * (PPO_RUN_TOT_TIMESTEPS // TOTAL_BATCH_SIZE)) * ITER_PER_SELECTION) * NUM_PBT_ITER) // POPULATION_SIZE)
print('Total steps per agent', TOTAL_STEPS_PER_AGENT)
print('Grad updates per agent', GRAD_UPDATES_PER_AGENT)
params = {'LOCAL_TESTING': LOCAL_TESTING, 'RUN_TYPE': RUN_TYPE, 'EX_NAME': EX_NAME, 'SAVE_DIR': SAVE_DIR, 'GPU_ID': GPU_ID, 'mdp_params': {'layout_name': layout_name, 'start_order_list': start_order_list, 'rew_shaping_params': rew_shaping_params}, 'env_params': {'horizon': horizon}, 'PPO_RUN_TOT_TIMESTEPS': PPO_RUN_TOT_TIMESTEPS, 'NUM_PBT_ITER': NUM_PBT_ITER, 'ITER_PER_SELECTION': ITER_PER_SELECTION, 'POPULATION_SIZE': POPULATION_SIZE, 'RESAMPLE_PROB': RESAMPLE_PROB, 'MUTATION_FACTORS': MUTATION_FACTORS, 'mdp_generation_params': mdp_generation_params, 'HYPERPARAMS_TO_MUTATE': HYPERPARAMS_TO_MUTATE, 'REW_SHAPING_HORIZON': REW_SHAPING_HORIZON, 'ENTROPY': ENTROPY, 'ENTROPY_POOL': ENTROPY_POOL, 'EPSILON': EPSILON, 'PRIORITIZED_SAMPLING': PRIORITIZED_SAMPLING, 'ALPHA': ALPHA, 'METRIC': METRIC, 'LOAD_FOLDER_LST': LOAD_FOLDER_LST.split(':'), 'GAMMA': GAMMA, 'sim_threads': sim_threads, 'TOTAL_BATCH_SIZE': TOTAL_BATCH_SIZE, 'BATCH_SIZE': BATCH_SIZE, 'MAX_GRAD_NORM': MAX_GRAD_NORM, 'LR': LR, 'VF_COEF': VF_COEF, 'STEPS_PER_UPDATE': STEPS_PER_UPDATE, 'MINIBATCHES': MINIBATCHES, 'CLIPPING': CLIPPING, 'LAM': LAM, 'NETWORK_TYPE': NETWORK_TYPE, 'NUM_HIDDEN_LAYERS': NUM_HIDDEN_LAYERS, 'SIZE_HIDDEN_LAYERS': SIZE_HIDDEN_LAYERS, 'NUM_FILTERS': NUM_FILTERS, 'NUM_CONV_LAYERS': NUM_CONV_LAYERS, 'SEEDS': SEEDS, 'NUM_SELECTION_GAMES': NUM_SELECTION_GAMES, 'total_steps_per_agent': TOTAL_STEPS_PER_AGENT, 'grad_updates_per_agent': GRAD_UPDATES_PER_AGENT}
|
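Plugging the default (non-local-testing) values into the two derived quantities above gives a concrete sense of the run length; this is just the arithmetic of the config expressions, not extra configuration.
import math

TOTAL_STEPS_PER_AGENT, POPULATION_SIZE, ITER_PER_SELECTION = 15000000.0, 4, 4
PPO_RUN_TOT_TIMESTEPS, TOTAL_BATCH_SIZE = 40000, 20000
STEPS_PER_UPDATE, MINIBATCHES = 8, 5

NUM_PBT_ITER = int((TOTAL_STEPS_PER_AGENT * math.sqrt(POPULATION_SIZE)) // (ITER_PER_SELECTION * PPO_RUN_TOT_TIMESTEPS))
GRAD_UPDATES_PER_AGENT = (STEPS_PER_UPDATE * MINIBATCHES * (PPO_RUN_TOT_TIMESTEPS // TOTAL_BATCH_SIZE) * ITER_PER_SELECTION * NUM_PBT_ITER) // POPULATION_SIZE
print(NUM_PBT_ITER, GRAD_UPDATES_PER_AGENT)   # 187, 14960
|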
@ex.named_config
def fixed_mdp():
LOCAL_TESTING = False
layout_name = 'simple'
sim_threads = (30 if (not LOCAL_TESTING) else 2)
PPO_RUN_TOT_TIMESTEPS = (36000 if (not LOCAL_TESTING) else 1000)
TOTAL_BATCH_SIZE = (12000 if (not LOCAL_TESTING) else 1000)
STEPS_PER_UPDATE = 5
MINIBATCHES = (6 if (not LOCAL_TESTING) else 2)
LR = 0.0005
|
@ex.named_config
def fixed_mdp_rnd_init():
LOCAL_TESTING = False
fixed_mdp = True
layout_name = 'scenario2'
sim_threads = (10 if LOCAL_TESTING else 50)
PPO_RUN_TOT_TIMESTEPS = 24000
TOTAL_BATCH_SIZE = 8000
STEPS_PER_UPDATE = 4
MINIBATCHES = 4
LR = 0.0005
|
@ex.named_config
def padded_all_scenario():
LOCAL_TESTING = False
fixed_mdp = ['scenario2', 'simple', 'schelling_s', 'unident_s']
PADDED_MDP_SHAPE = (10, 5)
sim_threads = (10 if LOCAL_TESTING else 60)
PPO_RUN_TOT_TIMESTEPS = (40000 if (not LOCAL_TESTING) else 1000)
TOTAL_BATCH_SIZE = (20000 if (not LOCAL_TESTING) else 1000)
STEPS_PER_UPDATE = 8
MINIBATCHES = 4
LR = 0.0005
REW_SHAPING_HORIZON = 10000000.0
|
def pbt_one_run(params, seed):
create_dir_if_not_exists(params['SAVE_DIR'])
save_dict_to_file(params, (params['SAVE_DIR'] + 'config'))
mdp = OvercookedGridworld.from_layout_name(**params['mdp_params'])
overcooked_env = OvercookedEnv(mdp, **params['env_params'])
print('Sample training environments:')
for _ in range(5):
overcooked_env.reset()
print(overcooked_env)
gym_env = get_vectorized_gym_env(overcooked_env, 'Overcooked-v0', agent_idx=0, featurize_fn=(lambda x: mdp.lossless_state_encoding(x)), **params)
gym_env.update_reward_shaping_param(1.0)
annealer = LinearAnnealer(horizon=params['REW_SHAPING_HORIZON'])
population_size = params['POPULATION_SIZE']
pbt_population = []
pbt_agent_names = [('agent' + str(i)) for i in range(population_size)]
print(f"population_size {population_size} len(params['LOAD_FOLDER_LST']) {len(params['LOAD_FOLDER_LST'])}")
assert (population_size == len(params['LOAD_FOLDER_LST']))
for (agent_name, load_folder) in zip(pbt_agent_names, params['LOAD_FOLDER_LST']):
if (not (agent_name == 'agent0')):
agent = PBTAgent.from_dir(load_folder, agent_name)
print(f'loaded model from {load_folder}')
else:
agent = PBTAgent(agent_name, params, gym_env=gym_env)
print(f'Initialized {agent_name}')
pbt_population.append(agent)
print('Initialized agent models')
all_pairs = []
for i in range(population_size):
for j in range((i + 1), population_size):
all_pairs.append((i, j))
def pbt_training():
best_sparse_rew_avg = ([(- np.Inf)] * population_size)
best_sparse_rew_avg_2 = ([(- np.Inf)] * population_size)
metric_np = np.zeros((population_size * 2))
metric_train_np = np.zeros((population_size * 2))
metric_dense_np = np.zeros((population_size * 2))
metric_hand1_np = np.concatenate([np.zeros(population_size), (np.ones(population_size) * 100)])
for pbt_iter in range(1, (params['NUM_PBT_ITER'] + 1)):
print('\n\n\nPBT ITERATION NUM {}'.format(pbt_iter))
assert (params['ITER_PER_SELECTION'] == population_size)
pairs_to_train = list(itertools.product(range(population_size), range(1)))
if (pbt_iter == 1):
avg_ep_returns_dict = defaultdict(list)
avg_ep_returns_sparse_dict = defaultdict(list)
avg_ep_returns_dict_2 = defaultdict(list)
avg_ep_returns_sparse_dict_2 = defaultdict(list)
i = 0
pbt_agent = pbt_population[i]
for j in range(i, population_size):
print('Evaluating agent {} and {}'.format(i, j))
pbt_agent_other = pbt_population[j]
agent_pair = AgentPair(pbt_agent.get_agent(), pbt_agent_other.get_agent())
reward_shaping_param = 1.0
trajs = overcooked_env.get_rollouts(agent_pair, params['NUM_SELECTION_GAMES'], reward_shaping=reward_shaping_param)
(dense_rews, sparse_rews, lens) = (trajs['ep_returns'], trajs['ep_returns_sparse'], trajs['ep_lengths'])
rew_per_step = (np.sum(dense_rews) / np.sum(lens))
avg_ep_returns_dict[i].append(rew_per_step)
avg_ep_returns_sparse_dict[i].append(sparse_rews)
metric_np[i] = np.mean(sparse_rews)
metric_dense_np[i] = np.mean(dense_rews)
if (j != i):
avg_ep_returns_dict[j].append(rew_per_step)
avg_ep_returns_sparse_dict[j].append(sparse_rews)
metric_np[j] = np.mean(sparse_rews)
metric_dense_np[j] = np.mean(dense_rews)
print('Evaluating agent {} and {}'.format(j, i))
agent_pair = AgentPair(pbt_agent_other.get_agent(), pbt_agent.get_agent())
trajs = overcooked_env.get_rollouts(agent_pair, params['NUM_SELECTION_GAMES'], reward_shaping=reward_shaping_param)
(dense_rews, sparse_rews, lens) = (trajs['ep_returns'], trajs['ep_returns_sparse'], trajs['ep_lengths'])
rew_per_step = (np.sum(dense_rews) / np.sum(lens))
avg_ep_returns_dict_2[i].append(rew_per_step)
avg_ep_returns_sparse_dict_2[i].append(sparse_rews)
metric_np[(population_size + i)] = np.mean(sparse_rews)
metric_dense_np[(population_size + i)] = np.mean(dense_rews)
if (j != i):
avg_ep_returns_dict_2[j].append(rew_per_step)
avg_ep_returns_sparse_dict_2[j].append(sparse_rews)
metric_np[(population_size + j)] = np.mean(sparse_rews)
metric_dense_np[(population_size + j)] = np.mean(dense_rews)
print('AVG ep rewards dict', avg_ep_returns_dict)
print('AVG ep rewards dict_2', avg_ep_returns_dict_2)
print(f'The first evaluation metric_np {metric_np} {metric_np.shape}')
for sel_iter in range(params['ITER_PER_SELECTION']):
if params['PRIORITIZED_SAMPLING']:
if (params['METRIC'] == 1.0):
sampling_prob_np = metric_np.copy()
else:
print('METRIC version is unknown')
exit()
print(f"""params["METRIC"] {params['METRIC']}""")
sampling_prob_np += params['EPSILON']
sampling_prob_np = (1 / sampling_prob_np)
sampling_rank_np = rankdata(sampling_prob_np, method='dense')
print(f'sampling_rank_np {sampling_rank_np}')
sampling_prob_np = (sampling_rank_np / sampling_rank_np.sum())
assert (params['ALPHA'] >= 0)
sampling_prob_np = (sampling_prob_np ** params['ALPHA'])
sampling_prob_np = (sampling_prob_np / sampling_prob_np.sum())
print(f"sampling_prob_np {sampling_prob_np} alpha {params['ALPHA']}")
pair_idx = np.random.choice((2 * population_size), p=sampling_prob_np)
idx0 = (pair_idx % population_size)
agent_idx = (pair_idx // population_size)
idx1 = 0
else:
pair_idx = np.random.choice(len(pairs_to_train))
(idx0, idx1) = pairs_to_train.pop(pair_idx)
agent_idx = np.random.choice([0, 1])
print(f'idx0 {idx0} idx1 {idx1} agent_idx {agent_idx}')
(pbt_agent0, pbt_agent1) = (pbt_population[idx0], pbt_population[idx1])
print('Training agent {} ({}) with agent {} ({}) fixed (pbt #{}/{}, sel #{}/{})'.format(idx1, pbt_agent1.num_ppo_runs, idx0, pbt_agent0.num_ppo_runs, pbt_iter, params['NUM_PBT_ITER'], sel_iter, params['ITER_PER_SELECTION']))
agent_env_steps = (pbt_agent1.num_ppo_runs * params['PPO_RUN_TOT_TIMESTEPS'])
reward_shaping_param = annealer.param_value(agent_env_steps)
print('Current reward shaping:', reward_shaping_param, '\t Save_dir', params['SAVE_DIR'])
pbt_agent1.logs['reward_shaping'].append(reward_shaping_param)
gym_env.update_reward_shaping_param(reward_shaping_param)
gym_env.other_agent = pbt_agent0.get_agent()
gym_env.venv.remote_set_agent_idx(agent_idx)
print(f'gym_env.venv.remote_get_agent_idx() {gym_env.venv.remote_get_agent_idx()[0]} {len(gym_env.venv.remote_get_agent_idx())}')
if (params['METRIC'] == 1.0):
pbt_agent1.update(gym_env, metric_np=metric_np)
print(f'metric_np {metric_np} {metric_np.shape}')
else:
print('METRIC version is unknown')
exit()
ep_sparse_rew_mean = pbt_agent1.logs['ep_sparse_rew_mean'][(- 1)]
metric_train_np[pair_idx] = ep_sparse_rew_mean
save_folder = ((params['SAVE_DIR'] + pbt_agent1.agent_name) + '/')
pbt_agent1.save(save_folder)
agent_pair = AgentPair(pbt_agent0.get_agent(), pbt_agent1.get_agent())
overcooked_env.get_rollouts(agent_pair, num_games=1, final_state=True, reward_shaping=reward_shaping_param)
print('\nEVALUATION PHASE\n')
avg_ep_returns_dict = defaultdict(list)
avg_ep_returns_sparse_dict = defaultdict(list)
avg_ep_returns_dict_2 = defaultdict(list)
avg_ep_returns_sparse_dict_2 = defaultdict(list)
i = 0
pbt_agent = pbt_population[i]
pbt_agent.update_pbt_iter_logs()
if True:
save_folder = ((params['SAVE_DIR'] + pbt_agent.agent_name) + '/')
pbt_agent.save_predictor((save_folder + 'pbt_iter{}/'.format(pbt_iter)))
pbt_agent.save((save_folder + 'pbt_iter{}/'.format(pbt_iter)))
delete_dir_if_exists((save_folder + 'pbt_iter{}/'.format((pbt_iter - 1))), verbose=True)
i = 0
for j in range(i, population_size):
print('Evaluating agent {} and {}'.format(i, j))
pbt_agent_other = pbt_population[j]
agent_pair = AgentPair(pbt_agent.get_agent(), pbt_agent_other.get_agent())
trajs = overcooked_env.get_rollouts(agent_pair, params['NUM_SELECTION_GAMES'], reward_shaping=reward_shaping_param)
(dense_rews, sparse_rews, lens) = (trajs['ep_returns'], trajs['ep_returns_sparse'], trajs['ep_lengths'])
rew_per_step = (np.sum(dense_rews) / np.sum(lens))
avg_ep_returns_dict[i].append(rew_per_step)
avg_ep_returns_sparse_dict[i].append(sparse_rews)
metric_np[i] = np.mean(sparse_rews)
metric_dense_np[i] = np.mean(dense_rews)
if (j != i):
avg_ep_returns_dict[j].append(rew_per_step)
avg_ep_returns_sparse_dict[j].append(sparse_rews)
metric_np[j] = np.mean(sparse_rews)
metric_dense_np[j] = np.mean(dense_rews)
print('Evaluating agent {} and {}'.format(j, i))
agent_pair = AgentPair(pbt_agent_other.get_agent(), pbt_agent.get_agent())
trajs = overcooked_env.get_rollouts(agent_pair, params['NUM_SELECTION_GAMES'], reward_shaping=reward_shaping_param)
(dense_rews, sparse_rews, lens) = (trajs['ep_returns'], trajs['ep_returns_sparse'], trajs['ep_lengths'])
rew_per_step = (np.sum(dense_rews) / np.sum(lens))
avg_ep_returns_dict_2[i].append(rew_per_step)
avg_ep_returns_sparse_dict_2[i].append(sparse_rews)
metric_np[(population_size + i)] = np.mean(sparse_rews)
metric_dense_np[(population_size + i)] = np.mean(dense_rews)
if (j != i):
avg_ep_returns_dict_2[j].append(rew_per_step)
avg_ep_returns_sparse_dict_2[j].append(sparse_rews)
metric_np[(population_size + j)] = np.mean(sparse_rews)
metric_dense_np[(population_size + j)] = np.mean(dense_rews)
print('AVG ep rewards dict', avg_ep_returns_dict)
print('AVG ep rewards dict_2', avg_ep_returns_dict_2)
print(f'Evaluation metric_np {metric_np} {metric_np.shape}')
i = 0
pbt_agent = pbt_population[i]
pbt_agent.update_avg_rew_per_step_logs(avg_ep_returns_dict[i])
pbt_agent.update_avg_rew_per_step_logs_2(avg_ep_returns_dict_2[i])
avg_sparse_rew = np.mean(avg_ep_returns_sparse_dict[i])
avg_sparse_rew_2 = np.mean(avg_ep_returns_sparse_dict_2[i])
if ((avg_sparse_rew > best_sparse_rew_avg[i]) and (avg_sparse_rew_2 > best_sparse_rew_avg_2[i])):
best_sparse_rew_avg[i] = avg_sparse_rew
best_sparse_rew_avg_2[i] = avg_sparse_rew_2
agent_name = pbt_agent.agent_name
print('New best avg sparse rews {} and {} for agent {}, saving...'.format(best_sparse_rew_avg, best_sparse_rew_avg_2, agent_name))
best_save_folder = ((params['SAVE_DIR'] + agent_name) + '/best/')
delete_dir_if_exists(best_save_folder, verbose=True)
pbt_agent.save_predictor(best_save_folder)
pbt_agent.save(best_save_folder)
pbt_training()
reset_tf()
print(params['SAVE_DIR'])
|
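The prioritized-sampling branch in the selection loop above turns the evaluation metrics into sampling probabilities by inverting (metric + EPSILON), dense-ranking the result, and raising the normalized ranks to the power ALPHA, so agent/index pairs with lower sparse reward are trained more often. A small numpy/scipy sketch with made-up metrics:
import numpy as np
from scipy.stats import rankdata

population_size = 4
metric_np = np.array([120.0, 40.0, 0.0, 80.0, 120.0, 40.0, 0.0, 80.0])  # hypothetical, length 2 * population_size
EPSILON, ALPHA = 1e-06, 1.0

weights = 1.0 / (metric_np + EPSILON)          # lower reward -> larger weight
ranks = rankdata(weights, method='dense')      # -> [1 3 4 2 1 3 4 2]
probs = ranks / ranks.sum()
probs = (probs ** ALPHA) / (probs ** ALPHA).sum()
pair_idx = np.random.choice(2 * population_size, p=probs)
idx0, agent_idx = pair_idx % population_size, pair_idx // population_size
|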
@ex.automain
def run_pbt(params):
create_dir_if_not_exists(params['SAVE_DIR'])
save_dict_to_file(params, (params['SAVE_DIR'] + 'config'))
for seed in params['SEEDS']:
set_global_seed(seed)
curr_seed_params = params.copy()
curr_seed_params['SAVE_DIR'] += 'seed_{}/'.format(seed)
pbt_one_run(curr_seed_params, seed)
|
class PBTAgent(object):
'An agent that can be saved and loaded; the main data it contains is self.model.\n    \n    Goal is to be able to pass in save_locations or PBTAgents to workers that will load such agents\n    and train them together.\n    '
def __init__(self, agent_name, start_params, start_logs=None, model=None, gym_env=None):
self.params = start_params
self.logs = (start_logs if (start_logs is not None) else {'agent_name': agent_name, 'avg_rew_per_step': [], 'params_hist': defaultdict(list), 'num_ppo_runs': 0, 'reward_shaping': []})
self.logs['agent_name'] = (agent_name if (agent_name is not None) else self.logs['agent_name'])
with tf.device('/device:GPU:{}'.format(self.params['GPU_ID'])):
self.model = (model if (model is not None) else create_model(gym_env, agent_name, **start_params))
@property
def num_ppo_runs(self):
return self.logs['num_ppo_runs']
@property
def agent_name(self):
return self.logs['agent_name']
def get_agent(self):
return get_agent_from_model(self.model, self.params['sim_threads'])
def update(self, gym_env, population=None, ent_version=1):
with tf.device('/device:GPU:{}'.format(self.params['GPU_ID'])):
train_info = update_model(gym_env, self.model, population, ent_version, **self.params)
for (k, v) in train_info.items():
if (k not in self.logs.keys()):
self.logs[k] = []
self.logs[k].extend(v)
self.logs['num_ppo_runs'] += 1
def update_avg_rew_per_step_logs(self, avg_rew_per_step_stats):
self.logs['avg_rew_per_step'] = avg_rew_per_step_stats
def save(self, save_folder):
'Save agent model, logs, and parameters'
create_dir_if_not_exists(save_folder)
save_baselines_model(self.model, save_folder)
save_dict_to_file(dict(self.logs), (save_folder + 'logs'))
save_dict_to_file(self.params, (save_folder + 'params'))
@staticmethod
def from_dir(load_folder, agent_name):
logs = load_dict_from_file((load_folder + 'logs.txt'))
params = load_dict_from_file((load_folder + 'params.txt'))
model = load_baselines_model(load_folder[0:(- 1)], agent_name, params)
return PBTAgent(agent_name, params, start_logs=logs, model=model)
@staticmethod
def update_from_files(file0, file1, gym_env, save_dir):
# NOTE: from_dir requires an agent name; 'agent0'/'agent1' are placeholder names here.
pbt_agent0 = PBTAgent.from_dir(file0, 'agent0')
pbt_agent1 = PBTAgent.from_dir(file1, 'agent1')
gym_env.other_agent = pbt_agent1.get_agent()
pbt_agent0.update(gym_env)
return pbt_agent0
def save_predictor(self, save_folder):
'Saves easy-to-load simple_save tensorflow predictor for agent'
simple_save(tf.get_default_session(), save_folder, inputs={'obs': self.model.act_model.X}, outputs={'action': self.model.act_model.action, 'value': self.model.act_model.vf, 'action_probs': self.model.act_model.action_probs})
def update_pbt_iter_logs(self):
for (k, v) in self.params.items():
self.logs['params_hist'][k].append(v)
self.logs['params_hist'] = dict(self.logs['params_hist'])
def explore_from(self, best_training_agent):
overwrite_model(best_training_agent.model, self.model)
self.logs['num_ppo_runs'] = best_training_agent.num_ppo_runs
self.params = best_training_agent.params.copy()
|
@ex.config
def my_config():
TIMESTAMP_DIR = True
EX_NAME = 'undefined_name'
if TIMESTAMP_DIR:
SAVE_DIR = (((PBT_DATA_DIR + time.strftime('%Y_%m_%d-%H_%M_%S_')) + EX_NAME) + '/')
else:
SAVE_DIR = ((PBT_DATA_DIR + EX_NAME) + '/')
print('Saving data to ', SAVE_DIR)
RUN_TYPE = 'pbt'
LOCAL_TESTING = False
GPU_ID = 1
SEEDS = [0]
sim_threads = (50 if (not LOCAL_TESTING) else 2)
TOTAL_STEPS_PER_AGENT = (15000000.0 if (not LOCAL_TESTING) else 10000.0)
POPULATION_SIZE = 4
ITER_PER_SELECTION = POPULATION_SIZE
RESAMPLE_PROB = 0.33
MUTATION_FACTORS = [0.75, 1.25]
HYPERPARAMS_TO_MUTATE = ['LAM', 'CLIPPING', 'LR', 'STEPS_PER_UPDATE', 'ENTROPY', 'VF_COEF']
NUM_SELECTION_GAMES = (10 if (not LOCAL_TESTING) else 2)
PPO_RUN_TOT_TIMESTEPS = (40000 if (not LOCAL_TESTING) else 1000)
NUM_PBT_ITER = int(((TOTAL_STEPS_PER_AGENT * POPULATION_SIZE) // (ITER_PER_SELECTION * PPO_RUN_TOT_TIMESTEPS)))
TOTAL_BATCH_SIZE = (20000 if (not LOCAL_TESTING) else 1000)
MINIBATCHES = (5 if (not LOCAL_TESTING) else 1)
BATCH_SIZE = (TOTAL_BATCH_SIZE // sim_threads)
STEPS_PER_UPDATE = (8 if (not LOCAL_TESTING) else 1)
LR = 0.005
ENTROPY = 0.5
ENTROPY_POOL = 0.1
ENT_VERSION = 1
LOAD_FOLDER_LST = ''
VF_COEF = 0.1
GAMMA = 0.99
LAM = 0.98
MAX_GRAD_NORM = 0.1
CLIPPING = 0.05
REW_SHAPING_HORIZON = 0
NETWORK_TYPE = 'conv_and_mlp'
NUM_HIDDEN_LAYERS = 3
SIZE_HIDDEN_LAYERS = 64
NUM_FILTERS = 25
NUM_CONV_LAYERS = 3
layout_name = None
start_order_list = None
rew_shaping_params = {'PLACEMENT_IN_POT_REW': 3, 'DISH_PICKUP_REWARD': 3, 'SOUP_PICKUP_REWARD': 5, 'DISH_DISP_DISTANCE_REW': 0.015, 'POT_DISTANCE_REW': 0.03, 'SOUP_DISTANCE_REW': 0.1}
horizon = 400
mdp_generation_params = {'padded_mdp_shape': (11, 7), 'mdp_shape_fn': ([5, 11], [5, 7]), 'prop_empty_fn': [0.6, 1], 'prop_feats_fn': [0, 0.6]}
GRAD_UPDATES_PER_AGENT = (((((STEPS_PER_UPDATE * MINIBATCHES) * (PPO_RUN_TOT_TIMESTEPS // TOTAL_BATCH_SIZE)) * ITER_PER_SELECTION) * NUM_PBT_ITER) // POPULATION_SIZE)
print('Total steps per agent', TOTAL_STEPS_PER_AGENT)
print('Grad updates per agent', GRAD_UPDATES_PER_AGENT)
params = {'LOCAL_TESTING': LOCAL_TESTING, 'RUN_TYPE': RUN_TYPE, 'EX_NAME': EX_NAME, 'SAVE_DIR': SAVE_DIR, 'GPU_ID': GPU_ID, 'mdp_params': {'layout_name': layout_name, 'start_order_list': start_order_list, 'rew_shaping_params': rew_shaping_params}, 'env_params': {'horizon': horizon}, 'PPO_RUN_TOT_TIMESTEPS': PPO_RUN_TOT_TIMESTEPS, 'NUM_PBT_ITER': NUM_PBT_ITER, 'ITER_PER_SELECTION': ITER_PER_SELECTION, 'POPULATION_SIZE': POPULATION_SIZE, 'RESAMPLE_PROB': RESAMPLE_PROB, 'MUTATION_FACTORS': MUTATION_FACTORS, 'mdp_generation_params': mdp_generation_params, 'HYPERPARAMS_TO_MUTATE': HYPERPARAMS_TO_MUTATE, 'REW_SHAPING_HORIZON': REW_SHAPING_HORIZON, 'ENTROPY': ENTROPY, 'ENTROPY_POOL': ENTROPY_POOL, 'ENT_VERSION': ENT_VERSION, 'LOAD_FOLDER_LST': LOAD_FOLDER_LST.split(':'), 'GAMMA': GAMMA, 'sim_threads': sim_threads, 'TOTAL_BATCH_SIZE': TOTAL_BATCH_SIZE, 'BATCH_SIZE': BATCH_SIZE, 'MAX_GRAD_NORM': MAX_GRAD_NORM, 'LR': LR, 'VF_COEF': VF_COEF, 'STEPS_PER_UPDATE': STEPS_PER_UPDATE, 'MINIBATCHES': MINIBATCHES, 'CLIPPING': CLIPPING, 'LAM': LAM, 'NETWORK_TYPE': NETWORK_TYPE, 'NUM_HIDDEN_LAYERS': NUM_HIDDEN_LAYERS, 'SIZE_HIDDEN_LAYERS': SIZE_HIDDEN_LAYERS, 'NUM_FILTERS': NUM_FILTERS, 'NUM_CONV_LAYERS': NUM_CONV_LAYERS, 'SEEDS': SEEDS, 'NUM_SELECTION_GAMES': NUM_SELECTION_GAMES, 'total_steps_per_agent': TOTAL_STEPS_PER_AGENT, 'grad_updates_per_agent': GRAD_UPDATES_PER_AGENT}
|
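Unlike the earlier config, this one scales NUM_PBT_ITER by POPULATION_SIZE rather than by its square root; with the same defaults the arithmetic works out as below.
TOTAL_STEPS_PER_AGENT, POPULATION_SIZE, ITER_PER_SELECTION, PPO_RUN_TOT_TIMESTEPS = 15000000.0, 4, 4, 40000
NUM_PBT_ITER = int((TOTAL_STEPS_PER_AGENT * POPULATION_SIZE) // (ITER_PER_SELECTION * PPO_RUN_TOT_TIMESTEPS))
print(NUM_PBT_ITER)   # 375 (vs 187 with the sqrt(POPULATION_SIZE) variant)
|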
@ex.named_config
def fixed_mdp():
LOCAL_TESTING = False
layout_name = 'simple'
sim_threads = (30 if (not LOCAL_TESTING) else 2)
PPO_RUN_TOT_TIMESTEPS = (36000 if (not LOCAL_TESTING) else 1000)
TOTAL_BATCH_SIZE = (12000 if (not LOCAL_TESTING) else 1000)
STEPS_PER_UPDATE = 5
MINIBATCHES = (6 if (not LOCAL_TESTING) else 2)
LR = 0.0005
|
@ex.named_config
def fixed_mdp_rnd_init():
LOCAL_TESTING = False
fixed_mdp = True
layout_name = 'scenario2'
sim_threads = (10 if LOCAL_TESTING else 50)
PPO_RUN_TOT_TIMESTEPS = 24000
TOTAL_BATCH_SIZE = 8000
STEPS_PER_UPDATE = 4
MINIBATCHES = 4
LR = 0.0005
|
@ex.named_config
def padded_all_scenario():
LOCAL_TESTING = False
fixed_mdp = ['scenario2', 'simple', 'schelling_s', 'unident_s']
PADDED_MDP_SHAPE = (10, 5)
sim_threads = (10 if LOCAL_TESTING else 60)
PPO_RUN_TOT_TIMESTEPS = (40000 if (not LOCAL_TESTING) else 1000)
TOTAL_BATCH_SIZE = (20000 if (not LOCAL_TESTING) else 1000)
STEPS_PER_UPDATE = 8
MINIBATCHES = 4
LR = 0.0005
REW_SHAPING_HORIZON = 10000000.0
|
def pbt_one_run(params, seed):
create_dir_if_not_exists(params['SAVE_DIR'])
save_dict_to_file(params, (params['SAVE_DIR'] + 'config'))
mdp = OvercookedGridworld.from_layout_name(**params['mdp_params'])
overcooked_env = OvercookedEnv(mdp, **params['env_params'])
print('Sample training environments:')
for _ in range(5):
overcooked_env.reset()
print(overcooked_env)
gym_env = get_vectorized_gym_env(overcooked_env, 'Overcooked-v0', agent_idx=0, featurize_fn=(lambda x: mdp.lossless_state_encoding(x)), **params)
gym_env.update_reward_shaping_param(1.0)
annealer = LinearAnnealer(horizon=params['REW_SHAPING_HORIZON'])
population_size = params['POPULATION_SIZE']
pbt_population = []
pbt_agent_names = [('agent' + str(i)) for i in range(population_size)]
for agent_name in pbt_agent_names:
agent = PBTAgent(agent_name, params, gym_env=gym_env)
print(f'Initialized {agent_name}')
pbt_population.append(agent)
print('Initialized agent models')
all_pairs = []
for i in range(population_size):
for j in range((i + 1), population_size):
all_pairs.append((i, j))
def pbt_training():
best_sparse_rew_avg = ([(- np.Inf)] * population_size)
for pbt_iter in range(1, (params['NUM_PBT_ITER'] + 1)):
print('\n\n\nPBT ITERATION NUM {}'.format(pbt_iter))
assert (params['ITER_PER_SELECTION'] == population_size)
pairs_to_train = list(zip(range(population_size), range(population_size)))
for sel_iter in range(params['ITER_PER_SELECTION']):
pair_idx = np.random.choice(len(pairs_to_train))
(idx0, idx1) = pairs_to_train.pop(pair_idx)
assert (idx0 == idx1)
agent_idx = np.random.choice([0, 1])
(pbt_agent0, pbt_agent1) = (pbt_population[idx0], pbt_population[idx1])
print('Training agent {} ({}) with agent {} ({}) fixed (pbt #{}/{}, sel #{}/{})'.format(idx1, pbt_agent1.num_ppo_runs, idx0, pbt_agent0.num_ppo_runs, pbt_iter, params['NUM_PBT_ITER'], sel_iter, params['ITER_PER_SELECTION']))
agent_env_steps = (pbt_agent1.num_ppo_runs * params['PPO_RUN_TOT_TIMESTEPS'])
reward_shaping_param = annealer.param_value(agent_env_steps)
print('Current reward shaping:', reward_shaping_param, '\t Save_dir', params['SAVE_DIR'])
pbt_agent1.logs['reward_shaping'].append(reward_shaping_param)
gym_env.update_reward_shaping_param(reward_shaping_param)
gym_env.other_agent = pbt_agent0.get_agent()
gym_env.venv.remote_set_agent_idx(agent_idx)
pbt_population_copy = pbt_population.copy()
pbt_population_copy.pop(idx0)
pbt_agent1.update(gym_env, pbt_population_copy, ent_version=params['ENT_VERSION'])
save_folder = ((params['SAVE_DIR'] + pbt_agent1.agent_name) + '/')
pbt_agent1.save(save_folder)
agent_pair = AgentPair(pbt_agent0.get_agent(), pbt_agent1.get_agent())
overcooked_env.get_rollouts(agent_pair, num_games=1, final_state=True, reward_shaping=reward_shaping_param)
assert (len(pairs_to_train) == 0)
avg_ep_returns_dict = defaultdict(list)
avg_ep_returns_sparse_dict = defaultdict(list)
for (i, pbt_agent) in enumerate(pbt_population):
pbt_agent.update_pbt_iter_logs()
if ((pbt_iter == 1) or (pbt_iter == (params['NUM_PBT_ITER'] // 2)) or (pbt_iter == params['NUM_PBT_ITER'])):
save_folder = ((params['SAVE_DIR'] + pbt_agent.agent_name) + '/')
pbt_agent.save_predictor((save_folder + 'pbt_iter{}/'.format(pbt_iter)))
pbt_agent.save((save_folder + 'pbt_iter{}/'.format(pbt_iter)))
print('Evaluating agent {} and {}'.format(i, i))
pbt_agent_other = pbt_population[i]
agent_pair = AgentPair(pbt_agent.get_agent(), pbt_agent_other.get_agent())
trajs = overcooked_env.get_rollouts(agent_pair, params['NUM_SELECTION_GAMES'], reward_shaping=reward_shaping_param)
(dense_rews, sparse_rews, lens) = (trajs['ep_returns'], trajs['ep_returns_sparse'], trajs['ep_lengths'])
rew_per_step = (np.sum(dense_rews) / np.sum(lens))
avg_ep_returns_dict[i].append(rew_per_step)
avg_ep_returns_sparse_dict[i].append(sparse_rews)
print('AVG ep rewards dict', avg_ep_returns_dict)
for (i, pbt_agent) in enumerate(pbt_population):
pbt_agent.update_avg_rew_per_step_logs(avg_ep_returns_dict[i])
avg_sparse_rew = np.mean(avg_ep_returns_sparse_dict[i])
if (avg_sparse_rew > best_sparse_rew_avg[i]):
best_sparse_rew_avg[i] = avg_sparse_rew
agent_name = pbt_agent.agent_name
print('New best avg sparse rews {} for agent {}, saving...'.format(best_sparse_rew_avg, agent_name))
best_save_folder = ((params['SAVE_DIR'] + agent_name) + '/best/')
delete_dir_if_exists(best_save_folder, verbose=True)
pbt_agent.save_predictor(best_save_folder)
pbt_agent.save(best_save_folder)
pbt_training()
reset_tf()
print(params['SAVE_DIR'])
|
@ex.automain
def run_pbt(params):
create_dir_if_not_exists(params['SAVE_DIR'])
save_dict_to_file(params, (params['SAVE_DIR'] + 'config'))
for seed in params['SEEDS']:
set_global_seed(seed)
curr_seed_params = params.copy()
curr_seed_params['SAVE_DIR'] += 'seed_{}/'.format(seed)
pbt_one_run(curr_seed_params, seed)
|
class OvercookedEnv(object):
'An environment wrapper for the OvercookedGridworld Markov Decision Process.\n\n The environment keeps track of the current state of the agent, updates\n it as the agent takes actions, and provides rewards to the agent.\n '
def __init__(self, mdp, start_state_fn=None, horizon=MAX_HORIZON, debug=False):
'\n        mdp (OvercookedGridworld or function): either an instance of the MDP or a function that returns MDP instances\n        start_state_fn (callable): function that returns an OvercookedState start state for the MDP, called at each environment reset\n        horizon (float): number of steps before the environment returns done=True\n        '
if isinstance(mdp, OvercookedGridworld):
self.mdp_generator_fn = (lambda : mdp)
elif (callable(mdp) and isinstance(mdp(), OvercookedGridworld)):
self.mdp_generator_fn = mdp
else:
raise ValueError('Mdp should be either OvercookedGridworld instance or a generating function')
self.horizon = horizon
self.start_state_fn = start_state_fn
self.reset()
if ((self.horizon >= MAX_HORIZON) and (self.state.order_list is None) and debug):
print('Environment has (near-)infinite horizon and no terminal states')
def __repr__(self):
'Standard way to view the state of an environment programmatically\n        is just to print the Env object'
return self.mdp.state_string(self.state)
def print_state_transition(self, a_t, r_t, info):
print('Timestep: {}\nJoint action taken: {} \t Reward: {} + shape * {} \n{}\n'.format(self.t, tuple((Action.ACTION_TO_CHAR[a] for a in a_t)), r_t, info['shaped_r'], self))
@property
def env_params(self):
return {'start_state_fn': self.start_state_fn, 'horizon': self.horizon}
def display_states(self, *states):
old_state = self.state
for s in states:
self.state = s
print(self)
self.state = old_state
@staticmethod
def print_state(mdp, s):
e = OvercookedEnv(mdp, s)
print(e)
def copy(self):
return OvercookedEnv(mdp=self.mdp.copy(), start_state_fn=self.start_state_fn, horizon=self.horizon)
def step(self, joint_action):
'Performs a joint action, updating the environment state\n        and providing a reward.\n        \n        On being done, stats about the episode are added to info:\n            ep_sparse_r: the environment sparse reward, given only at soup delivery\n            ep_shaped_r: the component of the reward that is due to reward shaping (excluding sparse rewards)\n            ep_length: length of rollout\n        '
assert (not self.is_done())
(next_state, sparse_reward, reward_shaping) = self.mdp.get_state_transition(self.state, joint_action)
self.cumulative_sparse_rewards += sparse_reward
self.cumulative_shaped_rewards += reward_shaping
self.state = next_state
self.t += 1
done = self.is_done()
info = {'shaped_r': reward_shaping}
if done:
info['episode'] = {'ep_sparse_r': self.cumulative_sparse_rewards, 'ep_shaped_r': self.cumulative_shaped_rewards, 'ep_length': self.t}
return (next_state, sparse_reward, done, info)
def reset(self):
'Resets the environment. Does NOT reset the agent.'
self.mdp = self.mdp_generator_fn()
if (self.start_state_fn is None):
self.state = self.mdp.get_standard_start_state()
else:
self.state = self.start_state_fn()
self.cumulative_sparse_rewards = 0
self.cumulative_shaped_rewards = 0
self.t = 0
def is_done(self):
'Whether the episode is over.'
return ((self.t >= self.horizon) or self.mdp.is_terminal(self.state))
def execute_plan(self, start_state, joint_action_plan, display=False):
'Executes action_plan (a list of joint actions) from a start \n state in the mdp and returns the resulting state.'
self.state = start_state
done = False
if display:
print('Starting state\n{}'.format(self))
for joint_action in joint_action_plan:
self.step(joint_action)
done = self.is_done()
if display:
print(self)
if done:
break
successor_state = self.state
self.reset()
return (successor_state, done)
def run_agents(self, agent_pair, include_final_state=False, display=False, display_until=np.Inf):
'\n        The trajectory returned will be a list of (s_t, joint_a_t, r_t, done_t) tuples.\n        '
assert (self.cumulative_sparse_rewards == self.cumulative_shaped_rewards == 0), 'Did not reset environment before running agents'
trajectory = []
done = False
if display:
print(self)
while (not done):
s_t = self.state
a_t = agent_pair.joint_action(s_t)
if any([(a is None) for a in a_t]):
break
(s_tp1, r_t, done, info) = self.step(a_t)
trajectory.append((s_t, a_t, r_t, done))
if (display and (self.t < display_until)):
self.print_state_transition(a_t, r_t, info)
assert (len(trajectory) == self.t), '{} vs {}'.format(len(trajectory), self.t)
if include_final_state:
trajectory.append((s_tp1, (None, None), 0, True))
return (np.array(trajectory), self.t, self.cumulative_sparse_rewards, self.cumulative_shaped_rewards)
def get_rollouts(self, agent_pair, num_games, display=False, final_state=False, agent_idx=0, reward_shaping=0.0, display_until=np.Inf, info=True):
'\n        Simulate `num_games` rollouts with the current agent_pair and return processed\n        trajectories.\n\n        Only returns the trajectories for one of the agents (the actions _that_ agent took),\n        namely the one indicated by `agent_idx`.\n\n        Returns enough information to be able to convert trajectories to any required format\n        (baselines, stable_baselines, etc.)\n\n        NOTE: standard trajectories format used throughout the codebase\n        '
trajectories = {'ep_observations': [], 'ep_actions': [], 'ep_rewards': [], 'ep_dones': [], 'ep_returns': [], 'ep_returns_sparse': [], 'ep_lengths': [], 'mdp_params': [], 'env_params': []}
if display:
trajectories_json = {'ep_observations': [], 'ep_actions': [], 'ep_rewards': [], 'mdp_params': []}
for _ in tqdm.trange(num_games):
agent_pair.set_mdp(self.mdp)
(trajectory, time_taken, tot_rews_sparse, tot_rews_shaped) = self.run_agents(agent_pair, display=display, include_final_state=final_state, display_until=display_until)
(obs, actions, rews, dones) = (trajectory.T[0], trajectory.T[1], trajectory.T[2], trajectory.T[3])
trajectories['ep_observations'].append(obs)
trajectories['ep_actions'].append(actions)
trajectories['ep_rewards'].append(rews)
trajectories['ep_dones'].append(dones)
trajectories['ep_returns'].append((tot_rews_sparse + (tot_rews_shaped * reward_shaping)))
trajectories['ep_returns_sparse'].append(tot_rews_sparse)
trajectories['ep_lengths'].append(time_taken)
trajectories['mdp_params'].append(self.mdp.mdp_params)
trajectories['env_params'].append(self.env_params)
if display:
obs_lst = obs.tolist()
obs_dic_lst = []
for o in obs_lst:
obs_dic_lst.append(o.to_dict_json())
trajectories_json['ep_observations'].append(obs_dic_lst)
actions_lst = actions.tolist()
actions_list_lst = []
for a in actions_lst:
a_lst = []
for item in a:
if (item == 'interact'):
item = 'INTERACT'
a_lst.append(item)
actions_list_lst.append(a_lst)
trajectories_json['ep_actions'].append(actions_list_lst)
trajectories_json['ep_rewards'].append(rews.tolist())
trajectories_json['mdp_params'].append(self.mdp.mdp_params)
self.reset()
agent_pair.reset()
(mu, se) = mean_and_std_err(trajectories['ep_returns'])
if info:
print('Avg reward {:.2f} (std: {:.2f}, se: {:.2f}) over {} games of avg length {}'.format(mu, np.std(trajectories['ep_returns']), se, num_games, np.mean(trajectories['ep_lengths'])))
if display:
layout_names_map = {'simple': 'cramped_room', 'unident_s': 'asymmetric_advantages', 'random1': 'coordination_ring', 'random0': 'forced_coordination', 'random3': 'counter_circuit'}
trajectories_json['mdp_params'][0]['layout_name'] = layout_names_map[trajectories_json['mdp_params'][0]['layout_name']]
dir_path = ('data/trajectories/' + EXP_NAME)
if (not os.path.exists(dir_path)):
os.makedirs(dir_path)
out_file = open(((((((((dir_path + '/') + EXP_NAME) + '_trajectories_') + time.strftime('%Y_%m_%d-%H_%M_%S')) + '_') + agent_pair.agent_pair_name) + f"_r{trajectories['ep_returns'][0]}") + '.json'), 'w')
json.dump(trajectories_json, out_file)
out_file.close()
trajectories = {k: np.array(v) for (k, v) in trajectories.items()}
return trajectories
|
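A hedged usage sketch of the environment wrapper: the layout name comes from the configs above, while the two agents are assumed to be pre-built Agent objects (e.g. from `get_agent_from_model`); this is illustrative only.
mdp = OvercookedGridworld.from_layout_name(layout_name='simple')
env = OvercookedEnv(mdp, horizon=400)

agent_pair = AgentPair(agent0, agent1)   # agent0/agent1: pre-built agents (assumption)
trajs = env.get_rollouts(agent_pair, num_games=5)
print(trajs['ep_returns_sparse'], trajs['ep_lengths'].mean())
|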
class Overcooked(gym.Env):
'\n Wrapper for the Env class above that is SOMEWHAT compatible with the standard gym API.\n\n NOTE: Observations returned are in a dictionary format with various information that is\n necessary to be able to handle the multi-agent nature of the environment. There are probably\n better ways to handle this, but we found this to work with minor modifications to OpenAI Baselines.\n \n NOTE: The index of the main agent in the mdp is randomized at each reset of the environment, and \n is kept track of by the self.agent_idx attribute. This means that it is necessary to pass on this \n information in the output to know for which agent index featurizations should be made for other agents.\n \n For example, say one is training A0 paired with A1, and A1 takes a custom state featurization.\n Then in the runner.py loop in OpenAI Baselines, we will get the lossless encodings of the state,\n and the true Overcooked state. When we encode the true state to feed to A1, we also need to know\n what agent index it has in the environment (as encodings will be index dependent).\n '
def custom_init(self, base_env, featurize_fn, agent_idx, baselines=False):
"\n base_env: OvercookedEnv\n featurize_fn: what function is used to featurize states returned in the 'both_agent_obs' field\n "
if baselines:
np.random.seed(0)
self.base_env = base_env
self.mdp = base_env.mdp
self.featurize_fn = featurize_fn
self.observation_space = self._setup_observation_space()
self.action_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS))
self.set_agent_idx(agent_idx)
self.reset()
def _setup_observation_space(self):
dummy_state = self.mdp.get_standard_start_state()
obs_shape = self.featurize_fn(dummy_state)[0].shape
high = (np.ones(obs_shape) * max(self.mdp.soup_cooking_time, self.mdp.num_items_for_soup, 5))
return gym.spaces.Box((high * 0), high, dtype=np.float32)
def step(self, action):
"\n action: \n (agent with index self.agent_idx action, other agent action)\n is a tuple with the joint action of the primary and secondary agents in index format\n \n returns:\n observation: formatted to be standard input for self.agent_idx's policy\n "
assert all((self.action_space.contains(a) for a in action)), ('%r (%s) invalid' % (action, type(action)))
(agent_action, other_agent_action) = [Action.INDEX_TO_ACTION[a] for a in action]
if (self.agent_idx == 0):
joint_action = (agent_action, other_agent_action)
else:
joint_action = (other_agent_action, agent_action)
(next_state, reward, done, info) = self.base_env.step(joint_action)
(ob_p0, ob_p1) = self.featurize_fn(next_state)
if (self.agent_idx == 0):
both_agents_ob = (ob_p0, ob_p1)
else:
both_agents_ob = (ob_p1, ob_p0)
obs = {'both_agent_obs': both_agents_ob, 'overcooked_state': next_state, 'other_agent_env_idx': (1 - self.agent_idx)}
return (obs, reward, done, info)
def reset(self):
'\n When training on individual maps, we want to randomize which agent is assigned to which\n starting location, in order to make sure that the agents are trained to be able to \n complete the task starting at either of the hardcoded positions.\n\n NOTE: a nicer way to do this would be to just randomize starting positions, and not\n have to deal with randomizing indices.\n '
self.base_env.reset()
(ob_p0, ob_p1) = self.featurize_fn(self.base_env.state)
if (self.agent_idx == 0):
both_agents_ob = (ob_p0, ob_p1)
else:
both_agents_ob = (ob_p1, ob_p0)
return {'both_agent_obs': both_agents_ob, 'overcooked_state': self.base_env.state, 'other_agent_env_idx': (1 - self.agent_idx)}
def render(self, mode='human', close=False):
pass
def set_agent_idx(self, i):
self.agent_idx = i
def get_agent_idx(self):
return self.agent_idx
|
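A sketch of wiring the gym wrapper to a base environment through `custom_init`, using the same lossless state encoding used elsewhere in this file; constructing `Overcooked()` directly (rather than through `gym.make` / `get_vectorized_gym_env`) is an assumption made to keep the example small.
mdp = OvercookedGridworld.from_layout_name(layout_name='simple')
base_env = OvercookedEnv(mdp, horizon=400)

gym_env = Overcooked()
gym_env.custom_init(base_env, featurize_fn=lambda s: mdp.lossless_state_encoding(s), agent_idx=0)

obs = gym_env.reset()
ego_obs, other_obs = obs['both_agent_obs']
joint_action = (gym_env.action_space.sample(), gym_env.action_space.sample())
obs, reward, done, info = gym_env.step(joint_action)
|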
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
'\n Demmel p 312\n '
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
fmtstr = '%10i %10.3g %10.3g'
titlestr = '%10s %10s %10s'
if verbose:
print((titlestr % ('iter', 'residual norm', 'soln norm')))
for i in range(cg_iters):
if (callback is not None):
callback(x)
if verbose:
print((fmtstr % (i, rdotr, np.linalg.norm(x))))
z = f_Ax(p)
v = (rdotr / p.dot(z))
x += (v * p)
r -= (v * z)
newrdotr = r.dot(r)
mu = (newrdotr / rdotr)
p = (r + (mu * p))
rdotr = newrdotr
if (rdotr < residual_tol):
break
if (callback is not None):
callback(x)
if verbose:
print((fmtstr % ((i + 1), rdotr, np.linalg.norm(x))))
return x
|
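A quick sanity check of the conjugate-gradient helper on a small symmetric positive-definite system; `f_Ax` only needs to return the matrix-vector product.
import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])

x = cg(lambda v: A.dot(v), b, cg_iters=10)
assert np.allclose(A.dot(x), b, atol=1e-06)   # x is approximately [0.0909, 0.6364]
|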
def make_atari_env(env_id, num_env, seed, wrapper_kwargs=None, start_index=0):
'\n Create a wrapped, monitored SubprocVecEnv for Atari.\n '
if (wrapper_kwargs is None):
wrapper_kwargs = {}
def make_env(rank):
def _thunk():
env = make_atari(env_id)
env.seed((seed + rank))
env = Monitor(env, (logger.get_dir() and os.path.join(logger.get_dir(), str(rank))))
return wrap_deepmind(env, **wrapper_kwargs)
return _thunk
set_global_seeds(seed)
return SubprocVecEnv([make_env((i + start_index)) for i in range(num_env)])
|
def make_mujoco_env(env_id, seed):
'\n Create a wrapped, monitored gym.Env for MuJoCo.\n '
set_global_seeds(seed)
env = gym.make(env_id)
env = Monitor(env, logger.get_dir())
env.seed(seed)
return env
|
def make_robotics_env(env_id, seed, rank=0):
'\n Create a wrapped, monitored gym.Env for MuJoCo.\n '
set_global_seeds(seed)
env = gym.make(env_id)
env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
env = Monitor(env, (logger.get_dir() and os.path.join(logger.get_dir(), str(rank))), info_keywords=('is_success',))
env.seed(seed)
return env
|
def arg_parser():
'\n Create an empty argparse.ArgumentParser.\n '
import argparse
return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
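A minimal usage sketch of the `arg_parser` helper; the flags below are hypothetical.
parser = arg_parser()
parser.add_argument('--env', type=str, default='PongNoFrameskip-v4')
parser.add_argument('--num-timesteps', type=int, default=int(1000000.0))
args = parser.parse_args(['--env', 'BreakoutNoFrameskip-v4'])
print(args.env, args.num_timesteps)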