code stringlengths 17 6.64M |
|---|
def flatten_shape_dim(shape):
    """Return the total number of elements implied by ``shape``.

    An empty shape yields 1, matching the multiplicative identity used by
    the original ``reduce`` form.
    """
    total = 1
    for dim in shape:
        total *= dim
    return total
|
def print_lasagne_layer(layer, prefix=''):
    """Recursively pretty-print a Lasagne layer graph to stdout.

    Each layer is rendered as ``ClassName[name=..., nonlinearity=...]``
    (omitting missing pieces) and its input layer(s) are printed below it
    with two extra spaces of indentation.
    """
    details = []
    if layer.name:
        details.append('name=' + layer.name)
    # assumes nonlinearity, when set, is a named callable — TODO confirm
    if getattr(layer, 'nonlinearity', None):
        details.append('nonlinearity=' + layer.nonlinearity.__name__)
    print(prefix + layer.__class__.__name__ + '[' + ', '.join(details) + ']')
    if hasattr(layer, 'input_layers') and layer.input_layers is not None:
        for child in layer.input_layers:
            print_lasagne_layer(child, prefix + '  ')
    elif hasattr(layer, 'input_layer') and layer.input_layer is not None:
        print_lasagne_layer(layer.input_layer, prefix + '  ')
|
def unflatten_tensor_variables(flatarr, shapes, symb_arrs):
    """Split the flat symbolic vector ``flatarr`` back into tensors.

    Each chunk is reshaped to the corresponding entry of ``shapes``; its
    broadcast pattern is forced to match the template variable in
    ``symb_arrs`` when they disagree.
    """
    import theano.tensor as TT
    import numpy as np
    result = []
    offset = 0
    for shape, template in zip(shapes, symb_arrs):
        num_elems = np.prod(list(shape))
        chunk = flatarr[offset:offset + num_elems].reshape(shape)
        if chunk.type.broadcastable != template.type.broadcastable:
            chunk = TT.patternbroadcast(chunk, template.type.broadcastable)
        result.append(chunk)
        offset += num_elems
    return result
|
def sliced_fun(f, n_slices):
    """Wrap ``f`` so it is evaluated over ``n_slices`` chunks of its sliced
    inputs, returning the length-weighted average of the per-chunk results.

    The wrapped function receives each chunk of ``sliced_inputs`` followed
    by ``non_sliced_inputs``; scalar, list, and tuple return values of ``f``
    are all supported and the original container kind is preserved.
    """
    def sliced_f(sliced_inputs, non_sliced_inputs=None):
        if non_sliced_inputs is None:
            non_sliced_inputs = []
        if isinstance(non_sliced_inputs, tuple):
            non_sliced_inputs = list(non_sliced_inputs)
        n_paths = len(sliced_inputs[0])
        slice_size = max(1, n_paths // n_slices)
        accum = None
        for begin in range(0, n_paths, slice_size):
            chunk = [inp[begin:begin + slice_size] for inp in sliced_inputs]
            raw = f(*(chunk + non_sliced_inputs))
            raw_list = raw if isinstance(raw, (tuple, list)) else [raw]
            # Weight each chunk's result by how many paths it covered, so the
            # final division by n_paths yields a proper mean.
            weighted = [np.asarray(v) * len(chunk[0]) for v in raw_list]
            if accum is None:
                accum = weighted
            else:
                accum = [a + w for a, w in zip(accum, weighted)]
        result = [v / n_paths for v in accum]
        if not isinstance(raw, (tuple, list)):
            return result[0]
        if isinstance(raw, tuple):
            return tuple(result)
        return result
    return sliced_f
|
def stdize(data, eps=1e-06):
    """Standardize ``data`` along axis 0 to zero mean and near-unit std.

    ``eps`` is added to the denominator to guard against division by zero
    for constant columns.
    """
    centered = data - np.mean(data, axis=0)
    return centered / (np.std(data, axis=0) + eps)
|
def iterate_minibatches_generic(input_lst=None, batchsize=None, shuffle=False):
    """Yield aligned minibatches drawn from every array in ``input_lst``.

    All arrays must have the same length. With ``shuffle=True`` a single
    random permutation is applied consistently across arrays; the final
    batch may be smaller when the length is not divisible by ``batchsize``.
    """
    total = len(input_lst[0])
    if batchsize is None:
        batchsize = total
    assert all(len(arr) == total for arr in input_lst)
    perm = None
    if shuffle:
        perm = np.arange(total)
        np.random.shuffle(perm)
    for begin in range(0, total, batchsize):
        if perm is not None:
            picks = perm[begin:begin + batchsize]
        else:
            picks = slice(begin, begin + batchsize)
        yield [arr[picks] for arr in input_lst]
|
class StubBase(object):
    """Base class for stub proxies that *record* operations (as
    StubMethodCall/StubAttr objects) instead of performing them, so the
    whole expression tree can be pickled and replayed later."""
    def __getitem__(self, item):
        return StubMethodCall(self, '__getitem__', args=[item], kwargs=dict())
    def __getattr__(self, item):
        # Try real attribute lookup first; only missing, non-dunder names
        # become StubAttr records.  Dunder names re-raise so protocols such
        # as pickling keep their normal semantics.
        try:
            return super(self.__class__, self).__getattribute__(item)
        except AttributeError:
            if (item.startswith('__') and item.endswith('__')):
                raise
            return StubAttr(self, item)
    def __pow__(self, power, modulo=None):
        return StubMethodCall(self, '__pow__', [power, modulo], dict())
    def __call__(self, *args, **kwargs):
        # NOTE(review): assumes self exposes .obj/.attr_name (i.e. behaves
        # like StubAttr) — confirm callers never __call__ other subclasses.
        return StubMethodCall(self.obj, self.attr_name, args, kwargs)
    def __add__(self, other):
        return StubMethodCall(self, '__add__', [other], dict())
    def __rmul__(self, other):
        return StubMethodCall(self, '__rmul__', [other], dict())
    def __div__(self, other):
        # Python 2 division hook; Python 3 uses __truediv__, which is not
        # defined here.
        return StubMethodCall(self, '__div__', [other], dict())
    def __rdiv__(self, other):
        # Reflected division is routed through a serializable BinaryOp helper.
        return StubMethodCall(BinaryOp(), 'rdiv', [self, other], dict())
    def __rpow__(self, power, modulo=None):
        return StubMethodCall(self, '__rpow__', [power, modulo], dict())
|
class BinaryOp(Serializable):
    """Serializable helper carrying reversed binary operations for stubs."""
    def __init__(self):
        Serializable.quick_init(self, locals())
    def rdiv(self, a, b):
        """Return ``b / a`` (reflected division)."""
        return b / a
|
class StubAttr(StubBase):
    """Records an attribute access (``obj.attr_name``) on a stubbed object."""
    def __init__(self, obj, attr_name):
        # Write straight into __dict__ so StubBase.__getattr__ is never
        # triggered for these internal fields.
        self.__dict__['_obj'] = obj
        self.__dict__['_attr_name'] = attr_name
    @property
    def obj(self):
        # The object whose attribute was accessed.
        return self.__dict__['_obj']
    @property
    def attr_name(self):
        # The name of the accessed attribute.
        return self.__dict__['_attr_name']
    def __str__(self):
        return ('StubAttr(%s, %s)' % (str(self.obj), str(self.attr_name)))
|
class StubMethodCall(StubBase, Serializable):
    """Records a method invocation (receiver, method name, args, kwargs)
    for later pickled replay by the experiment runner."""
    def __init__(self, obj, method_name, args, kwargs):
        # NOTE(review): presumably forces Serializable.quick_init to capture
        # this constructor's locals — confirm against Serializable.
        self._serializable_initialized = False
        Serializable.quick_init(self, locals())
        self.obj = obj
        self.method_name = method_name
        self.args = args
        self.kwargs = kwargs
    def __str__(self):
        return ('StubMethodCall(%s, %s, %s, %s)' % (str(self.obj), str(self.method_name), str(self.args), str(self.kwargs)))
|
class StubClass(StubBase):
    """Stub proxy for a class: calling it yields a StubObject recording the
    constructor arguments instead of a real instance."""
    def __init__(self, proxy_class):
        self.proxy_class = proxy_class
    def __call__(self, *args, **kwargs):
        if len(args) > 0:
            # Normalize positional args to keyword args using the proxied
            # constructor's signature.  getfullargspec: inspect.getargspec
            # was removed in Python 3.11.
            spec = inspect.getfullargspec(self.proxy_class.__init__)
            kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
            args = tuple()
        return StubObject(self.proxy_class, *args, **kwargs)
    def __getstate__(self):
        return dict(proxy_class=self.proxy_class)
    def __setstate__(self, data):
        self.proxy_class = data['proxy_class']
    def __getattr__(self, item):
        # Guard: if proxy_class itself is missing (e.g. mid-unpickling),
        # avoid infinite recursion through this very method.
        if item == 'proxy_class':
            raise AttributeError(item)
        # Only proxy attributes the real class actually has.
        if hasattr(self.proxy_class, item):
            return StubAttr(self, item)
        raise AttributeError
    def __str__(self):
        return 'StubClass(%s)' % self.proxy_class
|
class StubObject(StubBase):
    """Stub stand-in for an instance of ``proxy_class``; it stores the
    constructor arguments so the real object can be built at replay time."""
    def __init__(self, __proxy_class, *args, **kwargs):
        if len(args) > 0:
            # getfullargspec: inspect.getargspec was removed in Python 3.11.
            spec = inspect.getfullargspec(__proxy_class.__init__)
            kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
            args = tuple()
        self.proxy_class = __proxy_class
        self.args = args
        self.kwargs = kwargs
    def __getstate__(self):
        return dict(args=self.args, kwargs=self.kwargs, proxy_class=self.proxy_class)
    def __setstate__(self, data):
        self.args = data['args']
        self.kwargs = data['kwargs']
        self.proxy_class = data['proxy_class']
    def __getattr__(self, item):
        # Guard: if proxy_class itself is missing (e.g. mid-unpickling),
        # avoid infinite recursion through this very method.
        if item == 'proxy_class':
            raise AttributeError(item)
        if hasattr(self.proxy_class, item):
            return StubAttr(self, item)
        raise AttributeError('Cannot get attribute %s from %s' % (item, self.proxy_class))
    def __str__(self):
        return 'StubObject(%s, *%s, **%s)' % (str(self.proxy_class), str(self.args), str(self.kwargs))
|
class VariantDict(AttrDict):
    """AttrDict that can serialize itself while omitting hidden keys."""
    def __init__(self, d, hidden_keys):
        super(VariantDict, self).__init__(d)
        self._hidden_keys = hidden_keys
    def dump(self):
        """Return a plain dict containing only the non-hidden entries."""
        visible = {}
        for key, val in self.items():
            if key not in self._hidden_keys:
                visible[key] = val
        return visible
|
class VariantGenerator(object):
    """
    Usage:

    vg = VariantGenerator()
    vg.add("param1", [1, 2, 3])
    vg.add("param2", ['x', 'y'])
    vg.variants() => # all combinations of [1,2,3] x ['x','y']

    Supports noncyclic dependency among parameters:
    vg = VariantGenerator()
    vg.add("param1", [1, 2, 3])
    vg.add("param2", lambda param1: [param1+1, param1+2])
    vg.variants() => # ..
    """

    def __init__(self):
        self._variants = []
        self._populate_variants()
        self._hidden_keys = []
        for k, vs, cfg in self._variants:
            if cfg.get('hide', False):
                self._hidden_keys.append(k)

    def add(self, key, vals, **kwargs):
        """Register a variant axis: ``vals`` is either a list of choices or a
        callable over previously-added keys that returns such a list."""
        self._variants.append((key, vals, kwargs))

    def _populate_variants(self):
        # Pick up subclass methods decorated with @variant as variant axes.
        methods = inspect.getmembers(
            self.__class__,
            predicate=(lambda x: inspect.isfunction(x) or inspect.ismethod(x)))
        methods = [x[1].__get__(self, self.__class__)
                   for x in methods if getattr(x[1], '__is_variant', False)]
        for m in methods:
            self.add(m.__name__, m, **getattr(m, '__variant_config', dict()))

    def variants(self, randomized=False):
        """Return all variants as VariantDicts, optionally in random order."""
        ret = list(self.ivariants())
        if randomized:
            np.random.shuffle(ret)
        return list(map(self.variant_dict, ret))

    def variant_dict(self, variant):
        return VariantDict(variant, self._hidden_keys)

    def to_name_suffix(self, variant):
        """Build a ``key_value`` suffix string from the non-hidden keys."""
        suffix = []
        for k, vs, cfg in self._variants:
            if not cfg.get('hide', False):
                suffix.append(k + '_' + str(variant[k]))
        return '_'.join(suffix)

    def ivariants(self):
        """Lazily yield every variant, respecting parameter dependencies.

        :raises ValueError: if the declared dependencies form a cycle.
        """
        dependencies = list()
        for key, vals, _ in self._variants:
            if callable(vals):
                # getfullargspec: inspect.getargspec was removed in Python 3.11.
                args = inspect.getfullargspec(vals).args
                if hasattr(vals, 'im_self') or hasattr(vals, '__self__'):
                    # Bound method: drop the implicit self argument.
                    args = args[1:]
                dependencies.append((key, set(args)))
            else:
                dependencies.append((key, set()))
        sorted_keys = []
        # Topological sort over parameter dependencies (Kahn's algorithm).
        while len(sorted_keys) < len(self._variants):
            free_nodes = [k for k, v in dependencies if len(v) == 0]
            if len(free_nodes) == 0:
                error_msg = 'Invalid parameter dependency: \n'
                for k, v in dependencies:
                    if len(v) > 0:
                        error_msg += k + ' depends on ' + ' & '.join(v) + '\n'
                raise ValueError(error_msg)
            dependencies = [(k, v) for k, v in dependencies if k not in free_nodes]
            for _, v in dependencies:
                v.difference_update(free_nodes)
            sorted_keys += free_nodes
        return self._ivariants_sorted(sorted_keys)

    def _ivariants_sorted(self, sorted_keys):
        if len(sorted_keys) == 0:
            yield dict()
        else:
            first_keys = sorted_keys[:-1]
            first_variants = self._ivariants_sorted(first_keys)
            last_key = sorted_keys[-1]
            last_vals = [v for k, v, _ in self._variants if k == last_key][0]
            if callable(last_vals):
                # getfullargspec: inspect.getargspec was removed in Python 3.11.
                last_val_keys = inspect.getfullargspec(last_vals).args
                if hasattr(last_vals, 'im_self') or hasattr(last_vals, '__self__'):
                    last_val_keys = last_val_keys[1:]
            else:
                last_val_keys = None
            for variant in first_variants:
                if callable(last_vals):
                    last_variants = last_vals(**{k: variant[k] for k in last_val_keys})
                    for last_choice in last_variants:
                        yield AttrDict(variant, **{last_key: last_choice})
                else:
                    for last_choice in last_vals:
                        yield AttrDict(variant, **{last_key: last_choice})
|
def variant(*args, **kwargs):
    """Decorator marking a VariantGenerator method as a variant axis.

    Usable bare (``@variant``) or with configuration keyword arguments
    (``@variant(hide=True)``); the config is stored on the function for
    ``VariantGenerator._populate_variants`` to pick up.
    """
    def _variant(fn):
        fn.__is_variant = True
        fn.__variant_config = kwargs
        return fn
    # collections.Callable was removed in Python 3.10; the builtin
    # callable() covers the same check.
    if (len(args) == 1) and callable(args[0]):
        return _variant(args[0])
    return _variant
|
def stub(glbs):
    """Replace every class in the globals mapping ``glbs`` with a StubClass
    proxy (except StubClass itself), so construction is recorded rather
    than executed."""
    for name, value in list(glbs.items()):
        if isinstance(value, type) and value != StubClass:
            glbs[name] = StubClass(value)
|
def query_yes_no(question, default='yes'):
    """Ask a yes/no question via input() and return the answer as a bool.

    ``question`` is presented to the user.  ``default`` is the presumed
    answer when the user just hits <Enter>; it must be "yes", "no" or None
    (None forces an explicit answer).  Returns True for "yes", False for
    "no".

    :raises ValueError: if ``default`` is not one of the accepted values.
    """
    valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
    prompts = {None: ' [y/n] ', 'yes': ' [Y/n] ', 'no': ' [y/N] '}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
|
def run_experiment_lite(stub_method_call=None, batch_tasks=None, exp_prefix='experiment', exp_name=None, log_dir=None, script='scripts/run_experiment_lite.py', python_command='python', mode='local', dry=False, docker_image=None, aws_config=None, env=None, variant=None, use_gpu=False, sync_s3_pkl=False, sync_log_on_termination=True, confirm_remote=True, terminate_machine=True, periodic_sync=True, periodic_sync_interval=15, sync_all_data_node_to_s3=True, use_cloudpickle=False, pre_commands=None, **kwargs):
    """
    Serialize the stubbed method call and run the experiment using the specified mode.

    :param stub_method_call: A stubbed method call.
    :param batch_tasks: Optional list of pre-built task dicts; when omitted a single
        task is assembled from the other arguments.
    :param script: The name of the entrance point python script
    :param mode: Where & how to run the experiment. Should be one of "local", "local_docker", "ec2",
        and "lab_kube".
    :param dry: Whether to do a dry-run, which only prints the commands without executing them.
    :param exp_prefix: Name prefix for the experiments
    :param docker_image: name of the docker image. Ignored if using local mode.
    :param aws_config: configuration for AWS. Only used under EC2 mode
    :param env: extra environment variables
    :param kwargs: All other parameters will be passed directly to the entrance python script.
    :param variant: If provided, should be a dictionary of parameters
    :param use_gpu: Whether the launched task is running on GPU. This triggers a few configuration
        changes including certain environment flags
    :param sync_s3_pkl: Whether to sync pkl files during execution of the experiment (they will
        always be synced at the end of the experiment)
    :param confirm_remote: Whether to confirm before launching experiments remotely
    :param terminate_machine: Whether to terminate machine after experiment finishes. Only used when
        using mode="ec2". This is useful when one wants to debug after an experiment finishes
        abnormally.
    :param periodic_sync: Whether to synchronize certain experiment files periodically during
        execution.
    :param periodic_sync_interval: Time interval between each periodic sync, in seconds.
    """
    assert ((stub_method_call is not None) or (batch_tasks is not None)), 'Must provide at least either stub_method_call or batch_tasks'
    if (batch_tasks is None):
        batch_tasks = [dict(kwargs, stub_method_call=stub_method_call, exp_name=exp_name, log_dir=log_dir, env=env, variant=variant, use_cloudpickle=use_cloudpickle)]
    global exp_count
    global remote_confirmed
    config.USE_GPU = use_gpu
    # First pass over every task: serialize the stubbed call and fill in
    # default experiment name, local log dir, variant payload and remote dir.
    for task in batch_tasks:
        call = task.pop('stub_method_call')
        if use_cloudpickle:
            import cloudpickle
            data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8')
        else:
            data = base64.b64encode(pickle.dumps(call)).decode('utf-8')
        task['args_data'] = data
        exp_count += 1
        # NOTE(review): `params` is rebound on every iteration but only read
        # later in the 'lab_kube' branch, so the last task's kwargs win —
        # confirm this is intended.
        params = dict(kwargs)
        if (task.get('exp_name', None) is None):
            task['exp_name'] = ('%s_%s_%04d' % (exp_prefix, timestamp, exp_count))
        if (task.get('log_dir', None) is None):
            task['log_dir'] = ((((config.LOG_DIR + '/local/') + exp_prefix.replace('_', '-')) + '/') + task['exp_name'])
        if (task.get('variant', None) is not None):
            variant = task.pop('variant')
            if ('exp_name' not in variant):
                variant['exp_name'] = task['exp_name']
            task['variant_data'] = base64.b64encode(pickle.dumps(variant)).decode('utf-8')
        elif ('variant' in task):
            del task['variant']
        task['remote_log_dir'] = osp.join(config.AWS_S3_PATH, exp_prefix.replace('_', '-'), task['exp_name'])
    # Confirm once (per process) before launching anything on a remote,
    # non-dry target.
    if ((mode not in ['local', 'local_docker']) and (not remote_confirmed) and (not dry) and confirm_remote):
        remote_confirmed = query_yes_no(('Running in (non-dry) mode %s. Confirm?' % mode))
        if (not remote_confirmed):
            sys.exit(1)
    if (mode == 'local'):
        # Run each task as a local shell subprocess.
        for task in batch_tasks:
            del task['remote_log_dir']
            env = task.pop('env', None)
            command = to_local_command(task, python_command=python_command, script=osp.join(config.PROJECT_PATH, script), use_gpu=use_gpu)
            print(command)
            if dry:
                return
            try:
                if (env is None):
                    env = dict()
                subprocess.call(command, shell=True, env=dict(os.environ, **env))
            except Exception as e:
                # Swallow ordinary failures so remaining tasks still run, but
                # let Ctrl-C abort the whole batch.
                print(e)
                if isinstance(e, KeyboardInterrupt):
                    raise
    elif (mode == 'local_docker'):
        if (docker_image is None):
            docker_image = config.DOCKER_IMAGE
        # Run each task inside a local docker container.
        for task in batch_tasks:
            del task['remote_log_dir']
            env = task.pop('env', None)
            command = to_docker_command(task, docker_image=docker_image, pre_commands=pre_commands, script=script, env=env, use_gpu=use_gpu, use_tty=True)
            print(command)
            if dry:
                return
            p = subprocess.Popen(command, shell=True)
            try:
                p.wait()
            except KeyboardInterrupt:
                try:
                    print('terminating')
                    p.terminate()
                except OSError:
                    print('os error!')
                    pass
                p.wait()
    elif (mode == 'ec2'):
        if (docker_image is None):
            docker_image = config.DOCKER_IMAGE
        # Upload the code to S3 once, then launch all tasks on EC2.
        s3_code_path = s3_sync_code(config, dry=dry)
        launch_ec2(batch_tasks, exp_prefix=exp_prefix, docker_image=docker_image, python_command=python_command, script=script, aws_config=aws_config, dry=dry, terminate_machine=terminate_machine, use_gpu=use_gpu, code_full_path=s3_code_path, sync_s3_pkl=sync_s3_pkl, sync_log_on_termination=sync_log_on_termination, periodic_sync=periodic_sync, periodic_sync_interval=periodic_sync_interval, pre_commands=pre_commands)
    elif (mode == 'lab_kube'):
        s3_code_path = s3_sync_code(config, dry=dry)
        if (docker_image is None):
            docker_image = config.DOCKER_IMAGE
        # Emit one Kubernetes pod spec per task and submit it via kubectl.
        for task in batch_tasks:
            task['resources'] = params.pop('resources', config.KUBE_DEFAULT_RESOURCES)
            task['node_selector'] = params.pop('node_selector', config.KUBE_DEFAULT_NODE_SELECTOR)
            task['exp_prefix'] = exp_prefix
            pod_dict = to_lab_kube_pod(task, code_full_path=s3_code_path, docker_image=docker_image, script=script, is_gpu=use_gpu, python_command=python_command, sync_s3_pkl=sync_s3_pkl, periodic_sync=periodic_sync, periodic_sync_interval=periodic_sync_interval, sync_all_data_node_to_s3=sync_all_data_node_to_s3, terminate_machine=terminate_machine)
            pod_str = json.dumps(pod_dict, indent=1)
            if dry:
                print(pod_str)
            dir = '{pod_dir}/{exp_prefix}'.format(pod_dir=config.POD_DIR, exp_prefix=exp_prefix)
            ensure_dir(dir)
            fname = '{dir}/{exp_name}.json'.format(dir=dir, exp_name=task['exp_name'])
            with open(fname, 'w') as fh:
                fh.write(pod_str)
            kubecmd = ('kubectl create -f %s' % fname)
            print(kubecmd)
            if dry:
                return
            # Retry submission a few times; kubectl can fail transiently.
            retry_count = 0
            wait_interval = 1
            while (retry_count <= 5):
                try:
                    return_code = subprocess.call(kubecmd, shell=True)
                    if (return_code == 0):
                        break
                    retry_count += 1
                    print('trying again...')
                    time.sleep(wait_interval)
                except Exception as e:
                    if isinstance(e, KeyboardInterrupt):
                        raise
                    print(e)
    else:
        raise NotImplementedError
|
def ensure_dir(dirname):
    """Ensure that a named directory exists; if it does not, attempt to
    create it (including parents)."""
    try:
        os.makedirs(dirname)
    except OSError as exc:
        # An already-existing path is fine; anything else is a real error.
        if exc.errno == errno.EEXIST:
            return
        raise
|
def _shellquote(s):
'Return a shell-escaped version of the string *s*.'
if (not s):
return "''"
if (_find_unsafe(s) is None):
return s
return (("'" + s.replace("'", '\'"\'"\'')) + "'")
|
def _to_param_val(v):
if (v is None):
return ''
elif isinstance(v, list):
return ' '.join(map(_shellquote, list(map(str, v))))
else:
return _shellquote(str(v))
|
def to_local_command(params, python_command='python', script=osp.join(config.PROJECT_PATH, 'scripts/run_experiment.py'), use_gpu=False):
    """Build the shell command that runs ``script`` locally.

    ``params`` entries become ``--key value`` arguments; nested dicts
    expand to ``--key_subkey value``, except a ``_name`` sub-key which
    collapses to ``--key value``.  Environment assignments from
    ``config.ENV`` (and Theano GPU flags when applicable) are prepended.
    """
    command = python_command + ' ' + script
    if use_gpu and not config.USE_TF:
        # Theano selects its device through environment flags.
        command = "THEANO_FLAGS='device=gpu,dnn.enabled=auto' " + command
    for key, val in config.ENV.items():
        command = ('%s=%s ' % (key, val)) + command
    for key, val in params.items():
        if isinstance(val, dict):
            for sub_key, sub_val in val.items():
                if str(sub_key) == '_name':
                    command += ' --%s %s' % (key, _to_param_val(sub_val))
                else:
                    command += ' --%s_%s %s' % (key, sub_key, _to_param_val(sub_val))
        else:
            command += ' --%s %s' % (key, _to_param_val(val))
    return command
|
def to_docker_command(params, docker_image, python_command='python', script='scripts/run_experiment.py', pre_commands=None, use_tty=False, post_commands=None, dry=False, use_gpu=False, env=None, local_code_dir=None):
    """
    Build a single shell command that runs the experiment inside docker.

    :param params: The parameters for the experiment. If logging directory parameters are provided,
        we will create docker volume mapping to make sure that the logging files are created at the
        correct locations
    :param docker_image: docker image to run the command on
    :param script: script command for running experiment
    :return: the complete ``docker run ... /bin/bash -c '...'`` command string
    """
    log_dir = params.get('log_dir')
    if (not dry):
        mkdir_p(log_dir)
    if use_gpu:
        command_prefix = 'nvidia-docker run'
    else:
        command_prefix = 'docker run'
    docker_log_dir = config.DOCKER_LOG_DIR
    # Forward extra environment variables into the container.
    if (env is not None):
        for (k, v) in env.items():
            command_prefix += ' -e "{k}={v}"'.format(k=k, v=v)
    # Mount the mujoco keys, the host log dir, and the code directory.
    command_prefix += ' -v {local_mujoco_key_dir}:{docker_mujoco_key_dir}'.format(local_mujoco_key_dir=config.MUJOCO_KEY_PATH, docker_mujoco_key_dir='/root/.mujoco')
    command_prefix += ' -v {local_log_dir}:{docker_log_dir}'.format(local_log_dir=log_dir, docker_log_dir=docker_log_dir)
    if (local_code_dir is None):
        local_code_dir = config.PROJECT_PATH
    command_prefix += ' -v {local_code_dir}:{docker_code_dir}'.format(local_code_dir=local_code_dir, docker_code_dir=config.DOCKER_CODE_DIR)
    # Inside the container, logs are written to the docker-side log dir.
    params = dict(params, log_dir=docker_log_dir)
    if use_tty:
        command_prefix += ((' -ti ' + docker_image) + ' /bin/bash -c ')
    else:
        command_prefix += ((' -i ' + docker_image) + ' /bin/bash -c ')
    command_list = list()
    if (pre_commands is not None):
        command_list.extend(pre_commands)
    command_list.append('echo "Running in docker"')
    command_list.append(to_local_command(params, python_command=python_command, script=osp.join(config.DOCKER_CODE_DIR, script), use_gpu=use_gpu))
    if (post_commands is None):
        # NOTE(review): presumably leaves time for log syncing before the
        # container exits — confirm.
        post_commands = ['sleep 120']
    command_list.extend(post_commands)
    return (((command_prefix + "'") + '; '.join(command_list)) + "'")
|
def dedent(s):
    """Strip leading and trailing whitespace from every line of ``s``.

    Note: unlike textwrap.dedent, this removes ALL indentation, which is
    what the generated bash scripts in this module rely on.
    """
    return '\n'.join(line.strip() for line in s.split('\n'))
|
def launch_ec2(params_list, exp_prefix, docker_image, code_full_path, python_command='python', pre_commands=None, script='scripts/run_experiment.py', aws_config=None, dry=False, terminate_machine=True, use_gpu=False, sync_s3_pkl=False, sync_log_on_termination=True, periodic_sync=True, periodic_sync_interval=15):
    """Launch one EC2 instance (spot or on-demand) that runs every task in
    ``params_list`` inside docker via a generated user-data bash script,
    syncing logs to S3 and optionally self-terminating afterwards."""
    if (len(params_list) == 0):
        return
    # Fill unspecified AWS settings from the project-wide config defaults.
    default_config = dict(image_id=config.AWS_IMAGE_ID, instance_type=config.AWS_INSTANCE_TYPE, key_name=config.AWS_KEY_NAME, spot=config.AWS_SPOT, spot_price=config.AWS_SPOT_PRICE, iam_instance_profile_name=config.AWS_IAM_INSTANCE_PROFILE_NAME, security_groups=config.AWS_SECURITY_GROUPS, security_group_ids=config.AWS_SECURITY_GROUP_IDS, network_interfaces=config.AWS_NETWORK_INTERFACES)
    if (aws_config is None):
        aws_config = dict()
    aws_config = dict(default_config, **aws_config)
    from io import StringIO
    sio = StringIO()
    # Build the user-data script: tag the instance, fetch code and docker
    # image, run each task, sync logs, then (optionally) terminate.
    sio.write('#!/bin/bash\n')
    sio.write('{\n')
    sio.write('\n die() { status=$1; shift; echo "FATAL: $*"; exit $status; }\n ')
    sio.write('\n EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id`"\n ')
    sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}\n '.format(exp_name=params_list[0].get('exp_name'), aws_region=config.AWS_REGION_NAME))
    if config.LABEL:
        sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=owner,Value={label} --region {aws_region}\n '.format(label=config.LABEL, aws_region=config.AWS_REGION_NAME))
    sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=exp_prefix,Value={exp_prefix} --region {aws_region}\n '.format(exp_prefix=exp_prefix, aws_region=config.AWS_REGION_NAME))
    sio.write('\n service docker start\n ')
    sio.write('\n docker --config /home/ubuntu/.docker pull {docker_image}\n '.format(docker_image=docker_image))
    if config.FAST_CODE_SYNC:
        # Fast path: download a single code tarball and unpack it.
        sio.write('\n aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz --region {aws_region}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR, aws_region=config.AWS_REGION_NAME))
        sio.write('\n mkdir -p {local_code_path}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR, aws_region=config.AWS_REGION_NAME))
        sio.write('\n tar -zxvf /tmp/rllab_code.tar.gz -C {local_code_path}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR, aws_region=config.AWS_REGION_NAME))
    else:
        # Slow path: recursive copy of the code tree from S3.
        sio.write('\n aws s3 cp --recursive {code_full_path} {local_code_path} --region {aws_region}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR, aws_region=config.AWS_REGION_NAME))
    s3_mujoco_key_path = (config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/')
    sio.write('\n aws s3 cp --recursive {} {} --region {}\n '.format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH, config.AWS_REGION_NAME))
    sio.write('\n cd {local_code_path}\n '.format(local_code_path=config.DOCKER_CODE_DIR))
    # Emit the per-task section of the script: tagging, log dir creation,
    # background sync loops, the docker command itself, then a final sync.
    for params in params_list:
        log_dir = params.get('log_dir')
        remote_log_dir = params.pop('remote_log_dir')
        env = params.pop('env', None)
        sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}\n '.format(exp_name=params.get('exp_name'), aws_region=config.AWS_REGION_NAME))
        sio.write('\n mkdir -p {log_dir}\n '.format(log_dir=log_dir))
        if periodic_sync:
            if sync_s3_pkl:
                sio.write("\n while /bin/true; do\n aws s3 sync --exclude '*' --include '*.csv' --include '*.json' --include '*.pkl' {log_dir} {remote_log_dir} --region {aws_region}\n sleep {periodic_sync_interval}\n done & echo sync initiated".format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME, periodic_sync_interval=periodic_sync_interval))
            else:
                sio.write("\n while /bin/true; do\n aws s3 sync --exclude '*' --include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region}\n sleep {periodic_sync_interval}\n done & echo sync initiated".format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME, periodic_sync_interval=periodic_sync_interval))
        if sync_log_on_termination:
            # Background watcher: flush logs to S3 when the spot instance is
            # marked for termination.
            sio.write('\n while /bin/true; do\n if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \\ -f 2) ]\n then\n logger "Running shutdown hook."\n aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}\n aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}\n break\n else\n # Spot instance not yet marked for termination.\n sleep 5\n fi\n done & echo log sync initiated\n '.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
        sio.write('\n {command}\n '.format(command=to_docker_command(params, docker_image, python_command=python_command, script=script, use_gpu=use_gpu, env=env, pre_commands=pre_commands, local_code_dir=config.DOCKER_CODE_DIR)))
        sio.write('\n aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}\n '.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
        sio.write('\n aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}\n '.format(remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
    if terminate_machine:
        sio.write('\n EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id || die "wget instance-id has failed: $?"`"\n aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}\n '.format(aws_region=config.AWS_REGION_NAME))
    sio.write('} >> /home/ubuntu/user_data.log 2>&1\n')
    full_script = dedent(sio.getvalue())
    import boto3
    import botocore
    if aws_config['spot']:
        ec2 = boto3.client('ec2', region_name=config.AWS_REGION_NAME, aws_access_key_id=config.AWS_ACCESS_KEY, aws_secret_access_key=config.AWS_ACCESS_SECRET)
    else:
        ec2 = boto3.resource('ec2', region_name=config.AWS_REGION_NAME, aws_access_key_id=config.AWS_ACCESS_KEY, aws_secret_access_key=config.AWS_ACCESS_SECRET)
    # EC2 user-data is size-limited; oversized scripts are uploaded to S3
    # and replaced by a tiny bootstrap script that downloads and runs them.
    if ((len(full_script) > 10000) or (len(base64.b64encode(full_script.encode()).decode('utf-8')) > 10000)):
        s3_path = upload_file_to_s3(full_script)
        sio = StringIO()
        sio.write('#!/bin/bash\n')
        sio.write('\n aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\\n chmod +x /home/ubuntu/remote_script.sh && \\\n bash /home/ubuntu/remote_script.sh\n '.format(s3_path=s3_path, aws_region=config.AWS_REGION_NAME))
        user_data = dedent(sio.getvalue())
    else:
        user_data = full_script
    instance_args = dict(ImageId=aws_config['image_id'], KeyName=aws_config['key_name'], UserData=user_data, InstanceType=aws_config['instance_type'], EbsOptimized=True, SecurityGroups=aws_config['security_groups'], SecurityGroupIds=aws_config['security_group_ids'], NetworkInterfaces=aws_config['network_interfaces'], IamInstanceProfile=dict(Name=aws_config['iam_instance_profile_name']))
    if (aws_config.get('placement', None) is not None):
        instance_args['Placement'] = aws_config['placement']
    if (not aws_config['spot']):
        instance_args['MinCount'] = 1
        instance_args['MaxCount'] = 1
    print('************************************************************')
    print(instance_args['UserData'])
    print('************************************************************')
    if aws_config['spot']:
        # Spot requests require base64-encoded user data.
        instance_args['UserData'] = base64.b64encode(instance_args['UserData'].encode()).decode('utf-8')
        spot_args = dict(DryRun=dry, InstanceCount=1, LaunchSpecification=instance_args, SpotPrice=aws_config['spot_price'])
        import pprint
        pprint.pprint(spot_args)
        if (not dry):
            response = ec2.request_spot_instances(**spot_args)
            print(response)
            spot_request_id = response['SpotInstanceRequests'][0]['SpotInstanceRequestId']
            # Tagging can race with request creation; retry a few times.
            for _ in range(10):
                try:
                    ec2.create_tags(Resources=[spot_request_id], Tags=[{'Key': 'Name', 'Value': params_list[0]['exp_name']}])
                    break
                except botocore.exceptions.ClientError:
                    continue
    else:
        import pprint
        pprint.pprint(instance_args)
        ec2.create_instances(DryRun=dry, **instance_args)
|
def s3_sync_code(config, dry=False):
    """Upload the current project code to S3 (once per process) and return
    the remote path; subsequent calls reuse the cached S3_CODE_PATH."""
    global S3_CODE_PATH
    if (S3_CODE_PATH is not None):
        return S3_CODE_PATH
    base = config.AWS_CODE_SYNC_S3_PATH
    has_git = True
    if config.FAST_CODE_SYNC:
        # Fast path: tar the project tree and upload a single archive whose
        # name hashes cwd + commit + timestamp for uniqueness.
        try:
            current_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode('utf-8')
        except subprocess.CalledProcessError as _:
            print('Warning: failed to execute git commands')
            current_commit = None
        file_name = (((str(timestamp) + '_') + hashlib.sha224(((subprocess.check_output(['pwd']) + str(current_commit).encode()) + str(timestamp).encode())).hexdigest()) + '.tar.gz')
        file_path = ('/tmp/' + file_name)
        tar_cmd = ['tar', '-zcvf', file_path, '-C', config.PROJECT_PATH]
        for pattern in config.FAST_CODE_SYNC_IGNORES:
            tar_cmd += ['--exclude', pattern]
        # -h: follow symlinks when archiving.
        tar_cmd += ['-h', '.']
        remote_path = ('%s/%s' % (base, file_name))
        upload_cmd = ['aws', 's3', 'cp', file_path, remote_path]
        mujoco_key_cmd = ['aws', 's3', 'sync', config.MUJOCO_KEY_PATH, '{}/.mujoco/'.format(base)]
        print(' '.join(tar_cmd))
        print(' '.join(upload_cmd))
        print(' '.join(mujoco_key_cmd))
        if (not dry):
            subprocess.check_call(tar_cmd)
            subprocess.check_call(upload_cmd)
            subprocess.check_call(mujoco_key_cmd)
        S3_CODE_PATH = remote_path
        return remote_path
    else:
        # Slow path: recursive `aws s3 cp` keyed by directory + git state,
        # with a per-directory cache prefix to speed up repeat syncs.
        try:
            current_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode('utf-8')
            clean_state = (len(subprocess.check_output(['git', 'status', '--porcelain'])) == 0)
        except subprocess.CalledProcessError as _:
            print('Warning: failed to execute git commands')
            has_git = False
        dir_hash = base64.b64encode(subprocess.check_output(['pwd'])).decode('utf-8')
        # NOTE(review): if `git rev-parse` succeeds but `git status` fails,
        # clean_state is unbound while has_git is False — the conditional
        # below then never reads it, but the coupling is fragile; confirm.
        code_path = ('%s_%s' % (dir_hash, ((current_commit if clean_state else ('%s_dirty_%s' % (current_commit, timestamp))) if has_git else timestamp)))
        full_path = ('%s/%s' % (base, code_path))
        cache_path = ('%s/%s' % (base, dir_hash))
        cache_cmds = ((['aws', 's3', 'cp', '--recursive'] + flatten((['--exclude', ('%s' % pattern)] for pattern in config.CODE_SYNC_IGNORES))) + [cache_path, full_path])
        cmds = ((['aws', 's3', 'cp', '--recursive'] + flatten((['--exclude', ('%s' % pattern)] for pattern in config.CODE_SYNC_IGNORES))) + ['.', full_path])
        caching_cmds = ((['aws', 's3', 'cp', '--recursive'] + flatten((['--exclude', ('%s' % pattern)] for pattern in config.CODE_SYNC_IGNORES))) + [full_path, cache_path])
        mujoco_key_cmd = ['aws', 's3', 'sync', config.MUJOCO_KEY_PATH, '{}/.mujoco/'.format(base)]
        print(cache_cmds, cmds, caching_cmds, mujoco_key_cmd)
        if (not dry):
            # Seed from the cache, upload the working tree, then refresh the
            # cache from the freshly uploaded copy.
            subprocess.check_call(cache_cmds)
            subprocess.check_call(cmds)
            subprocess.check_call(caching_cmds)
            try:
                subprocess.check_call(mujoco_key_cmd)
            except Exception:
                print('Unable to sync mujoco keys!')
        S3_CODE_PATH = full_path
        return full_path
|
def upload_file_to_s3(script_content):
    """Upload ``script_content`` (a str) to a unique S3 key via the aws CLI
    and return the remote path."""
    import tempfile
    import uuid
    # mode='w': NamedTemporaryFile defaults to binary mode ('w+b'), which
    # raises TypeError when handed the str produced by the script builder.
    f = tempfile.NamedTemporaryFile(mode='w', delete=False)
    f.write(script_content)
    f.close()
    remote_path = os.path.join(config.AWS_CODE_SYNC_S3_PATH, 'oversize_bash_scripts', str(uuid.uuid4()))
    subprocess.check_call(['aws', 's3', 'cp', f.name, remote_path])
    os.unlink(f.name)
    return remote_path
|
def to_lab_kube_pod(params, docker_image, code_full_path, python_command='python', script='scripts/run_experiment.py', is_gpu=False, sync_s3_pkl=False, periodic_sync=True, periodic_sync_interval=15, sync_all_data_node_to_s3=False, terminate_machine=True):
    """Build a Kubernetes pod manifest (plain dict) that runs one experiment.

    :param params: experiment parameters; if logging directory parameters are
        provided, docker volume mappings are created so the logging files end
        up in the correct locations. The keys ``remote_log_dir``,
        ``resources``, ``node_selector``, ``exp_prefix`` and ``env`` are
        popped from it.
    :param docker_image: docker image to run the command on.
    :param code_full_path: S3 location the experiment code was synced to.
    :param script: script command for running the experiment.
    :param is_gpu: emit the GPU variant of the spec (nvidia volume mount,
        privileged container, env vars attached).
    :param sync_s3_pkl: also include ``*.pkl`` files in the periodic S3 sync.
    :param periodic_sync: run a background loop syncing logs to S3.
    :param sync_all_data_node_to_s3: sync the whole log dir with no filters.
    :param terminate_machine: if False, keep the pod alive after the run.
    :return: dict representing the pod manifest.
    """
    log_dir = params.get('log_dir')
    remote_log_dir = params.pop('remote_log_dir')
    resources = params.pop('resources')
    node_selector = params.pop('node_selector')
    exp_prefix = params.pop('exp_prefix')
    # NOTE(review): kube_env is only attached to the GPU pod spec below; the
    # non-GPU spec ignores it — confirm whether that is intentional.
    kube_env = [{'name': k, 'value': v} for (k, v) in (params.pop('env', None) or dict()).items()]
    mkdir_p(log_dir)
    pre_commands = list()
    # Set up AWS credentials and the mujoco license key inside the container.
    pre_commands.append('mkdir -p ~/.aws')
    pre_commands.append('mkdir ~/.mujoco')
    pre_commands.append('echo "[default]" >> ~/.aws/credentials')
    pre_commands.append(('echo "aws_access_key_id = %s" >> ~/.aws/credentials' % config.AWS_ACCESS_KEY))
    pre_commands.append(('echo "aws_secret_access_key = %s" >> ~/.aws/credentials' % config.AWS_ACCESS_SECRET))
    s3_mujoco_key_path = (config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/')
    pre_commands.append('aws s3 cp --recursive {} {}'.format(s3_mujoco_key_path, '~/.mujoco'))
    # Fetch the experiment code: single tarball fast-path or recursive copy.
    if config.FAST_CODE_SYNC:
        pre_commands.append(('aws s3 cp %s /tmp/rllab_code.tar.gz' % code_full_path))
        pre_commands.append(('mkdir -p %s' % config.DOCKER_CODE_DIR))
        pre_commands.append(('tar -zxvf /tmp/rllab_code.tar.gz -C %s' % config.DOCKER_CODE_DIR))
    else:
        pre_commands.append(('aws s3 cp --recursive %s %s' % (code_full_path, config.DOCKER_CODE_DIR)))
    pre_commands.append(('cd %s' % config.DOCKER_CODE_DIR))
    pre_commands.append(('mkdir -p %s' % log_dir))
    # Start a background loop that periodically syncs results to S3.
    if sync_all_data_node_to_s3:
        print('Syncing all data from node to s3.')
        if periodic_sync:
            # NOTE(review): both branches append an identical unfiltered sync
            # command — sync_s3_pkl is irrelevant when syncing everything.
            if sync_s3_pkl:
                pre_commands.append('\n while /bin/true; do\n aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated'.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME, periodic_sync_interval=periodic_sync_interval))
            else:
                pre_commands.append('\n while /bin/true; do\n aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated'.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME, periodic_sync_interval=periodic_sync_interval))
    elif periodic_sync:
        # Filtered sync: only csv/json (and optionally pkl snapshots).
        if sync_s3_pkl:
            pre_commands.append("\n while /bin/true; do\n aws s3 sync --exclude '*' --include '*.csv' --include '*.json' --include '*.pkl' {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated".format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME, periodic_sync_interval=periodic_sync_interval))
        else:
            pre_commands.append("\n while /bin/true; do\n aws s3 sync --exclude '*' --include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated".format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME, periodic_sync_interval=periodic_sync_interval))
    post_commands = list()
    # Final sync of everything once the experiment finishes.
    post_commands.append(('aws s3 cp --recursive %s %s' % (log_dir, remote_log_dir)))
    if (not terminate_machine):
        post_commands.append('sleep infinity')
    command_list = list()
    if (pre_commands is not None):
        command_list.extend(pre_commands)
    command_list.append('echo "Running in docker"')
    command_list.append(('%s 2>&1 | tee -a %s' % (to_local_command(params, python_command=python_command, script=script), ('%s/stdouterr.log' % log_dir))))
    if (post_commands is not None):
        command_list.extend(post_commands)
    command = '; '.join(command_list)
    # Kubernetes pod names must not contain underscores.
    pod_name = (config.KUBE_PREFIX + params['exp_name'])
    pod_name = pod_name.replace('_', '-')
    print('Is gpu: ', is_gpu)
    if (not is_gpu):
        return {'apiVersion': 'v1', 'kind': 'Pod', 'metadata': {'name': pod_name, 'labels': {'owner': config.LABEL, 'expt': pod_name, 'exp_time': timestamp, 'exp_prefix': exp_prefix}}, 'spec': {'containers': [{'name': 'foo', 'image': docker_image, 'command': ['/bin/bash', '-c', '-li', command], 'resources': resources, 'imagePullPolicy': 'Always'}], 'restartPolicy': 'Never', 'nodeSelector': node_selector, 'dnsPolicy': 'Default'}}
    return {'apiVersion': 'v1', 'kind': 'Pod', 'metadata': {'name': pod_name, 'labels': {'owner': config.LABEL, 'expt': pod_name, 'exp_time': timestamp, 'exp_prefix': exp_prefix}}, 'spec': {'containers': [{'name': 'foo', 'image': docker_image, 'env': kube_env, 'command': ['/bin/bash', '-c', '-li', command], 'resources': resources, 'imagePullPolicy': 'Always', 'volumeMounts': [{'name': 'nvidia', 'mountPath': '/usr/local/nvidia', 'readOnly': True}], 'securityContext': {'privileged': True}}], 'volumes': [{'name': 'nvidia', 'hostPath': {'path': '/var/lib/docker/volumes/nvidia_driver_352.63/_data'}}], 'restartPolicy': 'Never', 'nodeSelector': node_selector, 'dnsPolicy': 'Default'}}
|
def concretize(maybe_stub):
    """Recursively resolve stub wrappers (StubMethodCall, StubClass, StubAttr,
    StubObject) and containers of them into real objects.

    Plain values pass through unchanged. StubObject instantiation is memoized
    on the stub itself so repeated references resolve to the same object.
    """
    if isinstance(maybe_stub, StubMethodCall):
        obj = concretize(maybe_stub.obj)
        method = getattr(obj, maybe_stub.method_name)
        args = concretize(maybe_stub.args)
        kwargs = concretize(maybe_stub.kwargs)
        return method(*args, **kwargs)
    elif isinstance(maybe_stub, StubClass):
        return maybe_stub.proxy_class
    elif isinstance(maybe_stub, StubAttr):
        obj = concretize(maybe_stub.obj)
        attr_name = maybe_stub.attr_name
        attr_val = getattr(obj, attr_name)
        return concretize(attr_val)
    elif isinstance(maybe_stub, StubObject):
        if not hasattr(maybe_stub, '__stub_cache'):
            args = concretize(maybe_stub.args)
            kwargs = concretize(maybe_stub.kwargs)
            try:
                maybe_stub.__stub_cache = maybe_stub.proxy_class(*args, **kwargs)
            except Exception:
                print('Error while instantiating %s' % maybe_stub.proxy_class)
                import traceback
                traceback.print_exc()
                # Re-raise: the old code fell through to read __stub_cache,
                # masking the real failure with an AttributeError.
                raise
        return maybe_stub.__stub_cache
    elif isinstance(maybe_stub, dict):
        return {concretize(k): concretize(v) for (k, v) in maybe_stub.items()}
    elif isinstance(maybe_stub, (list, tuple)):
        return maybe_stub.__class__(list(map(concretize, maybe_stub)))
    else:
        return maybe_stub
|
def _add_output(file_name, arr, fds, mode='a'):
    """Register *file_name* as a log sink: create its directory, remember the
    name in *arr* and keep an open handle in *fds*. No-op if already added."""
    if file_name in arr:
        return
    mkdir_p(os.path.dirname(file_name))
    arr.append(file_name)
    fds[file_name] = open(file_name, mode)
|
def _remove_output(file_name, arr, fds):
if (file_name in arr):
fds[file_name].close()
del fds[file_name]
arr.remove(file_name)
|
def push_prefix(prefix):
    """Push *prefix* onto the prefix stack and rebuild the cached string that
    log() prepends to every message."""
    global _prefix_str
    _prefixes.append(prefix)
    _prefix_str = ''.join(_prefixes)
|
def add_text_output(file_name):
    """Register *file_name* (append mode) as a sink for log() messages."""
    _add_output(file_name, _text_outputs, _text_fds, mode='a')
|
def remove_text_output(file_name):
    """Close and deregister a text output previously added with add_text_output()."""
    _remove_output(file_name, _text_outputs, _text_fds)
|
def add_tabular_output(file_name):
    """Register *file_name* (truncated, CSV) as a sink for dump_tabular()."""
    _add_output(file_name, _tabular_outputs, _tabular_fds, mode='w')
|
def remove_tabular_output(file_name):
    """Close and deregister a tabular output, forgetting its header-written
    state so a later re-add writes the CSV header again."""
    if (_tabular_fds[file_name] in _tabular_header_written):
        _tabular_header_written.remove(_tabular_fds[file_name])
    _remove_output(file_name, _tabular_outputs, _tabular_fds)
|
def set_snapshot_dir(dir_name):
    """Set the directory save_itr_params() writes snapshots into."""
    global _snapshot_dir
    _snapshot_dir = dir_name
|
def get_snapshot_dir():
    """Return the current snapshot directory (may be None/empty)."""
    return _snapshot_dir
|
def get_snapshot_mode():
    """Return the current snapshot mode ('all', 'last', 'gap' or 'none')."""
    return _snapshot_mode
|
def set_snapshot_mode(mode):
    """Set the snapshot mode; see save_itr_params() for the accepted values."""
    global _snapshot_mode
    _snapshot_mode = mode
|
def get_snapshot_gap():
    """Return the iteration gap used by the 'gap' snapshot mode."""
    return _snapshot_gap
|
def set_snapshot_gap(gap):
    """Set the iteration gap used by the 'gap' snapshot mode."""
    global _snapshot_gap
    _snapshot_gap = gap
|
def set_log_tabular_only(log_tabular_only):
    """If True, suppress plain log() console output and only render the live
    tabular table."""
    global _log_tabular_only
    _log_tabular_only = log_tabular_only
|
def get_log_tabular_only():
    """Return whether tabular-only logging mode is enabled."""
    return _log_tabular_only
|
def log(s, with_prefix=True, with_timestamp=True, color=None):
    """Write message *s* to stdout and every registered text output file.

    :param with_prefix: prepend the current prefix stack.
    :param with_timestamp: prepend a local-timezone timestamp.
    :param color: optional color name passed to colorize().
    """
    line = s
    if with_prefix:
        line = _prefix_str + line
    if with_timestamp:
        now = datetime.datetime.now(dateutil.tz.tzlocal())
        stamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')
        line = '%s | %s' % (stamp, line)
    if color is not None:
        line = colorize(line, color)
    # In tabular-only mode the console is reserved for the live table.
    if not _log_tabular_only:
        print(line)
        for fd in list(_text_fds.values()):
            fd.write(line + '\n')
            fd.flush()
        sys.stdout.flush()
|
def record_tabular(key, val):
    """Queue a (key, value) pair — with the current tabular prefix applied —
    for the next dump_tabular() call. Both are stringified."""
    _tabular.append(((_tabular_prefix_str + str(key)), str(val)))
|
def push_tabular_prefix(key):
    """Push *key* onto the tabular-prefix stack; record_tabular() prepends the
    joined stack to every key."""
    global _tabular_prefix_str
    _tabular_prefixes.append(key)
    _tabular_prefix_str = ''.join(_tabular_prefixes)
|
def pop_tabular_prefix():
    """Drop the most recently pushed tabular prefix and rebuild the cache."""
    global _tabular_prefix_str
    _tabular_prefixes.pop()
    _tabular_prefix_str = ''.join(_tabular_prefixes)
|
@contextmanager
def prefix(key):
    """Context manager that prepends *key* to every log() message inside the
    block; the prefix is popped even if the body raises."""
    push_prefix(key)
    try:
        (yield)
    finally:
        pop_prefix()
|
@contextmanager
def tabular_prefix(key):
    """Context manager scoping *key* as a tabular prefix for record_tabular().

    Uses try/finally — matching the sibling ``prefix`` context manager — so
    the prefix is popped even if the body raises (the old version leaked the
    prefix on exception).
    """
    push_tabular_prefix(key)
    try:
        yield
    finally:
        pop_tabular_prefix()
|
class TerminalTablePrinter(object):
    """Re-renders an accumulating table in place on an ANSI terminal."""

    def __init__(self):
        self.headers = None  # column names, fixed by the first row logged
        self.tabulars = []   # one list of cell values per logged row

    def print_tabular(self, new_tabular):
        """Append a row (list of (key, value) pairs) and redraw the table."""
        if self.headers is None:
            self.headers = [x[0] for x in new_tabular]
        else:
            # Later rows must keep the same column count as the first one.
            assert len(self.headers) == len(new_tabular)
        self.tabulars.append([x[1] for x in new_tabular])
        self.refresh()

    def refresh(self):
        """Clear the screen and redraw the last rows that fit the terminal."""
        import shutil
        # shutil.get_terminal_size falls back to $COLUMNS/$LINES or a default
        # when stdout is not a tty, unlike the old `stty size` shell-out
        # which crashed in non-interactive runs.
        rows = shutil.get_terminal_size().lines
        tabulars = self.tabulars[-(rows - 3):]
        sys.stdout.write('\x1b[2J\x1b[H')
        sys.stdout.write(tabulate(tabulars, self.headers))
        sys.stdout.write('\n')
|
def dump_tabular(*args, **kwargs):
    """Flush all queued record_tabular() pairs: render them to the console
    (or the live table printer) and append one CSV row to every registered
    tabular output file.

    Extra args/kwargs are forwarded to log(). The keyword ``write_header``
    (True / False / None) overrides the default write-header-once-per-file
    behavior.
    """
    wh = kwargs.pop('write_header', None)
    if (len(_tabular) > 0):
        if _log_tabular_only:
            table_printer.print_tabular(_tabular)
        else:
            for line in tabulate(_tabular).split('\n'):
                log(line, *args, **kwargs)
        tabular_dict = dict(_tabular)
        for tabular_fd in list(_tabular_fds.values()):
            writer = csv.DictWriter(tabular_fd, fieldnames=list(tabular_dict.keys()))
            # Write the CSV header only the first time a file is used,
            # unless explicitly overridden via write_header.
            if (wh or ((wh is None) and (tabular_fd not in _tabular_header_written))):
                writer.writeheader()
                _tabular_header_written.add(tabular_fd)
            writer.writerow(tabular_dict)
            tabular_fd.flush()
        # Clear the queue in place so other references stay valid.
        del _tabular[:]
|
def pop_prefix():
    """Drop the most recently pushed log prefix and rebuild the cached
    prefix string."""
    global _prefix_str
    _prefixes.pop()
    _prefix_str = ''.join(_prefixes)
|
def save_itr_params(itr, params):
    """Persist *params* for iteration *itr* according to the snapshot mode.

    Modes: 'all' saves every iteration to its own file, 'last' overwrites a
    single params.pkl, 'gap' saves every _snapshot_gap iterations, 'none'
    disables saving. Unknown modes raise NotImplementedError.
    """
    if not _snapshot_dir:
        return
    if _snapshot_mode == 'all':
        file_name = osp.join(_snapshot_dir, 'itr_%d.pkl' % itr)
    elif _snapshot_mode == 'last':
        # Keep only the most recent snapshot.
        file_name = osp.join(_snapshot_dir, 'params.pkl')
    elif _snapshot_mode == 'gap':
        if itr % _snapshot_gap != 0:
            return
        file_name = osp.join(_snapshot_dir, 'itr_%d.pkl' % itr)
    elif _snapshot_mode == 'none':
        return
    else:
        raise NotImplementedError
    joblib.dump(params, file_name, compress=3)
|
def log_parameters(log_file, args, classes):
    """Dump the run parameters (CLI args plus per-class kwargs) as JSON.

    Arguments whose names start with a tracked class name are skipped at the
    top level and recorded under that class's entry instead.
    """
    log_params = {}
    for param_name, param_value in args.__dict__.items():
        if not any(param_name.startswith(cls_name) for cls_name in list(classes.keys())):
            log_params[param_name] = param_value
    for name, cls in classes.items():
        if isinstance(cls, type):
            params = get_all_parameters(cls, args)
            params['_name'] = getattr(args, name)
            log_params[name] = params
        else:
            # Instance: record its construction kwargs plus qualified name.
            entry = getattr(cls, '__kwargs', dict())
            log_params[name] = entry
            entry['_name'] = cls.__module__ + '.' + cls.__class__.__name__
    mkdir_p(os.path.dirname(log_file))
    with open(log_file, 'w') as f:
        json.dump(log_params, f, indent=2, sort_keys=True)
|
def stub_to_json(stub_sth):
    """Recursively convert stub wrappers and containers into a structure that
    json.dump can serialize; plain values fall through unchanged."""
    from rllab.misc import instrument
    if isinstance(stub_sth, instrument.StubObject):
        # Stub objects become their kwargs plus the qualified class name
        # under '_name'; positional args are not supported here.
        assert (len(stub_sth.args) == 0)
        data = dict()
        for (k, v) in stub_sth.kwargs.items():
            data[k] = stub_to_json(v)
        data['_name'] = ((stub_sth.proxy_class.__module__ + '.') + stub_sth.proxy_class.__name__)
        return data
    elif isinstance(stub_sth, instrument.StubAttr):
        return dict(obj=stub_to_json(stub_sth.obj), attr=stub_to_json(stub_sth.attr_name))
    elif isinstance(stub_sth, instrument.StubMethodCall):
        return dict(obj=stub_to_json(stub_sth.obj), method_name=stub_to_json(stub_sth.method_name), args=stub_to_json(stub_sth.args), kwargs=stub_to_json(stub_sth.kwargs))
    elif isinstance(stub_sth, instrument.BinaryOp):
        return 'binary_op'
    elif isinstance(stub_sth, instrument.StubClass):
        return ((stub_sth.proxy_class.__module__ + '.') + stub_sth.proxy_class.__name__)
    elif isinstance(stub_sth, dict):
        return {stub_to_json(k): stub_to_json(v) for (k, v) in stub_sth.items()}
    elif isinstance(stub_sth, (list, tuple)):
        return list(map(stub_to_json, stub_sth))
    elif (type(stub_sth) == type((lambda : None))):
        # Plain functions serialize to their (qualified) name.
        if (stub_sth.__module__ is not None):
            return ((stub_sth.__module__ + '.') + stub_sth.__name__)
        return stub_sth.__name__
    elif ('theano' in str(type(stub_sth))):
        # Theano variables have no stable JSON form; keep their repr.
        return repr(stub_sth)
    return stub_sth
|
class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes classes and Enum members as tagged dicts
    ({'$class': ...} / {'$enum': ...}); everything else uses the default."""

    def default(self, o):
        if isinstance(o, type):
            return {'$class': '%s.%s' % (o.__module__, o.__name__)}
        if isinstance(o, Enum):
            return {'$enum': '%s.%s.%s' % (o.__module__, o.__class__.__name__, o.name)}
        return json.JSONEncoder.default(self, o)
|
def log_parameters_lite(log_file, args):
    """Dump the run's CLI arguments — and, when args.args_data is present,
    the decoded stubbed-call arguments — to *log_file* as JSON.

    NOTE(review): args.args_data is deserialized with pickle.loads; this is
    only safe because the payload is produced by the launcher itself, never
    untrusted input — confirm before reusing elsewhere.
    """
    log_params = {}
    for (param_name, param_value) in args.__dict__.items():
        log_params[param_name] = param_value
    if (args.args_data is not None):
        stub_method = pickle.loads(base64.b64decode(args.args_data))
        method_args = stub_method.kwargs
        log_params['json_args'] = dict()
        for (k, v) in list(method_args.items()):
            log_params['json_args'][k] = stub_to_json(v)
        kwargs = stub_method.obj.kwargs
        # Promote the common components to their own entries.
        for k in ['baseline', 'env', 'policy']:
            if (k in kwargs):
                log_params['json_args'][k] = stub_to_json(kwargs.pop(k))
        log_params['json_args']['algo'] = stub_to_json(stub_method.obj)
    mkdir_p(os.path.dirname(log_file))
    with open(log_file, 'w') as f:
        json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
|
def log_variant(log_file, variant_data):
    """Serialize a variant specification to *log_file* as JSON, dereferencing
    a .dump() method and stub objects first."""
    mkdir_p(os.path.dirname(log_file))
    if hasattr(variant_data, 'dump'):
        variant_data = variant_data.dump()
    with open(log_file, 'w') as f:
        json.dump(stub_to_json(variant_data), f, indent=2, sort_keys=True, cls=MyEncoder)
|
def record_tabular_misc_stat(key, values):
    """Record Average/Std/Median/Min/Max summary statistics for *values*.

    Handles an empty *values* sequence by logging NaNs (np.amin/np.amax raise
    on empty input), keeping the CSV columns aligned across iterations.
    """
    if len(values) > 0:
        record_tabular(key + 'Average', np.average(values))
        record_tabular(key + 'Std', np.std(values))
        record_tabular(key + 'Median', np.median(values))
        record_tabular(key + 'Min', np.amin(values))
        record_tabular(key + 'Max', np.amax(values))
    else:
        for stat in ('Average', 'Std', 'Median', 'Min', 'Max'):
            record_tabular(key + stat, np.nan)
|
def compute_rect_vertices(fromp, to, radius):
    """Return the four corners of a rectangle of half-width *radius* around
    the segment fromp -> to, encoded as 'x,y;x,y;x,y;x,y'."""
    x1, y1 = fromp
    x2, y2 = to
    if abs(y1 - y2) < 1e-06:
        # (Nearly) horizontal segment: offset straight up/down.
        dx, dy = 0, radius
    else:
        slope = (x1 - x2) / (y1 - y2)
        dx = radius * 1.0 / (slope ** 2 + 1) ** 0.5
        dy = (radius ** 2 - dx ** 2) ** 0.5
    if (x1 - x2) * (y1 - y2) > 0:
        dy = -dy
    corners = [
        [x1 + dx, y1 + dy],
        [x2 + dx, y2 + dy],
        [x2 - dx, y2 - dy],
        [x1 - dx, y1 - dy],
    ]
    return ';'.join(','.join(map(str, corner)) for corner in corners)
|
def plot_experiments(name_or_patterns, legend=False, post_processing=None, key='AverageReturn'):
    """Plot column *key* over iterations for every experiment folder matching
    the glob pattern(s) under the project data directory.

    :param name_or_patterns: one glob pattern or a list of them.
    :param legend: if True, add a legend labelled with experiment names.
    :param post_processing: optional callable applied to each value array
        before plotting.
    :param key: progress.csv column to plot.
    """
    if not isinstance(name_or_patterns, (list, tuple)):
        name_or_patterns = [name_or_patterns]
    data_folder = osp.abspath(osp.join(osp.dirname(__file__), '../../data'))
    files = []
    for name_or_pattern in name_or_patterns:
        matched_files = glob(osp.join(data_folder, name_or_pattern))
        files += matched_files
    files = sorted(files)
    print('plotting the following experiments:')
    for f in files:
        print(f)
    plots = []
    legends = []
    for f in files:
        exp_name = osp.basename(f)
        returns = []
        # csv must be read in text mode on Python 3 (the old 'rb' open made
        # DictReader raise); newline='' is the csv-module convention.
        with open(osp.join(f, 'progress.csv'), 'r', newline='') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                if row[key]:
                    returns.append(float(row[key]))
        returns = np.array(returns)
        if post_processing:
            returns = post_processing(returns)
        plots.append(plt.plot(returns)[0])
        legends.append(exp_name)
    if legend:
        plt.legend(plots, legends)
|
class Experiment(object):
    """One experiment run: its progress table, parameter dict and optional
    pickled snapshot data."""

    def __init__(self, progress, params, pkl_data=None):
        self.progress = progress          # {column -> array} from progress.csv
        self.params = params              # raw parameter dict (params.json)
        self.pkl_data = pkl_data          # optional loaded params.pkl payload
        self.flat_params = self._flatten_params(params)
        self.name = params['exp_name']

    def _flatten_params(self, params, depth=2):
        """Flatten nested parameter dicts up to *depth* levels, joining keys
        with '_'; a nested '_name' entry collapses onto the parent key."""
        flat = dict()
        for key, value in params.items():
            if not isinstance(value, dict) or depth == 0:
                flat[key] = value
                continue
            for sub_key, sub_value in self._flatten_params(value, depth=depth - 1).items():
                target = key if sub_key == '_name' else key + '_' + sub_key
                flat[target] = sub_value
        return flat
|
def uniq(seq):
    """Return the elements of *seq* in first-seen order with duplicates
    removed."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
|
class ExperimentDatabase(object):
    """Loads experiment runs (progress.csv / params.json / params.pkl) from a
    data folder and provides filtering and plotting helpers."""

    def __init__(self, data_folder, names_or_patterns='*'):
        self._load_experiments(data_folder, names_or_patterns)

    def _read_data(self, progress_file):
        """Parse a progress.csv into {column -> np.array of floats}."""
        entries = dict()
        # Text mode with newline='' is what the csv module requires on
        # Python 3; the old binary-mode open made DictReader raise.
        with open(progress_file, 'r', newline='') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                for (k, v) in row.items():
                    if (k not in entries):
                        entries[k] = []
                    entries[k].append(float(v))
        entries = dict([(k, np.array(v)) for (k, v) in entries.items()])
        return entries

    def _read_params(self, params_file):
        """Load a params.json file."""
        with open(params_file, 'r') as f:
            return json.loads(f.read())

    def _load_experiments(self, data_folder, name_or_patterns):
        """Collect Experiment objects from every path matching the pattern(s).

        A matched directory is expected to contain progress.csv/params.json
        (plus an optional params.pkl); directly matched files are combined
        into a single extra experiment. Broken directories are skipped with
        a printed warning.
        """
        if (not isinstance(name_or_patterns, (list, tuple))):
            name_or_patterns = [name_or_patterns]
        files = []
        for name_or_pattern in name_or_patterns:
            matched_files = glob(osp.join(data_folder, name_or_pattern))
            files += matched_files
        experiments = []
        progress_f = None
        params_f = None
        pkl_data = None
        for f in files:
            if os.path.isdir(f):
                try:
                    progress = self._read_data(osp.join(f, 'progress.csv'))
                    params = self._read_params(osp.join(f, 'params.json'))
                    params['exp_name'] = osp.basename(f)
                    if os.path.isfile(osp.join(f, 'params.pkl')):
                        pkl_data = joblib.load(osp.join(f, 'params.pkl'))
                        experiments.append(Experiment(progress, params, pkl_data))
                    else:
                        experiments.append(Experiment(progress, params))
                except Exception as e:
                    print(e)
            elif ('progress.csv' in f):
                progress_f = self._read_data(f)
            elif ('params.json' in f):
                params_f = self._read_params(f)
            elif ('params.pkl' in f):
                print('about to load', f)
                pkl_data = joblib.load(f)
        if (params_f and progress_f):
            if pkl_data:
                experiments.append(Experiment(progress_f, params_f, pkl_data))
            else:
                experiments.append(Experiment(progress_f, params_f))
        self._experiments = experiments

    def plot_experiments(self, key=None, legend=None, color_key=None, filter_exp=None, **kwargs):
        """Plot column *key* for experiments matching the kwargs filters.

        :param legend: flat-param name used to label each curve.
        :param color_key: flat-param name used to pick a per-value color.
        :param filter_exp: optional predicate applied after kwarg filtering.
        """
        experiments = list(self.filter_experiments(**kwargs))
        if filter_exp:
            experiments = list(filter(filter_exp, experiments))
        plots = []
        legends = []
        color_pool = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
        color_map = dict()
        if (color_key is not None):
            exp_color_keys = uniq([exp.flat_params.get(color_key, None) for exp in experiments])
            # Only a fixed palette is supported.
            if (len(exp_color_keys) > len(color_pool)):
                raise NotImplementedError
            for (exp_color_key, color) in zip(exp_color_keys, color_pool):
                print(('%s: %s' % (str(exp_color_key), color)))
            color_map = dict(list(zip(exp_color_keys, color_pool)))
        used_legends = []
        legend_list = []
        for exp in experiments:
            exp_color_key = None
            if (color_key is not None):
                exp_color_key = exp.flat_params.get(color_key, None)
                exp_color = color_map.get(exp_color_key, None)
            else:
                exp_color = None
            plots.append(plt.plot(exp.progress.get(key, [0]), color=exp_color)[0])
            if (legend is not None):
                legends.append(exp.flat_params[legend])
            elif ((exp_color_key is not None) and (exp_color_key not in used_legends)):
                # One legend entry per distinct color key.
                used_legends.append(exp_color_key)
                legend_list.append(plots[(- 1)])
        if (len(legends) > 0):
            plt.legend(plots, legends)
        elif (len(legend_list) > 0):
            plt.legend(legend_list, used_legends)

    def filter_experiments(self, **kwargs):
        """Yield experiments whose flat params match every kwarg exactly."""
        for exp in self._experiments:
            exp_params = exp.flat_params
            match = True
            for (key, val) in kwargs.items():
                if (exp_params.get(key, None) != val):
                    match = False
                    break
            if match:
                (yield exp)

    def unique(self, param_key):
        """Distinct values of *param_key* across all loaded experiments."""
        return uniq([exp.flat_params[param_key] for exp in self._experiments if (param_key in exp.flat_params)])
|
def overrides(method):
    """Decorator marking that *method* overrides a method in a superclass.

    NOTE(review): this implementation is currently a pure pass-through
    marker — no verification against the superclass is performed here.
    See http://stackoverflow.com/a/14631397/308189 and
    http://stackoverflow.com/a/8313042/308189 for the intended check, which
    the bytecode helpers below (_get_base_class_names etc.) support.
    """
    return method
|
def _get_base_classes(frame, namespace):
    # Resolve every dotted base-class name found in *frame*'s bytecode to
    # the actual class object via *namespace*.
    return [_get_base_class(class_name_components, namespace) for class_name_components in _get_base_class_names(frame)]
|
def _get_base_class_names(frame):
    """Extract dotted base-class names from the bytecode of a class-creation
    frame; returns a list of component lists, e.g. [['pkg', 'Base']].

    NOTE(review): this walks CPython 2 bytecode (``ord`` over a ``str`` code
    object, 2-byte oparg encoding). On Python 3, ``code[i]`` is already an
    int, so ``ord(c)`` raises TypeError — this helper is effectively
    Py2-only as written.
    """
    (co, lasti) = (frame.f_code, frame.f_lasti)
    code = co.co_code
    i = 0
    extended_arg = 0
    extends = []
    # Pass 1: collect LOAD_NAME / LOAD_ATTR operands up to f_lasti.
    while (i <= lasti):
        c = code[i]
        op = ord(c)
        i += 1
        if (op >= dis.HAVE_ARGUMENT):
            oparg = ((ord(code[i]) + (ord(code[(i + 1)]) * 256)) + extended_arg)
            extended_arg = 0
            i += 2
            if (op == dis.EXTENDED_ARG):
                extended_arg = (oparg * int(65536))
            if (op in dis.hasconst):
                # A string constant (docstring) resets the collected names.
                if (type(co.co_consts[oparg]) == str):
                    extends = []
            elif (op in dis.hasname):
                if (dis.opname[op] == 'LOAD_NAME'):
                    extends.append(('name', co.co_names[oparg]))
                if (dis.opname[op] == 'LOAD_ATTR'):
                    extends.append(('attr', co.co_names[oparg]))
    items = []
    previous_item = []
    # Pass 2: group each LOAD_NAME plus its trailing LOAD_ATTRs into one
    # dotted-name component list.
    for (t, s) in extends:
        if (t == 'name'):
            if previous_item:
                items.append(previous_item)
            previous_item = [s]
        else:
            previous_item += [s]
    if previous_item:
        items.append(previous_item)
    return items
|
def _get_base_class(components, namespace):
obj = namespace[components[0]]
for component in components[1:]:
obj = getattr(obj, component)
return obj
|
def classesinmodule(module):
    """Return all classes defined directly in *module* (excluding ones merely
    imported into it)."""
    return [
        value
        for value in module.__dict__.values()
        if isinstance(value, type) and value.__module__ == module.__name__
    ]
|
def locate_with_hint(class_path, prefix_hints=()):
    """Locate a module or class by dotted path, retrying with the joined
    *prefix_hints* prepended when the bare path does not resolve.

    The default is now an immutable tuple (the old mutable-list default is a
    Python anti-pattern), and the retry is skipped entirely when no hints are
    given — the old code pointlessly looked up '.<class_path>'.

    :return: the located object, or None if it cannot be found.
    """
    module_or_class = locate(class_path)
    if module_or_class is None and prefix_hints:
        hint = '.'.join(prefix_hints)
        module_or_class = locate(hint + '.' + class_path)
    return module_or_class
|
def load_class(class_path, superclass=None, prefix_hints=[]):
    """Load a class by dotted path.

    If *class_path* resolves to a module, exactly one candidate class must be
    defined in it (filtered to subclasses of *superclass* when given).

    :raises ValueError: if nothing is found, the module defines zero or
        multiple candidate classes, or the located class fails the
        superclass check.
    """
    module_or_class = locate_with_hint(class_path, prefix_hints)
    if (module_or_class is None):
        raise ValueError(('Cannot find module or class under path %s' % class_path))
    if (type(module_or_class) == types.ModuleType):
        # Always collect candidates: the old code only assigned `classes`
        # when a superclass was given, raising NameError otherwise.
        classes = classesinmodule(module_or_class)
        if superclass:
            classes = [x for x in classes if issubclass(x, superclass)]
        if (len(classes) == 0):
            if superclass:
                raise ValueError(('Could not find any subclasses of %s defined in module %s' % (str(superclass), class_path)))
            else:
                raise ValueError(('Could not find any classes defined in module %s' % class_path))
        elif (len(classes) > 1):
            if superclass:
                raise ValueError(('Multiple subclasses of %s are defined in the module %s' % (str(superclass), class_path)))
            else:
                raise ValueError(('Multiple classes are defined in the module %s' % class_path))
        else:
            return classes[0]
    elif isinstance(module_or_class, type):
        if ((superclass is None) or issubclass(module_or_class, superclass)):
            return module_or_class
        else:
            raise ValueError(('The class %s is not a subclass of %s' % (str(module_or_class), str(superclass))))
    else:
        raise ValueError(('Unsupported object: %s' % str(module_or_class)))
|
def weighted_sample(weights, objects):
    """Return a random item from *objects*, with the weighting defined by
    *weights* (which must sum to 1)."""
    cumulative = np.cumsum(weights)
    draw = np.random.rand()
    # Number of cumulative weights strictly below the draw = sampled index.
    idx = int(np.sum(cumulative < draw))
    return objects[min(idx, len(objects) - 1)]
|
def weighted_sample_n(prob_matrix, items):
    """Vectorised categorical sampling: draw one element of *items* per row
    of *prob_matrix* (rows are probability distributions)."""
    cumulative = prob_matrix.cumsum(axis=1)
    draws = np.random.rand(prob_matrix.shape[0])
    # Per row: count of cumulative probs strictly below that row's draw.
    idx = (cumulative < draws.reshape(-1, 1)).sum(axis=1)
    return items[np.minimum(idx, len(items) - 1)]
|
def softmax(x):
    """Numerically stable softmax along the last axis of *x*."""
    # Subtracting the row max keeps exp() from overflowing.
    z = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return z / z.sum(axis=-1, keepdims=True)
|
def softmax_sym(x):
    """Symbolic (Theano) softmax over the rows of *x*."""
    return theano.tensor.nnet.softmax(x)
|
def cat_entropy(x):
    """Entropy of categorical distributions stored along the last axis of *x*.

    NOTE(review): produces NaN for probabilities that are exactly 0 —
    callers presumably pass strictly positive distributions; confirm.
    """
    return -(x * np.log(x)).sum(axis=-1)
|
def cat_perplexity(x):
    """Perplexity exp(H(x)) of the categorical distribution(s) in *x*."""
    return np.exp(cat_entropy(x))
|
def explained_variance_1d(ypred, y):
    """Fraction of the variance of *y* explained by predictions *ypred*.

    For a (near-)constant target: returns 1 if the prediction is also
    constant, 0 if it varies. Both inputs must be 1-D.
    """
    assert y.ndim == 1 and ypred.ndim == 1
    vary = np.var(y)
    if np.isclose(vary, 0):
        # Degenerate target — score only on whether the prediction varies.
        return 0 if np.var(ypred) > 0 else 1
    return 1 - np.var(y - ypred) / (vary + 1e-08)
|
def to_onehot(ind, dim):
    """Return a length-*dim* vector of zeros with a 1 at index *ind*."""
    vec = np.zeros(dim)
    vec[ind] = 1
    return vec
|
def to_onehot_n(inds, dim):
    """Return a (len(inds), dim) matrix whose rows one-hot encode *inds*."""
    mat = np.zeros((len(inds), dim))
    mat[np.arange(len(inds)), inds] = 1
    return mat
|
def to_onehot_sym(ind, dim):
    """Symbolic (Theano) one-hot encoding of an integer vector *ind*."""
    assert (ind.ndim == 1)
    return theano.tensor.extra_ops.to_one_hot(ind, dim)
|
def from_onehot(v):
    """Index of the (first) hot entry of one-hot vector *v*; inverse of
    to_onehot."""
    hot_indices = np.nonzero(v)[0]
    return hot_indices[0]
|
def from_onehot_n(v):
    """Row-wise inverse of to_onehot_n; returns [] for empty input."""
    if len(v) == 0:
        return []
    # Column index of the hot entry in each row.
    return np.nonzero(v)[1]
|
def normalize_updates(old_mean, old_std, new_mean, new_std, old_W, old_b):
    """Theano updates that rescale the last (linear) layer of a network —
    weights *old_W*, bias *old_b* — so its de-normalized output is unchanged
    when the output normalization statistics move from (old_mean, old_std)
    to (new_mean, new_std).

    :return: OrderedDict mapping each shared variable to its updated value
        (weights/bias cast back to their original dtypes).
    """
    new_W = ((old_W * old_std[0]) / (new_std[0] + 1e-06))
    new_b = ((((old_b * old_std[0]) + old_mean[0]) - new_mean[0]) / (new_std[0] + 1e-06))
    return OrderedDict([(old_W, TT.cast(new_W, old_W.dtype)), (old_b, TT.cast(new_b, old_b.dtype)), (old_mean, new_mean), (old_std, new_std)])
|
def discount_cumsum(x, discount):
    """Reverse discounted cumulative sum:
    out[t] = sum_{k >= t} discount^(k-t) * x[k].

    Implemented as the IIR filter y[n] = x[n] + discount * y[n-1] applied to
    the time-reversed signal, then reversed back (scipy.signal.lfilter with
    denominator [1, -discount]).
    """
    return scipy.signal.lfilter([1], [1, float((- discount))], x[::(- 1)], axis=0)[::(- 1)]
|
def discount_return(x, discount):
    """Total discounted return: sum_t discount^t * x[t]."""
    discounts = discount ** np.arange(len(x))
    return np.sum(x * discounts)
|
def rk4(derivs, y0, t, *args, **kwargs):
    """Integrate a 1D or ND system of ODEs with classic 4th-order Runge-Kutta.

    Toy integrator, useful on a system without scipy; otherwise prefer
    :func:`scipy.integrate`.

    :param derivs: returns the derivative of the system; signature
        ``dy = derivs(yi, ti)``.
    :param y0: initial state (scalar or vector).
    :param t: sample times.
    :param args: extra positional arguments forwarded to *derivs*.
    :param kwargs: extra keyword arguments forwarded to *derivs*.
    :return: array of states, one row (or scalar entry) per element of *t*.

    Example::

        ## 2D system
        def derivs6(x, t):
            d1 = x[0] + 2 * x[1]
            d2 = -3 * x[0] + 4 * x[1]
            return (d1, d2)
        t = arange(0.0, 2.0, 0.0005)
        yout = rk4(derivs6, (1, 2), t)
    """
    try:
        Ny = len(y0)
    except TypeError:
        # Scalar initial state -> 1-D trajectory.
        # np.float64 replaces np.float_, which was removed in NumPy 2.0.
        yout = np.zeros((len(t),), np.float64)
    else:
        yout = np.zeros((len(t), Ny), np.float64)
    yout[0] = y0
    for i in np.arange(len(t) - 1):
        thist = t[i]
        dt = t[i + 1] - thist
        dt2 = dt / 2.0
        y0 = yout[i]
        k1 = np.asarray(derivs(y0, thist, *args, **kwargs))
        k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2, *args, **kwargs))
        k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2, *args, **kwargs))
        k4 = np.asarray(derivs(y0 + dt * k3, thist + dt, *args, **kwargs))
        yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
    return yout
|
def _pipe_segment_with_colons(align, colwidth):
"Return a segment of a horizontal line with optional colons which\n indicate column's alignment (as in `pipe` output format)."
w = colwidth
if (align in ['right', 'decimal']):
return (('-' * (w - 1)) + ':')
elif (align == 'center'):
return ((':' + ('-' * (w - 2))) + ':')
elif (align == 'left'):
return (':' + ('-' * (w - 1)))
else:
return ('-' * w)
|
def _pipe_line_with_colons(colwidths, colaligns):
    """Build a full `pipe`-format separator row from per-column widths and
    alignments."""
    segments = (
        _pipe_segment_with_colons(align, width)
        for align, width in zip(colaligns, colwidths)
    )
    return '|%s|' % '|'.join(segments)
|
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {'left': '', 'right': 'align="right"| ', 'center': 'align="center"| ', 'decimal': 'align="right"| '}
values_with_attrs = [(((' ' + alignment.get(a, '')) + c) + ' ') for (c, a) in zip(cell_values, colaligns)]
colsep = (separator * 2)
return (separator + colsep.join(values_with_attrs)).rstrip()
|
def _latex_line_begin_tabular(colwidths, colaligns):
alignment = {'left': 'l', 'right': 'r', 'center': 'c', 'decimal': 'r'}
tabular_columns_fmt = ''.join([alignment.get(a, 'l') for a in colaligns])
return (('\\begin{tabular}{' + tabular_columns_fmt) + '}\n\\hline')
|
def simple_separated_format(separator):
    """Construct a minimal TableFormat whose columns are joined by
    *separator* — no borders, no padding, no hidden rows."""
    return TableFormat(None, None, None, None, headerrow=DataRow('', separator, ''), datarow=DataRow('', separator, ''), padding=0, with_header_hide=None)
|
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except ValueError:
return False
|
def _isnumber(string):
    """True if *string* parses as a float (e.g. "123.45", "123"; not "spam")."""
    return _isconvertible(float, string)
|
def _isint(string):
    """True if *string* is an int, or a str/bytes value that parses as int."""
    return ((type(string) is int) or ((isinstance(string, _binary_type) or isinstance(string, _text_type)) and _isconvertible(int, string)))
|
def _type(string, has_invisible=True):
    """The least generic type of *string*: one of NoneType, int, float,
    bytes, or text.

    ANSI escape sequences are stripped first (when *has_invisible*) so that
    colored numbers still classify as numbers; objects exposing `isoformat`
    (dates/times) are treated as text. The order of checks matters: int
    before float, float before bytes.
    """
    if (has_invisible and (isinstance(string, _text_type) or isinstance(string, _binary_type))):
        string = _strip_invisible(string)
    if (string is None):
        return _none_type
    elif hasattr(string, 'isoformat'):
        # datetime.date / datetime.time / datetime.datetime instances
        return _text_type
    elif _isint(string):
        return int
    elif _isnumber(string):
        return float
    elif isinstance(string, _binary_type):
        return _binary_type
    else:
        return _text_type
|
def _afterpoint(string):
    """Number of symbols after the decimal point of *string*; -1 when the
    string is not a number or has no decimal point.

    For exponent notation without a dot ("123e45"), the digits after 'e'
    are counted instead — matching tabulate's decimal-alignment heuristic.
    """
    if not _isnumber(string) or _isint(string):
        return -1
    marker = string.rfind('.')
    if marker < 0:
        marker = string.lower().rfind('e')
    if marker < 0:
        return -1
    return len(string) - marker - 1
|
def _padleft(width, s, has_invisible=True):
    """Flush *s* right in a field of *width* visible characters, widening the
    field to compensate for invisible ANSI escape sequences in *s*."""
    if has_invisible:
        width = width + len(s) - len(_strip_invisible(s))
    return ('{0:>%ds}' % width).format(s)
|
def _padright(width, s, has_invisible=True):
    """Flush *s* left in a field of *width* visible characters, widening the
    field to compensate for invisible ANSI escape sequences in *s*."""
    if has_invisible:
        width = width + len(s) - len(_strip_invisible(s))
    return ('{0:<%ds}' % width).format(s)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.