def plot_histogram(Effs, Iter, fig_path):
fig = plt.figure()
bins = [(i * 5) for i in range(21)]
plt.hist((Effs * 100), bins, facecolor='blue', alpha=0.5)
plt.xlim(0, 100)
plt.ylim(0, 50)
plt.yticks([])
plt.xticks(fontsize=12)
plt.xlabel('Deflection efficiency (%)', fontsize=12)
plt.title('Iteration {}'.format(Iter), fontsize=16)
plt.savefig(fig_path, dpi=300)
plt.close()
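# Usage sketch (illustrative addition, not part of the original source): plot_histogram
# expects efficiencies as fractions in [0, 1] (scaled to percent inside), an iteration
# number for the title, and an output path. It assumes numpy (np) and matplotlib.pyplot
# (plt) are imported at module level, as the function body implies.
def _demo_plot_histogram():
    import numpy as np
    Effs = np.random.uniform(0.2, 0.9, size=200)  # 200 simulated deflection efficiencies
    plot_histogram(Effs, Iter=10, fig_path='hist_iter10.png')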
|
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
def initialize(self):
self.parser.add_argument('--G', type=str, default='UnetINDiv4_CCAM', help='choice of network for Generator')
self.parser.add_argument('--dataroot', type=str, default='./../../Hdd_DATA/BRATS2015_mat_std_sbjnorm', help='data root')
self.parser.add_argument('--savepath', type=str, default='./results', help='savepath')
self.parser.add_argument('--nEpoch', type=int, default=1000, help='number of Epoch iteration')
self.parser.add_argument('--lr', type=float, default=1e-05, help='learning rate')
self.parser.add_argument('--lr_D', type=float, default=1e-05, help='learning rate for D')
self.parser.add_argument('--lr_C', type=float, default=1e-05, help='learning rate for C')
self.parser.add_argument('--disp_div_N', type=int, default=100, help='number of display updates per epoch')
self.parser.add_argument('--nB', type=int, default=1, help='input batch size')
self.parser.add_argument('--DB_small', action='store_true', help='use small DB')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids, e.g. "0", "0,1,2", "0,2"')
self.parser.add_argument('--name', type=str, default='demo_exp_CollaGAN_BRATS', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--w_decay', type=float, default=0.01, help='weight decay for generator')
self.parser.add_argument('--w_decay_D', type=float, default=0.0, help='weight decay for discriminator')
self.parser.add_argument('--lambda_l1_cyc', type=float, default=10, help='lambda_L1_cyc, StarGAN cyc loss rec')
self.parser.add_argument('--lambda_l2_cyc', type=float, default=0.0, help='lambda_L2_cyc, StarGAN cyc loss rec')
self.parser.add_argument('--lambda_ssim_cyc', type=float, default=1.0, help='lambda_ssim')
self.parser.add_argument('--lambda_l2', type=float, default=0.0, help='lambda_L2')
self.parser.add_argument('--lambda_l1', type=float, default=0.0, help='lambda_L1')
self.parser.add_argument('--lambda_ssim', type=float, default=0.0, help='lambda_ssim')
self.parser.add_argument('--lambda_GAN', type=float, default=1.0, help='lambda GAN')
self.parser.add_argument('--lambda_G_clsf', type=float, default=1.0, help='generator classification loss. fake to be well classified')
self.parser.add_argument('--lambda_D_clsf', type=float, default=1.0, help='discriminator classification loss. fake to be well classified')
self.parser.add_argument('--lambda_cyc', type=float, default=1, help='lambda_cyc')
self.parser.add_argument('--nEpochDclsf', type=int, default=0, help='# of nEpoch for Discriminator pretrain')
self.parser.add_argument('--nCh_D', type=int, default=4, help='# of ngf for Discriminator')
self.parser.add_argument('--nCh_C', type=int, default=16, help='# of ngf for Classifier')
self.parser.add_argument('--use_lsgan', action='store_true', help='use LSGAN; otherwise the default GAN loss')
self.parser.add_argument('--use_1x1Conv', action='store_true', help='use 1x1 conv; otherwise the default 3x3 conv')
self.parser.add_argument('--wo_norm_std', action='store_true', help='do NOT use std normalization')
self.parser.add_argument('--N_null', type=int, default=1, help='# of nulling in input images')
self.parser.add_argument('--ngf', type=int, default=64, help='number of generator filters (ngf)')
self.parser.add_argument('--dropout', type=float, default=0.5, help='dropout rate')
self.parser.add_argument('--test_mode', action='store_true', help='not train. just test')
self.parser.add_argument('--AUG', action='store_true', help='use augmentation')
self.parser.add_argument('--nEpochD', type=int, default=2, help='number of D updates per G update')
self.initialized = True
def parse(self):
if (not self.initialized):
self.initialize()
self.opt = self.parser.parse_args()
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if (id >= 0):
self.opt.gpu_ids.append(id)
args = vars(self.opt)
print('------------ Options -------------')
for (k, v) in sorted(args.items()):
print(('%s: %s' % (str(k), str(v))))
print('-------------- End ----------------')
expr_dir = os.path.join(self.opt.savepath, self.opt.name)
if (not os.path.exists(expr_dir)):
os.makedirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for (k, v) in sorted(args.items()):
opt_file.write(('%s: %s\n' % (str(k), str(v))))
opt_file.write('-------------- End ----------------\n')
return self.opt
@staticmethod
def load_opts(opt, exp_name):
exp_dir = os.path.join(opt.savepath, exp_name)
with open(os.path.join(exp_dir, 'opt.txt'), 'r') as opt_file:
for aLine in opt_file.readlines():
idx = aLine.find(':')
if (idx == (- 1)):
continue
else:
cur_opt = aLine[:idx]
cur_val = aLine[(idx + 2):(- 1)]
if (cur_opt == 'model'):
opt.model = cur_val
elif (cur_opt == 'dataroot'):
opt.dataroot = cur_val
elif (cur_opt == 'savepath'):
opt.savepath = cur_val
elif (cur_opt == 'nEpoch'):
opt.nEpoch = int(cur_val)
elif (cur_opt == 'lr'):
opt.lr = float(cur_val)
elif (cur_opt == 'disp_div_N'):
opt.disp_div_N = int(cur_val)
elif (cur_opt == 'batchSize'):
opt.batchSize = int(cur_val)
elif (cur_opt == 'input_nc'):
opt.input_nc = int(cur_val)
elif (cur_opt == 'gpu_ids'):
cur_val = cur_val[1:(- 1)]
opt.gpu_ids = [int(cur_val)]
print('Use GPU id......')
elif (cur_opt == 'name'):
opt.name = cur_val
elif (cur_opt == 'use_residual'):
opt.use_residual = (cur_val == 'True')
elif (cur_opt == 'no_flip'):
opt.no_flip = (cur_val == 'True')
elif (cur_opt == 'lambda_cost'):
opt.lambda_cost = float(cur_val)
elif (cur_opt == 'weight_decay'):
opt.weight_decay = float(cur_val)
elif (cur_opt == 'use_dropout'):
opt.use_dropout = (cur_val == 'True')
elif (cur_opt == 'optimizer'):
opt.optimizer = cur_val
elif (cur_opt == 'ri'):
opt.ri = (cur_val == 'True')
elif (cur_opt == 'normalize'):
opt.normalize = (cur_val == 'True')
else:
st()
return opt
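# Usage sketch (illustrative addition, not part of the original source): parse() reads the
# CLI flags, prints them, and writes them to <savepath>/<name>/opt.txt; load_opts() re-reads
# such a file into an existing options object. Note that load_opts() only recognizes the
# option names in its elif chain and drops into the debugger (st()) for anything else.
if __name__ == '__main__':
    opt = BaseOptions().parse()          # e.g. python train.py --lr 1e-4 --gpu_ids 0,1 --AUG
    print(opt.G, opt.lr, opt.gpu_ids)    # parsed fields are plain attributes on opt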
|
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
def initialize(self):
self.parser.add_argument('--G', type=str, default='NVDLMED', help='choice of network')
self.parser.add_argument('--dataroot', type=str, default='/Hdd_2/BRATS_colla/BRATS2015_mat_std_sbjnorm_D', help='data root')
self.parser.add_argument('--savepath', type=str, default='./seg_results', help='savepath')
self.parser.add_argument('--nEpoch', type=int, default=1000, help='number of Epoch iteration')
self.parser.add_argument('--lr', type=float, default=1e-06, help='learning rate')
self.parser.add_argument('--disp_div_N', type=int, default=10, help='number of display updates per epoch')
self.parser.add_argument('--nB', type=int, default=1, help='input batch size')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids, e.g. "0", "0,1,2", "0,2"')
self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--w_decay', type=float, default=1e-05, help='weight decay')
self.parser.add_argument('--lambda_l2', type=float, default=0.1, help='lambda_L2')
self.parser.add_argument('--lambda_KL', type=float, default=0.1, help='lambda_KL')
self.parser.add_argument('--ngf', type=int, default=64, help='number of generator filters (ngf)')
self.parser.add_argument('--dropout', type=float, default=0.2, help='dropout rate')
self.parser.add_argument('--test_mode', action='store_true', help='not train. just test')
self.parser.add_argument('--AUG', action='store_true', help='use augmentation')
self.parser.add_argument('--lambda_WT', type=float, default=1.0, help='lambda_WT')
self.parser.add_argument('--lambda_TC', type=float, default=1.0, help='lambda_TC')
self.parser.add_argument('--lambda_EC', type=float, default=1.0, help='lambda_EC')
self.parser.add_argument('--lambda_precision', type=float, default=0.0, help='lambda_precision')
self.parser.add_argument('--lambda_recall', type=float, default=0.0, help='lambda_recall')
self.parser.add_argument('--tumor', type=int, default=0, help='0:WT, 1:TC, 2:EC')
self.initialized = True
def parse(self):
if (not self.initialized):
self.initialize()
self.opt = self.parser.parse_args()
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if (id >= 0):
self.opt.gpu_ids.append(id)
args = vars(self.opt)
print('------------ Options -------------')
for (k, v) in sorted(args.items()):
print(('%s: %s' % (str(k), str(v))))
print('-------------- End ----------------')
expr_dir = os.path.join(self.opt.savepath, self.opt.name)
if (not os.path.exists(expr_dir)):
os.makedirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for (k, v) in sorted(args.items()):
opt_file.write(('%s: %s\n' % (str(k), str(v))))
opt_file.write('-------------- End ----------------\n')
return self.opt
@staticmethod
def load_opts(opt, exp_name):
exp_dir = os.path.join(opt.savepath, exp_name)
with open(os.path.join(exp_dir, 'opt.txt'), 'r') as opt_file:
for aLine in opt_file.readlines():
idx = aLine.find(':')
if (idx == (- 1)):
continue
else:
cur_opt = aLine[:idx]
cur_val = aLine[(idx + 2):(- 1)]
if (cur_opt == 'model'):
opt.model = cur_val
elif (cur_opt == 'dataroot'):
opt.dataroot = cur_val
elif (cur_opt == 'savepath'):
opt.savepath = cur_val
elif (cur_opt == 'nEpoch'):
opt.nEpoch = int(cur_val)
elif (cur_opt == 'lr'):
opt.lr = float(cur_val)
elif (cur_opt == 'disp_div_N'):
opt.disp_div_N = int(cur_val)
elif (cur_opt == 'batchSize'):
opt.batchSize = int(cur_val)
elif (cur_opt == 'input_nc'):
opt.input_nc = int(cur_val)
elif (cur_opt == 'gpu_ids'):
cur_val = cur_val[1:(- 1)]
opt.gpu_ids = [int(cur_val)]
print('Use GPU id......')
elif (cur_opt == 'name'):
opt.name = cur_val
elif (cur_opt == 'use_residual'):
opt.use_residual = (cur_val == 'True')
elif (cur_opt == 'no_flip'):
opt.no_flip = (cur_val == 'True')
elif (cur_opt == 'lambda_cost'):
opt.lambda_cost = float(cur_val)
elif (cur_opt == 'weight_decay'):
opt.weight_decay = float(cur_val)
elif (cur_opt == 'use_dropout'):
opt.use_dropout = (cur_val == 'True')
elif (cur_opt == 'optimizer'):
opt.optimizer = cur_val
elif (cur_opt == 'ri'):
opt.ri = (cur_val == 'True')
elif (cur_opt == 'normalize'):
opt.normalize = (cur_val == 'True')
else:
st()
return opt
|
class HyperOptSearch(SuggestionAlgorithm):
'A wrapper around HyperOpt to provide trial suggestions.\n\n Requires HyperOpt to be installed from source.\n Uses the Tree-structured Parzen Estimators algorithm, although can be\n trivially extended to support any algorithm HyperOpt uses. Externally\n added trials will not be tracked by HyperOpt. Trials of the current run\n can be saved using save method, trials of a previous run can be loaded\n using restore method, thus enabling a warm start feature.\n\n Parameters:\n space (dict): HyperOpt configuration. Parameters will be sampled\n from this configuration and will be used to override\n parameters generated in the variant generation process.\n max_concurrent (int): Number of maximum concurrent trials. Defaults\n to 10.\n metric (str): The training result objective value attribute.\n mode (str): One of {min, max}. Determines whether objective is\n minimizing or maximizing the metric attribute.\n points_to_evaluate (list): Initial parameter suggestions to be run\n first. This is for when you already have some good parameters\n you want hyperopt to run first to help the TPE algorithm\n make better suggestions for future parameters. Needs to be\n a list of dict of hyperopt-named variables.\n Choice variables should be indicated by their index in the\n list (see example)\n n_initial_points (int): number of random evaluations of the\n objective function before starting to aproximate it with\n tree parzen estimators. Defaults to 20.\n random_state_seed (int, array_like, None): seed for reproducible\n results. Defaults to None.\n gamma (float in range (0,1)): parameter governing the tree parzen\n estimators suggestion algorithm. Defaults to 0.25.\n use_early_stopped_trials (bool): Whether to use early terminated\n trial results in the optimization process.\n\n Example:\n >>> space = {\n >>> \'width\': hp.uniform(\'width\', 0, 20),\n >>> \'height\': hp.uniform(\'height\', -100, 100),\n >>> \'activation\': hp.choice("activation", ["relu", "tanh"])\n >>> }\n >>> current_best_params = [{\n >>> \'width\': 10,\n >>> \'height\': 0,\n >>> \'activation\': 0, # The index of "relu"\n >>> }]\n >>> algo = HyperOptSearch(\n >>> space, max_concurrent=4, metric="mean_loss", mode="min",\n >>> points_to_evaluate=current_best_params)\n '
def __init__(self, space, max_concurrent=10, reward_attr=None, metric='episode_reward_mean', mode='max', points_to_evaluate=None, n_initial_points=20, random_state_seed=None, gamma=0.25, **kwargs):
assert (hpo is not None), 'HyperOpt must be installed!'
from hyperopt.fmin import generate_trials_to_calculate
assert ((type(max_concurrent) is int) and (max_concurrent > 0))
assert (mode in ['min', 'max']), "`mode` must be 'min' or 'max'!"
if (reward_attr is not None):
mode = 'max'
metric = reward_attr
logger.warning('`reward_attr` is deprecated and will be removed in a future version of Tune. Setting `metric={}` and `mode=max`.'.format(reward_attr))
self._max_concurrent = max_concurrent
self._metric = metric
if (mode == 'max'):
self._metric_op = (- 1.0)
elif (mode == 'min'):
self._metric_op = 1.0
if (n_initial_points is None):
self.algo = hpo.tpe.suggest
else:
self.algo = partial(hpo.tpe.suggest, n_startup_jobs=n_initial_points)
if (gamma is not None):
self.algo = partial(self.algo, gamma=gamma)
self.domain = hpo.Domain((lambda spc: spc), space)
if (points_to_evaluate is None):
self._hpopt_trials = hpo.Trials()
self._points_to_evaluate = 0
else:
assert (type(points_to_evaluate) == list)
self._hpopt_trials = generate_trials_to_calculate(points_to_evaluate)
self._hpopt_trials.refresh()
self._points_to_evaluate = len(points_to_evaluate)
self._live_trial_mapping = {}
if (random_state_seed is None):
self.rstate = np.random.RandomState()
else:
self.rstate = np.random.RandomState(random_state_seed)
super(HyperOptSearch, self).__init__(**kwargs)
def _suggest(self, trial_id):
if (self._num_live_trials() >= self._max_concurrent):
return None
if (self._points_to_evaluate > 0):
new_trial = self._hpopt_trials.trials[(self._points_to_evaluate - 1)]
self._points_to_evaluate -= 1
else:
new_ids = self._hpopt_trials.new_trial_ids(1)
self._hpopt_trials.refresh()
new_trials = self.algo(new_ids, self.domain, self._hpopt_trials, self.rstate.randint(((2 ** 31) - 1)))
self._hpopt_trials.insert_trial_docs(new_trials)
self._hpopt_trials.refresh()
new_trial = new_trials[0]
self._live_trial_mapping[trial_id] = (new_trial['tid'], new_trial)
config = hpo.base.spec_from_misc(new_trial['misc'])
ctrl = hpo.base.Ctrl(self._hpopt_trials, current_trial=new_trial)
memo = self.domain.memo_from_config(config)
hpo.utils.use_obj_for_literal_in_memo(self.domain.expr, ctrl, hpo.base.Ctrl, memo)
suggested_config = hpo.pyll.rec_eval(self.domain.expr, memo=memo, print_node_on_error=self.domain.rec_eval_print_node_on_error)
return copy.deepcopy(suggested_config)
def on_trial_result(self, trial_id, result):
ho_trial = self._get_hyperopt_trial(trial_id)
if (ho_trial is None):
return
now = hpo.utils.coarse_utcnow()
ho_trial['book_time'] = now
ho_trial['refresh_time'] = now
def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False):
'Notification for the completion of trial.\n\n The result is internally negated when interacting with HyperOpt\n so that HyperOpt can "maximize" this value, as it minimizes on default.\n '
ho_trial = self._get_hyperopt_trial(trial_id)
if (ho_trial is None):
return
ho_trial['refresh_time'] = hpo.utils.coarse_utcnow()
if error:
ho_trial['state'] = hpo.base.JOB_STATE_ERROR
ho_trial['misc']['error'] = (str(TuneError), 'Tune Error')
self._hpopt_trials.refresh()
else:
self._process_result(trial_id, result, early_terminated)
del self._live_trial_mapping[trial_id]
def _process_result(self, trial_id, result, early_terminated=False):
ho_trial = self._get_hyperopt_trial(trial_id)
ho_trial['refresh_time'] = hpo.utils.coarse_utcnow()
if (early_terminated and (self._use_early_stopped is False)):
ho_trial['state'] = hpo.base.JOB_STATE_ERROR
ho_trial['misc']['error'] = (str(TuneError), 'Tune Removed')
return
ho_trial['state'] = hpo.base.JOB_STATE_DONE
hp_result = self._to_hyperopt_result(result)
ho_trial['result'] = hp_result
self._hpopt_trials.refresh()
def _to_hyperopt_result(self, result):
return {'loss': (self._metric_op * result[self._metric]), 'status': 'ok'}
def _get_hyperopt_trial(self, trial_id):
if (trial_id not in self._live_trial_mapping):
return
hyperopt_tid = self._live_trial_mapping[trial_id][0]
return [t for t in self._hpopt_trials.trials if (t['tid'] == hyperopt_tid)][0]
def _num_live_trials(self):
return len(self._live_trial_mapping)
def _generate_trials(self, experiment_spec, output_path=''):
'Generates trials with configurations from `_suggest`.\n\n Creates a trial_id that is passed into `_suggest`.\n\n Yields:\n Trial objects constructed according to `spec`\n '
if ('run' not in experiment_spec):
raise TuneError('Must specify `run` in {}'.format(experiment_spec))
for _ in range((experiment_spec.get('num_samples', 1) - self._counter)):
trial_id = Trial.generate_id()
while True:
suggested_config = self._suggest(trial_id)
if (suggested_config is None):
(yield None)
else:
break
spec = copy.deepcopy(experiment_spec)
spec['config'] = merge_dicts(spec['config'], copy.deepcopy(suggested_config))
flattened_config = resolve_nested_dict(spec['config'])
self._counter += 1
tag = '{0}_{1}'.format(str(self._counter), format_vars(flattened_config))
(yield create_trial_from_spec(spec, output_path, self._parser, evaluated_params=flatten_dict(suggested_config), experiment_tag=tag, trial_id=trial_id))
def save(self, checkpoint_dir):
trials_object = (self._hpopt_trials, self.rstate.get_state(), self._counter)
with open(checkpoint_dir, 'wb') as outputFile:
pickle.dump(trials_object, outputFile)
def restore(self, checkpoint_dir):
with open(checkpoint_dir, 'rb') as inputFile:
trials_object = pickle.load(inputFile)
self._hpopt_trials = trials_object[0]
self.rstate.set_state(trials_object[1])
self._counter = trials_object[2]
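# Usage sketch (illustrative addition, not part of the original source): build a searcher
# from a hyperopt space, drive it with tune.run, then save/restore its internal state for a
# warm start. The space, objective, and checkpoint path below are hypothetical, and the
# (config, reporter) function signature reflects the Tune function API of this era.
def _demo_hyperopt_search():
    from hyperopt import hp
    from ray import tune

    def my_trainable(config, reporter):
        reporter(mean_loss=(config['lr'] - 1e-3) ** 2, done=True)

    space = {'lr': hp.loguniform('lr', -10, -2), 'momentum': hp.uniform('momentum', 0.1, 0.9)}
    algo = HyperOptSearch(space, max_concurrent=4, metric='mean_loss', mode='min')
    tune.run(my_trainable, search_alg=algo, num_samples=20)
    algo.save('./hyperopt_checkpoint.pkl')     # persists (trials, rng state, counter)
    algo.restore('./hyperopt_checkpoint.pkl')  # warm-start a later run from the same state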
|
def _naturalize(string):
'Provides a natural representation for string for nice sorting.'
splits = re.split('([0-9]+)', string)
return [(int(text) if text.isdigit() else text.lower()) for text in splits]
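# Example (illustrative addition, not part of the original source): _naturalize is a sort
# key that makes numeric fragments compare as integers instead of strings.
assert sorted(['trial_10', 'trial_2'], key=_naturalize) == ['trial_2', 'trial_10']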
|
def _find_newest_ckpt(ckpt_dir):
'Returns path to most recently modified checkpoint.'
full_paths = [os.path.join(ckpt_dir, fname) for fname in os.listdir(ckpt_dir) if (fname.startswith('experiment_state') and fname.endswith('.json'))]
return max(full_paths)
|
class _TuneFunctionEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, types.FunctionType):
return self._to_cloudpickle(obj)
try:
return super(_TuneFunctionEncoder, self).default(obj)
except Exception:
logger.debug('Unable to encode. Falling back to cloudpickle.')
return self._to_cloudpickle(obj)
def _to_cloudpickle(self, obj):
return {'_type': 'CLOUDPICKLE_FALLBACK', 'value': binary_to_hex(cloudpickle.dumps(obj))}
|
class _TuneFunctionDecoder(json.JSONDecoder):
def __init__(self, *args, **kwargs):
json.JSONDecoder.__init__(self, *args, object_hook=self.object_hook, **kwargs)
def object_hook(self, obj):
if (obj.get('_type') == 'CLOUDPICKLE_FALLBACK'):
return self._from_cloudpickle(obj)
return obj
def _from_cloudpickle(self, obj):
return cloudpickle.loads(hex_to_binary(obj['value']))
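# Usage sketch (illustrative addition, not part of the original source): the encoder falls
# back to cloudpickle for functions (and anything plain JSON rejects) and the decoder
# reverses it, so a config containing a callable survives a JSON round trip. It assumes the
# module-level imports the classes above rely on (json, cloudpickle, binary_to_hex,
# hex_to_binary).
def _demo_tune_function_codec():
    config = {'lr': 0.01, 'stop_fn': lambda trial_id, result: result['mean_loss'] < 0.1}
    encoded = json.dumps(config, cls=_TuneFunctionEncoder)
    decoded = json.loads(encoded, cls=_TuneFunctionDecoder)
    assert decoded['stop_fn']('t1', {'mean_loss': 0.05})  # callable restored via cloudpickle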
|
class TrialRunner(object):
'A TrialRunner implements the event loop for scheduling trials on Ray.\n\n Example:\n runner = TrialRunner()\n runner.add_trial(Trial(...))\n runner.add_trial(Trial(...))\n while not runner.is_finished():\n runner.step()\n print(runner.debug_string())\n\n The main job of TrialRunner is scheduling trials to efficiently use cluster\n resources, without overloading the cluster.\n\n While Ray itself provides resource management for tasks and actors, this is\n not sufficient when scheduling trials that may instantiate multiple actors.\n This is because if insufficient resources are available, concurrent trials\n could deadlock waiting for new resources to become available. Furthermore,\n oversubscribing the cluster could degrade training performance, leading to\n misleading benchmark results.\n '
CKPT_FILE_TMPL = 'experiment_state-{}.json'
VALID_RESUME_TYPES = [True, 'LOCAL', 'REMOTE', 'PROMPT']
def __init__(self, search_alg=None, scheduler=None, launch_web_server=False, local_checkpoint_dir=None, remote_checkpoint_dir=None, sync_to_cloud=None, resume=False, server_port=TuneServer.DEFAULT_PORT, verbose=True, checkpoint_period=10, trial_executor=None):
'Initializes a new TrialRunner.\n\n Args:\n search_alg (SearchAlgorithm): SearchAlgorithm for generating\n Trial objects.\n scheduler (TrialScheduler): Defaults to FIFOScheduler.\n launch_web_server (bool): Flag for starting TuneServer\n local_checkpoint_dir (str): Path where\n global checkpoints are stored and restored from.\n remote_checkpoint_dir (str): Remote path where\n global checkpoints are stored and restored from. Used\n if `resume` == REMOTE.\n resume (str|False): see `tune.py:run`.\n sync_to_cloud (func|str): See `tune.py:run`.\n server_port (int): Port number for launching TuneServer.\n verbose (bool): Flag for verbosity. If False, trial results\n will not be output.\n trial_executor (TrialExecutor): Defaults to RayTrialExecutor.\n '
self._search_alg = (search_alg or BasicVariantGenerator())
self._scheduler_alg = (scheduler or FIFOScheduler())
self.trial_executor = (trial_executor or RayTrialExecutor())
self._global_time_limit = float(os.environ.get('TRIALRUNNER_WALLTIME_LIMIT', float('inf')))
self._total_time = 0
self._iteration = 0
self._verbose = verbose
self._server = None
self._server_port = server_port
if launch_web_server:
self._server = TuneServer(self, self._server_port)
self._trials = []
self._stop_queue = []
self._local_checkpoint_dir = local_checkpoint_dir
if (self._local_checkpoint_dir and (not os.path.exists(self._local_checkpoint_dir))):
os.makedirs(self._local_checkpoint_dir)
self._remote_checkpoint_dir = remote_checkpoint_dir
self._syncer = get_cloud_syncer(local_checkpoint_dir, remote_checkpoint_dir, sync_to_cloud)
self._resumed = False
if self._validate_resume(resume_type=resume):
try:
self.resume()
logger.info('Resuming trial.')
self._resumed = True
except Exception:
logger.exception('Runner restore failed. Restarting experiment.')
else:
logger.debug('Starting a new experiment.')
self._search_algo_checkpoint_file = os.path.join(self._local_checkpoint_dir, 'search_algo_checkpoint.pkl')
self._start_time = time.time()
self._last_checkpoint_time = (- float('inf'))
self._checkpoint_period = checkpoint_period
self._session_str = datetime.fromtimestamp(self._start_time).strftime('%Y-%m-%d_%H-%M-%S')
self.checkpoint_file = None
if self._local_checkpoint_dir:
self.checkpoint_file = os.path.join(self._local_checkpoint_dir, TrialRunner.CKPT_FILE_TMPL.format(self._session_str))
@property
def scheduler_alg(self):
return self._scheduler_alg
def _validate_resume(self, resume_type):
'Checks whether to resume experiment.\n\n Args:\n resume_type: One of True, "REMOTE", "LOCAL", "PROMPT".\n '
if (not resume_type):
return False
assert (resume_type in self.VALID_RESUME_TYPES), 'resume_type {} is not one of {}'.format(resume_type, self.VALID_RESUME_TYPES)
assert (self._local_checkpoint_dir or self._remote_checkpoint_dir)
if (resume_type in [True, 'LOCAL', 'PROMPT']):
if (not self.checkpoint_exists(self._local_checkpoint_dir)):
raise ValueError('Called resume when no checkpoint exists in local directory.')
elif (resume_type == 'PROMPT'):
if click.confirm('Resume from local directory?'):
return True
if (resume_type in ['REMOTE', 'PROMPT']):
if ((resume_type == 'PROMPT') and (not click.confirm('Try downloading from remote directory?'))):
return False
if (not self._remote_checkpoint_dir):
raise ValueError('Called resume from remote without remote directory.')
logger.info('Downloading from %s', self._remote_checkpoint_dir)
self._syncer.sync_down_if_needed()
if (not self.checkpoint_exists(self._local_checkpoint_dir)):
raise ValueError('Called resume when no checkpoint exists in remote or local directory.')
return True
@classmethod
def checkpoint_exists(cls, directory):
if (not os.path.exists(directory)):
return False
return any(((fname.startswith('experiment_state') and fname.endswith('.json')) for fname in os.listdir(directory)))
def add_experiment(self, experiment):
self._search_alg.add_configurations([experiment])
def checkpoint(self, force=False):
'Saves execution state to `self._local_checkpoint_dir`.\n\n Overwrites the current session checkpoint, which starts when self\n is instantiated. Throttle depends on self._checkpoint_period.\n\n Args:\n force (bool): Forces a checkpoint despite checkpoint_period.\n '
if (not self._local_checkpoint_dir):
return
now = time.time()
if (((now - self._last_checkpoint_time) < self._checkpoint_period) and (not force)):
return
self._last_checkpoint_time = now
runner_state = {'checkpoints': list(self.trial_executor.get_checkpoints().values()), 'runner_data': self.__getstate__(), 'stats': {'start_time': self._start_time, 'timestamp': self._last_checkpoint_time}}
tmp_file_name = os.path.join(self._local_checkpoint_dir, '.tmp_checkpoint')
with open(tmp_file_name, 'w') as f:
json.dump(runner_state, f, indent=2, cls=_TuneFunctionEncoder)
os.rename(tmp_file_name, self.checkpoint_file)
# Also save the search algorithm state
self._search_alg.save(self._search_algo_checkpoint_file)
if force:
self._syncer.sync_up()
else:
self._syncer.sync_up_if_needed()
return self._local_checkpoint_dir
def resume(self):
'Resumes all checkpointed trials from previous run.\n\n Requires user to manually re-register their objects. Also stops\n all ongoing trials.\n '
newest_ckpt_path = _find_newest_ckpt(self._local_checkpoint_dir)
with open(newest_ckpt_path, 'r') as f:
runner_state = json.load(f, cls=_TuneFunctionDecoder)
self.checkpoint_file = newest_ckpt_path
logger.warning(''.join(['Attempting to resume experiment from {}. '.format(self._local_checkpoint_dir), 'This feature is experimental, and may not work with all search algorithms. ', 'This will ignore any new changes to the specification.']))
self.__setstate__(runner_state['runner_data'])
trials = []
for trial_cp in runner_state['checkpoints']:
new_trial = Trial(trial_cp['trainable_name'])
new_trial.__setstate__(trial_cp)
trials += [new_trial]
self._n_existing_trials = len(trials)
for trial in sorted(trials, key=(lambda t: t.last_update_time), reverse=True):
self.add_trial(trial)
self._search_alg.restore(self._search_algo_checkpoint_file)
def is_finished(self):
'Returns whether all trials have finished running.'
if (self._total_time > self._global_time_limit):
logger.warning('Exceeded global time limit {} / {}'.format(self._total_time, self._global_time_limit))
return True
trials_done = all((trial.is_finished() for trial in self._trials))
return (trials_done and self._search_alg.is_finished())
def step(self):
"Runs one step of the trial event loop.\n\n Callers should typically run this method repeatedly in a loop. They\n may inspect or modify the runner's state in between calls to step().\n "
if self.is_finished():
raise TuneError('Called step when all trials finished?')
with warn_if_slow('on_step_begin'):
self.trial_executor.on_step_begin(self)
next_trial = self._get_next_trial()
if (next_trial is not None):
with warn_if_slow('start_trial'):
self.trial_executor.start_trial(next_trial)
elif self.trial_executor.get_running_trials():
self._process_events()
else:
self.trial_executor.on_no_available_trials(self)
try:
with warn_if_slow('experiment_checkpoint'):
self.checkpoint()
except Exception:
logger.exception('Trial Runner checkpointing failed.')
self._iteration += 1
if self._server:
with warn_if_slow('server'):
self._process_requests()
if self.is_finished():
self._server.shutdown()
with warn_if_slow('on_step_end'):
self.trial_executor.on_step_end(self)
def get_trial(self, tid):
trial = [t for t in self._trials if (t.trial_id == tid)]
return (trial[0] if trial else None)
def get_trials(self):
'Returns the list of trials managed by this TrialRunner.\n\n Note that the caller usually should not mutate trial state directly.\n '
return self._trials
def add_trial(self, trial):
'Adds a new trial to this TrialRunner.\n\n Trials may be added at any time.\n\n Args:\n trial (Trial): Trial to queue.\n '
trial.set_verbose(self._verbose)
self._trials.append(trial)
with warn_if_slow('scheduler.on_trial_add'):
self._scheduler_alg.on_trial_add(self, trial)
self.trial_executor.try_checkpoint_metadata(trial)
def debug_string(self, delim='\n'):
messages = [self._scheduler_alg.debug_string(), self.trial_executor.debug_string(), trial_progress_str(self.get_trials())]
return delim.join(messages)
def has_resources(self, resources):
'Returns whether this runner has at least the specified resources.'
return self.trial_executor.has_resources(resources)
def _get_next_trial(self):
'Replenishes queue.\n\n Blocks if all trials queued have finished, but search algorithm is\n still not finished.\n '
trials_done = all((trial.is_finished() for trial in self._trials))
wait_for_trial = (trials_done and (not self._search_alg.is_finished()))
self._update_trial_queue(blocking=wait_for_trial)
with warn_if_slow('choose_trial_to_run'):
trial = self._scheduler_alg.choose_trial_to_run(self)
return trial
def _process_events(self):
failed_trial = self.trial_executor.get_next_failed_trial()
if failed_trial:
error_msg = '{} (IP: {}) detected as stale. This is likely because the node was lost'.format(failed_trial, failed_trial.node_ip)
logger.info(error_msg)
with warn_if_slow('process_failed_trial'):
self._process_trial_failure(failed_trial, error_msg=error_msg)
else:
trial = self.trial_executor.get_next_available_trial()
with warn_if_slow('process_trial'):
self._process_trial(trial)
def _process_trial(self, trial):
try:
result = self.trial_executor.fetch_result(trial)
is_duplicate = (RESULT_DUPLICATE in result)
if is_duplicate:
logger.debug("Trial finished without logging 'done'.")
result = trial.last_result
result.update(done=True)
self._total_time += result.get(TIME_THIS_ITER_S, 0)
flat_result = flatten_dict(result)
if trial.should_stop(flat_result):
self._scheduler_alg.on_trial_complete(self, trial, flat_result)
self._search_alg.on_trial_complete(trial.trial_id, result=flat_result)
decision = TrialScheduler.STOP
else:
with warn_if_slow('scheduler.on_trial_result'):
decision = self._scheduler_alg.on_trial_result(self, trial, flat_result)
with warn_if_slow('search_alg.on_trial_result'):
self._search_alg.on_trial_result(trial.trial_id, flat_result)
if (decision == TrialScheduler.STOP):
with warn_if_slow('search_alg.on_trial_complete'):
self._search_alg.on_trial_complete(trial.trial_id, result=flat_result, early_terminated=True)
if (not is_duplicate):
trial.update_last_result(result, terminate=(decision == TrialScheduler.STOP))
self._checkpoint_trial_if_needed(trial, force=result.get(SHOULD_CHECKPOINT, False))
if (decision == TrialScheduler.CONTINUE):
self.trial_executor.continue_training(trial)
elif (decision == TrialScheduler.PAUSE):
self.trial_executor.pause_trial(trial)
elif (decision == TrialScheduler.STOP):
self.trial_executor.export_trial_if_needed(trial)
self.trial_executor.stop_trial(trial)
else:
assert False, 'Invalid scheduling decision: {}'.format(decision)
except Exception:
logger.exception('Error processing event.')
self._process_trial_failure(trial, traceback.format_exc())
def _process_trial_failure(self, trial, error_msg):
'Handle trial failure.\n\n Attempt trial recovery if possible, clean up state otherwise.\n\n Args:\n trial (Trial): Failed trial.\n error_msg (str): Error message prior to invoking this method.\n '
if (trial.status == Trial.RUNNING):
if trial.should_recover():
self._try_recover(trial, error_msg)
else:
self._scheduler_alg.on_trial_error(self, trial)
self._search_alg.on_trial_complete(trial.trial_id, error=True)
self.trial_executor.stop_trial(trial, error=True, error_msg=error_msg)
def _checkpoint_trial_if_needed(self, trial, force=False):
'Checkpoints trial based off trial.last_result.'
if (trial.should_checkpoint() or force):
if (hasattr(trial, 'runner') and trial.runner):
self.trial_executor.save(trial, storage=Checkpoint.DISK)
self.trial_executor.try_checkpoint_metadata(trial)
def _try_recover(self, trial, error_msg):
'Tries to recover trial.\n\n Notifies SearchAlgorithm and Scheduler if failure to recover.\n\n Args:\n trial (Trial): Trial to recover.\n error_msg (str): Error message from prior to invoking this method.\n '
try:
self.trial_executor.stop_trial(trial, error=(error_msg is not None), error_msg=error_msg, stop_logger=False)
trial.result_logger.flush()
if self.trial_executor.has_resources(trial.resources):
logger.info('Trial %s: Attempting to recover trial state from last checkpoint.', trial)
self.trial_executor.start_trial(trial)
if (trial.status == Trial.ERROR):
logger.error('Trial %s: Did not start correctly.', trial)
raise RuntimeError('Trial did not start correctly.')
logger.debug('Trial %s: Started correctly.', trial)
else:
logger.debug('Trial %s: Notifying Scheduler and requeueing.', trial)
self._requeue_trial(trial)
except Exception:
logger.exception('Error recovering trial from checkpoint, abort.')
self._scheduler_alg.on_trial_error(self, trial)
self._search_alg.on_trial_complete(trial.trial_id, error=True)
def _requeue_trial(self, trial):
'Notification to TrialScheduler and requeue trial.\n\n This does not notify the SearchAlgorithm because the function\n evaluation is still in progress.\n\n '
self._scheduler_alg.on_trial_error(self, trial)
self.trial_executor.set_status(trial, Trial.PENDING)
self._trials.pop(self._trials.index(trial))
self._trials.append(trial)
with warn_if_slow('scheduler.on_trial_add'):
self._scheduler_alg.on_trial_add(self, trial)
def _update_trial_queue(self, blocking=False, timeout=600):
'Adds next trials to queue if possible.\n\n Note that the timeout is currently unexposed to the user.\n\n Args:\n blocking (bool): Blocks until either a trial is available\n or is_finished (timeout or search algorithm finishes).\n timeout (int): Seconds before blocking times out.\n '
trials = self._search_alg.next_trials()
if (blocking and (not trials)):
start = time.time()
while ((not trials) and (not self.is_finished()) and ((time.time() - start) < timeout)):
logger.info('Blocking for next trial...')
trials = self._search_alg.next_trials()
time.sleep(1)
for trial in trials:
self.add_trial(trial)
def request_stop_trial(self, trial):
self._stop_queue.append(trial)
def _process_requests(self):
while self._stop_queue:
t = self._stop_queue.pop()
self.stop_trial(t)
def stop_trial(self, trial):
'Stops trial.\n\n Trials may be stopped at any time. If trial is in state PENDING\n or PAUSED, calls `on_trial_remove` for scheduler and\n `on_trial_complete(..., early_terminated=True) for search_alg.\n Otherwise waits for result for the trial and calls\n `on_trial_complete` for scheduler and search_alg if RUNNING.\n '
error = False
error_msg = None
if (trial.status in [Trial.ERROR, Trial.TERMINATED]):
return
elif (trial.status in [Trial.PENDING, Trial.PAUSED]):
self._scheduler_alg.on_trial_remove(self, trial)
self._search_alg.on_trial_complete(trial.trial_id, early_terminated=True)
elif (trial.status is Trial.RUNNING):
try:
result = self.trial_executor.fetch_result(trial)
trial.update_last_result(result, terminate=True)
self._scheduler_alg.on_trial_complete(self, trial, result)
self._search_alg.on_trial_complete(trial.trial_id, result=result)
except Exception:
error_msg = traceback.format_exc()
logger.exception('Error processing event.')
self._scheduler_alg.on_trial_error(self, trial)
self._search_alg.on_trial_complete(trial.trial_id, error=True)
error = True
self.trial_executor.stop_trial(trial, error=error, error_msg=error_msg)
def __getstate__(self):
'Gets state for trial.\n\n Note that this is not used as a pickling override as\n does not have all fields.\n '
state = self.__dict__.copy()
for k in ['_trials', '_stop_queue', '_server', '_search_alg', '_scheduler_alg', 'trial_executor', '_syncer']:
del state[k]
state['launch_web_server'] = bool(self._server)
return state
def __setstate__(self, state):
launch_web_server = state.pop('launch_web_server')
session_str = state.pop('_session_str')
self.__dict__.setdefault('_session_str', session_str)
start_time = state.pop('_start_time')
self.__dict__.setdefault('_start_time', start_time)
self.__dict__.update(state)
if launch_web_server:
self._server = TuneServer(self, self._server_port)
|
def _make_scheduler(args):
if (args.scheduler in _SCHEDULERS):
return _SCHEDULERS[args.scheduler](**args.scheduler_config)
else:
raise TuneError('Unknown scheduler: {}, should be one of {}'.format(args.scheduler, _SCHEDULERS.keys()))
|
def _check_default_resources_override(run_identifier):
if (not isinstance(run_identifier, six.string_types)):
return True
trainable_cls = get_trainable_cls(run_identifier)
return (hasattr(trainable_cls, 'default_resource_request') and (trainable_cls.default_resource_request.__code__ != Trainable.default_resource_request.__code__))
|
def run(run_or_experiment, name=None, stop=None, config=None, resources_per_trial=None, num_samples=1, local_dir=None, upload_dir=None, trial_name_creator=None, loggers=None, sync_to_cloud=None, sync_to_driver=None, checkpoint_freq=0, checkpoint_at_end=False, sync_on_checkpoint=True, keep_checkpoints_num=None, checkpoint_score_attr=None, global_checkpoint_period=10, export_formats=None, max_failures=0, restore=None, search_alg=None, scheduler=None, with_server=False, server_port=TuneServer.DEFAULT_PORT, verbose=2, resume=False, queue_trials=False, reuse_actors=False, trial_executor=None, raise_on_failed_trial=True, return_trials=False, ray_auto_init=True, sync_function=None):
'Executes training.\n\n Args:\n run_or_experiment (function|class|str|Experiment): If\n function|class|str, this is the algorithm or model to train.\n This may refer to the name of a built-on algorithm\n (e.g. RLLib\'s DQN or PPO), a user-defined trainable\n function or class, or the string identifier of a\n trainable function or class registered in the tune registry.\n If Experiment, then Tune will execute training based on\n Experiment.spec.\n name (str): Name of experiment.\n stop (dict|func): The stopping criteria. If dict, the keys may be\n any field in the return result of \'train()\', whichever is\n reached first. If function, it must take (trial_id, result) as\n arguments and return a boolean (True if trial should be stopped,\n False otherwise).\n config (dict): Algorithm-specific configuration for Tune variant\n generation (e.g. env, hyperparams). Defaults to empty dict.\n Custom search algorithms may ignore this.\n resources_per_trial (dict): Machine resources to allocate per trial,\n e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be\n assigned unless you specify them here. Defaults to 1 CPU and 0\n GPUs in ``Trainable.default_resource_request()``.\n num_samples (int): Number of times to sample from the\n hyperparameter space. Defaults to 1. If `grid_search` is\n provided as an argument, the grid will be repeated\n `num_samples` of times.\n local_dir (str): Local dir to save training results to.\n Defaults to ``~/ray_results``.\n upload_dir (str): Optional URI to sync training results to\n (e.g. ``s3://bucket``).\n trial_name_creator (func): Optional function for generating\n the trial string representation.\n loggers (list): List of logger creators to be used with\n each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS.\n See `ray/tune/logger.py`.\n sync_to_cloud (func|str): Function for syncing the local_dir to and\n from upload_dir. If string, then it must be a string template that\n includes `{source}` and `{target}` for the syncer to run. If not\n provided, the sync command defaults to standard S3 or gsutil sync\n comamnds.\n sync_to_driver (func|str): Function for syncing trial logdir from\n remote node to local. If string, then it must be a string template\n that includes `{source}` and `{target}` for the syncer to run.\n If not provided, defaults to using rsync.\n checkpoint_freq (int): How many training iterations between\n checkpoints. A value of 0 (default) disables checkpointing.\n checkpoint_at_end (bool): Whether to checkpoint at the end of the\n experiment regardless of the checkpoint_freq. Default is False.\n sync_on_checkpoint (bool): Force sync-down of trial checkpoint, to\n guarantee recoverability. If set to False, checkpoint syncing from\n worker to driver is asynchronous. Set this to False only if\n synchronous checkpointing is too slow and trial restoration\n failures can be tolerated. Defaults to True.\n keep_checkpoints_num (int): Number of checkpoints to keep. A value of\n `None` keeps all checkpoints. Defaults to `None`. If set, need\n to provide `checkpoint_score_attr`.\n checkpoint_score_attr (str): Specifies by which attribute to rank the\n best checkpoint. Default is increasing order. 
If attribute starts\n with `min-` it will rank attribute in decreasing order, i.e.\n `min-validation_loss`.\n global_checkpoint_period (int): Seconds between global checkpointing.\n This does not affect `checkpoint_freq`, which specifies frequency\n for individual trials.\n export_formats (list): List of formats that exported at the end of\n the experiment. Default is None.\n max_failures (int): Try to recover a trial at least this many times.\n Ray will recover from the latest checkpoint if present.\n Setting to -1 will lead to infinite recovery retries.\n Setting to 0 will disable retries. Defaults to 3.\n restore (str): Path to checkpoint. Only makes sense to set if\n running 1 trial. Defaults to None.\n search_alg (SearchAlgorithm): Search Algorithm. Defaults to\n BasicVariantGenerator.\n scheduler (TrialScheduler): Scheduler for executing\n the experiment. Choose among FIFO (default), MedianStopping,\n AsyncHyperBand, HyperBand and PopulationBasedTraining. Refer to\n ray.tune.schedulers for more options.\n with_server (bool): Starts a background Tune server. Needed for\n using the Client API.\n server_port (int): Port number for launching TuneServer.\n verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent,\n 1 = only status updates, 2 = status and trial results.\n resume (str|bool): One of "LOCAL", "REMOTE", "PROMPT", or bool.\n LOCAL/True restores the checkpoint from the local_checkpoint_dir.\n REMOTE restores the checkpoint from remote_checkpoint_dir.\n PROMPT provides CLI feedback. False forces a new\n experiment. If resume is set but checkpoint does not exist,\n ValueError will be thrown.\n queue_trials (bool): Whether to queue trials when the cluster does\n not currently have enough resources to launch one. This should\n be set to True when running on an autoscaling cluster to enable\n automatic scale-up.\n reuse_actors (bool): Whether to reuse actors between different trials\n when possible. This can drastically speed up experiments that start\n and stop actors often (e.g., PBT in time-multiplexing mode). This\n requires trials to have the same resource requirements.\n trial_executor (TrialExecutor): Manage the execution of trials.\n raise_on_failed_trial (bool): Raise TuneError if there exists failed\n trial (of ERROR state) when the experiments complete.\n ray_auto_init (bool): Automatically starts a local Ray cluster\n if using a RayTrialExecutor (which is the default) and\n if Ray is not initialized. Defaults to True.\n sync_function: Deprecated. See `sync_to_cloud` and\n `sync_to_driver`.\n\n Returns:\n List of Trial objects.\n\n Raises:\n TuneError if any trials failed and `raise_on_failed_trial` is True.\n\n Examples:\n >>> tune.run(mytrainable, scheduler=PopulationBasedTraining())\n\n >>> tune.run(mytrainable, num_samples=5, reuse_actors=True)\n\n >>> tune.run(\n >>> "PG",\n >>> num_samples=5,\n >>> config={\n >>> "env": "CartPole-v0",\n >>> "lr": tune.sample_from(lambda _: np.random.rand())\n >>> }\n >>> )\n '
trial_executor = (trial_executor or RayTrialExecutor(queue_trials=queue_trials, reuse_actors=reuse_actors, ray_auto_init=ray_auto_init))
if isinstance(run_or_experiment, list):
experiments = run_or_experiment
else:
experiments = [run_or_experiment]
if (len(experiments) > 1):
logger.info('Running multiple concurrent experiments is experimental and may not work with certain features.')
for (i, exp) in enumerate(experiments):
if (not isinstance(exp, Experiment)):
run_identifier = Experiment.register_if_needed(exp)
experiments[i] = Experiment(name=name, run=run_identifier, stop=stop, config=config, resources_per_trial=resources_per_trial, num_samples=num_samples, local_dir=local_dir, upload_dir=upload_dir, sync_to_driver=sync_to_driver, trial_name_creator=trial_name_creator, loggers=loggers, checkpoint_freq=checkpoint_freq, checkpoint_at_end=checkpoint_at_end, sync_on_checkpoint=sync_on_checkpoint, keep_checkpoints_num=keep_checkpoints_num, checkpoint_score_attr=checkpoint_score_attr, export_formats=export_formats, max_failures=max_failures, restore=restore, sync_function=sync_function)
else:
logger.debug('Ignoring some parameters passed into tune.run.')
if sync_to_cloud:
for exp in experiments:
assert exp.remote_checkpoint_dir, 'Need `upload_dir` if `sync_to_cloud` given.'
runner = TrialRunner(search_alg=(search_alg or BasicVariantGenerator()), scheduler=(scheduler or FIFOScheduler()), local_checkpoint_dir=experiments[0].checkpoint_dir, remote_checkpoint_dir=experiments[0].remote_checkpoint_dir, sync_to_cloud=sync_to_cloud, checkpoint_period=global_checkpoint_period, resume=resume, launch_web_server=with_server, server_port=server_port, verbose=bool((verbose > 1)), trial_executor=trial_executor)
for exp in experiments:
runner.add_experiment(exp)
if IS_NOTEBOOK:
reporter = JupyterNotebookReporter(overwrite=(verbose < 2))
else:
reporter = CLIReporter()
if trial_executor.has_gpus():
if (isinstance(resources_per_trial, dict) and ('gpu' in resources_per_trial)):
pass
elif _check_default_resources_override(experiments[0].run_identifier):
pass
else:
logger.warning("Tune detects GPUs, but no trials are using GPUs. To enable trials to use GPUs, set tune.run(resources_per_trial={'gpu': 1}...) which allows Tune to expose 1 GPU to each trial. You can also override `Trainable.default_resource_request` if using the Trainable API.")
# --- main loop ---
last_debug = 0
while (not runner.is_finished()):
runner.step()
if ((time.time() - last_debug) > DEBUG_PRINT_INTERVAL):
if verbose:
reporter.report(runner)
last_debug = time.time()
try:
runner.checkpoint(force=True)
except Exception:
logger.exception('Trial Runner checkpointing failed.')
if verbose:
reporter.report(runner)
wait_for_sync()
errored_trials = []
for trial in runner.get_trials():
if (trial.status != Trial.TERMINATED):
errored_trials += [trial]
if errored_trials:
if raise_on_failed_trial:
raise TuneError('Trials did not complete', errored_trials)
else:
logger.error('Trials did not complete: %s', errored_trials)
trials = runner.get_trials()
if return_trials:
return trials
logger.info('Returning an analysis object by default. You can call `analysis.trials` to retrieve a list of trials. This message will be removed in future versions of Tune.')
return ExperimentAnalysis(runner.checkpoint_file, trials=trials)
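# Usage sketch (illustrative addition, not part of the original source): resuming a named
# experiment from its local checkpoint directory, which exercises
# TrialRunner._validate_resume above. The trainable, experiment name, and local_dir are
# hypothetical; resume='LOCAL' raises ValueError if no experiment_state checkpoint exists.
def _demo_resume_run():
    from ray import tune

    def my_trainable(config, reporter):
        reporter(mean_loss=config['lr'] ** 2, done=True)

    tune.run(my_trainable, name='demo_exp', local_dir='~/ray_results',
             config={'lr': tune.grid_search([0.1, 0.01])}, resume='LOCAL')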
|
def run_experiments(experiments, search_alg=None, scheduler=None, with_server=False, server_port=TuneServer.DEFAULT_PORT, verbose=2, resume=False, queue_trials=False, reuse_actors=False, trial_executor=None, raise_on_failed_trial=True, concurrent=False):
'Runs and blocks until all trials finish.\n\n Examples:\n >>> experiment_spec = Experiment("experiment", my_func)\n >>> run_experiments(experiments=experiment_spec)\n\n >>> experiment_spec = {"experiment": {"run": my_func}}\n >>> run_experiments(experiments=experiment_spec)\n\n >>> run_experiments(\n >>> experiments=experiment_spec,\n >>> scheduler=MedianStoppingRule(...))\n\n >>> run_experiments(\n >>> experiments=experiment_spec,\n >>> search_alg=SearchAlgorithm(),\n >>> scheduler=MedianStoppingRule(...))\n\n Returns:\n List of Trial objects, holding data for each executed trial.\n\n '
experiments = convert_to_experiment_list(experiments)
if concurrent:
return run(experiments, search_alg=search_alg, scheduler=scheduler, with_server=with_server, server_port=server_port, verbose=verbose, resume=resume, queue_trials=queue_trials, reuse_actors=reuse_actors, trial_executor=trial_executor, raise_on_failed_trial=raise_on_failed_trial, return_trials=True)
else:
trials = []
for exp in experiments:
trials += run(exp, search_alg=search_alg, scheduler=scheduler, with_server=with_server, server_port=server_port, verbose=verbose, resume=resume, queue_trials=queue_trials, reuse_actors=reuse_actors, trial_executor=trial_executor, raise_on_failed_trial=raise_on_failed_trial, return_trials=True)
return trials
|
def fit_eval_GPR_mll(param_dict):
sys.path.append(BASE_DIR)
dataset = param_dict.pop('dataset')
seed = param_dict.pop('seed')
results_dict = {'learner': 'gpr_mll', 'dataset': dataset, 'seed': seed}
results_dict.update(**param_dict)
from experiments.data_sim import provide_data
(data_train, _, data_test) = provide_data(dataset, DATA_SEED)
@ray.remote
def fit_eval(x_context, y_context, x_test, y_test, params):
from meta_learn.GPR_mll import GPRegressionLearned
torch.set_num_threads(1)
model = GPRegressionLearned(x_context, y_context, **params, random_seed=seed)
model.fit(verbose=False)
return model.eval(x_test, y_test)
results = ray.get([fit_eval.remote(*data, param_dict) for data in data_test])
results = list(zip(*results))
assert (len(results) == 3)
results_dict['ll'] = np.mean(results[0])
results_dict['rmse'] = np.mean(results[1])
results_dict['calib_err'] = np.mean(results[2])
return results_dict
|
@ray.remote
def fit_eval_meta_algo(param_dict):
sys.path.append(BASE_DIR)
from meta_learn.GPR_meta_svgd import GPRegressionMetaLearnedSVGD
from meta_learn.GPR_meta_vi import GPRegressionMetaLearnedVI
from meta_learn.GPR_meta_mll import GPRegressionMetaLearned
from meta_learn.NPR_meta import NPRegressionMetaLearned
from meta_learn.MAML import MAMLRegression
torch.set_num_threads(1)
meta_learner = param_dict.pop('meta_learner')
dataset = param_dict.pop('dataset')
seed = param_dict.pop('seed')
ALGO_MAP = {'gpr_meta_mll': GPRegressionMetaLearned, 'gpr_meta_vi': GPRegressionMetaLearnedVI, 'gpr_meta_svgd': GPRegressionMetaLearnedSVGD, 'maml': MAMLRegression, 'neural_process': NPRegressionMetaLearned}
meta_learner_cls = ALGO_MAP[meta_learner]
results_dict = {'learner': meta_learner, 'dataset': dataset, 'seed': seed}
results_dict.update(**param_dict)
try:
from experiments.data_sim import provide_data
(data_train, _, data_test) = provide_data(dataset, DATA_SEED)
model = meta_learner_cls(data_train, **param_dict, random_seed=seed)
model.meta_fit(data_test, log_period=5000)
if (meta_learner == 'neural_process'):
eval_result = model.eval_datasets(data_test, flatten_y=False)
else:
eval_result = model.eval_datasets(data_test)
if (meta_learner == 'maml'):
rmse = eval_result
results_dict.update(rmse=rmse)
else:
(ll, rmse, calib_err) = eval_result
results_dict.update(ll=ll, rmse=rmse, calib_err=calib_err)
except Exception as e:
print(e)
results_dict.update(ll=np.nan, rmse=np.nan, calib_err=np.nan)
return results_dict
|
def _create_configurations(param_configs):
confs = []
for conf_dict in param_configs:
conf_dict = dict([(key, (val if ((type(val) == list) or (type(val) == tuple)) else [val])) for (key, val) in conf_dict.items()])
conf_product = list(itertools.product(*list(conf_dict.values())))
conf_product_dicts = [dict(zip(conf_dict.keys(), conf)) for conf in conf_product]
confs.extend(conf_product_dicts)
return confs
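# Example (illustrative addition, not part of the original source): scalar values are
# wrapped into one-element lists and the Cartesian product over all keys is expanded, so a
# single config dict with one list-valued key yields one dict per list element.
assert _create_configurations([{'lr': [0.1, 0.01], 'seed': 1}]) == [
    {'lr': 0.1, 'seed': 1}, {'lr': 0.01, 'seed': 1}]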
|
def main(args):
param_configs = [{'meta_learner': 'gpr_meta_mll', 'dataset': DATASETS, 'seed': MODEL_SEEDS, 'covar_module': ['SE', 'NN'], 'mean_module': 'NN', 'num_iter_fit': 40000, 'weight_decay': 0.0, 'task_batch_size': 4, 'lr_decay': [0.97], 'mean_nn_layers': [LAYER_SIZES], 'kernel_nn_layers': [LAYER_SIZES]}, {'meta_learner': 'maml', 'dataset': DATASETS, 'seed': MODEL_SEEDS, 'num_iter_fit': 30000, 'task_batch_size': 4, 'lr_inner': [0.02, 0.05, 0.1], 'layer_sizes': [LAYER_SIZES]}, {'meta_learner': 'neural_process', 'dataset': DATASETS, 'seed': MODEL_SEEDS, 'num_iter_fit': 30000, 'task_batch_size': 4, 'lr_decay': 0.97, 'lr_params': 0.001, 'r_dim': [32, 64], 'weight_decay': [0.01, 0.1, 0.2, 0.4, 0.8]}]
param_configs = _create_configurations(param_configs)
result_dicts = []
answer = input(('About to run %i jobs with ray. Proceed? [yes/no]\n' % len(param_configs)))
if (answer == 'yes'):
result_dicts += ray.get([fit_eval_meta_algo.remote(param_dict) for param_dict in param_configs])
param_configs_gpr_mll = [{'dataset': DATASETS, 'seed': MODEL_SEEDS, 'covar_module': ['SE'], 'mean_module': ['constant'], 'learning_mode': ['vanilla'], 'num_iter_fit': 20000, 'mean_nn_layers': [LAYER_SIZES]}]
param_configs_gpr_mll = _create_configurations(param_configs_gpr_mll)
result_dicts += [fit_eval_GPR_mll(param_dict) for param_dict in param_configs_gpr_mll]
result_df = pandas.DataFrame(result_dicts)
csv_file_name = os.path.join(EXP_DIR, ('baseline_comp_%s.csv' % datetime.now().strftime('%b_%d_%Y_%H:%M:%S')))
result_df.to_csv(csv_file_name)
print(result_df.to_string())
print(('\nDumped the csv file to %s' % csv_file_name))
|
@ray.remote
def fit_eval_meta_algo(param_dict):
sys.path.append(BASE_DIR)
from meta_learn.GPR_meta_svgd import GPRegressionMetaLearnedSVGD
from meta_learn.GPR_meta_vi import GPRegressionMetaLearnedVI
from meta_learn.GPR_meta_mll import GPRegressionMetaLearned
from meta_learn.NPR_meta import NPRegressionMetaLearned
from meta_learn.MAML import MAMLRegression
torch.set_num_threads(1)
meta_learner = param_dict.pop('meta_learner')
dataset = param_dict.pop('dataset')
seed = param_dict.pop('seed')
ALGO_MAP = {'gpr_meta_mll': GPRegressionMetaLearned, 'gpr_meta_vi': GPRegressionMetaLearnedVI, 'gpr_meta_svgd': GPRegressionMetaLearnedSVGD, 'maml': MAMLRegression, 'neural_process': NPRegressionMetaLearned}
meta_learner_cls = ALGO_MAP[meta_learner]
results_dict = {'learner': meta_learner, 'dataset': dataset, 'seed': seed}
results_dict.update(**param_dict)
try:
from experiments.data_sim import provide_data
(data_train, _, data_test) = provide_data(dataset, DATA_SEED)
model = meta_learner_cls(data_train, **param_dict, random_seed=seed)
model.meta_fit(data_test, log_period=5000)
if (meta_learner == 'neural_process'):
eval_result = model.eval_datasets(data_test, flatten_y=False)
else:
eval_result = model.eval_datasets(data_test)
if (meta_learner == 'maml'):
rmse = eval_result
results_dict.update(rmse=rmse)
else:
(ll, rmse, calib_err) = eval_result
results_dict.update(ll=ll, rmse=rmse, calib_err=calib_err)
except Exception as e:
print(e)
results_dict.update(ll=np.nan, rmse=np.nan, calib_err=np.nan)
return results_dict
|
def _create_configurations(param_configs):
confs = []
for conf_dict in param_configs:
conf_dict = dict([(key, (val if ((type(val) == list) or (type(val) == tuple)) else [val])) for (key, val) in conf_dict.items()])
conf_product = list(itertools.product(*list(conf_dict.values())))
conf_product_dicts = [dict(zip(conf_dict.keys(), conf)) for conf in conf_product]
confs.extend(conf_product_dicts)
return confs
|
def main(args):
param_configs = [{'meta_learner': 'gpr_meta_mll', 'dataset': DATASETS, 'seed': MODEL_SEEDS, 'covar_module': ['NN'], 'mean_module': 'NN', 'num_iter_fit': 40000, 'weight_decay': 0.0, 'task_batch_size': [4], 'lr_decay': [0.97], 'lr_params': [0.005, 0.001, 0.0005], 'mean_nn_layers': [LAYER_SIZES], 'kernel_nn_layers': [LAYER_SIZES]}, {'meta_learner': 'maml', 'dataset': DATASETS, 'seed': MODEL_SEEDS, 'num_iter_fit': 40000, 'task_batch_size': 4, 'lr_inner': [0.02, 0.03, 0.05, 0.08, 0.1, 0.15, 0.2], 'layer_sizes': [LAYER_SIZES]}, {'meta_learner': 'neural_process', 'dataset': DATASETS, 'seed': MODEL_SEEDS, 'num_iter_fit': 40000, 'task_batch_size': 4, 'lr_decay': 0.97, 'lr_params': 0.001, 'r_dim': [32, 64, 124], 'weight_decay': [1e-05, 0.0001, 0.001, 0.01, 0.1, 0.2, 0.4, 0.8]}]
param_configs = _create_configurations(param_configs)
result_dicts = []
answer = input(('About to run %i jobs with ray. Proceed? [yes/no]\n' % len(param_configs)))
if (answer == 'yes'):
result_dicts += ray.get([fit_eval_meta_algo.remote(param_dict) for param_dict in param_configs])
result_df = pandas.DataFrame(result_dicts)
csv_file_name = os.path.join(EXP_DIR, ('baseline_comp_%s.csv' % datetime.now().strftime('%b_%d_%Y_%H:%M:%S')))
result_df.to_csv(csv_file_name)
print(result_df.to_string())
print(('\nDumped the csv file to %s' % csv_file_name))
|
class MetaDataset():
def __init__(self, random_state=None):
if (random_state is None):
self.random_state = np.random
else:
self.random_state = random_state
def generate_meta_train_data(self, n_tasks: int, n_samples: int) -> list:
raise NotImplementedError
def generate_meta_test_data(self, n_tasks: int, n_samples_context: int, n_samples_test: int) -> list:
raise NotImplementedError
|
class PhysionetDataset(MetaDataset):
def __init__(self, random_state=None, variable_id=0, dtype=np.float32, physionet_dir=None):
super().__init__(random_state)
self.dtype = dtype
if (physionet_dir is not None):
self.data_dir = physionet_dir
elif (PHYSIONET_DIR is not None):
self.data_dir = PHYSIONET_DIR
else:
raise ValueError('No data directory provided.')
self.variable_list = ['GCS', 'Urine', 'HCT', 'BUN', 'Creatinine', 'DiasABP']
assert (variable_id < len(self.variable_list)), 'Unknown variable ID'
self.variable = self.variable_list[variable_id]
self.data_path = os.path.join(self.data_dir, 'set_a_merged.h5')
with pd.HDFStore(self.data_path, mode='r') as hdf_file:
self.keys = hdf_file.keys()
def generate_meta_train_data(self, n_tasks, n_samples=47):
"""
Samples n_tasks patients and returns measurements from the variable
with the ID variable_id. n_samples defines in this case the cut-off
of hours on the ICU, e.g., n_samples=24 returns all measurements that
were taken in the first 24 hours. Generally, those will be less than
24 measurements. If there are less than n_tasks patients that have
any measurements of variable variable_id before hour n_samples, the
returned list will contain less than n_tasks tuples.
"""
assert (n_tasks <= 500), "We don't have that many tasks"
assert (n_samples < 48), "We don't have that many samples"
meta_train_tuples = []
for patient in self.keys:
df = pd.read_hdf(self.data_path, patient, mode='r')[self.variable].dropna()
times = df.index.values.astype(self.dtype)
values = df.values.astype(self.dtype)
times_context = [time for time in times if (time <= n_samples)]
if (len(times_context) > 0):
times_context = np.array(times_context, dtype=self.dtype)
values_context = values[:len(times_context)]
if (values_context.shape[0] >= 4):
meta_train_tuples.append((times_context, values_context))
else:
continue
if (len(meta_train_tuples) >= n_tasks):
break
return meta_train_tuples
def generate_meta_test_data(self, n_tasks, n_samples_context=24, n_samples_test=(- 1), variable_id=0):
"""
Samples n_tasks patients and returns measurements from the variable
with the ID variable_id. n_samples defines in this case the cut-off
of hours on the ICU, e.g., n_samples=24 returns all measurements that
were taken in the first 24 hours. Generally, those will be less than
24 measurements. The remaining measurements are returned as test points,
i.e., n_samples_test is unused.
If there are less than n_tasks patients that have any measurements
of variable variable_id before hour n_samples, the
returned list will contain less than n_tasks tuples.
"""
assert (n_tasks <= 1000), "We don't have that many tasks"
assert (n_samples_context < 48), "We don't have that many samples"
meta_test_tuples = []
for patient in reversed(self.keys):
df = pd.read_hdf(self.data_path, patient, mode='r')[self.variable].dropna()
times = df.index.values.astype(self.dtype)
values = df.values.astype(self.dtype)
times_context = [time for time in times if (time <= n_samples_context)]
times_test = [time for time in times if (time > n_samples_context)]
if ((len(times_context) > 0) and (len(times_test) > 0)):
times_context = np.array(times_context, dtype=self.dtype)
times_test = np.array(times_test, dtype=self.dtype)
values_context = values[:len(times_context)]
values_test = values[len(times_context):]
if (values_context.shape[0] >= 4):
meta_test_tuples.append((times_context, values_context, times_test, values_test))
else:
continue
if (len(meta_test_tuples) >= n_tasks):
break
return meta_test_tuples
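# Hypothetical usage sketch (not part of the original class). The directory below is a
# placeholder; it must contain the preprocessed 'set_a_merged.h5' file.
example_physionet_dir = '/path/to/physionet/set-a'  # assumed path, adjust as needed
if os.path.isdir(example_physionet_dir):
    physio = PhysionetDataset(random_state=np.random.RandomState(0), variable_id=0, physionet_dir=example_physionet_dir)
    train_tuples = physio.generate_meta_train_data(n_tasks=100, n_samples=24)  # (times, GCS values) per patient
    test_tuples = physio.generate_meta_test_data(n_tasks=100, n_samples_context=24)  # context = first 24h, test = rest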
|
class MNISTRegressionDataset(MetaDataset):
def __init__(self, random_state=None, dtype=np.float32):
import mnist
super().__init__(random_state)
self.dtype = dtype
mnist_dir = (MNIST_DIR if os.path.isdir(MNIST_DIR) else None)
self.train_images = mnist.download_and_parse_mnist_file('train-images-idx3-ubyte.gz', target_dir=mnist_dir)
self.test_images = mnist.download_and_parse_mnist_file('t10k-images-idx3-ubyte.gz', target_dir=mnist_dir)
self.train_images = (self.train_images / 255.0)
self.test_images = (self.test_images / 255.0)
def generate_meta_train_data(self, n_tasks, n_samples):
meta_train_tuples = []
train_indices = self.random_state.choice(self.train_images.shape[0], size=n_tasks, replace=False)
for idx in train_indices:
(x_context, t_context, _, _) = self._image_to_context_transform(self.train_images[idx], n_samples)
meta_train_tuples.append((x_context, t_context))
return meta_train_tuples
def generate_meta_test_data(self, n_tasks, n_samples_context, n_samples_test=(- 1)):
meta_test_tuples = []
test_indices = self.random_state.choice(self.test_images.shape[0], size=n_tasks, replace=False)
for idx in test_indices:
(x_context, t_context, x_test, t_test) = self._image_to_context_transform(self.test_images[idx], n_samples_context)
if ((n_samples_test > 0) and (n_samples_test < x_test.shape[0])):
indices = self.random_state.choice(x_test.shape[0], size=n_samples_test, replace=False)
(x_test, t_test) = (x_test[indices], t_test[indices])
meta_test_tuples.append((x_context, t_context, x_test, t_test))
return meta_test_tuples
def _image_to_context_transform(self, image, num_context_points):
assert ((image.ndim == 2) and (image.shape[0] == image.shape[1]))
image_size = image.shape[0]
assert (num_context_points <= (image_size ** 2))
(xx, yy) = np.meshgrid(np.arange(image_size), np.arange(image_size))
indices = np.array(list(zip(xx.flatten(), yy.flatten())))
context_indices = indices[self.random_state.choice((image_size ** 2), size=num_context_points, replace=False)]
context_values = image[tuple(zip(*context_indices))]
dtype_indices = {'names': ['f{}'.format(i) for i in range(2)], 'formats': (2 * [indices.dtype])}
test_indices_structured = np.setdiff1d(indices.view(dtype_indices), context_indices.view(dtype_indices))
test_indices = test_indices_structured.view(indices.dtype).reshape((- 1), 2)
test_values = image[tuple(zip(*test_indices))]
return (np.array(context_indices, dtype=self.dtype), np.array(context_values, dtype=self.dtype), np.array(test_indices, dtype=self.dtype), np.array(test_values, dtype=self.dtype))
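# Illustrative sketch (not part of the original class): the structured-array view in
# _image_to_context_transform is a vectorized set difference on integer coordinate pairs.
# Toy example on a 2x2 pixel grid:
_xx, _yy = np.meshgrid(np.arange(2), np.arange(2))
_all_idx = np.array(list(zip(_xx.flatten(), _yy.flatten())))  # [[0,0],[1,0],[0,1],[1,1]]
_ctx_idx = _all_idx[:2]  # pretend the first two pixels are the observed context
_dt = {'names': ['f0', 'f1'], 'formats': 2 * [_all_idx.dtype]}
_rest_idx = np.setdiff1d(_all_idx.view(_dt), _ctx_idx.view(_dt)).view(_all_idx.dtype).reshape(-1, 2)
assert _rest_idx.shape == (2, 2)  # the two remaining pixels become test points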
|
class SinusoidDataset(MetaDataset):
def __init__(self, amp_low=0.7, amp_high=1.3, period_low=1.5, period_high=1.5, x_shift_mean=0.0, x_shift_std=0.1, y_shift_mean=5.0, y_shift_std=0.1, slope_mean=0.5, slope_std=0.2, noise_std=0.1, x_low=(- 5), x_high=5, random_state=None):
super().__init__(random_state)
assert ((y_shift_std >= 0) and (noise_std >= 0)), 'std must be non-negative'
(self.amp_low, self.amp_high) = (amp_low, amp_high)
(self.period_low, self.period_high) = (period_low, period_high)
(self.y_shift_mean, self.y_shift_std) = (y_shift_mean, y_shift_std)
(self.x_shift_mean, self.x_shift_std) = (x_shift_mean, x_shift_std)
(self.slope_mean, self.slope_std) = (slope_mean, slope_std)
self.noise_std = noise_std
(self.x_low, self.x_high) = (x_low, x_high)
def generate_meta_test_data(self, n_tasks, n_samples_context, n_samples_test):
assert (n_samples_test > 0)
meta_test_tuples = []
for i in range(n_tasks):
f = self._sample_sinusoid()
X = self.random_state.uniform(self.x_low, self.x_high, size=((n_samples_context + n_samples_test), 1))
Y = (f(X) + (self.noise_std * self.random_state.normal(size=f(X).shape)))
meta_test_tuples.append((X[:n_samples_context], Y[:n_samples_context], X[n_samples_context:], Y[n_samples_context:]))
return meta_test_tuples
def generate_meta_train_data(self, n_tasks, n_samples):
meta_train_tuples = []
for i in range(n_tasks):
f = self._sample_sinusoid()
X = self.random_state.uniform(self.x_low, self.x_high, size=(n_samples, 1))
Y = (f(X) + (self.noise_std * self.random_state.normal(size=f(X).shape)))
meta_train_tuples.append((X, Y))
return meta_train_tuples
def _sample_sinusoid(self):
amplitude = self.random_state.uniform(self.amp_low, self.amp_high)
x_shift = self.random_state.normal(loc=self.x_shift_mean, scale=self.x_shift_std)
y_shift = self.random_state.normal(loc=self.y_shift_mean, scale=self.y_shift_std)
slope = self.random_state.normal(loc=self.slope_mean, scale=self.slope_std)
period = self.random_state.uniform(self.period_low, self.period_high)
return (lambda x: (((slope * x) + (amplitude * np.sin((period * (x - x_shift))))) + y_shift))
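# Usage sketch (not part of the original class): every task is a noisy affine-plus-sine
# function; meta-train tasks are (X, Y) pairs, meta-test tasks are split into context
# and test points.
_sin_data = SinusoidDataset(random_state=np.random.RandomState(0))
_sin_train = _sin_data.generate_meta_train_data(n_tasks=3, n_samples=5)
_sin_test = _sin_data.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
assert _sin_train[0][0].shape == (5, 1) and _sin_train[0][1].shape == (5, 1)
assert _sin_test[0][2].shape == (10, 1)  # test inputs of the first meta-test task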
|
class SinusoidNonstationaryDataset(MetaDataset):
def __init__(self, noise_std=0.0, x_low=(- 5), x_high=5, random_state=None):
super().__init__(random_state)
self.noise_std = noise_std
(self.x_low, self.x_high) = (x_low, x_high)
def generate_meta_test_data(self, n_tasks, n_samples_context, n_samples_test):
assert (n_samples_test > 0)
meta_test_tuples = []
for i in range(n_tasks):
f = self._sample_fun()
X = self.random_state.uniform(self.x_low, self.x_high, size=((n_samples_context + n_samples_test), 1))
Y = f(X)
meta_test_tuples.append((X[:n_samples_context], Y[:n_samples_context], X[n_samples_context:], Y[n_samples_context:]))
return meta_test_tuples
def generate_meta_train_data(self, n_tasks, n_samples):
meta_train_tuples = []
for i in range(n_tasks):
f = self._sample_fun()
X = self.random_state.uniform(self.x_low, self.x_high, size=(n_samples, 1))
Y = f(X)
meta_train_tuples.append((X, Y))
return meta_train_tuples
def _sample_fun(self):
intersect = self.random_state.normal(loc=(- 2.0), scale=0.2)
slope = self.random_state.normal(loc=1, scale=0.3)
freq = (lambda x: (1 + np.abs(x)))
mean = (lambda x: (intersect + (slope * x)))
return (lambda x: ((mean(x) + np.sin((freq(x) * x))) + self.random_state.normal(loc=0, scale=self.noise_std, size=x.shape)))
|
class GPFunctionsDataset(MetaDataset):
def __init__(self, noise_std=0.1, lengthscale=1.0, mean=0.0, x_low=(- 5), x_high=5, random_state=None):
(self.noise_std, self.lengthscale, self.mean) = (noise_std, lengthscale, mean)
(self.x_low, self.x_high) = (x_low, x_high)
super().__init__(random_state)
def generate_meta_test_data(self, n_tasks, n_samples_context, n_samples_test):
assert (n_samples_test > 0)
meta_test_tuples = []
for i in range(n_tasks):
X = self.random_state.uniform(self.x_low, self.x_high, size=((n_samples_context + n_samples_test), 1))
Y = self._gp_fun_from_prior(X)
meta_test_tuples.append((X[:n_samples_context], Y[:n_samples_context], X[n_samples_context:], Y[n_samples_context:]))
return meta_test_tuples
def generate_meta_train_data(self, n_tasks, n_samples):
meta_train_tuples = []
for i in range(n_tasks):
X = self.random_state.uniform(self.x_low, self.x_high, size=(n_samples, 1))
Y = self._gp_fun_from_prior(X)
meta_train_tuples.append((X, Y))
return meta_train_tuples
def _gp_fun_from_prior(self, X):
assert (X.ndim == 2)
n = X.shape[0]
def kernel(a, b, lengthscale):
sqdist = ((np.sum((a ** 2), 1).reshape((- 1), 1) + np.sum((b ** 2), 1)) - (2 * np.dot(a, b.T)))
return np.exp((((- 0.5) * (1 / lengthscale)) * sqdist))
K_ss = kernel(X, X, self.lengthscale)
L = np.linalg.cholesky((K_ss + (1e-08 * np.eye(n))))
f = (self.mean + np.dot(L, self.random_state.normal(size=(n, 1))))
y = (f + self.random_state.normal(scale=self.noise_std, size=f.shape))
return y
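# Usage sketch (not part of the original class): each task function is a draw from a
# zero-mean GP prior with an SE kernel; the Cholesky factor L of the kernel matrix turns
# i.i.d. standard normals eps into the correlated sample f = mean + L @ eps.
_gp_data = GPFunctionsDataset(noise_std=0.1, lengthscale=1.0, random_state=np.random.RandomState(0))
_gp_train = _gp_data.generate_meta_train_data(n_tasks=2, n_samples=8)
assert _gp_train[0][0].shape == (8, 1) and _gp_train[0][1].shape == (8, 1)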
|
class CauchyDataset(MetaDataset):
def __init__(self, noise_std=0.05, ndim_x=2, random_state=None):
self.noise_std = noise_std
self.ndim_x = ndim_x
super().__init__(random_state)
def generate_meta_train_data(self, n_tasks, n_samples):
meta_train_tuples = []
for i in range(n_tasks):
X = truncnorm.rvs((- 3), 2, loc=0, scale=2.5, size=(n_samples, self.ndim_x), random_state=self.random_state)
Y = self._gp_fun_from_prior(X)
meta_train_tuples.append((X, Y))
return meta_train_tuples
def generate_meta_test_data(self, n_tasks, n_samples_context, n_samples_test):
assert (n_samples_test > 0)
meta_test_tuples = []
for i in range(n_tasks):
X = truncnorm.rvs((- 3), 2, loc=0, scale=2.5, size=((n_samples_context + n_samples_test), self.ndim_x), random_state=self.random_state)
Y = self._gp_fun_from_prior(X)
meta_test_tuples.append((X[:n_samples_context], Y[:n_samples_context], X[n_samples_context:], Y[n_samples_context:]))
return meta_test_tuples
def _mean(self, x):
loc1 = ((- 1) * np.ones(x.shape[(- 1)]))
loc2 = (2 * np.ones(x.shape[(- 1)]))
cauchy1 = (1 / (np.pi * (1 + (np.linalg.norm((x - loc1), axis=(- 1)) ** 2))))
cauchy2 = (1 / (np.pi * (1 + (np.linalg.norm((x - loc2), axis=(- 1)) ** 2))))
return (((6 * cauchy1) + (3 * cauchy2)) + 1)
def _gp_fun_from_prior(self, X):
assert (X.ndim == 2)
n = X.shape[0]
def kernel(a, b, lengthscale):
sqdist = ((np.sum((a ** 2), 1).reshape((- 1), 1) + np.sum((b ** 2), 1)) - (2 * np.dot(a, b.T)))
return np.exp((((- 0.5) * (1 / lengthscale)) * sqdist))
K_ss = kernel(X, X, 0.5)
L = np.linalg.cholesky((K_ss + (1e-08 * np.eye(n))))
f = (self._mean(X) + np.dot(L, self.random_state.normal(scale=0.2, size=(n, 1))).flatten())
y = (f + self.random_state.normal(scale=self.noise_std, size=f.shape))
return y.reshape((- 1), 1)
|
class SwissfelDataset(MetaDataset):
runs_12dim = [{'experiment': '2018_10_31/line_ucb_ascent', 'run': 0}, {'experiment': '2018_10_31/line_ucb_ascent', 'run': 1}, {'experiment': '2018_10_31/line_ucb_ascent', 'run': 2}, {'experiment': '2018_10_31/line_ucb', 'run': 0}, {'experiment': '2018_10_31/line_ucb', 'run': 1}, {'experiment': '2018_10_31/line_ucb', 'run': 2}, {'experiment': '2018_10_31/neldermead', 'run': 0}, {'experiment': '2018_10_31/neldermead', 'run': 1}, {'experiment': '2018_10_31/neldermead', 'run': 2}]
runs_24dim = [{'experiment': '2018_11_01/line_ucb_ascent_bpm_24', 'run': 0}, {'experiment': '2018_11_01/line_ucb_ascent_bpm_24', 'run': 1}, {'experiment': '2018_11_01/line_ucb_ascent_bpm_24', 'run': 3}, {'experiment': '2018_11_01/line_ucb_ascent_bpm_24_small', 'run': 0}, {'experiment': '2018_11_01/lipschitz_line_ucb_bpm_24', 'run': 0}, {'experiment': '2018_11_01/neldermead_bpm_24', 'run': 0}, {'experiment': '2018_11_01/neldermead_bpm_24', 'run': 1}, {'experiment': '2018_11_01/parameter_scan_bpm_24', 'run': 0}]
def __init__(self, random_state=None, param_space_id=0, swissfel_dir=None):
super().__init__(random_state)
self.swissfel_dir = (SWISSFEL_DIR if (swissfel_dir is None) else swissfel_dir)
if (param_space_id == 0):
run_specs = copy.deepcopy(self.runs_12dim)
elif (param_space_id == 1):
run_specs = copy.deepcopy(self.runs_24dim)
else:
raise NotImplementedError
self.random_state.shuffle(run_specs)
self.run_specs_train = run_specs[:5]
self.run_specs_test = run_specs[5:]
def _load_data(self, experiment, run=0):
path = os.path.join(self.swissfel_dir, experiment)
import h5py
hdf5_path = os.path.join(path, 'data/evaluations.hdf5')
dset = h5py.File(hdf5_path, 'r')
run = str(run)
data = dset['1'][run][()]
dset.close()
config_path = os.path.join(path, 'experiment.yaml')
config_file = open(config_path, 'r')
import yaml
files = yaml.safe_load(config_file)['swissfel.interface']['channel_config_set']
if (not isinstance(files, list)):
files = [files]
files += ['channel_config_set.txt']
parameters = []
for file in files:
params_path = os.path.join(path, 'sf', os.path.split(file)[1])
if (not os.path.exists(params_path)):
continue
frame = pd.read_csv(params_path, comment='#')
parameters += frame['pv'].tolist()
return (data, parameters)
def _load_meta_dataset(self, train=True):
run_specs = (self.run_specs_train if train else self.run_specs_test)
data_tuples = []
for run_spec in run_specs:
(data, parameters) = self._load_data(**run_spec)
data_tuples.append((data['x'], data['y']))
assert (len(set([X.shape[(- 1)] for (X, _) in data_tuples])) == 1)
assert all([(X.shape[0] == Y.shape[0]) for (X, Y) in data_tuples])
return data_tuples
def generate_meta_train_data(self, n_tasks=5, n_samples=200):
assert (n_tasks == len(self.run_specs_train)), ('number of tasks must be %i' % len(self.run_specs_train))
meta_train_tuples = self._load_meta_dataset(train=True)
max_n_samples = max([X.shape[0] for (X, _) in meta_train_tuples])
assert (n_samples <= max_n_samples), ('only %i number of samples available' % max_n_samples)
meta_train_tuples = [(X[:n_samples], Y[:n_samples]) for (X, Y) in meta_train_tuples]
return meta_train_tuples
def generate_meta_test_data(self, n_tasks=None, n_samples_context=200, n_samples_test=400):
if (n_tasks is None):
n_tasks = len(self.run_specs_test)
assert (n_tasks == len(self.run_specs_test)), ('number of tasks must be %i' % len(self.run_specs_test))
meta_test_tuples = self._load_meta_dataset(train=False)
max_n_samples = min([X.shape[0] for (X, _) in meta_test_tuples])
assert ((n_samples_context + n_samples_test) <= max_n_samples), ('only %i number of samples available' % max_n_samples)
idx = np.arange((n_samples_context + n_samples_test))
self.random_state.shuffle(idx)
(idx_context, idx_test) = (idx[:n_samples_context], idx[n_samples_context:])
meta_test_tuples = [(X[idx_context], Y[idx_context], X[idx_test], Y[idx_test]) for (X, Y) in meta_test_tuples]
return meta_test_tuples
|
def provide_data(dataset, seed=28, n_train_tasks=None, n_samples=None):
import numpy as np
N_TEST_TASKS = 200
N_VALID_TASKS = 200
N_TEST_SAMPLES = 200
# Prepare Data
if ('sin-nonstat' in dataset):
if (len(dataset.split('_')) == 2):
n_train_tasks = int(dataset.split('_')[(- 1)])
dataset = SinusoidNonstationaryDataset(random_state=np.random.RandomState((seed + 1)))
if (n_samples is None):
n_train_samples = n_context_samples = 20
else:
n_train_samples = n_context_samples = n_samples
if (n_train_tasks is None):
n_train_tasks = 20
elif ('sin' in dataset):
if (len(dataset.split('_')) == 2):
n_train_tasks = int(dataset.split('_')[(- 1)])
dataset = SinusoidDataset(random_state=np.random.RandomState((seed + 1)))
if (n_samples is None):
n_train_samples = n_context_samples = 5
else:
n_train_samples = n_context_samples = n_samples
if (n_train_tasks is None):
n_train_tasks = 20
elif ('gp_funcs' in dataset):
dataset = GPFunctionsDataset(random_state=np.random.RandomState((seed + 1)))
if (n_samples is None):
n_train_samples = n_context_samples = 5
else:
n_train_samples = n_context_samples = n_samples
if (n_train_tasks is None):
n_train_tasks = 20
elif ('cauchy' in dataset):
if (len(dataset.split('_')) == 2):
n_train_tasks = int(dataset.split('_')[(- 1)])
dataset = CauchyDataset(random_state=np.random.RandomState((seed + 1)))
if (n_samples is None):
n_train_samples = n_context_samples = 20
else:
n_train_samples = n_context_samples = n_samples
if (n_train_tasks is None):
n_train_tasks = 20
elif (dataset == 'mnist'):
dataset = MNISTRegressionDataset(random_state=np.random.RandomState((seed + 1)))
N_TEST_SAMPLES = (- 1)
N_VALID_TASKS = N_TEST_TASKS = 1000
n_context_samples = 200
n_train_samples = (28 * 28)
elif ('physionet' in dataset):
variable_id = int(dataset[(- 1)])
assert (0 <= variable_id <= 5)
dataset = PhysionetDataset(random_state=np.random.RandomState((seed + 1)), variable_id=variable_id)
n_context_samples = 24
n_train_samples = 47
n_train_tasks = 100
N_VALID_TASKS = N_TEST_TASKS = 500
elif (dataset == 'pendulum'):
dataset = PendulumDataset(random_state=np.random.RandomState((seed + 1)))
if (n_train_tasks is None):
n_train_tasks = 10
if (n_samples is None):
n_train_samples = n_context_samples = 20
else:
n_train_samples = n_context_samples = n_samples
elif (dataset == 'swissfel'):
dataset = SwissfelDataset(random_state=np.random.RandomState((seed + 1)))
if (n_train_tasks is None):
n_train_tasks = 5
if (n_samples is None):
n_train_samples = n_context_samples = 200
else:
n_train_samples = n_context_samples = n_samples
N_TEST_SAMPLES = 200
data_train = dataset.generate_meta_train_data(n_tasks=n_train_tasks, n_samples=n_train_samples)
data_test_valid = dataset.generate_meta_test_data(n_samples_context=n_context_samples, n_samples_test=N_TEST_SAMPLES)
return (data_train, data_test_valid, data_test_valid)
else:
raise NotImplementedError('Does not recognize dataset flag')
data_train = dataset.generate_meta_train_data(n_tasks=n_train_tasks, n_samples=n_train_samples)
data_test_valid = dataset.generate_meta_test_data(n_tasks=(N_TEST_TASKS + N_VALID_TASKS), n_samples_context=n_context_samples, n_samples_test=N_TEST_SAMPLES)
data_valid = data_test_valid[N_VALID_TASKS:]
data_test = data_test_valid[:N_VALID_TASKS]
return (data_train, data_valid, data_test)
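# Usage sketch (not part of the original function), assuming the dataset classes above are
# in scope: flags of the form '<name>_<k>' fix the number of meta-training tasks, e.g.
# 'sin_20' yields 20 sinusoid training tasks (5 samples each) plus 400 validation/test tasks.
_data_train, _data_valid, _data_test = provide_data('sin_20', seed=28)
assert len(_data_train) == 20
assert all(len(task_tuple) == 4 for task_tuple in _data_test)  # (x_context, y_context, x_test, y_test)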
|
def main(argv):
hparam_search_modules = [algo_map_dict[algo_str] for algo_str in FLAGS.algos.split(',')]
command_list = []
for hparam_search_module in hparam_search_modules:
exp_config = {'dataset': ['cauchy_20', 'sin_20', 'physionet_0', 'physionet_2', 'swissfel'], 'covar_module': ['NN'], 'num_cpus': [(2 * FLAGS.n_cpus)], 'metric': [FLAGS.metric]}
if FLAGS.load_analysis:
exp_config['load_analysis'] = [True]
if FLAGS.resume:
exp_config['resume'] = [True]
command_list += generate_launch_commands(hparam_search_module, exp_config, check_flags=False)
print(command_list)
if FLAGS.cluster:
cluster_cmds = []
for python_cmd in command_list:
bsub_cmd = (((('bsub -W %i:59' % (3 if FLAGS.load_analysis else 23)) + ' -R "rusage[mem=6000]"') + (' -R "rusage[ngpus_excl_p=%i]"' % FLAGS.n_gpus)) + (' -R "span[hosts=1]" -n %i ' % FLAGS.n_cpus))
cluster_cmds.append(((bsub_cmd + ' ') + python_cmd))
answer = input(('About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n' % len(cluster_cmds)))
if (answer == 'yes'):
for cmd in cluster_cmds:
if FLAGS.dry:
print(cmd)
else:
os.system(cmd)
else:
answer = input(('About to run %i compute jobs in a for loop. Proceed? [yes/no]\n' % len(command_list)))
if (answer == 'yes'):
for cmd in command_list:
if FLAGS.dry:
print(cmd)
else:
os.system(cmd)
|
def main(argv):
assert (FLAGS.dataset in ['sin', 'cauchy'])
hparam_search_modules = [algo_map_dict[algo_str] for algo_str in FLAGS.algos.split(',')]
command_list = []
for hparam_search_module in hparam_search_modules:
exp_config = {'dataset': [('%s_%i' % (FLAGS.dataset, n_tasks)) for n_tasks in reversed([5, 10, 20, 40, 80, 160, 320])], 'covar_module': ['NN'], 'num_cpus': [(2 * FLAGS.n_cpus)], 'metric': [FLAGS.metric]}
if FLAGS.load_analysis:
exp_config['load_analysis'] = [True]
if FLAGS.resume:
exp_config['resume'] = [True]
command_list += generate_launch_commands(hparam_search_module, exp_config, check_flags=False)
print(command_list)
if FLAGS.cluster:
cluster_cmds = []
for python_cmd in command_list:
bsub_cmd = (((('bsub -W %i:59' % (3 if FLAGS.load_analysis else 23)) + ' -R "rusage[mem=6000]"') + (' -R "rusage[ngpus_excl_p=%i]"' % FLAGS.n_gpus)) + (' -R "span[hosts=1]" -n %i ' % FLAGS.n_cpus))
cluster_cmds.append(((bsub_cmd + ' ') + python_cmd))
answer = input(('About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n' % len(cluster_cmds)))
if (answer == 'yes'):
for cmd in cluster_cmds:
if FLAGS.dry:
print(cmd)
else:
os.system(cmd)
else:
answer = input(('About to run %i compute jobs in a for loop. Proceed? [yes/no]\n' % len(command_list)))
if (answer == 'yes'):
for cmd in command_list:
if FLAGS.dry:
print(cmd)
else:
os.system(cmd)
|
def main(args):
ray.init(num_cpus=args.num_cpus, memory=(3000 * (1024 ** 2)), object_store_memory=(300 * (1024 ** 2)))
def train_reg(config, reporter):
sys.path.append(BASE_DIR)
from experiments.data_sim import provide_data
(data_train, data_valid, _) = provide_data(dataset=args.dataset, seed=SEED)
from meta_learn.GPR_meta_mlap import GPRegressionMetaLearnedPAC
torch.set_num_threads(N_THREADS_PER_RUN)
model = GPRegressionMetaLearnedPAC(data_train, **config)
with gpytorch.settings.max_cg_iterations(300):
log_period = 5000
train_iter = 0
loss = 0.0
diagnostics_dict = {}
for i in range((config['num_iter_fit'] // log_period)):
(loss, diagnostics_dict) = model.meta_fit(verbose=False, log_period=1000, eval_period=100000, n_iter=log_period)
train_iter += log_period
if (i < ((config['num_iter_fit'] // log_period) - 1)):
reporter(timesteps_total=train_iter, loss=loss, test_rmse=math.nan, test_ll=math.nan, calib_err=math.nan, **diagnostics_dict)
(ll, rmse, calib_err) = model.eval_datasets(data_valid, n_iter_meta_test=3000)
reporter(timesteps_total=train_iter, loss=loss, test_rmse=rmse, test_ll=ll, calib_err=calib_err, **diagnostics_dict)
@ray.remote
def train_test(config):
results_dict = config
try:
sys.path.append(BASE_DIR)
from experiments.data_sim import provide_data
(data_train, _, data_test) = provide_data(dataset=args.dataset, seed=SEED)
from meta_learn.GPR_meta_mlap import GPRegressionMetaLearnedPAC
torch.set_num_threads(N_THREADS_PER_RUN)
with gpytorch.settings.max_cg_iterations(500):
model = GPRegressionMetaLearnedPAC(data_train, **config)
model.meta_fit(data_test, log_period=1000, eval_period=100000)
(ll, rmse, calib_err) = model.eval_datasets(data_test, n_iter_meta_test=3000)
results_dict.update(ll=ll, rmse=rmse, calib_err=calib_err)
except Exception as e:
print(e)
results_dict.update(ll=np.nan, rmse=np.nan, calib_err=np.nan)
return results_dict
assert (args.metric in ['test_ll', 'test_rmse'])
exp_name = ('tune_meta_pac_%s_kernel_%s' % (args.covar_module, args.dataset))
if args.load_analysis:
analysis_dir = os.path.join(HPARAM_EXP_DIR, exp_name)
assert os.path.isdir(analysis_dir), 'load_analysis_from must be a valid directory'
print(('Loading existing tune analysis results from %s' % analysis_dir))
analysis = Analysis(analysis_dir)
else:
space = {'task_kl_weight': hp.loguniform('task_kl_weight', math.log(0.05), math.log(1.0)), 'meta_kl_weight': hp.loguniform('meta_kl_weight', math.log(1e-07), math.log(1.0)), 'lr': hp.loguniform('lr', math.log(0.0001), math.log(0.001)), 'lr_decay': hp.loguniform('lr_decay', math.log(0.92), math.log(0.97)), 'posterior_lr_multiplier': hp.loguniform('posterior_lr_multiplier', math.log(1.0), math.log(10.0)), 'svi_batch_size': hp.choice('svi_batch_size', [5, 10]), 'task_batch_size': hp.choice('task_batch_size', [5, 20])}
config = {'num_samples': 150, 'config': {'num_iter_fit': 40000, 'kernel_nn_layers': [32, 32, 32, 32], 'mean_nn_layers': [32, 32, 32, 32], 'random_seed': SEED, 'mean_module': 'NN', 'covar_module': args.covar_module, 'normalize_data': True, 'cov_type': 'diag'}, 'stop': {'timesteps_total': 100000}}
config['config'].update()
algo = HyperOptSearch(space, max_concurrent=args.num_cpus, metric=args.metric, mode=('max' if (args.metric == 'test_ll') else 'min'))
analysis = custom_tune.run(train_reg, name=exp_name, search_alg=algo, verbose=1, raise_on_failed_trial=False, local_dir=HPARAM_EXP_DIR, resume=args.resume, **config)
from experiments.hyperparam_search.util import select_best_configs
if (args.metric == 'test_ll'):
best_configs = select_best_configs(analysis, metric='test_ll', mode='max', N=args.n_test_runs)
elif (args.metric == 'test_rmse'):
best_configs = select_best_configs(analysis, metric='test_rmse', mode='min', N=args.n_test_runs)
else:
raise AssertionError('metric must be test_ll or test_rmse')
test_configs = []
for config in best_configs:
for seed in TEST_SEEDS:
test_config = copy.deepcopy(config)
test_config.update({'random_seed': seed})
test_configs.append(test_config)
result_dicts = ray.get([train_test.remote(config) for config in test_configs])
result_df = pd.DataFrame(result_dicts)
print(result_df.to_string())
csv_file_name = os.path.join(HPARAM_EXP_DIR, ('%s_%s.csv' % (exp_name, datetime.now().strftime('%b_%d_%Y_%H:%M:%S'))))
result_df.to_csv(csv_file_name)
print(('\nSaved result csv to %s' % csv_file_name))
|
def main(args):
ray.init(num_cpus=args.num_cpus, memory=(1500 * (1024 ** 2)), object_store_memory=(300 * (1024 ** 2)))
def train_reg(config, reporter):
sys.path.append(BASE_DIR)
from experiments.data_sim import provide_data
(data_train, data_valid, _) = provide_data(dataset=args.dataset, seed=SEED)
from meta_learn.GPR_meta_mll import GPRegressionMetaLearned
torch.set_num_threads(N_THREADS_PER_RUN)
model = GPRegressionMetaLearned(data_train, **config)
with gpytorch.settings.max_cg_iterations(300):
eval_period = 3000
train_iter = 0
for i in range((config['num_iter_fit'] // eval_period)):
loss = model.meta_fit(verbose=False, log_period=2000, n_iter=eval_period)
train_iter += eval_period
(ll, rmse, calib_err) = model.eval_datasets(data_valid)
reporter(timesteps_total=train_iter, loss=loss, test_rmse=rmse, test_ll=ll, calib_err=calib_err)
@ray.remote
def train_test(config):
results_dict = config
try:
sys.path.append(BASE_DIR)
from experiments.data_sim import provide_data
(data_train, _, data_test) = provide_data(dataset=args.dataset, seed=SEED)
from meta_learn.GPR_meta_mll import GPRegressionMetaLearned
torch.set_num_threads(N_THREADS_PER_RUN)
with gpytorch.settings.max_cg_iterations(500):
model = GPRegressionMetaLearned(data_train, **config)
model.meta_fit(data_test, log_period=5000)
(ll, rmse, calib_err) = model.eval_datasets(data_test)
results_dict.update(ll=ll, rmse=rmse, calib_err=calib_err)
except Exception as e:
print(e)
results_dict.update(ll=np.nan, rmse=np.nan, calib_err=np.nan)
return results_dict
assert (args.metric in ['test_ll', 'test_rmse'])
exp_name = ('tune_meta_mll_%s_kernel_%s' % (args.covar_module, args.dataset))
if args.load_analysis:
analysis_dir = os.path.join(HPARAM_EXP_DIR, exp_name)
assert os.path.isdir(analysis_dir), 'load_analysis_from must be a valid directory'
print(('Loading existing tune analysis results from %s' % analysis_dir))
analysis = Analysis(analysis_dir)
else:
space = {'weight_decay': hp.loguniform('weight_decay', math.log(1e-06), math.log(1.0)), 'lr_params': hp.loguniform('lr_params', math.log(0.0001), math.log(0.005)), 'lr_decay': hp.loguniform('lr_decay', math.log(0.8), math.log(1.0)), 'task_batch_size': hp.choice('task_batch_size', [4, 10])}
config = {'num_samples': 200, 'config': {'num_iter_fit': 25000, 'kernel_nn_layers': [32, 32, 32, 32], 'mean_nn_layers': [32, 32, 32, 32], 'random_seed': SEED, 'mean_module': 'NN', 'covar_module': args.covar_module}, 'stop': {'timesteps_total': 25000}}
algo = HyperOptSearch(space, max_concurrent=args.num_cpus, metric=args.metric, mode=('max' if (args.metric == 'test_ll') else 'min'))
analysis = tune.run(train_reg, name=exp_name, search_alg=algo, verbose=1, raise_on_failed_trial=False, local_dir=HPARAM_EXP_DIR, **config)
from experiments.hyperparam_search.util import select_best_configs
if (args.metric == 'test_ll'):
best_configs = select_best_configs(analysis, metric='test_ll', mode='max', N=args.n_test_runs)
elif (args.metric == 'test_rmse'):
best_configs = select_best_configs(analysis, metric='test_rmse', mode='min', N=args.n_test_runs)
else:
raise AssertionError('metric must be test_ll or test_rmse')
test_configs = []
for config in best_configs:
for seed in TEST_SEEDS:
test_config = copy.deepcopy(config)
test_config.update({'random_seed': seed})
test_configs.append(test_config)
result_dicts = ray.get([train_test.remote(config) for config in test_configs])
result_df = pd.DataFrame(result_dicts)
print(result_df.to_string())
csv_file_name = os.path.join(HPARAM_EXP_DIR, ('%s_%s.csv' % (exp_name, datetime.now().strftime('%b_%d_%Y_%H:%M:%S'))))
result_df.to_csv(csv_file_name)
print(('\nSaved result csv to %s' % csv_file_name))
|
def main(args):
ray.init(num_cpus=args.num_cpus, memory=(1800 * (1024 ** 2)), object_store_memory=(300 * (1024 ** 2)))
def train_reg(config, reporter):
sys.path.append(BASE_DIR)
from experiments.data_sim import provide_data
(data_train, data_valid, _) = provide_data(dataset=args.dataset, seed=SEED)
from meta_learn.GPR_meta_svgd import GPRegressionMetaLearnedSVGD
torch.set_num_threads(N_THREADS_PER_RUN)
model = GPRegressionMetaLearnedSVGD(data_train, **config)
with gpytorch.settings.max_cg_iterations(300):
eval_period = 3000
train_iter = 0
for i in range((config['num_iter_fit'] // eval_period)):
loss = model.meta_fit(verbose=False, log_period=2000, n_iter=eval_period)
train_iter += eval_period
(ll, rmse, calib_err) = model.eval_datasets(data_valid)
reporter(timesteps_total=train_iter, loss=loss, test_rmse=rmse, test_ll=ll, calib_err=calib_err)
@ray.remote
def train_test(config):
results_dict = config
try:
sys.path.append(BASE_DIR)
from experiments.data_sim import provide_data
(data_train, _, data_test) = provide_data(dataset=args.dataset, seed=SEED)
from meta_learn.GPR_meta_svgd import GPRegressionMetaLearnedSVGD
torch.set_num_threads(N_THREADS_PER_RUN)
with gpytorch.settings.max_cg_iterations(300):
model = GPRegressionMetaLearnedSVGD(data_train, **config)
model.meta_fit(data_test, log_period=5000)
(ll, rmse, calib_err) = model.eval_datasets(data_test)
results_dict.update(ll=ll, rmse=rmse, calib_err=calib_err)
except Exception as e:
print(e)
results_dict.update(ll=np.nan, rmse=np.nan, calib_err=np.nan)
return results_dict
assert (args.metric in ['test_ll', 'test_rmse'])
exp_name = ('tune_meta_svgd_%s_kernel_%s' % (args.covar_module, args.dataset))
if args.load_analysis:
analysis_dir = os.path.join(HPARAM_EXP_DIR, exp_name)
assert os.path.isdir(analysis_dir), 'load_analysis_from must be a valid directory'
print(('Loading existing tune analysis results from %s' % analysis_dir))
analysis = Analysis(analysis_dir)
else:
space = {'task_kl_weight': hp.loguniform('task_kl_weight', math.log(0.08), math.log(1.0)), 'prior_factor': hp.loguniform('prior_factor', math.log(1e-06), math.log(0.2)), 'lr': hp.loguniform('lr', math.log(0.0005), math.log(0.005)), 'lr_decay': hp.loguniform('lr_decay', math.log(0.8), math.log(1.0)), 'bandwidth': hp.loguniform('bandwidth', math.log(0.001), math.log(500.0)), 'num_particles': hp.choice('num_particles', [10, 50]), 'task_batch_size': hp.choice('task_batch_size', [4, 10])}
config = {'num_samples': 200, 'config': {'num_iter_fit': 30000, 'kernel_nn_layers': [32, 32, 32, 32], 'mean_nn_layers': [32, 32, 32, 32], 'random_seed': SEED, 'mean_module': 'NN', 'covar_module': args.covar_module, 'normalize_data': True}, 'stop': {'timesteps_total': 30000}}
algo = HyperOptSearch(space, max_concurrent=args.num_cpus, metric=args.metric, mode=('max' if (args.metric == 'test_ll') else 'min'))
analysis = tune.run(train_reg, name=exp_name, search_alg=algo, verbose=1, raise_on_failed_trial=False, local_dir=HPARAM_EXP_DIR, **config)
from experiments.hyperparam_search.util import select_best_configs
if (args.metric == 'test_ll'):
best_configs = select_best_configs(analysis, metric='test_ll', mode='max', N=args.n_test_runs)
elif (args.metric == 'test_rmse'):
best_configs = select_best_configs(analysis, metric='test_rmse', mode='min', N=args.n_test_runs)
else:
raise AssertionError('metric must be test_ll or test_rmse')
test_configs = []
for config in best_configs:
for seed in TEST_SEEDS:
test_config = copy.deepcopy(config)
test_config.update({'random_seed': seed})
test_configs.append(test_config)
result_dicts = ray.get([train_test.remote(config) for config in test_configs])
result_df = pd.DataFrame(result_dicts)
print(result_df.to_string())
csv_file_name = os.path.join(HPARAM_EXP_DIR, ('%s_%s.csv' % (exp_name, datetime.now().strftime('%b_%d_%Y_%H:%M:%S'))))
result_df.to_csv(csv_file_name)
print(('\nSaved result csv to %s' % csv_file_name))
|
def main(args):
ray.init(num_cpus=args.num_cpus, memory=(3000 * (1024 ** 2)), object_store_memory=(300 * (1024 ** 2)))
def train_reg(config, reporter):
sys.path.append(BASE_DIR)
from experiments.data_sim import provide_data
(data_train, data_valid, _) = provide_data(dataset=args.dataset, seed=SEED)
from meta_learn.GPR_meta_vi import GPRegressionMetaLearnedVI
torch.set_num_threads(N_THREADS_PER_RUN)
model = GPRegressionMetaLearnedVI(data_train, **config)
with gpytorch.settings.max_cg_iterations(300):
eval_period = 3000
train_iter = 0
for i in range((config['num_iter_fit'] // eval_period)):
loss = model.meta_fit(verbose=False, log_period=2000, n_iter=eval_period)
train_iter += eval_period
(ll, rmse, calib_err) = model.eval_datasets(data_valid)
reporter(timesteps_total=train_iter, loss=loss, test_rmse=rmse, test_ll=ll, calib_err=calib_err)
@ray.remote
def train_test(config):
results_dict = config
try:
sys.path.append(BASE_DIR)
from experiments.data_sim import provide_data
(data_train, _, data_test) = provide_data(dataset=args.dataset, seed=SEED)
from meta_learn.GPR_meta_vi import GPRegressionMetaLearnedVI
torch.set_num_threads(N_THREADS_PER_RUN)
with gpytorch.settings.max_cg_iterations(500):
model = GPRegressionMetaLearnedVI(data_train, **config)
model.meta_fit(data_test, log_period=5000)
(ll, rmse, calib_err) = model.eval_datasets(data_test)
results_dict.update(ll=ll, rmse=rmse, calib_err=calib_err)
except Exception as e:
print(e)
results_dict.update(ll=np.nan, rmse=np.nan, calib_err=np.nan)
return results_dict
assert (args.metric in ['test_ll', 'test_rmse'])
exp_name = ('tune_meta_vi_%s_kernel_%s' % (args.covar_module, args.dataset))
if args.load_analysis:
analysis_dir = os.path.join(HPARAM_EXP_DIR, exp_name)
assert os.path.isdir(analysis_dir), 'load_analysis_from must be a valid directory'
print(('Loading existing tune analysis results from %s' % analysis_dir))
analysis = Analysis(analysis_dir)
else:
space = {'task_kl_weight': hp.loguniform('task_kl_weight', math.log(0.05), math.log(1.0)), 'prior_factor': hp.loguniform('prior_factor', math.log(1e-06), math.log(0.2)), 'lr': hp.loguniform('lr', math.log(0.0005), math.log(0.005)), 'lr_decay': hp.loguniform('lr_decay', math.log(0.8), math.log(1.0)), 'svi_batch_size': hp.choice('svi_batch_size', [10, 50]), 'task_batch_size': hp.choice('task_batch_size', [4, 10])}
config = {'num_samples': 200, 'config': {'num_iter_fit': 30000, 'kernel_nn_layers': [32, 32, 32, 32], 'mean_nn_layers': [32, 32, 32, 32], 'random_seed': SEED, 'mean_module': 'NN', 'covar_module': args.covar_module, 'normalize_data': True, 'cov_type': 'diag'}, 'stop': {'timesteps_total': 30000}}
algo = HyperOptSearch(space, max_concurrent=args.num_cpus, metric=args.metric, mode=('max' if (args.metric == 'test_ll') else 'min'))
analysis = tune.run(train_reg, name=exp_name, search_alg=algo, verbose=1, raise_on_failed_trial=False, local_dir=HPARAM_EXP_DIR, **config)
from experiments.hyperparam_search.util import select_best_configs
if (args.metric == 'test_ll'):
best_configs = select_best_configs(analysis, metric='test_ll', mode='max', N=args.n_test_runs)
elif (args.metric == 'test_rmse'):
best_configs = select_best_configs(analysis, metric='test_rmse', mode='min', N=args.n_test_runs)
else:
raise AssertionError('metric must be test_ll or test_rmse')
test_configs = []
for config in best_configs:
for seed in TEST_SEEDS:
test_config = copy.deepcopy(config)
test_config.update({'random_seed': seed})
test_configs.append(test_config)
result_dicts = ray.get([train_test.remote(config) for config in test_configs])
result_df = pd.DataFrame(result_dicts)
print(result_df.to_string())
csv_file_name = os.path.join(HPARAM_EXP_DIR, ('%s_%s.csv' % (exp_name, datetime.now().strftime('%b_%d_%Y_%H:%M:%S'))))
result_df.to_csv(csv_file_name)
print(('\nSaved result csv to %s' % csv_file_name))
|
def select_best_configs(analysis, metric, mode='max', N=5):
try:
rows = analysis._retrieve_rows(metric=metric, mode=mode)
except Exception:
missing_header = []
for (path, df) in analysis.trial_dataframes.items():
try:
df[metric]
header = df.columns
except Exception:
missing_header.append(path)
for path in missing_header:
df = pd.read_csv(os.path.join(path, 'progress.csv'), header=None)
df.columns = header
analysis.trial_dataframes[path] = df
rows = {}
for (path, df) in analysis.trial_dataframes.items():
if (mode == 'max'):
idx = df[metric].idxmax()
elif (mode == 'min'):
idx = df[metric].idxmin()
else:
idx = (- 1)
if np.isnan(idx):
print('NaN value in experiment: ', path)
continue
rows[path] = df.iloc[idx].to_dict()
all_configs = analysis.get_all_configs()
reverse = (mode == 'max')
best_paths = sorted(rows, key=(lambda k: rows[k][metric]), reverse=reverse)[:N]
best_configs = [all_configs[path] for path in best_paths]
return best_configs
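# Hypothetical usage sketch (not part of the original helper): select_best_configs only
# needs an object exposing _retrieve_rows, trial_dataframes and get_all_configs, so a
# minimal stand-in suffices to illustrate the selection logic on fake trial results.
class _FakeAnalysis:
    def __init__(self, trial_results, trial_configs):
        self.trial_dataframes = {path: pd.DataFrame(res) for (path, res) in trial_results.items()}
        self._trial_configs = trial_configs
    def _retrieve_rows(self, metric, mode):
        raise KeyError  # force the manual per-trial fallback above
    def get_all_configs(self):
        return self._trial_configs

_fake = _FakeAnalysis({'trial_a': {'test_ll': [0.1, 0.5]}, 'trial_b': {'test_ll': [0.8, 0.3]}},
                      {'trial_a': {'lr': 0.001}, 'trial_b': {'lr': 0.0001}})
assert select_best_configs(_fake, metric='test_ll', mode='max', N=1) == [{'lr': 0.0001}]  # trial_b peaks at 0.8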
|
def main(argv):
(logger, exp_dir) = setup_exp_doc(FLAGS.exp_name)
if (FLAGS.dataset == 'sin-nonstat'):
dataset = SinusoidNonstationaryDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'sin'):
dataset = SinusoidDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'cauchy'):
dataset = CauchyDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'mnist'):
dataset = MNISTRegressionDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'physionet'):
dataset = PhysionetDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'gp-funcs'):
dataset = GPFunctionsDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
else:
raise NotImplementedError('Does not recognize dataset flag')
data_train = dataset.generate_meta_train_data(n_tasks=FLAGS.n_train_tasks, n_samples=FLAGS.n_train_samples)
data_test = dataset.generate_meta_test_data(n_tasks=FLAGS.n_test_tasks, n_samples_context=FLAGS.n_context_samples, n_samples_test=FLAGS.n_test_samples)
nn_layers = tuple([FLAGS.layer_size for _ in range(FLAGS.num_layers)])
torch.set_num_threads(FLAGS.n_threads)
gp_meta = GPRegressionMetaLearnedSVGD(data_train, weight_prior_std=FLAGS.weight_prior_std, prior_factor=FLAGS.prior_factor, covar_module=FLAGS.covar_module, mean_module=FLAGS.mean_module, kernel_nn_layers=nn_layers, mean_nn_layers=nn_layers, random_seed=FLAGS.seed, optimizer=FLAGS.optimizer, lr=FLAGS.lr, lr_decay=FLAGS.lr_decay, num_iter_fit=FLAGS.n_iter_fit, kernel=FLAGS.kernel, bandwidth=FLAGS.bandwidth, num_particles=FLAGS.num_particles, normalize_data=FLAGS.normalize_data, task_batch_size=FLAGS.task_batch_size)
gp_meta.meta_fit(valid_tuples=data_test[:100], log_period=1000)
(test_ll, rmse, calib_err) = gp_meta.eval_datasets(data_test)
results_dict = {'test_ll': test_ll, 'test_rmse': rmse, 'calib_err': calib_err}
print(results_dict)
save_results(results_dict, exp_dir, log=True)
|
def main(argv):
(logger, exp_dir) = setup_exp_doc(FLAGS.exp_name)
if (FLAGS.dataset == 'swissfel'):
(data_train, _, data_test) = provide_data(dataset='swissfel')
else:
if (FLAGS.dataset == 'sin-nonstat'):
dataset = SinusoidNonstationaryDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'sin'):
dataset = SinusoidDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'cauchy'):
dataset = CauchyDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'mnist'):
dataset = MNISTRegressionDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'physionet'):
dataset = PhysionetDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'gp-funcs'):
dataset = GPFunctionsDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
else:
raise NotImplementedError('Does not recognize dataset flag')
data_train = dataset.generate_meta_train_data(n_tasks=FLAGS.n_train_tasks, n_samples=FLAGS.n_train_samples)
data_test = dataset.generate_meta_test_data(n_tasks=FLAGS.n_test_tasks, n_samples_context=FLAGS.n_context_samples, n_samples_test=FLAGS.n_test_samples)
nn_layers = tuple([FLAGS.layer_size for _ in range(FLAGS.num_layers)])
torch.set_num_threads(FLAGS.n_threads)
gp_meta = GPRegressionMetaLearned(data_train, learning_mode=FLAGS.learning_mode, num_iter_fit=FLAGS.n_iter_fit, covar_module=FLAGS.covar_module, mean_module=FLAGS.mean_module, kernel_nn_layers=nn_layers, mean_nn_layers=nn_layers, weight_decay=FLAGS.weight_decay, lr_params=FLAGS.lr, lr_decay=FLAGS.lr_decay, random_seed=FLAGS.seed, task_batch_size=FLAGS.batch_size, optimizer=FLAGS.optimizer, normalize_data=FLAGS.normalize_data)
gp_meta.meta_fit(valid_tuples=data_test[:100], log_period=2000)
(test_ll, rmse, calib_err) = gp_meta.eval_datasets(data_test)
results_dict = {'test_ll': test_ll, 'test_rmse': rmse, 'calib_err': calib_err}
print(results_dict)
save_results(results_dict, exp_dir, log=True)
|
def main(argv):
(logger, exp_dir) = setup_exp_doc(FLAGS.exp_name)
if (FLAGS.dataset == 'sin-nonstat'):
dataset = SinusoidNonstationaryDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'sin'):
dataset = SinusoidDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'cauchy'):
dataset = CauchyDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'mnist'):
dataset = MNISTRegressionDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'physionet'):
dataset = PhysionetDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'gp-funcs'):
dataset = GPFunctionsDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
else:
raise NotImplementedError('Does not recognize dataset flag')
data_train = dataset.generate_meta_train_data(n_tasks=FLAGS.n_train_tasks, n_samples=FLAGS.n_train_samples)
data_test = dataset.generate_meta_test_data(n_tasks=FLAGS.n_test_tasks, n_samples_context=FLAGS.n_context_samples, n_samples_test=FLAGS.n_test_samples)
nn_layers = tuple([FLAGS.layer_size for _ in range(FLAGS.num_layers)])
torch.set_num_threads(FLAGS.n_threads)
gp_meta = GPRegressionMetaLearnedVI(data_train, weight_prior_std=FLAGS.weight_prior_std, prior_factor=FLAGS.prior_factor, covar_module=FLAGS.covar_module, mean_module=FLAGS.mean_module, kernel_nn_layers=nn_layers, mean_nn_layers=nn_layers, random_seed=FLAGS.seed, optimizer=FLAGS.optimizer, lr=FLAGS.lr, lr_decay=FLAGS.lr_decay, num_iter_fit=FLAGS.n_iter_fit, svi_batch_size=FLAGS.svi_batch_size, normalize_data=FLAGS.normalize_data, cov_type=FLAGS.cov_type, task_batch_size=FLAGS.task_batch_size)
gp_meta.meta_fit(valid_tuples=data_test[:100], log_period=1000)
(test_ll_bayes, rmse_bayes, calib_err_bayes) = gp_meta.eval_datasets(data_test, mode='Bayes')
(test_ll_map, rmse_map, calib_err_map) = gp_meta.eval_datasets(data_test, mode='MAP')
results_dict = {'test_ll_bayes': test_ll_bayes, 'test_rmse_bayes': rmse_bayes, 'calib_err_bayes': calib_err_bayes, 'test_ll_map': test_ll_map, 'rmse_map': rmse_map, 'calib_err_map': calib_err_map}
print(results_dict)
save_results(results_dict, exp_dir, log=True)
|
def main(argv):
(logger, exp_dir) = setup_exp_doc(FLAGS.exp_name)
if (FLAGS.dataset == 'swissfel'):
raise NotImplementedError
else:
if (FLAGS.dataset == 'sin-nonstat'):
dataset = SinusoidNonstationaryDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'sin'):
dataset = SinusoidDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'cauchy'):
dataset = CauchyDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'mnist'):
dataset = MNISTRegressionDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'physionet'):
dataset = PhysionetDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'gp-funcs'):
dataset = GPFunctionsDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
else:
raise NotImplementedError('Does not recognize dataset flag')
meta_train_data = dataset.generate_meta_test_data(n_tasks=1024, n_samples_context=FLAGS.n_context_samples, n_samples_test=FLAGS.n_test_samples)
meta_test_data = dataset.generate_meta_test_data(n_tasks=FLAGS.n_test_tasks, n_samples_context=FLAGS.n_context_samples, n_samples_test=FLAGS.n_test_samples)
nn_layers = tuple([FLAGS.layer_size for _ in range(FLAGS.num_layers)])
torch.set_num_threads(FLAGS.n_threads)
meta_train_data = meta_train_data[:FLAGS.n_train_tasks]
data_train = [(context_x, context_y) for (context_x, context_y, _, _) in meta_train_data]
assert (len(data_train) == FLAGS.n_train_tasks)
gp_meta = MAMLRegression(data_train, num_iter_fit=FLAGS.n_iter_fit, layer_sizes=nn_layers, task_batch_size=FLAGS.batch_size, lr_inner=FLAGS.lr_inner, lr_meta=FLAGS.lr, random_seed=FLAGS.seed, optimizer=FLAGS.optimizer, normalize_data=FLAGS.normalize_data)
gp_meta.meta_fit(log_period=1000)
test_rmse_meta_train = gp_meta.eval_datasets(meta_train_data)
test_rmse_meta_test = gp_meta.eval_datasets(meta_test_data)
results_dict = {'test_rmse_meta_train': test_rmse_meta_train, 'test_rmse_meta_test': test_rmse_meta_test}
pprint(results_dict)
save_results(results_dict, exp_dir, log=True)
|
def main(argv):
from experiments.util import AsyncExecutor, generate_launch_commands
import experiments.meta_overfitting_v2.maml_overfitting_base
command_list = []
for dataset in FLAGS.datasets.split(','):
if (dataset == 'sin'):
n_context_samples = [5, 10, 20]
elif (dataset == 'cauchy'):
n_context_samples = [20, 40]
else:
raise AssertionError('dataset must be either of [sin, cauchy]')
exp_config = {'exp_name': [('meta-overfitting-v2-maml-%s' % dataset)], 'dataset': [dataset], 'n_threads': [N_THREADS], 'seed': list(range(30, 55)), 'data_seed': [28], 'num_layers': [4], 'layer_size': [32], 'n_iter_fit': [50000], 'lr_inner': [0.1, 0.2, 0.3, 0.5], 'n_train_tasks': [2, 4, 8, 16, 32, 64, 128, 256, 512], 'n_test_tasks': [200], 'n_context_samples': n_context_samples, 'n_test_samples': [100]}
command_list.extend(generate_launch_commands(experiments.meta_overfitting_v2.maml_overfitting_base, exp_config))
if FLAGS.cluster:
cluster_cmds = []
for python_cmd in command_list:
cmd_hash = hashlib.md5(str.encode(python_cmd)).hexdigest()
bsub_cmd = ('bsub -oo /cluster/project/infk/krause/rojonas/stdout/gp-priors/meta-overfitting/%s.out -W 03:59 -R "rusage[mem=1048]" -n %i ' % (cmd_hash, N_THREADS))
cluster_cmds.append(((bsub_cmd + ' ') + python_cmd))
answer = input(('About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n' % len(cluster_cmds)))
if (answer == 'yes'):
for cmd in cluster_cmds:
os.system(cmd)
else:
answer = input(('About to run %i compute jobs locally on %i workers. Proceed? [yes/no]\n' % (len(command_list), FLAGS.n_workers)))
if (answer == 'yes'):
exec_fn = (lambda cmd: os.system(cmd))
executor = AsyncExecutor(n_jobs=FLAGS.n_workers)
executor.run(exec_fn, command_list)
|
def main(argv):
from experiments.util import AsyncExecutor, generate_launch_commands
import experiments.meta_overfitting_v2.meta_GPR_overfitting_base
command_list = []
for dataset in FLAGS.datasets.split(','):
if (dataset == 'sin'):
n_context_samples = [5, 10, 20]
elif (dataset == 'cauchy'):
n_context_samples = [20, 40]
else:
raise AssertionError('dataset must be either of [sin, cauchy]')
exp_config = {'exp_name': [('meta-overfitting-v2-mll-%s' % dataset)], 'dataset': [dataset], 'n_threads': [N_THREADS], 'seed': list(range(30, 55)), 'data_seed': [28], 'weight_decay': [0.0], 'covar_module': ['NN'], 'mean_module': ['NN'], 'num_layers': [4], 'layer_size': [32], 'n_iter_fit': [40000], 'n_train_tasks': [2, 4, 8, 16, 32, 64, 128, 256, 512], 'n_test_tasks': [200], 'n_context_samples': n_context_samples, 'n_test_samples': [100]}
command_list.extend(generate_launch_commands(experiments.meta_overfitting_v2.meta_GPR_overfitting_base, exp_config))
if FLAGS.cluster:
cluster_cmds = []
for python_cmd in command_list:
cmd_hash = hashlib.md5(str.encode(python_cmd)).hexdigest()
bsub_cmd = ('bsub -oo /cluster/project/infk/krause/rojonas/stdout/gp-priors/meta-overfitting/%s.out -W 03:59 -R "rusage[mem=1048]" -n %i ' % (cmd_hash, N_THREADS))
cluster_cmds.append(((bsub_cmd + ' ') + python_cmd))
answer = input(('About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n' % len(cluster_cmds)))
if (answer == 'yes'):
for cmd in cluster_cmds:
os.system(cmd)
else:
answer = input(('About to run %i compute jobs locally on %i workers. Proceed? [yes/no]\n' % (len(command_list), FLAGS.n_workers)))
if (answer == 'yes'):
exec_fn = (lambda cmd: os.system(cmd))
executor = AsyncExecutor(n_jobs=FLAGS.n_workers)
executor.run(exec_fn, command_list)
|
def main(argv):
from experiments.util import AsyncExecutor, generate_launch_commands
import experiments.meta_overfitting_v2.neural_processes_overfitting_base
command_list = []
for dataset in FLAGS.datasets.split(','):
if (dataset == 'sin'):
n_context_samples = [5]
elif (dataset == 'cauchy'):
n_context_samples = [20]
else:
raise AssertionError('dataset must be either of [sin, cauchy]')
exp_config = {'exp_name': [('meta-overfitting-v2-nps-%s' % dataset)], 'dataset': [dataset], 'n_threads': [N_THREADS], 'seed': list(range(30, 55)), 'data_seed': [28], 'weight_decay': [0.0, 0.001, 0.01, 0.1, 0.2, 0.4], 'r_dim': [256, 512], 'n_iter_fit': [30000], 'lr': [0.001], 'lr_decay': [0.97], 'n_train_tasks': [2, 4, 8, 16, 32, 64, 128, 256, 512], 'n_test_tasks': [200], 'n_context_samples': n_context_samples, 'n_test_samples': [100]}
command_list.extend(generate_launch_commands(experiments.meta_overfitting_v2.neural_processes_overfitting_base, exp_config))
if FLAGS.cluster:
cluster_cmds = []
for python_cmd in command_list:
cmd_hash = hashlib.md5(str.encode(python_cmd)).hexdigest()
bsub_cmd = ('bsub -oo /cluster/project/infk/krause/rojonas/stdout/gp-priors/meta-overfitting/%s.out -W 03:59 -R "rusage[mem=1048]" -n %i ' % (cmd_hash, N_THREADS))
cluster_cmds.append(((bsub_cmd + ' ') + python_cmd))
answer = input(('About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n' % len(cluster_cmds)))
if (answer == 'yes'):
for cmd in cluster_cmds:
os.system(cmd)
else:
answer = input(('About to run %i compute jobs locally on %i workers. Proceed? [yes/no]\n' % (len(command_list), FLAGS.n_workers)))
if (answer == 'yes'):
exec_fn = (lambda cmd: os.system(cmd))
executor = AsyncExecutor(n_jobs=FLAGS.n_workers)
executor.run(exec_fn, command_list)
|
def main(argv):
from experiments.util import AsyncExecutor, generate_launch_commands
import experiments.meta_overfitting_v2.meta_GPR_overfitting_base
command_list = []
for dataset in FLAGS.datasets.split(','):
if (dataset == 'sin'):
n_context_samples = [5, 10, 20]
elif (dataset == 'cauchy'):
n_context_samples = [20, 40]
else:
raise AssertionError('dataset must be either of [sin, cauchy]')
exp_config = {'exp_name': [('meta-overfitting-v2-pacoh-map-%s' % dataset)], 'dataset': [dataset], 'n_threads': [N_THREADS], 'seed': list(range(30, 55)), 'data_seed': [28], 'weight_decay': list(np.logspace((- 2), (- 0.25), num=10)), 'covar_module': ['NN'], 'mean_module': ['NN'], 'num_layers': [4], 'layer_size': [32], 'n_iter_fit': [30000], 'n_train_tasks': [2, 4, 8, 16, 32, 64, 128, 256, 512], 'n_test_tasks': [200], 'n_context_samples': n_context_samples, 'n_test_samples': [100]}
command_list.extend(generate_launch_commands(experiments.meta_overfitting_v2.meta_GPR_overfitting_base, exp_config))
if FLAGS.cluster:
cluster_cmds = []
for python_cmd in command_list:
cmd_hash = hashlib.md5(str.encode(python_cmd)).hexdigest()
bsub_cmd = ('bsub -oo /cluster/project/infk/krause/rojonas/stdout/gp-priors/meta-overfitting/%s.out -W 03:59 -R "rusage[mem=1048]" -n %i ' % (cmd_hash, N_THREADS))
cluster_cmds.append(((bsub_cmd + ' ') + python_cmd))
answer = input(('About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n' % len(cluster_cmds)))
if (answer == 'yes'):
for cmd in cluster_cmds:
os.system(cmd)
else:
answer = input(('About to run %i compute jobs locally on %i workers. Proceed? [yes/no]\n' % (len(command_list), FLAGS.n_workers)))
if (answer == 'yes'):
exec_fn = (lambda cmd: os.system(cmd))
executor = AsyncExecutor(n_jobs=FLAGS.n_workers)
executor.run(exec_fn, command_list)
|
def main(argv):
(logger, exp_dir) = setup_exp_doc(FLAGS.exp_name)
if (FLAGS.dataset == 'swissfel'):
raise NotImplementedError
else:
if (FLAGS.dataset == 'sin-nonstat'):
dataset = SinusoidNonstationaryDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'sin'):
dataset = SinusoidDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'cauchy'):
dataset = CauchyDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'mnist'):
dataset = MNISTRegressionDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'physionet'):
dataset = PhysionetDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'gp-funcs'):
dataset = GPFunctionsDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
else:
raise NotImplementedError('Does not recognize dataset flag')
meta_train_data = dataset.generate_meta_test_data(n_tasks=1024, n_samples_context=FLAGS.n_context_samples, n_samples_test=FLAGS.n_test_samples)
meta_test_data = dataset.generate_meta_test_data(n_tasks=FLAGS.n_test_tasks, n_samples_context=FLAGS.n_context_samples, n_samples_test=FLAGS.n_test_samples)
nn_layers = tuple([FLAGS.layer_size for _ in range(FLAGS.num_layers)])
torch.set_num_threads(FLAGS.n_threads)
meta_train_data = meta_train_data[:FLAGS.n_train_tasks]
data_train = [(context_x, context_y) for (context_x, context_y, _, _) in meta_train_data]
assert (len(data_train) == FLAGS.n_train_tasks)
gp_meta = GPRegressionMetaLearned(data_train, learning_mode=FLAGS.learning_mode, num_iter_fit=FLAGS.n_iter_fit, covar_module=FLAGS.covar_module, mean_module=FLAGS.mean_module, kernel_nn_layers=nn_layers, mean_nn_layers=nn_layers, weight_decay=FLAGS.weight_decay, lr_params=FLAGS.lr, lr_decay=FLAGS.lr_decay, random_seed=FLAGS.seed, task_batch_size=FLAGS.batch_size, optimizer=FLAGS.optimizer, normalize_data=FLAGS.normalize_data)
gp_meta.meta_fit(log_period=1000)
(test_ll_meta_train, test_rmse_meta_train, calib_err_meta_train) = gp_meta.eval_datasets(meta_train_data)
(test_ll_meta_test, test_rmse_meta_test, calib_err_test) = gp_meta.eval_datasets(meta_test_data)
results_dict = {'test_ll_meta_train': test_ll_meta_train, 'test_ll_meta_test': test_ll_meta_test, 'test_rmse_meta_train': test_rmse_meta_train, 'test_rmse_meta_test': test_rmse_meta_test, 'calib_err_meta_train': calib_err_meta_train, 'calib_err_test': calib_err_test}
pprint(results_dict)
save_results(results_dict, exp_dir, log=True)
|
def main(argv):
(logger, exp_dir) = setup_exp_doc(FLAGS.exp_name)
if (FLAGS.dataset == 'swissfel'):
raise NotImplementedError
else:
if (FLAGS.dataset == 'sin-nonstat'):
dataset = SinusoidNonstationaryDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'sin'):
dataset = SinusoidDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'cauchy'):
dataset = CauchyDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'mnist'):
dataset = MNISTRegressionDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'physionet'):
dataset = PhysionetDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
elif (FLAGS.dataset == 'gp-funcs'):
dataset = GPFunctionsDataset(random_state=np.random.RandomState((FLAGS.seed + 1)))
else:
raise NotImplementedError(('Unrecognized dataset flag: %s' % FLAGS.dataset))
meta_train_data = dataset.generate_meta_test_data(n_tasks=1024, n_samples_context=FLAGS.n_context_samples, n_samples_test=FLAGS.n_test_samples)
meta_test_data = dataset.generate_meta_test_data(n_tasks=FLAGS.n_test_tasks, n_samples_context=FLAGS.n_context_samples, n_samples_test=FLAGS.n_test_samples)
torch.set_num_threads(FLAGS.n_threads)
meta_train_data = meta_train_data[:FLAGS.n_train_tasks]
data_train = [(context_x, context_y) for (context_x, context_y, _, _) in meta_train_data]
assert (len(data_train) == FLAGS.n_train_tasks)
npr = NPRegressionMetaLearned(data_train, num_iter_fit=FLAGS.n_iter_fit, r_dim=FLAGS.r_dim, z_dim=FLAGS.z_dim, h_dim=FLAGS.h_dim, weight_decay=FLAGS.weight_decay, task_batch_size=FLAGS.batch_size, lr_params=FLAGS.lr, random_seed=FLAGS.seed, optimizer=FLAGS.optimizer, normalize_data=FLAGS.normalize_data)
npr.meta_fit(log_period=1000)
(test_ll_meta_train, test_rmse_meta_train, calib_err_meta_train) = npr.eval_datasets(meta_train_data, flatten_y=False)
(test_ll_meta_test, test_rmse_meta_test, calib_err_test) = npr.eval_datasets(meta_test_data, flatten_y=False)
results_dict = {'test_ll_meta_train': test_ll_meta_train, 'test_ll_meta_test': test_ll_meta_test, 'test_rmse_meta_train': test_rmse_meta_train, 'test_rmse_meta_test': test_rmse_meta_test, 'calib_err_meta_train': calib_err_meta_train, 'calib_err_test': calib_err_test}
pprint(results_dict)
save_results(results_dict, exp_dir, log=True)
|
def setup_exp_doc(exp_name, data_dir=None):
flags_dict = get_flags_dict()
flags_dict['exp_name'] = exp_name
task_hash = hash_dict(flags_dict)
flags_dict['task_hash'] = task_hash
exp_dir = create_exp_dir(exp_name, task_hash, data_dir=data_dir)
logger = get_logger(log_dir=exp_dir, expname=exp_name)
save_dict(flags_dict, os.path.join(exp_dir, 'config.json'))
flags_table_str = dict_to_tabular_str(flags_dict)
logger.info(((((((' ------ Starting experiment: %s ------ \n' % exp_name) + '----------------------------------------\n') + ' Configuration \n') + '----------------------------------------') + ('%s' % flags_table_str)) + '----------------------------------------\n'))
return (logger, exp_dir)
|
def save_results(results_dict, exp_dir, log=True):
results_file = os.path.join(exp_dir, 'results.json')
save_dict(results_dict, results_file)
if log:
logger = get_logger(log_dir=exp_dir)
results_table_str = dict_to_tabular_str(results_dict)
logger.info(((((('\n' + '----------------------------------------\n') + ' Results \n') + '----------------------------------------') + ('%s' % results_table_str)) + '----------------------------------------\n'))
|
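# Sketch of the experiment-documentation flow provided by setup_exp_doc / save_results above.
# It assumes the absl FLAGS have already been parsed (i.e. this runs inside a script's main());
# the experiment name and the result key below are purely illustrative.
logger, exp_dir = setup_exp_doc('demo_exp')           # creates <DATA_DIR>/demo_exp/<md5-of-config>/config.json
logger.info('running experiment ...')
save_results({'test_rmse': 0.42}, exp_dir, log=True)  # writes results.json next to config.json and logs a results table
|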
def create_exp_parent_dir(exp_name, data_dir=None):
if (data_dir is None):
data_dir = DATA_DIR
exp_parent_dir = os.path.join(data_dir, exp_name)
if (not os.path.isdir(exp_parent_dir)):
os.mkdir(exp_parent_dir)
return exp_parent_dir
|
def create_exp_dir(exp_name, task_hash, data_dir=None):
exp_parent_dir = create_exp_parent_dir(exp_name, data_dir=data_dir)
exp_dir = os.path.join(exp_parent_dir, str(task_hash))
if (not os.path.isdir(exp_dir)):
os.mkdir(exp_dir)
return exp_dir
|
def get_flags_dict():
flags_dict = copy.deepcopy(flags.FLAGS.flag_values_dict())
list(map(flags_dict.__delitem__, DEFAULT_FLAGS))
return flags_dict
|
def hash_dict(d):
return hashlib.md5(str.encode(json.dumps(d, sort_keys=True))).hexdigest()
|
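# Small self-contained illustration of the config hashing used to name experiment directories.
# Because json.dumps is called with sort_keys=True, the hash is invariant to dict key order,
# so re-runs of the same flag configuration map to the same experiment directory.
import hashlib
import json

def hash_dict(d):
    return hashlib.md5(str.encode(json.dumps(d, sort_keys=True))).hexdigest()

cfg_a = {'dataset': 'sin', 'seed': 30, 'weight_decay': 0.01}
cfg_b = {'weight_decay': 0.01, 'seed': 30, 'dataset': 'sin'}
assert hash_dict(cfg_a) == hash_dict(cfg_b)
print(hash_dict(cfg_a))  # a 32-character hex digest
|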
def save_dict(d, dump_path):
with open(dump_path, 'w') as json_file:
json.dump(d, json_file, indent=4, sort_keys=True)
|
def dict_to_tabular_str(d):
s = '\n'
row_format = '{:<25}{:<10}'
for (key, value) in collections.OrderedDict(d).items():
s += (row_format.format(key, value) + '\n')
return s
|
def collect_exp_results(exp_name, verbose=True):
exp_dir = os.path.join(DATA_DIR, exp_name)
no_results_counter = 0
exp_dicts = []
for exp_sub_dir in glob.glob((exp_dir + '/*')):
config_file = os.path.join(exp_sub_dir, 'config.json')
results_file = os.path.join(exp_sub_dir, 'results.json')
if (os.path.isfile(config_file) and os.path.isfile(results_file)):
with open(config_file, 'r') as f:
exp_dict = json.load(f)
with open(results_file, 'r') as f:
exp_dict.update(json.load(f))
exp_dicts.append(exp_dict)
else:
no_results_counter += 1
if verbose:
logger = get_logger()
logger.info(('Parsed results %s - found %i folders with results and %i folders without results' % (exp_name, len(exp_dicts), no_results_counter)))
return pd.DataFrame(data=exp_dicts)
|
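# A sketch of downstream analysis with the DataFrame returned by collect_exp_results.
# The experiment name and column names ('n_train_tasks', 'weight_decay', 'test_rmse_meta_test')
# are the flag / result keys written by setup_exp_doc and save_results in the scripts above.
import pandas as pd

df = collect_exp_results('meta-overfitting-v2-pacoh-map-sin')
# average the test RMSE over random seeds for each (n_train_tasks, weight_decay) setting
summary = (df.groupby(['n_train_tasks', 'weight_decay'])['test_rmse_meta_test']
             .agg(['mean', 'std'])
             .reset_index())
print(summary.head())
|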
def generate_launch_commands(module, exp_config, check_flags=True):
base_cmd = generate_base_command(module)
if check_flags:
allowed_flags = set(module.FLAGS.flag_values_dict().keys())
for (key, value) in exp_config.items():
assert hasattr(value, '__iter__')
assert (key in allowed_flags), ('%s is not a flag in %s' % (key, str(module)))
config_product = list(itertools.product(*list(exp_config.values())))
config_product_dicts = [dict(zip(exp_config.keys(), conf)) for conf in config_product]
cmds = []
for config_dict in config_product_dicts:
cmd = base_cmd
for (key, value) in config_dict.items():
cmd += (' --%s=%s' % (str(key), str(value)))
cmds.append(cmd)
return cmds
|
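# Illustration of how generate_launch_commands expands a flag grid into shell commands.
# It assumes the experiment module from the scripts above is importable (repo on PYTHONPATH);
# the flag names used here ('dataset', 'seed', 'n_train_tasks') are flags of that module.
import experiments.meta_overfitting_v2.meta_GPR_overfitting_base as base_module

exp_config = {'dataset': ['sin', 'cauchy'], 'seed': [30, 31], 'n_train_tasks': [8]}
cmds = generate_launch_commands(base_module, exp_config)
# 2 datasets x 2 seeds x 1 task count -> 4 commands of the form
#   /path/to/python /path/to/meta_GPR_overfitting_base.py --dataset=sin --seed=30 --n_train_tasks=8
assert len(cmds) == 4
for cmd in cmds:
    print(cmd)
|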
def generate_base_command(module):
interpreter_script = sys.executable
base_exp_script = os.path.abspath(module.__file__)
base_cmd = ((interpreter_script + ' ') + base_exp_script)
return base_cmd
|
class AsyncExecutor():
def __init__(self, n_jobs=1):
self.num_workers = (n_jobs if (n_jobs > 0) else multiprocessing.cpu_count())
self._pool = []
self._populate_pool()
def run(self, target, *args_iter, verbose=False):
workers_idle = ([False] * self.num_workers)
tasks = list(zip(*args_iter))
n_tasks = len(tasks)
while (not all(workers_idle)):
for i in range(self.num_workers):
if (not self._pool[i].is_alive()):
self._pool[i].terminate()
if (len(tasks) > 0):
if verbose:
print((n_tasks - len(tasks)))
next_task = tasks.pop(0)
self._pool[i] = _start_process(target, next_task)
else:
workers_idle[i] = True
def _populate_pool(self):
self._pool = [_start_process(_dummy_fun) for _ in range(self.num_workers)]
|
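# Minimal local-execution sketch mirroring the non-cluster branch of the main() functions above:
# AsyncExecutor keeps n_jobs worker processes busy and pops commands off the list as workers free up.
# The echo commands are placeholders; any shell command works (lambdas rely on fork-based multiprocessing).
import os

commands = ['echo task-1', 'echo task-2', 'echo task-3']
executor = AsyncExecutor(n_jobs=2)
executor.run(lambda cmd: os.system(cmd), commands)
|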
def _start_process(target, args=None):
if args:
p = multiprocessing.Process(target=target, args=args)
else:
p = multiprocessing.Process(target=target)
p.start()
return p
|
def _dummy_fun():
pass
|
class GPRegressionMetaLearnedPAC(RegressionModelMetaLearned):
def __init__(self, meta_train_data, num_iter_fit=40000, feature_dim=1, weight_prior_std=0.5, bias_prior_std=3.0, delta=0.1, task_kl_weight=1.0, meta_kl_weight=1.0, posterior_lr_multiplier=1.0, covar_module='SE', mean_module='zero', mean_nn_layers=(32, 32), kernel_nn_layers=(32, 32), optimizer='Adam', lr=0.001, lr_decay=1.0, svi_batch_size=5, cov_type='diag', task_batch_size=(- 1), likelihood_noise_init=0.01, normalize_data=True, random_seed=None):
"\n PACOH-VI: Variational Inference on the PAC-optimal hyper-posterior with Gaussian family.\n Meta-Learns a distribution over GP-priors.\n\n Args:\n meta_train_data: list of tuples of ndarrays[(train_x_1, train_t_1), ..., (train_x_n, train_t_n)]\n num_iter_fit: (int) number of gradient steps for fitting the parameters\n feature_dim: (int) output dimensionality of NN feature map for kernel function\n prior_factor: (float) weighting of the hyper-prior (--> meta-regularization parameter)\n weight_prior_std (float): std of Gaussian hyper-prior on weights\n bias_prior_std (float): std of Gaussian hyper-prior on biases\n covar_module: (gpytorch.mean.Kernel) optional kernel module, default: RBF kernel\n mean_module: (gpytorch.mean.Mean) optional mean module, default: ZeroMean\n mean_nn_layers: (tuple) hidden layer sizes of mean NN\n kernel_nn_layers: (tuple) hidden layer sizes of kernel NN\n optimizer: (str) type of optimizer to use - must be either 'Adam' or 'SGD'\n lr: (float) learning rate for prior parameters\n lr_decay: (float) lr rate decay multiplier applied after every 1000 steps\n kernel (std): SVGD kernel, either 'RBF' or 'IMQ'\n bandwidth (float): bandwidth of kernel, if None the bandwidth is chosen via heuristic\n num_particles: (int) number particles to approximate the hyper-posterior\n task_batch_size: (int) mini-batch size of tasks for estimating gradients\n normalize_data: (bool) whether the data should be normalized\n random_seed: (int) seed for pytorch\n "
super().__init__(normalize_data, random_seed)
assert ((mean_module in ['NN', 'constant', 'zero']) or isinstance(mean_module, gpytorch.means.Mean))
assert ((covar_module in ['NN', 'SE']) or isinstance(covar_module, gpytorch.kernels.Kernel))
assert (optimizer in ['Adam', 'SGD'])
(self.num_iter_fit, self.feature_dim) = (num_iter_fit, feature_dim)
(self.task_kl_weight, self.meta_kl_weight) = (task_kl_weight, meta_kl_weight)
(self.weight_prior_std, self.bias_prior_std) = (weight_prior_std, bias_prior_std)
self.svi_batch_size = svi_batch_size
self.lr = lr
self.n_tasks = len(meta_train_data)
self.delta = torch.tensor(delta, dtype=torch.float32)
if (task_batch_size < 1):
self.task_batch_size = len(meta_train_data)
else:
self.task_batch_size = min(task_batch_size, len(meta_train_data))
self._check_meta_data_shapes(meta_train_data)
self._compute_normalization_stats(meta_train_data)
' --- Setup model & inference --- '
self.meta_train_params = []
self._setup_meta_train_step(mean_module, covar_module, mean_nn_layers, kernel_nn_layers, cov_type)
self.meta_train_params.append({'params': self.hyper_posterior.parameters(), 'lr': lr})
self.likelihood = gpytorch.likelihoods.GaussianLikelihood()
self.likelihood.noise = (likelihood_noise_init * torch.ones((1,)))
self.meta_train_params.append({'params': self.likelihood.parameters(), 'lr': lr})
(self.task_dicts, posterior_params) = self._setup_task_dicts(meta_train_data)
self.meta_train_params.append({'params': posterior_params, 'lr': (posterior_lr_multiplier * lr)})
self._setup_optimizer(optimizer, lr, lr_decay)
self.fitted = False
def meta_fit(self, valid_tuples=None, verbose=True, log_period=500, eval_period=5000, n_iter=None):
'\n fits the variational hyper-posterior by minimizing the meta-level PAC bound\n\n Args:\n valid_tuples: list of valid tuples, i.e. [(test_context_x_1, test_context_t_1, test_x_1, test_t_1), ...]\n verbose: (boolean) whether to print training progress\n log_period: (int) number of steps after which to print stats\n eval_period: (int) number of steps after which to evaluate on the validation tuples\n n_iter: (int) number of gradient descent iterations\n '
assert ((eval_period % log_period) == 0), 'eval_period should be multiple of log_period'
assert ((valid_tuples is None) or all([(len(valid_tuple) == 4) for valid_tuple in valid_tuples]))
t = time.time()
if (n_iter is None):
n_iter = self.num_iter_fit
for itr in range(1, (n_iter + 1)):
task_dict_batch = self.rds_numpy.choice(self.task_dicts, size=self.task_batch_size)
self.optimizer.zero_grad()
(loss, diagnostics_dict) = self._meta_train_pac_bound(task_dict_batch)
loss.backward()
self.optimizer.step()
self.lr_scheduler.step()
if (verbose and ((itr == 1) or ((itr % log_period) == 0))):
duration = (time.time() - t)
t = time.time()
message = ('Iter %d/%d - Loss: %.6f - Time %.2f sec - ' % (itr, self.num_iter_fit, loss.item(), duration))
if ((valid_tuples is not None) and ((itr % eval_period) == 0) and (itr > 0)):
(valid_ll, valid_rmse, calibr_err) = self.eval_datasets(valid_tuples)
message += (' - Valid-LL: %.3f - Valid-RMSE: %.3f - Calib-Err %.3f' % (valid_ll, valid_rmse, calibr_err))
message += ' - '.join([('%s: %.4f' % (key, value)) for (key, value) in diagnostics_dict.items()])
self.logger.info(message)
self.fitted = True
return (loss.item(), diagnostics_dict)
def predict(self, context_x, context_y, test_x, n_iter_meta_test=3000, return_density=False):
"\n computes the predictive distribution of the targets p(t|test_x, test_context_x, context_y)\n\n Args:\n context_x: (ndarray) context input data for which to compute the posterior\n context_y: (ndarray) context targets for which to compute the posterior\n test_x: (ndarray) query input data of shape (n_samples, ndim_x)\n n_posterior_samples: (int) number of samples from posterior to average over\n mode: (std) either of ['Bayes' , 'MAP']\n return_density: (bool) whether to return result as mean and std ndarray or as MultivariateNormal pytorch object\n\n Returns:\n (pred_mean, pred_std) predicted mean and standard deviation corresponding to p(t|test_x, test_context_x, context_y)\n "
(context_x, context_y) = _handle_input_dimensionality(context_x, context_y)
test_x = _handle_input_dimensionality(test_x)
assert (test_x.shape[1] == context_x.shape[1])
task_dict = self._meta_test_inference([(context_x, context_y)], verbose=True, log_period=500, n_iter=n_iter_meta_test)[0]
with torch.no_grad():
test_x = self._normalize_data(X=test_x, Y=None)
test_x = torch.from_numpy(test_x).float().to(device)
gp_model = task_dict['gp_model']
gp_model.eval()
pred_dist = self.likelihood(gp_model(test_x))
pred_dist = AffineTransformedDistribution(pred_dist, normalization_mean=self.y_mean, normalization_std=self.y_std)
if return_density:
return pred_dist
else:
pred_mean = pred_dist.mean.cpu().numpy()
pred_std = pred_dist.stddev.cpu().numpy()
return (pred_mean, pred_std)
def eval_datasets(self, test_tuples, n_iter_meta_test=3000, **kwargs):
'\n Performs meta-testing on multiple tasks / datasets.\n Computes the average test log likelihood, the rmse and the calibration error over multiple test datasets\n\n Args:\n test_tuples: list of test set tuples, i.e. [(test_context_x_1, test_context_y_1, test_x_1, test_y_1), ...]\n\n Returns: (avg_log_likelihood, rmse, calibr_error)\n\n '
assert all([(len(valid_tuple) == 4) for valid_tuple in test_tuples])
context_tuples = [test_tuple[:2] for test_tuple in test_tuples]
task_dicts = self._meta_test_inference(context_tuples, verbose=True, log_period=500, n_iter=n_iter_meta_test)
(ll_list, rmse_list, calibr_err_list) = ([], [], [])
for (task_dict, test_tuple) in zip(task_dicts, test_tuples):
(_, _, test_x, test_y) = test_tuple
(test_x, test_y) = _handle_input_dimensionality(test_x, test_y)
test_x_tensor = torch.from_numpy(self._normalize_data(X=test_x, Y=None)).float().to(device)
test_y_tensor = torch.from_numpy(test_y).float().flatten().to(device)
gp_model = task_dict['gp_model']
gp_model.eval()
self.likelihood.eval()
pred_dist = self.likelihood(gp_model(test_x_tensor))
pred_dist = AffineTransformedDistribution(pred_dist, normalization_mean=self.y_mean, normalization_std=self.y_std)
ll_list.append(torch.mean((pred_dist.log_prob(test_y_tensor) / test_y_tensor.shape[0])).cpu().item())
rmse_list.append(torch.mean(torch.pow((pred_dist.mean - test_y_tensor), 2)).sqrt().cpu().item())
pred_dist_vect = self._vectorize_pred_dist(pred_dist)
calibr_err_list.append(self._calib_error(pred_dist_vect, test_y_tensor).cpu().item())
return (np.mean(ll_list), np.mean(rmse_list), np.mean(calibr_err_list))
def state_dict(self):
state_dict = {'optimizer': self.optimizer.state_dict(), 'model': self.task_dicts[0]['model'].state_dict()}
for task_dict in self.task_dicts:
for (key, tensor) in task_dict['model'].state_dict().items():
assert torch.all((state_dict['model'][key] == tensor)).item()
return state_dict
def load_state_dict(self, state_dict):
for task_dict in self.task_dicts:
task_dict['model'].load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
def _setup_task_dicts(self, train_data_tuples):
(task_dicts, parameters) = ([], [])
for (train_x, train_y) in train_data_tuples:
task_dict = OrderedDict()
(x_tensor, y_tensor) = self._prepare_data_per_task(train_x, train_y)
(task_dict['train_x'], task_dict['train_y']) = (x_tensor, y_tensor)
prior_param_sample = self.hyper_posterior.sample(sample_shape=(20,))
(mean_module, covar_module) = self._aggregate_gp_priors(prior_param_sample)
task_dict['gp_model'] = LearnedGPRegressionModelApproximate(x_tensor, y_tensor, self.likelihood, mean_module=mean_module, covar_module=covar_module)
init_mean = mean_module(x_tensor)
init_mean += (0.001 * torch.randn_like(init_mean))
task_dict['gp_model'].variational_distribution.variational_mean.data.copy_(init_mean)
prior_covar = covar_module(x_tensor)
init_chol_covar = torch.cholesky((prior_covar + (0.001 * torch.eye(prior_covar.shape[0]))))
task_dict['gp_model'].variational_distribution.chol_variational_covar.data.copy_(init_chol_covar)
parameters.extend(task_dict['gp_model'].variational_parameters())
task_dicts.append(task_dict)
return (task_dicts, parameters)
def _meta_test_inference(self, context_tuples, n_iter=5000, lr=0.01, log_period=100, verbose=False):
n_tasks = len(context_tuples)
(task_dicts, posterior_params) = self._setup_task_dicts(context_tuples)
optimizer = torch.optim.Adam(posterior_params, lr=lr)
t = time.time()
for itr in range(n_iter):
optimizer.zero_grad()
param_sample = self.hyper_posterior.rsample(sample_shape=(self.svi_batch_size,))
(task_pac_bounds, diagnostics_dict) = self._task_pac_bounds(task_dicts, param_sample, task_kl_weight=self.task_kl_weight, meta_kl_weight=self.meta_kl_weight, meta_test=True)
loss = torch.sum(torch.stack(task_pac_bounds))
loss.backward()
optimizer.step()
if (((itr % log_period) == 0) and verbose):
duration = (time.time() - t)
t = time.time()
message = ('\t Meta-Test Iter %d/%d - Loss: %.6f - Time %.2f sec - ' % (itr, n_iter, (loss.item() / n_tasks), duration))
message += ' - '.join([('%s: %.4f' % (key, value)) for (key, value) in diagnostics_dict.items()])
self.logger.info(message)
return task_dicts
def _setup_meta_train_step(self, mean_module_str, covar_module_str, mean_nn_layers, kernel_nn_layers, cov_type):
assert (mean_module_str in ['NN', 'constant'])
assert (covar_module_str in ['NN', 'SE'])
' random gp model '
self.random_gp = RandomGPMeta(size_in=self.input_dim, prior_factor=1.0, weight_prior_std=self.weight_prior_std, bias_prior_std=self.bias_prior_std, covar_module_str=covar_module_str, mean_module_str=mean_module_str, mean_nn_layers=mean_nn_layers, kernel_nn_layers=kernel_nn_layers)
param_shapes_dict = self.random_gp.parameter_shapes()
' variational posterior '
self.hyper_posterior = RandomGPPosterior(param_shapes_dict, cov_type=cov_type)
def _tile_data_tuple(task_dict, tile_size):
(x_data, y_data) = (task_dict['train_x'], task_dict['train_y'])
x_data = x_data.view((torch.Size((1,)) + x_data.shape)).repeat(tile_size, 1, 1)
y_data = y_data.view((torch.Size((1,)) + y_data.shape)).repeat(tile_size, 1)
return (x_data, y_data)
def _hyper_kl(prior_param_sample):
return torch.mean((self.hyper_posterior.log_prob(prior_param_sample) - self.random_gp.hyper_prior.log_prob(prior_param_sample)))
def _task_pac_bounds(task_dicts, prior_param_sample, task_kl_weight=1.0, meta_kl_weight=1.0, meta_test=False):
fn = self.random_gp.get_forward_fn(prior_param_sample)
kl_outer = (meta_kl_weight * _hyper_kl(prior_param_sample))
task_pac_bounds = []
for task_dict in task_dicts:
if meta_test:
posterior = task_dict['gp_model'](task_dict['train_x'])
else:
posterior = task_dict['gp_model'].variational_distribution()
avg_ll = torch.mean(self.likelihood.expected_log_prob(task_dict['train_y'], posterior))
(x_data_tiled, y_data_tiled) = _tile_data_tuple(task_dict, self.svi_batch_size)
(gp, _) = fn(x_data_tiled, None, prior=True)
prior = gp.forward(x_data_tiled)
kl_inner = (task_kl_weight * torch.mean(_kl_divergence_safe(posterior.expand((self.svi_batch_size,)), prior)))
m = torch.tensor(task_dict['train_y'].shape[0], dtype=torch.float32)
n = torch.tensor(self.n_tasks, dtype=torch.float32)
task_complexity = torch.sqrt(((((((kl_outer + kl_inner) + math.log(2.0)) + torch.log(m)) + torch.log(n)) - torch.log(self.delta)) / (2 * (m - 1))))
diagnostics_dict = {'avg_ll': avg_ll.item(), 'kl_outer_weighted': kl_outer.item(), 'kl_inner_weighted': kl_inner.item()}
task_pac_bound = ((- avg_ll) + task_complexity)
task_pac_bounds.append(task_pac_bound)
return (task_pac_bounds, diagnostics_dict)
def _meta_complexity(prior_param_sample, meta_kl_weight=1.0):
outer_kl = _hyper_kl(prior_param_sample)
n = torch.tensor(self.n_tasks, dtype=torch.float32)
# meta-level PAC-Bayes complexity term: sqrt((KL + log(2n/delta)) / (2(n-1))), analogous to the per-task bound above
return torch.sqrt((((meta_kl_weight * outer_kl) + math.log(2.0) + torch.log(n) - torch.log(self.delta)) / (2 * (n - 1))))
def _meta_train_pac_bound(task_dicts):
param_sample = self.hyper_posterior.rsample(sample_shape=(self.svi_batch_size,))
(task_pac_bounds, diagnostics_dict) = _task_pac_bounds(task_dicts, param_sample, task_kl_weight=self.task_kl_weight, meta_kl_weight=self.meta_kl_weight)
meta_complexity = _meta_complexity(param_sample, meta_kl_weight=self.meta_kl_weight)
pac_bound = (torch.mean(torch.stack(task_pac_bounds)) + meta_complexity)
return (pac_bound, diagnostics_dict)
self._task_pac_bounds = _task_pac_bounds
self._meta_train_pac_bound = _meta_train_pac_bound
def _setup_optimizer(self, optimizer, lr, lr_decay):
if (optimizer == 'Adam'):
self.optimizer = torch.optim.Adam(self.meta_train_params, lr=lr)
elif (optimizer == 'SGD'):
self.optimizer = torch.optim.SGD(self.meta_train_params, lr=lr)
else:
raise NotImplementedError('Optimizer must be Adam or SGD')
if (lr_decay < 1.0):
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 1000, gamma=lr_decay)
else:
self.lr_scheduler = DummyLRScheduler()
def _vectorize_pred_dist(self, pred_dist):
return torch.distributions.Normal(pred_dist.mean, pred_dist.stddev)
def prior_mean(self, x, n_hyperposterior_samples=1000):
x = ((x - self.x_mean) / self.x_std)
assert ((x.ndim == 1) or ((x.ndim == 2) and (x.shape[(- 1)] == 1)))
x_data_tiled = np.tile(x.reshape(1, x.shape[0], 1), (n_hyperposterior_samples, 1, 1))
x_data_tiled = torch.tensor(x_data_tiled, dtype=torch.float32)
with torch.no_grad():
param_sample = self.hyper_posterior.rsample(sample_shape=(n_hyperposterior_samples,))
fn = self.random_gp.get_forward_fn(param_sample)
(gp, _) = fn(x_data_tiled, None, prior=True)
prior = gp(x_data_tiled)
mean = ((torch.mean(prior.mean, axis=0).numpy() * self.y_std) + self.y_mean)
return mean
def _aggregate_gp_priors(self, prior_param_sample, jitter=1e-05):
assert (prior_param_sample.ndim == 2)
n_samples = prior_param_sample.shape[0]
forward_fn = self.random_gp.get_forward_fn(prior_param_sample)
def _tile(x_data):
assert (x_data.ndim == 2)
return x_data.view((torch.Size((1,)) + x_data.shape)).repeat(n_samples, 1, 1)
def mean_module(x):
(gp, _) = forward_fn(_tile(x), None, prior=True)
mean = torch.mean(gp.forward(x).mean, axis=0)
return mean
def covar_module(x):
(gp, _) = forward_fn(_tile(x), None, prior=True)
dist = gp.forward(x)
mean = torch.mean(dist.mean, axis=0)
residual = (dist.mean - mean)
cov_loc = torch.mean(torch.bmm(residual.unsqueeze(2), residual.unsqueeze(1)), axis=0)
cov_var = torch.mean(dist.covariance_matrix, axis=0)
return ((cov_loc + cov_var) + (jitter * torch.eye(cov_var.shape[0])))
return (mean_module, covar_module)
|
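# Minimal usage sketch for GPRegressionMetaLearnedPAC, assuming the SinusoidDataset generator from the
# experiment scripts above is importable. Hyperparameter values are illustrative only.
import numpy as np

dataset = SinusoidDataset(random_state=np.random.RandomState(29))
meta_train_tuples = dataset.generate_meta_test_data(n_tasks=16, n_samples_context=10, n_samples_test=50)
meta_train_data = [(x_c, y_c) for (x_c, y_c, _, _) in meta_train_tuples]

pacoh_pac = GPRegressionMetaLearnedPAC(meta_train_data, num_iter_fit=2000, covar_module='NN', mean_module='NN', task_batch_size=4)
pacoh_pac.meta_fit(log_period=500)

# meta-test: condition on a fresh context set and predict at query points
(context_x, context_y, test_x, test_y) = dataset.generate_meta_test_data(n_tasks=1, n_samples_context=10, n_samples_test=50)[0]
pred_mean, pred_std = pacoh_pac.predict(context_x, context_y, test_x, n_iter_meta_test=500)
|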
def _kl_divergence_safe(posterior, prior):
for jitter_eps in [1e-06, 1e-05, 0.0001]:
try:
return torch.distributions.kl.kl_divergence(posterior, prior)
except RuntimeError:
posterior = _add_jitter(posterior, eps=jitter_eps)
prior = _add_jitter(prior, eps=jitter_eps)
import warnings
warnings.warn(('added jitter of %s to the diagonal posterior and prior covariance' % str(jitter_eps)))
return torch.distributions.kl.kl_divergence(posterior, prior)
|
def _add_jitter(distr, eps=1e-06):
from meta_learn.models import GaussianLikelihoodLight
jitter = GaussianLikelihoodLight(noise_var=(eps * torch.ones((1,))))
return jitter(distr)
|
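# Illustration of the jitter idea behind _kl_divergence_safe / _add_jitter, written with plain
# torch.distributions instead of the project-internal GaussianLikelihoodLight: add increasing amounts
# of diagonal noise until both covariances factorize and the KL computation succeeds. The helper name
# and the example covariance below are illustrative, not part of the original code.
import torch

def kl_mvn_with_jitter(mean_q, cov_q, mean_p, cov_p, eps_schedule=(0.0, 1e-06, 1e-05, 0.0001)):
    # try increasingly large diagonal jitter until the KL divergence can be computed
    dim = mean_p.shape[-1]
    for eps in eps_schedule:
        try:
            q = torch.distributions.MultivariateNormal(mean_q, covariance_matrix=cov_q + eps * torch.eye(dim))
            p = torch.distributions.MultivariateNormal(mean_p, covariance_matrix=cov_p + eps * torch.eye(dim))
            return torch.distributions.kl.kl_divergence(q, p)
        except (RuntimeError, ValueError):
            continue
    raise RuntimeError('KL divergence could not be stabilized by adding jitter')

# a nearly rank-deficient covariance, as produced by evaluating a smooth GP kernel on close-by inputs
cov = torch.ones(5, 5) * 0.999 + 0.001 * torch.eye(5)
print(kl_mvn_with_jitter(torch.zeros(5) + 0.1, cov, torch.zeros(5), cov))
|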
class GPRegressionMetaLearned(RegressionModelMetaLearned):
def __init__(self, meta_train_data, learning_mode='both', lr_params=0.001, weight_decay=0.0, feature_dim=2, num_iter_fit=10000, covar_module='NN', mean_module='NN', mean_nn_layers=(32, 32), kernel_nn_layers=(32, 32), task_batch_size=5, normalize_data=True, optimizer='Adam', lr_decay=1.0, random_seed=None):
"\n Meta-Learning GP priors (i.e. mean and kernel function) via PACOH-MAP\n\n Args:\n meta_train_data: list of tuples of ndarrays[(train_x_1, train_t_1), ..., (train_x_n, train_t_n)]\n learning_mode: (str) specifying which of the GP prior parameters to optimize. Either one of\n ['learned_mean', 'learned_kernel', 'both', 'vanilla']\n lr_params: (float) learning rate for GP prior parameters\n weight_decay: (float) weight decay multiplier for meta-level regularization\n feature_dim: (int) output dimensionality of NN feature map for kernel function\n num_iter_fit: (int) number of gradient steps for fitting the parameters\n covar_module: (gpytorch.mean.Kernel) optional kernel module, default: RBF kernel\n mean_module: (gpytorch.mean.Mean) optional mean module, default: ZeroMean\n mean_nn_layers: (tuple) hidden layer sizes of mean NN\n kernel_nn_layers: (tuple) hidden layer sizes of kernel NN\n learning_rate: (float) learning rate for AdamW optimizer\n task_batch_size: (int) batch size for meta training, i.e. number of tasks for computing gradients\n optimizer: (str) type of optimizer to use - must be either 'Adam' or 'SGD'\n lr_decay: (str) multiplicative learning rate decay applied every 1000 iterations\n random_seed: (int) seed for pytorch\n "
super().__init__(normalize_data, random_seed)
assert (learning_mode in ['learn_mean', 'learn_kernel', 'both', 'vanilla'])
assert ((mean_module in ['NN', 'constant', 'zero']) or isinstance(mean_module, gpytorch.means.Mean))
assert ((covar_module in ['NN', 'SE']) or isinstance(covar_module, gpytorch.kernels.Kernel))
assert (optimizer in ['Adam', 'SGD'])
(self.lr_params, self.weight_decay, self.feature_dim) = (lr_params, weight_decay, feature_dim)
(self.num_iter_fit, self.task_batch_size, self.normalize_data) = (num_iter_fit, task_batch_size, normalize_data)
self._check_meta_data_shapes(meta_train_data)
self._compute_normalization_stats(meta_train_data)
self._setup_gp_prior(mean_module, covar_module, learning_mode, feature_dim, mean_nn_layers, kernel_nn_layers)
self.likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_constraint=gpytorch.likelihoods.noise_models.GreaterThan(0.001)).to(device)
self.shared_parameters.append({'params': self.likelihood.parameters(), 'lr': self.lr_params})
self.task_dicts = []
for (train_x, train_y) in meta_train_data:
task_dict = {}
(x_tensor, y_tensor) = self._prepare_data_per_task(train_x, train_y)
(task_dict['train_x'], task_dict['train_y']) = (x_tensor, y_tensor)
task_dict['model'] = LearnedGPRegressionModel(task_dict['train_x'], task_dict['train_y'], self.likelihood, learned_kernel=self.nn_kernel_map, learned_mean=self.nn_mean_fn, covar_module=self.covar_module, mean_module=self.mean_module)
task_dict['mll_fn'] = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, task_dict['model']).to(device)
self.task_dicts.append(task_dict)
self._setup_optimizer(optimizer, lr_params, lr_decay)
self.fitted = False
def meta_fit(self, valid_tuples=None, verbose=True, log_period=500, n_iter=None):
'\n meta-learns the GP prior parameters\n\n Args:\n valid_tuples: list of valid tuples, i.e. [(test_context_x_1, test_context_t_1, test_x_1, test_t_1), ...]\n verbose: (boolean) whether to print training progress\n log_period: (int) number of steps after which to print stats\n n_iter: (int) number of gradient descent iterations\n '
for task_dict in self.task_dicts:
task_dict['model'].train()
self.likelihood.train()
assert ((valid_tuples is None) or all([(len(valid_tuple) == 4) for valid_tuple in valid_tuples]))
if (len(self.shared_parameters) > 0):
t = time.time()
cum_loss = 0.0
if (n_iter is None):
n_iter = self.num_iter_fit
for itr in range(1, (n_iter + 1)):
loss = 0.0
self.optimizer.zero_grad()
for task_dict in self.rds_numpy.choice(self.task_dicts, size=self.task_batch_size):
output = task_dict['model'](task_dict['train_x'])
mll = task_dict['mll_fn'](output, task_dict['train_y'])
loss -= mll
loss.backward()
self.optimizer.step()
self.lr_scheduler.step()
cum_loss += loss
if ((itr == 1) or ((itr % log_period) == 0)):
duration = (time.time() - t)
avg_loss = (cum_loss / (log_period if (itr > 1) else 1.0))
cum_loss = 0.0
t = time.time()
message = ('Iter %d/%d - Loss: %.6f - Time %.2f sec' % (itr, self.num_iter_fit, avg_loss.item(), duration))
if (valid_tuples is not None):
self.likelihood.eval()
(valid_ll, valid_rmse, calibr_err) = self.eval_datasets(valid_tuples)
self.likelihood.train()
message += (' - Valid-LL: %.3f - Valid-RMSE: %.3f - Calib-Err %.3f' % (valid_ll, valid_rmse, calibr_err))
if verbose:
self.logger.info(message)
else:
self.logger.info('Vanilla mode - nothing to fit')
self.fitted = True
for task_dict in self.task_dicts:
task_dict['model'].eval()
self.likelihood.eval()
return loss.item()
def predict(self, context_x, context_y, test_x, return_density=False):
'\n Performs posterior inference (target training) with (context_x, context_y) as training data and then\n computes the predictive distribution of the targets p(y|test_x, test_context_x, context_y) in the test points\n\n Args:\n context_x: (ndarray) context input data for which to compute the posterior\n context_y: (ndarray) context targets for which to compute the posterior\n test_x: (ndarray) query input data of shape (n_samples, ndim_x)\n return_density: (bool) whether to return result as mean and std ndarray or as MultivariateNormal pytorch object\n\n Returns:\n (pred_mean, pred_std) predicted mean and standard deviation corresponding to p(t|test_x, test_context_x, context_y)\n '
(context_x, context_y) = _handle_input_dimensionality(context_x, context_y)
test_x = _handle_input_dimensionality(test_x)
assert (test_x.shape[1] == context_x.shape[1])
(context_x, context_y) = self._prepare_data_per_task(context_x, context_y)
test_x = self._normalize_data(X=test_x, Y=None)
test_x = torch.from_numpy(test_x).float().to(device)
with torch.no_grad():
gp_model = LearnedGPRegressionModel(context_x, context_y, self.likelihood, learned_kernel=self.nn_kernel_map, learned_mean=self.nn_mean_fn, covar_module=self.covar_module, mean_module=self.mean_module)
gp_model.eval()
self.likelihood.eval()
pred_dist = self.likelihood(gp_model(test_x))
pred_dist_transformed = AffineTransformedDistribution(pred_dist, normalization_mean=self.y_mean, normalization_std=self.y_std)
if return_density:
return pred_dist_transformed
else:
pred_mean = pred_dist_transformed.mean
pred_std = pred_dist_transformed.stddev
return (pred_mean.cpu().numpy(), pred_std.cpu().numpy())
def state_dict(self):
state_dict = {'optimizer': self.optimizer.state_dict(), 'model': self.task_dicts[0]['model'].state_dict()}
for task_dict in self.task_dicts:
for (key, tensor) in task_dict['model'].state_dict().items():
assert torch.all((state_dict['model'][key] == tensor)).item()
return state_dict
def load_state_dict(self, state_dict):
for task_dict in self.task_dicts:
task_dict['model'].load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
def _setup_gp_prior(self, mean_module, covar_module, learning_mode, feature_dim, mean_nn_layers, kernel_nn_layers):
self.shared_parameters = []
if (covar_module == 'NN'):
assert (learning_mode in ['learn_kernel', 'both']), 'neural network parameters must be learned'
self.nn_kernel_map = NeuralNetwork(input_dim=self.input_dim, output_dim=feature_dim, layer_sizes=kernel_nn_layers).to(device)
self.shared_parameters.append({'params': self.nn_kernel_map.parameters(), 'lr': self.lr_params, 'weight_decay': self.weight_decay})
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=feature_dim)).to(device)
else:
self.nn_kernel_map = None
if (covar_module == 'SE'):
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=self.input_dim)).to(device)
elif isinstance(covar_module, gpytorch.kernels.Kernel):
self.covar_module = covar_module.to(device)
if (mean_module == 'NN'):
assert (learning_mode in ['learn_mean', 'both']), 'neural network parameters must be learned'
self.nn_mean_fn = NeuralNetwork(input_dim=self.input_dim, output_dim=1, layer_sizes=mean_nn_layers).to(device)
self.shared_parameters.append({'params': self.nn_mean_fn.parameters(), 'lr': self.lr_params, 'weight_decay': self.weight_decay})
self.mean_module = None
else:
self.nn_mean_fn = None
if (mean_module == 'constant'):
self.mean_module = gpytorch.means.ConstantMean().to(device)
elif (mean_module == 'zero'):
self.mean_module = gpytorch.means.ZeroMean().to(device)
elif isinstance(mean_module, gpytorch.means.Mean):
self.mean_module = mean_module.to(device)
if (learning_mode in ['learn_kernel', 'both']):
self.shared_parameters.append({'params': self.covar_module.hyperparameters(), 'lr': self.lr_params})
if ((learning_mode in ['learn_mean', 'both']) and (self.mean_module is not None)):
self.shared_parameters.append({'params': self.mean_module.hyperparameters(), 'lr': self.lr_params})
def _setup_optimizer(self, optimizer, lr, lr_decay):
if (optimizer == 'Adam'):
self.optimizer = torch.optim.AdamW(self.shared_parameters, lr=lr, weight_decay=self.weight_decay)
elif (optimizer == 'SGD'):
self.optimizer = torch.optim.SGD(self.shared_parameters, lr=lr)
else:
raise NotImplementedError('Optimizer must be Adam or SGD')
if (lr_decay < 1.0):
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 1000, gamma=lr_decay)
else:
self.lr_scheduler = DummyLRScheduler()
def _vectorize_pred_dist(self, pred_dist):
return torch.distributions.Normal(pred_dist.mean, pred_dist.stddev)
|
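# Minimal usage sketch for the PACOH-MAP learner above, mirroring the meta-overfitting experiment script.
# Data generation assumes the CauchyDataset from the scripts above; hyperparameter values are illustrative.
import numpy as np

dataset = CauchyDataset(random_state=np.random.RandomState(29))
meta_train_tuples = dataset.generate_meta_test_data(n_tasks=32, n_samples_context=20, n_samples_test=100)
meta_test_tuples = dataset.generate_meta_test_data(n_tasks=50, n_samples_context=20, n_samples_test=100)
data_train = [(x_c, y_c) for (x_c, y_c, _, _) in meta_train_tuples]

gp_meta = GPRegressionMetaLearned(data_train, learning_mode='both', covar_module='NN', mean_module='NN', weight_decay=0.1, num_iter_fit=5000)
gp_meta.meta_fit(valid_tuples=meta_test_tuples, log_period=1000)
test_ll, test_rmse, calib_err = gp_meta.eval_datasets(meta_test_tuples)
|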
class GPRegressionMetaLearnedSVGD(RegressionModelMetaLearned):
def __init__(self, meta_train_data, num_iter_fit=10000, feature_dim=1, prior_factor=0.01, weight_prior_std=0.5, bias_prior_std=3.0, covar_module='NN', mean_module='NN', mean_nn_layers=(32, 32), kernel_nn_layers=(32, 32), optimizer='Adam', lr=0.001, lr_decay=1.0, kernel='RBF', bandwidth=None, num_particles=10, task_batch_size=(- 1), normalize_data=True, random_seed=None):
"\n PACOH-SVGD: Stein Variational Gradient Descent on PAC-optimal hyper-posterior.\n Meta-learns a set of GP priors (i.e. mean and kernel function)\n\n Args:\n meta_train_data: list of tuples of ndarrays[(train_x_1, train_t_1), ..., (train_x_n, train_t_n)]\n num_iter_fit: (int) number of gradient steps for fitting the parameters\n feature_dim: (int) output dimensionality of NN feature map for kernel function\n prior_factor: (float) weighting of the hyper-prior (--> meta-regularization parameter)\n weight_prior_std (float): std of Gaussian hyper-prior on weights\n bias_prior_std (float): std of Gaussian hyper-prior on biases\n covar_module: (gpytorch.mean.Kernel) optional kernel module, default: RBF kernel\n mean_module: (gpytorch.mean.Mean) optional mean module, default: ZeroMean\n mean_nn_layers: (tuple) hidden layer sizes of mean NN\n kernel_nn_layers: (tuple) hidden layer sizes of kernel NN\n optimizer: (str) type of optimizer to use - must be either 'Adam' or 'SGD'\n lr: (float) learning rate for prior parameters\n lr_decay: (float) lr rate decay multiplier applied after every 1000 steps\n kernel (std): SVGD kernel, either 'RBF' or 'IMQ'\n bandwidth (float): bandwidth of kernel, if None the bandwidth is chosen via heuristic\n num_particles: (int) number particles to approximate the hyper-posterior\n task_batch_size: (int) mini-batch size of tasks for estimating gradients\n normalize_data: (bool) whether the data should be normalized\n random_seed: (int) seed for pytorch\n "
super().__init__(normalize_data, random_seed)
assert ((mean_module in ['NN', 'constant', 'zero']) or isinstance(mean_module, gpytorch.means.Mean))
assert ((covar_module in ['NN', 'SE']) or isinstance(covar_module, gpytorch.kernels.Kernel))
assert (optimizer in ['Adam', 'SGD'])
(self.num_iter_fit, self.prior_factor, self.feature_dim) = (num_iter_fit, prior_factor, feature_dim)
(self.weight_prior_std, self.bias_prior_std) = (weight_prior_std, bias_prior_std)
self.num_particles = num_particles
if (task_batch_size < 1):
self.task_batch_size = len(meta_train_data)
else:
self.task_batch_size = min(task_batch_size, len(meta_train_data))
self._check_meta_data_shapes(meta_train_data)
self._compute_normalization_stats(meta_train_data)
' --- Setup model & inference --- '
self._setup_model_inference(mean_module, covar_module, mean_nn_layers, kernel_nn_layers, kernel, bandwidth, optimizer, lr, lr_decay)
self.task_dicts = []
for (train_x, train_y) in meta_train_data:
task_dict = {}
(x_tensor, y_tensor) = self._prepare_data_per_task(train_x, train_y)
(task_dict['train_x'], task_dict['train_y']) = (x_tensor, y_tensor)
self.task_dicts.append(task_dict)
self.fitted = False
def meta_fit(self, valid_tuples=None, verbose=True, log_period=500, n_iter=None):
'\n fits the hyper-posterior particles with SVGD\n\n Args:\n valid_tuples: list of valid tuples, i.e. [(test_context_x_1, test_context_t_1, test_x_1, test_t_1), ...]\n verbose: (boolean) whether to print training progress\n log_period (int) number of steps after which to print stats\n n_iter: (int) number of gradient descent iterations\n '
assert ((valid_tuples is None) or all([(len(valid_tuple) == 4) for valid_tuple in valid_tuples]))
t = time.time()
if (n_iter is None):
n_iter = self.num_iter_fit
for itr in range(1, (n_iter + 1)):
task_dict_batch = self.rds_numpy.choice(self.task_dicts, size=self.task_batch_size)
self.svgd_step(task_dict_batch)
self.lr_scheduler.step()
if ((itr == 1) or ((itr % log_period) == 0)):
duration = (time.time() - t)
t = time.time()
message = ('Iter %d/%d - Time %.2f sec' % (itr, self.num_iter_fit, duration))
if (valid_tuples is not None):
(valid_ll, valid_rmse, calibr_err) = self.eval_datasets(valid_tuples)
message += (' - Valid-LL: %.3f - Valid-RMSE: %.3f - Calib-Err %.3f' % (valid_ll, valid_rmse, calibr_err))
if verbose:
self.logger.info(message)
self.fitted = True
def predict(self, context_x, context_y, test_x, return_density=False):
'\n Performs posterior inference (target training) with (context_x, context_y) as training data and then\n computes the predictive distribution of the targets p(y|test_x, test_context_x, context_y) in the test points\n\n Args:\n context_x: (ndarray) context input data for which to compute the posterior\n context_y: (ndarray) context targets for which to compute the posterior\n test_x: (ndarray) query input data of shape (n_samples, ndim_x)\n return_density: (bool) whether to return result as mean and std ndarray or as MultivariateNormal pytorch object\n\n Returns:\n (pred_mean, pred_std) predicted mean and standard deviation corresponding to p(t|test_x, test_context_x, context_y)\n '
(context_x, context_y) = _handle_input_dimensionality(context_x, context_y)
test_x = _handle_input_dimensionality(test_x)
assert (test_x.shape[1] == context_x.shape[1])
(context_x, context_y) = self._prepare_data_per_task(context_x, context_y)
test_x = self._normalize_data(X=test_x, Y=None)
test_x = torch.from_numpy(test_x).float().to(device)
with torch.no_grad():
pred_dist = self.get_pred_dist(context_x, context_y, test_x)
pred_dist = AffineTransformedDistribution(pred_dist, normalization_mean=self.y_mean, normalization_std=self.y_std)
pred_dist = EqualWeightedMixtureDist(pred_dist, batched=True)
if return_density:
return pred_dist
else:
pred_mean = pred_dist.mean.cpu().numpy()
pred_std = pred_dist.stddev.cpu().numpy()
return (pred_mean, pred_std)
def _setup_model_inference(self, mean_module_str, covar_module_str, mean_nn_layers, kernel_nn_layers, kernel, bandwidth, optimizer, lr, lr_decay):
assert (mean_module_str in ['NN', 'constant'])
assert (covar_module_str in ['NN', 'SE'])
' random gp model '
self.random_gp = RandomGPMeta(size_in=self.input_dim, prior_factor=self.prior_factor, weight_prior_std=self.weight_prior_std, bias_prior_std=self.bias_prior_std, covar_module_str=covar_module_str, mean_module_str=mean_module_str, mean_nn_layers=mean_nn_layers, kernel_nn_layers=kernel_nn_layers)
' Setup SVGD inference'
if (kernel == 'RBF'):
kernel = RBF_Kernel(bandwidth=bandwidth)
elif (kernel == 'IMQ'):
kernel = IMQSteinKernel(bandwidth=bandwidth)
else:
raise NotImplementedError("SVGD kernel must be either 'RBF' or 'IMQ'")
self.particles = self.random_gp.sample_params_from_prior(shape=(self.num_particles,))
self._setup_optimizer(optimizer, lr, lr_decay)
self.svgd = SVGD(self.random_gp, kernel, optimizer=self.optimizer)
' define svgd step '
def svgd_step(tasks_dicts):
train_data_tuples_tiled = []
for task_dict in tasks_dicts:
(x_data, y_data) = (task_dict['train_x'], task_dict['train_y'])
x_data = x_data.view((torch.Size((1,)) + x_data.shape)).repeat(self.num_particles, 1, 1)
y_data = y_data.view((torch.Size((1,)) + y_data.shape)).repeat(self.num_particles, 1)
train_data_tuples_tiled.append((x_data, y_data))
self.svgd.step(self.particles, train_data_tuples_tiled)
' define predictive dist '
def get_pred_dist(x_context, y_context, x_valid):
with torch.no_grad():
x_context = x_context.view((torch.Size((1,)) + x_context.shape)).repeat(self.num_particles, 1, 1)
y_context = y_context.view((torch.Size((1,)) + y_context.shape)).repeat(self.num_particles, 1)
x_valid = x_valid.view((torch.Size((1,)) + x_valid.shape)).repeat(self.num_particles, 1, 1)
gp_fn = self.random_gp.get_forward_fn(self.particles)
(gp, likelihood) = gp_fn(x_context, y_context, train=False)
pred_dist = likelihood(gp(x_valid))
return pred_dist
self.svgd_step = svgd_step
self.get_pred_dist = get_pred_dist
def _setup_optimizer(self, optimizer, lr, lr_decay):
assert hasattr(self, 'particles'), 'SVGD must be initialized before setting up optimizer'
if (optimizer == 'Adam'):
self.optimizer = torch.optim.Adam([self.particles], lr=lr)
elif (optimizer == 'SGD'):
self.optimizer = torch.optim.SGD([self.particles], lr=lr)
else:
raise NotImplementedError('Optimizer must be Adam or SGD')
if (lr_decay < 1.0):
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 1000, gamma=lr_decay)
else:
self.lr_scheduler = DummyLRScheduler()
def _vectorize_pred_dist(self, pred_dist):
multiv_normal_batched = pred_dist.dists
normal_batched = torch.distributions.Normal(multiv_normal_batched.mean, multiv_normal_batched.stddev)
return EqualWeightedMixtureDist(normal_batched, batched=True, num_dists=multiv_normal_batched.batch_shape[0])
|
class GPRegressionMetaLearnedVI(RegressionModelMetaLearned):
def __init__(self, meta_train_data, num_iter_fit=10000, feature_dim=1, prior_factor=0.01, weight_prior_std=0.5, bias_prior_std=3.0, covar_module='NN', mean_module='NN', mean_nn_layers=(32, 32), kernel_nn_layers=(32, 32), optimizer='Adam', lr=0.001, lr_decay=1.0, svi_batch_size=10, cov_type='diag', task_batch_size=(- 1), normalize_data=True, random_seed=None):
"\n PACOH-VI: Variational Inference on the PAC-optimal hyper-posterior with Gaussian family.\n Meta-Learns a distribution over GP-priors.\n\n Args:\n meta_train_data: list of tuples of ndarrays[(train_x_1, train_t_1), ..., (train_x_n, train_t_n)]\n num_iter_fit: (int) number of gradient steps for fitting the parameters\n feature_dim: (int) output dimensionality of NN feature map for kernel function\n prior_factor: (float) weighting of the hyper-prior (--> meta-regularization parameter)\n weight_prior_std (float): std of Gaussian hyper-prior on weights\n bias_prior_std (float): std of Gaussian hyper-prior on biases\n covar_module: (gpytorch.mean.Kernel) optional kernel module, default: RBF kernel\n mean_module: (gpytorch.mean.Mean) optional mean module, default: ZeroMean\n mean_nn_layers: (tuple) hidden layer sizes of mean NN\n kernel_nn_layers: (tuple) hidden layer sizes of kernel NN\n optimizer: (str) type of optimizer to use - must be either 'Adam' or 'SGD'\n lr: (float) learning rate for prior parameters\n lr_decay: (float) lr rate decay multiplier applied after every 1000 steps\n kernel (std): SVGD kernel, either 'RBF' or 'IMQ'\n bandwidth (float): bandwidth of kernel, if None the bandwidth is chosen via heuristic\n num_particles: (int) number particles to approximate the hyper-posterior\n task_batch_size: (int) mini-batch size of tasks for estimating gradients\n normalize_data: (bool) whether the data should be normalized\n random_seed: (int) seed for pytorch\n "
super().__init__(normalize_data, random_seed)
assert ((mean_module in ['NN', 'constant', 'zero']) or isinstance(mean_module, gpytorch.means.Mean))
assert ((covar_module in ['NN', 'SE']) or isinstance(covar_module, gpytorch.kernels.Kernel))
assert (optimizer in ['Adam', 'SGD'])
(self.num_iter_fit, self.prior_factor, self.feature_dim) = (num_iter_fit, prior_factor, feature_dim)
(self.weight_prior_std, self.bias_prior_std) = (weight_prior_std, bias_prior_std)
self.svi_batch_size = svi_batch_size
if (task_batch_size < 1):
self.task_batch_size = len(meta_train_data)
else:
self.task_batch_size = min(task_batch_size, len(meta_train_data))
self._check_meta_data_shapes(meta_train_data)
self._compute_normalization_stats(meta_train_data)
' --- Setup model & inference --- '
self._setup_model_inference(mean_module, covar_module, mean_nn_layers, kernel_nn_layers, cov_type)
self._setup_optimizer(optimizer, lr, lr_decay)
self.task_dicts = []
for (train_x, train_y) in meta_train_data:
task_dict = {}
(x_tensor, y_tensor) = self._prepare_data_per_task(train_x, train_y)
(task_dict['train_x'], task_dict['train_y']) = (x_tensor, y_tensor)
self.task_dicts.append(task_dict)
self.fitted = False
def meta_fit(self, valid_tuples=None, verbose=True, log_period=500, n_iter=None):
'\n fits the variational hyper-posterior by minimizing the negative ELBO\n\n Args:\n valid_tuples: list of valid tuples, i.e. [(test_context_x_1, test_context_t_1, test_x_1, test_t_1), ...]\n verbose: (boolean) whether to print training progress\n log_period (int) number of steps after which to print stats\n n_iter: (int) number of gradient descent iterations\n '
assert ((valid_tuples is None) or all([(len(valid_tuple) == 4) for valid_tuple in valid_tuples]))
t = time.time()
if (n_iter is None):
n_iter = self.num_iter_fit
for itr in range(1, (n_iter + 1)):
task_dict_batch = self.rds_numpy.choice(self.task_dicts, size=self.task_batch_size)
self.optimizer.zero_grad()
loss = self.get_neg_elbo(task_dict_batch)
loss.backward()
self.optimizer.step()
self.lr_scheduler.step()
if ((itr == 1) or ((itr % log_period) == 0)):
duration = (time.time() - t)
t = time.time()
message = ('Iter %d/%d - Loss: %.6f - Time %.2f sec' % (itr, self.num_iter_fit, loss.item(), duration))
if (valid_tuples is not None):
(valid_ll, valid_rmse, calibr_err) = self.eval_datasets(valid_tuples)
message += (' - Valid-LL: %.3f - Valid-RMSE: %.3f - Calib-Err %.3f' % (valid_ll, valid_rmse, calibr_err))
if verbose:
self.logger.info(message)
self.fitted = True
return loss.item()
def predict(self, context_x, context_y, test_x, n_posterior_samples=100, mode='Bayes', return_density=False):
"\n computes the predictive distribution of the targets p(t|test_x, test_context_x, context_y)\n\n Args:\n context_x: (ndarray) context input data for which to compute the posterior\n context_y: (ndarray) context targets for which to compute the posterior\n test_x: (ndarray) query input data of shape (n_samples, ndim_x)\n n_posterior_samples: (int) number of samples from posterior to average over\n mode: (std) either of ['Bayes' , 'MAP']\n return_density: (bool) whether to return result as mean and std ndarray or as MultivariateNormal pytorch object\n\n Returns:\n (pred_mean, pred_std) predicted mean and standard deviation corresponding to p(t|test_x, test_context_x, context_y)\n "
assert (mode in ['bayes', 'Bayes', 'MAP', 'map'])
(context_x, context_y) = _handle_input_dimensionality(context_x, context_y)
test_x = _handle_input_dimensionality(test_x)
assert (test_x.shape[1] == context_x.shape[1])
(context_x, context_y) = self._prepare_data_per_task(context_x, context_y)
test_x = self._normalize_data(X=test_x, Y=None)
test_x = torch.from_numpy(test_x).float().to(device)
with torch.no_grad():
if ((mode == 'Bayes') or (mode == 'bayes')):
pred_dist = self.get_pred_dist(context_x, context_y, test_x, n_post_samples=n_posterior_samples)
pred_dist = AffineTransformedDistribution(pred_dist, normalization_mean=self.y_mean, normalization_std=self.y_std)
pred_dist = EqualWeightedMixtureDist(pred_dist, batched=True)
else:
pred_dist = self.get_pred_dist_map(context_x, context_y, test_x)
pred_dist = AffineTransformedDistribution(pred_dist, normalization_mean=self.y_mean, normalization_std=self.y_std)
if return_density:
return pred_dist
else:
pred_mean = pred_dist.mean.cpu().numpy()
pred_std = pred_dist.stddev.cpu().numpy()
return (pred_mean, pred_std)
def state_dict(self):
state_dict = {'optimizer': self.optimizer.state_dict(), 'model': self.task_dicts[0]['model'].state_dict()}
for task_dict in self.task_dicts:
for (key, tensor) in task_dict['model'].state_dict().items():
assert torch.all((state_dict['model'][key] == tensor)).item()
return state_dict
def load_state_dict(self, state_dict):
for task_dict in self.task_dicts:
task_dict['model'].load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
def _setup_model_inference(self, mean_module_str, covar_module_str, mean_nn_layers, kernel_nn_layers, cov_type):
assert (mean_module_str in ['NN', 'constant'])
assert (covar_module_str in ['NN', 'SE'])
' random gp model '
self.random_gp = RandomGPMeta(size_in=self.input_dim, prior_factor=self.prior_factor, weight_prior_std=self.weight_prior_std, bias_prior_std=self.bias_prior_std, covar_module_str=covar_module_str, mean_module_str=mean_module_str, mean_nn_layers=mean_nn_layers, kernel_nn_layers=kernel_nn_layers)
param_shapes_dict = self.random_gp.parameter_shapes()
' variational posterior '
self.posterior = RandomGPPosterior(param_shapes_dict, cov_type=cov_type)
def _tile_data_tuples(tasks_dicts, tile_size):
train_data_tuples_tiled = []
for task_dict in tasks_dicts:
(x_data, y_data) = (task_dict['train_x'], task_dict['train_y'])
x_data = x_data.view((torch.Size((1,)) + x_data.shape)).repeat(tile_size, 1, 1)
y_data = y_data.view((torch.Size((1,)) + y_data.shape)).repeat(tile_size, 1)
train_data_tuples_tiled.append((x_data, y_data))
return train_data_tuples_tiled
' define negative ELBO '
def get_neg_elbo(tasks_dicts):
data_tuples_tiled = _tile_data_tuples(tasks_dicts, self.svi_batch_size)
param_sample = self.posterior.rsample(sample_shape=(self.svi_batch_size,))
elbo = (self.random_gp.log_prob(param_sample, data_tuples_tiled) - (self.prior_factor * self.posterior.log_prob(param_sample)))
assert ((elbo.ndim == 1) and (elbo.shape[0] == self.svi_batch_size))
return (- torch.mean(elbo))
self.get_neg_elbo = get_neg_elbo
' define predictive dist '
def get_pred_dist(x_context, y_context, x_valid, n_post_samples=100):
with torch.no_grad():
x_context = x_context.view((torch.Size((1,)) + x_context.shape)).repeat(n_post_samples, 1, 1)
y_context = y_context.view((torch.Size((1,)) + y_context.shape)).repeat(n_post_samples, 1)
x_valid = x_valid.view((torch.Size((1,)) + x_valid.shape)).repeat(n_post_samples, 1, 1)
param_sample = self.posterior.sample(sample_shape=(n_post_samples,))
gp_fn = self.random_gp.get_forward_fn(param_sample)
(gp, likelihood) = gp_fn(x_context, y_context, train=False)
pred_dist = likelihood(gp(x_valid))
return pred_dist
def get_pred_dist_map(x_context, y_context, x_valid):
with torch.no_grad():
x_context = x_context.view((torch.Size((1,)) + x_context.shape)).repeat(1, 1, 1)
y_context = y_context.view((torch.Size((1,)) + y_context.shape)).repeat(1, 1)
x_valid = x_valid.view((torch.Size((1,)) + x_valid.shape)).repeat(1, 1, 1)
param = self.posterior.mode
param = param.view((torch.Size((1,)) + param.shape)).repeat(1, 1)
gp_fn = self.random_gp.get_forward_fn(param)
(gp, likelihood) = gp_fn(x_context, y_context, train=False)
pred_dist = likelihood(gp(x_valid))
return MultivariateNormal(pred_dist.loc, pred_dist.covariance_matrix[0])
self.get_pred_dist = get_pred_dist
self.get_pred_dist_map = get_pred_dist_map
def _setup_optimizer(self, optimizer, lr, lr_decay):
if (optimizer == 'Adam'):
self.optimizer = torch.optim.Adam(self.posterior.parameters(), lr=lr)
elif (optimizer == 'SGD'):
self.optimizer = torch.optim.SGD(self.posterior.parameters(), lr=lr)
else:
raise NotImplementedError('Optimizer must be Adam or SGD')
if (lr_decay < 1.0):
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 1000, gamma=lr_decay)
else:
self.lr_scheduler = DummyLRScheduler()
def _vectorize_pred_dist(self, pred_dist):
multiv_normal_batched = pred_dist.dists
normal_batched = torch.distributions.Normal(multiv_normal_batched.mean, multiv_normal_batched.stddev)
return EqualWeightedMixtureDist(normal_batched, batched=True, num_dists=multiv_normal_batched.batch_shape[0])
|
class GPRegressionLearned(RegressionModel):
def __init__(self, train_x, train_t, learning_mode='both', lr=0.001, weight_decay=0.0, feature_dim=2, num_iter_fit=1000, covar_module='NN', mean_module='NN', mean_nn_layers=(32, 32), kernel_nn_layers=(32, 32), optimizer='Adam', normalize_data=True, lr_scheduler=True, random_seed=None):
"\n Gaussian Process Regression with learnable mean and kernel function.\n Note that this class does not perform any meta-learning. The GP priors mean and kernel function are learned\n based on the same train dataset that is also used for posterior inference.\n\n Args:\n train_x: (ndarray) train inputs - shape: (n_sampls, ndim_x)\n train_t: (ndarray) train targets - shape: (n_sampls, 1)\n learning_mode: (str) specifying which of the GP prior parameters to optimize. Either one of\n ['learned_mean', 'learned_kernel', 'both', 'vanilla']\n lr: (float) learning rate for prior parameters\n weight_decay: (float) weight decay penalty\n feature_dim: (int) output dimensionality of NN feature map for kernel function\n num_iter_fit: (int) number of gradient steps for fitting the parameters\n covar_module: (gpytorch.mean.Kernel) optional kernel module, default: RBF kernel\n mean_module: (gpytorch.mean.Mean) optional mean module, default: ZeroMean\n mean_nn_layers: (tuple) hidden layer sizes of mean NN\n kernel_nn_layers: (tuple) hidden layer sizes of kernel NN\n optimizer: (str) type of optimizer to use - must be either 'Adam' or 'SGD'\n random_seed: (int) seed for pytorch\n "
super().__init__(normalize_data=normalize_data, random_seed=random_seed)
assert (learning_mode in ['learn_mean', 'learn_kernel', 'both', 'vanilla'])
assert ((mean_module in ['NN', 'constant', 'zero']) or isinstance(mean_module, gpytorch.means.Mean))
assert ((covar_module in ['NN', 'SE']) or isinstance(covar_module, gpytorch.kernels.Kernel))
assert (optimizer in ['Adam', 'SGD'])
(self.lr, self.weight_decay, self.num_iter_fit, self.lr_scheduler) = (lr, weight_decay, num_iter_fit, lr_scheduler)
' ------ Data handling ------ '
(self.train_x_tensor, self.train_t_tensor) = self._initial_data_handling(train_x, train_t)
assert (self.train_t_tensor.shape[(- 1)] == 1)
self.train_t_tensor = self.train_t_tensor.flatten()
' ------ Setup model ------ '
self.parameters = []
if (covar_module == 'NN'):
assert (learning_mode in ['learn_kernel', 'both']), 'neural network parameters must be learned'
nn_kernel_map = NeuralNetwork(input_dim=self.input_dim, output_dim=feature_dim, layer_sizes=kernel_nn_layers).to(device)
self.parameters.append({'params': nn_kernel_map.parameters(), 'lr': self.lr, 'weight_decay': self.weight_decay})
covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=feature_dim)).to(device)
else:
nn_kernel_map = None
if (covar_module == 'SE'):
covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=self.input_dim)).to(device)
if (mean_module == 'NN'):
assert (learning_mode in ['learn_mean', 'both']), 'neural network parameters must be learned'
nn_mean_fn = NeuralNetwork(input_dim=self.input_dim, output_dim=1, layer_sizes=mean_nn_layers).to(device)
self.parameters.append({'params': nn_mean_fn.parameters(), 'lr': self.lr, 'weight_decay': self.weight_decay})
mean_module = None
else:
nn_mean_fn = None
if (mean_module == 'constant'):
mean_module = gpytorch.means.ConstantMean().to(device)
elif (mean_module == 'zero'):
mean_module = gpytorch.means.ZeroMean().to(device)
self.likelihood = gpytorch.likelihoods.GaussianLikelihood().to(device)
self.parameters.append({'params': self.likelihood.parameters(), 'lr': self.lr})
self.model = LearnedGPRegressionModel(self.train_x_tensor, self.train_t_tensor, self.likelihood, learned_kernel=nn_kernel_map, learned_mean=nn_mean_fn, covar_module=covar_module, mean_module=mean_module)
self.mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self.model).to(device)
if (learning_mode in ['learn_kernel', 'both']):
self.parameters.append({'params': self.model.covar_module.hyperparameters(), 'lr': self.lr})
if ((learning_mode in ['learn_mean', 'both']) and (mean_module is not None)):
self.parameters.append({'params': self.model.mean_module.hyperparameters(), 'lr': self.lr})
if (optimizer == 'Adam'):
self.optimizer = torch.optim.AdamW(self.parameters)
elif (optimizer == 'SGD'):
self.optimizer = torch.optim.SGD(self.parameters)
else:
raise NotImplementedError('Optimizer must be Adam or SGD')
if self.lr_scheduler:
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='max', factor=0.2)
else:
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='max', factor=1.0)
self.fitted = False
def fit(self, valid_x=None, valid_t=None, verbose=True, log_period=500, n_iter=None):
'\n Fits the GP prior parameters by maximizing the marginal log-likelihood (mll) of the training data\n\n Args:\n verbose: (boolean) whether to print training progress\n valid_x: (np.ndarray) validation inputs - shape: (n_samples, ndim_x)\n valid_t: (np.ndarray) validation targets - shape: (n_samples, 1)\n log_period: (int) number of steps after which to print stats\n n_iter: (int) number of gradient descent iterations\n '
self.model.train()
self.likelihood.train()
assert (((valid_x is None) and (valid_t is None)) or (isinstance(valid_x, np.ndarray) and isinstance(valid_t, np.ndarray)))
if (len(self.parameters) > 0):
t = time.time()
if (n_iter is None):
n_iter = self.num_iter_fit
for itr in range(1, (n_iter + 1)):
self.optimizer.zero_grad()
output = self.model(self.train_x_tensor)
loss = (- self.mll(output, self.train_t_tensor))
loss.backward()
self.optimizer.step()
if ((itr == 1) or ((itr % log_period) == 0)):
duration = (time.time() - t)
t = time.time()
message = ('Iter %d/%d - Loss: %.3f - Time %.3f sec' % (itr, n_iter, loss.item(), duration))
if (valid_x is not None):
self.model.eval()
self.likelihood.eval()
(valid_ll, valid_rmse, calibr_err) = self.eval(valid_x, valid_t)
self.lr_scheduler.step(valid_ll)
self.model.train()
self.likelihood.train()
message += (' - Valid-LL: %.3f - Valid-RMSE: %.3f - Calib-Err %.3f' % (valid_ll, valid_rmse, calibr_err))
if verbose:
self.logger.info(message)
else:
self.logger.info('Vanilla mode - nothing to fit')
self.fitted = True
self.model.eval()
self.likelihood.eval()
return loss.item()
def predict(self, test_x, return_density=False, **kwargs):
'\n computes the predictive distribution of the targets p(t|test_x, train_x, train_y)\n\n Args:\n test_x: (ndarray) query input data of shape (n_samples, ndim_x)\n return_density: (bool) whether to return a density object or a tuple of (pred_mean, pred_std) arrays\n\n Returns:\n (pred_mean, pred_std) predicted mean and standard deviation corresponding to p(y_test|X_test, X_train, y_train)\n '
if (test_x.ndim == 1):
test_x = np.expand_dims(test_x, axis=(- 1))
with torch.no_grad():
test_x_normalized = self._normalize_data(test_x)
test_x_tensor = torch.from_numpy(test_x_normalized).contiguous().float().to(device)
pred_dist = self.likelihood(self.model(test_x_tensor))
pred_dist_transformed = AffineTransformedDistribution(pred_dist, normalization_mean=self.y_mean, normalization_std=self.y_std)
if return_density:
return pred_dist_transformed
else:
pred_mean = pred_dist_transformed.mean.cpu().numpy()
pred_std = pred_dist_transformed.stddev.cpu().numpy()
return (pred_mean, pred_std)
def state_dict(self):
state_dict = {'model': self.model.state_dict(), 'optimizer': self.optimizer.state_dict()}
return state_dict
def load_state_dict(self, state_dict):
self.model.load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
def _vectorize_pred_dist(self, pred_dist):
return torch.distributions.Normal(pred_dist.mean, pred_dist.stddev)
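# A minimal usage sketch (not part of the original module): it assumes GPRegressionLearned and its
# module-level dependencies (device, NeuralNetwork, LearnedGPRegressionModel, ...) are importable
# from this file, and uses synthetic data purely for illustration.
import numpy as np

x_train = np.random.uniform(-2, 2, size=(40, 1))
y_train = np.sin(3 * x_train) + 0.05 * np.random.randn(40, 1)
gpr = GPRegressionLearned(x_train, y_train, learning_mode='both', mean_module='NN', covar_module='NN', num_iter_fit=200, random_seed=1)
gpr.fit(verbose=False)
x_test = np.linspace(-2, 2, 100).reshape(-1, 1)
pred_mean, pred_std = gpr.predict(x_test)  # arrays of shape (n_test,)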
|
class GPRegressionLearnedPAC(RegressionModel):
def __init__(self, train_x, train_t, learning_mode='both', lr=0.001, delta=0.1, weight_decay=0.0, feature_dim=2, num_iter_fit=1000, covar_module='NN', mean_module='NN', mean_nn_layers=(32, 32), kernel_nn_layers=(32, 32), optimizer='Adam', normalize_data=True, lr_scheduler=True, random_seed=None):
"\n Gaussian Process Regression with learnable mean and kernel function. The mean and kernel function is learned\n by minimizing the McAllester PAC-Bayesian bound. Note that this class does not perform any meta-learning.\n The GP priors mean and kernel function are learned based on the same train dataset that is also\n used for posterior inference.\n\n Args:\n train_x: (ndarray) train inputs - shape: (n_sampls, ndim_x)\n train_t: (ndarray) train targets - shape: (n_sampls, 1)\n learning_mode: (str) specifying which of the GP prior parameters to optimize. Either one of\n ['learned_mean', 'learned_kernel', 'both', 'vanilla']\n lr: (float) learning rate for prior parameters\n weight_decay: (float) weight decay penalty\n feature_dim: (int) output dimensionality of NN feature map for kernel function\n num_iter_fit: (int) number of gradient steps for fitting the parameters\n covar_module: (gpytorch.mean.Kernel) optional kernel module, default: RBF kernel\n mean_module: (gpytorch.mean.Mean) optional mean module, default: ZeroMean\n mean_nn_layers: (tuple) hidden layer sizes of mean NN\n kernel_nn_layers: (tuple) hidden layer sizes of kernel NN\n optimizer: (str) type of optimizer to use - must be either 'Adam' or 'SGD'\n random_seed: (int) seed for pytorch\n "
super().__init__(normalize_data=normalize_data, random_seed=random_seed)
assert (learning_mode in ['learn_mean', 'learn_kernel', 'both', 'vanilla'])
assert ((mean_module in ['NN', 'constant', 'zero']) or isinstance(mean_module, gpytorch.means.Mean))
assert ((covar_module in ['NN', 'SE']) or isinstance(covar_module, gpytorch.kernels.Kernel))
assert (optimizer in ['Adam', 'SGD'])
(self.lr, self.weight_decay, self.num_iter_fit, self.lr_scheduler) = (lr, weight_decay, num_iter_fit, lr_scheduler)
self.delta = delta
# ------ Data handling ------
(self.train_x_tensor, self.train_t_tensor) = self._initial_data_handling(train_x, train_t)
assert (self.train_t_tensor.shape[(- 1)] == 1)
self.train_t_tensor = self.train_t_tensor.flatten()
# ------ Setup model ------
self.parameters = []
if (covar_module == 'NN'):
assert (learning_mode in ['learn_kernel', 'both']), 'neural network parameters must be learned'
nn_kernel_map = NeuralNetwork(input_dim=self.input_dim, output_dim=feature_dim, layer_sizes=kernel_nn_layers).to(device)
self.parameters.append({'params': nn_kernel_map.parameters(), 'lr': self.lr, 'weight_decay': self.weight_decay})
covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=feature_dim)).to(device)
else:
nn_kernel_map = None
if (covar_module == 'SE'):
covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=self.input_dim)).to(device)
if (mean_module == 'NN'):
assert (learning_mode in ['learn_mean', 'both']), 'neural network parameters must be learned'
nn_mean_fn = NeuralNetwork(input_dim=self.input_dim, output_dim=1, layer_sizes=mean_nn_layers).to(device)
self.parameters.append({'params': nn_mean_fn.parameters(), 'lr': self.lr, 'weight_decay': self.weight_decay})
mean_module = None
else:
nn_mean_fn = None
if (mean_module == 'constant'):
mean_module = gpytorch.means.ConstantMean().to(device)
elif (mean_module == 'zero'):
mean_module = gpytorch.means.ZeroMean().to(device)
self.likelihood = gpytorch.likelihoods.GaussianLikelihood().to(device)
self.parameters.append({'params': self.likelihood.parameters(), 'lr': self.lr})
self.model = LearnedGPRegressionModelApproximate(self.train_x_tensor, self.train_t_tensor, self.likelihood, learned_kernel=nn_kernel_map, learned_mean=nn_mean_fn, covar_module=covar_module, mean_module=mean_module)
self.model.eval()
self.model(self.train_x_tensor)
self.parameters.append({'params': self.model.variational_parameters(), 'lr': self.lr})
if (learning_mode in ['learn_kernel', 'both']):
self.parameters.append({'params': self.model.covar_module.hyperparameters(), 'lr': self.lr})
if ((learning_mode in ['learn_mean', 'both']) and (mean_module is not None)):
self.parameters.append({'params': self.model.mean_module.hyperparameters(), 'lr': self.lr})
if (optimizer == 'Adam'):
self.optimizer = torch.optim.AdamW(self.parameters)
elif (optimizer == 'SGD'):
self.optimizer = torch.optim.SGD(self.parameters)
else:
raise NotImplementedError('Optimizer must be Adam or SGD')
if self.lr_scheduler:
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='max', factor=0.2)
else:
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='max', factor=1.0)
self.fitted = False
def fit(self, valid_x=None, valid_t=None, verbose=True, log_period=500, n_iter=None):
'\n Fits the GP prior parameters by minimizing the McAllester PAC-Bayesian bound on the training data\n\n Args:\n verbose: (boolean) whether to print training progress\n valid_x: (np.ndarray) validation inputs - shape: (n_samples, ndim_x)\n valid_t: (np.ndarray) validation targets - shape: (n_samples, 1)\n log_period: (int) number of steps after which to print stats\n n_iter: (int) number of gradient descent iterations\n '
self.model.train()
self.likelihood.train()
assert (((valid_x is None) and (valid_t is None)) or (isinstance(valid_x, np.ndarray) and isinstance(valid_t, np.ndarray)))
if (len(self.parameters) > 0):
t = time.time()
if (n_iter is None):
n_iter = self.num_iter_fit
for itr in range(1, (n_iter + 1)):
self.optimizer.zero_grad()
posterior = self.model.variational_distribution()
prior = self.model.forward(self.train_x_tensor)
ll = self.likelihood.expected_log_prob(self.train_t_tensor, posterior).mean((- 1))
kl = torch.distributions.kl_divergence(posterior, prior)
n = torch.tensor(self.train_x_tensor.shape[0], dtype=torch.float32)
loss = ((- ll) + torch.sqrt(((kl + torch.log(((2 * torch.sqrt(n)) / self.delta))) / (2 * n))))
loss.backward()
self.optimizer.step()
if ((itr == 1) or ((itr % log_period) == 0)):
duration = (time.time() - t)
t = time.time()
message = ('Iter %d/%d - Loss: %.3f - LL: %.3f - KL: %.3f Time %.3f sec' % (itr, n_iter, loss.item(), ll.item(), kl.item(), duration))
if (valid_x is not None):
self.model.eval()
self.likelihood.eval()
(valid_ll, valid_rmse, calibr_err) = self.eval(valid_x, valid_t)
self.lr_scheduler.step(valid_ll)
self.model.train()
self.likelihood.train()
message += (' - Valid-LL: %.3f - Valid-RMSE: %.3f - Calib-Err %.3f' % (valid_ll, valid_rmse, calibr_err))
if verbose:
self.logger.info(message)
else:
self.logger.info('Vanilla mode - nothing to fit')
self.fitted = True
self.model.eval()
self.likelihood.eval()
return loss.item()
def predict(self, test_x, return_density=False, **kwargs):
'\n computes the predictive distribution of the targets p(t|test_x, train_x, train_y)\n\n Args:\n test_x: (ndarray) query input data of shape (n_samples, ndim_x)\n return_density: (bool) whether to return a density object or a tuple of (pred_mean, pred_std) arrays\n\n Returns:\n (pred_mean, pred_std) predicted mean and standard deviation corresponding to p(y_test|X_test, X_train, y_train)\n '
if (test_x.ndim == 1):
test_x = np.expand_dims(test_x, axis=(- 1))
with torch.no_grad():
test_x_normalized = self._normalize_data(test_x)
test_x_tensor = torch.from_numpy(test_x_normalized).contiguous().float().to(device)
pred_dist = self.likelihood(self.model(test_x_tensor))
pred_dist_transformed = AffineTransformedDistribution(pred_dist, normalization_mean=self.y_mean, normalization_std=self.y_std)
if return_density:
return pred_dist_transformed
else:
pred_mean = pred_dist_transformed.mean.cpu().numpy()
pred_std = pred_dist_transformed.stddev.cpu().numpy()
return (pred_mean, pred_std)
def state_dict(self):
state_dict = {'model': self.model.state_dict(), 'optimizer': self.optimizer.state_dict()}
return state_dict
def load_state_dict(self, state_dict):
self.model.load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
def _vectorize_pred_dist(self, pred_dist):
return torch.distributions.Normal(pred_dist.mean, pred_dist.stddev)
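# A standalone sketch of the per-iteration objective used in GPRegressionLearnedPAC.fit above:
# the negative expected log-likelihood plus the McAllester-style complexity term
# sqrt((KL + log(2*sqrt(n)/delta)) / (2*n)). Plain tensors only; this mirrors the loss computation.
import torch

def mcallester_objective(expected_ll, kl, n, delta=0.1):
    n = torch.as_tensor(n, dtype=torch.float32)
    complexity = torch.sqrt((kl + torch.log(2 * torch.sqrt(n) / delta)) / (2 * n))
    return -expected_ll + complexity

loss = mcallester_objective(torch.tensor(-1.2), torch.tensor(5.0), n=60)  # illustrative values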
|
def _filter(d, substr):
result = OrderedDict()
for (key, val) in d.items():
if (substr in key):
result[key] = val
return result
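# Tiny illustration of _filter: it keeps the entries of an (ordered) dict whose key contains
# the given substring, preserving insertion order.
from collections import OrderedDict

params = OrderedDict([('mean_nn.W', 1), ('kernel_nn.W', 2), ('noise_raw', 3)])
nn_params = _filter(params, 'nn')  # OrderedDict([('mean_nn.W', 1), ('kernel_nn.W', 2)])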
|
class VectorizedGP(VectorizedModel):
def __init__(self, input_dim, feature_dim=2, covar_module_str='SE', mean_module_str='constant', mean_nn_layers=(32, 32), kernel_nn_layers=(32, 32), nonlinearlity=torch.tanh):
super().__init__(input_dim, 1)
self._params = OrderedDict()
self.mean_module_str = mean_module_str
self.covar_module_str = covar_module_str
if (mean_module_str == 'NN'):
self.mean_nn = self._param_module('mean_nn', NeuralNetworkVectorized(input_dim, 1, layer_sizes=mean_nn_layers, nonlinearlity=nonlinearlity))
elif (mean_module_str == 'constant'):
self.constant_mean = self._param('constant_mean', torch.zeros(1, 1))
else:
raise NotImplementedError
if (covar_module_str == 'NN'):
self.kernel_nn = self._param_module('kernel_nn', NeuralNetworkVectorized(input_dim, feature_dim, layer_sizes=kernel_nn_layers, nonlinearlity=nonlinearlity))
self.lengthscale_raw = self._param('lengthscale_raw', torch.zeros(1, feature_dim))
elif (covar_module_str == 'SE'):
self.lengthscale_raw = self._param('lengthscale_raw', torch.zeros(1, input_dim))
else:
raise NotImplementedError
self.noise_raw = self._param('noise_raw', torch.zeros(1, 1))
def forward(self, x_data, y_data, train=True, prior=False):
assert (x_data.ndim == 3)
if (self.mean_module_str == 'NN'):
learned_mean = self.mean_nn
mean_module = None
else:
learned_mean = None
mean_module = ConstantMeanLight(self.constant_mean)
if (self.covar_module_str == 'NN'):
learned_kernel = self.kernel_nn
else:
learned_kernel = None
lengthscale = F.softplus(self.lengthscale_raw)
lengthscale = lengthscale.view(lengthscale.shape[0], 1, lengthscale.shape[1])
covar_module = SEKernelLight(lengthscale)
noise = F.softplus(self.noise_raw)
likelihood = GaussianLikelihoodLight(noise)
gp = LearnedGPRegressionModel(x_data, y_data, likelihood, mean_module=mean_module, covar_module=covar_module, learned_mean=learned_mean, learned_kernel=learned_kernel)
if prior:
gp.train()
likelihood.train()
return (gp, likelihood)
elif train:
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp)
output = gp(x_data)
return (likelihood(output), mll(output, y_data))
else:
gp.eval()
likelihood.eval()
return (gp, likelihood)
def parameter_shapes(self):
return OrderedDict([(name, param.shape) for (name, param) in self.named_parameters().items()])
def named_parameters(self):
return self._params
def _param_module(self, name, module):
assert (type(name) == str)
assert hasattr(module, 'named_parameters')
for (param_name, param) in module.named_parameters().items():
self._param(((name + '.') + param_name), param)
return module
def _param(self, name, tensor):
assert (type(name) == str)
assert isinstance(tensor, torch.Tensor)
assert (name not in list(self._params.keys()))
if (not (device.type == tensor.device.type)):
tensor = tensor.to(device)
self._params[name] = tensor
return tensor
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
|
class _RandomGPBase():
def __init__(self, size_in, prior_factor=1.0, weight_prior_std=1.0, bias_prior_std=3.0, **kwargs):
self._params = OrderedDict()
self._param_dists = OrderedDict()
self.prior_factor = prior_factor
self.gp = VectorizedGP(size_in, **kwargs)
for (name, shape) in self.gp.parameter_shapes().items():
if (name == 'constant_mean'):
mean_p_loc = torch.zeros(1).to(device)
mean_p_scale = torch.ones(1).to(device)
self._param_dist(name, Normal(mean_p_loc, mean_p_scale).to_event(1))
if (name == 'lengthscale_raw'):
lengthscale_p_loc = torch.zeros(shape[(- 1)]).to(device)
lengthscale_p_scale = torch.ones(shape[(- 1)]).to(device)
self._param_dist(name, Normal(lengthscale_p_loc, lengthscale_p_scale).to_event(1))
if (name == 'noise_raw'):
noise_p_loc = ((- 1.0) * torch.ones(1).to(device))
noise_p_scale = torch.ones(1).to(device)
self._param_dist(name, Normal(noise_p_loc, noise_p_scale).to_event(1))
if (('mean_nn' in name) or ('kernel_nn' in name)):
mean = torch.zeros(shape).to(device)
if ('weight' in name):
std = (weight_prior_std * torch.ones(shape).to(device))
elif ('bias' in name):
std = (bias_prior_std * torch.ones(shape).to(device))
else:
raise NotImplementedError
self._param_dist(name, Normal(mean, std).to_event(1))
for (param_name_gp, param_name_prior) in zip(self.gp.named_parameters().keys(), self._param_dists.keys()):
assert (param_name_gp == param_name_prior)
self.hyper_prior = CatDist(self._param_dists.values())
def sample_params_from_prior(self, shape=torch.Size()):
return self.hyper_prior.sample(shape)
def sample_fn_from_prior(self, shape=torch.Size()):
params = self.sample_params_from_prior(shape=shape)
return self.get_forward_fn(params)
def get_forward_fn(self, params):
gp_model = copy.deepcopy(self.gp)
gp_model.set_parameters_as_vector(params)
return gp_model
def _param_dist(self, name, dist):
assert (type(name) == str)
assert isinstance(dist, torch.distributions.Distribution)
assert (name not in list(self._param_dists.keys()))
assert hasattr(dist, 'rsample')
self._param_dists[name] = dist
return dist
def _log_prob_prior(self, params):
return self.hyper_prior.log_prob(params)
def _log_prob_likelihood(self, *args):
raise NotImplementedError
def log_prob(self, *args):
raise NotImplementedError
def parameter_shapes(self):
param_shapes_dict = OrderedDict()
for (name, dist) in self._param_dists.items():
param_shapes_dict[name] = dist.event_shape
return param_shapes_dict
|
class RandomGP(_RandomGPBase):
def _log_prob_likelihood(self, params, x_data, y_data):
fn = self.get_forward_fn(params)
(_, mll) = fn(x_data, y_data)
return mll
def log_prob(self, params, x_data, y_data):
return ((self.prior_factor * self._log_prob_prior(params)) + self._log_prob_likelihood(params, x_data, y_data))
|
class RandomGPMeta(_RandomGPBase):
def _log_prob_likelihood(self, params, train_data_tuples):
fn = self.get_forward_fn(params)
num_datasets = len(train_data_tuples)
dataset_sizes = torch.tensor([train_x.shape[(- 2)] for (train_x, _) in train_data_tuples]).float().to(device)
harmonic_mean_dataset_size = (1.0 / torch.mean((1.0 / dataset_sizes)))
pre_factor = (harmonic_mean_dataset_size / (harmonic_mean_dataset_size + num_datasets))
mlls = []
for (i, (x_data, y_data)) in enumerate(train_data_tuples):
(_, mll) = fn(x_data, y_data)
mlls.append(mll)
mlls = torch.stack(mlls, dim=(- 1))
return (pre_factor * torch.sum(mlls, dim=(- 1)))
def log_prob(self, params, train_data_tuples):
return ((self.prior_factor * self._log_prob_prior(params)) + self._log_prob_likelihood(params, train_data_tuples))
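# Numeric sketch of the pre-factor in RandomGPMeta._log_prob_likelihood: with dataset sizes
# (5, 20) the harmonic mean is 8, so the summed marginal log-likelihoods of the num_datasets=2
# tasks are scaled by 8 / (8 + 2) = 0.8.
import torch

dataset_sizes = torch.tensor([5.0, 20.0])
harmonic_mean = 1.0 / torch.mean(1.0 / dataset_sizes)  # tensor(8.)
pre_factor = harmonic_mean / (harmonic_mean + len(dataset_sizes))  # tensor(0.8)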
|
class RandomGPPosterior(torch.nn.Module):
'\n Gaussian VI posterior on the GP-Prior parameters\n '
def __init__(self, named_param_shapes, init_std=0.1, cov_type='full'):
super().__init__()
assert (cov_type in ['diag', 'full'])
self.param_idx_ranges = OrderedDict()
idx_start = 0
for (name, shape) in named_param_shapes.items():
assert (len(shape) == 1)
idx_end = (idx_start + shape[0])
self.param_idx_ranges[name] = (idx_start, idx_end)
idx_start = idx_end
param_shape = torch.Size((idx_start,))
self.loc = torch.nn.Parameter(torch.normal(0.0, init_std, size=param_shape, device=device))
if (cov_type == 'diag'):
self.scale = torch.nn.Parameter(torch.normal(math.log(0.1), init_std, size=param_shape, device=device))
self.dist_fn = (lambda : Normal(self.loc, self.scale.exp()).to_event(1))
if (cov_type == 'full'):
self.tril_cov = torch.nn.Parameter(torch.diag(torch.ones(param_shape, device=device).uniform_(0.05, 0.1)))
self.dist_fn = (lambda : torch.distributions.MultivariateNormal(loc=self.loc, scale_tril=torch.tril(self.tril_cov)))
def forward(self):
return self.dist_fn()
def rsample(self, sample_shape=torch.Size()):
return self.forward().rsample(sample_shape)
def sample(self, sample_shape=torch.Size()):
return self.forward().sample(sample_shape)
def log_prob(self, value):
return self.forward().log_prob(value)
@property
def mode(self):
return self.mean
@property
def mean(self):
return self.forward().mean
@property
def stddev(self):
return self.forward().stddev
def entropy(self):
return self.forward().entropy()
@property
def mean_stddev_dict(self):
mean = self.mean
stddev = self.stddev
with torch.no_grad():
return OrderedDict([(name, (mean[idx_start:idx_end], stddev[idx_start:idx_end])) for (name, (idx_start, idx_end)) in self.param_idx_ranges.items()])
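# A small sketch of instantiating the VI posterior above for a toy parameter layout (names and
# sizes are made up for illustration): two blocks of sizes 1 and 3 yield a 4-dimensional Gaussian
# with full covariance over the stacked parameter vector.
from collections import OrderedDict
import torch

shapes = OrderedDict([('constant_mean', torch.Size([1])), ('lengthscale_raw', torch.Size([3]))])
posterior = RandomGPPosterior(shapes, cov_type='full')
samples = posterior.rsample(torch.Size([10]))  # shape (10, 4), differentiable w.r.t. loc / tril_cov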
|
def _get_base_dist(dist):
if isinstance(dist, Independent):
return _get_base_dist(dist.base_dist)
else:
return dist
|
class SVGD():
def __init__(self, distribution, kernel, optimizer):
self.P = distribution
self.K = kernel
self.optim = optimizer
def phi(self, X, *data):
X = X.detach().requires_grad_(True)
log_prob = self.P.log_prob(X, *data)
score_func = torch.autograd.grad(log_prob.sum(), X)[0]
K_XX = self.K(X, X.detach())
grad_K = (- torch.autograd.grad(K_XX.sum(), X)[0])
phi = ((K_XX.detach().matmul(score_func) + grad_K) / X.size(0))
return phi
def step(self, particles, *data):
self.optim.zero_grad()
particles.grad = (- self.phi(particles, *data))
self.optim.step()
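# A minimal SVGD usage sketch with the RBF_Kernel defined just below and a toy standard-normal
# target; the 'distribution' only needs a log_prob(particles, *data) method, so this hypothetical
# wrapper stands in for RandomGP / RandomGPMeta.
import torch

class ToyGaussianTarget():
    def log_prob(self, particles, *data):
        return torch.distributions.Normal(0.0, 1.0).log_prob(particles).sum(dim=-1)

particles = torch.randn(50, 2) * 3.0  # 50 particles in 2 dimensions, initialized far from the target
svgd = SVGD(ToyGaussianTarget(), RBF_Kernel(), torch.optim.Adam([particles], lr=0.1))
for _ in range(200):
    svgd.step(particles)  # particles drift towards N(0, I)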
|
class RBF_Kernel(torch.nn.Module):
'\n RBF kernel\n\n :math:`K(x, y) = exp(-||x - y||^2 / (2 h^2))`\n\n '
def __init__(self, bandwidth=None):
super().__init__()
self.bandwidth = bandwidth
def _bandwidth(self, norm_sq):
if (self.bandwidth is None):
np_dnorm2 = norm_sq.detach().cpu().numpy()
h = (np.median(np_dnorm2) / (2 * np.log((np_dnorm2.shape[0] + 1))))
return np.sqrt(h).item()
else:
return self.bandwidth
def forward(self, X, Y):
dnorm2 = norm_sq(X, Y)
bandwidth = self._bandwidth(dnorm2)
gamma = (1.0 / (1e-08 + (2 * (bandwidth ** 2))))
K_XY = ((- gamma) * dnorm2).exp()
return K_XY
|
class IMQSteinKernel(torch.nn.Module):
'\n IMQ (inverse multi-quadratic) kernel\n\n :math:`K(x, y) = (\\alpha + ||x-y||^2/h)^{\\beta}`\n\n '
def __init__(self, alpha=0.5, beta=(- 0.5), bandwidth=None):
super(IMQSteinKernel, self).__init__()
assert (alpha > 0.0), 'alpha must be positive.'
assert (beta < 0.0), 'beta must be negative.'
self.alpha = alpha
self.beta = beta
self.bandwidth = bandwidth
def _bandwidth(self, norm_sq):
'\n Compute the bandwidth along each dimension using the median pairwise squared distance between particles.\n '
if (self.bandwidth is None):
num_particles = norm_sq.size(0)
index = torch.arange(num_particles)
norm_sq = norm_sq[((index > index.unsqueeze((- 1))), ...)]
median = norm_sq.median(dim=0)[0]
assert (median.shape == norm_sq.shape[(- 1):])
return (median / math.log((num_particles + 1)))
else:
return self.bandwidth
def forward(self, X, Y):
norm_sq = ((X.unsqueeze(0) - Y.unsqueeze(1)) ** 2)
assert (norm_sq.dim() == 3)
bandwidth = self._bandwidth(norm_sq)
base_term = (self.alpha + torch.sum((norm_sq / bandwidth), dim=(- 1)))
log_kernel = (self.beta * torch.log(base_term))
return log_kernel.exp()
|
def norm_sq(X, Y):
XX = X.matmul(X.t())
XY = X.matmul(Y.t())
YY = Y.matmul(Y.t())
return ((((- 2) * XY) + XX.diag().unsqueeze(1)) + YY.diag().unsqueeze(0))
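# Sanity check for norm_sq: entry (i, j) is the squared Euclidean distance between X[i] and Y[j],
# so the result should match torch.cdist(X, Y) ** 2 up to numerical noise.
import torch

X, Y = torch.randn(5, 3), torch.randn(7, 3)
assert torch.allclose(norm_sq(X, Y), torch.cdist(X, Y) ** 2, atol=1e-4)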
|
def find_root_by_bounding(fun, left, right, eps=1e-06, max_iter=10000.0):
'\n Root finding method that uses selective shrinking of a target interval bounded by left and right\n --> unlike the Newton method, this interval-bisection approach only works for vectorized univariate functions that are monotonically increasing on [left, right]\n Args:\n fun (callable): function f for which f(x) = 0 shall be solved\n left (torch.Tensor): initial left bound\n right (torch.Tensor): initial right bound\n eps (float): tolerance\n max_iter (int): maximum iterations\n '
assert callable(fun)
n_iter = 0
approx_error = 1000000000000.0
while (approx_error > eps):
middle = ((right + left) / 2)
f = fun(middle)
left_of_zero = (f < 0).flatten()
left[left_of_zero] = middle[left_of_zero]
right[(~ left_of_zero)] = middle[(~ left_of_zero)]
assert torch.all((left <= right)).item()
approx_error = (torch.max(torch.abs((right - left))) / 2)
n_iter += 1
if (n_iter > max_iter):
warnings.warn('Max_iter has been reached - stopping interval bisection for determining quantiles')
return torch.Tensor([np.nan for _ in range(len(left))])
return middle
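# Sketch of find_root_by_bounding on a vectorized problem: solving x**2 - c = 0 for several values
# of c at once; fun must be increasing on [left, right], and left/right are modified in place.
import torch

c = torch.tensor([2.0, 9.0, 16.0])
roots = find_root_by_bounding(lambda x: x ** 2 - c, left=torch.zeros(3), right=10.0 * torch.ones(3))
# roots ~ tensor([1.4142, 3.0000, 4.0000])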
|
def _handle_input_dimensionality(x, y=None):
if (x.ndim == 1):
x = np.expand_dims(x, (- 1))
assert (x.ndim == 2)
if (y is not None):
if (y.ndim == 1):
y = np.expand_dims(y, (- 1))
assert (x.shape[0] == y.shape[0])
assert (y.ndim == 2)
return (x, y)
else:
return x
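# Small illustration of _handle_input_dimensionality: 1-d arrays are promoted to column vectors so
# downstream code can always assume 2-d (n, d) inputs.
import numpy as np

x, y = _handle_input_dimensionality(np.arange(5.0), np.arange(5.0))
assert x.shape == (5, 1) and y.shape == (5, 1)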
|
def get_logger(log_dir=None, log_file='output.log', expname=''):
if ((log_dir is None) and flags.FLAGS.is_parsed() and hasattr(flags.FLAGS, 'log_dir')):
log_dir = flags.FLAGS.log_dir
logger = logging.getLogger('gp-priors')
logger.setLevel(logging.INFO)
if (len(logger.handlers) == 0):
if (len(expname) > 0):
expname = (' %s - ' % expname)
formatter = logging.Formatter((('[%(asctime)s -' + ('%s' % expname)) + '%(levelname)s] %(message)s'))
sh = logging.StreamHandler()
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
logger.addHandler(sh)
logger.propagate = False
if ((log_dir is not None) and (len(log_dir) > 0)):
fh = logging.FileHandler(os.path.join(log_dir, log_file))
fh.setFormatter(formatter)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
logger.log_dir = log_dir
else:
logger.log_dir = None
return logger
|
class DummyLRScheduler():
def __init__(self, *args, **kwargs):
pass
def step(self, *args, **kwargs):
pass
|
class AsyncExecutor():
def __init__(self, n_jobs=1):
self.num_workers = (n_jobs if (n_jobs > 0) else multiprocessing.cpu_count())
self._pool = []
self._populate_pool()
def run(self, target, *args_iter, verbose=False):
workers_idle = ([False] * self.num_workers)
tasks = list(zip(*args_iter))
n_tasks = len(tasks)
while (not all(workers_idle)):
for i in range(self.num_workers):
if (not self._pool[i].is_alive()):
self._pool[i].terminate()
if (len(tasks) > 0):
if verbose:
print(('task %i of %i' % ((n_tasks - len(tasks)), n_tasks)))
next_task = tasks.pop(0)
self._pool[i] = _start_process(target, next_task)
else:
workers_idle[i] = True
def _populate_pool(self):
self._pool = [_start_process(_dummy_fun) for _ in range(self.num_workers)]
|
class LoopExecutor():
def run(self, target, *args_iter, verbose=False):
tasks = list(zip(*args_iter))
n_tasks = len(tasks)
for (i, task) in enumerate(tasks):
target(*task)
if verbose:
print(('task %i of %i' % ((i + 1), n_tasks)))
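# Usage sketch for the executors above: both take a target function plus one iterable per positional
# argument. LoopExecutor runs the tasks sequentially; AsyncExecutor(n_jobs=...) distributes them over
# worker processes (the target must then be picklable, i.e. defined at module level).
# train_one is a hypothetical stand-in target.
def train_one(seed, n_iter):
    print('seed=%d, n_iter=%d' % (seed, n_iter))

executor = LoopExecutor()  # or AsyncExecutor(n_jobs=2)
executor.run(train_one, [1, 2, 3], [100, 100, 100], verbose=True)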
|
def _start_process(target, args=None):
if args:
p = Process(target=target, args=args)
else:
p = Process(target=target)
p.start()
return p
|
def _dummy_fun():
pass
|
class TestGPR_mll(unittest.TestCase):
def setUp(self):
torch.manual_seed(22)
np.random.seed(25)
n_train_points = 60
self.x_train = np.linspace((- 2), 2, num=n_train_points)
self.y_train_zero = ((self.x_train * 0) + np.random.normal(scale=0.02, size=self.x_train.shape))
self.y_train_two = (((self.x_train * 0) + 2) + np.random.normal(scale=0.02, size=self.x_train.shape))
self.y_train_sin = np.sin((4 * self.x_train))
n_test_points = 80
self.x_test = np.linspace((- 2.1), 2.1, num=n_test_points)
self.y_test_zero = ((self.x_test * 0) + np.random.normal(scale=0.02, size=self.x_test.shape))
self.y_test_two = (((self.x_test * 0) + 2) + np.random.normal(scale=0.02, size=self.x_test.shape))
self.y_test_sin = np.sin((4 * self.x_test))
def test_random_seed_consistency(self):
gpr_model_1 = GPRegressionLearned(self.x_train, self.y_train_two, learning_mode='both', num_iter_fit=5, mean_module='NN', covar_module='NN', random_seed=22)
gpr_model_2 = GPRegressionLearned(self.x_train, self.y_train_two, learning_mode='both', num_iter_fit=5, mean_module='NN', covar_module='NN', random_seed=22)
gpr_model_1.fit()
t_predict_1 = gpr_model_1.predict(self.x_test)
gpr_model_2.fit()
t_predict_2 = gpr_model_2.predict(self.x_test)
self.assertTrue(np.array_equal(t_predict_1, t_predict_2))
def test_serializable(self):
torch.manual_seed(40)
np.random.seed(22)
import itertools
for (mean_module, covar_module) in itertools.product(['constant', 'NN'], ['SE', 'NN']):
gpr_model = GPRegressionLearned(self.x_train, self.y_train_two, learning_mode='both', num_iter_fit=10, mean_module=mean_module, covar_module=covar_module, random_seed=22)
gpr_model.fit()
pred_1 = gpr_model.predict(self.x_train)
gpr_model2 = GPRegressionLearned(self.x_train, self.y_train_two, learning_mode='both', num_iter_fit=1, mean_module=mean_module, covar_module=covar_module, random_seed=345)
gpr_model2.fit()
pred_2 = gpr_model2.predict(self.x_train)
file = '/tmp/test_torch_serialization.pkl'
torch.save(gpr_model.state_dict(), file)
gpr_model2.load_state_dict(torch.load(file))
pred_3 = gpr_model2.predict(self.x_train)
assert (not np.array_equal(pred_1, pred_2))
assert np.array_equal(pred_1, pred_3)
def test_mean_learning(self):
for mean_module in ['NN']:
gpr_model_vanilla = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode='vanilla', num_iter_fit=20, mean_module='constant', covar_module='SE')
gpr_model_vanilla.fit()
gpr_model_learn_mean = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode='learn_mean', num_iter_fit=100, mean_module=mean_module, covar_module='SE', mean_nn_layers=(16, 16))
gpr_model_learn_mean.fit()
(ll_vanilla, rmse_vanilla, _) = gpr_model_vanilla.eval(self.x_train, self.y_train_sin)
(ll_mean, rmse_mean, _) = gpr_model_learn_mean.eval(self.x_train, self.y_train_sin)
print(ll_mean, ll_vanilla)
print(rmse_mean, rmse_vanilla)
self.assertGreater(ll_mean, ll_vanilla)
self.assertLess(rmse_mean, rmse_vanilla)
def test_kernel_learning_COS(self):
for learning_mode in ['learn_kernel', 'both']:
gpr_model_vanilla = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode='vanilla', num_iter_fit=1, mean_module='constant', covar_module=CosineKernel())
gpr_model_vanilla.fit()
gpr_model_learn_kernel = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode=learning_mode, num_iter_fit=500, mean_module='constant', covar_module=CosineKernel())
print(gpr_model_learn_kernel.model.covar_module.lengthscale)
gpr_model_learn_kernel.fit(valid_x=self.x_train, valid_t=self.y_train_sin)
print(gpr_model_learn_kernel.model.covar_module.lengthscale)
(ll_vanilla, rmse_vanilla, _) = gpr_model_vanilla.eval(self.x_train, self.y_train_sin)
(ll_kernel, rmse_kernel, _) = gpr_model_learn_kernel.eval(self.x_train, self.y_train_sin)
print('learning_mode', learning_mode)
print(ll_kernel, ll_vanilla)
print(rmse_kernel, rmse_vanilla)
self.assertGreater(ll_kernel, ll_vanilla)
self.assertLess(rmse_kernel, rmse_vanilla)
def test_kernel_learning_NN(self):
for learning_mode in ['learn_kernel', 'both']:
gpr_model_vanilla = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode='learn_kernel', num_iter_fit=1, mean_module='zero', covar_module='NN')
gpr_model_vanilla.fit()
gpr_model_learn_kernel = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode=learning_mode, num_iter_fit=500, mean_module='constant', covar_module='NN', kernel_nn_layers=(16, 16), mean_nn_layers=(16, 16))
gpr_model_learn_kernel.fit(valid_x=self.x_train, valid_t=self.y_train_sin)
(ll_vanilla, rmse_vanilla, _) = gpr_model_vanilla.eval(self.x_train, self.y_train_sin)
(ll_kernel, rmse_kernel, _) = gpr_model_learn_kernel.eval(self.x_train, self.y_train_sin)
print('learning_mode', learning_mode)
print(ll_kernel, ll_vanilla)
print(rmse_kernel, rmse_vanilla)
self.assertGreater(ll_kernel, ll_vanilla)
self.assertLess(rmse_kernel, rmse_vanilla)
|
class TestGPR_mll_meta(unittest.TestCase):
def setUp(self):
torch.manual_seed(22)
np.random.seed(23)
n_train_datasets = 10
n_samples_train = 5
self.train_data_tuples = [sample_data_nonstationary(n_samples_train) for _ in range(n_train_datasets)]
n_test_datasets = 10
n_samples_test_context = 5
n_samples_test = 50
test_data = [sample_data_nonstationary((n_samples_test_context + n_samples_test)) for _ in range(n_test_datasets)]
self.test_data_tuples = [(x[:n_samples_test_context], t[:n_samples_test_context], x[n_samples_test_context:], t[n_samples_test_context:]) for (x, t) in test_data]
def test_random_seed_consistency(self):
gp_meta_1 = GPRegressionMetaLearned(self.train_data_tuples[:2], learning_mode='both', num_iter_fit=5, covar_module='NN', mean_module='NN', random_seed=22)
gp_meta_2 = GPRegressionMetaLearned(self.train_data_tuples[:2], learning_mode='both', num_iter_fit=5, covar_module='NN', mean_module='NN', random_seed=22)
gp_meta_1.meta_fit(valid_tuples=self.test_data_tuples)
gp_meta_2.meta_fit(valid_tuples=self.test_data_tuples)
for (x_context, t_context, x_test, _) in self.test_data_tuples[:3]:
t_predict_1 = gp_meta_1.predict(x_context, t_context, x_test)
t_predict_2 = gp_meta_2.predict(x_context, t_context, x_test)
self.assertTrue(np.array_equal(t_predict_1, t_predict_2))
def test_serializable(self):
torch.manual_seed(40)
np.random.seed(22)
import itertools
for (mean_module, covar_module) in itertools.product(['constant', 'NN'], ['SE', 'NN']):
gpr_model = GPRegressionMetaLearned(self.train_data_tuples[:3], learning_mode='both', num_iter_fit=5, mean_module=mean_module, covar_module=covar_module, random_seed=22)
gpr_model.meta_fit()
pred_1 = gpr_model.predict(*self.test_data_tuples[0][:3])
gpr_model2 = GPRegressionMetaLearned(self.train_data_tuples[:3], learning_mode='both', num_iter_fit=5, mean_module=mean_module, covar_module=covar_module, random_seed=25)
gpr_model2.meta_fit()
pred_2 = gpr_model2.predict(*self.test_data_tuples[0][:3])
file = '/tmp/test_torch_serialization.pkl'
torch.save(gpr_model.state_dict(), file)
gpr_model2.load_state_dict(torch.load(file))
pred_3 = gpr_model2.predict(*self.test_data_tuples[0][:3])
assert (not np.array_equal(pred_1, pred_2))
assert np.array_equal(pred_1, pred_3)
torch.manual_seed(25)
gpr_model.rds_numpy = np.random.RandomState(55)
gpr_model.meta_fit()
torch.manual_seed(25)
gpr_model2.rds_numpy = np.random.RandomState(55)
gpr_model2.meta_fit()
pred_1 = gpr_model.predict(*self.test_data_tuples[0][:3])
pred_2 = gpr_model2.predict(*self.test_data_tuples[0][:3])
assert np.array_equal(pred_1, pred_2)
def test_mean_learning_more_datasets(self):
torch.manual_seed(40)
gp_meta = GPRegressionMetaLearned(self.train_data_tuples[:2], learning_mode='both', mean_nn_layers=(16, 16), kernel_nn_layers=(16, 16), num_iter_fit=3000, covar_module='SE', mean_module='NN', weight_decay=0.0)
gp_meta.meta_fit(valid_tuples=self.test_data_tuples)
(test_ll_meta_2, test_rmse_meta_2, _) = gp_meta.eval_datasets(self.test_data_tuples)
print('Test log-likelihood meta (2 datasets):', test_ll_meta_2)
gp_meta = GPRegressionMetaLearned(self.train_data_tuples, learning_mode='both', mean_nn_layers=(16, 16), kernel_nn_layers=(16, 16), num_iter_fit=3000, covar_module='SE', mean_module='NN', weight_decay=0.0)
gp_meta.meta_fit(valid_tuples=self.test_data_tuples)
(test_ll_meta_10, test_rmse_meta_10, _) = gp_meta.eval_datasets(self.test_data_tuples)
print('Test log-likelihood meta (10 datasets):', test_ll_meta_10)
self.assertGreater(test_ll_meta_10, test_ll_meta_2)
self.assertLess(test_rmse_meta_10, test_rmse_meta_2)
def test_normal_vs_meta(self):
torch.manual_seed(60)
num_iter_fit = 1000
gp_meta = GPRegressionMetaLearned(self.train_data_tuples, learning_mode='both', mean_nn_layers=(64, 64), covar_module='SE', mean_module='NN', weight_decay=0.0, num_iter_fit=num_iter_fit)
gp_meta.meta_fit(valid_tuples=self.test_data_tuples)
(test_ll_meta, test_rmse_meta, _) = gp_meta.eval_datasets(self.test_data_tuples)
print('Test log-likelihood meta:', test_ll_meta)
def fit_eval_gpr(x_context, t_context, x_test, t_test):
gpr = GPRegressionLearned(x_context, t_context, learning_mode='both', mean_nn_layers=(64, 64), covar_module='SE', mean_module='NN', weight_decay=0.0, num_iter_fit=num_iter_fit)
gpr.fit(valid_x=x_test, valid_t=t_test)
return gpr.eval(x_test, t_test)[0]
ll_list = [fit_eval_gpr(*data_tuple) for data_tuple in self.test_data_tuples]
test_ll_normal = np.mean(ll_list)
print('Test log-likelihood normal:', test_ll_normal)
self.assertGreater(test_ll_meta, test_ll_normal)
|
def sample_data_nonstationary(size=1):
def _sample_fun():
slope = np.random.normal(loc=1, scale=0.2)
freq = (lambda x: (1 + np.abs(x)))
mean = (lambda x: (slope * x))
return (lambda x: ((mean(x) + np.sin((freq(x) * x))) / 5))
func = _sample_fun()
X = np.random.uniform((- 5), 5, size=(size, 1))
Y = func(X)
return (X, Y)
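# Example of drawing a few meta-training tasks with sample_data_nonstationary: each task is an
# (X, Y) pair with X sampled uniformly on [-5, 5] and shape (n_samples, 1).
train_tasks = [sample_data_nonstationary(size=20) for _ in range(5)]
X, Y = train_tasks[0]
assert X.shape == (20, 1) and Y.shape == (20, 1)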
|
class TestSinusoidDataset(unittest.TestCase):
def test_seed_reproducability(self):
rds = np.random.RandomState(55)
dataset = SinusoidDataset(random_state=rds)
data_test_1 = dataset.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
data_train_1 = dataset.generate_meta_train_data(n_tasks=5, n_samples=20)
rds = np.random.RandomState(55)
dataset = SinusoidDataset(random_state=rds)
data_test_2 = dataset.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
data_train_2 = dataset.generate_meta_train_data(n_tasks=5, n_samples=20)
for (test_tuple_1, test_tuple_2) in zip(data_test_1, data_test_2):
for (data_array_1, data_array_2) in zip(test_tuple_1, test_tuple_2):
assert np.array_equal(data_array_1, data_array_2)
for (train_tuple_1, train_tuple_2) in zip(data_train_1, data_train_2):
for (data_array_1, data_array_2) in zip(train_tuple_1, train_tuple_2):
assert np.array_equal(data_array_1, data_array_2)
def test_no_noise(self):
dataset = SinusoidDataset(amp_low=1.0, amp_high=1.0, period_low=1.0, period_high=1.0, x_shift_mean=0.0, x_shift_std=0.0, y_shift_mean=0.0, y_shift_std=0.0, slope_mean=1.0, slope_std=0.0, noise_std=0.0, x_low=5, x_high=(- 5))
data_tuples = dataset.generate_meta_train_data(n_tasks=2, n_samples=500)
true_fn = (lambda x: (x + np.sin(x)))
for data_tuple in data_tuples:
(x_train, y_train) = data_tuple
y_true = true_fn(x_train)
abs_diff = np.mean(np.abs((y_true - y_train)))
self.assertAlmostEqual(abs_diff, 0.0)
def test_context_test_consistency(self):
dataset = SinusoidDataset(noise_std=0.0, x_low=1, x_high=1)
data_tuples = dataset.generate_meta_test_data(n_tasks=10, n_samples_context=1, n_samples_test=1)
for data_tuple in data_tuples:
(x_context, y_context, x_test, y_test) = data_tuple
assert np.array_equal(y_context, y_test)
print(y_context, y_test)
|
class TestSinusoidNonstationaryDataset(unittest.TestCase):
def test_seed_reproducability(self):
rds = np.random.RandomState(55)
dataset = SinusoidNonstationaryDataset(random_state=rds)
data_test_1 = dataset.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
data_train_1 = dataset.generate_meta_train_data(n_tasks=5, n_samples=20)
rds = np.random.RandomState(55)
dataset = SinusoidNonstationaryDataset(random_state=rds)
data_test_2 = dataset.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
data_train_2 = dataset.generate_meta_train_data(n_tasks=5, n_samples=20)
for (test_tuple_1, test_tuple_2) in zip(data_test_1, data_test_2):
for (data_array_1, data_array_2) in zip(test_tuple_1, test_tuple_2):
assert np.array_equal(data_array_1, data_array_2)
for (train_tuple_1, train_tuple_2) in zip(data_train_1, data_train_2):
for (data_array_1, data_array_2) in zip(train_tuple_1, train_tuple_2):
assert np.array_equal(data_array_1, data_array_2)
|
class TestGPFunctionsDataset(unittest.TestCase):
def test_seed_reproducability(self):
rds = np.random.RandomState(55)
dataset = GPFunctionsDataset(random_state=rds)
data_test_1 = dataset.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
data_train_1 = dataset.generate_meta_train_data(n_tasks=5, n_samples=20)
rds = np.random.RandomState(55)
dataset = GPFunctionsDataset(random_state=rds)
data_test_2 = dataset.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
data_train_2 = dataset.generate_meta_train_data(n_tasks=5, n_samples=20)
for (test_tuple_1, test_tuple_2) in zip(data_test_1, data_test_2):
for (data_array_1, data_array_2) in zip(test_tuple_1, test_tuple_2):
assert np.array_equal(data_array_1, data_array_2)
for (train_tuple_1, train_tuple_2) in zip(data_train_1, data_train_2):
for (data_array_1, data_array_2) in zip(train_tuple_1, train_tuple_2):
assert np.array_equal(data_array_1, data_array_2)
|
class TestMNISTRegressionDataset(unittest.TestCase):
def test_seed_reproducability(self):
rds = np.random.RandomState(55)
dataset = MNISTRegressionDataset(random_state=rds)
data_test_1 = dataset.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
data_train_1 = dataset.generate_meta_train_data(n_tasks=5, n_samples=20)
rds = np.random.RandomState(55)
dataset = MNISTRegressionDataset(random_state=rds)
data_test_2 = dataset.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
data_train_2 = dataset.generate_meta_train_data(n_tasks=5, n_samples=20)
for (test_tuple_1, test_tuple_2) in zip(data_test_1, data_test_2):
for (data_array_1, data_array_2) in zip(test_tuple_1, test_tuple_2):
assert np.array_equal(data_array_1, data_array_2)
for (train_tuple_1, train_tuple_2) in zip(data_train_1, data_train_2):
for (data_array_1, data_array_2) in zip(train_tuple_1, train_tuple_2):
assert np.array_equal(data_array_1, data_array_2)
def test_output_shapes_generate_test(self):
rds = np.random.RandomState(123)
dataset = MNISTRegressionDataset(random_state=rds)
for n_tasks in [1, 5]:
for n_samples_context in [1, 85]:
for n_samples_test in [(- 1), 23]:
data_test = dataset.generate_meta_test_data(n_tasks=n_tasks, n_samples_context=n_samples_context, n_samples_test=n_samples_test)
assert (len(data_test) == n_tasks)
for (x_context, t_context, x_test, t_test) in data_test:
assert (x_context.shape[0] == t_context.shape[0])
assert (x_context.shape[1] == x_test.shape[1] == 2)
if (n_samples_test == (- 1)):
assert ((x_context.shape[0] + x_test.shape[0]) == (28 ** 2))
def test_output_shapes_generate_train(self):
rds = np.random.RandomState(123)
dataset = MNISTRegressionDataset(random_state=rds)
for n_tasks in [24, 2]:
for n_samples in [1, 85]:
data_test = dataset.generate_meta_train_data(n_tasks=n_tasks, n_samples=n_samples)
assert (len(data_test) == n_tasks)
for (x_train, t_train) in data_test:
assert (x_train.shape[0] == t_train.shape[0])
assert (x_train.shape[1] == 2)
|
class TestCauchyDataset(unittest.TestCase):
def test_seed_reproducability(self):
for ndim in [1, 5]:
rds = np.random.RandomState(55)
dataset = CauchyDataset(random_state=rds, ndim_x=ndim)
data_test_1 = dataset.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
data_train_1 = dataset.generate_meta_train_data(n_tasks=5, n_samples=20)
rds = np.random.RandomState(55)
dataset = CauchyDataset(random_state=rds, ndim_x=ndim)
data_test_2 = dataset.generate_meta_test_data(n_tasks=2, n_samples_context=5, n_samples_test=10)
data_train_2 = dataset.generate_meta_train_data(n_tasks=5, n_samples=20)
for (test_tuple_1, test_tuple_2) in zip(data_test_1, data_test_2):
for (data_array_1, data_array_2) in zip(test_tuple_1, test_tuple_2):
assert np.array_equal(data_array_1, data_array_2)
for (train_tuple_1, train_tuple_2) in zip(data_train_1, data_train_2):
for (data_array_1, data_array_2) in zip(train_tuple_1, train_tuple_2):
assert np.array_equal(data_array_1, data_array_2)
|
class SineData(Dataset):
'\n Dataset of functions f(x) = a * sin(x - b) where a and b are randomly\n sampled. The function is evaluated from -pi to pi.\n\n Parameters\n ----------\n amplitude_range : tuple of float\n Defines the range from which the amplitude (i.e. a) of the sine function\n is sampled.\n\n shift_range : tuple of float\n Defines the range from which the shift (i.e. b) of the sine function is\n sampled.\n\n num_samples : int\n Number of samples of the function contained in dataset.\n\n num_points : int\n Number of points at which to evaluate f(x) for x in [-pi, pi].\n '
def __init__(self, amplitude_range=((- 1.0), 1.0), shift_range=((- 0.5), 0.5), num_samples=1000, num_points=100):
self.amplitude_range = amplitude_range
self.shift_range = shift_range
self.num_samples = num_samples
self.num_points = num_points
self.x_dim = 1
self.y_dim = 1
self.data = []
(a_min, a_max) = amplitude_range
(b_min, b_max) = shift_range
for i in range(num_samples):
a = (((a_max - a_min) * np.random.rand()) + a_min)
b = (((b_max - b_min) * np.random.rand()) + b_min)
x = torch.linspace((- pi), pi, num_points).unsqueeze(1)
y = (a * torch.sin((x - b)))
self.data.append((x, y))
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.num_samples
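# Minimal sketch of using SineData with a PyTorch DataLoader: each element is one task, i.e.
# num_points evaluations of a randomly drawn sine function on [-pi, pi].
from torch.utils.data import DataLoader

sine_dataset = SineData(amplitude_range=(-1.0, 1.0), shift_range=(-0.5, 0.5), num_samples=64, num_points=100)
loader = DataLoader(sine_dataset, batch_size=16, shuffle=True)
x_batch, y_batch = next(iter(loader))  # both of shape (16, 100, 1)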
|
def mnist(batch_size=16, size=28, path_to_data='../../mnist_data'):
'MNIST dataloader.\n\n Parameters\n ----------\n batch_size : int\n\n size : int\n Size (height and width) of each image. Default is 28 for no resizing.\n\n path_to_data : string\n Path to MNIST data files.\n '
all_transforms = transforms.Compose([transforms.Resize(size), transforms.ToTensor()])
train_data = datasets.MNIST(path_to_data, train=True, download=True, transform=all_transforms)
test_data = datasets.MNIST(path_to_data, train=False, transform=all_transforms)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
return (train_loader, test_loader)
|
def celeba(batch_size=16, size=32, crop=89, path_to_data='../celeba_data', shuffle=True):
'CelebA dataloader.\n\n Parameters\n ----------\n batch_size : int\n\n size : int\n Size (height and width) of each image.\n\n crop : int\n Size of center crop. This crop happens *before* the resizing.\n\n path_to_data : string\n Path to CelebA data files.\n '
transform = transforms.Compose([transforms.CenterCrop(crop), transforms.Resize(size), transforms.ToTensor()])
celeba_data = CelebADataset(path_to_data, transform=transform)
celeba_loader = DataLoader(celeba_data, batch_size=batch_size, shuffle=shuffle)
return celeba_loader
|
class CelebADataset(Dataset):
'CelebA dataset.'
def __init__(self, path_to_data, subsample=1, transform=None):
'\n Parameters\n ----------\n path_to_data : string\n Path to CelebA data files.\n\n subsample : int\n Only load every |subsample| number of images.\n\n transform : torchvision.transforms\n Torchvision transforms to be applied to each image.\n '
self.img_paths = glob.glob((path_to_data + '/*.jpg'))[::subsample]
self.transform = transform
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
sample_path = self.img_paths[idx]
sample = Image.open(sample_path)
if self.transform:
sample = self.transform(sample)
return (sample, 0)
|
class Encoder(nn.Module):
'Maps an (x_i, y_i) pair to a representation r_i.\n\n Parameters\n ----------\n x_dim : int\n Dimension of x values.\n\n y_dim : int\n Dimension of y values.\n\n h_dim : int\n Dimension of hidden layer.\n\n r_dim : int\n Dimension of output representation r.\n '
def __init__(self, x_dim, y_dim, h_dim, r_dim):
super(Encoder, self).__init__()
self.x_dim = x_dim
self.y_dim = y_dim
self.h_dim = h_dim
self.r_dim = r_dim
layers = [nn.Linear((x_dim + y_dim), h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, r_dim)]
self.input_to_hidden = nn.Sequential(*layers)
def forward(self, x, y):
'\n x : torch.Tensor\n Shape (batch_size, x_dim)\n\n y : torch.Tensor\n Shape (batch_size, y_dim)\n '
input_pairs = torch.cat((x, y), dim=1)
return self.input_to_hidden(input_pairs)
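# Quick shape check for the Encoder above: a batch of (x_i, y_i) pairs is mapped to per-pair
# representations r_i of dimension r_dim.
import torch

encoder = Encoder(x_dim=1, y_dim=1, h_dim=128, r_dim=64)
x, y = torch.randn(32, 1), torch.randn(32, 1)
r = encoder(x, y)  # shape (32, 64)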
|