code
stringlengths
17
6.64M
def set_raytune_search_parameters(search_space, config):
    """Copy sampled hyperparameters from a Ray Tune search space into the config.

    Every key is optional: a value is written into ``config`` only when the
    corresponding key is present in ``search_space``, so sparse search spaces
    (and sparse configs) are supported.

    Args:
        search_space: dict of sampled hyperparameter values from Ray Tune.
        config: nested configuration dict; mutated in place.

    Returns:
        The same ``config`` object, updated.

    Raises:
        ValueError: if 'batch_multiplier' is sampled while
            config['batching']['bucket_by_sequence_length'] is falsy.
    """
    if 'layernorm' in search_space:
        config['parameters']['combined_graph_layer']['layernorm'] = bool(search_space['layernorm'])
    if 'ffn_dist_hidden_dim' in search_space:
        config['parameters']['combined_graph_layer']['ffn_dist_hidden_dim'] = int(search_space['ffn_dist_hidden_dim'])
    if 'ffn_dist_num_layers' in search_space:
        config['parameters']['combined_graph_layer']['ffn_dist_num_layers'] = int(search_space['ffn_dist_num_layers'])
    if 'distance_dim' in search_space:
        config['parameters']['combined_graph_layer']['distance_dim'] = int(search_space['distance_dim'])
    if 'num_node_messages' in search_space:
        config['parameters']['combined_graph_layer']['num_node_messages'] = int(search_space['num_node_messages'])
    if 'normalize_degrees' in search_space:
        config['parameters']['combined_graph_layer']['node_message']['normalize_degrees'] = bool(search_space['normalize_degrees'])
    if 'output_dim' in search_space:
        config['parameters']['combined_graph_layer']['node_message']['output_dim'] = int(search_space['output_dim'])
    if 'activation' in search_space:
        # One sampled activation is shared by the node-message layer, the
        # distance FFN and the combined graph layer itself.
        config['parameters']['combined_graph_layer']['node_message']['activation'] = search_space['activation']
        config['parameters']['combined_graph_layer']['dist_activation'] = search_space['activation']
        config['parameters']['combined_graph_layer']['activation'] = search_space['activation']
    if 'num_graph_layers_id' in search_space:
        config['parameters']['num_graph_layers_id'] = int(search_space['num_graph_layers_id'])
    if 'num_graph_layers_reg' in search_space:
        config['parameters']['num_graph_layers_reg'] = int(search_space['num_graph_layers_reg'])
    if 'bin_size' in search_space:
        config['parameters']['combined_graph_layer']['bin_size'] = int(search_space['bin_size'])
    if 'clip_value_low' in search_space:
        config['parameters']['combined_graph_layer']['kernel']['clip_value_low'] = search_space['clip_value_low']
    if 'dist_mult' in search_space:
        config['parameters']['combined_graph_layer']['kernel']['dist_mult'] = search_space['dist_mult']
    if 'dist_norm' in search_space:
        config['parameters']['combined_graph_layer']['kernel']['dist_norm'] = search_space['dist_norm']
    if 'dropout' in search_space:
        # The graph layer uses half of the sampled dropout rate; the output
        # decoder uses it in full.
        config['parameters']['combined_graph_layer']['dropout'] = search_space['dropout'] / 2
        config['parameters']['output_decoding']['dropout'] = search_space['dropout']
    if 'lr' in search_space:
        config['setup']['lr'] = search_space['lr']
    if 'batch_multiplier' in search_space:
        if not config['batching']['bucket_by_sequence_length']:
            raise ValueError('batch_multiplier given but bucket_by_sequence_length is set to False. Check config.')
        config['batching']['batch_multiplier'] = search_space['batch_multiplier']
    if 'batch_size_physical' in search_space:
        config['train_test_datasets']['physical']['batch_per_gpu'] = int(search_space['batch_size_physical'])
    if 'batch_size_delphes' in search_space:
        # BUG FIX: previously read search_space['batch_size_physical'] here,
        # so the delphes batch size could never be tuned independently.
        config['train_test_datasets']['delphes']['batch_per_gpu'] = int(search_space['batch_size_delphes'])
    if 'batch_size_gun' in search_space:
        config['train_test_datasets']['gun']['batch_per_gpu'] = int(search_space['batch_size_gun'])
    if 'expdecay_decay_steps' in search_space:
        config['exponentialdecay']['decay_steps'] = search_space['expdecay_decay_steps']
    if 'expdecay_decay_rate' in search_space:
        config['exponentialdecay']['decay_rate'] = search_space['expdecay_decay_rate']
    if 'event_loss' in search_space:
        config['loss']['event_loss'] = search_space['event_loss']
        config['loss']['event_loss_coef'] = 0.0 if search_space['event_loss'] == 'none' else 1.0
    if 'met_loss' in search_space:
        # BUG FIX: previously copied search_space['event_loss'] by mistake.
        config['loss']['met_loss'] = search_space['met_loss']
        config['loss']['met_loss_coef'] = 0.0 if search_space['met_loss'] == 'none' else 1.0
    if 'event_and_met_loss' in search_space:
        # Joint sampling of the event and MET losses as a pair.
        event_l, met_l = search_space['event_and_met_loss']
        config['loss']['event_loss'] = event_l
        config['loss']['event_loss_coef'] = 0.0 if event_l == 'none' else 1.0
        if met_l == 'none':
            config['loss']['met_loss'] = met_l
            config['loss']['met_loss_coef'] = 0.0
        else:
            config['loss']['met_loss'] = {'type': 'Huber', 'delta': 10.0}
            config['loss']['met_loss_coef'] = 1.0
    if 'mask_reg_cls0' in search_space:
        config['parameters']['output_decoding']['mask_reg_cls0'] = search_space['mask_reg_cls0']
    if 'lr_schedule' in search_space:
        config['setup']['lr_schedule'] = search_space['lr_schedule']
    if 'weight_decay' in search_space:
        config['optimizer']['adamw']['weight_decay'] = search_space['weight_decay']
    if 'optimizer' in search_space:
        if search_space['optimizer'] == 'pcgrad_adam':
            # 'pcgrad_adam' is adam with the PCGrad gradient surgery enabled.
            config['setup']['optimizer'] = 'adam'
            config['optimizer']['adam']['pcgrad'] = True
        elif search_space['optimizer'] == 'adam':
            config['setup']['optimizer'] = 'adam'
            config['optimizer']['adam']['pcgrad'] = False
        else:
            config['setup']['optimizer'] = search_space['optimizer']
    if 'node_encoding_hidden_dim' in search_space:
        config['parameters']['node_encoding_hidden_dim'] = search_space['node_encoding_hidden_dim']
    if 'out_hidden_dim' in search_space:
        # All output-decoding heads share one sampled hidden dimension.
        for head in ('id', 'charge', 'pt', 'eta', 'phi', 'energy'):
            config['parameters']['output_decoding'][head + '_hidden_dim'] = search_space['out_hidden_dim']
    if 'out_num_layers' in search_space:
        # All output-decoding heads share one sampled layer count.
        for head in ('id', 'charge', 'pt', 'eta', 'phi', 'energy'):
            config['parameters']['output_decoding'][head + '_num_layers'] = search_space['out_num_layers']
    # Transformer-specific parameters are copied through verbatim.
    for par in ('num_layers_encoder', 'num_layers_decoder_reg', 'num_layers_decoder_cls',
                'hidden_dim', 'num_heads', 'num_random_features'):
        if par in search_space:
            config['parameters'][par] = search_space[par]
    return config
def get_raytune_search_alg(raytune_cfg, seeds=False):
    """Create the Ray Tune search algorithm requested in ``raytune_cfg``.

    Returns None when the configured scheduler is incompatible with a search
    algorithm (pbt/pb2) or when no/unknown 'search_alg' is configured.
    BOHB scheduling forces the TuneBOHB searcher regardless of 'search_alg'.

    Fixes two typos in the informational messages ("Uing" -> "Using",
    "shedule" -> "schedule"); behavior is otherwise unchanged.
    """
    if (raytune_cfg['sched'] == 'pbt') or (raytune_cfg['sched'] == 'pb2'):
        # Population-based schedulers mutate hyperparameters themselves and
        # cannot be combined with a separate search algorithm.
        if raytune_cfg['search_alg'] is not None:
            print("INFO: Using schedule '{}' is not compatible with Ray Tune search algorithms.".format(raytune_cfg['sched']))
            print('INFO: Using the Ray Tune {} scheduler without search algorithm'.format(raytune_cfg['sched']))
        return None
    if (raytune_cfg['sched'] == 'bohb') or (raytune_cfg['sched'] == 'BOHB'):
        print('INFO: Using TuneBOHB search algorithm since it is required for BOHB schedule')
        seed = 1234 if seeds else None
        return TuneBOHB(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], seed=seed)
    if raytune_cfg['search_alg'] == 'bayes':
        print('INFO: Using BayesOptSearch')
        return BayesOptSearch(
            metric=raytune_cfg['default_metric'],
            mode=raytune_cfg['default_mode'],
            random_search_steps=raytune_cfg['bayes']['n_random_steps'],
        )
    if raytune_cfg['search_alg'] == 'hyperopt':
        print('INFO: Using HyperOptSearch')
        return HyperOptSearch(
            metric=raytune_cfg['default_metric'],
            mode=raytune_cfg['default_mode'],
            n_initial_points=raytune_cfg['hyperopt']['n_random_steps'],
        )
    if raytune_cfg['search_alg'] == 'scikit':
        print('INFO: Using bayesian optimization from scikit-learn')
        return SkOptSearch(
            metric=raytune_cfg['default_metric'],
            mode=raytune_cfg['default_mode'],
            convert_to_python=True,
        )
    if raytune_cfg['search_alg'] == 'nevergrad':
        print('INFO: Using bayesian optimization from nevergrad')
        # Imported lazily so nevergrad is only required when actually used.
        import nevergrad as ng
        return NevergradSearch(
            optimizer=ng.optimizers.BayesOptim(pca=False, init_budget=raytune_cfg['nevergrad']['n_random_steps']),
            metric=raytune_cfg['default_metric'],
            mode=raytune_cfg['default_mode'],
        )
    print('INFO: Not using any Ray Tune search algorithm')
    return None
def get_raytune_schedule(raytune_cfg):
    """Instantiate the Ray Tune trial scheduler named by raytune_cfg['sched'].

    Supported values: 'asha', 'hyperband', 'bohb'/'BOHB', 'pbt'/'PBT',
    'pb2'/'PB2'. Anything else prints an informational message and returns
    None (i.e. no trial scheduler).
    """
    sched_name = raytune_cfg['sched']
    if sched_name == 'asha':
        asha_cfg = raytune_cfg['asha']
        return AsyncHyperBandScheduler(
            metric=raytune_cfg['default_metric'],
            mode=raytune_cfg['default_mode'],
            time_attr='training_iteration',
            max_t=asha_cfg['max_t'],
            grace_period=asha_cfg['grace_period'],
            reduction_factor=asha_cfg['reduction_factor'],
            brackets=asha_cfg['brackets'],
        )
    if sched_name == 'hyperband':
        hb_cfg = raytune_cfg['hyperband']
        return HyperBandScheduler(
            metric=raytune_cfg['default_metric'],
            mode=raytune_cfg['default_mode'],
            time_attr='training_iteration',
            max_t=hb_cfg['max_t'],
            reduction_factor=hb_cfg['reduction_factor'],
        )
    if sched_name in ('bohb', 'BOHB'):
        # BOHB reuses the 'hyperband' section of the config.
        hb_cfg = raytune_cfg['hyperband']
        return HyperBandForBOHB(
            metric=raytune_cfg['default_metric'],
            mode=raytune_cfg['default_mode'],
            time_attr='training_iteration',
            max_t=hb_cfg['max_t'],
            reduction_factor=hb_cfg['reduction_factor'],
        )
    if sched_name in ('pbt', 'PBT'):
        pbt_cfg = raytune_cfg['pbt']
        return PopulationBasedTraining(
            metric=raytune_cfg['default_metric'],
            mode=raytune_cfg['default_mode'],
            time_attr='training_iteration',
            perturbation_interval=pbt_cfg['perturbation_interval'],
            hyperparam_mutations=pbt_cfg['hyperparam_mutations'],
            log_config=True,
        )
    if sched_name in ('pb2', 'PB2'):
        pb2_cfg = raytune_cfg['pb2']
        return PB2(
            metric=raytune_cfg['default_metric'],
            mode=raytune_cfg['default_mode'],
            time_attr='training_iteration',
            perturbation_interval=pb2_cfg['perturbation_interval'],
            hyperparam_bounds=pb2_cfg['hyperparam_bounds'],
            log_config=True,
        )
    print('INFO: Not using any Ray Tune trial scheduler.')
    return None
# Top-level click command group; subcommands (e.g. plot_cometml_json below)
# attach themselves via @main.command(). No docstring on purpose: click would
# surface it as CLI help text.
@click.group()
@click.help_option('-h', '--help')
def main():
    pass
@main.command()
@click.help_option('-h', '--help')
@click.option('-p', '--path', help='path to json file or dir containing json files', type=click.Path())
@click.option('-y', '--ylabel', default=None, help='Y-axis label', type=str)
@click.option('-x', '--xlabel', default='Step', help='X-axis label', type=str)
# BUG FIX: the help texts for -t and -s previously said 'X-axis label'.
@click.option('-t', '--title', default=None, help='Plot title', type=str)
@click.option('-s', '--save_dir', default=None, help='Directory to save plots in', type=click.Path())
def plot_cometml_json(path, ylabel, xlabel, title=None, save_dir=None):
    """Plot metric curves from Comet ML JSON exports.

    For each JSON file, every train metric is drawn as a solid line and its
    matching 'val_<name>' metric as a dashed line in the same color. Plots are
    saved as JPEGs under save_dir when given, otherwise shown interactively.
    """
    path = Path(path)
    json_files = path.glob('*.json') if path.is_dir() else [path]
    for json_file in json_files:
        with open(json_file) as f:
            data = json.load(f)
        plt.figure(figsize=(12, 6))
        for ii, metric in enumerate(data):
            if 'val' in metric['name']:
                # Validation entries are drawn together with their train metric.
                continue
            # The matching val metric is usually adjacent in the list; try the
            # next entry first, then the previous one.
            try:
                val_metric = data[ii + 1]
            except IndexError:
                val_metric = data[ii - 1]
            if ('val_' + metric['name']) != val_metric['name']:
                val_metric = data[ii - 1]
            if ('val_' + metric['name']) != val_metric['name']:
                raise ValueError("The val and train metrics don't match, {}, {}".format('val_' + metric['name'], val_metric['name']))
            pp = plt.plot(metric['x'], metric['y'], label=metric['name'], linestyle='-')
            color = pp[0].get_color()
            plt.plot(val_metric['x'], val_metric['y'], label=val_metric['name'], linestyle='--', color=color)
        plt.legend()
        plt.xlabel(xlabel)
        if ylabel:
            plt.ylabel(ylabel)
        if title:
            # BUG FIX: was plt.title('') which silently discarded the title.
            plt.title(title)
        if save_dir:
            plt.savefig(str(Path(save_dir) / (json_file.stem + '.jpg')))
        if not save_dir:
            plt.show()
class CustomTensorBoard(TensorBoard):
    """Extends tensorflow.keras.callbacks.TensorBoard.

    Custom tensorboard class to make logging of the learning rate possible
    when using keras.optimizers.schedules.LearningRateSchedule.
    See https://github.com/tensorflow/tensorflow/pull/37552

    Also logs momentum for supported optimizers that use momentum, and can
    optionally dump the per-epoch logs to JSON files ('dump_history').
    """

    def __init__(self, *args, **kwargs):
        # Our extra kwarg must be removed before the base TensorBoard sees it.
        self.dump_history = kwargs.pop('dump_history')
        super().__init__(*args, **kwargs)

    def _collect_learning_rate(self, logs):
        # Augment `logs` with the current learning rate (resolving schedules
        # at the optimizer's current iteration), plus momentum / Adam beta_1 /
        # loss scale when the optimizer exposes them.
        logs = (logs or {})
        opt = self.model.optimizer
        if hasattr(opt, 'lr'):
            lr_schedule = getattr(opt, 'lr', None)
            if isinstance(lr_schedule, tf.keras.optimizers.schedules.LearningRateSchedule):
                # Evaluate the schedule at the current step to get a scalar.
                logs['learning_rate'] = np.float64(tf.keras.backend.get_value(lr_schedule(opt.iterations)))
            else:
                logs.update({'learning_rate': np.float64(tf.keras.backend.eval(opt.lr))})
            # Optimizers without a momentum attribute raise AttributeError.
            try:
                logs.update({'momentum': np.float64(tf.keras.backend.eval(opt.momentum))})
            except AttributeError:
                pass
            # In Adam the momentum-like parameter is called beta_1.
            if isinstance(opt, tf.keras.optimizers.Adam):
                logs.update({'adam_beta_1': np.float64(tf.keras.backend.eval(opt.beta_1))})
        if hasattr(opt, 'loss_scale'):
            # Present when mixed-precision loss scaling is enabled.
            logs.update({'loss_scale': np.float64(opt.loss_scale.numpy())})
        return logs

    def on_epoch_end(self, epoch, logs):
        logs = (logs or {})
        logs.update(self._collect_learning_rate(logs))
        # Wall-clock timestamp so epochs can be correlated with external logs.
        logs['time'] = time.time()
        if self.dump_history:
            history_path = (Path(self.log_dir) / 'history')
            history_path.mkdir(parents=True, exist_ok=True)
            history_path = str(history_path)
            with open('{}/history_{}.json'.format(history_path, epoch), 'w') as fi:
                # Cast to plain floats so the logs are JSON-serializable.
                converted_logs = {k: float(v) for (k, v) in logs.items()}
                json.dump(converted_logs, fi)
        super().on_epoch_end(epoch, logs)

    def on_train_batch_end(self, batch, logs):
        logs = (logs or {})
        # Only log the learning rate every `update_freq` batches.
        if (isinstance(self.update_freq, int) and ((batch % self.update_freq) == 0)):
            logs.update(self._collect_learning_rate(logs))
        super().on_train_batch_end(batch, logs)
class CustomModelCheckpoint(ModelCheckpoint):
    """Extends tensorflow.keras.callbacks.ModelCheckpoint to also save the optimizer."""

    def __init__(self, *args, **kwargs):
        # Our extra kwargs must be removed before ModelCheckpoint sees them.
        self.optimizer_to_save = kwargs.pop('optimizer_to_save')
        self.optimizer_filepath = kwargs.pop('optimizer_save_filepath')
        super().__init__(*args, **kwargs)
        Path(self.filepath).parent.mkdir(parents=True, exist_ok=True)

    def on_epoch_end(self, epoch, logs=None):
        # Let the parent save the model first; it also updates the internal
        # bookkeeping (self.best, self.epochs_since_last_save) read below.
        super().on_epoch_end(epoch, logs)
        # Save the optimizer state whenever the parent saved a model checkpoint.
        filepath = str(self.optimizer_filepath).format(epoch=(epoch + 1), **logs)
        if (self.epochs_since_last_save == 0):
            if self.save_best_only:
                current = logs.get(self.monitor)
                # NOTE(review): equality with self.best assumes the parent just
                # updated self.best to this epoch's monitored value — confirm
                # against the installed Keras version.
                if (current == self.best):
                    with open(filepath, 'wb') as f:
                        pickle.dump(self.optimizer_to_save, f)
            else:
                with open(filepath, 'wb') as f:
                    pickle.dump(self.optimizer_to_save, f)
class BenchmarkLoggerCallback(tf.keras.callbacks.Callback):
    """Collects per-epoch wall-clock timings during training and, at the end of
    training, writes throughput statistics to result.json and saves a
    time-per-epoch plot in `outdir`."""

    def __init__(self, *args, **kwargs):
        # All custom kwargs are popped before delegating to the base Callback.
        self.outdir = kwargs.pop('outdir')
        self.steps_per_epoch = kwargs.pop('steps_per_epoch')
        self.batch_size_per_gpu = kwargs.pop('batch_size_per_gpu')
        self.num_gpus = kwargs.pop('num_gpus')
        self.num_cpus = kwargs.pop('num_cpus')
        self.train_set_size = kwargs.pop('train_set_size')
        self.horovod_enabled = kwargs.pop('horovod_enabled')
        super().__init__(*args, **kwargs)

    def on_train_begin(self, logs=None):
        self.times = []
        self.start_time = tf.timestamp().numpy()

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_time_start = tf.timestamp().numpy()

    def on_epoch_end(self, epoch, logs=None):
        self.times.append((tf.timestamp().numpy() - self.epoch_time_start))

    def plot(self, times):
        # Scatter-plot the epoch wall times; each point is annotated with the
        # delta (in seconds) relative to the previous epoch.
        plt.figure()
        plt.xlabel('Epoch')
        plt.ylabel('Time [s]')
        plt.plot(times, 'o')
        for i in range(len(times)):
            # Entries may be tf.Tensors or plain floats depending on origin.
            if isinstance(times[i], tf.Tensor):
                j = times[i].numpy()
            else:
                j = times[i]
            if (i == 0):
                plt.text((i + 0.02), (j + 0.2), str(round(j, 2)))
            else:
                if isinstance(times[(i - 1)], tf.Tensor):
                    j_prev = times[(i - 1)].numpy()
                else:
                    j_prev = times[(i - 1)]
                plt.text((i + 0.02), (j + 0.2), str(round((j - j_prev), 2)))
        plt.ylim(bottom=0)
        txt = 'Time in seconds per epoch. The numbers next to each data point\n show the difference in seconds compared to the previous epoch.'
        plt.title(txt)
        filename = (('time_per_epoch_' + datetime.now().strftime('%Y%m%d%H%M%S')) + '.png')
        save_path = (Path(self.outdir) / filename)
        print('Saving plot in {}'.format(save_path))
        plt.savefig(save_path)

    def on_train_end(self, logs=None):
        result_path = Path(self.outdir, 'result.json')
        stop_time = tf.timestamp().numpy()
        total_time = round((stop_time - self.start_time), 2)
        throughput_per_epoch = (self.train_set_size / np.array(self.times))
        # The first epoch is excluded from the means (graph tracing / warm-up
        # overhead would skew them).
        mean_throughput = round(np.mean(throughput_per_epoch[1:]), 2)
        mean_epoch_time = round(np.mean(self.times[1:]), 2)
        batch_size_total = (self.batch_size_per_gpu * (self.num_gpus or self.num_cpus or 1))
        data = {'wl-scores': {'mean_throughput': mean_throughput, 'mean_epoch_time': mean_epoch_time}, 'wl-stats': {'num_epochs': len(self.times), 'epoch_times': self.times, 'train_start': self.start_time, 'train_stop': stop_time, 'train_time': total_time, 'horovod_enabled': self.horovod_enabled, 'GPU': self.num_gpus, 'CPU': self.num_cpus, 'train_set_size': self.train_set_size, 'batch_size_per_device': self.batch_size_per_gpu, 'batch_size_total': batch_size_total, 'steps_per_epoch': self.steps_per_epoch, 'events_per_epoch': (batch_size_total * self.steps_per_epoch), 'throughput_per_epoch': list(throughput_per_epoch)}}
        print('Saving result to {}'.format(result_path.resolve()))
        with result_path.open('w', encoding='utf-8') as f:
            # NpEncoder converts numpy scalars/arrays to JSON-native types.
            json.dump(data, f, ensure_ascii=False, indent=4, cls=NpEncoder)
            f.write('\n')
        self.plot(self.times)
class NpEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy scalars and arrays.

    Integers and floats are cast to their built-in equivalents; ndarrays are
    serialized as (nested) lists. Everything else falls back to the base
    encoder, which raises TypeError for unsupported types.
    """

    def default(self, obj):
        # Map numpy scalar families onto built-in types before encoding.
        for np_family, cast in ((np.integer, int), (np.floating, float)):
            if isinstance(obj, np_family):
                return cast(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
def get_model_builder(config, total_steps):
    # Build a keras-tuner model factory.
    # Returns (model_builder, optim_callbacks): model_builder(hp) samples
    # hyperparameters via hp.Choice, writes them into the (captured, shared)
    # config dict, then constructs and compiles the model.
    (lr_schedule, optim_callbacks, lr) = get_lr_schedule(config, steps=total_steps)

    def model_builder(hp):
        # NOTE(review): `config` is mutated in place, so concurrent trials
        # sharing this dict would race — assumed to run sequentially.
        node_encoding_hidden_dim = hp.Choice('node_dim', values=[128, 256, 512])
        config['parameters']['node_encoding_hidden_dim'] = node_encoding_hidden_dim
        config['parameters']['num_graph_layers_id'] = hp.Choice('num_graph_layers_id', [1, 2, 3])
        config['parameters']['num_graph_layers_reg'] = hp.Choice('num_graph_layers_reg', [1, 2, 3])
        config['parameters']['combined_graph_layer']['dropout'] = hp.Choice('cg_dropout', values=[0.0, 0.1, 0.2])
        config['parameters']['combined_graph_layer']['num_node_messages'] = hp.Choice('num_node_messages', [1, 2])
        config['parameters']['combined_graph_layer']['bin_size'] = hp.Choice('bin_size', values=[160, 320, 640])
        config['parameters']['combined_graph_layer']['ffn_dist_hidden_dim'] = hp.Choice('ffn_dist_hidden_dim', values=[64, 128, 256])
        config['parameters']['combined_graph_layer']['ffn_dist_num_layers'] = hp.Choice('ffn_dist_num_layers', values=[1, 2])
        config['parameters']['combined_graph_layer']['kernel']['dist_mult'] = hp.Choice('dist_mult', values=[0.01, 0.1, 1.0])
        # The message output dim is tied to the node encoding dim.
        config['parameters']['combined_graph_layer']['node_message']['output_dim'] = node_encoding_hidden_dim
        config['parameters']['combined_graph_layer']['node_message']['normalize_degrees'] = hp.Choice('normalize_degrees', values=[True, False])
        config['parameters']['output_decoding']['dropout'] = hp.Choice('output_dropout', values=[0.0, 0.1, 0.2])
        config['parameters']['output_decoding']['layernorm'] = hp.Choice('output_layernorm', values=[True, False])
        config['parameters']['output_decoding']['mask_reg_cls0'] = hp.Choice('output_mask_reg_cls0', values=[True, False])
        model = make_model(config, dtype='float32')
        # Build with batch size 1 just to materialize the weights.
        model.build((1, config['dataset']['padded_num_elem_size'], config['dataset']['num_input_features']))
        opt = get_optimizer(config, lr_schedule)
        (loss_dict, loss_weights) = get_loss_dict(config)
        model.compile(loss=loss_dict, optimizer=opt, sample_weight_mode='temporal', loss_weights=loss_weights)
        return model

    return (model_builder, optim_callbacks)
class LRFinder(Callback):
    """`Callback` that exponentially adjusts the learning rate after each training
    batch between `start_lr` and `end_lr` for a maximum number of batches:
    `max_steps`. The loss and learning rate are recorded at each step, allowing
    one to visually find a good learning rate as per
    https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html via the
    `plot` method.

    A version of this learning rate finder technique is also described under the
    name 'LR range test' in Leslie Smith's paper:
    https://arxiv.org/pdf/1803.09820.pdf.
    """

    def __init__(self, start_lr: float=1e-07, end_lr: float=0.01, max_steps: int=200, smoothing=0.9):
        super(LRFinder, self).__init__()
        (self.start_lr, self.end_lr) = (start_lr, end_lr)
        self.max_steps = max_steps
        # Exponential-moving-average factor for the smoothed loss.
        self.smoothing = smoothing
        (self.step, self.best_loss, self.avg_loss, self.lr) = (0, 0, 0, 0)
        (self.lrs, self.losses) = ([], [])

    def on_train_begin(self, logs=None):
        # Reset all state so the finder can be reused across fit() calls.
        (self.step, self.best_loss, self.avg_loss, self.lr) = (0, 0, 0, 0)
        (self.lrs, self.losses) = ([], [])

    def on_train_batch_begin(self, batch, logs=None):
        # Anneal the LR before each batch and push it into the optimizer.
        self.lr = self.exp_annealing(self.step)
        tf.keras.backend.set_value(self.model.optimizer.lr, self.lr)

    def on_train_batch_end(self, batch, logs=None):
        print('lr:', self.lr)
        print('step', self.step)
        logs = (logs or {})
        loss = logs.get('loss')
        step = self.step
        if loss:
            print('loss', loss)
            # Bias-corrected exponential moving average of the loss.
            self.avg_loss = ((self.smoothing * self.avg_loss) + ((1 - self.smoothing) * loss))
            smooth_loss = (self.avg_loss / (1 - (self.smoothing ** (self.step + 1))))
            self.losses.append(smooth_loss)
            self.lrs.append(self.lr)
            if ((step == 0) or (loss < self.best_loss)):
                self.best_loss = loss
            # Abort once the loss diverges (100x the best seen, or NaN).
            if ((smooth_loss > (100 * self.best_loss)) or tf.math.is_nan(smooth_loss)):
                self.model.stop_training = True
                print('Loss reached predefined maximum... stopping')
        if (step >= self.max_steps):
            print('STOPPING')
            self.model.stop_training = True
        self.step += 1

    def exp_annealing(self, step):
        # lr(step) = start_lr * (end_lr / start_lr) ** (step / max_steps)
        return (self.start_lr * ((self.end_lr / self.start_lr) ** ((step * 1.0) / self.max_steps)))

    def plot(self, save_dir=None, figname='lr_finder.jpg', log_scale=False):
        # Plot smoothed loss vs learning rate (log-scaled x axis); optionally
        # save the figure under save_dir.
        (fig, ax) = plt.subplots(1, 1)
        ax.set_ylabel('Loss')
        ax.set_xlabel('Learning Rate')
        ax.set_xscale('log')
        ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0e'))
        ax.plot(self.lrs, self.losses)
        if log_scale:
            ax.set_yscale('log')
        if (save_dir is not None):
            Path(save_dir).mkdir(parents=True, exist_ok=True)
            plt.savefig(str((Path(save_dir) / Path(figname))))
class ModelOptimizerCheckpoint(tf.keras.callbacks.ModelCheckpoint):
    """ModelCheckpoint that additionally pickles the optimizer variables each epoch."""

    def on_epoch_end(self, epoch, logs=None):
        # Let the stock ModelCheckpoint save the model weights first.
        super(ModelOptimizerCheckpoint, self).on_epoch_end(epoch, logs=logs)
        # self.opt_path is assigned externally after construction and may
        # reference `epoch` (1-based) and any metric in `logs`.
        opt_file = self.opt_path.format(epoch=epoch + 1, **logs)
        opt_state = {}
        self.model.optimizer.save_own_variables(opt_state)
        with open(opt_file, 'wb') as handle:
            pickle.dump({'weights': opt_state}, handle)
class CustomCallback(tf.keras.callbacks.Callback):
    """Per-epoch validation callback: delegates to the module-level epoch_end()
    helper, which dumps history JSON and optionally produces evaluation plots."""

    def __init__(self, outpath, dataset, config, plot_freq=1, horovod_enabled=False, comet_experiment=None, is_hpo_run=False):
        super(CustomCallback, self).__init__()
        self.outpath = outpath
        self.dataset = dataset
        self.config = config
        self.plot_freq = plot_freq
        self.horovod_enabled = horovod_enabled
        self.comet_experiment = comet_experiment
        self.is_hpo_run = is_hpo_run

    def on_epoch_end(self, epoch, logs=None):
        # Under Horovod, only rank 0 writes history/plots.
        if self.horovod_enabled and hvd.rank() != 0:
            return
        epoch_end(self, epoch, logs, comet_experiment=self.comet_experiment)
def epoch_end(self, epoch, logs, comet_experiment=None):
    # Shared epoch-end routine used by CustomCallback. Note: `self` is the
    # callback instance passed explicitly, not a bound-method receiver.
    # Writes history_{epoch}.json, and every `plot_freq` epochs runs a full
    # evaluation (jets + MET plots) and appends val_* metrics to `logs`.
    epoch = (epoch + 1)  # switch to 1-based epoch numbering for file names
    with open('{}/history_{}.json'.format(self.outpath, epoch), 'w') as fi:
        json.dump(logs, fi)
    if self.is_hpo_run:
        # assumes comet_experiment is not None whenever is_hpo_run is set —
        # TODO confirm against the callers.
        comet_experiment.log_metrics(logs, epoch=epoch)
    if (self.plot_freq <= 0):
        return
    if (self.plot_freq >= 1):
        if ((epoch % self.plot_freq) != 0):
            return
    cp_dir = (Path(self.outpath) / 'epoch_{}'.format(epoch))
    cp_dir.mkdir(parents=True, exist_ok=True)
    # Run the model over the validation dataset; writes parquet files to cp_dir.
    eval_model(self.model, self.dataset, self.config, cp_dir)
    (yvals, X, filenames) = load_eval_data(str((cp_dir / '*.parquet')))
    # The parquet files are only needed transiently for loading.
    for fi in filenames:
        os.remove(fi)
    met_data = compute_met_and_ratio(yvals)
    plot_jets(yvals, epoch, cp_dir, comet_experiment)
    plot_jet_ratio(yvals, epoch, cp_dir, comet_experiment)
    plot_met(met_data, epoch, cp_dir, comet_experiment)
    plot_met_ratio(met_data, epoch, cp_dir, comet_experiment)
    jet_distances = compute_distances(yvals['jet_gen_to_pred_genpt'], yvals['jet_gen_to_pred_predpt'], yvals['jet_ratio_pred'])
    met_distances = compute_distances(met_data['gen_met'], met_data['pred_met'], met_data['ratio_pred'])
    N_jets = len(awkward.flatten(yvals['jets_gen_pt']))
    N_jets_matched_pred = len(yvals['jet_gen_to_pred_genpt'])
    # Publish summary metrics both into Keras `logs` (as val_*) and to Comet.
    for (name, val) in [('jet_matched_frac', ((N_jets_matched_pred / N_jets) if (N_jets > 0) else float('nan'))), ('jet_wd', jet_distances['wd']), ('jet_iqr', jet_distances['iqr']), ('jet_med', jet_distances['p50']), ('met_wd', met_distances['wd']), ('met_iqr', met_distances['iqr']), ('met_med', met_distances['p50'])]:
        logs[('val_' + name)] = val
        if comet_experiment:
            # step is the original 0-based epoch index.
            comet_experiment.log_metric(name, val, step=(epoch - 1))
def prepare_callbacks(config, outdir, dataset, comet_experiment=None, horovod_enabled=False, benchmark_dir=None, num_train_steps=None, num_cpus=None, num_gpus=None, train_samples=None, is_hpo_run=False):
    # Assemble the full training callback list: NaN termination, checkpoint +
    # history/tensorboard callbacks, and (rank 0 only) an optional benchmark
    # logger when benchmark_dir is given.
    callbacks = []
    callbacks.append(tf.keras.callbacks.TerminateOnNaN())
    callbacks += get_checkpoint_history_callback(outdir, config, dataset, comet_experiment, horovod_enabled, is_hpo_run)
    if ((not horovod_enabled) or (hvd.rank() == 0)):
        if benchmark_dir:
            if (benchmark_dir == 'exp_dir'):
                # Sentinel value: put benchmark output next to the experiment.
                benchmark_dir = outdir
            if (config['dataset']['schema'] == 'delphes'):
                bmk_bs = config['train_test_datasets']['delphes']['batch_per_gpu']
            elif ((config['dataset']['schema'] == 'cms') or (config['dataset']['schema'] == 'clic')):
                assert (len(config['train_test_datasets']) == 1), 'Expected exactly 1 key, physical OR delphes, found {}'.format(config['train_test_datasets'].keys())
                bmk_bs = config['train_test_datasets']['physical']['batch_per_gpu']
            else:
                raise ValueError('Benchmark callback only supports delphes cms or clic dataset schema. {}'.format(config['dataset']['schema']))
            Path(benchmark_dir).mkdir(exist_ok=True, parents=True)
            callbacks.append(BenchmarkLoggerCallback(outdir=benchmark_dir, steps_per_epoch=num_train_steps, batch_size_per_gpu=bmk_bs, num_gpus=num_gpus, num_cpus=num_cpus, train_set_size=train_samples, horovod_enabled=horovod_enabled))
    return callbacks
def get_checkpoint_history_callback(outdir, config, dataset, comet_experiment, horovod_enabled, is_hpo_run=False):
    # Build the checkpoint, validation-history and tensorboard callbacks.
    callbacks = []
    if ((not horovod_enabled) or (hvd.rank() == 0)):
        # Only rank 0 writes checkpoints under Horovod.
        cp_dir = (Path(outdir) / 'weights')
        cp_dir.mkdir(parents=True, exist_ok=True)
        cp_callback = ModelOptimizerCheckpoint(filepath=str((cp_dir / 'weights-{epoch:02d}-{val_loss:.6f}.hdf5')), save_weights_only=True, verbose=1, monitor=config['callbacks']['checkpoint']['monitor'], save_best_only=False)
        # opt_path is consumed by ModelOptimizerCheckpoint.on_epoch_end.
        cp_callback.opt_path = str((cp_dir / 'opt-{epoch:02d}-{val_loss:.6f}.pkl'))
        if config.get('do_checkpoint_callback', True):
            callbacks += [cp_callback]
    if (not horovod_enabled):
        history_path = (Path(outdir) / 'history')
        history_path.mkdir(parents=True, exist_ok=True)
        history_path = str(history_path)
        cb = CustomCallback(history_path, dataset.tensorflow_dataset.take(config['validation_num_events']), config, plot_freq=config['callbacks']['plot_freq'], horovod_enabled=horovod_enabled, comet_experiment=comet_experiment, is_hpo_run=is_hpo_run)
        if config.get('do_validation_callback', True):
            callbacks += [cb]
    # NOTE(review): indentation reconstructed — confirm the tensorboard block
    # is meant to run regardless of horovod rank.
    tb = CustomTensorBoard(log_dir=(outdir + '/logs'), histogram_freq=config['callbacks']['tensorboard']['hist_freq'], write_graph=False, write_images=False, update_freq='batch', profile_batch=(config['callbacks']['tensorboard']['profile_batch'] if ('profile_batch' in config['callbacks']['tensorboard'].keys()) else 0), dump_history=config['callbacks']['tensorboard']['dump_history'])
    # Rename so Keras treats it like the stock TensorBoard callback.
    tb.__class__.__name__ = 'TensorBoard'
    callbacks += [tb]
    return callbacks
def get_rundir(base='experiments'):
    """Return the path of the next sequential run directory under *base*.

    Directories are named 'run_NN' with a zero-padded, 1-based counter. The
    directory itself is NOT created; only *base* is created if missing.

    BUG FIX: previously any stray entry in *base* (e.g. '.DS_Store') crashed
    the `int(s.split('run_')[1])` parse; now only 'run_<digits>' entries are
    considered when computing the next run number.
    """
    if not os.path.exists(base):
        os.makedirs(base)
    run_numbers = []
    for entry in os.listdir(base):
        suffix = entry[len('run_'):]
        if entry.startswith('run_') and suffix.isdigit():
            run_numbers.append(int(suffix))
    run_number = (max(run_numbers) + 1) if run_numbers else 1
    logdir = ('run_%02d' % run_number)
    return '{}/{}'.format(base, logdir)
def make_model(config, dtype):
    """Instantiate the model named in config['parameters']['model'].

    Supported types: 'transformer' and 'gnn_dense'. Raises KeyError for
    anything else.
    """
    model_type = config['parameters']['model']
    if model_type == 'transformer':
        return make_transformer(config, dtype)
    if model_type == 'gnn_dense':
        return make_gnn_dense(config, dtype)
    raise KeyError('Unknown model type {}'.format(model_type))
def make_gnn_dense(config, dtype):
    """Construct a PFNetDense model from the configuration.

    Forwards the whitelisted keys from config['parameters'] (when present) as
    keyword arguments to PFNetDense.
    """
    forwarded = [
        'do_node_encoding',
        'node_update_mode',
        'node_encoding_hidden_dim',
        'dropout',
        'activation',
        'num_graph_layers_id',
        'num_graph_layers_reg',
        'input_encoding',
        'skip_connection',
        'output_decoding',
        'combined_graph_layer',
        'debug',
    ]
    kwargs = {par: config['parameters'][par] for par in forwarded if par in config['parameters']}
    return PFNetDense(
        multi_output=config['setup']['multi_output'],
        num_input_classes=config['dataset']['num_input_classes'],
        num_output_classes=config['dataset']['num_output_classes'],
        schema=config['dataset']['schema'],
        # Extra heads are only built when the corresponding loss is enabled.
        event_set_output=(config['loss']['event_loss'] != 'none'),
        met_output=(config['loss']['met_loss'] != 'none'),
        cls_output_as_logits=config['setup'].get('cls_output_as_logits', False),
        small_graph_opt=config['setup'].get('small_graph_opt', False),
        use_normalizer=config['setup'].get('use_normalizer', True),
        **kwargs,
    )
def make_transformer(config, dtype):
    """Construct a PFNetTransformer model from the configuration.

    Forwards the whitelisted keys from config['parameters'] (when present) as
    keyword arguments to PFNetTransformer.
    """
    forwarded = [
        'input_encoding',
        'output_decoding',
        'num_layers_encoder',
        'num_layers_decoder_reg',
        'num_layers_decoder_cls',
        'hidden_dim',
        'num_heads',
        'num_random_features',
    ]
    kwargs = {par: config['parameters'][par] for par in forwarded if par in config['parameters']}
    return PFNetTransformer(
        multi_output=config['setup']['multi_output'],
        num_input_classes=config['dataset']['num_input_classes'],
        num_output_classes=config['dataset']['num_output_classes'],
        schema=config['dataset']['schema'],
        # Extra heads are only built when the corresponding loss is enabled.
        event_set_output=(config['loss']['event_loss'] != 'none'),
        met_output=(config['loss']['met_loss'] != 'none'),
        cls_output_as_logits=config['setup']['cls_output_as_logits'],
        **kwargs,
    )
def eval_model(model, dataset, config, outdir, jet_ptcut=5.0, jet_match_dr=0.1, verbose=False):
    """Run the model over `dataset`, cluster jets, and write one parquet file per batch.

    For each batch: predicts particles, unpacks gen/cand targets, clusters
    jets with fastjet for 'gen'/'cand'/'pred', matches gen jets to pred/cand
    jets within `jet_match_dr`, and saves inputs, particles, jets and matches
    to `<outdir>/pred_batch<N>.parquet`.

    Args:
        model: trained Keras model with a dict output (cls/charge/pt/... heads).
        dataset: iterable of batches with keys 'X', 'ygen', 'ycand'.
        config: parsed configuration dict (jet algo, num_output_classes, ...).
        outdir: output directory for the parquet files.
        jet_ptcut: minimum jet pt passed to inclusive_jets.
        jet_match_dr: delta-R used for gen<->pred / gen<->cand jet matching.
        verbose: print progress details.

    Raises:
        KeyError: if config['evaluation_jet_algo'] is not supported.
    """
    ibatch = 0
    # choose the jet definition from the config
    if (config['evaluation_jet_algo'] == 'ee_genkt_algorithm'):
        jetdef = fastjet.JetDefinition(fastjet.ee_genkt_algorithm, 0.7, (- 1.0))
    elif (config['evaluation_jet_algo'] == 'antikt_algorithm'):
        jetdef = fastjet.JetDefinition(fastjet.antikt_algorithm, 0.4)
    else:
        raise KeyError('Unknown evaluation_jet_algo: {}'.format(config['evaluation_jet_algo']))
    for elem in tqdm(dataset, desc='Evaluating model'):
        if verbose:
            print('evaluating model')
        ypred = model.predict(elem['X'], verbose=verbose)
        # charge head is one-hot over 3 classes; argmax-1 maps it to {-1, 0, +1}
        ypred['charge'] = (np.argmax(ypred['charge'], axis=(- 1)) - 1)
        if verbose:
            print('unpacking outputs')
        ygen = [unpack_target(x, config['dataset']['num_output_classes'], config) for x in elem['ygen']]
        ycand = [unpack_target(x, config['dataset']['num_output_classes'], config) for x in elem['ycand']]
        # re-stack the per-event dicts into per-key batched tensors
        ygen = {k: tf.stack([x[k] for x in ygen]) for k in ygen[0].keys()}
        ycand = {k: tf.stack([x[k] for x in ycand]) for k in ycand[0].keys()}
        ygen['charge'] = (tf.expand_dims(tf.math.argmax(ygen['charge'], axis=(- 1)), axis=(- 1)) - 1)
        ycand['charge'] = (tf.expand_dims(tf.math.argmax(ycand['charge'], axis=(- 1)), axis=(- 1)) - 1)
        # integer class id from the one-hot 'cls' output
        ygen['cls_id'] = tf.math.argmax(ygen['cls'], axis=(- 1))
        ycand['cls_id'] = tf.math.argmax(ycand['cls'], axis=(- 1))
        ypred['cls_id'] = tf.math.argmax(ypred['cls'], axis=(- 1)).numpy()
        # 'met' is event-level, everything else is per-particle
        keys_particle = [k for k in ypred.keys() if (k != 'met')]
        X = awkward.Array(elem['X'].numpy())
        ygen = awkward.Array({k: squeeze_if_one(ygen[k].numpy()) for k in keys_particle})
        ycand = awkward.Array({k: squeeze_if_one(ycand[k].numpy()) for k in keys_particle})
        ypred = awkward.Array({k: squeeze_if_one(ypred[k]) for k in keys_particle})
        awkvals = {'gen': ygen, 'cand': ycand, 'pred': ypred}
        jets_coll = {}
        if verbose:
            print('clustering jets')
        for typ in ['gen', 'cand', 'pred']:
            phi = np.arctan2(awkvals[typ]['sin_phi'], awkvals[typ]['cos_phi'])
            cls_id = awkward.argmax(awkvals[typ]['cls'], axis=(- 1), mask_identity=False)
            # class 0 is the "no particle" slot; keep only real particles
            valid = (cls_id != 0)
            pt = awkward.from_iter([np.array(v[m], np.float32) for (v, m) in zip(awkvals[typ]['pt'], valid)])
            eta = awkward.from_iter([np.array(v[m], np.float32) for (v, m) in zip(awkvals[typ]['eta'], valid)])
            energy = awkward.from_iter([np.array(v[m], np.float32) for (v, m) in zip(awkvals[typ]['energy'], valid)])
            phi = awkward.from_iter([np.array(v[m], np.float32) for (v, m) in zip(phi, valid)])
            if verbose:
                print(typ, pt)
            # fastjet cannot cluster a fully-empty batch; substitute dummy arrays
            if (len(awkward.flatten(pt)) == 0):
                pt = build_dummy_array(len(pt), np.float64)
                eta = build_dummy_array(len(pt), np.float64)
                phi = build_dummy_array(len(pt), np.float64)
                energy = build_dummy_array(len(pt), np.float64)
            vec = vector.awk(awkward.zip({'pt': pt, 'eta': eta, 'phi': phi, 'e': energy}))
            cluster = fastjet.ClusterSequence(vec.to_xyzt(), jetdef)
            jets_coll[typ] = cluster.inclusive_jets(min_pt=jet_ptcut)
            if verbose:
                print('jets {}'.format(typ), awkward.to_numpy(awkward.count(jets_coll[typ].px, axis=1)))
        gen_to_pred = match_two_jet_collections(jets_coll, 'gen', 'pred', jet_match_dr)
        gen_to_cand = match_two_jet_collections(jets_coll, 'gen', 'cand', jet_match_dr)
        matched_jets = awkward.Array({'gen_to_pred': gen_to_pred, 'gen_to_cand': gen_to_cand})
        outfile = '{}/pred_batch{}.parquet'.format(outdir, ibatch)
        if verbose:
            print('saving to {}'.format(outfile))
        awkward.to_parquet(awkward.Array({'inputs': X, 'particles': awkvals, 'jets': jets_coll, 'matched_jets': matched_jets}), outfile)
        ibatch += 1
def freeze_model(model, config, outdir):
    """Benchmark the model on random inputs and export it to ONNX.

    Wraps the model in a tf.function that concatenates all output heads,
    times it for several (batch, n_particles) combinations, then writes
    `<outdir>/model.onnx` via tf2onnx.
    """

    def model_output(ret):
        # concatenate the per-particle output heads into one export tensor
        return tf.concat([ret['cls'], ret['charge'], ret['pt'], ret['eta'], ret['sin_phi'], ret['cos_phi'], ret['energy']], axis=(- 1))
    full_model = tf.function((lambda x: model_output(model(x, training=False))))
    niter = 10  # timing iterations per configuration
    nfeat = config['dataset']['num_input_features']
    # benchmark at multiples of bin_size for binned GNNs, else generic sizes
    if ('combined_graph_layer' in config['parameters']):
        bin_size = config['parameters']['combined_graph_layer']['bin_size']
        elem_range = list(range(bin_size, (5 * bin_size), bin_size))
    else:
        elem_range = range(100, 1000, 200)
    for ibatch in [1, 2, 4]:
        for nptcl in elem_range:
            X = np.random.rand(ibatch, nptcl, nfeat)
            # warm-up call so tf.function tracing is excluded from the timing
            full_model(X)
            t0 = time.time()
            for i in range(niter):
                full_model(X)
            t1 = time.time()
            print(ibatch, nptcl, ((t1 - t0) / niter))
    import tf2onnx
    (model_proto, _) = tf2onnx.convert.from_function(full_model, opset=12, input_signature=(tf.TensorSpec((None, None, nfeat), tf.float32, name='x:0'),), output_path=str((Path(outdir) / 'model.onnx')))
class LearningRateLoggingCallback(tf.keras.callbacks.Callback):
    """Logs the optimizer's decayed learning rate to TensorBoard after each epoch."""

    def on_epoch_end(self, epoch, numpy_logs):
        # not every optimizer exposes _decayed_lr; print the error and move on
        try:
            current_lr = self.model.optimizer._decayed_lr(tf.float32).numpy()
            tf.summary.scalar('learning rate', data=current_lr, step=epoch)
        except AttributeError as err:
            print(err)
def configure_model_weights(model, trainable_layers):
    """Freeze/unfreeze parts of the model and recompile.

    trainable_layers may be None (treated as 'all'), 'all', 'regression',
    'classification', a layer name, or a list of layer names. Prints the
    resulting trainable / non-trainable parameter counts.
    """
    print('setting trainable layers: {}'.format(trainable_layers))
    if trainable_layers is None:
        trainable_layers = 'all'
    if trainable_layers == 'all':
        model.trainable = True
    elif trainable_layers == 'regression':
        # freeze the id graph layers, train only the regression path
        for layer in model.cg_id:
            layer.trainable = False
        for layer in model.cg_reg:
            layer.trainable = True
        model.output_dec.set_trainable_regression()
    elif trainable_layers == 'classification':
        # train only the id path, freeze the regression graph layers
        for layer in model.cg_id:
            layer.trainable = True
        for layer in model.cg_reg:
            layer.trainable = False
        model.output_dec.set_trainable_classification()
    else:
        if isinstance(trainable_layers, str):
            trainable_layers = [trainable_layers]
        model.set_trainable_named(trainable_layers)
    model.compile()
    num_trainable = sum(np.prod(tf.keras.backend.get_value(w).shape) for w in model.trainable_weights)
    num_frozen = sum(np.prod(tf.keras.backend.get_value(w).shape) for w in model.non_trainable_weights)
    print('trainable={} non_trainable={}'.format(num_trainable, num_frozen))
def make_focal_loss(config):
    """Return a loss(y_true, y_pred) closure computing sigmoid focal cross-entropy.

    alpha/gamma are read from config['setup'] at call time (defaults 0.25 / 2.0);
    from_logits follows config['setup']['cls_output_as_logits'].
    """
    def loss(y_true, y_pred):
        from .tfa import sigmoid_focal_crossentropy
        setup = config['setup']
        return sigmoid_focal_crossentropy(
            y_true,
            y_pred,
            alpha=float(setup.get('focal_loss_alpha', 0.25)),
            gamma=float(setup.get('focal_loss_gamma', 2.0)),
            from_logits=setup['cls_output_as_logits'],
        )
    return loss
class CosineAnnealer():
    """Cosine interpolation from `start` to `end` over `steps` calls to step()."""

    def __init__(self, start, end, steps):
        self.start = start
        self.end = end
        self.steps = steps
        # number of step() calls made so far; external schedulers may reset this
        self.n = 0

    def step(self):
        """Return the annealed value for the current position and advance by one."""
        fraction = self.n / self.steps
        self.n += 1
        cos_term = np.cos(np.pi * fraction) + 1
        return self.end + (self.start - self.end) / 2.0 * cos_term
class OneCycleScheduler(LearningRateSchedule):
    """`LearningRateSchedule` implementing the 1cycle policy (Leslie Smith,
    https://arxiv.org/pdf/1803.09820.pdf), with the fastai two-phase cosine
    variant (https://docs.fast.ai/callbacks.one_cycle.html).

    Warm-up: LR rises from `lr_max / div_factor` to `lr_max` over the first
    `warmup_ratio` fraction of `steps`; then it decays from `lr_max` to
    `lr_max / final_div`. The whole schedule is precomputed into a tensor at
    construction time and indexed by step in __call__.

    NOTE: momentum is NOT handled here; pair with `MomentumOneCycleScheduler`.
    """

    def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95, warmup_ratio=0.3, div_factor=25.0, final_div=100000.0, name=None):
        super(OneCycleScheduler, self).__init__()
        lr_min = (lr_max / div_factor)
        if (final_div is None):
            final_lr = (lr_max / (div_factor * 10000.0))
        else:
            final_lr = (lr_max / final_div)
        phase_1_steps = int((steps * warmup_ratio))
        phase_2_steps = (steps - phase_1_steps)
        # stored for get_config (mom_min/mom_max are only serialized, not used here)
        self.lr_max = lr_max
        self.steps = steps
        self.mom_min = mom_min
        self.mom_max = mom_max
        self.warmup_ratio = warmup_ratio
        self.div_factor = div_factor
        self.final_div = final_div
        self.name = name
        # phase 0: warm-up lr_min -> lr_max; phase 1: anneal lr_max -> final_lr
        phases = [CosineAnnealer(lr_min, lr_max, phase_1_steps), CosineAnnealer(lr_max, final_lr, phase_2_steps)]
        step = 0
        phase = 0
        # precompute the full per-step LR table once
        full_lr_schedule = np.zeros(int(steps))
        for ii in np.arange(np.floor(steps), dtype=int):
            step += 1
            # NOTE(review): the switch uses `step >= phase_1_steps` AFTER the
            # increment, so the last warm-up step already samples the decay
            # annealer — confirm this off-by-one is intended.
            if (step >= phase_1_steps):
                phase = 1
            full_lr_schedule[ii] = phases[phase].step()
        self.full_lr_schedule = tf.convert_to_tensor(full_lr_schedule)

    def __call__(self, step):
        """Look up the precomputed LR for `step` (1-based; index step-1)."""
        with ops.name_scope((self.name or 'OneCycleScheduler')):
            return self.full_lr_schedule[(tf.cast(step, 'int32') - 1)]

    def get_config(self):
        """Serialize the constructor arguments (Keras schedule protocol)."""
        return {'lr_max': self.lr_max, 'steps': self.steps, 'mom_min': self.mom_min, 'mom_max': self.mom_max, 'warmup_ratio': self.warmup_ratio, 'div_factor': self.div_factor, 'final_div': self.final_div, 'name': self.name}
class MomentumOneCycleScheduler(Callback):
    """`Callback` scheduling optimizer momentum per the 1cycle policy
    (Leslie Smith, https://arxiv.org/pdf/1803.09820.pdf).

    Only the momentum is scheduled, not the learning rate; intended to be
    paired with the OneCycleScheduler LR schedule above or similar.
    Momentum anneals mom_max -> mom_min during warm-up, then back up.
    """

    def __init__(self, steps, mom_min=0.85, mom_max=0.95, warmup_ratio=0.3):
        super(MomentumOneCycleScheduler, self).__init__()
        phase_1_steps = (steps * warmup_ratio)
        phase_2_steps = (steps - phase_1_steps)
        self.phase_1_steps = phase_1_steps
        self.phase_2_steps = phase_2_steps
        self.phase = 0  # 0 = warm-up, 1 = decay
        self.step = 0   # batches seen so far
        self.phases = [CosineAnnealer(mom_max, mom_min, phase_1_steps), CosineAnnealer(mom_min, mom_max, phase_2_steps)]

    def _get_opt(self):
        # the optimizer is only reachable once the callback is attached to a model
        opt = self.model.optimizer
        return opt

    def set_step(self, step):
        """Set the step of the schedule, 1 step is one batch."""
        self.step = step
        if (self.step >= self.phase_1_steps):
            self.phase = 1
            # fast-forward the decay annealer to the right position
            self.phases[1].n = (step - self.phase_1_steps)
            assert ((self.phases[1].n >= 0) and (self.phases[1].n < self.phase_2_steps))
        else:
            self.phase = 0
            self.phases[0].n = step

    def on_train_begin(self, logs=None):
        # resume mid-schedule when training restarts from a checkpointed optimizer
        self.set_step(tf.keras.backend.get_value(self._get_opt().iterations))
        self.set_momentum(self.mom_schedule().step())

    def on_train_batch_end(self, batch, logs=None):
        self.step += 1
        if (self.step >= self.phase_1_steps):
            self.phase = 1
        self.set_momentum(self.mom_schedule().step())

    def set_momentum(self, mom):
        """Write `mom` into the optimizer (beta_1 for Adam, momentum for SGD)."""
        opt = self._get_opt()
        if hasattr(opt, 'beta_1'):
            tf.keras.backend.set_value(opt.beta_1, mom)
        elif hasattr(opt, 'momentum'):
            tf.keras.backend.set_value(opt.momentum, mom)
        else:
            raise NotImplementedError('Only SGD and Adam are supported by MomentumOneCycleScheduler: {}'.format(type(opt)))

    def mom_schedule(self):
        # annealer for the currently-active phase
        return self.phases[self.phase]
def is_tensor_or_variable(x):
    """Return True if `x` is a TensorFlow tensor or a tf.Variable."""
    if tf.is_tensor(x):
        return True
    return isinstance(x, tf.Variable)
class LossFunctionWrapper(tf.keras.losses.Loss):
    """Adapt a plain function `fn(y_true, y_pred, **kwargs)` to the Keras `Loss` API."""

    def __init__(self, fn, reduction=tf.keras.losses.Reduction.AUTO, name=None, **kwargs):
        """Initialize the wrapper.

        Args:
            fn: loss function with signature `fn(y_true, y_pred, **kwargs)`.
            reduction: `tf.keras.losses.Reduction` applied to the loss; with
                `tf.distribute.Strategy` outside built-in loops, AUTO /
                SUM_OVER_BATCH_SIZE raise an error.
            name: optional loss name.
            **kwargs: extra keyword arguments forwarded to `fn` on every call.
        """
        super().__init__(reduction=reduction, name=name)
        self.fn = fn
        self._fn_kwargs = kwargs

    def call(self, y_true, y_pred):
        """Evaluate the wrapped function on a batch; returns per-sample losses."""
        return self.fn(y_true, y_pred, **self._fn_kwargs)

    def get_config(self):
        """Serialize, evaluating tensor-valued kwargs to concrete values."""
        extra = {
            k: tf.keras.backend.eval(v) if is_tensor_or_variable(v) else v
            for k, v in self._fn_kwargs.items()
        }
        return {**super().get_config(), **extra}
class SigmoidFocalCrossEntropy(LossFunctionWrapper):
    """Keras `Loss` for sigmoid focal cross-entropy (RetinaNet,
    https://arxiv.org/pdf/1708.02002.pdf).

    Focal loss down-weights well-classified examples and focuses on hard ones,
    which helps with highly imbalanced classes (e.g. object detection
    background vs. foreground).

    Usage:

    >>> fl = tfa.losses.SigmoidFocalCrossEntropy()
    >>> loss = fl(y_true=[[1.0], [1.0], [0.0]], y_pred=[[0.97], [0.91], [0.03]])

    Args:
        from_logits: whether y_pred is logits rather than probabilities.
        alpha: balancing factor, default 0.25.
        gamma: modulating factor, default 2.0.
        reduction: Keras reduction; default NONE (per-sample losses).
        name: loss name.

    Raises:
        ValueError: if `gamma` is negative (raised by the wrapped function).
    """

    def __init__(self, from_logits: bool=False, alpha=0.25, gamma=2.0, reduction: str=tf.keras.losses.Reduction.NONE, name: str='sigmoid_focal_crossentropy'):
        super().__init__(sigmoid_focal_crossentropy, name=name, reduction=reduction, from_logits=from_logits, alpha=alpha, gamma=gamma)
@tf.function
def sigmoid_focal_crossentropy(y_true, y_pred, alpha=0.25, gamma=2.0, from_logits: bool=False) -> tf.Tensor:
    """Sigmoid focal cross-entropy (RetinaNet, https://arxiv.org/pdf/1708.02002.pdf).

    Down-weights well-classified examples so training focuses on hard ones;
    useful for highly imbalanced classification.

    Args:
        y_true: true targets tensor.
        y_pred: predictions tensor (probabilities, or logits if from_logits).
        alpha: balancing factor; a falsy value disables the alpha weighting.
        gamma: modulating factor; must be >= 0, falsy disables modulation.
        from_logits: whether y_pred is logits.

    Returns:
        Weighted loss `Tensor`, summed over the last axis (one value per sample).

    Raises:
        ValueError: if gamma is negative.
    """
    if (gamma and (gamma < 0)):
        raise ValueError('Value of gamma should be greater than or equal to zero.')
    y_pred = tf.convert_to_tensor(y_pred)
    y_true = tf.cast(y_true, dtype=y_pred.dtype)
    # element-wise binary cross-entropy
    ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)
    if from_logits:
        pred_prob = tf.sigmoid(y_pred)
    else:
        pred_prob = y_pred
    # p_t: model's probability for the true class of each element
    p_t = ((y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob)))
    alpha_factor = 1.0
    modulating_factor = 1.0
    if alpha:
        alpha = tf.cast(alpha, dtype=y_true.dtype)
        alpha_factor = ((y_true * alpha) + ((1 - y_true) * (1 - alpha)))
    if gamma:
        gamma = tf.cast(gamma, dtype=y_true.dtype)
        # (1 - p_t)^gamma suppresses the loss of easy (confident) examples
        modulating_factor = tf.pow((1.0 - p_t), gamma)
    return tf.reduce_sum(((alpha_factor * modulating_factor) * ce), axis=(- 1))
def get_hp_str(result):
    """Format the config/* hyperparameters of a ray-tune result row as a
    multi-line string, six entries per line."""
    def extract(col):
        # map 'config/<hp>' columns to '<hp>', everything else to None
        if 'config' in col:
            return col.split('config/')[(- 1)]
        return None

    hp_names = [h for h in (extract(col) for col in result.keys()) if h is not None]
    out = ''
    for idx, hp in enumerate(hp_names):
        if idx % 6 == 0:
            out += '\n'
        out += '{}={}; '.format(hp, result['config/{}'.format(hp)].values[0])
    return out
def plot_ray_analysis(analysis, save=False, skip=0):
    """Plot per-trial training curves from a ray-tune analysis.

    For every trial, draws a 5x4 grid of metric-vs-epoch subplots (skipping
    the first `skip` epochs) titled with the trial's hyperparameters. With
    save=True, writes <trial_logdir>/trial_summary.jpg per trial; otherwise
    shows all figures interactively.
    """
    to_plot = ['charge_loss', 'cls_loss', 'cos_phi_loss', 'energy_loss', 'eta_loss', 'learning_rate', 'loss', 'pt_loss', 'sin_phi_loss', 'val_charge_loss', 'val_cls_loss', 'val_cos_phi_loss', 'val_energy_loss', 'val_eta_loss', 'val_loss', 'val_pt_loss', 'val_sin_phi_loss']
    dfs = analysis.fetch_trial_dataframes()
    result_df = analysis.dataframe()
    for key in tqdm(dfs.keys(), desc='Creating Ray analysis plots', total=len(dfs.keys())):
        # row of the overall results table matching this trial's logdir
        result = result_df[(result_df['logdir'] == key)]
        (fig, axs) = plt.subplots(5, 4, figsize=(12, 9), tight_layout=True)
        for (var, ax) in zip(to_plot, axs.flat):
            ax.plot(dfs[key].index.values[skip:], dfs[key][var][skip:], alpha=0.8)
            ax.set_xlabel('Epoch')
            ax.set_ylabel(var)
            ax.grid(alpha=0.3)
        plt.suptitle(get_hp_str(result))
        if save:
            plt.savefig((key + '/trial_summary.jpg'))
            plt.close()
    if (not save):
        plt.show()
    else:
        print('Saved plots in trial dirs.')
def correct_column_names_in_trial_dataframes(analysis):
    """Restore the expected column names on every trial dataframe.

    Some trial dataframes come back with their first data row used as the
    header; this overwrites the columns of each dataframe in the
    ray.tune.Analysis object with the canonical names.
    """
    expected_columns = ['adam_beta_1', 'charge_loss', 'cls_acc_unweighted', 'cls_loss', 'cos_phi_loss', 'energy_loss', 'eta_loss', 'learning_rate', 'loss', 'pt_loss', 'sin_phi_loss', 'val_charge_loss', 'val_cls_acc_unweighted', 'val_cls_acc_weighted', 'val_cls_loss', 'val_cos_phi_loss', 'val_energy_loss', 'val_eta_loss', 'val_loss', 'val_pt_loss', 'val_sin_phi_loss', 'time_this_iter_s', 'should_checkpoint', 'done', 'timesteps_total', 'episodes_total', 'training_iteration', 'experiment_id', 'date', 'timestamp', 'time_total_s', 'pid', 'hostname', 'node_ip', 'time_since_restore', 'timesteps_since_restore', 'iterations_since_restore', 'trial_id']
    dataframes = analysis.trial_dataframes
    for key in dataframes:
        dataframes[key].columns = expected_columns
    analysis._trial_dataframes = dataframes
def get_top_k_df(analysis, k):
    """Return the k best trial rows from a ray-tune analysis dataframe.

    'Best' is the k smallest values of analysis.default_metric when
    analysis.default_mode == 'min', the k largest when it is 'max'.

    Fix: the original left `dd` unbound for any other mode, producing a
    confusing UnboundLocalError; raise an explicit ValueError instead.
    """
    result_df = analysis.dataframe()
    if analysis.default_mode == 'min':
        return result_df.nsmallest(k, analysis.default_metric)
    if analysis.default_mode == 'max':
        return result_df.nlargest(k, analysis.default_metric)
    raise ValueError('Unknown default_mode: {}'.format(analysis.default_mode))
def topk_summary_plot(analysis, k, save=False, save_dir=None):
    """Grid of training curves for the top-k trials: one row per trial,
    one column per metric in `to_plot`.

    NOTE(review): the grid is created with 5 columns but only 3 metrics are
    plotted — confirm whether the extra columns are intentional.
    Saves to <save_dir>/topk_summary_plot.jpg (or CWD) when save=True,
    otherwise shows the figure.
    """
    to_plot = ['val_cls_loss', 'val_energy_loss', 'val_loss']
    dd = get_top_k_df(analysis, k)
    dfs = analysis.trial_dataframes
    (fig, axs) = plt.subplots(k, 5, figsize=(12, 9), tight_layout=True)
    for (key, ax_row) in zip(dd['logdir'], axs):
        for (var, ax) in zip(to_plot, ax_row):
            ax.plot(dfs[key].index.values, dfs[key][var], alpha=0.8)
            ax.set_xlabel('Epoch')
            ax.set_ylabel(var)
            ax.grid(alpha=0.3)
    if save:
        if save_dir:
            plt.savefig(str((Path(save_dir) / 'topk_summary_plot.jpg')))
        else:
            plt.savefig('topk_summary_plot.jpg')
    else:
        plt.show()
def topk_summary_plot_v2(analysis, k, save=False, save_dir=None):
    """Overlay val_loss and val_cls_loss curves for the top-k trials,
    one subplot per metric with one labeled line per trial.

    Saves to <save_dir>/topk_summary_plot_v2.jpg when save/save_dir given,
    otherwise shows the figure.

    Fix: the save=True fallback filename was 'topk_summary_plot.jpg', which
    both clashed with topk_summary_plot() and contradicted the save_dir
    branch; it now uses the v2 name consistently.
    """
    print('Creating summary plot of top {} trials.'.format(k))
    to_plot = ['val_loss', 'val_cls_loss']
    dd = get_top_k_df(analysis, k)
    dfs = analysis.trial_dataframes
    (fig, axs) = plt.subplots(len(to_plot), 1, figsize=(12, 9), tight_layout=True, sharex=True)
    for (var, ax_row) in zip(to_plot, axs):
        for (ii, key) in enumerate(dd['logdir']):
            ax_row.plot(dfs[key].index.values, dfs[key][var], alpha=0.8, label='#{}'.format((ii + 1)))
        ax_row.set_ylabel(var)
        ax_row.grid(alpha=0.3)
        ax_row.legend()
    # x-axis is shared; label only the bottom subplot
    ax_row.set_xlabel('Epoch')
    plt.suptitle("Top {} best trials according to '{}'".format(k, analysis.default_metric))
    if (save or save_dir):
        if save_dir:
            file_name = str((Path(save_dir) / 'topk_summary_plot_v2.jpg'))
        else:
            file_name = 'topk_summary_plot_v2.jpg'
        plt.savefig(file_name)
        print('Saved summary plot to {}'.format(file_name))
    else:
        plt.show()
def summarize_top_k(analysis, k, save=False, save_dir=None):
    """Build a color-styled summary table of the top-k trials.

    Columns: the four loss metrics plus every config/* hyperparameter and the
    trial logdir. Lower-is-better columns get a red gradient with the best
    value highlighted. Writes <save_dir>/summary_table.xlsx (or CWD) when
    save/save_dir is given.

    Returns:
        (summary, styled_summary): the raw DataFrame and its pandas Styler.
    """
    print('Creating summary table of top {} trials.'.format(k))
    dd = get_top_k_df(analysis, k)
    summary = pd.concat([dd[['loss', 'cls_loss', 'val_loss', 'val_cls_loss']], dd.filter(regex='config/*'), dd['logdir']], axis=1)
    cm_green = sns.light_palette('green', as_cmap=True)
    cm_red = sns.light_palette('red', as_cmap=True)
    # currently no higher-is-better metrics are included in the table
    max_is_better = []
    min_is_better = ['loss', 'cls_loss', 'val_loss', 'val_cls_loss']
    styled_summary = summary.style.background_gradient(cmap=cm_green, subset=max_is_better).background_gradient(cmap=cm_red, subset=min_is_better).highlight_max(subset=max_is_better, props='color:black; font-weight:bold; background-color:yellow;').highlight_min(subset=min_is_better, props='color:black; font-weight:bold; background-color:yellow;').set_caption('Top {} trials according to {}'.format(k, analysis.default_metric)).hide_index()
    if (save or save_dir):
        if save_dir:
            xl_file = str((Path(save_dir) / 'summary_table.xlsx'))
        else:
            xl_file = 'summary_table.xlsx'
        styled_summary.to_excel(xl_file, engine='openpyxl')
        print('Saved plot table to {}'.format(xl_file))
    return (summary, styled_summary)
def analyze_ray_experiment(exp_dir, default_metric, default_mode):
    """Load a ray-tune experiment directory and write the top-k summary plot
    and table into it."""
    from ray.tune import Analysis
    analysis = Analysis(exp_dir, default_metric=default_metric, default_mode=default_mode)
    topk_summary_plot_v2(analysis, 5, save_dir=exp_dir)
    summary, styled = summarize_top_k(analysis, k=10, save_dir=exp_dir)
def count_skipped_configurations(exp_dir):
    """Count skipped configurations recorded in <exp_dir>/skipped_configurations.txt.

    Each skipped configuration is delimited by a pair of separator lines of
    80 '#' characters, so the count is half the number of separators. Prints
    a warning on an odd separator count, and a message (returning None) when
    the log file is absent.
    """
    skiplog = Path(exp_dir) / 'skipped_configurations.txt'
    if not skiplog.exists():
        print('Could not find {}'.format(str(skiplog)))
        return None
    separator = ('#' * 80) + '\n'
    with open(skiplog, 'r') as fh:
        n_separators = sum(1 for line in fh if line == separator)
    if n_separators % 2 != 0:
        print('WARNING: counts is not divisible by two')
    return n_separators // 2
def parse_args():
    """Parse the benchmark command-line options.

    Fix: the original passed `type=bool` together with `action='store_true'`;
    argparse's store_true action accepts no `type` argument, so add_argument
    raised TypeError before any parsing could happen.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--bin-size', type=int, default=256)
    parser.add_argument('--num-features', type=int, default=17)
    parser.add_argument('--batch-size', type=int, default=20)
    parser.add_argument('--num-threads', type=int, default=1)
    # store_true already yields a bool (default False)
    parser.add_argument('--use-gpu', action='store_true')
    args = parser.parse_args()
    return args
def get_mem_cpu_mb():
    """Peak resident set size of this process in MB.

    NOTE(review): ru_maxrss is kilobytes on Linux but bytes on macOS — the
    /1000 conversion assumes Linux; confirm target platform.
    """
    peak_rss_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return peak_rss_kb / 1000
def get_mem_gpu_mb():
    """Used GPU memory in MB for the module-level pynvml device `handle`.

    NOTE(review): relies on a global `handle` being initialized elsewhere
    (nvmlInit + nvmlDeviceGetHandleBy*) — confirm it is set before calling.
    """
    mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
    # bytes -> MB (decimal)
    return ((mem.used / 1000) / 1000)
def get_mem_mb(use_gpu):
    """Current memory usage in MB: GPU memory if `use_gpu`, else host peak RSS."""
    return get_mem_gpu_mb() if use_gpu else get_mem_cpu_mb()
def create_experiment_dir(prefix=None, suffix=None, experiments_dir='experiments'):
    """Create a fresh timestamped training directory and return its path as str.

    The directory name is <prefix><YYYYmmdd_HHMMSS_ffffff>; when `suffix` is
    given, '.<hostname>' is appended. NOTE(review): the VALUE of `suffix` is
    ignored — only its presence triggers the hostname suffix (behavior kept).
    """
    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f')
    name = stamp if prefix is None else prefix + stamp
    train_dir = Path(experiments_dir) / name
    if suffix is not None:
        train_dir = train_dir.with_name(train_dir.name + '.' + platform.node())
    train_dir.mkdir(parents=True)
    return str(train_dir)
def create_comet_experiment(comet_exp_name, comet_offline=False, outdir=None):
    """Create a comet-ml Experiment (online) or OfflineExperiment.

    Args:
        comet_exp_name: comet-ml project name.
        comet_offline: if True, log locally to <outdir>/cometml instead of
            streaming to www.comet.ml; requires `outdir`.
        outdir: output directory for offline logs.

    Returns:
        The experiment object, or None if initialization failed (a warning
        is logged and training can proceed without comet).
    """
    try:
        if comet_offline:
            logging.info('Using comet-ml OfflineExperiment, saving logs locally.')
            if (outdir is None):
                raise ValueError('Please specify am output directory when setting comet_offline to True')
            experiment = OfflineExperiment(project_name=comet_exp_name, auto_metric_logging=True, auto_param_logging=True, auto_histogram_weight_logging=True, auto_histogram_gradient_logging=False, auto_histogram_activation_logging=False, offline_directory=(outdir + '/cometml'), auto_output_logging='simple')
        else:
            logging.info('Using comet-ml Experiment, streaming logs to www.comet.ml.')
            experiment = Experiment(project_name=comet_exp_name, auto_metric_logging=True, auto_param_logging=True, auto_histogram_weight_logging=True, auto_histogram_gradient_logging=False, auto_histogram_activation_logging=False, auto_output_logging='simple')
    except Exception as e:
        # comet is optional; never let dashboard setup break a training run
        logging.warning('Failed to initialize comet-ml dashboard: {}'.format(e))
        experiment = None
    return experiment
def hits_to_features(hit_data, iev, coll, feats):
    """Extract per-hit features for event `iev` from collection `coll` and
    return them as an awkward Record, adding a coarse 'subdetector' code
    (0=ECAL, 1=HCAL, 2=MUON, 3=other).
    """
    if 'TrackerHit' in coll:
        # tracker hits store their deposited energy under 'eDep'
        pairs = [(f, 'eDep' if f == 'energy' else f) for f in feats]
    else:
        pairs = [(f, f) for f in feats]
    feat_arr = {out_name: hit_data[coll + '.' + src_name][iev] for (out_name, src_name) in pairs}

    # coarse subdetector label derived from the collection name prefix
    sdcoll = 'subdetector'
    feat_arr[sdcoll] = np.zeros(len(feat_arr['type']), dtype=np.int32)
    if coll.startswith('ECAL'):
        feat_arr[sdcoll][:] = 0
    elif coll.startswith('HCAL'):
        feat_arr[sdcoll][:] = 1
    elif coll.startswith('MUON'):
        feat_arr[sdcoll][:] = 2
    else:
        feat_arr[sdcoll][:] = 3
    return awkward.Record(feat_arr)
def track_pt(omega):
    """Transverse momentum from the track curvature parameter `omega`:
    pt = a * |b / omega| with a = 3e-4 and b = 4.

    NOTE(review): the constants presumably encode the magnetic-field
    curvature-to-pt conversion — confirm against the detector setup.
    """
    scale = 3 * (10 ** (- 4))
    field_factor = 4
    return scale * np.abs(field_factor / omega)
def track_to_features(prop_data, iev):
    """Build per-track kinematics for event `iev` and return (px, py, pz, q)
    as numpy arrays.

    NOTE(review): relies on a module-level `track_coll` naming the track
    branch, and reads track-state parameters from the 'SiTracks_1' collection
    at each track's trackStates_begin index — confirm against the EDM layout.
    """
    track_arr = prop_data[track_coll][iev]
    feats_from_track = ['type', 'chi2', 'ndf', 'dEdx', 'dEdxError', 'radiusOfInnermostHit']
    ret = {feat: track_arr[((track_coll + '.') + feat)] for feat in feats_from_track}
    n_tr = len(ret['type'])  # NOTE(review): currently unused
    # index of each track's first track state in the flat track-state array
    trackstate_idx = prop_data[track_coll][(track_coll + '.trackStates_begin')][iev]
    for k in ['tanLambda', 'D0', 'phi', 'omega', 'Z0', 'time']:
        ret[k] = prop_data['SiTracks_1'][('SiTracks_1.' + k)][iev][trackstate_idx]
    # momentum components from curvature (omega) and dip angle (tanLambda)
    ret['pt'] = track_pt(ret['omega'])
    ret['px'] = (np.cos(ret['phi']) * ret['pt'])
    ret['py'] = (np.sin(ret['phi']) * ret['pt'])
    ret['pz'] = (ret['tanLambda'] * ret['pt'])
    # charge sign taken from the sign of the curvature
    ret['q'] = ret['omega'].to_numpy().copy()
    ret['q'][(ret['q'] > 0)] = 1
    ret['q'][(ret['q'] < 0)] = (- 1)
    return (ret['px'].to_numpy(), ret['py'].to_numpy(), ret['pz'].to_numpy(), ret['q'])
def visualize(sample, data, iev, trk_opacity=0.8):
    """Render one event as an interactive 3D plotly scene.

    Detector elements (by Xelem['typ'] code), PF candidates and MLPF truth
    particles are placed on nested cylinders: x/z from phi scaled by a
    per-layer radius, y from eta. Writes plot_<sample>_<iev>.html and a
    companion plot_<sample>_<iev>_data.html with the raw tables.

    NOTE(review): typ codes (1=track, 6=GSF, 2/3=PS1/2, 4=ECAL, 5=HCAL,
    8=HFEM, 9=HFHAD, 10=SC, 11=HO) are inferred from the trace names —
    confirm against the dataset definition.
    """
    Xelem = pandas.DataFrame(data[iev]['Xelem'])
    ycand = pandas.DataFrame(data[iev]['ycand'])
    ygen = pandas.DataFrame(data[iev]['ygen'])
    eta_range = 1000   # scale factor mapping eta onto the y axis
    radius_mult = 2000  # base cylinder radius in plot units
    # tracks (typ==1): polyline from the origin through the track phi/eta,
    # extended to the ECAL and HCAL entry points when available
    trk_x = []
    trk_y = []
    trk_z = []
    for (irow, row) in Xelem[(Xelem['typ'] == 1)].iterrows():
        trk_x += [0, ((1 * radius_mult) * np.cos(row['phi']))]
        trk_z += [0, ((1 * radius_mult) * np.sin(row['phi']))]
        trk_y += [0, (eta_range * row['eta'])]
        if (row['phi_ecal'] != 0):
            trk_x += [((1.5 * radius_mult) * np.cos(row['phi_ecal']))]
            trk_z += [((1.5 * radius_mult) * np.sin(row['phi_ecal']))]
            trk_y += [(eta_range * row['eta_ecal'])]
        if (row['phi_hcal'] != 0):
            trk_x += [((2 * radius_mult) * np.cos(row['phi_hcal']))]
            trk_z += [((2 * radius_mult) * np.sin(row['phi_hcal']))]
            trk_y += [(eta_range * row['eta_hcal'])]
        # None breaks the polyline between consecutive tracks
        trk_x += [None]
        trk_z += [None]
        trk_y += [None]
    points_trk = go.Scatter3d(x=trk_x, z=trk_z, y=trk_y, mode='lines', line=dict(color='rgba(10, 10, 10, {})'.format(trk_opacity)), name='Tracks', hoverinfo='skip')
    # GSF electron tracks (typ==6): same construction as tracks
    trk_x = []
    trk_y = []
    trk_z = []
    for (irow, row) in Xelem[(Xelem['typ'] == 6)].iterrows():
        trk_x += [0, ((1 * radius_mult) * np.cos(row['phi']))]
        trk_z += [0, ((1 * radius_mult) * np.sin(row['phi']))]
        trk_y += [0, (eta_range * row['eta'])]
        if (row['phi_ecal'] != 0):
            trk_x += [((1.5 * radius_mult) * np.cos(row['phi_ecal']))]
            trk_z += [((1.5 * radius_mult) * np.sin(row['phi_ecal']))]
            trk_y += [(eta_range * row['eta_ecal'])]
        if (row['phi_hcal'] != 0):
            trk_x += [((2 * radius_mult) * np.cos(row['phi_hcal']))]
            trk_z += [((2 * radius_mult) * np.sin(row['phi_hcal']))]
            trk_y += [(eta_range * row['eta_hcal'])]
        trk_x += [None]
        trk_z += [None]
        trk_y += [None]
    points_gsf = go.Scatter3d(x=trk_x, z=trk_z, y=trk_y, mode='lines', line=dict(color='rgba(10, 10, 10, {})'.format(trk_opacity)), name='GSF')
    # calorimeter-like deposits: one marker trace per element type, marker
    # size scaled from the deposited energy
    msk = (Xelem['typ'] == 2)
    points_ps1 = go.Scatter3d(x=((1 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((1 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (1000 * Xelem[msk]['e'])}, name='PS1', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    msk = (Xelem['typ'] == 3)
    points_ps2 = go.Scatter3d(x=((1 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((1 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (1000 * Xelem[msk]['e'])}, name='PS2', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    msk = (Xelem['typ'] == 4)
    points_ecal = go.Scatter3d(x=((1.5 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((1.5 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (10 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='ECAL', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    msk = (Xelem['typ'] == 5)
    points_hcal = go.Scatter3d(x=((2 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((2 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (5 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='HCAL', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    msk = (Xelem['typ'] == 8)
    points_hfem = go.Scatter3d(x=((2 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((2 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (5 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='HFEM', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    msk = (Xelem['typ'] == 9)
    points_hfhad = go.Scatter3d(x=((2 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((2 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (5 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='HFHAD', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    msk = (Xelem['typ'] == 10)
    points_sc = go.Scatter3d(x=((1.5 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((1.5 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (5 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='SC', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    msk = (Xelem['typ'] == 11)
    points_ho = go.Scatter3d(x=((2.1 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((2.1 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (5 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='HO', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    # PF candidates on an outer shell, positioned from their cos/sin phi
    msk = (ycand['typ'] != 0)
    points_cand = go.Scatter3d(x=((2.2 * radius_mult) * ycand[msk]['cos_phi'].values), z=((2.2 * radius_mult) * ycand[msk]['sin_phi'].values), y=(eta_range * ycand[msk]['eta'].values), mode='markers', marker={'symbol': 'x', 'opacity': 0.8, 'color': 'rgba(0, 0, 0, 0.8)', 'size': np.clip((5 * np.log10((ycand[msk]['e'].values + 5.0))), 1, 10)}, hovertemplate='<b>%{hovertext}</b>', hovertext=['{}<br>E={:.2f}<br>eta={:.2f}<br>phi={:.2f}'.format(int(x['typ']), x['e'], x['eta'], np.arctan2(x['sin_phi'], x['cos_phi'])) for (_, x) in ycand[msk].iterrows()], name='PFCand')
    # generator-level truth particles on the outermost shell
    msk = (ygen['typ'] != 0)
    points_gen = go.Scatter3d(x=((2.5 * radius_mult) * ygen[msk]['cos_phi'].values), z=((2.5 * radius_mult) * ygen[msk]['sin_phi'].values), y=(eta_range * ygen[msk]['eta'].values), mode='markers', marker={'symbol': 'circle', 'opacity': 0.8, 'color': 'rgba(50, 0, 0, 0.4)', 'size': np.clip((5 * np.log10((ygen[msk]['e'].values + 5))), 1, 10)}, hovertemplate='<b>%{hovertext}</b>', hovertext=['{}<br>E={:.2f}<br>eta={:.2f}<br>phi={:.2f}'.format(int(x['typ']), x['e'], x['eta'], np.arctan2(x['sin_phi'], x['cos_phi'])) for (_, x) in ygen[msk].iterrows()], name='MLPF truth')
    fig = go.Figure(data=[points_trk, points_gsf, points_ps1, points_ps2, points_ecal, points_hcal, points_hfem, points_hfhad, points_sc, points_ho, points_cand, points_gen])
    # hide axes/grids; fixed cube range keeps the geometry undistorted
    fig.update_layout(autosize=True, scene_camera={'eye': dict(x=0.8, y=0.8, z=0.8)}, scene={'xaxis': dict(nticks=1, range=[(- 5000), 5000], showaxeslabels=False, showticklabels=False, showgrid=False, visible=True), 'yaxis': dict(nticks=1, range=[(- 5000), 5000], showaxeslabels=False, showticklabels=False, showgrid=False, visible=True), 'zaxis': dict(nticks=1, range=[(- 5000), 5000], showaxeslabels=False, showticklabels=False, showgrid=False, visible=True)})
    fig.update_layout(legend={'itemsizing': 'constant'})
    s = fig.to_html(default_width='1200px', default_height='800px')
    with open('plot_{}_{}.html'.format(sample, iev), 'w') as fi:
        fi.write(s)
    # companion page with the raw input/candidate/truth tables
    with open('plot_{}_{}_data.html'.format(sample, iev), 'w') as fi:
        fi.write('X')
        fi.write(Xelem.to_html())
        fi.write('ycand')
        fi.write(ycand[(ycand['typ'] != 0)].to_html())
        fi.write('ygen')
        fi.write(ygen[(ygen['typ'] != 0)].to_html())
def node_label_func(n):
    """Build a multi-line display label for graph node *n*.

    Reads the typ/e/eta/phi attributes of the node from the module-level
    graph ``g``; the first element of the node tuple is its kind tag.
    """
    attrs = g.nodes[n]
    kind = n[0].upper()
    return '{0} {1}\nE={2:.2f}\n{3:.1f}:{4:.1f}'.format(
        kind, attrs['typ'], attrs['e'], attrs['eta'], attrs['phi'])
def node_color_func(n):
    """Map a graph node to its plot color based on the node-kind tag n[0].

    Fix: the original dict literal listed the 'gen' key twice; duplicate
    dict keys silently keep only the last value, so the redundant entry
    is removed.
    """
    colors = {'gen': 'blue', 'el': 'gray', 'pf': 'purple', 'tp': 'red', 'cp': 'red'}
    return colors[n[0]]
def plot_energy_stack(energies, pids):
    """Overlay one energy histogram per unique PID on log-spaced bins.

    Histograms are drawn unstacked with mplhep and labelled by PID;
    a legend is added to the current matplotlib axes.
    """
    edges = np.logspace(-1, 6, 61)
    unique_pids = np.unique(pids)
    hists = []
    for pid in unique_pids:
        hist = bh.Histogram(bh.axis.Variable(edges))
        hist.fill(energies[pids == pid])
        hists.append(hist)
    labels = [str(p) for p in unique_pids]
    mplhep.histplot(hists, stack=False, label=labels)
    plt.legend()
def to_bh(data, bins, cumulative=False):
    """Fill a boost-histogram over *bins* with *data*.

    When *cumulative* is true the bin contents are replaced by the
    reverse-cumulative counts (entries at or above each bin).
    """
    hist = bh.Histogram(bh.axis.Variable(bins))
    hist.fill(data)
    if cumulative:
        # total count minus the running sum gives the tail integral
        hist[:] = np.sum(hist.values()) - np.cumsum(hist)
    return hist
def load_pickle(fn):
    """Load a pickled list of event dicts, keeping only the jet/MET collections.

    Parameters
    ----------
    fn : str
        Path to the pickle file.

    Returns
    -------
    list of dict
        One dict per event with keys 'slimmedGenJets', 'slimmedJetsPuppi',
        'genMetTrue' and 'slimmedMETsPuppi'.

    Fix: the original ``pickle.load(open(fn, 'rb'))`` never closed the file
    handle; a context manager closes it deterministically.
    """
    keys = ['slimmedGenJets', 'slimmedJetsPuppi', 'genMetTrue', 'slimmedMETsPuppi']
    with open(fn, 'rb') as fi:
        d = pickle.load(fi)
    return [{k: it[k] for k in keys} for it in d]
def varbins(*args):
    """Concatenate several bin-edge arrays into one.

    The last edge of every array except the final one is dropped, so the
    shared boundary between consecutive ranges is not duplicated.
    """
    pieces = [arr[:-1] for arr in args[:-1]]
    pieces.append(args[-1])
    return np.concatenate(pieces)
def get_hist_and_merge(files, histname):
    """Read *histname* from each ROOT file in *files* and return their sum.

    Each file is opened with uproot and the named histogram converted to a
    boost-histogram before summation.
    """
    hists = [uproot.open(fn)[histname].to_boost() for fn in files]
    # seed the sum with the first histogram so the bin layout is preserved
    return sum(hists[1:], hists[0])
def Gauss(x, a, x0, sigma):
    """Unnormalized Gaussian: a * exp(-(x - x0)^2 / (2 * sigma^2))."""
    offset_sq = (x - x0) ** 2
    return a * np.exp(-offset_sq / (2 * sigma ** 2))
def fit_response(hist2d, bin_range):
    """Fit a Gaussian to each E_T slice of a 2D response histogram.

    For every bin index in *bin_range*, the 1D slice hist2d.values()[ibin]
    (binned along axis 1) is fit with ``Gauss``; the fitted mean and sigma
    and their uncertainties are collected together with the slice center on
    axis 0. A control figure (data with errors + fitted curve) is drawn for
    every slice as a side effect.

    Returns (centers, means, means_unc, sigmas, sigmas_unc) as numpy arrays.
    """
    centers = []
    means = []
    means_unc = []
    sigmas = []
    sigmas_unc = []
    for ibin in bin_range:
        print(ibin)
        plt.figure()
        xvals = hist2d.axes[1].centers
        vals = hist2d.values()[ibin]
        # Poisson errors; empty bins get unit error so curve_fit stays finite
        errs = np.sqrt(vals)
        errs[(vals == 0)] = 1.0
        # bounded dogbox fit: mean in [-10, 10], sigma in [0, 50]
        (parameters1, covariances1) = curve_fit(Gauss, xvals, vals, p0=[1.0, 0.0, 1.0], sigma=errs, maxfev=1000000, method='dogbox', bounds=[((- np.inf), (- 10), 0), (np.inf, 10, 50)])
        plt.errorbar(xvals, vals, errs)
        plt.plot(xvals, Gauss(xvals, *parameters1))
        plt.xlabel('$\\Delta E_T / E_T$')
        plt.title('${} < E_T < {}$'.format(hist2d.axes[0].edges[ibin], hist2d.axes[0].edges[(ibin + 1)]))
        # parameters1 = [amplitude, mean, sigma]; diagonal of the covariance
        # gives the parameter variances
        means.append(parameters1[1])
        means_unc.append(np.sqrt(covariances1[(1, 1)]))
        sigmas.append(parameters1[2])
        sigmas_unc.append(np.sqrt(covariances1[(2, 2)]))
        centers.append(hist2d.axes[0].centers[ibin])
    centers = np.array(centers)
    means = np.array(means)
    means_unc = np.array(means_unc)
    sigmas = np.array(sigmas)
    sigmas_unc = np.array(sigmas_unc)
    return (centers, means, means_unc, sigmas, sigmas_unc)
def yield_from_ds():
    """Generate events from the module-level dataset ``dss``.

    Only the X / ygen / ycand entries of each element are kept.
    """
    for item in dss:
        yield {'X': item['X'], 'ygen': item['ygen'], 'ycand': item['ycand']}
def particle_has_track(g, particle):
    """Return True if *particle* has at least one graph edge to a 'track' node."""
    return any(edge[1][0] == 'track' for edge in g.edges(particle))
def get_tower_gen_fracs(g, tower):
    """Sum the generator-level energy linked to *tower*, split by particle class.

    Returns (ptcls, (e_130, e_211, e_22, e_11)): the particles without an
    associated track, and the energy attributed to neutral hadrons (130),
    charged hadrons (211), photons (22) and electrons (11).
    """
    energy_nh = 0.0
    energy_ch = 0.0
    energy_gamma = 0.0
    energy_ele = 0.0
    trackless = []
    for _, neighbor in g.edges(tower):
        if neighbor[0] != 'particle':
            continue
        if not particle_has_track(g, neighbor):
            trackless.append(neighbor)
        attrs = g.nodes[neighbor]
        pid = abs(attrs['pid'])
        charge = abs(attrs['charge'])
        energy = attrs['energy']
        if pid == 211:
            energy_ch += energy
        elif pid == 130:
            energy_nh += energy
        elif pid == 22:
            energy_gamma += energy
        elif pid == 11:
            energy_ele += energy
        elif charge == 1:
            # any other charged species is counted with the charged hadrons
            energy_ch += energy
        else:
            # remaining neutrals are counted with the neutral hadrons
            energy_nh += energy
    return (trackless, (energy_nh, energy_ch, energy_gamma, energy_ele))
def make_tower_array(tower_dict):
    """Encode a calorimeter tower as a fixed-length feature vector.

    Layout: [type=1, et, eta, sin(phi), cos(phi), energy, eem, ehad,
    four zero pads] so towers and tracks share one feature length.
    """
    phi = tower_dict['phi']
    features = [
        1,  # element-type id for towers
        tower_dict['et'],
        tower_dict['eta'],
        np.sin(phi),
        np.cos(phi),
        tower_dict['energy'],
        tower_dict['eem'],
        tower_dict['ehad'],
        0.0, 0.0, 0.0, 0.0,  # padding to match the track layout
    ]
    return np.array(features)
def make_track_array(track_dict):
    """Encode a track as a fixed-length feature vector (same length as towers).

    Layout: [type=2, pt, eta, sin(phi), cos(phi), p, eta_outer,
    sin(phi_outer), cos(phi_outer), charge, is_gen_muon, is_gen_electron].
    """
    phi = track_dict['phi']
    phi_outer = track_dict['phi_outer']
    return np.array([
        2,  # element-type id for tracks
        track_dict['pt'],
        track_dict['eta'],
        np.sin(phi),
        np.cos(phi),
        track_dict['p'],
        track_dict['eta_outer'],
        np.sin(phi_outer),
        np.cos(phi_outer),
        track_dict['charge'],
        track_dict['is_gen_muon'],
        track_dict['is_gen_electron'],
    ])
def make_gen_array(gen_dict):
    """Encode a generator particle as [pid_code, charge, pt, eta, sin(phi), cos(phi), e].

    A falsy *gen_dict* yields a 7-zero vector. The charge is the sign of
    the PDG id for the charged classes (encoded codes 1, 4 and 5),
    otherwise zero.
    """
    if not gen_dict:
        return np.zeros(7)
    encoded_pid = gen_pid_encoding.get(abs(gen_dict['pid']), 1)
    charged = encoded_pid in [1, 4, 5]
    charge = math.copysign(1, gen_dict['pid']) if charged else 0
    phi = gen_dict['phi']
    return np.array([encoded_pid, charge, gen_dict['pt'], gen_dict['eta'],
                     np.sin(phi), np.cos(phi), gen_dict['energy']])
def make_cand_array(cand_dict):
    """Encode a PF candidate as [pid_code, charge, pt, eta, sin(phi), cos(phi), e].

    A falsy *cand_dict* yields a 7-zero vector; pt and energy default to 0
    when missing from the dict.
    """
    if not cand_dict:
        return np.zeros(7)
    encoded_pid = gen_pid_encoding.get(abs(cand_dict['pid']), 1)
    phi = cand_dict['phi']
    return np.array([
        encoded_pid,
        cand_dict['charge'],
        cand_dict.get('pt', 0),
        cand_dict['eta'],
        np.sin(phi),
        np.cos(phi),
        cand_dict.get('energy', 0),
    ])
def make_triplets(g, tracks, towers, particles, pfparticles):
    """Match reco elements (tracks/towers) to gen particles and PF candidates.

    Returns (triplets, remaining_particles, remaining_pfcandidates) where
    each triplet is (reco_node, gen, pf_candidate); for towers the gen entry
    is a merged pseudo-particle dict (or None), for tracks it is a graph node.
    Matched particles/candidates are consumed from the remaining sets.
    """
    triplets = []
    remaining_particles = set(particles)
    remaining_pfcandidates = set(pfparticles)
    for t in tracks:
        # first linked gen particle of the track
        ptcl = None
        for e in g.edges(t):
            if (e[1][0] == 'particle'):
                ptcl = e[1]
                break
        # first not-yet-consumed charged PF candidate linked to that particle
        pf_ptcl = None
        for e in g.edges(ptcl):
            if ((e[1][0] in ['pfcharged', 'pfel', 'pfmu']) and (e[1] in remaining_pfcandidates)):
                pf_ptcl = e[1]
                break
        # NOTE(review): assumes every track has a linked particle; ptcl=None
        # here would raise — confirm against the graph construction.
        remaining_particles.remove(ptcl)
        if pf_ptcl:
            remaining_pfcandidates.remove(pf_ptcl)
        triplets.append((t, ptcl, pf_ptcl))
    for t in towers:
        (ptcls, fracs) = get_tower_gen_fracs(g, t)
        # dominant energy class decides the merged particle pid
        imax = np.argmax(fracs)
        if (len(ptcls) > 0):
            if (imax == 0):
                pid = 130
            elif (imax == 1):
                pid = 211
            elif (imax == 2):
                pid = 22
            elif (imax == 3):
                pid = 11
            for ptcl in ptcls:
                if (ptcl in remaining_particles):
                    remaining_particles.remove(ptcl)
        # sum the four-vectors of the trackless particles in this tower
        lvs = []
        for ptcl in ptcls:
            lv = uproot_methods.TLorentzVector.from_ptetaphie(g.nodes[ptcl]['pt'], g.nodes[ptcl]['eta'], g.nodes[ptcl]['phi'], g.nodes[ptcl]['energy'])
            lvs.append(lv)
        lv = None
        gen_ptcl = None
        if (len(lvs) > 0):
            # 'pid' is defined here because lvs is non-empty iff ptcls is
            lv = sum(lvs[1:], lvs[0])
            gen_ptcl = {'pid': pid, 'pt': lv.pt, 'eta': lv.eta, 'phi': lv.phi, 'energy': lv.energy}
            # charged hadrons outside tracker acceptance become neutral hadrons
            if ((gen_ptcl['pid'] == 211) and (abs(gen_ptcl['eta']) > 2.5)):
                gen_ptcl['pid'] = 130
            # low-energy neutral hadrons are dropped
            if ((gen_ptcl['pid'] == 130) and (gen_ptcl['energy'] < 9.0)):
                gen_ptcl = None
        # PF candidate exactly at the tower position, if still unconsumed
        found_pf = False
        for pf_ptcl in remaining_pfcandidates:
            if ((g.nodes[pf_ptcl]['eta'] == g.nodes[t]['eta']) and (g.nodes[pf_ptcl]['phi'] == g.nodes[t]['phi'])):
                found_pf = True
                break
        if found_pf:
            remaining_pfcandidates.remove(pf_ptcl)
        else:
            pf_ptcl = None
        triplets.append((t, gen_ptcl, pf_ptcl))
    return (triplets, list(remaining_particles), list(remaining_pfcandidates))
def process_chunk(infile, ev_start, ev_stop, outfile):
    """Convert events [ev_start, ev_stop) of a Delphes ROOT file into MLPF arrays.

    For every event a networkx graph is built linking reconstruction elements
    (towers, tracks) and PF candidates to generator particles; the triplets
    are matched with ``make_triplets`` and encoded with the make_*_array
    helpers. All per-event arrays are finally pickled (bz2-compressed) to
    *outfile* under the keys 'X', 'ygen' and 'ycand'.

    NOTE(review): depends on the module-level flag ``save_full_graphs`` and
    on the file-scope ROOT / networkx / numpy imports.
    """
    f = ROOT.TFile.Open(infile)
    tree = f.Get('Delphes')
    X_all = []
    ygen_all = []
    ygen_remaining_all = []
    ycand_all = []
    for iev in range(ev_start, ev_stop):
        print('event {}/{} out of {} in the full file'.format(iev, ev_stop, tree.GetEntries()))
        tree.GetEntry(iev)
        pileupmix = list(tree.PileUpMix)
        # map each generator-particle object to its index for edge building
        pileupmix_idxdict = {}
        for (ip, p) in enumerate(pileupmix):
            pileupmix_idxdict[p] = ip
        towers = list(tree.Tower)
        tracks = list(tree.Track)
        pf_charged = list(tree.PFChargedHadron)
        pf_neutral = list(tree.PFNeutralHadron)
        pf_photon = list(tree.PFPhoton)
        pf_el = list(tree.PFElectron)
        pf_mu = list(tree.PFMuon)
        graph = nx.Graph()
        # generator particles
        for i in range(len(pileupmix)):
            node = ('particle', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = pileupmix[i].PID
            graph.nodes[node]['eta'] = pileupmix[i].Eta
            graph.nodes[node]['phi'] = pileupmix[i].Phi
            graph.nodes[node]['pt'] = pileupmix[i].PT
            graph.nodes[node]['charge'] = pileupmix[i].Charge
            graph.nodes[node]['energy'] = pileupmix[i].E
            graph.nodes[node]['is_pu'] = pileupmix[i].IsPU
        # calorimeter towers, linked to every contributing particle
        for i in range(len(towers)):
            node = ('tower', i)
            graph.add_node(node)
            graph.nodes[node]['eta'] = towers[i].Eta
            graph.nodes[node]['phi'] = towers[i].Phi
            graph.nodes[node]['energy'] = towers[i].E
            graph.nodes[node]['et'] = towers[i].ET
            graph.nodes[node]['eem'] = towers[i].Eem
            graph.nodes[node]['ehad'] = towers[i].Ehad
            for ptcl in towers[i].Particles:
                ip = pileupmix_idxdict[ptcl]
                graph.add_edge(('tower', i), ('particle', ip))
        # tracks, linked to the single particle that produced them
        for i in range(len(tracks)):
            node = ('track', i)
            graph.add_node(node)
            # momentum magnitude from pT and eta: p = pT * cosh(eta)
            graph.nodes[node]['p'] = (tracks[i].PT * np.cosh(tracks[i].Eta))
            graph.nodes[node]['eta'] = tracks[i].Eta
            graph.nodes[node]['phi'] = tracks[i].Phi
            graph.nodes[node]['eta_outer'] = tracks[i].EtaOuter
            graph.nodes[node]['phi_outer'] = tracks[i].PhiOuter
            graph.nodes[node]['pt'] = tracks[i].PT
            graph.nodes[node]['pid'] = tracks[i].PID
            graph.nodes[node]['charge'] = tracks[i].Charge
            ip = pileupmix_idxdict[tracks[i].Particle.GetObject()]
            graph.add_edge(('track', i), ('particle', ip))
        # PF candidates by type, each linked to its source particle(s)
        for i in range(len(pf_charged)):
            node = ('pfcharged', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = pf_charged[i].PID
            graph.nodes[node]['eta'] = pf_charged[i].Eta
            graph.nodes[node]['phi'] = pf_charged[i].Phi
            graph.nodes[node]['pt'] = pf_charged[i].PT
            graph.nodes[node]['charge'] = pf_charged[i].Charge
            ip = pileupmix_idxdict[pf_charged[i].Particle.GetObject()]
            graph.add_edge(('pfcharged', i), ('particle', ip))
        for i in range(len(pf_el)):
            node = ('pfel', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = 11
            graph.nodes[node]['eta'] = pf_el[i].Eta
            graph.nodes[node]['phi'] = pf_el[i].Phi
            graph.nodes[node]['pt'] = pf_el[i].PT
            graph.nodes[node]['charge'] = pf_el[i].Charge
            ip = pileupmix_idxdict[pf_el[i].Particle.GetObject()]
            graph.add_edge(('pfel', i), ('particle', ip))
        for i in range(len(pf_mu)):
            node = ('pfmu', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = 13
            graph.nodes[node]['eta'] = pf_mu[i].Eta
            graph.nodes[node]['phi'] = pf_mu[i].Phi
            graph.nodes[node]['pt'] = pf_mu[i].PT
            graph.nodes[node]['charge'] = pf_mu[i].Charge
            ip = pileupmix_idxdict[pf_mu[i].Particle.GetObject()]
            graph.add_edge(('pfmu', i), ('particle', ip))
        for i in range(len(pf_neutral)):
            node = ('pfneutral', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = 130
            graph.nodes[node]['eta'] = pf_neutral[i].Eta
            graph.nodes[node]['phi'] = pf_neutral[i].Phi
            graph.nodes[node]['energy'] = pf_neutral[i].E
            graph.nodes[node]['charge'] = 0
            for ptcl in pf_neutral[i].Particles:
                ip = pileupmix_idxdict[ptcl]
                graph.add_edge(('pfneutral', i), ('particle', ip))
        for i in range(len(pf_photon)):
            node = ('pfphoton', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = 22
            graph.nodes[node]['eta'] = pf_photon[i].Eta
            graph.nodes[node]['phi'] = pf_photon[i].Phi
            graph.nodes[node]['energy'] = pf_photon[i].E
            graph.nodes[node]['charge'] = 0
            for ptcl in pf_photon[i].Particles:
                ip = pileupmix_idxdict[ptcl]
                graph.add_edge(('pfphoton', i), ('particle', ip))
        # optionally dump the first few full event graphs for debugging
        if ((iev < 10) and save_full_graphs):
            nx.readwrite.write_gpickle(graph, outfile.replace('.pkl.bz2', '_graph_{}.pkl'.format(iev)))
        # collect the node lists by kind and run the matching
        particles = [n for n in graph.nodes if (n[0] == 'particle')]
        pfcand = [n for n in graph.nodes if n[0].startswith('pf')]
        tracks = [n for n in graph.nodes if (n[0] == 'track')]
        towers = [n for n in graph.nodes if (n[0] == 'tower')]
        (triplets, remaining_particles, remaining_pfcandidates) = make_triplets(graph, tracks, towers, particles, pfcand)
        print('remaining PF', len(remaining_pfcandidates))
        for pf in remaining_pfcandidates:
            print(pf, graph.nodes[pf])
        X = []
        ygen = []
        ygen_remaining = []
        ycand = []
        for triplet in triplets:
            (reco, gen, cand) = triplet
            if (reco[0] == 'track'):
                track_dict = graph.nodes[reco]
                gen_dict = graph.nodes[gen]
                # gen-level lepton flags are stored as track features
                if (abs(gen_dict['pid']) == 13):
                    track_dict['is_gen_muon'] = 1.0
                else:
                    track_dict['is_gen_muon'] = 0.0
                if (abs(gen_dict['pid']) == 11):
                    track_dict['is_gen_electron'] = 1.0
                else:
                    track_dict['is_gen_electron'] = 0.0
                X.append(make_track_array(track_dict))
                ygen.append(make_gen_array(gen_dict))
            else:
                # towers: gen is already a merged pseudo-particle dict (or None)
                X.append(make_tower_array(graph.nodes[reco]))
                ygen.append(make_gen_array(gen))
            ycand.append(make_cand_array((graph.nodes[cand] if cand else None)))
        # unmatched generator particles are recorded separately
        for prt in remaining_particles:
            ygen_remaining.append(make_gen_array(graph.nodes[prt]))
        X = np.stack(X)
        ygen = np.stack(ygen)
        ygen_remaining = np.stack(ygen_remaining)
        ycand = np.stack(ycand)
        print('X', X.shape, 'ygen', ygen.shape, 'ygen_remaining', ygen_remaining.shape, 'ycand', ycand.shape)
        X_all.append(X)
        ygen_all.append(ygen)
        ygen_remaining_all.append(ygen_remaining)
        ycand_all.append(ycand)
    with bz2.BZ2File(outfile, 'wb') as fi:
        pickle.dump({'X': X_all, 'ygen': ygen_all, 'ycand': ycand_all}, fi)
def process_chunk_args(args):
    """Unpack an argument tuple for process_chunk (multiprocessing map helper)."""
    (infile, ev_start, ev_stop, outfile) = args
    process_chunk(infile, ev_start, ev_stop, outfile)
def chunks(lst, n):
    """Yield successive n-sized chunks from lst; the final chunk may be shorter."""
    start = 0
    total = len(lst)
    while start < total:
        yield lst[start:start + n]
        start += n
def parse_args():
    """Parse this script's command-line arguments (-d/--dir) from sys.argv."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-d', '--dir',
        type=str,
        default='parameters/delphes-gnn-skipconn.yaml',
        help='dir containing csv files',
    )
    return parser.parse_args()
def plot_gpu_util(df, cuda_device, ax):
    """Plot the utilization trace of GPU *cuda_device* from *df* onto axis *ax*."""
    column = 'GPU{}_util'.format(cuda_device)
    ax.plot(df['time'], df[column], alpha=0.8)
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('GPU utilization [%]')
    ax.set_title('GPU{}'.format(cuda_device))
    ax.grid(alpha=0.3)
def plot_gpu_power(df, cuda_device, ax):
    """Plot the power-draw trace of GPU *cuda_device* from *df* onto axis *ax*."""
    column = 'GPU{}_power'.format(cuda_device)
    ax.plot(df['time'], df[column], alpha=0.8)
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('Power consumption [W]')
    ax.set_title('GPU{}'.format(cuda_device))
    ax.grid(alpha=0.3)
def plot_gpu_mem_util(df, cuda_device, ax):
    """Plot the memory-utilization trace of GPU *cuda_device* from *df* onto *ax*."""
    column = 'GPU{}_mem_util'.format(cuda_device)
    ax.plot(df['time'], df[column], alpha=0.8)
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('GPU memory utilization [%]')
    ax.set_title('GPU{}'.format(cuda_device))
    ax.grid(alpha=0.3)
def plot_gpu_mem_used(df, cuda_device, ax):
    """Plot the used-memory trace of GPU *cuda_device* from *df* onto axis *ax*."""
    column = 'GPU{}_mem_used'.format(cuda_device)
    ax.plot(df['time'], df[column], alpha=0.8)
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('Used GPU memory [MiB]')
    ax.set_title('GPU{}'.format(cuda_device))
    ax.grid(alpha=0.3)
def plot_dfs(dfs, plot_func, suffix):
    """Draw *plot_func* for up to four GPU dataframes on a 2x2 grid and save a JPG.

    NOTE(review): relies on the module-level globals ``file`` (a Path, for
    the title/output stem) and ``args`` (for the output directory) instead
    of parameters — confirm they are set by the calling script.
    """
    (fig, axs) = plt.subplots(2, 2, figsize=(12, 9), tight_layout=True)
    # hide inner tick labels on the shared grid
    for ax in axs.flat:
        ax.label_outer()
    # one dataframe per axis; the enumeration index is the CUDA device id
    for (cuda_device, (df, ax)) in enumerate(zip(dfs, axs.flat)):
        plot_func(df, cuda_device, ax)
    plt.suptitle('{}'.format(file.stem))
    plt.savefig((args.dir + '/{}_{}.jpg'.format(file.stem, suffix)))
class TestGNN(unittest.TestCase):
    """Output-shape tests for the TensorFlow GNN layers in mlpf.tfmodel.model."""

    def helper_test_pairwise_dist_shape(self, dist_func):
        # a pairwise distance over (batch, N, d) inputs must return (batch, N, N)
        A = tf.random.normal((2, 128, 32))
        B = tf.random.normal((2, 128, 32))
        out = dist_func(A, B)
        self.assertEqual(out.shape, (2, 128, 128))

    def test_pairwise_l2_dist_shape(self):
        from mlpf.tfmodel.model import pairwise_l2_dist
        self.helper_test_pairwise_dist_shape(pairwise_l2_dist)

    def test_pairwise_l1_dist_shape(self):
        from mlpf.tfmodel.model import pairwise_l1_dist
        self.helper_test_pairwise_dist_shape(pairwise_l1_dist)

    def test_GHConvDense_shape(self):
        # flat (batch, nodes, features) input with a dense adjacency
        from mlpf.tfmodel.model import GHConvDense
        nn = GHConvDense(output_dim=128, activation='selu')
        x = tf.random.normal((2, 256, 64))
        adj = tf.random.normal((2, 256, 256, 1))
        msk = tf.random.normal((2, 256, 1))
        out = nn((x, adj, msk))
        self.assertEqual(out.shape, (2, 256, 128))

    def test_GHConvDense_binned_shape(self):
        # binned (batch, bins, nodes_per_bin, features) input
        from mlpf.tfmodel.model import GHConvDense
        nn = GHConvDense(output_dim=128, activation='selu')
        x = tf.random.normal((2, 4, 64, 64))
        adj = tf.random.normal((2, 4, 64, 64, 1))
        msk = tf.random.normal((2, 4, 64, 1))
        out = nn((x, adj, msk))
        self.assertEqual(out.shape, (2, 4, 64, 128))

    def test_NodePairGaussianKernel_shape(self):
        # the kernel produces one adjacency value per node pair
        from mlpf.tfmodel.model import NodePairGaussianKernel
        nn = NodePairGaussianKernel()
        x = tf.random.normal((2, 256, 32))
        msk = tf.random.normal((2, 256, 1))
        out = nn(x, msk)
        self.assertEqual(out.shape, (2, 256, 256, 1))

    def test_NodePairGaussianKernel_binned_shape(self):
        from mlpf.tfmodel.model import NodePairGaussianKernel
        nn = NodePairGaussianKernel()
        x = tf.random.normal((2, 4, 64, 32))
        msk = tf.random.normal((2, 4, 64, 1))
        out = nn(x, msk)
        self.assertEqual(out.shape, (2, 4, 64, 64, 1))

    def test_MessageBuildingLayerLSH_shape(self):
        from mlpf.tfmodel.model import MessageBuildingLayerLSH
        nn = MessageBuildingLayerLSH(bin_size=64, distance_dim=128)
        x_dist = tf.random.normal((2, 256, 128))
        x_features = tf.random.normal((2, 256, 32))
        msk = (tf.random.normal((2, 256)) > 0)
        (bins_split, x_features_binned, dm_binned, msk_f_binned) = nn(x_dist, x_features, msk)
        # 256 nodes at bin_size=64 -> 4 bins
        self.assertEqual(bins_split.shape, (2, 4, 64))
        self.assertEqual(x_features_binned.shape, (2, 4, 64, 32))
        self.assertEqual(dm_binned.shape, (2, 4, 64, 64, 1))
        self.assertEqual(msk_f_binned.shape, (2, 4, 64, 1))
        from mlpf.tfmodel.model import reverse_lsh
        # un-binning must exactly restore the original feature tensor
        x_features2 = reverse_lsh(bins_split, x_features_binned)
        self.assertEqual(tf.reduce_sum((x_features - x_features2)).numpy(), 0)
class TestGNNTorchAndTensorflow(unittest.TestCase):
    """Parity tests: the Torch ports in mlpf.pyg.gnn_lsh must reproduce the
    TensorFlow layers in mlpf.tfmodel.model once the TF weights are copied."""

    def test_GHConvDense(self):
        from mlpf.tfmodel.model import GHConvDense
        nn1 = GHConvDense(output_dim=128, activation='selu')
        from mlpf.pyg.gnn_lsh import GHConvDense as GHConvDenseTorch
        nn2 = GHConvDenseTorch(output_dim=128, activation='selu', hidden_dim=64)
        x = np.random.normal(size=(2, 4, 64, 64)).astype(np.float32)
        adj = np.random.normal(size=(2, 4, 64, 64, 1)).astype(np.float32)
        msk = np.random.normal(size=(2, 4, 64, 1)).astype(np.float32)
        msk = (msk > 0).astype(np.float32)
        # run both once so the layers build/initialize their weights
        nn1((tf.convert_to_tensor(x), tf.convert_to_tensor(adj), tf.convert_to_tensor(msk))).numpy()
        nn2((torch.tensor(x), torch.tensor(adj), torch.tensor(msk))).detach().numpy()
        # copy the TF weights into the Torch module (order: W_t, b_t, W_h, theta)
        sd = nn2.state_dict()
        sd['W_t'] = torch.from_numpy(nn1.weights[0].numpy())
        sd['b_t'] = torch.from_numpy(nn1.weights[1].numpy())
        sd['W_h'] = torch.from_numpy(nn1.weights[2].numpy())
        sd['theta'] = torch.from_numpy(nn1.weights[3].numpy())
        nn2.load_state_dict(sd)
        out1 = nn1((tf.convert_to_tensor(x), tf.convert_to_tensor(adj), tf.convert_to_tensor(msk))).numpy()
        out2 = nn2((torch.tensor(x), torch.tensor(adj), torch.tensor(msk))).detach().numpy()
        # NOTE(review): a signed sum can cancel errors — sum of |diff| would
        # be a stricter comparison; confirm before changing.
        self.assertLess(np.sum((out1 - out2)), TOLERANCE)

    def test_MessageBuildingLayerLSH(self):
        from mlpf.tfmodel.model import MessageBuildingLayerLSH
        nn1 = MessageBuildingLayerLSH(distance_dim=128, bin_size=64)
        from mlpf.pyg.gnn_lsh import MessageBuildingLayerLSH as MessageBuildingLayerLSHTorch
        nn2 = MessageBuildingLayerLSHTorch(distance_dim=128, bin_size=64)
        # deterministic sorting so the bin assignment matches TF exactly
        nn2.stable_sort = True
        x_dist = np.random.normal(size=(2, 256, 128)).astype(np.float32)
        x_node = np.random.normal(size=(2, 256, 32)).astype(np.float32)
        msk = np.random.normal(size=(2, 256)).astype(np.float32)
        msk = (msk > 0).astype(bool)
        # build both layers, then share the LSH rotation matrix
        nn1(tf.convert_to_tensor(x_dist), tf.convert_to_tensor(x_node), tf.convert_to_tensor(msk))
        nn2(torch.tensor(x_dist), torch.tensor(x_node), torch.tensor(msk))
        sd = nn2.state_dict()
        sd['codebook_random_rotations'] = torch.from_numpy(nn1.weights[0].numpy())
        nn2.load_state_dict(sd)
        out1 = nn1(tf.convert_to_tensor(x_dist), tf.convert_to_tensor(x_node), tf.convert_to_tensor(msk))
        out2 = nn2(torch.tensor(x_dist), torch.tensor(x_node), torch.tensor(msk))
        # outputs: (bins_split, binned features, binned distance matrix, binned mask)
        self.assertTrue(np.all((out1[0].numpy() == out2[0].numpy())))
        self.assertLess(np.sum((out1[1].numpy() - out2[1].detach().numpy())), TOLERANCE)
        self.assertLess(np.sum((out1[2].numpy() - out2[2].detach().numpy())), TOLERANCE)
        self.assertEqual(np.sum((out1[3].numpy() - out2[3].detach().numpy())), 0.0)
        from mlpf.tfmodel.model import reverse_lsh
        (bins_split, x, dm, msk_f) = out1
        # reversing the binning must restore the node features exactly (TF)
        ret = reverse_lsh(bins_split, x, False)
        self.assertTrue(np.all((x_node == ret.numpy())))
        from mlpf.pyg.gnn_lsh import reverse_lsh as reverse_lsh_torch
        (bins_split, x, dm, msk_f) = out2
        # ... and in the Torch port as well
        ret = reverse_lsh_torch(bins_split, x)
        self.assertTrue(np.all((x_node == ret.detach().numpy())))
class QuadKey():
    """A Bing-maps quadkey: a base-4 string of 1..23 digits identifying a tile."""

    @precondition((lambda c, key: valid_key(key)))
    def __init__(self, key):
        """A quadkey must be between 1 and 23 digits and can only contain digits 0-3."""
        self.key = key
        # one digit per zoom level
        self.level = len(key)

    def children(self):
        """Return the four child quadkeys one level deeper ([] at max depth)."""
        if (self.level >= 23):
            return []
        return [QuadKey((self.key + str(k))) for k in [0, 1, 2, 3]]

    def parent(self):
        """Return the quadkey one level up (drop the last digit)."""
        return QuadKey(self.key[:(- 1)])

    def nearby(self):
        """Return the quadkeys of the (up to 8) neighboring tiles at this level."""
        (tile, level) = TileSystem.quadkey_to_tile(self.key)
        perms = [((- 1), (- 1)), ((- 1), 0), ((- 1), 1), (0, (- 1)), (0, 1), (1, (- 1)), (1, 0), (1, 1)]
        # abs() reflects negative coordinates back into range at the map edge
        tiles = set(map((lambda perm: (abs((tile[0] + perm[0])), abs((tile[1] + perm[1])))), perms))
        return [TileSystem.tile_to_quadkey(tile, level) for tile in tiles]

    def is_ancestor(self, node):
        """If *node* is an ancestor of self, return the level difference; else None."""
        # an ancestor is strictly shallower and a string prefix of our key
        if ((self.level <= node.level) or (self.key[:len(node.key)] != node.key)):
            return None
        return (self.level - node.level)

    def is_descendent(self, node):
        """If *node* is a descendent of self, return the level difference; else None."""
        return node.is_ancestor(self)

    def area(self):
        """Approximate tile area in square meters, evaluated at the equator."""
        size = TileSystem.map_size(self.level)
        LAT = 0
        res = TileSystem.ground_resolution(LAT, self.level)
        # NOTE(review): side = (map_size/2) * resolution — verify this is the
        # intended tile side length rather than 256 * resolution.
        side = ((size / 2) * res)
        return (side * side)

    def xdifference(self, to):
        """Generator over the quadkeys in the rectangle spanned by self and *to*.

        Only works with quadkeys of the same level; generator so it can be
        consumed lazily.
        """
        (x, y) = (0, 1)
        assert (self.level == to.level)
        self_tile = list(self.to_tile()[0])
        to_tile = list(to.to_tile()[0])
        # NOTE(review): 'self_tile[y] <= self_tile[y]' is always true, so the
        # first branch is always taken — likely meant to_tile[y]. The shipped
        # tests pass with this behavior; confirm before changing.
        if ((self_tile[x] >= to_tile[x]) and (self_tile[y] <= self_tile[y])):
            (ne_tile, sw_tile) = (self_tile, to_tile)
        else:
            (sw_tile, ne_tile) = (self_tile, to_tile)
        # walk column by column from the NE corner towards the SW corner
        cur = ne_tile[:]
        while (cur[x] >= sw_tile[x]):
            while (cur[y] <= sw_tile[y]):
                (yield from_tile(tuple(cur), self.level))
                cur[y] += 1
            cur[x] -= 1
            cur[y] = ne_tile[y]

    def difference(self, to):
        """Non-generator version of xdifference."""
        return [qk for qk in self.xdifference(to)]

    def unwind(self):
        """List of all ancestors in descending level order, starting with a new copy of self."""
        return [QuadKey(self.key[:(l + 1)]) for l in reversed(range(len(self.key)))]

    def to_tile(self):
        """Return ((tile_x, tile_y), level) for this quadkey."""
        return TileSystem.quadkey_to_tile(self.key)

    def to_geo(self, centered=False):
        """Return the (lat, lon) of this tile's corner (or center if *centered*)."""
        ret = TileSystem.quadkey_to_tile(self.key)
        tile = ret[0]
        lvl = ret[1]
        pixel = TileSystem.tile_to_pixel(tile, centered)
        return TileSystem.pixel_to_geo(pixel, lvl)

    def __eq__(self, other):
        return (self.key == other.key)

    def __ne__(self, other):
        return (not self.__eq__(other))

    def __str__(self):
        return self.key

    def __repr__(self):
        return self.key
def from_geo(geo, level):
    """Construct a QuadKey from *geo* = (lat, lon) at the given *level*.

    Latitude/longitude outside the valid range are clipped by the tile
    system; an out-of-range level raises an AssertionError.
    """
    pixel = TileSystem.geo_to_pixel(geo, level)
    tile = TileSystem.pixel_to_tile(pixel)
    return QuadKey(TileSystem.tile_to_quadkey(tile, level))
def from_tile(tile, level):
    """Construct a QuadKey from (tile_x, tile_y) coordinates at *level*."""
    key = TileSystem.tile_to_quadkey(tile, level)
    return QuadKey(key)
def from_str(qk_str):
    """Construct a QuadKey from its string representation (validated by QuadKey)."""
    return QuadKey(qk_str)
def geo_to_dict(geo):
    """Take a geo tuple and return a labeled dict:
    (lat, lon) -> {'lat': lat, 'lon': lon}.
    """
    (lat, lon) = geo
    return {LAT_STR: lat, LON_STR: lon}
def valid_level(level):
    """Return True when *level* is a legal quadkey zoom level (1..23 inclusive)."""
    lo, hi = 1, 23
    return lo <= level <= hi
@precondition(lambda key: valid_level(len(key)))
def valid_key(key):
    """Return True when *key* consists only of digits 0-3.

    The length (i.e. the level) is checked by the precondition decorator.
    """
    return TileSystem.KEY_PATTERN.match(key) is not None
class TileSystem():
    """Static helpers to convert between geo coordinates, pixels, tiles and quadkeys.

    See http://msdn.microsoft.com/en-us/library/bb259689.aspx

    Fixes: the quadkey/tile converters used Python 2's ``xrange`` (a NameError
    on Python 3) and compared int values with ``is not 0`` (identity, not
    equality); these are replaced with ``range`` and ``!= 0``.
    """
    import re
    KEY_PATTERN = re.compile('^[0-3]+$')
    EARTH_RADIUS = 6378137
    LATITUDE_RANGE = (-85.05112878, 85.05112878)
    LONGITUDE_RANGE = (-180.0, 180.0)

    @staticmethod
    @precondition(lambda n, minMax: minMax[0] <= minMax[1])
    def clip(n, minMax):
        """Clips number to specified values"""
        return min(max(n, minMax[0]), minMax[1])

    @staticmethod
    @precondition(valid_level)
    def map_size(level):
        """Determines map height and width in pixel space at level"""
        return 256 << level

    @staticmethod
    @precondition(lambda lat, lvl: valid_level(lvl))
    def ground_resolution(lat, level):
        """Gets ground res in meters / pixel"""
        lat = TileSystem.clip(lat, TileSystem.LATITUDE_RANGE)
        return (((cos(((lat * pi) / 180)) * 2) * pi) * TileSystem.EARTH_RADIUS) / TileSystem.map_size(level)

    @staticmethod
    @precondition(lambda lat, lvl, dpi: valid_level(lvl))
    def map_scale(lat, level, dpi):
        """Gets the scale of the map expressed as ratio 1 : N. Returns N"""
        return (TileSystem.ground_resolution(lat, level) * dpi) / 0.0254

    @staticmethod
    @precondition(lambda geo, lvl: valid_level(lvl))
    def geo_to_pixel(geo, level):
        """Transform from geo coordinates to pixel coordinates"""
        (lat, lon) = (float(geo[0]), float(geo[1]))
        lat = TileSystem.clip(lat, TileSystem.LATITUDE_RANGE)
        lon = TileSystem.clip(lon, TileSystem.LONGITUDE_RANGE)
        x = (lon + 180) / 360
        # Mercator projection for the y coordinate
        sin_lat = sin((lat * pi) / 180)
        y = 0.5 - (log((1 + sin_lat) / (1 - sin_lat)) / (4 * pi))
        map_size = TileSystem.map_size(level)
        pixel_x = int(TileSystem.clip((x * map_size) + 0.5, (0, map_size - 1)))
        pixel_y = int(TileSystem.clip((y * map_size) + 0.5, (0, map_size - 1)))
        return (pixel_x, pixel_y)

    @staticmethod
    @precondition(lambda pix, lvl: valid_level(lvl))
    def pixel_to_geo(pixel, level):
        """Transform from pixel to geo coordinates"""
        pixel_x = pixel[0]
        pixel_y = pixel[1]
        map_size = float(TileSystem.map_size(level))
        x = (TileSystem.clip(pixel_x, (0, map_size - 1)) / map_size) - 0.5
        y = 0.5 - (TileSystem.clip(pixel_y, (0, map_size - 1)) / map_size)
        # inverse Mercator projection
        lat = 90 - ((360 * atan(exp(((- y) * 2) * pi))) / pi)
        lon = 360 * x
        return (round(lat, 6), round(lon, 6))

    @staticmethod
    def pixel_to_tile(pixel):
        """Transform pixel to tile coordinates"""
        return ((pixel[0] // 256), (pixel[1] // 256))

    @staticmethod
    def tile_to_pixel(tile, centered=False):
        """Transform tile to pixel coordinates"""
        pixel = [(tile[0] * 256), (tile[1] * 256)]
        if centered:
            # shift to the tile center instead of its top-left corner
            pixel = [(pix + 128) for pix in pixel]
        return (pixel[0], pixel[1])

    @staticmethod
    @precondition(lambda tile, lvl: valid_level(lvl))
    def tile_to_quadkey(tile, level):
        """Transform tile coordinates to a quadkey"""
        tile_x = tile[0]
        tile_y = tile[1]
        quadkey = ''
        # one base-4 digit per level, most significant bit first
        for i in range(level):  # fixed: xrange does not exist in Python 3
            bit = level - i
            digit = ord('0')
            mask = 1 << (bit - 1)
            if (tile_x & mask) != 0:  # fixed: value comparison, not identity
                digit += 1
            if (tile_y & mask) != 0:
                digit += 2
            quadkey += chr(digit)
        return quadkey

    @staticmethod
    def quadkey_to_tile(quadkey):
        """Transform quadkey to tile coordinates"""
        (tile_x, tile_y) = (0, 0)
        level = len(quadkey)
        # each digit contributes one interleaved bit to x and y
        for i in range(level):  # fixed: xrange does not exist in Python 3
            bit = level - i
            mask = 1 << (bit - 1)
            if quadkey[level - bit] == '1':
                tile_x |= mask
            if quadkey[level - bit] == '2':
                tile_y |= mask
            if quadkey[level - bit] == '3':
                tile_x |= mask
                tile_y |= mask
        return [(tile_x, tile_y), level]
def condition(precondition=None, postcondition=None):
    """Decorator factory enforcing contracts around a function call.

    *precondition* is asserted on the call arguments before the function
    runs; *postcondition* is asserted on the return value afterwards.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if precondition is not None:
                assert precondition(*args, **kwargs)
            result = func(*args, **kwargs)
            if postcondition is not None:
                assert postcondition(result)
            return result
        return wrapper
    return decorator
def precondition(check):
    """Shorthand for condition() that only asserts *check* on the arguments."""
    return condition(precondition=check)
def postcondition(check):
    """Shorthand for condition() that only asserts *check* on the return value."""
    return condition(postcondition=check)
def run():
    """Entry point: discover and execute the unittest test cases in this module."""
    unittest.main()
class QuadkeyTest(TestCase):
    """Unit tests for the quadkey module-level API.

    Fix: testChildren used Python 2's ``xrange``, which raises NameError on
    Python 3; replaced with ``range``.
    """

    def testInit(self):
        # a valid key constructs; empty or non-[0-3] keys fail the precondition
        qk = quadkey.from_str('0321201120')
        with self.assertRaises(AssertionError):
            qk = quadkey.from_str('')
        with self.assertRaises(AssertionError):
            qk = quadkey.from_str('0156510012')

    def testFromGeo(self):
        geo = (40, -105)
        level = 7
        key = quadkey.from_str('0231010')
        self.assertEqual(key, quadkey.from_geo(geo, level))

    def testEquality(self):
        one = quadkey.from_str('00')
        two = quadkey.from_str('00')
        self.assertEqual(one, two)
        three = quadkey.from_str('0')
        self.assertNotEqual(one, three)

    def testChildren(self):
        qk = quadkey.from_str('0')
        self.assertEqual([c.key for c in qk.children()], ['00', '01', '02', '03'])
        # a maximum-depth (23-digit) key has no children
        qk = quadkey.from_str(''.join(['0' for x in range(23)]))
        self.assertEqual(qk.children(), [])

    def testAncestry(self):
        one = quadkey.from_str('0')
        two = quadkey.from_str('0101')
        self.assertEqual(3, one.is_descendent(two))
        self.assertIsNone(two.is_descendent(one))
        self.assertEqual(3, two.is_ancestor(one))
        three = quadkey.from_str('1')
        self.assertIsNone(three.is_ancestor(one))

    def testNearby(self):
        qk = quadkey.from_str('0')
        self.assertEqual(set(['1', '2', '3']), set(qk.nearby()))

    def testUnwind(self):
        qk = quadkey.from_str('0123')
        self.assertEqual(['0123', '012', '01', '0'], [qk.key for qk in qk.unwind()])

    def testDifference(self):
        # the difference is symmetric in its endpoints
        _from = quadkey.from_str('0320101102')
        _to = quadkey.from_str('0320101110')
        diff = set(['0320101102', '0320101100', '0320101103', '0320101101', '0320101112', '0320101110'])
        self.assertEqual(diff, set([qk.key for qk in _to.difference(_from)]))
        self.assertEqual(diff, set([qk.key for qk in _from.difference(_to)]))
class TileSystemTest(TestCase):
    """Unit tests for the static TileSystem conversion helpers.

    Fix: testGroundResolution computed the expected value but never compared
    it with the function result; an assertAlmostEqual is added.
    """

    def testClip(self):
        self.assertEqual(1, TileSystem.clip(0, (1, 5)))
        self.assertEqual(5, TileSystem.clip(10, (1, 5)))
        self.assertEqual(3, TileSystem.clip(3, (1, 5)))
        # an inverted min/max range violates the precondition
        with self.assertRaises(AssertionError):
            TileSystem.clip(7, (5, 1))

    def testMapSize(self):
        self.assertEqual(512, TileSystem.map_size(1))
        with self.assertRaises(AssertionError):
            TileSystem.map_size(0)

    def testGroundResolution(self):
        geo = (40.0, -105.0)
        res = 936.8665722621985
        # fixed: the expected value was previously computed but never asserted
        self.assertAlmostEqual(res, TileSystem.ground_resolution(geo[0], 7), places=6)

    def testMapScale(self):
        geo = (40.0, -105.0)
        level = 7
        dpi = 96
        scale = 3540913.029022482
        self.assertEqual(scale, TileSystem.map_scale(geo[0], level, dpi))

    def testGeoToPixel(self):
        geo = (40.0, -105.0)
        level = 7
        pixel = (6827, 12405)
        self.assertEqual(pixel, TileSystem.geo_to_pixel(geo, level))

    def testPixelToGeo(self):
        pixel = (6827, 12405)
        level = 7
        geo = (40.002372, -104.996338)
        self.assertEqual(geo, TileSystem.pixel_to_geo(pixel, level))

    def testPixelToTile(self):
        pixel = (6827, 12405)
        tile = (26, 48)
        self.assertEqual(tile, TileSystem.pixel_to_tile(pixel))

    def testTileToPixel(self):
        tile = (26, 48)
        pixel = (6656, 12288)
        self.assertEqual(pixel, TileSystem.tile_to_pixel(tile))

    def testTileToQuadkey(self):
        tile = (26, 48)
        level = 7
        key = '0231010'
        self.assertEqual(key, TileSystem.tile_to_quadkey(tile, level))

    def testQuadkeyToTile(self):
        tile = (26, 48)
        level = 7
        key = '0231010'
        self.assertEqual([tile, level], TileSystem.quadkey_to_tile(key))
class UtilTest(TestCase):
    """Tests for the precondition/postcondition decorator helpers."""

    @precondition((lambda c, x: (x is True)))
    def pre(self, x):
        # identity method guarded by a precondition requiring x is True
        return x

    def testPrecondition(self):
        self.assertTrue(self.pre(True))
        with self.assertRaises(AssertionError):
            self.pre(False)

    def testPostcondition(self):
        # postcondition behaviour is not covered here; placeholder test
        pass
def makeOsmFileName(fileNumber):
    """Return the path of the reviewed OSM anomaly file for *fileNumber*.

    The file number is zero-padded to two digits, e.g. 3 -> anomaly/reviewed_03.osm.
    """
    basename = 'reviewed_{:02d}.osm'.format(fileNumber)
    return os.path.join('anomaly', basename)
def saveOsmData(query):
    """Run an Overpass *query* and write each returned way to per-sport files.

    For every way: ensures an output directory named after its 'sport' tag
    under cfg.rootOsmDir, bumps the module-level ``summary`` counter, and
    writes a node-coordinate CSV plus a GeoJSON polygon named by the way id.

    NOTE(review): relies on the module-level ``api``, ``cfg`` and ``summary``
    objects; ways without a 'sport' tag would yield featureDirectoryName=None
    and fail in os.path.join — confirm queries always filter on sport.
    """
    result = api.query(query)
    for way in result.ways:
        featureDirectoryName = way.tags.get('sport')
        outputDirectoryName = os.path.join(cfg.rootOsmDir, featureDirectoryName)
        if (os.path.exists(outputDirectoryName) == False):
            os.makedirs(outputDirectoryName)
        # per-sport way counter
        if ((featureDirectoryName in summary) == False):
            summary[featureDirectoryName] = 1
        else:
            summary[featureDirectoryName] += 1
        filenameBase = os.path.join(cfg.rootOsmDir, featureDirectoryName, str(way.id))
        # tab-separated lat/lon with 7-decimal precision
        with open(('%s.csv' % filenameBase), 'wt') as text_file:
            for node in way.nodes:
                text_file.write(('%0.7f\t%0.7f\n' % (node.lat, node.lon)))
        with open(('%s.GeoJSON' % filenameBase), 'wt') as text_file:
            # GeoJSON uses (lon, lat) ordering
            rawNodes = []
            for node in way.nodes:
                rawNodes.append((node.lon, node.lat))
            try:
                geom = shapely.geometry.Polygon(rawNodes)
                tags = way.tags
                tags['wayOSMId'] = way.id
                features = []
                features.append(geojson.Feature(geometry=geom, properties=tags))
                featureC = geojson.FeatureCollection(features)
                text_file.write(geojson.dumps(featureC))
            except Exception as e:
                # degenerate polygons (e.g. too few points) are skipped with a log
                print(e)
def _find_getch(): try: import termios except ImportError: import msvcrt return msvcrt.getch import sys, tty def _getch(): fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(fd) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch return _getch
def find_in_path(name, path):
    """Find a file in a search path.

    *path* is an os.pathsep-separated list of directories; returns the
    absolute path of the first match, or None if the file is not found.
    """
    for directory in path.split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def get_cuda_sm_list(cuda_ver):
    """Return the SM architectures to build for CUDA version *cuda_ver* (e.g. 110 == 11.0).

    The candidate list comes from the comma-separated CUDA_SM_LIST
    environment variable when set, otherwise from the built-in default;
    architectures unsupported by that CUDA version are then filtered out.
    """
    if 'CUDA_SM_LIST' in os.environ:
        sm_list = os.environ['CUDA_SM_LIST'].split(',')
    else:
        sm_list = ['30', '52', '60', '61', '70', '75', '80', '86']
    if cuda_ver >= 110:
        # CUDA 11+ drops sm_30; sm_86 needs CUDA > 11.0
        filter_list = ['30']
        if cuda_ver == 110:
            filter_list += ['86']
    else:
        # pre-11 CUDA cannot target Ampere; older versions drop more
        filter_list = ['80', '86']
        if cuda_ver < 100:
            filter_list += ['75']
        if cuda_ver < 90:
            filter_list += ['70']
        if cuda_ver < 80:
            filter_list += ['60', '61']
    return [sm for sm in sm_list if sm not in filter_list]
def get_cuda_compute(cuda_ver):
    """Return the default compute capability for CUDA version *cuda_ver* (e.g. 110 == 11.0).

    The CUDA_COMPUTE environment variable, when set, overrides the
    built-in mapping.

    Raises
    ------
    ValueError
        If *cuda_ver* falls outside the known range. (Fix: the original
        left ``compute`` unbound in that case and crashed with an
        opaque UnboundLocalError.)
    """
    if 'CUDA_COMPUTE' in os.environ:
        return os.environ['CUDA_COMPUTE']
    if 70 <= cuda_ver < 80:
        return '52'
    if 80 <= cuda_ver < 90:
        return '61'
    if 90 <= cuda_ver < 100:
        return '70'
    if 100 <= cuda_ver < 110:
        return '75'
    if cuda_ver == 110:
        return '80'
    if cuda_ver == 111:
        return '86'
    raise ValueError('no default compute capability known for CUDA version {}'.format(cuda_ver))