code
stringlengths
17
6.64M
def get_checkpoint_history_callback(outdir, config, dataset, comet_experiment, horovod_enabled, is_hpo_run=False):
    """Assemble the Keras callback list: checkpointing, validation plotting and TensorBoard."""
    callbacks = []

    # Under horovod, only rank 0 writes checkpoints to avoid clobbering.
    if (not horovod_enabled) or hvd.rank() == 0:
        cp_dir = Path(outdir) / "weights"
        cp_dir.mkdir(parents=True, exist_ok=True)
        cp_callback = ModelOptimizerCheckpoint(
            filepath=str(cp_dir / "weights-{epoch:02d}-{val_loss:.6f}.hdf5"),
            save_weights_only=True,
            verbose=1,
            monitor=config["callbacks"]["checkpoint"]["monitor"],
            save_best_only=False,
        )
        # optimizer state is saved next to the weights with a matching name
        cp_callback.opt_path = str(cp_dir / "opt-{epoch:02d}-{val_loss:.6f}.pkl")
        if config.get("do_checkpoint_callback", True):
            callbacks.append(cp_callback)

    if not horovod_enabled:
        history_path = Path(outdir) / "history"
        history_path.mkdir(parents=True, exist_ok=True)
        history_path = str(history_path)
        validation_cb = CustomCallback(
            history_path,
            dataset.tensorflow_dataset.take(config["validation_num_events"]),
            config,
            plot_freq=config["callbacks"]["plot_freq"],
            horovod_enabled=horovod_enabled,
            comet_experiment=comet_experiment,
            is_hpo_run=is_hpo_run,
        )
        if config.get("do_validation_callback", True):
            callbacks.append(validation_cb)

    tb = CustomTensorBoard(
        log_dir=outdir + "/logs",
        histogram_freq=config["callbacks"]["tensorboard"]["hist_freq"],
        write_graph=False,
        write_images=False,
        update_freq="batch",
        profile_batch=config["callbacks"]["tensorboard"].get("profile_batch", 0),
        dump_history=config["callbacks"]["tensorboard"]["dump_history"],
    )
    # Keras recognizes callbacks by class name in some code paths.
    tb.__class__.__name__ = "TensorBoard"
    callbacks.append(tb)
    return callbacks
def get_rundir(base="experiments"):
    """Return the path of the next sequential run directory under *base*.

    Existing entries named ``run_NN`` are scanned and ``<base>/run_<NN+1>``
    is returned (``run_01`` when none exist).  The run directory itself is
    not created; only *base* is.

    Args:
        base: directory under which run directories are numbered.

    Returns:
        str: ``"<base>/run_NN"`` with a zero-padded run number.
    """
    os.makedirs(base, exist_ok=True)
    # Only consider entries matching the run_NN naming scheme; previously a
    # stray file (e.g. ".DS_Store") crashed the int() parsing with a
    # ValueError/IndexError.
    run_numbers = []
    for entry in os.listdir(base):
        parts = entry.split("run_")
        if len(parts) == 2 and parts[1].isdigit():
            run_numbers.append(int(parts[1]))
    run_number = max(run_numbers) + 1 if run_numbers else 1
    logdir = "run_%02d" % run_number
    return "{}/{}".format(base, logdir)
def make_model(config, dtype):
    """Instantiate the model type named in ``config['parameters']['model']``."""
    builders = {
        "transformer": make_transformer,
        "gnn_dense": make_gnn_dense,
    }
    model_type = config["parameters"]["model"]
    if model_type not in builders:
        raise KeyError("Unknown model type {}".format(model_type))
    return builders[model_type](config, dtype)
def make_gnn_dense(config, dtype):
    """Build a PFNetDense model, forwarding the tunable parameters present in the config."""
    tunable = [
        "do_node_encoding", "node_update_mode", "node_encoding_hidden_dim",
        "dropout", "activation", "num_graph_layers_id", "num_graph_layers_reg",
        "input_encoding", "skip_connection", "output_decoding",
        "combined_graph_layer", "debug",
    ]
    # only pass through parameters that are actually configured
    kwargs = {name: config["parameters"][name] for name in tunable if name in config["parameters"]}
    return PFNetDense(
        multi_output=config["setup"]["multi_output"],
        num_input_classes=config["dataset"]["num_input_classes"],
        num_output_classes=config["dataset"]["num_output_classes"],
        schema=config["dataset"]["schema"],
        event_set_output=config["loss"]["event_loss"] != "none",
        met_output=config["loss"]["met_loss"] != "none",
        cls_output_as_logits=config["setup"].get("cls_output_as_logits", False),
        small_graph_opt=config["setup"].get("small_graph_opt", False),
        use_normalizer=config["setup"].get("use_normalizer", True),
        **kwargs,
    )
def make_transformer(config, dtype):
    """Build a PFNetTransformer model, forwarding the tunable parameters present in the config."""
    tunable = [
        "input_encoding", "output_decoding", "num_layers_encoder",
        "num_layers_decoder_reg", "num_layers_decoder_cls",
        "hidden_dim", "num_heads", "num_random_features",
    ]
    # only pass through parameters that are actually configured
    kwargs = {name: config["parameters"][name] for name in tunable if name in config["parameters"]}
    return PFNetTransformer(
        multi_output=config["setup"]["multi_output"],
        num_input_classes=config["dataset"]["num_input_classes"],
        num_output_classes=config["dataset"]["num_output_classes"],
        schema=config["dataset"]["schema"],
        event_set_output=config["loss"]["event_loss"] != "none",
        met_output=config["loss"]["met_loss"] != "none",
        cls_output_as_logits=config["setup"]["cls_output_as_logits"],
        **kwargs,
    )
def eval_model(model, dataset, config, outdir, jet_ptcut=5.0, jet_match_dr=0.1, verbose=False):
    """Run the model over *dataset*, cluster jets, match them to generator jets
    and write one parquet file per batch into *outdir*."""
    ibatch = 0
    jet_algo = config["evaluation_jet_algo"]
    if jet_algo == "ee_genkt_algorithm":
        jetdef = fastjet.JetDefinition(fastjet.ee_genkt_algorithm, 0.7, -1.0)
    elif jet_algo == "antikt_algorithm":
        jetdef = fastjet.JetDefinition(fastjet.antikt_algorithm, 0.4)
    else:
        raise KeyError("Unknown evaluation_jet_algo: {}".format(jet_algo))

    for elem in tqdm(dataset, desc="Evaluating model"):
        if verbose:
            print("evaluating model")
        ypred = model.predict(elem["X"], verbose=verbose)
        # charge is predicted as 3-way class scores; map argmax back to {-1, 0, +1}
        ypred["charge"] = np.argmax(ypred["charge"], axis=-1) - 1

        if verbose:
            print("unpacking outputs")
        ygen = [unpack_target(x, config["dataset"]["num_output_classes"], config) for x in elem["ygen"]]
        ycand = [unpack_target(x, config["dataset"]["num_output_classes"], config) for x in elem["ycand"]]
        ygen = {k: tf.stack([x[k] for x in ygen]) for k in ygen[0].keys()}
        ycand = {k: tf.stack([x[k] for x in ycand]) for k in ycand[0].keys()}
        ygen["charge"] = tf.expand_dims(tf.math.argmax(ygen["charge"], axis=-1), axis=-1) - 1
        ycand["charge"] = tf.expand_dims(tf.math.argmax(ycand["charge"], axis=-1), axis=-1) - 1
        ygen["cls_id"] = tf.math.argmax(ygen["cls"], axis=-1)
        ycand["cls_id"] = tf.math.argmax(ycand["cls"], axis=-1)
        ypred["cls_id"] = tf.math.argmax(ypred["cls"], axis=-1).numpy()

        # per-particle keys only; 'met' is an event-level output
        keys_particle = [k for k in ypred.keys() if k != "met"]
        X = awkward.Array(elem["X"].numpy())
        ygen = awkward.Array({k: squeeze_if_one(ygen[k].numpy()) for k in keys_particle})
        ycand = awkward.Array({k: squeeze_if_one(ycand[k].numpy()) for k in keys_particle})
        ypred = awkward.Array({k: squeeze_if_one(ypred[k]) for k in keys_particle})
        awkvals = {"gen": ygen, "cand": ycand, "pred": ypred}

        jets_coll = {}
        if verbose:
            print("clustering jets")
        for typ in ["gen", "cand", "pred"]:
            phi = np.arctan2(awkvals[typ]["sin_phi"], awkvals[typ]["cos_phi"])
            cls_id = awkward.argmax(awkvals[typ]["cls"], axis=-1, mask_identity=False)
            # class 0 means "no particle"; keep only real candidates
            valid = cls_id != 0
            pt = awkward.from_iter([np.array(v[m], np.float32) for v, m in zip(awkvals[typ]["pt"], valid)])
            eta = awkward.from_iter([np.array(v[m], np.float32) for v, m in zip(awkvals[typ]["eta"], valid)])
            energy = awkward.from_iter([np.array(v[m], np.float32) for v, m in zip(awkvals[typ]["energy"], valid)])
            phi = awkward.from_iter([np.array(v[m], np.float32) for v, m in zip(phi, valid)])
            if verbose:
                print(typ, pt)
            # fastjet cannot handle fully empty collections; substitute dummies
            if len(awkward.flatten(pt)) == 0:
                pt = build_dummy_array(len(pt), np.float64)
                eta = build_dummy_array(len(pt), np.float64)
                phi = build_dummy_array(len(pt), np.float64)
                energy = build_dummy_array(len(pt), np.float64)
            vec = vector.awk(awkward.zip({"pt": pt, "eta": eta, "phi": phi, "e": energy}))
            cluster = fastjet.ClusterSequence(vec.to_xyzt(), jetdef)
            jets_coll[typ] = cluster.inclusive_jets(min_pt=jet_ptcut)
            if verbose:
                print("jets {}".format(typ), awkward.to_numpy(awkward.count(jets_coll[typ].px, axis=1)))

        gen_to_pred = match_two_jet_collections(jets_coll, "gen", "pred", jet_match_dr)
        gen_to_cand = match_two_jet_collections(jets_coll, "gen", "cand", jet_match_dr)
        matched_jets = awkward.Array({"gen_to_pred": gen_to_pred, "gen_to_cand": gen_to_cand})

        outfile = "{}/pred_batch{}.parquet".format(outdir, ibatch)
        if verbose:
            print("saving to {}".format(outfile))
        awkward.to_parquet(
            awkward.Array({"inputs": X, "particles": awkvals, "jets": jets_coll, "matched_jets": matched_jets}),
            outfile,
        )
        ibatch += 1
def freeze_model(model, config, outdir):
    """Benchmark the model on random inputs, then export it to ONNX in *outdir*."""

    def model_output(ret):
        # flatten the per-particle output dict into one concatenated tensor
        return tf.concat(
            [ret["cls"], ret["charge"], ret["pt"], ret["eta"], ret["sin_phi"], ret["cos_phi"], ret["energy"]],
            axis=-1,
        )

    full_model = tf.function(lambda x: model_output(model(x, training=False)))
    niter = 10
    nfeat = config["dataset"]["num_input_features"]
    if "combined_graph_layer" in config["parameters"]:
        # sweep multiples of the graph-layer bin size
        bin_size = config["parameters"]["combined_graph_layer"]["bin_size"]
        elem_range = list(range(bin_size, 5 * bin_size, bin_size))
    else:
        elem_range = range(100, 1000, 200)

    # quick timing sweep over batch sizes and particle multiplicities
    for ibatch in [1, 2, 4]:
        for nptcl in elem_range:
            X = np.random.rand(ibatch, nptcl, nfeat)
            full_model(X)  # warm-up / tracing call, excluded from timing
            t0 = time.time()
            for _ in range(niter):
                full_model(X)
            t1 = time.time()
            print(ibatch, nptcl, (t1 - t0) / niter)

    import tf2onnx

    model_proto, _ = tf2onnx.convert.from_function(
        full_model,
        opset=12,
        input_signature=(tf.TensorSpec((None, None, nfeat), tf.float32, name="x:0"),),
        output_path=str(Path(outdir) / "model.onnx"),
    )
class LearningRateLoggingCallback(tf.keras.callbacks.Callback):
    """Logs the optimizer's decayed learning rate to the TF summary writer each epoch."""

    def on_epoch_end(self, epoch, numpy_logs):
        try:
            lr = self.model.optimizer._decayed_lr(tf.float32).numpy()
            tf.summary.scalar("learning rate", data=lr, step=epoch)
        except AttributeError as e:
            # optimizers without _decayed_lr are skipped, not fatal
            print(e)
def configure_model_weights(model, trainable_layers):
    """Freeze/unfreeze parts of the model per *trainable_layers*, recompile, and
    print the trainable/non-trainable parameter counts.

    *trainable_layers* may be None/'all', 'regression', 'classification', or a
    layer name (or list of names) passed to model.set_trainable_named.
    """
    print("setting trainable layers: {}".format(trainable_layers))
    if trainable_layers is None:
        trainable_layers = "all"

    if trainable_layers == "all":
        model.trainable = True
    elif trainable_layers == "regression":
        # train only the regression graph layers and output head
        for cg in model.cg_id:
            cg.trainable = False
        for cg in model.cg_reg:
            cg.trainable = True
        model.output_dec.set_trainable_regression()
    elif trainable_layers == "classification":
        # train only the classification graph layers and output head
        for cg in model.cg_id:
            cg.trainable = True
        for cg in model.cg_reg:
            cg.trainable = False
        model.output_dec.set_trainable_classification()
    else:
        if isinstance(trainable_layers, str):
            trainable_layers = [trainable_layers]
        model.set_trainable_named(trainable_layers)

    # recompile so the trainability changes take effect
    model.compile()
    trainable_count = sum(np.prod(tf.keras.backend.get_value(w).shape) for w in model.trainable_weights)
    non_trainable_count = sum(np.prod(tf.keras.backend.get_value(w).shape) for w in model.non_trainable_weights)
    print("trainable={} non_trainable={}".format(trainable_count, non_trainable_count))
def make_focal_loss(config):
    """Return a focal-loss closure whose parameters are read from config['setup'] at call time."""

    def loss(x, y):
        from .tfa import sigmoid_focal_crossentropy

        setup = config["setup"]
        return sigmoid_focal_crossentropy(
            x,
            y,
            alpha=float(setup.get("focal_loss_alpha", 0.25)),
            gamma=float(setup.get("focal_loss_gamma", 2.0)),
            from_logits=setup["cls_output_as_logits"],
        )

    return loss
class CosineAnnealer:
    """Anneal a value from ``start`` to ``end`` over ``steps`` calls along a half cosine."""

    def __init__(self, start, end, steps):
        self.start = start
        self.end = end
        self.steps = steps
        self.n = 0  # number of steps taken so far

    def step(self):
        """Advance by one step and return the annealed value."""
        fraction = self.n / self.steps
        self.n += 1
        # cos term goes 2 -> 0 as fraction goes 0 -> 1, moving the value start -> end
        return self.end + (self.start - self.end) / 2.0 * (np.cos(np.pi * fraction) + 1)
class OneCycleScheduler(LearningRateSchedule):
    """`LearningRateSchedule` implementing the 1cycle policy of Leslie Smith
    (https://arxiv.org/pdf/1803.09820.pdf), in the two-phase cosine-annealed
    variant popularized by fastai (https://docs.fast.ai/callbacks.one_cycle.html).

    Phase 1 (warm-up, a `warmup_ratio` fraction of `steps`): LR rises from
    `lr_max / div_factor` to `lr_max`.  Phase 2: LR decays from `lr_max` to
    `lr_max / final_div`.

    NOTE: momentum is NOT controlled here.  Pair this schedule with the
    `MomentumOneCycleScheduler` callback defined below.
    """

    def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95, warmup_ratio=0.3,
                 div_factor=25.0, final_div=100000.0, name=None):
        super(OneCycleScheduler, self).__init__()
        lr_min = lr_max / div_factor
        if final_div is None:
            final_lr = lr_max / (div_factor * 10000.0)
        else:
            final_lr = lr_max / final_div
        phase_1_steps = int(steps * warmup_ratio)
        phase_2_steps = steps - phase_1_steps

        self.lr_max = lr_max
        self.steps = steps
        self.mom_min = mom_min
        self.mom_max = mom_max
        self.warmup_ratio = warmup_ratio
        self.div_factor = div_factor
        self.final_div = final_div
        self.name = name

        # Precompute the entire LR curve once so __call__ is a cheap lookup.
        phases = [
            CosineAnnealer(lr_min, lr_max, phase_1_steps),
            CosineAnnealer(lr_max, final_lr, phase_2_steps),
        ]
        step = 0
        phase = 0
        full_lr_schedule = np.zeros(int(steps))
        for ii in np.arange(np.floor(steps), dtype=int):
            step += 1
            if step >= phase_1_steps:
                phase = 1
            full_lr_schedule[ii] = phases[phase].step()
        self.full_lr_schedule = tf.convert_to_tensor(full_lr_schedule)

    def __call__(self, step):
        with ops.name_scope(self.name or "OneCycleScheduler"):
            # training steps are 1-based; index into the precomputed curve
            return self.full_lr_schedule[tf.cast(step, "int32") - 1]

    def get_config(self):
        return {
            "lr_max": self.lr_max,
            "steps": self.steps,
            "mom_min": self.mom_min,
            "mom_max": self.mom_max,
            "warmup_ratio": self.warmup_ratio,
            "div_factor": self.div_factor,
            "final_div": self.final_div,
            "name": self.name,
        }
class MomentumOneCycleScheduler(Callback):
    """`Callback` scheduling ONLY the optimizer momentum per the 1cycle policy
    (https://arxiv.org/pdf/1803.09820.pdf).  Intended to be used together with
    the `OneCycleScheduler` learning-rate schedule above or similar.
    """

    def __init__(self, steps, mom_min=0.85, mom_max=0.95, warmup_ratio=0.3):
        super(MomentumOneCycleScheduler, self).__init__()
        phase_1_steps = steps * warmup_ratio
        phase_2_steps = steps - phase_1_steps
        self.phase_1_steps = phase_1_steps
        self.phase_2_steps = phase_2_steps
        self.phase = 0
        self.step = 0
        # momentum goes mom_max -> mom_min during warm-up, then back up
        self.phases = [
            CosineAnnealer(mom_max, mom_min, phase_1_steps),
            CosineAnnealer(mom_min, mom_max, phase_2_steps),
        ]

    def _get_opt(self):
        return self.model.optimizer

    def set_step(self, step):
        """Set the step of the schedule, 1 step is one batch"""
        self.step = step
        if self.step >= self.phase_1_steps:
            self.phase = 1
            self.phases[1].n = step - self.phase_1_steps
            assert (self.phases[1].n >= 0) and (self.phases[1].n < self.phase_2_steps)
        else:
            self.phase = 0
            self.phases[0].n = step

    def on_train_begin(self, logs=None):
        # resume the schedule from the optimizer's current iteration count
        self.set_step(tf.keras.backend.get_value(self._get_opt().iterations))
        self.set_momentum(self.mom_schedule().step())

    def on_train_batch_end(self, batch, logs=None):
        self.step += 1
        if self.step >= self.phase_1_steps:
            self.phase = 1
        self.set_momentum(self.mom_schedule().step())

    def set_momentum(self, mom):
        opt = self._get_opt()
        if hasattr(opt, "beta_1"):  # Adam-style optimizers
            tf.keras.backend.set_value(opt.beta_1, mom)
        elif hasattr(opt, "momentum"):  # SGD-style optimizers
            tf.keras.backend.set_value(opt.momentum, mom)
        else:
            raise NotImplementedError("Only SGD and Adam are supported by MomentumOneCycleScheduler: {}".format(type(opt)))

    def mom_schedule(self):
        return self.phases[self.phase]
def is_tensor_or_variable(x):
    """Return True when *x* is a TF tensor or a `tf.Variable`."""
    if tf.is_tensor(x):
        return True
    return isinstance(x, tf.Variable)
class LossFunctionWrapper(tf.keras.losses.Loss):
    """Wraps a plain loss function in the Keras `Loss` interface."""

    def __init__(self, fn, reduction=tf.keras.losses.Reduction.AUTO, name=None, **kwargs):
        """Initializes `LossFunctionWrapper`.

        Args:
          fn: loss function to wrap, with signature `fn(y_true, y_pred, **kwargs)`.
          reduction: `tf.keras.losses.Reduction` to apply to the loss.  `AUTO`
            lets the usage context decide (usually `SUM_OVER_BATCH_SIZE`); note
            that `AUTO`/`SUM_OVER_BATCH_SIZE` raise when used with
            `tf.distribute.Strategy` outside the built-in training loops.
          name: optional name for the loss.
          **kwargs: keyword arguments forwarded to `fn` on every call.
        """
        super().__init__(reduction=reduction, name=name)
        self.fn = fn
        self._fn_kwargs = kwargs

    def call(self, y_true, y_pred):
        """Invoke the wrapped function and return per-sample loss values."""
        return self.fn(y_true, y_pred, **self._fn_kwargs)

    def get_config(self):
        config = {}
        for k, v in self._fn_kwargs.items():
            # tensors/variables must be evaluated so the config is serializable
            config[k] = tf.keras.backend.eval(v) if is_tensor_or_variable(v) else v
        base_config = super().get_config()
        return {**base_config, **config}
class SigmoidFocalCrossEntropy(LossFunctionWrapper):
    """Focal loss (RetinaNet, https://arxiv.org/pdf/1708.02002.pdf) as a Keras `Loss`.

    Focal loss down-weights well-classified examples and focuses on hard
    ones, which helps with highly imbalanced classification such as the
    background class in object detection.

    Usage with `tf.keras` API:

    >>> model = tf.keras.Model()
    >>> model.compile('sgd', loss=SigmoidFocalCrossEntropy())

    Args:
      alpha: balancing factor, default value is 0.25.
      gamma: modulating factor, default value is 2.0.

    Returns:
      Weighted loss float `Tensor`.  If `reduction` is `NONE`, this has the
      same shape as `y_true`; otherwise, it is scalar.

    Raises:
      ValueError: if the shape of `sample_weight` is invalid or the value of
        `gamma` is less than zero.
    """

    def __init__(self, from_logits: bool = False, alpha=0.25, gamma=2.0,
                 reduction: str = tf.keras.losses.Reduction.NONE,
                 name: str = "sigmoid_focal_crossentropy"):
        super().__init__(
            sigmoid_focal_crossentropy,
            name=name,
            reduction=reduction,
            from_logits=from_logits,
            alpha=alpha,
            gamma=gamma,
        )
@tf.function
def sigmoid_focal_crossentropy(y_true, y_pred, alpha=0.25, gamma=2.0, from_logits: bool = False) -> tf.Tensor:
    """Compute the sigmoid focal cross-entropy (RetinaNet,
    https://arxiv.org/pdf/1708.02002.pdf), which down-weights well-classified
    examples to handle class imbalance.

    Args:
      y_true: true targets tensor.
      y_pred: predictions tensor (probabilities, or logits when `from_logits`).
      alpha: balancing factor.
      gamma: modulating factor; must be >= 0.

    Returns:
      Per-sample weighted loss, summed over the last axis.

    Raises:
      ValueError: if `gamma` is negative.
    """
    if gamma and gamma < 0:
        raise ValueError("Value of gamma should be greater than or equal to zero.")

    y_pred = tf.convert_to_tensor(y_pred)
    y_true = tf.cast(y_true, dtype=y_pred.dtype)

    ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)

    pred_prob = tf.sigmoid(y_pred) if from_logits else y_pred
    # probability assigned to the true class of each element
    p_t = y_true * pred_prob + (1 - y_true) * (1 - pred_prob)

    alpha_factor = 1.0
    modulating_factor = 1.0
    if alpha:
        alpha = tf.cast(alpha, dtype=y_true.dtype)
        alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
    if gamma:
        gamma = tf.cast(gamma, dtype=y_true.dtype)
        modulating_factor = tf.pow(1.0 - p_t, gamma)

    return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)
def get_hp_str(result):
    """Format the 'config/*' hyperparameter columns of a ray-tune result row as a string."""

    def hp_name(key):
        # 'config/...' columns hold hyperparameters; other keys yield None
        if "config" in key:
            return key.split("config/")[-1]

    hps = [name for name in map(hp_name, result.keys()) if name is not None]
    s = ""
    for ii, hp in enumerate(hps):
        if ii % 6 == 0:
            s += "\n"  # line break every 6 entries
        s += "{}={}; ".format(hp, result["config/{}".format(hp)].values[0])
    return s
def plot_ray_analysis(analysis, save=False, skip=0):
    """Plot the training curves of every ray-tune trial; save one summary jpg
    per trial directory when *save* is set, otherwise show interactively."""
    to_plot = [
        "charge_loss", "cls_loss", "cos_phi_loss", "energy_loss", "eta_loss",
        "learning_rate", "loss", "pt_loss", "sin_phi_loss",
        "val_charge_loss", "val_cls_loss", "val_cos_phi_loss", "val_energy_loss",
        "val_eta_loss", "val_loss", "val_pt_loss", "val_sin_phi_loss",
    ]
    dfs = analysis.fetch_trial_dataframes()
    result_df = analysis.dataframe()
    for key in tqdm(dfs.keys(), desc="Creating Ray analysis plots", total=len(dfs.keys())):
        result = result_df[result_df["logdir"] == key]
        fig, axs = plt.subplots(5, 4, figsize=(12, 9), tight_layout=True)
        for var, ax in zip(to_plot, axs.flat):
            # skip the first `skip` epochs to de-clutter the y-range
            ax.plot(dfs[key].index.values[skip:], dfs[key][var][skip:], alpha=0.8)
            ax.set_xlabel("Epoch")
            ax.set_ylabel(var)
            ax.grid(alpha=0.3)
        plt.suptitle(get_hp_str(result))
        if save:
            plt.savefig(key + "/trial_summary.jpg")
            plt.close()
    if not save:
        plt.show()
    else:
        print("Saved plots in trial dirs.")
def correct_column_names_in_trial_dataframes(analysis):
    """Restore the expected column names on all trial dataframes.

    Sometimes some trial dataframes are missing column names and have been
    given the first row of values as column names.  This overwrites the
    columns of every trial dataframe in the ray.tune.Analysis object with the
    canonical list.
    """
    trial_dataframes = analysis.trial_dataframes
    trial_df_columns = [
        "adam_beta_1", "charge_loss", "cls_acc_unweighted", "cls_loss",
        "cos_phi_loss", "energy_loss", "eta_loss", "learning_rate", "loss",
        "pt_loss", "sin_phi_loss",
        "val_charge_loss", "val_cls_acc_unweighted", "val_cls_acc_weighted",
        "val_cls_loss", "val_cos_phi_loss", "val_energy_loss", "val_eta_loss",
        "val_loss", "val_pt_loss", "val_sin_phi_loss",
        "time_this_iter_s", "should_checkpoint", "done", "timesteps_total",
        "episodes_total", "training_iteration", "experiment_id", "date",
        "timestamp", "time_total_s", "pid", "hostname", "node_ip",
        "time_since_restore", "timesteps_since_restore",
        "iterations_since_restore", "trial_id",
    ]
    for key in trial_dataframes.keys():
        trial_dataframes[key].columns = trial_df_columns
    analysis._trial_dataframes = trial_dataframes
def get_top_k_df(analysis, k):
    """Return the *k* best trial rows of a ray-tune analysis dataframe.

    Args:
        analysis: ray.tune Analysis with `default_metric` and `default_mode`.
        k: number of rows to return.

    Returns:
        pandas.DataFrame with the k best rows by `analysis.default_metric`.

    Raises:
        ValueError: if `analysis.default_mode` is neither 'min' nor 'max'.
            Previously this case fell through both branches and crashed with
            an UnboundLocalError on `dd`.
    """
    result_df = analysis.dataframe()
    if analysis.default_mode == "min":
        return result_df.nsmallest(k, analysis.default_metric)
    if analysis.default_mode == "max":
        return result_df.nlargest(k, analysis.default_metric)
    raise ValueError("Unknown default_mode: {!r} (expected 'min' or 'max')".format(analysis.default_mode))
def topk_summary_plot(analysis, k, save=False, save_dir=None):
    """Plot a grid of loss curves for the k best trials; optionally save as jpg."""
    to_plot = ["val_cls_loss", "val_energy_loss", "val_loss"]
    dd = get_top_k_df(analysis, k)
    dfs = analysis.trial_dataframes
    fig, axs = plt.subplots(k, 5, figsize=(12, 9), tight_layout=True)
    # one row of axes per top-k trial
    for key, ax_row in zip(dd["logdir"], axs):
        for var, ax in zip(to_plot, ax_row):
            ax.plot(dfs[key].index.values, dfs[key][var], alpha=0.8)
            ax.set_xlabel("Epoch")
            ax.set_ylabel(var)
            ax.grid(alpha=0.3)
    if save:
        target = str(Path(save_dir) / "topk_summary_plot.jpg") if save_dir else "topk_summary_plot.jpg"
        plt.savefig(target)
    else:
        plt.show()
def topk_summary_plot_v2(analysis, k, save=False, save_dir=None):
    """Overlay the k best trials' loss curves, one subplot per metric."""
    print("Creating summary plot of top {} trials.".format(k))
    to_plot = ["val_loss", "val_cls_loss"]
    dd = get_top_k_df(analysis, k)
    dfs = analysis.trial_dataframes
    fig, axs = plt.subplots(len(to_plot), 1, figsize=(12, 9), tight_layout=True, sharex=True)
    for var, ax_row in zip(to_plot, axs):
        # one line per top-k trial, labelled by its rank
        for ii, key in enumerate(dd["logdir"]):
            ax_row.plot(dfs[key].index.values, dfs[key][var], alpha=0.8, label="#{}".format(ii + 1))
        ax_row.set_ylabel(var)
        ax_row.grid(alpha=0.3)
        ax_row.legend()
    ax_row.set_xlabel("Epoch")
    plt.suptitle("Top {} best trials according to '{}'".format(k, analysis.default_metric))
    if save or save_dir:
        if save_dir:
            file_name = str(Path(save_dir) / "topk_summary_plot_v2.jpg")
        else:
            file_name = "topk_summary_plot.jpg"
        plt.savefig(file_name)
        print("Saved summary plot to {}".format(file_name))
    else:
        plt.show()
def summarize_top_k(analysis, k, save=False, save_dir=None):
    """Build (and optionally save to xlsx) a styled summary table of the top-k trials.

    Returns:
        (summary, styled_summary): the plain DataFrame and its pandas Styler.
    """
    print("Creating summary table of top {} trials.".format(k))
    dd = get_top_k_df(analysis, k)
    summary = pd.concat(
        [dd[["loss", "cls_loss", "val_loss", "val_cls_loss"]], dd.filter(regex="config/*"), dd["logdir"]],
        axis=1,
    )
    cm_green = sns.light_palette("green", as_cmap=True)
    cm_red = sns.light_palette("red", as_cmap=True)
    max_is_better = []
    min_is_better = ["loss", "cls_loss", "val_loss", "val_cls_loss"]
    highlight = "color:black; font-weight:bold; background-color:yellow;"
    styled_summary = (
        summary.style
        .background_gradient(cmap=cm_green, subset=max_is_better)
        .background_gradient(cmap=cm_red, subset=min_is_better)
        .highlight_max(subset=max_is_better, props=highlight)
        .highlight_min(subset=min_is_better, props=highlight)
        .set_caption("Top {} trials according to {}".format(k, analysis.default_metric))
        .hide_index()
    )
    if save or save_dir:
        xl_file = str(Path(save_dir) / "summary_table.xlsx") if save_dir else "summary_table.xlsx"
        styled_summary.to_excel(xl_file, engine="openpyxl")
        print("Saved plot table to {}".format(xl_file))
    return (summary, styled_summary)
def analyze_ray_experiment(exp_dir, default_metric, default_mode):
    """Load a ray-tune experiment directory and write top-k plots and a summary table into it."""
    from ray.tune import Analysis

    analysis = Analysis(exp_dir, default_metric=default_metric, default_mode=default_mode)
    topk_summary_plot_v2(analysis, 5, save_dir=exp_dir)
    summ, styled = summarize_top_k(analysis, k=10, save_dir=exp_dir)
def count_skipped_configurations(exp_dir):
    """Count skipped configurations recorded in skipped_configurations.txt.

    Each skipped configuration is delimited by a pair of 80-'#' separator
    lines.  Returns the number of pairs, or None (after printing a message)
    when the log file does not exist.
    """
    skiplog_file_path = Path(exp_dir) / "skipped_configurations.txt"
    if not skiplog_file_path.exists():
        print("Could not find {}".format(str(skiplog_file_path)))
        return None
    separator = ("#" * 80) + "\n"
    with open(skiplog_file_path, "r") as f:
        count = sum(1 for line in f if line == separator)
    if count % 2 != 0:
        print("WARNING: counts is not divisible by two")
    return count // 2
def parse_args():
    """Parse command-line arguments for the benchmark script.

    Returns:
        argparse.Namespace with bin_size, num_features, batch_size,
        num_threads and use_gpu.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--bin-size", type=int, default=256)
    parser.add_argument("--num-features", type=int, default=17)
    parser.add_argument("--batch-size", type=int, default=20)
    parser.add_argument("--num-threads", type=int, default=1)
    # BUG FIX: store_true actions accept no `type` keyword; the previous
    # `type=bool, action='store_true'` raised TypeError at parser build time.
    parser.add_argument("--use-gpu", action="store_true")
    args = parser.parse_args()
    return args
def get_mem_cpu_mb():
    """Peak resident set size of this process in MB."""
    # ru_maxrss is in kilobytes on Linux (note: bytes on macOS)
    peak_rss_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return peak_rss_kb / 1000
def get_mem_gpu_mb():
    """Used GPU memory in MB for the module-level NVML `handle` device."""
    mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
    return mem_info.used / 1000 / 1000
def get_mem_mb(use_gpu):
    """Memory usage in MB: GPU memory when *use_gpu* is truthy, else peak CPU RSS."""
    return get_mem_gpu_mb() if use_gpu else get_mem_cpu_mb()
def create_experiment_dir(prefix=None, suffix=None, experiments_dir="experiments"):
    """Create and return a timestamped experiment directory under *experiments_dir*."""
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
    name = timestamp if prefix is None else prefix + timestamp
    train_dir = Path(experiments_dir) / name
    if suffix is not None:
        # NOTE(review): the hostname — not the value of `suffix` — is appended;
        # `suffix` only toggles this behavior.  Confirm this is intended.
        train_dir = train_dir.with_name(train_dir.name + "." + platform.node())
    train_dir.mkdir(parents=True)
    return str(train_dir)
def create_comet_experiment(comet_exp_name, comet_offline=False, outdir=None):
    """Create a comet-ml Experiment (online) or OfflineExperiment; returns None on failure."""
    common_kwargs = dict(
        project_name=comet_exp_name,
        auto_metric_logging=True,
        auto_param_logging=True,
        auto_histogram_weight_logging=True,
        auto_histogram_gradient_logging=False,
        auto_histogram_activation_logging=False,
        auto_output_logging="simple",
    )
    try:
        if comet_offline:
            logging.info("Using comet-ml OfflineExperiment, saving logs locally.")
            if outdir is None:
                # NOTE(review): this ValueError is caught by the except below
                # and only logged as a warning — confirm that is intended.
                raise ValueError("Please specify am output directory when setting comet_offline to True")
            experiment = OfflineExperiment(offline_directory=outdir + "/cometml", **common_kwargs)
        else:
            logging.info("Using comet-ml Experiment, streaming logs to www.comet.ml.")
            experiment = Experiment(**common_kwargs)
    except Exception as e:
        logging.warning("Failed to initialize comet-ml dashboard: {}".format(e))
        experiment = None
    return experiment
def hits_to_features(hit_data, iev, coll, feats):
    """Extract per-hit feature arrays for event *iev* of collection *coll*.

    For tracker hit collections the energy lives in the 'eDep' branch.  A
    'subdetector' integer field is appended based on the collection name
    (0=ECAL, 1=HCAL, 2=MUON, 3=other).
    """
    if "TrackerHit" in coll:
        # map requested feature name -> actual branch name
        new_feats = [(feat, "eDep" if feat == "energy" else feat) for feat in feats]
    else:
        new_feats = [(f, f) for f in feats]

    feat_arr = {f1: hit_data[coll + "." + f2][iev] for f1, f2 in new_feats}

    sdcoll = "subdetector"
    feat_arr[sdcoll] = np.zeros(len(feat_arr["type"]), dtype=np.int32)
    if coll.startswith("ECAL"):
        feat_arr[sdcoll][:] = 0
    elif coll.startswith("HCAL"):
        feat_arr[sdcoll][:] = 1
    elif coll.startswith("MUON"):
        feat_arr[sdcoll][:] = 2
    else:
        feat_arr[sdcoll][:] = 3
    return awkward.Record(feat_arr)
def track_pt(omega):
    """Transverse momentum from the track curvature *omega*: pt = 3e-4 * |4 / omega|."""
    conversion = 3e-4
    factor = 4  # presumably the magnetic field strength in Tesla — confirm against detector config
    return conversion * np.abs(factor / omega)
def track_to_features(prop_data, iev):
    """Build track kinematics (px, py, pz, q) for event *iev* from propagated track data."""
    track_arr = prop_data[track_coll][iev]
    feats_from_track = ["type", "chi2", "ndf", "dEdx", "dEdxError", "radiusOfInnermostHit"]
    ret = {feat: track_arr[track_coll + "." + feat] for feat in feats_from_track}
    n_tr = len(ret["type"])
    # index of the first track state of each track
    trackstate_idx = prop_data[track_coll][track_coll + ".trackStates_begin"][iev]
    for k in ["tanLambda", "D0", "phi", "omega", "Z0", "time"]:
        ret[k] = prop_data["SiTracks_1"]["SiTracks_1." + k][iev][trackstate_idx]

    ret["pt"] = track_pt(ret["omega"])
    ret["px"] = np.cos(ret["phi"]) * ret["pt"]
    ret["py"] = np.sin(ret["phi"]) * ret["pt"]
    ret["pz"] = ret["tanLambda"] * ret["pt"]
    # charge is the sign of the curvature omega
    ret["q"] = ret["omega"].to_numpy().copy()
    ret["q"][ret["q"] > 0] = 1
    ret["q"][ret["q"] < 0] = -1
    return (ret["px"].to_numpy(), ret["py"].to_numpy(), ret["pz"].to_numpy(), ret["q"])
def visualize(sample, data, iev, trk_opacity=0.8):
    """Render one event as an interactive plotly 3D scene and dump it to HTML.

    Writes two files in the current working directory:
      * plot_{sample}_{iev}.html       - the 3D figure
      * plot_{sample}_{iev}_data.html  - HTML tables of the underlying data

    Args:
        sample: sample name, used only in the output file names.
        data: indexable container of events; each event has 'Xelem', 'ycand'
            and 'ygen' entries convertible to DataFrames.
        iev: event index into `data`.
        trk_opacity: opacity of the track/GSF line traces.

    The geometry is schematic: each detector layer is drawn on a cylinder of
    radius proportional to `radius_mult`, with eta mapped linearly onto y.
    """
    Xelem = pandas.DataFrame(data[iev]['Xelem'])
    ycand = pandas.DataFrame(data[iev]['ycand'])
    ygen = pandas.DataFrame(data[iev]['ygen'])
    # Scale factors: eta -> y coordinate, layer index -> cylinder radius.
    eta_range = 1000
    radius_mult = 2000
    # Tracks (typ == 1): polyline origin -> tracker layer, optionally extended
    # to ECAL (r*1.5) and HCAL (r*2) entry points; None breaks the line.
    trk_x = []
    trk_y = []
    trk_z = []
    for (irow, row) in Xelem[(Xelem['typ'] == 1)].iterrows():
        trk_x += [0, ((1 * radius_mult) * np.cos(row['phi']))]
        trk_z += [0, ((1 * radius_mult) * np.sin(row['phi']))]
        trk_y += [0, (eta_range * row['eta'])]
        # phi_ecal == 0 acts as a sentinel for "no extrapolation available".
        if (row['phi_ecal'] != 0):
            trk_x += [((1.5 * radius_mult) * np.cos(row['phi_ecal']))]
            trk_z += [((1.5 * radius_mult) * np.sin(row['phi_ecal']))]
            trk_y += [(eta_range * row['eta_ecal'])]
        if (row['phi_hcal'] != 0):
            trk_x += [((2 * radius_mult) * np.cos(row['phi_hcal']))]
            trk_z += [((2 * radius_mult) * np.sin(row['phi_hcal']))]
            trk_y += [(eta_range * row['eta_hcal'])]
        trk_x += [None]
        trk_z += [None]
        trk_y += [None]
    points_trk = go.Scatter3d(x=trk_x, z=trk_z, y=trk_y, mode='lines', line=dict(color='rgba(10, 10, 10, {})'.format(trk_opacity)), name='Tracks', hoverinfo='skip')
    # GSF tracks (typ == 6): same construction as the tracks above.
    trk_x = []
    trk_y = []
    trk_z = []
    for (irow, row) in Xelem[(Xelem['typ'] == 6)].iterrows():
        trk_x += [0, ((1 * radius_mult) * np.cos(row['phi']))]
        trk_z += [0, ((1 * radius_mult) * np.sin(row['phi']))]
        trk_y += [0, (eta_range * row['eta'])]
        if (row['phi_ecal'] != 0):
            trk_x += [((1.5 * radius_mult) * np.cos(row['phi_ecal']))]
            trk_z += [((1.5 * radius_mult) * np.sin(row['phi_ecal']))]
            trk_y += [(eta_range * row['eta_ecal'])]
        if (row['phi_hcal'] != 0):
            trk_x += [((2 * radius_mult) * np.cos(row['phi_hcal']))]
            trk_z += [((2 * radius_mult) * np.sin(row['phi_hcal']))]
            trk_y += [(eta_range * row['eta_hcal'])]
        trk_x += [None]
        trk_z += [None]
        trk_y += [None]
    points_gsf = go.Scatter3d(x=trk_x, z=trk_z, y=trk_y, mode='lines', line=dict(color='rgba(10, 10, 10, {})'.format(trk_opacity)), name='GSF')
    # Calorimeter-type elements: one marker trace per element type, placed on
    # the cylinder of the corresponding layer; marker size encodes energy.
    # PS1 (typ == 2)
    msk = (Xelem['typ'] == 2)
    points_ps1 = go.Scatter3d(x=((1 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((1 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (1000 * Xelem[msk]['e'])}, name='PS1', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    # PS2 (typ == 3)
    msk = (Xelem['typ'] == 3)
    points_ps2 = go.Scatter3d(x=((1 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((1 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (1000 * Xelem[msk]['e'])}, name='PS2', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    # ECAL clusters (typ == 4), log-scaled marker size
    msk = (Xelem['typ'] == 4)
    points_ecal = go.Scatter3d(x=((1.5 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((1.5 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (10 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='ECAL', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    # HCAL clusters (typ == 5)
    msk = (Xelem['typ'] == 5)
    points_hcal = go.Scatter3d(x=((2 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((2 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (5 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='HCAL', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    # HF electromagnetic (typ == 8)
    msk = (Xelem['typ'] == 8)
    points_hfem = go.Scatter3d(x=((2 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((2 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (5 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='HFEM', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    # HF hadronic (typ == 9)
    msk = (Xelem['typ'] == 9)
    points_hfhad = go.Scatter3d(x=((2 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((2 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (5 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='HFHAD', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    # Superclusters (typ == 10)
    msk = (Xelem['typ'] == 10)
    points_sc = go.Scatter3d(x=((1.5 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((1.5 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (5 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='SC', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    # HO (typ == 11), drawn slightly outside the HCAL cylinder
    msk = (Xelem['typ'] == 11)
    points_ho = go.Scatter3d(x=((2.1 * radius_mult) * np.cos(Xelem[msk]['phi'].values)), z=((2.1 * radius_mult) * np.sin(Xelem[msk]['phi'].values)), y=(eta_range * Xelem[msk]['eta'].values), mode='markers', marker={'symbol': 'square', 'opacity': 0.8, 'size': (5 * np.log10((Xelem[msk]['e'] + 1.0)))}, name='HO', hovertemplate='<b>%{hovertext}</b>', hovertext=['E={:.2f}'.format(x['e']) for (_, x) in Xelem[msk].iterrows()])
    # PF candidates (typ != 0), on an outer shell; phi comes pre-decomposed
    # into cos_phi/sin_phi columns.
    msk = (ycand['typ'] != 0)
    points_cand = go.Scatter3d(x=((2.2 * radius_mult) * ycand[msk]['cos_phi'].values), z=((2.2 * radius_mult) * ycand[msk]['sin_phi'].values), y=(eta_range * ycand[msk]['eta'].values), mode='markers', marker={'symbol': 'x', 'opacity': 0.8, 'color': 'rgba(0, 0, 0, 0.8)', 'size': np.clip((5 * np.log10((ycand[msk]['e'].values + 5.0))), 1, 10)}, hovertemplate='<b>%{hovertext}</b>', hovertext=['{}<br>E={:.2f}<br>eta={:.2f}<br>phi={:.2f}'.format(int(x['typ']), x['e'], x['eta'], np.arctan2(x['sin_phi'], x['cos_phi'])) for (_, x) in ycand[msk].iterrows()], name='PFCand')
    # Generator-level truth particles (typ != 0), on the outermost shell.
    msk = (ygen['typ'] != 0)
    points_gen = go.Scatter3d(x=((2.5 * radius_mult) * ygen[msk]['cos_phi'].values), z=((2.5 * radius_mult) * ygen[msk]['sin_phi'].values), y=(eta_range * ygen[msk]['eta'].values), mode='markers', marker={'symbol': 'circle', 'opacity': 0.8, 'color': 'rgba(50, 0, 0, 0.4)', 'size': np.clip((5 * np.log10((ygen[msk]['e'].values + 5))), 1, 10)}, hovertemplate='<b>%{hovertext}</b>', hovertext=['{}<br>E={:.2f}<br>eta={:.2f}<br>phi={:.2f}'.format(int(x['typ']), x['e'], x['eta'], np.arctan2(x['sin_phi'], x['cos_phi'])) for (_, x) in ygen[msk].iterrows()], name='MLPF truth')
    # Assemble the figure and strip the axes down to an empty cube.
    fig = go.Figure(data=[points_trk, points_gsf, points_ps1, points_ps2, points_ecal, points_hcal, points_hfem, points_hfhad, points_sc, points_ho, points_cand, points_gen])
    fig.update_layout(autosize=True, scene_camera={'eye': dict(x=0.8, y=0.8, z=0.8)}, scene={'xaxis': dict(nticks=1, range=[(- 5000), 5000], showaxeslabels=False, showticklabels=False, showgrid=False, visible=True), 'yaxis': dict(nticks=1, range=[(- 5000), 5000], showaxeslabels=False, showticklabels=False, showgrid=False, visible=True), 'zaxis': dict(nticks=1, range=[(- 5000), 5000], showaxeslabels=False, showticklabels=False, showgrid=False, visible=True)})
    fig.update_layout(legend={'itemsizing': 'constant'})
    # Export the scene plus the raw tables for inspection.
    s = fig.to_html(default_width='1200px', default_height='800px')
    with open('plot_{}_{}.html'.format(sample, iev), 'w') as fi:
        fi.write(s)
    with open('plot_{}_{}_data.html'.format(sample, iev), 'w') as fi:
        fi.write('X')
        fi.write(Xelem.to_html())
        fi.write('ycand')
        fi.write(ycand[(ycand['typ'] != 0)].to_html())
        fi.write('ygen')
        fi.write(ygen[(ygen['typ'] != 0)].to_html())
def node_label_func(n):
    """Multi-line display label for a graph node: type-initial + typ, energy, eta:phi.

    NOTE(review): reads the graph from a global `g` defined by the caller.
    """
    attrs = g.nodes[n]
    return '{0} {1}\nE={2:.2f}\n{3:.1f}:{4:.1f}'.format(n[0].upper(), attrs['typ'], attrs['e'], attrs['eta'], attrs['phi'])
def node_color_func(n):
    """Return the plotting color for a graph node, keyed by its type tag n[0].

    Args:
        n: node identifier tuple whose first element is the type string
           ('gen', 'el', 'pf', 'tp' or 'cp').

    Raises:
        KeyError: for an unknown node type.
    """
    # BUG FIX: the original dict literal listed 'gen' twice with the same
    # value; dict literals silently keep only the last, so the duplicate is
    # removed with no behavior change.
    colors = {'gen': 'blue', 'el': 'gray', 'pf': 'purple', 'tp': 'red', 'cp': 'red'}
    return colors[n[0]]
def plot_energy_stack(energies, pids):
    """Overlay per-PID energy histograms on log-spaced bins (0.1 to 1e6)."""
    unique_pids = np.unique(pids)
    bin_edges = np.logspace((- 1), 6, 61)
    histograms = []
    for pid in unique_pids:
        hist = bh.Histogram(bh.axis.Variable(bin_edges))
        hist.fill(energies[(pids == pid)])
        histograms.append(hist)
    # stack=False: draw overlaid, one labeled line per particle id
    mplhep.histplot(histograms, stack=False, label=[str(p) for p in unique_pids])
    plt.legend()
def to_bh(data, bins, cumulative=False):
    """Fill a boost-histogram from `data` on variable `bins`.

    If cumulative is True, convert in place to a reverse-cumulative histogram
    (bin i holds the count of entries in bin i or above).
    """
    hist = bh.Histogram(bh.axis.Variable(bins))
    hist.fill(data)
    if cumulative:
        hist[:] = (np.sum(hist.values()) - np.cumsum(hist))
    return hist
def load_pickle(fn):
    """Load a pickled list of event dicts, keeping only the jet/MET collections.

    Args:
        fn: path to the pickle file.

    Returns:
        List of dicts, each restricted to the keys 'slimmedGenJets',
        'slimmedJetsPuppi', 'genMetTrue' and 'slimmedMETsPuppi'.
    """
    # BUG FIX: the original leaked the file handle via pickle.load(open(fn)).
    with open(fn, 'rb') as f:
        d = pickle.load(f)
    keys = ('slimmedGenJets', 'slimmedJetsPuppi', 'genMetTrue', 'slimmedMETsPuppi')
    return [{k: it[k] for k in keys} for it in d]
def varbins(*args):
    """Concatenate bin-edge arrays into one, dropping the duplicated last
    edge of every array except the final one."""
    trimmed = [edges[:(- 1)] for edges in args[:(- 1)]]
    trimmed.append(args[(- 1)])
    return np.concatenate(trimmed)
def get_hist_and_merge(files, histname):
    """Read `histname` from each ROOT file and return the summed boost histogram."""
    hists = [uproot.open(fn)[histname].to_boost() for fn in files]
    # sum with hists[0] as the start value to avoid int + histogram
    return sum(hists[1:], hists[0])
def Gauss(x, a, x0, sigma):
    """Unnormalized Gaussian: amplitude `a`, mean `x0`, width `sigma`."""
    deviation = x - x0
    return a * np.exp(-(deviation ** 2) / (2 * (sigma ** 2)))
def fit_response(hist2d, bin_range):
    """Fit a Gaussian to each E_T slice of a 2D response histogram.

    For each bin index in `bin_range` along axis 0 (the E_T axis), fits
    `Gauss` to the projection along axis 1 (the response axis) and records
    the fitted mean and sigma with their uncertainties. As a side effect,
    draws a figure per slice (data with errors + fitted curve).

    Args:
        hist2d: 2D boost-histogram; axis 0 = E_T bins, axis 1 = Delta E_T / E_T.
        bin_range: iterable of axis-0 bin indices to fit.

    Returns:
        Tuple of numpy arrays (centers, means, means_unc, sigmas, sigmas_unc).
    """
    centers = []
    means = []
    means_unc = []
    sigmas = []
    sigmas_unc = []
    for ibin in bin_range:
        print(ibin)
        plt.figure()
        xvals = hist2d.axes[1].centers
        vals = hist2d.values()[ibin]
        # Poisson errors; empty bins get unit error so curve_fit stays finite.
        errs = np.sqrt(vals)
        errs[(vals == 0)] = 1.0
        # Bounded fit: mean in [-10, 10], sigma in [0, 50].
        (parameters1, covariances1) = curve_fit(Gauss, xvals, vals, p0=[1.0, 0.0, 1.0], sigma=errs, maxfev=1000000, method='dogbox', bounds=[((- np.inf), (- 10), 0), (np.inf, 10, 50)])
        plt.errorbar(xvals, vals, errs)
        plt.plot(xvals, Gauss(xvals, *parameters1))
        plt.xlabel('$\\Delta E_T / E_T$')
        plt.title('${} < E_T < {}$'.format(hist2d.axes[0].edges[ibin], hist2d.axes[0].edges[(ibin + 1)]))
        # parameters1 = (amplitude, mean, sigma); diagonal of the covariance
        # gives the parameter variances.
        means.append(parameters1[1])
        means_unc.append(np.sqrt(covariances1[(1, 1)]))
        sigmas.append(parameters1[2])
        sigmas_unc.append(np.sqrt(covariances1[(2, 2)]))
        centers.append(hist2d.axes[0].centers[ibin])
    centers = np.array(centers)
    means = np.array(means)
    means_unc = np.array(means_unc)
    sigmas = np.array(sigmas)
    sigmas_unc = np.array(sigmas_unc)
    return (centers, means, means_unc, sigmas, sigmas_unc)
def yield_from_ds():
    """Generator over the global dataset `dss`, yielding only X/ygen/ycand.

    NOTE(review): `dss` must be defined in the enclosing module/script scope.
    """
    for elem in dss:
        yield {key: elem[key] for key in ('X', 'ygen', 'ycand')}
def particle_has_track(g, particle):
    """Return True if the particle node has at least one edge to a 'track' node."""
    return any(edge[1][0] == 'track' for edge in g.edges(particle))
def get_tower_gen_fracs(g, tower):
    """Collect trackless gen particles linked to a calo tower and split their
    energy into species classes.

    Returns:
        (ptcls, (e_130, e_211, e_22, e_11)) where ptcls is the list of
        contributing particle nodes and the tuple holds summed energies for
        neutral-hadron-like, charged-hadron-like, photon and electron classes.
    """
    energy_nhad = 0.0   # K0L-like / neutral hadrons (pid 130)
    energy_chad = 0.0   # charged-pion-like / charged hadrons (pid 211)
    energy_gamma = 0.0  # photons (pid 22)
    energy_ele = 0.0    # electrons (pid 11)
    ptcls = []
    for edge in g.edges(tower):
        target = edge[1]
        if target[0] != 'particle':
            continue
        # Particles with an associated track are accounted for via the track,
        # not the tower.
        if particle_has_track(g, target):
            continue
        ptcls.append(target)
        pid = abs(g.nodes[target]['pid'])
        charge = abs(g.nodes[target]['charge'])
        energy = g.nodes[target]['energy']
        if pid == 211:
            energy_chad += energy
        elif pid == 130:
            energy_nhad += energy
        elif pid == 22:
            energy_gamma += energy
        elif pid == 11:
            energy_ele += energy
        elif charge == 1:
            # any other charged species counts as a charged hadron
            energy_chad += energy
        else:
            # any other neutral species counts as a neutral hadron
            energy_nhad += energy
    return (ptcls, (energy_nhad, energy_chad, energy_gamma, energy_ele))
def make_tower_array(tower_dict):
    """Encode a calo tower as a fixed 12-element feature vector (type id 1).

    The trailing four zeros pad the vector to the same length as the track
    encoding produced by make_track_array.
    """
    phi = tower_dict['phi']
    features = [
        1,  # element type: tower
        tower_dict['et'],
        tower_dict['eta'],
        np.sin(phi),
        np.cos(phi),
        tower_dict['energy'],
        tower_dict['eem'],
        tower_dict['ehad'],
        0.0,
        0.0,
        0.0,
        0.0,
    ]
    return np.array(features)
def make_track_array(track_dict):
    """Encode a track as a fixed 12-element feature vector (type id 2)."""
    phi = track_dict['phi']
    phi_outer = track_dict['phi_outer']
    features = [
        2,  # element type: track
        track_dict['pt'],
        track_dict['eta'],
        np.sin(phi),
        np.cos(phi),
        track_dict['p'],
        track_dict['eta_outer'],
        np.sin(phi_outer),
        np.cos(phi_outer),
        track_dict['charge'],
        track_dict['is_gen_muon'],
        track_dict['is_gen_electron'],
    ]
    return np.array(features)
def make_gen_array(gen_dict):
    """Encode a gen particle as [pid, charge, pt, eta, sin phi, cos phi, e].

    Returns a zero vector of length 7 when gen_dict is empty/None.
    """
    if not gen_dict:
        return np.zeros(7)
    encoded_pid = gen_pid_encoding.get(abs(gen_dict['pid']), 1)
    # Encoded classes 1, 4 and 5 are charged; they carry the sign of the
    # PDG id, everything else is treated as neutral.
    if encoded_pid in [1, 4, 5]:
        charge = math.copysign(1, gen_dict['pid'])
    else:
        charge = 0
    phi = gen_dict['phi']
    return np.array([encoded_pid, charge, gen_dict['pt'], gen_dict['eta'], np.sin(phi), np.cos(phi), gen_dict['energy']])
def make_cand_array(cand_dict):
    """Encode a PF candidate as [pid, charge, pt, eta, sin phi, cos phi, e].

    Returns a zero vector of length 7 when cand_dict is empty/None; pt and
    energy default to 0 when the candidate lacks them.
    """
    if not cand_dict:
        return np.zeros(7)
    encoded_pid = gen_pid_encoding.get(abs(cand_dict['pid']), 1)
    phi = cand_dict['phi']
    return np.array([
        encoded_pid,
        cand_dict['charge'],
        cand_dict.get('pt', 0),
        cand_dict['eta'],
        np.sin(phi),
        np.cos(phi),
        cand_dict.get('energy', 0),
    ])
def make_triplets(g, tracks, towers, particles, pfparticles):
    """Match reco elements to gen particles and PF candidates.

    Builds (reco, gen, pfcand) triplets: each track is matched to its linked
    gen particle and to an unclaimed charged PF candidate; each tower is
    matched to a merged pseudo-particle built from its trackless gen
    contributions and to an unclaimed PF candidate at identical (eta, phi).

    Args:
        g: event graph (nodes carry kinematic attributes, edges encode links).
        tracks, towers, particles, pfparticles: node lists per category.

    Returns:
        (triplets, remaining_particles, remaining_pfcandidates) where the
        remaining lists contain nodes not matched to any reco element.

    NOTE(review): if a track has no linked particle edge, ptcl stays None and
    remaining_particles.remove(ptcl) raises KeyError - presumably every track
    is guaranteed a particle link upstream; confirm.
    """
    triplets = []
    remaining_particles = set(particles)
    remaining_pfcandidates = set(pfparticles)
    # Pass 1: tracks. Take the first linked gen particle and the first
    # unclaimed charged-type PF candidate reachable from that particle.
    for t in tracks:
        ptcl = None
        for e in g.edges(t):
            if (e[1][0] == 'particle'):
                ptcl = e[1]
                break
        pf_ptcl = None
        for e in g.edges(ptcl):
            if ((e[1][0] in ['pfcharged', 'pfel', 'pfmu']) and (e[1] in remaining_pfcandidates)):
                pf_ptcl = e[1]
                break
        remaining_particles.remove(ptcl)
        if pf_ptcl:
            remaining_pfcandidates.remove(pf_ptcl)
        triplets.append((t, ptcl, pf_ptcl))
    # Pass 2: towers. Merge the trackless gen contributions into one
    # pseudo-particle whose pid is set by the dominant energy fraction.
    for t in towers:
        (ptcls, fracs) = get_tower_gen_fracs(g, t)
        # fracs order: (e_130, e_211, e_22, e_11)
        imax = np.argmax(fracs)
        if (len(ptcls) > 0):
            if (imax == 0):
                pid = 130
            elif (imax == 1):
                pid = 211
            elif (imax == 2):
                pid = 22
            elif (imax == 3):
                pid = 11
            # Claim all contributing particles for this tower.
            for ptcl in ptcls:
                if (ptcl in remaining_particles):
                    remaining_particles.remove(ptcl)
        # Sum the four-vectors of the contributing particles.
        lvs = []
        for ptcl in ptcls:
            lv = uproot_methods.TLorentzVector.from_ptetaphie(g.nodes[ptcl]['pt'], g.nodes[ptcl]['eta'], g.nodes[ptcl]['phi'], g.nodes[ptcl]['energy'])
            lvs.append(lv)
        lv = None
        gen_ptcl = None
        if (len(lvs) > 0):
            lv = sum(lvs[1:], lvs[0])
            gen_ptcl = {'pid': pid, 'pt': lv.pt, 'eta': lv.eta, 'phi': lv.phi, 'energy': lv.energy}
            # Charged hadrons outside tracker acceptance (|eta| > 2.5) are
            # relabeled as neutral hadrons.
            if ((gen_ptcl['pid'] == 211) and (abs(gen_ptcl['eta']) > 2.5)):
                gen_ptcl['pid'] = 130
            # Drop low-energy neutral-hadron pseudo-particles entirely.
            if ((gen_ptcl['pid'] == 130) and (gen_ptcl['energy'] < 9.0)):
                gen_ptcl = None
        # Match an unclaimed PF candidate sitting at exactly the tower's
        # (eta, phi); exact float equality is intentional here.
        found_pf = False
        for pf_ptcl in remaining_pfcandidates:
            if ((g.nodes[pf_ptcl]['eta'] == g.nodes[t]['eta']) and (g.nodes[pf_ptcl]['phi'] == g.nodes[t]['phi'])):
                found_pf = True
                break
        if found_pf:
            remaining_pfcandidates.remove(pf_ptcl)
        else:
            pf_ptcl = None
        triplets.append((t, gen_ptcl, pf_ptcl))
    return (triplets, list(remaining_particles), list(remaining_pfcandidates))
def process_chunk(infile, ev_start, ev_stop, outfile):
    """Convert a range of Delphes events from a ROOT file into matched
    (X, ygen, ycand) numpy arrays and write them as a bz2-compressed pickle.

    For each event, builds a networkx graph linking gen particles
    ('particle' nodes) to reco objects (towers, tracks) and PF candidates,
    then uses make_triplets to produce aligned reco/gen/cand feature rows.

    Args:
        infile: input Delphes ROOT file path.
        ev_start, ev_stop: [start, stop) event range to process.
        outfile: output path ending in '.pkl.bz2'.

    NOTE(review): `save_full_graphs` is a module-level flag defined elsewhere.
    Also note ygen_remaining_all is accumulated but not included in the
    output pickle - presumably intentional; confirm.
    """
    f = ROOT.TFile.Open(infile)
    tree = f.Get('Delphes')
    X_all = []
    ygen_all = []
    ygen_remaining_all = []
    ycand_all = []
    for iev in range(ev_start, ev_stop):
        print('event {}/{} out of {} in the full file'.format(iev, ev_stop, tree.GetEntries()))
        tree.GetEntry(iev)
        # Map each gen-particle object to its index for fast edge building.
        pileupmix = list(tree.PileUpMix)
        pileupmix_idxdict = {}
        for (ip, p) in enumerate(pileupmix):
            pileupmix_idxdict[p] = ip
        towers = list(tree.Tower)
        tracks = list(tree.Track)
        pf_charged = list(tree.PFChargedHadron)
        pf_neutral = list(tree.PFNeutralHadron)
        pf_photon = list(tree.PFPhoton)
        pf_el = list(tree.PFElectron)
        pf_mu = list(tree.PFMuon)
        graph = nx.Graph()
        # Gen particles ('particle' nodes) with their kinematics.
        for i in range(len(pileupmix)):
            node = ('particle', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = pileupmix[i].PID
            graph.nodes[node]['eta'] = pileupmix[i].Eta
            graph.nodes[node]['phi'] = pileupmix[i].Phi
            graph.nodes[node]['pt'] = pileupmix[i].PT
            graph.nodes[node]['charge'] = pileupmix[i].Charge
            graph.nodes[node]['energy'] = pileupmix[i].E
            graph.nodes[node]['is_pu'] = pileupmix[i].IsPU
        # Calo towers, linked to every contributing gen particle.
        for i in range(len(towers)):
            node = ('tower', i)
            graph.add_node(node)
            graph.nodes[node]['eta'] = towers[i].Eta
            graph.nodes[node]['phi'] = towers[i].Phi
            graph.nodes[node]['energy'] = towers[i].E
            graph.nodes[node]['et'] = towers[i].ET
            graph.nodes[node]['eem'] = towers[i].Eem
            graph.nodes[node]['ehad'] = towers[i].Ehad
            for ptcl in towers[i].Particles:
                ip = pileupmix_idxdict[ptcl]
                graph.add_edge(('tower', i), ('particle', ip))
        # Tracks, each linked to its single gen particle.
        for i in range(len(tracks)):
            node = ('track', i)
            graph.add_node(node)
            # momentum magnitude from pT and eta: p = pT * cosh(eta)
            graph.nodes[node]['p'] = (tracks[i].PT * np.cosh(tracks[i].Eta))
            graph.nodes[node]['eta'] = tracks[i].Eta
            graph.nodes[node]['phi'] = tracks[i].Phi
            graph.nodes[node]['eta_outer'] = tracks[i].EtaOuter
            graph.nodes[node]['phi_outer'] = tracks[i].PhiOuter
            graph.nodes[node]['pt'] = tracks[i].PT
            graph.nodes[node]['pid'] = tracks[i].PID
            graph.nodes[node]['charge'] = tracks[i].Charge
            ip = pileupmix_idxdict[tracks[i].Particle.GetObject()]
            graph.add_edge(('track', i), ('particle', ip))
        # PF charged hadrons, linked to their gen particle.
        for i in range(len(pf_charged)):
            node = ('pfcharged', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = pf_charged[i].PID
            graph.nodes[node]['eta'] = pf_charged[i].Eta
            graph.nodes[node]['phi'] = pf_charged[i].Phi
            graph.nodes[node]['pt'] = pf_charged[i].PT
            graph.nodes[node]['charge'] = pf_charged[i].Charge
            ip = pileupmix_idxdict[pf_charged[i].Particle.GetObject()]
            graph.add_edge(('pfcharged', i), ('particle', ip))
        # PF electrons (pid fixed to 11).
        for i in range(len(pf_el)):
            node = ('pfel', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = 11
            graph.nodes[node]['eta'] = pf_el[i].Eta
            graph.nodes[node]['phi'] = pf_el[i].Phi
            graph.nodes[node]['pt'] = pf_el[i].PT
            graph.nodes[node]['charge'] = pf_el[i].Charge
            ip = pileupmix_idxdict[pf_el[i].Particle.GetObject()]
            graph.add_edge(('pfel', i), ('particle', ip))
        # PF muons (pid fixed to 13).
        for i in range(len(pf_mu)):
            node = ('pfmu', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = 13
            graph.nodes[node]['eta'] = pf_mu[i].Eta
            graph.nodes[node]['phi'] = pf_mu[i].Phi
            graph.nodes[node]['pt'] = pf_mu[i].PT
            graph.nodes[node]['charge'] = pf_mu[i].Charge
            ip = pileupmix_idxdict[pf_mu[i].Particle.GetObject()]
            graph.add_edge(('pfmu', i), ('particle', ip))
        # PF neutral hadrons (pid fixed to 130), linked to all contributors.
        for i in range(len(pf_neutral)):
            node = ('pfneutral', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = 130
            graph.nodes[node]['eta'] = pf_neutral[i].Eta
            graph.nodes[node]['phi'] = pf_neutral[i].Phi
            graph.nodes[node]['energy'] = pf_neutral[i].E
            graph.nodes[node]['charge'] = 0
            for ptcl in pf_neutral[i].Particles:
                ip = pileupmix_idxdict[ptcl]
                graph.add_edge(('pfneutral', i), ('particle', ip))
        # PF photons (pid fixed to 22), linked to all contributors.
        for i in range(len(pf_photon)):
            node = ('pfphoton', i)
            graph.add_node(node)
            graph.nodes[node]['pid'] = 22
            graph.nodes[node]['eta'] = pf_photon[i].Eta
            graph.nodes[node]['phi'] = pf_photon[i].Phi
            graph.nodes[node]['energy'] = pf_photon[i].E
            graph.nodes[node]['charge'] = 0
            for ptcl in pf_photon[i].Particles:
                ip = pileupmix_idxdict[ptcl]
                graph.add_edge(('pfphoton', i), ('particle', ip))
        # Optionally dump the first 10 full event graphs for debugging.
        if ((iev < 10) and save_full_graphs):
            nx.readwrite.write_gpickle(graph, outfile.replace('.pkl.bz2', '_graph_{}.pkl'.format(iev)))
        # Partition nodes by category (note: rebinds the tracks/towers names).
        particles = [n for n in graph.nodes if (n[0] == 'particle')]
        pfcand = [n for n in graph.nodes if n[0].startswith('pf')]
        tracks = [n for n in graph.nodes if (n[0] == 'track')]
        towers = [n for n in graph.nodes if (n[0] == 'tower')]
        (triplets, remaining_particles, remaining_pfcandidates) = make_triplets(graph, tracks, towers, particles, pfcand)
        print('remaining PF', len(remaining_pfcandidates))
        for pf in remaining_pfcandidates:
            print(pf, graph.nodes[pf])
        # Build aligned feature rows from the triplets.
        X = []
        ygen = []
        ygen_remaining = []
        ycand = []
        for triplet in triplets:
            (reco, gen, cand) = triplet
            if (reco[0] == 'track'):
                track_dict = graph.nodes[reco]
                gen_dict = graph.nodes[gen]
                # Gen-level lepton flags consumed by make_track_array.
                if (abs(gen_dict['pid']) == 13):
                    track_dict['is_gen_muon'] = 1.0
                else:
                    track_dict['is_gen_muon'] = 0.0
                if (abs(gen_dict['pid']) == 11):
                    track_dict['is_gen_electron'] = 1.0
                else:
                    track_dict['is_gen_electron'] = 0.0
                X.append(make_track_array(track_dict))
                ygen.append(make_gen_array(gen_dict))
            else:
                # For towers, `gen` is already a merged pseudo-particle dict
                # (or None), not a graph node.
                X.append(make_tower_array(graph.nodes[reco]))
                ygen.append(make_gen_array(gen))
            ycand.append(make_cand_array((graph.nodes[cand] if cand else None)))
        # Gen particles not matched to any reco element.
        for prt in remaining_particles:
            ygen_remaining.append(make_gen_array(graph.nodes[prt]))
        X = np.stack(X)
        ygen = np.stack(ygen)
        ygen_remaining = np.stack(ygen_remaining)
        ycand = np.stack(ycand)
        print('X', X.shape, 'ygen', ygen.shape, 'ygen_remaining', ygen_remaining.shape, 'ycand', ycand.shape)
        X_all.append(X)
        ygen_all.append(ygen)
        ygen_remaining_all.append(ygen_remaining)
        ycand_all.append(ycand)
    with bz2.BZ2File(outfile, 'wb') as fi:
        pickle.dump({'X': X_all, 'ygen': ygen_all, 'ycand': ycand_all}, fi)
def process_chunk_args(args):
    """Unpack a (infile, ev_start, ev_stop, outfile) tuple and forward it to
    process_chunk; adapter for multiprocessing map-style APIs."""
    infile, ev_start, ev_stop, outfile = args
    process_chunk(infile, ev_start, ev_stop, outfile)
def chunks(lst, n):
    'Yield successive n-sized chunks from lst.'
    start = 0
    total = len(lst)
    while start < total:
        yield lst[start:(start + n)]
        start += n
def parse_args():
    """Parse command-line arguments; -d/--dir selects the input directory."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-d', '--dir',
        type=str,
        default='parameters/delphes-gnn-skipconn.yaml',
        help='dir containing csv files',
    )
    return parser.parse_args()
def plot_gpu_util(df, cuda_device, ax):
    """Plot GPU utilization [%] vs time for one device onto the given axis."""
    column = 'GPU{}_util'.format(cuda_device)
    ax.plot(df['time'], df[column], alpha=0.8)
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('GPU utilization [%]')
    ax.set_title('GPU{}'.format(cuda_device))
    ax.grid(alpha=0.3)
def plot_gpu_power(df, cuda_device, ax):
    """Plot GPU power draw [W] vs time for one device onto the given axis."""
    column = 'GPU{}_power'.format(cuda_device)
    ax.plot(df['time'], df[column], alpha=0.8)
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('Power consumption [W]')
    ax.set_title('GPU{}'.format(cuda_device))
    ax.grid(alpha=0.3)
def plot_gpu_mem_util(df, cuda_device, ax):
    """Plot GPU memory utilization [%] vs time for one device onto the axis."""
    column = 'GPU{}_mem_util'.format(cuda_device)
    ax.plot(df['time'], df[column], alpha=0.8)
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('GPU memory utilization [%]')
    ax.set_title('GPU{}'.format(cuda_device))
    ax.grid(alpha=0.3)
def plot_gpu_mem_used(df, cuda_device, ax):
    """Plot used GPU memory [MiB] vs time for one device onto the axis."""
    column = 'GPU{}_mem_used'.format(cuda_device)
    ax.plot(df['time'], df[column], alpha=0.8)
    ax.set_xlabel('Time [s]')
    ax.set_ylabel('Used GPU memory [MiB]')
    ax.set_title('GPU{}'.format(cuda_device))
    ax.grid(alpha=0.3)
def plot_dfs(dfs, plot_func, suffix):
    """Draw plot_func for up to four GPUs in a 2x2 grid and save as a jpg.

    NOTE(review): relies on the globals `file` and `args` from the calling
    script - confirm they are in scope where this is used.
    """
    fig, axes = plt.subplots(2, 2, figsize=(12, 9), tight_layout=True)
    # Only show tick labels on the outer edges of the grid.
    for axis in axes.flat:
        axis.label_outer()
    for gpu_index, (frame, axis) in enumerate(zip(dfs, axes.flat)):
        plot_func(frame, gpu_index, axis)
    plt.suptitle('{}'.format(file.stem))
    plt.savefig(args.dir + '/{}_{}.jpg'.format(file.stem, suffix))
class TestGNN(unittest.TestCase):
    """Shape tests for the TF GNN building blocks: pairwise distances,
    graph convolutions and LSH-based message binning."""

    def helper_test_pairwise_dist_shape(self, dist_func):
        """Any pairwise distance maps (B, N, D) x (B, N, D) -> (B, N, N)."""
        lhs = tf.random.normal((2, 128, 32))
        rhs = tf.random.normal((2, 128, 32))
        result = dist_func(lhs, rhs)
        self.assertEqual(result.shape, (2, 128, 128))

    def test_pairwise_l2_dist_shape(self):
        from mlpf.tfmodel.model import pairwise_l2_dist
        self.helper_test_pairwise_dist_shape(pairwise_l2_dist)

    def test_pairwise_l1_dist_shape(self):
        from mlpf.tfmodel.model import pairwise_l1_dist
        self.helper_test_pairwise_dist_shape(pairwise_l1_dist)

    def test_GHConvDense_shape(self):
        """Graph conv keeps the node axis and maps features to output_dim."""
        from mlpf.tfmodel.model import GHConvDense
        layer = GHConvDense(output_dim=128, activation='selu')
        features = tf.random.normal((2, 256, 64))
        adjacency = tf.random.normal((2, 256, 256, 1))
        mask = tf.random.normal((2, 256, 1))
        result = layer((features, adjacency, mask))
        self.assertEqual(result.shape, (2, 256, 128))

    def test_GHConvDense_binned_shape(self):
        """Same as above, but with an extra leading bin axis."""
        from mlpf.tfmodel.model import GHConvDense
        layer = GHConvDense(output_dim=128, activation='selu')
        features = tf.random.normal((2, 4, 64, 64))
        adjacency = tf.random.normal((2, 4, 64, 64, 1))
        mask = tf.random.normal((2, 4, 64, 1))
        result = layer((features, adjacency, mask))
        self.assertEqual(result.shape, (2, 4, 64, 128))

    def test_NodePairGaussianKernel_shape(self):
        """Kernel produces one adjacency channel per node pair."""
        from mlpf.tfmodel.model import NodePairGaussianKernel
        kernel = NodePairGaussianKernel()
        features = tf.random.normal((2, 256, 32))
        mask = tf.random.normal((2, 256, 1))
        result = kernel(features, mask)
        self.assertEqual(result.shape, (2, 256, 256, 1))

    def test_NodePairGaussianKernel_binned_shape(self):
        from mlpf.tfmodel.model import NodePairGaussianKernel
        kernel = NodePairGaussianKernel()
        features = tf.random.normal((2, 4, 64, 32))
        mask = tf.random.normal((2, 4, 64, 1))
        result = kernel(features, mask)
        self.assertEqual(result.shape, (2, 4, 64, 64, 1))

    def test_MessageBuildingLayerLSH_shape(self):
        """256 nodes at bin_size=64 -> 4 bins; reverse_lsh must invert the binning."""
        from mlpf.tfmodel.model import MessageBuildingLayerLSH
        layer = MessageBuildingLayerLSH(bin_size=64, distance_dim=128)
        x_dist = tf.random.normal((2, 256, 128))
        x_features = tf.random.normal((2, 256, 32))
        mask = (tf.random.normal((2, 256)) > 0)
        (bins_split, x_features_binned, dm_binned, msk_f_binned) = layer(x_dist, x_features, mask)
        self.assertEqual(bins_split.shape, (2, 4, 64))
        self.assertEqual(x_features_binned.shape, (2, 4, 64, 32))
        self.assertEqual(dm_binned.shape, (2, 4, 64, 64, 1))
        self.assertEqual(msk_f_binned.shape, (2, 4, 64, 1))
        # Unbinning must exactly restore the original feature tensor.
        from mlpf.tfmodel.model import reverse_lsh
        x_features2 = reverse_lsh(bins_split, x_features_binned)
        self.assertEqual(tf.reduce_sum((x_features - x_features2)).numpy(), 0)
class TestGNNTorchAndTensorflow(unittest.TestCase):
    """Cross-framework equivalence tests: the torch ports of the TF layers
    must match the TF outputs once the TF weights are copied over.

    NOTE(review): the weight copy relies on the exact ordering of
    `nn1.weights` in the TF layers (W_t, b_t, W_h, theta / random rotations);
    the code is kept byte-identical, comments only.
    """

    def test_GHConvDense(self):
        from mlpf.tfmodel.model import GHConvDense
        nn1 = GHConvDense(output_dim=128, activation='selu')
        from mlpf.pyg.gnn_lsh import GHConvDense as GHConvDenseTorch
        nn2 = GHConvDenseTorch(output_dim=128, activation='selu', hidden_dim=64)
        # Random binned inputs: features, adjacency and a binary float mask.
        x = np.random.normal(size=(2, 4, 64, 64)).astype(np.float32)
        adj = np.random.normal(size=(2, 4, 64, 64, 1)).astype(np.float32)
        msk = np.random.normal(size=(2, 4, 64, 1)).astype(np.float32)
        msk = (msk > 0).astype(np.float32)
        # Run both layers once so lazily-built weights exist before copying.
        nn1((tf.convert_to_tensor(x), tf.convert_to_tensor(adj), tf.convert_to_tensor(msk))).numpy()
        nn2((torch.tensor(x), torch.tensor(adj), torch.tensor(msk))).detach().numpy()
        # Copy TF weights into the torch module (order: W_t, b_t, W_h, theta).
        sd = nn2.state_dict()
        sd['W_t'] = torch.from_numpy(nn1.weights[0].numpy())
        sd['b_t'] = torch.from_numpy(nn1.weights[1].numpy())
        sd['W_h'] = torch.from_numpy(nn1.weights[2].numpy())
        sd['theta'] = torch.from_numpy(nn1.weights[3].numpy())
        nn2.load_state_dict(sd)
        out1 = nn1((tf.convert_to_tensor(x), tf.convert_to_tensor(adj), tf.convert_to_tensor(msk))).numpy()
        out2 = nn2((torch.tensor(x), torch.tensor(adj), torch.tensor(msk))).detach().numpy()
        # TOLERANCE is a module-level constant defined elsewhere in the file.
        self.assertLess(np.sum((out1 - out2)), TOLERANCE)

    def test_MessageBuildingLayerLSH(self):
        from mlpf.tfmodel.model import MessageBuildingLayerLSH
        nn1 = MessageBuildingLayerLSH(distance_dim=128, bin_size=64)
        from mlpf.pyg.gnn_lsh import MessageBuildingLayerLSH as MessageBuildingLayerLSHTorch
        nn2 = MessageBuildingLayerLSHTorch(distance_dim=128, bin_size=64)
        # Force a deterministic sort so torch bin assignments can match TF.
        nn2.stable_sort = True
        x_dist = np.random.normal(size=(2, 256, 128)).astype(np.float32)
        x_node = np.random.normal(size=(2, 256, 32)).astype(np.float32)
        msk = np.random.normal(size=(2, 256)).astype(np.float32)
        msk = (msk > 0).astype(bool)
        # Build weights in both, then copy the LSH random rotation matrix.
        nn1(tf.convert_to_tensor(x_dist), tf.convert_to_tensor(x_node), tf.convert_to_tensor(msk))
        nn2(torch.tensor(x_dist), torch.tensor(x_node), torch.tensor(msk))
        sd = nn2.state_dict()
        sd['codebook_random_rotations'] = torch.from_numpy(nn1.weights[0].numpy())
        nn2.load_state_dict(sd)
        out1 = nn1(tf.convert_to_tensor(x_dist), tf.convert_to_tensor(x_node), tf.convert_to_tensor(msk))
        out2 = nn2(torch.tensor(x_dist), torch.tensor(x_node), torch.tensor(msk))
        # Outputs: (bins_split, binned features, distance matrix, binned mask).
        self.assertTrue(np.all((out1[0].numpy() == out2[0].numpy())))
        self.assertLess(np.sum((out1[1].numpy() - out2[1].detach().numpy())), TOLERANCE)
        self.assertLess(np.sum((out1[2].numpy() - out2[2].detach().numpy())), TOLERANCE)
        self.assertEqual(np.sum((out1[3].numpy() - out2[3].detach().numpy())), 0.0)
        # reverse_lsh must restore the original node ordering in both frameworks.
        from mlpf.tfmodel.model import reverse_lsh
        (bins_split, x, dm, msk_f) = out1
        ret = reverse_lsh(bins_split, x, False)
        self.assertTrue(np.all((x_node == ret.numpy())))
        from mlpf.pyg.gnn_lsh import reverse_lsh as reverse_lsh_torch
        (bins_split, x, dm, msk_f) = out2
        ret = reverse_lsh_torch(bins_split, x)
        self.assertTrue(np.all((x_node == ret.detach().numpy())))
def maybe_download(filename, work_directory):
    "Download the data from Yann's website, unless it's already here."
    if not os.path.exists(work_directory):
        os.mkdir(work_directory)
    filepath = os.path.join(work_directory, filename)
    # Early return when the file is already cached locally.
    if os.path.exists(filepath):
        return filepath
    # NOTE(review): urllib.urlretrieve is the Python 2 API; on Python 3 this
    # path would need urllib.request.urlretrieve - confirm target runtime.
    filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
    statinfo = os.stat(filepath)
    print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
    return filepath
def _read32(bytestream): dt = numpy.dtype(numpy.uint32).newbyteorder('>') return int(numpy.frombuffer(bytestream.read(4), dtype=dt))
def extract_images(filename):
    'Extract the images into a 4D uint8 numpy array [index, y, x, depth].'
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        # IDX image files start with magic 2051, then counts and dimensions.
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(('Invalid magic number %d in MNIST image file: %s' % (magic, filename)))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        pixel_bytes = bytestream.read(rows * cols * num_images)
        pixels = numpy.frombuffer(pixel_bytes, dtype=numpy.uint8)
        # Single grayscale channel -> depth of 1.
        return pixels.reshape(num_images, rows, cols, 1)
def dense_to_one_hot(labels_dense, num_classes=10):
    'Convert class labels from scalars to one-hot vectors.'
    count = labels_dense.shape[0]
    one_hot = numpy.zeros((count, num_classes))
    # Flat index of the hot entry for each row: row_offset + label.
    hot_positions = numpy.arange(count) * num_classes + labels_dense.ravel()
    one_hot.flat[hot_positions] = 1
    return one_hot
def extract_labels(filename, one_hot=False):
    'Extract the labels into a 1D uint8 numpy array [index].'
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        # IDX label files start with magic 2049, then the item count.
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(('Invalid magic number %d in MNIST label file: %s' % (magic, filename)))
        num_items = _read32(bytestream)
        raw = bytestream.read(num_items)
        labels = numpy.frombuffer(raw, dtype=numpy.uint8)
    return dense_to_one_hot(labels) if one_hot else labels
class DataSet(object):
    """In-memory MNIST split with sequential mini-batching and per-epoch shuffling."""

    def __init__(self, images, labels, fake_data=False):
        """Store flattened float32 images scaled to [0, 1].

        Args:
            images: uint8 array of shape (N, rows, cols, 1); ignored if fake_data.
            labels: array of length N (scalar or one-hot labels).
            fake_data: if True, skip storage and report 10000 examples.
        """
        if fake_data:
            self._num_examples = 10000
        else:
            assert (images.shape[0] == labels.shape[0]), ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
            self._num_examples = images.shape[0]
            # Expect single-channel images; flatten to (N, rows*cols).
            assert (images.shape[3] == 1)
            images = images.reshape(images.shape[0], (images.shape[1] * images.shape[2]))
            # Rescale from [0, 255] uint8 to [0.0, 1.0] float32.
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, (1.0 / 255.0))
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False):
        'Return the next `batch_size` examples from this data set.'
        if fake_data:
            # BUG FIX: `xrange` is Python 2 only and raises NameError on
            # Python 3 (which the rest of this file targets); use range.
            fake_image = [1.0 for _ in range(784)]
            fake_label = 0
            return ([fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)])
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if (self._index_in_epoch > self._num_examples):
            # Epoch finished: reshuffle and restart from the beginning.
            self._epochs_completed += 1
            perm = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            start = 0
            self._index_in_epoch = batch_size
            assert (batch_size <= self._num_examples)
        end = self._index_in_epoch
        return (self._images[start:end], self._labels[start:end])
def read_data_sets(train_dir, fake_data=False, one_hot=False):
    """Load the MNIST train/validation/test splits into a DataSets holder.

    Downloads the four gz files into train_dir if missing, carves the first
    5000 training examples out as a validation split, and returns an object
    with .train, .validation and .test DataSet attributes.
    """
    class DataSets(object):
        pass
    data_sets = DataSets()
    if fake_data:
        data_sets.train = DataSet([], [], fake_data=True)
        data_sets.validation = DataSet([], [], fake_data=True)
        data_sets.test = DataSet([], [], fake_data=True)
        return data_sets
    TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
    TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
    TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
    TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
    VALIDATION_SIZE = 5000
    # Fetch (if needed) and parse each of the four archives in turn.
    train_images = extract_images(maybe_download(TRAIN_IMAGES, train_dir))
    train_labels = extract_labels(maybe_download(TRAIN_LABELS, train_dir), one_hot=one_hot)
    test_images = extract_images(maybe_download(TEST_IMAGES, train_dir))
    test_labels = extract_labels(maybe_download(TEST_LABELS, train_dir), one_hot=one_hot)
    # Split off the validation set from the front of the training data.
    validation_images = train_images[:VALIDATION_SIZE]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_images = train_images[VALIDATION_SIZE:]
    train_labels = train_labels[VALIDATION_SIZE:]
    data_sets.train = DataSet(train_images, train_labels)
    data_sets.validation = DataSet(validation_images, validation_labels)
    data_sets.test = DataSet(test_images, test_labels)
    return data_sets
def maybe_download(filename, work_directory):
    "Download the data from Yann's website, unless it's already here."
    if not os.path.exists(work_directory):
        os.mkdir(work_directory)
    filepath = os.path.join(work_directory, filename)
    # Skip the network entirely when a cached copy exists.
    if os.path.exists(filepath):
        return filepath
    # NOTE(review): urllib.urlretrieve is the Python 2 API; Python 3 needs
    # urllib.request.urlretrieve - confirm the intended runtime.
    filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
    statinfo = os.stat(filepath)
    print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
    return filepath
def _read32(bytestream): dt = numpy.dtype(numpy.uint32).newbyteorder('>') return int(numpy.frombuffer(bytestream.read(4), dtype=dt))
def extract_images(filename):
    'Extract the images into a 4D uint8 numpy array [index, y, x, depth].'
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        # Header: magic (2051 for images), count, rows, cols - all uint32 BE.
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(('Invalid magic number %d in MNIST image file: %s' % (magic, filename)))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        payload = bytestream.read(rows * cols * num_images)
        flat = numpy.frombuffer(payload, dtype=numpy.uint8)
        return flat.reshape(num_images, rows, cols, 1)
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    one_hot = numpy.zeros((num_labels, num_classes))
    # Place a single 1 per row, at the column named by the class index.
    one_hot[numpy.arange(num_labels), labels_dense.ravel()] = 1
    return one_hot
def extract_labels(filename, one_hot=False):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' % (magic, filename))
        num_items = _read32(bytestream)
        labels = numpy.frombuffer(bytestream.read(num_items), dtype=numpy.uint8)
        # Optionally expand scalar class ids into one-hot rows.
        return dense_to_one_hot(labels) if one_hot else labels
class DataSet(object):
    """In-memory MNIST split supporting shuffled mini-batch iteration.

    Images are flattened to (num_examples, rows*cols) float32 arrays rescaled
    to [0, 1].  With ``fake_data=True`` the images/labels arguments are
    ignored and a placeholder set of 10000 synthetic examples is announced.
    """

    def __init__(self, images, labels, fake_data=False):
        if fake_data:
            self._num_examples = 10000
        else:
            assert images.shape[0] == labels.shape[0], (
                'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
            self._num_examples = images.shape[0]
            # Expect a single (greyscale) channel, then flatten each image.
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            # Rescale from [0, 255] bytes to [0.0, 1.0] floats.
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            # Bugfix: xrange is Python 2 only -- raised NameError on Python 3.
            fake_image = [1.0 for _ in range(784)]
            fake_label = 0
            return ([fake_image for _ in range(batch_size)],
                    [fake_label for _ in range(batch_size)])
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Epoch finished: reshuffle the data and restart from the top.
            self._epochs_completed += 1
            perm = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return (self._images[start:end], self._labels[start:end])
def read_data_sets(train_dir, fake_data=False, one_hot=False):
    """Build the MNIST train/validation/test DataSet triple.

    Returns an object exposing ``train``, ``validation`` and ``test``
    DataSet attributes; with ``fake_data=True`` all three are stubs.
    """
    class DataSets(object):
        pass

    result = DataSets()
    if fake_data:
        result.train = DataSet([], [], fake_data=True)
        result.validation = DataSet([], [], fake_data=True)
        result.test = DataSet([], [], fake_data=True)
        return result

    train_images_name = 'train-images-idx3-ubyte.gz'
    train_labels_name = 'train-labels-idx1-ubyte.gz'
    test_images_name = 't10k-images-idx3-ubyte.gz'
    test_labels_name = 't10k-labels-idx1-ubyte.gz'
    validation_size = 5000  # carve the first 5000 training rows off for validation

    train_images = extract_images(maybe_download(train_images_name, train_dir))
    train_labels = extract_labels(maybe_download(train_labels_name, train_dir), one_hot=one_hot)
    test_images = extract_images(maybe_download(test_images_name, train_dir))
    test_labels = extract_labels(maybe_download(test_labels_name, train_dir), one_hot=one_hot)

    result.validation = DataSet(train_images[:validation_size], train_labels[:validation_size])
    result.train = DataSet(train_images[validation_size:], train_labels[validation_size:])
    result.test = DataSet(test_images, test_labels)
    return result
def make_chain():
    """Random-walk from state 1 until the terminal state is reached.

    Relies on the module-level ``states``/``transitions`` tables; each step
    picks a uniformly random successor of the current state.
    """
    path = [1]
    while path[-1] != states[-1]:
        successors = transitions[path[-1]]
        path.append(successors[np.random.randint(len(successors))])
    return path
def valid_chain(chain):
    """Return True iff *chain* starts at states[0] and follows *transitions*."""
    if not chain or chain[0] != states[0]:
        return False
    # Every consecutive pair must be a legal transition.
    return all(nxt in transitions[cur] for cur, nxt in zip(chain, chain[1:]))
def convert_chain(chain):
    """Map each state in *chain* to its alias string and concatenate them.

    Uses the module-level ``aliases`` lookup table.
    """
    # str.join avoids the quadratic cost of repeated string concatenation.
    return ''.join(aliases[value] for value in chain)
def load_id2any(index_file, format=None):
    """Load a two-column (id<TAB>value) index file.

    Args:
        index_file: path to a TSV file with one ``id<TAB>value`` pair per line.
        format: if ``'toFloat'``, each value is parsed as a Python list
            literal and converted to a list of floats; otherwise the raw
            string is kept.

    Returns:
        (ids, id2any): the ids in file order, and a dict mapping id -> value.
    """
    ids = []
    id2any = dict()
    # 'with' guarantees the handle is closed (the original leaked it), and
    # iterating the file avoids loading everything via readlines().
    with open(index_file) as fspec:
        for line in fspec:
            # Renamed locals: 'id' and 'any' shadowed builtins.
            key, value = line.strip().split('\t')
            ids.append(key)
            if format == 'toFloat':
                # SECURITY: eval() executes arbitrary code from the index
                # file -- only use on trusted data (ast.literal_eval is the
                # safe drop-in if values are plain literals).
                id2any[key] = [float(i) for i in eval(value)]
            else:
                id2any[key] = value
    return (ids, id2any)
def split_magna(ids, id2path):
    """Partition ids into train/val/test sets by their folder hex digit.

    The hex character immediately before the final '/' in each path names
    one of 16 dataset folders: 0-b -> train, c -> validation, d-f -> test.
    """
    splits = {'train': [], 'val': [], 'test': []}
    for track_id in ids:
        path = id2path[track_id]
        slash = path.rfind('/')
        folder = int(path[slash - 1:slash], 16)
        if folder < 12:
            bucket = 'train'
        elif folder < 13:
            bucket = 'val'
        else:
            bucket = 'test'
        splits[bucket].append(track_id)
    return (splits['train'], splits['val'], splits['test'])
def write_gt_file(ids, id2gt, file_name):
    """Write ``id<TAB>groundtruth`` lines for every id not in IDS_ERROR.

    Args:
        ids: iterable of ids to dump, in order.
        id2gt: mapping id -> ground-truth value.
        file_name: destination path (overwritten).
    """
    # 'with' closes the file even if a lookup below raises.
    with open(file_name, 'w') as fw:
        for track_id in ids:
            # IDS_ERROR is a module-level collection of known-bad ids.
            if track_id in IDS_ERROR:
                continue
            fw.write('%s\t%s\n' % (track_id, id2gt[track_id]))
def evaluation(batch_dispatcher, tf_vars, array_cost, pred_array, id_array):
    """Run the model over every batch and accumulate predictions/costs.

    Args:
        batch_dispatcher: iterable yielding dicts with 'X', 'Y' and 'ID'.
        tf_vars: [sess, normalized_y, cost, x, y_, is_train] -- the live TF
            session plus the output tensors and input placeholders.
        array_cost: per-batch costs accumulated so far (may be empty; its
            emptiness also marks the very first call, see below).
        pred_array: predictions accumulated so far (ignored while
            array_cost is empty).
        id_array: example ids accumulated so far (ignored while array_cost
            is empty).

    Returns:
        (array_cost, pred_array, id_array) with this dispatcher's batches
        appended.
    """
    [sess, normalized_y, cost, x, y_, is_train] = tf_vars
    for batch in tqdm(batch_dispatcher):
        # Forward pass only: is_train False disables training behaviour.
        (pred, cost_pred) = sess.run([normalized_y, cost], feed_dict={x: batch['X'], y_: batch['Y'], is_train: False})
        # An empty cost list marks the first batch ever seen: initialise the
        # accumulators instead of concatenating onto the placeholders.
        if (not array_cost):
            pred_array = pred
            id_array = batch['ID']
        else:
            pred_array = np.concatenate((pred_array, pred), axis=0)
            id_array = np.append(id_array, batch['ID'])
        array_cost.append(cost_pred)
    print('predictions', pred_array.shape)
    print('cost', np.mean(array_cost))
    return (array_cost, pred_array, id_array)
def model_number(x, is_training, config):
    """Dispatch on config['model_number'] and build the requested TF graph.

    Args:
        x: input spectrogram batch tensor.
        is_training: bool tensor controlling batch-norm/dropout.
        config: experiment configuration; must contain 'model_number', and
            for models >= 10 also config['audio_rep']['n_mels'].

    Returns:
        The output (logits) tensor of the selected architecture.

    Raises:
        RuntimeError: if the model number is unknown.
    """
    if (config['model_number'] == 0):
        print('\nMODEL: Dieleman | BN input')
        return models_baselines.dieleman(x, is_training, config)
    elif (config['model_number'] == 1):
        print('\nMODEL: VGG 32 | BN input')
        return models_baselines.vgg(x, is_training, config, 32)
    elif (config['model_number'] == 2):
        print('\nMODEL: VGG 128 | BN input')
        return models_baselines.vgg(x, is_training, config, 128)
    elif (config['model_number'] == 3):
        print('\nMODEL: Timbre | BN input')
        return models_baselines.timbre(x, is_training, config, num_filt=1)
    elif (config['model_number'] == 10):
        # Musically-motivated front-end -> residual mid-end -> global pooling.
        print('\nMODEL: BN input > [7, 70%][7, 40%] + temporal > RESIDUAL > GLOBAL POOLING')
        frontend_features_list = frontend.musically_motivated_cnns(x, is_training, config['audio_rep']['n_mels'], num_filt=1.6, type='7774timbraltemporal')
        frontend_features = tf.concat(frontend_features_list, 2)
        midend_features_list = midend.dense_cnns(frontend_features, is_training, 64)
        # Element [3] is the deepest residual output (see dense_cnns return).
        midend_features = midend_features_list[3]
        return backend.temporal_pooling(midend_features, is_training, 50, 200, type='globalpool')
    elif (config['model_number'] == 11):
        # Same front-end, but concatenating every mid-end layer ("dense").
        print('\nMODEL: BN input > [7, 70%][7, 40%] + temporal > DENSE > GLOBAL POOLING')
        frontend_features_list = frontend.musically_motivated_cnns(x, is_training, config['audio_rep']['n_mels'], num_filt=1.6, type='7774timbraltemporal')
        frontend_features = tf.concat(frontend_features_list, 2)
        midend_features_list = midend.dense_cnns(frontend_features, is_training, 64)
        midend_features = tf.concat(midend_features_list, 2)
        return backend.temporal_pooling(midend_features, is_training, 50, 200, type='globalpool')
    elif (config['model_number'] == 12):
        # Timbral-only front-end with attention + positional-encoding pooling.
        print('\nMODEL: BN input > [7, 40%] > DENSE > ATTENTION + POSITIONAL ENCODING')
        frontend_features_list = frontend.musically_motivated_cnns(x, is_training, config['audio_rep']['n_mels'], num_filt=4.5, type='74timbral')
        frontend_features = tf.concat(frontend_features_list, 2)
        midend_features_list = midend.dense_cnns(frontend_features, is_training, 64)
        midend_features = tf.concat(midend_features_list, 2)
        return backend.temporal_pooling(midend_features, is_training, 50, 200, type='attention_positional')
    elif (config['model_number'] == 13):
        print('\nMODEL: BN input > [7, 40%] > DENSE > AUTOPOOL')
        frontend_features_list = frontend.musically_motivated_cnns(x, is_training, config['audio_rep']['n_mels'], num_filt=4.5, type='74timbral')
        frontend_features = tf.concat(frontend_features_list, 2)
        midend_features_list = midend.dense_cnns(frontend_features, is_training, 64)
        midend_features = tf.concat(midend_features_list, 2)
        return backend.temporal_pooling(midend_features, is_training, 50, 200, type='autopool')
    elif (config['model_number'] == 14):
        print('\nMODEL: BN input > [7, 70%][7, 40%] + temporal > RESIDUAL > RNN')
        frontend_features_list = frontend.musically_motivated_cnns(x, is_training, config['audio_rep']['n_mels'], num_filt=1.6, type='7774timbraltemporal')
        frontend_features = tf.concat(frontend_features_list, 2)
        midend_features_list = midend.dense_cnns(frontend_features, is_training, 64)
        midend_features = midend_features_list[3]
        return backend.temporal_pooling(midend_features, is_training, 50, 200, type='rnn')
    raise RuntimeError("ERROR: Model {} can't be found!".format(config['model_number']))
def dieleman(x, is_training, config):
    """Dieleman-style baseline CNN over a spectrogram.

    Args:
        x: input batch tensor (batch, time, freq).
        is_training: bool tensor controlling batch normalization.
        config: dict with 'yInput' (freq bins) and 'num_classes_dataset'.

    Returns:
        Unnormalised logits of shape (batch, num_classes_dataset).
    """
    # NOTE(review): str(x.get_shape) stringifies the bound method, not the
    # shape; str(x.get_shape()) was probably intended (same in prints below).
    print(('Input: ' + str(x.get_shape)))
    input_layer = tf.expand_dims(x, 3)
    bn_input = tf.compat.v1.layers.batch_normalization(input_layer, training=is_training)
    # First conv spans the full frequency axis, then max-pool over time.
    conv1 = tf.compat.v1.layers.conv2d(inputs=bn_input, filters=32, kernel_size=[8, config['yInput']], padding='valid', activation=tf.nn.relu, name='1cnnOut', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=conv1, pool_size=[4, 1], strides=[4, 1], name='1-pool')
    # Move the channel axis into the "frequency" position for the next conv.
    pool1_rs = tf.reshape(pool1, [(- 1), int(pool1.shape[1]), int(pool1.shape[3]), 1])
    print(('\t\t' + str(pool1_rs.get_shape)))
    conv2 = tf.compat.v1.layers.conv2d(inputs=pool1_rs, filters=32, kernel_size=[8, pool1_rs.shape[2]], padding='valid', activation=tf.nn.relu, name='2cnnOut', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    pool2 = tf.compat.v1.layers.max_pooling2d(inputs=conv2, pool_size=[4, 1], strides=[4, 1], name='2-pool')
    # Flatten all remaining axes ahead of the dense layers.
    flat_pool2 = tf.reshape(pool2, [(- 1), int(((pool2.shape[1] * pool2.shape[2]) * pool2.shape[3]))])
    print(('\t\t' + str(flat_pool2.shape)))
    dense = tf.compat.v1.layers.dense(inputs=flat_pool2, activation=tf.nn.relu, units=100, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    output = tf.compat.v1.layers.dense(inputs=dense, activation=None, units=config['num_classes_dataset'], kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    print(('output: ' + str(output.get_shape)))
    return output
def vgg(x, is_training, config, num_filters=32):
    """VGG-style baseline: five conv/BN/pool/dropout stages + dense head.

    Consistency fix: the original mixed ``tf.layers.*`` and
    ``tf.compat.v1.layers.*`` calls; in TF 1.x these are the same
    implementation, so everything now uses the compat.v1 namespace the rest
    of the file uses.

    Args:
        x: input batch tensor (batch, time, freq).
        is_training: bool tensor controlling batch-norm and dropout.
        config: dict providing 'num_classes_dataset'.
        num_filters: number of filters in every conv layer.

    Returns:
        Unnormalised logits, shape (batch, num_classes_dataset).
    """
    print('Input: ' + str(x.get_shape))
    input_layer = tf.expand_dims(x, 3)
    bn_input = tf.compat.v1.layers.batch_normalization(input_layer, training=is_training)

    conv1 = tf.compat.v1.layers.conv2d(inputs=bn_input, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name='1CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv1 = tf.compat.v1.layers.batch_normalization(conv1, training=is_training)
    # NOTE(review): pool_size [4, 1] with strides [2, 2] differs from every
    # later stage ([2, 2]/[2, 2]) -- confirm this asymmetry is intended.
    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv1, pool_size=[4, 1], strides=[2, 2])
    print('pool1: ' + str(pool1.get_shape))
    do_pool1 = tf.compat.v1.layers.dropout(pool1, rate=0.25, training=is_training)

    conv2 = tf.compat.v1.layers.conv2d(inputs=do_pool1, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name='2CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv2 = tf.compat.v1.layers.batch_normalization(conv2, training=is_training)
    pool2 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv2, pool_size=[2, 2], strides=[2, 2])
    print('pool2: ' + str(pool2.get_shape))
    do_pool2 = tf.compat.v1.layers.dropout(pool2, rate=0.25, training=is_training)

    conv3 = tf.compat.v1.layers.conv2d(inputs=do_pool2, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name='3CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv3 = tf.compat.v1.layers.batch_normalization(conv3, training=is_training)
    pool3 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv3, pool_size=[2, 2], strides=[2, 2])
    print('pool3: ' + str(pool3.get_shape))
    do_pool3 = tf.compat.v1.layers.dropout(pool3, rate=0.25, training=is_training)

    conv4 = tf.compat.v1.layers.conv2d(inputs=do_pool3, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name='4CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv4 = tf.compat.v1.layers.batch_normalization(conv4, training=is_training)
    pool4 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv4, pool_size=[2, 2], strides=[2, 2])
    print('pool4: ' + str(pool4.get_shape))
    do_pool4 = tf.compat.v1.layers.dropout(pool4, rate=0.25, training=is_training)

    conv5 = tf.compat.v1.layers.conv2d(inputs=do_pool4, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name='5CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv5 = tf.compat.v1.layers.batch_normalization(conv5, training=is_training)
    pool5 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv5, pool_size=[4, 4], strides=[4, 4])
    print('pool5: ' + str(pool5.get_shape))

    flat_pool5 = tf.contrib.layers.flatten(pool5)
    do_pool5 = tf.compat.v1.layers.dropout(flat_pool5, rate=0.5, training=is_training)
    output = tf.compat.v1.layers.dense(inputs=do_pool5, activation=None, units=config['num_classes_dataset'], kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    print('output: ' + str(output.get_shape))
    return output
def timbre(x, is_training, config, num_filt=1):
    """Timbre baseline: 12 parallel vertical conv branches + small back-end.

    The front-end applies convolutions for every combination of temporal
    height {7, 5, 3, 1} and frequency width {80%, 60%, 20% of yInput}; each
    branch is batch-normalised, max-pooled across the whole frequency axis
    and squeezed to (batch, time, filters).  The concatenated branch
    features feed a conv/pool/dense back-end.

    Args:
        x: input batch tensor (batch, time, freq).
        is_training: bool tensor for batch normalization.
        config: dict with 'yInput' (freq bins) and 'num_classes_dataset'.
        num_filt: multiplier on the per-branch filter counts.

    Returns:
        Unnormalised logits of shape (batch, num_classes_dataset).
    """
    print(('Input: ' + str(x.get_shape)))
    expanded_layer = tf.expand_dims(x, 3)
    input_layer = tf.compat.v1.layers.batch_normalization(expanded_layer, training=is_training)
    # Symmetric zero-padding in time so height-7/5/3 'valid' convs preserve
    # the temporal length (padding only in time, never in frequency).
    input_pad_7 = tf.pad(input_layer, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    input_pad_5 = tf.pad(input_layer, [[0, 0], [2, 2], [0, 0], [0, 0]], 'CONSTANT')
    input_pad_3 = tf.pad(input_layer, [[0, 0], [1, 1], [0, 0], [0, 0]], 'CONSTANT')
    # --- Branches spanning 80% of the frequency axis (heights 7/5/3/1). ---
    conv1 = tf.compat.v1.layers.conv2d(inputs=input_pad_7, filters=(3 * num_filt), kernel_size=[7, int((0.8 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv1 = tf.compat.v1.layers.batch_normalization(conv1, training=is_training)
    # Pool over the whole remaining frequency axis, then drop that axis.
    pool1 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv1, pool_size=[1, bn_conv1.shape[2]], strides=[1, bn_conv1.shape[2]])
    p1 = tf.squeeze(pool1, [2])
    conv2 = tf.compat.v1.layers.conv2d(inputs=input_pad_5, filters=(3 * num_filt), kernel_size=[5, int((0.8 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv2 = tf.compat.v1.layers.batch_normalization(conv2, training=is_training)
    pool2 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv2, pool_size=[1, bn_conv2.shape[2]], strides=[1, bn_conv2.shape[2]])
    p2 = tf.squeeze(pool2, [2])
    conv3 = tf.compat.v1.layers.conv2d(inputs=input_pad_3, filters=(6 * num_filt), kernel_size=[3, int((0.8 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv3 = tf.compat.v1.layers.batch_normalization(conv3, training=is_training)
    pool3 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv3, pool_size=[1, bn_conv3.shape[2]], strides=[1, bn_conv3.shape[2]])
    p3 = tf.squeeze(pool3, [2])
    conv4 = tf.compat.v1.layers.conv2d(inputs=input_layer, filters=(10 * num_filt), kernel_size=[1, int((0.8 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv4 = tf.compat.v1.layers.batch_normalization(conv4, training=is_training)
    pool4 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv4, pool_size=[1, bn_conv4.shape[2]], strides=[1, bn_conv4.shape[2]])
    p4 = tf.squeeze(pool4, [2])
    # --- Branches spanning 60% of the frequency axis. ---
    conv5 = tf.compat.v1.layers.conv2d(inputs=input_pad_7, filters=(5 * num_filt), kernel_size=[7, int((0.6 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv5 = tf.compat.v1.layers.batch_normalization(conv5, training=is_training)
    pool5 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv5, pool_size=[1, bn_conv5.shape[2]], strides=[1, bn_conv5.shape[2]])
    p5 = tf.squeeze(pool5, [2])
    conv6 = tf.compat.v1.layers.conv2d(inputs=input_pad_5, filters=(5 * num_filt), kernel_size=[5, int((0.6 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv6 = tf.compat.v1.layers.batch_normalization(conv6, training=is_training)
    pool6 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv6, pool_size=[1, bn_conv6.shape[2]], strides=[1, bn_conv6.shape[2]])
    p6 = tf.squeeze(pool6, [2])
    conv7 = tf.compat.v1.layers.conv2d(inputs=input_pad_3, filters=(10 * num_filt), kernel_size=[3, int((0.6 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv7 = tf.compat.v1.layers.batch_normalization(conv7, training=is_training)
    pool7 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv7, pool_size=[1, bn_conv7.shape[2]], strides=[1, bn_conv7.shape[2]])
    p7 = tf.squeeze(pool7, [2])
    conv8 = tf.compat.v1.layers.conv2d(inputs=input_layer, filters=(15 * num_filt), kernel_size=[1, int((0.6 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv8 = tf.compat.v1.layers.batch_normalization(conv8, training=is_training)
    pool8 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv8, pool_size=[1, bn_conv8.shape[2]], strides=[1, bn_conv8.shape[2]])
    p8 = tf.squeeze(pool8, [2])
    # --- Branches spanning 20% of the frequency axis. ---
    conv9 = tf.compat.v1.layers.conv2d(inputs=input_pad_7, filters=(5 * num_filt), kernel_size=[7, int((0.2 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv9 = tf.compat.v1.layers.batch_normalization(conv9, training=is_training)
    pool9 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv9, pool_size=[1, bn_conv9.shape[2]], strides=[1, bn_conv9.shape[2]])
    p9 = tf.squeeze(pool9, [2])
    conv10 = tf.compat.v1.layers.conv2d(inputs=input_pad_5, filters=(5 * num_filt), kernel_size=[5, int((0.2 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv10 = tf.compat.v1.layers.batch_normalization(conv10, training=is_training)
    pool10 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv10, pool_size=[1, bn_conv10.shape[2]], strides=[1, bn_conv10.shape[2]])
    p10 = tf.squeeze(pool10, [2])
    conv11 = tf.compat.v1.layers.conv2d(inputs=input_pad_3, filters=(10 * num_filt), kernel_size=[3, int((0.2 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv11 = tf.compat.v1.layers.batch_normalization(conv11, training=is_training)
    pool11 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv11, pool_size=[1, bn_conv11.shape[2]], strides=[1, bn_conv11.shape[2]])
    p11 = tf.squeeze(pool11, [2])
    conv12 = tf.compat.v1.layers.conv2d(inputs=input_layer, filters=(15 * num_filt), kernel_size=[1, int((0.2 * config['yInput']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv12 = tf.compat.v1.layers.batch_normalization(conv12, training=is_training)
    pool12 = tf.compat.v1.layers.max_pooling2d(inputs=bn_conv12, pool_size=[1, bn_conv12.shape[2]], strides=[1, bn_conv12.shape[2]])
    p12 = tf.squeeze(pool12, [2])
    # Concatenate all branch features along the channel axis.
    pool = tf.concat([p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12], 2)
    out_front_end = tf.expand_dims(pool, 3)
    # Back-end.  NOTE(review): the names conv2/pool2/flat_pool2 below shadow
    # the front-end branch-2 tensors defined above.
    conv2 = tf.compat.v1.layers.conv2d(inputs=out_front_end, filters=32, kernel_size=[8, out_front_end.shape[2]], padding='valid', activation=tf.nn.relu, name='2cnnOut', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    print(conv2.get_shape)
    pool2 = tf.compat.v1.layers.max_pooling2d(inputs=conv2, pool_size=[4, 1], strides=[4, 1], name='2-pool')
    print(pool2.get_shape)
    flat_pool2 = tf.reshape(pool2, [(- 1), int(((pool2.shape[1] * pool2.shape[2]) * pool2.shape[3]))])
    print(flat_pool2.shape)
    dense = tf.compat.v1.layers.dense(inputs=flat_pool2, activation=tf.nn.relu, units=100, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    output = tf.compat.v1.layers.dense(inputs=dense, activation=None, units=config['num_classes_dataset'], kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    return output
def musically_motivated_cnns(x, is_training, yInput, num_filt, type):
    """Front-end of vertical (timbral) and horizontal (temporal) CNN filters.

    Args:
        x: input spectrogram batch tensor (batch, time, freq).
        is_training: bool tensor for batch normalisation.
        yInput: number of frequency bins (vertical filter widths scale with it).
        num_filt: multiplier on the per-block filter counts.
        type: which filter set to build; supported values are
            '7774timbraltemporal' and '74timbral'.

    Returns:
        A list of (batch, time, filters) feature tensors, one per block.

    Raises:
        ValueError: for an unsupported *type* (the original silently
            returned None, which crashed later inside tf.concat).
    """
    expanded_layer = tf.expand_dims(x, 3)
    input_layer = tf.compat.v1.layers.batch_normalization(expanded_layer, training=is_training)
    # Zero-pad 3 frames on each side so height-7 'valid' convs preserve the
    # temporal length.  (The original computed this pad twice; once is enough.)
    input_pad_7 = tf.pad(input_layer, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    if '74' in type:
        # 7-frames-tall filters spanning 40% of the frequency axis.
        f74 = timbral_block(inputs=input_pad_7, filters=int(num_filt * 128),
                            kernel_size=[7, int(0.4 * yInput)], is_training=is_training)
    if '77' in type:
        # 7-frames-tall filters spanning 70% of the frequency axis.
        f77 = timbral_block(inputs=input_pad_7, filters=int(num_filt * 128),
                            kernel_size=[7, int(0.7 * yInput)], is_training=is_training)
    if 'temporal' in type:
        # Frequency-independent temporal filters of decreasing length.
        s1 = tempo_block(inputs=input_layer, filters=int(num_filt * 32), kernel_size=[128, 1], is_training=is_training)
        s2 = tempo_block(inputs=input_layer, filters=int(num_filt * 32), kernel_size=[64, 1], is_training=is_training)
        s3 = tempo_block(inputs=input_layer, filters=int(num_filt * 32), kernel_size=[32, 1], is_training=is_training)
    if type == '7774timbraltemporal':
        return [f74, f77, s1, s2, s3]
    elif type == '74timbral':
        return [f74]
    raise ValueError('Unsupported front-end type: {}'.format(type))
def timbral_block(inputs, filters, kernel_size, is_training, padding='valid', activation=tf.nn.relu):
    """Conv -> batch-norm -> max-pool over the full frequency axis.

    The pooling collapses axis 2 (frequency), which is then squeezed away,
    yielding a (batch, time, filters) tensor.
    """
    features = tf.compat.v1.layers.conv2d(inputs=inputs, filters=filters,
                                          kernel_size=kernel_size,
                                          padding=padding, activation=activation)
    normalized = tf.compat.v1.layers.batch_normalization(features, training=is_training)
    freq_bins = normalized.shape[2]
    pooled = tf.compat.v1.layers.max_pooling2d(inputs=normalized,
                                               pool_size=[1, freq_bins],
                                               strides=[1, freq_bins])
    return tf.squeeze(pooled, [2])
def tempo_block(inputs, filters, kernel_size, is_training, padding='same', activation=tf.nn.relu):
    """Temporal conv -> batch-norm -> max-pool over the full frequency axis.

    Identical to timbral_block except that it defaults to 'same' padding
    (temporal filters keep the time length without explicit pre-padding).
    """
    features = tf.compat.v1.layers.conv2d(inputs=inputs, filters=filters,
                                          kernel_size=kernel_size,
                                          padding=padding, activation=activation)
    normalized = tf.compat.v1.layers.batch_normalization(features, training=is_training)
    freq_bins = normalized.shape[2]
    pooled = tf.compat.v1.layers.max_pooling2d(inputs=normalized,
                                               pool_size=[1, freq_bins],
                                               strides=[1, freq_bins])
    return tf.squeeze(pooled, [2])
def dense_cnns(front_end_output, is_training, num_filt):
    """Mid-end: three padded 1-D convs with residual connections.

    Args:
        front_end_output: (batch, time, features) front-end tensor.
        is_training: bool tensor for batch normalization.
        num_filt: number of filters per conv layer.

    Returns:
        [front_end_output, bn_conv1, res_conv2, res_conv3] -- the input plus
        each successive layer output (used either concatenated, "dense", or
        by picking element [3], the deepest residual output).
    """
    # Pad 3 steps on each side so kernel_size=7 'valid' convs keep the length.
    front_end_pad = tf.pad(front_end_output, [[0, 0], [3, 3], [0, 0]], 'CONSTANT')
    conv1 = tf.compat.v1.layers.conv1d(inputs=front_end_pad, filters=num_filt, kernel_size=7, padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv1 = tf.compat.v1.layers.batch_normalization(conv1, training=is_training)
    bn_conv1_pad = tf.pad(bn_conv1, [[0, 0], [3, 3], [0, 0]], 'CONSTANT')
    conv2 = tf.compat.v1.layers.conv1d(inputs=bn_conv1_pad, filters=num_filt, kernel_size=7, padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv2 = tf.compat.v1.layers.batch_normalization(conv2, training=is_training)
    # NOTE(review): bn_conv2 (and bn_conv3 below) are computed but never
    # used -- the residual sums add the raw conv outputs instead.  Confirm
    # whether the batch-normalised tensors were meant to be summed here.
    res_conv2 = tf.add(conv2, bn_conv1)
    bn_conv2_pad = tf.pad(res_conv2, [[0, 0], [3, 3], [0, 0]], 'CONSTANT')
    conv3 = tf.compat.v1.layers.conv1d(inputs=bn_conv2_pad, filters=num_filt, kernel_size=7, padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    bn_conv3 = tf.compat.v1.layers.batch_normalization(conv3, training=is_training)
    res_conv3 = tf.add(conv3, res_conv2)
    return [front_end_output, bn_conv1, res_conv2, res_conv3]
def compute_audio_repr(audio_file, audio_repr_file):
    """Compute and pickle the audio representation of one file.

    Depending on the module-level ``config``, the representation is either
    the raw waveform, shape (samples, 1), or a mel spectrogram, shape
    (frames, n_mels); it is stored as float16.

    Returns:
        Number of rows (time steps) in the stored representation.

    Raises:
        ValueError: if the config selects neither the waveform nor the mel
            representation (the original fell through with ``audio_repr``
            unbound and died with a NameError).
    """
    audio, sr = librosa.load(audio_file, sr=config['resample_sr'])
    if config['type'] == 'waveform':
        audio_repr = np.expand_dims(audio, axis=1)
    elif config['spectrogram_type'] == 'mel':
        audio_repr = librosa.feature.melspectrogram(
            y=audio, sr=sr, hop_length=config['hop'],
            n_fft=config['n_fft'], n_mels=config['n_mels']).T
        print(audio_repr.shape)
    else:
        raise ValueError("Unsupported audio representation config: type=%r spectrogram_type=%r"
                         % (config.get('type'), config.get('spectrogram_type')))
    length = audio_repr.shape[0]
    audio_repr = audio_repr.astype(np.float16)
    # 'with' guarantees the pickle file is closed even if dump fails.
    with open(audio_repr_file, 'wb') as f:
        pickle.dump(audio_repr, f)
    return length
def do_process(files, index):
    """Compute the audio representation of ``files[index]`` and log the result.

    Each entry of *files* is (id, audio_file, audio_repr_file).  On success a
    line is appended to the machine-local index TSV; any failure is recorded
    in the machine-local errors file instead of being raised, so a batch run
    can continue past bad inputs.
    """
    # Defined up-front so the except block can always log something -- the
    # original raised NameError there when the unpacking itself failed.
    audio_file = '<unknown>'
    try:
        track_id, audio_file, audio_repr_file = files[index]
        # Make sure the target directory exists.
        target_dir = audio_repr_file[:audio_repr_file.rfind('/') + 1]
        if not os.path.exists(target_dir):
            Path(target_dir).mkdir(parents=True, exist_ok=True)
        compute_audio_repr(audio_file, audio_repr_file)
        # Append (id, relative repr path, relative audio path) to the index.
        index_path = (config_file.DATA_FOLDER + config['audio_representation_folder']
                      + 'index_' + str(config['machine_i']) + '.tsv')
        with open(index_path, 'a') as fw:
            fw.write('%s\t%s\t%s\n' % (track_id,
                                       audio_repr_file[len(config_file.DATA_FOLDER):],
                                       audio_file[len(config_file.DATA_FOLDER):]))
        print(str(index) + '/' + str(len(files)) + ' Computed: %s' % audio_file)
    except Exception as e:
        errors_path = (config_file.DATA_FOLDER + config['audio_representation_folder']
                       + 'errors' + str(config['machine_i']) + '.txt')
        with open(errors_path, 'a') as ferrors:
            ferrors.write(audio_file + '\n')
            ferrors.write(str(e))
        print('Error computing audio representation: ', audio_file)
        print(str(e))
def process_files(files):
    """Compute representations for every file, in parallel unless DEBUG."""
    if DEBUG:
        print('WARNING: Parallelization is not used!')
        for index in range(len(files)):
            do_process(files, index)
    else:
        # Fan the work out over the configured number of worker processes.
        jobs = (delayed(do_process)(files, index) for index in range(len(files)))
        Parallel(n_jobs=config['num_processing_units'])(jobs)
class Data():
    """Standard data container for train/test splits.

    Holds X/y arrays either as numpy arrays or torch tensors; the
    ``device``/``dtype`` setters migrate all four arrays in place.
    """

    def __init__(self):
        self.X_train = None   # training inputs (np.ndarray or torch.Tensor)
        self.y_train = None   # training targets
        self.X_test = None    # test inputs
        self.y_test = None    # test targets
        self.__device = None  # 'cpu' or 'gpu' once set via the setter
        self.__dtype = None   # 'float' or 'double' once set via the setter

    @property
    def device(self):
        return self.__device

    @property
    def dtype(self):
        return self.__dtype

    @device.setter
    def device(self, d):
        # Moving to a device also converts numpy arrays into torch tensors.
        if (d == 'cpu'):
            self.__to_cpu()
        elif (d == 'gpu'):
            self.__to_gpu()
        else:
            raise ValueError
        self.__device = d

    @dtype.setter
    def dtype(self, d):
        if (d == 'float'):
            self.__to_float()
        elif (d == 'double'):
            self.__to_double()
        else:
            raise ValueError
        self.__dtype = d

    @property
    def Device(self):
        # torch.device counterpart of the string flag; None while unset.
        if (self.__device == 'cpu'):
            return torch.device('cpu')
        elif (self.__device == 'gpu'):
            return torch.device('cuda')

    @property
    def Dtype(self):
        # torch dtype counterpart of the string flag; None while unset.
        if (self.__dtype == 'float'):
            return torch.float32
        elif (self.__dtype == 'double'):
            return torch.float64

    @property
    def dim(self):
        # Input feature dimension: size of the last axis of X_train.
        if isinstance(self.X_train, np.ndarray):
            return self.X_train.shape[(- 1)]
        elif isinstance(self.X_train, torch.Tensor):
            return self.X_train.size((- 1))

    @property
    def K(self):
        # Output dimension: size of the last axis of y_train.
        if isinstance(self.y_train, np.ndarray):
            return self.y_train.shape[(- 1)]
        elif isinstance(self.y_train, torch.Tensor):
            return self.y_train.size((- 1))

    @property
    def X_train_np(self):
        return Data.to_np(self.X_train)

    @property
    def y_train_np(self):
        return Data.to_np(self.y_train)

    @property
    def X_test_np(self):
        return Data.to_np(self.X_test)

    @property
    def y_test_np(self):
        return Data.to_np(self.y_test)

    @staticmethod
    def to_np(d):
        """Return *d* as a numpy array (pass-through for None/ndarray)."""
        if (isinstance(d, np.ndarray) or (d is None)):
            return d
        elif isinstance(d, torch.Tensor):
            return d.cpu().detach().numpy()
        else:
            raise ValueError

    def __to_cpu(self):
        # NOTE(review): numpy inputs are always promoted to DoubleTensor,
        # regardless of any dtype flag -- confirm that is intended.
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            if isinstance(getattr(self, d), np.ndarray):
                setattr(self, d, torch.DoubleTensor(getattr(self, d)))
            elif isinstance(getattr(self, d), torch.Tensor):
                setattr(self, d, getattr(self, d).cpu())

    def __to_gpu(self):
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            if isinstance(getattr(self, d), np.ndarray):
                setattr(self, d, torch.cuda.DoubleTensor(getattr(self, d)))
            elif isinstance(getattr(self, d), torch.Tensor):
                setattr(self, d, getattr(self, d).cuda())

    def __to_float(self):
        # Dtype changes require tensors, hence a device must be set first.
        if (self.device is None):
            raise RuntimeError('device is not set')
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            if isinstance(getattr(self, d), torch.Tensor):
                setattr(self, d, getattr(self, d).float())

    def __to_double(self):
        if (self.device is None):
            raise RuntimeError('device is not set')
        for d in ['X_train', 'y_train', 'X_test', 'y_test']:
            if isinstance(getattr(self, d), torch.Tensor):
                setattr(self, d, getattr(self, d).double())
class FNN(StructureNN):
    """Fully connected neural network.

    Architecture: (layers - 1) hidden Linear+activation blocks of width
    *width*, followed by a Linear output layer and an optional softmax.
    ``layers=1`` degenerates to a single Linear map.
    """

    def __init__(self, ind, outd, layers=2, width=50, activation='relu', initializer='default', softmax=False):
        super(FNN, self).__init__()
        self.ind = ind                  # input dimension
        self.outd = outd                # output dimension
        self.layers = layers            # total number of Linear layers
        self.width = width              # hidden width
        self.activation = activation    # key understood by Module.Act
        self.initializer = initializer  # key understood by Module.weight_init_
        self.softmax = softmax          # append a softmax on the output?
        self.modus = self.__init_modules()
        self.__initialize()

    def forward(self, x):
        # Hidden blocks: Linear followed by the configured nonlinearity.
        for i in range(1, self.layers):
            LinM = self.modus['LinM{}'.format(i)]
            NonM = self.modus['NonM{}'.format(i)]
            x = NonM(LinM(x))
        x = self.modus['LinMout'](x)
        if self.softmax:
            x = nn.functional.softmax(x, dim=(- 1))
        return x

    def __init_modules(self):
        # Build the named layer dict; keys follow 'LinM{i}' / 'NonM{i}'.
        modules = nn.ModuleDict()
        if (self.layers > 1):
            modules['LinM1'] = nn.Linear(self.ind, self.width)
            modules['NonM1'] = self.Act
            for i in range(2, self.layers):
                modules['LinM{}'.format(i)] = nn.Linear(self.width, self.width)
                modules['NonM{}'.format(i)] = self.Act
            modules['LinMout'] = nn.Linear(self.width, self.outd)
        else:
            # Single-layer network: direct input -> output map.
            modules['LinMout'] = nn.Linear(self.ind, self.outd)
        return modules

    def __initialize(self):
        # Apply the configured initializer to every weight; biases start at 0.
        for i in range(1, self.layers):
            self.weight_init_(self.modus['LinM{}'.format(i)].weight)
            nn.init.constant_(self.modus['LinM{}'.format(i)].bias, 0)
        self.weight_init_(self.modus['LinMout'].weight)
        nn.init.constant_(self.modus['LinMout'].bias, 0)
class Module(torch.nn.Module):
    """Standard module base class.

    Extends ``torch.nn.Module`` with string-keyed device/dtype management
    plus activation/initializer lookup used by the concrete networks.
    """

    def __init__(self):
        super(Module, self).__init__()
        self.activation = None   # e.g. 'relu', 'tanh', 'sigmoid', 'elu'
        self.initializer = None  # e.g. 'He normal', 'Glorot uniform', 'default'
        self.__device = None     # 'cpu' or 'gpu' once set via the setter
        self.__dtype = None      # 'float' or 'double' once set via the setter

    @property
    def device(self):
        return self.__device

    @property
    def dtype(self):
        return self.__dtype

    @device.setter
    def device(self, d):
        if d == 'cpu':
            self.cpu()
        elif d == 'gpu':
            self.cuda()
        else:
            raise ValueError
        self.__device = d

    @dtype.setter
    def dtype(self, d):
        if d == 'float':
            self.to(torch.float)
        elif d == 'double':
            self.to(torch.double)
        else:
            raise ValueError
        self.__dtype = d

    @property
    def Device(self):
        # torch.device counterpart of the string flag; None while unset.
        if self.__device == 'cpu':
            return torch.device('cpu')
        elif self.__device == 'gpu':
            return torch.device('cuda')

    @property
    def Dtype(self):
        if self.__dtype == 'float':
            return torch.float32
        elif self.__dtype == 'double':
            return torch.float64

    @property
    def act(self):
        """Functional form of the configured activation."""
        if self.activation == 'sigmoid':
            return torch.sigmoid
        elif self.activation == 'relu':
            return torch.relu
        elif self.activation == 'tanh':
            return torch.tanh
        elif self.activation == 'elu':
            # Bugfix: torch.elu is not part of the public torch namespace;
            # the documented functional form is torch.nn.functional.elu.
            return torch.nn.functional.elu
        else:
            raise NotImplementedError

    @property
    def Act(self):
        """Module form of the configured activation."""
        if self.activation == 'sigmoid':
            return torch.nn.Sigmoid()
        elif self.activation == 'relu':
            return torch.nn.ReLU()
        elif self.activation == 'tanh':
            return torch.nn.Tanh()
        elif self.activation == 'elu':
            return torch.nn.ELU()
        else:
            raise NotImplementedError

    @property
    def weight_init_(self):
        """In-place weight initializer matching ``self.initializer``."""
        if self.initializer == 'He normal':
            return torch.nn.init.kaiming_normal_
        elif self.initializer == 'He uniform':
            return torch.nn.init.kaiming_uniform_
        elif self.initializer == 'Glorot normal':
            return torch.nn.init.xavier_normal_
        elif self.initializer == 'Glorot uniform':
            return torch.nn.init.xavier_uniform_
        elif self.initializer == 'orthogonal':
            return torch.nn.init.orthogonal_
        elif self.initializer == 'default':
            # Sensible defaults per activation; otherwise leave torch's own
            # initialization untouched (no-op callable).
            if self.activation == 'relu':
                return torch.nn.init.kaiming_normal_
            elif self.activation == 'tanh':
                return torch.nn.init.orthogonal_
            else:
                return lambda x: None
        else:
            raise NotImplementedError
class StructureNN(Module):
    """Structure-oriented neural network used as a general map based on
    designing architecture."""

    def __init__(self):
        super(StructureNN, self).__init__()

    def predict(self, x, returnnp=False):
        """Evaluate the network; optionally convert the output to numpy."""
        out = self(x)
        if returnnp:
            return out.cpu().detach().numpy()
        return out