class PyGRandomGraphDataset(RandomGraphDataset):
    def __getitem__(self, idx):
        n = np.random.randint(low=self.n_min, high=self.n_max)
        g = self.graph_generator(n)
        g = from_networkx(g)
        if g.pos is not None:
            del g.pos
        return g
class DenseGraphBatch(Data):
    def __init__(self, node_features, edge_features, mask, **kwargs):
        self.node_features = node_features
        self.edge_features = edge_features
        self.mask = mask
        for key, item in kwargs.items():
            setattr(self, key, item)

    @classmethod
    def from_sparse_graph_list(cls, data_list, labels=False):
        if labels:
            max_num_nodes = max([graph.number_of_nodes() for graph, label in data_list])
        else:
            max_num_nodes = max([graph.number_of_nodes() for graph in data_list])
        node_features = []
        edge_features = []
        mask = []
        y = []
        props = []
        for data in data_list:
            if labels:
                graph, label = data
                y.append(label)
            else:
                graph = data
            num_nodes = graph.number_of_nodes()
            props.append(torch.Tensor([num_nodes]))
            # pad every graph with isolated nodes up to the largest graph in the batch
            graph.add_nodes_from([i for i in range(num_nodes, max_num_nodes)])
            nf = torch.ones(max_num_nodes, 1)
            node_features.append(nf.unsqueeze(0))
            # one-hot encode all-pairs shortest-path distances, clamped to [0, 5]
            dm = torch.from_numpy(floyd_warshall_numpy(graph)).long()
            dm = torch.clamp(dm, 0, 5).unsqueeze(-1)
            dm = torch.zeros((max_num_nodes, max_num_nodes, 6)).type_as(dm).scatter_(2, dm, 1).float()
            edge_features.append(dm)
            # mask marks the original (non-padding) nodes; the original code reassigned
            # num_nodes to dm.size(1) (== max_num_nodes) first, making the mask all-True
            mask.append((torch.arange(max_num_nodes) < num_nodes).unsqueeze(0))
        node_features = torch.cat(node_features, dim=0)
        edge_features = torch.stack(edge_features, dim=0)
        mask = torch.cat(mask, dim=0)
        props = torch.cat(props, dim=0)
        batch = cls(node_features=node_features, edge_features=edge_features, mask=mask, properties=props)
        if labels:
            batch.y = torch.Tensor(y)
        return batch

    def __repr__(self):
        repr_list = ['{}={}'.format(key, list(value.shape)) for key, value in self.__dict__.items()]
        return 'DenseGraphBatch({})'.format(', '.join(repr_list))
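# Usage sketch (illustrative, not from the original source; assumes the module-level
# imports used above: torch, networkx as nx, floyd_warshall_numpy, from_networkx).
# Two graphs of different sizes are padded to the larger one; edge features are
# one-hot shortest-path distances clamped to [0, 5].
example_graphs = [nx.path_graph(4), nx.cycle_graph(6)]
example_batch = DenseGraphBatch.from_sparse_graph_list(example_graphs, labels=False)
print(example_batch)
# e.g. DenseGraphBatch(node_features=[2, 6, 1], edge_features=[2, 6, 6, 6], mask=[2, 6], properties=[2])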
class DenseGraphDataLoader(torch.utils.data.DataLoader):
    def __init__(self, dataset, batch_size=1, shuffle=False, labels=False, **kwargs):
        super().__init__(
            dataset,
            batch_size,
            shuffle,
            collate_fn=lambda data_list: DenseGraphBatch.from_sparse_graph_list(data_list, labels),
            **kwargs,
        )
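# Illustrative usage (hypothetical data): any sequence of NetworkX graphs works as
# the dataset; collation runs through DenseGraphBatch.from_sparse_graph_list.
example_loader = DenseGraphDataLoader([nx.star_graph(5) for _ in range(8)], batch_size=4)
for batch in example_loader:
    print(batch.node_features.shape)  # torch.Size([4, 6, 1])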
class GraphDataModule(pl.LightningDataModule):
    def __init__(self, graph_family, graph_kwargs=None, samples_per_epoch=100000,
                 batch_size=32, distributed_sampler=True, num_workers=1):
        super().__init__()
        if graph_kwargs is None:
            graph_kwargs = {}
        self.graph_family = graph_family
        self.graph_kwargs = graph_kwargs
        self.samples_per_epoch = samples_per_epoch
        self.num_workers = num_workers
        self.batch_size = batch_size
        self.distributed_sampler = distributed_sampler
        self.train_dataset = None
        self.eval_dataset = None
        self.train_sampler = None
        self.eval_sampler = None

    def make_dataset(self, samples_per_epoch):
        if self.graph_family == 'binomial':
            ds = BinomialGraphDataset(samples_per_epoch=samples_per_epoch, **self.graph_kwargs)
        elif self.graph_family == 'barabasi_albert':
            ds = BarabasiAlbertGraphDataset(samples_per_epoch=samples_per_epoch, **self.graph_kwargs)
        elif self.graph_family == 'regular':
            ds = RegularGraphDataset(samples_per_epoch=samples_per_epoch, **self.graph_kwargs)
        elif self.graph_family == 'geometric':
            ds = GeometricGraphDataset(samples_per_epoch=samples_per_epoch)
        elif self.graph_family == 'all':
            ds = RandomGraphDataset(samples_per_epoch=samples_per_epoch)
        else:
            raise NotImplementedError
        return ds

    def train_dataloader(self):
        self.train_dataset = self.make_dataset(samples_per_epoch=self.samples_per_epoch)
        if self.distributed_sampler:
            train_sampler = DistributedSampler(dataset=self.train_dataset, shuffle=False)
        else:
            train_sampler = None
        return DenseGraphDataLoader(dataset=self.train_dataset, batch_size=self.batch_size,
                                    num_workers=self.num_workers, pin_memory=True,
                                    sampler=train_sampler)

    def val_dataloader(self):
        self.eval_dataset = self.make_dataset(samples_per_epoch=4096)
        if self.distributed_sampler:
            eval_sampler = DistributedSampler(dataset=self.eval_dataset, shuffle=False)
        else:
            eval_sampler = None
        return DenseGraphDataLoader(dataset=self.eval_dataset, batch_size=self.batch_size,
                                    num_workers=self.num_workers, pin_memory=True,
                                    sampler=eval_sampler)
def binomial_ego_graph(n, p):
    g = ego_graph(binomial_graph(n, p), 0)
    g = nx.convert_node_labels_to_integers(g, first_label=0)
    return g
class GraphGenerator(object):
    def __init__(self):
        self.graph_params = {
            'binominal': {
                'func': binomial_graph,
                'kwargs_float_ranges': {'p': (0.2, 0.6)},
            },
            'binominal_ego': {
                'func': binomial_ego_graph,
                'kwargs_float_ranges': {'p': (0.2, 0.6)},
            },
            'newman_watts_strogatz': {
                'func': newman_watts_strogatz_graph,
                'kwargs_int_ranges': {'k': (2, 6)},
                'kwargs_float_ranges': {'p': (0.2, 0.6)},
            },
            'watts_strogatz': {
                'func': watts_strogatz_graph,
                'kwargs_int_ranges': {'k': (2, 6)},
                'kwargs_float_ranges': {'p': (0.2, 0.6)},
            },
            'random_regular': {
                'func': random_regular_graph,
                'kwargs_int_ranges': {'d': (3, 6)},
            },
            'barabasi_albert': {
                'func': barabasi_albert_graph,
                'kwargs_int_ranges': {'m': (1, 6)},
            },
            'dual_barabasi_albert': {
                'func': dual_barabasi_albert_graph,
                'kwargs_int_ranges': {'m1': (1, 6), 'm2': (1, 6)},
                'kwargs_float_ranges': {'p': (0.1, 0.9)},
            },
            'extended_barabasi_albert': {
                'func': extended_barabasi_albert_graph,
                'kwargs_int_ranges': {'m': (1, 6)},
                'kwargs_float_ranges': {'p': (0.1, 0.49), 'q': (0.1, 0.49)},
            },
            'powerlaw_cluster': {
                'func': powerlaw_cluster_graph,
                'kwargs_int_ranges': {'m': (1, 6)},
                'kwargs_float_ranges': {'p': (0.1, 0.9)},
            },
            'random_powerlaw_tree': {
                'func': random_powerlaw_tree,
                'kwargs': {'gamma': 3, 'tries': 1000},
            },
            'random_geometric': {
                'func': random_geometric_graph,
                'kwargs_float_ranges': {'p': (0.4, 0.5)},
                'kwargs': {'radius': 1},
            },
        }
        self.graph_types = list(self.graph_params.keys())

    def __call__(self, n, graph_type=None):
        if graph_type is None:
            graph_type = random.choice(self.graph_types)
        params = self.graph_params[graph_type]
        kwargs = {}
        if 'kwargs' in params:
            kwargs = {**params['kwargs']}
        if 'kwargs_int_ranges' in params:
            for key, arg in params['kwargs_int_ranges'].items():
                kwargs[key] = np.random.randint(arg[0], arg[1] + 1)
        if 'kwargs_float_ranges' in params:
            for key, arg in params['kwargs_float_ranges'].items():
                kwargs[key] = np.random.uniform(arg[0], arg[1])
        if graph_type == 'random_regular':
            # n * d must be even for a d-regular graph on n nodes to exist
            if (n * kwargs['d']) % 2 != 0:
                n -= 1
        try:
            g = params['func'](n=n, **kwargs)
        except nx.exception.NetworkXError:
            g = self(n)
        return g
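# Usage sketch (illustrative): sample one graph from a random family with randomly
# drawn kwargs, or pin the family explicitly.
gen = GraphGenerator()
g = gen(16)
g_ba = gen(16, graph_type='barabasi_albert')
print(g_ba.number_of_nodes(), g_ba.number_of_edges())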
class EvalRandomGraphDataset(Dataset):
    def __init__(self, n, pyg=False):
        self.n = n
        self.pyg = pyg
        self.graph_params = {
            'binominal': {'func': binomial_graph, 'kwargs': {'p': (0.25, 0.35, 0.5)}},
            'newman_watts_strogatz': {'func': newman_watts_strogatz_graph,
                                      'kwargs': {'k': (2, 2, 5, 5), 'p': (0.25, 0.75, 0.25, 0.75)}},
            'watts_strogatz': {'func': watts_strogatz_graph,
                               'kwargs': {'k': (2, 2, 5, 5), 'p': (0.25, 0.75, 0.25, 0.75)}},
            'random_regular': {'func': random_regular_graph, 'kwargs': {'d': (3, 4, 5, 6)}},
            'barabasi_albert': {'func': barabasi_albert_graph, 'kwargs': {'m': (1, 2, 3, 4)}},
            'dual_barabasi_albert': {'func': dual_barabasi_albert_graph,
                                     'kwargs': {'m1': (2, 2), 'm2': (4, 1), 'p': (0.5, 0.5)}},
            'extended_barabasi_albert': {'func': extended_barabasi_albert_graph,
                                         'kwargs': {'m': (1, 2, 4), 'p': (0.5, 0.5, 0.5),
                                                    'q': (0.25, 0.25, 0.25)}},
            'powerlaw_cluster': {'func': powerlaw_cluster_graph, 'kwargs': {'m': (2, 3, 4)},
                                 'kwargs_fix': {'p': 0.5}},
            # 'random_powerlaw_tree' is listed in graph_types below but had no entry in
            # the original dict (a guaranteed KeyError); this entry mirrors the
            # parameters used in GraphGenerator above.
            'random_powerlaw_tree': {'func': random_powerlaw_tree,
                                     'kwargs_fix': {'gamma': 3, 'tries': 1000}},
            'random_geometric': {'func': random_geometric_graph, 'kwargs': {'p': (0.35, 0.55)},
                                 'kwargs_fix': {'radius': 1}},
        }
        self.graph_types = ['binominal', 'barabasi_albert', 'random_geometric', 'random_regular',
                            'random_powerlaw_tree', 'watts_strogatz', 'extended_barabasi_albert',
                            'newman_watts_strogatz', 'dual_barabasi_albert']
        graphs, labels = self.generate_dataset()
        c = list(zip(graphs, labels))
        random.shuffle(c)
        self.graphs, self.labels = zip(*c)

    def generate_dataset(self):
        label = 0
        graphs = []
        labels = []
        for j, graph_type in enumerate(self.graph_types):
            params = self.graph_params[graph_type]
            func = params['func']
            kwargs = params.get('kwargs')
            kwargs_fix = params.get('kwargs_fix')
            if kwargs is not None:
                num_settings = len(list(kwargs.values())[0])
            else:
                num_settings = 1
            for i in range(num_settings):
                final_kwargs = {}
                if kwargs is not None:
                    for key, args in kwargs.items():
                        if num_settings > 1:
                            final_kwargs[key] = args[i]
                        else:
                            final_kwargs[key] = args
                num_graphs = int(256 / num_settings)
                if kwargs_fix is not None:
                    final_kwargs2 = {**final_kwargs, **kwargs_fix}
                elif kwargs is None:
                    final_kwargs2 = kwargs_fix
                else:
                    final_kwargs2 = final_kwargs
                gs = [func(n=self.n, **final_kwargs2) for _ in range(num_graphs)]
                graphs.extend(gs)
                labels.extend(len(gs) * [label])
                label += 1
        return graphs, labels

    def __len__(self):
        return len(self.graphs)

    def __getitem__(self, idx):
        graph = self.graphs[idx]
        label = self.labels[idx]
        if self.pyg:
            g = from_networkx(graph)
            if g.pos is not None:
                del g.pos
            if g.edge_index.dtype != torch.long:
                print(g)
            g.y = torch.Tensor([label]).long()
            return g
        else:
            return graph, label
class EvalRandomBinomialGraphDataset(Dataset):
    def __init__(self, n_min, n_max, p_min, p_max, num_samples, pyg=False):
        self.n_min = n_min
        self.n_max = n_max
        self.p_min = p_min
        self.p_max = p_max
        self.num_samples = num_samples
        self.pyg = pyg
        self.graphs, self.labels = self.generate_dataset()

    def generate_dataset(self):
        graphs = []
        labels = []
        for i in range(self.num_samples):
            n = np.random.randint(low=self.n_min, high=self.n_max)
            p = np.random.uniform(low=self.p_min, high=self.p_max)
            g = binomial_graph(n, p)
            if self.pyg:
                g = from_networkx(g)
                g.y = p
            graphs.append(g)
            labels.append(p)
        return graphs, labels

    def __len__(self):
        return len(self.graphs)

    def __getitem__(self, idx):
        graph = self.graphs[idx]
        if self.pyg:
            return graph
        else:
            label = self.labels[idx]
            return graph, label
def add_arguments(parser):
    """Helper function to fill the parser object.

    Args:
        parser: Parser object
    Returns:
        parser: Updated parser object
    """
    parser.add_argument('--test', dest='test', action='store_true')
    parser.add_argument('-i', '--id', type=int, default=0)
    parser.add_argument('-g', '--gpus', default=1, type=int)
    parser.add_argument('-e', '--num_epochs', default=5000, type=int)
    parser.add_argument('--num_eval_samples', default=8192, type=int)
    parser.add_argument('--eval_freq', default=1000, type=int)
    parser.add_argument('-s', '--save_dir', default=DEFAULT_SAVE_DIR, type=str)
    parser.add_argument('--precision', default=32, type=int)
    parser.add_argument('--progress_bar', dest='progress_bar', action='store_true')
    parser.set_defaults(test=False)
    parser.set_defaults(progress_bar=False)
    parser.add_argument('--resume_ckpt', default='', type=str)
    parser.add_argument('-b', '--batch_size', default=32, type=int)
    parser.add_argument('--lr', default=5e-05, type=float)
    parser.add_argument('--kld_loss_scale', default=0.001, type=float)
    parser.add_argument('--perm_loss_scale', default=0.5, type=float)
    parser.add_argument('--property_loss_scale', default=0.1, type=float)
    parser.add_argument('--vae', dest='vae', action='store_true')
    parser.set_defaults(vae=False)
    parser.add_argument('--num_node_features', default=1, type=int)
    parser.add_argument('--num_edge_features', default=6, type=int)
    parser.add_argument('--emb_dim', default=64, type=int)
    parser.add_argument('--graph_encoder_hidden_dim', default=256, type=int)
    parser.add_argument('--graph_encoder_k_dim', default=64, type=int)
    parser.add_argument('--graph_encoder_v_dim', default=64, type=int)
    parser.add_argument('--graph_encoder_num_heads', default=16, type=int)
    parser.add_argument('--graph_encoder_ppf_hidden_dim', default=1024, type=int)
    parser.add_argument('--graph_encoder_num_layers', default=16, type=int)
    parser.add_argument('--graph_decoder_hidden_dim', default=256, type=int)
    parser.add_argument('--graph_decoder_k_dim', default=64, type=int)
    parser.add_argument('--graph_decoder_v_dim', default=64, type=int)
    parser.add_argument('--graph_decoder_num_heads', default=16, type=int)
    parser.add_argument('--graph_decoder_ppf_hidden_dim', default=1024, type=int)
    parser.add_argument('--graph_decoder_num_layers', default=16, type=int)
    parser.add_argument('--graph_decoder_pos_emb_dim', default=64, type=int)
    parser.add_argument('--property_predictor_hidden_dim', default=256, type=int)
    parser.add_argument('--num_properties', default=1, type=int)
    parser.add_argument('--num_workers', default=32, type=int)
    parser.add_argument('--shuffle', default=1, type=int)
    parser.add_argument('--graph_family', default='barabasi_albert', type=str)
    parser.add_argument('--n_min', default=12, type=int)
    parser.add_argument('--n_max', default=20, type=int)
    parser.add_argument('--p_min', default=0.4, type=float)
    parser.add_argument('--p_max', default=0.6, type=float)
    parser.add_argument('--m_min', default=1, type=int)
    parser.add_argument('--m_max', default=5, type=int)
    return parser
def main(hparams):
    run_dir = hparams.save_dir + '/run{}/'.format(hparams.id)
    if not os.path.isdir(run_dir):
        print('Creating directory')
        os.mkdir(run_dir)
    print('Starting Run {}'.format(hparams.id))
    checkpoint_callback = ModelCheckpoint(dirpath=run_dir, save_last=True, save_top_k=1,
                                          monitor='val_loss')
    lr_logger = LearningRateMonitor()
    tb_logger = TensorBoardLogger(run_dir)
    critic = Critic
    model = PLGraphAE(hparams.__dict__, critic)
    graph_kwargs = {'n_min': hparams.n_min, 'n_max': hparams.n_max,
                    'm_min': hparams.m_min, 'm_max': hparams.m_max,
                    'p_min': hparams.p_min, 'p_max': hparams.p_max}
    datamodule = GraphDataModule(graph_family=hparams.graph_family, graph_kwargs=graph_kwargs,
                                 batch_size=hparams.batch_size, num_workers=hparams.num_workers,
                                 samples_per_epoch=100000000)
    my_ddp_plugin = MyDDP()
    trainer = pl.Trainer(
        gpus=hparams.gpus,
        progress_bar_refresh_rate=5 if hparams.progress_bar else 0,
        logger=tb_logger,
        checkpoint_callback=True,
        val_check_interval=hparams.eval_freq if not hparams.test else 100,
        accelerator='ddp',
        plugins=[my_ddp_plugin],
        gradient_clip_val=0.1,
        callbacks=[lr_logger, checkpoint_callback],
        terminate_on_nan=True,
        replace_sampler_ddp=False,
        precision=hparams.precision,
        max_epochs=hparams.num_epochs,
        reload_dataloaders_every_epoch=True,
        resume_from_checkpoint=hparams.resume_ckpt if hparams.resume_ckpt != '' else None,
    )
    trainer.fit(model=model, datamodule=datamodule)
class PLGraphAE(pl.LightningModule):
    def __init__(self, hparams, critic):
        super().__init__()
        self.save_hyperparameters(hparams)
        self.graph_ae = GraphAE(hparams)
        self.critic = critic(hparams)

    def forward(self, graph, training):
        graph_pred, perm, mu, logvar = self.graph_ae(graph, training, tau=1.0)
        return graph_pred, perm, mu, logvar

    def training_step(self, graph, batch_idx):
        graph_pred, perm, mu, logvar = self(graph=graph, training=True)
        loss = self.critic(graph_true=graph, graph_pred=graph_pred, perm=perm, mu=mu, logvar=logvar)
        self.log_dict(loss)
        return loss

    def validation_step(self, graph, batch_idx):
        # evaluate once with soft (training=True) and once with hard (training=False) decoding
        graph_pred, perm, mu, logvar = self(graph=graph, training=True)
        metrics_soft = self.critic.evaluate(graph_true=graph, graph_pred=graph_pred, perm=perm,
                                            mu=mu, logvar=logvar, prefix='val')
        graph_pred, perm, mu, logvar = self(graph=graph, training=False)
        metrics_hard = self.critic.evaluate(graph_true=graph, graph_pred=graph_pred, perm=perm,
                                            mu=mu, logvar=logvar, prefix='val_hard')
        metrics = {**metrics_soft, **metrics_hard}
        self.log_dict(metrics)
        self.log_dict(metrics_soft)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.graph_ae.parameters(), lr=self.hparams['lr'],
                                     betas=(0.9, 0.98))
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.999)
        if 'eval_freq' in self.hparams:
            scheduler = {'scheduler': lr_scheduler, 'interval': 'step',
                         'frequency': 2 * (self.hparams['eval_freq'] + 1)}
        else:
            scheduler = {'scheduler': lr_scheduler, 'interval': 'epoch'}
        return [optimizer], [scheduler]

    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure=None,
                       second_order_closure=None, on_tpu=False, using_native_amp=False,
                       using_lbfgs=False):
        # linear learning-rate warm-up over the first 10000 steps
        if self.trainer.global_step < 10000:
            lr_scale = min(1.0, float(self.trainer.global_step + 1) / 10000.0)
            for pg in optimizer.param_groups:
                pg['lr'] = lr_scale * self.hparams.lr
        optimizer.step(closure=optimizer_closure)
        optimizer.zero_grad()
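# The custom optimizer_step above implements a linear warm-up: the effective lr
# ramps from base_lr/10000 to base_lr over the first 10000 steps. A minimal
# standalone check of that schedule (base_lr matches the --lr default above):
base_lr = 5e-05
for step in (0, 4999, 9999, 20000):
    lr_scale = min(1.0, float(step + 1) / 10000.0)
    print(step, lr_scale * base_lr)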
def download_file_from_google_drive(id, destination):
    URL = 'https://docs.google.com/uc?export=download'
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token:
        params = {'id': id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    save_response_content(response, destination)
def get_confirm_token(response):
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            return value
    return None
def save_response_content(response, destination):
    CHUNK_SIZE = 32768
    with open(destination, 'wb') as f:
        for chunk in response.iter_content(CHUNK_SIZE):
            if chunk:  # filter out keep-alive chunks
                f.write(chunk)
def download_pretrained_model():
    destination = os.path.join(PRETRAINED_MODEL_DIR, 'default_model.zip')
    if not os.path.isdir(PRETRAINED_MODEL_DIR):
        os.mkdir(PRETRAINED_MODEL_DIR)
    download_file_from_google_drive(FILE_ID, destination)
    with zipfile.ZipFile(destination, 'r') as zip_ref:
        zip_ref.extractall(PRETRAINED_MODEL_DIR)
def add_arguments(parser):
    """Helper function to fill the parser object.

    Args:
        parser: Parser object
    Returns:
        None
    """
    parser.add_argument('-m', '--model', help='which model?', default='NoisyGRUSeq2SeqWithFeatures', type=str)
    parser.add_argument('-i', '--input_pipeline', default='InputPipelineWithFeatures', type=str)
    parser.add_argument('--input_sequence_key', default='random_smiles', type=str)
    parser.add_argument('--output_sequence_key', default='canonical_smiles', type=str)
    parser.add_argument('-c', '--cell_size', help='hidden layers of cell. multiple numbers for multi layer rnn', nargs='+', default=[128], type=int)
    parser.add_argument('-e', '--emb_size', help='size of bottleneck layer', default=128, type=int)
    parser.add_argument('-l', '--learning_rate', default=0.0005, type=float)
    parser.add_argument('-s', '--save_dir', help='path to save and log files', default=os.path.join(DEFAULT_DATA_DIR, 'default_model'), type=str)
    parser.add_argument('-d', '--device', help='number of cuda visible devices', default='-1', type=str)
    parser.add_argument('-gmf', '--gpu_mem_frac', default=1.0, type=float)
    parser.add_argument('-n', '--num_steps', help='number of train steps', default=250000, type=int)
    parser.add_argument('--summary_freq', help='save model and log translation accuracy', default=1000, type=int)
    parser.add_argument('--inference_freq', help='log qsar modelling performance', default=5000, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--one_hot_embedding', default=False, type=bool)
    parser.add_argument('--char_embedding_size', default=32, type=int)
    parser.add_argument('--train_file', default='../data/pretrain_dataset.tfrecords', type=str)
    parser.add_argument('--val_file', default='../data/pretrain_dataset_val.tfrecords', type=str)
    parser.add_argument('--infer_file', default='../data/val_dataset_preprocessed3.csv', type=str)
    parser.add_argument('--allow_soft_placement', default=True, type=bool)
    parser.add_argument('--cpu_threads', default=5, type=int)
    parser.add_argument('--overwrite_saves', default=False, type=bool)
    parser.add_argument('--input_dropout', default=0.0, type=float)
    parser.add_argument('--emb_noise', default=0.0, type=float)
    parser.add_argument('-ks', '--kernel_size', nargs='+', default=[2], type=int)
    parser.add_argument('-chs', '--conv_hidden_size', nargs='+', default=[128], type=int)
    parser.add_argument('--reverse_decoding', default=False, type=bool)
    parser.add_argument('--buffer_size', default=10000, type=int)
    parser.add_argument('--lr_decay', default=True, type=bool)
    parser.add_argument('--lr_decay_frequency', default=50000, type=int)
    parser.add_argument('--lr_decay_factor', default=0.9, type=float)
    parser.add_argument('--num_buckets', default=8.0, type=float)
    parser.add_argument('--min_bucket_length', default=20.0, type=float)
    parser.add_argument('--max_bucket_length', default=60.0, type=float)
    parser.add_argument('--num_features', default=7, type=int)
    parser.add_argument('--save_hparams', default=True, type=bool)
    parser.add_argument('--hparams_from_file', default=False, type=bool)
    parser.add_argument('--hparams_file_name', default=None, type=str)
    parser.add_argument('--rand_input_swap', default=False, type=bool)
    parser.add_argument('--infer_input', default='random', type=str)
    parser.add_argument('--emb_activation', default='tanh', type=str)
    parser.add_argument('--div_loss_scale', default=1.0, type=float)
    parser.add_argument('--div_loss_rate', default=0.9, type=float)
def create_hparams(flags):
    """Create training hparams."""
    hparams = tf.contrib.training.HParams(
        model=flags.model,
        input_pipeline=flags.input_pipeline,
        input_sequence_key=flags.input_sequence_key,
        output_sequence_key=flags.output_sequence_key,
        cell_size=flags.cell_size,
        emb_size=flags.emb_size,
        save_dir=flags.save_dir,
        device=flags.device,
        lr=flags.learning_rate,
        gpu_mem_frac=flags.gpu_mem_frac,
        num_steps=flags.num_steps,
        summary_freq=flags.summary_freq,
        inference_freq=flags.inference_freq,
        batch_size=flags.batch_size,
        one_hot_embedding=flags.one_hot_embedding,
        char_embedding_size=flags.char_embedding_size,
        train_file=flags.train_file,
        val_file=flags.val_file,
        infer_file=flags.infer_file,
        allow_soft_placement=flags.allow_soft_placement,
        cpu_threads=flags.cpu_threads,
        overwrite_saves=flags.overwrite_saves,
        input_dropout=flags.input_dropout,
        emb_noise=flags.emb_noise,
        conv_hidden_size=flags.conv_hidden_size,
        kernel_size=flags.kernel_size,
        reverse_decoding=flags.reverse_decoding,
        buffer_size=flags.buffer_size,
        lr_decay=flags.lr_decay,
        lr_decay_frequency=flags.lr_decay_frequency,
        lr_decay_factor=flags.lr_decay_factor,
        num_buckets=flags.num_buckets,
        min_bucket_length=flags.min_bucket_length,
        max_bucket_length=flags.max_bucket_length,
        num_features=flags.num_features,
        rand_input_swap=flags.rand_input_swap,
        infer_input=flags.infer_input,
        emb_activation=flags.emb_activation,
        div_loss_scale=flags.div_loss_scale,
        div_loss_rate=flags.div_loss_rate,
    )
    hparams.add_hparam('encode_vocabulary_file', os.path.join(DEFAULT_DATA_DIR, 'indices_char.npy'))
    hparams.add_hparam('decode_vocabulary_file', os.path.join(DEFAULT_DATA_DIR, 'indices_char.npy'))
    hparams_file_name = flags.hparams_file_name
    if hparams_file_name is None:
        hparams_file_name = os.path.join(hparams.save_dir, 'hparams.json')
    if flags.hparams_from_file:
        hparams.cell_size = list()
        # hparams.json stores a JSON-encoded string (see train_loop), hence json.load
        # yields the string that parse_json expects
        hparams = hparams.parse_json(json.load(open(hparams_file_name)))
        hparams.set_hparam('encode_vocabulary_file', os.path.join(DEFAULT_DATA_DIR, 'indices_char.npy'))
        hparams.set_hparam('decode_vocabulary_file', os.path.join(DEFAULT_DATA_DIR, 'indices_char.npy'))
    return hparams
def sequence2embedding(model, hparams, seq_list):
    """Helper function to run a forward pass up to the bottleneck layer (ENCODER).
    Encodes a list of sequences into the molecular descriptor.

    Args:
        model: The translation model instance to use.
        hparams: Hyperparameter object.
        seq_list: List of sequences that should be encoded.
    Returns:
        Embedding of the input sequences as numpy array.
    """
    emb_list = []
    with model.graph.as_default():
        input_pipeline = InputPipelineInferEncode(seq_list, hparams)
        input_pipeline.initilize()
        model.model.restore(model.sess)
        while True:
            try:
                input_seq, input_len = input_pipeline.get_next()
                emb = model.model.seq2emb(model.sess, input_seq, input_len)
                emb_list.append(emb)
            except StopIteration:
                break
        embedding_array = np.concatenate(emb_list)
    return embedding_array
def embedding2sequence(model, hparams, embedding, num_top=1, maximum_iterations=1000):
    """Helper function to run a forward pass from the bottleneck layer to the
    output (DECODER).

    Args:
        model: The translation model instance to use.
        hparams: Hyperparameter object.
        embedding: Array with samples x num_features.
    Returns:
        List of sequences decoded from the input embedding (descriptor).
    """
    seq_list = []
    with model.graph.as_default():
        input_pipeline = InputPipelineInferDecode(embedding, hparams)
        input_pipeline.initilize()
        model.model.restore(model.sess)
        while True:
            try:
                emb = input_pipeline.get_next()
                seq = model.model.emb2seq(model.sess, emb, num_top, maximum_iterations)
                if num_top == 1:
                    seq = [s[0] for s in seq]
                seq_list.extend(seq)
            except StopIteration:
                break
    # unwrap single-element results; the original used a bitwise `&` on the list
    # itself, which due to operator precedence never triggered
    if (len(seq_list) == 1) and isinstance(seq_list[0], str):
        return seq_list[0]
    return seq_list
class InferenceModel(object):
    """Class that handles the inference of a trained model."""

    def __init__(self, model_dir=_default_model_dir, use_gpu=True, batch_size=256,
                 gpu_mem_frac=0.1, beam_width=10, num_top=1, maximum_iterations=1000,
                 cpu_threads=5, emb_activation=None):
        """Constructor for the inference model.

        Args:
            model_dir: Path to the model directory.
            use_gpu: Flag for GPU usage.
            batch_size: Number of samples to process per step.
            gpu_mem_frac: If GPU is used, what memory fraction should be used?
            beam_width: Width of the window used for the beam search decoder.
            num_top: Number of most probable sequences as output of the beam search decoder.
            emb_activation: Activation function used in the bottleneck layer.
        Returns:
            None
        """
        self.num_top = num_top
        self.use_gpu = use_gpu
        parser = argparse.ArgumentParser()
        add_arguments(parser)
        flags = parser.parse_args([])
        flags.hparams_from_file = True
        flags.save_dir = model_dir
        self.hparams = create_hparams(flags)
        self.hparams.set_hparam('save_dir', model_dir)
        self.hparams.set_hparam('batch_size', batch_size)
        self.hparams.set_hparam('gpu_mem_frac', gpu_mem_frac)
        self.hparams.add_hparam('beam_width', beam_width)
        self.hparams.set_hparam('cpu_threads', cpu_threads)
        self.encode_model, self.decode_model = build_models(self.hparams, modes=['ENCODE', 'DECODE'])
        self.maximum_iterations = maximum_iterations

    def seq_to_emb(self, seq):
        """Helper function to calculate the embedding (molecular descriptor) for input sequence(s).

        Args:
            seq: Single sequence or list of sequences to encode.
        Returns:
            Embedding of the input sequence(s).
        """
        if isinstance(seq, str):
            seq = [seq]
        if self.use_gpu:
            emb = sequence2embedding(self.encode_model, self.hparams, seq)
        else:
            with tf.device('/cpu:0'):
                emb = sequence2embedding(self.encode_model, self.hparams, seq)
        return emb

    def emb_to_seq(self, embedding):
        """Helper function to calculate the sequence(s) for one or multiple (concatenated)
        embeddings.

        Args:
            embedding: Array with n_samples x num_features.
        Returns:
            Sequence(s).
        """
        if embedding.ndim == 1:
            embedding = np.expand_dims(embedding, 0)
        if self.use_gpu:
            seq = embedding2sequence(self.decode_model, self.hparams, embedding,
                                     self.num_top, self.maximum_iterations)
        else:
            with tf.device('/cpu:0'):
                seq = embedding2sequence(self.decode_model, self.hparams, embedding,
                                         self.num_top, self.maximum_iterations)
        if len(seq) == 1:
            seq = seq[0]
        if len(seq) == 1:
            seq = seq[0]
        return seq
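# Usage sketch (commented out; requires a pretrained model directory, e.g. the one
# unpacked by download_pretrained_model() above; the SMILES strings are illustrative):
# infer_model = InferenceModel(model_dir='default_model', use_gpu=False)
# emb = infer_model.seq_to_emb(['c1ccccc1', 'CC(=O)O'])  # one 512-dim descriptor per SMILES
# smls = infer_model.emb_to_seq(emb)                     # decode descriptors back to SMILES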
class InferenceServer():
    def __init__(self, model_dir=_default_model_dir, num_servers=1, port_frontend='5559',
                 port_backend='5560', batch_size=256, gpu_mem_frac=0.3, beam_width=10,
                 num_top=1, maximum_iterations=1000, use_running=False):
        self.model_dir = model_dir
        self.port_frontend = port_frontend
        self.port_backend = port_backend
        self.batch_size = batch_size
        self.gpu_mem_frac = gpu_mem_frac
        self.beam_width = beam_width
        self.maximum_iterations = maximum_iterations
        self.num_top = num_top
        if not use_running:
            self.gpus = os.environ.get('CUDA_VISIBLE_DEVICES').split(',')
            mp.Process(target=self._init_device).start()
            for i in range(num_servers):
                os.environ['CUDA_VISIBLE_DEVICES'] = self.gpus[i % len(self.gpus)]
                mp.Process(target=self._init_server).start()

    def _init_device(self):
        # broker that queues requests between the client-facing frontend and the
        # worker-facing backend
        try:
            context = zmq.Context(1)
            frontend = context.socket(zmq.XREP)
            frontend.bind('tcp://*:%s' % self.port_frontend)
            backend = context.socket(zmq.XREQ)
            backend.bind('tcp://*:%s' % self.port_backend)
            zmq.device(zmq.QUEUE, frontend, backend)
        except Exception:
            print('bringing down zmq device')
        finally:
            frontend.close()
            backend.close()
            context.term()

    def _init_server(self):
        infer_model = InferenceModel(model_dir=self.model_dir, gpu_mem_frac=self.gpu_mem_frac,
                                     use_gpu=True, batch_size=self.batch_size,
                                     beam_width=self.beam_width,
                                     maximum_iterations=self.maximum_iterations)
        context = zmq.Context()
        socket = context.socket(zmq.REP)
        socket.connect('tcp://localhost:%s' % self.port_backend)
        print('Server running on GPU ', os.environ['CUDA_VISIBLE_DEVICES'])
        while True:
            inp = json.loads(socket.recv())
            # requests are (flag, payload) tuples: flag 1 encodes, flag 0 decodes
            if inp[0]:
                embeddings = infer_model.seq_to_emb(inp[1])
                socket.send_string(json.dumps(embeddings.tolist()))
            else:
                smiles = infer_model.emb_to_seq(np.array(inp[1]))
                socket.send_string(json.dumps(smiles))

    def seq_to_emb(self, smiles):
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect('tcp://localhost:%s' % self.port_frontend)
        socket.send_string(json.dumps((1, smiles)))
        emb = np.array(json.loads(socket.recv()))
        return emb

    def emb_to_seq(self, emb):
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect('tcp://localhost:%s' % self.port_frontend)
        socket.send_string(json.dumps((0, emb.tolist())))
        seq = json.loads(socket.recv())
        return seq
class InputPipeline():
    """Base input pipeline class. Iterates through a tf-record file to produce inputs
    for training the translation model.

    Attributes:
        mode: The mode the model is supposed to run (e.g. TRAIN).
        batch_size: Number of samples per batch.
        buffer_size: Number of samples in the shuffle buffer.
        input_sequence_key: Identifier of the input_sequence feature in the
            tf-record file.
        output_sequence_key: Identifier of the output_sequence feature in the
            tf-record file.
        encode_vocabulary: Dictionary that maps integers to unique tokens of the
            input strings.
        decode_vocabulary: Dictionary that maps integers to unique tokens of the
            output strings.
        num_buckets: Number of buckets for batching together sequences of
            similar length.
        min_bucket_lenght: All sequences below this length are put in the
            same bucket.
        max_bucket_lenght: All sequences above this length are put in the
            same bucket.
        regex_pattern_input: Expression to tokenize the input sequence with.
        regex_pattern_output: Expression to tokenize the output sequence with.
    """

    def __init__(self, mode, hparams):
        """Constructor for base input pipeline class.

        Args:
            mode: The mode the model is supposed to run (e.g. TRAIN).
            hparams: Hyperparameters defined in file or flags.
        Returns:
            None
        """
        self.mode = mode
        self.batch_size = hparams.batch_size
        self.buffer_size = hparams.buffer_size
        self.input_sequence_key = hparams.input_sequence_key
        self.output_sequence_key = hparams.output_sequence_key
        if self.mode == 'TRAIN':
            self.file = hparams.train_file
        else:
            self.input_sequence_key = 'canonical_smiles'
            self.file = hparams.val_file
        self.encode_vocabulary = {
            v: k for k, v in np.load(hparams.encode_vocabulary_file, allow_pickle=True).item().items()
        }
        self.decode_vocabulary = {
            v: k for k, v in np.load(hparams.decode_vocabulary_file, allow_pickle=True).item().items()
        }
        self.num_buckets = hparams.num_buckets
        self.min_bucket_lenght = hparams.min_bucket_length
        self.max_bucket_lenght = hparams.max_bucket_length
        if 'inchi' in self.input_sequence_key:
            self.regex_pattern_input = REGEX_INCHI
        elif 'smiles' in self.input_sequence_key:
            self.regex_pattern_input = REGEX_SML
        else:
            raise ValueError('Could not understand the input type. SMILES or INCHI?')
        if 'inchi' in self.output_sequence_key:
            self.regex_pattern_output = REGEX_INCHI
        elif 'smiles' in self.output_sequence_key:
            self.regex_pattern_output = REGEX_SML
        else:
            raise ValueError('Could not understand the output type. SMILES or INCHI?')

    def make_dataset_and_iterator(self):
        """Method that builds a TFRecordDataset and creates an iterator."""
        self.dataset = tf.data.TFRecordDataset(self.file)
        if self.mode == 'TRAIN':
            self.dataset = self.dataset.repeat()
        self.dataset = self.dataset.map(self._parse_element, num_parallel_calls=32)
        self.dataset = self.dataset.map(
            lambda element: tf.py_func(
                self._process_element,
                [element[self.input_sequence_key], element[self.output_sequence_key]],
                [tf.int32, tf.int32, tf.int32, tf.int32],
            ),
            num_parallel_calls=32,
        )
        self.dataset = self.dataset.apply(
            tf.contrib.data.group_by_window(
                key_func=lambda in_seq, out_seq, in_len, out_len: self._length_bucket(in_len),
                reduce_func=lambda key, ds: self._pad_batch(
                    ds,
                    self.batch_size,
                    ([None], [None], [1], [1]),
                    (self.encode_vocabulary['</s>'], self.decode_vocabulary['</s>'], 0, 0),
                ),
                window_size=self.batch_size,
            )
        )
        if self.mode == 'TRAIN':
            self.dataset = self.dataset.shuffle(buffer_size=self.buffer_size)
        self.iterator = self.dataset.make_initializable_iterator()

    def _parse_element(self, example_proto):
        """Method that parses an element from a tf-record file."""
        feature_dict = {
            self.input_sequence_key: tf.FixedLenFeature([], tf.string),
            self.output_sequence_key: tf.FixedLenFeature([], tf.string),
        }
        parsed_features = tf.parse_single_example(example_proto, feature_dict)
        element = {name: parsed_features[name] for name in list(feature_dict.keys())}
        return element

    def _process_element(self, input_seq, output_seq):
        """Method that tokenizes input and output sequence and pads them with start and stop tokens.

        Args:
            input_seq: Input sequence.
            output_seq: Target sequence.
        Returns:
            Array with ids of each token in the tokenized input sequence.
            Array with ids of each token in the tokenized output sequence.
            Array with length of the input sequence.
            Array with length of the output sequence.
        """
        input_seq = input_seq.decode('ascii')
        output_seq = output_seq.decode('ascii')
        input_seq = np.array(self._char_to_idx(input_seq, self.regex_pattern_input, self.encode_vocabulary)).astype(np.int32)
        output_seq = np.array(self._char_to_idx(output_seq, self.regex_pattern_output, self.decode_vocabulary)).astype(np.int32)
        input_seq = self._pad_start_end_token(input_seq, self.encode_vocabulary)
        output_seq = self._pad_start_end_token(output_seq, self.decode_vocabulary)
        input_seq_len = np.array([len(input_seq)]).astype(np.int32)
        output_seq_len = np.array([len(output_seq)]).astype(np.int32)
        return input_seq, output_seq, input_seq_len, output_seq_len

    def _char_to_idx(self, seq, regex_pattern, vocabulary):
        """Helper function to tokenize a sequence.

        Args:
            seq: Sequence to tokenize.
            regex_pattern: Expression to tokenize the input sequence with.
            vocabulary: Dictionary that maps integers to unique tokens.
        Returns:
            List with ids of the tokens in the tokenized sequence.
        """
        char_list = re.findall(regex_pattern, seq)
        return [vocabulary[char_list[j]] for j in range(len(char_list))]

    def _pad_start_end_token(self, seq, vocabulary):
        """Helper function to pad start and stop tokens onto a tokenized sequence.

        Args:
            seq: Tokenized sequence to pad.
            vocabulary: Dictionary that maps integers to unique tokens.
        Returns:
            Array with ids of each token in the tokenized input sequence
            padded by start and stop tokens.
        """
        seq = np.concatenate([np.array([vocabulary['<s>']]), seq, np.array([vocabulary['</s>']])]).astype(np.int32)
        return seq

    def _length_bucket(self, length):
        """Helper function to assign a bucket to a certain sequence length.

        Args:
            length: The length of a sequence.
        Returns:
            ID of the assigned bucket.
        """
        length = tf.cast(length, tf.float32)
        num_buckets = tf.cast(self.num_buckets, tf.float32)
        cast_value = (self.max_bucket_lenght - self.min_bucket_lenght) / num_buckets
        minimum = self.min_bucket_lenght / cast_value
        bucket_id = (length / cast_value) - minimum + 1
        bucket_id = tf.cast(tf.clip_by_value(bucket_id, 0, self.num_buckets + 1), tf.int64)
        return bucket_id

    def _pad_batch(self, ds, batch_size, padded_shapes, padded_values):
        """Helper function that pads a batch."""
        return ds.padded_batch(batch_size, padded_shapes=padded_shapes,
                               padding_values=padded_values, drop_remainder=True)
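# The bucketing arithmetic in _length_bucket, reproduced in plain Python with the
# default hparams from add_arguments above (min_bucket_length=20.0,
# max_bucket_length=60.0, num_buckets=8.0); lengths below the minimum clip to
# bucket 0, lengths above the maximum clip to bucket num_buckets + 1:
def example_bucket_id(length, lo=20.0, hi=60.0, n=8.0):
    cast = (hi - lo) / n  # width of one bucket (5 tokens here)
    return int(max(0.0, min(n + 1, length / cast - lo / cast + 1)))

print([example_bucket_id(l) for l in (10, 20, 35, 60, 100)])  # [0, 1, 4, 9, 9]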
class InputPipelineWithFeatures(InputPipeline):
    """Input pipeline class with additional molecular feature output. Iterates through a
    tf-record file to produce inputs for training the translation model.

    Attributes:
        mode: The mode the model is supposed to run (e.g. TRAIN).
        batch_size: Number of samples per batch.
        buffer_size: Number of samples in the shuffle buffer.
        input_sequence_key: Identifier of the input_sequence feature in the
            tf-record file.
        output_sequence_key: Identifier of the output_sequence feature in the
            tf-record file.
        encode_vocabulary: Dictionary that maps integers to unique tokens of the
            input strings.
        decode_vocabulary: Dictionary that maps integers to unique tokens of the
            output strings.
        num_buckets: Number of buckets for batching together sequences of
            similar length.
        min_bucket_lenght: All sequences below this length are put in the
            same bucket.
        max_bucket_lenght: All sequences above this length are put in the
            same bucket.
        regex_pattern_input: Expression to tokenize the input sequence with.
        regex_pattern_output: Expression to tokenize the output sequence with.
    """

    def __init__(self, mode, hparams):
        """Constructor for input pipeline class with features.

        Args:
            mode: The mode the model is supposed to run (e.g. TRAIN).
            hparams: Hyperparameters defined in file or flags.
        Returns:
            None
        """
        super().__init__(mode, hparams)
        self.features_key = 'mol_features'
        self.num_features = hparams.num_features

    def make_dataset_and_iterator(self):
        """Method that builds a TFRecordDataset and creates an iterator."""
        self.dataset = tf.data.TFRecordDataset(self.file)
        self.dataset = self.dataset.map(self._parse_element, num_parallel_calls=32)
        if self.mode == 'TRAIN':
            self.dataset = self.dataset.repeat()
        self.dataset = self.dataset.map(
            lambda element: tf.py_func(
                self._process_element,
                [element[self.input_sequence_key], element[self.output_sequence_key],
                 element[self.features_key]],
                [tf.int32, tf.int32, tf.int32, tf.int32, tf.float32],
            ),
            num_parallel_calls=32,
        )
        self.dataset = self.dataset.apply(
            tf.contrib.data.group_by_window(
                key_func=lambda in_seq, out_seq, in_len, out_len, feat: self._length_bucket(in_len),
                reduce_func=lambda key, ds: self._pad_batch(
                    ds,
                    self.batch_size,
                    ([None], [None], [1], [1], [self.num_features]),
                    (self.encode_vocabulary['</s>'], self.decode_vocabulary['</s>'], 0, 0, 0.0),
                ),
                window_size=self.batch_size,
            )
        )
        if self.mode == 'TRAIN':
            self.dataset = self.dataset.shuffle(buffer_size=self.buffer_size)
        self.iterator = self.dataset.make_initializable_iterator()

    def _parse_element(self, example_proto):
        """Method that parses an element from a tf-record file."""
        feature_dict = {
            self.input_sequence_key: tf.FixedLenFeature([], tf.string),
            self.output_sequence_key: tf.FixedLenFeature([], tf.string),
            self.features_key: tf.FixedLenFeature([self.num_features], tf.float32),
        }
        parsed_features = tf.parse_single_example(example_proto, feature_dict)
        element = {name: parsed_features[name] for name in list(feature_dict.keys())}
        return element

    def _process_element(self, input_seq, output_seq, features):
        """Method that tokenizes input and output sequence and pads them with start and stop tokens.

        Args:
            input_seq: Input sequence.
            output_seq: Target sequence.
        Returns:
            Array with ids of each token in the tokenized input sequence.
            Array with ids of each token in the tokenized output sequence.
            Array with length of the input sequence.
            Array with length of the output sequence.
            Array with molecular features.
        """
        input_seq = input_seq.decode('ascii')
        output_seq = output_seq.decode('ascii')
        input_seq = np.array(self._char_to_idx(input_seq, self.regex_pattern_input, self.encode_vocabulary)).astype(np.int32)
        output_seq = np.array(self._char_to_idx(output_seq, self.regex_pattern_output, self.decode_vocabulary)).astype(np.int32)
        input_seq = self._pad_start_end_token(input_seq, self.encode_vocabulary)
        output_seq = self._pad_start_end_token(output_seq, self.decode_vocabulary)
        input_seq_len = np.array([len(input_seq)]).astype(np.int32)
        output_seq_len = np.array([len(output_seq)]).astype(np.int32)
        return input_seq, output_seq, input_seq_len, output_seq_len, features
class InputPipelineInferEncode():
    """Class that creates a python generator for a list of sequences. Used to feed
    sequences to the encoding part during inference time.

    Attributes:
        seq_list: List with sequences to iterate over.
        batch_size: Number of samples to output per iterator call.
        encode_vocabulary: Dictionary that maps integers to unique tokens of the
            input strings.
        input_sequence_key: Identifier of the input_sequence feature in the
            tf-record file.
        regex_pattern_input: Expression to tokenize the input sequence with.
    """

    def __init__(self, seq_list, hparams):
        """Constructor for the inference input pipeline class.

        Args:
            seq_list: List with sequences to iterate over.
            hparams: Hyperparameters defined in file or flags.
        Returns:
            None
        """
        self.seq_list = seq_list
        self.batch_size = hparams.batch_size
        self.encode_vocabulary = {
            v: k for k, v in np.load(hparams.encode_vocabulary_file, allow_pickle=True).item().items()
        }
        self.input_sequence_key = hparams.input_sequence_key
        if 'inchi' in self.input_sequence_key:
            self.regex_pattern_input = REGEX_INCHI
        elif 'smiles' in self.input_sequence_key:
            self.regex_pattern_input = REGEX_SML
        else:
            raise ValueError('Could not understand the input type. SMILES or INCHI?')

    def _input_generator(self):
        """Function that defines the generator."""
        l = len(self.seq_list)
        for ndx in range(0, l, self.batch_size):
            samples = self.seq_list[ndx:min(ndx + self.batch_size, l)]
            samples = [self._seq_to_idx(seq) for seq in samples]
            seq_len_batch = np.array([len(entry) for entry in samples])
            max_length = seq_len_batch.max()
            # right-pad every sequence in the batch with the stop token
            seq_batch = np.concatenate(
                [np.expand_dims(np.append(seq, np.array([self.encode_vocabulary['</s>']] * (max_length - len(seq)))), 0)
                 for seq in samples]
            ).astype(np.int32)
            yield seq_batch, seq_len_batch

    def initilize(self):
        """Helper function to initialize the generator."""
        self.generator = self._input_generator()

    def get_next(self):
        """Helper function to get the next batch from the iterator."""
        return next(self.generator)

    def _char_to_idx(self, seq):
        """Helper function to tokenize a sequence.

        Args:
            seq: Sequence to tokenize.
        Returns:
            List with ids of the tokens in the tokenized sequence.
        """
        char_list = re.findall(self.regex_pattern_input, seq)
        return [self.encode_vocabulary[char_list[j]] for j in range(len(char_list))]

    def _seq_to_idx(self, seq):
        """Method that tokenizes a sequence and pads it with start and stop tokens.

        Args:
            seq: Sequence to tokenize.
        Returns:
            seq: List with ids of the tokens in the tokenized sequence.
        """
        seq = np.concatenate([
            np.array([self.encode_vocabulary['<s>']]),
            np.array(self._char_to_idx(seq)).astype(np.int32),
            np.array([self.encode_vocabulary['</s>']]),
        ]).astype(np.int32)
        return seq
class InputPipelineInferDecode():
    """Class that creates a python generator for arrays of embeddings (molecular descriptors).
    Used to feed embeddings to the decoding part during inference time.

    Attributes:
        embedding: Array with embeddings (molecular descriptors) (n_samples x n_features).
        batch_size: Number of samples to output per iterator call.
    """

    def __init__(self, embedding, hparams):
        """Constructor for the inference input pipeline class.

        Args:
            embedding: Array with embeddings (molecular descriptors) (n_samples x n_features).
            hparams: Hyperparameters defined in file or flags.
        Returns:
            None
        """
        self.embedding = embedding
        self.batch_size = hparams.batch_size

    def _input_generator(self):
        """Function that defines the generator."""
        l = len(self.embedding)
        for ndx in range(0, l, self.batch_size):
            samples = self.embedding[ndx:min(ndx + self.batch_size, l)]
            yield samples

    def initilize(self):
        """Helper function to initialize the generator."""
        self.generator = self._input_generator()

    def get_next(self):
        """Helper function to get the next batch from the iterator."""
        return next(self.generator)
def build_models(hparams, modes=['TRAIN', 'EVAL', 'ENCODE']):
    """Helper function to build a translation model for one or many different modes.

    Args:
        hparams: Hyperparameters defined in file or flags.
        modes: The mode the model is supposed to run (e.g. TRAIN, EVAL, ENCODE, DECODE).
            Can be a list if multiple models should be built.
    Returns:
        One model or a tuple of multiple models.
    """
    model = getattr(models, hparams.model)
    input_pipe = getattr(input_pipeline, hparams.input_pipeline)
    model_list = []
    if isinstance(modes, list):
        for mode in modes:
            model_list.append(create_model(mode, model, input_pipe, hparams))
        return tuple(model_list)
    else:
        model = create_model(modes, model, input_pipe, hparams)
        return model
def create_model(mode, model_creator, input_pipeline_creator, hparams):
    """Helper function to build a translation model for a certain mode.

    Args:
        mode: The mode the model is supposed to run (e.g. TRAIN, EVAL, ENCODE, DECODE).
        model_creator: Type of model class (e.g. NoisyGRUSeq2SeqWithFeatures).
        input_pipeline_creator: Type of input pipeline class (e.g. InputPipelineWithFeatures).
        hparams: Hyperparameters defined in file or flags.
    Returns:
        One model as named tuple with a graph, model and session object.
    """
    sess_config = tf.ConfigProto(
        allow_soft_placement=hparams.allow_soft_placement,
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=hparams.gpu_mem_frac),
        inter_op_parallelism_threads=hparams.cpu_threads,
        intra_op_parallelism_threads=hparams.cpu_threads,
    )
    tf.reset_default_graph()
    graph = tf.Graph()
    with graph.as_default():
        # only TRAIN and EVAL read from tf-records; ENCODE/DECODE are fed at inference time
        if mode in ['TRAIN', 'EVAL']:
            input_pipe = input_pipeline_creator(mode, hparams)
            input_pipe.make_dataset_and_iterator()
            iterator = input_pipe.iterator
        else:
            iterator = None
        model = model_creator(mode=mode, iterator=iterator, hparams=hparams)
        model.build_graph()
    sess = tf.Session(graph=graph, config=sess_config)
    return Model(graph=graph, model=model, sess=sess)
def add_arguments(parser):
    """Helper function to fill the parser object.

    Args:
        parser: Parser object
    Returns:
        None
    """
    parser.add_argument('-i', '--input', help='input file. Either .smi or .csv file.', type=str)
    parser.add_argument('-o', '--output', help='output .csv file with a descriptor for each SMILES per row.', type=str)
    parser.add_argument('--smiles_header', help='if .csv, specify the name of the SMILES column header here.', default='smiles', type=str)
    parser.add_argument('--preprocess', dest='preprocess', action='store_true')
    parser.add_argument('--no-preprocess', dest='preprocess', action='store_false')
    parser.set_defaults(preprocess=True)
    parser.add_argument('--model_dir', default=_default_model_dir, type=str)
    parser.add_argument('--use_gpu', dest='gpu', action='store_true')
    parser.set_defaults(gpu=False)
    parser.add_argument('--device', default='2', type=str)
    parser.add_argument('--cpu_threads', default=5, type=int)
    parser.add_argument('--batch_size', default=512, type=int)
def read_input(file):
    """Function that reads the provided file into a pandas dataframe.

    Args:
        file: File to read.
    Returns:
        pandas dataframe
    Raises:
        ValueError: If file is not a .smi or .csv file.
    """
    if file.endswith('.csv'):
        sml_df = pd.read_csv(file)
    elif file.endswith('.smi'):
        sml_df = pd.read_table(file, header=None).rename({0: FLAGS.smiles_header, 1: 'EXTREG'}, axis=1)
    else:
        raise ValueError('use .csv or .smi format...')
    return sml_df
def main(unused_argv):
    """Main function that extracts the continuous data-driven descriptors for a file of SMILES."""
    if FLAGS.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.device)
    model_dir = FLAGS.model_dir
    file = FLAGS.input
    df = read_input(file)
    if FLAGS.preprocess:
        print('start preprocessing SMILES...')
        df['new_smiles'] = df[FLAGS.smiles_header].map(preprocess_smiles)
        sml_list = df[~df.new_smiles.isna()].new_smiles.tolist()
        print('finished preprocessing SMILES!')
    else:
        sml_list = df[FLAGS.smiles_header].tolist()
    print('start calculating descriptors...')
    infer_model = InferenceModel(model_dir=model_dir, use_gpu=FLAGS.gpu,
                                 batch_size=FLAGS.batch_size, cpu_threads=FLAGS.cpu_threads)
    descriptors = infer_model.seq_to_emb(sml_list)
    print('finished calculating descriptors! %d out of %d input SMILES could be interpreted'
          % (len(sml_list), len(df)))
    if FLAGS.preprocess:
        df = df.join(pd.DataFrame(descriptors, index=df[~df.new_smiles.isna()].index,
                                  columns=['cddd_' + str(i + 1) for i in range(512)]))
    else:
        df = df.join(pd.DataFrame(descriptors, index=df.index,
                                  columns=['cddd_' + str(i + 1) for i in range(512)]))
    print('writing descriptors to file...')
    df.to_csv(FLAGS.output)
def main_wrapper():
    global FLAGS
    PARSER = argparse.ArgumentParser()
    add_arguments(PARSER)
    FLAGS, UNPARSED = PARSER.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + UNPARSED)
def train_loop(train_model, eval_model, encoder_model, hparams):
    """Main training loop function for training and evaluating.

    Args:
        train_model: The model used for training.
        eval_model: The model used for evaluating the translation accuracy.
        encoder_model: The model used for evaluating the QSAR modeling performance.
        hparams: Hyperparameters defined in file or flags.
    Returns:
        None
    """
    qsar_process = []
    with train_model.graph.as_default():
        train_model.sess.run(train_model.model.iterator.initializer)
        step = train_model.model.initilize(train_model.sess, overwrite_saves=hparams.overwrite_saves)
    hparams_file_name = FLAGS.hparams_file_name
    if hparams_file_name is None:
        hparams_file_name = os.path.join(hparams.save_dir, 'hparams.json')
    with open(hparams_file_name, 'w') as outfile:
        json.dump(hparams.to_json(), outfile)
    while step < hparams.num_steps:
        with train_model.graph.as_default():
            step = train_model.model.train(train_model.sess)
        if (step % hparams.summary_freq) == 0:
            with train_model.graph.as_default():
                train_model.model.save(train_model.sess)
            with eval_model.graph.as_default():
                eval_model.model.restore(eval_model.sess)
                eval_model.sess.run(eval_model.model.iterator.initializer)
                eval_reconstruct(eval_model, step, hparams)
        if (step % hparams.inference_freq) == 0:
            with encoder_model.graph.as_default():
                qsar_process.append(parallel_eval_qsar(encoder_model, step, hparams))
    for process in qsar_process:
        process.join()
def main(unused_argv):
    """Main function that trains and evaluates the translation model."""
    hparams = create_hparams(FLAGS)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(hparams.device)
    train_model, eval_model, encode_model = build_models(hparams)
    train_loop(train_model, eval_model, encode_model, hparams)
def add_arguments(parser):
    """Helper function to fill the parser object.

    Args:
        parser: Parser object
    Returns:
        None
    """
    parser.add_argument('--model_dir', default=_default_model_dir, type=str)
    parser.add_argument('--use_gpu', dest='gpu', action='store_true')
    parser.set_defaults(gpu=False)
    parser.add_argument('--device', default='0', type=str)
    parser.add_argument('--cpu_threads', default=5, type=int)
def main(unused_argv):
    """Main function to test the performance of the translation model to extract
    meaningful features for QSAR modelling."""
    if FLAGS.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.device)
        print('use gpu {}'.format(str(FLAGS.device)))
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    model_dir = FLAGS.model_dir
    infer_model = InferenceModel(model_dir, use_gpu=FLAGS.gpu, cpu_threads=FLAGS.cpu_threads)
    ames_df = pd.read_csv('ames.csv')
    ames_smls = ames_df.smiles.tolist()
    ames_labels = ames_df.label.values
    ames_fold = ames_df.fold.values
    print('Extracting molecular descriptors for Ames')
    ames_emb = infer_model.seq_to_emb(ames_smls)
    ames_emb = (ames_emb - ames_emb.mean()) / ames_emb.std()
    lipo_df = pd.read_csv('lipo.csv')
    lipo_smls = lipo_df.smiles.tolist()
    lipo_labels = lipo_df.label.values
    lipo_fold = lipo_df.fold.values
    print('Extracting molecular descriptors for Lipophilicity')
    lipo_emb = infer_model.seq_to_emb(lipo_smls)
    lipo_emb = (lipo_emb - lipo_emb.mean()) / lipo_emb.std()
    print('Running SVM on Ames mutagenicity...')
    clf = SVC(C=5.0)
    result = cross_val_score(clf, ames_emb, ames_labels, groups=ames_fold,
                             cv=LeaveOneGroupOut(), n_jobs=5)
    print('Ames mutagenicity accuracy: %0.3f +/- %0.3f' % (np.mean(result), np.std(result)))
    print('Running SVM on Lipophilicity...')
    clf = SVR(C=5.0)
    result = cross_val_score(clf, lipo_emb, lipo_labels, groups=lipo_fold,
                             cv=LeaveOneGroupOut(), n_jobs=5)
    print('Lipophilicity r2: %0.3f +/- %0.3f' % (np.mean(result), np.std(result)))
def main():
    args = parser.parse_args()
    world_size = args.gpus
    if args.gpus:
        assert world_size <= torch.cuda.device_count(), (
            f'--gpus is too high (specified {world_size} gpus but only '
            f'{torch.cuda.device_count()} gpus are available)'
        )
        torch.cuda.empty_cache()
    if world_size > 1:
        _logger.info(f'Will use torch.nn.parallel.DistributedDataParallel() and {world_size} gpus',
                     color='purple')
        for rank in range(world_size):
            _logger.info(torch.cuda.get_device_name(rank), color='purple')
    elif world_size == 1:
        rank = 0
        _logger.info(f'Will use single-gpu: {torch.cuda.get_device_name(rank)}', color='purple')
    else:
        rank = 'cpu'
        _logger.info('Will use cpu', color='purple')
def customize_pipeline_test(config):
    config['batching']['bucket_by_sequence_length'] = False
    # for the pipeline test, restrict to a single dataset with a small batch size
    if 'delphes_pf_ttbar' in config['datasets']:
        config['train_test_datasets']['physical']['datasets'] = ['delphes_pf_ttbar']
    if 'cms_pf_ttbar' in config['datasets']:
        config['train_test_datasets']['physical']['datasets'] = ['cms_pf_ttbar']
        config['train_test_datasets'] = {'physical': config['train_test_datasets']['physical']}
        config['train_test_datasets']['physical']['batch_per_gpu'] = 2
        config['validation_dataset'] = 'cms_pf_ttbar'
        config['evaluation_datasets'] = {'cms_pf_ttbar': {'batch_size': 2, 'num_events': -1}}
    if 'clic_edm_ttbar_pf' in config['datasets']:
        config['train_test_datasets']['physical']['datasets'] = ['clic_edm_ttbar_pf']
        config['train_test_datasets'] = {'physical': config['train_test_datasets']['physical']}
        config['train_test_datasets']['physical']['batch_per_gpu'] = 5
        config['validation_dataset'] = 'clic_edm_ttbar_pf'
        config['validation_batch_size'] = 5
        config['evaluation_datasets'] = {'clic_edm_ttbar_pf': {'batch_size': 5, 'num_events': -1}}
    if 'clic_edm_ttbar_hits_pf' in config['datasets']:
        config['train_test_datasets']['physical']['datasets'] = ['clic_edm_ttbar_hits_pf']
        config['train_test_datasets'] = {'physical': config['train_test_datasets']['physical']}
        config['train_test_datasets']['physical']['batch_per_gpu'] = 1
        config['validation_dataset'] = 'clic_edm_ttbar_hits_pf'
        config['validation_batch_size'] = 1
        config['evaluation_datasets'] = {'clic_edm_ttbar_hits_pf': {'batch_size': 1, 'num_events': -1}}
    config['validation_num_events'] = config['validation_batch_size'] * 2
    config['parameters']['num_graph_layers_id'] = 1
    config['parameters']['num_graph_layers_cls'] = 1
    return config
def submit(config):
    crabCommand('submit', config=config)
    with open(config.General.workArea + '/crab_' + config.General.requestName + '/crab_config.py', 'w') as fi:
        fi.write(config.pythonise_())
def map_pdgid_to_candid(pdgid, charge):
    # photons (22), electrons (11) and muons (13) keep their PDG id
    if pdgid in [22, 11, 13]:
        return pdgid
    # other charged particles -> charged hadron (211), neutrals -> neutral hadron (130)
    if abs(charge) > 0:
        return 211
    return 130
def deltar_pairs(eta_vec, phi_vec, dr_cut):
    deta = np.abs(np.subtract.outer(eta_vec, eta_vec))
    dphi = np.mod(np.subtract.outer(phi_vec, phi_vec) + np.pi, 2 * np.pi) - np.pi
    dr2 = deta ** 2 + dphi ** 2
    # keep only the lower triangle to avoid duplicate (i, j)/(j, i) pairs,
    # then mark the zeroed entries so they fail the cut
    dr2 *= np.tri(*dr2.shape)
    dr2[dr2 == 0] = 999
    ind_pairs = np.where(dr2 < dr_cut)
    return ind_pairs
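# Worked example (illustrative data): three particles, the first two close in
# eta-phi. Note that dr_cut is compared against the squared deltaR, which is how
# merge_closeby_particles below passes its deltar_cut.
eta = np.array([0.00, 0.05, 2.00])
phi = np.array([0.00, 0.05, 1.00])
i0, i1 = deltar_pairs(eta, phi, dr_cut=0.1 ** 2)
print(i0, i1)  # [1] [0] -> particles 1 and 0 form a close pair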
def get_charge(pid):
    abs_pid = abs(pid)
    # neutral hadrons, photons and HF candidates carry no charge
    if pid in [130, 22, 1, 2]:
        return 0.0
    # leptons: charge is opposite in sign to the PDG id
    elif abs_pid in [11, 13]:
        return -math.copysign(1.0, pid)
    # charged pions: charge has the same sign as the PDG id
    elif abs_pid in [211]:
        return math.copysign(1.0, pid)
    else:
        raise Exception('Unknown pid: ', pid)
def draw_event(g):
    pos = {}
    for node in g.nodes:
        pos[node] = (g.nodes[node]['eta'], g.nodes[node]['phi'])
    fig = plt.figure(figsize=(10, 10))
    # detector elements
    nodes_to_draw = [n for n in g.nodes if n[0] == 'elem']
    nx.draw_networkx(g, pos=pos, with_labels=False, node_size=5, nodelist=nodes_to_draw,
                     edgelist=[], node_color='red', node_shape='s', alpha=0.5)
    # PF candidates
    nodes_to_draw = [n for n in g.nodes if n[0] == 'pfcand']
    nx.draw_networkx(g, pos=pos, with_labels=False, node_size=10, nodelist=nodes_to_draw,
                     edgelist=[], node_color='green', node_shape='x', alpha=0.5)
    # truth-level particles ('sc' or 'tp')
    nodes_to_draw = [n for n in g.nodes if (n[0] == 'sc') or (n[0] == 'tp')]
    nx.draw_networkx(g, pos=pos, with_labels=False, node_size=1, nodelist=nodes_to_draw,
                     edgelist=[], node_color='blue', node_shape='.', alpha=0.5)
    edges_to_draw = [e for e in g.edges if e[0] in nodes_to_draw]
    nx.draw_networkx_edges(g, pos, edgelist=edges_to_draw, arrows=False, alpha=0.1)
    plt.xlim(-6, 6)
    plt.ylim(-4, 4)
    plt.tight_layout()
    plt.axis('on')
    return fig
def merge_closeby_particles(g, pid=22, deltar_cut=0.001):
    # collect all truth particles of the given pid
    photons = [elem for elem in g.nodes
               if g.nodes[elem]['typ'] == pid and (elem[0] == 'tp' or elem[0] == 'sc')]
    phot_eta = [g.nodes[node]['eta'] for node in photons]
    phot_phi = [g.nodes[node]['phi'] for node in photons]
    pairs_0, pairs_1 = deltar_pairs(phot_eta, phot_phi, deltar_cut)
    merge_pairs = [(photons[p0], photons[p1]) for p0, p1 in zip(pairs_0, pairs_1)]
    for pair in merge_pairs:
        if (pair[0] in g.nodes) and (pair[1] in g.nodes):
            # sum the four-momenta of the pair into the first particle
            lv = vector.obj(pt=0, eta=0, phi=0, E=0)
            for gp in pair:
                lv += vector.obj(pt=g.nodes[gp]['pt'], eta=g.nodes[gp]['eta'],
                                 phi=g.nodes[gp]['phi'], E=g.nodes[gp]['e'])
            g.nodes[pair[0]]['pt'] = lv.pt
            g.nodes[pair[0]]['eta'] = lv.eta
            g.nodes[pair[0]]['phi'] = lv.phi
            g.nodes[pair[0]]['e'] = lv.energy
            # transfer the edges of the removed particle to the kept one
            for suc in g.successors(pair[1]):
                if (pair[0], suc) in g.edges:
                    g.edges[(pair[0], suc)]['weight'] += g.edges[(pair[1], suc)]['weight']
            g.remove_nodes_from([pair[1]])
def cleanup_graph(g, node_energy_threshold=0.1, edge_energy_threshold=0.05): g = g.copy() nodes_to_remove = [] for node in g.nodes: if ((node[0] == 'sc') or (node[0] == 'tp')): sw = 0.0 for edge in g.edges(node): sw += g.edges[edge]['weight'] if ((sw / g.nodes[node]['e']) < node_energy_threshold): nodes_to_remove += [node] g.remove_nodes_from(nodes_to_remove) edges_to_remove = [] for node in g.nodes: if (node[0] == 'elem'): ew = [((gen, node), g.edges[(gen, node)]['weight']) for gen in g.predecessors(node)] ew = sorted(ew, key=(lambda x: x[1]), reverse=True) for (edge, weight) in ew: if ((weight / g.nodes[edge[0]]['e']) < edge_energy_threshold): edges_to_remove += [edge] g.remove_edges_from(edges_to_remove) nodes_to_remove = [] for node in g.nodes: if ((node[0] == 'sc') or (node[0] == 'tp')): deg = g.degree[node] if (deg == 0): nodes_to_remove += [node] g.remove_nodes_from(nodes_to_remove) for node in g.nodes: if ((node[0] == 'sc') or (node[0] == 'tp')): E_track = 0.0 E_calo = 0.0 E_other = 0.0 E_hf = 0.0 E_hfem = 0.0 E_hfhad = 0.0 g.nodes[node]['typ'] = map_pdgid_to_candid(abs(g.nodes[node]['typ']), g.nodes[node]['charge']) for suc in g.successors(node): elem_type = g.nodes[suc]['typ'] if (elem_type in [1, 6]): E_track += g.edges[(node, suc)]['weight'] elif (elem_type in [4, 5, 10, 11]): E_calo += g.edges[(node, suc)]['weight'] elif (elem_type in [8, 9]): if (elem_type == 8): E_hfem += g.edges[(node, suc)]['weight'] elif (elem_type == 9): E_hfhad += g.edges[(node, suc)]['weight'] E_hf += g.edges[(node, suc)]['weight'] else: E_other += g.edges[(node, suc)]['weight'] g.nodes[node]['E_track'] = E_track g.nodes[node]['E_calo'] = E_calo g.nodes[node]['E_other'] = E_other g.nodes[node]['E_hf'] = E_hf g.nodes[node]['E_hfem'] = E_hfem g.nodes[node]['E_hfhad'] = E_hfhad for node in g.nodes: if ((node[0] == 'sc') or (node[0] == 'tp')): tracks = [] for suc in g.successors(node): typ = g.nodes[suc]['typ'] if ((typ == 1) or (typ == 6)): tracks.append(suc) if (len(tracks) > 1): n0 = g.nodes[node] drs = [] for tr in tracks: n1 = g.nodes[tr] deta = np.abs((n0['eta'] - n1['eta'])) dphi = (np.mod(((n0['phi'] - n1['phi']) + np.pi), (2 * np.pi)) - np.pi) dr2 = ((deta ** 2) + (dphi ** 2)) drs.append(dr2) imin = np.argmin(drs) for itr in range(len(tracks)): if (itr != imin): g.edges[(node, tracks[itr])]['weight'] = 0.0 for node in g.nodes: if ((node[0] == 'sc') or (node[0] == 'tp')): typ = g.nodes[node]['typ'] if ((typ in [211, 13]) and (g.nodes[node]['E_track'] == 0)): g.nodes[node]['typ'] = 130 g.nodes[node]['charge'] = 0 if ((typ in [11]) and (g.nodes[node]['E_track'] == 0)): g.nodes[node]['typ'] = 22 g.nodes[node]['charge'] = 0 if ((g.nodes[node]['E_track'] == 0) and (g.nodes[node]['E_calo'] == 0) and (g.nodes[node]['E_other'] == 0) and (g.nodes[node]['E_hf'] > 0)): if (g.nodes[node]['E_hfhad'] > g.nodes[node]['E_hfem']): g.nodes[node]['typ'] = 1 g.nodes[node]['charge'] = 0 else: g.nodes[node]['typ'] = 2 g.nodes[node]['charge'] = 0 for node in g.nodes: if ((node[0] == 'sc') or (node[0] == 'tp')): nd = g.nodes[node] if ((nd['pt'] < 1.0) and ((abs(nd['typ']) == 11) or (abs(nd['typ']) == 13))): if (g.nodes[node]['E_track'] > g.nodes[node]['E_calo']): g.nodes[node]['typ'] = 211 else: if (abs(nd['typ']) == 11): g.nodes[node]['typ'] = 22 else: g.nodes[node]['typ'] = 130 g.nodes[node]['charge'] = 0 merge_closeby_particles(g, 22) merge_closeby_particles(g, 130) merge_closeby_particles(g, 1) merge_closeby_particles(g, 2) return g
def prepare_normalized_table(g, genparticle_energy_threshold=0.2): all_genparticles = [] all_elements = [] all_pfcandidates = [] for node in g.nodes: if (node[0] == 'elem'): all_elements += [node] for parent in g.predecessors(node): all_genparticles += [parent] elif (node[0] == 'pfcand'): all_pfcandidates += [node] all_genparticles = list(set(all_genparticles)) all_elements = sorted(all_elements) elem_to_gp = {} unmatched_gp = [] for gp in sorted(all_genparticles, key=(lambda x: g.nodes[x]['e']), reverse=True): elems = [e for e in g.successors(gp)] elems_sorted = sorted([(g.edges[(gp, e)]['weight'], e) for e in elems], key=(lambda x: x[0]), reverse=True) chosen_elem = None for (weight, elem) in elems_sorted: if (not (elem in elem_to_gp)): chosen_elem = elem elem_to_gp[elem] = [] break if (chosen_elem is None): unmatched_gp += [gp] else: elem_to_gp[chosen_elem] += [gp] for gp in sorted(unmatched_gp, key=(lambda x: g.nodes[x]['e']), reverse=True): elems = [e for e in g.successors(gp)] elems_sorted = sorted([(g.edges[(gp, e)]['weight'], e) for e in elems], key=(lambda x: x[0]), reverse=True) (_, elem) = elems_sorted[0] elem_to_gp[elem] += [gp] unmatched_cand = [] elem_to_cand = {} for cand in sorted(all_pfcandidates, key=(lambda x: g.nodes[x]['e']), reverse=True): tp = g.nodes[cand]['typ'] neighbors = list(g.predecessors(cand)) chosen_elem = None if (tp in [211, 13, 11]): for elem in neighbors: tp_neighbor = g.nodes[elem]['typ'] if ((tp_neighbor == 1) or (tp_neighbor == 6)): if (not (elem in elem_to_cand)): chosen_elem = elem elem_to_cand[elem] = cand break else: sorted_neighbors = sorted(neighbors, key=(lambda x: g.edges[(x, cand)]['weight']), reverse=True) for elem in sorted_neighbors: if (not (elem in elem_to_cand)): chosen_elem = elem elem_to_cand[elem] = cand break if (chosen_elem is None): unmatched_cand += [cand] Xelem = np.recarray((len(all_elements),), dtype=[(name, np.float32) for name in elem_branches]) Xelem.fill(0.0) ygen = np.recarray((len(all_elements),), dtype=[(name, np.float32) for name in target_branches]) ygen.fill(0.0) ycand = np.recarray((len(all_elements),), dtype=[(name, np.float32) for name in target_branches]) ycand.fill(0.0) for (ielem, elem) in enumerate(all_elements): elem_type = g.nodes[elem]['typ'] genparticles = sorted(elem_to_gp.get(elem, []), key=(lambda x: g.edges[(x, elem)]['weight']), reverse=True) genparticles = [gp for gp in genparticles if (g.nodes[gp]['e'] > genparticle_energy_threshold)] candidate = elem_to_cand.get(elem, None) for j in range(len(elem_branches)): Xelem[elem_branches[j]][ielem] = g.nodes[elem][elem_branches[j]] if (not (candidate is None)): for j in range(len(target_branches)): ycand[target_branches[j]][ielem] = g.nodes[candidate][target_branches[j]] lv = vector.obj(x=0, y=0, z=0, t=0) if (len(genparticles) > 0): pid = g.nodes[genparticles[0]]['typ'] charge = g.nodes[genparticles[0]]['charge'] for gp in genparticles: lv += vector.obj(pt=g.nodes[gp]['pt'], eta=g.nodes[gp]['eta'], phi=g.nodes[gp]['phi'], e=g.nodes[gp]['e']) if ((elem_type == 5) and ((pid == 22) or (pid == 11))): pid = 130 if (elem_type in [8, 9]): if (pid == 130): pid = 1 elif (pid == 22): pid = 2 if (elem_type in [2, 3, 4, 5]): if (pid == 1): pid = 130 elif (pid == 2): pid = 22 gp = {'pt': lv.rho, 'eta': lv.eta, 'sin_phi': np.sin(lv.phi), 'cos_phi': np.cos(lv.phi), 'e': lv.t, 'typ': pid, 'px': lv.x, 'py': lv.y, 'pz': lv.z, 'charge': (charge if (pid in [211, 11, 13]) else 0)} for j in range(len(target_branches)): ygen[target_branches[j]][ielem] = gp[target_branches[j]] return (Xelem, ycand, ygen)
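# Usage sketch (assumption: g is a cleaned event graph from cleanup_graph). The three
# recarrays are aligned row-by-row: one row per PFElement, with the matched truth
# particle in ygen and the matched PFCandidate in ycand (zeros where nothing matched).
Xelem, ycand, ygen = prepare_normalized_table(g)
print(Xelem.shape, float(ygen['e'].sum()), float(ycand['e'].sum()))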
def make_graph(ev, iev): element_type = ev['element_type'][iev] element_pt = ev['element_pt'][iev] element_e = ev['element_energy'][iev] element_eta = ev['element_eta'][iev] element_phi = ev['element_phi'][iev] element_eta_ecal = ev['element_eta_ecal'][iev] element_phi_ecal = ev['element_phi_ecal'][iev] element_eta_hcal = ev['element_eta_hcal'][iev] element_phi_hcal = ev['element_phi_hcal'][iev] element_trajpoint = ev['element_trajpoint'][iev] element_layer = ev['element_layer'][iev] element_charge = ev['element_charge'][iev] element_depth = ev['element_depth'][iev] element_deltap = ev['element_deltap'][iev] element_sigmadeltap = ev['element_sigmadeltap'][iev] element_px = ev['element_px'][iev] element_py = ev['element_py'][iev] element_pz = ev['element_pz'][iev] element_sigma_x = ev['element_sigma_x'][iev] element_sigma_y = ev['element_sigma_y'][iev] element_sigma_z = ev['element_sigma_z'][iev] element_muon_dt_hits = ev['element_muon_dt_hits'][iev] element_muon_csc_hits = ev['element_muon_csc_hits'][iev] element_muon_type = ev['element_muon_type'][iev] element_gsf_electronseed_trkorecal = ev['element_gsf_electronseed_trkorecal'][iev] element_gsf_electronseed_dnn1 = ev['element_gsf_electronseed_dnn1'][iev] element_gsf_electronseed_dnn2 = ev['element_gsf_electronseed_dnn2'][iev] element_gsf_electronseed_dnn3 = ev['element_gsf_electronseed_dnn3'][iev] element_gsf_electronseed_dnn4 = ev['element_gsf_electronseed_dnn4'][iev] element_gsf_electronseed_dnn5 = ev['element_gsf_electronseed_dnn5'][iev] element_num_hits = ev['element_num_hits'][iev] element_cluster_flags = ev['element_cluster_flags'][iev] element_corr_energy = ev['element_corr_energy'][iev] element_corr_energy_err = ev['element_corr_energy_err'][iev] element_pterror = ev['element_pterror'][iev] element_etaerror = ev['element_etaerror'][iev] element_phierror = ev['element_phierror'][iev] element_lambda = ev['element_lambda'][iev] element_theta = ev['element_theta'][iev] element_lambdaerror = ev['element_lambdaerror'][iev] element_thetaerror = ev['element_thetaerror'][iev] element_vx = ev['element_vx'][iev] element_vy = ev['element_vy'][iev] element_vz = ev['element_vz'][iev] element_time = ev['element_time'][iev] element_timeerror = ev['element_timeerror'][iev] element_etaerror1 = ev['element_etaerror1'][iev] element_etaerror2 = ev['element_etaerror2'][iev] element_etaerror3 = ev['element_etaerror3'][iev] element_etaerror4 = ev['element_etaerror4'][iev] element_phierror1 = ev['element_phierror1'][iev] element_phierror2 = ev['element_phierror2'][iev] element_phierror3 = ev['element_phierror3'][iev] element_phierror4 = ev['element_phierror4'][iev] trackingparticle_pid = ev['trackingparticle_pid'][iev] trackingparticle_charge = ev['trackingparticle_charge'][iev] trackingparticle_pt = ev['trackingparticle_pt'][iev] trackingparticle_e = ev['trackingparticle_energy'][iev] trackingparticle_eta = ev['trackingparticle_eta'][iev] trackingparticle_phi = ev['trackingparticle_phi'][iev] trackingparticle_ev = ev['trackingparticle_ev'][iev] caloparticle_pid = ev['caloparticle_pid'][iev] caloparticle_charge = ev['caloparticle_charge'][iev] caloparticle_pt = ev['caloparticle_pt'][iev] caloparticle_e = ev['caloparticle_energy'][iev] caloparticle_eta = ev['caloparticle_eta'][iev] caloparticle_phi = ev['caloparticle_phi'][iev] caloparticle_ev = ev['caloparticle_ev'][iev] caloparticle_idx_trackingparticle = ev['caloparticle_idx_trackingparticle'][iev] pfcandidate_pdgid = ev['pfcandidate_pdgid'][iev] pfcandidate_pt = ev['pfcandidate_pt'][iev] pfcandidate_e = ev['pfcandidate_energy'][iev] pfcandidate_eta = ev['pfcandidate_eta'][iev] pfcandidate_phi = ev['pfcandidate_phi'][iev] gen_pdgid = ev['gen_pdgid'][iev] gen_pt = ev['gen_pt'][iev] gen_e = ev['gen_energy'][iev] gen_eta = ev['gen_eta'][iev] gen_phi = ev['gen_phi'][iev] gen_status = ev['gen_status'][iev] g = nx.DiGraph() for iobj in range(len(element_type)): g.add_node(('elem', iobj), typ=element_type[iobj], pt=element_pt[iobj], e=element_e[iobj], eta=element_eta[iobj], phi=element_phi[iobj], eta_ecal=element_eta_ecal[iobj], phi_ecal=element_phi_ecal[iobj], eta_hcal=element_eta_hcal[iobj], phi_hcal=element_phi_hcal[iobj], trajpoint=element_trajpoint[iobj], layer=element_layer[iobj], charge=element_charge[iobj], depth=element_depth[iobj], deltap=element_deltap[iobj], sigmadeltap=element_sigmadeltap[iobj], px=element_px[iobj], py=element_py[iobj], pz=element_pz[iobj], sigma_x=element_sigma_x[iobj], sigma_y=element_sigma_y[iobj], sigma_z=element_sigma_z[iobj], muon_dt_hits=element_muon_dt_hits[iobj], muon_csc_hits=element_muon_csc_hits[iobj], muon_type=element_muon_type[iobj], gsf_electronseed_trkorecal=element_gsf_electronseed_trkorecal[iobj], gsf_electronseed_dnn1=element_gsf_electronseed_dnn1[iobj], gsf_electronseed_dnn2=element_gsf_electronseed_dnn2[iobj], gsf_electronseed_dnn3=element_gsf_electronseed_dnn3[iobj], gsf_electronseed_dnn4=element_gsf_electronseed_dnn4[iobj], gsf_electronseed_dnn5=element_gsf_electronseed_dnn5[iobj], num_hits=element_num_hits[iobj], cluster_flags=element_cluster_flags[iobj], corr_energy=element_corr_energy[iobj], corr_energy_err=element_corr_energy_err[iobj], pterror=element_pterror[iobj], etaerror=element_etaerror[iobj], phierror=element_phierror[iobj], lambd=element_lambda[iobj], theta=element_theta[iobj], lambdaerror=element_lambdaerror[iobj], thetaerror=element_thetaerror[iobj], vx=element_vx[iobj], vy=element_vy[iobj], vz=element_vz[iobj], time=element_time[iobj], timeerror=element_timeerror[iobj], etaerror1=element_etaerror1[iobj], etaerror2=element_etaerror2[iobj], etaerror3=element_etaerror3[iobj], etaerror4=element_etaerror4[iobj], phierror1=element_phierror1[iobj], phierror2=element_phierror2[iobj], phierror3=element_phierror3[iobj], phierror4=element_phierror4[iobj]) for iobj in range(len(gen_pdgid)): g.add_node(('gen', iobj), typ=gen_pdgid[iobj], pt=gen_pt[iobj], e=gen_e[iobj], eta=gen_eta[iobj], phi=gen_phi[iobj], status=gen_status[iobj]) for iobj in range(len(trackingparticle_pid)): g.add_node(('tp', iobj), typ=trackingparticle_pid[iobj], charge=trackingparticle_charge[iobj], pt=trackingparticle_pt[iobj], e=trackingparticle_e[iobj], eta=trackingparticle_eta[iobj], phi=trackingparticle_phi[iobj], ispu=(trackingparticle_ev[iobj] != 0)) for iobj in range(len(caloparticle_pid)): g.add_node(('sc', iobj), typ=caloparticle_pid[iobj], charge=caloparticle_charge[iobj], pt=caloparticle_pt[iobj], e=caloparticle_e[iobj], eta=caloparticle_eta[iobj], phi=caloparticle_phi[iobj], ispu=(caloparticle_ev[iobj] != 0)) for iobj in range(len(pfcandidate_pdgid)): g.add_node(('pfcand', iobj), typ=abs(pfcandidate_pdgid[iobj]), pt=pfcandidate_pt[iobj], e=pfcandidate_e[iobj], eta=pfcandidate_eta[iobj], sin_phi=np.sin(pfcandidate_phi[iobj]), cos_phi=np.cos(pfcandidate_phi[iobj]), charge=get_charge(pfcandidate_pdgid[iobj])) trackingparticle_to_element_first = ev['trackingparticle_to_element.first'][iev] trackingparticle_to_element_second = ev['trackingparticle_to_element.second'][iev] trackingparticle_to_element_cmp = ev['trackingparticle_to_element_cmp'][iev] for (tp, elem, c) in zip(trackingparticle_to_element_first, trackingparticle_to_element_second, trackingparticle_to_element_cmp): if (not (g.nodes[('elem', elem)]['typ'] in [7])): g.add_edge(('tp', tp), ('elem', elem), weight=float('inf')) caloparticle_to_element_first = ev['caloparticle_to_element.first'][iev] caloparticle_to_element_second = ev['caloparticle_to_element.second'][iev] caloparticle_to_element_cmp = ev['caloparticle_to_element_cmp'][iev] for (sc, elem, c) in zip(caloparticle_to_element_first, caloparticle_to_element_second, caloparticle_to_element_cmp): if (not (g.nodes[('elem', elem)]['typ'] in [7])): g.add_edge(('sc', sc), ('elem', elem), weight=c) nodes_to_remove = [] for (idx_sc, idx_tp) in enumerate(caloparticle_idx_trackingparticle): if (idx_tp != (- 1)): for elem in g.neighbors(('sc', idx_sc)): g.add_edge(('tp', idx_tp), elem, weight=g.edges[(('sc', idx_sc), elem)]['weight']) g.nodes[('tp', idx_tp)]['idx_sc'] = idx_sc nodes_to_remove += [('sc', idx_sc)] g.remove_nodes_from(nodes_to_remove) element_to_candidate_first = ev['element_to_candidate.first'][iev] element_to_candidate_second = ev['element_to_candidate.second'][iev] for (elem, pfcand) in zip(element_to_candidate_first, element_to_candidate_second): g.add_edge(('elem', elem), ('pfcand', pfcand), weight=1.0) return g
def gen_e(g): etot_gen = 0.0 etot_pf = 0.0 for node in g.nodes: if ((node[0] == 'tp') or (node[0] == 'sc')): etot_gen += g.nodes[node]['e'] if (node[0] == 'pfcand'): etot_pf += g.nodes[node]['e'] return (etot_gen, etot_pf)
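# Sketch: coarse energy-closure check between truth particles and PFCandidates in one
# event graph (assumption: g comes from make_graph, e.g. after cleanup_graph).
etot_gen, etot_pf = gen_e(g)
print('gen/pf total energy ratio: {:.3f}'.format(etot_gen / etot_pf))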
def process(args): infile = args.input outpath = os.path.join(args.outpath, os.path.basename(infile).split('.')[0]) tf = uproot.open(infile) if ('ana' in tf): tt = tf['ana/pftree'] elif ('pfana' in tf): tt = tf['pfana/pftree'] else: raise Exception('Could not find the PFAnalysisNtuplizer TTree') if (args.num_events == (- 1)): args.num_events = tt.num_entries events_to_process = [i for i in range(args.num_events)] all_data = [] ev = tt.arrays(library='np') for iev in tqdm.tqdm(events_to_process): g = make_graph(ev, iev) g = cleanup_graph(g) (Xelem, ycand, ygen) = prepare_normalized_table(g) data = {} ptcls_pythia = [n for n in g.nodes if ((n[0] == 'gen') and (g.nodes[n]['status'] == 1))] feats = ['typ', 'pt', 'eta', 'phi', 'e'] arr_ptcls_pythia = np.array([[g.nodes[n][f] for f in feats] for n in ptcls_pythia]) if args.save_normalized_table: data = {'Xelem': Xelem, 'ycand': ycand, 'ygen': ygen, 'pythia': arr_ptcls_pythia} if args.save_full_graph: data['full_graph'] = g all_data += [data] with open((outpath + '.pkl'), 'wb') as fi: pickle.dump(all_data, fi)
def parse_args(): import argparse parser = argparse.ArgumentParser() parser.add_argument('--input', type=str, help='Input file from PFAnalysis', required=True) parser.add_argument('--outpath', type=str, default='raw', help='output path') parser.add_argument('--save-full-graph', action='store_true', help='save the full event graph') parser.add_argument('--save-normalized-table', action='store_true', help='save the uniquely identified table') parser.add_argument('--num-events', type=int, help='number of events to process', default=(- 1)) args = parser.parse_args() return args
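# Minimal entry-point sketch tying the pieces together (the upstream script may differ,
# e.g. by parallelizing over input files):
if __name__ == '__main__':
    process(parse_args())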
class ClicEdmQqPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'update stats, move to 380 GeV', '1.2.0': 'sin cos as separate features', '1.3.0': 'Update stats to ~1M events', '1.3.1': 'Update stats to ~2M events', '1.4.0': 'Fix ycand matching', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/clic_edm4hep/ ./\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmQqPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CL))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=np.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=np.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_cluster=X_FEATURES_CL, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'p8_ee_qq_ecm380/'))) def _generate_examples(self, files): return generate_examples(files)
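# Loading sketch for the CLIC builders in this file (assumptions: the builder module is
# registered/importable by tfds, and manual_dir points at the raw p8_ee_* folders or the
# pre-built dataset was rsynced into ~/tensorflow_datasets). tfds derives the dataset
# name from the class name, e.g. ClicEdmQqPf -> 'clic_edm_qq_pf'.
import tensorflow_datasets as tfds
builder = tfds.builder('clic_edm_qq_pf')
builder.download_and_prepare()
ds = builder.as_dataset(split='train')
for ex in ds.take(1):
    print(ex['X'].shape, ex['ygen'].shape, ex['ycand'].shape)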
class ClicEdmTtbarPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'update stats, move to 380 GeV', '1.2.0': 'sin/cos phi separately', '1.3.0': 'Update stats to ~1M events', '1.4.0': 'Fix ycand matching', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/clic_edm4hep/ ./\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmTtbarPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CL))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_cluster=X_FEATURES_CL, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'p8_ee_tt_ecm380/'))) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmTtbarPu10Pf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.3.0': 'Update stats to ~1M events', '1.4.0': 'Fix ycand matching', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/clic_edm4hep/ ./\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmTtbarPu10Pf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CL))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_cluster=X_FEATURES_CL, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'p8_ee_tt_ecm380_PU10/'))) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmWwFullhadPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.3.0': 'Update stats to ~1M events', '1.4.0': 'Fix ycand matching', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/clic_edm4hep/ ./\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmWwFullhadPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CL))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_cluster=X_FEATURES_CL, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'p8_ee_WW_fullhad_ecm380/'))) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmZhTautauPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.3.0': 'First version', '1.4.0': 'Fix ycand matching', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/clic_edm4hep/ ./\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmZhTautauPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CL))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_cluster=X_FEATURES_CL, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'p8_ee_ZH_Htautau_ecm380/'))) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmQqHitsPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'0.9.0': 'Small stats', '1.0.0': 'Initial release', '1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmQqHitsPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'p8_ee_qq_ecm380/'))) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmQqHitsPf10k(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from: https://zenodo.org/record/8414225\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmQqHitsPf10k, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'p8_ee_qq_ecm380/')), max_files=100) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmSingleElectronHitsPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmSingleElectronHitsPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample_several([Path((path / 'e-/')), Path((path / 'e+/'))]) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmSingleGammaHitsPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmSingleGammaHitsPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'gamma/'))) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmSingleKaon0lHitsPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmSingleKaon0lHitsPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'kaon0L/'))) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmSingleMuonHitsPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmSingleMuonHitsPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample_several([Path((path / 'mu-/')), Path((path / 'mu+/'))]) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmSingleNeutronHitsPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmSingleNeutronHitsPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'neutron/'))) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmSinglePiHitsPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmSinglePiHitsPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample_several([Path((path / 'pi-/')), Path((path / 'pi+/'))]) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmSinglePi0HitsPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmSinglePi0HitsPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'pi0/'))) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmTtbarHitsPf(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'0.9.0': 'Small stats', '1.0.0': 'Initial release', '1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmTtbarHitsPf, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'p8_ee_tt_ecm380/'))) def _generate_examples(self, files): return generate_examples(files)
class ClicEdmTtbarHitsPf10k(tfds.core.GeneratorBasedBuilder): VERSION = tfds.core.Version('1.5.0') RELEASE_NOTES = {'1.5.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow dataset can also be downloaded from: https://zenodo.org/record/8414225\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(ClicEdmTtbarHitsPf10k, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): path = dl_manager.manual_dir return split_sample(Path((path / 'p8_ee_tt_ecm380/')), max_files=100) def _generate_examples(self, files): return generate_examples(files)
class CmsPfMultiParticleGun(tfds.core.GeneratorBasedBuilder): 'DatasetBuilder for cms_pf_multi_particle_gun dataset.' VERSION = tfds.core.Version('1.6.1') RELEASE_NOTES = {'1.6.0': 'Initial release', '1.6.1': 'Additional stats'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_multi_particle_gun ~/tensorflow_datasets/\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(CmsPfMultiParticleGun, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): 'Returns SplitGenerators.' path = dl_manager.manual_dir sample_dir = 'MultiParticlePFGun50_cfi' return cms_utils.split_sample(((path / sample_dir) / 'raw')) def _generate_examples(self, files): return cms_utils.generate_examples(files)
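# The CMS builders below follow the same pattern; a loading sketch (assumption: the
# pre-built dataset was rsynced into ~/tensorflow_datasets per the instructions above;
# the snake_case name again follows the class name, e.g. CmsPfTtbar -> 'cms_pf_ttbar').
import tensorflow_datasets as tfds
ds = tfds.load('cms_pf_multi_particle_gun', split='train')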
class CmsPfQcd(tfds.core.GeneratorBasedBuilder): 'DatasetBuilder for cms_pf_qcd dataset.' VERSION = tfds.core.Version('1.6.0') RELEASE_NOTES = {'1.3.0': '12_2_0_pre2 generation with updated caloparticle/trackingparticle', '1.3.1': 'Remove PS again', '1.4.0': 'Add gen jet index information', '1.5.0': 'No padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_qcd ~/tensorflow_datasets/\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(CmsPfQcd, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): 'Returns SplitGenerators.' path = dl_manager.manual_dir sample_dir = 'QCDForPF_14TeV_TuneCUETP8M1_cfi' return cms_utils.split_sample(((path / sample_dir) / 'raw')) def _generate_examples(self, files): return cms_utils.generate_examples(files)
class CmsPfQcdHighPt(tfds.core.GeneratorBasedBuilder): 'DatasetBuilder for cms_pf_qcd_high_pt dataset.' VERSION = tfds.core.Version('1.6.0') RELEASE_NOTES = {'1.3.0': '12_2_0_pre2 generation with updated caloparticle/trackingparticle', '1.3.1': 'Remove PS again', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_qcd_high_pt ~/tensorflow_datasets/\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(CmsPfQcdHighPt, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): 'Returns SplitGenerators.' path = dl_manager.manual_dir sample_dir = 'QCD_Pt_3000_7000_14TeV_TuneCUETP8M1_cfi' return cms_utils.split_sample(((path / sample_dir) / 'raw')) def _generate_examples(self, files): return cms_utils.generate_examples(files)
class CmsPfSingleElectron(tfds.core.GeneratorBasedBuilder): 'DatasetBuilder for cms_pf_singleele dataset.' VERSION = tfds.core.Version('1.6.0') RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Initial release.', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_electron ~/tensorflow_datasets/\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(CmsPfSingleElectron, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): 'Returns SplitGenerators.' path = dl_manager.manual_dir sample_dir = 'SingleElectronFlatPt1To1000_pythia8_cfi' return cms_utils.split_sample(((path / sample_dir) / 'raw')) def _generate_examples(self, files): return cms_utils.generate_examples(files)
class CmsPfSingleGamma(tfds.core.GeneratorBasedBuilder): 'DatasetBuilder for cms_pf_singlegamma dataset.' VERSION = tfds.core.Version('1.6.0') RELEASE_NOTES = {'1.1.0': 'Initial release', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_gamma ~/tensorflow_datasets/\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(CmsPfSingleGamma, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): 'Returns SplitGenerators.' path = dl_manager.manual_dir sample_dir = 'SingleGammaFlatPt1To1000_pythia8_cfi' return cms_utils.split_sample(((path / sample_dir) / 'raw')) def _generate_examples(self, files): return cms_utils.generate_examples(files)
class CmsPfSingleMu(tfds.core.GeneratorBasedBuilder): 'DatasetBuilder for cms_pf_singlemu dataset.' VERSION = tfds.core.Version('1.6.0') RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Add muon type, fix electron GSF association', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_mu ~/tensorflow_datasets/\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(CmsPfSingleMu, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): 'Returns SplitGenerators.' path = dl_manager.manual_dir sample_dir = 'SingleMuFlatLogPt_100MeVto2TeV_cfi' return cms_utils.split_sample(((path / sample_dir) / 'raw')) def _generate_examples(self, files): return cms_utils.generate_examples(files)
class CmsPfSingleNeutron(tfds.core.GeneratorBasedBuilder): 'DatasetBuilder for cms_pf_singleneutron dataset.' VERSION = tfds.core.Version('1.6.0') RELEASE_NOTES = {'1.1.0': 'Initial release', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_neutron ~/tensorflow_datasets/\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(CmsPfSingleNeutron, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): 'Returns SplitGenerators.' path = dl_manager.manual_dir sample_dir = 'SingleNeutronFlatPt0p7To1000_cfi' return cms_utils.split_sample(((path / sample_dir) / 'raw')) def _generate_examples(self, files): return cms_utils.generate_examples(files)
class CmsPfSinglePi(tfds.core.GeneratorBasedBuilder): 'DatasetBuilder for cms_pf_singlepi dataset.' VERSION = tfds.core.Version('1.6.0') RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Add muon type, fix electron GSF association', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add genjet information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_pi ~/tensorflow_datasets/\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(CmsPfSinglePi, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): 'Returns SplitGenerators.' path = dl_manager.manual_dir sample_dir = 'SinglePiMinusFlatPt0p7To1000_cfi' return cms_utils.split_sample(((path / sample_dir) / 'raw')) def _generate_examples(self, files): return cms_utils.generate_examples(files)
class CmsPfSinglePi0(tfds.core.GeneratorBasedBuilder): 'DatasetBuilder for cms_pf_singlepi0 dataset.' VERSION = tfds.core.Version('1.6.0') RELEASE_NOTES = {'1.1.0': 'Initial release', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_pi0 ~/tensorflow_datasets/\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(CmsPfSinglePi0, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): 'Returns SplitGenerators.' path = dl_manager.manual_dir sample_dir = 'SinglePi0Pt1To1000_pythia8_cfi' return cms_utils.split_sample(((path / sample_dir) / 'raw')) def _generate_examples(self, files): return cms_utils.generate_examples(files)
class CmsPfSingleProton(tfds.core.GeneratorBasedBuilder): 'DatasetBuilder for cms_pf_singleproton dataset.' VERSION = tfds.core.Version('1.6.0') RELEASE_NOTES = {'1.1.0': 'Initial release', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'} MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_proton ~/tensorflow_datasets/\n ' def __init__(self, *args, **kwargs): kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD super(CmsPfSingleProton, self).__init__(*args, **kwargs) def _info(self) -> tfds.core.DatasetInfo: 'Returns the dataset metadata.' return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES)) def _split_generators(self, dl_manager: tfds.download.DownloadManager): 'Returns SplitGenerators.' path = dl_manager.manual_dir sample_dir = 'SingleProtonMinusFlatPt0p7To1000_cfi' return cms_utils.split_sample(((path / sample_dir) / 'raw')) def _generate_examples(self, files): return cms_utils.generate_examples(files)
class CmsPfSingleTau(tfds.core.GeneratorBasedBuilder):
    """DatasetBuilder for cms_pf_singletau dataset."""

    VERSION = tfds.core.Version('1.6.0')
    RELEASE_NOTES = {
        '1.1.0': 'Add muon type, fix electron GSF association',
        '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events',
        '1.4.0': 'Add genjet information',
        '1.5.0': 'Without padding',
        '1.5.1': 'Remove outlier caps',
        '1.6.0': 'Regenerate with ARRAY_RECORD',
    }
    MANUAL_DOWNLOAD_INSTRUCTIONS = """
    rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_tau ~/tensorflow_datasets/
    """

    def __init__(self, *args, **kwargs):
        kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
        super(CmsPfSingleTau, self).__init__(*args, **kwargs)

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict({
                'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32),
                'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32),
                'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32),
            }),
            supervised_keys=('X', 'ycand'),
            homepage='',
            citation=_CITATION,
            metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES),
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Returns SplitGenerators."""
        path = dl_manager.manual_dir
        sample_dir = 'SingleTauFlatPt1To1000_cfi'
        return cms_utils.split_sample(path / sample_dir / 'raw')

    def _generate_examples(self, files):
        return cms_utils.generate_examples(files)
class CmsPfSmsT1tttt(tfds.core.GeneratorBasedBuilder):
    """DatasetBuilder for cms_pf_sms_t1tttt dataset."""

    VERSION = tfds.core.Version('1.6.0')
    RELEASE_NOTES = {
        '1.6.0': 'Regenerate with ARRAY_RECORD',
    }
    MANUAL_DOWNLOAD_INSTRUCTIONS = """
    rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_sms_t1tttt ~/tensorflow_datasets/
    """

    def __init__(self, *args, **kwargs):
        kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
        super(CmsPfSmsT1tttt, self).__init__(*args, **kwargs)

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict({
                'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32),
                'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32),
                'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32),
            }),
            supervised_keys=('X', 'ycand'),
            homepage='',
            citation=_CITATION,
            metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES),
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Returns SplitGenerators."""
        path = dl_manager.manual_dir
        sample_dir = 'SMS-T1tttt_mGl-1500_mLSP-100_TuneCP5_14TeV_pythia8_cfi'
        return cms_utils.split_sample(path / sample_dir / 'raw')

    def _generate_examples(self, files):
        return cms_utils.generate_examples(files)
class CmsPfTtbar(tfds.core.GeneratorBasedBuilder):
    """DatasetBuilder for cms_pf_ttbar dataset."""

    VERSION = tfds.core.Version('1.6.0')
    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
        '1.1.0': 'Add muon type, fix electron GSF association',
        '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events',
        '1.3.0': '12_2_0_pre2 generation with updated caloparticle/trackingparticle',
        '1.3.1': 'Remove PS again',
        '1.4.0': 'Add gen jet index information',
        '1.5.0': 'No padding',
        '1.5.1': 'Remove outlier caps',
        '1.6.0': 'Regenerate with ARRAY_RECORD',
    }
    MANUAL_DOWNLOAD_INSTRUCTIONS = """
    rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_ttbar ~/tensorflow_datasets/
    """

    def __init__(self, *args, **kwargs):
        kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
        super(CmsPfTtbar, self).__init__(*args, **kwargs)

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict({
                'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32),
                'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32),
                'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32),
            }),
            supervised_keys=('X', 'ycand'),
            homepage='',
            citation=_CITATION,
            metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES),
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Returns SplitGenerators."""
        path = dl_manager.manual_dir
        sample_dir = 'TTbar_14TeV_TuneCUETP8M1_cfi'
        return cms_utils.split_sample(path / sample_dir / 'raw')

    def _generate_examples(self, files):
        return cms_utils.generate_examples(files)
class CmsPfZtt(tfds.core.GeneratorBasedBuilder):
    """DatasetBuilder for cms_pf_ztt dataset."""

    VERSION = tfds.core.Version('1.6.0')
    RELEASE_NOTES = {
        '1.3.0': '12_2_0_pre2 generation with updated caloparticle/trackingparticle',
        '1.3.1': 'Remove PS again',
        '1.4.0': 'Add gen jet index information',
        '1.5.0': 'No padding',
        '1.5.1': 'Remove outlier caps',
        '1.6.0': 'Regenerate with ARRAY_RECORD',
    }
    MANUAL_DOWNLOAD_INSTRUCTIONS = """
    rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_ztt ~/tensorflow_datasets/
    """

    def __init__(self, *args, **kwargs):
        kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
        super(CmsPfZtt, self).__init__(*args, **kwargs)

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict({
                'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32),
                'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32),
                'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32),
            }),
            supervised_keys=('X', 'ycand'),
            homepage='',
            citation=_CITATION,
            metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES),
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Returns SplitGenerators."""
        path = dl_manager.manual_dir
        sample_dir = 'ZTT_All_hadronic_14TeV_TuneCUETP8M1_cfi'
        return cms_utils.split_sample(path / sample_dir / 'raw')

    def _generate_examples(self, files):
        return cms_utils.generate_examples(files)
class DelphesQcdPf(tfds.core.GeneratorBasedBuilder):
    VERSION = tfds.core.Version('1.2.0')
    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
        '1.1.0': 'Do not pad events to the same size',
        '1.2.0': 'Regenerate with ARRAY_RECORD',
    }
    MANUAL_DOWNLOAD_INSTRUCTIONS = """
    Download from https://zenodo.org/record/4559324#.YTs853tRVH4
    """

    def __init__(self, *args, **kwargs):
        kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
        super(DelphesQcdPf, self).__init__(*args, **kwargs)

    def _info(self) -> tfds.core.DatasetInfo:
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict({
                'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=np.float32),
                'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=np.float32),
                'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=np.float32),
            }),
            supervised_keys=None,
            homepage='https://zenodo.org/record/4559324#.YTs853tRVH4',
            citation=_CITATION,
            metadata=tfds.core.MetadataDict(x_features=X_FEATURES),
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        path = Path(dl_manager.manual_dir)
        return split_sample(path / 'pythia8_qcd/raw')

    def _generate_examples(self, path):
        return generate_examples(path)
class DelphesTtbarPf(tfds.core.GeneratorBasedBuilder):
    VERSION = tfds.core.Version('1.2.0')
    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
        '1.1.0': 'Do not pad events to the same size',
        '1.2.0': 'Regenerate with ARRAY_RECORD',
    }
    MANUAL_DOWNLOAD_INSTRUCTIONS = """
    Download from https://zenodo.org/record/4559324#.YTs853tRVH4
    """

    def __init__(self, *args, **kwargs):
        kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
        super(DelphesTtbarPf, self).__init__(*args, **kwargs)

    def _info(self) -> tfds.core.DatasetInfo:
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict({
                'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=np.float32),
                'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=np.float32),
                'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=np.float32),
            }),
            supervised_keys=None,
            homepage='https://zenodo.org/record/4559324#.YTs853tRVH4',
            citation=_CITATION,
            metadata=tfds.core.MetadataDict(x_features=X_FEATURES),
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        path = Path(dl_manager.manual_dir)
        return split_sample(path / 'pythia8_ttbar/raw')

    def _generate_examples(self, path):
        return generate_examples(path)
@numba.njit
def deltaphi(phi1, phi2):
    # wrap the azimuthal difference into (-pi, pi]
    diff = phi1 - phi2
    return np.arctan2(np.sin(diff), np.cos(diff))
@numba.njit
def deltar(eta1, phi1, eta2, phi2):
    deta = eta1 - eta2
    dphi = deltaphi(phi1, phi2)
    return np.sqrt(deta**2 + dphi**2)
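def _deltar_wraparound_example():
    """Illustrative sketch, not part of the original source: deltaphi wraps
    the azimuthal difference into (-pi, pi], so objects at phi ~ +pi and
    phi ~ -pi are correctly treated as nearby."""
    assert abs(deltaphi(3.1, -3.1)) < 0.1  # wrapped to ~ -0.083, not ~6.2
    assert deltar(0.0, 3.1, 0.1, -3.1) < 0.2  # small dR despite the raw phi gap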
@numba.njit
def match_jets(jets1, jets2, deltaR_cut):
    iev = len(jets1)
    jet_inds_1_ev = []
    jet_inds_2_ev = []
    for ev in range(iev):
        j1 = jets1[ev]
        j2 = jets2[ev]

        jet_inds_1 = []
        jet_inds_2 = []
        for ij1 in range(len(j1)):
            # compute dR between this jet and every jet in the other collection
            drs = np.zeros(len(j2), dtype=np.float64)
            for ij2 in range(len(j2)):
                eta1 = j1.eta[ij1]
                eta2 = j2.eta[ij2]
                phi1 = j1.phi[ij1]
                phi2 = j2.phi[ij2]
                dr = deltar(eta1, phi1, eta2, phi2)
                drs[ij2] = dr
            # keep the closest jet, if it is within the matching cone
            if len(drs) > 0:
                min_idx_dr = np.argmin(drs)
                if drs[min_idx_dr] < deltaR_cut:
                    jet_inds_1.append(ij1)
                    jet_inds_2.append(min_idx_dr)
        jet_inds_1_ev.append(jet_inds_1)
        jet_inds_2_ev.append(jet_inds_2)
    return jet_inds_1_ev, jet_inds_2_ev
def squeeze_if_one(arr):
    # remove a trailing singleton dimension, if present
    if arr.shape[-1] == 1:
        return np.squeeze(arr, axis=-1)
    else:
        return arr
def build_dummy_array(num, dtype=np.int64):
    # ragged awkward Array with `num` empty sublists
    return awkward.Array(
        awkward.contents.ListOffsetArray(
            awkward.index.Index64(np.zeros(num + 1, dtype=np.int64)),
            awkward.from_numpy(np.array([], dtype=dtype), highlevel=False),
        )
    )
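def _build_dummy_array_example():
    """Illustrative sketch, not part of the original source: the dummy array
    is a placeholder index collection with one empty sublist per event."""
    arr = build_dummy_array(3)
    assert len(arr) == 3
    assert awkward.to_list(arr) == [[], [], []]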
def match_two_jet_collections(jets_coll, name1, name2, jet_match_dr):
    num_events = len(jets_coll[name1])
    vec1 = vector.awk(
        awkward.zip({
            'pt': jets_coll[name1].pt,
            'eta': jets_coll[name1].eta,
            'phi': jets_coll[name1].phi,
            'energy': jets_coll[name1].energy,
        })
    )
    vec2 = vector.awk(
        awkward.zip({
            'pt': jets_coll[name2].pt,
            'eta': jets_coll[name2].eta,
            'phi': jets_coll[name2].phi,
            'energy': jets_coll[name2].energy,
        })
    )
    ret = match_jets(vec1, vec2, jet_match_dr)
    j1_idx = awkward.from_iter(ret[0])
    j2_idx = awkward.from_iter(ret[1])
    num_jets = len(awkward.flatten(j1_idx))
    if num_jets > 0:
        c1_to_c2 = awkward.Array({name1: j1_idx, name2: j2_idx})
    else:
        # no matches anywhere: return empty index lists with the right event count
        dummy = build_dummy_array(num_events)
        c1_to_c2 = awkward.Array({name1: dummy, name2: dummy})
    return c1_to_c2
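def _match_two_jet_collections_example():
    """Illustrative sketch, not part of the original source: match a toy
    'gen' collection to an identical toy 'reco' collection within dR < 0.1.
    Assumes the awkward/vector/numba stack used by match_jets elsewhere in
    this module supports these inputs."""
    fields = {'pt': [[10.0, 20.0]], 'eta': [[0.0, 1.0]],
              'phi': [[0.0, 1.0]], 'energy': [[10.0, 20.0]]}
    jets_coll = {
        'gen': awkward.zip({k: awkward.Array(v) for k, v in fields.items()}),
        'reco': awkward.zip({k: awkward.Array(v) for k, v in fields.items()}),
    }
    matched = match_two_jet_collections(jets_coll, 'gen', 'reco', 0.1)
    # every jet matches itself, so both index lists are [[0, 1]]
    assert awkward.to_list(matched['gen']) == [[0, 1]]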
class Expression:
    def __init__(self, label, edmtype, eval_list):
        self.label = label
        self.edmtype = edmtype
        self.eval_list = eval_list
        self.handle = Handle(self.edmtype)

    def get(self, event):
        event.getByLabel(self.label, self.handle)
        obj = self.handle.product()
        results = {}
        # each eval_item is a Python expression string that may reference `obj`
        for eval_name, eval_item in self.eval_list:
            ret = eval(eval_item)
            results[eval_name] = ret
        return results
class TFDSDataSource:
    def __init__(self, ds):
        self.ds = ds
        # keep only the name and the features from dataset_info
        tmp = self.ds.dataset_info
        self.ds.dataset_info = SimpleNamespace()
        self.ds.dataset_info.name = tmp.name
        self.ds.dataset_info.features = tmp.features
        self.rep = self.ds.__repr__()

    def __getitem__(self, item):
        if isinstance(item, int):
            item = [item]
        records = self.ds.data_source.__getitems__(item)
        ret = [
            self.ds.dataset_info.features.deserialize_example_np(record, decoders=self.ds.decoders)
            for record in records
        ]
        if len(item) == 1:
            ret = ret[0]
        return ret

    def __len__(self):
        return len(self.ds)

    def __repr__(self):
        return self.rep
class PFDataset:
    """Builds a DataSource from tensorflow datasets."""

    def __init__(self, data_dir, name, split, num_samples=None):
        """
        Args
            data_dir: path to tensorflow_datasets (e.g. `../data/tensorflow_datasets/`)
            name: sample and version (e.g. `clic_edm_ttbar_pf:1.5.0`)
            split: "train" or "test" (if "valid" then will use "test")
            num_samples: if given, restrict the dataset to the first num_samples examples
        """
        if split == 'valid':
            split = 'test'
        builder = tfds.builder(name, data_dir=data_dir)
        self.ds = TFDSDataSource(builder.as_data_source(split=split))

        if num_samples:
            self.ds = torch.utils.data.Subset(self.ds, range(num_samples))

    def __len__(self):
        return len(self.ds)
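# Illustrative usage (not part of the original source; the dataset name and
# path are hypothetical and must already be built on disk):
#   ds = PFDataset('../data/tensorflow_datasets/', 'cms_pf_ttbar:1.6.0', 'train', num_samples=1000)
#   print(len(ds))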
class PFDataLoader(torch.utils.data.DataLoader):
    """
    Copied from
    https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/loader/dataloader.html#DataLoader
    because we need to implement our own Collater class to load the tensorflow_datasets (see below).
    """

    def __init__(
        self,
        dataset: PFDataset,
        batch_size: int = 1,
        shuffle: bool = False,
        follow_batch: Optional[List[str]] = None,
        exclude_keys: Optional[List[str]] = None,
        **kwargs,
    ):
        collate_fn = kwargs.pop('collate_fn', None)
        self.follow_batch = follow_batch
        self.exclude_keys = exclude_keys
        super().__init__(dataset, batch_size, shuffle, collate_fn=collate_fn, **kwargs)
class Collater:
    """Based on the Collater found on torch_geometric docs we build our own."""

    def __init__(self, keys_to_get, follow_batch=None, exclude_keys=None, pad_bin_size=640, pad_3d=True):
        self.follow_batch = follow_batch
        self.exclude_keys = exclude_keys
        self.keys_to_get = keys_to_get
        self.pad_bin_size = pad_bin_size
        self.pad_3d = pad_3d

    def __call__(self, inputs):
        num_samples_in_batch = len(inputs)
        elem_keys = self.keys_to_get

        # build one torch_geometric Data object per event,
        # recording the event index in the `batch` attribute
        batch = []
        for ev in range(num_samples_in_batch):
            batch.append(Data())
            for elem_key in elem_keys:
                batch[ev][elem_key] = Tensor(inputs[ev][elem_key])
                batch[ev]['batch'] = torch.tensor([ev] * len(inputs[ev][elem_key]))

        ret = Batch.from_data_list(batch, self.follow_batch, self.exclude_keys)

        if not self.pad_3d:
            return ret

        # pad the ragged per-event tensors into dense [batch, max_elems, feat] arrays
        ret = {k: torch_geometric.utils.to_dense_batch(getattr(ret, k), ret.batch) for k in elem_keys}
        ret['mask'] = ret['X'][1]
        for k in elem_keys:
            ret[k] = ret[k][0]
        ret = Batch(**ret)
        return ret
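def _collater_example():
    """Illustrative sketch, not part of the original source: pad two events
    of different lengths into dense [batch, max_elems, num_features] tensors
    plus a validity mask, mirroring how the collate_fn is used below."""
    collate = Collater(keys_to_get=['X', 'ygen'], pad_3d=True)
    ev0 = {'X': np.ones((3, 4), np.float32), 'ygen': np.ones((3, 2), np.float32)}
    ev1 = {'X': np.ones((5, 4), np.float32), 'ygen': np.ones((5, 2), np.float32)}
    batch = collate([ev0, ev1])
    assert tuple(batch['X'].shape) == (2, 5, 4)
    assert tuple(batch['mask'].shape) == (2, 5)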
class InterleavedIterator(object):
    """Will combine DataLoaders of different lengths and batch sizes."""

    def __init__(self, data_loaders):
        self.idx = 0
        self.data_loaders = data_loaders
        self.data_loaders_iter = [iter(dl) for dl in data_loaders]
        max_loader_size = max([len(dl) for dl in data_loaders])

        # precompute the round-robin order of loader indices,
        # skipping loaders that have run out of batches at each step
        self.loader_ds_indices = []
        for i in range(max_loader_size):
            for iloader, loader in enumerate(data_loaders):
                if i < len(loader):
                    self.loader_ds_indices.append(iloader)

        self.cur_index = 0
        self._len = None

    def __iter__(self):
        return self

    def __next__(self):
        try:
            iloader = self.loader_ds_indices[self.cur_index]
        except IndexError:
            # all batches consumed: reset for the next epoch
            self.cur_index = 0
            self.data_loaders_iter = [iter(dl) for dl in self.data_loaders]
            raise StopIteration

        self.cur_index += 1
        return next(self.data_loaders_iter[iloader])

    def __len__(self):
        if self._len:
            return self._len
        # compute and cache the total number of batches across all loaders
        len_ = 0
        for iloader in range(len(self.data_loaders_iter)):
            len_ += len(self.data_loaders_iter[iloader])
        self._len = len_
        return len_
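def _interleaved_iterator_example():
    """Illustrative sketch, not part of the original source: interleave two
    plain DataLoaders with different numbers of batches; batches alternate
    between the loaders until the longer one is exhausted."""
    dl1 = torch.utils.data.DataLoader(torch.arange(8), batch_size=2)  # 4 batches
    dl2 = torch.utils.data.DataLoader(torch.arange(4), batch_size=2)  # 2 batches
    it = InterleavedIterator([dl1, dl2])
    assert len(it) == 6
    assert len(list(it)) == 6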
def get_interleaved_dataloaders(world_size, rank, config, use_cuda, pad_3d, use_ray):
    loaders = {}
    for split in ['train', 'valid']:
        loaders[split] = []
        for type_ in config[f'{split}_dataset'][config['dataset']]:
            dataset = []
            for sample in config[f'{split}_dataset'][config['dataset']][type_]['samples']:
                version = config[f'{split}_dataset'][config['dataset']][type_]['samples'][sample]['version']
                ds = PFDataset(config['data_dir'], f'{sample}:{version}', split, num_samples=config[f'n{split}']).ds
                if (rank == 0) or (rank == 'cpu'):
                    _logger.info(f'{split}_dataset: {sample}, {len(ds)}', color='blue')
                dataset.append(ds)
            dataset = torch.utils.data.ConcatDataset(dataset)

            if world_size > 1:
                sampler = torch.utils.data.distributed.DistributedSampler(dataset)
            else:
                sampler = torch.utils.data.RandomSampler(dataset)

            batch_size = config[f'{split}_dataset'][config['dataset']][type_]['batch_size'] * config['gpu_batch_multiplier']
            loader = PFDataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=Collater(['X', 'ygen'], pad_3d=pad_3d),
                sampler=sampler,
                num_workers=config['num_workers'],
                prefetch_factor=config['prefetch_factor'],
                pin_memory=use_cuda,
                pin_memory_device='cuda:{}'.format(rank) if use_cuda else '',
            )

            if use_ray:
                import ray

                loader = ray.train.torch.prepare_data_loader(loader)
            loaders[split].append(loader)
        loaders[split] = InterleavedIterator(loaders[split])
    return loaders
def _logging(rank, _logger, msg):
    """Will log the message only on rank 0 or cpu."""
    if (rank == 0) or (rank == 'cpu'):
        _logger.info(msg)
def _configLogger(name, filename=None, loglevel=logging.INFO):
    logger = logging.getLogger(name)
    logger.setLevel(loglevel)
    if filename:
        logfile = logging.FileHandler(filename)
        logfile.setLevel(loglevel)
        logfile.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s'))
        logger.addHandler(logfile)
class ColoredLogger:
    # ANSI terminal escape codes for colorized log messages
    color_dict = {
        'black': '\x1b[0;30m',
        'red': '\x1b[0;31m',
        'green': '\x1b[0;32m',
        'orange': '\x1b[0;33m',
        'blue': '\x1b[0;34m',
        'purple': '\x1b[0;35m',
        'cyan': '\x1b[0;36m',
        'lightgray': '\x1b[0;37m',
        'darkgray': '\x1b[1;30m',
        'lightred': '\x1b[1;31m',
        'lightgreen': '\x1b[1;32m',
        'yellow': '\x1b[1;33m',
        'lightblue': '\x1b[1;34m',
        'lightpurple': '\x1b[1;35m',
        'lightcyan': '\x1b[1;36m',
        'white': '\x1b[1;37m',
        'bold': '\x1b[1m',
        'endcolor': '\x1b[0m',
    }

    def __init__(self, name):
        self.logger = logging.getLogger(name)

    def colorize(self, msg, color):
        return self.color_dict[color] + msg + self.color_dict['endcolor']

    def debug(self, msg, *args, color=None, **kwargs):
        if color:
            msg = self.colorize(msg, color)
        self.logger.debug(msg, *args, **kwargs)

    def info(self, msg, *args, color=None, **kwargs):
        if color:
            msg = self.colorize(msg, color)
        self.logger.info(msg, *args, **kwargs)

    def warning(self, msg, *args, color=None, **kwargs):
        if color:
            msg = self.colorize(msg, color)
        self.logger.warning(msg, *args, **kwargs)

    def error(self, msg, *args, color=None, **kwargs):
        if color:
            msg = self.colorize(msg, color)
        self.logger.error(msg, *args, **kwargs)
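def _colored_logger_example():
    """Illustrative sketch, not part of the original source: configure a
    named stdlib logger via _configLogger, then emit a green info message."""
    _configLogger('demo', filename=None, loglevel=logging.INFO)
    log = ColoredLogger('demo')
    log.info('dataset ready', color='green')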
@lru_cache(10)
def warn_once(msg, logger=_logger):
    # lru_cache memoizes on the (msg, logger) arguments,
    # so a repeated message is only logged once
    logger.warning(msg)
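def _warn_once_example():
    """Illustrative sketch, not part of the original source: five identical
    calls produce a single warning, thanks to the lru_cache memoization."""
    for _ in range(5):
        warn_once('found an event with no particles')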
def main():
    args = parser.parse_args()
    world_size = args.gpus if args.gpus > 0 else 1

    with open(args.config, 'r') as stream:
        config = yaml.safe_load(stream)

    # override loaded config with values from command-line args
    config = override_config(config, args)

    if args.hpo:
        run_hpo(config, args)
    else:
        if args.resume_training:
            outdir = args.resume_training
        else:
            outdir = create_experiment_dir(
                prefix=(args.prefix or '') + Path(args.config).stem + '_',
                experiments_dir=args.experiments_dir if args.experiments_dir else 'experiments',
            )

        # save the resolved config in the experiment dir for reproducibility
        config_filename = 'train-config.yaml' if args.train else 'test-config.yaml'
        with open(Path(outdir) / config_filename, 'w') as file:
            yaml.dump(config, file)

        if args.ray_train:
            run_ray_training(config, args, outdir)
        else:
            device_agnostic_run(config, args, world_size, outdir)
def set_hps_from_search_space(search_space, config):
    variable_names = ['lr', 'gpu_batch_multiplier']
    for var in variable_names:
        if var in search_space.keys():
            config[var] = search_space[var]

    if 'conv_type' in search_space.keys():
        conv_type = search_space['conv_type']
        config['conv_type'] = conv_type

        common_variable_names = ['embedding_dim', 'width', 'num_convs', 'activation']
        if conv_type == 'gnn_lsh' or conv_type == 'gravnet' or conv_type == 'attention':
            for var in common_variable_names:
                if var in search_space.keys():
                    config['model'][conv_type][var] = search_space[var]

        gravnet_variable_names = ['k', 'propagate_dimensions', 'space_dimensions']
        if conv_type == 'gravnet':
            for var in gravnet_variable_names:
                if var in search_space.keys():
                    config['model'][conv_type][var] = search_space[var]

        attention_variables = ['num_heads']
        if conv_type == 'attention':
            for var in attention_variables:
                if var in search_space.keys():
                    config['model'][conv_type][var] = search_space[var]

        mamba_variables = ['num_heads', 'd_state', 'd_conv', 'expand']
        if conv_type == 'mamba':
            for var in mamba_variables:
                if var in search_space.keys():
                    config['model'][conv_type][var] = search_space[var]

        gnn_lsh_variable_names = ['bin_size', 'max_num_bins', 'distance_dim', 'layernorm', 'num_node_messages', 'ffn_dist_hidden_dim']
        if conv_type == 'gnn_lsh':
            for var in gnn_lsh_variable_names:
                if var in search_space.keys():
                    config['model'][conv_type][var] = search_space[var]

    return config
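def _set_hps_from_search_space_example():
    """Illustrative sketch, not part of the original source: overlay one
    sampled hyperparameter point (the toy dicts here are hypothetical) onto
    the nested training config."""
    config = {
        'lr': 1e-3,
        'gpu_batch_multiplier': 1,
        'conv_type': 'gnn_lsh',
        'model': {'attention': {'num_heads': 8, 'width': 256}},
    }
    point = {'lr': 3e-4, 'conv_type': 'attention', 'num_heads': 16}
    config = set_hps_from_search_space(point, config)
    assert config['lr'] == 3e-4
    assert config['conv_type'] == 'attention'
    assert config['model']['attention']['num_heads'] == 16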