class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask=None):
super(Transformer, self).__init__()
self.width = width
self.layers = layers
# attn_mask is accepted here only for API compatibility; the mask is supplied per call in forward().
# Each ResidualAttentionBlock must accept and return an (x, attn_mask) tuple for this nn.Sequential chaining to work.
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads) for _ in range(layers)])
def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
return self.resblocks((x, attn_mask))[0]
|
def warmup_cosine(x, warmup=0.002):
' Linearly increases learning rate over `warmup` fraction of training, then follows a cosine decay over the full run. '
if (x < warmup):
return (x / warmup)
return (0.5 * (1.0 + math.cos((math.pi * x))))
|
def warmup_constant(x, warmup=0.002):
' Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps.\n Learning rate is 1. afterwards. '
if (x < warmup):
return (x / warmup)
return 1.0
|
def warmup_linear(x, warmup=0.002):
' Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step.\n After `t_total`-th training step, learning rate is zero. '
if (x < warmup):
return (x / warmup)
return max(((x - 1.0) / (warmup - 1.0)), 0)
|
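# BertAdam below looks up its schedule in a SCHEDULES dict that this excerpt never defines.
# Presumably (as in the original pytorch-pretrained-bert code) it maps schedule names to the
# three warmup functions above; a minimal sketch:
SCHEDULES = {
    'warmup_cosine': warmup_cosine,
    'warmup_constant': warmup_constant,
    'warmup_linear': warmup_linear,
}
|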
class BertAdam(Optimizer):
"Implements BERT version of Adam algorithm with weight decay fix.\n Params:\n lr: learning rate\n warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1\n t_total: total number of training steps for the learning\n rate schedule, -1 means constant learning rate. Default: -1\n schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'\n b1: Adams b1. Default: 0.9\n b2: Adams b2. Default: 0.999\n e: Adams epsilon. Default: 1e-6\n weight_decay: Weight decay. Default: 0.01\n max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0\n "
def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0):
if ((lr is not required) and (lr < 0.0)):
raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
if (schedule not in SCHEDULES):
raise ValueError('Invalid schedule parameter: {}'.format(schedule))
if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (- 1)))):
raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
if (not (0.0 <= b1 < 1.0)):
raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
if (not (0.0 <= b2 < 1.0)):
raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
if (not (e >= 0.0)):
raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
state = self.state[p]
if (len(state) == 0):
return [0]
if (group['t_total'] != (- 1)):
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup']))
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
'Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['next_m'] = torch.zeros_like(p.data)
state['next_v'] = torch.zeros_like(p.data)
(next_m, next_v) = (state['next_m'], state['next_v'])
(beta1, beta2) = (group['b1'], group['b2'])
if (group['max_grad_norm'] > 0):
clip_grad_norm_(p, group['max_grad_norm'])
next_m.mul_(beta1).add_(grad, alpha=(1 - beta1))
next_v.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
# NOTE: unlike standard Adam, BertAdam applies no bias correction, matching the original BERT TensorFlow implementation.
update = (next_m / (next_v.sqrt() + group['e']))
if (group['weight_decay'] > 0.0):
update += (group['weight_decay'] * p.data)
if (group['t_total'] != (- 1)):
schedule_fct = SCHEDULES[group['schedule']]
progress = (state['step'] / group['t_total'])
lr_scheduled = (group['lr'] * schedule_fct(progress, group['warmup']))
else:
lr_scheduled = group['lr']
update_with_lr = (lr_scheduled * update)
p.data.add_((- update_with_lr))
state['step'] += 1
return loss
|
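# A minimal usage sketch for BertAdam; the model, learning rate, and step count below are
# illustrative placeholders, not values taken from this codebase.
import torch

model = torch.nn.Linear(10, 2)
optimizer = BertAdam(model.parameters(), lr=5e-05, warmup=0.1, t_total=1000, schedule='warmup_linear')
for step in range(1000):
    loss = model(torch.randn(32, 10)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()  # advances state['step'], which drives the warmup/decay schedule
|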
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bpe_simple_vocab_16e6.txt.gz')
|
@lru_cache()
def bytes_to_unicode():
"\n Returns list of utf-8 byte and a corresponding list of unicode strings.\n The reversible bpe codes work on unicode strings.\n This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.\n When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.\n This is a signficant percentage of your normal, say, 32K bpe vocab.\n To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n And avoids mapping to whitespace/control characters the bpe code barfs on.\n "
bs = ((list(range(ord('!'), (ord('~') + 1))) + list(range(ord('¡'), (ord('¬') + 1)))) + list(range(ord('®'), (ord('ÿ') + 1))))
cs = bs[:]
n = 0
for b in range((2 ** 8)):
if (b not in bs):
bs.append(b)
cs.append(((2 ** 8) + n))
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
|
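# A quick sanity check of the byte-to-unicode table, mirroring how SimpleTokenizer uses it
# ('café' is an arbitrary example string):
byte_encoder = bytes_to_unicode()
byte_decoder = {v: k for (k, v) in byte_encoder.items()}
raw = 'café'.encode('utf-8')
mapped = ''.join(byte_encoder[b] for b in raw)
assert bytes(byte_decoder[c] for c in mapped) == raw  # the mapping is lossless in both directions
|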
def get_pairs(word):
'Return set of symbol pairs in a word.\n Word is represented as tuple of symbols (symbols being variable-length strings).\n '
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
|
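# For example, on the end-of-word form that bpe() constructs for the token 'low':
word = ('l', 'o', 'w</w>')
assert get_pairs(word) == {('l', 'o'), ('o', 'w</w>')}
|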
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
|
def whitespace_clean(text):
text = re.sub('\\s+', ' ', text)
text = text.strip()
return text
|
class SimpleTokenizer(object):
def __init__(self, bpe_path: str=default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
# skip the version header line and keep the fixed number of merge rules expected for this vocabulary
merges = merges[1:(((49152 - 256) - 2) + 1)]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = (vocab + [(v + '</w>') for v in vocab])
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for (k, v) in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
# NOTE: the \p{L}/\p{N} character classes require the third-party `regex` module (which CLIP imports as `re`); the stdlib re module does not support them.
self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
self.vocab = self.encoder
def bpe(self, token):
if (token in self.cache):
return self.cache[token]
word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
pairs = get_pairs(word)
if (not pairs):
return (token + '</w>')
while True:
bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
if (bigram not in self.bpe_ranks):
break
(first, second) = bigram
new_word = []
i = 0
while (i < len(word)):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:
new_word.extend(word[i:])
break
if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
new_word.append((first + second))
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if (len(word) == 1):
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
return text
def tokenize(self, text):
tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
return tokens
def convert_tokens_to_ids(self, tokens):
return [self.encoder[bpe_token] for bpe_token in tokens]
|
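# An end-to-end usage sketch, assuming the bundled bpe_simple_vocab_16e6.txt.gz sits next to
# this module (see default_bpe above):
tokenizer = SimpleTokenizer()
ids = tokenizer.encode('a photo of a cat')
print(ids)                    # BPE token ids
print(tokenizer.decode(ids))  # 'a photo of a cat ' -- decode renders '</w>' as a trailing space
|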
def get_world_size():
if (not dist.is_available()):
return 1
if (not dist.is_initialized()):
return 1
return dist.get_world_size()
|
def get_rank():
if (not dist.is_available()):
return 0
if (not dist.is_initialized()):
return 0
return dist.get_rank()
|
def is_main_process():
return (get_rank() == 0)
|
def synchronize():
'\n Helper function to synchronize (barrier) among all processes when\n using distributed training\n '
if (not dist.is_available()):
return
if (not dist.is_initialized()):
return
world_size = dist.get_world_size()
if (world_size == 1):
return
dist.barrier()
|
def all_gather(data):
'\n Run all_gather on arbitrary picklable data (not necessarily tensors)\n Args:\n data: any picklable object\n Returns:\n list[data]: list of data gathered from each rank\n '
world_size = get_world_size()
if (world_size == 1):
return [data]
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to('cuda')
local_size = torch.LongTensor([tensor.numel()]).to('cuda')
size_list = [torch.LongTensor([0]).to('cuda') for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
if (local_size != max_size):
padding = torch.ByteTensor(size=((max_size - local_size),)).to('cuda')
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for (size, tensor) in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
|
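# A hedged usage sketch for all_gather: it must run inside an initialized torch.distributed job
# (e.g. launched with torchrun) with CUDA available, since the buffers are staged on 'cuda'.
stats = {'rank': get_rank(), 'n_events': 123}
gathered = all_gather(stats)  # list with one entry per rank
if is_main_process():
    total = sum(d['n_events'] for d in gathered)
|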
def reduce_dict(input_dict, average=True):
'\n Args:\n input_dict (dict): all the values will be reduced\n average (bool): whether to do average or sum\n Reduce the values in the dictionary from all processes so that process with rank\n 0 has the averaged results. Returns a dict with the same fields as\n input_dict, after reduction.\n '
world_size = get_world_size()
if (world_size < 2):
return input_dict
with torch.no_grad():
names = []
values = []
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if ((dist.get_rank() == 0) and average):
values /= world_size
reduced_dict = {k: v for (k, v) in zip(names, values)}
return reduced_dict
|
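# Typical loss-logging use of reduce_dict; every rank must pass the same keys with
# same-shaped scalar tensors (values here are synthetic):
loss_dict = {'loss_cls': torch.tensor(0.7, device='cuda'), 'loss_reg': torch.tensor(0.2, device='cuda')}
reduced = reduce_dict(loss_dict)  # after dist.reduce, only rank 0 holds the averaged values
if is_main_process():
    print({k: v.item() for (k, v) in reduced.items()})
|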
def setup_logger(name, save_dir, dist_rank, filename='log.txt'):
logger = logging.getLogger(name)
logger.setLevel(logging.ERROR)
if (dist_rank > 0):
return logger
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s %(name)s %(lineno)s %(levelname)s]: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
if save_dir:
fh = logging.FileHandler(os.path.join(save_dir, filename))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
|
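# Typical setup: one logger per process, keyed by rank so only rank 0 emits output
# (the save directory is assumed to already exist):
logger = setup_logger('mlpf', save_dir='logs', dist_rank=get_rank())
logger.info('starting training')  # goes to stdout and logs/log.txt on rank 0; suppressed elsewhere
|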
class SmoothedValue(object):
'Track a series of values and provide access to smoothed values over a\n window or the global series average.\n '
def __init__(self, window_size=20):
self.deque = deque(maxlen=window_size)
self.series = []
self.total = 0.0
self.count = 0
def update(self, value):
self.deque.append(value)
self.series.append(value)
self.count += 1
self.total += value
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque))
return d.mean().item()
@property
def global_avg(self):
return (self.total / self.count)
|
class MetricLogger(object):
def __init__(self, delimiter='\t'):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for (k, v) in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if (attr in self.meters):
return self.meters[attr]
if (attr in self.__dict__):
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
def __str__(self):
loss_str = []
for (name, meter) in self.meters.items():
loss_str.append('{}: {:.4f} ({:.4f})'.format(name, meter.median, meter.global_avg))
return self.delimiter.join(loss_str)
|
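# A short demonstration of the update/format cycle (values are synthetic):
meters = MetricLogger(delimiter='  ')
for step in range(100):
    meters.update(loss=(1.0 / (step + 1)), lr=0.0001)
print(str(meters))             # per-meter 'median (global_avg)' summary
print(meters.loss.global_avg)  # meters are exposed as attributes via __getattr__
|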
def main():
args = parser.parse_args()
world_size = args.gpus
if args.gpus:
assert (world_size <= torch.cuda.device_count()), f'--gpus is too high (specified {world_size} gpus but only {torch.cuda.device_count()} gpus are available)'
torch.cuda.empty_cache()
if (world_size > 1):
_logger.info(f'Will use torch.nn.parallel.DistributedDataParallel() and {world_size} gpus', color='purple')
for rank in range(world_size):
_logger.info(torch.cuda.get_device_name(rank), color='purple')
elif (world_size == 1):
rank = 0
_logger.info(f'Will use single-gpu: {torch.cuda.get_device_name(rank)}', color='purple')
else:
rank = 'cpu'
_logger.info('Will use cpu', color='purple')
|
def customize_pipeline_test(config):
config['batching']['bucket_by_sequence_length'] = False
if ('delphes_pf_ttbar' in config['datasets']):
config['train_test_datasets']['physical']['datasets'] = ['delphes_pf_ttbar']
if ('cms_pf_ttbar' in config['datasets']):
config['train_test_datasets']['physical']['datasets'] = ['cms_pf_ttbar']
config['train_test_datasets'] = {'physical': config['train_test_datasets']['physical']}
config['train_test_datasets']['physical']['batch_per_gpu'] = 2
config['validation_dataset'] = 'cms_pf_ttbar'
config['evaluation_datasets'] = {'cms_pf_ttbar': {'batch_size': 2, 'num_events': (- 1)}}
if ('clic_edm_ttbar_pf' in config['datasets']):
config['train_test_datasets']['physical']['datasets'] = ['clic_edm_ttbar_pf']
config['train_test_datasets'] = {'physical': config['train_test_datasets']['physical']}
config['train_test_datasets']['physical']['batch_per_gpu'] = 5
config['validation_dataset'] = 'clic_edm_ttbar_pf'
config['validation_batch_size'] = 5
config['evaluation_datasets'] = {'clic_edm_ttbar_pf': {'batch_size': 5, 'num_events': (- 1)}}
if ('clic_edm_ttbar_hits_pf' in config['datasets']):
config['train_test_datasets']['physical']['datasets'] = ['clic_edm_ttbar_hits_pf']
config['train_test_datasets'] = {'physical': config['train_test_datasets']['physical']}
config['train_test_datasets']['physical']['batch_per_gpu'] = 1
config['validation_dataset'] = 'clic_edm_ttbar_hits_pf'
config['validation_batch_size'] = 1
config['evaluation_datasets'] = {'clic_edm_ttbar_hits_pf': {'batch_size': 1, 'num_events': (- 1)}}
config['validation_num_events'] = (config['validation_batch_size'] * 2)
config['parameters']['num_graph_layers_id'] = 1
config['parameters']['num_graph_layers_cls'] = 1
return config
|
def submit(config):
crabCommand('submit', config=config)
with open((((config.General.workArea + '/crab_') + config.General.requestName) + '/crab_config.py'), 'w') as fi:
fi.write(config.pythonise_())
|
def map_pdgid_to_candid(pdgid, charge):
if (pdgid in [22, 11, 13]):
return pdgid
if (abs(charge) > 0):
return 211
return 130
|
def deltar_pairs(eta_vec, phi_vec, dr_cut):
deta = np.abs(np.subtract.outer(eta_vec, eta_vec))
dphi = (np.mod((np.subtract.outer(phi_vec, phi_vec) + np.pi), (2 * np.pi)) - np.pi)
dr2 = ((deta ** 2) + (dphi ** 2))
dr2 *= np.tri(*dr2.shape)
dr2[(dr2 == 0)] = 999
ind_pairs = np.where((dr2 < dr_cut))
return ind_pairs
|
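# A small numeric check of deltar_pairs; note that dr_cut is compared against
# delta-R squared, not delta-R itself:
eta = np.array([0.0, 0.01, 2.0])
phi = np.array([0.0, 0.01, -3.1])
(idx0, idx1) = deltar_pairs(eta, phi, dr_cut=0.01)
# only elements 1 and 0 are close: dr2 = 0.01**2 + 0.01**2 = 2e-4 < 0.01, so idx0 == [1], idx1 == [0]
|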
def get_charge(pid):
abs_pid = abs(pid)
if (pid in [130, 22, 1, 2]):
return 0.0
elif (abs_pid in [11, 13]):
return (- math.copysign(1.0, pid))
elif (abs_pid in [211]):
return math.copysign(1.0, pid)
else:
raise Exception('Unknown pid: {}'.format(pid))
|
def draw_event(g):
pos = {}
for node in g.nodes:
pos[node] = (g.nodes[node]['eta'], g.nodes[node]['phi'])
fig = plt.figure(figsize=(10, 10))
nodes_to_draw = [n for n in g.nodes if (n[0] == 'elem')]
nx.draw_networkx(g, pos=pos, with_labels=False, node_size=5, nodelist=nodes_to_draw, edgelist=[], node_color='red', node_shape='s', alpha=0.5)
nodes_to_draw = [n for n in g.nodes if (n[0] == 'pfcand')]
nx.draw_networkx(g, pos=pos, with_labels=False, node_size=10, nodelist=nodes_to_draw, edgelist=[], node_color='green', node_shape='x', alpha=0.5)
nodes_to_draw = [n for n in g.nodes if ((n[0] == 'sc') or (n[0] == 'tp'))]
nx.draw_networkx(g, pos=pos, with_labels=False, node_size=1, nodelist=nodes_to_draw, edgelist=[], node_color='blue', node_shape='.', alpha=0.5)
edges_to_draw = [e for e in g.edges if (e[0] in nodes_to_draw)]
nx.draw_networkx_edges(g, pos, edgelist=edges_to_draw, arrows=False, alpha=0.1)
plt.xlim((- 6), 6)
plt.ylim((- 4), 4)
plt.tight_layout()
plt.axis('on')
return fig
|
def merge_closeby_particles(g, pid=22, deltar_cut=0.001):
photons = [elem for elem in g.nodes if ((g.nodes[elem]['typ'] == pid) and ((elem[0] == 'tp') or (elem[0] == 'sc')))]
phot_eta = [g.nodes[node]['eta'] for node in photons]
phot_phi = [g.nodes[node]['phi'] for node in photons]
merge_pairs = []
(pairs_0, pairs_1) = deltar_pairs(phot_eta, phot_phi, deltar_cut)
merge_pairs = [(photons[p0], photons[p1]) for (p0, p1) in zip(pairs_0, pairs_1)]
for pair in merge_pairs:
if ((pair[0] in g.nodes) and (pair[1] in g.nodes)):
lv = vector.obj(pt=0, eta=0, phi=0, E=0)
for gp in pair:
lv += vector.obj(pt=g.nodes[gp]['pt'], eta=g.nodes[gp]['eta'], phi=g.nodes[gp]['phi'], E=g.nodes[gp]['e'])
g.nodes[pair[0]]['pt'] = lv.pt
g.nodes[pair[0]]['eta'] = lv.eta
g.nodes[pair[0]]['phi'] = lv.phi
g.nodes[pair[0]]['e'] = lv.energy
for suc in g.successors(pair[1]):
if ((pair[0], suc) in g.edges):
g.edges[(pair[0], suc)]['weight'] += g.edges[(pair[1], suc)]['weight']
g.remove_nodes_from([pair[1]])
|
def cleanup_graph(g, node_energy_threshold=0.1, edge_energy_threshold=0.05):
g = g.copy()
nodes_to_remove = []
for node in g.nodes:
if ((node[0] == 'sc') or (node[0] == 'tp')):
sw = 0.0
for edge in g.edges(node):
sw += g.edges[edge]['weight']
if ((sw / g.nodes[node]['e']) < node_energy_threshold):
nodes_to_remove += [node]
g.remove_nodes_from(nodes_to_remove)
edges_to_remove = []
for node in g.nodes:
if (node[0] == 'elem'):
ew = [((gen, node), g.edges[(gen, node)]['weight']) for gen in g.predecessors(node)]
ew = sorted(ew, key=(lambda x: x[1]), reverse=True)
for (edge, weight) in ew:
if ((weight / g.nodes[edge[0]]['e']) < edge_energy_threshold):
edges_to_remove += [edge]
g.remove_edges_from(edges_to_remove)
nodes_to_remove = []
for node in g.nodes:
if ((node[0] == 'sc') or (node[0] == 'tp')):
deg = g.degree[node]
if (deg == 0):
nodes_to_remove += [node]
g.remove_nodes_from(nodes_to_remove)
for node in g.nodes:
if ((node[0] == 'sc') or (node[0] == 'tp')):
E_track = 0.0
E_calo = 0.0
E_other = 0.0
E_hf = 0.0
E_hfem = 0.0
E_hfhad = 0.0
g.nodes[node]['typ'] = map_pdgid_to_candid(abs(g.nodes[node]['typ']), g.nodes[node]['charge'])
for suc in g.successors(node):
elem_type = g.nodes[suc]['typ']
if (elem_type in [1, 6]):
E_track += g.edges[(node, suc)]['weight']
elif (elem_type in [4, 5, 10, 11]):
E_calo += g.edges[(node, suc)]['weight']
elif (elem_type in [8, 9]):
if (elem_type == 8):
E_hfem += g.edges[(node, suc)]['weight']
elif (elem_type == 9):
E_hfhad += g.edges[(node, suc)]['weight']
E_hf += g.edges[(node, suc)]['weight']
else:
E_other += g.edges[(node, suc)]['weight']
g.nodes[node]['E_track'] = E_track
g.nodes[node]['E_calo'] = E_calo
g.nodes[node]['E_other'] = E_other
g.nodes[node]['E_hf'] = E_hf
g.nodes[node]['E_hfem'] = E_hfem
g.nodes[node]['E_hfhad'] = E_hfhad
for node in g.nodes:
if ((node[0] == 'sc') or (node[0] == 'tp')):
tracks = []
for suc in g.successors(node):
typ = g.nodes[suc]['typ']
if ((typ == 1) or (typ == 6)):
tracks.append(suc)
if (len(tracks) > 1):
n0 = g.nodes[node]
drs = []
for tr in tracks:
n1 = g.nodes[tr]
deta = np.abs((n0['eta'] - n1['eta']))
dphi = (np.mod(((n0['phi'] - n1['phi']) + np.pi), (2 * np.pi)) - np.pi)
dr2 = ((deta ** 2) + (dphi ** 2))
drs.append(dr2)
imin = np.argmin(drs)
for itr in range(len(tracks)):
if (itr != imin):
g.edges[(node, tracks[itr])]['weight'] = 0.0
for node in g.nodes:
if ((node[0] == 'sc') or (node[0] == 'tp')):
typ = g.nodes[node]['typ']
if ((typ in [211, 13]) and (g.nodes[node]['E_track'] == 0)):
g.nodes[node]['typ'] = 130
g.nodes[node]['charge'] = 0
if ((typ in [11]) and (g.nodes[node]['E_track'] == 0)):
g.nodes[node]['typ'] = 22
g.nodes[node]['charge'] = 0
if ((g.nodes[node]['E_track'] == 0) and (g.nodes[node]['E_calo'] == 0) and (g.nodes[node]['E_other'] == 0) and (g.nodes[node]['E_hf'] > 0)):
if (g.nodes[node]['E_hfhad'] > g.nodes[node]['E_hfem']):
g.nodes[node]['typ'] = 1
g.nodes[node]['charge'] = 0
else:
g.nodes[node]['typ'] = 2
g.nodes[node]['charge'] = 0
for node in g.nodes:
if ((node[0] == 'sc') or (node[0] == 'tp')):
nd = g.nodes[node]
if ((nd['pt'] < 1.0) and ((abs(nd['typ']) == 11) or (abs(nd['typ']) == 13))):
if (g.nodes[node]['E_track'] > g.nodes[node]['E_calo']):
g.nodes[node]['typ'] = 211
else:
if (abs(nd['typ']) == 11):
g.nodes[node]['typ'] = 22
else:
g.nodes[node]['typ'] = 130
g.nodes[node]['charge'] = 0
merge_closeby_particles(g, 22)
merge_closeby_particles(g, 130)
merge_closeby_particles(g, 1)
merge_closeby_particles(g, 2)
return g
|
def prepare_normalized_table(g, genparticle_energy_threshold=0.2):
all_genparticles = []
all_elements = []
all_pfcandidates = []
for node in g.nodes:
if (node[0] == 'elem'):
all_elements += [node]
for parent in g.predecessors(node):
all_genparticles += [parent]
elif (node[0] == 'pfcand'):
all_pfcandidates += [node]
all_genparticles = list(set(all_genparticles))
all_elements = sorted(all_elements)
elem_to_gp = {}
unmatched_gp = []
for gp in sorted(all_genparticles, key=(lambda x: g.nodes[x]['e']), reverse=True):
elems = [e for e in g.successors(gp)]
elems_sorted = sorted([(g.edges[(gp, e)]['weight'], e) for e in elems], key=(lambda x: x[0]), reverse=True)
chosen_elem = None
for (weight, elem) in elems_sorted:
if (not (elem in elem_to_gp)):
chosen_elem = elem
elem_to_gp[elem] = []
break
if (chosen_elem is None):
unmatched_gp += [gp]
else:
elem_to_gp[elem] += [gp]
for gp in sorted(unmatched_gp, key=(lambda x: g.nodes[x]['e']), reverse=True):
elems = [e for e in g.successors(gp)]
elems_sorted = sorted([(g.edges[(gp, e)]['weight'], e) for e in elems], key=(lambda x: x[0]), reverse=True)
(_, elem) = elems_sorted[0]
elem_to_gp[elem] += [gp]
unmatched_cand = []
elem_to_cand = {}
for cand in sorted(all_pfcandidates, key=(lambda x: g.nodes[x]['e']), reverse=True):
tp = g.nodes[cand]['typ']
neighbors = list(g.predecessors(cand))
chosen_elem = None
if (tp in [211, 13, 11]):
for elem in neighbors:
tp_neighbor = g.nodes[elem]['typ']
if ((tp_neighbor == 1) or (tp_neighbor == 6)):
if (not (elem in elem_to_cand)):
chosen_elem = elem
elem_to_cand[elem] = cand
break
else:
sorted_neighbors = sorted(neighbors, key=(lambda x: g.edges[(x, cand)]['weight']), reverse=True)
for elem in sorted_neighbors:
if (not (elem in elem_to_cand)):
chosen_elem = elem
elem_to_cand[elem] = cand
break
if (chosen_elem is None):
unmatched_cand += [cand]
Xelem = np.recarray((len(all_elements),), dtype=[(name, np.float32) for name in elem_branches])
Xelem.fill(0.0)
ygen = np.recarray((len(all_elements),), dtype=[(name, np.float32) for name in target_branches])
ygen.fill(0.0)
ycand = np.recarray((len(all_elements),), dtype=[(name, np.float32) for name in target_branches])
ycand.fill(0.0)
for (ielem, elem) in enumerate(all_elements):
elem_type = g.nodes[elem]['typ']
genparticles = sorted(elem_to_gp.get(elem, []), key=(lambda x: g.edges[(x, elem)]['weight']), reverse=True)
genparticles = [gp for gp in genparticles if (g.nodes[gp]['e'] > genparticle_energy_threshold)]
candidate = elem_to_cand.get(elem, None)
for j in range(len(elem_branches)):
Xelem[elem_branches[j]][ielem] = g.nodes[elem][elem_branches[j]]
if (not (candidate is None)):
for j in range(len(target_branches)):
ycand[target_branches[j]][ielem] = g.nodes[candidate][target_branches[j]]
lv = vector.obj(x=0, y=0, z=0, t=0)
if (len(genparticles) > 0):
pid = g.nodes[genparticles[0]]['typ']
charge = g.nodes[genparticles[0]]['charge']
for gp in genparticles:
lv += vector.obj(pt=g.nodes[gp]['pt'], eta=g.nodes[gp]['eta'], phi=g.nodes[gp]['phi'], e=g.nodes[gp]['e'])
if ((elem_type == 5) and ((pid == 22) or (pid == 11))):
pid = 130
if (elem_type in [8, 9]):
if (pid == 130):
pid = 1
elif (pid == 22):
pid = 2
if (elem_type in [2, 3, 4, 5]):
if (pid == 1):
pid = 130
elif (pid == 2):
pid = 22
gp = {'pt': lv.rho, 'eta': lv.eta, 'sin_phi': np.sin(lv.phi), 'cos_phi': np.cos(lv.phi), 'e': lv.t, 'typ': pid, 'px': lv.x, 'py': lv.y, 'pz': lv.z, 'charge': (charge if (pid in [211, 11, 13]) else 0)}
for j in range(len(target_branches)):
ygen[target_branches[j]][ielem] = gp[target_branches[j]]
return (Xelem, ycand, ygen)
|
def make_graph(ev, iev):
element_type = ev['element_type'][iev]
element_pt = ev['element_pt'][iev]
element_e = ev['element_energy'][iev]
element_eta = ev['element_eta'][iev]
element_phi = ev['element_phi'][iev]
element_eta_ecal = ev['element_eta_ecal'][iev]
element_phi_ecal = ev['element_phi_ecal'][iev]
element_eta_hcal = ev['element_eta_hcal'][iev]
element_phi_hcal = ev['element_phi_hcal'][iev]
element_trajpoint = ev['element_trajpoint'][iev]
element_layer = ev['element_layer'][iev]
element_charge = ev['element_charge'][iev]
element_depth = ev['element_depth'][iev]
element_deltap = ev['element_deltap'][iev]
element_sigmadeltap = ev['element_sigmadeltap'][iev]
element_px = ev['element_px'][iev]
element_py = ev['element_py'][iev]
element_pz = ev['element_pz'][iev]
element_sigma_x = ev['element_sigma_x'][iev]
element_sigma_y = ev['element_sigma_y'][iev]
element_sigma_z = ev['element_sigma_z'][iev]
element_muon_dt_hits = ev['element_muon_dt_hits'][iev]
element_muon_csc_hits = ev['element_muon_csc_hits'][iev]
element_muon_type = ev['element_muon_type'][iev]
element_gsf_electronseed_trkorecal = ev['element_gsf_electronseed_trkorecal'][iev]
element_gsf_electronseed_dnn1 = ev['element_gsf_electronseed_dnn1'][iev]
element_gsf_electronseed_dnn2 = ev['element_gsf_electronseed_dnn2'][iev]
element_gsf_electronseed_dnn3 = ev['element_gsf_electronseed_dnn3'][iev]
element_gsf_electronseed_dnn4 = ev['element_gsf_electronseed_dnn4'][iev]
element_gsf_electronseed_dnn5 = ev['element_gsf_electronseed_dnn5'][iev]
element_num_hits = ev['element_num_hits'][iev]
element_cluster_flags = ev['element_cluster_flags'][iev]
element_corr_energy = ev['element_corr_energy'][iev]
element_corr_energy_err = ev['element_corr_energy_err'][iev]
element_pterror = ev['element_pterror'][iev]
element_etaerror = ev['element_etaerror'][iev]
element_phierror = ev['element_phierror'][iev]
element_lambda = ev['element_lambda'][iev]
element_theta = ev['element_theta'][iev]
element_lambdaerror = ev['element_lambdaerror'][iev]
element_thetaerror = ev['element_thetaerror'][iev]
element_vx = ev['element_vx'][iev]
element_vy = ev['element_vy'][iev]
element_vz = ev['element_vz'][iev]
element_time = ev['element_time'][iev]
element_timeerror = ev['element_timeerror'][iev]
element_etaerror1 = ev['element_etaerror1'][iev]
element_etaerror2 = ev['element_etaerror2'][iev]
element_etaerror3 = ev['element_etaerror3'][iev]
element_etaerror4 = ev['element_etaerror4'][iev]
element_phierror1 = ev['element_phierror1'][iev]
element_phierror2 = ev['element_phierror2'][iev]
element_phierror3 = ev['element_phierror3'][iev]
element_phierror4 = ev['element_phierror4'][iev]
trackingparticle_pid = ev['trackingparticle_pid'][iev]
trackingparticle_charge = ev['trackingparticle_charge'][iev]
trackingparticle_pt = ev['trackingparticle_pt'][iev]
trackingparticle_e = ev['trackingparticle_energy'][iev]
trackingparticle_eta = ev['trackingparticle_eta'][iev]
trackingparticle_phi = ev['trackingparticle_phi'][iev]
trackingparticle_ev = ev['trackingparticle_ev'][iev]
caloparticle_pid = ev['caloparticle_pid'][iev]
caloparticle_charge = ev['caloparticle_charge'][iev]
caloparticle_pt = ev['caloparticle_pt'][iev]
caloparticle_e = ev['caloparticle_energy'][iev]
caloparticle_eta = ev['caloparticle_eta'][iev]
caloparticle_phi = ev['caloparticle_phi'][iev]
caloparticle_ev = ev['caloparticle_ev'][iev]
caloparticle_idx_trackingparticle = ev['caloparticle_idx_trackingparticle'][iev]
pfcandidate_pdgid = ev['pfcandidate_pdgid'][iev]
pfcandidate_pt = ev['pfcandidate_pt'][iev]
pfcandidate_e = ev['pfcandidate_energy'][iev]
pfcandidate_eta = ev['pfcandidate_eta'][iev]
pfcandidate_phi = ev['pfcandidate_phi'][iev]
gen_pdgid = ev['gen_pdgid'][iev]
gen_pt = ev['gen_pt'][iev]
gen_e = ev['gen_energy'][iev]
gen_eta = ev['gen_eta'][iev]
gen_phi = ev['gen_phi'][iev]
gen_status = ev['gen_status'][iev]
g = nx.DiGraph()
for iobj in range(len(element_type)):
g.add_node(('elem', iobj), typ=element_type[iobj], pt=element_pt[iobj], e=element_e[iobj], eta=element_eta[iobj], phi=element_phi[iobj], eta_ecal=element_eta_ecal[iobj], phi_ecal=element_phi_ecal[iobj], eta_hcal=element_eta_hcal[iobj], phi_hcal=element_phi_hcal[iobj], trajpoint=element_trajpoint[iobj], layer=element_layer[iobj], charge=element_charge[iobj], depth=element_depth[iobj], deltap=element_deltap[iobj], sigmadeltap=element_sigmadeltap[iobj], px=element_px[iobj], py=element_py[iobj], pz=element_pz[iobj], sigma_x=element_sigma_x[iobj], sigma_y=element_sigma_y[iobj], sigma_z=element_sigma_z[iobj], muon_dt_hits=element_muon_dt_hits[iobj], muon_csc_hits=element_muon_csc_hits[iobj], muon_type=element_muon_type[iobj], gsf_electronseed_trkorecal=element_gsf_electronseed_trkorecal[iobj], gsf_electronseed_dnn1=element_gsf_electronseed_dnn1[iobj], gsf_electronseed_dnn2=element_gsf_electronseed_dnn2[iobj], gsf_electronseed_dnn3=element_gsf_electronseed_dnn3[iobj], gsf_electronseed_dnn4=element_gsf_electronseed_dnn4[iobj], gsf_electronseed_dnn5=element_gsf_electronseed_dnn5[iobj], num_hits=element_num_hits[iobj], cluster_flags=element_cluster_flags[iobj], corr_energy=element_corr_energy[iobj], corr_energy_err=element_corr_energy_err[iobj], pterror=element_pterror[iobj], etaerror=element_etaerror[iobj], phierror=element_phierror[iobj], lambd=element_lambda[iobj], theta=element_theta[iobj], lambdaerror=element_lambdaerror[iobj], thetaerror=element_thetaerror[iobj], vx=element_vx[iobj], vy=element_vy[iobj], vz=element_vz[iobj], time=element_time[iobj], timeerror=element_timeerror[iobj], etaerror1=element_etaerror1[iobj], etaerror2=element_etaerror2[iobj], etaerror3=element_etaerror3[iobj], etaerror4=element_etaerror4[iobj], phierror1=element_phierror1[iobj], phierror2=element_phierror2[iobj], phierror3=element_phierror3[iobj], phierror4=element_phierror4[iobj])
for iobj in range(len(gen_pdgid)):
g.add_node(('gen', iobj), typ=gen_pdgid[iobj], pt=gen_pt[iobj], e=gen_e[iobj], eta=gen_eta[iobj], phi=gen_phi[iobj], status=gen_status[iobj])
for iobj in range(len(trackingparticle_pid)):
g.add_node(('tp', iobj), typ=trackingparticle_pid[iobj], charge=trackingparticle_charge[iobj], pt=trackingparticle_pt[iobj], e=trackingparticle_e[iobj], eta=trackingparticle_eta[iobj], phi=trackingparticle_phi[iobj], ispu=(trackingparticle_ev[iobj] != 0))
for iobj in range(len(caloparticle_pid)):
g.add_node(('sc', iobj), typ=caloparticle_pid[iobj], charge=caloparticle_charge[iobj], pt=caloparticle_pt[iobj], e=caloparticle_e[iobj], eta=caloparticle_eta[iobj], phi=caloparticle_phi[iobj], ispu=(caloparticle_ev[iobj] != 0))
for iobj in range(len(pfcandidate_pdgid)):
g.add_node(('pfcand', iobj), typ=abs(pfcandidate_pdgid[iobj]), pt=pfcandidate_pt[iobj], e=pfcandidate_e[iobj], eta=pfcandidate_eta[iobj], sin_phi=np.sin(pfcandidate_phi[iobj]), cos_phi=np.cos(pfcandidate_phi[iobj]), charge=get_charge(pfcandidate_pdgid[iobj]))
trackingparticle_to_element_first = ev['trackingparticle_to_element.first'][iev]
trackingparticle_to_element_second = ev['trackingparticle_to_element.second'][iev]
trackingparticle_to_element_cmp = ev['trackingparticle_to_element_cmp'][iev]
for (tp, elem, c) in zip(trackingparticle_to_element_first, trackingparticle_to_element_second, trackingparticle_to_element_cmp):
if (not (g.nodes[('elem', elem)]['typ'] in [7])):
g.add_edge(('tp', tp), ('elem', elem), weight=float('inf'))
caloparticle_to_element_first = ev['caloparticle_to_element.first'][iev]
caloparticle_to_element_second = ev['caloparticle_to_element.second'][iev]
caloparticle_to_element_cmp = ev['caloparticle_to_element_cmp'][iev]
for (sc, elem, c) in zip(caloparticle_to_element_first, caloparticle_to_element_second, caloparticle_to_element_cmp):
if (not (g.nodes[('elem', elem)]['typ'] in [7])):
g.add_edge(('sc', sc), ('elem', elem), weight=c)
nodes_to_remove = []
for (idx_sc, idx_tp) in enumerate(caloparticle_idx_trackingparticle):
if (idx_tp != (- 1)):
for elem in g.neighbors(('sc', idx_sc)):
g.add_edge(('tp', idx_tp), elem, weight=g.edges[(('sc', idx_sc), elem)]['weight'])
g.nodes[('tp', idx_tp)]['idx_sc'] = idx_sc
nodes_to_remove += [('sc', idx_sc)]
g.remove_nodes_from(nodes_to_remove)
element_to_candidate_first = ev['element_to_candidate.first'][iev]
element_to_candidate_second = ev['element_to_candidate.second'][iev]
for (elem, pfcand) in zip(element_to_candidate_first, element_to_candidate_second):
g.add_edge(('elem', elem), ('pfcand', pfcand), weight=1.0)
return g
|
def gen_e(g):
etot_gen = 0.0
etot_pf = 0.0
for node in g.nodes:
if ((node[0] == 'tp') or (node[0] == 'sc')):
etot_gen += g.nodes[node]['e']
if (node[0] == 'pfcand'):
etot_pf += g.nodes[node]['e']
return (etot_gen, etot_pf)
|
def process(args):
infile = args.input
outpath = os.path.join(args.outpath, os.path.basename(infile).split('.')[0])
tf = uproot.open(infile)
if ('ana' in tf):
tt = tf['ana/pftree']
elif ('pfana' in tf):
tt = tf['pfana/pftree']
else:
raise Exception('Could not find the PFAnalysisNtuplizer TTree')
if (args.num_events == (- 1)):
args.num_events = tt.num_entries
events_to_process = [i for i in range(args.num_events)]
all_data = []
ev = tt.arrays(library='np')
for iev in tqdm.tqdm(events_to_process):
g = make_graph(ev, iev)
g = cleanup_graph(g)
(Xelem, ycand, ygen) = prepare_normalized_table(g)
data = {}
ptcls_pythia = [n for n in g.nodes if ((n[0] == 'gen') and (g.nodes[n]['status'] == 1))]
feats = ['typ', 'pt', 'eta', 'phi', 'e']
arr_ptcls_pythia = np.array([[g.nodes[n][f] for f in feats] for n in ptcls_pythia])
if args.save_normalized_table:
data = {'Xelem': Xelem, 'ycand': ycand, 'ygen': ygen, 'pythia': arr_ptcls_pythia}
if args.save_full_graph:
data['full_graph'] = g
all_data += [data]
with open((outpath + '.pkl'), 'wb') as fi:
pickle.dump(all_data, fi)
|
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, help='Input file from PFAnalysis', required=True)
parser.add_argument('--outpath', type=str, default='raw', help='output path')
parser.add_argument('--save-full-graph', action='store_true', help='save the full event graph')
parser.add_argument('--save-normalized-table', action='store_true', help='save the uniquely identified table')
parser.add_argument('--num-events', type=int, help='number of events to process', default=(- 1))
args = parser.parse_args()
return args
|
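# The excerpt does not show the script entry point; presumably the two functions above
# are wired together along these lines:
if __name__ == '__main__':
    process(parse_args())
|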
class ClicEdmQqPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'update stats, move to 380 GeV', '1.2.0': 'sin cos as separate features', '1.3.0': 'Update stats to ~1M events', '1.3.1': 'Update stats to ~2M events', '1.4.0': 'Fix ycand matching', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/clic_edm4hep/ ./\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmQqPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CL))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_cluster=X_FEATURES_CL, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'p8_ee_qq_ecm380/')))
def _generate_examples(self, files):
return generate_examples(files)
|
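# Once a builder has been generated (e.g. via `tfds build` with --manual_dir pointing at the
# raw files), the dataset loads under its snake_case name; a sketch, assuming split_sample
# defines a 'train' split:
import tensorflow_datasets as tfds

builder = tfds.builder('clic_edm_qq_pf')
ds = builder.as_dataset(split='train')
for example in ds.take(1):
    print(example['X'].shape, example['ygen'].shape)
|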
class ClicEdmTtbarPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'update stats, move to 380 GeV', '1.2.0': 'sin/cos phi separately', '1.3.0': 'Update stats to ~1M events', '1.4.0': 'Fix ycand matching', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/clic_edm4hep/ ./\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmTtbarPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CL))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_cluster=X_FEATURES_CL, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'p8_ee_tt_ecm380/')))
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmTtbarPu10Pf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.3.0': 'Update stats to ~1M events', '1.4.0': 'Fix ycand matching', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/clic_edm4hep/ ./\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmTtbarPu10Pf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CL))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_cluster=X_FEATURES_CL, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'p8_ee_tt_ecm380_PU10/')))
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmWwFullhadPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.3.0': 'Update stats to ~1M events', '1.4.0': 'Fix ycand matching', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/clic_edm4hep/ ./\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmWwFullhadPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CL))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_cluster=X_FEATURES_CL, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'p8_ee_WW_fullhad_ecm380/')))
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmZhTautauPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.3.0': 'First version', '1.4.0': 'Fix ycand matching', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/clic_edm4hep/ ./\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmZhTautauPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CL))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_cluster=X_FEATURES_CL, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'p8_ee_ZH_Htautau_ecm380/')))
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmQqHitsPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'0.9.0': 'Small stats', '1.0.0': 'Initial release', '1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmQqHitsPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'p8_ee_qq_ecm380/')))
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmQqHitsPf10k(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from: https://zenodo.org/record/8414225\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmQqHitsPf10k, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'p8_ee_qq_ecm380/')), max_files=100)
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmSingleElectronHitsPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmSingleElectronHitsPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample_several([Path((path / 'e-/')), Path((path / 'e+/'))])
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmSingleGammaHitsPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmSingleGammaHitsPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'gamma/')))
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmSingleKaon0lHitsPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmSingleKaon0lHitsPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'kaon0L/')))
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmSingleMuonHitsPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmSingleMuonHitsPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample_several([Path((path / 'mu-/')), Path((path / 'mu+/'))])
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmSingleNeutronHitsPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmSingleNeutronHitsPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'neutron/')))
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmSinglePiHitsPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmSinglePiHitsPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample_several([Path((path / 'pi-/')), Path((path / 'pi+/'))])
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmSinglePi0HitsPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmSinglePi0HitsPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'pi0/')))
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmTtbarHitsPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'0.9.0': 'Small stats', '1.0.0': 'Initial release', '1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow_dataset can also be downloaded from:\n FIXME\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmTtbarHitsPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'p8_ee_tt_ecm380/')))
def _generate_examples(self, files):
return generate_examples(files)
|
class ClicEdmTtbarHitsPf10k(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.5.0')
RELEASE_NOTES = {'1.5.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n The processed tensorflow dataset can also be downloaded from: https://zenodo.org/record/8414225\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(ClicEdmTtbarHitsPf10k, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = dl_manager.manual_dir
return split_sample(Path((path / 'p8_ee_tt_ecm380/')), max_files=100)
def _generate_examples(self, files):
return generate_examples(files)
|
class CmsPfMultiParticleGun(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_multi_particle_gun dataset.'
VERSION = tfds.core.Version('1.6.1')
RELEASE_NOTES = {'1.6.0': 'Initial release', '1.6.1': 'Additional stats'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_multi_particle_gun ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfMultiParticleGun, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'MultiParticlePFGun50_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfQcd(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_qcd dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.3.0': '12_2_0_pre2 generation with updated caloparticle/trackingparticle', '1.3.1': 'Remove PS again', '1.4.0': 'Add gen jet index information', '1.5.0': 'No padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_qcd ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfQcd, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'QCDForPF_14TeV_TuneCUETP8M1_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfQcdHighPt(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_qcd_high_pt dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.3.0': '12_2_0_pre2 generation with updated caloparticle/trackingparticle', '1.3.1': 'Remove PS again', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_qcd_high_pt ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfQcdHighPt, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'QCD_Pt_3000_7000_14TeV_TuneCUETP8M1_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfSingleElectron(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_singleele dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Initial release.', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_electron ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfSingleElectron, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'SingleElectronFlatPt1To1000_pythia8_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfSingleGamma(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_singlegamma dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.1.0': 'Initial release', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_gamma ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfSingleGamma, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'SingleGammaFlatPt1To1000_pythia8_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfSingleMu(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_singlemu dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Add muon type, fix electron GSF association', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_mu ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfSingleMu, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'SingleMuFlatLogPt_100MeVto2TeV_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfSingleNeutron(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_singleneutron dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.1.0': 'Initial release', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_neutron ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfSingleNeutron, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'SingleNeutronFlatPt0p7To1000_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfSinglePi(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_singlepi dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Add muon type, fix electron GSF association', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add genjet information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_pi ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfSinglePi, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'SinglePiMinusFlatPt0p7To1000_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfSinglePi0(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_singlepi0 dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.1.0': 'Initial release', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_pi0 ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfSinglePi0, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'SinglePi0Pt1To1000_pythia8_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfSingleProton(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_singleproton dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.1.0': 'Initial release', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_proton ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfSingleProton, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'SingleProtonMinusFlatPt0p7To1000_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfSingleTau(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_singletau dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.1.0': 'Add muon type, fix electron GSF association', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add genjet information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_tau ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfSingleTau, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'SingleTauFlatPt1To1000_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfSmsT1tttt(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_sms_t1tttt dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_sms_t1tttt ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfSmsT1tttt, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'SMS-T1tttt_mGl-1500_mLSP-100_TuneCP5_14TeV_pythia8_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfTtbar(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_ttbar dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Add muon type, fix electron GSF association', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.3.0': '12_2_0_pre2 generation with updated caloparticle/trackingparticle', '1.3.1': 'Remove PS again', '1.4.0': 'Add gen jet index information', '1.5.0': 'No padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_ttbar ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfTtbar, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'TTbar_14TeV_TuneCUETP8M1_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class CmsPfZtt(tfds.core.GeneratorBasedBuilder):
'DatasetBuilder for cms_pf_ztt dataset.'
VERSION = tfds.core.Version('1.6.0')
RELEASE_NOTES = {'1.3.0': '12_2_0_pre2 generation with updated caloparticle/trackingparticle', '1.3.1': 'Remove PS again', '1.4.0': 'Add gen jet index information', '1.5.0': 'No padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_ztt ~/tensorflow_datasets/\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(CmsPfZtt, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
path = dl_manager.manual_dir
sample_dir = 'ZTT_All_hadronic_14TeV_TuneCUETP8M1_cfi'
return cms_utils.split_sample(((path / sample_dir) / 'raw'))
def _generate_examples(self, files):
return cms_utils.generate_examples(files)
|
class DelphesQcdPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.2.0')
RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Do not pad events to the same size', '1.2.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n Download from https://zenodo.org/record/4559324#.YTs853tRVH4\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(DelphesQcdPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=np.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=np.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=np.float32)}), supervised_keys=None, homepage='https://zenodo.org/record/4559324#.YTs853tRVH4', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = Path(dl_manager.manual_dir)
return split_sample(Path((path / 'pythia8_qcd/raw')))
def _generate_examples(self, path):
return generate_examples(path)
|
class DelphesTtbarPf(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version('1.2.0')
RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Do not pad events to the same size', '1.2.0': 'Regenerate with ARRAY_RECORD'}
MANUAL_DOWNLOAD_INSTRUCTIONS = '\n Download from https://zenodo.org/record/4559324#.YTs853tRVH4\n '
def __init__(self, *args, **kwargs):
kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
super(DelphesTtbarPf, self).__init__(*args, **kwargs)
def _info(self) -> tfds.core.DatasetInfo:
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=np.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=np.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=np.float32)}), supervised_keys=None, homepage='https://zenodo.org/record/4559324#.YTs853tRVH4', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES))
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
path = Path(dl_manager.manual_dir)
return split_sample(Path((path / 'pythia8_ttbar/raw')))
def _generate_examples(self, path):
return generate_examples(path)
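# Usage sketch (not part of the original module): once the Zenodo files are in
# place under the manual directory, the builder can be prepared and read back
# with the random-access data-source API. Dataset name and paths are illustrative.
#
# import tensorflow_datasets as tfds
# builder = tfds.builder('delphes_ttbar_pf:1.2.0', data_dir='~/tensorflow_datasets')
# builder.download_and_prepare()
# ds = builder.as_data_source(split='train')
# print(len(ds), ds[0]['X'].shape)  # per-event (num_elements, num_features) arrays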
|
@numba.njit
def deltaphi(phi1, phi2):
diff = (phi1 - phi2)
return np.arctan2(np.sin(diff), np.cos(diff))
|
@numba.njit
def deltar(eta1, phi1, eta2, phi2):
deta = (eta1 - eta2)
dphi = deltaphi(phi1, phi2)
return np.sqrt(((deta ** 2) + (dphi ** 2)))
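# Illustrative check (not in the original source): the arctan2 form wraps the
# angle difference into (-pi, pi], so angles straddling the +/-pi boundary come
# out close together rather than ~2*pi apart.
assert abs(deltaphi(3.1, -3.1) - (-0.0832)) < 1e-3
assert abs(deltar(0.0, 3.1, 0.1, -3.1) - 0.130) < 1e-2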
|
@numba.njit
def match_jets(jets1, jets2, deltaR_cut):
iev = len(jets1)
jet_inds_1_ev = []
jet_inds_2_ev = []
for ev in range(iev):
j1 = jets1[ev]
j2 = jets2[ev]
jet_inds_1 = []
jet_inds_2 = []
for ij1 in range(len(j1)):
drs = np.zeros(len(j2), dtype=np.float64)
for ij2 in range(len(j2)):
eta1 = j1.eta[ij1]
eta2 = j2.eta[ij2]
phi1 = j1.phi[ij1]
phi2 = j2.phi[ij2]
dr = deltar(eta1, phi1, eta2, phi2)
drs[ij2] = dr
if (len(drs) > 0):
min_idx_dr = np.argmin(drs)
if (drs[min_idx_dr] < deltaR_cut):
jet_inds_1.append(ij1)
jet_inds_2.append(min_idx_dr)
jet_inds_1_ev.append(jet_inds_1)
jet_inds_2_ev.append(jet_inds_2)
return (jet_inds_1_ev, jet_inds_2_ev)
|
def squeeze_if_one(arr):
if (arr.shape[(- 1)] == 1):
return np.squeeze(arr, axis=(- 1))
else:
return arr
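# Illustrative: a trailing singleton axis is dropped, other shapes pass through.
assert squeeze_if_one(np.zeros((5, 1))).shape == (5,)
assert squeeze_if_one(np.zeros((5, 3))).shape == (5, 3)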
|
def build_dummy_array(num, dtype=np.int64):
return awkward.Array(awkward.contents.ListOffsetArray(awkward.index.Index64(np.zeros((num + 1), dtype=np.int64)), awkward.from_numpy(np.array([], dtype=dtype), highlevel=False)))
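# Illustrative: with all-zero offsets, each of the `num` entries is an empty
# list, giving a jagged placeholder with the right event count.
dummy = build_dummy_array(3)
assert len(dummy) == 3
assert awkward.to_list(dummy) == [[], [], []]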
|
def match_two_jet_collections(jets_coll, name1, name2, jet_match_dr):
num_events = len(jets_coll[name1])
vec1 = vector.awk(awkward.zip({'pt': jets_coll[name1].pt, 'eta': jets_coll[name1].eta, 'phi': jets_coll[name1].phi, 'energy': jets_coll[name1].energy}))
vec2 = vector.awk(awkward.zip({'pt': jets_coll[name2].pt, 'eta': jets_coll[name2].eta, 'phi': jets_coll[name2].phi, 'energy': jets_coll[name2].energy}))
ret = match_jets(vec1, vec2, jet_match_dr)
j1_idx = awkward.from_iter(ret[0])
j2_idx = awkward.from_iter(ret[1])
num_jets = len(awkward.flatten(j1_idx))
if (num_jets > 0):
c1_to_c2 = awkward.Array({name1: j1_idx, name2: j2_idx})
else:
dummy = build_dummy_array(num_events)
c1_to_c2 = awkward.Array({name1: dummy, name2: dummy})
return c1_to_c2
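# Usage sketch (assumes awkward, vector and numba are installed; toy values and
# collection names are illustrative). One event where gen jet 0 lies within
# deltaR < 0.1 of the single reco jet, and gen jet 1 stays unmatched:
# gen = awkward.Array({'pt': [[50.0, 20.0]], 'eta': [[0.0, 1.5]], 'phi': [[0.1, -2.0]], 'energy': [[60.0, 25.0]]})
# reco = awkward.Array({'pt': [[48.0]], 'eta': [[0.02]], 'phi': [[0.12]], 'energy': [[58.0]]})
# pairs = match_two_jet_collections({'gen': gen, 'reco': reco}, 'gen', 'reco', jet_match_dr=0.1)
# awkward.to_list(pairs['gen'])   # [[0]]
# awkward.to_list(pairs['reco'])  # [[0]]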
|
class Expression():
def __init__(self, label, edmtype, eval_list):
self.label = label
self.edmtype = edmtype
self.eval_list = eval_list
self.handle = Handle(self.edmtype)
def get(self, event):
event.getByLabel(self.label, self.handle)
obj = self.handle.product()
results = {}
for (eval_name, eval_item) in self.eval_list:
ret = eval(eval_item)
results[eval_name] = ret
return results
|
class TFDSDataSource():
def __init__(self, ds):
self.ds = ds
tmp = self.ds.dataset_info
self.ds.dataset_info = SimpleNamespace()
self.ds.dataset_info.name = tmp.name
self.ds.dataset_info.features = tmp.features
self.rep = self.ds.__repr__()
def __getitem__(self, item):
if isinstance(item, int):
item = [item]
records = self.ds.data_source.__getitems__(item)
ret = [self.ds.dataset_info.features.deserialize_example_np(record, decoders=self.ds.decoders) for record in records]
if (len(item) == 1):
ret = ret[0]
return ret
def __len__(self):
return len(self.ds)
def __repr__(self):
return self.rep
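# Usage sketch (dataset name and path are placeholders; assumes a dataset built
# in ARRAY_RECORD format, so that as_data_source() supports random access):
# builder = tfds.builder('cms_pf_ttbar:1.6.0', data_dir='/local/tensorflow_datasets')
# ds = TFDSDataSource(builder.as_data_source(split='train'))
# event = ds[0]           # dict of numpy arrays: 'X', 'ygen', 'ycand'
# events = ds[[0, 1, 2]]  # list of three such dicts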
|
class PFDataset():
'Builds a DataSource from tensorflow datasets.'
def __init__(self, data_dir, name, split, num_samples=None):
'\n Args\n data_dir: path to tensorflow_datasets (e.g. `../data/tensorflow_datasets/`)\n name: sample and version (e.g. `clic_edm_ttbar_pf:1.5.0`)\n split: "train" or "test" (if "valid" then will use "test")\n num_samples: if not None, restrict the dataset to the first num_samples examples\n '
if (split == 'valid'):
split = 'test'
builder = tfds.builder(name, data_dir=data_dir)
self.ds = TFDSDataSource(builder.as_data_source(split=split))
if num_samples:
self.ds = torch.utils.data.Subset(self.ds, range(num_samples))
def __len__(self):
return len(self.ds)
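# Usage sketch (path and dataset name are placeholders):
# dataset = PFDataset('/local/tensorflow_datasets', 'clic_edm_ttbar_pf:1.5.0', 'train', num_samples=1000)
# len(dataset)  # 1000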
|
class PFDataLoader(torch.utils.data.DataLoader):
'\n Copied from https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/loader/dataloader.html#DataLoader\n because we need to implement our own Collater class to load the tensorflow_datasets (see below).\n '
def __init__(self, dataset: PFDataset, batch_size: int=1, shuffle: bool=False, follow_batch: Optional[List[str]]=None, exclude_keys: Optional[List[str]]=None, **kwargs):
collate_fn = kwargs.pop('collate_fn', None)
self.follow_batch = follow_batch
self.exclude_keys = exclude_keys
super().__init__(dataset, batch_size, shuffle, collate_fn=collate_fn, **kwargs)
|
class Collater():
'Based on the Collater found on torch_geometric docs we build our own.'
def __init__(self, keys_to_get, follow_batch=None, exclude_keys=None, pad_bin_size=640, pad_3d=True):
self.follow_batch = follow_batch
self.exclude_keys = exclude_keys
self.keys_to_get = keys_to_get
self.pad_bin_size = pad_bin_size
self.pad_3d = pad_3d
def __call__(self, inputs):
num_samples_in_batch = len(inputs)
elem_keys = self.keys_to_get
batch = []
for ev in range(num_samples_in_batch):
batch.append(Data())
for elem_key in elem_keys:
batch[ev][elem_key] = Tensor(inputs[ev][elem_key])
batch[ev]['batch'] = torch.tensor(([ev] * len(inputs[ev][elem_key])))
ret = Batch.from_data_list(batch, self.follow_batch, self.exclude_keys)
if (not self.pad_3d):
return ret
else:
ret = {k: torch_geometric.utils.to_dense_batch(getattr(ret, k), ret.batch) for k in elem_keys}
ret['mask'] = ret['X'][1]
for k in elem_keys:
ret[k] = ret[k][0]
ret = Batch(**ret)
return ret
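# Illustrative (assumes torch_geometric is importable): two toy events with 5
# and 3 elements are zero-padded into dense [batch, num_elems, num_features]
# tensors, plus a boolean mask marking the real (non-padded) elements.
# events = [{'X': np.random.rand(5, 4), 'ygen': np.random.rand(5, 6)},
#           {'X': np.random.rand(3, 4), 'ygen': np.random.rand(3, 6)}]
# batch = Collater(['X', 'ygen'])(events)
# batch.X.shape     # torch.Size([2, 5, 4])
# batch.mask.shape  # torch.Size([2, 5])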
|
class InterleavedIterator(object):
'Will combine DataLoaders of different lengths and batch sizes.'
def __init__(self, data_loaders):
self.idx = 0
self.data_loaders = data_loaders
self.data_loaders_iter = [iter(dl) for dl in data_loaders]
max_loader_size = max([len(dl) for dl in data_loaders])
self.loader_ds_indices = []
for i in range(max_loader_size):
for (iloader, loader) in enumerate(data_loaders):
if (i < len(loader)):
self.loader_ds_indices.append(iloader)
self.cur_index = 0
self._len = None
def __iter__(self):
return self
def __next__(self):
try:
iloader = self.loader_ds_indices[self.cur_index]
except IndexError:
self.cur_index = 0
self.data_loaders_iter = [iter(dl) for dl in self.data_loaders]
raise StopIteration
self.cur_index += 1
return next(self.data_loaders_iter[iloader])
def __len__(self):
if self._len:
return self._len
else:
len_ = 0
for iloader in range(len(self.data_loaders_iter)):
len_ += len(self.data_loaders_iter[iloader])
self._len = len_
return len_
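# Illustrative, with plain torch DataLoaders: batches come out round-robin
# while all loaders still have data, then the longer loader fills the
# remainder; exhausting the iterator resets it for the next epoch.
# dl_a = torch.utils.data.DataLoader(list(range(6)), batch_size=2)  # 3 batches
# dl_b = torch.utils.data.DataLoader(list(range(2)), batch_size=2)  # 1 batch
# it = InterleavedIterator([dl_a, dl_b])
# len(it)  # 4
# [b.tolist() for b in it]  # [[0, 1], [0, 1], [2, 3], [4, 5]]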
|
def get_interleaved_dataloaders(world_size, rank, config, use_cuda, pad_3d, use_ray):
loaders = {}
for split in ['train', 'valid']:
loaders[split] = []
for type_ in config[f'{split}_dataset'][config['dataset']]:
dataset = []
for sample in config[f'{split}_dataset'][config['dataset']][type_]['samples']:
version = config[f'{split}_dataset'][config['dataset']][type_]['samples'][sample]['version']
ds = PFDataset(config['data_dir'], f'{sample}:{version}', split, num_samples=config[f'n{split}']).ds
if ((rank == 0) or (rank == 'cpu')):
_logger.info(f'{split}_dataset: {sample}, {len(ds)}', color='blue')
dataset.append(ds)
dataset = torch.utils.data.ConcatDataset(dataset)
if (world_size > 1):
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
sampler = torch.utils.data.RandomSampler(dataset)
batch_size = (config[f'{split}_dataset'][config['dataset']][type_]['batch_size'] * config['gpu_batch_multiplier'])
loader = PFDataLoader(dataset, batch_size=batch_size, collate_fn=Collater(['X', 'ygen'], pad_3d=pad_3d), sampler=sampler, num_workers=config['num_workers'], prefetch_factor=config['prefetch_factor'], pin_memory=use_cuda, pin_memory_device=('cuda:{}'.format(rank) if use_cuda else ''))
if use_ray:
import ray
loader = ray.train.torch.prepare_data_loader(loader)
loaders[split].append(loader)
loaders[split] = InterleavedIterator(loaders[split])
return loaders
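# The nested config layout this function assumes (values illustrative, not a
# complete training config):
# config = {
#     'dataset': 'clic',
#     'data_dir': '/local/tensorflow_datasets',
#     'ntrain': None, 'nvalid': None,
#     'num_workers': 4, 'prefetch_factor': 2, 'gpu_batch_multiplier': 1,
#     'train_dataset': {'clic': {'physical': {
#         'batch_size': 8,
#         'samples': {'clic_edm_ttbar_pf': {'version': '1.5.0'}}}}},
#     'valid_dataset': {...},  # same shape as 'train_dataset'
# }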
|
def _logging(rank, _logger, msg):
'Will log the message only on rank 0 or cpu.'
if ((rank == 0) or (rank == 'cpu')):
_logger.info(msg)
|
def _configLogger(name, filename=None, loglevel=logging.INFO):
logger = logging.getLogger(name)
logger.setLevel(loglevel)
if filename:
logfile = logging.FileHandler(filename)
logfile.setLevel(loglevel)
logfile.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s'))
logger.addHandler(logfile)
|
class ColoredLogger():
color_dict = {'black': '\x1b[0;30m', 'red': '\x1b[0;31m', 'green': '\x1b[0;32m', 'orange': '\x1b[0;33m', 'blue': '\x1b[0;34m', 'purple': '\x1b[0;35m', 'cyan': '\x1b[0;36m', 'lightgray': '\x1b[0;37m', 'darkgray': '\x1b[1;30m', 'lightred': '\x1b[1;31m', 'lightgreen': '\x1b[1;32m', 'yellow': '\x1b[1;33m', 'lightblue': '\x1b[1;34m', 'lightpurple': '\x1b[1;35m', 'lightcyan': '\x1b[1;36m', 'white': '\x1b[1;37m', 'bold': '\x1b[1m', 'endcolor': '\x1b[0m'}
def __init__(self, name):
self.logger = logging.getLogger(name)
def colorize(self, msg, color):
return ((self.color_dict[color] + msg) + self.color_dict['endcolor'])
def debug(self, msg, *args, color=None, **kwargs):
if color:
msg = self.colorize(msg, color)
self.logger.debug(msg, *args, **kwargs)
def info(self, msg, *args, color=None, **kwargs):
if color:
msg = self.colorize(msg, color)
self.logger.info(msg, *args, **kwargs)
def warning(self, msg, *args, color=None, **kwargs):
if color:
msg = self.colorize(msg, color)
self.logger.warning(msg, *args, **kwargs)
def error(self, msg, *args, color=None, **kwargs):
if color:
msg = self.colorize(msg, color)
self.logger.error(msg, *args, **kwargs)
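# Usage sketch: this wrapper is what makes calls like
# _logger.info('...', color='blue') elsewhere in this module work.
# _logger = ColoredLogger('mlpf')
# _logger.info('loaded 10000 events', color='green')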
|
@lru_cache(10)
def warn_once(msg, logger=_logger):
logger.warning(msg)
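# Illustrative: lru_cache memoizes on the (msg, logger) arguments, so an
# identical message is emitted only once per process (message is hypothetical):
# warn_once('dataset has no ygen collection, skipping')  # logged
# warn_once('dataset has no ygen collection, skipping')  # suppressed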
|
def main():
args = parser.parse_args()
world_size = (args.gpus if (args.gpus > 0) else 1)
with open(args.config, 'r') as stream:
config = yaml.safe_load(stream)
config = override_config(config, args)
if args.hpo:
run_hpo(config, args)
else:
if args.resume_training:
outdir = args.resume_training
else:
outdir = create_experiment_dir(prefix=(((args.prefix or '') + Path(args.config).stem) + '_'), experiments_dir=(args.experiments_dir if args.experiments_dir else 'experiments'))
config_filename = ('train-config.yaml' if args.train else 'test-config.yaml')
with open((Path(outdir) / config_filename), 'w') as file:
yaml.dump(config, file)
if args.ray_train:
run_ray_training(config, args, outdir)
else:
device_agnostic_run(config, args, world_size, outdir)
|
def set_hps_from_search_space(search_space, config):
variable_names = ['lr', 'gpu_batch_multiplier']
for var in variable_names:
if (var in search_space.keys()):
config[var] = search_space[var]
if ('conv_type' in search_space.keys()):
conv_type = search_space['conv_type']
config['conv_type'] = conv_type
common_variable_names = ['embedding_dim', 'width', 'num_convs', 'activation']
if ((conv_type == 'gnn_lsh') or (conv_type == 'gravnet') or (conv_type == 'attention')):
for var in common_variable_names:
if (var in search_space.keys()):
config['model'][conv_type][var] = search_space[var]
gravnet_variable_names = ['k', 'propagate_dimensions', 'space_dimensions']
if (conv_type == 'gravnet'):
for var in gravnet_variable_names:
if (var in search_space.keys()):
config['model'][conv_type][var] = search_space[var]
attention_variables = ['num_heads']
if (conv_type == 'attention'):
for var in attention_variables:
if (var in search_space.keys()):
config['model'][conv_type][var] = search_space[var]
mamba_variables = ['num_heads', 'd_state', 'd_conv', 'expand']
if (conv_type == 'mamba'):
for var in mamba_variables:
if (var in search_space.keys()):
config['model'][conv_type][var] = search_space[var]
gnn_lsh_variable_names = ['bin_size', 'max_num_bins', 'distance_dim', 'layernorm', 'num_node_messages', 'ffn_dist_hidden_dim']
if (conv_type == 'gnn_lsh'):
for var in gnn_lsh_variable_names:
if (var in search_space.keys()):
config['model'][conv_type][var] = search_space[var]
return config
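# Illustrative: a sampled Ray Tune point overrides the matching config keys.
# search_space = {'lr': 1e-4, 'conv_type': 'attention', 'width': 512, 'num_heads': 16}
# config = set_hps_from_search_space(search_space, config)
# config['lr']                               # 1e-4
# config['model']['attention']['width']      # 512
# config['model']['attention']['num_heads']  # 16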
|
def set_raytune_search_parameters(search_space, config):
if ('layernorm' in search_space.keys()):
config['parameters']['combined_graph_layer']['layernorm'] = bool(search_space['layernorm'])
if ('ffn_dist_hidden_dim' in search_space.keys()):
config['parameters']['combined_graph_layer']['ffn_dist_hidden_dim'] = int(search_space['ffn_dist_hidden_dim'])
if ('ffn_dist_num_layers' in search_space.keys()):
config['parameters']['combined_graph_layer']['ffn_dist_num_layers'] = int(search_space['ffn_dist_num_layers'])
if ('distance_dim' in search_space.keys()):
config['parameters']['combined_graph_layer']['distance_dim'] = int(search_space['distance_dim'])
if ('num_node_messages' in search_space.keys()):
config['parameters']['combined_graph_layer']['num_node_messages'] = int(search_space['num_node_messages'])
if ('normalize_degrees' in search_space.keys()):
config['parameters']['combined_graph_layer']['node_message']['normalize_degrees'] = bool(search_space['normalize_degrees'])
if ('output_dim' in search_space.keys()):
config['parameters']['combined_graph_layer']['node_message']['output_dim'] = int(search_space['output_dim'])
if ('activation' in search_space.keys()):
config['parameters']['combined_graph_layer']['node_message']['activation'] = search_space['activation']
config['parameters']['combined_graph_layer']['dist_activation'] = search_space['activation']
config['parameters']['combined_graph_layer']['activation'] = search_space['activation']
if ('num_graph_layers_id' in search_space.keys()):
config['parameters']['num_graph_layers_id'] = int(search_space['num_graph_layers_id'])
if ('num_graph_layers_reg' in search_space.keys()):
config['parameters']['num_graph_layers_reg'] = int(search_space['num_graph_layers_reg'])
if ('bin_size' in search_space.keys()):
config['parameters']['combined_graph_layer']['bin_size'] = int(search_space['bin_size'])
if ('clip_value_low' in search_space.keys()):
config['parameters']['combined_graph_layer']['kernel']['clip_value_low'] = search_space['clip_value_low']
if ('dist_mult' in search_space.keys()):
config['parameters']['combined_graph_layer']['kernel']['dist_mult'] = search_space['dist_mult']
if ('dist_norm' in search_space.keys()):
config['parameters']['combined_graph_layer']['kernel']['dist_norm'] = search_space['dist_norm']
if ('dropout' in search_space.keys()):
config['parameters']['combined_graph_layer']['dropout'] = (search_space['dropout'] / 2)
config['parameters']['output_decoding']['dropout'] = search_space['dropout']
if ('lr' in search_space.keys()):
config['setup']['lr'] = search_space['lr']
if ('batch_multiplier' in search_space.keys()):
if (not config['batching']['bucket_by_sequence_length']):
raise ValueError('batch_multiplier given but bucket_by_sequence_length is set to False. Check config.')
config['batching']['batch_multiplier'] = search_space['batch_multiplier']
if ('batch_size_physical' in search_space.keys()):
config['train_test_datasets']['physical']['batch_per_gpu'] = int(search_space['batch_size_physical'])
if ('batch_size_delphes' in search_space.keys()):
config['train_test_datasets']['delphes']['batch_per_gpu'] = int(search_space['batch_size_delphes'])
if ('batch_size_gun' in search_space.keys()):
config['train_test_datasets']['gun']['batch_per_gpu'] = int(search_space['batch_size_gun'])
if ('expdecay_decay_steps' in search_space.keys()):
config['exponentialdecay']['decay_steps'] = search_space['expdecay_decay_steps']
if ('expdecay_decay_rate' in search_space.keys()):
config['exponentialdecay']['decay_rate'] = search_space['expdecay_decay_rate']
if ('event_loss' in search_space.keys()):
config['loss']['event_loss'] = search_space['event_loss']
if (search_space['event_loss'] == 'none'):
config['loss']['event_loss_coef'] = 0.0
else:
config['loss']['event_loss_coef'] = 1.0
if ('met_loss' in search_space.keys()):
config['loss']['met_loss'] = search_space['met_loss']
if (search_space['met_loss'] == 'none'):
config['loss']['met_loss_coef'] = 0.0
else:
config['loss']['met_loss_coef'] = 1.0
if ('event_and_met_loss' in search_space.keys()):
(event_l, met_l) = search_space['event_and_met_loss']
config['loss']['event_loss'] = event_l
if (event_l == 'none'):
config['loss']['event_loss_coef'] = 0.0
else:
config['loss']['event_loss_coef'] = 1.0
if (met_l == 'none'):
config['loss']['met_loss'] = met_l
config['loss']['met_loss_coef'] = 0.0
else:
config['loss']['met_loss'] = {'type': 'Huber', 'delta': 10.0}
config['loss']['met_loss_coef'] = 1.0
if ('mask_reg_cls0' in search_space.keys()):
config['parameters']['output_decoding']['mask_reg_cls0'] = search_space['mask_reg_cls0']
if ('lr_schedule' in search_space.keys()):
config['setup']['lr_schedule'] = search_space['lr_schedule']
if ('weight_decay' in search_space.keys()):
config['optimizer']['adamw']['weight_decay'] = search_space['weight_decay']
if ('optimizer' in search_space.keys()):
if (search_space['optimizer'] == 'pcgrad_adam'):
config['setup']['optimizer'] = 'adam'
config['optimizer']['adam']['pcgrad'] = True
elif (search_space['optimizer'] == 'adam'):
config['setup']['optimizer'] = 'adam'
config['optimizer']['adam']['pcgrad'] = False
else:
config['setup']['optimizer'] = search_space['optimizer']
if ('node_encoding_hidden_dim' in search_space.keys()):
config['parameters']['node_encoding_hidden_dim'] = search_space['node_encoding_hidden_dim']
if ('out_hidden_dim' in search_space.keys()):
config['parameters']['output_decoding']['id_hidden_dim'] = search_space['out_hidden_dim']
config['parameters']['output_decoding']['charge_hidden_dim'] = search_space['out_hidden_dim']
config['parameters']['output_decoding']['pt_hidden_dim'] = search_space['out_hidden_dim']
config['parameters']['output_decoding']['eta_hidden_dim'] = search_space['out_hidden_dim']
config['parameters']['output_decoding']['phi_hidden_dim'] = search_space['out_hidden_dim']
config['parameters']['output_decoding']['energy_hidden_dim'] = search_space['out_hidden_dim']
if ('out_num_layers' in search_space.keys()):
config['parameters']['output_decoding']['id_num_layers'] = search_space['out_num_layers']
config['parameters']['output_decoding']['charge_num_layers'] = search_space['out_num_layers']
config['parameters']['output_decoding']['pt_num_layers'] = search_space['out_num_layers']
config['parameters']['output_decoding']['eta_num_layers'] = search_space['out_num_layers']
config['parameters']['output_decoding']['phi_num_layers'] = search_space['out_num_layers']
config['parameters']['output_decoding']['energy_num_layers'] = search_space['out_num_layers']
if ('num_layers_encoder' in search_space.keys()):
config['parameters']['num_layers_encoder'] = search_space['num_layers_encoder']
if ('num_layers_decoder_reg' in search_space.keys()):
config['parameters']['num_layers_decoder_reg'] = search_space['num_layers_decoder_reg']
if ('num_layers_decoder_cls' in search_space.keys()):
config['parameters']['num_layers_decoder_cls'] = search_space['num_layers_decoder_cls']
if ('hidden_dim' in search_space.keys()):
config['parameters']['hidden_dim'] = search_space['hidden_dim']
if ('num_heads' in search_space.keys()):
config['parameters']['num_heads'] = search_space['num_heads']
if ('num_random_features' in search_space.keys()):
config['parameters']['num_random_features'] = search_space['num_random_features']
return config
|
def get_raytune_search_alg(raytune_cfg, seeds=False):
if ((raytune_cfg['sched'] == 'pbt') or (raytune_cfg['sched'] == 'pb2')):
if (raytune_cfg['search_alg'] is not None):
print("INFO: Using schedule '{}' is not compatible with Ray Tune search algorithms.".format(raytune_cfg['sched']))
print('INFO: Uing the Ray Tune {} scheduler without search algorithm'.format(raytune_cfg['sched']))
return None
if ((raytune_cfg['sched'] == 'bohb') or (raytune_cfg['sched'] == 'BOHB')):
print('INFO: Using TuneBOHB search algorithm since it is required for the BOHB schedule')
if seeds:
seed = 1234
else:
seed = None
return TuneBOHB(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], seed=seed)
if (raytune_cfg['search_alg'] == 'bayes'):
print('INFO: Using BayesOptSearch')
return BayesOptSearch(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], random_search_steps=raytune_cfg['bayes']['n_random_steps'])
if (raytune_cfg['search_alg'] == 'hyperopt'):
print('INFO: Using HyperOptSearch')
return HyperOptSearch(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], n_initial_points=raytune_cfg['hyperopt']['n_random_steps'])
if (raytune_cfg['search_alg'] == 'scikit'):
print('INFO: Using Bayesian optimization from scikit-optimize')
return SkOptSearch(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], convert_to_python=True)
if (raytune_cfg['search_alg'] == 'nevergrad'):
print('INFO: Using Bayesian optimization from nevergrad')
import nevergrad as ng
return NevergradSearch(optimizer=ng.optimizers.BayesOptim(pca=False, init_budget=raytune_cfg['nevergrad']['n_random_steps']), metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'])
else:
print('INFO: Not using any Ray Tune search algorithm')
return None
|
def get_raytune_schedule(raytune_cfg):
if (raytune_cfg['sched'] == 'asha'):
return AsyncHyperBandScheduler(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], time_attr='training_iteration', max_t=raytune_cfg['asha']['max_t'], grace_period=raytune_cfg['asha']['grace_period'], reduction_factor=raytune_cfg['asha']['reduction_factor'], brackets=raytune_cfg['asha']['brackets'])
elif (raytune_cfg['sched'] == 'hyperband'):
return HyperBandScheduler(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], time_attr='training_iteration', max_t=raytune_cfg['hyperband']['max_t'], reduction_factor=raytune_cfg['hyperband']['reduction_factor'])
elif ((raytune_cfg['sched'] == 'bohb') or (raytune_cfg['sched'] == 'BOHB')):
return HyperBandForBOHB(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], time_attr='training_iteration', max_t=raytune_cfg['hyperband']['max_t'], reduction_factor=raytune_cfg['hyperband']['reduction_factor'])
elif ((raytune_cfg['sched'] == 'pbt') or (raytune_cfg['sched'] == 'PBT')):
return PopulationBasedTraining(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], time_attr='training_iteration', perturbation_interval=raytune_cfg['pbt']['perturbation_interval'], hyperparam_mutations=raytune_cfg['pbt']['hyperparam_mutations'], log_config=True)
elif ((raytune_cfg['sched'] == 'pb2') or (raytune_cfg['sched'] == 'PB2')):
return PB2(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], time_attr='training_iteration', perturbation_interval=raytune_cfg['pb2']['perturbation_interval'], hyperparam_bounds=raytune_cfg['pb2']['hyperparam_bounds'], log_config=True)
else:
print('INFO: Not using any Ray Tune trial scheduler.')
return None
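# The raytune_cfg layout the two helpers above assume (illustrative values):
# raytune_cfg = {
#     'sched': 'asha',
#     'search_alg': 'hyperopt',
#     'default_metric': 'val_loss',
#     'default_mode': 'min',
#     'asha': {'max_t': 200, 'grace_period': 10, 'reduction_factor': 4, 'brackets': 1},
#     'hyperopt': {'n_random_steps': 10},
# }
# sched = get_raytune_schedule(raytune_cfg)
# search_alg = get_raytune_search_alg(raytune_cfg)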
|
@click.group()
@click.help_option('-h', '--help')
def main():
pass
|
@main.command()
@click.help_option('-h', '--help')
@click.option('-p', '--path', help='path to json file or dir containing json files', type=click.Path())
@click.option('-y', '--ylabel', default=None, help='Y-axis label', type=str)
@click.option('-x', '--xlabel', default='Step', help='X-axis label', type=str)
@click.option('-t', '--title', default=None, help='Plot title', type=str)
@click.option('-s', '--save_dir', default=None, help='Directory to save plots in', type=click.Path())
def plot_cometml_json(path, ylabel, xlabel, title=None, save_dir=None):
path = Path(path)
if path.is_dir():
json_files = path.glob('*.json')
else:
json_files = [path]
for json_file in json_files:
with open(json_file) as f:
data = json.load(f)
plt.figure(figsize=(12, 6))
for (ii, metric) in enumerate(data):
if ('val' in metric['name']):
pass
else:
try:
val_metric = data[(ii + 1)]
except IndexError:
val_metric = data[(ii - 1)]
if (('val_' + metric['name']) != val_metric['name']):
val_metric = data[(ii - 1)]
if (('val_' + metric['name']) != val_metric['name']):
raise ValueError("The val and train metrics don't match, {}, {}".format(('val_' + metric['name']), val_metric['name']))
pp = plt.plot(metric['x'], metric['y'], label=metric['name'], linestyle='-')
color = pp[0].get_color()
plt.plot(val_metric['x'], val_metric['y'], label=val_metric['name'], linestyle='--', color=color)
plt.legend()
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
if title:
plt.title(title)
if save_dir:
plt.savefig(str((Path(save_dir) / (json_file.stem + '.jpg'))))
if (not save_dir):
plt.show()
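# Illustrative invocation (script name is a placeholder; click derives the
# subcommand name from the function name):
# python plot_utils.py plot-cometml-json -p exported_metrics/ -y Loss -t 'Training curves' -s plots/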
|
class CustomTensorBoard(TensorBoard):
'\n Extends tensorflow.keras.callbacks TensorBoard\n\n Custom tensorboard class to make logging of learning rate possible when using\n keras.optimizers.schedules.LearningRateSchedule.\n See https://github.com/tensorflow/tensorflow/pull/37552\n\n Also logs momentum for supported optimizers that use momentum.\n '
def __init__(self, *args, **kwargs):
self.dump_history = kwargs.pop('dump_history')
super().__init__(*args, **kwargs)
def _collect_learning_rate(self, logs):
logs = (logs or {})
opt = self.model.optimizer
if hasattr(opt, 'lr'):
lr_schedule = getattr(opt, 'lr', None)
if isinstance(lr_schedule, tf.keras.optimizers.schedules.LearningRateSchedule):
logs['learning_rate'] = np.float64(tf.keras.backend.get_value(lr_schedule(opt.iterations)))
else:
logs.update({'learning_rate': np.float64(tf.keras.backend.eval(opt.lr))})
try:
logs.update({'momentum': np.float64(tf.keras.backend.eval(opt.momentum))})
except AttributeError:
pass
if isinstance(opt, tf.keras.optimizers.Adam):
logs.update({'adam_beta_1': np.float64(tf.keras.backend.eval(opt.beta_1))})
if hasattr(opt, 'loss_scale'):
logs.update({'loss_scale': np.float64(opt.loss_scale.numpy())})
return logs
def on_epoch_end(self, epoch, logs):
logs = (logs or {})
logs.update(self._collect_learning_rate(logs))
logs['time'] = time.time()
if self.dump_history:
history_path = (Path(self.log_dir) / 'history')
history_path.mkdir(parents=True, exist_ok=True)
history_path = str(history_path)
with open('{}/history_{}.json'.format(history_path, epoch), 'w') as fi:
converted_logs = {k: float(v) for (k, v) in logs.items()}
json.dump(converted_logs, fi)
super().on_epoch_end(epoch, logs)
def on_train_batch_end(self, batch, logs):
logs = (logs or {})
if (isinstance(self.update_freq, int) and ((batch % self.update_freq) == 0)):
logs.update(self._collect_learning_rate(logs))
super().on_train_batch_end(batch, logs)
|
class CustomModelCheckpoint(ModelCheckpoint):
'Extends tensorflow.keras.callbacks.ModelCheckpoint to also save optimizer'
def __init__(self, *args, **kwargs):
self.optimizer_to_save = kwargs.pop('optimizer_to_save')
self.optimizer_filepath = kwargs.pop('optimizer_save_filepath')
super().__init__(*args, **kwargs)
Path(self.filepath).parent.mkdir(parents=True, exist_ok=True)
def on_epoch_end(self, epoch, logs=None):
super().on_epoch_end(epoch, logs)
filepath = str(self.optimizer_filepath).format(epoch=(epoch + 1), **logs)
if (self.epochs_since_last_save == 0):
if self.save_best_only:
current = logs.get(self.monitor)
if (current == self.best):
with open(filepath, 'wb') as f:
pickle.dump(self.optimizer_to_save, f)
else:
with open(filepath, 'wb') as f:
pickle.dump(self.optimizer_to_save, f)
|
class BenchmarkLoggerCallback(tf.keras.callbacks.Callback):
def __init__(self, *args, **kwargs):
self.outdir = kwargs.pop('outdir')
self.steps_per_epoch = kwargs.pop('steps_per_epoch')
self.batch_size_per_gpu = kwargs.pop('batch_size_per_gpu')
self.num_gpus = kwargs.pop('num_gpus')
self.num_cpus = kwargs.pop('num_cpus')
self.train_set_size = kwargs.pop('train_set_size')
self.horovod_enabled = kwargs.pop('horovod_enabled')
super().__init__(*args, **kwargs)
def on_train_begin(self, logs=None):
self.times = []
self.start_time = tf.timestamp().numpy()
def on_epoch_begin(self, epoch, logs=None):
self.epoch_time_start = tf.timestamp().numpy()
def on_epoch_end(self, epoch, logs=None):
self.times.append((tf.timestamp().numpy() - self.epoch_time_start))
def plot(self, times):
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Time [s]')
plt.plot(times, 'o')
for i in range(len(times)):
if isinstance(times[i], tf.Tensor):
j = times[i].numpy()
else:
j = times[i]
if (i == 0):
plt.text((i + 0.02), (j + 0.2), str(round(j, 2)))
else:
if isinstance(times[(i - 1)], tf.Tensor):
j_prev = times[(i - 1)].numpy()
else:
j_prev = times[(i - 1)]
plt.text((i + 0.02), (j + 0.2), str(round((j - j_prev), 2)))
plt.ylim(bottom=0)
txt = 'Time in seconds per epoch. The numbers next to each data point\n show the difference in seconds compared to the previous epoch.'
plt.title(txt)
filename = (('time_per_epoch_' + datetime.now().strftime('%Y%m%d%H%M%S')) + '.png')
save_path = (Path(self.outdir) / filename)
print('Saving plot in {}'.format(save_path))
plt.savefig(save_path)
def on_train_end(self, logs=None):
result_path = Path(self.outdir, 'result.json')
stop_time = tf.timestamp().numpy()
total_time = round((stop_time - self.start_time), 2)
throughput_per_epoch = (self.train_set_size / np.array(self.times))
mean_throughput = round(np.mean(throughput_per_epoch[1:]), 2)
mean_epoch_time = round(np.mean(self.times[1:]), 2)
batch_size_total = (self.batch_size_per_gpu * (self.num_gpus or self.num_cpus or 1))
data = {'wl-scores': {'mean_throughput': mean_throughput, 'mean_epoch_time': mean_epoch_time}, 'wl-stats': {'num_epochs': len(self.times), 'epoch_times': self.times, 'train_start': self.start_time, 'train_stop': stop_time, 'train_time': total_time, 'horovod_enabled': self.horovod_enabled, 'GPU': self.num_gpus, 'CPU': self.num_cpus, 'train_set_size': self.train_set_size, 'batch_size_per_device': self.batch_size_per_gpu, 'batch_size_total': batch_size_total, 'steps_per_epoch': self.steps_per_epoch, 'events_per_epoch': (batch_size_total * self.steps_per_epoch), 'throughput_per_epoch': list(throughput_per_epoch)}}
print('Saving result to {}'.format(result_path.resolve()))
with result_path.open('w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4, cls=NpEncoder)
f.write('\n')
self.plot(self.times)
|
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return super(NpEncoder, self).default(obj)
|
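NpEncoder exists because the standard json module refuses numpy scalars and arrays; the callbacks above pass it via the cls argument. A small self-contained example:

import json
import numpy as np

payload = {'step': np.int64(3), 'loss': np.float32(0.25), 'times': np.array([1.0, 2.0])}
print(json.dumps(payload, cls=NpEncoder))
# {"step": 3, "loss": 0.25, "times": [1.0, 2.0]}
|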
def get_model_builder(config, total_steps):
(lr_schedule, optim_callbacks, lr) = get_lr_schedule(config, steps=total_steps)
def model_builder(hp):
node_encoding_hidden_dim = hp.Choice('node_dim', values=[128, 256, 512])
config['parameters']['node_encoding_hidden_dim'] = node_encoding_hidden_dim
config['parameters']['num_graph_layers_id'] = hp.Choice('num_graph_layers_id', [1, 2, 3])
config['parameters']['num_graph_layers_reg'] = hp.Choice('num_graph_layers_reg', [1, 2, 3])
config['parameters']['combined_graph_layer']['dropout'] = hp.Choice('cg_dropout', values=[0.0, 0.1, 0.2])
config['parameters']['combined_graph_layer']['num_node_messages'] = hp.Choice('num_node_messages', [1, 2])
config['parameters']['combined_graph_layer']['bin_size'] = hp.Choice('bin_size', values=[160, 320, 640])
config['parameters']['combined_graph_layer']['ffn_dist_hidden_dim'] = hp.Choice('ffn_dist_hidden_dim', values=[64, 128, 256])
config['parameters']['combined_graph_layer']['ffn_dist_num_layers'] = hp.Choice('ffn_dist_num_layers', values=[1, 2])
config['parameters']['combined_graph_layer']['kernel']['dist_mult'] = hp.Choice('dist_mult', values=[0.01, 0.1, 1.0])
config['parameters']['combined_graph_layer']['node_message']['output_dim'] = node_encoding_hidden_dim
config['parameters']['combined_graph_layer']['node_message']['normalize_degrees'] = hp.Choice('normalize_degrees', values=[True, False])
config['parameters']['output_decoding']['dropout'] = hp.Choice('output_dropout', values=[0.0, 0.1, 0.2])
config['parameters']['output_decoding']['layernorm'] = hp.Choice('output_layernorm', values=[True, False])
config['parameters']['output_decoding']['mask_reg_cls0'] = hp.Choice('output_mask_reg_cls0', values=[True, False])
model = make_model(config, dtype='float32')
model.build((1, config['dataset']['padded_num_elem_size'], config['dataset']['num_input_features']))
opt = get_optimizer(config, lr_schedule)
(loss_dict, loss_weights) = get_loss_dict(config)
model.compile(loss=loss_dict, optimizer=opt, sample_weight_mode='temporal', loss_weights=loss_weights)
return model
return (model_builder, optim_callbacks)
|
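model_builder takes a single hp argument, i.e. it matches the keras-tuner hypermodel signature, so it can be handed to a tuner directly. The sketch below is illustrative; the tuner type, objective, and directories are assumptions, not taken from this codebase.

import keras_tuner as kt

(model_builder, optim_callbacks) = get_model_builder(config, total_steps)
tuner = kt.Hyperband(model_builder, objective='val_loss', max_epochs=10, directory='hpo', project_name='mlpf_tuning')
# tuner.search(ds_train, validation_data=ds_val, callbacks=list(optim_callbacks))
# best_model = tuner.get_best_models(num_models=1)[0]
|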
class LRFinder(Callback):
"`Callback` that exponentially adjusts the learning rate after each training batch between `start_lr` and\n `end_lr` for a maximum number of batches: `max_step`. The loss and learning rate are recorded at each step allowing\n visually finding a good learning rate as per https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html via\n the `plot` method.\n\n A version of this learning rate finder technique is also described under the name 'LR range test' in Leslie Smith's\n paper: https://arxiv.org/pdf/1803.09820.pdf.\n "
def __init__(self, start_lr: float=1e-07, end_lr: float=0.01, max_steps: int=200, smoothing=0.9):
super(LRFinder, self).__init__()
(self.start_lr, self.end_lr) = (start_lr, end_lr)
self.max_steps = max_steps
self.smoothing = smoothing
(self.step, self.best_loss, self.avg_loss, self.lr) = (0, 0, 0, 0)
(self.lrs, self.losses) = ([], [])
def on_train_begin(self, logs=None):
(self.step, self.best_loss, self.avg_loss, self.lr) = (0, 0, 0, 0)
(self.lrs, self.losses) = ([], [])
def on_train_batch_begin(self, batch, logs=None):
self.lr = self.exp_annealing(self.step)
tf.keras.backend.set_value(self.model.optimizer.lr, self.lr)
def on_train_batch_end(self, batch, logs=None):
print('lr:', self.lr)
print('step', self.step)
logs = (logs or {})
loss = logs.get('loss')
step = self.step
if (loss is not None):  # `if loss:` would also skip a legitimate loss of exactly 0.0
print('loss', loss)
self.avg_loss = ((self.smoothing * self.avg_loss) + ((1 - self.smoothing) * loss))
smooth_loss = (self.avg_loss / (1 - (self.smoothing ** (self.step + 1))))
self.losses.append(smooth_loss)
self.lrs.append(self.lr)
if ((step == 0) or (loss < self.best_loss)):
self.best_loss = loss  # note: tracks the raw loss, while the divergence test below uses the smoothed loss
if ((smooth_loss > (100 * self.best_loss)) or tf.math.is_nan(smooth_loss)):
self.model.stop_training = True
print('Loss reached predefined maximum... stopping')
if (step >= self.max_steps):
print('STOPPING')
self.model.stop_training = True
self.step += 1
def exp_annealing(self, step):
return (self.start_lr * ((self.end_lr / self.start_lr) ** ((step * 1.0) / self.max_steps)))
def plot(self, save_dir=None, figname='lr_finder.jpg', log_scale=False):
(fig, ax) = plt.subplots(1, 1)
ax.set_ylabel('Loss')
ax.set_xlabel('Learning Rate')
ax.set_xscale('log')
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0e'))
ax.plot(self.lrs, self.losses)
if log_scale:
ax.set_yscale('log')
if (save_dir is not None):
Path(save_dir).mkdir(parents=True, exist_ok=True)
plt.savefig(str((Path(save_dir) / Path(figname))))
|
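Usage sketch for LRFinder (dataset and model names are placeholders). exp_annealing sweeps the learning rate geometrically, lr(step) = start_lr * (end_lr / start_lr) ** (step / max_steps), so lr(0) = start_lr and lr(max_steps) = end_lr.

lr_finder = LRFinder(start_lr=1e-07, end_lr=0.01, max_steps=200)
# model.fit(ds_train, epochs=1, steps_per_epoch=200, callbacks=[lr_finder])
# lr_finder.plot(save_dir='plots')  # pick a rate on the steepest descending part of the curve
|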
class ModelOptimizerCheckpoint(tf.keras.callbacks.ModelCheckpoint):
def on_epoch_end(self, epoch, logs=None):
super(ModelOptimizerCheckpoint, self).on_epoch_end(epoch, logs=logs)
weightfile_path = self.opt_path.format(epoch=(epoch + 1), **logs)  # `opt_path` is not set in this class; the caller must assign it after constructing the callback
weights = {}
self.model.optimizer.save_own_variables(weights)
with open(weightfile_path, 'wb') as fi:
pickle.dump({'weights': weights}, fi)
|
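A hedged counterpart to ModelOptimizerCheckpoint for resuming: the pickled dict holds the store filled by save_own_variables, which load_own_variables can read back once the optimizer's slot variables exist. The helper and the build() call are assumptions based on the TF 2.11+ Keras optimizer API, not code from this repository.

import pickle

def restore_optimizer_state(model, weightfile_path):
    with open(weightfile_path, 'rb') as f:
        weights = pickle.load(f)['weights']
    # Slot variables must exist before loading; building the optimizer
    # against the model's trainable variables creates them.
    model.optimizer.build(model.trainable_variables)
    model.optimizer.load_own_variables(weights)
|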
class CustomCallback(tf.keras.callbacks.Callback):
def __init__(self, outpath, dataset, config, plot_freq=1, horovod_enabled=False, comet_experiment=None, is_hpo_run=False):
super(CustomCallback, self).__init__()
self.plot_freq = plot_freq
self.dataset = dataset
self.outpath = outpath
self.config = config
self.horovod_enabled = horovod_enabled
self.comet_experiment = comet_experiment
self.is_hpo_run = is_hpo_run
def on_epoch_end(self, epoch, logs=None):
if ((not self.horovod_enabled) or (hvd.rank() == 0)):
epoch_end(self, epoch, logs, comet_experiment=self.comet_experiment)
|
def epoch_end(self, epoch, logs, comet_experiment=None):  # module-level helper; `self` is the CustomCallback instance passed in explicitly
epoch = (epoch + 1)
with open('{}/history_{}.json'.format(self.outpath, epoch), 'w') as fi:
json.dump(logs, fi)
if (self.is_hpo_run and (comet_experiment is not None)):
comet_experiment.log_metrics(logs, epoch=epoch)
if (self.plot_freq <= 0):
return
if (self.plot_freq >= 1):
if ((epoch % self.plot_freq) != 0):
return
cp_dir = (Path(self.outpath) / 'epoch_{}'.format(epoch))
cp_dir.mkdir(parents=True, exist_ok=True)
eval_model(self.model, self.dataset, self.config, cp_dir)
(yvals, X, filenames) = load_eval_data(str((cp_dir / '*.parquet')))
for fi in filenames:
os.remove(fi)
met_data = compute_met_and_ratio(yvals)
plot_jets(yvals, epoch, cp_dir, comet_experiment)
plot_jet_ratio(yvals, epoch, cp_dir, comet_experiment)
plot_met(met_data, epoch, cp_dir, comet_experiment)
plot_met_ratio(met_data, epoch, cp_dir, comet_experiment)
jet_distances = compute_distances(yvals['jet_gen_to_pred_genpt'], yvals['jet_gen_to_pred_predpt'], yvals['jet_ratio_pred'])
met_distances = compute_distances(met_data['gen_met'], met_data['pred_met'], met_data['ratio_pred'])
N_jets = len(awkward.flatten(yvals['jets_gen_pt']))
N_jets_matched_pred = len(yvals['jet_gen_to_pred_genpt'])
for (name, val) in [
('jet_matched_frac', ((N_jets_matched_pred / N_jets) if (N_jets > 0) else float('nan'))),
('jet_wd', jet_distances['wd']),
('jet_iqr', jet_distances['iqr']),
('jet_med', jet_distances['p50']),
('met_wd', met_distances['wd']),
('met_iqr', met_distances['iqr']),
('met_med', met_distances['p50']),
]:
logs[('val_' + name)] = val
if comet_experiment:
comet_experiment.log_metric(name, val, step=(epoch - 1))
|
def prepare_callbacks(config, outdir, dataset, comet_experiment=None, horovod_enabled=False, benchmark_dir=None, num_train_steps=None, num_cpus=None, num_gpus=None, train_samples=None, is_hpo_run=False):
callbacks = []
callbacks.append(tf.keras.callbacks.TerminateOnNaN())
callbacks += get_checkpoint_history_callback(outdir, config, dataset, comet_experiment, horovod_enabled, is_hpo_run)
if ((not horovod_enabled) or (hvd.rank() == 0)):
if benchmark_dir:
if (benchmark_dir == 'exp_dir'):
benchmark_dir = outdir
if (config['dataset']['schema'] == 'delphes'):
bmk_bs = config['train_test_datasets']['delphes']['batch_per_gpu']
elif ((config['dataset']['schema'] == 'cms') or (config['dataset']['schema'] == 'clic')):
assert (len(config['train_test_datasets']) == 1), 'Expected exactly one train_test_datasets key (physical) for the {} schema, found {}'.format(config['dataset']['schema'], config['train_test_datasets'].keys())
bmk_bs = config['train_test_datasets']['physical']['batch_per_gpu']
else:
raise ValueError('Benchmark callback only supports the delphes, cms, or clic dataset schema, got {}'.format(config['dataset']['schema']))
Path(benchmark_dir).mkdir(exist_ok=True, parents=True)
callbacks.append(BenchmarkLoggerCallback(outdir=benchmark_dir, steps_per_epoch=num_train_steps, batch_size_per_gpu=bmk_bs, num_gpus=num_gpus, num_cpus=num_cpus, train_set_size=train_samples, horovod_enabled=horovod_enabled))
return callbacks
|
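Illustrative wiring for prepare_callbacks; argument values below are placeholders.

callbacks = prepare_callbacks(
    config,
    outdir='experiments/run1',
    dataset=ds_val,
    comet_experiment=None,
    horovod_enabled=False,
    benchmark_dir='exp_dir',  # the literal 'exp_dir' is resolved to outdir inside the function
    num_train_steps=100,
    num_cpus=0,
    num_gpus=1,
    train_samples=6400,
)
# model.fit(ds_train, validation_data=ds_val, epochs=10, callbacks=callbacks)
|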