code stringlengths 101 5.91M |
|---|
def calculate_roc_values(thresholds, distances, labels, num_folds=10):
    """Compute cross-validated ROC statistics for a pair-verification task.

    For each of ``num_folds`` folds, the threshold that maximises accuracy on
    the training split is selected; TPR/FPR per candidate threshold and
    precision/recall/accuracy at the best threshold are then measured on the
    test split.

    Args:
        thresholds: 1-D sequence of candidate distance thresholds.
        distances: 1-D array of pairwise distances.
        labels: 1-D boolean array, True where a pair is "same".
        num_folds: number of cross-validation folds.

    Returns:
        Tuple of (mean TPR per threshold, mean FPR per threshold,
        per-fold precision, per-fold recall, per-fold accuracy,
        per-fold best threshold).
    """
    num_pairs = min(len(labels), len(distances))
    num_thresholds = len(thresholds)
    k_fold = KFold(n_splits=num_folds, shuffle=False)
    true_positive_rates = np.zeros((num_folds, num_thresholds))
    false_positive_rates = np.zeros((num_folds, num_thresholds))
    precision = np.zeros(num_folds)
    recall = np.zeros(num_folds)
    accuracy = np.zeros(num_folds)
    best_distances = np.zeros(num_folds)
    indices = np.arange(num_pairs)
    for fold_index, (train_set, test_set) in enumerate(k_fold.split(indices)):
        # Pick the threshold that maximises accuracy on the training split.
        accuracies_trainset = np.zeros(num_thresholds)
        for threshold_index, threshold in enumerate(thresholds):
            (_, _, _, _, accuracies_trainset[threshold_index]) = calculate_metrics(threshold=threshold, dist=distances[train_set], actual_issame=labels[train_set])
        best_threshold_index = np.argmax(accuracies_trainset)
        # Evaluate TPR/FPR for every threshold on the held-out split.
        for threshold_index, threshold in enumerate(thresholds):
            (true_positive_rates[fold_index, threshold_index], false_positive_rates[fold_index, threshold_index], _, _, _) = calculate_metrics(threshold=threshold, dist=distances[test_set], actual_issame=labels[test_set])
        # Precision/recall/accuracy at the best training threshold only.
        (_, _, precision[fold_index], recall[fold_index], accuracy[fold_index]) = calculate_metrics(threshold=thresholds[best_threshold_index], dist=distances[test_set], actual_issame=labels[test_set])
        best_distances[fold_index] = thresholds[best_threshold_index]
    # FIX: the fold-wise means were previously recomputed on every loop
    # iteration over partially-filled arrays; the final value was correct only
    # because the last iteration saw the full arrays. Compute them once here.
    true_positive_rate = np.mean(true_positive_rates, 0)
    false_positive_rate = np.mean(false_positive_rates, 0)
    return (true_positive_rate, false_positive_rate, precision, recall, accuracy, best_distances)
class TextClassProcessor(DataProcessor):
    """Processor for CSV-based text-classification datasets (UDA-style).

    Reads ``train.csv``/``test.csv`` with label in column 0 and text in
    column 1 (plus a title in column 2 when ``self.has_title`` is set), and
    converts rows into ``InputExample`` objects.
    """
    def get_train_examples(self, raw_data_dir):
        """Load supervised training examples from ``train.csv``."""
        examples = self._create_examples(self._read_tsv(os.path.join(raw_data_dir, 'train.csv'), quotechar='"', delimiter=','), 'train')
        # Sanity check: the declared dataset size must match what was read.
        assert (len(examples) == self.get_train_size())
        return examples
    def get_dev_examples(self, raw_data_dir):
        """Load evaluation examples from ``test.csv``."""
        return self._create_examples(self._read_tsv(os.path.join(raw_data_dir, 'test.csv'), quotechar='"', delimiter=','), 'test')
    def get_unsup_examples(self, raw_data_dir, unsup_set):
        """Load unsupervised examples.

        ``'unsup_in'`` reuses the training file (keeping 'unsup' rows);
        any other value names a ``<unsup_set>.csv`` file directly.
        """
        if (unsup_set == 'unsup_in'):
            return self._create_examples(self._read_tsv(os.path.join(raw_data_dir, 'train.csv'), quotechar='"', delimiter=','), 'unsup_in', skip_unsup=False)
        else:
            return self._create_examples(self._read_tsv(os.path.join(raw_data_dir, '{:s}.csv'.format(unsup_set)), quotechar='"', delimiter=','), unsup_set, skip_unsup=False)
    def _create_examples(self, lines, set_type, skip_unsup=True, only_unsup=False):
        """Turn raw CSV rows into InputExamples.

        Rows labelled 'unsup' are skipped when ``skip_unsup``; when
        ``only_unsup`` is set, only 'unsup' rows are kept.
        """
        examples = []
        for (i, line) in enumerate(lines):
            if (skip_unsup and (line[0] == 'unsup')):
                continue
            if (only_unsup and (line[0] != 'unsup')):
                continue
            guid = ('%s-%d' % (set_type, i))
            # With a title column, the title becomes text_a and the body text_b.
            if self.has_title:
                text_a = line[2]
                text_b = line[1]
            else:
                text_a = line[1]
                text_b = None
            label = line[0]
            # Strip HTML artifacts / web noise before tokenization.
            text_a = clean_web_text(text_a)
            if (text_b is not None):
                text_b = clean_web_text(text_b)
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class ReverseSDE(object):
    """Time-reversal of a forward SDE driven by a learned score model.

    Implements the reverse-time drift  f(x, t) - g(t)^2 * score(x, t)
    while keeping the forward diffusion coefficient unchanged.
    """
    def __init__(self, score_model):
        # Reuse the forward SDE attached to the score model.
        self.sde = score_model.sde
        self.score_model = score_model
    def drift(self, x, t, **kwargs):
        """Reverse-time drift: forward drift minus diffusion^2 * score."""
        drift = self.sde.drift(x, t)
        diffusion = self.sde.diffusion(t)
        score = self.score_model.score(x, t, **kwargs)
        # NOTE(review): `stp` presumably broadcasts the (per-time) diffusion
        # coefficient onto the score tensor — confirm against its definition.
        return (drift - stp((diffusion ** 2), score))
    def diffusion(self, t):
        """Diffusion coefficient, identical to the forward SDE's."""
        return self.sde.diffusion(t)
(help='Initialize PASCAL Context dataset.')
('download_dir', type=str)
def _export_split(torch_path, split_name, out_dir, imageset_dir):
    """Dump the labels of one .pth split as PNGs and write its image-set list.

    Each key of the stored dict is a numeric id; it is rewritten as
    'YYYY_NNNNNN' (underscore after the 4-digit year) to match the VOC naming
    scheme, the label image is saved to ``out_dir``, and the sorted id list is
    written to ``imageset_dir/<split_name>.txt``.
    """
    split_dict = torch.load(str(torch_path))
    names = []
    for idx, label in tqdm(split_dict.items()):
        idx = str(idx)
        new_idx = idx[:4] + '_' + idx[4:]
        names.append(new_idx)
        label_path = out_dir / f'{new_idx}.png'
        label.save(str(label_path))
    with open(str(imageset_dir / f'{split_name}.txt'), 'w') as f:
        f.writelines((line + '\n') for line in sorted(names))

def main(download_dir):
    """Download PASCAL Context and convert its annotations to VOC layout.

    Creates SegmentationClassContext PNG labels and the
    ImageSets/SegmentationContext train/val lists under VOCdevkit/VOC2010.
    """
    dataset_dir = Path(download_dir) / 'pcontext'
    download_pcontext(dataset_dir, overwrite=False)
    devkit_path = dataset_dir / 'VOCdevkit'
    out_dir = devkit_path / 'VOC2010' / 'SegmentationClassContext'
    imageset_dir = devkit_path / 'VOC2010' / 'ImageSets' / 'SegmentationContext'
    out_dir.mkdir(parents=True, exist_ok=True)
    imageset_dir.mkdir(parents=True, exist_ok=True)
    # The train and val splits get identical treatment; one helper covers both
    # (previously this logic was duplicated inline).
    _export_split(devkit_path / 'VOC2010' / 'train.pth', 'train', out_dir, imageset_dir)
    _export_split(devkit_path / 'VOC2010' / 'val.pth', 'val', out_dir, imageset_dir)
def process_line(args, line):
    """Parse one "text<delimiter>transcript" line into model inputs.

    Returns a dict with 'text' (lower-cased character list, punctuation
    stripped, whitespace collapsed to single spaces) and 'transcript'
    (phone-token list, digits removed when ``args.strip_stress``), or None
    when the line does not split into exactly two fields.
    """
    try:
        text, transcript = line.split(args.delimiter)
        phones = transcript.strip().split()
        if args.strip_stress:
            # Drop stress markers, e.g. 'AH0' -> 'AH'.
            phones = [re.sub('\\d', '', p) for p in phones]
        # Collapse runs of whitespace, then strip punctuation characters.
        cleaned = ' '.join(text.strip().split())
        for punct in ',.:;?!-_':
            cleaned = cleaned.replace(punct, '')
        return {'text': list(cleaned.lower()), 'transcript': phones}
    except ValueError:
        print(f'Problem with line "{line}"')
        return None
def encode_pyunicode_string(s):
    """Encode a Python string as comma-separated UTF-16 and UTF-32 code lists.

    The string is converted to its code points plus a trailing NUL. On a wide
    build (sys.maxunicode >= 0x10000) the input itself is the UTF-32 form and
    the UTF-16 form is derived by emitting surrogate pairs; on a narrow build
    the input is the UTF-16 form and the UTF-32 form is derived by merging
    surrogate pairs. When both encodings coincide (pure BMP text) the UTF-16
    list is emitted empty. Returns (utf16_csv, utf32_csv) strings.
    """
    # Code points with a terminating NUL appended.
    s = (list(map(ord, s)) + [0])
    if (sys.maxunicode >= 65536):
        # Wide build: `s` already holds full code points (UTF-32).
        (utf16, utf32) = ([], s)
        for code_point in s:
            if (code_point >= 65536):
                # Split an astral code point into a surrogate pair:
                # 0xD800 + high 10 bits, 0xDC00 + low 10 bits.
                (high, low) = divmod((code_point - 65536), 1024)
                utf16.append((high + 55296))
                utf16.append((low + 56320))
            else:
                utf16.append(code_point)
    else:
        # Narrow build: `s` holds UTF-16 code units; rebuild UTF-32.
        (utf16, utf32) = (s, [])
        for code_unit in s:
            # A low surrogate (0xDC00-0xDFFF) following a pending high
            # surrogate (0xD800-0xDBFF) combines into one code point.
            if ((56320 <= code_unit <= 57343) and utf32 and (55296 <= utf32[(- 1)] <= 56319)):
                (high, low) = (utf32[(- 1)], code_unit)
                utf32[(- 1)] = ((((high & 1023) << 10) + (low & 1023)) + 65536)
            else:
                utf32.append(code_unit)
    # Identical encodings mean pure-BMP text; emit UTF-16 as empty then.
    if (utf16 == utf32):
        utf16 = []
    return (','.join(map(_unicode, utf16)), ','.join(map(_unicode, utf32)))
def create_loss_func(npeak, nbins=None):
    """Build a zfit extended NLL for a Gaussian peak over an exponential background.

    A toy dataset is generated (300 exponential background events plus
    ``npeak`` Gaussian signal events at mean 1.2, sigma 0.1), restricted to
    the observable range (0.1, 3.0). When ``nbins`` is given, the data and
    model are binned and a binned NLL is returned; otherwise an unbinned NLL.

    Returns:
        (loss, (Nsig, Nbkg, mean, sigma)) — the loss and the fit parameters.
    """
    import zfit
    bounds = (0.1, 3.0)
    obs = zfit.Space('x', limits=bounds)
    # Fixed seed so the toy dataset (and hence the loss) is reproducible.
    np.random.seed(0)
    tau = (- 2.0)
    beta = ((- 1) / tau)  # numpy's exponential takes the scale beta = -1/tau
    bkg = np.random.exponential(beta, 300)
    peak = np.random.normal(1.2, 0.1, npeak)
    data = np.concatenate((bkg, peak))
    # Keep only events inside the observable window.
    data = data[((data > bounds[0]) & (data < bounds[1]))]
    N = len(data)
    data = zfit.data.Data.from_numpy(obs=obs, array=data)
    mean = zfit.Parameter('mean', 1.2, 0.5, 2.0)
    sigma = zfit.Parameter('sigma', 0.1, 0.02, 0.2)
    lambda_ = zfit.Parameter('lambda', (- 2.0), (- 4.0), (- 1.0))
    # Yields: signal may float slightly negative; background up to 110% of N.
    Nsig = zfit.Parameter('Nsig', 20.0, (- 20.0), N)
    Nbkg = zfit.Parameter('Nbkg', N, 0.0, (N * 1.1))
    signal = zfit.pdf.Gauss(obs=obs, mu=mean, sigma=sigma).create_extended(Nsig)
    background = zfit.pdf.Exponential(obs=obs, lambda_=lambda_).create_extended(Nbkg)
    tot_model = zfit.pdf.SumPDF([signal, background])
    if (nbins is not None):
        binned_space = obs.with_binning(nbins)
        data = data.to_binned(binned_space)
        tot_model = tot_model.to_binned(binned_space)
        loss = zfit.loss.ExtendedBinnedNLL(tot_model, data)
    else:
        loss = zfit.loss.ExtendedUnbinnedNLL(model=tot_model, data=data)
    return (loss, (Nsig, Nbkg, mean, sigma))
def save_path_stats(x2role, output_fpath):
    """Write "path<TAB>freq" lines to a UTF-8 file, most frequent first.

    Entries with frequency <= 2 are dropped. The output path is printed when
    done.
    """
    ranked = sorted(x2role.items(), key=operator.itemgetter(1), reverse=True)
    with codecs.open(output_fpath, 'w', 'utf-8') as out:
        for path, freq in ranked:
            if freq > 2:
                out.write('{}\t{}\n'.format(path, freq))
    print('Output:', output_fpath)
def imload(filename, gray=False, scale_rate=1.0, enhance=False):
    """Load an image as a uint8 numpy array (RGB or single-channel).

    Color mode: reads BGR via OpenCV and converts to RGB, optionally rescales,
    and optionally boosts contrast (factor 1.55) via PIL.
    Gray mode: reads the file unchanged (cv2 flag -1), optionally rescales
    with nearest-neighbour interpolation (preserves label values).
    """
    if (not gray):
        image = cv2.imread(filename)
        # OpenCV loads BGR; convert to the conventional RGB order.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if (scale_rate != 1.0):
            image = scale(image, scale_rate)
        if enhance:
            # Round-trip through PIL purely for the contrast enhancement.
            image = Image.fromarray(np.asarray(image, dtype='uint8'))
            contrast = ImageEnhance.Contrast(image)
            image = contrast.enhance(1.55)
    else:
        # -1 = IMREAD_UNCHANGED: keep original depth/channels (e.g. label maps).
        image = cv2.imread(filename, (- 1))
        if (scale_rate != 1.0):
            # Nearest-neighbour so discrete label values are not interpolated.
            image = scale(image, scale_rate, interpolation=cv2.INTER_NEAREST)
        image = np.asarray(image, dtype='uint8')
    return image
def generate_all_logical_forms_for_literal(value: str):
    """Enumerate (AND type (JOIN relation value)) logical forms for a literal.

    Values mentioning 'integer' or 'float' are treated as numeric and draw
    from numerical_relations; anything else is treated as a date and draws
    from date_relations. Only relations accepted by legal_relation are used.
    """
    is_numeric = ('integer' in value) or ('float' in value)
    candidates = numerical_relations if is_numeric else date_relations
    return [
        f'(AND {relations_info[r][0]} (JOIN {r} {value}))'
        for r in candidates
        if legal_relation(r)
    ]
class GraphRepair():
    """Heuristic post-processing that repairs a predicted graph in place.

    Two repairs are applied: dropping "unknown" nodes (empty instance, no
    outgoing edges) and collapsing redundant duplicate edges/subtrees under
    the same parent. Names of the repairs actually performed are collected in
    ``self.repaired_items``.
    """
    def __init__(self, graph, nodes):
        self.graph = graph
        self.nodes = nodes
        # Labels of repairs that were actually applied.
        self.repaired_items = set()
    # NOTE(review): `do` takes no `self` and is not marked @staticmethod — it
    # only works when called as GraphRepair.do(graph, nodes); confirm intent.
    def do(graph, nodes):
        """Run both repair passes on `graph`."""
        gr = GraphRepair(graph, nodes)
        gr.remove_redundant_edges()
        gr.remove_unknown_nodes()
    def remove_unknown_nodes(self):
        """Drop empty attributes, then delete instance-less leaf nodes."""
        graph = self.graph
        nodes = [node for node in graph.get_nodes()]
        for node in nodes:
            # Strip attributes whose value is empty (the 'instance' attribute
            # is handled separately below).
            for (attr, value) in node.attributes:
                if ((value == '') and (attr != 'instance')):
                    graph.remove_node_attribute(node, attr, value)
            # A node with no instance and no outgoing edges is "unknown":
            # detach it from all parents and remove it.
            if (node.instance == ''):
                if (len(list(graph._G.edges(node))) == 0):
                    for (source, target) in list(graph._G.in_edges(node)):
                        graph.remove_edge(source, target)
                    graph.remove_node(node)
                    self.repaired_items.add('remove-unknown-node')
    def remove_redundant_edges(self):
        """Collapse duplicated children/subtrees hanging off the same parent.

        Children are grouped by edge label (and instance); within a group,
        extra 'name' leaves are dropped outright, and structurally similar
        duplicate subtrees are reduced to the largest representative.
        """
        graph = self.graph
        nodes = [node for node in graph.get_nodes()]
        removed_nodes = set()
        for node in nodes:
            if (node in removed_nodes):
                continue
            edges = list(graph._G.edges(node))
            # Bucket children by a key that identifies "the same" child:
            # 'name' edges by label, op*/snt* edges by child instance only
            # (their label carries just ordering), others by label+instance.
            edge_counter = defaultdict(list)
            for (source, target) in edges:
                label = graph._G[source][target]['label']
                if (label == 'name'):
                    edge_counter[label].append(target)
                elif (label.startswith('op') or label.startswith('snt')):
                    edge_counter[str(target.instance)].append(target)
                else:
                    edge_counter[(label + str(target.instance))].append(target)
            for (label, children) in edge_counter.items():
                if (len(children) == 1):
                    continue
                if (label == 'name'):
                    # Keep the first name; drop extra leaf-only name children.
                    for target in children[1:]:
                        if ((len(list(graph._G.in_edges(target))) == 1) and (len(list(graph._G.edges(target))) == 0)):
                            graph.remove_edge(node, target)
                            graph.remove_node(target)
                            removed_nodes.add(target)
                            self.repaired_items.add('remove-redundant-edge')
                    continue
                # Group structurally similar duplicate subtrees (compared by
                # the instances within depth-5 subtrees).
                visited_children = set()
                groups = []
                for (i, target) in enumerate(children):
                    if (target in visited_children):
                        continue
                    subtree_instances1 = [n.instance for n in graph.get_subtree(target, 5)]
                    group = [(target, subtree_instances1)]
                    visited_children.add(target)
                    for _t in children[(i + 1):]:
                        if ((_t in visited_children) or (target.instance != _t.instance)):
                            continue
                        subtree_instances2 = [n.instance for n in graph.get_subtree(_t, 5)]
                        if is_similar(subtree_instances1, subtree_instances2):
                            group.append((_t, subtree_instances2))
                            visited_children.add(_t)
                    groups.append(group)
                # Within each duplicate group, keep only the largest subtree.
                for group in groups:
                    if (len(group) == 1):
                        continue
                    (kept_target, _) = max(group, key=(lambda x: len(x[1])))
                    for (target, _) in group:
                        if (target == kept_target):
                            continue
                        graph.remove_edge(node, target)
                        removed_nodes.update(graph.remove_subtree(target))
def _get_axes_excluding(ndim, axes):
    """Return the axes of an ndim-dimensional array NOT listed in `axes`.

    Negative entries in `axes` are interpreted relative to `ndim` (numpy
    convention) before exclusion.
    """
    excluded = {axis if axis >= 0 else axis + ndim for axis in _force_list(axes)}
    return [axis for axis in range(ndim) if axis not in excluded]
class EnsembleModel(nn.Module):
    """Weighted log-linear ensemble of an encoding model and an alignment model.

    Both sub-models score candidate nodes of a web page; their log-probability
    distributions are combined with fixed weights, masked by a node filter,
    and used for per-example cross-entropy loss and top-k prediction.
    """
    def __init__(self, encoding_model, alignment_model, node_filter, top_k=5):
        super(EnsembleModel, self).__init__()
        self._encoding_model = encoding_model
        self._alignment_model = alignment_model
        # Fixed (non-trainable here) combination weights for the two models.
        self._weight = V(FT([1.0, 1.0]))
        self.node_filter = node_filter
        # NOTE(review): `reduce=False` is deprecated in modern PyTorch in
        # favour of reduction='none' — same behavior, worth migrating.
        self.loss = nn.CrossEntropyLoss(reduce=False)
        self.top_k = top_k
    def forward(self, web_page, examples):
        """Score nodes for each example; return (logits, losses, predictions)."""
        e_logits = self._encoding_model(web_page, examples, logits_only=True)
        a_logits = self._alignment_model(web_page, examples, logits_only=True)
        e_logprobs = F.log_softmax(e_logits, dim=1)
        a_logprobs = F.log_softmax(a_logits, dim=1)
        # Log-linear combination of the two models.
        logits = ((e_logprobs * self._weight[0]) + (a_logprobs * self._weight[1]))
        # Filtered-out nodes get a large negative additive mask.
        node_filter_mask = self.node_filter(web_page, examples[0].web_page_code)
        log_node_filter_mask = V(FT([(0.0 if x else (- 999999.0)) for x in node_filter_mask]))
        logits = (logits + log_node_filter_mask)
        # Target ref per example; missing targets default to ref 0 but are
        # zeroed out of the loss by `mask` below.
        targets = V(LT([web_page.xid_to_ref.get(x.target_xid, 0) for x in examples]))
        mask = V(FT([int(((x.target_xid in web_page.xid_to_ref) and node_filter_mask[web_page.xid_to_ref[x.target_xid]])) for x in examples]))
        losses = (self.loss(logits, targets) * mask)
        if (not np.isfinite(losses.data.sum())):
            logging.warn('Losses has NaN')
        top_k = min(self.top_k, len(web_page.nodes))
        predictions = torch.topk(logits, top_k, dim=1)[1]
        return (logits, losses, predictions)
def dot_product_scores(q_vectors: T, ctx_vectors: T) -> T:
    """Score every query vector against every context vector by dot product.

    Typically q_vectors is (n_q, dim) and ctx_vectors is (n_ctx, dim),
    yielding an (n_q, n_ctx) score matrix — assumes matching inner dims.
    """
    ctx_transposed = torch.transpose(ctx_vectors, 0, 1)
    return torch.matmul(q_vectors, ctx_transposed)
def save_state_dict(state_dict: StateDict, path):
    """Save a state dict as safetensors from a multi-host JAX program.

    Only process 0 writes the file; all processes then rendezvous so nobody
    races ahead of the write. None-valued entries are dropped first.
    """
    state_dict = {k: v for (k, v) in state_dict.items() if (v is not None)}
    # Single-writer: only the first process touches the filesystem.
    if (jax.process_index() == 0):
        safetensors.numpy.save_file(state_dict, path, metadata={'format': 'pt'})
    global _GLOBAL_SAVE_COUNT
    # Barrier keyed by a monotonically increasing counter so repeated saves
    # each get a distinct sync point.
    sync_global_devices(f'local {_GLOBAL_SAVE_COUNT}')
    _GLOBAL_SAVE_COUNT += 1
class DiagonalTest(tf.test.TestCase):
    """Checks that a Diagonal layer applied to its own inverse gives identity."""
    def test(self):
        for units in TEST_DIMENSIONS:
            diag_layer = Diagonal(units=units)
            # layer(inverse_matrix) should reproduce the identity matrix.
            self.assertAllClose(diag_layer(diag_layer.inverse_matrix), tf.eye(units))
def register_Ns3HigherLayerTxVectorTag_methods(root_module, cls):
    """Register pybindgen constructors/methods for ns3::HigherLayerTxVectorTag.

    Generated-style binding code: declares the copy/default/value constructors
    and the Tag serialization interface plus the tag's accessors.
    """
    cls.add_constructor([param('ns3::HigherLayerTxVectorTag const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::WifiTxVector', 'txVector'), param('bool', 'adaptable')])
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTxVector', 'ns3::WifiTxVector', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsAdaptable', 'bool', [], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    return
def unison_shuffled_copies_three(amat, bmat, slmat):
    """Shuffle three equal-length arrays with one shared random permutation.

    All three inputs are re-indexed by the same permutation so corresponding
    entries stay aligned. Raises AssertionError if lengths differ.

    FIX: removed a leftover `ipdb.set_trace()` debugger breakpoint that
    halted every call.
    """
    assert len(amat) == len(bmat) and len(bmat) == len(slmat)
    perm = np.random.permutation(len(amat))
    return (amat[perm], bmat[perm], slmat[perm])
def run():
    """Encode a dataset into StyleGAN W latents and save them to a .npy file.

    Loads a StyleTransformer checkpoint (options are restored from the
    checkpoint and overridden by command-line test options), runs inference
    batches on GPU, and stacks up to ``opts.n_images`` latent codes into
    'celebaha_w.npy'.
    """
    test_opts = TestOptions().parse()
    out_path_w = 'celebaha_w.npy'
    ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
    # Training-time options from the checkpoint, overridden by test options.
    opts = ckpt['opts']
    opts.update(vars(test_opts))
    opts = Namespace(**opts)
    net = StyleTransformer(opts)
    net.eval()
    net.cuda()
    print('Loading dataset for {}'.format(opts.dataset_type))
    dataset_args = data_configs.DATASETS[opts.dataset_type]
    transforms_dict = dataset_args['transforms'](opts).get_transforms()
    dataset = InferenceDataset(root=opts.data_path, transform=transforms_dict['transform_inference'], opts=opts)
    # drop_last=True: the inner loop assumes full batches of test_batch_size.
    dataloader = DataLoader(dataset, batch_size=opts.test_batch_size, shuffle=False, num_workers=int(opts.test_workers), drop_last=True)
    global_i = 0
    latents = []
    for input_batch in tqdm(dataloader):
        # Stop once the requested number of images has been processed.
        if (global_i >= opts.n_images):
            break
        with torch.no_grad():
            input_cuda = input_batch.cuda().float()
            (_, latent) = net(input_cuda, randomize_noise=True, resize=opts.resize_outputs, return_latents=True)
            latent = latent.cpu().numpy()
        for i in range(opts.test_batch_size):
            result = latent[i]
            latents.append(result)
            global_i += 1
    np.save(out_path_w, latents)
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None, class_weight=None, ignore_index=None):
    """Binary cross-entropy on the mask predictions of each RoI's GT class.

    For each RoI, the prediction slice corresponding to its class label is
    selected and compared against the target mask with BCE-with-logits.
    Only reduction='mean' with no avg_factor is supported; ignore_index is
    not supported for BCE.

    Returns a 1-element tensor (the mean loss wrapped via [None]).
    """
    assert (ignore_index is None), 'BCE loss does not support ignore_index'
    assert ((reduction == 'mean') and (avg_factor is None))
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    # NOTE(review): assumes pred is (num_rois, num_classes, *mask_shape) so
    # pred[inds, label] picks each RoI's class channel — confirm with callers;
    # the squeeze(1) suggests an extra singleton dim in that slice.
    pred_slice = pred[(inds, label)].squeeze(1)
    return F.binary_cross_entropy_with_logits(pred_slice, target, weight=class_weight, reduction='mean')[None]
class PNA(ScalableGNN):
    """Principal Neighbourhood Aggregation GNN with history-cached layers.

    Stacks PNAConv layers with optional batch-norm, residual connections and
    dropout on top of the ScalableGNN history mechanism (push_and_pull) for
    mini-batch training on large graphs.
    """
    def __init__(self, num_nodes: int, in_channels: int, hidden_channels: int, out_channels: int, num_layers: int, aggregators: List[int], scalers: List[int], deg: Tensor, dropout: float=0.0, drop_input: bool=True, batch_norm: bool=False, residual: bool=False, pool_size: Optional[int]=None, buffer_size: Optional[int]=None, device=None):
        super().__init__(num_nodes, hidden_channels, num_layers, pool_size, buffer_size, device)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.dropout = dropout
        self.drop_input = drop_input
        self.batch_norm = batch_norm
        self.residual = residual
        self.convs = ModuleList()
        for i in range(num_layers):
            # First layer maps in->hidden, last maps hidden->out.
            in_dim = (in_channels if (i == 0) else hidden_channels)
            out_dim = (out_channels if (i == (num_layers - 1)) else hidden_channels)
            conv = PNAConv(in_dim, out_dim, aggregators=aggregators, scalers=scalers, deg=deg)
            self.convs.append(conv)
        # One batch-norm per hidden layer (none after the output layer).
        self.bns = ModuleList()
        for i in range((num_layers - 1)):
            bn = BatchNorm1d(hidden_channels)
            self.bns.append(bn)
    def reg_modules(self):
        """Modules subject to weight regularization (all but the last conv)."""
        return ModuleList((list(self.convs[:(- 1)]) + list(self.bns)))
    def nonreg_modules(self):
        """Modules excluded from weight regularization (the output conv)."""
        return self.convs[(- 1):]
    def reset_parameters(self):
        super().reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()
    def forward(self, x: Tensor, adj_t: SparseTensor, *args) -> Tensor:
        """Full forward pass, pushing intermediate embeddings to the history."""
        if self.drop_input:
            x = F.dropout(x, p=self.dropout, training=self.training)
        for (conv, bn, hist) in zip(self.convs[:(- 1)], self.bns, self.histories):
            h = conv(x, adj_t)
            if self.batch_norm:
                h = bn(h)
            # Residual only when shapes match; truncate x to the batch nodes.
            if (self.residual and (h.size((- 1)) == x.size((- 1)))):
                h += x[:h.size(0)]
            x = h.relu_()
            x = self.push_and_pull(hist, x, *args)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.convs[(- 1)](x, adj_t)
        return x
    # FIX: the stripped decorator artifact `_grad()` (a NameError at class
    # creation) is restored to @torch.no_grad() — layer-wise inference must
    # not track gradients.
    @torch.no_grad()
    def forward_layer(self, layer, x, adj_t, state):
        """Evaluate a single layer (used for layer-wise inference)."""
        if ((layer == 0) and self.drop_input):
            x = F.dropout(x, p=self.dropout, training=self.training)
        h = self.convs[layer](x, adj_t)
        if (layer < (self.num_layers - 1)):
            if self.batch_norm:
                h = self.bns[layer](h)
            if (self.residual and (h.size((- 1)) == x.size((- 1)))):
                h += x[:h.size(0)]
            h = h.relu_()
            h = F.dropout(h, p=self.dropout, training=self.training)
        return h
def ignore_undocumented(name):
    """Return True when `name` may legitimately be missing from the docs."""
    # Constants are not documented.
    if name.isupper():
        return True
    # Abstract/base model building blocks are not documented.
    if name.endswith(('PreTrainedModel', 'Decoder', 'Encoder', 'Layer', 'Embeddings', 'Attention')):
        return True
    # Submodules (directories) and module files are not documented as objects.
    if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile(os.path.join(PATH_TO_TRANSFORMERS, f'{name}.py')):
        return True
    # TF/PyTorch weight-loading helpers are not documented.
    if name.startswith(('load_tf', 'load_pytorch')):
        return True
    # is_xxx_available() helpers are not documented.
    if name.startswith('is_') and name.endswith('_available'):
        return True
    # Explicitly deprecated or known-undocumented objects.
    if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:
        return True
    # MMBT model is not really integrated.
    if name.startswith('MMBT'):
        return True
    return name in SHOULD_HAVE_THEIR_OWN_PAGE
class SeparableUnderapproximationMemlet(UnderapproximationMemletPattern):
    """Memlet underapproximation that treats each data dimension separately.

    Applicability requires that every iteration variable influences at most
    one dimension of every subscript expression and that, per dimension, some
    registered separable pattern matches. Propagation then applies the matched
    per-dimension pattern independently to each dimension.
    """
    def can_be_applied(self, expressions, variable_context, node_range, orig_edges):
        """Check separability and find a matching pattern for every dimension."""
        data_dims = len(expressions[0])
        # One matched pattern per data dimension (filled below).
        self.patterns_per_dim = ([None] * data_dims)
        # Innermost map parameters and the params three levels up.
        params = variable_context[(- 1)]
        other_params = variable_context[(- 3)]
        if (not self._iteration_variables_appear_multiple_times(data_dims, expressions, other_params, params)):
            return False
        node_range = self._make_range(node_range)
        for dim in range(data_dims):
            # Collect this dimension's subscript expression from every memlet,
            # unwrapping SymExpr and (begin, end, step) tuples.
            dexprs = []
            for expr in expressions:
                if isinstance(expr[dim], symbolic.SymExpr):
                    dexprs.append(expr[dim].expr)
                elif isinstance(expr[dim], tuple):
                    dexprs.append(((expr[dim][0].expr if isinstance(expr[dim][0], symbolic.SymExpr) else expr[dim][0]), (expr[dim][1].expr if isinstance(expr[dim][1], symbolic.SymExpr) else expr[dim][1]), (expr[dim][2].expr if isinstance(expr[dim][2], symbolic.SymExpr) else expr[dim][2])))
                else:
                    dexprs.append(expr[dim])
            # First registered pattern that matches this dimension wins.
            for pattern_class in SeparableUnderapproximationMemletPattern.extensions().keys():
                smpattern = pattern_class()
                if smpattern.can_be_applied(dexprs, variable_context, node_range, orig_edges, dim, data_dims):
                    self.patterns_per_dim[dim] = smpattern
                    break
        # Applicable only if every dimension found a pattern.
        return (None not in self.patterns_per_dim)
    def _iteration_variables_appear_multiple_times(self, data_dims, expressions, other_params, params):
        """Return False if any iteration variable appears in more than one
        dimension of a subscript, or co-occurs with another parameter in the
        same dimension's free symbols."""
        for expr in expressions:
            for param in params:
                occured_before = False
                for dim in range(data_dims):
                    # Gather the free symbols of this dimension's expression.
                    free_symbols = []
                    curr_dim_expr = expr[dim]
                    if isinstance(curr_dim_expr, symbolic.SymExpr):
                        free_symbols += curr_dim_expr.expr.free_symbols
                    elif isinstance(curr_dim_expr, tuple):
                        free_symbols += (curr_dim_expr[0].expr.free_symbols if isinstance(curr_dim_expr[0], symbolic.SymExpr) else list(pystr_to_symbolic(curr_dim_expr[0]).expand().free_symbols))
                        free_symbols += (curr_dim_expr[1].expr.free_symbols if isinstance(curr_dim_expr[1], symbolic.SymExpr) else list(pystr_to_symbolic(curr_dim_expr[1]).expand().free_symbols))
                        free_symbols += (curr_dim_expr[2].expr.free_symbols if isinstance(curr_dim_expr[2], symbolic.SymExpr) else list(pystr_to_symbolic(curr_dim_expr[2]).expand().free_symbols))
                    else:
                        free_symbols += [curr_dim_expr]
                    if (param in free_symbols):
                        # Same variable in two dimensions -> not separable.
                        if occured_before:
                            return False
                        occured_before = True
                    # A dimension mixing this param with any other parameter
                    # is also not separable.
                    for other_param in (set(params) | set(other_params)):
                        if (other_param is param):
                            continue
                        if ((other_param in free_symbols) and (param in free_symbols)):
                            return False
        return True
    def _make_range(self, node_range):
        """Normalize a (begin, end, step) triple list into a subsets.Range."""
        return subsets.Range([((rb.expr if isinstance(rb, symbolic.SymExpr) else rb), (re.expr if isinstance(re, symbolic.SymExpr) else re), (rs.expr if isinstance(rs, symbolic.SymExpr) else rs)) for (rb, re, rs) in node_range])
    def propagate(self, array, expressions, node_range):
        """Propagate each dimension with its matched pattern; return a Range."""
        result = ([(None, None, None)] * len(self.patterns_per_dim))
        node_range = self._make_range(node_range)
        for (i, smpattern) in enumerate(self.patterns_per_dim):
            dexprs = []
            for expr in expressions:
                if isinstance(expr[i], symbolic.SymExpr):
                    dexprs.append(expr[i].expr)
                elif isinstance(expr[i], tuple):
                    # Tuples additionally carry the tile size for this dim.
                    dexprs.append(((expr[i][0].expr if isinstance(expr[i][0], symbolic.SymExpr) else expr[i][0]), (expr[i][1].expr if isinstance(expr[i][1], symbolic.SymExpr) else expr[i][1]), (expr[i][2].expr if isinstance(expr[i][2], symbolic.SymExpr) else expr[i][2]), expr.tile_sizes[i]))
                else:
                    dexprs.append(expr[i])
            result[i] = smpattern.propagate(array, dexprs, node_range)
        return subsets.Range(result)
def save_concrete_function(function, input_signature, add_nms_plugin, opset, output_dir, target='tensorrt', model_params=None, simplify=True, large_model=False, debug=False):
    """Convert a TF concrete function to ONNX and save it as model.onnx.

    Optionally runs the onnxsim simplifier (skipped gracefully when the
    package is missing) and injects the TensorRT EfficientNMS plugin, which
    requires ``model_params``.
    """
    if (add_nms_plugin and (model_params is None)):
        raise ValueError('model_params are required to add NMS plugin')
    tf2onnx.logging.basicConfig(level=tf2onnx.logging.get_verbosity_level((1 if debug else 0)))
    (onnx_model, _) = tf2onnx.convert.from_function(function=function, input_signature=input_signature, opset=opset, custom_ops=None, custom_op_handlers=None, custom_rewriter=None, inputs_as_nchw=None, extra_opset=None, shape_override=None, target=target, large_model=large_model, output_path=None)
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, 'model.onnx')
    if simplify:
        try:
            import onnxsim
            logging.info('Running ONNX simplifier')
            # check_n=3: verify the simplified model on 3 random inputs.
            (onnx_model, status) = onnxsim.simplify(onnx_model, check_n=3)
            if (not status):
                raise AssertionError('Failed to simplify ONNX model')
        except ImportError:
            # Simplification is best-effort; continue with the raw model.
            logging.warning('Failed to import onnxsim, skipping ONNX simplifier')
    if add_nms_plugin:
        logging.info('Adding `EfficientNMS_TRT` plugin')
        onnx_model = _add_nms_plugin(onnx_model, model_params)
    onnx.save_model(onnx_model, output_path)
    logging.info('Saving ONNX model to: {}'.format(output_path))
def get_checkpoint_name(checkpoints_path, iteration, release=False, mp_rank=None):
    """Build the path of a model-parallel checkpoint file.

    Layout: <checkpoints_path>/<'release' | iter_XXXXXXX>/mp_rank_NN/model_optim_rng.pt.
    When mp_rank is None, the current model-parallel rank is queried from mpu.
    """
    directory = 'release' if release else 'iter_{:07d}'.format(iteration)
    rank = mpu.get_model_parallel_rank() if mp_rank is None else mp_rank
    return os.path.join(checkpoints_path, directory, 'mp_rank_{:02d}'.format(rank), 'model_optim_rng.pt')
def test_redundant_array_failure():
    """RedundantArray must NOT apply on the non-removable SDFG variant."""
    sdfg = _make_sdfg_1(succeed=False)
    # Dump the SDFG for manual inspection when the test fails.
    sdfg.save('test2.sdfg')
    num = sdfg.apply_transformations(RedundantArray)
    # Zero applications expected: the transformation should be rejected.
    assert (num == 0)
class Tower(BaseTower):
    """Memory-network tower (legacy TF 0.x graph-mode code).

    Builds placeholders, position-encoded sentence/question embeddings, a
    stack of gated bidirectional reduction layers over the memory, and a
    softmax classifier over the vocabulary.
    """
    def initialize(self):
        """Construct the full TF graph for this tower."""
        params = self.params
        placeholders = self.placeholders
        tensors = self.tensors
        variables_dict = self.variables_dict
        # N=batch, J=max sentence len, V=vocab, Q=max question len, M=memory slots.
        (N, J, V, Q, M) = (params.batch_size, params.max_sent_size, params.vocab_size, params.max_ques_size, params.mem_size)
        d = params.hidden_size
        L = params.mem_num_layers
        att_forget_bias = params.att_forget_bias
        use_vector_gate = params.use_vector_gate
        wd = params.wd
        initializer = tf.random_uniform_initializer((- np.sqrt(3)), np.sqrt(3))
        with tf.name_scope('placeholders'):
            x = tf.placeholder('int32', shape=[N, M, J], name='x')
            x_mask = tf.placeholder('bool', shape=[N, M, J], name='x_mask')
            q = tf.placeholder('int32', shape=[N, J], name='q')
            q_mask = tf.placeholder('bool', shape=[N, J], name='q_mask')
            y = tf.placeholder('int32', shape=[N], name='y')
            is_train = tf.placeholder('bool', shape=[], name='is_train')
            placeholders['x'] = x
            placeholders['x_mask'] = x_mask
            placeholders['q'] = q
            placeholders['q_mask'] = q_mask
            placeholders['y'] = y
            placeholders['is_train'] = is_train
        with tf.variable_scope('embedding'):
            # Shared embedding matrix for question and memory sentences.
            A = VariableEmbedder(params, wd=wd, initializer=initializer, name='A')
            Aq = A(q, name='Aq')
            Ax = A(x, name='Ax')
        with tf.name_scope('encoding'):
            # Position encoding collapses each sentence to a single vector.
            encoder = PositionEncoder(J, d)
            u = encoder(Aq, q_mask)
            m = encoder(Ax, x_mask)
        with tf.variable_scope('networks'):
            # m_mask marks non-empty memory slots.
            m_mask = tf.reduce_max(tf.cast(x_mask, 'int64'), 2, name='m_mask')
            gate_mask = tf.expand_dims(m_mask, (- 1))
            m_length = tf.reduce_sum(m_mask, 1, name='m_length')
            # Question vector broadcast over all memory slots.
            prev_u = tf.tile(tf.expand_dims(u, 1), [1, M, 1])
            reg_layer = (VectorReductionLayer(N, M, d) if use_vector_gate else ReductionLayer(N, M, d))
            gate_size = (d if use_vector_gate else 1)
            h = None
            (as_, rfs, rbs) = ([], [], [])
            hs = []
            for layer_idx in range(L):
                with tf.name_scope('layer_{}'.format(layer_idx)):
                    u_t = tf.tanh(linear([prev_u, m], d, True, wd=wd, scope='u_t'))
                    # Attention gate with a forget bias, masked to real slots.
                    a = (tf.cast(gate_mask, 'float') * tf.sigmoid((linear([(prev_u * m)], gate_size, True, initializer=initializer, wd=wd, scope='a') - att_forget_bias)))
                    h = reg_layer(u_t, a, (1.0 - a), scope='h')
                    if ((layer_idx + 1) < L):
                        # Intermediate layers propagate a bidirectional state.
                        if params.use_reset:
                            (rf, rb) = tf.split(2, 2, (tf.cast(gate_mask, 'float') * tf.sigmoid(linear([(prev_u * m)], (2 * gate_size), True, initializer=initializer, wd=wd, scope='r'))))
                        else:
                            rf = rb = tf.ones(a.get_shape().as_list())
                        # Backward pass operates on the sequence-reversed memory.
                        u_t_rev = tf.reverse_sequence(u_t, m_length, 1)
                        (a_rev, rb_rev) = (tf.reverse_sequence(a, m_length, 1), tf.reverse_sequence(rb, m_length, 1))
                        uf = reg_layer(u_t, (a * rf), (1.0 - a), scope='uf')
                        ub_rev = reg_layer(u_t_rev, (a_rev * rb_rev), (1.0 - a_rev), scope='ub_rev')
                        ub = tf.reverse_sequence(ub_rev, m_length, 1)
                        prev_u = (uf + ub)
                    else:
                        rf = rb = tf.zeros(a.get_shape().as_list())
                    rfs.append(rf)
                    rbs.append(rb)
                    as_.append(a)
                    hs.append(h)
                # Share layer weights across iterations.
                tf.get_variable_scope().reuse_variables()
            # Keep only the last memory slot's output per layer.
            h_last = tf.squeeze(tf.slice(h, [0, (M - 1), 0], [(- 1), (- 1), (- 1)]), [1])
            hs_last = [tf.squeeze(tf.slice(each, [0, (M - 1), 0], [(- 1), (- 1), (- 1)]), [1]) for each in hs]
            a = tf.transpose(tf.pack(as_, name='a'), [1, 0, 2, 3])
            rf = tf.transpose(tf.pack(rfs, name='rf'), [1, 0, 2, 3])
            rb = tf.transpose(tf.pack(rbs, name='rb'), [1, 0, 2, 3])
            tensors['a'] = a
            tensors['rf'] = rf
            tensors['rb'] = rb
        with tf.variable_scope('class'):
            # class_mode picks which hidden representations feed the softmax.
            class_mode = params.class_mode
            use_class_bias = params.use_class_bias
            if (class_mode == 'h'):
                logits = linear([h_last], V, use_class_bias, wd=wd)
            elif (class_mode == 'uh'):
                logits = linear([h_last, u], V, use_class_bias, wd=wd)
            elif (class_mode == 'hs'):
                logits = linear(hs_last, V, use_class_bias, wd=wd)
            elif (class_mode == 'hss'):
                logits = linear(sum(hs_last), V, use_class_bias, wd=wd)
            else:
                raise Exception('Invalid class mode: {}'.format(class_mode))
            yp = tf.cast(tf.argmax(logits, 1), 'int32')
            correct = tf.equal(yp, y)
            tensors['yp'] = yp
            tensors['correct'] = correct
        with tf.name_scope('loss'):
            with tf.name_scope('ans_loss'):
                ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, y, name='ce')
                avg_ce = tf.reduce_mean(ce, name='avg_ce')
                tf.add_to_collection('losses', avg_ce)
            # Total loss includes any weight-decay terms added to 'losses'.
            losses = tf.get_collection('losses')
            loss = tf.add_n(losses, name='loss')
            tensors['loss'] = loss
        variables_dict['all'] = tf.trainable_variables()
    def get_feed_dict(self, batch, mode, **kwargs):
        """Convert a raw batch into a feed_dict for the graph placeholders.

        With ``batch=None`` returns all-zero tensors (useful for shape-only
        runs). Paragraphs longer than the memory size keep their last M
        sentences.
        """
        params = self.params
        (N, J, V, M) = (params.batch_size, params.max_sent_size, params.vocab_size, params.mem_size)
        x = np.zeros([N, M, J], dtype='int32')
        x_mask = np.zeros([N, M, J], dtype='bool')
        q = np.zeros([N, J], dtype='int32')
        q_mask = np.zeros([N, J], dtype='bool')
        y = np.zeros([N], dtype='int32')
        ph = self.placeholders
        feed_dict = {ph['x']: x, ph['x_mask']: x_mask, ph['q']: q, ph['q_mask']: q_mask, ph['y']: y, ph['is_train']: (mode == 'train')}
        if (batch is None):
            return feed_dict
        (X, Q, S, Y, H, T) = batch
        for (i, para) in enumerate(X):
            # Keep only the most recent M sentences of long paragraphs.
            if (len(para) > M):
                para = para[(- M):]
            for (jj, sent) in enumerate(para):
                j = jj
                for (k, word) in enumerate(sent):
                    x[(i, j, k)] = word
                    x_mask[(i, j, k)] = True
        for (i, ques) in enumerate(Q):
            for (j, word) in enumerate(ques):
                q[(i, j)] = word
                q_mask[(i, j)] = True
        for (i, ans) in enumerate(Y):
            y[i] = ans
        return feed_dict
class GitHubGetRepositoryDetails(VirtualFunctionTool):
    """Declarative spec of a virtual GitHub tool that fetches repository details.

    The class attributes define the tool's schema (name, summary, parameters,
    return shape, exceptions) consumed by the VirtualFunctionTool framework.
    """
    name = 'GitHubGetRepositoryDetails'
    summary = 'Retrieve repository details, including issues, branches.'
    # Single required argument: the repository identifier.
    parameters: List[ArgParameter] = [{'name': 'repo_id', 'type': 'string', 'description': 'The unique identifier of the repository.', 'required': True}]
    # Structured result object describing the repository.
    returns: List[ArgReturn] = [{'name': 'details', 'type': 'object', 'description': "Contains name (string), owner (string), description (string), url (string), branches (array of branch names), visibility (string, one of ['public', 'private']), last_updated (string, format: 'YYYY-MM-DD:HH:MM:SS') and statistics (including number of issues, number of stars, number of forks)."}]
    # Error cases the virtual tool may raise.
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': 'If repo_id does not exist.'}, {'name': 'InvalidRequestException', 'description': 'If repo_id is not a valid repository id.'}]
def r_action1(t):
    """Build the checker list for discrete action #1 ([1, 0, 0, 0, 0]).

    Returns [('action', fn)] where fn(k, n) applies the action to environment
    `k` and reports (k, n, success): failure when the call budget `n` exceeds
    MAX_FUNC_CALL or when the transition raises.
    """
    def fn(k, n):
        if n > MAX_FUNC_CALL:
            return (k, n, False)
        action = np.array([1, 0, 0, 0, 0])
        try:
            k.state_transition(action)
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; a failed transition still reports False.
        except Exception:
            return (k, n, False)
        else:
            return (k, n, True)
    return [('action', fn)]
def MobileNet_arg_scope(weight_decay=0.0005):
    """Default slim arg_scope for MobileNet layers.

    Batch norm: decay 0.9, scaled, ReLU activation. Conv/FC: no activation
    (BN provides it), variance-scaling init, no bias (BN absorbs it), and L2
    weight regularization with the given decay.
    """
    with slim.arg_scope([slim.batch_norm], decay=0.9, zero_debias_moving_mean=True, scale=True, activation_fn=tf.nn.relu):
        with slim.arg_scope([slim.convolution2d, slim.fully_connected], activation_fn=None, weights_initializer=tf.contrib.layers.variance_scaling_initializer(), biases_initializer=None, weights_regularizer=slim.l2_regularizer(weight_decay)) as arg_sc:
            return arg_sc
def graph(A: np.ndarray) -> np.ndarray:
    """Return the 0/1 adjacency matrix of A with self-loops added.

    An entry is 1.0 where (A + I) is nonzero, else 0.0 — i.e. every nonzero
    entry of A plus the diagonal.

    FIX: removed the dead first assignment ``probe = ((A != 0) * 1.0)``,
    which was immediately overwritten.
    """
    return (((A + np.eye(A.shape[0])) != 0) * 1.0)
_model_architecture('universal_transformer_lm', 'universal_transformer_lm_gpt')
def transformer_lm_gpt(args):
    """Fill in GPT-style defaults for any decoder hyper-parameter not already
    set on `args`, then apply the base LM architecture defaults."""
    gpt_defaults = {
        'decoder_embed_dim': 768,
        'decoder_ffn_embed_dim': 3072,
        'decoder_layers': 12,
        'decoder_attention_heads': 12,
        'dropout': 0.1,
        'attention_dropout': 0.1,
        'activation_fn': 'gelu',
    }
    for attr, default in gpt_defaults.items():
        setattr(args, attr, getattr(args, attr, default))
    base_lm_architecture(args)
class ShenNeumann(CompositeBase):
    """Shen composite basis satisfying Neumann boundary conditions.

    Basis functions are phi_k = T_k - (k/(k+2))^2 * T_{k+2}, whose first
    derivative vanishes at both domain endpoints (for homogeneous BCs).
    """
    def __init__(self, N, quad='GC', bc=(0, 0), domain=((- 1), 1), dtype=float, padding_factor=1, dealias_direct=False, coordinates=None, **kw):
        # A plain (left, right) pair is promoted to Neumann BoundaryConditions.
        if isinstance(bc, (tuple, list)):
            bc = BoundaryConditions({'left': {'N': bc[0]}, 'right': {'N': bc[1]}}, domain=domain)
        CompositeBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype, bc=bc, padding_factor=padding_factor, dealias_direct=dealias_direct, coordinates=coordinates)
        # NOTE(review): `n` is presumably a module-level sympy symbol used to
        # express the stencil coefficients symbolically — confirm.
        self._stencil = {0: 1, 2: (- ((n / (n + 2)) ** 2))}
    # FIX: these two methods took no `self` and were not static, so calling
    # them on an instance raised TypeError; mark them @staticmethod (callable
    # both on the class and on instances).
    @staticmethod
    def boundary_condition():
        """Identifier of the boundary condition type this basis satisfies."""
        return 'Neumann'
    @staticmethod
    def short_name():
        """Short mnemonic used to refer to this basis."""
        return 'SN'
class SimpleReduction(nn.Module):
    """Reduce four panorama feature maps of different scales to one 1-D feature.

    Each input is upsampled along the width, collapsed along its height with a
    (down_h, 1) convolution, channel-reduced to a quarter, then all four are
    concatenated and fused by a wide (1, 9) convolution.
    """
    def __init__(self, cs, heights, out_ch=64):
        super(SimpleReduction, self).__init__()
        # Per-level channel counts and feature heights.
        (c1, c2, c3, c4) = cs
        (h1, h2, h3, h4) = heights
        def EfficientConvCompressH(in_c, out_c, scale, down_h):
            # Upsample width by `scale`, squash the full height with a
            # (down_h, 1) kernel, then BN + ReLU.
            return nn.Sequential(PanoUpsampleW(scale), nn.Conv2d(in_c, out_c, (down_h, 1), bias=False), nn.BatchNorm2d(out_c), nn.ReLU(inplace=True))
        # One compressor per pyramid level; deeper levels need larger width upsampling.
        self.ghc_lst = nn.ModuleList([EfficientConvCompressH(c1, (c1 // 4), scale=1, down_h=h1), EfficientConvCompressH(c2, (c2 // 4), scale=2, down_h=h2), EfficientConvCompressH(c3, (c3 // 4), scale=4, down_h=h3), EfficientConvCompressH(c4, (c4 // 4), scale=8, down_h=h4)])
        # Fuse the concatenated quarter-channel maps with a wide horizontal kernel.
        self.fuse = nn.Sequential(nn.Conv2d(((((c1 + c2) + c3) + c4) // 4), out_ch, (1, 9), padding=(0, 4), bias=False), nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True))
        self.out_channels = out_ch
    def forward(self, conv_list):
        """Map the four pyramid features to a {'1D': (B, out_ch, W)} dict."""
        assert (len(conv_list) == 4)
        feature = torch.cat([f(x) for (f, x) in zip(self.ghc_lst, conv_list)], dim=1)
        # Height is 1 after compression; squeeze it away to get a 1-D signal.
        feature = self.fuse(feature).squeeze(2)
        return {'1D': feature}
def make_transform(model_type: str, resolution: int):
    """Build the input preprocessing pipeline for the given model family.

    Diffusion models ('ddpm', 'guidance_ddpm') get inputs scaled to [-1, 1];
    'mae'/'swav'/'swav_w2'/'deeplab' get ImageNet mean/std normalization.

    Raises:
        ValueError: if ``model_type`` is not recognized.
    """
    if model_type in ['ddpm', 'guidance_ddpm']:
        transform = transforms.Compose([
            transforms.Resize(resolution),
            transforms.ToTensor(),
            (lambda x: ((2 * x) - 1)),  # [0,1] -> [-1,1]
        ])
    elif model_type in ['mae', 'swav', 'swav_w2', 'deeplab']:
        transform = transforms.Compose([
            transforms.Resize(resolution),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    else:
        # BUG FIX: raise the specific ValueError instead of bare Exception
        # (ValueError is a subclass of Exception, so callers still catch it).
        raise ValueError(f'Wrong model type: {model_type}')
    return transform
# BUG FIX: this line was the bare expression `(frozen=True)` — a syntax
# error; restored as the @dataclass(frozen=True) decorator.
# NOTE(review): requires `from dataclasses import dataclass` at the top of
# the file — confirm it is imported there.
@dataclass(frozen=True)
class PassageQuestionInput(Input):
    """Input that concatenates a passage and a question with optional prefixes.

    The rendered text is
    ``{passage_prefix}{passage}{separator}{question_prefix}{question}``.
    """

    def __init__(self, passage: str, question: str, passage_prefix: str='', question_prefix: str='Question: ', separator: str='\n'):
        super().__init__(f'{passage_prefix}{passage}{separator}{question_prefix}{question}')
class Comparable(object):
    """Mixin deriving every rich comparison from a single hook.

    Subclasses implement ``_cmp(op, other)`` where ``op`` is one of the
    ``operator`` module's comparison functions; all six dunders delegate.
    """

    def __lt__(self, other):
        return self._cmp(operator.lt, other)

    def __le__(self, other):
        return self._cmp(operator.le, other)

    def __eq__(self, other):
        return self._cmp(operator.eq, other)

    def __ne__(self, other):
        return self._cmp(operator.ne, other)

    def __gt__(self, other):
        return self._cmp(operator.gt, other)

    def __ge__(self, other):
        return self._cmp(operator.ge, other)
def SetPartitionsAk(k):
    """Dispatch to the integer or half-integer partition-algebra basis for k."""
    is_int, k = _int_or_half_int(k)
    if is_int:
        return SetPartitionsAk_k(k)
    # half-integer order uses the dedicated half-k construction
    return SetPartitionsAkhalf_k(k)
class GanBase(nn.Module, metaclass=Named):
    """Base class for GAN generators: owns the latent dim and sampling helpers."""

    def __init__(self, z_dim, img_channels, num_classes=None):
        # plain attributes are set before nn.Module.__init__, matching the
        # original ordering (safe: they are not Parameters or Modules)
        self.z_dim = z_dim
        self.img_channels = img_channels
        super().__init__()

    @property
    def device(self):
        """Device of the module's parameters, cached on first access.

        BUG FIX: ``sample_z`` uses ``self.device`` as a value
        (``.to(self.device)``), so this accessor must be a @property;
        the decorator was missing and ``.to()`` received a bound method.
        """
        try:
            return self._device
        except AttributeError:
            self._device = next(self.parameters()).device
            return self._device

    def sample_z(self, n=1):
        """Draw ``n`` latent vectors ~ N(0, I) on the module's device."""
        return torch.randn(n, self.z_dim).to(self.device)

    def sample(self, n=1):
        """Generate ``n`` samples by forwarding freshly drawn latents."""
        return self(self.sample_z(n))
def test_raises_on_floating_point_input():
    """maximum_flow only accepts integer capacities; float input must raise."""
    graph = csr_matrix([[0, 1.5], [0, 0]], dtype=np.float64)
    # BUG FIX: both calls originally shared one pytest.raises context, so the
    # first raise exited the block and the second call never executed; each
    # method now gets its own context.
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 1)
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 1, method='edmonds_karp')
class Cli():
    """Command-line facade for the parallel feature-extraction pipeline."""

    def run(self, **kwargs):
        """Alias for :meth:`extract`."""
        return self.extract(**kwargs)

    def extract(self, **kwargs):
        """Normalize legacy option names, build args, and run extraction.

        The rename order matters: 'tar_path' first becomes 'shards_path',
        which the following rename then maps onto 'data.path'.
        """
        renames = (
            ('out_path', 'data.output.path'),
            ('tar_path', 'shards_path'),
            ('shards_path', 'data.path'),
            ('meta_path', 'data.meta.path'),
        )
        for old_key, new_key in renames:
            if old_key in kwargs:
                kwargs[new_key] = kwargs.pop(old_key)
        args = get_args(**kwargs)
        if args.acav.model_cache_path is not None:
            args.data.cache_dir = Path(args.acav.model_cache_path) / 'cache'
            args.data.cache_dir.mkdir(exist_ok=True, parents=True)
        args.data.output.path.mkdir(parents=True, exist_ok=True)
        parallel_extraction_script(args)
        print('done')

    def show_model_dict(self):
        """Print the mapping of available model names."""
        print(get_model_dict())
class SRCNN(nn.Sequential):
    """Classic three-stage SRCNN: 9x9 feature extraction, 1x1 non-linear
    mapping, 5x5 reconstruction to 3 output channels."""

    def __init__(self, n_colors=3):
        layers = [
            nn.Conv2d(n_colors, 64, 9, padding=4),
            nn.ReLU(True),
            nn.Conv2d(64, 32, 1, padding=0),
            nn.ReLU(True),
            nn.Conv2d(32, 3, 5, padding=2),
        ]
        super().__init__(*layers)

    def get_kwargs(cfg, conv=common.default_conv):
        # NOTE(review): defined without self — presumably meant to be a
        # @staticmethod whose decorator was lost; confirm against callers.
        kwargs = {'n_colors': cfg.n_colors}
        return kwargs
class Linear(nn.Module):
    """Linear (dot-product) kernel with learnable lengthscales and variance.

    Parameters are stored in an unconstrained space via ``transform_backward``
    and mapped back with ``transform_forward`` when used.
    """

    def __init__(self, dim, variance=1.0, lengthscale=None):
        super(Linear, self).__init__()
        # input dimensionality kept as a plain (non-trainable) tensor
        self.dim = torch.tensor([dim], requires_grad=False)
        if lengthscale is None:
            raw_ls = torch.ones(1, dim)
        else:
            raw_ls = torch.tensor(lengthscale)
        self.lengthscale = torch.nn.Parameter(transform_backward(raw_ls))
        self.variance = torch.nn.Parameter(transform_backward(torch.tensor([variance])))

    def forward(self, X, X2=None):
        """Return ``variance * (X / l) @ (X2 / l)^T``; ``X2`` defaults to ``X``."""
        X2 = X if X2 is None else X2
        scale = transform_forward(self.lengthscale)
        return transform_forward(self.variance) * torch.mm(X / scale, (X2 / scale).t())
def test_fb15k_237_load() -> None:
    """FB15k-237 must load with the published entity/relation/split counts."""
    expected_counts = dict(nodes=14541, rels=237, train=272115, test=20466, valid=17535)
    _knowledge_graph_load(FB15k_237(), **expected_counts)
class Request(BaseRequest, AcceptMixin, ETagRequestMixin, UserAgentMixin, AuthorizationMixin, CORSRequestMixin, CommonRequestDescriptorsMixin): |
def griddata(points, values, xi, method='linear', fill_value=np.nan, rescale=False):
    """Interpolate unstructured D-dimensional data at the points ``xi``.

    ``method`` is one of 'nearest', 'linear' or 'cubic' ('cubic' requires
    1-D or 2-D data). 1-D data is delegated to ``interp1d``; N-D data to the
    corresponding ND interpolator class.

    Raises:
        ValueError: for malformed ``xi`` or an unsupported method/dimension.
    """
    points = _ndim_coords_from_arrays(points)
    # a flat coordinate array means 1-D data; otherwise the last axis holds
    # the coordinate dimension
    if points.ndim < 2:
        ndim = points.ndim
    else:
        ndim = points.shape[-1]

    if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
        # 1-D fast path: interp1d supports all three kinds directly
        from .interpolate import interp1d
        points = points.ravel()
        if isinstance(xi, tuple):
            if len(xi) != 1:
                raise ValueError('invalid number of dimensions in xi')
            (xi,) = xi
        # interp1d needs sorted abscissae
        order = np.argsort(points)
        points = points[order]
        values = values[order]
        if method == 'nearest':
            # nearest never produces out-of-range values, so extrapolation is
            # safe and matches the N-D NearestNDInterpolator behaviour
            fill_value = 'extrapolate'
        ip = interp1d(points, values, kind=method, axis=0, bounds_error=False, fill_value=fill_value)
        return ip(xi)
    if method == 'nearest':
        ip = NearestNDInterpolator(points, values, rescale=rescale)
        return ip(xi)
    if method == 'linear':
        ip = LinearNDInterpolator(points, values, fill_value=fill_value, rescale=rescale)
        return ip(xi)
    if method == 'cubic' and ndim == 2:
        ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value, rescale=rescale)
        return ip(xi)
    raise ValueError('Unknown interpolation method %r for %d dimensional data' % (method, ndim))
class SuperProxylessNASNets(ProxylessNASNets):
 """Supernet for ProxylessNAS search: each searchable block is a MixedEdge.

 Extends the inference-time ProxylessNASNets with architecture parameters,
 binary gates, expected latency/FLOPs estimators and utilities to derive a
 single-path network once the search has finished.
 """
 def __init__(self, width_stages, n_cell_stages, conv_candidates, stride_stages, n_classes=1000, width_mult=1, bn_param=(0.1, 0.001), dropout_rate=0):
  self._redundant_modules = None
  self._unused_modules = None
  # channel widths are rounded to multiples of 8 after width-multiplier scaling
  input_channel = make_divisible((32 * width_mult), 8)
  first_cell_width = make_divisible((16 * width_mult), 8)
  for i in range(len(width_stages)):
   width_stages[i] = make_divisible((width_stages[i] * width_mult), 8)
  first_conv = ConvLayer(3, input_channel, kernel_size=3, stride=2, use_bn=True, act_func='relu6', ops_order='weight_bn_act')
  first_block_conv = MixedEdge(candidate_ops=build_candidate_ops(['3x3_MBConv1'], input_channel, first_cell_width, 1, 'weight_bn_act'))
  if (first_block_conv.n_choices == 1):
   # a single candidate needs no mixed edge — unwrap it
   first_block_conv = first_block_conv.candidate_ops[0]
  first_block = MobileInvertedResidualBlock(first_block_conv, None)
  input_channel = first_cell_width
  blocks = [first_block]
  for (width, n_cell, s) in zip(width_stages, n_cell_stages, stride_stages):
   for i in range(n_cell):
    # only the first cell of each stage uses the stage stride
    if (i == 0):
     stride = s
    else:
     stride = 1
    if ((stride == 1) and (input_channel == width)):
     # stride-1 same-width cells may be skipped entirely via a Zero op
     modified_conv_candidates = (conv_candidates + ['Zero'])
    else:
     modified_conv_candidates = conv_candidates
    conv_op = MixedEdge(candidate_ops=build_candidate_ops(modified_conv_candidates, input_channel, width, stride, 'weight_bn_act'))
    if ((stride == 1) and (input_channel == width)):
     shortcut = IdentityLayer(input_channel, input_channel)
    else:
     shortcut = None
    inverted_residual_block = MobileInvertedResidualBlock(conv_op, shortcut)
    blocks.append(inverted_residual_block)
    input_channel = width
  last_channel = (make_divisible((1280 * width_mult), 8) if (width_mult > 1.0) else 1280)
  feature_mix_layer = ConvLayer(input_channel, last_channel, kernel_size=1, use_bn=True, act_func='relu6', ops_order='weight_bn_act')
  classifier = LinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)
  super(SuperProxylessNASNets, self).__init__(first_conv, blocks, feature_mix_layer, classifier)
  self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
 def config(self):
  # NOTE(review): likely a @property upstream (decorator may have been lost);
  # config serialization is not supported for the supernet.
  raise ValueError('not needed')
 def build_from_config(config):
  # NOTE(review): defined without self — presumably a @staticmethod whose
  # decorator was stripped; confirm before calling on an instance.
  raise ValueError('not needed')
 ' weight parameters, arch_parameters & binary gates '
 def architecture_parameters(self):
  # Generator over architecture parameters (path alphas).
  for (name, param) in self.named_parameters():
   if ('AP_path_alpha' in name):
    (yield param)
 def binary_gates(self):
  # Generator over binary gate parameters.
  for (name, param) in self.named_parameters():
   if ('AP_path_wb' in name):
    (yield param)
 def weight_parameters(self):
  # Generator over ordinary weights (everything but arch params and gates).
  for (name, param) in self.named_parameters():
   if (('AP_path_alpha' not in name) and ('AP_path_wb' not in name)):
    (yield param)
 ' architecture parameters related methods '
 def redundant_modules(self):
  # NOTE(review): methods below access `self.redundant_modules` without
  # calling it, so upstream declares this as a @property — the decorator
  # appears to have been stripped during extraction.
  if (self._redundant_modules is None):
   module_list = []
   for m in self.modules():
    if m.__str__().startswith('MixedEdge'):
     module_list.append(m)
   self._redundant_modules = module_list
  return self._redundant_modules
 def entropy(self, eps=1e-08):
  # Total entropy of the op distributions over all mixed edges.
  entropy = 0
  for m in self.redundant_modules:
   module_entropy = m.entropy(eps=eps)
   entropy = (module_entropy + entropy)
  return entropy
 def init_arch_params(self, init_type='normal', init_ratio=0.001):
  # Initialize path alphas from N(0, init_ratio) or U(-init_ratio, init_ratio).
  for param in self.architecture_parameters():
   if (init_type == 'normal'):
    param.data.normal_(0, init_ratio)
   elif (init_type == 'uniform'):
    param.data.uniform_((- init_ratio), init_ratio)
   else:
    raise NotImplementedError
 def reset_binary_gates(self):
  # Re-sample the binary gates of every mixed edge.
  for m in self.redundant_modules:
   try:
    m.binarize()
   except AttributeError:
    print(type(m), ' do not support binarize')
 def set_arch_param_grad(self):
  for m in self.redundant_modules:
   try:
    m.set_arch_param_grad()
   except AttributeError:
    print(type(m), ' do not support `set_arch_param_grad()`')
 def rescale_updated_arch_param(self):
  for m in self.redundant_modules:
   try:
    m.rescale_updated_arch_param()
   except AttributeError:
    print(type(m), ' do not support `rescale_updated_arch_param()`')
 ' training related methods '
 def unused_modules_off(self):
  # Detach candidate ops not involved in the current forward pass so they
  # consume no memory; restored later by unused_modules_back().
  self._unused_modules = []
  for m in self.redundant_modules:
   unused = {}
   if (MixedEdge.MODE in ['full', 'two', 'full_v2']):
    involved_index = (m.active_index + m.inactive_index)
   else:
    involved_index = m.active_index
   for i in range(m.n_choices):
    if (i not in involved_index):
     unused[i] = m.candidate_ops[i]
     m.candidate_ops[i] = None
   self._unused_modules.append(unused)
 def unused_modules_back(self):
  # Reattach the candidate ops removed by unused_modules_off().
  if (self._unused_modules is None):
   return
  for (m, unused) in zip(self.redundant_modules, self._unused_modules):
   for i in unused:
    m.candidate_ops[i] = unused[i]
  self._unused_modules = None
 def set_chosen_op_active(self):
  for m in self.redundant_modules:
   try:
    m.set_chosen_op_active()
   except AttributeError:
    print(type(m), ' do not support `set_chosen_op_active()`')
 def set_active_via_net(self, net):
  # Copy active/inactive op choices from another supernet instance.
  assert isinstance(net, SuperProxylessNASNets)
  for (self_m, net_m) in zip(self.redundant_modules, net.redundant_modules):
   self_m.active_index = copy.deepcopy(net_m.active_index)
   self_m.inactive_index = copy.deepcopy(net_m.inactive_index)
 def expected_latency(self, latency_model: LatencyEstimator):
  # Expected latency under the current op distributions: fixed stem/head cost
  # plus, per searchable block, the probability-weighted candidate latencies.
  # NOTE(review): hard-codes a 224x224x3 input and stride-2 stem — confirm.
  expected_latency = 0
  expected_latency += latency_model.predict('Conv', [224, 224, 3], [112, 112, self.first_conv.out_channels])
  expected_latency += latency_model.predict('Conv_1', [7, 7, self.feature_mix_layer.in_channels], [7, 7, self.feature_mix_layer.out_channels])
  expected_latency += latency_model.predict('Logits', [7, 7, self.classifier.in_features], [self.classifier.out_features])
  fsize = 112
  for block in self.blocks:
   shortcut = block.shortcut
   if ((shortcut is None) or shortcut.is_zero_layer()):
    idskip = 0
   else:
    idskip = 1
   mb_conv = block.mobile_inverted_conv
   if (not isinstance(mb_conv, MixedEdge)):
    # fixed (non-searched) block: add its latency directly
    if (not mb_conv.is_zero_layer()):
     out_fz = (fsize // mb_conv.stride)
     op_latency = latency_model.predict('expanded_conv', [fsize, fsize, mb_conv.in_channels], [out_fz, out_fz, mb_conv.out_channels], expand=mb_conv.expand_ratio, kernel=mb_conv.kernel_size, stride=mb_conv.stride, idskip=idskip)
     expected_latency = (expected_latency + op_latency)
     fsize = out_fz
    continue
   probs_over_ops = mb_conv.current_prob_over_ops
   out_fsize = fsize
   for (i, op) in enumerate(mb_conv.candidate_ops):
    if ((op is None) or op.is_zero_layer()):
     continue
    out_fsize = (fsize // op.stride)
    op_latency = latency_model.predict('expanded_conv', [fsize, fsize, op.in_channels], [out_fsize, out_fsize, op.out_channels], expand=op.expand_ratio, kernel=op.kernel_size, stride=op.stride, idskip=idskip)
    expected_latency = (expected_latency + (op_latency * probs_over_ops[i]))
   fsize = out_fsize
  return expected_latency
 def expected_flops(self, x):
  # Expected-FLOPs analogue of expected_latency, traced by a forward pass.
  expected_flops = 0
  (flop, x) = self.first_conv.get_flops(x)
  expected_flops += flop
  for block in self.blocks:
   mb_conv = block.mobile_inverted_conv
   if (not isinstance(mb_conv, MixedEdge)):
    (delta_flop, x) = block.get_flops(x)
    expected_flops = (expected_flops + delta_flop)
    continue
   if (block.shortcut is None):
    shortcut_flop = 0
   else:
    (shortcut_flop, _) = block.shortcut.get_flops(x)
   expected_flops = (expected_flops + shortcut_flop)
   probs_over_ops = mb_conv.current_prob_over_ops
   for (i, op) in enumerate(mb_conv.candidate_ops):
    if ((op is None) or op.is_zero_layer()):
     continue
    (op_flops, _) = op.get_flops(x)
    expected_flops = (expected_flops + (op_flops * probs_over_ops[i]))
   x = block(x)
  (delta_flop, x) = self.feature_mix_layer.get_flops(x)
  expected_flops = (expected_flops + delta_flop)
  x = self.global_avg_pooling(x)
  x = x.view(x.size(0), (- 1))
  (delta_flop, x) = self.classifier.get_flops(x)
  expected_flops = (expected_flops + delta_flop)
  return expected_flops
 def convert_to_normal_net(self):
  # BFS over submodules, replacing every MixedEdge with its chosen op, then
  # rebuild a plain ProxylessNASNets from the resulting layers.
  queue = Queue()
  queue.put(self)
  while (not queue.empty()):
   module = queue.get()
   for m in module._modules:
    child = module._modules[m]
    if (child is None):
     continue
    if child.__str__().startswith('MixedEdge'):
     module._modules[m] = child.chosen_op
    else:
     queue.put(child)
  return ProxylessNASNets(self.first_conv, list(self.blocks), self.feature_mix_layer, self.classifier)
 def get_split_gradients(self, split_eid=1):
  # Collect gradients of stem, head and every block except the split edge;
  # non-stem blocks contribute only their currently-active candidate op.
  params = []
  params += list(self.first_conv.parameters())
  for (i, block) in enumerate(self.blocks):
   if (i == 0):
    params += list(block.mobile_inverted_conv.parameters())
   elif (i != split_eid):
    params += list(block.mobile_inverted_conv.candidate_ops[block.mobile_inverted_conv.active_index[0]].parameters())
  params += list(self.feature_mix_layer.parameters())
  params += list(self.classifier.parameters())
  # NOTE: `p.grad != None` relies on Python's identity fallback for tensors;
  # `is not None` would be the safe spelling.
  param_grads = [p.grad for p in params if (p.grad != None)]
  return param_grads
 def set_encoding(self, split_eid, enc):
  # Attach an encoding tensor (moved to GPU) onto the split edge's mixed conv.
  self.blocks[split_eid].mobile_inverted_conv.enc = enc.cuda()
class DataLoader():
    """Load cached per-frame detections from a JSON file.

    Each record holds three parallel NumPy arrays: 'rois', 'scores' and
    'class_ids'; records are ordered by sorted JSON key.
    """

    def __init__(self, json_path):
        self.json_path = json_path
        with open(self.json_path, 'r') as f:
            data = json.load(f)
        self.content = self.solveData(data)

    def solveData(self, data):
        """Convert the raw dict into a list of per-frame ndarray records."""
        content = []
        for key in sorted(data.keys()):
            detection_content = {}
            detection_content['rois'] = np.array(data[key]['rois'])
            detection_content['scores'] = np.array(data[key]['scores'])
            detection_content['class_ids'] = np.array(data[key]['class_ids'])
            content.append(detection_content)
        return content

    def cutWithScore(self, th):
        """Drop every detection whose score is below the threshold ``th``."""
        for key_record in range(len(self.content)):
            temp_ind = np.arange(self.content[key_record]['scores'].shape[0])
            ind = temp_ind[(self.content[key_record]['scores'] >= th)]
            self.content[key_record]['scores'] = self.content[key_record]['scores'][ind]
            self.content[key_record]['rois'] = self.content[key_record]['rois'][ind]
            self.content[key_record]['class_ids'] = self.content[key_record]['class_ids'][ind]

    def nms(self, nms_threshold):
        """Apply non-maximum suppression per frame, keeping surviving boxes."""
        for key_record in range(len(self.content)):
            # BUG FIX: the original guard compared the rois array against
            # np.array([]) elementwise, which is unreliable (NumPy broadcasting
            # of mismatched shapes); test for a non-empty array explicitly.
            if self.content[key_record]['rois'].size > 0:
                boxes = torch.from_numpy(self.content[key_record]['rois']).float()
                scores = torch.from_numpy(self.content[key_record]['scores']).unsqueeze(1).float()
                keep = nms(torch.cat((boxes, scores), 1), nms_threshold)
                ind = keep.numpy()
                self.content[key_record]['scores'] = self.content[key_record]['scores'][ind]
                self.content[key_record]['rois'] = self.content[key_record]['rois'][ind]
                self.content[key_record]['class_ids'] = self.content[key_record]['class_ids'][ind]
def np_to_pytorch_batch(np_batch):
    """Convert a batch of NumPy arrays into torch variables/tensors.

    Entries with object dtype (e.g. env infos) are skipped; the remaining
    keys are passed through ``_elem_or_tuple_to_variable``.
    """
    converted = {}
    for key, value in _filter_batch(np_batch):
        if value.dtype != np.dtype('O'):
            converted[key] = _elem_or_tuple_to_variable(value)
    return converted
def _parse_version_parts(s):
    """Yield normalized, sortable parts of a legacy version string.

    Numeric parts are zero-padded to 8 digits so lexicographic comparison
    sorts them numerically; non-numeric parts get a '*' prefix so they sort
    before numbers. A terminal '*final' marks the end of the version.
    """
    for part in _legacy_version_component_re.split(s):
        part = _legacy_version_replacement_map.get(part, part)
        if (not part) or (part == '.'):
            continue
        # BUG FIX: the digit set had been mangled to the empty string '',
        # making the test always False; restored to '0123456789' per the
        # upstream pkg_resources/packaging legacy-version parser.
        if part[:1] in '0123456789':
            yield part.zfill(8)
        else:
            yield ('*' + part)
    yield '*final'
def reflection(a=np.zeros(3), b=((2 * np.pi) * np.ones(3))):
    """Return a random 3x3 Householder reflection matrix I - 2*u*u^T.

    Angles are drawn uniformly from [a[i], b[i]) per axis. The defaults are
    ndarrays evaluated once at import time, but they are only read, so the
    shared-mutable-default pitfall does not apply here.
    """
    angles = np.zeros(3)
    for axis in range(3):
        angles[axis] = np.random.uniform(a[axis], b[axis], 1)
    cos1, sin1 = np.cos(angles[0]), np.sin(angles[0])
    cos2, sin2 = np.cos(angles[1]), np.sin(angles[1])
    # unit normal of the mirror plane (angles[2] is sampled but unused,
    # matching the original behavior)
    u = np.array([[sin1, cos1 * sin2, cos1 * cos2]])
    return np.identity(3) - 2 * np.dot(u.transpose(), u)
class listingType(GeneratedsSuper):
 """Generated XML binding for a <listing> element: a list of <codeline> children.

 Follows generateDS.py conventions: factory/export/build method triads plus
 get/set/add accessors for each child element.
 """
 # hooks letting applications substitute a subclass via factory()
 subclass = None
 superclass = None
 def __init__(self, codeline=None):
  # fresh list per instance (avoids a shared mutable default)
  if (codeline is None):
   self.codeline = []
  else:
   self.codeline = codeline
 def factory(*args_, **kwargs_):
  # Instantiate the registered subclass if one was installed, else this class.
  if listingType.subclass:
   return listingType.subclass(*args_, **kwargs_)
  else:
   return listingType(*args_, **kwargs_)
 factory = staticmethod(factory)
 def get_codeline(self):
  return self.codeline
 def set_codeline(self, codeline):
  self.codeline = codeline
 def add_codeline(self, value):
  self.codeline.append(value)
 def insert_codeline(self, index, value):
  # NOTE: despite the name this *replaces* the element at `index`
  # (generated-code quirk, kept as-is).
  self.codeline[index] = value
 def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''):
  # Serialize this element (and children) as XML to `outfile`.
  showIndent(outfile, level)
  outfile.write(('<%s%s %s' % (namespace_, name_, namespacedef_)))
  self.exportAttributes(outfile, level, namespace_, name_='listingType')
  if self.hasContent_():
   outfile.write('>\n')
   self.exportChildren(outfile, (level + 1), namespace_, name_)
   showIndent(outfile, level)
   outfile.write(('</%s%s>\n' % (namespace_, name_)))
  else:
   outfile.write(' />\n')
 def exportAttributes(self, outfile, level, namespace_='', name_='listingType'):
  # no attributes on this element type
  pass
 def exportChildren(self, outfile, level, namespace_='', name_='listingType'):
  for codeline_ in self.codeline:
   codeline_.export(outfile, level, namespace_, name_='codeline')
 def hasContent_(self):
  # NOTE: self.codeline is a list and never None here, so this effectively
  # always returns True (generated-code quirk).
  if (self.codeline is not None):
   return True
  else:
   return False
 def exportLiteral(self, outfile, level, name_='listingType'):
  # Emit a Python-literal representation (used for generated fixtures).
  level += 1
  self.exportLiteralAttributes(outfile, level, name_)
  if self.hasContent_():
   self.exportLiteralChildren(outfile, level, name_)
 def exportLiteralAttributes(self, outfile, level, name_):
  pass
 def exportLiteralChildren(self, outfile, level, name_):
  showIndent(outfile, level)
  outfile.write('codeline=[\n')
  level += 1
  for codeline in self.codeline:
   showIndent(outfile, level)
   outfile.write('model_.codeline(\n')
   codeline.exportLiteral(outfile, level, name_='codeline')
   showIndent(outfile, level)
   outfile.write('),\n')
  level -= 1
  showIndent(outfile, level)
  outfile.write('],\n')
 def build(self, node_):
  # Populate this object from a DOM node.
  attrs = node_.attributes
  self.buildAttributes(attrs)
  for child_ in node_.childNodes:
   # strip any namespace prefix from the node name
   nodeName_ = child_.nodeName.split(':')[(- 1)]
   self.buildChildren(child_, nodeName_)
 def buildAttributes(self, attrs):
  pass
 def buildChildren(self, child_, nodeName_):
  if ((child_.nodeType == Node.ELEMENT_NODE) and (nodeName_ == 'codeline')):
   obj_ = codelineType.factory()
   obj_.build(child_)
   self.codeline.append(obj_)
class MultiheadAttention(SequenceModule):
    """Thin wrapper over nn.MultiheadAttention with optional causal masking."""

    def __init__(self, d_model, n_heads, *args, causal=True, **kwargs):
        super().__init__()
        self.d_model = d_model
        self.d_output = d_model
        self.mha = nn.MultiheadAttention(d_model, n_heads, *args, batch_first=True, **kwargs)
        self.causal = causal

    def forward(self, src, attn_mask=None, key_padding_mask=None, state=None, **kwargs):
        """Self-attention over ``src``; returns ``(output, None)``.

        When causal and no mask is given, an upper-triangular boolean mask
        (diagonal excluded) blocks attention to future positions.
        """
        if (self.causal and (attn_mask is None)):
            attn_mask = torch.triu(torch.ones(src.size((- 2)), src.size((- 2)), dtype=torch.bool, device=src.device), diagonal=1)
        (y, _) = self.mha(src, src, src, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
        return (y, None)

    def step(self, x, state):
        # BUG FIX: the original body referenced undefined names (`src`,
        # `attn_mask`, `kwargs`) and always raised NameError; fail explicitly
        # so callers get a meaningful error.
        raise NotImplementedError('step-wise (recurrent) inference is not supported by MultiheadAttention')
def conv_block(in_channels, out_channels):
    """Standard few-shot backbone block: 3x3 conv -> BN -> ReLU -> 2x2 max-pool.

    The 3x3 conv uses padding 1, so the pool halves each spatial dimension.
    """
    layers = [
        nn.Conv2d(in_channels, out_channels, 3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(),
        nn.MaxPool2d(2),
    ]
    return nn.Sequential(*layers)
def evalkernels():
    """Execute the generated kernel-spec file into this module's globals.

    Reads awkward-cpp/tests-spec/kernels.py relative to CURRENT_DIR and
    exec's it, making its definitions available module-wide.
    """
    kernel_path = os.path.join(CURRENT_DIR, '..', 'awkward-cpp', 'tests-spec', 'kernels.py')
    with open(kernel_path) as kernelfile:
        source = kernelfile.read()
    # NOTE: exec of a repo-local generated file — input is a trusted build
    # artifact, never external data.
    exec(source, globals())
def closest_parent_sourcepos(elem):
    """Walk up the parent chain until a node with a sourcepos is found.

    Assumes some ancestor carries a non-None sourcepos; otherwise the walk
    falls off the root (AttributeError), matching the original behavior.
    """
    node = elem
    while node.sourcepos is None:
        node = node.parent
    return node.sourcepos
class GLUETransformer(BaseTransformer):
 """Lightning module that fine-tunes a pretrained transformer on a GLUE task."""
 # tells BaseTransformer to attach a sequence-classification head
 mode = 'sequence-classification'
 def __init__(self, hparams):
  # allow plain dicts (e.g. from load_from_checkpoint) as well as Namespace
  if (type(hparams) == dict):
   hparams = Namespace(**hparams)
  hparams.glue_output_mode = glue_output_modes[hparams.task]
  num_labels = glue_tasks_num_labels[hparams.task]
  super().__init__(hparams, num_labels, self.mode)
 def forward(self, **inputs):
  return self.model(**inputs)
 def training_step(self, batch, batch_idx):
  # batch layout: (input_ids, attention_mask, token_type_ids, labels)
  inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
  if (self.config.model_type not in ['distilbert', 'bart']):
   # only BERT-family models actually consume token_type_ids
   inputs['token_type_ids'] = (batch[2] if (self.config.model_type in ['bert', 'xlnet', 'albert']) else None)
  outputs = self(**inputs)
  loss = outputs[0]
  lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
  tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[(- 1)]}
  return {'loss': loss, 'log': tensorboard_logs}
 def prepare_data(self):
  # Tokenize train/dev examples once and cache features to disk; the cached
  # features are read back later by get_dataloader().
  args = self.hparams
  processor = processors[args.task]()
  self.labels = processor.get_labels()
  for mode in ['train', 'dev']:
   cached_features_file = self._feature_file(mode)
   if (os.path.exists(cached_features_file) and (not args.overwrite_cache)):
    logger.info('Loading features from cached file %s', cached_features_file)
   else:
    logger.info('Creating features from dataset file at %s', args.data_dir)
    examples = (processor.get_dev_examples(args.data_dir) if (mode == 'dev') else processor.get_train_examples(args.data_dir))
    features = convert_examples_to_features(examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode)
    logger.info('Saving features into cached file %s', cached_features_file)
    torch.save(features, cached_features_file)
 def get_dataloader(self, mode: str, batch_size: int, shuffle: bool=False) -> DataLoader:
  # Load cached features for `mode` and wrap them in a DataLoader.
  # The 'test' split reuses the dev features.
  mode = ('dev' if (mode == 'test') else mode)
  cached_features_file = self._feature_file(mode)
  logger.info('Loading features from cached file %s', cached_features_file)
  features = torch.load(cached_features_file)
  all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
  all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
  all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
  # NOTE(review): if glue_output_mode is neither value, all_labels stays
  # unbound and the return line raises NameError — presumably unreachable.
  if (self.hparams.glue_output_mode == 'classification'):
   all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
  elif (self.hparams.glue_output_mode == 'regression'):
   all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
  return DataLoader(TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle)
 def validation_step(self, batch, batch_idx):
  inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
  if (self.config.model_type not in ['distilbert', 'bart']):
   inputs['token_type_ids'] = (batch[2] if (self.config.model_type in ['bert', 'xlnet', 'albert']) else None)
  outputs = self(**inputs)
  (tmp_eval_loss, logits) = outputs[:2]
  preds = logits.detach().cpu().numpy()
  out_label_ids = inputs['labels'].detach().cpu().numpy()
  return {'val_loss': tmp_eval_loss.detach().cpu(), 'pred': preds, 'target': out_label_ids}
 def _eval_end(self, outputs) -> tuple:
  # Aggregate per-step outputs into mean loss plus task metrics.
  val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
  preds = np.concatenate([x['pred'] for x in outputs], axis=0)
  if (self.hparams.glue_output_mode == 'classification'):
   preds = np.argmax(preds, axis=1)
  elif (self.hparams.glue_output_mode == 'regression'):
   preds = np.squeeze(preds)
  out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
  # NOTE: these per-example lists are allocated but never filled below.
  out_label_list = [[] for _ in range(out_label_ids.shape[0])]
  preds_list = [[] for _ in range(out_label_ids.shape[0])]
  results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
  ret = dict(results.items())
  ret['log'] = results
  return (ret, preds_list, out_label_list)
 def validation_epoch_end(self, outputs: list) -> dict:
  (ret, preds, targets) = self._eval_end(outputs)
  logs = ret['log']
  return {'val_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}
 def test_epoch_end(self, outputs) -> dict:
  (ret, predictions, targets) = self._eval_end(outputs)
  logs = ret['log']
  # `val_loss` doubles as the test loss because test reuses the dev set
  return {'avg_test_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}
 def add_model_specific_args(parser, root_dir):
  # NOTE(review): defined without self — upstream declares this as a
  # @staticmethod; the decorator appears to have been lost in extraction.
  BaseTransformer.add_model_specific_args(parser, root_dir)
  parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
  parser.add_argument('--task', default='', type=str, required=True, help='The GLUE task to run')
  parser.add_argument('--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none')
  parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
  return parser
class PretrainNpcPipe(SequentialDataPipe):
    """Data pipe for NPC pretraining.

    Stages: load audio -> extract features -> build label mask from lengths
    -> copy source features into targets -> select the output keys.
    """

    def __init__(self, output_keys: dict=None, feat_type: str='fbank', feat_dim: int=80, frame_length: int=25, frame_shift: int=10, decode_wav: bool=False, cmvn: bool=True, audio_sample_rate: int=16000, audio_channel_reduction: str='first', n_jobs: int=6):
        # falsy output_keys (None or {}) fall back to the standard mapping,
        # matching the original `or` semantics
        output_keys = output_keys or dict(x='source_feat', label='target_feat', label_mask='label_mask', unique_name='id')
        load_stage = LoadAudio(n_jobs=n_jobs, audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction)
        feat_stage = ExtractNpcFeat(feat_type=feat_type, feat_dim=feat_dim, frame_length=frame_length, frame_shift=frame_shift, decode_wav=decode_wav, cmvn=cmvn, feat_name='source_feat')
        mask_stage = LabelMaskFromLen(target_feat_name='target_feat', label_mask_name='label_mask')
        target_stage = PrepareTargetFeat(use_copy=True, source_feat_name='source_feat', target_feat_name='target_feat')
        output_stage = SetOutputKeys(output_keys=output_keys)
        super().__init__(load_stage, feat_stage, mask_stage, target_stage, output_stage)
class SplitBenchmark(op_bench.TorchBenchmarkBase):
 """Microbenchmark for torch.split of an M x N tensor into ~`parts` chunks."""
 def init(self, M, N, parts, device):
  # benchmark fixture: random input plus the per-chunk length along dim 0
  self.input_one = torch.rand(M, N, device=device)
  # NOTE: float division then int(); exact only when parts divides M*N
  self.split_size = int(((M * N) / parts))
  self.set_module_name('split')
 def forward(self):
  return torch.split(self.input_one, self.split_size)
def munge(src_dir):
    """Shard files from ``src_dir`` into MCG/mat/<name[:14]>/<name[:22]>/.

    Each file is moved (os.rename) into a two-level directory tree keyed by
    prefixes of its basename (extension excluded); destination directories
    are created on demand and every move is logged to stdout.
    """
    for fn in os.listdir(src_dir):
        base, _ext = os.path.splitext(fn)
        level_one = base[:14]
        level_two = base[:22]
        dst_dir = os.path.join('MCG', 'mat', level_one, level_two)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        src = os.path.join(src_dir, fn)
        dst = os.path.join(dst_dir, fn)
        print('MV: {} -> {}'.format(src, dst))
        os.rename(src, dst)
def convert_weights_and_push(save_directory: Path, model_name: str=None, push_to_hub: bool=True):
 """Convert one (or all) RegNet checkpoints to HF format and optionally push.

 NOTE(review): the checkpoint-URL string literals assigned into
 names_to_from_model_map below were mangled during extraction (unterminated
 quotes — not valid Python). Restore the URLs from the upstream transformers
 RegNet conversion script before running this function.
 """
 filename = 'imagenet-1k-id2label.json'
 num_labels = 1000
 expected_shape = (1, num_labels)
 repo_id = 'huggingface/label-files'
 # self-assignment kept from the original (no effect)
 num_labels = num_labels
 id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
 id2label = {int(k): v for (k, v) in id2label.items()}
 # self-assignment kept from the original (no effect)
 id2label = id2label
 label2id = {v: k for (k, v) in id2label.items()}
 # all ImageNet-pretrained configs share the label maps
 ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
 names_to_config = {'regnet-x-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x'), 'regnet-x-004': ImageNetPreTrainedConfig(depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x'), 'regnet-x-006': ImageNetPreTrainedConfig(depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x'), 'regnet-x-008': ImageNetPreTrainedConfig(depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x'), 'regnet-x-016': ImageNetPreTrainedConfig(depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x'), 'regnet-x-032': ImageNetPreTrainedConfig(depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x'), 'regnet-x-040': ImageNetPreTrainedConfig(depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x'), 'regnet-x-064': ImageNetPreTrainedConfig(depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x'), 'regnet-x-080': ImageNetPreTrainedConfig(depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x'), 'regnet-x-120': ImageNetPreTrainedConfig(depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x'), 'regnet-x-160': ImageNetPreTrainedConfig(depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x'), 'regnet-x-320': ImageNetPreTrainedConfig(depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x'), 'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8), 'regnet-y-004': ImageNetPreTrainedConfig(depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8), 'regnet-y-006': ImageNetPreTrainedConfig(depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16), 'regnet-y-008': ImageNetPreTrainedConfig(depths=[1, 3, 8, 
2], hidden_sizes=[64, 128, 320, 768], groups_width=16), 'regnet-y-016': ImageNetPreTrainedConfig(depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24), 'regnet-y-032': ImageNetPreTrainedConfig(depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24), 'regnet-y-040': ImageNetPreTrainedConfig(depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64), 'regnet-y-064': ImageNetPreTrainedConfig(depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72), 'regnet-y-080': ImageNetPreTrainedConfig(depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56), 'regnet-y-120': ImageNetPreTrainedConfig(depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112), 'regnet-y-160': ImageNetPreTrainedConfig(depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112), 'regnet-y-320': ImageNetPreTrainedConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232), 'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232), 'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328), 'regnet-y-1280-seer': RegNetConfig(depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264), 'regnet-y-2560-seer': RegNetConfig(depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640), 'regnet-y-10b-seer': ImageNetPreTrainedConfig(depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010), 'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232), 'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328), 'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264), 'regnet-y-2560-seer-in1k': 
ImageNetPreTrainedConfig(depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640), 'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010)}
 names_to_ours_model_map = NameToOurModelFuncMap()
 names_to_from_model_map = NameToFromModelFuncMap()
 def load_using_classy_vision(checkpoint_url: str, model_func: Callable[([], nn.Module)]) -> Tuple[(nn.Module, Dict)]:
  # Download a classy-vision checkpoint and load its trunk into model_func().
  files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location='cpu')
  model = model_func()
  model_state_dict = files['classy_state_dict']['base_model']['model']
  state_dict = model_state_dict['trunk']
  model.load_state_dict(state_dict)
  return (model.eval(), model_state_dict['heads'])
 # NOTE(review): the eight assignments below are corrupted — the checkpoint
 # URL string arguments were lost and the quotes are unbalanced (see docstring).
 names_to_from_model_map['regnet-y-320-seer'] = partial(load_using_classy_vision, ' (lambda : FakeRegNetVisslWrapper(RegNetY32gf())))
 names_to_from_model_map['regnet-y-640-seer'] = partial(load_using_classy_vision, ' (lambda : FakeRegNetVisslWrapper(RegNetY64gf())))
 names_to_from_model_map['regnet-y-1280-seer'] = partial(load_using_classy_vision, ' (lambda : FakeRegNetVisslWrapper(RegNetY128gf())))
 names_to_from_model_map['regnet-y-10b-seer'] = partial(load_using_classy_vision, ' (lambda : FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)))))
 names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(load_using_classy_vision, ' (lambda : FakeRegNetVisslWrapper(RegNetY32gf())))
 names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(load_using_classy_vision, ' (lambda : FakeRegNetVisslWrapper(RegNetY64gf())))
 names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(load_using_classy_vision, ' (lambda : FakeRegNetVisslWrapper(RegNetY128gf())))
 names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(load_using_classy_vision, ' (lambda : FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)))))
 if model_name:
  # convert only the requested architecture
  convert_weight_and_push(model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub)
 else:
  # convert every known architecture
  for (model_name, config) in names_to_config.items():
   convert_weight_and_push(model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub)
return (config, expected_shape) |
class RetrievalModel(BaseModel):
    """ResNet-50 embedding model for image retrieval.

    Produces L2-normalized feature vectors; optionally freezes the backbone
    weights and/or its batch-norm layers at construction time.
    """

    def __init__(self, output_channels, freeze=False, freezeBN=False):
        super(RetrievalModel, self).__init__()
        self.output_channels = output_channels
        self.backbone = resnet50(output_channels)
        if freeze:
            self.freeze()
        if freezeBN:
            self.freezeBN()

    def forward(self, x):
        """Embed a batch of images into unit-norm descriptors."""
        features = self.backbone(x)
        flattened = features.view(features.shape[0], -1)
        return F.normalize(flattened, dim=1)

    def param_groups(self, lr=None, lr_fc_mul=1):
        """Build optimizer parameter groups.

        The final fc layer gets its learning rate scaled by `lr_fc_mul`;
        returns [] when no trainable non-fc parameters exist.
        """
        trainable = [
            param for name, param in self.named_parameters()
            if 'fc' not in name and param.requires_grad
        ]
        fc_params = self.backbone.fc.parameters()
        if not trainable:
            return []
        if lr is None:
            return [{'params': trainable}, {'params': fc_params}]
        return [
            {'params': trainable, 'lr': lr},
            {'params': fc_params, 'lr': lr * lr_fc_mul},
        ]
class precision_recall(object):
    """Precision/recall evaluator for GANs (PRD-style).

    Embeds real and generated images with an Inception model, clusters the
    joint embedding space with mini-batch k-means, and derives a
    precision/recall curve from the per-cluster density histograms.
    """
    def __init__(self, inception_model, device):
        # inception_model must return (embeddings, logits) for an image batch.
        self.inception_model = inception_model
        self.device = device
        # Only show progress bars on the primary device (rank 0).
        self.disable_tqdm = (device != 0)
    def generate_images(self, gen, dis, truncated_factor, prior, latent_op, latent_op_step, latent_op_alpha, latent_op_beta, batch_size):
        """Sample one batch of fake images from `gen` (optionally with latent optimization)."""
        # Unwrap (Distributed)DataParallel to reach model attributes.
        if (isinstance(gen, DataParallel) or isinstance(gen, DistributedDataParallel)):
            z_dim = gen.module.z_dim
            num_classes = gen.module.num_classes
            conditional_strategy = dis.module.conditional_strategy
        else:
            z_dim = gen.z_dim
            num_classes = gen.num_classes
            conditional_strategy = dis.conditional_strategy
        (zs, fake_labels) = sample_latents(prior, batch_size, z_dim, truncated_factor, num_classes, None, self.device)
        if latent_op:
            # Refine latents toward higher discriminator scores before generation.
            zs = latent_optimise(zs, fake_labels, gen, dis, conditional_strategy, latent_op_step, 1.0, latent_op_alpha, latent_op_beta, False, self.device)
        with torch.no_grad():
            batch_images = gen(zs, fake_labels, evaluation=True)
        return batch_images
    def inception_softmax(self, batch_images):
        """Return Inception embeddings for a batch (logits are discarded)."""
        with torch.no_grad():
            (embeddings, logits) = self.inception_model(batch_images)
        return embeddings
    def cluster_into_bins(self, real_embeds, fake_embeds, num_clusters):
        """Cluster real+fake embeddings jointly; return per-cluster density histograms."""
        representations = np.vstack([real_embeds, fake_embeds])
        kmeans = MiniBatchKMeans(n_clusters=num_clusters, n_init=10)
        labels = kmeans.fit(representations).labels_
        # Split the joint labels back into the real and fake halves.
        real_labels = labels[:len(real_embeds)]
        fake_labels = labels[len(real_embeds):]
        real_density = np.histogram(real_labels, bins=num_clusters, range=[0, num_clusters], density=True)[0]
        fake_density = np.histogram(fake_labels, bins=num_clusters, range=[0, num_clusters], density=True)[0]
        return (real_density, fake_density)
    def compute_PRD(self, real_density, fake_density, num_angles=1001, epsilon=1e-10):
        """Compute the PRD curve from two density histograms.

        Sweeps `num_angles` slopes in (0, pi/2); epsilon keeps the endpoints
        away from the degenerate angles 0 and pi/2.
        """
        angles = np.linspace(epsilon, ((np.pi / 2) - epsilon), num=num_angles)
        slopes = np.tan(angles)
        slopes_2d = np.expand_dims(slopes, 1)
        real_density_2d = np.expand_dims(real_density, 0)
        fake_density_2d = np.expand_dims(fake_density, 0)
        # alpha(lambda) = sum_i min(lambda * P(i), Q(i)); beta = alpha / lambda.
        precision = np.minimum((real_density_2d * slopes_2d), fake_density_2d).sum(axis=1)
        recall = (precision / slopes)
        # Values can exceed 1 only through float error; anything larger is a bug.
        max_val = max(np.max(precision), np.max(recall))
        if (max_val > 1.001):
            raise ValueError('Detected value > 1.001, this should not happen.')
        precision = np.clip(precision, 0, 1)
        recall = np.clip(recall, 0, 1)
        return (precision, recall)
    def compute_precision_recall(self, dataloader, gen, dis, num_generate, num_runs, num_clusters, truncated_factor, prior, latent_op, latent_op_step, latent_op_alpha, latent_op_beta, batch_size, device, num_angles=1001):
        """Embed `num_generate` real and fake images, then average the PRD curve over `num_runs` clusterings."""
        dataset_iter = iter(dataloader)
        n_batches = int(math.ceil((float(num_generate) / float(batch_size))))
        for i in tqdm(range(n_batches), disable=self.disable_tqdm):
            (real_images, real_labels) = next(dataset_iter)
            (real_images, real_labels) = (real_images.to(self.device), real_labels.to(self.device))
            fake_images = self.generate_images(gen, dis, truncated_factor, prior, latent_op, latent_op_step, latent_op_alpha, latent_op_beta, batch_size)
            real_embed = self.inception_softmax(real_images).detach().cpu().numpy()
            fake_embed = self.inception_softmax(fake_images).detach().cpu().numpy()
            if (i == 0):
                real_embeds = np.array(real_embed, dtype=np.float64)
                fake_embeds = np.array(fake_embed, dtype=np.float64)
            else:
                real_embeds = np.concatenate([real_embeds, np.array(real_embed, dtype=np.float64)], axis=0)
                fake_embeds = np.concatenate([fake_embeds, np.array(fake_embed, dtype=np.float64)], axis=0)
        # The last batch may overshoot num_generate; trim to the exact count.
        real_embeds = real_embeds[:num_generate]
        fake_embeds = fake_embeds[:num_generate]
        precisions = []
        recalls = []
        # k-means is stochastic, so average the curve over several runs.
        for _ in range(num_runs):
            (real_density, fake_density) = self.cluster_into_bins(real_embeds, fake_embeds, num_clusters)
            (precision, recall) = self.compute_PRD(real_density, fake_density, num_angles=num_angles)
            precisions.append(precision)
            recalls.append(recall)
        mean_precision = np.mean(precisions, axis=0)
        mean_recall = np.mean(recalls, axis=0)
        return (mean_precision, mean_recall)
    def compute_f_beta(self, precision, recall, beta=1, epsilon=1e-10):
        """F-beta score; epsilon guards against division by zero."""
        return (((1 + (beta ** 2)) * (precision * recall)) / ((((beta ** 2) * precision) + recall) + epsilon))
class MaxPool2dSamePad(nn.MaxPool2d):
    """MaxPool2d with TensorFlow-style 'SAME' padding.

    The input is padded (bottom/right-heavy) with -inf so that the output
    spatial size is always ceil(input_size / stride), regardless of kernel
    size and dilation.
    """
    # Padding value for max pooling: -inf can never win the max.
    PAD_VALUE: float = (- float('inf'))

    def __init__(self, kernel_size: int, stride=1, padding=0, dilation=1, ceil_mode=False, count_include_pad=True):
        assert (padding == 0), 'Padding in MaxPool2d Same Padding should be zero'
        kernel_size = (kernel_size, kernel_size)
        stride = (stride, stride)
        padding = (padding, padding)
        dilation = (dilation, dilation)
        # BUG FIX: the original forwarded (ceil_mode, count_include_pad)
        # positionally into nn.MaxPool2d(kernel_size, stride, padding,
        # dilation, return_indices, ceil_mode), so `ceil_mode` landed in
        # `return_indices` and `count_include_pad` (default True) became
        # `ceil_mode`. `count_include_pad` is an AvgPool2d option and is
        # accepted here only for interface compatibility; it is unused.
        super(MaxPool2dSamePad, self).__init__(kernel_size, stride, padding, dilation, return_indices=False, ceil_mode=ceil_mode)

    def forward(self, x):
        (h, w) = x.size()[(- 2):]
        # Total padding required so that the pooling windows exactly cover
        # the input and the output size is ceil(in / stride).
        pad_h = (((((math.ceil((h / self.stride[0])) - 1) * self.stride[0]) + ((self.kernel_size[0] - 1) * self.dilation[0])) + 1) - h)
        pad_w = (((((math.ceil((w / self.stride[1])) - 1) * self.stride[1]) + ((self.kernel_size[1] - 1) * self.dilation[1])) + 1) - w)
        if ((pad_h > 0) or (pad_w > 0)):
            # F.pad order is (left, right, top, bottom); the extra pixel of
            # odd padding goes to the right/bottom, matching TF 'SAME'.
            x = F.pad(x, [(pad_w // 2), (pad_w - (pad_w // 2)), (pad_h // 2), (pad_h - (pad_h // 2))], value=self.PAD_VALUE)
        x = F.max_pool2d(x, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode)
        return x
.parametrize('coupling', ['additive', 'affine'])
def test_normal_vs_invertible_module_wrapper(coupling):
    """Check that an InvertibleModuleWrapper-ed coupling matches a hand-coded
    reversible block: same outputs, same gradients, same post-step weights.

    Runs 10 seeded trials; c1/c2 drive the manual reference computation and
    their deep copies c1_2/c2_2 drive the wrapped module.
    """
    for seed in range(10):
        set_seeds(seed)
        # 4 channels split into two halves of 2 for the coupling.
        X = torch.rand(2, 4, 5, 5)
        c1 = torch.nn.Conv2d(2, 2, 3, padding=1)
        c2 = torch.nn.Conv2d(2, 2, 3, padding=1)
        c1_2 = copy.deepcopy(c1)
        c2_2 = copy.deepcopy(c2)
        # Sanity: the copies start identical, and c1/c2 differ from each other.
        assert torch.equal(c1.weight, c1_2.weight)
        assert torch.equal(c2.weight, c2_2.weight)
        assert torch.equal(c1.bias, c1_2.bias)
        assert torch.equal(c2.bias, c2_2.bias)
        assert (not torch.equal(c1.weight, c2.weight))
        optim1 = torch.optim.SGD(([e for e in c1.parameters()] + [e for e in c2.parameters()]), 0.1)
        optim2 = torch.optim.SGD(([e for e in c1_2.parameters()] + [e for e in c2_2.parameters()]), 0.1)
        for e in [c1, c2, c1_2, c2_2]:
            e.train()
        Xin = X.clone().requires_grad_()
        # Wrapped (memory-saving) path: inputs are freed and recomputed on backward.
        coupling_fn = create_coupling(Fm=c1_2, Gm=c2_2, coupling=coupling, implementation_fwd=(- 1), implementation_bwd=(- 1), adapter=AffineAdapterNaive)
        fn = InvertibleModuleWrapper(fn=coupling_fn, keep_input=False, keep_input_inverse=False)
        Y = fn.forward(Xin)
        loss2 = torch.mean(Y)
        # Manual reference path: compute the coupling equations explicitly.
        XX = X.clone().detach().requires_grad_()
        (x1, x2) = torch.chunk(XX, 2, dim=1)
        if (coupling == 'additive'):
            y1 = (x1 + c1.forward(x2))
            y2 = (x2 + c2.forward(y1))
        elif (coupling == 'affine'):
            # AffineAdapterNaive: scale = exp(f(x)), shift = f(x).
            fmr2 = c1.forward(x2)
            fmr1 = torch.exp(fmr2)
            y1 = ((x1 * fmr1) + fmr2)
            gmr2 = c2.forward(y1)
            gmr1 = torch.exp(gmr2)
            y2 = ((x2 * gmr1) + gmr2)
        else:
            raise NotImplementedError()
        YY = torch.cat([y1, y2], dim=1)
        loss = torch.mean(YY)
        # Capture reference gradients before backward() accumulates them.
        grads = torch.autograd.grad(loss, (XX, c1.weight, c2.weight, c1.bias, c2.bias), None, retain_graph=True)
        loss.backward()
        optim1.step()
        # autograd.grad and .backward must agree on the reference path.
        assert torch.equal(c1.weight.grad, grads[1])
        assert torch.equal(c2.weight.grad, grads[2])
        assert torch.equal(c1.bias.grad, grads[3])
        assert torch.equal(c2.bias.grad, grads[4])
        # After stepping only optim1, the two sets of convs must diverge.
        assert (not torch.equal(c1.weight, c1_2.weight))
        assert (not torch.equal(c2.weight, c2_2.weight))
        assert (not torch.equal(c1.bias, c1_2.bias))
        assert (not torch.equal(c2.bias, c2_2.bias))
        loss2.backward()
        optim2.step()
        assert Xin.is_contiguous()
        assert Y.is_contiguous()
        # After stepping both optimizers, weights and grads must match again
        # (allclose: the recomputed backward may differ by float rounding).
        assert torch.allclose(c1.weight.detach(), c1_2.weight.detach())
        assert torch.allclose(c2.weight.detach(), c2_2.weight.detach())
        assert torch.allclose(c1.bias.detach(), c1_2.bias.detach())
        assert torch.allclose(c2.bias.detach(), c2_2.bias.detach())
        assert torch.allclose(c1.weight.grad.detach(), c1_2.weight.grad.detach())
        assert torch.allclose(c2.weight.grad.detach(), c2_2.weight.grad.detach())
        assert torch.allclose(c1.bias.grad.detach(), c1_2.bias.grad.detach())
        assert torch.allclose(c2.bias.grad.detach(), c2_2.bias.grad.detach())
class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    """Import-time placeholder for the real StableDiffusionOnnxPipeline.

    Emitted when optional backends are missing; any attempt to instantiate
    it raises via requires_backends with an installation hint.
    """
    # Backends that must be installed for the real class to be usable.
    _backends = ['transformers', 'onnx']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['transformers', 'onnx'])
def save_checkpoint(epoch):
    """Save model/optimizer state for `epoch + 1` and drop the previous file.

    Runs only on Horovod rank 0. Relies on module-level `hvd`, `args`,
    `model`, and `optimizer`.
    """
    if (hvd.rank() != 0):
        return
    previous = args.checkpoint_format.format(epoch=epoch)
    # BUG FIX: the original called os.remove unconditionally, which raised
    # FileNotFoundError on the very first save (no prior checkpoint exists).
    if os.path.exists(previous):
        os.remove(previous)
    filepath = args.checkpoint_format.format(epoch=(epoch + 1))
    state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
    torch.save(state, filepath)
def lnlstm_creator(script=True, decompose_layernorm=False, **kwargs):
    """Build a benchmark ModelDef for a scripted layer-norm LSTM on CUDA.

    kwargs must supply inputSize, hiddenSize, seqLength and miniBatch.
    Only the scripted variant is supported.
    """
    assert (script is True)
    from .custom_lstms import script_lnlstm
    in_features = kwargs['inputSize']
    hidden_features = kwargs['hiddenSize']
    timesteps = kwargs['seqLength']
    batch = kwargs['miniBatch']
    # Single-layer LSTM; .cuda() before the randn calls keeps RNG order
    # identical to the original creator.
    lstm = script_lnlstm(in_features, hidden_features, 1,
                         decompose_layernorm=decompose_layernorm).cuda()
    sequence = torch.randn(timesteps, batch, in_features, device='cuda')
    initial_states = [(torch.randn(batch, hidden_features, device='cuda'),
                       torch.randn(batch, hidden_features, device='cuda'))]
    return ModelDef(inputs=[sequence, initial_states],
                    params=lstm.parameters(),
                    forward=lstm,
                    backward_setup=lstm_backward_setup,
                    backward=simple_backward)
def load_train_files(root_path, cfg, split):
    """Load speaker-feature .npy files for one dataset split.

    cfg[split] must map 'wav_files' to .npy paths (relative to root_path)
    and 'spk_ids' to the parallel list of speaker labels. Each array is
    transposed so frames become rows.

    Returns (X, Y, spk2idx): stacked frames, per-frame integer labels, and
    the label -> index mapping (in first-seen order).
    """
    npys = cfg[split]['wav_files']
    labs = cfg[split]['spk_ids']
    X = []
    Y = []
    # The original initialized spk2idx twice; once is enough.
    spk2idx = {}
    for (npy, lab) in zip(npys, labs):
        x = np.load(os.path.join(root_path, npy))
        xt = x.T  # transpose once instead of twice per file
        if (lab not in spk2idx):
            spk2idx[lab] = len(spk2idx)
        X.append(xt)
        # One label per frame (row) of the transposed array.
        Y += ([spk2idx[lab]] * xt.shape[0])
    return (np.concatenate(X, axis=0), np.array(Y), spk2idx)
class TorchMBRLAlgorithm(MBRLAlgorithm):
    """Torch-specific MBRL algorithm: fans device moves and train/eval mode
    changes out to every network of both the policy trainer and the model
    trainer.
    """

    def to(self, device):
        """Move all trainer and model-trainer networks to `device`."""
        for network in (*self.trainer.networks, *self.model_trainer.networks):
            network.to(device)

    def training_mode(self, mode):
        """Set train (True) / eval (False) mode on every network."""
        for network in (*self.trainer.networks, *self.model_trainer.networks):
            network.train(mode)
class E1000NIC(NICSim):
    """Simulated Intel E1000 NIC backed by the gem5-based e1000 model."""

    def __init__(self) -> None:
        super().__init__()
        # When True, the simulator binary runs with E1000_DEBUG=1 set.
        self.debug = False

    def run_cmd(self, env: ExpEnv) -> str:
        """Build the shell command that launches the e1000 simulator."""
        base_cmd = self.basic_run_cmd(env, '/e1000_gem5/e1000_gem5')
        return f'env E1000_DEBUG=1 {base_cmd}' if self.debug else base_cmd
def evaluate(model, criterion, corpus, data_source, eval_batch_size):
    """Evaluate a language model over `data_source` in BPTT windows.

    Returns (avg_loss_per_position, avg_predictive_entropy_per_token).
    Relies on the module-level `args.bptt` and the helpers
    `get_batch`/`repackage_hidden`.
    """
    model.eval()
    total_loss = 0.0
    total_words = 0.0
    total_entropy = 0.0
    ntokens = len(corpus.dictionary)  # vocabulary size
    hidden = model.init_hidden(eval_batch_size)
    with torch.no_grad():
        # Walk the corpus in windows of args.bptt timesteps.
        for i in range(0, (data_source.size(0) - 1), args.bptt):
            (data, targets) = get_batch(data_source, i, args.bptt)
            # NOTE(review): assumes the model's forward accepts a
            # mean_field_inference kwarg (deterministic pass) — confirm.
            (output, hidden) = model(data, hidden, mean_field_inference=True)
            output_flat = output.view((- 1), ntokens)
            num_words = output_flat.shape[0]
            pred_proba = nn.functional.softmax(output_flat, dim=(- 1))
            # Re-weight the (presumably window-averaged) criterion by window length.
            loss = (len(data) * criterion(output_flat, targets).item())
            # Shannon entropy of the predictive distribution, summed over tokens.
            entropy = (- (pred_proba * pred_proba.log()).sum(1).sum(0).item())
            total_words += num_words
            total_entropy += entropy
            total_loss += loss
            # Detach the hidden state so earlier windows' graphs are freed.
            hidden = repackage_hidden(hidden)
    return ((total_loss / (len(data_source) - 1)), (total_entropy / total_words))
def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns):
    """Register the new model's tokenizer in transformers' auto-tokenizer mapping.

    Locates the old model's entry inside TOKENIZER_MAPPING_NAMES in
    tokenization_auto.py, duplicates it with the model type and tokenizer
    class substituted, and writes the file back. No-op when either pattern
    has no tokenizer class.
    """
    if ((old_model_patterns.tokenizer_class is None) or (new_model_patterns.tokenizer_class is None)):
        return
    with open((((TRANSFORMERS_PATH / 'models') / 'auto') / 'tokenization_auto.py'), 'r', encoding='utf-8') as f:
        content = f.read()
    lines = content.split('\n')
    idx = 0
    # Advance to the first line of the TOKENIZER_MAPPING_NAMES OrderedDict.
    while (not lines[idx].startswith(' TOKENIZER_MAPPING_NAMES = OrderedDict(')):
        idx += 1
    idx += 1
    # Scan entries until the mapping literal ends, grabbing one entry at a time.
    while (not lines[idx].startswith('TOKENIZER_MAPPING = _LazyAutoMapping')):
        if lines[idx].endswith(','):
            # Single-line entry.
            block = lines[idx]
        else:
            # Multi-line entry: collect until its closing parenthesis line.
            block = []
            while (not lines[idx].startswith(' ),')):
                block.append(lines[idx])
                idx += 1
            block = '\n'.join(block)
        idx += 1
        # Stop once the old model's entry has been captured in `block`.
        if ((f'"{old_model_patterns.model_type}"' in block) and (old_model_patterns.tokenizer_class in block)):
            break
    # Build the new entry by substitution and insert it right after the old
    # one (idx already points just past the captured entry).
    new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type)
    new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class)
    new_lines = ((lines[:idx] + [new_block]) + lines[idx:])
    with open((((TRANSFORMERS_PATH / 'models') / 'auto') / 'tokenization_auto.py'), 'w', encoding='utf-8') as f:
        f.write('\n'.join(new_lines))
def load_model_and_optimizer(model, optimizer, model_state, optimizer_state):
    """Restore a model and its optimizer from previously saved state dicts.

    strict=True: model_state must match the model's parameter keys exactly,
    otherwise load_state_dict raises.
    """
    model.load_state_dict(model_state, strict=True)
    optimizer.load_state_dict(optimizer_state)
class AssertNetTest(BasePytorchTest):
    """Quantization test: `assert` statements present in the float model's FX
    graph must be stripped from the quantized models.
    """
    def __init__(self, unit_test):
        super().__init__(unit_test)
    def create_inputs_shape(self):
        # Two identical image-shaped inputs of the harness's batch size.
        return [[self.val_batch_size, 3, 32, 32], [self.val_batch_size, 3, 32, 32]]
    def create_feature_network(self, input_shape):
        return AssertNet()
    def compare(self, quantized_models, float_model, input_x=None, quantization_info=None):
        """Assert ops exist in the traced float graph but not in any quantized graph."""
        set_model(float_model)
        # Symbolically trace the float model to inspect its graph nodes.
        float_fx_model = symbolic_trace(float_model)
        float_node_list = list(float_fx_model.graph.nodes)
        self.unit_test.assertTrue((_assert in [node.target for node in float_node_list]))
        for (model_name, quantized_model) in quantized_models.items():
            set_model(quantized_model)
            quantized_node_list = list(quantized_model.graph.nodes)
            # Quantized graphs expose layer_class instead of target.
            self.unit_test.assertFalse((_assert in [node.layer_class for node in quantized_node_list]))
def _to_space_separated_string(l, base_ring=None):
    """Join the reprs of the elements of `l` with single spaces.

    When `base_ring` is supplied (and truthy), each element is first
    coerced into it before taking its repr.
    """
    if not base_ring:
        return ' '.join(repr(item) for item in l)
    return ' '.join(repr(base_ring(item)) for item in l)
def dataset_for_class(i):
    """Return an endlessly repeating dataset of samples whose label equals `i`."""
    class_id = tf.cast(i, tf.uint8)
    base = sample_dataset()
    return base.filter(lambda image, label: (label == class_id)).repeat()
def check_float_range(min_val, max_val):
    """Build an argparse `type=` validator for floats in [min_val, max_val].

    The returned callable parses its argument as float and raises
    argparse.ArgumentTypeError when the value falls outside the range.
    """
    def helper(x):
        value = float(x)
        if not (min_val <= value <= max_val):
            raise argparse.ArgumentTypeError('Value must be between {} and {}.'.format(min_val, max_val))
        return value
    return helper
class GCSInterface(ObjectStoreInterface):
    """Object-store interface for Google Cloud Storage.

    Uses the google-cloud-storage client for bucket/object CRUD, and falls
    back to signed-URL XML API requests for multipart uploads, which the
    JSON client does not expose.
    """

    def __init__(self, bucket_name: str):
        self.bucket_name = bucket_name
        self.auth = compute.GCPAuthentication()
        self._gcs_client = self.auth.get_storage_client()
        # Session reused across all signed-URL (XML API) requests.
        self._requests_session = requests.Session()

    def provider(self):
        """Cloud provider tag for this interface."""
        return 'gcp'

    def path(self):
        """Canonical gs:// URI of the bucket."""
        return f'gs://{self.bucket_name}'

    # NOTE(review): the original had a bare `_cache(maxsize=1)` statement here —
    # a decorator whose '@' was lost. `region_tag` reads `self.gcp_region`
    # without calling it, so this must also be a property.
    @property
    @_cache(maxsize=1)
    def gcp_region(self):
        """Zone (or region, when no zone matches) the bucket resides in."""
        def map_region_to_zone(region) -> str:
            """Resolve a region name to its first zone; zone names pass through."""
            parsed_region = region.lower().split('-')
            if (len(parsed_region) == 3):
                # Already a zone, e.g. 'us-east1-b'.
                return region
            elif ((len(parsed_region) == 2) or (len(parsed_region) == 1)):
                compute_client = self.auth.get_gcp_client()
                zones = compute_client.zones().list(project=self.auth.project_id).execute()
                for zone in zones['items']:
                    if zone['name'].startswith(region):
                        return zone['name']
            raise ValueError(f'No GCP zone found for region {region}')
        default_region = cloud_config.get_flag('gcp_default_region')
        try:
            bucket = self._gcs_client.lookup_bucket(self.bucket_name)
        except Exception as e:
            # Permission failures fall back to the configured default region.
            if ('access to the Google Cloud Storage bucket' in str(e)):
                logger.warning(f"No access to the Google Cloud Storage bucket '{self.bucket_name}', assuming bucket is in the '{default_region}' zone")
                return default_region
            raise
        if (bucket is None):
            raise exceptions.MissingBucketException(f'GCS bucket {self.bucket_name} does not exist')
        loc = bucket.location.lower()
        # Prefer the configured default zone when it lies within the bucket's region.
        if default_region.startswith(loc):
            loc = default_region
        return map_region_to_zone(loc)

    def region_tag(self):
        """Provider-qualified region tag, e.g. 'gcp:us-east1-b'."""
        return ('gcp:' + self.gcp_region)

    def bucket(self) -> str:
        return self.bucket_name

    def bucket_exists(self):
        """True iff the bucket exists, probed via a one-item listing."""
        iterator = self._gcs_client.list_blobs(self.bucket_name, page_size=1)
        try:
            next(iterator.pages, None)
            return True
        except Exception as e:
            if ('The specified bucket does not exist' in str(e)):
                return False
            raise

    def exists(self, obj_name):
        """True iff `obj_name` exists and is accessible in the bucket."""
        try:
            self.get_obj_metadata(obj_name)
            return True
        except NoSuchObjectException:
            return False

    def create_bucket(self, gcp_region, premium_tier=True):
        """Create the bucket in `gcp_region` (zone suffix dropped) if absent."""
        assert premium_tier, 'Standard tier GCS buckets are not supported'
        if (not self.bucket_exists()):
            bucket = self._gcs_client.bucket(self.bucket_name)
            bucket.storage_class = 'STANDARD'
            # GCS buckets are regional: 'us-east1-b' -> 'us-east1'.
            region_without_zone = '-'.join(gcp_region.split('-')[:2])
            self._gcs_client.create_bucket(bucket, location=region_without_zone)

    def delete_bucket(self):
        """Delete every object in the bucket (in batches of 1000), then the bucket."""
        for batch in batch_generator(self.list_objects(), 1000):
            self.delete_objects([obj.key for obj in batch])
        assert (len(list(self.list_objects())) == 0), f'Bucket not empty after deleting all keys {list(self.list_objects())}'
        self._gcs_client.get_bucket(self.bucket_name).delete()

    def list_objects(self, prefix='', region=None) -> Iterator[GCSObject]:
        """Yield GCSObject records for all blobs under `prefix` (`region` unused)."""
        blobs = self._gcs_client.list_blobs(self.bucket_name, prefix=prefix)
        for blob in blobs:
            yield GCSObject(blob.name, provider='gcp', bucket=self.bucket_name, size=blob.size, last_modified=blob.updated, mime_type=getattr(blob, 'content_type', None))

    def delete_objects(self, keys: List[str]):
        for key in keys:
            self._gcs_client.bucket(self.bucket_name).blob(key).delete()

    # NOTE(review): restored stripped decorator (was a bare `_cache(maxsize=1024)`
    # statement) so repeated metadata lookups are served from cache.
    @_cache(maxsize=1024)
    def get_obj_metadata(self, obj_name):
        """Return blob metadata; raise NoSuchObjectException when missing/forbidden."""
        bucket = self._gcs_client.bucket(self.bucket_name)
        blob = bucket.get_blob(obj_name)
        if (blob is None):
            raise NoSuchObjectException(f'Object {obj_name} does not exist in bucket {self.bucket_name}, or you do not have permission to access it')
        return blob

    def get_obj_size(self, obj_name):
        return self.get_obj_metadata(obj_name).size

    def get_obj_last_modified(self, obj_name):
        return self.get_obj_metadata(obj_name).updated

    def get_obj_mime_type(self, obj_name):
        return self.get_obj_metadata(obj_name).content_type

    def send_xml_request(self, blob_name: str, params: dict, method: str, headers: Optional[dict]=None, expiration=datetime.timedelta(minutes=15), data=None, content_type='application/octet-stream'):
        """Send a signed GCS XML-API request for `blob_name` and return the response.

        Raises ValueError on any non-2xx status code.
        """
        blob = self._gcs_client.bucket(self.bucket_name).blob(blob_name)
        headers = (headers or {})
        headers['Content-Type'] = content_type
        # The signed URL must cover the exact method, headers and query params used.
        url = blob.generate_signed_url(version='v4', expiration=expiration, method=method, content_type=content_type, query_parameters=params, headers=headers)
        if data:
            req = requests.Request(method, url, headers=headers, data=data)
        else:
            req = requests.Request(method, url, headers=headers)
        prepared = req.prepare()
        response = self._requests_session.send(prepared)
        if (not response.ok):
            raise ValueError(f'Invalid status code {response.status_code}: {response.text}')
        return response

    def download_object(self, src_object_name, dst_file_path, offset_bytes=None, size_bytes=None, write_at_offset=False, generate_md5=False) -> Tuple[(Optional[str], Optional[bytes])]:
        """Download an object (optionally a byte range) to `dst_file_path`.

        Returns (mime_type, md5_digest); md5_digest is None unless generate_md5.
        """
        (src_object_name, dst_file_path) = (str(src_object_name), str(dst_file_path))
        # BUG FIX: GCS object names carry no leading '/'; the original ternary
        # returned src_object_name on BOTH branches (a no-op) — strip it.
        if src_object_name.startswith('/'):
            src_object_name = src_object_name[1:]
        bucket = self._gcs_client.bucket(self.bucket_name)
        blob = bucket.blob(src_object_name)
        if (offset_bytes is None):
            chunk = blob.download_as_string()
        else:
            assert ((offset_bytes is not None) and (size_bytes is not None))
            # The GCS range end is inclusive.
            chunk = blob.download_as_string(start=offset_bytes, end=((offset_bytes + size_bytes) - 1))
        if (not os.path.exists(dst_file_path)):
            open(dst_file_path, 'a').close()
        m = (hashlib.md5() if generate_md5 else None)
        with open(dst_file_path, ('wb+' if write_at_offset else 'wb')) as f:
            f.seek((offset_bytes if write_at_offset else 0))
            f.write(chunk)
            if (m is not None):
                m.update(chunk)
        md5 = (m.digest() if (m is not None) else None)
        mime_type = blob.content_type
        return (mime_type, md5)

    def upload_object(self, src_file_path, dst_object_name, part_number=None, upload_id=None, check_md5=None, mime_type=None):
        """Upload a file, directly or as one part of a multipart upload.

        check_md5, when given, is the expected raw MD5 digest of the payload.
        """
        (src_file_path, dst_object_name) = (str(src_file_path), str(dst_object_name))
        # BUG FIX: same no-op ternary as in download_object — strip a leading '/'.
        if dst_object_name.startswith('/'):
            dst_object_name = dst_object_name[1:]
        os.path.getsize(src_file_path)  # fail fast if the source file is missing
        bucket = self._gcs_client.bucket(self.bucket_name)
        b64_md5sum = (base64.b64encode(check_md5).decode('utf-8') if check_md5 else None)
        if (part_number is None):
            # Simple (non-multipart) upload through the JSON client.
            blob = bucket.blob(dst_object_name)
            blob.upload_from_filename(src_file_path, content_type=mime_type)
            if check_md5:
                blob_md5 = blob.md5_hash
                if (b64_md5sum != blob_md5):
                    raise exceptions.ChecksumMismatchException((f'Checksum mismatch for object {dst_object_name} in bucket {self.bucket_name}, ' + f'expected {b64_md5sum}, got {blob_md5}'))
            return
        assert (part_number is not None), f'Part number cannot be none for multipart upload: {part_number}, {upload_id}'
        assert (upload_id is not None), f'Upload ID cannot be none for multipart upload: {part_number}, {upload_id}'
        headers = ({'Content-MD5': b64_md5sum} if check_md5 else None)
        try:
            # BUG FIX: the original opened the part file without ever closing it.
            with open(src_file_path, 'rb') as part_file:
                response = self.send_xml_request(dst_object_name, {'uploadId': upload_id, 'partNumber': part_number}, 'PUT', headers=headers, data=part_file)
        except Exception as e:
            raise ValueError(f'Failed to upload {dst_object_name} to bucket {self.bucket_name} upload id {upload_id}: {e}') from e
        if ('ETag' not in response.headers):
            raise exceptions.ChecksumMismatchException(f'Upload of object {dst_object_name} in bucket {self.bucket_name} failed, got status code {response.status_code} w/ response {response.text}')

    def initiate_multipart_upload(self, dst_object_name: str, mime_type: Optional[str]=None) -> str:
        """Start a multipart upload and return its UploadId."""
        assert (len(dst_object_name) > 0), f"Destination object name must be non-empty: '{dst_object_name}'"
        response = self.send_xml_request(dst_object_name, {'uploads': None}, 'POST', content_type=mime_type)
        # InitiateMultipartUploadResult: (Bucket, Key, UploadId) — take the third child.
        return ElementTree.fromstring(response.content)[2].text

    def complete_multipart_upload(self, dst_object_name, upload_id, metadata: Optional[Any]=None):
        """List all uploaded parts (paginated) and complete the multipart upload."""
        parts_root = ElementTree.Element('CompleteMultipartUpload')
        next_part_number_marker = None
        while True:
            params = {'uploadId': upload_id}
            if (next_part_number_marker is not None):
                params['part-number-marker'] = next_part_number_marker
            response = self.send_xml_request(dst_object_name, params, 'GET')
            tree = ElementTree.fromstring(response.content)
            # Extract the XML namespace from the root tag ('{ns}Tag').
            ns = {'ns': tree.tag.split('}')[0][1:]}
            for part in tree.findall('ns:Part', ns):
                part_xml = ElementTree.Element('Part')
                etag_match = part.find('ns:ETag', ns)
                assert (etag_match is not None)
                part_num_match = part.find('ns:PartNumber', ns)
                assert (part_num_match is not None)
                ElementTree.SubElement(part_xml, 'PartNumber').text = part_num_match.text
                ElementTree.SubElement(part_xml, 'ETag').text = etag_match.text
                parts_root.append(part_xml)
            if (tree.findall('ns:IsTruncated', ns)[0].text == 'false'):
                break
            next_part_number_marker = tree.findall('ns:NextPartNumberMarker', ns)[0].text
        payload = ElementTree.tostring(parts_root, encoding='utf-8', method='xml')
        # GCS rejects the serializer's namespace prefixes.
        payload = payload.replace(b'ns0:', b'')
        try:
            response = self.send_xml_request(dst_object_name, {'uploadId': upload_id}, 'POST', data=payload, content_type='application/xml')
        except Exception as e:
            # Abort the upload so orphaned parts do not accrue storage charges.
            self.send_xml_request(dst_object_name, {'uploadId': upload_id}, 'DELETE')
            raise exceptions.SkyplaneException('Failed to complete multipart upload') from e
def update_indiv_generation_losses(losses, nums, micro, macro, bs, length, loss):
    """Fold one batch's loss into the running micro/macro accumulators.

    `losses[micro][-1]` is token-weighted (batch loss divided by the updated
    token count) while `losses[macro][-1]` is sequence-weighted (per-sequence
    loss divided by the unchanged `nums[macro]`).
    """
    nums[micro] += bs * length
    weighted_loss = loss * bs
    # (old + batch) / count — same float-op order as the original += then /=.
    losses[micro][-1] = (losses[micro][-1] + weighted_loss) / nums[micro]
    losses[macro][-1] = (losses[macro][-1] + weighted_loss / length) / nums[macro]
(config_path='config', config_name='main', version_base=None)
def main(cfg):
    """Train (or baseline-evaluate) a codebook-wrapped causal LM from a hydra config.

    Returns the metrics dict from run_clm.run_trainer, or the baseline
    result when cfg.get_baseline is set.
    """
    # torchrun/launch sets LOCAL_RANK; -1 means single-process training.
    local_rank = int(os.environ.get('LOCAL_RANK', (- 1)))
    (cfg_dict, tags) = prepare_logging(cfg)
    training_args = run_clm.TrainingArguments(**cfg_dict['training_args'], local_rank=local_rank)
    model_args = run_clm.ModelArguments(**cfg.model_args)
    data_args = run_clm.DataTrainingArguments(**cfg.data_args)
    wandb_initilized = False
    # Only the main process (rank <= 0) reports to Weights & Biases.
    if ((training_args.local_rank <= 0) and ('wandb' in training_args.report_to)):
        wandb.init(project=cfg.project, name=training_args.run_name, tags=tags, settings=wandb.Settings(code_dir='.'), config=cfg_dict)
        wandb_initilized = True
    model = transformers.AutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    if cfg.get_baseline:
        # Evaluate the plain pretrained model without codebooks and exit.
        return get_baseline(training_args, model_args, data_args, model)
    codebook_config = models.CodebookModelConfig(**cfg_dict['codebook_args'])
    model = models.wrap_codebook(model_or_path=model, config=codebook_config, pretrained_path=cfg.pretrained_path)
    if cfg.enable_logging:
        model.enable_logging()
    optimizer = get_optimizer(training_args, model)
    callbacks = ([cb_trainer.WandbCallback()] if wandb_initilized else [])
    if (cfg.k_scheduler_kwargs is not None):
        # Schedule the codebook's k over training, starting from k_codebook.
        k_scheduler = cb_trainer.MulticodeKScheduler(k_min=cfg.codebook_args.k_codebook, **cfg.k_scheduler_kwargs)
        callbacks.append(k_scheduler)
    (trainer, lm_datasets, _, last_checkpoint) = run_clm.get_trainer_and_dataset(model_args, data_args, training_args, model, optimizers=(optimizer, None), callbacks=callbacks)
    # k-means initialization of codebooks happens once, on the main process.
    if (codebook_config.kmeans_init and (training_args.local_rank <= 0)):
        model.init_codebook(trainer.get_train_dataloader())
    model.enable_codebooks()
    # torch.compile is skipped on Windows and on Python >= 3.11 (unsupported there).
    if ((os.name != 'nt') and (sys.version_info < (3, 11))):
        model = torch.compile(model)
    metrics = run_clm.run_trainer(model_args, data_args, training_args, trainer, lm_datasets, last_checkpoint)
    return metrics
def test_method_get_accessible_object(default_test_case, method_mock, variable_reference_mock):
    """A MethodStatement reports the wrapped method as its accessible object."""
    method_statement = stmt.MethodStatement(default_test_case, method_mock, variable_reference_mock)
    assert method_statement.accessible_object() == method_mock
def get_schemas_from_json(fpath):
    """Parse a Spider-style `tables.json` file.

    Returns (schemas, db_names, tables):
      - schemas: db_id -> {lower-cased table name -> [lower-cased column names]}
      - db_names: list of db_ids in file order
      - tables: db_id -> original column/table name lists
    """
    with open(fpath) as handle:
        data = json.load(handle)
    db_names = [entry['db_id'] for entry in data]
    tables = {}
    schemas = {}
    for entry in data:
        db_id = entry['db_id']
        cols_original = entry['column_names_original']
        tabs_original = entry['table_names_original']
        tables[db_id] = {
            'column_names_original': cols_original,
            'table_names_original': tabs_original,
        }
        # Group each table's columns by the table index stored with every column.
        schemas[db_id] = {
            str(tab_name.lower()): [str(col.lower()) for tab_idx, col in cols_original if tab_idx == i]
            for i, tab_name in enumerate(tabs_original)
        }
    return (schemas, db_names, tables)
_function_dispatch(_partition_dispatcher)
def partition(a, kth, axis=(- 1), kind='introselect', order=None):
    """Return a partitioned copy of `a`.

    Element `kth` ends up in its sorted position along `axis`, with all
    smaller elements before it and all larger ones after. axis=None
    partitions a flattened copy; otherwise the copy preserves the input's
    memory layout as closely as possible (order='K').
    """
    if axis is None:
        result = asanyarray(a).flatten()
        result.partition(kth, axis=-1, kind=kind, order=order)
    else:
        result = asanyarray(a).copy(order='K')
        result.partition(kth, axis=axis, kind=kind, order=order)
    return result
class ModelInfo():
    """Lightweight container for hub model metadata.

    Any unknown keyword arguments are attached to the instance verbatim,
    so the class tolerates new fields in API responses.
    """
    def __init__(self, modelId: str, key: str, author: Optional[str]=None, downloads: Optional[int]=None, tags: Optional[List[str]]=None, pipeline_tag: Optional[str]=None, siblings: Optional[List[Dict]]=None, **kwargs):
        self.modelId = modelId  # repo id, e.g. 'org/name'
        self.key = key
        self.author = author
        self.downloads = downloads
        # BUG FIX: the original used a mutable default (tags=[]), which is
        # shared across every instance created without explicit tags.
        self.tags = (tags if (tags is not None) else [])
        self.pipeline_tag = pipeline_tag
        self.siblings = ([S3Object(**x) for x in siblings] if (siblings is not None) else None)
        for (k, v) in kwargs.items():
            setattr(self, k, v)
class MatrixMorphism_abstract(sage.categories.morphism.Morphism):
def __init__(self, parent, side='left'):
if (not sage.categories.homset.is_Homset(parent)):
raise TypeError('parent must be a Hom space')
if (side not in ['left', 'right']):
raise ValueError("the argument side must be either 'left' or 'right'")
self._side = side
sage.categories.morphism.Morphism.__init__(self, parent)
def _richcmp_(self, other, op):
if ((not isinstance(other, MatrixMorphism)) or (op not in (op_EQ, op_NE))):
return sage.categories.morphism.Morphism._richcmp_(self, other, op)
return richcmp(self.matrix(), other.matrix(), op)
def _call_(self, x):
try:
if (parent(x) is not self.domain()):
x = self.domain()(x)
except TypeError:
raise TypeError(('%s must be coercible into %s' % (x, self.domain())))
if self.domain().is_ambient():
x = x.element()
else:
x = self.domain().coordinate_vector(x)
C = self.codomain()
if (self.side() == 'left'):
v = (x.change_ring(C.base_ring()) * self.matrix())
else:
v = (self.matrix() * x.change_ring(C.base_ring()))
if (not C.is_ambient()):
v = C.linear_combination_of_basis(v)
return C._element_constructor_(v)
def _call_with_args(self, x, args=(), kwds={}):
if self.domain().is_ambient():
x = x.element()
else:
x = self.domain().coordinate_vector(x)
C = self.codomain()
v = (x.change_ring(C.base_ring()) * self.matrix())
if (not C.is_ambient()):
v = C.linear_combination_of_basis(v)
return C._element_constructor_(v, *args, **kwds)
def __invert__(self):
try:
B = (~ self.matrix())
except ZeroDivisionError:
raise ZeroDivisionError('matrix morphism not invertible')
try:
return self.parent().reversed()(B, side=self.side())
except TypeError:
raise ZeroDivisionError('matrix morphism not invertible')
def side(self):
return self._side
def side_switch(self):
side = ('left' if (self.side() == 'right') else 'right')
return self.parent()(self.matrix().transpose(), side=side)
def inverse(self):
return (~ self)
def __rmul__(self, left):
R = self.base_ring()
return self.parent()((R(left) * self.matrix()), side=self.side())
def __mul__(self, right):
if (not isinstance(right, MatrixMorphism)):
if isinstance(right, (sage.categories.morphism.Morphism, sage.categories.map.Map)):
return sage.categories.map.Map.__mul__(self, right)
R = self.base_ring()
return self.parent()((self.matrix() * R(right)))
H = right.domain().Hom(self.codomain())
if (self.domain() != right.codomain()):
raise TypeError('Incompatible composition of morphisms: domain of left morphism must be codomain of right.')
if (self.side() == 'left'):
if (right.side() == 'left'):
return H((right.matrix() * self.matrix()), side=self.side())
else:
return H((right.matrix().transpose() * self.matrix()), side=self.side())
elif (right.side() == 'right'):
return H((self.matrix() * right.matrix()), side=self.side())
else:
return H((right.matrix() * self.matrix().transpose()), side='left')
def __add__(self, right):
    """Sum of two morphisms with the same parent, or morphism + scalar."""
    if not isinstance(right, MatrixMorphism):
        R = self.base_ring()
        # Bug fix: keep the side, consistent with ``__sub__``'s scalar
        # branch; the original dropped it, resetting the side to default.
        return self.parent()(self.matrix() + R(right), side=self.side())
    if not (right.parent() == self.parent()):
        # Coerce the other morphism into this parent first.
        right = self.parent()(right, side=right.side())
    if self.side() == 'left':
        if right.side() == 'left':
            return self.parent()(self.matrix() + right.matrix(), side=self.side())
        elif right.side() == 'right':
            # Mixed conventions: express the right summand on the left.
            return self.parent()(self.matrix() + right.matrix().transpose(), side='left')
    if self.side() == 'right':
        if right.side() == 'right':
            return self.parent()(self.matrix() + right.matrix(), side=self.side())
        elif right.side() == 'left':
            return self.parent()(self.matrix().transpose() + right.matrix(), side='left')
def __neg__(self):
    """Return the negation of this morphism."""
    return self.parent()(-self.matrix(), side=self.side())
def __sub__(self, other):
    """Difference of two morphisms with the same parent, or morphism - scalar."""
    if not isinstance(other, MatrixMorphism):
        # Scalar case: subtract the coerced scalar from the matrix.
        scalar = self.base_ring()(other)
        return self.parent()(self.matrix() - scalar, side=self.side())
    if not (other.parent() == self.parent()):
        # Coerce the other morphism into this parent first.
        other = self.parent()(other, side=other.side())
    if self.side() == other.side():
        return self.parent()(self.matrix() - other.matrix(), side=self.side())
    if self.side() == 'left':
        # Mixed conventions: express the subtrahend on the left.
        return self.parent()(self.matrix() - other.matrix().transpose(), side='left')
    return self.parent()(self.matrix().transpose() - other.matrix(), side='left')
def base_ring(self):
    """Return the base ring of this morphism's domain."""
    return self.domain().base_ring()
def characteristic_polynomial(self, var='x'):
    """Return the characteristic polynomial of this endomorphism in ``var``.

    Raises ArithmeticError when domain and codomain differ.
    """
    if not self.is_endomorphism():
        raise ArithmeticError('charpoly only defined for endomorphisms (i.e., domain = range)')
    return self.matrix().charpoly(var)

# Conventional short alias.
charpoly = characteristic_polynomial
def decomposition(self, *args, **kwds):
    """Return a decomposition of the domain into submodules invariant under
    this endomorphism, delegating to the matrix ``decomposition`` method.

    Extra positional/keyword arguments are forwarded to the matrix method.
    Raises ArithmeticError when the morphism is not an endomorphism.
    """
    if (not self.is_endomorphism()):
        raise ArithmeticError('Matrix morphism must be an endomorphism.')
    D = self.domain()
    if (self.side() == 'left'):
        E = self.matrix().decomposition(*args, **kwds)
    else:
        # For a right-action morphism, decompose the transpose so the
        # factors match the left convention used below.
        E = self.matrix().transpose().decomposition(*args, **kwds)
    if D.is_ambient():
        return Sequence([D.submodule(V, check=False) for (V, _) in E], cr=True, check=False)
    else:
        # Non-ambient domain: map each factor's basis through the domain's
        # basis matrix to obtain ambient coordinates before building the
        # submodules.
        B = D.basis_matrix()
        R = D.base_ring()
        return Sequence([D.submodule((V.basis_matrix() * B).row_module(R), check=False) for (V, _) in E], cr=True, check=False)
def trace(self):
    """Return the trace of the underlying matrix."""
    return self._matrix.trace()
def det(self):
    """Return the determinant of this endomorphism's matrix.

    Raises ArithmeticError when the morphism is not an endomorphism.
    """
    if not self.is_endomorphism():
        raise ArithmeticError('Matrix morphism must be an endomorphism.')
    return self.matrix().determinant()
def fcp(self, var='x'):
    """Return the factorization of the characteristic polynomial in ``var``."""
    return self.charpoly(var).factor()
def kernel(self):
    """Return the kernel of this morphism as a submodule of its domain."""
    if self.side() == 'left':
        ker = self.matrix().left_kernel()
    else:
        ker = self.matrix().right_kernel()
    dom = self.domain()
    if not dom.is_ambient():
        # Lift coordinates w.r.t. the domain basis into the ambient module.
        ker = (ker.basis_matrix() * dom.basis_matrix()).row_module(dom.base_ring())
    return self.domain().submodule(ker, check=False)
def image(self):
    """Return the image of this morphism as a submodule of its codomain."""
    if self.side() == 'left':
        img = self.matrix().row_space()
    else:
        img = self.matrix().column_space()
    cod = self.codomain()
    if not cod.is_ambient():
        # Re-express the image in ambient coordinates via the codomain basis.
        img = (img.basis_matrix() * cod.basis_matrix()).row_module(self.domain().base_ring())
    return self.codomain().submodule(img, check=False)
def matrix(self):
    """Return the defining matrix; must be provided by the extension class."""
    raise NotImplementedError('this method must be overridden in the extension class')
def _matrix_(self):
    """Conversion hook so matrix constructors can consume this morphism."""
    return self.matrix()
def rank(self):
    """Return the rank of the defining matrix."""
    return self.matrix().rank()
def nullity(self):
    """Return the kernel dimension, respecting the side convention."""
    mat = self._matrix
    return mat.left_nullity() if self.side() == 'left' else mat.right_nullity()
def is_bijective(self):
    """Return True when the morphism is both injective and surjective."""
    return self.is_injective() and self.is_surjective()
def is_identity(self):
    """Return True if this morphism fixes every basis vector of its domain."""
    if self.domain() != self.codomain():
        return False
    # By linearity, checking on a basis suffices.
    return all(self(b) == b for b in self.domain().basis())
def is_zero(self):
    """Return True when the defining matrix is the zero matrix."""
    return self._matrix.is_zero()
def is_equal_function(self, other):
    """Return True when ``other`` is a matrix morphism between the same
    domain and codomain that acts identically on the domain basis.

    Raises TypeError when ``other`` is not a matrix morphism.
    """
    if not is_MatrixMorphism(other):
        msg = 'can only compare to a matrix morphism, not {0}'
        raise TypeError(msg.format(other))
    if self.domain() != other.domain() or self.codomain() != other.codomain():
        return False
    # Equality on a basis implies equality everywhere by linearity.
    return all(self(b) == other(b) for b in self.domain().basis())
def restrict_domain(self, sub):
    """Return this morphism restricted to the submodule ``sub`` of the domain.

    Parameters: ``sub`` -- a submodule of ``self.domain()``.
    """
    D = self.domain()
    if hasattr(D, 'coordinate_module'):
        # Work in coordinates relative to the domain's basis.
        V = D.coordinate_module(sub)
    else:
        V = sub.free_module()
    if (self.side() == 'right'):
        # Transpose so the matrix acts on the left, restrict, transpose back.
        A = self.matrix().transpose().restrict_domain(V).transpose()
    else:
        A = self.matrix().restrict_domain(V)
    H = sub.Hom(self.codomain())
    try:
        return H(A, side=self.side())
    except Exception:
        # Fallback for homsets whose constructor does not accept ``side``;
        # NOTE(review): the broad except presumably guards a TypeError from
        # the ``side`` keyword -- confirm, since it also masks other errors.
        return H(A)
def restrict_codomain(self, sub):
    """Return this morphism with its codomain restricted to ``sub``.

    ``sub`` must contain the image for the restriction to be well defined.
    """
    H = self.domain().Hom(sub)
    C = self.codomain()
    if hasattr(C, 'coordinate_module'):
        # Coordinates of ``sub`` relative to the codomain's basis.
        V = C.coordinate_module(sub)
    else:
        V = sub.free_module()
    try:
        if (self.side() == 'right'):
            # Transpose before and after so the restriction applies to the
            # correct side of the action.
            return H(self.matrix().transpose().restrict_codomain(V).transpose(), side='right')
        else:
            return H(self.matrix().restrict_codomain(V))
    except Exception:
        # Fallback for homsets that reject the ``side`` keyword;
        # NOTE(review): this broad except also masks failures from the
        # restrict_codomain calls themselves -- confirm that is intended.
        return H(self.matrix().restrict_codomain(V))
def restrict(self, sub):
    """Restrict this endomorphism to an invariant submodule ``sub``,
    returning an endomorphism of ``sub``.

    Raises ArithmeticError when the morphism is not an endomorphism.
    """
    if (not self.is_endomorphism()):
        raise ArithmeticError('matrix morphism must be an endomorphism')
    D = self.domain()
    C = self.codomain()
    if ((D is not C) and (D.basis() != C.basis())):
        # Domain and codomain carry different bases: restrict each side
        # separately instead of restricting the matrix in one step.
        return self.restrict_domain(sub).restrict_codomain(sub)
    if hasattr(D, 'coordinate_module'):
        # Coordinates of ``sub`` relative to the domain's basis.
        V = D.coordinate_module(sub)
    else:
        V = sub.free_module()
    if (self.side() == 'right'):
        # Transpose so the restriction acts on the correct side.
        A = self.matrix().transpose().restrict(V).transpose()
    else:
        A = self.matrix().restrict(V)
    H = sage.categories.homset.End(sub, self.domain().category())
    return H(A, side=self.side())
class MLP_4HL(nn.Module):
    """MLP with batch-norm and LeakyReLU hidden stages for boosted ensembles.

    NOTE(review): despite the ``4HL`` name, ``forward`` applies the single
    shared hidden layer three times (mirroring MLP_3HL) -- confirm intent.
    ``forward`` requires ``dim_hidden1 == dim_hidden2`` since the shared
    hidden layer maps dim_hidden2 -> dim_hidden1.
    """

    def __init__(self, dim_in, dim_hidden1, dim_hidden2, sparse=False, bn=True):
        # Bug fix: the original called super(MLP_3HL, self).__init__(), which
        # raises a TypeError (wrong class) whenever MLP_4HL is constructed.
        super().__init__()
        self.in_layer = SpLinear(dim_in, dim_hidden1) if sparse else nn.Linear(dim_in, dim_hidden1)
        self.dropout_layer = nn.Dropout(0.0)
        self.lrelu = nn.LeakyReLU(0.1)
        self.relu = nn.ReLU()
        # One Linear instance reused for every hidden stage (weights shared).
        self.hidden_layer = nn.Linear(dim_hidden2, dim_hidden1)
        self.out_layer = nn.Linear(dim_hidden1, 1)
        self.bn = nn.BatchNorm1d(dim_hidden1)
        self.bn2 = nn.BatchNorm1d(dim_in)

    def forward(self, x, lower_f):
        """Return (penultimate features, per-sample scalar prediction).

        lower_f -- hidden features from the previous boosting stage; when
        given they are concatenated onto ``x`` and batch-normalized.
        """
        if lower_f is not None:
            x = torch.cat([x, lower_f], dim=1)
            x = self.bn2(x)
        out = self.lrelu(self.in_layer(x))
        out = self.bn(out)
        out = self.lrelu(self.hidden_layer(out))
        out = self.bn(out)
        out = self.lrelu(self.hidden_layer(out))
        out = self.bn(out)
        out = self.hidden_layer(out)
        return (out, self.out_layer(self.relu(out)).squeeze())

    @classmethod
    def get_model(cls, stage, opt):
        """Build the model for boosting stage ``stage``; later stages also
        consume the previous stage's hidden features (wider input)."""
        if stage == 0:
            dim_in = opt.feat_d
        else:
            dim_in = opt.feat_d + opt.hidden_d
        # Bug fixes: instantiate this class (the original hard-coded
        # MLP_3HL) and restore the missing @classmethod decorator implied
        # by the ``cls`` parameter.
        return cls(dim_in, opt.hidden_d, opt.hidden_d, opt.sparse)
@pytest.fixture(scope='session')
def random_data():
    """Session-scoped fixture: 4 random 28x28 RGB images in [0, 1) plus
    one-hot uint8 labels over 2 classes.

    Bug fix: the source had a bare ``(scope='session')`` line -- the
    ``@pytest.fixture`` decorator name had been lost, leaving a syntax
    error; this restores it.
    """
    batch_size = 4
    x = np.random.random((batch_size, 28, 28, 3))
    y = tf.keras.utils.to_categorical(np.random.randint(2, size=batch_size), num_classes=2).astype('uint8')
    return (x, y)
class GNConv(nn.Module):
    """One graph-network convolution pass: edge -> node -> global updates.

    Each sub-block is an arbitrary nn.Module applied to the graph in
    sequence; individual stages can be disabled via the ``use_*`` flags.
    """

    def __init__(self, edge_model_block, node_model_block, global_model_block, use_edge_block=True, use_node_block=True, use_global_block=True, update_graph=False):
        super(GNConv, self).__init__()
        self.edge_model_block = edge_model_block
        self.node_model_block = node_model_block
        self.global_model_block = global_model_block
        self._use_edge_block = use_edge_block
        self._use_node_block = use_node_block
        self._use_global_block = use_global_block
        self._update_graph = update_graph

    def reset_parameters(self):
        """Reset parameters of every sub-module that supports it."""
        for block in (self.edge_model_block, self.node_model_block, self.global_model_block):
            for module in block.modules():
                if hasattr(module, 'reset_parameters'):
                    module.reset_parameters()

    def forward(self, graph):
        """Apply the enabled edge/node/global blocks to ``graph`` in order."""
        stages = (
            (self._use_edge_block, self.edge_model_block),
            (self._use_node_block, self.node_model_block),
            (self._use_global_block, self.global_model_block),
        )
        for enabled, block in stages:
            if enabled:
                graph = block(graph)
        return graph
def main(_):
    """Run beam-search caption generation over all images matching
    FLAGS.input_files and print the top captions with their probabilities.

    Builds the inference graph from FLAGS.checkpoint_path, restores the
    checkpoint into a TF session, and decodes each image file in turn.
    """
    g = tf.Graph()
    with g.as_default():
        model = inference_wrapper.InferenceWrapper()
        restore_fn = model.build_graph_from_config(configuration.ModelConfig(), FLAGS.checkpoint_path)
    # Freeze the graph so no ops are accidentally added after construction.
    g.finalize()
    vocab = vocabulary.Vocabulary(FLAGS.vocab_file)
    filenames = []
    for file_pattern in FLAGS.input_files.split(','):
        # FLAGS.input_files is a comma-separated list of glob patterns.
        filenames.extend(tf.gfile.Glob(file_pattern))
    tf.logging.info('Running caption generation on %d files matching %s', len(filenames), FLAGS.input_files)
    with tf.Session(graph=g) as sess:
        restore_fn(sess)
        generator = caption_generator.CaptionGenerator(model, vocab)
        for filename in filenames:
            with tf.gfile.GFile(filename, 'r') as f:
                image = f.read()
            captions = generator.beam_search(sess, image)
            print(('Captions for image %s:' % os.path.basename(filename)))
            for (i, caption) in enumerate(captions):
                # Drop the first and last token before joining words;
                # presumably these are start/end-of-sentence markers -- confirm.
                sentence = [vocab.id_to_word(w) for w in caption.sentence[1:(- 1)]]
                sentence = ' '.join(sentence)
                print((' %d) %s (p=%f)' % (i, sentence, math.exp(caption.logprob))))
def assign_hgraph_singletons(hgraph, singletons, singleton_type='grey_out'):
    """Mark or remove singleton nodes in a hierarchy graph, in place.

    Node/link ids are compared against ``singletons`` after stripping '|'
    separators.  With ``singleton_type='grey_out'`` every node receives an
    ``if_singleton`` flag; any other value removes singleton nodes and all
    links touching them.
    """
    def _bare(identifier):
        # Ids embed '|' separators; strip them before membership tests.
        return identifier.replace('|', '')

    if singleton_type == 'grey_out':
        for node in hgraph['nodes']:
            node['if_singleton'] = _bare(node['id']) in singletons
    else:
        hgraph['nodes'] = [node for node in hgraph['nodes'] if _bare(node['id']) not in singletons]
        hgraph['links'] = [
            link for link in hgraph['links']
            if _bare(link['source']) not in singletons and _bare(link['target']) not in singletons
        ]
def get_balanced_output_list_for_evidence_context_data(output_list: list[ProcessedData]):
    """Balance the label distribution of ``output_list``.

    Keeps every 'supported' and 'partially_supported' item and downsamples
    'not_supported' to the 'partially_supported' count.  Returns a new list;
    the 'not_supported' subset is chosen via an in-place shuffle (uses the
    global ``random`` state).
    """
    buckets = {'supported': [], 'partially_supported': [], 'not_supported': []}
    for item in output_list:
        buckets[item['label']].append(item)
    balanced: list[dict] = buckets['supported'] + buckets['partially_supported']
    random.shuffle(buckets['not_supported'])
    cap = len(buckets['partially_supported'])
    balanced.extend(buckets['not_supported'][:cap])
    return balanced
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1) using tanh activations.

    ``expansion`` is the channel multiplier of the final 1x1 convolution;
    a projection shortcut is added when stride or channel count changes.
    """
    # Output channels = expansion * planes.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Identity shortcut by default; projection when shapes differ.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        # torch.tanh replaces the deprecated torch.nn.functional.tanh.
        out = torch.tanh(self.bn1(self.conv1(x)))
        out = torch.tanh(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        return torch.tanh(out)
def module_init():
    """Create the root binding module 'ns.mobility' in the ::ns3 C++ namespace."""
    return Module('ns.mobility', cpp_namespace='::ns3')
class Date():
    """Lightweight container for a possibly-partial calendar date.

    Any component may be None to represent an unknown/unspecified part.
    """

    def __init__(self, year=None, month=None, day=None):
        self.year, self.month, self.day = year, month, day
def unsupervised_training_one_epoch(adata: AnnData, run_setup_anndata: bool=True, batch_key: Optional[str]=None, labels_key: Optional[str]=None):
    """Train a fresh SCVI model on ``adata`` for a single epoch.

    Optionally registers the AnnData with SCVI first; 40% of the cells are
    used for training.
    """
    if run_setup_anndata:
        SCVI.setup_anndata(adata, batch_key=batch_key, labels_key=labels_key)
    model = SCVI(adata)
    model.train(1, train_size=0.4)
def _reshape_for_microbatch(Batch: Axis, Microbatch: Axis, AccumStep: Axis, inputs, axis_mapping):
    """Reshape every batched array in ``inputs`` from (Batch, ...) to
    (AccumStep, Microbatch, ...) for gradient accumulation.

    NamedArrays are unflattened along ``Batch`` and re-sharded; raw jnp
    arrays are reshaped positionally (axis 0 assumed to be the batch axis
    -- TODO confirm) and constrained to shard the microbatch axis over the
    data mesh axis; anything else must be a scalar and passes through.
    """
    def _reshape(x):
        if isinstance(x, hax.NamedArray):
            if (not x.has_axis(Batch.name)):
                # Not batched: leave untouched.
                return x
            x = x.unflatten_axis(Batch, (AccumStep, Microbatch))
            return hax.shard_with_axis_mapping(x, axis_mapping)
        elif isinstance(x, jnp.ndarray):
            x = x.reshape(((AccumStep.size, Microbatch.size) + x.shape[1:]))
            # Shard axis 1 (the microbatch axis) over the data-parallel
            # resource axis; all remaining axes stay unsharded.
            return with_sharding_constraint(x, PartitionSpec(None, ResourceAxis.DATA, *((None,) * (len(x.shape) - 2))))
        else:
            assert jnp.isscalar(x)
            return x
    return jax.tree_util.tree_map(_reshape, inputs, is_leaf=is_named_array)
def test(sim_time, qc_atten):
    """Run a star-network random-request simulation and print per-node stats
    plus a table of accepted reservations.

    Parameters: ``sim_time`` -- simulation duration, ``qc_atten`` --
    quantum-channel attenuation; both are forwarded to ``set_parameters``.
    """
    network_config = 'star_network.json'
    network_topo = RouterNetTopo(network_config)
    set_parameters(network_topo, sim_time, qc_atten)
    quantum_router_nodes = network_topo.get_nodes_by_type(RouterNetTopo.QUANTUM_ROUTER)
    node_names = [node.name for node in quantum_router_nodes]
    apps = []
    for (i, (name, node)) in enumerate(zip(node_names, quantum_router_nodes)):
        # Each app requests entanglement with every router except its own.
        other_nodes = node_names[:]
        other_nodes.remove(name)
        app = RandomRequestApp(node, other_nodes, i, min_dur=.0, max_dur=.0, min_size=10, max_size=25, min_fidelity=0.8, max_fidelity=1.0)
        apps.append(app)
        app.start()
    tl = network_topo.get_timeline()
    tl.show_progress = True
    tl.init()
    # Wall-clock timing of the simulation run.
    tick = time.time()
    tl.run()
    print(('execution time %.2f sec' % (time.time() - tick)))
    for app in apps:
        print(('node ' + app.node.name))
        print('\tnumber of wait times: ', len(app.get_wait_time()))
        print('\twait times:', app.get_wait_time())
        print('\treservations: ', app.reserves)
        print('\tthroughput: ', app.get_throughput())
    print('\nReservations Table:\n')
    # Collect one row per accepted reservation per node (reuses node_names).
    node_names = []
    start_times = []
    end_times = []
    memory_sizes = []
    for node in network_topo.get_nodes_by_type(RouterNetTopo.QUANTUM_ROUTER):
        node_name = node.name
        for reservation in node.network_manager.protocol_stack[1].accepted_reservation:
            (s_t, e_t, size) = (reservation.start_time, reservation.end_time, reservation.memory_size)
            # Presumably intermediate routers (neither initiator nor
            # responder) need double the memory for swapping -- confirm.
            if ((reservation.initiator != node.name) and (reservation.responder != node.name)):
                size *= 2
            node_names.append(node_name)
            start_times.append(s_t)
            end_times.append(e_t)
            memory_sizes.append(size)
    log = {'Node': node_names, 'Start_time': start_times, 'End_time': end_times, 'Memory_size': memory_sizes}
    df = pd.DataFrame(log)
    print(df)
def make_rectangle(img_size=(64, 64), num_points_per_cluster=8, cluster_radius=1):
    """Render a synthetic image with four point clusters at the corners of a
    randomly sampled rectangle.

    Corner pairs are re-sampled until all four clusters (corner +/- radius)
    fit inside the image.  Returns an (H, W, 1) numpy array with the cluster
    points drawn as filled circles of value 255.
    """
    is_rectangle = False
    while (not is_rectangle):
        # Sample two defining corners, keeping the cluster radius inside the frame.
        point_1_x = random.randint((0 + cluster_radius), (img_size[0] - cluster_radius))
        point_1_y = random.randint((0 + cluster_radius), (img_size[1] - cluster_radius))
        point_2_x = random.randint((0 + cluster_radius), (img_size[0] - cluster_radius))
        point_2_y = random.randint((0 + cluster_radius), (img_size[1] - cluster_radius))
        # Derive the remaining two corners; NOTE(review): semantics of
        # get_point_rectangle are assumed from its name -- confirm.
        (point_3_x, point_3_y, point_4_x, point_4_y) = get_point_rectangle(point_1_x, point_1_y, point_2_x, point_2_y)
        # Reject samples whose derived corners (plus radius) fall outside the image.
        if (((point_3_x + cluster_radius) > img_size[0]) or ((point_3_y + cluster_radius) > img_size[1]) or ((point_3_x - cluster_radius) < 0) or ((point_3_y - cluster_radius) < 0)):
            continue
        if (((point_4_x + cluster_radius) > img_size[0]) or ((point_4_y + cluster_radius) > img_size[1]) or ((point_4_x - cluster_radius) < 0) or ((point_4_y - cluster_radius) < 0)):
            continue
        points = []
        points = get_cluster_points(num_points_per_cluster, point_1_x, point_1_y, points, cluster_radius)
        points = get_cluster_points(num_points_per_cluster, point_2_x, point_2_y, points, cluster_radius)
        points = get_cluster_points(num_points_per_cluster, point_3_x, point_3_y, points, cluster_radius)
        points = get_cluster_points(num_points_per_cluster, point_4_x, point_4_y, points, cluster_radius)
        image = np.zeros((img_size[0], img_size[1], 1))
        for p in points:
            # thickness=-1 draws a filled circle.
            image = cv2.circle(image, p, radius=2, color=255, thickness=(- 1))
        is_rectangle = True
    return image
def build_roi_box_head(cfg, in_channels):
    """Select and construct the ROI box head implementation per the config flag."""
    head_cls = WSDDNHead if cfg.MODEL.ROI_BOX_HEAD.WSDDN else ROIBoxHead
    return head_cls(cfg, in_channels)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.