def gradients(ys, xs, grad_ys=None, checkpoints='collection', **kwargs):
if (not isinstance(ys, list)):
ys = [ys]
if (not isinstance(xs, list)):
xs = [xs]
bwd_ops = ge.get_backward_walk_ops([y.op for y in ys], inclusive=True)
debug_print('bwd_ops: %s', bwd_ops)
fwd_ops = ge.get_forward_walk_ops([x.op for x in xs], inclusive=True, within_ops=bwd_ops)
debug_print('fwd_ops: %s', fwd_ops)
fwd_ops = [op for op in fwd_ops if op.inputs]
xs_ops = _to_ops(xs)
fwd_ops = [op for op in fwd_ops if (not (op in xs_ops))]
fwd_ops = [op for op in fwd_ops if (not ('/assign' in op.name))]
fwd_ops = [op for op in fwd_ops if (not ('/Assign' in op.name))]
fwd_ops = [op for op in fwd_ops if (not ('/read' in op.name))]
ts_all = ge.filter_ts(fwd_ops, True)
ts_all = [t for t in ts_all if ('/read' not in t.name)]
ts_all = ((set(ts_all) - set(xs)) - set(ys))
if (type(checkpoints) is not list):
if (checkpoints == 'collection'):
checkpoints = tf.get_collection('checkpoints')
elif (checkpoints == 'speed'):
checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')
elif (checkpoints == 'memory'):
def fixdims(t):
try:
return [int((e if (e.value is not None) else 64)) for e in t]
except:
return [0]
ts_all = [t for t in ts_all if (np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE)]
ts_all = [t for t in ts_all if ('L2Loss' not in t.name)]
ts_all = [t for t in ts_all if ('entropy' not in t.name)]
ts_all = [t for t in ts_all if ('FusedBatchNorm' not in t.name)]
ts_all = [t for t in ts_all if ('Switch' not in t.name)]
ts_all = [t for t in ts_all if ('dropout' not in t.name)]
ts_all = [t for t in ts_all if ('Cast' not in t.name)]
with util.capture_ops() as bwd_ops:
tf_gradients(ys, xs, grad_ys, **kwargs)
bwd_inputs = [t for op in bwd_ops for t in op.inputs]
ts_filtered = list(set(bwd_inputs).intersection(ts_all))
debug_print('Using tensors %s', ts_filtered)
for ts in [ts_filtered, ts_all]:
bottleneck_ts = []
for t in ts:
b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops))
f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops))
b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)
f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all)
if ((not set(b_inp).intersection(f_inp)) and ((len(b_inp) + len(f_inp)) >= len(ts_all))):
bottleneck_ts.append(t)
else:
debug_print('Rejected bottleneck candidate and ops %s', ([t] + list(((set(ts_all) - set(b_inp)) - set(f_inp)))))
if (len(bottleneck_ts) >= np.sqrt(len(ts_filtered))):
break
if (not bottleneck_ts):
raise Exception('unable to find bottleneck tensors! please provide checkpoint nodes manually, or use checkpoints="speed".')
bottlenecks_sorted_lists = tf_toposort(bottleneck_ts, within_ops=fwd_ops)
sorted_bottlenecks = [t for ts in bottlenecks_sorted_lists for t in ts]
N = len(ts_filtered)
if (len(bottleneck_ts) <= np.ceil(np.sqrt(N))):
checkpoints = sorted_bottlenecks
else:
step = int(np.ceil((len(bottleneck_ts) / np.sqrt(N))))
checkpoints = sorted_bottlenecks[step::step]
else:
raise Exception(('%s is unsupported input for "checkpoints"' % (checkpoints,)))
checkpoints = list(set(checkpoints).intersection(ts_all))
assert isinstance(checkpoints, list)
debug_print('Checkpoint nodes used: %s', checkpoints)
xs_intersect_checkpoints = set(xs).intersection(set(checkpoints))
if xs_intersect_checkpoints:
debug_print('Warning, some input nodes are also checkpoint nodes: %s', xs_intersect_checkpoints)
ys_intersect_checkpoints = set(ys).intersection(set(checkpoints))
debug_print('ys: %s, checkpoints: %s, intersect: %s', ys, checkpoints, ys_intersect_checkpoints)
if ys_intersect_checkpoints:
debug_print('Warning, some output nodes are also checkpoints nodes: %s', format_ops(ys_intersect_checkpoints))
checkpoints = list(((set(checkpoints) - set(ys)) - set(xs)))
if (not checkpoints):
raise Exception('no checkpoints nodes found or given as input! ')
checkpoints_disconnected = {}
for x in checkpoints:
if (x.op and (x.op.name is not None)):
grad_node = tf.stop_gradient(x, name=(x.op.name + '_sg'))
else:
grad_node = tf.stop_gradient(x)
grad_node.op._set_device(x.op.node_def.device)
checkpoints_disconnected[x] = grad_node
ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys], stop_at_ts=checkpoints, within_ops=fwd_ops)
debug_print('Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s', len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints)
debug_print('ops_to_copy = %s', ops_to_copy)
debug_print('Processing list %s', ys)
(copied_sgv, info) = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for (origin_op, op) in info._transformed_ops.items():
op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print('Copied %s to %s', ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops)
debug_print('Rewired %s in place of %s restricted to %s', checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops)
copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys]
boundary = list(checkpoints_disconnected.values())
dv = tf_gradients(ys=copied_ys, xs=(boundary + xs), grad_ys=grad_ys, **kwargs)
debug_print('Got gradients %s', dv)
debug_print('for %s', copied_ys)
debug_print('with respect to %s', (boundary + xs))
inputs_to_do_before = [y.op for y in ys]
if (grad_ys is not None):
inputs_to_do_before += grad_ys
wait_to_do_ops = (list(copied_ops) + [g.op for g in dv if (g is not None)])
my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)
d_checkpoints = {r: dr for (r, dr) in zip(checkpoints_disconnected.keys(), dv[:len(checkpoints_disconnected)])}
d_xs = dv[len(checkpoints_disconnected):]
checkpoints_sorted_lists = tf_toposort(checkpoints, within_ops=fwd_ops)
for ts in checkpoints_sorted_lists[::(- 1)]:
debug_print('Processing list %s', ts)
checkpoints_other = [r for r in checkpoints if (r not in ts)]
checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other]
ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other)
debug_print('Found %s ops to copy within %s, seed %s, stop_at %s', len(ops_to_copy), fwd_ops, [r.op for r in ts], checkpoints_other)
debug_print('ops_to_copy = %s', ops_to_copy)
if (not ops_to_copy):
break
(copied_sgv, info) = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for (origin_op, op) in info._transformed_ops.items():
op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print('Copied %s to %s', ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops)
debug_print('Rewired %s in place of %s restricted to %s', checkpoints_disconnected_other, checkpoints_other, copied_ops)
boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts]
substitute_backprops = [d_checkpoints[r] for r in ts]
dv = tf_gradients(boundary, (checkpoints_disconnected_other + xs), grad_ys=substitute_backprops, **kwargs)
debug_print('Got gradients %s', dv)
debug_print('for %s', boundary)
debug_print('with respect to %s', (checkpoints_disconnected_other + xs))
debug_print('with boundary backprop substitutions %s', substitute_backprops)
inputs_to_do_before = [d_checkpoints[r].op for r in ts]
wait_to_do_ops = (list(copied_ops) + [g.op for g in dv if (g is not None)])
my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)
for (r, dr) in zip(checkpoints_other, dv[:len(checkpoints_other)]):
if (dr is not None):
if (d_checkpoints[r] is None):
d_checkpoints[r] = dr
else:
d_checkpoints[r] += dr
def _unsparsify(x):
if (not isinstance(x, tf.IndexedSlices)):
return x
assert (x.dense_shape is not None), 'memory_saving_gradients encountered sparse gradients of unknown shape'
indices = x.indices
while (indices.shape.ndims < x.values.shape.ndims):
indices = tf.expand_dims(indices, (- 1))
return tf.scatter_nd(indices, x.values, x.dense_shape)
d_xs_new = dv[len(checkpoints_other):]
for j in range(len(xs)):
if (d_xs_new[j] is not None):
if (d_xs[j] is None):
d_xs[j] = _unsparsify(d_xs_new[j])
else:
d_xs[j] += _unsparsify(d_xs_new[j])
return d_xs
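# Hedged usage sketch for the gradients() function above, assuming it lives in a module
# importable as `memory_saving_gradients` and a TF1-style graph is being built; the tiny
# model and optimizer below are illustrative. It shows the checkpointed function used as
# a drop-in replacement for tf.gradients.
import tensorflow as tf
import memory_saving_gradients

x = tf.placeholder(tf.float32, [None, 128])
h = tf.layers.dense(x, 256, activation=tf.nn.relu)
y = tf.layers.dense(h, 1)
loss = tf.reduce_mean(tf.square(y))

params = tf.trainable_variables()
# checkpoints='memory' lets the function pick checkpoint tensors automatically;
# 'collection' would instead read them from tf.get_collection('checkpoints').
grads = memory_saving_gradients.gradients(loss, params, checkpoints='memory')
train_op = tf.train.AdamOptimizer(1e-3).apply_gradients(zip(grads, params))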
@HOOKS.register_module()
class TINLrUpdaterHook(LrUpdaterHook):
def __init__(self, min_lr, **kwargs):
self.min_lr = min_lr
super().__init__(**kwargs)
def get_warmup_lr(self, cur_iters):
if (self.warmup == 'linear'):
k = (((cur_iters / self.warmup_iters) * (1 - self.warmup_ratio)) + self.warmup_ratio)
warmup_lr = [(_lr * k) for _lr in self.regular_lr]
elif (self.warmup == 'constant'):
warmup_lr = [(_lr * self.warmup_ratio) for _lr in self.regular_lr]
elif (self.warmup == 'exp'):
k = (self.warmup_ratio ** (1 - (cur_iters / self.warmup_iters)))
warmup_lr = [(_lr * k) for _lr in self.regular_lr]
return warmup_lr
def get_lr(self, runner, base_lr):
if self.by_epoch:
progress = runner.epoch
max_progress = runner.max_epochs
else:
progress = runner.iter
max_progress = runner.max_iters
target_lr = self.min_lr
if (self.warmup is not None):
progress = (progress - self.warmup_iters)
max_progress = (max_progress - self.warmup_iters)
factor = (progress / max_progress)
return annealing_cos(base_lr, target_lr, factor)
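# Standalone sketch of the schedule computed by TINLrUpdaterHook above. annealing_cos here
# is a stand-in mirroring the mmcv helper the hook calls (an assumption); the learning-rate
# and warmup numbers are illustrative.
import math

def annealing_cos(start, end, factor):
    # Cosine interpolation from `start` to `end` as factor goes from 0 to 1.
    cos_out = math.cos(math.pi * factor) + 1
    return end + 0.5 * (start - end) * cos_out

base_lr, min_lr = 0.01, 1e-5
warmup_iters, warmup_ratio = 500, 0.1

# Linear warmup at iteration 100, matching get_warmup_lr(warmup='linear'):
k = (100 / warmup_iters) * (1 - warmup_ratio) + warmup_ratio
print(base_lr * k)                          # 0.0028

# Cosine annealing halfway through the post-warmup schedule, matching get_lr():
print(annealing_cos(base_lr, min_lr, 0.5))  # ~0.005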
class ElasticCache(Elastic):
def __init__(self, index_name):
super(ElasticCache, self).__init__(index_name)
self.__num_docs = None
self.__num_fields = None
self.__doc_count = {}
self.__coll_length = {}
self.__avg_len = {}
self.__doc_length = {}
self.__doc_freq = {}
self.__coll_termfreq = {}
def __check_cache(self, func, params, var):
pass
def num_docs(self):
if (self.__num_docs is None):
self.__num_docs = super(ElasticCache, self).num_docs()
return self.__num_docs
def num_fields(self):
if (self.__num_fields is None):
self.__num_fields = super(ElasticCache, self).num_fields()
return self.__num_fields
def doc_count(self, field):
if (field not in self.__doc_count):
self.__doc_count[field] = super(ElasticCache, self).doc_count(field)
return self.__doc_count[field]
def coll_length(self, field):
if (field not in self.__coll_length):
self.__coll_length[field] = super(ElasticCache, self).coll_length(field)
return self.__coll_length[field]
def avg_len(self, field):
if (field not in self.__avg_len):
self.__avg_len[field] = super(ElasticCache, self).avg_len(field)
return self.__avg_len[field]
def doc_length(self, doc_id, field):
if (doc_id not in self.__doc_length):
self.__doc_length[doc_id] = {}
if (field not in self.__doc_length[doc_id]):
self.__doc_length[doc_id][field] = super(ElasticCache, self).doc_length(doc_id, field)
return self.__doc_length[doc_id][field]
def doc_freq(self, term, field):
if (field not in self.__doc_freq):
self.__doc_freq[field] = {}
if (term not in self.__doc_freq[field]):
self.__doc_freq[field][term] = super(ElasticCache, self).doc_freq(term, field)
return self.__doc_freq[field][term]
def coll_term_freq(self, term, field):
if (field not in self.__coll_termfreq):
self.__coll_termfreq[field] = {}
if (term not in self.__coll_termfreq[field]):
self.__coll_termfreq[field][term] = super(ElasticCache, self).coll_term_freq(term, field)
return self.__coll_termfreq[field][term]
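# The ElasticCache methods above all follow the same memoisation pattern: compute a
# collection statistic once per key, then serve it from a nested dict. A minimal,
# self-contained sketch of that pattern (the class and backend names are illustrative,
# not part of the Elastic API):
class StatsCache:
    def __init__(self, backend):
        self._backend = backend
        self._doc_freq = {}

    def doc_freq(self, term, field):
        # Hit the backend only the first time a (field, term) pair is requested.
        if field not in self._doc_freq:
            self._doc_freq[field] = {}
        if term not in self._doc_freq[field]:
            self._doc_freq[field][term] = self._backend.doc_freq(term, field)
        return self._doc_freq[field][term]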
def test_scimodel_optimizer_exceptions(variable_x, variable_y, functional_fx, functional_gx):
xs = [variable_x, variable_y]
ys = [functional_fx, functional_gx]
with pytest.raises(ValueError):
assert isinstance(sn.SciModel(xs, ys, 'mse', 'to_fail'), sn.SciModel)
def valid(model, iterator, criterion_onset_A, criterion_offset_A, criterion_mpe_A, criterion_velocity_A, criterion_onset_B, criterion_offset_B, criterion_mpe_B, criterion_velocity_B, weight_A, weight_B, device):
model.eval()
epoch_loss = 0
with torch.no_grad():
for (i, (input_spec, label_onset, label_offset, label_mpe, label_velocity)) in enumerate(iterator):
input_spec = input_spec.to(device, non_blocking=True)
label_onset = label_onset.to(device, non_blocking=True)
label_offset = label_offset.to(device, non_blocking=True)
label_mpe = label_mpe.to(device, non_blocking=True)
label_velocity = label_velocity.to(device, non_blocking=True)
(output_onset_A, output_offset_A, output_mpe_A, output_velocity_A, attention, output_onset_B, output_offset_B, output_mpe_B, output_velocity_B) = model(input_spec)
output_onset_A = output_onset_A.contiguous().view((- 1))
output_offset_A = output_offset_A.contiguous().view((- 1))
output_mpe_A = output_mpe_A.contiguous().view((- 1))
output_velocity_A_dim = output_velocity_A.shape[(- 1)]
output_velocity_A = output_velocity_A.contiguous().view((- 1), output_velocity_A_dim)
output_onset_B = output_onset_B.contiguous().view((- 1))
output_offset_B = output_offset_B.contiguous().view((- 1))
output_mpe_B = output_mpe_B.contiguous().view((- 1))
output_velocity_B_dim = output_velocity_B.shape[(- 1)]
output_velocity_B = output_velocity_B.contiguous().view((- 1), output_velocity_B_dim)
label_onset = label_onset.contiguous().view((- 1))
label_offset = label_offset.contiguous().view((- 1))
label_mpe = label_mpe.contiguous().view((- 1))
label_velocity = label_velocity.contiguous().view((- 1))
loss_onset_A = criterion_onset_A(output_onset_A, label_onset)
loss_offset_A = criterion_offset_A(output_offset_A, label_offset)
loss_mpe_A = criterion_mpe_A(output_mpe_A, label_mpe)
loss_velocity_A = criterion_velocity_A(output_velocity_A, label_velocity)
loss_A = (((loss_onset_A + loss_offset_A) + loss_mpe_A) + loss_velocity_A)
loss_onset_B = criterion_onset_B(output_onset_B, label_onset)
loss_offset_B = criterion_offset_B(output_offset_B, label_offset)
loss_mpe_B = criterion_mpe_B(output_mpe_B, label_mpe)
loss_velocity_B = criterion_velocity_B(output_velocity_B, label_velocity)
loss_B = (((loss_onset_B + loss_offset_B) + loss_mpe_B) + loss_velocity_B)
loss = ((weight_A * loss_A) + (weight_B * loss_B))
epoch_loss += loss.item()
return (epoch_loss, len(iterator))
def load_yaml(path):
with open(path, 'r') as f:
model_config = yaml.load(f, Loader=yaml.FullLoader)
return model_config
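# Hedged usage sketch for load_yaml above: write a small YAML file and read it back.
# The file contents and keys are illustrative.
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as tmp:
    tmp.write("model:\n  name: demo\n  layers: 4\n")
    path = tmp.name

config = load_yaml(path)
assert config == {'model': {'name': 'demo', 'layers': 4}}
os.remove(path)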
class SpecificSpanSparseRelClsDecoder(DecoderBase, ChunkPairsDecoderMixin):
def __init__(self, config: SpecificSpanSparseRelClsDecoderConfig):
super().__init__()
self.max_span_size = config.max_span_size
self.max_size_id = config.max_size_id
self.neg_sampling_rate = config.neg_sampling_rate
self.none_label = config.none_label
self.idx2label = config.idx2label
self.ck_none_label = config.ck_none_label
self.idx2ck_label = config.idx2ck_label
self.filter_by_labels = config.filter_by_labels
self.existing_rht_labels = config.existing_rht_labels
self.filter_self_relation = config.filter_self_relation
self.existing_self_relation = config.existing_self_relation
if config.use_biaffine:
self.affine_head = config.affine.instantiate()
self.affine_tail = config.affine.instantiate()
else:
self.affine = config.affine.instantiate()
if config.use_context:
self.affine_ctx = config.affine_ctx.instantiate()
self.zero_context = torch.nn.Parameter(torch.empty(config.in_dim))
reinit_vector_parameter_(self.zero_context)
if (config.size_emb_dim > 0):
self.size_embedding = torch.nn.Embedding((config.max_size_id + 1), config.size_emb_dim)
reinit_embedding_(self.size_embedding)
if (config.label_emb_dim > 0):
self.label_embedding = torch.nn.Embedding(config.ck_voc_dim, config.label_emb_dim)
reinit_embedding_(self.label_embedding)
self.dropout = CombinedDropout(*config.hid_drop_rates)
self.U = torch.nn.Parameter(torch.empty(config.voc_dim, config.affine.out_dim, config.affine.out_dim))
self.W = torch.nn.Parameter(torch.empty(config.voc_dim, (config.affine.out_dim * (3 if config.use_context else 2))))
self.b = torch.nn.Parameter(torch.empty(config.voc_dim))
torch.nn.init.orthogonal_(self.U.data)
torch.nn.init.orthogonal_(self.W.data)
torch.nn.init.zeros_(self.b.data)
self.criterion = config.instantiate_criterion(reduction='sum')
def assign_chunks_pred(self, batch: Batch, batch_chunks_pred: List[List[tuple]]):
for (cp_obj, chunks_pred) in zip(batch.cp_objs, batch_chunks_pred):
if (cp_obj.chunks_pred is None):
cp_obj.chunks_pred = chunks_pred
cp_obj.build(self)
cp_obj.to(self.W.device)
def compute_scores(self, batch: Batch, full_hidden: torch.Tensor, all_query_hidden: Dict[(int, torch.Tensor)]):
all_hidden = ([full_hidden] + list(all_query_hidden.values()))
batch_scores = []
for (i, cp_obj) in enumerate(batch.cp_objs):
num_chunks = len(cp_obj.chunks)
if (num_chunks == 0):
scores = torch.empty(0, 0, self.W.size(0), device=full_hidden.device)
else:
span_hidden = torch.stack([all_hidden[((end - start) - 1)][(i, start)] for (label, start, end) in cp_obj.chunks])
if hasattr(self, 'size_embedding'):
size_embedded = self.size_embedding(cp_obj.span_size_ids)
span_hidden = torch.cat([span_hidden, size_embedded], dim=(- 1))
if hasattr(self, 'label_embedding'):
label_embedded = self.label_embedding(cp_obj.ck_label_ids)
span_hidden = torch.cat([span_hidden, label_embedded], dim=(- 1))
if hasattr(self, 'affine_head'):
affined_head = self.affine_head(span_hidden)
affined_tail = self.affine_tail(span_hidden)
else:
affined_head = self.affine(span_hidden)
affined_tail = self.affine(span_hidden)
scores1 = self.dropout(affined_head).matmul(self.U).matmul(self.dropout(affined_tail.permute(1, 0)))
affined_cat = torch.cat([self.dropout(affined_head).unsqueeze(1).expand((- 1), affined_tail.size(0), (- 1)), self.dropout(affined_tail).unsqueeze(0).expand(affined_head.size(0), (- 1), (- 1))], dim=(- 1))
if hasattr(self, 'affine_ctx'):
contexts = []
for ((h_label, h_start, h_end), (t_label, t_start, t_end)) in itertools.product(cp_obj.chunks, cp_obj.chunks):
if (h_end < t_start):
contexts.append(_collect_context_from_specific_span_hidden(i, h_end, t_start, all_hidden))
elif (t_end < h_start):
contexts.append(_collect_context_from_specific_span_hidden(i, t_end, h_start, all_hidden))
else:
contexts.append(self.zero_context)
contexts = torch.stack(contexts)
affined_ctx = self.affine_ctx(contexts).view(num_chunks, num_chunks, (- 1))
affined_cat = torch.cat([affined_cat, self.dropout(affined_ctx)], dim=(- 1))
scores2 = self.W.matmul(affined_cat.unsqueeze((- 1)))
scores = ((scores1.permute(1, 2, 0) + scores2.squeeze((- 1))) + self.b)
batch_scores.append(scores)
return batch_scores
def forward(self, batch: Batch, full_hidden: torch.Tensor, all_query_hidden: Dict[(int, torch.Tensor)]):
batch_scores = self.compute_scores(batch, full_hidden, all_query_hidden)
losses = []
for (scores, cp_obj) in zip(batch_scores, batch.cp_objs):
if (len(cp_obj.chunks) == 0):
loss = torch.tensor(0.0, device=full_hidden.device)
else:
label_ids = cp_obj.cp2label_id
if hasattr(cp_obj, 'non_mask'):
non_mask = cp_obj.non_mask
(scores, label_ids) = (scores[non_mask], label_ids[non_mask])
else:
(scores, label_ids) = (scores.flatten(end_dim=1), label_ids.flatten(end_dim=1))
loss = self.criterion(scores, label_ids)
losses.append(loss)
return torch.stack(losses)
def decode(self, batch: Batch, full_hidden: torch.Tensor, all_query_hidden: Dict[(int, torch.Tensor)]):
batch_scores = self.compute_scores(batch, full_hidden, all_query_hidden)
batch_relations = []
for (scores, cp_obj) in zip(batch_scores, batch.cp_objs):
if (len(cp_obj.chunks) == 0):
relations = []
else:
(confidences, label_ids) = scores.softmax(dim=(- 1)).max(dim=(- 1))
labels = [self.idx2label[i] for i in label_ids.flatten().cpu().tolist()]
relations = [(label, head, tail) for (label, (head, tail)) in zip(labels, itertools.product(cp_obj.chunks, cp_obj.chunks)) if (label != self.none_label)]
relations = [(label, head, tail) for (label, head, tail) in relations if (((not self.filter_by_labels) or ((label, head[0], tail[0]) in self.existing_rht_labels)) and ((not self.filter_self_relation) or self.existing_self_relation or (head[1:] != tail[1:])))]
batch_relations.append(relations)
return batch_relations
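# Standalone sketch of the pairwise scoring in compute_scores above: a biaffine term
# head @ U @ tail^T plus a linear term over the concatenated pair, giving one score per
# relation label for every (head, tail) chunk pair. All dimensions are illustrative.
import torch

num_chunks, hid, voc = 4, 8, 3
head = torch.randn(num_chunks, hid)   # affined head representations
tail = torch.randn(num_chunks, hid)   # affined tail representations
U = torch.randn(voc, hid, hid)
W = torch.randn(voc, 2 * hid)
b = torch.randn(voc)

scores1 = head.matmul(U).matmul(tail.permute(1, 0))           # (voc, N, N)
pair = torch.cat([head.unsqueeze(1).expand(-1, num_chunks, -1),
                  tail.unsqueeze(0).expand(num_chunks, -1, -1)], dim=-1)
scores2 = W.matmul(pair.unsqueeze(-1)).squeeze(-1)            # (N, N, voc)
scores = scores1.permute(1, 2, 0) + scores2 + b               # (N, N, voc)
print(scores.shape)                                           # torch.Size([4, 4, 3])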
class Attacker():
def __init__(self, args, model_tgt, tokenizer_tgt, model_mlm, tokenizer_mlm, use_bpe, threshold_pred_score) -> None:
self.args = args
self.model_tgt = model_tgt
self.tokenizer_tgt = tokenizer_tgt
self.model_mlm = model_mlm
self.tokenizer_mlm = tokenizer_mlm
self.use_bpe = use_bpe
self.threshold_pred_score = threshold_pred_score
def ga_attack(self, example, code, subs, initial_replace=None):
(logits, preds) = self.model_tgt.get_results([example], self.args.eval_batch_size)
orig_prob = logits[0]
orig_label = preds[0]
current_prob = max(orig_prob)
true_label = example[1].item()
adv_code = ''
temp_label = None
(identifiers, code_tokens) = get_identifiers(code, 'python')
prog_length = len(code_tokens)
processed_code = ' '.join(code_tokens)
(words, sub_words, keys) = _tokenize(processed_code, self.tokenizer_mlm)
variable_names = list(subs.keys())
if (not (orig_label == true_label)):
is_success = (- 4)
return (code, prog_length, adv_code, true_label, orig_label, temp_label, is_success, variable_names, None, None, None, None)
if (len(variable_names) == 0):
is_success = (- 3)
return (code, prog_length, adv_code, true_label, orig_label, temp_label, is_success, variable_names, None, None, None, None)
names_positions_dict = get_identifier_posistions_from_code(words, variable_names)
nb_changed_var = 0
nb_changed_pos = 0
is_success = (- 1)
variable_substitue_dict = {}
for tgt_word in names_positions_dict.keys():
variable_substitue_dict[tgt_word] = subs[tgt_word]
fitness_values = []
base_chromesome = {word: word for word in variable_substitue_dict.keys()}
population = [base_chromesome]
for tgt_word in variable_substitue_dict.keys():
if (initial_replace is None):
replace_examples = []
substitute_list = []
current_prob = max(orig_prob)
most_gap = 0.0
initial_candidate = tgt_word
tgt_positions = names_positions_dict[tgt_word]
for a_substitue in variable_substitue_dict[tgt_word]:
substitute_list.append(a_substitue)
temp_code = get_example(code, tgt_word, a_substitue, 'python')
new_feature = convert_code_to_features(temp_code, self.tokenizer_tgt, example[1].item(), self.args)
replace_examples.append(new_feature)
if (len(replace_examples) == 0):
continue
new_dataset = CodeDataset(replace_examples)
(logits, preds) = self.model_tgt.get_results(new_dataset, self.args.eval_batch_size)
_the_best_candidate = (- 1)
for (index, temp_prob) in enumerate(logits):
temp_label = preds[index]
gap = (current_prob - temp_prob[temp_label])
if (gap > most_gap):
most_gap = gap
_the_best_candidate = index
if (_the_best_candidate == (- 1)):
initial_candidate = tgt_word
else:
initial_candidate = substitute_list[_the_best_candidate]
else:
initial_candidate = initial_replace[tgt_word]
temp_chromesome = copy.deepcopy(base_chromesome)
temp_chromesome[tgt_word] = initial_candidate
population.append(temp_chromesome)
(temp_fitness, temp_label) = compute_fitness(temp_chromesome, self.model_tgt, self.tokenizer_tgt, max(orig_prob), orig_label, true_label, code, names_positions_dict, self.args)
fitness_values.append(temp_fitness)
cross_probability = 0.7
max_iter = max((5 * len(population)), 10)
for i in range(max_iter):
_temp_mutants = []
for j in range(self.args.eval_batch_size):
p = random.random()
(chromesome_1, index_1, chromesome_2, index_2) = select_parents(population)
if (p < cross_probability):
if (chromesome_1 == chromesome_2):
child_1 = mutate(chromesome_1, variable_substitue_dict)
continue
(child_1, child_2) = crossover(chromesome_1, chromesome_2)
if ((child_1 == chromesome_1) or (child_1 == chromesome_2)):
child_1 = mutate(chromesome_1, variable_substitue_dict)
else:
child_1 = mutate(chromesome_1, variable_substitue_dict)
_temp_mutants.append(child_1)
feature_list = []
for mutant in _temp_mutants:
_temp_code = map_chromesome(mutant, code, 'python')
_tmp_feature = convert_code_to_features(_temp_code, self.tokenizer_tgt, true_label, self.args)
feature_list.append(_tmp_feature)
if (len(feature_list) == 0):
continue
new_dataset = CodeDataset(feature_list)
(mutate_logits, mutate_preds) = self.model_tgt.get_results(new_dataset, self.args.eval_batch_size)
mutate_fitness_values = []
for (index, logits) in enumerate(mutate_logits):
if (mutate_preds[index] != orig_label):
adv_code = map_chromesome(_temp_mutants[index], code, 'python')
for old_word in _temp_mutants[index].keys():
if (old_word == _temp_mutants[index][old_word]):
nb_changed_var += 1
nb_changed_pos += len(names_positions_dict[old_word])
return (code, prog_length, adv_code, true_label, orig_label, mutate_preds[index], 1, variable_names, None, nb_changed_var, nb_changed_pos, _temp_mutants[index])
_tmp_fitness = (max(orig_prob) - logits[orig_label])
mutate_fitness_values.append(_tmp_fitness)
for (index, fitness_value) in enumerate(mutate_fitness_values):
min_value = min(fitness_values)
if (fitness_value > min_value):
min_index = fitness_values.index(min_value)
population[min_index] = _temp_mutants[index]
fitness_values[min_index] = fitness_value
return (code, prog_length, adv_code, true_label, orig_label, temp_label, is_success, variable_names, None, nb_changed_var, nb_changed_pos, None)
def greedy_attack(self, example, code, subs):
(logits, preds) = self.model_tgt.get_results([example], self.args.eval_batch_size)
orig_prob = logits[0]
orig_label = preds[0]
current_prob = max(orig_prob)
true_label = example[1].item()
adv_code = ''
temp_label = None
(identifiers, code_tokens) = get_identifiers(code, 'python')
prog_length = len(code_tokens)
processed_code = ' '.join(code_tokens)
(words, sub_words, keys) = _tokenize(processed_code, self.tokenizer_mlm)
variable_names = list(subs.keys())
if (not (orig_label == true_label)):
is_success = (- 4)
return (code, prog_length, adv_code, true_label, orig_label, temp_label, is_success, variable_names, None, None, None, None)
if (len(variable_names) == 0):
is_success = (- 3)
return (code, prog_length, adv_code, true_label, orig_label, temp_label, is_success, variable_names, None, None, None, None)
sub_words = (([self.tokenizer_tgt.cls_token] + sub_words[:(self.args.block_size - 2)]) + [self.tokenizer_tgt.sep_token])
(importance_score, replace_token_positions, names_positions_dict) = get_importance_score(self.args, example, processed_code, words, sub_words, variable_names, self.model_tgt, self.tokenizer_tgt, [0, 1], batch_size=self.args.eval_batch_size, max_length=self.args.block_size, model_type='classification')
if (importance_score is None):
return (code, prog_length, adv_code, true_label, orig_label, temp_label, (- 3), variable_names, None, None, None, None)
token_pos_to_score_pos = {}
for (i, token_pos) in enumerate(replace_token_positions):
token_pos_to_score_pos[token_pos] = i
names_to_importance_score = {}
for name in names_positions_dict.keys():
total_score = 0.0
positions = names_positions_dict[name]
for token_pos in positions:
total_score += importance_score[token_pos_to_score_pos[token_pos]]
names_to_importance_score[name] = total_score
sorted_list_of_names = sorted(names_to_importance_score.items(), key=(lambda x: x[1]), reverse=True)
final_code = copy.deepcopy(code)
nb_changed_var = 0
nb_changed_pos = 0
is_success = (- 1)
replaced_words = {}
for name_and_score in sorted_list_of_names:
tgt_word = name_and_score[0]
all_substitues = subs[tgt_word]
most_gap = 0.0
candidate = None
replace_examples = []
substitute_list = []
for substitute in all_substitues:
substitute_list.append(substitute)
temp_code = get_example(final_code, tgt_word, substitute, 'python')
new_feature = convert_code_to_features(temp_code, self.tokenizer_tgt, example[1].item(), self.args)
replace_examples.append(new_feature)
if (len(replace_examples) == 0):
continue
new_dataset = CodeDataset(replace_examples)
(logits, preds) = self.model_tgt.get_results(new_dataset, self.args.eval_batch_size)
assert (len(logits) == len(substitute_list))
for (index, temp_prob) in enumerate(logits):
temp_label = preds[index]
if (temp_label != orig_label):
is_success = 1
nb_changed_var += 1
nb_changed_pos += len(names_positions_dict[tgt_word])
candidate = substitute_list[index]
replaced_words[tgt_word] = candidate
adv_code = get_example(final_code, tgt_word, candidate, 'python')
print(('%s SUC! %s => %s (%.5f => %.5f)' % ('>>', tgt_word, candidate, current_prob, temp_prob[orig_label])), flush=True)
return (code, prog_length, adv_code, true_label, orig_label, temp_label, is_success, variable_names, names_to_importance_score, nb_changed_var, nb_changed_pos, replaced_words)
else:
gap = (current_prob - temp_prob[temp_label])
if (gap > most_gap):
most_gap = gap
candidate = substitute_list[index]
if (most_gap > 0):
nb_changed_var += 1
nb_changed_pos += len(names_positions_dict[tgt_word])
current_prob = (current_prob - most_gap)
final_code = get_example(final_code, tgt_word, candidate, 'python')
replaced_words[tgt_word] = candidate
print(('%s ACC! %s => %s (%.5f => %.5f)' % ('>>', tgt_word, candidate, (current_prob + most_gap), current_prob)), flush=True)
else:
replaced_words[tgt_word] = tgt_word
adv_code = final_code
return (code, prog_length, adv_code, true_label, orig_label, temp_label, is_success, variable_names, names_to_importance_score, nb_changed_var, nb_changed_pos, replaced_words)
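# Compact sketch of the core greedy step used in greedy_attack above: for one identifier,
# try each candidate substitute and keep the one that lowers the model's probability for
# the original label the most. `predict_proba` and `rename` are illustrative stand-ins for
# the model and code-rewriting utilities used in the class, not real APIs.
def greedy_substitute(code, tgt_word, substitutes, orig_label, predict_proba, rename):
    best_gap, best_sub = 0.0, None
    current_prob = predict_proba(code)[orig_label]
    for sub in substitutes:
        prob = predict_proba(rename(code, tgt_word, sub))[orig_label]
        gap = current_prob - prob
        if gap > best_gap:
            best_gap, best_sub = gap, sub
    # best_sub is None when no substitute reduces the probability.
    return best_sub, best_gap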
class BaseMultilayerPerceptron(BaseEstimator, metaclass=ABCMeta):
_parameter_constraints: dict = {'hidden_layer_sizes': ['array-like', Interval(Integral, 1, None, closed='left')], 'activation': [StrOptions({'identity', 'logistic', 'tanh', 'relu'})], 'solver': [StrOptions({'lbfgs', 'sgd', 'adam'})], 'alpha': [Interval(Real, 0, None, closed='left')], 'batch_size': [StrOptions({'auto'}), Interval(Integral, 1, None, closed='left')], 'learning_rate': [StrOptions({'constant', 'invscaling', 'adaptive'})], 'learning_rate_init': [Interval(Real, 0, None, closed='neither')], 'power_t': [Interval(Real, 0, None, closed='left')], 'max_iter': [Interval(Integral, 1, None, closed='left')], 'shuffle': ['boolean'], 'random_state': ['random_state'], 'tol': [Interval(Real, 0, None, closed='left')], 'verbose': ['verbose'], 'warm_start': ['boolean'], 'momentum': [Interval(Real, 0, 1, closed='both')], 'nesterovs_momentum': ['boolean'], 'early_stopping': ['boolean'], 'validation_fraction': [Interval(Real, 0, 1, closed='left')], 'beta_1': [Interval(Real, 0, 1, closed='left')], 'beta_2': [Interval(Real, 0, 1, closed='left')], 'epsilon': [Interval(Real, 0, None, closed='neither')], 'n_iter_no_change': [Interval(Integral, 1, None, closed='left'), Options(Real, {np.inf})], 'max_fun': [Interval(Integral, 1, None, closed='left')]}
def __init__(self, hidden_layer_sizes, activation, solver, alpha, batch_size, learning_rate, learning_rate_init, power_t, max_iter, loss, shuffle, random_state, tol, verbose, warm_start, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, max_fun):
self.activation = activation
self.solver = solver
self.alpha = alpha
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_init = learning_rate_init
self.power_t = power_t
self.max_iter = max_iter
self.loss = loss
self.hidden_layer_sizes = hidden_layer_sizes
self.shuffle = shuffle
self.random_state = random_state
self.tol = tol
self.verbose = verbose
self.warm_start = warm_start
self.momentum = momentum
self.nesterovs_momentum = nesterovs_momentum
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.n_iter_no_change = n_iter_no_change
self.max_fun = max_fun
def _unpack(self, packed_parameters):
for i in range((self.n_layers_ - 1)):
(start, end, shape) = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
(start, end) = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
def _forward_pass(self, activations):
hidden_activation = ACTIVATIONS[self.activation]
for i in range((self.n_layers_ - 1)):
activations[(i + 1)] = safe_sparse_dot(activations[i], self.coefs_[i])
activations[(i + 1)] += self.intercepts_[i]
if ((i + 1) != (self.n_layers_ - 1)):
hidden_activation(activations[(i + 1)])
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activations[(i + 1)])
return activations
def _forward_pass_fast(self, X, check_input=True):
if check_input:
X = self._validate_data(X, accept_sparse=['csr', 'csc'], reset=False)
activation = X
hidden_activation = ACTIVATIONS[self.activation]
for i in range((self.n_layers_ - 1)):
activation = safe_sparse_dot(activation, self.coefs_[i])
activation += self.intercepts_[i]
if (i != (self.n_layers_ - 2)):
hidden_activation(activation)
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activation)
return activation
def _compute_loss_grad(self, layer, n_samples, activations, deltas, coef_grads, intercept_grads):
coef_grads[layer] = safe_sparse_dot(activations[layer].T, deltas[layer])
coef_grads[layer] += (self.alpha * self.coefs_[layer])
coef_grads[layer] /= n_samples
intercept_grads[layer] = np.mean(deltas[layer], 0)
def _loss_grad_lbfgs(self, packed_coef_inter, X, y, activations, deltas, coef_grads, intercept_grads):
self._unpack(packed_coef_inter)
(loss, coef_grads, intercept_grads) = self._backprop(X, y, activations, deltas, coef_grads, intercept_grads)
grad = _pack(coef_grads, intercept_grads)
return (loss, grad)
def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
n_samples = X.shape[0]
activations = self._forward_pass(activations)
loss_func_name = self.loss
if ((loss_func_name == 'log_loss') and (self.out_activation_ == 'logistic')):
loss_func_name = 'binary_log_loss'
loss = LOSS_FUNCTIONS[loss_func_name](y, activations[(- 1)])
values = 0
for s in self.coefs_:
s = s.ravel()
values += np.dot(s, s)
loss += (((0.5 * self.alpha) * values) / n_samples)
last = (self.n_layers_ - 2)
deltas[last] = (activations[(- 1)] - y)
self._compute_loss_grad(last, n_samples, activations, deltas, coef_grads, intercept_grads)
inplace_derivative = DERIVATIVES[self.activation]
for i in range((self.n_layers_ - 2), 0, (- 1)):
deltas[(i - 1)] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
inplace_derivative(activations[i], deltas[(i - 1)])
self._compute_loss_grad((i - 1), n_samples, activations, deltas, coef_grads, intercept_grads)
return (loss, coef_grads, intercept_grads)
def _initialize(self, y, layer_units, dtype):
self.n_iter_ = 0
self.t_ = 0
self.n_outputs_ = y.shape[1]
self.n_layers_ = len(layer_units)
if (not is_classifier(self)):
self.out_activation_ = 'identity'
elif (self._label_binarizer.y_type_ == 'multiclass'):
self.out_activation_ = 'softmax'
else:
self.out_activation_ = 'logistic'
self.coefs_ = []
self.intercepts_ = []
for i in range((self.n_layers_ - 1)):
(coef_init, intercept_init) = self._init_coef(layer_units[i], layer_units[(i + 1)], dtype)
self.coefs_.append(coef_init)
self.intercepts_.append(intercept_init)
if (self.solver in _STOCHASTIC_SOLVERS):
self.loss_curve_ = []
self._no_improvement_count = 0
if self.early_stopping:
self.validation_scores_ = []
self.best_validation_score_ = (- np.inf)
self.best_loss_ = None
else:
self.best_loss_ = np.inf
self.validation_scores_ = None
self.best_validation_score_ = None
def _init_coef(self, fan_in, fan_out, dtype):
factor = 6.0
if (self.activation == 'logistic'):
factor = 2.0
init_bound = np.sqrt((factor / (fan_in + fan_out)))
coef_init = self._random_state.uniform((- init_bound), init_bound, (fan_in, fan_out))
intercept_init = self._random_state.uniform((- init_bound), init_bound, fan_out)
coef_init = coef_init.astype(dtype, copy=False)
intercept_init = intercept_init.astype(dtype, copy=False)
return (coef_init, intercept_init)
def _fit(self, X, y, incremental=False):
hidden_layer_sizes = self.hidden_layer_sizes
if (not hasattr(hidden_layer_sizes, '__iter__')):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
if np.any((np.array(hidden_layer_sizes) <= 0)):
raise ValueError(('hidden_layer_sizes must be > 0, got %s.' % hidden_layer_sizes))
first_pass = ((not hasattr(self, 'coefs_')) or ((not self.warm_start) and (not incremental)))
(X, y) = self._validate_input(X, y, incremental, reset=first_pass)
(n_samples, n_features) = X.shape
if (y.ndim == 1):
y = y.reshape(((- 1), 1))
self.n_outputs_ = y.shape[1]
layer_units = (([n_features] + hidden_layer_sizes) + [self.n_outputs_])
self._random_state = check_random_state(self.random_state)
if first_pass:
self._initialize(y, layer_units, X.dtype)
activations = ([X] + ([None] * (len(layer_units) - 1)))
deltas = ([None] * (len(activations) - 1))
coef_grads = [np.empty((n_fan_in_, n_fan_out_), dtype=X.dtype) for (n_fan_in_, n_fan_out_) in zip(layer_units[:(- 1)], layer_units[1:])]
intercept_grads = [np.empty(n_fan_out_, dtype=X.dtype) for n_fan_out_ in layer_units[1:]]
if (self.solver in _STOCHASTIC_SOLVERS):
self._fit_stochastic(X, y, activations, deltas, coef_grads, intercept_grads, layer_units, incremental)
elif (self.solver == 'lbfgs'):
self._fit_lbfgs(X, y, activations, deltas, coef_grads, intercept_grads, layer_units)
weights = chain(self.coefs_, self.intercepts_)
if (not all((np.isfinite(w).all() for w in weights))):
raise ValueError('Solver produced non-finite parameter weights. The input data may contain large values and need to be preprocessed.')
return self
def _fit_lbfgs(self, X, y, activations, deltas, coef_grads, intercept_grads, layer_units):
self._coef_indptr = []
self._intercept_indptr = []
start = 0
for i in range((self.n_layers_ - 1)):
(n_fan_in, n_fan_out) = (layer_units[i], layer_units[(i + 1)])
end = (start + (n_fan_in * n_fan_out))
self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
start = end
for i in range((self.n_layers_ - 1)):
end = (start + layer_units[(i + 1)])
self._intercept_indptr.append((start, end))
start = end
packed_coef_inter = _pack(self.coefs_, self.intercepts_)
if ((self.verbose is True) or (self.verbose >= 1)):
iprint = 1
else:
iprint = (- 1)
opt_res = scipy.optimize.minimize(self._loss_grad_lbfgs, packed_coef_inter, method='L-BFGS-B', jac=True, options={'maxfun': self.max_fun, 'maxiter': self.max_iter, 'iprint': iprint, 'gtol': self.tol}, args=(X, y, activations, deltas, coef_grads, intercept_grads))
self.n_iter_ = _check_optimize_result('lbfgs', opt_res, self.max_iter)
self.loss_ = opt_res.fun
self._unpack(opt_res.x)
def _fit_stochastic(self, X, y, activations, deltas, coef_grads, intercept_grads, layer_units, incremental):
params = (self.coefs_ + self.intercepts_)
if ((not incremental) or (not hasattr(self, '_optimizer'))):
if (self.solver == 'sgd'):
self._optimizer = SGDOptimizer(params, self.learning_rate_init, self.learning_rate, self.momentum, self.nesterovs_momentum, self.power_t)
elif (self.solver == 'adam'):
self._optimizer = AdamOptimizer(params, self.learning_rate_init, self.beta_1, self.beta_2, self.epsilon)
if (self.early_stopping and incremental):
raise ValueError('partial_fit does not support early_stopping=True')
early_stopping = self.early_stopping
if early_stopping:
should_stratify = (is_classifier(self) and (self.n_outputs_ == 1))
stratify = (y if should_stratify else None)
(X, X_val, y, y_val) = train_test_split(X, y, random_state=self._random_state, test_size=self.validation_fraction, stratify=stratify)
if is_classifier(self):
y_val = self._label_binarizer.inverse_transform(y_val)
else:
X_val = None
y_val = None
n_samples = X.shape[0]
sample_idx = np.arange(n_samples, dtype=int)
if (self.batch_size == 'auto'):
batch_size = min(200, n_samples)
else:
if (self.batch_size > n_samples):
warnings.warn('Got `batch_size` less than 1 or larger than sample size. It is going to be clipped')
batch_size = np.clip(self.batch_size, 1, n_samples)
try:
self.n_iter_ = 0
for it in range(self.max_iter):
if self.shuffle:
sample_idx = shuffle(sample_idx, random_state=self._random_state)
accumulated_loss = 0.0
for batch_slice in gen_batches(n_samples, batch_size):
if self.shuffle:
X_batch = _safe_indexing(X, sample_idx[batch_slice])
y_batch = y[sample_idx[batch_slice]]
else:
X_batch = X[batch_slice]
y_batch = y[batch_slice]
activations[0] = X_batch
(batch_loss, coef_grads, intercept_grads) = self._backprop(X_batch, y_batch, activations, deltas, coef_grads, intercept_grads)
accumulated_loss += (batch_loss * (batch_slice.stop - batch_slice.start))
grads = (coef_grads + intercept_grads)
self._optimizer.update_params(params, grads)
self.n_iter_ += 1
self.loss_ = (accumulated_loss / X.shape[0])
self.t_ += n_samples
self.loss_curve_.append(self.loss_)
if self.verbose:
print(('Iteration %d, loss = %.8f' % (self.n_iter_, self.loss_)))
self._update_no_improvement_count(early_stopping, X_val, y_val)
self._optimizer.iteration_ends(self.t_)
if (self._no_improvement_count > self.n_iter_no_change):
if early_stopping:
msg = ('Validation score did not improve more than tol=%f for %d consecutive epochs.' % (self.tol, self.n_iter_no_change))
else:
msg = ('Training loss did not improve more than tol=%f for %d consecutive epochs.' % (self.tol, self.n_iter_no_change))
is_stopping = self._optimizer.trigger_stopping(msg, self.verbose)
if is_stopping:
break
else:
self._no_improvement_count = 0
if incremental:
break
if (self.n_iter_ == self.max_iter):
warnings.warn(("Stochastic Optimizer: Maximum iterations (%d) reached and the optimization hasn't converged yet." % self.max_iter), ConvergenceWarning)
except KeyboardInterrupt:
warnings.warn('Training interrupted by user.')
if early_stopping:
self.coefs_ = self._best_coefs
self.intercepts_ = self._best_intercepts
def _update_no_improvement_count(self, early_stopping, X_val, y_val):
if early_stopping:
self.validation_scores_.append(self._score(X_val, y_val))
if self.verbose:
print(('Validation score: %f' % self.validation_scores_[(- 1)]))
last_valid_score = self.validation_scores_[(- 1)]
if (last_valid_score < (self.best_validation_score_ + self.tol)):
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if (last_valid_score > self.best_validation_score_):
self.best_validation_score_ = last_valid_score
self._best_coefs = [c.copy() for c in self.coefs_]
self._best_intercepts = [i.copy() for i in self.intercepts_]
else:
if (self.loss_curve_[(- 1)] > (self.best_loss_ - self.tol)):
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if (self.loss_curve_[(- 1)] < self.best_loss_):
self.best_loss_ = self.loss_curve_[(- 1)]
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y):
return self._fit(X, y, incremental=False)
def _check_solver(self):
if (self.solver not in _STOCHASTIC_SOLVERS):
raise AttributeError(('partial_fit is only available for stochastic optimizers. %s is not stochastic.' % self.solver))
return True
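# Self-contained sketch of the Glorot-style initialisation used by _init_coef above:
# weights are drawn uniformly from [-bound, bound] with bound = sqrt(factor / (fan_in + fan_out)),
# where factor is 2 for the logistic activation and 6 otherwise. Sizes are illustrative.
import numpy as np

def glorot_uniform(fan_in, fan_out, activation='relu', rng=None):
    if rng is None:
        rng = np.random.RandomState(0)
    factor = 2.0 if activation == 'logistic' else 6.0
    bound = np.sqrt(factor / (fan_in + fan_out))
    return rng.uniform(-bound, bound, (fan_in, fan_out))

W = glorot_uniform(128, 64)
print(W.shape, float(np.abs(W).max()) <= np.sqrt(6.0 / (128 + 64)))   # (128, 64) True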
def build_from_cfg(cfg, registry, default_args=None):
assert (isinstance(cfg, dict) and ('type' in cfg))
assert (isinstance(default_args, dict) or (default_args is None))
args = cfg.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
obj_cls = registry.get(obj_type)
if (obj_cls is None):
raise KeyError('{} is not in the {} registry'.format(obj_type, registry.name))
elif inspect.isclass(obj_type):
obj_cls = obj_type
else:
raise TypeError('type must be a str or valid type, but got {}'.format(type(obj_type)))
if (default_args is not None):
for (name, value) in default_args.items():
args.setdefault(name, value)
return obj_cls(**args)
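# Hedged usage sketch for build_from_cfg above with a minimal stand-in registry (the
# ToyRegistry class and the config keys are illustrative; mmcv must still be importable
# since the function itself calls mmcv.is_str).
class ToyRegistry:
    def __init__(self, name):
        self.name = name
        self._modules = {}
    def register(self, cls):
        self._modules[cls.__name__] = cls
        return cls
    def get(self, key):
        return self._modules.get(key)

BACKBONES = ToyRegistry('backbone')

@BACKBONES.register
class ResNet:
    def __init__(self, depth, norm='BN'):
        self.depth, self.norm = depth, norm

model = build_from_cfg(dict(type='ResNet', depth=50), BACKBONES, default_args=dict(norm='GN'))
print(model.depth, model.norm)   # 50 GN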
def Function(name, *sig):
sig = _get_args(sig)
if z3_debug():
_z3_assert((len(sig) > 0), 'At least two arguments expected')
arity = (len(sig) - 1)
rng = sig[arity]
if z3_debug():
_z3_assert(is_sort(rng), 'Z3 sort expected')
dom = (Sort * arity)()
for i in range(arity):
if z3_debug():
_z3_assert(is_sort(sig[i]), 'Z3 sort expected')
dom[i] = sig[i].ast
ctx = rng.ctx
return FuncDeclRef(Z3_mk_func_decl(ctx.ref(), to_symbol(name, ctx), arity, dom, rng.ast), ctx)
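# Hedged usage sketch for Function above (standard z3py): declare an uninterpreted
# function over integers and solve a small constraint involving it. The names x and f
# are illustrative.
from z3 import Function, Int, IntSort, Solver

f = Function('f', IntSort(), IntSort(), IntSort())
x = Int('x')
s = Solver()
s.add(x == 2, f(x, x) == x + 1)
print(s.check())                  # sat
print(s.model().eval(f(x, x)))    # 3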
@skip_if_win32()
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
def test_common_errors(self):
if (torch.cuda.device_count() == 0):
raise unittest.SkipTest('No GPUs available, skipping test')
vars = {'WORLD_SIZE': '1', 'RANK': '0', 'MASTER_ADDR': '127.0.0.1', 'MASTER_PORT': common.find_free_port()}
class Env(object):
def __init__(self, vars):
self.vars = vars
def __enter__(self):
for (key, value) in self.vars.items():
os.environ[key] = str(value)
def __exit__(self, type, value, traceback):
for key in self.vars.keys():
del os.environ[key]
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, 'WORLD_SIZE')):
with self.assertRaisesRegex(ValueError, 'WORLD_SIZE expected'):
gen = c10d.rendezvous('env://')
next(gen)
c10d.init_process_group(backend='nccl', world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, 'RANK')):
with self.assertRaisesRegex(ValueError, 'RANK expected'):
gen = c10d.rendezvous('env://')
next(gen)
c10d.init_process_group(backend='nccl', rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ['RANK', 'WORLD_SIZE'])):
c10d.init_process_group(backend='nccl', rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend='nccl')
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, 'MASTER_ADDR')):
with self.assertRaisesRegex(ValueError, 'MASTER_ADDR expected'):
gen = c10d.rendezvous('env://')
next(gen)
with Env(without(vars, 'MASTER_PORT')):
with self.assertRaisesRegex(ValueError, 'MASTER_PORT expected'):
gen = c10d.rendezvous('env://')
next(gen)
with Env(without(vars, 'WORLD_SIZE')):
gen = c10d.rendezvous('env://?world_size={}'.format(1))
(_, _, size) = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, 'RANK')):
gen = c10d.rendezvous('env://?rank={}'.format(0))
(_, rank, _) = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ['RANK', 'WORLD_SIZE'])):
gen = c10d.rendezvous('env://?rank={}&world_size={}'.format(0, 1))
(_, rank, size) = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
@retry_on_connect_failures
def test_nominal(self):
os.environ['WORLD_SIZE'] = '1'
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = str(common.find_free_port())
os.environ['RANK'] = '0'
gen0 = c10d.rendezvous('env://')
(store0, rank0, size0) = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
store0.set('key0', 'value0')
self.assertEqual(b'value0', store0.get('key0'))
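# Hedged single-process sketch of the env:// rendezvous the tests above exercise.
# It requires a CUDA build of PyTorch with NCCL available; the port value is illustrative.
import os
import torch.distributed as dist

os.environ.update({'MASTER_ADDR': '127.0.0.1', 'MASTER_PORT': '29500',
                   'RANK': '0', 'WORLD_SIZE': '1'})
dist.init_process_group(backend='nccl')
assert dist.get_rank() == 0 and dist.get_world_size() == 1
dist.destroy_process_group()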
def initialize(config):
random.seed(config.random_seed)
population_pickle = os.path.join(os.path.dirname(__file__), 'population.pkl.gz')
popu = pickle.load(gzip.open(population_pickle, 'rb'))
alive = []
for (num, person) in popu.items():
person['num'] = num
person['months_in_prison'] = 0
person['harsh_sentence'] = []
if valid_age(person, config.start_iter, config.min_age):
alive.append(person)
num_infect = round((config.percent * len(alive)))
potentials = []
for person in alive:
if ((config.start_iter - person['birth']) <= 45):
potentials.append(person)
infected = random.sample(potentials, num_infect)
for person in infected:
(sentence, harsh_sentence) = generate_sentence(person, (- 1), 0, config)
person['incarcerated'] = sentence
person['harsh_sentence'].append(harsh_sentence)
return popu
class Cluster():
def __init__(self, cloud_config, cluster_config, no_start=False, no_delete=False, containers=None):
self._cloud_config = cloud_config
self._cluster_config = cluster_config
self._cluster_cmd = 'gcloud beta container --project {} clusters --zone {}'.format(self._cloud_config.project, self._cluster_config.zone)
self._no_start = no_start
self._no_delete = no_delete
self._containers = (containers or [])
def config(self):
return self._cluster_config
def get_kube_info(self, kind, namespace='default'):
return json.loads(run('kubectl get {} -o json -n {}'.format(kind, namespace)))
def get_by_owner(self, ty, owner, namespace='default'):
return run('kubectl get {} -o json -n {} | jq -r \'.items[] | select(.metadata.ownerReferences[0].name == "{}") | .metadata.name\''.format(ty, namespace, owner))
def get_object(self, info, name):
for item in info['items']:
if (item['metadata']['name'] == name):
return item
return None
def get_pod(self, deployment, namespace='default'):
while True:
rs = self.get_by_owner('rs', deployment, namespace)
pod_name = self.get_by_owner('pod', rs, namespace)
if (('\n' not in pod_name) and (pod_name != '')):
break
time.sleep(1)
while True:
pod = self.get_object(self.get_kube_info('pod', namespace), pod_name)
if (pod is not None):
return pod
time.sleep(1)
def running(self, pool=MASTER_POOL):
return (run('{cmd} list --cluster={id} --format=json | jq \'.[] | select(.name == "{pool}")\''.format(cmd=self._cluster_cmd.replace('clusters', 'node-pools'), id=self._cluster_config.id, pool=pool)) != '')
def get_credentials(self):
if (self._cluster_config.id not in run('kubectl config view -o json | jq \'.["current-context"]\' -r')):
run('{cmd} get-credentials {id}'.format(cmd=self._cluster_cmd, id=self._cluster_config.id))
def create_object(self, template):
with tempfile.NamedTemporaryFile() as f:
f.write(yaml.dump(template).encode())
f.flush()
run('kubectl create -f {}'.format(f.name))
def make_container(self, name, machine_config):
template = {'name': name, 'image': machine_config.image, 'command': ['/bin/bash'], 'args': ['-c', 'python3 -c "from scannerpy import kube; kube.{}()"'.format(name)], 'imagePullPolicy': 'Always', 'volumeMounts': [{'name': 'service-key', 'mountPath': '/secret'}, {'name': 'scanner-config', 'mountPath': '/root/.scanner/config.toml', 'subPath': 'config.toml'}], 'env': [{'name': 'GOOGLE_APPLICATION_CREDENTIALS', 'value': '/secret/{}'.format(os.path.basename(self._cloud_config.service_key))}, {'name': 'AWS_ACCESS_KEY_ID', 'valueFrom': {'secretKeyRef': {'name': 'aws-storage-key', 'key': 'AWS_ACCESS_KEY_ID'}}}, {'name': 'AWS_SECRET_ACCESS_KEY', 'valueFrom': {'secretKeyRef': {'name': 'aws-storage-key', 'key': 'AWS_SECRET_ACCESS_KEY'}}}, {'name': 'NO_WORKERS_TIMEOUT', 'value': str(self._cluster_config.no_workers_timeout)}, {'name': 'GLOG_minloglevel', 'value': '0'}, {'name': 'GLOG_logtostderr', 'value': '1'}, {'name': 'GLOG_v', 'value': ('2' if (name == 'master') else '1')}, {'name': 'NUM_LOAD_WORKERS', 'value': str(self._cluster_config.num_load_workers)}, {'name': 'NUM_SAVE_WORKERS', 'value': str(self._cluster_config.num_save_workers)}, {'name': 'PIPELINES', 'value': base64.b64encode(cloudpickle.dumps(self._cluster_config.pipelines))}, {'name': 'FORCE_CPU_DECODE', 'value': '1'}], 'resources': {}, 'securityContext': {'capabilities': {'add': ['SYS_PTRACE']}}}
if (name == 'master'):
template['ports'] = [{'containerPort': 8080}]
if (machine_config.gpu > 0):
template['resources']['limits'] = {'nvidia.com/gpu': machine_config.gpu}
elif (name == 'worker'):
template['resources']['requests'] = {'cpu': ((machine_config.type.get_cpu() / 2.0) + 0.1)}
return template
def make_deployment(self, name, machine_config, replicas):
template = {'apiVersion': 'apps/v1beta1', 'kind': 'Deployment', 'metadata': {'name': 'scanner-{}'.format(name)}, 'spec': {'replicas': replicas, 'template': {'metadata': {'labels': {'app': 'scanner-{}'.format(name)}}, 'spec': {'containers': ([self.make_container(name, machine_config)] + self._containers), 'volumes': [{'name': 'service-key', 'secret': {'secretName': 'service-key', 'items': [{'key': os.path.basename(self._cloud_config.service_key), 'path': os.path.basename(self._cloud_config.service_key)}]}}, {'name': 'scanner-config', 'configMap': {'name': 'scanner-config'}}], 'nodeSelector': {'cloud.google.com/gke-nodepool': (MASTER_POOL if (name == 'master') else WORKER_POOL)}}}}}
return template
def _cluster_start(self):
cfg = self._cluster_config
fmt_args = {'cmd': self._cluster_cmd, 'cluster_id': cfg.id, 'cluster_version': cfg.kube_version, 'master_machine': cfg.master.type.get_name(), 'master_disk': cfg.master.disk, 'worker_machine': cfg.worker.type.get_name(), 'worker_disk': cfg.worker.disk, 'scopes': ','.join(cfg.scopes), 'initial_size': cfg.num_workers, 'accelerator': ('--accelerator type={},count={}'.format(cfg.worker.gpu_type, cfg.worker.gpu) if (cfg.worker.gpu > 0) else ''), 'preemptible': ('--preemptible' if cfg.worker.preemptible else ''), 'autoscaling': ('--enable-autoscaling --min-nodes 0 --max-nodes {}'.format(cfg.num_workers) if cfg.autoscale else ''), 'master_cpu_platform': ('--min-cpu-platform skylake' if (cfg.master.type.get_cpu() > 64) else ''), 'worker_cpu_platform': ('--min-cpu-platform skylake' if (cfg.worker.type.get_cpu() > 64) else '')}
cluster_cmd = '\n{cmd} -q create "{cluster_id}" --cluster-version "{cluster_version}" --machine-type "{master_machine}" --image-type "COS" --disk-size "{master_disk}" --scopes {scopes} --num-nodes "1" --enable-cloud-logging --enable-autoscaling --min-nodes 1 --max-nodes 1 {accelerator} {master_cpu_platform}\n '.format(**fmt_args)
if (not self.running(pool=MASTER_POOL)):
try:
log.info('Cluster price: ${:.2f}/hr'.format(cfg.price()))
except Exception:
log.info('Failed to compute cluster price with error:')
traceback.print_exc()
log.info('Creating master...')
run(cluster_cmd)
log.info(' project=self._cloud_config.project, **fmt_args))
fmt_args['cmd'] = fmt_args['cmd'].replace('clusters', 'node-pools')
pool_cmd = '\n {cmd} -q create workers --cluster "{cluster_id}" --machine-type "{worker_machine}" --image-type "COS" --disk-size "{worker_disk}" --scopes {scopes} --num-nodes "{initial_size}" {autoscaling} {preemptible} {accelerator} {worker_cpu_platform}\n '.format(**fmt_args)
log.info('Creating workers...')
try:
run(pool_cmd)
except sp.CalledProcessError as e:
log.error('Worker pool command errored: {}'.format(e))
log.info('Waiting for cluster to reconcile...')
if (cfg.num_workers > 1):
time.sleep(60)
while True:
cluster_status = run('{cmd} list --format=json | jq -r \'.[] | select(.name == "{id}") | .status\''.format(cmd=self._cluster_cmd, id=cfg.id))
if (cluster_status == 'RECONCILING'):
time.sleep(5)
else:
if (cluster_status != 'RUNNING'):
raise Exception('Expected cluster status RUNNING, got: {}'.format(cluster_status))
break
if (cfg.worker.gpu > 0):
run('kubectl apply -f
def _kube_start(self, reset=True, wait=True):
cfg = self._cluster_config
deploy = self.get_object(self.get_kube_info('deployments'), 'scanner-worker')
if (deploy is not None):
num_workers = deploy['status']['replicas']
else:
num_workers = cfg.num_workers
if reset:
log.info('Deleting current deployments...')
run('kubectl delete deploy/scanner-master deploy/scanner-worker --ignore-not-found=true')
run('kubectl delete service/scanner-master --ignore-not-found=true')
secrets = self.get_kube_info('secrets')
if (self.get_object(secrets, 'service-key') is None):
log.info('Making secrets...')
run('kubectl create secret generic service-key --from-file={}'.format(self._cloud_config.service_key))
if (self.get_object(secrets, 'aws-storage-key') is None):
run('kubectl create secret generic aws-storage-key --from-literal=AWS_ACCESS_KEY_ID={} --from-literal=AWS_SECRET_ACCESS_KEY={}'.format(self._cloud_config.storage_key_id, self._cloud_config.storage_key_secret))
configmaps = self.get_kube_info('configmaps')
if (self.get_object(configmaps, 'scanner-config') is None):
run('kubectl create configmap scanner-config --from-file={}'.format(self._cluster_config.scanner_config))
deployments = self.get_kube_info('deployments')
if (self.get_object(deployments, 'scanner-master') is None):
log.info('Creating deployments...')
self.create_object(self.make_deployment('master', self._cluster_config.master, 1))
services = self.get_kube_info('services')
if (self.get_object(services, 'scanner-master') is None):
run('kubectl expose deploy/scanner-master --type=NodePort --port=8080')
if (self.get_object(deployments, 'scanner-worker') is None):
self.create_object(self.make_deployment('worker', self._cluster_config.worker, num_workers))
if wait:
log.info('Waiting on master...')
while True:
master = self.get_pod('scanner-master')
if (master['status']['phase'] == 'Running'):
break
time.sleep(1.0)
def start(self, reset=True, wait=True):
self._cluster_start()
self.get_credentials()
self._kube_start(reset, wait)
log.info('Finished startup.')
def resize(self, size):
log.info('Resized cluster price: ${:.2f}/hr'.format(evolve(self._cluster_config, num_workers=size).price()))
log.info('Resizing cluster...')
if (not self._cluster_config.autoscale):
run('{cmd} resize {id} -q --node-pool=workers --size={size}'.format(cmd=self._cluster_cmd, id=self._cluster_config.id, size=size))
else:
run('{cmd} update {id} -q --node-pool=workers --enable-autoscaling --max-nodes={size}'.format(cmd=self._cluster_cmd, id=self._cluster_config.id, size=size))
log.info('Scaling deployment...')
run('kubectl scale deploy/scanner-worker --replicas={}'.format(size))
def delete(self, prompt=False):
run('{cmd} {prompt} delete {id}'.format(cmd=self._cluster_cmd, prompt=('-q' if (not prompt) else ''), id=self._cluster_config.id))
def master_address(self):
ip = run('\n kubectl get pods -l \'app=scanner-master\' -o json | jq \'.items[0].spec.nodeName\' -r | xargs -I {} kubectl get nodes/{} -o json | jq \'.status.addresses[] | select(.type == "ExternalIP") | .address\' -r\n ')
port = run("\n kubectl get svc/scanner-master -o json | jq '.spec.ports[0].nodePort' -r\n ")
return '{}:{}'.format(ip, port)
def client(self, retries=3, **kwargs):
while True:
try:
return scannerpy.Client(master=self.master_address(), start_cluster=False, config_path=self._cluster_config.scanner_config, **kwargs)
except scannerpy.ScannerException:
if (retries == 0):
raise
else:
retries -= 1
def job_status(self):
sc = self.client()
jobs = sc.get_active_jobs()
if (len(jobs) > 0):
sc.wait_on_job(jobs[0])
def healthy(self):
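# The cluster is considered healthy when no pod has phase Failed or a container terminated with reason OOMKilled.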
failed = run('\n kubectl get pod -o json | jq -r \'.items[].status | select(.phase == "Failed" or .containerStatuses[0].lastState.terminated.reason == "OOMKilled")\'\n ')
return (failed == '')
def monitor(self, sc):
done = None
done_cvar = Condition()
def loop_set(cond, val):
def wrapper():
nonlocal done
nonlocal done_cvar
while True:
if (done is not None):
break
if cond():
with done_cvar:
done = val
done_cvar.notify()
time.sleep(1.0)
return wrapper
jobs = sc.get_active_jobs()
if (len(jobs) == 0):
raise Exception('No active jobs')
gen = sc.wait_on_job_gen(jobs[0])
def scanner_check():
nonlocal gen
try:
next(gen)
return False
except StopIteration:
return True
def health_check():
return (not self.healthy())
metrics = []
def resource_check():
nonlocal metrics
metrics.extend([{'TIME': datetime.now(), **r} for r in self.resource_metrics()])
return False
checks = [(scanner_check, True), (health_check, False), (resource_check, None)]
threads = [Thread(target=loop_set(f, v), daemon=True) for (f, v) in checks]
for t in threads:
t.start()
with done_cvar:
while (done is None):
done_cvar.wait()
for t in threads:
t.join()
return (done, metrics)
def resource_metrics(self):
table = run('kubectl top nodes').split('\n')
(header, rows) = (table[0], table[1:])
def match(line):
return re.findall('([^\\s]+)\\s*', line)
columns = match(header)
values = [{c: (int(re.search('(\\d+)', v).group(1)) if (c != 'NAME') else v) for (c, v) in zip(columns, match(row))} for row in rows]
values = [v for v in values if ('default-pool' not in v['NAME'])]
return values
def master_logs(self, previous=False):
master = self.get_pod('scanner-master')
print(run('kubectl logs pod/{} master {}'.format(master['metadata']['name'], ('--previous' if previous else ''))))
def worker_logs(self, n, previous=False):
workers = [pod for pod in self.get_kube_info('pod')['items'] if (pod['metadata']['labels']['app'] == 'scanner-worker')]
print(run('kubectl logs pod/{} worker {}'.format(workers[n]['metadata']['name'], ('--previous' if previous else ''))))
def trace(self, path, subsample=None, job=None):
self.get_credentials()
sc = self.client()
if (job is None):
log.info('Fetching job ID')
job = max([int(line.split('/')[(- 2)]) for line in sp.check_output('gsutil ls gs://{}/{}/jobs'.format(sc.config.config['storage']['bucket'], sc.config.db_path), shell=True).decode('utf-8').split('\n')[:(- 1)]])
log.info('Writing trace...')
sc.profiler(job, subsample=subsample).write_trace(path)
log.info('Trace written.')
def __enter__(self):
if (not self._no_start):
self.start()
return self
def __exit__(self, *args, **kwargs):
if (not self._no_delete):
self.delete()
def cli(self):
parser = argparse.ArgumentParser()
command = parser.add_subparsers(dest='command')
command.required = True
create = command.add_parser('start', help='Create cluster')
create.add_argument('--no-reset', '-nr', action='store_true', help="Don't delete current deployments")
create.add_argument('--no-wait', '-nw', action='store_true', help="Don't wait for master")
create.add_argument('--num-workers', '-n', type=int, default=1, help='Initial number of workers')
delete = command.add_parser('delete', help='Delete cluster')
delete.add_argument('--no-prompt', '-np', action='store_true', help="Don't prompt for deletion")
resize = command.add_parser('resize', help='Resize number of nodes in cluster')
resize.add_argument('size', type=int, help='Number of nodes')
command.add_parser('get-credentials', help='Setup kubectl with credentials')
command.add_parser('job-status', help='View status of current running job')
master_logs = command.add_parser('master-logs', help='Get logs of Scanner master')
master_logs.add_argument('--previous', '-p', action='store_true', help='Get logs for previous container')
worker_logs = command.add_parser('worker-logs', help='Get logs of a Scanner worker')
worker_logs.add_argument('n', type=int, help='Index of worker')
worker_logs.add_argument('--previous', '-p', action='store_true', help='Get logs for previous container')
trace = command.add_parser('trace', help='Extract profiler trace')
trace.add_argument('path', help='Path to output trace')
trace.add_argument('--subsample', type=int, help='Number of workers to include in trace')
trace.add_argument('--job', type=int, help='Job ID to extract (default is latest)')
args = parser.parse_args()
if (args.command == 'start'):
self.start(reset=(not args.no_reset), wait=(not args.no_wait))
elif (args.command == 'delete'):
self.delete(prompt=(not args.no_prompt))
elif (args.command == 'resize'):
self.resize(args.size)
elif (args.command == 'get-credentials'):
self.get_credentials()
elif (args.command == 'job-status'):
self.job_status()
elif (args.command == 'master-logs'):
self.master_logs(previous=args.previous)
elif (args.command == 'worker-logs'):
self.worker_logs(args.n, previous=args.previous)
elif (args.command == 'trace'):
self.trace(args.path, subsample=args.subsample, job=args.job) |
class docInternalTypeSub(supermod.docInternalType):
def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
supermod.docInternalType.__init__(self, mixedclass_, content_) |
def custom(tensors_in: list, shape_func, op_name: str, out_dtypes: list, out_names: list=None, params: dict=None):
out_shapes = shape_func(tensors_in)
tensors_out = []
for (i, out_dtype) in enumerate(out_dtypes):
tensor_out = Tensor(out_shapes[i], dtype=out_dtype, name=(out_names[i] if out_names else None))
tensors_out.append(tensor_out)
attrs = {}
attrs['name'] = Attr(op_name, 'string')
dict_array = []
for (key, value) in (params or {}).items():
params_new = {}
if isinstance(value, bool):
    # check bool before int, since bool is a subclass of int
    value_new = Attr(value, 'bool')
elif isinstance(value, int):
    value_new = Attr(value)
elif isinstance(value, list):
if all((isinstance(x, int) for x in value)):
value_new = ArrayAttr(value)
elif all((isinstance(x, float) for x in value)):
value_new = ArrayAttr(value, 'float32')
else:
raise ValueError(f'Elements in the list of {key} must be int-only or float-only')
else:
value_new = Attr(value, 'float32')
params_new[key] = value_new
dict_array.append(Attr(params_new, 'dict'))
attrs['params'] = ArrayAttr(dict_array, 'dict')
TpuLang.insert_op('top.Custom', tensors_in, tensors_out, params=attrs)
return tensors_out |
def add_frame(img_in: np.ndarray, color: Tuple[(int, int, int)]) -> np.ndarray:
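# Draw a solid-color frame around the image: pad left/right, then top/bottom, with a border width of ~1% of the image width.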
img = img_in.copy()
w = int(np.round((0.01 * img.shape[1])))
pad_lr = np.tile(np.uint8(color).reshape(1, 1, 3), (img.shape[0], w, 1))
img = np.concatenate([pad_lr, img, pad_lr], axis=1)
pad_tb = np.tile(np.uint8(color).reshape(1, 1, 3), (w, img.shape[1], 1))
img = np.concatenate([pad_tb, img, pad_tb], axis=0)
return img |
def transform():
return torchvision.transforms.Compose([_convert_image_to_rgb, torchvision.transforms.ToTensor(), torchvision.transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])  # CLIP image normalization constants (mean, std)
def cudnn_LSTM(model, input_blob, initial_states, dim_in, dim_out, scope, recurrent_params=None, input_params=None, num_layers=1, return_params=False):
with core.NameScope(scope):
weight_params = GetLSTMParamNames()['weights']
bias_params = GetLSTMParamNames()['biases']
input_weight_size = (dim_out * dim_in)
upper_layer_input_weight_size = (dim_out * dim_out)
recurrent_weight_size = (dim_out * dim_out)
input_bias_size = dim_out
recurrent_bias_size = dim_out
def init(layer, pname, input_type):
input_weight_size_for_layer = (input_weight_size if (layer == 0) else upper_layer_input_weight_size)
if (pname in weight_params):
sz = (input_weight_size_for_layer if (input_type == 'input') else recurrent_weight_size)
elif (pname in bias_params):
sz = (input_bias_size if (input_type == 'input') else recurrent_bias_size)
else:
assert False, 'unknown parameter type {}'.format(pname)
return model.param_init_net.UniformFill([], 'lstm_init_{}_{}_{}'.format(input_type, pname, layer), shape=[sz])
first_layer_sz = (((input_weight_size + recurrent_weight_size) + input_bias_size) + recurrent_bias_size)
upper_layer_sz = (((upper_layer_input_weight_size + recurrent_weight_size) + input_bias_size) + recurrent_bias_size)
total_sz = (4 * (first_layer_sz + ((num_layers - 1) * upper_layer_sz)))
weights = model.create_param('lstm_weight', shape=[total_sz], initializer=Initializer('UniformFill'), tags=ParameterTags.WEIGHT)
lstm_args = {'hidden_size': dim_out, 'rnn_mode': 'lstm', 'bidirectional': 0, 'dropout': 1.0, 'input_mode': 'linear', 'num_layers': num_layers, 'engine': 'CUDNN'}
param_extract_net = core.Net('lstm_param_extractor')
param_extract_net.AddExternalInputs([input_blob, weights])
param_extract_mapping = {}
for input_type in ['input', 'recurrent']:
param_extract_mapping[input_type] = {}
p = (recurrent_params if (input_type == 'recurrent') else input_params)
if (p is None):
p = {}
for pname in (weight_params + bias_params):
for j in range(0, num_layers):
values = (p[pname] if (pname in p) else init(j, pname, input_type))
model.param_init_net.RecurrentParamSet([input_blob, weights, values], weights, layer=j, input_type=input_type, param_type=pname, **lstm_args)
if (pname not in param_extract_mapping[input_type]):
param_extract_mapping[input_type][pname] = {}
b = param_extract_net.RecurrentParamGet([input_blob, weights], ['lstm_{}_{}_{}'.format(input_type, pname, j)], layer=j, input_type=input_type, param_type=pname, **lstm_args)
param_extract_mapping[input_type][pname][j] = b
(hidden_input_blob, cell_input_blob) = initial_states
(output, hidden_output, cell_output, rnn_scratch, dropout_states) = model.net.Recurrent([input_blob, hidden_input_blob, cell_input_blob, weights], ['lstm_output', 'lstm_hidden_output', 'lstm_cell_output', 'lstm_rnn_scratch', 'lstm_dropout_states'], seed=random.randint(0, 100000), **lstm_args)
model.net.AddExternalOutputs(hidden_output, cell_output, rnn_scratch, dropout_states)
if return_params:
param_extract = (param_extract_net, param_extract_mapping)
return (output, hidden_output, cell_output, param_extract)
else:
return (output, hidden_output, cell_output) |
def LF_negex_definite_negation_left(c):
possible_terms = [x['term'].split(' ') for x in negex.dictionary['definite'] if (x['direction'] == 'forward')]
longest = len(max(possible_terms, key=len))
left_window_length = (longest + 2)
v = negex.is_negated(c, 'definite', 'left', left_window_length)
return ((- 1) if v else 0) |
def secondsToStr(elapsed=None):
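# Without an argument, return the current local time as 'YYYY-MM-DD HH:MM:SS'; otherwise format the elapsed seconds as a timedelta string.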
if (elapsed is None):
return strftime('%Y-%m-%d %H:%M:%S', localtime())
else:
return str(timedelta(seconds=elapsed)) |
class Sdma(Dma):
def __init__(self, core_id, writer, sheet_name):
super().__init__(core_id, writer)
self.sheet_name = ((sheet_name + '_') + str(core_id))
def load(self, reg_info_file, sdma_layer_map):
super().load(reg_info_file, sdma_layer_map)
new_reg_list = []
for reg_dict in self.reg_list:
if (reg_dict['Engine Id'] == '3'):
new_reg_list.append(reg_dict)
self.reg_list = new_reg_list
return self.chip_arch_dict
def set_style(cls, file_path, core_id, engine_type='SDMA', sheet_color='D0CECE', chip_arch=None, frozen=True):
super().set_style(file_path, core_id, engine_type, sheet_color, chip_arch, frozen=frozen) |
def GenerateSM80_TensorOp_1688_trmm(manifest, cuda_version):
if (not CudaToolkitVersionSatisfies(cuda_version, 11, 0)):
return
layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor)]
side_modes = [SideMode.Left, SideMode.Right]
fill_modes = [FillMode.Lower, FillMode.Upper]
diag_types = [DiagType.NonUnit, DiagType.Unit]
math_instructions = [MathInstruction([16, 8, 8], DataType.tf32, DataType.tf32, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add), MathInstruction([16, 8, 8], DataType.f32, DataType.f32, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add_fast_f32)]
min_cc = 80
max_cc = 1024
alignment_constraints = [1, 2, 4]
for math_inst in math_instructions:
tile_descriptions = [TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc)]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, data_type, alignment_constraints) |
class PySAGClassifier(BaseClassifier):
def _get_loss(self, loss):
losses = {'modified_huber': ModifiedHuber(), 'smooth_hinge': SmoothHinge(self.gamma), 'squared_hinge': SquaredHinge(1.0), 'log': Log(), 'squared': SquaredLoss()}
return losses[loss]
def _get_penalty(self, penalty):
if isinstance(penalty, str):
penalties = {'l1': L1Penalty(self.beta), 'l2': None}
return penalties[penalty]
else:
return penalty
def __init__(self, eta='auto', alpha=1.0, beta=0.0, loss='smooth_hinge', penalty='l2', gamma=1.0, max_iter=100, random_state=None, callback=None):
self.eta = eta
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.loss = loss
self.penalty = penalty
self.max_iter = max_iter
self.random_state = random_state
self.rng = self._get_random_state()
self.callback = callback
self.is_saga = False
def fit(self, X, y):
self._set_label_transformers(y)
y = np.asfortranarray(self.label_binarizer_.transform(y), dtype=np.float64)
if ((self.eta is None) or (self.eta == 'auto')):
eta = get_auto_step_size(get_dataset(X, order='c'), self.alpha, self.loss, self.is_saga)
else:
eta = self.eta
if ((self.alpha * eta) == 1):
eta *= 0.9
loss = self._get_loss(self.loss)
self.penalty = self._get_penalty(self.penalty)
if ((not self.is_saga) and (self.penalty is not None)):
raise ValueError('PySAGClassifier only accepts l2 penalty. Please use `saga=True` or PySAGAClassifier.')
if self.is_saga:
self.coef_ = _fit_saga(X, y, eta, self.alpha, loss, self.penalty, self.max_iter, self.rng)
else:
self.coef_ = _fit_sag(X, y, eta, self.alpha, loss, self.max_iter, self.rng) |
def load_dataset():
global train_data, dev_data, test_data, trfreq
trace('load train')
for line in open(args.train_file):
(h, r, t) = parse_line(line)
train_data.append((h, r, t))
trfreq[r] += 1
train_data = list(train_data)
for r in trfreq:
trfreq[r] = (args.train_size / (float(trfreq[r]) * len(trfreq)))
trace('load dev')
for line in open(args.dev_file):
(h, r, t, l) = parse_line(line)
if ((h not in glinks) or (t not in glinks)):
continue
dev_data.append((h, r, t, l))
print('dev size:', len(dev_data))
trace('load test')
for line in open(args.test_file):
(h, r, t, l) = parse_line(line)
if ((h not in glinks) or (t not in glinks)):
continue
test_data.append((h, r, t, l))
print('test size:', len(test_data)) |
class Mlp(nn.Module):
def __init__(self, input_size=784, hidden_sizes=None, n_classes=10, bias=True, dropout=False):
super().__init__()
if (hidden_sizes is None):
hidden_sizes = [512, 256]
self.dropout = dropout
self.input_size = input_size
self.hidden_layers = nn.ModuleList([nn.Linear(in_size, out_size, bias=bias) for (in_size, out_size) in zip(([self.input_size] + hidden_sizes[:(- 1)]), hidden_sizes)])
self.output_layer = nn.Linear(hidden_sizes[(- 1)], n_classes, bias=bias)
def forward(self, x):
x = x.view((- 1), self.input_size)
out = x
for layer in self.hidden_layers:
Z = layer(out)
out = F.relu(Z)
if self.dropout:
out = F.dropout(out, p=0.5)
logits = self.output_layer(out)
return logits |
class MobileConfigurationPath(ConfigurationPath):
def __init__(self, mobile: MobileBase, path_points: List[List[float]]):
self._mobile = mobile
self._path_points = path_points
self._drawing_handle = None
self._path_done = False
self.i_path = (- 1)
self.inter_done = True
self._num_joints = mobile.get_joint_count()
self.set_to_start()
if (len(self._path_points) > 2):
self._set_inter_target(0)
def step(self) -> bool:
raise NotImplementedError()
def set_to_start(self) -> None:
start_config = self._path_points[0]
self._mobile.set_2d_pose(start_config[:3])
self._path_done = False
def set_to_end(self) -> None:
final_config = self._path_points[(- 1)]
self._mobile.set_2d_pose(final_config[:3])
def visualize(self) -> None:
if (len(self._path_points) <= 0):
raise RuntimeError("Can't visualise a path with no points.")
tip = self._mobile
self._drawing_handle = sim.simAddDrawingObject(objectType=sim.sim_drawing_lines, size=3, duplicateTolerance=0, parentObjectHandle=(- 1), maxItemCount=99999, ambient_diffuse=[1, 0, 1])
sim.simAddDrawingObjectItem(self._drawing_handle, None)
init_pose = self._mobile.get_2d_pose()
self._mobile.set_2d_pose(self._path_points[0][:3])
prev_point = list(tip.get_position())
for i in range(len(self._path_points)):
points = self._path_points[i]
self._mobile.set_2d_pose(points[:3])
p = list(tip.get_position())
sim.simAddDrawingObjectItem(self._drawing_handle, (prev_point + p))
prev_point = p
self._mobile.set_2d_pose(init_pose[:3])
def clear_visualization(self) -> None:
if (self._drawing_handle is not None):
sim.simAddDrawingObjectItem(self._drawing_handle, None)
def _next_i_path(self):
incr = 0.01
dist_to_next = 0
while (dist_to_next < incr):
self.i_path += 1
if (self.i_path == (len(self._path_points) - 1)):
self.i_path = (len(self._path_points) - 1)
break
dist_to_next += self._path_points[self.i_path][(- 1)]
def _set_inter_target(self, i):
self._mobile.intermediate_target_base.set_position([self._path_points[i][0], self._path_points[i][1], self._mobile.target_z])
self._mobile.intermediate_target_base.set_orientation([0, 0, self._path_points[i][2]]) |
@pytest.mark.parametrize('outside_of,expected_types', [(('tests.fixtures.types.outside.Foo',), ('builtins.int', 'builtins.str', 'builtins.bool', 'builtins.float', 'builtins.bytes', 'builtins.complex', 'builtins.list', 'builtins.set', 'builtins.dict', 'builtins.tuple', 'builtins.object')), (('tests.fixtures.types.outside.Bar',), ('tests.fixtures.types.outside.Foo', 'builtins.int', 'builtins.str', 'builtins.bool', 'builtins.float', 'builtins.bytes', 'builtins.complex', 'builtins.list', 'builtins.set', 'builtins.dict', 'builtins.tuple', 'builtins.object')), (('tests.fixtures.types.outside.Bar', 'builtins.complex'), ('tests.fixtures.types.outside.Foo', 'builtins.str', 'builtins.bytes', 'builtins.list', 'builtins.set', 'builtins.dict', 'builtins.tuple', 'builtins.object')), (('builtins.object',), ())])
def test_get_type_outside_of(outside_of, expected_types):
test_cluster = generate_test_cluster('tests.fixtures.types.outside')
tps = test_cluster.type_system
outside_set = OrderedSet((tps.find_type_info(t) for t in outside_of))
assert (set(tps.get_type_outside_of(outside_set)) == {tps.find_type_info(t) for t in expected_types}) |
def main():
fusiongraph = FusionGraphModel(graph, gpu_id, config['graph'], config['data'], config['train']['M'], config['train']['d'], config['train']['bn_decay'])
lightning_data = LightningData(train_set, val_set, test_set)
lightning_model = LightningModel(scaler, fusiongraph)
trainer = Trainer(logger=wandb_logger, gpus=[gpu_id], max_epochs=config['train']['epoch'])
trainer.fit(lightning_model, lightning_data)
trainer.test(lightning_model, datamodule=lightning_data)
print('Graph USE', config['graph']['use'])
print('Data', config['data']) |
def array_ufunc(ufunc, method: str, inputs, kwargs: dict[(str, Any)]):
if ((method != '__call__') or (len(inputs) == 0) or ('out' in kwargs)):
return NotImplemented
behavior = behavior_of(*inputs)
attrs = attrs_of(*inputs)
backend = backend_of(*inputs, coerce_to_common=True)
inputs = _array_ufunc_custom_cast(inputs, behavior, backend)
def action(inputs, **ignore):
contents = [x for x in inputs if isinstance(x, ak.contents.Content)]
assert (len(contents) >= 1)
signature = _array_ufunc_signature(ufunc, inputs)
custom = find_ufunc(behavior, signature)
if (custom is not None):
return _array_ufunc_adjust(custom, inputs, kwargs, behavior)
if any(((x.is_indexed and (x.parameter('__array__') == 'categorical')) for x in contents)):
out = _array_ufunc_categorical(ufunc, method, inputs, kwargs, behavior)
if (out is not None):
return out
if any(((x.is_list and (x.parameter('__array__') in ('string', 'bytestring'))) for x in contents)):
out = _array_ufunc_string_likes(ufunc, method, inputs, kwargs, behavior)
if (out is not None):
return out
if all(((x.is_list and (x.parameter('__array__') in ('string', 'bytestring'))) for x in contents)):
raise TypeError(f'{type(ufunc).__module__}.{ufunc.__name__} is not implemented for string types. To register an implementation, add a name to these string(s) and register a behavior overload')
if (ufunc is numpy.matmul):
raise NotImplementedError('matrix multiplication (`@` or `np.matmul`) is not yet implemented for Awkward Arrays')
for x in contents:
apply_ufunc = find_ufunc_generic(ufunc, x, behavior)
if (apply_ufunc is not None):
out = _array_ufunc_adjust_apply(apply_ufunc, ufunc, method, inputs, kwargs, behavior)
if (out is not None):
return out
if all(((isinstance(x, NumpyArray) or (not isinstance(x, ak.contents.Content))) for x in inputs)):
parameters = functools.reduce(parameters_intersect, (c._parameters for c in contents))
input_args = [(x.data if isinstance(x, NumpyArray) else x) for x in inputs]
result = backend.nplike.apply_ufunc(ufunc, method, input_args, kwargs)
if isinstance(result, tuple):
return tuple((NumpyArray(x, backend=backend, parameters=parameters) for x in result))
else:
return (NumpyArray(result, backend=backend, parameters=parameters),)
if all((((x.parameter('__list__') is not None) or (x.parameter('__record__') is not None)) for x in contents)):
error_message = []
for x in inputs:
if isinstance(x, ak.contents.Content):
if (x.parameter('__list__') is not None):
error_message.append(x.parameter('__list__'))
elif (x.parameter('__record__') is not None):
error_message.append(x.parameter('__record__'))
else:
error_message.append(type(x).__name__)
else:
error_message.append(type(x).__name__)
raise TypeError('no {}.{} overloads for custom types: {}'.format(type(ufunc).__module__, ufunc.__name__, ', '.join(error_message)))
return None
out = ak._broadcasting.broadcast_and_apply(inputs, action, allow_records=False, function_name=ufunc.__name__)
if (len(out) == 1):
return wrap_layout(out[0], behavior=behavior, attrs=attrs)
else:
return tuple((wrap_layout(o, behavior=behavior, attrs=attrs) for o in out)) |
class InsertUsbInComputer(Task):
def init_task(self) -> None:
success_sensor = ProximitySensor('success')
usb = Shape('usb')
usb_tip = Shape('tip')
self.register_graspable_objects([usb])
self.register_success_conditions([DetectedCondition(usb_tip, success_sensor)])
def init_episode(self, index: int) -> List[str]:
return ['insert usb in computer', 'pick up the usb and put it in the computer', 'slide the usb into the usb slot', 'insert the usb stick into the usb port']
def variation_count(self) -> int:
return 1
def base_rotation_bounds(self) -> Tuple[(List[float], List[float])]:
return ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0]) |
class LogitTransform(nn.Module):
def __init__(self, alpha=_DEFAULT_ALPHA):
nn.Module.__init__(self)
self.alpha = alpha
def forward(self, x, logpx=None, reverse=False):
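# reverse=True maps back to (0, 1) via a sigmoid; otherwise apply the alpha-scaled logit transform, optionally updating the log-density logpx.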
if reverse:
return _sigmoid(x, logpx, self.alpha)
else:
return _logit(x, logpx, self.alpha) |
def color_weisfeiler_lehman(adjacency: Union[(sparse.csr_matrix, np.ndarray)], max_iter: int=(- 1)) -> np.ndarray:
adjacency = check_format(adjacency, allow_empty=True)
check_square(adjacency)
n_nodes = adjacency.shape[0]
if ((max_iter < 0) or (max_iter > n_nodes)):
max_iter = n_nodes
labels = np.zeros(n_nodes, dtype=np.int32)
powers = (((- np.pi) / 3.15) ** np.arange(n_nodes, dtype=np.double))
indptr = adjacency.indptr
indices = adjacency.indices
(labels, _) = weisfeiler_lehman_coloring(indptr, indices, labels, powers, max_iter)
return np.array(labels) |
class Leon(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 1.2)] * self.N), ([1.2] * self.N)))
self.global_optimum = [[1 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
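# Leon function: f(x) = 100*(x2 - x1^2)^2 + (1 - x1)^2, with global minimum f = 0 at x = (1, 1).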
self.nfev += 1
return ((100.0 * ((x[1] - (x[0] ** 2.0)) ** 2.0)) + ((1 - x[0]) ** 2.0)) |
class _OSA_module(nn.Module):
def __init__(self, in_ch, stage_ch, concat_ch, layer_per_block, module_name, SE=False, identity=False, depthwise=False, dcn_config={}):
super(_OSA_module, self).__init__()
self.identity = identity
self.depthwise = depthwise
self.isReduced = False
self.layers = nn.ModuleList()
in_channel = in_ch
if (self.depthwise and (in_channel != stage_ch)):
self.isReduced = True
self.conv_reduction = nn.Sequential(OrderedDict(conv1x1(in_channel, stage_ch, '{}_reduction'.format(module_name), '0')))
with_dcn = dcn_config.get('stage_with_dcn', False)
for i in range(layer_per_block):
if self.depthwise:
self.layers.append(nn.Sequential(OrderedDict(dw_conv3x3(stage_ch, stage_ch, module_name, i))))
elif with_dcn:
deformable_groups = dcn_config.get('deformable_groups', 1)
with_modulated_dcn = dcn_config.get('with_modulated_dcn', False)
self.layers.append(DFConv3x3(in_channel, stage_ch, module_name, i, with_modulated_dcn=with_modulated_dcn, deformable_groups=deformable_groups))
else:
self.layers.append(nn.Sequential(OrderedDict(conv3x3(in_channel, stage_ch, module_name, i))))
in_channel = stage_ch
in_channel = (in_ch + (layer_per_block * stage_ch))
self.concat = nn.Sequential(OrderedDict(conv1x1(in_channel, concat_ch, module_name, 'concat')))
self.ese = eSEModule(concat_ch)
def forward(self, x):
identity_feat = x
output = []
output.append(x)
if (self.depthwise and self.isReduced):
x = self.conv_reduction(x)
for layer in self.layers:
x = layer(x)
output.append(x)
x = torch.cat(output, dim=1)
xt = self.concat(x)
xt = self.ese(xt)
if self.identity:
xt = (xt + identity_feat)
return xt |
def finalize_compiler_options(cmd):
dist = cmd.distribution
defaults = {'fcompiler': 'gnu95', 'f2py': default_f2py(), 'compiler': None, 'f77exec': None, 'f90exec': None}
for option in defaults:
if (getattr(cmd, option) == None):
for c in dist.commands:
other_cmd = dist.get_command_obj(c)
if ((other_cmd == cmd) or (not hasattr(other_cmd, option))):
continue
if (getattr(other_cmd, option) != None):
setattr(cmd, option, getattr(other_cmd, option))
break
if (getattr(cmd, option) == None):
setattr(cmd, option, defaults[option])
if (not (cmd.fcompiler in ('gnu95', 'none', 'None'))):
raise OptionError(('--fcompiler={0} unknown'.format(cmd.fcompiler) + ', options: gnu95, None'))
if ((cmd.compiler == None) and (sys.platform == 'win32')):
cmd.compiler = 'mingw32'
if isinstance(cmd.f2py, list):
return
if ((sys.platform == 'win32') and isinstance(cmd.f2py, str) and (not is_win_exec(cmd.f2py))):
f2py = cmd.f2py
if (not os.path.isfile(f2py)):
f2pydir = next((d for d in os.environ['PATH'].split(os.pathsep) if os.path.isfile(os.path.join(d, f2py))), None)
if f2pydir:
f2py = os.path.join(f2pydir, f2py)
if (not is_win_exec(f2py)):
if is_win_exec(sys.executable, f2py):
cmd.f2py = [sys.executable, f2py]
else:
raise RuntimeError('f2py {0} found but not executable'.format(cmd.f2py))
else:
cmd.f2py = [f2py]
elif is_win_exec(sys.executable, f2py):
cmd.f2py = [sys.executable, f2py]
else:
raise RuntimeError('f2py {0} not found and not executable'.format(cmd.f2py))
elif is_win_exec(sys.executable, f2py):
cmd.f2py = [sys.executable, f2py]
else:
raise RuntimeError('f2py {0} exists but not executable'.format(cmd.f2py))
else:
cmd.f2py = [cmd.f2py] |
@pytest.mark.operations('success')
@pytest.mark.openapi_version('3.0')
def test_server_timeout(cli, schema_url, service, mocker):
mocker.patch('schemathesis.cli.output.default.wait_for_report_handler', return_value=events.Timeout())
result = cli.run(schema_url, 'my-api', f'--schemathesis-io-token={service.token}', f'--schemathesis-io-url={service.base_url}', '--report')
assert (result.exit_code == ExitCode.OK), result.stdout
lines = get_stdout_lines(result.stdout)
assert (lines[17] == 'Compressed report size: 1 KB')
assert (lines[18] == f'Uploading reports to {service.base_url} ...')
assert (lines[19] == 'Upload: TIMEOUT') |
class Agent():
def __init__(self, world_size):
self.ob_rrefs = []
self.agent_rref = RRef(self)
self.rewards = {}
self.saved_log_probs = {}
self.policy = Policy()
self.optimizer = optim.Adam(self.policy.parameters(), lr=0.01)
self.eps = np.finfo(np.float32).eps.item()
self.running_reward = 0
self.reward_threshold = DummyEnv().reward_threshold
for ob_rank in range(1, world_size):
ob_info = rpc.get_worker_info(worker_name(ob_rank))
self.ob_rrefs.append(remote(ob_info, Observer))
self.rewards[ob_info.id] = []
self.saved_log_probs[ob_info.id] = []
def select_action(self, ob_id, state):
probs = self.policy(state.unsqueeze(0))
m = Categorical(probs)
action = m.sample()
self.saved_log_probs[ob_id].append(m.log_prob(action))
return action.item()
def report_reward(self, ob_id, reward):
self.rewards[ob_id].append(reward)
def run_episode(self, n_steps=0):
futs = []
for ob_rref in self.ob_rrefs:
futs.append(rpc_async(ob_rref.owner(), _call_method, args=(Observer.run_episode, ob_rref, self.agent_rref, n_steps)))
for fut in futs:
fut.wait()
def finish_episode(self):
(R, probs, rewards) = (0, [], [])
for ob_id in self.rewards:
probs.extend(self.saved_log_probs[ob_id])
rewards.extend(self.rewards[ob_id])
min_reward = min([sum(self.rewards[ob_id]) for ob_id in self.rewards])
self.running_reward = ((0.05 * min_reward) + ((1 - 0.05) * self.running_reward))
for ob_id in self.rewards:
self.rewards[ob_id] = []
self.saved_log_probs[ob_id] = []
(policy_loss, returns) = ([], [])
for r in rewards[::(- 1)]:
R = (r + (GAMMA * R))
returns.insert(0, R)
returns = torch.tensor(returns)
returns = ((returns - returns.mean()) / (returns.std() + self.eps))
for (log_prob, R) in zip(probs, returns):
policy_loss.append(((- log_prob) * R))
self.optimizer.zero_grad()
policy_loss = torch.cat(policy_loss).sum()
policy_loss.backward()
self.optimizer.step()
return min_reward |
def compute_scores(Y, Yhat):
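# Drop the first 60 samples, then return [accuracy, F1, precision, recall] for the remaining predictions.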
Y = Y.drop(Y.index[0:60])
Yhat = Yhat.drop(Yhat.index[0:60])
return [accuracy_score(Y, Yhat), f1_score(Y, Yhat), precision_score(Y, Yhat), recall_score(Y, Yhat)] |
def test_ResourceReservationProtocol_schedule():
tl = Timeline()
n1 = FakeNode('n1', tl)
for _ in range(1000):
s_time = random.randint(1000)
memo_size = (random.randint(25) + 1)
reservation = Reservation('', '', s_time, ((s_time + 1) + random.randint(200)), memo_size, 0.9)
if n1.rsvp.schedule(reservation):
counter = 0
for card in n1.rsvp.timecards:
if (reservation in card.reservations):
counter += 1
assert (counter == (memo_size * 2))
else:
counter = 0
for card in n1.rsvp.timecards:
if (reservation in card.reservations):
counter += 1
assert (counter == 0)
n2 = FakeNode('n2', tl)
for _ in range(1000):
s_time = random.randint(1000)
memo_size = (random.randint(25) + 1)
reservation = Reservation('n2', '', s_time, ((s_time + 1) + random.randint(200)), memo_size, 0.9)
if n2.rsvp.schedule(reservation):
counter = 0
for card in n2.rsvp.timecards:
if (reservation in card.reservations):
counter += 1
assert (counter == memo_size)
else:
counter = 0
for card in n2.rsvp.timecards:
if (reservation in card.reservations):
counter += 1
assert (counter == 0) |
def compute_files(user1, user2, file_list, dir_pre, start_num):
match_total = 0
test_total = 0
gold_total = 0
for fi in file_list:
file1 = ((((dir_pre + user1) + '/') + fi) + '.txt')
file2 = ((((dir_pre + user2) + '/') + fi) + '.txt')
if (not os.path.exists(file1)):
print('Error: ', file1, 'does not exist', file=ERROR_LOG)
return (- 1.0)
if (not os.path.exists(file2)):
print('Error: ', file2, 'does not exist', file=ERROR_LOG)
return (- 1.0)
try:
file1_h = open(file1, 'r')
file2_h = open(file2, 'r')
except IOError:
print('Cannot open the files', file1, file2, file=ERROR_LOG)
break
cur_amr1 = amr.AMR.get_amr_line(file1_h)
cur_amr2 = amr.AMR.get_amr_line(file2_h)
if (cur_amr1 == ''):
print('AMR 1 is empty', file=ERROR_LOG)
continue
if (cur_amr2 == ''):
print('AMR 2 is empty', file=ERROR_LOG)
continue
amr1 = amr.AMR.parse_AMR_line(cur_amr1)
amr2 = amr.AMR.parse_AMR_line(cur_amr2)
test_label = 'a'
gold_label = 'b'
amr1.rename_node(test_label)
amr2.rename_node(gold_label)
(test_inst, test_rel1, test_rel2) = amr1.get_triples()
(gold_inst, gold_rel1, gold_rel2) = amr2.get_triples()
if verbose:
print('Instance triples of file 1:', len(test_inst), file=DEBUG_LOG)
print(test_inst, file=DEBUG_LOG)
print('Attribute triples of file 1:', len(test_rel1), file=DEBUG_LOG)
print(test_rel1, file=DEBUG_LOG)
print('Relation triples of file 1:', len(test_rel2), file=DEBUG_LOG)
print(test_rel2, file=DEBUG_LOG)
print('Instance triples of file 2:', len(gold_inst), file=DEBUG_LOG)
print(gold_inst, file=DEBUG_LOG)
print('Attribute triples of file 2:', len(gold_rel1), file=DEBUG_LOG)
print(gold_rel1, file=DEBUG_LOG)
print('Relation triples of file 2:', len(gold_rel2), file=DEBUG_LOG)
print(gold_rel2, file=DEBUG_LOG)
(best_match, best_match_num) = smatch.get_best_match(test_inst, test_rel1, test_rel2, gold_inst, gold_rel1, gold_rel2, test_label, gold_label)
if verbose:
print('best match number', best_match_num, file=DEBUG_LOG)
print('Best Match:', smatch.print_alignment(best_match, test_inst, gold_inst), file=DEBUG_LOG)
match_total += best_match_num
test_total += ((len(test_inst) + len(test_rel1)) + len(test_rel2))
gold_total += ((len(gold_inst) + len(gold_rel1)) + len(gold_rel2))
smatch.match_triple_dict.clear()
(precision, recall, f_score) = smatch.compute_f(match_total, test_total, gold_total)
return ('%.2f' % f_score) |
def radixpass(a, b, r, s, n, k):
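# Stable counting sort: order the n indices in a into b by the key r[a[i] + s], with key values in 0..k.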
c = array('i', ([0] * (k + 1)))
for i in range(n):
c[r[(a[i] + s)]] += 1
somme = 0
for i in range((k + 1)):
(freq, c[i]) = (c[i], somme)
somme += freq
for i in range(n):
b[c[r[(a[i] + s)]]] = a[i]
c[r[(a[i] + s)]] += 1 |
class XLMTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, merges_file, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', cls_token='</s>', mask_token='<special1>', additional_special_tokens=['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>'], lang2id=None, id2lang=None, do_lowercase_and_remove_accent=True, **kwargs):
super(XLMTokenizer, self).__init__(unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
self.cache_moses_punct_normalizer = dict()
self.cache_moses_tokenizer = dict()
self.lang_with_custom_tokenizer = set(['zh', 'th', 'ja'])
self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
self.lang2id = lang2id
self.id2lang = id2lang
if ((lang2id is not None) and (id2lang is not None)):
assert (len(lang2id) == len(id2lang))
self.ja_word_tokenizer = None
self.zh_word_tokenizer = None
self.encoder = json.load(open(vocab_file, encoding='utf-8'))
self.decoder = {v: k for (k, v) in self.encoder.items()}
merges = open(merges_file, encoding='utf-8').read().split('\n')[:(- 1)]
merges = [tuple(merge.split()[:2]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
def moses_punct_norm(self, text, lang):
if (lang not in self.cache_moses_punct_normalizer):
punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
self.cache_moses_punct_normalizer[lang] = punct_normalizer
else:
punct_normalizer = self.cache_moses_punct_normalizer[lang]
return punct_normalizer.normalize(text)
def moses_tokenize(self, text, lang):
if (lang not in self.cache_moses_tokenizer):
moses_tokenizer = sm.MosesTokenizer(lang=lang)
self.cache_moses_tokenizer[lang] = moses_tokenizer
else:
moses_tokenizer = self.cache_moses_tokenizer[lang]
return moses_tokenizer.tokenize(text, return_str=False, escape=False)
def moses_pipeline(self, text, lang):
text = replace_unicode_punct(text)
text = self.moses_punct_norm(text, lang)
text = remove_non_printing_char(text)
return text
def ja_tokenize(self, text):
if (self.ja_word_tokenizer is None):
try:
import Mykytea
self.ja_word_tokenizer = Mykytea.Mykytea(('-model %s/local/share/kytea/model.bin' % os.path.expanduser('~')))
except (AttributeError, ImportError) as e:
logger.error("Make sure you install KyTea ( and it's python wrapper ( with the following steps")
logger.error('1. git clone :neubig/kytea.git && cd kytea')
logger.error('2. autoreconf -i')
logger.error('3. ./configure --prefix=$HOME/local')
logger.error('4. make && make install')
logger.error('5. pip install kytea')
raise e
return list(self.ja_word_tokenizer.getWS(text))
def vocab_size(self):
return len(self.encoder)
def bpe(self, token):
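# Apply BPE merges to a single token (with caching); the final symbol carries the end-of-word marker '</w>'.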
word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
if (token in self.cache):
return self.cache[token]
pairs = get_pairs(word)
if (not pairs):
return (token + '</w>')
while True:
bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
if (bigram not in self.bpe_ranks):
break
(first, second) = bigram
new_word = []
i = 0
while (i < len(word)):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:
new_word.extend(word[i:])
break
if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
new_word.append((first + second))
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if (len(word) == 1):
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
if (word == '\n </w>'):
word = '\n</w>'
self.cache[token] = word
return word
def _tokenize(self, text, lang='en', bypass_tokenizer=False):
if (lang and self.lang2id and (lang not in self.lang2id)):
logger.error('Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model.')
if bypass_tokenizer:
text = text.split()
elif (lang not in self.lang_with_custom_tokenizer):
text = self.moses_pipeline(text, lang=lang)
if (lang == 'ro'):
text = romanian_preprocessing(text)
text = self.moses_tokenize(text, lang=lang)
elif (lang == 'th'):
text = self.moses_pipeline(text, lang=lang)
try:
if ('pythainlp' not in sys.modules):
from pythainlp.tokenize import word_tokenize as th_word_tokenize
else:
th_word_tokenize = sys.modules['pythainlp'].word_tokenize
except (AttributeError, ImportError) as e:
logger.error('Make sure you install PyThaiNLP ( with the following steps')
logger.error('1. pip install pythainlp')
raise e
text = th_word_tokenize(text)
elif (lang == 'zh'):
try:
if ('jieba' not in sys.modules):
import jieba
else:
jieba = sys.modules['jieba']
except (AttributeError, ImportError) as e:
logger.error('Make sure you install Jieba ( with the following steps')
logger.error('1. pip install jieba')
raise e
text = ' '.join(jieba.cut(text))
text = self.moses_pipeline(text, lang=lang)
text = text.split()
elif (lang == 'ja'):
text = self.moses_pipeline(text, lang=lang)
text = self.ja_tokenize(text)
else:
raise ValueError('It should not reach here')
if (self.do_lowercase_and_remove_accent and (not bypass_tokenizer)):
text = lowercase_and_remove_accent(text)
split_tokens = []
for token in text:
if token:
split_tokens.extend([t for t in self.bpe(token).split(' ')])
return split_tokens
def _convert_token_to_id(self, token):
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
out_string = ''.join(tokens).replace('</w>', ' ').strip()
return out_string
def add_special_tokens_single_sequence(self, token_ids):
return (([self.cls_token_id] + token_ids) + [self.sep_token_id])
def add_special_tokens_sequence_pair(self, token_ids_0, token_ids_1):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
def save_vocabulary(self, save_directory):
if (not os.path.isdir(save_directory)):
logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
return
vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
if (index != token_index):
logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merge_file))
index = token_index
writer.write((' '.join(bpe_tokens) + u'\n'))
index += 1
return (vocab_file, merge_file) |
class RandomAgent(Expert):
def __init__(self, action_scale=0.1, action_space_dim=2):
self.action_scale = action_scale
self.action_space_dim = action_space_dim
self.counter = 0
def get_action(self, obs):
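# Sample a uniform random action in [-1, 1]^d scaled by action_scale; request a reset (and accept) every 25 calls.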
action = (np.random.uniform((- 1), 1, self.action_space_dim) * self.action_scale)
self.counter += 1
reset = ((self.counter % 25) == 0)
accept = reset
valid = True
return (action, valid, reset, accept) |
class LPDictionary(LPAbstractDictionary):
def __init__(self, A, b, c, objective_value, basic_variables, nonbasic_variables, objective_name):
super().__init__()
A = copy(A)
b = copy(b)
c = copy(c)
B = vector(basic_variables)
N = vector(nonbasic_variables)
self._AbcvBNz = [A, b, c, objective_value, B, N, SR(objective_name)]
def random_element(m, n, bound=5, special_probability=0.2):
A = random_matrix(ZZ, m, n, x=(- bound), y=bound).change_ring(QQ)
if (special_probability < random()):
b = random_vector(ZZ, m, x=0, y=bound).change_ring(QQ)
else:
b = random_vector(ZZ, m, x=(- bound), y=bound).change_ring(QQ)
if (special_probability < random()):
c = random_vector(ZZ, n, x=(- bound), y=bound).change_ring(QQ)
else:
c = random_vector(ZZ, n, x=(- bound), y=0).change_ring(QQ)
x_N = list(PolynomialRing(QQ, 'x', ((m + n) + 1), order='neglex').gens())
x_N.pop(0)
x_B = []
for i in range(m):
x_B.append(x_N.pop(randint(0, (((n + m) - i) - 1))))
return LPDictionary(A, b, c, randint((- bound), bound), x_B, x_N, 'z')
def __eq__(self, other):
return (isinstance(other, LPDictionary) and (self._AbcvBNz == other._AbcvBNz))
def _latex_(self):
(A, b, c, v, B, N, z) = self._AbcvBNz
lines = []
lines.append('\\renewcommand{\\arraystretch}{1.5} %notruncate')
if generate_real_LaTeX:
lines[(- 1)] += ' \\setlength{\\arraycolsep}{0.125em}'
relations = [(_latex_product((- Ai), N, head=[xi, '=', bi], drop_plus=False, allow_empty=True) + '\\\\') for (xi, bi, Ai) in zip(B, b, A.rows())]
objective = (_latex_product(c, N, head=[z, '=', v], drop_plus=False, allow_empty=True) + '\\\\')
if (style() == 'UAlberta'):
lines.append(('\\begin{array}{|rcr%s|}' % ('cr' * len(N))))
lines.append('\\hline')
lines.extend(relations)
lines.append('\\hline')
lines.append(objective)
lines.append('\\hline')
if (style() == 'Vanderbei'):
lines.append(('\\begin{array}{rcr%s}' % ('cr' * len(N))))
lines.append(objective)
lines.append('\\hline')
lines.extend(relations)
lines.append('\\end{array}')
latex.add_package_to_preamble_if_available('color')
if (self._entering is not None):
e = ((2 * tuple(N).index(self._entering)) + 4)
for (i, lin) in enumerate(lines):
lin = lin[:(- 2)].split('&')
if (len(lin) > 1):
lin[e] = ('\\color{green}{%s}' % (lin[e],))
lines[i] = ('&'.join(lin) + '\\\\')
if (self._leaving is not None):
l = tuple(B).index(self._leaving)
if (style() == 'UAlberta'):
l += 3
if (style() == 'Vanderbei'):
l += 4
lin = lines[l][:(- 2)].split('&')
for (i, term) in enumerate(lin):
lin[i] = ('\\color{red}{%s}' % (term,))
lin = ('&'.join(lin) + '\\\\')
lin = lin.replace('\\color{red}{\\color{green}{', '\\color{blue}{{')
lines[l] = lin
return '\n'.join(lines)
def add_row(self, nonbasic_coefficients, constant, basic_variable=None):
(A, b, c, v, B, N, z) = self._AbcvBNz
m = len(B)
n = len(N)
BR = self.base_ring()
A = A.stack(vector(BR, n, nonbasic_coefficients))
b = vector(BR, (m + 1), (tuple(b) + (constant,)))
if (basic_variable is None):
basic_variable = default_variable_name('primal slack')
if (style() == 'UAlberta'):
index = ((n + m) + 1)
elif (style() == 'Vanderbei'):
index = (m + 1)
basic_variable = '{}{:d}'.format(basic_variable, index)
if (not isinstance(basic_variable, str)):
basic_variable = str(basic_variable)
R = PolynomialRing(BR, (list(B.base_ring().variable_names()) + [basic_variable]), order='neglex')
B = (list(B) + [basic_variable])
B = map(R, B)
N = map(R, N)
return LPDictionary(A, b, c, v, B, N, z)
def basic_variables(self):
return self._AbcvBNz[4]
def column_coefficients(self, v):
if (v is not None):
v = variable(self.coordinate_ring(), v)
if (v not in self.nonbasic_variables()):
raise ValueError('variable must be nonbasic')
k = tuple(self.nonbasic_variables()).index(v)
return self._AbcvBNz[0].column(k)
def constant_terms(self):
return self._AbcvBNz[1]
def nonbasic_variables(self):
return self._AbcvBNz[5]
def objective_coefficients(self):
return self._AbcvBNz[2]
def objective_name(self):
return self._AbcvBNz[6]
def objective_value(self):
return self._AbcvBNz[3]
def row_coefficients(self, v):
if (v is not None):
v = variable(self.coordinate_ring(), v)
if (v not in self.basic_variables()):
raise ValueError('variable must be basic')
i = tuple(self.basic_variables()).index(v)
return self._AbcvBNz[0][i]
def update(self):
(A, b, c, v, B, N, z) = self._AbcvBNz
entering = self._entering
if (entering is None):
raise ValueError('entering variable must be set before updating')
leaving = self._leaving
if (leaving is None):
raise ValueError('leaving variable must be set before updating')
l = tuple(B).index(leaving)
e = tuple(N).index(entering)
Ale = A[(l, e)]
if (Ale == 0):
raise ValueError('incompatible choice of entering and leaving variables')
B[l] = entering
N[e] = leaving
b[l] /= Ale
A[l] /= Ale
A[(l, e)] = (1 / Ale)
for i in range(A.nrows()):
if (i != l):
Aie = A[(i, e)]
A[(i, e)] = 0
b[i] -= (Aie * b[l])
A[i] -= (Aie * A[l])
ce = c[e]
c[e] = 0
self._AbcvBNz[2] = (c - (ce * A[l]))
self._AbcvBNz[3] += (ce * b[l])
self._entering = None
self._leaving = None |
def test_detect_first():
faces = RetinaFace.extract_faces(img_path='tests/dataset/img11.jpg')
num_black_pixels = np.sum(np.all((faces[0] == 0), axis=2))
assert (num_black_pixels > THRESHOLD)
logger.info(' Disabled align_first test for single face photo done') |
class TestL2XText(unittest.TestCase):
def setUp(self) -> None:
categories = ['alt.atheism', 'soc.religion.christian']
newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)
newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)
self.newsgroups_test = newsgroups_test
self.x_train = Text(newsgroups_train.data)
self.y_train = newsgroups_train.target
self.x_test = Text(newsgroups_test.data)
self.y_test = newsgroups_test.target
self.class_names = ['atheism', 'christian']
self.transform = Tfidf().fit(self.x_train)
np.random.seed(1)
train_vectors = self.transform.transform(self.x_train)
test_vectors = self.transform.transform(self.x_test)
self.model = sklearn.ensemble.RandomForestClassifier(n_estimators=500)
self.model.fit(train_vectors, self.y_train)
self.predict_function = (lambda x: self.model.predict_proba(self.transform.transform(x)))
predictions = self.model.predict(test_vectors)
print('Test accuracy: {}'.format(sklearn.metrics.f1_score(self.y_test, predictions, average='binary')))
def test_explain(self):
idx = 83
explainer = L2XText(training_data=self.x_train, predict_function=self.predict_function)
explanations = explainer.explain(self.x_test[idx:(idx + 9)])
explanations.plot(class_names=self.class_names, max_num_subplots=9) |
def valid_aggregation(state: Dict) -> bool:
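# The aggregation is valid when the key sets of aggr1 and aggr2 together equal current's keys and each per-country count in current is the sum of the two partial counts.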
aggr1 = json.loads(state['aggr1'])
aggr2 = json.loads(state['aggr2'])
current = json.loads(state['current'])
if ((set(aggr1.keys()) | set(aggr2.keys())) != set(current.keys())):
return False
for country in current.keys():
aggr1_freq = (aggr1[country] if (country in aggr1.keys()) else 0)
aggr2_freq = (aggr2[country] if (country in aggr2.keys()) else 0)
if ((aggr1_freq + aggr2_freq) != current[country]):
return False
return True |
def default_loader(filename):
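# Dispatch on file extension: load .npy/.npz files with numpy, otherwise fall back to tv_default_loader.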
if filename.endswith('.npy'):
return numpy.load(filename).view(ndarray)
elif filename.endswith('.npz'):
return numpy.load(filename)
else:
return tv_default_loader(filename) |
class MixtureSameFamily(Distribution):
arg_constraints = {}
has_rsample = False
def __init__(self, mixture_distribution, component_distribution, validate_args=None):
self._mixture_distribution = mixture_distribution
self._component_distribution = component_distribution
if (not isinstance(self._mixture_distribution, Categorical)):
raise ValueError('The Mixture distribution needs to be an instance of torch.distributions.Categorical')
if (not isinstance(self._component_distribution, Distribution)):
raise ValueError('The Component distribution needs to be an instance of torch.distributions.Distribution')
mdbs = self._mixture_distribution.batch_shape
cdbs = self._component_distribution.batch_shape[:(- 1)]
for (size1, size2) in zip(reversed(mdbs), reversed(cdbs)):
if ((size1 != 1) and (size2 != 1) and (size1 != size2)):
raise ValueError('`mixture_distribution.batch_shape` ({0}) is not compatible with `component_distribution.batch_shape`({1})'.format(mdbs, cdbs))
km = self._mixture_distribution.logits.shape[(- 1)]
kc = self._component_distribution.batch_shape[(- 1)]
if ((km is not None) and (kc is not None) and (km != kc)):
raise ValueError('`mixture_distribution component` ({0}) does not equal `component_distribution.batch_shape[-1]` ({1})'.format(km, kc))
self._num_component = km
event_shape = self._component_distribution.event_shape
self._event_ndims = len(event_shape)
super(MixtureSameFamily, self).__init__(batch_shape=cdbs, event_shape=event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
batch_shape = torch.Size(batch_shape)
batch_shape_comp = (batch_shape + (self._num_component,))
new = self._get_checked_instance(MixtureSameFamily, _instance)
new._component_distribution = self._component_distribution.expand(batch_shape_comp)
new._mixture_distribution = self._mixture_distribution.expand(batch_shape)
new._num_component = self._num_component
new._event_ndims = self._event_ndims
event_shape = new._component_distribution.event_shape
super(MixtureSameFamily, new).__init__(batch_shape=batch_shape, event_shape=event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@constraints.dependent_property
def support(self):
return self._component_distribution.support
def mixture_distribution(self):
return self._mixture_distribution
def component_distribution(self):
return self._component_distribution
def mean(self):
probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)
return torch.sum((probs * self.component_distribution.mean), dim=((- 1) - self._event_ndims))
def variance(self):
probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)
mean_cond_var = torch.sum((probs * self.component_distribution.variance), dim=((- 1) - self._event_ndims))
var_cond_mean = torch.sum((probs * (self.component_distribution.mean - self._pad(self.mean)).pow(2.0)), dim=((- 1) - self._event_ndims))
return (mean_cond_var + var_cond_mean)
def cdf(self, x):
x = self._pad(x)
cdf_x = self.component_distribution.cdf(x)
mix_prob = self.mixture_distribution.probs
return torch.sum((cdf_x * mix_prob), dim=(- 1))
def log_prob(self, x):
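# log p(x) = logsumexp_k(log p_k(x) + log w_k): component log-probs combined with the log mixture weights.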
x = self._pad(x)
log_prob_x = self.component_distribution.log_prob(x)
log_mix_prob = torch.log_softmax(self.mixture_distribution.logits, dim=(- 1))
return torch.logsumexp((log_prob_x + log_mix_prob), dim=(- 1))
def sample(self, sample_shape=torch.Size()):
with torch.no_grad():
sample_len = len(sample_shape)
batch_len = len(self.batch_shape)
gather_dim = (sample_len + batch_len)
es = self.event_shape
mix_sample = self.mixture_distribution.sample(sample_shape)
mix_shape = mix_sample.shape
comp_samples = self.component_distribution.sample(sample_shape)
mix_sample_r = mix_sample.reshape((mix_shape + torch.Size(([1] * (len(es) + 1)))))
mix_sample_r = mix_sample_r.repeat(((torch.Size(([1] * len(mix_shape))) + torch.Size([1])) + es))
samples = torch.gather(comp_samples, gather_dim, mix_sample_r)
return samples.squeeze(gather_dim)
def _pad(self, x):
return x.unsqueeze(((- 1) - self._event_ndims))
def _pad_mixture_dimensions(self, x):
dist_batch_ndims = self.batch_shape.numel()
cat_batch_ndims = self.mixture_distribution.batch_shape.numel()
pad_ndims = (0 if (cat_batch_ndims == 1) else (dist_batch_ndims - cat_batch_ndims))
xs = x.shape
x = x.reshape((((xs[:(- 1)] + torch.Size((pad_ndims * [1]))) + xs[(- 1):]) + torch.Size((self._event_ndims * [1]))))
return x
def __repr__(self):
args_string = '\n {},\n {}'.format(self.mixture_distribution, self.component_distribution)
return ((('MixtureSameFamily' + '(') + args_string) + ')') |
class Mish_ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=100):
super(Mish_ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear((512 * block.expansion), num_classes)
self.mish = Mish()
def _make_layer(self, block, planes, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = (planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
out = self.mish(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), (- 1))
out = self.linear(out)
return out |
def pre_caption(caption, max_words=50):
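# Normalize a caption: lowercase, strip the listed punctuation, collapse repeated whitespace, and truncate to max_words words.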
caption = re.sub('([.!\\"()*#:;~])', ' ', caption.lower())
caption = re.sub('\\s{2,}', ' ', caption)
caption = caption.rstrip('\n')
caption = caption.strip(' ')
caption_words = caption.split(' ')
if (len(caption_words) > max_words):
caption = ' '.join(caption_words[:max_words])
return caption |
class GapAware(GapAwareBase):
def __init__(self, optimizer, big_gamma=0.999, epsilon=1e-08, from_grad=True):
super().__init__(optimizer)
self.big_gamma = big_gamma
self.running_avg_step = init_running_avg_step(optimizer)
self.epsilon = epsilon
for pg in self.optimizer.param_groups:
for p in pg['params']:
if ('momentum_buffer' not in self.optimizer.state[p]):
self.optimizer.state[p]['momentum_buffer'] = torch.zeros_like(p)
def update_running_avg(self):
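# Maintain an exponential moving average (decay big_gamma) of the squared per-parameter step: the momentum buffer when momentum is used, the raw gradient otherwise.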
opt_s = self.optimizer.state
ra = self.running_avg_step
bg = self.big_gamma
with torch.no_grad():
for pg in self.optimizer.param_groups:
if (pg['momentum'] != 0):
for p in pg['params']:
ra[id(p)] = ((bg * ra[id(p)]) + ((1 - bg) * (opt_s[p]['momentum_buffer'] ** 2)))
else:
for p in pg['params']:
ra[id(p)] = ((bg * ra[id(p)]) + ((1 - bg) * (p.grad ** 2)))
def apply_from_grad(self):
with torch.no_grad():
ra = self.running_avg_step
bias_correction = (1 - (self.big_gamma ** self.step_count))
eps = self.epsilon
for pg in self.optimizer.param_groups:
max_lr = pg[GapAwareBase.MAX_LR_NAME]
if (max_lr <= 0):
continue
weight_decay = pg['weight_decay']
for p in pg['params']:
avg_steps_needed = (max_lr * (((ra[id(p)] / bias_correction) ** 0.5) + eps))
penalty = (1 + ((pg['lr'] * p.grad.abs()) / avg_steps_needed))
p.grad /= penalty
p.grad += p.mul((weight_decay * ((1 - penalty) / penalty)))
def apply_on_theta(self, real_theta):
with torch.no_grad():
ra = self.running_avg_step
bias_correction = (1 - (self.big_gamma ** self.step_count))
eps = self.epsilon
for (pg, rpg) in zip(self.optimizer.param_groups, real_theta):
max_lr = pg[GapAwareBase.MAX_LR_NAME]
if (max_lr <= 0):
continue
weight_decay = pg['weight_decay']
for (p, rp) in zip(pg['params'], rpg):
avg_steps_needed = (max_lr * (((ra[id(p)] / bias_correction) ** 0.5) + eps))
gap = (p - rp).abs()
penalty = (1 + (gap / avg_steps_needed))
p.grad /= penalty
p.grad += rp.mul((weight_decay * ((1 - penalty) / penalty)))
def apply_on_stashed(self, stashed_theta):
with torch.no_grad():
ra = self.running_avg_step
bias_correction = (1 - (self.big_gamma ** self.step_count))
eps = self.epsilon
for (pg, spg) in zip(self.optimizer.param_groups, stashed_theta):
max_lr = pg[GapAwareBase.MAX_LR_NAME]
if (max_lr <= 0):
continue
weight_decay = pg['weight_decay']
for (p, sp) in zip(pg['params'], spg):
avg_steps_needed = (max_lr * (((ra[id(p)] / bias_correction) ** 0.5) + eps))
gap = (p - sp).abs()
penalty = (1 + (gap / avg_steps_needed))
p.grad /= penalty
p.grad += p.mul((weight_decay * ((1 - penalty) / penalty))) |
def skyline_input_provider(batch_size=64):
vocab_size = 32000
src_len = 25
tgt_len = 25
device = torch.device('cuda')
src = torch.randint(low=0, high=vocab_size, size=(src_len, batch_size), dtype=torch.int64, device=device)
tgt = torch.randint(low=0, high=vocab_size, size=(tgt_len, batch_size), dtype=torch.int64, device=device)
src_len_tensor = torch.tensor(([src_len] * batch_size), dtype=torch.int64, device=device)
tgt_len_tensor = torch.tensor(([tgt_len] * batch_size), dtype=torch.int64, device=device)
return (src, src_len_tensor, tgt, tgt_len_tensor) |
def _convert_config(config):
config_list = []
for (k, v) in config.items():
if (v.lower() == 'true'):
config_list.append(('--' + k))
elif (v.lower() != 'false'):
config_list.extend(([('--' + k)] + v.split(' ')))
return config_list |
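A minimal sketch of the dict-to-argv conversion performed by _convert_config (the config keys are assumptions for illustration): 'true' values become bare flags, 'false' values are dropped, and everything else becomes a flag followed by its space-separated values.
cfg = {'use_gpu': 'true', 'dropout': '0.1', 'verbose': 'false', 'layers': '2 3'}
print(_convert_config(cfg))  # ['--use_gpu', '--dropout', '0.1', '--layers', '2', '3']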
class TSStr(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_snap.TSStr_swiginit(self, _snap.new_TSStr(*args))
__swig_destroy__ = _snap.delete_TSStr
def CStr(self, *args):
return _snap.TSStr_CStr(self, *args)
def Empty(self):
return _snap.TSStr_Empty(self)
def Len(self):
return _snap.TSStr_Len(self) |
@register_model
def tf_efficientnet_b8_ap(pretrained=False, **kwargs):
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)
return model |
def roi_sampling(x, bbx, idx, roi_size, interpolation='bilinear', padding='border', valid_mask=False):
return ROISampling.apply(x, bbx, idx, roi_size, interpolation, padding, valid_mask) |
def test_combine(tmp_path):
SHARDS = ('train', 'dev', 'test')
for (s_num, shard) in enumerate(SHARDS):
t1_json = (tmp_path / ('en_t1.%s.json' % shard))
write_temp_file(t1_json, '\n\n'.join(([EN_TRAIN_BIO] * (s_num + 1))))
t2_json = (tmp_path / ('en_t2.%s.json' % shard))
write_temp_file(t2_json, '\n\n'.join(([EN_DEV_BIO] * (s_num + 1))))
args = ['--output_dataset', 'en_c', 'en_t1', 'en_t2', '--input_dir', str(tmp_path), '--output_dir', str(tmp_path)]
combine_ner_datasets.main(args)
for (s_num, shard) in enumerate(SHARDS):
filename = (tmp_path / ('en_c.%s.json' % shard))
assert os.path.exists(filename)
with open(filename, encoding='utf-8') as fin:
doc = Document(json.load(fin))
assert (len(doc.sentences) == ((s_num + 1) * 3)) |
class bmodel_inference(common_inference):
def __init__(self, args):
super().__init__(args)
self.args = args
pyruntime = 'pyruntime_'
self.first = False
self.is_cv18xx = False
if self.args.model_file.endswith('.bmodel'):
pyruntime = (pyruntime + 'bm')
chip = get_chip_from_model(self.args.model_file)
lib_so = 'libcmodel_1684x.so'
if ((chip == 'BM1688') or (chip == 'CV186X')):
lib_so = 'libcmodel_1688.so'
elif (chip == 'BM1684'):
lib_so = 'libcmodel_1684.so'
elif (chip == 'SG2260'):
lib_so = 'libcmodel_sg2260.so'
elif (chip == 'MARS3'):
lib_so = 'libcmodel_mars3.so'
cmd = 'ln -sf $TPUC_ROOT/lib/{} $TPUC_ROOT/lib/libcmodel.so'.format(lib_so)
os.system(cmd)
elif self.args.model_file.endswith('.cvimodel'):
pyruntime = (pyruntime + 'cvi')
self.is_cv18xx = True
else:
raise RuntimeError('unsupported model file: {}'.format(self.args.model_file))
pyruntime = importlib.import_module(pyruntime)
self.model = pyruntime.Model(self.args.model_file)
if (not self.is_cv18xx):
self.net = self.model.Net(self.model.networks[0])
else:
self.net = self.model
self.net_input_num = len(self.net.inputs)
def invoke(self):
inputs = {}
inputs[self.net.inputs[0]] = self.x
outputs = []
dyn_input_shapes = []
only_one = (len(inputs) == 1)
if (only_one and (len(self.net.inputs) != 1)):
raise RuntimeError('Input num not the same')
for i in self.net.inputs:
if (not only_one):
assert (i.name in inputs)
input = inputs[i.name]
else:
input = list(inputs.values())[0]
overflow = (np.prod(i.data.shape) - np.prod(input.shape))
if (self.is_cv18xx and i.aligned):
overflow = (i.size - np.prod(input.shape))
assert (len(i.data.shape) == len(input.shape))
for (max, dim) in zip(i.data.shape, input.shape):
if (dim > max):
raise RuntimeError('Error shape: from {} to {}'.format(i.data.shape, input.shape))
dyn_input_shapes.append(input.shape)
input = np.concatenate([input.flatten(), np.zeros([overflow]).astype(input.dtype)]).reshape(i.data.shape)
zp = i.qzero_point
if (i.data.dtype == input.dtype):
i.data[:] = input.reshape(i.data.shape)
elif ((i.dtype == 'i8') and (input.dtype == np.float32)):
data = round_away_from_zero(((input * i.qscale) + zp))
i.data[:] = np.clip(data, (- 128), 127).astype(np.int8).reshape(i.data.shape)
elif ((i.dtype == 'u8') and (input.dtype == np.float32)):
data = round_away_from_zero(((input * i.qscale) + zp))
i.data[:] = np.clip(data, 0, 255).astype(np.uint8).reshape(i.data.shape)
elif ((i.dtype == 'u16') and ((input.dtype == np.float32) or (input.dtype == np.int32))):
i.data[:] = input.astype(np.uint16).reshape(i.data.shape)
elif ((i.dtype == 'f16') and (input.dtype == np.float32)):
i.data[:] = input.astype(np.float16)
elif ((i.dtype == 'bf16') and (input.dtype == np.float32)):
i.data[:] = fp32_to_bf16(input).reshape(i.data.shape)
elif ((i.dtype == 'i32') and ((input.dtype == np.float32) or (input.dtype == np.int64))):
i.data[:] = input.astype(np.int32).reshape(i.data.shape)
elif ((i.dtype == 'i4') and (input.dtype == np.float32)):
data = round_away_from_zero(((input * i.qscale) + zp))
i.data[:] = np.clip(data, (- 8), 7).astype(np.int8).reshape(i.data.shape)
elif ((i.dtype == 'u4') and (input.dtype == np.float32)):
data = round_away_from_zero(((input * i.qscale) + zp))
i.data[:] = np.clip(data, 0, 15).astype(np.uint8).reshape(i.data.shape)
elif (i.dtype == 'f32'):
i.data[:] = input.astype(np.float32)
else:
raise ValueError(f'unknown type: from {input.dtype} to {i.data.dtype}')
dyn_output_shapes = self.net.forward_dynamic(dyn_input_shapes)
dyn_idx = 0
for i in self.net.outputs:
if (((i.data.dtype == np.int8) or (i.data.dtype == np.uint8)) and (i.qscale != 0)):
if (self.is_cv18xx and (i.name in inputs)):
output = np.array((i.data.astype(np.float32) / np.float32(i.qscale)))
else:
zp = i.qzero_point
output = np.array(((i.data.astype(np.float32) - zp) * np.float32(i.qscale)), dtype=np.float32)
elif (i.dtype == 'u16'):
output = np.array(i.data.astype(np.float32))
elif (i.dtype == 'f16'):
output = np.array(i.data.astype(np.float32))
elif (i.dtype == 'bf16'):
output = bf16_to_fp32(i.data)
else:
output = np.array(i.data)
if (output.shape != dyn_output_shapes[dyn_idx]):
dyn_len = np.prod(dyn_output_shapes[dyn_idx])
output = output.flatten()[:dyn_len].reshape(*dyn_output_shapes[dyn_idx])
dyn_idx += 1
outputs.append(output)
return outputs |
class Partition2(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:2'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.6', 'l_1': 'encoder.block.7', 'l_2': 'encoder.block.8'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs) |
class ResNetBottleneckBlock(nn.Module):
n_hidden: int
strides: Tuple[(int, int)] = (1, 1)
expansion: int = 4
groups: int = 1
base_width: int = 64
activation: Callable = nn.relu
conv_block_cls: ModuleDef = ConvBlock
skip_cls: ModuleDef = ResNetSkipConnection
def __call__(self, x):
skip_cls = partial(self.skip_cls, conv_block_cls=self.conv_block_cls)
group_width = (int((self.n_hidden * (self.base_width / 64.0))) * self.groups)
y = self.conv_block_cls(group_width, kernel_size=(1, 1))(x)
y = self.conv_block_cls(group_width, strides=self.strides, groups=self.groups, padding=((1, 1), (1, 1)))(y)
y = self.conv_block_cls((self.n_hidden * self.expansion), kernel_size=(1, 1), is_last=True)(y)
return self.activation((y + skip_cls(self.strides)(x, y.shape))) |
class SkewNormal(ReferenceDistribution):
def __init__(self, *, a):
super().__init__(a=a)
def _support(self, a):
return ((- mp.inf), mp.inf)
def _pdf(self, x, a):
return ((2 * mp.npdf(x)) * mp.ncdf((a * x))) |
def SimpleConv3x3lBlock(a, b, c, s):
return nn.Sequential(nn.Conv2d(a, c, 3, padding=1, bias=False), nn.BatchNorm2d(c), nn.ReLU(inplace=True), nn.Conv2d(c, c, 3, padding=1, stride=s, bias=False), nn.BatchNorm2d(c), nn.ReLU(inplace=True)) |
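A hedged shape check for the block above (the 32x32 input size is an assumption for illustration; note the second argument b is unused by the block):
import torch
block = SimpleConv3x3lBlock(3, None, 64, 2)
y = block(torch.randn(1, 3, 32, 32))
print(y.shape)  # torch.Size([1, 64, 16, 16]) -- the stride-2 second conv halves the spatial size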
class PlayerState(object):
def __init__(self, position, orientation, held_object=None):
self.position = tuple(position)
self.orientation = tuple(orientation)
self.held_object = held_object
assert (self.orientation in Direction.ALL_DIRECTIONS)
if (self.held_object is not None):
assert isinstance(self.held_object, ObjectState)
assert (self.held_object.position == self.position)
def pos_and_or(self):
return (self.position, self.orientation)
def has_object(self):
return (self.held_object is not None)
def get_object(self):
assert self.has_object()
return self.held_object
def set_object(self, obj):
assert (not self.has_object())
obj.position = self.position
self.held_object = obj
def remove_object(self):
assert self.has_object()
obj = self.held_object
self.held_object = None
return obj
def update_pos_and_or(self, new_position, new_orientation):
self.position = new_position
self.orientation = new_orientation
if self.has_object():
self.get_object().position = new_position
def deepcopy(self):
new_obj = (None if (self.held_object is None) else self.held_object.deepcopy())
return PlayerState(self.position, self.orientation, new_obj)
def __eq__(self, other):
return (isinstance(other, PlayerState) and (self.position == other.position) and (self.orientation == other.orientation) and (self.held_object == other.held_object))
def __hash__(self):
return hash((self.position, self.orientation, self.held_object))
def __repr__(self):
return '{} facing {} holding {}'.format(self.position, self.orientation, str(self.held_object))
def to_dict(self):
return {'position': self.position, 'orientation': self.orientation, 'held_object': (self.held_object.to_dict() if (self.held_object is not None) else None)}
@staticmethod
def from_dict(player_dict):
player_dict = copy.deepcopy(player_dict)
held_obj = player_dict['held_object']
if (held_obj is not None):
player_dict['held_object'] = ObjectState.from_dict(held_obj)
return PlayerState(**player_dict) |
def load_data(seq_path, struct_path, alphabet, baselines=False):
pdb_index = {}
for path in struct_path:
pid = os.path.basename(path)[:7]
pdb_index[pid] = path
with open(seq_path, 'rb') as f:
(names, sequences) = fasta.parse(f)
names = [name.split()[0].decode('utf-8') for name in names]
sequences = [alphabet.encode(s.upper()) for s in sequences]
x = [torch.from_numpy(x).long() for x in sequences]
names_ = []
x_ = []
y = []
for (xi, name) in zip(x, names):
pid = name
if (pid not in pdb_index):
pid = ('d' + pid[1:])
path = pdb_index[pid]
im = np.array(Image.open(path), copy=False)
contacts = np.zeros(im.shape, dtype=np.float32)
contacts[(im == 1)] = (- 1)
contacts[(im == 255)] = 1
mask = np.tril_indices(contacts.shape[0], k=1)
contacts[mask] = (- 1)
names_.append(name)
x_.append(xi)
y.append(torch.from_numpy(contacts))
return (x_, y, names_) |
def _self_bleu(completions: List[Sequence]) -> float:
completion_sequences: List[str] = [completion.text.strip() for completion in completions if completion.text.strip()]
if (len(completion_sequences) <= 1):
return 0
scores = []
for i in range(len(completion_sequences)):
hypothesis = completion_sequences[i]
references = (completion_sequences[:i] + completion_sequences[(i + 1):])
score = BLEU(effective_order=True).sentence_score(hypothesis=hypothesis, references=references)
scores.append(score.score)
return (sum(scores) / len(scores)) |
def test_datetime64():
array = ak.Array([[np.datetime64(1, 'D'), np.datetime64(10, 'D')]])
assert ((array == np.datetime64(10, 'D')).to_list() == [[False, True]]) |
def remove_short_notes(notes: List[Note], label: Label, min_char_count: int=0, **kwargs) -> List[Note]:
new_notes: List[Note] = []
for note in notes:
text: str = str(note.event.value)
if (len(text) >= min_char_count):
new_notes.append(note)
return new_notes |
def loop(model, cond_blob, external_blobs, loop_model, cond_model=None):
add_while_op(model.net, cond_blob, external_blobs, loop_model.net, (cond_model.net if cond_model else None)) |
class PythonParameter(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PYTHONPARAMETER |
def run_train_distributed(args: typing.Optional[argparse.Namespace]=None) -> None:
if (args is None):
base_parser = create_base_parser()
distributed_parser = create_distributed_parser(base_parser)
distributed_train_parser = create_train_parser(distributed_parser)
args = distributed_train_parser.parse_args()
exp_name = utils.get_expname(args.exp_name, args.task, args.model_type)
args.exp_name = exp_name
utils.launch_process_group(run_train, args, args.nproc_per_node, args.nnodes, args.node_rank, args.master_addr, args.master_port) |
class EmitSparseGemmInstance():
def __init__(self, operation_suffix=''):
self.operation_suffix = operation_suffix
self.includes = []
self.gemm_template = '\n // Gemm operator ${operation_name}\n using Operation_${operation_name} = cutlass::gemm::device::SparseGemm<\n ${element_a}, ${layout_a},\n ${element_b}, ${layout_b},\n ${element_c}, ${layout_c},\n ${element_accumulator},\n ${opcode_class},\n ${arch},\n cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,\n cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,\n cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,\n ${epilogue_functor}<\n ${element_c},\n ${epilogue_vector_length},\n ${element_accumulator},\n ${element_epilogue}\n >,\n ${swizzling_functor},\n ${stages},\n ${align_a},\n ${align_b},\n false,\n ${math_operation}\n ${residual}\n >;\n'
def instance_template(self):
return '\n${compile_guard_start}\n manifest.append(new ${gemm_kind}<Operation_${operation_name}>("${operation_name}"));\n${compile_guard_end}\n'
def emit(self, operation):
warp_shape = [(operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx]) for idx in range(3)]
epilogue_vector_length = int((min((operation.C.alignment * DataTypeSize[operation.C.element]), 128) / DataTypeSize[operation.C.element]))
residual = ''
values = {'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[operation.A.layout], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[operation.B.layout], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': ('cutlass::arch::Sm%d' % operation.arch), 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], 'residual': residual}
template = self.gemm_template
return SubstituteTemplate(template, values) |
class _MemberSpec(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def set_data_type(self, data_type):
self.data_type = data_type
def get_data_type(self):
return self.data_type
def set_container(self, container):
self.container = container
def get_container(self):
return self.container |
def test_compare_ne():
a_raw = torch.tensor([2.0, 2.0, 2.0])
b_raw = torch.tensor([1.0, 2.0, 3.0])
feature_dim = Dim(3)
a = Tensor(name='a', raw_tensor=a_raw, dims=[feature_dim], dtype='float32')
b = Tensor(name='b', raw_tensor=b_raw, dims=[feature_dim], dtype='float32')
result = (a != b)
result_alt1 = rf.compare(a, '!=', b)
result_alt2 = rf.compare(a, '<>', b)
result_alt3 = rf.compare(a, 'not_equal', b)
assert (result.raw_tensor.tolist() == [True, False, True])
assert (result_alt1.raw_tensor.tolist() == [True, False, True])
assert (result_alt2.raw_tensor.tolist() == [True, False, True])
assert (result_alt3.raw_tensor.tolist() == [True, False, True]) |
def roman_to_int(roman_string):
NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])
roman_string = roman_string.upper()
if (len((set(list(roman_string.upper())) - NUMERALS_SET)) != 0):
raise ValueError(f'{roman_string} does not seem to be a roman numeral')
i = result = 0
for (integer, numeral) in NUMERAL_MAP:
while (roman_string[i:(i + len(numeral))] == numeral):
result += integer
i += len(numeral)
if (result < 1):
raise ValueError(f'Can not interpret Roman Numeral {roman_string}')
return result |
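roman_to_int relies on a module-level NUMERAL_MAP that is not shown here; the definition below is an assumption (descending value/numeral pairs, including the subtractive forms) used only to illustrate the greedy matching loop.
NUMERAL_MAP = tuple(zip((1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1), ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')))  # assumed definition
print(roman_to_int('xiv'))     # 14
print(roman_to_int('MMXXIV'))  # 2024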
def main(in_directory, out_directory, short_name):
phrases = get_tokenized_phrases(in_directory)
process_utils.write_list(os.path.join(out_directory, ('%s.train.json' % short_name)), phrases) |
def test_std():
assert (ak.std(array, axis=None) == pytest.approx(3.))
assert ak.almost_equal(ak.std(array, axis=None, keepdims=True, mask_identity=False), ak.to_regular([[3.]]))
assert ak.almost_equal(ak.std(array, axis=None, keepdims=True, mask_identity=True), ak.to_regular(ak.Array([[3.]]).mask[[[True]]]))
assert np.isnan(ak.std(array[2], axis=None, mask_identity=False)) |
def register_quantized_custom_module_mapping(float_custom_module_class, quantized_custom_module_class):
assert hasattr(quantized_custom_module_class, 'from_observed'), ('from_observed' + ' must be defined in quantized custom module class')
QUANTIZED_CUSTOM_MODULE_CLASS_MAPPINGS[float_custom_module_class] = quantized_custom_module_class |
class SubArray(np.ndarray):
def __new__(cls, arr, info={}):
x = np.asanyarray(arr).view(cls)
x.info = info.copy()
return x
def __array_finalize__(self, obj):
if callable(getattr(super(SubArray, self), '__array_finalize__', None)):
super(SubArray, self).__array_finalize__(obj)
self.info = getattr(obj, 'info', {}).copy()
return
def __add__(self, other):
result = super(SubArray, self).__add__(other)
result.info['added'] = (result.info.get('added', 0) + 1)
return result
def __iadd__(self, other):
result = super(SubArray, self).__iadd__(other)
result.info['iadded'] = (result.info.get('iadded', 0) + 1)
return result |
class Stochastic(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 5.0)] * self.N), ([5.0] * self.N)))
self.global_optimum = [[(1.0 / _) for _ in range(1, (self.N + 1))]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
rnd = uniform(0.0, 1.0, size=(self.N,))
i = arange(1, (self.N + 1))
return sum((rnd * abs((x - (1.0 / i))))) |
def got() -> operations.GraphOfOperations:
operations_graph = operations.GraphOfOperations()
plans = operations.Generate(1, 1)
operations_graph.append_operation(plans)
solved_subsets = []
for i in range(1, 5):
list_id = f'List {i}'
sub_list = operations.Selector((lambda thoughts, list_id=list_id: [thought for thought in thoughts if (thought.state['part'] == list_id)]))
sub_list.add_predecessor(plans)
operations_graph.add_operation(sub_list)
intersected_subset = operations.Generate(1, 5)
intersected_subset.add_predecessor(sub_list)
operations_graph.add_operation(intersected_subset)
score_sub_list = operations.Score(1, False, utils.num_errors)
score_sub_list.add_predecessor(intersected_subset)
operations_graph.add_operation(score_sub_list)
keep_best_sub_list = operations.KeepBestN(1, False)
keep_best_sub_list.add_predecessor(score_sub_list)
operations_graph.add_operation(keep_best_sub_list)
solved_subsets.append(keep_best_sub_list)
aggregate_1 = operations.Aggregate(10)
aggregate_1.add_predecessor(solved_subsets[0])
aggregate_1.add_predecessor(solved_subsets[1])
operations_graph.add_operation(aggregate_1)
score_aggregate_1 = operations.Score(1, False, utils.num_errors)
score_aggregate_1.add_predecessor(aggregate_1)
operations_graph.add_operation(score_aggregate_1)
keep_best_aggregate_1 = operations.KeepBestN(1, False)
keep_best_aggregate_1.add_predecessor(score_aggregate_1)
operations_graph.add_operation(keep_best_aggregate_1)
aggregate_2 = operations.Aggregate(10)
aggregate_2.add_predecessor(solved_subsets[2])
aggregate_2.add_predecessor(solved_subsets[3])
operations_graph.add_operation(aggregate_2)
score_aggregate_2 = operations.Score(1, False, utils.num_errors)
score_aggregate_2.add_predecessor(aggregate_2)
operations_graph.add_operation(score_aggregate_2)
keep_best_aggregate_2 = operations.KeepBestN(1, False)
keep_best_aggregate_2.add_predecessor(score_aggregate_2)
operations_graph.add_operation(keep_best_aggregate_2)
final_aggregate = operations.Aggregate(10)
operations_graph.append_operation(final_aggregate)
operations_graph.append_operation(operations.Score(1, False, utils.num_errors))
keep_best_aggregate_final = operations.KeepBestN(1, False)
operations_graph.append_operation(keep_best_aggregate_final)
operations_graph.append_operation(operations.GroundTruth(utils.test_set_intersection))
return operations_graph |
def enumerate_subgraph(G, k=3, progress_bar=False, node_anchored=False):
ps = (np.arange(1.0, 0.0, ((- 1.0) / (k + 1))) ** 1.5)
motif_counts = defaultdict(list)
for node in (tqdm(G.nodes) if progress_bar else G.nodes):
sg = set()
sg.add(node)
v_ext = set()
neighbors = [nbr for nbr in list(G[node].keys()) if (nbr > node)]
n_frac = (len(neighbors) * ps[1])
n_samples = (int(n_frac) + (1 if (random.random() < (n_frac - int(n_frac))) else 0))
neighbors = random.sample(neighbors, n_samples)
for nbr in neighbors:
v_ext.add(nbr)
extend_subgraph(G, k, sg, v_ext, node, motif_counts, ps, node_anchored)
return motif_counts |
def setup_ddp() -> None:
if (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
rank = int(os.environ['RANK'])
world_size = int(os.environ['WORLD_SIZE'])
gpu = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(gpu)
dist.init_process_group('nccl', init_method='env://', world_size=world_size, rank=rank)
dist.barrier()
else:
gpu = 0
return gpu |
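A hedged usage note: setup_ddp expects the RANK, WORLD_SIZE and LOCAL_RANK variables that torchrun exports; a two-process single-node launch would look roughly like the line below (train.py is a placeholder name), after which each worker calls setup_ddp() and receives its local GPU index.
# torchrun --nproc_per_node=2 train.py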
def random_word_no_prob(text, label, label_map, tokenizer):
text = text.replace('\n', '').split(' ')
orig_to_map_label = []
orig_to_map_token = []
assert (len(text) == len(label_map))
for i in range(0, len(text)):
orig_token = text[i]
orig_label_map = label_map[i]
tokens = tokenizer.tokenize(orig_token)
orig_to_map_token.extend(tokens)
orig_to_map_label.append(orig_label_map)
for j in range(1, len(tokens)):
orig_to_map_label.append((- 1))
if (len(orig_to_map_label) != len(orig_to_map_token)):
print(text)
print(label_map)
print(orig_to_map_label)
print(orig_to_map_token)
raise Exception(' ')
return (orig_to_map_token, orig_to_map_label) |
def fixDelex(filename, data, data2, idx, idx_acts):
try:
turn = data2[filename.strip('.json')][str(idx_acts)]
except:
return data
if ((not isinstance(turn, bytes)) and (not isinstance(turn, str))):
for (k, act) in turn.items():
if ('Attraction' in k):
if ('restaurant_' in data['log'][idx]['text']):
data['log'][idx]['text'] = data['log'][idx]['text'].replace('restaurant', 'attraction')
if ('hotel_' in data['log'][idx]['text']):
data['log'][idx]['text'] = data['log'][idx]['text'].replace('hotel', 'attraction')
if ('Hotel' in k):
if ('attraction_' in data['log'][idx]['text']):
data['log'][idx]['text'] = data['log'][idx]['text'].replace('attraction', 'hotel')
if ('restaurant_' in data['log'][idx]['text']):
data['log'][idx]['text'] = data['log'][idx]['text'].replace('restaurant', 'hotel')
if ('Restaurant' in k):
if ('attraction_' in data['log'][idx]['text']):
data['log'][idx]['text'] = data['log'][idx]['text'].replace('attraction', 'restaurant')
if ('hotel_' in data['log'][idx]['text']):
data['log'][idx]['text'] = data['log'][idx]['text'].replace('hotel', 'restaurant')
return data |
def main():
args = parse_args()
args.out_dir.mkdir(exist_ok=True)
logging.basicConfig(stream=sys.stdout, level=(logging.ERROR if args.quiet else logging.INFO), format='%(levelname)-8s %(message)s')
logging.info(f'''Using arguments:
{pprint.pformat(vars(args))}''')
logging.info('Loading names...')
with open(args.names, encoding='utf8') as f:
names = [line.strip() for line in f]
logging.info('Iterating over names...')
if (args.jobs == 1):
converted_names = []
for name in (pbar := tqdm.tqdm(names)):
pbar.set_postfix_str(name)
result = process(name, args.in_dir, args.out_dir, args.resolution, args.skip_existing, args.ignore_exceptions)
if (result is not None):
converted_names.append(result)
else:
results = joblib.Parallel(n_jobs=args.jobs, verbose=(0 if args.quiet else 5))((joblib.delayed(process)(name, args.in_dir, args.out_dir, args.resolution, args.skip_existing, args.ignore_exceptions) for name in names))
converted_names = [result for result in results if (result is not None)]
converted_names = sorted(set(converted_names))
logging.info(f'Converted {len(converted_names)} out of {len(names)} files.')
out_filename = (args.out_dir.parent / 'json-names.txt')
utils.save_txt(out_filename, converted_names)
logging.info(f'Saved the converted filenames to: {out_filename}') |
class FasterRCNN(nn.Module):
def __init__(self, extractor, rpn, head, loc_normalize_mean=(0.0, 0.0, 0.0, 0.0), loc_normalize_std=(0.1, 0.1, 0.1, 0.1)):
super(FasterRCNN, self).__init__()
self.extractor = extractor
self.rpn = rpn
self.head = head
self.loc_normalize_mean = loc_normalize_mean
self.loc_normalize_std = loc_normalize_std
self.use_preset('evaluate')
def use_preset(self, preset):
if (preset == 'visualize'):
self.nms_thresh = 0.3
self.score_thres = 0.7
elif (preset == 'evaluate'):
self.nms_thresh = 0.3
self.score_thres = 0.05
else:
raise ValueError("preset must be 'visualize' or 'evaluate' ")
def n_class(self):
return self.head.n_class
def forward(self, x, scale=1.0):
img_size = x.shape[2:]
h = self.extractor(x)
(rpn_locs, rpn_scores, rois, roi_indices, anchor) = self.rpn(h, img_size, scale)
(roi_cls_locs, roi_scores) = self.head(h, rois, roi_indices)
return (roi_cls_locs, roi_scores, rois, roi_indices) |
def get_files_list(base_dataset_dir, images_folder_name, annotations_folder_name, filename):
images_dir = os.path.join(base_dataset_dir, images_folder_name)
annotations_dir = os.path.join(base_dataset_dir, annotations_folder_name)
with open(filename, 'r') as file:
    images_filename_list = [line.strip() for line in file]
return images_filename_list |
def test_tags():
labels = ['Siren', 'Laughter', 'Engine']
confidence = np.array([1.0, 0.0, 1.0])
tags = annotations.Tags(labels, 'open', confidence)
assert (tags.labels == labels)
assert np.allclose(tags.confidence, confidence)
bad_labels = ['Siren', 'Laughter', 5]
pytest.raises(TypeError, annotations.Tags, bad_labels, 'open', confidence)
bad_confidence = np.array([1, 0.5, (- 0.2)])
pytest.raises(ValueError, annotations.Tags, labels, 'open', bad_confidence)
with pytest.raises(ValueError):
annotations.Tags(labels, 'bad_unit', confidence) |
def generate_train_validation_list(data_path, train_size=0.8):
file_list = glob.glob((data_path + '*.wav'))
file_list = np.array(file_list)
(train, validation) = train_test_split(file_list, train_size=train_size)
return (train, validation) |
def process_k(func):
def wrap(self, recs: SparkDataFrame, k: IntOrList, *args):
if isinstance(k, int):
k_list = [k]
else:
k_list = k
res = func(self, recs, k_list, *args)
if isinstance(k, int):
return res[k]
return res
return wrap |
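A minimal sketch of how the process_k decorator is intended to be used (the Metrics class and precision_at_k below are assumptions for illustration): the wrapped function always receives a list of k values and returns a per-k dict, while callers may pass either an int or a list.
class Metrics:
    @process_k
    def precision_at_k(self, recs, k_list, *args):
        return {k: 1.0 / k for k in k_list}  # placeholder metric, one value per k
m = Metrics()
print(m.precision_at_k(None, 5))       # 0.2 -- a single int returns just that entry
print(m.precision_at_k(None, [1, 5]))  # {1: 1.0, 5: 0.2} -- a list returns the full dict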
def infer_abbr(class_type):
if (not inspect.isclass(class_type)):
raise TypeError(f'class_type must be a type, but got {type(class_type)}')
if hasattr(class_type, '_abbr_'):
return class_type._abbr_
if issubclass(class_type, _InstanceNorm):
return 'in'
elif issubclass(class_type, _BatchNorm):
return 'bn'
elif issubclass(class_type, nn.GroupNorm):
return 'gn'
elif issubclass(class_type, nn.LayerNorm):
return 'ln'
else:
class_name = class_type.__name__.lower()
if ('batch' in class_name):
return 'bn'
elif ('group' in class_name):
return 'gn'
elif ('layer' in class_name):
return 'ln'
elif ('instance' in class_name):
return 'in'
else:
return class_name |
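A hedged usage sketch of infer_abbr with standard torch norm layers (assuming the module's existing imports of _BatchNorm and _InstanceNorm):
import torch.nn as nn
print(infer_abbr(nn.BatchNorm2d))  # 'bn'
print(infer_abbr(nn.GroupNorm))    # 'gn'
print(infer_abbr(nn.LayerNorm))    # 'ln'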
def check_time(timer_id):
if (timer_id not in _g_timers):
_g_timers[timer_id] = Timer()
return 0
else:
return _g_timers[timer_id].since_last_check() |
def generate_onion_service_keys(tor_cmd, n):
with tempfile.TemporaryDirectory(prefix='tornettools-hs-keygen-') as dir_name:
config = {'DisableNetwork': '1', 'DataDirectory': dir_name, 'ControlPort': '9030'}
tor_process = stem.process.launch_tor_with_config(config, tor_cmd=tor_cmd, init_msg_handler=logging.debug, take_ownership=True, completion_percent=0)
controller = stem.connection.connect(control_port=('127.0.0.1', 9030))
keys = []
for x in range(n):
hs = controller.create_ephemeral_hidden_service(80)
assert (hs.private_key_type == 'ED25519-V3')
keys.append((hs.private_key, (hs.service_id + '.onion')))
controller.close()
tor_process.kill()
tor_process.wait()
return keys |
class PoolingEncoderTest(tf.test.TestCase):
def setUp(self):
super(PoolingEncoderTest, self).setUp()
self.batch_size = 4
self.sequence_length = 16
self.input_depth = 10
self.mode = tf.contrib.learn.ModeKeys.TRAIN
def _test_with_params(self, params):
inputs = tf.random_normal([self.batch_size, self.sequence_length, self.input_depth])
example_length = (tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length)
encode_fn = PoolingEncoder(params, self.mode)
encoder_output = encode_fn(inputs, example_length)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
encoder_output_ = sess.run(encoder_output)
np.testing.assert_array_equal(encoder_output_.outputs.shape, [self.batch_size, self.sequence_length, self.input_depth])
np.testing.assert_array_equal(encoder_output_.attention_values.shape, [self.batch_size, self.sequence_length, self.input_depth])
np.testing.assert_array_equal(encoder_output_.final_state.shape, [self.batch_size, self.input_depth])
def test_encode_with_pos(self):
self._test_with_params({'position_embeddings.enable': True, 'position_embeddings.num_positions': self.sequence_length})
def test_encode_without_pos(self):
self._test_with_params({'position_embeddings.enable': False, 'position_embeddings.num_positions': 0}) |
def check_compatibility(version, name):
if (version[0] > VERSION_COMPATIBLE[0]):
raise UnsupportedWheel("{}'s Wheel-Version ({}) is not compatible with this version of pip".format(name, '.'.join(map(str, version))))
elif (version > VERSION_COMPATIBLE):
logger.warning('Installing from a newer Wheel-Version (%s)', '.'.join(map(str, version))) |
def store_token_address(token_dict, outname, topk=1000):
sorted_tokens = {k: v for (k, v) in sorted(token_dict.items(), key=(lambda item: item[1]), reverse=True)}
ctr = 0
with open(outname, 'w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['token_address', 'frequency'])
for (key, value) in sorted_tokens.items():
if (ctr <= topk):
csv_writer.writerow([key, value])
else:
break
ctr += 1 |
def veri_test_parser(line):
label = int(line.split(' ')[0])
enroll_filename = line.split(' ')[1]
test_filename = line.split(' ')[2].replace('\n', '')
return (label, enroll_filename, test_filename) |
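An illustrative trial line for veri_test_parser (the wav paths are placeholders): the first field is the label, the remaining two are the enrollment and test utterances.
label, enroll, test = veri_test_parser('1 id10270/a/00001.wav id10305/b/00002.wav\n')
print(label, enroll, test)  # 1 id10270/a/00001.wav id10305/b/00002.wav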
def read_decode_depth(depth_dir):
depth = tf.py_function(func=read_depth_png_tf, inp=[depth_dir], Tout=tf.float32)
return depth |