code stringlengths 281 23.7M |
|---|
def eval_para(model, iterator, sent_ids, output_path):
    """Run *model* over *iterator*, decode per-sentence tag predictions,
    extract entities, and dump them to *output_path* as JSON.

    Entities are grouped as ``{doc_key: {sentence_id: entity}}`` based on
    the ``(key, sid)`` pairs in *sent_ids*.
    """
    model.eval()
    all_words, all_heads, all_tags, all_y, all_y_hat = [], [], [], [], []
    with torch.no_grad():
        for batch in tqdm(iterator):
            words, x, is_heads, tags, y, seqlens = batch
            _, _, y_hat = model(x, y)
            all_words.extend(words)
            all_heads.extend(is_heads)
            all_tags.extend(tags)
            all_y.extend(y.numpy().tolist())
            all_y_hat.extend(y_hat.cpu().numpy().tolist())
    entities = {key: {} for key, _ in sent_ids}
    # NOTE(review): 'result.txt' is opened but never written to — presumably a
    # leftover from a debugging dump; kept so the file is still created/truncated.
    with open('result.txt', 'w') as fout:
        for i, (words, is_heads, tags, y_hat) in enumerate(
                zip(all_words, all_heads, all_tags, all_y_hat)):
            # keep predictions only for head (word-initial) sub-tokens
            y_hat = [hat for head, hat in zip(is_heads, y_hat) if head == 1]
            preds = [idx2tag[hat] for hat in y_hat]
            assert len(preds) == len(words), f'len(preds)={len(preds)}, len(words)={len(words)}'
            # drop the boundary tokens at both ends (CLS/SEP-style markers)
            words, preds = words[1:-1], preds[1:-1]
            preds = tag_numbers(words, preds)
            entity = get_entities(words, preds)
            key, sid = sent_ids[i]
            entities[key][sid] = entity
    json.dump(entities, open(output_path, 'w'))
    return
class MDense(Layer):
    """Multi-channel dense layer.

    Like ``Dense`` but with ``channels`` parallel kernels: each channel
    computes a tanh-activated affine projection scaled by a learned
    ``factor``, and the channel outputs are summed before the optional
    final activation.
    """

    def __init__(self, outputs, channels=2, activation=None, use_bias=True,
                 kernel_initializer='glorot_uniform', bias_initializer='zeros',
                 kernel_regularizer=None, bias_regularizer=None,
                 activity_regularizer=None, kernel_constraint=None,
                 bias_constraint=None, **kwargs):
        if ('input_shape' not in kwargs) and ('input_dim' in kwargs):
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(MDense, self).__init__(**kwargs)
        self.units = outputs
        self.channels = channels
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True

    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(shape=(self.units, input_dim, self.channels),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units, self.channels),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # NOTE(review): the per-channel scaling factor reuses the *bias*
        # regularizer/constraint — looks intentional, but confirm.
        self.factor = self.add_weight(shape=(self.units, self.channels),
                                      initializer='ones',
                                      name='factor',
                                      regularizer=self.bias_regularizer,
                                      constraint=self.bias_constraint)
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True

    def call(self, inputs):
        # (batch, input_dim) . (units, input_dim, channels) -> per-channel logits
        output = K.dot(inputs, self.kernel)
        if self.use_bias:
            output = output + self.bias
        output = K.tanh(output) * self.factor
        # collapse the channel axis
        output = K.sum(output, axis=-1)
        if self.activation is not None:
            output = self.activation(output)
        return output

    def compute_output_shape(self, input_shape):
        assert input_shape and (len(input_shape) >= 2)
        assert input_shape[-1]
        output_shape = list(input_shape)
        output_shape[-1] = self.units
        return tuple(output_shape)

    def get_config(self):
        # BUG FIX: the config previously used the key 'units' (which
        # __init__ does not accept — its parameter is 'outputs') and omitted
        # 'channels', so from_config()/model deserialization was broken.
        config = {'outputs': self.units,
                  'channels': self.channels,
                  'activation': activations.serialize(self.activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint)}
        base_config = super(MDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
# BUG FIX: the decorator was truncated to '.parametrize(...)', which is a
# syntax error — restore the '@pytest.mark' prefix.
@pytest.mark.parametrize('q', [quantize(symmetric=True, initialized=True),
                               quantize(symmetric=False, initialized=True),
                               quantize_dequantize(symmetric=True, initialized=True),
                               quantize_dequantize(symmetric=False, initialized=True)])
def test_backward(q: _QuantizerBase, x: torch.Tensor):
    """Gradients must flow to the quantizer's min/max parameters.

    *x* is supplied by a fixture defined elsewhere in the test module.
    """
    output = q(x)
    # backprop a zero cotangent; we only care that grads are populated
    output.backward(torch.zeros_like(output))
    assert q.min.grad is not None
    assert q.max.grad is not None
def box(text):
    """Frame *text* in a box drawn with the module's border characters."""
    lines = text.split('\n')
    w = width(lines)
    horizontal = HORIZONTAL_BAR * (2 + w)
    top = TOP_LEFT_CORNER + horizontal + TOP_RIGHT_CORNER
    bottom = BOTTOM_LEFT_CORNER + horizontal + BOTTOM_RIGHT_CORNER
    body = '\n'.join(LINES_FORMAT_STR.format(line=line, width=w) for line in lines)
    return top + '\n' + body + '\n' + bottom
def lookup_connections(backend, identities):
    """Return one Connection per identity on *backend*.

    *backend* may be a Backend instance or a backend name; both the backend
    and any missing connections are created on demand.
    """
    from rapidsms.models import Backend
    if isinstance(backend, str):
        backend, _ = Backend.objects.get_or_create(name=backend)
    # get_or_create returns (object, created); keep only the object
    return [backend.connection_set.get_or_create(identity=identity)[0]
            for identity in identities]
def evaluate_extractive(result_file, article_file, summary_file, entity_map_file=None, out_rouge_file=None, cmd='-a -c 95 -m -n 4 -w 1.2', length=(- 1), eval_type='lead', topk=3, rerank=False, with_m=False, add_full_stop=True, nsent_budget_file=None, nword_budget_file=None, multi_ref=False, trigram_block=False):
    """Generate extractive summaries from model predictions and score them
    with ROUGE.

    Writes per-document system/reference files into a temporary directory,
    runs ROUGE over them, optionally writes the ROUGE report to
    *out_rouge_file*, and returns ``(output_dict, output)``.
    """
    articles = doc2sents(article_file, add_full_stop)
    summaries = doc2sents(summary_file, add_full_stop)
    # optionally map anonymized entity placeholders back to surface forms
    if (entity_map_file is not None):
        entity_maps = load_entity(entity_map_file)
        articles = deanonymize(entity_maps, articles)
        summaries = deanonymize(entity_maps, summaries)
    vocab = load_dict(result_file)
    results = load_result(result_file)
    ndoc = len(articles)
    opts = {'eval_type': eval_type, 'topk': topk, 'rerank': rerank, 'with_m': with_m}
    # pid-suffixed scratch dir so concurrent evaluations don't collide
    outdir = os.path.join(os.path.dirname(out_rouge_file), ('__tmp__rouge.%d' % os.getpid()))
    mkdir(outdir)
    print('sum output dir: ', outdir)
    sysdir = os.path.join(outdir, 'sys')
    refdir = os.path.join(outdir, 'ref')
    mkdir(sysdir)
    mkdir(refdir)
    # optional per-document sentence / word budgets (one int per line)
    if nsent_budget_file:
        nsent_budgets = list(map(int, open(nsent_budget_file, encoding='utf8')))
        assert (ndoc == len(nsent_budgets))
    if nword_budget_file:
        nword_budgets = list(map(int, open(nword_budget_file, encoding='utf8')))
        assert (ndoc == len(nword_budgets))
    print(ndoc, len(results['PredictedLabels']))
    assert (ndoc == len(results['PredictedLabels']))
    pred_scores = results.get('Score', None)
    try:
        for docid in tqdm.tqdm(range(ndoc)):
            article = articles[docid]
            summary = summaries[docid]
            true_labels = results['TrueLabels'][docid]
            pred_labels = results['PredictedLabels'][docid]
            try:
                pred_dist = results['PredictedDistri'][docid]
            except IndexError:
                # distribution is optional in the result file
                pred_dist = []
            pred_score = (pred_scores[docid] if (pred_scores is not None) else None)
            nsent_budget = (nsent_budgets[docid] if nsent_budget_file else None)
            nword_budget = (nword_budgets[docid] if nword_budget_file else None)
            try:
                pred_summary = generate_summary(article, summary, true_labels, pred_labels, pred_dist, pred_score, opts, vocab, nsent_budget, nword_budget, trigram_block)
            except IndexError as e:
                # degenerate 2-sentence articles: fall back to the article itself
                if (len(article) == 2):
                    pred_summary = article
                else:
                    raise e
            except NotImplementedError as e:
                # NOTE(review): this removes outdir but does NOT abort the loop;
                # the subsequent write() and the finally-rmtree will then fail —
                # looks like a latent bug, confirm intended behavior.
                print(e)
                shutil.rmtree(outdir)
            def write(outfile, sents):
                # one sentence per line, trailing newline (ROUGE input format)
                with open(outfile, 'w', encoding=ENCODE) as fout:
                    fout.write('\n'.join(sents))
                    fout.write('\n')
            write(os.path.join(sysdir, ('%d.test' % docid)), pred_summary)
            if (not multi_ref):
                write(os.path.join(refdir, ('%d.gold' % docid)), summary)
            else:
                write_multi_ref(refdir, docid, summary)
        if (not multi_ref):
            (output_dict, output) = get_rouge(sysdir, refdir, cmd=cmd, length=length)
        else:
            (output_dict, output) = get_rouge_multi_ref(sysdir, refdir, cmd=cmd, length=length)
    finally:
        # always clean up the scratch directory
        # NOTE(review): if get_rouge raises, 'output' is unbound and the
        # return below would raise NameError — confirm callers rely on the
        # exception propagating instead.
        shutil.rmtree(outdir)
        pass
    if (out_rouge_file is not None):
        with open(out_rouge_file, 'w', encoding=ENCODE) as fout:
            fout.write(output)
    return (output_dict, output)
def remove_silence(silence_parts_list: list[tuple[(float, float)]], transcribed_data: list[TranscribedData]):
    """Trim detected silence intervals out of transcribed word timings.

    Words overlapping a silence are shortened; a silence strictly inside a
    word splits it, inserting a '~' continuation segment. Words entirely
    inside a silence are dropped. Returns the adjusted list (input items
    are mutated in place).
    """
    new_transcribed_data = []
    for data in transcribed_data:
        new_transcribed_data.append(data)
        origin_end = data.end
        was_split = False
        for (silence_start, silence_end) in silence_parts_list:
            # silence does not overlap this word at all
            if ((silence_start > origin_end) or (silence_end < data.start)):
                continue
            # silence lies completely inside the word -> split the word
            if ((silence_start >= data.start) and (silence_end <= origin_end)):
                next_index = (silence_parts_list.index((silence_start, silence_end)) + 1)
                if ((next_index < len(silence_parts_list)) and (silence_parts_list[next_index][0] < origin_end)):
                    # another silence also intersects this word: the split
                    # segment ends where the next silence begins
                    split_end = silence_parts_list[next_index][0]
                    if (silence_parts_list[next_index][1] >= origin_end):
                        # trailing space marks the end of the word
                        split_word = '~ '
                        is_word_end = True
                    else:
                        split_word = '~'
                        is_word_end = False
                else:
                    split_end = origin_end
                    split_word = '~ '
                    is_word_end = True
                split_data = TranscribedData({'conf': data.conf, 'word': split_word, 'end': split_end, 'start': silence_end, 'is_word_end': is_word_end})
                if (not was_split):
                    # first split: truncate the original word at the silence
                    data.end = silence_start
                    if ((data.end - data.start) < 0.1):
                        # remainder too short (<0.1s): shift the word past the
                        # silence instead of splitting
                        data.start = silence_end
                        data.end = split_end
                        continue
                # discard split fragments that are too short to keep
                if ((split_data.end - split_data.start) <= 0.1):
                    continue
                data.is_word_end = False
                if (data.word[(- 1)] == ' '):
                    data.word = data.word[:(- 1)]
                if ((split_data.end - split_data.start) > 0.1):
                    was_split = True
                    new_transcribed_data.append(split_data)
                elif ((split_word == '~ ') and (not data.is_word_end)):
                    # re-mark the previous segment as word-final
                    if (new_transcribed_data[(- 1)].word[(- 1)] != ' '):
                        new_transcribed_data[(- 1)].word += ' '
                    new_transcribed_data[(- 1)].is_word_end = True
                continue
            # silence fully covers the word -> drop it
            if ((silence_start < data.start) and (silence_end > origin_end)):
                new_transcribed_data.remove(data)
                break
            # silence overlaps the start of the word
            if (silence_start < data.start):
                data.start = silence_end
            # silence overlaps the end of the word
            if (silence_end > origin_end):
                data.end = silence_start
            # silences are ordered: nothing later can overlap this word
            if (silence_start > origin_end):
                break
    return new_transcribed_data
def text(session, *args, **kwargs):
    """Handle a raw text line from *session*: run nick replacement and pass
    the line to the command handler, updating session activity counters."""
    if not args or args[0] is None:
        return
    txt = args[0]
    # idle keep-alives only bump the idle counter
    if txt.strip() in _IDLE_COMMAND:
        session.update_session_counters(idle=True)
        return
    if session.account:
        puppet = session.puppet
        # puppeted sessions use the puppet's nicks and include account nicks
        nicks = puppet.nicks if puppet else session.account.nicks
        txt = nicks.nickreplace(txt, categories=('inputline', 'channel'),
                                include_account=bool(puppet))
    kwargs.pop('options', None)
    cmdhandler(session, txt, callertype='session', session=session, **kwargs)
    session.update_session_counters()
def test_format_failure_ignore_multidoc(run_line_simple, tmp_path):
    """With '--disable-formats *', a format-failing document must validate
    cleanly alongside a passing one in the same invocation."""
    schemafile = tmp_path / 'schema.json'
    schemafile.write_text(json.dumps(FORMAT_SCHEMA))
    paths = []
    for name, payload in (('doc1.json', FAILING_DOCUMENT), ('doc2.json', PASSING_DOCUMENT)):
        doc = tmp_path / name
        doc.write_text(json.dumps(payload))
        paths.append(str(doc))
    run_line_simple(['--disable-formats', '*', '--schemafile', str(schemafile)] + paths)
class BufferedOutput(Output):
    """An Output that collects everything written into an in-memory buffer."""

    def __init__(self, verbosity: Verbosity = Verbosity.NORMAL, decorated: bool = False, formatter: (Formatter | None) = None, supports_utf8: bool = True) -> None:
        super().__init__(decorated=decorated, verbosity=verbosity, formatter=formatter)
        self._buffer = StringIO()
        self._supports_utf8 = supports_utf8

    def fetch(self) -> str:
        """Return everything buffered so far and start a fresh buffer."""
        content, self._buffer = self._buffer.getvalue(), StringIO()
        return content

    def clear(self) -> None:
        """Discard any buffered content."""
        self._buffer = StringIO()

    def supports_utf8(self) -> bool:
        return self._supports_utf8

    def set_supports_utf8(self, supports_utf8: bool) -> None:
        self._supports_utf8 = supports_utf8

    def section(self) -> SectionOutput:
        """Create a SectionOutput that writes into this buffer."""
        return SectionOutput(
            self._buffer,
            self._section_outputs,
            verbosity=self.verbosity,
            decorated=self.is_decorated(),
            formatter=self.formatter,
        )

    def _write(self, message: str, new_line: bool = False) -> None:
        self._buffer.write(message + '\n' if new_line else message)
def deep_dgl_graph_copy(graph: DGLGraph):
    """Clone *graph* into a new DGLGraph: same nodes, edges, and the same
    edge/node feature tensors (tensors are shared, not copied)."""
    start = time()
    clone = DGLGraph()
    clone.add_nodes(graph.number_of_nodes())
    src, dst = graph.edges()
    clone.add_edges(src, dst)
    for name, tensor in graph.edata.items():
        clone.edata[name] = tensor
    for name, tensor in graph.ndata.items():
        clone.ndata[name] = tensor
    print('Graph copy take {:.2f} seconds'.format(time() - start))
    return clone
def _save_item_model(request, item: Item, form, change) -> None:
    """Admin save hook for Item: assign owner/issue on first save and stamp
    modified_at when the item transitions into 'active' status."""
    prev_status = False
    if (not item.pk):
        # new item: record who created it
        item.user = request.user
        if (not item.issue):
            la = lna = False
            qs = Issue.objects
            try:
                # la = latest active issue; lna = first issue created after it
                la = qs.filter(status='active').order_by('-pk')[0:1].get()
                lna = qs.filter(pk__gt=la.pk).order_by('pk')[0:1].get()
            except Issue.DoesNotExist as e:
                # either query may miss; la may still hold a value when only
                # the second lookup failed
                logger.warning('Not found last or recent issue')
            if (la or lna):
                # prefer the upcoming issue, fall back to the active one
                item.issue = (lna or la)
    else:
        # existing item: remember its stored status for the transition check
        old_obj = Item.objects.get(pk=item.pk)
        prev_status = old_obj.status
    new_status = form.cleaned_data.get('status')
    if ((not (prev_status == 'active')) and (new_status == 'active')):
        # NOTE(review): naive datetime.now() — presumably the project runs
        # without timezone-aware datetimes; confirm against settings.USE_TZ.
        item.modified_at = datetime.now()
def drop_channels(edf_source, edf_target=None, to_keep=None, to_drop=None):
    """Read *edf_source*, keeping only channel numbers in *to_keep*, and
    return a mapping of channel label -> signal array.

    Duplicate labels are disambiguated with '-2', '-3', ... suffixes.
    (Previously every duplicate got '-2', so a third occurrence silently
    overwrote the second.)

    NOTE(review): *edf_target* and *to_drop* are accepted but unused here —
    presumably consumed by a caller or a fuller implementation; confirm.
    """
    signals, signal_headers, header = hl.read_edf(edf_source, ch_nrs=to_keep, digital=False)
    clean_file = {}
    # 'sig_header' (per-channel) deliberately does not shadow the file-level
    # 'header' returned by read_edf, which the old loop variable clobbered
    for signal, sig_header in zip(signals, signal_headers):
        label = sig_header.get('label')
        channel, suffix = label, 2
        while channel in clean_file:
            channel = '%s-%d' % (label, suffix)
            suffix += 1
        clean_file[channel] = signal
    return clean_file
# BUG FIX: the decorator was truncated to '.parametrize(...)', which is a
# syntax error — restore the '@pytest.mark' prefix.
@pytest.mark.parametrize('configuration, expected_value', [
    ((0.0, 0.0, 1.0, 50.0, 100.0), 50.0),
    ((1.0, 0.0, 1.0, 50.0, 100.0), 100.0),
    ((0.5, 0.0, 1.0, 50.0, 100.0), 75.0),
    ((0.0, 0.5, 1.0, 50.0, 100.0), 50.0),
    ((0.75, 0.5, 1.0, 50.0, 100.0), 75.0),
])
def test_interpolation(configuration, expected_value):
    """_interpolate must map a position within [lower, upper] linearly onto
    [lower_value, upper_value], clamping below the lower bound."""
    current_position, lower_bound, upper_bound, lower_value, upper_value = configuration
    assert _interpolate(current_position, lower_bound, upper_bound, lower_value, upper_value) == expected_value
def test_features_for():
    """features_for() output, wrapped in a SecurityInformation, must match
    the reference security-information fixture (feature order normalized)."""
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, 'vulnerabilityreport.json')) as vuln_report_file:
        vuln_report = json.load(vuln_report_file)
    with open(os.path.join(here, 'securityinformation.json')) as security_info_file:
        security_info = json.load(security_info_file)
    expected = security_info['data']
    generated = SecurityInformation(
        Layer('sha256:b05ac1eeec8635442fa5d3e55d6ef4ad287b9c66055a552c2fd309c334563b0a',
              '', '', 4, features_for(vuln_report))
    ).to_dict()
    # sort both sides so the comparison is order-independent
    # (the expected list was previously sorted twice — once redundantly)
    by_name = operator.itemgetter('Name')
    expected['Layer']['Features'].sort(key=by_name)
    generated['Layer']['Features'].sort(key=by_name)
    assert generated == expected
class MLP(torch.nn.Module):
    """NCF-style multi-layer-perceptron recommender.

    Concatenates user and item embeddings, passes them through a stack of
    ReLU-activated linear layers, and produces a sigmoid rating in (0, 1).

    Expects ``config`` with keys: 'num_users', 'num_items', 'latent_dim',
    'layers' (list of layer widths; layers[0] must equal 2 * latent_dim),
    and — only for load_pretrain_weights — 'use_cuda', 'pretrain_mf',
    'device_id'.
    """

    def __init__(self, config):
        super(MLP, self).__init__()
        self.config = config
        self.num_users = config['num_users']
        self.num_items = config['num_items']
        self.latent_dim = config['latent_dim']
        self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim)
        self.embedding_item = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim)
        self.fc_layers = torch.nn.ModuleList()
        # consecutive (in, out) widths from the configured layer sizes
        for in_size, out_size in zip(config['layers'][:-1], config['layers'][1:]):
            self.fc_layers.append(torch.nn.Linear(in_size, out_size))
        self.affine_output = torch.nn.Linear(in_features=config['layers'][-1], out_features=1)
        self.logistic = torch.nn.Sigmoid()

    def forward(self, user_indices, item_indices):
        """Return predicted ratings of shape (batch, 1) in (0, 1)."""
        user_embedding = self.embedding_user(user_indices)
        item_embedding = self.embedding_item(item_indices)
        vector = torch.cat([user_embedding, item_embedding], dim=-1)
        # iterate the layers directly instead of indexing by position; use the
        # functional relu rather than constructing a ReLU module per step
        for layer in self.fc_layers:
            vector = torch.relu(layer(vector))
        logits = self.affine_output(vector)
        rating = self.logistic(logits)
        return rating

    def init_weight(self):
        """No-op: weights use the default torch initializers."""
        pass

    def load_pretrain_weights(self):
        """Copy user/item embedding weights from a pretrained GMF checkpoint."""
        config = self.config
        gmf_model = GMF(config)
        if config['use_cuda'] is True:
            gmf_model.cuda()
        resume_checkpoint(gmf_model, model_dir=config['pretrain_mf'], device_id=config['device_id'])
        self.embedding_user.weight.data = gmf_model.embedding_user.weight.data
        self.embedding_item.weight.data = gmf_model.embedding_item.weight.data
class TestSequenceImpl(TestNameCheckVisitorBase):
    """Checker tests for sequence-constructor (set/list/tuple) inference."""

    # BUG FIX: the decorators were truncated to bare '_passes()' calls on an
    # undefined name; restore the '@assert_passes()' decorator form.
    @assert_passes()
    def test(self):
        from typing import Sequence
        from typing_extensions import Literal

        def capybara(x, ints: Sequence[Literal[(1, 2)]]):
            assert_is_value(set(), KnownValue(set()))
            assert_is_value(list(), KnownValue([]))
            assert_is_value(tuple([1, 2, 3]), KnownValue((1, 2, 3)))
            one_two = MultiValuedValue([KnownValue(1), KnownValue(2)])
            assert_is_value(tuple((i for i in ints)), GenericValue(tuple, [one_two]))
            assert_is_value(tuple({i: i for i in ints}), GenericValue(tuple, [one_two]))
            assert_is_value(tuple([int(x)]), make_simple_sequence(tuple, [TypedValue(int)]))
            assert_is_value(tuple(x), GenericValue(tuple, [AnyValue(AnySource.generic_argument)]))
            assert_is_value(tuple(str(x)), GenericValue(tuple, [TypedValue(str)]))

    @assert_passes()
    def test_union(self):
        from typing import Sequence, Union
        from typing_extensions import Never

        def capybara(x: Union[(Sequence[int], Sequence[str])], never: Never):
            assert_is_value(tuple(x), (GenericValue(tuple, [TypedValue(int)]) | GenericValue(tuple, [TypedValue(str)])))
            assert_is_value(tuple(never), GenericValue(tuple, [NO_RETURN_VALUE]))

    @assert_passes()
    def test_not_iterable(self):
        def capybara(x):
            tuple(3)
            tuple(int(x))
class JsonConverter(Converter):
    """Converter with JSON string (de)serialization helpers."""

    def dumps(self, obj: Any, unstructure_as: Any = None, **kwargs: Any) -> str:
        """Unstructure *obj* and serialize the result to a JSON string."""
        unstructured = self.unstructure(obj, unstructure_as=unstructure_as)
        return dumps(unstructured, **kwargs)

    def loads(self, data: Union[(bytes, str)], cl: Type[T], **kwargs: Any) -> T:
        """Parse JSON *data* and structure it into an instance of *cl*."""
        raw = loads(data, **kwargs)
        return self.structure(raw, cl)
class ModelData():
    # Indexes an ONNX model's ops into ModuleInfo records (type, attributes,
    # parameters, and graph-level inputs/outputs) keyed by op name.
    def __init__(self, model: ModelProto):
        """Build the op-name -> ModuleInfo index for *model*."""
        self.model = model
        # op name -> ModuleInfo
        self.module_to_info = {}
        self._populate_model_data()

    def _populate_model_data(self):
        """Fill module_to_info from the connected graph, then attach the raw
        ONNX nodes' non-parameter inputs and all outputs."""
        cg = ConnectedGraph(self.model)
        for op in cg.ordered_ops:
            self.module_to_info[op.name] = ModuleInfo()
            # only weighted op types get a recorded 'type'
            if (op.type in ['Conv', 'ConvTranspose', 'Gemm', 'MatMul']):
                self.module_to_info[op.name].type = op.type
            if hasattr(op.get_module(), 'attribute'):
                self.module_to_info[op.name].attributes = op.get_module().attribute
            # op.parameters maps name -> (param, param_type); index by type
            for (param, param_type) in op.parameters.values():
                self.module_to_info[op.name].params[param_type] = param
        for node in self.model.graph.node:
            if (node.name in self.module_to_info):
                module_info = self.module_to_info[node.name]
                # parameter tensors are excluded from the activation inputs
                param = {param.name for param in module_info.params.values()}
                for input_name in node.input:
                    if (input_name not in param):
                        module_info.inputs.append(input_name)
                for output_name in node.output:
                    module_info.outputs.append(output_name)
class DescribeCT_Row:
    """Unit tests for the CT_Row (w:tr) custom XML element."""

    def it_can_add_a_trPr(self, add_trPr_fixture):
        tr, expected_xml = add_trPr_fixture
        tr._add_trPr()
        assert tr.xml == expected_xml

    def it_raises_on_tc_at_grid_col(self, tc_raise_fixture):
        tr, idx = tc_raise_fixture
        with pytest.raises(ValueError):
            tr.tc_at_grid_col(idx)

    # fixtures -------------------------------------------------------

    # BUG FIX: both fixture decorators were truncated to bare '(params=[...])'
    # expressions; restore the '@pytest.fixture' decorator form.
    @pytest.fixture(params=[
        ('w:tr', 'w:tr/w:trPr'),
        ('w:tr/w:tblPrEx', 'w:tr/(w:tblPrEx,w:trPr)'),
        ('w:tr/w:tc', 'w:tr/(w:trPr,w:tc)'),
        ('w:tr/(w:sdt,w:del,w:tc)', 'w:tr/(w:trPr,w:sdt,w:del,w:tc)'),
    ])
    def add_trPr_fixture(self, request):
        tr_cxml, expected_cxml = request.param
        tr = element(tr_cxml)
        expected_xml = xml(expected_cxml)
        return (tr, expected_xml)

    @pytest.fixture(params=[(0, 0, 3), (1, 0, 1)])
    def tc_raise_fixture(self, request):
        snippet_idx, row_idx, col_idx = request.param
        tbl = parse_xml(snippet_seq('tbl-cells')[snippet_idx])
        tr = tbl.tr_lst[row_idx]
        return (tr, col_idx)
def train_model(max_epochs):
    """Train Model(20) with SGD + NLL loss via ignite, evaluating accuracy
    and NLL on the test split after each epoch, and return the model.

    NOTE(review): X_train/y_train/X_test/y_test and batch_size are
    module-level globals defined elsewhere in this file — confirm.
    """
    model = Model(20)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0001)

    def step(engine, batch):
        # one optimization step; returns the scalar loss for logging
        model.train()
        optimizer.zero_grad()
        x, y = batch
        y_pred = model(x)
        loss = F.nll_loss(y_pred, y)
        loss.backward()
        optimizer.step()
        return loss.item()

    def eval_step(engine, batch):
        # forward-only pass; metrics consume the (y_pred, y) pair
        model.eval()
        x, y = batch
        y_pred = model(x)
        return (y_pred, y)

    trainer = Engine(step)
    evaluator = Engine(eval_step)
    metric = Accuracy()
    metric.attach(evaluator, 'accuracy')
    metric = Loss(F.nll_loss)
    metric.attach(evaluator, 'nll')
    ds_train = torch.utils.data.TensorDataset(torch.from_numpy(X_train).float(), torch.from_numpy(y_train))
    dl_train = torch.utils.data.DataLoader(ds_train, batch_size=batch_size, shuffle=True, drop_last=True)
    ds_test = torch.utils.data.TensorDataset(torch.from_numpy(X_test).float(), torch.from_numpy(y_test))
    dl_test = torch.utils.data.DataLoader(ds_test, batch_size=200, shuffle=False)

    # BUG FIX: the decorator was truncated to a bare '(Events.EPOCH_COMPLETED)'
    # expression; restore the '@trainer.on' handler registration.
    @trainer.on(Events.EPOCH_COMPLETED)
    def log_results(trainer):
        evaluator.run(dl_test)
        metrics = evaluator.state.metrics
        print(f"Test Results - Epoch: {trainer.state.epoch} Acc: {metrics['accuracy']:.4f} NLL: {metrics['nll']:.2f}")

    trainer.run(dl_train, max_epochs=max_epochs)
    return model
class TestPerTestCapturing:
    """End-to-end checks that pytest captures stdout/stderr per test phase."""

    def test_capture_and_fixtures(self, pytester: Pytester) -> None:
        p = pytester.makepyfile('\n def setup_module(mod):\n print("setup module")\n def setup_function(function):\n print("setup " + function.__name__)\n def test_func1():\n print("in func1")\n assert 0\n def test_func2():\n print("in func2")\n assert 0\n ')
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(['setup module*', 'setup test_func1*', 'in func1*', 'setup test_func2*', 'in func2*'])

    # BUG FIX: the decorator was truncated to '.xfail(...)', a syntax error;
    # restore the '@pytest.mark' prefix.
    @pytest.mark.xfail(reason='unimplemented feature')
    def test_capture_scope_cache(self, pytester: Pytester) -> None:
        p = pytester.makepyfile('\n import sys\n def setup_module(func):\n print("module-setup")\n def setup_function(func):\n print("function-setup")\n def test_func():\n print("in function")\n assert 0\n def teardown_function(func):\n print("in teardown")\n ')
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(['*test_func():*', '*Captured stdout during setup*', 'module-setup*', 'function-setup*', '*Captured stdout*', 'in teardown*'])

    def test_no_carry_over(self, pytester: Pytester) -> None:
        p = pytester.makepyfile('\n def test_func1():\n print("in func1")\n def test_func2():\n print("in func2")\n assert 0\n ')
        result = pytester.runpytest(p)
        s = result.stdout.str()
        # passing tests' captured output must not leak into failing tests' report
        assert 'in func1' not in s
        assert 'in func2' in s

    def test_teardown_capturing(self, pytester: Pytester) -> None:
        p = pytester.makepyfile('\n def setup_function(function):\n print("setup func1")\n def teardown_function(function):\n print("teardown func1")\n assert 0\n def test_func1():\n print("in func1")\n pass\n ')
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(['*teardown_function*', '*Captured stdout*', 'setup func1*', 'in func1*', 'teardown func1*'])

    def test_teardown_capturing_final(self, pytester: Pytester) -> None:
        p = pytester.makepyfile('\n def teardown_module(mod):\n print("teardown module")\n assert 0\n def test_func():\n pass\n ')
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(['*def teardown_module(mod):*', '*Captured stdout*', '*teardown module*', '*1 error*'])

    def test_capturing_outerr(self, pytester: Pytester) -> None:
        p1 = pytester.makepyfile(' import sys\n def test_capturing():\n print(42)\n sys.stderr.write(str(23))\n def test_capturing_error():\n print(1)\n sys.stderr.write(str(2))\n raise ValueError\n ')
        result = pytester.runpytest(p1)
        result.stdout.fnmatch_lines(['*test_capturing_outerr.py .F*', '====* FAILURES *====', '____*____', '*test_capturing_outerr.py:8: ValueError', '*--- Captured stdout *call*', '1', '*--- Captured stderr *call*', '2'])
class Checkpoint(object):
    """Bundles a model, optimizer state, training progress, and vocabularies,
    with save/load helpers under ``<experiment_dir>/checkpoints/<timestamp>``.
    """

    CHECKPOINT_DIR_NAME = 'checkpoints'
    TRAINER_STATE_NAME = 'trainer_states.pt'
    MODEL_NAME = 'model.pt'
    INPUT_VOCAB_FILE = 'input_vocab.pt'
    OUTPUT_VOCAB_FILE = 'output_vocab.pt'

    def __init__(self, model, optimizer, epoch, step, input_vocab, output_vocab, path=None):
        self.model = model
        self.optimizer = optimizer
        self.input_vocab = input_vocab
        self.output_vocab = output_vocab
        self.epoch = epoch
        self.step = step
        self._path = path

    def path(self):
        """Return the directory this checkpoint was saved to.

        Raises LookupError if the checkpoint has never been saved.
        NOTE(review): reads like it was meant to be a @property upstream;
        kept as a plain method to preserve the call-site interface.
        """
        if self._path is None:
            raise LookupError('The checkpoint has not been saved.')
        return self._path

    def save(self, experiment_dir):
        """Serialize this checkpoint into a fresh timestamped directory under
        *experiment_dir*/checkpoints and return that directory's path."""
        date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
        self._path = os.path.join(experiment_dir, self.CHECKPOINT_DIR_NAME, date_time)
        path = self._path
        # replace any stale directory from a previous save at the same second
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)
        torch.save({'epoch': self.epoch, 'step': self.step, 'optimizer': self.optimizer},
                   os.path.join(path, self.TRAINER_STATE_NAME))
        torch.save(self.model, os.path.join(path, self.MODEL_NAME))
        with open(os.path.join(path, self.INPUT_VOCAB_FILE), 'wb') as fout:
            dill.dump(self.input_vocab, fout)
        with open(os.path.join(path, self.OUTPUT_VOCAB_FILE), 'wb') as fout:
            dill.dump(self.output_vocab, fout)
        return path

    # BUG FIX: load() and get_latest_checkpoint() take `cls` but were missing
    # @classmethod, so calling them on the class passed the path as `cls`.
    @classmethod
    def load(cls, path):
        """Deserialize the checkpoint stored at *path* and return it.

        Maps tensors to CPU when CUDA is unavailable.
        """
        if torch.cuda.is_available():
            resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))
            model = torch.load(os.path.join(path, cls.MODEL_NAME))
        else:
            resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME),
                                           map_location=(lambda storage, loc: storage))
            model = torch.load(os.path.join(path, cls.MODEL_NAME),
                               map_location=(lambda storage, loc: storage))
        with open(os.path.join(path, cls.INPUT_VOCAB_FILE), 'rb') as fin:
            input_vocab = dill.load(fin)
        with open(os.path.join(path, cls.OUTPUT_VOCAB_FILE), 'rb') as fin:
            output_vocab = dill.load(fin)
        optimizer = resume_checkpoint['optimizer']
        return Checkpoint(model=model, input_vocab=input_vocab, output_vocab=output_vocab,
                          optimizer=optimizer, epoch=resume_checkpoint['epoch'],
                          step=resume_checkpoint['step'], path=path)

    @classmethod
    def get_latest_checkpoint(cls, experiment_path):
        """Return the most recent checkpoint directory under *experiment_path*.

        Relies on the timestamped directory names sorting lexicographically.
        """
        checkpoints_path = os.path.join(experiment_path, cls.CHECKPOINT_DIR_NAME)
        all_times = sorted(os.listdir(checkpoints_path), reverse=True)
        return os.path.join(checkpoints_path, all_times[0])
class ServoFlags(Value):
    """Bitmask Value holding servo status/fault flags.

    The low 16 bits (DRIVER_MASK) are reported by the motor driver itself;
    the bits above that are set by the controller.
    """

    SYNC = 1
    OVERTEMP_FAULT = 2
    OVERCURRENT_FAULT = 4
    ENGAGED = 8
    INVALID = 16 * 1
    PORT_PIN_FAULT = 16 * 2
    STARBOARD_PIN_FAULT = 16 * 4
    BADVOLTAGE_FAULT = 16 * 8
    MIN_RUDDER_FAULT = 256 * 1
    MAX_RUDDER_FAULT = 256 * 2
    CURRENT_RANGE = 256 * 4
    BAD_FUSES = 256 * 8
    REBOOTED = 256 * 16 * 8
    sz = 256 * 256
    DRIVER_MASK = sz - 1
    PORT_OVERCURRENT_FAULT = sz * 1
    STARBOARD_OVERCURRENT_FAULT = sz * 2
    DRIVER_TIMEOUT = sz * 4
    SATURATED = sz * 8

    # flags rendered by get_str(), in report order, each with a trailing
    # space; REBOOTED is handled separately (appended last, no trailing
    # space). NOTE(review): CURRENT_RANGE is never reported — confirm
    # whether that omission is deliberate.
    _REPORTED_FLAGS = (
        'SYNC', 'OVERTEMP_FAULT', 'OVERCURRENT_FAULT', 'ENGAGED',
        'INVALID', 'PORT_PIN_FAULT', 'STARBOARD_PIN_FAULT',
        'BADVOLTAGE_FAULT', 'MIN_RUDDER_FAULT', 'MAX_RUDDER_FAULT',
        'BAD_FUSES', 'PORT_OVERCURRENT_FAULT',
        'STARBOARD_OVERCURRENT_FAULT', 'DRIVER_TIMEOUT', 'SATURATED',
    )

    def __init__(self, name):
        super(ServoFlags, self).__init__(name, 0)

    def get_str(self):
        """Render the set flags as space-separated names."""
        ret = ''.join(flag + ' '
                      for flag in self._REPORTED_FLAGS
                      if self.value & getattr(self, flag))
        if self.value & self.REBOOTED:
            ret += 'REBOOTED'
        return ret

    def get_msg(self):
        """Quoted, trimmed flag string for the message protocol."""
        return '"' + self.get_str().strip() + '"'

    def setbit(self, bit, t=True):
        """Set *bit* when *t* is truthy, otherwise clear it."""
        new_value = (self.value | bit) if t else (self.value & ~bit)
        self.update(new_value)

    def clearbit(self, bit):
        self.setbit(bit, False)

    def port_overcurrent_fault(self):
        # the two overcurrent faults are mutually exclusive
        self.update((self.value | ServoFlags.PORT_OVERCURRENT_FAULT)
                    & ~ServoFlags.STARBOARD_OVERCURRENT_FAULT)

    def starboard_overcurrent_fault(self):
        self.update((self.value | ServoFlags.STARBOARD_OVERCURRENT_FAULT)
                    & ~ServoFlags.PORT_OVERCURRENT_FAULT)
def _create_app(emails=True):
    """Build a Flask test app that mimics an external JWT auth provider.

    Serves /user/query, /user/get and /user/verify, each answering with a
    short-lived RS256-signed JWT. Returns (app, port, public_key).
    """
    global _PORT_NUMBER
    # each app instance gets a unique port so tests can run side by side
    _PORT_NUMBER = _PORT_NUMBER + 1
    public_key, private_key_data = _generate_certs()
    users = [{'name': 'cool.user', 'email': '', 'password': 'password'}, {'name': 'some.neat.user', 'email': '', 'password': 'foobar'}, {'name': 'blacklistedcom', 'email': '', 'password': 'somepass'}, {'name': 'blacklistednet', 'email': '', 'password': 'somepass'}, {'name': 'blacklistedorg', 'email': '', 'password': 'somepass'}, {'name': 'notblacklistedcom', 'email': '', 'password': 'somepass'}]
    jwt_app = Flask('testjwt')
    jwt_app.config['SERVER_HOSTNAME'] = ('localhost:%s' % _PORT_NUMBER)

    def _get_basic_auth():
        # decode "Authorization: Basic ..." into (username, password)
        data = base64.b64decode(request.headers['Authorization'][len('Basic '):]).decode('utf-8')
        return data.split(':', 1)

    # BUG FIX: the three route decorators were truncated to bare
    # '_app.route(...)' statements on an undefined name; restore the
    # '@jwt_app.route' decorator form.
    @jwt_app.route('/user/query', methods=['GET'])
    def query_users():
        query = request.args.get('query')
        results = []
        for user in users:
            if user['name'].startswith(query):
                result = {'username': user['name']}
                if emails:
                    result['email'] = user['email']
                results.append(result)
        token_data = {'iss': 'authy', 'aud': 'quay.io/jwtauthn/query', 'nbf': datetime.utcnow(), 'iat': datetime.utcnow(), 'exp': (datetime.utcnow() + timedelta(seconds=60)), 'results': results}
        encoded = jwt.encode(token_data, private_key_data, 'RS256')
        return jsonify({'token': encoded})

    @jwt_app.route('/user/get', methods=['GET'])
    def get_user():
        username = request.args.get('username')
        if username == 'disabled':
            return make_response('User is currently disabled', 401)
        for user in users:
            if (user['name'] == username) or (user['email'] == username):
                token_data = {'iss': 'authy', 'aud': 'quay.io/jwtauthn/getuser', 'nbf': datetime.utcnow(), 'iat': datetime.utcnow(), 'exp': (datetime.utcnow() + timedelta(seconds=60)), 'sub': user['name'], 'email': user['email']}
                encoded = jwt.encode(token_data, private_key_data, 'RS256')
                return jsonify({'token': encoded})
        return make_response('Invalid username or password', 404)

    @jwt_app.route('/user/verify', methods=['GET'])
    def verify_user():
        username, password = _get_basic_auth()
        if username == 'disabled':
            return make_response('User is currently disabled', 401)
        for user in users:
            if (user['name'] == username) or (user['email'] == username):
                if password != user['password']:
                    return make_response('', 404)
                token_data = {'iss': 'authy', 'aud': 'quay.io/jwtauthn', 'nbf': datetime.utcnow(), 'iat': datetime.utcnow(), 'exp': (datetime.utcnow() + timedelta(seconds=60)), 'sub': user['name'], 'email': user['email']}
                encoded = jwt.encode(token_data, private_key_data, 'RS256')
                return jsonify({'token': encoded})
        return make_response('Invalid username or password', 404)

    jwt_app.config['TESTING'] = True
    return (jwt_app, _PORT_NUMBER, public_key)
class IterativeRefinementGenerator(nn.Module):
    """Scriptable wrapper that decodes with a single non-autoregressive
    model by iterative refinement (up to ``max_iter + 1`` passes)."""

    def __init__(self, models, tgt_dict, eos_penalty=0.0, max_iter=2, max_ratio=2, decoding_format=None, retain_dropout=False, adaptive=True):
        """Cache special-symbol ids from ``tgt_dict`` and prepare each model
        for ONNX export / eval; distilled wrappers are unwrapped via
        ``get_student_model`` before being registered as submodules."""
        super().__init__()
        self.models = models
        self.bos = tgt_dict.bos()
        self.pad = tgt_dict.pad()
        self.unk = tgt_dict.unk()
        self.eos = tgt_dict.eos()
        self.vocab_size = len(tgt_dict)
        self.eos_penalty = eos_penalty
        self.max_iter = max_iter
        self.max_ratio = max_ratio
        self.decoding_format = decoding_format
        self.retain_dropout = retain_dropout
        self.adaptive = adaptive
        for (i, model) in enumerate(self.models):
            model.prepare_for_onnx_export_()
            model.eval()
            if hasattr(model, 'get_student_model'):
                model = model.get_student_model()
                self.models[i] = model
            # Register explicitly so the (possibly swapped) model is tracked
            # as a submodule for scripting/state-dict purposes.
            self._modules[f'model_{i}'] = model

    def forward(self, src_tokens: torch.Tensor, src_lengths: torch.Tensor) -> Tuple[Tuple[(Tensor, Tensor, Tensor)]]:
        """TorchScript-friendly entry point: per sentence, a tuple of
        (tokens, mean score, attention)."""
        (o1, o2, o3, _) = self.generate(self.models, src_tokens, src_lengths)
        return tuple(((x, y.float().mean(), z) for (x, y, z) in zip(o1, o2, o3)))

    # NOTE(review): garbled decorator -- presumably `@torch.no_grad()`.
    _grad()
    def generate(self, models, src_tokens, src_lengths, prefix_tokens=None):
        """Iteratively refine decoder outputs, finalizing each sentence as
        soon as it stops changing (or on the last allowed step). Returns
        per-sentence lists of tokens, scores, attentions, alignments."""
        assert (len(models) == 1), 'only support single model'
        model = models[0]
        (bsz, src_len) = src_tokens.size()
        # Original sentence index of each still-active batch row.
        sent_idxs = torch.arange(bsz)
        encoder_out = model.encoder(src_tokens, src_lengths)
        prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
        prev_output_tokens = prev_decoder_out.output_tokens.clone()
        # Placeholder scalars; filled in as sentences terminate.
        finalized_tokens_list = [torch.tensor(0) for _ in range(bsz)]
        finalized_scores_list = [torch.tensor(0) for _ in range(bsz)]
        finalized_attns_list = [torch.tensor(0) for _ in range(bsz)]
        finalized_alignments_list = [torch.tensor(0) for _ in range(bsz)]
        for step in range((self.max_iter + 1)):
            prev_decoder_out = prev_decoder_out._replace(step=step, max_step=(self.max_iter + 1))
            decoder_out = model.forward_decoder(prev_decoder_out, encoder_out, eos_penalty=self.eos_penalty, max_ratio=self.max_ratio, decoding_format=self.decoding_format)
            # A sentence terminates when its output stops changing...
            (terminated, output_tokens, output_scores, output_attn) = is_a_loop(self.pad, prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn)
            decoder_out = decoder_out._replace(output_tokens=output_tokens, output_scores=output_scores, attn=output_attn)
            # ...or when this is the last allowed refinement step.
            terminated = last_step(step, self.max_iter, terminated)
            finalized_idxs = sent_idxs[terminated]
            finalized_tokens = decoder_out.output_tokens[terminated]
            finalized_scores = decoder_out.output_scores[terminated]
            finalized_attn = (None if (decoder_out.attn is None) else decoder_out.attn[terminated])
            finalized_tokens_list = finalize_hypos_loop_tokens(finalized_tokens_list, finalized_idxs, self.pad, finalized_tokens, finalized_scores)
            finalized_scores_list = finalize_hypos_loop_scores(finalized_scores_list, finalized_idxs, self.pad, finalized_tokens, finalized_scores)
            (finalized_attns_list, finalized_alignments_list) = finalize_hypos_loop_attns(finalized_attns_list, finalized_alignments_list, finalized_idxs, self.pad, finalized_tokens, finalized_scores, finalized_attn)
            # Keep only unterminated rows for the next pass.
            not_terminated = (~ terminated)
            prev_decoder_out = decoder_out._replace(output_tokens=script_skip_tensor(decoder_out.output_tokens, not_terminated), output_scores=script_skip_tensor(decoder_out.output_scores, not_terminated), attn=decoder_out.attn, step=decoder_out.step, max_step=decoder_out.max_step)
            encoder_out = EncoderOut(encoder_out=script_skip_tensor(encoder_out.encoder_out, (~ terminated)), encoder_padding_mask=None, encoder_embedding=script_skip_tensor(encoder_out.encoder_embedding, (~ terminated)), encoder_states=None, src_tokens=None, src_lengths=None)
            sent_idxs = script_skip_tensor(sent_idxs, not_terminated)
            prev_output_tokens = prev_decoder_out.output_tokens.clone()
        return (finalized_tokens_list, finalized_scores_list, finalized_attns_list, finalized_alignments_list)
# NOTE(review): garbled decorator -- presumably
# `@patch('pypyr.config.config.init')` (it supplies mock_config_init).
('pypyr.config.config.init')
def test_main_pass_with_sysargv_single_group(mock_config_init):
    """CLI main() must forward parsed sys.argv flags (loglevel, dir,
    groups, success/failure handlers) to pypyr.pipelinerunner.run."""
    arg_list = ['pypyr', 'blah', 'ctx string', '--loglevel', '50', '--dir', 'dir here', '--groups', 'group1', '--success', 'sg', '--failure', 'f g']
    with patch('sys.argv', arg_list):
        with patch('pypyr.pipelinerunner.run') as mock_pipeline_run:
            with patch('pypyr.log.logger.set_root_logger') as mock_logger:
                pypyr.cli.main()
            mock_logger.assert_called_once_with(log_level=50, log_path=None)
            mock_config_init.assert_called_once()
            mock_pipeline_run.assert_called_once_with(pipeline_name='blah', args_in=['ctx string'], parse_args=True, py_dir='dir here', groups=['group1'], success_group='sg', failure_group='f g')
class ResourceAllocation(Predictor):
    """Resource-allocation index link predictor.

    Each common neighbour z of a candidate pair (u, v) contributes
    1 / degree(z) (or a weighted analogue) to the pair's score.
    """

    def predict(self, weight=None):
        """Score all likely pairs; only pairs with a positive score are
        recorded in the returned Scoresheet."""
        scores = Scoresheet()
        for (u, v) in self.likely_pairs():
            common = set(neighbourhood(self.G, u)) & set(neighbourhood(self.G, v))
            total = 0
            for z in common:
                if weight is not None:
                    # Weighted variant: product of the two edge weights.
                    contribution = float(self.G[u][z][weight] * self.G[v][z][weight])
                else:
                    contribution = 1
                total += contribution / neighbourhood_size(self.G, z, weight)
            if total > 0:
                scores[(u, v)] = total
        return scores
# NOTE(review): garbled decorator -- presumably
# `@pytest.mark.parametrize(['constraint', 'expected'], [...])`.
.parametrize(['constraint', 'expected'], [('*', ['19.10b0']), ('>=19.0a0', ['19.10b0']), ('>=20.0a0', []), ('>=21.11b0', []), ('==21.11b0', ['21.11b0'])])
def test_find_packages_yanked(constraint: str, expected: list[str]) -> None:
    """Yanked releases (21.11b0) must only be returned when pinned with an
    exact `==` constraint."""
    repo = MockRepository()
    packages = repo.find_packages(Factory.create_dependency('black', constraint))
    assert ([str(p.version) for p in packages] == expected)
# NOTE(review): garbled decorator -- presumably
# `@pytest.fixture(name='fake_dataset')`.
(name='fake_dataset')
def fixture_fake_dataset():
    """Build a small synthetic FIDUCEO/MVIRI-style xarray.Dataset: 4x4 VIS
    counts, 2x2 IR/WV counts, acquisition times, a quality bitmask, tied
    solar zenith angles and the calibration coefficients the reader expects."""
    count_ir = da.linspace(0, 255, 4, dtype=np.uint8).reshape(2, 2)
    count_wv = da.linspace(0, 255, 4, dtype=np.uint8).reshape(2, 2)
    count_vis = da.linspace(0, 255, 16, dtype=np.uint8).reshape(4, 4)
    sza = da.from_array(np.array([[45, 90], [0, 45]], dtype=np.float32))
    # One flagged pixel at (2, 2) in the 4x4 bitmask.
    mask = da.from_array(np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0]], dtype=np.uint8))
    # NOTE(review): `* .0` zeroes every timestamp; a scale factor (e.g. 1e9
    # for nanoseconds) looks garbled here -- confirm against the original.
    time = (((np.arange(4) * 60) * 60) * .0)
    time = time.astype('datetime64[ns]').reshape(2, 2)
    # `vis_refl_exp` / `u_vis_refl_exp` are expected-value arrays defined
    # elsewhere in this file.
    ds = xr.Dataset(data_vars={'count_vis': (('y', 'x'), count_vis), 'count_wv': (('y_ir_wv', 'x_ir_wv'), count_wv), 'count_ir': (('y_ir_wv', 'x_ir_wv'), count_ir), 'toa_bidirectional_reflectance_vis': (vis_refl_exp / 100), 'u_independent_toa_bidirectional_reflectance': (u_vis_refl_exp / 100), 'quality_pixel_bitmask': (('y', 'x'), mask), 'solar_zenith_angle': (('y_tie', 'x_tie'), sza), 'time_ir_wv': (('y_ir_wv', 'x_ir_wv'), time), 'a_ir': (- 5.0), 'b_ir': 1.0, 'bt_a_ir': 10.0, 'bt_b_ir': (- 1000.0), 'a_wv': (- 0.5), 'b_wv': 0.05, 'bt_a_wv': 10.0, 'bt_b_wv': (- 2000.0), 'years_since_launch': 20.0, 'a0_vis': 1.0, 'a1_vis': 0.01, 'a2_vis': (- 0.0001), 'mean_count_space_vis': 1.0, 'distance_sun_earth': 1.0, 'solar_irradiance_vis': 650.0, 'sub_satellite_longitude_start': 57.1, 'sub_satellite_longitude_end': np.nan, 'sub_satellite_latitude_start': np.nan, 'sub_satellite_latitude_end': 0.1}, coords={'y': [1, 2, 3, 4], 'x': [1, 2, 3, 4], 'y_ir_wv': [1, 2], 'x_ir_wv': [1, 2], 'y_tie': [1, 2], 'x_tie': [1, 2]}, attrs={'foo': 'bar'})
    ds['count_ir'].attrs['ancillary_variables'] = 'a_ir b_ir'
    ds['count_wv'].attrs['ancillary_variables'] = 'a_wv b_wv'
    return ds
def get_num_processes():
    """Resolve config.NUMBER_OF_CORES against the machine's CPU count.

    Semantics:
      0   -> ValueError (disallowed).
      > available -> log and fall back to all available cores.
      < 0 -> "all but N-1" (e.g. -1 means all cores); too-negative values
             raise ValueError.
      otherwise -> the configured value as-is.
    """
    available = multiprocessing.cpu_count()
    requested = config.NUMBER_OF_CORES
    if requested == 0:
        raise ValueError('Invalid NUMBER_OF_CORES; value may not be 0.')
    if requested > available:
        log.info('Requesting %s cores; only %s available', requested, available)
        return available
    if requested < 0:
        resolved = available + requested + 1
        if resolved <= 0:
            raise ValueError(f'Invalid NUMBER_OF_CORES; negative value is too negative: requesting {resolved} cores, {available} available.')
        return resolved
    return requested
def test_item(func: Callable[(..., bool)], description: str) -> Parser:
    """Build a Parser that consumes a single item satisfying ``func``.

    On success the parser advances one position and yields the item; on
    end-of-input or a failing predicate it fails with ``description``.
    """
    def test_item_parser(stream, index):
        # Out of input: fail without consuming anything.
        if index >= len(stream):
            return Result.failure(index, description)
        # Slicing keeps a bytes item as bytes (plain indexing would give int).
        if isinstance(stream, bytes):
            item = stream[index:index + 1]
        else:
            item = stream[index]
        if func(item):
            return Result.success(index + 1, item)
        return Result.failure(index, description)
    return test_item_parser
def read_plane_paramters_file(filepath):
    """Parse a plane-parameters text file into a list of plane dicts.

    Each non-comment, non-blank line is whitespace-separated; the fields
    used are: 0 = plane index, 1 = number of inlier points, 5-7 = normal
    (nx, ny, nz), 8-10 = a support point on the plane (sx, sy, sz).

    Returns a list of dicts carrying the raw fields plus the normal vector
    (np.ndarray) and the plane offset d = n . (4 * s).
    """
    planes = []
    # 'with' guarantees the handle is closed (the original leaked it), and
    # streaming line-by-line avoids reading the whole file into memory.
    with open(filepath, 'r') as file:
        for line in file:
            stripped = line.strip()
            # Skip comments and blank lines (a blank line would have
            # crashed the original on paras[0]).
            if (not stripped) or stripped.startswith('#'):
                continue
            paras = stripped.split()
            plane = {'index': int(paras[0]), 'num_of_points': int(paras[1]),
                     # Inlier fraction relative to a 640x480 image.
                     'ratio': (float(paras[1]) / (640.0 * 480.0)),
                     'nx': float(paras[5]), 'ny': float(paras[6]), 'nz': float(paras[7]),
                     'sx': float(paras[8]), 'sy': float(paras[9]), 'sz': float(paras[10])}
            normal = np.asarray([plane['nx'], plane['ny'], plane['nz']])
            center_point = np.asarray([plane['sx'], plane['sy'], plane['sz']]).transpose()
            # NOTE(review): the 4.0 scale on the support point is kept from
            # the original -- presumably a unit conversion; confirm.
            offset = np.matmul(normal, (4.0 * center_point))
            plane['normal'] = normal
            plane['offset'] = offset
            planes.append(plane)
    return planes
class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """Merge two sorted singly linked lists into one sorted list.

        Stable: on equal values the node from l1 comes first. Nodes are
        relinked in place; no new nodes are allocated beyond the dummy.
        """
        dummy = ListNode((- 1))
        tail = dummy
        while (l1 is not None) and (l2 is not None):
            if l1.val <= l2.val:
                tail.next = l1
                l1 = l1.next
            else:
                tail.next = l2
                l2 = l2.next
            tail = tail.next
        # At most one list still has nodes; splice the remainder on.
        tail.next = l1 if l1 is not None else l2
        return dummy.next
class BatchScoringFunction(ScoringFunction):
    """Scoring function whose subclasses compute raw scores for whole
    batches of SMILES; single-molecule scoring delegates to the batch path.

    A raw score of None marks a failed molecule and maps to
    ``self.corrupt_score``; all other raw scores pass through the score
    modifier.
    """

    def __init__(self, score_modifier: ScoreModifier=None) -> None:
        """Forward the optional score modifier to the base class."""
        super().__init__(score_modifier=score_modifier)

    def score(self, smiles: str) -> float:
        """Score one molecule via the batched implementation."""
        return self.score_list([smiles])[0]

    def score_list(self, smiles_list: List[str]) -> List[float]:
        """Score a batch, substituting corrupt_score for failures."""
        raw_scores = self.raw_score_list(smiles_list)
        return [self.corrupt_score if raw is None else self.modify_score(raw)
                for raw in raw_scores]

    def raw_score_list(self, smiles_list: List[str]) -> List[float]:
        """Subclass hook: raw scores for a batch (None = failure)."""
        raise NotImplementedError
class MarioNet(nn.Module):
    """CNN value network with a trainable 'online' branch and a frozen
    'target' deep copy (double-DQN style).

    Input frames must be 84x84; the conv stack's flattened size (3136)
    depends on it.
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        (c, h, w) = input_dim
        if h != 84:
            raise ValueError(f'Expecting input height: 84, got: {h}')
        if w != 84:
            raise ValueError(f'Expecting input width: 84, got: {w}')
        self.online = self._build_net(c, output_dim)
        # Target starts as an exact copy and is never trained by gradients.
        self.target = copy.deepcopy(self.online)
        for param in self.target.parameters():
            param.requires_grad = False

    @staticmethod
    def _build_net(in_channels, output_dim):
        """Three conv+ReLU stages, then a two-layer dense head."""
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(3136, 512),
            nn.ReLU(),
            nn.Linear(512, output_dim),
        )

    def forward(self, input, model):
        """Run the requested branch: 'online' or 'target'."""
        if model == 'online':
            return self.online(input)
        if model == 'target':
            return self.target(input)
def tabulate_events(dpath):
    """Aggregate scalar summaries from every TensorBoard run directory
    directly under ``dpath``.

    All runs must log exactly the same scalar tags, and per row the same
    step across runs. Returns (out, steps): out[tag] is a list of rows,
    each row holding one value per run; steps are the step numbers of the
    *last* tag iterated (all tags are assumed to share them).
    """
    summary_iterators = [EventAccumulator(os.path.join(dpath, dname)).Reload() for dname in os.listdir(dpath)]
    tags = summary_iterators[0].Tags()['scalars']
    for it in summary_iterators:
        assert (it.Tags()['scalars'] == tags)
    out = defaultdict(list)
    steps = []
    for tag in tags:
        steps = [e.step for e in summary_iterators[0].Scalars(tag)]
        for events in zip(*[acc.Scalars(tag) for acc in summary_iterators]):
            # Every run must be at the same step for this row.
            assert (len(set((e.step for e in events))) == 1)
            out[tag].append([e.value for e in events])
    # Truncate every tag to the shortest series so rows stay aligned
    # (despite its name, max_ is the *minimum* length).
    max_ = np.min([len(out[tag]) for tag in tags])
    for tag in tags:
        out[tag] = out[tag][:max_]
    return (out, steps)
def in_ring(pt: Tuple[(float, float)], ring: List[Tuple[(float, float)]], ignore_boundary: bool) -> bool:
    """Ray-casting point-in-ring test.

    Returns True when ``pt`` lies strictly inside ``ring``; a point exactly
    on an edge returns ``not ignore_boundary``. A duplicated closing vertex
    is tolerated.
    """
    # Drop a duplicated closing vertex so each edge is considered once.
    if (ring[0][0] == ring[-1][0]) and (ring[0][1] == ring[-1][1]):
        ring = ring[:-1]
    (px, py) = (pt[0], pt[1])
    inside = False
    prev = len(ring) - 1
    for cur in range(len(ring)):
        (xi, yi) = ring[cur]
        (xj, yj) = ring[prev]
        # On-boundary: pt collinear with the edge (i, j) and within its
        # bounding box on both axes.
        collinear = ((py * (xi - xj)) + (yi * (xj - px)) + (yj * (px - xi))) == 0
        if collinear and (((xi - px) * (xj - px)) <= 0) and (((yi - py) * (yj - py)) <= 0):
            return not ignore_boundary
        # Edge crosses the horizontal ray extending right from pt:
        # toggle the inside/outside parity. The y-inequality guard makes
        # the division safe (yi != yj when it is evaluated).
        crosses = ((yi > py) != (yj > py)) and (px < ((((xj - xi) * (py - yi)) / (yj - yi)) + xi))
        if crosses:
            inside = not inside
        prev = cur
    return inside
def process_split_fully(train_ratio=0.8):
    """Split the 'Tr' images of an nnU-Net-style dataset into train / eval
    / test id lists and write them under <save_dir>/split_txts.

    The shuffled ids are split train_ratio : (1 - train_ratio) into
    train+eval vs. test, and the train+eval part further 5 : 1 into
    train vs. eval. All three lists are written sorted.
    """
    if (not os.path.exists(os.path.join(config.save_dir, 'split_txts'))):
        os.makedirs(os.path.join(config.save_dir, 'split_txts'))
    for tag in ['Tr']:
        img_ids = []
        for path in tqdm(glob.glob(os.path.join(base_dir, f'images{tag}', '*.nii.gz'))):
            # NOTE(review): splitting on '/' assumes POSIX paths, and [3:]
            # strips a 3-character filename prefix before the case id --
            # confirm against the dataset's naming convention.
            img_id = path.split('/')[(- 1)].split('.')[0][3:]
            img_ids.append(img_id)
        if (tag == 'Tr'):
            # Shuffle before splitting so the splits are random.
            img_ids = np.random.permutation(img_ids)
            split_idx = int((len(img_ids) * train_ratio))
            train_val_ids = img_ids[:split_idx]
            test_ids = sorted(img_ids[split_idx:])
            split_idx = int(((len(train_val_ids) * 5) / 6))
            train_ids = sorted(train_val_ids[:split_idx])
            eval_ids = sorted(train_val_ids[split_idx:])
            write_txt(train_ids, os.path.join(config.save_dir, 'split_txts/train.txt'))
            write_txt(eval_ids, os.path.join(config.save_dir, 'split_txts/eval.txt'))
            test_ids = sorted(test_ids)
            write_txt(test_ids, os.path.join(config.save_dir, 'split_txts/test.txt'))
class TestSolver(unittest.TestCase):
    """pycaffe SGDSolver smoke tests against a small generated net."""

    def setUp(self):
        """Write a throwaway solver prototxt, build the solver in CPU mode
        and fill train/test label blobs with random valid classes."""
        self.num_output = 13
        net_f = simple_net_file(self.num_output)
        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        f.write((("net: '" + net_f) + "'\n test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9\n weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75\n display: 100 max_iter: 100 snapshot_after_train: false"))
        f.close()
        self.solver = caffe.SGDSolver(f.name)
        # Also exercise the generic solver-factory entry point.
        caffe.get_solver(f.name)
        caffe.set_mode_cpu()
        # Valid labels so forward/backward passes can run.
        self.solver.net.blobs['label'].data[...] = np.random.randint(self.num_output, size=self.solver.net.blobs['label'].data.shape)
        self.solver.test_nets[0].blobs['label'].data[...] = np.random.randint(self.num_output, size=self.solver.test_nets[0].blobs['label'].data.shape)
        os.remove(f.name)
        os.remove(net_f)

    def test_solve(self):
        """Solving runs exactly max_iter (100) iterations."""
        self.assertEqual(self.solver.iter, 0)
        self.solver.solve()
        self.assertEqual(self.solver.iter, 100)

    def test_net_memory(self):
        """Nets must remain usable after their solver is deleted."""
        nets = ([self.solver.net] + list(self.solver.test_nets))
        self.assertEqual(len(nets), 2)
        del self.solver
        # Touch every param/blob; this would crash if memory were freed.
        total = 0
        for net in nets:
            for ps in six.itervalues(net.params):
                for p in ps:
                    total += (p.data.sum() + p.diff.sum())
            for bl in six.itervalues(net.blobs):
                total += (bl.data.sum() + bl.diff.sum())
def get_min_dcf(Pfa, Pmiss, p_tar=0.01, normalize=True):
    """Minimum detection cost function (minDCF) over an operating curve.

    Pfa / Pmiss: false-alarm and miss rates along the curve (same length).
    p_tar: target prior (scalar, or array of priors).
    normalize: divide by the best "dummy" system cost min(p_tar, 1-p_tar).
    Returns the minimum cost as a Python float.
    """
    priors = np.asarray(p_tar)
    weights = np.vstack((priors, 1 - priors))
    # Row k of cdet: p_tar[k] * Pmiss + (1 - p_tar[k]) * Pfa per threshold.
    cdet = weights.T @ np.vstack((Pmiss, Pfa))
    best = np.argmin(cdet, 1)
    dcfs = cdet[np.arange(len(best)), best]
    if normalize:
        dcfs = dcfs / np.amin(weights, axis=0)
    return float(dcfs.squeeze())
def check_required_param(param_desc: list[str], param: inspect.Parameter, method_or_obj_name: str) -> bool:
    """Check that our signature's optionality for ``param`` agrees with the
    Telegram API docs row ``param_desc``.

    Returns True when both sides agree, or when the parameter is in the
    per-method ignore list.
    """
    ours_required = (param.default is inspect.Parameter.empty)
    # Column 2 of the docs table states whether Telegram requires the field.
    tg_required = is_parameter_required_by_tg(param_desc[2])
    if param.name in ignored_param_requirements(method_or_obj_name):
        return True
    return tg_required is ours_required
class Effect6501(BaseEffect):
    """Amarr Dreadnought ship bonus: applies the 'shipBonusDreadnoughtA1'
    attribute as a damage multiplier to modules requiring the Capital
    Energy Turret skill."""
    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # NOTE: no `self` -- effect handlers are called unbound by the framework.
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Capital Energy Turret')), 'damageMultiplier', src.getModifiedItemAttr('shipBonusDreadnoughtA1'), skill='Amarr Dreadnought', **kwargs)
class IntermediateLayerGetter(nn.Module):
    """Wrapper that runs ``model``'s direct children in order and collects
    the outputs of the children named in ``return_layers``
    (mapping: child name -> output key).

    Only direct children are matched, and the wrapped model is truncated
    after the last requested layer.
    """
    _version = 2
    __constants__ = ['layers']
    __annotations__ = {'return_layers': Dict[(str, str)]}

    def __init__(self, model, return_layers):
        if (not set(return_layers).issubset([name for (name, _) in model.named_children()])):
            raise ValueError('return_layers are not present in model')
        super(IntermediateLayerGetter, self).__init__()
        orig_return_layers = return_layers
        # Work on a copy; the loop consumes it to find the truncation point.
        return_layers = {k: v for (k, v) in return_layers.items()}
        layers = OrderedDict()
        for (name, module) in model.named_children():
            layers[name] = module
            if (name in return_layers):
                del return_layers[name]
            if (not return_layers):
                break
        self.layers = nn.ModuleDict(layers)
        self.return_layers = orig_return_layers

    def forward(self, x):
        """Run the kept children sequentially, recording requested outputs
        under their mapped names."""
        out = OrderedDict()
        for (name, module) in self.layers.items():
            x = module(x)
            if (name in self.return_layers):
                out_name = self.return_layers[name]
                out[out_name] = x
        return out

    # NOTE(review): garbled decorator -- presumably `@torch.jit.ignore`.
    .ignore
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Migrate pre-v2 checkpoints whose keys lacked the 'layers.' prefix."""
        version = local_metadata.get('version', None)
        if ((version is None) or (version < 2)):
            for new_key in self.state_dict().keys():
                # Old key = new key without the 'layers.' prefix.
                old_key = new_key[len('layers.'):]
                old_key = (prefix + old_key)
                new_key = (prefix + new_key)
                if (old_key in state_dict):
                    value = state_dict[old_key]
                    del state_dict[old_key]
                    state_dict[new_key] = value
        super(IntermediateLayerGetter, self)._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
def odnoklassniki_oauth_sig(data, client_secret):
    """Calculate the Odnoklassniki OAuth request signature.

    Signature = md5( sorted "key=value" pairs (access_token excluded,
    concatenated without separators) + md5(access_token + client_secret) ).
    """
    secret_part = '{:s}{:s}'.format(data['access_token'], client_secret)
    suffix = md5(secret_part.encode('utf-8')).hexdigest()
    pairs = [f'{key:s}={value:s}' for (key, value) in data.items() if key != 'access_token']
    payload = ''.join(sorted(pairs)) + suffix
    return md5(payload.encode('utf-8')).hexdigest()
class TimeStampTextFrame(TextFrame):
    """ID3 text frame whose value is a comma-separated list of timestamps
    (stored via TimeStampSpec rather than plain text specs)."""
    _framespec = [EncodingSpec('encoding', default=Encoding.UTF16), MultiSpec('text', TimeStampSpec('stamp'), sep=u',', default=[])]

    def __bytes__(self):
        return str(self).encode('utf-8')

    def __str__(self):
        # Canonical form: timestamp texts joined with commas.
        return u','.join([stamp.text for stamp in self.text])

    def _pprint(self):
        # Human-readable form used by mutagen-style pprint output.
        return u' / '.join([stamp.text for stamp in self.text])
class Execute(Message):
    """PostgreSQL wire-protocol Execute ('E') message: run the named portal,
    returning at most ``max`` rows (0 = no limit)."""
    type = message_types[b'E'[0]]
    __slots__ = ('name', 'max')

    def __init__(self, name, max=0):
        # ``max`` shadows the builtin but is part of the public signature.
        self.name = name
        self.max = max

    def serialize(self):
        """Wire form: portal name, NUL terminator, then the 4-byte row limit."""
        return self.name + b'\x00' + ulong_pack(self.max)

    def parse(typ, data):
        """Inverse of serialize. NOTE(review): takes the class as first
        argument -- presumably bound as a classmethod elsewhere; confirm."""
        (portal, limit) = data.split(b'\x00', 1)
        return typ(portal, ulong_unpack(limit))
# NOTE(review): garbled decorator -- presumably
# `@app.route('/classify_upload', methods=['POST'])`.
('/classify_upload', methods=['POST'])
def classify_upload():
    """Handle an image upload: save it under a timestamped name, reopen it
    EXIF-oriented, classify it and render the result page. Any failure
    while opening renders an error page instead of raising."""
    try:
        imagefile = flask.request.files['imagefile']
        # Timestamp prefix keeps concurrent uploads from clobbering files.
        filename_ = (str(datetime.datetime.now()).replace(' ', '_') + werkzeug.secure_filename(imagefile.filename))
        filename = os.path.join(UPLOAD_FOLDER, filename_)
        imagefile.save(filename)
        logging.info('Saving to %s.', filename)
        image = exifutil.open_oriented_im(filename)
    except Exception as err:
        logging.info('Uploaded image open error: %s', err)
        return flask.render_template('index.html', has_result=True, result=(False, 'Cannot open uploaded image.'))
    result = app.clf.classify_image(image)
    return flask.render_template('index.html', has_result=True, result=result, imagesrc=embed_image_html(image))
def subdispatch_to_paymenttask(chain_state: ChainState, state_change: StateChange, secrethash: SecretHash) -> TransitionResult[ChainState]:
    """Route ``state_change`` to the payment sub-task registered for
    ``secrethash`` (initiator, mediator or target state machine).

    The sub-task is removed from the payment mapping once its state machine
    reaches a terminal (None) state. Always returns the (possibly mutated)
    chain_state together with the events produced by the sub-machine; when
    no sub-task (or no matching network/channel) exists, events are empty.
    """
    block_number = chain_state.block_number
    block_hash = chain_state.block_hash
    sub_task = chain_state.payment_mapping.secrethashes_to_task.get(secrethash)
    events: List[Event] = []
    if sub_task:
        pseudo_random_generator = chain_state.pseudo_random_generator
        sub_iteration: Union[(TransitionResult[Optional[InitiatorPaymentState]], TransitionResult[Optional[MediatorTransferState]], TransitionResult[Optional[TargetTransferState]])]
        if isinstance(sub_task, InitiatorTask):
            token_network_address = sub_task.token_network_address
            token_network_state = get_token_network_by_address(chain_state, token_network_address)
            # Unknown token network: silently skip dispatch.
            if token_network_state:
                channel_identifier_map = token_network_state.channelidentifiers_to_channels
                sub_iteration = initiator_manager.state_transition(payment_state=sub_task.manager_state, state_change=state_change, channelidentifiers_to_channels=channel_identifier_map, addresses_to_channel=chain_state.addresses_to_channel, pseudo_random_generator=pseudo_random_generator, block_number=block_number)
                events = sub_iteration.events
                if (sub_iteration.new_state is None):
                    del chain_state.payment_mapping.secrethashes_to_task[secrethash]
        elif isinstance(sub_task, MediatorTask):
            token_network_address = sub_task.token_network_address
            token_network_state = get_token_network_by_address(chain_state, token_network_address)
            if token_network_state:
                channelids_to_channels = token_network_state.channelidentifiers_to_channels
                sub_iteration = mediator.state_transition(mediator_state=sub_task.mediator_state, state_change=state_change, channelidentifiers_to_channels=channelids_to_channels, addresses_to_channel=chain_state.addresses_to_channel, pseudo_random_generator=pseudo_random_generator, block_number=block_number, block_hash=block_hash)
                events = sub_iteration.events
                if (sub_iteration.new_state is None):
                    del chain_state.payment_mapping.secrethashes_to_task[secrethash]
        elif isinstance(sub_task, TargetTask):
            token_network_address = sub_task.token_network_address
            channel_identifier = sub_task.channel_identifier
            channel_state = views.get_channelstate_by_canonical_identifier(chain_state=chain_state, canonical_identifier=CanonicalIdentifier(chain_identifier=chain_state.chain_id, token_network_address=token_network_address, channel_identifier=channel_identifier))
            if channel_state:
                sub_iteration = target.state_transition(target_state=sub_task.target_state, state_change=state_change, channel_state=channel_state, pseudo_random_generator=pseudo_random_generator, block_number=block_number)
                events = sub_iteration.events
                if (sub_iteration.new_state is None):
                    del chain_state.payment_mapping.secrethashes_to_task[secrethash]
    return TransitionResult(chain_state, events)
class VNet(MetaModule):
    """Meta-network: shared feature extractor followed by a per-task
    classifier head (both built by the project's `share`/`task` helpers)."""

    def __init__(self, input, hidden1, hidden2, output, num_classes):
        super(VNet, self).__init__()
        self.feature = share(input, hidden1, hidden2)
        # Attribute name 'classfier' (sic) is kept: external code may access it.
        self.classfier = task(hidden2, output, num_classes)

    def forward(self, x, num, c):
        features = self.feature(x)
        return self.classfier(features, num, c)
# NOTE(review): no decorator visible -- presumably `@pytest.fixture`
# was lost in extraction (the function yields a mock for tests).
def resp_update_link():
    """Yield a `responses` mock answering PUT <link_id_url> with the link
    content's link_type replaced by ``new_link_type``."""
    updated_content = dict(link_content)
    updated_content['link_type'] = new_link_type
    with responses.RequestsMock() as rsps:
        rsps.add(method=responses.PUT, url=link_id_url, json=updated_content, content_type='application/json', status=200)
        (yield rsps)
def train_model_swag_binning(model, arch, opt, train_data, test_data, args, lamb_lr, verbose=True):
    """Train ``model`` with an L2 penalty on noised hidden representations
    while collecting a SWAG posterior and binned mutual-information traces.

    Per batch the loss is cross-entropy + lamb_lr * mean L2 norm of the
    (noise-perturbed) representations from the layers in
    args.MI_reg_layers. SWAG snapshots are collected from epoch
    args.swa_start onward. Returns (model, swag_model, diagnostics).
    """
    model.train()
    MI_data = train_data
    (train_accs, train_losses) = ([], [])
    (test_accs, test_losses) = ([], [])
    binning_MIs = []
    l_MIs = []
    maxes = []
    t = 0  # global batch counter across epochs
    analyse(model, grads=True)
    # NOTE(review): swag_model is only created for BasicMLP models; other
    # model types would fail at collect_model below -- confirm intended.
    if isinstance(model, BasicMLP):
        swag_model = SWAG(BasicMLP, no_cov_mat=False, max_num_models=10, arch=arch)
        swag_model.to(device)
    for ep in range(args.epochs):
        if (ep >= args.swa_start):
            swag_model.collect_model(model)
        for (xs, ys) in train_data:
            # Periodic train/test evaluation every 10 batches.
            if ((t % 10) == 0):
                (train_acc, train_loss) = evaluate(model, train_data, args, 'train', plot=False)
                (test_acc, test_loss) = evaluate(model, test_data, args, 'test', plot=False)
                train_accs.append(train_acc)
                train_losses.append(train_loss)
                test_accs.append(test_acc)
                test_losses.append(test_loss)
                if verbose:
                    print(('... eval train_model_swag_binning:', ep, t, train_acc, test_acc))
                    sys.stdout.flush()
            xs = xs.to(device)
            ys = ys.to(device)
            opt.zero_grad()
            (preds, max_prob) = model(xs)
            l_sup = nn.functional.cross_entropy(preds, ys, reduction='mean')
            # Per-layer 2-D representations (dropout pass).
            zs_dropout = model(xs, repr=True)
            l_MIs_batch = []
            for (z_i, z) in enumerate(zs_dropout):
                assert (len(z.shape) == 2)
                if (z_i in args.MI_reg_layers):
                    # Gaussian noise with std sqrt(1 / (2*pi*e)).
                    noise = (torch.randn(z.shape, device=device) * np.sqrt((1.0 / ((2.0 * np.pi) * np.exp(1)))))
                    z_noise = (z + noise)
                    # Per-sample L2 norm of the noised representation.
                    l_MI_i = torch.linalg.vector_norm(z_noise, ord=2, dim=1)
                    assert (l_MI_i.shape == (xs.shape[0],))
                    l_MIs_batch.append(l_MI_i)
            l_MI = torch.cat(l_MIs_batch).mean()
            maxes.append(max_prob)
            # Binned MI estimate every batch (diagnostic; can be expensive).
            (MI, ranges, bin_szs) = est_MI_binning(model, MI_data.dataset, num_bins=args.num_bins)
            print(('Time %s: bin MI %s, range %s, bin_szs %s, loss: main %s, MI %s (%s)' % (t, MI, ranges, bin_szs, l_sup.item(), (lamb_lr * l_MI.item()), l_MI.item())))
            sys.stdout.flush()
            binning_MIs.append(MI)
            l_MIs.append(l_MI.item())
            (l_sup + (lamb_lr * l_MI)).backward()
            opt.step()
            if (ep == 0):
                analyse(model, grads=True, t=t)
            t += 1
    # Final evaluation and MI estimates after training finishes.
    (train_acc, train_loss) = evaluate(model, train_data, args, 'train', plot=False)
    (test_acc, test_loss) = evaluate(model, test_data, args, 'test', plot=False)
    train_accs.append(train_acc)
    train_losses.append(train_loss)
    test_accs.append(test_acc)
    test_losses.append(test_loss)
    (MI_mc_perlayer, ranges, bin_szs) = est_MI_binning(model, MI_data.dataset, num_bins=args.num_bins)
    MI_cond_mc_perlayer = est_MI_binning_cond(model, MI_data.dataset, args.C, num_bins_cond=args.num_bins_cond)
    # Evaluate the SWA mean (sample with scale 0.0).
    swag_model.sample(0.0)
    (test_acc_swag, test_loss_swag) = evaluate(swag_model, test_data, args, 'test', plot=False)
    diagnostics = {'train_losses': train_losses, 'train_accs': train_accs, 'test_losses': test_losses, 'test_accs': test_accs, 'test_loss_swag': test_loss_swag, 'test_acc_swag': test_acc_swag, 'MI_mc_perlayer': MI_mc_perlayer, 'MI_mc_perlayer_last': MI_mc_perlayer[(- 1)], 'ranges': ranges, 'bin_szs': bin_szs, 'MI_cond_mc_perlayer': MI_cond_mc_perlayer, 'MI_cond_mc_perlayer_last': MI_cond_mc_perlayer[(- 1)], 'binning_MIs': binning_MIs, 'l_MIs': l_MIs, 'maxes': maxes}
    return (model, swag_model, diagnostics)
def test_to_recap_record():
    """AvroConverter should map an Avro record to a StructType holding an
    int32 field and a string field, preserving field names as extra attrs
    and registering the record name as an alias."""
    converter = AvroConverter()
    avro_record = {'type': 'record', 'name': 'Test', 'fields': [{'name': 'a', 'type': 'int'}, {'name': 'b', 'type': 'string'}]}
    actual = converter.to_recap(json.dumps(avro_record))
    assert isinstance(actual, StructType)
    assert (len(actual.fields) == 2)
    assert isinstance(actual.fields[0], IntType)
    assert (actual.fields[0].bits == 32)
    assert (actual.fields[0].extra_attrs['name'] == 'a')
    assert isinstance(actual.fields[1], StringType)
    # NOTE(review): the expected value was lost in the original source
    # (`... bytes_ == )` did not parse). Avro strings map to recap's
    # unbounded string with byte limit 2**63 - 1 -- confirm upstream.
    assert (actual.fields[1].bytes_ == 9223372036854775807)
    assert (actual.fields[1].extra_attrs['name'] == 'b')
    assert (actual.extra_attrs == {'name': 'Test'})
    assert (actual.alias == '_root.Test')
def dumped(parameters=True, returnvalue=True, fork_inst=JsonSerializable, dumper=dump, **kwargs):
    """Decorator factory that serializes a decorated function's parameters
    and/or return value with the given jsons dumper.

    Raises InvalidDecorationError when ``dumper`` is not one of
    jsons.dump / jsons.dumps / jsons.dumpb.
    """
    valid_dumpers = (dump, dumps, dumpb)
    if dumper not in valid_dumpers:
        raise InvalidDecorationError("The 'dumper' argument must be one of: jsons.dump, jsons.dumps, jsons.dumpb")
    return _get_decorator(parameters, returnvalue, fork_inst, dumper, kwargs)
def get_config():
    """Return the DDPM config: continuous-time VP-SDE training with
    predictor-corrector (Euler-Maruyama, no corrector) sampling."""
    config = get_default_configs()

    # Training: continuous VP SDE with mean-reduced loss.
    training = config.training
    training.sde = 'vpsde'
    training.continuous = True
    training.reduce_mean = True

    # Sampling: PC sampler, predictor only.
    sampling = config.sampling
    sampling.method = 'pc'
    sampling.predictor = 'euler_maruyama'
    sampling.corrector = 'none'

    # Data: inputs centered to [-1, 1].
    config.data.centered = True

    # Model: DDPM backbone hyperparameters.
    model = config.model
    model.name = 'ddpm'
    model.scale_by_sigma = False
    model.ema_rate = 0.9999
    model.normalization = 'GroupNorm'
    model.nonlinearity = 'swish'
    model.nf = 128
    model.ch_mult = (1, 2, 2, 2)
    model.num_res_blocks = 2
    model.attn_resolutions = (16,)
    model.resamp_with_conv = True
    model.conditional = True
    return config
def get_private_repo_count(username):
    """Count the namespace's private repositories, excluding repositories
    already marked for deletion (single peewee COUNT query)."""
    return Repository.select().join(Visibility).switch(Repository).join(Namespace, on=(Repository.namespace_user == Namespace.id)).where((Namespace.username == username), (Visibility.name == 'private')).where((Repository.state != RepositoryState.MARKED_FOR_DELETION)).count()
def test_flask_restful_integration_works():
    """FlaskInjector must inject keyword-only constructor args (here
    ``int``) into flask_restful Resources; an unbound int defaults to 0."""
    class HelloWorld(flask_restful.Resource):
        def __init__(self, *args, int: int, **kwargs):
            # `int` shadows the builtin; kept to prove keyword injection.
            self._int = int
            super().__init__(*args, **kwargs)

        def get(self):
            return {'int': self._int}
    app = Flask(__name__)
    api = flask_restful.Api(app)
    api.add_resource(HelloWorld, '/')
    FlaskInjector(app=app)
    client = app.test_client()
    response = client.get('/')
    data = json.loads(response.data.decode('utf-8'))
    assert (data == {'int': 0})
# NOTE(review): garbled decorator -- presumably
# `@pytest.fixture(scope='session')`.
(scope='session')
def truncated_geos_area(create_test_area):
    """Session fixture: a truncated geostationary (geos) test area.

    NOTE(review): 'h' is empty and 'rf' looks truncated ('295.') --
    probably the satellite height (~35785831) and the full inverse
    flattening in the original; confirm before relying on the projection.
    """
    proj_dict = {'a': '6378169', 'h': '', 'lon_0': '9.5', 'no_defs': 'None', 'proj': 'geos', 'rf': '295.', 'type': 'crs', 'units': 'm', 'x_0': '0', 'y_0': '0'}
    area_extent = (5567248.0742, 5570248.4773, (- 5570248.4773), 1393687.2705)
    shape = (1392, 3712)
    return create_test_area(proj_dict, shape[0], shape[1], area_extent)
def _error_text(because: str, text: str, backend: usertypes.Backend, suggest_other_backend: bool=False) -> str:
    """Build the HTML error message shown when a backend fails to start.

    Optionally appends instructions for forcing the alternative backend
    (with a security warning when that alternative is QtWebKit), and always
    appends the backend machinery info block.
    """
    header = f'<b>Failed to start with the {backend.name} backend!</b><p>qutebrowser tried to start with the {backend.name} backend but failed because {because}.</p>'
    text = header + text
    if suggest_other_backend:
        (other_backend, other_setting) = _other_backend(backend)
        if other_backend == usertypes.Backend.QtWebKit:
            # Warn explicitly: QtWebKit is unmaintained.
            warning = "<i>Note that QtWebKit hasn't been updated since July 2017 (including security updates).</i>"
            suffix = ' (not recommended)'
        else:
            warning = ''
            suffix = ''
        text += f"<p><b>Forcing the {other_backend.name} backend{suffix}</b></p><p>This forces usage of the {other_backend.name} backend by setting the <i>backend = '{other_setting}'</i> option (if you have a <i>config.py</i> file, you'll need to set this manually). {warning}</p>"
    text += f'<p>{machinery.INFO.to_html()}</p>'
    return text
def convert_acdc(src_data_folder: str, dataset_id=27):
    """Convert the ACDC cardiac MRI dataset into nnU-Net raw format: copy
    train/test cases into the generated dataset folders and write
    dataset.json with the RV/MLV/LVC label mapping."""
    (out_dir, train_dir, labels_dir, test_dir) = make_out_dirs(dataset_id=dataset_id)
    num_training_cases = copy_files(Path(src_data_folder), train_dir, labels_dir, test_dir)
    generate_dataset_json(str(out_dir), channel_names={0: 'cineMRI'}, labels={'background': 0, 'RV': 1, 'MLV': 2, 'LVC': 3}, file_ending='.nii.gz', num_training_cases=num_training_cases)
def _load_config(composite_configs):
    """Load and merge one or more composite YAML config files for a sensor.

    Returns (compositors, modifiers, id_keys); ({}, {}, {}) when no
    'sensor_name' key is found. Dependency sensors encoded in the
    slash-separated 'sensor_name' prefix are loaded first so this sensor's
    own entries can override theirs.
    """
    if (not isinstance(composite_configs, (list, tuple))):
        composite_configs = [composite_configs]
    conf = {}
    # Later files override earlier ones via recursive merge.
    for composite_config in composite_configs:
        with open(composite_config, 'r', encoding='utf-8') as conf_file:
            conf = recursive_dict_update(conf, yaml.load(conf_file, Loader=UnsafeLoader))
    try:
        sensor_name = conf['sensor_name']
    except KeyError:
        logger.debug('No "sensor_name" tag found in %s, skipping.', composite_configs)
        return ({}, {}, {})
    sensor_compositors = {}
    sensor_modifiers = {}
    dep_id_keys = None
    # 'a/b/c' means sensor c inherits from a and b.
    sensor_deps = sensor_name.split('/')[:(- 1)]
    if sensor_deps:
        for sensor_dep in sensor_deps:
            (dep_comps, dep_mods, dep_id_keys) = load_compositor_configs_for_sensor(sensor_dep)
            sensor_compositors.update(dep_comps)
            sensor_modifiers.update(dep_mods)
    id_keys = _get_sensor_id_keys(conf, dep_id_keys)
    mod_config_helper = _ModifierConfigHelper(sensor_modifiers, id_keys)
    configured_modifiers = conf.get('modifiers', {})
    mod_config_helper.parse_config(configured_modifiers, composite_configs)
    comp_config_helper = _CompositeConfigHelper(sensor_compositors, id_keys)
    configured_composites = conf.get('composites', {})
    comp_config_helper.parse_config(configured_composites, composite_configs)
    return (sensor_compositors, sensor_modifiers, id_keys)
# NOTE(review): this generator is clearly meant to be a context manager --
# the `@contextlib.contextmanager` decorator appears lost in extraction.
def patch_builtin_len(modules=()):
    """Context manager patching `len` inside the listed detectron2 ROI-head
    modules (plus any extra ``modules``) to call obj.__len__() directly."""
    def _new_len(obj):
        return obj.__len__()
    with ExitStack() as stack:
        MODULES = (['detectron2.modeling.roi_heads.fast_rcnn', 'detectron2.modeling.roi_heads.mask_head', 'detectron2.modeling.roi_heads.keypoint_head'] + list(modules))
        ctxs = [stack.enter_context(mock.patch((mod + '.len'))) for mod in MODULES]
        for m in ctxs:
            m.side_effect = _new_len
        (yield)
def gd(fcn: Callable[(..., torch.Tensor)], x0: torch.Tensor, params: List, step: float=0.001, gamma: float=0.9, maxiter: int=1000, f_tol: float=0.0, f_rtol: float=1e-08, x_tol: float=0.0, x_rtol: float=1e-08, verbose=False, **unused):
    """Gradient descent with classical momentum.

    Minimizes ``fcn(x, *params) -> (f, dfdx)`` starting from ``x0``,
    stopping on the f/x absolute/relative tolerances or after ``maxiter``
    iterations. Returns the best x tracked by the termination condition.
    """
    stop_cond = TerminationCondition(f_tol, f_rtol, x_tol, x_rtol, verbose)
    x = x0.clone()
    velocity = torch.zeros_like(x)
    f_prev = torch.tensor(0.0, dtype=x0.dtype, device=x0.device)
    for iteration in range(maxiter):
        (f, grad) = fcn(x, *params)
        # Momentum update: v <- gamma*v - step*grad (detach cuts the graph).
        velocity = (gamma * velocity - step * grad).detach()
        x_prev = x.detach()
        x = (x_prev + velocity).detach()
        if stop_cond.to_stop(iteration, x, x_prev, f, f_prev):
            break
        f_prev = f
    return stop_cond.get_best_x(x)
def cookie_decode(data, key):
    """Verify and decode a signed cookie.

    Returns the unpickled payload when the HMAC-MD5 signature checks out
    (constant-time compare via _lscmp), else None. pickle.loads is only
    reached after the signature is verified.
    """
    data = tob(data)
    if not cookie_is_encoded(data):
        return None
    (sig, msg) = data.split(tob('?'), 1)
    expected = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
    # sig[1:] skips the leading '!' marker of encoded cookies.
    if _lscmp(sig[1:], expected):
        return pickle.loads(base64.b64decode(msg))
    return None
class Adjoint(GateWithRegisters):
    """Adjoint (dagger) of a wrapped bloq: every delegated operation is the
    adjoint of the sub-bloq's.

    NOTE(review): the `subbloq` annotation suggests this is an attrs
    class whose decorator was lost in extraction -- confirm upstream.
    """
    subbloq: 'Bloq'

    # Restored decorator: the original line was a bare `_property`
    # (a NameError at class creation). A plain property is behaviorally
    # compatible with the likely original `@cached_property`.
    @property
    def signature(self) -> 'Signature':
        return self.subbloq.signature.adjoint()

    def decompose_bloq(self) -> 'CompositeBloq':
        """Decompose the sub-bloq, then take the adjoint of the composite."""
        return self.subbloq.decompose_bloq().adjoint()

    def supports_decompose_bloq(self) -> bool:
        return self.subbloq.supports_decompose_bloq()

    def adjoint(self) -> 'Bloq':
        """The adjoint of the adjoint is the original bloq."""
        return self.subbloq

    def build_call_graph(self, ssa: 'SympySymbolAllocator') -> Set['BloqCountT']:
        # Call graph of the adjoint: adjoint every callee, keep counts.
        return {(bloq.adjoint(), n) for (bloq, n) in self.subbloq.build_call_graph(ssa=ssa)}

    def short_name(self) -> str:
        # NOTE(review): the original appended an empty string -- almost
        # certainly a dagger lost to encoding; restored here.
        return (self.subbloq.short_name() + '†')

    def pretty_name(self) -> str:
        return (self.subbloq.pretty_name() + '†')

    def wire_symbol(self, soq: 'Soquet') -> 'WireSymbol':
        """Wire symbol of the adjointed register, itself adjointed."""
        return self.subbloq.wire_symbol(attrs.evolve(soq, reg=soq.reg.adjoint())).adjoint()
class _CfdRunnable(object):
    """Base runner tying a FreeCAD FemSolver object to its parent CFD
    analysis; subclasses implement the actual case setup/solve."""

    def __init__(self, solver):
        # Only genuine FemSolverObjectPython documents are accepted.
        if (solver and solver.isDerivedFrom('Fem::FemSolverObjectPython')):
            self.solver = solver
        else:
            raise TypeError('FemSolver object is missing in constructing CfdRunnable object')
        self.analysis = CfdTools.getParentAnalysisObject(self.solver)
        if self.analysis:
            self.results_present = False
            self.result_object = None
        else:
            raise Exception('FEM: No active analysis found!')

    def check_prerequisites(self):
        """Subclass hook: return an error message, or '' when runnable."""
        return ''
def _fig_add_predictions(fig: Figure, category_data_frame: DataFrame, columns: List[str], column_color_map: Dict[str, str], show_legend: bool, row: int) -> None:
    """Add one trace per column to row ``row`` (grid column 1) of ``fig``.

    Delta values are drawn as small markers on the secondary y-axis, NMS
    detection scores as markers with their -1.0 sentinel rows removed, and
    everything else as colored lines.
    """
    times = category_data_frame[COLUMN_TIME]
    for col_name in columns:
        shared = dict(name=col_name, legendgroup=col_name, showlegend=show_legend, col=1, row=row)
        if col_name == COLUMN_DELTA:
            fig.add_scatter(x=times, y=category_data_frame[col_name], mode='markers',
                            marker=dict(color=column_color_map[col_name], size=2),
                            secondary_y=True, **shared)
        elif col_name == COLUMN_DETECTION_SCORE_NMS:
            # -1.0 marks "no NMS score" rows; drop them before plotting
            kept = category_data_frame[category_data_frame[col_name] != -1.0]
            fig.add_scatter(x=kept[COLUMN_TIME], y=kept[col_name], mode='markers',
                            marker=_create_nms_marker(), **shared)
        else:
            fig.add_scatter(x=times, y=category_data_frame[col_name], mode='lines',
                            line=dict(color=column_color_map[col_name]), **shared)
def test_image_to_tensor():
    """ImageToTensor should turn an HWC ndarray into a CHW torch tensor."""
    keys = ['imgs']
    original_results = dict(imgs=np.random.randn(256, 256, 3))
    image_to_tensor = ImageToTensor(keys)
    results = image_to_tensor(original_results)
    converted = results['imgs']
    assert isinstance(converted, torch.Tensor)
    assert converted.shape == torch.Size([3, 256, 256])
    # the transform mutates the dict in place, so the original entry now
    # holds the same underlying data as the converted tensor
    assert torch.equal(converted.data, original_results['imgs'])
    expected_repr = image_to_tensor.__class__.__name__ + f'(keys={keys})'
    assert repr(image_to_tensor) == expected_repr
@mock.patch('builtins.open', new_callable=mock.mock_open)
@mock.patch('pytube.request.urlopen')
def test_create_mock_html_json(mock_url_open, mock_open):
    """create_mock_html_json should gzip the expected JSON into tests/mocks.

    NOTE(review): the two decorators were reconstructed from garbled lines
    that had lost their ``@mock.patch`` prefix; the parameter order matches
    the bottom-up decorator convention (urlopen first, open second).
    """
    video_id = '2lAe1cqCOXo'
    gzip_html_filename = 'yt-video-%s-html.json.gz' % video_id
    # Expected output location: <repo>/tests/mocks/<gzip filename>
    pytube_dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
    pytube_mocks_path = os.path.join(pytube_dir_path, 'tests', 'mocks')
    gzip_html_filepath = os.path.join(pytube_mocks_path, gzip_html_filename)
    # Stub out the four network reads create_mock_html_json performs:
    # watch html (with player config), embed html, watch html, vid info json.
    mock_url_open_object = mock.Mock()
    mock_url_open_object.read.side_effect = [
        b'yt.setConfig({"PLAYER_CONFIG":{"args":[]}});ytInitialData = {};ytInitialPlayerResponse = {};"jsUrl":"/s/player//player_ias.vflset/en_US/base.js"',
        b'embed_html',
        b'watch_html',
        b'{"responseContext":{}}',
    ]
    mock_url_open.return_value = mock_url_open_object
    result_data = create_mock_html_json(video_id)
    mock_open.assert_called_once_with(gzip_html_filepath, 'wb')
    # Re-gzip the returned data locally and compare payloads, skipping the
    # first 10 bytes (the gzip header embeds a timestamp/filename and differs).
    gzip_file = io.BytesIO()
    with gzip.GzipFile(filename=gzip_html_filename, fileobj=gzip_file, mode='wb') as f:
        f.write(json.dumps(result_data).encode('utf-8'))
    gzip_data = gzip_file.getvalue()
    file_handle = mock_open.return_value.__enter__.return_value
    full_content = b''
    for call in file_handle.write.call_args_list:
        (args, kwargs) = call
        full_content += b''.join(args)
    assert gzip_data[10:] == full_content[10:]
def attach(parser):
    """Register the list/extract/edit attachment sub-commands on ``parser``."""
    add_input(parser, pages=False)
    actions = parser.add_subparsers(dest='action')
    # "list" needs no extra options
    actions.add_parser(ACTION_LIST)
    extract = actions.add_parser(ACTION_EXTRACT)
    extract.add_argument('--numbers', type=parse_numtext)
    extract.add_argument('--output-dir', '-o', type=Path, required=True)
    edit = actions.add_parser(ACTION_EDIT)
    edit.add_argument('--del-numbers', '-d', type=parse_numtext)
    edit.add_argument('--add-files', '-a', nargs='+', metavar='F', type=Path)
    edit.add_argument('--output', '-o', type=Path, required=True)
class HonggfuzzEngineDescriptor(FuzzingEngineDescriptor):
    """Describes the Honggfuzz engine: how to detect instrumented targets."""

    NAME = 'HONGGFUZZ'
    SHORT_NAME = 'HF'
    VERSION = '1.0.0'

    # Marker string Honggfuzz embeds in persistent-mode binaries.
    HF_PERSISTENT_SIG = b'\x01_LIBHFUZZ_PERSISTENT_BINARY_SIGNATURE_\x02\xff'

    config_class = HonggfuzzConfigurationInterface

    def __init__(self):
        pass

    # NOTE(review): restored @staticmethod -- the original had no `self`
    # parameter, so calling this on an instance would have failed.
    @staticmethod
    def accept_file(binary_file: Path) -> Tuple[bool, Optional[ExecMode], Optional[FuzzMode]]:
        """Inspect ``binary_file`` and report (accepted, exec mode, fuzz mode).

        Returns (False, None, None) when the file cannot be parsed.  A binary
        without hfuzz_ instrumentation is fuzzed binary-only in persistent
        mode; instrumented binaries are probed for the persistent-mode
        signature (or an HF_ITER import) to pick SINGLE_EXEC vs PERSISTENT.
        """
        p = lief.parse(str(binary_file))
        if not p:
            return (False, None, None)
        instrumented = False
        for f in p.functions:
            if 'hfuzz_' in f.name:
                instrumented = True
                break
        if not instrumented:
            return (True, ExecMode.PERSISTENT, FuzzMode.BINARY_ONLY)
        exmode = ExecMode.SINGLE_EXEC
        sections = {x.name: x for x in p.sections}
        if '.rodata' in sections:
            rodata_content = bytearray(sections['.rodata'].content)
            if HonggfuzzEngineDescriptor.HF_PERSISTENT_SIG in rodata_content:
                exmode = ExecMode.PERSISTENT
        elif 'HF_ITER' in (x.name for x in p.imported_functions):
            exmode = ExecMode.PERSISTENT
        return (True, exmode, FuzzMode.INSTRUMENTED)

    @staticmethod
    def supported_coverage_strategies() -> List[CoverageMode]:
        """Honggfuzz only supports automatic coverage selection."""
        return [CoverageMode.AUTO]
def test_perform_indexing_failed_within_reindex_threshold(initialized_db, set_secscan_config):
    """Manifests that failed indexing within the threshold must stay FAILED."""
    application.config['SECURITY_SCANNER_V4_REINDEX_THRESHOLD'] = 300
    secscan = V4SecurityScanner(application, instance_keys, storage)
    api_mock = mock.Mock()
    api_mock.state.return_value = {'state': 'abc'}
    api_mock.index.return_value = ({'err': None, 'state': IndexReportState.Index_Finished}, 'abc')
    secscan._secscan_api = api_mock
    # Mark every manifest as having recently failed indexing.
    for manifest in Manifest.select():
        ManifestSecurityStatus.create(
            manifest=manifest,
            repository=manifest.repository,
            error_json={},
            index_status=IndexStatus.FAILED,
            indexer_hash='abc',
            indexer_version=IndexerVersion.V4,
            metadata_json={},
        )
    secscan.perform_indexing_recent_manifests()
    secscan.perform_indexing()
    # Nothing should have been re-indexed: every failure is inside the
    # reindex threshold, so each status record stays FAILED.
    assert ManifestSecurityStatus.select().count() == Manifest.select().count()
    for mss in ManifestSecurityStatus.select():
        assert mss.index_status == IndexStatus.FAILED
def test_prepare_metadata_for_build_wheel_with_bad_path_dep_succeeds(caplog: LogCaptureFixture) -> None:
    """A bad path dependency should only emit a warning, not fail metadata prep."""
    project_dir = os.path.join(fixtures, 'with_bad_path_dep')
    with temporary_directory() as tmp_dir, cwd(project_dir):
        api.prepare_metadata_for_build_wheel(tmp_dir)
        # Exactly one WARNING about the missing path should be logged.
        assert len(caplog.records) == 1
        warning = caplog.records[0]
        assert warning.levelname == 'WARNING'
        assert 'does not exist' in warning.message
@HEADS.register_module()  # NOTE(review): reconstructed from a garbled `_module()` line -- confirm registry name
class CCHead(FCNHead):
    """Criss-Cross attention head (CCNet): an FCN head with recurrent CCA.

    Args:
        recurrence: number of times the criss-cross attention is applied
            between the two conv layers (default 2).
        **kwargs: forwarded to FCNHead (which is built with num_convs=2).
    """

    def __init__(self, recurrence=2, **kwargs):
        # CrissCrossAttention is an optional mmcv-full op; fail early with a
        # clear message when it is unavailable.
        if CrissCrossAttention is None:
            raise RuntimeError('Please install mmcv-full for CrissCrossAttention ops')
        super(CCHead, self).__init__(num_convs=2, **kwargs)
        self.recurrence = recurrence
        self.cca = CrissCrossAttention(self.channels)

    def forward(self, inputs):
        """Run conv -> recurrent CCA -> conv (-> concat) -> classifier."""
        x = self._transform_inputs(inputs)
        output = self.convs[0](x)
        # Apply criss-cross attention `recurrence` times so each position can
        # aggregate full-image context.
        for _ in range(self.recurrence):
            output = self.cca(output)
        output = self.convs[1](output)
        if self.concat_input:
            output = self.conv_cat(torch.cat([x, output], dim=1))
        output = self.cls_seg(output)
        return output
class PreSuDataset(data.Dataset):
    """Dataset yielding [full image, low-res copy] pairs as tensors."""

    def __init__(self, img_list, low_size=64, loader=default_loader):
        super(PreSuDataset, self).__init__()
        self.imgs = list(img_list)
        self.loader = loader

        def add_low_res(samples):
            # Append a nearest-neighbour downscale of the first (full) image.
            samples.append(transforms.Scale(low_size, interpolation=Image.NEAREST)(samples[0]))
            return samples

        self.transform = Compose([transforms.Lambda(add_low_res), Transforms(transforms.ToTensor())])

    def get_path(self, idx):
        """Return the file path backing sample ``idx``."""
        return self.imgs[idx]

    def __getitem__(self, index):
        sample = [self.loader(self.imgs[index])]
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.imgs)
def setupEnv(reinitialize=False):
    """Initialize the dsz/ops environment for this session.

    Sets timestamp/date variables, defaults every known flag to False
    (existing values are kept unless ``reinitialize`` is True), and records
    the normalized temp/windows/system directory paths obtained from the
    ``systempaths`` command.
    """
    dsz.env.Set('OPS_TIME', ops.timestamp())
    dsz.env.Set('OPS_DATE', ops.datestamp())
    # Default all flags to False; only overwrite existing values when
    # reinitialize is requested.
    for i in flags():
        if ((not dsz.env.Check(i)) or reinitialize):
            ops.env.set(i, False)
    # NOTE(review): dszflags captures the current control method and is only
    # deleted below, never explicitly restored -- presumably Method() restores
    # state on destruction; confirm before relying on echo staying off.
    dszflags = dsz.control.Method()
    dsz.control.echo.Off()
    if (not dsz.cmd.Run('systempaths', dsz.RUN_FLAG_RECORD)):
        ops.error("Could not get system paths. I'm confused. This means your OPS_TEMPDIR, OPS_WINDOWSDIR, and OPS_SYSTEMDIR environment variables are not set.")
    else:
        # Normalize and store the three well-known Windows paths returned by
        # the recorded systempaths command.
        dsz.env.Set('OPS_TEMPDIR', ntpath.normpath(dsz.cmd.data.Get('TempDir::Location', dsz.TYPE_STRING)[0]))
        dsz.env.Set('OPS_WINDOWSDIR', ntpath.normpath(dsz.cmd.data.Get('WindowsDir::Location', dsz.TYPE_STRING)[0]))
        dsz.env.Set('OPS_SYSTEMDIR', ntpath.normpath(dsz.cmd.data.Get('SystemDir::Location', dsz.TYPE_STRING)[0]))
    del dszflags
def log_features(feas, tb_writer, tb_index, rank):
    """Log per-level L2-norm histograms of 'cls' and 'loc' feature maps.

    Args:
        feas: dict with 'cls' and 'loc' lists of feature tensors; each tensor
            is flattened to (shape[0]*shape[1], -1) before taking row norms.
        tb_writer: TensorBoard-style writer exposing ``add_histogram``.
        tb_index: global step for the histograms.
        rank: unused; kept for interface compatibility with callers.
    """
    def _log_norms(tensor, tag):
        # Per-row L2 norm of the flattened feature map.
        t = tensor.detach()
        s = t.shape
        norms = t.view(s[0] * s[1], -1).norm(dim=1).cpu().numpy()
        # Cast to float64 for stable histogram binning.  The original applied
        # this only to 'cls' features; applied uniformly here (oversight fix).
        tb_writer.add_histogram(tag, np.float64(norms), tb_index)

    # Log cls then loc for each pyramid level, matching the original order.
    for i, (fea_c, fea_l) in enumerate(zip(feas['cls'], feas['loc'])):
        _log_norms(fea_c, 'features_cls{}'.format(i))
        _log_norms(fea_l, 'features_loc{}'.format(i))
def sc_zaleplon_with_other_formula() -> GoalDirectedBenchmark:
    """Zaleplon-with-other-formula benchmark, biased by an SCScore modifier."""
    base = zaleplon_with_other_formula()
    # Wrap the base objective so synthetic-complexity scoring biases it.
    biased_objective = ScoringFunctionSAWrapper(base.objective, SCScoreModifier())
    spec = uniform_specification(1, 10, 100)
    return GoalDirectedBenchmark(name='SC_zaleplon', objective=biased_objective, contribution_specification=spec)
class TestCLS():
    """Tests for cross-layer scaling (CLS) and cross-layer equalization (CLE).

    Several tests are guarded on torch >= 1.13 (they silently no-op on older
    torch versions rather than being skipped).
    """

    def test_graph_search_utils_single_residual_model(self):
        """A single-residual model should yield one layer group: conv2/conv3."""
        if (version.parse(torch.__version__) >= version.parse('1.13')):
            model = models_for_tests.single_residual_model()
            connected_graph = ConnectedGraph(model)
            ordered_module_list = get_ordered_list_of_conv_modules(connected_graph.starting_ops)
            graph_search_utils = GraphSearchUtils(connected_graph, ordered_module_list, cls_supported_layer_types, cls_supported_activation_types)
            ordered_layer_groups = graph_search_utils.find_layer_groups_to_scale()[0]
            ordered_layer_groups_names = [op.dotted_name for op in ordered_layer_groups]
            assert (ordered_layer_groups_names == ['/conv2/Conv', '/conv3/Conv'])

    def test_find_cls_sets_depthwise_model(self):
        """Depthwise model layer groups convert into the expected CLS triples."""
        if (version.parse(torch.__version__) >= version.parse('1.13')):
            model = models_for_tests.depthwise_conv_model()
            connected_graph = ConnectedGraph(model)
            ordered_module_list = get_ordered_list_of_conv_modules(connected_graph.starting_ops)
            graph_search_utils = GraphSearchUtils(connected_graph, ordered_module_list, cls_supported_layer_types, cls_supported_activation_types)
            ordered_layer_groups = graph_search_utils.find_layer_groups_to_scale()[0]
            cls_sets = graph_search_utils.convert_layer_group_to_cls_sets(ordered_layer_groups)
            # Collect each cls set as a tuple of dotted op names for comparison.
            cls_sets_names = []
            for cls_set in cls_sets:
                cls_sets_name = tuple([op.dotted_name for op in cls_set])
                cls_sets_names.append(cls_sets_name)
            assert (cls_sets_names == [('/model/model.0/model.0.0/Conv', '/model/model.1/model.1.0/Conv', '/model/model.1/model.1.3/Conv'), ('/model/model.1/model.1.3/Conv', '/model/model.2/model.2.0/Conv', '/model/model.2/model.2.3/Conv'), ('/model/model.2/model.2.3/Conv', '/model/model.3/model.3.0/Conv', '/model/model.3/model.3.3/Conv'), ('/model/model.3/model.3.3/Conv', '/model/model.4/model.4.0/Conv', '/model/model.4/model.4.3/Conv'), ('/model/model.4/model.4.3/Conv', '/model/model.5/model.5.0/Conv', '/model/model.5/model.5.3/Conv'), ('/model/model.5/model.5.3/Conv', '/model/model.6/model.6.0/Conv', '/model/model.6/model.6.3/Conv'), ('/model/model.6/model.6.3/Conv', '/model/model.7/model.7.0/Conv', '/model/model.7/model.7.3/Conv'), ('/model/model.7/model.7.3/Conv', '/model/model.8/model.8.0/Conv', '/model/model.8/model.8.3/Conv')])

    def test_find_cls_sets_resnet_model(self):
        """Residual model layer groups convert into a single CLS pair."""
        if (version.parse(torch.__version__) >= version.parse('1.13')):
            model = models_for_tests.single_residual_model()
            connected_graph = ConnectedGraph(model)
            ordered_module_list = get_ordered_list_of_conv_modules(connected_graph.starting_ops)
            graph_search_utils = GraphSearchUtils(connected_graph, ordered_module_list, cls_supported_layer_types, cls_supported_activation_types)
            ordered_layer_groups = graph_search_utils.find_layer_groups_to_scale()[0]
            cls_sets = graph_search_utils.convert_layer_group_to_cls_sets(ordered_layer_groups)
            cls_sets_names = []
            for cls_set in cls_sets:
                cls_sets_name = tuple([op.dotted_name for op in cls_set])
                cls_sets_names.append(cls_sets_name)
            assert (cls_sets_names == [('/conv2/Conv', '/conv3/Conv')])

    def test_scale_model_residual(self):
        """Scaling must preserve outputs and equalize max |weight| per channel."""
        model = models_for_tests.single_residual_model()
        input_shape = (1, 3, 32, 32)
        test_data = np.random.randn(*input_shape).astype(np.float32)
        session = _build_session(model)
        output_before_cls = session.run(None, {'input': test_data})
        cls = CrossLayerScaling(model)
        cls_set_info = cls.scale_model()
        # Re-build the session so the scaled weights are picked up.
        session = _build_session(model)
        output_after_cls = session.run(None, {'input': test_data})
        assert np.allclose(output_after_cls, output_before_cls, rtol=0.01, atol=1e-05)
        conv_3 = cls_set_info[0].cls_pair_info_list[0].layer1.get_module()
        conv_5 = cls_set_info[0].cls_pair_info_list[0].layer2.get_module()
        weight_3 = numpy_helper.to_array(ParamUtils.get_param(model.model, conv_3, WEIGHT_INDEX))
        weight_5 = numpy_helper.to_array(ParamUtils.get_param(model.model, conv_5, WEIGHT_INDEX))
        # After scaling, the per-channel weight ranges of the pair must match
        # along the scaled axes.
        assert np.allclose(np.amax(np.abs(weight_3), axis=(1, 2, 3)), np.amax(np.abs(weight_5), axis=(0, 2, 3)))

    def test_scale_model_tranposed_conv(self):
        """Same as the residual test, for transposed convs (axes swapped)."""
        model = models_for_tests.transposed_conv_model_without_bn()
        input_shape = (10, 10, 4, 4)
        test_data = np.random.randn(*input_shape).astype(np.float32)
        session = _build_session(model)
        output_before_cls = session.run(None, {'input': test_data})
        cls = CrossLayerScaling(model)
        cls_set_info = cls.scale_model()
        session = _build_session(model)
        output_after_cls = session.run(None, {'input': test_data})
        assert np.allclose(output_after_cls, output_before_cls, rtol=0.01, atol=1e-05)
        conv_3 = cls_set_info[0].cls_pair_info_list[0].layer1.get_module()
        conv_5 = cls_set_info[0].cls_pair_info_list[0].layer2.get_module()
        weight_3 = numpy_helper.to_array(ParamUtils.get_param(model.model, conv_3, WEIGHT_INDEX))
        weight_5 = numpy_helper.to_array(ParamUtils.get_param(model.model, conv_5, WEIGHT_INDEX))
        # Transposed convs: the channel axes are reversed relative to Conv.
        assert np.allclose(np.amax(np.abs(weight_3), axis=(0, 2, 3)), np.amax(np.abs(weight_5), axis=(1, 2, 3)))

    def test_scale_model_depthwise(self):
        """Depthwise model: outputs preserved and all 8 cls sets scaled."""
        model = models_for_tests.depthwise_conv_model()
        input_shape = (1, 3, 224, 224)
        test_data = np.random.randn(*input_shape).astype(np.float32)
        session = _build_session(model)
        output_before_cls = session.run(None, {'input': test_data})
        cls = CrossLayerScaling(model)
        cls_set_infos = cls.scale_model()
        session = _build_session(model)
        output_after_cls = session.run(None, {'input': test_data})
        assert np.allclose(output_after_cls, output_before_cls, rtol=0.01, atol=1e-05)
        assert (len(cls_set_infos) == 8)

    def test_cle(self):
        """Full cross-layer equalization after BN fold preserves outputs."""
        np.random.seed(0)
        model = models_for_tests.my_model_with_bns()
        fold_all_batch_norms_to_weight(model.model)
        input_shape = (2, 10, 24, 24)
        test_data = np.random.randn(*input_shape).astype(np.float32)
        session = _build_session(model)
        output_before_cle = session.run(None, {'input': test_data})
        equalize_model(model)
        session = _build_session(model)
        output_after_cle = session.run(None, {'input': test_data})
        assert np.allclose(output_after_cle, output_before_cle, rtol=0.01, atol=1e-05)

    def test_cle_conv1D_model(self):
        """CLE on a Conv1d+BN model exported to ONNX preserves outputs."""
        x = torch.randn((2, 10, 24))
        model = models_for_tests.BNAfterConv1d()
        models_for_tests.initialize_bn_params(model)
        model = models_for_tests._convert_to_onnx_no_fold(model, x)
        input_shape = (2, 10, 24)
        test_data = np.random.randn(*input_shape).astype(np.float32)
        session = _build_session(model)
        output_before_cle = session.run(None, {'input': test_data})
        equalize_model(model)
        output_after_cle = session.run(None, {'input': test_data})
        assert np.allclose(output_after_cle, output_before_cle, rtol=0.01, atol=1e-05)

    def test_cle_transpose1D_model(self):
        """CLE on a ConvTranspose1d+BN model exported to ONNX preserves outputs."""
        x = torch.randn((2, 10, 24))
        model = models_for_tests.BNAfterConvTranspose1d()
        models_for_tests.initialize_bn_params(model)
        model = models_for_tests._convert_to_onnx_no_fold(model, x)
        input_shape = (2, 10, 24)
        test_data = np.random.randn(*input_shape).astype(np.float32)
        session = _build_session(model)
        output_before_cle = session.run(None, {'input': test_data})
        equalize_model(model)
        output_after_cle = session.run(None, {'input': test_data})
        assert np.allclose(output_after_cle, output_before_cle, rtol=0.01, atol=1e-05)
def getNameFromSid(sid, domain=None):
    """Resolve a SID to an account name via the Win32 LookupAccountSidW API.

    Args:
        sid: SID to resolve.
        domain: optional target system name (None = local system).

    Returns:
        dict with 'Name', 'Domain' and 'type' (SID_NAME_USE value),
        or None on failure.
    """
    cbName = DWORD(0)
    cchReferencedDomainName = DWORD(0)
    peUse = DWORD(0)
    # First call is expected to fail: with NULL buffers it only fills in the
    # required sizes (cbName / cchReferencedDomainName), so the exception is
    # deliberately ignored.
    try:
        LookupAccountSidW(domain, sid, None, byref(cbName), None, byref(cchReferencedDomainName), byref(peUse))
    except Exception:
        pass
    if (cbName.value <= 0) or (cchReferencedDomainName.value <= 0):
        logging.warning('Impossible to get size of name with LookupAccountSidW(), case 2: {0}'.format(getLastErrorMessage()))
        return None
    # Allocate output buffers (+1 for the terminating NUL) and look up again.
    name = create_unicode_buffer(u'', (cbName.value + 1))
    referencedDomainName = create_unicode_buffer(u'', (cchReferencedDomainName.value + 1))
    try:
        LookupAccountSidW(domain, sid, name, byref(cbName), referencedDomainName, byref(cchReferencedDomainName), byref(peUse))
    except Exception:
        logging.error('Impossible to get name with LookupAccountSidW(), case 1: {0}'.format(getLastErrorMessage()))
        return None
    return {'Name': name.value, 'Domain': referencedDomainName.value, 'type': peUse.value}
@register_image_displayer('ueberzug')  # NOTE(review): reconstructed from a garbled `_image_displayer(...)` line
class UeberzugImageDisplayer(ImageDisplayer):
    """Image previews via the external `ueberzug` layer process.

    A single ueberzug subprocess is started lazily and driven over stdin
    with one JSON command per line.
    """

    IMAGE_ID = 'preview'
    is_initialized = False

    def __init__(self):
        self.process = None

    def initialize(self):
        """Start the ueberzug subprocess if it is not already running."""
        if (self.is_initialized and (self.process.poll() is None) and (not self.process.stdin.closed)):
            return
        # Silence ueberzug's stderr; commands go in via a text-mode pipe.
        # NOTE(review): self.working_dir is presumably set by the
        # ImageDisplayer base class -- confirm.
        with open(os.devnull, 'wb') as devnull:
            self.process = Popen(['ueberzug', 'layer', '--silent'], cwd=self.working_dir, stderr=devnull, stdin=PIPE, universal_newlines=True)
        self.is_initialized = True

    def _execute(self, **kwargs):
        # Each command is a single JSON object terminated by a newline.
        self.initialize()
        self.process.stdin.write(json.dumps(kwargs) + '\n')
        self.process.stdin.flush()

    def draw(self, path, start_x, start_y, width, height):
        """Show ``path`` at the given cell position, bounded by width/height."""
        self._execute(action='add', identifier=self.IMAGE_ID, x=start_x, y=start_y, max_width=width, max_height=height, path=path)

    def clear(self, start_x, start_y, width, height):
        """Remove the preview image (coordinates are ignored by ueberzug)."""
        if (self.process and (not self.process.stdin.closed)):
            self._execute(action='remove', identifier=self.IMAGE_ID)

    def quit(self):
        """Terminate the subprocess, escalating to kill after one second."""
        if (self.is_initialized and (self.process.poll() is None)):
            timer_kill = threading.Timer(1, self.process.kill, [])
            try:
                self.process.terminate()
                timer_kill.start()
                self.process.communicate()
            finally:
                timer_kill.cancel()
class BatchStudy():
    """Run and compare pybamm simulations over batches of models/inputs.

    Each entry in INPUT_LIST may be supplied as a dict.  With
    ``permutations=False`` the inputs are zipped one-to-one with the models
    (and must match their count); with ``permutations=True`` the cartesian
    product of all provided inputs is simulated.
    """

    # Names of the optional per-model inputs accepted by __init__/solve.
    INPUT_LIST = ['experiments', 'geometries', 'parameter_values', 'submesh_types', 'var_pts', 'spatial_methods', 'solvers', 'output_variables', 'C_rates']

    def __init__(self, models, experiments=None, geometries=None, parameter_values=None, submesh_types=None, var_pts=None, spatial_methods=None, solvers=None, output_variables=None, C_rates=None, repeats=1, permutations=False):
        self.models = models
        self.experiments = experiments
        self.geometries = geometries
        self.parameter_values = parameter_values
        self.submesh_types = submesh_types
        self.var_pts = var_pts
        self.spatial_methods = spatial_methods
        self.solvers = solvers
        self.output_variables = output_variables
        self.C_rates = C_rates
        self.repeats = repeats
        self.permutations = permutations
        self.quick_plot = None
        # Without permutations each provided input must pair up 1:1 with the
        # models, so validate the lengths eagerly.
        if (not self.permutations):
            for name in self.INPUT_LIST:
                if (getattr(self, name) and (len(self.models) != len(getattr(self, name)))):
                    raise ValueError(f'Either provide no {name} or an equal number of {name} as the models ({len(self.models)} models given) if permutations=False')

    def solve(self, t_eval=None, solver=None, check_model=True, save_at_cycles=None, calc_esoh=True, starting_solution=None, initial_soc=None, **kwargs):
        """Build and solve one Simulation per input combination.

        Each simulation is solved ``self.repeats`` times and its recorded
        solve/integration times are averaged.  Results accumulate in
        ``self.sims``.
        """
        self.sims = []
        # zip pairs inputs 1:1 with models; product takes every combination.
        iter_func = (product if self.permutations else zip)
        inp_values = []
        for name in self.INPUT_LIST:
            if getattr(self, name):
                inp_value = getattr(self, name).values()
            elif self.permutations:
                # A missing input contributes a single None axis to the product.
                inp_value = [None]
            else:
                # A missing input pairs None with every model when zipping.
                inp_value = ([None] * len(self.models))
            inp_values.append(inp_value)
        for (model, experiment, geometry, parameter_value, submesh_type, var_pt, spatial_method, solver, output_variable, C_rate) in iter_func(self.models.values(), *inp_values):
            sim = pybamm.Simulation(model, experiment=experiment, geometry=geometry, parameter_values=parameter_value, submesh_types=submesh_type, var_pts=var_pt, spatial_methods=spatial_method, solver=solver, output_variables=output_variable, C_rate=C_rate)
            solve_time = 0
            integration_time = 0
            # Repeat the solve and average the timings for stable benchmarks.
            for _ in range(self.repeats):
                sol = sim.solve(t_eval, solver, check_model, save_at_cycles, calc_esoh, starting_solution, initial_soc, **kwargs)
                solve_time += sol.solve_time
                integration_time += sol.integration_time
            sim.solution.solve_time = (solve_time / self.repeats)
            sim.solution.integration_time = (integration_time / self.repeats)
            self.sims.append(sim)

    def plot(self, output_variables=None, **kwargs):
        """Dynamically plot all solved simulations; returns the QuickPlot."""
        self.quick_plot = pybamm.dynamic_plot(self.sims, output_variables=output_variables, **kwargs)
        return self.quick_plot

    def create_gif(self, number_of_images=80, duration=0.1, output_filename='plot.gif'):
        """Render the solved simulations to an animated GIF."""
        if (self.quick_plot is None):
            self.quick_plot = pybamm.QuickPlot(self.sims)
        self.quick_plot.create_gif(number_of_images=number_of_images, duration=duration, output_filename=output_filename)
def test_profile_weir():
    """Build a weir profile plot end to end and check the config and PDF output."""
    with tempfile.TemporaryDirectory() as tempdir:
        inp_path = os.path.join(tempdir, 'model.inp')
        pdf_path = os.path.join(tempdir, 'test.pdf')
        # Copy the example model into the temp dir and run it so an .rpt
        # report (with node depths) exists next to the .inp.
        swmmio.Model(MODEL_EXAMPLE6).inp.save(inp_path)
        with pyswmm.Simulation(inp_path) as sim:
            for _ in sim:
                pass
        model = swmmio.Model(inp_path)
        depths = model.rpt.node_depth_summary.MaxNodeDepthReported
        fig = plt.figure(figsize=(11, 8))
        fig.suptitle('Weir')
        ax = fig.add_subplot(1, 1, 1)
        path_selection = find_network_trace(model, 'Inlet', 'TailWater', include_links=['Roadway'])
        profile_config = build_profile_plot(ax, model, path_selection)
        assert (profile_config == profile_weir_assert)
        add_hgl_plot(ax, profile_config, depth=depths, label='Max HGL')
        add_node_labels_plot(ax, model, profile_config)
        add_link_labels_plot(ax, model, profile_config)
        ax.legend()
        ax.grid('xy')
        fig.tight_layout()
        plt.savefig(pdf_path)
        plt.close()
        assert os.path.exists(pdf_path)
_sentencepiece
_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = DebertaV2Tokenizer
rust_tokenizer_class = DebertaV2TokenizerFast
test_sentencepiece = True
test_sentencepiece_ignore_case = True
def setUp(self):
super().setUp()
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def get_input_output_texts(self, tokenizer):
input_text = 'this is a test'
output_text = 'this is a test'
return (input_text, output_text)
def test_convert_token_and_id(self):
token = '<pad>'
token_id = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[(- 1)], '[PAD]')
self.assertEqual(len(vocab_keys), 30001)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 30000)
def test_do_lower_case(self):
sequence = ' \tHeLLo!how \n Are yoU? '
tokens_target = ['hello', '!', 'how', 'are', 'you', '?']
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(tokens, tokens_target)
rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(rust_tokens, tokens_target)
('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
pass
('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def test_sentencepiece_tokenize_and_decode(self):
pass
def test_split_by_punct(self):
sequence = 'I was born in 92000, and this is false.'
tokens_target = ['', '<unk>', 'was', 'born', 'in', '9', '2000', '', ',', 'and', 'this', 'is', 'fal', 's', '<unk>', '', '.']
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(tokens, tokens_target)
rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(rust_tokens, tokens_target)
def test_do_lower_case_split_by_punct(self):
sequence = 'I was born in 92000, and this is false.'
tokens_target = ['i', 'was', 'born', 'in', '9', '2000', '', ',', 'and', 'this', 'is', 'fal', 's', '<unk>', '', '.']
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(tokens, tokens_target)
rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(rust_tokens, tokens_target)
def test_do_lower_case_split_by_punct_false(self):
sequence = 'I was born in 92000, and this is false.'
tokens_target = ['i', 'was', 'born', 'in', '9', '2000', ',', 'and', 'this', 'is', 'fal', 's', '<unk>', '.']
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(tokens, tokens_target)
rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(rust_tokens, tokens_target)
def test_do_lower_case_false_split_by_punct(self):
sequence = 'I was born in 92000, and this is false.'
tokens_target = ['', '<unk>', 'was', 'born', 'in', '9', '2000', '', ',', 'and', 'this', 'is', 'fal', 's', '<unk>', '', '.']
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(tokens, tokens_target)
rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(rust_tokens, tokens_target)
def test_do_lower_case_false_split_by_punct_false(self):
sequence = ' \tHeLLo!how \n Are yoU? '
tokens_target = ['', '<unk>', 'e', '<unk>', 'o', '!', 'how', '', '<unk>', 're', 'yo', '<unk>', '?']
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(tokens, tokens_target)
rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(rust_tokens, tokens_target)
def test_rust_and_python_full_tokenizers(self):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = 'I was born in 92000, and this is false.'
tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_full_tokenizer(self):
sequence = 'This is a test'
ids_target = [13, 1, 4398, 25, 21, 1289]
tokens_target = ['', 'T', 'his', 'is', 'a', 'test']
back_tokens_target = ['', '<unk>', 'his', 'is', 'a', 'test']
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)
ids = tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, ids_target)
tokens = tokenizer.tokenize(sequence)
self.assertListEqual(tokens, tokens_target)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens, back_tokens_target)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(rust_ids, ids_target)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(rust_tokens, tokens_target)
rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
self.assertListEqual(rust_back_tokens, back_tokens_target)
sequence = 'I was born in 92000, and this is false.'
ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
tokens_target = ['', 'I', 'was', 'born', 'in', '9', '2000', ',', 'and', 'this', 'is', 'fal', 's', 'e', '.']
back_tokens_target = ['', '<unk>', 'was', 'born', 'in', '9', '2000', ',', 'and', 'this', 'is', 'fal', 's', '<unk>', '.']
ids = tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, ids_target)
tokens = tokenizer.tokenize(sequence)
self.assertListEqual(tokens, tokens_target)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens, back_tokens_target)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(rust_ids, ids_target)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(rust_tokens, tokens_target)
rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
self.assertListEqual(rust_back_tokens, back_tokens_target)
def test_sequence_builders(self):
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)
text = tokenizer.encode('sequence builders')
text_2 = tokenizer.encode('multi-sequence build')
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
self.assertEqual((([tokenizer.cls_token_id] + text) + [tokenizer.sep_token_id]), encoded_sentence)
self.assertEqual((((([tokenizer.cls_token_id] + text) + [tokenizer.sep_token_id]) + text_2) + [tokenizer.sep_token_id]), encoded_pair)
def test_tokenizer_integration(self):
    """Regression-pin the full encoder output for a fixed model revision.

    The expected ids/token-type-ids/attention masks below were generated once
    against microsoft/deberta-v2-xlarge at the pinned revision; any drift in
    tokenization shows up as a mismatch here.
    """
    # Three sequences padded to length 84; second/third rows are zero-padded
    # and the attention masks mark the padding accordingly.
    expected_encoding = {'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
    self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670')
class TypoScriptCssDataLexer(RegexLexer):
    """Lexer for TypoScript markers embedded in CSS-like data blocks.

    Highlights ###MARKER### placeholders, ``{$some.constant}`` references and
    ``{key : value}`` substitutions inside otherwise plain CSS text.
    """
    name = 'TypoScriptCssData'
    aliases = ['typoscriptcssdata']
    # FIX: the url attribute was a truncated, unterminated string literal
    # (`url = '`), which is a syntax error. Restored to the TYPO3 documentation
    # site used by the TypoScript lexer family.
    url = 'http://docs.typo3.org/'
    version_added = '2.2'
    tokens = {
        'root': [
            # ###MARKER### placeholders
            ('(.*)(###\\w+###)(.*)', bygroups(String, Name.Constant, String)),
            # {$some.dotted.constant}
            ('(\\{)(\\$)((?:[\\w\\-]+\\.)*)([\\w\\-]+)(\\})',
             bygroups(String.Symbol, Operator, Name.Constant, Name.Constant, String.Symbol)),
            # {key : value} substitutions
            ('(.*)(\\{)([\\w\\-]+)(\\s*:\\s*)([\\w\\-]+)(\\})(.*)',
             bygroups(String, String.Symbol, Name.Constant, Operator, Name.Constant, String.Symbol, String)),
            # whitespace
            ('\\s+', Text),
            # /* block comments */
            ('/\\*(?:(?!\\*/).)*\\*/', Comment),
            # '#' or '//' line comments, excluding hex color literals like #fff/#ffffff
            ('(?<!(#|\\\'|"))(?:#(?!(?:[a-fA-F0-9]{6}|[a-fA-F0-9]{3}))[^\\n#]+|//[^\\n]*)', Comment),
            # punctuation and everything else is treated as plain string data
            ('[<>,:=.*%+|]', String),
            ('[\\w"\\-!/&;(){}]+', String),
        ],
    }
class ScantronClient():
    """Thin REST client for a Scantron server (distributed nmap/masscan scanning).

    Credentials are read from a JSON secrets file; every HTTP call goes through
    :meth:`scantron_api_query`, which handles retries and per-verb status checks.
    """

    def __init__(self, secrets_file_location='./scantron_api_secrets.json', **kwargs):
        """Load host/port/token from the secrets file and set session defaults.

        Recognized kwargs: ``user_agent``, ``timeout`` (seconds, default 30),
        ``api_self_signed`` (default True; skips TLS verification and silences
        the urllib3 warning), ``max_attempts`` (default 3).
        Exits the process if the secrets file is missing or malformed.
        """
        SECRETS = {}
        try:
            with open(secrets_file_location) as config_file:
                SECRETS = json.loads(config_file.read())
        except OSError:
            print(f'Error: {secrets_file_location} does not exist. Exiting...')
            sys.exit(1)
        try:
            self.host = SECRETS['scantron']['host']
            self.port = SECRETS['scantron']['port']
            self.token = SECRETS['scantron']['token']
        except KeyError:
            print(f'Error reading key-values in {secrets_file_location} file. Exiting...')
            sys.exit(1)
        # FIX: this line was a truncated, unterminated f-string. Rebuilt the API
        # base URL from the configured host/port; HTTPS is implied by the
        # self-signed-certificate handling (verify=not api_self_signed) below.
        self.BASE_URL = f'https://{self.host}:{self.port}'
        self.user_agent = kwargs.get('user_agent', f'scantron-api-client-v{__version__}')
        self.headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Authorization': f'Token {self.token}',
            'User-Agent': self.user_agent,
        }
        self.timeout = kwargs.get('timeout', 30)
        self.api_self_signed = kwargs.get('api_self_signed', True)
        self.max_attempts = kwargs.get('max_attempts', 3)
        if self.api_self_signed:
            urllib3.disable_warnings()
        self.debug_print = False

    def scantron_api_query(self, endpoint, **kwargs):
        """Issue one HTTP request against the API and return the raw response.

        kwargs: ``method`` (GET/POST/PATCH/PUT/DELETE, default GET), ``headers``
        (merged over the session defaults), ``params``, ``payload`` (sent as
        JSON). Timeouts/connection errors are retried up to ``max_attempts``
        times, then the process exits. Unexpected status codes are logged but
        the response is still returned to the caller.
        """
        url = f'{self.BASE_URL}{endpoint}'
        headers = kwargs.get('headers', {})
        if not isinstance(headers, dict):
            raise ValueError('headers keyword passed to scantron_api_query is not a valid dict object')
        headers = {**self.headers, **headers}
        method = kwargs.get('method', 'GET')
        method = method.upper()
        parameters = kwargs.get('params', {})
        if not isinstance(parameters, dict):
            raise ValueError('params keyword passed to scantron_api_query is not a valid dict object')
        payload = kwargs.get('payload', '{}')
        # Expected "success" status code for each verb.
        attempts = 0
        while True:
            try:
                if method == 'GET':
                    response = requests.get(url, headers=headers, params=parameters, json=payload, verify=(not self.api_self_signed), timeout=self.timeout)
                    if response.status_code != 200:
                        debug_requests_response(response)
                    break
                elif method == 'POST':
                    response = requests.post(url, headers=headers, params=parameters, json=payload, verify=(not self.api_self_signed), timeout=self.timeout)
                    if response.status_code != 201:
                        debug_requests_response(response)
                    break
                elif method == 'PATCH':
                    response = requests.patch(url, headers=headers, params=parameters, json=payload, verify=(not self.api_self_signed), timeout=self.timeout)
                    if response.status_code != 200:
                        debug_requests_response(response)
                    break
                elif method == 'PUT':
                    response = requests.put(url, headers=headers, params=parameters, json=payload, verify=(not self.api_self_signed), timeout=self.timeout)
                    if response.status_code != 200:
                        debug_requests_response(response)
                    break
                elif method == 'DELETE':
                    response = requests.delete(url, headers=headers, params=parameters, json=payload, verify=(not self.api_self_signed), timeout=self.timeout)
                    if response.status_code != 204:
                        debug_requests_response(response)
                    break
                else:
                    print(f'Invalid HTTP method passed to scantron_api_query: {method}')
                    raise ValueError(f'Invalid HTTP method passed to scantron_api_query: {method}')
            except (requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
                attempts += 1
                if self.max_attempts < attempts:
                    print(f'Unable to reach Scantron API after {self.max_attempts} tries. Consider increasing the timeout.')
                    sys.exit(1)
                else:
                    print('Packet loss when attempting to reach the Scantron API.')
        if self.debug_print:
            debug_requests_response(response)
        return response

    def retrieve_server_time(self):
        """Return the server's current time string, or None on a non-200 response."""
        response = self.scantron_api_query('/api/server_time')
        if response.status_code == 200:
            server_time = response.json()['server_time']
            return server_time

    def retrieve_scan_results(self, scan_id, file_type, write_to_disk=False, **kwargs):
        """Fetch scan results as 'nmap'/'xml' (text) or 'json'/'pooled' (parsed JSON).

        Optionally also writes them to scan_results_<id>.<type> on disk.
        Returns None for invalid file types or failed requests.
        """
        scan_results = None
        file_type = file_type.lower()
        file_name = f'scan_results_{scan_id}.{file_type}'
        if file_type not in ['nmap', 'xml', 'json', 'pooled']:
            print(f'Not a valid file type: {file_type}')
        else:
            response = self.scantron_api_query(f'/results/{scan_id}?file_type={file_type}', **kwargs)
            if response.status_code == 200 and file_type in ['nmap', 'xml']:
                scan_results = response.text
                if write_to_disk:
                    with open(file_name, 'w') as fh:
                        fh.write(scan_results)
            elif response.status_code == 200 and file_type in ['json', 'pooled']:
                try:
                    scan_results = response.json()
                    if write_to_disk:
                        with open(file_name, 'w') as fh:
                            json.dump(scan_results, fh)
                except Exception as e:
                    print(f'Exception decoding json for scan ID {scan_id}: {e}')
        return scan_results

    # -- Configuration (singleton object, id 1) ------------------------------
    def retrieve_configuration(self):
        """Retrieve the server configuration object."""
        return self.scantron_api_query('/api/configuration/1', method='GET')

    def update_configuration(self, payload):
        """Partially update the server configuration object."""
        return self.scantron_api_query('/api/configuration/1', method='PATCH', payload=payload)

    # -- Engines -------------------------------------------------------------
    def create_engine(self):
        """Engines cannot be created through the API."""
        print('no create function, an Engine is created when a Django User is created.')
        return None

    def retrieve_engine(self, engine_id):
        """Retrieve a single engine."""
        return self.scantron_api_query(f'/api/engines/{engine_id}', method='GET')

    def update_engine(self, engine_id, payload):
        """Partially update an engine."""
        return self.scantron_api_query(f'/api/engines/{engine_id}', method='PATCH', payload=payload)

    def delete_engine(self, engine_id):
        """Delete an engine."""
        return self.scantron_api_query(f'/api/engines/{engine_id}', method='DELETE')

    def retrieve_engines(self):
        """Return all engines as parsed JSON."""
        return self.scantron_api_query('/api/engines').json()

    def retrieve_engine_id_from_engine_name(self, engine_name):
        """Return the id of the engine with the given name (case-insensitive), or None."""
        engines = self.retrieve_engines()
        engine_id = None
        for engine in engines:
            if engine['scan_engine'].lower() == engine_name.lower():
                engine_id = engine['id']
        return engine_id

    # -- Globally excluded targets -------------------------------------------
    def create_globally_excluded_target(self, payload):
        """Create a globally excluded target."""
        return self.scantron_api_query('/api/globally_excluded_targets', method='POST', payload=payload)

    def retrieve_globally_excluded_target(self, globally_excluded_target_id):
        """Retrieve one globally excluded target as parsed JSON."""
        return self.scantron_api_query(f'/api/globally_excluded_targets/{globally_excluded_target_id}', method='GET').json()

    def update_globally_excluded_target(self, globally_excluded_target_id, payload):
        """Partially update a globally excluded target."""
        return self.scantron_api_query(f'/api/globally_excluded_targets/{globally_excluded_target_id}', method='PATCH', payload=payload)

    def delete_globally_excluded_target(self, globally_excluded_target_id):
        """Delete a globally excluded target."""
        return self.scantron_api_query(f'/api/globally_excluded_targets/{globally_excluded_target_id}', method='DELETE')

    def retrieve_globally_excluded_targets(self):
        """Return all globally excluded targets as parsed JSON."""
        return self.scantron_api_query('/api/globally_excluded_targets').json()

    # -- Scan commands --------------------------------------------------------
    def create_scan_command(self, payload):
        """Create a scan command."""
        return self.scantron_api_query('/api/scan_commands', method='POST', payload=payload)

    def retrieve_scan_command(self, scan_command_id):
        """Retrieve one scan command as parsed JSON."""
        return self.scantron_api_query(f'/api/scan_commands/{scan_command_id}', method='GET').json()

    def update_scan_command(self, scan_command_id, payload):
        """Partially update a scan command."""
        return self.scantron_api_query(f'/api/scan_commands/{scan_command_id}', method='PATCH', payload=payload)

    def delete_scan_command(self, scan_command_id):
        """Delete a scan command."""
        return self.scantron_api_query(f'/api/scan_commands/{scan_command_id}', method='DELETE')

    def retrieve_scan_commands(self):
        """Return all scan commands as parsed JSON."""
        return self.scantron_api_query('/api/scan_commands').json()

    def retrieve_scan_command_id_from_scan_command_name(self, scan_command_name):
        """Return the id of the scan command with the given name (case-insensitive), or None."""
        scan_commands = self.retrieve_scan_commands()
        scan_command_id = None
        for scan_command in scan_commands:
            if scan_command['scan_command_name'].lower() == scan_command_name.lower():
                scan_command_id = scan_command['id']
        return scan_command_id

    # -- Scans ----------------------------------------------------------------
    def create_scan(self, payload):
        """Create a scan."""
        return self.scantron_api_query('/api/scans', method='POST', payload=payload)

    def retrieve_scan(self, scan_id):
        """Retrieve a single scan."""
        return self.scantron_api_query(f'/api/scans/{scan_id}', method='GET')

    def update_scan(self, scan_id, payload):
        """Partially update a scan."""
        return self.scantron_api_query(f'/api/scans/{scan_id}', method='PATCH', payload=payload)

    def delete_scan(self, scan_id):
        """Delete a scan."""
        return self.scantron_api_query(f'/api/scans/{scan_id}', method='DELETE')

    def retrieve_scans(self):
        """Return all scans as parsed JSON."""
        return self.scantron_api_query('/api/scans').json()

    # -- Scheduled scans (read-only) ------------------------------------------
    def retrieve_scheduled_scan(self, scheduled_scan_id):
        """Retrieve a single scheduled scan."""
        return self.scantron_api_query(f'/api/scheduled_scans/{scheduled_scan_id}', method='GET')

    def retrieve_scheduled_scans(self):
        """Return all scheduled scans as parsed JSON."""
        return self.scantron_api_query('/api/scheduled_scans').json()

    # -- Sites ----------------------------------------------------------------
    def create_site(self, payload):
        """Create a site."""
        return self.scantron_api_query('/api/sites', method='POST', payload=payload)

    def retrieve_site(self, site_id):
        """Retrieve a single site."""
        return self.scantron_api_query(f'/api/sites/{site_id}', method='GET')

    def update_site(self, site_id, payload):
        """Partially update a site."""
        return self.scantron_api_query(f'/api/sites/{site_id}', method='PATCH', payload=payload)

    def delete_site(self, site_id):
        """Delete a site."""
        return self.scantron_api_query(f'/api/sites/{site_id}', method='DELETE')

    def retrieve_sites(self):
        """Return all sites as parsed JSON."""
        return self.scantron_api_query('/api/sites').json()

    def retrieve_site_id_from_site_name(self, site_name):
        """Return the id of the site with the given name (case-insensitive), or None."""
        sites_info = self.retrieve_sites()
        site_id = None
        for site in sites_info:
            if site['site_name'].lower() == site_name.lower():
                site_id = site['id']
        return site_id

    # -- Engine pools ----------------------------------------------------------
    def create_engine_pool(self, payload):
        """Create an engine pool."""
        return self.scantron_api_query('/api/engine_pools', method='POST', payload=payload)

    def retrieve_engine_pool(self, engine_pool_id):
        """Retrieve one engine pool as parsed JSON."""
        return self.scantron_api_query(f'/api/engine_pools/{engine_pool_id}', method='GET').json()

    def update_engine_pool(self, engine_pool_id, payload):
        """Partially update an engine pool."""
        return self.scantron_api_query(f'/api/engine_pools/{engine_pool_id}', method='PATCH', payload=payload)

    def delete_engine_pool(self, engine_pool_id):
        """Delete an engine pool."""
        return self.scantron_api_query(f'/api/engine_pools/{engine_pool_id}', method='DELETE')

    def retrieve_engine_pools(self):
        """Return all engine pools as parsed JSON."""
        return self.scantron_api_query('/api/engine_pools').json()

    def retrieve_all_scantron_information(self, write_to_file=False, json_dump_file_name='all_scantron_information.json'):
        """Collect every API collection into one dict; optionally dump it to a file.

        Best-effort: any exception is printed and whatever was gathered so far
        is returned.
        """
        all_scantron_information = {}
        try:
            engines = self.retrieve_engines()
            scan_commands = self.retrieve_scan_commands()
            scans = self.retrieve_scans()
            scheduled_scans = self.retrieve_scheduled_scans()
            sites = self.retrieve_sites()
            globally_excluded_targets = self.retrieve_globally_excluded_targets()
            engine_pools = self.retrieve_engine_pools()
            all_scantron_information['engines'] = engines
            all_scantron_information['scan_commands'] = scan_commands
            all_scantron_information['scans'] = scans
            all_scantron_information['scheduled_scans'] = scheduled_scans
            all_scantron_information['sites'] = sites
            all_scantron_information['globally_excluded_targets'] = globally_excluded_targets
            all_scantron_information['engine_pools'] = engine_pools
        except Exception as e:
            print(f'Exception: {e}')
        if write_to_file:
            print(f'Writing results to: {json_dump_file_name}')
            with open(json_dump_file_name, 'w') as fh:
                json.dump(all_scantron_information, fh, indent=4)
        return all_scantron_information

    def generate_masscan_dict_from_masscan_result(self, scan_results_json, excluded_ips=None):
        """Collapse masscan JSON results into {ip: {'tcp': [...], 'udp': [...], 'icmp': [...]}}.

        Port lists are sorted and de-duplicated; IPs in excluded_ips are skipped.
        """
        # FIX: the default was a shared mutable list (excluded_ips=[]); use the
        # standard None sentinel instead. Behavior for all existing callers is
        # unchanged.
        if excluded_ips is None:
            excluded_ips = []
        masscan_dict = {}
        for result in scan_results_json:
            if result['ip'] in excluded_ips:
                print(f"Skipping IP: {result['ip']}")
                continue
            if result['ip'] not in masscan_dict:
                masscan_dict[result['ip']] = {'tcp': set(), 'udp': set(), 'icmp': set()}
            for port in result['ports']:
                if 'port' in port:
                    if port['proto'] == 'tcp':
                        masscan_dict[result['ip']]['tcp'].add(port['port'])
                    elif port['proto'] == 'udp':
                        masscan_dict[result['ip']]['udp'].add(port['port'])
                    elif port['proto'] == 'icmp':
                        masscan_dict[result['ip']]['icmp'].add(port['port'])
        # Convert the de-duplicating sets into stable sorted lists.
        for (key, value) in masscan_dict.items():
            masscan_dict[key]['tcp'] = list(sorted(value['tcp']))
            masscan_dict[key]['udp'] = list(sorted(value['udp']))
            masscan_dict[key]['icmp'] = list(sorted(value['icmp']))
        return masscan_dict

    def generate_masscan_dict_from_masscan_result_json_file(self, massscan_results_file):
        """Load a masscan JSON results file and convert it with generate_masscan_dict_from_masscan_result."""
        masscan_dict = None
        try:
            with open(massscan_results_file, 'r') as json_file:
                scan_results_json = json.load(json_file)
                masscan_dict = self.generate_masscan_dict_from_masscan_result(scan_results_json)
        except FileNotFoundError:
            print(f'File not found: {massscan_results_file}')
        return masscan_dict

    def retrieve_all_masscan_targets_with_an_open_port(self, masscan_dict):
        """Summarize a masscan dict: all targets, all open TCP/UDP ports, CSV forms.

        Also builds 'scanner_port_string' in nmap's T:<tcp csv>,U:<udp csv> form.
        """
        all_targets_with_an_open_port = sorted(list(set(masscan_dict.keys())))
        all_open_tcp_ports = set()
        all_open_udp_ports = set()
        for ip in masscan_dict.values():
            for port in ip['tcp']:
                all_open_tcp_ports.add(port)
            for port in ip['udp']:
                all_open_udp_ports.add(port)
        all_open_tcp_ports_list = sorted(all_open_tcp_ports)
        all_open_udp_ports_list = sorted(all_open_udp_ports)
        all_open_tcp_ports_csv = ','.join(list(map(str, all_open_tcp_ports_list)))
        all_open_udp_ports_csv = ','.join(list(map(str, all_open_udp_ports_list)))
        all_targets_with_an_open_port_dict = {
            'all_targets_with_an_open_port_as_list': all_targets_with_an_open_port,
            'all_targets_with_an_open_port_as_csv': ','.join(all_targets_with_an_open_port),
            'all_targets_with_an_open_port_as_spaced': ' '.join(all_targets_with_an_open_port),
            'all_targets_with_an_open_port_size': len(all_targets_with_an_open_port),
            'all_open_tcp_ports_list': all_open_tcp_ports_list,
            'all_open_udp_ports_list': all_open_udp_ports_list,
            'all_open_tcp_ports_csv': all_open_tcp_ports_csv,
            'all_open_udp_ports_csv': all_open_udp_ports_csv,
            'unique_open_tcp_ports': len(all_open_tcp_ports),
            'unique_open_udp_ports': len(all_open_udp_ports),
        }
        scanner_port_string = ''
        if all_open_tcp_ports:
            scanner_port_string = f'T:{all_open_tcp_ports_csv}'
        if all_open_udp_ports:
            if all_open_tcp_ports:
                scanner_port_string += ','
            scanner_port_string += f'U:{all_open_udp_ports_csv}'
        all_targets_with_an_open_port_dict['scanner_port_string'] = scanner_port_string
        return all_targets_with_an_open_port_dict

    def retrieve_all_masscan_targets_with_a_specific_port_and_protocol(self, masscan_dict, port, protocol='tcp'):
        """Return every target in masscan_dict with *port* open for *protocol* (list/csv/spaced forms)."""
        all_targets_with_a_specific_port_and_protocol_dict = {
            'port': port,
            'protocol': protocol,
            'all_targets_with_a_specific_port_and_protocol_list': [],
            'all_targets_with_a_specific_port_and_protocol_csv': '',
            'all_targets_with_a_specific_port_and_protocol_spaced': '',
        }
        for (key, value) in masscan_dict.items():
            if port in masscan_dict[key][protocol]:
                all_targets_with_a_specific_port_and_protocol_dict['all_targets_with_a_specific_port_and_protocol_list'].append(key)
        all_targets_with_a_specific_port_and_protocol_dict['all_targets_with_a_specific_port_and_protocol_list'] = sorted(all_targets_with_a_specific_port_and_protocol_dict['all_targets_with_a_specific_port_and_protocol_list'])
        all_targets_with_a_specific_port_and_protocol_dict['all_targets_with_a_specific_port_and_protocol_size'] = len(all_targets_with_a_specific_port_and_protocol_dict['all_targets_with_a_specific_port_and_protocol_list'])
        all_targets_with_a_specific_port_and_protocol_dict['all_targets_with_a_specific_port_and_protocol_csv'] = ','.join(all_targets_with_a_specific_port_and_protocol_dict['all_targets_with_a_specific_port_and_protocol_list'])
        all_targets_with_a_specific_port_and_protocol_dict['all_targets_with_a_specific_port_and_protocol_spaced'] = ' '.join(all_targets_with_a_specific_port_and_protocol_dict['all_targets_with_a_specific_port_and_protocol_list'])
        return all_targets_with_a_specific_port_and_protocol_dict

    def retrieve_all_masscan_targets_with_a_specific_port_and_protocol_from_scan_id(self, scan_id, port, protocol='tcp', file_type='json'):
        """Download results for *scan_id* and filter targets by port/protocol."""
        all_targets_with_a_specific_port_and_protocol_dict = {
            'scan_id': scan_id,
            'port': port,
            'protocol': protocol,
            'all_targets_with_a_specific_port_and_protocol_list': [],
            'all_targets_with_a_specific_port_and_protocol_csv': '',
            'all_targets_with_a_specific_port_and_protocol_spaced': '',
        }
        scan_results_json = self.retrieve_scan_results(scan_id, file_type)
        masscan_dict = self.generate_masscan_dict_from_masscan_result(scan_results_json)
        all_targets_with_a_specific_port_and_protocol_dict = self.retrieve_all_masscan_targets_with_a_specific_port_and_protocol(masscan_dict, port, protocol)
        all_targets_with_a_specific_port_and_protocol_dict['scan_id'] = scan_id
        return all_targets_with_a_specific_port_and_protocol_dict

    def retrieve_all_masscan_targets_and_open_ports_from_scan_id(self, scan_id, file_type='json'):
        """Download results for *scan_id* and summarize all targets/open ports."""
        scan_results_json = self.retrieve_scan_results(scan_id, file_type)
        masscan_dict = self.generate_masscan_dict_from_masscan_result(scan_results_json)
        all_masscan_targets_and_open_ports = self.retrieve_all_masscan_targets_with_an_open_port(masscan_dict)
        return all_masscan_targets_and_open_ports

    def wait_until_scheduled_scan_finishes(self, scheduled_scan_id, sleep_seconds=60):
        """Poll the scheduled scan until its status leaves 'started'."""
        while self.retrieve_scheduled_scan(scheduled_scan_id).json()['scan_status'] in ['started']:
            print(f'Scheduled scan ID {scheduled_scan_id} is still running...sleeping {sleep_seconds} seconds.')
            time.sleep(sleep_seconds)

    def retrieve_next_available_scan_time(self):
        """Return the next whole-minute server time ('H:M') at least ~15s away."""
        server_time = self.retrieve_server_time()
        server_time_datetime = datetime.datetime.fromisoformat(server_time)
        # FIX: advance with timedelta arithmetic instead of
        # .replace(minute=minute + 1) / .replace(minute=minute + 2), which raised
        # ValueError whenever the current minute was 58 or 59 (and never rolled
        # the hour over).
        next_minute = (server_time_datetime + datetime.timedelta(minutes=1)).replace(second=0, microsecond=0)
        if (server_time_datetime + datetime.timedelta(seconds=15)) < next_minute:
            next_eligible_scan_datetime = next_minute
        else:
            # Too close to the minute boundary; push one more minute out.
            next_eligible_scan_datetime = next_minute + datetime.timedelta(minutes=1)
        # NOTE(review): hour/minute are not zero-padded (e.g. '9:5'); presumably
        # the server accepts this format -- confirm before changing it.
        next_eligible_scan_string = f'{next_eligible_scan_datetime.hour}:{next_eligible_scan_datetime.minute}'
        return next_eligible_scan_string
def enc_obj2bytes(obj, max_size=16000):
    """Pickle *obj* into a fixed-size uint8 tensor.

    Layout: bytes 0-1 hold the payload length in base 256 (high byte first),
    bytes 2..2+len hold the pickled payload, and the remainder is zero padding.
    Raises if the pickled payload does not fit in *max_size*.
    """
    assert max_size <= MAX_SIZE_LIMIT
    encoded = pickle.dumps(obj)
    payload_len = len(encoded)
    if payload_len > max_size:
        raise Exception('objects too large: object size {}, max size {}'.format(payload_len, max_size))
    buffer = torch.zeros(max_size, dtype=torch.uint8)
    # Two-byte length header, then the payload itself.
    buffer[0] = payload_len // 256
    buffer[1] = payload_len % 256
    buffer[2:2 + payload_len] = torch.ByteTensor(list(encoded))
    return buffer
def make_device(device_str: Optional[str]) -> torch.device:
    """Resolve *device_str* into a validated ``torch.device``.

    Falsy input (None or '') selects CUDA when available, otherwise CPU.
    Raises ValueError for an unknown device type (with a spelling suggestion
    when possible) and RuntimeError when the device exists but is unavailable.
    """
    if not device_str:
        # No explicit request: prefer CUDA when present, fall back to CPU.
        return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    try:
        device = torch.device(device_str)
    except RuntimeError as error:
        device_type = device_str.split(':')[0]
        msg = f"Unknown device type '{device_type}'."
        # torch's error message enumerates valid device types; mine it to
        # offer a "did you mean" suggestion.
        match = re.match('Expected one of (?P<device_types>(\\w+,\\s)+\\w+)\\sdevice type', str(error))
        if match:
            candidates = [candidate.strip() for candidate in match.group('device_types').split(',')]
            msg = add_suggestion(msg, word=device_type, possibilities=candidates)
        raise ValueError(msg) from error
    try:
        # Probe with a zero-dim allocation to confirm the device actually works.
        torch.empty((), device=device)
    except Exception as error:
        raise RuntimeError(f"The device '{device_str}' is not available.") from error
    return device
class AttributeSliderChangeEvent():
    """Immutable record of a slider-driven attribute change.

    Carries the affected object, the value before/after the change, the slider
    percentage before/after, and whether the change should mark the document
    as modified. Exposed both via Get* accessors and read-only properties.
    """

    def __init__(self, obj, old_value, new_value, old_percentage, new_percentage, affect_modified_flag=True):
        self._obj = obj
        self._old_value = old_value
        self._new_value = new_value
        self._old_percentage = old_percentage
        self._new_percentage = new_percentage
        self._affects_modified = affect_modified_flag

    def GetObj(self):
        """Return the object whose attribute changed."""
        return self._obj

    def GetOldValue(self):
        """Return the attribute value before the change."""
        return self._old_value

    def GetValue(self):
        """Return the attribute value after the change."""
        return self._new_value

    def GetOldPercentage(self):
        """Return the slider percentage before the change."""
        return self._old_percentage

    def GetPercentage(self):
        """Return the slider percentage after the change."""
        return self._new_percentage

    def AffectsModifiedFlag(self):
        """Return True when this change should mark the document as modified."""
        return self._affects_modified

    # Read-only property aliases for the accessors above.
    Object = property(GetObj)
    OldValue = property(GetOldValue)
    Value = property(GetValue)
    OldPercentage = property(GetOldPercentage)
    Percentage = property(GetPercentage)
def get_no_augmentation(dataloader_train, dataloader_val, params=default_3D_augmentation_params, deep_supervision_scales=None, soft_ds=False, classes=None, pin_memory=True, regions=None):
    """Build train/val multithreaded augmenters that apply NO data augmentation.

    Both pipelines only select channels, clean labels (-1 -> 0), rename
    'seg' -> 'target', optionally convert segmentations to regions and/or
    downsample targets for deep supervision, and convert to tensors.
    Returns (train_augmenter, val_augmenter), both already restarted.
    """

    def _append_target_tail(transform_list):
        # Shared pipeline tail: rename seg->target, optional region conversion,
        # optional deep-supervision downsampling, then numpy -> tensor.
        transform_list.append(RenameTransform('seg', 'target', True))
        if regions is not None:
            transform_list.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
        if deep_supervision_scales is not None:
            if soft_ds:
                assert classes is not None
                transform_list.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))
            else:
                transform_list.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target', output_key='target'))
        transform_list.append(NumpyToTensor(['data', 'target'], 'float'))

    # Training pipeline: channel selection first, then label cleanup.
    train_transforms = []
    if params.get('selected_data_channels') is not None:
        train_transforms.append(DataChannelSelectionTransform(params.get('selected_data_channels')))
    if params.get('selected_seg_channels') is not None:
        train_transforms.append(SegChannelSelectionTransform(params.get('selected_seg_channels')))
    train_transforms.append(RemoveLabelTransform(-1, 0))
    _append_target_tail(train_transforms)
    train_threads = params.get('num_threads')
    batchgenerator_train = MultiThreadedAugmenter(dataloader_train, Compose(train_transforms), train_threads, params.get('num_cached_per_thread'), seeds=range(train_threads), pin_memory=pin_memory)
    batchgenerator_train.restart()

    # Validation pipeline: label cleanup first, then channel selection
    # (order intentionally differs from the training pipeline above).
    val_transforms = [RemoveLabelTransform(-1, 0)]
    if params.get('selected_data_channels') is not None:
        val_transforms.append(DataChannelSelectionTransform(params.get('selected_data_channels')))
    if params.get('selected_seg_channels') is not None:
        val_transforms.append(SegChannelSelectionTransform(params.get('selected_seg_channels')))
    _append_target_tail(val_transforms)
    val_threads = max(params.get('num_threads') // 2, 1)
    batchgenerator_val = MultiThreadedAugmenter(dataloader_val, Compose(val_transforms), val_threads, params.get('num_cached_per_thread'), seeds=range(val_threads), pin_memory=pin_memory)
    batchgenerator_val.restart()
    return (batchgenerator_train, batchgenerator_val)
def _iter_translations(args, task, dataset, translations, align_dict, rescorer, modify_target_dict):
    """Yield one TranslationInfo per translated sample.

    For each (sample_id, src_tokens, target_tokens, hypos) tuple this resolves
    the source/target dictionaries (per-language in the multilingual
    many-to-one case), recovers the source and reference strings,
    post-processes up to ``args.nbest`` hypotheses, optionally tracks the
    oracle (best smoothed-BLEU) hypothesis, and yields a TranslationInfo.

    NOTE(review): ``rescorer`` is accepted but never read in this body;
    presumably kept for interface compatibility -- confirm before removing.
    """
    is_multilingual = pytorch_translate_data.is_multilingual_many_to_one(args)
    for (sample_id, src_tokens, target_tokens, hypos) in translations:
        target_tokens = target_tokens.int().cpu()
        if is_multilingual:
            # Multilingual encoding: the source carries its language id as the
            # last token and the target carries its as the first token; strip
            # them and select the matching per-language dictionaries.
            src_lang_id = (src_tokens[(- 1)] - pytorch_translate_data.MULTILING_DIALECT_ID_OFFSET)
            target_lang_id = (target_tokens[0] - pytorch_translate_data.MULTILING_DIALECT_ID_OFFSET)
            src_tokens = src_tokens[:(- 1)]
            target_tokens = target_tokens[1:]
            src_dict = task.source_dictionaries[task.get_encoder_lang_code(src_lang_id)]
            target_dict = task.target_dictionaries[task.get_decoder_lang_code(target_lang_id)]
        else:
            src_dict = task.source_dictionary
            target_dict = task.target_dictionary
        if (align_dict is not None):
            # Alignment-based replacement needs the original (raw) text.
            src_str = dataset.src.get_original_text(sample_id)
            target_str = dataset.tgt.get_original_text(sample_id)
        else:
            src_str = src_dict.string(src_tokens, args.post_process)
            target_str = target_dict.string(target_tokens, args.post_process, escape_unk=True)
        if (not args.quiet):
            print(f'S-{sample_id} {src_str}')
            print(f'T-{sample_id} {target_str}')
        best_hypo_tokens = None
        best_hypo_score = 0
        # Oracle hypotheses are only needed for oracle-BLEU reporting or when
        # dumping n-best hypotheses to a binary output file.
        collect_oracle_hypos = (args.report_oracle_bleu or (args.output_hypos_binary_path and (args.nbest > 0)))
        for (i, hypo) in enumerate(hypos[:min(len(hypos), args.nbest)]):
            (hypo_tokens, hypo_str, alignment) = utils.post_process_prediction(hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=(hypo['alignment'].int().cpu() if (align_dict is not None) else None), align_dict=align_dict, tgt_dict=task.target_dictionary, remove_bpe=args.post_process)
            if (not args.quiet):
                print(f"H-{sample_id} {hypo['score']} {hypo_str}")
                if (alignment is not None):
                    print('A-{}\t{}'.format(sample_id, ' '.join(map((lambda x: str(utils.item(x))), alignment))))
            if collect_oracle_hypos:
                # Track the hypothesis with the best sentence-level smoothed BLEU.
                score = smoothed_sentence_bleu(task, target_tokens, hypo_tokens)
                if (score > best_hypo_score):
                    best_hypo_tokens = hypo_tokens
                    best_hypo_score = score
            if (i == 0):
                # The top-scoring (first) hypothesis is the one reported downstream.
                if ((align_dict is not None) or (args.post_process is not None)):
                    # Re-encode the post-processed reference; this may grow the
                    # target dictionary when modify_target_dict is set.
                    target_tokens = task.target_dictionary.encode_line(target_str, add_if_not_exist=modify_target_dict)
                # Length-normalized model score of the top hypothesis.
                hypo_score = ((hypo['score'] / len(hypo_tokens)) if (len(hypo_tokens) > 0) else 0.0)
                top_hypo_tokens = hypo_tokens
                top_hypo_str = hypo_str
        if (not collect_oracle_hypos):
            # Without oracle collection the "best" hypothesis is simply the top one.
            best_hypo_tokens = top_hypo_tokens
        (yield TranslationInfo(sample_id=sample_id, src_tokens=src_tokens, target_tokens=target_tokens, hypo_tokens=top_hypo_tokens, src_str=src_str, target_str=target_str, hypo_str=top_hypo_str, hypo_score=hypo_score, best_hypo_tokens=best_hypo_tokens, hypos=hypos))
class TestStickerWithoutRequest(TestStickerBase):
def test_slot_behaviour(self, sticker):
for attr in sticker.__slots__:
assert (getattr(sticker, attr, 'err') != 'err'), f"got extra slot '{attr}'"
assert (len(mro_slots(sticker)) == len(set(mro_slots(sticker)))), 'duplicate slot'
def test_creation(self, sticker):
assert isinstance(sticker, Sticker)
assert isinstance(sticker.file_id, str)
assert isinstance(sticker.file_unique_id, str)
assert sticker.file_id
assert sticker.file_unique_id
assert isinstance(sticker.thumbnail, PhotoSize)
assert isinstance(sticker.thumbnail.file_id, str)
assert isinstance(sticker.thumbnail.file_unique_id, str)
assert sticker.thumbnail.file_id
assert sticker.thumbnail.file_unique_id
assert isinstance(sticker.needs_repainting, bool)
def test_expected_values(self, sticker):
assert (sticker.width == self.width)
assert (sticker.height == self.height)
assert (sticker.is_animated == self.is_animated)
assert (sticker.is_video == self.is_video)
assert (sticker.file_size == self.file_size)
assert (sticker.thumbnail.width == self.thumb_width)
assert (sticker.thumbnail.height == self.thumb_height)
assert (sticker.thumbnail.file_size == self.thumb_file_size)
assert (sticker.type == self.type)
assert (sticker.needs_repainting == self.needs_repainting)
def test_to_dict(self, sticker):
sticker_dict = sticker.to_dict()
assert isinstance(sticker_dict, dict)
assert (sticker_dict['file_id'] == sticker.file_id)
assert (sticker_dict['file_unique_id'] == sticker.file_unique_id)
assert (sticker_dict['width'] == sticker.width)
assert (sticker_dict['height'] == sticker.height)
assert (sticker_dict['is_animated'] == sticker.is_animated)
assert (sticker_dict['is_video'] == sticker.is_video)
assert (sticker_dict['file_size'] == sticker.file_size)
assert (sticker_dict['thumbnail'] == sticker.thumbnail.to_dict())
assert (sticker_dict['type'] == sticker.type)
assert (sticker_dict['needs_repainting'] == sticker.needs_repainting)
def test_de_json(self, bot, sticker):
json_dict = {'file_id': self.sticker_file_id, 'file_unique_id': self.sticker_file_unique_id, 'width': self.width, 'height': self.height, 'is_animated': self.is_animated, 'is_video': self.is_video, 'thumbnail': sticker.thumbnail.to_dict(), 'emoji': self.emoji, 'file_size': self.file_size, 'premium_animation': self.premium_animation.to_dict(), 'type': self.type, 'custom_emoji_id': self.custom_emoji_id, 'needs_repainting': self.needs_repainting}
json_sticker = Sticker.de_json(json_dict, bot)
assert (json_sticker.api_kwargs == {})
assert (json_sticker.file_id == self.sticker_file_id)
assert (json_sticker.file_unique_id == self.sticker_file_unique_id)
assert (json_sticker.width == self.width)
assert (json_sticker.height == self.height)
assert (json_sticker.is_animated == self.is_animated)
assert (json_sticker.is_video == self.is_video)
assert (json_sticker.emoji == self.emoji)
assert (json_sticker.file_size == self.file_size)
assert (json_sticker.thumbnail == sticker.thumbnail)
assert (json_sticker.premium_animation == self.premium_animation)
assert (json_sticker.type == self.type)
assert (json_sticker.custom_emoji_id == self.custom_emoji_id)
assert (json_sticker.needs_repainting == self.needs_repainting)
def test_equality(self, sticker):
a = Sticker(sticker.file_id, sticker.file_unique_id, self.width, self.height, self.is_animated, self.is_video, self.type)
b = Sticker('', sticker.file_unique_id, self.width, self.height, self.is_animated, self.is_video, self.type)
c = Sticker(sticker.file_id, sticker.file_unique_id, 0, 0, False, True, self.type)
d = Sticker('', '', self.width, self.height, self.is_animated, self.is_video, self.type)
e = PhotoSize(sticker.file_id, sticker.file_unique_id, self.width, self.height, self.is_animated)
assert (a == b)
assert (hash(a) == hash(b))
assert (a is not b)
assert (a == c)
assert (hash(a) == hash(c))
assert (a != d)
assert (hash(a) != hash(d))
assert (a != e)
assert (hash(a) != hash(e))
async def test_error_without_required_args(self, bot, chat_id):
with pytest.raises(TypeError):
(await bot.send_sticker(chat_id))
async def test_send_with_sticker(self, monkeypatch, bot, chat_id, sticker):
    """Passing a Sticker object must serialize to its file_id in the request."""

    async def check_request(url, request_data: RequestData, *args, **kwargs):
        # Succeed only if the outgoing payload carries the sticker's file_id.
        return request_data.json_parameters['sticker'] == sticker.file_id

    monkeypatch.setattr(bot.request, 'post', check_request)
    assert await bot.send_sticker(sticker=sticker, chat_id=chat_id)
# Bug fix: the decorator line was truncated to bare ``.parametrize`` (a syntax
# error) — restore the full pytest mark so both modes actually run.
@pytest.mark.parametrize('local_mode', [True, False])
async def test_send_sticker_local_files(self, monkeypatch, bot, chat_id, local_mode):
    """Local files are sent as a file:// URI in local mode, else uploaded as InputFile."""
    try:
        bot._local_mode = local_mode
        test_flag = False
        file = data_file('telegram.jpg')
        expected = file.as_uri()

        async def make_assertion(_, data, *args, **kwargs):
            nonlocal test_flag
            if local_mode:
                # Local mode: the path is forwarded verbatim as a URI.
                test_flag = (data.get('sticker') == expected)
            else:
                # Remote mode: the file contents are wrapped for upload.
                test_flag = isinstance(data.get('sticker'), InputFile)

        monkeypatch.setattr(bot, '_post', make_assertion)
        (await bot.send_sticker(chat_id, file))
        assert test_flag
    finally:
        # Restore the default so later tests are unaffected.
        bot._local_mode = False
class GammaL(BinaryScalarOp):
    """Elementwise lower incomplete gamma function, unregularized.

    Python path delegates to scipy; the C path uses ``lowerGamma`` from the
    bundled ``c_code/gamma.c``.
    """

    # Fix: this helper takes no ``self`` and is invoked as
    # ``GammaL.st_impl(...)`` — mark it @staticmethod so instance access
    # (``self.st_impl``) would also work and the intent is explicit.
    @staticmethod
    def st_impl(k, x):
        # gammainc is the regularized lower incomplete gamma; multiplying by
        # gamma(k) recovers the unregularized value.
        return (scipy.special.gammainc(k, x) * scipy.special.gamma(k))

    def impl(self, k, x):
        return GammaL.st_impl(k, x)

    def c_support_code(self, **kwargs):
        # The C implementation is shipped as a file next to this module.
        with open(os.path.join(os.path.dirname(__file__), 'c_code', 'gamma.c')) as f:
            raw = f.read()
        return raw

    def c_code(self, node, name, inp, out, sub):
        (k, x) = inp
        (z,) = out
        if (node.inputs[0].type in float_types):
            dtype = ('npy_' + node.outputs[0].dtype)
            return ('%(z)s =\n (%(dtype)s) lowerGamma(%(k)s, %(x)s);' % locals())
        raise NotImplementedError('only floatingpoint is implemented')

    def __eq__(self, other):
        # Ops are stateless: two instances of the same class are interchangeable.
        return (type(self) == type(other))

    def __hash__(self):
        return hash(type(self))
def test_axis_azimuth():
    """Tracker with axis_azimuth=90: sun along the axis keeps the tracker flat,
    sun perpendicular to the axis rotates it to follow the apparent zenith."""
    scenarios = [
        # (solar azimuth, expected output columns)
        (90, {'aoi': 30, 'surface_azimuth': 180, 'surface_tilt': 0, 'tracker_theta': 0}),
        (180, {'aoi': 0, 'surface_azimuth': 180, 'surface_tilt': 30, 'tracker_theta': 30}),
    ]
    for solar_azimuth, expected_cols in scenarios:
        tracker_data = tracking.singleaxis(
            pd.Series([30]), pd.Series([solar_azimuth]),
            axis_tilt=0, axis_azimuth=90, max_angle=90,
            backtrack=True, gcr=(2.0 / 7.0))
        expect = pd.DataFrame(expected_cols, index=[0], dtype=np.float64)
        # Align column order with the library's canonical output ordering.
        expect = expect[SINGLEAXIS_COL_ORDER]
        assert_frame_equal(expect, tracker_data)
def sendrecv(sendbuf, source=0, dest=0):
    """Transfer a numpy-compatible buffer from rank ``source`` to rank ``dest``
    via the module-level MPI ``comm``.

    Returns the (C-contiguous) buffer on the source and destination ranks;
    ranks not involved in the transfer fall through and return None.
    """
    if source == dest:
        # Degenerate transfer: every rank just keeps its own buffer.
        return sendbuf
    if rank == source:
        payload = numpy.asarray(sendbuf, order='C')
        # Ship shape/dtype first so the receiver can pre-allocate,
        # then the raw array data.
        comm.send((payload.shape, payload.dtype), dest=dest)
        comm.Send(payload, dest=dest)
        return payload
    if rank == dest:
        shape, dtype = comm.recv(source=source)
        received = numpy.empty(shape, dtype=dtype)
        comm.Recv(received, source=source)
        return received
class Database(LiveDict):
    """A LiveDict whose contents are backed by a JSON file on disk."""

    def __init__(self, path):
        # Fix: the original ``open(path).read()`` leaked the file handle;
        # use a context manager so it is closed deterministically.
        with open(path) as f:
            super(Database, self).__init__(json.loads(f.read()))
        self.path = path

    def update(self):
        """Write the current contents out to the backing JSON file."""
        with open(self.path, 'w+') as f:
            f.write(json.dumps(self.todict()))

    def refresh(self):
        # The original body was a byte-for-byte copy of update(); delegate to
        # keep a single persistence code path.
        # NOTE(review): a "refresh" would conventionally re-READ from disk —
        # confirm the write-out behavior is intended before changing it.
        self.update()
class UpdateLog:
    """Record of periodic update runs: start/completion times and the
    versions discovered."""

    def __init__(self, started, completed, versions, periodic) -> None:
        self.started = started      # datetime | None — when the last update started
        self.completed = completed  # datetime | None — when it last finished
        self.versions = versions    # list of NewVersion entries
        self.periodic = periodic    # flag loaded verbatim from the stored dict

    # Fix: both alternate constructors take ``cls`` but were missing
    # @classmethod — as written, ``cls.from_dict(raw_json)`` inside
    # from_app_data would have bound ``cls`` to the dict and then tried to
    # call it, raising TypeError.
    @classmethod
    def from_dict(cls, dictionary):
        """Build an UpdateLog from its serialized dict form (None -> empty)."""
        if dictionary is None:
            dictionary = {}
        return cls(
            load_datetime(dictionary.get('started')),
            load_datetime(dictionary.get('completed')),
            [NewVersion.from_dict(v) for v in dictionary.get('versions', [])],
            dictionary.get('periodic'),
        )

    @classmethod
    def from_app_data(cls, app_data, distribution, for_py_version):
        """Load the embedded update log for a distribution from app data."""
        raw_json = app_data.embed_update_log(distribution, for_py_version).read()
        return cls.from_dict(raw_json)

    def to_dict(self):
        """Serialize back to the dict form accepted by from_dict."""
        return {
            'started': dump_datetime(self.started),
            'completed': dump_datetime(self.completed),
            'periodic': self.periodic,
            'versions': [r.to_dict() for r in self.versions],
        }

    def needs_update(self):
        """True when no update completed within UPDATE_PERIOD (or none at all)."""
        now = datetime.now(tz=timezone.utc)
        if self.completed is None:
            # Never completed: defer to whether a run is (still) in flight.
            return self._check_start(now)
        if (now - self.completed) <= UPDATE_PERIOD:
            return False
        return self._check_start(now)

    def _check_start(self, now):
        # No run started, or the started run is old enough to count as aborted.
        return (self.started is None) or ((now - self.started) > UPDATE_ABORTED_DELAY)
def test_tar_extract_one_with_interpolation():
    """{key} placeholders in both 'in' and 'out' paths are formatted from
    context before tarfile.open is called."""
    context = Context({
        'key1': 'value1',
        'key2': 'value2',
        'key3': 'value3',
        'tar': {'extract': [{'in': './{key3}.tar.xz', 'out': 'path/{key2}/dir'}]},
    })
    with patch('tarfile.open') as mock_tarfile:
        pypyr.steps.tar.run_step(context)
        # Archive path interpolated and opened read-only with auto compression.
        mock_tarfile.assert_called_once_with('./value3.tar.xz', 'r:*')
        # Destination directory interpolated before extraction.
        mock_tarfile.return_value.__enter__().extractall.assert_called_once_with('path/value2/dir')
def load_and_covnert_case(input_image: str, input_seg: str, output_image: str, output_seg: str, min_component_size: int = 50):
    """Convert one case: binarize the segmentation, blank it wherever the
    image is pure white (after small-component filtering), and copy the image.

    NOTE(review): "covnert" is a typo for "convert"; kept to avoid breaking callers.
    """
    seg = io.imread(input_seg)
    # Map the 255 foreground label down to 1.
    seg[seg == 255] = 1
    image = io.imread(input_image)
    # Sum across channels: a pure-white RGB pixel sums to 3 * 255.
    channel_sum = image.sum(2)
    white_mask = (channel_sum == 3 * 255)
    # Keep only connected components larger than min_component_size pixels.
    white_mask = generic_filter_components(
        white_mask,
        filter_fn=lambda ids, sizes: [
            comp_id for idx, comp_id in enumerate(ids) if sizes[idx] > min_component_size
        ],
    )
    white_mask = binary_fill_holes(white_mask)
    # White regions are treated as ignore: clear the segmentation there.
    seg[white_mask] = 0
    io.imsave(output_seg, seg, check_contrast=False)
    shutil.copy(input_image, output_image)
class OptionSetOption(models.Model):
    """Through-model linking an OptionSet to an Option with an explicit position."""

    optionset = models.ForeignKey(
        'OptionSet', on_delete=models.CASCADE, related_name='optionset_options'
    )
    option = models.ForeignKey(
        'Option', on_delete=models.CASCADE, related_name='option_optionsets'
    )
    # Position of the option within its option set.
    order = models.IntegerField(default=0)

    class Meta:
        # Default queryset ordering: grouped by set, then by position.
        ordering = ('optionset', 'order')

    def __str__(self):
        return f'{self.optionset} / {self.option} [{self.order}]'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.