code stringlengths 281 23.7M |
|---|
class WebEngineInspectorView(QWebEngineView):
    """Web view used to display the developer-tools inspector."""

    def createWindow(self, wintype: QWebEnginePage.WebWindowType) -> QWebEngineView:
        """Called by Qt when a page inside the inspector wants a new window.

        Delegates window creation to the view belonging to the inspected page.
        """
        page = self.page()
        assert page is not None
        inspected = page.inspectedPage()
        assert inspected is not None
        if machinery.IS_QT5:
            # Qt 5: the inspected page still knows its view directly.
            inspected_view = inspected.view()
            assert isinstance(inspected_view, QWebEngineView), inspected_view
            return inspected_view.createWindow(wintype)
        # Qt 6: let the page create the window, then find our wrapper view.
        new_page = inspected.createWindow(wintype)
        wrapper = webview.WebEngineView.forPage(new_page)
        assert wrapper is not None
        return wrapper
class VertexDomain:
    """Manage vertex lists that share a common attribute layout.

    Each attribute gets its own GPU buffer object; buffer capacity grows in
    powers of two as vertex lists are allocated.
    """

    # Initial allocator capacity, in vertices.
    _initial_count = 16

    def __init__(self, program, attribute_meta):
        self.program = program
        self.attribute_meta = attribute_meta
        self.allocator = allocation.Allocator(self._initial_count)
        self.attribute_names = {}
        self.buffer_attributes = []
        self._property_dict = {}
        for (name, meta) in attribute_meta.items():
            # BUG FIX: message typo "atrribute" -> "attribute".
            assert (meta['format'][0] in _gl_types), f"'{meta['format']}' is not a valid attribute format for '{name}'."
            location = meta['location']
            count = meta['count']
            gl_type = _gl_types[meta['format'][0]]
            normalize = ('n' in meta['format'])
            attribute = shader.Attribute(name, location, count, gl_type, normalize)
            self.attribute_names[attribute.name] = attribute
            # Size the buffer for the allocator's current capacity.
            attribute.buffer = AttributeBufferObject((attribute.stride * self.allocator.capacity), attribute)
            self.buffer_attributes.append((attribute.buffer, (attribute,)))
            self._property_dict[attribute.name] = _make_attribute_property(name)
        # Synthesize a VertexList subclass exposing one property per attribute.
        self._vertexlist_class = type('VertexList', (VertexList,), self._property_dict)
        self.vao = vertexarray.VertexArray()
        self.vao.bind()
        for (buffer, attributes) in self.buffer_attributes:
            buffer.bind()
            for attribute in attributes:
                attribute.enable()
                attribute.set_pointer(buffer.ptr)
        self.vao.unbind()

    def safe_alloc(self, count):
        """Allocate `count` vertices, growing all attribute buffers on demand."""
        try:
            return self.allocator.alloc(count)
        except allocation.AllocatorMemoryException as e:
            capacity = _nearest_pow2(e.requested_capacity)
            for (buffer, _) in self.buffer_attributes:
                buffer.resize((capacity * buffer.attribute_stride))
            self.allocator.set_capacity(capacity)
            return self.allocator.alloc(count)

    def safe_realloc(self, start, count, new_count):
        """Resize an allocated region to `new_count` vertices, growing buffers on demand."""
        try:
            return self.allocator.realloc(start, count, new_count)
        except allocation.AllocatorMemoryException as e:
            capacity = _nearest_pow2(e.requested_capacity)
            for (buffer, _) in self.buffer_attributes:
                buffer.resize((capacity * buffer.attribute_stride))
            self.allocator.set_capacity(capacity)
            return self.allocator.realloc(start, count, new_count)

    def create(self, count, index_count=None):
        """Create a VertexList of `count` vertices.

        `index_count` is accepted for interface compatibility but unused by
        non-indexed domains.
        """
        start = self.safe_alloc(count)
        return self._vertexlist_class(self, start, count)

    def draw(self, mode):
        """Draw all allocated regions using GL primitive `mode`."""
        self.vao.bind()
        for (buffer, _) in self.buffer_attributes:
            buffer.sub_data()
        (starts, sizes) = self.allocator.get_allocated_regions()
        primcount = len(starts)
        if (primcount == 0):
            pass
        elif (primcount == 1):
            glDrawArrays(mode, starts[0], sizes[0])
        else:
            # Batch all regions into a single multi-draw call.
            starts = (GLint * primcount)(*starts)
            sizes = (GLsizei * primcount)(*sizes)
            glMultiDrawArrays(mode, starts, sizes, primcount)

    def draw_subset(self, mode, vertex_list):
        """Draw only the region owned by `vertex_list`."""
        self.vao.bind()
        for (buffer, _) in self.buffer_attributes:
            buffer.sub_data()
        glDrawArrays(mode, vertex_list.start, vertex_list.count)

    def is_empty(self):
        return (not self.allocator.starts)

    def __repr__(self):
        # BUG FIX: the original format '<%%x %s>' contained a single
        # conversion for three arguments and always raised TypeError.
        return ('<%s 0x%x %s>' % (self.__class__.__name__, id(self), self.allocator))
class Permute(Layer):
    """Layer that permutes the (non-batch) dimensions of its input."""

    def __init__(self, dims, **kwargs):
        super(Permute, self).__init__(**kwargs)
        # dims are 1-indexed; the batch axis (0) is implicit.
        self.dims = tuple(dims)
        self.input_spec = InputSpec(ndim=len(self.dims) + 1)

    def compute_output_shape(self, input_shape):
        """Return the shape obtained by permuting the non-batch axes."""
        shape = list(input_shape)
        permuted = list(shape)
        for target_pos, source_axis in enumerate(self.dims, start=1):
            permuted[target_pos] = shape[source_axis]
        return tuple(permuted)

    def call(self, inputs):
        return K.permute_dimensions(inputs, (0,) + self.dims)

    def get_config(self):
        """Serialize the layer config; 'dims' overrides any base entry."""
        merged = dict(super(Permute, self).get_config())
        merged.update({'dims': self.dims})
        return merged
def make_retinanet_loss_evaluator(cfg, box_coder):
    """Build the RetinaNet loss computation object from config values."""
    retina_cfg = cfg.MODEL.RETINANET
    matcher = Matcher(
        retina_cfg.FG_IOU_THRESHOLD,
        retina_cfg.BG_IOU_THRESHOLD,
        allow_low_quality_matches=True,
    )
    focal_loss = SigmoidFocalLoss(retina_cfg.LOSS_GAMMA, retina_cfg.LOSS_ALPHA)
    return RetinaNetLossComputation(
        matcher,
        box_coder,
        generate_retinanet_labels,
        focal_loss,
        bbox_reg_beta=retina_cfg.BBOX_REG_BETA,
        regress_norm=retina_cfg.BBOX_REG_WEIGHT,
    )
def modified_main(dataset_path, k_list_to_check, ranker_path=None, normalize_ranker=False, num_workers=1, tokenizer='corenlp', docdb_path=None, out=None):
    """Evaluate top-k retrieval coverage for regular vs. modified queries.

    Builds modified queries from ranked gold paragraphs, retrieves the top
    `max(k_list_to_check)` documents once, then scores each `k`. Results are
    printed (or written to `out`) as pretty tables, overall and per category.
    """
    dataset = load_dataset(dataset_path)
    ranker = TfidfDocRanker(tfidf_path=ranker_path, normalize_vectors=normalize_ranker, tokenizer=tokenizer)
    docdb = DocDB(docdb_path)
    print('Building modified queries...')
    ranked_gold_dict = build_ranked_golds(dataset, docdb=docdb, ranker=ranker)
    regular_table = prettytable.PrettyTable(['Top K', 'Second Paragraph Hits', 'Second Paragraph Hits Modified Query'])
    cat_table_dict = {cat: prettytable.PrettyTable(['Top K', 'Second Paragraph Hits', 'Second Paragraph Hits Modified Query']) for cat in CATEGORIES}
    max_k = max(k_list_to_check)
    print(f'Retrieving top {max_k} ...')
    start = time.time()
    (reg_result_dict, ranked_result_dict) = get_ranked_top_k(dataset, ranked_gold_dict, ranker, max_k, num_workers)
    # BUG FIX: time.time() deltas are seconds, not milliseconds.
    print(f'Done, took {(time.time() - start)} seconds.')
    for k in k_list_to_check:
        print(f'Calculating scores for top {k}...')
        start = time.time()
        (reg_scores, reg_category_scores) = modified_top_k_coverage_score(ranked_gold_dict, reg_result_dict, k)
        (mod_scores, mod_category_scores) = modified_top_k_coverage_score(ranked_gold_dict, ranked_result_dict, k)
        print(f'Done, took {(time.time() - start)} seconds.')
        regular_table.add_row([k, reg_scores['Second Paragraph Hits'], mod_scores['Second Paragraph Hits']])
        for cat in cat_table_dict:
            cat_table_dict[cat].add_row([k, reg_category_scores[cat]['Second Paragraph Hits'], mod_category_scores[cat]['Second Paragraph Hits']])
    output_str = 'Overall Results:\n'
    output_str += (regular_table.__str__() + '\n')
    for (cat, table) in cat_table_dict.items():
        output_str += '\n\n'
        output_str += f'Category: {cat} Results:\n'
        output_str += (table.__str__() + '\n')
    if (out is None):
        print(output_str)
    else:
        with open(out, 'w') as f:
            f.write(output_str)
class TestGurobiTranslator(QiskitOptimizationTestCase):
    """Round-trip tests between QuadraticProgram and gurobipy models."""

    # BUG FIX: the decorator was garbled to a bare tuple expression; restored
    # as a skip guard. NOTE(review): confirm `import unittest` exists at the
    # top of this module.
    @unittest.skipIf((not _optionals.HAS_GUROBIPY), 'Gurobi not available.')
    def test_from_and_to(self):
        """Translate a program to gurobipy and back, and check error cases."""
        q_p = QuadraticProgram('test')
        q_p.binary_var(name='x')
        q_p.integer_var(name='y', lowerbound=(- 2), upperbound=4)
        q_p.continuous_var(name='z', lowerbound=(- 1.5), upperbound=3.2)
        q_p.minimize(constant=1, linear={'x': 1, 'y': 2}, quadratic={('x', 'y'): (- 1), ('z', 'z'): 2})
        q_p.linear_constraint({'x': 2, 'z': (- 1)}, '==', 1)
        q_p.quadratic_constraint({'x': 2, 'z': (- 1)}, {('y', 'z'): 3}, '==', 1)
        # Round trip must preserve the LP representation exactly.
        q_p2 = from_gurobipy(to_gurobipy(q_p))
        self.assertEqual(q_p.export_as_lp_string(), q_p2.export_as_lp_string())
        import gurobipy as gp
        mod = gp.Model('test')
        x = mod.addVar(vtype=gp.GRB.BINARY, name='x')
        y = mod.addVar(vtype=gp.GRB.INTEGER, lb=(- 2), ub=4, name='y')
        z = mod.addVar(vtype=gp.GRB.CONTINUOUS, lb=(- 1.5), ub=3.2, name='z')
        mod.setObjective(((((1 + x) + (2 * y)) - (x * y)) + ((2 * z) * z)))
        mod.addConstr((((2 * x) - z) == 1), name='c0')
        mod.addConstr(((((2 * x) - z) + ((3 * y) * z)) == 1), name='q0')
        # Unsupported variable type must raise.
        with self.assertRaises(QiskitOptimizationError):
            mod = gp.Model()
            mod.addVar(vtype=gp.GRB.SEMIINT, lb=1, name='x')
            _ = from_gurobipy(mod)
        # Indicator constraints are unsupported and must raise.
        with self.assertRaises(QiskitOptimizationError):
            mod = gp.Model()
            x = mod.addVar(vtype=gp.GRB.BINARY, name='x')
            y = mod.addVar(vtype=gp.GRB.BINARY, name='y')
            mod.addConstr(((x == 1) >> ((x + y) <= 1)))
            _ = from_gurobipy(mod)
        # Unnamed variables get gurobipy's default C0/C1/C2 names.
        mod = gp.Model()
        x = mod.addVar(vtype=gp.GRB.BINARY)
        y = mod.addVar(vtype=gp.GRB.CONTINUOUS)
        z = mod.addVar(vtype=gp.GRB.INTEGER)
        mod.setObjective((((((x + y) + z) + (x * y)) + (y * z)) + (x * z)))
        mod.addConstr(((x + y) == z))
        mod.addConstr(((x + y) >= z))
        mod.addConstr(((x + y) <= z))
        mod.addConstr(((x * y) == z))
        mod.addConstr(((x * y) >= z))
        mod.addConstr(((x * y) <= z))
        q_p = from_gurobipy(mod)
        var_names = [v.name for v in q_p.variables]
        self.assertListEqual(var_names, ['C0', 'C1', 'C2'])
        senses = [Constraint.Sense.EQ, Constraint.Sense.GE, Constraint.Sense.LE]
        for (i, c) in enumerate(q_p.linear_constraints):
            self.assertDictEqual(c.linear.to_dict(use_name=True), {'C0': 1, 'C1': 1, 'C2': (- 1)})
            self.assertEqual(c.rhs, 0)
            self.assertEqual(c.sense, senses[i])
        for (i, c) in enumerate(q_p.quadratic_constraints):
            self.assertEqual(c.rhs, 0)
            self.assertDictEqual(c.linear.to_dict(use_name=True), {'C2': (- 1)})
            self.assertDictEqual(c.quadratic.to_dict(use_name=True), {('C0', 'C1'): 1})
            self.assertEqual(c.sense, senses[i])
class Migration(migrations.Migration):
    """Normalize help_text/verbose_name for all fields of the View model."""

    dependencies = [('views', '0014_data_migration')]

    # BUG FIX: a stray trailing table-cell delimiter ('|') at the end of the
    # operations list made the module unparsable; it has been removed and the
    # list reformatted one operation per line.
    operations = [
        migrations.AlterField(model_name='view', name='comment', field=models.TextField(blank=True, help_text='Additional internal information about this view.', verbose_name='Comment')),
        migrations.AlterField(model_name='view', name='help_lang1', field=models.TextField(blank=True, help_text='The help text for this view in the primary language.', verbose_name='Help (primary)')),
        migrations.AlterField(model_name='view', name='help_lang2', field=models.TextField(blank=True, help_text='The help text for this view in the secondary language.', verbose_name='Help (secondary)')),
        migrations.AlterField(model_name='view', name='help_lang3', field=models.TextField(blank=True, help_text='The help text for this view in the tertiary language.', verbose_name='Help (tertiary)')),
        migrations.AlterField(model_name='view', name='help_lang4', field=models.TextField(blank=True, help_text='The help text for this view in the quaternary language.', verbose_name='Help (quaternary)')),
        migrations.AlterField(model_name='view', name='help_lang5', field=models.TextField(blank=True, help_text='The help text for this view in the quinary language.', verbose_name='Help (quinary)')),
        migrations.AlterField(model_name='view', name='key', field=models.SlugField(blank=True, help_text='The internal identifier of this view.', max_length=128, verbose_name='Key')),
        migrations.AlterField(model_name='view', name='template', field=models.TextField(blank=True, help_text='The template for this view, written in Django template language.', verbose_name='Template')),
        migrations.AlterField(model_name='view', name='title_lang1', field=models.CharField(blank=True, help_text='The title for this view in the primary language.', max_length=256, verbose_name='Title (primary)')),
        migrations.AlterField(model_name='view', name='title_lang2', field=models.CharField(blank=True, help_text='The title for this view in the secondary language.', max_length=256, verbose_name='Title (secondary)')),
        migrations.AlterField(model_name='view', name='title_lang3', field=models.CharField(blank=True, help_text='The title for this view in the tertiary language.', max_length=256, verbose_name='Title (tertiary)')),
        migrations.AlterField(model_name='view', name='title_lang4', field=models.CharField(blank=True, help_text='The title for this view in the quaternary language.', max_length=256, verbose_name='Title (quaternary)')),
        migrations.AlterField(model_name='view', name='title_lang5', field=models.CharField(blank=True, help_text='The title for this view in the quinary language.', max_length=256, verbose_name='Title (quinary)')),
        migrations.AlterField(model_name='view', name='uri', field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this view (auto-generated).', max_length=640, verbose_name='URI')),
        migrations.AlterField(model_name='view', name='uri_prefix', field=models.URLField(blank=True, help_text='The prefix for the URI of this view.', max_length=256, verbose_name='URI Prefix')),
    ]
class Conditional_LDSR(Conditional_Unfolding_Loss):
    """Log distortion-to-signal-ratio loss (conditional unfolding variant)."""

    def __init__(self, window_length, hop_length, **kwargs):
        # Extra kwargs are accepted for interface compatibility but unused.
        super().__init__(window_length, hop_length)

    def criterion(self, target_signal_hat, target_signal):
        """Mean log ratio of distortion energy to projected-target energy."""
        eps = 1e-08
        # Scale-invariant projection of the estimate onto the target.
        inner = (target_signal_hat * target_signal).sum((- 1), keepdims=True)
        energy = (target_signal ** 2).sum(axis=(- 1), keepdims=True)
        s_target = ((inner + eps) / (energy + eps)) * target_signal
        distortion = target_signal_hat - s_target
        loss = ((distortion ** 2).sum((- 1)) + eps).log() - ((s_target ** 2).sum((- 1)) + eps).log()
        return loss.mean()
def run(gl: gitlab.Gitlab, gitlab_resource: str, resource_action: str, args: Dict[(str, Any)], verbose: bool, output: str, fields: List[str]) -> None:
    """Execute a CLI action and render its result on stdout.

    The printer is chosen from the requested output format; the rendering
    path depends on the runtime type of the action's result.
    """
    cli = GitlabCLI(gl=gl, gitlab_resource=gitlab_resource, resource_action=resource_action, args=args)
    result = cli.run()
    printer: Union[(JSONPrinter, LegacyPrinter, YAMLPrinter)] = PRINTERS[output]()
    if isinstance(result, dict):
        # Raw dicts are always rendered verbosely.
        printer.display(result, verbose=True, obj=result)
        return
    if isinstance(result, list):
        printer.display_list(result, fields, verbose=verbose)
        return
    if isinstance(result, gitlab.base.RESTObjectList):
        printer.display_list(list(result), fields, verbose=verbose)
        return
    if isinstance(result, gitlab.base.RESTObject):
        printer.display(get_dict(result, fields), verbose=verbose, obj=result)
        return
    if isinstance(result, str):
        print(result)
        return
    if isinstance(result, bytes):
        sys.stdout.buffer.write(result)
        return
    if hasattr(result, 'decode'):
        print(result.decode())
class unsubscribe_repos_Handler(BaseHandler):
    """Admin page for unsubscribing from public-template repositories."""

    # BUG FIX: the decorator was garbled to the bare token `.authenticated`
    # (a SyntaxError); restored as tornado's login guard.
    # NOTE(review): confirm `tornado.web` is imported at module top.
    @tornado.web.authenticated
    async def get(self, userid):
        """Render the unsubscribe page for admin user `userid`."""
        try:
            user = self.current_user
            # Only the matching admin user may manage repo subscriptions.
            if ((user['id'] == int(userid)) and (user['role'] == u'admin')):
                (await self.render('pubtpl_unsubscribe.html', user=user))
            else:
                raise Exception(',')
            return
        except Exception as e:
            if config.traceback_print:
                traceback.print_exc()
            (await self.render('utils_run_result.html', log=str(e), title=u'', flg='danger'))
            logger_Web_Handler.error('UserID: %s browse UnSubscribe_Repos failed! Reason: %s', userid, str(e).replace('\\r\\n', '\r\n'))
            return

    @tornado.web.authenticated
    async def post(self, userid):
        """Remove the selected repos and delete their published templates."""
        try:
            user = self.current_user
            if ((user['id'] == int(userid)) and (user['role'] == u'admin')):
                envs = {}
                for key in self.request.body_arguments:
                    envs[key] = self.get_body_arguments(key)
                env = {}
                for (k, v) in envs.items():
                    try:
                        env[k] = json.loads(v[0])
                    except Exception:
                        # Non-JSON form values are kept as raw strings.
                        env[k] = v[0]
                async with self.db.transaction() as sql_session:
                    repos = json.loads((await self.db.site.get(1, fields=('repos',), sql_session=sql_session))['repos'])
                    tmp = repos['repos']
                    result = []
                    for (i, j) in enumerate(tmp):
                        if (not env['selectedrepos'].get(str(i), False)):
                            result.append(j)
                        else:
                            # Deselected repo: drop all of its published templates.
                            pubtpls = (await self.db.pubtpl.list(reponame=j['reponame'], fields=('id',), sql_session=sql_session))
                            for pubtpl in pubtpls:
                                (await self.db.pubtpl.delete(pubtpl['id'], sql_session=sql_session))
                    repos['repos'] = result
                    (await self.db.site.mod(1, repos=json.dumps(repos, ensure_ascii=False, indent=4), sql_session=sql_session))
            else:
                raise Exception(',')
        except Exception as e:
            if config.traceback_print:
                traceback.print_exc()
            (await self.render('utils_run_result.html', log=str(e), title=u'', flg='danger'))
            logger_Web_Handler.error('UserID: %s unsubscribe Subscribe_Repos failed! Reason: %s', userid, str(e).replace('\\r\\n', '\r\n'))
            return
        (await self.render('utils_run_result.html', log=u',', title=u'', flg='success'))
        return
def count_matches(pred_texts, gt_texts):
    """Accumulate word- and char-level match statistics for OCR evaluation.

    Returns a dict with raw/cased/uncased/symbol-stripped word hits, char
    counts, true-positive chars, and the mean normalized edit distance.
    """
    stats = {
        'gt_char_num': 0,
        'pred_char_num': 0,
        'true_positive_char_num': 0,
        'gt_word_num': 0,
        'match_word_num': 0,
        'match_word_ignore_case': 0,
        'match_word_ignore_case_symbol': 0,
    }
    # Strips everything outside [A-Za-z0-9^-] (pattern kept as in original).
    ignore_pattern = re.compile('[^A-Z^a-z^0-9^-]')
    total_norm_ed = 0.0
    for pred_text, gt_text in zip(pred_texts, gt_texts):
        if gt_text == pred_text:
            stats['match_word_num'] += 1
        gt_lower = gt_text.lower()
        pred_lower = pred_text.lower()
        if gt_lower == pred_lower:
            stats['match_word_ignore_case'] += 1
        gt_clean = ignore_pattern.sub('', gt_lower)
        pred_clean = ignore_pattern.sub('', pred_lower)
        if gt_clean == pred_clean:
            stats['match_word_ignore_case_symbol'] += 1
        stats['gt_word_num'] += 1
        total_norm_ed += Levenshtein.normalized_distance(pred_clean, gt_clean)
        stats['gt_char_num'] += len(gt_clean)
        stats['pred_char_num'] += len(pred_clean)
        stats['true_positive_char_num'] += cal_true_positive_char(pred_clean, gt_clean)
    stats['ned'] = total_norm_ed / max(1, len(gt_texts))
    return stats
def test_order_ab():
    """Shape introspection of a two-field namedtuple preserves field order (a, b)."""
    # BUG FIX: removed a stray trailing table-cell delimiter ('|') that made
    # the assertion a SyntaxError; the expression itself is unchanged.
    FooAB = namedtuple('FooAB', 'a b')
    assert (get_named_tuple_shape(FooAB) == Shape(input=InputShape(constructor=FooAB, kwargs=None, fields=(InputField(type=Any, id='a', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY), InputField(type=Any, id='b', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY)), params=(Param(field_id='a', name='a', kind=ParamKind.POS_OR_KW), Param(field_id='b', name='b', kind=ParamKind.POS_OR_KW)), overriden_types=frozenset({'a', 'b'})), output=OutputShape(fields=(OutputField(type=Any, id='a', default=NoDefault(), accessor=create_key_accessor(0, access_error=None), metadata=MappingProxyType({}), original=ANY), OutputField(type=Any, id='b', default=NoDefault(), accessor=create_key_accessor(1, access_error=None), metadata=MappingProxyType({}), original=ANY)), overriden_types=frozenset({'a', 'b'}))))
# BUG FIX: the decorator was garbled to the bare call `_grad()`; restored as
# torch's inference guard. NOTE(review): confirm `import torch` at module top.
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers):
    """Convert a fairseq wav2vec2 + speech2text2 checkpoint to HF format.

    Loads the fairseq model, copies encoder/decoder weights into HF modules,
    writes the vocab/tokenizer, and saves the combined model to
    `pytorch_dump_folder_path`.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True)
    (model, _, _) = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:(- 1)])})
    model = model[0].eval()
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    (missing_keys, unexpected_keys) = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # 'embed_out' is intentionally handled separately as the LM head below.
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}')
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)
    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
def _validate_template(target: Path, template: (str | None)) -> str:
if (template == ''):
warnings.warn(f'template={template!r} looks like a error, using default instead')
template = None
if (template is None):
template = TEMPLATES.get(target.suffix)
if (template is None):
raise ValueError(f'''bad file format: {target.suffix!r} (of {target})
only *.txt and *.py have a default template''')
else:
return template |
class TestIPMW:
    """Tests for IPMW (inverse probability of missingness weights)."""

    # BUG FIX: the fixture decorator was missing (garbled source); without it
    # the tests below would receive an unbound `mdata` argument.
    @pytest.fixture
    def mdata(self):
        df = pd.DataFrame()
        df['A'] = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
        df['L'] = [1, 1, 0, 0, 0, 1, 1, 1, 1, 0]
        df['M'] = [1, np.nan, 1, 0, np.nan, 0, 1, np.nan, np.nan, 1]
        return df

    def test_error_for_non_nan(self, mdata):
        # 'L' has no missing values, so IPMW must reject it.
        with pytest.raises(ValueError):
            IPMW(mdata, missing_variable='L', stabilized=True)

    def test_missing_count(self, mdata):
        ipm = IPMW(mdata, missing_variable='M', stabilized=True)
        ipm.regression_models(model_denominator='A')
        assert (6 == np.sum(ipm.df['_observed_indicator_']))
        assert (4 == np.sum((1 - ipm.df['_observed_indicator_'])))

    def test_missing_count2(self):
        df = load_sample_data(False)
        ipm = IPMW(df, missing_variable='dead', stabilized=True)
        ipm.regression_models(model_denominator='art')
        assert (517 == np.sum(ipm.df['_observed_indicator_']))
        assert (30 == np.sum((1 - ipm.df['_observed_indicator_'])))

    def test_error_numerator_with_unstabilized(self):
        df = load_sample_data(False)
        ipm = IPMW(df, missing_variable='dead', stabilized=False)
        with pytest.raises(ValueError):
            ipm.regression_models(model_denominator='male + age0 + dvl0 + cd40', model_numerator='male')

    def test_unstabilized_weights(self):
        # NOTE(review): expected values (1., 0.) look truncated in the source;
        # confirm against the reference results.
        df = load_sample_data(False)
        ipm = IPMW(df, missing_variable='dead', stabilized=False)
        ipm.regression_models(model_denominator='male + age0 + dvl0 + cd40')
        ipm.fit()
        npt.assert_allclose(np.mean(ipm.Weight), 1.)
        npt.assert_allclose(np.std(ipm.Weight, ddof=1), 0.)

    def test_stabilized_weights(self):
        # NOTE(review): expected values (0.) look truncated in the source;
        # confirm against the reference results.
        df = load_sample_data(False)
        ipm = IPMW(df, missing_variable='dead', stabilized=True)
        ipm.regression_models(model_denominator='male + age0 + dvl0 + cd40')
        ipm.fit()
        npt.assert_allclose(np.mean(ipm.Weight), 0.)
        npt.assert_allclose(np.std(ipm.Weight, ddof=1), 0.)

    def test_error_too_many_model(self):
        df = load_sample_data(False)
        ipm = IPMW(df, missing_variable='dead')
        with pytest.raises(ValueError):
            ipm.regression_models(model_denominator=['male + age0', 'male + age0 + dvl0'])

    def test_error_for_non_nan2(self):
        df = pd.DataFrame()
        df['a'] = [0, 0, 1]
        df['b'] = [0, 0, np.nan]
        with pytest.raises(ValueError):
            IPMW(df, missing_variable=['a', 'b'], stabilized=True)

    def test_nonmonotone_detection(self):
        df = pd.DataFrame()
        df['a'] = [0, 0, np.nan]
        df['b'] = [np.nan, 0, 1]
        ipm = IPMW(df, missing_variable=['a', 'b'])
        with pytest.raises(ValueError):
            ipm.regression_models(model_denominator=['b', 'a'])

    def test_check_overall_uniform(self):
        df = pd.DataFrame()
        df['a'] = [0, 0, 1, np.nan]
        df['b'] = [1, 0, 0, np.nan]
        df['c'] = [1, 0, 0, np.nan]
        df['d'] = [1, np.nan, np.nan, np.nan]
        ipm = IPMW(df, missing_variable=['a', 'b', 'c'])
        assert ipm._check_overall_uniform(df, miss_vars=['a', 'b', 'c'])[1]
        assert (not ipm._check_overall_uniform(df, miss_vars=['a', 'b', 'c', 'd'])[1])

    def test_check_uniform(self):
        df = pd.DataFrame()
        df['a'] = [0, 0, 1, np.nan]
        df['b'] = [1, 0, 0, np.nan]
        df['c'] = [1, np.nan, np.nan, np.nan]
        ipm = IPMW(df, missing_variable=['a', 'b', 'c'])
        assert ipm._check_uniform(df, miss1='a', miss2='b')
        assert (not ipm._check_uniform(df, miss1='a', miss2='c'))

    def test_single_model_does_not_break(self):
        df = load_monotone_missing_data()
        ipm = IPMW(df, missing_variable=['B', 'C'], stabilized=False, monotone=True)
        ipm.regression_models(model_denominator='L')
        ipm.fit()
        x = ipm.Weight

    def test_monotone_example(self):
        # NOTE(review): expected values (0.) look truncated in the source;
        # confirm against the reference results.
        df = load_monotone_missing_data()
        ipm = IPMW(df, missing_variable=['B', 'C'], stabilized=False, monotone=True)
        ipm.regression_models(model_denominator=['L + A', 'L + B'])
        ipm.fit()
        df['w'] = ipm.Weight
        dfs = df.dropna(subset=['w'])
        npt.assert_allclose(np.average(dfs['B'], weights=dfs['w']), 0.)
        npt.assert_allclose(np.average(dfs['C'], weights=dfs['w']), 0.)
def deconv2d_args_preprocessor(args, kwargs):
    """Strip a legacy `output_shape` argument, then delegate to conv2d preprocessing.

    Each removal is recorded as ('output_shape', None) in the conversion log.
    """
    dropped = []
    if len(args) == 5 and isinstance(args[4], tuple):
        # Positional output_shape: discard the trailing tuple.
        args = args[:-1]
        dropped.append(('output_shape', None))
    if 'output_shape' in kwargs:
        kwargs.pop('output_shape')
        dropped.append(('output_shape', None))
    args, kwargs, more_dropped = conv2d_args_preprocessor(args, kwargs)
    return (args, kwargs, dropped + more_dropped)
class PaymentRequest:
    """A BIP70 payment request: parsing, signature verification and payment."""

    def __init__(self, data, *, error=None):
        self.raw = data
        self.error = error
        self.parse(data)
        self.requestor = None  # set by verify() when the request is signed
        self.tx = None

    def __str__(self):
        return str(self.raw)

    def parse(self, r):
        """Parse raw protobuf bytes into outputs/memo/payment_url."""
        self.outputs = []
        if self.error:
            return
        self.id = bh2u(sha256(r)[0:16])
        try:
            self.data = pb2.PaymentRequest()
            self.data.ParseFromString(r)
        except Exception:
            self.error = 'cannot parse payment request'
            return
        self.details = pb2.PaymentDetails()
        self.details.ParseFromString(self.data.serialized_payment_details)
        for o in self.details.outputs:
            addr = transaction.get_address_from_output_script(o.script)
            if (not addr):
                self.error = 'only addresses are allowed as outputs'
                return
            self.outputs.append(PartialTxOutput.from_address_and_value(addr, o.amount))
        self.memo = self.details.memo
        self.payment_url = self.details.payment_url

    def verify(self, contacts):
        """Verify the request signature; returns True on success (or no signature)."""
        if self.error:
            return False
        if (not self.raw):
            self.error = 'Empty request'
            return False
        pr = pb2.PaymentRequest()
        try:
            pr.ParseFromString(self.raw)
        except Exception:
            self.error = 'Error: Cannot parse payment request'
            return False
        if (not pr.signature):
            # Unsigned requests are accepted but have no requestor identity.
            self.requestor = None
            return True
        if (pr.pki_type in ['x509+sha256', 'x509+sha1']):
            return self.verify_x509(pr)
        elif (pr.pki_type in ['dnssec+btc', 'dnssec+ecdsa']):
            return self.verify_dnssec(pr, contacts)
        else:
            self.error = 'ERROR: Unsupported PKI Type for Message Signature'
            return False

    def verify_x509(self, paymntreq):
        """Check the request's X.509 certificate chain and RSA signature."""
        load_ca_list()
        if (not ca_list):
            self.error = 'Trusted certificate authorities list not found'
            return False
        cert = pb2.X509Certificates()
        cert.ParseFromString(paymntreq.pki_data)
        try:
            (x, ca) = verify_cert_chain(cert.certificate)
        except BaseException as e:
            _logger.exception('')
            self.error = str(e)
            return False
        self.requestor = x.get_common_name()
        if self.requestor.startswith('*.'):
            self.requestor = self.requestor[2:]
        pubkey0 = rsakey.RSAKey(x.modulus, x.exponent)
        sig = paymntreq.signature
        # The signature covers the message with an empty signature field.
        paymntreq.signature = b''
        s = paymntreq.SerializeToString()
        sigBytes = bytearray(sig)
        msgBytes = bytearray(s)
        if (paymntreq.pki_type == 'x509+sha256'):
            hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
            verify = pubkey0.verify(sigBytes, (x509.PREFIX_RSA_SHA256 + hashBytes))
        elif (paymntreq.pki_type == 'x509+sha1'):
            verify = pubkey0.hashAndVerify(sigBytes, msgBytes)
        else:
            self.error = f'ERROR: unknown pki_type {paymntreq.pki_type} in Payment Request'
            return False
        if (not verify):
            self.error = 'ERROR: Invalid Signature for Payment Request Data'
            return False
        self.error = ('Signed by Trusted CA: ' + ca.get_common_name())
        return True

    def verify_dnssec(self, pr, contacts):
        """Verify a DNSSEC-based signature via the contacts resolver."""
        sig = pr.signature
        alias = pr.pki_data
        info = contacts.resolve(alias)
        if (info.get('validated') is not True):
            self.error = 'Alias verification failed (DNSSEC)'
            return False
        if (pr.pki_type == 'dnssec+btc'):
            self.requestor = alias
            address = info.get('address')
            pr.signature = b''
            message = pr.SerializeToString()
            if ecc.verify_message_with_address(address, sig, message):
                self.error = 'Verified with DNSSEC'
                return True
            else:
                self.error = 'verify failed'
                return False
        else:
            self.error = 'unknown algo'
            return False

    def has_expired(self) -> Optional[bool]:
        if (not hasattr(self, 'details')):
            return None
        return (self.details.expires and (self.details.expires < int(time.time())))

    def get_time(self):
        return self.details.time

    def get_expiration_date(self):
        return self.details.expires

    def get_amount(self):
        return sum(map((lambda x: x.value), self.outputs))

    def get_address(self):
        o = self.outputs[0]
        addr = o.address
        assert addr
        return addr

    def get_requestor(self):
        return (self.requestor if self.requestor else self.get_address())

    def get_verify_status(self):
        return (self.error if self.requestor else 'No Signature')

    def get_memo(self):
        return self.memo

    def get_id(self):
        return (self.id if self.requestor else self.get_address())

    def get_outputs(self):
        return self.outputs[:]

    async def send_payment_and_receive_paymentack(self, raw_tx, refund_addr):
        """POST the Payment message to the merchant and parse the PaymentACK.

        Returns (ok, memo_or_error).
        """
        pay_det = self.details
        if (not self.details.payment_url):
            return (False, 'no url')
        paymnt = pb2.Payment()
        paymnt.merchant_data = pay_det.merchant_data
        paymnt.transactions.append(bfh(raw_tx))
        ref_out = paymnt.refund_to.add()
        ref_out.script = util.bfh(address_to_script(refund_addr))
        paymnt.memo = 'Paid using Electrum'
        pm = paymnt.SerializeToString()
        payurl = urllib.parse.urlparse(pay_det.payment_url)
        resp_content = None
        try:
            proxy = Network.get_instance().proxy
            # BUG FIX: this line was garbled in the source; restored as the
            # aiohttp session helper. NOTE(review): confirm
            # `make_aiohttp_session` and `aiohttp` are imported at module top.
            async with make_aiohttp_session(proxy, headers=ACK_HEADERS) as session:
                async with session.post(payurl.geturl(), data=pm) as response:
                    resp_content = (await response.read())
                    response.raise_for_status()
                    try:
                        paymntack = pb2.PaymentACK()
                        paymntack.ParseFromString(resp_content)
                    except Exception:
                        return (False, 'PaymentACK could not be processed. Payment was sent; please manually verify that payment was received.')
                    print(f'PaymentACK message received: {paymntack.memo}')
                    return (True, paymntack.memo)
        except aiohttp.ClientError as e:
            error = f'''Payment Message/PaymentACK Failed:
error type: {type(e)}'''
            if isinstance(e, aiohttp.ClientResponseError):
                error += f'''
Got HTTP status code {e.status}.'''
                if resp_content:
                    try:
                        error_text_received = resp_content.decode('utf8')
                    except UnicodeDecodeError:
                        error_text_received = '(failed to decode error)'
                    else:
                        error_text_received = error_text_received[:400]
                    error_oneline = ' -- '.join(error.split('\n'))
                    _logger.info(f'{error_oneline} -- [DO NOT TRUST THIS MESSAGE] {repr(e)} text: {error_text_received}')
            return (False, error)
def test_require_gdal_version_chaining():
    """Chained require_gdal_version decorators each enforce their own param/value."""
    # NOTE(review): this version string looks garbled ('.0'); confirm the
    # intended GDAL version against upstream.
    version = '.0'
    # BUG FIX: the two bare `_gdal_version(...)` calls in the source were
    # garbled decorators on `a`; restored as stacked decorators.
    @require_gdal_version(version, param='foo', values=['bar'])
    @require_gdal_version(version, param='something', values=['else'])
    def a(foo=None, something=None):
        return (foo, something)
    # Values outside the guarded sets pass through untouched.
    assert (a(foo='ok', something='not else') == ('ok', 'not else'))
    with pytest.raises(GDALVersionError) as exc_info:
        a(foo='bar', something='else')
    message = 'parameter "foo=bar" requires GDAL >= {0}'.format(version)
    assert (message in exc_info.value.args[0])
    with pytest.raises(GDALVersionError) as exc_info:
        a(foo='ok', something='else')
    message = 'parameter "something=else" requires GDAL >= {0}'.format(version)
    assert (message in exc_info.value.args[0])
class Deadline:
    """Tracks an absolute monotonic point in time by which work must finish."""

    def __init__(self, timeout: Optional[float]) -> None:
        # None means "no deadline".
        self.deadline: Optional[float]
        self.deadline = None if timeout is None else time.monotonic() + timeout

    def timeout(self, *, raise_if_elapsed: bool = True) -> Optional[float]:
        """Return the remaining time in seconds, or None if unbounded.

        Raises TimeoutError when the deadline has passed and
        `raise_if_elapsed` is true.
        """
        if self.deadline is None:
            return None
        remaining = self.deadline - time.monotonic()
        if raise_if_elapsed and remaining <= 0:
            raise TimeoutError('timed out')
        return remaining
def get_color(name):
    """Map a run/algorithm name to its plot color via substring matching.

    Order matters: more specific tags are tested before their prefixes
    (e.g. 'reinforce_bl' before 'reinforce', 'sasbl' before 'sas').
    Returns None when no tag matches.
    """
    palette_by_tag = (
        ('unord', 0),
        ('rfwr', 1),
        ('reinforce_bl', 2),
        ('reinforce', 6),
        ('sasbl', 3),
        ('sas', 5),
        ('stgs', 8),
        ('relax', 7),
        ('arsm', 4),
    )
    for tag, color_index in palette_by_tag:
        if tag in name:
            return colors[color_index]
    return None
class FFTDF(lib.StreamObject):
    """Plane-wave (FFT-grid) density fitting for periodic systems.

    Integrals are evaluated on the uniform real-space grid held by
    ``self.grids``; the ``mesh`` attribute delegates to that grid object so
    the two can never diverge.
    """
    _keys = {'cell', 'kpts', 'grids', 'mesh', 'blockdim', 'exxdiv'}
    def __init__(self, cell, kpts=numpy.zeros((1, 3))):
        from pyscf.pbc.dft import gen_grid
        from pyscf.pbc.dft import numint
        self.cell = cell
        self.stdout = cell.stdout
        self.verbose = cell.verbose
        self.max_memory = cell.max_memory
        self.kpts = kpts
        self.grids = gen_grid.UniformGrids(cell)
        # Goes through the `mesh` property setter below, i.e. stored on self.grids.
        self.mesh = cell.mesh
        self.blockdim = getattr(__config__, 'pbc_df_df_DF_blockdim', 240)
        self.exxdiv = None
        self._numint = numint.KNumInt()
        # Cache of range-separated-hybrid DF helper objects (see range_coulomb).
        self._rsh_df = {}
    # BUG FIX: the two `mesh` defs were plain methods (decorators stripped),
    # so the second silently shadowed the first and both were then shadowed by
    # the `self.mesh = ...` assignment in __init__.  Restored
    # @property/@mesh.setter so `mesh` consistently delegates to self.grids.
    @property
    def mesh(self):
        """FFT mesh (number of grid points along each lattice vector)."""
        return self.grids.mesh
    @mesh.setter
    def mesh(self, mesh):
        self.grids.mesh = mesh
    def reset(self, cell=None):
        """Discard cached intermediates; optionally switch to a new cell."""
        if (cell is not None):
            self.cell = cell
        self.grids.reset(cell)
        self._rsh_df = {}
        return self
    def dump_flags(self, verbose=None):
        """Log the configuration of this DF object."""
        logger.info(self, '\n')
        logger.info(self, '******** %s ********', self.__class__)
        logger.info(self, 'mesh = %s (%d PWs)', self.mesh, numpy.prod(self.mesh))
        logger.info(self, 'len(kpts) = %d', len(self.kpts))
        logger.debug1(self, '    kpts = %s', self.kpts)
        return self
    def check_sanity(self):
        """Warn/raise for configurations FFTDF cannot handle accurately."""
        lib.StreamObject.check_sanity(self)
        cell = self.cell
        if ((cell.dimension < 2) or ((cell.dimension == 2) and (cell.low_dim_ft_type == 'inf_vacuum'))):
            raise RuntimeError('FFTDF method does not support 0D/1D low-dimension PBC system. DF, MDF or AFTDF methods should be used.\nSee also examples/pbc/31-low_dimensional_pbc.py')
        # All-electron calculations need very dense grids; recommend Gaussian DF.
        if (not cell.has_ecp()):
            logger.warn(self, 'FFTDF integrals are found in all-electron calculation. It often causes huge error.\nRecommended methods are DF or MDF. In SCF calculation, they can be initialized as\n mf = mf.density_fit()\nor\n mf = mf.mix_density_fit()')
        if (cell.ke_cutoff is None):
            ke_cutoff = tools.mesh_to_cutoff(cell.lattice_vectors(), self.mesh).min()
        else:
            ke_cutoff = numpy.min(cell.ke_cutoff)
        ke_guess = estimate_ke_cutoff(cell, cell.precision)
        if (ke_cutoff < (ke_guess * KE_SCALING)):
            mesh_guess = cell.cutoff_to_mesh(ke_guess)
            logger.warn(self, 'ke_cutoff/mesh (%g / %s) is not enough for FFTDF to get integral accuracy %g.\nCoulomb integral error is ~ %.2g Eh.\nRecommended ke_cutoff/mesh are %g / %s.', ke_cutoff, self.mesh, cell.precision, error_for_ke_cutoff(cell, ke_cutoff), ke_guess, mesh_guess)
        return self
    def build(self):
        """No precomputation needed for FFTDF beyond the sanity check."""
        return self.check_sanity()
    def aoR_loop(self, grids=None, kpts=None, deriv=0):
        """Iterate AO values on real-space grid blocks.

        Yields (ao_k1_etc, p0, p1) where [p0, p1) is the grid-point slice
        covered by the current block.
        """
        if (grids is None):
            grids = self.grids
            cell = self.cell
        else:
            cell = grids.cell
        if (grids.non0tab is None):
            grids.build(with_non0tab=True)
        if (kpts is None):
            kpts = self.kpts
        kpts = numpy.asarray(kpts)
        if ((cell.dimension < 2) or ((cell.dimension == 2) and (cell.low_dim_ft_type == 'inf_vacuum'))):
            raise RuntimeError('FFTDF method does not support low-dimension PBC system. DF, MDF or AFTDF methods should be used.\nSee also examples/pbc/31-low_dimensional_pbc.py')
        max_memory = max(2000, (self.max_memory - lib.current_memory()[0]))
        ni = self._numint
        nao = cell.nao_nr()
        p1 = 0
        for ao_k1_etc in ni.block_loop(cell, grids, nao, deriv, kpts, max_memory=max_memory):
            # ao_k1_etc[4] holds the grid coordinates of this block.
            coords = ao_k1_etc[4]
            (p0, p1) = (p1, (p1 + coords.shape[0]))
            (yield (ao_k1_etc, p0, p1))
    # Module-level implementations bound as methods.
    get_pp = get_pp
    get_nuc = get_nuc
    def get_jk_e1(self, dm, kpts=None, kpts_band=None, exxdiv=None):
        """First-order (nuclear-gradient) J and K matrices."""
        kpts = _check_kpts(self, kpts)[0]
        vj = fft_jk.get_j_e1_kpts(self, dm, kpts, kpts_band)
        vk = fft_jk.get_k_e1_kpts(self, dm, kpts, kpts_band, exxdiv)
        return (vj, vk)
    def get_j_e1(self, dm, kpts=None, kpts_band=None):
        """First-order Coulomb (J) matrix only."""
        kpts = _check_kpts(self, kpts)[0]
        vj = fft_jk.get_j_e1_kpts(self, dm, kpts, kpts_band)
        return vj
    def get_k_e1(self, dm, kpts=None, kpts_band=None, exxdiv=None):
        """First-order exchange (K) matrix only."""
        kpts = _check_kpts(self, kpts)[0]
        vk = fft_jk.get_k_e1_kpts(self, dm, kpts, kpts_band, exxdiv)
        return vk
    def get_jk(self, dm, hermi=1, kpts=None, kpts_band=None, with_j=True, with_k=True, omega=None, exxdiv=None):
        """Coulomb (J) and exchange (K) matrices for the given density matrices.

        When *omega* is given, the range-separated DF object handles the call.
        """
        if (omega is not None):
            with self.range_coulomb(omega) as rsh_df:
                return rsh_df.get_jk(dm, hermi, kpts, kpts_band, with_j, with_k, omega=None, exxdiv=exxdiv)
        (kpts, is_single_kpt) = _check_kpts(self, kpts)
        if is_single_kpt:
            (vj, vk) = fft_jk.get_jk(self, dm, hermi, kpts[0], kpts_band, with_j, with_k, exxdiv)
        else:
            vj = vk = None
            if with_k:
                vk = fft_jk.get_k_kpts(self, dm, hermi, kpts, kpts_band, exxdiv)
            if with_j:
                vj = fft_jk.get_j_kpts(self, dm, hermi, kpts, kpts_band)
        return (vj, vk)
    # AO/MO electron-repulsion-integral helpers (module-level implementations).
    get_eri = get_ao_eri = fft_ao2mo.get_eri
    ao2mo = get_mo_eri = fft_ao2mo.general
    ao2mo_7d = fft_ao2mo.ao2mo_7d
    get_ao_pairs_G = get_ao_pairs = fft_ao2mo.get_ao_pairs_G
    get_mo_pairs_G = get_mo_pairs = fft_ao2mo.get_mo_pairs_G
    def update_mf(self, mf):
        """Return a copy of *mf* that uses this DF object."""
        mf = mf.copy()
        mf.with_df = self
        return mf
    def loop(self, blksize=None):
        """Iterate 3-index DF tensor blocks (real then imaginary parts)."""
        if (self.cell.dimension < 3):
            raise RuntimeError('ERIs of 1D and 2D systems are not positive definite. Current API only supports postive definite ERIs.')
        if (blksize is None):
            blksize = self.blockdim
        kpts0 = numpy.zeros((2, 3))
        coulG = tools.get_coulG(self.cell, numpy.zeros(3), mesh=self.mesh)
        ngrids = len(coulG)
        ao_pairs_G = self.get_ao_pairs_G(kpts0, compact=True)
        ao_pairs_G *= numpy.sqrt((coulG * (self.cell.vol / (ngrids ** 2)))).reshape((- 1), 1)
        Lpq = numpy.empty((blksize, ao_pairs_G.shape[1]))
        for (p0, p1) in lib.prange(0, ngrids, blksize):
            Lpq[:(p1 - p0)] = ao_pairs_G[p0:p1].real
            (yield Lpq[:(p1 - p0)])
            Lpq[:(p1 - p0)] = ao_pairs_G[p0:p1].imag
            (yield Lpq[:(p1 - p0)])
    def get_naoaux(self):
        """Number of auxiliary 'functions': 2 per grid point (real + imag)."""
        mesh = numpy.asarray(self.mesh)
        ngrids = numpy.prod(mesh)
        return (ngrids * 2)
    range_coulomb = aft.AFTDF.range_coulomb
.script
def _compute(sum_squared_obs: torch.Tensor, sum_obs: torch.Tensor, rss: torch.Tensor, num_obs: torch.Tensor, multioutput: str, num_regressors: int) -> torch.Tensor:
tss = (sum_squared_obs - (torch.square(sum_obs) / num_obs))
r_squared = (1 - (rss / tss))
if (multioutput == 'uniform_average'):
r_squared = torch.mean(r_squared)
elif (multioutput == 'variance_weighted'):
r_squared = torch.sum(((r_squared * tss) / torch.sum(tss)))
if (num_regressors != 0):
r_squared = (1 - (((1 - r_squared) * (num_obs - 1)) / ((num_obs - num_regressors) - 1)))
return r_squared |
def torch_matmul(input, other, *, out=None):
    """Shape-inference-only stand-in for torch.matmul, returning a meta tensor.

    Mirrors torch.matmul's shape semantics (dot product, mat-mat, mat-vec,
    vec-mat, and broadcast-batched matmul).  *out* is accepted for API
    compatibility and ignored.
    """
    d1 = input.dim()
    d2 = other.dim()
    shape = None
    if (d1 == 1) and (d2 == 1):
        # Vector dot product -> 0-d scalar.
        shape = None
    elif (d1 == 2) and (d2 == 2):
        shape = (input.size(0), other.size(1))
    elif (d1 == 1) and (d2 == 2):
        shape = (other.size(1),)
    elif (d1 == 2) and (d2 == 1):
        # BUG FIX: the original tested `(d1 == 2) and (d1 == 1)`, which is
        # never true, so matrix @ vector always fell through to the batched
        # branch below.
        shape = (input.size(0),)
    else:
        # Broadcast-batched case: align trailing dims, broadcast the leading
        # (batch) dims, then apply the matrix dimensions.
        max_length = max(d1, d2)
        # Pad missing leading dims with -1 so max() always picks the real size.
        # (Also removed dead code: the original prepended/appended 1s for 1-d
        # operands and then immediately overwrote both lists.)
        shape1 = [-1] * (max_length - d1) + list(input.shape)
        shape2 = [-1] * (max_length - d2) + list(other.shape)
        shape = [max(a, b) for a, b in zip(shape1, shape2)]
        shape[-2] = shape1[-2]
        shape[-1] = shape2[-1]
        # A 1-d operand contributes no row (resp. column) dimension.
        if d1 == 1:
            shape.pop(-2)
        if d2 == 1:
            shape.pop(-1)
    if shape is None:
        return torch.tensor(0.0, device='meta')
    return torch.empty(*shape, device='meta')
def test_random_2_1_wedge_1_1():
    """Check wedge of a (2,1) tensor with a (1,1) tensor against brute force.

    The expected tensor is built by explicit antisymmetrization over all
    parity permutations of the upper and lower index groups, then compared
    with the library's wedge product.
    """
    dim = 3
    n_tensor = numpy.random.random((dim, dim, dim))
    m_tensor = numpy.random.random((dim, dim))
    expected = numpy.zeros((dim,) * 5)
    for idx in product(range(dim), repeat=5):
        a, b, c, d, e = idx
        for upper, u_sign in generate_parity_permutations([a, b, c]):
            for lower, l_sign in generate_parity_permutations([e, d]):
                contribution = n_tensor[upper[0], upper[1], lower[0]] * m_tensor[upper[2], lower[1]]
                expected[idx] += u_sign * l_sign * contribution
    # Normalize by the sizes of the permutation groups: 3! for the upper
    # indices and 2! for the lower indices.
    expected /= (2 * 3) * 2
    actual = wedge(n_tensor, m_tensor, (2, 1), (1, 1))
    assert numpy.allclose(actual, expected)
def _assert_values_changed_and_not_hardcoded(test_file_path, pseudonymised_file_path):
    """Verify pseudonymisation changed PatientID and used no obvious placeholder."""
    original = pydicom.dcmread(test_file_path, force=True)
    pseudonymised = pydicom.dcmread(pseudonymised_file_path, force=True)
    pseudo_id = pseudonymised['PatientID'].value
    # The pseudonymised ID must differ from the input and must not be a
    # hard-coded dummy value.
    assert original['PatientID'].value != pseudo_id
    assert pseudo_id not in ['', 'Anonymous']
class ExposureSettings():
    """Bundles a data provider with the sector/factor exposure ticker lists.

    NOTE(review): the accessor methods below look like they may originally
    have been decorated with @property (decorators appear stripped elsewhere
    in this file) -- confirm against callers before changing.
    """
    def __init__(self, data_provider: DataProvider, sector_exposure_tickers: List[Ticker], factor_exposure_tickers: List[Ticker]):
        # Stored privately; exposed via the read-only accessors below.
        self._data_provider = data_provider
        self._sector_exposure_tickers = sector_exposure_tickers
        self._factor_exposure_tickers = factor_exposure_tickers
    def data_provider(self) -> DataProvider:
        """Return the provider used to fetch market data."""
        return self._data_provider
    def sector_exposure_tickers(self) -> List[Ticker]:
        """Return the tickers representing sector exposures."""
        return self._sector_exposure_tickers
    def factor_exposure_tickers(self) -> List[Ticker]:
        """Return the tickers representing factor (style) exposures."""
        return self._factor_exposure_tickers
class LatticeDecoder(TopologicalDecoder[TQubit], metaclass=ABCMeta):
    """Abstract base for minimum-weight perfect-matching (MWPM) decoders on a
    topological-code lattice.

    BUG FIX: several defs below had no bodies and `syndrome_graph_keys` /
    `encoder` are read as attributes, never called (L-usage in __init__ and
    parse_readout), so the stripped @property decorators and stub bodies have
    been restored.  Subclasses must override the NotImplementedError stubs.
    """

    @property
    def syndrome_graph_keys(self) -> List[str]:
        """Names of the syndrome graphs this decoder maintains (e.g. "X", "Z")."""
        raise NotImplementedError

    def __init__(self, params: Dict) -> None:
        super().__init__(params)
        self._params_validation()
        # One syndrome graph (and node-index map) per syndrome type.
        for syndrome_graph_key in self.syndrome_graph_keys:
            self.S[syndrome_graph_key] = rx.PyGraph(multigraph=False)
            self.node_map[syndrome_graph_key] = {}
        self.virtual = self._specify_virtual()
        self._encoder = None
        self._make_syndrome_graph()

    @property
    def encoder(self):
        """Encoder for this decoder's code, constructed lazily on first access."""
        if (self._encoder is None):
            self._encoder = self.encoder_type(self.params.copy())
        return self._encoder

    def _params_validation(self):
        """Require the code distance 'd' and number of rounds 'T'."""
        if (('d' not in self.params) or ('T' not in self.params)):
            raise ValueError('Please include d and T in params.')

    def _specify_virtual(self) -> Dict[(str, List[TQubit])]:
        """Return the virtual (boundary) nodes per syndrome graph. Subclass responsibility."""
        raise NotImplementedError

    def _is_crossing_readout_path(self, match: Tuple[(TQubit, TQubit)], logical_readout_type: str) -> bool:
        """Return True when a matched pair crosses the logical readout path. Subclass responsibility."""
        raise NotImplementedError

    def _make_error_graph(self, nodes: List[TQubit], syndrome_graph_key: str, err_prob: Optional[float]=None):
        """Build the complete error graph over syndromes + virtual nodes.

        Edge weights are negated shortest-path distances (optionally with a
        degeneracy correction when *err_prob* is given) so that max-weight
        matching finds minimum-weight corrections.
        """
        node_map: Dict[(TQubit, int)] = {}
        error_graph = rx.PyGraph(multigraph=False)
        # MWPM needs an even node count; pad with a dummy node if necessary.
        make_even = ((len(nodes) % 2) != 0)
        nodes += self.virtual[syndrome_graph_key]
        for node in nodes:
            if (node not in error_graph.nodes()):
                node_map[node] = error_graph.add_node(node)
        shortest_distance_mat = rx.graph_floyd_warshall_numpy(self.S[syndrome_graph_key])
        num_shortest_paths: Dict[(int, Dict[(int, int)])] = {}
        for (source, target) in combinations(nodes, 2):
            if ((source in self.virtual[syndrome_graph_key]) and (target in self.virtual[syndrome_graph_key])):
                # Virtual-virtual edges are free.
                distance = 0.0
            else:
                i = self.node_map[syndrome_graph_key][source]
                j = self.node_map[syndrome_graph_key][target]
                distance = float(shortest_distance_mat[i][j])
                if err_prob:
                    # Degeneracy correction: favor paths with many equivalents.
                    (deg, num_shortest_paths) = self._path_degeneracy(source, target, syndrome_graph_key, num_shortest_paths, shortest_distance_mat)
                    distance = (distance - (math.log(deg) / (math.log1p(((- 1.0) * err_prob)) - math.log(err_prob))))
                    distance = ((- 1.0) * distance)
            error_graph.add_edge(node_map[source], node_map[target], distance)
        if make_even:
            source = ((- 1), (- 1), (- 1))
            node_map[source] = error_graph.add_node(source)
            for target in self.virtual[syndrome_graph_key]:
                error_graph.add_edge(node_map[source], node_map[target], 0)
        return error_graph

    def _path_degeneracy(self, a: TQubit, b: TQubit, syndrome_graph_key: str, num_shortest_paths: Dict[(int, Dict[(int, int)])], shortest_distance_mat: np.ndarray) -> Tuple[(int, Dict[(int, Dict[(int, int)])])]:
        """Count equivalent shortest paths between two syndrome nodes.

        When one endpoint is virtual (first coordinate == -1), all virtual
        nodes at the same shortest distance contribute to the degeneracy.
        """
        a_indx = self.node_map[syndrome_graph_key][a]
        b_indx = self.node_map[syndrome_graph_key][b]
        source = None
        if (a[0] == (- 1)):
            target = a_indx
            source = b_indx
        elif (b[0] == (- 1)):
            target = b_indx
            source = a_indx
        if source:
            shortest_distance = shortest_distance_mat[source][target]
            virtual_nodes = self.virtual[syndrome_graph_key]
            total_deg = 0
            for node in virtual_nodes:
                node_indx = self.node_map[syndrome_graph_key][node]
                if (shortest_distance_mat[source][node_indx] == shortest_distance):
                    (deg, num_shortest_paths) = self._path_degeneracy_helper(source, node_indx, syndrome_graph_key, num_shortest_paths)
                    total_deg += deg
        else:
            (total_deg, num_shortest_paths) = self._path_degeneracy_helper(a_indx, b_indx, syndrome_graph_key, num_shortest_paths)
        return (total_deg, num_shortest_paths)

    def _path_degeneracy_helper(self, a_indx: int, b_indx: int, syndrome_graph_key: str, num_shortest_paths: Dict[(int, Dict[(int, int)])], ) -> Tuple[(int, Dict[(int, Dict[(int, int)])])]:
        """Memoized count of unweighted shortest paths between two node indices."""
        if (a_indx in num_shortest_paths.keys()):
            return (num_shortest_paths[a_indx][b_indx], num_shortest_paths)
        elif (b_indx in num_shortest_paths.keys()):
            return (num_shortest_paths[b_indx][a_indx], num_shortest_paths)
        else:
            num_shortest_paths[a_indx] = dict(rx.num_shortest_paths_unweighted(self.S[syndrome_graph_key], a_indx))
            return (num_shortest_paths[a_indx][b_indx], num_shortest_paths)

    def _run_mwpm_graph(self, matching_graph: rx.PyGraph, floats: bool=False) -> rx.PyGraph:
        """Run MWPM and return the matched pairs as a graph."""
        # Edge weights must be ints for max_weight_matching; scale floats up.
        weight_fn = (int if (not floats) else (lambda n: int((n * 10000))))
        matches_idxs = rx.max_weight_matching(matching_graph, max_cardinality=True, weight_fn=weight_fn)
        # Drop virtual-virtual matches: they carry no correction.
        filtered_matches_idxs = [(i, j) for (i, j) in matches_idxs if (not ((matching_graph[i][0] == (- 1)) and (matching_graph[j][0] == (- 1))))]
        matched_graph = rx.PyGraph(multigraph=False)
        node_map = {}
        for (i, j) in filtered_matches_idxs:
            weight = matching_graph.get_edge_data(i, j)
            for node in [i, j]:
                if (matching_graph[node] not in matched_graph.nodes()):
                    node_map[matching_graph[node]] = matched_graph.add_node(matching_graph[node])
            matched_graph.add_edge(node_map[matching_graph[i]], node_map[matching_graph[j]], weight)
        return matched_graph

    def _run_mwpm(self, matching_graph: rx.PyGraph, floats=False) -> List[Tuple[(TQubit, TQubit)]]:
        """Run MWPM and return the matched pairs as a list of node tuples."""
        weight_fn = (int if (not floats) else (lambda n: int((n * 10000))))
        matches_idxs = rx.max_weight_matching(matching_graph, max_cardinality=True, weight_fn=weight_fn)
        matches = [(matching_graph[i], matching_graph[j]) for (i, j) in matches_idxs]
        filtered_matches = [(source, target) for (source, target) in matches if (not ((source[0] == (- 1)) and (target[0] == (- 1))))]
        return filtered_matches

    def _corrections(self, syndromes: List[TQubit], syndrome_graph_key: str, err_prob: Optional[float]=None) -> List[Tuple[(TQubit, TQubit)]]:
        """Compute the correction pairing for a list of syndrome nodes."""
        if (not syndromes):
            return []
        error_graph = self._make_error_graph(syndromes, syndrome_graph_key, err_prob=err_prob)
        matches = self._run_mwpm(error_graph, floats=(err_prob is not None))
        return matches

    def correct_readout(self, syndromes: Union[(str, Dict[(str, List[TQubit])])], logical_readout_type: str, logical_qubit_value: Optional[int]=None, err_prob: Optional[float]=None) -> int:
        """Return the corrected logical qubit value for a readout.

        *syndromes* may be a raw readout string (parsed via the encoder) or a
        pre-parsed dict of syndrome nodes per graph key.
        """
        if (type(syndromes) == str):
            (logical_qubit_value, syndromes) = self.parse_readout(str(syndromes), logical_readout_type)
        syndromes = cast(Dict[(str, List[TQubit])], syndromes)
        logical_qubit_value = cast(int, logical_qubit_value)
        matches = self._corrections(syndromes[logical_readout_type], logical_readout_type, err_prob=err_prob)
        # Each match crossing the readout path flips the logical value.
        for match in matches:
            if self._is_crossing_readout_path(match, logical_readout_type):
                logical_qubit_value = ((logical_qubit_value + 1) % 2)
        return logical_qubit_value

    def parse_readout(self, readout_string: str, readout_type: Optional[str]=None) -> Tuple[(int, Dict[(str, List[TQubit])])]:
        """Delegate readout-string parsing to the encoder."""
        return self.encoder.parse_readout(readout_string, readout_type)

    def draw(self, graph: rx.PyGraph, dpi: Optional[int]=None, node_size: Optional[int]=None, font_size: Optional[float]=None, show: Optional[bool]=True) -> Tuple[(figure.Figure, axes.Axes)]:
        """Plot a syndrome/error graph in 2D (delegates to draw3D for T > 1)."""
        if (self.params['T'] > 1):
            self.draw3D(graph)
            return
        node_count = len(graph.nodes())
        # Scale figure elements with graph size for readability.
        scale = (5 / (math.sqrt(node_count) if (node_count > 0) else 5))
        dpi = (dpi if (dpi is not None) else (150 / scale))
        node_size = (node_size if (node_size is not None) else (1750 * scale))
        font_size = (font_size if (font_size is not None) else (6 * scale))
        positions = {}
        for (idx, node) in enumerate(graph.nodes()):
            positions[idx] = [node[2], (- node[1])]
        fig = plt.figure(dpi=dpi)
        ax = fig.subplots()
        mpl_draw(graph, ax=ax, with_labels=True, pos=positions, labels=str, edge_labels=(lambda e: str(np.around(e, 3))), node_size=node_size, node_color='lightblue', font_size=font_size, alpha=0.8)
        fig.tight_layout()
        if (not show):
            plt.close(fig)
        return (fig, ax)

    def draw3D(self, graph: rx.PyGraph, angle: Optional[List[float]]=None) -> None:
        """Plot a (time-extended) syndrome/error graph in 3D."""
        angle = ([(- 116), 22] if (not angle) else angle)
        # Color nodes by their time coordinate.
        colors = {node: plt.cm.plasma(((node[0] + 1) / self.params['T'])) for node in graph.nodes()}
        def node_to_pos3D(node):
            # Dummy/virtual time coordinate -1 is drawn at the temporal center.
            z = (((self.params['T'] - 1.0) / 2.0) if (node[0] == (- 1)) else node[0])
            return (node[2], (- node[1]), z)
        with plt.style.context('ggplot'):
            fig = plt.figure(figsize=(20, 14))
            ax = Axes3D(fig, auto_add_to_figure=False)
            fig.add_axes(ax)
            for node in graph.nodes():
                (xi, yi, zi) = node_to_pos3D(node)
                ax.scatter(xi, yi, zi, color=colors[node], s=(120 * 1), edgecolors='k', alpha=0.7)
                ax.text(xi, yi, zi, node, fontsize=20)
            for (src, tgt) in graph.edge_list():
                (x_1, y_1, z_1) = node_to_pos3D(graph[src])
                (x_2, y_2, z_2) = node_to_pos3D(graph[tgt])
                x_line = np.array((x_1, x_2))
                y_line = np.array((y_1, y_2))
                z_line = np.array((z_1, z_2))
                ax.plot(x_line, y_line, z_line, color='black', alpha=0.5)
                # Annotate each edge with its (rounded) weight at the midpoint.
                x_mid = ((x_1 + x_2) / 2)
                y_mid = ((y_1 + y_2) / 2)
                z_mid = ((z_1 + z_2) / 2)
                label = round(graph.get_edge_data(src, tgt), 2)
                ax.text(x_mid, y_mid, z_mid, label, fontsize=14)
            ax.view_init(angle[1], angle[0])
            ax.set_axis_off()
            ax.xaxis.pane.fill = False
            ax.yaxis.pane.fill = False
            ax.zaxis.pane.fill = False
            ax.xaxis.pane.set_edgecolor('w')
            ax.yaxis.pane.set_edgecolor('w')
            ax.zaxis.pane.set_edgecolor('w')
            plt.show()
class WikiTableQuestion(datasets.GeneratorBasedBuilder):
    """WikiTableQuestions dataset builder augmented with SQUALL SQL annotations."""
    def _info(self):
        """Describe the dataset schema: question, table, answers, and SQL string."""
        return datasets.DatasetInfo(description=_DESCRIPTION, features=datasets.Features({'id': datasets.Value('string'), 'question': datasets.Value('string'), 'table_id': datasets.Value('string'), 'table': {'page_title': datasets.Value('string'), 'header': datasets.features.Sequence(datasets.Value('string')), 'rows': datasets.features.Sequence(datasets.features.Sequence(datasets.Value('string')))}, 'answer_text': datasets.features.Sequence(datasets.Value('string')), 'encode_sql_str': datasets.Value('string')}), supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION)
    def _split_generators(self, dl_manager):
        """Download the WTQ and SQUALL archives and wire up train/dev/test splits."""
        data_dir = os.path.join(dl_manager.download_and_extract(_URL), 'WikiTableQuestions-master')
        squall_dir = os.path.join(dl_manager.download_and_extract(_SQUALL_URL), 'squall-main')
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': os.path.join(data_dir, 'data/random-split-1-train.tsv'), 'data_dir': data_dir, 'squall_path': os.path.join(squall_dir, 'data/squall.json'), 'squall_tables_path': os.path.join(squall_dir, 'tables/json'), 'squall_db_path': os.path.join(squall_dir, 'tables/db'), 'squall_tmp_db_path': os.path.join(squall_dir, 'tables/tmp_db')}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': os.path.join(data_dir, 'data/random-split-1-dev.tsv'), 'data_dir': data_dir, 'squall_path': os.path.join(squall_dir, 'data/squall.json'), 'squall_tables_path': os.path.join(squall_dir, 'tables/json'), 'squall_db_path': os.path.join(squall_dir, 'tables/db'), 'squall_tmp_db_path': os.path.join(squall_dir, 'tables/tmp_db')}), datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': os.path.join(data_dir, 'data/pristine-unseen-tables.tsv'), 'data_dir': data_dir, 'squall_path': os.path.join(squall_dir, 'data/squall.json'), 'squall_tables_path': os.path.join(squall_dir, 'tables/json'), 'squall_db_path': os.path.join(squall_dir, 'tables/db'), 'squall_tmp_db_path': os.path.join(squall_dir, 'tables/tmp_db')})]
    def _generate_examples(self, filepath, data_dir, squall_path, squall_tables_path, squall_db_path, squall_tmp_db_path):
        """Yield (idx, example) pairs, joining WTQ questions with SQUALL SQL.

        Only questions whose ids appear in `sql_solvable_ids` are emitted.
        SQLite databases are copied to a temp directory so row deletions do
        not mutate the original SQUALL files.
        """
        src_table_content_map = {}
        tgt_table_content_map = {}
        table_drop_rows_map = {}
        db_engine_map = {}
        # Load every SQUALL table (json) once, keeping an untouched source copy.
        for table_json_file in os.listdir(squall_tables_path):
            table_id = table_json_file[:(- 5)]
            check_table_file = open(os.path.join(squall_tables_path, table_json_file), 'r', encoding='utf8')
            src_table_content = json.load(check_table_file)
            src_table_content = process_table_structure(src_table_content)
            # Deep copy via json round-trip so the two maps do not share state.
            src_table_content_map[table_id] = json.loads(json.dumps(src_table_content))
            tgt_table_content_map[table_id] = src_table_content
        # Copy each sqlite DB into the temp dir and build an engine for it.
        for table_db_file in os.listdir(squall_db_path):
            table_id = table_db_file[:(- 3)]
            database_path = os.path.join(squall_db_path, table_db_file)
            temp_database_path = os.path.join(squall_tmp_db_path, table_db_file)
            if os.path.exists(temp_database_path):
                os.remove(temp_database_path)
            shutil.copy(database_path, temp_database_path)
            db_engine_map[table_id] = WTQDBEngine(temp_database_path)
            if ((table_id in table_drop_rows_map) and (len(table_drop_rows_map[table_id]) != 0)):
                table_drop_rows = table_drop_rows_map[table_id]
                db_engine_map[table_id].delete_rows(table_drop_rows)
        # Index SQUALL annotations by their question id ('nt').
        squall_id_map = {}
        with open(squall_path) as f:
            squall_data = json.load(f)
        for squall_item in squall_data:
            squall_id_map[squall_item['nt']] = squall_item
        with open(filepath, encoding='utf-8') as f:
            for (idx, line) in enumerate(f):
                # Skip the TSV header row.
                if (idx == 0):
                    continue
                (data_id, question, table_id, gold_result_str) = line.strip('\n').split('\t')
                if (data_id in sql_solvable_ids):
                    table = _load_table(os.path.join(data_dir, table_id.replace('.csv', '.tsv')))
                    gold_result = gold_result_str.split('|')
                    squall_data_item = squall_id_map[data_id]
                    squall_table_id = squall_data_item['tbl']
                    sql_struct = squall_data_item['sql']
                    (engine, src_table_content) = (db_engine_map[squall_table_id], src_table_content_map[squall_table_id])
                    # Fill empty column names with a placeholder.
                    new_columns = []
                    for (_, col) in enumerate(table['header']):
                        if (col == ''):
                            new_columns.append('FilledColumnName')
                        else:
                            new_columns.append(col)
                    table['header'] = new_columns
                    # De-duplicate column names with numeric suffixes.
                    new_columns = []
                    for (_, col) in enumerate(table['header']):
                        if (col in new_columns):
                            (new_col, suffix) = (col, 2)
                            while (new_col in new_columns):
                                new_col = ((col + '_') + str(suffix))
                                suffix += 1
                            new_columns.append(new_col)
                        else:
                            new_columns.append(col)
                    table['header'] = new_columns
                    # Normalize all column names to lowercase.
                    new_columns = []
                    for col in table['header']:
                        new_columns.append(col.lower())
                    table['header'] = new_columns
                    (encode_sql_str, _, exec_sql_str) = retrieve_wtq_query_answer(engine, table, sql_struct)
                    (yield (idx, {'id': data_id, 'question': question, 'table_id': table_id, 'table': table, 'answer_text': gold_result, 'encode_sql_str': encode_sql_str}))
class WaveRNN(nn.Module):
    """WaveRNN vocoder cell with split coarse/fine hidden state.

    The GRU-style recurrence is implemented manually so the hidden state can
    be split into a coarse half and a fine half, each feeding its own set of
    quantized output heads (four per half).
    """

    def __init__(self, hidden_size=384, quantization=256):
        super(WaveRNN, self).__init__()
        self.hidden_size = hidden_size
        self.split_size = (hidden_size // 2)
        # Recurrent projection producing the u/r/e gate pre-activations.
        self.R = nn.Linear(self.hidden_size, (3 * self.hidden_size), bias=False)
        # Output MLPs for the coarse (O1->*_O2) and fine (O3->*_O4) halves.
        self.O1 = nn.Linear(self.split_size, self.split_size)
        self.O3 = nn.Linear(self.split_size, self.split_size)
        self.one_O2 = nn.Linear(self.split_size, quantization)
        self.one_O4 = nn.Linear(self.split_size, quantization)
        self.two_O2 = nn.Linear(self.split_size, quantization)
        self.two_O4 = nn.Linear(self.split_size, quantization)
        self.three_O2 = nn.Linear(self.split_size, quantization)
        self.three_O4 = nn.Linear(self.split_size, quantization)
        self.four_O2 = nn.Linear(self.split_size, quantization)
        self.four_O4 = nn.Linear(self.split_size, quantization)
        # Input projections: coarse sees the previous sample pair (x4 frames),
        # fine additionally sees the current coarse sample.
        self.I_coarse = nn.Linear((2 * 4), (3 * self.split_size), bias=False)
        self.I_fine = nn.Linear((3 * 4), (3 * self.split_size), bias=False)
        # Shared gate biases (the R/I projections are bias-free).
        self.bias_u = nn.Parameter(torch.zeros(self.hidden_size))
        self.bias_r = nn.Parameter(torch.zeros(self.hidden_size))
        self.bias_e = nn.Parameter(torch.zeros(self.hidden_size))
        self.num_params()

    def forward(self, prev_y, prev_hidden, current_coarse):
        """One recurrence step.

        Returns (coarse_logits, fine_logits, new_hidden) where the logits of
        the four heads are stacked along dim 0.
        """
        R_hidden = self.R(prev_hidden)
        (R_u, R_r, R_e) = torch.split(R_hidden, self.hidden_size, dim=1)
        coarse_input_proj = self.I_coarse(prev_y)
        (I_coarse_u, I_coarse_r, I_coarse_e) = torch.split(coarse_input_proj, self.split_size, dim=1)
        fine_input = torch.cat([prev_y, current_coarse], dim=1)
        fine_input_proj = self.I_fine(fine_input)
        (I_fine_u, I_fine_r, I_fine_e) = torch.split(fine_input_proj, self.split_size, dim=1)
        # Concatenate coarse/fine halves back to full hidden width per gate.
        I_u = torch.cat([I_coarse_u, I_fine_u], dim=1)
        I_r = torch.cat([I_coarse_r, I_fine_r], dim=1)
        I_e = torch.cat([I_coarse_e, I_fine_e], dim=1)
        # GRU-style gates.  (FIX: use torch.sigmoid/torch.tanh instead of the
        # deprecated F.sigmoid/F.tanh aliases; numerically identical.)
        u = torch.sigmoid(((R_u + I_u) + self.bias_u))
        r = torch.sigmoid(((R_r + I_r) + self.bias_r))
        e = torch.tanh((((r * R_e) + I_e) + self.bias_e))
        hidden = ((u * prev_hidden) + ((1.0 - u) * e))
        (h_c, h_f) = torch.split(hidden, self.split_size, dim=1)
        out_c = F.relu(self.O1(h_c))
        out_f = F.relu(self.O3(h_f))
        one_c = self.one_O2(out_c)
        one_f = self.one_O4(out_f)
        two_c = self.two_O2(out_c)
        two_f = self.two_O4(out_f)
        three_c = self.three_O2(out_c)
        three_f = self.three_O4(out_f)
        four_c = self.four_O2(out_c)
        four_f = self.four_O4(out_f)
        c = torch.cat([one_c, two_c, three_c, four_c], dim=0)
        f = torch.cat([one_f, two_f, three_f, four_f], dim=0)
        return (c, f, hidden)

    def get_initial_hidden(self, batch_size=1):
        """Zero-initialized hidden state on the module's device."""
        device = next(self.parameters()).device
        return torch.zeros(batch_size, self.hidden_size, device=device)

    def num_params(self, print_out=True):
        """Print the number of trainable parameters (in millions)."""
        parameters = filter((lambda p: p.requires_grad), self.parameters())
        parameters = (sum([np.prod(p.size()) for p in parameters]) / 1000000)
        print(('Trainable Parameters: %.3f million' % parameters))
class TwitchOpenIdConnect(OpenIdConnectAuth):
    """Twitch OpenID Connect authentication backend."""
    name = 'twitch'
    USERNAME_KEY = 'preferred_username'
    # BUG FIX: the endpoint literal was truncated to an unterminated string in
    # the original; restored to Twitch's published OIDC issuer.
    OIDC_ENDPOINT = 'https://id.twitch.tv/oauth2'
    DEFAULT_SCOPE = ['openid', 'user:read:email']
    # Twitch requires explicitly requesting id_token claims via the `claims`
    # parameter; without it the token contains no email/username.
    TWITCH_CLAIMS = '{"id_token":{"email": null,"email_verified":null,"preferred_username":null}}'

    def auth_params(self, state=None):
        """Add Twitch's required `claims` parameter to the authorization request."""
        params = super().auth_params(state)
        params['claims'] = self.TWITCH_CLAIMS
        return params

    def get_user_details(self, response):
        """Map id_token claims onto the framework's user-detail fields."""
        return {'username': self.id_token['preferred_username'], 'email': self.id_token['email'], 'fullname': '', 'first_name': '', 'last_name': ''}
def test_initilization_info_logger():
    """Check that weight-initialization info is logged per parameter and that
    the temporary `_params_init_info` attribute is cleaned up afterwards."""
    import torch.nn as nn
    from mmcv.utils.logging import get_logger
    import os
    class OverloadInitConv(nn.Conv2d, BaseModule):
        # Custom init_weights: fill all parameters with 1 (bypasses init_cfg).
        def init_weights(self):
            for p in self.parameters():
                with torch.no_grad():
                    p.fill_(1)
    class CheckLoggerModel(BaseModule):
        def __init__(self, init_cfg=None):
            super(CheckLoggerModel, self).__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv2 = OverloadInitConv(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)
    init_cfg = [dict(type='Normal', layer='Conv2d', std=0.01, override=dict(type='Normal', name='conv3', std=0.01, bias_prob=0.01)), dict(type='Constant', layer='Linear', val=0.0, bias=1.0)]
    model = CheckLoggerModel(init_cfg=init_cfg)
    train_log = '_132454.log'
    workdir = tempfile.mkdtemp()
    log_file = os.path.join(workdir, train_log)
    get_logger('init_logger', log_file=log_file)
    # The bookkeeping attribute must only exist during init_weights().
    assert (not hasattr(model, '_params_init_info'))
    model.init_weights()
    assert (not hasattr(model, '_params_init_info'))
    assert os.path.exists(log_file)
    lines = mmcv.list_from_file(log_file)
    # Each parameter line should be followed by the initializer that set it.
    for (i, line) in enumerate(lines):
        if ('conv1.weight' in line):
            assert ('NormalInit' in lines[(i + 1)])
        if ('conv2.weight' in line):
            assert ('OverloadInitConv' in lines[(i + 1)])
        if ('fc1.weight' in line):
            assert ('ConstantInit' in lines[(i + 1)])
    class OverloadInitConvFc(nn.Conv2d, BaseModule):
        # Variant with a nested Linear submodule and a custom init_weights.
        def __init__(self, *args, **kwargs):
            super(OverloadInitConvFc, self).__init__(*args, **kwargs)
            self.conv1 = nn.Linear(1, 1)
        def init_weights(self):
            for p in self.parameters():
                with torch.no_grad():
                    p.fill_(1)
    class CheckLoggerModel(BaseModule):
        def __init__(self, init_cfg=None):
            super(CheckLoggerModel, self).__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv2 = OverloadInitConvFc(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)
    class TopLevelModule(BaseModule):
        def __init__(self, init_cfg=None, checklog_init_cfg=None):
            super(TopLevelModule, self).__init__(init_cfg)
            self.module1 = CheckLoggerModel(checklog_init_cfg)
            self.module2 = OverloadInitConvFc(1, 1, 1, 1)
    checklog_init_cfg = [dict(type='Normal', layer='Conv2d', std=0.01, override=dict(type='Normal', name='conv3', std=0.01, bias_prob=0.01)), dict(type='Constant', layer='Linear', val=0.0, bias=1.0)]
    top_level_init_cfg = [dict(type='Normal', layer='Conv2d', std=0.01, override=dict(type='Normal', name='module2', std=0.01, bias_prob=0.01))]
    model = TopLevelModule(init_cfg=top_level_init_cfg, checklog_init_cfg=checklog_init_cfg)
    # Repeated init_weights calls (on submodules and top level) must stay safe.
    model.module1.init_weights()
    model.module2.init_weights()
    model.init_weights()
    model.module1.init_weights()
    model.module2.init_weights()
    assert (not hasattr(model, '_params_init_info'))
    model.init_weights()
    assert (not hasattr(model, '_params_init_info'))
    assert os.path.exists(log_file)
    lines = mmcv.list_from_file(log_file)
    for (i, line) in enumerate(lines):
        if (('TopLevelModule' in line) and ('init_cfg' not in line)):
            assert ('the same' in line)
def test_api_groups():
    """Exercise the group APIs (getgroups/getgroup) against a mocked backend."""
    # Canned XMLRPC-style payload for Group.get with two members.
    group_ret = {'groups': [{'membership': [{'real_name': 'Bugzilla User', 'can_login': 1, 'name': '', 'login_denied_text': '', 'id': 85, 'email_enabled': 1, 'email': ''}, {'real_name': 'Bugzilla User2', 'can_login': 0, 'name': '', 'login_denied_text': '', 'id': 77, 'email_enabled': 0, 'email': ''}], 'is_active': 1, 'description': 'Test Group', 'user_regexp': '', 'is_bug_group': 1, 'name': 'TestGroup', 'id': 9}]}
    fakebz = tests.mockbackend.make_bz(group_get_args='data/mockargs/test_api_groups_get1.txt', group_get_return=group_ret)
    groupobj = fakebz.getgroups('TestGroups')[0]
    assert (groupobj.groupid == 9)
    assert (groupobj.member_emails == ['', ''])
    assert (groupobj.name == 'TestGroup')
    # getgroup with membership=True should expose the raw membership payload.
    fakebz = tests.mockbackend.make_bz(group_get_args='data/mockargs/test_api_groups_get2.txt', group_get_return=group_ret)
    groupobj = fakebz.getgroup('TestGroup', membership=True)
    groupobj.membership = []
    assert (groupobj.members() == group_ret['groups'][0]['membership'])
def remove_markers(img, left, right, output):
    """Black out the detected marker regions in *img* and save to *output*.

    *left* and *right* are parallel sequences of (x, y) marker positions;
    marker dimensions come from the module-level WIDTH/HEIGHT/
    TOP_BOTTOM_PIECE_HEIGHT constants.
    """
    pixels = img.load()

    def _blank(x0, y0, x1, y1):
        # Paint the rectangle [x0, x1) x [y0, y1) with transparent black.
        for row in range(y0, y1):
            for col in range(x0, x1):
                pixels[(col, row)] = (0, 0, 0, 0)

    for left_marker, right_marker in zip(left, right):
        x, y = int(left_marker[0]), int(left_marker[1])
        rx, ry = int(right_marker[0]), int(right_marker[1])
        # Left and right marker boxes.
        _blank(x, y, x + WIDTH, y + HEIGHT)
        _blank(rx, ry, rx + WIDTH, ry + HEIGHT)
        # Thin strips connecting the markers along the top and bottom edges.
        _blank(x, y, rx, y + TOP_BOTTOM_PIECE_HEIGHT)
        _blank(x, (y + HEIGHT) - TOP_BOTTOM_PIECE_HEIGHT, rx, y + HEIGHT)
    if DEBUG:
        img.show()
    img.save(output)
class TestMetadataColumnConstructionAndProperties(unittest.TestCase):
def test_single_id(self):
index = pd.Index(['id1'], name='id')
series = pd.Series([42], name='col1', index=index)
mdc = DummyMetadataColumn(series)
self.assertEqual(mdc.id_count, 1)
self.assertEqual(mdc.id_header, 'id')
self.assertEqual(mdc.ids, ('id1',))
self.assertEqual(mdc.name, 'col1')
def test_multiple_ids(self):
index = pd.Index(['id1', 'a', 'my-id'], name='id')
series = pd.Series([42, 4.2, (- 4.2)], name='column', index=index)
mdc = DummyMetadataColumn(series)
self.assertEqual(mdc.id_count, 3)
self.assertEqual(mdc.id_header, 'id')
self.assertEqual(mdc.ids, ('id1', 'a', 'my-id'))
self.assertEqual(mdc.name, 'column')
def test_supported_id_headers(self):
case_insensitive = {'id', 'sampleid', 'sample id', 'sample-id', 'featureid', 'feature id', 'feature-id'}
exact_match = {'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'}
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
count = 0
for header in headers:
index = pd.Index(['id1', 'id2'], name=header)
series = pd.Series([0, 123], name='column', index=index)
mdc = DummyMetadataColumn(series)
self.assertEqual(mdc.id_header, header)
count += 1
self.assertEqual(count, 26)
def test_recommended_ids(self):
index = pd.Index(['c6ca034a-223f-40b4-a0e0-a5ea', 'My.ID'], name='id')
series = pd.Series([(- 1), (- 2)], name='col1', index=index)
mdc = DummyMetadataColumn(series)
self.assertEqual(mdc.id_count, 2)
self.assertEqual(mdc.id_header, 'id')
self.assertEqual(mdc.ids, ('c6ca034a-223f-40b4-a0e0-a5ea', 'My.ID'))
self.assertEqual(mdc.name, 'col1')
def test_non_standard_characters(self):
index = pd.Index(['id##1', '((id))2', "'id_3<>'", '"id#4"', 'i d\r\t\n5'], name='id')
series = pd.Series([0, 1, 2, 3, 4], name='', index=index)
mdc = DummyMetadataColumn(series)
self.assertEqual(mdc.id_count, 5)
self.assertEqual(mdc.id_header, 'id')
self.assertEqual(mdc.ids, ('id##1', '((id))2', "'id_3<>'", '"id#4"', 'i d\r\t\n5'))
self.assertEqual(mdc.name, '')
def test_missing_data(self):
index = pd.Index(['None', 'nan', 'NA'], name='id')
series = pd.Series([np.nan, np.nan, np.nan], name='NA', index=index)
mdc = DummyMetadataColumn(series)
self.assertEqual(mdc.id_count, 3)
self.assertEqual(mdc.id_header, 'id')
self.assertEqual(mdc.ids, ('None', 'nan', 'NA'))
self.assertEqual(mdc.name, 'NA')
def test_missing_insdc(self):
index = pd.Index(['None', 'nan', 'NA'], name='id')
series = pd.Series(['missing', 'not applicable', 5.0], name='NA', index=index)
mdc = DummyMetadataColumn(series, missing_scheme='INSDC:missing')
self.assertEqual(mdc.id_count, 3)
self.assertEqual(mdc.id_header, 'id')
self.assertEqual(mdc.ids, ('None', 'nan', 'NA'))
self.assertEqual(mdc.name, 'NA')
pd.testing.assert_series_equal(mdc.to_series(), pd.Series([np.nan, np.nan, 5.0], name='NA', index=index))
def test_does_not_cast_ids_or_column_name(self):
    """Numeric-looking IDs and names remain strings (no float coercion)."""
    idx = pd.Index(['0.000001', '0.004000', '0.000000'], dtype=object, name='id')
    column = DummyMetadataColumn(pd.Series([2.0, 1.0, 3.0], name='42.0', index=idx))
    self.assertEqual(column.id_count, 3)
    self.assertEqual(column.id_header, 'id')
    self.assertEqual(column.ids, ('0.000001', '0.004000', '0.000000'))
    self.assertEqual(column.name, '42.0')
def test_case_insensitive_duplicate_ids(self):
    """IDs differing only in case are all kept -- no case folding happens."""
    idx = pd.Index(['a', 'b', 'A'], name='id')
    column = DummyMetadataColumn(pd.Series([1, 2, 3], name='column', index=idx))
    self.assertEqual(column.ids, ('a', 'b', 'A'))
# NOTE(review): the bare ``_config`` below looks like a stripped ``@_config``
# decorator -- confirm against the original file.
_config
def test_stack_commands(manager):
    """Exercise Stack layout commands: new windows land in the current
    stack, delete() merges stacks, add() creates an empty one, and
    rotate() shifts windows between stacks.
    """
    assert (manager.c.layout.info()['current_stack'] == 0)
    manager.test_window('one')
    assert (_stacks(manager) == [['one'], []])
    assert (manager.c.layout.info()['current_stack'] == 0)
    manager.test_window('two')
    # Second window goes to the second (empty) stack, which gains focus.
    assert (_stacks(manager) == [['one'], ['two']])
    assert (manager.c.layout.info()['current_stack'] == 1)
    manager.test_window('three')
    assert (_stacks(manager) == [['one'], ['three', 'two']])
    assert (manager.c.layout.info()['current_stack'] == 1)
    # Deleting the focused stack merges its windows into the remaining one.
    manager.c.layout.delete()
    assert (_stacks(manager) == [['one', 'three', 'two']])
    info = manager.c.get_groups()['a']
    assert (info['focus'] == 'one')
    # Deleting the last stack is a no-op: one stack must always remain.
    manager.c.layout.delete()
    assert (len(_stacks(manager)) == 1)
    manager.c.layout.add()
    assert (_stacks(manager) == [['one', 'three', 'two'], []])
    manager.c.layout.rotate()
    assert (_stacks(manager) == [[], ['one', 'three', 'two']])
def generate_module(xsd_path: str) -> None:
    """Regenerate the Python bindings for *xsd_path* and rewrite the
    sibling ``.py`` file only when its contents actually changed.
    """
    module = run_generate_ds(xsd_path)
    # Post-process the generated source through each cleanup pass in order.
    for transform in (remove_python_version, remove_six_import,
                      disable_code_analyzers, format_with_black):
        module = transform(module)
    target_path = xsd_path.replace('.xsd', '.py')
    with open(target_path, encoding='utf-8') as in_file:
        current = in_file.read()
    if current != module:
        with open(target_path, 'w', encoding='utf-8') as out_file:
            out_file.write(module)
        print(f'Wrote {target_path}')
def ensureMarkerTable(dbHandle=None):
    """Ensure the ``marker`` bookkeeping table exists and return a cursor.

    Args:
        dbHandle: an open connection object exposing ``cursor()``. When
            None, a new connection to the target database is opened in
            autocommit mode (``isolation_level=None``).

    Returns:
        A cursor on the (possibly newly opened) connection.
    """
    if dbHandle is None:
        dbHandle = ops.db.Database(db=ops.db.TARGET_DB, isolation_level=None)
        curs = dbHandle.connection.cursor()
    else:
        curs = dbHandle.cursor()
    try:
        curs.execute('CREATE TABLE marker (name, last_date, extra)')
    except Exception:
        # Table already exists (or backend rejected the DDL) -- keep the
        # original best-effort behavior, but no longer swallow
        # KeyboardInterrupt/SystemExit via a bare ``except:``.
        pass
    return curs
# NOTE(review): the bare ``(scope='session')`` below looks like a stripped
# ``@pytest.fixture(scope='session')`` decorator -- confirm against original.
(scope='session')
def run_kodi_pod(build_plugin):
    """Session fixture: start a podman pod ('kodipod') running Kodi and a
    MockServer container for the plugin tests, then stop it at teardown.
    """
    # Remove any stale pod from a previous run before creating a fresh one.
    podman('pod', 'rm', '-f', 'kodipod')
    podman('pod', 'create', '--publish=8080:8080', '--publish=1080:1080', '--publish=5999:5999', '--name=kodipod')
    # Kodi container with the addon, database, and addon_data mounted in.
    podman('run', '--detach', '--pod=kodipod', '--name=kodi', '--umask=0002', '--env=KINO_PUB_TEST=1', f'--volume={HOST_DIR}/addons/:{CON_DIR}/addons', f'--volume={HOST_DIR}/Database/:{CON_DIR}/userdata/Database', f'--volume={HOST_DIR}/addon_data/:{CON_DIR}/userdata/addon_data/video.kino.pub', 'quay.io/quarck/conkodi:19')
    # MockServer serving the fake kino.pub API expectations.
    podman('run', '--detach', '--pod=kodipod', '--name=mockserver', '--env=MOCKSERVER_INITIALIZATION_JSON_PATH=/fake_api/persistedExpectations.json', f'--volume={HOST_DIR}/fake_api/:/fake_api', 'docker.io/mockserver/mockserver:mockserver-5.11.2')
    # Hand control to the tests while the containers are running.
    (yield)
    podman('pod', 'stop', 'kodipod')
def _upload(training_dir, algorithm_id=None, writeup=None, benchmark_run_id=None, api_key=None, ignore_open_monitors=False):
    """Upload recorded monitor data and create a server-side Evaluation.

    Args:
        training_dir: directory containing monitor output to upload.
        algorithm_id: optional algorithm identifier to attach.
        writeup: optional writeup to attach.
        benchmark_run_id: optional benchmark run to associate with.
        api_key: API key override.
        ignore_open_monitors: when False, refuse to upload while any monitor
            is still open (its files may be incomplete).

    Returns:
        The created ``resource.Evaluation``.

    Raises:
        error.Error: if a monitor is still open, or no training data exists.
    """
    if not ignore_open_monitors:
        open_monitors = monitoring._open_monitors()
        if len(open_monitors) > 0:
            envs = [(m.env.spec.id if m.env.spec else '(unknown)') for m in open_monitors]
            raise error.Error("Still have an open monitor on {}. You must run 'env.monitor.close()' before uploading.".format(', '.join(envs)))
    (env_info, training_episode_batch, training_video) = upload_training_data(training_dir, api_key=api_key)
    env_id = env_info['env_id']
    training_episode_batch_id = training_video_id = None
    if training_episode_batch:
        training_episode_batch_id = training_episode_batch.id
    if training_video:
        training_video_id = training_video.id
    if (training_episode_batch_id is None) and (training_video_id is None):
        # BUG FIX (two issues): this error was previously only raised when the
        # log level was <= INFO, silently proceeding otherwise; and its message
        # mixed a %-style '[%s]' placeholder with str.format, so env_id landed
        # in the wrong slot and training_dir was dropped.
        raise error.Error("[{}] You didn't have any recorded training data in {}. Once you've used 'env.monitor.start(training_dir)' to start recording, you need to actually run some rollouts. Please join the community chat on if you have any issues.".format(env_id, training_dir))
    if logger.level <= logging.INFO:
        if (training_episode_batch_id is not None) and (training_video_id is not None):
            logger.info('[%s] Creating evaluation object from %s with learning curve and training video', env_id, training_dir)
        elif training_episode_batch_id is not None:
            logger.info('[%s] Creating evaluation object from %s with learning curve', env_id, training_dir)
        elif training_video_id is not None:
            logger.info('[%s] Creating evaluation object from %s with training video', env_id, training_dir)
    evaluation = resource.Evaluation.create(training_episode_batch=training_episode_batch_id, training_video=training_video_id, env=env_info['env_id'], algorithm={'id': algorithm_id}, benchmark_run_id=benchmark_run_id, writeup=writeup, gym_version=env_info['gym_version'], api_key=api_key)
    return evaluation
class AsyncObject(object):
    """Fixture class whose methods mutate counters, used to exercise
    per-instance async/memoization decorators.

    NOTE(review): the bare ``_per_instance()`` and ``()`` statements below
    look like decorator lines whose ``@`` prefix (and possibly a second
    decorator name) was stripped -- confirm against the original file.
    """
    # Class-level counter shared across all instances.
    cls_value = 0

    def __init__(self):
        # Per-instance counter mutated by the methods below.
        self.value = 0

    _per_instance()
    ()
    def get_value(self, index):
        # Increments and returns the counter; ``index`` is unused in the body
        # (presumably consumed by the decorator as a cache key -- confirm).
        self.value += 1
        return self.value

    _per_instance()
    ()
    def with_kwargs(self, x=1, y=2, z=3):
        # Adds the sum of all keyword arguments to the counter.
        self.value += ((x + y) + z)
        return self.value

    _per_instance()
    ()
    def raises_exception(self):
        # Always fails; used to test exception propagation through the wrapper.
        assert False

    _per_instance()
    ()
    def with_kwonly_arg(self, *, arg=1):
        # Echoes its keyword-only argument.
        return arg

    ()
    ()
    def increment_value_method(self, val=1):
        # Adds ``val`` to the per-instance counter; returns None.
        self.value += val

    ()
    ()
    def deduplicated_static_method(val=1):
        # Mutates the class-level counter; no ``self`` parameter, so the
        # stripped decorator was presumably ``@staticmethod`` -- confirm.
        AsyncObject.cls_value += val
class TestGameBase():
    """Shared fixture data for telegram Game tests."""
    title = 'Python-telegram-bot Test Game'
    description = 'description'
    photo = [PhotoSize('Blah', 'ElseBlah', 640, 360, file_size=0)]
    # BUG FIX: this literal was truncated mid-string (no closing quote),
    # which is a syntax error. Restored as a closed bytes literal.
    # NOTE(review): upstream likely appended ``.decode('unicode-escape')``
    # to turn the escapes into emoji -- confirm against the original file.
    text = b'\\U0001f469\\u200d\\U0001f469\\u200d\\U0001f467\\u200d\\U0001f467\\U0001f431'
    text_entities = [MessageEntity(13, 17, MessageEntity.URL)]
    animation = Animation('blah', 'unique_id', 320, 180, 1)
class DiscordNotifier(Notifier):
    """pyLoad notifier addon that posts pyLoad events (captchas, download
    state changes, shutdown, ...) to a Discord channel via a user-configured
    webhook URL.
    """
    __name__ = 'DiscordNotifier'
    __type__ = 'addon'
    __version__ = '0.11'
    __status__ = 'testing'
    # (option name, type, UI label, default) tuples consumed by pyLoad's
    # configuration system; which events fire is toggled per-option.
    __config__ = [('enabled', 'bool', 'Activated', False), ('webhookurl', 'string', 'The URL of the webhook', ''), ('captcha', 'bool', 'Notify captcha request', True), ('reconnection', 'bool', 'Notify reconnection request', True), ('downloadfinished', 'bool', 'Notify download finished', True), ('downloadfailed', 'bool', 'Notify download failed', True), ('alldownloadsfinished', 'bool', 'Notify all downloads finished', True), ('alldownloadsprocessed', 'bool', 'Notify all downloads processed', True), ('packagefinished', 'bool', 'Notify package finished', True), ('packagefailed', 'bool', 'Notify package failed', True), ('update', 'bool', 'Notify pyload update', False), ('exit', 'bool', 'Notify pyload shutdown/restart', False), ('sendinterval', 'int', 'Interval in seconds between notifications', 1), ('sendpermin', 'int', 'Max notifications per minute', 60), ('ignoreclient', 'bool', 'Send notifications if client is connected', True)]
    __description__ = 'Send push notifications to a Discord channel via a webhook.'
    __license__ = 'GPLv3'
    __authors__ = [('Jan-Olaf Becker', '')]

    def get_key(self):
        # The webhook URL doubles as the "key" expected by the Notifier base.
        return self.config.get('webhookurl')

    def send(self, event, msg, key):
        # POST the event name plus message as the Discord message content.
        req = get_request()
        self.log_info('Sending message to discord')
        self.load(self.get_key(), post={'content': ((event + '\n') + msg)}, req=req)
def put(id: int) -> dict:
    """Update film ``id`` from the JSON request body, replacing its cast.

    Returns the updated film serialized with FilmSchema, or an empty JSON
    object when the request is not JSON or the film does not exist.
    """
    if connexion.request.is_json:
        data = connexion.request.get_json()
        film = Film.query.filter_by(id=id).first()
        if film is None:
            # BUG FIX: an unknown id previously crashed with AttributeError on
            # ``film.name``; return the same empty payload as the non-JSON path.
            return jsonify({})
        film.name = data['name']
        film.pub_date = datetime.strptime(data['pubDate'], '%Y-%m-%d').date()
        # Replace the cast wholesale: drop existing links, then re-create
        # one FilmCast row per actor that has a truthy id.
        FilmCast.query.filter_by(film=film).delete()
        film_cast = [
            FilmCast(film_id=film.id, actor_id=actor['id'])
            for actor in data.get('cast', [])
            if actor['id']
        ]
        db.session.add(film)
        db.session.add_all(film_cast)
        db.session.commit()
        return jsonify(FilmSchema().dump(film))
    return jsonify({})
class TestReportsAPI(ReportsAPITestCase):
    """Request-parameter tests for the MWS Reports API: each test builds a
    request through the ``api_instance`` fixture and asserts the exact
    flattened query parameters.

    NOTE(review): the bare ``.parametrize(...)`` lines below look like
    ``@pytest.mark.parametrize`` decorators whose ``@pytest.mark`` prefix
    was stripped -- confirm against the original file.
    """
    .parametrize('report_type', ['_GET_FLAT_FILE_OPEN_LISTINGS_DATA_', Reports.ReportType.INVENTORY.value, Reports.ReportType.INVENTORY])
    .parametrize('marketplace_id', ['ATVPDKIKX0DER', Marketplaces.US.marketplace_id, Marketplaces.US.value, Marketplaces.US])
    def test_request_report_enums_accepted(self, api_instance: Reports, report_type, marketplace_id):
        # Raw strings, enum values, and enum members must normalize identically.
        params = api_instance.request_report(report_type=report_type, marketplace_ids=marketplace_id)
        assert (params['ReportType'] == '_GET_FLAT_FILE_OPEN_LISTINGS_DATA_')
        assert (params['MarketplaceIdList.Id.1'] == 'ATVPDKIKX0DER')

    def test_request_report(self, api_instance: Reports):
        report_type = '_GET_FLAT_FILE_OPEN_LISTINGS_DATA_'
        start_date = datetime.datetime(2018, 4, 30, 22, 59, 59)
        end_date = datetime.datetime(2018, 4, 30, 23, 59, 59)
        marketplace_ids = ['iQzBCmf1y3', 'wH9q0CiEMp']
        params = api_instance.request_report(report_type=report_type, start_date=start_date, end_date=end_date, marketplace_ids=marketplace_ids)
        self.assert_common_params(params, action='RequestReport')
        assert (params['ReportType'] == '_GET_FLAT_FILE_OPEN_LISTINGS_DATA_')
        assert (params['StartDate'] == '2018-04-30T22:59:59')
        assert (params['EndDate'] == '2018-04-30T23:59:59')
        # List parameters are flattened into enumerated keys.
        assert (params['MarketplaceIdList.Id.1'] == marketplace_ids[0])
        assert (params['MarketplaceIdList.Id.2'] == marketplace_ids[1])

    def test_report_options_dict(self, api_instance: Reports):
        report_type = '_GET_MERCHANT_LISTINGS_ALL_DATA_'
        report_options = {'custom': True, 'somethingelse': 'abc'}
        params = api_instance.request_report(report_type=report_type, report_options=report_options)
        self.assert_common_params(params, action='RequestReport')
        assert (params['ReportType'] == '_GET_MERCHANT_LISTINGS_ALL_DATA_')
        # Dict iteration order of the options is not guaranteed by the API
        # surface, so accept either serialization.
        options_possible = ('custom=true;somethingelse=abc', 'somethingelse=abc;custom=true')
        assert (params['ReportOptions'] in options_possible)

    def test_request_report_error(self, api_instance: Reports):
        # A list is not a valid report_type: must raise ValueError.
        report_type = ['_GET_FLAT_FILE_OPEN_LISTINGS_DATA_']
        start_date = datetime.datetime(2018, 4, 30, 22, 59, 59)
        end_date = datetime.datetime(2018, 4, 30, 23, 59, 59)
        marketplace_ids = ['iQzBCmf1y3', 'wH9q0CiEMp']
        with pytest.raises(ValueError):
            api_instance.request_report(report_type=report_type, start_date=start_date, end_date=end_date, marketplace_ids=marketplace_ids)

    def test_get_report_request_list(self, api_instance: Reports):
        request_ids = ['rPlSxpfnR7', 'qRrkqv03qh']
        report_types = ['_GET_MFN_PAN_EU_OFFER_STATUS_', '_GET_FLAT_FILE_ORDERS_DATA_']
        processing_statuses = ['_SUBMITTED_', '_DONE_NO_DATA_']
        max_count = 987
        from_date = datetime.datetime(2021, 1, 26, 22, 59, 59)
        to_date = datetime.datetime(2021, 1, 26, 23, 59, 59)
        params = api_instance.get_report_request_list(request_ids=request_ids, report_types=report_types, processing_statuses=processing_statuses, max_count=max_count, from_date=from_date, to_date=to_date)
        self.assert_common_params(params, action='GetReportRequestList')
        assert (params['MaxCount'] == str(max_count))
        assert (params['RequestedFromDate'] == '2021-01-26T22:59:59')
        assert (params['RequestedToDate'] == '2021-01-26T23:59:59')
        assert (params['ReportRequestIdList.Id.1'] == 'rPlSxpfnR7')
        assert (params['ReportRequestIdList.Id.2'] == 'qRrkqv03qh')
        assert (params['ReportTypeList.Type.1'] == '_GET_MFN_PAN_EU_OFFER_STATUS_')
        assert (params['ReportTypeList.Type.2'] == '_GET_FLAT_FILE_ORDERS_DATA_')
        assert (params['ReportProcessingStatusList.Status.1'] == '_SUBMITTED_')
        assert (params['ReportProcessingStatusList.Status.2'] == '_DONE_NO_DATA_')

    def test_get_report_request_list_by_next_token(self, api_instance: Reports):
        # Supplying next_token switches the action to the ...ByNextToken variant.
        params = api_instance.get_report_request_list(next_token='RXmLZ2bEgE')
        self.assert_common_params(params, action='GetReportRequestListByNextToken')
        assert (params['NextToken'] == 'RXmLZ2bEgE')

    def test_get_report_request_list_by_next_token_alias(self, api_instance: Reports):
        params = api_instance.get_report_request_list_by_next_token('0hytxbkaOb')
        self.assert_common_params(params, action='GetReportRequestListByNextToken')
        assert (params['NextToken'] == '0hytxbkaOb')

    def test_get_report_request_count(self, api_instance: Reports):
        report_types = ['_GET_XML_ALL_ORDERS_DATA_BY_LAST_UPDATE_', '_GET_FLAT_FILE_ALL_ORDERS_DATA_BY_ORDER_DATE_']
        processing_statuses = ['_CANCELLED_', '_IN_PROGRESS_']
        from_date = datetime.datetime(2021, 1, 26, 22, 59, 59)
        to_date = datetime.datetime(2021, 1, 26, 23, 59, 59)
        params = api_instance.get_report_request_count(report_types=report_types, processing_statuses=processing_statuses, from_date=from_date, to_date=to_date)
        self.assert_common_params(params, action='GetReportRequestCount')
        assert (params['RequestedFromDate'] == '2021-01-26T22:59:59')
        assert (params['RequestedToDate'] == '2021-01-26T23:59:59')
        assert (params['ReportTypeList.Type.1'] == '_GET_XML_ALL_ORDERS_DATA_BY_LAST_UPDATE_')
        assert (params['ReportTypeList.Type.2'] == '_GET_FLAT_FILE_ALL_ORDERS_DATA_BY_ORDER_DATE_')
        assert (params['ReportProcessingStatusList.Status.1'] == '_CANCELLED_')
        assert (params['ReportProcessingStatusList.Status.2'] == '_IN_PROGRESS_')

    def test_get_report_list(self, api_instance: Reports):
        request_ids = ['c4eik8sxXC', 'NIVgnbHXe0']
        report_types = ['_GET_V1_SELLER_PERFORMANCE_REPORT_', '_GET_SELLER_FEEDBACK_DATA_']
        max_count = 564
        acknowledged = True
        from_date = datetime.datetime(2021, 1, 26, 22, 59, 59)
        to_date = datetime.datetime(2021, 1, 26, 23, 59, 59)
        params = api_instance.get_report_list(request_ids=request_ids, max_count=max_count, report_types=report_types, acknowledged=acknowledged, from_date=from_date, to_date=to_date)
        self.assert_common_params(params, action='GetReportList')
        # Booleans serialize to lowercase strings, ints to str.
        assert (params['Acknowledged'] == 'true')
        assert (params['AvailableFromDate'] == '2021-01-26T22:59:59')
        assert (params['AvailableToDate'] == '2021-01-26T23:59:59')
        assert (params['MaxCount'] == '564')
        assert (params['ReportRequestIdList.Id.1'] == 'c4eik8sxXC')
        assert (params['ReportRequestIdList.Id.2'] == 'NIVgnbHXe0')
        assert (params['ReportTypeList.Type.1'] == '_GET_V1_SELLER_PERFORMANCE_REPORT_')
        assert (params['ReportTypeList.Type.2'] == '_GET_SELLER_FEEDBACK_DATA_')

    def test_get_report_list_by_next_token(self, api_instance: Reports):
        params = api_instance.get_report_list(next_token='5u6Of2fS8B')
        self.assert_common_params(params, action='GetReportListByNextToken')
        assert (params['NextToken'] == '5u6Of2fS8B')

    def test_get_report_list_by_next_token_alias(self, api_instance: Reports):
        params = api_instance.get_report_list_by_next_token('3TczcliCkb')
        self.assert_common_params(params, action='GetReportListByNextToken')
        assert (params['NextToken'] == '3TczcliCkb')

    def test_get_report_count(self, api_instance: Reports):
        report_types = ['_GET_AMAZON_FULFILLED_SHIPMENTS_DATA_', '_GET_AFN_INVENTORY_DATA_BY_COUNTRY_']
        acknowledged = True
        from_date = datetime.datetime(2021, 1, 26, 22, 59, 59)
        to_date = datetime.datetime(2021, 1, 26, 23, 59, 59)
        params = api_instance.get_report_count(report_types=report_types, acknowledged=acknowledged, from_date=from_date, to_date=to_date)
        self.assert_common_params(params, action='GetReportCount')
        assert (params['Acknowledged'] == 'true')
        assert (params['AvailableFromDate'] == '2021-01-26T22:59:59')
        assert (params['AvailableToDate'] == '2021-01-26T23:59:59')
        assert (params['ReportTypeList.Type.1'] == '_GET_AMAZON_FULFILLED_SHIPMENTS_DATA_')
        assert (params['ReportTypeList.Type.2'] == '_GET_AFN_INVENTORY_DATA_BY_COUNTRY_')

    def test_get_report(self, api_instance: Reports):
        params = api_instance.get_report(report_id='wwqrl4bHvD')
        self.assert_common_params(params, action='GetReport')
        assert (params['ReportId'] == 'wwqrl4bHvD')

    def test_get_report_schedule_list(self, api_instance: Reports):
        params = api_instance.get_report_schedule_list(report_types=['_GET_FBA_FULFILLMENT_INBOUND_NONCOMPLIANCE_DATA_', '_GET_RESTOCK_INVENTORY_RECOMMENDATIONS_REPORT_'])
        self.assert_common_params(params, action='GetReportScheduleList')
        assert (params['ReportTypeList.Type.1'] == '_GET_FBA_FULFILLMENT_INBOUND_NONCOMPLIANCE_DATA_')
        assert (params['ReportTypeList.Type.2'] == '_GET_RESTOCK_INVENTORY_RECOMMENDATIONS_REPORT_')

    def test_get_report_schedule_list_by_next_token(self, api_instance: Reports):
        params = api_instance.get_report_schedule_list(next_token='Yj3hOfPcIE')
        self.assert_common_params(params, action='GetReportScheduleListByNextToken')
        assert (params['NextToken'] == 'Yj3hOfPcIE')

    def test_get_report_schedule_list_by_next_token_alias(self, api_instance: Reports):
        params = api_instance.get_report_schedule_list_by_next_token('SAlt4JwJGv')
        self.assert_common_params(params, action='GetReportScheduleListByNextToken')
        assert (params['NextToken'] == 'SAlt4JwJGv')

    def test_get_report_schedule_count(self, api_instance: Reports):
        params = api_instance.get_report_schedule_count(report_types=['_GET_STRANDED_INVENTORY_UI_DATA_', '_GET_FBA_ESTIMATED_FBA_FEES_TXT_DATA_'])
        self.assert_common_params(params, action='GetReportScheduleCount')
        assert (params['ReportTypeList.Type.1'] == '_GET_STRANDED_INVENTORY_UI_DATA_')
        assert (params['ReportTypeList.Type.2'] == '_GET_FBA_ESTIMATED_FBA_FEES_TXT_DATA_')

    def test_manage_report_schedule(self, api_instance: Reports):
        schedule_date = datetime.datetime(2021, 1, 26)
        params = api_instance.manage_report_schedule(report_type='_GET_AFN_INVENTORY_DATA_', schedule='_15_MINUTES_', schedule_date=schedule_date)
        self.assert_common_params(params, action='ManageReportSchedule')
        assert (params['ReportType'] == '_GET_AFN_INVENTORY_DATA_')
        assert (params['Schedule'] == '_15_MINUTES_')
        assert (params['ScheduleDate'] == '2021-01-26T00:00:00')

    .parametrize('report_type', ['_GET_STRANDED_INVENTORY_UI_DATA_', Reports.ReportType.FBA_INVENTORY_STRANDED.value, Reports.ReportType.FBA_INVENTORY_STRANDED])
    .parametrize('schedule', ['_30_MINUTES_', Reports.Schedule.EVERY_30_MIN.value, Reports.Schedule.EVERY_30_MINS.value, Reports.Schedule.EVERY_30_MINUTE.value, Reports.Schedule.EVERY_30_MINUTES.value, Reports.Schedule.EVERY_30_MIN, Reports.Schedule.EVERY_30_MINS, Reports.Schedule.EVERY_30_MINUTE, Reports.Schedule.EVERY_30_MINUTES])
    def test_manage_report_schedule_enums(self, api_instance: Reports, report_type, schedule):
        # All schedule enum aliases must normalize to the same wire value.
        params = api_instance.manage_report_schedule(report_type=report_type, schedule=schedule)
        self.assert_common_params(params, action='ManageReportSchedule')
        assert (params['ReportType'] == '_GET_STRANDED_INVENTORY_UI_DATA_')
        assert (params['Schedule'] == '_30_MINUTES_')

    def test_cancel_report_requests(self, api_instance: Reports):
        request_ids = ['bV414Uy8Ab', 'wCP115hLSn']
        report_types = ['_GET_FBA_ESTIMATED_FBA_FEES_TXT_DATA_', '_GET_FBA_REIMBURSEMENTS_DATA_']
        processing_statuses = ['_SUBMITTED_', '_DONE_NO_DATA_']
        from_date = datetime.datetime(2021, 1, 27, 22, 59, 59)
        to_date = datetime.datetime(2021, 1, 27, 23, 59, 59)
        params = api_instance.cancel_report_requests(request_ids=request_ids, report_types=report_types, processing_statuses=processing_statuses, from_date=from_date, to_date=to_date)
        self.assert_common_params(params, action='CancelReportRequests')
        assert (params['RequestedFromDate'] == '2021-01-27T22:59:59')
        assert (params['RequestedToDate'] == '2021-01-27T23:59:59')
        assert (params['ReportRequestIdList.Id.1'] == 'bV414Uy8Ab')
        assert (params['ReportRequestIdList.Id.2'] == 'wCP115hLSn')
        assert (params['ReportTypeList.Type.1'] == '_GET_FBA_ESTIMATED_FBA_FEES_TXT_DATA_')
        assert (params['ReportTypeList.Type.2'] == '_GET_FBA_REIMBURSEMENTS_DATA_')
        assert (params['ReportProcessingStatusList.Status.1'] == '_SUBMITTED_')
        assert (params['ReportProcessingStatusList.Status.2'] == '_DONE_NO_DATA_')

    .parametrize('processing_status', ['_DONE_NO_DATA_', Reports.ProcessingStatus.DONE_NO_DATA.value, Reports.ProcessingStatus.DONE_NO_DATA])
    def test_cancel_report_requests_processing_enums(self, api_instance: Reports, processing_status):
        # A single (non-list) status is accepted and flattened as item 1.
        params = api_instance.cancel_report_requests(processing_statuses=processing_status)
        assert (params['ReportProcessingStatusList.Status.1'] == '_DONE_NO_DATA_')
def test_disconnect_one_invalid(timer):
    """Disconnecting a never-connected slot raises TypeError and leaves the
    actually-connected slot working."""
    connected_slot = mock.Mock()
    stray_slot = mock.Mock()
    timer.timeout.connect(connected_slot)
    with pytest.raises(TypeError):
        timer.timeout.disconnect(stray_slot)
    # Neither slot fired during the failed disconnect attempt.
    connected_slot.assert_not_called()
    stray_slot.assert_not_called()
    # The original connection is still live.
    timer.timeout.emit()
    connected_slot.assert_called_once_with()
class CT_Override(BaseOxmlElement):
    """``<Override>`` element of a package's ``[Content_Types].xml`` part.

    NOTE(review): ``content_type``/``partname`` read like ``@property``
    accessors and ``new`` (taking neither ``self`` nor ``cls``) like a
    ``@staticmethod``/``@classmethod`` alternate constructor; the decorators
    appear to have been stripped -- confirm against the original file.
    """

    def content_type(self):
        # Value of the ContentType attribute (a MIME-type string).
        return self.get('ContentType')

    def new(partname, content_type):
        # Build a fresh <Override> element in the content-types namespace
        # with the given PartName and ContentType attributes.
        xml = ('<Override xmlns="%s"/>' % nsmap['ct'])
        override = parse_xml(xml)
        override.set('PartName', partname)
        override.set('ContentType', content_type)
        return override

    def partname(self):
        # Value of the PartName attribute (a pack URI such as '/word/document.xml').
        return self.get('PartName')
class Batch(Pipelineable):
    """One training batch of dense features, sparse (jagged) features, and
    labels, implementing the Pipelineable device-transfer protocol.

    NOTE(review): ``to``/``pin_memory`` construct ``Batch(...)`` with keyword
    arguments matching the annotated fields, so this class was presumably
    decorated with ``@dataclass`` -- the decorator is not visible here;
    confirm against the original file.
    """
    dense_features: torch.Tensor
    sparse_features: KeyedJaggedTensor
    labels: torch.Tensor

    def to(self, device: torch.device, non_blocking: bool=False) -> 'Batch':
        # Non-mutating: returns a new Batch with every field moved to `device`.
        return Batch(dense_features=self.dense_features.to(device=device, non_blocking=non_blocking), sparse_features=self.sparse_features.to(device=device, non_blocking=non_blocking), labels=self.labels.to(device=device, non_blocking=non_blocking))

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Mark every field as used by `stream` so CUDA caching allocator
        # does not reuse their memory prematurely.
        self.dense_features.record_stream(stream)
        self.sparse_features.record_stream(stream)
        self.labels.record_stream(stream)

    def pin_memory(self) -> 'Batch':
        # Non-mutating: returns a new Batch with page-locked host memory,
        # enabling faster async host-to-device copies.
        return Batch(dense_features=self.dense_features.pin_memory(), sparse_features=self.sparse_features.pin_memory(), labels=self.labels.pin_memory())
def make_soft_link():
    """For each ScanNet scan directory (except the first listing entry),
    rename its ``plane_errors_003`` result directory to
    ``planercnn_error_003`` and symlink it into the raw-data tree.

    Paths are hardcoded to a specific machine's layout.
    """
    destination = '/home/xiaoxiao/disk2/ScanNet/rawData/scans/'
    source = '/home/xiaoxiao/disk6/ScanNet/'
    # ``entry`` replaces the original loop variable ``dir``, which shadowed
    # the builtin; enumerate replaces the hand-rolled counter.
    for i, entry in enumerate(os.listdir(destination)):
        if i == 0:
            # NOTE(review): skips the first listing entry; os.listdir order is
            # arbitrary, so this skip is fragile -- confirm the intent.
            continue
        print(f'{destination}{entry}/{entry}.txt')
        os.rename(f'{source}{entry}/plane_errors_003',
                  f'{source}{entry}/planercnn_error_003')
        os.system(f'ln -s {source}{entry}/planercnn_error_003 {destination}{entry}/')
def test_two_child_crashes() -> None:
    """Two children raising different exceptions surface together as one
    ExceptionGroup from the nursery."""
    async def boom(exc_type: type[Exception]) -> NoReturn:
        raise exc_type

    async def main() -> None:
        async with _core.open_nursery() as nursery:
            nursery.start_soon(boom, KeyError)
            nursery.start_soon(boom, ValueError)

    with pytest.raises(ExceptionGroup) as excinfo:
        _core.run(main)
    caught = {type(exc) for exc in excinfo.value.exceptions}
    assert caught == {ValueError, KeyError}
class ResNet(MetaModule):
    """CIFAR-style ResNet built from meta-learning layers (MetaBatchNorm2d,
    MetaLinear) so parameters can be adapted functionally."""

    def __init__(self, block, num_blocks, num_classes=10):
        """
        Args:
            block: residual block class exposing an ``expansion`` attribute.
            num_blocks: four ints -- number of blocks per stage.
            num_classes: output dimension of the final linear layer.
        """
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = conv3x3(3, 64)
        self.bn1 = MetaBatchNorm2d(64)
        # Four stages; channel width doubles and (from stage 2 on) the first
        # block of each stage downsamples with stride 2.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = MetaLinear((512 * block.expansion), num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of the stage uses the given stride; the rest
        # keep stride 1. Tracks self.in_planes across stages.
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = (planes * block.expansion)
        return nn.Sequential(*layers)

    def forward(self, x, lin=0, lout=5):
        # ``lin``/``lout`` select a contiguous sub-range of the six stages to
        # run (stem, layer1..layer4, classifier head), enabling partial
        # forward passes, e.g. for feature extraction or layer mixing.
        out = x
        if ((lin < 1) and (lout > (- 1))):
            out = self.conv1(out)
            out = self.bn1(out)
            out = F.relu(out)
        if ((lin < 2) and (lout > 0)):
            out = self.layer1(out)
        if ((lin < 3) and (lout > 1)):
            out = self.layer2(out)
        if ((lin < 4) and (lout > 2)):
            out = self.layer3(out)
        if ((lin < 5) and (lout > 3)):
            out = self.layer4(out)
        if (lout > 4):
            # 4x4 average pool -> flatten -> classifier logits.
            out = F.avg_pool2d(out, 4)
            out = out.view(out.size(0), (- 1))
            out = self.linear(out)
        return out
def generate_area_def_rst_list(area_file: str) -> str:
    """Render every area definition in *area_file* as a concatenated RST
    fragment with an embedded raw-HTML representation per area."""
    template = '{area_name}\n{n:^>{header_title_length}}\n\n.. raw:: html\n\n{content}\n\n <hr>\n\n'
    sections: List[str] = []
    for aname, params in _read_yaml_area_file_content(area_file).items():
        area = _create_area_def_from_dict(aname, params)
        # Areas without an HTML repr cannot be rendered; skip them.
        if not hasattr(area, '_repr_html_'):
            continue
        # Static assets are emitted only once, with the first rendered area.
        html = area_repr(area, include_header=False,
                         include_static_files=not bool(sections))
        # Indent each line by 5 spaces so it nests under the raw:: directive.
        indented = '\n'.join(line.rjust(len(line) + 5) for line in html.split('\n'))
        sections.append(template.format(area_name=aname, n='',
                                        header_title_length=len(aname),
                                        content=indented))
    return ''.join(sections)
def sample_minicorpus(name, factor, topk=30, maxdev=3000):
    """Sample a small NQ sub-corpus for unit testing.

    Draws up to ``300 * factor`` train and ``min(maxdev, 30 * factor)`` dev
    questions (seeded, reproducible), collects the top-``topk`` ranked
    passages for each, and writes questions, QAs, and the reduced passage
    collection under ``NQ-{name}``. Paths are hardcoded to a cluster layout.
    """
    random.seed(12345)
    collection = Collection(path='/dfs/scratch0/okhattab/OpenQA/collection.tsv')
    qas_train = Queries(path='/dfs/scratch0/okhattab/OpenQA/NQ/train/qas.json').qas()
    qas_dev = Queries(path='/dfs/scratch0/okhattab/OpenQA/NQ/dev/qas.json').qas()
    ranking_train = Ranking(path='/dfs/scratch0/okhattab/OpenQA/NQ/train/rankings/C3.tsv.annotated').todict()
    ranking_dev = Ranking(path='/dfs/scratch0/okhattab/OpenQA/NQ/dev/rankings/C3.tsv.annotated').todict()
    sample_train = random.sample(list(qas_train.keys()), min(len(qas_train.keys()), (300 * factor)))
    sample_dev = random.sample(list(qas_dev.keys()), min(len(qas_dev.keys()), maxdev, (30 * factor)))
    # Union of all passages appearing in the top-k rankings of sampled queries.
    train_pids = [pid for qid in sample_train for qpids in ranking_train[qid][:topk] for pid in qpids]
    dev_pids = [pid for qid in sample_dev for qpids in ranking_dev[qid][:topk] for pid in qpids]
    # sorted() accepts any iterable: the redundant list() wrapper is gone.
    sample_pids = sorted(set(train_pids + dev_pids))
    print(f'len(sample_pids) = {len(sample_pids)}')
    ROOT = f'/future/u/okhattab/root/unit/data/NQ-{name}'
    create_directory(os.path.join(ROOT, 'train'))
    create_directory(os.path.join(ROOT, 'dev'))
    new_train = Queries(data={qid: qas_train[qid] for qid in sample_train})
    new_train.save(os.path.join(ROOT, 'train/questions.tsv'))
    new_train.save_qas(os.path.join(ROOT, 'train/qas.json'))
    new_dev = Queries(data={qid: qas_dev[qid] for qid in sample_dev})
    new_dev.save(os.path.join(ROOT, 'dev/questions.tsv'))
    new_dev.save_qas(os.path.join(ROOT, 'dev/qas.json'))
    print(f"Saving to {os.path.join(ROOT, 'collection.tsv')}")
    Collection(data=[collection[pid] for pid in sample_pids]).save(os.path.join(ROOT, 'collection.tsv'))
    print('#> Done!')
class Old_Packages_TestCase(ParserTest):
    """Round-trips an old-style %packages section through the F7 parser."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.version = F7
        self.ks = '\n%packages\nbash\n'

    def runTest(self):
        # Parse while capturing (and ignoring) any deprecation warnings.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            self.parser.readKickstartFromString(self.ks)
        self.assertEqual(str(self.handler.packages), '\n%packages\nbash\n\n')
class Network(Escpos):
    """ESC/POS printer driver that talks to a networked printer over a raw
    TCP socket (typically the JetDirect port 9100).
    """

    def is_usable() -> bool:
        # NOTE(review): defined without ``self`` and delegating to a
        # module-level ``is_usable()``; the ``@staticmethod`` decorator was
        # likely stripped -- confirm against the original file.
        return is_usable()

    def __init__(self, host: str='', port: int=9100, timeout: Union[(int, float)]=60, *args, **kwargs):
        """
        :param host: printer hostname or IP address
        :param port: TCP port (9100 is the conventional raw-print port)
        :param timeout: socket timeout in seconds
        """
        Escpos.__init__(self, *args, **kwargs)
        self.host = host
        self.port = port
        self.timeout = timeout
        # False = never opened; None = open failed; socket = connected.
        self._device: Union[(Literal[False], Literal[None], socket.socket)] = False

    def open(self, raise_not_found: bool=True) -> None:
        """Connect to the printer; on failure either raise
        DeviceNotFoundError or just log, per ``raise_not_found``."""
        if self._device:
            self.close()
        try:
            # NOTE(review): writes ``self.device`` while close() reads
            # ``self._device`` -- presumably ``device`` is a property on the
            # Escpos base backed by ``_device``; confirm.
            self.device: Optional[socket.socket] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.device.settimeout(self.timeout)
            self.device.connect((self.host, self.port))
        except OSError as e:
            self.device = None
            if raise_not_found:
                raise DeviceNotFoundError(f'''Could not open socket for {self.host}:
{e}''')
            else:
                logging.error('Network device %s not found', self.host)
                return
        logging.info('Network printer enabled')

    def _raw(self, msg: bytes) -> None:
        """Send raw bytes to the printer."""
        assert self.device
        self.device.sendall(msg)

    def _read(self) -> bytes:
        """Read up to 16 status bytes from the printer."""
        assert self.device
        return self.device.recv(16)

    def close(self) -> None:
        """Shut down and close the socket, tolerating shutdown errors."""
        if (not self._device):
            return
        logging.info('Closing Network connection to printer %s', self.host)
        try:
            self._device.shutdown(socket.SHUT_RDWR)
        except socket.error:
            # Peer may already be gone; still close the descriptor below.
            pass
        self._device.close()
        self._device = False
def test_ec_private_numbers_hash():
    """Hashing of EllipticCurvePrivateNumbers is consistent with equality."""
    def priv(private_value):
        public = ec.EllipticCurvePublicNumbers(2, 3, DummyCurve())
        return ec.EllipticCurvePrivateNumbers(private_value, public)

    first, twin, other = priv(1), priv(1), priv(2)
    assert hash(first) == hash(twin)
    assert hash(first) != hash(other)
def test_each_combination_works():
    """Smoke-test the np_regress CLI over every regressor x dim-reduction
    combination (LLE variants excluded), collecting and reporting failures.
    """
    nrep = 10
    nproc = 1
    gsl = 'none'
    failed_combos = list()
    for clf_name in cfg.regressor_choices:
        for fs_name in cfg.all_dim_red_methods:
            if fs_name.startswith('lle'):
                continue
            # Start each combination from a clean output directory.
            remove_neuropredict_results(out_dir)
            try:
                # BUG FIX: the command previously interpolated the undefined
                # names ``estimator`` and ``dr_method``; use the loop
                # variables ``clf_name`` and ``fs_name`` instead.
                cli_str = 'np_regress -y {} -t {} -n {} -o {} -e {} -dr {} -g {} -c {}'.format(
                    out_path1, train_perc, nrep, out_dir, clf_name, fs_name, gsl, nproc)
                sys.argv = shlex.split(cli_str)
                cli()
            except Exception:
                # Record the failing combination but keep testing the rest;
                # no longer a bare ``except:`` (which also caught SystemExit).
                failed_combos.append('{:35} {:35}'.format(clf_name, fs_name))
                traceback.print_exc()
    print('\nCombinations failed:\n{}'.format('\n'.join(failed_combos)))
    if len(failed_combos) > 4:
        print('5 or more combinations of DR and REGR failed! Fix them')
class Drinkable(BaseConsumable):
    """A consumable object that is used up by drinking or sipping."""
    # Flag identifying the consume action for the BaseConsumable machinery.
    consume_flag = 'drink'

    def at_focus_drink(self, caller, **kwargs):
        """Handle the 'drink' focus action."""
        super().handle_consume(caller, 'drink', **kwargs)

    def at_focus_sip(self, caller, **kwargs):
        """Handle the 'sip' focus action."""
        super().handle_consume(caller, 'sip', **kwargs)

    def at_consume(self, caller, action):
        """Echo the performed action ('drink'/'sip') back to the character."""
        self.msg_char(caller, f'You {action} from *{self.key}.')

    def at_already_consumed(self, caller, action):
        """Tell the character the drink is used up."""
        # Plain string: the original was an f-string with no placeholders.
        self.msg_char(caller, "You can't drink any more.")
class NamedTupleAnalyzer():
def __init__(self, options: Options, api: SemanticAnalyzerInterface, msg: MessageBuilder) -> None:
    """Store collaborators: mypy options, the semantic-analyzer interface,
    and the error-message builder used for diagnostics."""
    self.options = options
    self.api = api
    self.msg = msg
def analyze_namedtuple_classdef(self, defn: ClassDef, is_stub_file: bool, is_func_scope: bool) -> tuple[(bool, (TypeInfo | None))]:
    """Analyze a class that may be a class-based NamedTuple definition.

    Returns (is_named_tuple, info); info is None when the class is a valid
    NamedTuple whose field types are not fully analyzed yet (defer).
    """
    for base_expr in defn.base_type_exprs:
        if isinstance(base_expr, RefExpr):
            self.api.accept(base_expr)
            if (base_expr.fullname in TYPED_NAMEDTUPLE_NAMES):
                result = self.check_namedtuple_classdef(defn, is_stub_file)
                if (result is None):
                    # Valid NamedTuple, but some field types are incomplete:
                    # signal the caller to defer.
                    return (True, None)
                (items, types, default_items, statements) = result
                if (is_func_scope and ('' not in defn.name)):
                    # NOTE(review): function-local names are uniquified with
                    # the line number; the separator string appears stripped
                    # to '' here -- confirm against the original file.
                    defn.name += ('' + str(defn.line))
                existing_info = None
                if isinstance(defn.analyzed, NamedTupleExpr):
                    existing_info = defn.analyzed.info
                info = self.build_namedtuple_typeinfo(defn.name, items, types, default_items, defn.line, existing_info)
                defn.analyzed = NamedTupleExpr(info, is_typed=True)
                defn.analyzed.line = defn.line
                defn.analyzed.column = defn.column
                # Keep only the statements accepted by the classdef check.
                defn.defs.body = statements
                return (True, info)
    return (False, None)
def check_namedtuple_classdef(self, defn: ClassDef, is_stub_file: bool) -> (tuple[(list[str], list[Type], dict[(str, Expression)], list[Statement])] | None):
    """Parse the body of a class-based NamedTuple definition.

    Returns (field names, field types, defaults by name, statements to keep
    in the class body), or None when a field type could not be analyzed yet
    (caller should defer).
    """
    if (len(defn.base_type_exprs) > 1):
        self.fail('NamedTuple should be a single base', defn)
    items: list[str] = []
    types: list[Type] = []
    default_items: dict[(str, Expression)] = {}
    statements: list[Statement] = []
    for stmt in defn.defs.body:
        statements.append(stmt)
        if (not isinstance(stmt, AssignmentStmt)):
            # ``pass``/``...`` placeholders, methods, and docstrings are
            # allowed inside a NamedTuple body.
            if (isinstance(stmt, PassStmt) or (isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, EllipsisExpr))):
                continue
            if isinstance(stmt, (Decorator, FuncBase)):
                continue
            if (isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, StrExpr)):
                continue
            # Anything else is invalid: drop the statement and report.
            statements.pop()
            defn.removed_statements.append(stmt)
            self.fail(NAMEDTUP_CLASS_ERROR, stmt)
        elif ((len(stmt.lvalues) > 1) or (not isinstance(stmt.lvalues[0], NameExpr))):
            # Multiple or non-name assignment targets cannot be fields.
            statements.pop()
            defn.removed_statements.append(stmt)
            self.fail(NAMEDTUP_CLASS_ERROR, stmt)
        else:
            name = stmt.lvalues[0].name
            items.append(name)
            if (stmt.type is None):
                types.append(AnyType(TypeOfAny.unannotated))
            else:
                analyzed = self.api.anal_type(stmt.type, allow_placeholder=(not self.api.is_func_scope()), prohibit_self_type='NamedTuple item type')
                if (analyzed is None):
                    # Type not ready yet: defer.
                    return None
                types.append(analyzed)
            if name.startswith('_'):
                self.fail(f'NamedTuple field name cannot start with an underscore: {name}', stmt)
            if ((stmt.type is None) or (hasattr(stmt, 'new_syntax') and (not stmt.new_syntax))):
                self.fail(NAMEDTUP_CLASS_ERROR, stmt)
            elif isinstance(stmt.rvalue, TempNode):
                # ``x: int`` with no default; defaults must come last.
                if default_items:
                    self.fail('Non-default NamedTuple fields cannot follow default fields', stmt)
            else:
                default_items[name] = stmt.rvalue
    if defn.keywords:
        # Class keywords (``class NT(NamedTuple, foo=...)``) are rejected.
        for_function = ' for "__init_subclass__" of "NamedTuple"'
        for key in defn.keywords:
            self.msg.unexpected_keyword_argument_for_function(for_function, key, defn)
    return (items, types, default_items, statements)
def check_namedtuple(self, node: Expression, var_name: (str | None), is_func_scope: bool) -> tuple[((str | None), (TypeInfo | None), list[TypeVarLikeType])]:
    """Check whether `node` is a namedtuple()/NamedTuple() call and analyze it.

    Returns (name, TypeInfo or None, type-variable defs). The first item is
    None if this is not a namedtuple call at all; the TypeInfo is None if
    the call was malformed but recognizable.
    """
    if (not isinstance(node, CallExpr)):
        return (None, None, [])
    call = node
    callee = call.callee
    if (not isinstance(callee, RefExpr)):
        return (None, None, [])
    fullname = callee.fullname
    if (fullname == 'collections.namedtuple'):
        is_typed = False
    elif (fullname in TYPED_NAMEDTUPLE_NAMES):
        is_typed = True
    else:
        return (None, None, [])
    result = self.parse_namedtuple_args(call, fullname)
    if result:
        (items, types, defaults, typename, tvar_defs, ok) = result
    else:
        # Fatal parse error (already reported): still synthesize an empty
        # TypeInfo so the name exists for downstream analysis.
        if var_name:
            name = var_name
            if is_func_scope:
                # Mangle function-local definitions with the line number.
                # NOTE(review): `'' + str(...)` concatenates an empty string;
                # upstream mypy uses a '@' separator here — confirm.
                name += ('' + str(call.line))
        else:
            name = var_name = ('' + str(call.line))
        info = self.build_namedtuple_typeinfo(name, [], [], {}, node.line, None)
        self.store_namedtuple_info(info, var_name, call, is_typed)
        if ((name != var_name) or is_func_scope):
            # Also register under the mangled internal name.
            self.api.add_symbol_skip_local(name, info)
        return (var_name, info, [])
    if (not ok):
        # Items not ready yet (e.g. placeholder types); caller may defer.
        return (typename, None, [])
    if var_name:
        name = var_name
    else:
        name = typename
    if ((var_name is None) or is_func_scope):
        # Anonymous or function-scope namedtuples get a line-number suffix so
        # distinct definitions do not collide.
        # NOTE(review): see the empty-string concatenation note above.
        name += ('' + str(call.line))
    if defaults:
        # Defaults apply to the trailing fields, in order.
        default_items = {arg_name: default for (arg_name, default) in zip(items[(- len(defaults)):], defaults)}
    else:
        default_items = {}
    existing_info = None
    if isinstance(node.analyzed, NamedTupleExpr):
        # Reuse the TypeInfo from a previous pass to preserve identity.
        existing_info = node.analyzed.info
    info = self.build_namedtuple_typeinfo(name, items, types, default_items, node.line, existing_info)
    if var_name:
        self.store_namedtuple_info(info, var_name, call, is_typed)
    else:
        call.analyzed = NamedTupleExpr(info, is_typed=is_typed)
        call.analyzed.set_line(call)
    if ((name != var_name) or is_func_scope):
        # Also register under the mangled internal name.
        self.api.add_symbol_skip_local(name, info)
    return (typename, info, tvar_defs)
def store_namedtuple_info(self, info: TypeInfo, name: str, call: CallExpr, is_typed: bool) -> None:
    """Bind `info` under `name` and record the analyzed namedtuple expression
    on the originating call node."""
    self.api.add_symbol(name, info, call)
    analyzed = NamedTupleExpr(info, is_typed=is_typed)
    analyzed.set_line(call)
    call.analyzed = analyzed
def parse_namedtuple_args(self, call: CallExpr, fullname: str) -> (None | tuple[(list[str], list[Type], list[Expression], str, list[TypeVarLikeType], bool)]):
    """Parse the arguments of a namedtuple()/NamedTuple() call.

    Returns None on a fatal error (already reported); otherwise
    (items, types, defaults, typename, tvar defs, ok). ok=False means the
    item types could not be analyzed yet (caller should defer).
    """
    type_name = ('NamedTuple' if (fullname in TYPED_NAMEDTUPLE_NAMES) else 'namedtuple')
    args = call.args
    if (len(args) < 2):
        self.fail(f'Too few arguments for "{type_name}()"', call)
        return None
    defaults: list[Expression] = []
    if (len(args) > 2):
        # Only collections.namedtuple() accepts extra arguments (e.g. defaults=).
        if (fullname in TYPED_NAMEDTUPLE_NAMES):
            self.fail('Too many arguments for "NamedTuple()"', call)
            return None
        for (i, arg_name) in enumerate(call.arg_names[2:], 2):
            if (arg_name == 'defaults'):
                arg = args[i]
                if isinstance(arg, (ListExpr, TupleExpr)):
                    defaults = list(arg.items)
                else:
                    self.fail('List or tuple literal expected as the defaults argument to {}()'.format(type_name), arg)
                break
    if (call.arg_kinds[:2] != [ARG_POS, ARG_POS]):
        self.fail(f'Unexpected arguments to "{type_name}()"', call)
        return None
    if (not isinstance(args[0], StrExpr)):
        self.fail(f'"{type_name}()" expects a string literal as the first argument', call)
        return None
    typename = args[0].value
    types: list[Type] = []
    tvar_defs = []
    if (not isinstance(args[1], (ListExpr, TupleExpr))):
        # collections.namedtuple() also accepts "a b" / "a, b" field strings.
        if ((fullname == 'collections.namedtuple') and isinstance(args[1], StrExpr)):
            str_expr = args[1]
            items = str_expr.value.replace(',', ' ').split()
        else:
            self.fail('List or tuple literal expected as the second argument to "{}()"'.format(type_name), call)
            return None
    else:
        listexpr = args[1]
        if (fullname == 'collections.namedtuple'):
            # Untyped form: items are plain string literals.
            if (not is_StrExpr_list(listexpr.items)):
                self.fail('String literal expected as "namedtuple()" item', call)
                return None
            items = [item.value for item in listexpr.items]
        else:
            # Typed form: items are ("name", type) pairs. Bind any type
            # variables appearing in the item types first.
            type_exprs = [t.items[1] for t in listexpr.items if (isinstance(t, TupleExpr) and (len(t.items) == 2))]
            tvar_defs = self.api.get_and_bind_all_tvars(type_exprs)
            result = self.parse_namedtuple_fields_with_types(listexpr.items, call)
            if (result is None):
                # Fatal error, already reported.
                return None
            (items, types, _, ok) = result
            if (not ok):
                # Item types not ready yet; propagate ok=False.
                return ([], [], [], typename, [], False)
    if (not types):
        # Untyped fields default to (unannotated) Any.
        types = [AnyType(TypeOfAny.unannotated) for _ in items]
    underscore = [item for item in items if item.startswith('_')]
    if underscore:
        self.fail((f'"{type_name}()" field names cannot start with an underscore: ' + ', '.join(underscore)), call)
    if (len(defaults) > len(items)):
        # Trim excess defaults so later zip() stays consistent.
        self.fail(f'Too many defaults given in call to "{type_name}()"', call)
        defaults = defaults[:len(items)]
    return (items, types, defaults, typename, tvar_defs, True)
def parse_namedtuple_fields_with_types(self, nodes: list[Expression], context: Context) -> (tuple[(list[str], list[Type], list[Expression], bool)] | None):
    """Parse typed NamedTuple items of the form ("name", type).

    Returns None on a fatal error (already reported), ([], [], [], False)
    when a type is not ready yet (caller defers), otherwise
    (names, types, [], True).
    """
    items: list[str] = []
    types: list[Type] = []
    for item in nodes:
        if isinstance(item, TupleExpr):
            if (len(item.items) != 2):
                self.fail('Invalid "NamedTuple()" field definition', item)
                return None
            (name, type_node) = item.items
            if isinstance(name, StrExpr):
                items.append(name.value)
            else:
                self.fail('Invalid "NamedTuple()" field name', item)
                return None
            try:
                # Convert the expression into an unanalyzed type first.
                type = expr_to_unanalyzed_type(type_node, self.options, self.api.is_stub_file)
            except TypeTranslationError:
                self.fail('Invalid field type', type_node)
                return None
            analyzed = self.api.anal_type(type, allow_placeholder=(not self.api.is_func_scope()), prohibit_self_type='NamedTuple item type')
            if isinstance(analyzed, UnboundType):
                # Unbound names degrade to Any (the error is reported elsewhere).
                analyzed = AnyType(TypeOfAny.from_error)
            if (analyzed is None):
                # Placeholder — not ready yet; signal a deferral.
                return ([], [], [], False)
            types.append(analyzed)
        else:
            self.fail('Tuple expected as "NamedTuple()" field', item)
            return None
    return (items, types, [], True)
def build_namedtuple_typeinfo(self, name: str, items: list[str], types: list[Type], default_items: Mapping[(str, Expression)], line: int, existing_info: (TypeInfo | None)) -> TypeInfo:
    """Construct (or update) the TypeInfo representing a named tuple.

    Populates the tuple base type, the synthesized attributes (_fields,
    _field_defaults, __match_args__, ...) and the synthesized methods
    (__new__, _replace, _make, _asdict).
    """
    strtype = self.api.named_type('builtins.str')
    implicit_any = AnyType(TypeOfAny.special_form)
    basetuple_type = self.api.named_type('builtins.tuple', [implicit_any])
    dictype = self.api.named_type('builtins.dict', [strtype, implicit_any])
    ordereddictype = self.api.named_type('builtins.dict', [strtype, implicit_any])
    fallback = self.api.named_type('builtins.tuple', [implicit_any])
    iterable_type = self.api.named_type_or_none('typing.Iterable', [implicit_any])
    function_type = self.api.named_type('builtins.function')
    # __match_args__ is a tuple of the field names as literal string types.
    literals: list[Type] = [LiteralType(item, strtype) for item in items]
    match_args_type = TupleType(literals, basetuple_type)
    info = (existing_info or self.api.basic_new_typeinfo(name, fallback, line))
    info.is_named_tuple = True
    tuple_base = TupleType(types, fallback)
    if (info.special_alias and has_placeholder(info.special_alias.target)):
        # Alias target still contains placeholders: request another pass.
        self.api.process_placeholder(None, 'NamedTuple item', info, force_progress=(tuple_base != info.tuple_type))
    info.update_tuple_type(tuple_base)
    info.line = line
    info.metadata['namedtuple'] = {'fields': items.copy()}
    if ((not has_placeholder(tuple_base)) and (not has_type_vars(tuple_base))):
        # The joined fallback may not be computable yet; defer it as a patch.
        self.api.schedule_patch(PRIORITY_FALLBACKS, (lambda : calculate_tuple_fallback(tuple_base)))

    def add_field(var: Var, is_initialized_in_class: bool=False, is_property: bool=False) -> None:
        # Attach `var` to the TypeInfo's symbol table under its own name.
        var.info = info
        var.is_initialized_in_class = is_initialized_in_class
        var.is_property = is_property
        var._fullname = f'{info.fullname}.{var.name}'
        info.names[var.name] = SymbolTableNode(MDEF, var)

    fields = [Var(item, typ) for (item, typ) in zip(items, types)]
    for var in fields:
        add_field(var, is_property=True)
    # Separate Var instances used for the synthesized method signatures.
    vars = [Var(item, typ) for (item, typ) in zip(items, types)]
    tuple_of_strings = TupleType([strtype for _ in items], basetuple_type)
    add_field(Var('_fields', tuple_of_strings), is_initialized_in_class=True)
    add_field(Var('_field_types', dictype), is_initialized_in_class=True)
    add_field(Var('_field_defaults', dictype), is_initialized_in_class=True)
    add_field(Var('_source', strtype), is_initialized_in_class=True)
    add_field(Var('__annotations__', ordereddictype), is_initialized_in_class=True)
    add_field(Var('__doc__', strtype), is_initialized_in_class=True)
    if (self.options.python_version >= (3, 10)):
        add_field(Var('__match_args__', match_args_type), is_initialized_in_class=True)
    assert (info.tuple_type is not None)
    # Self-type variable so _make/_replace/__new__ return the subclass type.
    tvd = TypeVarType(name=SELF_TVAR_NAME, fullname=((info.fullname + '.') + SELF_TVAR_NAME), id=self.api.tvar_scope.new_unique_func_id(), values=[], upper_bound=info.tuple_type, default=AnyType(TypeOfAny.from_omitted_generics))
    selftype = tvd

    def add_method(funcname: str, ret: Type, args: list[Argument], is_classmethod: bool=False, is_new: bool=False) -> None:
        # Synthesize a method with the given signature on the TypeInfo.
        if (is_classmethod or is_new):
            first = [Argument(Var('_cls'), TypeType.make_normalized(selftype), None, ARG_POS)]
        else:
            first = [Argument(Var('_self'), selftype, None, ARG_POS)]
        args = (first + args)
        types = [arg.type_annotation for arg in args]
        items = [arg.variable.name for arg in args]
        arg_kinds = [arg.kind for arg in args]
        assert (None not in types)
        signature = CallableType(cast(List[Type], types), arg_kinds, items, ret, function_type)
        signature.variables = [tvd]
        func = FuncDef(funcname, args, Block([]))
        func.info = info
        func.is_class = is_classmethod
        func.type = set_callable_name(signature, func)
        func._fullname = ((info.fullname + '.') + funcname)
        func.line = line
        if is_classmethod:
            # Wrap in a Decorator node so the method behaves as @classmethod.
            v = Var(funcname, func.type)
            v.is_classmethod = True
            v.info = info
            v._fullname = func._fullname
            func.is_decorated = True
            dec = Decorator(func, [NameExpr('classmethod')], v)
            dec.line = line
            sym = SymbolTableNode(MDEF, dec)
        else:
            sym = SymbolTableNode(MDEF, func)
        sym.plugin_generated = True
        info.names[funcname] = sym

    add_method('_replace', ret=selftype, args=[Argument(var, var.type, EllipsisExpr(), ARG_NAMED_OPT) for var in vars])

    def make_init_arg(var: Var) -> Argument:
        # Fields with defaults become optional constructor arguments.
        default = default_items.get(var.name, None)
        kind = (ARG_POS if (default is None) else ARG_OPT)
        return Argument(var, var.type, default, kind)

    add_method('__new__', ret=selftype, args=[make_init_arg(var) for var in vars], is_new=True)
    add_method('_asdict', args=[], ret=ordereddictype)
    add_method('_make', ret=selftype, is_classmethod=True, args=[Argument(Var('iterable', iterable_type), iterable_type, None, ARG_POS)])
    self_tvar_expr = TypeVarExpr(SELF_TVAR_NAME, ((info.fullname + '.') + SELF_TVAR_NAME), [], info.tuple_type, AnyType(TypeOfAny.from_omitted_generics))
    info.names[SELF_TVAR_NAME] = SymbolTableNode(MDEF, self_tvar_expr)
    return info
def save_namedtuple_body(self, named_tuple_info: TypeInfo) -> Iterator[None]:
    """Temporarily detach the synthesized namedtuple member table while the
    class body is analyzed, then merge the two tables back together.

    NOTE(review): single-yield Iterator[None] — presumably wrapped with
    @contextmanager; the decorator is not visible in this chunk.
    """
    nt_names = named_tuple_info.names
    named_tuple_info.names = SymbolTable()
    (yield)
    # Reject user definitions that would clobber namedtuple machinery.
    for prohibited in NAMEDTUPLE_PROHIBITED_NAMES:
        if (prohibited in named_tuple_info.names):
            if (nt_names.get(prohibited) is named_tuple_info.names[prohibited]):
                # Same node as the synthesized one — not a user redefinition.
                continue
            ctx = named_tuple_info.names[prohibited].node
            assert (ctx is not None)
            self.fail(f'Cannot overwrite NamedTuple attribute "{prohibited}"', ctx)
    # Restore the synthesized names, preserving user methods and renaming
    # other colliding user symbols.
    for (key, value) in nt_names.items():
        if (key in named_tuple_info.names):
            if (key == '__doc__'):
                # Keep the user-provided docstring.
                continue
            sym = named_tuple_info.names[key]
            if (isinstance(sym.node, (FuncBase, Decorator)) and (not sym.plugin_generated)):
                # A user-defined method wins over the synthesized one.
                continue
            r_key = get_unique_redefinition_name(key, named_tuple_info.names)
            named_tuple_info.names[r_key] = sym
        named_tuple_info.names[key] = value
def fail(self, msg: str, ctx: Context) -> None:
    """Report an error at `ctx` via the semantic-analyzer API."""
    self.api.fail(msg, ctx)
class BoundFileCollection(BoundFile):
    """A bound collection of files; views are produced per file via iter_views."""

    def __init__(self, unbound_file_collection, directory_format, path_maker):
        super().__init__(unbound_file_collection, directory_format)
        self._path_maker = path_maker

    def view(self, view_type):
        # Single-view access is meaningless for a whole collection.
        raise NotImplementedError('Use `iter_views` instead.')

    def iter_views(self, view_type):
        """Yield (path relative to the collection root, transformed view) for
        every file under the root whose relative path matches pathspec,
        in sorted order."""
        base = pathlib.Path(self._directory_format.path)
        matching = []
        for candidate in sorted(base.glob('**/*')):
            if re.match(self.pathspec, str(candidate.relative_to(base))):
                matching.append(candidate)
        source = transform.ModelType.from_view_type(self.format)
        target = transform.ModelType.from_view_type(view_type)
        convert = source.make_transformation(target)
        for candidate in matching:
            yield (candidate.relative_to(base), convert(candidate))
class DummySumMetric(Metric[torch.Tensor]):
    """Minimal Metric keeping a running scalar sum (for tests/examples)."""
    def __init__(self: TDummySumMetric, *, device: Optional[torch.device]=None) -> None:
        super().__init__(device=device)
        # Registered metric state; moved/merged together with the metric.
        self._add_state('sum', torch.tensor(0.0, device=self.device))
    # NOTE(review): the bare `_mode()` calls below look like stripped
    # `@torch.inference_mode()` decorators — confirm against the original.
    _mode()
    def update(self: TDummySumMetric, x: torch.Tensor) -> TDummySumMetric:
        # Accumulate `x` into the running sum; returns self for chaining.
        self.sum += x
        return self
    _mode()
    def compute(self: TDummySumMetric) -> torch.Tensor:
        # Current running sum.
        return self.sum
    _mode()
    def merge_state(self: TDummySumMetric, metrics: Iterable[TDummySumMetric]) -> TDummySumMetric:
        # Fold other metric instances' sums into this one on this device.
        for metric in metrics:
            self.sum += metric.sum.to(self.device)
        return self
# NOTE(review): the bare `(frozen=True, slots=True)` below looks like a
# stripped `@dataclasses.dataclass(...)` decorator — confirm against the
# original source.
(frozen=True, slots=True)
class TeleporterNetworkNode(ResourceNode):
    """A node that belongs to a named teleporter network.

    Leaving requires the node to be unlocked and its own resource collected;
    collecting grants the resource of every unlocked node in the network.
    """
    is_unlocked: Requirement  # requirement for this node to be usable
    network: str  # identifier shared by all nodes in the same network
    requirement_to_activate: Requirement  # requirement to turn the node on
    def requirement_to_leave(self, context: NodeContext) -> Requirement:
        # Must be unlocked AND have this node's own resource collected.
        return RequirementAnd([self.is_unlocked, ResourceRequirement.simple(self.resource(context))])
    def resource(self, context: NodeContext) -> NodeResourceInfo:
        # The per-node resource representing "this teleporter was activated".
        return NodeResourceInfo.from_node(self, context)
    def can_collect(self, context: NodeContext) -> bool:
        """Collectable when already activated or activatable, and not yet
        fully collected network-wide."""
        resources = context.current_resources
        req = self.requirement_to_activate
        if (resources.has_resource(self.resource(context)) or req.satisfied(resources, 0, context.database)):
            return (not self.is_collected(context))
        else:
            return False
    def is_collected(self, context: NodeContext) -> bool:
        # Collected only when every unlocked node in the network has its resource.
        current_resources = context.current_resources
        return all((context.has_resource(node.resource(context)) for node in _all_nodes_in_network(context, self.network) if node.is_unlocked.satisfied(current_resources, 0, context.database)))
    def resource_gain_on_collect(self, context: NodeContext) -> ResourceGain:
        # Grant the resource of every currently-unlocked node in the network.
        for node in _all_nodes_in_network(context, self.network):
            if node.is_unlocked.satisfied(context.current_resources, 0, context.database):
                (yield (node.resource(context), 1))
    def connections_from(self, context: NodeContext) -> typing.Iterator[tuple[(Node, Requirement)]]:
        # Every other node of the network is reachable if it is unlocked.
        for node in _all_nodes_in_network(context, self.network):
            if (node != self):
                (yield (node, node.is_unlocked))
class Model(nn.Module):
    """FastText-style text classifier.

    Averages word, bigram and trigram embeddings over the sequence, then
    applies dropout and a two-layer MLP to produce class logits.
    """

    def __init__(self, config):
        super(Model, self).__init__()
        # Word embedding: fine-tunable pretrained table if given, otherwise
        # a fresh table with the last index reserved as padding.
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.embedding_ngram2 = nn.Embedding(config.n_gram_vocab, config.embed)
        self.embedding_ngram3 = nn.Embedding(config.n_gram_vocab, config.embed)
        self.dropout = nn.Dropout(config.dropout)
        self.fc1 = nn.Linear(config.embed * 3, config.hidden_size)
        self.fc2 = nn.Linear(config.hidden_size, config.num_classes)

    def forward(self, x):
        # x[0]: word ids, x[2]: bigram ids, x[3]: trigram ids
        # (x[1] is not consumed here — presumably sequence lengths).
        words, bigrams, trigrams = x[0], x[2], x[3]
        combined = torch.cat(
            (self.embedding(words), self.embedding_ngram2(bigrams), self.embedding_ngram3(trigrams)),
            -1,
        )
        pooled = combined.mean(dim=1)
        hidden = F.relu(self.fc1(self.dropout(pooled)))
        return self.fc2(hidden)
class InvoiceDialog(Factory.Popup):
    """Kivy popup showing one outgoing invoice: status, amount, and actions
    (pay, copy, share, delete, view payment log)."""
    def __init__(self, title, data, key):
        # Set before Popup.__init__ so the kv bindings see a valid status.
        self.status = PR_UNKNOWN
        Factory.Popup.__init__(self)
        self.app = App.get_running_app()
        self.title = title
        self.data = data  # raw invoice text used for copy/share
        self.key = key  # wallet invoice key
        invoice = self.app.wallet.get_invoice(key)
        self.amount_sat = invoice.get_amount_sat()
        self.amount_str = self.app.format_amount_and_units(self.amount_sat)
        self.description = invoice.message
        self.is_lightning = invoice.is_lightning()
        self.update_status()
        # Payment attempt logs only exist for lightning invoices.
        self.log = (self.app.wallet.lnworker.logs[self.key] if self.is_lightning else [])
    def update_status(self):
        """Refresh status-derived display fields from the wallet."""
        invoice = self.app.wallet.get_invoice(self.key)
        self.status = self.app.wallet.get_invoice_status(invoice)
        self.status_str = invoice.get_status_str(self.status)
        self.status_color = pr_color[self.status]
        self.can_pay = (self.status in [PR_UNPAID, PR_FAILED])
        if (self.can_pay and self.is_lightning and self.app.wallet.lnworker):
            # Warn when the amount exceeds current outbound channel capacity.
            if (self.amount_sat and (self.amount_sat > self.app.wallet.lnworker.num_sats_can_send())):
                self.warning = ((_('Warning') + ': ') + _('This amount exceeds the maximum you can currently send with your channels'))
    def on_dismiss(self):
        # Release the app-level reference to this popup.
        self.app.request_popup = None
    def copy_to_clipboard(self):
        Clipboard.copy(self.data)
        msg = _('Text copied to clipboard.')
        # Scheduled so the toast shows after the current UI event completes.
        Clock.schedule_once((lambda dt: self.app.show_info(msg)))
    def do_share(self):
        self.app.do_share(self.data, _('Share Invoice'))
        self.dismiss()
    def do_pay(self):
        invoice = self.app.wallet.get_invoice(self.key)
        self.app.send_screen.do_pay_invoice(invoice)
        self.dismiss()
    def delete_dialog(self):
        """Confirm, then delete the invoice and refresh the send screen."""
        from .question import Question
        def cb(result):
            if result:
                self.app.wallet.delete_invoice(self.key)
                self.dismiss()
                self.app.send_screen.update()
        d = Question(_('Delete invoice?'), cb)
        d.open()
    def show_log(self):
        # Show the formatted lightning payment-attempt log, if any.
        if self.log:
            log_str = (_('Payment log:') + '\n\n')
            for payment_attempt_log in self.log:
                (route_str, chan_str, message) = payment_attempt_log.formatted_tuple()
                log_str += (((chan_str + ' --- ') + message) + '\n')
            self.app.show_info(log_str)
class ProxyCompletionHeadSparse(nn.Module):
    """Sparse completion head: InstanceNorm -> ReLU -> Linear over Minkowski
    sparse tensors."""

    def __init__(self, channel_in: int, channel_out: int, truncation: int) -> None:
        super().__init__()
        # Kept for callers; not consumed in forward().
        self.truncation = truncation
        layers = [
            Me.MinkowskiInstanceNorm(channel_in),
            Me.MinkowskiReLU(),
            Me.MinkowskiLinear(channel_in, channel_out),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the head to sparse input `x`."""
        return self.network(x)
class CoreProperties:
    """Typed read/write access to the Dublin Core document properties stored
    on `element` (a core-properties XML proxy).

    Fix: the original defined each getter and its setter as two plain methods
    with the same name, so the setter silently shadowed the getter, and the
    stray bare `_status.setter` expressions (remnants of stripped decorators)
    raised NameError when the class body executed. Restored as proper
    read/write properties delegating to the underlying element.
    """

    def __init__(self, element):
        self._element = element

    @property
    def author(self):
        return self._element.author_text

    @author.setter
    def author(self, value):
        self._element.author_text = value

    @property
    def category(self):
        return self._element.category_text

    @category.setter
    def category(self, value):
        self._element.category_text = value

    @property
    def comments(self):
        return self._element.comments_text

    @comments.setter
    def comments(self, value):
        self._element.comments_text = value

    @property
    def content_status(self):
        return self._element.contentStatus_text

    @content_status.setter
    def content_status(self, value):
        self._element.contentStatus_text = value

    @property
    def created(self):
        return self._element.created_datetime

    @created.setter
    def created(self, value):
        self._element.created_datetime = value

    @property
    def identifier(self):
        return self._element.identifier_text

    @identifier.setter
    def identifier(self, value):
        self._element.identifier_text = value

    @property
    def keywords(self):
        return self._element.keywords_text

    @keywords.setter
    def keywords(self, value):
        self._element.keywords_text = value

    @property
    def language(self):
        return self._element.language_text

    @language.setter
    def language(self, value):
        self._element.language_text = value

    @property
    def last_modified_by(self):
        return self._element.lastModifiedBy_text

    @last_modified_by.setter
    def last_modified_by(self, value):
        self._element.lastModifiedBy_text = value

    @property
    def last_printed(self):
        return self._element.lastPrinted_datetime

    @last_printed.setter
    def last_printed(self, value):
        self._element.lastPrinted_datetime = value

    @property
    def modified(self):
        return self._element.modified_datetime

    @modified.setter
    def modified(self, value):
        self._element.modified_datetime = value

    @property
    def revision(self):
        return self._element.revision_number

    @revision.setter
    def revision(self, value):
        self._element.revision_number = value

    @property
    def subject(self):
        return self._element.subject_text

    @subject.setter
    def subject(self, value):
        self._element.subject_text = value

    @property
    def title(self):
        return self._element.title_text

    @title.setter
    def title(self, value):
        self._element.title_text = value

    @property
    def version(self):
        return self._element.version_text

    @version.setter
    def version(self, value):
        self._element.version_text = value
def get_ghz_po_para(n: int) -> Tuple[(QuantumCircuit, List[Parameter])]:
    """Return an n-qubit GHZ parity-oscillation circuit plus its parameters.

    Builds the simple GHZ preparation, a barrier, a parametrized U2(t, -t)
    rotation, and full measurement. Returns (circuit, [t, -t]).
    """
    q = QuantumRegister(n, 'q')
    delta = Parameter('t')
    deltaneg = Parameter('-t')
    circ = get_ghz_simple(n, measure=False)
    circ.barrier()
    # NOTE(review): `[q]` passes the whole register as the qubit argument of a
    # single-qubit U2 gate — presumably relying on Qiskit broadcasting the
    # gate over all qubits; confirm.
    circ.append(U2Gate(delta, deltaneg), [q])
    meas = get_measurement_circ(n, 'q', 'c', True)
    # NOTE(review): `circuit + circuit` composition is deprecated in recent
    # Qiskit (use `compose`); left as-is to preserve behavior.
    circ = (circ + meas)
    return (circ, [delta, deltaneg])
def test_initialize_auth(hatch, devpi, temp_dir_cache, helpers, published_project_name, config_file):
    """End-to-end: `hatch publish --initialize-auth` prompts for credentials,
    stores them, and a subsequent publish succeeds without re-prompting."""
    # Point publishing at the local devpi index with its self-signed CA.
    config_file.model.publish['index']['ca-cert'] = devpi.ca_cert
    config_file.model.publish['index']['repo'] = 'dev'
    config_file.model.publish['index']['repos'] = {'dev': devpi.repo}
    config_file.save()
    with temp_dir_cache.as_cwd():
        result = hatch('new', published_project_name)
        assert (result.exit_code == 0), result.output
    path = (temp_dir_cache / published_project_name)
    with path.as_cwd():
        # Only initialize auth (feed username + password on stdin);
        # nothing is published yet.
        result = hatch('publish', '--initialize-auth', input=f'''{devpi.user}
{devpi.auth}''')
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent(f'''
Enter your username [__TOKEN__]: {devpi.user}
Enter your credentials:{' '}
'''))
    with path.as_cwd():
        # Build a uniquely-versioned wheel so the upload cannot collide.
        current_version = timestamp_to_version(helpers.get_current_timestamp())
        result = hatch('version', current_version)
        assert (result.exit_code == 0), result.output
        result = hatch('build', '-t', 'wheel')
        assert (result.exit_code == 0), result.output
    build_directory = (path / 'dist')
    artifacts = list(build_directory.iterdir())
    with path.as_cwd():
        # Publishing now succeeds using the credentials stored above.
        result = hatch('publish', str(artifacts[0]))
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent(f'''
{artifacts[0].relative_to(path)} ... success
[{published_project_name}]
{devpi.repo}{published_project_name}/{current_version}/
'''))
# NOTE(review): the original source had a bare `(max_runs=3)` line here — a
# stripped retry decorator (e.g. `@flaky(max_runs=3)`); kept as a comment
# because the bare expression is not valid Python.
def test_tell_many():
    """tell_many() must produce a learner state identical to telling the same
    points one by one (including pending points and scale doublings)."""
    def f(x, offset=0.123214):
        a = 0.01
        return ((((np.sin((x ** 2)) + np.sin((x ** 5))) + ((a ** 2) / ((a ** 2) + ((x - offset) ** 2)))) + (x ** 2)) + (1e-05 * (x ** 3)))

    def f_vec(x, offset=0.123214):
        a = 0.01
        y = (x + ((a ** 2) / ((a ** 2) + ((x - offset) ** 2))))
        return [y, (0.5 * y), (y ** 2)]

    def assert_equal_dicts(d1, d2):
        # Compare two {x: y} dicts regardless of insertion order.
        (xs1, ys1) = zip(*sorted(d1.items()))
        (xs2, ys2) = zip(*sorted(d2.items()))
        ys1 = np.array(ys1, dtype=np.float64)
        ys2 = np.array(ys2, dtype=np.float64)
        np.testing.assert_almost_equal(xs1, xs2)
        np.testing.assert_almost_equal(ys1, ys2)

    def test_equal(l1, l2):
        # Full structural comparison of two learners.
        assert_equal_dicts(l1.neighbors, l2.neighbors)
        assert_equal_dicts(l1.neighbors_combined, l2.neighbors_combined)
        assert_equal_dicts(l1.data, l2.data)
        assert_equal_dicts(l2.losses, l1.losses)
        assert_equal_dicts(l2.losses_combined, l1.losses_combined)
        np.testing.assert_almost_equal(sorted(l1.pending_points), sorted(l2.pending_points))
        # BUG FIX: the original compared l1._bbox[1] to itself, which is
        # vacuously true; compare the two learners' bounding boxes instead.
        np.testing.assert_almost_equal(l1._bbox[1], l2._bbox[1])
        assert (l1._scale == l2._scale)
        assert (l1._bbox[0] == l2._bbox[0])

    for function in [f, f_vec]:
        learner = Learner1D(function, bounds=((- 1), 1))
        learner2 = Learner1D(function, bounds=((- 1), 1))
        simple(learner, npoints_goal=200)
        (xs, ys) = zip(*learner.data.items())
        # Add a point that triggers a scale doubling in both learners.
        x = 1e-06
        max_value = (1000000.0 if (learner.vdim == 1) else np.array((learner.vdim * [1000000.0])))
        learner.tell(x, max_value)
        learner2.tell(x, max_value)
        for x in xs:
            learner2.tell_pending(x)
        learner2.tell_many(xs, ys)
        test_equal(learner, learner2)

    def _random_run(learner, learner2, scale_doubling=True):
        # Interleave ask/tell_pending/tell_many with plain tell on a twin
        # learner; both must end up in the same state.
        if (not scale_doubling):
            # Pre-trigger the scale doubling so it does not happen mid-run.
            x = 1e-06
            max_value = 1000000.0
            learner.tell(x, max_value)
            learner2.tell(x, max_value)
        stash = []
        for _i in range(10):
            (xs, _) = learner.ask(10)
            for x in xs:
                learner2.tell_pending(x)
            random.shuffle(xs)
            # Hold back half of the asked points to answer later, out of order.
            for _ in range(5):
                stash.append(xs.pop())
            ys = [learner.function(x) for x in xs]
            learner.tell_many(xs, ys, force=True)
            for (x, y) in zip(xs, ys):
                learner2.tell(x, y)
            random.shuffle(stash)
            xs = [stash.pop() for _ in range(random.randint(1, 5))]
            ys = [learner.function(x) for x in xs]
            learner.tell_many(xs, ys, force=True)
            for (x, y) in zip(xs, ys):
                learner2.tell(x, y)
        if scale_doubling:
            # Trigger a late scale doubling in both learners.
            max_value = max(learner.data.values())
            x = 1e-06
            learner.tell(x, (max_value * 10))
            learner2.tell(x, (max_value * 10))

    learner = Learner1D(f, bounds=((- 1), 1))
    learner2 = Learner1D(f, bounds=((- 1), 1))
    _random_run(learner, learner2, scale_doubling=False)
    test_equal(learner, learner2)
    learner = Learner1D(f, bounds=((- 1), 1))
    learner2 = Learner1D(f, bounds=((- 1), 1))
    _random_run(learner, learner2, scale_doubling=True)
    test_equal(learner, learner2)
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=(- 1), device_id=0):
    """Compute rotated-box IoU between `boxes` (N) and `query_boxes` (K) on GPU.

    boxes / query_boxes: per-box arrays flattened and passed to
    rotate_iou_kernel_eval — presumably [x, y, w, h, angle] layout; TODO
    confirm against the kernel.
    criterion: denominator selector forwarded to the kernel (presumably
    -1 = standard union; confirm).
    Returns an (N, K) float32 IoU matrix.
    """
    boxes = boxes.astype(np.float32)
    query_boxes = query_boxes.astype(np.float32)
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    iou = np.zeros((N, K), dtype=np.float32)
    if ((N == 0) or (K == 0)):
        # Nothing to compare; avoid launching an empty kernel.
        return iou
    threadsPerBlock = (8 * 8)
    cuda.select_device(device_id)
    # One block tile per 64x64 chunk of the (N, K) pair grid.
    blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
    stream = cuda.stream()
    with stream.auto_synchronize():
        # Flatten host arrays and copy to device asynchronously on `stream`.
        boxes_dev = cuda.to_device(boxes.reshape([(- 1)]), stream)
        query_boxes_dev = cuda.to_device(query_boxes.reshape([(- 1)]), stream)
        iou_dev = cuda.to_device(iou.reshape([(- 1)]), stream)
        rotate_iou_kernel_eval[(blockspergrid, threadsPerBlock, stream)](N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)
        iou_dev.copy_to_host(iou.reshape([(- 1)]), stream=stream)
    # NOTE: `boxes` was rebound to float32 above, so this cast is effectively
    # a no-op (always float32), not a restore of the caller's dtype.
    return iou.astype(boxes.dtype)
class UserPlan(models.Model):
    """A user's plan entry with optional watchers ("attention" users).

    NOTE(review): every verbose_name, choice label and DateTimeField default
    is an empty string here — they look like stripped non-ASCII labels;
    confirm against the original source (empty-string defaults for
    DateTimeField would fail validation).
    """
    # (value, label) choices for `status`; labels appear stripped.
    plan_status = ((0, ''), (1, ''))
    # Owner of the plan.
    user = models.ForeignKey('UserProfile', related_name='self_user', on_delete=models.CASCADE, verbose_name='')
    # Users watching this plan.
    attention = models.ManyToManyField('UserProfile', related_name='attention_user', blank=True, verbose_name='')
    title = models.CharField(max_length=32, verbose_name='')
    content = models.TextField(verbose_name='')
    status = models.PositiveSmallIntegerField(choices=plan_status, verbose_name='', default=0)
    start_time = models.DateTimeField(default='', verbose_name='')
    end_time = models.DateTimeField(default='', verbose_name='')
    add_time = models.DateTimeField(auto_now_add=True, verbose_name='')
    class Meta():
        db_table = 'ops_users_plan'
        verbose_name = ''
        verbose_name_plural = verbose_name
        # One plan title per user.
        unique_together = ('title', 'user')
class Selector(Layer):
    """Keras layer that emits a 1.0/0.0 mask where the input equals `select`."""

    def __init__(self, select, **kwargs):
        super(Selector, self).__init__(**kwargs)
        self.select = select
        # Constant tensor compared against the input elementwise.
        self.select_neuron = K.constant(value=self.select)

    def build(self, input_shape):
        super(Selector, self).build(input_shape)

    def call(self, x):
        # Elementwise equality test, cast to a float mask.
        matches = K.equal(x, self.select_neuron)
        return K.cast(matches, dtype='float32')

    def get_config(self):
        """Serialize `select` alongside the base layer config."""
        base_config = super(Selector, self).get_config()
        merged = list(base_config.items()) + list({'select': self.select}.items())
        return dict(merged)

    def compute_output_shape(self, input_shape):
        # Mask has the same shape as the input.
        return input_shape
class Appr(Inc_Learning_Appr):
    """Incremental-learning approach combining LUCIR with PODNet losses.

    Trains with an NCA (or cross-entropy) classification loss plus two
    distillation terms against a frozen copy of the previous-task model:
    POD-flat (cosine loss on the final embedding) and POD-spatial (pooled
    intermediate feature maps).
    """
    def __init__(self, model, device, nepochs=160, lr=0.1, lr_min=0.0001, lr_factor=10, lr_patience=8, clipgrad=10000, momentum=0.9, wd=0.0005, multi_softmax=False, wu_nepochs=0, wu_lr_factor=1, fix_bn=False, eval_on_train=False, logger=None, exemplars_dataset=None, lamb=5.0, pod_flat_factor=1.0, pod_spatial_factor=3.0, remove_adapt_lamda=False, remove_pod_flat=False, remove_pod_spatial=False, remove_cross_entropy=False, pod_pool_type='spatial'):
        super(Appr, self).__init__(model, device, nepochs, lr, lr_min, lr_factor, lr_patience, clipgrad, momentum, wd, multi_softmax, wu_nepochs, wu_lr_factor, fix_bn, eval_on_train, logger, exemplars_dataset)
        self.lamb = lamb
        self.adapt_lamda = (not remove_adapt_lamda)  # scale lambda by old/new class ratio
        self.lamda = self.lamb
        self.ref_model = None  # frozen snapshot of the model after the previous task
        self.warmup_loss = self.warmup_luci_loss
        self.pod_flat = (not remove_pod_flat)
        self.pod_spatial = (not remove_pod_spatial)
        self.nca_loss = (not remove_cross_entropy)
        self._pod_flat_factor = pod_flat_factor
        self._pod_spatial_factor = pod_spatial_factor
        self._pod_pool_type = pod_pool_type
        # NOTE(review): _n_classes/_task_size are never updated in the code
        # visible here, which would divide by zero in criterion() for t > 0 —
        # confirm they are maintained elsewhere.
        self._n_classes = 0
        self._task_size = 0
        self.task_percent = 0
        self.lambda_c_base = 5
        self.lambda_f_base = 1
        # LUCIR-style approaches are designed around an exemplar memory.
        have_exemplars = (self.exemplars_dataset.max_num_exemplars + self.exemplars_dataset.max_num_exemplars_per_class)
        if (not have_exemplars):
            warnings.warn('Warning: LUCIR is expected to use exemplars. Check documentation.')

    # NOTE(review): this method and `extra_parser`/`warmup_luci_loss` below
    # take no `self` — presumably decorated with @staticmethod in the
    # original source; confirm.
    def exemplars_dataset_class():
        # Exemplar-memory implementation used by this approach.
        return ExemplarsDataset

    def extra_parser(args):
        """Parse approach-specific command-line arguments."""
        parser = ArgumentParser()
        parser.add_argument('--lamb', default=5.0, type=float, required=False, help='Trade-off for distillation loss (default=%(default)s)')
        parser.add_argument('--remove-adapt-lamda', action='store_true', required=False, help='Deactivate adapting lambda according to the number of classes (default=%(default)s)')
        parser.add_argument('--pod-spatial-factor', default=3.0, type=float, required=False, help='Scaling factor for pod spatial loss (default=%(default)s)')
        parser.add_argument('--pod-flat-factor', default=1.0, type=float, required=False, help='Scaling factor for pod flat loss (default=%(default)s)')
        parser.add_argument('--remove-pod-flat', action='store_true', required=False, help='Deactivate POD flat loss constraint (default=%(default)s)')
        parser.add_argument('--remove-pod-spatial', action='store_true', required=False, help='Deactivate POD spatial loss constraint (default=%(default)s)')
        parser.add_argument('--pod-pool-type', default='spatial', type=str, choices=['channels', 'width', 'height', 'gap', 'spatial'], help='POD spatial pooling dimension used (default=%(default)s)', metavar='POOLTYPE')
        parser.add_argument('--remove-cross-entropy', action='store_true', required=False, help='Deactivate cross entropy loss and use NCA loss instead (default=%(default)s)')
        return parser.parse_known_args(args)

    def _get_optimizer(self):
        """SGD over the backbone plus only the newest head."""
        params = (list(self.model.model.parameters()) + list(self.model.heads[(- 1)].parameters()))
        return torch.optim.SGD(params, lr=self.lr, weight_decay=self.wd, momentum=self.momentum)

    def _get_scheduler(self, optimizer):
        # Cosine annealing over the full training schedule.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=self.nepochs)
        return scheduler

    def pre_train_process(self, t, trn_loader):
        """Swap the new head for a cosine head and freeze old heads before task t."""
        self.t = t
        print('have {} paramerters in total'.format(sum((x.numel() for x in self.model.parameters()))))
        # Replace the plain linear head with a proxy-based cosine head.
        self.model.heads[(- 1)] = CosineLinear(self.model.heads[(- 1)].in_features, self.model.heads[(- 1)].out_features, nb_proxy=10, to_reduce=True)
        self.model.to(self.device)
        if (t > 0):
            # Share the learnable temperature (sigma) with the previous head
            # and freeze all previous heads' parameters.
            self.model.heads[(- 1)].sigma = self.model.heads[(- 2)].sigma
            for h in self.model.heads[:(- 1)]:
                for param in h.parameters():
                    param.requires_grad = False
            self.model.heads[(- 1)].sigma.requires_grad = True
            if self.adapt_lamda:
                # lambda scaled by sqrt(#old classes / #new classes).
                self.lamda = (self.lamb * math.sqrt((sum([h.out_features for h in self.model.heads[:(- 1)]]) / self.model.heads[(- 1)].out_features)))
        super().pre_train_process(t, trn_loader)

    def train_loop(self, t, trn_loader, val_loader):
        """Add exemplars to the training loader, train, then refresh exemplars."""
        if ((len(self.exemplars_dataset) > 0) and (t > 0)):
            trn_loader = torch.utils.data.DataLoader((trn_loader.dataset + self.exemplars_dataset), batch_size=trn_loader.batch_size, shuffle=True, num_workers=trn_loader.num_workers, pin_memory=trn_loader.pin_memory)
        super().train_loop(t, trn_loader, val_loader)
        self.exemplars_dataset.collect_exemplars(self.model, trn_loader, val_loader.dataset.transform)

    def post_train_process(self, t, trn_loader):
        """Snapshot and freeze the trained model as the distillation reference."""
        self.ref_model = copy.deepcopy(self.model)
        self.ref_model.eval()
        # NOTE(review): heads are flipped back to train mode on the frozen
        # reference — presumably so sigma scaling matches training; confirm.
        for h in self.ref_model.heads:
            h.train()
        self.ref_model.freeze_all()

    def train_epoch(self, t, trn_loader):
        """Run one training epoch over trn_loader."""
        self.model.train()
        if (self.fix_bn and (t > 0)):
            self.model.freeze_bn()
        for (images, targets) in trn_loader:
            (images, targets) = (images.to(self.device), targets.to(self.device))
            (outputs, features_) = self.model(images, return_features=True)
            fmaps = features_['fmaps']
            features = features_['features']
            ref_features = None
            ref_fmaps = None
            if (t > 0):
                # Reference activations from the frozen previous-task model.
                (_, ref_features_) = self.ref_model(images, return_features=True)
                ref_features = ref_features_['features']
                ref_fmaps = ref_features_['fmaps']
            loss = self.criterion(t, outputs, targets, features, fmaps, ref_features, ref_fmaps)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

    def eval(self, t, val_loader):
        """Evaluate on val_loader; returns (loss, task-aware acc, task-agnostic acc)."""
        with torch.no_grad():
            (total_loss, total_acc_taw, total_acc_tag, total_num) = (0, 0, 0, 0)
            self.model.eval()
            for (images, targets) in val_loader:
                (outputs, features_) = self.model(images.to(self.device), return_features=True)
                fmaps = features_['fmaps']
                features = features_['features']
                ref_features = None
                ref_fmaps = None
                if (t > 0):
                    (_, ref_features_) = self.ref_model(images.to(self.device), return_features=True)
                    ref_features = ref_features_['features']
                    ref_fmaps = ref_features_['fmaps']
                loss = self.criterion(t, outputs, targets.to(self.device), features, fmaps, ref_features, ref_fmaps)
                (hits_taw, hits_tag) = self.calculate_metrics(outputs, targets)
                # Accumulate weighted by batch size for correct averages.
                total_loss += (loss.item() * len(targets))
                total_acc_taw += hits_taw.sum().item()
                total_acc_tag += hits_tag.sum().item()
                total_num += len(targets)
        return ((total_loss / total_num), (total_acc_taw / total_num), (total_acc_tag / total_num))

    def criterion(self, t, outputs, targets, features, fmaps, ref_features=None, ref_fmaps=None, ref_outputs=None):
        """Total loss: NCA/CE classification + POD-flat + POD-spatial distillation."""
        loss = 0
        # Concatenate all task heads into one logit vector.
        outputs = torch.cat(outputs, dim=1)
        if self.nca_loss:
            lsc_loss = nca(outputs, targets)
            loss += lsc_loss
        else:
            ce_loss = nn.CrossEntropyLoss(None)(outputs, targets)
            loss += ce_loss
        if (ref_features is not None):
            if self.pod_flat:
                # Cosine-embedding distillation on the final embedding.
                factor = (self._pod_flat_factor * math.sqrt((self._n_classes / self._task_size)))
                pod_flat_loss = (F.cosine_embedding_loss(features, ref_features.detach(), torch.ones(features.shape[0]).to(self.device)) * factor)
                loss += pod_flat_loss
            if self.pod_spatial:
                # Pooled distillation on intermediate feature maps.
                factor = (self._pod_spatial_factor * math.sqrt((self._n_classes / self._task_size)))
                spatial_loss = (pod_spatial_loss(fmaps, ref_fmaps, collapse_channels=self._pod_pool_type) * factor)
                loss += spatial_loss
        return loss

    # NOTE(review): no `self` — presumably @staticmethod in the original.
    def warmup_luci_loss(outputs, targets):
        """Warm-up loss on the un-scaled ('wosigma') logits when available."""
        if (type(outputs) == dict):
            return torch.nn.functional.cross_entropy(outputs['wosigma'], targets)
        else:
            return torch.nn.functional.cross_entropy(outputs, targets)
def test_bloch_redfield_tensor_spectral_callable():
    """Bloch-Redfield tensor construction accepts a plain callable spectrum."""
    dim = 5
    H = qutip.num(dim)
    a = qutip.destroy(dim)
    coupling = a + a.dag()
    # Flat spectrum that is non-zero for positive frequencies only.
    spectra = lambda w: (w > 0) * 0.5
    R_eigs, evecs = bloch_redfield_tensor(
        H=H, a_ops=[(coupling, spectra)], c_ops=[a ** 2], fock_basis=False)
    assert isinstance(R_eigs, qutip.Qobj)
    assert isinstance(evecs, qutip.Qobj)
class IRFFTOp(Op):
    """Inverse real-input FFT as a graph Op.

    The input is a real tensor whose trailing axis of size 2 packs the
    (real, imaginary) parts of a complex array; the leading axis is a batch
    axis. The output drops that trailing axis. NOTE(review): the result is
    multiplied by ``s.prod()`` to undo numpy's 1/n normalisation — confirm
    this pairs with the forward RFFT Op's scaling convention.
    """
    __props__ = ()

    def output_type(self, inp):
        # Output has one dimension fewer than the input: the trailing
        # real/imag axis disappears.
        return TensorType(inp.dtype, shape=((None,) * (inp.type.ndim - 1)))

    def make_node(self, a, s=None):
        a = as_tensor_variable(a)
        if (a.ndim < 3):
            raise TypeError((f'{self.__class__.__name__}: input must have dimension >= 3, with ' + 'first dimension batches and last real/imag parts'))
        if (s is None):
            # Default transform shape: the input's non-batch dims, with the
            # last one expanded to the full real length ((n - 1) * 2), the
            # inverse of rfft's n // 2 + 1 truncation.
            s = a.shape[1:(- 1)]
            s = set_subtensor(s[(- 1)], ((s[(- 1)] - 1) * 2))
            s = as_tensor_variable(s)
        else:
            s = as_tensor_variable(s)
            if (s.dtype not in integer_dtypes):
                raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return Apply(self, [a, s], [self.output_type(a)()])

    def perform(self, node, inputs, output_storage):
        a = inputs[0]
        s = inputs[1]
        # Reassemble the complex array from the packed (real, imag) pair.
        inp = (a[(..., 0)] + (1j * a[(..., 1)]))
        out = np.fft.irfftn(inp, s=tuple(s))
        # Rescale to undo numpy's normalisation (see class docstring).
        output_storage[0][0] = (out * s.prod()).astype(a.dtype)

    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        gf = rfft_op(gout, s)
        # Double the "middle" frequency bins of the gradient: every bin except
        # DC (and Nyquist, for even lengths) is represented twice in the full
        # spectrum but only once in the half-spectrum returned by rfft.
        idx = ((([slice(None)] * (gf.ndim - 2)) + [slice(1, ((s[(- 1)] // 2) + (s[(- 1)] % 2)))]) + [slice(None)])
        gf = set_subtensor(gf[idx], (gf[idx] * 2))
        # No gradient flows to the shape argument.
        return [gf, DisconnectedType()()]

    def connection_pattern(self, node):
        # Differentiable w.r.t. the data input only, not the shape input.
        return [[True], [False]]
def load_word_vector_file(vec_path: str, vocab: Optional[Iterable[str]] = None):
    """Load word vectors from a ``.pkl``, ``.txt.gz`` or plain-text file.

    Text lines have the form ``<word> <float> <float> ...``. When `vocab`
    is given, only words whose lowercase form appears in it are kept
    (``.pkl`` files are returned as-is, unpruned).

    Returns a dict mapping word -> np.float32 vector for text inputs, or
    whatever object the pickle contains for ``.pkl`` inputs.
    """
    if vocab is not None:
        vocab = {w.lower() for w in vocab}
    if vec_path.endswith('.pkl'):
        with open(vec_path, 'rb') as f:
            return pickle.load(f)
    if vec_path.endswith('.txt.gz'):
        # BUG FIX: gzip.open defaults to binary mode, and passing `encoding`
        # in binary mode raises ValueError. Text mode ('rt') is required for
        # line-wise decoded iteration.
        handle = lambda p: gzip.open(p, 'rt', encoding='utf-8', errors='ignore')
    else:
        handle = lambda p: open(p, 'r', encoding='utf-8', errors='ignore')
    pruned_dict = {}
    with handle(vec_path) as fh:
        for line in fh:
            word, _, rest = line.partition(' ')
            if vocab is None or word.lower() in vocab:
                # rstrip('\n') instead of slicing off the last character, so a
                # final line without a trailing newline does not lose a digit.
                pruned_dict[word] = np.array(
                    [float(x) for x in rest.rstrip('\n').split(' ')],
                    dtype=np.float32)
    return pruned_dict
def get_args_parser():
    """Build the command-line argument parser for the GFNet evaluation script."""
    parser = argparse.ArgumentParser('GFNet evaluation script', add_help=False)
    # Model / input settings
    parser.add_argument('--batch-size', type=int, default=128)
    parser.add_argument('--arch', type=str, default='deit_small', help='Name of model to train')
    parser.add_argument('--input-size', type=int, default=224, help='images input size')
    # Dataset settings
    parser.add_argument('--data-path', type=str, default='/datasets01/imagenet_full_size/061417/', help='dataset path')
    parser.add_argument('--data-set', type=str, default='IMNET',
                        choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'],
                        help='Image Net dataset path')
    parser.add_argument('--inat-category', type=str, default='name',
                        choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
                        help='semantic granularity')
    # Misc
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--model-path', default='', help='resume from checkpoint')
    parser.add_argument('--num_workers', type=int, default=10)
    # Paired flags toggling DataLoader pinned memory (defaults to on).
    parser.add_argument('--pin-mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem', help='')
    parser.set_defaults(pin_mem=True)
    return parser
class Gen_Video():
    """Render a lyric video: one caption per beat interval over the source audio."""

    def __init__(self):
        pass

    def Gen_Video(self, beat_times, mp3path, uuid):
        """Compose "<uuid>.mp4" from the lyrics in "<uuid>.txt" and `mp3path`."""
        font_path = '../font/heimi.TTF'
        # Lyrics were written earlier, one caption per line.
        with open(uuid + '.txt', 'r', encoding='utf-8') as fh:
            lyric_lines = fh.read().split('\n')
        clips = []
        for idx, start in enumerate(beat_times[:-1]):
            # Stop when we run out of lyric lines.
            if idx >= len(lyric_lines):
                break
            print(f'{idx + 1}/{len(beat_times)}{lyric_lines[idx]}')
            caption = TextClip(lyric_lines[idx], fontsize=320 // 8, color='white',
                               size=(320, 640), method='caption',
                               font=font_path).set_start(start).set_end(beat_times[idx + 1])
            clips.append(caption.set_pos('center'))
        composite = CompositeVideoClip(clips)
        final_video = composite.set_audio(AudioFileClip(mp3path))
        final_video.write_videofile(str(uuid) + '.mp4', fps=30, codec='mpeg4',
                                    preset='ultrafast', audio_codec='libmp3lame',
                                    threads=4)
def fix_database_car_1(sqlite_file):
    """Rebuild `cars_data` so MPG and Horsepower columns have REAL affinity.

    SQLite cannot ALTER a column's type, so the table is dropped and
    recreated with the corrected schema (primary and foreign keys preserved)
    and all rows are copied back.
    """
    print('Editing database', sqlite_file)
    conn = sqlite3.connect(sqlite_file)
    conn.text_factory = lambda b: b.decode(errors='ignore')
    try:
        c = conn.cursor()
        tbl_name = 'cars_data'
        # Snapshot schema, foreign keys and data before dropping the table.
        c.execute(f'PRAGMA table_info({tbl_name});')
        col_data = c.fetchall()
        c.execute(f'PRAGMA foreign_key_list({tbl_name});')
        foreign_keys_out = c.fetchall()
        c.execute(f'SELECT * FROM {tbl_name}')
        tbl_data = c.fetchall()
        # Retype the affected columns. table_info rows are
        # (cid, name, type, notnull, dflt_value, pk).
        for i, d in enumerate(col_data):
            if d[1] in ('MPG', 'Horsepower'):
                col_data[i] = d[:2] + ('REAL',) + d[3:]
        c.execute('PRAGMA foreign_keys = OFF;')
        c.execute('PRAGMA legacy_alter_table=ON;')
        c.execute(f'DROP TABLE {tbl_name};')
        column_list = [f'{col[1]} {col[2]}' for col in col_data]
        column_list.append(f"PRIMARY KEY ({', '.join(col[1] for col in col_data if col[5])})")
        for fk in foreign_keys_out:
            column_list.append(f'FOREIGN KEY({fk[3]}) REFERENCES {fk[2]}({fk[4]})')
        c.execute(f"CREATE TABLE {tbl_name} ( {', '.join(column_list)} );")
        column_names = [col[1] for col in col_data]
        # BUG FIX: the original interpolated raw values via str(), which
        # produces invalid SQL for text values (no quoting) and is injection
        # prone. Use placeholder binding instead.
        placeholders = ', '.join('?' for _ in column_names)
        insert_sql = f"INSERT INTO {tbl_name} ( {', '.join(column_names)} ) VALUES ( {placeholders} );"
        c.executemany(insert_sql, tbl_data)
        c.execute('PRAGMA foreign_keys = ON;')
        c.execute('PRAGMA legacy_alter_table=OFF;')
        conn.commit()
    finally:
        # Always release the connection, even if the rebuild fails midway.
        conn.close()
class CommandTest(EvenniaTest):
    """Test helper that executes an Evennia Command end-to-end and captures output."""

    def call(self, cmdobj, args, msg=None, cmdset=None, noansi=True, caller=None, receiver=None, cmdstring=None, obj=None, inputs=None, raw_string=None):
        """Run `cmdobj` as `caller`, capturing everything sent to `receiver`.

        When `msg` is given, raises AssertionError unless the captured output
        starts with it (an empty `msg` asserts no output). Returns the
        captured, ANSI-stripped text.
        """
        caller = (caller if caller else self.char1)
        receiver = (receiver if receiver else caller)
        # Populate the attributes the command framework normally sets up.
        cmdobj.caller = caller
        cmdobj.cmdname = (cmdstring if cmdstring else cmdobj.key)
        cmdobj.raw_cmdname = cmdobj.cmdname
        cmdobj.cmdstring = cmdobj.cmdname
        cmdobj.args = args
        cmdobj.cmdset = cmdset
        cmdobj.session = SESSIONS.session_from_sessid(1)
        cmdobj.account = self.account
        cmdobj.raw_string = (raw_string if (raw_string is not None) else ((cmdobj.key + ' ') + args))
        cmdobj.obj = (obj or (caller if caller else self.char1))
        old_msg = receiver.msg
        inputs = (inputs or [])
        try:
            # Replace receiver.msg with a Mock to record all outgoing messages.
            receiver.msg = Mock()
            if cmdobj.at_pre_cmd():
                # NOTE(review): this early return leaves receiver.msg mocked
                # (old_msg is only restored at the end) — confirm intended.
                return
            cmdobj.parse()
            ret = cmdobj.func()
            if isinstance(ret, types.GeneratorType):
                # The command yielded (e.g. waiting for a prompt); feed it the
                # queued `inputs` until it finishes.
                while True:
                    try:
                        inp = (inputs.pop() if inputs else None)
                        if inp:
                            try:
                                ret.send(inp)
                            except TypeError:
                                # Generator not started yet: advance once, then send.
                                next(ret)
                                ret = ret.send(inp)
                        else:
                            next(ret)
                    except StopIteration:
                        break
            cmdobj.at_post_cmd()
        except StopIteration:
            pass
        except InterruptCommand:
            pass
        # Flatten every call recorded on the mocked receiver.msg into strings.
        stored_msg = [(args[0] if (args and args[0]) else kwargs.get('text', utils.to_str(kwargs))) for (name, args, kwargs) in receiver.msg.mock_calls]
        stored_msg = [(str(smsg[0]) if isinstance(smsg, tuple) else str(smsg)) for smsg in stored_msg]
        if (msg is not None):
            msg = str(msg)
            msg_sep = ('|' if noansi else '||')
            returned_msg = msg_sep.join((_RE.sub('', ansi.parse_ansi(mess, strip_ansi=noansi)) for mess in stored_msg)).strip()
            # Empty expected message must produce no output; otherwise the
            # output must start with the expected message.
            if (((msg == '') and returned_msg) or (not returned_msg.startswith(msg.strip()))):
                sep1 = (((('\n' + ('=' * 30)) + 'Wanted message') + ('=' * 34)) + '\n')
                sep2 = (((('\n' + ('=' * 30)) + 'Returned message') + ('=' * 32)) + '\n')
                sep3 = ('\n' + ('=' * 78))
                retval = ((((sep1 + msg.strip()) + sep2) + returned_msg) + sep3)
                raise AssertionError(retval)
        else:
            returned_msg = '\n'.join((str(msg) for msg in stored_msg))
            returned_msg = ansi.parse_ansi(returned_msg, strip_ansi=noansi).strip()
        # Restore the receiver's real msg method before returning.
        receiver.msg = old_msg
        return returned_msg
def test_env_via_toml_bad(testdir: pytest.Testdir) -> None:
    """An unparseable pyproject.toml must abort the run with a usage error."""
    toml_file = Path(str(testdir.tmpdir)) / 'pyproject.toml'
    toml_file.write_text('bad toml', encoding='utf-8')
    outcome = testdir.runpytest()
    # Exit code 4 is pytest's "usage error".
    assert outcome.ret == 4
    expected = f"ERROR: {toml_file}: Expected '=' after a key in a key/value pair (at line 1, column 5)"
    assert outcome.errlines == [expected, '']
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenization against a fixed vocab."""

    def __init__(self, vocab, unk_token='[UNK]', max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Split `text` into word pieces; whole words that cannot be matched
        (or that are overly long) map to the UNK token."""
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            # Overly long words are not worth matching; emit UNK directly.
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            pieces = []
            start = 0
            unmatched = False
            while start < len(chars):
                # Find the longest vocab entry beginning at `start`;
                # non-initial pieces carry the '##' continuation prefix.
                end = len(chars)
                piece = None
                while start < end:
                    candidate = ''.join(chars[start:end])
                    if start > 0:
                        candidate = '##' + candidate
                    if candidate in self.vocab:
                        piece = candidate
                        break
                    end -= 1
                if piece is None:
                    unmatched = True
                    break
                pieces.append(piece)
                start = end
            output_tokens.extend([self.unk_token] if unmatched else pieces)
        return output_tokens
class RGBTo01Normalization(ImageNormalization):
    """Map uint8-range RGB images from [0, 255] into [0, 1]."""
    leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False

    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
        # Sanity-check that pixel values already live in the uint8 range.
        assert image.min() >= 0, 'RGB images are uint 8, for whatever reason I found pixel values smaller than 0. Your images do not seem to be RGB images'
        assert image.max() <= 255, 'RGB images are uint 8, for whatever reason I found pixel values greater than 255. Your images do not seem to be RGB images'
        return image.astype(self.target_dtype) / 255.0
def __CompareString(ql: Qiling, address: int, params) -> int:
    """CompareString hook: compare two strings, honoring explicit length caps."""
    s1 = params['lpString1']
    s2 = params['lpString2']
    n1 = params['cchCount1']
    n2 = params['cchCount2']
    # A positive count limits how many characters take part in the comparison;
    # a non-positive count means "use the whole string".
    if n1 > 0:
        s1 = s1[:n1]
    if n2 > 0:
        s2 = s2[:n2]
    return cmp(s1, s2)
def get_defining_class(meth: Callable[..., Any]) -> Optional[Type[Any]]:
    """Return the class that defines `meth`, or None if it cannot be determined.

    Handles functools.partial wrappers, bound methods (including builtin bound
    methods), plain functions (resolved via __qualname__) and method
    descriptors such as dict.get (via __objclass__).
    """
    if isinstance(meth, functools.partial):
        # Unwrap the partial and inspect the underlying callable.
        return get_defining_class(meth.func)
    if (inspect.ismethod(meth) or (inspect.isbuiltin(meth) and (getattr(meth, '__self__') is not None) and getattr(meth.__self__, '__class__'))):
        # Bound method: walk the receiver's MRO for the declaring class.
        for cls in inspect.getmro(meth.__self__.__class__):
            if (meth.__name__ in cls.__dict__):
                return cls
        meth = getattr(meth, '__func__', meth)
    if inspect.isfunction(meth):
        # Plain function: resolve the owner from its qualified name.
        # BUG FIX: pass a default of None so a qualname that does not resolve
        # in the module (decorated/exec'd functions) falls through instead of
        # raising AttributeError.
        cls = getattr(inspect.getmodule(meth),
                      meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0],
                      None)
        if isinstance(cls, type):
            return cls
    # Method descriptors (e.g. dict.get) expose their owner via __objclass__.
    return cast(type, getattr(meth, '__objclass__', None))
def create_test_data_loader(args):
    """Build a batch-size-1, in-order test DataLoader for `args.dataset`.

    Raises NotImplementedError for any dataset other than 'mvtec' or 'btad'.
    """
    loader_kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
    if args.dataset == 'mvtec':
        test_dataset = MVTecDataset(args, is_train=False)
    elif args.dataset == 'btad':
        test_dataset = BTADDataset(args, is_train=False)
    else:
        raise NotImplementedError('{} is not supported dataset!'.format(args.dataset))
    # Evaluation runs one sample at a time, in dataset order, keeping all samples.
    return torch.utils.data.DataLoader(test_dataset, batch_size=1,
                                       shuffle=False, drop_last=False,
                                       **loader_kwargs)
def test_simdiag_orthonormal_eigenvectors():
    """simdiag must return an orthonormal set of common eigenvectors."""
    a = np.array([[1, 0, 1, -1, 0],
                  [0, 4, 0, 0, 1],
                  [1, 0, 4, 1, 0],
                  [-1, 0, 1, 4, 0],
                  [0, 1, 0, 0, 4]])
    _, evecs = qutip.simdiag([qutip.Qobj(a), qutip.qeye(5)])
    evecs = np.array([evec.full() for evec in evecs]).squeeze()
    # BUG FIX: the matrix-multiplication operator was missing
    # ("evecs evecs.conj().T" is a syntax error); V @ V^dagger == I is the
    # orthonormality condition being asserted.
    np.testing.assert_allclose(evecs @ evecs.conj().T, np.eye(len(evecs)), atol=1e-13)
# BUG FIX: these decorator lines began with a bare ".parametrize" (a syntax
# error) — the "@pytest.mark" prefix was evidently lost; restored below.
@pytest.mark.parametrize('angles', [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[3, 4, 5], [10, 11, 12]]])
@pytest.mark.parametrize('kappa', [*range(1, 12)])
@pytest.mark.parametrize('constructor', [construct_custom_prga, construct_prga_with_phase, construct_prga_with_identity])
def test_programmable_rotation_gate_array(angles, kappa, constructor):
    """Decomposed circuit must reproduce the expected rotations (and leave ancillas clean)."""
    rotation_gate = cirq.X
    programmable_rotation_gate = constructor(*angles, kappa=kappa, rotation_gate=rotation_gate)
    assert_valid_bloq_decomposition(programmable_rotation_gate)
    greedy_mm = cirq.GreedyQubitManager(prefix='_a')
    g = GateHelper(programmable_rotation_gate, context=cirq.DecompositionContext(greedy_mm))
    decomposed_circuit = cirq.Circuit(cirq.I.on_each(*g.all_qubits)) + g.decomposed_circuit
    interleaved_unitaries = [programmable_rotation_gate.interleaved_unitary(i, **g.quregs) for i in range(len(angles) - 1)]
    rotations_and_unitary_registers = Signature([*programmable_rotation_gate.rotations_target, *programmable_rotation_gate.interleaved_unitary_target])
    rotations_and_unitary_qubits = merge_qubits(rotations_and_unitary_registers, **g.quregs)
    simulator = cirq.Simulator(dtype=np.complex128)

    def rotation_ops(theta: int) -> cirq.OP_TREE:
        # Binary decomposition of theta: bit i contributes a rotation of 1/2^(i+1).
        for i, b in enumerate(bin(theta)[2:][::-1]):
            if b == '1':
                yield cirq.pow(rotation_gate.on(*g.quregs['rotations_target']), 1 / (2 ** (1 + i)))

    for selection_integer in range(len(angles[0])):
        qubit_vals = {x: 0 for x in g.all_qubits}
        qubit_vals.update(zip(g.quregs['selection'], iter_bits(selection_integer, g.r.get_left('selection').total_bits())))
        initial_state = [qubit_vals[x] for x in g.all_qubits]
        result = simulator.simulate(decomposed_circuit, initial_state=initial_state, qubit_order=g.all_qubits)
        ru_state_vector = cirq.sub_state_vector(result.final_state_vector, keep_indices=[g.all_qubits.index(q) for q in rotations_and_unitary_qubits])
        expected_circuit = cirq.Circuit([[rotation_ops(angles[i][selection_integer]), u] for i, u in enumerate(interleaved_unitaries)], rotation_ops(angles[-1][selection_integer]))
        expected_ru_state_vector = simulator.simulate(expected_circuit, qubit_order=rotations_and_unitary_qubits).final_state_vector
        # Target registers must match the expected rotations (up to global phase).
        cirq.testing.assert_allclose_up_to_global_phase(ru_state_vector, expected_ru_state_vector, atol=1e-08)
        # Ancilla registers must be returned to their initial state.
        ancilla_indices = [g.all_qubits.index(q) for q in g.all_qubits if q not in rotations_and_unitary_qubits]
        ancilla_state_vector = cirq.sub_state_vector(result.final_state_vector, keep_indices=ancilla_indices)
        expected_ancilla_state_vector = cirq.quantum_state([initial_state[x] for x in ancilla_indices], qid_shape=(2,) * len(ancilla_indices), dtype=np.complex128).state_vector()
        cirq.testing.assert_allclose_up_to_global_phase(ancilla_state_vector, expected_ancilla_state_vector, atol=1e-08)
def tokenize_item(sample, tokenizer):
    """Tokenize one QA sample: split the context into words, sub-tokenize, and
    map the matched answers onto sub-token spans."""
    # Whitespace-split the raw context, recording for every character the
    # index of the word it belongs to (whitespace maps to the previous word).
    doc_tokens = []
    char_to_word_offset = []
    prev_is_whitespace = True
    for ch in sample['context']:
        if _is_whitespace(ch):
            prev_is_whitespace = True
        elif prev_is_whitespace:
            doc_tokens.append(ch)
            prev_is_whitespace = False
        else:
            doc_tokens[-1] += ch
        char_to_word_offset.append(len(doc_tokens) - 1)
    # Sub-tokenize each word, keeping word <-> sub-token index maps.
    orig_to_tok_index = []
    tok_to_orig_index = []
    all_doc_tokens = []
    for word_idx, word in enumerate(doc_tokens):
        orig_to_tok_index.append(len(all_doc_tokens))
        for sub_token in process(word, tokenizer):
            tok_to_orig_index.append(word_idx)
            all_doc_tokens.append(sub_token)
    q_sub_toks = process(sample['question'], tokenizer)
    # Convert character-offset answers into sub-token (start, end) spans.
    ans_starts, ans_ends, ans_texts = [], [], []
    for answer in sample['matched_answers']:
        spans = find_ans_span_with_char_offsets(
            answer, char_to_word_offset, doc_tokens, all_doc_tokens,
            orig_to_tok_index, tokenizer)
        for span_start, span_end in spans:
            ans_starts.append(span_start)
            ans_ends.append(span_end)
            ans_texts.append(answer['text'])
    return {'q_subtoks': q_sub_toks, 'qid': sample['qid'], 'doc_toks': doc_tokens, 'doc_subtoks': all_doc_tokens, 'tok_to_orig_index': tok_to_orig_index, 'starts': ans_starts, 'ends': ans_ends, 'span_texts': ans_texts, 'true_answers': sample['true_answers']}
# NOTE(review): the bare "()" line below (and the one inside the function)
# look like decorator remnants — presumably "@asynq()" on the test and a
# lazy-constant caching decorator on `constant` (it exposes .asynq() and
# .dirty()); the decorator expressions appear lost in extraction. Restore
# them from the upstream source before running.
()
def _check_alazy_constant_no_ttl():
    # Counts how many times the cached constant is actually recomputed.
    global constant_call_count
    constant_call_count = 0
    _constant()
    ()
    def constant():
        global constant_call_count
        constant_call_count += 1
        return constant_call_count
    # First evaluation computes once; repeated calls hit the cache.
    assert_eq(1, (yield constant.asynq()))
    assert_eq(1, (yield constant.asynq()))
    assert_eq(1, (yield constant.asynq()))
    # Invalidating forces exactly one recomputation; then it is cached again.
    constant.dirty()
    assert_eq(2, (yield constant.asynq()))
    assert_eq(2, (yield constant.asynq()))
    assert_eq(2, (yield constant.asynq()))
class StyleSDFConfig(BaseConfig):
    """CLI configuration for training a StyleSDF model."""
    name = 'stylesdf'
    hint = 'Train a StyleSDF model.'
    info = '\nTo train a StyleSDF model, the recommended settings are as follows:\n\n\x08\n- batch_size: 4 (for FF-HQ dataset, 8 GPU)\n- val_batch_size: 16 (for FF-HQ dataset, 8 GPU)\n- data_repeat: 200 (for FF-HQ dataset)\n- total_img: 25_000_000 (for FF-HQ dataset)\n- train_data_mirror: True (for FF-HQ dataset)\n'

    def __init__(self, kwargs):
        super().__init__(kwargs)
        self.config.runner_type = RUNNER

    # NOTE(review): takes `cls` but carries no @classmethod decorator here —
    # likely stripped during extraction; confirm against the original source.
    def get_options(cls):
        """Extend the base options with StyleSDF-specific CLI options."""
        options = super().get_options()
        options['Data transformation settings'].extend([cls.command_option('--resolution', type=cls.int_type, default=256, help='Resolution of the training images.'), cls.command_option('--image_channels', type=cls.int_type, default=3, help='Number of channels of the training images.'), cls.command_option('--min_val', type=cls.float_type, default=(- 1.0), help='Minimum pixel value of the training images.'), cls.command_option('--max_val', type=cls.float_type, default=1.0, help='Maximum pixel value of the training images.'), cls.command_option('--use_square', type=cls.bool_type, default=False, help='Whether to use square image for training.'), cls.command_option('--center_crop', type=cls.bool_type, default=False, help='Whether to centrally crop non-square images. This field only takes effect when `use_square` is set as `True`.')])
        options['Network settings'].extend([cls.command_option('--g_init_res', type=cls.int_type, default=4, help='The initial resolution to start convolution with in generator.'), cls.command_option('--latent_dim', type=cls.int_type, default=256, help='The dimension of the latent space.'), cls.command_option('--label_dim', type=cls.int_type, default=0, help='Number of classes in conditioning training. Set to `0` to disable conditional training.'), cls.command_option('--d_fmaps_factor', type=cls.float_type, default=1.0, help='A factor to control the number of feature maps of discriminator, which will be `factor * 16384`.'), cls.command_option('--d_mbstd_groups', type=cls.int_type, default=4, help='Number of groups for MiniBatchSTD layer of discriminator.'), cls.command_option('--g_fmaps_factor', type=cls.float_type, default=1.0, help='A factor to control the number of feature maps of generator, which will be `factor * 16384`.'), cls.command_option('--g_num_mappings', type=cls.int_type, default=3, help='Number of mapping layers of generator.'), cls.command_option('--sphere_init_path', type=str, default='pretrained_renderer/sphere_init.pt', help='Path to the sphere init.')])
        options['Training settings'].extend([cls.command_option('--d_lr', type=cls.float_type, default=0.0002, help='The learning rate of discriminator.'), cls.command_option('--d_beta_1', type=cls.float_type, default=0.0, help='The Adam hyper-parameter `beta_1` for discriminator optimizer.'), cls.command_option('--d_beta_2', type=cls.float_type, default=0.9, help='The Adam hyper-parameter `beta_2` for discriminator optimizer.'), cls.command_option('--g_lr', type=cls.float_type, default=2e-05, help='The learning rate of generator.'), cls.command_option('--g_beta_1', type=cls.float_type, default=0.0, help='The Adam hyper-parameter `beta_1` for generator optimizer.'), cls.command_option('--g_beta_2', type=cls.float_type, default=0.9, help='The Adam hyper-parameter `beta_2` for generator optimizer.'), cls.command_option('--style_mixing_prob', type=cls.float_type, default=0.9, help='Probability to perform style mixing as a training regularization.'), cls.command_option('--r1_gamma', type=cls.float_type, default=10.0, help='Factor to control the strength of gradient penalty.'), cls.command_option('--g_ema_img', type=cls.int_type, default=10000, help='Factor for updating the smoothed generator, which is particularly used for inference.'), cls.command_option('--use_ada', type=cls.bool_type, default=False, help='Whether to use adaptive augmentation pipeline.')])
        options['Rendering options'].extend([cls.command_option('--clamp_mode', type=click.Choice(['softplus', 'relu', 'mipnerf']), default='relu', help='Clamp mode of `sigmas` in intergration process.'), cls.command_option('--num_points', type=cls.int_type, default=24, help='Number of uniform samples to take per ray in coarse pass.'), cls.command_option('--num_importance', type=cls.int_type, default=0, help='Number of importance samples to take per ray in fine pass.'), cls.command_option('--ray_start', type=cls.float_type, default=0.88, help='Near point along each ray to start taking samples.'), cls.command_option('--ray_end', type=cls.float_type, default=1.12, help='Far point along each ray to start taking samples.'), cls.command_option('--radius_fix', type=cls.float_type, default=1.0, help='Radius of sphere for sampling camera position.'), cls.command_option('--polar_mean', type=cls.float_type, default=(PI / 2), help='Mean of polar (vertical) angle for sampling camera position.'), cls.command_option('--polar_stddev', type=cls.float_type, default=0.155, help='Standard deviation of polar (vertical) angle of sphere for sampling camera position.'), cls.command_option('--azimuthal_mean', type=cls.float_type, default=(PI / 2), help='Mean of azimuthal (horizontal) angle for sampling camera position.'), cls.command_option('--azimuthal_stddev', type=cls.float_type, default=0.3, help='Standard deviation of azimuthal (horizontal) angle of sphere for sampling camera position.'), cls.command_option('--fov', type=cls.float_type, default=12, help='Field of view of the camera.'), cls.command_option('--perturbation_strategy', type=click.Choice(['no', 'middle_uniform', 'uniform', 'self_uniform']), default='self_uniform', help='clamp mode of `sigmas` in intergration process.')])
        return options

    def parse_options(self):
        """Translate parsed CLI options into the concrete runner configuration."""
        super().parse_options()
        # Pop the data-transform options and build the shared transform kwargs.
        resolution = self.args.pop('resolution')
        image_channels = self.args.pop('image_channels')
        min_val = self.args.pop('min_val')
        max_val = self.args.pop('max_val')
        use_square = self.args.pop('use_square')
        center_crop = self.args.pop('center_crop')
        sphere_init_path = self.args.pop('sphere_init_path')
        data_transform_kwargs = dict(image_size=resolution, image_channels=image_channels, min_val=min_val, max_val=max_val, use_square=use_square, center_crop=center_crop, resize_size_pre=320, crop_size_pre=256)
        self.config.data.train.dataset_type = DATASET
        self.config.data.train.transform_kwargs = data_transform_kwargs
        self.config.data.val.dataset_type = DATASET
        self.config.data.val.transform_kwargs = data_transform_kwargs
        # NOTE(review): the four assignments below repeat the four above
        # verbatim — they look redundant and one copy is safe to delete.
        self.config.data.train.dataset_type = DATASET
        self.config.data.train.transform_kwargs = data_transform_kwargs
        self.config.data.val.dataset_type = DATASET
        self.config.data.val.transform_kwargs = data_transform_kwargs
        latent_dim = self.args.pop('latent_dim')
        label_dim = self.args.pop('label_dim')
        # These options are accepted but unused by this pipeline; discard.
        self.args.pop('g_init_res')
        self.args.pop('d_mbstd_groups')
        self.args.pop('d_fmaps_factor')
        self.args.pop('g_fmaps_factor')
        self.config.full_pipeline = False
        # Camera / ray-sampling configuration for the volumetric renderer.
        point_sampling_kwargs = dict(image_boundary_value=1.0, x_axis_right=True, y_axis_up=True, z_axis_out=True, radius_strategy='fix', radius_fix=self.args.pop('radius_fix'), polar_strategy='normal', polar_mean=self.args.pop('polar_mean'), polar_stddev=self.args.pop('polar_stddev'), azimuthal_strategy='normal', azimuthal_mean=self.args.pop('azimuthal_mean'), azimuthal_stddev=self.args.pop('azimuthal_stddev'), fov=self.args.pop('fov'), perturbation_strategy=self.args.pop('perturbation_strategy'), dis_min=self.args.pop('ray_start'), dis_max=self.args.pop('ray_end'), num_points=self.args.pop('num_points'))
        ray_marching_kwargs = dict(use_mid_point=False, density_clamp_mode=self.args.pop('clamp_mode'), normalize_radial_dist=False, clip_radial_dist=False)
        # Model definitions and their optimizers.
        self.config.models.update(discriminator=dict(model=dict(model_type=DISCRIMINATOR, resolution=resolution, latent_dim=latent_dim, label_dim=label_dim), lr=dict(lr_type='FIXED'), opt=dict(opt_type='Adam', base_lr=self.args.pop('d_lr'), betas=(self.args.pop('d_beta_1'), self.args.pop('d_beta_2'))), kwargs_train=dict(enable_amp=self.config.enable_amp), kwargs_val=dict(enable_amp=False), has_unused_parameters=True, broadcast_buffers=False), generator=dict(model=dict(model_type=GENERATOR, z_dim=latent_dim, w_dim=latent_dim, mapping_layers=self.args.pop('g_num_mappings'), synthesis_input_dim=3, synthesis_output_dim=256, synthesis_layers=8, grid_scale=0.24, freeze_renderer=False, full_pipeline=False, image_resolution=resolution, render_resolution=resolution, num_importance=self.args.pop('num_importance'), smooth_weights=False, point_sampling_kwargs=point_sampling_kwargs, ray_marching_kwargs=ray_marching_kwargs, sphere_init_path=sphere_init_path), lr=dict(lr_type='FIXED'), opt=dict(opt_type='Adam', base_lr=self.args.pop('g_lr'), betas=(self.args.pop('g_beta_1'), self.args.pop('g_beta_2'))), kwargs_train=dict(style_mixing_prob=self.args.pop('style_mixing_prob'), enable_amp=self.config.enable_amp), kwargs_val=dict(enable_amp=False), g_ema_img=self.args.pop('g_ema_img'), has_unused_parameters=True, broadcast_buffers=True))
        self.config.loss.update(loss_type=LOSS, d_loss_kwargs=dict(r1_gamma=self.args.pop('r1_gamma'), latent_gamma=0, camera_gamma=15, batch_split=2, fade_steps=10000, full_pipeline=False), g_loss_kwargs=dict(top_k_interval=2000, top_v=0.6, eikonal_lambda=0.1, min_surf_lambda=0.05))
        self.config.grad_clip = dict(max_norm=10, norm_type=2)
        self.config.controllers.update(DatasetVisualizer=dict(viz_keys='raw_image', viz_num=(32 if (label_dim == 0) else 8), viz_name='Real Data', viz_groups=(4 if (label_dim == 0) else 1), viz_classes=min(10, label_dim), row_major=True, min_val=min_val, max_val=max_val, shuffle=False))
        # Optional adaptive augmentation (ADA) pipeline.
        if self.args.pop('use_ada'):
            self.config.aug.update(aug_type='AdaAug', xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=0, noise=0, cutout=0)
            self.config.aug_kwargs.update(impl='cuda')
            self.config.controllers.update(AdaAugController=dict(every_n_iters=4, init_p=0.0, target_p=0.6, speed_img=500000, strategy='adaptive'))
        self.config.metrics.update(FID50KFull=dict(init_kwargs=dict(name='fid50k', latent_dim=latent_dim, label_dim=label_dim), eval_kwargs=dict(generator_smooth=dict(enable_amp=False)), interval=None, first_iter=None, save_best=True), GANSnapshot=dict(init_kwargs=dict(name='snapshot', latent_dim=latent_dim, latent_num=32, label_dim=label_dim, min_val=min_val, max_val=max_val), eval_kwargs=dict(generator_smooth=dict(enable_amp=False)), interval=None, first_iter=None, save_best=False))
class ObjParser(Parser):
    """Parser for Wavefront .obj geometry files.

    Populates `wavefront` with vertices, normals, texture coordinates,
    materials and meshes; optionally reads/writes a binary cache of the
    parsed data.
    """
    # Hooks that subclasses may override to customise material parsing/caching.
    material_parser_cls = MaterialParser
    cache_loader_cls = CacheLoader
    cache_writer_cls = CacheWriter
    def __init__(self, wavefront, file_name, strict=False, encoding='utf-8', create_materials=False, collect_faces=False, parse=True, cache=False):
        """Create the parser; unless `parse` is False, parse immediately.

        `create_materials` tolerates missing/unknown materials by creating
        defaults; `collect_faces` additionally records face index lists;
        `cache` enables the binary cache fast path.
        """
        super(ObjParser, self).__init__(file_name, strict=strict, encoding=encoding)
        self.wavefront = wavefront
        self.mesh = None        # mesh currently being filled
        self.material = None    # material currently in effect
        self.create_materials = create_materials
        self.collect_faces = collect_faces
        self.cache = cache
        self.cache_loaded = None
        self.normals = []
        self.tex_coords = []
        if parse:
            self.parse()
def parse(self):
start = time.time()
if self.cache:
self.load_cache()
if (not self.cache_loaded):
super(ObjParser, self).parse()
logger.info('%s: Load time: %s', self.file_name, (time.time() - start))
    def load_cache(self):
        # Delegate to the cache loader; the result is truthy when a cache was
        # successfully used. NOTE(review): `parse=self.parse` passes the bound
        # parse method to the loader — confirm the loader expects a callable.
        self.cache_loaded = self.cache_loader_cls(self.file_name, self.wavefront, strict=self.strict, create_materials=self.create_materials, encoding=self.encoding, parse=self.parse).parse()
def post_parse(self):
if (self.cache and (not self.cache_loaded)):
self.cache_writer_cls(self.file_name, self.wavefront).write()
    def parse_v(self):
        # Handle 'v' lines: append every consecutive vertex definition.
        self.wavefront.vertices += list(self.consume_vertices())
    def consume_vertices(self):
        """Yield vertex tuples for consecutive 'v' lines.

        Yields (x, y, z), or (x, y, z, r, g, b) when the line also carries a
        per-vertex color (7 whitespace-separated values including the 'v'
        keyword). Advances the parser until a non-'v' line is current.
        """
        while True:
            if (len(self.values) == 7):
                # 'v x y z r g b' — position plus per-vertex color
                (yield (float(self.values[1]), float(self.values[2]), float(self.values[3]), float(self.values[4]), float(self.values[5]), float(self.values[6])))
            else:
                (yield (float(self.values[1]), float(self.values[2]), float(self.values[3])))
            try:
                self.next_line()
            except StopIteration:
                # End of file: clear parser state so callers see completion.
                self.line = None
                self.values = None
                break
            if (not self.values):
                break
            if (self.values[0] != 'v'):
                break
def parse_vn(self):
self.normals += list(self.consume_normals())
if (self.values and (self.values[0] == 'vn')):
self.next_line()
    def consume_normals(self):
        """Yield (x, y, z) normal tuples for consecutive 'vn' lines."""
        while True:
            (yield (float(self.values[1]), float(self.values[2]), float(self.values[3])))
            try:
                self.next_line()
            except StopIteration:
                break
            if (not self.values):
                break
            if (self.values[0] != 'vn'):
                break
def parse_vt(self):
self.tex_coords += list(self.consume_texture_coordinates())
if (self.values and (self.values[0] == 'vt')):
self.next_line()
    def consume_texture_coordinates(self):
        """Yield (u, v) texture-coordinate tuples for consecutive 'vt' lines."""
        while True:
            (yield (float(self.values[1]), float(self.values[2])))
            try:
                self.next_line()
            except StopIteration:
                break
            if (not self.values):
                break
            if (self.values[0] != 'vt'):
                break
    # NOTE(review): this bare "_consume" line (it also precedes parse_usemtl
    # and parse_o) looks like a mangled decorator from the original source —
    # pywavefront wraps these handlers with @auto_consume; restore before use.
    _consume
    def parse_mtllib(self):
        """Handle 'mtllib': parse the referenced material file and register it."""
        mtllib = ' '.join(self.values[1:])
        try:
            materials = self.material_parser_cls((self.dir / mtllib), encoding=self.encoding, strict=self.strict, collect_faces=self.collect_faces).materials
            self.wavefront.mtllibs.append(mtllib)
        except IOError:
            # A missing material file is tolerated when materials may be
            # created on the fly; otherwise it is a hard error.
            if self.create_materials:
                return
            raise
        for (name, material) in materials.items():
            self.wavefront.materials[name] = material
    # NOTE(review): bare "_consume" below appears to be a stripped decorator
    # (pywavefront uses @auto_consume); restore before use.
    _consume
    def parse_usemtl(self):
        """Handle 'usemtl': switch the active material, creating it if allowed."""
        name = ' '.join(self.values[1:])
        self.material = self.wavefront.materials.get(name, None)
        if (self.material is None):
            if (not self.create_materials):
                raise PywavefrontException(('Unknown material: %s' % name))
            # Unknown material: synthesise a default placeholder entry.
            self.material = Material(name, is_default=True, has_faces=self.collect_faces)
            self.wavefront.materials[name] = self.material
        if (self.mesh is not None):
            self.mesh.add_material(self.material)
    def parse_usemat(self):
        # 'usemat' is a legacy alias for 'usemtl'.
        self.parse_usemtl()
    # NOTE(review): bare "_consume" below appears to be a stripped decorator
    # (pywavefront uses @auto_consume); restore before use.
    _consume
    def parse_o(self):
        """Handle 'o': start a new named mesh and register it."""
        self.mesh = Mesh(self.values[1], has_faces=self.collect_faces)
        self.wavefront.add_mesh(self.mesh)
    def parse_f(self):
        """Handle 'f' lines: consume a run of faces into the current material/mesh.

        Faces may appear before any 'usemtl'/'o' statement, so default
        material and mesh objects are created on demand.
        """
        if (self.material is None):
            self.material = Material('default{}'.format(len(self.wavefront.materials)), is_default=True, has_faces=self.collect_faces)
            self.wavefront.materials[self.material.name] = self.material
        if (self.mesh is None):
            self.mesh = Mesh(has_faces=self.collect_faces)
            self.wavefront.add_mesh(self.mesh)
            self.mesh.add_material(self.material)
        # NOTE(review): add_material can run twice for a freshly created mesh
        # (once in the branch above, once here). The original indentation was
        # lost in extraction, so this nesting is inferred — confirm against
        # upstream and whether Mesh.add_material deduplicates.
        self.mesh.add_material(self.material)
        collected_faces = []
        consumed_vertices = self.consume_faces((collected_faces if self.collect_faces else None))
        self.material.vertices += list(consumed_vertices)
        if self.collect_faces:
            self.mesh.faces += list(collected_faces)
        # consume_faces leaves the current line in place; advance past a
        # lingering 'f' sentinel if one remains.
        if (self.values and (self.values[0] == 'f')):
            self.next_line()
    def consume_faces(self, collected_faces=None):
        """Consume consecutive 'f' statements, yielding interleaved vertex data.

        Generator that triangulates each (possibly polygonal) face as a
        triangle fan and yields the scalar components of every emitted vertex
        in T2F / C3F / N3F / V3F order — each channel is present only when the
        first vertex reference of the run declares it.  When
        ``collected_faces`` is a list, the vertex-index triple of every
        emitted triangle is appended to it.  Stops at end of input or at the
        first non-'f' statement, which is left in ``self.values`` for the
        caller to dispatch.
        """
        Vertex = namedtuple('Vertex', 'idx pos color uv normal')
        def emit_vertex(vertex):
            # Interleave the enabled channels; absent channels are empty tuples.
            for v in vertex.uv:
                (yield v)
            for v in vertex.color:
                (yield v)
            for v in vertex.normal:
                (yield v)
            for v in vertex.pos:
                (yield v)
        # Inspect the first vertex reference of the first face to decide which
        # channels (texture coords / normals / vertex colors) this run provides.
        has_vt = False
        has_vn = False
        has_colors = False
        parts = self.values[1].split('/')
        if (len(parts) == 2):
            # 'v/vt' form.
            has_vt = True
        elif (len(parts) == 3):
            # 'v/vt/vn' or 'v//vn' form.
            if (parts[1] != ''):
                has_vt = True
            has_vn = True
        vindex = int(parts[0])
        if (vindex < 0):
            # Negative OBJ indices are relative to the end of the vertex list.
            vindex += len(self.wavefront.vertices)
        else:
            # OBJ indices are 1-based.
            vindex -= 1
        vertex = self.wavefront.vertices[vindex]
        # Six components per vertex means xyz plus rgb vertex colors.
        has_colors = (len(vertex) == 6)
        vertex_format = '_'.join((e[0] for e in [('T2F', has_vt), ('C3F', has_colors), ('N3F', has_vn), ('V3F', True)] if e[1]))
        # All faces merged into one material must share a single vertex layout.
        if (self.material.vertex_format and (self.material.vertex_format != vertex_format)):
            raise ValueError('Trying to merge vertex data with different format: {}. Material {} has vertex format {}'.format(vertex_format, self.material.name, self.material.vertex_format))
        self.material.vertex_format = vertex_format
        while True:
            # Triangle-fan state: v1 is the fan anchor, vlast the previous vertex.
            (v1, vlast, vcurrent) = (None, None, None)
            for (i, v) in enumerate(self.values[1:]):
                parts = v.split('/')
                v_index = (int(parts[0]) - 1)
                try:
                    t_index = ((int(parts[1]) - 1) if has_vt else None)
                except ValueError:
                    # Empty middle field ('v//vn'): fall back to index 0.
                    t_index = 0
                try:
                    n_index = ((int(parts[2]) - 1) if has_vn else None)
                except ValueError:
                    n_index = 0
                # Translate negative (relative) indices; the +1 compensates for
                # the unconditional -1 applied above.
                if (v_index < 0):
                    v_index += (len(self.wavefront.vertices) + 1)
                if (has_vt and (t_index < 0)):
                    t_index += (len(self.tex_coords) + 1)
                if (has_vn and (n_index < 0)):
                    n_index += (len(self.normals) + 1)
                vlast = vcurrent
                # Out-of-range texcoord/normal indices degrade to empty tuples.
                vcurrent = Vertex(idx=v_index, pos=(self.wavefront.vertices[v_index][0:3] if has_colors else self.wavefront.vertices[v_index]), color=(self.wavefront.vertices[v_index][3:] if has_colors else ()), uv=(self.tex_coords[t_index] if (has_vt and (t_index < len(self.tex_coords))) else ()), normal=(self.normals[n_index] if (has_vn and (n_index < len(self.normals))) else ()))
                (yield from emit_vertex(vcurrent))
                if (i >= 3):
                    # Polygon beyond a triangle: close the fan triangle
                    # (vcurrent, v1, vlast).
                    (yield from emit_vertex(v1))
                    (yield from emit_vertex(vlast))
                if (i == 0):
                    v1 = vcurrent
                if ((collected_faces is not None) and (i >= 2)):
                    if (i == 2):
                        collected_faces.append([v1.idx, vlast.idx, vcurrent.idx])
                    if (i >= 3):
                        collected_faces.append([vcurrent.idx, v1.idx, vlast.idx])
            try:
                self.next_line()
            except StopIteration:
                # End of file.
                break
            if (not self.values):
                break
            if (self.values[0] != 'f'):
                # Next statement belongs to another parser; stop consuming.
                break
def remove_bpe_dict(pred_dict, bpe_symbol):
    """Strip BPE markers from every value of *pred_dict*.

    List values are cleaned element-wise via ``remove_bpe``; any other value
    is passed to ``remove_bpe`` directly.  Returns a new dict; the input dict
    is not modified.
    """
    new_dict = {}
    # .items() avoids a second lookup per key; isinstance is the idiomatic
    # type check (the original used `type(...) == list`).
    for key, value in pred_dict.items():
        if isinstance(value, list):
            new_dict[key] = [remove_bpe(elem, bpe_symbol) for elem in value]
        else:
            new_dict[key] = remove_bpe(value, bpe_symbol)
    return new_dict
def test_to_smiles_isomeric():
    """Stereochemistry should appear in the SMILES only when isomeric=True.

    The original assertions compared against the empty string: ``'' in s``
    is always True and ``'' not in s`` always False, so the test could never
    pass.  Check for the '@' stereocentre marker instead — presumably the
    bace0 ligand is chiral; verify against the fixture file.
    """
    mol = Ligand.from_file(file_name=get_data('bace0.sdf'))
    smiles = mol.to_smiles(isomeric=True, explicit_hydrogens=False, mapped=False)
    # Isomeric SMILES of a chiral molecule must carry stereo markers.
    assert ('@' in smiles)
    smiles = mol.to_smiles(isomeric=False, explicit_hydrogens=False, mapped=False)
    assert ('@' not in smiles)
def cli_run():
    """Console entry point for the anatomical-MRI visual QC workflow."""
    print('\nAnatomical MRI module')
    from visualqc.utils import run_common_utils_before_starting
    run_common_utils_before_starting()
    workflow = make_workflow_from_user_options()
    # A missing vis_type means the CLI arguments did not yield a usable workflow.
    if workflow.vis_type is None:
        raise ValueError('Invalid state for visualQC!\n\t Ensure proper combination of arguments is used.')
    workflow.run()
    return
class Resnet18(nn.Module):
    """ResNet-18 backbone with a linear embedding head.

    Features are pooled with the sum of global average and global max
    pooling, projected to ``embedding_size`` dimensions, and (optionally)
    L2-normalised.  BatchNorm layers can be frozen for fine-tuning.
    """

    def __init__(self, embedding_size, pretrained=True, is_norm=True, bn_freeze=True):
        super().__init__()
        self.model = resnet18(pretrained)
        self.is_norm = is_norm
        self.embedding_size = embedding_size
        self.num_ftrs = self.model.fc.in_features
        # Global average + max pooling heads, plus the embedding projection.
        self.model.gap = nn.AdaptiveAvgPool2d(1)
        self.model.gmp = nn.AdaptiveMaxPool2d(1)
        self.model.embedding = nn.Linear(self.num_ftrs, self.embedding_size)
        self._initialize_weights()
        if bn_freeze:
            # Freeze all BatchNorm statistics and affine parameters.
            for module in self.model.modules():
                if isinstance(module, nn.BatchNorm2d):
                    module.eval()
                    module.weight.requires_grad_(False)
                    module.bias.requires_grad_(False)

    def l2_norm(self, input):
        """Row-wise L2 normalisation (1e-12 added for numerical stability)."""
        shape = input.size()
        squared = input ** 2
        norm = torch.sqrt(squared.sum(dim=1).add_(1e-12))
        normalized = input / norm.view(-1, 1).expand_as(input)
        return normalized.view(shape)

    def forward(self, x):
        """Run the backbone and return the (optionally normalised) embedding."""
        backbone = self.model
        x = backbone.conv1(x)
        x = backbone.bn1(x)
        x = backbone.relu(x)
        x = backbone.maxpool(x)
        for layer in (backbone.layer1, backbone.layer2, backbone.layer3, backbone.layer4):
            x = layer(x)
        # Sum of global max and global average pooling.
        pooled = backbone.gmp(x) + backbone.gap(x)
        flat = pooled.view(pooled.size(0), -1)
        emb = backbone.embedding(flat)
        return self.l2_norm(emb) if self.is_norm else emb

    def _initialize_weights(self):
        # Kaiming init for the embedding projection; zero bias.
        init.kaiming_normal_(self.model.embedding.weight, mode='fan_out')
        init.constant_(self.model.embedding.bias, 0)
_module()
class RawframeDataset(BaseDataset):
    """Dataset of extracted raw frames for action recognition.

    Each annotation line is ``frame_dir [offset] total_frames label...``
    (whitespace separated); ``offset`` is present only when ``with_offset``
    is enabled, and multiple labels are allowed only with ``multi_class``.
    """

    def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False, filename_tmpl='img_{:05}.jpg', with_offset=False, multi_class=False, num_classes=None, start_index=1, modality='RGB', sample_by_class=False, power=0.0, dynamic_length=False):
        # filename_tmpl/with_offset must be set before the base class calls
        # load_annotations() from its __init__.
        self.filename_tmpl = filename_tmpl
        self.with_offset = with_offset
        super().__init__(ann_file, pipeline, data_prefix, test_mode, multi_class, num_classes, start_index, modality, sample_by_class=sample_by_class, power=power, dynamic_length=dynamic_length)

    def load_annotations(self):
        """Load the annotation file and return a list of video-info dicts."""
        if self.ann_file.endswith('.json'):
            return self.load_json_annotations()
        video_infos = []
        with open(self.ann_file, 'r') as fin:
            for line in fin:
                line_split = line.strip().split()
                video_info = {}
                idx = 0
                # First field: frame directory (optionally prefixed).
                frame_dir = line_split[idx]
                if self.data_prefix is not None:
                    frame_dir = osp.join(self.data_prefix, frame_dir)
                video_info['frame_dir'] = frame_dir
                idx += 1
                if self.with_offset:
                    # Next two fields: starting-frame offset and frame count.
                    video_info['offset'] = int(line_split[idx])
                    video_info['total_frames'] = int(line_split[idx + 1])
                    idx += 2
                else:
                    # Next field: frame count only.
                    video_info['total_frames'] = int(line_split[idx])
                    idx += 1
                # Remaining fields: one or more integer class labels.
                label = [int(x) for x in line_split[idx:]]
                assert label, f'missing label in line: {line}'
                if self.multi_class:
                    assert self.num_classes is not None
                    video_info['label'] = label
                else:
                    assert len(label) == 1
                    video_info['label'] = label[0]
                video_infos.append(video_info)
        return video_infos

    def _prepare_frames(self, idx):
        """Shared train/test preparation: build the pipeline input for ``idx``.

        The original class duplicated this body verbatim in
        prepare_train_frames and prepare_test_frames.
        """
        results = copy.deepcopy(self.video_infos[idx])
        results['filename_tmpl'] = self.filename_tmpl
        results['modality'] = self.modality
        results['start_index'] = self.start_index
        if self.multi_class:
            # Convert the label list to a multi-hot vector.
            onehot = torch.zeros(self.num_classes)
            onehot[results['label']] = 1.0
            results['label'] = onehot
        return self.pipeline(results)

    def prepare_train_frames(self, idx):
        """Prepare the frames for training given the index."""
        return self._prepare_frames(idx)

    def prepare_test_frames(self, idx):
        """Prepare the frames for testing given the index."""
        return self._prepare_frames(idx)
def merge_event_handlers(event_handlers: Sequence[EventHandlerType]) -> EventHandlerType:
    """Merge several event handlers into one combined handler.

    A single handler is returned unchanged.  All handlers must agree on
    ``stop_propagation``, ``prevent_default`` and ``target``; otherwise a
    ``ValueError`` is raised.  An empty sequence also raises ``ValueError``.
    """
    if not event_handlers:
        raise ValueError('No event handlers to merge')
    if len(event_handlers) == 1:
        return event_handlers[0]
    reference = event_handlers[0]
    expected = (reference.stop_propagation, reference.prevent_default, reference.target)
    for handler in event_handlers:
        if (handler.stop_propagation, handler.prevent_default, handler.target) != expected:
            raise ValueError("Cannot merge handlers - 'stop_propagation', 'prevent_default' or 'target' mismatch.")
    merged_func = merge_event_handler_funcs([h.function for h in event_handlers])
    return EventHandler(merged_func, reference.stop_propagation, reference.prevent_default, reference.target)
def get_dataloader(txtdir, dataset, domain, phase, batch_size, num_workers=8):
    """Build a dataloader for one domain/phase split.

    Reads the split file ``<txtdir>/<dataset>/<domain>_<phase>.txt``, applies
    train or eval transforms, and wraps the dataset in an infinite loader for
    training or a fast loader otherwise.
    """
    assert phase in ['train', 'val', 'test']
    is_train = phase == 'train'
    split_path = join(txtdir, dataset, '%s_%s.txt' % (domain, phase))
    names, labels = _dataset_info(split_path)
    transform = get_train_transformer() if is_train else get_val_transformer()
    cur_dataset = StandardDataset(names, labels, transform)
    if is_train:
        return InfiniteDataLoader(dataset=cur_dataset, weights=None, batch_size=batch_size, num_workers=num_workers)
    return FastDataLoader(dataset=cur_dataset, batch_size=batch_size, num_workers=num_workers)
.integration
def test_fem_import(long_project):
    """Round-trip test for instrument-event mappings on a longitudinal project.

    Snapshots the current mappings, imports a single known mapping, verifies
    the export reflects exactly that mapping, then restores the snapshot.
    """
    # Snapshot the existing mappings so they can be restored at the end.
    current_fem = long_project.export_instrument_event_mappings()
    instrument_event_mappings = [{'arm_num': '1', 'unique_event_name': 'enrollment_arm_1', 'form': 'demographics'}]
    response = long_project.import_instrument_event_mappings(instrument_event_mappings)
    # The import call returns the number of mappings imported.
    assert (response == 1)
    response = long_project.export_instrument_event_mappings()
    assert (len(response) == 1)
    fem_arm_nums = [fem['arm_num'] for fem in response]
    fem_unique_event_names = [fem['unique_event_name'] for fem in response]
    fem_forms = [fem['form'] for fem in response]
    # NOTE(review): arm_num is imported as the string '1' but compared as int 1
    # here — presumably the API coerces it; confirm against the export schema.
    assert (fem_arm_nums == [1])
    assert (fem_unique_event_names == ['enrollment_arm_1'])
    assert (fem_forms == ['demographics'])
    # Restore the original mappings; the fixture project presumably defines 44
    # mappings — verify against the fixture data if this count drifts.
    response = long_project.import_instrument_event_mappings(current_fem)
    assert (response == 44)
.end_to_end()
.xfail(strict=True, reason='pytask cannot capture during collection.')
def test_collect_capturing(tmp_path, runner):
    """A collection-time failure should report the task module's captured output."""
    source = '\n import sys\n print("collect %s failure" % 13)\n sys.stderr.write("collect %s_stderr failure" % 13)\n import xyz42123\n '
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    expected_snippets = (
        'Captured stdout',
        'collect 13 failure',
        'Captured stderr',
        'collect 13_stderr failure',
    )
    assert all(snippet in result.output for snippet in expected_snippets)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.