code stringlengths 281 23.7M |
|---|
class Interpreter(_Decoratable, ABC, Generic[(_Leaf_T, _Return_T)]):
    """Top-down tree visitor that dispatches on ``tree.data`` to a same-named method.

    Child subtrees are visited only when a handler explicitly calls
    ``visit_children``, so each handler controls its own traversal.
    Unknown rule names fall back to ``__default__``.
    """

    def visit(self, tree: Tree[_Leaf_T]) -> _Return_T:
        """Visit the root of *tree* and return its handler's result."""
        return self._visit_tree(tree)

    def _visit_tree(self, tree: Tree[_Leaf_T]):
        # Dispatch to the method named after the rule (``__getattr__`` supplies
        # the default handler when no such method exists).
        handler = getattr(self, tree.data)
        visit_wrapper = getattr(handler, 'visit_wrapper', None)
        if visit_wrapper is None:
            return handler(tree)
        # Wrapped handlers receive the raw parts instead of the tree object.
        return handler.visit_wrapper(handler, tree.data, tree.children, tree.meta)

    def visit_children(self, tree: Tree[_Leaf_T]) -> List:
        """Visit every subtree child; leaf children are passed through unchanged."""
        results = []
        for child in tree.children:
            if isinstance(child, Tree):
                results.append(self._visit_tree(child))
            else:
                results.append(child)
        return results

    def __getattr__(self, name):
        # Any attribute lookup that is not an explicit handler resolves to the
        # default handler.
        return self.__default__

    def __default__(self, tree):
        """Default rule handler: visit all children and return their results."""
        return self.visit_children(tree)
class TimeTests(unittest.TestCase):
    """Tests for the project's time helpers.

    NOTE(review): ``time`` here is the project's time-utility module (it
    provides humanize_delta / format_with_duration / until_expiration), not
    the standard-library module.
    """

    def test_humanize_delta_handle_unknown_units(self):
        # An unrecognised `precision` is tolerated; populated units are still
        # rendered (capped by max_units).
        actual = time.humanize_delta(relativedelta(days=2, hours=2), precision='elephants', max_units=2)
        self.assertEqual(actual, '2 days and 2 hours')

    def test_humanize_delta_handle_high_units(self):
        # max_units larger than the number of populated units is harmless.
        actual = time.humanize_delta(relativedelta(days=2, hours=2), precision='hours', max_units=20)
        self.assertEqual(actual, '2 days and 2 hours')

    def test_humanize_delta_should_normal_usage(self):
        # (delta, precision, max_units, expected): output is truncated both by
        # `precision` (smallest unit rendered) and by `max_units` (unit count).
        test_cases = ((relativedelta(days=2), 'seconds', 1, '2 days'), (relativedelta(days=2, hours=2), 'seconds', 2, '2 days and 2 hours'), (relativedelta(days=2, hours=2), 'seconds', 1, '2 days'), (relativedelta(days=2, hours=2), 'days', 2, '2 days'))
        for (delta, precision, max_units, expected) in test_cases:
            with self.subTest(delta=delta, precision=precision, max_units=max_units, expected=expected):
                actual = time.humanize_delta(delta, precision=precision, max_units=max_units)
                self.assertEqual(actual, expected)

    def test_humanize_delta_raises_for_invalid_max_units(self):
        # Zero and negative max_units must raise ValueError.
        test_cases = ((- 1), 0)
        for max_units in test_cases:
            with self.subTest(max_units=max_units), self.assertRaises(ValueError) as error:
                time.humanize_delta(relativedelta(days=2, hours=2), precision='hours', max_units=max_units)
            # BUG FIX: this assertion previously sat inside the `with` block,
            # after the raising call, so it was never executed.  It must run
            # after the context manager has captured the exception.
            self.assertEqual(str(error.exception), 'max_units must be positive.')

    def test_format_with_duration_none_expiry(self):
        # A None expiry always yields None, regardless of the other arguments.
        test_cases = ((None, None, None, None), (None, 'Why hello there!', None, None), (None, None, float('inf'), None), (None, 'Why hello there!', float('inf'), None))
        for (expiry, date_from, max_units, expected) in test_cases:
            with self.subTest(expiry=expiry, date_from=date_from, max_units=max_units, expected=expected):
                self.assertEqual(time.format_with_duration(expiry, date_from, max_units), expected)

    def test_format_with_duration_custom_units(self):
        # Larger max_units values expose more duration components.
        test_cases = (('3000-12-12T00:01:00Z', datetime(3000, 12, 11, 12, 5, 5, tzinfo=UTC), 6, '<t::f> (11 hours, 55 minutes and 55 seconds)'), ('3000-11-23T20:09:00Z', datetime(3000, 4, 25, 20, 15, tzinfo=UTC), 20, '<t::f> (6 months, 28 days, 23 hours and 54 minutes)'))
        for (expiry, date_from, max_units, expected) in test_cases:
            with self.subTest(expiry=expiry, date_from=date_from, max_units=max_units, expected=expected):
                self.assertEqual(time.format_with_duration(expiry, date_from, max_units), expected)

    def test_format_with_duration_normal_usage(self):
        # Representative expiry/date_from pairs, including the None expiry case.
        test_cases = (('2019-12-12T00:01:00Z', datetime(2019, 12, 11, 12, 0, 5, tzinfo=UTC), 2, '<t::f> (12 hours and 55 seconds)'), ('2019-12-12T00:01:00Z', datetime(2019, 12, 11, 12, 0, 5, tzinfo=UTC), 1, '<t::f> (12 hours)'), ('2019-12-12T00:00:00Z', datetime(2019, 12, 11, 23, 59, tzinfo=UTC), 2, '<t::f> (1 minute)'), ('2019-11-23T20:09:00Z', datetime(2019, 11, 15, 20, 15, tzinfo=UTC), 2, '<t::f> (7 days and 23 hours)'), ('2019-11-23T20:09:00Z', datetime(2019, 4, 25, 20, 15, tzinfo=UTC), 2, '<t::f> (6 months and 28 days)'), ('2019-11-23T20:58:00Z', datetime(2019, 11, 23, 20, 53, tzinfo=UTC), 2, '<t::f> (5 minutes)'), ('2019-11-24T00:00:00Z', datetime(2019, 11, 23, 23, 59, 0, tzinfo=UTC), 2, '<t::f> (1 minute)'), ('2019-11-23T23:59:00Z', datetime(2017, 7, 21, 23, 0, tzinfo=UTC), 2, '<t::f> (2 years and 4 months)'), ('2019-11-23T23:59:00Z', datetime(2019, 11, 23, 23, 49, 5, tzinfo=UTC), 2, '<t::f> (9 minutes and 55 seconds)'), (None, datetime(2019, 11, 23, 23, 49, 5, tzinfo=UTC), 2, None))
        for (expiry, date_from, max_units, expected) in test_cases:
            with self.subTest(expiry=expiry, date_from=date_from, max_units=max_units, expected=expected):
                self.assertEqual(time.format_with_duration(expiry, date_from, max_units), expected)

    def test_until_expiration_with_duration_none_expiry(self):
        # No expiry renders as a permanent duration.
        self.assertEqual(time.until_expiration(None), 'Permanent')

    def test_until_expiration_with_duration_custom_units(self):
        test_cases = (('3000-12-12T00:01:00Z', '<t::R>'), ('3000-11-23T20:09:00Z', '<t::R>'))
        for (expiry, expected) in test_cases:
            with self.subTest(expiry=expiry, expected=expected):
                self.assertEqual(time.until_expiration(expiry), expected)

    def test_until_expiration_normal_usage(self):
        # All future expiries render as a Discord relative timestamp.
        test_cases = (('3000-12-12T00:01:00Z', '<t::R>'), ('3000-12-12T00:01:00Z', '<t::R>'), ('3000-12-12T00:00:00Z', '<t::R>'), ('3000-11-23T20:09:00Z', '<t::R>'), ('3000-11-23T20:09:00Z', '<t::R>'))
        for (expiry, expected) in test_cases:
            with self.subTest(expiry=expiry, expected=expected):
                self.assertEqual(time.until_expiration(expiry), expected)
class Input(Queue, Readable, Writable):
    """Queue-backed input channel whose activity is tracked with BEGIN/END markers.

    Each BEGIN raises the runlevel (activating the channel); each END consumed
    by a reader lowers it.  The channel is alive while the runlevel is
    positive.  Lifecycle hooks (on_initialize / on_begin / on_end /
    on_finalize) default to no-ops and may be replaced by users.
    """

    def __init__(self, maxsize=BUFFER_SIZE):
        Queue.__init__(self, maxsize)
        self._runlevel = 0
        # Writer-side nesting depth, tracked separately because readers
        # consume END markers later than writers emit them.
        self._writable_runlevel = 0
        self.on_initialize = noop
        self.on_begin = noop
        self.on_end = noop
        self.on_finalize = noop

    def put(self, data, block=True, timeout=None):
        """Enqueue *data*.

        BEGIN only adjusts the runlevels (it is not enqueued); END is enqueued
        and closes one writable level.

        Raises:
            InactiveWritableError: when putting data outside a BEGIN/END pair.
        """
        if (data == BEGIN):
            if (not self._runlevel):
                self.on_initialize()
            self._runlevel += 1
            self._writable_runlevel += 1
            self.on_begin()
            return
        if (self._writable_runlevel < 1):
            raise InactiveWritableError('Cannot put() on an inactive {}.'.format(Writable.__name__))
        if (data == END):
            self._writable_runlevel -= 1
        return Queue.put(self, data, block, timeout)

    def _decrement_runlevel(self):
        # Fire on_finalize just before the last level is released.
        if (self._runlevel == 1):
            self.on_finalize()
        self._runlevel -= 1
        self.on_end()

    def get(self, block=True, timeout=None):
        """Dequeue the next data item, transparently consuming END markers.

        Raises:
            InactiveReadableError: when the channel is not (or no longer) alive.
        """
        if (not self.alive):
            raise InactiveReadableError('Cannot get() on an inactive {}.'.format(Readable.__name__))
        data = Queue.get(self, block, timeout)
        if (data == END):
            self._decrement_runlevel()
            if (not self.alive):
                raise InactiveReadableError('Cannot get() on an inactive {} (runlevel just reached 0).'.format(Readable.__name__))
            return self.get(block, timeout)
        return data

    def shutdown(self):
        """Force the runlevel down to zero, firing the usual callbacks."""
        while (self._runlevel >= 1):
            self._decrement_runlevel()

    def empty(self):
        """Like ``Queue.empty()``, but first discards queued END markers."""
        # BUG FIX: the original acquire()/release() pair leaked the lock if an
        # exception occurred in between; `with` guarantees release.
        with self.mutex:
            while (self._qsize() and (self.queue[0] == END)):
                self._runlevel -= 1
                Queue._get(self)
        return Queue.empty(self)

    @property
    def alive(self):
        """True while the runlevel is positive.

        BUG FIX: ``alive`` was a plain method, so ``not self.alive`` in
        ``get()`` tested a bound-method object (always truthy) and the
        inactive checks could never fire.  As a property it reflects the
        runlevel as intended.
        """
        return (self._runlevel > 0)
class PNet(nn.Module):
    """Pre-trained ("perceptual") network wrapper that scores two images by the
    sum of per-layer cosine distances between their backbone activations.

    NOTE(review): shift/scale are shaped (1, 3, 1, 1), i.e. per-channel
    constants for 3-channel input — presumably RGB normalisation values;
    confirm against the training pipeline.
    """

    def __init__(self, pnet_type='vgg', pnet_rand=False, use_gpu=True):
        super(PNet, self).__init__()
        self.use_gpu = use_gpu
        self.pnet_type = pnet_type
        self.pnet_rand = pnet_rand
        # Shaped (1, 3, 1, 1) so they broadcast over batch and spatial dims.
        self.shift = torch.Tensor([(- 0.03), (- 0.088), (- 0.188)]).view(1, 3, 1, 1)
        self.scale = torch.Tensor([0.458, 0.448, 0.45]).view(1, 3, 1, 1)
        # Select the frozen backbone; pretrained weights unless pnet_rand.
        if (self.pnet_type in ['vgg', 'vgg16']):
            self.net = vgg16(pretrained=(not self.pnet_rand), requires_grad=False)
        elif (self.pnet_type == 'alex'):
            self.net = alexnet(pretrained=(not self.pnet_rand), requires_grad=False)
        elif (self.pnet_type[:(- 2)] == 'resnet'):
            # e.g. 'resnet18' / 'resnet50': the last two characters pick the depth.
            self.net = resnet(pretrained=(not self.pnet_rand), requires_grad=False, num=int(self.pnet_type[(- 2):]))
        elif (self.pnet_type == 'squeeze'):
            self.net = squeezenet(pretrained=(not self.pnet_rand), requires_grad=False)
        # Number of feature slices exposed by the backbone wrapper.
        self.L = self.net.N_slices
        if use_gpu:
            self.net.cuda()
            self.shift = self.shift.cuda()
            self.scale = self.scale.cuda()

    def forward(self, in0, in1, retPerLayer=False):
        """Return the summed per-layer distance between in0 and in1 (and the
        per-layer score list when retPerLayer is True)."""
        in0_sc = ((in0 - self.shift.expand_as(in0)) / self.scale.expand_as(in0))
        # NOTE(review): in1 is normalised with shapes expanded from in0 — this
        # assumes in0 and in1 have identical shapes; confirm with callers.
        in1_sc = ((in1 - self.shift.expand_as(in0)) / self.scale.expand_as(in0))
        outs0 = self.net.forward(in0_sc)
        outs1 = self.net.forward(in1_sc)
        if retPerLayer:
            all_scores = []
        for (kk, out0) in enumerate(outs0):
            # 1 - cosine similarity at this feature slice.
            cur_score = (1.0 - cos_sim(outs0[kk], outs1[kk]))
            if (kk == 0):
                val = (1.0 * cur_score)
            else:
                val = (val + cur_score)
            if retPerLayer:
                all_scores += [cur_score]
        if retPerLayer:
            return (val, all_scores)
        else:
            return val
def eez(countries, geo_crs, country_shapes, EEZ_gpkg, out_logging=False, distance=0.01, minarea=0.01, tolerance=0.01):
    """Build per-country offshore (EEZ) shapes, clipped away from buffered land.

    Returns a GeoSeries indexed by country code containing simplified, valid,
    non-empty offshore geometries.
    """
    if out_logging:
        logger.info('Stage 2 of 5: Create offshore shapes')
    df_eez = load_EEZ(countries, geo_crs, EEZ_gpkg)
    # Keep only the requested countries that actually appear in the EEZ data.
    present = [code for code in countries if df_eez.name.str.contains(code).any()]
    unions = [df_eez.geometry.loc[(df_eez.name == code)].geometry.unary_union for code in present]
    ret_df = gpd.GeoDataFrame({'name': present, 'geometry': unions}).set_index('name')

    def simplify(geom):
        # Drop tiny polygons and simplify outlines with the given tolerance.
        return _simplify_polys(geom, minarea=minarea, tolerance=tolerance)

    ret_df = ret_df.geometry.map(simplify)
    ret_df = ret_df.apply(make_valid)
    # Remove a thin buffer of onshore territory so land and sea do not overlap.
    buffered_land = country_shapes.buffer(distance)
    offshore = ret_df.difference(buffered_land)
    # The difference may yield None entries; skip those while re-simplifying.
    offshore = offshore.map(lambda geom: geom if geom is None else simplify(geom))
    offshore = offshore.apply(lambda geom: geom if geom is None else make_valid(geom))
    ret_df = offshore.dropna()
    # Keep only valid, non-empty geometries.
    ret_df = ret_df[(ret_df.geometry.is_valid & (~ret_df.geometry.is_empty))]
    return ret_df
def get_up_block(up_block_type, num_layers, in_channels, out_channels, prev_output_channel, temb_channels, add_upsample, resnet_eps, resnet_act_fn, attn_num_head_channels, resnet_groups=None, cross_attention_dim=None):
    """Instantiate the UNet up-sampling block named by *up_block_type*.

    An optional 'UNetRes' prefix on the name is accepted and stripped.

    Raises:
        ValueError: for an unknown block name, or when CrossAttnUpBlock2D is
            requested without cross_attention_dim.
    """
    if up_block_type.startswith('UNetRes'):
        up_block_type = up_block_type[7:]
    if up_block_type == 'UpBlock2D':
        return UpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
        )
    if up_block_type == 'CrossAttnUpBlock2D':
        if cross_attention_dim is None:
            raise ValueError('cross_attention_dim must be specified for CrossAttnUpBlock2D')
        return CrossAttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
        )
    if up_block_type == 'AttnUpBlock2D':
        return AttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            attn_num_head_channels=attn_num_head_channels,
        )
    if up_block_type == 'SkipUpBlock2D':
        return SkipUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
        )
    if up_block_type == 'AttnSkipUpBlock2D':
        return AttnSkipUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            attn_num_head_channels=attn_num_head_channels,
        )
    if up_block_type == 'UpDecoderBlock2D':
        return UpDecoderBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
        )
    if up_block_type == 'AttnUpDecoderBlock2D':
        return AttnUpDecoderBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            attn_num_head_channels=attn_num_head_channels,
        )
    raise ValueError(f'{up_block_type} does not exist.')
def save_checkpoint(model, filename, optimizer=None, meta=None):
    """Save a checkpoint (meta, CPU state dict, optional optimizer state).

    ``filename`` may be a local path or a ``pavi://`` model-cloud URI.

    Args:
        model: module to checkpoint; unwrapped first when it is a wrapper
            (DataParallel/DDP-style, per ``is_module_wrapper``).
        filename (str): destination path or ``pavi://`` URI.
        optimizer: a single ``Optimizer`` or a dict of named optimizers whose
            state is stored alongside the weights (optional).
        meta (dict | None): extra metadata; mmcv version and save time are
            always added.

    Raises:
        TypeError: if ``meta`` is neither ``None`` nor a dict.
        ImportError: for a ``pavi://`` path when pavi is not installed.
    """
    if (meta is None):
        meta = {}
    elif (not isinstance(meta, dict)):
        raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
    if is_module_wrapper(model):
        # Save the wrapped module so state-dict keys carry no 'module.' prefix.
        model = model.module
    if (hasattr(model, 'CLASSES') and (model.CLASSES is not None)):
        meta.update(CLASSES=model.CLASSES)
    checkpoint = {'meta': meta, 'state_dict': weights_to_cpu(get_state_dict(model))}
    if isinstance(optimizer, Optimizer):
        checkpoint['optimizer'] = optimizer.state_dict()
    elif isinstance(optimizer, dict):
        # Multiple optimizers are stored under their given names.
        checkpoint['optimizer'] = {}
        for (name, optim) in optimizer.items():
            checkpoint['optimizer'][name] = optim.state_dict()
    if filename.startswith('pavi://'):
        # Upload to pavi modelcloud via a temporary local file.
        try:
            from pavi import modelcloud
            from pavi.exception import NodeNotFoundError
        except ImportError:
            raise ImportError('Please install pavi to load checkpoint from modelcloud.')
        model_path = filename[7:]
        root = modelcloud.Folder()
        (model_dir, model_name) = osp.split(model_path)
        try:
            model = modelcloud.get(model_dir)
        except NodeNotFoundError:
            # Create the remote folder on first save.
            model = root.create_training_model(model_dir)
        with TemporaryDirectory() as tmp_dir:
            checkpoint_file = osp.join(tmp_dir, model_name)
            with open(checkpoint_file, 'wb') as f:
                torch.save(checkpoint, f)
                f.flush()
            model.create_file(checkpoint_file, name=model_name)
    else:
        mmcv.mkdir_or_exist(osp.dirname(filename))
        with open(filename, 'wb') as f:
            torch.save(checkpoint, f)
            f.flush()
class GroundtruthFilterWithNanBoxTest(tf.test.TestCase):
    """Checks that groundtruth rows whose box coordinates are NaN are dropped
    from every parallel groundtruth field (TF1 graph/session style)."""

    def test_filter_groundtruth_with_nan_box_coordinates(self):
        # The first box is all-NaN, so its class / is_crowd / area entries
        # must be filtered out together with the box itself.
        input_tensors = {fields.InputDataFields.groundtruth_boxes: [[np.nan, np.nan, np.nan, np.nan], [0.2, 0.4, 0.1, 0.8]], fields.InputDataFields.groundtruth_classes: [1, 2], fields.InputDataFields.groundtruth_is_crowd: [False, True], fields.InputDataFields.groundtruth_area: [100.0, 238.7]}
        expected_tensors = {fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]], fields.InputDataFields.groundtruth_classes: [2], fields.InputDataFields.groundtruth_is_crowd: [True], fields.InputDataFields.groundtruth_area: [238.7]}
        output_tensors = ops.filter_groundtruth_with_nan_box_coordinates(input_tensors)
        with self.test_session() as sess:
            output_tensors = sess.run(output_tensors)
            # Float-valued fields use approximate comparison; the rest exact.
            for key in [fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_area]:
                self.assertAllClose(expected_tensors[key], output_tensors[key])
            for key in [fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_is_crowd]:
                self.assertAllEqual(expected_tensors[key], output_tensors[key])
def test_select_column_using_window_function_with_parameters():
    """The partition key, order key and aggregated column of a window function
    all contribute lineage to the aliased output column."""
    sql = 'INSERT INTO tab1\nSELECT col0,\n max(col3) over (partition BY col1 ORDER BY col2 DESC) AS rnum,\n col4\nFROM tab2'
    expected = [
        (ColumnQualifierTuple('col0', 'tab2'), ColumnQualifierTuple('col0', 'tab1')),
        (ColumnQualifierTuple('col1', 'tab2'), ColumnQualifierTuple('rnum', 'tab1')),
        (ColumnQualifierTuple('col2', 'tab2'), ColumnQualifierTuple('rnum', 'tab1')),
        (ColumnQualifierTuple('col3', 'tab2'), ColumnQualifierTuple('rnum', 'tab1')),
        (ColumnQualifierTuple('col4', 'tab2'), ColumnQualifierTuple('col4', 'tab1')),
    ]
    assert_column_lineage_equal(sql, expected)
def convert_examples_to_features(examples, tokenizer, query_templates, unseen_arguments, nth_query, is_training):
    """Build QA-style ``InputFeatures`` for event-argument extraction.

    For every event in every example and every argument type of the event's
    type, the nth query template is instantiated ('[trigger]' replaced by the
    trigger token) and encoded as ``[CLS] query [SEP] sentence [SEP]``.

    NOTE(review): ``max_seq_length`` is read from module scope, and sequences
    longer than it are NOT truncated, so input_ids may exceed it — confirm
    whether truncation happens elsewhere.

    Args:
        examples: objects exposing .events, .sentence and .s_start.
        tokenizer: WordPiece-style tokenizer.
        query_templates: dict event_type -> argument_type -> template list.
        unseen_arguments: held-out argument types; skipped when training and
            the only ones used when evaluating.
        nth_query: which template variant to instantiate.
        is_training: selects the train/eval feature-building branch.

    Returns:
        list of InputFeatures.
    """
    features = []
    for (example_id, example) in enumerate(examples):
        for event in example.events:
            # event[0] is the trigger: (absolute token offset, event type);
            # the remaining entries are gold arguments.
            trigger_offset = (event[0][0] - example.s_start)
            event_type = event[0][1]
            trigger_token = example.sentence[trigger_offset]
            arguments = event[1:]
            for argument_type in query_templates[event_type]:
                query = query_templates[event_type][argument_type][nth_query]
                query = query.replace('[trigger]', trigger_token)
                tokens = []
                segment_ids = []
                token_to_orig_map = {}
                # Segment 0: [CLS] + query + [SEP].
                tokens.append('[CLS]')
                segment_ids.append(0)
                query_tokens = tokenizer.tokenize(query)
                for token in query_tokens:
                    tokens.append(token)
                    segment_ids.append(0)
                tokens.append('[SEP]')
                segment_ids.append(0)
                # Segment 1: the sentence, one sub-token per original token.
                for (i, token) in enumerate(example.sentence):
                    token_to_orig_map[len(tokens)] = i
                    # NOTE(review): only the FIRST word-piece of each sentence
                    # token is kept — confirm this matches model training.
                    sub_tokens = tokenizer.tokenize(token)
                    tokens.append(sub_tokens[0])
                    segment_ids.append(1)
                tokens.append('[SEP]')
                segment_ids.append(1)
                input_ids = tokenizer.convert_tokens_to_ids(tokens)
                input_mask = ([1] * len(input_ids))
                # Zero-pad ids/mask/segments to max_seq_length (tokens and
                # token_to_orig_map are intentionally left unpadded).
                while (len(input_ids) < max_seq_length):
                    input_ids.append(0)
                    input_mask.append(0)
                    segment_ids.append(0)
                (start_position, end_position) = (None, None)
                sentence_start = example.s_start
                # Sentence tokens begin after [CLS] + query + [SEP].
                sentence_offset = (len(query_tokens) + 2)
                fea_trigger_offset = (trigger_offset + sentence_offset)
                # One-hot trigger-position marker, same padded length as
                # segment_ids.
                if_trigger_ids = ([0] * len(segment_ids))
                if_trigger_ids[fea_trigger_offset] = 1
                if is_training:
                    # Held-out argument types are excluded from training.
                    if (argument_type in unseen_arguments):
                        continue
                    # NOTE(review): no_answer is computed but never used below —
                    # a "no answer" feature branch may have been trimmed from
                    # this copy; verify against the upstream source.
                    no_answer = True
                    for argument in arguments:
                        gold_argument_type = argument[2]
                        if (gold_argument_type == argument_type):
                            no_answer = False
                            (answer_start, answer_end) = (argument[0], argument[1])
                            # Gold span re-based into the feature coordinates.
                            start_position = ((answer_start - sentence_start) + sentence_offset)
                            end_position = ((answer_end - sentence_start) + sentence_offset)
                            features.append(InputFeatures(example_id=example_id, tokens=tokens, token_to_orig_map=token_to_orig_map, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, if_trigger_ids=if_trigger_ids, event_type=event_type, argument_type=argument_type, fea_trigger_offset=fea_trigger_offset, start_position=start_position, end_position=end_position))
                else:
                    # Evaluation: only the held-out argument types are queried.
                    if (argument_type not in unseen_arguments):
                        continue
                    for argument in arguments:
                        gold_argument_type = argument[2]
                        if (gold_argument_type == argument_type):
                            features.append(InputFeatures(example_id=example_id, tokens=tokens, token_to_orig_map=token_to_orig_map, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, if_trigger_ids=if_trigger_ids, event_type=event_type, argument_type=argument_type, fea_trigger_offset=fea_trigger_offset, start_position=start_position, end_position=end_position))
    return features
class DescribeTabStops:
    """Unit tests for the TabStops proxy (python-docx "Describe" style suite).

    BUG FIX: the ``@pytest.fixture`` decorators had been stripped, leaving
    bare ``(params=[...])`` expression lines that are syntax errors; the
    fixture decorators are restored below.
    """

    def it_knows_its_length(self, len_fixture):
        (tab_stops, expected_value) = len_fixture
        assert (len(tab_stops) == expected_value)

    def it_can_iterate_over_its_tab_stops(self, iter_fixture):
        (tab_stops, expected_count, tab_stop_, TabStop_, expected_calls) = iter_fixture
        count = 0
        for tab_stop in tab_stops:
            assert (tab_stop is tab_stop_)
            count += 1
        assert (count == expected_count)
        assert (TabStop_.call_args_list == expected_calls)

    def it_can_get_a_tab_stop_by_index(self, index_fixture):
        (tab_stops, idx, TabStop_, tab, tab_stop_) = index_fixture
        tab_stop = tab_stops[idx]
        TabStop_.assert_called_once_with(tab)
        assert (tab_stop is tab_stop_)

    def it_raises_on_indexed_access_when_empty(self):
        tab_stops = TabStops(element('w:pPr'))
        with pytest.raises(IndexError):
            tab_stops[0]

    def it_can_add_a_tab_stop(self, add_tab_fixture):
        (tab_stops, position, kwargs, expected_xml) = add_tab_fixture
        tab_stops.add_tab_stop(position, **kwargs)
        assert (tab_stops._element.xml == expected_xml)

    def it_can_delete_a_tab_stop(self, del_fixture):
        (tab_stops, idx, expected_xml) = del_fixture
        del tab_stops[idx]
        assert (tab_stops._element.xml == expected_xml)

    def it_raises_on_del_idx_invalid(self, del_raises_fixture):
        (tab_stops, idx) = del_raises_fixture
        with pytest.raises(IndexError) as exc:
            del tab_stops[idx]
        assert (exc.value.args[0] == 'tab index out of range')

    def it_can_clear_all_its_tab_stops(self, clear_all_fixture):
        (tab_stops, expected_xml) = clear_all_fixture
        tab_stops.clear_all()
        assert (tab_stops._element.xml == expected_xml)

    # fixtures -------------------------------------------------------

    @pytest.fixture(params=['w:pPr', 'w:pPr/w:tabs/w:tab{w:pos=42}', 'w:pPr/w:tabs/(w:tab{w:pos=24},w:tab{w:pos=42})'])
    def clear_all_fixture(self, request):
        pPr_cxml = request.param
        tab_stops = TabStops(element(pPr_cxml))
        expected_xml = xml('w:pPr')
        return (tab_stops, expected_xml)

    @pytest.fixture(params=[('w:pPr/w:tabs/w:tab{w:pos=42}', 0, 'w:pPr'), ('w:pPr/w:tabs/(w:tab{w:pos=24},w:tab{w:pos=42})', 0, 'w:pPr/w:tabs/w:tab{w:pos=42}'), ('w:pPr/w:tabs/(w:tab{w:pos=24},w:tab{w:pos=42})', 1, 'w:pPr/w:tabs/w:tab{w:pos=24}')])
    def del_fixture(self, request):
        (pPr_cxml, idx, expected_cxml) = request.param
        tab_stops = TabStops(element(pPr_cxml))
        expected_xml = xml(expected_cxml)
        return (tab_stops, idx, expected_xml)

    @pytest.fixture(params=[('w:pPr', 0), ('w:pPr/w:tabs/w:tab{w:pos=42}', 1)])
    def del_raises_fixture(self, request):
        (tab_stops_cxml, idx) = request.param
        tab_stops = TabStops(element(tab_stops_cxml))
        return (tab_stops, idx)

    @pytest.fixture(params=[('w:pPr', Twips(42), {}, 'w:pPr/w:tabs/w:tab{w:pos=42,w:val=left}'), ('w:pPr', Twips(72), {'alignment': WD_TAB_ALIGNMENT.RIGHT}, 'w:pPr/w:tabs/w:tab{w:pos=72,w:val=right}'), ('w:pPr', Twips(24), {'alignment': WD_TAB_ALIGNMENT.CENTER, 'leader': WD_TAB_LEADER.DOTS}, 'w:pPr/w:tabs/w:tab{w:pos=24,w:val=center,w:leader=dot}'), ('w:pPr/w:tabs/w:tab{w:pos=42}', Twips(72), {}, 'w:pPr/w:tabs/(w:tab{w:pos=42},w:tab{w:pos=72,w:val=left})'), ('w:pPr/w:tabs/w:tab{w:pos=42}', Twips(24), {}, 'w:pPr/w:tabs/(w:tab{w:pos=24,w:val=left},w:tab{w:pos=42})'), ('w:pPr/w:tabs/w:tab{w:pos=42}', Twips(42), {}, 'w:pPr/w:tabs/(w:tab{w:pos=42},w:tab{w:pos=42,w:val=left})')])
    def add_tab_fixture(self, request):
        (pPr_cxml, position, kwargs, expected_cxml) = request.param
        tab_stops = TabStops(element(pPr_cxml))
        expected_xml = xml(expected_cxml)
        return (tab_stops, position, kwargs, expected_xml)

    @pytest.fixture(params=[('w:pPr/w:tabs/w:tab{w:pos=0}', 0), ('w:pPr/w:tabs/(w:tab{w:pos=1},w:tab{w:pos=2},w:tab{w:pos=3})', 1), ('w:pPr/w:tabs/(w:tab{w:pos=4},w:tab{w:pos=5},w:tab{w:pos=6})', 2)])
    def index_fixture(self, request, TabStop_, tab_stop_):
        (pPr_cxml, idx) = request.param
        pPr = element(pPr_cxml)
        tab = pPr.xpath('./w:tabs/w:tab')[idx]
        tab_stops = TabStops(pPr)
        return (tab_stops, idx, TabStop_, tab, tab_stop_)

    @pytest.fixture(params=[('w:pPr', 0), ('w:pPr/w:tabs/w:tab{w:pos=2880}', 1), ('w:pPr/w:tabs/(w:tab{w:pos=2880},w:tab{w:pos=5760})', 2)])
    def iter_fixture(self, request, TabStop_, tab_stop_):
        (pPr_cxml, expected_count) = request.param
        pPr = element(pPr_cxml)
        tab_elms = pPr.xpath('//w:tab')
        tab_stops = TabStops(pPr)
        expected_calls = [call(tab) for tab in tab_elms]
        return (tab_stops, expected_count, tab_stop_, TabStop_, expected_calls)

    @pytest.fixture(params=[('w:pPr', 0), ('w:pPr/w:tabs/w:tab{w:pos=2880}', 1)])
    def len_fixture(self, request):
        (tab_stops_cxml, expected_value) = request.param
        tab_stops = TabStops(element(tab_stops_cxml))
        return (tab_stops, expected_value)

    # fixture components ---------------------------------------------

    @pytest.fixture
    def TabStop_(self, request, tab_stop_):
        return class_mock(request, 'docx.text.tabstops.TabStop', return_value=tab_stop_)

    @pytest.fixture
    def tab_stop_(self, request):
        return instance_mock(request, TabStop)
def init_distributed_mode(args):
    """Initialise torch.distributed from the environment, mutating *args* in place.

    Detection order: ITP/OpenMPI variables (when args.dist_on_itp), then
    torchrun-style RANK/WORLD_SIZE/LOCAL_RANK, then SLURM; otherwise
    distributed mode is disabled and the function returns early.

    Sets: args.rank, args.world_size (except in the SLURM branch), args.gpu,
    args.distributed and args.dist_backend.
    """
    if args.dist_on_itp:
        # OpenMPI launcher: derive rank / world size / local GPU from OMPI_*
        # variables and re-export them under the torchrun-style names.
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = ('tcp://%s:%s' % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']))
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
    elif (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
        # torchrun / torch.distributed.launch style environment.
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif ('SLURM_PROCID' in os.environ):
        # SLURM: map the global proc id onto the node's GPUs.
        # NOTE(review): args.world_size is not set in this branch — it is
        # presumably supplied via CLI defaults; confirm with callers.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = (args.rank % torch.cuda.device_count())
    else:
        print('Not using distributed mode')
        # Keep printing enabled since this sole process is the "master".
        setup_for_distributed(is_master=True)
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # Silence print() on all ranks except rank 0.
    setup_for_distributed((args.rank == 0))
@register_specialize
@node_rewriter([Sum, Prod])
def local_sum_prod_of_mul_or_div(fgraph, node):
    """Rewrite a Sum/Prod of a ``mul``/``true_div`` by factoring out terms that
    are broadcastable along every reduced axis.

        sum(a * X, axis)  -> squeeze(a) * sum(X, axis)
        prod(a * X, axis) -> squeeze(a)**n * prod(X, axis)   (n = reduced size)
        sum(X / d, axis)  -> sum(X, axis) / squeeze(d)

    Returns the replacement output list, or None when not applicable.

    BUG FIX: the two decorator lines had been mangled to bare
    ``_specialize`` / ``_rewriter([Sum, Prod])`` residue (a NameError at
    import time); restored to the rewrite-registry decorators.
    """
    [node_inps] = node.inputs
    if (not node_inps.owner):
        return None
    inner_op = node_inps.owner.op
    if (not ((inner_op == mul) or (inner_op == true_div))):
        return None
    reduced_axes = node.op.axis
    if (reduced_axes is None):
        # axis=None reduces over every dimension.
        reduced_axes = tuple(range(node_inps.type.ndim))
    if (inner_op == mul):
        # Partition the multiplicands: terms broadcastable along all reduced
        # axes can be pulled outside the reduction.
        outer_terms = []
        inner_terms = []
        for term in node_inps.owner.inputs:
            term_bcast = term.type.broadcastable
            if all((term_bcast[i] for i in reduced_axes)):
                outer_terms.append(term.squeeze(reduced_axes))
            else:
                inner_terms.append(term)
        if (not outer_terms):
            return None
        elif (len(outer_terms) == 1):
            [outer_term] = outer_terms
        else:
            outer_term = mul(*outer_terms)
        if (not inner_terms):
            inner_term = None
        elif (len(inner_terms) == 1):
            [inner_term] = inner_terms
        else:
            inner_term = mul(*inner_terms)
    else:
        # Division: only the denominator may be factored out, and only when it
        # is broadcastable along all reduced axes.
        (numerator, denominator) = node_inps.owner.inputs
        denominator_bcast = denominator.type.broadcastable
        if all((denominator_bcast[i] for i in reduced_axes)):
            outer_term = denominator.squeeze(reduced_axes)
            inner_term = numerator
        else:
            return None
    if (isinstance(node.op, Prod) and inner_term):
        # A factored term of a product is multiplied once per reduced element,
        # so it re-enters as outer_term ** (number of reduced elements).
        dtype = inner_term.dtype
        n_reduced_elements = prod([inner_term.shape[i].astype(dtype) for i in reduced_axes])
        outer_term = (outer_term ** n_reduced_elements)
    if (not inner_term):
        new_out = outer_term
    else:
        reduced_inner_term = node.op(inner_term)
        if (inner_op == mul):
            new_out = (outer_term * reduced_inner_term)
        else:
            new_out = (reduced_inner_term / outer_term)
        copy_stack_trace(node.outputs, [inner_term, reduced_inner_term, outer_term])
    copy_stack_trace(node.outputs, new_out)
    return [new_out]
def classifier_fn_from_tfhub(output_fields, inception_model, return_tensor=False):
    """Wrap a TF-Hub image model as a classifier function.

    Args:
        output_fields: name (or list of names) of the output-dict entries to
            keep; None keeps the model's output untouched.
        inception_model: callable TF-Hub module applied to an image batch.
        return_tensor: if True, the single selected output is returned as a
            bare tensor instead of a dict.

    Returns:
        A function mapping an image batch to flattened model outputs.
    """
    if isinstance(output_fields, six.string_types):
        output_fields = [output_fields]

    def _classifier_fn(images):
        outputs = inception_model(images)
        if output_fields is not None:
            outputs = {field: outputs[field] for field in output_fields}
        if return_tensor:
            # Exactly one field may be selected when a bare tensor is wanted.
            assert len(outputs) == 1
            (outputs,) = outputs.values()
        # Flatten every output to (batch, -1).
        return tf.nest.map_structure(tf.compat.v1.layers.flatten, outputs)

    return _classifier_fn
class ChannelAttention(nn.Module):
    """Squeeze-and-excitation style channel gate for 512-channel feature maps.

    Global-average-pools the input to one scalar per channel, computes a
    sigmoid gate through a 512->32->512 bottleneck MLP, and returns the pooled
    vector reweighted by that gate (shape: batch x 512).
    """

    def __init__(self):
        super().__init__()
        # Squeeze: each channel collapses to a single scalar.
        self.gap = nn.AdaptiveAvgPool2d(1)
        # Excite: bottleneck MLP producing per-channel gates in (0, 1).
        self.attention = nn.Sequential(
            nn.Linear(512, 32),
            nn.BatchNorm1d(32),
            nn.ReLU(inplace=True),
            nn.Linear(32, 512),
            nn.Sigmoid(),
        )

    def forward(self, sa):
        pooled = self.gap(sa)
        pooled = pooled.view(pooled.size(0), (- 1))
        gate = self.attention(pooled)
        return pooled * gate
class FeatureConfig(object):
    """Schema entry for one TF example feature: name, TF dtype, size and the
    padding/default value used when the feature is missing."""

    def __init__(self, name, dtype, size, default_value=None):
        # Only these three dtype names are supported.
        assert (dtype in ('int64', 'float32', 'string'))
        dtype_map = {'int64': tf.int64, 'float32': tf.float32, 'string': tf.string}
        self.name = name
        self.dtype = dtype_map[dtype]
        self.size = size
        if default_value is not None:
            self.default_value = default_value
        elif dtype == 'string':
            # String features default to the '-1' sentinel.
            self.default_value = '-1'
        else:
            self.default_value = 0
class DisconnectTLV(TLV):
    """TLV (type 1) signalling an orderly disconnect; it carries no payload."""

    typ = 1

    def __init__(self):
        super(DisconnectTLV, self).__init__()

    def getPayload(self):
        # A disconnect message has no body.
        return b''

    @classmethod
    def parsePayload(cls, data):
        """Parse *data* into a DisconnectTLV instance.

        BUG FIX: restored the missing ``@classmethod`` — the method takes
        ``cls`` and returns ``cls()``, but without the decorator a call such
        as ``DisconnectTLV.parsePayload(data)`` would bind ``data`` to ``cls``.

        Raises:
            TypeError: if *data* is non-empty.
        """
        if (len(data) > 0):
            raise TypeError('DisconnectTLV must not contain data. got {0!r}'.format(data))
        return cls()
class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams: argparse.Namespace, num_labels=None, mode='base', config=None, tokenizer=None, model=None, **config_kwargs):
        """Initialise config/tokenizer/model from *hparams* unless explicit
        instances are supplied.

        Args:
            hparams: parsed command-line arguments.
            num_labels: optional classification-head size forwarded to the config.
            mode: key into MODEL_MODES selecting the auto-model class.
            config / tokenizer / model: pre-built instances that bypass the
                corresponding ``from_pretrained`` lookups when given.
            **config_kwargs: extra keyword arguments for AutoConfig.
        """
        super().__init__()
        # Exposes the arguments via self.hparams and stores them in checkpoints.
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = (self.hparams.cache_dir if self.hparams.cache_dir else None)
        if (config is None):
            self.config = AutoConfig.from_pretrained((self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path), **({'num_labels': num_labels} if (num_labels is not None) else {}), cache_dir=cache_dir, **config_kwargs)
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
        for p in extra_model_params:
            # Only override when the CLI supplied a truthy value; the chosen
            # architecture must actually support the knob.
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if (tokenizer is None):
            self.tokenizer = AutoTokenizer.from_pretrained((self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path), cache_dir=cache_dir)
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if (model is None):
            # from_tf is inferred from a '.ckpt' marker in the model path.
            self.model = self.model_type.from_pretrained(self.hparams.model_name_or_path, from_tf=bool(('.ckpt' in self.hparams.model_name_or_path)), config=self.config, cache_dir=cache_dir)
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        """Replace self.model with weights loaded via ``from_pretrained``."""
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        """Build the configured LR scheduler around self.opt.

        NOTE(review): reads ``self.opt``, which is set by configure_optimizers
        — this must run after that hook.
        """
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
        # Lightning scheduler config: advance once per optimizer step.
        scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare the optimizer and LR schedule.

        Weight decay is disabled for biases and LayerNorm weights; the
        optimizer is Adafactor or AdamW depending on hparams.adafactor.
        """
        model = self.model
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.hparams.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
        if self.hparams.adafactor:
            # Fixed (non-relative) step size so the external scheduler governs LR.
            optimizer = Adafactor(optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
        else:
            optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        # Stashed for get_lr_scheduler().
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return ([optimizer], [scheduler])
    def test_step(self, batch, batch_nb):
        # Test-time behaviour mirrors validation.
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        # NOTE(review): delegates to `validation_end`, which subclasses must
        # provide (not visible in this file) — confirm the hook name.
        return self.validation_end(outputs)
def total_steps(self) -> int:
num_devices = max(1, self.hparams.gpus)
effective_batch_size = ((self.hparams.train_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
return ((self.dataset_size / effective_batch_size) * self.hparams.max_epochs)
    def setup(self, stage):
        """Record dataset_size (and cache the train loader) before a run starts."""
        if (stage == 'test'):
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            # Build the train loader eagerly so total_steps() can be computed.
            self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool=False):
        """Task-specific hook: subclasses must return a DataLoader for *type_path*."""
        raise NotImplementedError('You must implement this for your task')
    def train_dataloader(self):
        # Returns the loader cached by setup().
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        # Cache-file path: cached_<mode>_<model-name>_<max_seq_length>, where
        # the model name is the last non-empty path segment.
        return os.path.join(self.hparams.data_dir, 'cached_{}_{}_{}'.format(mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length)))
.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[(str, Any)]) -> None:
save_path = self.output_dir.joinpath('best_tfmr')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
def add_model_specific_args(parser, root_dir):
    """Register the model/optimization CLI arguments shared by all tasks.

    NOTE(review): defined without `self` — presumably decorated as a
    @staticmethod outside this view. `root_dir` is unused here, and no
    `return parser` is visible in this chunk (the definition may be
    truncated); callers should rely on argparse mutating `parser` in place.
    """
    parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models')
    parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from huggingface.co')
    parser.add_argument('--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config')
    parser.add_argument('--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config')
    parser.add_argument('--dropout', type=float, help='Dropout probability (Optional). Goes into model.config')
    parser.add_argument('--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
    parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
    # Stored under hparams.max_epochs so Lightning picks it up directly.
    parser.add_argument('--num_train_epochs', dest='max_epochs', default=3, type=int)
    parser.add_argument('--train_batch_size', default=32, type=int)
    parser.add_argument('--eval_batch_size', default=32, type=int)
    parser.add_argument('--adafactor', action='store_true')
def load_collectors_from_paths(paths):
    """Load Diamond collector classes from the given directories.

    `paths` may be a comma-separated string or a sequence of directory
    paths. Each directory is scanned recursively; every importable '*.py'
    module (not starting with 'test' or '.') contributes the classes found
    by get_collectors_from_module().

    Returns a dict mapping collector name -> class, or None when `paths`
    is None (NOTE(review): callers must cope with the None return).
    """
    collectors = {}
    if (paths is None):
        return
    # Python-2 era code: `basestring` and the `imp` module below.
    if isinstance(paths, basestring):
        paths = paths.split(',')
    # NOTE(review): under Python 3 map() returns a one-shot iterator that
    # load_include_path() would exhaust, leaving the for-loop below with
    # nothing to iterate — fine under Python 2 where map() returns a list.
    paths = map(str.strip, paths)
    load_include_path(paths)
    for path in paths:
        if (not os.path.exists(path)):
            raise OSError(('Directory does not exist: %s' % path))
        # NOTE(review): this early `return` abandons all remaining paths as
        # soon as a tests/fixtures directory is seen — `continue` looks like
        # the intent; confirm before changing.
        if (path.endswith('tests') or path.endswith('fixtures')):
            return collectors
        for f in os.listdir(path):
            fpath = os.path.join(path, f)
            if os.path.isdir(fpath):
                # Recurse into subdirectories and merge their collectors.
                subcollectors = load_collectors_from_paths([fpath])
                for key in subcollectors:
                    collectors[key] = subcollectors[key]
            elif (os.path.isfile(fpath) and (len(f) > 3) and (f[(- 3):] == '.py') and (f[0:4] != 'test') and (f[0] != '.')):
                modname = f[:(- 3)]
                (fp, pathname, description) = imp.find_module(modname, [path])
                try:
                    mod = imp.load_module(modname, fp, pathname, description)
                except (KeyboardInterrupt, SystemExit) as err:
                    logger.error(('System or keyboard interrupt while loading module %s' % modname))
                    # Propagate an explicit exit request; otherwise re-raise
                    # as KeyboardInterrupt so the caller can abort cleanly.
                    if isinstance(err, SystemExit):
                        sys.exit(err.code)
                    raise KeyboardInterrupt
                except Exception:
                    # Best effort: a broken collector module is logged and skipped.
                    logger.error('Failed to import module: %s. %s', modname, traceback.format_exc())
                else:
                    for (name, cls) in get_collectors_from_module(mod):
                        collectors[name] = cls
                finally:
                    # imp.find_module returns an open file handle; always close it.
                    if fp:
                        fp.close()
    return collectors
def test_async_subproc_command_eq():
    """Equality semantics of Command: identity, value, kwargs, and results."""
    cmd = Command('cmd')
    assert cmd == cmd
    assert Command('cmd') == Command('cmd')
    assert Command('cmd') != Command('other')
    assert Command([1, 2, 3]) == Command([1, 2, 3])
    assert Command([1, 2, 3]) != Command([11, 22, 33])
    # Not equal to a plain string, even with the same text.
    assert Command('arb') != 'arb'
    # All keyword options participate in equality.
    full = dict(is_shell=True, cwd='cwd', is_save=False, is_text=True,
                stdout='out', stderr='err', encoding='enc', append=True)
    assert Command('cmd', **full) == Command('cmd', **full)
    assert Command('cmd', **dict(full, is_shell=False, append=False)) != Command('cmd', **dict(full, is_shell=False))
    assert Command('cmd', **dict(full, encoding='enc1')) != Command('cmd', **dict(full, encoding='enc2'))
    saved = dict(is_shell=True, cwd='cwd', is_save=True, is_text=True, encoding='enc', append=True)
    assert Command('cmd', **saved) == Command('cmd', **saved)
    # Container equality delegates to element equality.
    assert [Command('one')] == [Command('one')]
    assert [Command('one')] != [Command('two')]
    # Accumulated results are part of the comparison too.
    first = Command('cmd')
    first.results.append('one')
    second = Command('cmd')
    second.results.append('one')
    assert first == second
    second.results.append('two')
    assert first != second
def tensor2edge(tensor):
    """Display an edge-logit tensor as an image in a blocking OpenCV window.

    Squeezes away extra dims, applies sigmoid, normalizes to uint8 via the
    project helper `image_normalization`, inverts (edges dark on light),
    converts to BGR and shows it until a key is pressed.
    """
    print(tensor.shape)  # NOTE(review): debug leftover — consider removing
    tensor = (torch.squeeze(tensor) if (len(tensor.shape) > 2) else tensor)
    tmp = torch.sigmoid(tensor)
    tmp = tmp.cpu().detach().numpy()
    # `image_normalization` is a project helper; presumably maps to [0, 255] — confirm.
    tmp = np.uint8(image_normalization(tmp))
    tmp = cv.bitwise_not(tmp)
    tmp = cv.cvtColor(tmp, cv.COLOR_GRAY2BGR)
    cv.imshow('test_img', tmp)
    cv.waitKey(0)  # blocks until a key press
    cv.destroyAllWindows()
def _results_to_dataframe(results: ExecutableGroupResult, func: Callable[([ExecutableResult, QuantumRuntimeConfiguration, SharedRuntimeInfo], Dict)]) -> pd.DataFrame:
    """Build a DataFrame with one row per executable result.

    `func` maps (result, runtime configuration, shared runtime info) to a
    row dict; the runtime configuration and shared info are common to every
    row of the group.
    """
    rows = [
        func(result, results.runtime_configuration, results.shared_runtime_info)
        for result in results.executable_results
    ]
    return pd.DataFrame(rows)
class OAuth2PkceS256Test(OAuth2Test):
    """Exercises the OAuth2 login flow with S256 PKCE enabled."""

    def do_login(self):
        """Run the base login flow and verify the PKCE S256 handshake.

        Checks that the authorization request carried a code_challenge with
        method 's256', and that the access-token request's code_verifier
        hashes back to that same challenge.
        """
        user = super().do_login()
        seen = latest_requests()
        authorize = [req for req in seen if (self.backend.authorization_url() in req.url)][0]
        challenge = authorize.querystring.get('code_challenge')[0]
        challenge_method = authorize.querystring.get('code_challenge_method')[0]
        self.assertIsNotNone(challenge)
        self.assertEqual(challenge_method, 's256')
        token_request = [req for req in seen if (self.backend.access_token_url() in req.url)][0]
        verifier = token_request.parsed_body.get('code_verifier')[0]
        self.assertEqual(self.backend.generate_code_challenge(verifier, 's256'), challenge)
        return user
def garbage_collect_storage(storage_id_whitelist):
    """Garbage-collect orphaned ImageStorage rows among the given candidates.

    For each candidate id: inside a transaction, re-check that the storage
    is orphaned, delete its placement/signature/storage rows, and collect
    the blob paths they referenced. Afterwards, physically remove each path
    unless its checksum is special or still referenced elsewhere.

    Returns the set of storage ids whose rows were actually deleted.
    """
    if (len(storage_id_whitelist) == 0):
        return []

    def placements_to_filtered_paths_set(placements_list):
        # Map placements to (location name, layer path, checksum) triples that
        # are safe to delete: CAS-addressed paths are kept when their checksum
        # is still referenced by an ImageStorage or ApprBlob row.
        if (not placements_list):
            return set()
        with ensure_under_transaction():
            content_checksums = set([placement.storage.content_checksum for placement in placements_list if placement.storage.cas_path])
            unreferenced_checksums = set()
            if content_checksums:
                # Checksums still referenced by other ImageStorage rows.
                query = ImageStorage.select(ImageStorage.content_checksum).where((ImageStorage.content_checksum << list(content_checksums)))
                is_referenced_checksums = set([image_storage.content_checksum for image_storage in query])
                if is_referenced_checksums:
                    logger.warning('GC attempted to remove CAS checksums %s, which are still IS referenced', is_referenced_checksums)
                # Checksums referenced by application-registry blobs.
                query = ApprBlob.select(ApprBlob.digest).where((ApprBlob.digest << list(content_checksums)))
                appr_blob_referenced_checksums = set([blob.digest for blob in query])
                if appr_blob_referenced_checksums:
                    logger.warning('GC attempted to remove CAS checksums %s, which are ApprBlob referenced', appr_blob_referenced_checksums)
                unreferenced_checksums = ((content_checksums - appr_blob_referenced_checksums) - is_referenced_checksums)
            return {(get_image_location_for_id(placement.location_id).name, get_layer_path(placement.storage), placement.storage.content_checksum) for placement in placements_list if ((not placement.storage.cas_path) or (placement.storage.content_checksum in unreferenced_checksums))}
    logger.debug('Garbage collecting storages from candidates: %s', storage_id_whitelist)
    paths_to_remove = []
    orphaned_storage_ids = set()
    for storage_id_to_check in storage_id_whitelist:
        logger.debug('Garbage collecting storage %s', storage_id_to_check)
        # One transaction per storage: the orphan check and the row deletions
        # must be atomic to avoid racing with new references.
        with db_transaction():
            if (not _is_storage_orphaned(storage_id_to_check)):
                continue
            orphaned_storage_ids.add(storage_id_to_check)
            placements_to_remove = list(ImageStoragePlacement.select(ImageStoragePlacement, ImageStorage).join(ImageStorage).where((ImageStorage.id == storage_id_to_check)))
            deleted_image_storage_placement = 0
            if placements_to_remove:
                deleted_image_storage_placement = ImageStoragePlacement.delete().where((ImageStoragePlacement.storage == storage_id_to_check)).execute()
            deleted_image_storage_signature = ImageStorageSignature.delete().where((ImageStorageSignature.storage == storage_id_to_check)).execute()
            deleted_image_storage = ImageStorage.delete().where((ImageStorage.id == storage_id_to_check)).execute()
            # Resolve which physical paths these placements made removable.
            paths_to_remove.extend(placements_to_filtered_paths_set(placements_to_remove))
        gc_table_rows_deleted.labels(table='ImageStorageSignature').inc(deleted_image_storage_signature)
        gc_table_rows_deleted.labels(table='ImageStorage').inc(deleted_image_storage)
        gc_table_rows_deleted.labels(table='ImageStoragePlacement').inc(deleted_image_storage_placement)
    # De-duplicate; the same path may come from several placements.
    paths_to_remove = list(set(paths_to_remove))
    for (location_name, image_path, storage_checksum) in paths_to_remove:
        if storage_checksum:
            # Never delete shared/special blobs.
            if (storage_checksum in SPECIAL_BLOB_DIGESTS):
                continue
            # Final re-check: another storage row may have (re)claimed the blob.
            if ImageStorage.select().where((ImageStorage.content_checksum == storage_checksum)).exists():
                continue
        logger.debug('Removing %s from %s', image_path, location_name)
        config.store.remove({location_name}, image_path)
        gc_storage_blobs_deleted.inc()
    return orphaned_storage_ids
def lr_setter(optimizer, epoch, args, bl=False):
    """Assign this epoch's learning rate to every optimizer parameter group.

    Three mutually exclusive schedules:
      * bl=True  — LR starts at args.lrbl and decays 10x every
                   args.epochb * 0.5 epochs;
      * args.cos — shifted half-cosine decay of args.lr over args.epochs;
      * default  — step decay: args.lr times 0.1 at each of the two
                   milestones in args.epochs_decay.
    """
    if bl:
        new_lr = args.lrbl * (0.1 ** (epoch // (args.epochb * 0.5)))
    elif args.cos:
        new_lr = args.lr * ((0.01 + math.cos(0.5 * ((math.pi * epoch) / args.epochs))) / 1.01)
    else:
        new_lr = args.lr
        if epoch >= args.epochs_decay[0]:
            new_lr *= 0.1
        if epoch >= args.epochs_decay[1]:
            new_lr *= 0.1
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def remove_s(data):
    """Strip trailing "'s" from eligible tokens across a list of texts.

    Builds a replacement dict from the unknown-token vocabulary (tokens
    ending in "'s", case-insensitive, that pass `_check_replace`) and
    applies it token-by-token. Relies on module globals/helpers: `verbose`,
    `_check_vocab`, `_check_replace`, `_make_dict_cleaning`, `_print_dict`.
    """
    if verbose:
        print(('#' * 10), 'Step - Remove "s:')
    local_vocab = {}
    temp_vocab = _check_vocab(data, local_vocab, response='unknown_list')
    temp_vocab = [k for k in temp_vocab if _check_replace(k)]
    # token -> token minus its final two characters ("'s").
    temp_dict = {k: k[:(- 2)] for k in temp_vocab if (_check_replace(k) and (k.lower()[(- 2):] == "'s"))}
    data = list(map((lambda x: ' '.join([_make_dict_cleaning(i, temp_dict) for i in x.split()])), data))
    if verbose:
        _print_dict(temp_dict)
    return data
class VirtualenvRole(Role):
    """Provy role that provisions and manages Python virtualenvs for a user.

    Environments live under `base_directory` (defaults to
    `<user home>/.virtualenvs`). The instance is also usable as a context
    manager via `__call__` to run commands inside an activated env.
    """

    def __init__(self, prov, context):
        super(VirtualenvRole, self).__init__(prov, context)
        # Remote user that owns the environments; drives the home directory.
        self.user = context['user']
        # Optional override for where envs are created (None -> user default).
        self.base_directory = None

    def get_base_directory(self):
        """Directory that holds all virtualenvs (override or user default)."""
        return (self.base_directory or os.path.join(self.__get_user_dir(), '.virtualenvs'))

    def __get_user_dir(self):
        """Home directory of `self.user` ('/root' for root)."""
        if (self.user == 'root'):
            return '/root'
        else:
            return ('/home/%s' % self.user)

    def env_dir(self, env_name):
        """Full path of the named virtualenv."""
        return os.path.join(self.get_base_directory(), env_name)

    def __call__(self, env_name, system_site_packages=False):
        """Context manager: create the env if missing, then run the body
        with the env activated (via fabric's `prefix`)."""
        from fabric.api import prefix
        if (not self.env_exists(env_name)):
            self.create_env(env_name, system_site_packages=system_site_packages)
        with prefix(('source %s/bin/activate' % self.env_dir(env_name))):
            (yield)

    def provision(self):
        """Install the virtualenv tooling through pip on the remote host."""
        from provy.more.debian import PipRole
        with self.using(PipRole) as pip:
            pip.ensure_package_installed('virtualenv')
            pip.ensure_package_installed('virtualenvwrapper')

    def create_env(self, env_name, system_site_packages=False):
        """Create the named virtualenv remotely; returns its directory."""
        env_dir = self.env_dir(env_name)
        site_packages_arg = ('--system-site-packages ' if system_site_packages else '')
        self.execute(('virtualenv %s%s' % (site_packages_arg, env_dir)), user=self.user)
        return env_dir

    def env_exists(self, env_name):
        """True when the env's directory already exists on the remote host."""
        return self.remote_exists_dir(self.env_dir(env_name))
class TestDarnerCollector(CollectorTestCase):
    """Tests for DarnerCollector using canned stats fixtures."""

    def setUp(self):
        config = get_collector_config('DarnerCollector', {'interval': 10, 'hosts': ['localhost:22133']})
        self.collector = DarnerCollector(config, None)

    def test_import(self):
        self.assertTrue(DarnerCollector)

    # NOTE(review): the next line looks like a truncated decorator —
    # presumably "@patch.object(Collector, 'publish')", which would supply
    # the publish_mock argument below. Confirm against the original source.
    (Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        """Two collect() passes: deltas are published only on the second."""
        patch_raw_stats1 = patch.object(DarnerCollector, 'get_raw_stats', Mock(return_value=self.getFixture('stats1').getvalue()))
        patch_raw_stats2 = patch.object(DarnerCollector, 'get_raw_stats', Mock(return_value=self.getFixture('stats2').getvalue()))
        # First pass only primes the collector; nothing should be published.
        patch_raw_stats1.start()
        self.collector.collect()
        patch_raw_stats1.stop()
        self.assertPublishedMany(publish_mock, {})
        # Second pass against the second fixture produces the metrics below.
        patch_raw_stats2.start()
        self.collector.collect()
        patch_raw_stats2.stop()
        metrics = {'localhost.uptime': 2422175, 'localhost.total_items': 20, 'localhost.curr_connections': 2, 'localhost.total_connections': 15, 'localhost.cmd_get': 100, 'localhost.cmd_set': 150, 'localhost.queues.test1.items': 2, 'localhost.queues.test1.waiters': 4, 'localhost.queues.test1.open_transactions': 8, 'localhost.queues.test_2.items': 16, 'localhost.queues.test_2.waiters': 32, 'localhost.queues.test_2.open_transactions': 64, 'localhost.queues.test_3_bar.items': 128, 'localhost.queues.test_3_bar.waiters': 256, 'localhost.queues.test_3_bar.open_transactions': 512}
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
def tent_generator(H, slope=1, bound=0.6):
    """Generate a synthetic H x H 'tent' surface dataset.

    The surface rises linearly (gradient `slope`) toward the x-axis inside
    |X| < bound, |Y| < bound and is flat elsewhere. Returns a project `Data`
    object carrying the depth map, gradients, normals, vertices and mesh.
    """
    data = Data()
    # Regular grid on [-1, 1]^2; YY flipped so rows run top-to-bottom.
    (x, data.step_size) = np.linspace((- 1), 1, num=H, retstep=True)
    y = np.linspace((- 1), 1, num=H)
    (XX, YY) = np.meshgrid(x, y)
    YY = np.flip(YY, axis=0)
    # z: height field; zx, zy: its partial derivatives.
    z = np.zeros_like(XX)
    zx = np.zeros_like(XX)
    zy = np.zeros_like(XX)
    # Upper half of the tent (0 < Y < bound): height decreases with Y.
    mask_top = np.logical_and.reduce(((XX > (- bound)), (XX < bound), (YY > 0), (YY < bound)))
    zy[mask_top] = (- slope)
    z[mask_top] = ((bound * slope) - (slope * YY[mask_top]))
    # Lower half (-bound < Y < 0): height increases with Y (mirror image).
    mask_bottom = np.logical_and.reduce(((XX > (- bound)), (XX < bound), (YY < 0), (YY > (- bound))))
    zy[mask_bottom] = slope
    z[mask_bottom] = ((bound * slope) + (slope * YY[mask_bottom]))
    # Every pixel is valid for this synthetic case.
    data.mask = np.ones((H, H), bool)
    data.depth_gt = (- z)
    data.p = zx
    data.q = zy
    # Normals from the gradient field; camera_to_object/normalize_normal_map
    # are project helpers (convention assumed from their names — confirm).
    n = normalize_normal_map(np.stack(((- zx), (- zy), np.ones_like(zx)), axis=(- 1)))
    data.n = camera_to_object(n)
    data.n[(~ data.mask)] = np.nan
    # Visualization copy mapped from [-1, 1] to [0, 1]; invalid pixels white.
    data.n_vis = ((n + 1) / 2)
    data.n_vis[(~ data.mask)] = 1
    # Vertex positions: grid coordinates shifted to [0, 2] plus depth.
    data.vertices = np.zeros_like(data.n)
    data.vertices[(..., 0)] = (YY + 1)
    data.vertices[(..., 1)] = (XX + 1)
    data.vertices[(..., 2)] = data.depth_gt
    data.vertices = data.vertices[data.mask]
    data.fname = 'tent'
    data.projection = 'orthographic'
    data.construct_mesh()
    return data
class SpatialWeighting(nn.Module):
    """Channel-attention style weighting module.

    Globally average-pools the input to per-channel statistics, squeezes the
    channel count by `ratio` and restores it through two 1x1 ConvModules,
    then rescales the input element-wise by the result. By default the first
    conv uses ReLU and the second Sigmoid.
    """

    def __init__(self, channels, ratio=16, conv_cfg=None, norm_cfg=None, act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'))):
        super().__init__()
        # Allow a single act_cfg dict to be shared by both convs.
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert (len(act_cfg) == 2)
        assert mmcv.is_tuple_of(act_cfg, dict)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # Squeeze: channels -> channels/ratio.
        self.conv1 = ConvModule(in_channels=channels, out_channels=int((channels / ratio)), kernel_size=1, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg[0])
        # Excite: channels/ratio -> channels.
        self.conv2 = ConvModule(in_channels=int((channels / ratio)), out_channels=channels, kernel_size=1, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg[1])

    def forward(self, x):
        """Return `x` rescaled by the learned per-channel weights."""
        out = self.global_avgpool(x)
        out = self.conv1(out)
        out = self.conv2(out)
        return (x * out)
def get_restart_epoch() -> Union[(int, str)]:
    """Determine the epoch to resume from, based on job type/state.

    * restart or 'test' job: parse the last epoch recorded in
      `<job_dir>generation.log` (chars [6:] of the first column, i.e. the
      text after 'Epoch '), scanning rows from the end.
    * 'fine-tune' job: use the configured generation epoch.
    * otherwise: start from 0.

    Reads everything from the project `constants` module.
    """
    if (constants.restart or (constants.job_type == 'test')):
        generation_path = (constants.job_dir + 'generation.log')
        epoch = 'NA'
        row = (- 1)
        # Walk backwards until a row's first column parses as an int epoch.
        # NOTE(review): if no row ever parses, this only terminates when
        # read_row fails on an out-of-range row — confirm that behavior.
        while (not isinstance(epoch, int)):
            epoch_key = read_row(path=generation_path, row=row, col=0)
            try:
                epoch = int(epoch_key[6:])
            except ValueError:
                epoch = 'NA'
            row -= 1
    elif (constants.job_type == 'fine-tune'):
        epoch = constants.generation_epoch
    else:
        epoch = 0
    return epoch
def main():
    """Tabulate training runs under 'logs/', sorted by best valid/mean_iu.

    For each run directory, picks the row of log.csv with the highest
    validation mean IU, joins it with config.yaml metadata, and prints a
    summary table. Runs with missing/corrupt logs or configs are skipped.
    """
    logs_dir = 'logs'
    headers = ['name', 'model', 'git_hash', 'pretrained_model', 'epoch', 'iteration', 'valid/mean_iu']
    rows = []
    for log in os.listdir(logs_dir):
        log_dir = osp.join(logs_dir, log)
        if (not osp.isdir(log_dir)):
            continue
        try:
            log_file = osp.join(log_dir, 'log.csv')
            df = pd.read_csv(log_file)
            # Drop training-phase columns; only validation metrics matter here.
            columns = [c for c in df.columns if (not c.startswith('train'))]
            df = df[columns]
            df = df.set_index(['epoch', 'iteration'])
            index_best = df['valid/mean_iu'].idxmax()
            row_best = df.loc[index_best].dropna()
            with open(osp.join(log_dir, 'config.yaml')) as f:
                # BUG FIX: yaml.load without an explicit Loader is deprecated
                # and unsafe; safe_load suffices for plain config files.
                config = yaml.safe_load(f)
        except Exception:
            # Best effort: skip runs whose logs/config cannot be read.
            continue
        rows.append([osp.join(logs_dir, log), config['model'], config['git_hash'], config.get('pretrained_model', None), row_best.index[0][0], row_best.index[0][1], (100 * row_best['valid/mean_iu'].values[0])])
    rows.sort(key=(lambda x: x[(- 1)]), reverse=True)
    print(tabulate.tabulate(rows, headers=headers))
# NOTE(review): the next line looks like a truncated decorator — presumably
# '@Stream.register_api()' registering the class below as a stream node.
# As written it is a bare call to an undefined name. Confirm against the
# original source.
_api()
class partition_unique(Stream):
    """Partition a stream into tuples of `n` elements that are unique by key.

    Elements are buffered keyed by `key(x)` (or `x[key]` when `key` is not
    callable). With keep='first' (default) later duplicates are ignored;
    with keep='last' a duplicate replaces the earlier element. When the
    buffer reaches `n` distinct keys it is emitted as a tuple and reset.
    """
    _graphviz_shape = 'diamond'

    def __init__(self, upstream, n: int, key: Union[(Hashable, Callable[([Any], Hashable)])]=identity, keep: str='first', **kwargs):
        self.n = n
        self.key = key
        self.keep = keep
        # Buffers map key -> element (and its metadata) until n keys collect.
        self._buffer = {}
        self._metadata_buffer = {}
        Stream.__init__(self, upstream, **kwargs)

    def _get_key(self, x):
        """Key for element x: call `key` if callable, else index with it."""
        if callable(self.key):
            return self.key(x)
        else:
            return x[self.key]

    def update(self, x, who=None, metadata=None):
        """Buffer x; emit the buffered tuple once `n` unique keys are held."""
        self._retain_refs(metadata)
        y = self._get_key(x)
        if (self.keep == 'last'):
            # Re-insert so the element moves to the end and is replaced.
            self._buffer.pop(y, None)
            self._metadata_buffer.pop(y, None)
            self._buffer[y] = x
            self._metadata_buffer[y] = metadata
        elif (y not in self._buffer):
            # keep='first': only the first occurrence of a key is stored.
            self._buffer[y] = x
            self._metadata_buffer[y] = metadata
        if (len(self._buffer) == self.n):
            # Emit and reset both buffers atomically via tuple assignment.
            (result, self._buffer) = (tuple(self._buffer.values()), {})
            (metadata_result, self._metadata_buffer) = (list(self._metadata_buffer.values()), {})
            ret = self._emit(result, metadata_result)
            self._release_refs(metadata_result)
            return ret
        else:
            return []
def pip_install(package, remove=False):
    """Install (or uninstall, when remove=True) a pip package for FreeCAD.

    Prefers running pip through the FreeCADCmd executable (feeding it a
    small script on stdin so pip runs inside FreeCAD's interpreter); falls
    back to a plain `python -m pip` invocation. Output is echoed through
    the project's print_msg/print_warn/print_err helpers.
    """
    # Temporarily toggle the report view so the user sees pip's output.
    if (not report_view_param()):
        report_view_param(True)
        QTimer.singleShot(2000, (lambda : report_view_param(False)))
    postfix = ('.exe' if (platform.system() == 'Windows') else '')
    bin_path = os.path.dirname(sys.executable)
    # Locate the FreeCAD console binary (capitalization differs per platform).
    exe_path = os.path.join(bin_path, ('FreeCADCmd' + postfix))
    if (not os.path.exists(exe_path)):
        exe_path = os.path.join(bin_path, ('freecadcmd' + postfix))
    if remove:
        pip_args = ['pip', 'uninstall', '-y']
    else:
        pip_args = ['pip', 'install', '--user']
    pip_args += ['--disable-pip-version-check', package]
    if os.path.exists(exe_path):
        # Script piped to FreeCADCmd's stdin: run pip's main() with our argv,
        # then `pip show` the package, and exit with pip's return code.
        stdin = f'''
import sys, site, os
path=site.getusersitepackages()
print(f'user site: {{path}}')
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv = {pip_args}
    ret = main()
    sys.argv = ['pip', 'show', '--disable-pip-version-check', '{package}']
    main()
    sys.exit(ret)
'''
        args = [exe_path]
        print_msg((' '.join(pip_args) + '\n'))
    else:
        # Fallback: invoke `python -m pip ...` directly.
        stdin = None
        exe_path = os.path.join(bin_path, ('python' + postfix))
        if (not os.path.exists(exe_path)):
            bin_path = FreeCAD.ConfigGet('BinPath')
            exe_path = os.path.join(bin_path, ('python' + postfix))
            if (not os.path.exists(exe_path)):
                exe_path = ('python3' + postfix)
        args = ([exe_path, '-m'] + pip_args)
        print_msg((' '.join(args) + '\n'))
    try:
        if stdin:
            proc = subp.Popen(args, stdin=subp.PIPE, stdout=subp.PIPE, stderr=subp.PIPE)
            (out, err) = proc.communicate(input=stdin.encode('utf8'))
        else:
            proc = subp.Popen(args, stdout=subp.PIPE, stderr=subp.PIPE)
            (out, err) = proc.communicate()
        lines = out.decode('utf8').replace('\r\n', '\n').split('\n')
        for line in lines:
            print_msg((line + '\n'))
        if err:
            # Classify each stderr line; the chosen print level is sticky
            # until the next 'warning'/'error' keyword is seen.
            print_func = print_err
            lines = err.decode('utf8').replace('\r\n', '\n').split('\n')
            for msg in lines:
                m = msg.lower()
                if ('warning' in m):
                    print_func = print_warn
                elif any(((key in m) for key in ('exception', 'error'))):
                    print_func = print_err
                print_func((msg + '\n'))
    except Exception as e:
        msg = str(e)
        if (not msg):
            msg = 'Failed'
        print_err((msg + '\n'))
class CASClientV1(CASClientBase):
    """CAS protocol version 1 client (plain-text validate endpoint)."""
    logout_redirect_param_name = 'url'

    def verify_ticket(self, ticket):
        """Verify a ticket against the CAS v1 `validate` endpoint.

        Returns a (username, attributes, pgtiou) triple; CAS v1 has no
        attributes or proxy-granting tickets, so the last two are None.
        On failure returns (None, None, None).
        """
        params = [('ticket', ticket), ('service', self.service_url)]
        url = ((urllib_parse.urljoin(self.server_url, 'validate') + '?') + urllib_parse.urlencode(params))
        page = self.session.get(url, stream=True, verify=self.verify_ssl_certificate)
        try:
            # First line is 'yes'/'no'; on success the second line holds the username.
            page_iterator = page.iter_lines(chunk_size=8192)
            # NOTE(review): with requests on Python 3, iter_lines yields bytes
            # by default, so comparing against the str 'yes' would always be
            # False — confirm a decode step / Python 2 context elsewhere.
            verified = next(page_iterator).strip()
            if (verified == 'yes'):
                return (next(page_iterator).strip(), None, None)
            else:
                return (None, None, None)
        finally:
            page.close()
class TestBaseEncode(ElectrumTestCase):
    """Round-trip tests for base43/base58/base58check encoding helpers."""

    def test_base43(self):
        """base_encode/base_decode round-trips a raw transaction in base 43."""
        tx_hex = 'cd0e96f9ca202e017ca3465e3c13373c0df3a4cdd91c1fd02ea42a1a65d2afdffffff757da7cf8322e5063785e2d8ada74702d2648fa2add2d533ba83c52eb110dffdffffff02d07eb544c86eaf95e3bb3b6d2cabb12ab40fc59cad9cace0d066fbfcf150a5a1bbc4f312cd2eb080e8d8a47e5f2ce1a63b23215e54fb5aca9856bf10a950810abceeabc9a86e6ba533e130686e3d7863971b9377e7c658a0220288a69ef2b958a7c2ecfa376841d4a13817ed24fa9a0e0a6b9cb48e6439794ce291735f83ff8de47301bb80fa4724926a34d67e413d8ff8817cf885978f7afed55fe2e86cf95cc41eb7965a3dfcfac4a3c433d41a203e6d685a459e70eac899238c20a0121023d4c9deae1aacf3f822dd97a28deaec7d4e4ff97be746d124a63d20e582f5b290a971600'
        tx_bytes = bfh(tx_hex)
        tx_base43 = base_encode(tx_bytes, base=43)
        self.assertEqual('3E2DH7.J3PKVZJ3RCOXQVS3Y./6-WE.75DDU0K58-0N1FRL565N8ZH-DG1Z.1IGWTE5HK8F7PWH5P8+V3XGZZ6GQBPHNDE+RD8CAQVV1/6PQEMJIZTGPMIJ93B8P$QX+Y2R:TGT9QW8S89U4N2.+FUT8VG+34USI/N/JJ3CE*KLSW:REE8T5Y*9:U6515JIUR$6TODLYHSDE3B5DAF:5TF7V*VAL3G40WBOM0DO2+CFKTTM$G-SO:8U0EW:M8V:4*R9ZDX$B1IRBP9PLMDK8H801PNTFB4$HL1+/U3F61P$4N:UAO88:N5D+J:HI4YR8IM:3A7K1YZ9VMRC/47$6GGW5JEL1N690TDQ4XW+TWHD:V.1.630QK*JN/.EITVU80YS3.8LWKO:2STLWZAVHUXFHQ..NZ0:.J/FTZM.KYDXIE1VBY7/:PHZMQ$.JZQ2.XT32440X/HM+UY/7QP4I+HTD9.DUSY-8R6HDR-B8/PF2NP7I2-MRW9VPW3U9.S0LQ.*221F8KVMD5ANJXZJ8WV4UFZ4R.$-NXVE+-FAL:WFERGU+WHJTHAP', tx_base43)
        self.assertEqual(tx_bytes, base_decode(tx_base43, base=43))

    def test_base58(self):
        """base_encode/base_decode round-trips arbitrary bytes in base 58."""
        data_hex = '0cd394befc58a5be0189f3ceb6a41c8da023b099ce547dd4d8071ed6ed647259fba8c26382edbf5165dfd2404e7a8885d88437db16947a116e451a5d1325e3fd075f9d370120d2ab537af69f32e74fc0ba53aaaab3ac95cfea7'
        data_bytes = bfh(data_hex)
        data_base58 = base_encode(data_bytes, base=58)
        self.assertEqual('VuvZ2K5UEcXCVcogny7NH4Evd9UfeYipsTdWuU4jLDhyaESijKtrGWZTFzVZJPjaoC9jFBs3SFtarhDhQhAxkXosUD8PmUb5UXW1tafcoPiCp8jHy7Fe2CUPXAbYuMvAyrkocbe6', data_base58)
        self.assertEqual(data_bytes, base_decode(data_base58, base=58))

    def test_base58check(self):
        """EncodeBase58Check/DecodeBase58Check round-trip (checksum variant)."""
        data_hex = '0cd394befc58a5be0189f3ceb6a41c8da023b099ce547dd4d8071ed6ed647259fba8c26382edbf5165dfd2404e7a8885d88437db16947a116e451a5d1325e3fd075f9d370120d2ab537af69f32e74fc0ba53aaaab3ac95cfea7'
        data_bytes = bfh(data_hex)
        data_base58check = EncodeBase58Check(data_bytes)
        self.assertEqual('4GCCJsjHqFbHxWbFBvRg35cSeNLHKeNqkXqFHW87zRmz6iP1dJU9Tk2KHZkoKj45jzVsSV4ZbQ8GpPwko6V3Z7cRfux3zJhUw7TZB6Kpa8Vdya8cMuUtL5Ry3CLtMetaY42u52X7Ey6MAH', data_base58check)
        self.assertEqual(data_bytes, DecodeBase58Check(data_base58check))
class StubSource():
def __init__(self, module: str, path: (str | None)=None, runtime_all: (list[str] | None)=None) -> None:
self.source = BuildSource(path, module, None)
self.runtime_all = runtime_all
self.ast: (MypyFile | None) = None
def __repr__(self) -> str:
return f'StubSource({self.source})'
def module(self) -> str:
return self.source.module
def path(self) -> (str | None):
return self.source.path |
def test_find_MAP_discrete():
    """find_MAP on a Beta-Binomial model with one observed and one free Binomial.

    Checks the MAP of `p` with and without explicitly passing the value
    variables, and the MAP of the latent Binomial count `ss`.
    """
    tol1 = (2.0 ** (- 11))
    tol2 = (2.0 ** (- 6))
    alpha = 4
    beta = 4
    n = 20
    yes = 15
    with pm.Model() as model:
        p = pm.Beta('p', alpha, beta)
        # Latent (unobserved) count sharing the same p.
        pm.Binomial('ss', n=n, p=p)
        pm.Binomial('s', n=n, p=p, observed=yes)
        map_est1 = find_MAP()
        map_est2 = find_MAP(vars=model.value_vars)
    assert_allclose(map_est1['p'], 0., atol=tol1, rtol=0)
    assert_allclose(map_est2['p'], 0., atol=tol2, rtol=0)
    assert (map_est2['ss'] == 14)
def attention_mask(loss_mask, prefix_lm=True):
    """Build a (batch, q_len, q_len) boolean attention mask from a loss mask.

    Starts from a causal (lower-triangular) mask; with prefix_lm=True,
    positions before the loss span attend bidirectionally and positions
    after it are masked out entirely.
    """
    device = loss_mask.device
    (batch_size, q_len) = loss_mask.size()
    axis = torch.arange(q_len).to(device)
    # start/end: first and last index of the loss span per batch row.
    # NOTE(review): the fill sentinels .0 and -.0 are both zero — for a
    # min/max reduction one would expect +/- large sentinels. This only
    # yields the intended start when the span begins at index 0; the assert
    # below should trip otherwise. Confirm the expected loss_mask layout.
    start = axis.unsqueeze(0).masked_fill((~ loss_mask), .0).min(dim=1).values
    end = axis.unsqueeze(0).masked_fill((~ loss_mask), (- .0)).max(dim=1).values
    # Sanity check: the span must be contiguous.
    assert torch.all(((end - start) == (loss_mask.int().sum(dim=(- 1)) - 1)))
    # Base causal mask, replicated per batch element.
    mask = (axis.unsqueeze(1) >= axis.unsqueeze(0)).repeat(batch_size, 1, 1)
    if prefix_lm:
        # Prefix region (before start): fully visible; after end: fully hidden.
        mask = torch.where((start.view(batch_size, 1, 1) > axis.view(1, 1, q_len)), True, mask)
        mask = torch.where((end.view(batch_size, 1, 1) < axis.view(1, 1, q_len)), False, mask)
    return mask
def main():
    """Set up a pybullet scene with a ground plane and a bin, then idle.

    Connects to the simulator, loads the plane, positions the debug camera,
    creates the project's bin object and steps the simulation indefinitely.
    """
    pybullet_planning.connect()
    pybullet_planning.add_data_path()
    p.loadURDF('plane.urdf')
    p.resetDebugVisualizerCamera(cameraDistance=1, cameraYaw=(- 60), cameraPitch=(- 20), cameraTargetPosition=(0, 0, 0.4))
    reorientbot.pybullet.create_bin(X=0.4, Y=0.6, Z=0.2)
    reorientbot.pybullet.step_and_sleep()
def load_multiple_centralized_dataset(load_as, args, process_id, mode, task, dataset_list, datadir_list, batch_size, num_workers, data_sampler=None, resize=32, augmentation='default'):
    """Load several datasets in centralized mode, keyed by dataset name.

    Calls `load_data` once per (dataset, datadir) pair and collects loaders,
    datasets, class counts and sample counts into per-dataset dicts.

    NOTE(review): the `mode`, `task`, `batch_size`, `num_workers` and
    `data_sampler` parameters are ignored — the call below hard-codes
    'centralized'/'centralized', args.batch_size, args.data_load_num_workers
    and None. Confirm whether the parameters should be forwarded instead.
    """
    train_dl_dict = {}
    test_dl_dict = {}
    train_ds_dict = {}
    test_ds_dict = {}
    class_num_dict = {}
    train_data_num_dict = {}
    test_data_num_dict = {}
    for (i, dataset) in enumerate(dataset_list):
        datadir = datadir_list[i]
        (train_dl, test_dl, train_data_num, test_data_num, class_num, other_params) = load_data(load_as=load_as, args=args, process_id=process_id, mode='centralized', task='centralized', dataset=dataset, datadir=datadir, batch_size=args.batch_size, num_workers=args.data_load_num_workers, data_sampler=None, resize=resize, augmentation=augmentation)
        train_dl_dict[dataset] = train_dl
        test_dl_dict[dataset] = test_dl
        train_ds_dict[dataset] = other_params['train_ds']
        test_ds_dict[dataset] = other_params['test_ds']
        class_num_dict[dataset] = class_num
        train_data_num_dict[dataset] = train_data_num
        test_data_num_dict[dataset] = test_data_num
    return (train_dl_dict, test_dl_dict, train_ds_dict, test_ds_dict, class_num_dict, train_data_num_dict, test_data_num_dict)
def unpack_values(value: Value, ctx: CanAssignContext, target_length: int, post_starred_length: Optional[int]=None) -> Union[(Sequence[Value], CanAssignError)]:
    """Implement iterable unpacking (e.g. ``a, b, *c = value``) over a Value.

    `target_length` is the number of plain targets; `post_starred_length`
    (when not None) means a starred target exists, followed by that many
    plain targets. Returns the per-target Values, or a CanAssignError when
    the value cannot be unpacked to that shape.
    """
    if isinstance(value, MultiValuedValue):
        # Unpack each union member independently, then unite element-wise.
        subvals = [unpack_values(val, ctx, target_length, post_starred_length) for val in value.vals]
        good_subvals = []
        for subval in subvals:
            if isinstance(subval, CanAssignError):
                return CanAssignError(f'Cannot unpack {value}', [subval])
            good_subvals.append(subval)
        if (not good_subvals):
            # Empty union: error source if there were members, else unreachable.
            return _create_unpacked_list((AnyValue(AnySource.error) if subvals else AnyValue(AnySource.unreachable)), target_length, post_starred_length)
        return [unite_values(*vals) for vals in zip(*good_subvals)]
    value = replace_known_sequence_value(value)
    if isinstance(value, SequenceValue):
        if (value.typ is tuple):
            # Tuples have exact per-element types; a length mismatch is an error.
            return _unpack_sequence_value(value, target_length, post_starred_length)
        elif (value.typ is list):
            # Lists may still unpack precisely; fall through to the generic
            # iterable handling when the element-wise attempt fails.
            vals = _unpack_sequence_value(value, target_length, post_starred_length)
            if (not isinstance(vals, CanAssignError)):
                return vals
    # Generic fallback: treat as an iterable with a single element type.
    iterable_type = is_iterable(value, ctx)
    if isinstance(iterable_type, CanAssignError):
        return iterable_type
    return _create_unpacked_list(iterable_type, target_length, post_starred_length)
def setUpModule():
    """Build the shared SCF/FCI fixtures used by the tests in this module.

    Runs an RHF calculation on water in a STO-3G basis with symmetry, then
    derives the MO-basis one- and two-electron integrals, a symmetry-adapted
    FCI solver and a random initial CI vector, all stored as module globals.
    """
    global mol, m, h1e, g2e, ci0, cis
    global norb, nelec, orbsym
    mol = gto.Mole()
    mol.verbose = 0
    mol.atom = '\n O 0. 0. 0.\n H 0. -0.757 0.587\n H 0. 0.757 0.587'
    mol.basis = 'sto-3g'
    mol.symmetry = 1
    mol.build()
    m = scf.RHF(mol)
    m.conv_tol = 1e-15
    ehf = m.scf()
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron
    # One-electron integrals transformed to the MO basis.
    h1e = m.mo_coeff.T.dot(scf.hf.get_hcore(mol)).dot(m.mo_coeff)
    # Two-electron integrals in the MO basis.
    g2e = ao2mo.incore.full(m._eri, m.mo_coeff)
    orbsym = m.orbsym
    cis = direct_spin1_symm.FCISolver(mol)
    cis.orbsym = orbsym
    # Fixed seed keeps the random initial CI vector reproducible.
    numpy.random.seed(15)
    na = cistring.num_strings(norb, (nelec // 2))
    ci0 = numpy.random.random((na, na))
def test_label_compression_attack():
    """Parsing a packet with deeply self-referential name labels must not blow up.

    The payload repeats the 'atk' label many times (a label-compression
    abuse pattern); the parser should still produce exactly one answer.
    """
    packet = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x03atk\x00\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\xc0\x0c\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x06'
    parsed = r.DNSIncoming(packet)
    assert (len(parsed.answers()) == 1)
class TestPXRD(unittest.TestCase):
def test_similarity(self):
sites = ['8a']
C1 = pyxtal()
C1.from_random(3, 227, ['C'], [8], sites=[['8a']])
xrd1 = C1.get_XRD()
C2 = C1.subgroup_once(eps=0.001)
xrd2 = C1.get_XRD()
p1 = xrd1.get_profile()
p2 = xrd2.get_profile()
s = Similarity(p1, p2, x_range=[15, 90])
self.assertTrue((0.9 < s.value < 1.001))
C2.apply_perturbation(0.001, 0.001)
xrd3 = C2.get_XRD()
p3 = xrd3.get_profile()
s = Similarity(p1, p2, x_range=[15, 90])
self.assertTrue((0.95 < s.value < 1.001)) |
def sample(seq_str, experiment_directory='seq2seq/experiment', checkpoint='2019_05_18_20_32_54', resume=True, log_level='info'):
    """Run a trained seq2seq model on one whitespace-tokenized input string.

    Loads the model and vocabularies from the given checkpoint and returns
    the predictor's output sequence. NOTE(review): the `resume` parameter
    is unused here — confirm whether it should be forwarded.
    """
    logging.basicConfig(format=LOG_FORMAT, level=getattr(logging, log_level.upper()))
    logging.info('experiment_directory: %s', experiment_directory)
    logging.info('checkpoint: %s', checkpoint)
    (seq2seq, input_vocab, output_vocab) = load_checkpoint(experiment_directory, checkpoint)
    predictor = Predictor(seq2seq, input_vocab, output_vocab)
    seq = seq_str.strip().split()
    return predictor.predict(seq)
class BackUp(models.Model):
    """A user's downloadable data export (entries + archived conversations)."""
    author = models.ForeignKey('Author', on_delete=models.CASCADE)
    # Generated JSON file; path decided by user_directory_backup.
    file = models.FileField(upload_to=user_directory_backup)
    # Flipped once the export has been generated; process() is then a no-op.
    is_ready = models.BooleanField(default=False)
    date_created = models.DateTimeField(auto_now_add=True)

    def process(self):
        """Serialize the author's published entries and conversation archives
        into one JSON document, save it to `file` and mark the backup ready."""
        if self.is_ready:
            return
        serializer = ArchiveSerializer()
        entries = self.author.entry_set(manager='objects_published').select_related('topic')
        conversations = self.author.conversationarchive_set.all()
        entries_text = serializer.serialize(entries, fields=('topic__title', 'content', 'date_created', 'date_edited'))
        # Hand-built JSON array; the [:-1] strips the trailing comma from the join.
        conversations_text = ('[%s]' % ''.join((('{"target": "%s", "messages": %s},' % (item.target, item.messages)) for item in conversations))[:(- 1)])
        content = ('{"entries": %s, "conversations": %s}' % (entries_text, conversations_text))
        self.is_ready = True
        self.file.save('backup', ContentFile(content.encode('utf-8')), save=True)
        # NOTE(review): Django's settings module has no get_model — this
        # presumably should be apps.get_model (or a project-local helper);
        # confirm against the project's imports.
        settings.get_model('Message').objects.compose(get_generic_superuser(), self.author, gettext('your backup is now ready. you may download your backup file using the link provided in the backup tab of settings.'))

    def process_async(self):
        """Queue generation of this backup on the Celery worker."""
        from dictionary.tasks import process_backup
        process_backup.delay(self.pk)

    def delete(self, **kwargs):
        """Delete the row, then remove the file from storage."""
        super().delete(**kwargs)
        self.file.delete(save=False)
def validate_data(gtFilePath, submFilePath, evaluationParams):
    """Validate the ground-truth and submission zip files.

    Loads both archives, checks every GT file's line format, then checks
    that each submitted sample exists in the GT and that its lines are
    well-formed. Raises on the first problem found; returns nothing.
    """
    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)
    for k in gt:
        rrc_evaluation_funcs.validate_lines_in_file(k, gt[k], evaluationParams['CRLF'], evaluationParams['LTRB'], True)
    for k in subm:
        # Idiomatic membership test (was `(k in gt) == False`).
        if k not in gt:
            raise Exception('The sample %s not present in GT' % k)
        rrc_evaluation_funcs.validate_lines_in_file(k, subm[k], evaluationParams['CRLF'], evaluationParams['LTRB'], False, evaluationParams['CONFIDENCES'])
# NOTE(review): the next line looks like a truncated pytest decorator —
# presumably something like '@<name>_config' applying a qtile test config
# to the test below. As written it is a bare undefined name. Confirm
# against the original source.
_config
def test_chord_stack(manager):
    """Chord behavior: leaving, re-entering and mode ('v') chords.

    Drives focus changes via simulated keypresses and asserts which window
    ('one'/'two') holds focus after each step. 'z' switches focus inside a
    chord, 'k' should be inert once a chord has been left.
    """
    manager.test_window('two')
    manager.test_window('one')
    assert (manager.c.get_groups()['a']['focus'] == 'one')
    # Enter the chord and toggle focus with 'z'.
    manager.c.simulate_keypress(['control'], 'd')
    manager.c.simulate_keypress([], 'z')
    assert (manager.c.get_groups()['a']['focus'] == 'two')
    manager.c.simulate_keypress([], 'z')
    assert (manager.c.get_groups()['a']['focus'] == 'one')
    # 'k' ends the chord, so it must not change focus here.
    manager.c.simulate_keypress([], 'k')
    assert (manager.c.get_groups()['a']['focus'] == 'one')
    # Re-enter via 'a' then '1'; 'j' toggles focus inside the nested chord.
    manager.c.simulate_keypress([], 'a')
    manager.c.simulate_keypress([], '1')
    manager.c.simulate_keypress([], 'j')
    assert (manager.c.get_groups()['a']['focus'] == 'two')
    manager.c.simulate_keypress([], 'j')
    assert (manager.c.get_groups()['a']['focus'] == 'one')
    # 'u' steps back up one chord level; 'z' works again there.
    manager.c.simulate_keypress([], 'u')
    manager.c.simulate_keypress([], 'z')
    assert (manager.c.get_groups()['a']['focus'] == 'two')
    manager.c.simulate_keypress([], 'z')
    assert (manager.c.get_groups()['a']['focus'] == 'one')
    manager.c.simulate_keypress([], 'k')
    assert (manager.c.get_groups()['a']['focus'] == 'one')
    # 'v' enters a persistent (mode) chord where 'k' toggles focus.
    manager.c.simulate_keypress([], 'a')
    manager.c.simulate_keypress([], '1')
    manager.c.simulate_keypress([], 'v')
    manager.c.simulate_keypress([], 'k')
    assert (manager.c.get_groups()['a']['focus'] == 'two')
    manager.c.simulate_keypress([], 'k')
    assert (manager.c.get_groups()['a']['focus'] == 'one')
    # 'z' is not bound in the mode chord, so focus must not change.
    manager.c.simulate_keypress([], 'z')
    assert (manager.c.get_groups()['a']['focus'] == 'one')
def prepare_ocp(biorbd_model_path: str, final_time: float, n_shooting: int, ode_solver: OdeSolverBase=OdeSolver.RK4(), phase_dynamics: PhaseDynamics=PhaseDynamics.SHARED_DURING_THE_PHASE, expand_dynamics: bool=True) -> OptimalControlProgram:
    """Build a torque-driven OCP that tracks marker-defined vector orientations.

    Minimizes joint torques (Lagrange) while tracking the orientation between
    the m0->m3 and origin->m6 marker vectors at every node (Mayer).
    """
    model = BiorbdModel(biorbd_model_path)

    # Objectives: small torque-effort term plus the orientation-tracking term.
    objectives = ObjectiveList()
    objectives.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key='tau', weight=1)
    objectives.add(ObjectiveFcn.Mayer.TRACK_VECTOR_ORIENTATIONS_FROM_MARKERS, node=Node.ALL, weight=100, vector_0_marker_0='m0', vector_0_marker_1='m3', vector_1_marker_0='origin', vector_1_marker_1='m6')

    dynamics = DynamicsList()
    dynamics.add(DynamicsFcn.TORQUE_DRIVEN, expand_dynamics=expand_dynamics, phase_dynamics=phase_dynamics)

    # State bounds from the model ranges; clamp q[2] at start and end.
    x_bounds = BoundsList()
    x_bounds['q'] = model.bounds_from_ranges('q')
    x_bounds['q'][2, [0, -1]] = [-1.57, 1.57]
    x_bounds['qdot'] = model.bounds_from_ranges('qdot')

    # Control bounds and initial guess on the torques.
    tau_min, tau_max, tau_init = -100, 100, 2
    u_bounds = BoundsList()
    u_bounds['tau'] = [tau_min] * model.nb_tau, [tau_max] * model.nb_tau
    u_init = InitialGuessList()
    u_init['tau'] = [tau_init] * model.nb_tau

    return OptimalControlProgram(model, dynamics, n_shooting, final_time, x_bounds=x_bounds, u_bounds=u_bounds, u_init=u_init, objective_functions=objectives, ode_solver=ode_solver)
class AsyncState(State):
    """State variant whose enter/exit callbacks are awaited asynchronously."""

    async def enter(self, event_data):
        """Await all on_enter callbacks of this state."""
        machine = event_data.machine
        _LOGGER.debug('%sEntering state %s. Processing callbacks...', machine.name, self.name)
        await machine.callbacks(self.on_enter, event_data)
        _LOGGER.info('%sFinished processing state %s enter callbacks.', machine.name, self.name)

    async def exit(self, event_data):
        """Await all on_exit callbacks of this state."""
        machine = event_data.machine
        _LOGGER.debug('%sExiting state %s. Processing callbacks...', machine.name, self.name)
        await machine.callbacks(self.on_exit, event_data)
        _LOGGER.info('%sFinished processing state %s exit callbacks.', machine.name, self.name)
@pytest.mark.parametrize('with_suffix,', [(False,), (True,)])
def test_inject_include_apps(pipx_temp_env, capsys, with_suffix):
    """Injecting with --include-deps works, honoring an install suffix.

    Fix: restored the mangled decorator line (the `@pytest.mark.` prefix had
    been lost), without which the test would not be parametrized at all.
    """
    suffix = '_x' if with_suffix else ''
    install_args = [f'--suffix={suffix}'] if with_suffix else []
    assert not run_pipx_cli(['install', 'pycowsay', *install_args])
    assert not run_pipx_cli(['inject', f'pycowsay{suffix}', PKG['black']['spec'], '--include-deps'])
    if suffix:
        # Without the suffix the venv name does not exist, so injecting fails.
        assert run_pipx_cli(['inject', 'pycowsay', PKG['black']['spec'], '--include-deps'])
        assert not run_pipx_cli(['inject', f'pycowsay{suffix}', PKG['black']['spec'], '--include-deps'])
def build_test_loader(cfg, is_train=False):
    """Build an inference DataLoader for the configured test dataset(s).

    NOTE(review): `data_loaders` accumulates one loader per dataset but the
    function returns only `data_loader` — the loader built for the *last*
    dataset. With several test datasets configured this silently drops all
    but one; confirm whether returning the list was intended.
    """
    path_catalog = import_file('smoke.config.paths_catalog', cfg.PATHS_CATALOG, True)
    DatasetCatalog = path_catalog.DatasetCatalog
    transforms = build_transforms(cfg, is_train)
    datasets = build_dataset(cfg, transforms, DatasetCatalog, is_train)
    data_loaders = []
    for dataset in datasets:
        # Sequential sampler over the full dataset, batch size 1, keep tail.
        sampler = samplers.InferenceSampler(len(dataset))
        batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
        # Collator pads images so dimensions divide SIZE_DIVISIBILITY.
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(dataset, num_workers=num_workers, batch_sampler=batch_sampler, collate_fn=collator)
        data_loaders.append(data_loader)
    return data_loader
class _AssertRaisesContext(object):
def __init__(self, expected, test_case):
self.expected = expected
self.failureException = test_case.failureException
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if (exc_type is None):
exc_name = self.expected.__name__
raise self.failureException(('%s not raised' % (exc_name,)))
if (not issubclass(exc_type, self.expected)):
return False
self.exception = exc_value
return True |
class Test_util(unittest.TestCase):
    """Tests for the serial.to_bytes / serial.iterbytes helpers."""

    def test_to_bytes(self):
        # All byte-like input forms normalize to the same bytes value.
        for source in ([1, 2, 3], b'\x01\x02\x03', bytearray([1, 2, 3])):
            self.assertEqual(serial.to_bytes(source), b'\x01\x02\x03')
        # Unicode strings must be rejected rather than implicitly encoded.
        self.assertRaises(TypeError, serial.to_bytes, b'hello'.decode('utf-8'))

    def test_iterbytes(self):
        expected = [b'\x01', b'\x02', b'\x03']
        self.assertEqual(list(serial.iterbytes(b'\x01\x02\x03')), expected)
def download(message: Soup.Message, cancellable: Gio.Cancellable, callback: Callable, data: Any, try_decode: bool=False, failure_callback: (FailureCallback | None)=None):
    """Asynchronously fetch the body of an HTTP message into memory.

    On completion, ``callback(message, body, data)`` receives the raw bytes,
    or — when ``try_decode`` — the body decoded using the response charset
    (falling back to raw bytes on decode failure). ``failure_callback`` is
    connected to the request's send-failure signal when given.
    """
    def received(request, ostream):
        # All data has arrived: close and take ownership of the buffer.
        ostream.close(None)
        bs = ostream.steal_as_bytes().get_data()
        if (not try_decode):
            callback(message, bs, data)
            return
        # NOTE(review): the HTTP status check below runs only on the
        # try_decode path; raw-bytes callers never see the >=400 guard —
        # confirm this asymmetry is intended.
        code = int(message.get_property('status-code'))
        if (code >= 400):
            print_w(('HTTP %d error received on %s' % (code, request._uri)))
            return
        ctype = message.get_property('response-headers').get_content_type()
        # charset from Content-Type parameters, defaulting to UTF-8.
        encoding = ctype[1].get('charset', 'utf-8')
        try:
            callback(message, bs.decode(encoding), data)
        except UnicodeDecodeError:
            # Advertised charset was wrong: hand over the raw bytes instead.
            callback(message, bs, data)
    request = HTTPRequest(message, cancellable)
    # Buffer the whole response body in a growable memory stream.
    request.provide_target(Gio.MemoryOutputStream.new_resizable())
    request.connect('received', received)
    # Start reading the body as soon as the request has been sent.
    request.connect('sent', (lambda r, m: r.receive()))
    if failure_callback:
        request.connect('send-failure', failure_callback, data)
    request.send()
class Effect1261(BaseEffect):
    """Implant-set effect: multiplies the velocityBonus of every implant in
    the 'Cyberimplant' group by this implant's implantSetSerpentis attribute."""

    # Applied early in the effect pass; always on.
    runTime = 'early'
    type = 'passive'

    def handler(fit, implant, context, projectionRange, **kwargs):
        # NOTE: no `self` — the effect framework invokes handlers unbound.
        fit.appliedImplants.filteredItemMultiply((lambda mod: (mod.item.group.name == 'Cyberimplant')), 'velocityBonus', implant.getModifiedItemAttr('implantSetSerpentis'), **kwargs)
def _ensure_datetime_tzinfo(dt: datetime.datetime, tzinfo: (datetime.tzinfo | None)=None) -> datetime.datetime:
if (dt.tzinfo is None):
dt = dt.replace(tzinfo=UTC)
if (tzinfo is not None):
dt = dt.astimezone(get_timezone(tzinfo))
if hasattr(tzinfo, 'normalize'):
dt = tzinfo.normalize(dt)
return dt |
class AdditionalSkipNamesModuleTest(fake_filesystem_unittest.TestCase):
    """Modules listed in additional_skip_names must keep seeing the real
    filesystem while everything else sees the fake one."""

    def setUp(self):
        self.setUpPyfakefs(additional_skip_names=[pyfakefs.tests.import_as_example])

    def _assert_real_fs_used(self, checker):
        # A file created only in the fake fs must be invisible to the
        # skipped module's path check.
        self.fs.create_file('foo')
        self.assertFalse(checker('foo'))

    def test_path_exists(self):
        self.assertTrue(pyfakefs.tests.import_as_example.exists_this_file())

    def test_fake_path_does_not_exist1(self):
        self._assert_real_fs_used(pyfakefs.tests.import_as_example.check_if_exists1)

    def test_fake_path_does_not_exist2(self):
        self._assert_real_fs_used(pyfakefs.tests.import_as_example.check_if_exists2)

    def test_fake_path_does_not_exist3(self):
        self._assert_real_fs_used(pyfakefs.tests.import_as_example.check_if_exists3)

    def test_fake_path_does_not_exist4(self):
        self._assert_real_fs_used(pyfakefs.tests.import_as_example.check_if_exists4)

    def test_fake_path_does_not_exist5(self):
        self._assert_real_fs_used(pyfakefs.tests.import_as_example.check_if_exists5)

    def test_fake_path_does_not_exist6(self):
        self._assert_real_fs_used(pyfakefs.tests.import_as_example.check_if_exists6)

    def test_fake_path_does_not_exist7(self):
        self._assert_real_fs_used(pyfakefs.tests.import_as_example.check_if_exists7)

    def test_open_succeeds(self):
        pyfakefs.tests.import_as_example.open_this_file()

    def test_path_succeeds(self):
        pyfakefs.tests.import_as_example.return_this_file_path()
@pytest.mark.parametrize('bucket, username, password', [pytest.param(_TEST_BUCKET, _TEST_USER, _TEST_PASSWORD, id='same credentials'), pytest.param('another_bucket', 'blech', 'password', id='different credentials')])
def test_copy(bucket, username, password, storage_engine):
    """Copying content to a second engine works for both identical and
    different target credentials.

    Fixes: restored the mangled `@pytest.mark.` decorator prefix, and use the
    parametrized bucket/username/password — previously the target engine was
    always built from the test constants, so both parameter sets exercised
    the same code path.
    """
    another_engine = S3Storage(_TEST_CONTEXT, 'another/path', bucket, username, password)
    # Ensure the alternate bucket exists for the 'different credentials' case.
    boto3.client('s3').create_bucket(Bucket='another_bucket')
    storage_engine.copy_to(another_engine, _TEST_PATH)
    assert another_engine.get_content(_TEST_PATH) == _TEST_CONTENT
class PreOCIModel(KeyServerDataInterface):
    """Thin delegation layer mapping the key-server data interface onto
    data.model.service_keys, converting DB rows and exceptions on the way."""

    def list_service_keys(self, service):
        """Return all service keys registered for *service*."""
        return data.model.service_keys.list_service_keys(service)

    def get_service_key(self, signer_kid, service=None, alive_only=True, approved_only=True):
        """Look up a key by KID, translating the DB miss into the API error."""
        try:
            key = data.model.service_keys.get_service_key(signer_kid, service, alive_only, approved_only)
            return _db_key_to_servicekey(key)
        except data.model.ServiceKeyDoesNotExist:
            raise ServiceKeyDoesNotExist()

    def create_service_key(self, name, kid, service, jwk, metadata, expiration_date, rotation_duration=None):
        """Create a new service key and return its interface representation."""
        key = data.model.service_keys.create_service_key(name, kid, service, jwk, metadata, expiration_date, rotation_duration)
        return _db_key_to_servicekey(key)

    def replace_service_key(self, old_kid, kid, jwk, metadata, expiration_date):
        """Replace the key identified by *old_kid* with new material."""
        try:
            data.model.service_keys.replace_service_key(old_kid, kid, jwk, metadata, expiration_date)
        except data.model.ServiceKeyDoesNotExist:
            raise ServiceKeyDoesNotExist()

    def delete_service_key(self, kid):
        """Delete a key by KID and return the deleted key's representation."""
        try:
            key = data.model.service_keys.delete_service_key(kid)
            return _db_key_to_servicekey(key)
        except data.model.ServiceKeyDoesNotExist:
            raise ServiceKeyDoesNotExist()
def test_several_recursive_types():
    """Round-trip (dump then load) of a recursive Tree several levels deep."""
    def leaf():
        return {'left': None, 'right': None}

    dumped_data = {'left': {'left': leaf(), 'right': leaf()}, 'right': leaf()}
    loaded_data = Tree(left=Tree(left=Tree(), right=Tree()), right=Tree())
    assert retort.dump(loaded_data) == dumped_data
    assert retort.load(dumped_data, Tree) == loaded_data
def parse_selection(selection, *, op=None):
    """Parse one selection string into an (op, selection, kind, parsed) tuple.

    Tries to parse as a versioned benchmark spec first; otherwise falls back
    to a tag pattern and finally a name pattern. *op* defaults to '+'.
    """
    parsed = _benchmark.parse_benchmark(selection, fail=False)
    spec, metafile = parsed if parsed else (None, None)
    if parsed and spec.version:
        kind = 'benchmark'
        # With a metafile we can build the full Benchmark; otherwise keep the spec.
        parsed = _benchmark.Benchmark(spec, metafile) if metafile else spec
    elif parsed and (spec.origin or metafile):
        # Unversioned specs with origin/metafile are not supported yet.
        raise NotImplementedError(selection)
    else:
        parsed = _utils.parse_tag_pattern(selection)
        if parsed:
            kind = 'tag'
        else:
            kind = 'name'
            parsed = _utils.parse_name_pattern(selection, fail=True)
        if not parsed:
            raise ValueError(f'unsupported selection {selection!r}')
    return (op or '+'), selection, kind, parsed
def test_format_datetime(timezone_getter):
    """format_datetime honors locale, format skeleton and tzinfo.

    Fix: the expected French string had lost its accented/apostrophe
    characters ("dete dEurope" instead of "d'été d'Europe"), so the
    assertion could never match Babel's actual output.
    """
    dt = datetime(2007, 4, 1, 15, 30)
    assert (dates.format_datetime(dt, locale='en_US') == 'Apr 1, 2007, 3:30:00\u202fPM')
    full = dates.format_datetime(dt, 'full', tzinfo=timezone_getter('Europe/Paris'), locale='fr_FR')
    assert full == "dimanche 1 avril 2007, 17:30:00 heure d'été d'Europe centrale"
    custom = dates.format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz", tzinfo=timezone_getter('US/Eastern'), locale='en')
    assert custom == '2007.04.01 AD at 11:30:00 EDT'
class Tpl(BaseDB, AlchemyMixin):
    """ORM model + async DAO for stored task templates (HAR based)."""

    __tablename__ = 'tpl'

    id = Column(Integer, primary_key=True)
    # Flags stored as TINYINT(1) with string server defaults (MySQL style).
    disabled = Column(TINYINT(1), nullable=False, server_default=text("'0'"))
    public = Column(TINYINT(1), nullable=False, server_default=text("'0'"))
    lock = Column(TINYINT(1), nullable=False, server_default=text("'0'"))
    success_count = Column(INTEGER, nullable=False, server_default=text("'0'"))
    failed_count = Column(INTEGER, nullable=False, server_default=text("'0'"))
    # Creation / modification / access timestamps (unix seconds).
    ctime = Column(INTEGER, nullable=False)
    mtime = Column(INTEGER, nullable=False)
    atime = Column(INTEGER, nullable=False)
    updateable = Column(INTEGER, nullable=False, server_default=text("'0'"))
    _groups = Column(String(256), nullable=False, server_default=text("'None'"))
    userid = Column(INTEGER)
    siteurl = Column(String(256))
    sitename = Column(String(128))
    banner = Column(String(1024))
    fork = Column(INTEGER)
    # Raw HAR capture and compiled template blobs.
    har = Column(MEDIUMBLOB)
    tpl = Column(MEDIUMBLOB)
    variables = Column(Text)
    init_env = Column(Text)
    interval = Column(INTEGER)
    note = Column(String(1024))
    last_success = Column(INTEGER)
    tplurl = Column(String(1024), server_default=text("''"))

    def add(self, userid, har, tpl, variables, init_env, interval=None, sql_session=None):
        """Insert a new template row owned by *userid*; returns the insert result."""
        now = time.time()
        insert = dict(userid=userid, siteurl=None, sitename=None, banner=None, disabled=0, public=0, fork=None, har=har, tpl=tpl, variables=variables, init_env=init_env, interval=interval, ctime=now, mtime=now, atime=now, last_success=None)
        return self._insert(Tpl(**insert), sql_session=sql_session)

    def mod(self, id, sql_session=None, **kwargs):
        """Update arbitrary columns of the template identified by *id*."""
        return self._update(update(Tpl).where((Tpl.id == id)).values(**kwargs), sql_session=sql_session)

    async def get(self, id, fields=None, one_or_none=False, first=True, to_dict=True, sql_session=None):
        """Fetch one template by id, optionally restricted to *fields*."""
        assert id, 'need id'
        if (fields is None):
            _fields = Tpl
        else:
            # Select only the requested columns.
            _fields = (getattr(Tpl, field) for field in fields)
        smtm = select(_fields).where((Tpl.id == id))
        result = (await self._get(smtm, one_or_none=one_or_none, first=first, sql_session=sql_session))
        if (to_dict and (result is not None)):
            return self.to_dict(result, fields)
        return result

    async def incr_success(self, id, sql_session=None):
        """Bump success_count and stamp last_success; returns affected rows."""
        result = (await self._execute(text('UPDATE tpl SET success_count=success_count+1, last_success=:last_success WHERE id=:id').bindparams(id=int(id), last_success=int(time.time())), sql_session=sql_session))
        return result.rowcount

    async def incr_failed(self, id, sql_session=None):
        """Bump failed_count; returns affected rows."""
        result = (await self._execute(text('UPDATE tpl SET failed_count=failed_count+1 WHERE id=:id').bindparams(id=int(id)), sql_session=sql_session))
        return result.rowcount

    async def list(self, fields=None, limit=None, to_dict=True, sql_session=None, **kwargs):
        """List templates filtered by column==value kwargs, optionally limited."""
        if (fields is None):
            _fields = Tpl
        else:
            _fields = (getattr(Tpl, field) for field in fields)
        smtm = select(_fields)
        for (key, value) in kwargs.items():
            smtm = smtm.where((getattr(Tpl, key) == value))
        if limit:
            smtm = smtm.limit(limit)
        result = (await self._get(smtm, sql_session=sql_session))
        if (to_dict and (result is not None)):
            return [self.to_dict(row, fields) for row in result]
        return result

    def delete(self, id, sql_session=None):
        """Delete the template row identified by *id*."""
        return self._delete(delete(Tpl).where((Tpl.id == id)), sql_session=sql_session)
class FilterEditView(EditBaseView):
    """Interactive Discord view for creating/editing a filter: shows the
    filter's content, description and setting overrides in an embed, with
    selects/buttons to change them, and invokes *confirm_callback* on save."""

    class _REMOVE():
        """Sentinel type signalling that a value/override should be removed."""

    def __init__(self, filter_list: FilterList, list_type: ListType, filter_type: type[Filter], content: (str | None), description: (str | None), settings_overrides: dict, filter_settings_overrides: dict, loaded_settings: dict, loaded_filter_settings: dict, author: User, embed: Embed, confirm_callback: Callable):
        super().__init__(author)
        self.filter_list = filter_list
        self.list_type = list_type
        self.filter_type = filter_type
        self.content = content
        self.description = description
        self.settings_overrides = settings_overrides
        self.filter_settings_overrides = filter_settings_overrides
        self.loaded_settings = loaded_settings
        self.loaded_filter_settings = loaded_filter_settings
        self.embed = embed
        self.confirm_callback = confirm_callback
        # Render the current settings state into the embed fields.
        all_settings_repr_dict = build_filter_repr_dict(filter_list, list_type, filter_type, settings_overrides, filter_settings_overrides)
        populate_embed_from_dict(embed, all_settings_repr_dict)
        # Map each selectable setting name to its type for later parsing;
        # filter-specific settings are namespaced as "<filter type>/<name>".
        self.type_per_setting_name = {setting: info[2] for (setting, info) in loaded_settings.items()}
        self.type_per_setting_name.update({f'{filter_type.name}/{name}': type_ for (name, (_, _, type_)) in loaded_filter_settings.get(filter_type.name, {}).items()})
        add_select = CustomCallbackSelect(self._prompt_new_value, placeholder='Select a setting to edit', options=[SelectOption(label=name) for name in sorted(self.type_per_setting_name)], row=1)
        self.add_item(add_select)
        if (settings_overrides or filter_settings_overrides):
            # NOTE(review): override names are prefixed with filter_list.name
            # here but type_per_setting_name uses filter_type.name — confirm
            # the two namespaces are meant to differ.
            override_names = (list(settings_overrides) + [f'{filter_list.name}/{setting}' for setting in filter_settings_overrides])
            remove_select = CustomCallbackSelect(self._remove_override, placeholder='Select an override to remove', options=[SelectOption(label=name) for name in sorted(override_names)], row=2)
            self.add_item(remove_select)

    # NOTE(review): the bare `.button(...)` lines below appear to be mangled
    # `@discord.ui.button(...)` decorators whose prefix was lost.
    .button(label='Edit Content', row=3)
    async def edit_content(self, interaction: Interaction, button: discord.ui.Button) -> None:
        """Open a modal to edit the filter's content."""
        modal = EditContentModal(self, interaction.message)
        (await interaction.response.send_modal(modal))

    .button(label='Edit Description', row=3)
    async def edit_description(self, interaction: Interaction, button: discord.ui.Button) -> None:
        """Open a modal to edit the filter's description."""
        modal = EditDescriptionModal(self, interaction.message)
        (await interaction.response.send_modal(modal))

    .button(label='Empty Description', row=3)
    async def empty_description(self, interaction: Interaction, button: discord.ui.Button) -> None:
        """Clear the description via the _REMOVE sentinel."""
        (await self.update_embed(interaction, description=self._REMOVE))

    .button(label='Template', row=3)
    async def enter_template(self, interaction: Interaction, button: discord.ui.Button) -> None:
        """Open a modal to copy settings from an existing filter template."""
        modal = TemplateModal(self, interaction.message)
        (await interaction.response.send_modal(modal))

    .button(label=' Confirm', style=discord.ButtonStyle.green, row=4)
    async def confirm(self, interaction: Interaction, button: discord.ui.Button) -> None:
        """Persist the filter via confirm_callback, reporting API errors inline."""
        if (self.content is None):
            # NOTE(review): no early return after this error message — the
            # method continues into edit_message below; confirm intended.
            (await interaction.response.send_message(':x: Cannot add a filter with no content.', ephemeral=True, reference=interaction.message))
        if (self.description is None):
            self.description = ''
        (await interaction.response.edit_message(view=None))
        try:
            (await self.confirm_callback(interaction.message, self.filter_list, self.list_type, self.filter_type, self.content, self.description, self.settings_overrides, self.filter_settings_overrides))
        except ResponseCodeError as e:
            (await interaction.message.reply(embed=format_response_error(e)))
            (await interaction.message.edit(view=self))
        except BadArgument as e:
            (await interaction.message.reply(embed=Embed(colour=discord.Colour.red(), title='Bad Argument', description=str(e))))
            (await interaction.message.edit(view=self))
        else:
            self.stop()

    .button(label=' Cancel', style=discord.ButtonStyle.red, row=4)
    async def cancel(self, interaction: Interaction, button: discord.ui.Button) -> None:
        """Abort the edit and dismiss the view."""
        (await interaction.response.edit_message(content=' Operation canceled.', embed=None, view=None))
        self.stop()

    def current_value(self, setting_name: str) -> Any:
        """Return the current override for *setting_name*, or MISSING."""
        if (setting_name in self.settings_overrides):
            return self.settings_overrides[setting_name]
        if ('/' in setting_name):
            # Namespaced filter-specific setting: strip the prefix.
            (_, setting_name) = setting_name.split('/', maxsplit=1)
            if (setting_name in self.filter_settings_overrides):
                return self.filter_settings_overrides[setting_name]
        return MISSING

    async def update_embed(self, interaction_or_msg: (discord.Interaction | discord.Message), *, content: (str | None)=None, description: ((str | type[FilterEditView._REMOVE]) | None)=None, setting_name: (str | None)=None, setting_value: ((str | type[FilterEditView._REMOVE]) | None)=None) -> None:
        """Apply content/description/setting changes, then re-render the embed
        in a fresh copy of this view (the old view is stopped)."""
        if ((content is not None) or (description is not None)):
            if (content is not None):
                # Changing content may change which filter type applies.
                filter_type = self.filter_list.get_filter_type(content)
                if (not filter_type):
                    if isinstance(interaction_or_msg, discord.Message):
                        send_method = interaction_or_msg.channel.send
                    else:
                        send_method = interaction_or_msg.response.send_message
                    (await send_method(f':x: Could not find a filter type appropriate for `{content}`.'))
                    return
                self.content = content
                self.filter_type = filter_type
            else:
                content = self.content
            if (description is self._REMOVE):
                self.description = None
            elif (description is not None):
                self.description = description
            else:
                description = self.description
            self.embed.description = (f'`{content}`' if content else '*No content*')
            if (description and (description is not self._REMOVE)):
                self.embed.description += f' - {description}'
            # Truncate to Discord's embed description limit.
            if (len(self.embed.description) > MAX_EMBED_DESCRIPTION):
                self.embed.description = (self.embed.description[:(MAX_EMBED_DESCRIPTION - 5)] + '[...]')
        if setting_name:
            if ('/' in setting_name):
                (filter_name, setting_name) = setting_name.split('/', maxsplit=1)
                dict_to_edit = self.filter_settings_overrides
                default_value = self.filter_type.extra_fields_type().model_dump()[setting_name]
            else:
                dict_to_edit = self.settings_overrides
                default_value = self.filter_list[self.list_type].default(setting_name)
            if (setting_value is not self._REMOVE):
                # Only store the override if it actually differs from default.
                if (not repr_equals(setting_value, default_value)):
                    dict_to_edit[setting_name] = setting_value
                elif (setting_name in dict_to_edit):
                    dict_to_edit.pop(setting_name)
            elif (setting_name in dict_to_edit):
                dict_to_edit.pop(setting_name)
        # Rebuild the embed fields via a fresh copy of the view.
        self.embed.clear_fields()
        new_view = self.copy()
        try:
            if isinstance(interaction_or_msg, discord.Interaction):
                (await interaction_or_msg.response.edit_message(embed=self.embed, view=new_view))
            else:
                (await interaction_or_msg.edit(embed=self.embed, view=new_view))
        except discord.errors.HTTPException:
            pass
        else:
            self.stop()

    async def edit_setting_override(self, interaction: Interaction, setting_name: str, override_value: Any) -> None:
        """Set a single setting override and refresh the embed."""
        (await self.update_embed(interaction, setting_name=setting_name, setting_value=override_value))

    async def apply_template(self, template_id: str, embed_message: discord.Message, interaction: Interaction) -> None:
        """Merge settings copied from a template filter (user overrides win)."""
        try:
            (settings, filter_settings) = template_settings(template_id, self.filter_list, self.list_type, self.filter_type)
        except BadArgument as e:
            (await interaction.response.send_message(f':x: {e}', ephemeral=True))
            return
        else:
            (await interaction.response.defer())
            # dict-union: template values first, existing overrides take precedence.
            self.settings_overrides = (settings | self.settings_overrides)
            self.filter_settings_overrides = (filter_settings | self.filter_settings_overrides)
            self.embed.clear_fields()
            (await embed_message.edit(embed=self.embed, view=self.copy()))
            self.stop()

    async def _remove_override(self, interaction: Interaction, select: discord.ui.Select) -> None:
        """Select-callback: remove the chosen override via the sentinel."""
        (await self.update_embed(interaction, setting_name=select.values[0], setting_value=self._REMOVE))

    def copy(self) -> FilterEditView:
        """Return a fresh view with the same state (used after each update)."""
        return FilterEditView(self.filter_list, self.list_type, self.filter_type, self.content, self.description, self.settings_overrides, self.filter_settings_overrides, self.loaded_settings, self.loaded_filter_settings, self.author, self.embed, self.confirm_callback)
@pytest.fixture(scope='module')
def inline_query_result_mpeg4_gif():
    """Module-scoped fixture building a fully-populated InlineQueryResultMpeg4Gif.

    Fix: restored the mangled decorator line — the bare `(scope='module')`
    had lost its `@pytest.fixture` prefix, leaving invalid syntax.
    """
    return InlineQueryResultMpeg4Gif(TestInlineQueryResultMpeg4GifBase.id_, TestInlineQueryResultMpeg4GifBase.mpeg4_url, TestInlineQueryResultMpeg4GifBase.thumbnail_url, mpeg4_width=TestInlineQueryResultMpeg4GifBase.mpeg4_width, mpeg4_height=TestInlineQueryResultMpeg4GifBase.mpeg4_height, mpeg4_duration=TestInlineQueryResultMpeg4GifBase.mpeg4_duration, title=TestInlineQueryResultMpeg4GifBase.title, caption=TestInlineQueryResultMpeg4GifBase.caption, parse_mode=TestInlineQueryResultMpeg4GifBase.parse_mode, caption_entities=TestInlineQueryResultMpeg4GifBase.caption_entities, input_message_content=TestInlineQueryResultMpeg4GifBase.input_message_content, reply_markup=TestInlineQueryResultMpeg4GifBase.reply_markup, thumbnail_mime_type=TestInlineQueryResultMpeg4GifBase.thumbnail_mime_type)
class TestCase(unittest.TestCase, TestCaseMixin):
    """unittest.TestCase base that stores pyfakefs patching configuration.

    The stored attributes are consumed by TestCaseMixin when the fake
    filesystem is set up.
    """

    def __init__(self, methodName: str='runTest', additional_skip_names: Optional[List[Union[(str, ModuleType)]]]=None, modules_to_reload: Optional[List[ModuleType]]=None, modules_to_patch: Optional[Dict[(str, ModuleType)]]=None):
        """Store the skip/reload/patch module lists for later fs setup.

        Args:
            methodName: standard unittest test-method name.
            additional_skip_names: modules (or names) that keep the real fs.
            modules_to_reload: modules reloaded after patching.
            modules_to_patch: mapping of module names to fake replacements.
        """
        super().__init__(methodName)
        self.additional_skip_names = additional_skip_names
        self.modules_to_reload = modules_to_reload
        self.modules_to_patch = modules_to_patch

    def tearDownPyfakefs(self) -> None:
        """Compatibility hook — body not visible in this excerpt; presumably
        a deprecated no-op. TODO confirm against the full source."""
def do_EQUSIZED(op, stack, state):
    """ESIL sized-assignment: pop a register and a value, assign the value
    to the register, updating the old/cur/lastsz ESIL flags.

    Temporarily switches the ESIL operand size to the instruction's size
    while popping the value, restoring it afterwards.
    """
    length = getlen(op, state)
    prev_size = state.esil['size']
    state.esil['size'] = length
    reg = stack.pop()
    (val,) = pop_values(stack, state)
    tmp = get_value(reg, state)
    # Fix: identity comparison — `!= None` relied on z3's __ne__ overload
    # for symbolic conditions; `is not None` is the unambiguous check.
    if state.condition is not None:
        # Conditional execution: keep the old value unless the condition holds.
        val = z3.If(state.condition, val, tmp)
    state.registers[reg] = val
    state.esil['old'] = tmp
    state.esil['cur'] = val
    state.esil['lastsz'] = state.registers[reg].size()
    state.esil['size'] = prev_size
@pytest.fixture(params=[('chunk', 0), ('chunk', 1), ('enumerate', None)])
def sharding_spec(shape: Tuple[(int, int)], request: SubRequest) -> ShardingSpec:
    """Parametrized fixture yielding chunk (dim 0/1) and enumerable specs.

    Fix: restored the mangled decorator line — the `@pytest.fixture` prefix
    had been lost, leaving invalid syntax (request.param confirms this is a
    parametrized fixture).
    """
    (sharding_type, dim) = request.param
    if (sharding_type == 'chunk'):
        return ChunkShardingSpec(dim=dim, placements=[f'rank:{rank}/cpu' for rank in range(WORLD_SIZE)])
    assert (sharding_type == 'enumerate')
    # Split the 2-D shape into four quadrants, one shard per rank.
    a = (shape[0] // 2)
    b = (shape[1] // 2)
    c = (shape[0] - (shape[0] // 2))
    d = (shape[1] - (shape[1] // 2))
    return EnumerableShardingSpec([ShardMetadata(shard_offsets=[0, 0], shard_sizes=[a, b], placement='rank:0/cpu'), ShardMetadata(shard_offsets=[0, b], shard_sizes=[a, d], placement='rank:1/cpu'), ShardMetadata(shard_offsets=[a, 0], shard_sizes=[c, b], placement='rank:2/cpu'), ShardMetadata(shard_offsets=[a, b], shard_sizes=[c, d], placement='rank:3/cpu')])
# NOTE(review): the bare lines below appear to be mangled click decorators
# (`@click.command(...)`, `@click.argument(...)`, `@click.option(...)` and
# rio option helpers) whose `@...` prefixes were lost in extraction.
(short_help='Clip a raster to given bounds.')
('files', nargs=(- 1), type=click.Path(), required=True, metavar='INPUT OUTPUT')
_opt
_opt
_window_options
('--like', type=click.Path(exists=True), help='Raster dataset to use as a template for bounds')
_opt
_opt
_geographic_opt
_projected_opt
_opt
_options
('--with-complement/--without-complement', default=False, help='Include the relative complement of the raster in the given bounds (giving a larger result), else return results only from the intersection of the raster and the bounds (the default).')
_context
def clip(ctx, files, output, bounds, like, driver, nodata, projection, overwrite, creation_options, with_complement, to_data_window):
    """Clip a raster to given bounds, a template raster's bounds, or its
    own valid-data window, writing the windowed read to a new dataset."""
    from rasterio.warp import transform_bounds
    with ctx.obj['env']:
        (output, files) = resolve_inout(files=files, output=output, overwrite=overwrite)
        input = files[0]
        with rasterio.open(input) as src:
            # Rotated/sheared rasters cannot be clipped by windowed reads.
            if (not src.transform.is_rectilinear):
                raise click.BadParameter('Non-rectilinear rasters (i.e. with rotation or shear) cannot be clipped')
            if bounds:
                if (projection == 'geographic'):
                    # Bounds given in lon/lat: reproject into the source CRS.
                    bounds = transform_bounds(CRS.from_epsg(4326), src.crs, *bounds)
                if disjoint_bounds(bounds, src.bounds):
                    raise click.BadParameter('must overlap the extent of the input raster', param='--bounds', param_hint='--bounds')
                bounds_window = src.window(*bounds)
            elif like:
                # Borrow the bounds from a template dataset.
                with rasterio.open(like) as template_ds:
                    bounds = template_ds.bounds
                    if (template_ds.crs != src.crs):
                        bounds = transform_bounds(template_ds.crs, src.crs, *bounds)
                    if disjoint_bounds(bounds, src.bounds):
                        raise click.BadParameter('must overlap the extent of the input raster', param='--like', param_hint='--like')
                    bounds_window = src.window(*bounds)
            elif to_data_window:
                # Clip to the window containing unmasked data of band 1.
                bounds_window = get_data_window(src.read(1, masked=True))
            else:
                raise click.UsageError('--bounds, --like, or --to-data-window required')
            if (not with_complement):
                # Restrict to the source extent (no boundless complement).
                bounds_window = bounds_window.intersection(Window(0, 0, src.width, src.height))
            # Snap the window to integer lengths/offsets for output sizing.
            out_window = bounds_window.round_lengths()
            out_window = out_window.round_offsets()
            height = int(out_window.height)
            width = int(out_window.width)
            out_kwargs = src.profile
            if driver:
                out_kwargs['driver'] = driver
            if (nodata is not None):
                out_kwargs['nodata'] = nodata
            out_kwargs.update({'height': height, 'width': width, 'transform': src.window_transform(out_window)})
            out_kwargs.update(**creation_options)
            # Drop block sizes larger than the output to keep drivers happy.
            if (('blockxsize' in out_kwargs) and (int(out_kwargs['blockxsize']) > width)):
                del out_kwargs['blockxsize']
                logger.warning('Blockxsize removed from creation options to accomodate small output width')
            if (('blockysize' in out_kwargs) and (int(out_kwargs['blockysize']) > height)):
                del out_kwargs['blockysize']
                logger.warning('Blockysize removed from creation options to accomodate small output height')
            with rasterio.open(output, 'w', **out_kwargs) as out:
                out.write(src.read(window=out_window, out_shape=(src.count, height, width), boundless=True, masked=True))
                # Carry over a per-dataset mask if the source has one.
                if (MaskFlags.per_dataset in src.mask_flag_enums[0]):
                    out.write_mask(src.read_masks(window=out_window, out_shape=(src.count, height, width), boundless=True)[0])
@pytest.mark.parametrize('name', ['pypi', 'PyPI'])
def test_source_remove_pypi_and_other(name: str, tester_pypi_and_other: CommandTester, poetry_with_pypi_and_other: Poetry, source_existing: Source) -> None:
    """Removing the PyPI source (case-insensitively) leaves only the other source.

    Fix: restored the mangled decorator line — the `@pytest.mark.` prefix had
    been lost, leaving invalid syntax and no parametrization.
    """
    tester_pypi_and_other.execute(name)
    assert (tester_pypi_and_other.io.fetch_output().strip() == 'Removing source with name PyPI.')
    poetry_with_pypi_and_other.pyproject.reload()
    sources = poetry_with_pypi_and_other.get_sources()
    assert (sources == [source_existing])
    assert (tester_pypi_and_other.status_code == 0)
def print_model_with_flops(model, total_flops, total_params, units='GFLOPs', precision=3, ost=sys.stdout, flush=False):
    """Print *model* with per-module parameter/FLOP counts and percentages.

    Temporarily monkey-patches each module's extra_repr (via bound methods)
    so the standard `print(model)` output includes the accumulated costs,
    then restores the originals.

    NOTE(review): the `units` parameter is not referenced in this body —
    num_to_str is called with auto_select_unit=True instead; confirm whether
    that is intended.
    """
    def accumulate_params(self):
        # Supported leaves carry their own count; containers sum children.
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            return sum

    def accumulate_flops(self):
        # Normalize by the number of batches seen during profiling.
        if is_supported_instance(self):
            return (self.__flops__ / model.__batch_counter__)
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum

    def flops_repr(self):
        # Absolute counts plus share of the model totals, appended to the
        # module's original extra_repr.
        accumulated_num_params = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([num_to_str(accumulated_num_params, precision=precision, auto_select_unit=True), '{:.3%}Params'.format((accumulated_num_params / total_params)), (num_to_str(accumulated_flops_cost, precision=precision, auto_select_unit=True) + 'FLOPS'), '{:.3%}FLOPs'.format((accumulated_flops_cost / total_flops)), self.original_extra_repr()])

    def add_extra_repr(m):
        # Bind the helpers as methods on each module instance.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if (m.extra_repr != flops_extra_repr):
            # Stash the original so it can be shown and later restored.
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert (m.extra_repr != m.original_extra_repr)

    def del_extra_repr(m):
        # Undo the per-instance patching done in add_extra_repr.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost, flush=flush)
    model.apply(del_extra_repr)
def test_uninject_with_include_apps(pipx_temp_env, capsys, caplog):
    """Uninjecting a package injected with --include-apps removes its files."""
    for args in (
        ['install', 'pycowsay'],
        ['inject', 'pycowsay', PKG['black']['spec'], '--include-deps', '--include-apps'],
        ['uninject', 'pycowsay', 'black', '--verbose'],
    ):
        assert not run_pipx_cli(args)
    assert 'removed file' in caplog.text
# NOTE(review): the line below appears to be a mangled `@parse_args('v', 'i',
# 'none')` decorator from torch.onnx whose prefix was lost in extraction.
_args('v', 'i', 'none')
def softmax(g, input, dim, dtype=None):
    """ONNX export of softmax along *dim*, with optional output dtype cast.

    When the softmax axis is (statically known to be) the last dimension a
    single ONNX Softmax node is emitted; otherwise the op is decomposed into
    max-subtraction / Exp / ReduceSum / Div for numerical stability.
    """
    input_dim = input.type().dim()
    if input_dim:
        # Normalize a negative axis against the known rank.
        if (dim < 0):
            dim = (input_dim + dim)
        if (input_dim == (dim + 1)):
            # Softmax over the last axis: the native op suffices.
            softmax = g.op('Softmax', input, axis_i=dim)
            if (dtype and (dtype.node().kind() != 'prim::Constant')):
                parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
                softmax = g.op('Cast', softmax, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
            return softmax
    # Fallback decomposition; subtracting the max keeps Exp from overflowing.
    max_value = g.op('ReduceMax', input, axes_i=[dim], keepdims_i=1)
    input = g.op('Sub', input, max_value)
    exp = g.op('Exp', input)
    sum = g.op('ReduceSum', exp, axes_i=[dim])
    softmax = g.op('Div', exp, sum)
    if (dtype and (dtype.node().kind() != 'prim::Constant')):
        parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
        softmax = g.op('Cast', softmax, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
    return softmax
class IncrPyVars(Component):
    """Toy PyMTL component: three update blocks move an incrementing 8-bit
    value through two buffers, with explicit scheduling constraints."""

    def construct(s):
        # 8-bit state values (b8).
        s.incr_in = b8(10)
        s.incr_out = b8(0)
        s.buf1 = b8(0)
        s.buf2 = b8(0)

        # NOTE: these update blocks are introspected by the PyMTL framework.
        def upA():
            # Stage 1: latch the input, then bump it for the next evaluation.
            s.buf1 = s.incr_in
            s.incr_in += b8(10)

        def upB():
            # Stage 2: increment by one.
            s.buf2 = (s.buf1 + b8(1))

        def upC():
            # Stage 3: drive the output.
            s.incr_out = s.buf2
        # Schedule upA before upB before upC within each evaluation.
        s.add_constraints((U(upA) < U(upB)), (U(upB) < U(upC)))

    def line_trace(s):
        """Render the two buffer values for simulation traces."""
        return '{:2} (+1) {:2}'.format(int(s.buf1), int(s.buf2))
class NameTransformer(ast.NodeTransformer):
    """AST pass that rewrites ``Name`` identifiers from three rename tables.

    Lookups are attempted in priority order — class replacements, then
    import replacements, then method renames — and only the first table
    containing the identifier is applied.
    """

    def __init__(self, class_replace_map: Optional[Dict[str, str]] = None, import_replace_map: Optional[Dict[str, str]] = None, rename_methods: Optional[Dict[str, str]] = None):
        # Keep the caller's mapping object when one is supplied (even an
        # empty one); only substitute a fresh dict for None.
        self.class_replace_map = {} if class_replace_map is None else class_replace_map
        self.import_replace_map = {} if import_replace_map is None else import_replace_map
        self.rename_methods = {} if rename_methods is None else rename_methods

    def visit_Name(self, node: ast.Name) -> ast.AST:
        """Rename the identifier using the first matching table, then recurse."""
        for table in (self.class_replace_map, self.import_replace_map, self.rename_methods):
            if node.id in table:
                node.id = table[node.id]
                break
        return self.generic_visit(node)
def test_help_text(monkeypatch, capsys):
    """`pipx install --help` prints the long application description."""
    # Replace argparse's sys.exit with a stub that raises a recognizable
    # error, so control returns to the test right after help is printed.
    exit_stub = mock.Mock(side_effect=ValueError('raised in test to exit early'))
    with mock.patch.object(sys, 'exit', exit_stub):
        with pytest.raises(ValueError, match='raised in test to exit early'):
            run_pipx_cli(['install', '--help'])
    captured = capsys.readouterr()
    assert 'apps you can run from anywhere' in captured.out
def test_ki_disabled_in_del() -> None:
    """Checks KI (keyboard-interrupt) protection state across nested calls.

    NOTE(review): the bare `_core.disable_ki_protection` expression below
    was almost certainly a stripped decorator (`@_core.disable_ki_protection`
    applied to `outerfunction`); confirm against the original file.
    """
    def nestedfunction() -> bool:
        # Reports whether the *caller's* context is KI-protected.
        return _core.currently_ki_protected()
    def __del__() -> None:
        # Simulates a finalizer; the asserts expect it to run KI-protected.
        assert _core.currently_ki_protected()
        assert nestedfunction()
    _core.disable_ki_protection
    def outerfunction() -> None:
        # With protection disabled, neither this frame nor its callee
        # reports as protected.
        assert (not _core.currently_ki_protected())
        assert (not nestedfunction())
        __del__()
    __del__()
    outerfunction()
    assert nestedfunction()
class Padding(Dict):
    """Config type for four-sided padding.

    A fixed-key dict with exactly the keys 'top', 'bottom', 'left' and
    'right', each a non-negative int, converted to a PaddingValues tuple.
    """
    # Presumably suppresses the value type in generated docs — confirm.
    _show_valtype = False
    def __init__(self, *, none_ok: bool=False, completions: _Completions=None) -> None:
        super().__init__(keytype=String(), valtype=Int(minval=0, none_ok=none_ok), fixed_keys=['top', 'bottom', 'left', 'right'], none_ok=none_ok, completions=completions)
    def to_py(self, value: Union[(DictType, _UnsetNone)]) -> Union[(usertypes.Unset, PaddingValues)]:
        """Convert the validated dict into a PaddingValues namedtuple."""
        d = super().to_py(value)
        if isinstance(d, usertypes.Unset):
            # Propagate the "unset" sentinel untouched.
            return d
        return PaddingValues(**d)
def eval_dialogue_system(infile):
    """Score a dialogue-system output file.

    *infile* is a JSON-lines file whose first line is a header; every
    subsequent line holds an object with an ``answer`` string and an
    ``output`` list, of which only the first candidate is scored.

    Returns:
        (F1, RL, lens): mean token-level F1, mean ROUGE-L, and mean answer
        length in tokens, each rounded to 4 decimal places.
    """
    # Context manager so the file handle is closed deterministically
    # (the original leaked it via open(...).readlines()).
    with open(infile, 'r') as fh:
        lines = fh.readlines()[1:]  # drop the header line
    f1_scores = []
    rl_scores = []
    answer_lengths = []
    for line in lines:
        record = json.loads(line)
        answer = record['answer']
        output = record['output'][0]
        f1_scores.append(f1(output, answer))
        rl_scores.append(rl(output, answer))
        answer_lengths.append(len(output.split()))
    F1 = round(np.mean(f1_scores), 4)
    RL = round(np.mean(rl_scores), 4)
    lens = round(np.mean(answer_lengths), 4)
    return (F1, RL, lens)
def get_args_parser():
    """Create the argparse parser with every ReferFormer train/infer option.

    Built with add_help=False so it can be composed as a parent parser.
    """
    parser = argparse.ArgumentParser('ReferFormer training and inference scripts.', add_help=False)
    # --- Optimizer / learning-rate schedule ---
    parser.add_argument('--lr', default=0.0001, type=float)
    parser.add_argument('--lr_backbone', default=5e-05, type=float)
    parser.add_argument('--lr_backbone_names', default=['backbone.0'], type=str, nargs='+')
    parser.add_argument('--lr_text_encoder', default=1e-05, type=float)
    parser.add_argument('--lr_text_encoder_names', default=['text_encoder'], type=str, nargs='+')
    parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')
    parser.add_argument('--lr_linear_proj_mult', default=1.0, type=float)
    parser.add_argument('--batch_size', default=1, type=int)
    parser.add_argument('--weight_decay', default=0.0005, type=float)
    parser.add_argument('--epochs', default=10, type=int)
    parser.add_argument('--lr_drop', default=[6, 8], type=int, nargs='+')
    parser.add_argument('--clip_max_norm', default=0.1, type=float, help='gradient clipping max norm')
    # --- Model loading / variants ---
    parser.add_argument('--pretrained_weights', type=str, default=None, help='Path to the pretrained model.')
    parser.add_argument('--with_box_refine', default=False, action='store_true')
    parser.add_argument('--two_stage', default=False, action='store_true')
    # --- Backbone ---
    parser.add_argument('--backbone', default='resnet50', type=str, help='Name of the convolutional backbone to use')
    parser.add_argument('--backbone_pretrained', default=None, type=str, help='if use swin backbone and train from scratch, the path to the pretrained weights')
    parser.add_argument('--use_checkpoint', action='store_true', help='whether use checkpoint for swin/video swin backbone')
    parser.add_argument('--dilation', action='store_true', help='If true, we replace stride with dilation in the last convolutional block (DC5)')
    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), help='Type of positional embedding to use on top of the image features')
    parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
    # --- Transformer ---
    parser.add_argument('--enc_layers', default=4, type=int, help='Number of encoding layers in the transformer')
    parser.add_argument('--dec_layers', default=4, type=int, help='Number of decoding layers in the transformer')
    parser.add_argument('--dim_feedforward', default=2048, type=int, help='Intermediate size of the feedforward layers in the transformer blocks')
    parser.add_argument('--hidden_dim', default=256, type=int, help='Size of the embeddings (dimension of the transformer)')
    parser.add_argument('--dropout', default=0.1, type=float, help='Dropout applied in the transformer')
    parser.add_argument('--nheads', default=8, type=int, help="Number of attention heads inside the transformer's attentions")
    parser.add_argument('--num_frames', default=5, type=int, help='Number of clip frames for training')
    parser.add_argument('--num_queries', default=5, type=int, help='Number of query slots, all frames share the same queries')
    parser.add_argument('--dec_n_points', default=4, type=int)
    parser.add_argument('--enc_n_points', default=4, type=int)
    parser.add_argument('--pre_norm', action='store_true')
    parser.add_argument('--freeze_text_encoder', action='store_true')
    # --- Segmentation head ---
    parser.add_argument('--masks', action='store_true', help='Train segmentation head if the flag is provided')
    parser.add_argument('--mask_dim', default=256, type=int, help='Size of the mask embeddings (dimension of the dynamic mask conv)')
    parser.add_argument('--controller_layers', default=3, type=int, help='Dynamic conv layer number')
    parser.add_argument('--dynamic_mask_channels', default=8, type=int, help='Dynamic conv final channel number')
    parser.add_argument('--no_rel_coord', dest='rel_coord', action='store_false', help='Disables relative coordinates')
    # --- Matcher / loss coefficients ---
    parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', help='Disables auxiliary decoding losses (loss at each layer)')
    parser.add_argument('--set_cost_class', default=2, type=float, help='Class coefficient in the matching cost')
    parser.add_argument('--set_cost_bbox', default=5, type=float, help='L1 box coefficient in the matching cost')
    parser.add_argument('--set_cost_giou', default=2, type=float, help='giou box coefficient in the matching cost')
    parser.add_argument('--set_cost_mask', default=2, type=float, help='mask coefficient in the matching cost')
    parser.add_argument('--set_cost_dice', default=5, type=float, help='mask coefficient in the matching cost')
    parser.add_argument('--mask_loss_coef', default=2, type=float)
    parser.add_argument('--dice_loss_coef', default=5, type=float)
    parser.add_argument('--cls_loss_coef', default=2, type=float)
    parser.add_argument('--bbox_loss_coef', default=5, type=float)
    parser.add_argument('--giou_loss_coef', default=2, type=float)
    parser.add_argument('--eos_coef', default=0.1, type=float, help='Relative classification weight of the no-object class')
    parser.add_argument('--focal_alpha', default=0.25, type=float)
    # --- Datasets / paths ---
    parser.add_argument('--dataset_file', default='ytvos', help='Dataset name')
    parser.add_argument('--coco_path', type=str, default='data/coco')
    parser.add_argument('--ytvos_path', type=str, default='data/ref-youtube-vos')
    parser.add_argument('--davis_path', type=str, default='data/ref-davis')
    parser.add_argument('--a2d_path', type=str, default='data/a2d_sentences')
    parser.add_argument('--jhmdb_path', type=str, default='data/jhmdb_sentences')
    parser.add_argument('--max_skip', default=3, type=int, help='max skip frame number')
    parser.add_argument('--max_size', default=640, type=int, help='max size for the frame')
    parser.add_argument('--binary', action='store_true')
    parser.add_argument('--remove_difficult', action='store_true')
    # --- Runtime / checkpointing ---
    parser.add_argument('--output_dir', default='output', help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--num_workers', default=4, type=int)
    # --- Inference ---
    parser.add_argument('--threshold', default=0.5, type=float)
    parser.add_argument('--ngpu', default=8, type=int, help='gpu number when inference for ref-ytvos and ref-davis')
    parser.add_argument('--split', default='valid', type=str, choices=['valid', 'test'])
    parser.add_argument('--visualize', action='store_true', help='whether visualize the masks during inference')
    # --- Distributed ---
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory')
    return parser
def convert_sentence_to_features(sentence, max_seq_length, tokenizer):
    """Convert one sentence into BERT-style input features.

    The sentence is wordpiece-tokenized, truncated so it fits together with
    the [CLS]/[SEP] markers, and padded with zeros to *max_seq_length*.

    Returns:
        (input_ids, input_mask, segment_ids, (start, end), (None, None))
        where *start*/*end* are the inclusive token indices of the sentence
        inside the ``[CLS] ... [SEP]`` sequence.
    """
    # Truncate so the sentence plus [CLS] and [SEP] fits.
    wordpieces = tokenizer.tokenize(sentence)[:max_seq_length - 2]
    tokens = ['[CLS]'] + wordpieces + ['[SEP]']
    segment_ids = [0] * len(tokens)  # single-sentence input: all segment 0
    sentence_start_idx = 1  # first wordpiece sits right after [CLS]
    # Inclusive index of the last wordpiece (0 for an empty sentence; the
    # original raised UnboundLocalError in that case).
    sentence_end_idx = len(wordpieces)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)  # 1 = real token, 0 = padding
    # Zero-pad all three feature lists up to the fixed length.
    pad_len = max_seq_length - len(input_ids)
    input_ids.extend([0] * pad_len)
    input_mask.extend([0] * pad_len)
    segment_ids.extend([0] * pad_len)
    # The original used `assert cond, print(...)`, which produces a None
    # assertion message; use real messages instead.
    assert len(input_ids) == max_seq_length, f'input_ids length {len(input_ids)} != {max_seq_length}'
    assert len(input_mask) == max_seq_length, f'input_mask length {len(input_mask)} != {max_seq_length}'
    assert len(segment_ids) == max_seq_length, f'segment_ids length {len(segment_ids)} != {max_seq_length}'
    return (input_ids, input_mask, segment_ids, (sentence_start_idx, sentence_end_idx), (None, None))
# NOTE(review): the next line looks like a mangled decorator — most likely
# `@_REGISTRY.register(name='Trans2Seg')`; confirm against the original file.
_REGISTRY.register(name='Trans2Seg')
class Trans2Seg(SegBaseModel):
    """Trans2Seg segmentation model: CNN backbone plus a transformer head.

    All hyperparameters are read from the global ``cfg`` object.
    """
    def __init__(self):
        super().__init__()
        # Channel widths of the c1 (low-level) and c4 (high-level) feature
        # maps depend on the backbone family.
        if self.backbone.startswith('mobilenet'):
            c1_channels = 24
            c4_channels = 320
        else:
            c1_channels = 256
            c4_channels = 2048
        vit_params = cfg.MODEL.TRANS2Seg
        hid_dim = cfg.MODEL.TRANS2Seg.hid_dim
        # The transformer head needs a fixed square input size, so cropping
        # must be off and all train/test sizes must agree.
        assert ((cfg.AUG.CROP == False) and (cfg.TRAIN.CROP_SIZE[0] == cfg.TRAIN.CROP_SIZE[1] == cfg.TRAIN.BASE_SIZE == cfg.TEST.CROP_SIZE[0] == cfg.TEST.CROP_SIZE[1]))
        # Spatial size (H*W) of the c4 feature map; the //16 suggests a
        # stride-16 backbone — confirm.
        c4_HxW = ((cfg.TRAIN.BASE_SIZE // 16) ** 2)
        vit_params['decoder_feat_HxW'] = c4_HxW
        self.transformer_head = TransformerHead(vit_params, c1_channels=c1_channels, c4_channels=c4_channels, hid_dim=hid_dim)
        if self.aux:
            # Auxiliary FCN head applied to the c3 feature map.
            self.auxlayer = _FCNHead(728, self.nclass)
        self.__setattr__('decoder', (['transformer_head', 'auxlayer'] if self.aux else ['transformer_head']))
    def forward(self, x):
        """Return a tuple of logits upsampled to the input resolution
        (main output first, auxiliary output second when enabled)."""
        size = x.size()[2:]
        (c1, c2, c3, c4) = self.encoder(x)
        outputs = list()
        x = self.transformer_head(c4, c1)
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        outputs.append(x)
        if self.aux:
            auxout = self.auxlayer(c3)
            auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
            outputs.append(auxout)
        return tuple(outputs)
def train(config, workdir):
    """Run the score-SDE training loop.

    Sets up model/EMA/optimizer state, restores any resumable checkpoint
    from *workdir*, then alternates training steps with periodic logging,
    evaluation, checkpointing and (optionally) snapshot sampling.

    Args:
        config: config object with ``training``, ``model``, ``data`` and
            ``device`` sections (exact schema defined elsewhere).
        workdir: directory for checkpoints, samples and tensorboard logs.
    """
    # Directories for generated samples and tensorboard summaries.
    sample_dir = os.path.join(workdir, 'samples')
    tf.io.gfile.makedirs(sample_dir)
    tb_dir = os.path.join(workdir, 'tensorboard')
    tf.io.gfile.makedirs(tb_dir)
    writer = tensorboard.SummaryWriter(tb_dir)
    # Model, EMA shadow weights and optimizer are bundled into one state
    # dict so checkpointing can round-trip everything at once.
    score_model = mutils.create_model(config)
    ema = ExponentialMovingAverage(score_model.parameters(), decay=config.model.ema_rate)
    optimizer = losses.get_optimizer(config, score_model.parameters())
    state = dict(optimizer=optimizer, model=score_model, ema=ema, step=0)
    checkpoint_dir = os.path.join(workdir, 'checkpoints')
    # "Meta" checkpoint used to resume after preemption.
    checkpoint_meta_dir = os.path.join(workdir, 'checkpoints-meta', 'checkpoint.pth')
    tf.io.gfile.makedirs(checkpoint_dir)
    tf.io.gfile.makedirs(os.path.dirname(checkpoint_meta_dir))
    state = restore_checkpoint(checkpoint_meta_dir, state, config.device)
    initial_step = int(state['step'])
    (train_ds, eval_ds, _) = datasets.get_dataset(config, uniform_dequantization=config.data.uniform_dequantization)
    train_iter = iter(train_ds)
    eval_iter = iter(eval_ds)
    # Scalers map pixel values into / out of the model's value range.
    scaler = datasets.get_data_scaler(config)
    inverse_scaler = datasets.get_data_inverse_scaler(config)
    # Select the forward SDE and the smallest sampling time epsilon.
    if (config.training.sde.lower() == 'vpsde'):
        sde = sde_lib.VPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=config.model.num_scales)
        sampling_eps = 0.001
    elif (config.training.sde.lower() == 'subvpsde'):
        sde = sde_lib.subVPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=config.model.num_scales)
        sampling_eps = 0.001
    elif (config.training.sde.lower() == 'vesde'):
        sde = sde_lib.VESDE(sigma_min=config.model.sigma_min, sigma_max=config.model.sigma_max, N=config.model.num_scales)
        sampling_eps = 1e-05
    else:
        raise NotImplementedError(f'SDE {config.training.sde} unknown.')
    optimize_fn = losses.optimization_manager(config)
    continuous = config.training.continuous
    reduce_mean = config.training.reduce_mean
    likelihood_weighting = config.training.likelihood_weighting
    train_step_fn = losses.get_step_fn(sde, train=True, optimize_fn=optimize_fn, reduce_mean=reduce_mean, continuous=continuous, likelihood_weighting=likelihood_weighting)
    eval_step_fn = losses.get_step_fn(sde, train=False, optimize_fn=optimize_fn, reduce_mean=reduce_mean, continuous=continuous, likelihood_weighting=likelihood_weighting)
    if config.training.snapshot_sampling:
        sampling_shape = (config.training.batch_size, config.data.num_channels, config.data.image_size, config.data.image_size)
        sampling_fn = sampling.get_sampling_fn(config, sde, sampling_shape, inverse_scaler, sampling_eps)
    num_train_steps = config.training.n_iters
    logging.info(('Starting training loop at step %d.' % (initial_step,)))
    for step in range(initial_step, (num_train_steps + 1)):
        # Batches come from a TF dataset as NHWC; convert to NCHW tensors.
        batch = torch.from_numpy(next(train_iter)['image']._numpy()).to(config.device).float()
        batch = batch.permute(0, 3, 1, 2)
        batch = scaler(batch)
        loss = train_step_fn(state, batch)
        if ((step % config.training.log_freq) == 0):
            logging.info(('step: %d, training_loss: %.5e' % (step, loss.item())))
            writer.add_scalar('training_loss', loss, step)
        # Periodically persist the resumable "meta" checkpoint.
        if ((step != 0) and ((step % config.training.snapshot_freq_for_preemption) == 0)):
            save_checkpoint(checkpoint_meta_dir, state)
        # Periodic evaluation on the held-out stream.
        if ((step % config.training.eval_freq) == 0):
            eval_batch = torch.from_numpy(next(eval_iter)['image']._numpy()).to(config.device).float()
            eval_batch = eval_batch.permute(0, 3, 1, 2)
            eval_batch = scaler(eval_batch)
            eval_loss = eval_step_fn(state, eval_batch)
            logging.info(('step: %d, eval_loss: %.5e' % (step, eval_loss.item())))
            writer.add_scalar('eval_loss', eval_loss.item(), step)
        # Numbered snapshot checkpoints, plus optional sample generation.
        if (((step != 0) and ((step % config.training.snapshot_freq) == 0)) or (step == num_train_steps)):
            save_step = (step // config.training.snapshot_freq)
            save_checkpoint(os.path.join(checkpoint_dir, f'checkpoint_{save_step}.pth'), state)
            if config.training.snapshot_sampling:
                # Sample with the EMA weights, then restore the live ones.
                ema.store(score_model.parameters())
                ema.copy_to(score_model.parameters())
                (sample, n) = sampling_fn(score_model)
                ema.restore(score_model.parameters())
                this_sample_dir = os.path.join(sample_dir, 'iter_{}'.format(step))
                tf.io.gfile.makedirs(this_sample_dir)
                nrow = int(np.sqrt(sample.shape[0]))
                image_grid = make_grid(sample, nrow, padding=2)
                sample = np.clip((sample.permute(0, 2, 3, 1).cpu().numpy() * 255), 0, 255).astype(np.uint8)
                with tf.io.gfile.GFile(os.path.join(this_sample_dir, 'sample.np'), 'wb') as fout:
                    np.save(fout, sample)
                with tf.io.gfile.GFile(os.path.join(this_sample_dir, 'sample.png'), 'wb') as fout:
                    save_image(image_grid, fout)
# NOTE(review): the two lines below look like mangled pytest decorators —
# most likely `@pytest.mark.unit` and `@pytest.mark.parametrize(...)`;
# confirm against the original file.
.unit()
.parametrize(('plugins', 'expected'), [([(None, DummyDist('pytask-plugin', '0.0.1'))], ['plugin-0.0.1']), ([(None, DummyDist('plugin', '1.0.0'))], ['plugin-1.0.0'])])
def test_format_plugin_names_and_versions(plugins, expected):
    # Per the parametrized data, a "pytask-" prefix on a distribution name
    # is stripped from the formatted "name-version" output.
    assert (_format_plugin_names_and_versions(plugins) == expected)
def _bounds(scdf, **kwargs):
if scdf.empty:
return None
col_order = [c for c in scdf.columns]
by = kwargs.get('group_by')
if (not (type(by) is list)):
by = [by]
agg_dict = (kwargs.get('agg') if kwargs.get('agg') else {})
agg_dict.update({'Start': 'min', 'End': 'max', 'Chromosome': 'first'})
if ('Strand' in scdf.columns):
agg_dict['Strand'] = 'first'
res = scdf.groupby(by).agg(agg_dict).reset_index()
res = res.reindex(columns=[c for c in col_order if (c in res.columns)])
return res |
def SparseCompRow_matmult(M, y, val, row, col, x, num_iterations):
    """Benchmark a CSR sparse matrix-vector multiply, y = A @ x.

    Repeats the multiply *num_iterations* times, writing each row's dot
    product into y in place, and returns the elapsed wall-clock seconds.
    """
    # Build the iteration range before starting the clock so only the
    # multiply itself is timed.
    iterations = range(num_iterations)
    start = pyperf.perf_counter()
    for _ in iterations:
        for r in range(M):
            acc = 0.0
            # row[r]:row[r+1] delimits row r's nonzeros in CSR layout.
            for k in range(row[r], row[r + 1]):
                acc += x[col[k]] * val[k]
            y[r] = acc
    return pyperf.perf_counter() - start
# NOTE(review): the line below looks like a mangled decorator — most likely
# `@pytest.mark.parametrize('qtwe_version, setting, value, expected', ...)`;
# confirm against the original file.
.parametrize('qtwe_version, setting, value, expected', [('6.6.1', 'policy.images', 'always', [('ImagePolicy', '0')]), ('6.6.1', 'policy.images', 'never', [('ImagePolicy', '1')]), ('6.6.1', 'policy.images', 'smart', [('ImagePolicy', '2'), ('ImageClassifierPolicy', '0')]), ('6.6.1', 'policy.images', 'smart-simple', [('ImagePolicy', '2'), ('ImageClassifierPolicy', '1')]), ('6.5.3', 'policy.images', 'smart', [('ImagePolicy', '2')]), ('6.5.3', 'policy.images', 'smart-simple', [('ImagePolicy', '2')])])
def test_image_policy(config_stub, qtwe_version: str, setting: str, value: str, expected: List[Tuple[(str, str)]]):
    """Dark-mode image policy maps to the expected Blink settings.

    Per the parametrized expectations, 'smart'/'smart-simple' additionally
    emit ImageClassifierPolicy on QtWebEngine 6.6+, but fall back to
    ImagePolicy alone on older versions.
    """
    config_stub.val.colors.webpage.darkmode.enabled = True
    config_stub.set_obj(('colors.webpage.darkmode.' + setting), value)
    versions = version.WebEngineVersions.from_api(qtwe_version=qtwe_version, chromium_version=None)
    darkmode_settings = darkmode.settings(versions=versions, special_flags=[])
    assert (darkmode_settings['dark-mode-settings'] == expected)
def create_rectangular_field_function(centre, side_lengths, penumbra_width, rotation=0):
    """Build ``f(x, y) -> intensity`` for a rotated rectangular field.

    Args:
        centre: (x, y) coordinates of the field centre.
        side_lengths: (width, length) of the rectangle, passed to the
            per-axis profile functions.
        penumbra_width: soft-edge width handed to the profile functions.
        rotation: rotation in degrees (the negation converts field-frame
            rotation into the coordinate back-rotation applied below).

    Returns:
        A function of (x, y) accepting scalars or array-likes.
    """
    width_profile = create_profile_function(0, side_lengths[0], penumbra_width)
    length_profile = create_profile_function(0, side_lengths[1], penumbra_width)
    theta = -rotation / 180 * np.pi  # degrees -> radians, inverse rotation
    def field(x, y):
        # np.asarray replaces np.array(..., copy=False), which raises under
        # NumPy 2.0 whenever a copy would actually be required.
        x = np.asarray(x)
        y = np.asarray(y)
        # Shift into the field's frame, rotate, then evaluate the separable
        # width/length profiles.
        x_shifted = x - centre[0]
        y_shifted = y - centre[1]
        x_rotated, y_rotated = rotate_coords(x_shifted, y_shifted, theta)
        return width_profile(x_rotated) * length_profile(y_rotated)
    return field
class FlatSimilarityWrapper(nn.Module):
    """Selects a flat attention scorer based on configuration.

    Reads ``'{prefix}_att_type'`` from *opt* and instantiates
    BilinearFlatSim for 'bilinear'; any other value falls back to FlatSim.
    """
    def __init__(self, x1_dim, x2_dim, prefix='attention', opt=None, dropout=None):
        super(FlatSimilarityWrapper, self).__init__()
        # The original used a mutable default argument (opt={}); None is the
        # safe sentinel and behaves identically for callers.
        if opt is None:
            opt = {}
        self.score_func_str = opt.get('{}_att_type'.format(prefix), 'none').lower()
        self.att_dropout = DropoutWrapper(opt.get('{}_att_dropout'.format(prefix), 0))
        if self.score_func_str == 'bilinear':
            self.score_func = BilinearFlatSim(x1_dim, x2_dim, prefix=prefix, opt=opt, dropout=dropout)
        else:
            self.score_func = FlatSim(x1_dim, x2_dim, prefix=prefix, opt=opt, dropout=dropout)
    def forward(self, x1, x2, mask):
        """Score x1 against x2 under *mask* with the configured scorer."""
        return self.score_func(x1, x2, mask)
# NOTE(review): the line below looks like a mangled decorator — most likely
# `@pytest.mark.parametrize('shape', ...)`; confirm against the original file.
.parametrize('shape', [[1.0], [1j], [1.0, 1.0], [1.0, 0.5], [1.0, (0.5 + 0.5j)], [(0.5 - 0.5j), (0.5 + 0.5j)], [1, 2, 3, 4, 5, 6, 7, 8], [1, 1, 1, 1, 1, 1, 1, 1]])
def test_create_one_particle_circuit(shape):
    """The prepared circuit's final state vector matches the reference
    single-fermionic-modes state for the normalised amplitudes."""
    # Normalise the raw (possibly complex) amplitude vector.
    amplitudes = (shape / np.linalg.norm(shape))
    qubits = cirq.LineQubit.range(len(amplitudes))
    circuit = create_one_particle_circuit(qubits, amplitudes)
    assert np.allclose(circuit.final_state_vector(), _single_fermionic_modes_state(amplitudes))
def parse_json(json_string, video_id, transform_source=None, fatal=True):
    """Decode *json_string* as JSON, optionally preprocessing it first.

    Returns the decoded object, or None when decoding fails; on failure a
    diagnostic tagged with *video_id* is printed, and *fatal* only changes
    how the error text is formatted.
    """
    text = transform_source(json_string) if transform_source else json_string
    try:
        return json.loads(text)
    except ValueError as err:
        prefix = '[-] %s: Failed to parse JSON ' % video_id
        if fatal:
            print(prefix, err)
        else:
            print(prefix + str(err))
# NOTE(review): `_to_string` below (and `_property` inside the class) look
# like mangled decorators — likely `@implements_to_string` on the class and
# a property/cached-property decorator on `suggested`; confirm against the
# original file.
_to_string
class BuildError(RoutingException, LookupError):
    """Raised when URL building fails for an endpoint/values/method combo.

    Carries enough context to suggest the closest matching rule in the
    error message.
    """
    def __init__(self, endpoint, values, method, adapter=None):
        LookupError.__init__(self, endpoint, values, method)
        self.endpoint = endpoint
        self.values = values
        self.method = method
        self.adapter = adapter
    _property
    def suggested(self):
        # Best-guess rule for the failed build (None without adapter/rules).
        return self.closest_rule(self.adapter)
    def closest_rule(self, adapter):
        """Return the adapter rule scoring highest against this error.

        The score blends endpoint-name similarity (dominant, weight 0.98),
        whether the supplied values are a subset of the rule's arguments,
        and whether the rule accepts the requested method.
        """
        def _score_rule(rule):
            return sum([(0.98 * difflib.SequenceMatcher(None, rule.endpoint, self.endpoint).ratio()), (0.01 * bool(set((self.values or ())).issubset(rule.arguments))), (0.01 * bool((rule.methods and (self.method in rule.methods))))])
        if (adapter and adapter.map._rules):
            return max(adapter.map._rules, key=_score_rule)
    def __str__(self):
        """Compose the diagnostic message, appending a suggestion when a
        similar rule exists."""
        message = []
        message.append(('Could not build url for endpoint %r' % self.endpoint))
        if self.method:
            message.append((' (%r)' % self.method))
        if self.values:
            message.append((' with values %r' % sorted(self.values.keys())))
        message.append('.')
        if self.suggested:
            if (self.endpoint == self.suggested.endpoint):
                # Same endpoint matched: failure was the method or values.
                if (self.method and (self.method not in self.suggested.methods)):
                    message.append((' Did you mean to use methods %r?' % sorted(self.suggested.methods)))
                missing_values = (self.suggested.arguments.union(set((self.suggested.defaults or ()))) - set(self.values.keys()))
                if missing_values:
                    message.append((' Did you forget to specify values %r?' % sorted(missing_values)))
            else:
                message.append((' Did you mean %r instead?' % self.suggested.endpoint))
        return u''.join(message)
class TASFObjects(TestCase):
    """Header-object parsing tests for ASF (WMA) files."""
    # Sample file shipped with the test data.
    filename = os.path.join(DATA_DIR, 'silence-1.wma')
    def test_invalid_header(self):
        # A corrupt/truncated header-object stream must raise ASFHeaderError
        # rather than parse; warnings emitted on the way are suppressed.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            asf = ASF()
            fileobj = BytesIO(b'0&\xb2u\x8ef\xcf\x11\xa6\xd9\x00\xaa\x00b\xcel\x19\xbf\x01\x00\x00\x00\x00\x00\x07\x00\x00\x00\x01\x02')
            self.assertRaises(ASFHeaderError, HeaderObject.parse_full, asf, fileobj)
def merger_phase_calculation(min_switch_ind, final_i_index, i_phase, m_omega):
    """Integrate the merger angular frequency into a phase series.

    Starting from the inspiral phase sample ``i_phase[final_i_index]``,
    cumulatively adds the angular-frequency samples
    ``m_omega[min_switch_ind + 1 : 201]``.

    NOTE(review): the series length is hard-coded to 201 samples, matching
    the sizes used by its callers — confirm before reusing elsewhere.

    Returns:
        The merger phase as a list of length ``201 - min_switch_ind``.
    """
    # isinstance replaces the non-idiomatic `type(x) == T` checks.
    assert isinstance(min_switch_ind, int), 'min_switch_ind should be an int.'
    assert isinstance(final_i_index, int), 'final_i_index should be an int.'
    assert isinstance(i_phase, list), 'i_phase should be a list.'
    assert isinstance(m_omega, list), 'm_omega should be a list.'
    m_phase = np.empty(201 - min_switch_ind)
    m_phase[0] = i_phase[final_i_index]
    # Each phase sample adds the next angular-frequency step (cumulative sum).
    for i in range(min_switch_ind + 1, 201):
        m_phase[i - min_switch_ind] = m_phase[i - min_switch_ind - 1] + m_omega[i]
    return list(m_phase)
# NOTE(review): the three lines below look like mangled mock decorators —
# most likely `@patch.dict(os.environ, {}, clear=True)`,
# `@patch('pyinaturalist.auth.get_keyring_credentials')` and
# `@patch('pyinaturalist.auth._get_jwt', return_value=NOT_CACHED_RESPONSE)`;
# confirm against the original file.
(os.environ, {}, clear=True)
('pyinaturalist.auth.get_keyring_credentials')
('pyinaturalist.auth._get_jwt', return_value=NOT_CACHED_RESPONSE)
def test_get_access_token__missing_creds(mock_get_jwt, mock_keyring_credentials):
    """With no env vars, a mocked keyring and no cached JWT, requesting a
    token with only a username must raise AuthenticationError."""
    with pytest.raises(AuthenticationError):
        get_access_token('username')
class Solution():
    def powerfulIntegers(self, x: int, y: int, bound: int) -> List[int]:
        """Return all distinct values x**i + y**j (i, j >= 0) that are <= bound.

        The smallest possible sum is 1 + 1 = 2, so any bound below 2 yields
        an empty list.
        """
        if bound < 2:
            return []

        def powers(base):
            # Ascending powers of `base` starting at base**0 == 1.  When the
            # base is 1 the only power is 1; otherwise we stop once the last
            # generated power exceeds bound (one overshoot is harmless —
            # the sum filter below discards it).
            vals = [1]
            if base != 1:
                exponent = 1
                while vals[-1] <= bound:
                    vals.append(base ** exponent)
                    exponent += 1
            return vals

        results = set()
        for px in powers(x):
            for py in powers(y):
                total = px + py
                if total > bound:
                    # powers() is ascending, so larger py can't fit either.
                    break
                results.add(total)
        return list(results)
# NOTE(review): the line below looks like a mangled hypothesis decorator —
# most likely `@given(sampled_from([...]))`; confirm against the original.
(sampled_from(([(tuple, Tuple), (tuple, tuple), (list, list), (list, List), (deque, deque), (deque, Deque), (set, Set), (set, set), (frozenset, frozenset), (frozenset, FrozenSet)] if is_py39_plus else [(tuple, Tuple), (list, List), (deque, Deque), (set, Set), (frozenset, FrozenSet)])))
def test_seq_of_bare_classes_structure(seq_type_and_annotation):
    """Structuring a list of dicts into a generic sequence of a simple
    attrs-style class produces the right container type and elements."""
    converter = Converter()
    # (field class, constructor args) pairs covering the primitive types.
    bare_classes = ((int, (1,)), (float, (1.0,)), (str, ('test',)), (bool, (True,)))
    (seq_type, annotation) = seq_type_and_annotation
    for (cl, vals) in bare_classes:
        # NOTE(review): the line below looks like a stripped class decorator,
        # probably `@define(frozen=True)` or similar — confirm against the
        # original file.
        (frozen=True)
        class C():
            a: cl
            b: cl
        inputs = [{'a': cl(*vals), 'b': cl(*vals)} for _ in range(5)]
        # Tuple annotations need the variadic `(C, ...)` form; the other
        # sequence annotations take a single type parameter.
        outputs = converter.structure(inputs, cl=(annotation[C] if (annotation not in (Tuple, tuple)) else annotation[(C, ...)]))
        expected = seq_type((C(a=cl(*vals), b=cl(*vals)) for _ in range(5)))
        assert (type(outputs) == seq_type)
        assert (outputs == expected)
.parametrize('args, pkgs', [({'where': ['.'], 'namespaces': False}, {'pkg', 'other'}), ({'where': ['.', 'dir1'], 'namespaces': False}, {'pkg', 'other', 'dir2'}), ({'namespaces': True}, {'pkg', 'other', 'dir1', 'dir1.dir2'}), ({}, {'pkg', 'other', 'dir1', 'dir1.dir2'})])
def test_find_packages(tmp_path, args, pkgs):
files = {'pkg/__init__.py', 'other/__init__.py', 'dir1/dir2/__init__.py'}
write_files({k: '' for k in files}, tmp_path)
package_dir = {}
kwargs = {'root_dir': tmp_path, 'fill_package_dir': package_dir, **args}
where = kwargs.get('where', ['.'])
assert (set(expand.find_packages(**kwargs)) == pkgs)
for pkg in pkgs:
pkg_path = find_package_path(pkg, package_dir, tmp_path)
assert os.path.exists(pkg_path)
where = [str((tmp_path / p).resolve()).replace(os.sep, '/') for p in args.pop('where', ['.'])]
assert (set(expand.find_packages(where=where, **args)) == pkgs) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.