def main():
    print('\n This program is deprecated!!!\n Instead use pyvideo_scrape\n Continue? yes/[no]\n ')
stay = ('yes' == input().lower())
if (not stay):
        sys.exit(0)
parser = argparse.ArgumentParser()
parser.add_argument('-k', '--api-key', help='Can also be specified via the environment variable GOOGLE_API_KEY')
parser.add_argument('-l', '--list')
parser.add_argument('-p', '--path', help='Path to event to normalize.')
args = parser.parse_args()
if args.list:
api_key = get_api_key(args.api_key)
if (not api_key):
print('Please set an API key!')
parser.print_help()
sys.exit(0)
fetch_list(api_key, args.list)
elif args.path:
normalize(args.path)
else:
parser.print_help() |
class Effect6470(BaseEffect):
type = ('projected', 'active')
def handler(fit, module, context, projectionRange, **kwargs):
if ('projected' not in context):
return
if fit.ship.getModifiedItemAttr('disallowOffensiveModifiers'):
return
strength = module.getModifiedItemAttr('scan{0}StrengthBonus'.format(fit.scanType))
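        # Attenuate the jam strength by the module's optimal/falloff range profile.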
strength *= calculateRangeFactor(srcOptimalRange=module.getModifiedItemAttr('maxRange'), srcFalloffRange=module.getModifiedItemAttr('falloffEffectiveness'), distance=projectionRange)
if ('effect' in kwargs):
from eos.modifiedAttributeDict import ModifiedAttributeDict
strength *= ModifiedAttributeDict.getResistance(fit, kwargs['effect'])
fit.addProjectedEcm(strength) |
@dataclasses.dataclass()
class WorldWidgetEntry():
item: QtWidgets.QTreeWidgetItem
preset_menu: QtWidgets.QMenu
def update(self, world_details: MultiplayerWorld, detail: (UserWorldDetail | None)):
self.item.setText(0, world_details.name)
self.item.setText(1, world_details.preset.game.long_name)
self.item.setText(2, (detail.connection_state.pretty_text if (detail is not None) else 'Abandoned'))
self.preset_menu.setTitle(f'Preset: {world_details.preset.name}')
if (detail is not None):
self.item.setText(4, 'Last Activity:')
self.item.setTextAlignment(4, QtCore.Qt.AlignmentFlag.AlignRight)
self.item.setData(5, QtCore.Qt.ItemDataRole.DisplayRole, QtCore.QDateTime.fromSecsSinceEpoch(int(detail.last_activity.timestamp()))) |
class MarketImpactBase(SlippageModel):
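    # Fallback impact of 10 basis points, applied when volume/volatility history is unavailable.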
NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = (10.0 / 10000)
def __init__(self):
super(MarketImpactBase, self).__init__()
self._window_data_cache = ExpiringCache()
def get_txn_volume(self, data, order):
raise NotImplementedError('get_txn_volume')
def get_simulated_impact(self, order, current_price, current_volume, txn_volume, mean_volume, volatility):
raise NotImplementedError('get_simulated_impact')
def process_order(self, data, order):
if (order.open_amount == 0):
return (None, None)
minute_data = data.current(order.asset, ['volume', 'high', 'low'])
(mean_volume, volatility) = self._get_window_data(data, order.asset, 20)
price = np.mean([minute_data['high'], minute_data['low']])
volume = minute_data['volume']
if (not volume):
return (None, None)
txn_volume = int(min(self.get_txn_volume(data, order), abs(order.open_amount)))
if (txn_volume == 0):
return (None, None)
if ((mean_volume == 0) or np.isnan(volatility)):
simulated_impact = (price * self.NO_DATA_VOLATILITY_SLIPPAGE_IMPACT)
else:
simulated_impact = self.get_simulated_impact(order=order, current_price=price, current_volume=volume, txn_volume=txn_volume, mean_volume=mean_volume, volatility=volatility)
impacted_price = (price + math.copysign(simulated_impact, order.direction))
if fill_price_worse_than_limit_price(impacted_price, order):
return (None, None)
return (impacted_price, math.copysign(txn_volume, order.direction))
def _get_window_data(self, data, asset, window_length):
try:
values = self._window_data_cache.get(asset, data.current_session)
except KeyError:
try:
volume_history = data.history(asset, 'volume', (window_length + 1), '1d')
close_history = data.history(asset, 'close', (window_length + 1), '1d')
except HistoryWindowStartsBeforeData:
return (0, np.NaN)
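            # Drop the current session's bar, then annualize daily volatility with sqrt(252).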
close_volatility = close_history[:(- 1)].pct_change()[1:].std(skipna=False)
values = {'volume': volume_history[:(- 1)].mean(), 'close': (close_volatility * SQRT_252)}
self._window_data_cache.set(asset, values, data.current_session)
return (values['volume'], values['close']) |
def _fold_to_scale(conv_wrapper: QcQuantizeWrapper, bn_wrapper: QcQuantizeWrapper):
conv = conv_wrapper._layer_to_wrap
bn = bn_wrapper._layer_to_wrap
weight_quantizer = get_wrappers_weight_quantizer(conv_wrapper.param_quantizers)
bias_quantizer = get_wrappers_bias_quantizer(conv_wrapper.param_quantizers)
if (weight_quantizer.quant_scheme not in [QuantScheme.training_range_learning_with_tf_init, QuantScheme.training_range_learning_with_tf_enhanced_init]):
raise _BatchNormFoldingNotSupported(f'BatchNorm folding to scale supports training_range_learning_with_tf_init or training_range_learning_with_tf_enhanced_init only. got {weight_quantizer.quant_scheme}')
output_quantizer = conv_wrapper.output_quantizers[0]
if output_quantizer.is_enabled():
raise _BatchNormFoldingNotSupported('BatchNorm should belong to the same supergroup with the layer to be folded to.')
if bias_quantizer:
if bias_quantizer.is_enabled():
raise _BatchNormFoldingNotSupported("Can't fold BatchNorm to scale if bias quantizer is enabled.")
enc_min = weight_quantizer._encoding_min
enc_max = weight_quantizer._encoding_max
if (not weight_quantizer.is_encoding_valid()):
raise RuntimeError
with bn_wrapper._quantize_params():
_fold_to_weight(conv, bn, fold_backward=True)
gamma = bn.gamma
sigma = K.sqrt((bn.moving_variance + bn.epsilon))
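        # Per-channel scale c = gamma / sqrt(moving_variance + eps); a negative c flips the weight sign, so the encoding bounds must be swapped.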
for (i, c) in enumerate((gamma / sigma)):
c = float(c)
if (c >= 0):
enc_max[i].assign((enc_max[i] * c))
enc_min[i].assign((enc_min[i] * c))
else:
enc_max_before_reassign = enc_max[i]
enc_max[i].assign((enc_min[i] * c))
enc_min[i].assign((enc_max_before_reassign * c))
for (conv_output_quantizer, bn_output_quantizer) in zip(conv_wrapper.output_quantizers, bn_wrapper.output_quantizers):
if (bn_output_quantizer.encoding is not None):
conv_output_quantizer._encoding_min.assign(bn_output_quantizer._encoding_min)
conv_output_quantizer._encoding_max.assign(bn_output_quantizer._encoding_max)
conv_output_quantizer._is_encoding_valid = True
tensor_quantizers = (conv_output_quantizer._tensor_quantizer if isinstance(conv_output_quantizer._tensor_quantizer, List) else [conv_output_quantizer._tensor_quantizer])
for tensor_quantizer in tensor_quantizers:
tensor_quantizer.isEncodingValid = True
if bn_output_quantizer.is_enabled():
conv_output_quantizer.enable()
else:
conv_output_quantizer.disable()
bn_output_quantizer.disable()
if (bias_quantizer is None):
bias_quantizer = ParamPerTensorQuantizer(conv, conv.bias.name.split(':')[0], weight_quantizer.quant_scheme, MAP_PYMO_TO_ROUND_MODE[weight_quantizer.round_mode], weight_quantizer.bitwidth, weight_quantizer.data_type, weight_quantizer.is_symmetric, weight_quantizer.use_strict_symmetric, weight_quantizer.use_unsigned_symmetric, enabled=False)
tensor_quantizers = (bias_quantizer._tensor_quantizer if isinstance(bias_quantizer._tensor_quantizer, List) else [bias_quantizer._tensor_quantizer])
for tensor_quantizer in tensor_quantizers:
tensor_quantizer.isEncodingValid = True
conv_wrapper.param_quantizers.append(bias_quantizer) |
def main():
if (len(sys.argv) < 2):
print((('usage: ' + sys.argv[0]) + ' image'))
sys.exit(1)
filename = sys.argv[1]
img_rgb = cv2.imread(filename)
(ayat, contours) = find_ayat(img_rgb)
draw(img_rgb, contours, 'res.png')
for ayah in ayat:
(x, y, w, h) = ayah
print(('marker found at: (%d, %d) - %dx%d' % (x, y, w, h))) |
class Processor():
def __init__(self, args, tokenizer):
super().__init__()
self.args = args
self.tokenizer = tokenizer
self.new_tokens = []
if (self.args.input_format == 'entity_marker'):
self.new_tokens = ['[E1]', '[/E1]', '[E2]', '[/E2]']
self.tokenizer.add_tokens(self.new_tokens)
if (self.args.input_format not in ('entity_mask', 'entity_marker', 'entity_marker_punct', 'typed_entity_marker', 'typed_entity_marker_punct')):
raise Exception('Invalid input format!')
def tokenize(self, tokens, subj_type, obj_type, ss, se, os, oe):
sents = []
input_format = self.args.input_format
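        # Insert marker tokens around the subject/object spans; new_ss and new_os
        # record the marker positions (shifted by 1 later for the leading special token).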
if (input_format == 'entity_mask'):
subj_type = '[SUBJ-{}]'.format(subj_type)
obj_type = '[OBJ-{}]'.format(obj_type)
for token in (subj_type, obj_type):
if (token not in self.new_tokens):
self.new_tokens.append(token)
self.tokenizer.add_tokens([token])
elif (input_format == 'typed_entity_marker'):
subj_start = '[SUBJ-{}]'.format(subj_type)
subj_end = '[/SUBJ-{}]'.format(subj_type)
obj_start = '[OBJ-{}]'.format(obj_type)
obj_end = '[/OBJ-{}]'.format(obj_type)
for token in (subj_start, subj_end, obj_start, obj_end):
if (token not in self.new_tokens):
self.new_tokens.append(token)
self.tokenizer.add_tokens([token])
elif (input_format == 'typed_entity_marker_punct'):
subj_type = self.tokenizer.tokenize(subj_type.replace('_', ' ').lower())
obj_type = self.tokenizer.tokenize(obj_type.replace('_', ' ').lower())
for (i_t, token) in enumerate(tokens):
tokens_wordpiece = self.tokenizer.tokenize(token)
if (input_format == 'entity_mask'):
if ((ss <= i_t <= se) or (os <= i_t <= oe)):
tokens_wordpiece = []
if (i_t == ss):
new_ss = len(sents)
tokens_wordpiece = [subj_type]
if (i_t == os):
new_os = len(sents)
tokens_wordpiece = [obj_type]
elif (input_format == 'entity_marker'):
if (i_t == ss):
new_ss = len(sents)
tokens_wordpiece = (['[E1]'] + tokens_wordpiece)
if (i_t == se):
tokens_wordpiece = (tokens_wordpiece + ['[/E1]'])
if (i_t == os):
new_os = len(sents)
tokens_wordpiece = (['[E2]'] + tokens_wordpiece)
if (i_t == oe):
tokens_wordpiece = (tokens_wordpiece + ['[/E2]'])
elif (input_format == 'entity_marker_punct'):
if (i_t == ss):
new_ss = len(sents)
                    tokens_wordpiece = (['@'] + tokens_wordpiece)
if (i_t == se):
                    tokens_wordpiece = (tokens_wordpiece + ['@'])
if (i_t == os):
new_os = len(sents)
tokens_wordpiece = (['#'] + tokens_wordpiece)
if (i_t == oe):
tokens_wordpiece = (tokens_wordpiece + ['#'])
elif (input_format == 'typed_entity_marker'):
if (i_t == ss):
new_ss = len(sents)
tokens_wordpiece = ([subj_start] + tokens_wordpiece)
if (i_t == se):
tokens_wordpiece = (tokens_wordpiece + [subj_end])
if (i_t == os):
new_os = len(sents)
tokens_wordpiece = ([obj_start] + tokens_wordpiece)
if (i_t == oe):
tokens_wordpiece = (tokens_wordpiece + [obj_end])
elif (input_format == 'typed_entity_marker_punct'):
if (i_t == ss):
new_ss = len(sents)
                    tokens_wordpiece = ((((['@'] + ['*']) + subj_type) + ['*']) + tokens_wordpiece)
if (i_t == se):
                    tokens_wordpiece = (tokens_wordpiece + ['@'])
if (i_t == os):
new_os = len(sents)
tokens_wordpiece = ((((['#'] + ['^']) + obj_type) + ['^']) + tokens_wordpiece)
if (i_t == oe):
tokens_wordpiece = (tokens_wordpiece + ['#'])
sents.extend(tokens_wordpiece)
sents = sents[:(self.args.max_seq_length - 2)]
input_ids = self.tokenizer.convert_tokens_to_ids(sents)
input_ids = self.tokenizer.build_inputs_with_special_tokens(input_ids)
return (input_ids, (new_ss + 1), (new_os + 1)) |
def _get_replay_buffer(dataset_path, shape_meta, store):
rgb_keys = list()
lowdim_keys = list()
out_resolutions = dict()
lowdim_shapes = dict()
obs_shape_meta = shape_meta['obs']
for (key, attr) in obs_shape_meta.items():
type = attr.get('type', 'low_dim')
shape = tuple(attr.get('shape'))
if (type == 'rgb'):
rgb_keys.append(key)
(c, h, w) = shape
out_resolutions[key] = (w, h)
elif (type == 'low_dim'):
lowdim_keys.append(key)
lowdim_shapes[key] = tuple(shape)
if ('pose' in key):
assert (tuple(shape) in [(2,), (6,)])
action_shape = tuple(shape_meta['action']['shape'])
assert (action_shape in [(2,), (6,)])
cv2.setNumThreads(1)
with threadpool_limits(1):
replay_buffer = real_data_to_replay_buffer(dataset_path=dataset_path, out_store=store, out_resolutions=out_resolutions, lowdim_keys=(lowdim_keys + ['action']), image_keys=rgb_keys)
if (action_shape == (2,)):
zarr_arr = replay_buffer['action']
zarr_resize_index_last_dim(zarr_arr, idxs=[0, 1])
for (key, shape) in lowdim_shapes.items():
if (('pose' in key) and (shape == (2,))):
zarr_arr = replay_buffer[key]
zarr_resize_index_last_dim(zarr_arr, idxs=[0, 1])
return replay_buffer |
@pytest.mark.parametrize('status_code', [201])
@pytest.mark.parametrize('mock_release_id', range(3))
def test_edit_release_notes_succeeds(default_gitea_client, status_code, mock_release_id):
with requests_mock.Mocker(session=default_gitea_client.session) as m:
m.register_uri('PATCH', gitea_api_matcher, json={'id': mock_release_id}, status_code=status_code)
assert (default_gitea_client.edit_release_notes(mock_release_id, RELEASE_NOTES) == mock_release_id)
assert m.called
assert (len(m.request_history) == 1)
assert (m.last_request.method == 'PATCH')
assert (m.last_request.url == '{api_url}/repos/{owner}/{repo_name}/releases/{release_id}'.format(api_url=default_gitea_client.api_url, owner=default_gitea_client.owner, repo_name=default_gitea_client.repo_name, release_id=mock_release_id))
assert (m.last_request.json() == {'body': RELEASE_NOTES}) |
class TestDebuggingBreakpoints():
    @pytest.mark.parametrize('arg', ['--pdb', ''])
def test_sys_breakpointhook_configure_and_unconfigure(self, pytester: Pytester, arg: str) -> None:
pytester.makeconftest('\n import sys\n from pytest import hookimpl\n from _pytest.debugging import pytestPDB\n\n def pytest_configure(config):\n config.add_cleanup(check_restored)\n\n def check_restored():\n assert sys.breakpointhook == sys.__breakpointhook__\n\n def test_check():\n assert sys.breakpointhook == pytestPDB.set_trace\n ')
pytester.makepyfile('\n def test_nothing(): pass\n ')
args = ((arg,) if arg else ())
result = pytester.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(['*1 passed in *'])
def test_pdb_custom_cls(self, pytester: Pytester, custom_debugger_hook) -> None:
p1 = pytester.makepyfile('\n def test_nothing():\n breakpoint()\n ')
result = pytester.runpytest_inprocess('--pdb', '--pdbcls=_pytest:_CustomDebugger', p1)
result.stdout.fnmatch_lines(['*CustomDebugger*', '*1 passed*'])
assert (custom_debugger_hook == ['init', 'set_trace'])
    @pytest.mark.parametrize('arg', ['--pdb', ''])
def test_environ_custom_class(self, pytester: Pytester, custom_debugger_hook, arg: str) -> None:
pytester.makeconftest("\n import os\n import sys\n\n os.environ['PYTHONBREAKPOINT'] = '_pytest._CustomDebugger.set_trace'\n\n def pytest_configure(config):\n config.add_cleanup(check_restored)\n\n def check_restored():\n assert sys.breakpointhook == sys.__breakpointhook__\n\n def test_check():\n import _pytest\n assert sys.breakpointhook is _pytest._CustomDebugger.set_trace\n ")
pytester.makepyfile('\n def test_nothing(): pass\n ')
args = ((arg,) if arg else ())
result = pytester.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(['*1 passed in *'])
    @pytest.mark.skipif((not (_ENVIRON_PYTHONBREAKPOINT == '')), reason='Requires breakpoint() default value')
def test_sys_breakpoint_interception(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile('\n def test_1():\n breakpoint()\n ')
child = pytester.spawn_pytest(str(p1))
child.expect('test_1')
child.expect('Pdb')
child.sendline('quit')
rest = child.read().decode('utf8')
assert ('Quitting debugger' in rest)
assert ('reading from stdin while output' not in rest)
TestPDB.flush(child)
    @pytest.mark.xfail(reason='#10042')
def test_pdb_not_altered(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile('\n import pdb\n def test_1():\n pdb.set_trace()\n assert 0\n ')
child = pytester.spawn_pytest(str(p1))
child.expect('test_1')
child.expect('Pdb')
child.sendline('c')
rest = child.read().decode('utf8')
assert ('1 failed' in rest)
assert ('reading from stdin while output' not in rest)
TestPDB.flush(child) |
def evaluate(args, model, tokenizer, prefix=''):
eval_task_names = (('mnli', 'mnli-mm') if (args.task_name == 'mnli') else (args.task_name,))
eval_outputs_dirs = ((args.output_dir, (args.output_dir + '/MM')) if (args.task_name == 'mnli') else (args.output_dir,))
results = {}
for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if ((not os.path.exists(eval_output_dir)) and (args.local_rank in [(- 1), 0])):
os.makedirs(eval_output_dir)
args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if ((args.n_gpu > 1) and (not isinstance(model, nn.DataParallel))):
model = nn.DataParallel(model)
logger.info('***** Running evaluation {} *****'.format(prefix))
logger.info(' Num examples = %d', len(eval_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
if args.global_topk:
threshold_mem = None
for batch in tqdm(eval_dataloader, desc='Evaluating'):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if (args.model_type != 'distilbert'):
inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'masked_bert', 'xlnet', 'albert']) else None)
if ('masked' in args.model_type):
inputs['threshold'] = args.final_threshold
if args.global_topk:
if (threshold_mem is None):
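                            # Compute the global magnitude threshold once: the kth value that keeps the top final_threshold fraction of all mask scores.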
concat = torch.cat([param.view((- 1)) for (name, param) in model.named_parameters() if ('mask_scores' in name)])
n = concat.numel()
kth = max((n - (int((n * args.final_threshold)) + 1)), 1)
threshold_mem = concat.kthvalue(kth).values.item()
inputs['threshold'] = threshold_mem
outputs = model(**inputs)
(tmp_eval_loss, logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if (preds is None):
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
        eval_loss = (eval_loss / nb_eval_steps)
        entropy = None
        if (args.output_mode == 'classification'):
from scipy.special import softmax
probs = softmax(preds, axis=(- 1))
entropy = np.exp(((- probs) * np.log(probs)).sum(axis=(- 1)).mean())
preds = np.argmax(preds, axis=1)
elif (args.output_mode == 'regression'):
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
if (entropy is not None):
result['eval_avg_entropy'] = entropy
output_eval_file = os.path.join(eval_output_dir, prefix, 'eval_results.txt')
with open(output_eval_file, 'w') as writer:
logger.info('***** Eval results {} *****'.format(prefix))
for key in sorted(result.keys()):
logger.info(' %s = %s', key, str(result[key]))
writer.write(('%s = %s\n' % (key, str(result[key]))))
return results |
class KnownValues(unittest.TestCase):
def test_hf_dfgs(self):
mf = scf.UHF(mol).run()
myadc = adc.ADC(mf)
myadc.with_df = df.DF(mol, auxbasis='cc-pvdz-ri')
(e, t_amp1, t_amp2) = myadc.kernel_gs()
self.assertAlmostEqual(e, (- 0.), 6)
def test_dfhs_dfgs(self):
(e, t_amp1, t_amp2) = myadc.kernel_gs()
self.assertAlmostEqual(e, (- 0.), 6)
def test_ea_dfadc3(self):
mf = scf.UHF(mol).density_fit(auxbasis='cc-pvdz-jkfit')
mf.kernel()
myadc.with_df = df.DF(mol, auxbasis='cc-pvdz-ri')
myadc.max_memory = 20
myadc.method = 'adc(3)'
myadc.method_type = 'ea'
(e, v, p, x) = myadc.kernel(nroots=4)
self.assertAlmostEqual(e[0], 0., 6)
self.assertAlmostEqual(e[1], 0., 6)
self.assertAlmostEqual(e[2], 0., 6)
self.assertAlmostEqual(e[3], 0., 6)
self.assertAlmostEqual(p[0], 0.9364865, 6)
self.assertAlmostEqual(p[1], 0., 6)
self.assertAlmostEqual(p[2], 0., 6)
self.assertAlmostEqual(p[3], 0., 6)
def test_ip_dfadc3_dif_aux_basis(self):
mf = scf.UHF(mol).density_fit(auxbasis='cc-pvdz-jkfit')
mf.kernel()
myadc.with_df = df.DF(mol, auxbasis='aug-cc-pvdz-ri')
myadc.max_memory = 2
myadc.method = 'adc(3)'
myadc.method_type = 'ip'
(e, v, p, x) = myadc.kernel(nroots=3)
e_corr = myadc.e_corr
self.assertAlmostEqual(e_corr, (- 0.), 6)
self.assertAlmostEqual(e[0], 0., 6)
self.assertAlmostEqual(e[1], 0., 6)
self.assertAlmostEqual(e[2], 0., 6)
self.assertAlmostEqual(p[0], 0., 6)
self.assertAlmostEqual(p[1], 0., 6)
self.assertAlmostEqual(p[2], 0., 6)
def test_hf_dfadc3_ip(self):
mf = scf.UHF(mol).run()
myadc = adc.ADC(mf)
myadc.with_df = df.DF(mol, auxbasis='aug-cc-pvdz-ri')
myadc.method = 'adc(3)'
(e, v, p, x) = myadc.kernel(nroots=3)
myadc.analyze()
e_corr = myadc.e_corr
self.assertAlmostEqual(e_corr, (- 0.), 6)
self.assertAlmostEqual(e[0], 0., 6)
self.assertAlmostEqual(e[1], 0.4681848, 6)
self.assertAlmostEqual(e[2], 0., 6)
self.assertAlmostEqual(p[0], 0., 6)
self.assertAlmostEqual(p[1], 0., 6)
self.assertAlmostEqual(p[2], 0., 6) |
def clean(cands):
fhs = []
for x in cands:
fh = []
ans = stanford_nlp.pos_tag(x[0].encode('utf-8'))
if (len(ans) > 1):
(f, l) = (None, None)
for (ind, (w, p)) in enumerate(ans):
if (p not in ['DT', ',', 'PRP', 'IN']):
fh.append(w)
if (f is None):
f = (x[1][0] + ind)
l = ((x[1][0] + ind) + 1)
if (len(fh) > 0):
fhs.append((' '.join(fh), (f, l)))
elif (ans[0][1] not in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']):
fhs.append((x[0], x[1]))
return fhs |
class EquilibriumDB(RewriteDatabase):
def __init__(self, ignore_newtrees: bool=True, tracks_on_change_inputs: bool=False):
super().__init__()
self.ignore_newtrees = ignore_newtrees
self.tracks_on_change_inputs = tracks_on_change_inputs
self.__final__: dict[(str, bool)] = {}
self.__cleanup__: dict[(str, bool)] = {}
def register(self, name: str, rewriter: Union[('RewriteDatabase', RewritesType)], *tags: str, final_rewriter: bool=False, cleanup: bool=False, **kwargs):
if (final_rewriter and cleanup):
raise ValueError('`final_rewriter` and `cleanup` cannot both be true.')
super().register(name, rewriter, *tags, **kwargs)
self.__final__[name] = final_rewriter
self.__cleanup__[name] = cleanup
def query(self, *tags, **kwtags):
_rewriters = super().query(*tags, **kwtags)
final_rewriters = [o for o in _rewriters if self.__final__.get(o.name, False)]
cleanup_rewriters = [o for o in _rewriters if self.__cleanup__.get(o.name, False)]
rewriters = [o for o in _rewriters if ((o not in final_rewriters) and (o not in cleanup_rewriters))]
if (len(final_rewriters) == 0):
final_rewriters = None
if (len(cleanup_rewriters) == 0):
cleanup_rewriters = None
return pytensor_rewriting.EquilibriumGraphRewriter(rewriters, max_use_ratio=config.optdb__max_use_ratio, ignore_newtrees=self.ignore_newtrees, tracks_on_change_inputs=self.tracks_on_change_inputs, failure_callback=pytensor_rewriting.NodeProcessingGraphRewriter.warn_inplace, final_rewriters=final_rewriters, cleanup_rewriters=cleanup_rewriters) |
class retvalType(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
def factory(*args_, **kwargs_):
if (CurrentSubclassModule_ is not None):
subclass = getSubclassFromModule_(CurrentSubclassModule_, retvalType)
if (subclass is not None):
return subclass(*args_, **kwargs_)
if retvalType.subclass:
return retvalType.subclass(*args_, **kwargs_)
else:
return retvalType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def has__content(self):
if ():
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='retvalType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('retvalType')
if (imported_ns_def_ is not None):
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if ((self.original_tagname_ is not None) and (name_ == 'retvalType')):
name_ = self.original_tagname_
if (UseCapturedNS_ and self.ns_prefix_):
namespaceprefix_ = (self.ns_prefix_ + ':')
showIndent(outfile, level, pretty_print)
outfile.write(('<%s%s%s' % (namespaceprefix_, name_, ((namespacedef_ and (' ' + namespacedef_)) or ''))))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='retvalType')
if self.has__content():
outfile.write(('>%s' % (eol_,)))
self._exportChildren(outfile, (level + 1), namespaceprefix_, namespacedef_, name_='retvalType', pretty_print=pretty_print)
outfile.write(('</%s%s>%s' % (namespaceprefix_, name_, eol_)))
else:
outfile.write(('/>%s' % (eol_,)))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='retvalType'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='retvalType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[(- 1)]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
pass |
class TestRevokedCertificateBuilder():
def test_serial_number_must_be_integer(self):
with pytest.raises(TypeError):
x509.RevokedCertificateBuilder().serial_number('notanx509name')
def test_serial_number_must_be_non_negative(self):
with pytest.raises(ValueError):
x509.RevokedCertificateBuilder().serial_number((- 1))
def test_serial_number_must_be_positive(self):
with pytest.raises(ValueError):
x509.RevokedCertificateBuilder().serial_number(0)
def test_minimal_serial_number(self, backend):
revocation_date = datetime.datetime(2002, 1, 1, 12, 1)
builder = x509.RevokedCertificateBuilder().serial_number(1).revocation_date(revocation_date)
revoked_certificate = builder.build(backend)
assert (revoked_certificate.serial_number == 1)
def test_biggest_serial_number(self, backend):
revocation_date = datetime.datetime(2002, 1, 1, 12, 1)
builder = x509.RevokedCertificateBuilder().serial_number(((1 << 159) - 1)).revocation_date(revocation_date)
revoked_certificate = builder.build(backend)
assert (revoked_certificate.serial_number == ((1 << 159) - 1))
def test_serial_number_must_be_less_than_160_bits_long(self):
with pytest.raises(ValueError):
x509.RevokedCertificateBuilder().serial_number((1 << 159))
def test_set_serial_number_twice(self):
builder = x509.RevokedCertificateBuilder().serial_number(3)
with pytest.raises(ValueError):
builder.serial_number(4)
def test_aware_revocation_date(self, backend):
tz = datetime.timezone(datetime.timedelta(hours=(- 8)))
time = datetime.datetime(2012, 1, 16, 22, 43, tzinfo=tz)
utc_time = datetime.datetime(2012, 1, 17, 6, 43)
serial_number = 333
builder = x509.RevokedCertificateBuilder().serial_number(serial_number).revocation_date(time)
revoked_certificate = builder.build(backend)
with pytest.warns(utils.DeprecatedIn42):
assert (revoked_certificate.revocation_date == utc_time)
assert (revoked_certificate.revocation_date_utc == utc_time.replace(tzinfo=datetime.timezone.utc))
def test_revocation_date_invalid(self):
with pytest.raises(TypeError):
x509.RevokedCertificateBuilder().revocation_date('notadatetime')
def test_revocation_date_before_1950(self):
with pytest.raises(ValueError):
x509.RevokedCertificateBuilder().revocation_date(datetime.datetime(1940, 8, 10))
def test_set_revocation_date_twice(self):
builder = x509.RevokedCertificateBuilder().revocation_date(datetime.datetime(2002, 1, 1, 12, 1))
with pytest.raises(ValueError):
builder.revocation_date(datetime.datetime(2002, 1, 1, 12, 1))
def test_add_extension_checks_for_duplicates(self):
builder = x509.RevokedCertificateBuilder().add_extension(x509.CRLReason(x509.ReasonFlags.ca_compromise), False)
with pytest.raises(ValueError):
builder.add_extension(x509.CRLReason(x509.ReasonFlags.ca_compromise), False)
def test_add_invalid_extension(self):
with pytest.raises(TypeError):
x509.RevokedCertificateBuilder().add_extension('notanextension', False)
def test_no_serial_number(self, backend):
builder = x509.RevokedCertificateBuilder().revocation_date(datetime.datetime(2002, 1, 1, 12, 1))
with pytest.raises(ValueError):
builder.build(backend)
def test_no_revocation_date(self, backend):
builder = x509.RevokedCertificateBuilder().serial_number(3)
with pytest.raises(ValueError):
builder.build(backend)
def test_create_revoked(self, backend):
serial_number = 333
revocation_date = datetime.datetime(2002, 1, 1, 12, 1)
builder = x509.RevokedCertificateBuilder().serial_number(serial_number).revocation_date(revocation_date)
revoked_certificate = builder.build(backend)
assert (revoked_certificate.serial_number == serial_number)
with pytest.warns(utils.DeprecatedIn42):
assert (revoked_certificate.revocation_date == revocation_date)
assert (revoked_certificate.revocation_date_utc == revocation_date.replace(tzinfo=datetime.timezone.utc))
assert (len(revoked_certificate.extensions) == 0)
    @pytest.mark.parametrize('extension', [x509.InvalidityDate(datetime.datetime(2015, 1, 1, 0, 0)), x509.CRLReason(x509.ReasonFlags.ca_compromise), x509.CertificateIssuer([x509.DNSName('cryptography.io')])])
def test_add_extensions(self, backend, extension):
serial_number = 333
revocation_date = datetime.datetime(2002, 1, 1, 12, 1)
builder = x509.RevokedCertificateBuilder().serial_number(serial_number).revocation_date(revocation_date).add_extension(extension, False)
revoked_certificate = builder.build(backend)
assert (revoked_certificate.serial_number == serial_number)
with pytest.warns(utils.DeprecatedIn42):
assert (revoked_certificate.revocation_date == revocation_date)
assert (revoked_certificate.revocation_date_utc == revocation_date.replace(tzinfo=datetime.timezone.utc))
assert (len(revoked_certificate.extensions) == 1)
ext = revoked_certificate.extensions.get_extension_for_class(type(extension))
assert (ext.critical is False)
assert (ext.value == extension)
def test_add_multiple_extensions(self, backend):
serial_number = 333
revocation_date = datetime.datetime(2002, 1, 1, 12, 1)
invalidity_date = x509.InvalidityDate(datetime.datetime(2015, 1, 1, 0, 0))
certificate_issuer = x509.CertificateIssuer([x509.DNSName('cryptography.io')])
crl_reason = x509.CRLReason(x509.ReasonFlags.aa_compromise)
builder = x509.RevokedCertificateBuilder().serial_number(serial_number).revocation_date(revocation_date).add_extension(invalidity_date, True).add_extension(crl_reason, True).add_extension(certificate_issuer, True)
revoked_certificate = builder.build(backend)
assert (len(revoked_certificate.extensions) == 3)
for ext_data in [invalidity_date, certificate_issuer, crl_reason]:
ext = revoked_certificate.extensions.get_extension_for_class(type(ext_data))
assert (ext.critical is True)
assert (ext.value == ext_data) |
def evaluate_hr_ndcg(model, test_queue, topk=10):
model.eval()
with torch.no_grad():
(users, items, _) = test_queue
users = users.cpu().tolist()
(hrs, ndcgs) = ([], [])
inferences_dict = {}
(users_all, items_all) = ([], [])
for user in list(set(users)):
users_all += ([user] * model.num_items)
items_all += list(range(model.num_items))
(inferences, _) = model(torch.tensor(users_all).cuda(), torch.tensor(items_all).cuda())
inferences = inferences.detach().cpu().tolist()
for (i, user) in enumerate(list(set(users))):
inferences_dict[user] = inferences[(i * model.num_items):((i + 1) * model.num_items)]
for (i, user) in enumerate(users):
inferences = inferences_dict[user]
score = inferences[items[i]]
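            # Rank the held-out item by counting predictions that score strictly higher (0 = best).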
rank = 0
for s in inferences:
if (score < s):
rank += 1
if (rank < topk):
hr = 1.0
ndcg = (math.log(2) / math.log((rank + 2)))
else:
hr = 0.0
ndcg = 0.0
hrs.append(hr)
ndcgs.append(ndcg)
return (np.mean(hrs), np.mean(ndcgs)) |
@pytest.fixture()
def mock_utils_debugger(mocker):
def call_orig_func(func, *args, **kwargs):
return func(*args, **kwargs)
debugger_mock = mocker.patch('radish.utils.get_debugger')
debugger_mock.return_value.runcall = mocker.MagicMock(side_effect=call_orig_func)
return debugger_mock.return_value |
class Example(object):
def __init__(self, qas_id, qas_type, doc_tokens, question_text, sent_num, sent_names, sup_fact_id, para_start_end_position, sent_start_end_position, entity_start_end_position, orig_answer_text=None, start_position=None, end_position=None):
self.qas_id = qas_id
self.qas_type = qas_type
self.doc_tokens = doc_tokens
self.question_text = question_text
self.sent_num = sent_num
self.sent_names = sent_names
self.sup_fact_id = sup_fact_id
self.para_start_end_position = para_start_end_position
self.sent_start_end_position = sent_start_end_position
self.entity_start_end_position = entity_start_end_position
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position |
def _handle_eval_return(self, result, col, as_pyranges, subset):
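        # result maps each chromosome (or chromosome/strand pair) to the value the function returned for it.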
if as_pyranges:
if (not result):
return pr.PyRanges()
first_hit = list(result.values())[0]
if isinstance(first_hit, pd.Series):
if ((first_hit.dtype == bool) and subset):
return self[result]
elif col:
self.__setattr__(col, result)
return self
else:
raise Exception('Cannot return PyRanges when function returns a Series! Use as_pyranges=False.')
return pr.PyRanges(result)
else:
return result |
class FakeNetworkCache(QAbstractNetworkCache):
def cacheSize(self):
return 0
def data(self, _url):
return None
def insert(self, _dev):
pass
def metaData(self, _url):
return QNetworkCacheMetaData()
def prepare(self, _metadata):
return None
def remove(self, _url):
return False
def updateMetaData(self, _url):
pass |
class MultiAttentionEncoder(SequenceMultiEncoder):
def __init__(self, n_encodings: int, bias: bool=False, key_mapper: SequenceMapper=None, post_process: Mapper=None, init='glorot_uniform'):
self.init = init
self.bias = bias
self.n_encodings = n_encodings
self.key_mapper = key_mapper
self.post_process = post_process
def apply(self, is_train, x, mask=None):
if (self.key_mapper is not None):
with tf.variable_scope('map_keys'):
keys = self.key_mapper.apply(is_train, x, mask)
else:
keys = x
weights = tf.get_variable('weights', (keys.shape.as_list()[(- 1)], self.n_encodings), dtype=tf.float32, initializer=get_keras_initialization(self.init))
dist = tf.tensordot(keys, weights, axes=[[2], [0]])
if self.bias:
dist += tf.get_variable('bias', (1, 1, self.n_encodings), dtype=tf.float32, initializer=tf.zeros_initializer())
if (mask is not None):
bool_mask = tf.expand_dims(tf.cast(tf.sequence_mask(mask, tf.shape(x)[1]), tf.float32), 2)
            dist = ((bool_mask * dist) + ((1 - bool_mask) * VERY_NEGATIVE_NUMBER))
dist = tf.nn.softmax(dist, dim=1)
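        # Attention-weighted pooling over the sequence axis: one summary vector per encoding.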
out = tf.einsum('ajk,ajn->ank', x, dist)
if (self.post_process is not None):
with tf.variable_scope('post_process'):
out = self.post_process.apply(is_train, out)
return out |
def read_flit_config(path):
res = _read_flit_config_core(path)
if validate_config(res):
if os.environ.get('FLIT_ALLOW_INVALID'):
log.warning('Allowing invalid data (FLIT_ALLOW_INVALID set). Uploads may still fail.')
else:
raise ConfigError('Invalid config values (see log)')
return res |
class PerImgPert():
def __init__(self, sess, config, filepath, batch_size, regu, learning_rate=0.1, binary_search_steps=1, max_iterations=101, initial_const=1):
(image_size, num_channels, num_labels) = (32, 3, 10)
self.sess = sess
self.LEARNING_RATE = learning_rate
self.MAX_ITERATIONS = max_iterations
self.BINARY_SEARCH_STEPS = binary_search_steps
self.initial_const = initial_const
self.batch_size = batch_size
shape = (batch_size, image_size, image_size, num_channels)
shape_pert = (batch_size, image_size, image_size, num_channels)
self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)
self.tlab = tf.Variable(np.zeros((batch_size, num_labels)), dtype=tf.float32)
self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32)
self.modifier = tf.Variable(np.zeros(shape_pert, dtype=np.float32))
self.det = tf.Variable(np.ones(shape_pert, dtype=np.float32), constraint=(lambda x: tf.clip_by_value(x, 0, 255)))
self.newimg = ((self.modifier * self.det) + (self.timg * (1 - self.modifier)))
self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels), name='tlab')
self.assign_timg = tf.placeholder(tf.float32, shape, name='timag')
self.assign_const = tf.placeholder(tf.float32, [batch_size], name='tconst')
global_step = tf.contrib.framework.get_or_create_global_step()
model = resnet.Model(config.model, self.newimg)
model_dir = filepath
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
var_list = var_list[5:]
saver = tf.train.Saver(max_to_keep=3, var_list=var_list)
latest_checkpoint = tf.train.latest_checkpoint(model_dir)
if (latest_checkpoint is not None):
saver.restore(sess, latest_checkpoint)
print('Restoring last saved checkpoint: ', latest_checkpoint)
else:
print('Check model directory')
exit()
self.output = model.pre_softmax
real = tf.reduce_sum((self.tlab * self.output), 1)
other = tf.reduce_max((((1 - self.tlab) * self.output) - (self.tlab * 10000)), 1)
loss1 = tf.maximum((- 15.0), (other - real))
self.loss1 = tf.reduce_sum(loss1)
if (regu == 'l2'):
self.loss2 = tf.reduce_sum(tf.square(self.modifier))
else:
self.loss2 = tf.reduce_sum(tf.abs(self.modifier))
self.loss = self.loss1
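        # Soft-thresholding (proximal step for the L1 penalty), clipped so the perturbation mask stays in [0, 1].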
self.modifier_ST = tf.clip_by_value((tf.sign(self.modifier) * tf.maximum((tf.abs(self.modifier) - (self.LEARNING_RATE / self.initial_const)), 0)), clip_value_min=0, clip_value_max=1)
self.assign_ST = tf.assign(self.modifier, self.modifier_ST)
start_vars = set((x.name for x in tf.global_variables()))
optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)
self.train1 = optimizer.minimize(self.loss, var_list=[self.modifier])
self.train2 = optimizer.minimize(self.loss, var_list=[self.det])
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if (x.name not in start_vars)]
self.setup = []
self.setup.append(self.timg.assign(self.assign_timg))
self.setup.append(self.tlab.assign(self.assign_tlab))
self.setup.append(self.const.assign(self.assign_const))
self.init = tf.variables_initializer(var_list=(([self.modifier] + [self.det]) + new_vars))
def attack(self, imgs, labs):
self.assign_input = imgs
batch_size = self.batch_size
best_loss = self.loss
perturb_best = self.modifier
CONST = (np.ones(batch_size) * self.initial_const)
for outer_step in range(self.BINARY_SEARCH_STEPS):
self.sess.run(self.init)
self.sess.run(self.setup, {self.assign_timg: imgs, self.assign_tlab: labs, self.assign_const: CONST})
for iteration in range(self.MAX_ITERATIONS):
(_, outp) = self.sess.run([self.train1, self.output])
self.sess.run(self.assign_ST)
(_, perturb_temp, det, outp) = self.sess.run([self.train2, self.modifier, self.det, self.output])
if ((iteration % (self.MAX_ITERATIONS // 10)) == 0):
print(iteration, self.sess.run((self.loss, self.loss1, self.loss2, tf.reduce_sum(tf.abs(perturb_temp)))))
if ((iteration % 50) == 0):
print(np.argmax(outp, axis=1))
output_arg1 = np.argmax(outp, axis=1)
indices1 = np.where(np.equal(output_arg1, np.argmax(labs, axis=1)))
return (perturb_temp, det, indices1, outp) |
def read_tables(data_dir, bc):
bc.create_table('store_sales', os.path.join(data_dir, 'store_sales/*.parquet'))
bc.create_table('date_dim', os.path.join(data_dir, 'date_dim/*.parquet'))
bc.create_table('item', os.path.join(data_dir, 'item/*.parquet'))
bc.create_table('web_sales', os.path.join(data_dir, 'web_sales/*.parquet'))
bc.create_table('store_returns', os.path.join(data_dir, 'store_returns/*.parquet'))
bc.create_table('store', os.path.join(data_dir, 'store/*.parquet')) |
def infer_func_form(node: nodes.Call, base_type: list[nodes.NodeNG], context: (InferenceContext | None)=None, enum: bool=False) -> tuple[(nodes.ClassDef, str, list[str])]:
try:
(name, names) = _find_func_form_arguments(node, context)
try:
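            # Field names may arrive as one string, space- or comma-separated.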
attributes: list[str] = names.value.replace(',', ' ').split()
except AttributeError as exc:
if (not enum):
attributes = []
fields = _get_namedtuple_fields(node)
if fields:
fields_node = extract_node(fields)
attributes = [_infer_first(const, context).value for const in fields_node.elts]
else:
if (hasattr(names, 'items') and isinstance(names.items, list)):
attributes = [_infer_first(const[0], context).value for const in names.items if isinstance(const[0], nodes.Const)]
elif hasattr(names, 'elts'):
if all((isinstance(const, nodes.Tuple) for const in names.elts)):
attributes = [_infer_first(const.elts[0], context).value for const in names.elts if isinstance(const, nodes.Tuple)]
else:
attributes = [_infer_first(const, context).value for const in names.elts]
else:
raise AttributeError from exc
if (not attributes):
raise AttributeError from exc
except (AttributeError, InferenceError) as exc:
raise UseInferenceDefault from exc
if (not enum):
attributes = [str(attr) for attr in attributes]
attributes = [attr for attr in attributes if (' ' not in attr)]
name = (name or 'Uninferable')
class_node = nodes.ClassDef(name, lineno=node.lineno, col_offset=node.col_offset, end_lineno=node.end_lineno, end_col_offset=node.end_col_offset, parent=nodes.Unknown())
class_node.parent = node.parent
class_node.postinit(bases=base_type, body=[], decorators=None)
for attr in attributes:
fake_node = nodes.EmptyNode()
fake_node.parent = class_node
fake_node.attrname = attr
class_node.instance_attrs[attr] = [fake_node]
return (class_node, name, attributes) |
def test_parallel_and_sequential_ces_are_equal(s, micro_s, macro_s):
with config.override(PARALLEL_CONCEPT_EVALUATION=False):
c = compute.subsystem.ces(s)
c_micro = compute.subsystem.ces(micro_s)
c_macro = compute.subsystem.ces(macro_s)
with config.override(PARALLEL_CONCEPT_EVALUATION=True):
assert (set(c) == set(compute.subsystem.ces(s)))
assert (set(c_micro) == set(compute.subsystem.ces(micro_s)))
assert (set(c_macro) == set(compute.subsystem.ces(macro_s))) |
class SponsorBenefitModelTests(TestCase):
def setUp(self):
self.sponsorship = baker.make(Sponsorship)
self.sponsorship_benefit = baker.make(SponsorshipBenefit, name='Benefit')
def test_new_copy_also_add_benefit_feature_when_creating_sponsor_benefit(self):
benefit_config = baker.make(LogoPlacementConfiguration, benefit=self.sponsorship_benefit)
self.assertEqual(0, LogoPlacement.objects.count())
sponsor_benefit = SponsorBenefit.new_copy(self.sponsorship_benefit, sponsorship=self.sponsorship)
self.assertEqual(1, LogoPlacement.objects.count())
benefit_feature = sponsor_benefit.features.get()
self.assertIsInstance(benefit_feature, LogoPlacement)
self.assertEqual(benefit_feature.publisher, benefit_config.publisher)
self.assertEqual(benefit_feature.logo_place, benefit_config.logo_place)
def test_new_copy_do_not_save_unexisting_features(self):
benefit_config = baker.make(TieredBenefitConfiguration, package__name='Another package', benefit=self.sponsorship_benefit)
self.assertEqual(0, TieredBenefit.objects.count())
sponsor_benefit = SponsorBenefit.new_copy(self.sponsorship_benefit, sponsorship=self.sponsorship)
self.assertEqual(0, TieredBenefit.objects.count())
self.assertFalse(sponsor_benefit.features.exists())
def test_sponsor_benefit_name_for_display(self):
name = 'Benefit'
sponsor_benefit = baker.make(SponsorBenefit, name=name)
self.assertEqual(sponsor_benefit.name_for_display, name)
benefit_config = baker.make(TieredBenefit, sponsor_benefit=sponsor_benefit, quantity=10)
self.assertEqual(sponsor_benefit.name_for_display, f'{name} (10)')
def test_sponsor_benefit_from_standalone_one(self):
self.sponsorship_benefit.standalone = True
self.sponsorship_benefit.save()
sponsor_benefit = SponsorBenefit.new_copy(self.sponsorship_benefit, sponsorship=self.sponsorship)
self.assertTrue(sponsor_benefit.added_by_user)
self.assertTrue(sponsor_benefit.standalone)
def test_reset_attributes_updates_all_basic_information(self):
benefit = baker.make(SponsorBenefit, sponsorship_benefit=self.sponsorship_benefit)
self.assertNotEqual(benefit.name, self.sponsorship_benefit.name)
benefit.reset_attributes(self.sponsorship_benefit)
benefit.refresh_from_db()
self.assertEqual(benefit.name, self.sponsorship_benefit.name)
self.assertEqual(benefit.description, self.sponsorship_benefit.description)
self.assertEqual(benefit.program_name, self.sponsorship_benefit.program.name)
self.assertEqual(benefit.program, self.sponsorship_benefit.program)
self.assertEqual(benefit.benefit_internal_value, self.sponsorship_benefit.internal_value)
self.assertEqual(benefit.standalone, self.sponsorship_benefit.standalone)
def test_reset_attributes_add_new_features(self):
RequiredTextAssetConfiguration.objects.create(benefit=self.sponsorship_benefit, related_to='sponsorship', internal_name='foo', label='Text')
benefit = baker.make(SponsorBenefit, sponsorship_benefit=self.sponsorship_benefit)
self.assertFalse(benefit.features.count())
benefit.reset_attributes(self.sponsorship_benefit)
benefit.refresh_from_db()
self.assertEqual(1, benefit.features.count())
def test_reset_attributes_delete_removed_features(self):
cfg = RequiredTextAssetConfiguration.objects.create(benefit=self.sponsorship_benefit, related_to='sponsorship', internal_name='foo', label='Text')
benefit = SponsorBenefit.new_copy(self.sponsorship_benefit, sponsorship=self.sponsorship)
self.assertEqual(1, benefit.features.count())
cfg.delete()
benefit.reset_attributes(self.sponsorship_benefit)
benefit.refresh_from_db()
self.assertFalse(benefit.features.count())
def test_reset_attributes_recreate_features_but_keeping_previous_values(self):
cfg = RequiredTextAssetConfiguration.objects.create(benefit=self.sponsorship_benefit, related_to='sponsorship', internal_name='foo', label='Text')
benefit = SponsorBenefit.new_copy(self.sponsorship_benefit, sponsorship=self.sponsorship)
feature = RequiredTextAsset.objects.get()
feature.value = 'foo'
feature.save()
cfg.label = 'New text'
cfg.save()
benefit.reset_attributes(self.sponsorship_benefit)
benefit.refresh_from_db()
self.assertEqual(1, benefit.features.count())
asset = benefit.features.required_assets().get()
self.assertEqual(asset.label, 'New text')
self.assertEqual(asset.value, 'foo')
def test_clone_benefit_regular_attributes_to_a_new_year(self):
benefit = baker.make(SponsorshipBenefit, name='Benefit', description='desc', program__name='prog', package_only=False, new=True, unavailable=True, standalone=True, internal_description='internal desc', internal_value=300, capacity=100, soft_capacity=True, year=2022)
(benefit_2023, created) = benefit.clone(year=2023)
self.assertTrue(created)
self.assertEqual('Benefit', benefit_2023.name)
self.assertEqual('desc', benefit_2023.description)
self.assertEqual(benefit.program, benefit_2023.program)
self.assertFalse(benefit_2023.package_only)
self.assertTrue(benefit_2023.new)
self.assertTrue(benefit_2023.unavailable)
self.assertTrue(benefit_2023.standalone)
self.assertEqual('internal desc', benefit_2023.internal_description)
self.assertEqual(300, benefit_2023.internal_value)
self.assertEqual(100, benefit_2023.capacity)
self.assertTrue(benefit_2023.soft_capacity)
self.assertEqual(2023, benefit_2023.year)
self.assertEqual(benefit.order, benefit_2023.order)
def test_clone_benefit_should_be_idempotent(self):
(benefit_2023, created) = self.sponsorship_benefit.clone(year=2023)
(repeated, created) = self.sponsorship_benefit.clone(year=2023)
self.assertFalse(created)
self.assertEqual(benefit_2023.pk, repeated.pk)
def test_clone_related_objects_as_well(self):
pkgs = baker.make(SponsorshipPackage, _quantity=2)
clauses = baker.make(LegalClause, _quantity=2)
self.sponsorship_benefit.legal_clauses.add(*clauses)
self.sponsorship_benefit.packages.add(*pkgs)
(benefit_2023, _) = self.sponsorship_benefit.clone(2023)
benefit_2023.refresh_from_db()
self.assertEqual(4, SponsorshipPackage.objects.count())
self.assertEqual(2023, benefit_2023.packages.values_list('year', flat=True).distinct().first())
self.assertEqual(4, LegalClause.objects.count())
self.assertEqual(2, benefit_2023.legal_clauses.count())
def test_clone_benefit_feature_configurations(self):
cfg_1 = baker.make(LogoPlacementConfiguration, publisher=PublisherChoices.FOUNDATION, logo_place=LogoPlacementChoices.FOOTER, benefit=self.sponsorship_benefit)
cfg_2 = baker.make(RequiredTextAssetConfiguration, related_to=AssetsRelatedTo.SPONSOR.value, internal_name='config_name', benefit=self.sponsorship_benefit)
(benefit_2023, _) = self.sponsorship_benefit.clone(2023)
self.assertEqual(2, LogoPlacementConfiguration.objects.count())
self.assertEqual(2, RequiredTextAssetConfiguration.objects.count())
        self.assertEqual(1, LogoPlacementConfiguration.objects.filter(benefit=benefit_2023).count())
        self.assertEqual(1, RequiredTextAssetConfiguration.objects.filter(benefit=benefit_2023).count()) |
def test_parametric_mesh_forward():
tmpdir = tempfile.TemporaryDirectory()
generate_smpl_weight_file(tmpdir.name)
model_cfg = dict(pretrained=None, backbone=dict(type='ResNet', depth=50), mesh_head=dict(type='HMRMeshHead', in_channels=2048, smpl_mean_params='tests/data/smpl/smpl_mean_params.npz'), disc=None, smpl=dict(type='SMPL', smpl_path=tmpdir.name, joints_regressor=osp.join(tmpdir.name, 'test_joint_regressor.npy')), train_cfg=dict(disc_step=1), test_cfg=dict(flip_test=False, post_process='default', shift_heatmap=True, modulate_kernel=11), loss_mesh=dict(type='MeshLoss', joints_2d_loss_weight=1, joints_3d_loss_weight=1, vertex_loss_weight=1, smpl_pose_loss_weight=1, smpl_beta_loss_weight=1, focal_length=5000, img_res=256), loss_gan=None)
detector = ParametricMesh(**model_cfg)
detector.init_weights()
optimizers_config = dict(generator=dict(type='Adam', lr=0.0001))
optims = build_optimizers(detector, optimizers_config)
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape)
output = detector.train_step(mm_inputs, optims)
assert isinstance(output, dict)
with torch.no_grad():
output = detector.val_step(data_batch=mm_inputs)
assert isinstance(output, dict)
imgs = mm_inputs.pop('img')
img_metas = mm_inputs.pop('img_metas')
output = detector.forward(imgs, img_metas=img_metas, return_loss=False)
assert isinstance(output, dict)
model_cfg['disc'] = dict()
model_cfg['loss_gan'] = dict(type='GANLoss', gan_type='lsgan', real_label_val=1.0, fake_label_val=0.0, loss_weight=1)
optimizers_config['discriminator'] = dict(type='Adam', lr=0.0001)
detector = ParametricMesh(**model_cfg)
detector.init_weights()
optims = build_optimizers(detector, optimizers_config)
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape)
output = detector.train_step(mm_inputs, optims)
assert isinstance(output, dict)
with torch.no_grad():
output = detector.val_step(data_batch=mm_inputs)
assert isinstance(output, dict)
imgs = mm_inputs.pop('img')
img_metas = mm_inputs.pop('img_metas')
output = detector.forward(imgs, img_metas=img_metas, return_loss=False)
assert isinstance(output, dict)
_ = detector.forward_dummy(imgs)
tmpdir.cleanup() |
class Opcode(Configurable, OpcodeAPI):
mnemonic: str = None
gas_cost: int = None
def __init__(self) -> None:
if (self.mnemonic is None):
raise TypeError(f'Opcode class {type(self)} missing opcode mnemonic')
if (self.gas_cost is None):
raise TypeError(f'Opcode class {type(self)} missing opcode gas_cost')
def logger(self) -> ExtendedDebugLogger:
return get_extended_debug_logger(f'eth.vm.logic.{self.mnemonic}')
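    # Builds a one-off Opcode subclass whose __call__ charges gas before running logic_fn.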
    @classmethod
    def as_opcode(cls: Type[T], logic_fn: Callable[(..., Any)], mnemonic: str, gas_cost: int) -> T:
        if gas_cost:
            @functools.wraps(logic_fn)
def wrapped_logic_fn(computation: ComputationAPI) -> Any:
computation.consume_gas(gas_cost, mnemonic)
return logic_fn(computation)
else:
wrapped_logic_fn = logic_fn
props = {'__call__': staticmethod(wrapped_logic_fn), 'mnemonic': mnemonic, 'gas_cost': gas_cost}
opcode_cls = type(f'opcode:{mnemonic}', (cls,), props)
return opcode_cls()
def __copy__(self) -> 'Opcode':
return type(self)()
def __deepcopy__(self, memo: Any) -> 'Opcode':
return type(self)() |
@pytest.mark.parametrize('shape,tile_shape', [((2,), (3,)), ((2, 2), (3, 2)), ((2, 3), (2, 2))])
def test_read_write_tiles_error(tmp_path, shape, tile_shape):
with pytest.raises(ValueError, match='must be divisible'):
write_tiles(ary=num.ones(shape), dirpath=tmp_path, tile_shape=tile_shape)
with pytest.raises(ValueError, match='must be divisible'):
read_tiles(ary=num.ones(shape), dirpath=tmp_path, tile_shape=tile_shape) |
class ButtonsRow():
def __init__(self):
self._content = []
def url(self, label, url):
self._content.append({'text': label, 'url': url})
def callback(self, label, callback, data=None):
def generate_callback_data(chat):
c = ctx()
name = ('%s:%s' % (c.component_name(), callback))
return get_callback_data(c.bot, chat, name, data)
self._content.append({'text': label, 'callback_data': generate_callback_data})
def switch_inline_query(self, label, query='', current_chat=False):
if current_chat:
self._content.append({'text': label, 'switch_inline_query_current_chat': query})
else:
self._content.append({'text': label, 'switch_inline_query': query})
def _get_content(self, chat):
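        # Resolve deferred (callable) values, e.g. per-chat callback_data, at send time.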
for item in self._content:
new = item.copy()
for (key, value) in new.items():
if callable(value):
new[key] = value(chat)
(yield new) |
class RLlibStarCraft2Env(rllib.MultiAgentEnv):
def __init__(self, **smac_args):
self._env = StarCraft2Env(**smac_args)
self._ready_agents = []
self.observation_space = Dict({'obs': Box((- 1), 1, shape=(self._env.get_obs_size(),)), 'action_mask': Box(0, 1, shape=(self._env.get_total_actions(),))})
self.action_space = Discrete(self._env.get_total_actions())
def reset(self):
(obs_list, state_list) = self._env.reset()
return_obs = {}
for (i, obs) in enumerate(obs_list):
return_obs[i] = {'action_mask': self._env.get_avail_agent_actions(i), 'obs': obs}
self._ready_agents = list(range(len(obs_list)))
return return_obs
def step(self, action_dict):
actions = []
for i in self._ready_agents:
if (i not in action_dict):
raise ValueError('You must supply an action for agent: {}'.format(i))
actions.append(action_dict[i])
if (len(actions) != len(self._ready_agents)):
            raise ValueError('Unexpected number of actions: {} (ready agents: {})'.format(action_dict, self._ready_agents))
(rew, done, info) = self._env.step(actions)
obs_list = self._env.get_obs()
return_obs = {}
for (i, obs) in enumerate(obs_list):
return_obs[i] = {'action_mask': self._env.get_avail_agent_actions(i), 'obs': obs}
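        # SMAC returns a single team reward; split it evenly across agents.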
rews = {i: (rew / len(obs_list)) for i in range(len(obs_list))}
dones = {i: done for i in range(len(obs_list))}
dones['__all__'] = done
infos = {i: info for i in range(len(obs_list))}
self._ready_agents = list(range(len(obs_list)))
return (return_obs, rews, dones, infos) |
class Effect1638(BaseEffect):
type = 'passive'
def handler(fit, skill, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: (mod.item.requiresSkill('Gunnery') or mod.item.requiresSkill('Missile Launcher Operation') or mod.item.requiresSkill('Vorton Projector Operation'))), 'power', (skill.getModifiedItemAttr('powerNeedBonus') * skill.level), **kwargs) |
class Trainer(object):
def __init__(self, args):
self.args = args
if (args.class_name in MVTEC_CLASS_NAMES):
train_dataset = MVTecDataset(args, is_train=True)
test_dataset = MVTecDataset(args, is_train=False)
elif (args.class_name in BTAD_CLASS_NAMES):
train_dataset = BTADDataset(args.data_path, classname=args.class_name, resize=self.args.inp_size, cropsize=self.args.inp_size, is_train=True)
test_dataset = BTADDataset(args.data_path, classname=args.class_name, resize=self.args.inp_size, cropsize=self.args.inp_size, is_train=False)
elif (args.class_name in MVTEC3D_CLASS_NAMES):
train_dataset = MVTec3DDataset(args.data_path, classname=args.class_name, resize=self.args.inp_size, cropsize=self.args.inp_size, is_train=True)
test_dataset = MVTec3DDataset(args.data_path, classname=args.class_name, resize=self.args.inp_size, cropsize=self.args.inp_size, is_train=False)
else:
raise ValueError('Invalid Class Name: {}'.format(args.class_name))
kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
self.train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False, **kwargs)
self.test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, drop_last=False, **kwargs)
self.build_model()
self.l2_criterion = nn.MSELoss()
self.cos_criterion = nn.CosineSimilarity(dim=(- 1))
def build_model(self):
encoder = timm.create_model(self.args.backbone_arch, features_only=True, out_indices=[2, 3], pretrained=True)
self.encoder = encoder.to(self.args.device).eval()
feat_dims = encoder.feature_info.channels()
print('Feature Dimensions:', feat_dims)
models = []
self.seq_lens = [1024, 256]
self.ws = [32, 16]
for (seq_len, in_channels, d_model) in zip(self.seq_lens, feat_dims, [256, 512]):
model = FOD(seq_len=seq_len, in_channels=in_channels, out_channels=in_channels, d_model=d_model, n_heads=8, n_layers=3, args=self.args)
print('One Model...Done')
models.append(model.to(self.args.device))
self.models = models
print('Creating Models...Done')
params = list(models[0].parameters())
for l in range(1, self.args.feature_levels):
params += list(models[l].parameters())
self.optimizer = torch.optim.Adam(params, lr=self.args.lr)
self.avg_pool = torch.nn.AvgPool2d(3, 1, 1)
def train(self):
path = os.path.join(self.args.save_path, self.args.save_prefix)
if (not os.path.exists(path)):
os.makedirs(path)
start_time = time.time()
train_steps = len(self.train_loader)
(best_img_auc, best_pix_auc) = (0.0, 0.0)
for epoch in range(self.args.num_epochs):
print('TRAIN MODE')
iter_count = 0
(loss_rec_list, loss_intra_entropy_list, loss_inter_entropy_list) = ([], [], [])
(loss_corr_list, loss_target_list) = ([], [])
epoch_time = time.time()
for model in self.models:
model.train()
for (i, (images, _, _, _, _)) in enumerate(self.train_loader):
iter_count += 1
images = images.float().to(self.args.device)
with torch.no_grad():
features = self.encoder(images)
for fl in range(self.args.feature_levels):
                    input = self.avg_pool(features[fl])
(N, D, _, _) = input.shape
input = input.permute(0, 2, 3, 1).reshape(N, (- 1), D)
model = self.models[fl]
(output, intra_corrs, intra_targets, inter_corrs, inter_targets) = model(input)
if self.args.with_intra:
(loss_intra1, loss_intra2, loss_intra_entropy) = (0.0, 0.0, 0.0)
for l in range(len(intra_targets)):
L = intra_targets[l].shape[(- 1)]
norm_targets = (intra_targets[l] / torch.unsqueeze(torch.sum(intra_targets[l], dim=(- 1)), dim=(- 1)).repeat(1, 1, 1, L)).detach()
loss_intra1 += (torch.mean(kl_loss(norm_targets, intra_corrs[l])) + torch.mean(kl_loss(intra_corrs[l], norm_targets)))
norm_targets = (intra_targets[l] / torch.unsqueeze(torch.sum(intra_targets[l], dim=(- 1)), dim=(- 1)).repeat(1, 1, 1, L))
loss_intra2 += (torch.mean(kl_loss(norm_targets, intra_corrs[l].detach())) + torch.mean(kl_loss(intra_corrs[l].detach(), norm_targets)))
loss_intra_entropy += torch.mean(entropy_loss(intra_corrs[l]))
loss_intra1 = (loss_intra1 / len(intra_targets))
loss_intra2 = (loss_intra2 / len(intra_targets))
loss_intra_entropy = (loss_intra_entropy / len(intra_targets))
if self.args.with_inter:
(loss_inter1, loss_inter2, loss_inter_entropy) = (0.0, 0.0, 0.0)
for l in range(len(inter_targets)):
L = inter_targets[l].shape[(- 1)]
norm_targets = (inter_targets[l] / torch.unsqueeze(torch.sum(inter_targets[l], dim=(- 1)), dim=(- 1)).repeat(1, 1, 1, L)).detach()
loss_inter1 += (torch.mean(kl_loss(norm_targets, inter_corrs[l])) + torch.mean(kl_loss(inter_corrs[l], norm_targets)))
norm_targets = (inter_targets[l] / torch.unsqueeze(torch.sum(inter_targets[l], dim=(- 1)), dim=(- 1)).repeat(1, 1, 1, L))
loss_inter2 += (torch.mean(kl_loss(norm_targets, inter_corrs[l].detach())) + torch.mean(kl_loss(inter_corrs[l].detach(), norm_targets)))
loss_inter_entropy += torch.mean(entropy_loss(inter_corrs[l]))
loss_inter1 = (loss_inter1 / len(inter_targets))
loss_inter2 = (loss_inter2 / len(inter_targets))
loss_inter_entropy = (loss_inter_entropy / len(inter_targets))
loss_rec = (self.l2_criterion(output, input) + torch.mean((1 - self.cos_criterion(output, input))))
if (self.args.with_intra and self.args.with_inter):
loss1 = ((loss_rec + (self.args.lambda1 * loss_intra2)) - (self.args.lambda1 * loss_inter2))
loss2 = ((((loss_rec - (self.args.lambda1 * loss_intra1)) - (self.args.lambda2 * loss_intra_entropy)) + (self.args.lambda1 * loss_inter1)) + (self.args.lambda2 * loss_inter_entropy))
elif self.args.with_intra:
loss1 = (loss_rec + (self.args.lambda1 * loss_intra2))
loss2 = ((loss_rec - (self.args.lambda1 * loss_intra1)) - (self.args.lambda2 * loss_intra_entropy))
elif self.args.with_inter:
loss1 = (loss_rec - (self.args.lambda1 * loss_inter2))
loss2 = ((loss_rec + (self.args.lambda1 * loss_inter1)) + (self.args.lambda2 * loss_inter_entropy))
else:
loss = loss_rec
loss_rec_list.append(loss_rec.item())
if (self.args.with_intra and self.args.with_inter):
loss_target_list.append((loss_intra2 - loss_inter2).item())
loss_corr_list.append(((- loss_intra1) + loss_inter1).item())
loss_intra_entropy_list.append(loss_intra_entropy.item())
loss_inter_entropy_list.append(loss_inter_entropy.item())
elif self.args.with_intra:
loss_target_list.append(loss_intra2.item())
loss_corr_list.append((- loss_intra1).item())
loss_intra_entropy_list.append(loss_intra_entropy.item())
elif self.args.with_inter:
loss_target_list.append((- loss_inter2).item())
loss_corr_list.append(loss_inter1.item())
loss_inter_entropy_list.append(loss_inter_entropy.item())
self.optimizer.zero_grad()
if ((not self.args.with_intra) and (not self.args.with_inter)):
loss.backward()
else:
loss1.backward(retain_graph=True)
loss2.backward()
self.optimizer.step()
speed = ((time.time() - start_time) / iter_count)
left_time = (speed * (((self.args.num_epochs - epoch) * train_steps) - i))
print('Epoch: {} cost time: {}s | speed: {:.4f}s/iter | left time: {:.4f}s'.format((epoch + 1), (time.time() - epoch_time), speed, left_time))
iter_count = 0
start_time = time.time()
if (self.args.with_intra and self.args.with_inter):
print('Epoch: {0}, Steps: {1} | Rec Loss: {2:.7f} | Target Loss: {3:.7f} | Corr Loss: {4:.7f} | Intra Entropy: {5:.7f} | Inter Entropy: {6:.7f}'.format((epoch + 1), train_steps, np.average(loss_rec_list), np.average(loss_target_list), np.average(loss_corr_list), np.average(loss_intra_entropy_list), np.average(loss_inter_entropy_list)))
elif self.args.with_intra:
print('Epoch: {0}, Steps: {1} | Rec Loss: {2:.7f} | Target Loss: {3:.7f} | Corr Loss: {4:.7f} | Intra Entropy: {5:.7f}'.format((epoch + 1), train_steps, np.average(loss_rec_list), np.average(loss_target_list), np.average(loss_corr_list), np.average(loss_intra_entropy_list)))
elif self.args.with_inter:
print('Epoch: {0}, Steps: {1} | Rec Loss: {2:.7f} | Target Loss: {3:.7f} | Corr Loss: {4:.7f} | Inter Entropy: {5:.7f}'.format((epoch + 1), train_steps, np.average(loss_rec_list), np.average(loss_target_list), np.average(loss_corr_list), np.average(loss_inter_entropy_list)))
else:
print('Epoch: {0}, Steps: {1} | Rec Loss: {2:.7f}'.format((epoch + 1), train_steps, np.average(loss_rec_list)))
(img_auc, pix_auc) = self.test(vis=False)
print('Epoch: {0}, Class Name: {1}, Image AUC: {2:.7f} | Pixel AUC: {3:.7f}'.format((epoch + 1), self.args.class_name, img_auc, pix_auc))
if (img_auc > best_img_auc):
best_img_auc = img_auc
state = {'state_dict': [model.state_dict() for model in self.models]}
torch.save(state, os.path.join(path, (self.args.class_name + '-img.pth')))
if (pix_auc > best_pix_auc):
best_pix_auc = pix_auc
state = {'state_dict': [model.state_dict() for model in self.models]}
torch.save(state, os.path.join(path, (self.args.class_name + '-pix.pth')))
return (best_img_auc, best_pix_auc)
def test(self, vis=False, checkpoint_path=None):
if (checkpoint_path is not None):
checkpoint = torch.load(os.path.join(checkpoint_path, (self.args.class_name + '-pix.pth')))
state_dict = checkpoint['state_dict']
for (i, model) in enumerate(self.models):
model.load_state_dict(state_dict[i])
for model in self.models:
model.eval()
temperature = 1
print('TEST MODE')
l2_criterion = nn.MSELoss(reduction='none')
cos_criterion = nn.CosineSimilarity(dim=(- 1))
scores_list = [list() for _ in range(self.args.feature_levels)]
(test_imgs, gt_label_list, gt_mask_list, file_names, img_types) = ([], [], [], [], [])
for (i, (image, label, mask, file_name, img_type)) in enumerate(self.test_loader):
test_imgs.append(image.cpu().numpy())
gt_label_list.extend(label)
gt_mask_list.extend(mask.numpy())
file_names.extend(file_name)
img_types.extend(img_type)
image = image.float().to(self.args.device)
with torch.no_grad():
features = self.encoder(image)
for fl in range(self.args.feature_levels):
                input = self.avg_pool(features[fl])
(N, D, _, _) = input.shape
input = input.permute(0, 2, 3, 1).reshape(N, (- 1), D)
model = self.models[fl]
(output, intra_corrs, intra_targets, inter_corrs, inter_targets) = model(input, train=False)
rec_score = ((torch.mean(l2_criterion(input, output), dim=(- 1)) + 1) - cos_criterion(input, output))
if self.args.with_intra:
(correlations1, correlations2, entropys) = (0.0, 0.0, 0.0)
for l in range(len(intra_targets)):
L = intra_targets[l].shape[(- 1)]
norm_targets = (intra_targets[l] / torch.unsqueeze(torch.sum(intra_targets[l], dim=(- 1)), dim=(- 1)).repeat(1, 1, 1, L))
correlations1 += (kl_loss(intra_corrs[l], norm_targets) * temperature)
correlations2 += (kl_loss(norm_targets, intra_corrs[l]) * temperature)
entropys += entropy_loss(intra_corrs[l])
corrs = ((correlations1 + correlations2) / len(intra_targets))
intra_score = torch.softmax((- corrs), dim=(- 1))
if self.args.with_inter:
(correlations1, correlations2, entropys) = (0.0, 0.0, 0.0)
for l in range(len(inter_targets)):
L = inter_targets[l].shape[(- 1)]
norm_targets = (inter_targets[l] / torch.unsqueeze(torch.sum(inter_targets[l], dim=(- 1)), dim=(- 1)).repeat(1, 1, 1, L))
correlations1 += (kl_loss(inter_corrs[l], norm_targets) * temperature)
correlations2 += (kl_loss(norm_targets, inter_corrs[l]) * temperature)
entropys += entropy_loss(inter_corrs[l])
corrs = ((correlations1 + correlations2) / len(inter_targets))
inter_score = torch.softmax((- corrs), dim=(- 1))
inter_score = (torch.max(inter_score) - inter_score)
if (self.args.with_intra and self.args.with_inter):
score = (rec_score * inter_score)
elif self.args.with_intra:
score = (rec_score * intra_score)
elif self.args.with_inter:
score = (rec_score * inter_score)
else:
score = rec_score
score = score.detach()
score = score.reshape(score.shape[0], self.ws[fl], self.ws[fl])
score = F.interpolate(score.unsqueeze(1), size=self.args.inp_size, mode='bilinear', align_corners=True).squeeze().cpu().numpy()
scores_list[fl].append(score)
lvl_scores = []
for l in range(self.args.feature_levels):
lvl_score = np.stack(scores_list[l], axis=0)
lvl_scores.append(lvl_score)
scores = np.zeros_like(lvl_scores[0])
for l in range(self.args.feature_levels):
scores += lvl_scores[l]
scores = (scores / self.args.feature_levels)
        gt_mask = np.squeeze(np.asarray(gt_mask_list, dtype=bool), axis=1)  # np.bool alias was removed in NumPy 1.24
pix_auc = roc_auc_score(gt_mask.flatten(), scores.flatten())
for i in range(scores.shape[0]):
scores[i] = gaussian_filter(scores[i], sigma=4)
img_scores = np.max(scores, axis=(1, 2))
        gt_label = np.asarray(gt_label_list, dtype=bool)
img_auc = roc_auc_score(gt_label, img_scores)
if vis:
(precision, recall, thresholds) = precision_recall_curve(gt_label, img_scores)
a = ((2 * precision) * recall)
b = (precision + recall)
f1 = np.divide(a, b, out=np.zeros_like(a), where=(b != 0))
img_threshold = thresholds[np.argmax(f1)]
            visualizer = Visualizer(f'vis_results/{self.args.save_prefix}/{self.args.class_name}')
max_score = np.max(scores)
min_score = np.min(scores)
scores = ((scores - min_score) / (max_score - min_score))
test_imgs = np.concatenate(test_imgs, axis=0)
            visualizer.plot(test_imgs, scores, img_scores, gt_mask, file_names, img_types, img_threshold)
return (img_auc, pix_auc) |
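# A hedged driver sketch for the Trainer above. The argparse namespace is an
# assumption; the attribute names mirror the ones Trainer actually reads
# (class_name, data_path, inp_size, batch_size, num_workers, device,
# backbone_arch, feature_levels, lr, save_path, save_prefix, ...).
def run_anomaly_detection(args):
    trainer = Trainer(args)
    (best_img_auc, best_pix_auc) = trainer.train()
    print('best image AUC: {:.4f} | best pixel AUC: {:.4f}'.format(best_img_auc, best_pix_auc))
    # reload the best pixel-AUC checkpoint and visualize the score maps
    return trainer.test(vis=True, checkpoint_path=os.path.join(args.save_path, args.save_prefix)) |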
def returnPointer(wrapArgs, includeOutput=False):
def decorator(func):
        @functools.wraps(func)
def inner(*args):
orig = getattr(_egl, func.__name__)
newArgs = list(args)
for argnum in wrapArgs:
item = orig.argtypes[argnum]._type_()
newArgs.insert(argnum, item)
res = orig(*newArgs)
if ((orig.restype is EGLBoolean) and (res.value == 0)):
raise WindowProviderException(f'{func.__name__} failed')
out = []
if includeOutput:
out.append(res)
for argnum in wrapArgs:
out.append(newArgs[argnum].value)
if (len(out) == 1):
return out[0]
return tuple(out)
return inner
return decorator |
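# A hedged usage sketch for returnPointer: wrap a ctypes EGL binding so the
# output pointer at argument index 3 is allocated by the wrapper and its value
# returned to the caller. The index follows the EGL C signature
# EGLBoolean eglQuerySurface(dpy, surface, attribute, EGLint *value), but
# treating this binding as present on `_egl` is an assumption of this sketch.
@returnPointer(wrapArgs=[3])
def eglQuerySurface(display, surface, attribute):
    """Returns the queried attribute value filled in by the C call.""" |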
class DirectJunctionCreator():
def __init__(self, id, name):
self.id = id
self.junction = Junction(name, id, JunctionType.direct)
self._incoming_lane_ids = []
self._linked_lane_ids = []
def _get_minimum_lanes_to_connect(self, incoming_road, linked_road):
(incoming_connection, _, incoming_lane_section) = _get_related_lanesection(incoming_road, linked_road)
(linked_connection, sign, linked_lane_section) = _get_related_lanesection(linked_road, incoming_road)
incoming_left_lanes = len(incoming_road.lanes.lanesections[incoming_lane_section].leftlanes)
incoming_right_lanes = len(incoming_road.lanes.lanesections[incoming_lane_section].rightlanes)
linked_left_lanes = len(linked_road.lanes.lanesections[linked_lane_section].leftlanes)
linked_right_lanes = len(linked_road.lanes.lanesections[linked_lane_section].rightlanes)
self._incoming_lane_ids = []
self._linked_lane_ids = []
if (sign > 0):
self._incoming_lane_ids.extend([x for x in range((- min(incoming_right_lanes, linked_right_lanes)), 0, 1)])
self._linked_lane_ids.extend([x for x in range((- min(incoming_right_lanes, linked_right_lanes)), 0, 1)])
self._incoming_lane_ids.extend([x for x in range(1, (min(incoming_left_lanes, linked_left_lanes) + 1), 1)])
self._linked_lane_ids.extend([x for x in range(1, (min(incoming_left_lanes, linked_left_lanes) + 1), 1)])
elif (sign < 0):
self._incoming_lane_ids.extend([(- x) for x in range((- min(incoming_left_lanes, linked_right_lanes)), 0, 1)])
self._linked_lane_ids.extend([x for x in range((- min(incoming_left_lanes, linked_right_lanes)), 0, 1)])
self._incoming_lane_ids.extend([(- x) for x in range(1, (min(incoming_right_lanes, linked_left_lanes) + 1), 1)])
self._linked_lane_ids.extend([x for x in range(1, (min(incoming_right_lanes, linked_left_lanes) + 1), 1)])
def _get_contact_point_linked_road(self, incoming_road):
if (incoming_road.successor and (incoming_road.successor.element_id == self.id)):
return ContactPoint.end
elif (incoming_road.predecessor and (incoming_road.predecessor.element_id == self.id)):
return ContactPoint.start
else:
raise AttributeError('road is not connected to this junction')
def add_connection(self, incoming_road, linked_road, incoming_lane_ids=None, linked_lane_ids=None):
linked_lane_offset = 0
inc_lane_offset = 0
incoming_main_road = False
        if ((incoming_lane_ids is None) and (linked_lane_ids is None)):
self._get_minimum_lanes_to_connect(incoming_road, linked_road)
elif ((incoming_lane_ids is not None) and (linked_lane_ids is not None)):
if (not isinstance(incoming_lane_ids, list)):
self._incoming_lane_ids = [incoming_lane_ids]
else:
self._incoming_lane_ids = incoming_lane_ids
if (not isinstance(linked_lane_ids, list)):
self._linked_lane_ids = [linked_lane_ids]
if (abs(linked_lane_ids) == 1):
incoming_main_road = True
else:
self._linked_lane_ids = linked_lane_ids
if (min([abs(x) for x in self._linked_lane_ids]) == 1):
incoming_main_road = True
        for i in range(len(self._incoming_lane_ids)):
            same_contact_point = (self._get_contact_point_linked_road(incoming_road) == self._get_contact_point_linked_road(linked_road))
            same_sign = (np.sign(self._incoming_lane_ids[i]) == np.sign(self._linked_lane_ids[i]))
            # lanes meeting at the same contact point must have opposite signs; at opposite contact points the signs must match
            if (same_contact_point == same_sign):
                raise MixingDrivingDirection('driving direction not consistent when trying to make connection between roads: {} and {}'.format(incoming_road.id, linked_road.id))
        if (len(self._incoming_lane_ids) != len(self._linked_lane_ids)):
raise NotSameAmountOfLanesError('the incoming_lane_ids and linked_lane_ids are not the same length')
if (abs(self._incoming_lane_ids[0]) != abs(self._linked_lane_ids[0])):
lane_offset = abs((abs(self._incoming_lane_ids[0]) - abs(self._linked_lane_ids[0])))
if incoming_main_road:
linked_lane_offset = (np.sign(self._linked_lane_ids[0]) * lane_offset)
inc_lane_offset = (((- 1) * np.sign((self._incoming_lane_ids[0] * self._linked_lane_ids[0]))) * linked_lane_offset)
else:
inc_lane_offset = (np.sign(self._incoming_lane_ids[0]) * lane_offset)
linked_lane_offset = (((- 1) * np.sign((self._incoming_lane_ids[0] * self._linked_lane_ids[0]))) * inc_lane_offset)
if (incoming_road.predecessor and (incoming_road.predecessor.element_id == self.id)):
incoming_road.pred_direct_junction[linked_road.id] = inc_lane_offset
else:
incoming_road.succ_direct_junction[linked_road.id] = inc_lane_offset
if (linked_road.predecessor and (linked_road.predecessor.element_id == self.id)):
linked_road.pred_direct_junction[incoming_road.id] = linked_lane_offset
else:
linked_road.succ_direct_junction[incoming_road.id] = linked_lane_offset
connection = Connection(incoming_road.id, linked_road.id, self._get_contact_point_linked_road(linked_road))
for i in range(len(self._incoming_lane_ids)):
connection.add_lanelink(self._incoming_lane_ids[i], self._linked_lane_ids[i])
self.junction.add_connection(connection) |
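# A hedged usage sketch for DirectJunctionCreator, following the
# scenariogeneration.xodr pattern; the roads are assumed to already reference
# junction id 100 as their predecessor/successor element.
def build_direct_junction(main_road, ramp_road):
    creator = DirectJunctionCreator(id=100, name='highway_exit_junction')
    # default: pair up the minimum number of matching lanes automatically
    creator.add_connection(main_road, ramp_road)
    # or connect explicit lane ids, e.g. incoming lane -2 to linked lane -1
    creator.add_connection(main_road, ramp_road, incoming_lane_ids=-2, linked_lane_ids=-1)
    return creator.junction |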
class EmbeddingWriterConfig(argparse.ArgumentParser):
def __init__(self):
super().__init__('Pre-compute embeddings for wav2letter++ datasets')
kwargs = {'action': 'store', 'type': str, 'required': True}
self.add_argument('--input', '-i', help='Input Directory', **kwargs)
self.add_argument('--output', '-o', help='Output Directory', **kwargs)
self.add_argument('--model', help='Path to model checkpoint', **kwargs)
self.add_argument('--split', help='Dataset Splits', nargs='+', **kwargs)
self.add_argument('--ext', default='wav', required=False, help='Audio file extension')
self.add_argument('--no-copy-labels', action='store_true', help='Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.')
self.add_argument('--use-feat', action='store_true', help="Use the feature vector ('z') instead of context vector ('c') for features")
self.add_argument('--gpu', help='GPU to use', default=0, type=int) |
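# A minimal usage sketch: parse the embedding-writer CLI defined above. The
# paths are placeholders, not values from the original project.
def parse_embedding_args():
    parser = EmbeddingWriterConfig()
    return parser.parse_args(['-i', 'data/in', '-o', 'data/out', '--model', 'checkpoint.pt', '--split', 'train', 'valid']) |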
class TFCvtEncoder(tf.keras.layers.Layer):
config_class = CvtConfig
def __init__(self, config: CvtConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.stages = [TFCvtStage(config, stage_idx, name=f'stages.{stage_idx}') for stage_idx in range(len(config.depth))]
def call(self, pixel_values: TFModelInputType, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, training: Optional[bool]=False) -> Union[(TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor])]:
all_hidden_states = (() if output_hidden_states else None)
hidden_state = pixel_values
hidden_state = tf.transpose(hidden_state, perm=(0, 2, 3, 1))
cls_token = None
for (_, stage_module) in enumerate(self.stages):
(hidden_state, cls_token) = stage_module(hidden_state, training=training)
if output_hidden_states:
all_hidden_states = (all_hidden_states + (hidden_state,))
hidden_state = tf.transpose(hidden_state, perm=(0, 3, 1, 2))
if output_hidden_states:
all_hidden_states = tuple([tf.transpose(hs, perm=(0, 3, 1, 2)) for hs in all_hidden_states])
if (not return_dict):
return tuple((v for v in [hidden_state, cls_token, all_hidden_states] if (v is not None)))
return TFBaseModelOutputWithCLSToken(last_hidden_state=hidden_state, cls_token_value=cls_token, hidden_states=all_hidden_states) |
def _link_following_layers_to_new_layer_output(new_tensor_output: tf.Tensor, following_layers_and_inputs_dict: Dict[(tf.keras.layers.Layer, List[tf.Tensor])], replaced_layer: tf.keras.layers.Layer):
for (following_layer, keras_inputs) in following_layers_and_inputs_dict.items():
for (idx, keras_input) in enumerate(keras_inputs):
if (keras_input._keras_history.layer == replaced_layer):
keras_inputs[idx] = new_tensor_output
if (isinstance(keras_inputs, list) and (len(keras_inputs) == 1)):
keras_inputs = keras_inputs[0]
_ = following_layer(keras_inputs) |
class W2lDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
if (args.criterion == 'ctc'):
self.criterion_type = CriterionType.CTC
self.blank = (tgt_dict.index('<ctc_blank>') if ('<ctc_blank>' in tgt_dict.indices) else tgt_dict.bos())
if ('<sep>' in tgt_dict.indices):
self.silence = tgt_dict.index('<sep>')
elif ('|' in tgt_dict.indices):
self.silence = tgt_dict.index('|')
else:
self.silence = tgt_dict.eos()
self.asg_transitions = None
elif (args.criterion == 'asg_loss'):
self.criterion_type = CriterionType.ASG
self.blank = (- 1)
self.silence = (- 1)
self.asg_transitions = args.asg_transitions
self.max_replabel = args.max_replabel
assert (len(self.asg_transitions) == (self.vocab_size ** 2))
else:
raise RuntimeError(f'unknown criterion: {args.criterion}')
def generate(self, models, sample, **unused):
encoder_input = {k: v for (k, v) in sample['net_input'].items() if (k != 'prev_output_tokens')}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(self, models, encoder_input):
model = models[0]
encoder_out = model(**encoder_input)
if (self.criterion_type == CriterionType.CTC):
if hasattr(model, 'get_logits'):
emissions = model.get_logits(encoder_out)
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
elif (self.criterion_type == CriterionType.ASG):
emissions = encoder_out['encoder_out']
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs):
idxs = (g[0] for g in it.groupby(idxs))
if (self.criterion_type == CriterionType.CTC):
idxs = filter((lambda x: (x != self.blank)), idxs)
elif (self.criterion_type == CriterionType.ASG):
idxs = filter((lambda x: (x >= 0)), idxs)
idxs = unpack_replabels(list(idxs), self.tgt_dict, self.max_replabel)
return torch.LongTensor(list(idxs)) |
def default_centerness_model(shared_model, pyramid_feature_size=256, name='centerness_submodel'):
options = {'kernel_size': 3, 'strides': 1, 'padding': 'same'}
inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size))
outputs = shared_model(inputs)
outputs = keras.layers.Conv2D(filters=1, kernel_initializer=keras.initializers.normal(mean=0.0, stddev=0.01, seed=None), bias_initializer='zeros', name='pyramid_centerness', **options)(outputs)
outputs = keras.layers.Reshape(((- 1), 1), name='pyramid_centerness_reshape')(outputs)
outputs = keras.layers.Activation('sigmoid', name='pyramid_centerness_sigmoid')(outputs)
return keras.models.Model(inputs=inputs, outputs=outputs, name=name) |
class ConvNeXtBlock(nn.Module):
def __init__(self, in_chs: int, out_chs: Optional[int]=None, kernel_size: int=7, stride: int=1, dilation: Tuple[(int, int)]=(1, 1), cfg: MaxxVitConvCfg=MaxxVitConvCfg(), conv_mlp: bool=True, drop_path: float=0.0):
super().__init__()
out_chs = (out_chs or in_chs)
act_layer = get_act_layer(cfg.act_layer)
if conv_mlp:
norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
mlp_layer = ConvMlp
else:
assert ('layernorm' in cfg.norm_layer)
norm_layer = LayerNorm
mlp_layer = Mlp
self.use_conv_mlp = conv_mlp
if (stride == 2):
self.shortcut = Downsample2d(in_chs, out_chs)
elif (in_chs != out_chs):
self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias)
else:
self.shortcut = nn.Identity()
assert (cfg.stride_mode in ('pool', 'dw'))
(stride_pool, stride_dw) = (1, 1)
if (cfg.stride_mode == 'pool'):
stride_pool = stride
else:
stride_dw = stride
if (stride_pool == 2):
self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
else:
self.down = nn.Identity()
self.conv_dw = create_conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1], depthwise=True, bias=cfg.output_bias)
self.norm = norm_layer(out_chs)
self.mlp = mlp_layer(out_chs, int((cfg.expand_ratio * out_chs)), bias=cfg.output_bias, act_layer=act_layer)
if conv_mlp:
self.ls = (LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity())
else:
self.ls = (LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity())
self.drop_path = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
def forward(self, x):
shortcut = self.shortcut(x)
x = self.down(x)
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
x = self.ls(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = self.ls(x)
x = x.permute(0, 3, 1, 2)
x = (self.drop_path(x) + shortcut)
return x |
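# A smoke-test sketch for ConvNeXtBlock: push a dummy feature map through a
# stride-2 block. Assumes the timm-style helpers referenced above
# (MaxxVitConvCfg, Downsample2d, create_conv2d, ...) are importable.
def convnext_block_smoke_test():
    block = ConvNeXtBlock(in_chs=64, out_chs=128, stride=2)
    x = torch.randn(2, 64, 32, 32)
    y = block(x)
    print('output shape:', tuple(y.shape))  # stride 2 should halve H and W: (2, 128, 16, 16)
    return y |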
class Cluster(pg_api.Cluster):
driver = pg_driver.default
installation = None
data_directory = None
DEFAULT_CLUSTER_ENCODING = DEFAULT_CLUSTER_ENCODING
DEFAULT_CONFIG_FILENAME = DEFAULT_CONFIG_FILENAME
DEFAULT_PID_FILENAME = DEFAULT_PID_FILENAME
DEFAULT_HBA_FILENAME = DEFAULT_HBA_FILENAME
    @property
    def state(self):
if self.running():
return 'running'
if (not os.path.exists(self.data_directory)):
return 'void'
return 'stopped'
def _e_metas(self):
state = self.state
(yield (None, (('[' + state) + ']')))
if (state == 'running'):
            (yield ('pid', self.pid))
    @property
    def daemon_path(self):
return (self.installation.postmaster or self.installation.postgres)
def get_pid_from_file(self):
try:
path = os.path.join(self.data_directory, self.DEFAULT_PID_FILENAME)
with open(path) as f:
return int(f.readline())
except IOError as e:
if (e.errno in (errno.EIO, errno.ENOENT)):
return None
    @property
    def pid(self):
pid = self.get_pid_from_file()
if (pid is None):
d = self.daemon_process
if (d is not None):
return d.pid
return pid
    @property
    def settings(self):
if (not hasattr(self, '_settings')):
self._settings = configfile.ConfigFile(self.pgsql_dot_conf)
return self._settings
def hba_file(self, join=os.path.join):
return self.settings.get('hba_file', join(self.data_directory, self.DEFAULT_HBA_FILENAME))
def __init__(self, installation, data_directory):
self.installation = installation
self.data_directory = os.path.abspath(data_directory)
self.pgsql_dot_conf = os.path.join(self.data_directory, self.DEFAULT_CONFIG_FILENAME)
self.daemon_process = None
self.daemon_command = None
def __repr__(self, format='{mod}.{name}({ins!r}, {dir!r})'.format):
return format(type(self).__module__, type(self).__name__, self.installation, self.data_directory)
def __enter__(self):
self.start()
self.wait_until_started()
return self
def __exit__(self, typ, val, tb):
self.stop()
self.wait_until_stopped()
def init(self, password=None, timeout=None, **kw):
initdb = self.installation.initdb
if (initdb is None):
initdb = (self.installation.pg_ctl, 'initdb')
else:
initdb = (initdb,)
if (None in initdb):
raise ClusterInitializationError('unable to find executable for cluster initialization', details={'detail': "The installation does not have 'initdb' or 'pg_ctl'."}, creator=self)
kw.setdefault('encoding', self.DEFAULT_CLUSTER_ENCODING)
opts = []
for x in kw:
if (x in ('logfile', 'extra_arguments')):
continue
if (x not in initdb_option_map):
raise TypeError(('got an unexpected keyword argument %r' % (x,)))
opts.append(initdb_option_map[x])
opts.append(kw[x])
logfile = (kw.get('logfile') or sp.PIPE)
extra_args = tuple([str(x) for x in kw.get('extra_arguments', ())])
supw_file = ()
supw_tmp = None
p = None
try:
if (password is not None):
supw_tmp = namedtemp(encoding=get_python_name(kw['encoding']))
supw_tmp.write(password)
supw_tmp.flush()
supw_file = (('--pwfile=' + supw_tmp.name),)
cmd = ((((initdb + ('-D', self.data_directory)) + tuple(opts)) + supw_file) + extra_args)
p = sp.Popen(cmd, close_fds=close_fds, bufsize=(1024 * 5), stdin=None, stdout=logfile, stderr=sp.PIPE)
try:
(stdout, stderr) = p.communicate(timeout=timeout)
except sp.TimeoutExpired:
p.kill()
(stdout, stderr) = p.communicate()
finally:
rc = p.returncode
if (rc != 0):
r = stderr
try:
msg = r.decode('utf-8')
except UnicodeDecodeError:
msg = os.linesep.join([repr(x)[2:(- 1)] for x in r.splitlines()])
raise InitDBError('initdb exited with non-zero status', details={'command': cmd, 'stderr': msg, 'stdout': msg}, creator=self)
finally:
if (supw_tmp is not None):
n = supw_tmp.name
supw_tmp.close()
if os.path.exists(n):
os.unlink(n)
def drop(self):
if self.running():
self.shutdown()
try:
self.wait_until_stopped()
except ClusterTimeoutError:
self.kill()
try:
self.wait_until_stopped()
except ClusterTimeoutError:
ClusterWarning('cluster failed to shutdown after kill', details={'hint': 'Shared memory may have been leaked.'}, creator=self).emit()
for (root, dirs, files) in os.walk(self.data_directory, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(self.data_directory)
def start(self, logfile=None, settings=None):
if self.running():
return
        cmd = [self.daemon_path, '-D', self.data_directory]  # list: settings flags are appended below
if (settings is not None):
for (k, v) in dict(settings).items():
cmd.append('--{k}={v}'.format(k=k, v=v))
p = sp.Popen(cmd, close_fds=close_fds, bufsize=1024, stdout=(sp.PIPE if (logfile is None) else logfile), stderr=sp.STDOUT, stdin=sp.PIPE)
if (logfile is None):
p.stdout.close()
p.stdin.close()
self.daemon_process = p
self.daemon_command = cmd
def restart(self, logfile=None, settings=None, timeout=10):
if self.running():
self.stop()
self.wait_until_stopped(timeout=timeout)
if self.running():
raise ClusterError('failed to shutdown cluster', creator=self)
self.start(logfile=logfile, settings=settings)
self.wait_until_started(timeout=timeout)
def reload(self):
pid = self.pid
if (pid is not None):
try:
pg_kill(pid, signal.SIGHUP)
except OSError as e:
if (e.errno != errno.ESRCH):
raise
def stop(self):
pid = self.pid
if (pid is not None):
try:
pg_kill(pid, signal.SIGTERM)
except OSError as e:
if (e.errno != errno.ESRCH):
raise
def shutdown(self):
pid = self.pid
if (pid is not None):
try:
pg_kill(pid, signal.SIGINT)
except OSError as e:
if (e.errno != errno.ESRCH):
raise
def kill(self):
pid = self.pid
if (pid is not None):
try:
pg_kill(pid, signal.SIGKILL)
except OSError as e:
if (e.errno != errno.ESRCH):
raise
def initialized(self):
if (os.path.isdir(self.data_directory) and os.path.exists(self.pgsql_dot_conf) and os.path.isdir(os.path.join(self.data_directory, 'base'))):
return True
return False
def running(self):
if (self.daemon_process is not None):
r = self.daemon_process.poll()
if (r is not None):
pid = self.get_pid_from_file()
if (pid is not None):
self.daemon_process = None
return self.running()
return False
else:
return True
else:
pid = self.get_pid_from_file()
if (pid is None):
return False
try:
pg_kill(pid, signal.SIG_DFL)
except OSError as e:
if (e.errno != errno.ESRCH):
raise
return False
return True
def connector(self, **kw):
(host, port) = self.address()
return self.driver.fit(host=(host or 'localhost'), port=(port or 5432), **kw)
def connection(self, **kw):
return self.connector(**kw)()
def connect(self, **kw):
if (not self.running()):
raise ClusterNotRunningError('cannot connect if cluster is not running', creator=self)
x = self.connection(**kw)
x.connect()
return x
def address(self):
d = self.settings.getset(('listen_addresses', 'port'))
if (d.get('listen_addresses') is not None):
addrs = d.get('listen_addresses').lower().split(',')
if (('localhost' in addrs) or ('*' in addrs)):
host = 'localhost'
elif ('127.0.0.1' in addrs):
host = '127.0.0.1'
elif ('::1' in addrs):
host = '::1'
else:
host = addrs[0]
else:
host = None
return (host, d.get('port'))
def ready_for_connections(self):
if (not self.running()):
return False
e = None
(host, port) = self.address()
connection = self.driver.fit(user=' -*- ping -*- ', host=host, port=port, database='template1', sslmode='disable')()
try:
connection.connect()
except pg_exc.ClientCannotConnectError as err:
for attempt in err.database.failures:
x = attempt.error
if (self.installation.version_info[:2] < (8, 1)):
if isinstance(x, (pg_exc.UndefinedObjectError, pg_exc.AuthenticationSpecificationError)):
return True
elif isinstance(x, pg_exc.AuthenticationSpecificationError):
return True
if isinstance(x, (pg_exc.CFError, pg_exc.ProtocolError)):
raise x
if isinstance(x, pg_exc.ServerNotReadyError):
e = x
break
else:
e = err
return (e if (e is not None) else True)
def wait_until_started(self, timeout=10, delay=0.05):
start = time.time()
checkpoint = start
while True:
if (not self.running()):
if (self.daemon_process is not None):
r = self.daemon_process.returncode
if (r is not None):
raise ClusterStartupError('postgres daemon terminated', details={'RESULT': r, 'COMMAND': self.daemon_command}, creator=self)
else:
raise ClusterNotRunningError('postgres daemon has not been started', creator=self)
r = self.ready_for_connections()
checkpoint = time.time()
if (r is True):
break
if ((checkpoint - start) >= timeout):
if ((r is not None) and isinstance(r, pg_exc.ServerNotReadyError)):
raise r
e = ClusterTimeoutError('timeout on startup', creator=self)
if (r not in (True, False)):
raise e from r
raise e
time.sleep(delay)
def wait_until_stopped(self, timeout=10, delay=0.05):
start = time.time()
while (self.running() is True):
if (self.daemon_process is not None):
self.last_exit_code = self.daemon_process.poll()
else:
self.last_exit_code = pg_kill(self.get_pid_from_file(), 0)
if ((time.time() - start) >= timeout):
raise ClusterTimeoutError('timeout on shutdown', creator=self)
time.sleep(delay) |
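# A hedged usage sketch for Cluster: initialize a scratch data directory, run
# the server for the duration of a with-block, and open a connection. The
# `installation` object and connection keywords follow py-postgresql
# conventions; the credentials and query are placeholders.
def run_scratch_cluster(installation, data_dir):
    cluster = Cluster(installation, data_dir)
    if not cluster.initialized():
        cluster.init(password='secret')
    with cluster:
        # __enter__ starts the daemon and waits until it accepts connections
        db = cluster.connect(user='postgres', password='secret', database='template1')
        try:
            return db.prepare('SELECT 1').first()
        finally:
            db.close() |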
def is_typed_callable(c: (Type | None)) -> bool:
c = get_proper_type(c)
if ((not c) or (not isinstance(c, CallableType))):
return False
return (not all(((isinstance(t, AnyType) and (t.type_of_any == TypeOfAny.unannotated)) for t in get_proper_types((c.arg_types + [c.ret_type]))))) |
@pytest.mark.parametrize('artifacts', [AM2RArtifactConfig(False, False, True, 5), AM2RArtifactConfig(True, False, True, 10), AM2RArtifactConfig(False, True, True, 15), AM2RArtifactConfig(True, True, True, 6)])
def test_assign_pool_results_prefer_anywhere(am2r_game_description, am2r_configuration, artifacts):
patches = GamePatches.create_from_game(am2r_game_description, 0, dataclasses.replace(am2r_configuration, artifacts=artifacts))
pool_results = pool_creator.calculate_pool_results(patches.configuration, patches.game)
initial_starting_place = copy(pool_results.to_place)
result = AM2RBootstrap().assign_pool_results(Random(8000), patches, pool_results)
shuffled_dna = [pickup for pickup in pool_results.to_place if (pickup.pickup_category == METROID_DNA_CATEGORY)]
assert (pool_results.to_place == initial_starting_place)
assert (len(shuffled_dna) == artifacts.required_artifacts)
assert (result.starting_equipment == pool_results.starting)
assert (result.pickup_assignment == {}) |
def test_cli_async_reduce_fails(runner, reactor, server, capsys):
base_url = '
in_stream = ''.join((base_url.format(i) for i in [6, 2, 1]))
args = ['map', 'json.loads', 'reduce', 'toolz.curry(operator.truediv)(*x)']
with pytest.raises(subprocess.CalledProcessError):
helpers.run(args, input=in_stream.encode()).decode() |
@torch.no_grad()
def log_training(writer, params, step, d_loss, g_loss):
print(f'{int(((100.0 * step) / params.steps))}% | Step {step} :D loss: {d_loss.item():0.3f} | G loss: {g_loss.item():0.3f}')
writer.add_scalar('discriminator loss', d_loss.item(), step)
writer.add_scalar('generator loss', g_loss.item(), step) |
class ResidualParser(object):
def __init__(self, filepath, parse=True):
self.filepath = filepath
self.__residuals = OrderedDict()
if parse:
self.parse()
    def parse(self):
        try:
            with open(self.filepath, 'r') as f:
                for line in f:
                    if line.startswith('Time ='):
                        self.timestep = self.__get_time(line)
                        self.__residuals[self.timestep] = {}
                        self.__parse_residuals(f)
        except Exception as e:
            raise RuntimeError('Failed to parse {}:\n\t{}'.format(self.filepath, e))
def residuals(self):
return self.__residuals
def time_range(self):
_times = self.get_times()
return (_times[0], _times[(- 1)])
    def get_times(self):
        return list(self.__residuals.keys())
def get_residuals(self, quantity, time_range):
if (quantity not in self.quantities):
print('Invalid quantity [{}]. Try from the list below:\n{}'.format(quantity, self.quantities))
return ()
if (not time_range):
            return (v[quantity] for v in self.__residuals.values())
else:
            available_time_range = self.time_range()
try:
t0 = max(available_time_range[0], time_range[0])
t1 = min(available_time_range[1], time_range[1])
except IndexError as e:
raise ValueError('Failed to read time_range:\n{}'.format(e))
            return (self.__residuals[int(t)][quantity] for t in range(t0, t1))
    @staticmethod
    def __get_time(line):
return int(line.split('Time =')[(- 1)])
def __parse_residuals(self, f):
for line in f:
if (not line.startswith('Time =')):
try:
(q, ir, fr, ni) = line.split(': Solving for ')[1].split(',')
self.__residuals[self.timestep][q] = ir.split('= ')[(- 1)]
except IndexError:
pass
else:
self.timestep = self.__get_time(line)
self.__residuals[self.timestep] = {}
self.quantities = self.__residuals[self.timestep].keys() |
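# A hedged usage sketch: pull one quantity's residual history out of an
# OpenFOAM-style solver log. The log name and quantity are placeholders.
def pressure_residual_history(logfile='log.simpleFoam'):
    parser = ResidualParser(logfile)
    (t0, t1) = parser.time_range()
    return list(parser.get_residuals('p', (t0, t1))) |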
class Registry():
def __init__(self, data: object, data_reversed: object=None) -> None:
self.data_reversed = data_reversed
if isinstance(data, (dict, Map)):
self.data = make_immutable(data)
if (data_reversed is None):
self.data_reversed = make_immutable({v: k for (k, v) in data.items()})
elif isinstance(data, (list, tuple)):
self.data_reversed = data
self.data = make_immutable({v: i for (i, v) in enumerate(self.data_reversed)})
else:
raise TypeError("Creating a registry from something other than a dict, Map, tuple, or list isn't supported")
def encode(self, key: object) -> object:
return self.data[key]
def decode(self, value: object) -> object:
return self.data_reversed[value] |
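# A minimal usage sketch for Registry (assumes the make_immutable helper used
# above is importable): a sequence maps values to indices and back.
status_registry = Registry(['pending', 'active', 'closed'])
assert status_registry.encode('active') == 1
assert status_registry.decode(2) == 'closed' |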
def _join_lexemes(lexemes, links):
EXCLUDED_LINK_TYPES = set(['7', '21', '23', '27'])
moves = dict()
def move_lexeme(from_id, to_id):
lm = lexemes[str(from_id)]
while (to_id in moves):
to_id = moves[to_id]
lexemes[str(to_id)].extend(lm)
del lm[:]
moves[from_id] = to_id
for (link_start, link_end, type_id) in links:
if (type_id in EXCLUDED_LINK_TYPES):
continue
move_lexeme(link_end, link_start)
lex_ids = sorted(lexemes.keys(), key=int)
return [lexemes[lex_id] for lex_id in lex_ids if lexemes[lex_id]] |
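# A small worked example for _join_lexemes: a link of a non-excluded type
# merges the end lexeme into the start lexeme, while excluded link types
# ('7', '21', '23', '27') are skipped; emptied entries are dropped.
lexemes = {'1': ['run'], '2': ['runs'], '3': ['ran']}
links = [(1, 2, '5'), (1, 3, '7')]
merged = _join_lexemes(lexemes, links)
assert merged == [['run', 'runs'], ['ran']] |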
def create_sdf_obj(sdfcommand, marching_cube_command, norm_mesh_dir, sdf_dir, obj, res, iso_val, expand_rate, indx, ish5, normalize, num_sample, bandwidth, max_verts, g, reduce):
    if (FLAGS.dset in ('abc', 'building')):
        model_id = os.path.basename(obj).replace('.obj', '')
    elif (FLAGS.dset in ('pix3d', 'shapenet')):
        model_id = obj.split('/')[(- 2)]
norm_mesh_sub_dir = os.path.join(norm_mesh_dir, model_id)
sdf_sub_dir = os.path.join(sdf_dir, model_id)
if (not os.path.exists(norm_mesh_sub_dir)):
os.makedirs(norm_mesh_sub_dir)
if (not os.path.exists(sdf_sub_dir)):
os.makedirs(sdf_sub_dir)
if (FLAGS.dset == 'pix3d'):
obj_basename = os.path.basename(obj).replace('.obj', '')
sdf_name = obj_basename.replace('model', 'isosurf')
flag_name = obj_basename.replace('model', 'isinsideout')
h5_name = obj_basename.replace('model', 'ori_sample_grid')
sdf_file = os.path.join(sdf_sub_dir, f'{sdf_name}.sdf')
flag_file = os.path.join(sdf_sub_dir, f'{flag_name}.txt')
h5_file = os.path.join(sdf_sub_dir, f'{h5_name}.h5')
else:
sdf_file = os.path.join(sdf_sub_dir, 'isosurf.sdf')
flag_file = os.path.join(sdf_sub_dir, 'isinsideout.txt')
h5_file = os.path.join(sdf_sub_dir, 'ori_sample_grid.h5')
    if (ish5 and os.path.exists(h5_file) and (not os.path.exists(flag_file))):
        print('skipping existing: ', h5_file)
    elif ((not ish5) and os.path.exists(sdf_file)):
        print('skipping existing: ', sdf_file)
else:
model_file = os.path.join(obj)
print('creating', sdf_file)
if normalize:
(norm_obj_file, centroid, m) = get_normalize_mesh(model_file, norm_mesh_sub_dir)
create_one_sdf(sdfcommand, res, expand_rate, sdf_file, norm_obj_file, indx, g=g)
if ish5:
create_h5_sdf_pt(h5_file, sdf_file, flag_file, norm_obj_file, centroid, m, res, num_sample, bandwidth, iso_val, max_verts, normalize, reduce=reduce) |
def _nonlin_solver(fcn, x0, params, method, alpha=None, uv0=None, max_rank=None, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, line_search=True, verbose=False, custom_terminator=None, **unused):
if (method == 'broyden1'):
jacobian = BroydenFirst(alpha=alpha, uv0=uv0, max_rank=max_rank)
elif (method == 'broyden2'):
jacobian = BroydenSecond(alpha=alpha, uv0=uv0, max_rank=max_rank)
elif (method == 'linearmixing'):
jacobian = LinearMixing(alpha=alpha)
else:
raise RuntimeError(('Unknown method: %s' % method))
if (maxiter is None):
maxiter = (100 * (torch.numel(x0) + 1))
if (line_search is True):
line_search = 'armijo'
elif (line_search is False):
line_search = None
x_is_complex = torch.is_complex(x0)
def _ravel(x: torch.Tensor) -> torch.Tensor:
if x_is_complex:
return torch.cat((x.real, x.imag), dim=0).reshape((- 1))
else:
return x.reshape((- 1))
def _pack(x: torch.Tensor) -> torch.Tensor:
if x_is_complex:
n = (len(x) // 2)
(xreal, ximag) = (x[:n], x[n:])
x = (xreal + (1j * ximag))
return x.reshape(xshape)
xshape = x0.shape
func = (lambda x: _ravel(fcn(_pack(x), *params)))
x = _ravel(x0)
y = func(x)
y_norm = y.norm()
stop_cond = (custom_terminator if (custom_terminator is not None) else TerminationCondition(f_tol, f_rtol, y_norm, x_tol, x_rtol))
if (y_norm == 0):
        return _pack(x)
jacobian.setup(x, y, func)
gamma = 0.9
eta_max = 0.9999
eta_threshold = 0.1
eta = 0.001
converge = False
best_ynorm = y_norm
best_x = x
best_dxnorm = x.norm()
best_iter = 0
for i in range(maxiter):
tol = min(eta, (eta * y_norm))
dx = (- jacobian.solve(y, tol=tol))
dx_norm = dx.norm()
if (dx_norm == 0):
raise ValueError('Jacobian inversion yielded zero vector. This indicates a bug in the Jacobian approximation.')
if line_search:
(s, xnew, ynew, y_norm_new) = _nonline_line_search(func, x, y, dx, search_type=line_search)
else:
s = 1.0
xnew = (x + dx)
ynew = func(xnew)
y_norm_new = ynew.norm()
if (y_norm_new < best_ynorm):
best_x = xnew
best_dxnorm = dx_norm
best_ynorm = y_norm_new
best_iter = (i + 1)
jacobian.update(xnew.clone(), ynew)
to_stop = stop_cond.check(xnew, ynew, dx)
if verbose:
if ((i < 10) or ((i % 10) == 0) or to_stop):
print(('%6d: |dx|=%.3e, |f|=%.3e' % (i, dx_norm, y_norm)))
if to_stop:
converge = True
break
eta_A = float((gamma * ((y_norm_new / y_norm) ** 2)))
gamma_eta2 = ((gamma * eta) * eta)
if (gamma_eta2 < eta_threshold):
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma_eta2))
y_norm = y_norm_new
x = xnew
y = ynew
if (not converge):
msg = ('The rootfinder does not converge after %d iterations. Best |dx|=%.3e, |f|=%.3e at iter %d' % (maxiter, best_dxnorm, best_ynorm, best_iter))
warnings.warn(ConvergenceWarning(msg))
x = best_x
return _pack(x) |
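# A hedged usage sketch: find the real cube root of 2 with Broyden's first
# method. Assumes the BroydenFirst/TerminationCondition machinery referenced
# above is importable from this module.
def cube_root_of_two():
    fcn = lambda x: (x ** 3) - 2.0
    x0 = torch.tensor([1.0])
    return _nonlin_solver(fcn, x0, params=(), method='broyden1', f_tol=1e-09)  # ~1.2599 |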
def print_bbc_warnings(keyCount, lineCount):
sys.stdout.flush()
limits_exceeded = []
severe = 0
if (keyCount >= 32768):
severe = 1
limits_exceeded.append('BeebEm 32K keystroke limit')
shadow_himem = 32768
mode7_himem = 31744
default_speech_loc = 21760
overhead_per_program_line = 4
for (page, model) in [(6400, 'Model B'), (3584, 'Master')]:
top = (((page + keyCount) + (lineCount * (overhead_per_program_line - 1))) + 2)
if (model == 'Master'):
x = " (use Speech's Sideways RAM version instead, e.g. *SRLOAD SP8000 8000 7 and reset, but sound quality might be worse)"
else:
x = ' (Speech program will be overwritten unless relocated)'
if (top > default_speech_loc):
limits_exceeded.append(('%s TOP=&%X limit%s' % (model, default_speech_loc, x)))
if (top > mode7_himem):
if (model == 'Master'):
if (top > shadow_himem):
limits_exceeded.append((model + ' 32k HIMEM limit (even for shadow modes)'))
else:
limits_exceeded.append((model + ' Mode 7 HIMEM limit (use shadow modes 128-135)'))
else:
limits_exceeded.append((model + ' Mode 7 HIMEM limit'))
if (lineCount > 32768):
limits_exceeded.append('BBC BASIC line number limit')
elif ((10 * lineCount) > 32767):
limits_exceeded.append('AUTO line number limit (try AUTO 0,1)')
if severe:
(warning, after) = ('WARNING: ', '')
else:
(warning, after) = ('Note: ', 'It should still work if pasted into BeebEm as immediate commands. ')
after = (('. ' + after) + 'See comments in lexconvert for more details.\n')
if (len(limits_exceeded) > 1):
sys.stderr.write((((warning + 'this text may be too big for the BBC Micro. The following limits were exceeded: ') + ', '.join(limits_exceeded)) + after))
elif limits_exceeded:
sys.stderr.write((((warning + 'this text may be too big for the BBC Micro because it exceeds the ') + limits_exceeded[0]) + after)) |
class SelfUpdateCommand(SelfCommand):
name = 'self update'
description = 'Updates Poetry to the latest version.'
arguments = [argument('version', 'The version to update to.', optional=True, default='latest')]
options = [option('preview', None, 'Allow the installation of pre-release versions.'), option('dry-run', None, 'Output the operations but do not execute anything (implicitly enables --verbose).')]
help = 'The <c1>self update</c1> command updates Poetry version in its current runtime environment.\n'
def _system_project_handle(self) -> int:
self.write('<info>Updating Poetry version ...</info>\n\n')
application = self.get_application()
add_command = application.find('add')
assert isinstance(add_command, AddCommand)
add_command.set_env(self.env)
application.configure_installer_for_command(add_command, self.io)
argv = ['add', f"{self.argument('version')}"]
if self.option('dry-run'):
argv.append('--dry-run')
if self.option('preview'):
argv.append('--allow-prereleases')
exit_code: int = add_command.run(IO(StringInput(' '.join(argv)), self.io.output, self.io.error_output))
return exit_code |
@torch.no_grad()
def predict(part):
loader = lib.IndexLoader(D.size(part), args['training']['eval_batch_size'], False, device)
preds = []
for idx in loader:
(_, out) = net_ensemble.forward(X_num[part][idx], (None if (X_cat is None) else X_cat[part][idx]))
preds.append(out)
return torch.cat(preds).cpu() |
def policy_training(device='cuda'):
noiseset = [35, 45, 55]
seed_torch(seed=args.seed)
model = DnCNN_DS(channels=1, num_of_layers=args.num_of_layers)
model = torch.nn.DataParallel(model).cuda()
if os.path.exists(os.path.join(args.outf, 'net.pth')):
print('Loading denoise model...')
model.load_state_dict(torch.load(os.path.join(args.outf, 'net.pth')))
else:
        print('Need the pretrained denoising model!')
return
print('Loading dataset ...\n')
dataset_train = load_imgs('train')
total_train = len(dataset_train)
val_size = int((total_train * args.val_ratio))
indices = list(range(total_train))
random.Random(0).shuffle(indices)
np.save(os.path.join(args.outf, 'policy_train_indices.npy'), np.array(indices))
val_idx = indices[:val_size]
train_idx = indices[val_size:]
train_loader = DataLoader(dataset=dataset_train, num_workers=args.num_workers, sampler=sampler.SubsetRandomSampler(train_idx), batch_size=args.batch_size, shuffle=False)
val_loader = DataLoader(dataset=dataset_train, num_workers=args.num_workers, sampler=sampler.SubsetRandomSampler(val_idx), batch_size=1, shuffle=False)
print('Training data size: ', len(train_loader.dataset))
dataset_val = Dataset(train=False)
test_loader_12 = DataLoader(dataset=dataset_val, num_workers=4, batch_size=1, shuffle=False)
dataset_test = load_imgs('Set68')
test_loader = DataLoader(dataset=dataset_test, num_workers=4, batch_size=1, shuffle=False)
model.eval()
p_true_all = list()
psnr_all = list()
np.random.seed(seed=args.seed)
test_noiseL = np.random.choice(noiseset, size=len(val_loader.dataset))
print('Average noise level: ', np.average(test_noiseL))
for (i, batch) in enumerate(val_loader):
data = batch
data = data.cuda()
        noise = torch.FloatTensor(data.size()).normal_(mean=0, std=(test_noiseL[i] / 255.0), generator=torch.manual_seed(args.seed))
noise = noise.cuda()
with torch.no_grad():
outputs = model((data + noise))
(p_true, mse_all) = PolicyKL.true_posterior(args, outputs, noise)
p_true_all.append(p_true)
p_true = torch.cat(p_true_all, dim=0)
p_det = max_onehot(p_true, dim=(- 1), device=device)
p_true = torch.mean(p_true, dim=0)
p_det_index = torch.argmax(p_det, dim=1)
print(Counter(list(p_det_index.cpu().numpy())))
p_det = torch.mean(p_det, dim=0)
train_post = {}
nz_post = {}
i = 0
for t in range(len(outputs)):
if (p_det[t] > 0.001):
train_post[i] = t
nz_post[i] = t
i += 1
del train_post[(i - 1)]
p_str = 'val p true:['
p_str += ','.join([('%0.3f' % p_true[t]) for t in nz_post.values()])
print((p_str + ']'))
p_str = 'val p true det:['
p_str += ','.join([('%0.3f' % p_det[t]) for t in nz_post.values()])
print((p_str + ']'))
print(nz_post)
if (args.policy_type == 'multiclass'):
score_net = MulticlassNet(args, nz_post, 1)
elif (args.policy_type == 'sequential'):
score_net = MulticlassNet(args, train_post, 1)
else:
print('Model not implemented!!')
return
score_net = torch.nn.DataParallel(score_net)
score_net = score_net.cuda()
if (args.restart and os.path.exists(os.path.join(args.outf, '{}_policy_net.dump'.format(args.policy_type)))):
print('Loading previous policynet model...')
dump = os.path.join(args.outf, '{}_policy_net.dump'.format(args.policy_type))
score_net.load_state_dict(torch.load(dump))
if (args.phase == 'train'):
optimizer = optim.Adam(list(score_net.parameters()), lr=0.001, weight_decay=args.weight_decay)
milestones = [10, 20, 40, 60, 80]
gammas = [1, 1, 1, 1, 1]
scheduler = MultiStepMultiLR(optimizer, milestones=milestones, gammas=gammas)
trainer = PolicyKL(args=args, model=model, score_net=score_net, train_post=train_post, nz_post=nz_post, optimizer=optimizer, train_loader=train_loader, val_loader=val_loader, test_loader=test_loader, device=device, scheduler=scheduler)
trainer.train()
dump = os.path.join(args.outf, '{}_policy_net.dump'.format(args.policy_type))
score_net.load_state_dict(torch.load(dump))
PolicyKL.test(args=args, score_net=score_net, model=model, data_loader=test_loader, nz_post=nz_post, device=device, noiseset=[75])
print(args.outf) |
def run_procedure(event):
global flag
text01.delete(0.0, tkinter.END)
if (flag == 0):
messagebox.showinfo('Topic', "You haven't chosen the algorithm. Please choose the algorithm before running.")
return
elif ((show['text'] == '') or (show['text'] == 'file')):
messagebox.showinfo('Topic', "You haven't chosen the data source. Please choose the data source before running.")
return
elif ((show_o['text'] == '') or (show_o['text'] == 'file')):
messagebox.showinfo('Topic', "You haven't chosen the output file. Please choose the output file before running.")
return
try:
        if (flag == 1):
            text01.insert('1.0', 'Running...\n')
            HANP_Miner(filename, v3.get(), v4.get(), v2.get(), outputfilename)
        elif (flag == 2):
            text01.insert('1.0', 'Running...\n')
            HANP_df(filename, v3.get(), v4.get(), v2.get(), outputfilename)
        elif (flag == 3):
            text01.insert('1.0', 'Running...\n')
            HANP_bf(filename, v3.get(), v4.get(), v2.get(), outputfilename)
        elif (flag == 4):
            text01.insert('1.0', 'Running...\n')
            HANP_nogap(filename, v2.get(), outputfilename)
        elif (flag == 5):
            text01.insert('1.0', 'Running...\n')
            NOSEP(filename, v3.get(), v4.get(), v2.get(), outputfilename)
except Exception as e:
messagebox.showinfo('Exception or Error during Running', e.args)
else:
with open(outputfilename, 'r') as file:
result = file.read()
text01.insert('2.0', result)
flag = 0 |
def update(dt):
for i in range(len(game_objects)):
for j in range((i + 1), len(game_objects)):
obj_1 = game_objects[i]
obj_2 = game_objects[j]
if ((not obj_1.dead) and (not obj_2.dead)):
if obj_1.collides_with(obj_2):
obj_1.handle_collision_with(obj_2)
obj_2.handle_collision_with(obj_1)
to_add = []
for obj in game_objects:
obj.update(dt)
to_add.extend(obj.new_objects)
obj.new_objects = []
    for to_remove in [obj for obj in game_objects if obj.dead]:
        to_add.extend(to_remove.new_objects)  # collect spawns from the dying object itself, not the last loop variable
to_remove.delete()
game_objects.remove(to_remove)
game_objects.extend(to_add) |
def set_num_cpu_threads(out_f, num_cpus):
out_f.write('export EXP_NUM_CPU_THREADS={}\n'.format(num_cpus))
out_f.write('export OMP_NUM_THREADS=${EXP_NUM_CPU_THREADS}\n')
out_f.write('export MKL_NUM_THREADS=${EXP_NUM_CPU_THREADS}\n')
out_f.write('export NUMEXPR_NUM_THREADS=${EXP_NUM_CPU_THREADS}\n')
out_f.write('\n') |
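# A minimal usage sketch: emit the thread-count exports into a job script.
with open('job.sh', 'w') as out_f:
    out_f.write('#!/bin/bash\n')
    set_num_cpu_threads(out_f, num_cpus=8) |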
def build_profile_plot(ax, model, path_selection):
nodes = model.nodes.dataframe
links = model.links.dataframe
profile_config = {'nodes': [], 'links': [], 'path_selection': path_selection}
ground_levels = {'x': [], 'level': []}
rolling_x_pos = 0.0
for (ind, link_set) in enumerate(path_selection):
(us_node, ds_node, link_id) = link_set
if (ind == 0):
invert_el = float(nodes.loc[[us_node]].InvertElev)
profile_config['nodes'].append({'id_name': us_node, 'rolling_x_pos': rolling_x_pos, 'invert_el': invert_el})
ret = _add_node_plot(ax, rolling_x_pos, model, us_node, link_set)
ground_levels['x'].extend(ret['x'])
ground_levels['level'].extend(ret['level'])
old_rolling_x_pos = rolling_x_pos
if (links.loc[[link_id]].Type[0] == 'CONDUIT'):
rolling_x_pos += float(links.loc[[link_id]].Length)
elif (links.loc[[link_id]].Type[0] == 'WEIR'):
rolling_x_pos += DEFAULT_WEIR_LENGTH
elif (links.loc[[link_id]].Type[0] == 'ORIFICE'):
rolling_x_pos += DEFAULT_ORIFICE_LENGTH
elif (links.loc[[link_id]].Type[0] == 'PUMP'):
rolling_x_pos += DEFAULT_PUMP_LENGTH
elif (links.loc[[link_id]].Type[0] == 'OUTLET'):
rolling_x_pos += DEFAULT_OUTLET_LENGTH
invert_el = float(nodes.loc[[ds_node]].InvertElev)
profile_config['nodes'].append({'id_name': ds_node, 'rolling_x_pos': rolling_x_pos, 'invert_el': invert_el})
ret = _add_node_plot(ax, rolling_x_pos, model, ds_node, link_set)
ground_levels['x'].extend(ret['x'])
ground_levels['level'].extend(ret['level'])
ret = _add_link_plot(ax, old_rolling_x_pos, rolling_x_pos, model, link_set)
(link_mid_x, link_mid_y) = ((sum(ret['x']) / 2.0), (sum(ret['bottom']) / 2.0))
profile_config['links'].append({'id_name': link_id, 'rolling_x_pos': link_mid_x, 'midpoint_bottom': link_mid_y, 'link_type': ret['link_type'], 'mid_x': ret['mid_x'], 'mid_y': ret['mid_y']})
_add_ground_plot(ax, ground_levels)
return profile_config |
def ql_syscall_readv(ql: Qiling, fd: int, vec: int, vlen: int):
regreturn = 0
size_t_len = ql.arch.pointersize
iov = ql.mem.read(vec, ((vlen * size_t_len) * 2))
ql.log.debug('readv() CONTENT:')
for i in range(vlen):
addr = ql.unpack(iov[((i * size_t_len) * 2):(((i * size_t_len) * 2) + size_t_len)])
l = ql.unpack(iov[(((i * size_t_len) * 2) + size_t_len):(((i * size_t_len) * 2) + (size_t_len * 2))])
regreturn += l
if hasattr(ql.os.fd[fd], 'read'):
data = ql.os.fd[fd].read(l)
ql.log.debug(f'{data!r}')
ql.mem.write(addr, data)
return regreturn |
class TestEqualityOperators(unittest.TestCase, ReallyEqualMixin):
def setUp(self):
get_dummy_plugin()
def test_type_mismatch(self):
md = Metadata(pd.DataFrame({'col1': [1.0, 2.0, 3.0], 'col2': ['a', 'b', 'c'], 'col3': ['foo', 'bar', '42']}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
mdc = md.get_column('col1')
self.assertIsInstance(md, Metadata)
self.assertIsInstance(mdc, NumericMetadataColumn)
self.assertReallyNotEqual(md, mdc)
def test_id_header_mismatch(self):
data = {'col1': ['foo', 'bar'], 'col2': [42, 43]}
md1 = Metadata(pd.DataFrame(data, index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(data, index=pd.Index(['id1', 'id2'], name='ID')))
self.assertReallyNotEqual(md1, md2)
def test_source_mismatch(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md_from_artifact = artifact.view(Metadata)
md_no_artifact = Metadata(md_from_artifact.to_dataframe())
pd.testing.assert_frame_equal(md_from_artifact.to_dataframe(), md_no_artifact.to_dataframe())
self.assertReallyNotEqual(md_from_artifact, md_no_artifact)
def test_artifact_mismatch(self):
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact1.view(Metadata)
md2 = artifact2.view(Metadata)
pd.testing.assert_frame_equal(md1.to_dataframe(), md2.to_dataframe())
self.assertReallyNotEqual(md1, md2)
def test_id_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'}, index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'}, index=pd.Index(['1'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_name_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'}, index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'c': '2'}, index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_type_mismatch(self):
md1 = Metadata(pd.DataFrame({'col1': ['42', '43']}, index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame({'col1': [42, 43]}, index=pd.Index(['id1', 'id2'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_order_mismatch(self):
index = pd.Index(['id1', 'id2'], name='id')
md1 = Metadata(pd.DataFrame([[42, 'foo'], [43, 'bar']], index=index, columns=['z', 'a']))
md2 = Metadata(pd.DataFrame([['foo', 42], ['bar', 43]], index=index, columns=['a', 'z']))
self.assertReallyNotEqual(md1, md2)
def test_data_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'}, index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'}, index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_equality_without_artifact(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'}, index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '3'}, index=pd.Index(['0'], name='id')))
self.assertReallyEqual(md1, md2)
def test_equality_with_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact.view(Metadata)
md2 = artifact.view(Metadata)
self.assertReallyEqual(md1, md2)
def test_equality_with_missing_data(self):
md1 = Metadata(pd.DataFrame({'col1': [1, np.nan, 4.2], 'col2': [np.nan, 'foo', np.nan]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame({'col1': [1, np.nan, 4.2], 'col2': [np.nan, 'foo', np.nan]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertReallyEqual(md1, md2) |
class MapleCM(object):
def __init__(self, bootstrap_with=None, use_timer=False, incr=False, with_proof=False, warm_start=False):
if incr:
raise NotImplementedError('Incremental mode is not supported by MapleCM.')
self.maplesat = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof, warm_start)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.delete()
self.maplesat = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False, warm_start=False):
if (not self.maplesat):
self.maplesat = pysolvers.maplecm_new()
if bootstrap_with:
if ((type(bootstrap_with) == CNFPlus) and bootstrap_with.atmosts):
raise NotImplementedError('Atmost constraints are not supported by MapleCM')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0
self.accu_time = 0.0
if warm_start:
self.start_mode(warm=True)
if with_proof:
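# direct the solver's DRUP proof trace into a temporary file so get_proof() can replay it later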
self.prfile = tempfile.TemporaryFile()
pysolvers.maplecm_tracepr(self.maplesat, self.prfile)
def start_mode(self, warm=False):
if self.maplesat:
pysolvers.maplecm_set_start(self.maplesat, int(warm))
def delete(self):
if self.maplesat:
pysolvers.maplecm_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplecm_solve(self.maplesat, assumptions, int(MainThread.check()))
if self.use_timer:
self.call_time = (process_time() - start_time)
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplecm_solve_lim(self.maplesat, assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = (process_time() - start_time)
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
if self.maplesat:
pysolvers.maplecm_cbudget(self.maplesat, budget)
def prop_budget(self, budget):
if self.maplesat:
pysolvers.maplecm_pbudget(self.maplesat, budget)
def dec_budget(self, budget):
raise NotImplementedError('Limit on decisions is unsupported by MapleCM.')
def interrupt(self):
if self.maplesat:
pysolvers.maplecm_interrupt(self.maplesat)
def clear_interrupt(self):
if self.maplesat:
pysolvers.maplecm_clearint(self.maplesat)
def propagate(self, assumptions=[], phase_saving=0):
if self.maplesat:
if self.use_timer:
start_time = process_time()
(st, props) = pysolvers.maplecm_propagate(self.maplesat, assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = (process_time() - start_time)
self.accu_time += self.call_time
return (bool(st), (props if (props is not None) else []))
def set_phases(self, literals=[]):
if self.maplesat:
pysolvers.maplecm_setphases(self.maplesat, literals)
def get_status(self):
if self.maplesat:
return self.status
def get_model(self):
if (self.maplesat and (self.status is True)):
model = pysolvers.maplecm_model(self.maplesat)
return (model if (model is not None) else [])
def get_core(self):
if (self.maplesat and (self.status is False)):
return pysolvers.maplecm_core(self.maplesat)
def get_proof(self):
if (self.maplesat and self.prfile):
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
if self.maplesat:
return self.call_time
def time_accum(self):
if self.maplesat:
return self.accu_time
def nof_vars(self):
if self.maplesat:
return pysolvers.maplecm_nof_vars(self.maplesat)
def nof_clauses(self):
if self.maplesat:
return pysolvers.maplecm_nof_cls(self.maplesat)
def accum_stats(self):
if self.maplesat:
return pysolvers.maplecm_acc_stats(self.maplesat)
def enum_models(self, assumptions=[]):
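# model enumeration: after each SAT call, block the found model by adding the clause of its negated literals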
if self.maplesat:
done = False
while (not done):
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if (model is not None):
self.add_clause([(- l) for l in model])
(yield model)
else:
done = True
def add_clause(self, clause, no_return=True):
if self.maplesat:
res = pysolvers.maplecm_add_cl(self.maplesat, clause)
if (res == False):
self.status = False
if (not no_return):
return res
def add_atmost(self, lits, k, no_return=True):
raise NotImplementedError('Atmost constraints are not supported by MapleCM.')
def add_xor_clause(self, lits, value=True):
raise NotImplementedError('XOR clauses are supported only by CryptoMinisat')
def append_formula(self, formula, no_return=True):
if self.maplesat:
res = None
if ((type(formula) == CNFPlus) and formula.atmosts):
raise NotImplementedError('Atmost constraints are not supported by MapleCM')
for clause in formula:
res = self.add_clause(clause, no_return)
if ((not no_return) and (res == False)):
return res
if (not no_return):
return res
def supports_atmost(self):
return False |
def _torch_persistent_save(obj, f):
if isinstance(f, str):
with PathManager.open(f, 'wb') as h:
torch_persistent_save(obj, h)
return
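# file-like object: retry torch.save up to three times, logging the traceback only on the final failed attempt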
for i in range(3):
try:
return torch.save(obj, f)
except Exception:
if (i == 2):
logger.error(traceback.format_exc()) |
@for_predicate(bytearray)
class BytearrayBase64Provider(LoaderProvider, Base64DumperMixin):
_BYTES_PROVIDER = BytesBase64Provider()
def _provide_loader(self, mediator: Mediator, request: LoaderRequest) -> Loader:
request.loc_map.get_or_raise(TypeHintLoc, (lambda : CannotProvide))
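# a loader for plain bytes is built first; the bytearray loader simply wraps its output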
bytes_loader = self._BYTES_PROVIDER.apply_provider(mediator, replace(request, loc_map=request.loc_map.add(TypeHintLoc(bytes))))
def bytearray_base64_loader(data):
return bytearray(bytes_loader(data))
return bytearray_base64_loader |
def _squeezenet(version, pretrained, progress, **kwargs):
model = SqueezeNet(version, **kwargs)
if pretrained:
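# torchvision keys its pretrained weight URLs by full arch name, e.g. 'squeezenet1_0' for version '1_0'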
arch = ('squeezenet' + version)
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model |
def send_email(*, template: EmailTemplate, to: str, subject: str, from_: Optional[str]=None, variables: Optional[dict[str, str]]=None, reply_to: Optional[List[str]]=None):
from_ = (from_ or settings.DEFAULT_EMAIL_FROM)
backend = get_email_backend(settings.PYTHONIT_EMAIL_BACKEND, environment=settings.ENVIRONMENT)
backend.send_email(template=template, from_=from_, to=to, subject=subject, variables=variables, reply_to=reply_to) |
class ViewBoxMenu(QtWidgets.QMenu):
def __init__(self, view):
QtWidgets.QMenu.__init__(self)
self.view = weakref.ref(view)
self.valid = False
self.viewMap = weakref.WeakValueDictionary()
self.setTitle(translate('ViewBox', 'ViewBox options'))
self.viewAll = QtGui.QAction(translate('ViewBox', 'View All'), self)
self.viewAll.triggered.connect(self.autoRange)
self.addAction(self.viewAll)
self.ctrl = []
self.widgetGroups = []
self.dv = QtGui.QDoubleValidator(self)
for axis in 'XY':
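# build an identical options submenu for each axis from the shared Qt Designer form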
m = self.addMenu(f"{axis} {translate('ViewBox', 'axis')}")
w = QtWidgets.QWidget()
ui = ui_template.Ui_Form()
ui.setupUi(w)
a = QtWidgets.QWidgetAction(self)
a.setDefaultWidget(w)
m.addAction(a)
self.ctrl.append(ui)
wg = WidgetGroup(w)
self.widgetGroups.append(wg)
connects = [(ui.mouseCheck.toggled, 'MouseToggled'), (ui.manualRadio.clicked, 'ManualClicked'), (ui.minText.editingFinished, 'RangeTextChanged'), (ui.maxText.editingFinished, 'RangeTextChanged'), (ui.autoRadio.clicked, 'AutoClicked'), (ui.autoPercentSpin.valueChanged, 'AutoSpinChanged'), (ui.linkCombo.currentIndexChanged, 'LinkComboChanged'), (ui.autoPanCheck.toggled, 'AutoPanToggled'), (ui.visibleOnlyCheck.toggled, 'VisibleOnlyToggled')]
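# connect each signal to its per-axis handler by name, e.g. mouseCheck.toggled -> xMouseToggled / yMouseToggled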
for (sig, fn) in connects:
sig.connect(getattr(self, (axis.lower() + fn)))
self.ctrl[0].invertCheck.toggled.connect(self.xInvertToggled)
self.ctrl[1].invertCheck.toggled.connect(self.yInvertToggled)
leftMenu = self.addMenu(translate('ViewBox', 'Mouse Mode'))
group = QtGui.QActionGroup(self)
group.triggered.connect(self.setMouseMode)
pan = QtGui.QAction(translate('ViewBox', '3 button'), group)
zoom = QtGui.QAction(translate('ViewBox', '1 button'), group)
pan.setCheckable(True)
zoom.setCheckable(True)
leftMenu.addActions(group.actions())
self.mouseModes = [pan, zoom]
self.view().sigStateChanged.connect(self.viewStateChanged)
self.updateState()
def viewStateChanged(self):
self.valid = False
if (self.ctrl[0].minText.isVisible() or self.ctrl[1].minText.isVisible()):
self.updateState()
def updateState(self):
state = self.view().getState(copy=False)
if (state['mouseMode'] == ViewBox.PanMode):
self.mouseModes[0].setChecked(True)
else:
self.mouseModes[1].setChecked(True)
for i in [0, 1]:
tr = state['targetRange'][i]
self.ctrl[i].minText.setText(('%0.5g' % tr[0]))
self.ctrl[i].maxText.setText(('%0.5g' % tr[1]))
if (state['autoRange'][i] is not False):
self.ctrl[i].autoRadio.setChecked(True)
if (state['autoRange'][i] is not True):
self.ctrl[i].autoPercentSpin.setValue(int((state['autoRange'][i] * 100)))
else:
self.ctrl[i].manualRadio.setChecked(True)
self.ctrl[i].mouseCheck.setChecked(state['mouseEnabled'][i])
c = self.ctrl[i].linkCombo
c.blockSignals(True)
try:
view = state['linkedViews'][i]
if (view is None):
view = ''
ind = c.findText(view)
if (ind == (- 1)):
ind = 0
c.setCurrentIndex(ind)
finally:
c.blockSignals(False)
self.ctrl[i].autoPanCheck.setChecked(state['autoPan'][i])
self.ctrl[i].visibleOnlyCheck.setChecked(state['autoVisibleOnly'][i])
xy = ['x', 'y'][i]
self.ctrl[i].invertCheck.setChecked(state.get((xy + 'Inverted'), False))
self.valid = True
def popup(self, *args):
if (not self.valid):
self.updateState()
QtWidgets.QMenu.popup(self, *args)
def autoRange(self):
self.view().autoRange()
def xMouseToggled(self, b):
self.view().setMouseEnabled(x=b)
def xManualClicked(self):
self.view().enableAutoRange(ViewBox.XAxis, False)
def xRangeTextChanged(self):
self.ctrl[0].manualRadio.setChecked(True)
self.view().setXRange(*self._validateRangeText(0), padding=0)
def xAutoClicked(self):
val = (self.ctrl[0].autoPercentSpin.value() * 0.01)
self.view().enableAutoRange(ViewBox.XAxis, val)
def xAutoSpinChanged(self, val):
self.ctrl[0].autoRadio.setChecked(True)
self.view().enableAutoRange(ViewBox.XAxis, (val * 0.01))
def xLinkComboChanged(self, ind):
self.view().setXLink(str(self.ctrl[0].linkCombo.currentText()))
def xAutoPanToggled(self, b):
self.view().setAutoPan(x=b)
def xVisibleOnlyToggled(self, b):
self.view().setAutoVisible(x=b)
def yMouseToggled(self, b):
self.view().setMouseEnabled(y=b)
def yManualClicked(self):
self.view().enableAutoRange(ViewBox.YAxis, False)
def yRangeTextChanged(self):
self.ctrl[1].manualRadio.setChecked(True)
self.view().setYRange(*self._validateRangeText(1), padding=0)
def yAutoClicked(self):
val = (self.ctrl[1].autoPercentSpin.value() * 0.01)
self.view().enableAutoRange(ViewBox.YAxis, val)
def yAutoSpinChanged(self, val):
self.ctrl[1].autoRadio.setChecked(True)
self.view().enableAutoRange(ViewBox.YAxis, (val * 0.01))
def yLinkComboChanged(self, ind):
self.view().setYLink(str(self.ctrl[1].linkCombo.currentText()))
def yAutoPanToggled(self, b):
self.view().setAutoPan(y=b)
def yVisibleOnlyToggled(self, b):
self.view().setAutoVisible(y=b)
def yInvertToggled(self, b):
self.view().invertY(b)
def xInvertToggled(self, b):
self.view().invertX(b)
def setMouseMode(self, action):
mode = None
if (action == self.mouseModes[0]):
mode = 'pan'
elif (action == self.mouseModes[1]):
mode = 'rect'
if (mode is not None):
self.view().setLeftButtonAction(mode)
def setViewList(self, views):
names = ['']
self.viewMap.clear()
for v in views:
name = v.name
if (name is None):
continue
names.append(name)
self.viewMap[name] = v
for i in [0, 1]:
c = self.ctrl[i].linkCombo
current = c.currentText()
c.blockSignals(True)
changed = True
try:
c.clear()
for name in names:
c.addItem(name)
if (name == current):
changed = False
c.setCurrentIndex((c.count() - 1))
finally:
c.blockSignals(False)
if changed:
c.setCurrentIndex(0)
c.currentIndexChanged.emit(c.currentIndex())
def _validateRangeText(self, axis):
inputs = (self.ctrl[axis].minText.text(), self.ctrl[axis].maxText.text())
vals = self.view().viewRange()[axis]
for (i, text) in enumerate(inputs):
try:
vals[i] = float(text)
except ValueError:
pass
return vals |
class TestLogDet():
def setup_method(self):
np.random.seed(899853)
self.op_class = LogDet
self.op = logdet
@pytensor.config.change_flags(compute_test_value='ignore')
def validate(self, input_mat):
x = pytensor.tensor.matrix()
f = pytensor.function([x], self.op(x))
out = f(input_mat)
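# reference value via SVD: log|det(A)| equals the sum of the logs of the singular values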
svd_diag = np.linalg.svd(input_mat, compute_uv=False)
numpy_out = np.sum(np.log(np.abs(svd_diag)))
assert np.allclose(numpy_out, out)
verify_grad(self.op, [input_mat])
@pytest.mark.skipif((pytensor.config.device in ['cuda', 'gpu']), reason='No logDet implementation on GPU.')
def test_basic(self):
test_case_1 = (np.random.randn(3, 3) / np.sqrt(3))
test_case_2 = (np.random.randn(10, 10) / np.sqrt(10))
self.validate(test_case_1.astype(pytensor.config.floatX))
self.validate(test_case_2.astype(pytensor.config.floatX)) |
class CompletedRequest(object):
def __init__(self, reqId, operation, taskId, status):
self.requestId = reqId
self.operation = operation
self.taskid = taskId
self.status = status
def __repr__(self):
return ('CompletedRequest: %d (%s <=> %d) == %s' % (self.requestId, self.operation, self.taskid, self.status)) |
class whisper_gpt():
def __init__(self, model_size, file):
self.model_size = model_size
self.file = file
self.model = whisper.load_model(model_size)
def transcribe(self):
self.final = self.model.transcribe(self.file)
def get_result(self):
self.transcription = self.final['text']
return self.transcription |
def full_test_loader(data_dir):
test_data = [i for i in os.listdir((data_dir + 'test/A/')) if (not i.startswith('.'))]
test_data.sort()
test_label_paths = []
if ('DSIFN' in data_dir):
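# DSIFN stores labels as .tif while the image list uses the original filenames, so rebuild the label name from the stem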
for img in test_data:
test_label_paths.append((((data_dir + 'test/label/') + img.split('.')[0]) + '.tif'))
else:
for img in test_data:
test_label_paths.append(((data_dir + 'test/label/') + img))
test_data_path = []
for img in test_data:
test_data_path.append([(data_dir + 'test/'), img])
test_dataset = {}
for cp in range(len(test_data)):
test_dataset[cp] = {'image': test_data_path[cp], 'label': test_label_paths[cp]}
return test_dataset |
class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler):
def __init__(self, positive_fraction=0.5):
if ((positive_fraction < 0) or (positive_fraction > 1)):
raise ValueError(('positive_fraction should be in range [0,1]. Received: %s.' % positive_fraction))
self._positive_fraction = positive_fraction
def subsample(self, indicator, batch_size, labels):
if (len(indicator.get_shape().as_list()) != 1):
raise ValueError(('indicator must be 1 dimensional, got a tensor of shape %s' % indicator.get_shape()))
if (len(labels.get_shape().as_list()) != 1):
raise ValueError(('labels must be 1 dimensional, got a tensor of shape %s' % labels.get_shape()))
if (labels.dtype != tf.bool):
raise ValueError(('labels should be of type bool. Received: %s' % labels.dtype))
if (indicator.dtype != tf.bool):
raise ValueError(('indicator should be of type bool. Received: %s' % indicator.dtype))
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
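# take positives up to the requested fraction of the batch, then fill the remainder with negatives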
max_num_pos = int((self._positive_fraction * batch_size))
sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
max_num_neg = (batch_size - tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32)))
sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)
sampled_idx = tf.logical_or(sampled_pos_idx, sampled_neg_idx)
return sampled_idx |
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
if (not MainWindow.objectName()):
MainWindow.setObjectName(u'MainWindow')
MainWindow.resize(1169, 667)
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(u'centralwidget')
self.label_3 = QLabel(self.centralwidget)
self.label_3.setObjectName(u'label_3')
self.label_3.setGeometry(QRect(10, 610, 21, 31))
font = QFont()
font.setPointSize(20)
self.label_3.setFont(font)
self.playerA_type = QLabel(self.centralwidget)
self.playerA_type.setObjectName(u'playerA_type')
self.playerA_type.setGeometry(QRect(150, 10, 131, 31))
font1 = QFont()
font1.setPointSize(14)
self.playerA_type.setFont(font1)
self.playerA_type.setTextFormat(Qt.PlainText)
self.playerA_type.setWordWrap(True)
self.playerB_type = QLabel(self.centralwidget)
self.playerB_type.setObjectName(u'playerB_type')
self.playerB_type.setGeometry(QRect(30, 610, 131, 31))
self.playerB_type.setFont(font1)
self.playerB_type.setTextFormat(Qt.AutoText)
self.playerB_type.setWordWrap(True)
self.field = QGraphicsView(self.centralwidget)
self.field.setObjectName(u'field')
self.field.setGeometry(QRect(10, 50, 311, 551))
self.field.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.field.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
brush = QBrush(QColor(25, 137, 100, 255))
brush.setStyle(Qt.SolidPattern)
self.field.setBackgroundBrush(brush)
self.score = QLabel(self.centralwidget)
self.score.setObjectName(u'score')
self.score.setGeometry(QRect(20, 10, 131, 31))
self.score.setFont(font)
self.label_2 = QLabel(self.centralwidget)
self.label_2.setObjectName(u'label_2')
self.label_2.setGeometry(QRect(290, 10, 21, 31))
self.label_2.setFont(font)
self.tabWidget = QTabWidget(self.centralwidget)
self.tabWidget.setObjectName(u'tabWidget')
self.tabWidget.setGeometry(QRect(340, 10, 811, 631))
self.tabWidget.setAutoFillBackground(True)
self.ball_round = QLabel(self.centralwidget)
self.ball_round.setObjectName(u'ball_round')
self.ball_round.setGeometry(QRect(180, 610, 141, 31))
self.ball_round.setFont(font)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QMenuBar(MainWindow)
self.menubar.setObjectName(u'menubar')
self.menubar.setGeometry(QRect(0, 0, 1169, 22))
MainWindow.setMenuBar(self.menubar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex((- 1))
QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QCoreApplication.translate('MainWindow', u'MainWindow', None))
self.label_3.setText(QCoreApplication.translate('MainWindow', u'B', None))
self.playerA_type.setText(QCoreApplication.translate('MainWindow', u'unknown', None))
self.playerB_type.setText(QCoreApplication.translate('MainWindow', u'unknown', None))
self.score.setText(QCoreApplication.translate('MainWindow', u'A --:-- B', None))
self.label_2.setText(QCoreApplication.translate('MainWindow', u'A', None))
self.ball_round.setText(QCoreApplication.translate('MainWindow', u'Round:1', None)) |
class TargetWrapper(BaseWrapper):
def __init__(self, item, lightnessID, lineStyleID):
super().__init__(item=item)
self.lightnessID = lightnessID
self.lineStyleID = lineStyleID
self.resistMode = TargetResistMode.auto
def getResists(self, includeLayer=False):
em = therm = kin = explo = 0
layer = None
if self.isProfile:
em = self.item.emAmount
therm = self.item.thermalAmount
kin = self.item.kineticAmount
explo = self.item.explosiveAmount
if self.isFit:
if (self.resistMode == TargetResistMode.auto):
(em, therm, kin, explo, layer) = _getAutoResists(fit=self.item)
elif (self.resistMode == TargetResistMode.shield):
(em, therm, kin, explo) = _getShieldResists(ship=self.item.ship)
elif (self.resistMode == TargetResistMode.armor):
(em, therm, kin, explo) = _getArmorResists(ship=self.item.ship)
elif (self.resistMode == TargetResistMode.hull):
(em, therm, kin, explo) = _getHullResists(ship=self.item.ship)
elif (self.resistMode == TargetResistMode.weightedAverage):
(em, therm, kin, explo) = _getWeightedResists(fit=self.item)
if includeLayer:
return (em, therm, kin, explo, layer)
else:
return (em, therm, kin, explo) |
def test_link_resolve(pytester: Pytester) -> None:
'See: ...'
sub1 = pytester.mkpydir('sub1')
p = sub1.joinpath('test_foo.py')
p.write_text(textwrap.dedent('\n import pytest\n def test_foo():\n     raise AssertionError()\n '), encoding='utf-8')
subst = subst_path_linux
if (sys.platform == 'win32'):
subst = subst_path_windows
with subst(p) as subst_p:
result = pytester.runpytest(str(subst_p), '-v')
stdout = result.stdout.str()
assert ('sub1/test_foo.py' not in stdout)
expect = (f'*{subst_p}*' if (sys.platform == 'win32') else '*sub2/test_foo.py*')
result.stdout.fnmatch_lines([expect]) |
def prune_it(p, keep_only_ema=False):
print(f'Pruning in path: {p}')
size_initial = os.path.getsize(p)
nsd = dict()
sd = torch.load(p, map_location='cpu')
print(sd.keys())
for k in sd.keys():
if (k != 'optimizer_states'):
nsd[k] = sd[k]
else:
print(f'removing optimizer states for path {p}')
if ('global_step' in sd):
print(f"This is global step {sd['global_step']}.")
if keep_only_ema:
sd = nsd['state_dict'].copy()
# EMA weights live under 'model_ema.' with the dots stripped from the rest of the parameter path
ema_keys = {k: ('model_ema.' + k[6:].replace('.', '')) for k in sd.keys() if k.startswith('model.')}
new_sd = dict()
for k in sd:
if (k in ema_keys):
new_sd[k] = sd[ema_keys[k]].half()
elif ((not k.startswith('model_ema.')) or (k in ['model_ema.num_updates', 'model_ema.decay'])):
new_sd[k] = sd[k].half()
assert (len(new_sd) == (len(sd) - len(ema_keys)))
nsd['state_dict'] = new_sd
else:
sd = nsd['state_dict'].copy()
new_sd = dict()
for k in sd:
new_sd[k] = sd[k].half()
nsd['state_dict'] = new_sd
fn = (f'{os.path.splitext(p)[0]}-pruned.ckpt' if (not keep_only_ema) else f'{os.path.splitext(p)[0]}-ema-pruned.ckpt')
print(f'saving pruned checkpoint at: {fn}')
torch.save(nsd, fn)
newsize = os.path.getsize(fn)
MSG = (f'New ckpt size: {(newsize * 1e-09):.2f} GB. ' + f'Saved {((size_initial - newsize) * 1e-09):.2f} GB by removing optimizer states')
if keep_only_ema:
MSG += ' and non-EMA weights'
print(MSG) |
class TestPairClassificationEvaluator():
def test_accuracy(self):
scores = [6.12, 5.39, 5.28, 5.94, 6.34, 6.47, 7.88, 6.62, 8.04, 5.9]
labels = [0, 0, 0, 0, 1, 0, 0, 0, 1, 0]
high_score_more_similar = True
(acc, acc_threshold) = PairClassificationEvaluator.find_best_acc_and_threshold(scores, labels, high_score_more_similar)
assert (acc == pytest.approx(0.9, TOL))
assert (acc_threshold == pytest.approx(7.95999, TOL))
def test_f1(self):
scores = [6.12, 5.39, 5.28, 5.94, 6.34, 6.47, 7.88, 6.62, 8.04, 5.9]
labels = [0, 0, 0, 0, 1, 0, 0, 0, 1, 0]
high_score_more_similar = True
(f1, precision, recall, f1_threshold) = PairClassificationEvaluator.find_best_f1_and_threshold(scores, labels, high_score_more_similar)
assert (f1 == pytest.approx(0.66666, TOL))
assert (precision == pytest.approx(1.0, TOL))
assert (recall == pytest.approx(0.5, TOL))
assert (f1_threshold == pytest.approx(7.95999, TOL))
def test_ap(self):
scores = [6.12, 5.39, 5.28, 5.94, 6.34, 6.47, 7.88, 6.62, 8.04, 5.9]
labels = [0, 0, 0, 0, 1, 0, 0, 0, 1, 0]
high_score_more_similar = True
ap = PairClassificationEvaluator.ap_score(scores, labels, high_score_more_similar)
assert (ap == pytest.approx(0.7, TOL)) |
@session_scoped
class Calculator(Base):
__tablename__ = 'dynamic_content_error_calculator'
id = Column(Integer, primary_key=True)
operand_a = Column(Integer)
operand_b = Column(Integer)
operator = Column(UnicodeText)
result = Column(Integer)
fields = ExposedNames()
fields.operand_a = (lambda i: IntegerField(label='A', required=True))
fields.operand_b = (lambda i: IntegerField(label='B', required=True))
fields.operator = (lambda i: ChoiceField([Choice('plus', Field(label='+')), Choice('divide', Field(label='÷'))], required=True))
events = ExposedNames()
events.inputs_changed = (lambda i: Event(action=Action(i.recalculate)))
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.operand_a = 1
self.operand_b = 1
self.operator = 'plus'
self.recalculate()
def is_divide_by_zero(self):
return ((self.operator == 'divide') and (self.operand_b == 0))
def recalculate(self):
if self.is_divide_by_zero():
self.result = None
raise DomainException(message="I can't divide by 0")
if (self.operator == 'plus'):
self.result = (self.operand_a + self.operand_b)
elif (self.operator == 'divide'):
self.result = int((self.operand_a / self.operand_b)) |
def build_action_prediction_dataset(args):
playthroughs = (json.loads(line.rstrip(',\n')) for line in open(args.input) if (len(line.strip()) > 1))
graph_dataset = GraphDataset()
dataset = []
for example in next_example(playthroughs):
(root, candidates) = (example[0], example[1:])
if (len(candidates) < args.min_candidates):
continue
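# compress() deduplicates graphs into a shared index and returns a compact reference, keeping the output small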
previous_graph = graph_dataset.compress(root['graph_{}'.format(args.graph_type)])
action_choices = [candidate['action'] for candidate in candidates]
current_graphs = [graph_dataset.compress(candidate['graph_{}'.format(args.graph_type)]) for candidate in candidates]
for (i, candidate) in enumerate(candidates):
dataset.append({'game': candidate['game'], 'step': candidate['step'], 'previous_graph': previous_graph, 'current_graph': current_graphs[i], 'target_action': action_choices[i], 'action_choices': action_choices})
if (args.output is None):
args.output = (os.path.splitext(args.input)[0] + '.ap.{}.json'.format(args.graph_type))
data = {'graph_index': graph_dataset.dumps(), 'examples': dataset}
with open(args.output, 'w') as f:
json.dump(data, f)
if args.verbose:
print('This dataset has {:,} datapoints.'.format(len(dataset))) |
def get_julian_day_from_gregorian_date(year, month, day):
is_leap = False
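# Gregorian leap rule: every 4th year is a leap year, except centuries unless divisible by 400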
if ((year / 4.0) == round((year / 4.0))):
if ((year / 100.0) == round((year / 100.0))):
if ((year / 400.0) == round((year / 400.0))):
is_leap = True
else:
is_leap = True
if (month == 2):
max_days = (29 if is_leap else 28)
if (day > max_days):
raise ValueError(('Invalid day: %s, it must be <= %s' % (day, max_days)))
year = float(year)
month = float(month)
day = float(day)
if (month <= 2):
year -= 1
month += 12
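# Meeus' Julian Day formula: Jan/Feb count as months 13/14 of the previous year; (2 - century + century//4) is the Gregorian correction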
century = floor((year / 100))
return ((((floor((365.25 * (year + 4716))) + floor((30.6001 * (month + 1)))) + day) + ((2 - century) + floor((century / 4)))) - 1524.5) |
def test_input_runlevels():
q = Input()
assert (not q.alive)
with pytest.raises(InactiveWritableError):
q.put('hello, unborn queue.')
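# each BEGIN raises the queue's runlevel; the queue stays writable until every BEGIN is matched by an END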
q.put(BEGIN)
assert (q.alive and (q._runlevel == 1))
q.put('foo')
q.put(BEGIN)
assert (q.alive and (q._runlevel == 2))
q.put('bar')
q.put(END)
assert (q.get() == 'foo')
assert (q.get() == 'bar')
with pytest.raises(Empty):
q.get(block=False)
assert q.alive
q.put('baz')
q.put(END)
with pytest.raises(InactiveWritableError):
q.put('foo')
assert (q.get() == 'baz')
with pytest.raises(InactiveReadableError):
q.get() |
def distributed_worker(local_rank, fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args):
if (not torch.cuda.is_available()):
raise OSError('CUDA is not available. Please check your environments')
global_rank = ((machine_rank * n_gpu_per_machine) + local_rank)
print('local_rank ', local_rank)
print('global_rank ', global_rank)
try:
dist.init_process_group(backend='NCCL', init_method=dist_url, world_size=world_size, rank=global_rank)
except Exception as exc:
raise OSError('failed to initialize NCCL groups') from exc
dist_fn.synchronize()
if (n_gpu_per_machine > torch.cuda.device_count()):
raise ValueError(f'specified n_gpu_per_machine larger than available device ({torch.cuda.device_count()})')
torch.cuda.set_device(local_rank)
if (dist_fn.LOCAL_PROCESS_GROUP is not None):
raise ValueError('torch.distributed.LOCAL_PROCESS_GROUP is not None')
n_machine = (world_size // n_gpu_per_machine)
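# create one process subgroup per machine and keep the one containing this machine's ranks as the local group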
for i in range(n_machine):
ranks_on_i = list(range((i * n_gpu_per_machine), ((i + 1) * n_gpu_per_machine)))
pg = dist.new_group(ranks_on_i)
if (i == machine_rank):
dist_fn.LOCAL_PROCESS_GROUP = pg
fn(local_rank, *args) |
def sample(experiment_directory='/home/xweiwang/RL/seq2seq/experiment', checkpoint='2019_05_18_20_32_54', resume=True, log_level='info'):
logging.basicConfig(format=LOG_FORMAT, level=getattr(logging, log_level.upper()))
logging.info('experiment_directory: %s', experiment_directory)
logging.info('checkpoint: %s', checkpoint)
(seq2seq, input_vocab, output_vocab) = load_checkpoint(experiment_directory, checkpoint)
predictor = Predictor(seq2seq, input_vocab, output_vocab)
while True:
seq_str = input('Type in a source sequence: ')
seq = seq_str.strip().split()
print(predictor.predict(seq)) |
def test_output_parent_function_json_with_sample_data_bundle(sample_data_bundle):
output_parent_function_json(sample_data_bundle)
with open('rules_classification.json', 'r') as classification_report:
report = json.load(classification_report)
assert (len(report['rules_classification']) == 2)
first_item = report['rules_classification'][0]
second_item = report['rules_classification'][1]
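# the report order is unspecified, so swap the two entries into a canonical order before asserting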
if (first_item['parent'] != 'Lcom/google/progress/Locate;getLocation'):
(first_item, second_item) = (second_item, first_item)
assert (set(first_item['crime']) == {'The Crime'})
assert (set(second_item['crime']) == {'Call Lcom/google/progress/Locate;getLocation', 'Another Crime'})
assert (second_item['parent'] == 'Lcom/google/progress/AndroidClientService;sendMessage') |
class ForDictionaryCommon(ForGenerator):
dict_next_op: ClassVar[CFunctionDescription]
dict_iter_op: ClassVar[CFunctionDescription]
def need_cleanup(self) -> bool:
return True
def init(self, expr_reg: Value, target_type: RType) -> None:
builder = self.builder
self.target_type = target_type
self.expr_target = builder.maybe_spill(expr_reg)
offset = Integer(0)
self.offset_target = builder.maybe_spill_assignable(offset)
self.size = builder.maybe_spill(self.load_len(self.expr_target))
iter_reg = builder.call_c(self.dict_iter_op, [expr_reg], self.line)
self.iter_target = builder.maybe_spill(iter_reg)
def gen_condition(self) -> None:
builder = self.builder
line = self.line
self.next_tuple = self.builder.call_c(self.dict_next_op, [builder.read(self.iter_target, line), builder.read(self.offset_target, line)], line)
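# next_tuple holds the continue flag at index 0 and the new offset at index 1; advance the offset, then branch on the flag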
new_offset = builder.add(TupleGet(self.next_tuple, 1, line))
builder.assign(self.offset_target, new_offset, line)
should_continue = builder.add(TupleGet(self.next_tuple, 0, line))
builder.add(Branch(should_continue, self.body_block, self.loop_exit, Branch.BOOL))
def gen_step(self) -> None:
builder = self.builder
line = self.line
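# re-check the dict's size each step to catch mutation during iteration, mirroring CPython's RuntimeError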
builder.call_c(dict_check_size_op, [builder.read(self.expr_target, line), builder.read(self.size, line)], line)
def gen_cleanup(self) -> None:
self.builder.call_c(no_err_occurred_op, [], self.line) |
def get_test_data():
data_fname = (TEST_DATA_DIR / 'titanic.csv')
data_fname.parent.mkdir(parents=True, exist_ok=True)
if (not data_fname.exists()):
data = pd.read_csv('...', index_col=0)  # remote dataset URL elided
data.to_csv(data_fname)
else:
data = pd.read_csv(data_fname, index_col=0)
data = data.drop('Name', axis=1)
return data |
def train(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
model.train()
for (batch, (X, y)) in enumerate(dataloader):
(X, y) = (X.to(device), y.to(device))
pred = model(X)
loss = loss_fn(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if ((batch % 100) == 0):
(loss, current) = (loss.item(), (batch * len(X)))
print(f'loss: {loss:>7f} [{current:>5d}/{size:>5d}]') |
def download_object_model(model_name, owner, version=1):
file_tree_response = get_model_file_tree(model_name, owner, version)
file_tree_response = file_tree_response.json()
model_path = (DOWNLOAD_PATH + model_name)
make_directories(model_path)
make_directories(GLB_DIR)
texture_file_path = ''
meshes_path = ''
thumbnail_path = ''
for files in file_tree_response['file_tree']:
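# 'materials' nests its textures one level deeper, 'meshes'/'thumbnails' hold geometry and previews, and anything else is a plain file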
name = files['name']
if ('children' in files.keys()):
children = files['children']
if (name == 'materials'):
children = children[0]['children']
for child_object in children:
file_path = child_object['path']
output_path = (model_path + file_path)
texture_file_path = output_path
directory_path = (model_path + '/'.join(file_path.split('/')[:(- 1)]))
make_directories(directory_path)
url_encoded_file_path = urllib.parse.quote_plus(file_path[1:])
download_url = (API_ENDPOINT + DOWNLOAD_API.format(owner, model_name, version, url_encoded_file_path))
download_file(download_url, output_path)
elif (name in ['meshes', 'thumbnails']):
for child_object in children:
file_path = child_object['path']
output_path = (model_path + file_path)
directory_path = (model_path + '/'.join(file_path.split('/')[:(- 1)]))
make_directories(directory_path)
if (name == 'meshes'):
meshes_path = directory_path
if (name == 'thumbnails'):
thumbnail_path = output_path
url_encoded_file_path = urllib.parse.quote_plus(file_path[1:])
download_url = (API_ENDPOINT + DOWNLOAD_API.format(owner, model_name, version, url_encoded_file_path))
download_file(download_url, output_path)
else:
file_path = files['path']
output_path = (model_path + file_path)
directory_path = (model_path + '/'.join(file_path.split('/')[:(- 1)]))
make_directories(directory_path)
url_encoded_file_path = urllib.parse.quote_plus(file_path[1:])
download_url = (API_ENDPOINT + DOWNLOAD_API.format(owner, model_name, version, url_encoded_file_path))
download_file(download_url, output_path)
copy_file(texture_file_path, meshes_path)
object_file = (meshes_path + '/model.obj')
output_glb_path = (GLB_DIR + '/{}.glb'.format(model_name))
object_config_path = (GLB_DIR + '/{}.object_config.json'.format(model_name))
icon_path = (GLB_DIR + '/{}.png'.format(model_name))
create_object_config(output_glb_path, object_config_path)
copy_file(thumbnail_path, icon_path) |
def import_MarketDuke_nodistractors(data_dir, dataset_name):
dataset_dir = os.path.join(data_dir, dataset_name)
if (not os.path.exists(dataset_dir)):
print((('Please Download ' + dataset_name) + ' Dataset'))
dataset_dir = os.path.join(data_dir, dataset_name)
data_group = ['train', 'query', 'gallery']
for group in data_group:
if (group == 'train'):
name_dir = os.path.join(dataset_dir, 'bounding_box_train')
elif (group == 'query'):
name_dir = os.path.join(dataset_dir, 'query')
else:
name_dir = os.path.join(dataset_dir, 'bounding_box_test')
file_list = sorted(os.listdir(name_dir))
globals()[group] = {}
globals()[group]['data'] = []
globals()[group]['ids'] = []
for name in file_list:
if (name[(- 3):] == 'jpg'):
id = name.split('_')[0]
cam = name.split('_')[1][1]
images = os.path.join(name_dir, name)
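# skip junk (-1) and distractor (0000) identities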
if ((id != '0000') and (id != '-1')):
if (id not in globals()[group]['ids']):
globals()[group]['ids'].append(id)
globals()[group]['data'].append([images, globals()[group]['ids'].index(id), id, cam, name.split('.')[0]])
return (train, query, gallery) |
def test_deployment_create(project, resp_deployment_create):
deployment = project.deployments.create({'environment': 'Test', 'sha': '1agf4gs', 'ref': 'main', 'tag': False, 'status': 'created'})
assert (deployment.id == 42)
assert (deployment.status == 'success')
assert (deployment.ref == 'main')
deployment.status = 'failed'
deployment.save()
assert (deployment.status == 'failed') |
@pytest.mark.supported(only_if=(lambda backend: backend.rsa_encryption_supported(padding.PKCS1v15())), skip_message='Does not support PKCS1v1.5 for encryption.')
@wycheproof_tests('rsa_pkcs1_2048_test.json', 'rsa_pkcs1_3072_test.json', 'rsa_pkcs1_4096_test.json')
def test_rsa_pkcs1_encryption(backend, wycheproof):
key = wycheproof.cache_value_to_group('cached_key', (lambda : serialization.load_pem_private_key(wycheproof.testgroup['privateKeyPem'].encode('ascii'), password=None, unsafe_skip_rsa_key_validation=True)))
assert isinstance(key, rsa.RSAPrivateKey)
if wycheproof.valid:
pt = key.decrypt(binascii.unhexlify(wycheproof.testcase['ct']), padding.PKCS1v15())
assert (pt == binascii.unhexlify(wycheproof.testcase['msg']))
elif backend._lib.Cryptography_HAS_IMPLICIT_RSA_REJECTION:
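# with implicit rejection, an invalid PKCS#1 v1.5 ciphertext decrypts to a deterministic pseudorandom message instead of raising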
try:
assert (key.decrypt(binascii.unhexlify(wycheproof.testcase['ct']), padding.PKCS1v15()) != binascii.unhexlify(wycheproof.testcase['ct']))
except ValueError:
pass
else:
with pytest.raises(ValueError):
key.decrypt(binascii.unhexlify(wycheproof.testcase['ct']), padding.PKCS1v15()) |
class TestHDF5BasicIO():
def test_write_and_read(self, tmp_path, rng):
file_name = str((tmp_path / 'test.ga'))
basis_names = np.array(layout.basis_names, dtype=str)
mv_array = ConformalMVArray([random_point_pair(rng=rng) for i in range(1000)]).value
write_ga_file(file_name, mv_array, layout.metric, basis_names, compression=True, transpose=False, sparse=False, support=False)
(data_array, metric_2, basis_names_2, support) = read_ga_file(file_name)
np.testing.assert_equal(data_array, mv_array)
np.testing.assert_equal(layout.metric, metric_2)
np.testing.assert_equal(basis_names, basis_names_2)
def test_write_and_read_array(self, tmp_path, rng):
file_name = str((tmp_path / 'test.ga'))
mv_array = MVArray([random_point_pair(rng=rng) for i in range(1000)])
mv_array.save(file_name, compression=True, transpose=False, sparse=False, support=False)
loaded_array = layout.load_ga_file(file_name)
np.testing.assert_equal(loaded_array.value, mv_array.value) |
class StartOfPeriodLedgerField(object):
def __init__(self, ledger_field, packet_field=None):
self._get_ledger_field = op.attrgetter(ledger_field)
if (packet_field is None):
self._packet_field = ledger_field.rsplit('.', 1)[(- 1)]
else:
self._packet_field = packet_field
def start_of_simulation(self, ledger, emission_rate, trading_calendar, sessions, benchmark_source):
self._start_of_simulation = self._get_ledger_field(ledger)
def start_of_session(self, ledger, session, data_portal):
self._previous_day = self._get_ledger_field(ledger)
def _end_of_period(self, sub_field, packet, ledger):
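# cumulative_perf always carries the value captured at simulation start; the sub-period field carries the previous session's value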
packet_field = self._packet_field
packet['cumulative_perf'][packet_field] = self._start_of_simulation
packet[sub_field][packet_field] = self._previous_day
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
self._end_of_period('minute_perf', packet, ledger)
def end_of_session(self, packet, ledger, session, session_ix, data_portal):
self._end_of_period('daily_perf', packet, ledger) |
@weighted_loss
def rotated_iou_loss(pred, target, linear=False, mode='log', eps=1e-06):
assert (mode in ['linear', 'square', 'log'])
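# loss variants: 'linear' -> 1 - IoU, 'square' -> 1 - IoU**2, 'log' -> -log(IoU)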
if linear:
mode = 'linear'
warnings.warn('DeprecationWarning: Setting "linear=True" in poly_iou_loss is deprecated, please use "mode=`linear`" instead.')
if (diff_iou_rotated_2d is None):
raise ImportError('Please install mmcv-full >= 1.5.0.')
ious = diff_iou_rotated_2d(pred.unsqueeze(0), target.unsqueeze(0))
ious = ious.squeeze(0).clamp(min=eps)
if (mode == 'linear'):
loss = (1 - ious)
elif (mode == 'square'):
loss = (1 - (ious ** 2))
elif (mode == 'log'):
loss = (- ious.log())
else:
raise NotImplementedError
return loss |
def circ_corrcl(x, y):
from scipy.stats import pearsonr, chi2
x = np.asarray(x)
y = np.asarray(y)
assert (x.size == y.size), 'x and y must have the same length.'
(x, y) = remove_na(x, y, paired=True)
n = x.size
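# circular-linear correlation: correlate the linear variable with sin(x) and cos(x), then combine the three Pearson terms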
rxs = pearsonr(y, np.sin(x))[0]
rxc = pearsonr(y, np.cos(x))[0]
rcs = pearsonr(np.sin(x), np.cos(x))[0]
r = np.sqrt(((((rxc ** 2) + (rxs ** 2)) - (((2 * rxc) * rxs) * rcs)) / (1 - (rcs ** 2))))
pval = chi2.sf((n * (r ** 2)), 2)
return (r, pval) |
class VanLargeKernelAttentionLayer(nn.Module):
def __init__(self, hidden_size: int):
super().__init__()
self.attention = VanLargeKernelAttention(hidden_size)
def forward(self, hidden_state):
attention = self.attention(hidden_state)
attended = (hidden_state * attention)
return attended |
def check_link_url(link: Link) -> int:
try:
rc = requests.head(link.uri, timeout=2, allow_redirects=True)
except (requests.ConnectionError, requests.exceptions.ReadTimeout) as exc:
fail(link, exc)
return 2
if (rc.status_code == 200):
ok(link)
return 0
else:
fail(link, rc.status_code)
return 4 |
def test_PVSystem_multi_array_first_solar_spectral_loss():
system = pvsystem.PVSystem(arrays=[pvsystem.Array(mount=pvsystem.FixedMount(0, 180), module_parameters={'Technology': 'mc-Si'}, module_type='multisi'), pvsystem.Array(mount=pvsystem.FixedMount(0, 180), module_parameters={'Technology': 'mc-Si'}, module_type='multisi')])
(loss_one, loss_two) = system.first_solar_spectral_loss(1, 3)
assert (loss_one == loss_two) |