code stringlengths 281 23.7M |
|---|
@pytest.mark.parametrize('section', ['console', 'gui'])
@pytest.mark.parametrize('kind', ['win-ia32', 'win-amd64', 'win-arm'])
def test_script_generate_launcher(section, kind):
    """Script.generate() should produce a Windows launcher exe: launcher
    binary, then a shebang line, then a zip whose __main__.py imports and
    calls the entry point.

    NOTE: the original decorator lines were corrupted to bare
    `.parametrize(...)` (a SyntaxError); restored to @pytest.mark.parametrize.
    """
    launcher_data = _read_launcher_data(section, kind)
    script = Script('foo', 'foo.bar', 'baz.qux', section=section)
    (name, data) = script.generate('#!C:\\path to my\\python.exe\n', kind=kind)
    # The zip payload starts after launcher + shebang.  ZipFile tolerates a
    # small amount of leading junk, so the gui/pythonw length difference is ok.
    prefix_len = (len(launcher_data) + len(b'#!C:\\path to my\\python.exe\n'))
    stream = io.BytesIO(data[prefix_len:])
    with zipfile.ZipFile(stream) as zf:
        code = zf.read('__main__.py')
    assert (name == 'foo.exe')
    assert data.startswith(launcher_data)
    if (section == 'gui'):
        # GUI scripts get the windowed interpreter (pythonw.exe).
        assert (b'#!C:\\path to my\\pythonw.exe\n' in data)
    else:
        assert (b'#!C:\\path to my\\python.exe\n' in data)
    assert (b'\nfrom foo.bar import baz\n' in code)
    assert (b'baz.qux()' in code)
def get_matched_files(refresh=False):
    """Return the list of GCS paths matching the module-level ``glob_pattern``.

    Results are cached in a temp file between Streamlit reruns; pass
    ``refresh=True`` to force a new ``gsutil ls`` listing.
    """
    cache_path = '/tmp/matched_files.txt'
    use_cache = os.path.exists(cache_path) and not refresh
    if use_cache:
        with open(cache_path, 'r') as cache:
            matched_files = cache.read().strip().split('\n')
        st.write(f'Using cached matched files from `{cache_path}`')
    else:
        with st.spinner('Refreshing... (this may take a while)'):
            listing = subprocess.run(['gsutil', 'ls', glob_pattern], capture_output=True)
            matched_files = listing.stdout.decode('utf-8').strip().split('\n')
            with open(cache_path, 'w') as cache:
                cache.write('\n'.join(matched_files))
    st.markdown(f'Found **{len(matched_files)}** files.')
    return matched_files
class SnapshotRollbackView(ObjectPermissionMixin, RedirectViewMixin, DetailView):
    """Confirm-and-rollback view for a single project snapshot."""

    model = Snapshot
    queryset = Snapshot.objects.all()
    permission_required = 'projects.rollback_snapshot_object'
    template_name = 'projects/snapshot_rollback.html'

    def get_queryset(self):
        # Only snapshots of the project named in the URL are visible here.
        return Snapshot.objects.filter(project_id=self.kwargs['project_id'])

    def get_permission_object(self):
        # Permissions are checked against the owning project, not the snapshot.
        return self.get_object().project

    def post(self, request, *args, **kwargs):
        snapshot = self.get_object()
        cancelled = 'cancel' in request.POST
        if not cancelled:
            snapshot.rollback()
        return HttpResponseRedirect(reverse('project', args=[snapshot.project.id]))
class TalkieConnectionOwner(object):
    """Tracks talkie connections so they can all be released in one call."""

    def __init__(self):
        self._connections = []

    def talkie_connect(self, state, path, listener, drop_args=False):
        """Connect *listener* to *path* on *state* and track the connection.

        A non-string *path* is treated as a collection of paths; a list of
        connections is returned in that case.  With ``drop_args=True`` the
        listener is wrapped so its call arguments are discarded.
        """
        if drop_args:
            listener = drop_args_wrapper(listener)
        if not isinstance(path, str):
            return [self.talkie_connect(state, single_path, listener, False)
                    for single_path in path]
        connection = state.talkie_connect(path, listener)
        self._connections.append(connection)
        return connection

    def talkie_disconnect_all(self):
        """Release every tracked connection; release failures are ignored."""
        while self._connections:
            try:
                self._connections.pop().release()
            except Exception:
                pass
def test_instanceloader_yaml_dup_anchor(tmp_path, open_wide):
    """A YAML file that redefines an anchor must still load; each use of the
    anchor resolves to the value bound at that point."""
    yaml_file = tmp_path / 'foo.yaml'
    yaml_file.write_text('a:\n b: &anchor\n - 1\n - 2\n c: &anchor d\n')
    loader = InstanceLoader(open_wide(yaml_file))
    loaded = list(loader.iter_files())
    assert loaded == [(str(yaml_file), {'a': {'b': [1, 2], 'c': 'd'}})]
def test_cross_layer_equalization_stepwise():
    """Cross-layer scaling must change tracked conv kernels, and the
    subsequent high-bias fold must not increase any folded bias."""
    orig = tf.keras.applications.ResNet50(input_shape=(224, 224, 3))
    folded_pairs, model = fold_all_batch_norms(orig)
    bn_dict = {conv_or_linear: bn for conv_or_linear, bn in folded_pairs}
    conv1, conv2, conv3 = model.layers[6], model.layers[14], model.layers[11]
    w1, _ = conv1.get_weights()
    w2, _ = conv2.get_weights()
    w3, _ = conv3.get_weights()
    cls_set_info_list = CrossLayerScaling.scale_model(model)
    # Scaling should have rewritten each kernel.
    for conv, kernel_before in ((conv1, w1), (conv2, w2), (conv3, w3)):
        assert not np.allclose(conv.kernel, kernel_before)
    _, b1 = conv1.get_weights()
    _, b2 = conv2.get_weights()
    HighBiasFold.bias_fold(cls_set_info_list, bn_dict)
    for bias_before, bias_after in zip(b1, conv1.bias.numpy()):
        assert bias_after <= bias_before
    for bias_before, bias_after in zip(b2, conv2.bias.numpy()):
        assert bias_after <= bias_before
class HTLCManager:
    """Tracks the lifecycle of HTLCs, fee updates and commitment transaction
    numbers (ctns) for both parties (LOCAL/REMOTE) of a lightning channel.

    The backing ``log`` is persisted storage.  Each party's sub-log holds
    'adds' (htlc_id -> UpdateAddHtlc), plus 'locked_in'/'settles'/'fails'
    maps recording, per ctx owner, the ctn at which the event takes effect
    (None meaning "not yet in that party's commitment").

    NOTE: the original file had every ``@with_lock`` decorator corrupted into
    a bare ``_lock`` expression (a NameError at class creation); restored.
    """

    def __init__(self, log: 'StoredDict', *, initial_feerate=None):
        if len(log) == 0:
            initial = {'adds': {}, 'locked_in': {}, 'settles': {}, 'fails': {}, 'fee_updates': {}, 'revack_pending': False, 'next_htlc_id': 0, 'ctn': -1}
            log[LOCAL] = deepcopy(initial)
            log[REMOTE] = deepcopy(initial)
            log['unacked_local_updates2'] = {}
        # Migrations for logs created before these keys existed.
        if 'unfulfilled_htlcs' not in log:
            log['unfulfilled_htlcs'] = {}
        if 'fail_htlc_reasons' not in log:
            log['fail_htlc_reasons'] = {}
        if initial_feerate is not None:
            assert type(initial_feerate) is int
            for sub in (LOCAL, REMOTE):
                if not log[sub]['fee_updates']:
                    log[sub]['fee_updates'][0] = FeeUpdate(rate=initial_feerate, ctn_local=0, ctn_remote=0)
        self.log = log
        self.lock = threading.RLock()
        self._init_maybe_active_htlc_ids()

    def with_lock(func):
        """Decorator: run the method while holding ``self.lock``."""
        def func_wrapper(self, *args, **kwargs):
            with self.lock:
                return func(self, *args, **kwargs)
        return func_wrapper

    @with_lock
    def ctn_latest(self, sub: HTLCOwner) -> int:
        """Latest (possibly not yet revoked-to) ctn of *sub*."""
        return self.ctn_oldest_unrevoked(sub) + int(self.is_revack_pending(sub))

    def ctn_oldest_unrevoked(self, sub: HTLCOwner) -> int:
        return self.log[sub]['ctn']

    def is_revack_pending(self, sub: HTLCOwner) -> bool:
        # True iff *sub* received a new ctx but has not yet revoked the old one.
        return self.log[sub]['revack_pending']

    def _set_revack_pending(self, sub: HTLCOwner, pending: bool) -> None:
        self.log[sub]['revack_pending'] = pending

    def get_next_htlc_id(self, sub: HTLCOwner) -> int:
        return self.log[sub]['next_htlc_id']

    @with_lock
    def channel_open_finished(self):
        self.log[LOCAL]['ctn'] = 0
        self.log[REMOTE]['ctn'] = 0
        self._set_revack_pending(LOCAL, False)
        self._set_revack_pending(REMOTE, False)

    @with_lock
    def send_htlc(self, htlc: UpdateAddHtlc) -> UpdateAddHtlc:
        """Record an HTLC we are offering; it enters remote's next ctx."""
        htlc_id = htlc.htlc_id
        if htlc_id != self.get_next_htlc_id(LOCAL):
            raise Exception(f'unexpected local htlc_id. next should be {self.get_next_htlc_id(LOCAL)} but got {htlc_id}')
        self.log[LOCAL]['adds'][htlc_id] = htlc
        self.log[LOCAL]['locked_in'][htlc_id] = {LOCAL: None, REMOTE: self.ctn_latest(REMOTE) + 1}
        self.log[LOCAL]['next_htlc_id'] += 1
        self._maybe_active_htlc_ids[LOCAL].add(htlc_id)
        return htlc

    @with_lock
    def recv_htlc(self, htlc: UpdateAddHtlc) -> None:
        """Record an HTLC offered to us; it enters our next ctx."""
        htlc_id = htlc.htlc_id
        if htlc_id != self.get_next_htlc_id(REMOTE):
            raise Exception(f'unexpected remote htlc_id. next should be {self.get_next_htlc_id(REMOTE)} but got {htlc_id}')
        self.log[REMOTE]['adds'][htlc_id] = htlc
        self.log[REMOTE]['locked_in'][htlc_id] = {LOCAL: self.ctn_latest(LOCAL) + 1, REMOTE: None}
        self.log[REMOTE]['next_htlc_id'] += 1
        self._maybe_active_htlc_ids[REMOTE].add(htlc_id)

    @with_lock
    def send_settle(self, htlc_id: int) -> None:
        next_ctn = self.ctn_latest(REMOTE) + 1
        if not self.is_htlc_active_at_ctn(ctx_owner=REMOTE, ctn=next_ctn, htlc_proposer=REMOTE, htlc_id=htlc_id):
            raise Exception(f'(local) cannot remove htlc that is not there...')
        self.log[REMOTE]['settles'][htlc_id] = {LOCAL: None, REMOTE: next_ctn}

    @with_lock
    def recv_settle(self, htlc_id: int) -> None:
        next_ctn = self.ctn_latest(LOCAL) + 1
        if not self.is_htlc_active_at_ctn(ctx_owner=LOCAL, ctn=next_ctn, htlc_proposer=LOCAL, htlc_id=htlc_id):
            raise Exception(f'(remote) cannot remove htlc that is not there...')
        self.log[LOCAL]['settles'][htlc_id] = {LOCAL: next_ctn, REMOTE: None}

    @with_lock
    def send_fail(self, htlc_id: int) -> None:
        next_ctn = self.ctn_latest(REMOTE) + 1
        if not self.is_htlc_active_at_ctn(ctx_owner=REMOTE, ctn=next_ctn, htlc_proposer=REMOTE, htlc_id=htlc_id):
            raise Exception(f'(local) cannot remove htlc that is not there...')
        self.log[REMOTE]['fails'][htlc_id] = {LOCAL: None, REMOTE: next_ctn}

    @with_lock
    def recv_fail(self, htlc_id: int) -> None:
        next_ctn = self.ctn_latest(LOCAL) + 1
        if not self.is_htlc_active_at_ctn(ctx_owner=LOCAL, ctn=next_ctn, htlc_proposer=LOCAL, htlc_id=htlc_id):
            raise Exception(f'(remote) cannot remove htlc that is not there...')
        self.log[LOCAL]['fails'][htlc_id] = {LOCAL: next_ctn, REMOTE: None}

    @with_lock
    def send_update_fee(self, feerate: int) -> None:
        fee_update = FeeUpdate(rate=feerate, ctn_local=None, ctn_remote=self.ctn_latest(REMOTE) + 1)
        self._new_feeupdate(fee_update, subject=LOCAL)

    @with_lock
    def recv_update_fee(self, feerate: int) -> None:
        fee_update = FeeUpdate(rate=feerate, ctn_local=self.ctn_latest(LOCAL) + 1, ctn_remote=None)
        self._new_feeupdate(fee_update, subject=REMOTE)

    @with_lock
    def _new_feeupdate(self, fee_update: FeeUpdate, subject: HTLCOwner) -> None:
        # If the previous update has not entered either party's latest ctx yet,
        # it can simply be overwritten; otherwise append a new entry.
        d = self.log[subject]['fee_updates']
        n = len(d)
        last_fee_update = d[n - 1]
        if (((last_fee_update.ctn_local is None) or (last_fee_update.ctn_local > self.ctn_latest(LOCAL)))
                and ((last_fee_update.ctn_remote is None) or (last_fee_update.ctn_remote > self.ctn_latest(REMOTE)))):
            d[n - 1] = fee_update
        else:
            d[n] = fee_update

    @with_lock
    def send_ctx(self) -> None:
        assert self.ctn_latest(REMOTE) == self.ctn_oldest_unrevoked(REMOTE), (self.ctn_latest(REMOTE), self.ctn_oldest_unrevoked(REMOTE))
        self._set_revack_pending(REMOTE, True)

    @with_lock
    def recv_ctx(self) -> None:
        assert self.ctn_latest(LOCAL) == self.ctn_oldest_unrevoked(LOCAL), (self.ctn_latest(LOCAL), self.ctn_oldest_unrevoked(LOCAL))
        self._set_revack_pending(LOCAL, True)

    @with_lock
    def send_rev(self) -> None:
        """We revoked our old ctx: lock pending remote updates into remote's next ctx."""
        self.log[LOCAL]['ctn'] += 1
        self._set_revack_pending(LOCAL, False)
        # htlcs
        for htlc_id in self._maybe_active_htlc_ids[REMOTE]:
            ctns = self.log[REMOTE]['locked_in'][htlc_id]
            if ctns[REMOTE] is None and ctns[LOCAL] <= self.ctn_latest(LOCAL):
                ctns[REMOTE] = self.ctn_latest(REMOTE) + 1
        for log_action in ('settles', 'fails'):
            for htlc_id in self._maybe_active_htlc_ids[LOCAL]:
                ctns = self.log[LOCAL][log_action].get(htlc_id, None)
                if ctns is None:
                    continue
                if ctns[REMOTE] is None and ctns[LOCAL] <= self.ctn_latest(LOCAL):
                    ctns[REMOTE] = self.ctn_latest(REMOTE) + 1
        self._update_maybe_active_htlc_ids()
        # fee updates
        for k, fee_update in list(self.log[REMOTE]['fee_updates'].items()):
            if fee_update.ctn_remote is None and fee_update.ctn_local <= self.ctn_latest(LOCAL):
                fee_update.ctn_remote = self.ctn_latest(REMOTE) + 1

    @with_lock
    def recv_rev(self) -> None:
        """Remote revoked their old ctx: lock our pending updates into our next ctx."""
        self.log[REMOTE]['ctn'] += 1
        self._set_revack_pending(REMOTE, False)
        # htlcs
        for htlc_id in self._maybe_active_htlc_ids[LOCAL]:
            ctns = self.log[LOCAL]['locked_in'][htlc_id]
            if ctns[LOCAL] is None and ctns[REMOTE] <= self.ctn_latest(REMOTE):
                ctns[LOCAL] = self.ctn_latest(LOCAL) + 1
        for log_action in ('settles', 'fails'):
            for htlc_id in self._maybe_active_htlc_ids[REMOTE]:
                ctns = self.log[REMOTE][log_action].get(htlc_id, None)
                if ctns is None:
                    continue
                if ctns[LOCAL] is None and ctns[REMOTE] <= self.ctn_latest(REMOTE):
                    ctns[LOCAL] = self.ctn_latest(LOCAL) + 1
        self._update_maybe_active_htlc_ids()
        # fee updates
        for k, fee_update in list(self.log[LOCAL]['fee_updates'].items()):
            if fee_update.ctn_local is None and fee_update.ctn_remote <= self.ctn_latest(REMOTE):
                fee_update.ctn_local = self.ctn_latest(LOCAL) + 1
        # no need to keep local update raw msgs anymore, they have just been ACKed.
        self.log['unacked_local_updates2'].pop(self.log[REMOTE]['ctn'], None)

    @with_lock
    def _update_maybe_active_htlc_ids(self) -> None:
        # Drop htlcs that have been irrevocably removed from BOTH ctxs from the
        # "maybe active" sets, folding settled amounts into _balance_delta.
        sanity_margin = 1
        for htlc_proposer in (LOCAL, REMOTE):
            for log_action in ('settles', 'fails'):
                for htlc_id in list(self._maybe_active_htlc_ids[htlc_proposer]):
                    ctns = self.log[htlc_proposer][log_action].get(htlc_id, None)
                    if ctns is None:
                        continue
                    if ((ctns[LOCAL] is not None) and (ctns[LOCAL] <= (self.ctn_oldest_unrevoked(LOCAL) - sanity_margin))
                            and (ctns[REMOTE] is not None) and (ctns[REMOTE] <= (self.ctn_oldest_unrevoked(REMOTE) - sanity_margin))):
                        self._maybe_active_htlc_ids[htlc_proposer].remove(htlc_id)
                        if log_action == 'settles':
                            htlc = self.log[htlc_proposer]['adds'][htlc_id]
                            self._balance_delta -= htlc.amount_msat * htlc_proposer

    @with_lock
    def _init_maybe_active_htlc_ids(self):
        self._maybe_active_htlc_ids = {LOCAL: set(), REMOTE: set()}
        self._balance_delta = 0  # "definitely inactive" settles folded in here
        for htlc_proposer in (LOCAL, REMOTE):
            for htlc_id in self.log[htlc_proposer]['adds']:
                self._maybe_active_htlc_ids[htlc_proposer].add(htlc_id)
        self._update_maybe_active_htlc_ids()

    @with_lock
    def discard_unsigned_remote_updates(self):
        """Discard updates received from remote that we have not yet signed for."""
        # htlcs added
        for htlc_id, ctns in list(self.log[REMOTE]['locked_in'].items()):
            if ctns[LOCAL] > self.ctn_latest(LOCAL):
                del self.log[REMOTE]['locked_in'][htlc_id]
                del self.log[REMOTE]['adds'][htlc_id]
                self._maybe_active_htlc_ids[REMOTE].discard(htlc_id)
        if self.log[REMOTE]['locked_in']:
            self.log[REMOTE]['next_htlc_id'] = max([int(x) for x in self.log[REMOTE]['locked_in'].keys()]) + 1
        else:
            self.log[REMOTE]['next_htlc_id'] = 0
        # htlcs removed
        for log_action in ('settles', 'fails'):
            for htlc_id, ctns in list(self.log[LOCAL][log_action].items()):
                if ctns[LOCAL] > self.ctn_latest(LOCAL):
                    del self.log[LOCAL][log_action][htlc_id]
        # fee updates
        for k, fee_update in list(self.log[REMOTE]['fee_updates'].items()):
            if fee_update.ctn_local > self.ctn_latest(LOCAL):
                self.log[REMOTE]['fee_updates'].pop(k)

    @with_lock
    def store_local_update_raw_msg(self, raw_update_msg: bytes, *, is_commitment_signed: bool) -> None:
        """Buffer an outgoing update msg until the remote ACKs it (via revoke)."""
        if is_commitment_signed:
            ctn_idx = self.ctn_latest(REMOTE)
        else:
            ctn_idx = self.ctn_latest(REMOTE) + 1
        l = self.log['unacked_local_updates2'].get(ctn_idx, [])
        l.append(raw_update_msg.hex())
        self.log['unacked_local_updates2'][ctn_idx] = l

    @with_lock
    def get_unacked_local_updates(self) -> Dict[int, Sequence[bytes]]:
        return {int(ctn): [bfh(msg) for msg in messages]
                for ctn, messages in self.log['unacked_local_updates2'].items()}

    def get_htlc_by_id(self, htlc_proposer: HTLCOwner, htlc_id: int) -> UpdateAddHtlc:
        return self.log[htlc_proposer]['adds'][htlc_id]

    @with_lock
    def is_htlc_active_at_ctn(self, *, ctx_owner: HTLCOwner, ctn: int, htlc_proposer: HTLCOwner, htlc_id: int) -> bool:
        """Is the htlc part of ctx_owner's commitment tx numbered *ctn*?"""
        htlc_id = int(htlc_id)
        if htlc_id >= self.get_next_htlc_id(htlc_proposer):
            return False
        settles = self.log[htlc_proposer]['settles']
        fails = self.log[htlc_proposer]['fails']
        ctns = self.log[htlc_proposer]['locked_in'][htlc_id]
        if ctns[ctx_owner] is not None and ctns[ctx_owner] <= ctn:
            not_settled = (htlc_id not in settles) or (settles[htlc_id][ctx_owner] is None) or (settles[htlc_id][ctx_owner] > ctn)
            not_failed = (htlc_id not in fails) or (fails[htlc_id][ctx_owner] is None) or (fails[htlc_id][ctx_owner] > ctn)
            if not_settled and not_failed:
                return True
        return False

    @with_lock
    def htlcs_by_direction(self, subject: HTLCOwner, direction: Direction, ctn: int = None) -> Dict[int, UpdateAddHtlc]:
        """Return the active htlcs (id -> htlc) in *subject*'s ctx at *ctn*,
        filtered by direction (SENT/RECEIVED) from *subject*'s point of view."""
        assert type(subject) is HTLCOwner
        assert type(direction) is Direction
        if ctn is None:
            ctn = self.ctn_oldest_unrevoked(subject)
        d = {}
        party = subject if direction == SENT else subject.inverted()
        # For old ctns we cannot rely on the "maybe active" set and must scan everything.
        if ctn >= self.ctn_oldest_unrevoked(subject):
            considered_htlc_ids = self._maybe_active_htlc_ids[party]
        else:
            considered_htlc_ids = self.log[party]['locked_in']
        for htlc_id in considered_htlc_ids:
            htlc_id = int(htlc_id)
            if self.is_htlc_active_at_ctn(ctx_owner=subject, ctn=ctn, htlc_proposer=party, htlc_id=htlc_id):
                d[htlc_id] = self.log[party]['adds'][htlc_id]
        return d

    @with_lock
    def htlcs(self, subject: HTLCOwner, ctn: int = None) -> Sequence[Tuple[Direction, UpdateAddHtlc]]:
        """Return the active htlcs (in both directions) in *subject*'s ctx at *ctn*."""
        assert type(subject) is HTLCOwner
        if ctn is None:
            ctn = self.ctn_oldest_unrevoked(subject)
        l = []
        l += [(SENT, x) for x in self.htlcs_by_direction(subject, SENT, ctn).values()]
        l += [(RECEIVED, x) for x in self.htlcs_by_direction(subject, RECEIVED, ctn).values()]
        return l

    @with_lock
    def get_htlcs_in_oldest_unrevoked_ctx(self, subject: HTLCOwner) -> Sequence[Tuple[Direction, UpdateAddHtlc]]:
        assert type(subject) is HTLCOwner
        ctn = self.ctn_oldest_unrevoked(subject)
        return self.htlcs(subject, ctn)

    @with_lock
    def get_htlcs_in_latest_ctx(self, subject: HTLCOwner) -> Sequence[Tuple[Direction, UpdateAddHtlc]]:
        assert type(subject) is HTLCOwner
        ctn = self.ctn_latest(subject)
        return self.htlcs(subject, ctn)

    @with_lock
    def get_htlcs_in_next_ctx(self, subject: HTLCOwner) -> Sequence[Tuple[Direction, UpdateAddHtlc]]:
        assert type(subject) is HTLCOwner
        ctn = self.ctn_latest(subject) + 1
        return self.htlcs(subject, ctn)

    def was_htlc_preimage_released(self, *, htlc_id: int, htlc_proposer: HTLCOwner) -> bool:
        settles = self.log[htlc_proposer]['settles']
        if htlc_id not in settles:
            return False
        return settles[htlc_id][htlc_proposer] is not None

    def was_htlc_failed(self, *, htlc_id: int, htlc_proposer: HTLCOwner) -> bool:
        fails = self.log[htlc_proposer]['fails']
        if htlc_id not in fails:
            return False
        return fails[htlc_id][htlc_proposer] is not None

    @with_lock
    def all_settled_htlcs_ever_by_direction(self, subject: HTLCOwner, direction: Direction, ctn: int = None) -> Sequence[UpdateAddHtlc]:
        """All htlcs that were ever settled in *subject*'s ctx up to *ctn*, by direction."""
        assert type(subject) is HTLCOwner
        if ctn is None:
            ctn = self.ctn_oldest_unrevoked(subject)
        party = subject if direction == SENT else subject.inverted()
        d = []
        for htlc_id, ctns in self.log[party]['settles'].items():
            if ctns[subject] is not None and ctns[subject] <= ctn:
                d.append(self.log[party]['adds'][htlc_id])
        return d

    @with_lock
    def all_settled_htlcs_ever(self, subject: HTLCOwner, ctn: int = None) -> Sequence[Tuple[Direction, UpdateAddHtlc]]:
        assert type(subject) is HTLCOwner
        if ctn is None:
            ctn = self.ctn_oldest_unrevoked(subject)
        sent = [(SENT, x) for x in self.all_settled_htlcs_ever_by_direction(subject, SENT, ctn)]
        received = [(RECEIVED, x) for x in self.all_settled_htlcs_ever_by_direction(subject, RECEIVED, ctn)]
        return sent + received

    @with_lock
    def all_htlcs_ever(self) -> Sequence[Tuple[Direction, UpdateAddHtlc]]:
        sent = [(SENT, htlc) for htlc in self.log[LOCAL]['adds'].values()]
        received = [(RECEIVED, htlc) for htlc in self.log[REMOTE]['adds'].values()]
        return sent + received

    @with_lock
    def get_balance_msat(self, whose: HTLCOwner, *, ctx_owner=HTLCOwner.LOCAL, ctn: int = None, initial_balance_msat: int) -> int:
        """Balance of *whose* in *ctx_owner*'s ctx at *ctn*, in msat, starting
        from the initial channel balance and applying all settles."""
        if ctn is None:
            ctn = self.ctn_oldest_unrevoked(ctx_owner)
        balance = initial_balance_msat
        if ctn >= self.ctn_oldest_unrevoked(ctx_owner):
            # Recent ctn: start from the precomputed delta of long-dead htlcs.
            balance += self._balance_delta * whose
            considered_sent_htlc_ids = self._maybe_active_htlc_ids[whose]
            considered_recv_htlc_ids = self._maybe_active_htlc_ids[-whose]
        else:
            # Old ctn: scan all settles ever.
            considered_sent_htlc_ids = self.log[whose]['settles']
            considered_recv_htlc_ids = self.log[-whose]['settles']
        # sent htlcs
        for htlc_id in considered_sent_htlc_ids:
            ctns = self.log[whose]['settles'].get(htlc_id, None)
            if ctns is None:
                continue
            if ctns[ctx_owner] is not None and ctns[ctx_owner] <= ctn:
                htlc = self.log[whose]['adds'][htlc_id]
                balance -= htlc.amount_msat
        # received htlcs
        for htlc_id in considered_recv_htlc_ids:
            ctns = self.log[-whose]['settles'].get(htlc_id, None)
            if ctns is None:
                continue
            if ctns[ctx_owner] is not None and ctns[ctx_owner] <= ctn:
                htlc = self.log[-whose]['adds'][htlc_id]
                balance += htlc.amount_msat
        return balance

    @with_lock
    def _get_htlcs_that_got_removed_exactly_at_ctn(self, ctn: int, *, ctx_owner: HTLCOwner, htlc_proposer: HTLCOwner, log_action: str) -> Sequence[UpdateAddHtlc]:
        if ctn >= self.ctn_oldest_unrevoked(ctx_owner):
            considered_htlc_ids = self._maybe_active_htlc_ids[htlc_proposer]
        else:
            considered_htlc_ids = self.log[htlc_proposer][log_action]
        htlcs = []
        for htlc_id in considered_htlc_ids:
            ctns = self.log[htlc_proposer][log_action].get(htlc_id, None)
            if ctns is None:
                continue
            if ctns[ctx_owner] == ctn:
                htlcs.append(self.log[htlc_proposer]['adds'][htlc_id])
        return htlcs

    def received_in_ctn(self, local_ctn: int) -> Sequence[UpdateAddHtlc]:
        return self._get_htlcs_that_got_removed_exactly_at_ctn(local_ctn, ctx_owner=LOCAL, htlc_proposer=REMOTE, log_action='settles')

    def sent_in_ctn(self, remote_ctn: int) -> Sequence[UpdateAddHtlc]:
        return self._get_htlcs_that_got_removed_exactly_at_ctn(remote_ctn, ctx_owner=REMOTE, htlc_proposer=LOCAL, log_action='settles')

    def failed_in_ctn(self, remote_ctn: int) -> Sequence[UpdateAddHtlc]:
        return self._get_htlcs_that_got_removed_exactly_at_ctn(remote_ctn, ctx_owner=REMOTE, htlc_proposer=LOCAL, log_action='fails')

    @with_lock
    def get_feerate(self, subject: HTLCOwner, ctn: int) -> int:
        """Feerate in effect for *subject*'s ctx at *ctn* (binary search of the fee log)."""
        ctn = max(0, ctn)
        # at most one party can have unsigned fee updates at any given time
        assert not (len(self.log[LOCAL]['fee_updates']) > 1 and len(self.log[REMOTE]['fee_updates']) > 1)
        fee_log = self.log[LOCAL]['fee_updates']
        if len(self.log[REMOTE]['fee_updates']) > 1:
            fee_log = self.log[REMOTE]['fee_updates']
        # binary search
        left = 0
        right = len(fee_log)
        while True:
            i = (left + right) // 2
            ctn_at_i = fee_log[i].ctn_local if subject == LOCAL else fee_log[i].ctn_remote
            if right - left <= 1:
                break
            if ctn_at_i is None:  # Nones can only be on the right end
                right = i
                continue
            if ctn_at_i <= ctn:  # among equals, we want the rightmost
                left = i
            else:
                right = i
        assert ctn_at_i <= ctn
        return fee_log[i].rate

    def get_feerate_in_oldest_unrevoked_ctx(self, subject: HTLCOwner) -> int:
        return self.get_feerate(subject=subject, ctn=self.ctn_oldest_unrevoked(subject))

    def get_feerate_in_latest_ctx(self, subject: HTLCOwner) -> int:
        return self.get_feerate(subject=subject, ctn=self.ctn_latest(subject))

    def get_feerate_in_next_ctx(self, subject: HTLCOwner) -> int:
        return self.get_feerate(subject=subject, ctn=self.ctn_latest(subject) + 1)
def get_normalize_mesh(model_file, norm_mesh_sub_dir):
    """Sample ~16384 surface points across all sub-meshes of *model_file*,
    center the mesh on the point centroid, scale it into the unit sphere,
    and export the result as pc_norm.obj in *norm_mesh_sub_dir*.

    Returns (exported obj path, centroid, scale factor m).
    """
    total = 16384
    print('[*] loading model with trimesh...', model_file)
    mesh_list = trimesh.load_mesh(model_file, process=False)
    print('[*] done!', model_file)
    mesh = as_mesh(mesh_list)
    if not isinstance(mesh, list):
        mesh_list = [mesh]
    # Distribute the sample budget proportionally to each sub-mesh's surface area.
    area_lst = np.asarray([np.sum(sub.area_faces) for sub in mesh_list])
    amount_lst = ((area_lst * total) / area_lst.sum()).astype(np.int32)
    points_all = np.zeros((0, 3), dtype=np.float32)
    for sub, n_samples in zip(mesh_list, amount_lst):
        sampled, _ = trimesh.sample.sample_surface(sub, n_samples)
        points_all = np.concatenate([points_all, sampled], axis=0)
    centroid = np.mean(points_all, axis=0)
    points_all = points_all - centroid
    m = np.max(np.sqrt(np.sum(points_all ** 2, axis=1)))
    obj_file = os.path.join(norm_mesh_sub_dir, 'pc_norm.obj')
    # Re-load the original geometry and apply the same normalization to it.
    ori_mesh = as_mesh(trimesh.load_mesh(model_file, process=False))
    ori_mesh.vertices = (ori_mesh.vertices - centroid) / float(m)
    ori_mesh.export(obj_file)
    print('[*] export_mesh: ', obj_file)
    return (obj_file, centroid, m)
def test_python_option(tester: CommandTester) -> None:
    """`init --python` should put the given constraint into the generated pyproject."""
    answers = [
        'my-package', '1.2.3', 'This is a description',
        'n', 'MIT', 'n', 'n', '\n',
    ]
    tester.execute("--python '~2.7 || ^3.6'", inputs='\n'.join(answers))
    expected = '[tool.poetry]\nname = "my-package"\nversion = "1.2.3"\ndescription = "This is a description"\nauthors = ["Your Name <>"]\nlicense = "MIT"\nreadme = "README.md"\n\n[tool.poetry.dependencies]\npython = "~2.7 || ^3.6"\n'
    assert expected in tester.io.fetch_output()
def test_apply_classical_cbloq():
    """Chaining two ApplyClassicalTest bloqs: zeros -> [1,0,1,0,1] -> zeros."""
    bb = BloqBuilder()
    x = bb.add_register(Register('x', 1, shape=(5,)))
    x, y = bb.add(ApplyClassicalTest(), x=x)
    y, z = bb.add(ApplyClassicalTest(), x=y)
    cbloq = bb.finalize(x=x, y=y, z=z)
    xarr = np.zeros(5)
    x, y, z = cbloq.call_classically(x=xarr)
    np.testing.assert_array_equal(x, xarr)
    np.testing.assert_array_equal(y, [1, 0, 1, 0, 1])
    # Applying the bloq a second time restores the original bits.
    np.testing.assert_array_equal(z, xarr)
class ChoiceFeedbackQuestionValueFactory(Factory):
    """Factory producing feedback.ChoiceFeedbackQuestionValue instances."""

    class Meta:
        model = 'feedback.ChoiceFeedbackQuestionValue'
        strategy = factory.CREATE_STRATEGY

    question = factory.SubFactory('tests.factories.ChoiceFeedbackQuestionFactory')
    title = factory.Sequence(lambda n: 'title{}'.format(n))
    value = factory.Sequence(lambda n: n)
def pdb_reformat(reference, target):
    """Rewrite a trajectory PDB so its ATOM records use the atom/residue names
    of a reference QUBE protein, writing the result to QUBE_traj.pdb.

    reference: path to the reference protein file used to build the Protein.
    target:    path to the trajectory PDB to reformat.

    Fix over the original: the output file handle was never closed; it is now
    managed with a context manager so it is always flushed and closed.
    """
    from qubekit.molecules.protein import Protein
    pro = Protein(reference)
    print(pro.pdb_names)
    with open(target, 'r') as traj:
        lines = traj.readlines()
    PRO = False  # inside a MODEL section (atom records to rewrite)
    i = 0        # 1-based atom counter within the current model
    with open('QUBE_traj.pdb', 'w+') as new_traj:
        for line in lines:
            if ('MODEL' in line):
                PRO = True
                new_traj.write(line)
                i = 1
                continue
            elif ('TER' in line):
                PRO = False
                if ('QUP' in line):
                    # NOTE(review): uses pro.residues here but pro.Residues
                    # below -- confirm Protein exposes both attributes.
                    new_traj.write(f'{line[:16]} {pro.residues[(- 1)]:4}{line[21:]}')
                else:
                    new_traj.write(line)
                continue
            if PRO:
                # Four-character atom names get one less padding space.
                if (len(pro.pdb_names[(i - 1)]) <= 3):
                    new_traj.write(f'ATOM   {i:4} {pro.pdb_names[(i - 1)]:3}  {pro.Residues[(i - 1)]:4}{line[21:]}')
                elif (len(pro.pdb_names[(i - 1)]) == 4):
                    new_traj.write(f'ATOM   {i:4} {pro.pdb_names[(i - 1)]:4} {pro.Residues[(i - 1)]:4}{line[21:]}')
                i += 1
            else:
                new_traj.write(line)
class MitreParser():
def __init__(self, name):
self.graph = QBIxora(name)
self.mitrepath = path.abspath(path.join(path.dirname(__file__), 'mitrefiles'))
if (not self.mitrepath.endswith(path.sep)):
self.mitrepath = (self.mitrepath + path.sep)
if (not path.isdir(self.mitrepath)):
mkdir(self.mitrepath)
self.preattackjson = {}
self.enterpriseattackjson = {}
self.fulldict = {}
self.usedict = {}
self.preattackurl = '
self.enterpriseattackurl = '
self.setup(self.mitrepath)
    def setup(self, _path):
        """Load the MITRE pre-attack and enterprise-attack datasets (downloading
        and caching them on first run), then build or restore the combined
        object list (fulldict) and the 'uses'-relationship index (usedict).

        _path: cache directory; assumed to already end with the path separator.
        """
        temp_list = {}
        # Download both datasets only when neither cached copy exists.
        if ((not path.exists((_path + 'enterprise-attack.json'))) and (not path.exists((_path + 'pre-attack.json')))):
            urlretrieve(self.enterpriseattackurl, (_path + 'enterprise-attack.json'))
            urlretrieve(self.preattackurl, (_path + 'pre-attack.json'))
        with copen((_path + 'enterprise-attack.json'), encoding='ascii', errors='ignore') as enterprise, copen((_path + 'pre-attack.json'), encoding='ascii', errors='ignore') as pre:
            self.preattack = pre.read()
            self.enterprise = enterprise.read()
        if (path.exists((_path + 'hardcoded_usedict.json')) and path.exists((_path + 'hardcoded_fulldict.json'))):
            # Fast path: derived dictionaries were cached on a previous run.
            self.fulldict = load(copen((_path + 'hardcoded_fulldict.json')))
            self.usedict = load(copen((_path + 'hardcoded_usedict.json')))
        else:
            temp_list['preattack'] = loads(self.preattack)['objects']
            temp_list['enterprise'] = loads(self.enterprise)['objects']
            # Tag every STIX object with the collection it came from.
            self.update_dict(temp_list['preattack'], {'collection': 'preattack'})
            self.update_dict(temp_list['enterprise'], {'collection': 'enterprise'})
            self.fulldict = (temp_list['preattack'] + temp_list['enterprise'])
            self.usedict = self.finduses()
            dump(self.fulldict, copen((_path + 'hardcoded_fulldict.json'), 'w'))
            dump(self.usedict, copen((_path + 'hardcoded_usedict.json'), 'w'))
def update_dict(self, temp_d, temp_s):
for temp_x in temp_d:
temp_x.update(temp_s)
def search_once(self, temp_s, temp_d):
with ignore_excpetion(Exception):
for temp_x in temp_s:
if all((((temp_k in temp_x) and (temp_x[temp_k] == temp_var)) for (temp_k, temp_var) in temp_d.items())):
return temp_x
return None
def search_in_mitre_and_return(self, temp_s, temp_d, temp_r):
temp_l = []
for temp_x in temp_s:
if all((((temp_k in temp_x) and (temp_x[temp_k] == temp_var)) for (temp_k, temp_var) in temp_d.items())):
temp_l.append({key: temp_x.get(key) for key in temp_r})
return temp_l
def nested_search(self, temp_k, temp_d):
if (temp_k in temp_d):
return temp_d[temp_k]
for (temp_k, temp_var) in temp_d.items():
if isinstance(temp_var, dict):
result = self.nested_search(temp_k, temp_var)
if result:
return (temp_k, result)
    def findid(self, temp_s, _print):
        """Collect {id: name} for every attack-pattern object in temp_s[0],
        including attack-patterns nested in a list-valued 'description'.

        Set _print=True to also pretty-print the mapping.
        """
        temp_l = {}
        for temp_x in temp_s[0]:
            if (temp_x['type'] == 'attack-pattern'):
                if (temp_x['id'] not in temp_l):
                    temp_l.update({temp_x['id']: temp_x['name']})
                # Some entries carry nested attack-patterns inside 'description'.
                if isinstance(temp_x['description'], list):
                    for temp_d in temp_x['description']:
                        if (temp_d['type'] == 'attack-pattern'):
                            if (temp_d['id'] not in temp_l):
                                temp_l.update({temp_d['id']: temp_d['name']})
        if _print:
            print(dumps(temp_l, indent=4, sort_keys=True))
        return temp_l
def countitem(self, temp_s, temp_k):
return Counter([temp_d[temp_k] for temp_d in temp_s])
    def finduses(self):
        """Build the 'uses' index: {source_type: {source_name: {id, description,
        aliases?, techniques: [...]}}} from every 'uses' STIX relationship,
        resolving each relationship's source and target objects.
        """
        temp_l = self.search_in_mitre_and_return(self.fulldict, {'relationship_type': 'uses'}, ['source_ref', 'target_ref', 'description', 'collection', 'kill_chain_phases'])
        temp_d = {}
        temp_added = {}
        temp_counter = 0
        for temp_i in temp_l:
            temp_counter += 1
            # Resolve the relationship endpoints to their full STIX objects.
            temp_s = self.search_once(self.fulldict, {'id': temp_i['source_ref']})
            temp_u = self.search_once(self.fulldict, {'id': temp_i['target_ref']})
            temp_xx = None
            temp_xs = None
            # external_references may be absent; swallow lookup failures.
            with ignore_excpetion(Exception):
                temp_xx = temp_u['external_references'][0]['external_id']
                temp_xs = temp_s['external_references'][0]['external_id']
            if (temp_s and temp_u):
                # NOTE(review): 'type'.lower().rstrip() operates on the literal
                # string 'type' (a no-op), not on the value -- probably meant
                # temp_s['type'].lower().rstrip(); harmless only because STIX
                # type names are already lowercase. Left as-is.
                if temp_d.get(temp_s['type'.lower().rstrip()]):
                    if ((temp_d[temp_s['type']].get(temp_s['name']) == []) or temp_d[temp_s['type']].get(temp_s['name'])):
                        # Source already indexed: append this technique entry.
                        temp_dict_ = {'id': temp_xx, 'name': temp_u['name'], 'type': temp_u['type'], 'collection': temp_i['collection']}
                        if ('kill_chain_phases' in temp_u):
                            temp_dict_.update({'kill_chain_phases': ', '.join([_['phase_name'] for _ in temp_u['kill_chain_phases']])})
                        if ((temp_u['type'] == 'malware') or (temp_u['type'] == 'tool')):
                            temp_dict_.update({'techniques': []})
                        else:
                            temp_dict_.update({'description': temp_i['description']})
                        temp_d[temp_s['type']][temp_s['name']]['techniques'].append(temp_dict_)
                    else:
                        # Known source type, new source name: create its entry.
                        temp_dict_ = {}
                        if ('kill_chain_phases' in temp_u):
                            temp_dict_ = {temp_s['name']: {'id': temp_xs, 'description': temp_s['description'], 'techniques': [{'id': temp_xx, 'name': temp_u['name'], 'type': temp_u['type'], 'description': temp_i['description'], 'collection': temp_i['collection'], 'kill_chain_phases': ', '.join([_['phase_name'] for _ in temp_u['kill_chain_phases']])}]}}
                        else:
                            temp_dict_ = {temp_s['name']: {'id': temp_xs, 'description': temp_s['description'], 'techniques': [{'id': temp_xx, 'name': temp_u['name'], 'type': temp_u['type'], 'description': temp_i['description'], 'collection': temp_i['collection']}]}}
                        if ('aliases' in temp_s):
                            temp_dict_[temp_s['name']].update({'aliases': ', '.join(temp_s['aliases'])})
                        temp_d[temp_s['type']].update(temp_dict_)
                else:
                    # First source of this type: create the type bucket.
                    temp_dict_ = {}
                    if ('kill_chain_phases' in temp_u):
                        temp_dict_ = {temp_s['name']: {'id': temp_xs, 'description': temp_s['description'], 'techniques': [{'id': temp_xx, 'name': temp_u['name'], 'type': temp_u['type'], 'description': temp_i['description'], 'collection': temp_i['collection'], 'kill_chain_phases': ', '.join([_['phase_name'] for _ in temp_u['kill_chain_phases']])}]}}
                    else:
                        temp_dict_ = {temp_s['name']: {'id': temp_xs, 'description': temp_s['description'], 'techniques': [{'id': temp_xx, 'name': temp_u['name'], 'type': temp_u['type'], 'description': temp_i['description'], 'collection': temp_i['collection']}]}}
                    if ('aliases' in temp_s):
                        temp_dict_[temp_s['name']].update({'aliases': ', '.join(temp_s['aliases'])})
                    temp_d.update({temp_s['type'].lower().rstrip(): temp_dict_})
        return temp_d
def findapt(self, apt, _print=False):
temp_x = self.usedict['intrusion-set'][apt]
temp_c = self.countitem(temp_x, 'collection')
if _print:
print(dumps([temp_x, temp_c], indent=4, sort_keys=True))
return [temp_x, temp_c]
def listapts(self, _print=False):
temp_x = list(self.usedict['intrusion-set'])
if _print:
print(dumps(temp_x, indent=4, sort_keys=True))
return temp_x
def findmalware(self, malware, _print=False):
if (malware in self.usedict['malware']):
temp_x = self.usedict['malware'][malware]
if _print:
print(dumps(temp_x, indent=4, sort_keys=True))
else:
return temp_x
return None
def findtool(self, tool, _print=False):
if (tool in self.usedict['tool']):
temp_x = self.usedict['tool'][tool]
if _print:
print(dumps(temp_x, indent=4, sort_keys=True))
else:
return temp_x
return None
def findword(self, word, _print=False):
temp_x = {}
pattern = rcompile(('(^.*%s.*$)' % word), (8 | 2))
temp_x['enterpriseattack'] = list(set(findall(pattern, self.enterprise)))
temp_x['preattack'] = list(set(findall(pattern, self.preattack)))
if _print:
print(dumps(temp_x, indent=4, sort_keys=True))
return temp_x
def random_color(self):
rand = (lambda : randint(100, 200))
return ('#%02X%02X%02X' % (rand(), rand(), rand()))
    def gen_apt_graph(self):
        """Populate self.graph with one node per APT plus its techniques.

        Plain techniques hang directly off the APT node; malware/tool
        entries get their own (colored) node with their sub-techniques
        attached underneath.
        """
        for (apt, value) in self.usedict['intrusion-set'].items():
            # Search label includes aliases only when they differ from the name.
            if (apt != value['aliases']):
                search = '{} - {}'.format(value['id'], value['aliases'])
            else:
                search = '{}'.format(apt)
            body = ((('<b>Aliases:</b> ' + value['aliases']) + '<br><hr><b>Description:</b> ') + value['description'])
            # group cycles through 5 buckets based on entry size; yellow = APT.
            self.graph.add_node(apt, _set={'header': value['id'], 'group': ((len(value) % 5) + 1), 'width': 10, 'color': '#fce903', 'body': body}, search=search)
            if ('techniques' in value):
                for technique in value['techniques']:
                    if ((technique['type'] != 'malware') and (technique['type'] != 'tool')):
                        # Ordinary technique: thin edge from the APT node.
                        if ('kill_chain_phases' in technique):
                            body = ('<b>Tactics:</b> ' if (', ' in technique['kill_chain_phases']) else '<b>Tactic:</b> ')
                            body += ((technique['kill_chain_phases'] + '<br><hr><b>Description: </b>') + technique['description'])
                            self.graph.add_node(technique['name'], _set={'header': technique['id'], 'body': body}, search='{} - {}'.format(technique['id'], technique['name']))
                        else:
                            self.graph.add_node(technique['name'], _set={'header': technique['id'], 'body': technique['description']}, search='{} - {}'.format(technique['id'], technique['name']))
                        self.graph.add_edge(apt, technique['name'], {'width': 1})
                    else:
                        # Malware (red) or tool (pink) node, thicker edge,
                        # then attach that software's own techniques below it.
                        color = ''
                        if (technique['type'] == 'malware'):
                            color = '#ff3232'
                        else:
                            color = '#ff1a8c'
                        self.graph.add_node(technique['name'], _set={'header': self.usedict[technique['type']][technique['name']]['id'], 'body': self.usedict[technique['type']][technique['name']]['description'], 'color': color}, search='{} - {}'.format(technique['id'], technique['name']))
                        self.graph.add_edge(apt, technique['name'], {'width': 3})
                        for _technique in self.usedict[technique['type']][technique['name']]['techniques']:
                            if ('kill_chain_phases' in _technique):
                                body = ('<b>Tactics:</b> ' if (', ' in _technique['kill_chain_phases']) else '<b>Tactic:</b> ')
                                body += ((_technique['kill_chain_phases'] + '<br><hr><b>Description: </b>') + _technique['description'])
                                self.graph.add_node(_technique['name'], _set={'header': _technique['id'], 'body': body}, search='{} - {}'.format(_technique['id'], _technique['name']))
                            else:
                                self.graph.add_node(_technique['name'], _set={'header': _technique['id'], 'body': _technique['description']}, search='{} - {}'.format(_technique['id'], _technique['name']))
                            self.graph.add_edge(technique['name'], _technique['name'], {'width': 1})
def get_files(**kwargs):
    """Collect the template files for a build, plus synthetic wheel metadata.

    Keeps only files under the package directory or tests/, copies the
    license into <metadata_directory>/licenses, renames tests/__init__.py to
    tests/foo.py, and appends generated WHEEL, METADATA and RECORD entries.
    """
    metadata_directory = kwargs.get('metadata_directory', '')
    files = []
    for template_file in get_template_files(**kwargs):
        if str(template_file.path) == 'LICENSE.txt':
            files.append(File(Path(metadata_directory, 'licenses', template_file.path), template_file.contents))
        if template_file.path.parts[0] not in {kwargs['package_name'], 'tests'}:
            continue
        if template_file.path == Path('tests', '__init__.py'):
            template_file.path = Path('tests', 'foo.py')
        files.append(template_file)
    wheel_contents = (
        'Wheel-Version: 1.0\n'
        f'Generator: hatchling {__version__}\n'
        'Root-Is-Purelib: true\n'
        'Tag: py2-none-any\n'
        'Tag: py3-none-any\n'
    )
    metadata_contents = (
        f'Metadata-Version: {DEFAULT_METADATA_VERSION}\n'
        f"Name: {kwargs['project_name']}\n"
        'Version: 0.0.1\n'
        'License-File: LICENSE.txt\n'
    )
    files.append(File(Path(metadata_directory, 'WHEEL'), wheel_contents))
    files.append(File(Path(metadata_directory, 'METADATA'), metadata_contents))
    record_file = File(Path(metadata_directory, 'RECORD'), '')
    update_record_file_contents(record_file, files)
    files.append(record_file)
    return files
def get_beam_indices_of_fraction_group(dicom_dataset, fraction_group_number):
    """Map beam numbers referenced by a fraction group onto BeamSequence indices."""
    referenced_beam_numbers, _ = get_referenced_beam_sequence(dicom_dataset, fraction_group_number)
    all_beam_numbers = [beam.BeamNumber for beam in dicom_dataset.BeamSequence]
    return [all_beam_numbers.index(number) for number in referenced_beam_numbers]
def _load_library():
    """Load the bundled GLFW 3 shared library for the current platform.

    Raises RuntimeError for platforms without a vendored binary.
    """
    vendor_names = {
        'darwin': 'libglfw.3.dylib',
        'linux': 'libglfw.so.3',
        'win': 'glfw3.dll',
    }
    for prefix, libname in vendor_names.items():
        if sys.platform.startswith(prefix):
            libfile = os.path.abspath(
                os.path.join(os.path.dirname(__file__), '../../vendor/mujoco', libname))
            return ctypes.CDLL(libfile)
    raise RuntimeError('unrecognized platform %s' % sys.platform)
def AddExtraLayers(net, use_batchnorm=True):
    """Append the extra SSD feature layers (conv6-conv8 + global avg pool)."""
    use_relu = True
    # conv6: 1x1 channel-reduce followed by a stride-2 3x3 expand.
    ConvBNLayer(net, net.keys()[-1], 'conv6_1', use_batchnorm, use_relu, 256, 1, 0, 1)
    ConvBNLayer(net, 'conv6_1', 'conv6_2', use_batchnorm, use_relu, 512, 3, 1, 2)
    last_layer = 'conv6_2'
    # conv7/conv8 repeat the same reduce+expand pattern at lower width.
    for i in xrange(7, 9):
        reduce_name = 'conv{}_1'.format(i)
        expand_name = 'conv{}_2'.format(i)
        ConvBNLayer(net, last_layer, reduce_name, use_batchnorm, use_relu, 128, 1, 0, 1)
        ConvBNLayer(net, reduce_name, expand_name, use_batchnorm, use_relu, 256, 3, 1, 2)
        last_layer = expand_name
    net.pool6 = L.Pooling(net[net.keys()[-1]], pool=P.Pooling.AVE, global_pooling=True)
    return net
def main():
    """Benchmark pure inference throughput of a pose model on the val split."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # cudnn autotune pays off when input shapes are constant across batches.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    dataset = build_dataset(cfg.data.val)
    data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=False, shuffle=False)
    model = build_posenet(cfg.model)
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        wrap_fp16_model(model)
    if args.fuse_conv_bn:
        # Folding BN into conv removes per-layer overhead during inference.
        model = fuse_conv_bn(model)
    model = MMDataParallel(model, device_ids=[0])
    num_warmup = 5
    pure_inf_time = 0
    for (i, data) in enumerate(data_loader):
        # Synchronize so the timer captures GPU work, not just kernel launch.
        torch.cuda.synchronize()
        start_time = time.perf_counter()
        with torch.no_grad():
            model(return_loss=False, **data)
        torch.cuda.synchronize()
        elapsed = (time.perf_counter() - start_time)
        if (i >= num_warmup):
            # The first num_warmup iterations are excluded from the average.
            pure_inf_time += elapsed
            if (((i + 1) % args.log_interval) == 0):
                its = (((i + 1) - num_warmup) / pure_inf_time)
                print(f'Done item [{(i + 1):<3}], {its:.2f} items / s')
    # NOTE(review): `its` is only bound inside the log-interval branch; if the
    # loader yields fewer than num_warmup + log_interval items this final print
    # raises UnboundLocalError — confirm against expected dataset sizes.
    print(f'Overall average: {its:.2f} items / s')
    print(f'Total time: {pure_inf_time:.2f} s')
def contractreceivechannelclosed_from_event(canonical_identifier: CanonicalIdentifier, event: DecodedEvent) -> ContractReceiveChannelClosed:
    """Build a ContractReceiveChannelClosed state change from a decoded event."""
    args = event.event_data['args']
    return ContractReceiveChannelClosed(
        transaction_from=args['closing_participant'],
        canonical_identifier=canonical_identifier,
        transaction_hash=event.transaction_hash,
        block_number=event.block_number,
        block_hash=event.block_hash,
    )
class ASFInfo(StreamInfo):
    """ASF stream information (length, rates, channels and codec strings)."""

    # Class-level defaults double as documentation of the available fields;
    # __init__ resets each one per instance.
    length = 0.0              # duration in seconds
    sample_rate = 0           # Hz
    bitrate = 0               # bits per second
    channels = 0
    codec_type = u''
    codec_name = u''
    codec_description = u''

    def __init__(self):
        self.length = 0.0
        self.sample_rate = 0
        self.bitrate = 0
        self.channels = 0
        self.codec_type = u''
        self.codec_name = u''
        self.codec_description = u''

    def pprint(self):
        """Return a one-line human-readable summary of the stream."""
        codec = self.codec_type or self.codec_name or u'???'
        return (u'ASF (%s) %d bps, %s Hz, %d channels, %.2f seconds'
                % (codec, self.bitrate, self.sample_rate, self.channels, self.length))
def remap(raw_file, remap_dict_file, remap_file):
    """Rewrite raw interaction rows using the pickled id-remap dictionaries.

    SECURITY NOTE: remap_dict_file is unpickled — only feed it trusted data.
    """
    with open(remap_dict_file, 'rb') as f:
        # Seven pickles stored back-to-back, in this exact order.
        uid_map = pkl.load(f)
        iid_map = pkl.load(f)
        cid_map = pkl.load(f)
        sid_map = pkl.load(f)
        bid_map = pkl.load(f)
        aid_map = pkl.load(f)
        gid_map = pkl.load(f)
    out_lines = []
    with open(raw_file, 'r') as f:
        # Skip the header row; each line ends with '\n' which we strip.
        for line in f.readlines()[1:]:
            uid, iid, cid, sid, bid, date, _, aid, gid = line[:-1].split(',')
            month, day = int(date[:2]), int(date[2:])
            sea_id = str(get_season(month) + ORI_FEATSIZE)
            ud_id = str(get_ud(day) + ORI_FEATSIZE + 4)
            # Dates in the raw file are MMDD within 2015.
            full_date = '2015' + date
            time_stamp = str(int(time.mktime(datetime.datetime.strptime(full_date, '%Y%m%d').timetuple())))
            fields = [uid_map[uid], aid_map[aid], gid_map[gid], iid_map[iid],
                      cid_map[cid], sid_map[sid], bid_map[bid],
                      sea_id, ud_id, time_stamp]
            out_lines.append(','.join(fields) + '\n')
    with open(remap_file, 'w') as f:
        f.writelines(out_lines)
class MsgContainer(TLObject):
    """MTProto transport container holding a batch of messages.

    Schema: ``msg_container#73f1f8dc messages:vector<%Message> = MessageContainer``.
    """

    # TL constructor id for msg_container (MTProto service-message schema).
    # BUG FIX: the original line was truncated to 'ID =' — a syntax error.
    ID = 0x73f1f8dc

    __slots__ = ['messages']

    QUALNAME = 'MsgContainer'

    def __init__(self, messages: List[Message]):
        self.messages = messages

    @staticmethod
    def read(data: BytesIO, *args: Any) -> 'MsgContainer':
        """Deserialize: a bare count followed by `count` serialized messages."""
        count = Int.read(data)
        return MsgContainer([Message.read(data) for _ in range(count)])

    def write(self, *args: Any) -> bytes:
        """Serialize as constructor id, message count, then each message."""
        b = BytesIO()
        b.write(Int(self.ID, False))
        count = len(self.messages)
        b.write(Int(count))
        for message in self.messages:
            b.write(message.write())
        return b.getvalue()
def exportMultiBuy(fit, options, callback):
    """Serialize a fit as a multibuy item list, honoring the export options.

    With OPTIMIZE_PRICES the result is delivered asynchronously via
    *callback*; otherwise it is passed to *callback* when given, or returned.
    """
    amounts = {}
    for module in fit.modules:
        if not module.item:
            continue
        if module.isMutated:
            # Mutated modules cannot be bought off the market; skip them.
            continue
        _addItem(amounts, module.item)
        if module.charge and options[PortMultiBuyOptions.LOADED_CHARGES]:
            _addItem(amounts, module.charge, module.numCharges)
    for drone in fit.drones:
        _addItem(amounts, drone.item, drone.amount)
    for fighter in fit.fighters:
        _addItem(amounts, fighter.item, fighter.amount)
    if options[PortMultiBuyOptions.CARGO]:
        for cargo in fit.cargo:
            _addItem(amounts, cargo.item, cargo.amount)
    if options[PortMultiBuyOptions.IMPLANTS]:
        for implant in fit.implants:
            _addItem(amounts, implant.item)
    if options[PortMultiBuyOptions.BOOSTERS]:
        for booster in fit.boosters:
            _addItem(amounts, booster.item)
    if not options[PortMultiBuyOptions.OPTIMIZE_PRICES]:
        string = _prepareString(fit.ship.item, amounts)
        if callback:
            callback(string)
            return None
        return string

    def formatCheaperExportCb(replacementsCheaper):
        # Re-bucket every item onto its cheaper replacement (if any).
        updatedAmounts = {}
        for item, itemAmount in amounts.items():
            _addItem(updatedAmounts, replacementsCheaper.get(item, item), itemAmount)
        callback(_prepareString(fit.ship.item, updatedAmounts))

    sPrc.getInstance().findCheaperReplacements(amounts, formatCheaperExportCb)
class CacheFTPHandler(FTPHandler):
    """FTP handler that caches ftpwrapper connections keyed by target.

    Connections expire ``delay`` seconds after last use, and at most
    ``max_conns`` are kept alive simultaneously.
    """

    def __init__(self):
        self.cache = {}    # key -> ftpwrapper connection
        self.timeout = {}  # key -> absolute expiry time
        self.soonest = 0   # earliest expiry among cached connections
        self.delay = 60
        self.max_conns = 16

    def setTimeout(self, t):
        """Set the idle timeout (seconds) for cached connections."""
        self.delay = t

    def setMaxConns(self, m):
        """Set the maximum number of simultaneously cached connections."""
        self.max_conns = m

    def connect_ftp(self, user, passwd, host, port, dirs, timeout):
        """Return a cached connection for this target, creating it on miss."""
        key = (user, host, port, '/'.join(dirs), timeout)
        if key in self.cache:
            self.timeout[key] = time.time() + self.delay
        else:
            self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout)
            self.timeout[key] = time.time() + self.delay
        self.check_cache()
        return self.cache[key]

    def check_cache(self):
        """Evict expired connections, then enforce the max_conns limit."""
        t = time.time()
        if self.soonest <= t:
            # BUG FIX: snapshot the items — the original iterated the live
            # dict while deleting from it, which raises RuntimeError on
            # Python 3 (and silently skipped entries on Python 2).
            for k, v in list(self.timeout.items()):
                if v < t:
                    self.cache[k].close()
                    del self.cache[k]
                    del self.timeout[k]
            # BUG FIX: min() of an empty dict raises ValueError when every
            # entry just expired; only recompute when entries remain.
            if self.timeout:
                self.soonest = min(self.timeout.values())
        if len(self.cache) == self.max_conns:
            # Drop the connection expiring soonest to make room.
            for k, v in list(self.timeout.items()):
                if v == self.soonest:
                    del self.cache[k]
                    del self.timeout[k]
                    break
            if self.timeout:
                self.soonest = min(self.timeout.values())
class Representer(SafeRepresenter):
    """YAML representer with Python-2-specific handling: str/unicode/long,
    complex numbers, tuples, names, modules, classic instances, and
    arbitrary objects via the pickle (__reduce__) protocol."""

    def represent_str(self, data):
        # Pick the narrowest tag the bytes allow: plain str for ASCII,
        # python/str for valid UTF-8, binary (base64, literal style) otherwise.
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:python/str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)

    def represent_unicode(self, data):
        # ASCII-safe unicode is tagged python/unicode so it round-trips as
        # unicode; non-ASCII text gets the plain str tag.
        tag = None
        try:
            data.encode('ascii')
            tag = u'tag:yaml.org,2002:python/unicode'
        except UnicodeEncodeError:
            tag = u'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data)

    def represent_long(self, data):
        # Longs that survive an int() round-trip keep the standard int tag.
        tag = u'tag:yaml.org,2002:int'
        if (int(data) is not data):
            tag = u'tag:yaml.org,2002:python/long'
        return self.represent_scalar(tag, unicode(data))

    def represent_complex(self, data):
        # Emit the shortest exact textual form: pure real, pure imaginary,
        # or the full a+bj / a-bj spelling.
        if (data.imag == 0.0):
            data = (u'%r' % data.real)
        elif (data.real == 0.0):
            data = (u'%rj' % data.imag)
        elif (data.imag > 0):
            data = (u'%r+%rj' % (data.real, data.imag))
        else:
            data = (u'%r%rj' % (data.real, data.imag))
        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)

    def represent_tuple(self, data):
        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)

    def represent_name(self, data):
        # Functions/classes are stored by dotted import path with no payload.
        name = (u'%s.%s' % (data.__module__, data.__name__))
        return self.represent_scalar((u'tag:yaml.org,2002:python/name:' + name), u'')

    def represent_module(self, data):
        return self.represent_scalar((u'tag:yaml.org,2002:python/module:' + data.__name__), u'')

    def represent_instance(self, data):
        # Classic (old-style) instances: serialize via __getinitargs__ /
        # __getstate__ when provided, otherwise the instance __dict__.
        cls = data.__class__
        class_name = (u'%s.%s' % (cls.__module__, cls.__name__))
        args = None
        state = None
        if hasattr(data, '__getinitargs__'):
            args = list(data.__getinitargs__())
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__
        if ((args is None) and isinstance(state, dict)):
            return self.represent_mapping((u'tag:yaml.org,2002:python/object:' + class_name), state)
        if (isinstance(state, dict) and (not state)):
            return self.represent_sequence((u'tag:yaml.org,2002:python/object/new:' + class_name), args)
        value = {}
        if args:
            value['args'] = args
        value['state'] = state
        return self.represent_mapping((u'tag:yaml.org,2002:python/object/new:' + class_name), value)

    def represent_object(self, data):
        # New-style objects: obtain a pickle-protocol-2 reduce tuple and map
        # it onto the python/object/new or python/object/apply tags.
        cls = type(data)
        if (cls in copy_reg.dispatch_table):
            reduce = copy_reg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError(('cannot represent object: %r' % data))
        # Pad the reduce tuple out to exactly five slots.
        reduce = (list(reduce) + ([None] * 5))[:5]
        (function, args, state, listitems, dictitems) = reduce
        args = list(args)
        if (state is None):
            state = {}
        if (listitems is not None):
            listitems = list(listitems)
        if (dictitems is not None):
            dictitems = dict(dictitems)
        if (function.__name__ == '__newobj__'):
            # Protocol 2: first arg is the real class, rest are __new__ args.
            function = args[0]
            args = args[1:]
            tag = u'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = u'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        function_name = (u'%s.%s' % (function.__module__, function.__name__))
        if ((not args) and (not listitems) and (not dictitems) and isinstance(state, dict) and newobj):
            return self.represent_mapping((u'tag:yaml.org,2002:python/object:' + function_name), state)
        if ((not listitems) and (not dictitems) and isinstance(state, dict) and (not state)):
            return self.represent_sequence((tag + function_name), args)
        value = {}
        if args:
            value['args'] = args
        if (state or (not isinstance(state, dict))):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping((tag + function_name), value)
def test_is_open_on_minute(benchmark):
    """Benchmark XHKG is_open_on_minute around session/lunch boundaries."""
    xhkg = get_calendar('XHKG')
    minute_strings = [
        '2019-10-11 01:20:00',
        '2019-10-11 01:30:00',
        '2019-10-11 01:31:00',
        '2019-10-11 04:31:00',
        '2019-10-11 08:00:00',
        '2019-10-11 08:01:00',
    ]
    timestamps = [pd.Timestamp(s, tz=UTC) for s in minute_strings]
    benchmark(is_open_on_minute_bench, xhkg, timestamps)
def make_dataset(path, impl, skip_warmup=False):
    """Instantiate the indexed dataset at *path* for implementation *impl*.

    Prints a diagnostic and returns None when the path does not exist or
    the implementation name is unusable.
    """
    if not IndexedDataset.exists(path):
        print(f'Dataset does not exist: {path}')
        print('Path should be a basename that both .idx and .bin can be appended to get full filenames.')
        return None
    if impl == 'infer':
        impl = infer_dataset_impl(path)
    if impl == 'lazy' and IndexedDataset.exists(path):
        return IndexedDataset(path)
    if impl == 'cached' and IndexedDataset.exists(path):
        return IndexedCachedDataset(path)
    if impl == 'mmap' and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path, skip_warmup)
    print(f'Unknown dataset implementation: {impl}')
    return None
class UperNetPyramidPoolingBlock(nn.Module):
    """One pyramid-pooling branch: adaptive average pool, then a 1x1 conv module."""

    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        # Register each layer under its index so its parameters are tracked.
        for index, layer in enumerate(self.layers):
            self.add_module(str(index), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class Bottleneck(_Bottleneck):
    """ResNeXt-style bottleneck with grouped 3x3 conv and optional SAC/DCN
    replacement for conv2 (mmdet/DetectoRS flavour)."""

    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs):
        """groups/base_width/base_channels control the grouped-conv width;
        groups == 1 degenerates to the plain ResNet bottleneck width."""
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        if (groups == 1):
            width = self.planes
        else:
            # ResNeXt width rule: planes * (base_width / base_channels) per group.
            width = (math.floor((self.planes * (base_width / base_channels))) * groups)
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, width, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, width, postfix=2)
        (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3)
        self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        # conv2 is, in priority order: SAC, plain conv (also the DCN
        # fallback-on-stride case), or DCN.
        if self.with_sac:
            self.conv2 = build_conv_layer(self.sac, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        elif ((not self.with_dcn) or fallback_on_stride):
            self.conv2 = build_conv_layer(self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        else:
            assert (self.conv_cfg is None), 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(self.conv_cfg, width, (self.planes * self.expansion), kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
def dL3_hat(mu, C_hat, r, m, n):
    """Accumulate the two-component gradient of the L3 objective.

    For each of the m rows of C_hat, adds r[i] * (row[j] - row[2]) / denom
    for j in {0, 1}, where denom mixes mu against columns 0, 1 and n-1.
    """
    grad = numpy.zeros(2)
    for i in range(m):
        row = C_hat[i]
        denom = ((row[0] - row[n - 1]) * mu[0]
                 + (row[1] - row[n - 1]) * mu[1]
                 + row[2])
        grad[0] += r[i] * ((row[0] - row[2]) / denom)
        grad[1] += r[i] * ((row[1] - row[2]) / denom)
    return grad
class TestInhibitAnyPolicyExtension():
    def test_inhibit_any_policy(self, backend):
        """The inhibitAnyPolicy extension's skip_certs must parse as 5."""
        pem_path = os.path.join('x509', 'custom', 'inhibit_any_policy_5.pem')
        cert = _load_cert(pem_path, x509.load_pem_x509_certificate)
        extension = cert.extensions.get_extension_for_class(x509.InhibitAnyPolicy)
        assert extension.value.skip_certs == 5
def get_hyperplanes(p1, p2, wrap):
    """Collect hyperplanes between p1 and every periodic image of p2.

    For each wrapped dimension the current image set is mirrored by +/- 2*pi,
    tripling the candidate set per wrapped axis. Image ordering matches the
    original: originals, then all low shifts, then all high shifts.
    """
    period = 2.0 * np.pi
    images = [p2]
    for axis in range(len(wrap)):
        if not wrap[axis]:
            continue
        lows = []
        highs = []
        for point in images:
            low = np.copy(point)
            low[axis] -= period
            lows.append(low)
            high = np.copy(point)
            high[axis] += period
            highs.append(high)
        images += (lows + highs)
    planes = []
    for point in images:
        planes = planes + get_hyperplane(p1, point)
    return planes
def remap_ids(w, old2new, id_order=None, **kwargs):
    """Return a copy of spatial weights *w* with ids translated via *old2new*.

    Parameters
    ----------
    w : W
        Source spatial weights object.
    old2new : dict
        Mapping from each old id to its new id.
    id_order : list, optional
        Explicit id order for the result. When omitted, w.id_order (remapped)
        is used if set, else the W constructor's default ordering.
    **kwargs
        Forwarded to the W constructor.

    Raises
    ------
    Exception
        If *w* is not a W instance.
    """
    if not isinstance(w, W):
        raise Exception('w must be a spatial weights object')
    new_neigh = {}
    new_weights = {}
    for key, value in list(w.neighbors.items()):
        new_key = old2new[key]
        new_neigh[new_key] = [old2new[i] for i in value]
        new_weights[new_key] = copy.copy(w.weights[key])
    # BUG FIX: the original used a mutable default argument (id_order=[]).
    # None avoids the shared-default pitfall; truthiness behavior is the same.
    if id_order:
        return W(new_neigh, new_weights, id_order, **kwargs)
    if w.id_order:
        id_order = [old2new[i] for i in w.id_order]
        return W(new_neigh, new_weights, id_order, **kwargs)
    return W(new_neigh, new_weights, **kwargs)
class F39_Network(F27_Network):
    """Fedora 39 network command: adds per-family DNS search / auto-DNS flags,
    all of which require --device."""

    removedKeywords = F27_Network.removedKeywords
    removedAttrs = F27_Network.removedAttrs

    def _getParser(self):
        op = F27_Network._getParser(self)
        op.add_argument('--ipv4-dns-search', default=None, version=F39, dest='ipv4_dns_search', help='\n Use this option to set IPv4 search domains. For example: ``--ipv4-dns-search domain1.example.com,domain2.example.com``\n\n Requires ``--device`` to be specified.')
        op.add_argument('--ipv6-dns-search', default=None, version=F39, dest='ipv6_dns_search', help='\n Use this option to set IPv6 search domains. For example: ``--ipv6-dns-search domain1.example.com,domain2.example.com``\n\n Requires ``--device`` to be specified.')
        op.add_argument('--ipv4-ignore-auto-dns', action='store_true', version=F39, dest='ipv4_ignore_auto_dns', help='\n Use this option to ignore IPv4 automatic DNS.\n\n Requires ``--device`` to be specified.')
        op.add_argument('--ipv6-ignore-auto-dns', action='store_true', version=F39, dest='ipv6_ignore_auto_dns', help='\n Use this option to ignore IPv6 automatic DNS.\n\n Requires ``--device`` to be specified.')
        return op

    def parse(self, args):
        retval = F27_Network.parse(self, args)
        # Guard clause: all checks below apply only when --device is missing.
        if retval.device:
            return retval
        if retval.ipv4_dns_search:
            raise KickstartParseError(_('Option --ipv4-dns-search requires --device to be specified'), lineno=self.lineno)
        if retval.ipv6_dns_search:
            raise KickstartParseError(_('Option --ipv6-dns-search requires --device to be specified'), lineno=self.lineno)
        if retval.ipv4_ignore_auto_dns:
            raise KickstartParseError(_('Option --ipv4-ignore-auto-dns requires --device to be specified'), lineno=self.lineno)
        if retval.ipv6_ignore_auto_dns:
            raise KickstartParseError(_('Option --ipv6-ignore-auto-dns requires --device to be specified'), lineno=self.lineno)
        return retval
def _find_dists(dists: List[str]) -> List[str]:
    """Expand each entry of *dists* (a literal path or glob) into real files.

    Raises InvalidDistribution when a pattern matches nothing.
    """
    uploads = []
    for filename in dists:
        if os.path.exists(filename):
            uploads.append(filename)
            continue
        matches = glob.glob(filename)
        if not matches:
            raise exceptions.InvalidDistribution(
                "Cannot find file (or expand pattern): '%s'" % filename)
        uploads.extend(matches)
    return _group_wheel_files_first(uploads)
class Axicon(DOE):
    """Axicon diffractive optical element: a radial (conical) phase ramp.

    Parameters
    ----------
    period : float
        Radial period of the conical phase ramp.
    radius : float, optional
        Aperture radius; transmission is zero outside it when given.
    aberration : optional
        BUG FIX: the original accepted this argument but silently dropped it;
        it is now stored on the instance for callers to inspect.
    """

    def __init__(self, period, radius=None, aberration=None):
        global bd
        from ..util.backend_functions import backend as bd
        self.period = period
        self.radius = radius
        self.aberration = aberration

    def get_transmittance(self, xx, yy):
        """Return the complex transmittance sampled on the (xx, yy) grid."""
        t = 1
        if self.radius is not None:
            # Hard circular aperture: zero transmission outside the radius.
            t = bd.where(((xx ** 2) + (yy ** 2)) < (self.radius ** 2), t, bd.zeros_like(xx))
        r = bd.sqrt((xx ** 2) + (yy ** 2))
        # Conical phase: linear in radius, one full cycle per `period`.
        phase_shift = ((-2) * bd.pi * r) / self.period
        return t * bd.exp(1j * phase_shift)
def cache_intermediate_datasets(cached_dataset, cache_on_cpu, model, module_name, forward_fn, path=None):
    """Capture the inputs reaching `module_name` for every dataset item.

    A forward pre-hook grabs the target module's inputs and aborts the
    forward pass (StopForwardException), so only layers up to `module_name`
    actually run. With cache_on_cpu the inputs are accumulated in memory and
    returned; otherwise each item is written to `path` via save_to_cache and
    the returned list stays empty.
    """
    cached_data = []
    iterator = iter(cached_dataset)
    for idx in range(len(cached_dataset)):
        def fn(_, inputs):
            # Pre-hook: record this item's inputs, then stop the forward pass.
            inputs = [*inputs]
            if cache_on_cpu:
                cached_data.append([inp.cpu() for inp in inputs])
            else:
                save_to_cache(inputs, path, idx)
            raise StopForwardException
        handle = get_named_module(model, module_name).register_forward_pre_hook(fn)
        data = next(iterator)
        try:
            with in_eval_mode(model), torch.no_grad():
                _ = forward_fn(model, data)
        except StopForwardException:
            pass
        # NOTE(review): if forward_fn raises anything other than
        # StopForwardException the hook is never removed — confirm whether a
        # try/finally around the forward call is wanted here.
        handle.remove()
    return cached_data
.parametrize('src_order', [15, 20, 25, 30])
.parametrize('src_gamma', [(- 1.0), (- 0.5), 0.0])
.parametrize('dst_order', [15, 20, 25, 30])
.parametrize('dst_gamma', [(- 1.0), (- 0.5), 0.0])
def test_gc2gc(src_order, src_gamma, dst_order, dst_gamma):
np.random.seed(98765)
src = np.random.rand((src_order + 1))
dst = pysptk.gc2gc(src, src_gamma, dst_order, dst_gamma)
assert np.all(np.isfinite(dst)) |
.parametrize('has_output_dir', [False, True])
def test_on_output_file_button_exists(skip_qtbot, tmp_path, mocker, has_output_dir):
mock_prompt = mocker.patch('randovania.gui.lib.common_qt_lib.prompt_user_for_output_file', autospec=True)
if has_output_dir:
output_directory = tmp_path.joinpath('output_path')
expected_default_name = str(tmp_path.joinpath('output_path', 'SM Randomizer - MyHash'))
output_directory.mkdir()
else:
output_directory = None
expected_default_name = 'SM Randomizer - MyHash'
options = MagicMock()
options.options_for_game.return_value = SuperMetroidPerGameOptions(cosmetic_patches=SuperMetroidCosmeticPatches.default(), output_directory=output_directory, output_format='smc')
window = SuperMetroidGameExportDialog(options, {}, 'MyHash', True, [])
mock_prompt.return_value = tmp_path.joinpath('foo', 'game.smc')
skip_qtbot.mouseClick(window.output_file_button, QtCore.Qt.MouseButton.LeftButton)
mock_prompt.assert_called_once_with(window, (expected_default_name + '.smc'), window.valid_output_file_types)
assert (window.output_file_edit.text() == str(tmp_path.joinpath('foo', 'game.smc')))
assert tmp_path.joinpath('foo').is_dir() |
def test_git_clone_fails_for_non_existent_revision(source_url: str) -> None:
    """Cloning at an unknown revision must raise a descriptive console error."""
    bogus_revision = sha1(uuid.uuid4().bytes).hexdigest()
    with pytest.raises(PoetryConsoleError) as exc_info:
        Git.clone(url=source_url, revision=bogus_revision)
    expected = f"Failed to clone {source_url} at '{bogus_revision}'"
    assert expected in str(exc_info.value)
(scope='session')
def gerb_l2_hr_h5_dummy_file(tmp_path_factory):
    """Create a minimal GERB L2 HR HDF5 file with dummy values for testing.

    All image datasets are 1237x1237 big-endian int16 filled with ones, each
    carrying the quantisation/offset attributes the reader expects.
    """
    filename = (tmp_path_factory.mktemp('data') / FNAME)
    with h5py.File(filename, 'w') as fid:
        # Viewing/illumination geometry, quantised in 0.1-degree steps.
        fid.create_group('/Angles')
        fid['/Angles/Relative Azimuth'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Angles/Relative Azimuth'].attrs['Quantisation Factor'] = np.array(0.1, dtype='float64')
        fid['/Angles/Solar Zenith'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Angles/Solar Zenith'].attrs['Quantisation Factor'] = np.array(0.1, dtype='float64')
        write_h5_null_string_att(fid['/Angles/Relative Azimuth'].id, 'Unit', 'Degree')
        fid['/Angles/Viewing Azimuth'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Angles/Viewing Azimuth'].attrs['Quantisation Factor'] = np.array(0.1, dtype='float64')
        write_h5_null_string_att(fid['/Angles/Viewing Azimuth'].id, 'Unit', 'Degree')
        fid['/Angles/Viewing Zenith'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Angles/Viewing Zenith'].attrs['Quantisation Factor'] = np.array(0.1, dtype='float64')
        write_h5_null_string_att(fid['/Angles/Viewing Zenith'].id, 'Unit', 'Degree')
        # Instrument / processing-system identification groups.
        fid.create_group('/GERB')
        # NOTE(review): `dt` below is built but never used afterwards —
        # presumably write_h5_null_string_att creates its own type; confirm.
        dt = h5py.h5t.TypeID.copy(h5py.h5t.C_S1)
        dt.set_size(3)
        dt.set_strpad(h5py.h5t.STR_NULLTERM)
        write_h5_null_string_att(fid['/GERB'].id, 'Instrument Identifier', 'G4')
        fid.create_group('/GGSPS')
        fid['/GGSPS'].attrs['L1.5 NANRG Product Version'] = np.array((- 1), dtype='int32')
        fid.create_group('/Geolocation')
        write_h5_null_string_att(fid['/Geolocation'].id, 'Geolocation File Name', 'G4_SEV4_L20_HR_GEO__181500_V010.hdf')
        fid['/Geolocation'].attrs['Nominal Satellite Longitude (degrees)'] = np.array(0.0, dtype='float64')
        fid.create_group('/Imager')
        fid['/Imager'].attrs['Instrument Identifier'] = np.array(4, dtype='int32')
        write_h5_null_string_att(fid['/Imager'].id, 'Type', 'SEVIRI')
        fid.create_group('/RMIB')
        # Radiometric products: fluxes, radiances and band corrections.
        fid.create_group('/Radiometry')
        fid['/Radiometry'].attrs['SEVIRI Radiance Definition Flag'] = np.array(2, dtype='int32')
        fid['/Radiometry/A Values (per GERB detector cell)'] = np.ones(shape=(256,), dtype=np.dtype('>f8'))
        fid['/Radiometry/C Values (per GERB detector cell)'] = np.ones(shape=(256,), dtype=np.dtype('>f8'))
        fid['/Radiometry/Longwave Correction'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Radiometry/Longwave Correction'].attrs['Offset'] = np.array(1.0, dtype='float64')
        fid['/Radiometry/Longwave Correction'].attrs['Quantisation Factor'] = np.array(0.005, dtype='float64')
        fid['/Radiometry/Shortwave Correction'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Radiometry/Shortwave Correction'].attrs['Offset'] = np.array(1.0, dtype='float64')
        fid['/Radiometry/Shortwave Correction'].attrs['Quantisation Factor'] = np.array(0.005, dtype='float64')
        fid['/Radiometry/Solar Flux'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Radiometry/Solar Flux'].attrs['Quantisation Factor'] = np.array(0.25, dtype='float64')
        write_h5_null_string_att(fid['/Radiometry/Solar Flux'].id, 'Unit', 'Watt per square meter')
        fid['/Radiometry/Solar Radiance'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Radiometry/Solar Radiance'].attrs['Quantisation Factor'] = np.array(0.05, dtype='float64')
        write_h5_null_string_att(fid['/Radiometry/Solar Radiance'].id, 'Unit', 'Watt per square meter per steradian')
        fid['/Radiometry/Thermal Flux'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Radiometry/Thermal Flux'].attrs['Quantisation Factor'] = np.array(0.25, dtype='float64')
        write_h5_null_string_att(fid['/Radiometry/Thermal Flux'].id, 'Unit', 'Watt per square meter')
        fid['/Radiometry/Thermal Radiance'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Radiometry/Thermal Radiance'].attrs['Quantisation Factor'] = np.array(0.05, dtype='float64')
        write_h5_null_string_att(fid['/Radiometry/Thermal Radiance'].id, 'Unit', 'Watt per square meter per steradian')
        # Scene identification: cloud properties and ADM versions.
        fid.create_group('/Scene Identification')
        write_h5_null_string_att(fid['/Scene Identification'].id, 'Solar Angular Dependency Models Set Version', 'CERES_TRMM.1')
        write_h5_null_string_att(fid['/Scene Identification'].id, 'Thermal Angular Dependency Models Set Version', 'RMIB.3')
        fid['/Scene Identification/Cloud Cover'] = np.ones(shape=(1237, 1237), dtype=np.dtype('uint8'))
        fid['/Scene Identification/Cloud Cover'].attrs['Quantisation Factor'] = np.array(0.01, dtype='float64')
        write_h5_null_string_att(fid['/Scene Identification/Cloud Cover'].id, 'Unit', 'Percent')
        fid['/Scene Identification/Cloud Optical Depth (logarithm)'] = np.ones(shape=(1237, 1237), dtype=np.dtype('>i2'))
        fid['/Scene Identification/Cloud Optical Depth (logarithm)'].attrs['Quantisation Factor'] = np.array(0.00025, dtype='float64')
        fid['/Scene Identification/Cloud Phase'] = np.ones(shape=(1237, 1237), dtype=np.dtype('uint8'))
        fid['/Scene Identification/Cloud Phase'].attrs['Quantisation Factor'] = np.array(0.01, dtype='float64')
        write_h5_null_string_att(fid['/Scene Identification/Cloud Phase'].id, 'Unit', 'Percent (Water=0%,Mixed,Ice=100%)')
        # Per-row acquisition times as fixed-width byte strings.
        fid.create_group('/Times')
        fid['/Times/Time (per row)'] = np.ones(shape=(1237,), dtype=np.dtype('|S22'))
    return filename
_tests('aes_cbc_pkcs5_test.json')
def test_aes_cbc_pkcs5(backend, wycheproof):
    """Check AES-CBC with PKCS7 padding against one Wycheproof vector,
    in both the encrypt and decrypt directions."""
    tc = wycheproof.testcase
    key = binascii.unhexlify(tc['key'])
    iv = binascii.unhexlify(tc['iv'])
    msg = binascii.unhexlify(tc['msg'])
    ct = binascii.unhexlify(tc['ct'])
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend)
    padder = padding.PKCS7(128).padder()
    enc = cipher.encryptor()
    computed_ct = enc.update(padder.update(msg) + padder.finalize()) + enc.finalize()
    dec = cipher.decryptor()
    padded_msg = dec.update(ct) + dec.finalize()
    unpadder = padding.PKCS7(128).unpadder()
    if wycheproof.valid or wycheproof.acceptable:
        assert computed_ct == ct
        assert unpadder.update(padded_msg) + unpadder.finalize() == msg
    else:
        # Invalid vectors must fail at unpadding time.
        assert computed_ct != ct
        with pytest.raises(ValueError):
            unpadder.update(padded_msg) + unpadder.finalize()
class HP11713A(Instrument):
    """Hewlett-Packard HP11713A attenuator/switch driver.

    ATTENUATOR_X / ATTENUATOR_Y map an attenuation value to the boolean
    states of the four switch channels driving each section; they are left
    empty here and populated for the specific attenuators fitted.
    """

    ATTENUATOR_X = {}
    ATTENUATOR_Y = {}

    channels = Instrument.MultiChannelCreator(SwitchDriverChannel, list(range(0, 9)))

    def __init__(self, adapter, name='Hewlett-Packard HP11713A', **kwargs):
        super().__init__(adapter, name, includeSCPI=False, send_end=True, **kwargs)

    def _apply_attenuation(self, mapping, channel_numbers, attenuation):
        """Set the four channels of one section from *mapping*.

        Shared by attenuation_x/attenuation_y, which previously duplicated
        this logic. A table whose second key is 10 holds 10 dB steps, so the
        requested value is rounded to the nearest ten in that case.
        """
        rounding = -1 if list(mapping.keys())[1] == 10 else 0
        states = mapping[int(round(attenuation, rounding))]
        for number, state in zip(channel_numbers, states):
            getattr(self, 'ch_%d' % number).enabled = state

    def attenuation_x(self, attenuation):
        """Set the attenuation of section X (channels 1-4)."""
        self._apply_attenuation(self.ATTENUATOR_X, (1, 2, 3, 4), attenuation)

    def attenuation_y(self, attenuation):
        """Set the attenuation of section Y (channels 5-8)."""
        self._apply_attenuation(self.ATTENUATOR_Y, (5, 6, 7, 8), attenuation)

    def deactivate_all(self):
        """Release all switch channels at once (instrument 'B' command)."""
        self.write('B')
def pretix_quotas():
    """Static pretix /quotas API response fixture: two quotas, no paging."""
    quotas = [
        {'id': 1, 'name': 'Ticket Quota', 'size': 200, 'available_number': 118,
         'items': [1], 'variations': [], 'subevent': None,
         'close_when_sold_out': False, 'closed': False},
        {'id': 2, 'name': 'T-shirt Quota', 'size': 200, 'available_number': 150,
         'items': [2], 'variations': [1, 2], 'subevent': None,
         'close_when_sold_out': False, 'closed': False},
    ]
    return {'count': 2, 'next': None, 'previous': None, 'results': quotas}
class TasksRendererMixin():
    """Mixin that serializes task dicts to XML via self.render_text_element.

    Expects the host class to provide `uris` (a set used for de-duplication)
    and `context` (render options).
    """

    def render_task(self, xml, task):
        # Each task is emitted at most once per export run.
        if (task['uri'] not in self.uris):
            self.uris.add(task['uri'])
            xml.startElement('task', {'dc:uri': task['uri']})
            self.render_text_element(xml, 'uri_prefix', {}, task['uri_prefix'])
            self.render_text_element(xml, 'uri_path', {}, task['uri_path'])
            self.render_text_element(xml, 'dc:comment', {}, task['comment'])
            self.render_text_element(xml, 'order', {}, task['order'])
            # One <title>/<text> pair per configured language.
            for (lang_code, lang_string, lang_field) in get_languages():
                self.render_text_element(xml, 'title', {'lang': lang_code}, task[('title_%s' % lang_code)])
                self.render_text_element(xml, 'text', {'lang': lang_code}, task[('text_%s' % lang_code)])
            # Attribute references are emitted as empty elements with a URI.
            self.render_text_element(xml, 'start_attribute', {'dc:uri': task['start_attribute']}, None)
            self.render_text_element(xml, 'end_attribute', {'dc:uri': task['end_attribute']}, None)
            self.render_text_element(xml, 'days_before', {}, task['days_before'])
            self.render_text_element(xml, 'days_after', {}, task['days_after'])
            xml.startElement('conditions', {})
            if (('conditions' in task) and task['conditions']):
                for condition in task['conditions']:
                    self.render_text_element(xml, 'condition', {'dc:uri': condition['uri']}, None)
            xml.endElement('conditions')
            xml.startElement('catalogs', {})
            if (('catalogs' in task) and task['catalogs']):
                for catalog in task['catalogs']:
                    self.render_text_element(xml, 'catalog', {'dc:uri': catalog}, None)
            xml.endElement('catalogs')
            xml.endElement('task')
            # Optionally inline the referenced condition definitions as well.
            if self.context.get('conditions'):
                for condition in task['conditions']:
                    self.render_condition(xml, condition)
def inspect_node(mixed, *, _partial=None):
    """Build a ConfigurableInspection for a Configurable type, instance,
    or partially-applied wrapper around one.

    ``_partial`` carries (args, keywords) captured from a partial wrapper.
    """
    if isconfigurabletype(mixed, strict=True):
        inst, typ = None, mixed
    elif isconfigurable(mixed):
        inst, typ = mixed, type(mixed)
    elif hasattr(mixed, 'func'):
        # functools.partial-like object: unwrap it and remember the bound
        # positional/keyword arguments.
        return inspect_node(mixed.func, _partial=(mixed.args, mixed.keywords))
    else:
        raise TypeError('Not a Configurable, nor a Configurable instance and not even a partially configured Configurable. Check your inputs.')
    options = list(typ.__options__)
    processors = list(typ.__processors__)
    return ConfigurableInspection(typ, inst, options, processors, _partial)
class TweetEncoder(nn.Module):
    """Map tokenized tweets to fixed-size embeddings.

    BERTweet produces a pooled sentence representation that is then
    linearly projected down to ``output_dim``.
    """

    def __init__(self, output_dim=512):
        super().__init__()
        # BERTweet-base pooled-output width.
        self._hidden_size = 768
        self.bertweet = AutoModel.from_pretrained('vinai/bertweet-base')
        self.linear_transform = nn.Linear(self._hidden_size, output_dim)
        # Xavier init for the projection weights.
        nn.init.xavier_normal_(self.linear_transform.weight)

    def forward(self, tweet_input_ids):
        """Return projected embeddings for a batch of token-id tensors."""
        # Index 1 selects the transformer's pooled output.
        pooled_output = self.bertweet(tweet_input_ids)[1]
        return self.linear_transform(pooled_output)
def aggregate_text_similarities(result_dict):
    """Pool the per-prompt text-similarity scores and aggregate them.

    Args:
        result_dict: mapping of prompt -> dict with a 'text_similarities'
            sequence of scores; prompts may hold differing numbers of scores.

    Returns:
        (mean, std) over all scores pooled across every prompt.
    """
    # Concatenate per-prompt score arrays instead of np.array(...).flatten():
    # the latter breaks when prompts have differing numbers of scores
    # (ragged construction raises ValueError on NumPy >= 1.24).
    per_prompt = [np.ravel(result_dict[prompt]['text_similarities']) for prompt in result_dict]
    all_scores = np.concatenate(per_prompt) if per_prompt else np.array([])
    total_average = np.average(all_scores)
    total_std = np.std(all_scores)
    return (total_average, total_std)
def reduce_leaky_relu(_, op_tensor_tuple: Tuple[(Op, List[tf.Tensor])], _op_mask) -> (str, tf.Operation, tf.Operation):
    """Recreate a leaky-relu op over the (possibly reduced) input tensor.

    Returns (new op name, new tf op, module) — here the module is the op
    itself.
    """
    op, input_tensors = op_tensor_tuple
    name = 'reduced_' + op.dotted_name
    # Carry over the original slope; it must have been set on the module.
    alpha = op.get_module().get_attr('alpha')
    assert alpha is not None
    new_tensor = tf.nn.leaky_relu(input_tensors[0], alpha=alpha, name=name)
    return (name, new_tensor.op, new_tensor.op)
def test_show_issue() -> None:
    """Listing a base class before its own subclass must fail MRO linearization."""

    class RESTObject():

        def __init__(self, manager: str, attrs: int) -> None:
            ...

    class Mixin(RESTObject):
        ...

    # (RESTObject, Mixin) cannot be linearized: the base precedes its subclass.
    with pytest.raises(TypeError) as excinfo:

        class Wrongv4Object(RESTObject, Mixin):
            ...
    assert ('MRO' in excinfo.exconly())

    # The mixin-first ordering resolves cleanly.
    class Correctv4Object(Mixin, RESTObject):
        ...
def verify_build_token(token, aud, token_type, instance_keys):
    """Decode and validate a build-worker JWT.

    Verifies the signing key (via the ``kid`` header), the standard claims
    (audience, issuer, expiration with clock-skew leeway), the anonymous
    subject, and the build ``context`` payload against its schema.

    Args:
        token: the encoded JWT string.
        aud: expected audience claim.
        token_type: expected ``context.token_type`` value.
        instance_keys: provider of service public keys and the issuer name.

    Returns:
        The decoded JWT payload dict.

    Raises:
        InvalidBuildTokenException: for any malformed or unverifiable token.
    """
    try:
        headers = jwt.get_unverified_header(token)
    except jwtutil.InvalidTokenError as ite:
        logger.error('Invalid token reason: %s', ite)
        raise InvalidBuildTokenException(ite) from ite
    kid = headers.get('kid', None)
    if (kid is None):
        logger.error('Missing kid header on encoded JWT: %s', token)
        raise InvalidBuildTokenException('Missing kid header')
    public_key = instance_keys.get_service_key_public_key(kid)
    if (public_key is None):
        logger.error('Could not find requested service key %s with encoded JWT: %s', kid, token)
        raise InvalidBuildTokenException('Unknown service key')
    try:
        payload = jwtutil.decode(token, public_key, verify=True, algorithms=[ALGORITHM], audience=aud, issuer=instance_keys.service_name, leeway=JWT_CLOCK_SKEW_SECONDS)
    except jwtutil.InvalidTokenError as ite:
        logger.error('Invalid token reason: %s', ite)
        raise InvalidBuildTokenException(ite) from ite
    if ('sub' not in payload):
        raise InvalidBuildTokenException('Missing sub field in JWT')
    if (payload['sub'] != ANONYMOUS_SUB):
        raise InvalidBuildTokenException('Wrong sub field in JWT')
    # Fix: read context fields with .get() — the previous direct indexing
    # raised an unhandled KeyError when 'context' was present but missing
    # one of the required keys, instead of InvalidBuildTokenException.
    context = payload.get('context') or {}
    if ((not context.get('token_type')) or (not context.get('build_id')) or (not context.get('job_id')) or (not context.get('expiration'))):
        raise InvalidBuildTokenException('Missing context field in JWT')
    try:
        jsonschema.validate(context, BUILD_TOKEN_CONTEXT_SCHEMA)
    except jsonschema.ValidationError:
        raise InvalidBuildTokenException('Unable to validate build token context schema: malformed context')
    if (context['token_type'] != token_type):
        raise InvalidBuildTokenException(('Build token type in JWT does not match expected type: %s' % token_type))
    return payload
class VolSDFTrainRunner():
    """Training driver for a VolSDF-style neural implicit surface model.

    Parses a pyhocon config, prepares experiment/checkpoint directories,
    builds the dataset, model, loss, optimizer and scheduler, optionally
    resumes from a checkpoint, and runs the training loop (``run``) with
    periodic checkpointing, plotting, and optional render-only modes.
    """

    def __init__(self, **kwargs):
        # NOTE(review): expects kwargs: conf, scan_id, view_num, batch_size,
        # nepochs, exps_folder_name, gpu_index, expname, is_continue,
        # timestamp, checkpoint, do_vis — confirm against the caller.
        torch.set_default_dtype(torch.float32)
        torch.set_num_threads(1)
        # Read the config template and substitute the SCAN_ID / VIEW_NUM
        # placeholders before parsing.
        f = open(kwargs['conf'])
        conf_text = f.read()
        conf_text = conf_text.replace('SCAN_ID', str(kwargs['scan_id']))
        conf_text = conf_text.replace('VIEW_NUM', str(kwargs['view_num']))
        f.close()
        self.conf = ConfigFactory.parse_string(conf_text)
        self.conf_path = kwargs['conf']
        self.batch_size = kwargs['batch_size']
        self.nepochs = kwargs['nepochs']
        self.exps_folder_name = kwargs['exps_folder_name']
        self.GPU_INDEX = kwargs['gpu_index']
        self.expname = (self.conf.get_string('train.expname') + kwargs['expname'])
        # NOTE(review): this local scan_id is computed but not used below;
        # dataset_conf['scan_id'] is set from kwargs directly later.
        scan_id = (kwargs['scan_id'] if (kwargs['scan_id'] != (- 1)) else self.conf.get_int('dataset.scan_id', default=(- 1)))
        lustre_exp_path = self.conf.get_string('train.root_path')
        # Resolve continuation: with timestamp == 'latest', look up the most
        # recent run directory; an explicitly assigned checkpoint dir in the
        # config forces continuation regardless.
        if (kwargs['is_continue'] and (kwargs['timestamp'] == 'latest')):
            if os.path.exists(os.path.join(lustre_exp_path, kwargs['exps_folder_name'], self.expname)):
                timestamps = os.listdir(os.path.join(lustre_exp_path, kwargs['exps_folder_name'], self.expname))
                if (len(timestamps) == 0):
                    is_continue = False
                    timestamp = None
                else:
                    # Directory names are sortable timestamps; take the newest.
                    timestamp = sorted(timestamps)[(- 1)]
                    is_continue = True
            else:
                is_continue = False
                timestamp = None
            if (self.conf.get_string('train.assign_checkpnts_dir', default=None) is not None):
                is_continue = True
                timestamp = kwargs['timestamp']
                print('loading model from timestamp={}'.format(timestamp))
        else:
            timestamp = kwargs['timestamp']
            is_continue = kwargs['is_continue']
        # Create the experiment directory tree for this run.
        utils.mkdir_ifnotexists(os.path.join(lustre_exp_path, self.exps_folder_name))
        self.expdir = os.path.join(lustre_exp_path, self.exps_folder_name, self.expname)
        if ('/' in self.expname):
            utils.mkdir_ifnotexists(os.path.join(lustre_exp_path, self.exps_folder_name, self.expname.split('/')[0]))
        utils.mkdir_ifnotexists(self.expdir)
        self.timestamp = '{:%Y_%m_%d_%H_%M_%S}'.format(datetime.now())
        utils.mkdir_ifnotexists(os.path.join(self.expdir, self.timestamp))
        self.plots_dir = os.path.join(self.expdir, self.timestamp, 'plots')
        utils.mkdir_ifnotexists(self.plots_dir)
        self.checkpoints_path = os.path.join(self.expdir, self.timestamp, 'checkpoints')
        utils.mkdir_ifnotexists(self.checkpoints_path)
        self.model_params_subdir = 'ModelParameters'
        self.optimizer_params_subdir = 'OptimizerParameters'
        self.scheduler_params_subdir = 'SchedulerParameters'
        utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.model_params_subdir))
        utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.optimizer_params_subdir))
        utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.scheduler_params_subdir))
        self.tb_path = os.path.join(self.expdir, self.timestamp, 'tb_path')
        # Snapshot source files and the config into the run directory.
        self.file_backup()
        if (not (self.GPU_INDEX == 'ignore')):
            os.environ['CUDA_VISIBLE_DEVICES'] = '{0}'.format(self.GPU_INDEX)
        print('shell command : {0}'.format(' '.join(sys.argv)))
        print('Loading data ...')
        dataset_conf = self.conf.get_config('dataset')
        if (kwargs['scan_id'] != (- 1)):
            dataset_conf['scan_id'] = kwargs['scan_id']
        # Dataset / dataloaders: a training loader and a separate loader for
        # plotting batches.
        self.train_dataset = utils.get_class(self.conf.get_string('train.dataset_class'))(conf=dataset_conf)
        self.ds_len = len(self.train_dataset)
        print('Finish loading data. Data-set size: {0}'.format(self.ds_len))
        self.train_dataloader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, collate_fn=self.train_dataset.collate_fn, num_workers=8)
        self.plot_dataloader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.conf.get_int('plot.plot_nimgs'), shuffle=True, collate_fn=self.train_dataset.collate_fn)
        # Model, loss, optimizer (trainable params only), and an exponential
        # LR schedule spread over the full training horizon.
        conf_model = self.conf.get_config('model')
        self.model = utils.get_class(self.conf.get_string('train.model_class'))(conf=conf_model)
        if torch.cuda.is_available():
            self.model.cuda()
        self.loss = utils.get_class(self.conf.get_string('train.loss_class'))(**self.conf.get_config('loss'))
        self.lr = self.conf.get_float('train.learning_rate')
        params_to_update = []
        for (name, param) in self.model.named_parameters():
            if (param.requires_grad == True):
                params_to_update.append(param)
            else:
                print('param_not_to_update:', name)
        self.optimizer = torch.optim.Adam(params_to_update, lr=self.lr)
        decay_rate = self.conf.get_float('train.sched_decay_rate', default=0.1)
        decay_steps = (self.nepochs * len(self.train_dataset))
        # Per-step gamma such that the LR decays by decay_rate overall.
        self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, (decay_rate ** (1.0 / decay_steps)))
        self.do_vis = kwargs['do_vis']
        self.start_epoch = 0
        # Resume model (and, when fully compatible, optimizer/scheduler)
        # state from the selected checkpoint directory.
        if is_continue:
            assign_checkpnts_dir = self.conf.get_string('train.assign_checkpnts_dir', default=None)
            if ((assign_checkpnts_dir is not None) and os.path.exists(assign_checkpnts_dir)):
                old_checkpnts_dir = assign_checkpnts_dir
            else:
                old_checkpnts_dir = os.path.join(self.expdir, timestamp, 'checkpoints')
            saved_model_state = torch.load(os.path.join(old_checkpnts_dir, 'ModelParameters', (str(kwargs['checkpoint']) + '.pth')))
            self.start_epoch = saved_model_state['epoch']
            (missing_keys, unexpected_keys) = self.model.load_state_dict(saved_model_state['model_state_dict'], strict=False)
            print('when loading model, missing keys:', missing_keys)
            print('when loading model, unexpected keys:', unexpected_keys)
            print('load model from: {} checkpoint={}, start_epoch={} '.format(old_checkpnts_dir, kwargs['checkpoint'], self.start_epoch))
            if (len(missing_keys) == 0):
                data = torch.load(os.path.join(old_checkpnts_dir, 'OptimizerParameters', (str(kwargs['checkpoint']) + '.pth')))
                self.optimizer.load_state_dict(data['optimizer_state_dict'])
                data = torch.load(os.path.join(old_checkpnts_dir, self.scheduler_params_subdir, (str(kwargs['checkpoint']) + '.pth')))
                self.scheduler.load_state_dict(data['scheduler_state_dict'])
            else:
                # Parameter mismatch: keep the freshly initialized optimizer.
                print('when loading previous optimizer, params unmatched, use initial optimizer')
        self.num_pixels = self.conf.get_int('train.num_pixels')
        self.total_pixels = self.train_dataset.total_pixels
        self.img_res = self.train_dataset.img_res
        self.n_batches = len(self.train_dataloader)
        self.plot_freq = self.conf.get_int('train.plot_freq')
        self.checkpoint_freq = self.conf.get_int('train.checkpoint_freq', default=100)
        self.split_n_pixels = self.conf.get_int('train.split_n_pixels', default=10000)
        self.plot_conf = self.conf.get_config('plot')

    def save_checkpoints(self, epoch):
        """Save model/optimizer/scheduler state both under the epoch number
        and as the rolling 'latest' checkpoint."""
        torch.save({'epoch': epoch, 'model_state_dict': self.model.state_dict()}, os.path.join(self.checkpoints_path, self.model_params_subdir, (str(epoch) + '.pth')))
        torch.save({'epoch': epoch, 'model_state_dict': self.model.state_dict()}, os.path.join(self.checkpoints_path, self.model_params_subdir, 'latest.pth'))
        torch.save({'epoch': epoch, 'optimizer_state_dict': self.optimizer.state_dict()}, os.path.join(self.checkpoints_path, self.optimizer_params_subdir, (str(epoch) + '.pth')))
        torch.save({'epoch': epoch, 'optimizer_state_dict': self.optimizer.state_dict()}, os.path.join(self.checkpoints_path, self.optimizer_params_subdir, 'latest.pth'))
        torch.save({'epoch': epoch, 'scheduler_state_dict': self.scheduler.state_dict()}, os.path.join(self.checkpoints_path, self.scheduler_params_subdir, (str(epoch) + '.pth')))
        torch.save({'epoch': epoch, 'scheduler_state_dict': self.scheduler.state_dict()}, os.path.join(self.checkpoints_path, self.scheduler_params_subdir, 'latest.pth'))

    def file_backup(self):
        """Copy the .py files of the configured directories plus the config
        file into the run's 'recording' folder for reproducibility."""
        dir_lis = self.conf['train.recording']
        os.makedirs(os.path.join(self.expdir, self.timestamp, 'recording'), exist_ok=True)
        for dir_name in dir_lis:
            cur_dir = os.path.join(self.expdir, self.timestamp, 'recording', dir_name)
            os.makedirs(cur_dir, exist_ok=True)
            files = os.listdir(dir_name)
            for f_name in files:
                if (f_name[(- 3):] == '.py'):
                    copyfile(os.path.join(dir_name, f_name), os.path.join(cur_dir, f_name))
        copyfile(self.conf_path, os.path.join(self.expdir, self.timestamp, 'recording', 'config.conf'))

    def run(self):
        """Run the training loop.

        Per epoch: checkpoint (every checkpoint_freq epochs), optionally
        plot/render (every plot_freq epochs; the render-only modes delete
        the run dir and return early), then one optimization pass over the
        training dataloader with TensorBoard logging on a random batch.
        """
        torch.cuda.empty_cache()
        writer = SummaryWriter(logdir=self.tb_path)
        print('training...from epoch {0} to {1}'.format(self.start_epoch, self.nepochs))
        for epoch in range(self.start_epoch, (self.nepochs + 1)):
            if ((epoch % self.checkpoint_freq) == 0):
                self.save_checkpoints(epoch)
            if (self.do_vis and ((epoch % self.plot_freq) == 0)):
                self.model.eval()
                self.model.set_plot_template(self.plot_conf.get_bool('plot_template'))
                self.model.set_plot_displacement(self.plot_conf.get_bool('plot_delta_sdf'))
                # -1 switches the dataset to full-image sampling for plots.
                self.train_dataset.change_sampling_idx((- 1))
                (indices, model_input, ground_truth) = next(iter(self.plot_dataloader))
                model_input['intrinsics'] = model_input['intrinsics'].cuda()
                model_input['uv'] = model_input['uv'].cuda()
                model_input['pose'] = model_input['pose'].cuda()
                model_input['id'] = model_input['id'].cuda()
                if ('FaceDataset' in self.conf.get_string('train.dataset_class')):
                    # Face datasets plot a fixed camera (cam_id=15) for the
                    # sampled identity instead of a random view.
                    (pose, intrinsics, idx, rgb) = self.train_dataset.get_specific_item(people_order=model_input['id'].cpu()[0], cam_id=15)
                    model_input['intrinsics'] = intrinsics.cuda()
                    model_input['pose'] = pose.cuda()
                    model_input['id'] = idx.cuda()
                    ground_truth['rgb'] = rgb
                print('input_id', model_input['id'].item())
                if self.conf.get_bool('render.render_trainset', default=False):
                    # Render-only mode: mesh + every training view, then exit.
                    print('render mesh and trainset views')
                    os.system('rm -rf {0} '.format(os.path.join(self.expdir, self.timestamp)))
                    poses = self.train_dataset.pose_all
                    intrinsics = self.train_dataset.intrinsics_all
                    render_path = self.conf.get_string('render.render_path', default='render_path')
                    post_fix = '{}x{}_{}'.format(self.img_res[0], self.img_res[1], render_path)
                    save_dir = os.path.join(self.plots_dir, 'vis', (post_fix + str(self.conf.get_int('dataset.scan_id'))))
                    os.makedirs(save_dir, exist_ok=True)
                    plt.plot_mesh(self.model, model_input['id'], save_dir, self.train_dataset.id_list[model_input['id']], resolution=self.plot_conf['resolution'], grid_boundary=self.plot_conf['grid_boundary'], level=self.plot_conf['level'], cam_scale=self.conf.get_float('dataset.cam_scale', default=1.0))
                    for i in tqdm(range(poses.shape[0])):
                        pose = poses[i]
                        k = intrinsics[i]
                        res = []
                        model_input['pose'] = pose.unsqueeze(0).cuda()
                        model_input['intrinsics'] = k.unsqueeze(0).cuda()
                        # Render in pixel chunks to bound GPU memory.
                        split = utils.split_input(model_input, self.total_pixels, n_pixels=self.split_n_pixels)
                        torch.cuda.empty_cache()
                        for s in tqdm(split):
                            out = self.model(s, embeddings=None)
                            res.append({'rgb_values': out['rgb_values'].detach(), 'normal_map': out['normal_map'].detach()})
                        (batch_size, num_samples, _) = ground_truth['rgb'].shape
                        model_outputs = utils.merge_output(res, self.total_pixels, batch_size)
                        rgbs = model_outputs['rgb_values'].reshape(batch_size, num_samples, 3)
                        normal_map = model_outputs['normal_map'].reshape(batch_size, num_samples, 3)
                        # Map normals from [-1, 1] into displayable [0, 1].
                        normal_map = ((normal_map + 1.0) / 2.0)
                        plt.plot_images(rgbs, None, save_dir, self.train_dataset.names[i].split('.')[0], 1, self.img_res, render_only=True)
                        plt.plot_normal_maps(normal_map, save_dir, self.train_dataset.names[i].split('.')[0], 1, self.img_res)
                    return
                elif self.conf.get_bool('render.render_novel_view', default=False):
                    # Render-only mode: interpolate novel views, write videos.
                    print('render novel view:')
                    os.system('rm -rf {0} '.format(os.path.join(self.expdir, self.timestamp)))
                    rgb_imgs = []
                    normal_maps = []
                    (num_views, render_path) = (self.conf.get_int('render.num_views'), self.conf.get_string('render.render_path'))
                    start_pose = self.conf.get_int('render.start_pose', default=0)
                    end_pose = self.conf.get_int('render.end_pose', default=1)
                    post_fix = '{}x{}_{}_{}'.format(self.img_res[0], self.img_res[1], num_views, render_path)
                    save_dir = os.path.join(self.plots_dir, 'vis', (post_fix + str(self.conf.get_int('dataset.scan_id'))))
                    os.makedirs(save_dir, exist_ok=True)
                    for i in tqdm(range(num_views)):
                        res = []
                        pose = self.train_dataset.get_novel_pose_between(start_pose, end_pose, i, n_frames=num_views)
                        model_input['pose'] = pose.unsqueeze(0).cuda()
                        split = utils.split_input(model_input, self.total_pixels, n_pixels=self.split_n_pixels)
                        torch.cuda.empty_cache()
                        for s in tqdm(split):
                            out = self.model(s)
                            res.append({'rgb_values': out['rgb_values'].detach(), 'normal_map': out['normal_map'].detach()})
                        (batch_size, num_samples, _) = ground_truth['rgb'].shape
                        model_outputs = utils.merge_output(res, self.total_pixels, batch_size)
                        rgbs = model_outputs['rgb_values'].reshape(batch_size, num_samples, 3)
                        normal_map = model_outputs['normal_map'].reshape(batch_size, num_samples, 3)
                        normal_map = ((normal_map + 1.0) / 2.0)
                        img = plt.plot_images(rgbs, None, save_dir, i, 1, self.img_res, render_only=True, no_save=True)
                        normal = plt.plot_normal_maps(normal_map, save_dir, i, 1, self.img_res, no_save=True)
                        rgb_imgs.append(np.asarray(img))
                        normal_maps.append(np.asarray(normal))
                        # Free per-view tensors before the next iteration.
                        del out, batch_size, num_samples, model_outputs, res, rgbs
                    # Append the reversed frames for a back-and-forth video.
                    for i in range(num_views):
                        rgb_imgs.append(rgb_imgs[((num_views - i) - 1)])
                        normal_maps.append(normal_maps[((num_views - i) - 1)])
                    (w, h) = (self.img_res[0], self.img_res[1])
                    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                    video_dir = os.path.join(save_dir, 'render')
                    os.makedirs(video_dir, exist_ok=True)
                    writer = cv2.VideoWriter(os.path.join(video_dir, '{:0>8d}_{}_rgb.mp4'.format(epoch, post_fix)), fourcc, 15, (w, h))
                    writer_normal = cv2.VideoWriter(os.path.join(video_dir, '{:0>8d}_normal.mp4'.format(epoch)), fourcc, 15, (w, h))
                    for image in rgb_imgs:
                        img_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                        writer.write(img_bgr)
                    writer.release()
                    for normal in normal_maps:
                        normal_bgr = cv2.cvtColor(normal, cv2.COLOR_RGB2BGR)
                        writer_normal.write(normal_bgr)
                    writer_normal.release()
                    print('finish render only')
                    return
                else:
                    # Regular visualization: render the plot batch and save
                    # comparison plots for this epoch.
                    split = utils.split_input(model_input, self.total_pixels, n_pixels=self.split_n_pixels)
                    res = []
                    torch.cuda.empty_cache()
                    for s in tqdm(split):
                        out = self.model(s)
                        d = {'rgb_values': out['rgb_values'].detach(), 'normal_map': out['normal_map'].detach()}
                        res.append(d)
                    batch_size = ground_truth['rgb'].shape[0]
                    model_outputs = utils.merge_output(res, self.total_pixels, batch_size)
                    plot_data = self.get_plot_data(model_outputs, model_input['id'], model_input['pose'], ground_truth['rgb'])
                    plt.plot(self.model, indices, plot_data, self.plots_dir, epoch, self.img_res, writer=None, cam_scale=self.conf.get_float('dataset.cam_scale', default=1.0), **self.plot_conf)
                    del indices, model_input, ground_truth, out, d, res, split, batch_size, model_outputs, plot_data
                self.model.train()
                print('epoch', epoch, 'plot over')
                torch.cuda.empty_cache()
            print(self.expdir, self.timestamp)
            # Back to random-pixel sampling for training.
            self.train_dataset.change_sampling_idx(self.num_pixels)
            # Log losses for one randomly chosen batch per epoch.
            select_print_loss_id = np.random.randint(low=0, high=self.n_batches)
            for (data_index, (indices, model_input, ground_truth)) in enumerate(self.train_dataloader):
                model_input['intrinsics'] = model_input['intrinsics'].cuda()
                model_input['uv'] = model_input['uv'].cuda()
                model_input['pose'] = model_input['pose'].cuda()
                model_input['id'] = model_input['id'].cuda()
                model_outputs = self.model(model_input)
                loss_output = self.loss(model_outputs, ground_truth)
                loss = loss_output['loss']
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                if (data_index == select_print_loss_id):
                    psnr = rend_util.get_psnr(model_outputs['rgb_values'], ground_truth['rgb'].cuda().reshape((- 1), 3))
                    writer.add_scalar('loss', loss.item(), epoch)
                    writer.add_scalar('rgb_loss', loss_output['rgb_loss'].item(), epoch)
                    writer.add_scalar('eik_loss', loss_output['eikonal_loss'].item(), epoch)
                    writer.add_scalar('deform_grad_loss', loss_output['deform_grad_loss'].item(), epoch)
                    writer.add_scalar('deform_loss', loss_output['deform_loss'].item(), epoch)
                    writer.add_scalar('disp_loss', loss_output['disp_loss'].item(), epoch)
                    writer.add_scalar('disp_grad_loss', loss_output['disp_grad_loss'].item(), epoch)
                    writer.add_scalar('shape_code_loss', loss_output['shape_code_loss'].item(), epoch)
                    writer.add_scalar('color_code_loss', loss_output['color_code_loss'].item(), epoch)
                    writer.add_scalar('psnr', psnr.item(), epoch)
                    print('**{}/{}*{}/{}** loss={} eik={} deform_grad={} deform={} disp_grad={} disp={} rgb={} psnr={}'.format(self.expdir, self.timestamp, epoch, self.nepochs, loss_output['loss'].item(), loss_output['eikonal_loss'].item(), loss_output['deform_grad_loss'].item(), loss_output['deform_loss'].item(), loss_output['disp_grad_loss'].item(), loss_output['disp_loss'].item(), loss_output['rgb_loss'].item(), psnr.item()))
                self.train_dataset.change_sampling_idx(self.num_pixels)
            self.scheduler.step()
        # Final checkpoint after the last epoch.
        self.save_checkpoints(epoch)
        writer.close()

    def get_plot_data(self, model_outputs, id, pose, rgb_gt):
        """Reshape flat model outputs back to (batch, pixels, 3) images and
        bundle them with the ground truth for plotting."""
        (batch_size, num_samples, _) = rgb_gt.shape
        rgb_eval = model_outputs['rgb_values'].reshape(batch_size, num_samples, 3)
        normal_map = model_outputs['normal_map'].reshape(batch_size, num_samples, 3)
        # Map normals from [-1, 1] into displayable [0, 1].
        normal_map = ((normal_map + 1.0) / 2.0)
        plot_data = {'id': id, 'rgb_gt': rgb_gt, 'pose': pose, 'rgb_eval': rgb_eval, 'normal_map': normal_map}
        return plot_data
# NOTE(review): these lines look like truncated decorators — the
# '@pytest.mark' prefix appears to have been lost; confirm and restore.
.parametrize('converter_cls', [BaseConverter, Converter])
.parametrize('unstructure_strat', [UnstructureStrategy.AS_DICT, UnstructureStrategy.AS_TUPLE])
def test_unstructure_attrs_mappings(benchmark, converter_cls, unstructure_strat):
    """Benchmark unstructuring of attrs classes holding mapping fields."""
    # NOTE(review): these classes are instantiated with positional args below,
    # so they presumably carry attrs-style auto-__init__ decorators that were
    # stripped from this copy — confirm against the original file.

    class FrozenCls():
        a: int

    class C():
        a: Mapping[(int, str)]
        b: Dict[(float, bytes)]
        c: MutableMapping[(int, FrozenCls)]
    c = converter_cls(unstruct_strat=unstructure_strat)
    # Benchmark the unstructure call on 30-entry mappings of each flavor.
    benchmark(c.unstructure, C({i: str(i) for i in range(30)}, {float(i): bytes(i) for i in range(30)}, {i: FrozenCls(i) for i in range(30)}))
# NOTE(review): these lines look like truncated decorators — the
# '@pytest.mark' prefix appears to have been lost; confirm and restore.
.parametrize('username,password', users)
.parametrize('value_id', values)
def test_detail(db, client, username, password, value_id):
    """Detail endpoint access control: permitted users get the value,
    other authenticated users get 404, anonymous users get 401."""
    client.login(username=username, password=password)
    value = Value.objects.get(pk=value_id)
    url = reverse(urlnames['detail'], args=[value_id])
    response = client.get(url)
    if (value.project.id in view_value_permission_map.get(username, [])):
        # User may view values of this project.
        assert (response.status_code == 200)
        assert isinstance(response.json(), dict)
        assert (response.json().get('id') == value_id)
    elif password:
        # Authenticated but unauthorized: object is hidden, not forbidden.
        assert (response.status_code == 404)
    else:
        # Anonymous: authentication required.
        assert (response.status_code == 401)
def _update_user_newsletter(graphql_client, user, open_to_newsletter):
    """Run the ``update`` user mutation, changing only openToNewsletter.

    Other variables are taken from the given *user* so the mutation's
    required fields are satisfied. Returns (response, variables) so callers
    can assert on both.
    """
    query = '\n    mutation(\n        $open_to_newsletter: Boolean!,\n        $open_to_recruiting: Boolean!,\n        $date_birth: String\n    ){\n        update(input: {\n            openToNewsletter: $open_to_newsletter,\n            openToRecruiting: $open_to_recruiting,\n            dateBirth: $date_birth\n        }){\n            __typename\n            ... on User {\n                id\n                openToNewsletter\n            }\n            ... on UpdateErrors {\n                validationOpenToNewsletter: openToNewsletter\n                nonFieldErrors\n            }\n        }\n    }\n    '
    variables = {'open_to_newsletter': open_to_newsletter, 'open_to_recruiting': user.open_to_recruiting, 'date_birth': f'{user.date_birth:%Y-%m-%d}'}
    return (graphql_client.query(query=query, variables=variables), variables)
def load_ed25519_vectors(vector_data):
    """Parse colon-separated Ed25519 test-vector lines into dicts.

    Each line holds five colon-separated fields
    (``secret_key:public_key:message:signature:``); only the first 64 hex
    characters of the secret-key field and the first 128 of the signature
    field are kept.
    """
    parsed = []
    for entry in vector_data:
        (raw_secret, public_key, message, raw_signature, _) = entry.split(':')
        parsed.append({
            'secret_key': raw_secret[:64],
            'public_key': public_key,
            'message': message,
            'signature': raw_signature[:128],
        })
    return parsed
class NoneCoalesceTernaryVisitor(ast.NodeVisitor):
    """Flags ternaries of the form ``x if x is not None else y`` (or the
    ``y if x is None else x`` mirror image), i.e. none-coalescing ternaries.
    """

    def __init__(self, file_, callback):
        """``callback(file, lineno, None)`` is invoked per offending ternary."""
        self.__file = file_
        self.__callback = callback

    def visit_IfExp(self, ifexp):
        """Report the ternary if its test is ``name is/is not None`` and the
        matching branch returns that same name.
        """
        # NOTE(review): nested IfExp nodes inside this one are not visited
        # (no generic_visit call) — preserved as-is; confirm this is intended.
        if isinstance(ifexp.test, ast.Compare):
            op = ifexp.test.ops[0]
            # Fix: use ast.Constant — ast.NameConstant is deprecated since
            # Python 3.8 and removed in 3.14; the `.value is None` check
            # keeps the match behavior identical.
            if (isinstance(op, (ast.Is, ast.IsNot)) and isinstance(ifexp.test.left, ast.Name) and isinstance(ifexp.test.comparators[0], ast.Constant) and (ifexp.test.comparators[0].value is None)):
                test_name = ifexp.test.left.id
            else:
                return
            if (isinstance(op, ast.IsNot) and isinstance(ifexp.body, ast.Name)):
                result_name = ifexp.body.id
            elif (isinstance(op, ast.Is) and isinstance(ifexp.orelse, ast.Name)):
                result_name = ifexp.orelse.id
            else:
                return
            if (test_name == result_name):
                self.__callback(self.__file, ifexp.test.lineno, None)
_fixtures(ConfigWithFiles)
def test_config_defaults_dangerous(config_with_files):
    """A setting defaulted to a dangerous value must trigger a warning that
    names the default and points at the config file where it can be set."""
    fixture = config_with_files
    fixture.set_config_spec(easter_egg, 'reahl.component_dev.test_config:ConfigWithDangerousDefaultedSetting')
    config = StoredConfiguration(fixture.config_dir.name)
    # Capture calls to the component-config logger's warning method.
    with CallMonitor(logging.getLogger('reahl.component.config').warning) as monitor:
        config.configure()
    warning_text = monitor.calls[0].args[0]
    expected_pattern = '^some_key.some_setting has been defaulted to a value not suitable for production use: "default value". You can set it in /.*/config_file_for_this_egg.py'
    assert re.match(expected_pattern, warning_text)
    # The dangerous default is still applied despite the warning.
    assert (config.some_key.some_setting == 'default value')
def test_parse_empty_string_default(default_parser):
    """Parsing an empty line yields a Statement whose parts are all empty."""
    line = ''
    statement = default_parser.parse(line)
    assert (statement == '')
    assert (statement.args == statement)
    assert (statement.raw == line)
    # Every string-valued component of the parsed statement is empty.
    for attr in ('command', 'multiline_command', 'terminator', 'suffix', 'pipe_to', 'output', 'output_to'):
        assert (getattr(statement, attr) == '')
    assert (statement.arg_list == [])
    assert (statement.command_and_args == line)
    assert (statement.argv == statement.arg_list)
class Migration(migrations.Migration):
    """Creates the ScheduleItemType model and loads its fixture data.

    Generated-style Django migration; keep the operation data stable.
    """

    # Depends on the swappable user model and the prior schedule migration.
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('schedule', '0002_auto__0043')]
    # Creates ScheduleItemType (audited via created/modified fields) and then
    # seeds rows with load_fixture (reversed by unload_fixture).
    operations = [migrations.CreateModel(name='ScheduleItemType', fields=[('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')), ('modified_at', models.DateTimeField(auto_now=True, verbose_name='Last Modified At')), ('title', models.CharField(max_length=100)), ('created_by', models.ForeignKey(related_name='created_scheduleitemtype_set', verbose_name='Created By', blank=True, on_delete=models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, null=True)), ('modified_by', models.ForeignKey(related_name='updated_scheduleitemtype_set', verbose_name='Modified By', blank=True, on_delete=models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, null=True))], options={'abstract': False}, bases=(models.Model,)), migrations.RunPython(load_fixture, reverse_code=unload_fixture)]
class TestBiasCorrection(unittest.TestCase):
def test_get_output_data(self):
    """BiasCorrection._get_output_data on the functional keras model should
    return activations with 8 output channels for the chosen conv op."""
    tf.compat.v1.reset_default_graph()
    sess = tf.compat.v1.Session(graph=tf.Graph())
    input_op_names = ['input_1']
    output_op_name = 'scope_1/conv2d_2/Conv2D'
    # Build the test model inside the session's graph and initialize vars.
    with sess.graph.as_default():
        _ = keras_model_functional()
        init = tf.compat.v1.global_variables_initializer()
    sess.run(init)
    # Single random 32x32 RGB input.
    data = np.random.rand(1, 32, 32, 3)
    output = BiasCorrection._get_output_data(sess, input_op_names, output_op_name, data)
    # Channel dimension of the selected conv output is 8.
    self.assertEqual(output.shape[3], 8)
    sess.close()
def test_bias_correction_single_layer(self):
    """Per-layer bias correction on a quantized conv must change the bias
    values relative to the original float model."""
    tf.compat.v1.reset_default_graph()
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    # Small two-conv model built in the default graph.
    inputs = tf.keras.Input(shape=(32, 16, 3))
    conv_op = tf.keras.layers.Conv2D(16, (3, 3))(inputs)
    relu_1 = tf.nn.relu(conv_op)
    conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(relu_1)
    relu_2 = tf.nn.relu(conv2_op)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session(config=config, graph=tf.compat.v1.get_default_graph())
    sess.run(init)
    np.random.seed(0)
    # Install deterministic random weights/bias on the first conv.
    conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
    w_shape = WeightTensorUtils.get_tensor_shape(conv_op)
    w_numpy_data = np.random.rand(w_shape[0], w_shape[1], w_shape[2], w_shape[3])
    b_shape = BiasUtils.get_shape(conv_op)
    b_numpy_data = np.random.rand(b_shape[0])
    WeightTensorUtils.update_tensor_for_op(sess, conv_op, w_numpy_data)
    BiasUtils.update_bias_for_op(sess, conv_op, b_numpy_data)
    conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
    bool_ = BiasUtils.is_bias_none(conv_op)
    # Round-trip the graph so the updated variables are baked in.
    n_sess = aimet_tensorflow.utils.graph_saver.save_and_load_graph('./test_update', sess)
    output_op = n_sess.graph.get_operation_by_name('Relu_1')
    conv_op = n_sess.graph.get_operation_by_name('conv2d/Conv2D')
    import copy
    # Keep a copy of the pre-correction bias for the final comparison.
    bias_data = copy.deepcopy(BiasUtils.get_bias_as_numpy_data(n_sess, conv_op))
    input_op_name = conv_op.inputs[0].op.name
    bias_corr_input = BiasCorrectionParams(batch_size=1, num_quant_samples=10, num_bias_correct_samples=10, input_op_names=[input_op_name], output_op_names=[output_op.name])
    quant_params = QuantParams(use_cuda=False)
    np.random.seed(0)
    shape = conv_op.inputs[0].shape
    dataset = np.random.rand(1, shape[1], shape[2], shape[3])
    # Stub the sample iterator so correction sees exactly our one batch.
    with unittest.mock.patch('aimet_tensorflow.bias_correction.iter_first_x') as iter_first_x:
        iter_first_x.return_value = [dataset]
        quantsim = BiasCorrection._get_quantized_model(n_sess, quant_params, bias_corr_input.input_op_names, bias_corr_input.output_op_names, bias_corr_input.num_quant_samples, bias_corr_input.batch_size, dataset)
        BiasCorrection.bias_correction_per_layer(reference_model=sess, corrected_model=quantsim.session, bias_correct_params=bias_corr_input, layer_name_to_be_corrected=conv_op.name, data_set=dataset)
        conv_op = quantsim.session.graph.get_operation_by_name('conv2d/Conv2D')
        bias_data_updated = BiasUtils.get_bias_as_numpy_data(quantsim.session, conv_op)
    # Correction must have moved the bias away from its original values.
    self.assertFalse(np.allclose(bias_data, bias_data_updated, atol=0.0001))
    print('Test completed')
    sess.close()
    n_sess.close()
    quantsim.session.close()
def test_bias_correction_model_tf_enhanced(self):
    """Whole-model bias correction (tf-enhanced quant scheme) must add a
    bias to a conv layer that originally had none."""
    tf.compat.v1.reset_default_graph()
    # Three-conv model; the middle conv deliberately has no bias.
    inputs = tf.keras.Input(shape=(32, 32, 3))
    conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
    relu_1 = tf.nn.relu(conv_op)
    conv2_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(relu_1)
    relu_2 = tf.nn.relu(conv2_op)
    conv3_op = tf.keras.layers.Conv2D(32, (3, 3))(relu_2)
    relu_3 = tf.nn.relu(conv3_op)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    sess.run(init)
    np.random.seed(0)
    # Deterministic random weights/bias on the first conv.
    conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
    b_shape = BiasUtils.get_shape(conv_op)
    numpy_data = np.random.rand(b_shape[0])
    BiasUtils.update_bias_for_op(sess, conv_op, numpy_data)
    w_shape = WeightTensorUtils.get_tensor_shape(conv_op)
    w_numpy_data = np.random.rand(w_shape[0], w_shape[1], w_shape[2], w_shape[3])
    WeightTensorUtils.update_tensor_for_op(sess, conv_op, w_numpy_data)
    # Round-trip the graph so the updated variables are baked in.
    n_sess = aimet_tensorflow.utils.graph_saver.save_and_load_graph('./test_update', sess)
    output_op = n_sess.graph.get_operation_by_name('Relu_1')
    conv_op = n_sess.graph.get_operation_by_name('conv2d/Conv2D')
    bias_data = BiasUtils.get_bias_as_numpy_data(n_sess, conv_op)
    input_op_name = conv_op.inputs[0].op.name
    output_op = n_sess.graph.get_operation_by_name('Relu_2')
    input_op_names = [input_op_name]
    output_op_names = [output_op.name]
    batch_size = 1
    num_samples = 10
    np.random.seed(0)
    # Ten single-image batches as a tf Dataset.
    shape = conv_op.inputs[0].shape
    dataset = np.random.rand(10, 1, shape[1], shape[2], shape[3])
    dataset = tf.convert_to_tensor(dataset)
    dataset = tf.data.Dataset.from_tensor_slices(dataset)
    quant_params = QuantParams(use_cuda=False)
    quant_params.use_cuda = False
    bias_correction_params = BiasCorrectionParams(batch_size=batch_size, num_quant_samples=num_samples, num_bias_correct_samples=num_samples, input_op_names=input_op_names, output_op_names=output_op_names)
    # Second conv starts without a bias ...
    conv_op = sess.graph.get_operation_by_name('conv2d_1/Conv2D')
    assert BiasUtils.is_bias_none(conv_op)
    new_sess = BiasCorrection.correct_bias(n_sess, bias_correction_params, quant_params, dataset)
    # ... and correction must have introduced one.
    conv_op = new_sess.graph.get_operation_by_name('conv2d_1/Conv2D')
    assert (not BiasUtils.is_bias_none(conv_op))
    sess.close()
    n_sess.close()
    new_sess.close()
def test_bias_correction_model_tf(self):
    """Run bias correction with quant_mode='tf' on a three-conv model whose
    middle conv (conv2d_1) is built without a bias, and verify that
    correct_bias() attaches one to it.
    """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(32, 32, 3))
    conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
    relu_1 = tf.nn.relu(conv_op)
    # conv2d_1 deliberately has no bias -- this is the layer under test.
    conv2_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(relu_1)
    relu_2 = tf.nn.relu(conv2_op)
    conv3_op = tf.keras.layers.Conv2D(32, (3, 3))(relu_2)
    relu_3 = tf.nn.relu(conv3_op)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    sess.run(init)
    np.random.seed(0)
    # Randomize conv2d's bias and weights so correction operates on
    # non-trivial values rather than the Keras initializer output.
    conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
    b_shape = BiasUtils.get_shape(conv_op)
    numpy_data = np.random.rand(b_shape[0])
    BiasUtils.update_bias_for_op(sess, conv_op, numpy_data)
    w_shape = WeightTensorUtils.get_tensor_shape(conv_op)
    w_numpy_data = np.random.rand(w_shape[0], w_shape[1], w_shape[2], w_shape[3])
    WeightTensorUtils.update_tensor_for_op(sess, conv_op, w_numpy_data)
    # Round-trip through save/load so the variable updates are baked into a
    # fresh session/graph.
    n_sess = save_and_load_graph('./test_update', sess)
    output_op = n_sess.graph.get_operation_by_name('Relu_1')
    conv_op = n_sess.graph.get_operation_by_name('conv2d/Conv2D')
    bias_data = BiasUtils.get_bias_as_numpy_data(n_sess, conv_op)
    input_op_name = conv_op.inputs[0].op.name
    output_op = n_sess.graph.get_operation_by_name('Relu_2')
    input_op_names = [input_op_name]
    output_op_names = [output_op.name]
    batch_size = 1
    num_samples = 10
    np.random.seed(0)
    # Dataset shape: 10 batches of batch_size 1, matching the conv input.
    shape = conv_op.inputs[0].shape
    dataset = np.random.rand(10, 1, shape[1], shape[2], shape[3])
    dataset = tf.convert_to_tensor(dataset)
    dataset = tf.data.Dataset.from_tensor_slices(dataset)
    quant_params = QuantParams(quant_mode='tf', use_cuda=False)
    bias_correction_params = BiasCorrectionParams(batch_size=batch_size, num_quant_samples=num_samples, num_bias_correct_samples=num_samples, input_op_names=input_op_names, output_op_names=output_op_names)
    conv_op = sess.graph.get_operation_by_name('conv2d_1/Conv2D')
    assert BiasUtils.is_bias_none(conv_op)
    # NOTE(review): the sibling test above corrects `n_sess`; this one passes
    # `sess` (which also carries the weight/bias updates) -- confirm intended.
    new_sess = BiasCorrection.correct_bias(sess, bias_correction_params, quant_params, dataset)
    conv_op = new_sess.graph.get_operation_by_name('conv2d_1/Conv2D')
    assert (not BiasUtils.is_bias_none(conv_op))
    sess.close()
    n_sess.close()
    new_sess.close()
def test_bias_update_to_dense(self):
    """Verify bias correction can add a bias to a Dense (MatMul) layer that
    was created with use_bias=False.
    """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(32, 32, 3))
    x = tf.keras.layers.Flatten()(inputs)
    # Dense layer without bias -- the layer under test.
    dense = tf.keras.layers.Dense(2, use_bias=False, activation=tf.nn.softmax, name='single_residual')(x)
    _ = tf.nn.relu(dense)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
    sess.run(init)
    input_op = sess.graph.get_operation_by_name('input_1')
    output_op = sess.graph.get_operation_by_name('Relu')
    input_op_names = ['input_1']
    output_op_names = [output_op.name]
    batch_size = 1
    num_samples = 10
    np.random.seed(0)
    # 10 batches of size 1, shaped to match the model input.
    shape = input_op.outputs[0].shape
    dataset = np.random.rand(10, 1, shape[1], shape[2], shape[3])
    dataset = tf.convert_to_tensor(dataset)
    dataset = tf.data.Dataset.from_tensor_slices(dataset)
    quant_params = QuantParams(use_cuda=False)
    bias_correction_params = BiasCorrectionParams(batch_size=batch_size, num_quant_samples=num_samples, num_bias_correct_samples=num_samples, input_op_names=input_op_names, output_op_names=output_op_names)
    dense_conv_op = sess.graph.get_operation_by_name('single_residual/MatMul')
    assert BiasUtils.is_bias_none(dense_conv_op)
    new_sess = BiasCorrection.correct_bias(sess, bias_correction_params, quant_params, dataset)
    updated_dense_conv_op = new_sess.graph.get_operation_by_name('single_residual/MatMul')
    # After correction the MatMul must have a (now readable) bias attached.
    bias = BiasUtils.get_bias_as_numpy_data(new_sess, updated_dense_conv_op)
    assert (not BiasUtils.is_bias_none(updated_dense_conv_op))
    sess.close()
    new_sess.close()
def test_depthwise_custom(self):
    """Verify bias correction can add a bias to a standalone (non-Keras)
    depthwise conv built with tf.nn.depthwise_conv2d_native.
    """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(10, 10, 3))
    x = tf.keras.layers.Conv2D(10, (1, 1))(inputs)
    # Raw depthwise conv (no bias) inside a named variable scope so its op
    # name is predictable below.
    with tf.compat.v1.variable_scope('standalone_depthwise'):
        x = tf.compat.v1.nn.depthwise_conv2d_native(x, tf.compat.v1.get_variable(initializer=tf.random.truncated_normal(shape=(3, 3, 10, 1)), name='depthwise_kernel'), [1, 1, 1, 1], 'VALID')
    _ = tf.nn.relu(x)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
    sess.run(init)
    depthwise_conv_op = sess.graph.get_operation_by_name('standalone_depthwise/DepthwiseConv2dNative')
    input_op = sess.graph.get_operation_by_name('input_1')
    output_op = sess.graph.get_operation_by_name('Relu')
    input_op_names = ['input_1']
    output_op_names = [output_op.name]
    batch_size = 1
    num_samples = 10
    np.random.seed(0)
    # 10 batches of size 1, shaped to match the model input.
    shape = input_op.outputs[0].shape
    dataset = np.random.rand(10, 1, shape[1], shape[2], shape[3])
    dataset = tf.convert_to_tensor(dataset)
    dataset = tf.data.Dataset.from_tensor_slices(dataset)
    quant_params = QuantParams(use_cuda=False)
    bias_correction_params = BiasCorrectionParams(batch_size=batch_size, num_quant_samples=num_samples, num_bias_correct_samples=num_samples, input_op_names=input_op_names, output_op_names=output_op_names)
    assert BiasUtils.is_bias_none(depthwise_conv_op)
    new_sess = BiasCorrection.correct_bias(sess, bias_correction_params, quant_params, dataset)
    updated_conv_op = new_sess.graph.get_operation_by_name('standalone_depthwise/DepthwiseConv2dNative')
    assert (not BiasUtils.is_bias_none(updated_conv_op))
    sess.close()
    new_sess.close()
def test_bn_based_bias_correction_layer_selection_bn_conv(self):
    """Layer selection: only convs directly fed (through the chosen range)
    by a BN+ReLU should be picked -- expect exactly one match here.
    """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(32, 32, 3), name='inputs')
    conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
    bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op)
    relu = tf.nn.relu(bn_op)
    conv1_op = tf.keras.layers.Conv2D(32, (3, 3))(relu)
    conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(conv1_op)
    _ = tf.nn.relu(conv2_op)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    sess.run(init)
    start_op = 'inputs'
    # NOTE(review): op names are passed as bare strings here, while the
    # analytical test below passes lists -- confirm both forms are accepted.
    bn_conv_linear_dict = BiasCorrection.find_all_convs_bn_with_activation(sess, start_op, 'Relu_1')
    assert (len(bn_conv_linear_dict) == 1)
    sess.close()
def test_layer_selection_bn_depthwise_conv(self):
    """Layer selection must also recognize a standalone depthwise conv that
    directly follows a BN.
    """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(10, 10, 3))
    x = tf.keras.layers.Conv2D(10, (1, 1))(inputs)
    bn_op = tf.keras.layers.BatchNormalization(fused=True)(x, training=False)
    # Raw depthwise conv fed directly by the BN output.
    with tf.compat.v1.variable_scope('standalone_depthwise'):
        x = tf.compat.v1.nn.depthwise_conv2d_native(bn_op, tf.compat.v1.get_variable(initializer=tf.random.truncated_normal(shape=(3, 3, 10, 1)), name='depthwise_kernel'), [1, 1, 1, 1], 'VALID')
    _ = tf.nn.relu(x)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
    sess.run(init)
    start_op = 'input_1'
    bn_conv_linear_dict = BiasCorrection.find_all_convs_bn_with_activation(sess, start_op, 'Relu')
    depthwise_op = sess.graph.get_operation_by_name('standalone_depthwise/DepthwiseConv2dNative')
    # Exactly the depthwise conv should be selected.
    assert (1 == len(bn_conv_linear_dict))
    assert (depthwise_op in bn_conv_linear_dict.keys())
    sess.close()
def test_bn_conv_layer_selection_bn_relu_conv(self):
    """Layer selection with two candidate convs: one preceded by BN+ReLU
    (relu activation), one preceded by ReLU+BN (no activation between BN
    and conv). Checks both the count and the recorded activation types.
    """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(32, 32, 3), name='inputs')
    conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
    bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op, training=False)
    relu = tf.nn.relu(bn_op)
    conv1_op = tf.keras.layers.Conv2D(32, (3, 3))(relu)
    relu_2 = tf.nn.relu(conv1_op)
    bn_op_2 = tf.keras.layers.BatchNormalization(fused=True)(relu_2)
    # NOTE(review): training=False is passed to the Conv2D call here, not to
    # the BatchNormalization on the previous line as in the other branches --
    # looks misplaced; confirm intent.
    conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(bn_op_2, training=False)
    _ = tf.nn.relu(conv2_op)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    sess.run(init)
    start_op = 'inputs'
    bn_conv_linear_dict = BiasCorrection.find_all_convs_bn_with_activation(sess, start_op, 'Relu_2')
    assert (2 == len(bn_conv_linear_dict))
    # conv2d_1 sits after BN -> ReLU, so its input activation is relu.
    conv1_op = sess.graph.get_operation_by_name('conv2d_1/Conv2D')
    assert (bn_conv_linear_dict[conv1_op].in_activation_type == ActivationType.relu)
    assert (bn_conv_linear_dict[conv1_op].input_bn is not None)
    # conv2d_2 sits directly after a BN, with no activation in between.
    conv2_op = sess.graph.get_operation_by_name('conv2d_2/Conv2D')
    assert (bn_conv_linear_dict[conv2_op].in_activation_type == ActivationType.no_activation)
    assert (bn_conv_linear_dict[conv2_op].input_bn is not None)
    sess.close()
def test_bn_based_bias_correction_single_layer_functions_invoked(self):
    """Mock-based test: correct_bias() with perform_only_empirical_bias_corr=
    False must invoke both the analytical and the empirical per-layer
    correction helpers, and flag the first conv as such.
    """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(32, 32, 3), name='inputs')
    conv1_op = tf.keras.layers.Conv2D(32, (3, 3), kernel_initializer=tf.random_uniform_initializer((- 1), 1), bias_initializer='random_uniform')(inputs)
    conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(conv1_op)
    _ = tf.nn.relu(conv2_op)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
    sess.run(init)
    conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
    output_op = sess.graph.get_operation_by_name('Relu')
    input_op_name = 'inputs'
    bias_corr_input = BiasCorrectionParams(batch_size=1, num_quant_samples=10, num_bias_correct_samples=10, input_op_names=[input_op_name], output_op_names=[output_op.name])
    quant_params = QuantParams(use_cuda=False)
    np.random.seed(0)
    # Single input sample shaped like the conv's input.
    shape = conv_op.inputs[0].shape
    dataset = np.random.rand(1, shape[1], shape[2], shape[3])
    (bias_tensor, weight) = BiasCorrection._get_conv_linear_params(sess, conv_op)
    q_weight = BiasCorrection._get_quantized_weights(weight, quant_params)
    # Patch the data iterator and both per-layer correction routines so no
    # real correction math runs; we only check the call pattern.
    with unittest.mock.patch('aimet_tensorflow.bias_correction.iter_first_x') as iter_first_x:
        iter_first_x.return_value = [dataset]
        with unittest.mock.patch('aimet_tensorflow.bias_correction.BiasCorrection.analytical_bias_correction_per_layer', return_value=sess) as mocked_analytical_bias_correction_per_layer:
            with unittest.mock.patch('aimet_tensorflow.bias_correction.BiasCorrection.bias_correction_per_layer', return_value=sess) as mocked_bias_correction_per_layer:
                updated_sess = BiasCorrection.correct_bias(sess, bias_corr_input, quant_params, dataset, perform_only_empirical_bias_corr=False)
    assert mocked_analytical_bias_correction_per_layer.called
    # The first conv in the model must be marked is_first_conv=True.
    called_args = mocked_analytical_bias_correction_per_layer.call_args
    assert (called_args[1]['is_first_conv'] == True)
    assert mocked_bias_correction_per_layer.called
    sess.close()
    updated_sess.close()
def test_analytical_empirical_bias_correction_single_layer(self):
    """Mock-based test on a deeper model: of the convs inside the corrected
    range, those with a preceding BN get analytical correction and the rest
    get empirical correction -- expect three calls of each.
    """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(32, 32, 3), name='inputs')
    conv1_op = tf.keras.layers.Conv2D(32, (3, 3), kernel_initializer=tf.random_uniform_initializer((- 1), 1), bias_initializer='random_uniform')(inputs)
    bn_op = tf.keras.layers.BatchNormalization(fused=True, beta_initializer='random_uniform', gamma_initializer='random_uniform', moving_mean_initializer='random_uniform', moving_variance_initializer='random_uniform')(conv1_op, training=False)
    conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(bn_op)
    conv3_op = tf.keras.layers.Conv2D(32, (3, 3))(conv2_op)
    conv4_op = tf.keras.layers.Conv2D(32, (3, 3))(conv3_op)
    conv5_op = tf.keras.layers.Conv2D(32, (3, 3))(conv4_op)
    bn_op2 = tf.keras.layers.BatchNormalization(fused=True, beta_initializer='random_uniform', gamma_initializer='random_uniform', moving_mean_initializer='random_uniform', moving_variance_initializer='random_uniform')(conv5_op, training=False)
    relu_1 = tf.nn.relu(bn_op2)
    conv6_op = tf.keras.layers.Conv2D(32, (3, 3))(relu_1)
    _ = tf.nn.relu(conv6_op)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
    sess.run(init)
    output_op = sess.graph.get_operation_by_name('Relu_1')
    input_op_name = 'inputs'
    bias_corr_input = BiasCorrectionParams(batch_size=1, num_quant_samples=10, num_bias_correct_samples=10, input_op_names=[input_op_name], output_op_names=[output_op.name])
    quant_params = QuantParams(use_cuda=False)
    np.random.seed(0)
    # One random sample shaped like the model input.
    input_tensor = sess.graph.get_tensor_by_name('inputs:0')
    shape = input_tensor.shape
    dataset = np.random.rand(1, shape[1], shape[2], shape[3])
    # Patch the data iterator and both per-layer routines; only the call
    # counts are checked.
    with unittest.mock.patch('aimet_tensorflow.bias_correction.iter_first_x') as iter_first_x:
        iter_first_x.return_value = [dataset]
        with unittest.mock.patch('aimet_tensorflow.bias_correction.BiasCorrection.analytical_bias_correction_per_layer', return_value=sess) as mocked_analytical_bias_correction_per_layer:
            with unittest.mock.patch('aimet_tensorflow.bias_correction.BiasCorrection.bias_correction_per_layer', return_value=sess) as mocked_bias_correction_per_layer:
                updated_sess = BiasCorrection.correct_bias(sess, bias_corr_input, quant_params, dataset, perform_only_empirical_bias_corr=False)
    assert mocked_bias_correction_per_layer.called
    assert mocked_analytical_bias_correction_per_layer.called
    self.assertEqual(mocked_analytical_bias_correction_per_layer.call_count, 3)
    self.assertEqual(mocked_bias_correction_per_layer.call_count, 3)
    sess.close()
    updated_sess.close()
def test_bias_correction_model_tf_with_no_bias(self):
    """Run bias correction (quant_mode='tf', analytical+empirical) on a model
    in which every conv was created with use_bias=False, and verify that
    correct_bias() attaches a bias to conv2d_1.
    """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(32, 32, 3))
    conv_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(inputs)
    relu_1 = tf.nn.relu(conv_op)
    conv2_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(relu_1)
    relu_2 = tf.nn.relu(conv2_op)
    conv3_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(relu_2)
    _ = tf.nn.relu(conv3_op)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    sess.run(init)
    # NOTE(review): the sibling tests randomize conv2d's weights here with
    # WeightTensorUtils.update_tensor_for_op(); this test computed a random
    # weight tensor (with a duplicated w_shape lookup) but never applied it.
    # The dead computation has been removed -- np.random.seed(0) is re-issued
    # before the dataset below, so removal does not change any random draws.
    # Confirm the omitted update_tensor_for_op() call was not intended.
    n_sess = save_and_load_graph('./test_update', sess)
    conv_op = n_sess.graph.get_operation_by_name('conv2d/Conv2D')
    input_op_name = conv_op.inputs[0].op.name
    output_op = n_sess.graph.get_operation_by_name('Relu_2')
    input_op_names = [input_op_name]
    output_op_names = [output_op.name]
    batch_size = 1
    num_samples = 10
    np.random.seed(0)
    # 10 batches of size 1, shaped to match the conv input.
    shape = conv_op.inputs[0].shape
    dataset = np.random.rand(10, 1, shape[1], shape[2], shape[3])
    dataset = tf.convert_to_tensor(dataset)
    dataset = tf.data.Dataset.from_tensor_slices(dataset)
    quant_params = QuantParams(quant_mode='tf', use_cuda=False)
    bias_correction_params = BiasCorrectionParams(batch_size=batch_size, num_quant_samples=num_samples, num_bias_correct_samples=num_samples, input_op_names=input_op_names, output_op_names=output_op_names)
    conv_op = sess.graph.get_operation_by_name('conv2d_1/Conv2D')
    assert BiasUtils.is_bias_none(conv_op)
    new_sess = BiasCorrection.correct_bias(n_sess, bias_correction_params, quant_params, dataset, perform_only_empirical_bias_corr=False)
    conv_op = new_sess.graph.get_operation_by_name('conv2d_1/Conv2D')
    assert (not BiasUtils.is_bias_none(conv_op))
    sess.close()
    n_sess.close()
    new_sess.close()
def test_analytical_empirical_bias_correction(self):
    """End-to-end flow: collect the conv/BN dict from the ORIGINAL model,
    equalize the model (which folds the BNs away), then run correct_bias()
    with the pre-collected dict -- all three convs in range should still
    receive analytical correction.
    """
    tf.compat.v1.reset_default_graph()
    inputs = tf.keras.Input(shape=(32, 32, 3), name='inputs')
    conv_op = tf.keras.layers.Conv2D(32, (3, 3), kernel_initializer=tf.random_uniform_initializer((- 1), 1), bias_initializer='random_uniform')(inputs)
    conv1_op = tf.keras.layers.Conv2D(32, (3, 3), kernel_initializer=tf.random_uniform_initializer((- 1), 1), bias_initializer='random_uniform')(conv_op)
    bn_op = tf.keras.layers.BatchNormalization(fused=True, beta_initializer='random_uniform', gamma_initializer='random_uniform', moving_mean_initializer='random_uniform', moving_variance_initializer='random_uniform')(conv1_op, training=False)
    conv2_op = tf.keras.layers.Conv2D(32, (3, 3), kernel_initializer=tf.random_uniform_initializer((- 1), 1), bias_initializer='random_uniform')(bn_op)
    bn_op2 = tf.keras.layers.BatchNormalization(fused=True, beta_initializer='random_uniform', gamma_initializer='random_uniform', moving_mean_initializer='random_uniform', moving_variance_initializer='random_uniform')(conv2_op, training=False)
    relu_1 = tf.nn.relu(bn_op2)
    conv6_op = tf.keras.layers.Conv2D(32, (3, 3))(relu_1)
    _ = tf.nn.relu(conv6_op)
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
    sess.run(init)
    output_op = sess.graph.get_operation_by_name('Relu_1')
    input_op_name = 'inputs'
    bias_corr_input = BiasCorrectionParams(batch_size=1, num_quant_samples=10, num_bias_correct_samples=10, input_op_names=[input_op_name], output_op_names=[output_op.name])
    quant_params = QuantParams(use_cuda=False)
    np.random.seed(0)
    # One random sample shaped like the model input.
    input_tensor = sess.graph.get_tensor_by_name('inputs:0')
    shape = input_tensor.shape
    dataset = np.random.rand(1, shape[1], shape[2], shape[3])
    # Capture conv/BN info BEFORE equalization folds the BNs.
    conv_bn_dict = BiasCorrection.find_all_convs_bn_with_activation(sess, [input_op_name], [output_op.name])
    new_sess = equalize_model(sess, input_op_name, output_op.name)
    conv_with_bn_op = new_sess.graph.get_operation_by_name('conv2d_1/Conv2D')
    old_bias_as_numpy = BiasUtils.get_bias_as_numpy_data(new_sess, conv_with_bn_op)
    with unittest.mock.patch('aimet_tensorflow.bias_correction.iter_first_x') as iter_first_x:
        iter_first_x.return_value = [dataset]
        with unittest.mock.patch('aimet_tensorflow.bias_correction.BiasCorrection.analytical_bias_correction_per_layer', return_value=sess) as mocked_analytical_bias_correction_per_layer:
            updated_sess = BiasCorrection.correct_bias(new_sess, bias_corr_input, quant_params, dataset, conv_bn_dict=conv_bn_dict, perform_only_empirical_bias_corr=False)
    self.assertEqual(mocked_analytical_bias_correction_per_layer.call_count, 3)
    sess.close()
    new_sess.close()
class CssDjangoLexer(DelegatingLexer):
    """Delegating lexer for CSS templated with Django/Jinja markup: template
    constructs go to `DjangoLexer`, everything else to `CssLexer`.
    """

    name = 'CSS+Django/Jinja'
    aliases = ['css+django', 'css+jinja']
    filenames = ['*.css.j2', '*.css.jinja2']
    version_added = ''
    alias_filenames = ['*.css']
    mimetypes = ['text/css+django', 'text/css+jinja']
    # The original line was truncated to a bare opening quote (a syntax
    # error); restored to the Django template-language documentation URL
    # used by the companion Django/Jinja lexers.
    url = 'https://www.djangoproject.com/documentation/templates/'

    def __init__(self, **options):
        super().__init__(CssLexer, DjangoLexer, **options)

    def analyse_text(text):
        # Slightly less likely than raw Django/Jinja template source.
        return DjangoLexer.analyse_text(text) - 0.05
_rewriter([sparse.mul_s_v])
def local_mul_s_v(fgraph, node):
    """Specialize ``mul_s_v`` to its CSR implementation.

    Applies only when one operand is a CSR sparse matrix and the other is a
    1-d dense vector; the product is rebuilt from the CSR components via
    ``mul_s_v_csr``. Returns False whenever the pattern does not apply.
    """
    if node.op != sparse.mul_s_v:
        return False
    first, second = node.inputs
    # Determine which operand is the sparse one; the other is dense.
    if _is_sparse_variable(first):
        sparse_in, dense_in = first, second
    else:
        sparse_in, dense_in = second, first
    # Only a 1-d dense operand is supported.
    if dense_in.type.ndim != 1:
        return False
    # Only the CSR storage format has a specialized kernel.
    if sparse_in.type.format != 'csr':
        return False
    data, indices, indptr, shape = sparse.csm_properties(sparse_in)
    new_data = mul_s_v_csr(data, indices, indptr, dense_in)
    return [sparse.CSR(new_data, indices, indptr, shape)]
class TarDataset(data.Dataset):
    """Dataset whose data is distributed as a tarball downloaded into
    ``root``. Subclasses are expected to supply ``url``, ``filename`` and
    ``dirname`` class attributes (read below via ``cls``).
    """

    # NOTE(review): takes ``cls`` but carries no @classmethod decorator in
    # this copy -- presumably lost in extraction; confirm against upstream.
    def download_or_unzip(cls, root):
        """Download the archive if absent, extract it under ``root``, and
        return the extracted directory path with a trailing separator."""
        path = os.path.join(root, cls.dirname)
        if (not os.path.isdir(path)):
            tpath = os.path.join(root, cls.filename)
            if (not os.path.isfile(tpath)):
                print('downloading')
                urllib.request.urlretrieve(cls.url, tpath)
            with tarfile.open(tpath, 'r') as tfile:
                print('extracting')
                # NOTE(review): extractall() without a `filter=` argument can
                # write outside `root` for a crafted archive; consider
                # filter='data' (Python 3.12+, see tarfile docs).
                tfile.extractall(root)
        return os.path.join(path, '')
def test_ConnectionState_inconsistent_protocol_switch() -> None:
    """The server answering with a switch type the client never proposed
    must raise LocalProtocolError."""
    cases = [
        ([], _SWITCH_CONNECT),
        ([], _SWITCH_UPGRADE),
        ([_SWITCH_UPGRADE], _SWITCH_CONNECT),
        ([_SWITCH_CONNECT], _SWITCH_UPGRADE),
    ]
    for proposed, attempted in cases:
        state = ConnectionState()
        # Register only the client's actual proposals (possibly none).
        for switch in proposed:
            state.process_client_switch_proposal(switch)
        state.process_event(CLIENT, Request)
        with pytest.raises(LocalProtocolError):
            state.process_event(SERVER, Response, attempted)
_module()
class Compose(object):
    """Chain several data transforms into a single callable.

    Each entry of ``transforms`` is either a config dict -- instantiated via
    ``build_from_cfg`` against the PIPELINES registry -- or an already-built
    callable. Calling the composite applies the transforms in order; any
    transform returning None short-circuits the pipeline and None is
    returned.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for item in transforms:
            if isinstance(item, dict):
                # Config dict: build the transform from the registry.
                self.transforms.append(build_from_cfg(item, PIPELINES))
            elif callable(item):
                self.transforms.append(item)
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, data):
        """Feed ``data`` through every transform; None aborts the chain."""
        result = data
        for transform in self.transforms:
            result = transform(result)
            if result is None:
                return None
        return result

    def __repr__(self):
        """One transform per line, wrapped in 'Compose( ... )'."""
        body = ''.join(f'\n    {transform}' for transform in self.transforms)
        return f'{self.__class__.__name__}({body}\n)'
class KeyPair():
    """Holder for an RSA key pair, both halves stored as base64-encoded DER
    strings (private key in PKCS8, public key in SubjectPublicKeyInfo).
    """

    # NOTE(review): takes no ``self``/``cls`` and no @staticmethod decorator
    # in this copy -- presumably lost in extraction; confirm against upstream.
    def create_key_pair(key_size: int = 1024):
        """Generate a fresh RSA key pair and wrap it in a KeyPair.

        NOTE(review): the 1024-bit default is below current recommendations
        (>= 2048 bits); confirm whether callers rely on the smaller size.
        """
        private_key = rsa.generate_private_key(public_exponent=65537, key_size=key_size)
        public_key = private_key.public_key()
        # DER-serialize both halves; the private key is left unencrypted.
        private_key_bytes = private_key.private_bytes(encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption())
        public_key_bytes = public_key.public_bytes(encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo)
        return KeyPair(private_key=base64.b64encode(private_key_bytes).decode('UTF-8'), public_key=base64.b64encode(public_key_bytes).decode('UTF-8'))

    def __init__(self, private_key: str, public_key: str):
        # Both keys are base64 text, not raw bytes.
        self.private_key = private_key
        self.public_key = public_key

    def get_private_key(self) -> str:
        """Return the base64-encoded DER private key."""
        return self.private_key

    def get_public_key(self) -> str:
        """Return the base64-encoded DER public key."""
        return self.public_key
def file_content():
    """Return pytest-html's bundled style.css as text.

    Uses the modern importlib.resources files() API when available; on
    Pythons where that attribute is missing (AttributeError), falls back to
    pkg_resources. Note the fallback branch does not strip the text --
    preserved from the original behavior.
    """
    try:
        css_path = importlib.resources.files('pytest_html').joinpath('resources', 'style.css')
        return css_path.read_bytes().decode('utf-8').strip()
    except AttributeError:
        # Older Python: importlib.resources has no files(); use setuptools.
        import pkg_resources

        raw = pkg_resources.resource_string('pytest_html', os.path.join('resources', 'style.css'))
        return raw.decode('utf-8')
def _model_variable_getter(getter, name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=True, collections=None, caching_device=None, partitioner=None, rename=None, use_resource=None, synchronization=tf_variables.VariableSynchronization.AUTO, aggregation=tf_variables.VariableAggregation.NONE, **_):
    """Custom variable getter that forwards creation to
    variables.model_variable().

    If ``rename`` maps this variable's short name (the last '/'-separated
    component of ``name``) to a replacement, the name is rewritten before the
    variable is created. Any extra keyword arguments passed by the framework
    are accepted and discarded via ``**_``.
    """
    short_name = name.split('/')[(- 1)]
    if (rename and (short_name in rename)):
        # Rebuild the full name with only its last component replaced.
        name_components = name.split('/')
        name_components[(- 1)] = rename[short_name]
        name = '/'.join(name_components)
    # The original getter is threaded through as custom_getter so nested
    # getters keep working.
    return variables.model_variable(name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, collections=collections, trainable=trainable, caching_device=caching_device, partitioner=partitioner, custom_getter=getter, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation)
def get_resnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Build a ResNet model of the requested depth.

    Parameters
    ----------
    blocks : int
        Total network depth (number of weighted layers).
    bottleneck : bool or None
        Whether stages use bottleneck units; defaults to True for depths
        >= 50 when None.
    conv1_stride : bool
        Whether the first conv uses stride 2.
    width_scale : float
        Channel-width multiplier (the very last unit's width is kept as-is).
    model_name : str or None
        Pretrained-weights identifier; required when ``pretrained`` is True.
    pretrained : bool
        Load pretrained weights from the local model store.
    root : str
        Directory of the local model store.

    Raises
    ------
    ValueError
        For an unsupported depth, or when ``pretrained`` is requested
        without a ``model_name``.
    """
    if bottleneck is None:
        bottleneck = blocks >= 50
    # Depths whose stage layout depends on the unit type, keyed by
    # (blocks, bottleneck).
    dual_layouts = {
        (14, False): [2, 2, 1, 1],
        (14, True): [1, 1, 1, 1],
        (26, False): [3, 3, 3, 3],
        (26, True): [2, 2, 2, 2],
        (38, True): [3, 3, 3, 3],
    }
    # Depths with a single canonical stage layout.
    plain_layouts = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    layers = dual_layouts.get((blocks, bool(bottleneck)), plain_layouts.get(blocks))
    if layers is None:
        raise ValueError('Unsupported ResNet with number of blocks: {}'.format(blocks))
    # Sanity check: unit count must reproduce the requested depth.
    units_total = sum(layers)
    if bottleneck:
        assert units_total * 3 + 2 == blocks
    else:
        assert units_total * 2 + 2 == blocks
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        # Bottleneck units output 4x the base stage width.
        channels_per_layers = [4 * ci for ci in channels_per_layers]
    channels = [[ci] * li for ci, li in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # Scale every unit's width except the final unit of the final stage.
        last_stage = len(channels) - 1
        scaled = []
        for i, stage in enumerate(channels):
            last_unit = len(stage) - 1
            scaled.append([
                cij if (i == last_stage and j == last_unit) else int(cij * width_scale)
                for j, cij in enumerate(stage)
            ])
        channels = scaled
        init_block_channels = int(init_block_channels * width_scale)
    net = ResNet(channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def plot_sapm(sapm_data, effective_irradiance):
    """Plot SAPM model results on a 2x3 grid: currents, voltages and power
    versus index (top row) and versus effective irradiance (bottom row).

    Parameters
    ----------
    sapm_data : pandas.DataFrame
        SAPM output with current columns prefixed 'i_', voltage columns
        prefixed 'v_' and power columns prefixed 'p_' (incl. 'p_mp').
    effective_irradiance : array-like
        Effective irradiance values, same length as ``sapm_data``.
    """
    fig, axes = plt.subplots(2, 3, figsize=(16, 10), sharex=False, sharey=False, squeeze=False)
    plt.subplots_adjust(wspace=0.2, hspace=0.3)
    # Top row: each family of columns versus the DataFrame index.
    ax = axes[0, 0]
    sapm_data.filter(like='i_').plot(ax=ax)
    ax.set_ylabel('Current (A)')
    ax = axes[0, 1]
    sapm_data.filter(like='v_').plot(ax=ax)
    ax.set_ylabel('Voltage (V)')
    ax = axes[0, 2]
    sapm_data.filter(like='p_').plot(ax=ax)
    ax.set_ylabel('Power (W)')
    # Bottom row: each series versus effective irradiance.
    # DataFrame.iteritems() was removed in pandas 2.0 -- use items(); the
    # side-effect list comprehensions are also replaced by plain loops.
    ax = axes[1, 0]
    for name, current in sapm_data.filter(like='i_').items():
        ax.plot(effective_irradiance, current, label=name)
    ax.set_ylabel('Current (A)')
    ax.set_xlabel('Effective Irradiance')
    ax.legend(loc=2)
    ax = axes[1, 1]
    for name, voltage in sapm_data.filter(like='v_').items():
        ax.plot(effective_irradiance, voltage, label=name)
    ax.set_ylabel('Voltage (V)')
    ax.set_xlabel('Effective Irradiance')
    ax.legend(loc=4)
    ax = axes[1, 2]
    ax.plot(effective_irradiance, sapm_data['p_mp'], label='p_mp')
    ax.set_ylabel('Power (W)')
    ax.set_xlabel('Effective Irradiance')
    ax.legend(loc=2)
    # Force x tick labels visible on every subplot.
    for ax in axes.flatten():
        for tk in ax.get_xticklabels():
            tk.set_visible(True)
class DescribeCT_Tc():
def it_can_merge_to_another_tc(self, tr_, _span_dimensions_, _tbl_, _grow_to_, top_tc_):
top_tr_ = tr_
(tc, other_tc) = (element('w:tc'), element('w:tc'))
(top, left, height, width) = (0, 1, 2, 3)
_span_dimensions_.return_value = (top, left, height, width)
_tbl_.return_value.tr_lst = [tr_]
tr_.tc_at_grid_col.return_value = top_tc_
merged_tc = tc.merge(other_tc)
_span_dimensions_.assert_called_once_with(tc, other_tc)
top_tr_.tc_at_grid_col.assert_called_once_with(left)
top_tc_._grow_to.assert_called_once_with(width, height)
assert (merged_tc is top_tc_)
def it_knows_its_extents_to_help(self, extents_fixture):
(tc, attr_name, expected_value) = extents_fixture
extent = getattr(tc, attr_name)
assert (extent == expected_value)
def it_calculates_the_dimensions_of_a_span_to_help(self, span_fixture):
(tc, other_tc, expected_dimensions) = span_fixture
dimensions = tc._span_dimensions(other_tc)
assert (dimensions == expected_dimensions)
def it_raises_on_invalid_span(self, span_raise_fixture):
(tc, other_tc) = span_raise_fixture
with pytest.raises(InvalidSpanError):
tc._span_dimensions(other_tc)
def it_can_grow_itself_to_help_merge(self, grow_to_fixture):
(tc, width, height, top_tc, expected_calls) = grow_to_fixture
tc._grow_to(width, height, top_tc)
assert (tc._span_to_width.call_args_list == expected_calls)
def it_can_extend_its_horz_span_to_help_merge(self, top_tc_, grid_span_, _move_content_to_, _swallow_next_tc_):
grid_span_.side_effect = [1, 3, 4]
(grid_width, vMerge) = (4, 'continue')
tc = element('w:tc')
tc._span_to_width(grid_width, top_tc_, vMerge)
_move_content_to_.assert_called_once_with(tc, top_tc_)
assert (_swallow_next_tc_.call_args_list == [call(tc, grid_width, top_tc_), call(tc, grid_width, top_tc_)])
assert (tc.vMerge == vMerge)
def it_knows_its_inner_content_block_item_elements(self):
tc = cast(CT_Tc, element('w:tc/(w:p,w:tbl,w:p)'))
assert ([type(e) for e in tc.inner_content_elements] == [CT_P, CT_Tbl, CT_P])
def it_can_swallow_the_next_tc_help_merge(self, swallow_fixture):
(tc, grid_width, top_tc, tr, expected_xml) = swallow_fixture
tc._swallow_next_tc(grid_width, top_tc)
assert (tr.xml == expected_xml)
def it_adds_cell_widths_on_swallow(self, add_width_fixture):
(tc, grid_width, top_tc, tr, expected_xml) = add_width_fixture
tc._swallow_next_tc(grid_width, top_tc)
assert (tr.xml == expected_xml)
def it_raises_on_invalid_swallow(self, swallow_raise_fixture):
(tc, grid_width, top_tc, tr) = swallow_raise_fixture
with pytest.raises(InvalidSpanError):
tc._swallow_next_tc(grid_width, top_tc)
def it_can_move_its_content_to_help_merge(self, move_fixture):
(tc, tc_2, expected_tc_xml, expected_tc_2_xml) = move_fixture
tc._move_content_to(tc_2)
assert (tc.xml == expected_tc_xml)
assert (tc_2.xml == expected_tc_2_xml)
def it_raises_on_tr_above(self, tr_above_raise_fixture):
tc = tr_above_raise_fixture
with pytest.raises(ValueError, match='no tr above topmost tr'):
tc._tr_above
(params=[('w:tr/(w:tc/(w:tcPr/w:tcW{w:w=1440,w:type=dxa},w:p),w:tc/(w:tcPr/w:tcW{w:w=1440,w:type=dxa},w:p))', 0, 2, 'w:tr/(w:tc/(w:tcPr/(w:tcW{w:w=2880,w:type=dxa},w:gridSpan{w:val=2}),w:p))'), ('w:tr/(w:tc/w:p,w:tc/w:p)', 0, 2, 'w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))'), ('w:tr/(w:tc/w:p,w:tc/(w:tcPr/w:tcW{w:w=1440,w:type=dxa},w:p))', 0, 2, 'w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))'), ('w:tr/(w:tc/(w:tcPr/w:tcW{w:w=1440,w:type=dxa},w:p),w:tc/w:p)', 0, 2, 'w:tr/(w:tc/(w:tcPr/(w:tcW{w:w=1440,w:type=dxa},w:gridSpan{w:val=2}),w:p))')])
def add_width_fixture(self, request):
(tr_cxml, tc_idx, grid_width, expected_tr_cxml) = request.param
tr = element(tr_cxml)
tc = top_tc = tr[tc_idx]
expected_tr_xml = xml(expected_tr_cxml)
return (tc, grid_width, top_tc, tr, expected_tr_xml)
(params=[(0, 0, 0, 'top', 0), (2, 0, 1, 'top', 0), (2, 1, 1, 'top', 0), (4, 2, 1, 'top', 1), (0, 0, 0, 'left', 0), (1, 0, 1, 'left', 2), (3, 1, 0, 'left', 0), (3, 1, 1, 'left', 2), (0, 0, 0, 'bottom', 1), (1, 0, 0, 'bottom', 1), (2, 0, 1, 'bottom', 2), (4, 1, 1, 'bottom', 3), (0, 0, 0, 'right', 1), (1, 0, 0, 'right', 2), (0, 0, 0, 'right', 1), (4, 2, 1, 'right', 3)])
def extents_fixture(self, request):
(snippet_idx, row, col, attr_name, expected_value) = request.param
tbl = self._snippet_tbl(snippet_idx)
tc = tbl.tr_lst[row].tc_lst[col]
return (tc, attr_name, expected_value)
(params=[(0, 0, 0, 2, 1), (0, 0, 1, 1, 2), (0, 1, 1, 2, 2), (1, 0, 0, 2, 2), (2, 0, 0, 2, 2), (2, 1, 2, 1, 2)])
def grow_to_fixture(self, request, _span_to_width_):
(snippet_idx, row, col, width, height) = request.param
tbl = self._snippet_tbl(snippet_idx)
tc = tbl.tr_lst[row].tc_lst[col]
start = (0 if (height == 1) else 1)
end = (start + height)
expected_calls = [call(width, tc, None), call(width, tc, 'restart'), call(width, tc, 'continue'), call(width, tc, 'continue')][start:end]
return (tc, width, height, None, expected_calls)
(params=[('w:tc/w:p', 'w:tc/w:p', 'w:tc/w:p', 'w:tc/w:p'), ('w:tc/w:p', 'w:tc/w:p/w:r', 'w:tc/w:p', 'w:tc/w:p/w:r'), ('w:tc/w:p/w:r', 'w:tc/w:p', 'w:tc/w:p', 'w:tc/w:p/w:r'), ('w:tc/(w:p/w:r,w:sdt)', 'w:tc/w:p', 'w:tc/w:p', 'w:tc/(w:p/w:r,w:sdt)'), ('w:tc/(w:p/w:r,w:sdt)', 'w:tc/(w:tbl,w:p)', 'w:tc/w:p', 'w:tc/(w:tbl,w:p/w:r,w:sdt)')])
def move_fixture(self, request):
(tc_cxml, tc_2_cxml, expected_tc_cxml, expected_tc_2_cxml) = request.param
(tc, tc_2) = (element(tc_cxml), element(tc_2_cxml))
expected_tc_xml = xml(expected_tc_cxml)
expected_tc_2_xml = xml(expected_tc_2_cxml)
return (tc, tc_2, expected_tc_xml, expected_tc_2_xml)
(params=[(0, 0, 0, 0, 1, (0, 0, 1, 2)), (0, 0, 1, 2, 1, (0, 1, 3, 1)), (0, 2, 2, 1, 1, (1, 1, 2, 2)), (0, 1, 2, 1, 0, (1, 0, 1, 3)), (1, 0, 0, 1, 1, (0, 0, 2, 2)), (1, 0, 1, 0, 0, (0, 0, 1, 3)), (2, 0, 1, 2, 1, (0, 1, 3, 1)), (2, 0, 1, 1, 0, (0, 0, 2, 2)), (2, 1, 2, 0, 1, (0, 1, 2, 2)), (4, 0, 1, 0, 0, (0, 0, 1, 3))])
def span_fixture(self, request):
(snippet_idx, row, col, row_2, col_2, expected_value) = request.param
tbl = self._snippet_tbl(snippet_idx)
tc = tbl.tr_lst[row].tc_lst[col]
tc_2 = tbl.tr_lst[row_2].tc_lst[col_2]
return (tc, tc_2, expected_value)
(params=[(1, 0, 0, 1, 0), (1, 1, 0, 0, 0), (2, 0, 2, 0, 1), (5, 0, 1, 1, 0), (5, 1, 0, 2, 1), (6, 1, 0, 0, 1), (6, 0, 1, 1, 2)])
def span_raise_fixture(self, request):
(snippet_idx, row, col, row_2, col_2) = request.param
tbl = self._snippet_tbl(snippet_idx)
tc = tbl.tr_lst[row].tc_lst[col]
tc_2 = tbl.tr_lst[row_2].tc_lst[col_2]
return (tc, tc_2)
# NOTE(review): bare `(params=[...])` line looks like a mangled `@pytest.fixture(params=...)` decorator.
(params=[('w:tr/(w:tc/w:p,w:tc/w:p)', 0, 2, 'w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))'), ('w:tr/(w:tc/w:p,w:tc/w:p,w:tc/w:p)', 1, 2, 'w:tr/(w:tc/w:p,w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))'), ('w:tr/(w:tc/w:p/w:r/w:t"a",w:tc/w:p/w:r/w:t"b")', 0, 2, 'w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p/w:r/w:t"a",w:p/w:r/w:t"b"))'), ('w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p),w:tc/w:p)', 0, 3, 'w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=3},w:p))'), ('w:tr/(w:tc/w:p,w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))', 0, 3, 'w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=3},w:p))')])
def swallow_fixture(self, request):
    """Parametrized fixture: a cell to widen plus the expected `w:tr` XML after it swallows its neighbor."""
    (tr_cxml, tc_idx, grid_width, expected_tr_cxml) = request.param
    tr = element(tr_cxml)
    tc = top_tc = tr[tc_idx]
    expected_tr_xml = xml(expected_tr_cxml)
    return (tc, grid_width, top_tc, tr, expected_tr_xml)
# NOTE(review): bare `(params=[...])` line looks like a mangled `@pytest.fixture(params=...)` decorator.
(params=[('w:tr/w:tc/w:p', 0, 2), ('w:tr/(w:tc/w:p,w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))', 0, 2)])
def swallow_raise_fixture(self, request):
    """Parametrized fixture: a cell whose swallow attempt is expected to raise (no suitable neighbor)."""
    (tr_cxml, tc_idx, grid_width) = request.param
    tr = element(tr_cxml)
    tc = top_tc = tr[tc_idx]
    return (tc, grid_width, top_tc, tr)
# NOTE(review): bare `(params=[...])` line looks like a mangled `@pytest.fixture(params=...)` decorator.
(params=[(0, 0, 0), (4, 0, 0)])
def tr_above_raise_fixture(self, request):
    """Parametrized fixture: a top-row cell for which `_tr_above` should raise."""
    (snippet_idx, row_idx, col_idx) = request.param
    tbl = parse_xml(snippet_seq('tbl-cells')[snippet_idx])
    tc = tbl.tr_lst[row_idx].tc_lst[col_idx]
    return tc
def grid_span_(self, request):
    """Mock for the `CT_Tc.grid_span` property."""
    return property_mock(request, CT_Tc, 'grid_span')
def _grow_to_(self, request):
    """Mock for the `CT_Tc._grow_to` method."""
    return method_mock(request, CT_Tc, '_grow_to')
def _move_content_to_(self, request):
    """Mock for the `CT_Tc._move_content_to` method."""
    return method_mock(request, CT_Tc, '_move_content_to')
def _span_dimensions_(self, request):
    """Mock for the `CT_Tc._span_dimensions` method."""
    return method_mock(request, CT_Tc, '_span_dimensions')
def _span_to_width_(self, request):
    """Mock for the `CT_Tc._span_to_width` method (non-autospec so call args are checked loosely)."""
    return method_mock(request, CT_Tc, '_span_to_width', autospec=False)
def _snippet_tbl(self, idx):
    """Return the `w:tbl` element of the idx-th snippet in the 'tbl-cells' snippet file."""
    return parse_xml(snippet_seq('tbl-cells')[idx])
def _swallow_next_tc_(self, request):
    """Mock for the `CT_Tc._swallow_next_tc` method."""
    return method_mock(request, CT_Tc, '_swallow_next_tc')
def _tbl_(self, request):
    """Mock for the `CT_Tc._tbl` property."""
    return property_mock(request, CT_Tc, '_tbl')
def top_tc_(self, request):
    """Instance mock standing in for a `CT_Tc` top cell."""
    return instance_mock(request, CT_Tc)
def tr_(self, request):
    """Instance mock standing in for a `CT_Row`."""
    return instance_mock(request, CT_Row)
def main():
    """Parse CLI arguments, build the generator for the configured dataset, and run the experiment.

    Raises:
        ValueError: if the configured dataset is not supported.
    """
    parser = argparse.ArgumentParser(description='Baseline')
    parser.add_argument('--conf_path', type=str, metavar='conf_path', help='input the path of config file')
    parser.add_argument('--id', type=int, metavar='experiment_id', help='Experiment ID')
    args = parser.parse_args()
    option = Option(args.conf_path)
    # Derive a per-repeat seed and a distinct experiment ID from the repeat index.
    option.manualSeed = (args.id + 1)
    option.experimentID = (option.experimentID + '{:0>2d}_repeat'.format(args.id))
    if (option.dataset in ['cifar100']):
        generator = Generator(option)
    elif (option.dataset in ['imagenet']):
        generator = Generator_imagenet(option)
    else:
        # Bug fix: `assert False` is stripped under `python -O`, which would leave
        # `generator` unbound; raise explicitly instead.
        raise ValueError('invalid data set: {}'.format(option.dataset))
    experiment = ExperimentDesign(generator, option)
    experiment.run()
class SawyerDialTurnV1Policy(Policy):
    """Scripted policy for the Sawyer dial-turn task: approach the dial, then sweep around it."""
    # NOTE(review): `_fully_parsed` below looks like a mangled decorator
    # (likely `@staticmethod @assert_fully_parsed` on `_parse_obs`) — confirm upstream.
    _fully_parsed
    def _parse_obs(obs):
        # Observation layout assumed: hand xyz, dial xyz, goal xyz — TODO confirm.
        return {'hand_pos': obs[:3], 'dial_pos': obs[3:6], 'goal_pos': obs[6:]}
    def get_action(self, obs):
        """Return the action array: xyz delta toward the staged target; gripper stays open."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_pow': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_xyz(o_d), p=5.0)
        action['grab_pow'] = 0.0
        return action.array
    def _desired_xyz(o_d):
        # Target a point slightly offset from the dial center in -y.
        hand_pos = o_d['hand_pos']
        dial_pos = (o_d['dial_pos'] + np.array([0.0, (- 0.028), 0.0]))
        # Stage 1: match the dial's height; stage 2: approach from +x side;
        # stage 3: move past the dial in -x (the sweep that turns it).
        if (abs((hand_pos[2] - dial_pos[2])) > 0.02):
            return np.array([hand_pos[0], hand_pos[1], dial_pos[2]])
        elif (abs((hand_pos[1] - dial_pos[1])) > 0.02):
            return np.array([(dial_pos[0] + 0.2), dial_pos[1], dial_pos[2]])
        return np.array([(dial_pos[0] - 0.1), dial_pos[1], dial_pos[2]])
def test_itransform_pipeline_radians():
    """Round-trip a cartesian point through a unit-converting pipeline with radians=True."""
    trans = Transformer.from_pipeline('+proj=pipeline +step +inv +proj=cart +ellps=WGS84 +step +proj=unitconvert +xy_in=rad +xy_out=deg')
    cartesian = ((- 2704026.01), (- 4253051.81), 3895878.82)
    geodetic = ((- 2.0), 0.0, (- 20.0))
    # Forward: cartesian -> degrees (radians flag applies to the angular side).
    forward_result = list(trans.itransform([cartesian], radians=True))
    assert_almost_equal(forward_result, [geodetic])
    # Inverse: degrees -> cartesian.
    inverse_result = list(trans.itransform([geodetic], radians=True, direction=TransformDirection.INVERSE))
    assert_almost_equal(inverse_result, [cartesian])
# NOTE(review): the bare `.skipif(...)` line looks like a mangled
# `@pytest.mark.skipif` decorator — confirm upstream.
.skipif((not fs_supports_symlink()), reason='symlink is not supported')
def test_py_info_cached_symlink_error(mocker, tmp_path, session_app_data):
    """A failed probe of a directory must not be served from cache for a symlink to it."""
    spy = mocker.spy(cached_py_info, '_run_subprocess')
    # Probing a plain directory (not a python executable) fails.
    with pytest.raises(RuntimeError):
        PythonInfo.from_exe(str(tmp_path), session_app_data)
    symlinked = (tmp_path / 'a')
    symlinked.symlink_to(tmp_path)
    # The symlinked path fails as well and triggers a second probe (no cache hit).
    with pytest.raises(RuntimeError):
        PythonInfo.from_exe(str(symlinked), session_app_data)
    assert (spy.call_count == 2)
def list2mat(input, undirected, sep, format):
    """Read an edge-list file and build CSR-style adjacency arrays.

    Args:
        input: path to the input file; lines beginning with '#' or '%' are comments.
        undirected: when True, each edge is also inserted in the reverse direction.
        sep: field separator used in the file.
        format: 'edgelist' (src tgt) or 'weighted_edgelist' (src tgt weight).

    Returns:
        (indptr, indices, weights): numpy arrays in CSR layout, nodes renumbered
        0..n-1 by sorted ID (numeric sort when all IDs are numeric). As in the
        original contract, `indptr` is returned without its final entry.
    """
    # --- Pass 1: collect the set of node IDs. ---
    nodes = set()
    with open(input, 'r') as inf:
        for line in inf:
            if (line.startswith('#') or line.startswith('%')):
                continue
            line = line.strip()
            if (not line):
                # Robustness fix: ''.split(sep) yields [''], so the previous
                # `if not splt` check never actually skipped blank lines.
                continue
            splt = line.split(sep)
            if (format == 'edgelist'):
                assert (len(splt) == 2), 'In edgelist there should be 2 values per line '
            if (format == 'weighted_edgelist'):
                assert (len(splt) == 3), 'In weighted edgelist there should be 3 values per line'
                # Drop the weight column; only the two endpoints are node IDs.
                splt = splt[:(- 1)]
            for node in splt:
                nodes.add(node)
    number_of_nodes = len(nodes)
    isnumbers = is_numbers_only(nodes)
    logging.info('Node IDs are numbers: %s', isnumbers)
    # Renumber nodes contiguously, sorting numerically when possible.
    if isnumbers:
        node2id = dict(zip(sorted(map(int, nodes)), range(number_of_nodes)))
    else:
        node2id = dict(zip(sorted(nodes), range(number_of_nodes)))
    # --- Pass 2: build per-node adjacency sets of (target, weight). ---
    graph = defaultdict(set)
    with open(input, 'r') as inf:
        for line in inf:
            if (line.startswith('#') or line.startswith('%')):
                continue
            line = line.strip()
            if (not line):
                continue
            splt = line.split(sep)
            weight = 1
            src = (node2id[int(splt[0])] if isnumbers else node2id[splt[0]])
            if (format == 'weighted_edgelist'):
                weight = float(splt[(- 1)])
                splt = splt[:(- 1)]
            for node in splt[1:]:
                if isnumbers:
                    tgt = node2id[int(node)]
                else:
                    tgt = node2id[node]
                graph[src].add((tgt, weight))
                if undirected:
                    graph[tgt].add((src, weight))
    # --- Convert the adjacency sets into CSR arrays. ---
    indptr = np.zeros((number_of_nodes + 1), dtype=np.int32)
    for i in range(number_of_nodes):
        indptr[(i + 1)] = (indptr[i] + len(graph[i]))
    number_of_edges = indptr[(- 1)]
    indices = np.zeros(number_of_edges, dtype=np.int32)
    weights = np.zeros(number_of_edges, dtype=np.float32)
    cur = 0
    for node in range(number_of_nodes):
        # Sort neighbors so column indices are ordered within each row.
        for adjv in sorted(graph[node]):
            (indices[cur], weights[cur]) = adjv
            cur += 1
    return (indptr[:(- 1)], indices, weights)
def get_r50_l16_config():
    """Return the hybrid ResNet-50 + ViT-L/16 segmentation configuration (based on the L/16 config)."""
    config = get_l16_config()
    # Hybrid backbone: truncated ResNet feeds the 16x16 patch embedding.
    config.patches.grid = (16, 16)
    config.resnet = ml_collections.ConfigDict()
    config.resnet.num_layers = (3, 4, 9)
    config.resnet.width_factor = 1
    # Segmentation head / decoder settings.
    config.classifier = 'seg'
    # NOTE(review): path references the R50+ViT-B_16 checkpoint despite the
    # function name saying L16 — confirm against the training setup.
    config.resnet_pretrained_path = '../model/vit_checkpoint/imagenet21k/R50+ViT-B_16.npz'
    config.decoder_channels = (256, 128, 64, 16)
    config.skip_channels = [512, 256, 64, 16]
    config.n_classes = 2
    config.activation = 'softmax'
    return config
def _box_aug_per_img(img, target, aug_type=None, scale_ratios=None, scale_splits=None, img_prob=0.1, box_prob=0.3, level=1):
    """Apply one box-level augmentation to a single image/target pair.

    The image is de-normalized to [0, 1], augmented per sampled box, and
    re-normalized. With probability (1 - img_prob) the input is returned untouched.

    Raises:
        ValueError: when `aug_type` is neither a color nor a geometric augmentation.
    """
    if (random.random() > img_prob):
        return (img, target)
    # De-normalize: add back the dataset mean, then scale into [0, 1].
    img_mean = torch.Tensor(pixel_mean).reshape(3, 1, 1).to(img.device)
    img = (img + img_mean)
    img /= 255.0
    bboxes = target['gt_boxes']
    if (scale_ratios is None):
        # Bug fix: `scale_ratios[tag]` used to be evaluated BEFORE this None
        # check, so the documented default scale_ratios=None raised TypeError.
        box_sample_prob = ([box_prob] * len(bboxes.tensor))
    else:
        # Geometric augs sample by per-scale probability; color augs by area ratio.
        tag = ('prob' if (aug_type in geometric_aug_func) else 'area')
        scale_ratios_splits = [scale_ratios[tag], scale_splits]
        box_sample_prob = [_box_sample_prob(bbox, scale_ratios_splits, box_prob=box_prob) for bbox in bboxes.tensor]
    if (aug_type in color_aug_func):
        # NOTE(review): this branch still indexes scale_ratios['area'] and thus
        # assumes scale_ratios is not None for color augs — confirm callers.
        img_aug = color_aug_func[aug_type](img, level, bboxes, [scale_ratios['area'], scale_splits], box_sample_prob)
    elif (aug_type in geometric_aug_func):
        (img_aug, target) = geometric_aug_func[aug_type](img, level, target, box_sample_prob)
    else:
        raise ValueError(('Unknown box-level augmentation function %s.' % aug_type))
    # Re-normalize back to the network's input space.
    tensor = ((img_aug * 255.0) - img_mean)
    return (tensor, target)
def get_model_params(model, cfg):
    """Build per-parameter optimizer groups; bias parameters get BASE_LR * BIAS_LR_FACTOR."""
    solver_cfg = cfg.SOLVER
    groups = []
    for name, param in model.named_parameters():
        # Frozen parameters are excluded from optimization.
        if not param.requires_grad:
            continue
        candidate_lrs = [solver_cfg.BASE_LR]
        if 'bias' in name:
            candidate_lrs.append(solver_cfg.BASE_LR * solver_cfg.BIAS_LR_FACTOR)
        groups.append({'params': [param], 'lr': max(candidate_lrs)})
    return groups
def format_rouge_scores(scores):
    """Render ROUGE 1/2/L F1, precision and recall as a human-readable report string."""
    sections = []
    for label, key in (('ROUGE 1', 'rouge-1'), ('ROUGE 2', 'rouge-2'), ('ROUGE L', 'rouge-l')):
        metric = scores[key]
        sections.append('** {}\nF1 >> {:.3f}\nPrecision >> {:.3f}\nRecall >> {:.3f}'.format(label, metric['f'], metric['p'], metric['r']))
    return '\n\n****** ROUGE SCORES ******\n\n' + '\n\n'.join(sections)
class UserDal(object):
    """Data-access helpers over the User/Role SQLAlchemy models.

    NOTE(review): methods take no `self`/`cls`, and the bare `_on_success`
    lines look like mangled decorators (likely `@staticmethod` plus a commit
    wrapper) — confirm against the original module.
    """
    # Roles permitted for privileged operations.
    ALLOWED_ROLES = [u'owner', u'dev']
    def __init__(self):
        pass
    def get_roles():
        """Map role id -> Chinese display name (`name_ch`) for all roles."""
        return {item.id: item.name_ch for item in Role.query.all()}
    def get_role_name(role_id):
        """Return the role's `name`, or None when the id is unknown."""
        role = Role.query.get(role_id)
        if role:
            return role.name
        else:
            return None
    def get_user_info(**kwargs):
        """Return the first User matching the given filter kwargs, or None."""
        return User.query.filter_by(**kwargs).first()
    _on_success
    def add_user(username, email, password, role_id):
        """Add a new user; raises BadParam when the username or email is already taken."""
        if User.query.filter_by(username=username).first():
            raise BadParam(('user with username %s already exist.' % username))
        if User.query.filter_by(email=email).first():
            raise BadParam(('user with email %s already exist.' % email))
        user = User(username=username, email=email, password=password, role_id=role_id)
        db.session.add(user)
    def list_user(role_id='', page=1, page_size=10):
        """Return serialized users, optionally filtered by role, paginated (1-based page)."""
        query = User.query
        if role_id:
            query = query.filter_by(role_id=role_id)
        return [obj.json_serialize() for obj in query.offset(((page - 1) * page_size)).limit(page_size).all()]
    _on_success
    def delete_user(username):
        """Delete the user row matching *username* (no-op when absent)."""
        User.query.filter_by(username=username).delete()
class NegativeBaseEntryTestCase(unittest.TestCase):
    """Tests for BaseEntry constructed with a negative value.

    NOTE(review): `setUpClass` is missing its `@classmethod` decorator (likely
    stripped in transit). `test_value` expects 6000 for an input of -6000 —
    presumably BaseEntry stores the absolute value; confirm against BaseEntry.
    """
    def setUpClass(cls):
        cls.entry = BaseEntry(**{'name': 'vw bully', 'value': (- 6000), 'date': '2000-01-01'})
    def test_name(self):
        self.assertEqual(self.entry.name, 'vw bully')
    def test_value(self):
        self.assertEqual(self.entry.value, 6000)
# NOTE(review): the bare `()` and `_constant(ttl=100000)` lines look like
# mangled decorators (probably `@asynq()` on the generator test and an
# `alazy_constant(ttl=100000)` decorator on `constant`) — confirm upstream.
# The ttl appears to be in microseconds, given the 0.1 s sleep below.
()
def _check_alazy_constant_ttl():
    """Verify TTL-based caching: value is reused within the TTL, recomputed after expiry or dirty()."""
    global constant_call_count
    constant_call_count = 0
    _constant(ttl=100000)
    ()
    def constant():
        global constant_call_count
        constant_call_count += 1
        return constant_call_count
    # Within the TTL window the cached value is reused.
    assert_eq(1, (yield constant.asynq()))
    assert_eq(1, (yield constant.asynq()))
    assert_eq(1, (yield constant.asynq()))
    # After the TTL expires the underlying function runs again.
    time.sleep(0.1)
    assert_eq(2, (yield constant.asynq()))
    assert_eq(2, (yield constant.asynq()))
    assert_eq(2, (yield constant.asynq()))
    # Explicit dirty() forces a recompute regardless of TTL.
    constant.dirty()
    assert_eq(3, (yield constant.asynq()))
    assert_eq(3, (yield constant.asynq()))
    assert_eq(3, (yield constant.asynq()))
# NOTE(review): `(scope='module')` looks like a mangled
# `@pytest.fixture(scope='module')` decorator — confirm upstream.
(scope='module')
def chat_join_request(bot, time):
    """Module-scoped fixture: a ChatJoinRequest built from TestChatJoinRequestBase data, bound to *bot*."""
    cjr = ChatJoinRequest(chat=TestChatJoinRequestBase.chat, from_user=TestChatJoinRequestBase.from_user, date=time, bio=TestChatJoinRequestBase.bio, invite_link=TestChatJoinRequestBase.invite_link, user_chat_id=TestChatJoinRequestBase.from_user.id)
    cjr.set_bot(bot)
    return cjr
# NOTE(review): the bare `.parametrize(...)` line looks like a mangled
# `@pytest.mark.parametrize` decorator — confirm upstream.
.parametrize('progress, load_status, expected_visible', [(15, usertypes.LoadStatus.loading, True), (100, usertypes.LoadStatus.success, False), (100, usertypes.LoadStatus.error, False), (100, usertypes.LoadStatus.warn, False), (100, usertypes.LoadStatus.none, False)])
def test_tab_changed(fake_web_tab, progress_widget, progress, load_status, expected_visible):
    """Switching tabs syncs the progress bar's value and visibility to the new tab's state."""
    tab = fake_web_tab(progress=progress, load_status=load_status)
    progress_widget.on_tab_changed(tab)
    actual = (progress_widget.value(), progress_widget.isVisible())
    expected = (tab.progress(), expected_visible)
    assert (actual == expected)
class SendContact():
    """Mixin providing `Client.send_contact` (Pyrogram bound-method pattern)."""
    async def send_contact(self: 'pyrogram.Client', chat_id: Union[(int, str)], phone_number: str, first_name: str, last_name: str=None, vcard: str=None, disable_notification: bool=None, reply_to_message_id: int=None, schedule_date: datetime=None, protect_content: bool=None, reply_markup: Union[('types.InlineKeyboardMarkup', 'types.ReplyKeyboardMarkup', 'types.ReplyKeyboardRemove', 'types.ForceReply')]=None) -> 'types.Message':
        """Send a phone contact to *chat_id* and return the resulting Message.

        Falls through (returns None) if the server response carries no
        message-bearing update.
        """
        r = (await self.invoke(raw.functions.messages.SendMedia(peer=(await self.resolve_peer(chat_id)), media=raw.types.InputMediaContact(phone_number=phone_number, first_name=first_name, last_name=(last_name or ''), vcard=(vcard or '')), message='', silent=(disable_notification or None), reply_to_msg_id=reply_to_message_id, random_id=self.rnd_id(), schedule_date=utils.datetime_to_timestamp(schedule_date), noforwards=protect_content, reply_markup=((await reply_markup.write(self)) if reply_markup else None))))
        # Pick the first update that actually carries the sent message (regular,
        # channel, or scheduled).
        for i in r.updates:
            if isinstance(i, (raw.types.UpdateNewMessage, raw.types.UpdateNewChannelMessage, raw.types.UpdateNewScheduledMessage)):
                return (await types.Message._parse(self, i.message, {i.id: i for i in r.users}, {i.id: i for i in r.chats}, is_scheduled=isinstance(i, raw.types.UpdateNewScheduledMessage)))
def test_num_threads():
    """`set_num_threads` applies inside the with-block and restores the prior value on exit."""
    original = kvikio.defaults.get_num_threads()
    with kvikio.defaults.set_num_threads(3):
        assert kvikio.defaults.get_num_threads() == 3
        # An explicit reset inside the context takes effect immediately.
        kvikio.defaults.num_threads_reset(4)
        assert kvikio.defaults.get_num_threads() == 4
    # Leaving the context restores the original thread count.
    assert kvikio.defaults.get_num_threads() == original
def test_attn_label_convertor():
    """End-to-end checks of AttnConvertor: constructor validation, str<->tensor, tensor->idx, idx<->str."""
    tmp_dir = tempfile.TemporaryDirectory()
    dict_file = osp.join(tmp_dir.name, 'fake_dict.txt')
    _create_dummy_dict_file(dict_file)
    # Invalid constructor arguments are rejected.
    with pytest.raises(NotImplementedError):
        AttnConvertor(5)
    with pytest.raises(AssertionError):
        AttnConvertor('DICT90', dict_file, '1')
    with pytest.raises(AssertionError):
        AttnConvertor('DICT90', dict_file, True, '1')
    # Dictionary layout: dummy chars plus special tokens appended.
    label_convertor = AttnConvertor(dict_file=dict_file, max_seq_len=10)
    assert (label_convertor.num_classes() == 10)
    assert (len(label_convertor.idx2char) == 10)
    assert (label_convertor.idx2char[0] == 'h')
    assert (label_convertor.idx2char[1] == 'e')
    assert (label_convertor.idx2char[(- 3)] == '<UKN>')
    assert (label_convertor.char2idx['h'] == 0)
    assert (label_convertor.unknown_idx == 7)
    # String -> tensors: raw targets plus padded targets with start/end/pad tokens.
    strings = ['hell']
    targets_dict = label_convertor.str2tensor(strings)
    assert torch.allclose(targets_dict['targets'][0], torch.LongTensor([0, 1, 2, 2]))
    assert torch.allclose(targets_dict['padded_targets'][0], torch.LongTensor([8, 0, 1, 2, 2, 8, 9, 9, 9, 9]))
    # Network output -> indices: argmax per step, truncated at the end token.
    dummy_output = torch.Tensor([[[100, 2, 3, 4, 5, 6, 7, 8, 9], [1, 100, 3, 4, 5, 6, 7, 8, 9], [1, 2, 100, 4, 5, 6, 7, 8, 9], [1, 2, 100, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 100], [1, 2, 3, 4, 5, 6, 7, 100, 9], [1, 2, 3, 4, 5, 6, 7, 100, 9], [1, 2, 3, 4, 5, 6, 7, 100, 9], [1, 2, 3, 4, 5, 6, 7, 100, 9], [1, 2, 3, 4, 5, 6, 7, 100, 9]]])
    (indexes, scores) = label_convertor.tensor2idx(dummy_output)
    assert np.allclose(indexes, [[0, 1, 2, 2]])
    # str2idx / idx2str require list inputs.
    with pytest.raises(AssertionError):
        label_convertor.str2idx('hell')
    tmp_indexes = label_convertor.str2idx(strings)
    assert np.allclose(tmp_indexes, [[0, 1, 2, 2]])
    input_indexes = [[0, 1, 2, 2]]
    with pytest.raises(AssertionError):
        label_convertor.idx2str('hell')
    output_strings = label_convertor.idx2str(input_indexes)
    assert (output_strings[0] == 'hell')
    tmp_dir.cleanup()
class LRUCacheDataset(BaseWrapperDataset):
    """Dataset wrapper memoizing item and collater lookups.

    NOTE(review): the bare `_cache(maxsize=8)` lines look like mangled
    `@lru_cache(maxsize=8)` decorators; `token` is accepted but unused here —
    confirm both against the original module.
    """
    def __init__(self, dataset, token=None):
        super().__init__(dataset)
    _cache(maxsize=8)
    def __getitem__(self, index):
        return self.dataset[index]
    _cache(maxsize=8)
    def collater(self, samples):
        return self.dataset.collater(samples)
def test_accepts_none(msg):
    """no_none* overloads reject None; ok_none* overloads accept it (returning -1), except ok_none1."""
    tester = m.NoneTester()
    strict_fns = (m.no_none1, m.no_none2, m.no_none3, m.no_none4, m.no_none5)
    lenient_fns = (m.ok_none1, m.ok_none2, m.ok_none3, m.ok_none4, m.ok_none5)
    # A real instance is accepted by every overload.
    for fn in strict_fns + lenient_fns:
        assert fn(tester) == 42
    # None is rejected by the strict overloads.
    for fn in strict_fns:
        with pytest.raises(TypeError) as excinfo:
            fn(None)
        assert 'incompatible function arguments' in str(excinfo.value)
    # ok_none1 also rejects None; check the full error message.
    with pytest.raises(TypeError) as excinfo:
        assert m.ok_none1(None) == (- 1)
    assert (msg(excinfo.value) == '\n ok_none1(): incompatible function arguments. The following argument types are supported:\n 1. (arg0: m.methods_and_attributes.NoneTester) -> int\n\n Invoked with: None\n ')
    # The remaining lenient overloads accept None.
    assert m.ok_none2(None) == (- 1)
    assert m.ok_none3(None) == (- 1)
    assert m.ok_none4(None) == (- 1)
    assert m.ok_none5(None) == (- 1)
class RMSpropTFOptimizer(Optimizer):
    """RMSprop variant matching TensorFlow's formulation (eps is added inside the sqrt).

    NOTE(review): `step` uses the deprecated positional (value, tensor, tensor)
    overloads of `add_`/`addcmul_`/`addcdiv_`; they work on older torch versions
    but warn or error on newer ones — confirm the targeted torch version.
    """
    def __init__(self, params, lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False):
        # Validate hyper-parameters up front so failures point at construction.
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= momentum)):
            raise ValueError('Invalid momentum value: {}'.format(momentum))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        if (not (0.0 <= alpha)):
            raise ValueError('Invalid alpha value: {}'.format(alpha))
        defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay)
        super().__init__(params, defaults)
    def __setstate__(self, state):
        """Restore pickled state, backfilling options absent from older checkpoints."""
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('momentum', 0)
            group.setdefault('centered', False)
    def step(self, closure=None):
        """Perform a single optimization step; `closure` re-evaluates the loss if given."""
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('RMSprop does not support sparse gradients')
                state = self.state[p]
                # Lazy state initialization on first use of each parameter.
                if (len(state) == 0):
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    if (group['momentum'] > 0):
                        state['momentum_buffer'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    if group['centered']:
                        state['grad_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                square_avg = state['square_avg']
                alpha = group['alpha']
                state['step'] += 1
                if (group['weight_decay'] != 0):
                    grad = grad.add(group['weight_decay'], p.data)
                # EMA of squared gradients.
                square_avg.mul_(alpha).addcmul_((1 - alpha), grad, grad)
                if group['centered']:
                    # Centered variant: subtract the squared EMA of gradients.
                    grad_avg = state['grad_avg']
                    grad_avg.mul_(alpha).add_((1 - alpha), grad)
                    avg = square_avg.addcmul((- 1), grad_avg, grad_avg).add_(group['eps']).sqrt_()
                else:
                    # TF-style: eps added BEFORE the sqrt.
                    avg = square_avg.add_(group['eps']).sqrt_()
                if (group['momentum'] > 0):
                    buf = state['momentum_buffer']
                    buf.mul_(group['momentum']).addcdiv_(grad, avg)
                    p.data.add_((- group['lr']), buf)
                else:
                    p.data.addcdiv_((- group['lr']), grad, avg)
        return loss
class VisaIOWarning(Warning):
    """Warning carrying a VISA completion/error code plus its abbreviation and description."""
    def __init__(self, error_code: int) -> None:
        # Unknown codes fall back to a placeholder abbreviation/description.
        abbreviation, description = completion_and_error_messages.get(error_code, ('?', 'Unknown code.'))
        message = f'{abbreviation} ({error_code:d}): {description}'
        super(VisaIOWarning, self).__init__(message)
        self.error_code = error_code
        self.abbreviation = abbreviation
        self.description = description
    def __reduce__(self) -> Tuple[(type, Tuple[int])]:
        """Support pickling by reconstructing from the error code alone."""
        return (VisaIOWarning, (self.error_code,))
# NOTE(review): the bare `.parametrize(...)`, `(deadline=None)` and
# `(data=st.data())` lines look like mangled `@pytest.mark.parametrize`,
# `@settings` and `@given` decorators — confirm upstream.
.parametrize('schema', [{'required': ['\x00']}, {'properties': {'\x00': {'type': 'integer'}}}, {'dependencies': {'\x00': ['a']}}, {'dependencies': {'\x00': {'type': 'integer'}}}, {'required': ['y']}, {'properties': {'y': {'type': 'integer'}}}, {'dependencies': {'y': ['a']}}, {'dependencies': {'y': {'type': 'integer'}}}])
(deadline=None)
(data=st.data())
def test_alphabet_name_validation(data, schema):
    """Schemas whose property names fall outside the allowed alphabet/codec must be rejected."""
    with pytest.raises(InvalidArgument):
        data.draw(from_schema(schema, allow_x00=False, codec='ascii'))
def make_dict_unstructure_fn(cl: type[T], converter: BaseConverter, _cattrs_omit_if_default: bool=False, _cattrs_use_linecache: bool=True, _cattrs_use_alias: bool=False, _cattrs_include_init_false: bool=False, **kwargs: AttributeOverride) -> Callable[([T], dict[(str, Any)])]:
    """Generate a specialized instance -> dict unstructure function for *cl* via code generation.

    Per-attribute AttributeOverride customizations arrive through **kwargs;
    unconsumed overrides are stored on the returned function's `overrides`
    attribute. Raises RecursionError while a hook for *cl* is already being
    generated (self-referential classes).
    """
    origin = get_origin(cl)
    attrs = adapted_fields((origin or cl))
    # String annotations must be resolved before handlers can be dispatched on them.
    if any((isinstance(a.type, str) for a in attrs)):
        resolve_types(cl)
    # For generic classes, map type variables to their concrete arguments.
    mapping = {}
    if is_generic(cl):
        mapping = generate_mapping(cl, mapping)
    for base in getattr(origin, '__orig_bases__', ()):
        if (is_generic(base) and (not str(base).startswith('typing.Generic'))):
            mapping = generate_mapping(base, mapping)
            break
    if (origin is not None):
        cl = origin
    cl_name = cl.__name__
    fn_name = ('unstructure_' + cl_name)
    globs = {}
    lines = []
    invocation_lines = []
    internal_arg_parts = {}
    # Recursion guard: track classes whose hooks are mid-generation on this thread.
    try:
        working_set = already_generating.working_set
    except AttributeError:
        working_set = set()
        already_generating.working_set = working_set
    if (cl in working_set):
        raise RecursionError()
    working_set.add(cl)
    try:
        for a in attrs:
            attr_name = a.name
            override = kwargs.pop(attr_name, neutral)
            if override.omit:
                continue
            if ((override.omit is None) and (not a.init) and (not _cattrs_include_init_false)):
                continue
            # Output key: explicit rename > alias (if enabled) > attribute name.
            if (override.rename is None):
                kn = (attr_name if (not _cattrs_use_alias) else a.alias)
            else:
                kn = override.rename
            d = a.default
            # Resolve the per-attribute unstructure handler.
            handler = None
            if (override.unstruct_hook is not None):
                handler = override.unstruct_hook
            elif (a.type is not None):
                t = a.type
                if isinstance(t, TypeVar):
                    if (t.__name__ in mapping):
                        t = mapping[t.__name__]
                    else:
                        # Unbound TypeVar: fall back to runtime dispatch.
                        handler = converter.unstructure
                elif (is_generic(t) and (not is_bare(t)) and (not is_annotated(t))):
                    t = deep_copy_with(t, mapping)
                if (handler is None):
                    if (is_bare_final(t) and (a.default is not NOTHING) and (not isinstance(a.default, Factory))):
                        # `Final` without an annotation: infer the type from the default.
                        t = a.default.__class__
                    try:
                        handler = converter._unstructure_func.dispatch(t)
                    except RecursionError:
                        # Self-referential attribute: defer to runtime dispatch.
                        handler = converter.unstructure
            else:
                handler = converter.unstructure
            # Identity handlers are inlined as plain attribute reads.
            is_identity = (handler == identity)
            if (not is_identity):
                unstruct_handler_name = f'__c_unstr_{attr_name}'
                globs[unstruct_handler_name] = handler
                internal_arg_parts[unstruct_handler_name] = handler
                invoke = f'{unstruct_handler_name}(instance.{attr_name})'
            else:
                invoke = f'instance.{attr_name}'
            if ((d is not NOTHING) and ((_cattrs_omit_if_default and (override.omit_if_default is not False)) or override.omit_if_default)):
                # omit_if_default: emit the key only when the value differs from the default.
                def_name = f'__c_def_{attr_name}'
                if isinstance(d, Factory):
                    globs[def_name] = d.factory
                    internal_arg_parts[def_name] = d.factory
                    if d.takes_self:
                        lines.append(f' if instance.{attr_name} != {def_name}(instance):')
                    else:
                        lines.append(f' if instance.{attr_name} != {def_name}():')
                    lines.append(f" res['{kn}'] = {invoke}")
                else:
                    globs[def_name] = d
                    internal_arg_parts[def_name] = d
                    lines.append(f' if instance.{attr_name} != {def_name}:')
                    lines.append(f" res['{kn}'] = {invoke}")
            else:
                invocation_lines.append(f"'{kn}': {invoke},")
        # Handlers/defaults are passed as keyword defaults for fast local access.
        internal_arg_line = ', '.join([f'{i}={i}' for i in internal_arg_parts])
        if internal_arg_line:
            internal_arg_line = f', {internal_arg_line}'
        for (k, v) in internal_arg_parts.items():
            globs[k] = v
        # Assemble and compile the generated function source.
        total_lines = ((((([f'def {fn_name}(instance{internal_arg_line}):'] + [' res = {']) + [f' {line}' for line in invocation_lines]) + [' }']) + lines) + [' return res'])
        script = '\n'.join(total_lines)
        fname = generate_unique_filename(cl, 'unstructure', lines=(total_lines if _cattrs_use_linecache else []))
        eval(compile(script, fname, 'exec'), globs)
    finally:
        working_set.remove(cl)
        if (not working_set):
            del already_generating.working_set
    res = globs[fn_name]
    res.overrides = kwargs
    return res
class IRBuilderVisitor(IRVisitor):
    """AST visitor that forwards every mypy node to the matching transform_* function.

    Holds no lowering logic of its own beyond delegation. Statement visitors
    that end control flow (return/break/continue/raise) also mark the current
    basic block unreachable. Analysis-only or pre-lowered nodes assert.
    """
    # The IRBuilder accumulating the generated IR.
    builder: IRBuilder
    # --- Module and class structure ---
    def visit_mypy_file(self, mypyfile: MypyFile) -> None:
        assert False, 'use transform_mypy_file instead'
    def visit_class_def(self, cdef: ClassDef) -> None:
        transform_class_def(self.builder, cdef)
    def visit_import(self, node: Import) -> None:
        transform_import(self.builder, node)
    def visit_import_from(self, node: ImportFrom) -> None:
        transform_import_from(self.builder, node)
    def visit_import_all(self, node: ImportAll) -> None:
        transform_import_all(self.builder, node)
    def visit_func_def(self, fdef: FuncDef) -> None:
        transform_func_def(self.builder, fdef)
    def visit_overloaded_func_def(self, o: OverloadedFuncDef) -> None:
        transform_overloaded_func_def(self.builder, o)
    def visit_decorator(self, dec: Decorator) -> None:
        transform_decorator(self.builder, dec)
    def visit_block(self, block: Block) -> None:
        transform_block(self.builder, block)
    # --- Statements ---
    def visit_expression_stmt(self, stmt: ExpressionStmt) -> None:
        transform_expression_stmt(self.builder, stmt)
    def visit_return_stmt(self, stmt: ReturnStmt) -> None:
        transform_return_stmt(self.builder, stmt)
        self.builder.mark_block_unreachable()
    def visit_assignment_stmt(self, stmt: AssignmentStmt) -> None:
        transform_assignment_stmt(self.builder, stmt)
    def visit_operator_assignment_stmt(self, stmt: OperatorAssignmentStmt) -> None:
        transform_operator_assignment_stmt(self.builder, stmt)
    def visit_if_stmt(self, stmt: IfStmt) -> None:
        transform_if_stmt(self.builder, stmt)
    def visit_while_stmt(self, stmt: WhileStmt) -> None:
        transform_while_stmt(self.builder, stmt)
    def visit_for_stmt(self, stmt: ForStmt) -> None:
        transform_for_stmt(self.builder, stmt)
    def visit_break_stmt(self, stmt: BreakStmt) -> None:
        transform_break_stmt(self.builder, stmt)
        self.builder.mark_block_unreachable()
    def visit_continue_stmt(self, stmt: ContinueStmt) -> None:
        transform_continue_stmt(self.builder, stmt)
        self.builder.mark_block_unreachable()
    def visit_raise_stmt(self, stmt: RaiseStmt) -> None:
        transform_raise_stmt(self.builder, stmt)
        self.builder.mark_block_unreachable()
    def visit_try_stmt(self, stmt: TryStmt) -> None:
        transform_try_stmt(self.builder, stmt)
    def visit_with_stmt(self, stmt: WithStmt) -> None:
        transform_with_stmt(self.builder, stmt)
    def visit_pass_stmt(self, stmt: PassStmt) -> None:
        pass
    def visit_assert_stmt(self, stmt: AssertStmt) -> None:
        transform_assert_stmt(self.builder, stmt)
    def visit_del_stmt(self, stmt: DelStmt) -> None:
        transform_del_stmt(self.builder, stmt)
    def visit_global_decl(self, stmt: GlobalDecl) -> None:
        pass
    def visit_nonlocal_decl(self, stmt: NonlocalDecl) -> None:
        pass
    def visit_match_stmt(self, stmt: MatchStmt) -> None:
        transform_match_stmt(self.builder, stmt)
    # --- Expressions (each returns the IR Value of the expression) ---
    def visit_name_expr(self, expr: NameExpr) -> Value:
        return transform_name_expr(self.builder, expr)
    def visit_member_expr(self, expr: MemberExpr) -> Value:
        return transform_member_expr(self.builder, expr)
    def visit_super_expr(self, expr: SuperExpr) -> Value:
        return transform_super_expr(self.builder, expr)
    def visit_call_expr(self, expr: CallExpr) -> Value:
        return transform_call_expr(self.builder, expr)
    def visit_unary_expr(self, expr: UnaryExpr) -> Value:
        return transform_unary_expr(self.builder, expr)
    def visit_op_expr(self, expr: OpExpr) -> Value:
        return transform_op_expr(self.builder, expr)
    def visit_index_expr(self, expr: IndexExpr) -> Value:
        return transform_index_expr(self.builder, expr)
    def visit_conditional_expr(self, expr: ConditionalExpr) -> Value:
        return transform_conditional_expr(self.builder, expr)
    def visit_comparison_expr(self, expr: ComparisonExpr) -> Value:
        return transform_comparison_expr(self.builder, expr)
    def visit_int_expr(self, expr: IntExpr) -> Value:
        return transform_int_expr(self.builder, expr)
    def visit_float_expr(self, expr: FloatExpr) -> Value:
        return transform_float_expr(self.builder, expr)
    def visit_complex_expr(self, expr: ComplexExpr) -> Value:
        return transform_complex_expr(self.builder, expr)
    def visit_str_expr(self, expr: StrExpr) -> Value:
        return transform_str_expr(self.builder, expr)
    def visit_bytes_expr(self, expr: BytesExpr) -> Value:
        return transform_bytes_expr(self.builder, expr)
    def visit_ellipsis(self, expr: EllipsisExpr) -> Value:
        return transform_ellipsis(self.builder, expr)
    def visit_list_expr(self, expr: ListExpr) -> Value:
        return transform_list_expr(self.builder, expr)
    def visit_tuple_expr(self, expr: TupleExpr) -> Value:
        return transform_tuple_expr(self.builder, expr)
    def visit_dict_expr(self, expr: DictExpr) -> Value:
        return transform_dict_expr(self.builder, expr)
    def visit_set_expr(self, expr: SetExpr) -> Value:
        return transform_set_expr(self.builder, expr)
    def visit_list_comprehension(self, expr: ListComprehension) -> Value:
        return transform_list_comprehension(self.builder, expr)
    def visit_set_comprehension(self, expr: SetComprehension) -> Value:
        return transform_set_comprehension(self.builder, expr)
    def visit_dictionary_comprehension(self, expr: DictionaryComprehension) -> Value:
        return transform_dictionary_comprehension(self.builder, expr)
    def visit_slice_expr(self, expr: SliceExpr) -> Value:
        return transform_slice_expr(self.builder, expr)
    def visit_generator_expr(self, expr: GeneratorExpr) -> Value:
        return transform_generator_expr(self.builder, expr)
    def visit_lambda_expr(self, expr: LambdaExpr) -> Value:
        return transform_lambda_expr(self.builder, expr)
    def visit_yield_expr(self, expr: YieldExpr) -> Value:
        return transform_yield_expr(self.builder, expr)
    def visit_yield_from_expr(self, o: YieldFromExpr) -> Value:
        return transform_yield_from_expr(self.builder, o)
    def visit_await_expr(self, o: AwaitExpr) -> Value:
        return transform_await_expr(self.builder, o)
    def visit_assignment_expr(self, o: AssignmentExpr) -> Value:
        return transform_assignment_expr(self.builder, o)
    # --- Analysis-only / pre-lowered nodes: must never reach the IR builder ---
    def visit_enum_call_expr(self, o: EnumCallExpr) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit__promote_expr(self, o: PromoteExpr) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_namedtuple_expr(self, o: NamedTupleExpr) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_newtype_expr(self, o: NewTypeExpr) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_temp_node(self, o: TempNode) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_type_alias_expr(self, o: TypeAliasExpr) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_type_application(self, o: TypeApplication) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_type_var_expr(self, o: TypeVarExpr) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_paramspec_expr(self, o: ParamSpecExpr) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_type_var_tuple_expr(self, o: TypeVarTupleExpr) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_typeddict_expr(self, o: TypedDictExpr) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_reveal_expr(self, o: RevealExpr) -> Value:
        assert False, "can't compile analysis-only expressions"
    def visit_var(self, o: Var) -> None:
        assert False, "can't compile Var; should have been handled already?"
    def visit_cast_expr(self, o: CastExpr) -> Value:
        assert False, 'CastExpr should have been handled in CallExpr'
    def visit_assert_type_expr(self, o: AssertTypeExpr) -> Value:
        assert False, 'AssertTypeExpr should have been handled in CallExpr'
    def visit_star_expr(self, o: StarExpr) -> Value:
        assert False, 'should have been handled in Tuple/List/Set/DictExpr or CallExpr'
    def bail(self, msg: str, line: int) -> NoReturn:
        """Report an error via the builder and abort compilation of this function."""
        self.builder.error(msg, line)
        raise UnsupportedException()
def sync_states(states: Dict[(str, Dict[(str, Any)])], devices: Dict[(str, torch.device)], metrics_traversal_order: List[Tuple[(str, str)]], process_group: Optional[dist.ProcessGroup]=None, rank: Optional[int]=None) -> Optional[List[Dict[(str, Dict[(str, Any)])]]]:
    """All-gather metric state across distributed ranks.

    Each (metric_name, state_name) pair in `metrics_traversal_order` is synced
    with a type-specific helper; the traversal order must be identical on every
    rank so collectives line up.

    Returns the per-rank list of gathered state collections on the destination
    rank (or on every rank when `rank` is None); returns None on other ranks.

    Raises:
        RuntimeError: when a state value is not a tensor, list, dict, int or float.
    """
    gathered_states = [_get_empty_metric_state_collection(metrics_traversal_order=metrics_traversal_order) for _ in range(dist.get_world_size())]
    for (metric_name, state_name) in metrics_traversal_order:
        my_state_data = states[metric_name][state_name]
        # Dispatch on the state's Python type; helpers fill `gathered_states` in place.
        if isinstance(my_state_data, torch.Tensor):
            _sync_tensor_states(metric_name, state_name, my_state_data, gathered_states, process_group=process_group, rank=rank)
        elif isinstance(my_state_data, list):
            _sync_list_tensor_states(metric_name, state_name, my_state_data, devices[metric_name], gathered_states, process_group=process_group, rank=rank)
        elif isinstance(my_state_data, dict):
            _sync_dict_tensor_states(metric_name, state_name, my_state_data, devices[metric_name], gathered_states, process_group=process_group, rank=rank)
        elif isinstance(my_state_data, (int, float)):
            _sync_obj_states(metric_name, state_name, my_state_data, gathered_states, process_group=process_group, rank=rank)
        else:
            raise RuntimeError(f'Do not know how to sync state of type: {type(my_state_data)} for state {metric_name} {state_name}')
    if ((rank is None) or (dist.get_rank(group=process_group) == rank)):
        return gathered_states
    return None
def main(argv):
    """CLI entry point: parse args, load per-file metric scatters, dispatch to the plotting backend."""
    args = setup_args().parse_args(argv)
    # At least one output channel must be selected.
    if not (args.show or args.output):
        raise ValueError('select output file destination or --show')
    scatters = [parse_json_file(results_file, args.metric) for results_file in args.results_file]
    ylabel = f'{args.metric} [dB]'
    backends = {'matplotlib': matplotlib_plt, 'plotly': plotly_plt}
    plot_fn = backends[args.backend]
    plot_fn(scatters, args.title, ylabel, args.output, limits=args.axes, figsize=args.figsize, show=args.show)
class PermanentCheckBox(QtWidgets.QCheckBox):
    """Checkbox that shows an explanatory tooltip on hover while the window's help mode is on."""
    def enterEvent(self, e):
        # Only show the tooltip when the top-level window has help enabled.
        if (self.window().showhelp is True):
            QtWidgets.QToolTip.showText(e.globalPos(), '<h3>Permanent Markers / Annotations</h3>If checked, the markers and annotations created with the Pick/Click callbacks will be permanent.(e.g. they are not removed on the next click).')
def _create_pr_data_frame(evaluation_runs: List[EvaluationRun], methods: List[str]) -> DataFrame:
    """Combine per-run precision/recall curves into one frame tagged with each method's name."""
    frames = []
    for run, method_name in zip(evaluation_runs, methods):
        spotting = run.evaluation.spotting_evaluation
        frame = _add_class_name_mapping(spotting.pr_data_frame, spotting.label_map)
        frame[METHOD] = method_name
        frames.append(frame)
    combined = pandas.concat(frames)
    # Remove degenerate points where both precision and recall are zero.
    degenerate = ((combined[SpottingEvaluation.PRECISION] == 0) & (combined[SpottingEvaluation.RECALL] == 0))
    combined.drop(combined[degenerate].index, inplace=True)
    combined.sort_values(SpottingEvaluation.THRESHOLD, inplace=True)
    return combined
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.