code stringlengths 281 23.7M |
|---|
class ELFTest(unittest.TestCase):
    """Emulate the Tenda AC15 ARM httpd with Qiling and probe it over HTTP."""

    def test_tenda_ac15_arm(self):
        def nvram_listener():
            # Fake the firmware's cfm NVRAM daemon: answer 'lan.webiplansslen'
            # queries over the unix socket the emulated httpd expects.
            server_address = '../examples/rootfs/arm_tendaac15/var/cfm_socket'
            data = ''
            try:
                os.unlink(server_address)
            except OSError:
                # Only ignore "did not exist"; re-raise real unlink failures.
                if os.path.exists(server_address):
                    raise
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.bind(server_address)
            sock.listen(1)
            while True:
                (connection, client_address) = sock.accept()
                try:
                    while True:
                        data += str(connection.recv(1024))
                        if ('lan.webiplansslen' in data):
                            connection.send('192.168.170.169'.encode())
                        else:
                            break
                        data = ''
                finally:
                    connection.close()

        def patcher(ql):
            # Rewrite every 'br0' interface-name string in emulated memory to
            # 'lo' so the firmware binds to loopback instead of a bridge.
            br0_addr = ql.mem.search(('br0'.encode() + b'\x00'))
            for addr in br0_addr:
                ql.mem.write(addr, b'lo\x00')

        def my_tenda():
            # NOTE(review): this statement was truncated in the original
            # source; reconstructed to launch the firmware's httpd binary.
            ql = Qiling(['../examples/rootfs/arm_tendaac15/bin/httpd'],
                        '../examples/rootfs/arm_tendaac15',
                        verbose=QL_VERBOSE.DEBUG)
            ql.add_fs_mapper('/dev/urandom', '/dev/urandom')
            ql.hook_address(patcher, ql.loader.elf_entry)
            ql.run()
            del ql

        if (__name__ == '__main__'):
            threadLock = threading.Lock()
            threads = []
            nvram_listener_therad = threading.Thread(target=nvram_listener, daemon=True)
            mytenda_therad = threading.Thread(target=my_tenda, daemon=True)
            nvram_listener_therad.start()
            mytenda_therad.start()
            threads.append(nvram_listener_therad)
            threads.append(mytenda_therad)
            # Give the emulated web server time to come up.
            time.sleep(5)
            # NOTE(review): this statement was truncated in the original
            # source; reconstructed as an HTTP connection to the emulator.
            conn = client.HTTPConnection('localhost', 8080, timeout=10)
            headers = {'X-Requested-With': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded'}
            web_data = {'page': 'CCCCAAAA', 'entrys': 'sync'}
            json_data = json.dumps(web_data)
            conn.request('POST', '/goform/addressNat', json_data, headers)
            response = conn.getresponse()
            self.assertIn('Please update your documents to reflect the new location.', response.read().decode())
def add_extra_methods_hook(ctx: ClassDefContext) -> None:
    """Plugin hook: attach a no-op classmethod and an int->str staticmethod."""
    int_type = ctx.api.named_type('builtins.int')
    str_type = ctx.api.named_type('builtins.str')
    add_method(ctx, 'foo_classmethod', [], NoneType(), is_classmethod=True)
    static_arg = Argument(Var(''), int_type, None, ARG_POS)
    add_method(ctx, 'foo_staticmethod', [static_arg], str_type, is_staticmethod=True)
def add_validate_argument(parser: ArgumentParser):
    """Register mutually exclusive --validate/--no-validate flags.

    Both flags share the ``validate`` destination, which defaults to True.
    """
    exclusive = parser.add_mutually_exclusive_group()
    exclusive.add_argument(
        '--validate',
        action='store_true',
        dest='validate',
        default=True,
        help="After generating a layout, validate if it's possible. Default behaviour.",
    )
    exclusive.add_argument(
        '--no-validate',
        action='store_false',
        dest='validate',
        default=True,
        help="After generating a layout, don't validate if it's possible.",
    )
class APICallResponseDataValidator(BaseAPICallResponseValidator):
    """Validates a response's payload against the spec of the matched operation."""

    def iter_errors(self, request: Request, response: Response) -> Iterator[Exception]:
        # Resolve the operation first; a path-resolution failure is itself
        # the one and only error to report.
        try:
            _, operation, _, _, _ = self._find_path(request)
        except PathError as exc:
            yield exc
            return
        yield from self._iter_data_errors(
            response.status_code, response.data, response.content_type, operation
        )
class Icassp2018Test(unittest.TestCase):
    """Clustering sanity check on a noisy 1000x6 block-structured matrix."""

    def test_1000by6_matrix(self):
        # Four distinct row prototypes, repeated 400/300/200/100 times.
        prototypes = np.array([
            [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 2.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
        ])
        counts = [400, 300, 200, 100]
        matrix = np.repeat(prototypes, counts, axis=0)
        # Perturb with uniform noise in [-0.1, 0.1).
        noisy = (np.random.rand(1000, 6) * 2) - 1
        matrix = matrix + (noisy * 0.1)
        labels = configs.icassp2018_clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.repeat(np.arange(4), counts)
        np.testing.assert_equal(expected, labels)
class VOC12SegmentationDataset(Dataset):
    """VOC12 dataset yielding image/segmentation-label pairs.

    Supports optional rescaling, normalization, horizontal flipping, and
    random or top-left cropping; images are returned in CHW layout.
    """

    def __init__(self, img_name_list_path, label_dir, crop_size, voc12_root,
                 rescale=None, img_normal=TorchvisionNormalize(), hor_flip=False,
                 crop_method='random'):
        self.img_name_list = load_img_name_list(img_name_list_path)
        self.voc12_root = voc12_root
        self.label_dir = label_dir
        self.rescale = rescale
        self.crop_size = crop_size
        self.img_normal = img_normal
        self.hor_flip = hor_flip
        self.crop_method = crop_method

    def __len__(self):
        return len(self.img_name_list)

    def __getitem__(self, idx):
        name = self.img_name_list[idx]
        name_str = decode_int_filename(name)
        img = np.asarray(imageio.imread(get_img_path(name_str, self.voc12_root)))
        label = imageio.imread(os.path.join(self.label_dir, name_str + '.png'))
        if self.rescale:
            # Bicubic for the image, nearest for the label.
            img, label = imutils.random_scale((img, label), scale_range=self.rescale, order=(3, 0))
        if self.img_normal:
            img = self.img_normal(img)
        if self.hor_flip:
            img, label = imutils.random_lr_flip((img, label))
        if self.crop_method == 'random':
            # Pad values: 0 for image pixels, 255 (ignore index) for labels.
            img, label = imutils.random_crop((img, label), self.crop_size, (0, 255))
        else:
            img = imutils.top_left_crop(img, self.crop_size, 0)
            label = imutils.top_left_crop(label, self.crop_size, 255)
        return {'name': name, 'img': imutils.HWC_to_CHW(img), 'label': label}
# NOTE(review): the decorator was garbled to a bare `.parametrize(...)` in the
# original source; restored as `@pytest.mark.parametrize`.
@pytest.mark.parametrize(
    'files, glob_pattern, upload_statuses, expected',
    [
        (['foo.zip', 'bar.whl'], '*.zip', [True], 1),
        (['foo.whl', 'foo.egg', 'foo.tar.gz'], 'foo.*', [True, True, True], 3),
        ([], '*', [], 0),
        (['specialconfig.yaml', 'something.whl', 'desc.md'], '*.yaml', [True], 1),
        (['specialconfig.yaml', 'something.whl', 'desc.md'], '*.md', [True], 1),
    ],
)
def test_upload_dists_when_release_id_found(default_gitea_client, files, glob_pattern, upload_statuses, expected):
    """upload_dists() uploads every glob-matched file and returns the count."""
    release_id = 420
    tag = "doesn't matter"
    with mock.patch.object(default_gitea_client, 'get_release_id_by_tag') as mock_get_release_id_by_tag, mock.patch.object(default_gitea_client, 'upload_asset') as mock_upload_asset, mock.patch.object(glob, 'glob') as mock_glob_glob, mock.patch.object(os.path, 'isfile') as mock_os_path_isfile:
        mock_os_path_isfile.return_value = True
        matching_files = glob.fnmatch.filter(files, glob_pattern)
        mock_glob_glob.return_value = matching_files
        mock_get_release_id_by_tag.return_value = release_id
        mock_upload_asset.side_effect = upload_statuses
        assert (default_gitea_client.upload_dists(tag, glob_pattern) == expected)
        # The release is looked up once, and each matching file is uploaded.
        mock_get_release_id_by_tag.assert_called_once_with(tag=tag)
        assert ([mock.call(release_id, fn) for fn in matching_files] == mock_upload_asset.call_args_list)
def test_history_clear(mocker, hist_file):
    """`history --clear` empties history, deletes the persistence file, and
    reports failure when the file cannot be removed."""
    app = cmd2.Cmd(persistent_history_file=hist_file)
    # Seed history with two commands.
    run_cmd(app, 'help')
    run_cmd(app, 'alias')
    (out, err) = run_cmd(app, 'history')
    assert out
    verify_hi_last_result(app, 2)
    # Clearing removes all entries and deletes the history file.
    run_cmd(app, 'history --clear')
    assert (app.last_result is True)
    (out, err) = run_cmd(app, 'history')
    assert (out == [])
    assert (not os.path.exists(hist_file))
    verify_hi_last_result(app, 0)
    # Clearing again when the file is already gone still succeeds.
    run_cmd(app, 'history --clear')
    assert (app.last_result is True)
    # Simulate os.remove failing: clear should report an error and False.
    mock_remove = mocker.patch('os.remove')
    mock_remove.side_effect = OSError
    (out, err) = run_cmd(app, 'history --clear')
    assert (out == [])
    assert ('Error removing history file' in err[0])
    assert (app.last_result is False) |
class GaussianMLPRegressor(LasagnePowered, Serializable):
    """Regressor modeling p(y|x) as a Gaussian with diagonal covariance.

    The mean is an MLP of the input; the log-std is either another MLP
    (adaptive_std=True) or an input-independent learned parameter vector.
    Inputs/outputs can be normalized, and fitting can be constrained by a
    KL-divergence trust region between consecutive fits.
    """

    def __init__(self, input_shape, output_dim, mean_network=None, hidden_sizes=(32, 32), hidden_nonlinearity=NL.rectify, optimizer=None, use_trust_region=True, step_size=0.01, learn_std=True, init_std=1.0, adaptive_std=False, std_share_network=False, std_hidden_sizes=(32, 32), std_nonlinearity=None, normalize_inputs=True, normalize_outputs=True, name=None, batchsize=None, subsample_factor=1.0):
        Serializable.quick_init(self, locals())
        self._batchsize = batchsize
        self._subsample_factor = subsample_factor
        if (optimizer is None):
            # Trust-region fitting needs a penalty-constrained optimizer.
            if use_trust_region:
                optimizer = PenaltyLbfgsOptimizer()
            else:
                optimizer = LbfgsOptimizer()
        self._optimizer = optimizer
        if (mean_network is None):
            mean_network = MLP(input_shape=input_shape, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=None)
        l_mean = mean_network.output_layer
        if adaptive_std:
            # Log-std predicted from the input by its own MLP.
            l_log_std = MLP(input_shape=input_shape, input_var=mean_network.input_layer.input_var, output_dim=output_dim, hidden_sizes=std_hidden_sizes, hidden_nonlinearity=std_nonlinearity, output_nonlinearity=None).output_layer
        else:
            # Log-std is a single (optionally trainable) parameter vector.
            l_log_std = ParamLayer(mean_network.input_layer, num_units=output_dim, param=lasagne.init.Constant(np.log(init_std)), name='output_log_std', trainable=learn_std)
        LasagnePowered.__init__(self, [l_mean, l_log_std])
        xs_var = mean_network.input_layer.input_var
        ys_var = TT.matrix('ys')
        old_means_var = TT.matrix('old_means')
        old_log_stds_var = TT.matrix('old_log_stds')
        # Shared normalization statistics; updated from data in fit().
        x_mean_var = theano.shared(np.zeros(((1,) + input_shape), dtype=theano.config.floatX), name='x_mean', broadcastable=((True,) + ((False,) * len(input_shape))))
        x_std_var = theano.shared(np.ones(((1,) + input_shape), dtype=theano.config.floatX), name='x_std', broadcastable=((True,) + ((False,) * len(input_shape))))
        y_mean_var = theano.shared(np.zeros((1, output_dim), dtype=theano.config.floatX), name='y_mean', broadcastable=(True, False))
        y_std_var = theano.shared(np.ones((1, output_dim), dtype=theano.config.floatX), name='y_std', broadcastable=(True, False))
        normalized_xs_var = ((xs_var - x_mean_var) / x_std_var)
        normalized_ys_var = ((ys_var - y_mean_var) / y_std_var)
        normalized_means_var = L.get_output(l_mean, {mean_network.input_layer: normalized_xs_var})
        normalized_log_stds_var = L.get_output(l_log_std, {mean_network.input_layer: normalized_xs_var})
        # Map network outputs back to the un-normalized data space.
        means_var = ((normalized_means_var * y_std_var) + y_mean_var)
        log_stds_var = (normalized_log_stds_var + TT.log(y_std_var))
        normalized_old_means_var = ((old_means_var - y_mean_var) / y_std_var)
        normalized_old_log_stds_var = (old_log_stds_var - TT.log(y_std_var))
        dist = self._dist = DiagonalGaussian(output_dim)
        normalized_dist_info_vars = dict(mean=normalized_means_var, log_std=normalized_log_stds_var)
        mean_kl = TT.mean(dist.kl_sym(dict(mean=normalized_old_means_var, log_std=normalized_old_log_stds_var), normalized_dist_info_vars))
        # Training loss: negative mean log-likelihood in normalized space.
        loss = (- TT.mean(dist.log_likelihood_sym(normalized_ys_var, normalized_dist_info_vars)))
        self._f_predict = compile_function([xs_var], means_var)
        self._f_pdists = compile_function([xs_var], [means_var, log_stds_var])
        self._l_mean = l_mean
        self._l_log_std = l_log_std
        optimizer_args = dict(loss=loss, target=self, network_outputs=[normalized_means_var, normalized_log_stds_var])
        if use_trust_region:
            optimizer_args['leq_constraint'] = (mean_kl, step_size)
            optimizer_args['inputs'] = [xs_var, ys_var, old_means_var, old_log_stds_var]
        else:
            optimizer_args['inputs'] = [xs_var, ys_var]
        self._optimizer.update_opt(**optimizer_args)
        self._use_trust_region = use_trust_region
        self._name = name
        self._normalize_inputs = normalize_inputs
        self._normalize_outputs = normalize_outputs
        self._mean_network = mean_network
        self._x_mean_var = x_mean_var
        self._x_std_var = x_std_var
        self._y_mean_var = y_mean_var
        self._y_std_var = y_std_var

    def fit(self, xs, ys):
        """Fit the regressor to inputs `xs` / targets `ys`; log loss metrics."""
        if (self._subsample_factor < 1):
            # Optionally fit on a random subsample of the data.
            num_samples_tot = xs.shape[0]
            idx = np.random.randint(0, num_samples_tot, int((num_samples_tot * self._subsample_factor)))
            (xs, ys) = (xs[idx], ys[idx])
        if self._normalize_inputs:
            # Refresh normalization statistics from the current data.
            self._x_mean_var.set_value(np.mean(xs, axis=0, keepdims=True).astype(theano.config.floatX))
            self._x_std_var.set_value((np.std(xs, axis=0, keepdims=True) + 1e-08).astype(theano.config.floatX))
        if self._normalize_outputs:
            self._y_mean_var.set_value(np.mean(ys, axis=0, keepdims=True).astype(theano.config.floatX))
            self._y_std_var.set_value((np.std(ys, axis=0, keepdims=True) + 1e-08).astype(theano.config.floatX))
        if self._name:
            prefix = (self._name + '_')
        else:
            prefix = ''
        (loss_before, loss_after, mean_kl, batch_count) = (0.0, 0.0, 0.0, 0)
        for batch in iterate_minibatches_generic(input_lst=[xs, ys], batchsize=self._batchsize, shuffle=True):
            batch_count += 1
            (xs, ys) = batch
            if self._use_trust_region:
                # Current predictions define the KL trust region for this step.
                (old_means, old_log_stds) = self._f_pdists(xs)
                inputs = [xs, ys, old_means, old_log_stds]
            else:
                inputs = [xs, ys]
            loss_before += self._optimizer.loss(inputs)
            self._optimizer.optimize(inputs)
            loss_after += self._optimizer.loss(inputs)
            if self._use_trust_region:
                mean_kl += self._optimizer.constraint_val(inputs)
        logger.record_tabular((prefix + 'LossBefore'), (loss_before / batch_count))
        logger.record_tabular((prefix + 'LossAfter'), (loss_after / batch_count))
        # BUG FIX: previously logged `loss_before - (loss_after / batch_count)`,
        # mixing the un-averaged total with a per-batch average; average the
        # difference so that dLoss == LossBefore - LossAfter.
        logger.record_tabular((prefix + 'dLoss'), ((loss_before - loss_after) / batch_count))
        if self._use_trust_region:
            logger.record_tabular((prefix + 'MeanKL'), (mean_kl / batch_count))

    def predict(self, xs):
        """Return predicted means for inputs `xs`."""
        return self._f_predict(xs)

    def sample_predict(self, xs):
        """Sample outputs from the predicted Gaussians for inputs `xs`."""
        (means, log_stds) = self._f_pdists(xs)
        return self._dist.sample(dict(mean=means, log_std=log_stds))

    def predict_log_likelihood(self, xs, ys):
        """Return log-likelihood of targets `ys` under the predicted Gaussians."""
        (means, log_stds) = self._f_pdists(xs)
        return self._dist.log_likelihood(ys, dict(mean=means, log_std=log_stds))

    def log_likelihood_sym(self, x_var, y_var):
        """Symbolic (Theano) log-likelihood of `y_var` given `x_var`."""
        normalized_xs_var = ((x_var - self._x_mean_var) / self._x_std_var)
        (normalized_means_var, normalized_log_stds_var) = L.get_output([self._l_mean, self._l_log_std], {self._mean_network.input_layer: normalized_xs_var})
        means_var = ((normalized_means_var * self._y_std_var) + self._y_mean_var)
        log_stds_var = (normalized_log_stds_var + TT.log(self._y_std_var))
        return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))

    def get_param_values(self, **tags):
        return LasagnePowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        return LasagnePowered.set_param_values(self, flattened_params, **tags)
def test_collection_args_do_not_duplicate_modules(pytester: Pytester) -> None:
    """Two test ids from one module collect the module once; with
    --keep-duplicates and a repeated path arg it is collected twice."""
    pytester.makepyfile(**{'d/test_it': '\n def test_1(): pass\n def test_2(): pass\n '})
    result = pytester.runpytest('--collect-only', 'd/test_it.py::test_1', 'd/test_it.py::test_2')
    # One module node even though two test ids referenced it.
    result.stdout.fnmatch_lines([' <Dir d>', ' <Module test_it.py>', ' <Function test_1>', ' <Function test_2>'], consecutive=True)
    # --keep-duplicates with the directory given twice duplicates the tests.
    result = pytester.runpytest('--collect-only', '--keep-duplicates', 'd', 'd')
    result.stdout.fnmatch_lines([' <Dir d>', ' <Module test_it.py>', ' <Function test_1>', ' <Function test_2>', ' <Function test_1>', ' <Function test_2>'], consecutive=True) |
def test_update_pfs():
    """A PFSCapacityUpdate built from channel state starts unsigned, can be
    signed/recovered, and round-trips through dict serialization."""
    balance_proof = factories.create(
        factories.BalanceProofSignedStateProperties(pkey=PRIVKEY)
    )
    channel_state = factories.create(factories.NettingChannelStateProperties())
    channel_state.our_state.balance_proof = balance_proof
    channel_state.partner_state.balance_proof = balance_proof
    message = PFSCapacityUpdate.from_channel_state(channel_state=channel_state)
    # Freshly built messages carry no signature yet.
    assert message.signature == EMPTY_SIGNATURE
    signer_privkey, signer_address = factories.make_privkey_address()
    message.sign(LocalSigner(signer_privkey))
    # The signature must recover to the signing address.
    assert recover(message._data_to_sign(), message.signature) == signer_address
    # Serialization round-trip preserves equality.
    assert message == DictSerializer.deserialize(DictSerializer.serialize(message))
class NoneWordSplitter(object):
    """No-op word splitter: every input is a single, already-complete word."""

    def __init__(self, model):
        # `model` is accepted for interface compatibility only; it is unused.
        pass

    def split(self, string):
        """Return the whole input as a one-element list (no splitting)."""
        return [string]

    def process_line(self, string):
        """Lines pass through unchanged, wrapped in a list."""
        return [string]

    def finished_word(self, string):
        """Every string counts as a finished word."""
        return True

    def merge(self, list_of_string):
        """Concatenate pieces back into one string."""
        return ''.join(list_of_string)

    def last_full_word_step(self, tokens, step):
        """All tokens are full words, so the last full word ends at len(tokens)."""
        return len(tokens)

    def end_idx_last_full_word(self, tokens):
        """All tokens are full words, so the end index is len(tokens)."""
        return len(tokens)
def synchronized(lock: threading.RLock) -> Callable[([CallableT], CallableT)]:
    """Decorator factory: wrap a callable so every call runs holding `lock`.

    NOTE(review): the original had a bare `(function)` expression statement
    where a decorator belongs; restored as `functools.wraps(function)` so the
    wrapper preserves the wrapped callable's metadata.
    """
    import functools  # local import: the file's import header is not in view

    def outside_wrapper(function: CallableT) -> CallableT:
        @functools.wraps(function)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            with lock:
                return function(*args, **kwargs)
        return cast(CallableT, wrapper)
    return outside_wrapper
class NitrobitNet(SimpleDownloader):
    # pyLoad downloader plugin for nitrobit.net (premium accounts only).
    # NOTE(review): several string constants below were truncated when this
    # source was extracted (the URLs were stripped), leaving unterminated
    # string literals on __pattern__, URL_REPLACEMENTS, and the load() call in
    # handle_premium, plus a broken header line. Restore them from the
    # upstream pyLoad plugin before use.
    __name__ = 'NitrobitNet'
    __type__ = 'downloader'
    __version__ = '0.02'
    __status__ = 'testing'
    __pattern__ = '
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', False), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Nitrobit.net downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]
    LOGIN_PREMIUM = True
    URL_REPLACEMENTS = [((__pattern__ + '.*'), '
    NAME_PATTERN = '<b> : </b><span title="(?P<N>.+?)"'
    SIZE_PATTERN = '<b> : </b><span dir="ltr" style="text-align: left;">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+)</span>'
    DL_LIMIT_PATTERN = 'daily downloadlimit reached| '
    LINK_PREMIUM_PATTERN = 'id="download" href="(.+?)"'
    def handle_premium(self, pyfile):
        # Cache-busting timestamp in milliseconds for the unlock request.
        current_millis = int((time.time() * 1000))
        # NOTE(review): the next two statements are truncated (header-setup
        # call and request URL were stripped during extraction).
        self.req. ['X-Requested-With: XMLHttpRequest'])
        self.data = self.load(' get={'password': self.account.info['login']['password'], 'file': self.info['pattern']['ID'], 'keep': 'false', '_': current_millis})
        # Account expiry, reported as seconds in the page's "unlockedTick".
        m = re.search('id="unlockedTick".+?alt="(\\d+)"', self.data)
        if (m is not None):
            validuntil = (time.time() + float(m.group(1)))
            self.log_info(self._('Account valid until {}').format(time.strftime('%d/%m/%Y'), time.gmtime(validuntil)))
        # Remaining daily traffic: "used/total" in the dailyVolume field.
        m = re.search('id="dailyVolume" value="(\\d+)?/(\\d+)"', self.data)
        if (m is not None):
            trafficleft = (int(m.group(2)) - int((m.group(1) or '0')))
            self.log_info(self._('Daily traffic left {}').format(format.size(trafficleft)))
        # Extract the premium download link, if present.
        m = re.search(self.LINK_PREMIUM_PATTERN, self.data)
        if (m is not None):
            self.link = m.group(1) |
class TestVectorize():
    """vectorize_node should lift ops onto inputs with one extra batch dim."""

    def test_elemwise(self):
        v = tensor(shape=(None,))
        m = tensor(shape=(None, None))
        batched = vectorize_node(exp(v).owner, m)
        # Elemwise ops vectorize to themselves on the batched input.
        assert batched.op == exp
        assert batched.inputs[0] is m

    def test_dimshuffle(self):
        v = tensor(shape=(None,))
        m = tensor(shape=(None, None))
        batched = vectorize_node(exp(v).owner, m)
        assert batched.op == exp
        assert batched.inputs[0] is m
        col = tensor(shape=(None, 1))
        batched_col = tensor(shape=(None, None, 1))
        batched = vectorize_node(col.dimshuffle(0).owner, batched_col)
        # Dropping the length-1 axis shifts by the new leading batch dim.
        assert isinstance(batched.op, DimShuffle)
        assert batched.op.new_order == (0, 1)
        assert batched.inputs[0] is batched_col
        assert batched.outputs[0].type.shape == (None, None)

    def test_CAReduce(self):
        m = tensor(shape=(None, None))
        t3 = tensor(shape=(None, None, None))
        batched = vectorize_node(pt_sum(m).owner, t3)
        # A full reduction becomes a reduction over all non-batch axes.
        assert isinstance(batched.op, Sum)
        assert batched.op.axis == (1, 2)
        assert batched.inputs[0] is t3
        bool_m = tensor(dtype='bool', shape=(None, None))
        bool_t3 = tensor(dtype='bool', shape=(None, None, None))
        batched = vectorize_node(pt_any(bool_m, axis=-2).owner, bool_t3)
        # A negative axis is resolved relative to the original rank.
        assert isinstance(batched.op, Any)
        assert batched.op.axis == (1,)
        assert batched.inputs[0] is bool_t3
class Graphite(Layer):
    """Graphite decoder layer: a dense transform followed by propagation
    through two low-rank reconstruction matrices."""

    def __init__(self, input_dim, output_dim, dropout=0.0, act=tf.nn.relu, **kwargs):
        super(Graphite, self).__init__(**kwargs)
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name='weights')
        self.dropout = dropout
        self.act = act

    def _call(self, inputs):
        # inputs: (node features, reconstruction factor 1, reconstruction factor 2)
        features = inputs[0]
        recon_1 = inputs[1]
        recon_2 = inputs[2]
        hidden = tf.matmul(features, self.vars['weights'])
        # Propagate: R1 (R1^T h) + R2 (R2^T h)
        term_1 = tf.matmul(recon_1, tf.matmul(tf.transpose(recon_1), hidden))
        term_2 = tf.matmul(recon_2, tf.matmul(tf.transpose(recon_2), hidden))
        return self.act(term_1 + term_2)
def train_mnist(config, data_dir=None, num_epochs=10, num_workers=4, use_gpu=False, callbacks=None):
    """Train an MNISTClassifier with the Horovod-on-Ray distributed strategy."""
    model = MNISTClassifier(config, data_dir)
    if not callbacks:
        callbacks = []
    strategy = HorovodRayStrategy(num_workers=num_workers, use_gpu=use_gpu)
    trainer = pl.Trainer(max_epochs=num_epochs, callbacks=callbacks, strategy=strategy)
    trainer.fit(model)
class OrthoMover(Behaviour):
    """Zooms the orthographic camera with Q/E and pans it with the input axes."""
    cam = ShowInInspector(Camera)

    async def Update(self, dt):
        # E zooms in, Q zooms out; zoom is clamped to [2, 16].
        if Input.GetKey(KeyCode.E):
            self.cam.orthoSize -= dt * 3
        if Input.GetKey(KeyCode.Q):
            self.cam.orthoSize += dt * 3
        self.cam.orthoSize = Mathf.Clamp(self.cam.orthoSize, 2, 16)
        # Screen-aligned movement directions in world space, scaled by dt.
        right_step = Vector3(1, 0, -1) * dt * 8
        forward_step = Vector3(1, 0, 1) * dt * 8
        self.cam.transform.position += right_step * Input.GetAxis('Horizontal')
        self.cam.transform.position += forward_step * Input.GetAxis('Vertical')
def write_uem(uemf, uem, n_digits=3):
    """Write UEM scoring regions to `uemf`.

    Emits one `file_id 1 onset offset` line per region, sorted by file id and
    then by (onset, offset), with times formatted to `n_digits` digits.
    """
    with open(uemf, 'wb') as f:
        for file_id in sorted(iterkeys(uem)):
            for onset, offset in sorted(uem[file_id]):
                fields = [file_id, '1', format_float(onset, n_digits), format_float(offset, n_digits)]
                f.write(' '.join(fields).encode('utf-8'))
                f.write(b'\n')
class Network_Triple(Virtue_Triple):
    """Triple-embedding scorer: per-field MLP transforms followed by a linear
    head over an architecture-dependent combination of the three embeddings."""

    def __init__(self, num_ps, num_qs, num_rs, embedding_dim, arch, reg):
        super(Network_Triple, self).__init__(num_ps, num_qs, num_rs, embedding_dim, reg)
        self.arch = arch
        self.mlp_p = arch['mlp']['p']
        self.mlp_q = arch['mlp']['q']
        self.mlp_r = arch['mlp']['r']
        # The head's input width depends on how the triple is combined.
        if arch['triple'] == 'concat_concat':
            head_width = 3 * embedding_dim
        elif 'concat' in arch['triple']:
            head_width = 2 * embedding_dim
        else:
            head_width = embedding_dim
        self._FC = nn.Linear(head_width, 1, bias=False)

    def parameters(self):
        """All trainable parameters: the three embeddings plus the head."""
        params = list(self._PsEmbedding.parameters())
        params += list(self._QsEmbedding.parameters())
        params += list(self._RsEmbedding.parameters())
        params += list(self._FC.parameters())
        return params

    def forward(self, ps, qs, rs):
        # Constrain the head's weight before scoring.
        constrain(next(self._FC.parameters()))
        p_emb = self._PsEmbedding(ps)
        q_emb = self._QsEmbedding(qs)
        r_emb = self._RsEmbedding(rs)
        # Each embedding is passed element-wise through its own MLP.
        p_trans = self.mlp_p(p_emb.view(-1, 1)).view(p_emb.size())
        q_trans = self.mlp_q(q_emb.view(-1, 1)).view(q_emb.size())
        r_trans = self.mlp_r(r_emb.view(-1, 1)).view(r_emb.size())
        inferences = self._FC(ops_triple(self.arch['triple'], p_trans, q_trans, r_trans))
        regs = self.reg * (torch.norm(p_emb) + torch.norm(q_emb) + torch.norm(r_emb))
        return (inferences, regs)
# NOTE(review): the decorators were garbled to bare `.slow` / `.pydicom`
# lines in the original source; restored as pytest marks.
@pytest.mark.slow
@pytest.mark.pydicom
def test_ct(pinn):
    """Exported CT slices must match the corresponding source DICOM images."""
    for p in pinn:
        export_path = os.path.join(working_path, 'output', p.patient_info['MedicalRecordNumber'], 'CT')
        os.makedirs(export_path)
        export_plan = p.plans[0]
        p.export_image(export_plan.primary_image, export_path=export_path)
        for f in os.listdir(export_path):
            if f.startswith('CT'):
                exported_ct = pydicom.read_file(os.path.join(export_path, f))
                assert (exported_ct.Modality == 'CT')
                # Match against the original DICOM this export came from.
                pinn_ct = find_corresponding_dicom(exported_ct)
                assert (pinn_ct is not None)
                assert (pinn_ct.PatientID == exported_ct.PatientID)
                assert (pinn_ct.SliceLocation == exported_ct.SliceLocation)
                # Pixel data must agree to within float tolerance.
                exported_img = exported_ct.pixel_array.astype(np.int16)
                pinn_img = pinn_ct.pixel_array.astype(np.int16)
                assert (exported_img.shape == pinn_img.shape)
                assert np.allclose(exported_img, pinn_img, atol=1e-05)
class TestPriceHistory(unittest.TestCase):
    """Price-history regression tests sharing one session across the class.

    NOTE(review): setUpClass/tearDownClass must be classmethods — unittest
    invokes them as ``cls.setUpClass()``, which fails for plain functions.
    The decorators were missing in the original.
    """

    @classmethod
    def setUpClass(cls):
        cls.session = session_gbl

    @classmethod
    def tearDownClass(cls):
        if (cls.session is not None):
            cls.session.close()
    def test_daily_index(self):
        """Daily/weekly/monthly bars must be date-indexed (time == midnight)."""
        tkrs = ['BHP.AX', 'IMP.JO', 'BP.L', 'PNL.L', 'INTC']
        intervals = ['1d', '1wk', '1mo']
        for tkr in tkrs:
            dat = yf.Ticker(tkr, session=self.session)
            for interval in intervals:
                df = dat.history(period='5y', interval=interval)
                f = (df.index.time == _dt.time(0))
                self.assertTrue(f.all())
    def test_download(self):
        """Multi-ticker download: midnight-only index and all tickers present."""
        tkrs = ['BHP.AX', 'IMP.JO', 'BP.L', 'PNL.L', 'INTC']
        intervals = ['1d', '1wk', '1mo']
        for interval in intervals:
            df = yf.download(tkrs, period='5y', interval=interval)
            f = (df.index.time == _dt.time(0))
            self.assertTrue(f.all())
            # Level 1 of the column MultiIndex holds the ticker symbols.
            df_tkrs = df.columns.levels[1]
            self.assertEqual(sorted(tkrs), sorted(df_tkrs))
    def test_download_with_invalid_ticker(self):
        """An invalid/delisted symbol ('ATVI') in the batch must not corrupt
        the data returned for the valid symbols."""
        invalid_tkrs = ['AAPL', 'ATVI']
        valid_tkrs = ['AAPL', 'INTC']
        data_invalid_sym = yf.download(invalid_tkrs, start='2023-11-16', end='2023-11-17')
        data_valid_sym = yf.download(valid_tkrs, start='2023-11-16', end='2023-11-17')
        self.assertEqual(data_invalid_sym['Close']['AAPL']['2023-11-16'], data_valid_sym['Close']['AAPL']['2023-11-16'])
    def test_duplicatingHourly(self):
        """The last two hourly rows must not fall in the same hour."""
        tkrs = ['IMP.JO', 'BHG.JO', 'SSW.JO', 'BP.L', 'INTC']
        for tkr in tkrs:
            dat = yf.Ticker(tkr, session=self.session)
            tz = dat._get_ticker_tz(proxy=None, timeout=None)
            dt_utc = _tz.timezone('UTC').localize(_dt.datetime.utcnow())
            dt = dt_utc.astimezone(_tz.timezone(tz))
            start_d = (dt.date() - _dt.timedelta(days=7))
            df = dat.history(start=start_d, interval='1h')
            dt0 = df.index[(- 2)]
            dt1 = df.index[(- 1)]
            try:
                self.assertNotEqual(dt0.hour, dt1.hour)
            except AssertionError:
                # Identify which ticker failed before re-raising.
                print('Ticker = ', tkr)
                raise
    def test_duplicatingDaily(self):
        """The last two daily rows must differ (no duplicated day after close)."""
        tkrs = ['IMP.JO', 'BHG.JO', 'SSW.JO', 'BP.L', 'INTC']
        test_run = False
        for tkr in tkrs:
            dat = yf.Ticker(tkr, session=self.session)
            tz = dat._get_ticker_tz(proxy=None, timeout=None)
            dt_utc = _tz.timezone('UTC').localize(_dt.datetime.utcnow())
            dt = dt_utc.astimezone(_tz.timezone(tz))
            # Only meaningful after the local market close (17:00 local).
            if (dt.time() < _dt.time(17, 0)):
                continue
            test_run = True
            df = dat.history(start=(dt.date() - _dt.timedelta(days=7)), interval='1d')
            dt0 = df.index[(- 2)]
            dt1 = df.index[(- 1)]
            try:
                self.assertNotEqual(dt0, dt1)
            except AssertionError:
                print('Ticker = ', tkr)
                raise
        if (not test_run):
            self.skipTest('Skipping test_duplicatingDaily() because only expected to fail just after market close')
    def test_duplicatingWeekly(self):
        """The last two weekly rows must fall in different weeks."""
        tkrs = ['MSFT', 'IWO', 'VFINX', '^GSPC', 'BTC-USD']
        test_run = False
        for tkr in tkrs:
            dat = yf.Ticker(tkr, session=self.session)
            tz = dat._get_ticker_tz(proxy=None, timeout=None)
            dt = _tz.timezone(tz).localize(_dt.datetime.now())
            # Only meaningful Tue-Fri (not Monday or the weekend).
            if (dt.date().weekday() not in [1, 2, 3, 4]):
                continue
            test_run = True
            df = dat.history(start=(dt.date() - _dt.timedelta(days=7)), interval='1wk')
            dt0 = df.index[(- 2)]
            dt1 = df.index[(- 1)]
            try:
                self.assertNotEqual(dt0.week, dt1.week)
            except AssertionError:
                print('Ticker={}: Last two rows within same week:'.format(tkr))
                print(df.iloc[(df.shape[0] - 2):])
                raise
        if (not test_run):
            self.skipTest('Skipping test_duplicatingWeekly() because not possible to fail Monday/weekend')
    def test_pricesEventsMerge(self):
        """A dividend dated after the last price row must survive the merge."""
        tkr = 'INTC'
        start_d = _dt.date(2022, 1, 1)
        end_d = _dt.date(2023, 1, 1)
        df = yf.Ticker(tkr, session=self.session).history(interval='1d', start=start_d, end=end_d)
        div = 1.0
        future_div_dt = (df.index[(- 1)] + _dt.timedelta(days=1))
        # Shift a weekend date to the following Monday.
        if (future_div_dt.weekday() in [5, 6]):
            future_div_dt += (_dt.timedelta(days=1) * (7 - future_div_dt.weekday()))
        divs = _pd.DataFrame(data={'Dividends': [div]}, index=[future_div_dt])
        df2 = yf.utils.safe_merge_dfs(df.drop(['Dividends', 'Stock Splits'], axis=1), divs, '1d')
        self.assertIn(future_div_dt, df2.index)
        self.assertIn('Dividends', df2.columns)
        self.assertEqual(df2['Dividends'].iloc[(- 1)], div)
    def test_pricesEventsMerge_bug(self):
        """Regression test: safe_merge_dfs must not raise when the event falls
        outside the intraday price index; the result is intentionally unused."""
        tkr = 'S32.AX'
        interval = '30m'
        df_index = []
        d = 13
        # Build a synthetic 30-minute trading-day index: 00:00-15:30 + 16:00.
        for h in range(0, 16):
            for m in [0, 30]:
                df_index.append(_dt.datetime(2023, 9, d, h, m))
        df_index.append(_dt.datetime(2023, 9, d, 16))
        df = _pd.DataFrame(index=df_index)
        df.index = _pd.to_datetime(df.index)
        df['Close'] = 1.0
        div = 1.0
        # Dividend the next morning, outside the price index.
        future_div_dt = _dt.datetime(2023, 9, 14, 10)
        divs = _pd.DataFrame(data={'Dividends': [div]}, index=[future_div_dt])
        df2 = yf.utils.safe_merge_dfs(df, divs, interval)
    def test_intraDayWithEvents(self):
        """Intraday history around a dividend date must include that dividend.

        NOTE(review): an identical method with this name is defined again
        later in this class; Python keeps only the later definition, so this
        copy is dead code and should be removed."""
        tkrs = ['BHP.AX', 'IMP.JO', 'BP.L', 'PNL.L', 'INTC']
        test_run = False
        for tkr in tkrs:
            start_d = (_dt.date.today() - _dt.timedelta(days=59))
            end_d = None
            df_daily = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='1d', actions=True)
            df_daily_divs = df_daily['Dividends'][(df_daily['Dividends'] != 0)]
            if (df_daily_divs.shape[0] == 0):
                continue
            last_div_date = df_daily_divs.index[(- 1)]
            start_d = last_div_date.date()
            end_d = (last_div_date.date() + _dt.timedelta(days=1))
            df_intraday = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='15m', actions=True)
            self.assertTrue((df_intraday['Dividends'] != 0.0).any())
            df_intraday_divs = df_intraday['Dividends'][(df_intraday['Dividends'] != 0)]
            df_intraday_divs.index = df_intraday_divs.index.floor('D')
            self.assertTrue(df_daily_divs.equals(df_intraday_divs))
            test_run = True
        if (not test_run):
            self.skipTest('Skipping test_intraDayWithEvents() because no tickers had a dividend in last 60 days')
    def test_intraDayWithEvents_tase(self):
        """Same as test_intraDayWithEvents, for Tel Aviv (TASE) tickers.

        NOTE(review): also re-defined identically later in this class; the
        later definition overrides this one."""
        tase_tkrs = ['ICL.TA', 'ESLT.TA', 'ONE.TA', 'MGDL.TA']
        test_run = False
        for tkr in tase_tkrs:
            start_d = (_dt.date.today() - _dt.timedelta(days=59))
            end_d = None
            df_daily = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='1d', actions=True)
            df_daily_divs = df_daily['Dividends'][(df_daily['Dividends'] != 0)]
            if (df_daily_divs.shape[0] == 0):
                continue
            last_div_date = df_daily_divs.index[(- 1)]
            start_d = last_div_date.date()
            end_d = (last_div_date.date() + _dt.timedelta(days=1))
            df_intraday = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='15m', actions=True)
            self.assertTrue((df_intraday['Dividends'] != 0.0).any())
            df_intraday_divs = df_intraday['Dividends'][(df_intraday['Dividends'] != 0)]
            df_intraday_divs.index = df_intraday_divs.index.floor('D')
            self.assertTrue(df_daily_divs.equals(df_intraday_divs))
            test_run = True
        if (not test_run):
            self.skipTest('Skipping test_intraDayWithEvents_tase() because no tickers had a dividend in last 60 days')
    def test_dailyWithEvents(self):
        """Known 2022 dividend dates must appear in daily history, newest first."""
        start_d = _dt.date(2022, 1, 1)
        end_d = _dt.date(2023, 1, 1)
        tkr_div_dates = {'BHP.AX': [_dt.date(2022, 9, 1), _dt.date(2022, 2, 24)], 'IMP.JO': [_dt.date(2022, 9, 21), _dt.date(2022, 3, 16)], 'BP.L': [_dt.date(2022, 11, 10), _dt.date(2022, 8, 11), _dt.date(2022, 5, 12), _dt.date(2022, 2, 17)], 'INTC': [_dt.date(2022, 11, 4), _dt.date(2022, 8, 4), _dt.date(2022, 5, 5), _dt.date(2022, 2, 4)]}
        for (tkr, dates) in tkr_div_dates.items():
            df = yf.Ticker(tkr, session=self.session).history(interval='1d', start=start_d, end=end_d)
            df_divs = df[(df['Dividends'] != 0)].sort_index(ascending=False)
            try:
                self.assertTrue((df_divs.index.date == dates).all())
            except AssertionError:
                # Dump actual vs expected dividend dates before re-raising.
                print(f'- ticker = {tkr}')
                print('- response:')
                print(df_divs.index.date)
                print('- answer:')
                print(dates)
                raise
    def test_dailyWithEvents_bugs(self):
        """Regression tests for daily history with events: the index must agree
        across tickers and with/without actions, and merging a dividend dated
        after the only price row must prepend the dividend row."""
        tkr1 = 'QQQ'
        tkr2 = 'GDX'
        start_d = '2014-12-29'
        end_d = '2020-11-29'
        df1 = yf.Ticker(tkr1).history(start=start_d, end=end_d, interval='1d', actions=True)
        df2 = yf.Ticker(tkr2).history(start=start_d, end=end_d, interval='1d', actions=True)
        self.assertTrue(((df1['Dividends'] > 0) | (df1['Stock Splits'] > 0)).any())
        self.assertTrue(((df2['Dividends'] > 0) | (df2['Stock Splits'] > 0)).any())
        try:
            self.assertTrue(df1.index.equals(df2.index))
        except AssertionError:
            # Report which dates each ticker is missing before re-raising.
            missing_from_df1 = df2.index.difference(df1.index)
            missing_from_df2 = df1.index.difference(df2.index)
            print('{} missing these dates: {}'.format(tkr1, missing_from_df1))
            print('{} missing these dates: {}'.format(tkr2, missing_from_df2))
            raise
        tkrs = [tkr1, tkr2]
        for tkr in tkrs:
            # actions=True/False must not change the price index.
            df1 = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='1d', actions=True)
            df2 = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='1d', actions=False)
            self.assertTrue(((df1['Dividends'] > 0) | (df1['Stock Splits'] > 0)).any())
            try:
                self.assertTrue(df1.index.equals(df2.index))
            except AssertionError:
                missing_from_df1 = df2.index.difference(df1.index)
                missing_from_df2 = df1.index.difference(df2.index)
                print('{}-with-events missing these dates: {}'.format(tkr, missing_from_df1))
                print('{}-without-events missing these dates: {}'.format(tkr, missing_from_df2))
                raise
        # Synthetic case: dividend dated one day before the only price row.
        div_dt = _pd.Timestamp(2022, 7, 21).tz_localize('America/New_York')
        df_dividends = _pd.DataFrame(data={'Dividends': [1.0]}, index=[div_dt])
        df_prices = _pd.DataFrame(data=({c: [1.0] for c in yf.const.price_colnames} | {'Volume': 0}), index=[(div_dt + _dt.timedelta(days=1))])
        df_merged = yf.utils.safe_merge_dfs(df_prices, df_dividends, '1d')
        self.assertEqual(df_merged.shape[0], 2)
        self.assertTrue(df_merged[df_prices.columns].iloc[1:].equals(df_prices))
        self.assertEqual(df_merged.index[0], div_dt)
    def test_intraDayWithEvents(self):
        """Intraday history around a dividend date must include that dividend.

        NOTE(review): duplicate definition -- this overrides the identical
        method defined earlier in the class; one copy should be deleted."""
        tkrs = ['BHP.AX', 'IMP.JO', 'BP.L', 'PNL.L', 'INTC']
        test_run = False
        for tkr in tkrs:
            start_d = (_dt.date.today() - _dt.timedelta(days=59))
            end_d = None
            df_daily = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='1d', actions=True)
            df_daily_divs = df_daily['Dividends'][(df_daily['Dividends'] != 0)]
            if (df_daily_divs.shape[0] == 0):
                continue
            last_div_date = df_daily_divs.index[(- 1)]
            start_d = last_div_date.date()
            end_d = (last_div_date.date() + _dt.timedelta(days=1))
            df_intraday = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='15m', actions=True)
            self.assertTrue((df_intraday['Dividends'] != 0.0).any())
            df_intraday_divs = df_intraday['Dividends'][(df_intraday['Dividends'] != 0)]
            df_intraday_divs.index = df_intraday_divs.index.floor('D')
            self.assertTrue(df_daily_divs.equals(df_intraday_divs))
            test_run = True
        if (not test_run):
            self.skipTest('Skipping test_intraDayWithEvents() because no tickers had a dividend in last 60 days')
    def test_intraDayWithEvents_tase(self):
        """TASE variant of test_intraDayWithEvents.

        NOTE(review): duplicate definition overriding the identical earlier
        method; one copy should be deleted."""
        tase_tkrs = ['ICL.TA', 'ESLT.TA', 'ONE.TA', 'MGDL.TA']
        test_run = False
        for tkr in tase_tkrs:
            start_d = (_dt.date.today() - _dt.timedelta(days=59))
            end_d = None
            df_daily = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='1d', actions=True)
            df_daily_divs = df_daily['Dividends'][(df_daily['Dividends'] != 0)]
            if (df_daily_divs.shape[0] == 0):
                continue
            last_div_date = df_daily_divs.index[(- 1)]
            start_d = last_div_date.date()
            end_d = (last_div_date.date() + _dt.timedelta(days=1))
            df_intraday = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='15m', actions=True)
            self.assertTrue((df_intraday['Dividends'] != 0.0).any())
            df_intraday_divs = df_intraday['Dividends'][(df_intraday['Dividends'] != 0)]
            df_intraday_divs.index = df_intraday_divs.index.floor('D')
            self.assertTrue(df_daily_divs.equals(df_intraday_divs))
            test_run = True
        if (not test_run):
            self.skipTest('Skipping test_intraDayWithEvents_tase() because no tickers had a dividend in last 60 days')
def test_weeklyWithEvents(self):
tkr1 = 'QQQ'
tkr2 = 'GDX'
start_d = '2014-12-29'
end_d = '2020-11-29'
df1 = yf.Ticker(tkr1).history(start=start_d, end=end_d, interval='1wk', actions=True)
df2 = yf.Ticker(tkr2).history(start=start_d, end=end_d, interval='1wk', actions=True)
self.assertTrue(((df1['Dividends'] > 0) | (df1['Stock Splits'] > 0)).any())
self.assertTrue(((df2['Dividends'] > 0) | (df2['Stock Splits'] > 0)).any())
try:
self.assertTrue(df1.index.equals(df2.index))
except AssertionError:
missing_from_df1 = df2.index.difference(df1.index)
missing_from_df2 = df1.index.difference(df2.index)
print('{} missing these dates: {}'.format(tkr1, missing_from_df1))
print('{} missing these dates: {}'.format(tkr2, missing_from_df2))
raise
tkrs = [tkr1, tkr2]
for tkr in tkrs:
df1 = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='1wk', actions=True)
df2 = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='1wk', actions=False)
self.assertTrue(((df1['Dividends'] > 0) | (df1['Stock Splits'] > 0)).any())
try:
self.assertTrue(df1.index.equals(df2.index))
except AssertionError:
missing_from_df1 = df2.index.difference(df1.index)
missing_from_df2 = df1.index.difference(df2.index)
print('{}-with-events missing these dates: {}'.format(tkr, missing_from_df1))
print('{}-without-events missing these dates: {}'.format(tkr, missing_from_df2))
raise
def test_monthlyWithEvents(self):
tkr1 = 'QQQ'
tkr2 = 'GDX'
start_d = '2014-12-29'
end_d = '2020-11-29'
df1 = yf.Ticker(tkr1).history(start=start_d, end=end_d, interval='1mo', actions=True)
df2 = yf.Ticker(tkr2).history(start=start_d, end=end_d, interval='1mo', actions=True)
self.assertTrue(((df1['Dividends'] > 0) | (df1['Stock Splits'] > 0)).any())
self.assertTrue(((df2['Dividends'] > 0) | (df2['Stock Splits'] > 0)).any())
try:
self.assertTrue(df1.index.equals(df2.index))
except AssertionError:
missing_from_df1 = df2.index.difference(df1.index)
missing_from_df2 = df1.index.difference(df2.index)
print('{} missing these dates: {}'.format(tkr1, missing_from_df1))
print('{} missing these dates: {}'.format(tkr2, missing_from_df2))
raise
tkrs = [tkr1, tkr2]
for tkr in tkrs:
df1 = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='1mo', actions=True)
df2 = yf.Ticker(tkr, session=self.session).history(start=start_d, end=end_d, interval='1mo', actions=False)
self.assertTrue(((df1['Dividends'] > 0) | (df1['Stock Splits'] > 0)).any())
try:
self.assertTrue(df1.index.equals(df2.index))
except AssertionError:
missing_from_df1 = df2.index.difference(df1.index)
missing_from_df2 = df1.index.difference(df2.index)
print('{}-with-events missing these dates: {}'.format(tkr, missing_from_df1))
print('{}-without-events missing these dates: {}'.format(tkr, missing_from_df2))
raise
def test_monthlyWithEvents2(self):
dfm = yf.Ticker('ABBV').history(period='max', interval='1mo')
dfd = yf.Ticker('ABBV').history(period='max', interval='1d')
dfd = dfd[(dfd.index > dfm.index[0])]
dfm_divs = dfm[(dfm['Dividends'] != 0)]
dfd_divs = dfd[(dfd['Dividends'] != 0)]
self.assertEqual(dfm_divs.shape[0], dfd_divs.shape[0])
dfm = yf.Ticker('F').history(period='50mo', interval='1mo')
dfd = yf.Ticker('F').history(period='50mo', interval='1d')
dfd = dfd[(dfd.index > dfm.index[0])]
dfm_divs = dfm[(dfm['Dividends'] != 0)]
dfd_divs = dfd[(dfd['Dividends'] != 0)]
self.assertEqual(dfm_divs.shape[0], dfd_divs.shape[0])
def test_tz_dst_ambiguous(self):
try:
yf.Ticker('ESLT.TA', session=self.session).history(start='2002-10-06', end='2002-10-09', interval='1d')
except _tz.exceptions.AmbiguousTimeError:
raise Exception('Ambiguous DST issue not resolved')
def test_dst_fix(self):
tkr = 'AGRO3.SA'
dat = yf.Ticker(tkr, session=self.session)
start = '2021-01-11'
end = '2022-11-05'
interval = '1d'
df = dat.history(start=start, end=end, interval=interval)
self.assertTrue(((df.index.weekday >= 0) & (df.index.weekday <= 4)).all())
interval = '1wk'
df = dat.history(start=start, end=end, interval=interval)
try:
self.assertTrue((df.index.weekday == 0).all())
except AssertionError:
print('Weekly data not aligned to Monday')
raise
    def test_prune_post_intraday_us(self):
        """US market (AMZN): hourly bars must be pruned to regular-session hours.

        The only short session in the test year should be the day after
        Thanksgiving (2022-11-25, 1pm close), and there must be no late opens.
        """
        tkr = 'AMZN'
        interval = '1h'
        interval_td = _dt.timedelta(hours=1)
        time_open = _dt.time(9, 30)  # regular session open
        time_close = _dt.time(16)  # regular session close
        special_day = _dt.date(2022, 11, 25)  # day after Thanksgiving: early close
        time_early_close = _dt.time(13)
        dat = yf.Ticker(tkr, session=self.session)
        # Two-week window around the special day: its last bar must end
        # before the early-close time.
        start_d = (special_day - _dt.timedelta(days=7))
        end_d = (special_day + _dt.timedelta(days=7))
        df = dat.history(start=start_d, end=end_d, interval=interval, prepost=False, keepna=True)
        tg_last_dt = df.loc[str(special_day)].index[(- 1)]
        self.assertTrue((tg_last_dt.time() < time_early_close))
        # Whole year: exactly one early close expected, and no late opens.
        start_d = _dt.date(special_day.year, 1, 1)
        end_d = _dt.date((special_day.year + 1), 1, 1)
        df = dat.history(start=start_d, end=end_d, interval='1h', prepost=False, keepna=True)
        last_dts = _pd.Series(df.index).groupby(df.index.date).last()
        # A day closed early if its last bar plus one interval ends before the normal close.
        f_early_close = ((last_dts + interval_td).dt.time < time_close)
        early_close_dates = last_dts.index[f_early_close].values
        self.assertEqual(len(early_close_dates), 1)
        self.assertEqual(early_close_dates[0], special_day)
        # A day opened late if its first bar starts after the normal open.
        first_dts = _pd.Series(df.index).groupby(df.index.date).first()
        f_late_open = (first_dts.dt.time > time_open)
        late_open_dates = first_dts.index[f_late_open]
        self.assertEqual(len(late_open_dates), 0)
    def test_prune_post_intraday_omx(self):
        """OMX Stockholm (AEC.ST): hourly bars must be pruned to regular hours,
        and the detected early-close dates must match the exchange's half-days.
        """
        tkr = 'AEC.ST'
        interval = '1h'
        interval_td = _dt.timedelta(hours=1)
        time_open = _dt.time(9)  # regular session open
        time_close = _dt.time(17, 30)  # regular session close
        special_day = _dt.date(2022, 12, 23)
        time_early_close = _dt.time(13, 2)
        dat = yf.Ticker(tkr, session=self.session)
        # Scheduled 2022 half-days on OMX Stockholm.
        half_days = [_dt.date(special_day.year, x[0], x[1]) for x in [(1, 5), (4, 14), (5, 25), (6, 23), (11, 4), (12, 23), (12, 30)]]
        # Yahoo also reports this date as a half-day (named "incorrect" here);
        # it is tolerated by adding it to the expected set.
        expected_incorrect_half_days = [_dt.date(2022, 4, 13)]
        half_days = sorted((half_days + expected_incorrect_half_days))
        # Two-week window around the special day: its last bar must end
        # before the early-close time.
        start_d = (special_day - _dt.timedelta(days=7))
        end_d = (special_day + _dt.timedelta(days=7))
        df = dat.history(start=start_d, end=end_d, interval=interval, prepost=False, keepna=True)
        tg_last_dt = df.loc[str(special_day)].index[(- 1)]
        self.assertTrue((tg_last_dt.time() < time_early_close))
        # Whole year: the early-close dates must be exactly the half-day list.
        start_d = _dt.date(special_day.year, 1, 1)
        end_d = _dt.date((special_day.year + 1), 1, 1)
        df = dat.history(start=start_d, end=end_d, interval='1h', prepost=False, keepna=True)
        last_dts = _pd.Series(df.index).groupby(df.index.date).last()
        # A day closed early if its last bar plus one interval ends before the normal close.
        f_early_close = ((last_dts + interval_td).dt.time < time_close)
        early_close_dates = last_dts.index[f_early_close].values
        unexpected_early_close_dates = [d for d in early_close_dates if (d not in half_days)]
        self.assertEqual(len(unexpected_early_close_dates), 0)
        self.assertEqual(len(early_close_dates), len(half_days))
        self.assertTrue(_np.equal(early_close_dates, half_days).all())
        # No late opens expected at all.
        first_dts = _pd.Series(df.index).groupby(df.index.date).first()
        f_late_open = (first_dts.dt.time > time_open)
        late_open_dates = first_dts.index[f_late_open]
        self.assertEqual(len(late_open_dates), 0)
def test_prune_post_intraday_asx(self):
tkr = 'BHP.AX'
interval = '1h'
interval_td = _dt.timedelta(hours=1)
time_open = _dt.time(10)
time_close = _dt.time(16, 12)
dat = yf.Ticker(tkr, session=self.session)
start_d = _dt.date(2022, 1, 1)
end_d = _dt.date((2022 + 1), 1, 1)
df = dat.history(start=start_d, end=end_d, interval='1h', prepost=False, keepna=True)
last_dts = _pd.Series(df.index).groupby(df.index.date).last()
f_early_close = ((last_dts + interval_td).dt.time < time_close)
early_close_dates = last_dts.index[f_early_close].values
self.assertEqual(len(early_close_dates), 0)
first_dts = _pd.Series(df.index).groupby(df.index.date).first()
f_late_open = (first_dts.dt.time > time_open)
late_open_dates = first_dts.index[f_late_open]
self.assertEqual(len(late_open_dates), 0)
def test_weekly_2rows_fix(self):
tkr = 'AMZN'
start = (_dt.date.today() - _dt.timedelta(days=14))
start -= _dt.timedelta(days=start.weekday())
dat = yf.Ticker(tkr)
df = dat.history(start=start, interval='1wk')
self.assertTrue((df.index.weekday == 0).all())
    def test_aggregate_capital_gains(self):
        """Fetch 3-month bars for FXAIX (a fund that distributes capital gains).

        NOTE(review): no assertions follow the fetch — the test body looks
        truncated; confirm the intended checks against upstream.
        """
        tkr = 'FXAIX'
        dat = yf.Ticker(tkr, session=self.session)
        start = '2017-12-31'
        end = '2019-12-31'
        interval = '3mo'
        df = dat.history(start=start, end=end, interval=interval)
class ScriptExecutor(object):
    """Executes a Script of actions against an EnvironmentGraph.

    `_action_executors` maps every Action to the executor object that
    implements its state transition.
    """
    _action_executors = {Action.WALK: WalkExecutor(), Action.FIND: FindExecutor(), Action.SIT: SitExecutor(), Action.STANDUP: StandUpExecutor(), Action.GRAB: GrabExecutor(), Action.OPEN: OpenExecutor(False), Action.CLOSE: OpenExecutor(True), Action.PUTBACK: PutExecutor(Relation.ON), Action.PUTIN: PutExecutor(Relation.INSIDE), Action.SWITCHON: SwitchExecutor(True), Action.SWITCHOFF: SwitchExecutor(False), Action.DRINK: DrinkExecutor(), Action.LOOKAT: LookAtExecutor(), Action.TURNTO: TurnToExecutor(), Action.WIPE: WipeExecutor(), Action.RUN: WalkExecutor(), Action.PUTON: PutOnExecutor(), Action.PUTOFF: PutOffExecutor(), Action.GREET: GreetExecutor(), Action.DROP: DropExecutor(), Action.READ: ReadExecutor(), Action.POINTAT: PointAtExecutor(), Action.TOUCH: TouchExecutor(), Action.LIE: LieExecutor(), Action.PUTOBJBACK: PutBackExecutor(), Action.POUR: PourExecutor(), Action.TYPE: TypeExecutor(), Action.WATCH: WatchExecutor(), Action.PUSH: MoveExecutor(), Action.PULL: MoveExecutor(), Action.MOVE: MoveExecutor(), Action.RINSE: WashExecutor(), Action.WASH: WashExecutor(), Action.SCRUB: WashExecutor(), Action.SQUEEZE: SqueezeExecutor(), Action.PLUGIN: PlugExecutor(True), Action.PLUGOUT: PlugExecutor(False), Action.CUT: CutExecutor(), Action.EAT: EatExecutor(), Action.SLEEP: SleepExecutor(), Action.WAKEUP: WakeUpExecutor(), Action.RELEASE: DropExecutor()}

    def __init__(self, graph: EnvironmentGraph, name_equivalence, char_index: int=0):
        self.graph = graph
        self.name_equivalence = name_equivalence
        self.processing_time_limit = 10  # seconds allotted to find_solutions
        self.processing_limit = 0  # absolute deadline, set per search
        self.info = ExecutionInfo()
        self.char_index = char_index

    def find_solutions(self, script: Script, init_changers: List[StateChanger]=None):
        """Yield every end state reachable by executing `script` from the initial graph."""
        self.processing_limit = (time.time() + self.processing_time_limit)
        init_state = EnvironmentState(self.graph, self.name_equivalence)
        _apply_initial_changers(init_state, script, init_changers)
        return self.find_solutions_rec(script, 0, init_state)

    def find_solutions_rec(self, script: Script, script_index: int, state: EnvironmentState):
        """Depth-first search over the possible outcomes of each action.

        Fix: return immediately after yielding the base-case state. Previously
        control fell through and attempted to execute the (empty) remainder
        script, i.e. `script.from_index(len(script))`.
        """
        if (script_index >= len(script)):
            (yield state)
            return
        future_script = script.from_index(script_index)
        next_states = self.call_action_method(future_script, state, self.info, self.char_index)
        if (next_states is not None):
            for next_state in next_states:
                for rec_state_list in self.find_solutions_rec(script, (script_index + 1), next_state):
                    (yield rec_state_list)
                # Abort the search once the time budget is exhausted.
                if (time.time() > self.processing_limit):
                    break

    def execute(self, script: Script, init_changers: List[StateChanger]=None, w_graph_list: bool=True):
        """Execute `script` greedily (first outcome of each action).

        Returns (success, final_or_last_good_state, graph_state_list); the
        graph list is populated only when `w_graph_list` is True.
        """
        info = self.info
        state = EnvironmentState(self.graph, self.name_equivalence, instance_selection=True)
        _apply_initial_changers(state, script, init_changers)
        graph_state_list = []
        for i in range(len(script)):
            prev_state = state
            if w_graph_list:
                graph_state_list.append(state.to_dict())
            future_script = script.from_index(i)
            state = next(self.call_action_method(future_script, state, info, self.char_index), None)
            if (state is None):
                # Action failed: report the last consistent state.
                return (False, prev_state, graph_state_list)
        if w_graph_list:
            graph_state_list.append(state.to_dict())
        return (True, state, graph_state_list)

    def call_action_method(cls, script: Script, state: EnvironmentState, info: ExecutionInfo, char_index, modify=True, in_place=False):
        """Dispatch the first action of `script` to its executor.

        NOTE: the first parameter is named `cls` but this is invoked as an
        instance method, so it behaves as `self`.
        """
        executor = cls._action_executors.get(script[0].action, UnknownExecutor())
        return executor.execute(script, state, info, char_index, modify, in_place)

    def check_one_step(self, script: Script, state: EnvironmentState):
        """Return True if the first action of `script` can execute (state discarded)."""
        state = next(self.call_action_method(script, state, self.info, self.char_index, False), None)
        return state is not None

    def execute_one_step(self, script: Script, state: EnvironmentState, in_place=False):
        """Execute the first action of `script`; return (success, new_or_prev_state)."""
        prev_state = state
        state = next(self.call_action_method(script, state, self.info, self.char_index, in_place=in_place), None)
        if (state is None):
            return (False, prev_state)
        return (True, state)
def test_linestyle_checks():
    """nyquist_plot style handling: valid styles are applied, invalid ones rejected."""
    sys = ct.tf([100], [1, 1, 1])

    # Explicit dotted style for both the primary and mirror curves.
    lines = ct.nyquist_plot(sys, primary_style=[':', ':'], mirror_style=[':', ':'])
    assert all(line.get_linestyle() == ':' for line in lines[0])

    # A single color applies to every segment.
    lines = ct.nyquist_plot(sys, color='g')
    assert all(line.get_color() == 'g' for line in lines[0])

    # mirror_style=False suppresses the mirror-image curves entirely.
    lines = ct.nyquist_plot(sys, mirror_style=False)
    assert (lines[0][2:] == [None, None])

    # Malformed style arguments must be rejected.
    with pytest.raises(ValueError, match="invalid 'primary_style'"):
        ct.nyquist_plot(sys, primary_style=False)
    with pytest.raises(ValueError, match="invalid 'mirror_style'"):
        ct.nyquist_plot(sys, mirror_style=0.2)
    # Passing a bare string instead of a list is deprecated.
    with pytest.warns(PendingDeprecationWarning, match='single string'):
        ct.nyquist_plot(sys, primary_style=':', mirror_style='-.')
def avg_n_dicts(dicts, experiment=None, step=None):
    """Average each key's values over a list of dicts.

    Every key is divided by len(dicts), so keys missing from some dicts are
    treated as contributing 0. Optionally logs the result to `experiment`.
    """
    n = len(dicts)
    means = {}
    for entry in dicts:
        for key, value in entry.items():
            # Accumulate per-item fractions (same arithmetic order as a running mean).
            means[key] = means.get(key, 0) + value / n
    if experiment is not None:
        experiment.log_metrics(means, step=step)
    return means
class PrimaryKeyIndexLocator(Locator, dict):
    """Dict-backed locator for a primary key index.

    NOTE(review): `of` and `_root_path` take no `self`/`cls` and are invoked on
    the class, and `primary_key_index_meta` / `primary_key_index_root_path` are
    read below without being called — it looks like `@staticmethod` /
    `@property` decorators were lost from this copy; confirm against upstream
    before relying on call style.
    """
    def of(primary_key_index_meta: PrimaryKeyIndexMeta) -> PrimaryKeyIndexLocator:
        # Factory: the root path is derived deterministically from the
        # compacted partition, primary keys, sort keys, and algorithm version.
        pki_root_path = PrimaryKeyIndexLocator._root_path(primary_key_index_meta.compacted_partition_locator, primary_key_index_meta.primary_keys, primary_key_index_meta.sort_keys, primary_key_index_meta.primary_key_index_algorithm_version)
        pkil = PrimaryKeyIndexLocator()
        pkil['primaryKeyIndexMeta'] = primary_key_index_meta
        pkil['primaryKeyIndexRootPath'] = pki_root_path
        return pkil
    def _root_path(compacted_partition_locator: PartitionLocator, primary_keys: List[str], sort_keys: List[SortKey], primary_key_index_algorithm_version: str) -> str:
        # SHA-1 hexdigest of a '|'-joined canonical string of all identity inputs.
        pl_hexdigest = compacted_partition_locator.hexdigest()
        pki_version_str = f'{pl_hexdigest}|{primary_keys}|{sort_keys}|{primary_key_index_algorithm_version}'
        return sha1_hexdigest(pki_version_str.encode('utf-8'))
    def primary_key_index_meta(self) -> PrimaryKeyIndexMeta:
        # Lazily re-wrap the stored plain dict as a PrimaryKeyIndexMeta.
        val: Dict[(str, Any)] = self.get('primaryKeyIndexMeta')
        if ((val is not None) and (not isinstance(val, PrimaryKeyIndexMeta))):
            self['primaryKeyIndexMeta'] = val = PrimaryKeyIndexMeta(val)
        return val
    def primary_key_index_root_path(self) -> str:
        # Root path computed at construction time by `of`.
        return self['primaryKeyIndexRootPath']
    def get_primary_key_index_s3_url_base(self, s3_bucket: str) -> str:
        """Return the base S3 URL for this primary key index in `s3_bucket`."""
        # NOTE(review): reads `primary_key_index_root_path` without calling it —
        # only correct if that is a @property upstream.
        pki_root_path = self.primary_key_index_root_path
        return f's3://{s3_bucket}/{pki_root_path}'
    def canonical_string(self) -> str:
        """Return a string that uniquely identifies this locator."""
        return self.primary_key_index_root_path
@pytest.mark.parametrize('sampler', [sample_blackjax_nuts, sample_numpyro_nuts])
@pytest.mark.skipif(len(jax.devices()) < 2, reason='not enough devices')
def test_deterministic_samples(sampler):
    """Deterministic variables must appear in the trace and match their inputs.

    Fix: the bare `.parametrize(...)` / `.skipif(...)` expression statements
    were syntax errors — the stripped `@pytest.mark` decorator prefixes are
    restored.
    """
    pytensor.config.on_opt_error = 'raise'
    np.random.seed(13244)
    obs = np.random.normal(10, 2, size=100)
    obs_at = pytensor.shared(obs, borrow=True, name='obs')
    with pm.Model() as model:
        a = pm.Uniform('a', (- 20), 20)
        b = pm.Deterministic('b', (a / 2.0))
        c = pm.Normal('c', a, sigma=1.0, observed=obs_at)
        trace = sampler(chains=2, random_seed=1322, keep_untransformed=True)
    assert (8 < trace.posterior['a'].mean() < 11)
    # 'b' is exactly half of 'a' at every draw.
    assert np.allclose(trace.posterior['b'].values, (trace.posterior['a'].values / 2))
def test_all_extras_populates_installer(tester: CommandTester, mocker: MockerFixture) -> None:
    """`install --all-extras` must hand every declared extra to the installer."""
    command = tester.command
    assert isinstance(command, InstallerCommand)
    # Stub out the actual install run; we only inspect the configured extras.
    mocker.patch.object(command.installer, 'run', return_value=1)
    tester.execute('--all-extras')
    assert command.installer._extras == ['extras-a', 'extras-b']
def aead_chacha20poly1305_encrypt(key, counter, plain_text, auth_text):
    """Encrypt with ChaCha20-Poly1305 AEAD using a counter-derived nonce.

    The 12-byte nonce is 4 zero bytes followed by the 64-bit little-endian
    counter. `auth_text` is authenticated but not encrypted. Returns the
    ciphertext with the 16-byte Poly1305 tag appended.
    """
    nonce = b'\x00\x00\x00\x00' + counter.to_bytes(8, 'little')
    aead = ChaCha20_Poly1305.new(key=key, nonce=nonce)
    aead.update(auth_text)
    cipher_text, tag = aead.encrypt_and_digest(plain_text)
    return cipher_text + tag
def render_pep8_errors_e128(msg, _node, source_lines):
    """Render a PEP8 E128 (continuation line under-indented for visual indent)
    error: two lines of context, the offending line highlighted up to the
    reported column, then two more lines of context.

    Fix: the pattern already captures the column digits, so read them with
    `group(1)` instead of re-splitting the whole match text.
    """
    line = msg.line
    match = re.search(r'column (\d+)', msg.msg)
    col = int(match.group(1))
    yield from render_context(line - 2, line, source_lines)
    # Highlight from the line start to the reported column (whole line when col == 0).
    yield (line, slice(0, col if col != 0 else None), LineType.ERROR, source_lines[line - 1])
    yield from render_context(line + 1, line + 3, source_lines)
class get_model(LightningBaseModel):
    """Sparse-voxel U-Net segmentation model (SPVCNN-style) as a Lightning module.

    Four-stage encoder / four-stage decoder over sparse voxels with
    point-voxel feature exchange at three depths.
    """
    def __init__(self, config):
        super().__init__(config, None)
        self.save_hyperparameters()
        # Channel widths from config, scaled by the compression ratio `cr`.
        cr = config.model_params.cr
        cs = config.model_params.layer_num
        cs = [int((cr * x)) for x in cs]
        # Same resolution for initial voxelization and point/voxel exchange.
        self.pres = self.vres = config.model_params.voxel_size
        self.num_classes = config.model_params.num_class
        # Stem: two sparse 3x3x3 convs at full resolution.
        self.stem = nn.Sequential(spnn.Conv3d(config.model_params.input_dims, cs[0], kernel_size=3, stride=1), spnn.BatchNorm(cs[0]), spnn.ReLU(True), spnn.Conv3d(cs[0], cs[0], kernel_size=3, stride=1), spnn.BatchNorm(cs[0]), spnn.ReLU(True))
        # Encoder: each stage downsamples by 2 then applies two residual blocks.
        self.stage1 = nn.Sequential(basic_blocks.BasicConvolutionBlock(cs[0], cs[0], ks=2, stride=2, dilation=1), basic_blocks.ResidualBlock(cs[0], cs[1], ks=3, stride=1, dilation=1), basic_blocks.ResidualBlock(cs[1], cs[1], ks=3, stride=1, dilation=1))
        self.stage2 = nn.Sequential(basic_blocks.BasicConvolutionBlock(cs[1], cs[1], ks=2, stride=2, dilation=1), basic_blocks.ResidualBlock(cs[1], cs[2], ks=3, stride=1, dilation=1), basic_blocks.ResidualBlock(cs[2], cs[2], ks=3, stride=1, dilation=1))
        self.stage3 = nn.Sequential(basic_blocks.BasicConvolutionBlock(cs[2], cs[2], ks=2, stride=2, dilation=1), basic_blocks.ResidualBlock(cs[2], cs[3], ks=3, stride=1, dilation=1), basic_blocks.ResidualBlock(cs[3], cs[3], ks=3, stride=1, dilation=1))
        self.stage4 = nn.Sequential(basic_blocks.BasicConvolutionBlock(cs[3], cs[3], ks=2, stride=2, dilation=1), basic_blocks.ResidualBlock(cs[3], cs[4], ks=3, stride=1, dilation=1), basic_blocks.ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1))
        # Decoder: each stage upsamples by 2, concatenates the skip connection,
        # then applies two residual blocks.
        self.up1 = nn.ModuleList([basic_blocks.BasicDeconvolutionBlock(cs[4], cs[5], ks=2, stride=2), nn.Sequential(basic_blocks.ResidualBlock((cs[5] + cs[3]), cs[5], ks=3, stride=1, dilation=1), basic_blocks.ResidualBlock(cs[5], cs[5], ks=3, stride=1, dilation=1))])
        self.up2 = nn.ModuleList([basic_blocks.BasicDeconvolutionBlock(cs[5], cs[6], ks=2, stride=2), nn.Sequential(basic_blocks.ResidualBlock((cs[6] + cs[2]), cs[6], ks=3, stride=1, dilation=1), basic_blocks.ResidualBlock(cs[6], cs[6], ks=3, stride=1, dilation=1))])
        self.up3 = nn.ModuleList([basic_blocks.BasicDeconvolutionBlock(cs[6], cs[7], ks=2, stride=2), nn.Sequential(basic_blocks.ResidualBlock((cs[7] + cs[1]), cs[7], ks=3, stride=1, dilation=1), basic_blocks.ResidualBlock(cs[7], cs[7], ks=3, stride=1, dilation=1))])
        self.up4 = nn.ModuleList([basic_blocks.BasicDeconvolutionBlock(cs[7], cs[8], ks=2, stride=2), nn.Sequential(basic_blocks.ResidualBlock((cs[8] + cs[0]), cs[8], ks=3, stride=1, dilation=1), basic_blocks.ResidualBlock(cs[8], cs[8], ks=3, stride=1, dilation=1))])
        # Per-point classifier over the final point features.
        self.classifier = nn.Sequential(nn.Linear(cs[8], self.num_classes))
        # MLPs that lift point features across resolutions for the skip additions.
        self.point_transforms = nn.ModuleList([nn.Sequential(nn.Linear(cs[0], cs[4]), nn.BatchNorm1d(cs[4]), nn.ReLU(True)), nn.Sequential(nn.Linear(cs[4], cs[6]), nn.BatchNorm1d(cs[6]), nn.ReLU(True)), nn.Sequential(nn.Linear(cs[6], cs[8]), nn.BatchNorm1d(cs[8]), nn.ReLU(True))])
        self.criterion = get_loss(config)
        self.weight_initialization()
        # In-place dropout applied to voxel features before each decoder branch.
        self.dropout = nn.Dropout(0.3, True)
    def weight_initialization(self):
        """Initialize all BatchNorm1d layers to weight=1, bias=0."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, data_dict):
        """Run the sparse U-Net; writes logits to data_dict['sparse_logits'] and
        applies the loss criterion before returning the dict."""
        x = data_dict['lidar']
        # Point tensor: features + float coordinates.
        z = PointTensor(x.F, x.C.float())
        x0 = initial_voxelize(z, self.pres, self.vres)
        x0 = self.stem(x0)
        z0 = voxel_to_point(x0, z, nearest=False)
        z0.F = z0.F  # no-op; presumably a placeholder for a point transform — confirm upstream
        # Encoder.
        x1 = point_to_voxel(x0, z0)
        x1 = self.stage1(x1)
        x2 = self.stage2(x1)
        x3 = self.stage3(x2)
        x4 = self.stage4(x3)
        # Point features at the bottleneck, with a lifted skip from z0.
        z1 = voxel_to_point(x4, z0)
        z1.F = (z1.F + self.point_transforms[0](z0.F))
        # Decoder half 1 (up1, up2) with encoder skips x3, x2.
        y1 = point_to_voxel(x4, z1)
        y1.F = self.dropout(y1.F)
        y1 = self.up1[0](y1)
        y1 = torchsparse.cat([y1, x3])
        y1 = self.up1[1](y1)
        y2 = self.up2[0](y1)
        y2 = torchsparse.cat([y2, x2])
        y2 = self.up2[1](y2)
        z2 = voxel_to_point(y2, z1)
        z2.F = (z2.F + self.point_transforms[1](z1.F))
        # Decoder half 2 (up3, up4) with encoder skips x1, x0.
        y3 = point_to_voxel(y2, z2)
        y3.F = self.dropout(y3.F)
        y3 = self.up3[0](y3)
        y3 = torchsparse.cat([y3, x1])
        y3 = self.up3[1](y3)
        y4 = self.up4[0](y3)
        y4 = torchsparse.cat([y4, x0])
        y4 = self.up4[1](y4)
        z3 = voxel_to_point(y4, z2)
        z3.F = (z3.F + self.point_transforms[2](z2.F))
        output = self.classifier(z3.F)
        data_dict['sparse_logits'] = output
        data_dict = self.criterion(data_dict)
        return data_dict
def generalized_kernel_feature_creator(data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data):
    """Build generalized kernel features: kernel_fn applied to (optionally
    normalized) data, projected through `projection_matrix` when one is given,
    with `kernel_epsilon` added for numerical stability.
    """
    if normalize_data:
        # d**(-1/4) scaling of the data before projection.
        multiplier = 1.0 / jnp.sqrt(jnp.sqrt(data.shape[-1]))
    else:
        multiplier = 1.0
    if projection_matrix is None:
        return kernel_fn(multiplier * data) + kernel_epsilon
    # Broadcast the projection matrix across the leading batch dimensions.
    batched_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
    batched_projection = jnp.zeros(batched_shape) + projection_matrix
    # Contract the feature (last) axes; batch over `batch_dims_t`.
    projected = lax.dot_general(
        multiplier * data,
        batched_projection,
        (((data.ndim - 1,), (batched_projection.ndim - 1,)), (batch_dims_t, batch_dims_t)),
        precision=precision,
    )
    return kernel_fn(projected) + kernel_epsilon
@pytest.mark.parametrize('modifier', ['lower', 'upper'])
def test_source_add_existing_fails_due_to_other_default(modifier: str, tester: CommandTester, source_existing: Source, source_default: Source, poetry_with_source: Poetry) -> None:
    """Adding a second default source must fail, regardless of name casing.

    Fix: the bare `.parametrize(...)` expression statement was a syntax error —
    the stripped `@pytest.mark` decorator prefix is restored.
    """
    # Configure the first default source and discard its output.
    tester.execute(f'--priority=default {source_default.name} {source_default.url}')
    tester.io.fetch_output()
    # Re-adding an existing source as default (name case-modified) must error.
    name = getattr(source_existing.name, modifier)()
    tester.execute(f'--priority=default {name} {source_existing.url}')
    assert (tester.io.fetch_error().strip() == f'Source with name {source_default.name} is already set to default. Only one default source can be configured at a time.')
    assert (tester.io.fetch_output().strip() == '')
    assert (tester.status_code == 1)
def create_callbacks(model, training_model, prediction_model, validation_generator, args):
    """Assemble the Keras callbacks for training: optional TensorBoard logging,
    optional mAP evaluation, optional snapshot checkpoints, and LR reduction
    on loss plateau. Returns the callback list.
    """
    callbacks = []

    tb_callback = None
    if args.tensorboard_dir:
        makedirs(args.tensorboard_dir)
        tb_callback = keras.callbacks.TensorBoard(
            log_dir=args.tensorboard_dir, histogram_freq=0, batch_size=args.batch_size,
            write_graph=True, write_grads=False, write_images=False,
            embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
        callbacks.append(tb_callback)

    if args.evaluation and validation_generator:
        if args.dataset_type == 'coco':
            # COCO gets its dedicated evaluation callback.
            from callbacks import CocoEval
            eval_cb = CocoEval(validation_generator, tensorboard=tb_callback)
        else:
            eval_cb = Evaluate(validation_generator, tensorboard=tb_callback, weighted_average=args.weighted_average)
        # Evaluation runs on the prediction model, not the training model.
        callbacks.append(RedirectModel(eval_cb, prediction_model))

    if args.snapshots:
        makedirs(args.snapshot_path)
        ckpt = keras.callbacks.ModelCheckpoint(
            os.path.join(args.snapshot_path, '{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)),
            verbose=1)
        # Checkpoints save the base model's weights.
        callbacks.append(RedirectModel(ckpt, model))

    callbacks.append(keras.callbacks.ReduceLROnPlateau(
        monitor='loss', factor=0.1, patience=2, verbose=1, mode='auto',
        min_delta=0.0001, cooldown=0, min_lr=0))
    return callbacks
class GoloLexer(RegexLexer):
    """Pygments lexer for the Golo language.

    Fix: the `url` attribute was an unterminated string literal (the URL had
    been stripped); restored to the Golo project homepage.
    """
    name = 'Golo'
    url = 'http://golo-lang.org/'
    filenames = ['*.golo']
    aliases = ['golo']
    version_added = '2.0'
    tokens = {'root': [('[^\\S\\n]+', Whitespace), ('#.*$', Comment), ('(\\^|\\.\\.\\.|:|\\?:|->|==|!=|=|\\+|\\*|%|/|<=|<|>=|>|=|\\.)', Operator), ('(?<=[^-])(-)(?=[^-])', Operator), ('(?<=[^`])(is|isnt|and|or|not|oftype|in|orIfNull)\\b', Operator.Word), ('[]{}|(),[]', Punctuation), ('(module|import)(\\s+)', bygroups(Keyword.Namespace, Whitespace), 'modname'), ('\\b([a-zA-Z_][\\w$.]*)(::)', bygroups(Name.Namespace, Punctuation)), ('\\b([a-zA-Z_][\\w$]*(?:\\.[a-zA-Z_][\\w$]*)+)\\b', Name.Namespace), ('(let|var)(\\s+)', bygroups(Keyword.Declaration, Whitespace), 'varname'), ('(struct)(\\s+)', bygroups(Keyword.Declaration, Whitespace), 'structname'), ('(function)(\\s+)', bygroups(Keyword.Declaration, Whitespace), 'funcname'), ('(null|true|false)\\b', Keyword.Constant), ('(augment|pimp|if|else|case|match|return|case|when|then|otherwise|while|for|foreach|try|catch|finally|throw|local|continue|break)\\b', Keyword), ('(map|array|list|set|vector|tuple)(\\[)', bygroups(Name.Builtin, Punctuation)), ('(print|println|readln|raise|fun|asInterfaceInstance)\\b', Name.Builtin), ('(`?[a-zA-Z_][\\w$]*)(\\()', bygroups(Name.Function, Punctuation)), ('-?[\\d_]*\\.[\\d_]*([eE][+-]?\\d[\\d_]*)?F?', Number.Float), ('0[0-7]+j?', Number.Oct), ('0[xX][a-fA-F0-9]+', Number.Hex), ('-?\\d[\\d_]*L', Number.Integer.Long), ('-?\\d[\\d_]*', Number.Integer), ('`?[a-zA-Z_][\\w$]*', Name), ('[a-zA-Z_][\\w$.]*', Name.Decorator), ('"""', String, combined('stringescape', 'triplestring')), ('"', String, combined('stringescape', 'doublestring')), ("'", String, combined('stringescape', 'singlestring')), ('----((.|\\n)*?)----', String.Doc)], 'funcname': [('`?[a-zA-Z_][\\w$]*', Name.Function, '#pop')], 'modname': [('[a-zA-Z_][\\w$.]*\\*?', Name.Namespace, '#pop')], 'structname': [('`?[\\w.]+\\*?', Name.Class, '#pop')], 'varname': [('`?[a-zA-Z_][\\w$]*', Name.Variable, '#pop')], 'string': [('[^\\\\\\\'"\\n]+', String), ('[\\\'"\\\\]', String)], 'stringescape': [('\\\\([\\\\abfnrtv"\\\']|\\n|N\\{.*?\\}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)], 'triplestring': [('"""', String, '#pop'), include('string'), ('\\n', String)], 'doublestring': [('"', String.Double, '#pop'), include('string')], 'singlestring': [("'", String, '#pop'), include('string')], 'operators': [('[#=,./%+\\-?]', Operator), ('(eq|gt|lt|gte|lte|neq|matches)\\b', Operator), ('(==|<=|<|>=|>|!=)', Operator)]}
def register(manager: AstroidManager) -> None:
    """Register inference transforms for the known numpy members on `manager`."""
    for func_name, func_src in METHODS_TO_BE_INFERRED.items():
        # Each attribute that looks like the numpy member gets an inference tip
        # built from its recorded source.
        manager.register_transform(
            Attribute,
            inference_tip(functools.partial(infer_numpy_member, func_src)),
            functools.partial(attribute_looks_like_numpy_member, func_name),
        )
def getRobotFishHumanReefWrecks(mask):
    """Split an RGB-coded segmentation mask into five binary class planes.

    Color code (R, G, B): human=(0,0,1), robot=(1,0,0), fish=(1,1,0),
    reef=(1,0,1), wreck=(0,1,1); anything else maps to no class.

    Fix: the O(H*W) Python pixel loop is replaced by vectorized numpy
    comparisons — identical output, orders of magnitude faster.

    Returns an array of shape (H, W, 5) stacked as
    (Robot, Fish, Human, Reef, Wreck).
    """
    r, g, b = mask[..., 0], mask[..., 1], mask[..., 2]
    human = ((r == 0) & (g == 0) & (b == 1)).astype(float)
    robot = ((r == 1) & (g == 0) & (b == 0)).astype(float)
    fish = ((r == 1) & (g == 1) & (b == 0)).astype(float)
    reef = ((r == 1) & (g == 0) & (b == 1)).astype(float)
    wreck = ((r == 0) & (g == 1) & (b == 1)).astype(float)
    return np.stack((robot, fish, human, reef, wreck), (- 1))
class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-width) classifier with built-in input normalization.

    Inputs are normalized with `mean`/`std` (CUDA copies are created lazily and
    cached) and optionally zero-padded before the first convolution.
    """
    def __init__(self, num_classes: int=10, depth: int=28, width: int=10, activation_fn: nn.Module=nn.ReLU, mean: Union[(Tuple[(float, ...)], float)]=TINY_MEAN, std: Union[(Tuple[(float, ...)], float)]=TINY_STD, padding: int=0, num_input_channels: int=3):
        super().__init__()
        # Normalization constants shaped (C, 1, 1) for broadcasting over images.
        self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
        self.std = torch.tensor(std).view(num_input_channels, 1, 1)
        # GPU copies are created on the first CUDA forward pass (see forward()).
        self.mean_cuda = None
        self.std_cuda = None
        self.padding = padding
        num_channels = [16, (16 * width), (32 * width), (64 * width)]
        # WRN constraint: depth = 6n + 4.
        assert (((depth - 4) % 6) == 0)
        num_blocks = ((depth - 4) // 6)
        self.init_conv = nn.Conv2d(num_input_channels, num_channels[0], kernel_size=3, stride=1, padding=1, bias=False)
        # Three block groups; the last two downsample by stride 2.
        self.layer = nn.Sequential(_BlockGroup(num_blocks, num_channels[0], num_channels[1], 1, activation_fn=activation_fn), _BlockGroup(num_blocks, num_channels[1], num_channels[2], 2, activation_fn=activation_fn), _BlockGroup(num_blocks, num_channels[2], num_channels[3], 2, activation_fn=activation_fn))
        self.batchnorm = nn.BatchNorm2d(num_channels[3], momentum=0.01)
        self.relu = activation_fn(inplace=True)
        self.logits = nn.Linear(num_channels[3], num_classes)
        self.num_channels = num_channels[3]
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # He-style init for convs, unit/zero init for BN, zero bias for linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        """Pad (optional), normalize, run the WRN trunk, return class logits."""
        if (self.padding > 0):
            x = F.pad(x, ((self.padding,) * 4))
        if x.is_cuda:
            # Cache CUDA copies of the normalization constants on first use.
            if (self.mean_cuda is None):
                self.mean_cuda = self.mean.cuda()
                self.std_cuda = self.std.cuda()
            out = ((x - self.mean_cuda) / self.std_cuda)
        else:
            out = ((x - self.mean) / self.std)
        out = self.init_conv(out)
        out = self.layer(out)
        out = self.relu(self.batchnorm(out))
        out = self.avgpool(out)
        out = out.view((- 1), self.num_channels)
        return self.logits(out)
class MultinodePenalty(PenaltyOption):
    """Penalty defined across several nodes, possibly spanning multiple phases."""
    def __init__(self, _multinode_penalty_fcn: (Any | type), nodes: tuple[((int | Node), ...)], nodes_phase: tuple[(int, ...)], multinode_penalty: (Any | Callable)=None, custom_function: Callable=None, **params: Any):
        # A raw callable (not a member of the penalty-function enum) is treated
        # as a custom penalty function.
        if (not isinstance(multinode_penalty, _multinode_penalty_fcn)):
            custom_function = multinode_penalty
            multinode_penalty = _multinode_penalty_fcn.CUSTOM
        super(MultinodePenalty, self).__init__(penalty=multinode_penalty, custom_function=custom_function, **params)
        # Validate nodes: each must be a supported symbolic Node or an int index.
        for node in nodes:
            if (node not in (Node.START, Node.MID, Node.PENULTIMATE, Node.END)):
                if (not isinstance(node, int)):
                    raise ValueError('Multinode penalties only works with Node.START, Node.MID, Node.PENULTIMATE, Node.END or a node index (int).')
        for phase in nodes_phase:
            if (not isinstance(phase, int)):
                raise ValueError('nodes_phase should be all positive integers corresponding to the phase index')
        if (len(nodes) != len(nodes_phase)):
            raise ValueError('Each of the nodes must have a corresponding nodes_phase')
        self.multinode_penalty = True
        self.nodes_phase = nodes_phase
        self.nodes = nodes
        self.node = Node.MULTINODES
        self.dt = 1
        self.node_idx = [0]
        self.all_nodes_index = []
        self.penalty_type = PenaltyType.INTERNAL
        # Filled in later by the setup machinery (not in this class).
        self.phase_dynamics = []
        self.ns = []
        self.control_types = []
    def _get_pool_to_add_penalty(self, ocp, nlp):
        """Return the pool (list) this penalty should be stored in (abstract)."""
        raise NotImplementedError('This is an abstract method and should be implemented by child')
    def _add_penalty_to_pool(self, controller: list[(PenaltyController, ...)]):
        """Store this penalty at its reserved `list_index` slot in the pool."""
        ocp = controller[0].ocp
        nlp = controller[0].get_nlp
        pool = self._get_pool_to_add_penalty(ocp, nlp)
        pool[self.list_index] = self
    def ensure_penalty_sanity(self, ocp, nlp):
        """Reserve (or clear) a slot in the penalty pool for this penalty.

        A negative `list_index` means "find the first empty slot, or append a
        new one"; otherwise the pool is grown as needed and the existing slot
        is reset to an empty list.
        """
        pool = self._get_pool_to_add_penalty(ocp, nlp)
        if (self.list_index < 0):
            for (i, j) in enumerate(pool):
                if (not j):
                    self.list_index = i
                    return
            else:
                # for/else: runs only when no empty slot was found above.
                pool.append([])
                self.list_index = (len(pool) - 1)
        else:
            while (self.list_index >= len(pool)):
                pool.append([])
            pool[self.list_index] = []
class UAVDataset(Dataset):
    """UAV tracking benchmark dataset: loads per-video metadata from
    `<dataset_root>/<name>.json` and indexes videos by attribute."""

    def __init__(self, name, dataset_root, load_img=False):
        super(UAVDataset, self).__init__(name, dataset_root)
        meta_path = os.path.join(dataset_root, (name + '.json'))
        with open(meta_path, 'r') as fh:
            meta = json.load(fh)
        progress = tqdm(meta.keys(), desc=('loading ' + name), ncols=100)
        self.videos = {}
        for vid in progress:
            progress.set_postfix_str(vid)
            info = meta[vid]
            self.videos[vid] = UAVVideo(vid, dataset_root, info['video_dir'], info['init_rect'], info['img_names'], info['gt_rect'], info['attr'])
        # Union of every video's attribute tags.
        all_tags = set()
        for video in self.videos.values():
            all_tags.update(video.attr)
        # Map attribute -> list of video names; 'ALL' covers every video.
        self.attr = {'ALL': list(self.videos.keys())}
        for tag in all_tags:
            self.attr[tag] = []
        for (vid, video) in self.videos.items():
            for tag in video.attr:
                self.attr[tag].append(vid)
class FileAttributes(FileCopying):
    """Tests for rpath attribute comparison and attribute-preserving copies
    (permissions, exec bits, hard links, symlinks)."""

    def setUp(self):
        FileCopying.setUp(self)
        # Fixture paths exercising various permission/type combinations.
        self.noperms = rpath.RPath(self.lc, self.mainprefix, ('noperms',))
        self.nowrite = rpath.RPath(self.lc, self.mainprefix, ('nowrite',))
        self.exec1 = rpath.RPath(self.lc, self.prefix, ('executable',))
        self.exec2 = rpath.RPath(self.lc, self.prefix, ('executable2',))
        self.test = rpath.RPath(self.lc, self.prefix, ('test',))
        self.nothing = rpath.RPath(self.lc, self.prefix, ('aoeunthoenuouo',))
        self.sym = rpath.RPath(self.lc, self.prefix, ('symbolic_link',))

    def testComp(self):
        # The two hard links must compare loosely equal in both directions.
        first, second = self.hl1, self.hl2
        self.assertTrue(first.equal_loose(second))
        self.assertTrue(second.equal_loose(first))

    def testCompFail(self):
        # Pairs that differ in perms, exec bits, or link structure never match.
        for (left, right) in ((self.nowrite, self.noperms), (self.exec1, self.exec2), (self.rf, self.hl1)):
            self.assertFalse(left.equal_loose(right))
            self.assertFalse(right.equal_loose(left))

    def testCheckRaise(self):
        # _check_for_files raises when either side is missing.
        with self.assertRaises(rpath.RPathException):
            rpath._check_for_files(self.nothing, self.hl1)
        with self.assertRaises(rpath.RPathException):
            rpath._check_for_files(self.hl1, self.nothing)

    def testCopyAttribs(self):
        target = rpath.RPath(self.lc, self.write_dir, ('testattribs',))
        if target.lstat():
            target.delete()
        for src in (self.noperms, self.nowrite, self.rf, self.exec1, self.exec2, self.hl1, self.dir):
            rpath.copy(src, target)
            rpath.copy_attribs(src, target)
            self.assertTrue(target.equal_loose(src))
            target.delete()

    def testCopyWithAttribs(self):
        out = rpath.RPath(self.lc, self.write_dir, ('out',))
        if out.lstat():
            out.delete()
        sources = [self.noperms, self.nowrite, self.rf, self.exec1, self.exec2, self.hl1, self.dir]
        # Symlink copying is only exercised off Windows.
        if (os.name != 'nt'):
            sources.append(self.sym)
        for src in sources:
            rpath.copy_with_attribs(src, out)
            self.assertTrue(rpath.cmp(src, out))
            self.assertTrue(src.equal_loose(out))
            out.delete()

    def testCopyRaise(self):
        # copy_attribs asserts when either endpoint is missing.
        with self.assertRaises(AssertionError):
            rpath.copy_attribs(self.hl1, self.nothing)
        with self.assertRaises(AssertionError):
            rpath.copy_attribs(self.nothing, self.nowrite)
def min_weight_simple_paths_brute_force(graph: nx.Graph, weight_fun: Callable[[nx.Graph, List], float] = path_weight):
    """Brute-force the minimum-weight simple path of each length in *graph*.

    Enumerates every simple path between every unordered node pair and keeps,
    for each path length ``n`` (number of nodes), the path with the smallest
    ``weight_fun(graph, path)``.

    Args:
        graph: the graph to search.
        weight_fun: callable mapping (graph, path) to a weight; defaults to
            ``path_weight``.

    Returns:
        dict mapping path length -> best (minimum-weight) path of that length.
    """
    from itertools import combinations  # local import: module import block not touched

    best_weights = defaultdict(lambda: float('inf'))
    best_paths = {}
    # combinations() yields exactly the i<j node pairs the original index loops did.
    for (source, target) in combinations(graph.nodes(), 2):
        for path in nx.all_simple_paths(graph, source, target):
            length = len(path)
            weight = weight_fun(graph, path)
            if (weight < best_weights[length]):
                best_paths[length] = path
                best_weights[length] = weight
    return best_paths
class DynamicLossScaler():
    """Dynamic loss scaling for mixed-precision training.

    The scale shrinks by `scale_factor` (down to `min_scale`) when gradients
    overflow — optionally delayed through a hysteresis counter — and grows by
    `scale_factor` after `scale_window` consecutive overflow-free iterations.

    Fixes vs. the previous revision:
      * `loss_scale` is now a @property: `backward` and `scale_gradient` read it
        as an attribute, which previously multiplied the loss by a *bound
        method* instead of the scale value.
      * `_has_inf_or_nan` is now explicitly a @staticmethod (it takes no self
        and is called unbound elsewhere in the class).
    """

    def __init__(self, init_scale=(2 ** 32), scale_factor=2.0, scale_window=1000, min_scale=1, delayed_shift=1, consecutive_hysteresis=False):
        self.cur_scale = init_scale          # current loss scale
        self.cur_iter = 0                    # iterations processed so far
        self.last_overflow_iter = (- 1)      # iteration of the latest overflow
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self.min_scale = min_scale
        self.delayed_shift = delayed_shift   # overflows tolerated before shrinking
        self.cur_hysteresis = delayed_shift
        self.consecutive_hysteresis = consecutive_hysteresis

    def has_overflow_serial(self, params):
        """Return True if any local parameter gradient contains inf/nan."""
        for p in params:
            if ((p.grad is not None) and DynamicLossScaler._has_inf_or_nan(p.grad.data)):
                return True
        return False

    def has_overflow(self, params):
        """Return True if any gradient overflowed on any model-parallel rank."""
        overflow = self.has_overflow_serial(params)
        # All-reduce (MAX) so every rank in the model-parallel group agrees.
        overflow_gpu = torch.cuda.ByteTensor([overflow])
        torch.distributed.all_reduce(overflow_gpu, op=torch.distributed.ReduceOp.MAX, group=mpu.get_model_parallel_group())
        overflow = overflow_gpu[0].item()
        return bool(overflow)

    @staticmethod
    def _has_inf_or_nan(x):
        """Return True if tensor `x` contains inf or nan."""
        try:
            # Summing can raise when overflowing values cannot be converted.
            cpu_sum = float(x.float().sum())
        except RuntimeError as instance:
            if ('value cannot be converted' not in instance.args[0]):
                raise
            return True
        else:
            # inf, -inf, or nan (nan != nan).
            if ((cpu_sum == float('inf')) or (cpu_sum == (- float('inf'))) or (cpu_sum != cpu_sum)):
                return True
            return False

    def update_scale(self, overflow):
        """Shrink on overflow (honoring hysteresis) or grow after a clean window."""
        # hasattr guards keep old pickled scaler states loadable.
        if (not hasattr(self, 'min_scale')):
            self.min_scale = 1
        if (not hasattr(self, 'delayed_shift')):
            self.delayed_shift = 1
        if (not hasattr(self, 'cur_hysteresis')):
            self.cur_hysteresis = 1
        if (not hasattr(self, 'consecutive_hysteresis')):
            # NOTE: legacy default True differs from __init__'s False; kept for
            # backward compatibility with old checkpoints.
            self.consecutive_hysteresis = True
        if overflow:
            if ((self.delayed_shift == 1) or (self.cur_hysteresis == 1)):
                self.cur_scale = max((self.cur_scale / self.scale_factor), self.min_scale)
            else:
                self.cur_hysteresis -= 1
            self.last_overflow_iter = self.cur_iter
        else:
            if self.consecutive_hysteresis:
                self.cur_hysteresis = self.delayed_shift
            if (((self.cur_iter - self.last_overflow_iter) % self.scale_window) == 0):
                if (not self.consecutive_hysteresis):
                    self.cur_hysteresis = self.delayed_shift
                self.cur_scale *= self.scale_factor
        self.cur_iter += 1

    @property
    def loss_scale(self):
        """Current loss scale value."""
        return self.cur_scale

    def scale_gradient(self, module, grad_in, grad_out):
        """Backward hook: scale `grad_in` in place by the current loss scale."""
        _overflow_buf = torch.cuda.IntTensor([0])
        multi_tensor_applier(amp_C.multi_tensor_scale, _overflow_buf, [grad_in, grad_in], self.loss_scale)
        return grad_in

    def backward(self, loss, retain_graph=False):
        """Backpropagate `loss` multiplied by the current loss scale."""
        scaled_loss = (loss * self.loss_scale)
        scaled_loss.backward(retain_graph=retain_graph)
class ElpiLexer(RegexLexer):
    """Pygments lexer for Elpi (a dialect of lambda-Prolog).

    The class body first builds regular-expression fragments for identifier,
    symbol and constant shapes, then combines them into the `tokens` state
    machine that `RegexLexer` drives.
    """
    name = 'Elpi'
    # NOTE(review): the URL string literal is truncated in this copy of the
    # file — restore the upstream project link.
    url = '
    aliases = ['elpi']
    filenames = ['*.elpi']
    mimetypes = ['text/x-elpi']
    version_added = '2.11'
    # Character-class building blocks for the composite regexes below.
    lcase_re = '[a-z]'
    ucase_re = '[A-Z]'
    digit_re = '[0-9]'
    schar2_re = "([+*^?/<>`'#~=&!])"
    schar_re = '({}|-|\\$|_)'.format(schar2_re)
    idchar_re = '({}|{}|{}|{})'.format(lcase_re, ucase_re, digit_re, schar_re)
    # Identifier characters, allowing dotted namespace segments.
    idcharstarns_re = '({}*(\\.({}|{}){}*)*)'.format(idchar_re, lcase_re, ucase_re, idchar_re)
    symbchar_re = '({}|{}|{}|{}|:)'.format(lcase_re, ucase_re, digit_re, schar_re)
    constant_re = '({}{}*|{}{}|{}{}*|_{}+)'.format(ucase_re, idchar_re, lcase_re, idcharstarns_re, schar2_re, symbchar_re, idchar_re)
    symbol_re = '(,|<=>|->|:-|;|\\?-|->|&|=>|\\bas\\b|\\buvar\\b|<|=<|=|==|>=|>|\\bi<|\\bi=<|\\bi>=|\\bi>|\\bis\\b|\\br<|\\br=<|\\br>=|\\br>|\\bs<|\\bs=<|\\bs>=|\\bs>||::|\\[\\]|`->|`:|`:=|\\^|-|\\+|\\bi-|\\bi\\+|r-|r\\+|/|\\*|\\bdiv\\b|\\bi\\*|\\bmod\\b|\\br\\*|~|\\bi~|\\br~)'
    escape_re = '\\(({}|{})\\)'.format(constant_re, symbol_re)
    const_sym_re = '({}|{}|{})'.format(constant_re, symbol_re, escape_re)
    # State machine: 'root'/'elpi' handle declarations and terms; the remaining
    # states handle comments, strings, quotations, spilling and parentheses.
    tokens = {'root': [include('elpi')], 'elpi': [include('_elpi-comment'), ('(:before|:after|:if|:name)(\\s*)(\\")', bygroups(Keyword.Mode, Text.Whitespace, String.Double), 'elpi-string'), ('(:index)(\\s*\\()', bygroups(Keyword.Mode, Text.Whitespace), 'elpi-indexing-expr'), ('\\b(external pred|pred)(\\s+)({})'.format(const_sym_re), bygroups(Keyword.Declaration, Text.Whitespace, Name.Function), 'elpi-pred-item'), ('\\b(external type|type)(\\s+)(({}(,\\s*)?)+)'.format(const_sym_re), bygroups(Keyword.Declaration, Text.Whitespace, Name.Function), 'elpi-type'), ('\\b(kind)(\\s+)(({}|,)+)'.format(const_sym_re), bygroups(Keyword.Declaration, Text.Whitespace, Name.Function), 'elpi-type'), ('\\b(typeabbrev)(\\s+)({})'.format(const_sym_re), bygroups(Keyword.Declaration, Text.Whitespace, Name.Function), 'elpi-type'), ('\\b(accumulate)(\\s+)(\\")', bygroups(Keyword.Declaration, Text.Whitespace, String.Double), 'elpi-string'), ('\\b(accumulate|namespace|local)(\\s+)({})'.format(constant_re), bygroups(Keyword.Declaration, Text.Whitespace, Text)), ('\\b(shorten)(\\s+)({}\\.)'.format(constant_re), bygroups(Keyword.Declaration, Text.Whitespace, Text)), ('\\b(pi|sigma)(\\s+)([a-zA-Z][A-Za-z0-9_ ]*)(\\\\)', bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable, Text)), ('\\b(constraint)(\\s+)(({}(\\s+)?)+)'.format(const_sym_re), bygroups(Keyword.Declaration, Text.Whitespace, Name.Function), 'elpi-chr-rule-start'), ('(?=[A-Z_]){}'.format(constant_re), Name.Variable), ('(?=[a-z_]){}\\\\'.format(constant_re), Name.Variable), ('_', Name.Variable), ('({}|!|=>|;)'.format(symbol_re), Keyword.Declaration), (constant_re, Text), ('\\[|\\]|\\||=>', Keyword.Declaration), ('"', String.Double, 'elpi-string'), ('`', String.Double, 'elpi-btick'), ("\\'", String.Double, 'elpi-tick'), ('\\{\\{', Punctuation, 'elpi-quote'), ('\\{[^\\{]', Text, 'elpi-spill'), ('\\(', Text, 'elpi-in-parens'), ('\\d[\\d_]*', Number.Integer), ('-?\\d[\\d_]*(.[\\d_]*)?([eE][+\\-]?\\d[\\d_]*)', Number.Float), 
    ('[\\+\\*\\-/\\^\\.]', Operator)], '_elpi-comment': [('%[^\\n]*\\n', Comment), ('/\\*', Comment, 'elpi-multiline-comment'), ('\\s+', Text.Whitespace)], 'elpi-multiline-comment': [('\\*/', Comment, '#pop'), ('.', Comment)], 'elpi-indexing-expr': [('[0-9 _]+', Number.Integer), ('\\)', Text, '#pop')], 'elpi-type': [('(ctype\\s+)(\\")', bygroups(Keyword.Type, String.Double), 'elpi-string'), ('->', Keyword.Type), (constant_re, Keyword.Type), ('\\(|\\)', Keyword.Type), ('\\.', Text, '#pop'), include('_elpi-comment')], 'elpi-chr-rule-start': [('\\{', Text, 'elpi-chr-rule'), include('_elpi-comment')], 'elpi-chr-rule': [('\\brule\\b', Keyword.Declaration), ('\\\\', Keyword.Declaration), ('\\}', Text, '#pop:2'), include('elpi')], 'elpi-pred-item': [('[io]:', Keyword.Mode, 'elpi-ctype'), ('\\.', Text, '#pop'), include('_elpi-comment')], 'elpi-ctype': [('(ctype\\s+)(\\")', bygroups(Keyword.Type, String.Double), 'elpi-string'), ('->', Keyword.Type), (constant_re, Keyword.Type), ('\\(|\\)', Keyword.Type), (',', Text, '#pop'), ('\\.', Text, '#pop:2'), include('_elpi-comment')], 'elpi-btick': [('[^` ]+', String.Double), ('`', String.Double, '#pop')], 'elpi-tick': [("[^\\' ]+", String.Double), ("\\'", String.Double, '#pop')], 'elpi-string': [('[^\\"]+', String.Double), ('"', String.Double, '#pop')], 'elpi-quote': [('\\{\\{', Punctuation, '#push'), ('\\}\\}', Punctuation, '#pop'), ('(lp:)((?=[A-Z_]){})'.format(constant_re), bygroups(Keyword, Name.Variable)), ('[^l\\}]+', Text), ('l|\\}', Text)], 'elpi-spill': [('\\{[^\\{]', Text, '#push'), ('\\}[^\\}]', Text, '#pop'), include('elpi')], 'elpi-in-parens': [('\\(', Operator, '#push'), ('\\)', Operator, '#pop'), include('elpi')]}
def _get_all_tables(connection: pymedphys.mosaiq.Connection, patient_ids: List[str]) -> Tuple[(Dict[(str, pd.DataFrame)], Dict[(str, Dict[(str, str)])])]:
    """Extract a patient-filtered, partially anonymised snapshot of related Mosaiq tables.

    Starting from *patient_ids*, follows key columns through
    Ident -> Patient/TxField/Chklist -> Site/TrackTreatment/QCLTask/Staff/TxFieldPoint.

    Returns:
        (tables, types_map): `tables` maps table name to its filtered DataFrame;
        `types_map` is populated by `get_filtered_table` (per-table column type
        info — confirm exact contents against that helper).
    """
    tables: Dict[(str, pd.DataFrame)] = {}
    types_map: Dict[(str, Dict[(str, str)])] = {}
    # Root of the join chain: patient identity records keyed by 'IDA'.
    tables['Ident'] = get_filtered_table(connection, types_map, 'Ident', 'IDA', patient_ids)
    pat_id1s = tables['Ident']['Pat_Id1'].unique()
    tables['Patient'] = get_filtered_table(connection, types_map, 'Patient', 'Pat_ID1', pat_id1s)
    tables['TxField'] = get_filtered_table(connection, types_map, 'TxField', 'Pat_ID1', pat_id1s)
    sit_set_ids = tables['TxField']['SIT_Set_ID'].unique()
    tables['Site'] = get_filtered_table(connection, types_map, 'Site', 'SIT_Set_ID', sit_set_ids)
    fld_ids = tables['TxField']['FLD_ID'].unique()
    tables['TrackTreatment'] = get_filtered_table(connection, types_map, 'TrackTreatment', 'FLD_ID', fld_ids)
    tables['Chklist'] = get_filtered_table(connection, types_map, 'Chklist', 'Pat_ID1', pat_id1s)
    tsk_ids = tables['Chklist']['TSK_ID'].unique()
    tables['QCLTask'] = get_filtered_table(connection, types_map, 'QCLTask', 'TSK_ID', tsk_ids)
    # Staff is referenced from three different columns; collect the union.
    responsible_staff_ids = tables['Chklist']['Rsp_Staff_ID'].unique()
    completed_staff_ids = tables['Chklist']['Com_Staff_ID'].unique()
    machine_staff_ids = tables['TrackTreatment']['Machine_ID_Staff_ID'].unique()
    staff_ids_with_nans = set(responsible_staff_ids).union(completed_staff_ids).union(machine_staff_ids)
    # Drop NaN foreign keys before casting to int for the filter.
    staff_ids = np.array(list(staff_ids_with_nans))
    staff_ids = staff_ids[np.logical_not(np.isnan(staff_ids))]
    staff_ids = staff_ids.astype(int)
    tables['Staff'] = get_filtered_table(connection, types_map, 'Staff', 'Staff_ID', staff_ids.tolist())
    # Anonymisation: scrub password hashes and remap usernames by first name.
    tables['Staff']['PasswordBytes'] = tables['Staff']['PasswordBytes'].apply((lambda x: PASSWORD_REPLACE))
    for (index, row) in tables['Staff'].iterrows():
        first_name = row['First_Name']
        if (first_name.strip() == ''):
            continue
        # NOTE(review): raises KeyError if a first name is missing from
        # FIRST_NAME_USERNAME_MAP — confirm the map covers all staff.
        new_username = FIRST_NAME_USERNAME_MAP[first_name]
        tables['Staff'].loc[(index, 'User_Name')] = new_username
    tables['TxFieldPoint'] = get_filtered_table(connection, types_map, 'TxFieldPoint', 'FLD_ID', fld_ids)
    return (tables, types_map)
def join_path_with_escaped_name_of_legal_length(path: str, stem: str, ext: str) -> str:
    """Join *path* with an escaped "<stem>.<ext>" trimmed to the filesystem's
    maximum file-name length (queried via pathconf PC_NAME_MAX)."""
    # Budget for the stem: name limit minus the dot and the extension.
    budget = ((os.pathconf(path, 'PC_NAME_MAX') - 1) - len(ext))
    escaped = escape_filename(stem)
    while (len(escaped) > budget):
        # Escaping can expand characters, so trim the raw stem and retry
        # with a progressively smaller budget until the escaped form fits.
        stem = stem[:budget]
        budget -= 1
        escaped = escape_filename(stem)
    return os.path.join(path, f'{escaped}.{ext}')
class DLRMTrainTest(unittest.TestCase):
    """Smoke test: a DLRMTrain forward pass yields one logit per example."""

    def test_basic(self) -> None:
        batch_size = 2
        emb_dim = 8
        dense_in = 100
        table = EmbeddingBagConfig(name='t2', embedding_dim=emb_dim, num_embeddings=100, feature_names=['f2'])
        collection = EmbeddingBagCollection(tables=[table])
        core = DLRM(embedding_bag_collection=collection, dense_in_features=dense_in, dense_arch_layer_sizes=[20, emb_dim], over_arch_layer_sizes=[5, 1])
        model = DLRMTrain(core)
        dense = torch.rand((batch_size, dense_in))
        sparse = KeyedJaggedTensor.from_offsets_sync(keys=['f2'], values=torch.tensor(range(3)), offsets=torch.tensor([0, 2, 3]))
        batch = Batch(dense_features=dense, sparse_features=sparse, labels=torch.randint(2, (batch_size,)))
        (_, (_, logits, _)) = model(batch)
        self.assertEqual(logits.size(), (batch_size,))
class Migration(migrations.Migration):
    """Add the many-to-many relation fields to Msg and ChannelDB: hidden-from
    accounts, channel/object/account receivers, object/account senders, and
    channel attributes/subscriptions/tags. Uses the swappable user model."""
    dependencies = [('objects', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('typeclasses', '0001_initial'), ('comms', '0002_msg_db_hide_from_objects')]
    # NOTE(review): the help_text of the db_tags AddField contains a literal
    # line break in this copy — confirm it matches the upstream migration.
    operations = [migrations.AddField(model_name='msg', name='db_hide_from_accounts', field=models.ManyToManyField(related_name='hide_from_accounts_set', null=True, to=settings.AUTH_USER_MODEL), preserve_default=True), migrations.AddField(model_name='msg', name='db_receivers_channels', field=models.ManyToManyField(help_text='channel recievers', related_name='channel_set', null=True, to='comms.ChannelDB'), preserve_default=True), migrations.AddField(model_name='msg', name='db_receivers_objects', field=models.ManyToManyField(help_text='object receivers', related_name='receiver_object_set', null=True, to='objects.ObjectDB'), preserve_default=True), migrations.AddField(model_name='msg', name='db_receivers_accounts', field=models.ManyToManyField(help_text='account receivers', related_name='receiver_account_set', null=True, to=settings.AUTH_USER_MODEL), preserve_default=True), migrations.AddField(model_name='msg', name='db_sender_objects', field=models.ManyToManyField(related_name='sender_object_set', null=True, verbose_name='sender(object)', to='objects.ObjectDB', db_index=True), preserve_default=True), migrations.AddField(model_name='msg', name='db_sender_accounts', field=models.ManyToManyField(related_name='sender_account_set', null=True, verbose_name='sender(account)', to=settings.AUTH_USER_MODEL, db_index=True), preserve_default=True), migrations.AddField(model_name='channeldb', name='db_attributes', field=models.ManyToManyField(help_text='attributes on this object. An attribute can hold any pickle-able python object (see docs for special cases).', to='typeclasses.Attribute', null=True), preserve_default=True), migrations.AddField(model_name='channeldb', name='db_subscriptions', field=models.ManyToManyField(related_name='subscription_set', null=True, verbose_name='subscriptions', to=settings.AUTH_USER_MODEL, db_index=True), preserve_default=True), migrations.AddField(model_name='channeldb', name='db_tags', field=models.ManyToManyField(help_text='tags on this object. 
    Tags are simple string markers to identify, group and alias objects.', to='typeclasses.Tag', null=True), preserve_default=True)]
def test_optional_and_positional_only():
    """A field may not be positional-only and optional at the same time:
    constructing the InputShape must raise ValueError with the exact message."""
    with pytest.raises(ValueError, match=full_match_regex_str("Field 'a' can not be positional only and optional")):
        # Field 'a' is optional (is_required=False) yet its Param is POS_ONLY.
        InputShape(constructor=stub_constructor, kwargs=None, fields=(InputField(id='a', type=int, default=NoDefault(), is_required=False, metadata={}, original=None),), params=(Param(field_id='a', name='a', kind=ParamKind.POS_ONLY),), overriden_types=frozenset({'a'}))
def compute_metrics(a: Union[(np.array, Image.Image)], b: Union[(np.array, Image.Image)], metrics: Optional[List[str]]=None, max_val: float=255.0) -> Dict[(str, float)]:
    """Compute image-quality metrics between *a* and *b*.

    Accepts PIL images or numpy arrays; defaults to ['psnr'] when *metrics*
    is None. Returns a dict mapping metric name to its value.
    """
    requested = ['psnr'] if (metrics is None) else metrics

    def _to_batched_tensor(img):
        # PIL image -> ndarray, then ndarray -> float tensor with a batch dim;
        # HWC inputs with 3 channels are permuted to NCHW.
        if isinstance(img, Image.Image):
            img = np.asarray(img)
        tensor = torch.from_numpy(img.copy()).float().unsqueeze(0)
        if (tensor.size(3) == 3):
            tensor = tensor.permute(0, 3, 1, 2)
        return tensor

    a = _to_batched_tensor(a)
    b = _to_batched_tensor(b)
    return {name: _metric_functions[name](a, b, max_val) for name in requested}
def buttons_string(buttons):
    """Return a '|'-joined string naming each pressed button in the bitmask."""
    named_bits = ((LEFT, 'LEFT'), (MIDDLE, 'MIDDLE'), (RIGHT, 'RIGHT'), (MOUSE4, 'MOUSE4'), (MOUSE5, 'MOUSE5'))
    return '|'.join(name for (bit, name) in named_bits if (buttons & bit))
class TestClientError(ClientTestCase):
    """Verify the client maps HTTP error payloads onto the right exception types."""

    def setUp(self):
        super(TestClientError, self).setUp()
        self.base_url = '{}/payments'.format(self.base_url)

    def _stub_error(self, count, status, error):
        # Register a canned error response for GET /payments?count=<count>.
        url = '{}?count={}'.format(self.base_url, count)
        responses.add(responses.GET, url, status=status, body=json.dumps({'error': error}), match_querystring=True)
        return count

    def test_payment_with_invalid_options(self):
        count = self._stub_error(10000, 400, {'field': 'count', 'code': 'BAD_REQUEST_ERROR', 'description': 'The count may not be greater than 100.'})
        self.assertRaises(BadRequestError, self.client.payment.all, {'count': count})

    def test_gateway_error(self):
        count = self._stub_error(10, 504, {'code': 'GATEWAY_ERROR', 'description': 'Payment processing failed due to error at bank/wallet gateway'})
        self.assertRaises(GatewayError, self.client.payment.all, {'count': count})

    def test_server_error(self):
        count = self._stub_error(10, 500, {'code': 'SERVER_ERROR', 'description': 'The server encountered an error. The incident has been reported to admins.'})
        self.assertRaises(ServerError, self.client.payment.all, {'count': count})

    def test_unknown_error(self):
        # Unrecognised error codes fall back to ServerError.
        count = self._stub_error(10, 500, {'code': 'UNKNOWN_ERROR', 'description': 'No Description'})
        self.assertRaises(ServerError, self.client.payment.all, {'count': count})
def _ContainedInOther(rect1, rect2):
if ((rect1.left >= rect2.left) and (rect1.top >= rect2.top) and (rect1.right <= rect2.right) and (rect1.bottom <= rect2.bottom)):
return True
elif ((rect2.left >= rect1.left) and (rect2.top >= rect1.top) and (rect2.right <= rect1.right) and (rect2.bottom <= rect1.bottom)):
return True
return False |
def random_traces(nsamples, code='12', deltat=0.01, dtypes=(num.int8, num.int32, num.float32, num.float64), limit=None):
    """Decorator factory: run the wrapped test once per dtype, appending a
    freshly generated random trace to its positional arguments.

    Args:
        nsamples: number of samples in each generated trace.
        code: trace code passed through to `get_random_trace`.
        deltat: sampling interval.
        dtypes: the sample dtypes to iterate over.
        limit: forwarded to `get_random_trace`.
    """
    import functools  # local import: keeps the module import block untouched

    def decorator(func):
        # Previous revision had a dead no-op statement `(func)` here (likely a
        # stripped decorator); replaced with functools.wraps so the wrapped
        # test keeps its name and docstring.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for dtype in dtypes:
                tr = get_random_trace(nsamples, code, deltat, dtype, limit)
                func(*(args + (tr,)), **kwargs)
        return wrapper
    return decorator
class UpDecoderBlock2D(nn.Module):
    """Decoder up-block: a stack of ResnetBlock2D layers optionally followed
    by a convolutional 2x upsampler."""

    def __init__(self, in_channels: int, out_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor=1.0, add_upsample=True, temb_channels=None):
        super().__init__()
        blocks = []
        for layer_idx in range(num_layers):
            # Only the first resnet maps in_channels -> out_channels.
            block_in = in_channels if (layer_idx == 0) else out_channels
            blocks.append(ResnetBlock2D(in_channels=block_in, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
        self.resnets = nn.ModuleList(blocks)
        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

    def forward(self, hidden_states, temb=None):
        for block in self.resnets:
            hidden_states = block(hidden_states, temb=temb)
        if (self.upsamplers is not None):
            for up in self.upsamplers:
                hidden_states = up(hidden_states)
        return hidden_states
class PlayOpenWindow(Packet):
    """Clientbound packet (id=45, to=1): tells the client to open a window
    of the given type with the given title."""
    id = 45
    to = 1

    def __init__(self, window_id: int, window_type: int, title: Chat) -> None:
        super().__init__()
        self.window_id = window_id
        self.window_type = window_type
        self.title = title

    def encode(self) -> bytes:
        # Wire layout: varint window id, varint window type, chat title.
        payload = Buffer.pack_varint(self.window_id)
        payload += Buffer.pack_varint(self.window_type)
        payload += Buffer.pack_chat(self.title)
        return payload
def invcompress(quality, metric='mse', pretrained=False, progress=True, **kwargs):
    """Load the InvCompress model at the requested quality level.

    Args:
        quality: rate-distortion operating point, in 1..8.
        metric: training distortion metric, 'mse' or 'ms-ssim'.
        pretrained: pretrained weights are not yet supported; must be False.
        progress: forwarded to the model loader.

    Raises:
        ValueError: on an unknown metric, out-of-range quality, or
            pretrained=True.
    """
    if (metric not in ('mse', 'ms-ssim')):
        raise ValueError(f'Invalid metric "{metric}"')
    if ((quality < 1) or (quality > 8)):
        # Fixed: message previously claimed a (1, 13) range while the check is 1..8.
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
    if pretrained:
        # Fixed NameError: the message referenced an undefined name `pretrain`.
        raise ValueError(f'Invalid pretrained "{pretrained}", not yet supported')
    return _load_model('invcompress', metric, quality, pretrained, progress, **kwargs)
class Scale(object):
    """Resize an (image, mask) pair so its longer side equals `size`,
    preserving aspect ratio (bilinear for image, nearest for mask)."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, mask):
        assert (img.size == mask.size)
        (w, h) = img.size
        # Longer edge already at target size: return the pair unchanged.
        if (((w >= h) and (w == self.size)) or ((h >= w) and (h == self.size))):
            return (img, mask)
        if (w > h):
            (ow, oh) = (self.size, int(((self.size * h) / w)))
        else:
            (ow, oh) = (int(((self.size * w) / h)), self.size)
        return (img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST))
def get_prefix(cfg: DictConfig) -> str:
    """Build a short run identifier: '<task>-<locale>-<6-char config hash>'.

    The hash fingerprints the 'clean' config section, a second section, and
    the model name; md5 is used here only as a cheap fingerprint, not for
    security.
    """
    string = dict(cfg['clean']).__repr__()
    # NOTE(review): `__key__` looks like a placeholder/templated name — it must
    # exist as a module-level constant for this to run; confirm the intended
    # config section.
    string += dict(cfg[__key__]).__repr__()
    string += cfg.model.name
    string = string.encode()
    string = hashlib.md5(string).hexdigest()
    string = string[:6]
    # task '1' maps to ranking, anything else to classification.
    task = ('rank' if (cfg.task == '1') else 'cls')
    string = f'{task}-{cfg.locale}-{string}'
    return string
def get_incremental_uncovered_lines(abs_path: str, base_commit: str, actual_commit: Optional[str]) -> List[Tuple[(int, str, str)]]:
    """Report lines of *abs_path* that changed since *base_commit* but are uncovered.

    Runs `git diff --unified=0` between *base_commit* and *actual_commit*
    (working tree when None), keeps only the touched lines, then cross-checks
    them against the `<file>,cover` annotation file (presumably `coverage
    annotate` output — confirm) when present, skipping explicitly ignored lines.

    Returns:
        list of (line_number, line_content, diff_context) tuples.
    """
    if (not os.path.isfile(abs_path)):
        return []
    # Omit the second commit to diff against the working tree.
    optional_actual_commit = ([] if (actual_commit is None) else [actual_commit])
    unified_diff_lines_str = shell_tools.output_of(['git', 'diff', '--unified=0', base_commit, *optional_actual_commit, '--', abs_path])
    unified_diff_lines = [e for e in unified_diff_lines_str.split('\n') if e.strip()]
    # Map of changed line numbers -> diff context (per the helper's contract).
    touched_lines = diff_to_new_interesting_lines(unified_diff_lines)
    with open(abs_path, 'r') as actual_file:
        ignored_lines = determine_ignored_lines(actual_file.read())
    # Prefer the coverage annotation file when it exists.
    cover_path = (abs_path + ',cover')
    has_cover_file = os.path.isfile(cover_path)
    content_file = (cover_path if has_cover_file else abs_path)
    with open(content_file, 'r') as annotated_coverage_file:
        return [(i, fix_line_from_coverage_file(line), touched_lines[i]) for (i, line) in enumerate(annotated_coverage_file, start=1) if ((i in touched_lines) and (i not in ignored_lines)) if line_counts_as_uncovered(line, has_cover_file)]
class DatatableFactory(factory.Factory):
    """factory_boy factory producing plain dicts that mimic datatable records
    (presumably Quandl-style vendor/datatable codes — confirm against usage)."""

    class Meta():
        # Build plain dicts rather than model instances.
        model = dict
    # Sequences guarantee unique ids/codes across generated records.
    id = factory.Sequence((lambda n: n))
    vendor_code = factory.Sequence((lambda n: 'VENDOR_CODE{0}'.format(n)))
    datatable_code = factory.Sequence((lambda n: 'DATATABLE_CODE{0}'.format(n)))
    name = factory.Sequence((lambda n: 'DATATABLE{0}'.format(n)))
def test_wcs_downsampling():
    """slice_wcs must shift crpix correctly for strided/offset slices of a 1-D WCS."""
    wcs = WCS(naxis=1)
    wcs.wcs.ctype = ['FREQ']
    wcs.wcs.crpix = [1.0]
    # (slice applied, expected reference pixel) — same cases and order as before.
    cases = (
        (slice(0, None, 1), 1),
        (slice(0, None, 2), 0.75),
        (slice(0, None, 4), 0.625),
        (slice(2, None, 1), (- 1)),
        (slice(2, None, 2), (- 0.25)),
        (slice(2, None, 4), 0.125),
    )
    for (view, expected) in cases:
        sliced = slice_wcs(wcs, view)
        assert (sliced.wcs.crpix[0] == expected)
class KerasModel(tf.keras.Model):
    """Keras classification model wrapping many backbone architectures.

    The backbone (ResNet/DenseNet/VGG/...) is selected by name and built from
    `keras_applications` with `include_top=False`; a shared Flatten + Dense
    head produces the logits. `call` returns (logits, features).
    """

    def __init__(self, args, architecture='ResNet50', data='CIFAR10'):
        super().__init__(name=architecture)
        self.args = args
        # 64-bit mode is not supported by this model.
        if self.args.bit64:
            raise NotImplementedError()
        self.architecture = architecture
        self.data = data
        # Dataset-dependent input shape and class count.
        if (data == 'CIFAR10'):
            self.num_classes = 10
            self.expected_shape = (32, 32, 3)
        elif (data == 'ImageNet'):
            self.num_classes = 1000
            self.expected_shape = (224, 224, 3)
        self.flatten = layers.Flatten(name='features')
        self.dense = Dense(self.num_classes, activation=None, name='logits', kernel_initializer=initializers.RandomNormal(stddev=0.01), kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY), bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY))

    def call(self, inputs=None, params=defaultdict((lambda : None)), preprocessing=True):
        # NOTE(review): `params=defaultdict(...)` is a mutable default argument
        # shared across calls — confirm it is only ever read, never mutated.
        if preprocessing:
            data = self.data
        else:
            data = None
        # CIFAR10 inputs are standardized here; ImageNet inputs are preprocessed
        # by each backbone's own preprocess_input below.
        if (data == 'CIFAR10'):
            (datamean, datastd) = (np.array((0.4914, 0.4822, 0.4465)), np.array((0.2023, 0.1994, 0.201)))
            inputs = tf_standardize(inputs, datamean, datastd)
        elif (data == 'ImageNet'):
            pass
        # Dispatch on architecture family; each branch builds the headless
        # backbone with average (or max, for VGG) pooling.
        if (self.architecture in ['ResNet50', 'ResNet101', 'ResNet152']):
            if (data == 'ImageNet'):
                # NOTE(review): this branch uses keras.applications while the
                # builder below uses keras_applications — confirm intended.
                inputs = keras.applications.resnet.preprocess_input(inputs)
            (x, _) = getattr(keras_applications.resnet, self.architecture)(include_top=False, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='avg', classes=self.num_classes, params=params)
        elif (self.architecture in ['ResNet50V2', 'ResNet101V2', 'ResNet152V2']):
            if (data == 'ImageNet'):
                inputs = keras_applications.resnet_v2.preprocess_input(inputs)
            (x, _) = getattr(keras_applications.resnet_v2, self.architecture)(include_top=False, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='avg', classes=self.num_classes, params=params)
        elif (self.architecture in ['ResNeXt50', 'ResNeXt101']):
            if (data == 'ImageNet'):
                inputs = keras_applications.resnext.preprocess_input(inputs)
            (x, _) = getattr(keras_applications.resnext, self.architecture)(include_top=False, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='avg', classes=self.num_classes, params=params)
        elif (self.architecture in ['DenseNet40', 'DenseNet121', 'DenseNet169', 'DenseNet201']):
            if (data == 'ImageNet'):
                inputs = keras_applications.densenet.preprocess_input(inputs)
            (x, _) = getattr(keras_applications.densenet, self.architecture)(include_top=False, input_tensor=inputs, input_shape=self.expected_shape, pooling='avg', classes=self.num_classes, params=params)
        elif (self.architecture in ['NASNetMobile', 'NASNetLarge']):
            if (data == 'ImageNet'):
                inputs = keras_applications.nasnet.preprocess_input(inputs)
            (x, _) = getattr(keras_applications.nasnet, self.architecture)(include_top=False, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='avg', classes=self.num_classes, params=params)
        elif (self.architecture in ['InceptionResNetV2']):
            if (data == 'ImageNet'):
                inputs = keras_applications.inception_resnet_v2.preprocess_input(inputs)
            (x, _) = getattr(keras_applications.inception_resnet_v2, self.architecture)(include_top=False, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='avg', classes=self.num_classes, params=params)
        elif (self.architecture in ['InceptionV3']):
            if (data == 'ImageNet'):
                inputs = keras_applications.inception_v3.preprocess_input(inputs)
            (x, _) = getattr(keras_applications.inception_v3, self.architecture)(include_top=False, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='avg', classes=self.num_classes, params=params)
        elif (self.architecture in ['VGG19']):
            if (data == 'ImageNet'):
                inputs = keras_applications.vgg19.preprocess_input(inputs)
            (x, _) = keras_applications.vgg19.VGG19(include_top=False, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='max', classes=self.num_classes, params=params)
        elif (self.architecture in ['VGG16']):
            if (data == 'ImageNet'):
                inputs = keras_applications.vgg16.preprocess_input(inputs)
            (x, _) = keras_applications.vgg16.VGG16(include_top=False, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='max', classes=self.num_classes, params=params)
        elif (self.architecture in ['VGG13']):
            if (data == 'ImageNet'):
                # NOTE(review): upstream keras_applications has no vgg13/vgg11
                # modules — confirm these come from a project-local fork.
                inputs = keras_applications.vgg13.preprocess_input(inputs)
            (x, _) = keras_applications.vgg13.VGG13(include_top=False, input_tensor=inputs, input_shape=self.expected_shape, pooling='max', classes=self.num_classes, params=params)
        elif (self.architecture in ['VGG11']):
            if (data == 'ImageNet'):
                inputs = keras_applications.vgg11.preprocess_input(inputs)
            (x, _) = keras_applications.vgg11.VGG11(include_top=False, input_tensor=inputs, input_shape=self.expected_shape, pooling='max', classes=self.num_classes, params=params)
        elif (self.architecture in ['Xception']):
            if (data == 'ImageNet'):
                inputs = keras_applications.xception.preprocess_input(inputs)
            (x, _) = keras_applications.xception.Xception(include_top=False, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='avg', classes=self.num_classes, params=params)
        elif (self.architecture in ['MobileNet']):
            if (data == 'ImageNet'):
                inputs = keras_applications.mobilenet.preprocess_input(inputs)
            (x, _) = keras_applications.mobilenet.MobileNet(include_top=False, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='avg', classes=self.num_classes, params=params)
        elif (self.architecture in ['MobileNetV2']):
            if (data == 'ImageNet'):
                inputs = keras_applications.mobilenet_v2.preprocess_input(inputs)
            (x, _) = keras_applications.mobilenet_v2.MobileNetV2(include_top=False, alpha=1.0, weights=None, input_tensor=inputs, input_shape=self.expected_shape, pooling='avg', classes=self.num_classes, params=params)
        else:
            raise NotImplementedError('Unknown architecture.')
        # Shared head: flatten pooled backbone output, then linear logits.
        features = self.flatten(x)
        logits = self.dense(features, params=params)
        return (logits, features)

    def construct_weights(self):
        """Build the model once to materialize and name its trainable weights.

        Returns (params, buffers) where params maps scope-stripped weight names
        to variables; buffers is empty.
        """
        input = tf.keras.Input(shape=self.expected_shape)
        model = models.Model(input, self.call(input, preprocessing=False), name=self.architecture)
        # Strip the current TF1-style graph name scope from each weight name.
        current_scope = tf.get_default_graph().get_name_scope()
        scope_len = (len(current_scope) + 1)
        params = {param.name[scope_len:]: param for param in model.trainable_weights}
        return (params, {})

    def forward(self, inputs, params, buffers={}):
        # NOTE(review): mutable default `buffers={}` — confirm it is never mutated.
        (logits, feats) = self.call(inputs, params)
        return (logits, feats, buffers)
def safe_meet(t: Type, s: Type) -> Type:
    """Meet two types, additionally handling `UnpackType` operands.

    Ordinary types defer to `meet_types`. Two unpacks are met by meeting their
    inner types; when that meet is uninhabited, the result is re-wrapped in the
    appropriate tuple fallback so it remains a valid unpack. An unpack mixed
    with a non-unpack has no common value, so the meet is uninhabited.
    """
    from mypy.meet import meet_types  # local import (avoids an import cycle)
    if ((not isinstance(t, UnpackType)) and (not isinstance(s, UnpackType))):
        return meet_types(t, s)
    if (isinstance(t, UnpackType) and isinstance(s, UnpackType)):
        # Pick the tuple fallback TypeInfo that matches the unpacked form.
        unpacked = get_proper_type(t.type)
        if isinstance(unpacked, TypeVarTupleType):
            fallback_type = unpacked.tuple_fallback.type
        elif isinstance(unpacked, TupleType):
            fallback_type = unpacked.partial_fallback.type
        else:
            assert (isinstance(unpacked, Instance) and (unpacked.type.fullname == 'builtins.tuple'))
            fallback_type = unpacked.type
        res = meet_types(t.type, s.type)
        if isinstance(res, UninhabitedType):
            # An empty meet still needs a tuple-shaped carrier inside the unpack.
            res = Instance(fallback_type, [res])
        return UnpackType(res)
    return UninhabitedType()
def data_type_format4(signal_data_type, number_of_bytes):
    """Map an MDF4 signal data-type code and byte width to a struct format.

    Args:
        signal_data_type: Channel data-type code: 0/1 unsigned int LE/BE,
            2/3 signed int LE/BE, 4/5 float LE/BE, 6/7/10/11/12
            string/byte-array (no endianness), 8/9 byte array LE/BE,
            15/16 complex (two packed floats) LE/BE.
        number_of_bytes: Width of one value in bytes.

    Returns:
        Tuple ``(endian, data_type)`` for use with the ``struct`` module,
        e.g. ``('<', 'H')``.

    Unsupported combinations emit a warning and fall back to a raw
    ``'<N>s'`` byte string.  (Previously these paths left ``data_type`` —
    and, for an unknown type code, ``endian`` — unassigned, so the final
    ``return`` raised ``UnboundLocalError``.)
    """
    raw = '{}s'.format(number_of_bytes)
    if signal_data_type in (0, 1):  # unsigned integer
        endian = '<' if signal_data_type == 0 else '>'
        if number_of_bytes == 1:
            data_type = 'B'
        elif number_of_bytes == 2:
            data_type = 'H'
        elif number_of_bytes <= 4:
            data_type = 'I'
        elif number_of_bytes <= 8:
            data_type = 'Q'
        else:
            data_type = raw
    elif signal_data_type in (2, 3):  # signed integer
        endian = '<' if signal_data_type == 2 else '>'
        if number_of_bytes == 1:
            data_type = 'b'
        elif number_of_bytes == 2:
            data_type = 'h'
        elif number_of_bytes <= 4:
            data_type = 'i'
        elif number_of_bytes <= 8:
            data_type = 'q'
        else:
            warn('Unsupported number of bytes for signed int {}'.format(signal_data_type))
            data_type = raw  # fallback instead of UnboundLocalError
    elif signal_data_type in (4, 5):  # IEEE floating point
        endian = '<' if signal_data_type == 4 else '>'
        data_type = {2: 'e', 4: 'f', 8: 'd'}.get(number_of_bytes)
        if data_type is None:
            warn('Unsupported number of bytes for floating point {}'.format(signal_data_type))
            data_type = raw
    elif signal_data_type in (6, 7, 10, 11, 12):  # string / byte array
        data_type = raw
        endian = ''
    elif signal_data_type in (8, 9):  # byte array with explicit endianness
        data_type = raw
        endian = '<' if signal_data_type == 8 else '>'
    elif signal_data_type in (15, 16):  # complex: two packed floats
        endian = '<' if signal_data_type == 15 else '>'
        data_type = {2: '2e', 4: '2f', 8: '2d'}.get(number_of_bytes)
        if data_type is None:
            warn('Unsupported number of bytes for floating point {}'.format(signal_data_type))
            data_type = raw
    else:
        warn('Unsupported Signal Data Type {} {}'.format(signal_data_type, number_of_bytes))
        endian = ''
        data_type = raw
    return (endian, data_type)
def freeze_batch_norm_2d(module):
    """Recursively replace BatchNorm2d/SyncBatchNorm layers with frozen copies.

    Returns the converted module.  Converted children are swapped in via
    ``add_module``; unconverted modules are returned unchanged.
    """
    bn_types = (
        torch.nn.modules.batchnorm.BatchNorm2d,
        torch.nn.modules.batchnorm.SyncBatchNorm,
    )
    if isinstance(module, bn_types):
        frozen = FrozenBatchNorm2d(module.num_features)
        frozen.num_features = module.num_features
        frozen.affine = module.affine
        if module.affine:
            # Clone + detach so the frozen copy shares no autograd history.
            frozen.weight.data = module.weight.data.clone().detach()
            frozen.bias.data = module.bias.data.clone().detach()
        frozen.running_mean.data = module.running_mean.data
        frozen.running_var.data = module.running_var.data
        frozen.eps = module.eps
        return frozen
    for name, child in module.named_children():
        converted = freeze_batch_norm_2d(child)
        if converted is not child:
            module.add_module(name, converted)
    return module
class OrFilter(Filter):
    """Filter that combines two filters with a short-circuiting logical OR."""

    def __init__(self, base, other):
        self.base = base
        self.other = other

    async def __call__(self, client: 'pyrogram.Client', update: Update):
        first = await self._evaluate(self.base, client, update)
        if first:
            # Short circuit: the second filter is not evaluated.
            return True
        second = await self._evaluate(self.other, client, update)
        return first or second

    @staticmethod
    async def _evaluate(flt, client, update):
        # Async filters are awaited directly; synchronous ones run in the
        # client's executor so they do not block the event loop.
        if inspect.iscoroutinefunction(flt.__call__):
            return await flt(client, update)
        return await client.loop.run_in_executor(client.executor, flt, client, update)
class Describe_Cell():
    """Unit tests for the ``_Cell`` table-cell proxy.

    The bare ``(params=[...])`` expressions in the previous version were
    the residue of stripped ``@pytest.fixture`` decorators — invalid
    syntax that made the module unimportable.  The decorators are
    restored below.
    """

    # Imported in the class body so the restored fixture decorators
    # resolve even if the module-level import was lost together with them.
    import pytest

    def it_knows_what_text_it_contains(self, text_get_fixture):
        (cell, expected_text) = text_get_fixture
        text = cell.text
        assert (text == expected_text)

    def it_can_replace_its_content_with_a_string_of_text(self, text_set_fixture):
        (cell, text, expected_xml) = text_set_fixture
        cell.text = text
        assert (cell._tc.xml == expected_xml)

    def it_knows_its_vertical_alignment(self, alignment_get_fixture):
        (cell, expected_value) = alignment_get_fixture
        vertical_alignment = cell.vertical_alignment
        assert (vertical_alignment == expected_value)

    def it_can_change_its_vertical_alignment(self, alignment_set_fixture):
        (cell, new_value, expected_xml) = alignment_set_fixture
        cell.vertical_alignment = new_value
        assert (cell._element.xml == expected_xml)

    def it_knows_its_width_in_EMU(self, width_get_fixture):
        (cell, expected_width) = width_get_fixture
        assert (cell.width == expected_width)

    def it_can_change_its_width(self, width_set_fixture):
        (cell, value, expected_xml) = width_set_fixture
        cell.width = value
        assert (cell.width == value)
        assert (cell._tc.xml == expected_xml)

    def it_provides_access_to_the_paragraphs_it_contains(self, paragraphs_fixture):
        cell = paragraphs_fixture
        paragraphs = cell.paragraphs
        assert (len(paragraphs) == 2)
        count = 0
        for (idx, paragraph) in enumerate(paragraphs):
            assert isinstance(paragraph, Paragraph)
            assert (paragraph is paragraphs[idx])
            count += 1
        assert (count == 2)

    def it_provides_access_to_the_tables_it_contains(self, tables_fixture):
        (cell, expected_count) = tables_fixture
        tables = cell.tables
        assert (len(tables) == expected_count)
        count = 0
        for (idx, table) in enumerate(tables):
            assert isinstance(table, Table)
            assert (tables[idx] is table)
            count += 1
        assert (count == expected_count)

    def it_can_add_a_paragraph(self, add_paragraph_fixture):
        (cell, expected_xml) = add_paragraph_fixture
        p = cell.add_paragraph()
        assert (cell._tc.xml == expected_xml)
        assert isinstance(p, Paragraph)

    def it_can_add_a_table(self, add_table_fixture):
        (cell, expected_xml) = add_table_fixture
        table = cell.add_table(rows=2, cols=2)
        assert (cell._element.xml == expected_xml)
        assert isinstance(table, Table)

    def it_can_merge_itself_with_other_cells(self, merge_fixture):
        (cell, other_cell, merged_tc_) = merge_fixture
        merged_cell = cell.merge(other_cell)
        cell._tc.merge.assert_called_once_with(other_cell._tc)
        assert isinstance(merged_cell, _Cell)
        assert (merged_cell._tc is merged_tc_)
        assert (merged_cell._parent is cell._parent)

    # ------------------------------ fixtures ------------------------------

    @pytest.fixture(params=[('w:tc', 'w:tc/w:p'), ('w:tc/w:p', 'w:tc/(w:p, w:p)'), ('w:tc/w:tbl', 'w:tc/(w:tbl, w:p)')])
    def add_paragraph_fixture(self, request):
        (tc_cxml, after_tc_cxml) = request.param
        cell = _Cell(element(tc_cxml), None)
        expected_xml = xml(after_tc_cxml)
        return (cell, expected_xml)

    @pytest.fixture
    def add_table_fixture(self, request):
        cell = _Cell(element('w:tc/w:p'), None)
        expected_xml = snippet_seq('new-tbl')[1]
        return (cell, expected_xml)

    @pytest.fixture(params=[('w:tc', None), ('w:tc/w:tcPr', None), ('w:tc/w:tcPr/w:vAlign{w:val=bottom}', WD_ALIGN_VERTICAL.BOTTOM), ('w:tc/w:tcPr/w:vAlign{w:val=top}', WD_ALIGN_VERTICAL.TOP)])
    def alignment_get_fixture(self, request):
        (tc_cxml, expected_value) = request.param
        cell = _Cell(element(tc_cxml), None)
        return (cell, expected_value)

    @pytest.fixture(params=[('w:tc', WD_ALIGN_VERTICAL.TOP, 'w:tc/w:tcPr/w:vAlign{w:val=top}'), ('w:tc/w:tcPr', WD_ALIGN_VERTICAL.CENTER, 'w:tc/w:tcPr/w:vAlign{w:val=center}'), ('w:tc/w:tcPr/w:vAlign{w:val=center}', WD_ALIGN_VERTICAL.BOTTOM, 'w:tc/w:tcPr/w:vAlign{w:val=bottom}'), ('w:tc/w:tcPr/w:vAlign{w:val=center}', None, 'w:tc/w:tcPr'), ('w:tc', None, 'w:tc/w:tcPr'), ('w:tc/w:tcPr', None, 'w:tc/w:tcPr')])
    def alignment_set_fixture(self, request):
        (cxml, new_value, expected_cxml) = request.param
        cell = _Cell(element(cxml), None)
        expected_xml = xml(expected_cxml)
        return (cell, new_value, expected_xml)

    @pytest.fixture
    def merge_fixture(self, tc_, tc_2_, parent_, merged_tc_):
        (cell, other_cell) = (_Cell(tc_, parent_), _Cell(tc_2_, parent_))
        tc_.merge.return_value = merged_tc_
        return (cell, other_cell, merged_tc_)

    @pytest.fixture
    def paragraphs_fixture(self):
        return _Cell(element('w:tc/(w:p, w:p)'), None)

    @pytest.fixture(params=[('w:tc', 0), ('w:tc/w:tbl', 1), ('w:tc/(w:tbl,w:tbl)', 2), ('w:tc/(w:p,w:tbl)', 1), ('w:tc/(w:tbl,w:tbl,w:p)', 2)])
    def tables_fixture(self, request):
        (cell_cxml, expected_count) = request.param
        cell = _Cell(element(cell_cxml), None)
        return (cell, expected_count)

    @pytest.fixture(params=[('w:tc', ''), ('w:tc/w:p/w:r/w:t"foobar"', 'foobar'), ('w:tc/(w:p/w:r/w:t"foo",w:p/w:r/w:t"bar")', 'foo\nbar'), ('w:tc/(w:tcPr,w:p/w:r/w:t"foobar")', 'foobar'), ('w:tc/w:p/w:r/(w:t"fo",w:tab,w:t"ob",w:br,w:t"ar",w:br)', 'fo\tob\nar\n')])
    def text_get_fixture(self, request):
        (tc_cxml, expected_text) = request.param
        cell = _Cell(element(tc_cxml), None)
        return (cell, expected_text)

    @pytest.fixture(params=[('w:tc/w:p', 'foobar', 'w:tc/w:p/w:r/w:t"foobar"'), ('w:tc/w:p', 'fo\tob\rar\n', 'w:tc/w:p/w:r/(w:t"fo",w:tab,w:t"ob",w:br,w:t"ar",w:br)'), ('w:tc/(w:tcPr, w:p, w:tbl, w:p)', 'foobar', 'w:tc/(w:tcPr, w:p/w:r/w:t"foobar")')])
    def text_set_fixture(self, request):
        (tc_cxml, new_text, expected_cxml) = request.param
        cell = _Cell(element(tc_cxml), None)
        expected_xml = xml(expected_cxml)
        return (cell, new_text, expected_xml)

    @pytest.fixture(params=[('w:tc', None), ('w:tc/w:tcPr', None), ('w:tc/w:tcPr/w:tcW{w:w=25%,w:type=pct}', None), ('w:tc/w:tcPr/w:tcW{w:w=1440,w:type=dxa}', 914400)])
    def width_get_fixture(self, request):
        (tc_cxml, expected_width) = request.param
        cell = _Cell(element(tc_cxml), None)
        return (cell, expected_width)

    @pytest.fixture(params=[('w:tc', Inches(1), 'w:tc/w:tcPr/w:tcW{w:w=1440,w:type=dxa}'), ('w:tc/w:tcPr/w:tcW{w:w=25%,w:type=pct}', Inches(2), 'w:tc/w:tcPr/w:tcW{w:w=2880,w:type=dxa}')])
    def width_set_fixture(self, request):
        (tc_cxml, new_value, expected_cxml) = request.param
        cell = _Cell(element(tc_cxml), None)
        expected_xml = xml(expected_cxml)
        return (cell, new_value, expected_xml)

    # ------------------------- fixture components -------------------------

    @pytest.fixture
    def merged_tc_(self, request):
        return instance_mock(request, CT_Tc)

    @pytest.fixture
    def parent_(self, request):
        return instance_mock(request, Table)

    @pytest.fixture
    def tc_(self, request):
        return instance_mock(request, CT_Tc)

    @pytest.fixture
    def tc_2_(self, request):
        return instance_mock(request, CT_Tc)
def find_subcommand(action: argparse.ArgumentParser, subcmd_names: List[str]) -> argparse.ArgumentParser:
    """Walk ``action``'s subparsers following the chain of subcommand names.

    Args:
        action: Parser to start from.
        subcmd_names: Chain of subcommand names, outermost first.  No longer
            mutated (the previous implementation ``pop``-ed the list,
            destroying the caller's copy and producing an error message that
            showed the *remaining* names instead of the missing one).

    Returns:
        The parser registered for the final subcommand, or ``action`` itself
        when ``subcmd_names`` is empty.

    Raises:
        ValueError: If a name in the chain has no matching subparser.
    """
    if not subcmd_names:
        return action
    cur_subcmd = subcmd_names[0]
    for sub_action in action._actions:
        if isinstance(sub_action, argparse._SubParsersAction):
            for choice_name, choice in sub_action.choices.items():
                if choice_name == cur_subcmd:
                    # Recurse without mutating the caller's list.
                    return find_subcommand(choice, subcmd_names[1:])
            # A parser holds at most one subparsers action worth searching.
            break
    raise ValueError(f"Could not find subcommand '{cur_subcmd}'")
class Logger(object):
    """Tee ``sys.stdout``/``sys.stderr`` to an optional log file.

    While an instance is alive it installs itself as both standard
    streams; everything written is forwarded to the original stdout and,
    when configured, appended to the log file.  ``close()`` (or exiting
    the context manager) restores the original streams.
    """

    def __init__(self, file_name: Optional[str]=None, file_mode: str='w', should_flush: bool=True):
        self.file = None
        if file_name is not None:
            self.file = open(file_name, file_mode)
        self.should_flush = should_flush
        # Remember the real streams so close() can put them back.
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        sys.stdout = self
        sys.stderr = self

    def __enter__(self) -> 'Logger':
        return self

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        self.close()

    def write(self, text: Union[str, bytes]) -> None:
        """Forward *text* to the original stdout and, if set, the log file."""
        if isinstance(text, bytes):
            text = text.decode()
        if not text:
            # Nothing to do for empty writes.
            return
        if self.file is not None:
            self.file.write(text)
        self.stdout.write(text)
        if self.should_flush:
            self.flush()

    def flush(self) -> None:
        """Flush the log file (if any) and the original stdout."""
        if self.file is not None:
            self.file.flush()
        self.stdout.flush()

    def close(self) -> None:
        """Flush, restore the original streams, and close the log file."""
        self.flush()
        if sys.stdout is self:
            sys.stdout = self.stdout
        if sys.stderr is self:
            sys.stderr = self.stderr
        if self.file is not None:
            self.file.close()
            self.file = None
def check_default_optimizer(optimizer, model, prefix=''):
    """Assert that *optimizer* is the expected default SGD over *model*.

    Hyper-parameters are checked against the module-level ``base_lr``,
    ``momentum`` and ``base_wd`` constants, and the single param group is
    verified to contain the model's parameters in the expected order.
    """
    assert isinstance(optimizer, torch.optim.SGD)
    defaults = optimizer.defaults
    assert defaults['lr'] == base_lr
    assert defaults['momentum'] == momentum
    assert defaults['weight_decay'] == base_wd
    group = optimizer.param_groups[0]
    expected_names = [
        'param1', 'conv1.weight', 'conv2.weight', 'conv2.bias',
        'bn.weight', 'bn.bias', 'sub.param1', 'sub.conv1.weight',
        'sub.conv1.bias', 'sub.gn.weight', 'sub.gn.bias',
    ]
    if OPS_AVAILABLE:
        # Deformable-conv parameters only exist when the custom ops are built.
        expected_names += ['dcn.weight', 'dcn.conv_offset.weight', 'dcn.conv_offset.bias']
    named_params = dict(model.named_parameters())
    assert len(group['params']) == len(expected_names)
    for param, name in zip(group['params'], expected_names):
        assert torch.equal(param, named_params[prefix + name])
def join_path(path1, path2):
    """Concatenate two paths (sequences of nodes) that share an endpoint.

    The shared endpoint appears only once in the result; one path is
    reversed when necessary so the result runs continuously from one free
    end to the other.

    Raises:
        ValueError: If the paths share no endpoint.
    """
    if path1[-1] == path2[0]:
        return path1 + path2[1:]
    elif path2[-1] == path1[0]:
        return path2 + path1[1:]
    elif path1[-1] == path2[-1]:
        # Append path2 reversed, dropping its shared last element.
        # Bug fix: the previous slice ``path2[1::-1]`` kept only the first
        # two elements of path2, corrupting the result whenever
        # len(path2) != 3.
        return path1 + path2[-2::-1]
    elif path1[0] == path2[0]:
        # Prepend path2 reversed, dropping its shared first element.
        return path2[:0:-1] + path1
    raise ValueError('Paths cannot be joined as they do not share any ends')
def main():
    """Entry point: load embeddings, run the requested evaluations, record results."""
    args = parse_args()
    if not check(args):
        return
    print('Load Embeddings!')
    emb_file_path = f'{model_folder}/{args.model}/data/{args.dataset}/{emb_file}'
    train_para, emb_dict = load(emb_file_path)
    print('Start Evaluation!')
    all_tasks = []
    all_scores = []
    # 'both' runs node classification and link prediction in sequence.
    if args.task in ('nc', 'both'):
        print(f'Evaluate Node Classification Performance for Model {args.model} on Dataset {args.dataset}!')
        label_file_path = f'{data_folder}/{args.dataset}/{label_file}'
        label_test_path = f'{data_folder}/{args.dataset}/{label_test_file}'
        all_tasks.append('nc')
        all_scores.append(nc_evaluate(args.dataset, args.supervised, label_file_path, label_test_path, emb_dict))
    if args.task in ('lp', 'both'):
        print(f'Evaluate Link Prediction Performance for Model {args.model} on Dataset {args.dataset}!')
        link_test_path = f'{data_folder}/{args.dataset}/{link_test_file}'
        all_tasks.append('lp')
        all_scores.append(lp_evaluate(link_test_path, emb_dict))
    print('Record Results!')
    record(args, all_tasks, train_para, all_scores)
    return
class UpdatingVM(VM):
    """VM that copies designated outputs back into their input storage cells.

    ``update_vars`` maps an input variable to the output variable whose
    value should overwrite it after every call (shared-variable updates).
    """

    need_update_inputs = False

    def __init__(self, fgraph, nodes, thunks, pre_call_clear, storage_map: 'StorageMapType', input_storage: list['StorageCellType'], output_storage: list['StorageCellType'], update_vars: dict[(Variable, Variable)]):
        super().__init__(fgraph, nodes, thunks, pre_call_clear)
        self.storage_map = storage_map
        self.input_storage = input_storage
        self.output_storage = output_storage
        # Pre-compute, for every updated input, its storage cell together
        # with the index of the output that feeds it.
        pairs = []
        for inp, cell in zip(self.fgraph.inputs, self.input_storage):
            if inp in update_vars:
                pairs.append((cell, self.fgraph.outputs.index(update_vars[inp])))
        self.inp_storage_and_out_idx = tuple(pairs)

    def perform_updates(self) -> list[Any]:
        """Write updated outputs into their input cells and return all outputs."""
        outputs = [cell[0] for cell in self.output_storage]
        for cell, out_idx in self.inp_storage_and_out_idx:
            cell[0] = outputs[out_idx]
        return outputs
class DHTLocalStorage(TimedStorage[(DHTID, Union[(BinaryDHTValue, DictionaryDHTValue)])]):
    """Node-local DHT storage supporting per-key sub-key dictionaries."""

    def store(self, key: DHTID, value: BinaryDHTValue, expiration_time: DHTExpiration, subkey: Optional[Subkey]=None) -> bool:
        """Store a value; when *subkey* is given, store under that sub-key."""
        if subkey is None:
            return super().store(key, value, expiration_time)
        return self.store_subkey(key, subkey, value, expiration_time)

    def store_subkey(self, key: DHTID, subkey: Subkey, value: BinaryDHTValue, expiration_time: DHTExpiration) -> bool:
        """Store *value* under ``key[subkey]``, promoting binary entries to dictionaries."""
        previous_value, previous_expiration_time = self.get(key) or (b'', -float('inf'))
        if isinstance(previous_value, BinaryDHTValue) and expiration_time > previous_expiration_time:
            # Replace the plain binary entry (or the empty sentinel) with a
            # fresh dictionary holding just this sub-key.
            replacement = DictionaryDHTValue()
            replacement.store(subkey, value, expiration_time)
            return super().store(key, replacement, replacement.latest_expiration_time)
        if isinstance(previous_value, DictionaryDHTValue):
            if expiration_time > previous_value.latest_expiration_time:
                # Extend the dictionary entry's own lifetime first.
                super().store(key, previous_value, expiration_time)
            return previous_value.store(subkey, value, expiration_time)
        # Binary entry that is newer than the incoming value: reject.
        return False
class SR(IntEnum):
    """Status-register bit masks (values are masks, not bit indices)."""

    IDTI = 0x0001       # bit 0
    VBUSTI = 0x0002     # bit 1
    SRPI = 0x0004       # bit 2
    VBERRI = 0x0008     # bit 3
    BCERRI = 0x0010     # bit 4
    ROLEEXI = 0x0020    # bit 5
    HNPERRI = 0x0040    # bit 6
    STOI = 0x0080       # bit 7
    VBUSRQ = 0x0200     # bit 9 (bit 8 unused)
    ID = 0x0400         # bit 10
    VBUS = 0x0800       # bit 11
    SPEED = 0x3000      # bits 12-13: two-bit field
    CLKUSABLE = 0x4000  # bit 14
class ModelSection(object):
    """Aggregate one logical model element type into a tidy DataFrame.

    Combines one or more INP sections (plus optional RPT result sections)
    and attaches coordinate geometry of the configured type.

    ``dataframe``/``geojson``/``geodataframe`` are restored as properties:
    the previous code accessed ``self.dataframe`` without calling it
    inside ``geojson`` (passing the bound method to ``write_geojson``),
    which only works when these accessors are properties.
    """

    def __init__(self, model, inp_sections, join_sections=None, rpt_sections=None, columns=None, geomtype='point'):
        """
        Args:
            model: Parent model object exposing ``inp``, ``rpt`` and ``crs``.
            inp_sections: INP section names to concatenate row-wise.
            join_sections: INP section names to join column-wise (optional).
            rpt_sections: RPT section names to join when results exist.
            columns: Optional subset of columns to keep, in order.
            geomtype: One of 'point', 'linestring', 'polygon'.
        """
        self.model = model
        self.inp = self.model.inp
        self.rpt = self.model.rpt
        self.inp_sections = inp_sections
        self.join_sections = join_sections if join_sections is not None else []
        self.rpt_sections = rpt_sections if rpt_sections is not None else []
        self.columns = columns
        self.geomtype = geomtype
        self._df = None  # cache of the most recently built DataFrame

    @property
    def dataframe(self):
        """DataFrame of the element data (rebuilt on each access)."""
        return self.__call__()

    @property
    def geojson(self):
        """GeoJSON representation of the element geometry and data."""
        return write_geojson(self.dataframe, geomtype=self.geomtype)

    @property
    def geodataframe(self):
        """GeoDataFrame with shapely geometry (requires geopandas)."""
        try:
            import geopandas as gp
        except ImportError:
            raise ImportError('geopandas module needed. Install GeoPandas with conda: ', 'conda install geopandas')
        df = self.__call__()
        df['geometry'] = coords_series_to_geometry(df['coords'], geomtype=self.geomtype, dtype='shape')
        df = df.drop(['coords'], axis=1)
        return gp.GeoDataFrame(df, crs=self.model.crs)

    def __call__(self):
        """Build and return the combined DataFrame; also cached on ``self._df``."""
        headers = get_inp_sections_details(self.inp.path)
        dfs = [dataframe_from_inp(self.inp.path, sect) for sect in self.inp_sections if sect.upper() in headers]
        if len(dfs) == 0:
            return pd.DataFrame()
        df = pd.concat(dfs, axis=0, sort=False)
        df = df.rename(index=str)
        for sect in self.join_sections:
            rsuffix = f"_{sect.replace(' ', '_')}"
            df = df.join(dataframe_from_inp(self.inp.path, sect), rsuffix=rsuffix)
        if df.empty:
            return df
        if self.rpt:
            # Attach simulation results when an RPT file is available.
            for rpt_sect in self.rpt_sections:
                df = df.join(dataframe_from_rpt(self.rpt.path, rpt_sect))
        # Attach geometry coordinates appropriate for the element type.
        if self.geomtype == 'point':
            df = df.join(self.inp.coordinates[['X', 'Y']])
            xys = df.apply(lambda r: nodexy(r), axis=1)
            df = df.assign(coords=xys)
        elif self.geomtype == 'linestring':
            xys = df.apply(lambda r: get_link_coords(r, self.inp.coordinates, self.inp.vertices), axis=1)
            df = df.assign(coords=xys.map(lambda x: x[0]))
            df.InletNode = df.InletNode.astype(str)
            df.OutletNode = df.OutletNode.astype(str)
        elif self.geomtype == 'polygon':
            p = self.inp.polygons
            p.index = p.index.map(str)
            xys = p.groupby(by=p.index).apply(lambda r: [(xy['X'], xy['Y']) for ix, xy in r.iterrows()])
            # Close each polygon ring by repeating its first vertex.
            xys = xys.apply(lambda r: r + [r[0]])
            df = df.assign(coords=xys)
        if self.columns is not None:
            df = df[[c for c in self.columns if c in df.columns]]
        self._df = df
        return df
class Monitor(Wrapper):
    """Gym environment wrapper that logs episode reward/length/time to a results file."""

    EXT = 'monitor.csv'
    f = None

    def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
        Wrapper.__init__(self, env=env)
        self.tstart = time.time()
        self.results_writer = ResultsWriter(filename, header={'t_start': time.time(), 'env_id': (env.spec and env.spec.id)}, extra_keys=(reset_keywords + info_keywords))
        self.reset_keywords = reset_keywords
        self.info_keywords = info_keywords
        self.allow_early_resets = allow_early_resets
        self.rewards = None
        self.needs_reset = True
        self.episode_rewards = []
        self.episode_lengths = []
        self.episode_times = []
        self.total_steps = 0
        # kwargs captured at reset time, merged into every episode record.
        self.current_reset_info = {}

    def reset(self, **kwargs):
        """Reset the env, recording the values of all ``reset_keywords``."""
        self.reset_state()
        for key in self.reset_keywords:
            value = kwargs.get(key)
            if value is None:
                raise ValueError('Expected you to pass kwarg %s into reset' % key)
            self.current_reset_info[key] = value
        return self.env.reset(**kwargs)

    def reset_state(self):
        """Begin a new episode, rejecting early resets unless allowed."""
        if not self.allow_early_resets and not self.needs_reset:
            raise RuntimeError('Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)')
        self.rewards = []
        self.needs_reset = False

    def step(self, action):
        """Step the wrapped env and record the transition."""
        if self.needs_reset:
            raise RuntimeError('Tried to step environment that needs reset')
        ob, rew, done, info = self.env.step(action)
        self.update(ob, rew, done, info)
        return (ob, rew, done, info)

    def update(self, ob, rew, done, info):
        """Accumulate reward; on episode end, write one episode record."""
        self.rewards.append(rew)
        if done:
            self.needs_reset = True
            ep_reward = sum(self.rewards)
            ep_length = len(self.rewards)
            epinfo = {'r': round(ep_reward, 6), 'l': ep_length, 't': round(time.time() - self.tstart, 6)}
            for key in self.info_keywords:
                epinfo[key] = info[key]
            self.episode_rewards.append(ep_reward)
            self.episode_lengths.append(ep_length)
            self.episode_times.append(time.time() - self.tstart)
            epinfo.update(self.current_reset_info)
            self.results_writer.write_row(epinfo)
            if isinstance(info, dict):
                info['episode'] = epinfo
        self.total_steps += 1

    def close(self):
        if self.f is not None:
            self.f.close()

    def get_total_steps(self):
        return self.total_steps

    def get_episode_rewards(self):
        return self.episode_rewards

    def get_episode_lengths(self):
        return self.episode_lengths

    def get_episode_times(self):
        return self.episode_times
def get_transform(opt):
    """Build the image transform pipeline described by *opt*.

    Supports resize+crop / crop / scale-width (optionally + crop)
    preprocessing, random horizontal flipping during training, and tensor
    conversion with or without [-1, 1] normalization (``opt.use_binary``).
    """
    pipeline = []
    mode = opt.resize_or_crop
    if mode == 'resize_and_crop':
        load_size = [opt.loadSize, opt.loadSize]
        pipeline.append(transforms.Resize(load_size, Image.BICUBIC))
        pipeline.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'crop':
        pipeline.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'scale_width':
        pipeline.append(transforms.Lambda(lambda img: __scale_width(img, opt.fineSize)))
    elif mode == 'scale_width_and_crop':
        pipeline.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize)))
        pipeline.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        pipeline.append(transforms.RandomHorizontalFlip())
    if opt.use_binary:
        # Binary images: no normalization.
        pipeline.append(transforms.ToTensor())
    else:
        pipeline.extend([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    return transforms.Compose(pipeline)
class MaskRCNNInstanceSegmentationNode(LazyTransport):
    # ROS node: Mask R-CNN instance segmentation over an RGB image topic.
    # Publishes an instance-label image ('~output/label_ins') and a
    # per-instance class/confidence array ('~output/class').
    def __init__(self):
        super().__init__()
        # Index 0 of class_names is background; the model predicts the rest.
        self._class_names = morefusion.datasets.ycb_video.class_names
        # Class ids whose detections are discarded entirely.
        self._blacklist = [5, 10, 12]
        self._one_instance_per_class = True
        # NOTE(review): the download URL literal appears truncated here
        # (only the md5 fragment survives) -- restore the original URL
        # before running; as written this line is not valid Python.
        pretrained_model = gdown.cached_download(url=' md5='f169417a5bab67e8b48337b2a341e890')
        self._model = MaskRCNNFPNResNet50(n_fg_class=len(self._class_names[1:]), pretrained_model=pretrained_model)
        self._model.score_thresh = 0.75
        self._model.to_gpu()
        self._pub_cls = self.advertise('~output/class', ObjectClassArray, queue_size=1)
        self._pub_ins = self.advertise('~output/label_ins', Image, queue_size=1)
        self._post_init()
    def subscribe(self):
        # Start consuming input images (large buff_size avoids ROS queuing lag).
        self._sub = rospy.Subscriber('~input', Image, callback=self.callback, queue_size=1, buff_size=(2 ** 24))
    def unsubscribe(self):
        self._sub.unregister()
    def callback(self, imgmsg):
        # Run inference on one image and publish instance labels + classes.
        bridge = cv_bridge.CvBridge()
        rgb = bridge.imgmsg_to_cv2(imgmsg, desired_encoding='rgb8')
        # Model consumes a float32 CHW image.
        (masks, labels, confs) = self._model.predict([rgb.astype(np.float32).transpose(2, 0, 1)])
        masks = masks[0]
        labels = labels[0]
        confs = confs[0]
        # Predicted labels are foreground-only; shift by one to class ids.
        class_ids = (labels + 1)
        del labels
        if self._blacklist:
            # Drop blacklisted classes.
            keep = (~ np.isin(class_ids, self._blacklist))
            masks = masks[keep]
            class_ids = class_ids[keep]
            confs = confs[keep]
        if (len(class_ids) > 0):
            # Drop detections whose mask contains no pixels.
            keep = (masks.sum(axis=(1, 2)) > 0)
            class_ids = class_ids[keep]
            masks = masks[keep]
            confs = confs[keep]
        if (len(class_ids) > 0):
            # Keep only one detection per class (cf. _one_instance_per_class).
            (uniq, counts) = np.unique(class_ids, return_counts=True)
            keep = []
            for (cls_id, count) in zip(uniq, counts):
                if (count == 1):
                    index = np.argwhere((class_ids == cls_id))[(0, 0)]
                else:
                    # NOTE(review): argmax is taken within the filtered
                    # subset but used as an index into the full arrays --
                    # verify this is the intended behavior.
                    index = np.argmax(confs[(class_ids == cls_id)])
                keep.append(index)
            class_ids = class_ids[keep]
            masks = masks[keep]
            confs = confs[keep]
        if (len(class_ids) > 0):
            # Sort ascending by confidence so higher-confidence masks are
            # painted last and win overlapping pixels below.
            sort = np.argsort(confs)
            class_ids = class_ids[sort]
            masks = masks[sort]
            confs = confs[sort]
        instance_ids = np.arange(0, len(masks))
        # -1 marks background pixels in the instance-label image.
        label_ins = np.full(rgb.shape[:2], (- 1), dtype=np.int32)
        for (ins_id, mask) in zip(instance_ids, masks):
            label_ins[mask] = ins_id
        ins_msg = bridge.cv2_to_imgmsg(label_ins)
        ins_msg.header = imgmsg.header
        self._pub_ins.publish(ins_msg)
        # Publish classes only for instances that still own pixels after
        # overlap resolution.
        instance_ids_active = np.unique(label_ins)
        keep = np.isin(instance_ids, instance_ids_active)
        instance_ids = instance_ids[keep]
        class_ids = class_ids[keep]
        confs = confs[keep]
        cls_msg = ObjectClassArray(header=imgmsg.header)
        for (ins_id, cls_id, conf) in zip(instance_ids, class_ids, confs):
            cls_msg.classes.append(ObjectClass(instance_id=ins_id, class_id=cls_id, confidence=conf))
        self._pub_cls.publish(cls_msg)
def window_by_position(ds: Dataset, *, size: int, step: Optional[int]=None, offset: int=0, variant_contig: Hashable=variables.variant_contig, variant_position: Hashable=variables.variant_position, window_start_position: Optional[Hashable]=None, merge: bool=True) -> Dataset:
    """Add window boundaries to *ds* based on genomic position.

    Windows are either evenly spaced (``size``/``step``/``offset``) or
    anchored at the positions named by ``window_start_position``; the two
    schemes are mutually exclusive.
    """
    if step is not None and window_start_position is not None:
        raise ValueError('Only one of step or window_start_position may be specified')
    # A missing step means non-overlapping windows of width `size`.
    effective_step = step or size
    positions = ds[variant_position].values
    if window_start_position is not None:
        starts = ds[window_start_position].values
    else:
        starts = None
    return _window_per_contig(ds, variant_contig, merge, _get_windows_by_position, size, effective_step, offset, positions, starts)
class AndroguardImp(BaseApkinfo):
    """Androguard-backed implementation of ``BaseApkinfo``.

    The bare ``_cache()`` / ``_cache`` statements in the previous version
    were the residue of stripped decorators; they are restored below.
    NOTE(review): ``_cache`` is assumed to be a memoizing decorator
    (e.g. ``functools.lru_cache``) imported at module level -- confirm
    against the file's import block.
    """

    __slots__ = ('apk', 'dalvikvmformat', 'analysis')

    def __init__(self, apk_filepath: Union[(str, PathLike)]):
        """Run Androguard analysis on an APK or DEX file."""
        super().__init__(apk_filepath, 'androguard')
        if (self.ret_type == 'APK'):
            (self.apk, self.dalvikvmformat, self.analysis) = AnalyzeAPK(apk_filepath)
        elif (self.ret_type == 'DEX'):
            (_, _, self.analysis) = AnalyzeDex(apk_filepath)
        else:
            raise ValueError('Unsupported File type.')

    def permissions(self) -> List[str]:
        """Permissions declared in the manifest (empty for bare DEX input)."""
        if (self.ret_type == 'APK'):
            return self.apk.get_permissions()
        if (self.ret_type == 'DEX'):
            return []

    def application(self) -> XMLElement:
        """The manifest ``<application>`` element (empty list for DEX input)."""
        if (self.ret_type == 'DEX'):
            return []
        manifest_root = self.apk.get_android_manifest_xml()
        return manifest_root.find('application')

    def activities(self) -> List[XMLElement]:
        """All ``<activity>`` elements of the manifest."""
        if (self.ret_type == 'DEX'):
            return []
        manifest_root = self.apk.get_android_manifest_xml()
        application = manifest_root.find('application')
        return application.findall('activity')

    def receivers(self) -> List[XMLElement]:
        """All ``<receiver>`` elements of the manifest."""
        if (self.ret_type == 'DEX'):
            return []
        manifest_root = self.apk.get_android_manifest_xml()
        application = manifest_root.find('application')
        return application.findall('receiver')

    def android_apis(self) -> Set[MethodObject]:
        """Android framework APIs referenced by the app."""
        apis = set()
        for external_cls in self.analysis.get_external_classes():
            for meth_analysis in external_cls.get_methods():
                if meth_analysis.is_android_api():
                    apis.add(meth_analysis)
        return {self._convert_to_method_object(api) for api in apis}

    def custom_methods(self) -> Set[MethodObject]:
        """Methods implemented inside the app itself (non-external)."""
        return {self._convert_to_method_object(meth_analysis) for meth_analysis in self.analysis.get_methods() if (not meth_analysis.is_external())}

    def all_methods(self) -> Set[MethodObject]:
        """Every method known to the analysis."""
        return {self._convert_to_method_object(meth_analysis) for meth_analysis in self.analysis.get_methods()}

    @_cache()
    def find_method(self, class_name: Optional[str]='.*', method_name: Optional[str]='.*', descriptor: Optional[str]='.*') -> List[MethodObject]:
        """Find methods matching class/method/descriptor.

        Literal arguments are regex-escaped; ``'.*'`` (or a falsy
        argument) matches everything.  Method names are anchored.
        """
        if (not class_name):
            class_name = '.*'
        if (class_name != '.*'):
            regex_class_name = re.escape(class_name)
        else:
            regex_class_name = class_name
        if (not method_name):
            method_name = '.*'
        if (method_name != '.*'):
            regex_method_name = f'^{re.escape(method_name)}$'
        else:
            regex_method_name = f'^{method_name}$'
        if (not descriptor):
            descriptor = '.*'
        if (descriptor != '.*'):
            regex_descriptor = re.escape(descriptor)
        else:
            regex_descriptor = descriptor
        method_result = self.analysis.find_methods(classname=regex_class_name, methodname=regex_method_name, descriptor=regex_descriptor)
        return [self._convert_to_method_object(item) for item in method_result]

    @_cache()
    def upperfunc(self, method_object: MethodObject) -> Set[MethodObject]:
        """Methods that call *method_object* (xref-from)."""
        method_analysis = method_object.cache
        return {self._convert_to_method_object(call) for (_, call, _) in method_analysis.get_xref_from()}

    def lowerfunc(self, method_object: MethodObject) -> Set[MethodObject]:
        """(method, offset) pairs that *method_object* calls (xref-to)."""
        method_analysis = method_object.cache
        return {(self._convert_to_method_object(call), offset) for (_, call, offset) in method_analysis.get_xref_to()}

    def get_method_bytecode(self, method_object: MethodObject) -> Set[MethodObject]:
        """Yield a ``BytecodeObject`` per instruction of *method_object*.

        Note: this is a generator despite the declared return annotation.
        External methods have no body; the resulting ``AttributeError`` is
        swallowed and the generator yields nothing.
        """
        method_analysis = method_object.cache
        try:
            for (_, ins) in method_analysis.get_method().get_instructions_idx():
                bytecode_obj = None
                register_list = []
                length_operands = len(ins.get_operands())
                if (length_operands == 0):
                    bytecode_obj = BytecodeObject(ins.get_name(), None, None)
                else:
                    # Scan from the right for the first non-register
                    # operand: everything before it is a register and it
                    # itself is the instruction parameter.
                    index_of_parameter_starts = None
                    for i in range((length_operands - 1), (- 1), (- 1)):
                        if ((not isinstance(ins.get_operands()[i][0], Operand)) or (ins.get_operands()[i][0].name != 'REGISTER')):
                            index_of_parameter_starts = i
                            break
                    if (index_of_parameter_starts is not None):
                        parameter = ins.get_operands()[index_of_parameter_starts]
                        parameter = (parameter[2] if (len(parameter) == 3) else parameter[1])
                        for i in range(index_of_parameter_starts):
                            register_list.append(('v' + str(ins.get_operands()[i][1])))
                    else:
                        parameter = None
                        for i in range(length_operands):
                            register_list.append(('v' + str(ins.get_operands()[i][1])))
                    bytecode_obj = BytecodeObject(ins.get_name(), register_list, parameter)
                (yield bytecode_obj)
        except AttributeError:
            pass

    def get_strings(self) -> str:
        """Set of all string literals found in the analysis."""
        return {str(string_analysis.get_orig_value()) for string_analysis in self.analysis.get_strings()}

    @_cache()
    def _construct_bytecode_instruction(self, instruction):
        """Flatten one instruction into ``[mnemonic, v-registers..., parameter]``."""
        instruction_list = [instruction.get_name()]
        reg_list = []
        length_operands = len(instruction.get_operands())
        if (length_operands == 0):
            return instruction_list
        elif (length_operands == 1):
            reg_list.append(f'v{instruction.get_operands()[(length_operands - 1)][1]}')
            instruction_list.extend(reg_list)
            return instruction_list
        elif (length_operands >= 2):
            # Last operand is the parameter; the rest are registers.
            parameter = instruction.get_operands()[(length_operands - 1)]
            for i in range((length_operands - 1)):
                reg_list.append(('v' + str(instruction.get_operands()[i][1])))
            parameter = (parameter[2] if (len(parameter) == 3) else parameter[1])
            instruction_list.extend(reg_list)
            instruction_list.append(parameter)
            return instruction_list

    @_cache()
    def get_wrapper_smali(self, parent_method: MethodObject, first_method: MethodObject, second_method: MethodObject) -> Dict[(str, Union[(BytecodeObject, str)])]:
        """Locate the two target invocations inside *parent_method*'s smali."""
        method_analysis = parent_method.cache
        result = {'first': None, 'first_hex': None, 'second': None, 'second_hex': None}
        first_method_pattern = PyEval.get_method_pattern(first_method.class_name, first_method.name, first_method.descriptor)
        second_method_pattern = PyEval.get_method_pattern(second_method.class_name, second_method.name, second_method.descriptor)
        for (_, ins) in method_analysis.get_method().get_instructions_idx():
            if (first_method_pattern in str(ins)):
                result['first'] = self._construct_bytecode_instruction(ins)
                result['first_hex'] = ins.get_hex()
            if (second_method_pattern in str(ins)):
                result['second'] = self._construct_bytecode_instruction(ins)
                result['second_hex'] = ins.get_hex()
        return result

    def superclass_relationships(self) -> Dict[(str, Set[str])]:
        """Map each class name to its superclass and implemented interfaces."""
        hierarchy_dict = defaultdict(set)
        for _class in self.analysis.get_classes():
            class_name = str(_class.name)
            hierarchy_dict[class_name].add(str(_class.extends))
            # Bug fix: ``set.union`` returns a new set and the previous
            # code discarded the result; ``update`` adds the interfaces
            # in place, mirroring ``subclass_relationships`` below.
            hierarchy_dict[class_name].update((str(implements) for implements in _class.implements))
        return hierarchy_dict

    def subclass_relationships(self) -> Dict[(str, Set[str])]:
        """Map each superclass/interface name to the classes extending it."""
        hierarchy_dict = defaultdict(set)
        for _class in self.analysis.get_classes():
            class_name = str(_class.name)
            hierarchy_dict[str(_class.extends)].add(class_name)
            for implements in _class.implements:
                hierarchy_dict[str(implements)].add(class_name)
        return hierarchy_dict

    @staticmethod
    @_cache
    def _convert_to_method_object(method_analysis: MethodAnalysis) -> MethodObject:
        """Wrap an Androguard ``MethodAnalysis`` in a ``MethodObject``.

        Restored as ``@staticmethod``: the function takes no ``self`` yet
        is invoked as ``self._convert_to_method_object(...)`` throughout
        this class.
        """
        return MethodObject(access_flags=method_analysis.access, class_name=str(method_analysis.class_name), name=str(method_analysis.name), descriptor=str(method_analysis.descriptor), cache=method_analysis)
class Describe_Column():
    """Unit tests for the docx `_Column` proxy (describe/fixture style).

    NOTE(review): the fixture methods below (`cells_fixture`, `index_fixture`,
    `table_fixture`, `width_get_fixture`, `width_set_fixture`, `_index_`,
    `parent_`, `table_`, `table_prop_`) and the bare `(params=[...])` tuples
    appear to have lost their `@pytest.fixture(...)` decorators when this file
    was flattened — confirm against the original source.
    """

    def it_provides_access_to_its_cells(self, cells_fixture):
        # `.cells` should delegate to the parent table's column_cells().
        (column, column_idx, expected_cells) = cells_fixture
        cells = column.cells
        column.table.column_cells.assert_called_once_with(column_idx)
        assert (cells == expected_cells)

    def it_provides_access_to_the_table_it_belongs_to(self, table_fixture):
        (column, table_) = table_fixture
        assert (column.table is table_)

    def it_knows_its_width_in_EMU(self, width_get_fixture):
        (column, expected_width) = width_get_fixture
        assert (column.width == expected_width)

    def it_can_change_its_width(self, width_set_fixture):
        # Setting `.width` should round-trip and update the gridCol XML.
        (column, value, expected_xml) = width_set_fixture
        column.width = value
        assert (column.width == value)
        assert (column._gridCol.xml == expected_xml)

    def it_knows_its_index_in_table_to_help(self, index_fixture):
        (column, expected_idx) = index_fixture
        assert (column._index == expected_idx)

    def cells_fixture(self, _index_, table_prop_, table_):
        column = _Column(None, None)
        _index_.return_value = column_idx = 4
        expected_cells = (3, 2, 1)
        table_.column_cells.return_value = list(expected_cells)
        return (column, column_idx, expected_cells)

    def index_fixture(self):
        # Second of three gridCol elements -> index 1.
        tbl = element('w:tbl/w:tblGrid/(w:gridCol,w:gridCol,w:gridCol)')
        (gridCol, expected_idx) = (tbl.tblGrid[1], 1)
        column = _Column(gridCol, None)
        return (column, expected_idx)

    def table_fixture(self, parent_, table_):
        column = _Column(None, parent_)
        parent_.table = table_
        return (column, table_)

    # (cxml for gridCol, expected width in EMU) — covers twips, cm, mm, pt and absent width.
    (params=[('w:gridCol{w:w=4242}', 2693670), ('w:gridCol{w:w=1440}', 914400), ('w:gridCol{w:w=2.54cm}', 914400), ('w:gridCol{w:w=54mm}', 1944000), ('w:gridCol{w:w=12.5pt}', 158750), ('w:gridCol', None)])
    def width_get_fixture(self, request):
        (gridCol_cxml, expected_width) = request.param
        column = _Column(element(gridCol_cxml), None)
        return (column, expected_width)

    # (starting cxml, new width value, expected resulting cxml).
    (params=[('w:gridCol', 914400, 'w:gridCol{w:w=1440}'), ('w:gridCol{w:w=4242}', 457200, 'w:gridCol{w:w=720}'), ('w:gridCol{w:w=4242}', None, 'w:gridCol'), ('w:gridCol', None, 'w:gridCol')])
    def width_set_fixture(self, request):
        (gridCol_cxml, new_value, expected_cxml) = request.param
        column = _Column(element(gridCol_cxml), None)
        expected_xml = xml(expected_cxml)
        return (column, new_value, expected_xml)

    def _index_(self, request):
        return property_mock(request, _Column, '_index')

    def parent_(self, request):
        return instance_mock(request, Table)

    def table_(self, request):
        return instance_mock(request, Table)

    def table_prop_(self, request, table_):
        return property_mock(request, _Column, 'table', return_value=table_)
class Statistics():
    """Corpus statistics for post/response dialogue data.

    Each element of `data` is a list whose first item is a post and whose
    remaining items are its responses.
    """

    def __init__(self, data):
        self.data = data
        self.min_length = 5     # shortest sentence kept
        self.max_length = 100   # longest sentence kept
        self.post_num = 0       # posts seen by check_sentence_length
        self.resp_num = 0       # responses seen by check_sentence_length
        self.err_data = 0       # sentences rejected by check_lenth

    def word_freq(self):
        """Segment every sentence, drop stop words, and dump word counts to word_user.txt."""
        seg = pkuseg.pkuseg(model_name='web')
        stopwords = []
        text = []
        new_text = []
        with open('stopwords.txt', 'r') as f:
            # NOTE(review): this reads the stop-word file as ONE string, so the
            # `word not in stopwords` test below is a substring match rather
            # than word membership — confirm intended (splitlines() would give
            # exact-word filtering).
            stopwords = f.read()
        for line in tqdm(self.data):
            (post, resp) = (line[0], line[1:])
            text.extend(seg.cut(post))
            for r in resp:
                text.extend(seg.cut(r))
        for word in text:
            if (word not in stopwords):
                new_text.append(word)
        couter = Counter(new_text)
        print('Start create user_dictionary')
        with open('word_user.txt', 'w') as fout:
            for (k, v) in tqdm(couter.most_common()):
                fout.write((((k + '\t') + str(v)) + '\n'))

    def check_sentence_length(self):
        """Filter sentences by length, histogram the lengths into 10 buckets,
        write the histogram to sentence_length.txt, and return the filtered data.
        """
        bucket_p = {}
        bucket_r = {}
        new_data = []
        # Bucket width: the [min_length, max_length] range split into 10 bins.
        d = ((self.max_length - self.min_length) / 10)
        for line in self.data:
            resps = []
            (post, resp) = (line[0], line[1:])
            self.post_num += 1
            post = self.check_lenth(post)
            k = str(int(((len(post) - self.min_length) / d)))
            bucket_p[k] = ((bucket_p[k] + 1) if (k in bucket_p) else 1)
            for r in resp:
                self.resp_num += 1
                r = self.check_lenth(r)
                k = str(int(((len(r) - self.min_length) / d)))
                bucket_r[k] = ((bucket_r[k] + 1) if (k in bucket_r) else 1)
                if r:
                    resps.append(r)
            # Drop the pair entirely if either side was filtered out.
            if ((not post) or (not resps)):
                continue
            new_data.append(([post] + resps))
        # NOTE(review): 'Pair' prints resp_num a second time — possibly meant
        # len(new_data); confirm before relying on this log line.
        print(('Total Post:%d , Response: %d , Pair: %d , Avg_Pair: %f ' % (self.post_num, self.resp_num, self.resp_num, ((1.0 * self.resp_num) / self.post_num))))
        with open('sentence_length.txt', 'w') as f:
            for kv in sorted(bucket_p.items(), key=(lambda d: int(d[0]))):
                key = kv[0]
                value = kv[1]
                idx = int(key)
                f.write(('Post length %d - %d : %d \n' % ((self.min_length + (idx * d)), ((self.min_length + ((idx + 1) * d)) - 1), value)))
            for kv2 in sorted(bucket_r.items(), key=(lambda d: int(d[0]))):
                key = kv2[0]
                value = kv2[1]
                idx = int(key)
                f.write(('Response length %d - %d : %d \n' % ((self.min_length + (idx * d)), ((self.min_length + ((idx + 1) * d)) - 1), value)))
        self.data = new_data
        return new_data

    def check_lenth(self, sentence):
        """Return `sentence` unchanged when its length is within
        [min_length, max_length]; otherwise log it and return ''.
        """
        if ((len(sentence) < self.min_length) or (len(sentence) > self.max_length)):
            # Bug fix: the original opened err_data.txt with 'w', truncating the
            # log on every rejection so only the LAST bad sentence survived,
            # even though err_data keeps counting; append ('a') preserves all.
            with open('err_data.txt', 'a') as f:
                (f.write('empty data \n') if (len(sentence) == 0) else f.write(('error data: %s, %d\n' % (sentence, len(sentence)))))
            self.err_data += 1
            return ''
        return sentence
class TestDataset(Dataset):
    """KVRET (GLMP) test-split dataset: linearizes each dialogue's KB table and
    pairs it with the constructed dialogue history and the gold response.

    Preprocessed examples are cached at `<cache_root>/kvret_glmp_test.cache`
    and reloaded on later runs when `args.dataset.use_cache` is set.
    """

    def __init__(self, args, raw_datasets, cache_root):
        self.raw_datasets = raw_datasets
        # Table processor: truncates cells/rows and linearizes the KB so the
        # serialized table fits the seq2seq input budget.
        self.tab_processor = get_default_processor(max_cell_length=100, tokenizer=AutoTokenizer.from_pretrained(args.bert.location, use_fast=False), max_input_length=args.seq2seq.table_truncation_max_length)
        cache_path = os.path.join(cache_root, 'kvret_glmp_test.cache')
        if (os.path.exists(cache_path) and args.dataset.use_cache):
            self.extended_data = torch.load(cache_path)
        else:
            self.extended_data = []
            for raw_data in tqdm(self.raw_datasets):
                # Work on a deep copy so the raw dataset is never mutated.
                extend_data = copy.deepcopy(raw_data)
                history = kvret_get_constructed_history(history=extend_data['history'])
                table_context = {'header': extend_data['kb']['header'], 'rows': extend_data['kb']['rows']}
                # Each truncate func mutates table_context in place.
                for truncate_func in self.tab_processor.table_truncate_funcs:
                    truncate_func.truncate_table(table_context, history, [])
                linear_table = self.tab_processor.table_linearize_func.process_table(table_context)
                # Model I/O fields: structured input, text input, target sequence.
                extend_data.update({'struct_in': linear_table.lower(), 'text_in': history.lower(), 'seq_out': extend_data['response'].lower()})
                self.extended_data.append(extend_data)
            if args.dataset.use_cache:
                torch.save(self.extended_data, cache_path)

    def __getitem__(self, index) -> T_co:
        return self.extended_data[index]

    def __len__(self):
        return len(self.extended_data)
def apply_version_to_source_files(repo: Repo, version_declarations: Iterable[VersionDeclarationABC], version: Version, noop: bool=False) -> list[str]:
    """Write *version* into every version-declaration file.

    In noop mode nothing is written; a report of the would-be changes is
    emitted instead.  Returns the declaration paths relative to the repo
    working directory (or the current directory when the repo has none).
    """
    if repo.working_dir is None:
        working_dir = os.getcwd()
    else:
        working_dir = repo.working_dir
    paths = []
    for declaration in version_declarations:
        paths.append(str(declaration.path.resolve().relative_to(working_dir)))
    if not noop:
        log.debug('writing version %s to source paths %s', version, paths)
        for declaration in version_declarations:
            declaration.path.write_text(declaration.replace(new_version=version))
    else:
        noop_report(('would have updated versions in the following paths:' + ''.join((f'''
{path}''' for path in paths))))
    return paths
# NOTE(review): the bare `_ephem` below is likely a marker decorator (e.g.
# `@requires_ephem`) whose `@` was lost when this file was flattened — confirm
# against the original source.
_ephem
def test_get_solarposition_method_pyephem(expected_solpos, golden):
    """get_solarposition(method='pyephem') matches the expected solar position
    fixture for Golden, CO at 2003-10-17 13:30:30 local time."""
    times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30), periods=1, freq='D', tz=golden.tz)
    ephem_data = solarposition.get_solarposition(times, golden.latitude, golden.longitude, method='pyephem')
    expected_solpos.index = times
    # Round both frames to 2 decimals to tolerate small numerical differences
    # between solar-position backends.
    expected_solpos = np.round(expected_solpos, 2)
    ephem_data = np.round(ephem_data, 2)
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
class Profiler(object):
    """Hierarchical wall-clock profiler with verbosity-leveled sections.

    `start(key, level)` records a start time and creates `key` at nesting
    depth `level` of the `final` dict; `stop(key, details)` accumulates the
    elapsed time there and optionally prints it.  Timing is skipped entirely
    when Python runs with -O (both start/stop bodies are guarded by
    `__debug__`).
    """

    def __init__(self, verbose):
        super(Profiler, self).__init__()
        self.initial = {}  # open sections: key -> {'start_time', 'verbose_level'}
        self.verbose = verbose  # print threshold: messages at level <= verbose are shown
        self.final = OrderedDict()  # nested dict of accumulated times per section
        self.relative_time_percentage = {}

    def add_entry(self, dictionary, key, verbose, count):
        """Insert `key` (as an empty dict) at depth `verbose`, recursing through
        the most recently inserted child at each level."""
        if (count == verbose):
            if (key in dictionary):
                return
            else:
                dictionary[key] = {}
                return
        # Descend into the last-inserted entry at this level.
        self.add_entry(dictionary[list(dictionary)[(- 1)]], key, verbose, (count + 1))

    def start(self, key, verbose_level, optional=''):
        """Open a timed section named `key` at nesting depth `verbose_level`;
        print `optional` if verbosity allows."""
        if __debug__:
            self.initial[key] = {'start_time': perf_counter(), 'verbose_level': verbose_level}
            self.add_entry(self.final, key, verbose_level, 1)
            if ((len(optional) != 0) and (self.verbose >= verbose_level)):
                print(optional)

    def add_time(self, dictionary, key, verbose, count, time_calculated):
        """Accumulate `time_calculated` onto `key` at depth `verbose`.

        A leaf that was timed before is a float; a section that has children
        AND its own time stores the time under a 'value' sub-key.
        """
        if (count == verbose):
            executed_more_than_once = 0
            if isinstance(dictionary[key], float):
                # Leaf timed before: add to the running total.
                time_calculated = (dictionary[key] + time_calculated)
                executed_more_than_once = 1
            else:
                try:
                    if (('value' in dictionary[key]) and isinstance(dictionary[key]['value'], float)):
                        # Section with children timed before: add to its 'value'.
                        time_calculated = (dictionary[key]['value'] + time_calculated)
                        executed_more_than_once = 2
                except KeyError:
                    pass
            if (executed_more_than_once == 2):
                dictionary[key]['value'] = time_calculated
            elif (executed_more_than_once == 1):
                dictionary[key] = time_calculated
            elif (len(dictionary[key]) != 0):
                # First time for a section that already has children.
                dictionary[key].update({'value': time_calculated})
            else:
                # First time for a leaf: replace the empty dict with the float.
                dictionary[key] = time_calculated
            return
        self.add_time(dictionary[list(dictionary)[(- 1)]], key, verbose, (count + 1), time_calculated)

    def stop(self, key, details):
        """Close the section `key`, record its elapsed time, and print it when
        verbosity allows."""
        if __debug__:
            items = self.initial.pop(key)
            time_calculated = (perf_counter() - items['start_time'])
            if (items['verbose_level'] == 1):
                self.final = dict(self.final)
            self.add_time(self.final, key, items['verbose_level'], 1, time_calculated)
            if (self.verbose >= items['verbose_level']):
                self._print(items['verbose_level'], details, time_calculated=time_calculated)

    def _print(self, verbose_level, details, time_calculated):
        """Print a timing line, indenting deeper levels with '...'."""
        if (verbose_level == 1):
            print('{0:.2f}s -'.format(time_calculated), details)
        elif (verbose_level >= 2):
            # NOTE(review): `printg` is not defined in this view — presumably a
            # helper defined elsewhere in the file, or a typo for `print`;
            # confirm before relying on verbose_level >= 2 output.
            printg(('...' * (verbose_level - 1)), '{0:.2f}s -'.format(time_calculated), details)
def read_sentence1516_target(file_path, max_offset_len=83):
    """Parse a SemEval-2015/2016 ABSA XML file and yield one example per
    sentence that has at least one non-NULL opinion target.

    Each yielded dict has:
      - 'sentence': lowercased sentence text
      - 'aspect_sentiment': {target -> polarity}
      - 'left_right': (left context, right context, polarity) per target
      - 'offset': (per-token distance weights padded to max_offset_len with
        -1.0, target, polarity) per target

    Args:
        file_path: path to the XML review file.
        max_offset_len: pad length for the per-token offset vector.
    """
    tk = MosesTokenizer()
    with open(file_path, 'rb') as fopen:
        raw = fopen.read()
        root = etree.fromstring(raw)
        for review_xml in root:
            sentences_xml = review_xml.find('sentences')
            for sentence_xml in sentences_xml:
                example = dict()
                example['sentence'] = sentence_xml.find('text').text.lower()
                tokens = tk.tokenize(example['sentence'])
                opinions_xml = sentence_xml.find('Opinions')
                if (opinions_xml is None):
                    continue
                example['aspect_sentiment'] = {}
                example['left_right'] = []
                example['offset'] = []
                for opinion_xml in opinions_xml:
                    target = opinion_xml.attrib['target'].lower()
                    # 'NULL' marks an implicit target; skip those opinions.
                    if (target == 'null'):
                        continue
                    example['aspect_sentiment'][target] = opinion_xml.attrib['polarity']
                    # Character offsets of the target span within the sentence.
                    left_index = int(opinion_xml.attrib['from'])
                    right_index = int(opinion_xml.attrib['to'])
                    example['left_right'].append((example['sentence'][:left_index], example['sentence'][right_index:], opinion_xml.attrib['polarity']))
                    # Token counts of the left/right contexts (re-tokenized).
                    left_word_offset = len(tk.tokenize(example['sentence'][:left_index]))
                    right_word_offset = len(tk.tokenize(example['sentence'][right_index:]))
                    token_index = list(range(len(tokens)))
                    token_length = float(len(token_index))
                    # Weight each token by proximity to the target span:
                    # 0 inside the span, approaching 1 near it, smaller farther away.
                    for i in range(len(tokens)):
                        if (i < left_word_offset):
                            token_index[i] = (1 - ((left_word_offset - token_index[i]) / token_length))
                        elif (i >= (len(tokens) - right_word_offset)):
                            token_index[i] = (1 - (((token_index[i] - (len(tokens) - right_word_offset)) + 1) / token_length))
                        else:
                            token_index[i] = 0
                    # Pad to a fixed length with -1.0 sentinels.
                    token_index += ([(- 1.0)] * (max_offset_len - len(tokens)))
                    example['offset'].append((token_index, target, opinion_xml.attrib['polarity']))
                # Skip sentences whose only targets were NULL.
                if (len(example['aspect_sentiment']) == 0):
                    continue
                (yield example)
class RootEventHandler(EventTarget):
    """Root of an event tree: dispatches events up the target chain, honoring
    pointer capture, synthesizing pointer_enter/leave, and tracking clicks
    (click / double_click synthesis with debounce).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._click_tracker = {}   # pointer_id -> {'count', 'time_stamp', 'target' (weakref or None)}
        self._target_tracker = {}  # pointer_id -> weakref of last pointer_move target

    def dispatch_event(self, event: Event):
        """Deliver `event` to its target and bubble it toward this root."""
        pointer_id = getattr(event, 'pointer_id', None)
        # Pointer capture: reroute the event to the capturing target, if any.
        if ((pointer_id is not None) and (pointer_id in EventTarget.pointer_captures)):
            (captured_target_ref, event_root_ref) = EventTarget.pointer_captures[pointer_id]
            (captured_target, event_root) = (captured_target_ref(), (event_root_ref and event_root_ref()))
            # Capture registered under a different root: not ours to handle.
            if (event_root and (event_root is not self)):
                return
            if captured_target:
                event._retarget(captured_target)
                event.stop_propagation()
        target = (event.target or self)
        if (event.type == EventType.POINTER_MOVE):
            # Synthesize pointer_leave/pointer_enter when the hovered target changes.
            previous_target_ref = self._target_tracker.get(pointer_id)
            previous_target = ((previous_target_ref and previous_target_ref()) or None)
            if (previous_target is not target):
                self._target_tracker[pointer_id] = ((target and ref(target)) or None)
                if (previous_target is not None):
                    ev = event.copy(type='pointer_leave', target=previous_target)
                    self.dispatch_event(ev)
                ev = event.copy(type='pointer_enter')
                self.dispatch_event(ev)
        # Bubble phase: walk up the parent chain until handled or at root.
        while target:
            event._update_current_target(target)
            target.handle_event(event)
            if ((pointer_id is not None) and (pointer_id in EventTarget.pointer_captures)):
                # Capture may have been taken during handling; pin the event there.
                event._retarget(target)
                event.stop_propagation()
                if (event.type == EventType.POINTER_UP):
                    # Implicit capture release on pointer up.
                    captured_target.release_pointer_capture(pointer_id)
            if ((not event.bubbles) or event.cancelled or (target is self)):
                break
            target = (target.parent or self)
        if (event.type == EventType.POINTER_DOWN):
            # Count rapid same-target presses for multi-click detection.
            tracked_click = self._click_tracker.get(pointer_id)
            if (tracked_click and (((tracked_click['target'] is not None) and (tracked_click['target']() is not None) and (tracked_click['target']() is event.target)) or ((tracked_click['target'] is None) and (event.target is None))) and ((event.time_stamp - tracked_click['time_stamp']) < CLICK_DEBOUNCE)):
                tracked_click['count'] += 1
                tracked_click['time_stamp'] = event.time_stamp
            else:
                self._click_tracker[pointer_id] = {'count': 1, 'time_stamp': event.time_stamp, 'target': ((event.target and ref(event.target)) or None)}
        elif (event.type == EventType.POINTER_UP):
            # Pointer released on the same target it went down on: emit click,
            # and double_click on the second click of a burst.
            tracked_click = self._click_tracker.get(pointer_id)
            if (tracked_click and (((tracked_click['target'] is not None) and (tracked_click['target']() is not None) and (tracked_click['target']() is event.target)) or ((tracked_click['target'] is None) and (event.target is None)))):
                ev = event.copy(type='click', clicks=tracked_click['count'])
                self.dispatch_event(ev)
                if (tracked_click['count'] == 2):
                    double_ev = event.copy(type='double_click', clicks=tracked_click['count'])
                    self.dispatch_event(double_ev)
class Env(object):
    """Base environment class (early OpenAI-gym style).

    Subclasses override the underscore hooks (`_step`, `_reset`, `_render`,
    `_seed`, `_close`, `_configure`); the public methods wrap them with
    monitoring and configuration bookkeeping.

    NOTE(review): `monitor` and `unwrapped` are used as attributes elsewhere
    in this class (`self.monitor._before_step`), so they were almost certainly
    decorated with `@property` in the original source — the decorators appear
    to have been stripped when this file was flattened; confirm.
    """

    def __new__(cls, *args, **kwargs):
        # Register with the global closer and initialize bookkeeping flags
        # before __init__ runs, so they exist even for subclasses that forget
        # to call super().__init__().
        env = super(Env, cls).__new__(cls)
        env._env_closer_id = env_closer.register(env)
        env._closed = False
        env._configured = False
        env._unwrapped = None
        env.spec = None
        return env

    # Supported render modes; subclasses extend this.
    metadata = {'render.modes': []}
    reward_range = ((- np.inf), np.inf)

    def _close(self):
        pass

    def _configure(self):
        pass

    # Set by subclasses to describe the environment's interface.
    action_space = None
    observation_space = None

    def _step(self, action):
        raise NotImplementedError

    def _reset(self):
        raise NotImplementedError

    def _render(self, mode='human', close=False):
        if close:
            return
        raise NotImplementedError

    def _seed(self, seed=None):
        return []

    # Whether close() should also shut down rendering.
    _owns_render = True

    def monitor(self):
        # Lazily create the monitor on first access.
        if (not hasattr(self, '_monitor')):
            self._monitor = monitoring.Monitor(self)
        return self._monitor

    def step(self, action):
        """Run one timestep, routing through the monitor's before/after hooks."""
        self.monitor._before_step(action)
        (observation, reward, done, info) = self._step(action)
        done = self.monitor._after_step(observation, reward, done, info)
        return (observation, reward, done, info)

    def reset(self):
        """Reset the environment, auto-configuring first unless the env
        declares that manual configure() is required."""
        if (self.metadata.get('configure.required') and (not self._configured)):
            raise error.Error("{} requires manually calling 'configure()' before 'reset()'".format(self))
        elif (not self._configured):
            self.configure()
        self.monitor._before_reset()
        observation = self._reset()
        self.monitor._after_reset(observation)
        return observation

    def render(self, mode='human', close=False):
        """Render the environment, validating the requested mode against metadata."""
        if close:
            return self._render(close=close)
        modes = self.metadata.get('render.modes', [])
        if (len(modes) == 0):
            raise error.UnsupportedMode('{} does not support rendering (requested mode: {})'.format(self, mode))
        elif (mode not in modes):
            raise error.UnsupportedMode('Unsupported rendering mode: {}. (Supported modes for {}: {})'.format(mode, self, modes))
        return self._render(mode=mode, close=close)

    def close(self):
        """Shut down the monitor, renderer, and subclass resources; idempotent."""
        if ((not hasattr(self, '_closed')) or self._closed):
            return
        if hasattr(self, '_monitor'):
            self.monitor.close()
        if self._owns_render:
            self.render(close=True)
        self._close()
        env_closer.unregister(self._env_closer_id)
        self._closed = True

    def seed(self, seed=None):
        return self._seed(seed)

    def configure(self, *args, **kwargs):
        """Run subclass configuration once; annotate TypeErrors with the spec id."""
        self._configured = True
        try:
            self._configure(*args, **kwargs)
        except TypeError as e:
            if self.spec:
                # reraise() is presumably a helper defined elsewhere in this
                # module that re-raises the active exception with a suffix.
                reraise(suffix='(for {})'.format(self.spec.id))
            else:
                raise

    def unwrapped(self):
        # Wrappers set _unwrapped to the innermost env; plain envs return self.
        if (self._unwrapped is not None):
            return self._unwrapped
        else:
            return self

    def __del__(self):
        self.close()

    def __str__(self):
        return '<{} instance>'.format(type(self).__name__)
def _infer_column(data) -> Union[(ta.BaseColumn, Unresolved, None)]:
    """Infer a torcharrow column (with its Velox type) from a plain Python list.

    Returns None for data=None, Unresolved when the element type cannot be
    determined (all-None data, or unresolvable nested lists), otherwise a
    populated ta column.  Supports nested lists (array columns), dicts (map
    columns), and int/float/str/bool scalars.
    """
    if (data is None):
        return None
    assert isinstance(data, list)
    # Use the first non-None element as the type witness.
    non_null_item = next((item for item in data if (item is not None)), None)
    if (non_null_item is None):
        # All nulls: element type unknown.
        return Unresolved()
    elif isinstance(non_null_item, list):
        # List of lists -> array column; unify the element types of all sublists.
        inferred_columns = [_infer_column(item) for item in data]
        union_type = get_union_type(inferred_columns)
        if (union_type is None):
            return Unresolved()
        elif isinstance(union_type, Unresolved):
            return UnresolvedArray(union_type)
        else:
            resolved_item_type = union_type
            col = ta.Column(ta.VeloxArrayType(resolved_item_type))
            for (item_col, item) in zip(inferred_columns, data):
                if (item is None):
                    resolved_item_col = None
                elif isinstance(item_col, Unresolved):
                    # Re-infer this sublist against the unified element type.
                    resolved_item_col = resolve_column(item, resolved_item_type)
                else:
                    resolved_item_col = item_col
                if (resolved_item_col is None):
                    col.append_null()
                else:
                    col.append(resolved_item_col)
            return col
    elif isinstance(non_null_item, dict):
        # List of dicts -> map column: infer key and value types via parallel
        # lists of keys and values.
        keys_array = []
        values_array = []
        for item in data:
            if (item is None):
                keys_array.append(None)
                values_array.append(None)
            elif isinstance(item, dict):
                keys_array.append(list(item.keys()))
                values_array.append(list(item.values()))
            else:
                raise ValueError('non-dict item in dict list')
        inferred_keys_array_columns = _infer_column(keys_array)
        inferred_values_array_columns = _infer_column(values_array)
        keys_array_type = inferred_keys_array_columns.type()
        values_array_type = inferred_values_array_columns.type()
        if (isinstance(keys_array_type, ta.VeloxArrayType) and isinstance(values_array_type, ta.VeloxArrayType)):
            col = ta.Column(ta.VeloxMapType(keys_array_type.element_type(), values_array_type.element_type()))
            for item in data:
                if (item is None):
                    col.append_null()
                else:
                    # Build one key column and one value column per map entry set.
                    key_col = ta.Column(keys_array_type.element_type())
                    value_col = ta.Column(values_array_type.element_type())
                    for (key, value) in item.items():
                        key_col.append(key)
                        if (value is None):
                            value_col.append_null()
                        else:
                            value_col.append(value)
                    col.append(key_col, value_col)
            return col
        else:
            raise NotImplementedError()
    else:
        # Scalar leaf: map the Python type to the Velox scalar type.
        type_ = {int: ta.VeloxType_BIGINT(), float: ta.VeloxType_REAL(), str: ta.VeloxType_VARCHAR(), bool: ta.VeloxType_BOOLEAN()}.get(type(non_null_item))
        if (type_ is None):
            raise NotImplementedError(f'Cannot infer {type(non_null_item)}')
        else:
            col = ta.Column(type_)
            for item in data:
                if (item is None):
                    col.append_null()
                else:
                    col.append(item)
            return col
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
    """Load a TF 2.x h5 checkpoint into *pt_model* via a temporary TF model.

    Builds the TF counterpart of ``pt_model`` (same class name prefixed with
    ``TF``), runs a forward pass so its variables exist, loads the checkpoint
    by layer name, then transfers the weights into the PyTorch model.
    """
    try:
        import tensorflow as tf
        import torch
    except ImportError:
        logger.error('Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see and for installation instructions.')
        raise
    import transformers
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info('Loading TensorFlow weights from {}'.format(tf_checkpoint_path))
    # Instantiate the TF-side twin of the PyTorch model class.
    tf_model_class_name = ('TF' + pt_model.__class__.__name__)
    tf_model = getattr(transformers, tf_model_class_name)(pt_model.config)
    if (tf_inputs is None):
        tf_inputs = tf_model.dummy_inputs
    if (tf_inputs is not None):
        # A forward pass builds the network so variables exist before loading.
        tfo = tf_model(tf_inputs, training=False)
    tf_model.load_weights(tf_checkpoint_path, by_name=True)
    return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
class BluezAgentManagerAPI(ABC):
    """Typed proxy description of the BlueZ `org.bluez.AgentManager1` D-Bus API.

    The method bodies are stubs: the real implementations come from the D-Bus
    proxy object returned by `connect`.

    NOTE(review): `connect` takes `cls`, so it was almost certainly decorated
    with `@classmethod` (and the D-Bus methods possibly with an interface
    decorator) in the original source — the decorators appear to have been
    stripped when this file was flattened; confirm.
    """

    name = 'org.bluez'  # D-Bus bus name
    interface = 'org.bluez.AgentManager1'  # D-Bus interface name
    path = ObjPath('/org/bluez')  # object path of the agent manager

    def connect(cls) -> 'BluezAgentManagerAPI':
        """Return a proxy for the agent manager on the system bus."""
        return cast(BluezAgentManagerAPI, SystemBus().get_proxy(cls.name, cls.path))

    def RegisterAgent(self, agent: ObjPath, capability: Str) -> None:
        """Register a pairing agent object (stub)."""
        pass

    def RequestDefaultAgent(self, agent: ObjPath) -> None:
        """Make a previously registered agent the default (stub)."""
        pass

    def UnregisterAgent(self, agent: ObjPath) -> None:
        """Unregister a previously registered agent (stub)."""
        pass
class LookupTest(resources.SysPathSetup, unittest.TestCase):
    """Tests for astroid name lookup (`lookup`/`ilookup`) across scopes:
    modules, classes, functions, comprehensions, lambdas, and builtins."""

    def setUp(self) -> None:
        super().setUp()
        # Pre-built modules used as lookup fixtures throughout the tests.
        self.module = resources.build_file('data/module.py', 'data.module')
        self.module2 = resources.build_file('data/module2.py', 'data.module2')
        self.nonregr = resources.build_file('data/nonregr.py', 'data.nonregr')

    def test_limit(self) -> None:
        # Lookup sees only the relevant assignment(s), not every rebinding.
        code = '\n l = [a\n for a,b in list]\n\n a = 1\n b = a\n a = None\n\n def func():\n c = 1\n '
        astroid = builder.parse(code, __name__)
        a = next(astroid.nodes_of_class(nodes.Name))
        self.assertEqual(a.lineno, 2)
        self.assertEqual(len(astroid.lookup('b')[1]), 1)
        self.assertEqual(len(astroid.lookup('a')[1]), 1)
        b = astroid.locals['b'][0]
        stmts = a.lookup('a')[1]
        self.assertEqual(len(stmts), 1)
        self.assertEqual(b.lineno, 6)
        b_infer = b.infer()
        b_value = next(b_infer)
        self.assertEqual(b_value.value, 1)
        # Exactly one inferred value.
        self.assertRaises(StopIteration, functools.partial(next, b_infer))
        func = astroid.locals['func'][0]
        self.assertEqual(len(func.lookup('c')[1]), 1)

    def test_module(self) -> None:
        # Module scope falls back to builtins; unknown names raise.
        astroid = builder.parse('pass', __name__)
        none = next(astroid.ilookup('None'))
        self.assertIsNone(none.value)
        obj = next(astroid.ilookup('object'))
        self.assertIsInstance(obj, nodes.ClassDef)
        self.assertEqual(obj.name, 'object')
        self.assertRaises(InferenceError, functools.partial(next, astroid.ilookup('YOAA')))
        self.assertEqual(len(list(self.nonregr.ilookup('enumerate'))), 2)

    def test_class_ancestor_name(self) -> None:
        # In `class A(A)`, the base-name resolves to the FIRST A definition.
        code = '\n class A:\n pass\n\n class A(A):\n pass\n '
        astroid = builder.parse(code, __name__)
        cls1 = astroid.locals['A'][0]
        cls2 = astroid.locals['A'][1]
        name = next(cls2.nodes_of_class(nodes.Name))
        self.assertEqual(next(name.infer()), cls1)

    def test_method(self) -> None:
        method = self.module['YOUPI']['method']
        my_dict = next(method.ilookup('MY_DICT'))
        self.assertTrue(isinstance(my_dict, nodes.Dict), my_dict)
        none = next(method.ilookup('None'))
        self.assertIsNone(none.value)
        self.assertRaises(InferenceError, functools.partial(next, method.ilookup('YOAA')))

    def test_function_argument_with_default(self) -> None:
        make_class = self.module2['make_class']
        base = next(make_class.ilookup('base'))
        self.assertTrue(isinstance(base, nodes.ClassDef), base.__class__)
        self.assertEqual(base.name, 'YO')
        self.assertEqual(base.root().name, 'data.module')

    def test_class(self) -> None:
        klass = self.module['YOUPI']
        my_dict = next(klass.ilookup('MY_DICT'))
        self.assertIsInstance(my_dict, nodes.Dict)
        none = next(klass.ilookup('None'))
        self.assertIsNone(none.value)
        obj = next(klass.ilookup('object'))
        self.assertIsInstance(obj, nodes.ClassDef)
        self.assertEqual(obj.name, 'object')
        self.assertRaises(InferenceError, functools.partial(next, klass.ilookup('YOAA')))

    def test_inner_classes(self) -> None:
        ddd = list(self.nonregr['Ccc'].ilookup('Ddd'))
        self.assertEqual(ddd[0].name, 'Ddd')

    def test_loopvar_hiding(self) -> None:
        # A for-loop target shadows a prior assignment for names after the loop.
        astroid = builder.parse("\n x = 10\n for x in range(5):\n print (x)\n\n if x > 0:\n print ('#' * x)\n ", __name__)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'x')]
        self.assertEqual(len(xnames[0].lookup('x')[1]), 1)
        self.assertEqual(len(xnames[1].lookup('x')[1]), 2)
        self.assertEqual(len(xnames[2].lookup('x')[1]), 2)

    def test_list_comps(self) -> None:
        # Comprehension variables are scoped to their own comprehension.
        astroid = builder.parse('\n print ([ i for i in range(10) ])\n print ([ i for i in range(10) ])\n print ( list( i for i in range(10) ) )\n ', __name__)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'i')]
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 2)
        self.assertEqual(len(xnames[1].lookup('i')[1]), 1)
        self.assertEqual(xnames[1].lookup('i')[1][0].lineno, 3)
        self.assertEqual(len(xnames[2].lookup('i')[1]), 1)
        self.assertEqual(xnames[2].lookup('i')[1][0].lineno, 4)

    def test_list_comp_target(self) -> None:
        # The comprehension target does not leak into the enclosing scope.
        astroid = builder.parse('\n ten = [ var for var in range(10) ]\n var\n ')
        var = astroid.body[1].value
        self.assertRaises(NameInferenceError, var.inferred)

    def test_dict_comps(self) -> None:
        astroid = builder.parse('\n print ({ i: j for i in range(10) for j in range(10) })\n print ({ i: j for i in range(10) for j in range(10) })\n ', __name__)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'i')]
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 2)
        self.assertEqual(len(xnames[1].lookup('i')[1]), 1)
        self.assertEqual(xnames[1].lookup('i')[1][0].lineno, 3)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'j')]
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 2)
        self.assertEqual(len(xnames[1].lookup('i')[1]), 1)
        self.assertEqual(xnames[1].lookup('i')[1][0].lineno, 3)

    def test_set_comps(self) -> None:
        astroid = builder.parse('\n print ({ i for i in range(10) })\n print ({ i for i in range(10) })\n ', __name__)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'i')]
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 2)
        self.assertEqual(len(xnames[1].lookup('i')[1]), 1)
        self.assertEqual(xnames[1].lookup('i')[1][0].lineno, 3)

    def test_set_comp_closure(self) -> None:
        astroid = builder.parse('\n ten = { var for var in range(10) }\n var\n ')
        var = astroid.body[1].value
        self.assertRaises(NameInferenceError, var.inferred)

    def test_list_comp_nested(self) -> None:
        # Inner comprehension can see the outer comprehension's variable.
        astroid = builder.parse('\n x = [[i + j for j in range(20)]\n for i in range(10)]\n ', __name__)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'i')]
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 3)

    def test_dict_comp_nested(self) -> None:
        astroid = builder.parse("\n x = {i: {i: j for j in range(20)}\n for i in range(10)}\n x3 = [{i + j for j in range(20)} # Can't do nested sets\n for i in range(10)]\n ", __name__)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'i')]
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 3)
        self.assertEqual(len(xnames[1].lookup('i')[1]), 1)
        self.assertEqual(xnames[1].lookup('i')[1][0].lineno, 3)

    def test_set_comp_nested(self) -> None:
        astroid = builder.parse("\n x = [{i + j for j in range(20)} # Can't do nested sets\n for i in range(10)]\n ", __name__)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'i')]
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 3)

    def test_lambda_nested(self) -> None:
        # A nested lambda closes over the outer lambda's parameter.
        astroid = builder.parse('\n f = lambda x: (\n lambda y: x + y)\n ')
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'x')]
        self.assertEqual(len(xnames[0].lookup('x')[1]), 1)
        self.assertEqual(xnames[0].lookup('x')[1][0].lineno, 2)

    def test_function_nested(self) -> None:
        astroid = builder.parse('\n def f1(x):\n def f2(y):\n return x + y\n\n return f2\n ')
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'x')]
        self.assertEqual(len(xnames[0].lookup('x')[1]), 1)
        self.assertEqual(xnames[0].lookup('x')[1][0].lineno, 2)

    def test_class_variables(self) -> None:
        # Class-body names are NOT visible from methods, lambdas, or comprehensions.
        astroid = builder.parse('\n class A:\n a = 10\n\n def f1(self):\n return a # a is not defined\n\n f2 = lambda: a # a is not defined\n\n b = [a for _ in range(10)] # a is not defined\n\n class _Inner:\n inner_a = a + 1\n ')
        names = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'a')]
        self.assertEqual(len(names), 4)
        for name in names:
            self.assertRaises(NameInferenceError, name.inferred)

    def test_class_in_function(self) -> None:
        # Function locals ARE visible from a class defined inside the function.
        astroid = builder.parse('\n def f():\n x = 10\n class A:\n a = x\n\n def f1(self):\n return x\n\n f2 = lambda: x\n\n b = [x for _ in range(10)]\n\n class _Inner:\n inner_a = x + 1\n ')
        names = [n for n in astroid.nodes_of_class(nodes.Name) if (n.name == 'x')]
        self.assertEqual(len(names), 5)
        for name in names:
            self.assertEqual(len(name.lookup('x')[1]), 1, repr(name))
            self.assertEqual(name.lookup('x')[1][0].lineno, 3, repr(name))

    def test_generator_attributes(self) -> None:
        # A generator object exposes the standard generator protocol attributes.
        tree = builder.parse('\n def count():\n "test"\n yield 0\n\n iterer = count()\n num = iterer.next()\n ')
        next_node = tree.body[2].value.func
        gener = next_node.expr.inferred()[0]
        self.assertIsInstance(gener.getattr('__next__')[0], nodes.FunctionDef)
        self.assertIsInstance(gener.getattr('send')[0], nodes.FunctionDef)
        self.assertIsInstance(gener.getattr('throw')[0], nodes.FunctionDef)
        self.assertIsInstance(gener.getattr('close')[0], nodes.FunctionDef)

    def test_explicit___name__(self) -> None:
        code = '\n class Pouet:\n __name__ = "pouet"\n p1 = Pouet()\n\n class PouetPouet(Pouet): pass\n p2 = Pouet()\n\n class NoName: pass\n p3 = NoName()\n '
        astroid = builder.parse(code, __name__)
        p1 = next(astroid['p1'].infer())
        self.assertTrue(p1.getattr('__name__'))
        p2 = next(astroid['p2'].infer())
        self.assertTrue(p2.getattr('__name__'))
        self.assertTrue(astroid['NoName'].getattr('__name__'))
        p3 = next(astroid['p3'].infer())
        self.assertRaises(AttributeInferenceError, p3.getattr, '__name__')

    def test_function_module_special(self) -> None:
        # __path__ is implicitly defined for package __init__ modules.
        astroid = builder.parse('\n def initialize(linter):\n """initialize linter with checkers in this package """\n package_load(linter, __path__[0])\n ', 'data.__init__')
        path = next((n for n in astroid.nodes_of_class(nodes.Name) if (n.name == '__path__')))
        self.assertEqual(len(path.lookup('__path__')[1]), 1)

    def test_builtin_lookup(self) -> None:
        self.assertEqual(nodes.builtin_lookup('__dict__')[1], ())
        intstmts = nodes.builtin_lookup('int')[1]
        self.assertEqual(len(intstmts), 1)
        self.assertIsInstance(intstmts[0], nodes.ClassDef)
        self.assertEqual(intstmts[0].name, 'int')
        self.assertIs(intstmts[0], nodes.const_factory(1)._proxied)

    def test_decorator_arguments_lookup(self) -> None:
        # A decorator argument referencing a class attribute resolves to it.
        code = '\n def decorator(value):\n def wrapper(function):\n return function\n return wrapper\n\n class foo:\n member = 10 #\n\n (member) #This will cause pylint to complain\n def test(self):\n pass\n '
        node = builder.extract_node(code, __name__)
        assert isinstance(node, nodes.Assign)
        member = node.targets[0]
        it = member.infer()
        obj = next(it)
        self.assertIsInstance(obj, nodes.Const)
        self.assertEqual(obj.value, 10)
        self.assertRaises(StopIteration, functools.partial(next, it))

    def test_inner_decorator_member_lookup(self) -> None:
        code = '\n class FileA:\n def decorator(bla):\n return bla\n\n __(decorator)\n def funcA():\n return 4\n '
        decname = builder.extract_node(code, __name__)
        it = decname.infer()
        obj = next(it)
        self.assertIsInstance(obj, nodes.FunctionDef)
        self.assertRaises(StopIteration, functools.partial(next, it))

    def test_static_method_lookup(self) -> None:
        code = '\n class FileA:\n \n def funcA():\n return 4\n\n\n class Test:\n FileA = [1,2,3]\n\n def __init__(self):\n print (FileA.funcA())\n '
        astroid = builder.parse(code, __name__)
        it = astroid['Test']['__init__'].ilookup('FileA')
        obj = next(it)
        self.assertIsInstance(obj, nodes.ClassDef)
        self.assertRaises(StopIteration, functools.partial(next, it))

    def test_global_delete(self) -> None:
        # NOTE(review): the lookups below use 'Frobbel' while the code defines
        # 'Frobble' — if that is a typo the test is vacuous (an undefined name
        # also yields zero statements); confirm against upstream.
        code = '\n def run2():\n f = Frobble()\n\n class Frobble:\n pass\n Frobble.mumble = True\n\n del Frobble\n\n def run1():\n f = Frobble()\n '
        astroid = builder.parse(code, __name__)
        stmts = astroid['run2'].lookup('Frobbel')[1]
        self.assertEqual(len(stmts), 0)
        stmts = astroid['run1'].lookup('Frobbel')[1]
        self.assertEqual(len(stmts), 0)
def handler(ql: Qiling):
    """Dispatch a DOS INT 21h call to the leaf handler selected by the AH register.

    Raises NotImplementedError (after logging) for AH values that have no
    implemented leaf.
    """
    service = ql.arch.regs.ah
    # AH value -> leaf handler.
    dispatch = {
        2: __leaf_02,
        6: __leaf_02,
        9: __leaf_09,
        12: __leaf_0c,
        37: __leaf_25,
        38: __leaf_26,
        48: __leaf_30,
        51: __leaf_33,
        53: __leaf_35,
        60: __leaf_3c,
        61: __leaf_3d,
        62: __leaf_3e,
        63: __leaf_3f,
        64: __leaf_40,
        65: __leaf_41,
        67: __leaf_43,
        76: __leaf_4c,
    }
    leaf = dispatch.get(service)
    if leaf is None:
        ql.log.exception(f'leaf {service:02x}h of INT 21h is not implemented')
        raise NotImplementedError()
    leaf(ql)
def pytest_generate_tests(metafunc: Metafunc) -> None:
    """Collect factory_boy 'related' fixture names from every fixturedef used by
    this test and append them to the test's fixture list so they get set up."""
    extra_fixtures: list[str] = []
    for fixturedefs in metafunc._arg2fixturedefs.values():
        # Only the most recent (last) definition of each fixture is active.
        active_def = fixturedefs[-1]
        extra_fixtures.extend(getattr(active_def.func, '_factoryboy_related', []))
    metafunc.fixturenames.extend(extra_fixtures)
class HNCMTrainer(object):
    """Training/validation driver for an HNCM model.

    Owns the model, criterion, and an Adam(AMSGrad) optimizer; provides
    `train_step` (forward + backward + update) and `valid_step` (forward only).
    """

    def __init__(self, args, model, criterion):
        self.args = args
        self.model = model
        self.criterion = criterion
        # Adam with AMSGrad, matching the configured learning rate.
        self.optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-08, amsgrad=True)
        self._num_updates = 0
        if args.cuda:
            self.model = self.model.cuda()
            self.criterion = self.criterion.cuda()

    def train_step(self, sample, objective='MLE'):
        """Run one optimization step on *sample*; returns (loss, logging outputs)."""
        loss, log_outputs = self._forward(sample)
        self._backward(loss)
        return (loss, log_outputs)

    def _forward(self, sample, eval=False):
        """Compute the loss for *sample*, normalized by the number of target sequences."""
        self.model.eval() if eval else self.model.train()
        self.optimizer.zero_grad()
        lprobs = self.model(sample['src_seq'], sample['src_lengths'], sample['trg_seq'], sample['fact_seq'], sample['fact_lengths'])
        target = sample['target']
        # Flatten (batch, time, vocab) -> (batch*time, vocab) for the criterion.
        flat_lprobs = lprobs.contiguous().view((- 1), lprobs.size((- 1)))
        flat_target = target.contiguous().view((- 1))
        loss = self.criterion(flat_lprobs, flat_target) / sample['num_trg_seq']
        logging_outputs = {'loss': loss, 'nsample': sample['target'].size(0)}
        return (loss, logging_outputs)

    def _backward(self, loss):
        """Backpropagate, optionally clip gradients, and apply the optimizer step.

        Returns the gradient norm (clipped or computed manually).
        """
        loss.backward()
        if self.args.clip_norm > 0:
            grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_norm)
        else:
            # No clipping configured: compute the global grad norm by hand.
            grad_norm = math.sqrt(sum((p.grad.data.norm() ** 2) for p in self.model.parameters()))
        self.optimizer.step()
        self._num_updates += 1
        return grad_norm

    def valid_step(self, sample):
        """Evaluate *sample* without updating parameters; returns (loss, logging outputs)."""
        return self._forward(sample, eval=True)
class WarmUPScheduler(LRScheduler):
    """Learning-rate schedule that prepends a warm-up phase to a normal one.

    The per-epoch rates of ``warmup`` and ``normal`` (their ``lr_spaces``
    arrays) are concatenated, so training walks through the warm-up rates
    first and then continues with the regular schedule.
    """

    def __init__(self, optimizer, warmup, normal, epochs=50, last_epoch=-1):
        # NOTE(review): `epochs` is accepted but unused here — presumably kept
        # for signature parity with sibling schedulers; confirm.
        warm_lrs = warmup.lr_spaces
        normal_lrs = normal.lr_spaces
        self.lr_spaces = np.concatenate([warm_lrs, normal_lrs])
        # The first rate of the normal phase is exposed as the nominal start LR.
        self.start_lr = normal_lrs[0]
        super().__init__(optimizer, last_epoch)
def test_function_complex() -> None:
    """Everything after a bare `return` — including the loop — is unreachable."""
    # NOTE(review): the embedded snippet's indentation had been collapsed and
    # would not parse; restored so `return` precedes the loop and `continue`
    # precedes the print, matching the expected block sets asserted below.
    src = """
    def func(n) -> None:
        return
        for j in range(1, 10):
            continue
            print(j)
    """
    cfg = build_cfg(src, is_function=True)
    unreachable, reachable = extract_blocks(cfg)
    assert {'j', 'range(1, 10)', 'continue', 'print(j)'} == unreachable
    assert {'n', '', 'return'} == reachable
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.