@pytest.mark.parametrize(('use_ci', 'expected_message'), ((True, f"- AssertionError: {('this_failed' * 100)}"), (False, '- AssertionError: this_failedt...')), ids=('on CI', 'not on CI'))
def test_fail_extra_reporting(pytester: Pytester, monkeypatch, use_ci: bool, expected_message: str) -> None:
if use_ci:
monkeypatch.setenv('CI', 'true')
else:
monkeypatch.delenv('CI', raising=False)
monkeypatch.setenv('COLUMNS', '80')
pytester.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
result = pytester.runpytest('-rN')
result.stdout.no_fnmatch_line('*short test summary*')
result = pytester.runpytest()
result.stdout.fnmatch_lines(['*test summary*', f'FAILED test_fail_extra_reporting.py::test_this {expected_message}']) |
class MemEffAttention(Attention):
def forward(self, x: Tensor, attn_bias=None) -> Tensor:
if (not XFORMERS_AVAILABLE):
assert (attn_bias is None), 'xFormers is required for nested tensors usage'
return super().forward(x)
(B, N, C) = x.shape
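        # project to qkv in one linear pass, then split into heads: (B, N, 3, num_heads, C // num_heads)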
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, (C // self.num_heads))
(q, k, v) = unbind(qkv, 2)
if (attn_bias is not None):
self_att_op = fmha.MemoryEfficientAttentionFlashAttentionOp
else:
self_att_op = None
x = memory_efficient_attention(q, k, v, attn_bias=attn_bias, op=self_att_op)
x = x.reshape([B, N, C])
x = self.proj(x)
x = self.proj_drop(x)
return x |
def setUpModule():
global cell, myadc, kadc
cell = gto.Cell()
cell.build(a='\n 0.000000 1.783500 1.783500\n 1.783500 0.000000 1.783500\n 1.783500 1.783500 0.000000\n ', atom='C 1.337625 1.337625 1.337625; C 2.229375 2.229375 2.229375', verbose=5, output='/dev/null', basis='gth-dzv', pseudo='gth-pade')
nmp = [1, 1, 3]
supcell = super_cell(cell, nmp)
mf = scf.RHF(supcell, exxdiv=None).density_fit()
ehf = mf.kernel()
myadc = mol_adc.RADC(mf)
myadc.approx_trans_moments = True
kpts = cell.make_kpts(nmp)
kpts -= kpts[0]
kmf = scf.KRHF(cell, kpts, exxdiv=None).density_fit().run()
kadc = adc.KRADC(kmf) |
def scan_tqdm(n: int, message: typing.Optional[str]=None) -> typing.Callable:
(_update_progress_bar, close_tqdm) = build_tqdm(n, message)
def _scan_tqdm(func):
def wrapper_progress_bar(carry, x):
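            # x is either the bare iteration index or a tuple whose first element is the index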
if (type(x) is tuple):
(iter_num, *_) = x
else:
iter_num = x
_update_progress_bar(iter_num)
result = func(carry, x)
return close_tqdm(result, iter_num)
return wrapper_progress_bar
return _scan_tqdm |
def test_adding_nonwrappers_trylast3(hc: HookCaller, addmeth: AddMeth) -> None:
    @addmeth()
def he_method1_a() -> None:
pass
    @addmeth(trylast=True)
def he_method1_b() -> None:
pass
    @addmeth()
def he_method1_c() -> None:
pass
    @addmeth(trylast=True)
def he_method1_d() -> None:
pass
assert (funcs(hc.get_hookimpls()) == [he_method1_d, he_method1_b, he_method1_a, he_method1_c]) |
class PixelNormLayer(nn.Module):
def __init__(self, eps=1e-08):
super(PixelNormLayer, self).__init__()
self.eps = eps
def forward(self, x):
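        # normalize each spatial position by the RMS of its feature vector (PGGAN-style pixel norm)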
        return (x / torch.sqrt((torch.mean((x ** 2), dim=1, keepdim=True) + self.eps)))
def __repr__(self):
return (self.__class__.__name__ + ('(eps = %s)' % self.eps)) |
class Solution(object):
def isBalanced(self, root):
if (root is None):
return True
if (self.getDepth(root) < 0):
return False
return True
def getDepth(self, node):
if (node is None):
return 1
ld = self.getDepth(node.left)
if (ld < 0):
return (- 1)
rd = self.getDepth(node.right)
if (rd < 0):
return (- 1)
elif (abs((ld - rd)) > 1):
return (- 1)
else:
return (max(ld, rd) + 1) |
@register_train('inference-only')
def inference_only(loggers, loaders, model, optimizer=None, scheduler=None):
num_splits = len(loggers)
split_names = ['train', 'val', 'test']
perf = [[] for _ in range(num_splits)]
cur_epoch = 0
start_time = time.perf_counter()
for i in range(0, num_splits):
eval_epoch(loggers[i], loaders[i], model, split=split_names[i])
perf[i].append(loggers[i].write_epoch(cur_epoch))
best_epoch = 0
best_train = best_val = best_test = ''
if (cfg.metric_best != 'auto'):
m = cfg.metric_best
if (m in perf[0][best_epoch]):
best_train = f'train_{m}: {perf[0][best_epoch][m]:.4f}'
else:
best_train = f'train_{m}: {0:.4f}'
best_val = f'val_{m}: {perf[1][best_epoch][m]:.4f}'
best_test = f'test_{m}: {perf[2][best_epoch][m]:.4f}'
logging.info(f"> Inference | train_loss: {perf[0][best_epoch]['loss']:.4f} {best_train} val_loss: {perf[1][best_epoch]['loss']:.4f} {best_val} test_loss: {perf[2][best_epoch]['loss']:.4f} {best_test}")
logging.info(f'Done! took: {(time.perf_counter() - start_time):.2f}s')
for logger in loggers:
logger.close() |
def test_create_user_successful(settings, requests_mock):
settings.PLAIN_API = '
requests_mock.post(settings.PLAIN_API, json={'data': {'upsertCustomer': {'result': 'UPDATED', 'customer': {'id': 'c_ABC25904A1DA4E0AF2'}, 'error': None}}})
user = UserFactory(name='Ester', full_name='Ester', email='', username='')
customer_id = create_customer(user)
assert (customer_id == 'c_ABC25904A1DA4E0AF2') |
class TrajectoryReplayPool(ReplayPool):
def __init__(self, observation_space, action_space, max_size):
super(TrajectoryReplayPool, self).__init__()
max_size = int(max_size)
self._max_size = max_size
self._trajectories = deque(maxlen=max_size)
self._trajectory_lengths = deque(maxlen=max_size)
self._num_samples = 0
self._trajectories_since_save = 0
    @property
    def num_trajectories(self):
return len(self._trajectories)
    @property
    def size(self):
return sum(self._trajectory_lengths)
    @property
    def num_samples(self):
return self._num_samples
def add_paths(self, trajectories):
self._trajectories += trajectories
self._trajectory_lengths += [trajectory[next(iter(trajectory.keys()))].shape[0] for trajectory in trajectories]
self._trajectories_since_save += len(trajectories)
def add_path(self, trajectory):
self.add_paths([trajectory])
def add_sample(self, sample):
raise NotImplementedError('{} only supports adding full paths at once.'.format(self.__class__.__name__))
def add_samples(self, samples):
raise NotImplementedError('{} only supports adding full paths at once.'.format(self.__class__.__name__))
def batch_by_indices(self, episode_indices, step_indices, field_name_filter=None):
assert (len(episode_indices) == len(step_indices))
batch_size = len(episode_indices)
trajectories = [self._trajectories[i] for i in episode_indices]
batch = {field_name: np.empty((batch_size, *values.shape[1:]), dtype=values.dtype) for (field_name, values) in trajectories[0].items()}
for (i, episode) in enumerate(trajectories):
for (field_name, episode_values) in episode.items():
batch[field_name][i] = episode_values[step_indices[i]]
return batch
def random_batch(self, batch_size, *args, **kwargs):
num_trajectories = len(self._trajectories)
if (num_trajectories < 1):
return {}
trajectory_lengths = np.array(self._trajectory_lengths)
trajectory_weights = (trajectory_lengths / np.sum(trajectory_lengths))
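        # weights already sum to 1; the extra softmax flattens them towards a uniform distribution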
trajectory_probabilities = softmax(trajectory_weights)
trajectory_indices = np.random.choice(np.arange(num_trajectories), size=batch_size, replace=True, p=trajectory_probabilities)
first_key = next(iter(self._trajectories[trajectory_indices[0]].keys()))
trajectory_lengths = np.array([self._trajectories[trajectory_index][first_key].shape[0] for trajectory_index in trajectory_indices])
step_indices = random_int_with_variable_range(np.zeros_like(trajectory_lengths, dtype=np.int64), trajectory_lengths)
batch = self.batch_by_indices(trajectory_indices, step_indices)
return batch
def last_n_batch(self, last_n, field_name_filter=None, **kwargs):
num_trajectories = len(self._trajectories)
if (num_trajectories < 1):
return {}
trajectory_indices = []
step_indices = []
trajectory_lengths = 0
for trajectory_index in range((num_trajectories - 1), (- 1), (- 1)):
trajectory = self._trajectories[trajectory_index]
trajectory_length = trajectory[list(trajectory.keys())[0]].shape[0]
steps_from_this_episode = min(trajectory_length, (last_n - trajectory_lengths))
step_indices += list(range((trajectory_length - 1), ((trajectory_length - steps_from_this_episode) - 1), (- 1)))
trajectory_indices += ([trajectory_index] * steps_from_this_episode)
trajectory_lengths += trajectory_length
if (trajectory_lengths >= last_n):
break
trajectory_indices = trajectory_indices[::(- 1)]
step_indices = step_indices[::(- 1)]
batch = self.batch_by_indices(trajectory_indices, step_indices)
return batch
def save_latest_experience(self, pickle_path):
num_trajectories = self.num_trajectories
start_index = max((num_trajectories - self._trajectories_since_save), 0)
end_index = num_trajectories
latest_trajectories = tuple(islice(self._trajectories, start_index, end_index))
with gzip.open(pickle_path, 'wb') as f:
pickle.dump(latest_trajectories, f)
self._trajectories_since_save = 0
def load_experience(self, experience_path):
with gzip.open(experience_path, 'rb') as f:
latest_trajectories = pickle.load(f)
self.add_paths(latest_trajectories)
self._trajectories_since_save = 0 |
def main(seed: int=0, method: str='exact', batch_size: int=100, n_batch: int=200, num_init: int=200, dtype: str='double', output: str=None, problem: str=None, acqf: str='ts', use_full: bool=False, num_inducing: int=500, loss: str='pll', tree_depth: int=4, dim: int=30):
dtype = (torch.double if (dtype == 'double') else torch.float)
device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
torch.random.manual_seed(seed)
NUM_RESTARTS = 5
RAW_SAMPLES = 256
if (problem == 'rover'):
from rover_function import create_large_domain
def l2cost(x, point):
return (10 * np.linalg.norm((x - point), 1))
domain = create_large_domain(force_start=False, force_goal=False, start_miss_cost=l2cost, goal_miss_cost=l2cost, n_points=dim)
n_points = domain.traj.npoints
raw_x_range = np.repeat(domain.s_range, n_points, axis=1)
bounded_fn_callable = (lambda X: torch.stack([torch.tensor((domain(x.cpu().numpy()) + 5.0)) for x in X]).to(X))
fn_callable = (lambda X: bounded_fn_callable(((X * 1.2) - 0.1)))
bounds = torch.tensor(raw_x_range, dtype=dtype, device=device)
bounds = torch.zeros(2, raw_x_range.shape[(- 1)], dtype=dtype, device=device)
bounds[1] = 1.0
dim = bounds.shape[(- 1)]
num_batches = 0
N_CANDIDATES = min(5000, max(2000, (200 * dim)))
timing_list = []
while (num_batches < (n_batch + 2)):
next_x = ((torch.rand(num_init, bounds.shape[(- 1)], device=device, dtype=dtype) * (bounds[1] - bounds[0])) + bounds[0])
next_obj = fn_callable(next_x).unsqueeze((- 1))
if (use_full and (num_batches > 0)):
train_x = torch.cat((train_x, next_x))
train_obj = torch.cat((train_obj, next_obj))
else:
train_x = next_x
train_obj = next_obj
num_batches += 1
if (num_batches == 1):
best_observed_value = [train_obj.max().item()]
state = TurboState(bounds.shape[(- 1)], batch_size=batch_size)
while (not state.restart_triggered):
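            # inner TuRBO loop: refit the surrogate, propose a batch, and update the trust region until a restart triggers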
start = time.time()
(mll_gibbon, model) = initialize_model(train_x, train_obj, None, method=method, use_input_transform=False, use_outcome_transform=True, num_inducing=num_inducing, loss=loss)
optimizer_kwargs = {'maxiter': 1000}
fit_gpytorch_torch(mll_gibbon, options=optimizer_kwargs)
X_next = generate_batch(state=state, model=model, X=train_x, Y=train_obj, batch_size=batch_size, n_candidates=N_CANDIDATES, num_restarts=NUM_RESTARTS, raw_samples=RAW_SAMPLES, acqf=acqf, tree_depth=tree_depth)
Y_next = fn_callable(X_next).unsqueeze((- 1))
state = update_state(state=state, Y_next=Y_next)
end = time.time()
train_x = torch.cat((train_x, X_next), dim=0)
train_obj = torch.cat((train_obj, Y_next), dim=0)
mem = (torch.cuda.memory_allocated(device) / (1024 ** 3))
print(f'Iter {num_batches}: {len(train_x)}) Best value: {state.best_value:.2e}, TR length: {state.length:.2e}, Mem used {mem:.2e}')
best_observed_value.append(state.best_value)
num_batches += 1
timing_list.append((end - start))
if ((num_batches % 10) == 0):
output_dict = {'trbo': best_observed_value}
torch.save({'iters': num_batches, 'times': timing_list, 'results': output_dict}, output)
if (num_batches > (n_batch + 2)):
break
output_dict = {'trbo': best_observed_value}
return (output_dict, timing_list) |
def aes_decrypt(word, key=config.aes_key, iv=None, input='base64', padding=True, padding_style='pkcs7', mode=AES.MODE_CBC, no_packb=False):
if ((iv is None) and (not no_packb)):
(word, iv) = umsgpack.unpackb(word)
if no_packb:
input = input.lower()
if (input == 'base64'):
word = base64.decodebytes(word)
elif (input == 'hex'):
word = a2b_hex(word)
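    # ECB and CTR modes take no IV; all other modes are constructed with one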
if (mode in [AES.MODE_ECB, AES.MODE_CTR]):
aes = AES.new(key, mode)
else:
aes = AES.new(key, mode, iv)
word = aes.decrypt(word)
if (not no_packb):
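        # strip trailing bytes one at a time until the msgpack payload parses cleanly (removes block-cipher padding)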
while word:
try:
return umsgpack.unpackb(word)
except umsgpack.ExtraData:
word = word[:(- 1)]
elif padding:
return unpad(word, AES.block_size, padding_style).decode('utf-8') |
def test_swipe_corner_case():
def __test(x, fs, hopsize, otype):
pysptk.swipe(x, fs, hopsize, otype=otype)
np.random.seed(98765)
fs = 16000
x = np.random.rand(16000)
with pytest.raises(ValueError):
__test(x, fs, 80, (- 1))
with pytest.raises(ValueError):
__test(x, fs, 80, 3)
with pytest.raises(ValueError):
__test(x, fs, 80, 'ff0') |
def _set_partitions(collection):
collection = list(collection)
if (not collection):
return
if (len(collection) == 1):
(yield [collection])
return
first = collection[0]
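    # place 'first' into each existing subset of every partition of the rest, or as its own singleton subset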
    for smaller in _set_partitions(collection[1:]):
for (n, subset) in enumerate(smaller):
(yield ((smaller[:n] + [([first] + subset)]) + smaller[(n + 1):]))
(yield ([[first]] + smaller)) |
class CaptureLastExpression(ast.NodeTransformer):
def __init__(self, tree: ast.AST, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tree = tree
self.last_node = list(ast.iter_child_nodes(tree))[(- 1)]
def visit_Expr(self, node: ast.Expr) -> (ast.Expr | ast.Assign):
if (node is not self.last_node):
return node
log.trace('Found a trailing last expression in the evaluation code')
log.trace('Creating assignment statement with trailing expression as the right-hand side')
right_hand_side = next(iter(ast.iter_child_nodes(node)))
assignment = ast.Assign(targets=[ast.Name(id='_value_last_expression', ctx=ast.Store())], value=right_hand_side, lineno=node.lineno, col_offset=0)
ast.fix_missing_locations(assignment)
return assignment
def capture(self) -> ast.AST:
if (not isinstance(self.last_node, ast.Expr)):
return self.tree
new_tree = self.visit(self.tree)
return ast.fix_missing_locations(new_tree) |
class PreSeparableConv2d(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=1, padding='', act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, first_act=True):
super(PreSeparableConv2d, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer=act_layer)
self.kernel_size = kernel_size
self.dilation = dilation
self.norm = (norm_act_layer(in_chs, inplace=True) if first_act else nn.Identity())
self.conv_dw = create_conv2d(in_chs, in_chs, kernel_size, stride=stride, padding=padding, dilation=dilation, depthwise=True)
self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1)
def forward(self, x):
x = self.norm(x)
x = self.conv_dw(x)
x = self.conv_pw(x)
return x |
@pytest.mark.usefixtures('include_test_etc')
class TestSceneResampling():
def _fake_resample_dataset(self, dataset, dest_area, **kwargs):
return dataset.copy()
def _fake_resample_dataset_force_20x20(self, dataset, dest_area, **kwargs):
data = np.zeros((20, 20))
attrs = dataset.attrs.copy()
attrs['area'] = dest_area
return xr.DataArray(data, dims=('y', 'x'), attrs=attrs)
    @mock.patch('satpy.scene.resample_dataset')
    @pytest.mark.parametrize('datasets', [None, ('comp13', 'ds5', 'ds2')])
def test_resample_scene_copy(self, rs, datasets):
from pyresample.geometry import AreaDefinition
rs.side_effect = self._fake_resample_dataset_force_20x20
proj_str = '+proj=lcc +datum=WGS84 +ellps=WGS84 +lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs'
area_def = AreaDefinition('test', 'test', 'test', proj_str, 5, 5, ((- 1000.0), (- 1500.0), 1000.0, 1500.0))
area_def.get_area_slices = mock.MagicMock()
scene = Scene(filenames=['fake1_1.txt', 'fake1_highres_1.txt'], reader='fake1')
scene.load(['comp19'])
new_scene = scene.resample(area_def, datasets=datasets)
new_scene['new_ds'] = new_scene['comp19'].copy()
scene.load(['ds1'])
comp19_node = scene._dependency_tree['comp19']
ds5_mod_id = make_dataid(name='ds5', modifiers=('res_change',))
ds5_node = scene._dependency_tree[ds5_mod_id]
comp13_node = scene._dependency_tree['comp13']
assert (comp13_node.data[1][0] is comp19_node.data[1][0])
assert (comp13_node.data[1][0] is ds5_node)
pytest.raises(KeyError, scene._dependency_tree.__getitem__, 'new_ds')
loaded_ids = list(scene.keys())
assert (len(loaded_ids) == 4)
for name in ('comp13', 'ds5', 'ds2', 'ds1'):
assert any(((x['name'] == name) for x in loaded_ids))
loaded_ids = list(new_scene.keys())
assert (len(loaded_ids) == 2)
assert (loaded_ids[0] == make_cid(name='comp19'))
assert (loaded_ids[1] == make_cid(name='new_ds'))
    @mock.patch('satpy.scene.resample_dataset')
def test_resample_scene_preserves_requested_dependencies(self, rs):
from pyresample.geometry import AreaDefinition
from pyresample.utils import proj4_str_to_dict
rs.side_effect = self._fake_resample_dataset
proj_dict = proj4_str_to_dict('+proj=lcc +datum=WGS84 +ellps=WGS84 +lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs')
area_def = AreaDefinition('test', 'test', 'test', proj_dict, 5, 5, ((- 1000.0), (- 1500.0), 1000.0, 1500.0))
area_def.get_area_slices = mock.MagicMock()
scene = Scene(filenames=['fake1_1.txt'], reader='fake1')
scene.load(['comp26', 'comp14'], generate=False)
scene.resample(area_def, unload=True)
new_scene_2 = scene.resample(area_def, unload=True)
assert ('comp14' not in scene)
assert ('comp26' not in scene)
assert ('comp14' in new_scene_2)
assert ('comp26' in new_scene_2)
assert ('ds1' not in new_scene_2)
    @mock.patch('satpy.scene.resample_dataset')
def test_resample_reduce_data_toggle(self, rs):
from pyresample.geometry import AreaDefinition
rs.side_effect = self._fake_resample_dataset_force_20x20
proj_str = '+proj=lcc +datum=WGS84 +ellps=WGS84 +lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs'
target_area = AreaDefinition('test', 'test', 'test', proj_str, 4, 4, ((- 1000.0), (- 1500.0), 1000.0, 1500.0))
area_def = AreaDefinition('test', 'test', 'test', proj_str, 5, 5, ((- 1000.0), (- 1500.0), 1000.0, 1500.0))
area_def.get_area_slices = mock.MagicMock()
get_area_slices = area_def.get_area_slices
get_area_slices.return_value = (slice(0, 3, None), slice(0, 3, None))
area_def_big = AreaDefinition('test', 'test', 'test', proj_str, 10, 10, ((- 1000.0), (- 1500.0), 1000.0, 1500.0))
area_def_big.get_area_slices = mock.MagicMock()
get_area_slices_big = area_def_big.get_area_slices
get_area_slices_big.return_value = (slice(0, 6, None), slice(0, 6, None))
scene = Scene(filenames=['fake1_1.txt'], reader='fake1')
scene.load(['comp19'])
scene['comp19'].attrs['area'] = area_def
scene['comp19_big'] = xr.DataArray(da.zeros((10, 10)), dims=('y', 'x'), attrs=scene['comp19'].attrs.copy())
scene['comp19_big'].attrs['area'] = area_def_big
scene['comp19_copy'] = scene['comp19'].copy()
orig_slice_data = scene._slice_data
test_order = [make_cid(**scene['comp19'].attrs), make_cid(**scene['comp19_big'].attrs), make_cid(**scene['comp19_copy'].attrs)]
with mock.patch('satpy.scene.Scene._slice_data') as slice_data, mock.patch('satpy.dataset.dataset_walker') as ds_walker:
ds_walker.return_value = test_order
slice_data.side_effect = orig_slice_data
scene.resample(target_area, reduce_data=False)
slice_data.assert_not_called()
get_area_slices.assert_not_called()
scene.resample(target_area)
assert (slice_data.call_count == 3)
assert (get_area_slices.call_count == 1)
assert (get_area_slices_big.call_count == 1)
scene.resample(target_area, reduce_data=True)
assert (slice_data.call_count == (2 * 3))
assert (get_area_slices.call_count == 2)
assert (get_area_slices_big.call_count == 2)
def test_resample_ancillary(self):
from pyresample.geometry import AreaDefinition
from pyresample.utils import proj4_str_to_dict
proj_dict = proj4_str_to_dict('+proj=lcc +datum=WGS84 +ellps=WGS84 +lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs')
area_def = AreaDefinition('test', 'test', 'test', proj_dict, 5, 5, ((- 1000.0), (- 1500.0), 1000.0, 1500.0))
scene = Scene(filenames=['fake1_1.txt'], reader='fake1')
scene.load(['comp19', 'comp20'])
scene['comp19'].attrs['area'] = area_def
scene['comp19'].attrs['ancillary_variables'] = [scene['comp20']]
scene['comp20'].attrs['area'] = area_def
dst_area = AreaDefinition('dst', 'dst', 'dst', proj_dict, 2, 2, ((- 1000.0), (- 1500.0), 0.0, 0.0))
new_scene = scene.resample(dst_area)
assert (new_scene['comp20'] is new_scene['comp19'].attrs['ancillary_variables'][0])
def test_resample_multi_ancillary(self):
from pyresample import create_area_def
sc = Scene()
n = 5
ar = create_area_def('a', 4087, resolution=1000, center=(0, 0), shape=(n, n))
anc_vars = [xr.DataArray((np.arange((n * n)).reshape(n, n) * i), dims=('y', 'x'), attrs={'name': f'anc{i:d}', 'area': ar}) for i in range(2)]
sc['test'] = xr.DataArray(np.arange((n * n)).reshape(n, n), dims=('y', 'x'), attrs={'area': ar, 'name': 'test', 'ancillary_variables': anc_vars})
subset = create_area_def('b', 4087, resolution=800, center=(0, 0), shape=((n - 1), (n - 1)))
ls = sc.resample(subset)
assert ([av.attrs['name'] for av in sc['test'].attrs['ancillary_variables']] == [av.attrs['name'] for av in ls['test'].attrs['ancillary_variables']])
def test_resample_reduce_data(self):
from pyresample.geometry import AreaDefinition
proj_str = '+proj=lcc +datum=WGS84 +ellps=WGS84 +lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs'
area_def = AreaDefinition('test', 'test', 'test', proj_str, 20, 20, ((- 1000.0), (- 1500.0), 1000.0, 1500.0))
scene = Scene(filenames=['fake1_1.txt'], reader='fake1')
scene.load(['comp19'])
scene['comp19'].attrs['area'] = area_def
dst_area = AreaDefinition('dst', 'dst', 'dst', proj_str, 20, 20, ((- 1000.0), (- 1500.0), 0.0, 0.0))
new_scene1 = scene.resample(dst_area, reduce_data=False)
new_scene2 = scene.resample(dst_area)
new_scene3 = scene.resample(dst_area, reduce_data=True)
assert (new_scene1['comp19'].shape == (20, 20, 3))
assert (new_scene2['comp19'].shape == (20, 20, 3))
assert (new_scene3['comp19'].shape == (20, 20, 3))
    @mock.patch('satpy.scene.resample_dataset')
def test_no_generate_comp10(self, rs):
from pyresample.geometry import AreaDefinition
from pyresample.utils import proj4_str_to_dict
rs.side_effect = self._fake_resample_dataset
proj_dict = proj4_str_to_dict('+proj=lcc +datum=WGS84 +ellps=WGS84 +lon_0=-95. +lat_0=25 +lat_1=25 +units=m +no_defs')
area_def = AreaDefinition('test', 'test', 'test', proj_dict, 200, 400, ((- 1000.0), (- 1500.0), 1000.0, 1500.0))
scene = Scene(filenames=['fake1_1.txt'], reader='fake1')
scene.load(['comp10'], generate=False)
assert any(((ds_id['name'] == 'comp10') for ds_id in scene._wishlist))
assert ('comp10' not in scene)
assert (len(scene._datasets) == 2)
assert (len(scene.missing_datasets) == 1)
new_scn = scene.resample(area_def, generate=False)
assert ('comp10' not in scene)
assert (len(scene._datasets) == 2)
assert (len(scene.missing_datasets) == 1)
new_scn._generate_composites_from_loaded_datasets()
assert any(((ds_id['name'] == 'comp10') for ds_id in new_scn._wishlist))
assert ('comp10' in new_scn)
assert (not new_scn.missing_datasets)
new_scn = scene.resample(area_def)
assert any(((ds_id['name'] == 'comp10') for ds_id in new_scn._wishlist))
assert ('comp10' in new_scn)
assert (not new_scn.missing_datasets)
def test_comp_loading_after_resampling_existing_sensor(self):
scene = Scene(filenames=['fake1_1.txt'], reader='fake1')
scene.load(['ds1', 'ds2'])
new_scn = scene.resample(resampler='native')
with pytest.raises(KeyError):
new_scn.load(['ds3'])
new_scn.load(['comp2'])
assert ('comp2' in new_scn)
def test_comp_loading_after_resampling_new_sensor(self):
scene1 = Scene(filenames=['fake2_3ds_1.txt'], reader='fake2_3ds')
scene1.load(['ds2'])
new_scn = scene1.resample(resampler='native')
with pytest.raises(KeyError):
new_scn.load(['ds3'])
with pytest.raises(KeyError):
new_scn.load(['comp2'])
user_da = scene1['ds2'].copy()
user_da.attrs['name'] = 'ds1'
user_da.attrs['sensor'] = {'fake_sensor2'}
new_scn['ds1'] = user_da
with pytest.raises(KeyError):
new_scn.load(['comp2'])
assert ('comp2' not in new_scn)
user_da = scene1['ds2'].copy()
user_da.attrs['name'] = 'ds1'
user_da.attrs['sensor'] = {'fake_sensor'}
new_scn['ds1'] = user_da
new_scn.load(['comp2'])
assert ('comp2' in new_scn)
def test_comp_loading_multisensor_composite_created_user(self):
scene1 = Scene(filenames=['fake1_1.txt'], reader='fake1')
scene1.load(['ds1'])
scene2 = Scene(filenames=['fake4_1.txt'], reader='fake4')
scene2.load(['ds4_b'])
scene3 = Scene()
scene3['ds1'] = scene1['ds1']
scene3['ds4_b'] = scene2['ds4_b']
scene3.load(['comp_multi'])
assert ('comp_multi' in scene3)
def test_comps_need_resampling_optional_mod_deps(self):
scene = Scene(filenames=['fake1_1.txt'], reader='fake1')
scene.load(['comp27', 'ds13'])
assert ('comp27' not in scene)
assert ('ds13' in scene)
new_scene = scene.resample(resampler='native')
assert (len(list(new_scene.keys())) == 2)
assert ('comp27' in new_scene)
assert ('ds13' in new_scene) |
class FreeFormatLine(Block):
    @classmethod
    def deserialize_values(cls, line, version_dialect):
format = cls.format(version_dialect)
values = line.split(None, (len(format) - 1))
values_weeded = []
for (x, v) in zip(format, values):
if isinstance(x, bytes):
if (v.upper() != x):
raise DeserializeError(('expected keyword: %s, found %s' % (x, v.upper())))
else:
if isinstance(x, tuple):
(x, (parse, _)) = x
v = parse(v)
values_weeded.append((x, v))
values_weeded.sort()
return [str(xv[1].decode('ascii')) for xv in values_weeded]
    @classmethod
    def deserialize(cls, line, version_dialect):
values = cls.deserialize_values(line, version_dialect)
propnames = cls.T.propnames
stuff = dict(zip(propnames, values))
return cls.regularized(**stuff)
def serialize(self, version_dialect):
names = self.T.propnames
props = self.T.properties
out = []
for x in self.format(version_dialect):
if isinstance(x, bytes):
out.append(x.decode('ascii'))
else:
if isinstance(x, tuple):
(x, (_, string)) = x
v = string(getattr(self, names[(x - 1)]))
else:
v = getattr(self, names[(x - 1)])
if (v is None):
break
out.append(props[(x - 1)].to_save(v))
return ' '.join(out).encode('ascii') |
class CompositeMetricAggregator(SupportsCompositeMetricAggregation):
def __init__(self, reduce_mode: ReduceMode=ReduceMode.SUM):
if (reduce_mode not in set(ReduceMode)):
raise ValueError(f'Reduce mode {reduce_mode} not implemented.')
self.reduce_mode = reduce_mode
def aggregate_scenes(self, scene_composite_metric_results: Dict[(int, Dict[(str, float)])]) -> Dict[(str, Any)]:
if (self.reduce_mode == ReduceMode.SUM):
aggregation: DefaultDict[(str, float)] = defaultdict(float)
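            # sum each validator's scalar metric across all scenes, then convert to tensors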
for (_, cm_dict) in scene_composite_metric_results.items():
for (validator_name, cm_output) in cm_dict.items():
aggregation[validator_name] += cm_output
aggregation_torch = {k: torch.as_tensor(v) for (k, v) in aggregation.items()}
return aggregation_torch
return {}
def aggregate(self, scene_composite_metric_results: Dict[(int, Dict[(str, float)])]) -> Dict[(str, Any)]:
agg_scenes = self.aggregate_scenes(scene_composite_metric_results)
return agg_scenes |
class TestPluginManager(unittest.TestCase):
def setUp(self):
self.pm = qiime2.sdk.PluginManager()
self.plugin = get_dummy_plugin()
self.other_plugin = self.pm.plugins['other-plugin']
def test_plugins(self):
plugins = self.pm.plugins
exp = {'dummy-plugin': self.plugin, 'other-plugin': self.other_plugin}
self.assertEqual(plugins, exp)
def test_validators(self):
self.assertEqual({Kennel[Dog], Kennel[Cat], AscIntSequence, Squid, Octopus, Cuttlefish}, set(self.pm.validators))
self.assertEqual(set([r.validator for r in self.pm.validators[Kennel[Dog]]._validators]), {validator_example_null1, validator_example_null2})
self.assertEqual([r.validator for r in self.pm.validators[Kennel[Cat]]._validators], [validator_example_null1])
self.assertEqual([r.validator for r in self.pm.validators[AscIntSequence]._validators], [validate_ascending_seq])
def test_type_fragments(self):
types = self.pm.type_fragments
exp = {'IntSequence1': SemanticTypeRecord(semantic_type=IntSequence1, plugin=self.plugin), 'IntSequence2': SemanticTypeRecord(semantic_type=IntSequence2, plugin=self.plugin), 'IntSequence3': SemanticTypeRecord(semantic_type=IntSequence3, plugin=self.plugin), 'Mapping': SemanticTypeRecord(semantic_type=Mapping, plugin=self.plugin), 'FourInts': SemanticTypeRecord(semantic_type=FourInts, plugin=self.plugin), 'Kennel': SemanticTypeRecord(semantic_type=Kennel, plugin=self.plugin), 'Dog': SemanticTypeRecord(semantic_type=Dog, plugin=self.plugin), 'Cat': SemanticTypeRecord(semantic_type=Cat, plugin=self.plugin), 'SingleInt': SemanticTypeRecord(semantic_type=SingleInt, plugin=self.plugin), 'C1': SemanticTypeRecord(semantic_type=C1, plugin=self.plugin), 'C2': SemanticTypeRecord(semantic_type=C2, plugin=self.plugin), 'C3': SemanticTypeRecord(semantic_type=C3, plugin=self.plugin), 'Foo': SemanticTypeRecord(semantic_type=Foo, plugin=self.plugin), 'Bar': SemanticTypeRecord(semantic_type=Bar, plugin=self.plugin), 'Baz': SemanticTypeRecord(semantic_type=Baz, plugin=self.plugin), 'AscIntSequence': SemanticTypeRecord(semantic_type=AscIntSequence, plugin=self.plugin), 'Squid': SemanticTypeRecord(semantic_type=Squid, plugin=self.plugin), 'Octopus': SemanticTypeRecord(semantic_type=Octopus, plugin=self.plugin), 'Cuttlefish': SemanticTypeRecord(semantic_type=Cuttlefish, plugin=self.plugin)}
self.assertEqual(types, exp)
def test_get_semantic_types(self):
artifact_classes = self.pm.get_semantic_types()
is1 = ArtifactClassRecord(semantic_type=IntSequence1, format=IntSequenceDirectoryFormat, plugin=self.plugin, description='The first IntSequence', examples={'IntSequence1 import example': is1_use}, type_expression=IntSequence1)
is2 = ArtifactClassRecord(semantic_type=IntSequence2, format=IntSequenceV2DirectoryFormat, plugin=self.plugin, description='The second IntSequence', examples={'IntSequence2 import example': is2_use}, type_expression=IntSequence2)
is3 = ArtifactClassRecord(semantic_type=IntSequence3, format=IntSequenceMultiFileDirectoryFormat, plugin=self.plugin, description='', examples={}, type_expression=IntSequence3)
kd = ArtifactClassRecord(semantic_type=Kennel[Dog], format=MappingDirectoryFormat, plugin=self.plugin, description='', examples={}, type_expression=Kennel[Dog])
kc = ArtifactClassRecord(semantic_type=Kennel[Cat], format=MappingDirectoryFormat, plugin=self.plugin, description='', examples={}, type_expression=Kennel[Cat])
self.assertLessEqual({str(e.semantic_type) for e in [is1, is2, is3, kd, kc]}, artifact_classes.keys())
self.assertEqual(is1, artifact_classes['IntSequence1'])
self.assertEqual(is2, artifact_classes['IntSequence2'])
self.assertEqual(is3, artifact_classes['IntSequence3'])
self.assertNotIn('Cat', artifact_classes)
self.assertNotIn('Dog', artifact_classes)
self.assertNotIn('Kennel', artifact_classes)
self.assertIn('Kennel[Dog]', artifact_classes)
self.assertIn('Kennel[Cat]', artifact_classes)
def test_get_formats_no_type_or_filter(self):
exp = {'IntSequenceFormat': FormatRecord(format=IntSequenceFormat, plugin=self.plugin), 'IntSequenceDirectoryFormat': FormatRecord(format=IntSequenceDirectoryFormat, plugin=self.plugin), 'IntSequenceFormatV2': FormatRecord(format=IntSequenceFormatV2, plugin=self.plugin), 'IntSequenceV2DirectoryFormat': FormatRecord(format=IntSequenceV2DirectoryFormat, plugin=self.plugin), 'IntSequenceMultiFileDirectoryFormat': FormatRecord(format=IntSequenceMultiFileDirectoryFormat, plugin=self.plugin), 'RedundantSingleIntDirectoryFormat': FormatRecord(format=RedundantSingleIntDirectoryFormat, plugin=self.plugin), 'FourIntsDirectoryFormat': FormatRecord(format=FourIntsDirectoryFormat, plugin=self.plugin), 'EchoFormat': FormatRecord(format=EchoFormat, plugin=self.plugin), 'EchoDirectoryFormat': FormatRecord(format=EchoDirectoryFormat, plugin=self.plugin), 'MappingDirectoryFormat': FormatRecord(format=MappingDirectoryFormat, plugin=self.plugin), 'Cephalapod': FormatRecord(format=Cephalapod, plugin=self.plugin), 'CephalapodDirectoryFormat': FormatRecord(format=CephalapodDirectoryFormat, plugin=self.plugin), 'ImportableOnlyFormat': FormatRecord(format=ImportableOnlyFormat, plugin=self.plugin), 'ExportableOnlyFormat': FormatRecord(format=ExportableOnlyFormat, plugin=self.plugin)}
obs = self.pm.get_formats()
self.assertEqual(obs, exp)
def test_get_formats_SFDF(self):
exp = {'IntSequenceFormat': FormatRecord(format=IntSequenceFormat, plugin=self.plugin), 'IntSequenceFormatV2': FormatRecord(format=IntSequenceFormatV2, plugin=self.plugin), 'IntSequenceDirectoryFormat': FormatRecord(format=IntSequenceDirectoryFormat, plugin=self.plugin), 'IntSequenceV2DirectoryFormat': FormatRecord(format=IntSequenceV2DirectoryFormat, plugin=self.plugin), 'IntSequenceMultiFileDirectoryFormat': FormatRecord(format=IntSequenceMultiFileDirectoryFormat, plugin=self.plugin), 'ImportableOnlyFormat': FormatRecord(format=ImportableOnlyFormat, plugin=self.plugin), 'ExportableOnlyFormat': FormatRecord(format=ExportableOnlyFormat, plugin=self.plugin)}
obs = self.pm.get_formats(semantic_type='IntSequence1')
self.assertEqual(exp, obs)
def test_get_formats_SFDF_EXPORTABLE(self):
exp = {'IntSequenceFormat': FormatRecord(format=IntSequenceFormat, plugin=self.plugin), 'IntSequenceFormatV2': FormatRecord(format=IntSequenceFormatV2, plugin=self.plugin), 'IntSequenceDirectoryFormat': FormatRecord(format=IntSequenceDirectoryFormat, plugin=self.plugin), 'IntSequenceV2DirectoryFormat': FormatRecord(format=IntSequenceV2DirectoryFormat, plugin=self.plugin), 'ExportableOnlyFormat': FormatRecord(format=ExportableOnlyFormat, plugin=self.plugin)}
obs = self.pm.get_formats(filter=GetFormatFilters.EXPORTABLE, semantic_type=IntSequence1)
self.assertEqual(exp, obs)
def test_get_formats_SFDF_IMPORTABLE(self):
exp = {'IntSequenceFormat': FormatRecord(format=IntSequenceFormat, plugin=self.plugin), 'IntSequenceDirectoryFormat': FormatRecord(format=IntSequenceDirectoryFormat, plugin=self.plugin), 'IntSequenceMultiFileDirectoryFormat': FormatRecord(format=IntSequenceMultiFileDirectoryFormat, plugin=self.plugin), 'ImportableOnlyFormat': FormatRecord(format=ImportableOnlyFormat, plugin=self.plugin)}
obs = self.pm.get_formats(filter=GetFormatFilters.IMPORTABLE, semantic_type=IntSequence1)
self.assertEqual(exp, obs)
def test_get_formats_DF(self):
exp = {'IntSequenceFormat': FormatRecord(format=IntSequenceFormat, plugin=self.plugin), 'IntSequenceFormatV2': FormatRecord(format=IntSequenceFormatV2, plugin=self.plugin), 'IntSequenceDirectoryFormat': FormatRecord(format=IntSequenceDirectoryFormat, plugin=self.plugin), 'IntSequenceV2DirectoryFormat': FormatRecord(format=IntSequenceV2DirectoryFormat, plugin=self.plugin), 'IntSequenceMultiFileDirectoryFormat': FormatRecord(format=IntSequenceMultiFileDirectoryFormat, plugin=self.plugin)}
obs = self.pm.get_formats(semantic_type='IntSequence3')
self.assertEqual(exp, obs)
def test_get_formats_DF_EXPORTABLE(self):
exp = {'IntSequenceFormat': FormatRecord(format=IntSequenceFormat, plugin=self.plugin), 'IntSequenceDirectoryFormat': FormatRecord(format=IntSequenceDirectoryFormat, plugin=self.plugin), 'IntSequenceMultiFileDirectoryFormat': FormatRecord(format=IntSequenceMultiFileDirectoryFormat, plugin=self.plugin)}
obs = self.pm.get_formats(filter=GetFormatFilters.EXPORTABLE, semantic_type=IntSequence3)
self.assertEqual(exp, obs)
def test_get_formats_DF_exportable_str(self):
exp = {'IntSequenceFormat': FormatRecord(format=IntSequenceFormat, plugin=self.plugin), 'IntSequenceDirectoryFormat': FormatRecord(format=IntSequenceDirectoryFormat, plugin=self.plugin), 'IntSequenceMultiFileDirectoryFormat': FormatRecord(format=IntSequenceMultiFileDirectoryFormat, plugin=self.plugin)}
obs = self.pm.get_formats(filter='EXPORTABLE', semantic_type=IntSequence3)
self.assertEqual(exp, obs)
def test_get_formats_DF_IMPORTABLE(self):
exp = {'IntSequenceFormatV2': FormatRecord(format=IntSequenceFormatV2, plugin=self.plugin), 'IntSequenceV2DirectoryFormat': FormatRecord(format=IntSequenceV2DirectoryFormat, plugin=self.plugin), 'IntSequenceMultiFileDirectoryFormat': FormatRecord(format=IntSequenceMultiFileDirectoryFormat, plugin=self.plugin)}
obs = self.pm.get_formats(filter=GetFormatFilters.IMPORTABLE, semantic_type=IntSequence3)
self.assertEqual(exp, obs)
def test_get_formats_DF_importable_str(self):
exp = {'IntSequenceFormatV2': FormatRecord(format=IntSequenceFormatV2, plugin=self.plugin), 'IntSequenceV2DirectoryFormat': FormatRecord(format=IntSequenceV2DirectoryFormat, plugin=self.plugin), 'IntSequenceMultiFileDirectoryFormat': FormatRecord(format=IntSequenceMultiFileDirectoryFormat, plugin=self.plugin)}
obs = self.pm.get_formats(filter='IMPORTABLE', semantic_type=IntSequence3)
self.assertEqual(exp, obs)
def test_get_formats_invalid_type(self):
with self.assertRaisesRegex(ValueError, 'No formats associated'):
self.pm.get_formats(semantic_type='Random[Frequency]')
def test_get_formats_invalid_filter(self):
with self.assertRaisesRegex(ValueError, 'filter.*is not valid'):
self.pm.get_formats(filter='xyz')
def test_importable_formats_property(self):
imp_f = self.pm.importable_formats
self.assertTrue(isinstance(imp_f, dict))
self.assertTrue(('IntSequenceFormatV2' in imp_f))
self.assertTrue(('CephalapodDirectoryFormat' in imp_f))
self.assertTrue(('ImportableOnlyFormat' in imp_f))
self.assertFalse(('ExportableOnlyFormat' in imp_f))
def test_exportable_formats_property(self):
exp_f = self.pm.exportable_formats
self.assertTrue(isinstance(exp_f, dict))
self.assertTrue(('IntSequenceDirectoryFormat' in exp_f))
self.assertTrue(('IntSequenceV2DirectoryFormat' in exp_f))
self.assertTrue(('ExportableOnlyFormat' in exp_f))
self.assertFalse(('ImportableOnlyFormat' in exp_f))
def test_deprecated_type_formats(self):
self.assertEqual(self.pm.type_formats, list(self.pm.artifact_classes.values())) |
class EventPluginHandler(PluginHandler):
def __init__(self, librarian=None, player=None, songlist=None):
if librarian:
sigs = _map_signals(librarian, blacklist=('notify',))
for (event, _handle) in sigs:
def handler(librarian, *args):
self.__invoke(librarian, args[(- 1)], *args[:(- 1)])
librarian.connect(event, handler, event)
if (librarian and player):
sigs = _map_signals(player, blacklist=('notify',))
for (event, _handle) in sigs:
def cb_handler(librarian, *args):
self.__invoke(librarian, args[(- 1)], *args[:(- 1)])
connect_obj(player, event, cb_handler, librarian, event)
if songlist:
def __selection_changed_cb(songlist, selection):
songs = songlist.get_selected_songs()
self.__invoke(self.librarian, 'songs_selected', songs)
songlist.connect('selection-changed', __selection_changed_cb)
self.librarian = librarian
self.__plugins = {}
self.__sidebars = {}
def __invoke(self, librarian, event, *args):
args = list(args)
if (args and args[0]):
if isinstance(args[0], dict):
args[0] = SongWrapper(args[0])
elif isinstance(args[0], (set | list)):
args[0] = list_wrapper(args[0])
for plugin in list(self.__plugins.values()):
method_name = ('plugin_on_' + event.replace('-', '_'))
handler = getattr(plugin, method_name, None)
def overridden(obj, name):
return (name in type(obj).__dict__)
if overridden(plugin, method_name):
try:
handler(*args)
except Exception:
print_e(f'Error during {method_name} on {type(plugin)}')
errorhook()
if ((event not in ['removed', 'changed']) and args):
songs = args[0]
if (not isinstance(songs, (set | list))):
songs = [songs]
songs = filter(None, songs)
check_wrapper_changed(librarian, songs)
def plugin_handle(self, plugin):
return issubclass(plugin.cls, EventPlugin)
def plugin_enable(self, plugin):
self.__plugins[plugin.cls] = plugin.get_instance()
def plugin_disable(self, plugin):
self.__plugins.pop(plugin.cls) |
def test_dsl_async_cmd_run_save_with_stdout():
context = Context({'cmds': {'run': ['A', 'B'], 'save': True, 'stdout': '/arb1'}})
with pytest.raises(ContextError) as err:
AsyncCmdStep('blah', context)
assert (str(err.value) == "You can't set `stdout` or `stderr` when `save` is True.") |
@_ignore_inferred
def _follow_pyname(assignment, pymodule, lineno=None):
assign_node = (assignment.type_hint or assignment.ast_node)
if (lineno is None):
lineno = _get_lineno_for_node(assign_node)
holding_scope = pymodule.get_scope().get_inner_scope_for_line(lineno)
pyname = evaluate.eval_node(holding_scope, assign_node)
if (pyname is not None):
result = pyname.get_object()
if (isinstance(result.get_type(), rope.base.builtins.Property) and (holding_scope.get_kind() == 'Class')):
arg = pynames.UnboundName(pyobjects.PyObject(holding_scope.pyobject))
return (pyname, result.get_type().get_property_object(arguments.ObjectArguments([arg])))
return (pyname, result) |
class DeTTECTEditor():
    def __init__(self, port):
        signal.signal(signal.SIGTERM, self._signal_handler)
        signal.signal(signal.SIGINT, self._signal_handler)
        self.port = port
        # NOTE: the server attribute name was scrubbed from the source; 'httpd' is an assumption
        self.httpd = None
    def _signal_handler(self, signal, frame):
        print('Shutting down webserver')
        self.httpd.shutdown()
        self.httpd.server_close()
    def _run_webserver(self):
        try:
            os.chdir('./editor/dist/dettect-editor')
            self.httpd = TCPServer(('', self.port), QuietHTTPRequestHandler)
            print(('Editor started at port %d' % self.port))
            # NOTE: the URL literal was scrubbed from the source; a localhost URL is assumed
            url = ('http://localhost:%d' % self.port)
            if (not os.getenv('DeTTECT_DOCKER_CONTAINER')):
                print(('Opening webbrowser: ' + url))
                webbrowser.open_new_tab(url)
            else:
                print(('You can open the Editor on: ' + url))
            self.httpd.serve_forever()
        except Exception as e:
            print(('Could not start webserver: ' + str(e)))
    def start(self):
        thread = threading.Thread(target=self._run_webserver)
        thread.start()
def parse_bdist_wininst(name):
lower = name.lower()
(base, py_ver, plat) = (None, None, None)
if lower.endswith('.exe'):
if lower.endswith('.win32.exe'):
base = name[:(- 10)]
plat = 'win32'
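        # startswith with a negative offset matches a suffix like '.win32-py2.7.exe' while keeping the version digits addressable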
elif lower.startswith('.win32-py', (- 16)):
py_ver = name[(- 7):(- 4)]
base = name[:(- 16)]
plat = 'win32'
elif lower.endswith('.win-amd64.exe'):
base = name[:(- 14)]
plat = 'win-amd64'
elif lower.startswith('.win-amd64-py', (- 20)):
py_ver = name[(- 7):(- 4)]
base = name[:(- 20)]
plat = 'win-amd64'
return (base, py_ver, plat) |
def test_assert_key_is_truthy_key_not_there():
with pytest.raises(KeyNotInContextError) as err:
asserts.assert_key_is_truthy(obj={'k1': None}, key='k2', caller='arb caller', parent='parent name')
assert (str(err.value) == "context['parent name']['k2'] doesn't exist. It must exist for arb caller.") |
@pytest.mark.gdalbin
def test_set_nodata(tmpdir):
dst_path = str(tmpdir.join('lol.tif'))
with rasterio.open('tests/data/RGB.byte.tif') as src:
meta = src.meta
meta['nodata'] = 42
with rasterio.open(dst_path, 'w', **meta) as dst:
assert (dst.nodata == 42)
assert (dst.meta['nodata'] == 42)
assert (dst.nodatavals == (42, 42, 42))
info = subprocess.check_output(['gdalinfo', dst_path])
pattern = b'Band 1.*?NoData Value=42'
assert (re.search(pattern, info, re.DOTALL) is not None)
pattern = b'Band 2.*?NoData Value=42'
assert (re.search(pattern, info, re.DOTALL) is not None)
    pattern = b'Band 3.*?NoData Value=42'
assert (re.search(pattern, info, re.DOTALL) is not None) |
def init_logging():
global LOGGER
if (not os.path.isfile(os.path.join(CONFIG_PATH, LOGGING_CONFIG_FILE))):
print('Copying default logging config file...')
try:
shutil.copy2(os.path.join(SAMPLES_PATH, LOGGING_CONFIG_FILE), CONFIG_PATH)
except IOError as error:
print(f'Unable to copy default logging config file. {str(error)}')
logging_config = read_yaml_file(os.path.join(CONFIG_PATH, LOGGING_CONFIG_FILE))
log_path = os.path.dirname(logging_config['handlers']['file']['filename'])
try:
if (not os.path.exists(log_path)):
os.makedirs(log_path)
except IOError:
print('Unable to create log folder')
logging.config.dictConfig(logging_config)
LOGGER = logging.getLogger('wyzesense2mqtt')
LOGGER.debug('Logging initialized...') |
class Effect3343(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Medium Projectile Turret')), 'falloff', ship.getModifiedItemAttr('eliteBonusHeavyInterdictors1'), skill='Heavy Interdiction Cruisers', **kwargs) |
class POPM(Frame):
_framespec = [Latin1TextSpec('email'), ByteSpec('rating', default=0)]
_optionalspec = [IntegerSpec('count', default=0)]
    @property
    def HashKey(self):
return ('%s:%s' % (self.FrameID, self.email))
def __eq__(self, other):
return (self.rating == other)
__hash__ = Frame.__hash__
def __pos__(self):
return self.rating
def _pprint(self):
return ('%s=%r %r/255' % (self.email, getattr(self, 'count', None), self.rating)) |
def add_params_from_parameter_sharding(fused_params: Optional[Dict[(str, Any)]], parameter_sharding: ParameterSharding) -> Dict[(str, Any)]:
if (fused_params is None):
fused_params = {}
if (parameter_sharding.cache_params is not None):
cache_params = parameter_sharding.cache_params
if (cache_params.algorithm is not None):
fused_params['cache_algorithm'] = cache_params.algorithm
if (cache_params.load_factor is not None):
fused_params['cache_load_factor'] = cache_params.load_factor
if (cache_params.reserved_memory is not None):
fused_params['cache_reserved_memory'] = cache_params.reserved_memory
if (cache_params.precision is not None):
fused_params['cache_precision'] = cache_params.precision
if (cache_params.prefetch_pipeline is not None):
fused_params['prefetch_pipeline'] = cache_params.prefetch_pipeline
if (parameter_sharding.enforce_hbm is not None):
fused_params['enforce_hbm'] = parameter_sharding.enforce_hbm
if (parameter_sharding.stochastic_rounding is not None):
fused_params['stochastic_rounding'] = parameter_sharding.stochastic_rounding
if (parameter_sharding.bounds_check_mode is not None):
fused_params['bounds_check_mode'] = parameter_sharding.bounds_check_mode
if (parameter_sharding.sharding_type == ShardingType.DATA_PARALLEL.value):
logger.warning(f'Sharding Type is {parameter_sharding.sharding_type}, caching params will be ignored')
elif (parameter_sharding.compute_kernel == EmbeddingComputeKernel.DENSE.value):
logger.warning(f'Compute Kernel is {parameter_sharding.compute_kernel}, caching params will be ignored')
return fused_params |
@plugin.route('/profile/')
def profile() -> None:
user_data = plugin.client('user').get()['user']
reg_date = date.fromtimestamp(user_data['reg_date'])
dialog = xbmcgui.Dialog()
message = f'''{localize(32035)}: [B]{user_data['username']}[/B]
{localize(32036)}: [B]{reg_date:%d.%m.%Y}[/B]
{localize(32037)}: [B]{int(user_data['subscription']['days'])}[/B]'''
dialog.ok(localize(32038), message) |
class ZipReader(object):
zip_bank = dict()
def __init__(self):
super(ZipReader, self).__init__()
    @staticmethod
    def get_zipfile(path):
zip_bank = ZipReader.zip_bank
if (path not in zip_bank):
zfile = zipfile.ZipFile(path, 'r')
zip_bank[path] = zfile
return zip_bank[path]
    @staticmethod
    def split_zip_style_path(path):
        # paths take the form 'archive.zip@inner/path'
        pos_at = path.index('@')
        assert (pos_at != (- 1)), ("character '@' is not found from the given path '%s'" % path)
zip_path = path[0:pos_at]
folder_path = path[(pos_at + 1):]
folder_path = str.strip(folder_path, '/')
return (zip_path, folder_path)
    @staticmethod
    def list_folder(path):
(zip_path, folder_path) = ZipReader.split_zip_style_path(path)
zfile = ZipReader.get_zipfile(zip_path)
folder_list = []
for file_foler_name in zfile.namelist():
file_foler_name = str.strip(file_foler_name, '/')
if (file_foler_name.startswith(folder_path) and (len(os.path.splitext(file_foler_name)[(- 1)]) == 0) and (file_foler_name != folder_path)):
if (len(folder_path) == 0):
folder_list.append(file_foler_name)
else:
folder_list.append(file_foler_name[(len(folder_path) + 1):])
return folder_list
    @staticmethod
    def list_files(path, extension=None):
if (extension is None):
extension = ['.*']
(zip_path, folder_path) = ZipReader.split_zip_style_path(path)
zfile = ZipReader.get_zipfile(zip_path)
file_lists = []
for file_foler_name in zfile.namelist():
file_foler_name = str.strip(file_foler_name, '/')
if (file_foler_name.startswith(folder_path) and (str.lower(os.path.splitext(file_foler_name)[(- 1)]) in extension)):
if (len(folder_path) == 0):
file_lists.append(file_foler_name)
else:
file_lists.append(file_foler_name[(len(folder_path) + 1):])
return file_lists
    @staticmethod
    def read(path):
(zip_path, path_img) = ZipReader.split_zip_style_path(path)
zfile = ZipReader.get_zipfile(zip_path)
data = zfile.read(path_img)
return data
    @staticmethod
    def imread(path):
(zip_path, path_img) = ZipReader.split_zip_style_path(path)
zfile = ZipReader.get_zipfile(zip_path)
data = zfile.read(path_img)
try:
im = Image.open(io.BytesIO(data))
except:
print('ERROR IMG LOADED: ', path_img)
random_img = (np.random.rand(224, 224, 3) * 255)
im = Image.fromarray(np.uint8(random_img))
return im |
def batch_dijkstra(slices, sliced_edges, sliced_adjacency_logits, sliced_weight_logits, initial_vertices, target_vertices, *, k_nearest, max_length, max_length_nearest=None, max_steps=None, deterministic=False, presample_edges=False, soft=False, n_jobs=None, validate=True, **kwargs):
n_jobs = (n_jobs or cpu_count())
if (n_jobs < 0):
n_jobs = ((cpu_count() - n_jobs) + 1)
if (max_steps is None):
max_steps = (- 1)
batch_size = len(initial_vertices)
max_length_nearest = (max_length_nearest or max_length)
if validate:
for arr in (slices, sliced_edges, sliced_adjacency_logits, sliced_weight_logits, initial_vertices):
assert isinstance(arr, np.ndarray), 'expected np array but got {}'.format(type(arr))
assert arr.flags.c_contiguous, 'please make sure array is contiguous (see np.ascontiguousarray)'
assert (arr.ndim == 1), 'all arrays must be 1-dimensional'
assert isinstance(target_vertices, np.ndarray)
assert arr.flags.c_contiguous, 'target paths must be contiguous (see np.ascontiguousarray)'
assert (np.ndim(target_vertices) in (1, 2)), 'target paths must be of either shape [batch_size] or[batch_size, num_targets] (batch_size is len(initial_vertices)'
assert ((slices[0] == 1) and (slices[(- 1)] == len(sliced_edges)))
assert (len(sliced_edges) == len(sliced_adjacency_logits) == len(sliced_weight_logits))
assert (len(initial_vertices) == len(target_vertices) == batch_size)
assert (max(np.max(initial_vertices), np.max(target_vertices)) < (len(slices) - 1)), 'vertex id exceeds n_vertices'
assert (slices.dtype == sliced_edges.dtype == np.int32)
assert (sliced_adjacency_logits.dtype == sliced_weight_logits.dtype == np.float32)
assert (initial_vertices.dtype == target_vertices.dtype == np.int32)
assert ((max_steps == (- 1)) or (max_steps >= k_nearest)), 'it is impossible to find all neighbors in this many steps'
assert ((max_length > 0) and (max_length_nearest > 0) and (k_nearest >= 0))
assert isinstance(deterministic, bool)
should_squeeze_target_paths = (np.ndim(target_vertices) == 1)
if should_squeeze_target_paths:
target_vertices = target_vertices[(..., np.newaxis)]
target_paths = np.zeros([batch_size, target_vertices.shape[(- 1)], max_length], 'int32')
nearest_paths = np.zeros([batch_size, k_nearest, max_length_nearest], 'int32')
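    # optionally pre-sample hard edges: keep each edge with probability sigmoid(logit), encoding the outcome as +/-inf logits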
if presample_edges:
edge_logits = sliced_adjacency_logits
(min_value, max_value) = (np.finfo(edge_logits.dtype).min, np.finfo(edge_logits.dtype).max)
edge_exists = (torch.rand(len(edge_logits)) < torch.sigmoid(torch.as_tensor(edge_logits))).numpy()
sliced_adjacency_logits = np.where(edge_exists, max_value, min_value)
_bindings.batch_dijkstra(slices, sliced_edges, sliced_adjacency_logits, sliced_weight_logits, initial_vertices, target_vertices, target_paths, nearest_paths, deterministic, soft, max_steps, n_jobs)
if should_squeeze_target_paths:
target_paths = target_paths.reshape([batch_size, max_length])
return (target_paths, nearest_paths) |
def build_benchmark_googlesheet_payload(config):
data = config.copy()
data['hostname'] = socket.gethostname()
QUERY_NUM = get_query_number()
current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
query_time = _get_benchmarked_method_time(filename='benchmarked_main.csv', query_start_time=config.get('start_time'))
writing_time = _get_benchmarked_method_time(filename='benchmarked_write_result.csv', query_start_time=config.get('start_time'))
read_graph_creation_time = _get_benchmarked_method_time(filename='benchmarked_read_tables.csv', query_start_time=config.get('start_time'))
if (data['get_read_time'] and read_graph_creation_time and query_time):
compute_read_table_time = _get_benchmarked_method_time(filename='benchmarked_read_tables.csv', field='compute_time_seconds', query_start_time=config.get('start_time'))
query_time = (query_time - compute_read_table_time)
else:
compute_read_table_time = None
library_info = generate_library_information()
data.update(library_info)
payload = OrderedDict({'Query Number': QUERY_NUM, 'Protocol': ('UCX' if (data.get('nvlink') == True) else 'TCP'), 'NVLINK': data.get('nvlink', 'NA'), 'Infiniband': data.get('infiniband', 'NA'), 'Query Type': ('sql' if is_sql_query() else 'dask'), 'File Format': data.get('file_format'), 'Time (seconds)': ((query_time + writing_time) if (query_time and writing_time) else 'NA'), 'Query Time (seconds)': (query_time if query_time else 'NA'), 'Writing Results Time': (writing_time if writing_time else 'NA'), 'Compute Read + Repartition small table Time(seconds)': (compute_read_table_time if compute_read_table_time else 'NA'), 'Graph Creation time(seconds)': (read_graph_creation_time if read_graph_creation_time else 'NA'), 'Hostname': data.get('hostname'), 'RMM Pool Size': os.environ.get('POOL_SIZE'), 'Device Memory Limit': os.environ.get('DEVICE_MEMORY_LIMIT'), 'Number of GPUs': os.environ.get('NUM_WORKERS'), 'Data Location': data.get('data_dir'), 'Current Time': current_time, 'cuDF Version': data.get('cudf'), 'Dask SQL Version': data.get('sql'), 'Dask Version': data.get('dask'), 'Distributed Version': data.get('distributed'), 'Dask-CUDA Version': data.get('dask-cuda'), 'UCX-py Version': data.get('ucx-py'), 'UCX Version': data.get('ucx'), 'RMM Version': data.get('rmm'), 'cuML Version': data.get('cuml'), 'CuPy Version': data.get('cupy'), 'Query Status': data.get('query_status', 'Unknown'), 'Unique Run ID': data.get('run_id')})
payload = list(payload.values())
return payload |
def find_editor():
for var in ('GIT_EDITOR', 'EDITOR'):
editor = os.environ.get(var)
if (editor is not None):
return editor
if (sys.platform == 'win32'):
fallbacks = ['notepad.exe']
else:
fallbacks = ['/etc/alternatives/editor', 'nano']
for fallback in fallbacks:
if os.path.isabs(fallback):
found_path = fallback
else:
found_path = shutil.which(fallback)
if (found_path and os.path.exists(found_path)):
return found_path
error('Could not find an editor! Set the EDITOR environment variable.') |
class TestInferShape(utt.InferShapeTester):
def test_Mean(self):
adtens3 = dtensor3()
adtens3_val = random(3, 4, 5)
aiscal_val = 2
self._compile_and_check([adtens3], [Mean(None)(adtens3)], [adtens3_val], Mean)
self._compile_and_check([adtens3], [Mean(aiscal_val)(adtens3)], [adtens3_val], Mean)
def test_MaxAndArgmax(self):
adtens3 = dtensor3()
adtens3_val = random(4, 5, 3)
self._compile_and_check([adtens3], max_and_argmax(adtens3, None), [adtens3_val], MaxAndArgmax)
self._compile_and_check([adtens3], max_and_argmax(adtens3, 0), [adtens3_val], MaxAndArgmax)
self._compile_and_check([adtens3], max_and_argmax(adtens3, 1), [adtens3_val], MaxAndArgmax)
self._compile_and_check([adtens3], max_and_argmax(adtens3, 2), [adtens3_val], MaxAndArgmax)
self._compile_and_check([adtens3], max_and_argmax(adtens3, [0, 1, 2]), [adtens3_val], MaxAndArgmax)
def test_Dot(self):
rng = np.random.default_rng(seed=utt.fetch_seed())
advec = dvector()
bdvec = dvector()
advec_val = random(4, rng=rng)
bdvec_val = random(4, rng=rng)
self._compile_and_check([advec, bdvec], [Dot()(advec, bdvec)], [advec_val, bdvec_val], (Dot, blas.Dot22, blas.Gemv, blas_c.CGemv))
admat = dmatrix()
bdmat = dmatrix()
admat_val = random(4, 5, rng=rng)
bdmat_val = random(5, 3, rng=rng)
self._compile_and_check([admat, bdmat], [Dot()(admat, bdmat)], [admat_val, bdmat_val], (Dot, blas.Dot22))
bdmat_val = random(4, 5, rng=rng)
self._compile_and_check([advec, bdmat], [Dot()(advec, bdmat)], [advec_val, bdmat_val], (Dot, blas.Dot22, blas.Gemv, blas_c.CGemv))
admat_val = random(5, 4, rng=rng)
self._compile_and_check([admat, bdvec], [Dot()(admat, bdvec)], [admat_val, bdvec_val], (Dot, blas.Dot22, blas.Gemv, blas_c.CGemv)) |
def linear_matmul(inputs, weight):
hid_dim = weight.get_shape().as_list()[0]
origin_shape = inputs.get_shape().as_list()
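    # flatten all leading dimensions, apply the shared projection, then restore the original shape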
inputs = tf.reshape(inputs, [(- 1), hid_dim])
outputs = tf.matmul(inputs, weight)
outputs = tf.reshape(outputs, (origin_shape[:(- 1)] + [(- 1)]))
return outputs |
class TestWheelSource():
def test_takes_two_arguments(self):
WheelSource('distribution', 'version')
WheelSource(distribution='distribution', version='version')
def test_correctly_computes_properties(self):
source = WheelSource(distribution='distribution', version='version')
assert (source.data_dir == 'distribution-version.data')
assert (source.dist_info_dir == 'distribution-version.dist-info')
def test_raises_not_implemented_error(self):
source = WheelSource(distribution='distribution', version='version')
with pytest.raises(NotImplementedError):
source.dist_info_filenames
with pytest.raises(NotImplementedError):
source.read_dist_info('METADATA')
with pytest.raises(NotImplementedError):
source.get_contents()
with pytest.raises(NotImplementedError):
source.validate_record() |
def generate_case_ids(alltests):
import random
for c in alltests:
if (c['id'] == ''):
while True:
newid = str('{:04x}'.format(random.randrange((16 ** 4))))
if does_id_exist(alltests, newid):
continue
else:
c['id'] = newid
break
ufilename = []
for c in alltests:
if ('filename' in c):
ufilename.append(c['filename'])
ufilename = get_unique_item(ufilename)
for f in ufilename:
testlist = []
for t in alltests:
if ('filename' in t):
if (t['filename'] == f):
del t['filename']
testlist.append(t)
with open(f, 'w') as outfile:
json.dump(testlist, outfile, indent=4)
outfile.write('\n')
@pytest.mark.skipif((K.backend() != 'tensorflow'), reason='sparse operations supported only by TF')
@keras_test
def test_sparse_input_validation_split():
test_input = sparse.random(6, 3, density=0.25).tocsr()
in1 = Input(shape=(3,), sparse=True)
out1 = Dense(4)(in1)
test_output = np.random.random((6, 4))
model = Model(in1, out1)
model.compile('rmsprop', 'mse')
model.fit(test_input, test_output, epochs=1, batch_size=2, validation_split=0.2) |
def fdr(pvals, alpha=0.05, method='fdr_bh'):
assert (method.lower() in ['fdr_bh', 'fdr_by'])
pvals = np.asarray(pvals)
shape_init = pvals.shape
pvals = pvals.ravel()
num_nan = np.isnan(pvals).sum()
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
sortrevind = pvals_sortind.argsort()
ntests = (pvals.size - num_nan)
ecdffactor = (np.arange(1, (ntests + 1)) / float(ntests))
if (method.lower() == 'fdr_by'):
cm = np.sum((1.0 / np.arange(1, (ntests + 1))))
ecdffactor /= cm
pvals_corr = (pvals_sorted[:ntests] / ecdffactor)
pvals_corr = np.minimum.accumulate(pvals_corr[::(- 1)])[::(- 1)]
pvals_corr = np.clip(pvals_corr, None, 1)
pvals_corr = np.append(pvals_corr, np.full(num_nan, np.nan))
pvals_corrected = pvals_corr[sortrevind].reshape(shape_init)
with np.errstate(invalid='ignore'):
reject = np.less(pvals_corrected, alpha)
return (reject, pvals_corrected) |
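# Usage sketch for fdr() above. With Benjamini-Hochberg, the sorted p-values
# [.01, .03, .04, .20] are divided by [1/4, 2/4, 3/4, 4/4] and made monotone
# from the right, giving corrected values [.04, .0533, .0533, .20] in input
# order [.01, .04, .03, .20], so only the first rejects at alpha=0.05.
import numpy as np
reject, p_corr = fdr(np.array([0.01, 0.04, 0.03, 0.20]), alpha=0.05, method='fdr_bh')
assert reject.tolist() == [True, False, False, False] |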
def test_nested():
a = m.NestA()
b = m.NestB()
c = m.NestC()
a += 10
assert (m.get_NestA(a) == 13)
b.a += 100
assert (m.get_NestA(b.a) == 103)
c.b.a += 1000
assert (m.get_NestA(c.b.a) == 1003)
b -= 1
assert (m.get_NestB(b) == 3)
c.b -= 3
assert (m.get_NestB(c.b) == 1)
c *= 7
assert (m.get_NestC(c) == 35)
abase = a.as_base()
assert (abase.value == (- 2))
a.as_base().value += 44
assert (abase.value == 42)
assert (c.b.a.as_base().value == (- 2))
c.b.a.as_base().value += 44
assert (c.b.a.as_base().value == 42)
del c
pytest.gc_collect()
del a
pytest.gc_collect()
assert (abase.value == 42)
del abase, b
pytest.gc_collect() |
def _make_certbuilder(private_key):
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, 'example.org')])
return x509.CertificateBuilder().subject_name(name).issuer_name(name).public_key(private_key.public_key()).serial_number(777).not_valid_before(datetime.datetime(1999, 1, 1)).not_valid_after(datetime.datetime(2020, 1, 1)) |
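# Hedged usage sketch (not from the source): signing the builder with the same
# key it embeds yields a self-signed certificate via the cryptography package.
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
cert = _make_certbuilder(key).sign(key, hashes.SHA256())
print(cert.subject) |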
@pytest.mark.parametrize('X_args, Y_args, Z_args, p_val, comp_size, idx_size, extra_indices, join_axis, supported', [
((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (), (), (), 0, True),
((np.array([0], dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array([0.5], dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array([100], dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), None, (), (), 0, True),
((np.array([0], dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array([0.5], dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array([100], dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), None, (), (slice(None),), 1, True),
((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (4,), (), (), 0, True),
((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (4,), (), (slice(None),), 1, True),
pytest.param((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.1, 0.3], dtype=pytensor.config.floatX), (4,), (), (), 1, True, marks=pytest.mark.xfail(AssertionError, match='Arrays are not almost equal to 6 decimals', reason='IfElse Mixture logprob fails when indexing mixes across components')),
((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (2, 3), (), (), 0, True),
((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (), (6,), (), 0, False),
((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (2,), (2,), (slice(None),), 0, False),
((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (2,), (4,), (slice(None),), 1, False),
((np.array([0], dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array([0.5], dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array([100], dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (2,), (2,), (), 0, False),
((np.array([0, (- 100)], dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array([0.5, 1], dtype=pytensor.config.floatX), np.array([2.0, 1], dtype=pytensor.config.floatX)), (np.array([100, 1000], dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([[0.1, 0.5, 0.4], [0.4, 0.1, 0.5]], dtype=pytensor.config.floatX), (2,), (2,), (), 0, False),
((np.array([0, (- 100)], dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array([0.5, 1], dtype=pytensor.config.floatX), np.array([2.0, 1], dtype=pytensor.config.floatX)), (np.array([100, 1000], dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([[0.1, 0.5, 0.4], [0.4, 0.1, 0.5]], dtype=pytensor.config.floatX), None, None, (), 0, False),
((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (2, 3), (2, 3), (), 0, False),
((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (5,), (5,), (np.arange(5),), 0, False),
((np.array(0, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), (np.array(0.5, dtype=pytensor.config.floatX), np.array(2.0, dtype=pytensor.config.floatX)), (np.array(100, dtype=pytensor.config.floatX), np.array(1, dtype=pytensor.config.floatX)), np.array([0.1, 0.5, 0.4], dtype=pytensor.config.floatX), (5,), (5,), (np.arange(5), None), 0, False)])
def test_hetero_mixture_categorical(X_args, Y_args, Z_args, p_val, comp_size, idx_size, extra_indices, join_axis, supported):
X_rv = pt.random.normal(*X_args, size=comp_size, name='X')
Y_rv = pt.random.gamma(Y_args[0], scale=Y_args[1], size=comp_size, name='Y')
Z_rv = pt.random.normal(*Z_args, size=comp_size, name='Z')
p_at = pt.as_tensor(p_val).type()
p_at.name = 'p'
p_at.tag.test_value = np.array(p_val, dtype=pytensor.config.floatX)
I_rv = pt.random.categorical(p_at, size=idx_size, name='I')
i_vv = I_rv.clone()
i_vv.name = 'i'
indices_at = list(extra_indices)
indices_at.insert(join_axis, I_rv)
indices_at = tuple(indices_at)
M_rv = pt.stack([X_rv, Y_rv, Z_rv], axis=join_axis)[indices_at]
M_rv.name = 'M'
m_vv = M_rv.clone()
m_vv.name = 'm'
if supported:
logp_parts = conditional_logp({M_rv: m_vv, I_rv: i_vv}, sum=False)
else:
with pytest.raises(RuntimeError, match='could not be derived: {m}'):
conditional_logp({M_rv: m_vv, I_rv: i_vv}, sum=False)
return
I_logp_fn = pytensor.function([p_at, i_vv], logp_parts[i_vv])
M_logp_fn = pytensor.function([m_vv, i_vv], logp_parts[m_vv])
assert_no_rvs(I_logp_fn.maker.fgraph.outputs[0])
assert_no_rvs(M_logp_fn.maker.fgraph.outputs[0])
decimals = (6 if (pytensor.config.floatX == 'float64') else 4)
test_val_rng = np.random.RandomState(3238)
norm_1_sp = sp.norm(loc=X_args[0], scale=X_args[1])
gamma_sp = sp.gamma(Y_args[0], scale=Y_args[1])
norm_2_sp = sp.norm(loc=Z_args[0], scale=Z_args[1])
real_comp_size = tuple(X_rv.shape.eval())
for i in range(10):
i_val = CategoricalRV.rng_fn(test_val_rng, p_val, idx_size)
indices_val = list(extra_indices)
indices_val.insert(join_axis, i_val)
indices_val = tuple(indices_val)
x_val = np.broadcast_to(norm_1_sp.rvs(size=comp_size, random_state=test_val_rng), real_comp_size)
y_val = np.broadcast_to(gamma_sp.rvs(size=comp_size, random_state=test_val_rng), real_comp_size)
z_val = np.broadcast_to(norm_2_sp.rvs(size=comp_size, random_state=test_val_rng), real_comp_size)
component_logps = np.stack([norm_1_sp.logpdf(x_val), gamma_sp.logpdf(y_val), norm_2_sp.logpdf(z_val)], axis=join_axis)[indices_val]
index_logps = scipy_logprob(i_val, p_val)
exp_obs_logps = (component_logps + index_logps[((Ellipsis,) + ((None,) * join_axis))])
m_val = np.stack([x_val, y_val, z_val], axis=join_axis)[indices_val]
I_logp_vals = I_logp_fn(p_val, i_val)
M_logp_vals = M_logp_fn(m_val, i_val)
logp_vals = (M_logp_vals + I_logp_vals[((Ellipsis,) + ((None,) * join_axis))])
np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals) |
def weights_init_orthogonal(m):
classname = m.__class__.__name__
if (classname.find('Conv') != (- 1)):
init.orthogonal_(m.weight.data, gain=1)
elif (classname.find('Linear') != (- 1)):
init.orthogonal_(m.weight.data, gain=1)
elif (classname.find('BatchNorm') != (- 1)):
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0) |
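# Hedged usage sketch (not from the source): Module.apply() walks every
# submodule, so the initializer above fires once per matching layer.
import torch.nn as nn
net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.Flatten(), nn.Linear(8 * 62 * 62, 2))
net.apply(weights_init_orthogonal) |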
@pytest.mark.parametrize('p_val, size, supported', [
(np.array(0.0, dtype=pytensor.config.floatX), (), True),
(np.array(1.0, dtype=pytensor.config.floatX), (), True),
(np.array([0.1, 0.9], dtype=pytensor.config.floatX), (), True),
(np.array(0.0, dtype=pytensor.config.floatX), (2,), False),
(np.array(1.0, dtype=pytensor.config.floatX), (2, 1), False),
(np.array(1.0, dtype=pytensor.config.floatX), (2, 3), False),
(np.array([0.1, 0.9], dtype=pytensor.config.floatX), (2, 3), False)])
def test_hetero_mixture_binomial(p_val, size, supported):
X_rv = pt.random.normal(0, 1, size=size, name='X')
Y_rv = pt.random.gamma(0.5, scale=2.0, size=size, name='Y')
if (np.ndim(p_val) == 0):
p_at = pt.scalar('p')
p_at.tag.test_value = p_val
I_rv = pt.random.bernoulli(p_at, size=size, name='I')
p_val_1 = p_val
else:
p_at = pt.vector('p')
p_at.tag.test_value = np.array(p_val, dtype=pytensor.config.floatX)
I_rv = pt.random.categorical(p_at, size=size, name='I')
p_val_1 = p_val[1]
i_vv = I_rv.clone()
i_vv.name = 'i'
M_rv = pt.stack([X_rv, Y_rv])[I_rv]
M_rv.name = 'M'
m_vv = M_rv.clone()
m_vv.name = 'm'
if supported:
M_logp = conditional_logp({M_rv: m_vv, I_rv: i_vv})
M_logp_combined = pt.add(*M_logp.values())
else:
with pytest.raises(RuntimeError, match='could not be derived: {m}'):
conditional_logp({M_rv: m_vv, I_rv: i_vv})
return
M_logp_fn = pytensor.function([p_at, m_vv, i_vv], M_logp_combined)
assert_no_rvs(M_logp_fn.maker.fgraph.outputs[0])
decimals = (6 if (pytensor.config.floatX == 'float64') else 4)
test_val_rng = np.random.RandomState(3238)
bern_sp = sp.bernoulli(p_val_1)
norm_sp = sp.norm(loc=0, scale=1)
gamma_sp = sp.gamma(0.5, scale=2.0)
for i in range(10):
i_val = bern_sp.rvs(size=size, random_state=test_val_rng)
x_val = norm_sp.rvs(size=size, random_state=test_val_rng)
y_val = gamma_sp.rvs(size=size, random_state=test_val_rng)
component_logps = np.stack([norm_sp.logpdf(x_val), gamma_sp.logpdf(y_val)])[i_val]
exp_obs_logps = (component_logps + bern_sp.logpmf(i_val))
m_val = np.stack([x_val, y_val])[i_val]
logp_vals = M_logp_fn(p_val, m_val, i_val)
np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals) |
class ResNet50vd_dcn(nn.Module):
def __init__(self, cout=64, idx=0):
super(ResNet50vd_dcn, self).__init__()
self.cout = cout
self.idx = idx
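# NOTE: 'layers' is taken from module scope here; for a ResNet-50 backbone the standard stage depths would be [3, 4, 6, 3].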
self.resnet50vd_dcn = ResNet(channels=[64, 128, 256, 512], cout=cout, idx=idx, block=Bottleneck, layers=layers, stem_width=32, stem_type='deep', avg_down=True, bool_DeformableConv2d=True)
def forward(self, x):
x = self.resnet50vd_dcn(x)
return x |
def test_bose_hubbard_2x2_aperiodic():
hubbard_model = bose_hubbard(2, 2, 1.0, 4.0, chemical_potential=0.5, dipole=0.3, periodic=False)
assert (str(hubbard_model).strip() == '\n-1.0 [0 1^] +\n-1.0 [0 2^] +\n-2.5 [0^ 0] +\n2.0 [0^ 0 0^ 0] +\n0.3 [0^ 0 1^ 1] +\n0.3 [0^ 0 2^ 2] +\n-1.0 [0^ 1] +\n-1.0 [0^ 2] +\n-1.0 [1 3^] +\n-2.5 [1^ 1] +\n2.0 [1^ 1 1^ 1] +\n0.3 [1^ 1 3^ 3] +\n-1.0 [1^ 3] +\n-1.0 [2 3^] +\n-2.5 [2^ 2] +\n2.0 [2^ 2 2^ 2] +\n0.3 [2^ 2 3^ 3] +\n-1.0 [2^ 3] +\n-2.5 [3^ 3] +\n2.0 [3^ 3 3^ 3]\n'.strip()) |
def create_identifier(apps, schema_editor):
Catalog = apps.get_model('questions', 'Catalog')
Section = apps.get_model('questions', 'Section')
Subsection = apps.get_model('questions', 'Subsection')
QuestionEntity = apps.get_model('questions', 'QuestionEntity')
for obj in Catalog.objects.all():
if (not obj.key):
obj.key = slugify(obj.title_en)
obj.save()
for obj in Section.objects.all():
if (not obj.key):
obj.key = slugify(obj.title_en)
obj.save()
for obj in Subsection.objects.all():
if (not obj.key):
obj.key = slugify(obj.title_en)
obj.save()
for obj in QuestionEntity.objects.all():
if (not obj.key):
obj.key = slugify(obj.attribute_entity.key)
obj.save() |
class PlayWorldBorder(Packet):
id = 61
to = 1
def __init__(self, action: int, data: dict) -> None:
super().__init__()
self.action = action
self.data = data
def encode(self) -> bytes:
out = Buffer.pack_varint(self.action)
if (self.action == 0):
out += Buffer.pack('d', self.data['diameter'])
elif (self.action == 1):
out += Buffer.pack('d', self.data['old_diameter'])
out += Buffer.pack('d', self.data['new_diameter'])
out += Buffer.pack_varint(self.data['speed'])
elif (self.action == 2):
out += Buffer.pack('d', self.data['x'])
out += Buffer.pack('d', self.data['z'])
elif (self.action == 3):
out += Buffer.pack('d', self.data['x'])
out += Buffer.pack('d', self.data['z'])
out += Buffer.pack('d', self.data['old_diameter'])
out += Buffer.pack('d', self.data['new_diameter'])
out += Buffer.pack_varint(self.data['speed'])
out += Buffer.pack_varint(self.data['portal_teleport_boundary'])
out += Buffer.pack_varint(self.data['warning_blocks'])
out += Buffer.pack_varint(self.data['warning_time'])
elif (self.action == 4):
out += Buffer.pack_varint(self.data['warning_time'])
elif (self.action == 5):
out += Buffer.pack_varint(self.data['warning_blocks'])
return out |
def test_process_queries_full_query(cortex_product: CortexXDR, mocker):
cortex_product._queries = {}
cortex_product._results = {}
cortex_product._url = 'https://api.example.com'  # placeholder; the real test URL was elided from the source
mocker.patch('products.cortex_xdr.CortexXDR._get_default_header', return_value={})
criteria = {'query': ['FieldA=cmd.exe']}
cortex_product.nested_process_search(Tag('single_test'), criteria, {})
cortex_product.log = logging.getLogger('pytest_surveyor')
json_response = {'reply': []}
response_mock = mocker.Mock()
response_mock.json.return_value = json_response
cortex_product._session = mocker.Mock()
mocker.patch('products.cortex_xdr.CortexXDR._get_xql_results', return_value=[[], 0])
mocked_func = mocker.patch.object(cortex_product._session, 'post', return_value=response_mock)
cortex_product._process_queries()
params = {'request_data': {'query': 'FieldA=cmd.exe | fields agent_hostname, action_process_image_path, action_process_username, action_process_image_command_line, actor_process_image_path, actor_primary_username, actor_process_command_line, event_id', 'tenants': [], 'timeframe': {'relativeTime': ((((14 * 24) * 60) * 60) * 1000)}}}
mocked_func.assert_called_once_with('https://api.example.com/public_api/v1/xql/start_xql_query/', headers={}, data=json.dumps(params))  # base URL elided in the source; the XQL path follows the public Cortex XDR API
def test_chord_mode_name_deprecation(caplog):
chord = KeyChord([], 'a', [Key([], 'b', lazy.function(no_op))], mode='persistent_chord')
assert caplog.records
log = caplog.records[0]
assert (log.levelname == 'WARNING')
assert ("name='persistent_chord'" in log.message)
assert (chord.mode is True)
assert (chord.name == 'persistent_chord') |
def plot_longitudinal_profile_intensity(self, longitudinal_profile_E, extent, square_root=False, grid=False, xlim=None, ylim=None, units=mm, z_units=cm, dark_background=True):
from ..util.backend_functions import backend as bd
plt.style.use('dark_background' if dark_background else 'default')
I = bd.real((longitudinal_profile_E * np.conjugate(longitudinal_profile_E)))
I = I.transpose(1, 0)
if (bd != np):
I = I.get()
if square_root:
I = np.sqrt(I)
fig = plt.figure(figsize=(((16 / 9) * 6), 6))
ax = fig.add_subplot(1, 1, 1)
if (xlim is not None):
ax.set_xlim((np.array(xlim) / cm))
if (ylim is not None):
ax.set_ylim((np.array(ylim) / units))
unit_labels = {mm: '[mm]', um: '[um]', cm: '[cm]', nm: '[nm]', m: '[m]'}
if (units in unit_labels):
ax.set_ylabel(unit_labels[units])
if (z_units in unit_labels):
ax.set_xlabel('Screen Distance ' + unit_labels[z_units])
ax.set_title('Longitudinal Profile')
if grid:
ax.grid(alpha=0.2)
dz = ((extent[3] - extent[2]) / I.shape[1])
im = ax.imshow(I, cmap='inferno', extent=[((extent[2] - (dz / 2)) / z_units), ((extent[3] + (dz / 2)) / z_units), (float((extent[0] - (self.dx / 2))) / units), (float((extent[1] + (self.dx / 2))) / units)], interpolation='spline36', aspect='auto')
cb = fig.colorbar(im, orientation='vertical')
if (square_root == False):
cb.set_label('Intensity $\\left[W / m^2 \\right]$', fontsize=13, labelpad=14)
else:
cb.set_label('Square Root Intensity $\\left[ \\sqrt{W / m^2 } \\right]$', fontsize=13, labelpad=14)
plt.show() |
def test_make_valid_identifier():
assert (make_valid_identifier('has whitespaces ') == 'has_whitespaces')
assert (make_valid_identifier('has-hyphon') == 'has_hyphon')
assert (make_valid_identifier('special chars%') == 'special_chars')
assert (make_valid_identifier('UpperCase') == 'uppercase')
with pytest.raises(InvalidIdentifier):
make_valid_identifier('def') |
class F19_Bootloader(F18_Bootloader):
removedKeywords = F18_Bootloader.removedKeywords
removedAttrs = F18_Bootloader.removedAttrs
def __init__(self, writePriority=10, *args, **kwargs):
F18_Bootloader.__init__(self, writePriority, *args, **kwargs)
self.extlinux = kwargs.get('extlinux', False)
def _getArgsAsStr(self):
ret = F18_Bootloader._getArgsAsStr(self)
if self.extlinux:
ret += ' --extlinux'
return ret
def _getParser(self):
op = F18_Bootloader._getParser(self)
op.add_argument('--extlinux', action='store_true', default=False, version=F19, help='\n Use the extlinux bootloader instead of GRUB. This option\n only works on machines that are supported by extlinux.')
return op |
class DynamicLossScaler(object):
def __init__(self, init_scale=(2.0 ** 15), scale_factor=2.0, scale_window=2000, tolerance=0.0, threshold=None, min_loss_scale=0.0001):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = (- 1)
self._last_rescale_iter = (- 1)
self._overflows_since_rescale = 0
self.min_loss_scale = min_loss_scale
def scale(self, outputs):
return (self.loss_scale * outputs)
def update(self):
if (((self._iter - self._last_overflow_iter) % self.scale_window) == 0):
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
if (self.threshold is not None):
self.loss_scale = max(self.loss_scale, self.threshold)
def check_overflow(self, grad_norm):
if ((grad_norm == float('inf')) or (grad_norm != grad_norm)):
prev_scale = self.loss_scale
iter_since_rescale = (self._iter - self._last_rescale_iter)
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = (self._overflows_since_rescale / float(iter_since_rescale))
if (pct_overflow >= self.tolerance):
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
if (self.loss_scale <= self.min_loss_scale):
self.loss_scale = prev_scale
raise FloatingPointError('Minimum loss scale reached ({}). Your loss is probably exploding. Try lowering the learning rate, using gradient clipping or increasing the batch size.'.format(self.min_loss_scale))
self._iter += 1
raise OverflowError(('setting loss scale to: ' + str(self.loss_scale))) |
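# Hedged usage sketch (not from the source): `batches`, compute_loss,
# unscale_and_clip and grad_norm are hypothetical stand-ins for a
# fairseq-style fp16 training loop.
scaler = DynamicLossScaler(init_scale=2.0 ** 15)
for batch in batches:
    loss = scaler.scale(compute_loss(batch))
    loss.backward()
    norm = unscale_and_clip(optimizer, 1.0 / scaler.loss_scale)
    try:
        scaler.check_overflow(norm)  # shrinks the scale and raises OverflowError on inf/NaN
    except OverflowError:
        optimizer.zero_grad()  # skip this step; retry with the smaller scale
    else:
        optimizer.step()
        scaler.update()  # grow the scale after scale_window clean iterations |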
def test_current_test_env_var(pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
pytest_current_test_vars: List[Tuple[(str, str)]] = []
monkeypatch.setattr(sys, 'pytest_current_test_vars', pytest_current_test_vars, raising=False)
pytester.makepyfile("\n import pytest\n import sys\n import os\n\n \n def fix():\n sys.pytest_current_test_vars.append(('setup', os.environ['PYTEST_CURRENT_TEST']))\n yield\n sys.pytest_current_test_vars.append(('teardown', os.environ['PYTEST_CURRENT_TEST']))\n\n def test(fix):\n sys.pytest_current_test_vars.append(('call', os.environ['PYTEST_CURRENT_TEST']))\n ")
result = pytester.runpytest_inprocess()
assert (result.ret == 0)
test_id = 'test_current_test_env_var.py::test'
assert (pytest_current_test_vars == [('setup', (test_id + ' (setup)')), ('call', (test_id + ' (call)')), ('teardown', (test_id + ' (teardown)'))])
assert ('PYTEST_CURRENT_TEST' not in os.environ) |
class FastLookup(CompleteDirs):
def namelist(self):
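# memoized after the first call; AttributeError from the name-mangled attribute means it has not been computed yet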
with contextlib.suppress(AttributeError):
return self.__names
self.__names = super(FastLookup, self).namelist()
return self.__names
def _name_set(self):
with contextlib.suppress(AttributeError):
return self.__lookup
self.__lookup = super(FastLookup, self)._name_set()
return self.__lookup |
def crack(args, s):
s.adapter.set_tclk(1)
s.adapter.set_sclk(127)
code = []
while (len(code) != 7):
logging.info('Cracking byte {}/{}...'.format((len(code) + 1), 7))
byte_times = []
for try_byte in range(256):
samples = []
for _ in range(args.samples):
bin_code = (''.join((chr(c) for c in code)) + chr(try_byte))
s.unlock(bin_code.ljust(7, '\x00'))  # pad byte assumed; the fill character was elided in the source
samples.append(s.adapter.busy_timer())
samples = sorted(samples)
median = samples[(args.samples / 2)]
logging.debug('Code {}, times {}, median {}'.format(try_byte, samples, median))
byte_times.append(median)
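# Timing side channel: while more prefix bytes are correct the device keeps
# comparing (longer busy time), but a fully correct final byte unlocks at
# once, so the last byte is picked by the minimum instead of the maximum.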
correct = None
if (len(code) == 6):
correct = byte_times.index(min(byte_times))
else:
correct = byte_times.index(max(byte_times))
logging.info('Byte {}/7 -> {}'.format((len(code) + 1), correct))
code.append(correct)
bin_code = ''.join((chr(c) for c in code)).encode('hex')
logging.info('Finished. Code: {}, {}'.format(code, bin_code)) |
class TaskHandler(object):
def __init__(self):
self.tasks = {}
self.to_save = {}
def load(self):
to_save = False
value = ServerConfig.objects.conf('delayed_tasks', default={})
if isinstance(value, str):
tasks = dbunserialize(value)
else:
tasks = value
for (task_id, value) in tasks.items():
(date, callback, args, kwargs) = dbunserialize(value)
if isinstance(callback, tuple):
(obj, method) = callback
if (obj is None):
to_save = True
continue
callback = getattr(obj, method)
self.tasks[task_id] = (date, callback, args, kwargs)
if to_save:
self.save()
def save(self):
for (task_id, (date, callback, args, kwargs)) in self.tasks.items():
if (task_id in self.to_save):
continue
if getattr(callback, '__self__', None):
obj = callback.__self__
name = callback.__name__
callback = (obj, name)
safe_callback = None
try:
dbserialize(callback)
except (TypeError, AttributeError):
raise ValueError('the specified callback {} cannot be pickled. It must be a top-level function in a module or an instance method.'.format(callback))
else:
safe_callback = callback
self.to_save[task_id] = dbserialize((date, safe_callback, args, kwargs))
ServerConfig.objects.conf('delayed_tasks', self.to_save)
def add(self, timedelay, callback, *args, **kwargs):
persistent = kwargs.get('persistent', False)
if persistent:
del kwargs['persistent']
now = datetime.now()
delta = timedelta(seconds=timedelay)
safe_args = []
safe_kwargs = {}
used_ids = list(self.tasks.keys())
task_id = 1
while (task_id in used_ids):
task_id += 1
for arg in args:
try:
dbserialize(arg)
except (TypeError, AttributeError):
log_err('The positional argument {} cannot be pickled and will not be present in the arguments fed to the callback {}'.format(arg, callback))
else:
safe_args.append(arg)
for (key, value) in kwargs.items():
try:
dbserialize(value)
except (TypeError, AttributeError):
log_err('The {} keyword argument {} cannot be pickled and will not be present in the arguments fed to the callback {}'.format(key, value, callback))
else:
safe_kwargs[key] = value
self.tasks[task_id] = ((now + delta), callback, safe_args, safe_kwargs)
self.save()
return deferLater(reactor, timedelay, self.do_task, task_id)
def remove(self, task_id):
del self.tasks[task_id]
if (task_id in self.to_save):
del self.to_save[task_id]
self.save()
def do_task(self, task_id):
(date, callback, args, kwargs) = self.tasks.pop(task_id)
if (task_id in self.to_save):
del self.to_save[task_id]
self.save()
callback(*args, **kwargs)
def create_delays(self):
now = datetime.now()
for (task_id, (date, callback, args, kwargs)) in self.tasks.items():
seconds = max(0, (date - now).total_seconds())
deferLater(reactor, seconds, self.do_task, task_id) |
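# Hedged usage sketch (not from the source): my_alert is a hypothetical
# top-level function; only module-level callables or instance methods survive
# the dbserialize() round-trip used for persistence.
handler = TaskHandler()
handler.load()  # restore tasks persisted in ServerConfig
handler.add(60, my_alert, 'time is up', persistent=True)
handler.create_delays()  # re-arm deferreds for everything loaded |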
@lru_cache()
def _get_device_calibration(device_name: str):
processor_id = recirq.get_processor_id_by_device_name(device_name)
if (processor_id is None):
device_obj = recirq.get_device_obj_by_name(device_name)
dummy_graph = ccr.gridqubits_to_graph_device((device_obj.metadata.qubit_set if (device_obj.metadata is not None) else ()))
nx.set_edge_attributes(dummy_graph, name='weight', values=0.01)
return dummy_graph
calibration = cg.get_engine_calibration(processor_id)
err_graph = calibration_data_to_graph(calibration)
return err_graph |
def dropout_slim_model():
inputs = tf.keras.Input(shape=(10, 10, 3))
x = slim.conv2d(inputs, 16, [3, 3])
x = slim.dropout(x, keep_prob=0.6)
x = tf.identity(x)
x = slim.conv2d(x, 8, [2, 2])
x = slim.flatten(x)
outputs = slim.fully_connected(x, num_outputs=10, activation_fn=tf.nn.softmax, scope='dropout_slim_model')
return outputs |
class VolatilityVolumeShareTestCase(WithCreateBarData, WithSimParams, WithDataPortal, ZiplineTestCase):
ASSET_START_DATE = pd.Timestamp('2006-02-10')
TRADING_CALENDAR_STRS = ('NYSE', 'us_futures')
TRADING_CALENDAR_PRIMARY_CAL = 'us_futures'
@classmethod
def init_class_fixtures(cls):
super(VolatilityVolumeShareTestCase, cls).init_class_fixtures()
cls.ASSET = cls.asset_finder.retrieve_asset(1000)
@classmethod
def make_futures_info(cls):
return pd.DataFrame({'sid': [1000, 1001], 'root_symbol': ['CL', 'FV'], 'symbol': ['CLF07', 'FVF07'], 'start_date': [cls.ASSET_START_DATE, cls.START_DATE], 'end_date': [cls.END_DATE, cls.END_DATE], 'multiplier': [500, 500], 'exchange': ['CMES', 'CMES']})
@classmethod
def make_future_minute_bar_data(cls):
data = list(super(VolatilityVolumeShareTestCase, cls).make_future_minute_bar_data())
data[0][1].loc[:cls.ASSET_START_DATE] = np.NaN
return data
def test_calculate_impact_buy(self):
answer_key = [(91485., 5), (91486., 5), (None, None)]
order = Order(dt=pd.Timestamp.now(tz='utc').round('min'), asset=self.ASSET, amount=10)
self._calculate_impact(order, answer_key)
def test_calculate_impact_sell(self):
answer_key = [(91485., (- 5)), (91486., (- 5)), (None, None)]
order = Order(dt=pd.Timestamp.now(tz='utc').round('min'), asset=self.ASSET, amount=(- 10))
self._calculate_impact(order, answer_key)
def _calculate_impact(self, test_order, answer_key):
model = VolatilityVolumeShare(volume_limit=0.05)
first_minute = pd.Timestamp('2006-03-31 11:35AM', tz='UTC')
next_3_minutes = self.trading_calendar.minutes_window(first_minute, 3)
remaining_shares = test_order.open_amount
for (i, minute) in enumerate(next_3_minutes):
data = self.create_bardata(simulation_dt_func=(lambda : minute))
new_order = Order(dt=data.current_dt, asset=self.ASSET, amount=remaining_shares)
(price, amount) = model.process_order(data, new_order)
self.assertEqual(price, answer_key[i][0])
self.assertEqual(amount, answer_key[i][1])
amount = (amount or 0)
if (remaining_shares < 0):
remaining_shares = min(0, (remaining_shares - amount))
else:
remaining_shares = max(0, (remaining_shares - amount))
def test_calculate_impact_without_history(self):
model = VolatilityVolumeShare(volume_limit=1)
late_start_asset = self.asset_finder.retrieve_asset(1000)
early_start_asset = self.asset_finder.retrieve_asset(1001)
cases = [(pd.Timestamp('2006-01-05 11:35AM', tz='UTC'), early_start_asset), (pd.Timestamp('2006-02-10 11:35AM', tz='UTC'), late_start_asset), (pd.Timestamp('2006-02-17 11:35AM', tz='UTC'), late_start_asset)]
for (minute, asset) in cases:
data = self.create_bardata(simulation_dt_func=(lambda : minute))
order = Order(dt=data.current_dt, asset=asset, amount=10)
(price, amount) = model.process_order(data, order)
avg_price = ((data.current(asset, 'high') + data.current(asset, 'low')) / 2)
expected_price = (avg_price * (1 + model.NO_DATA_VOLATILITY_SLIPPAGE_IMPACT))
self.assertAlmostEqual(price, expected_price, delta=0.001)
self.assertEqual(amount, 10)
def test_impacted_price_worse_than_limit(self):
model = VolatilityVolumeShare(volume_limit=0.05)
minute = pd.Timestamp('2006-03-01 11:35AM', tz='UTC')
data = self.create_bardata(simulation_dt_func=(lambda : minute))
order = Order(dt=data.current_dt, asset=self.ASSET, amount=10, limit=59800)
(price, amount) = model.process_order(data, order)
self.assertIsNone(price)
self.assertIsNone(amount)
def test_low_transaction_volume(self):
model = VolatilityVolumeShare(volume_limit=0.001)
minute = pd.Timestamp('2006-03-01 11:35AM', tz='UTC')
data = self.create_bardata(simulation_dt_func=(lambda : minute))
order = Order(dt=data.current_dt, asset=self.ASSET, amount=10)
(price, amount) = model.process_order(data, order)
self.assertIsNone(price)
self.assertIsNone(amount) |
@add_start_docstrings('\n    ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', CONVNEXT_START_DOCSTRING)
class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config: ConvNextConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convnext = TFConvNextMainLayer(config, name='convnext')
self.classifier = tf.keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='classifier')
@unpack_inputs
@add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def call(self, pixel_values: Optional[TFModelInputType]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[Union[(np.ndarray, tf.Tensor)]]=None, training: Optional[bool]=False) -> Union[(TFSequenceClassifierOutput, Tuple[tf.Tensor])]:
output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
if (pixel_values is None):
raise ValueError('You have to specify pixel_values')
outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
pooled_output = (outputs.pooler_output if return_dict else outputs[1])
logits = self.classifier(pooled_output)
loss = (None if (labels is None) else self.hf_compute_loss(labels=labels, logits=logits))
if (not return_dict):
output = ((logits,) + outputs[2:])
return (((loss,) + output) if (loss is not None) else output)
return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=output.hidden_states) |
class TestCopyPlane(EndianTest):
def setUp(self):
self.req_args_0 = {'bit_plane': , 'dst_drawable': , 'dst_x': (- 25480), 'dst_y': (- 26229), 'gc': , 'height': 60447, 'src_drawable': , 'src_x': (- 4634), 'src_y': (- 17345), 'width': 53771}
self.req_bin_0 = b'?\x00\x08\x00\x8d \xf80H)\xa4o\x85\xed\xf5\x04\xe6\xed?\xbcx\x9c\x8b\x99\x0b\xd2\x1f\xecj7\xec:'
def testPackRequest0(self):
bin = request.CopyPlane._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0(self):
(args, remain) = request.CopyPlane._request.parse_binary(self.req_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.req_args_0) |
def test_unrecognised_optional_parameters():
client = Client('localhost', 5679)
pdu = DeliverSM('deliver_sm', client=client, allow_unknown_opt_params=True)
pdu.parse(b'\x00\x00\x00\xa8\x00\x00\x00\x05\x00\x00\x00\x00/p\xc6\x9a\x00\x00\x\x00\x01\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00iid: sub:001 dlvrd:001 submit date: done date: stat:DELIVRD err:000 text:\x14\x03\x00\x07(null)\x00\x14\x02\x00\x04612\x00')
with pytest.raises(exceptions.UnknownCommandError):
pdu2 = DeliverSM('deliver_sm', client=client)
pdu2.parse(b'\x00\x00\x00\xa8\x00\x00\x00\x05\x00\x00\x00\x00/p\xc6\x9a\x00\x00\x\x00\x01\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00iid: sub:001 dlvrd:001 submit date: done date: stat:DELIVRD err:000 text:\x14\x03\x00\x07(null)\x00\x14\x02\x00\x04612\x00') |
def test_instance_method() -> None:
nodes_ = builder.extract_node('\n class A:\n def method(self, x):\n return x\n\n A().method(42) #@\n\n # In this case, the first argument is bound to self, which is ignored in the method\n A.method(1, 42) #@\n ')
for node in nodes_:
assert isinstance(node, nodes.NodeNG)
inferred = node.inferred()
assert (len(inferred) == 1)
assert isinstance(inferred[0], nodes.Const)
assert (inferred[0].value == 42) |
class TestNanPayloads(unittest.TestCase):
def test_normal_nan(self):
normal_nan = float('nan')
(payload, namespace) = get_payload_from_nan(normal_nan)
self.assertIs(payload, None)
self.assertIs(namespace, None)
def test_roundtrip_payload(self):
for namespace in range(0, 256):
for payload in range((- 50), 500):
nan = make_nan_with_payload(payload, namespace)
(new_payload, new_namespace) = get_payload_from_nan(nan)
self.assertEqual(namespace, new_namespace)
self.assertEqual(payload, new_payload)
self.assertNotEqual(nan, nan)
def test_user_namespace_default(self):
nan = make_nan_with_payload(42)
(payload, namespace) = get_payload_from_nan(nan)
self.assertEqual(42, payload)
self.assertEqual(255, namespace)
self.assertNotEqual(nan, nan) |
@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [
({}, None, None),
({'refs': {'kind': 'branch', 'name': 'invalid'}}, TriggerStartException, 'Could not find branch in repository'),
({'refs': {'kind': 'tag', 'name': 'invalid'}}, TriggerStartException, 'Could not find tag in repository'),
({'refs': {'kind': 'branch', 'name': 'master'}}, None, None),
({'refs': {'kind': 'tag', 'name': 'sometag'}}, None, None)])
def test_manual_start(run_parameters, expected_error, expected_message, githost_trigger):
if (expected_error is not None):
with pytest.raises(expected_error) as ipe:
githost_trigger.manual_start(run_parameters)
assert (str(ipe.value) == expected_message)
else:
assert isinstance(githost_trigger.manual_start(run_parameters), PreparedBuild) |
def _read_quadrangle_annotations(csv_reader, classes, detect_text=False):
result = OrderedDict()
for (line, row) in enumerate(csv_reader, 1):
try:
(img_file, x1, y1, x2, y2, x3, y3, x4, y4, class_name) = row[:10]
if (img_file not in result):
result[img_file] = []
if ((x1, y1, x2, y2, x3, y3, x4, y4, class_name) == ('', '', '', '', '', '', '', '', '')):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
x3 = _parse(x3, int, 'line {}: malformed x3: {{}}'.format(line))
y3 = _parse(y3, int, 'line {}: malformed y3: {{}}'.format(line))
x4 = _parse(x4, int, 'line {}: malformed x4: {{}}'.format(line))
y4 = _parse(y4, int, 'line {}: malformed y4: {{}}'.format(line))
if detect_text:
if (class_name == '###'):
continue
else:
class_name = 'text'
if (class_name not in classes):
raise ValueError(f"line {line}: unknown class name: '{class_name}' (classes: {classes})")
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name})
except ValueError:
raise_from(ValueError(f"line {line}: format should be 'img_file,x1,y1,x2,y2,x3,y3,x4,y4,class_name' or 'img_file,,,,,'"), None)
return result |
def get_queries_from_constant_and_query(constant_smiles, query_smiles):
num_constant_attachments = constant_smiles.count('*')
num_query_attachments = query_smiles.count('*')
if (num_constant_attachments != num_query_attachments):
raise click.UsageError(f'Mismatch between the number of attachment points in the --query ({num_query_attachments}) and the --constant ({num_constant_attachments})')
return [(constant_smiles, [query_smiles])] |
class _MarkerFinder():
def __init__(self, stream):
super(_MarkerFinder, self).__init__()
self._stream = stream
@classmethod
def from_stream(cls, stream):
return cls(stream)
def next(self, start):
position = start
while True:
position = self._offset_of_next_ff_byte(start=position)
(position, byte_) = self._next_non_ff_byte(start=(position + 1))
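# a 0xFF followed by 0x00 is a byte-stuffed data byte in the entropy-coded stream, not a marker, so keep scanning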
if (byte_ == b'\x00'):
continue
(marker_code, segment_offset) = (byte_, (position + 1))
break
return (marker_code, segment_offset)
def _next_non_ff_byte(self, start):
self._stream.seek(start)
byte_ = self._read_byte()
while (byte_ == b'\xff'):
byte_ = self._read_byte()
offset_of_non_ff_byte = (self._stream.tell() - 1)
return (offset_of_non_ff_byte, byte_)
def _offset_of_next_ff_byte(self, start):
self._stream.seek(start)
byte_ = self._read_byte()
while (byte_ != b'\xff'):
byte_ = self._read_byte()
offset_of_ff_byte = (self._stream.tell() - 1)
return offset_of_ff_byte
def _read_byte(self):
byte_ = self._stream.read(1)
if (not byte_):
raise Exception('unexpected end of file')
return byte_ |
@pytest.mark.parametrize('parse_pattern, string, expected_args, expected_kwargs', [
('Given I have the number {:d}', 'Given I have the number 5', (5,), {}),
('Given I have the number {number:d}', 'Given I have the number 5', tuple(), {'number': 5}),
('Given I have the number {number:d} and {:d}', 'Given I have the number 4 and 2', (2,), {'number': 4})
], ids=['Parse Pattern with unnamed groups', 'Parse Pattern with named groups', 'Parse Pattern with unnamed and named groups'])
def test_parse_step_arguments_object(parse_pattern, string, expected_args, expected_kwargs):
parser = Parser(parse_pattern)
match = parser.search(string, evaluate_result=False)
args = matcher.ParseStepArguments(match)
(actual_args, actual_kwargs) = args.evaluate()
assert (actual_args == expected_args)
assert (actual_kwargs == expected_kwargs) |
class BloombergDataLicenseTypeConverter():
def infer_type(self, series: QFSeries, bbg_data_type: str) -> QFSeries:
field_types = {'String': self._string_conversion, 'Character': self._string_conversion, 'Long Character': self._string_conversion, 'Date or Time': self._date_conversion, 'Integer': self._float_conversion, 'Integer/Real': self._float_conversion, 'Date': self._date_conversion, 'Real': self._float_conversion, 'Month/Year': self._string_conversion, 'Price': self._float_conversion, 'Bulk Format': self._bulk_conversion}
_conversion_fun = field_types.get(bbg_data_type, (lambda series: series))  # identity fallback; the builtin id() would return an object address, not the series
return _conversion_fun(series)
@staticmethod
def _date_conversion(series: QFSeries) -> QFSeries:
return to_datetime(series, format='%Y%m%d', errors='coerce').replace({NaT: None})
@staticmethod
def _string_conversion(series: QFSeries) -> QFSeries:
return series.apply((lambda s: (s.strip() if notna(s) else None))).replace({'N.A.': None})
@staticmethod
def _float_conversion(series: QFSeries) -> QFSeries:
return series.replace({'N.A.': None}).astype(float64)
@staticmethod
def _bulk_conversion(series: QFSeries) -> QFSeries:
def _split_bulk_list(_l: List):
_char = ';4;'
return (_l[(_l.find(_char) + len(_char)):].rstrip(';').split(_char) if (len(_l) > 0) else [])
return series.fillna('').apply(_split_bulk_list) |
class OSM_strategy(Policy):
def __init__(self, observation_space, action_space, config):
Policy.__init__(self, observation_space, action_space, config)
self.osm = OSM(config['alpha'], config['gamma'], config['blocks'])
self.osm.MDP_matrix_init()
(P, R) = self.osm.get_MDP_matrix()
solver = mdptoolbox.mdp.PolicyIteration(P, R, 0.99)
solver.run()
self.blocks = config['blocks']
self.optimal_policy = solver.policy
def OSM_act(self, s):
curr_s = list(s)
if (s[3] == constants.NORMAL):
curr_s[3] = 'normal'
elif (s[3] == constants.FORKING):
curr_s[3] = 'forking'
else:
curr_s[3] = 'catch up'
smaller_state = (curr_s[:2] + [curr_s[3]])
smaller_state = tuple(smaller_state)
if ((curr_s[0] >= self.blocks) or (curr_s[1] >= self.blocks)):
if (curr_s[0] > curr_s[1]):
return 1
else:
return 0
if (smaller_state in self.osm._state_dict):
return self.optimal_policy[self.osm._name_to_index(smaller_state)]
elif (curr_s[0] > curr_s[1]):
return 1
else:
return 0
def compute_actions(self, obs_batch, state_batches, prev_action_batch=None, prev_reward_batch=None, info_batch=None, episodes=None, **kwargs):
actions = []
for obs in obs_batch:
a = int(round(obs[0]))
h = int(round(obs[1]))
o = int(round(obs[2]))
f = int(round(obs[3]))
actions.append(self.OSM_act([a, h, o, f]))
return (actions, [], {})
def learn_on_batch(self, samples):
pass
def get_weights(self):
pass
def set_weights(self, weights):
pass |
def desktop_set_B3():
global REQUIRE_REBOOT
sp.call(shlex.split('systemctl set-default graphical.target'))
# unit names below were elided in the source; getty@tty1 is assumed, following the raspi-config convention
if os.path.isfile('/etc/systemd/system/getty.target.wants/getty@tty1.service'):
os.remove('/etc/systemd/system/getty.target.wants/getty@tty1.service')
os.symlink('/lib/systemd/system/getty@.service', '/etc/systemd/system/getty.target.wants/getty@tty1.service')
if os.path.isfile('/etc/systemd/system/getty@tty1.service.d/autologin.conf'):
os.remove('/etc/systemd/system/getty@tty1.service.d/autologin.conf')
sp.call(shlex.split('update_desktop_login "False" {user}'.format(user=user)))
REQUIRE_REBOOT = True |
class Regex(Token):
def __init__(self, pattern: Any, flags: Union[(re.RegexFlag, int)]=0, as_group_list: bool=False, as_match: bool=False, *, asGroupList: bool=False, asMatch: bool=False):
super().__init__()
asGroupList = (asGroupList or as_group_list)
asMatch = (asMatch or as_match)
if isinstance(pattern, str_type):
if (not pattern):
raise ValueError('null string passed to Regex; use Empty() instead')
self._re = None
self.reString = self.pattern = pattern
self.flags = flags
elif (hasattr(pattern, 'pattern') and hasattr(pattern, 'match')):
self._re = pattern
self.pattern = self.reString = pattern.pattern
self.flags = flags
else:
raise TypeError('Regex may only be constructed with a string or a compiled RE object')
self.errmsg = ('Expected ' + self.name)
self.mayIndexError = False
self.asGroupList = asGroupList
self.asMatch = asMatch
if self.asGroupList:
self.parseImpl = self.parseImplAsGroupList
if self.asMatch:
self.parseImpl = self.parseImplAsMatch
@cached_property
def re(self):
if self._re:
return self._re
try:
return re.compile(self.pattern, self.flags)
except re.error:
raise ValueError(f'invalid pattern ({self.pattern!r}) passed to Regex')
@cached_property
def re_match(self):
return self.re.match
@cached_property
def mayReturnEmpty(self):
return (self.re_match('') is not None)
def _generateDefaultName(self) -> str:
return 'Re:({})'.format(repr(self.pattern).replace('\\\\', '\\'))
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if (not result):
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = ParseResults(result.group())
d = result.groupdict()
for (k, v) in d.items():
ret[k] = v
return (loc, ret)
def parseImplAsGroupList(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if (not result):
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.groups()
return (loc, ret)
def parseImplAsMatch(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if (not result):
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result
return (loc, ret)
def sub(self, repl: str) -> ParserElement:
if self.asGroupList:
raise TypeError('cannot use sub() with Regex(as_group_list=True)')
if (self.asMatch and callable(repl)):
raise TypeError('cannot use sub() with a callable with Regex(as_match=True)')
if self.asMatch:
def pa(tokens):
return tokens[0].expand(repl)
else:
def pa(tokens):
return self.re.sub(repl, tokens[0])
return self.add_parse_action(pa) |
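# Hedged usage sketch (not from the source), assuming this is pyparsing's
# Regex class as exposed by the package:
import pyparsing as pp
title = pp.Regex(r'<title>(?P<text>[^<]*)</title>')
print(title.parse_string('<title>hello</title>')['text'])  # named groups land in ParseResults
swap = pp.Regex(r'(\w+) (\w+)').sub(r'\2 \1')
print(swap.transform_string('left right'))  # -> 'right left' |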
def test_cell_n2(L=5, mesh=([9] * 3)):
cell = pbcgto.Cell()
cell.unit = 'B'
cell.atom.extend([['O', ((L / 2.0), (L / 2.0), (L / 2.0))], ['H', (((L / 2.0) - 0.68944), ((L / 2.0) + 0.578509), (L / 2.0))], ['H', (((L / 2.0) + 0.68944), ((L / 2.0) - 0.578509), (L / 2.0))]])
cell.a = (L * np.identity(3))
cell.basis = 'sto-3g'
cell.pseudo = 'gth-pade'
cell.mesh = mesh
cell.output = '/dev/null'
cell.build()
return cell |
class JobTelecommute(JobLocationMenu, JobList):
template_name = 'jobs/job_telecommute_list.html'
def get_queryset(self):
return super().get_queryset().visible().select_related().filter(telecommuting=True)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['jobs_count'] = len(self.object_list)
context['jobs'] = self.object_list
return context |
def ensemble_models(model_paths: List[str], cxr_filepath: str, cxr_labels: List[str], cxr_pair_template: Tuple[str], cache_dir: str=None, save_name: str=None) -> Tuple[(List[np.ndarray], np.ndarray)]:
predictions = []
model_paths = sorted(model_paths)
for path in model_paths:
model_name = Path(path).stem
(model, loader) = make(model_path=path, cxr_filepath=cxr_filepath)
if (cache_dir is not None):
if (save_name is not None):
cache_path = (Path(cache_dir) / f'{save_name}_{model_name}.npy')
else:
cache_path = (Path(cache_dir) / f'{model_name}.npy')
if ((cache_dir is not None) and os.path.exists(cache_path)):
print('Loading cached prediction for {}'.format(model_name))
y_pred = np.load(cache_path)
else:
print('Inferring model {}'.format(path))
y_pred = run_softmax_eval(model, loader, cxr_labels, cxr_pair_template)
if (cache_dir is not None):
Path(cache_dir).mkdir(exist_ok=True, parents=True)
np.save(file=cache_path, arr=y_pred)
predictions.append(y_pred)
y_pred_avg = np.mean(predictions, axis=0)
return (predictions, y_pred_avg) |
def bot_factory(repo='foo/foo', user_token='foo', bot_token=None, bot_class=Bot, ignore_ssl=False, prs=list()):
bot = bot_class(repo=repo, user_token=user_token, bot_token=bot_token, ignore_ssl=ignore_ssl)
bot._fetched_prs = True
bot.req_bundle.pull_requests = prs
bot.provider = Mock()
bot.config.update_config({'close_prs': True, 'pin': True, 'branch': 'base_branch', 'search': True})
return bot |
@urlmatch(netloc='fakegitlab', path='/api/v4/projects/4/repository/tags$')
def project_tags_handler(_, request):
if (not (request.headers.get('Authorization') == 'Bearer foobar')):
return {'status_code': 401}
return {'status_code': 200, 'headers': {'Content-Type': 'application/json'}, 'content': json.dumps([{'name': 'sometag', 'commit': {'id': '60a8ff033665e1207714d6670fcd7b65304ec02f'}}, {'name': 'someothertag', 'commit': {'id': '60a8ff033665e1207714d6670fcd7b65304ec02f'}}])} |
@pytest.mark.parametrize('username,password', users)
def test_update(db, client, username, password):
client.login(username=username, password=password)
instances = Catalog.objects.all()
for instance in instances:
catalog_sections = [{'section': section.section.id, 'order': section.order} for section in instance.catalog_sections.all()]
url = reverse(urlnames['detail'], args=[instance.pk])
data = {'uri_prefix': instance.uri_prefix, 'uri_path': instance.uri_path, 'comment': instance.comment, 'order': instance.order, 'title_en': instance.title_lang1, 'title_de': instance.title_lang2}
response = client.put(url, data, content_type='application/json')
assert (response.status_code == get_obj_perms_status_code(instance, username, 'update')), response.json()
instance.refresh_from_db()
assert (catalog_sections == [{'section': section.section.id, 'order': section.order} for section in instance.catalog_sections.all()]) |
class Chunker():
def __init__(self, grammar: nltk.RegexpParser):
self.grammar = grammar
def chunk_sentence(self, sentence: str):
pos_tagged_sentence = PosTagger(sentence).pos_tag()
return self.chunk_pos_tagged_sentence(pos_tagged_sentence)
def chunk_pos_tagged_sentence(self, pos_tagged_sentence):
chunked_tree = self.grammar.parse(pos_tagged_sentence)
chunk_dict = self.extract_rule_and_chunk(chunked_tree)
return chunk_dict
def extract_rule_and_chunk(self, chunked_tree: nltk.Tree) -> dict:
def recursively_get_pos_only(tree, collector_list=None, depth_limit=100):
if (collector_list is None):
collector_list = []
if (depth_limit <= 0):
return collector_list
for subtree in tree:
if isinstance(subtree, nltk.Tree):
recursively_get_pos_only(subtree, collector_list, (depth_limit - 1))
else:
collector_list.append(subtree)
return collector_list
def get_pos_tagged_and_append_to_chunk_dict(chunk_dict, subtrees):
pos_tagged = recursively_get_pos_only(subtrees)
chunk_dict[subtrees.label()].append(pos_tagged)
chunk_dict = nltk.defaultdict(list)
for subtrees in chunked_tree:
if isinstance(subtrees, nltk.Tree):
get_pos_tagged_and_append_to_chunk_dict(chunk_dict, subtrees)
for sub in subtrees:
if isinstance(sub, nltk.Tree):
get_pos_tagged_and_append_to_chunk_dict(chunk_dict, sub)
return chunk_dict
@staticmethod
def get_chunk(pos_tagged_sentence, src_target_grammar_key: str) -> list:
compile_grammar = PatternGrammar().get_source_target_compiled_grammar(clause=src_target_grammar_key)
return Chunker.apply_grammar_on_pos_tagged_chunk(compile_grammar, pos_tagged_sentence)
@staticmethod
def apply_grammar_on_pos_tagged_chunk(compile_grammar, pos_tagged_sentence):
chunk_dict = Chunker(compile_grammar).chunk_pos_tagged_sentence(pos_tagged_sentence)
return (list(chunk_dict.values()) if chunk_dict else []) |
def kernel_feature_creator(data, projection_matrix, is_query):
head_dim = tf.constant(data.shape[(- 1)], dtype=tf.dtypes.float32)
support_dim = tf.constant(projection_matrix.shape[0], dtype=tf.dtypes.float32)
data_normalizer = (1.0 / tf.math.sqrt(tf.math.sqrt(head_dim)))
ratio = (1.0 / tf.math.sqrt(support_dim))
data_mod_shape = tf.concat([tf.shape(data)[0:2], tf.shape(projection_matrix)], axis=0)
random_matrix = (tf.zeros(data_mod_shape) + projection_matrix)
normalised_data = (data_normalizer * data)
dot_product_equation = build_kernel_equation(len(data.shape))
data_hat = tf.einsum(dot_product_equation, normalised_data, random_matrix)
diag_data = tf.math.square(data)
diag_data = tf.math.reduce_sum(diag_data, axis=(- 1))
diag_data = (((diag_data / 2.0) * data_normalizer) * data_normalizer)
diag_data = tf.expand_dims(diag_data, axis=(- 1))
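# FAVOR+ positive random features: exp(w.x' - |x'|^2/2 - max) stays positive
# and numerically stable; queries subtract a per-position max over the
# projection dimension, while keys share a global max over the whole tensor.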
if is_query:
last_dims_t = (len(data_hat.shape) - 1)
func = partial(tf.math.reduce_max, axis=last_dims_t, keepdims=True)
else:
func = tf.math.reduce_max
out = (ratio * (tf.math.exp(((data_hat - diag_data) - func(data_hat))) + 0.0001))
return out |
def delete_links(cwd: Optional[Union[(Path, str)]]=None, verbose: Optional[bool]=None) -> List[Path]:
if (cwd is None):
cwd = Path.cwd()
elif isinstance(cwd, str):
cwd = Path(cwd)
delete = []
for path in cwd.iterdir():
if path.is_symlink():
delete.append(path)
if (delete and is_verbose(verbose)):
names = [path.name for path in delete]
log.ok(f"unlink: {' '.join(sorted(names))}")
deleted = []
for path in delete:
path.unlink()
deleted.append(path)
return deleted |
class CompoundLoss(_Loss):
def __init__(self, blocks=[1, 2, 3, 4], mse_weight=1, resnet_weight=0.01):
super(CompoundLoss, self).__init__()
self.mse_weight = mse_weight
self.resnet_weight = resnet_weight
self.blocks = blocks
self.model = ResNet50FeatureExtractor(pretrained=True)
if torch.cuda.is_available():
self.model = self.model.cuda()
self.model.eval()
self.criterion = nn.MSELoss()
def forward(self, input, target):
loss_value = 0
input_feats = self.model(torch.cat([input, input, input], dim=1))
target_feats = self.model(torch.cat([target, target, target], dim=1))
feats_num = len(self.blocks)
for idx in range(feats_num):
loss_value += self.criterion(input_feats[idx], target_feats[idx])
loss_value /= feats_num
loss = ((self.mse_weight * self.criterion(input, target)) + (self.resnet_weight * loss_value))
return loss |
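# Hedged usage sketch (not from the source): single-channel tensors, since
# forward() tiles inputs to 3 channels before the (external)
# ResNet50FeatureExtractor; moved to CUDA when available, matching __init__.
import torch
loss_fn = CompoundLoss(mse_weight=1, resnet_weight=0.01)
pred, target = torch.rand(2, 1, 64, 64), torch.rand(2, 1, 64, 64)
if torch.cuda.is_available():
    pred, target = pred.cuda(), target.cuda()
print(loss_fn(pred, target)) |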
class StopInternalConnectivity(InternalConnectivity):
def forward_propagate_the_masks(self, input_mask_list: List[List[int]], output_mask_list: List[List[int]]) -> bool:
mask_changed = False
return mask_changed
def backward_propagate_the_masks(self, output_mask_list: List[List[int]], input_mask_list: List[List[int]]) -> bool:
mask_changed = False
return mask_changed |
def add(*args, **kwargs):
if (len(args) > 1):
(val1, val2) = (args[0], args[1])
try:
val1 = literal_eval(val1.strip())
except Exception:
pass
try:
val2 = literal_eval(val2.strip())
except Exception:
pass
return (val1 + val2)
raise ValueError('$add requires two arguments.') |
def import_dotted_path(dotted_path: str) -> Callable:
(module_name, component_name) = dotted_path.rsplit('.', 1)
try:
module = import_module(module_name)
except ImportError as error:
raise RuntimeError(f'Failed to import {module_name!r} while loading {component_name!r}') from error
return getattr(module, component_name) |
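# Usage sketch: resolves 'package.module.attr' paths at runtime.
json_dumps = import_dotted_path('json.dumps')
assert json_dumps({'a': 1}) == '{"a": 1}' |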
def all_gatherv(input, return_boundaries=False):
num_elements = torch.tensor(input.size(0), device=input.device)
num_elements_per_process = all_gather(num_elements, cat=False)
max_elements = num_elements_per_process.max()
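# pad each rank's tensor with zeros up to the longest one so all_gather sees equal shapes; the padding is sliced away again after the gather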
difference = (max_elements - input.size(0))
if (difference > 0):
input = torch.cat([input, torch.zeros(difference, *input.size()[1:], device=input.device, dtype=input.dtype)], 0)
inputs = all_gather(input, cat=False)
inputs = torch.cat([row[:num_ele] for (row, num_ele) in zip(inputs, num_elements_per_process)], 0)
if return_boundaries:
boundaries = torch.cumsum(num_elements_per_process, dim=0)
boundaries = torch.cat([torch.zeros(1, device=input.device, dtype=torch.int), boundaries], 0)
return (inputs, boundaries.long())
else:
return inputs |
class Scrim(BaseDbModel):
class Meta():
table = 'sm.scrims'
id = fields.BigIntField(pk=True, index=True)
guild_id = fields.BigIntField()
name = fields.TextField(default='Quotient-Scrims')
registration_channel_id = fields.BigIntField(index=True)
slotlist_channel_id = fields.BigIntField()
slotlist_message_id = fields.BigIntField(null=True)
role_id = fields.BigIntField(null=True)
required_mentions = fields.IntField(default=4)
start_from = fields.IntField(default=1)
available_slots = ArrayField(fields.IntField(), default=list)
total_slots = fields.IntField()
host_id = fields.BigIntField()
open_time = fields.DatetimeField()
opened_at = fields.DatetimeField(null=True)
closed_at = fields.DatetimeField(null=True)
autoclean = ArrayField(fields.CharEnumField(AutocleanType), default=(lambda : list(AutocleanType)))
autoclean_done = fields.BooleanField(default=False)
autoclean_time = fields.DatetimeField(null=True)
autoslotlist = fields.BooleanField(default=True)
ping_role_id = fields.BigIntField(null=True)
multiregister = fields.BooleanField(default=False)
stoggle = fields.BooleanField(default=True)
open_role_id = fields.BigIntField(null=True)
autodelete_rejects = fields.BooleanField(default=False)
autodelete_extras = fields.BooleanField(default=True)
teamname_compulsion = fields.BooleanField(default=False)
time_elapsed = fields.CharField(null=True, max_length=100)
show_time_elapsed = fields.BooleanField(default=True)
open_days = ArrayField(fields.CharEnumField(Day), default=(lambda : list(Day)))
slotlist_format = fields.JSONField(default=dict)
no_duplicate_name = fields.BooleanField(default=False)
open_message = fields.JSONField(default=dict)
close_message = fields.JSONField(default=dict)
banlog_channel_id = fields.BigIntField(null=True)
match_time = fields.DatetimeField(null=True)
emojis = fields.JSONField(default=dict)
cdn = fields.JSONField(default={'status': False, 'countdown': 3, 'msg': {}})
required_lines = fields.SmallIntField(default=0)
allow_duplicate_tags = fields.BooleanField(default=True)
assigned_slots: fields.ManyToManyRelation['AssignedSlot'] = fields.ManyToManyField('models.AssignedSlot')
reserved_slots: fields.ManyToManyRelation['ReservedSlot'] = fields.ManyToManyField('models.ReservedSlot')
banned_teams: fields.ManyToManyRelation['BannedTeam'] = fields.ManyToManyField('models.BannedTeam')
slot_reminders: fields.ManyToManyRelation['ScrimsSlotReminder'] = fields.ManyToManyField('models.ScrimsSlotReminder')
def __str__(self):
return f"{getattr(self.registration_channel, 'mention', 'deleted-channel')} (ID: {self.id})"
    @staticmethod
    def is_ignorable(member: discord.Member) -> bool:
        return ('scrims-mod' in (role.name.lower() for role in member.roles))
    @property
    def guild(self) -> Optional[discord.Guild]:
        return self.bot.get_guild(self.guild_id)
    @property
    def role(self):
        if (self.guild is not None):
            return self.guild.get_role(self.role_id)
    @property
    def logschan(self):
        if (self.guild is not None):
            return discord.utils.get(self.guild.text_channels, name='quotient-scrims-logs')
    @property
    def modrole(self):
        if (self.guild is not None):
            return discord.utils.get(self.guild.roles, name='scrims-mod')
    @property
    def registration_channel(self):
        return self.bot.get_channel(self.registration_channel_id)
    @property
    def banlog_channel(self):
        return self.bot.get_channel(self.banlog_channel_id)
    @property
    def slotlist_channel(self):
        return self.bot.get_channel(self.slotlist_channel_id)
    @property
    def host(self):
        if (self.guild is not None):
            return self.guild.get_member(self.host_id)
        return self.bot.get_user(self.host_id)
    @property
    def check_emoji(self):
        return self.emojis.get('tick', '')
    @property
    def cross_emoji(self):
        return self.emojis.get('cross', '')
    @property
    def available_to_reserve(self):
        return range(self.start_from, (self.total_slots + self.start_from))
    @property
    def opened(self):
        if (self.opened_at is None):
            return False
        if (self.closed_at is not None):
            return (self.closed_at < self.opened_at)
        return True
    @property
    def closed(self):
        return (not self.opened)
    @property
    def ping_role(self):
        if (self.guild is not None):
            return self.guild.get_role(self.ping_role_id)
    @property
    def open_role(self):
        if (self.guild is not None):
            if (self.open_role_id is not None):
                return self.guild.get_role(self.open_role_id)
            return self.guild.default_role
    @property
    def toggle(self):
        return self.stoggle
    @property
    def teams_registered(self):
        return self.assigned_slots.order_by('num')
async def reserved_user_ids(self):
return (i.user_id for i in (await self.reserved_slots.all()))
async def banned_user_ids(self):
return (i.user_id for i in (await self.banned_teams.all()))
    async def cleaned_slots(self) -> List['AssignedSlot']:
        slots = (await self.assigned_slots.order_by('num'))
        _list = []
        # deduplicate by slot number, keeping the first assigned slot for each num
        for num in sorted({slot.num for slot in slots}):
            _list.append(next((i for i in slots if (i.num == num))))
        return _list
async def add_tick(self, msg: discord.Message):
with suppress(discord.HTTPException):
(await msg.add_reaction(self.check_emoji))
(await msg.author.add_roles(self.role))
    @staticmethod
    def default_slotlist_format():
        return discord.Embed(color=65459, title='<<name>> Slotlist', description='```\n<<slots>>\n```').set_footer(text='Registration took: <<time_taken>>')
async def create_slotlist(self):
_slots = (await self.cleaned_slots())
desc = '\n'.join((f'Slot {slot.num:02} -> {slot.team_name}' for slot in _slots))
if (len(self.slotlist_format) <= 1):
text = str(self.default_slotlist_format().to_dict())
else:
text = str(self.slotlist_format)
changes = [('<<name>>', self.name), ('<<time_taken>>', (self.time_elapsed or 'N/A')), ('<<open_time>>', discord_timestamp(self.open_time))]
for _ in changes:
text = text.replace(*_)
embed = discord.Embed.from_dict(leval(text))
        if (embed.color is None):
embed.color = 3092790
embed.description = embed.description.replace('<<slots>>', desc)
return (embed, self.slotlist_channel)
async def refresh_slotlist_message(self, msg: discord.Message=None):
(embed, channel) = (await self.create_slotlist())
with suppress(discord.HTTPException, AttributeError):
if (not msg):
msg = (await self.bot.get_or_fetch_message(channel, self.slotlist_message_id))
(await msg.edit(embed=embed))
async def send_slotlist(self, channel: discord.TextChannel=None) -> discord.Message:
from cogs.esports.views.smslotlist.button import SlotlistEditButton
channel = (channel or self.slotlist_channel)
_v = SlotlistEditButton(self.bot, self)
(embed, schannel) = (await self.create_slotlist())
_v.message = (await channel.send(embed=embed, view=_v))
if (channel == schannel):
(await self.make_changes(slotlist_message_id=_v.message.id))
return _v.message
async def dispatch_reminders(self, channel: discord.TextChannel, link: str):
reminders = (await self.slot_reminders.all().order_by('created_at'))
if (not reminders):
return
_e = discord.Embed(color=65459, title=f'Slot Available to Claim - {channel.guild.name}', url=link)
        _e.description = f'''A slot of {self} is available to claim in {channel.mention}!
Claim it before anyone else does.'''
async for user in self.bot.resolve_member_ids(self.guild, [i.user_id for i in reminders]):
with suppress(discord.HTTPException):
(await user.send(embed=_e))
(await ScrimsSlotReminder.filter(pk__in=(i.pk for i in reminders)).delete())
async def ensure_match_timer(self):
from models import Timer
from .slotm import ScrimsSlotManager
if (not self.match_time):
self.match_time = self.bot.current_time.replace(hour=0, minute=0, microsecond=0, second=0)
_time = self.match_time
while (_time < self.bot.current_time):
_time = (_time + timedelta(hours=24))
if (self.match_time != _time):
(await Scrim.filter(pk=self.pk).update(match_time=_time))
check = (await Timer.filter(event='scrim_match', expires=_time, extra={'args': [], 'kwargs': {'scrim_id': self.pk}}).exists())
if (not check):
(await self.bot.reminders.create_timer(_time, 'scrim_match', scrim_id=self.pk))
(await ScrimsSlotManager.refresh_guild_message(self.guild_id, self.pk))
async def make_changes(self, **kwargs):
(await Scrim.filter(pk=self.pk).update(**kwargs))
return (await self.refresh_from_db())
async def get_text_slotlist(self):
_text = f'''{self} Slot details:
'''
_slots = (await self.cleaned_slots())
for _ in _slots:
_text += f'''{_.num}. {_.team_name} <{_.user_id}>
'''
return _text
async def ban_slot(self, slot: 'AssignedSlot', *, reason, mod: discord.Member, ban_type: str):
(to_ban, scrims) = ([slot.user_id], [self])
if (ban_type == '2'):
to_ban = [_ for _ in slot.members]
elif (ban_type == '3'):
scrims = (await Scrim.filter(guild_id=self.guild_id).order_by('open_time'))
elif (ban_type == '4'):
to_ban = [_ for _ in slot.members]
scrims = (await Scrim.filter(guild_id=self.guild_id).order_by('open_time'))
for _ in to_ban:
for scrim in scrims:
if (_ in (await scrim.banned_user_ids())):
continue
b = (await BannedTeam.create(user_id=_, expires=reason.dt, reason=reason.arg))
(await scrim.banned_teams.add(b))
if (banlog := (await BanLog.get_or_none(guild_id=self.guild_id))):
(await banlog.log_ban(_, mod, scrims, reason.arg, reason.dt))
if reason.dt:
(await self.bot.reminders.create_timer(reason.dt, 'scrim_ban', scrims=[scrim.id for scrim in scrims], user_id=_, mod=mod.id, reason=reason.arg))
return f'Banned {utils.plural(to_ban):player|players} from {utils.plural(scrims):scrim|scrims}.'
async def create_slotlist_img(self):
slots = (await self.teams_registered)
def wrapper():
font = ImageFont.truetype(str(((((Path.cwd() / 'src') / 'data') / 'font') / 'Ubuntu-Regular.ttf')), 16)
rects = []
for slot in slots:
image = Image.new('RGBA', (290, 30), '#2e2e2e')
draw = ImageDraw.Draw(image)
draw.text((10, 5), f'Slot {slot.num:02} | {slot.team_name}', font=font, fill='white')
rects.append(image)
images = []
for group in utils.split_list(rects, 10):
size = (290, (len(group) * 40))
image = Image.new('RGBA', size)
x = 0
y = 0
for rect in group:
image.paste(rect, (x, y))
y += (rect.size[1] + 10)
img_bytes = io.BytesIO()
image.save(img_bytes, 'PNG')
img_bytes.seek(0)
images.append(discord.File(img_bytes, 'slotlist.png'))
return images
return (await asyncio.get_event_loop().run_in_executor(None, wrapper))
async def reg_open_msg(self):
reserved_count = (await self.reserved_slots.all().count())
if (len(self.open_message) <= 1):
return discord.Embed(color=self.bot.color, title='Registration is now open!', description=f''' **`{self.required_mentions}`** mentions required.
Total slots: **`{self.total_slots}`** [`{reserved_count}` slots reserved]''')
changes = [('<<mentions>>', str(self.required_mentions)), ('<<slots>>', str(self.total_slots)), ('<<reserved>>', str(reserved_count)), ('<<slotlist>>', getattr(self.slotlist_channel, 'mention', 'Not Found')), ('<<multireg>>', ('Enabled' if self.multiregister else 'Not Enabled')), ('<<teamname>>', ('Yes' if self.teamname_compulsion else 'No')), ('<<mention_banned>>', ', '.join(map((lambda x: getattr(x, 'mention', 'Left')), map(self.guild.get_member, (await self.banned_user_ids()))))), ('<<mention_reserved>>', ', '.join(map((lambda x: getattr(x, 'mention', 'Left')), map(self.guild.get_member, (await self.reserved_user_ids())))))]
text = str(self.open_message)
for _ in changes:
text = text.replace(*_)
return discord.Embed.from_dict(leval(text))
def reg_close_msg(self):
if (len(self.close_message) <= 1):
return discord.Embed(color=self.bot.config.COLOR, description='**Registration is now Closed!**')
changes = [('<<slots>>', str(self.total_slots)), ('<<filled>>', str((self.total_slots - len(self.available_slots)))), ('<<time_taken>>', (self.time_elapsed or 'N/A')), ('<<open_time>>', discord_timestamp(self.open_time))]
text = str(self.close_message)
for _ in changes:
text = text.replace(*_)
return discord.Embed.from_dict(leval(text))
async def setup_logs(self):
_reason = 'Created for scrims management.'
guild = self.guild
if (not (scrims_mod := self.modrole)):
scrims_mod = (await guild.create_role(name='scrims-mod', color=self.bot.color, reason=_reason))
overwrite = self.registration_channel.overwrites_for(guild.default_role)
overwrite.update(read_messages=True, send_messages=True, read_message_history=True)
(await self.registration_channel.set_permissions(scrims_mod, overwrite=overwrite))
if ((scrims_log_channel := self.logschan) is None):
overwrites = {guild.default_role: discord.PermissionOverwrite(read_messages=False), guild.me: discord.PermissionOverwrite(read_messages=True), scrims_mod: discord.PermissionOverwrite(read_messages=True)}
scrims_log_channel = (await guild.create_text_channel(name='quotient-scrims-logs', overwrites=overwrites, reason=_reason, topic='**DO NOT RENAME THIS CHANNEL**'))
            note = (await scrims_log_channel.send(embed=discord.Embed(description=f'''Scrim events (opening registrations, adding roles, etc.) will be logged in this channel. I have also created {scrims_mod.mention}; give that role to your scrims moderators. Users with {scrims_mod.mention} can also send messages in registration channels without them being counted as scrims registrations.
`Note`: **Do not rename this channel.**''', color=65459)))
(await note.pin())
async def full_delete(self):
from .slotm import ScrimsSlotManager
_id = self.pk
self.bot.cache.scrim_channels.discard(self.registration_channel_id)
slotm = (await ScrimsSlotManager.filter(guild_id=self.guild_id, scrim_ids__contains=self.pk))
(await ScrimsSlotManager.filter(pk__in=[_.pk for _ in slotm]).update(scrim_ids=ArrayRemove('scrim_ids', _id)))
_d = (await self.assigned_slots.all())
(await AssignedSlot.filter(pk__in=[_.pk for _ in _d]).delete())
_r = (await self.slot_reminders.all())
(await ScrimsSlotReminder.filter(pk__in=[_.pk for _ in _r]).delete())
_re = (await self.reserved_slots.all())
(await ReservedSlot.filter(pk__in=[_.pk for _ in _re]).delete())
(await self.delete())
async def confirm_all_scrims(self, ctx: Context, **kwargs):
if (not ((await Scrim.scrim_count(ctx.guild.id)) > 1)):
return
prompt = (await ctx.prompt('Do you want to apply these changes to all scrims in this server?'))
if (not prompt):
return (await ctx.simple('Alright, this scrim only.', 4))
(await Scrim.filter(guild_id=ctx.guild.id).update(**kwargs))
(await ctx.simple('This change was applied to all your scrims.', 4))
async def close_registration(self):
from cogs.esports.helpers.utils import toggle_channel, wait_and_purge
from .slotm import ScrimsSlotManager
closed_at = self.bot.current_time
registration_channel = self.registration_channel
open_role = self.open_role
self.time_elapsed = humanize.precisedelta((closed_at - self.opened_at))
(await self.make_changes(opened_at=None, time_elapsed=self.time_elapsed, closed_at=closed_at))
channel_update = (await toggle_channel(registration_channel, open_role, False))
_e = self.reg_close_msg()
(await registration_channel.send(embed=_e))
self.bot.dispatch('scrim_log', EsportsLog.closed, self, permission_updated=channel_update)
registered = (await self.teams_registered)
if (self.autoslotlist and registered):
(await self.send_slotlist())
if self.autodelete_extras:
msg_ids = (i.message_id for i in registered)
check = (lambda x: all(((not x.pinned), (not x.reactions), (not x.embeds), (not (x.author == self.bot.user)), (not (x.id in msg_ids)))))
self.bot.loop.create_task(wait_and_purge(registration_channel, check=check, wait_for=60))
slotm = (await ScrimsSlotManager.get_or_none(guild_id=self.guild_id, scrim_ids__contains=self.id))
if slotm:
(await slotm.refresh_public_message())
async def __add_role_to_reserved_users(self, member_ids: set[int]):
role = discord.Object(id=self.role_id)
async for member in self.bot.resolve_member_ids(self.guild, member_ids):
try:
if (not member._roles.has(role.id)):
(await member.add_roles(role, reason=f'Reserved Slot [{self.pk}]'))
(await asyncio.sleep(0.2))
except discord.HTTPException:
continue
async def start_registration(self):
from cogs.esports.helpers.utils import available_to_reserve, scrim_work_role, toggle_channel
oldslots = (await self.assigned_slots)
(await AssignedSlot.filter(id__in=(slot.id for slot in oldslots)).delete())
(await self.assigned_slots.clear())
(await self.bot.db.execute('\n UPDATE public."sm.scrims" SET available_slots = $1 WHERE id = $2\n ', (await available_to_reserve(self)), self.id))
reserved_slots = (await self.reserved_slots.all().order_by('num'))
reserved_user_ids = {slot.user_id for slot in reserved_slots if (slot.user_id is not None)}
for slot in reserved_slots:
            assigned_slot = (await AssignedSlot.create(num=slot.num, user_id=slot.user_id, team_name=slot.team_name, jump_url=None))
            (await self.assigned_slots.add(assigned_slot))
self.bot.loop.create_task(self.__add_role_to_reserved_users(reserved_user_ids))
(await Scrim.filter(pk=self.id).update(opened_at=self.bot.current_time, closed_at=None, slotlist_message_id=None))
self.bot.loop.create_task(self.ensure_match_timer())
(await asyncio.sleep(0.2))
registration_channel = self.registration_channel
open_role = self.open_role
_e = (await self.reg_open_msg())
(await registration_channel.send(content=scrim_work_role(self, EsportsRole.ping), embed=_e, allowed_mentions=discord.AllowedMentions(roles=True, everyone=True)))
self.bot.cache.scrim_channels.add(registration_channel.id)
(await toggle_channel(registration_channel, open_role, True))
self.bot.dispatch('scrim_log', EsportsLog.open, self)
    @staticmethod
    async def show_selector(*args, **kwargs):
        from cogs.esports.views.scrims.selector import prompt_selector
        return (await prompt_selector(*args, **kwargs))
async def scrim_posi(self):
from cogs.esports.views.scrims.selector import scrim_position
return (await scrim_position(self.pk, self.guild_id))
(ttl=(60 * 2))
async def scrim_count(guild_id: int):
return (await Scrim.filter(guild_id=guild_id).count())
async def check_fake_tags(self, message: discord.Message):
query = '\n SELECT *\n FROM PUBLIC."sm.scrims_sm.assigned_slots" AS ASSIGNED_SLOT\n INNER JOIN PUBLIC."sm.assigned_slots" AS SLOTS ON SLOTS.ID = ASSIGNED_SLOT.ASSIGNEDSLOT_ID\n WHERE ASSIGNED_SLOT."sm.scrims_id" = $1\n AND $2 && SLOTS.MEMBERS;\n\n '
return (await self.bot.db.fetch(query, self.id, [i.id for i in message.mentions])) |
class PosAlign(nn.Module):
def __init__(self):
super(PosAlign, self).__init__()
self.soft_plus = nn.Softplus()
def forward(self, feature, target):
feature = F.normalize(feature, p=2, dim=1)
feature = torch.matmul(feature, feature.transpose(1, 0))
label_matrix = (target.unsqueeze(1) == target.unsqueeze(0))
positive_pair = torch.masked_select(feature, label_matrix)
loss = (1.0 * self.soft_plus(torch.logsumexp(positive_pair, 0)))
return loss |
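# Quick sanity check for PosAlign above: it L2-normalizes the embeddings,
# builds a cosine-similarity matrix, and takes softplus(logsumexp(...)) over
# all pairs that share a label (note the mask includes the diagonal self-pairs):
import torch
criterion = PosAlign()
feats = torch.randn(8, 128)
labels = torch.randint(0, 3, (8,))
print(criterion(feats, labels))  # scalar loss |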
def createFile(finalSize=2000000000):  # target size in bytes; assumed default (~2 GB)
chunk = np.random.normal(size=1000000).astype(np.float32)
f = h5py.File('test.hdf5', 'w')
f.create_dataset('data', data=chunk, chunks=True, maxshape=(None,))
data = f['data']
nChunks = (finalSize // (chunk.size * chunk.itemsize))
with pg.ProgressDialog('Generating test.hdf5...', 0, nChunks) as dlg:
for i in range(nChunks):
newshape = [(data.shape[0] + chunk.shape[0])]
data.resize(newshape)
data[(- chunk.shape[0]):] = chunk
dlg += 1
if dlg.wasCanceled():
f.close()
os.remove('test.hdf5')
sys.exit()
dlg += 1
f.close() |
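# Example: build a small ~160 MB test file instead of the full-size default
# (sizes are in bytes; each chunk is 1M float32 samples = 4,000,000 bytes):
#
#   createFile(finalSize=40 * 1000000 * 4)   # writes 40 chunks |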
def assert_dropped(iteration: TransitionResult, old_state: Any, reason: Optional[str]=None):
msg = f"State change expected to be dropped ({(reason or 'reason unknown')})."
assert ((iteration.new_state is None) or (iteration.new_state == old_state)), msg
assert (not iteration.events), msg |
def get_vertices(v, degree_v, degrees, a_vertices):
a_vertices_selected = (2 * math.log(a_vertices, 2))
vertices = deque()
try:
c_v = 0
for v2 in degrees[degree_v]['vertices']:
if (v != v2):
vertices.append(v2)
c_v += 1
if (c_v > a_vertices_selected):
raise StopIteration
if ('before' not in degrees[degree_v]):
degree_b = (- 1)
else:
degree_b = degrees[degree_v]['before']
if ('after' not in degrees[degree_v]):
degree_a = (- 1)
else:
degree_a = degrees[degree_v]['after']
if ((degree_b == (- 1)) and (degree_a == (- 1))):
raise StopIteration
degree_now = verifyDegrees(degrees, degree_v, degree_a, degree_b)
while True:
for v2 in degrees[degree_now]['vertices']:
if (v != v2):
vertices.append(v2)
c_v += 1
if (c_v > a_vertices_selected):
raise StopIteration
if (degree_now == degree_b):
if ('before' not in degrees[degree_b]):
degree_b = (- 1)
else:
degree_b = degrees[degree_b]['before']
elif ('after' not in degrees[degree_a]):
degree_a = (- 1)
else:
degree_a = degrees[degree_a]['after']
if ((degree_b == (- 1)) and (degree_a == (- 1))):
raise StopIteration
degree_now = verifyDegrees(degrees, degree_v, degree_a, degree_b)
except StopIteration:
return list(vertices)
return list(vertices) |
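# Note on get_vertices above (struc2vec-style neighbour selection): it gathers
# roughly 2 * log2(a_vertices) candidate vertices whose degree is closest to
# degree_v, starting from the same degree bucket and expanding outward through
# the 'before'/'after' links of the ordered-degree table; `raise StopIteration`
# is used purely as an early-exit signal caught by the enclosing try block. |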
class MonsterBio(commands.Cog):
def generate_name(self, seeded_random: random.Random) -> str:
n_candidate_strings = seeded_random.randint(2, len(TEXT_OPTIONS['monster_type']))
return ''.join((seeded_random.choice(TEXT_OPTIONS['monster_type'][i]) for i in range(n_candidate_strings)))
    @commands.command(brief='Sends your monster bio!')
async def monsterbio(self, ctx: commands.Context) -> None:
seeded_random = random.Random(ctx.author.id)
name = self.generate_name(seeded_random)
species = self.generate_name(seeded_random)
biography_text = seeded_random.choice(TEXT_OPTIONS['biography_text'])
words = {'monster_name': name, 'monster_species': species}
for (key, value) in biography_text.items():
if (key == 'text'):
continue
options = seeded_random.sample(TEXT_OPTIONS[key], value)
words[key] = ' '.join(options)
embed = discord.Embed(title=f"{name}'s Biography", color=seeded_random.choice([Colours.orange, Colours.purple]), description=biography_text['text'].format_map(words))
(await ctx.send(embed=embed)) |
def evaluate(result_sha, mail, num_hypo, eval_3diou, eval_2diou, thres):
if eval_3diou:
mail.msg('Processing Result for KITTI 3D MOT Benchmark')
elif eval_2diou:
mail.msg('Processing Result for KITTI 2D MOT Benchmark')
else:
        assert False, 'either eval_3diou or eval_2diou must be set'
classes = []
for c in ('cyclist', 'pedestrian', 'car'):
e = trackingEvaluation(t_sha=result_sha, mail=mail, cls=c, eval_3diou=eval_3diou, eval_2diou=eval_2diou, num_hypo=num_hypo, thres=thres)
try:
if (not e.loadTracker()):
continue
mail.msg('Loading Results - Success')
mail.msg(('Evaluate Object Class: %s' % c.upper()))
classes.append(c)
        except Exception:
            mail.msg('Feel free to contact us (), if you receive this error message:')
            mail.msg(' Caught exception while loading result data.')
            break
if (not e.loadGroundtruth()):
raise ValueError('Ground truth not found.')
mail.msg('Loading Groundtruth - Success')
if (len(e.groundtruth) != len(e.tracker)):
mail.msg(('The uploaded data does not provide results for every sequence: %d vs %d' % (len(e.groundtruth), len(e.tracker))))
return False
mail.msg(('Loaded %d Sequences.' % len(e.groundtruth)))
mail.msg('Start Evaluation...')
if eval_3diou:
suffix = 'eval3D'
else:
suffix = 'eval2D'
filename = os.path.join(e.t_path, ('../summary_%s_average_%s.txt' % (c, suffix)))
dump = open(filename, 'w+')
stat_meter = stat(t_sha=result_sha, cls=c, suffix=suffix, dump=dump)
e.compute3rdPartyMetrics()
(best_mota, best_threshold) = (0, (- 10000))
(threshold_list, recall_list) = e.getThresholds(e.scores, e.num_gt)
for (threshold_tmp, recall_tmp) in zip(threshold_list, recall_list):
data_tmp = dict()
e.reset()
e.compute3rdPartyMetrics(threshold_tmp, recall_tmp)
(data_tmp['mota'], data_tmp['motp'], data_tmp['moda'], data_tmp['modp'], data_tmp['precision'], data_tmp['F1'], data_tmp['fp'], data_tmp['fn'], data_tmp['recall'], data_tmp['sMOTA']) = (e.MOTA, e.MOTP, e.MODA, e.MODP, e.precision, e.F1, e.fp, e.fn, e.recall, e.sMOTA)
stat_meter.update(data_tmp)
mota_tmp = e.MOTA
if (mota_tmp > best_mota):
best_threshold = threshold_tmp
best_mota = mota_tmp
e.saveToStats(dump, threshold_tmp, recall_tmp)
e.reset()
e.compute3rdPartyMetrics(best_threshold)
e.saveToStats(dump)
stat_meter.output()
summary = stat_meter.print_summary()
stat_meter.plot()
mail.msg(summary)
dump.close()
if (len(classes) == 0):
mail.msg('The uploaded results could not be evaluated. Check for format errors.')
return False
mail.msg('Thank you for participating in our benchmark!')
return True |
class ReIDModel():
def __init__(self):
self.model = resnet50(num_classes=751, loss='softmax', pretrained=True, use_gpu=True)
load_pretrained_weights(self.model, config.reid_resnet50_market_weight_path)
self.device = 'cuda'
self.model.to(self.device)
self.model.eval()
self.feature_list = ['layer1', 'layer2', 'layer3', 'layer4']
self.size = (256, 128)
self.normalize_mean = torch.Tensor([0.485, 0.456, 0.406])
self.normalize_mean = self.normalize_mean.view(1, 3, 1, 1).to(self.device)
self.normalize_std = torch.Tensor([0.229, 0.224, 0.225])
self.normalize_std = self.normalize_std.view(1, 3, 1, 1).to(self.device)
    @torch.no_grad()
    def run_reid_model(self, x):
x = self.preprocess(x)
feature_dict = {}
for (module_name, module) in self.model.named_children():
x = module(x)
if (module_name == 'global_avgpool'):
x = torch.flatten(x, 1)
if (module_name in self.feature_list):
feature_dict[module_name] = torch.flatten(x, 1)
if (module_name == self.feature_list[(- 1)]):
break
return feature_dict
def preprocess(self, data):
data_unnorm = ((data / 2.0) + 0.5)
data_rgb_unnorm = data_unnorm
        data_rgb_unnorm = F.interpolate(data_rgb_unnorm, size=self.size, mode='bilinear', align_corners=False)
data_rgb = ((data_rgb_unnorm - self.normalize_mean) / self.normalize_std)
return data_rgb |
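# Hypothetical usage sketch for ReIDModel above (assumes CUDA plus the
# torchreid-style `resnet50` / `load_pretrained_weights` helpers it relies on,
# and the configured Market-1501 weight file; inputs are expected in [-1, 1],
# which preprocess() remaps to ImageNet statistics):
import torch
reid = ReIDModel()
crops = (torch.rand(4, 3, 128, 64, device='cuda') * 2) - 1  # fake person crops
feats = reid.run_reid_model(crops)
print({k: v.shape for (k, v) in feats.items()})  # flattened layer1..layer4 features |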