def test_read_classifiers_cached(monkeypatch, tmp_path):
def mock_get_cache_dir():
tmp_file = (tmp_path / 'classifiers.lst')
with tmp_file.open('w') as fh:
fh.write('A\nB\nC')
return tmp_path
monkeypatch.setattr(fv, 'get_cache_dir', mock_get_cache_dir)
classifiers = fv._read_classifiers_cached()
assert (classifiers == {'A', 'B', 'C'})

def test_suppress_error_removing_lock(tmp_path: Path) -> None:
path = (tmp_path / 'dir')
path.mkdir()
lock = get_lock_path(path)
lock.touch()
mtime = lock.stat().st_mtime
with unittest.mock.patch.object(Path, 'unlink', side_effect=OSError) as m:
assert (not ensure_deletable(path, consider_lock_dead_if_created_before=(mtime + 30)))
assert (m.call_count == 1)
assert lock.is_file()
with unittest.mock.patch.object(Path, 'is_file', side_effect=OSError) as m:
assert (not ensure_deletable(path, consider_lock_dead_if_created_before=(mtime + 30)))
assert (m.call_count == 1)
assert lock.is_file()
assert ensure_deletable(path, consider_lock_dead_if_created_before=(mtime + 30))
assert (not lock.is_file())

def split_header_words(header_values):
assert (type(header_values) not in STRING_TYPES)
result = []
for text in header_values:
orig_text = text
pairs = []
while text:
m = token_re.search(text)
if m:
text = unmatched(m)
name = m.group(1)
m = quoted_value_re.search(text)
if m:
text = unmatched(m)
value = m.group(1)
value = escape_re.sub('\\1', value)
else:
m = value_re.search(text)
if m:
text = unmatched(m)
value = m.group(1)
value = value.rstrip()
else:
value = None
pairs.append((name, value))
elif text.lstrip().startswith(','):
text = text.lstrip()[1:]
if pairs:
result.append(pairs)
pairs = []
else:
(non_junk, nr_junk_chars) = re.subn('^[=\\s;]*', '', text)
assert (nr_junk_chars > 0), ("split_header_words bug: '%s', '%s', %s" % (orig_text, text, pairs))
text = non_junk
if pairs:
result.append(pairs)
return result

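# Usage sketch for split_header_words; the regexes it relies on (token_re,
# quoted_value_re, value_re, escape_re) are assumed to be defined alongside it,
# as in the stdlib's http.cookiejar, which documents exactly this behaviour:
example = split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
assert example == [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
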
class EventLoop(QEventLoop):
def __init__(self, parent: QObject=None) -> None:
super().__init__(parent)
self._executing = False
def exec(self, flags: _ProcessEventFlagType=QEventLoop.ProcessEventsFlag.AllEvents) -> int:
if self._executing:
raise AssertionError('Eventloop is already running!')
self._executing = True
if machinery.IS_QT5:
flags = cast(QEventLoop.ProcessEventsFlags, flags)
status = super().exec(flags)
self._executing = False
return status

class AZPReactiveTabu(AZPTabu):
def __init__(self, max_iterations, k1, k2, random_state=None):
self.tabu = deque([], maxlen=1)
super().__init__(random_state=random_state)
self.avg_it_until_rep = 1
self.rep_counter = 1
if (max_iterations <= 0):
raise ValueError('The `max_iterations` argument must be > 0.')
self.maxit = max_iterations
self.visited = []
self.k1 = k1
self.k2 = k2
def _azp_connected_component(self, adj, initial_labels, attr):
self.reset_tabu(1)
distinct_regions = list(np.unique(initial_labels))
if (len(distinct_regions) == 1):
return initial_labels
labels = initial_labels
it_since_tabu_len_changed = 0
obj_val_start = float('inf')
for _it in range(self.maxit):
obj_val_end = self.objective_func(labels, attr)
if (not (obj_val_end < obj_val_start)):
break
obj_val_start = obj_val_end
it_since_tabu_len_changed += 1
possible_moves = []
for area in range(labels.shape[0]):
old_region = labels[area]
sub_adj = sub_adj_matrix(adj, np.where((labels == old_region))[0], wo_nodes=area)
if (is_connected(sub_adj) and (count(labels, old_region) > 1)):
for neigh in neighbors(adj, area):
new_region = labels[neigh]
if (new_region != old_region):
possible_move = Move(area, old_region, new_region)
if (possible_move not in self.tabu):
possible_moves.append(possible_move)
best_move = None
best_move_index = None
best_objval_diff = float('inf')
for (i, move) in enumerate(possible_moves):
obj_val_diff = self.objective_func.update(move.area, move.new_region, labels, attr)
if (obj_val_diff < best_objval_diff):
(best_move_index, best_move) = (i, move)
best_objval_diff = obj_val_diff
if self.allow_move_strategy(best_move.area, best_move.new_region, labels):
self._make_move(best_move.area, best_move.new_region, labels, adj)
label_tup = tuple(labels)
if (label_tup in self.visited):
times_visited = self.visited.count(label_tup)
cycle = list(reversed(self.visited))
cycle = cycle[:(cycle.index(label_tup) + 1)]
cycle = list(reversed(cycle))
it_until_repetition = len(cycle)
if (times_visited > self.k1):
times_cycle_found = 0
if (self.k2 > 0):
for i in range((len(self.visited) - len(cycle))):
if (self.visited[i:(i + len(cycle))] == cycle):
times_cycle_found += 1
if (times_cycle_found >= self.k2):
break
if (times_cycle_found >= self.k2):
last_step = (11, tuple(labels))
self.visited = []
p = math.floor((1 + (self.avg_it_until_rep / 2)))
possible_moves.pop(best_move_index)
for _ in range(p):
move = possible_moves.pop(random.randrange(len(possible_moves)))
if self.allow_move_strategy(move.area, move.new_region, labels):
self._make_move(move.area, move.new_region, labels, adj)
continue
self.rep_counter += 1
avg_it = self.avg_it_until_rep
self.avg_it_until_rep = ((1 / self.rep_counter) * (((self.rep_counter - 1) * avg_it) + it_until_repetition))
self.tabu = deque(self.tabu, math.ceil(1.1 * self.tabu.maxlen))  # deque maxlen must be an int; ceil so the tabu list actually grows
if (it_since_tabu_len_changed > self.avg_it_until_rep):
new_tabu_len = max([(0.9 * self.tabu.maxlen), 1])
new_tabu_len = math.floor(new_tabu_len)
self.tabu = deque(self.tabu, new_tabu_len)
it_since_tabu_len_changed = 0
self.visited.append(tuple(labels))
last_step = 10
if (last_step == 10):
try:
return np.array(self.visited[(- 2)])
except IndexError:
return np.array(self.visited[(- 1)])
return np.array(last_step[1])

def get_all_datasets(args: argparse.Namespace, tokenizer: object) -> List[Generator[(Dict, None, None)]]:
train_gen_list = []
if (args.mode in ['train']):
for train_data_path in args.train_data.split(','):
train_gen_list.append(get_data_gen(train_data_path, 'train', args, tokenizer))
return train_gen_list

def test_jump_control_of_flow_instruction_raises():
try:
raise Jump(['one', 'two'], 'sg', 'fg', 'og')
except Jump as err_info:
assert isinstance(err_info, ControlOfFlowInstruction)
assert (err_info.groups == ['one', 'two'])
assert (err_info.success_group == 'sg')
assert (err_info.failure_group == 'fg')
assert (err_info.original_config == 'og')

class SquareBoxCoderTest(tf.test.TestCase):
def test_correct_relative_codes_with_default_scale(self):
boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
scale_factors = None
expected_rel_codes = [[(- 0.790569), (- 0.263523), (- 0.293893)], [(- 0.068041), (- 0.272166), (- 0.89588)]]
boxes = box_list.BoxList(tf.constant(boxes))
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
(rel_codes_out,) = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_correct_relative_codes_with_non_default_scale(self):
boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
scale_factors = [2, 3, 4]
expected_rel_codes = [[(- 1.581139), (- 0.790569), (- 1.175573)], [(- 0.136083), (- 0.816497), (- 3.583519)]]
boxes = box_list.BoxList(tf.constant(boxes))
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
(rel_codes_out,) = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_correct_relative_codes_with_small_width(self):
boxes = [[10.0, 10.0, 10.0000001, 20.0]]
anchors = [[15.0, 12.0, 30.0, 18.0]]
scale_factors = None
expected_rel_codes = [[(- 1.317616), 0.0, (- 20.670586)]]
boxes = box_list.BoxList(tf.constant(boxes))
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
(rel_codes_out,) = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_correct_boxes_with_default_scale(self):
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
rel_codes = [[(- 0.5), (- 0.416666), (- 0.405465)], [(- 0.083333), (- 0.222222), (- 0.693147)]]
scale_factors = None
expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432], [0.155051, 0.102989, 0.522474, 0.470412]]
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors)
with self.test_session() as sess:
(boxes_out,) = sess.run([boxes.get()])
self.assertAllClose(boxes_out, expected_boxes)
def test_correct_boxes_with_non_default_scale(self):
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
rel_codes = [[(- 1.0), (- 1.25), (- 1.62186)], [(- 0.166667), (- 0.666667), (- 2.772588)]]
scale_factors = [2, 3, 4]
expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432], [0.155051, 0.102989, 0.522474, 0.470412]]
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors)
with self.test_session() as sess:
(boxes_out,) = sess.run([boxes.get()])
self.assertAllClose(boxes_out, expected_boxes)

class Effect6641(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, src, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Hull Upgrades')), 'armorHPBonusAdd', src.getModifiedItemAttr('shipBonusRole2'), **kwargs)
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Shield Upgrades')), 'capacityBonus', src.getModifiedItemAttr('shipBonusRole2'), **kwargs)

def test_time_adapt(model, criterion, args=None, logger=None, writer=None):
from utils.norm_stats_utils import CombineNormStatsRegHook_onereg
from utils.relation_map_utils import CombineCossimRegHook
candidate_bn_layers = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]
if args.update_only_bn_affine:
from utils.BNS_utils import freeze_except_bn, collect_bn_params
model = freeze_except_bn(model, bn_condidiate_layers=candidate_bn_layers)
(params, param_names) = collect_bn_params(model, bn_candidate_layers=candidate_bn_layers)
optimizer = torch.optim.Adam(params, lr=args.lr, betas=(0.9, 0.999), weight_decay=0.0)
else:
optimizer = torch.optim.SGD(params=model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
if (args.arch == 'tanet'):
tta_loader = torch.utils.data.DataLoader(get_dataset_tanet(args, split='val', dataset_type='tta'), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
eval_loader = torch.utils.data.DataLoader(get_dataset_tanet(args, split='val', dataset_type='eval'), batch_size=args.batch_size_eval, shuffle=False, num_workers=args.workers, pin_memory=True)
elif (args.arch == 'videoswintransformer'):
tta_loader = torch.utils.data.DataLoader(get_dataset_videoswin(args, split='val', dataset_type='tta'), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
eval_loader = torch.utils.data.DataLoader(get_dataset_videoswin(args, split='val', dataset_type='eval'), batch_size=args.batch_size_eval, shuffle=False, num_workers=args.workers, pin_memory=True)
else:
tta_loader = torch.utils.data.DataLoader(get_dataset(args, split='val'), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
eval_loader = torch.utils.data.DataLoader(get_dataset(args, split='val'), batch_size=args.batch_size_eval, shuffle=False, num_workers=args.workers, pin_memory=True)
global_iter = 0
if (args.stat_reg == 'mean_var'):
stat_reg_hooks = []
if (not hasattr(args, 'moving_avg')):
args.moving_avg = False
if (not hasattr(args, 'momentum_mvg')):
args.momentum_mvg = 0.1
if isinstance(args.stat_type, str):
raise NotImplementedError('args.stat_type of str is deprecated, use list instead. To add the implementation for case of Video swin transformer. ')
if (args.stat_type == 'temp'):
bn_layers = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]
elif (args.stat_type in ['spatial', 'spatiotemp']):
bn_layers = [nn.BatchNorm2d, nn.BatchNorm3d]
chosen_layers = choose_layers(model, bn_layers)
list_stat_mean_clean = list(np.load(args.stat_mean_clean_file, allow_pickle=True))
list_stat_var_clean = list(np.load(args.stat_var_clean_file, allow_pickle=True))
assert (len(list_stat_mean_clean) == len(chosen_layers))
for (layer_id, (chosen_layer_name, chosen_layer)) in enumerate(chosen_layers):
for block_name in args.chosen_blocks:
if (block_name in chosen_layer_name):
stat_reg_hooks.append(NormStatsRegHook(chosen_layer, clip_len=args.clip_length, stats_clean_tuple=(list_stat_mean_clean[layer_id], list_stat_var_clean[layer_id]), reg_type=args.reg_type, moving_avg=args.moving_avg, momentum=args.momentum_mvg, stat_type=args.stat_type, reduce_dim=args.reduce_dim))
break
elif isinstance(args.stat_type, list):
if (args.arch == 'tanet'):
bn_layers = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]
chosen_layers = choose_layers(model, bn_layers)
(list_temp_mean_clean, list_temp_var_clean, list_spatiotemp_mean_clean, list_spatiotemp_var_clean, list_spatial_mean_clean, list_spatial_var_clean) = load_precomputed_statistics(args, len(chosen_layers))
if ('spatiotemp' in args.stat_type):
(list_spatiotemp_mean_clean_new, list_spatiotemp_var_clean_new) = ([], [])
else:
(list_spatiotemp_mean_clean_new, list_spatiotemp_var_clean_new) = (([None] * len(chosen_layers)), ([None] * len(chosen_layers)))
if ('spatial' in args.stat_type):
(list_spatial_mean_clean_new, list_spatial_var_clean_new) = ([], [])
else:
(list_spatial_mean_clean_new, list_spatial_var_clean_new) = (([None] * len(chosen_layers)), ([None] * len(chosen_layers)))
counter = 0
for (layer_id, (chosen_layer_name, chosen_layer)) in enumerate(chosen_layers):
if isinstance(chosen_layer, nn.BatchNorm1d):
if ('spatiotemp' in args.stat_type):
list_spatiotemp_mean_clean_new.append(None)
list_spatiotemp_var_clean_new.append(None)
if ('spatial' in args.stat_type):
list_spatial_mean_clean_new.append(None)
list_spatial_var_clean_new.append(None)
elif (isinstance(chosen_layer, nn.BatchNorm2d) or isinstance(chosen_layer, nn.BatchNorm3d)):
if ('spatiotemp' in args.stat_type):
list_spatiotemp_mean_clean_new.append(list_spatiotemp_mean_clean[counter])
list_spatiotemp_var_clean_new.append(list_spatiotemp_var_clean[counter])
if ('spatial' in args.stat_type):
list_spatial_mean_clean_new.append(list_spatial_mean_clean[counter])
list_spatial_var_clean_new.append(list_spatial_var_clean[counter])
counter += 1
elif (args.arch == 'videoswintransformer'):
candidate_layers = [nn.LayerNorm]
chosen_layers = choose_layers(model, candidate_layers)
chosen_layers = chosen_layers[1:]
(list_temp_mean_clean, list_temp_var_clean, list_spatiotemp_mean_clean, list_spatiotemp_var_clean, list_spatial_mean_clean, list_spatial_var_clean) = load_precomputed_statistics(args, len(chosen_layers))
(list_spatial_mean_clean_new, list_spatial_var_clean_new) = (list_spatial_mean_clean, list_spatial_var_clean)
(list_spatiotemp_mean_clean_new, list_spatiotemp_var_clean_new) = (list_spatiotemp_mean_clean, list_spatiotemp_var_clean)
assert (len(list_temp_mean_clean) == len(chosen_layers))
for (layer_id, (chosen_layer_name, chosen_layer)) in enumerate(chosen_layers):
for block_name in args.chosen_blocks:
if (block_name in chosen_layer_name):
stat_reg_hooks.append(CombineNormStatsRegHook_onereg(chosen_layer, clip_len=args.clip_length, spatiotemp_stats_clean_tuple=(list_spatiotemp_mean_clean_new[layer_id], list_spatiotemp_var_clean_new[layer_id]), reg_type=args.reg_type, moving_avg=args.moving_avg, momentum=args.momentum_mvg, stat_type_list=args.stat_type, reduce_dim=args.reduce_dim, before_norm=args.before_norm, if_sample_tta_aug_views=args.if_sample_tta_aug_views, n_augmented_views=args.n_augmented_views))
break
elif (args.stat_reg == 'cossim'):
stat_reg_hooks = []
list_temp_cossim = list(np.load(args.temp_cossim_clean_file, allow_pickle=True))
if (args.arch == 'tanet'):
bn_layers = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]
chosen_layers = choose_layers(model, bn_layers)
assert (len(list_temp_cossim) == len(chosen_layers))
for (layer_id, (chosen_layer_name, chosen_layer)) in enumerate(chosen_layers):
if (list_temp_cossim[layer_id] is not None):
for block_name in args.chosen_blocks:
if (block_name in chosen_layer_name):
stat_reg_hooks.append(CombineCossimRegHook(chosen_layer, clip_len=args.clip_length, temp_cossim=list_temp_cossim[layer_id], reg_type=args.reg_type, moving_avg=args.moving_avg, momentum=args.momentum_mvg, stat_type_list=args.stat_type, before_norm=args.before_norm))
break
elif (args.stat_reg == 'BNS'):
bns_feature_hooks = []
chosen_layers = choose_layers(model, candidate_bn_layers)
for chosen_layer in chosen_layers:
bns_feature_hooks.append(BNFeatureHook(chosen_layer, reg_type='l2norm', running_manner=args.running_manner, use_src_stat_in_reg=args.use_src_stat_in_reg, momentum=args.momentum_bns))
else:
raise Exception(f'undefined regularization type {args.stat_reg}')
epoch_result_list = []
if (args.arch == 'tanet'):
n_clips = int(args.sample_style.split('-')[(- 1)])
elif (args.arch == 'videoswintransformer'):
n_clips = args.num_clips
if args.if_sample_tta_aug_views:
assert (n_clips == 1)
n_augmented_views = args.n_augmented_views
if_pred_consistency = (args.if_pred_consistency if args.if_sample_tta_aug_views else False)
for epoch in range(args.n_epoch_adapat):
batch_time = AverageMeter()
losses_ce = AverageMeter()
losses_reg = AverageMeter()
losses_consis = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
pred_concat = []
gt_concat = []
end = time.time()
for (i, (input, target)) in enumerate(tta_loader):
model.train()
if args.fix_BNS:
for m in model.modules():
for candidate in candidate_bn_layers:
if isinstance(m, candidate):
m.eval()
actual_bz = input.shape[0]
input = input.cuda()
target = target.cuda()
if (args.arch == 'tanet'):
input = input.view((- 1), 3, input.size(2), input.size(3))
if args.if_sample_tta_aug_views:
input = input.view(((actual_bz * args.test_crops) * n_augmented_views), args.clip_length, 3, input.size(2), input.size(3))
else:
input = input.view(((actual_bz * args.test_crops) * n_clips), args.clip_length, 3, input.size(2), input.size(3))
output = model(input)
if args.if_sample_tta_aug_views:
output = output.reshape(actual_bz, (args.test_crops * n_augmented_views), (- 1))
if if_pred_consistency:
loss_consis = compute_pred_consis(output)
output = output.mean(1)
else:
output = output.reshape(actual_bz, (args.test_crops * n_clips), (- 1)).mean(1)
elif (args.arch == 'videoswintransformer'):
if args.if_sample_tta_aug_views:
n_views = (args.test_crops * n_augmented_views)
else:
n_views = (args.test_crops * n_clips)
if args.if_sample_tta_aug_views:
if if_pred_consistency:
(output, view_cls_score) = model(input)
loss_consis = compute_pred_consis(view_cls_score)
else:
(output, _) = model(input)
else:
input = input.reshape((((- 1),) + input.shape[2:]))
output = model(input)
output = rearrange(output, '(d0 d1) d2 -> d0 d1 d2', d0=actual_bz)
output = torch.mean(output, dim=1)
loss_ce = criterion(output, target)
loss_reg = torch.tensor(0).float().cuda()
if args.stat_reg:
for hook in stat_reg_hooks:
loss_reg += hook.r_feature.cuda()
else:
for hook in bns_feature_hooks:
loss_reg += hook.r_feature.cuda()
if if_pred_consistency:
loss = ((args.lambda_feature_reg * loss_reg) + (args.lambda_pred_consis * loss_consis))
else:
loss = loss_reg
optimizer.zero_grad()
loss.backward()
optimizer.step()
global_iter += 1
(prec1, prec5) = accuracy(output.data, target, topk=(1, 5))
(_, preds) = torch.max(output, 1)
losses_ce.update(loss_ce.item(), actual_bz)
losses_reg.update(loss_reg.item(), actual_bz)
if if_pred_consistency:
losses_consis.update(loss_consis.item(), actual_bz)
top1.update(prec1.item(), actual_bz)
top5.update(prec5.item(), actual_bz)
batch_time.update((time.time() - end))
end = time.time()
if args.verbose:
if ((i % args.print_freq) == 0):
logger.debug('TTA Epoch{epoch}: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss reg {loss_reg.val:.4f} ({loss_reg.avg:.4f})\tLoss consis {loss_consis.val:.4f} ({loss_consis.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(tta_loader), epoch=epoch, batch_time=batch_time, loss_reg=losses_reg, loss_consis=losses_consis, top1=top1, top5=top5))
if (writer is not None):
writer.add_scalars('loss', {'loss_reg': loss_reg.item()}, global_step=global_iter)
if if_pred_consistency:
writer.add_scalars('loss', {'loss_consis': loss_consis.item()}, global_step=global_iter)
writer.add_scalars('loss', {'loss_ce': loss_ce.item()}, global_step=global_iter)
if (args.stat_reg in ['mean_var', 'cossim']):
for stat_reg_hook in stat_reg_hooks:
stat_reg_hook.close()
elif (args.stat_reg == 'BNS'):
for bns_feature_hook in bns_feature_hooks:
bns_feature_hook.close()
top1_acc = validate_brief(eval_loader=eval_loader, model=model, global_iter=global_iter, epoch=epoch, args=args, logger=logger, writer=writer)
epoch_result_list.append(top1_acc)
return (epoch_result_list, model)

def rfc2047(value):
def decode_chunk(m):
(data, encoding) = decode_rfc2047_header(m.group(0))[0]
try:
res = data.decode(encoding)
except (LookupError, UnicodeDecodeError):
res = m.group(0)
return res
return _RE_RFC2047.sub(decode_chunk, value)  # flags belong in the compiled pattern; Pattern.sub's third positional arg is a count, not flags

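# For reference, a self-contained sketch of the same RFC 2047 decoding using
# only the stdlib (decode_rfc2047_header and _RE_RFC2047 above are assumed to
# be defined elsewhere in this module):
from email.header import decode_header

chunks = decode_header('=?utf-8?q?caf=C3=A9?=')
decoded = ''.join(d.decode(enc or 'ascii') if isinstance(d, bytes) else d for (d, enc) in chunks)
assert decoded == 'café'
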
@pytest.mark.parametrize('flag, expected', [('--blink-settings=key=value', [('key', 'value')]), ('--blink-settings=key=equal=rights', [('key', 'equal=rights')]), ('--blink-settings=one=1,two=2', [('one', '1'), ('two', '2')]), ('--enable-features=feat', [])])
def test_pass_through_existing_settings(config_stub, flag, expected):
config_stub.val.colors.webpage.darkmode.enabled = True
versions = version.WebEngineVersions.from_pyqt('5.15.2')
settings = darkmode.settings(versions=versions, special_flags=[flag])
dark_mode_expected = [('preferredColorScheme', '2'), ('forceDarkModeEnabled', 'true'), ('forceDarkModeImagePolicy', '2')]
assert (settings['blink-settings'] == (expected + dark_mode_expected))

class DiagonalLineDecorator(ChartDecorator, SimpleLegendItem):
def __init__(self, key: str=None, **plot_settings: Any):
ChartDecorator.__init__(self, key)
SimpleLegendItem.__init__(self)
self.plot_settings = plot_settings
def decorate(self, chart: 'Chart') -> None:
self.legend_artist = chart.axes.plot([0, 1], [0, 1], transform=chart.axes.transAxes, **self.plot_settings)

@define_model
class Sound(BaseMedia):
file_content_type: str = field(default=None)
file_url: str = field(default=None)
native_sound_id: str = field(default=None)
secret_token: str = field(default=None)
subtype: str = field(default=None)
@classmethod
def from_json(cls, value: JsonResponse, **kwargs) -> 'Sound':
if ('sound' in value):
value.update(value.pop('sound'))
return super(Sound, cls).from_json(value, **kwargs)
@property
def mimetype(self) -> str:
return self.file_content_type
@property
def url(self) -> str:
return self.file_url
@property
def _row(self) -> TableRow:
return {'ID': self.id, 'License': self.license_code, 'URL': self.url}

def get_model(data, weights='imagenet'):
base_model = InceptionV3(weights=weights, include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(len(data.classes), activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
return model

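# Hedged usage sketch: get_model only reads len(data.classes), so any object
# with a `classes` sequence works (the stub below is made up for the demo, and
# the Keras imports used above are assumed in scope). weights=None skips the
# ImageNet download.
class _StubData:
    classes = ('cat', 'dog', 'ship')

model = get_model(_StubData(), weights=None)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
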
class KLRegSteepestDescent(nn.Module):
def __init__(self, score_predictor, num_iter=1, compute_losses=True, detach_length=float('Inf'), parameter_batch_dim=0, steplength_reg=0.0, hessian_reg=0, init_step_length=1.0, softmax_reg=None):
super().__init__()
self.score_predictor = score_predictor
self.num_iter = num_iter
self.compute_losses = compute_losses
self.detach_length = detach_length
self.steplength_reg = steplength_reg
self.hessian_reg = hessian_reg
self.log_step_length = nn.Parameter((math.log(init_step_length) * torch.ones(1)))
self.softmax_reg = softmax_reg
self._parameter_batch_dim = parameter_batch_dim
def forward(self, meta_parameter: TensorList, num_iter=None, **kwargs):
if (not isinstance(meta_parameter, TensorList)):
meta_parameter = TensorList([meta_parameter])
_residual_batch_dim = 1
torch_grad_enabled = torch.is_grad_enabled()
torch.set_grad_enabled(True)
num_iter = (self.num_iter if (num_iter is None) else num_iter)
step_length_factor = torch.exp(self.log_step_length)
(label_density, sample_weight, reg_weight) = self.score_predictor.init_data(meta_parameter, **kwargs)
exp_reg = (0 if (self.softmax_reg is None) else math.exp(self.softmax_reg))
def _compute_loss(scores, weights):
num_sequences = scores.shape[_residual_batch_dim]
return ((torch.sum((sample_weight.reshape(sample_weight.shape[0], (- 1)) * (torch.log((scores.exp().sum(dim=((- 2), (- 1))) + exp_reg)) - (label_density * scores).sum(dim=((- 2), (- 1)))))) / num_sequences) + ((reg_weight * sum((weights * weights).sum())) / num_sequences))
meta_parameter_iterates = [meta_parameter]
losses = []
for i in range(num_iter):
if ((i > 0) and ((i % self.detach_length) == 0)):
meta_parameter = meta_parameter.detach()
meta_parameter.requires_grad_(True)
scores = self.score_predictor(meta_parameter, **kwargs)
if self.compute_losses:
losses.append(_compute_loss(scores, meta_parameter))
scores_softmax = activation.softmax_reg(scores.reshape(*scores.shape[:2], (- 1)), dim=2, reg=self.softmax_reg).reshape(scores.shape)
dLds = (sample_weight * (scores_softmax - label_density))
weights_grad = (TensorList(torch.autograd.grad(scores, meta_parameter, dLds, create_graph=True)) + (meta_parameter * reg_weight))
scores_grad = torch.autograd.grad(weights_grad, dLds, weights_grad, create_graph=True)[0]
sm_scores_grad = (scores_softmax * scores_grad)
hes_scores_grad = ((sm_scores_grad - (scores_softmax * torch.sum(sm_scores_grad, dim=((- 2), (- 1)), keepdim=True))) + (self.hessian_reg * scores_grad))
grad_hes_grad = (scores_grad * hes_scores_grad).reshape(*scores.shape[:2], (- 1)).sum(dim=2).clamp(min=0)
grad_hes_grad = (sample_weight.reshape(sample_weight.shape[0], (- 1)) * grad_hes_grad).sum(dim=0)
gg = (weights_grad * weights_grad).reshape(scores.shape[1], (- 1)).sum(dim=1)
alpha_num = sum(gg)
alpha_den = ((grad_hes_grad + sum((gg * reg_weight))) + (self.steplength_reg * alpha_num)).clamp(1e-08)
alpha = (step_length_factor * (alpha_num / alpha_den))
step = weights_grad.apply((lambda e: (alpha.reshape([((- 1) if (d == self._parameter_batch_dim) else 1) for d in range(e.dim())]) * e)))
meta_parameter = (meta_parameter - step)
meta_parameter_iterates.append(meta_parameter)
if self.compute_losses:
losses.append(_compute_loss(self.score_predictor(meta_parameter, **kwargs), meta_parameter))
torch.set_grad_enabled(torch_grad_enabled)
if (not torch_grad_enabled):
meta_parameter.detach_()
for w in meta_parameter_iterates:
w.detach_()
for l in losses:
l.detach_()
return (meta_parameter, meta_parameter_iterates, losses)

class EfficientNet(tf.keras.Model):
def __init__(self, blocks_args=None, global_params=None):
super(EfficientNet, self).__init__()
if (not isinstance(blocks_args, list)):
raise ValueError('blocks_args should be a list.')
self._global_params = global_params
self._blocks_args = blocks_args
self.endpoints = None
self._build()
def _build(self):
self._blocks = []
for block_args in self._blocks_args:
assert (block_args.num_repeat > 0)
block_args = block_args._replace(input_filters=round_filters(block_args.input_filters, self._global_params), output_filters=round_filters(block_args.output_filters, self._global_params), num_repeat=round_repeats(block_args.num_repeat, self._global_params))
self._blocks.append(MBConvBlock(block_args, self._global_params))
if (block_args.num_repeat > 1):
block_args = block_args._replace(input_filters=block_args.output_filters, strides=[1, 1])
for _ in range((block_args.num_repeat - 1)):
self._blocks.append(MBConvBlock(block_args, self._global_params))
batch_norm_momentum = self._global_params.batch_norm_momentum
batch_norm_epsilon = self._global_params.batch_norm_epsilon
if (self._global_params.data_format == 'channels_first'):
channel_axis = 1
else:
channel_axis = (- 1)
self._conv_stem = l.Conv2D(filters=round_filters(32, self._global_params), kernel_size=[3, 3], strides=[2, 2], kernel_initializer=conv_kernel_initializer, padding='same', use_bias=False, name='stem_conv')
self._bn0 = _bn_layer(axis=channel_axis, momentum=batch_norm_momentum, epsilon=batch_norm_epsilon)
self._conv_head = l.Conv2D(filters=round_filters(1280, self._global_params), kernel_size=[1, 1], strides=[1, 1], kernel_initializer=conv_kernel_initializer, padding='same', use_bias=False)
self._bn1 = _bn_layer(axis=channel_axis, momentum=batch_norm_momentum, epsilon=batch_norm_epsilon)
self._avg_pooling = l.GlobalAveragePooling2D(data_format=self._global_params.data_format)
self._fc = l.Dense(self._global_params.num_classes, kernel_initializer=dense_kernel_initializer)
if (self._global_params.dropout_rate > 0):
self._dropout = l.Dropout(self._global_params.dropout_rate)
else:
self._dropout = None
def call(self, inputs, training=True, features_only=None):
outputs = None
self.endpoints = {}
with tf.variable_scope('stem'):
outputs = relu_fn(self._bn0(self._conv_stem(inputs), training=training))
tf.logging.info(('Built stem layers with output shape: %s' % outputs.shape))
self.endpoints['stem'] = outputs
reduction_idx = 0
for (idx, block) in enumerate(self._blocks):
is_reduction = False
if ((idx == (len(self._blocks) - 1)) or (self._blocks[(idx + 1)].block_args().strides[0] > 1)):
is_reduction = True
reduction_idx += 1
with tf.variable_scope(('blocks_%s' % idx)):
drop_rate = self._global_params.drop_connect_rate
if drop_rate:
drop_rate *= (float(idx) / len(self._blocks))
tf.logging.info(('block_%s drop_connect_rate: %s' % (idx, drop_rate)))
outputs = block.call(outputs, training=training, output_layer_name=('block_%s' % idx))
self.endpoints[('block_%s' % idx)] = outputs
if is_reduction:
self.endpoints[('reduction_%s' % reduction_idx)] = outputs
if block.endpoints:
for (k, v) in six.iteritems(block.endpoints):
self.endpoints[('block_%s/%s' % (idx, k))] = v
if is_reduction:
self.endpoints[('reduction_%s/%s' % (reduction_idx, k))] = v
self.endpoints['global_pool'] = outputs
if (not features_only):
with tf.variable_scope('head'):
outputs = relu_fn(self._bn1(self._conv_head(outputs), training=training))
outputs = self._avg_pooling(outputs)
if self._dropout:
outputs = self._dropout(outputs, training=training)
outputs = self._fc(outputs)
self.endpoints['head'] = outputs
return outputs
def call_model(self, inputs, training=True, features_only=None):
outputs = None
self.endpoints = {}
with tf.variable_scope('stem'):
outputs = relu_fn(self._bn0(self._conv_stem(inputs), training=training))
tf.logging.info(('Built stem layers with output shape: %s' % outputs.shape))
reduction_idx = 0
for (idx, block) in enumerate(self._blocks):
is_reduction = False
if ((idx == (len(self._blocks) - 1)) or (self._blocks[(idx + 1)].block_args().strides[0] > 1)):
is_reduction = True
reduction_idx += 1
with tf.variable_scope(('blocks_%s' % idx)):
drop_rate = self._global_params.drop_connect_rate
if drop_rate:
drop_rate *= (float(idx) / len(self._blocks))
tf.logging.info(('block_%s drop_connect_rate: %s' % (idx, drop_rate)))
outputs = block.call(outputs, training=training, output_layer_name=('block_%s' % idx))
if (not features_only):
with tf.variable_scope('head'):
outputs = relu_fn(self._bn1(self._conv_head(outputs), training=training))
outputs = self._avg_pooling(outputs)
if self._dropout:
outputs = self._dropout(outputs, training=training)
outputs = self._fc(outputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model

@pytest.fixture  # restored decorator: venv consumes other fixtures, so it must itself be a pytest fixture
def venv(tmp_path_factory, session_app_data):
if CURRENT.is_venv:
return sys.executable
root_python = root(tmp_path_factory, session_app_data)
dest = tmp_path_factory.mktemp('venv')
process = Popen([str(root_python), '-m', 'venv', '--without-pip', str(dest)])
process.communicate()
return CURRENT.discover_exe(prefix=str(dest)).original_executable

class NormFreeNet(nn.Module):
def __init__(self, cfg: NfCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, drop_rate=0.0, drop_path_rate=0.0):
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
assert (cfg.act_layer in _nonlin_gamma), f'Please add non-linearity constants for activation ({cfg.act_layer}).'
conv_layer = (ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d)
if cfg.gamma_in_act:
act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer])
conv_layer = partial(conv_layer, eps=cfg.std_conv_eps)
else:
act_layer = get_act_layer(cfg.act_layer)
conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps)
attn_layer = (partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None)
stem_chs = make_divisible(((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor), cfg.ch_div)
(self.stem, stem_stride, stem_feat) = create_stem(in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer)
self.feature_info = [stem_feat]
drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
prev_chs = stem_chs
net_stride = stem_stride
dilation = 1
expected_var = 1.0
stages = []
for (stage_idx, stage_depth) in enumerate(cfg.depths):
stride = (1 if ((stage_idx == 0) and (stem_stride > 2)) else 2)
if ((net_stride >= output_stride) and (stride > 1)):
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = (1 if (dilation in (1, 2)) else 2)
blocks = []
for block_idx in range(cfg.depths[stage_idx]):
first_block = ((block_idx == 0) and (stage_idx == 0))
out_chs = make_divisible((cfg.channels[stage_idx] * cfg.width_factor), cfg.ch_div)
blocks += [NormFreeBlock(in_chs=prev_chs, out_chs=out_chs, alpha=cfg.alpha, beta=(1.0 / (expected_var ** 0.5)), stride=(stride if (block_idx == 0) else 1), dilation=dilation, first_dilation=first_dilation, group_size=cfg.group_size, bottle_ratio=(1.0 if (cfg.reg and first_block) else cfg.bottle_ratio), ch_div=cfg.ch_div, reg=cfg.reg, extra_conv=cfg.extra_conv, skipinit=cfg.skipinit, attn_layer=attn_layer, attn_gain=cfg.attn_gain, act_layer=act_layer, conv_layer=conv_layer, drop_path_rate=drop_path_rates[stage_idx][block_idx])]
if (block_idx == 0):
expected_var = 1.0
expected_var += (cfg.alpha ** 2)
first_dilation = dilation
prev_chs = out_chs
self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')]
stages += [nn.Sequential(*blocks)]
self.stages = nn.Sequential(*stages)
if cfg.num_features:
self.num_features = make_divisible((cfg.width_factor * cfg.num_features), cfg.ch_div)
self.final_conv = conv_layer(prev_chs, self.num_features, 1)
self.feature_info[(- 1)] = dict(num_chs=self.num_features, reduction=net_stride, module='final_conv')
else:
self.num_features = prev_chs
self.final_conv = nn.Identity()
self.final_act = act_layer(inplace=(cfg.num_features > 0))
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
for (n, m) in self.named_modules():
if (('fc' in n) and isinstance(m, nn.Linear)):
if cfg.zero_init_fc:
nn.init.zeros_(m.weight)
else:
nn.init.normal_(m.weight, 0.0, 0.01)
if (m.bias is not None):
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear')
if (m.bias is not None):
nn.init.zeros_(m.bias)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(stem='^stem', blocks=[(('^stages\\.(\\d+)' if coarse else '^stages\\.(\\d+)\\.(\\d+)'), None), ('^final_conv', (99999,))])
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
if (self.grad_checkpointing and (not torch.jit.is_scripting())):
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
x = self.final_conv(x)
x = self.final_act(x)
return x
def forward_head(self, x):
return self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x

def upgrade(saveddata_engine):
saveddata_engine.execute(tmpTable)
saveddata_engine.execute('INSERT INTO boostersTemp (ID, itemID, fitID, active) SELECT ID, itemID, fitID, active FROM boosters')
saveddata_engine.execute('DROP TABLE boosters')
saveddata_engine.execute('ALTER TABLE boostersTemp RENAME TO boosters')

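# The temp-table dance above is the standard SQLite recipe for dropping a
# column. A self-contained illustration with the sqlite3 stdlib module (this
# schema and the `legacy` column are made up for the demo; `tmpTable` in the
# migration above is assumed to hold the corresponding CREATE TABLE statement):
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE boosters (ID INTEGER PRIMARY KEY, itemID INT, fitID INT, active BOOL, legacy TEXT)')
con.execute('CREATE TABLE boostersTemp (ID INTEGER PRIMARY KEY, itemID INT, fitID INT, active BOOL)')
con.execute('INSERT INTO boostersTemp (ID, itemID, fitID, active) SELECT ID, itemID, fitID, active FROM boosters')
con.execute('DROP TABLE boosters')
con.execute('ALTER TABLE boostersTemp RENAME TO boosters')
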
class InequalityToEquality(QuadraticProgramConverter):
_delimiter = ''
def __init__(self, mode: str='auto') -> None:
self._src: Optional[QuadraticProgram] = None
self._dst: Optional[QuadraticProgram] = None
self._mode = mode
def convert(self, problem: QuadraticProgram) -> QuadraticProgram:
self._src = copy.deepcopy(problem)
self._dst = QuadraticProgram(name=problem.name)
mode = self._mode
if (mode not in ['integer', 'continuous', 'auto']):
raise QiskitOptimizationError(f'Unsupported mode is selected: {mode}')
for x in self._src.variables:
if (x.vartype == Variable.Type.BINARY):
self._dst.binary_var(name=x.name)
elif (x.vartype == Variable.Type.INTEGER):
self._dst.integer_var(name=x.name, lowerbound=x.lowerbound, upperbound=x.upperbound)
elif (x.vartype == Variable.Type.CONTINUOUS):
self._dst.continuous_var(name=x.name, lowerbound=x.lowerbound, upperbound=x.upperbound)
else:
raise QiskitOptimizationError(f'Unsupported variable type {x.vartype}')
new_linear_constraints = []
for lin_const in self._src.linear_constraints:
if (lin_const.sense == Constraint.Sense.EQ):
new_linear_constraints.append((lin_const.linear.coefficients, lin_const.sense, lin_const.rhs, lin_const.name))
elif (lin_const.sense in [Constraint.Sense.LE, Constraint.Sense.GE]):
new_linear_constraints.append(self._add_slack_var_linear_constraint(lin_const))
else:
raise QiskitOptimizationError(f'Internal error: type of sense in {lin_const.name} is not supported: {lin_const.sense}')
new_quadratic_constraints = []
for quad_const in self._src.quadratic_constraints:
if (quad_const.sense == Constraint.Sense.EQ):
new_quadratic_constraints.append((quad_const.linear.coefficients, quad_const.quadratic.coefficients, quad_const.sense, quad_const.rhs, quad_const.name))
elif (quad_const.sense in [Constraint.Sense.LE, Constraint.Sense.GE]):
new_quadratic_constraints.append(self._add_slack_var_quadratic_constraint(quad_const))
else:
raise QiskitOptimizationError(f'Internal error: type of sense in {quad_const.name} is not supported: {quad_const.sense}')
constant = self._src.objective.constant
linear = self._src.objective.linear.to_dict(use_name=True)
quadratic = self._src.objective.quadratic.to_dict(use_name=True)
if (self._src.objective.sense == QuadraticObjective.Sense.MINIMIZE):
self._dst.minimize(constant, linear, quadratic)
else:
self._dst.maximize(constant, linear, quadratic)
for lin_const_args in new_linear_constraints:
self._dst.linear_constraint(*lin_const_args)
for quad_const_args in new_quadratic_constraints:
self._dst.quadratic_constraint(*quad_const_args)
return self._dst
def _add_slack_var_linear_constraint(self, constraint: LinearConstraint):
linear = constraint.linear
sense = constraint.sense
name = constraint.name
any_float = self._any_float(linear.to_array())
mode = self._mode
if (mode == 'integer'):
if any_float:
raise QiskitOptimizationError(f'"{name}" contains float coefficients. We can not use an integer slack variable for "{name}"')
elif (mode == 'auto'):
mode = ('continuous' if any_float else 'integer')
new_rhs = constraint.rhs
if (mode == 'integer'):
if (sense == Constraint.Sense.LE):
new_rhs = math.floor(new_rhs)
if (sense == Constraint.Sense.GE):
new_rhs = math.ceil(new_rhs)
lin_bounds = linear.bounds
lhs_lb = lin_bounds.lowerbound
lhs_ub = lin_bounds.upperbound
var_ub = 0.0
sign = 0
if (sense == Constraint.Sense.LE):
var_ub = (new_rhs - lhs_lb)
if (var_ub > 0):
sign = 1
elif (sense == Constraint.Sense.GE):
var_ub = (lhs_ub - new_rhs)
if (var_ub > 0):
sign = (- 1)
new_linear = linear.to_dict(use_name=True)
if (var_ub > 0):
mode_name = {'integer': 'int', 'continuous': 'continuous'}
slack_name = f'{name}{self._delimiter}{mode_name[mode]}_slack'
if (mode == 'integer'):
self._dst.integer_var(name=slack_name, lowerbound=0, upperbound=var_ub)
elif (mode == 'continuous'):
self._dst.continuous_var(name=slack_name, lowerbound=0, upperbound=var_ub)
new_linear[slack_name] = sign
return (new_linear, '==', new_rhs, name)
def _add_slack_var_quadratic_constraint(self, constraint: QuadraticConstraint):
quadratic = constraint.quadratic
linear = constraint.linear
sense = constraint.sense
name = constraint.name
any_float = (self._any_float(linear.to_array()) or self._any_float(quadratic.to_array()))
mode = self._mode
if (mode == 'integer'):
if any_float:
raise QiskitOptimizationError(f'"{name}" contains float coefficients. We can not use an integer slack variable for "{name}"')
elif (mode == 'auto'):
mode = ('continuous' if any_float else 'integer')
new_rhs = constraint.rhs
if (mode == 'integer'):
if (sense == Constraint.Sense.LE):
new_rhs = math.floor(new_rhs)
if (sense == Constraint.Sense.GE):
new_rhs = math.ceil(new_rhs)
lin_bounds = linear.bounds
quad_bounds = quadratic.bounds
lhs_lb = (lin_bounds.lowerbound + quad_bounds.lowerbound)
lhs_ub = (lin_bounds.upperbound + quad_bounds.upperbound)
var_ub = 0.0
sign = 0
if (sense == Constraint.Sense.LE):
var_ub = (new_rhs - lhs_lb)
if (var_ub > 0):
sign = 1
elif (sense == Constraint.Sense.GE):
var_ub = (lhs_ub - new_rhs)
if (var_ub > 0):
sign = (- 1)
new_linear = linear.to_dict(use_name=True)
if (var_ub > 0):
mode_name = {'integer': 'int', 'continuous': 'continuous'}
slack_name = f'{name}{self._delimiter}{mode_name[mode]}_slack'
if (mode == 'integer'):
self._dst.integer_var(name=slack_name, lowerbound=0, upperbound=var_ub)
elif (mode == 'continuous'):
self._dst.continuous_var(name=slack_name, lowerbound=0, upperbound=var_ub)
new_linear[slack_name] = sign
return (new_linear, quadratic.coefficients, '==', new_rhs, name)
def interpret(self, x: Union[(np.ndarray, List[float])]) -> np.ndarray:
names = [var.name for var in self._dst.variables]
sol = {name: x[i] for (i, name) in enumerate(names)}
new_x = np.zeros(self._src.get_num_vars())
for (i, var) in enumerate(self._src.variables):
new_x[i] = sol[var.name]
return new_x
@staticmethod
def _any_float(values: np.ndarray) -> bool:
return any(((isinstance(v, float) and (not v.is_integer())) for v in values))
@property
def mode(self) -> str:
return self._mode
@mode.setter
def mode(self, mode: str) -> None:
self._mode = mode

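# Hedged usage sketch with qiskit-optimization's QuadraticProgram: the <=
# constraint below has integral coefficients, so mode='auto' adds an integer
# slack variable and rewrites it as an equality.
from qiskit_optimization import QuadraticProgram

problem = QuadraticProgram('example')
problem.binary_var('x')
problem.binary_var('y')
problem.maximize(linear={'x': 1, 'y': 2})
problem.linear_constraint(linear={'x': 1, 'y': 1}, sense='<=', rhs=1, name='c0')
equality_problem = InequalityToEquality(mode='auto').convert(problem)
print(equality_problem.export_as_lp_string())  # c0 gains an integer slack variable and becomes an ==
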
def main(args):
cfg = setup(args)
model = build_model(cfg)
logger.info('Model:\n{}'.format(model))
if args.eval_only:
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
if cfg.TEST.AUG.ENABLED:
logger.info('Running inference with test-time augmentation ...')
model = GeneralizedRCNNWithTTA(cfg, model, batch_size=1)
return do_test(cfg, model)
distributed = (comm.get_world_size() > 1)
if distributed:
model = DistributedDataParallel(model, device_ids=[comm.get_local_rank()], broadcast_buffers=False, find_unused_parameters=True)
do_train(cfg, model, resume=args.resume)
return do_test(cfg, model)

def test_flake8_per_file_ignores(workspace):
config_str = '[flake8]\nignores = F403\nper-file-ignores =\n **/__init__.py:F401,E402\n test_something.py:E402,\nexclude =\n file_1.py\n file_2.py\n '
doc_str = "print('hi')\nimport os\n"
doc_uri = uris.from_fs_path(os.path.join(workspace.root_path, 'blah/__init__.py'))
workspace.put_document(doc_uri, doc_str)
flake8_settings = get_flake8_cfg_settings(workspace, config_str)
assert ('perFileIgnores' in flake8_settings)
assert (len(flake8_settings['perFileIgnores']) == 2)
assert ('exclude' in flake8_settings)
assert (len(flake8_settings['exclude']) == 2)
doc = workspace.get_document(doc_uri)
res = flake8_lint.pylsp_lint(workspace, doc)
assert (not res)
os.unlink(os.path.join(workspace.root_path, 'setup.cfg'))

def MCLDNN(weights=None, input_shape1=[2, 128], input_shape2=[128, 1], classes=11, **kwargs):
if ((weights is not None) and (not os.path.exists(weights))):
raise ValueError('The `weights` argument should be either `None` (random initialization), or the path to the weights file to be loaded.')
dr = 0.5
input1 = Input((input_shape1 + [1]), name='I/Qchannel')
input2 = Input(input_shape2, name='Ichannel')
input3 = Input(input_shape2, name='Qchannel')
x1 = Conv2D(50, (2, 8), padding='same', activation='relu', name='Conv1', kernel_initializer='glorot_uniform')(input1)
x2 = Conv1D(50, 8, padding='causal', activation='relu', name='Conv2', kernel_initializer='glorot_uniform')(input2)
x2_reshape = Reshape([(- 1), 128, 50])(x2)
x3 = Conv1D(50, 8, padding='causal', activation='relu', name='Conv3', kernel_initializer='glorot_uniform')(input3)
x3_reshape = Reshape([(- 1), 128, 50], name='reshap2')(x3)
x = concatenate([x2_reshape, x3_reshape], axis=1, name='Concatenate1')
x = Conv2D(50, (1, 8), padding='same', activation='relu', name='Conv4', kernel_initializer='glorot_uniform')(x)
x = concatenate([x1, x], name='Concatenate2')
x = Conv2D(100, (2, 5), padding='valid', activation='relu', name='Conv5', kernel_initializer='glorot_uniform')(x)
x = Reshape(target_shape=(124, 100))(x)
x = CuDNNLSTM(units=128, return_sequences=True, name='LSTM1')(x)
x = CuDNNLSTM(units=128, name='LSTM2')(x)
x = Dense(128, activation='selu', name='FC1')(x)
x = Dropout(dr)(x)
x = Dense(128, activation='selu', name='FC2')(x)
x = Dropout(dr)(x)
x = Dense(classes, activation='softmax', name='Softmax')(x)
model = Model(inputs=[input1, input2, input3], outputs=x)
if (weights is not None):
model.load_weights(weights)
return model

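# Smoke-test sketch, assuming the Keras imports used above (Input, Conv2D,
# CuDNNLSTM, ...) resolve; note CuDNNLSTM requires a GPU-enabled TensorFlow.
model = MCLDNN(classes=11)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()  # three inputs: (2, 128, 1), (128, 1) and (128, 1)
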
class ImplementationWrapper(object):
def __init__(self, instance, field_name, transition, workflow, implementation, hooks=None):
self.instance = instance
self.field_name = field_name
self.transition = transition
self.workflow = workflow
self.hooks = (hooks or {})
self.implementation = implementation
self.__doc__ = implementation.__doc__
@property
def current_state(self):
return getattr(self.instance, self.field_name)
def _pre_transition_checks(self):
current_state = getattr(self.instance, self.field_name)
if (current_state not in self.transition.source):
raise InvalidTransitionError(("Transition '%s' isn't available from state '%s'." % (self.transition.name, current_state.name)))
for check in self._filter_hooks(HOOK_CHECK):
if (not check(self.instance)):
raise ForbiddenTransition(("Transition '%s' was forbidden by custom pre-transition check." % self.transition.name))
def _filter_hooks(self, *hook_kinds):
hooks = sum((self.hooks.get(kind, []) for kind in hook_kinds), [])
return sorted((hook for hook in hooks if hook.applies_to(self.transition, self.current_state)))
def _pre_transition(self, *args, **kwargs):
for hook in self._filter_hooks(HOOK_BEFORE, HOOK_ON_LEAVE):
hook(self.instance, *args, **kwargs)
def _during_transition(self, *args, **kwargs):
return self.implementation(self.instance, *args, **kwargs)
def _log_transition(self, from_state, *args, **kwargs):
self.workflow.log_transition(self.transition, from_state, self.instance, *args, **kwargs)
def _post_transition(self, result, *args, **kwargs):
for hook in self._filter_hooks(HOOK_AFTER, HOOK_ON_ENTER):
hook(self.instance, result, *args, **kwargs)
def __call__(self, *args, **kwargs):
self._pre_transition_checks()
self._pre_transition(*args, **kwargs)
result = self._during_transition(*args, **kwargs)
from_state = getattr(self.instance, self.field_name)
setattr(self.instance, self.field_name, self.transition.target)
self._log_transition(from_state, *args, **kwargs)
self._post_transition(result, *args, **kwargs)
return result
def is_available(self):
try:
self._pre_transition_checks()
except (InvalidTransitionError, ForbiddenTransition):
return False
return True
def __repr__(self):
return ('<%s for %r on %r: %r>' % (self.__class__.__name__, self.transition.name, self.field_name, self.implementation))

def test_upload_pypirc_file(copy_sample):
with temp_pypirc(pypirc3) as pypirc, patch('flit.upload.upload_file') as upload_file:
td = copy_sample('module1_toml')
formats = list(ALL_FORMATS)[:1]
upload.main((td / 'pyproject.toml'), formats=set(formats), repo_name='test123', pypirc_path=pypirc)
(_, _, repo) = upload_file.call_args[0]
assert (repo.url == pypirc3_repo)
assert (repo.username == pypirc3_user)
assert (repo.password == pypirc3_pass)

def ssim(img1, img2, window_size=11, mask=None, size_average=True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, mask, size_average)

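# Usage sketch (create_window and _ssim are assumed to come from the same
# module as ssim): identical inputs should score close to 1.0.
import torch

img_a = torch.rand(1, 3, 64, 64)
score = ssim(img_a, img_a.clone(), window_size=11)
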
class LLL_Net(nn.Module):
def __init__(self, model, remove_existing_head=False):
head_var = model.head_var
assert (type(head_var) == str)
assert ((not remove_existing_head) or hasattr(model, head_var)), 'Given model does not have a variable called {}'.format(head_var)
assert ((not remove_existing_head) or (type(getattr(model, head_var)) in [nn.Sequential, nn.Linear])), "Given model's head {} is not an instance of nn.Sequential or nn.Linear".format(head_var)
super(LLL_Net, self).__init__()
self.model = model
last_layer = getattr(self.model, head_var)
if remove_existing_head:
if (type(last_layer) == nn.Sequential):
self.out_size = last_layer[(- 1)].in_features
del last_layer[(- 1)]
elif (type(last_layer) == nn.Linear):
self.out_size = last_layer.in_features
setattr(self.model, head_var, nn.Sequential())
else:
self.out_size = last_layer.out_features
self.heads = nn.ModuleList()
self.task_cls = []
self.task_offset = []
self._initialize_weights()
def add_head(self, num_outputs):
self.heads.append(nn.Linear(self.out_size, num_outputs))
self.task_cls = torch.tensor([head.out_features for head in self.heads])
self.task_offset = torch.cat([torch.LongTensor(1).zero_(), self.task_cls.cumsum(0)[:(- 1)]])
def forward(self, x, return_features=False):
x = self.model(x)
assert (len(self.heads) > 0), 'Cannot access any head'
y = []
for head in self.heads:
y.append(head(x))
if return_features:
return (y, x)
else:
return y
def get_copy(self):
return deepcopy(self.state_dict())
def set_state_dict(self, state_dict):
self.load_state_dict(deepcopy(state_dict))
return
def freeze_all(self):
for param in self.parameters():
param.requires_grad = False
def freeze_backbone(self):
for param in self.model.parameters():
param.requires_grad = False
def freeze_bn(self):
for m in self.model.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def _initialize_weights(self):
pass

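# Hedged usage sketch: LLL_Net only needs the wrapped model to expose
# `head_var`, the attribute name of its existing classifier head.
import torch
from torchvision.models import resnet18

backbone = resnet18()
backbone.head_var = 'fc'  # resnet's head is its `fc` Linear layer
net = LLL_Net(backbone, remove_existing_head=True)
net.add_head(10)  # task 0: 10 classes
net.add_head(5)   # task 1: 5 classes
outputs = net(torch.rand(2, 3, 224, 224))
print([o.shape for o in outputs])  # [torch.Size([2, 10]), torch.Size([2, 5])]
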
def uninstall_variables(name=JTOP_VARIABLE_FILE):
if os.path.isfile('/etc/profile.d/{name}'.format(name=name)):
logger.info('Found {name}'.format(name=name))
os.remove('/etc/profile.d/{name}'.format(name=name))
logger.info(' - Remove {name} from /etc/profile.d/'.format(name=name))

@admin.action(description='Upload videos to YouTube')
def upload_videos_to_youtube(modeladmin, request, queryset):
videos = queryset.filter(youtube_video_id__exact='').exclude(video_uploaded_path__exact='')
conference_id = queryset.first().conference_id
start_workflow(workflow=BatchMultipleScheduleItemsVideoUpload.run, id=f'batch-upload-video-conference-{conference_id}', task_queue='default', arg=BatchMultipleScheduleItemsVideoUpload.input(schedule_items_ids=list(videos.values_list('id', flat=True))))
messages.add_message(request, messages.INFO, f'Scheduled {videos.count()} videos to upload')

def is_proper_subtype(left: Type, right: Type, *, subtype_context: (SubtypeContext | None)=None, ignore_promotions: bool=False, ignore_uninhabited: bool=False, erase_instances: bool=False, keep_erased_types: bool=False) -> bool:
if (subtype_context is None):
subtype_context = SubtypeContext(ignore_promotions=ignore_promotions, ignore_uninhabited=ignore_uninhabited, erase_instances=erase_instances, keep_erased_types=keep_erased_types)
else:
assert (not any({ignore_promotions, ignore_uninhabited, erase_instances, keep_erased_types})), "Don't pass both context and individual flags"
if type_state.is_assumed_proper_subtype(left, right):
return True
if mypy.typeops.is_recursive_pair(left, right):
with pop_on_exit(type_state.get_assumptions(is_proper=True), left, right):
return _is_subtype(left, right, subtype_context, proper_subtype=True)
return _is_subtype(left, right, subtype_context, proper_subtype=True)

def test_strict_option_is_deprecated(pytester: Pytester) -> None:
pytester.makepyfile('\n import pytest\n\n @pytest.mark.unknown\n def test_foo(): pass\n ')
result = pytester.runpytest('--strict', '-Wdefault::pytest.PytestRemovedIn8Warning')
result.stdout.fnmatch_lines(["'unknown' not found in `markers` configuration option", '*PytestRemovedIn8Warning: The --strict option is deprecated, use --strict-markers instead.'])

@dataclass(frozen=True)
class MultiplayerSessionEntry(JsonDataclass):
id: int
name: str
worlds: list[MultiplayerWorld]
users_list: list[MultiplayerUser]
game_details: (GameDetails | None)
visibility: MultiplayerSessionVisibility
generation_in_progress: (int | None)
allowed_games: list[RandovaniaGame]
allow_coop: bool
allow_everyone_claim_world: bool
@property
def users(self) -> dict[(int, MultiplayerUser)]:
return {user.id: user for user in self.users_list}
@property
def num_admins(self) -> int:
return sum((1 for player in self.users.values() if player.admin))
def get_world(self, world_id: uuid.UUID) -> MultiplayerWorld:
for world in self.worlds:
if (world.id == world_id):
return world
raise KeyError(f'No world with id {world_id}')
def get_world_names(self) -> list[str]:
return [world.name for world in self.worlds]

def msrvtt_zh(msrvtt_train_captions):
print(('-' * 20))
print('Prepare msrvtt_zh')
msrvtt_cn_path = 'data/MSRVTT-CN/msrvtt10kcntrain_google_enc2zh.caption.txt'
assert os.path.exists(msrvtt_cn_path), msrvtt_cn_path
data = open(msrvtt_cn_path, 'r').read().strip().split('\n')
vid2Chinese_captions = defaultdict(list)
for line in data:
(tag, *caption) = line.split(' ')
vid = int(tag.split('#')[0][5:])
caption = ' '.join(caption)
vid2Chinese_captions[vid].append(caption)
data = json.load(open(f'{configs.finetune_root}/msrvtt/videodatainfo_2016.json', 'r'))
splits = defaultdict(list)
for item in data['videos']:
vid = int(item['video_id'][5:])
splits[item['split']].append(vid)
for k in splits.keys():
splits[k] = sorted(splits[k])
splits['val'] = splits.pop('validate')
vid2English_captions = defaultdict(list)
for item in data['sentences']:
vid = int(item['video_id'][5:])
caption = item['caption']
vid2English_captions[vid].append(caption)
columns = ['en', 'zh']
assert ('en' in configs.lang2code)
assert ('zh' in configs.lang2code)
tsv_data = []
for vid in splits['train']:
for (enCap, zhCap) in zip(vid2English_captions[vid], vid2Chinese_captions[vid]):
tsv_data.append([enCap, zhCap])
print(f'There are {len(tsv_data)} training English-Chinese pairs')
df = pd.DataFrame(tsv_data, columns=columns)
df.to_csv(f'{configs.corpus_root}/msrvtt_zh.tsv', sep='\t', index=False)
train_captions = [line[0] for line in tsv_data]
assert (set(msrvtt_train_captions) == set(train_captions))
concept_preparation(train_captions, 'msrvtt', source_lang='en', target_lang='zh')

class FC3_Duplicate_TestCase(CommandSequenceTest):
def __init__(self, *args, **kwargs):
CommandSequenceTest.__init__(self, *args, **kwargs)
self.version = FC3
def runTest(self):
self.assert_parse('\nlogvol / --size=1024 --name=nameA --vgname=vgA\nlogvol /home --size=1024 --name=nameB --vgname=vgA')
self.assert_parse('\nlogvol / --size=1024 --name=nameA --vgname=vgA\nlogvol /home --size=1024 --name=nameA --vgname=vgB')
self.assert_parse_error('\nlogvol / --size=1024 --name=nameA --vgname=vgA\nlogvol /home --size=1024 --name=nameA --vgname=vgA', KickstartParseWarning)

class cLSTM(nn.Module):
def __init__(self, emodict, worddict, embedding, args):
super(cLSTM, self).__init__()
self.num_classes = emodict.n_words
self.embeddings = embedding
self.utt_cnn = CNNencoder(args.d_word_vec, 64, 100, [3, 4, 5])
self.dropout_in = nn.Dropout(0.3)
self.cont_lstm = nn.LSTM(100, 100, num_layers=1, bidirectional=False)
self.dropout_mid = nn.Dropout(0.3)
self.d_lin_2 = 100
self.classifier = nn.Linear(self.d_lin_2, self.num_classes)
def forward(self, sents, lengths):
if (len(sents.size()) < 2):
sents = sents.unsqueeze(0)
w_embed = self.embeddings(sents)
s_utt = self.utt_cnn(w_embed)
s_utt = self.dropout_in(s_utt)
s_cont = self.cont_lstm(s_utt.unsqueeze(1))[0].squeeze(1)
s_cont = self.dropout_mid(s_cont)
s_output = self.classifier(s_cont)
pred_s = F.log_softmax(s_output, dim=1)
return (pred_s, 0)

class AttrVI_ATTR_TRIG_ID(EnumAttribute):
resources = [(constants.InterfaceType.gpib, 'INSTR'), (constants.InterfaceType.gpib, 'INTFC'), (constants.InterfaceType.pxi, 'INSTR'), (constants.InterfaceType.pxi, 'BACKPLANE'), (constants.InterfaceType.asrl, 'INSTR'), (constants.InterfaceType.tcpip, 'INSTR'), (constants.InterfaceType.vxi, 'BACKPLANE'), (constants.InterfaceType.vxi, 'INSTR'), (constants.InterfaceType.vxi, 'SERVANT')]
py_name = ''
visa_name = 'VI_ATTR_TRIG_ID'
visa_type = 'ViInt16'
default = constants.VI_TRIG_SW
(read, write, local) = (True, True, True)
enum_type = constants.TriggerID |
def annotate_streets(df, img, text_col):
if (not os.path.exists(FONT_PATH)):
print('Error loading default font. Check your FONT_PATH')
return None
unique_sts = df[text_col].unique()
for street in unique_sts:
draw_coords = df.loc[((df.ST_NAME == street), 'draw_coords')].tolist()[0]
coords = df.loc[((df.ST_NAME == street), 'coords')].tolist()[0]
font = ImageFont.truetype(FONT_PATH, int(25))
imgTxt = Image.new('L', font.getsize(street))
drawTxt = ImageDraw.Draw(imgTxt)
        drawTxt.text((0, 0), street, font=font, fill=255)  # single-band image: fill must be a scalar; the RGB tint is applied later via ImageOps.colorize
angle = angle_bw_points(coords[0], coords[1])
texrot = imgTxt.rotate(angle, expand=1)
mpt = midpoint(draw_coords[0], draw_coords[1])
img.paste(ImageOps.colorize(texrot, (0, 0, 0), (10, 10, 12)), mpt, texrot) |
class BalanceProofData():
def __init__(self, canonical_identifier):
self._canonical_identifier = canonical_identifier
self._pending_locks = make_empty_pending_locks_state()
self.properties = None
def update(self, amount, lock):
self._pending_locks = channel.compute_locks_with(self._pending_locks, lock)
assert self._pending_locks
if self.properties:
self.properties = factories.replace(self.properties, locked_amount=(self.properties.locked_amount + amount), locksroot=compute_locksroot(self._pending_locks), nonce=(self.properties.nonce + 1))
else:
self.properties = factories.BalanceProofProperties(transferred_amount=TokenAmount(0), locked_amount=amount, nonce=Nonce(1), locksroot=compute_locksroot(self._pending_locks), canonical_identifier=self._canonical_identifier) |
def _iter_num_atoms_for_radii(mol, min_radius, max_radius, start_atoms):
unique_atoms = set(start_atoms)
assert (len(start_atoms) == len(unique_atoms)), 'duplicate start atom'
ignore_atoms = set((a for a in start_atoms if (not is_heavy_atom(mol.GetAtomWithIdx(a)))))
(yield (len(unique_atoms) - len(ignore_atoms)))
border_atoms = unique_atoms.copy()
for radius in range(min_radius, max_radius):
new_atoms = set()
for atom in border_atoms:
for neighbor_atom_obj in mol.GetAtomWithIdx(atom).GetNeighbors():
neighbor_atom = neighbor_atom_obj.GetIdx()
if (neighbor_atom not in unique_atoms):
unique_atoms.add(neighbor_atom)
new_atoms.add(neighbor_atom)
if (not is_heavy_atom(neighbor_atom_obj)):
ignore_atoms.add(neighbor_atom)
border_atoms = new_atoms
(yield (len(unique_atoms) - len(ignore_atoms))) |
def train(model, dataloader, optimizer, criterion, epoch_number, max_gradient_norm):
model.train()
device = model.device
epoch_start = time.time()
batch_time_avg = 0.0
running_loss = 0.0
preds = []
golds = []
for (batch_index, batch) in enumerate(dataloader):
batch_start = time.time()
premises = batch['premise'].to(device)
premises_lengths = batch['premise_length'].to(device)
hypotheses = batch['hypothesis'].to(device)
hypotheses_lengths = batch['hypothesis_length'].to(device)
labels = batch['label'].to(device)
similarity = batch['similarity'].to(device)
optimizer.zero_grad()
logits = model(premises, premises_lengths, hypotheses, hypotheses_lengths, similarity)
loss = criterion(logits.squeeze(1), labels)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_gradient_norm)
optimizer.step()
batch_time_avg += (time.time() - batch_start)
running_loss += loss.item()
preds.extend(logits.squeeze(1).data.cpu().numpy())
golds.extend(labels.data.cpu().numpy())
p = pearsonr(preds, golds)
s = spearmanr(preds, golds)
epoch_time = (time.time() - epoch_start)
epoch_loss = (running_loss / len(dataloader))
return (epoch_time, epoch_loss, p[0], s[0]) |
def test_sub():
x = Bits(4, 5)
y = Bits(4, 4)
assert ((x - y) == 1)
assert ((x - Bits(4, 4)) == 1)
assert ((x - 4) == 1)
y = Bits(4, 5)
assert ((x - y) == 0)
assert ((x - 5) == 0)
y = Bits(4, 7)
assert ((x - y) == 14)
assert ((x - 7) == 14)
assert ((9 - x) == 4)
with pytest.raises(ValueError):
(x - (- 1))
with pytest.raises(ValueError):
        (x - 16)  # operand outside the 4-bit range; the original value was elided in the source
with pytest.raises(ValueError):
a = (Bits(4, 3) - Bits(3, 1)) |
def verify_module(fscache: FileSystemCache, id: str, path: str, prefix: str) -> bool:
if is_init_file(path):
path = os.path.dirname(path)
for i in range(id.count('.')):
path = os.path.dirname(path)
if (not any((fscache.isfile_case(os.path.join(path, f'__init__{extension}'), prefix) for extension in PYTHON_EXTENSIONS))):
return False
return True |
@pytest.mark.parametrize('qubitop, state_binary', [((QubitOperator('Z0 Z1 Z2 Z3', (- 1.0)) + QubitOperator('X0 Y1 Y2 X3', 1.0)), '1100'), ((QubitOperator('X0 X3', (- 1.0)) + QubitOperator('Y1 Y2', 1.0)), '0000')])
def test_expectation_values_paulisum(qubitop, state_binary):
n_qubits = openfermion.count_qubits(qubitop)
state = numpy.zeros((2 ** n_qubits), dtype='complex64')
state[int(state_binary, 2)] = 1.0
qubit_map = {cirq.LineQubit(i): i for i in range(n_qubits)}
pauli_str = qubit_operator_to_pauli_sum(qubitop, list(qubit_map.keys()))
op_mat = openfermion.get_sparse_operator(qubitop, n_qubits)
expct_qop = openfermion.expectation(op_mat, state)
expct_pauli = pauli_str.expectation_from_state_vector(state, qubit_map)
numpy.testing.assert_allclose(expct_qop, expct_pauli) |
class AdminRecord(models.Model):
record_modes = (('ssh', 'ssh'), ('guacamole', 'guacamole'))
admin_login_user = models.ForeignKey('users.UserProfile', verbose_name='', on_delete=models.CASCADE)
admin_server = models.CharField(max_length=32, verbose_name='')
admin_remote_ip = models.GenericIPAddressField(verbose_name='')
admin_start_time = models.CharField(max_length=64, verbose_name='')
admin_login_status_time = models.CharField(max_length=16, verbose_name='')
admin_record_file = models.CharField(max_length=256, verbose_name='')
admin_record_mode = models.CharField(max_length=10, choices=record_modes, verbose_name='', default='ssh')
admin_record_cmds = models.TextField(verbose_name='', default='')
class Meta():
db_table = 'ops_admin_record'
verbose_name = ''
verbose_name_plural = '' |
class Event():
def __init__(self, should_lock: bool=False) -> None:
self._items: list[Callable[(..., Any)]] = []
self._should_lock = should_lock
self._event = threading.Event()
def set(self, *args: Any, **kwargs: Any) -> bool:
def execute():
for func in self._items:
try:
if (len(inspect.signature(func).parameters.values()) == 0):
value = func()
else:
value = func(*args, **kwargs)
return_values.add(value)
except Exception as e:
logger.exception(e)
if self._should_lock:
semaphore.release()
semaphore = threading.Semaphore(0)
return_values: set[Any] = set()
if len(self._items):
t = threading.Thread(target=execute)
t.start()
if self._should_lock:
semaphore.acquire()
false_values = [v for v in return_values if (v is False)]
self._event.set()
return (len(false_values) != 0)
def is_set(self) -> bool:
return self._event.is_set()
def wait(self, timeout: float=0) -> bool:
return self._event.wait(timeout)
def clear(self) -> None:
return self._event.clear()
def __add__(self, item: Callable[(..., Any)]) -> Self:
self._items.append(item)
return self
def __sub__(self, item: Callable[(..., Any)]) -> Self:
self._items.remove(item)
return self
def __iadd__(self, item: Callable[(..., Any)]) -> Self:
self._items.append(item)
return self
def __isub__(self, item: Callable[(..., Any)]) -> Self:
self._items.remove(item)
return self |
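Usage sketch for the Event helper above, assuming the class and its dependencies (threading, inspect, logger) are in scope; the handler and its name are illustrative. Handlers register through the overloaded += operator and set() dispatches them on a background thread.
def on_ready(name):
    # illustrative handler; receives the args forwarded by set()
    print(f'ready: {name}')
    return True

ev = Event(should_lock=True)  # should_lock makes set() wait for the handler thread
ev += on_ready
ev.set('worker-1')
assert ev.is_set()
ev.clear() |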
@pytest.mark.parametrize(('locations_to_collect', 'exists'), [((0,), ()), ((0,), (0,)), ((0, 1), ()), ((0, 1), (0,)), ((0, 1), (0, 1))])
def test_collect_locations_other(flask_app, two_player_session, echoes_resource_database, locations_to_collect: tuple[(int, ...)], exists: tuple[(int, ...)], mocker: pytest_mock.MockerFixture):
mock_get_pickup_target = mocker.patch('randovania.server.multiplayer.world_api._get_pickup_target', autospec=True)
mock_add_pickup_to_inventory = mocker.patch('randovania.server.multiplayer.world_api._add_pickup_to_inventory', autospec=True, return_value=b'bar')
mock_session_description: PropertyMock = mocker.patch('randovania.server.database.MultiplayerSession.layout_description', new_callable=PropertyMock)
mock_emit_session_update = mocker.patch('randovania.server.multiplayer.session_common.emit_session_actions_update', autospec=True)
sa = MagicMock()
sa.get_current_user.return_value = database.User.get_by_id(1234)
mock_get_pickup_target.return_value = PickupTarget(MagicMock(), 1)
w1 = database.World.get_by_id(1)
w2 = database.World.get_by_id(2)
assoc = database.WorldUserAssociation.get_by_instances(world=w2, user=1235)
assoc.inventory = b'boo'
assoc.save()
for existing_id in exists:
database.WorldAction.create(provider=w1, location=existing_id, session=two_player_session, receiver=w2)
with flask_app.test_request_context():
result = world_api.collect_locations(sa, w1, locations_to_collect)
mock_get_pickup_target.assert_has_calls([call(mock_session_description.return_value, 0, location) for location in locations_to_collect])
for location in locations_to_collect:
database.WorldAction.get(provider=w1, location=location)
new_locs = [loc for loc in locations_to_collect if (loc not in exists)]
mock_add_pickup_to_inventory.assert_has_calls([call(inv, mock_get_pickup_target.return_value.pickup, mock_session_description.return_value.get_preset.return_value.game) for (inv, _) in zip([b'boo', b'bar'], new_locs, strict=False)])
mock_emit_session_update.assert_not_called()
if (exists == locations_to_collect):
assert (result == set())
else:
assert (result == {w2}) |
def compute_predictions_logits(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold, tokenizer):
if output_prediction_file:
logger.info(f'Writing predictions to: {output_prediction_file}')
if output_nbest_file:
logger.info(f'Writing nbest to: {output_nbest_file}')
if (output_null_log_odds_file and version_2_with_negative):
logger.info(f'Writing null_log_odds to: {output_null_log_odds_file}')
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_logit', 'end_logit'])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
score_null = 1000000
min_null_feature_index = 0
null_start_logit = 0
null_end_logit = 0
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
if version_2_with_negative:
feature_null_score = (result.start_logits[0] + result.end_logits[0])
if (feature_null_score < score_null):
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
if (start_index >= len(feature.tokens)):
continue
if (end_index >= len(feature.tokens)):
continue
if (start_index not in feature.token_to_orig_map):
continue
if (end_index not in feature.token_to_orig_map):
continue
if (not feature.token_is_max_context.get(start_index, False)):
continue
if (end_index < start_index):
continue
length = ((end_index - start_index) + 1)
if (length > max_answer_length):
continue
prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(_PrelimPrediction(feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit))
prelim_predictions = sorted(prelim_predictions, key=(lambda x: (x.start_logit + x.end_logit)), reverse=True)
_NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_logit', 'end_logit'])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if (len(nbest) >= n_best_size):
break
feature = features[pred.feature_index]
if (pred.start_index > 0):
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
tok_text = tok_text.strip()
tok_text = ' '.join(tok_text.split())
orig_text = ' '.join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if (final_text in seen_predictions):
continue
seen_predictions[final_text] = True
else:
final_text = ''
seen_predictions[final_text] = True
nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
if version_2_with_negative:
if ('' not in seen_predictions):
nbest.append(_NbestPrediction(text='', start_logit=null_start_logit, end_logit=null_end_logit))
if (len(nbest) == 1):
nbest.insert(0, _NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
if (not nbest):
nbest.append(_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
if (len(nbest) < 1):
raise ValueError('No valid predictions')
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append((entry.start_logit + entry.end_logit))
if (not best_non_null_entry):
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output['text'] = entry.text
output['probability'] = probs[i]
output['start_logit'] = entry.start_logit
output['end_logit'] = entry.end_logit
nbest_json.append(output)
if (len(nbest_json) < 1):
raise ValueError('No valid predictions')
if (not version_2_with_negative):
all_predictions[example.qas_id] = nbest_json[0]['text']
else:
score_diff = ((score_null - best_non_null_entry.start_logit) - best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if (score_diff > null_score_diff_threshold):
all_predictions[example.qas_id] = ''
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
if output_prediction_file:
with open(output_prediction_file, 'w') as writer:
writer.write((json.dumps(all_predictions, indent=4) + '\n'))
if output_nbest_file:
with open(output_nbest_file, 'w') as writer:
writer.write((json.dumps(all_nbest_json, indent=4) + '\n'))
if (output_null_log_odds_file and version_2_with_negative):
with open(output_null_log_odds_file, 'w') as writer:
writer.write((json.dumps(scores_diff_json, indent=4) + '\n'))
return all_predictions |
def test_read_write_mem(cmdline_opts):
rgen = random.Random()
rgen.seed()
data = [rgen.randrange((- (2 ** 31)), (2 ** 31)) for _ in range(20)]
data_bytes = struct.pack('<{}i'.format(len(data)), *data)
msgs = []
for (i, item) in enumerate(data):
msgs.extend([req('rd', 1, (4096 + (4 * i)), 0, 0), resp('rd', 1, 0, item)])
th = TestHarness(MagicMemoryRTL, 2, ([(req_cls, resp_cls)] * 2), [msgs[::2], []], [msgs[1::2], []], 0, 0, 0, 0, 0, 0)
th.elaborate()
th.mem.write_mem(4096, data_bytes)
run_sim(th)
result_bytes = th.mem.read_mem(4096, len(data_bytes))
result = list(struct.unpack('<{}i'.format(len(data)), result_bytes))
assert (result == data) |
def get_parser():
parser = argparse.ArgumentParser('Prints a table from metrics files\n')
    parser.add_argument('--config', '-c', type=str, default='config.csv', help='Path to the config csv with `name` and `path` columns. `name` is a model name, and `path` is a path to a metrics file')
parser.add_argument('--extension', '-e', type=str, choices=['html', 'latex', 'csv'], default='csv', help='Format of a table')
parser.add_argument('--output', '-o', type=str, default='output.csv', help='Path to the output table')
parser.add_argument('--precision', '-p', type=int, default=4, help='Precision in final table')
return parser |
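A hypothetical invocation of the parser above; the argument values are placeholders, not paths from the source.
parser = get_parser()
args = parser.parse_args(['--config', 'config.csv', '--extension', 'latex', '--output', 'table.tex', '--precision', '2'])
print(args.config, args.extension, args.output, args.precision) |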
class ConvNeXtBlock(nn.Module):
def __init__(self, in_chs, out_chs=None, kernel_size=7, stride=1, dilation=1, mlp_ratio=4, conv_mlp=False, conv_bias=True, ls_init_value=1e-06, act_layer='gelu', norm_layer=None, drop_path=0.0):
super().__init__()
out_chs = (out_chs or in_chs)
act_layer = get_act_layer(act_layer)
if (not norm_layer):
norm_layer = (LayerNorm2d if conv_mlp else LayerNorm)
mlp_layer = (ConvMlp if conv_mlp else Mlp)
self.use_conv_mlp = conv_mlp
self.conv_dw = create_conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, depthwise=True, bias=conv_bias)
self.norm = norm_layer(out_chs)
self.mlp = mlp_layer(out_chs, int((mlp_ratio * out_chs)), act_layer=act_layer)
self.gamma = (nn.Parameter((ls_init_value * torch.ones(out_chs))) if (ls_init_value > 0) else None)
self.drop_path = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = x.permute(0, 3, 1, 2)
if (self.gamma is not None):
x = x.mul(self.gamma.reshape(1, (- 1), 1, 1))
x = (self.drop_path(x) + shortcut)
return x |
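Shape-check sketch for ConvNeXtBlock, assuming the timm helpers it references (create_conv2d, Mlp/ConvMlp, DropPath, get_act_layer, LayerNorm2d) are importable; with the defaults the block is a stride-1 residual unit, so output shape equals input shape.
import torch

block = ConvNeXtBlock(in_chs=64, conv_mlp=True)  # conv_mlp keeps tensors in NCHW throughout
x = torch.randn(2, 64, 56, 56)
y = block(x)
assert y.shape == x.shape  # residual add requires matching shapes |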
def parse_wheel_filename(filename: str) -> Tuple[(NormalizedName, Version, BuildTag, FrozenSet[Tag])]:
if (not filename.endswith('.whl')):
raise InvalidWheelFilename(f"Invalid wheel filename (extension must be '.whl'): {filename}")
filename = filename[:(- 4)]
dashes = filename.count('-')
if (dashes not in (4, 5)):
raise InvalidWheelFilename(f'Invalid wheel filename (wrong number of parts): {filename}')
parts = filename.split('-', (dashes - 2))
name_part = parts[0]
if (('__' in name_part) or (re.match('^[\\w\\d._]*$', name_part, re.UNICODE) is None)):
raise InvalidWheelFilename(f'Invalid project name: {filename}')
name = canonicalize_name(name_part)
version = Version(parts[1])
if (dashes == 5):
build_part = parts[2]
build_match = _build_tag_regex.match(build_part)
if (build_match is None):
raise InvalidWheelFilename(f"Invalid build number: {build_part} in '{filename}'")
build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
else:
build = ()
tags = parse_tag(parts[(- 1)])
return (name, version, build, tags) |
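Example round-trip on a PEP 427 wheel filename, assuming the packaging helpers it uses (canonicalize_name, Version, parse_tag, _build_tag_regex) are in scope.
name, version, build, tags = parse_wheel_filename('pip-23.1-py3-none-any.whl')
assert name == 'pip' and str(version) == '23.1' and build == ()
print(sorted(str(t) for t in tags))  # ['py3-none-any'] |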
def test_invert_error_at():
phys_err = 0.001
budgets = np.logspace((- 1), (- 18))
for budget in budgets:
d = qecs.FowlerSuperconductingQubits.code_distance_from_budget(physical_error_rate=phys_err, budget=budget)
assert ((d % 2) == 1)
assert (qecs.FowlerSuperconductingQubits.logical_error_rate(physical_error_rate=phys_err, code_distance=d) <= budget)
if (d > 3):
assert (qecs.FowlerSuperconductingQubits.logical_error_rate(physical_error_rate=phys_err, code_distance=(d - 2)) > budget) |
def main(args):
assert ((len(args) == 3) and isinstance(args[1], str) and isinstance(args[2], str))
dataset_name = args[1]
model_name = args[2]
tf.set_random_seed(1234)
coord_add = get_coord_add(dataset_name)
dataset_size_train = get_dataset_size_train(dataset_name)
dataset_size_test = get_dataset_size_test(dataset_name)
num_classes = get_num_classes(dataset_name)
create_inputs = get_create_inputs(dataset_name, is_train=False, epochs=cfg.epoch)
with tf.Graph().as_default():
num_batches_per_epoch_train = int((dataset_size_train / cfg.batch_size))
num_batches_test = 2
(batch_x, batch_labels) = create_inputs()
batch_squash = tf.divide(batch_x, 255.0)
batch_x_norm = slim.batch_norm(batch_x, center=False, is_training=False, trainable=False)
(output, pose_out) = net.build_arch(batch_x_norm, coord_add, is_train=False, num_classes=num_classes)
tf.logging.debug(pose_out.get_shape())
batch_acc = net.test_accuracy(output, batch_labels)
m_op = tf.constant(0.9)
(loss, spread_loss, mse, recon_img_squash) = net.spread_loss(output, pose_out, batch_squash, batch_labels, m_op)
tf.summary.scalar('spread_loss', spread_loss)
tf.summary.scalar('reconstruction_loss', mse)
tf.summary.scalar('all_loss', loss)
data_size = int(batch_x.get_shape()[1])
recon_img = tf.multiply(tf.reshape(recon_img_squash, shape=[cfg.batch_size, data_size, data_size, 1]), 255.0)
orig_img = tf.reshape(batch_x, shape=[cfg.batch_size, data_size, data_size, 1])
tf.summary.image('orig_image', orig_img)
tf.summary.image('recon_image', recon_img)
saver = tf.train.Saver()
step = 0
tf.summary.scalar('accuracy', batch_acc)
summary_op = tf.summary.merge_all()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
if (not os.path.exists((cfg.test_logdir + '/{}/{}/'.format(model_name, dataset_name)))):
os.makedirs((cfg.test_logdir + '/{}/{}/'.format(model_name, dataset_name)))
summary_writer = tf.summary.FileWriter((cfg.test_logdir + '/{}/{}/'.format(model_name, dataset_name)), graph=sess.graph)
files = os.listdir((cfg.logdir + '/{}/{}/'.format(model_name, dataset_name)))
for epoch in range(45, 46):
ckpt_re = ('.ckpt-%d' % (num_batches_per_epoch_train * epoch))
for __file in files:
if __file.endswith((ckpt_re + '.index')):
ckpt = os.path.join((cfg.logdir + '/{}/{}/'.format(model_name, dataset_name)), __file[:(- 6)])
saver.restore(sess, ckpt)
accuracy_sum = 0
for i in range(num_batches_test):
(batch_acc_v, summary_str, orig_image, recon_image) = sess.run([batch_acc, summary_op, orig_img, recon_img])
print(('%d batches are tested.' % step))
summary_writer.add_summary(summary_str, step)
accuracy_sum += batch_acc_v
step += 1
plot_imgs(orig_image, i, 'ori')
plot_imgs(recon_image, i, 'rec')
ave_acc = (accuracy_sum / num_batches_test)
print(('the average accuracy is %f' % ave_acc)) |
class ZGate(Bloq):
    @cached_property
def signature(self) -> 'Signature':
return Signature.build(q=1)
def short_name(self) -> 'str':
return 'Z'
def decompose_bloq(self) -> CompositeBloq:
raise DecomposeTypeError(f'{self} is atomic')
def add_my_tensors(self, tn: qtn.TensorNetwork, tag: Any, *, incoming: Dict[(str, SoquetT)], outgoing: Dict[(str, SoquetT)]):
tn.add(qtn.Tensor(data=_PAULIZ, inds=(outgoing['q'], incoming['q']), tags=[self.short_name(), tag]))
def as_cirq_op(self, qubit_manager: 'cirq.QubitManager', q: 'CirqQuregT') -> Tuple[('cirq.Operation', Dict[(str, 'CirqQuregT')])]:
import cirq
(q,) = q
return (cirq.Z(q), {'q': [q]}) |
class Logger(object):
def __init__(self, args):
self.args = args
self.save_dir = args.log_dir
self.is_primary = is_primary()
if self.is_primary:
os.makedirs(self.save_dir, exist_ok=True)
self.config_dir = os.path.join(self.save_dir, 'configs')
os.makedirs(self.config_dir, exist_ok=True)
file_name = os.path.join(self.config_dir, 'args.txt')
write_args(args, file_name)
log_dir = os.path.join(self.save_dir, 'logs')
if (not os.path.exists(log_dir)):
os.makedirs(log_dir, exist_ok=True)
self.text_writer = open(os.path.join(log_dir, 'log.txt'), 'a')
if args.tensorboard:
self.log_info('using tensorboard')
self.tb_writer = torch.utils.tensorboard.SummaryWriter(log_dir=log_dir)
else:
self.tb_writer = None
def save_config(self, config):
if self.is_primary:
save_config_to_yaml(config, os.path.join(self.config_dir, 'config.yaml'))
def log_info(self, info, check_primary=True):
if (self.is_primary or (not check_primary)):
print(info)
if self.is_primary:
info = str(info)
time_str = time.strftime('%Y-%m-%d-%H-%M')
info = '{}: {}'.format(time_str, info)
if (not info.endswith('\n')):
info += '\n'
self.text_writer.write(info)
self.text_writer.flush()
def add_scalar(self, **kargs):
if self.is_primary:
if (self.tb_writer is not None):
self.tb_writer.add_scalar(**kargs)
def add_scalars(self, **kargs):
if self.is_primary:
if (self.tb_writer is not None):
self.tb_writer.add_scalars(**kargs)
def add_image(self, **kargs):
if self.is_primary:
if (self.tb_writer is not None):
self.tb_writer.add_image(**kargs)
def add_images(self, **kargs):
if self.is_primary:
if (self.tb_writer is not None):
self.tb_writer.add_images(**kargs)
def close(self):
if self.is_primary:
self.text_writer.close()
            if (self.tb_writer is not None):
                self.tb_writer.close()
class ArcCos(UnaryScalarOp):
nfunc_spec = ('arccos', 1, 1)
def impl(self, x):
x_dtype = str(getattr(x, 'dtype', ''))
if (x_dtype in ('int8', 'uint8')):
return np.arccos(x, dtype=np.float32)
return np.arccos(x)
def L_op(self, inputs, outputs, gout):
(x,) = inputs
(gz,) = gout
if (gz.type in complex_types):
raise NotImplementedError()
if (outputs[0].type in discrete_types):
if (x.type in discrete_types):
return [x.zeros_like(dtype=config.floatX)]
else:
return [x.zeros_like()]
return (((- gz) / sqrt((np.cast[x.type](1) - sqr(x)))),)
def c_code(self, node, name, inputs, outputs, sub):
(x,) = inputs
(z,) = outputs
if (node.inputs[0].type in complex_types):
raise NotImplementedError('type not supported', type)
cast = node.outputs[0].type.dtype_specs()[1]
return f'{z} = acos(({cast}){x});' |
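A quick numerical check of the gradient that L_op encodes, d/dx arccos(x) = -1/sqrt(1 - x**2), using a central difference (a standalone numpy sketch, independent of the Op machinery).
import numpy as np

x, eps = 0.3, 1e-6
numeric = (np.arccos(x + eps) - np.arccos(x - eps)) / (2 * eps)
analytic = -1.0 / np.sqrt(1.0 - x ** 2)
assert abs(numeric - analytic) < 1e-6 |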
def calculate_sentence_transformer_embedding(examples, embedding_model, mean_normal=False):
if (not args.add_prompt):
text_to_encode = [raw_item['text'] for raw_item in examples]
else:
text_to_encode = [['Represent the civil comment; Input: ', raw_item['text'], 0] for raw_item in examples]
num = len(text_to_encode)
emb_model = INSTRUCTOR(embedding_model)
embeddings = []
bar = tqdm(range(0, num, 20), desc='calculate embeddings')
for i in range(0, num, 20):
embeddings += emb_model.encode(text_to_encode[i:(i + 20)]).tolist()
bar.update(1)
embeddings = torch.tensor(embeddings)
if mean_normal:
mean_embeddings = torch.mean(embeddings, 0, True)
embeddings = (embeddings - mean_embeddings)
return embeddings |
class webvision_dataloader():
def __init__(self, batch_size, num_class, num_workers, root_dir, log):
self.batch_size = batch_size
self.num_class = num_class
self.num_workers = num_workers
self.root_dir = root_dir
self.log = log
self.transform_train = transforms.Compose([transforms.Resize(320), transforms.RandomResizedCrop(299), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
self.transform_test = transforms.Compose([transforms.Resize(320), transforms.CenterCrop(299), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
self.transform_imagenet = transforms.Compose([transforms.Resize(320), transforms.CenterCrop(299), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
def run(self, mode, pred=[], prob=[]):
if (mode == 'warmup'):
all_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='all', num_class=self.num_class)
trainloader = DataLoader(dataset=all_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
return trainloader
elif (mode == 'train'):
labeled_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='labeled', num_class=self.num_class, pred=pred, probability=prob, log=self.log)
labeled_trainloader = DataLoader(dataset=labeled_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
unlabeled_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='unlabeled', num_class=self.num_class, pred=pred, log=self.log)
unlabeled_trainloader = DataLoader(dataset=unlabeled_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
return (labeled_trainloader, unlabeled_trainloader)
elif (mode == 'test'):
test_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_test, mode='test', num_class=self.num_class)
test_loader = DataLoader(dataset=test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True)
return test_loader
elif (mode == 'eval_train'):
eval_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_test, mode='all', num_class=self.num_class)
eval_loader = DataLoader(dataset=eval_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True)
return eval_loader
elif (mode == 'imagenet'):
imagenet_val = imagenet_dataset(root_dir=self.root_dir, transform=self.transform_imagenet, num_class=self.num_class)
imagenet_loader = DataLoader(dataset=imagenet_val, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True)
return imagenet_loader |
class _NodeTest(unittest.TestCase):
CODE = ''
    @property
    def astroid(self) -> Module:
try:
return self.__class__.__dict__['CODE_Astroid']
except KeyError:
module = builder.parse(self.CODE)
self.__class__.CODE_Astroid = module
return module |
def _parseASN1PrivateKey(s):
s = ASN1_Node(s)
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if (version != 0):
raise SyntaxError('Unrecognized RSAPrivateKey version')
n = s.next_node(version_node)
e = s.next_node(n)
d = s.next_node(e)
p = s.next_node(d)
q = s.next_node(p)
dP = s.next_node(q)
dQ = s.next_node(dP)
qInv = s.next_node(dQ)
return list(map((lambda x: bytesToNumber(s.get_value_of_type(x, 'INTEGER'))), [n, e, d, p, q, dP, dQ, qInv])) |
def test_basic_push_by_manifest_digest(manifest_protocol, basic_images, liveserver_session, app_reloader):
credentials = ('devtable', 'password')
options = ProtocolOptions()
options.push_by_manifest_digest = True
result = manifest_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images, credentials=credentials, options=options)
expected_failure = None
digests = [str(manifest.digest) for manifest in list(result.manifests.values())]
manifest_protocol.pull(liveserver_session, 'devtable', 'newrepo', digests, basic_images, credentials=credentials, expected_failure=expected_failure) |
class ValidatorTestMixin(MetaSchemaTestsMixin):
def test_it_implements_the_validator_protocol(self):
self.assertIsInstance(self.Validator({}), protocols.Validator)
def test_valid_instances_are_valid(self):
(schema, instance) = self.valid
self.assertTrue(self.Validator(schema).is_valid(instance))
def test_invalid_instances_are_not_valid(self):
(schema, instance) = self.invalid
self.assertFalse(self.Validator(schema).is_valid(instance))
def test_non_existent_properties_are_ignored(self):
self.Validator({object(): object()}).validate(instance=object())
def test_evolve(self):
(schema, format_checker) = ({'type': 'integer'}, FormatChecker())
original = self.Validator(schema, format_checker=format_checker)
new = original.evolve(schema={'type': 'string'}, format_checker=self.Validator.FORMAT_CHECKER)
expected = self.Validator({'type': 'string'}, format_checker=self.Validator.FORMAT_CHECKER, _resolver=new._resolver)
self.assertEqual(new, expected)
self.assertNotEqual(new, original)
def test_evolve_with_subclass(self):
with self.assertWarns(DeprecationWarning):
class OhNo(self.Validator):
foo = field(factory=(lambda : [1, 2, 3]))
_bar = field(default=37)
validator = OhNo({}, bar=12)
self.assertEqual(validator.foo, [1, 2, 3])
new = validator.evolve(schema={'type': 'integer'})
self.assertEqual(new.foo, [1, 2, 3])
self.assertEqual(new._bar, 12)
def test_is_type_is_true_for_valid_type(self):
self.assertTrue(self.Validator({}).is_type('foo', 'string'))
def test_is_type_is_false_for_invalid_type(self):
self.assertFalse(self.Validator({}).is_type('foo', 'array'))
def test_is_type_evades_bool_inheriting_from_int(self):
self.assertFalse(self.Validator({}).is_type(True, 'integer'))
self.assertFalse(self.Validator({}).is_type(True, 'number'))
def test_it_can_validate_with_decimals(self):
schema = {'items': {'type': 'number'}}
Validator = validators.extend(self.Validator, type_checker=self.Validator.TYPE_CHECKER.redefine('number', (lambda checker, thing: (isinstance(thing, (int, float, Decimal)) and (not isinstance(thing, bool))))))
validator = Validator(schema)
validator.validate([1, 1.1, (Decimal(1) / Decimal(8))])
invalid = ['foo', {}, [], True, None]
self.assertEqual([error.instance for error in validator.iter_errors(invalid)], invalid)
def test_it_returns_true_for_formats_it_does_not_know_about(self):
validator = self.Validator({'format': 'carrot'}, format_checker=FormatChecker())
validator.validate('bugs')
def test_it_does_not_validate_formats_by_default(self):
validator = self.Validator({})
self.assertIsNone(validator.format_checker)
def test_it_validates_formats_if_a_checker_is_provided(self):
checker = FormatChecker()
bad = ValueError('Bad!')
        @checker.checks('foo', raises=ValueError)
def check(value):
if (value == 'good'):
return True
elif (value == 'bad'):
raise bad
else:
self.fail(f"What is {value}? [Baby Don't Hurt Me]")
validator = self.Validator({'format': 'foo'}, format_checker=checker)
validator.validate('good')
with self.assertRaises(exceptions.ValidationError) as cm:
validator.validate('bad')
self.assertIs(cm.exception.cause, bad)
def test_non_string_custom_type(self):
non_string_type = object()
schema = {'type': [non_string_type]}
Crazy = validators.extend(self.Validator, type_checker=self.Validator.TYPE_CHECKER.redefine(non_string_type, (lambda checker, thing: isinstance(thing, int))))
Crazy(schema).validate(15)
def test_it_properly_formats_tuples_in_errors(self):
TupleValidator = validators.extend(self.Validator, type_checker=self.Validator.TYPE_CHECKER.redefine('array', (lambda checker, thing: isinstance(thing, tuple))))
with self.assertRaises(exceptions.ValidationError) as e:
TupleValidator({'uniqueItems': True}).validate((1, 1))
self.assertIn('(1, 1) has non-unique elements', str(e.exception))
def test_check_redefined_sequence(self):
schema = {'type': 'array', 'uniqueItems': True}
MyMapping = namedtuple('MyMapping', 'a, b')
Validator = validators.extend(self.Validator, type_checker=self.Validator.TYPE_CHECKER.redefine_many({'array': (lambda checker, thing: isinstance(thing, (list, deque))), 'object': (lambda checker, thing: isinstance(thing, (dict, MyMapping)))}))
validator = Validator(schema)
valid_instances = [deque(['a', None, '1', '', True]), deque([[False], [0]]), [deque([False]), deque([0])], [[deque([False])], [deque([0])]], [[[[[deque([False])]]]], [[[[deque([0])]]]]], [deque([deque([False])]), deque([deque([0])])], [MyMapping('a', 0), MyMapping('a', False)], [MyMapping('a', [deque([0])]), MyMapping('a', [deque([False])])], [MyMapping('a', [MyMapping('a', deque([0]))]), MyMapping('a', [MyMapping('a', deque([False]))])], [deque(deque(deque([False]))), deque(deque(deque([0])))]]
for instance in valid_instances:
validator.validate(instance)
invalid_instances = [deque(['a', 'b', 'a']), deque([[False], [False]]), [deque([False]), deque([False])], [[deque([False])], [deque([False])]], [[[[[deque([False])]]]], [[[[deque([False])]]]]], [deque([deque([False])]), deque([deque([False])])], [MyMapping('a', False), MyMapping('a', False)], [MyMapping('a', [deque([False])]), MyMapping('a', [deque([False])])], [MyMapping('a', [MyMapping('a', deque([False]))]), MyMapping('a', [MyMapping('a', deque([False]))])], [deque(deque(deque([False]))), deque(deque(deque([False])))]]
for instance in invalid_instances:
with self.assertRaises(exceptions.ValidationError):
validator.validate(instance)
def test_it_creates_a_ref_resolver_if_not_provided(self):
with self.assertWarns(DeprecationWarning):
resolver = self.Validator({}).resolver
self.assertIsInstance(resolver, validators._RefResolver)
def test_it_upconverts_from_deprecated_RefResolvers(self):
(ref, schema) = ('someCoolRef', {'type': 'integer'})
resolver = validators._RefResolver('', {}, store={ref: schema})
validator = self.Validator({'$ref': ref}, resolver=resolver)
with self.assertRaises(exceptions.ValidationError):
validator.validate(None)
def test_it_upconverts_from_yet_older_deprecated_legacy_RefResolvers(self):
class LegacyRefResolver():
def resolving(this, ref):
self.assertEqual(ref, 'the ref')
(yield {'type': 'integer'})
resolver = LegacyRefResolver()
schema = {'$ref': 'the ref'}
with self.assertRaises(exceptions.ValidationError):
self.Validator(schema, resolver=resolver).validate(None) |
class Dataset(torch.utils.data.Dataset):
def __init__(self, args, datas, images, split):
self.split = split
self.dataset = args.dataset
self.data_path = args.data_path
self.datas = datas
self.images = images
        print(' {} set has {} samples'.format(split, len(self.datas)))
def __len__(self):
return len(self.datas)
def __getitem__(self, index):
data = self.datas[index]
batch = {}
img1 = torch.from_numpy(self.images[data['img1']]).transpose(0, 1)
img2 = torch.from_numpy(self.images[data['img2']]).transpose(0, 1)
batch['img1'] = img1
batch['img2'] = img2
batch['class1'] = torch.LongTensor([data['class_id1']])
batch['class2'] = torch.LongTensor([data['class_id2']])
batch['label'] = torch.LongTensor([data['label']])
return batch |
def blackbox(blackbox):
if (tuple(sorted(blackbox.output_indices)) != blackbox.output_indices):
raise ValueError('Output indices {} must be ordered'.format(blackbox.output_indices))
partition(blackbox.partition)
for part in blackbox.partition:
if (not (set(part) & set(blackbox.output_indices))):
raise ValueError('Every blackbox must have an output - {} does not'.format(part)) |
class Discard(ScrimsButton):
def __init__(self, ctx: Context, label='Back', row: int=None):
super().__init__(style=discord.ButtonStyle.red, label=label, row=row)
self.ctx = ctx
async def callback(self, interaction: Interaction):
(await interaction.response.defer())
from .main import ScrimsMain as SM
self.view.stop()
v = SM(self.ctx)
v.message = (await self.view.message.edit(embed=(await v.initial_embed()), view=v)) |
def test_load_successful_with_invalid_distribution(caplog: LogCaptureFixture, mocker: MockerFixture, env: MockEnv, tmp_path: Path) -> None:
invalid_dist_info = ((tmp_path / 'site-packages') / 'invalid-0.1.0.dist-info')
invalid_dist_info.mkdir(parents=True)
mocker.patch('poetry.utils._compat.metadata.Distribution.discover', return_value=[*INSTALLED_RESULTS, metadata.PathDistribution(invalid_dist_info)])
repository_with_invalid_distribution = InstalledRepository.load(env)
assert (len(repository_with_invalid_distribution.packages) == len(INSTALLED_RESULTS))
assert (len(caplog.messages) == 1)
message = caplog.messages[0]
assert message.startswith('Project environment contains an invalid distribution')
assert (str(invalid_dist_info) in message) |
class ImageData(AbstractImage):
_swap1_pattern = re.compile(asbytes('(.)'), re.DOTALL)
_swap2_pattern = re.compile(asbytes('(.)(.)'), re.DOTALL)
_swap3_pattern = re.compile(asbytes('(.)(.)(.)'), re.DOTALL)
_swap4_pattern = re.compile(asbytes('(.)(.)(.)(.)'), re.DOTALL)
_current_texture = None
_current_mipmap_texture = None
def __init__(self, width, height, fmt, data, pitch=None):
super().__init__(width, height)
self._current_format = self._desired_format = fmt.upper()
self._current_data = data
self.pitch = (pitch or (width * len(fmt)))
self._current_pitch = self.pitch
self.mipmap_images = []
def __getstate__(self):
return {'width': self.width, 'height': self.height, '_current_data': self.get_data(self._current_format, self._current_pitch), '_current_format': self._current_format, '_desired_format': self._desired_format, '_current_pitch': self._current_pitch, 'pitch': self.pitch, 'mipmap_images': self.mipmap_images}
def get_image_data(self):
return self
    @property
    def format(self):
        return self._desired_format
    @format.setter
    def format(self, fmt):
        self._desired_format = fmt.upper()
        self._current_texture = None
def get_data(self, fmt=None, pitch=None):
fmt = (fmt or self._desired_format)
pitch = (pitch or self._current_pitch)
if ((fmt == self._current_format) and (pitch == self._current_pitch)):
return self._current_data
return self._convert(fmt, pitch)
def set_data(self, fmt, pitch, data):
self._current_format = fmt
self._current_pitch = pitch
self._current_data = data
self._current_texture = None
self._current_mipmap_texture = None
def set_mipmap_image(self, level, image):
if (level == 0):
raise ImageException('Cannot set mipmap image at level 0 (it is this image)')
(width, height) = (self.width, self.height)
for i in range(level):
width >>= 1
height >>= 1
if ((width != image.width) or (height != image.height)):
raise ImageException(('Mipmap image has wrong dimensions for level %d' % level))
self.mipmap_images += ([None] * (level - len(self.mipmap_images)))
self.mipmap_images[(level - 1)] = image
def create_texture(self, cls, rectangle=False):
internalformat = self._get_internalformat(self._desired_format)
texture = cls.create(self.width, self.height, GL_TEXTURE_2D, internalformat, False, blank_data=False)
if (self.anchor_x or self.anchor_y):
texture.anchor_x = self.anchor_x
texture.anchor_y = self.anchor_y
self.blit_to_texture(texture.target, texture.level, self.anchor_x, self.anchor_y, 0, None)
return texture
def get_texture(self, rectangle=False):
if (not self._current_texture):
self._current_texture = self.create_texture(Texture, rectangle)
return self._current_texture
def get_mipmapped_texture(self):
if self._current_mipmap_texture:
return self._current_mipmap_texture
texture = Texture.create(self.width, self.height, GL_TEXTURE_2D, None, blank_data=False)
if (self.anchor_x or self.anchor_y):
texture.anchor_x = self.anchor_x
texture.anchor_y = self.anchor_y
internalformat = self._get_internalformat(self.format)
glBindTexture(texture.target, texture.id)
glTexParameteri(texture.target, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
if self.mipmap_images:
self.blit_to_texture(texture.target, texture.level, self.anchor_x, self.anchor_y, 0, internalformat)
level = 0
for image in self.mipmap_images:
level += 1
if image:
image.blit_to_texture(texture.target, level, self.anchor_x, self.anchor_y, 0, internalformat)
else:
glGenerateMipmap(texture.target)
self.blit_to_texture(texture.target, texture.level, self.anchor_x, self.anchor_y, 0, internalformat)
self._current_mipmap_texture = texture
return texture
def get_region(self, x, y, width, height):
return ImageDataRegion(x, y, width, height, self)
def blit(self, x, y, z=0, width=None, height=None):
self.get_texture().blit(x, y, z, width, height)
def blit_to_texture(self, target, level, x, y, z, internalformat=None):
x -= self.anchor_x
y -= self.anchor_y
data_format = self.format
data_pitch = abs(self._current_pitch)
(fmt, gl_type) = self._get_gl_format_and_type(data_format)
if (fmt is None):
data_format = {1: 'R', 2: 'RG', 3: 'RGB', 4: 'RGBA'}.get(len(data_format))
(fmt, gl_type) = self._get_gl_format_and_type(data_format)
data = self._convert(data_format, data_pitch)
if (data_pitch & 1):
align = 1
elif (data_pitch & 2):
align = 2
else:
align = 4
row_length = (data_pitch // len(data_format))
glPixelStorei(GL_UNPACK_ALIGNMENT, align)
glPixelStorei(GL_UNPACK_ROW_LENGTH, row_length)
self._apply_region_unpack()
if ((target == GL_TEXTURE_3D) or (target == GL_TEXTURE_2D_ARRAY)):
assert (not internalformat)
glTexSubImage3D(target, level, x, y, z, self.width, self.height, 1, fmt, gl_type, data)
elif internalformat:
glTexImage2D(target, level, internalformat, self.width, self.height, 0, fmt, gl_type, data)
else:
glTexSubImage2D(target, level, x, y, self.width, self.height, fmt, gl_type, data)
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0)
self._default_region_unpack()
glFlush()
def _apply_region_unpack(self):
pass
def _default_region_unpack(self):
pass
def _convert(self, fmt, pitch):
if ((fmt == self._current_format) and (pitch == self._current_pitch)):
if (type(self._current_data) is str):
return asbytes(self._current_data)
return self._current_data
self._ensure_bytes()
data = self._current_data
current_pitch = self._current_pitch
current_format = self._current_format
sign_pitch = (current_pitch // abs(current_pitch))
if (fmt != self._current_format):
repl = asbytes('')
for c in fmt:
try:
idx = (current_format.index(c) + 1)
except ValueError:
idx = 1
repl += asbytes(('\\%d' % idx))
if (len(current_format) == 1):
swap_pattern = self._swap1_pattern
elif (len(current_format) == 2):
swap_pattern = self._swap2_pattern
elif (len(current_format) == 3):
swap_pattern = self._swap3_pattern
elif (len(current_format) == 4):
swap_pattern = self._swap4_pattern
else:
raise ImageException('Current image format is wider than 32 bits.')
packed_pitch = (self.width * len(current_format))
if (abs(self._current_pitch) != packed_pitch):
new_pitch = abs(self._current_pitch)
rows = [data[i:(i + new_pitch)] for i in range(0, len(data), new_pitch)]
rows = [swap_pattern.sub(repl, r[:packed_pitch]) for r in rows]
data = asbytes('').join(rows)
else:
data = swap_pattern.sub(repl, data)
current_pitch = (sign_pitch * (len(fmt) * self.width))
if (pitch != current_pitch):
diff = (abs(current_pitch) - abs(pitch))
if (diff > 0):
new_pitch = abs(pitch)
rows = [data[i:((i + new_pitch) - diff)] for i in range(0, len(data), new_pitch)]
data = asbytes('').join(rows)
elif (diff < 0):
new_pitch = abs(current_pitch)
padding = (asbytes(1) * (- diff))
rows = [(data[i:(i + new_pitch)] + padding) for i in range(0, len(data), new_pitch)]
data = asbytes('').join(rows)
if ((current_pitch * pitch) < 0):
new_pitch = abs(pitch)
rows = [data[i:(i + new_pitch)] for i in range(0, len(data), new_pitch)]
rows.reverse()
data = asbytes('').join(rows)
return asbytes(data)
def _ensure_bytes(self):
if (type(self._current_data) is not bytes):
self._current_data = asbytes(self._current_data)
    @staticmethod
    def _get_gl_format_and_type(fmt):
if (fmt == 'R'):
return (GL_RED, GL_UNSIGNED_BYTE)
elif (fmt == 'RG'):
return (GL_RG, GL_UNSIGNED_BYTE)
elif (fmt == 'RGB'):
return (GL_RGB, GL_UNSIGNED_BYTE)
elif (fmt == 'BGR'):
return (GL_BGR, GL_UNSIGNED_BYTE)
elif (fmt == 'RGBA'):
return (GL_RGBA, GL_UNSIGNED_BYTE)
elif (fmt == 'BGRA'):
return (GL_BGRA, GL_UNSIGNED_BYTE)
elif (fmt == 'L'):
return (GL_LUMINANCE, GL_UNSIGNED_BYTE)
elif (fmt == 'A'):
return (GL_ALPHA, GL_UNSIGNED_BYTE)
return (None, None)
    @staticmethod
    def _get_internalformat(fmt):
if (fmt == 'R'):
return GL_RED
elif (fmt == 'RG'):
return GL_RG
elif (fmt == 'RGB'):
return GL_RGB
elif (fmt == 'RGBA'):
return GL_RGBA
elif (fmt == 'D'):
return GL_DEPTH_COMPONENT
elif (fmt == 'DS'):
return GL_DEPTH_STENCIL
elif (fmt == 'L'):
return GL_LUMINANCE
elif (fmt == 'A'):
return GL_ALPHA
return GL_RGBA |
def build_loader(data_path, autoaug, batch_size, workers):
rank = dist.get_rank()
world_size = dist.get_world_size()
    assert ((batch_size % world_size) == 0), f'Batch size {batch_size} is not divisible by world size {world_size}'
train_transform = create_transform(input_size=224, is_training=True, auto_augment=autoaug)
train_dataset = datasets.ImageFolder(osp.join(data_path, 'train'), transform=train_transform)
train_sampler = distributed.DistributedSampler(train_dataset, num_replicas=world_size, rank=rank)
train_loader = DataLoader(train_dataset, batch_size=(batch_size // world_size), shuffle=False, num_workers=workers, pin_memory=True, sampler=train_sampler)
val_transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)])
val_dataset = datasets.ImageFolder(osp.join(data_path, 'val'), transform=val_transform)
val_sampler = distributed.DistributedSampler(val_dataset, world_size, rank)
val_loader = DataLoader(val_dataset, batch_size=(batch_size // world_size), shuffle=False, num_workers=workers, pin_memory=True, sampler=val_sampler)
return (train_loader, val_loader) |
def run_evolution_search(max_time_budget=5000000.0, population_size=50, tournament_size=10, mutation_rate=1.0):
nasbench.reset_budget_counters()
(times, best_valids, best_tests) = ([0.0], [0.0], [0.0])
population = []
for _ in range(population_size):
spec = random_spec()
data = nasbench.query(spec)
(time_spent, _) = nasbench.get_budget_counters()
times.append(time_spent)
population.append((data['validation_accuracy'], spec))
if (data['validation_accuracy'] > best_valids[(- 1)]):
best_valids.append(data['validation_accuracy'])
best_tests.append(data['test_accuracy'])
else:
best_valids.append(best_valids[(- 1)])
best_tests.append(best_tests[(- 1)])
if (time_spent > max_time_budget):
break
while True:
sample = random_combination(population, tournament_size)
best_spec = sorted(sample, key=(lambda i: i[0]))[(- 1)][1]
new_spec = mutate_spec(best_spec, mutation_rate)
data = nasbench.query(new_spec)
(time_spent, _) = nasbench.get_budget_counters()
times.append(time_spent)
population.append((data['validation_accuracy'], new_spec))
population.pop(0)
if (data['validation_accuracy'] > best_valids[(- 1)]):
best_valids.append(data['validation_accuracy'])
best_tests.append(data['test_accuracy'])
else:
best_valids.append(best_valids[(- 1)])
best_tests.append(best_tests[(- 1)])
if (time_spent > max_time_budget):
break
return (times, best_valids, best_tests) |
def main():
api_key = os.environ.get('QUANDL_API_KEY')
start_date = '2014-1-1'
end_date = '2015-1-1'
symbols = ('AAPL', 'BRK_A', 'MSFT', 'ZEN')
url = format_table_query(api_key=api_key, start_date=start_date, end_date=end_date, symbols=symbols)
print(('Fetching equity data from %s' % url))
response = requests.get(url)
response.raise_for_status()
archive_path = zipfile_path('QUANDL_ARCHIVE.zip')
print(('Writing compressed table to %s' % archive_path))
with ZipFile(archive_path, 'w') as zip_file:
zip_file.writestr('QUANDL_SAMPLE_TABLE.csv', BytesIO(response.content).getvalue(), ZIP_DEFLATED)
print('Writing mock metadata')
cols = ('file.link', 'file.status', 'file.data_snapshot_time', 'datatable.last_refreshed_time\n')
    row = ('', 'fresh', '2017-10-17 23:48:25 UTC', '2017-10-17 23:48:15 UTC\n')  # first field (file.link) was elided in the source
metadata = (','.join(cols) + ','.join(row))
path = zipfile_path('metadata.csv.gz')
print(('Writing compressed metadata to %s' % path))
write_compressed(path, metadata) |
def build_usage_examples(dag: ProvDAG, cfg: ReplayConfig, ns: NamespaceCollections):
sorted_nodes = nx.topological_sort(dag.collapsed_view)
actions = group_by_action(dag, sorted_nodes, ns)
for node_id in actions.no_provenance_nodes:
node = dag.get_node_data(node_id)
build_no_provenance_node_usage(node, node_id, ns, cfg)
for action_id in (std_actions := actions.std_actions):
try:
some_node_id = next(iter(std_actions[action_id]))
node = dag.get_node_data(some_node_id)
except KeyError:
some_output_name = next(iter(ns.result_collection_ns[action_id]))
some_node_id = next(iter(ns.result_collection_ns[action_id][some_output_name].members.values()))
node = dag.get_node_data(some_node_id)
if (node.action.action_type == 'import'):
build_import_usage(node, ns, cfg)
else:
build_action_usage(node, ns, std_actions, action_id, cfg) |
def test_interface_array(do_test):
class Ifc(Interface):
def construct(s):
s.msg = InPort(Bits32)
s.val = InPort(Bits1)
s.rdy = OutPort(Bits1)
class A(Component):
def construct(s):
s.ifc = [Ifc() for _ in range(2)]
a = A()
a._ref_ports = [(['clk'], 'clk', rt.Port('input', rdt.Vector(1)), 0), (['reset'], 'reset', rt.Port('input', rdt.Vector(1)), 0), (['ifc[0].msg', 'ifc[1].msg'], 'ifc__msg', rt.Array([2], rt.Port('input', rdt.Vector(32))), 1), (['ifc[0].rdy', 'ifc[1].rdy'], 'ifc__rdy', rt.Array([2], rt.Port('output', rdt.Vector(1))), 1), (['ifc[0].val', 'ifc[1].val'], 'ifc__val', rt.Array([2], rt.Port('input', rdt.Vector(1))), 1)]
a._ref_ports_yosys = [(['clk'], 'clk', rt.Port('input', rdt.Vector(1)), 0), (['reset'], 'reset', rt.Port('input', rdt.Vector(1)), 0), (['ifc[0].msg'], 'ifc__0__msg', rt.Port('input', rdt.Vector(32)), 1), (['ifc[0].rdy'], 'ifc__0__rdy', rt.Port('output', rdt.Vector(1)), 1), (['ifc[0].val'], 'ifc__0__val', rt.Port('input', rdt.Vector(1)), 1), (['ifc[1].msg'], 'ifc__1__msg', rt.Port('input', rdt.Vector(32)), 1), (['ifc[1].rdy'], 'ifc__1__rdy', rt.Port('output', rdt.Vector(1)), 1), (['ifc[1].val'], 'ifc__1__val', rt.Port('input', rdt.Vector(1)), 1)]
do_test(a) |
def auto_augment_transform(config_str, hparams):
config = config_str.split('-')
policy_name = config[0]
config = config[1:]
for c in config:
cs = re.split('(\\d.*)', c)
if (len(cs) < 2):
continue
(key, val) = cs[:2]
if (key == 'mstd'):
hparams.setdefault('magnitude_std', float(val))
else:
            assert False, 'Unknown AutoAugment config section'
aa_policy = auto_augment_policy(policy_name, hparams=hparams)
return AutoAugment(aa_policy) |
class CheckpointFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, run_function, preserve_rng_state, *args):
check_backward_validity(args)
ctx.run_function = run_function
ctx.preserve_rng_state = preserve_rng_state
ctx.had_autocast_in_fwd = torch.is_autocast_enabled()
ctx.input_tensors = list(args)
if preserve_rng_state:
ctx.fwd_cpu_state = torch.get_rng_state()
ctx.had_cuda_in_fwd = False
if torch.cuda._initialized:
ctx.had_cuda_in_fwd = True
(ctx.fwd_gpu_devices, ctx.fwd_gpu_states) = get_device_states(*args)
with torch.no_grad():
output_tensors = ctx.run_function(*ctx.input_tensors)
return output_tensors
    @staticmethod
    def backward(ctx, *output_grads):
if (not torch.autograd._is_checkpoint_valid()):
raise RuntimeError('Checkpointing is not compatible with .grad(), please use .backward() if possible')
require_grad_indices = list()
non_grad_indices = list()
for i in range(len(ctx.input_tensors)):
temp = ctx.input_tensors[i]
ctx.input_tensors[i] = temp.detach()
ctx.input_tensors[i].requires_grad = temp.requires_grad
if temp.requires_grad:
require_grad_indices.append(i)
else:
non_grad_indices.append(i)
rng_devices = []
if (ctx.preserve_rng_state and ctx.had_cuda_in_fwd):
rng_devices = ctx.fwd_gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state):
if ctx.preserve_rng_state:
torch.set_rng_state(ctx.fwd_cpu_state)
if ctx.had_cuda_in_fwd:
set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states)
with torch.enable_grad(), torch.cuda.amp.autocast(ctx.had_autocast_in_fwd):
output_tensors = ctx.run_function(*ctx.input_tensors)
input_tensors_with_grad = list()
for i in range(len(ctx.input_tensors)):
if (i in require_grad_indices):
input_tensors_with_grad.append(ctx.input_tensors[i])
input_grads = torch.autograd.grad(output_tensors, input_tensors_with_grad, output_grads, allow_unused=True)
return_input_grads = list()
j = 0
for i in range(len(ctx.input_tensors)):
if (i in require_grad_indices):
return_input_grads.append(input_grads[j])
j = (j + 1)
else:
return_input_grads.append(None)
return ((None, None) + tuple(return_input_grads)) |
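Invocation sketch: torch.autograd.Function subclasses are called through .apply(), so the segment below runs forward without saving intermediate activations and is recomputed during backward (the segment function is illustrative).
import torch

def segment(a, b):
    return (a * b).sin().sum()

a = torch.randn(4, requires_grad=True)
b = torch.randn(4, requires_grad=True)
loss = CheckpointFunction.apply(segment, True, a, b)  # (run_function, preserve_rng_state, *args)
loss.backward()
print(a.grad, b.grad) |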
def test_load_config(track_widget):
original_config_name = track_widget.config_name.currentText()
with patch('btrack.napari.widgets.load_path_dialogue_box') as load_path_dialogue_box:
load_path_dialogue_box.return_value = btrack.datasets.cell_config()
track_widget.load_config_button.click()
new_config_name = track_widget.config_name.currentText()
assert (track_widget.config_name.currentText() == 'Default')
assert (new_config_name != original_config_name) |
class AugmentationCfg():
scale: Tuple[(float, float)] = (0.9, 1.0)
ratio: Optional[Tuple[(float, float)]] = None
color_jitter: Optional[Union[(float, Tuple[(float, float, float)])]] = None
interpolation: Optional[str] = None
re_prob: Optional[float] = None
re_count: Optional[int] = None
use_timm: bool = False |
def test_foo_field_as_writer():
class FooStruct_wrap(Component):
def construct(s):
s.in_ = InPort(Bits16)
s.out = OutPort(Bits32)
s.inner = FooStruct(16)
s.inner.in_ //= s.in_
connect(s.inner.out.b, s.out)
def line_trace(s):
return s.inner.line_trace()
foo_wrap = FooStruct_wrap()
foo_wrap.elaborate()
try:
simple_sim_pass(foo_wrap)
except LeftoverPlaceholderError as e:
print('{} is thrown\n{}'.format(e.__class__.__name__, e))
return
raise Exception("Should've thrown LeftoverPlaceholderError.") |
class HandshakeType(enum.IntEnum):
hello_request = 0
client_hello = 1
server_hello = 2
hello_verify_request = 3
new_session_ticket = 4
    end_of_early_data = 5
encrypted_extensions = 8
certificate = 11
server_key_exchange = 12
certificate_request = 13
server_hello_done = 14
certificate_verify = 15
client_key_exchange = 16
finished = 20
certificate_url = 21
certificate_status = 22
supplemental_data = 23
key_update = 24
compressed_certificate = 25
ekt_key = 26
message_hash = 254 |
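Round-trip sketch: the first byte of a TLS handshake message carries its type, which maps directly onto this IntEnum.
raw = bytes([HandshakeType.client_hello])
assert HandshakeType(raw[0]) is HandshakeType.client_hello
assert HandshakeType(20) is HandshakeType.finished |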
def get_random_cached_bottlenecks(sess, image_lists, how_many, category, bottleneck_dir, image_dir, jpeg_data_tensor, bottleneck_tensor):
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if (how_many >= 0):
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange((MAX_NUM_IMAGES_PER_CLASS + 1))
image_name = get_image_path(image_lists, label_name, image_index, image_dir, category)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name, image_index, image_dir, category, bottleneck_dir, jpeg_data_tensor, bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
else:
for (label_index, label_name) in enumerate(image_lists.keys()):
for (image_index, image_name) in enumerate(image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index, image_dir, category)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name, image_index, image_dir, category, bottleneck_dir, jpeg_data_tensor, bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
return (bottlenecks, ground_truths, filenames) |
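# Editor's note (illustrative, not from the source): the per-example
# one-hot construction above (np.zeros plus a single index assignment) is
# equivalent to indexing an identity matrix:
import numpy as np

class_count, label_index = 5, 2
ground_truth = np.eye(class_count, dtype=np.float32)[label_index]
assert ground_truth[label_index] == 1.0 and ground_truth.sum() == 1.0 |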
class PoolFormer(nn.Module):
def __init__(self, layers, embed_dims=(64, 128, 320, 512), mlp_ratios=(4, 4, 4, 4), downsamples=(True, True, True, True), pool_size=3, in_chans=3, num_classes=1000, global_pool='avg', norm_layer=GroupNorm1, act_layer=nn.GELU, in_patch_size=7, in_stride=4, in_pad=2, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, layer_scale_init_value=1e-05, **kwargs):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = embed_dims[(- 1)]
self.grad_checkpointing = False
self.patch_embed = PatchEmbed(patch_size=in_patch_size, stride=in_stride, padding=in_pad, in_chs=in_chans, embed_dim=embed_dims[0])
network = []
for i in range(len(layers)):
network.append(basic_blocks(embed_dims[i], i, layers, pool_size=pool_size, mlp_ratio=mlp_ratios[i], act_layer=act_layer, norm_layer=norm_layer, drop_rate=drop_rate, drop_path_rate=drop_path_rate, layer_scale_init_value=layer_scale_init_value))
if ((i < (len(layers) - 1)) and (downsamples[i] or (embed_dims[i] != embed_dims[(i + 1)]))):
network.append(PatchEmbed(in_chs=embed_dims[i], embed_dim=embed_dims[(i + 1)], patch_size=down_patch_size, stride=down_stride, padding=down_pad))
self.network = nn.Sequential(*network)
self.norm = norm_layer(self.num_features)
self.head = (nn.Linear(self.num_features, num_classes) if (num_classes > 0) else nn.Identity())
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if (isinstance(m, nn.Linear) and (m.bias is not None)):
nn.init.constant_(m.bias, 0)
    @torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(stem='^patch_embed', blocks=[('^network\\.(\\d+).*\\.proj', (99999,)), (('^network\\.(\\d+)', None) if coarse else ('^network\\.(\\d+)\\.(\\d+)', None)), ('^norm', (99999,))])
    @torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
    @torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if (global_pool is not None):
self.global_pool = global_pool
self.head = (nn.Linear(self.num_features, num_classes) if (num_classes > 0) else nn.Identity())
def forward_features(self, x):
x = self.patch_embed(x)
x = self.network(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool=False):
if (self.global_pool == 'avg'):
x = x.mean([(- 2), (- 1)])
return (x if pre_logits else self.head(x))
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x |
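# Editor's sketch (assumption: this matches timm's PoolFormer, where
# `layers` is blocks-per-stage; the s12 variant uses (2, 2, 6, 2)). With
# timm installed, the model is usually built via the registry:
#
#   import timm, torch
#   model = timm.create_model('poolformer_s12', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000) |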
def find_mutated(form):
if isinstance(form, W_Correlated):
return find_mutated(form.get_obj())
elif isinstance(form, values.W_Cons):
if (not form.is_proper_list()):
(elements, _) = to_rpython_list(form, unwrap_correlated=True, improper=True)
return extend_dicts([find_mutated(f) for f in elements])
c = form.car()
if (c is set_bang_sym):
return extend_dict({form.cdr().car(): None}, find_mutated(form.cdr().cdr().car()))
elif (isinstance(c, values.W_Cons) and (c is not values.w_null)):
(all_exprs, _) = to_rpython_list(form, unwrap_correlated=True)
return extend_dicts([find_mutated(f) for f in all_exprs])
else:
(rest_exprs, _) = to_rpython_list(form.cdr(), unwrap_correlated=True)
return extend_dicts([find_mutated(f) for f in rest_exprs])
else:
return {} |
def target_directory(output_path: Optional[str]=None) -> str:
if output_path:
if (not os.path.isabs(output_path)):
output_path = os.path.join(os.getcwd(), output_path)
else:
output_path = os.getcwd()
os.makedirs(output_path, exist_ok=True)
return output_path |
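# Editor's usage sketch for target_directory above: relative paths are
# resolved against the current working directory and created on demand.
import os

out = target_directory('build/artifacts')  # -> <cwd>/build/artifacts
assert os.path.isabs(out) and os.path.isdir(out) |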
def _word_forms_from_xml_elem(elem):
lexeme = []
lex_id = elem.get('id')
if (len(elem) == 0):
return (lex_id, lexeme)
base_info = list(elem.iter('l'))
assert (len(base_info) == 1)
base_grammemes = _grammemes_from_elem(base_info[0])
for form_elem in elem.iter('f'):
grammemes = _grammemes_from_elem(form_elem)
form = form_elem.get('t').lower()
if (not (base_grammemes + grammemes)):
logger.warning(('no information provided for word %s, dropping the whole lexeme' % form))
return (lex_id, [])
if isinstance(form, bytes):
form = form.decode('ascii')
lexeme.append((form, ((base_grammemes + ' ') + grammemes).strip()))
return (lex_id, lexeme) |
def translate_to_vocab(tokens, vocab, vocab_translate, skip_new_tokens=False):
if vocab_translate.contains_same_content(vocab):
return tokens
lang_orig = tokens_to_lang(tokens, vocab, join=False)
tokens_new = []
for word in lang_orig:
if (skip_new_tokens and (word not in vocab_translate.counts)):
word = '<<pad>>'
tokens_new.append(vocab_translate.word2index(word))
if (not skip_new_tokens):
lang_new = tokens_to_lang(tokens_new, vocab_translate, join=False)
assert (lang_orig == lang_new)
return tokens_new |
def enable_oeenclave_debug(oe_enclave_addr):
enclave = oe_debug_enclave_t(oe_enclave_addr)
if (not enclave.is_valid()):
return False
if (enclave.debug == 0):
print(('oegdb: Debugging not enabled for enclave %s' % enclave.path))
return False
if (enclave.simulate != 0):
print(('oegdb: Enclave %s loaded in simulation mode' % enclave.path))
if (load_enclave_symbol(enclave.path, enclave.base_address) != 1):
return False
print('oegdb: Symbols loaded for enclave \n')
for tcs in enclave.tcs:
set_tcs_debug_flag(tcs)
print('oegdb: All tcs set to debug for enclave \n')
return True |
def get_acceleration_bw_models(year1, year2, model_path, selected_ngrams, all_model_vectors, top_k_acc):
model_path1 = os.path.join(model_path, (year1 + '.model'))
model_path2 = os.path.join(model_path, (year2 + '.model'))
(word_pairs, em1, em2) = compute_acc_between_years(selected_ngrams, model_path1, model_path2, all_model_vectors=all_model_vectors, top_k_acc=top_k_acc, skip_same_word_pairs=True, skip_duplicates=True)
return (word_pairs, em1, em2) |
def format_skeleton(skeleton: str, datetime: _Instant=None, tzinfo: (datetime.tzinfo | None)=None, fuzzy: bool=True, locale: ((Locale | str) | None)=LC_TIME) -> str:
locale = Locale.parse(locale)
if (fuzzy and (skeleton not in locale.datetime_skeletons)):
skeleton = match_skeleton(skeleton, locale.datetime_skeletons)
format = locale.datetime_skeletons[skeleton]
return format_datetime(datetime, format, tzinfo, locale) |
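# Editor's sketch: with Babel installed, a skeleton such as 'yMMMd' is
# resolved through the locale's datetime_skeletons (fuzzy-matched if absent):
import datetime
from babel.dates import format_skeleton

dt = datetime.datetime(2024, 3, 5, 14, 30)
print(format_skeleton('yMMMd', dt, locale='en'))  # e.g. 'Mar 5, 2024' |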
def run_query(config, client, query_func, write_func=write_result, sql_context=None):
QUERY_NUM = get_query_number()
if config.get('dask_profile'):
with performance_report(filename=f'q{QUERY_NUM}_profile.html'):
if sql_context:
run_sql_query(config=config, client=client, query_func=query_func, sql_context=sql_context, write_func=write_func)
else:
run_dask_cudf_query(config=config, client=client, query_func=query_func, write_func=write_func)
elif sql_context:
run_sql_query(config=config, client=client, query_func=query_func, sql_context=sql_context, write_func=write_func)
else:
run_dask_cudf_query(config=config, client=client, query_func=query_func, write_func=write_func) |
class CollectSessionComparisonData():
def __init__(self, pathserv, pathserv_other, fn_count_bads):
self.pathserv = pathserv
self.pathserv_other = pathserv_other
samples = {e for e in fs.load_session_playlist(pathserv)}
other_samples = {e for e in fs.load_session_playlist(pathserv_other)}
common_samples = (samples & other_samples)
no_dbg_samples = set()
crashed_samples = set()
compared_samples = set()
cls = OverallDefectsCounter
overall_counters = cls(pathserv, fn_count_bads)
overall_counters_o = cls(pathserv_other, fn_count_bads)
for sample in common_samples:
(perfect, counters) = overall_counters.count_defects(sample)
(perfect_o, counters_o) = overall_counters_o.count_defects(sample)
if (counters['no_dbg'] or counters_o['no_dbg']):
no_dbg_samples.add(sample)
continue
if (counters['crash'] or counters_o['crash']):
crashed_samples.add(sample)
continue
compared_samples.add(sample)
overall_counters.count_defects(sample)
overall_counters_o.count_defects(sample)
self.no_dbg_samples = no_dbg_samples
self.samples = samples
self.other_samples = other_samples
self.common_samples = common_samples
self.crashed_samples = crashed_samples
self.compared_samples = compared_samples
self.overall_counters = overall_counters
self.overall_counters_other = overall_counters_o
def confidence_guess(self):
maxz = max(len(self.samples), len(self.other_samples))
c = ((len(self.compared_samples) / maxz) if (maxz > 0) else 0)
return c |
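# Editor's worked example for confidence_guess: with 80 samples in one
# session, 100 in the other, and 60 successfully compared, the confidence
# is 60 / max(80, 100) = 0.6; empty sessions yield 0 instead of dividing
# by zero. |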
class ProcessStatCollector(diamond.collector.Collector):
PROC = '/proc/stat'
def get_default_config_help(self):
config_help = super(ProcessStatCollector, self).get_default_config_help()
config_help.update({})
return config_help
def get_default_config(self):
config = super(ProcessStatCollector, self).get_default_config()
config.update({'path': 'proc'})
return config
def collect(self):
if (not os.access(self.PROC, os.R_OK)):
return False
file = open(self.PROC, 'r')
for line in file:
if (line.startswith('ctxt') or line.startswith('processes')):
data = line.split()
metric_name = data[0]
metric_value = int(data[1])
                metric_value = int(self.derivative(metric_name, long(metric_value), diamond.collector.MAX_COUNTER))  # `counter` was an undefined name; MAX_COUNTER is Diamond's usual rollover bound
self.publish(metric_name, metric_value)
if (line.startswith('procs_') or line.startswith('btime')):
data = line.split()
metric_name = data[0]
metric_value = int(data[1])
self.publish(metric_name, metric_value)
file.close() |
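# Editor's note (illustrative): the /proc/stat lines this collector
# consumes look like the following; ctxt/processes are published as
# derivatives (rates), while procs_*/btime are published as raw gauges:
#
#   ctxt 1234567890
#   btime 1696000000
#   processes 445566
#   procs_running 3
#   procs_blocked 1 |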
def test_nested_process_search(cbc_product: CbEnterpriseEdr, mocker):
with open(os.path.join(os.getcwd(), 'tests', 'data', 'cbc_surveyor_testing.json')) as f:
programs = json.load(f)
cbc_product.log = logging.getLogger('pytest_surveyor')
cbc_product._sensor_group = None
cbc_product._results = {}
cbc_product._conn = mocker.Mock()
mocker.patch.object(cbc_product, 'perform_query')
    expected_calls = [
        mocker.call(Tag('field_translation'), {}, '(process_name:notepad.exe)'),
        mocker.call(Tag('field_translation'), {}, '(netconn_ipv4:127.0.0.1)'),
        mocker.call(Tag('field_translation'), {}, '(process_cmdline:MiniDump)'),
        mocker.call(Tag('field_translation'), {}, '(process_publisher:Microsoft)'),
        mocker.call(Tag('field_translation'), {}, '(netconn_domain:raw.githubusercontent.com)'),
        mocker.call(Tag('field_translation'), {}, '(process_internal_name:powershell)'),
        mocker.call(Tag('field_translation'), {}, '(hash:asdfasdfasdfasdf)'),
        mocker.call(Tag('field_translation'), {}, '(hash:zxcvzxcvzxcv)'),
        mocker.call(Tag('field_translation'), {}, '(netconn_port:80)'),
        mocker.call(Tag('field_translation'), {}, '(regmod_name:HKLM)'),
        mocker.call(Tag('multiple_values'), {}, '(process_name:svchost.exe OR process_name:cmd.exe)'),
        mocker.call(Tag('single_query'), {}, '(process_name:rundll.exe)'),
        mocker.call(Tag('multiple_query'), {}, '((process_cmdline:-enc) OR (modload_name:malware.dll))'),
    ]
for (program, criteria) in programs.items():
cbc_product.nested_process_search(Tag(program), criteria, {})
cbc_product.perform_query.assert_has_calls(expected_calls, any_order=True) |
class TWaveformSeekBar(PluginTestCase):
def setUp(self):
self.mod = self.modules['WaveformSeekBar']
def tearDown(self):
del self.mod
def test_main(self):
WaveformScale = self.mod.WaveformScale
player = NullPlayer()
player.info = AudioFile({'~#length': 10})
scale = WaveformScale(player)
scale.compute_redraw_interval()
scale.compute_redraw_area()
with visible(scale):
scale.compute_redraw_interval()
scale.compute_redraw_area()
def test_no_gstreamer_rms(self):
player = NullPlayer()
library = Library()
bar = self.mod.WaveformSeekBar(player, library)
message = FakeRMSMessage()
bar._on_bus_message(None, message, 1234) |
@pytest.mark.django_db
def test_scope_multisite(site1, site2, comment1, comment2):
with scope(site=[site1]):
assert (list(Comment.objects.all()) == [comment1])
with scope(site=[site1, site2]):
assert (list(Comment.objects.all()) == [comment1, comment2])
assert (get_scope() == {'site': [site1, site2], '_enabled': True}) |
def test_fbo_head():
lfb_prefix_path = osp.normpath(osp.join(osp.dirname(__file__), '../data/lfb'))
st_feat_shape = (1, 16, 1, 8, 8)
st_feat = generate_backbone_demo_inputs(st_feat_shape)
rois = torch.randn(1, 5)
rois[0][0] = 0
img_metas = [dict(img_key='video_1, 930')]
fbo_head = FBOHead(lfb_cfg=dict(lfb_prefix_path=lfb_prefix_path, max_num_sampled_feat=5, window_size=60, lfb_channels=16, dataset_modes='unittest', device='cpu'), fbo_cfg=dict(type='non_local', st_feat_channels=16, lt_feat_channels=16, latent_channels=8, num_st_feat=1, num_lt_feat=(5 * 60)))
fbo_head.init_weights()
out = fbo_head(st_feat, rois, img_metas)
assert (out.shape == (1, 24, 1, 1, 1))
fbo_head = FBOHead(lfb_cfg=dict(lfb_prefix_path=lfb_prefix_path, max_num_sampled_feat=5, window_size=60, lfb_channels=16, dataset_modes='unittest', device='cpu'), fbo_cfg=dict(type='avg'))
fbo_head.init_weights()
out = fbo_head(st_feat, rois, img_metas)
assert (out.shape == (1, 32, 1, 1, 1))
fbo_head = FBOHead(lfb_cfg=dict(lfb_prefix_path=lfb_prefix_path, max_num_sampled_feat=5, window_size=60, lfb_channels=16, dataset_modes='unittest', device='cpu'), fbo_cfg=dict(type='max'))
fbo_head.init_weights()
out = fbo_head(st_feat, rois, img_metas)
assert (out.shape == (1, 32, 1, 1, 1)) |
class WideResNet(nn.Module):
def __init__(self, depth=28, widen_factor=10, num_classes=None, dropout_rate=0.3):
super().__init__()
assert (((depth - 4) % 6) == 0), 'Wide-resnet depth should be 6n+4'
self.dropout_rate = dropout_rate
n = ((depth - 4) // 6)
k = widen_factor
nStages = [16, (16 * k), (32 * k), (64 * k)]
strides = [1, 1, 2, 2]
self.conv1 = nn.Conv2d(3, nStages[0], 3, strides[0], 1, bias=False)
self.layer1 = self._wide_layer(nStages[0:2], n, strides[1])
self.layer2 = self._wide_layer(nStages[1:3], n, strides[2])
self.layer3 = self._wide_layer(nStages[2:4], n, strides[3])
self.bn1 = nn.BatchNorm2d(nStages[3])
self.num_classes = num_classes
if (num_classes is not None):
self.linear = nn.Linear(nStages[3], num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.uniform_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
def _wide_layer(self, channels, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
(in_c, out_c) = channels
for stride in strides:
layers.append(WideBasic(in_c, out_c, stride, self.dropout_rate))
in_c = out_c
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.flatten(1)
if (self.num_classes is not None):
out = self.linear(out)
return out |
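# Editor's usage sketch: depth=28 with widen_factor=10 is the standard
# WRN-28-10; the fixed 8x8 average pool means the forward path assumes
# 32x32 inputs (e.g. CIFAR):
#
#   net = WideResNet(depth=28, widen_factor=10, num_classes=10)
#   logits = net(torch.randn(2, 3, 32, 32))  # -> (2, 10) |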
def _get_backend_kernel(dtype, grid, block, k_type):
kernel = _cupy_kernel_cache[(dtype, k_type)]
if kernel:
return _cupy_channelizer_wrapper(grid, block, kernel)
else:
raise ValueError('Kernel {} not found in _cupy_kernel_cache'.format(k_type))
class TestSuite(object):
_KNOWN_CACHES = {'species_pattern_matcher': SpeciesPatternMatcher, 'rule_pattern_matcher': RulePatternMatcher, 'reaction_pattern_matcher': ReactionPatternMatcher}
_COL = {'OK': '\x1b[92m', 'FAIL': '\x1b[91m', 'END': '\x1b[0m'}
def __init__(self, model=None):
self._caches = {}
self.assertions = []
if model:
self._model = model
elif SelfExporter.default_model:
self._model = SelfExporter.default_model
else:
raise Exception('A model must be specified explicitly if the PySB self-exporter is not in use')
    @property
    def model(self):
return self._model
    @model.setter
    def model(self, model):
self._caches = {}
self._model = model
def _ensure_required_caches(self, assertion):
for cache in assertion.required_caches:
if (cache not in self._KNOWN_CACHES.keys()):
raise Exception(('Unknown assertion cache: %s' % cache))
self._caches[cache] = self._KNOWN_CACHES[cache](self.model)
def add(self, assertion):
self.assertions.append(assertion)
def check(self, assertion):
self._ensure_required_caches(assertion)
return assertion.check(self.model, **{name: self._caches[name] for name in assertion.required_caches})
def check_all(self, stop_on_exception=False):
for a in self.assertions:
print(('%s... ' % repr(a)), end='')
try:
self.check(a)
print(('%sOK%s' % (self._COL['OK'], self._COL['END'])))
except ModelAssertionFailure as e:
print(('%sFAIL%s' % (self._COL['FAIL'], self._COL['END'])))
print((' ' + str(e.message)))
if stop_on_exception:
return
except Exception as e:
print(('%sERROR%s' % (self._COL['FAIL'], self._COL['END'])))
print((' ' + str(e)))
if stop_on_exception:
return |
def terraform_remote_state_s3(name: str, **body: Any) -> Block:
body['backend'] = 's3'
config = body.get('config', {})
if config.get('profile'):
session = get_session(profile_name=config['profile'])
creds = session.get_credentials()
if (not _profile_creds_definitely_supported_by_terraform(creds)):
del config['profile']
frozen_creds = creds.get_frozen_credentials()
config['access_key'] = frozen_creds.access_key
config['secret_key'] = frozen_creds.secret_key
if creds.token:
config['token'] = frozen_creds.token
return block('data', 'terraform_remote_state', name, body) |
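# Editor's sketch (hedged; the `block` helper comes from the surrounding
# pretf-style source): freezing profile credentials keeps remote state
# readable by Terraform versions that cannot resolve the profile themselves:
#
#   ds = terraform_remote_state_s3(
#       'network',
#       config={'bucket': 'tf-state', 'key': 'network.tfstate',
#               'region': 'eu-west-1', 'profile': 'prod'},
#   ) |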
def pseudonymise_buffer_list(file_buffer_list: list):
if ((file_buffer_list is not None) and (len(file_buffer_list) > 0)):
my_date_time = datetime.datetime.now()
str_now_datetime = my_date_time.strftime('%Y%m%d_%H%M%S')
zipfile_basename = f'Pseudonymised_{str_now_datetime}'
bad_data = False
index_to_fifty_mbyte_increment = _gen_index_list_to_fifty_mbyte_increment(file_buffer_list)
st.write(index_to_fifty_mbyte_increment)
zip_count = 0
start_index = 0
for end_index in index_to_fifty_mbyte_increment:
if (start_index == end_index):
break
zip_count += 1
zipfile_name = f'{zipfile_basename}.{zip_count}.zip'
zip_bytes_io = io.BytesIO()
bad_data = _zip_pseudo_fifty_mbytes(file_buffer_list[start_index:end_index], zip_bytes_io)
start_index = end_index
if bad_data:
if (zip_bytes_io is not None):
zip_bytes_io.close()
del zip_bytes_io
else:
remove_file(zipfile_name)
st.text('Problem processing DICOM data')
elif (zip_bytes_io is not None):
link_to_zipbuffer_download(zipfile_name, zip_bytes_io.getvalue())
zip_bytes_io.close()
del zip_bytes_io |