code stringlengths 281 23.7M |
|---|
def main():
    """Benchmark a masked/pruned BERT-style model.

    Loads a pruned checkpoint, optionally converts it to block-wise
    inference pruning, counts the encoder parameters that survived
    pruning (|w| > 1e-8), and measures raw forward-pass throughput on
    dummy inputs.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', default=None, type=str, required=True, help=('Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys())))
    parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models')
    parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
    parser.add_argument('--per_gpu_train_batch_size', default=1, type=int, help='Batch size per GPU/CPU for training.')
    parser.add_argument('--pruning_method', default='topK', type=str, help='Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning, sigmoied_threshold = Soft movement pruning).')
    parser.add_argument('--head_pruning', action='store_true', help='Head Pruning or not')
    parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--local_rank', type=int, default=-1, help='For distributed training: local_rank')
    parser.add_argument('--block_rows', type=int, default=-1, help='Number of rows in a block')
    parser.add_argument('--block_cols', type=int, default=-1, help='Number of cols in a block')
    parser.add_argument('--block_path', default=None, type=str, help='Path to pretrained block wise model')
    parser.add_argument('--baseline_parameters', default=None, type=int, help='Encoder parameter count of the unpruned baseline; when given, the remaining-parameter percentage is reported.')
    args = parser.parse_args()

    # Device / distributed setup.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        # Distributed: pin this process to its GPU before initializing NCCL.
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    set_seed(args)

    args.model_type = args.model_type.lower()
    (config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]

    # Infer the fine-tuning task (and label count) from the checkpoint path.
    if ('qqp' in args.model_name_or_path) or ('mnli' in args.model_name_or_path):
        num_labels = 3 if 'mnli' in args.model_name_or_path else 2
        config = config_class.from_pretrained((args.config_name if args.config_name else args.model_name_or_path), num_labels=num_labels, finetuning_task='mrpc', cache_dir=None, pruning_method=args.pruning_method, mask_init='constant', mask_scale=0, head_pruning=args.head_pruning)
    elif 'squad' in args.model_name_or_path:
        print('This one is used!')
        config = config_class.from_pretrained((args.config_name if args.config_name else args.model_name_or_path), cache_dir=None, pruning_method=args.pruning_method, mask_init='constant', mask_scale=0, head_pruning=args.head_pruning)
        model_class = MaskedBertForQuestionAnswering
    else:
        # Previously `config` was silently left undefined on this path and the
        # script crashed later with a NameError; fail fast instead.
        raise ValueError('Cannot infer the task from --model_name_or_path; expected it to contain "qqp", "mnli" or "squad".')

    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=None)
    model.eval()

    # Convert the learned masks into structural / block-wise inference pruning.
    if args.block_path is None:
        model._make_structural_pruning([None, None])
    else:
        assert (args.block_rows >= 1) and (args.block_cols >= 1)
        model._make_structural_pruning([args.block_rows, args.block_cols])
        for module in model.modules():
            if isinstance(module, MaskedLinear):
                module.enable_block_pruning([args.block_rows, args.block_cols])
        model.load_state_dict(torch.load(f'{args.block_path}/pytorch_model.bin'))
        for module in model.modules():
            if isinstance(module, MaskedLinear):
                module.make_block_wise_inference_pruning()

    # Count encoder weights that survived pruning.
    total_num_params = 0
    for (name, param) in model.named_parameters():
        if 'encoder' in name:
            total_num_params += (param.abs() > 1e-08).sum()

    model.to(args.device)

    # Dummy batch: the contents don't matter for a pure throughput benchmark.
    batch_size = args.per_gpu_train_batch_size
    length = args.max_seq_length
    batch = {'attention_mask': torch.ones([batch_size, length], dtype=torch.long).to(args.device), 'input_ids': torch.ones([batch_size, length], dtype=torch.long).to(args.device), 'token_type_ids': torch.ones([batch_size, length], dtype=torch.long).to(args.device)}
    inputs = {'input_ids': batch['input_ids'], 'attention_mask': batch['attention_mask']}
    if args.model_type != 'distilbert':
        inputs['token_type_ids'] = batch['token_type_ids'] if args.model_type in ['bert', 'masked_bert', 'xlnet', 'albert'] else None

    # Warm-up passes so CUDA kernels/caches are initialized before timing.
    for _ in range(10):
        with torch.no_grad():
            outputs = model(**inputs)

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    num_runs = 100
    torch.cuda.synchronize()
    start.record()
    for _ in range(num_runs):
        with torch.no_grad():
            outputs = model(**inputs)
    end.record()
    torch.cuda.synchronize()
    total_time = start.elapsed_time(end) / 1000  # CUDA events report milliseconds.

    print('*' * 100)
    print('Num of Parameters: ', total_num_params.item())
    if args.baseline_parameters:
        # The original f-string was missing its denominator (a syntax error);
        # the baseline count must be supplied explicitly via CLI.
        print(f'Remaining Parameters as compared to baseline: {(total_num_params.item() / args.baseline_parameters * 100):.2f}%')
    print(f'{((num_runs / total_time) * batch_size)} Sentences / s')
    print(f'{(((total_time / num_runs) / batch_size) * 1000)} ms / Sentences ')
    print('*' * 100)
class ConvKXBNRELU(nn.Module):
    """Conv + BN block followed by an activation (defaults to SiLU)."""

    def __init__(self, in_c, out_c, kernel_size, stride, act='silu'):
        super().__init__()
        self.conv = ConvKXBN(in_c, out_c, kernel_size, stride)
        # Fall back to a plain ReLU when no activation name is given.
        self.activation_function = torch.relu if act is None else get_activation(act)

    def forward(self, x):
        return self.activation_function(self.conv(x))
class _SQLLineageConfigLoader:
    """Load SQLLineage settings, overridable via SQLLINEAGE_-prefixed env vars.

    Each known setting maps to a ``(type, default)`` pair; the environment
    variable ``SQLLINEAGE_<NAME>`` overrides the default when set.
    """

    # setting name -> (type, default)
    config = {'DIRECTORY': (str, os.path.join(os.path.dirname(__file__), 'data')), 'TSQL_NO_SEMICOLON': (bool, False)}

    def __getattr__(self, item):
        if item in self.config:
            (type_, default) = self.config[item]
            value = os.environ.get('SQLLINEAGE_' + item)
            if value is None:
                return default
            if type_ is bool:
                # bool("false") / bool("0") are True because any non-empty
                # string is truthy; parse the common spellings instead.
                return value.strip().lower() in ('1', 'true', 'yes', 'on')
            return type_(value)
        else:
            return super().__getattribute__(item)
def name_to_array(name: str) -> t.Tuple[(str, ...)]:
    """Return the docker-style tag names that *name* qualifies for."""
    def _is_release(candidate: str) -> bool:
        return RELEASE_RE.fullmatch(candidate) is not None

    def _is_any_release(candidate: str) -> bool:
        return _is_release(candidate) or PRE_RELEASE_RE.fullmatch(candidate) is not None

    # Insertion order matters: if *name* collides with a fixed key, the
    # last entry wins, exactly as in a dict literal.
    tags: t.Dict[(str, t.Callable[([str], bool)])] = {
        'unstable': lambda i: i == 'master',
        'latest': _is_release,
        name: _is_any_release,
    }
    matched = [tag for (tag, test) in tags.items() if test(name)]
    return tuple(matched)
def load_pickle(pickle_file):
    """Deserialize *pickle_file*, retrying with latin-1 for Python-2 pickles.

    Any failure other than a UnicodeDecodeError is reported and re-raised.
    """
    try:
        with open(pickle_file, 'rb') as handle:
            pickle_data = pickle.load(handle)
    except UnicodeDecodeError:
        # Pickles written by Python 2 need a byte-transparent encoding.
        with open(pickle_file, 'rb') as handle:
            pickle_data = pickle.load(handle, encoding='latin1')
    except Exception as error:
        print('Unable to load data ', pickle_file, ':', error)
        raise
    return pickle_data
def get_valid_stats(cfg: DictConfig, trainer: Trainer, stats: Dict[(str, Any)]) -> Dict[(str, Any)]:
    """Augment validation *stats* with the update count and best metric so far."""
    stats['num_updates'] = trainer.get_num_updates()
    # `save_checkpoint.best` only exists once at least one checkpoint was saved.
    if hasattr(checkpoint_utils.save_checkpoint, 'best'):
        metric_name = cfg.checkpoint.best_checkpoint_metric
        # Track either the maximum or minimum, depending on metric direction.
        choose = max if cfg.checkpoint.maximize_best_checkpoint_metric else min
        stats['best_{0}'.format(metric_name)] = choose(checkpoint_utils.save_checkpoint.best, stats[metric_name])
    return stats
class ConvModule(enn.EquivariantModule):
    """Equivariant conv block bundling conv / norm / activation layers.

    The three layers are applied in the order given by ``order`` (a
    permutation of ``('conv', 'norm', 'act')``).  Only the ``'relu'``
    activation is currently supported by the equivariant backend.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias='auto', conv_cfg=None, norm_cfg=None, activation='relu', inplace=False, order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert ((conv_cfg is None) or isinstance(conv_cfg, dict))
        assert ((norm_cfg is None) or isinstance(norm_cfg, dict))
        self.in_type = build_enn_feature(in_channels)
        self.out_type = build_enn_feature(out_channels)
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.activation = activation
        self.inplace = inplace
        self.order = order
        assert (isinstance(self.order, tuple) and (len(self.order) == 3))
        assert (set(order) == set(['conv', 'norm', 'act']))
        self.with_norm = (norm_cfg is not None)
        self.with_activatation = (activation is not None)
        # 'auto' bias: disable it when a norm layer follows (it would be redundant).
        if (bias == 'auto'):
            bias = (False if self.with_norm else True)
        self.with_bias = bias
        if (self.with_norm and self.with_bias):
            warnings.warn('ConvModule has norm and bias at the same time')
        self.conv = ennConv(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = False
        self.output_padding = padding
        self.groups = groups
        if self.with_norm:
            # The norm layer's width depends on whether it runs before or
            # after the convolution.
            if (order.index('norm') > order.index('conv')):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            if ((conv_cfg is not None) and (conv_cfg['type'] == 'ORConv')):
                norm_channels = int((norm_channels * 8))
            (self.norm_name, norm) = build_enn_norm_layer(norm_channels)
            self.add_module(self.norm_name, norm)
        if self.with_activatation:
            if (self.activation not in ['relu']):
                raise ValueError(f'{self.activation} is currently not supported.')
            if (self.activation == 'relu'):
                self.activate = ennReLU(out_channels)
        self.init_weights()

    @property
    def norm(self):
        """The norm layer registered under ``self.norm_name``.

        Must be a property: ``forward`` calls ``self.norm(x)``, which only
        works if the attribute access itself yields the norm module.
        """
        return getattr(self, self.norm_name)

    def init_weights(self):
        """Intentionally a no-op: enn layers initialize themselves."""

    def forward(self, x, activate=True, norm=True):
        """Apply conv/norm/act in ``self.order``; norm/act can be skipped per call."""
        for layer in self.order:
            if (layer == 'conv'):
                x = self.conv(x)
            elif ((layer == 'norm') and norm and self.with_norm):
                x = self.norm(x)
            elif ((layer == 'act') and activate and self.with_activatation):
                x = self.activate(x)
        return x

    def evaluate_output_shape(self, input_shape):
        # Spatial shape is preserved by construction for this module.
        return input_shape
def get_bool_opt(options, optname, default=None):
    """Fetch *optname* from *options* and coerce it to a bool.

    Accepts real booleans, ints, and the usual yes/no string spellings;
    anything else raises OptionError.
    """
    value = options.get(optname, default)
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return bool(value)
    if not isinstance(value, str):
        raise OptionError('Invalid type %r for option %s; use 1/0, yes/no, true/false, on/off' % (value, optname))
    normalized = value.lower()
    if normalized in ('1', 'yes', 'true', 'on'):
        return True
    if normalized in ('0', 'no', 'false', 'off'):
        return False
    raise OptionError('Invalid value %r for option %s; use 1/0, yes/no, true/false, on/off' % (value, optname))
def useCycleGetPrefixData(e, prefixE, data):
    """Rewrite a nested event-list structure around prefix event *prefixE*.

    NOTE(review): the exact domain semantics aren't visible from here; what
    the code demonstrably does is: work on a deep copy of *data* (a list of
    lists of lists), and when *e* contains an underscore, remove *prefixE*
    and ``e[1]`` from matching inner lists, prune the other members, prefix
    the survivor with '_', and drop any inner lists left empty.
    """
    # Deep-copy so the caller's nested lists are never mutated.
    copyData = list(copy.deepcopy(data))
    # flage flags that a rewrite happened in the current middle-level list.
    flage = 0
    for i in copyData[:]:
        for j in i[:]:
            # set('_').issubset(e) is effectively "'_' in e".
            if set('_').issubset(e):
                # Only rewrite inner lists containing both prefixE and e[1].
                if set([prefixE, e[1]]).issubset(set(j)):
                    for l in j[:]:
                        if ((l == prefixE) or (l == e[1])):
                            j.remove(l)
                            break
                    # Prune the remaining members, keeping (at most) e itself.
                    for k in j[:]:
                        if (len(j) <= 1):
                            if (e != k):
                                j.remove(k)
                            else:
                                j.remove(k)
                                flage = 1
                            break
                        elif (e != k):
                            j.remove(k)
                        else:
                            j.remove(k)
                            # Mark the survivor as consumed by prefixing '_'.
                            j[0] = ('_' + j[0])
                            flage = 1
                            break
        # Drop inner lists emptied by the removals above.
        while ([] in i):
            i.remove([])
        if (flage == 1):
            flage = 0
            break
    # Drop middle-level lists that became empty.
    while ([] in copyData):
        copyData.remove([])
    return copyData
def get_augmentations():
    """Build the imgaug augmentation pipeline used for training images."""
    # Apply a given augmenter only half of the time.
    sometimes = (lambda aug: iaa.Sometimes(0.5, aug))
    blur = iaa.OneOf([
        iaa.GaussianBlur((0, 3.0)),
        iaa.AverageBlur(k=(2, 7)),
        iaa.MedianBlur(k=(3, 11)),
    ])
    edge_overlay = iaa.SimplexNoiseAlpha(iaa.OneOf([
        iaa.EdgeDetect(alpha=(0.5, 1.0)),
        iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
    ]))
    dropout = iaa.OneOf([
        iaa.Dropout((0.01, 0.1), per_channel=0.5),
        iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
    ])
    brightness = iaa.OneOf([
        iaa.Multiply((0.5, 1.5), per_channel=0.5),
        iaa.FrequencyNoiseAlpha(exponent=((- 4), 0), first=iaa.Multiply((0.5, 1.5), per_channel=True), second=iaa.ContrastNormalization((0.5, 2.0))),
    ])
    transforms = [
        blur,
        iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
        iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),
        edge_overlay,
        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, (0.05 * 255)), per_channel=0.5),
        dropout,
        iaa.Add(((- 10), 10), per_channel=0.5),
        iaa.AddToHueAndSaturation(((- 20), 20)),
        brightness,
        iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),
        sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)),
    ]
    # Pick between 0 and 5 of the transforms above, in random order.
    return iaa.Sequential([iaa.SomeOf((0, 5), transforms, random_order=True)], random_order=True)
def test_get_next_page_de_should_return_3_on_page_2(tmp_path):
    """German next/previous pagination: the 'next' link carries page=3."""
    # Fixture HTML snippet. NOTE(review): the `xmlns="` attributes look
    # truncated (no URL/closing quote before `width`) — presumably mangled
    # when the fixture was captured; confirm against the real markup.
    htmlString = '\n    <div class="next-previous-links">\n        <div class="previous dl-rounded-borders dl-white-bg">\n            <a href="/impfung-covid-19-corona/berlin?ref_visit_motive_ids%5B%5D=6769">\n            <svg xmlns=" width="16" height="16" fill="currentColor" viewBox="0 0 16 16"><path fill-rule="evenodd" clip-rule="evenodd" d="M4.863 7.576l4.859-4.859a.6.6 0 01.848 0l.567.567a.6.6 0 01.001.847L7.288 8l3.85 3.869a.6.6 0 01-.001.847l-.567.567a.6.6 0 01-.848 0L4.863 8.424a.6.6 0 010-.848z"></path></svg>\n            vorherige Seite\n            </a>\n        </div>\n        <div class="next dl-rounded-borders dl-white-bg">\n            <a href="/impfung-covid-19-corona/berlin?page=3&ref_visit_motive_ids%5B%5D=6769">\n            Nachste Seite\n            <svg xmlns=" width="16" height="16" fill="currentColor" viewBox="0 0 16 16"><path fill-rule="evenodd" clip-rule="evenodd" d="M11.137 8.424l-4.859 4.859a.6.6 0 01-.848 0l-.567-.567a.6.6 0 010-.847L8.712 8l-3.85-3.869a.6.6 0 010-.847l.567-.567a.6.6 0 01.848 0l4.859 4.859a.6.6 0 010 .848z"></path></svg>\n            </a>\n        </div>\n    </div>\n    '
    doc = html.document_fromstring(htmlString)
    # Minimal stub response: the page object only needs a JSON-ish body.
    response = Response()
    response._content = b'{}'
    centers_page = CentersPage(browser=Browser(), response=response)
    # Inject the parsed fixture document directly, bypassing a real fetch.
    centers_page.doc = doc
    next_page = centers_page.get_next_page()
    assert (next_page == 3)
def getAWSGroups(data_path, account_name):
    """Return the IAM group details for *account_name* from cached auth data.

    Extracts `.GroupDetailList[]` from the account's stored
    get-account-authorization-details output.
    """
    logger.debug("[*] Getting AWS Group data for AWS Account '%s'", account_name)
    groups = getAWSIamAccountAuthorizationDetailsInfo(data_path, account_name, '.GroupDetailList[]')
    logger.debug("[*] Completed getting AWS Group data for AWS Account '%s'", account_name)
    return groups
class ResBlock_myDFNM(nn.Module):
    """Residual block whose two convs are modulated by per-branch style features.

    The input is a pair ``[content, style]``; each styled conv receives the
    content features together with modulation parameters derived from the
    style tensor.
    """

    def __init__(self, dim, norm='in', activation='relu', pad_type='zero'):
        super().__init__()
        # Main path: style-modulated conv blocks (the second without activation).
        self.model1 = nn.Sequential(Conv2dBlock_my(dim, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type))
        self.model2 = nn.Sequential(Conv2dBlock_my(dim, dim, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type))
        # Style branches: each maps the style tensor to 2*dim modulation values.
        self.models1 = nn.Sequential(
            Conv2dBlock(dim, dim, 3, 1, 1, norm='in', activation='relu', pad_type=pad_type),
            Conv2dBlock(dim, (2 * dim), 3, 1, 1, norm='none', activation='none', pad_type=pad_type),
        )
        self.models2 = nn.Sequential(
            Conv2dBlock(dim, dim, 3, 1, 1, norm='in', activation='relu', pad_type=pad_type),
            Conv2dBlock(dim, (2 * dim), 3, 1, 1, norm='none', activation='none', pad_type=pad_type),
        )

    def forward(self, x):
        # x is a pair: x[0] = content features, x[1] = style features.
        content, style = x[0], x[1]
        out = self.model1([content, self.models1(style)])
        out = self.model2([out, self.models2(style)])
        out += content
        return out
def test_project_location_complex_set_first_project(hatch, config_file, helpers, temp_dir):
    """Setting a project's location to '.' stores the absolute cwd and makes
    that project the current one."""
    with temp_dir.as_cwd():
        result = hatch('config', 'set', 'projects.foo.location', '.')
    # Escape backslashes so the expected TOML matches on Windows paths too.
    path = str(temp_dir).replace('\\', '\\\\')
    assert (result.exit_code == 0), result.output
    assert (result.output == helpers.dedent(f'''
            New setting:
            project = "foo"
            [projects.foo]
            location = "{path}"
            '''))
    # Reload from disk to confirm the change was actually persisted.
    config_file.load()
    assert (config_file.model.project == 'foo')
    assert (config_file.model.projects['foo'].location == str(temp_dir))
def total_loss(net, t_inst_dict, params=dict()):
    """Build the discriminator/generator loss dicts and evaluation metrics.

    Returns ``(loss_dict_Disc, loss_dict_Gene, metrics)``.  The instruction
    tensors come from *t_inst_dict* ('instr_real', 'instr_synt', and 'worst'
    when worst-case replay is enabled); *params* toggles the individual loss
    terms (syntax loss, higher-order syntax loss, MIL loss, ...).

    NOTE(review): ``params=dict()`` is a shared mutable default — it appears
    to be read-only here, but confirm before relying on that.
    """
    loss_dict_Disc = dict()
    loss_dict_Gene = dict()
    metrics = dict()
    replay_worst = params.get('replay_worst', 0)
    t_inst_real = t_inst_dict['instr_real']
    t_inst_synt = t_inst_dict['instr_synt']
    if replay_worst:
        t_inst_wors = t_inst_dict['worst']
    (batch_size, h, w, _) = net.imgs['real'].get_shape()
    # NOTE(review): `fakes` is computed but never used in this function.
    fakes = filter((lambda name: ((name != 'real') and (name != 'unsup') and (name != 'worst'))), net.imgs.keys())
    replay_worst = params.get('replay_worst', 0)
    ones = tf.ones_like(t_inst_synt, tf.float32)
    zeros = tf.zeros_like(t_inst_synt, tf.float32)
    # Background masks: 1 where foreground instructions exist, 0 on background.
    bg_type = params.get('bg_type', 'global')
    t_bg_synt = tf_background(t_inst_synt, bg_type)
    t_bg_real = tf_background(t_inst_real, bg_type)
    if replay_worst:
        t_bg_wors = tf_background(t_inst_wors, bg_type)
    t_synt_mask = tf.where(t_bg_synt, zeros, ones)
    t_real_mask = tf.where(t_bg_real, zeros, ones)
    if replay_worst:
        t_wors_mask = tf.where(t_bg_wors, zeros, ones)
    # Per-pixel loss weights: either frequency-based (string spec, optionally
    # 'mask_'-prefixed to down-weight background) or a constant bg weight.
    bg_weight = params.get('bg_weight', 0.1)
    if isinstance(bg_weight, str):
        masked = bg_weight.startswith('mask_')
        if masked:
            bg_weight = bg_weight[5:]
        t_synt_weight = tf_frequency_weight(t_inst_synt, bg_weight)
        t_real_weight = tf_frequency_weight(t_inst_real, bg_weight)
        if replay_worst:
            t_wors_weight = tf_frequency_weight(t_inst_wors, bg_weight)
        if masked:
            t_synt_weight = tf.where(t_bg_synt, (0.1 * t_synt_weight), t_synt_weight)
            t_real_weight = tf.where(t_bg_real, (0.1 * t_real_weight), t_real_weight)
            if replay_worst:
                t_wors_weight = tf.where(t_bg_wors, (0.1 * t_wors_weight), t_wors_weight)
    else:
        t_synt_weight = tf.where(t_bg_synt, (bg_weight * ones), ones)
        t_real_weight = tf.where(t_bg_real, (bg_weight * ones), ones)
        if replay_worst:
            t_wors_weight = tf.where(t_bg_wors, (bg_weight * ones), ones)
    # Upsample instruction-space weights to image resolution.
    t_simg_weight = tf.image.resize_bilinear(t_synt_weight, [h, w])
    t_rimg_weight = tf.image.resize_bilinear(t_real_weight, [h, w])
    if replay_worst:
        t_wimg_weight = tf.image.resize_bilinear(t_wors_weight, [h, w])
    net.bg = dict()
    net.bg['synt'] = t_bg_synt
    net.bg['real'] = t_bg_real
    if replay_worst:
        net.bg['worst'] = t_bg_wors
    # Helpers dispatching on the output name (real / worst / synthetic).
    def get_gt(name):
        if name.startswith('real'):
            return t_inst_real
        elif name.startswith('worst'):
            return t_inst_wors
        else:
            return t_inst_synt
    def get_mask(name):
        if name.startswith('real'):
            return t_real_mask
        elif name.startswith('worst'):
            return t_wors_mask
        else:
            return t_synt_mask
    def get_inst_weight(name):
        if name.startswith('real'):
            return t_real_weight
        elif name.startswith('worst'):
            return t_wors_weight
        else:
            return t_synt_weight
    def get_img_weight(name):
        if name.startswith('real'):
            return t_rimg_weight
        elif name.startswith('worst'):
            return t_wimg_weight
        else:
            return t_simg_weight
    def is_real(name):
        return ((name == 'real') or (name == 'unsup') or (name == 'worst'))
    def looks_real(name):
        return (name.endswith('real') or (name == 'unsup') or (name == 'worst'))
    # Optional higher-order syntax: render instruction maps through rendnet.
    if params.get('use_hosyntax', 0):
        net.render = dict()
        net.render_layers = dict()
        for (name, t_logits) in net.logits.items():
            # Temperature-2 softmax over centered logits before rendering.
            t_label = tf.nn.softmax(((t_logits - tf.reduce_mean(t_logits, axis=(- 1), keep_dims=True)) / 2))
            print(('rendnet.network ' + name))
            (net.render[name], net.render_layers[name]) = rendnet.network(t_label, params, output_layers=True, input_is_softmax=True)
            net.resi_outs[name] = net.render[name]
        for (name, t_instr) in t_inst_dict.items():
            print(('rendnet.network ' + name))
            (net.render[name], net.render_layers[name]) = rendnet.network(t_instr, params, output_layers=True, input_is_softmax=False)
            net.resi_outs[name] = net.render[name]
    with tf.variable_scope('loss'):
        # Cross-entropy (or MIL cross-entropy) per named output.
        for (name, t_logits) in net.logits.items():
            if name.startswith('unsup'):
                continue
            if ((re.search('real', name) is not None) or (not params.get('adapter', 0))):
                t_instr = get_gt(name)
                t_weight = get_inst_weight(name)
                if (params.get('bMILloss', 1) and name.startswith('real')):
                    loss_xentropy = tf_MILloss_xentropy(labels=tf.squeeze(t_instr), logits=t_logits, weight=t_weight)
                else:
                    loss_xentropy = tf_loss_xentropy(labels=tf.squeeze(t_instr), logits=t_logits, weight=t_weight)
                if ('feedback' in name):
                    loss_prefix = 'loss_feedback/'
                else:
                    loss_prefix = 'loss_xentropy/'
                loss_dict_Gene[(loss_prefix + name)] = loss_xentropy
        # Program-syntax loss on instruction maps (binary or logits variant).
        if params.get('use_syntax', 1):
            syntax_binary = params.get('syntax_binary', 0)
            for name in net.instr.keys():
                if (name.startswith('unsup') and (not params.get('unsup_syntax', 0))):
                    continue
                if syntax_binary:
                    t_instr = net.instr[name]
                else:
                    t_instr = net.logits[name]
                loss_syn = syntax_loss(t_instr, params, syntax_binary)
                loss_dict_Gene[('loss_syntax/' + name)] = loss_syn
        # Higher-order syntax loss over rendered layers (and optionally image).
        if params.get('use_hosyntax', 0):
            render_layers = params.get('hos_layers', (- 1))
            if isinstance(render_layers, str):
                if (render_layers == ''):
                    render_layers = []
                else:
                    layer_list = render_layers.split(':')
                    render_layers = [int(id) for id in layer_list]
            elif (not isinstance(render_layers, list)):
                render_layers = [int(render_layers)]
            hos_loss_type = params.get('hos_loss_type', 'l2')
            for name in net.instr.keys():
                if name.startswith('unsup'):
                    continue
                if is_real(name):
                    gt_img = net.render['instr_real']
                    gt_layers = net.render_layers['instr_real']
                else:
                    gt_img = net.render['instr_synt']
                    gt_layers = net.render_layers['instr_synt']
                out_img = net.render[name]
                out_layers = net.render_layers[name]
                for layer_id in render_layers:
                    loss_hos = tf_loss_with_select(gt_layers[layer_id], out_layers[layer_id], which_loss=hos_loss_type)
                    loss_dict_Gene[((('loss_HOS/' + str(layer_id)) + '/') + name)] = loss_hos
                if params.get('hos_img', 0):
                    loss_hos = tf_loss_with_select(gt_img, out_img, which_loss=hos_loss_type)
                    loss_dict_Gene[('loss_HOS/img/' + name)] = loss_hos
        # Accuracy (full / foreground-only) and confusion-matrix metrics.
        net.acc = {'full': dict(), 'fg': dict()}
        for (name, t_instr) in net.instr.items():
            if name.startswith('unsup'):
                continue
            t_label = get_gt(name)
            t_mask = get_mask(name)
            if params.get('bMILloss', 1):
                (metrics[('accuracy/' + name)], acc_batch) = tf_MILloss_accuracy(t_label, t_instr)
                (metrics[('accuracy_fg/' + name)], fac_batch) = tf_MILloss_accuracy(t_label, t_instr, t_mask)
                net.acc['full'][name] = acc_batch
                net.acc['fg'][name] = fac_batch
            else:
                metrics[('accuracy/' + name)] = tf_accuracy(t_label, t_instr)
                metrics[('accuracy_fg/' + name)] = tf_accuracy(t_label, t_instr, t_mask)
            metrics[('confusionmat/' + name)] = comp_confusionmat(t_instr, t_label, num_classes=prog_ch, normalized_row=True, name=name)
    return (loss_dict_Disc, loss_dict_Gene, metrics)
def test_shell_sequence_with_ampersands_save_output():
    """Shell step: '&&'-chained echos run in one shell and stdout is captured."""
    context = Context({'one': 1, 'two': 2, 'three': 3, 'cmd': {'run': 'echo {one} && echo {two} && echo {three}', 'save': True}})
    pypyr.steps.shell.run_step(context)
    assert (context['cmdOut'].returncode == 0)
    # Windows cmd's `echo 1 && ...` keeps the space before `&&` in its output.
    assert (context['cmdOut'].stdout == ('1 \n2 \n3' if is_windows else '1\n2\n3'))
    assert (not context['cmdOut'].stderr)
class TestSysModulesSnapshot():
    """Exercise SysModulesSnapshot: restore() must bring sys.modules back to
    the state captured at snapshot time, except for preserved keys."""

    # A module name guaranteed (asserted below) not to be imported already.
    key = 'my-test-module'

    def test_remove_added(self) -> None:
        """A module added after the snapshot is removed on restore."""
        original = dict(sys.modules)
        assert (self.key not in sys.modules)
        snapshot = SysModulesSnapshot()
        sys.modules[self.key] = ModuleType('something')
        assert (self.key in sys.modules)
        snapshot.restore()
        assert (sys.modules == original)

    def test_add_removed(self, monkeypatch: MonkeyPatch) -> None:
        """A module deleted after the snapshot is re-added on restore."""
        assert (self.key not in sys.modules)
        # monkeypatch also undoes the insertion after the test finishes.
        monkeypatch.setitem(sys.modules, self.key, ModuleType('something'))
        assert (self.key in sys.modules)
        original = dict(sys.modules)
        snapshot = SysModulesSnapshot()
        del sys.modules[self.key]
        assert (self.key not in sys.modules)
        snapshot.restore()
        assert (sys.modules == original)

    def test_restore_reloaded(self, monkeypatch: MonkeyPatch) -> None:
        """A module replaced after the snapshot is restored to the original."""
        assert (self.key not in sys.modules)
        monkeypatch.setitem(sys.modules, self.key, ModuleType('something'))
        assert (self.key in sys.modules)
        original = dict(sys.modules)
        snapshot = SysModulesSnapshot()
        sys.modules[self.key] = ModuleType('something else')
        snapshot.restore()
        assert (sys.modules == original)

    def test_preserve_modules(self, monkeypatch: MonkeyPatch) -> None:
        """Keys matched by the preserve predicate survive the restore."""
        key = [(self.key + str(i)) for i in range(3)]
        assert (not any(((k in sys.modules) for k in key)))
        for (i, k) in enumerate(key):
            mod = ModuleType(('something' + str(i)))
            monkeypatch.setitem(sys.modules, k, mod)
        original = dict(sys.modules)
        def preserve(name):
            return (name in (key[0], key[1], 'some-other-key'))
        snapshot = SysModulesSnapshot(preserve=preserve)
        # Preserved keys: mirror the replacement into `original` so the final
        # equality check expects the new modules to survive.
        sys.modules[key[0]] = original[key[0]] = ModuleType('something else0')
        sys.modules[key[1]] = original[key[1]] = ModuleType('something else1')
        # Not preserved: should be rolled back by restore().
        sys.modules[key[2]] = ModuleType('something else2')
        snapshot.restore()
        assert (sys.modules == original)

    def test_preserve_container(self, monkeypatch: MonkeyPatch) -> None:
        """restore() mutates whatever dict sys.modules currently points to,
        rather than swapping the container object itself."""
        original = dict(sys.modules)
        assert (self.key not in original)
        replacement = dict(sys.modules)
        replacement[self.key] = ModuleType('life of brian')
        snapshot = SysModulesSnapshot()
        monkeypatch.setattr(sys, 'modules', replacement)
        snapshot.restore()
        # Same container object, but contents rolled back to the snapshot.
        assert (sys.modules is replacement)
        assert (sys.modules == original)
def test_merge_pass_nested_with_substitutions():
    """Context.merge: nested dicts merge key-wise, '{key}' placeholders and
    PyString expressions are substituted, and non-string keys merge too."""
    context = Context({'key1': 'value1', 'key2': 'value2', 'key3': {'k31': 'value31', 'k32': 'value32'}, 'key5': False, 15: 16})
    add_me = {'key2': 'value4', 'key3': {'k33': 'value33'}, 'key4': '444_{key1}_444', 'key5': {'k51': PyString('key1')}, 13: 14, 15: 17}
    context.merge(add_me)
    # key3 gained k33 without losing k31/k32; key5 was replaced wholesale.
    assert (context == {'key1': 'value1', 'key2': 'value4', 'key3': {'k31': 'value31', 'k32': 'value32', 'k33': 'value33'}, 'key4': '444_value1_444', 'key5': {'k51': 'value1'}, 13: 14, 15: 17})
class GlmMultiResp(Glm):
    """GLM variant for multi-response targets (coefficient matrix, not vector)."""

    def get_z(self, x):
        """Compute the linear predictor from a flattened coefficient vector."""
        coef_matrix = x.reshape(self.var_shape_)
        return safe_data_mat_coef_mat_dot(X=self.X, coef=coef_matrix, fit_intercept=self.fit_intercept)

    def cat_intercept_coef(self, intercept, coef):
        """Stack the intercept row on top of the coefficient matrix."""
        intercept_row = intercept.reshape(1, (- 1)) if intercept.ndim == 1 else intercept
        return np.vstack([intercept_row, coef])
def test_env_only_calls_set():
    """env step: each of get/set/unset runs exactly once per invocation."""
    context = Context({'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'env': {'set': {'ARB_SET_ME1': 'key2', 'ARB_SET_ME2': 'key1'}}})
    # Patch all three env helpers at once so no real environment is touched.
    with patch.multiple('pypyr.steps.env', env_get=DEFAULT, env_set=DEFAULT, env_unset=DEFAULT) as mock_env:
        pypyr.steps.env.run_step(context)
    # NOTE(review): get/unset are asserted to run once even though only 'set'
    # is configured — presumably run_step always invokes all three helpers.
    mock_env['env_get'].assert_called_once()
    mock_env['env_set'].assert_called_once()
    mock_env['env_unset'].assert_called_once()
def confirm_booleanbased_sqli(base, parameter, payload_detected, url='', data='', headers='', injection_type='', proxy='', is_multipart=False, timeout=30, delay=0, timesec=5, response_time=8, code=None, match_string=None, not_match_string=None, text_only=False, confirmation=False):
    """Confirm a suspected boolean-based SQL injection.

    Re-injects the detected payload with several always-true / always-false
    arithmetic pairs and checks whether the target's responses track the
    truth value.  Returns a namedtuple ``(vulnerable, tests_performed)``.
    """
    _temp = []
    Response = collections.namedtuple('Response', ['vulnerable', 'tests_performed'])
    param_key = parameter.key
    param_value = parameter.value
    # Each entry is a true/false expression pair that a SQL engine would
    # evaluate differently; the injected condition replaces [RANDNUM]=[RANDNUM].
    test_payloads = [{'true': {'payload': '2*3*8=6*8', 'response': True}, 'false': {'payload': '2*3*8=6*9', 'response': False}}, {'true': {'payload': '3*2>(1*5)', 'response': True}, 'false': {'payload': '3*3<(2*4)', 'response': False}}, {'true': {'payload': '3*2*0>=0', 'response': True}, 'false': {'payload': '3*3*9<(2*4)', 'response': False}}, {'true': {'payload': '5*4=20', 'response': True}, 'false': {'payload': '5*4=21', 'response': False}}, {'true': {'payload': '3*2*1=6', 'response': True}, 'false': {'payload': '3*2*0=6', 'response': False}}]
    # Slow targets: run fewer confirmation rounds.
    if (response_time > 8):
        test_payloads = test_payloads[0:3]
    for entry in test_payloads:
        if (delay > 0):
            time.sleep(delay)
        condition_true = entry.get('true', {}).get('payload')
        condition_false = entry.get('false', {}).get('payload')
        condition_response_0 = entry.get('true', {}).get('response')
        condition_response_1 = entry.get('false', {}).get('response')
        string = payload_detected.string
        expression = string.replace('[RANDNUM]=[RANDNUM]', condition_true)
        expression01 = string.replace('[RANDNUM]=[RANDNUM]', condition_false)
        decoded_expression = urldecode(expression)
        decoded_expression01 = urldecode(expression01)
        logger.payload(f'{decoded_expression}')
        try:
            # Fire the true-condition and false-condition requests and compare.
            attack = inject_expression(url=url, data=data, proxy=proxy, delay=delay, timesec=timesec, timeout=timeout, headers=headers, parameter=parameter, expression=expression, is_multipart=is_multipart, injection_type=injection_type)
            attack01 = inject_expression(url=url, data=data, proxy=proxy, delay=delay, timesec=timesec, timeout=timeout, headers=headers, parameter=parameter, expression=expression01, is_multipart=is_multipart, injection_type=injection_type)
            # NOTE(review): the function's own match_string/not_match_string/
            # text_only parameters are ignored here in favor of the global
            # `conf` values — confirm this is intentional.
            boolean_confirm_retval = check_boolean_responses(base, attack, attack01, code=code, match_string=conf.string, not_match_string=conf.not_string, text_only=conf.text_only)
            confirm_response_type = boolean_confirm_retval.vulnerable
            case = boolean_confirm_retval.case
            diff = boolean_confirm_retval.difference
            # Content-length heuristic: if the cached true/false lengths no
            # longer reproduce, discard them and stop the confirmation loop.
            if ((case == 'Content Length') and conf._bool_ctf and conf._bool_ctt):
                is_bool_ct_ok = bool(((conf._bool_ctt == attack.content_length) and (conf._bool_ctf == attack01.content_length)))
                if (not is_bool_ct_ok):
                    conf._bool_ctt = None
                    conf._bool_ctf = None
                    break
            if confirm_response_type:
                logger.debug('    Test: {}, Response Type {}'.format(decoded_expression, str(condition_response_0)))
                logger.debug('    Test: {}, Response Type {}'.format(decoded_expression01, str(condition_response_1)))
                # Each successful round records BOTH the true and false probes.
                _temp.append({'payload': '{}{}'.format(param_value, decoded_expression), 'response_type': condition_response_0, 'attack': attack})
                _temp.append({'payload': '{}{}'.format(param_value, decoded_expression01), 'response_type': condition_response_1, 'attack': attack01})
        except KeyboardInterrupt as error:
            logger.warning('user aborted during boolean-based confirmation phase')
            break
        except Exception as error:
            logger.critical(f'error {error}, during boolean-based confirmation phase.')
            break
    attempts_count = len(_temp)
    total_attempts_to_vulnerable = len(test_payloads)
    # NOTE(review): `//` is integer division, and _temp holds two entries per
    # successful round, so perc jumps in steps of 100 (0/100/200...) instead
    # of being a smooth percentage — the >=70 branch below can then only see
    # 0 or >=100. Presumably `/` (true division) was intended; confirm
    # against upstream before changing, as thresholds may rely on this.
    perc = ((attempts_count // total_attempts_to_vulnerable) * 100)
    logger.debug(f'further tests shows that the chances are {perc}% for the target to be injected..')
    if (perc >= 80):
        vulnerable = check_booleanbased_tests(_temp)
    elif (response_time > 8):
        if (perc >= 70):
            vulnerable = check_booleanbased_tests(_temp)
        else:
            vulnerable = False
    else:
        vulnerable = False
    ok = Response(vulnerable=vulnerable, tests_performed=_temp)
    return ok
class TestAccount(potr.context.Account):
    """Minimal in-memory OTR account for tests; private keys are never persisted."""

    contextclass = TestContext

    def __init__(self, name, post_office):
        # The post office routes messages between the test accounts.
        self.post_office = post_office
        super().__init__(name, 'test_protocol', 415)

    def loadPrivkey(self):
        # No stored key: returning None signals there is nothing to load.
        return None

    def savePrivkey(self):
        """Deliberately a no-op — tests never write keys to disk."""
class MultilabelAccuracy(MulticlassAccuracy):
    """Accuracy metric for multilabel classification.

    Reuses the multiclass correct/total counters; the decision threshold and
    the aggregation criteria (e.g. 'exact_match') are applied by the
    `_multilabel_accuracy_update` helper.
    """

    def __init__(self: TMultilabelAccuracy, *, threshold: float=0.5, criteria: str='exact_match', device: Optional[torch.device]=None) -> None:
        super().__init__(device=device)
        # Validate `criteria` before storing anything.
        _multilabel_accuracy_param_check(criteria)
        self.threshold = threshold
        self.criteria = criteria
        # NOTE(review): `_mode()` is invoked purely for side effects and its
        # purpose isn't visible from here — confirm it isn't a leftover.
        _mode()

    def update(self: TMultilabelAccuracy, input: torch.Tensor, target: torch.Tensor) -> TMultilabelAccuracy:
        """Accumulate correct/total counts for one batch; returns self."""
        # Move inputs onto the metric's device before delegating to the helper.
        input = input.to(self.device)
        target = target.to(self.device)
        (num_correct, num_total) = _multilabel_accuracy_update(input, target, self.threshold, self.criteria)
        self.num_correct += num_correct
        self.num_total += num_total
        return self
def get_auth_header(protocol, timestamp, client, api_key, api_secret=None, **kwargs):
    """Render the Sentry auth header value.

    The secret is appended only when provided; extra keyword arguments are
    accepted and ignored for call-site compatibility.
    """
    fields = [
        ('sentry_timestamp', timestamp),
        ('sentry_client', client),
        ('sentry_version', protocol),
        ('sentry_key', api_key),
    ]
    if api_secret:
        fields.append(('sentry_secret', api_secret))
    rendered = ', '.join(('%s=%s' % pair for pair in fields))
    return ('Sentry %s' % rendered)
def _bfs_for_latest_version_in_history(merge_base: (((Commit | TagObject) | Blob) | Tree), full_release_tags_and_versions: list[tuple[(Tag, Version)]]) -> (Version | None):
    """Walk the commit graph from *merge_base* looking for the most recent
    commit that carries a full-release tag; return that tag's Version."""
    def bfs(visited: set[Commit], q: Queue[Commit]) -> (Version | None):
        if q.empty():
            log.debug('queue is empty, returning none')
            return None
        node = q.get()
        # NOTE(review): hitting an already-visited node returns None
        # immediately instead of continuing with the rest of the queue —
        # presumably acceptable for the call sites, but confirm it can't
        # terminate the search early on merge-heavy histories.
        if (node in visited):
            log.debug('commit %s already visited, returning none', node.hexsha)
            return None
        for (tag, version) in full_release_tags_and_versions:
            log.debug('checking if tag %r (%s) matches commit %s', tag.name, tag.commit.hexsha, node.hexsha)
            if (tag.commit == node):
                log.info('found latest version in branch history: %r (%s)', str(version), node.hexsha[:7])
                return version
        log.debug("commit %s doesn't match any tags", node.hexsha)
        visited.add(node)
        # Enqueue parents, then recurse — BFS implemented via tail recursion.
        for parent in node.parents:
            log.debug('queuing parent commit %s', parent.hexsha)
            q.put(parent)
        return bfs(visited, q)
    q: Queue[Commit] = Queue()
    q.put(merge_base)
    latest_version = bfs(set(), q)
    log.info("the latest version in this branch's history is %s", latest_version)
    return latest_version
class FakeKeyparser(QObject):
    """Stand-in keyparser for tests: matches nothing, passes nothing through."""

    keystring_updated = pyqtSignal(str)
    request_leave = pyqtSignal(usertypes.KeyMode, str, bool)

    def __init__(self):
        super().__init__()
        # Never forward unhandled key events to the underlying widget.
        self.passthrough = False

    def handle(self, evt: QKeyEvent, *, dry_run: bool=False) -> QKeySequence.SequenceMatch:
        """Always report that the key event matched no binding."""
        return QKeySequence.SequenceMatch.NoMatch
class TPreferencesWindow(TestCase):
    """Smoke-test that the preferences window constructs and tears down cleanly."""

    def setUp(self):
        config.init()
        init_fake_app()
        # The window reads the column configuration during construction.
        set_columns(['artist', 'title'])
        self.win = PreferencesWindow(None)

    def test_ctr(self):
        # Construction already happened in setUp; nothing further to assert.
        pass

    def tearDown(self):
        destroy_fake_app()
        self.win.destroy()
        config.quit()
def test_valid_manifestlist():
    """Exercise parsing, conversion and validation of a valid schema 2
    manifest list fixture (``MANIFESTLIST_BYTES``)."""
    manifestlist = DockerSchema2ManifestList(Bytes.for_string_or_unicode(MANIFESTLIST_BYTES))
    # Basic properties of the parsed list.
    assert (len(manifestlist.manifests(retriever)) == 2)
    assert (manifestlist.media_type == 'application/vnd.docker.distribution.manifest.list.v2+json')
    assert (manifestlist.bytes.as_encoded_str() == MANIFESTLIST_BYTES)
    assert (manifestlist.manifest_dict == json.loads(MANIFESTLIST_BYTES))
    # A manifest *list* has no layers/config/blobs of its own.
    assert (manifestlist.get_layers(retriever) is None)
    assert (manifestlist.config_media_type is None)
    assert (manifestlist.layers_compressed_size is None)
    assert (not manifestlist.blob_digests)
    # The fixture holds one schema 2 and one schema 1 child manifest.
    for (index, manifest) in enumerate(manifestlist.manifests(retriever)):
        if (index == 0):
            assert isinstance(manifest.manifest_obj, DockerSchema2Manifest)
            assert (manifest.manifest_obj.schema_version == 2)
        else:
            assert isinstance(manifest.manifest_obj, DockerSchema1Manifest)
            assert (manifest.manifest_obj.schema_version == 1)
    # Converting a list to a single schema 2 manifest is unsupported.
    schema2_manifest = manifestlist.convert_manifest([DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE], 'foo', 'bar', 'baz', retriever)
    assert (schema2_manifest is None)
    # Schema 1 conversion succeeds and matches the compatible manifest.
    compatible_manifest = manifestlist.get_schema1_manifest('foo', 'bar', 'baz', retriever)
    assert (compatible_manifest.schema_version == 1)
    schema1_manifest = manifestlist.convert_manifest(DOCKER_SCHEMA1_CONTENT_TYPES, 'foo', 'bar', 'baz', retriever)
    assert (schema1_manifest.schema_version == 1)
    assert (schema1_manifest.digest == compatible_manifest.digest)
    manifestlist.validate(retriever)
    assert (manifestlist.amd64_linux_manifest_digest == 'sha256:5b')
def load_module_from_name(dotted_name: str) -> types.ModuleType:
    """Return the module named *dotted_name*, importing it if necessary.

    Anything the import writes to stdout/stderr is captured and routed to
    this module's logger instead of leaking to the console.
    """
    # Fast path: reuse an already-imported module.
    if dotted_name in sys.modules:
        return sys.modules[dotted_name]
    err_buf = io.StringIO()
    out_buf = io.StringIO()
    with redirect_stderr(err_buf), redirect_stdout(out_buf):
        module = importlib.import_module(dotted_name)
    captured = err_buf.getvalue()
    if captured:
        logger.error('Captured stderr while importing %s:\n%s', dotted_name, captured)
    captured = out_buf.getvalue()
    if captured:
        logger.info('Captured stdout while importing %s:\n%s', dotted_name, captured)
    return module
class LinearQubitOperatorOptions(object):
    """Options controlling multiprocessing for a linear qubit operator.

    Stores the desired worker count (capped at the machine's CPU count)
    and an optional pre-built pool.
    """

    def __init__(self, processes=10, pool=None):
        """Validate and store the worker count; *processes* must be > 0."""
        if processes <= 0:
            raise ValueError('Invalid number of processors specified {} <= 0'.format(processes))
        # Never request more workers than the machine actually has.
        self.processes = min(processes, multiprocessing.cpu_count())
        self.pool = pool

    def get_processes(self, num):
        """Clamp *num* into the inclusive range [1, self.processes]."""
        return max(min(num, self.processes), 1)

    def get_pool(self, num=None):
        """Create a new multiprocessing pool sized for *num* tasks
        (defaults to the configured worker count)."""
        worker_count = self.get_processes(num or self.processes)
        logging.info('Calling multiprocessing.Pool(%d)', worker_count)
        return multiprocessing.Pool(worker_count)
def get_logger(args):
    """Configure and return the root logger.

    Logs at INFO level to both the console and ``./logs/<model_id>.txt``,
    then echoes every hyperparameter in *args* for the record.

    Args:
        args: parsed argparse namespace; must have a ``model_id`` attribute.

    Returns:
        The configured root logger.
    """
    import os
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(levelname)s: - %(message)s', datefmt='%m-%d %H:%M:%S')
    # FileHandler does not create missing directories and would raise
    # FileNotFoundError if ./logs is absent — create it up front.
    os.makedirs('./logs', exist_ok=True)
    fh = logging.FileHandler('./logs/{}.txt'.format(args.model_id), mode='w+')
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(formatter)
    # NOTE(review): handlers accumulate on the root logger if this is called
    # more than once per process — confirm it is only called once.
    logger.addHandler(ch)
    logger.addHandler(fh)
    logger.info('------HYPERPARAMETERS-------')
    for (k, v) in vars(args).items():
        logger.info(((k + ': ') + str(v)))
    logger.info('')
    return logger
def ld2table(header, ld, html_id=None, html_class=None, widths=None):
    """Render a list of dicts as an HTML table string.

    Args:
        header: column names in display order; also the keys looked up
            in each row dict.
        ld: list of dicts, one per table row; missing keys render as ''.
        html_id: optional ``id`` attribute for the ``<table>`` element.
        html_class: optional ``class`` attribute for the ``<table>`` element.
        widths: optional mapping of column name -> pixel width for its ``<th>``.

    Returns:
        The complete ``<table>…</table>`` markup.

    NOTE(review): header and cell values are inserted without HTML escaping —
    do not pass untrusted data to this function.
    """
    # Use None as the default instead of a mutable {} shared across calls.
    if widths is None:
        widths = {}
    table = '<table'
    if html_id:
        table += (' id="%s"' % html_id)
    if html_class:
        table += (' class="%s"' % html_class)
    table += '>\n\t<thead>\n\t\t<tr>'
    for h in header:
        if h not in widths:
            table += (('\n\t\t\t<th>' + h) + '</th>')
        else:
            table += ((('\n\t\t\t<th style="width: %spx">' % widths[h]) + h) + '</th>')
    table += '\n\t\t</tr>\n\t</thead>\n\t<tbody>'
    for d in ld:
        table += '\n\t\t<tr>'
        for h in header:
            table += (('\n\t\t\t<td>' + str(d.get(h, ''))) + '</td>')
        table += '\n\t\t</tr>'
    table += '\n\t</tbody>\n</table>'
    return table
def load_properties_data(file_name='../../resources/properties_data.json'):
    """Load the properties JSON resource and convert values to Property enums.

    *file_name* is resolved relative to this module's directory, so the
    default path works regardless of the current working directory.

    Returns:
        dict mapping each JSON key to a list of ``Property`` enum members.
    """
    abs_dir_path = os.path.dirname(os.path.abspath(__file__))
    file_name_all = os.path.join(abs_dir_path, file_name)
    with open(file_name_all, 'r') as f:
        pd_dict = json.load(f)
    # Each property name string is looked up as a Property enum member.
    return {key: [Property[p] for p in props] for (key, props) in pd_dict.items()}
def test_set_current():
    """Verify channel A current-source setup commands and measurement parsing
    against the expected instrument protocol exchange."""
    with expected_protocol(AdvantestR6246, [('di 1,0,2.1100e-04,2.1300e-04', None), ('spot 1,2.3120e-03', None), (None, 'ABCD 7.311e-4')]) as inst:
        inst.ch_A.current_source(0, 0.000211, 0.000213)
        inst.ch_A.change_source_current = 0.002312
        # The instrument replies 'ABCD 7.311e-4'; the driver strips the
        # status prefix and parses the numeric value.
        assert (inst.read_measurement() == 0.0007311)
def _build_message(*args, **kwargs):
    """Format a test-failure message comparing expected vs. actual output.

    Keyword args ``t``, ``expected`` and ``result`` are escaped before use;
    a custom ``string`` template may be supplied, otherwise a default
    partial-output warning template is used.
    """
    string = kwargs.get('string', None)
    t = _escape(kwargs.get('t', ''))
    expected = _escape(kwargs.get('expected', ''))
    result = _escape(kwargs.get('result', ''))
    if (string is None):
        string = "The expected output of '{t}'\n\t\tShould be '{expected}'\n\t\tActually outputs '{result}'\n\t(WARNING: Partial Output of Result!)"
    # Slice a window out of `result`: drop the trailing footer and keep only
    # as many characters as `expected` occupies before it.
    # NOTE(review): assumes the module-level `foot` is a non-empty footer
    # appended to every result — confirm; an empty `foot` makes `end` 0 and
    # the slice empty.
    end = (- len(_escape(foot)))
    start = (end - len(expected))
    return string.format(t=t, result=result[start:end], expected=expected)
class _XXZZLattice(_RotatedLattice):
    """Rotated surface-code lattice with XXXX and ZZZZ stabilizers.

    Provides logical state preparation, logical X/Z operations (plain and
    classically conditioned), ancilla-mediated readout, and full-lattice
    readout on the underlying quantum circuit.
    """
    # Map short stabilizer names to their measurement circuit classes.
    stabilizer_shortnames = {'mx': _XXXX, 'mz': _ZZZZ}
    def reset_x(self) -> None:
        """Prepare the data qubits in |+> (reset then Hadamard)."""
        self.circ.reset(self.qregisters['data'])
        self.circ.h(self.qregisters['data'])
        self.circ.barrier()
    def reset_z(self) -> None:
        """Prepare the data qubits in |0>."""
        self.circ.reset(self.qregisters['data'])
        self.circ.barrier()
    def x(self) -> None:
        """Apply a logical X: physical X along one column (stride d)."""
        for i in range(0, self.params['num_data'], self.params['d'][self.W]):
            self.circ.x(self.qregisters['data'][i])
        self.circ.barrier()
    def z(self) -> None:
        """Apply a logical Z: physical Z along the first row (d qubits)."""
        for i in range(self.params['d'][self.W]):
            self.circ.z(self.qregisters['data'][i])
        self.circ.barrier()
    def x_c_if(self, classical: ClassicalRegister, val: int) -> None:
        """Logical X conditioned on a classical register equalling *val*."""
        for i in range(0, self.params['num_data'], self.params['d'][self.W]):
            self.circ.x(self.qregisters['data'][i]).c_if(classical, val)
        self.circ.barrier()
    def z_c_if(self, classical: ClassicalRegister, val: int) -> None:
        """Logical Z conditioned on a classical register equalling *val*."""
        for i in range(self.params['d'][self.W]):
            self.circ.z(self.qregisters['data'][i]).c_if(classical, val)
        self.circ.barrier()
    def cx(self, control: Optional[Qubit]=None, target: Optional[Qubit]=None):
        """Logical CNOT with this lattice as either control or target.

        Exactly one of *control*/*target* should be given: with *control*,
        the external qubit controls X on the logical-X column; with
        *target*, the logical Z parity is read into the ancilla which then
        controls X on the external target.
        """
        if control:
            for i in range(0, self.params['num_data'], self.params['d'][self.W]):
                self.circ.cx(control, self.qregisters['data'][i])
            self.circ.barrier()
        elif target:
            self._readout_z_into_ancilla()
            self.circ.cx(self.qregisters['ancilla'], target)
    def _readout_x_into_ancilla(self) -> None:
        """Collect the logical X parity into the ancilla (H-basis parity)."""
        self.circ.reset(self.qregisters['ancilla'])
        self.circ.h(self.qregisters['ancilla'])
        for i in range(0, self.params['num_data'], self.params['d'][self.W]):
            self.circ.cx(self.qregisters['ancilla'], self.qregisters['data'][i])
        self.circ.h(self.qregisters['ancilla'])
    def readout_x(self, readout_creg: Optional[ClassicalRegister]=None) -> None:
        """Measure the logical X operator into *readout_creg*.

        When no register is supplied, a fresh 1-bit register is created and
        registered on the circuit.
        """
        if (not readout_creg):
            self.params['num_readout'] += 1
            creg_name = ((self.name + '_readout_') + str(self.params['num_readout']))
            readout = ClassicalRegister(1, name=creg_name)
            self.circ.add_register(readout)
            self.cregisters[creg_name] = readout
            readout_creg = self.cregisters[creg_name]
        self._readout_x_into_ancilla()
        self.circ.measure(self.qregisters['ancilla'], readout_creg)
        self.circ.barrier()
    def _readout_z_into_ancilla(self) -> None:
        """Collect the logical Z parity into the ancilla."""
        self.circ.reset(self.qregisters['ancilla'])
        for i in range(self.params['d'][self.W]):
            self.circ.cx(self.qregisters['data'][i], self.qregisters['ancilla'])
    def readout_z(self, readout_creg: Optional[ClassicalRegister]=None) -> None:
        """Measure the logical Z operator into *readout_creg*.

        When no register is supplied, a fresh 1-bit register is created and
        registered on the circuit.
        """
        if (not readout_creg):
            self.params['num_readout'] += 1
            creg_name = ((self.name + '_readout_') + str(self.params['num_readout']))
            readout = ClassicalRegister(1, name=creg_name)
            self.circ.add_register(readout)
            self.cregisters[creg_name] = readout
            readout_creg = self.cregisters[creg_name]
        self._readout_z_into_ancilla()
        self.circ.measure(self.qregisters['ancilla'], readout_creg)
        self.circ.barrier()
    def lattice_readout_x(self) -> None:
        """Measure every data qubit in the X basis into a new register."""
        self.params['num_lattice_readout'] += 1
        creg_name = ((self.name + '_lattice_readout_') + str(self.params['num_lattice_readout']))
        readout = ClassicalRegister(self.params['num_data'], name=creg_name)
        self.circ.add_register(readout)
        self.cregisters[creg_name] = readout
        # Rotate into the X basis before measuring.
        self.circ.h(self.qregisters['data'])
        self.circ.measure(self.qregisters['data'], self.cregisters[creg_name])
        self.circ.barrier()
    def lattice_readout_z(self) -> None:
        """Measure every data qubit in the Z basis into a new register."""
        self.params['num_lattice_readout'] += 1
        creg_name = ((self.name + '_lattice_readout_') + str(self.params['num_lattice_readout']))
        readout = ClassicalRegister(self.params['num_data'], name=creg_name)
        self.circ.add_register(readout)
        self.cregisters[creg_name] = readout
        self.circ.measure(self.qregisters['data'], self.cregisters[creg_name])
        self.circ.barrier()
class Evaluator(object):
    """Compute audio-captioning metrics (FENSE, BLEU, ROUGE, CIDEr, METEOR,
    SPICE, SPIDEr) and write them to a text report."""
    def eval_annotation(self, annotation, output):
        """Score the reference captions in *annotation* (a JSON file with an
        'audios' list) against themselves and write the scores to *output*."""
        captions = json.load(open(annotation, 'r'))['audios']
        # Build audio_id -> list of reference caption strings.
        key2refs = {}
        for audio_idx in range(len(captions)):
            audio_id = captions[audio_idx]['audio_id']
            key2refs[audio_id] = []
            for caption in captions[audio_idx]['captions']:
                key2refs[audio_id].append(caption['caption'])
        # FENSE runs on raw (untokenized) captions.
        from fense.fense import Fense
        scores = {}
        scorer = Fense()
        scores[scorer.method()] = evaluate_annotation(copy.deepcopy(key2refs), scorer)
        # Re-shape references into COCO caption format for the tokenizer.
        refs4eval = {}
        for (key, refs) in key2refs.items():
            refs4eval[key] = []
            for (idx, ref) in enumerate(refs):
                refs4eval[key].append({'audio_id': key, 'id': idx, 'caption': ref})
        from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
        tokenizer = PTBTokenizer()
        key2refs = tokenizer.tokenize(refs4eval)
        from pycocoevalcap.bleu.bleu import Bleu
        from pycocoevalcap.cider.cider import Cider
        from pycocoevalcap.rouge.rouge import Rouge
        from pycocoevalcap.meteor.meteor import Meteor
        from pycocoevalcap.spice.spice import Spice
        scorers = [Bleu(), Rouge(), Cider(), Meteor(), Spice()]
        for scorer in scorers:
            scores[scorer.method()] = evaluate_annotation(copy.deepcopy(key2refs), scorer)
        # SPIDEr is the mean of CIDEr and SPICE.
        spider = 0
        with open(output, 'w') as f:
            for (name, score) in scores.items():
                if (name == 'Bleu'):
                    # Bleu returns a list of BLEU-1..BLEU-4 scores.
                    for n in range(4):
                        f.write('Bleu-{}: {:6.3f}\n'.format((n + 1), score[n]))
                else:
                    f.write('{}: {:6.3f}\n'.format(name, score))
                if (name in ['CIDEr', 'SPICE']):
                    spider += score
            f.write('SPIDEr: {:6.3f}\n'.format((spider / 2)))
    def eval_prediction(self, prediction, annotation, output):
        """Score predicted captions in *prediction* against the references in
        *annotation* and write the metric report to *output*."""
        ref_captions = json.load(open(annotation, 'r'))['audios']
        # Build audio_id -> list of reference caption strings.
        key2refs = {}
        for audio_idx in range(len(ref_captions)):
            audio_id = ref_captions[audio_idx]['audio_id']
            key2refs[audio_id] = []
            for caption in ref_captions[audio_idx]['captions']:
                key2refs[audio_id].append(caption['caption'])
        # Build filename -> single-prediction list.
        pred_captions = json.load(open(prediction, 'r'))['predictions']
        key2pred = {}
        for audio_idx in range(len(pred_captions)):
            item = pred_captions[audio_idx]
            audio_id = item['filename']
            key2pred[audio_id] = [item['tokens']]
        # FENSE runs on raw (untokenized) captions.
        from fense.fense import Fense
        scores = {}
        scorer = Fense()
        scores[scorer.method()] = evaluate_prediction(key2pred, key2refs, scorer)
        # Re-shape references and predictions into COCO caption format.
        refs4eval = {}
        for (key, refs) in key2refs.items():
            refs4eval[key] = []
            for (idx, ref) in enumerate(refs):
                refs4eval[key].append({'audio_id': key, 'id': idx, 'caption': ref})
        preds4eval = {}
        for (key, preds) in key2pred.items():
            preds4eval[key] = []
            for (idx, pred) in enumerate(preds):
                preds4eval[key].append({'audio_id': key, 'id': idx, 'caption': pred})
        from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
        tokenizer = PTBTokenizer()
        key2refs = tokenizer.tokenize(refs4eval)
        key2pred = tokenizer.tokenize(preds4eval)
        from pycocoevalcap.bleu.bleu import Bleu
        from pycocoevalcap.cider.cider import Cider
        from pycocoevalcap.rouge.rouge import Rouge
        from pycocoevalcap.meteor.meteor import Meteor
        from pycocoevalcap.spice.spice import Spice
        scorers = [Bleu(), Rouge(), Cider(), Meteor(), Spice()]
        for scorer in scorers:
            scores[scorer.method()] = evaluate_prediction(key2pred, key2refs, scorer)
        # SPIDEr is the mean of CIDEr and SPICE.
        spider = 0
        with open(output, 'w') as f:
            for (name, score) in scores.items():
                if (name == 'Bleu'):
                    # Bleu returns a list of BLEU-1..BLEU-4 scores.
                    for n in range(4):
                        f.write('Bleu-{}: {:6.3f}\n'.format((n + 1), score[n]))
                else:
                    f.write('{}: {:6.3f}\n'.format(name, score))
                if (name in ['CIDEr', 'SPICE']):
                    spider += score
            f.write('SPIDEr: {:6.3f}\n'.format((spider / 2)))
def make_save_dirs(args, prefix, suffix=None, with_time=False):
    """Create the standard output directory tree under ``args.result_path``.

    The root is ``result_path/prefix[/suffix][/timestamp]``; beneath it the
    image/log/checkpoint/samples/sample_to_eval subdirectories are created.

    Returns:
        Tuple of (image_path, checkpoint_path, log_path, sample_path,
        sample_to_eval_path).
    """
    if with_time:
        time_str = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
    else:
        time_str = ''
    if suffix is None:
        suffix = ''
    root = make_dir(os.path.join(args.result_path, prefix, suffix, time_str))
    image_path = make_dir(os.path.join(root, 'image'))
    log_path = make_dir(os.path.join(root, 'log'))
    checkpoint_path = make_dir(os.path.join(root, 'checkpoint'))
    sample_path = make_dir(os.path.join(root, 'samples'))
    sample_to_eval_path = make_dir(os.path.join(root, 'sample_to_eval'))
    print('create output path ' + root)
    return (image_path, checkpoint_path, log_path, sample_path, sample_to_eval_path)
class Qtile(CommandObject):
current_screen: Screen
dgroups: DGroups
_eventloop: asyncio.AbstractEventLoop
def __init__(self, kore: base.Core, config: Config, no_spawn: bool=False, state: (str | None)=None, socket_path: (str | None)=None) -> None:
self.core = kore
self.config = config
self.no_spawn = no_spawn
self._state: ((QtileState | str) | None) = state
self.socket_path = socket_path
self._drag: (tuple | None) = None
self._mouse_map: defaultdict[(int, list[Mouse])] = defaultdict(list)
self.windows_map: dict[(int, base.WindowType)] = {}
self.widgets_map: dict[(str, _Widget)] = {}
self.renamed_widgets: list[str]
self.groups_map: dict[(str, _Group)] = {}
self.groups: list[_Group] = []
self.keys_map: dict[(tuple[(int, int)], (Key | KeyChord))] = {}
self.chord_stack: list[KeyChord] = []
self.screens: list[Screen] = []
libqtile.init(self)
self._stopped_event: (asyncio.Event | None) = None
self.server = IPCCommandServer(self)
def load_config(self, initial: bool=False) -> None:
try:
self.config.load()
self.config.validate()
except Exception as e:
logger.exception('Configuration error:')
send_notification('Configuration error', str(e))
if hasattr(self.core, 'wmname'):
self.core.wmname = getattr(self.config, 'wmname', 'qtile')
self.dgroups = DGroups(self, self.config.groups, self.config.dgroups_key_binder)
_Widget.global_defaults = self.config.widget_defaults
_Extension.global_defaults = self.config.extension_defaults
for installed_extension in _Extension.installed_extensions:
installed_extension._configure(self)
for i in self.groups:
self.groups_map[i.name] = i
for grp in self.config.groups:
if isinstance(grp, ScratchPadConfig):
sp = ScratchPad(grp.name, grp.dropdowns, grp.label, grp.single)
sp._configure([self.config.floating_layout], self.config.floating_layout, self)
self.groups.append(sp)
self.groups_map[sp.name] = sp
self._process_screens(reloading=(not initial))
for key in self.config.keys:
self.grab_key(key)
for button in self.config.mouse:
self.grab_button(button)
if (not self.no_spawn):
hook.fire('startup_once')
self.no_spawn = True
hook.fire('startup')
if self._state:
if isinstance(self._state, str):
try:
with open(self._state, 'rb') as f:
st = pickle.load(f)
st.apply(self)
except:
logger.exception('failed restoring state')
finally:
os.remove(self._state)
else:
self._state.apply(self)
self.core.on_config_load(initial)
if self._state:
for screen in self.screens:
screen.group.layout.show(screen.get_rect())
screen.group.layout_all()
self._state = None
self.update_desktops()
hook.subscribe.setgroup(self.update_desktops)
if self.config.reconfigure_screens:
hook.subscribe.screen_change(self.reconfigure_screens)
inhibitor.start()
if initial:
hook.fire('startup_complete')
def _prepare_socket_path(self, socket_path: (str | None)=None) -> str:
if (socket_path is None):
socket_path = ipc.find_sockfile(self.core.display_name)
if os.path.exists(socket_path):
os.unlink(socket_path)
return socket_path
def loop(self) -> None:
asyncio.run(self.async_loop())
async def async_loop(self) -> None:
self._eventloop = asyncio.get_running_loop()
asyncio.set_event_loop_policy(QtileEventLoopPolicy(self))
self._stopped_event = asyncio.Event()
self.core.setup_listener(self)
try:
async with LoopContext({signal.SIGTERM: self.stop, signal.SIGINT: self.stop, signal.SIGHUP: self.stop, signal.SIGUSR1: self.reload_config, signal.SIGUSR2: self.restart}), ipc.Server(self._prepare_socket_path(self.socket_path), self.server.call):
self.load_config(initial=True)
(await self._stopped_event.wait())
finally:
self.finalize()
self.core.remove_listener()
def stop(self) -> None:
hook.fire('shutdown')
lifecycle.behavior = lifecycle.behavior.TERMINATE
self.core.graceful_shutdown()
self._stop()
_command()
def restart(self) -> None:
if (not self.core.supports_restarting):
raise CommandError(f'Backend does not support restarting: {self.core.name}')
try:
self.config.load()
except Exception as error:
logger.exception('Preventing restart because of a configuration error:')
send_notification('Configuration error', str(error.__context__))
return
hook.fire('restart')
lifecycle.behavior = lifecycle.behavior.RESTART
state_file = os.path.join(tempfile.gettempdir(), 'qtile-state')
with open(state_file, 'wb') as f:
self.dump_state(f)
lifecycle.state_file = state_file
self._stop()
def _stop(self) -> None:
logger.debug('Stopping qtile')
if (self._stopped_event is not None):
self._stopped_event.set()
def dump_state(self, buf: Any) -> None:
try:
pickle.dump(QtileState(self), buf, protocol=0)
except:
logger.exception('Unable to pickle qtile state')
_command()
def reload_config(self) -> None:
logger.debug('Reloading the configuration file')
try:
self.config.load()
except Exception as error:
logger.exception('Configuration error:')
send_notification('Configuration error', str(error))
return
self._state = QtileState(self, restart=False)
self._finalize_configurables()
hook.clear()
self.ungrab_keys()
self.chord_stack.clear()
self.core.ungrab_buttons()
self._mouse_map.clear()
self.groups_map.clear()
self.groups.clear()
self.screens.clear()
self.load_config()
def _finalize_configurables(self) -> None:
try:
for widget in self.widgets_map.values():
widget.finalize()
self.widgets_map.clear()
for group in self.groups:
for layout in group.layouts:
layout.finalize()
for screen in self.screens:
for gap in screen.gaps:
gap.finalize()
except:
logger.exception('exception during finalize')
hook.clear()
def finalize(self) -> None:
self._finalize_configurables()
inhibitor.stop()
cancel_tasks()
self.core.finalize()
def add_autogen_group(self, screen_idx: int) -> _Group:
name = f'autogen_{(screen_idx + 1)}'
self.add_group(name)
logger.warning('Too few groups in config. Added group: %s', name)
return self.groups_map[name]
def get_available_group(self, screen_idx: int) -> (_Group | None):
for group in self.groups:
if (group.screen or isinstance(group, ScratchPad)):
continue
if ((group.screen_affinity is None) or (group.screen_affinity == screen_idx)):
return group
return None
def _process_screens(self, reloading: bool=False) -> None:
current_groups = [s.group for s in self.screens]
screens = []
if hasattr(self.config, 'fake_screens'):
screen_info = [(s.x, s.y, s.width, s.height) for s in self.config.fake_screens]
config = self.config.fake_screens
else:
xywh = {}
for (sx, sy, sw, sh) in self.core.get_screen_info():
pos = (sx, sy)
(width, height) = xywh.get(pos, (0, 0))
xywh[pos] = (max(width, sw), max(height, sh))
screen_info = [(x, y, w, h) for ((x, y), (w, h)) in xywh.items()]
config = self.config.screens
for (i, (x, y, w, h)) in enumerate(screen_info):
if ((i + 1) > len(config)):
scr = Screen()
else:
scr = config[i]
if ((not hasattr(self, 'current_screen')) or reloading):
self.current_screen = scr
reloading = False
grp = None
if (i < len(current_groups)):
grp = current_groups[i]
else:
grp = self.get_available_group(i)
if (grp is None):
grp = self.add_autogen_group(i)
reconfigure_gaps = (((x, y, w, h) != (scr.x, scr.y, scr.width, scr.height)) or ((i + 1) > len(self.screens)))
if (not hasattr(scr, 'group')):
scr.group = grp
scr._configure(self, i, x, y, w, h, grp, reconfigure_gaps=reconfigure_gaps)
screens.append(scr)
for screen in self.screens:
if (screen not in screens):
for gap in screen.gaps:
if (isinstance(gap, bar.Bar) and gap.window):
gap.kill_window()
self.screens = screens
_command()
def reconfigure_screens(self, *_: list[Any], **__: dict[(Any, Any)]) -> None:
logger.info('Reconfiguring screens.')
self._process_screens()
for group in self.groups:
if group.screen:
if (group.screen in self.screens):
group.layout_all()
else:
group.hide()
hook.fire('screens_reconfigured')
def paint_screen(self, screen: Screen, image_path: str, mode: (str | None)=None) -> None:
self.core.painter.paint(screen, image_path, mode)
def process_key_event(self, keysym: int, mask: int) -> tuple[(((Key | KeyChord) | None), bool)]:
key = self.keys_map.get((keysym, mask), None)
if (key is None):
logger.debug('Ignoring unknown keysym: %s, mask: %s', keysym, mask)
return (None, False)
if isinstance(key, KeyChord):
self.grab_chord(key)
else:
executed = False
for cmd in key.commands:
if cmd.check(self):
(status, val) = self.server.call((cmd.selectors, cmd.name, cmd.args, cmd.kwargs))
if (status in (interface.ERROR, interface.EXCEPTION)):
logger.error('KB command error %s: %s', cmd.name, val)
executed = True
if (self.chord_stack and ((not self.chord_stack[(- 1)].mode) or (key.key == 'Escape'))):
self.ungrab_chord()
elif (not executed):
return (key, False)
return (key, key.swallow)
def grab_keys(self) -> None:
self.core.ungrab_keys()
for key in self.keys_map.values():
self.core.grab_key(key)
def grab_key(self, key: (Key | KeyChord)) -> None:
syms = self.core.grab_key(key)
if (syms in self.keys_map):
logger.warning('Key spec duplicated, overriding previous: %s', key)
self.keys_map[syms] = key
def ungrab_key(self, key: (Key | KeyChord)) -> None:
(keysym, mask_key) = self.core.ungrab_key(key)
self.keys_map.pop((keysym, mask_key))
def ungrab_keys(self) -> None:
self.core.ungrab_keys()
self.keys_map.clear()
def grab_chord(self, chord: KeyChord) -> None:
self.chord_stack.append(chord)
if self.chord_stack:
hook.fire('enter_chord', chord.name)
self.ungrab_keys()
for key in chord.submappings:
self.grab_key(key)
_command()
def ungrab_chord(self) -> None:
hook.fire('leave_chord')
self.ungrab_keys()
if (not self.chord_stack):
logger.debug('ungrab_chord was called when no chord mode was active')
return
self.chord_stack.pop()
while self.chord_stack:
chord = self.chord_stack.pop()
if chord.mode:
self.grab_chord(chord)
break
else:
for key in self.config.keys:
self.grab_key(key)
_command()
def ungrab_all_chords(self) -> None:
hook.fire('leave_chord')
self.ungrab_keys()
self.chord_stack.clear()
for key in self.config.keys:
self.grab_key(key)
def grab_button(self, button: Mouse) -> None:
try:
button.modmask = self.core.grab_button(button)
except utils.QtileError:
logger.warning('Unknown modifier(s): %s', button.modifiers)
return
self._mouse_map[button.button_code].append(button)
def update_desktops(self) -> None:
try:
index = self.groups.index(self.current_group)
except (ValueError, AttributeError):
index = 0
self.core.update_desktops(self.groups, index)
def add_group(self, name: str, layout: (str | None)=None, layouts: (list[Layout] | None)=None, label: (str | None)=None, index: (int | None)=None, screen_affinity: (int | None)=None) -> bool:
if (name not in self.groups_map.keys()):
g = _Group(name, layout, label=label, screen_affinity=screen_affinity)
if (index is None):
self.groups.append(g)
else:
self.groups.insert(index, g)
if (not layouts):
layouts = self.config.layouts
g._configure(layouts, self.config.floating_layout, self)
self.groups_map[name] = g
hook.fire('addgroup', name)
hook.fire('changegroup')
self.update_desktops()
return True
return False
def delete_group(self, name: str) -> None:
if (len(self.groups) == len(self.screens)):
raise ValueError("Can't delete all groups.")
if (name in self.groups_map.keys()):
group = self.groups_map[name]
if (group.screen and group.screen.previous_group):
target = group.screen.previous_group
else:
target = group.get_previous_group()
while target.screen:
target = target.get_previous_group()
for i in list(group.windows):
i.togroup(target.name)
if (self.current_group.name == name):
self.current_screen.set_group(target, save_prev=False)
self.groups.remove(group)
del self.groups_map[name]
hook.fire('delgroup', name)
hook.fire('changegroup')
self.update_desktops()
def register_widget(self, w: _Widget) -> None:
name = w.name
i = 0
while (name in self.widgets_map):
i += 1
name = f'{w.name}_{i}'
if (name != w.name):
self.renamed_widgets.append(name)
self.widgets_map[name] = w
def current_layout(self) -> Layout:
return self.current_group.layout
def current_group(self) -> _Group:
return self.current_screen.group
def current_window(self) -> (base.Window | None):
return self.current_screen.group.current_window
def reserve_space(self, reserved_space: tuple[(int, int, int, int)], screen: Screen) -> None:
for (i, pos) in enumerate(['left', 'right', 'top', 'bottom']):
if (space := reserved_space[i]):
if (gap := getattr(screen, pos)):
gap.adjust_reserved_space(space)
elif (0 < space):
gap = bar.Gap(0)
gap.screen = screen
setattr(screen, pos, gap)
gap.adjust_reserved_space(space)
screen.resize()
def free_reserved_space(self, reserved_space: tuple[(int, int, int, int)], screen: Screen) -> None:
reserved_space = tuple(((- i) for i in reserved_space))
self.reserve_space(reserved_space, screen)
def manage(self, win: base.WindowType) -> None:
if isinstance(win, base.Internal):
self.windows_map[win.wid] = win
return
if (win.wid in self.windows_map):
return
hook.fire('client_new', win)
if win.defunct:
return
self.windows_map[win.wid] = win
if (self.current_screen and isinstance(win, base.Window)):
if ((not win.group) and self.current_screen.group):
self.current_screen.group.add(win, focus=win.can_steal_focus)
hook.fire('client_managed', win)
def unmanage(self, wid: int) -> None:
c = self.windows_map.get(wid)
if c:
group = None
if isinstance(c, base.Static):
if c.reserved_space:
self.free_reserved_space(c.reserved_space, c.screen)
elif isinstance(c, base.Window):
if c.group:
group = c.group
c.group.remove(c)
del self.windows_map[wid]
if isinstance(c, base.Window):
c.group = group
hook.fire('client_killed', c)
def find_screen(self, x: int, y: int) -> (Screen | None):
result = []
for i in self.screens:
if ((i.x <= x <= (i.x + i.width)) and (i.y <= y <= (i.y + i.height))):
result.append(i)
if (len(result) == 1):
return result[0]
return None
def find_closest_screen(self, x: int, y: int) -> Screen:
normal = self.find_screen(x, y)
if (normal is not None):
return normal
x_match = []
y_match = []
for i in self.screens:
if (i.x <= x <= (i.x + i.width)):
x_match.append(i)
if (i.y <= y <= (i.y + i.height)):
y_match.append(i)
if (len(x_match) == 1):
return x_match[0]
if (len(y_match) == 1):
return y_match[0]
return self._find_closest_closest(x, y, (x_match + y_match))
def _find_closest_closest(self, x: int, y: int, candidate_screens: list[Screen]) -> Screen:
closest_distance: (float | None) = None
if (not candidate_screens):
candidate_screens = self.screens
candidate_screens = [s for s in candidate_screens if ((x < (s.x + s.width)) and (y < (s.y + s.height)))]
closest_screen = lget(candidate_screens, 0)
for s in candidate_screens:
middle_x = (s.x + (s.width / 2))
middle_y = (s.y + (s.height / 2))
distance = (((x - middle_x) ** 2) + ((y - middle_y) ** 2))
if ((closest_distance is None) or (distance < closest_distance)):
closest_distance = distance
closest_screen = s
return (closest_screen or self.screens[0])
def process_button_click(self, button_code: int, modmask: int, x: int, y: int) -> bool:
handled = False
for m in self._mouse_map[button_code]:
if (not (m.modmask == modmask)):
continue
if isinstance(m, Click):
for i in m.commands:
if i.check(self):
(status, val) = self.server.call((i.selectors, i.name, i.args, i.kwargs))
if (status in (interface.ERROR, interface.EXCEPTION)):
logger.error('Mouse command error %s: %s', i.name, val)
handled = True
elif (isinstance(m, Drag) and self.current_window and (not self.current_window.fullscreen)):
if m.start:
i = m.start
(status, val) = self.server.call((i.selectors, i.name, i.args, i.kwargs))
if (status in (interface.ERROR, interface.EXCEPTION)):
logger.error('Mouse command error %s: %s', i.name, val)
continue
else:
val = (0, 0)
if (m.warp_pointer and (self.current_window is not None)):
win_size = self.current_window.get_size()
win_pos = self.current_window.get_position()
x = (win_size[0] + win_pos[0])
y = (win_size[1] + win_pos[1])
self.core.warp_pointer(x, y)
self._drag = (x, y, val[0], val[1], m.commands)
self.core.grab_pointer()
handled = True
return handled
def process_button_release(self, button_code: int, modmask: int) -> bool:
if (self._drag is not None):
for m in self._mouse_map[button_code]:
if isinstance(m, Drag):
self._drag = None
self.core.ungrab_pointer()
return True
return False
def process_button_motion(self, x: int, y: int) -> None:
if (self._drag is None):
return
(ox, oy, rx, ry, cmd) = self._drag
dx = (x - ox)
dy = (y - oy)
if (dx or dy):
for i in cmd:
if i.check(self):
(status, val) = self.server.call((i.selectors, i.name, (i.args + ((rx + dx), (ry + dy))), i.kwargs))
if (status in (interface.ERROR, interface.EXCEPTION)):
logger.error('Mouse command error %s: %s', i.name, val)
def warp_to_screen(self) -> None:
if self.current_screen:
scr = self.current_screen
self.core.warp_pointer((scr.x + (scr.dwidth // 2)), (scr.y + (scr.dheight // 2)))
def focus_screen(self, n: int, warp: bool=True) -> None:
if (n >= len(self.screens)):
return
old = self.current_screen
self.current_screen = self.screens[n]
if (old != self.current_screen):
hook.fire('current_screen_change')
hook.fire('setgroup')
old.group.layout_all()
self.current_group.focus(self.current_window, warp)
if ((self.current_window is None) and warp):
self.warp_to_screen()
def move_to_group(self, group: str) -> None:
if (self.current_window and group):
self.add_group(group)
self.current_window.togroup(group)
def _items(self, name: str) -> ItemT:
if (name == 'group'):
return (True, list(self.groups_map.keys()))
elif (name == 'layout'):
return (True, list(range(len(self.current_group.layouts))))
elif (name == 'widget'):
return (False, list(self.widgets_map.keys()))
elif (name == 'bar'):
return (False, [x.position for x in self.current_screen.gaps if isinstance(x, bar.Bar)])
elif (name == 'window'):
windows: list[(str | int)]
windows = [k for (k, v) in self.windows_map.items() if (isinstance(v, CommandObject) and (not isinstance(v, _Widget)))]
return (True, windows)
elif (name == 'screen'):
return (True, list(range(len(self.screens))))
elif (name == 'core'):
return (True, [])
return None
def _select(self, name: str, sel: ((str | int) | None)) -> (CommandObject | None):
if (name == 'group'):
if (sel is None):
return self.current_group
else:
return self.groups_map.get(sel)
elif (name == 'layout'):
if (sel is None):
return self.current_group.layout
else:
return lget(self.current_group.layouts, int(sel))
elif (name == 'widget'):
return self.widgets_map.get(sel)
elif (name == 'bar'):
gap = getattr(self.current_screen, sel)
if isinstance(gap, bar.Bar):
return gap
elif (name == 'window'):
if (sel is None):
return self.current_window
else:
windows: dict[((str | int), base.WindowType)]
windows = {k: v for (k, v) in self.windows_map.items() if (isinstance(v, CommandObject) and (not isinstance(v, _Widget)))}
return windows.get(sel)
elif (name == 'screen'):
if (sel is None):
return self.current_screen
else:
return lget(self.screens, int(sel))
elif (name == 'core'):
return self.core
return None
def call_soon(self, func: Callable, *args: Any) -> asyncio.Handle:
def f() -> None:
func(*args)
self.core.flush()
return self._eventloop.call_soon(f)
def call_soon_threadsafe(self, func: Callable, *args: Any) -> asyncio.Handle:
def f() -> None:
func(*args)
self.core.flush()
return self._eventloop.call_soon_threadsafe(f)
def call_later(self, delay: int, func: Callable, *args: Any) -> asyncio.TimerHandle:
def f() -> None:
func(*args)
self.core.flush()
return self._eventloop.call_later(delay, f)
def run_in_executor(self, func: Callable, *args: Any) -> asyncio.Future:
return self._eventloop.run_in_executor(None, func, *args)
_command()
def debug(self) -> None:
    """Set the module logger's level to DEBUG."""
    logger.setLevel(logging.DEBUG)
    logger.debug('Switching to DEBUG threshold')
_command()
def info(self) -> None:
    """Set the module logger's level to INFO."""
    logger.setLevel(logging.INFO)
    logger.info('Switching to INFO threshold')
_command()
def warning(self) -> None:
    """Set the module logger's level to WARNING."""
    logger.setLevel(logging.WARNING)
    logger.warning('Switching to WARNING threshold')
_command()
def error(self) -> None:
    """Set the module logger's level to ERROR."""
    logger.setLevel(logging.ERROR)
    logger.error('Switching to ERROR threshold')
_command()
def critical(self) -> None:
    """Set the module logger's level to CRITICAL."""
    logger.setLevel(logging.CRITICAL)
    logger.critical('Switching to CRITICAL threshold')
_command()
def loglevel(self) -> int:
    """Return the module logger's current numeric level."""
    return logger.level
_command()
def loglevelname(self) -> str:
    """Return the module logger's current level as a name (e.g. 'INFO')."""
    return logging.getLevelName(logger.level)
_command()
def pause(self) -> None:
    """Drop into pdb. Debugging aid: blocks the event loop until resumed."""
    import pdb
    pdb.set_trace()
_command()
def get_groups(self) -> dict[(str, dict[(str, Any)])]:
    """Return a mapping of group name -> that group's info dictionary."""
    info_by_name = {}
    for grp in self.groups:
        info_by_name[grp.name] = grp.info()
    return info_by_name
_command()
def display_kb(self) -> str:
    """Render the configured key bindings as an aligned text table."""
    class FormatTable():
        """Accumulates rows and pads each column to its widest cell."""
        def __init__(self) -> None:
            self.max_col_size: list[int] = []
            self.rows: list[list[str]] = []
        def add(self, row: list[str]) -> None:
            # Grow the per-column width tracker if this row has more columns.
            n = (len(row) - len(self.max_col_size))
            if (n > 0):
                self.max_col_size += ([0] * n)
            for (i, f) in enumerate(row):
                if (len(f) > self.max_col_size[i]):
                    self.max_col_size[i] = len(f)
            self.rows.append(row)
        def getformat(self) -> tuple[(str, int)]:
            # Left-justified %-format with two spaces of padding per column.
            format_string = ' '.join(('%-{0:d}s'.format((max_col_size + 2)) for max_col_size in self.max_col_size))
            return ((format_string + '\n'), len(self.max_col_size))
        def expandlist(self, list_: list[str], n: int) -> list[str]:
            # An empty row renders as a full-width separator of dashes.
            if (not list_):
                return [('-' * max_col_size) for max_col_size in self.max_col_size]
            # Pad short rows with empty cells up to n columns.
            n -= len(list_)
            if (n > 0):
                list_ += ([''] * n)
            return list_
        def __str__(self) -> str:
            (format_, n) = self.getformat()
            return ''.join(((format_ % tuple(self.expandlist(row, n))) for row in self.rows))
    result = FormatTable()
    result.add(['Mode', 'KeySym', 'Mod', 'Command', 'Desc'])
    result.add([])
    rows = []
    def walk_binding(k: (Key | KeyChord), mode: str) -> None:
        # Recursively collect one table row per Key; KeyChords add a row
        # and descend into their submappings with an extended mode name.
        nonlocal rows
        (modifiers, name) = (', '.join(k.modifiers), k.key)
        if isinstance(k, Key):
            if (not k.commands):
                return
            # Render the first bound command as "name(args, kw = value)".
            allargs = ', '.join(([(value.__name__ if callable(value) else repr(value)) for value in k.commands[0].args] + [('%s = %s' % (keyword, repr(value))) for (keyword, value) in k.commands[0].kwargs.items()]))
            rows.append([mode, name, modifiers, '{:s}({:s})'.format(k.commands[0].name, allargs), k.desc])
            return
        if isinstance(k, KeyChord):
            new_mode_s = (k.name if k.name else '<unnamed>')
            new_mode = (k.name if (mode == '<root>') else '{}>{}'.format(mode, (k.name if k.name else '_')))
            rows.append([mode, name, modifiers, '', 'Enter {:s} mode'.format(new_mode_s)])
            for s in k.submappings:
                walk_binding(s, new_mode)
            return
        raise TypeError('Unexpected type: {}'.format(type(k)))
    for k in self.config.keys:
        walk_binding(k, '<root>')
    rows.sort()
    for row in rows:
        result.add(row)
    return str(result)
_command()
def list_widgets(self) -> list[str]:
    """Return the names of all configured widgets."""
    return list(self.widgets_map)
_command()
def to_layout_index(self, index: str, name: (str | None)=None) -> None:
if (name is not None):
group = self.groups_map[name]
else:
group = self.current_group
group.use_layout(index)
_command()
def next_layout(self, name: (str | None)=None) -> None:
if (name is not None):
group = self.groups_map[name]
else:
group = self.current_group
group.use_next_layout()
_command()
def prev_layout(self, name: (str | None)=None) -> None:
if (name is not None):
group = self.groups_map[name]
else:
group = self.current_group
group.use_previous_layout()
_command()
def get_screens(self) -> list[dict[(str, Any)]]:
    """Return an info dictionary (geometry, group, gaps) for each screen."""
    infos = []
    for screen in self.screens:
        gaps = {}
        for edge in ('top', 'bottom', 'left', 'right'):
            gap = getattr(screen, edge)
            gaps[edge] = gap.geometry() if gap else None
        infos.append(dict(index=screen.index, group=(screen.group.name if (screen.group is not None) else None), x=screen.x, y=screen.y, width=screen.width, height=screen.height, gaps=gaps))
    return infos
_command()
def simulate_keypress(self, modifiers: list[str], key: str) -> None:
    """Inject a synthetic key press into the core backend.

    Raises:
        CommandError: if the backend rejects the modifier/key combination.
    """
    try:
        self.core.simulate_keypress(modifiers, key)
    except utils.QtileError as e:
        # Re-raise as CommandError so IPC clients get a uniform error type.
        raise CommandError(str(e))
_command()
def validate_config(self) -> None:
    """Try (re)loading the config file and report the outcome as a notification."""
    try:
        self.config.load()
    except Exception as error:
        send_notification('Configuration check', str(error))
    else:
        send_notification('Configuration check', 'No error found!')
_command()
def spawn(self, cmd: (str | list[str]), shell: bool=False) -> int:
    """Spawn an external command, fully detached from the window manager.

    Returns the PID of the spawned (grandchild) process, -1 when the
    executable cannot be found, or a negative value if fork() fails.
    """
    if isinstance(cmd, str):
        args = shlex.split(cmd)
    else:
        args = list(cmd)
        cmd = subprocess.list2cmdline(args)
    to_lookup = args[0]
    if shell:
        # Hand the original command line to a shell instead.
        args = ['/bin/sh', '-c', cmd]
    if (shutil.which(to_lookup) is None):
        logger.error("couldn't find `%s`", to_lookup)
        return (- 1)
    # Double-fork so the command is reparented to init and never becomes
    # a zombie of this process; the pipe carries the grandchild PID back.
    (r, w) = os.pipe()
    pid = os.fork()
    if (pid < 0):
        os.close(r)
        os.close(w)
        return pid
    if (pid == 0):
        # First child: drop stdio, fork again, report the PID, exit.
        os.close(r)
        os.close(0)
        os.close(1)
        os.close(2)
        pid2 = os.fork()
        if (pid2 == 0):
            # Grandchild: becomes the actual command.
            os.close(w)
            try:
                # Don't leak our virtualenv into spawned programs.
                del os.environ['VIRTUAL_ENV']
            except KeyError:
                pass
            try:
                fd = os.open(os.devnull, os.O_RDWR)
            except OSError:
                pass
            else:
                try:
                    os.set_inheritable(fd, True)
                except AttributeError:
                    pass
                # Point stdin/stdout/stderr at /dev/null.
                if (fd > 0):
                    os.dup2(fd, 0)
                    os.dup2(fd, 1)
                    os.dup2(fd, 2)
            try:
                os.execvp(args[0], args)
            except OSError:
                pass
            # exec failed: exit without running Python cleanup handlers.
            os._exit(1)
        else:
            # First child: send the grandchild PID up, then exit.
            os.write(w, str(pid2).encode())
            os.close(w)
            os._exit(0)
    else:
        # Parent: reap the first child and read the grandchild PID.
        os.close(w)
        os.waitpid(pid, 0)
        pid = int(os.read(r, 1024))
        os.close(r)
        return pid
_command()
def status(self) -> Literal['OK']:
    """Liveness probe: always report 'OK'."""
    return 'OK'
_command()
def sync(self) -> None:
    """Flush pending requests in the core backend."""
    self.core.flush()
_command()
def to_screen(self, n: int) -> None:
    """Focus screen number ``n``."""
    self.focus_screen(n)
_command()
def next_screen(self) -> None:
    """Focus the next screen, wrapping past the last one."""
    current = self.screens.index(self.current_screen)
    self.focus_screen((current + 1) % len(self.screens))
_command()
def prev_screen(self) -> None:
    """Focus the previous screen, wrapping past the first one."""
    current = self.screens.index(self.current_screen)
    self.focus_screen((current - 1) % len(self.screens))
_command()
def windows(self) -> list[dict[(str, Any)]]:
    """Return info dicts for managed windows, excluding internals and widgets."""
    result = []
    for win in self.windows_map.values():
        if (not isinstance(win, (base.Internal, _Widget))) and isinstance(win, CommandObject):
            result.append(win.info())
    return result
_command()
def internal_windows(self) -> list[dict[(str, Any)]]:
    """Return info dicts for qtile's internal windows only."""
    result = []
    for win in self.windows_map.values():
        if isinstance(win, base.Internal):
            result.append(win.info())
    return result
_command()
def qtile_info(self) -> dict:
    """Return version, log level and (when known) log/config file paths."""
    config_path = self.config.file_path
    dictionary = {'version': VERSION, 'log_level': self.loglevelname()}
    # Only file-based logging setups expose a log path.
    if isinstance(logger.handlers[0], RotatingFileHandler):
        log_path = logger.handlers[0].baseFilename
        dictionary['log_path'] = log_path
    if isinstance(config_path, str):
        dictionary['config_path'] = config_path
    elif isinstance(config_path, Path):
        dictionary['config_path'] = config_path.as_posix()
    return dictionary
_command()
def shutdown(self) -> None:
    """Stop qtile (delegates to :meth:`stop`)."""
    self.stop()
_command()
def switch_groups(self, namea: str, nameb: str) -> None:
    """Swap the positions of two groups, identified by name.

    Silently does nothing if either name is unknown.
    """
    if namea not in self.groups_map or nameb not in self.groups_map:
        return
    index_a = self.groups.index(self.groups_map[namea])
    index_b = self.groups.index(self.groups_map[nameb])
    self.groups[index_a], self.groups[index_b] = (self.groups[index_b], self.groups[index_a])
    hook.fire('setgroup')
    # Re-home every window so it points at its (possibly moved) group.
    for group in (self.groups[index_a], self.groups[index_b]):
        for win in group.windows:
            win.group = group
def find_window(self, wid: int) -> None:
    """Focus the window with id ``wid``, switching to its group if needed."""
    window = self.windows_map.get(wid)
    if (isinstance(window, base.Window) and window.group):
        if (not window.group.screen):
            # The window's group isn't visible anywhere; show it here.
            self.current_screen.set_group(window.group)
        window.group.focus(window, False)
_command()
def findwindow(self, prompt: str='window', widget: str='prompt') -> None:
    """Open a prompt widget that jumps to the window the user names."""
    prompt_widget = self.widgets_map.get(widget)
    if not prompt_widget:
        logger.error("No widget named '%s' present.", widget)
        return
    prompt_widget.start_input(prompt, self.find_window, 'window', strict_completer=True)
_command()
def switch_window(self, location: int) -> None:
    """Focus the window at 1-based position ``location`` in the current group.

    Out-of-range positions are ignored.
    """
    group_windows = self.current_group.windows
    if not (1 <= location <= len(group_windows)):
        return
    self.current_group.focus(group_windows[location - 1])
_command()
def change_window_order(self, new_location: int) -> None:
    """Move the focused window to 1-based slot ``new_location`` in its group.

    The displaced window takes the focused window's old slot (a swap,
    not a rotation). Out-of-range positions are ignored.
    """
    group_windows = self.current_group.windows
    if not (1 <= new_location <= len(group_windows)):
        return
    current_index = group_windows.index(self.current_window)
    target_index = new_location - 1
    group_windows[current_index], group_windows[target_index] = (group_windows[target_index], group_windows[current_index])
_command()
def next_urgent(self) -> None:
    """Focus the first window flagged urgent, if any."""
    try:
        # IndexError from [0] is the "no urgent windows" signal.
        nxt = [w for w in self.windows_map.values() if w.urgent][0]
        assert isinstance(nxt, base.Window)
        if nxt.group:
            # The window has a group: bring that group onto a screen.
            nxt.group.toscreen()
            nxt.group.focus(nxt)
        else:
            # Orphan window: adopt it into the current group first.
            self.current_screen.group.add(nxt)
            self.current_screen.group.focus(nxt)
    except IndexError:
        # No urgent windows; nothing to do.
        pass
_command()
def togroup(self, prompt: str='group', widget: str='prompt') -> None:
    """Prompt for a group name and move the focused window there."""
    if not self.current_window:
        logger.warning('No window to move')
        return
    prompt_widget = self.widgets_map.get(widget)
    if not prompt_widget:
        logger.error("No widget named '%s' present.", widget)
        return
    prompt_widget.start_input(prompt, self.move_to_group, 'group', strict_completer=True)
_command()
def switchgroup(self, prompt: str='group', widget: str='prompt') -> None:
    """Open a prompt widget that switches to the group the user names."""
    def f(group: str) -> None:
        if group:
            try:
                self.groups_map[group].toscreen()
            except KeyError:
                logger.warning("No group named '%s' present.", group)
    mb = self.widgets_map.get(widget)
    if (not mb):
        logger.error("No widget named '%s' present.", widget)
        return
    mb.start_input(prompt, f, 'group', strict_completer=True)
_command()
def labelgroup(self, prompt: str='label', widget: str='prompt') -> None:
    """Open a prompt widget to relabel the current group (empty input clears it)."""
    def f(name: str) -> None:
        self.current_group.set_label((name or None))
    try:
        mb = self.widgets_map[widget]
        mb.start_input(prompt, f, allow_empty_input=True)
    except KeyError:
        logger.error("No widget named '%s' present.", widget)
_command()
def spawncmd(self, prompt: str='spawn', widget: str='prompt', command: str='%s', complete: str='cmd', shell: bool=True, aliases: (dict[(str, str)] | None)=None) -> None:
    """Open a prompt widget and spawn whatever the user types.

    Args:
        prompt: prompt text shown in the widget.
        widget: name of the prompt widget to use.
        command: template the input is %-substituted into.
        complete: completion type for the widget.
        shell: whether to run the result through a shell.
        aliases: optional mapping of shorthand -> full command line.
    """
    def f(args: str) -> None:
        if args:
            if (aliases and (args in aliases)):
                args = aliases[args]
            self.spawn((command % args), shell=shell)
    try:
        mb = self.widgets_map[widget]
        mb.start_input(prompt, f, complete, aliases=aliases)
    except KeyError:
        logger.error("No widget named '%s' present.", widget)
_command()
def qtilecmd(self, prompt: str='command', widget: str='prompt', messenger: str='xmessage') -> None:
    """Prompt for a lazy command, execute it, and display the result.

    Args:
        prompt: text shown in the prompt widget.
        widget: name of the prompt widget to use.
        messenger: external program used to display the result
            (falsy disables display; the result is still logged).
    """
    def f(cmd: str) -> None:
        if cmd:
            q = QtileCommandInterface(self)
            c = InteractiveCommandClient(q)
            try:
                cmd_arg = str(cmd).split(' ')
            except AttributeError:
                return
            cmd_len = len(cmd_arg)
            if (cmd_len == 0):
                logger.debug('No command entered.')
                return
            try:
                # NOTE: eval of user-entered text is intentional here — the
                # prompt is a local interactive command console, not a
                # network-facing input.
                result = eval('c.{0:s}'.format(cmd))
            except (CommandError, CommandException, AttributeError):
                logger.exception('Command errored:')
                result = None
            if (result is not None):
                from pprint import pformat
                message = pformat(result)
                if messenger:
                    self.spawn('{0:s} "{1:s}"'.format(messenger, message))
                logger.debug(result)
    # Fix: use .get() so a missing widget is reported via the guard below
    # instead of raising KeyError (subscripting made the guard dead code),
    # matching the behaviour of findwindow/togroup/switchgroup.
    mb = self.widgets_map.get(widget)
    if (not mb):
        logger.error('No widget named %s present.', widget)
        return
    mb.start_input(prompt, f, 'qshell')
_command()
def addgroup(self, group: str, label: (str | None)=None, layout: (str | None)=None, layouts: (list[Layout] | None)=None, index: (int | None)=None) -> bool:
    """Command wrapper around :meth:`add_group`; returns its result."""
    return self.add_group(name=group, layout=layout, layouts=layouts, label=label, index=index)
_command()
def delgroup(self, group: str) -> None:
    """Command wrapper around :meth:`delete_group`."""
    self.delete_group(group)
_command()
def add_rule(self, match_args: dict[(str, Any)], rule_args: dict[(str, Any)], min_priorty: bool=False) -> (int | None):
    """Add a dgroups rule; returns its id (for remove_rule) or None.

    Args:
        match_args: keyword arguments for ``Match``.
        rule_args: keyword arguments for ``Rule``.
        min_priorty: insert with minimum priority (misspelled, but part of
            the public API — kept for compatibility).
    """
    if (not self.dgroups):
        logger.warning('No dgroups created')
        return None
    match = Match(**match_args)
    rule = Rule([match], **rule_args)
    return self.dgroups.add_rule(rule, min_priorty)
_command()
def remove_rule(self, rule_id: int) -> None:
    """Remove a dgroups rule previously returned by :meth:`add_rule`."""
    self.dgroups.remove_rule(rule_id)
_command()
def hide_show_bar(self, position: Literal[('top', 'bottom', 'left', 'right', 'all')]='all') -> None:
    """Toggle visibility of the bar(s) on the current screen.

    Args:
        position: a single edge name, or 'all' to toggle every bar on the
            current screen in unison.
    """
    if (position in ['top', 'bottom', 'left', 'right']):
        # Renamed local from `bar` to `bar_obj`: the old name shadowed the
        # module-level `bar` import (which is why the 'all' branch has to
        # spell out libqtile.bar.Bar).
        bar_obj = getattr(self.current_screen, position)
        if bar_obj:
            bar_obj.show((not bar_obj.is_show()))
            self.current_group.layout_all()
        else:
            logger.warning("Not found bar in position '%s' for hide/show.", position)
    elif (position == 'all'):
        screen = self.current_screen
        is_show = None
        for bar_obj in [screen.left, screen.right, screen.top, screen.bottom]:
            if isinstance(bar_obj, libqtile.bar.Bar):
                # All bars follow the toggled state of the first one found.
                if (is_show is None):
                    is_show = (not bar_obj.is_show())
                bar_obj.show(is_show)
        if (is_show is not None):
            self.current_group.layout_all()
        else:
            logger.warning('Not found bar for hide/show.')
    else:
        logger.warning('Invalid position value:%s', position)
_command()
def get_state(self) -> str:
    """Serialise the current state and return it as text."""
    buffer = io.BytesIO()
    self.dump_state(buffer)
    state = buffer.getvalue().decode(errors='backslashreplace')
    logger.debug('State = %s', state)
    return state
_command()
def tracemalloc_toggle(self) -> None:
    """Start tracemalloc if it is stopped; stop it if it is running."""
    import tracemalloc
    if tracemalloc.is_tracing():
        tracemalloc.stop()
    else:
        tracemalloc.start()
_command()
def tracemalloc_dump(self) -> tuple[(bool, str)]:
    """Dump a tracemalloc snapshot into the cache dir.

    Returns (True, dump_path) on success, (False, reason) when tracing
    is not active.
    """
    import tracemalloc
    if not tracemalloc.is_tracing():
        return (False, 'Trace not started')
    dump_path = os.path.join(get_cache_dir(), 'qtile_tracemalloc.dump')
    tracemalloc.take_snapshot().dump(dump_path)
    return (True, dump_path)
_command()
def get_test_data(self) -> Any:
    """Return ``self.test_data`` (data injected for the test suite)."""
    return self.test_data
_command()
def run_extension(self, extension: _Extension) -> None:
    """Run the given extension (delegates to ``extension.run()``)."""
    extension.run()
_command()
def fire_user_hook(self, hook_name: str, *args: Any) -> None:
    """Fire the custom hook registered as ``user_<hook_name>`` with ``args``."""
    hook.fire(f'user_{hook_name}', *args)
class Time2BumpDistanceGetter(Time2BumpSpeedGetter):
    """Derives the target's bump distance from its bump speed."""
    def _calculatePoint(self, x, miscParams, src, tgt, commonData):
        # Scale the mass down by 10^6 — assumes the raw value is in much
        # smaller units than the speed expects; TODO confirm units.
        mass = (miscParams['tgtMass'] / (10 ** 6))
        inertia = miscParams['tgtInertia']
        speed = Time2BumpSpeedGetter._calculatePoint(self, x=x, miscParams=miscParams, src=src, tgt=tgt, commonData=commonData)
        return ((speed * mass) * inertia)
class DistributedTrainingConfig(FairseqDataclass):
    """Dataclass holding all distributed-training options.

    Each field's CLI semantics are described in its ``metadata['help']``.
    """
    # -- core process-group setup --
    distributed_world_size: int = field(default=max(1, torch.cuda.device_count()), metadata={'help': 'total number of GPUs across all nodes (default: all visible GPUs)'})
    distributed_rank: Optional[int] = field(default=0, metadata={'help': 'rank of the current worker'})
    distributed_backend: str = field(default='nccl', metadata={'help': 'distributed backend'})
    distributed_init_method: Optional[str] = field(default=None, metadata={'help': 'typically tcp://hostname:port that will be used to establish initial connetion'})
    distributed_port: int = field(default=(- 1), metadata={'help': 'port number (not required if using --distributed-init-method)'})
    device_id: int = field(default=0, metadata={'help': 'which GPU to use (usually configured automatically)', 'argparse_alias': '--local_rank'})
    distributed_no_spawn: bool = field(default=False, metadata={'help': 'do not spawn multiple processes even if multiple GPUs are visible'})
    # -- DDP behaviour --
    ddp_backend: DDP_BACKEND_CHOICES = field(default='c10d', metadata={'help': 'DistributedDataParallel backend'})
    bucket_cap_mb: int = field(default=25, metadata={'help': 'bucket size for reduction'})
    fix_batches_to_gpus: bool = field(default=False, metadata={'help': "don't shuffle batches between GPUs; this reduces overall randomness and may affect precision but avoids the cost of re-reading the data"})
    find_unused_parameters: bool = field(default=False, metadata={'help': 'disable unused parameter detection (not applicable to no_c10d ddp-backend'})
    fast_stat_sync: bool = field(default=False, metadata={'help': '[deprecated] this is now defined per Criterion'})
    broadcast_buffers: bool = field(default=False, metadata={'help': 'Copy non-trainable parameters between GPUs, such as batchnorm population statistics'})
    distributed_wrapper: DISTRIBUTED_WRAPPER_CHOICES = field(default='DDP', metadata={'help': 'DistributedDataParallel backend'})
    # -- SlowMo / LocalSGD --
    slowmo_momentum: Optional[float] = field(default=None, metadata={'help': 'SlowMo momentum term; by default use 0.0 for 16 GPUs, 0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs'})
    slowmo_algorithm: str = field(default='LocalSGD', metadata={'help': 'whether to use LocalSGD or SGP'})
    localsgd_frequency: int = field(default=3, metadata={'help': 'Local SGD allreduce frequency'})
    nprocs_per_node: int = field(default=max(1, torch.cuda.device_count()), metadata={'help': 'number of GPUs in each node. An allreduce operation across GPUs in a node is very fast. Hence, we do allreduce across GPUs in a node, and gossip across different nodes'})
    # -- pipeline model parallelism --
    pipeline_model_parallel: bool = field(default=False, metadata={'help': 'if set, use pipeline model parallelism across GPUs'})
    pipeline_balance: Optional[str] = field(default=None, metadata={'help': 'partition the model into N_K pieces, where each piece contains N_i layers. The sum(args.pipeline_balance) should equal the total number of layers in the model'})
    pipeline_devices: Optional[str] = field(default=None, metadata={'help': 'a list of device indices indicating which device to place each of the N_K partitions. The length of this list should equal the length of the --pipeline-balance argument'})
    pipeline_chunks: Optional[int] = field(default=0, metadata={'help': 'microbatch count for pipeline model parallelism'})
    pipeline_encoder_balance: Optional[str] = field(default=None, metadata={'help': 'partition the pipeline parallel encoder into N_K pieces, where each piece contains N_i layers. The sum(args.pipeline_encoder_balance) should equal the total number of encoder layers in the model'})
    pipeline_encoder_devices: Optional[str] = field(default=None, metadata={'help': 'a list of device indices indicating which device to place each of the N_K partitions. The length of this list should equal the length of the --pipeline-encoder-balance argument'})
    pipeline_decoder_balance: Optional[str] = field(default=None, metadata={'help': 'partition the pipeline parallel decoder into N_K pieces, where each piece contains N_i layers. The sum(args.pipeline_decoder_balance) should equal the total number of decoder layers in the model'})
    pipeline_decoder_devices: Optional[str] = field(default=None, metadata={'help': 'a list of device indices indicating which device to place each of the N_K partitions. The length of this list should equal the length of the --pipeline-decoder-balance argument'})
    pipeline_checkpoint: PIPELINE_CHECKPOINT_CHOICES = field(default='never', metadata={'help': 'checkpointing mode for pipeline model parallelism'})
    # -- misc --
    zero_sharding: ZERO_SHARDING_CHOICES = field(default='none', metadata={'help': 'ZeRO sharding'})
    tpu: bool = II('common.tpu')
def allocator_hparams():
    """Return the default PlacerParams hyperparameters for the hierarchical
    allocator controller (all values are tunable hyperparameters)."""
    return PlacerParams(hidden_size=512, forget_bias_init=1.0, grad_bound=1.0, lr=0.01, lr_dec=1.0, decay_steps=50, start_decay_step=400, optimizer_type='adam', name='hierarchical_controller', keep_prob=1.0, seed=1, model_size='small', random_prob=1.0, max_degree=100, epoches=10000, dropout=0.0, n_explore_samples=20, n_replay_samples=5, replay_greedy_sampling=True, n_policy_samples=10, train_ratio=0.8, restore=False, checkpoint=True, checkpoint_folder='checkpoints', cep_program='LogProcessing', graphsage_model='graphsage_maxpool', samples_1=4, samples_2=4, samples_3=0, samples_4=0, samples_5=0, dim_1=128, dim_2=128, strategy='policy', replay_weight=10.0, env_batch_size=1, embedding='original', feat_size=1, decoder='lstm', placement_file='tmp.json', consider_neighbor_placement=False, consider_device_utilization=False, weighed_neighbor_placement=True, utilization_max=2.0, device_scheme=0, real_baseline=True, pool_size=5, num_devices=5, metis_placement=None)
def gather_tensors_fake(tensor):
    """All-gather ``tensor`` across ranks and return the concatenation,
    doubled with a detached copy of itself.

    NOTE(review): the doubling via ``output.detach()`` presumably serves a
    contrastive/duplicated-batch trick — confirm against the caller.
    """
    tensors_gather = [torch.ones_like(tensor) for _ in range(comm.world_size)]
    # all_gather fills every slot, but its outputs carry no autograd graph...
    dist.all_gather(tensors_gather, tensor, async_op=False)
    # ...so restore this rank's slot with the original, graph-connected tensor.
    tensors_gather[comm.rank] = tensor
    output = torch.cat(tensors_gather, dim=0)
    output = torch.cat([output, output.detach()], 0)
    return output
class BaseClientFactoriesTests(unittest.TestCase):
    """Factory methods on BaseClient namespace metric names and reject
    names that cannot be encoded."""
    def setUp(self):
        transport = mock.Mock(spec=metrics.NullTransport)
        self.client = metrics.BaseClient(transport, 'namespace')
    def test_make_timer(self):
        timer = self.client.timer('some_timer')
        self.assertIsInstance(timer, metrics.Timer)
        self.assertEqual(timer.name, b'namespace.some_timer')
        with self.assertRaises(UnicodeEncodeError):
            self.client.timer('')
    def test_make_counter(self):
        counter = self.client.counter('some_counter')
        self.assertIsInstance(counter, metrics.Counter)
        self.assertEqual(counter.name, b'namespace.some_counter')
        with self.assertRaises(UnicodeEncodeError):
            self.client.counter('')
    def test_make_gauge(self):
        gauge = self.client.gauge('some_gauge')
        self.assertIsInstance(gauge, metrics.Gauge)
        self.assertEqual(gauge.name, b'namespace.some_gauge')
        with self.assertRaises(UnicodeEncodeError):
            self.client.gauge('')
    def test_make_histogram(self):
        histogram = self.client.histogram('some_histogram')
        self.assertIsInstance(histogram, metrics.Histogram)
        self.assertEqual(histogram.name, b'namespace.some_histogram')
        with self.assertRaises(UnicodeEncodeError):
            # Fix: exercise histogram(), not gauge() (copy-paste bug in the
            # original left histogram's encode path untested).
            self.client.histogram('')
class TestWaitForNavigation(BaseTestCase):
    """Integration tests for page.waitForNavigation across navigation kinds."""
    # NOTE(review): method name has a typo ("navigatoin") — kept, since
    # renaming would change which test id runs.
    async def test_wait_for_navigatoin(self):
        (await self.page.goto((self.url + 'empty')))
        # Start waiting *before* triggering the navigation.
        results = (await asyncio.gather(self.page.waitForNavigation(), self.page.evaluate('(url) => window.location.href = url', self.url)))
        response = results[0]
        self.assertEqual(response.status, 200)
        self.assertEqual(response.url, self.url)
    # NOTE(review): the bare string below looks like the reason argument of
    # a stripped "@unittest.skip(...)" decorator — left untouched.
    ('Need server-side implementation')
    async def test_both_domcontentloaded_loaded(self):
        pass
    async def test_click_anchor_link(self):
        (await self.page.goto((self.url + 'empty')))
        (await self.page.setContent('<a href="#foobar">foobar</a>'))
        results = (await asyncio.gather(self.page.waitForNavigation(), self.page.click('a')))
        # Same-document (fragment) navigation yields no response object.
        self.assertIsNone(results[0])
        self.assertEqual(self.page.url, (self.url + 'empty#foobar'))
    async def test_return_nevigated_response_reload(self):
        (await self.page.goto((self.url + 'empty')))
        navPromise = asyncio.ensure_future(self.page.waitForNavigation())
        (await self.page.reload())
        response = (await navPromise)
        self.assertEqual(response.url, (self.url + 'empty'))
    async def test_history_push_state(self):
        (await self.page.goto((self.url + 'empty')))
        (await self.page.setContent("\n <a onclick='javascript:pushState()'>SPA</a>\n <script>\n function pushState() { history.pushState({}, '', 'wow.html') }\n </script>\n "))
        results = (await asyncio.gather(self.page.waitForNavigation(), self.page.click('a')))
        # history.pushState is a same-document navigation: no response.
        self.assertIsNone(results[0])
        self.assertEqual(self.page.url, (self.url + 'wow.html'))
    async def test_history_replace_state(self):
        (await self.page.goto((self.url + 'empty')))
        (await self.page.setContent("\n <a onclick='javascript:replaceState()'>SPA</a>\n <script>\n function replaceState() {\n history.replaceState({}, '', 'replaced.html');\n }\n </script>\n "))
        results = (await asyncio.gather(self.page.waitForNavigation(), self.page.click('a')))
        self.assertIsNone(results[0])
        self.assertEqual(self.page.url, (self.url + 'replaced.html'))
    async def test_dom_history_back_forward(self):
        (await self.page.goto((self.url + 'empty')))
        (await self.page.setContent('\n <a id="back" onclick=\'javascript:goBack()\'>back</a>\n <a id="forward" onclick=\'javascript:goForward()\'>forward</a>\n <script>\n function goBack() { history.back(); }\n function goForward() { history.forward(); }\n history.pushState({}, \'\', \'/first.html\');\n history.pushState({}, \'\', \'/second.html\');\n </script>\n '))
        self.assertEqual(self.page.url, (self.url + 'second.html'))
        results_back = (await asyncio.gather(self.page.waitForNavigation(), self.page.click('a#back')))
        self.assertIsNone(results_back[0])
        self.assertEqual(self.page.url, (self.url + 'first.html'))
        results_forward = (await asyncio.gather(self.page.waitForNavigation(), self.page.click('a#forward')))
        self.assertIsNone(results_forward[0])
        self.assertEqual(self.page.url, (self.url + 'second.html'))
    async def test_subframe_issues(self):
        """Navigating a page with an iframe must not hang waitForNavigation."""
        navigationPromise = asyncio.ensure_future(self.page.goto((self.url + 'static/one-frame.html')))
        frame = (await waitEvent(self.page, 'frameattached'))
        fut = asyncio.get_event_loop().create_future()
        def is_same_frame(f):
            if (f == frame):
                fut.set_result(True)
        self.page.on('framenavigated', is_same_frame)
        asyncio.ensure_future(frame.evaluate('window.stop()'))
        (await navigationPromise)
# NOTE(review): the two leading ".parametrize" lines look like
# "@pytest.mark.parametrize" decorators whose "@pytest.mark" prefix was
# lost in extraction — left untouched.
.parametrize('username,password', users)
.parametrize('value_id', values)
def test_file(db, client, files, username, password, value_id):
    """File endpoint: permitted users download the file; other authenticated
    users get 404; anonymous requests get 401."""
    client.login(username=username, password=password)
    value = Value.objects.get(pk=value_id)
    url = reverse(urlnames['file'], args=[value_id])
    response = client.get(url)
    if ((value.value_type == VALUE_TYPE_FILE) and (value.project.id in view_value_permission_map.get(username, []))):
        assert (response.status_code == 200)
        assert (response['Content-Type'] == value.file_type)
        assert (response['Content-Disposition'] == f'attachment; filename={value.file_name}')
        assert (response.content == value.file.read())
    elif password:
        # Authenticated but unauthorized: 404 rather than 403 — presumably
        # to avoid leaking existence; verify against the API's policy.
        assert (response.status_code == 404)
    else:
        assert (response.status_code == 401)
class SchemasTest():
    """Tests for the lazy JSON-schema loader."""
    def test_lazyness(self):
        schema = schemas.LazySchema('oas-2.0.json')
        # Nothing is loaded at construction time.
        assert (schema._schema is None)
        # A membership test forces the lazy load (result intentionally unused).
        ('' in schema)
        assert (schema._schema is not None)
        assert isinstance(schema._schema, dict)
    def test_oas2_schema_is_present(self):
        assert hasattr(schemas, 'OAS_20')
        assert isinstance(schemas.OAS_20, schemas.LazySchema)
class TestWalletHistory_DoubleSpend(TestCaseForTestnet):
    """Restoring a wallet must yield the same balance whether or not a
    conflicting (double-spent) transaction was manually removed first."""
    # txid -> raw transaction hex (testnet fixtures).
    transactions = {'a3849040fba12a4389310b58a17b78025d81116a3338595bdefa1625': 'b7ebb40209c234344f57a3365669c8883a3d511fbde5155f11f64dfdffffff024c400fb50d21483fb5e088db90bf766ea79219fb377fef40420faaf5fc4a6297375c32403a9c2768e7029c8dbdefd510954b289829f8f778163b98a2a4039deb93c3b0beb834b00cd0add14fd02201c848315ddc52ced0350a981fe1a7f3cbba145c7a43805db2f126ed549eaaa50da3e812bfc91c11bd2a673ba4628c09f02d78f62157e56d788d1700', '0e2182eadcb0b80afa8baebd30dad42b5e58a24ceea17f1c': 'fade5b5938336a11815d02787ba1580b3189432aa11b150527f8409084afdffffff02acb893c9fbbfb18a3bcdda6f20af0bf8d8ba0df02c2b6cd405bb6bd1f90e9860bec173eb5bdb230a9721aa57396af73d399fb210d795e7dbb8ec1977e101ade035d4006b72bd6dfcf09468d1e8dab37b16b0dbbf776dbb5b20ed21c3bba75ec2a9ff230257d13a2493f6b7da066d8195dcddd1700', '2c9aa33d9c8ec649f9bfb84af027a5414b760be5231fe9eca4a95b9eb3f8a017': 'fade5b5938336a11815d02787ba1580b3189432aa11b150527f8409084afdffffff01d2410fa7c79744b908a5f6d6235f2eb46c174c84fd27c872f09115e57c6acb674cd4da6d0b26656ad967ddb2678ff409714b9502206d91b49cf778ced6ca9e40b4094fb57b86c86fac09ce46ce53aea4afa68ffb5b20ed21c3bba75ec2a9ff230257d13a2493f6b7da066d8195dcddd1700'}
    def setUp(self):
        super().setUp()
        self.config = SimpleConfig({'electrum_path': self.electrum_path})
    # NOTE(review): the ".object(...)" lines appear to be
    # "@mock.patch.object" decorators with the prefix lost in extraction —
    # left untouched.
    .object(wallet.Abstract_Wallet, 'save_db')
    def test_restoring_wallet_without_manual_delete(self, mock_save_db):
        """Add all three transactions; the double-spend resolves itself."""
        w = restore_wallet_from_text('small rapid pattern language comic denial donate extend tide fever burden barrel', path='if_this_exists_mocking_failed_', gap_limit=5, config=self.config)['wallet']
        for txid in self.transactions:
            tx = Transaction(self.transactions[txid])
            w.add_transaction(tx)
        self.assertEqual(999890, sum(w.get_balance()))
    .object(wallet.Abstract_Wallet, 'save_db')
    def test_restoring_wallet_with_manual_delete(self, mock_save_db):
        """Remove the replaced tx by hand before adding its replacement."""
        w = restore_wallet_from_text('small rapid pattern language comic denial donate extend tide fever burden barrel', path='if_this_exists_mocking_failed_', gap_limit=5, config=self.config)['wallet']
        txA = Transaction(self.transactions['a3849040fba12a4389310b58a17b78025d81116a3338595bdefa1625'])
        w.add_transaction(txA)
        txB = Transaction(self.transactions['0e2182eadcb0b80afa8baebd30dad42b5e58a24ceea17f1c'])
        w.add_transaction(txB)
        w.remove_transaction(txB.txid())
        txC = Transaction(self.transactions['2c9aa33d9c8ec649f9bfb84af027a5414b760be5231fe9eca4a95b9eb3f8a017'])
        w.add_transaction(txC)
        # Same final balance as the no-manual-delete scenario.
        self.assertEqual(999890, sum(w.get_balance()))
# NOTE(review): the ".skipif"/".usefixtures" lines look like
# "@pytest.mark.*" decorators with the prefix lost in extraction — left
# untouched.
.skipif((sys.platform != 'win32'), reason='no Windows registry')
.usefixtures('_mock_registry')
def test_pep514():
    """PEP 514 discovery must enumerate all interpreters in the mocked registry."""
    from virtualenv.discovery.windows.pep514 import discover_pythons
    interpreters = list(discover_pythons())
    assert (interpreters == [('ContinuumAnalytics', 3, 10, 32, 'C:\\Users\\user\\Miniconda3\\python.exe', None), ('ContinuumAnalytics', 3, 10, 64, 'C:\\Users\\user\\Miniconda3-64\\python.exe', None), ('PythonCore', 3, 9, 64, 'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\python.exe', None), ('PythonCore', 3, 9, 64, 'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\python.exe', None), ('PythonCore', 3, 8, 64, 'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python38\\python.exe', None), ('PythonCore', 3, 9, 64, 'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39\\python.exe', None), ('PythonCore', 3, 10, 32, 'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python310-32\\python.exe', None), ('PythonCore', 3, 12, 64, 'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python312\\python.exe', None), ('CompanyA', 3, 6, 64, 'Z:\\CompanyA\\Python\\3.6\\python.exe', None), ('PythonCore', 2, 7, 64, 'C:\\Python27\\python.exe', None), ('PythonCore', 3, 7, 64, 'C:\\Python37\\python.exe', None)])
class InvalidOpcode(Opcode):
    """Opcode stand-in for undefined byte values; executing it always aborts."""
    mnemonic = 'INVALID'
    gas_cost = 0
    def __init__(self, value: int) -> None:
        # Remember the raw byte so the error message can name it.
        self.value = value
        super().__init__()
    def __call__(self, computation: ComputationAPI) -> None:
        # program_counter has already advanced past this byte, hence the -1.
        raise InvalidInstruction(f'Invalid opcode 0x{self.value:x} {(computation.code.program_counter - 1)}')
def pytest_unconfigure(config: Config) -> None:
    """Pytest hook: tear down the faulthandler stderr redirection."""
    import faulthandler
    faulthandler.disable()
    if (fault_handler_stderr_fd_key in config.stash):
        # Close the duplicated stderr fd the plugin opened at configure time.
        os.close(config.stash[fault_handler_stderr_fd_key])
        del config.stash[fault_handler_stderr_fd_key]
    if (fault_handler_original_stderr_fd_key in config.stash):
        # Re-enable on the original stderr so faulthandler keeps working
        # after pytest exits.
        faulthandler.enable(config.stash[fault_handler_original_stderr_fd_key])
        del config.stash[fault_handler_original_stderr_fd_key]
def _get_frame_recognized_actions_view_from_video(frame_time: float, video_recognized_actions: VideoSoccerRecognizedActions, label_map: LabelMap) -> Optional[FrameRecognizedActionsView]:
    """Return the recognized-actions view closest in time to ``frame_time``.

    Returns None when the nearest scored time is more than half a chunk
    away, i.e. the frame falls outside any scored window.
    """
    action_times = video_recognized_actions.keys()
    # min over (distance, time) pairs; ties break toward the earlier time.
    (time_diff, best_time) = min(((abs((action_time - frame_time)), action_time) for action_time in action_times))
    if (time_diff > ((0.5 * COARSE_ACTIONS_CHUNK_SIZE) * SOCCERNET_FEATURES_FREQUENCY)):
        return None
    scores = video_recognized_actions[best_time].scores
    return FrameRecognizedActionsView(best_time, scores, label_map)
def test_do_not_mistake_JSDoc_for_django_comment(lexer):
    """JSDoc blocks containing '{*}' must not be tokenised as Django comments."""
    text = '/**\n * {*} cool\n */\n func = function(cool) {\n };\n\n /**\n * {*} stuff\n */\n fun = function(stuff) {\n };'
    tokens = lexer.get_tokens(text)
    assert (not any(((t[0] == Comment) for t in tokens)))
def count_conseq_double(mol):
    """Count adjacent bond pairs (in GetBonds() order) that are both double bonds."""
    count = 0
    prev_type = None
    for bond in mol.GetBonds():
        bond_type = bond.GetBondType()
        if ((prev_type == bond_type) and (bond_type == rdkit.Chem.rdchem.BondType.DOUBLE)):
            count += 1
        prev_type = bond_type
    return count
def integral_interval_probaCDF_recall(I, J, E):
    """Integrate the recall proba-CDF over J, split around its overlap with I."""
    def outer_part(piece):
        # Pieces outside I integrate the CDF-based recall measure.
        if piece is None:
            return 0
        return integral_mini_interval_Precall_CDFmethod(I, piece, E)
    def middle_part(piece):
        # The overlapping middle piece contributes its plain length.
        if piece is None:
            return 0
        return (max(piece) - min(piece))
    pieces = cut_into_three_func(J, I)
    return ((outer_part(pieces[0]) + middle_part(pieces[1])) + outer_part(pieces[2]))
def _generate_filler(key_type: bytes, hops_data: Sequence[OnionHopsDataSingle], shared_secrets: Sequence[bytes]) -> bytes:
    """Generate the onion-packet filler for a route (BOLT-04 style keys).

    The filler is the XOR of the tail of each intermediate hop's cipher
    stream; order and offsets here are protocol-critical.
    """
    num_hops = len(hops_data)
    # The final hop contributes no filler.
    filler_size = 0
    for hop_data in hops_data[:(- 1)]:
        filler_size += len(hop_data.to_bytes())
    filler = bytearray(filler_size)
    for i in range(0, (num_hops - 1)):
        # Offset of hop i's stream tail within the fixed-size hops blob.
        filler_start = HOPS_DATA_SIZE
        for hop_data in hops_data[:i]:
            filler_start -= len(hop_data.to_bytes())
        filler_end = (HOPS_DATA_SIZE + len(hops_data[i].to_bytes()))
        stream_key = get_bolt04_onion_key(key_type, shared_secrets[i])
        stream_bytes = generate_cipher_stream(stream_key, NUM_STREAM_BYTES)
        filler = xor_bytes(filler, stream_bytes[filler_start:filler_end])
        # Re-pad with zero bytes back to the full filler size for the
        # next iteration's XOR.
        filler += bytes((filler_size - len(filler)))
    return filler
class ConvLayer(torch.nn.Module):
    """Conv2d preceded by reflection padding (spatial size preserved at stride 1)."""
    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ConvLayer, self).__init__()
        # Pad by half the kernel so the convolution sees a full window at
        # every original pixel.
        self.reflection_pad = torch.nn.ReflectionPad2d(kernel_size // 2)
        self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
    def forward(self, x):
        return self.conv2d(self.reflection_pad(x))
# NOTE(review): the line below looks like the argument list of a stripped
# plugin-step decorator; its `name` says "Reboot" while id/description say
# terminate -- likely a copy-paste slip, confirm against the plugin registry.
(id='vmware-node-terminate', name='Reboot VMware VM', description='Wait for the node to be terminated', outputs={'success': NodeScenarioSuccessOutput, 'error': NodeScenarioErrorOutput})
def node_terminate(cfg: NodeScenarioConfig) -> typing.Tuple[(str, typing.Union[(NodeScenarioSuccessOutput, NodeScenarioErrorOutput)])]:
    """Stop and then release (terminate) the selected VMware nodes.

    Returns ('success', ...) with terminated nodes keyed by nanosecond
    timestamp, or ('error', ...) with the traceback of the first failure.
    """
    with kube_helper.setup_kubernetes(None) as cli:
        vsphere = vSphere(verify=cfg.verify_session)
        core_v1 = client.CoreV1Api(cli)
        node_list = kube_helper.get_node_list(cfg, kube_helper.Actions.TERMINATE, core_v1)
        nodes_terminated = {}
        for name in node_list:
            try:
                # Repeat the injection cfg.runs times per node.
                for _ in range(cfg.runs):
                    logging.info('Starting node_termination_scenario injection by first stopping the node')
                    vsphere.stop_instances(name)
                    vsphere.wait_until_stopped(name, cfg.timeout)
                    logging.info('Releasing the node with instance ID: %s ', name)
                    vsphere.release_instances(name)
                    vsphere.wait_until_released(name, cfg.timeout)
                    # Nanosecond timestamps avoid key collisions across runs.
                    nodes_terminated[int(time.time_ns())] = Node(name=name)
                    logging.info('Node with instance ID: %s has been released', name)
                    logging.info('node_terminate_scenario has been successfully injected!')
            except Exception as e:
                # Abort on the first failing node and report the traceback.
                logging.error('Failed to terminate node instance. Test Failed')
                logging.error('node_terminate_scenario injection failed! Error was: %s', str(e))
                return ('error', NodeScenarioErrorOutput(format_exc(), kube_helper.Actions.TERMINATE))
        return ('success', NodeScenarioSuccessOutput(nodes_terminated, kube_helper.Actions.TERMINATE))
class MiniImagenet(CombinationMetaDataset):
    """Mini-ImageNet exposed as a combination meta-dataset for few-shot tasks."""

    def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False, meta_split=None, transform=None, target_transform=None, dataset_transform=None, class_augmentations=None, download=False):
        # Build the per-class dataset first, then let the parent class combine
        # classes into N-way tasks.
        class_dataset = MiniImagenetClassDataset(root, meta_train=meta_train, meta_val=meta_val, meta_test=meta_test, meta_split=meta_split, transform=transform, class_augmentations=class_augmentations, download=download)
        super(MiniImagenet, self).__init__(class_dataset, num_classes_per_task, target_transform=target_transform, dataset_transform=dataset_transform)
def model_with_compat_bn_layers(training_as_placeholder, is_fused):
    """Build a graph mixing Keras layers with tf.compat.v1 batch-norm layers.

    training_as_placeholder: when True, the BN `training` flag is a
        placeholder_with_default(True) instead of a Python constant.
    is_fused: forwarded to each compat.v1 batch_normalization `fused` arg.

    NOTE(review): `outputs` is built but never returned; either construction
    continues elsewhere or a `return` is missing -- confirm.
    """
    training = True
    if training_as_placeholder:
        training = tf.compat.v1.placeholder_with_default(True, shape=())
    inputs = tf.keras.Input(shape=(32, 32, 3))
    x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
    x = tf.compat.v1.layers.batch_normalization(x, momentum=0.3, epsilon=0.65, training=training, fused=is_fused)
    x = tf.keras.layers.Conv2D(16, (2, 2))(x)
    # Nested variable scopes exercise BN variable naming under scopes.
    with tf.compat.v1.variable_scope('foo'):
        with tf.compat.v1.variable_scope('bar'):
            x = tf.compat.v1.layers.batch_normalization(x, momentum=0.4, epsilon=0.25, training=training, fused=is_fused)
    x = tf.nn.relu(x)
    # This BN always runs in inference mode (training=False).
    x = tf.compat.v1.layers.batch_normalization(x, momentum=0.5, epsilon=0.35, training=False, fused=is_fused)
    x = tf.keras.layers.Flatten()(x)
    outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name='keras_model_functional')(x)
def load(model, dataset_name, uid, root='models_checkpoints', optimizer=None):
    """Restore model (and optionally optimizer) state from a saved checkpoint.

    The checkpoint is expected at
    ``<root>/<dataset_name>/<model.name>/<dataset_name>_<model.name>_<uid>.pth.tar``
    and must contain a 'state_dict' key (and 'optimizer' when requested).
    """
    ckpt_dir = os.path.join(root, dataset_name, model.name)
    ckpt_file = os.path.join(ckpt_dir, '%s_%s_%s.pth.tar' % (dataset_name, model.name, uid))
    state = torch.load(ckpt_file)
    model.load_state_dict(state['state_dict'])
    if optimizer is not None:
        optimizer.load_state_dict(state['optimizer'])
def run_train():
    """Entry point: parse CLI args, set distributed env vars, launch training.

    Imports `config` / `Network` from the chosen model directory (prepended to
    sys.path) and hands off to multi_train.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', '-md', default=None, required=True, type=str)
    parser.add_argument('--resume_weights', '-r', default=None, type=int)
    # Single-node distributed defaults; NCCL_IB_DISABLE avoids InfiniBand probing.
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '8888'
    os.environ['NCCL_IB_DISABLE'] = '1'
    args = parser.parse_args()
    model_root_dir = os.path.join('../model/', args.model_dir)
    # Make the per-model directory importable before the local imports below.
    sys.path.insert(0, model_root_dir)
    from config import config
    from network import Network
    multi_train(args, config, Network)
class OptionViewSet(ModelViewSet):
    """CRUD API for Option objects with list/detail XML and templated export.

    NOTE(review): the bare `(detail=...)` tuples below look like the argument
    lists of stripped `@action` decorators -- confirm against the original.
    """
    permission_classes = ((HasModelPermission | HasObjectPermission),)
    serializer_class = OptionSerializer
    # Annotate usage counts and prefetch relations used by the serializers.
    queryset = Option.objects.annotate(values_count=models.Count('values')).annotate(projects_count=models.Count('values__project', distinct=True)).prefetch_related('optionsets', 'conditions', 'editors')
    filter_backends = (SearchFilter, DjangoFilterBackend)
    search_fields = ('uri', 'text')
    filterset_fields = ('uri', 'uri_prefix', 'uri_path', 'optionsets', 'optionsets__uri', 'optionsets__uri_path', 'comment')
    (detail=False)
    def index(self, request):
        # Lightweight list serialization for index views.
        queryset = self.filter_queryset(self.get_queryset())
        serializer = OptionIndexSerializer(queryset, many=True)
        return Response(serializer.data)
    (detail=False, url_path='export(/(?P<export_format>[a-z]+))?')
    def export(self, request, export_format='xml'):
        """Export the (filtered) option list as XML or a templated format."""
        queryset = self.filter_queryset(self.get_queryset())
        if (export_format == 'xml'):
            serializer = OptionExportSerializer(queryset, many=True)
            xml = OptionRenderer().render(serializer.data)
            return XMLResponse(xml, name='options')
        else:
            return render_to_format(self.request, export_format, 'options', 'options/export/options.html', {'options': queryset})
    (detail=True, url_path='export(/(?P<export_format>[a-z]+))?')
    def detail_export(self, request, pk=None, export_format='xml'):
        """Export a single option as XML or a templated format."""
        if (export_format == 'xml'):
            serializer = OptionExportSerializer(self.get_object())
            xml = OptionRenderer().render([serializer.data])
            return XMLResponse(xml, name=self.get_object().uri_path)
        else:
            return render_to_format(self.request, export_format, self.get_object().uri_path, 'options/export/options.html', {'options': [self.get_object()]})
class Product(Bloq):
    """Product of two integers: computes a*b into a fresh right-only register.

    a_bitsize / b_bitsize: widths of the two input registers; the result
    register is 2 * max(a_bitsize, b_bitsize) bits wide.
    """
    a_bitsize: int
    b_bitsize: int

    def signature(self):
        """Registers: inputs `a`, `b`; right-only `result` sized for the product."""
        return Signature([Register('a', self.a_bitsize), Register('b', self.b_bitsize), Register('result', (2 * max(self.a_bitsize, self.b_bitsize)), side=Side.RIGHT)])

    def short_name(self) -> str:
        return 'a*b'

    def _num_toffoli(self) -> int:
        # Single source of truth for the Toffoli count (was duplicated in
        # t_complexity and build_call_graph): 2*a*b - max(a, b).
        return ((2 * self.a_bitsize) * self.b_bitsize) - max(self.a_bitsize, self.b_bitsize)

    def t_complexity(self):
        # Each Toffoli is counted as 4 T gates.
        return TComplexity(t=(4 * self._num_toffoli()))

    def build_call_graph(self, ssa: 'SympySymbolAllocator') -> Set['BloqCountT']:
        # The bloq decomposes entirely into Toffolis.
        return {(Toffoli(), self._num_toffoli())}
class DEVISR(IntEnum):
    """Single-bit masks for a USB device interrupt status register.

    PEP_n and DMA_n presumably flag per-endpoint and per-DMA-channel
    interrupts -- verify against the device datasheet. No members are
    defined for bits 7-11 or 22-24.
    """
    SUSP = 1 << 0
    MSOF = 1 << 1
    SOF = 1 << 2
    EORST = 1 << 3
    WAKEUP = 1 << 4
    EORSM = 1 << 5
    UPRSM = 1 << 6
    # Endpoint interrupt bits start at bit 12.
    PEP_0 = 1 << 12
    PEP_1 = 1 << 13
    PEP_2 = 1 << 14
    PEP_3 = 1 << 15
    PEP_4 = 1 << 16
    PEP_5 = 1 << 17
    PEP_6 = 1 << 18
    PEP_7 = 1 << 19
    PEP_8 = 1 << 20
    PEP_9 = 1 << 21
    # DMA channel interrupt bits start at bit 25.
    DMA_1 = 1 << 25
    DMA_2 = 1 << 26
    DMA_3 = 1 << 27
    DMA_4 = 1 << 28
    DMA_5 = 1 << 29
    DMA_6 = 1 << 30
class Generator_Prune(nn.Module):
    """CycleGAN-style generator whose layer widths come from pruning masks.

    cfg_mask: sequence of per-layer masks; int(sum(mask)) gives the number of
    channels kept at that position. Indices 0-2 size the encoder, 21-22 the
    decoder (the indices in between correspond to the residual trunk).
    """

    def __init__(self, cfg_mask, n_residual_blocks=9):
        super(Generator_Prune, self).__init__()

        def width(idx):
            # Channel count kept by the pruning mask at position `idx`.
            return int(sum(cfg_mask[idx]))

        # Stem: reflection pad + 7x7 conv.
        layers = [nn.ReflectionPad2d(3), nn.Conv2d(3, width(0), 7), nn.InstanceNorm2d(width(0)), nn.ReLU(inplace=True)]
        # Two stride-2 downsampling convolutions.
        layers += [nn.Conv2d(width(0), width(1), 3, stride=2, padding=1), nn.InstanceNorm2d(width(1)), nn.ReLU(inplace=True)]
        layers += [nn.Conv2d(width(1), width(2), 3, stride=2, padding=1), nn.InstanceNorm2d(width(2)), nn.ReLU(inplace=True)]
        # Residual trunk at the bottleneck width.
        layers += [ResidualBlock(width(2)) for _ in range(n_residual_blocks)]
        # Two stride-2 upsampling transposed convolutions (masks 21/22).
        layers += [nn.ConvTranspose2d(width(2), width(21), 3, stride=2, padding=1, output_padding=1), nn.InstanceNorm2d(width(21)), nn.ReLU(inplace=True)]
        layers += [nn.ConvTranspose2d(width(21), width(22), 3, stride=2, padding=1, output_padding=1), nn.InstanceNorm2d(width(22)), nn.ReLU(inplace=True)]
        # Output head: reflection pad + 7x7 conv to RGB, tanh-squashed.
        layers += [nn.ReflectionPad2d(3), nn.Conv2d(width(22), 3, 7), nn.Tanh()]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
def main():
    """CLI entry point: parse args, read the net prototxt, and draw the net.

    Raises ValueError for an unrecognized --phase (anything other than
    TRAIN/TEST/ALL).
    """
    args = parse_args()
    net = caffe_pb2.NetParameter()
    # Context manager closes the prototxt promptly; the original
    # open(...).read() leaked the file handle.
    with open(args.input_net_proto_file) as proto_file:
        text_format.Merge(proto_file.read(), net)
    print('Drawing net to %s' % args.output_image_file)
    phase = None
    if args.phase == 'TRAIN':
        phase = caffe.TRAIN
    elif args.phase == 'TEST':
        phase = caffe.TEST
    elif args.phase != 'ALL':
        raise ValueError('Unknown phase: ' + args.phase)
    caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir, phase)
class SDFNetwork(nn.Module):
    """MLP mapping encoded 3-D coordinates to a signed-distance value.

    Layers listed in `skips` receive the encoded input concatenated to the
    hidden state. The final layer outputs a single value, optionally clamped
    to [-clip_sdf, clip_sdf].
    """

    def __init__(self, encoding='hashgrid', num_layers=3, skips=[], hidden_dim=64, clip_sdf=None):
        super().__init__()
        self.num_layers = num_layers
        self.skips = skips
        self.hidden_dim = hidden_dim
        self.clip_sdf = clip_sdf
        (self.encoder, self.in_dim) = get_encoder(encoding)
        layers = []
        for layer_idx in range(num_layers):
            # Input width: encoder output for the first layer, hidden width
            # otherwise, plus the encoder width again on skip layers.
            if layer_idx == 0:
                fan_in = self.in_dim
            elif layer_idx in self.skips:
                fan_in = self.hidden_dim + self.in_dim
            else:
                fan_in = self.hidden_dim
            fan_out = 1 if layer_idx == num_layers - 1 else self.hidden_dim
            layers.append(nn.Linear(fan_in, fan_out, bias=False))
        self.backbone = nn.ModuleList(layers)

    def forward(self, x):
        x = self.encoder(x)
        h = x
        for layer_idx, layer in enumerate(self.backbone):
            if layer_idx in self.skips:
                # Re-inject the encoded input on skip layers.
                h = torch.cat([h, x], dim=-1)
            h = layer(h)
            if layer_idx != self.num_layers - 1:
                h = F.relu(h, inplace=True)
        if self.clip_sdf is not None:
            h = h.clamp(-self.clip_sdf, self.clip_sdf)
        return h
class DataTrainingArguments():
    """Arguments controlling the data used for translation training/eval.

    Field `metadata['help']` strings document each option; defaults mirror the
    CLI defaults. NOTE(review): the `field(...)` declarations imply this was a
    `@dataclass` whose decorator was stripped from this listing -- confirm.
    """
    source_lang: str = field(default=None, metadata={'help': 'Source language id for translation.'})
    target_lang: str = field(default=None, metadata={'help': 'Target language id for translation.'})
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a jsonlines or csv file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file).'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    max_source_length: Optional[int] = field(default=1024, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    max_target_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    val_max_target_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
    num_beams: Optional[int] = field(default=None, metadata={'help': 'Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    ignore_pad_token_for_loss: bool = field(default=True, metadata={'help': 'Whether to ignore the tokens corresponding to padded labels in the loss computation or not.'})
    source_prefix: Optional[str] = field(default=None, metadata={'help': 'A prefix to add before every source text (useful for T5 models).'})
    def __post_init__(self):
        # At least one data source (hub dataset or local files) is required.
        if ((self.dataset_name is None) and (self.train_file is None) and (self.validation_file is None)):
            raise ValueError('Need either a dataset name or a training/validation file.')
        else:
            # Local files must be csv or json (checked by extension).
            if (self.train_file is not None):
                extension = self.train_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
            if (self.validation_file is not None):
                extension = self.validation_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
        # Fall back to max_target_length for validation when unset.
        if (self.val_max_target_length is None):
            self.val_max_target_length = self.max_target_length
class IDR(IntEnum):
    """Single-bit masks for a peripheral interrupt disable register.

    EOC0..EOC15 presumably correspond to 16 conversion channels and the high
    bits to status/DMA flags -- verify against the device datasheet. No
    members are defined for bits 16-23.
    """
    EOC0 = 1 << 0
    EOC1 = 1 << 1
    EOC2 = 1 << 2
    EOC3 = 1 << 3
    EOC4 = 1 << 4
    EOC5 = 1 << 5
    EOC6 = 1 << 6
    EOC7 = 1 << 7
    EOC8 = 1 << 8
    EOC9 = 1 << 9
    EOC10 = 1 << 10
    EOC11 = 1 << 11
    EOC12 = 1 << 12
    EOC13 = 1 << 13
    EOC14 = 1 << 14
    EOC15 = 1 << 15
    # Status/buffer flags occupy the high bits.
    DRDY = 1 << 24
    GOVRE = 1 << 25
    COMPE = 1 << 26
    ENDRX = 1 << 27
    RXBUFF = 1 << 28
# NOTE(review): `.requires_user_action` below looks like a stripped decorator
# (`@requires_user_action`); as written it is not valid Python -- confirm.
.requires_user_action
class EVENT_MOVE(InteractiveTestCase):
    """Interactive test: window-move events should be printed as they arrive."""
    def on_move(self, x, y):
        # Handler invoked when the window is moved.
        print(('Window moved to %dx%d.' % (x, y)))
    def test_move(self):
        """Open a window, pump events until closed, then ask the user to verify."""
        w = Window(200, 200)
        try:
            w.push_handlers(self)
            # Spin the event loop until the user closes the window.
            while (not w.has_exit):
                w.dispatch_events()
        finally:
            w.close()
        self.user_verify('Pass test?', take_screenshot=False)
class F13_TestCase(CommandTest):
    """Kickstart `sshpw` command parsing tests (F13 syntax version)."""
    command = 'sshpw'
    def runTest(self):
        # Basic --iscrypted round-trip.
        self.assert_parse('sshpw --username=someguy --iscrypted secrethandshake', 'sshpw --username=someguy --iscrypted secrethandshake\n')
        # Parsed objects compare by contents, not identity.
        self.assertFalse((self.assert_parse('sshpw --username=A --iscrypted secrethandshake') is None))
        self.assertTrue((self.assert_parse('sshpw --username=A --iscrypted secrethandshake') != self.assert_parse('sshpw --username=B --iscrypted secrethandshake')))
        self.assertFalse((self.assert_parse('sshpw --username=A --iscrypted secrethandshake') == self.assert_parse('sshpw --username=B --iscrypted secrethandshake')))
        # Missing/invalid arguments must fail to parse.
        self.assert_parse_error('sshpw')
        self.assert_parse_error('sshpw --username=someguy --bogus-option')
        self.assert_parse_error('sshpw --username=someguy pass-phrase --bogus-option')
        self.assert_parse_error('sshpw --username=someguy')
        self.assert_parse_error('sshpw --username=someguy --iscrypted=OMGSEKRITZ')
        self.assert_parse_error('sshpw --username=someguy --iscrypted')
        # --lock defaults to plaintext output; when both --plaintext and
        # --iscrypted appear, the last one on the line wins.
        self.assert_parse('sshpw --username=someguy --lock secrethandshake', 'sshpw --username=someguy --lock --plaintext secrethandshake\n')
        self.assert_parse('sshpw --username=someguy --plaintext secrethandshake', 'sshpw --username=someguy --plaintext secrethandshake\n')
        self.assert_parse('sshpw --username=someguy --plaintext --iscrypted secrethandshake', 'sshpw --username=someguy --iscrypted secrethandshake\n')
        self.assert_parse('sshpw --username=someguy --iscrypted --plaintext secrethandshake\n', 'sshpw --username=someguy --plaintext secrethandshake\n')
        self.assert_parse('sshpw --username=someguy --lock --plaintext secrethandshake', 'sshpw --username=someguy --lock --plaintext secrethandshake\n')
        self.assert_parse('sshpw --username=someguy --iscrypted --lock secrethandshake', 'sshpw --username=someguy --lock --iscrypted secrethandshake\n')
        self.assert_parse('sshpw --username=someguy --lock --iscrypted --plaintext secrethandshake', 'sshpw --username=someguy --lock --plaintext secrethandshake\n')
        self.assert_parse('sshpw --username=someguy --lock --plaintext --iscrypted secrethandshake', 'sshpw --username=someguy --lock --iscrypted secrethandshake\n')
        self.assert_parse('sshpw --username=someguy --plaintext --iscrypted --lock secrethandshake', 'sshpw --username=someguy --lock --iscrypted secrethandshake\n')
        self.assert_parse('sshpw --username=someguy --iscrypted --plaintext --lock secrethandshake', 'sshpw --username=someguy --lock --plaintext secrethandshake\n')
        # These flags take no values.
        self.assert_parse_error('sshpw --username=someguy --plaintext=ISEEENGLAND secrethandshake')
        self.assert_parse_error('sshpw --username=someguy --lock=NOKEYSFORYOU secrethandshake')
        self.assert_parse_error('sshpw --username=someguy --plaintext')
        self.assert_parse_error('sshpw --username=someguy --lock')
        # __str__ of the command reflects the accumulated user list.
        sshpw = self.handler().commands[self.command]
        sshpw.sshUserList.append('someguy')
        self.assertEqual(sshpw.__str__(), 'someguy')
def test_add_mod_n_protocols():
    """AddConstantMod equality, hashing and diagram protocols behave sanely."""
    # Invalid construction must raise with the documented message.
    with pytest.raises(ValueError, match='must be between'):
        _ = AddConstantMod(3, 10)
    gate_a = AddConstantMod(3, 5, 1)
    gate_b = AddConstantMod(3, 5, 2, cvs=[1, 0])
    # Value equality and hashing.
    assert gate_a == AddConstantMod(3, 5, 1)
    assert gate_a != gate_b
    assert hash(gate_a) != hash(gate_b)
    # cvs is normalized to a tuple.
    assert gate_b.cvs == (1, 0)
    # Circuit-diagram wire symbols: two control symbols plus the op label.
    expected_symbols = ('', '(0)') + ('Add_2_Mod_5',) * 3
    assert cirq.circuit_diagram_info(gate_b).wire_symbols == expected_symbols
def _str2bool(value):
    """Parse a command-line boolean string.

    argparse's `type=bool` is a well-known trap: bool('False') is True, so any
    non-empty argument -- including "False" -- enabled the flag. This converter
    accepts the usual spellings of true/false explicitly and raises
    ArgumentTypeError otherwise.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % (value,))

def parse_args():
    """Build and parse the Meta R-CNN training CLI; returns the Namespace.

    Flag names, dests and defaults are unchanged from the original; boolean
    flags now use `_str2bool` so e.g. `--cuda False` actually yields False
    (the original `type=bool` treated any non-empty string as True).
    """
    parser = argparse.ArgumentParser(description='Train Meta R-CNN network')
    parser.add_argument('--dataset', dest='dataset', help='training dataset:coco2017,coco,pascal_07_12', default='pascal_voc_0712', type=str)
    parser.add_argument('--net', dest='net', help='metarcnn', default='metarcnn', type=str)
    parser.add_argument('--start_epoch', dest='start_epoch', help='starting epoch', default=1, type=int)
    parser.add_argument('--epochs', dest='max_epochs', help='number of epochs to train', default=21, type=int)
    parser.add_argument('--disp_interval', dest='disp_interval', help='number of iterations to display', default=100, type=int)
    parser.add_argument('--checkpoint_interval', dest='checkpoint_interval', help='number of iterations to display', default=10000, type=int)
    parser.add_argument('--save_dir', dest='save_dir', help='directory to save models', default='./models', type=str)
    parser.add_argument('--nw', dest='num_workers', help='number of worker to load data', default=0, type=int)
    parser.add_argument('--cuda', dest='cuda', default=True, type=_str2bool, help='whether use CUDA')
    parser.add_argument('--bs', dest='batch_size', help='batch_size', default=1, type=int)
    parser.add_argument('--cag', dest='class_agnostic', default=False, type=_str2bool, help='whether perform class_agnostic bbox regression')
    parser.add_argument('--meta_train', dest='meta_train', default=False, type=_str2bool, help='whether perform meta training')
    parser.add_argument('--meta_loss', dest='meta_loss', default=False, type=_str2bool, help='whether perform adding meta loss')
    parser.add_argument('--phase', dest='phase', help='the phase of training process', default=1, type=int)
    parser.add_argument('--shots', dest='shots', help='the number meta input of PRN network', default=1, type=int)
    parser.add_argument('--meta_type', dest='meta_type', default=1, type=int, help='choose which sets of metaclass')
    parser.add_argument('--o', dest='optimizer', help='training optimizer', default='sgd', type=str)
    parser.add_argument('--lr', dest='lr', help='starting learning rate', default=0.001, type=float)
    parser.add_argument('--lr_decay_step', dest='lr_decay_step', help='step to do learning rate decay, unit is epoch', default=4, type=int)
    parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma', help='learning rate decay ratio', default=0.1, type=float)
    parser.add_argument('--s', dest='session', help='training session', default=1, type=int)
    parser.add_argument('--r', dest='resume', help='resume checkpoint or not', default=False, type=_str2bool)
    parser.add_argument('--checksession', dest='checksession', help='checksession to load model', default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch', help='checkepoch to load model', default=10, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint', help='checkpoint to load model', default=21985, type=int)
    parser.add_argument('--use_tfboard', dest='use_tfboard', help='whether use tensorflow tensorboard', default=True, type=_str2bool)
    parser.add_argument('--log_dir', dest='log_dir', help='directory to save logs', default='logs', type=str)
    args = parser.parse_args()
    return args
class TestPostgresqlCollector(CollectorTestCase):
    """Tests for PostgresqlCollector connection handling.

    NOTE(review): the bare `('postgres.psycopg2')` lines below look like the
    argument lists of stripped `@patch(...)` decorators (each such test takes
    a `psycopg2_mock` parameter) -- confirm against the original file.
    """
    def setUp(self, allowed_names=None):
        # Build a collector with an empty config.
        if (not allowed_names):
            allowed_names = []
        config = get_collector_config('PostgresqlCollector', {})
        self.collector = PostgresqlCollector(config, None)
    def test_import(self):
        self.assertTrue(PostgresqlCollector)
    ('postgres.psycopg2')
    def test_connect_with_password(self, psycopg2_mock):
        """Default config connects with an explicit password kwarg."""
        conn_mock = Mock()
        psycopg2_mock.connect.return_value = conn_mock
        ret = self.collector._connect('test_db')
        self.assertTrue(conn_mock.set_isolation_level.called)
        self.assertEqual(ret, conn_mock)
        psycopg2_mock.connect.assert_called_once_with(database='test_db', host='localhost', password='postgres', port=5432, sslmode='disable', user='postgres')
    ('postgres.psycopg2')
    def test_connect_with_pgpass(self, psycopg2_mock):
        """With password_provider=pgpass no password kwarg is passed."""
        config = get_collector_config('PostgresqlCollector', {'password_provider': 'pgpass'})
        self.collector = PostgresqlCollector(config, None)
        conn_mock = Mock()
        psycopg2_mock.connect.return_value = conn_mock
        ret = self.collector._connect('test_db')
        self.assertTrue(conn_mock.set_isolation_level.called)
        self.assertEqual(ret, conn_mock)
        psycopg2_mock.connect.assert_called_once_with(database='test_db', host='localhost', port=5432, sslmode='disable', user='postgres')
    ('postgres.psycopg2')
    def test_connect_error(self, psycopg2_mock):
        """Connection failures propagate as exceptions."""
        psycopg2_mock.connect.side_effect = Exception('Some db exc')
        with self.assertRaises(Exception):
            self.collector._connect('test_db')
class DocDB(object):
    """Sqlite-backed document store of titles plus serialized text.

    full_docs selects which column the DB exposes: paragraphs (full docs
    mode) vs sentences (Hotpot mode). Usable as a context manager; the
    connection is closed on exit.
    """
    def __init__(self, db_path=None, full_docs=False):
        self.path = (db_path or config.DOC_DB)
        self.full_docs = full_docs
        # check_same_thread=False: the connection may be shared across threads.
        self.connection = sqlite3.connect(self.path, check_same_thread=False)
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
    def path(self):
        # NOTE(review): unreachable on instances -- __init__ sets the instance
        # attribute `self.path`, which shadows this method. Possibly vestigial
        # or a stripped decorator; confirm against the original file.
        return self.path
    def close(self):
        self.connection.close()
    def get_doc_titles(self):
        """Return the titles of all stored documents."""
        cursor = self.connection.cursor()
        cursor.execute('SELECT title FROM documents')
        results = [r[0] for r in cursor.fetchall()]
        cursor.close()
        return results
    def get_doc_sentences(self, doc_title):
        """Return the deserialized sentences for a title, or None if absent."""
        if self.full_docs:
            raise ValueError('This DB is in full docs mode. Try `get_doc_paragraphs`')
        cursor = self.connection.cursor()
        cursor.execute('SELECT sentences FROM documents WHERE title = ?', (doc_title,))
        result = cursor.fetchone()
        cursor.close()
        if (result is None):
            return None
        return deserialize_object(result[0])
    def get_doc_paragraphs(self, doc_title):
        """Return the deserialized paragraphs for a title, or None if absent."""
        if (not self.full_docs):
            raise ValueError('This DB is in Hotpot mode. Try `get_doc_sentences`')
        cursor = self.connection.cursor()
        cursor.execute('SELECT paragraphs FROM documents WHERE title = ?', (doc_title,))
        result = cursor.fetchone()
        cursor.close()
        if (result is None):
            return None
        return deserialize_object(result[0])
def test_serializer_create(db):
    """Run ValueConflictValidator against an existing value with a bumped
    collection_index.

    NOTE(review): `project_id` and `attribute_path` are resolved from module
    scope (not visible here) -- confirm they are defined as constants/fixtures.
    """
    class MockedView():
        # Minimal stand-in exposing the `project` attribute the validator reads.
        project = Project.objects.get(id=project_id)
    value = Value.objects.get(project_id=project_id, snapshot=None, attribute__path=attribute_path)
    validator = ValueConflictValidator()
    serializer = ValueSerializer()
    serializer.context['view'] = MockedView()
    validator({'attribute': value.attribute, 'set_prefix': value.set_prefix, 'set_index': value.set_index, 'collection_index': (value.collection_index + 1)}, serializer)
class MultisourceLanguagePairDataset(data.LanguagePairDataset):
    """Language-pair dataset where each example has several source sentences."""
    def __getitem__(self, i):
        # Each item carries a list of source sentences instead of a single one.
        source = [src_sent.long() for src_sent in self.src[i]]
        res = {'id': i, 'source': source}
        if self.tgt:
            res['target'] = self.tgt[i].long()
        return res
    def collater(self, samples):
        return MultisourceLanguagePairDataset.collate(samples, self.src_dict.pad(), self.src_dict.eos(), (self.tgt is not None), self.left_pad_source, self.left_pad_target)
    def collate(samples, pad_idx, eos_idx, has_target=True, left_pad_source=True, left_pad_target=False):
        """Build a batch; sources are flattened sample-major then length-sorted.

        NOTE(review): defined without `self` yet called unbound via the class
        in `collater` -- presumably a stripped @staticmethod; confirm.
        """
        if (len(samples) == 0):
            return {}
        n_sources = len(samples[0]['source'])
        assert all(((len(sample['source']) == n_sources) for sample in samples)), 'All samples in a batch must have the same number of source sentences.'
        def merge(key, left_pad, source=False, move_eos_to_beginning=False):
            # Pad-and-stack either the flattened sources or the targets.
            if source:
                return data.data_utils.collate_tokens([s[key][src_id] for s in samples for src_id in range(n_sources)], pad_idx, eos_idx, left_pad, move_eos_to_beginning)
            else:
                return data.data_utils.collate_tokens([s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning)
        id = torch.LongTensor([s['id'] for s in samples])
        src_tokens = merge('source', left_pad=left_pad_source, source=True)
        # Sort the flattened source rows by length, descending.
        src_lengths = torch.LongTensor([s['source'][src_id].numel() for s in samples for src_id in range(n_sources)])
        (src_lengths, sort_order) = src_lengths.sort(descending=True)
        src_tokens = src_tokens.index_select(0, sort_order)
        # rev_order maps original flattened positions to sorted row indices;
        # striding by n_sources groups those indices per source slot (the
        # flattening above is sample-major: row = sample * n_sources + src_id).
        (_, rev_order) = sort_order.sort()
        srcs_ids = [rev_order[k::n_sources] for k in range(n_sources)]
        prev_output_tokens = None
        target = None
        ntokens = None
        if has_target:
            target = merge('target', left_pad=left_pad_target)
            # Teacher-forcing input: target with EOS moved to the front.
            prev_output_tokens = merge('target', left_pad=left_pad_target, move_eos_to_beginning=True)
            ntokens = sum((len(s['target']) for s in samples))
        return {'id': id, 'ntokens': ntokens, 'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths, 'src_ids': srcs_ids, 'prev_output_tokens': prev_output_tokens}, 'target': target}
class Rouge():
    """ROUGE-L scorer over tokenized sentences (LCS-based F-measure)."""

    def __init__(self):
        # beta weights recall over precision in the F-score.
        self.beta = 1.2

    def calc_score(self, candidate, refs):
        """Score one candidate (singleton list) against reference sentences."""
        assert len(candidate) == 1
        assert len(refs) > 0
        token_c = candidate[0].split(' ')
        precisions = []
        recalls = []
        for reference in refs:
            token_r = reference.split(' ')
            lcs = my_lcs(token_r, token_c)
            precisions.append(lcs / float(len(token_c)))
            recalls.append(lcs / float(len(token_r)))
        prec_max = max(precisions)
        rec_max = max(recalls)
        if prec_max != 0 and rec_max != 0:
            # Weighted harmonic mean of best precision and best recall.
            beta_sq = self.beta ** 2
            score = ((1 + beta_sq) * prec_max * rec_max) / float(rec_max + beta_sq * prec_max)
        else:
            score = 0.0
        return score

    def compute_score(self, gts, res):
        """Average ROUGE-L over all ids shared by `gts` and `res`.

        Returns (mean score, per-id score array).
        """
        assert sorted(gts.keys()) == sorted(res.keys())
        scores = []
        for img_id in sorted(gts.keys()):
            hypo = res[img_id]
            ref = gts[img_id]
            scores.append(self.calc_score(hypo, ref))
            # Sanity-check the expected container shapes.
            assert type(hypo) is list
            assert len(hypo) == 1
            assert type(ref) is list
            assert len(ref) >= 1
        average_score = np.mean(np.array(scores))
        return (average_score, np.array(scores))

    def method(self):
        return 'Rouge'
def render_missing_space_in_doctest(msg, _node, source_lines=None):
    """Yield annotated lines highlighting the doctest line referenced by *msg*."""
    error_line = msg.line
    # Two lines of leading context.
    yield from render_context(error_line - 2, error_line, source_lines)
    # The offending line itself, highlighted over its full width.
    yield (error_line, slice(None, None), LineType.ERROR, source_lines[error_line - 1])
    # Two lines of trailing context.
    yield from render_context(error_line + 1, error_line + 3, source_lines)
# NOTE(review): `.skipif(...)` below looks like a stripped decorator
# (`@pytest.mark.skipif`); as written it is not valid Python -- confirm.
.skipif((sys.platform == 'win32'), reason='symlinks to files not supported on windows')
def test_symlink_file(inwd: WorkDir) -> None:
    """A committed symlink to a file must be listed by find_files."""
    # Create adir/file1link -> ../file1 inside the working-dir fixture.
    ((inwd.cwd / 'adir') / 'file1link').symlink_to('../file1')
    inwd.add_and_commit()
    assert (set(find_files('adir')) == _sep({'adir/filea', 'adir/file1link'}))
# NOTE(review): the line below looks like the argument list of a stripped
# `@hydra.main` decorator -- confirm against the original file.
(config_path='config', config_name='config')
def main(opt):
    """Training entry point: build datamodule, logger and trainer, then fit SNARF."""
    print(opt.pretty())
    # Deterministic seeding, including dataloader workers.
    pl.seed_everything(42, workers=True)
    torch.set_num_threads(10)
    datamodule = hydra.utils.instantiate(opt.datamodule, opt.datamodule)
    datamodule.setup(stage='fit')
    # Persist dataset metadata for downstream scripts.
    np.savez('meta_info.npz', **datamodule.meta_info)
    data_processor = None
    if ('processor' in opt.datamodule):
        data_processor = hydra.utils.instantiate(opt.datamodule.processor, opt.datamodule.processor, meta_info=datamodule.meta_info)
    # Re-read the composed Hydra config so wandb logs the resolved values.
    with open('.hydra/config.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    logger = pl.loggers.WandbLogger(project='fast_snarf', config=config, group=opt.expname, name=str(opt.subject))
    checkpoint_path = './checkpoints/last.ckpt'
    # Only resume when a checkpoint exists and resuming was requested.
    if ((not os.path.exists(checkpoint_path)) or (not opt.resume)):
        checkpoint_path = None
    checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor=None, dirpath='./checkpoints', save_last=True, every_n_val_epochs=1)
    trainer = pl.Trainer(logger=logger, callbacks=[checkpoint_callback], accelerator=None, resume_from_checkpoint=checkpoint_path, **opt.trainer)
    model = SNARFModel(opt=opt.model, meta_info=datamodule.meta_info, data_processor=data_processor)
    trainer.fit(model, datamodule=datamodule)
class abstractmethod(IncludeMixin):
    """Descriptor recording a function's signature so implementations can be
    compared against it.

    Equality compares parameter (name, kind) lists, either against another
    abstractmethod or against a plain callable.
    """

    def __init__(self, func):
        if not callable(func):
            raise ABCException(f'Function is not callable: {func}')
        self.func = func
        self.args = self.getargs(func)

    def __eq__(self, other):
        # Compare signatures, not identities.
        if isinstance(other, abstractmethod):
            return self.args == other.args
        return self.args == self.getargs(other)

    def __get__(self, instance, owner=None):
        # Descriptor access returns the wrapped function itself.
        return self.func

    @staticmethod
    def getargs(func):
        # @staticmethod is required: without it the bound calls
        # `self.getargs(func)` above would pass `self` as `func` and raise
        # TypeError ("takes 1 positional argument but 2 were given").
        signature = inspect.signature(func)
        return [(param.name, param.kind) for param in signature.parameters.values()]
class CecaModule(nn.Module):
    """Circular ECA: efficient channel attention via a 1-D conv over pooled
    channel descriptors, with circular padding applied manually.
    """

    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'):
        super(CecaModule, self).__init__()
        if channels is not None:
            # Derive an odd kernel size adaptively from the channel count.
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        assert kernel_size % 2 == 1
        has_act = act_layer is not None
        # Padding is applied manually (circular) in forward, so the conv
        # itself uses padding=0.
        self.padding = (kernel_size - 1) // 2
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        # Global average pool -> (B, 1, C) channel descriptor.
        y = x.mean((2, 3)).view(x.shape[0], 1, -1)
        y = F.pad(y, (self.padding, self.padding), mode='circular')
        y = self.conv(y)
        # Gate and broadcast back over the spatial dims.
        y = self.gate(y).view(x.shape[0], -1, 1, 1)
        return x * y.expand_as(x)
class TestUpdateLon(object):
    """Unit tests for coords.update_longitude on pysat test instruments."""
    def setup_method(self):
        # Fresh state per test; the instrument is created inside each test.
        self.py_inst = None
        self.inst_time = pysat.instruments.pysat_testing._test_dates['']['']
        return
    def teardown_method(self):
        del self.py_inst, self.inst_time
        return
    # NOTE(review): `.parametrize(...)` below looks like a stripped
    # `@pytest.mark.parametrize` decorator -- confirm against the original.
    .parametrize('name', ['testing', 'testing_xarray', 'ndtesting', 'testmodel'])
    def test_update_longitude(self, name):
        """update_longitude should shift [0, 360) longitudes into [-180, 180)."""
        self.py_inst = pysat.Instrument(platform='pysat', name=name, use_header=True)
        self.py_inst.load(date=self.inst_time)
        # Precondition: data starts in the [0, 360) convention.
        assert np.all((self.py_inst.data['longitude'] < 360.0))
        assert np.all((self.py_inst.data['longitude'] >= 0.0))
        coords.update_longitude(self.py_inst, lon_name='longitude')
        # Postcondition: data now in the [-180, 180) convention.
        assert np.all((self.py_inst.data['longitude'] < 180.0))
        assert np.all((self.py_inst.data['longitude'] >= (- 180.0)))
        return
    def test_bad_lon_name_update_longitude(self):
        """An unknown longitude variable name should raise ValueError."""
        self.py_inst = pysat.Instrument(platform='pysat', name='testing', use_header=True)
        self.py_inst.load(date=self.inst_time)
        testing.eval_bad_input(coords.update_longitude, ValueError, 'unknown longitude variable name', [self.py_inst], {'lon_name': 'not longitude'})
        return
def test_conc_str() -> None:
    """Rendering a concatenation covers literals, negation, bounds and classes."""
    mults = (
        Mult(Charclass('a'), ONE),
        Mult(Charclass('b'), ONE),
        Mult(Charclass('c'), ONE),
        Mult(Charclass('d'), ONE),
        Mult(Charclass('e'), ONE),
        Mult(~Charclass('fg'), STAR),
        Mult(Charclass('h'), Multiplier(Bound(5), Bound(5))),
        Mult(Charclass('abcdefghijklmnopqrstuvwxyz'), PLUS),
    )
    assert str(Conc(*mults)) == 'abcde[^fg]*h{5}[a-z]+'
def temp_link_blob(repository_id, blob_digest, link_expiration_s):
    """Temporarily link the blob with `blob_digest` into the repository.

    Returns the ImageStorage row, or None when no blob with that digest
    exists. The lookup and link happen inside one DB transaction.
    """
    assert blob_digest
    with db_transaction():
        try:
            storage = ImageStorage.get(content_checksum=blob_digest)
        except ImageStorage.DoesNotExist:
            # Unknown digest: nothing to link.
            return None
        _temp_link_blob(repository_id, storage, link_expiration_s)
        return storage
class PPLScorer(Scorer):
    """Scorer tracking validation perplexity (lower is better)."""

    def __init__(self):
        # Start from +inf so the first real perplexity counts as an improvement.
        super(PPLScorer, self).__init__(float('inf'), 'ppl')

    def is_improving(self, stats):
        # Strictly lower perplexity than the best seen so far.
        return stats.ppl() < self.best_score

    def is_decreasing(self, stats):
        # Perplexity exceeds the best score recorded so far.
        return stats.ppl() > self.best_score

    def _caller(self, stats):
        return stats.ppl()
@then('I can iterate over the inline shape collection')
def then_can_iterate_over_inline_shape_collection(context):
    """Iterating the collection yields exactly 5 InlineShape instances.

    Bug fix: the first line was a bare parenthesized string; restored the
    behave ``@then(...)`` step decorator.
    """
    inline_shapes = context.inline_shapes
    shape_count = 0
    for inline_shape in inline_shapes:
        shape_count += 1
        assert isinstance(inline_shape, InlineShape)
    expected_count = 5
    assert (shape_count == expected_count), ('expected %d, got %d' % (expected_count, shape_count))
class EqualPointLineDistance(BaseSketch):
    # Sketch constraint: distance(point1, line1) must equal
    # distance(point2, line2).
    _id = 13  # constraint id used by the solver system
    _entityDef = (_p, _l, _p, _l)  # expects point, line, point, line
    _workplane = True  # this constraint requires a workplane
    _iconName = 'Assembly_ConstraintEqualPointLineDistance.svg'
    _tooltip = QT_TRANSLATE_NOOP('asm3', 'Add a "{}" to constrain the distance between a point and a\nline to be the same as the distance between another point\nand line.')
class ProjectMetadata():
    """Default metadata provider deriving values from a project object."""

    def __init__(self, project):
        self.project = project

    def __str__(self):
        return 'Default project metadata provider'

    def project_name(self):
        """Return the project name: the basename of the project directory.

        Bug fix: the original passed the bound method ``self.directory``
        itself (not its return value) to ``os.path.basename``.
        """
        return os.path.basename(self.directory())

    def directory(self):
        """Filesystem directory of the underlying project."""
        return self.project.directory

    def version(self):
        """Version from the chicken project when present, else 0.0."""
        if self.project.chicken_project:
            return self.project.chicken_project.version
        return VersionNumber('0.0')

    def extras(self):
        """Extra metadata entries; none by default."""
        return {}
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image-classification pipeline: predicts the class of an image.

    Bug fix: the decorator line was a bare ``_end_docstrings(...)`` call
    statement; restored the ``@add_end_docstrings(...)`` decorator form.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        # Restrict to image-classification heads for the active framework.
        self.check_model_type((TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if (self.framework == 'tf') else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING))

    def _sanitize_parameters(self, top_k=None):
        # ``top_k`` applies only to the postprocess step.
        postprocess_params = {}
        if (top_k is not None):
            postprocess_params['top_k'] = top_k
        return ({}, {}, postprocess_params)

    def __call__(self, images: Union[(str, List[str], 'Image.Image', List['Image.Image'])], **kwargs):
        """Classify a single image or a batch (paths, URLs or PIL images)."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        """Load the image and turn it into framework tensors."""
        image = load_image(image)
        model_inputs = self.feature_extractor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Convert logits into a list of {'score', 'label'} dicts, best first."""
        # Never ask for more classes than the model defines.
        if (top_k > self.model.config.num_labels):
            top_k = self.model.config.num_labels
        if (self.framework == 'pt'):
            probs = model_outputs.logits.softmax((- 1))[0]
            (scores, ids) = probs.topk(top_k)
        elif (self.framework == 'tf'):
            probs = tf.nn.softmax(model_outputs.logits, axis=(- 1))[0]
            topk = tf.math.top_k(probs, k=top_k)
            (scores, ids) = (topk.values.numpy(), topk.indices.numpy())
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{'score': score, 'label': self.model.config.id2label[_id]} for (score, _id) in zip(scores, ids)]
class DNSRecord(DNSEntry):
    """A DNS entry that carries a TTL and a creation timestamp.

    Abstract base: subclasses must implement ``__eq__`` and ``write``.
    """
    __slots__ = ('ttl', 'created')

    def __init__(self, name: str, type_: int, class_: int, ttl: Union[(float, int)], created: Optional[float]=None) -> None:
        super().__init__(name, type_, class_)
        self.ttl = ttl
        # Creation time defaults to "now"; current_time_millis suggests
        # millisecond units -- confirm against the module's time helpers.
        self.created = (created or current_time_millis())

    def __eq__(self, other: Any) -> bool:
        # Abstract: concrete record types define their own equality.
        raise AbstractMethodException

    def suppressed_by(self, msg: 'DNSIncoming') -> bool:
        """Return True if any answer in ``msg`` makes this record redundant."""
        answers = msg.answers()
        for record in answers:
            if self._suppressed_by_answer(record):
                return True
        return False

    def _suppressed_by_answer(self, other) -> bool:
        # An equal record with more than half our TTL remaining suppresses us.
        return ((self == other) and (other.ttl > (self.ttl / 2)))

    def get_expiration_time(self, percent: _int) -> float:
        """Time at which ``percent`` of this record's lifetime has elapsed.

        percent * ttl * 10 == (percent / 100) * ttl * 1000, i.e. a percentage
        of the TTL converted to the same unit as ``created``.
        """
        return (self.created + ((percent * self.ttl) * 10))

    def get_remaining_ttl(self, now: _float) -> Union[(int, float)]:
        """Remaining TTL in seconds at time ``now``; never negative."""
        remain = (((self.created + (_EXPIRE_FULL_TIME_MS * self.ttl)) - now) / 1000.0)
        return (0 if (remain < 0) else remain)

    def is_expired(self, now: _float) -> bool:
        """True when the record's full lifetime has elapsed at ``now``."""
        return ((self.created + (_EXPIRE_FULL_TIME_MS * self.ttl)) <= now)

    def is_stale(self, now: _float) -> bool:
        """True when the record has passed its staleness threshold."""
        return ((self.created + (_EXPIRE_STALE_TIME_MS * self.ttl)) <= now)

    def is_recent(self, now: _float) -> bool:
        """True when the record was created recently relative to ``now``."""
        return ((self.created + (_RECENT_TIME_MS * self.ttl)) > now)

    def reset_ttl(self, other) -> None:
        """Adopt another record's creation time and TTL."""
        self.set_created_ttl(other.created, other.ttl)

    def set_created_ttl(self, created: _float, ttl: Union[(float, int)]) -> None:
        """Set the creation time and TTL directly."""
        self.created = created
        self.ttl = ttl

    def write(self, out: 'DNSOutgoing') -> None:
        # Abstract: concrete record types serialize themselves.
        raise AbstractMethodException

    def to_string(self, other: Union[(bytes, str)]) -> str:
        """Render as '<ttl>/<remaining>,<payload>' via the entry formatter."""
        arg = f'{self.ttl}/{int(self.get_remaining_ttl(current_time_millis()))},{cast(Any, other)}'
        return DNSEntry.entry_to_string(self, 'record', arg)
def draw_meter(x, y, dx, dy, style=None, fill='bgcolor'):
    """Emit TikZ commands for a meter: background box, two arc ticks and a
    needle. ``fill='bgcolor'`` means "use the module-level background color".
    """
    global bgcolor
    if fill == 'bgcolor':
        fill = bgcolor
    draw_rectangle(x, y, dx, dy, None, style=style, fill=fill)
    radius = min(0.5 * dx, 0.5 * dy)
    # Optional extra TikZ style options appended after 'very thin'/arrow specs.
    extra = (',' + style) if style else ''
    arc_y = (y - (0.45 * dy)) + radius
    # Two short arcs mark the dial; the arrow is the needle at 80 degrees.
    print(('\\draw[very thin%s] (%f, %f) arc (90:150:%fpt);' % (extra, x, arc_y, radius)))
    print(('\\draw[very thin%s] (%f, %f) arc (90:30:%fpt);' % (extra, x, arc_y, radius)))
    print(('\\draw[->,>=stealth%s] (%f, %f) -- +(%i:%fpt);' % (extra, x, (y - (0.45 * dy)), 80, (radius * math.sqrt(3)))))
def get_default_config():
    """Return the default hyper-parameter dict for PPO (PyTorch) training."""
    # NOTE(review): 'num_timesteps' is int(.0) == 0, which would disable
    # training entirely; this looks like a mangled literal (e.g. int(3e7)).
    # Confirm the intended value before relying on this config.
    config = {'logdir': 'ppo_torch', 'idx': 0, 'seed': 0, 'num_timesteps': int(.0), 'episode_length': 1000, 'discounting': 0.97, 'learning_rate': 0.0003, 'entropy_cost': 0.01, 'unroll_length': 5, 'batch_size': 1024, 'num_minibatches': 32, 'num_update_epochs': 4, 'reward_scaling': 10, 'lambda_': 0.95, 'ppo_epsilon': 0.3, 'policy_hidden_layer_sizes': '32, 32, 32, 32', 'policy_activation': 'silu', 'v_hidden_layer_sizes': '256,256, 256, 256, 256', 'v_activation': 'silu', 'num_envs': 2048, 'eval_every': int(500000.0), 'env_name': 'ant'}
    return config
class TestProtLib(EvenniaTest):
    """Tests for the prototype storage helpers in protlib."""

    def setUp(self):
        super(TestProtLib, self).setUp()
        # Build a prototype from a test object carrying one attribute.
        self.obj1.attributes.add('testattr', 'testval')
        self.prot = spawner.prototype_from_object(self.obj1)

    def test_prototype_to_str(self):
        """The string form starts with the colored prototype-key header."""
        text = protlib.prototype_to_str(self.prot)
        self.assertTrue(text.startswith('|cprototype-key:|n'))

    def test_check_permission(self):
        pass

    def test_save_prototype(self):
        """Saving returns the prototype; a missing key fails validation."""
        saved = protlib.save_prototype(self.prot)
        self.assertEqual(saved, self.prot)
        self.prot['prototype_key'] = None
        self.assertRaises(protlib.ValidationError, protlib.save_prototype, self.prot)

    def test_search_prototype(self):
        """Search misses unknown keys and finds stored prototypes."""
        protlib.save_prototype(self.prot)
        self.assertFalse(protlib.search_prototype('NotFound'))
        self.assertTrue(protlib.search_prototype())
        self.assertEqual(protlib.search_prototype(self.prot['prototype_key']), [self.prot])
def parse_kinetics_splits(level, dataset):
    """Parse Kinetics annotation CSVs into (train, val, test) video lists.

    Args:
        level: 1 for flat video names, 2 to prefix each video with its label
            directory.
        dataset: dataset folder name under ``data/``.

    Returns:
        A 1-tuple containing (train_list, val_list, test_list); each list
        holds (video_name, label) pairs, with label -1 for the test split.

    Fix: the original opened each CSV with a bare ``open`` and never closed
    the handles; files are now read through a ``with`` block.
    """

    def convert_label(s, keep_whitespaces=False):
        # Strip quotes; optionally turn spaces into underscores for dir names.
        if not keep_whitespaces:
            return s.replace('"', '').replace(' ', '_')
        return s.replace('"', '')

    def line_to_map(x, test=False):
        # CSV columns: label, youtube_id, time_start, time_end, ...
        if test:
            video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
            label = (- 1)
            return (video, label)
        video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
        if (level == 2):
            video = f'{convert_label(x[0])}/{video}'
        else:
            assert (level == 1)
        label = class_mapping[convert_label(x[0])]
        return (video, label)

    def read_rows(path):
        # Read annotation rows, skipping the header line; the file handle is
        # closed deterministically.
        with open(path) as f:
            reader = csv.reader(f)
            next(reader)
            return list(reader)

    train_file = f'data/{dataset}/annotations/kinetics_train.csv'
    val_file = f'data/{dataset}/annotations/kinetics_val.csv'
    test_file = f'data/{dataset}/annotations/kinetics_test.csv'
    # Class ids are assigned by sorted label name, derived from the train split.
    labels_sorted = sorted({convert_label(row[0]) for row in read_rows(train_file)})
    class_mapping = {label: i for (i, label) in enumerate(labels_sorted)}
    train_list = [line_to_map(x) for x in read_rows(train_file)]
    val_list = [line_to_map(x) for x in read_rows(val_file)]
    test_list = [line_to_map(x, test=True) for x in read_rows(test_file)]
    splits = ((train_list, val_list, test_list),)
    return splits
def import_optionset(element, save=False, user=None):
    """Create or update an OptionSet from an imported element dict.

    Args:
        element: dict of imported fields; must contain 'uri'.
        save: when True and no validation errors were recorded, persist the
            optionset and its related objects.
        user: optional user checked against the optionset's permissions.

    Returns:
        The (possibly unsaved) OptionSet instance.
    """
    # EAFP: fetch an existing optionset by URI, or start a fresh one.
    try:
        optionset = OptionSet.objects.get(uri=element.get('uri'))
    except OptionSet.DoesNotExist:
        optionset = OptionSet()
    set_common_fields(optionset, element)
    optionset.order = (element.get('order') or 0)
    optionset.provider_key = (element.get('provider_key') or '')
    # validate_instance/check_permissions presumably record problems in
    # element['errors'] (checked below) rather than raising -- confirm.
    validate_instance(optionset, element, OptionSetLockedValidator, OptionSetUniqueURIValidator)
    check_permissions(optionset, element, user)
    if (save and (not element.get('errors'))):
        if optionset.id:
            element['updated'] = True
            logger.info('OptionSet %s updated.', element.get('uri'))
        else:
            element['created'] = True
            logger.info('OptionSet created with uri %s.', element.get('uri'))
        # Save first: the m2m relations below require a primary key.
        optionset.save()
        set_m2m_instances(optionset, 'conditions', element)
        set_m2m_through_instances(optionset, 'options', element, 'optionset', 'option', 'optionset_options')
        optionset.editors.add(Site.objects.get_current())
    return optionset
class TestOOVQE(QiskitChemistryTestCase):
    """Tests of the orbital-optimized VQE ground-state solver.

    Fix: two ``assertLessEqual`` calls passed a stray positional ``4``;
    ``assertLessEqual`` has no ``places`` parameter, so the 4 was silently
    used as the failure *message*. The stray argument is removed.
    """

    def setUp(self):
        super().setUp()
        # Molecular test data: H4, LiH, and H4 with an unrestricted-HF reference.
        self.driver1 = HDF5Driver(hdf5_input=self.get_resource_path('test_oovqe_h4.hdf5'))
        self.driver2 = HDF5Driver(hdf5_input=self.get_resource_path('test_oovqe_lih.hdf5'))
        self.driver3 = HDF5Driver(hdf5_input=self.get_resource_path('test_oovqe_h4_uhf.hdf5'))
        # Reference energies that computed results are compared against.
        self.energy1_rotation = (- 3.0104)
        self.energy1 = (- 2.77)
        self.energy2 = (- 7.7)
        self.energy3 = (- 2.5)
        self.initial_point1 = [0.039374, (- 0.), (- 0.), 0., 0., (- 0.), 0., (- 0.), (- 0.), 0.]
        self.seed = 50
        self.optimizer = COBYLA(maxiter=1)
        self.transformation1 = FermionicTransformation(qubit_mapping=FermionicQubitMappingType.JORDAN_WIGNER, two_qubit_reduction=False)
        self.transformation2 = FermionicTransformation(qubit_mapping=FermionicQubitMappingType.JORDAN_WIGNER, two_qubit_reduction=False, freeze_core=True)
        self.quantum_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'), shots=1, seed_simulator=self.seed, seed_transpiler=self.seed)

    def test_orbital_rotations(self):
        """Energy after orbital rotation matches the reference to 4 places."""
        optimizer = COBYLA(maxiter=1)
        solver = VQEUCCSDFactory(quantum_instance=self.quantum_instance, optimizer=optimizer, excitation_type='d', same_spin_doubles=False, method_doubles='pucc')
        calc = OrbitalOptimizationVQE(self.transformation1, solver, iterative_oo=False, initial_point=self.initial_point1)
        algo_result = calc.solve(self.driver1)
        self.assertAlmostEqual(algo_result.computed_electronic_energy, self.energy1_rotation, 4)

    def test_oovqe(self):
        """OOVQE reaches at most the reference H4 energy."""
        optimizer = COBYLA(maxiter=3, rhobeg=0.01)
        solver = VQEUCCSDFactory(quantum_instance=self.quantum_instance, optimizer=optimizer, excitation_type='d', same_spin_doubles=False, method_doubles='pucc')
        calc = OrbitalOptimizationVQE(self.transformation1, solver, iterative_oo=False, initial_point=self.initial_point1)
        algo_result = calc.solve(self.driver1)
        self.assertLessEqual(algo_result.computed_electronic_energy, self.energy1)

    def test_iterative_oovqe(self):
        """Iterative OOVQE reaches at most the reference H4 energy."""
        optimizer = COBYLA(maxiter=2, rhobeg=0.01)
        solver = VQEUCCSDFactory(quantum_instance=self.quantum_instance, optimizer=optimizer, excitation_type='d', same_spin_doubles=False, method_doubles='pucc')
        calc = OrbitalOptimizationVQE(self.transformation1, solver, iterative_oo=True, initial_point=self.initial_point1, iterative_oo_iterations=2)
        algo_result = calc.solve(self.driver1)
        self.assertLessEqual(algo_result.computed_electronic_energy, self.energy1)

    def test_oovqe_with_frozen_core(self):
        """OOVQE with a frozen core reaches at most the reference LiH energy."""
        optimizer = COBYLA(maxiter=2, rhobeg=1)
        solver = VQEUCCSDFactory(quantum_instance=self.quantum_instance, optimizer=optimizer, excitation_type='d', same_spin_doubles=False, method_doubles='pucc')
        calc = OrbitalOptimizationVQE(self.transformation2, solver, iterative_oo=False)
        algo_result = calc.solve(self.driver2)
        self.assertLessEqual(((algo_result.computed_electronic_energy + self.transformation2._energy_shift) + self.transformation2._nuclear_repulsion_energy), self.energy2)

    def test_oovqe_with_unrestricted_hf(self):
        """OOVQE on a UHF reference reaches at most the reference energy."""
        optimizer = COBYLA(maxiter=2, rhobeg=0.01)
        solver = VQEUCCSDFactory(quantum_instance=self.quantum_instance, optimizer=optimizer, excitation_type='d', same_spin_doubles=False, method_doubles='pucc')
        calc = OrbitalOptimizationVQE(self.transformation1, solver, iterative_oo=False)
        algo_result = calc.solve(self.driver3)
        self.assertLessEqual(algo_result.computed_electronic_energy, self.energy3)

    def test_oovqe_with_unsupported_varform(self):
        """A non-UCCSD variational form should raise AquaError."""
        optimizer = COBYLA(maxiter=2, rhobeg=0.01)
        solver = VQE(var_form=RealAmplitudes(), optimizer=optimizer, quantum_instance=self.quantum_instance)
        calc = OrbitalOptimizationVQE(self.transformation1, solver, iterative_oo=False)
        with self.assertRaises(AquaError):
            calc.solve(self.driver3)

    def test_oovqe_with_vqe_uccsd(self):
        """OOVQE with a pre-built VQE-UCCSD solver reaches the H4 reference."""
        optimizer = COBYLA(maxiter=3, rhobeg=0.01)
        solver_factory = VQEUCCSDFactory(quantum_instance=self.quantum_instance, optimizer=optimizer, excitation_type='d', same_spin_doubles=False, method_doubles='pucc')
        self.transformation1.transform(self.driver1)
        solver = solver_factory.get_solver(self.transformation1)
        calc = OrbitalOptimizationVQE(self.transformation1, solver, iterative_oo=False, initial_point=self.initial_point1)
        algo_result = calc.solve(self.driver1)
        self.assertLessEqual(algo_result.computed_electronic_energy, self.energy1)
class Record(object):
    """One recorded data block with helpers to estimate record time, build
    traces and decode embedded GPS information.

    Fix: ``time``, ``gps`` and ``gps_time_or_none`` are consumed
    attribute-style throughout this class (``tmin=self.time``,
    ``self.gps.time``, ``self.gps_time_or_none`` inside tuples), yet were
    defined as plain methods, so bound-method objects were passed where
    values were expected. Restored the evidently-stripped ``@property``
    decorators.
    """

    def __init__(self, mod, mde, dat, sum, values):
        self._mod = mod
        self._mde = mde
        self._dat = dat
        self._sum = sum
        self._values = values
        self._approx_system_time = None
        self._approx_gps_time = None
        self._gps = None

    def set_approx_times(self, approx_system_time, approx_gps_time, measured_system_time):
        """Attach externally estimated/measured timestamps to this record."""
        self._approx_system_time = approx_system_time
        self._approx_gps_time = approx_gps_time
        self._measured_system_time = measured_system_time

    @property
    def time(self):
        """Record time: header value if set, else the approximate system time."""
        if (self._mod.reserved1 != 0):
            return float(self._mod.reserved1)
        return self._approx_system_time

    def traces(self):
        """Build one trace per component plus the delay traces."""
        traces = []
        for i in range(self._mod.ncomps):
            # NOTE(review): stride 3 assumes interleaved samples with
            # ncomps == 3 -- confirm for other component counts.
            tr = trace.Trace('', 'ed', '', ('p%i' % i), deltat=(float(self._mod.ncomps) / self._mod.sample_rate), tmin=self.time, ydata=self._values[i::3])
            traces.append(tr)
        traces.extend(self.traces_delays())
        return traces

    def traces_delays(self):
        """One single-sample trace per available timing offset estimate."""
        traces = []
        for (name, val) in (('gp', self.gps_time_or_none), ('sm', self._measured_system_time), ('sp', self._approx_system_time)):
            if (val is not None):
                tr = trace.Trace('', 'ed', name, 'del', deltat=1.0, tmin=self.time, ydata=num.array([(val - self.time)]))
                traces.append(tr)
        return traces

    def _gps_messages(self):
        # Yield parsed GPS messages framed as '>XX...<' lines.
        for line in self._mde.gps_message.splitlines():
            if ((len(line) > 4) and (line[0] == '>') and (line.rstrip()[(- 1)] == '<')):
                (yield GPSFormat[line[2:4]].unpack(line[2:]))

    @property
    def gps(self):
        """Decoded GPSRecord (cached); raises NoGPS when unavailable."""
        if (self._mod.block_count != self._mod.gps_message_block_count):
            raise NoGPS()
        if (self._gps is not None):
            return self._gps
        kwargs = {}
        for mess in self._gps_messages():
            kwargs[mess.type.lower()] = mess
        # A complete fix needs exactly the al/pv/st/tm message set.
        if (sorted(kwargs.keys()) == ['al', 'pv', 'st', 'tm']):
            self._gps = GPSRecord(**kwargs)
            return self._gps
        else:
            raise NoGPS()

    @property
    def gps_time_or_none(self):
        """GPS time, or None when no usable GPS fix is present."""
        try:
            return self.gps.time
        except GPSError:
            return None

    def __str__(self):
        return ('\n'.join([('%s' % str(x)) for x in (self._mod, self._mde)]) + '\n')

    def str_times(self):
        """Human-readable summary of estimated vs measured GPS/system times."""
        return ('--- Record ---\nTime GPS: %s (estimated) %s (measured)\nTime system: %s (estimated) %s (measured)\n' % tuple([stime_none_aware(t) for t in (self._approx_gps_time, self.gps_time_or_none, self._approx_system_time, self._measured_system_time)]))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.