code stringlengths 281 23.7M |
|---|
def _resolve_looppart(parts, assign_path, context):
    """Resolve the value(s) a looped, possibly-nested assignment target binds to.

    *parts* are the inferred values of the iterated object; *assign_path* is a
    list of indices describing the position of the target inside nested
    tuple/list unpacking.  Yields every node that may be assigned.
    """
    # Copy the path: this generator recurses and must not mutate the caller's list.
    assign_path = assign_path[:]
    index = assign_path.pop(0)
    for part in parts:
        if isinstance(part, util.UninferableBase):
            continue
        if (not hasattr(part, 'itered')):
            # not an iterable node: nothing to unpack from it
            continue
        try:
            itered = part.itered()
        except TypeError:
            continue
        try:
            # If the element at this index is a simple Const/Name, treat the
            # whole part as a single candidate instead of iterating elements.
            if isinstance(itered[index], (nodes.Const, nodes.Name)):
                itered = [part]
        except IndexError:
            pass
        for stmt in itered:
            index_node = nodes.Const(index)
            try:
                assigned = stmt.getitem(index_node, context)
            except (AttributeError, AstroidTypeError, AstroidIndexError):
                continue
            if (not assign_path):
                # Path fully consumed: this is a value bound to the target.
                (yield assigned)
            elif isinstance(assigned, util.UninferableBase):
                break
            else:
                # Descend into the remaining levels of the unpacking.
                try:
                    (yield from _resolve_looppart(assigned.infer(context), assign_path, context))
                except InferenceError:
                    break
def metrics_traversal_order(state_dict: Dict[(str, Dict[(str, TState)])]) -> List[Tuple[(str, str)]]:
    """Return (outer_key, inner_key) pairs in deterministic sorted order."""
    return [
        (outer_key, inner_key)
        for outer_key in sorted(state_dict)
        for inner_key in sorted(state_dict[outer_key])
    ]
# FIX: the original line started with a bare `.parametrize(...)` (SyntaxError);
# the `@pytest.mark` prefix was evidently stripped in the source dump.
@pytest.mark.parametrize('val', [set_test_value(pt.lscalar(), np.array(6, dtype='int64'))])
def test_Bartlett(val):
    """Numba backend of `extra_ops.bartlett` matches the Python backend."""
    g = extra_ops.bartlett(val)
    g_fg = FunctionGraph(outputs=[g])
    # Feed symbolic inputs their test values; shared/constant inputs need none.
    compare_numba_and_py(
        g_fg,
        [i.tag.test_value for i in g_fg.inputs if (not isinstance(i, (SharedVariable, Constant)))],
        assert_fn=(lambda x, y: np.testing.assert_allclose(x, y, atol=1e-15)),
    )
def extract_tarinfo(tarfile, class_to_idx=None, sort=True):
    """Collect image members of an opened tar archive together with class targets.

    The parent directory name of each member serves as its class label.
    Returns a tuple ``(tarinfo_and_targets, class_to_idx)``.
    """
    extensions = get_img_extensions(as_set=True)
    files = []
    labels = []
    for member in tarfile.getmembers():
        if not member.isfile():
            continue
        dirname, basename = os.path.split(member.path)
        ext = os.path.splitext(basename)[1]
        if ext.lower() in extensions:
            files.append(member)
            labels.append(os.path.basename(dirname))
    if class_to_idx is None:
        # Build the mapping from the labels we found, in natural sort order.
        sorted_labels = sorted(set(labels), key=natural_key)
        class_to_idx = {name: index for index, name in enumerate(sorted_labels)}
    tarinfo_and_targets = [
        (member, class_to_idx[label])
        for member, label in zip(files, labels)
        if label in class_to_idx
    ]
    if sort:
        tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda pair: natural_key(pair[0].path))
    return (tarinfo_and_targets, class_to_idx)
class TestIsRoot():
    """Tests for filescheme.is_root on Windows and POSIX style paths.

    FIX: the bare `.windows` / `.posix` / `.parametrize` lines were
    SyntaxErrors; the `@pytest.mark` prefixes were stripped in the dump.
    """

    @pytest.mark.windows
    @pytest.mark.parametrize('directory, is_root', [('C:\\foo\\bar', False), ('C:\\foo\\', False), ('C:\\foo', False), ('C:\\', True)])
    def test_windows(self, directory, is_root):
        assert (filescheme.is_root(directory) == is_root)

    @pytest.mark.posix
    @pytest.mark.parametrize('directory, is_root', [('/foo/bar', False), ('/foo/', False), ('/foo', False), ('/', True)])
    def test_posix(self, directory, is_root):
        assert (filescheme.is_root(directory) == is_root)
# FIX: the bare `.network` line was a SyntaxError; `@pytest.mark` was stripped.
@pytest.mark.network
def test_prepare_directory_with_extensions(config: Config, config_cache_dir: Path, artifact_cache: ArtifactCache, fixture_dir: FixtureDirGetter) -> None:
    """Building a wheel from a source directory with extensions (needs network)."""
    env = EnvManager.get_system_env()
    chef = Chef(artifact_cache, env, Factory.create_pool(config))
    archive = fixture_dir('extended_with_no_setup').resolve()
    wheel = chef.prepare(archive)
    # Wheels are built into a subdirectory of the system temp dir.
    assert (wheel.parent.parent == Path(tempfile.gettempdir()))
    assert (wheel.name == f'extended-0.1-{env.supported_tags[0]}.whl')
    os.unlink(wheel)
def metrics_df_from_toml_path(toml_path, min_segment_dur, device='cuda', spect_key='s', timebins_key='t'):
    """Evaluate the model(s) configured in a vak .toml file and tabulate metrics.

    For every batch (one spectrogram file, batch_size=1) metrics are computed
    four ways: on the raw network output, after a majority-vote transform,
    after removing segments shorter than ``min_segment_dur``, and after both
    clean-ups combined.

    Returns
    -------
    tuple
        ``(df, error_position_distribution, num_err_bin, t1)``: a DataFrame of
        per-file metric records, the distances from each error time bin to the
        nearest true on/offset, per-file error-bin counts, and the time value
        of the second time bin of the last file processed.
    """
    toml_path = Path(toml_path)
    cfg = config.parse.from_toml(toml_path)
    with cfg.eval.labelmap_path.open('r') as f:
        labelmap = json.load(f)
    model_config_map = config.models.map_from_path(toml_path, cfg.eval.models)
    item_transform = transforms.get_defaults('eval', spect_standardizer=None, window_size=cfg.dataloader.window_size, return_padding_mask=True)
    eval_dataset = VocalDataset.from_csv(csv_path=cfg.eval.csv_path, split='test', labelmap=labelmap, spect_key=spect_key, timebins_key=timebins_key, item_transform=item_transform)
    eval_data = torch.utils.data.DataLoader(dataset=eval_dataset, shuffle=False, batch_size=1, num_workers=cfg.eval.num_workers)
    timebin_dur = io.dataframe.validate_and_get_timebin_dur(pd.read_csv(cfg.eval.csv_path))
    input_shape = eval_dataset.shape
    if (len(input_shape) == 4):
        # drop the leading dimension -- presumably a batch/window axis; TODO confirm
        input_shape = input_shape[1:]
    models_map = models.from_model_config_map(model_config_map, num_classes=len(labelmap), input_shape=input_shape)
    if (device is None):
        device = vak.device.get_default_device()
    records = defaultdict(list)  # one list of values per output column
    to_long_tensor = transforms.ToLongTensor()
    for (model_name, model) in models_map.items():
        model.load(cfg.eval.checkpoint_path)
        metrics = model.metrics
        pred_dict = model.predict(pred_data=eval_data, device=device)
        error_position_distribution = []
        num_err_bin = []
        progress_bar = tqdm(eval_data)
        for (ind, batch) in enumerate(progress_bar):
            (y_true, padding_mask, spect_path) = (batch['annot'], batch['padding_mask'], batch['spect_path'])
            spect_path = tuple(spect_path)
            records['spect_path'].append(spect_path[0])
            y_true = y_true.to(device)
            y_true_np = np.squeeze(y_true.cpu().numpy())
            t_vec = vak.files.spect.load(spect_path[0])['t']
            (y_true_labels, t_ons_s, t_offs_s) = lbl_tb2segments(y_true_np, labelmap, t_vec)
            y_true_labels = map_number_labels_to_alphanumeric(y_true_labels)
            # predictions are keyed by spectrogram path
            y_pred_ind = spect_path[0]
            y_pred = pred_dict[y_pred_ind]
            y_pred = torch.argmax(y_pred, dim=1)
            y_pred = torch.flatten(y_pred)
            y_pred = y_pred.unsqueeze(0)[padding_mask]
            y_pred_np = np.squeeze(y_pred.cpu().numpy())
            # --- (1) metrics on the raw prediction ---------------------------
            (y_pred_labels, _, _) = lbl_tb2segments(y_pred_np, labelmap, t_vec, min_segment_dur=None, majority_vote=False)
            y_pred_labels = map_number_labels_to_alphanumeric(y_pred_labels)
            metric_vals_batch = compute_metrics(metrics, y_true, y_pred, y_true_labels, y_pred_labels)
            for (metric_name, metric_val) in metric_vals_batch.items():
                records[metric_name].append(metric_val)
            # --- (2) metrics after majority-vote clean-up --------------------
            segment_inds_list = lbl_tb_segment_inds_list(y_pred_np, unlabeled_label=labelmap['unlabeled'])
            y_pred_np_mv = majority_vote_transform(y_pred_np, segment_inds_list)
            y_pred_mv = to_long_tensor(y_pred_np_mv).to(device)
            (y_pred_mv_labels, _, _) = lbl_tb2segments(y_pred_np_mv, labelmap, t_vec, min_segment_dur=None, majority_vote=False)
            y_pred_mv_labels = map_number_labels_to_alphanumeric(y_pred_mv_labels)
            metric_vals_batch_mv = compute_metrics(metrics, y_true, y_pred_mv, y_true_labels, y_pred_mv_labels)
            for (metric_name, metric_val) in metric_vals_batch_mv.items():
                records[f'{metric_name}_majority_vote'].append(metric_val)
            # --- (3) metrics after removing too-short segments ---------------
            (y_pred_np_mindur, _) = remove_short_segments(y_pred_np, segment_inds_list, timebin_dur=timebin_dur, min_segment_dur=min_segment_dur, unlabeled_label=labelmap['unlabeled'])
            y_pred_mindur = to_long_tensor(y_pred_np_mindur).to(device)
            (y_pred_mindur_labels, _, _) = lbl_tb2segments(y_pred_np_mindur, labelmap, t_vec, min_segment_dur=None, majority_vote=False)
            y_pred_mindur_labels = map_number_labels_to_alphanumeric(y_pred_mindur_labels)
            metric_vals_batch_mindur = compute_metrics(metrics, y_true, y_pred_mindur, y_true_labels, y_pred_mindur_labels)
            for (metric_name, metric_val) in metric_vals_batch_mindur.items():
                records[f'{metric_name}_min_segment_dur'].append(metric_val)
            # --- (4) metrics after min-duration removal + majority vote ------
            (y_pred_np_mindur_mv, segment_inds_list) = remove_short_segments(y_pred_np, segment_inds_list, timebin_dur=timebin_dur, min_segment_dur=min_segment_dur, unlabeled_label=labelmap['unlabeled'])
            y_pred_np_mindur_mv = majority_vote_transform(y_pred_np_mindur_mv, segment_inds_list)
            y_pred_mindur_mv = to_long_tensor(y_pred_np_mindur_mv).to(device)
            (y_pred_mindur_mv_labels, _, _) = lbl_tb2segments(y_pred_np_mindur_mv, labelmap, t_vec, min_segment_dur=None, majority_vote=False)
            y_pred_mindur_mv_labels = map_number_labels_to_alphanumeric(y_pred_mindur_mv_labels)
            metric_vals_batch_mindur_mv = compute_metrics(metrics, y_true, y_pred_mindur_mv, y_true_labels, y_pred_mindur_mv_labels)
            for (metric_name, metric_val) in metric_vals_batch_mindur_mv.items():
                records[f'{metric_name}_min_dur_maj_vote'].append(metric_val)
            # error analysis on the fully cleaned-up prediction
            num_err_bin.append(sum(((y_true_np - y_pred_np_mindur_mv) != 0)))
            # disagreeing bins where either side is class 0 (presumably 'unlabeled' -- TODO confirm)
            err = (((y_true_np - y_pred_np_mindur_mv) != 0) & ((y_true_np == 0) | (y_pred_np_mindur_mv == 0)))
            # distance from each error bin to the nearest true on/offset time
            error_position_distribution.append([min(np.abs((np.concatenate((t_ons_s, t_offs_s)) - tm))) for tm in t_vec[(err == True)]])
    error_position_distribution = np.concatenate(error_position_distribution)
    df = pd.DataFrame.from_records(records)
    t1 = t_vec[1]
    return (df, error_position_distribution, num_err_bin, t1)
class TCPCollector(diamond.collector.Collector):
    """Diamond collector that publishes TCP statistics parsed from /proc.

    Python 2 code (uses ``xrange``/``long``).  Values from the 'Tcp' line of
    each proc file are published as counters, or as gauges when listed in the
    ``gauges`` config option.
    """

    # proc files that contain a 'Tcp' statistics header/values pair
    PROC = ['/proc/net/netstat', '/proc/net/snmp']

    def process_config(self):
        """Fill in defaults for config keys left unset."""
        super(TCPCollector, self).process_config()
        if (self.config['allowed_names'] is None):
            self.config['allowed_names'] = []
        if (self.config['gauges'] is None):
            self.config['gauges'] = ['CurrEstab', 'MaxConn']

    def get_default_config_help(self):
        """Describe the collector-specific config options."""
        config_help = super(TCPCollector, self).get_default_config_help()
        config_help.update({'allowed_names': 'list of entries to collect, empty to collect all', 'gauges': 'list of metrics to be published as gauges'})
        return config_help

    def get_default_config(self):
        """Return defaults, including a curated allowed_names list."""
        config = super(TCPCollector, self).get_default_config()
        config.update({'path': 'tcp', 'allowed_names': (((('ListenOverflows, ListenDrops, TCPLoss, ' + 'TCPTimeouts, TCPFastRetrans, TCPLostRetransmit, ') + 'TCPForwardRetrans, TCPSlowStartRetrans, CurrEstab, ') + 'TCPAbortOnMemory, TCPBacklogDrop, AttemptFails, ') + 'EstabResets, InErrs, ActiveOpens, PassiveOpens'), 'gauges': 'CurrEstab, MaxConn'})
        return config

    def collect(self):
        """Parse each proc file's 'Tcp' header/data pair and publish metrics."""
        metrics = {}
        for filepath in self.PROC:
            if (not os.access(filepath, os.R_OK)):
                self.log.error('Permission to access %s denied', filepath)
                continue
            header = ''
            data = ''
            file = open(filepath)
            # NOTE(review): open() never returns a falsy object, so this check
            # is dead code; an open failure would raise IOError instead.
            if (not file):
                self.log.error('Failed to open %s', filepath)
                continue
            while True:
                line = file.readline()
                if (len(line) == 0):
                    break  # EOF without finding a 'Tcp' line
                if line.startswith('Tcp'):
                    # the header line is immediately followed by the values line
                    header = line
                    data = file.readline()
                    break
            file.close()
            if ((header == '') or (data == '')):
                self.log.error('%s has no lines with Tcp', filepath)
                continue
            header = header.split()
            data = data.split()
            # column 0 is the 'Tcp:' label; pair metric names with values
            for i in xrange(1, len(header)):
                metrics[header[i]] = data[i]
        for metric_name in metrics.keys():
            # an empty allowed_names list means "collect everything"
            if ((len(self.config['allowed_names']) > 0) and (metric_name not in self.config['allowed_names'])):
                continue
            value = long(metrics[metric_name])
            if (metric_name in self.config['gauges']):
                self.publish_gauge(metric_name, value, 0)
            else:
                self.publish_counter(metric_name, value, 0)
def test_repr__undefined():
    """repr() of a Bound CRS built from an all-undefined proj string.

    Newer PROJ (the ``PROJ_GTE_901`` flag, presumably PROJ >= 9.0.1 -- TODO
    confirm) appends the ``nadgrids=`` parameter to the datum name.
    """
    datum_name = 'unknown'
    if PROJ_GTE_901:
        datum_name = f'{datum_name} using nadgrids='
    # NOTE: the expected block below is whitespace-sensitive; do not re-indent.
    assert (repr(CRS('+proj=merc +a=6378137.0 +b=6378137.0 +nadgrids= +lon_0=0.0 +x_0=0.0 +y_0=0.0 +units=m +no_defs')) == f'''<Bound CRS: +proj=merc +a=6378137.0 +b=6378137.0 +nadgrids= ...>
Name: unknown
Axis Info [cartesian]:
- E[east]: Easting (metre)
- N[north]: Northing (metre)
Area of Use:
- undefined
Coordinate Operation:
- name: unknown to WGS84
- method: NTv2
Datum: {datum_name}
- Ellipsoid: unknown
- Prime Meridian: Greenwich
Source CRS: unknown
''')
class FunnelConverter(Converter):
    """Convert a slow Funnel tokenizer into a fast `tokenizers.Tokenizer`."""

    def converted(self) -> Tokenizer:
        source = self.original_tokenizer
        fast_tokenizer = Tokenizer(WordPiece(source.vocab, unk_token=str(source.unk_token)))

        # Defaults used when the slow tokenizer has no basic_tokenizer.
        handle_chinese = False
        strip_accents = False
        lowercase = False
        if hasattr(source, 'basic_tokenizer'):
            basic = source.basic_tokenizer
            handle_chinese = basic.tokenize_chinese_chars
            strip_accents = basic.strip_accents
            lowercase = basic.do_lower_case

        fast_tokenizer.normalizer = normalizers.BertNormalizer(
            clean_text=True,
            handle_chinese_chars=handle_chinese,
            strip_accents=strip_accents,
            lowercase=lowercase,
        )
        fast_tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()

        cls = str(source.cls_token)
        sep = str(source.sep_token)
        # Funnel uses token-type id 2 for the [CLS] token.
        fast_tokenizer.post_processor = processors.TemplateProcessing(
            single=f'{cls}:2 $A:0 {sep}:0',
            pair=f'{cls}:2 $A:0 {sep}:0 $B:1 {sep}:1',
            special_tokens=[(cls, source.cls_token_id), (sep, source.sep_token_id)],
        )
        fast_tokenizer.decoder = decoders.WordPiece(prefix='##')
        return fast_tokenizer
def reconstitute_tpm(subsystem):
node_tpms = [node.tpm.tpm[(..., 1)] for node in subsystem.nodes]
node_tpms = [tpm.squeeze(axis=subsystem.external_indices) for tpm in node_tpms]
node_tpms = [np.expand_dims(tpm, (- 1)) for tpm in node_tpms]
node_tpms = [(tpm * np.ones((([2] * (tpm.ndim - 1)) + [tpm.shape[(- 1)]]))) for tpm in node_tpms]
return np.concatenate(node_tpms, axis=(- 1)) |
def is_valid_total_withdraw(channel_state: NettingChannelState, our_total_withdraw: WithdrawAmount, allow_zero: bool=False) -> SuccessOrError:
    """Validate a proposed new total withdraw for our side of the channel.

    Returns a SuccessOrError describing the first failed check, or success.
    """
    # Compute all quantities up front, mirroring the original evaluation order.
    balance = get_balance(sender=channel_state.our_state, receiver=channel_state.partner_state)
    channel_total = TokenAmount(our_total_withdraw + channel_state.partner_total_withdraw)
    withdraw_overflow = not is_valid_channel_total_withdraw(channel_total)
    withdraw_amount = our_total_withdraw - channel_state.our_total_withdraw

    if get_status(channel_state) != ChannelState.STATE_OPENED:
        return SuccessOrError('Invalid withdraw, the channel is not opened')
    if withdraw_amount < 0:
        return SuccessOrError(f'Total withdraw {our_total_withdraw} decreased')
    if not allow_zero and withdraw_amount == 0:
        return SuccessOrError(f'Total withdraw {our_total_withdraw} did not increase')
    if balance < withdraw_amount:
        return SuccessOrError(f'Insufficient balance: {balance}. Requested {withdraw_amount} for withdraw')
    if withdraw_overflow:
        return SuccessOrError(f'The new total_withdraw {our_total_withdraw} will cause an overflow')
    return SuccessOrError()
# FIX: the original decorator line was corrupted to a bare `_label` in the
# dump; restored as pycket's `@loop_label` -- TODO confirm against upstream.
@loop_label
def equal_hash_ref_loop(data, idx, key, env, cont):
    """CPS loop over `data`, comparing each stored key to `key` with `equal?`.

    Returns w_missing (via the continuation) once the entries are exhausted.
    """
    from pycket.interpreter import return_value
    from pycket.prims.equal import equal_func_unroll_n, EqualInfo
    if (idx >= len(data)):
        return return_value(w_missing, env, cont)
    (k, v) = data[idx]
    info = EqualInfo.BASIC_SINGLETON
    # On a match the continuation yields v; otherwise it resumes the loop.
    cont = catch_ref_is_equal_cont(data, idx, key, v, env, cont)
    return equal_func_unroll_n(k, key, info, env, cont, 5)
def _simple_compact(variables):
    """Return compact array-slice notation for *variables*, or None.

    Compaction succeeds only when the variables exactly fill a dense
    axis-aligned sub-box of their containing array.
    """
    var_array = VarEntities.varToEVarArray[variables[0]]
    n_dims = len(var_array.size)
    mins = [float('inf')] * n_dims
    maxs = [float('-inf')] * n_dims
    for var in variables:
        for dim, idx in enumerate(var.indexes):
            if idx < mins[dim]:
                mins[dim] = idx
            if idx > maxs[dim]:
                maxs[dim] = idx
    box_size = 1
    for lo, hi in zip(mins, maxs):
        box_size *= hi - lo + 1
    if box_size != len(variables):
        return None  # not a dense box: cannot be written compactly
    pieces = [var_array.id]
    for dim, (lo, hi) in enumerate(zip(mins, maxs)):
        if (lo, hi) == (0, var_array.size[dim] - 1):
            pieces.append('[]')  # full dimension: empty brackets
        elif lo == hi:
            pieces.append('[' + str(lo) + ']')
        else:
            pieces.append('[' + str(lo) + '..' + str(hi) + ']')
    return ''.join(pieces)
class GraphVAEOptimizer(object):
    """Builds the TF1 training graph for a Graph VAE with an optional RL
    (value-network) objective mixed in via the ``la`` coefficient."""

    def __init__(self, model, learning_rate=0.001):
        # KL annealing weight and VAE/RL mixing coefficient (la=1 -> pure VAE).
        self.kl_weight = tf.placeholder_with_default(1.0, shape=())
        self.la = tf.placeholder_with_default(1.0, shape=())
        # Per-graph reconstruction losses over edge and node logits.
        edges_loss = tf.losses.sparse_softmax_cross_entropy(labels=model.edges_labels, logits=model.edges_logits, reduction=tf.losses.Reduction.NONE)
        self.edges_loss = tf.reduce_sum(edges_loss, [(- 2), (- 1)])
        nodes_loss = tf.losses.sparse_softmax_cross_entropy(labels=model.nodes_labels, logits=model.nodes_logits, reduction=tf.losses.Reduction.NONE)
        self.nodes_loss = tf.reduce_sum(nodes_loss, (- 1))
        self.loss_ = (self.edges_loss + self.nodes_loss)
        self.reconstruction_loss = tf.reduce_mean(self.loss_)
        # Standard-normal prior matching the posterior's tensor shapes.
        self.p_z = tf.distributions.Normal(tf.zeros_like(model.embeddings_mean), tf.ones_like(model.embeddings_std))
        self.kl = tf.reduce_mean(tf.reduce_sum(tf.distributions.kl_divergence(model.q_z, self.p_z), axis=(- 1)))
        self.ELBO = ((- self.reconstruction_loss) - self.kl)
        # Value-network regression against real/fake rewards, and RL objective.
        self.loss_V = (((model.value_logits_real - model.rewardR) ** 2) + ((model.value_logits_fake - model.rewardF) ** 2))
        self.loss_RL = (- model.value_logits_fake)
        # KL term is only added in the variational setting.
        self.loss_VAE = tf.cond(model.variational, (lambda : (self.reconstruction_loss + (self.kl_weight * self.kl))), (lambda : self.reconstruction_loss))
        self.loss_V = tf.reduce_mean(self.loss_V)
        self.loss_RL = tf.reduce_mean(self.loss_RL)
        # Rescale the RL loss to the VAE loss magnitude (gradient-stopped ratio).
        self.loss_RL *= tf.abs(tf.stop_gradient((self.loss_VAE / self.loss_RL)))
        self.VAE_optim = tf.train.AdamOptimizer(learning_rate=learning_rate)
        # Train encoder+decoder on la*VAE + (1-la)*RL, skipping a term when
        # its coefficient is zero.
        self.train_step_VAE = self.VAE_optim.minimize(loss=(tf.cond(tf.greater(self.la, 0), (lambda : (self.la * self.loss_VAE)), (lambda : 0.0)) + tf.cond(tf.less(self.la, 1), (lambda : ((1 - self.la) * self.loss_RL)), (lambda : 0.0))), var_list=(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder') + tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='decoder')))
        self.V_optim = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self.train_step_V = self.V_optim.minimize(loss=self.loss_V, var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='value'))
        self.log_likelihood = self.__log_likelihood
        self.model = model

    def __log_likelihood(self, n):
        """Importance-sampled estimate of log p(x) using n posterior samples."""
        z = self.model.q_z.sample(n)
        log_p_z = self.p_z.log_prob(z)
        log_p_z = tf.reduce_sum(log_p_z, axis=(- 1))
        log_p_x_z = (- self.loss_)
        log_q_z_x = self.model.q_z.log_prob(z)
        log_q_z_x = tf.reduce_sum(log_q_z_x, axis=(- 1))
        # NOTE(review): leftover debug print of the term shapes.
        print([a.shape for a in (log_p_z, log_p_x_z, log_q_z_x)])
        return tf.reduce_mean(tf.reduce_logsumexp((tf.transpose(((log_p_x_z + log_p_z) - log_q_z_x)) - np.log(n)), axis=(- 1)))
# FIX: the `@dataclass` decorator was stripped in the dump; without it the
# `field(...)` defaults are inert class attributes and no __init__ exists.
@dataclass
class DataTrainingArguments:
    """Arguments describing the dataset/tokenization setup for token classification."""

    task_name: Optional[str] = field(default='ner', metadata={'help': 'The name of the task (ner, pos...).'})
    dataset_name: Optional[str] = field(default='wikiann', metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default='bn', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    label_all_tokens: bool = field(default=False, metadata={'help': 'Whether to put the label for one word on all tokens of generated by that word or just on the one (in which case the other tokens will have a padding index).'})

    def __post_init__(self):
        # Validate and normalize after dataclass-generated __init__ runs.
        if (self.dataset_name is None):
            raise ValueError('Need a dataset name.')
        self.task_name = self.task_name.lower()
def drop_block_fast_2d(x: torch.Tensor, drop_prob: float=0.1, block_size: int=7, gamma_scale: float=1.0, with_noise: bool=False, inplace: bool=False, batchwise: bool=False):
    """DropBlock (fast approximation): zero contiguous spatial blocks of `x`,
    optionally replacing them with Gaussian noise instead of rescaling.
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    # Seed probability chosen so the expected dropped area matches drop_prob.
    gamma = (gamma_scale * drop_prob * total_size / clipped_block_size ** 2 /
             ((W - block_size + 1) * (H - block_size + 1)))

    if batchwise:
        # One mask shared across the whole batch.
        seeds = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
    else:
        seeds = torch.rand_like(x)
    block_mask = seeds < gamma
    # Dilate each seed into a clipped_block_size x clipped_block_size block.
    block_mask = F.max_pool2d(block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)

    if with_noise:
        if batchwise:
            normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device)
        else:
            normal_noise = torch.randn_like(x)
        if inplace:
            x.mul_(1.0 - block_mask).add_(normal_noise * block_mask)
        else:
            x = x * (1.0 - block_mask) + normal_noise * block_mask
    else:
        # Invert mask (1 = keep) and rescale to preserve the activation mean.
        block_mask = 1 - block_mask
        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-07)).to(dtype=x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x
class W2lFairseqLMDecoder(W2lDecoder):
    """wav2letter decoder that scores hypotheses with a fairseq neural LM.

    Builds a word-level lexicon ``Trie`` when a lexicon is supplied, else
    falls back to a lexicon-free decoder.
    """

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        self.silence = tgt_dict.bos()
        # unit_lm: the LM operates over acoustic units rather than real words.
        self.unit_lm = getattr(args, 'unit_lm', False)
        self.lexicon = (load_words(args.lexicon) if args.lexicon else None)
        self.idx_to_wrd = {}
        # NOTE(review): args.kenlm_model actually holds a fairseq checkpoint
        # path here (the option name is reused); torch.load is pickle-based,
        # so only trusted checkpoints should be loaded.
        checkpoint = torch.load(args.kenlm_model, map_location='cpu')
        if (('cfg' in checkpoint) and (checkpoint['cfg'] is not None)):
            lm_args = checkpoint['cfg']
        else:
            # older checkpoints store an argparse Namespace instead of a cfg
            lm_args = convert_namespace_to_omegaconf(checkpoint['args'])
        with open_dict(lm_args.task):
            lm_args.task.data = osp.dirname(args.kenlm_model)
        task = tasks.setup_task(lm_args.task)
        model = task.build_model(lm_args.model)
        model.load_state_dict(checkpoint['model'], strict=False)
        self.trie = Trie(self.vocab_size, self.silence)
        self.word_dict = task.dictionary
        self.unk_word = self.word_dict.unk()
        self.lm = FairseqLM(self.word_dict, model)
        self.decoder_opts = DecoderOptions(args.beam, int(getattr(args, 'beam_size_token', len(tgt_dict))), args.beam_threshold, args.lm_weight, args.word_score, args.unk_weight, args.sil_weight, 0, False, self.criterion_type)
        if self.lexicon:
            start_state = self.lm.start(False)
            for (i, (word, spellings)) in enumerate(self.lexicon.items()):
                if self.unit_lm:
                    # unit LMs index words positionally; remember the mapping
                    word_idx = i
                    self.idx_to_wrd[i] = word
                    score = 0
                else:
                    word_idx = self.word_dict.index(word)
                    (_, score) = self.lm.score(start_state, word_idx, no_cache=True)
                for spelling in spellings:
                    spelling_idxs = [tgt_dict.index(token) for token in spelling]
                    assert (tgt_dict.unk() not in spelling_idxs), f'{spelling} {spelling_idxs}'
                    self.trie.insert(spelling_idxs, word_idx, score)
            self.trie.smear(SmearingMode.MAX)
            self.decoder = LexiconDecoder(self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, [], self.unit_lm)
        else:
            from wav2letter.decoder import LexiconFreeDecoder
            self.decoder = LexiconFreeDecoder(self.decoder_opts, self.lm, self.silence, self.blank, [])

    def decode(self, emissions):
        """Decode a (B, T, N) emission tensor into per-sample n-best hypothesis lists."""
        (B, T, N) = emissions.size()
        hypos = []

        def idx_to_word(idx):
            if self.unit_lm:
                return self.idx_to_wrd[idx]
            else:
                return self.word_dict[idx]

        def make_hypo(result):
            hypo = {'tokens': self.get_tokens(result.tokens), 'score': result.score}
            if self.lexicon:
                # negative word ids mark non-word entries; drop them
                hypo['words'] = [idx_to_word(x) for x in result.words if (x >= 0)]
            return hypo

        for b in range(B):
            # raw pointer into the emissions storage; the 4-byte element
            # stride assumes float32 data -- TODO confirm
            emissions_ptr = (emissions.data_ptr() + ((4 * b) * emissions.stride(0)))
            results = self.decoder.decode(emissions_ptr, T, N)
            nbest_results = results[:self.nbest]
            hypos.append([make_hypo(result) for result in nbest_results])
            self.lm.empty_cache()
        return hypos
def simple_model(simple_model_data):
    """Build a minimal normal-prior / normal-likelihood model from fixture data."""
    data = simple_model_data
    with pm.Model() as model:
        prior_mu = pm.Normal('mu', mu=data['mu0'], sigma=data['sigma0'], initval=0)
        # Observed likelihood; total_size enables minibatch scaling.
        pm.Normal('x', mu=prior_mu, sigma=data['sigma'], observed=data['data'], total_size=data['n'])
    return model
class ClassDefinitionTestCase(unittest.TestCase):
    """Check that each pykickstart command module binds every command/data
    class exactly once, under the class's own name."""

    def runTest(self):
        error_count = 0
        commands_dir = os.path.abspath(os.path.join(os.path.dirname(pykickstart.__file__), 'commands'))
        self.assertTrue(os.path.exists(commands_dir))
        if commands_dir not in sys.path:
            sys.path.append(commands_dir)

        for _dirpath, _dirnames, paths in os.walk(commands_dir):
            for path in paths:
                if path == '__init__.py' or not path.endswith('.py'):
                    continue
                command_module = importlib.import_module(path.replace('.py', ''))
                seen_classes = []
                for impl_name, impl_class in command_module.__dict__.items():
                    if type(impl_class) is not type:
                        continue
                    if not (issubclass(impl_class, KickstartCommand) or issubclass(impl_class, BaseData)):
                        continue
                    if impl_class.__name__ in ['KickstartCommand', 'DeprecatedCommand', 'RemovedCommand']:
                        continue
                    if impl_class not in seen_classes and impl_class.__name__ == impl_name:
                        seen_classes.append(impl_class)
                    else:
                        # duplicate binding or alias under a different name
                        error_count += 1
                        print('ERROR: In `commands/%s` %s = %s' % (path, impl_name, impl_class.__name__))
        self.assertEqual(0, error_count)
class MembershipDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Allow a membership's creator to delete it; state-changing verbs only."""

    model = Membership
    slug_field = 'creator__username'
    raise_exception = True
    # FIX: the attribute name was lost in the dump (`= ['post', 'delete']`);
    # `http_method_names` is Django's standard hook for restricting verbs.
    http_method_names = ['post', 'delete']

    def get_success_url(self):
        return reverse('users:user_detail', kwargs={'slug': self.request.user.username})

    def test_func(self):
        # Only the creator of the membership may delete it.
        return (self.get_object().creator == self.request.user)
def test_gpu_normalize():
    """GPUNormalize must broadcast mean/std per input_format and normalize via its hook."""

    def check_normalize(origin_imgs, result_imgs, norm_cfg):
        from numpy.testing import assert_array_almost_equal
        restored = result_imgs.copy()
        restored *= norm_cfg['std']
        restored += norm_cfg['mean']
        assert_array_almost_equal(origin_imgs, restored, decimal=4)

    base_cfg = dict(input_format='NCTHW', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375])

    # NCHW: run a real tensor through the hook and check the round-trip.
    cfg = copy.deepcopy(base_cfg)
    cfg['input_format'] = 'NCHW'
    gpu_normalize = GPUNormalize(**cfg)
    assert gpu_normalize._mean.shape == (1, 3, 1, 1)
    imgs = np.random.randint(256, size=(2, 240, 320, 3), dtype=np.uint8)
    _input = (torch.tensor(imgs).permute(0, 3, 1, 2),)
    normalize_hook = gpu_normalize.hook_func()
    _input = normalize_hook(torch.nn.Module, _input)
    result_imgs = np.array(_input[0].permute(0, 2, 3, 1))
    check_normalize(imgs, result_imgs, cfg)

    # Remaining formats: only the broadcast shape of _mean is checked.
    for input_format, expected_shape in (
        ('NCTHW', (1, 3, 1, 1, 1)),
        ('NCHW_Flow', (1, 3, 1, 1)),
        ('NPTCHW', (1, 1, 1, 3, 1, 1)),
    ):
        cfg = copy.deepcopy(base_cfg)
        cfg['input_format'] = input_format
        gpu_normalize = GPUNormalize(**cfg)
        assert gpu_normalize._mean.shape == expected_shape

    # An unknown format must raise.
    cfg = copy.deepcopy(base_cfg)
    cfg['input_format'] = '_format'
    with pytest.raises(ValueError):
        gpu_normalize = GPUNormalize(**cfg)
def test_tdm_fmcw_rx():
    """Validate the default parameters of the TDM-FMCW receiver fixture."""
    print('#### TDM FMCW receiver ####')
    tdm = tdm_fmcw_rx()
    print('# TDM FMCW receiver parameters #')
    assert (tdm.bb_prop['fs'] == 2000000.0)
    assert (tdm.rf_prop['noise_figure'] == 4)
    assert (tdm.rf_prop['rf_gain'] == 20)
    assert (tdm.bb_prop['load_resistor'] == 500)
    assert (tdm.bb_prop['baseband_gain'] == 50)
    assert (tdm.bb_prop['noise_bandwidth'] == tdm.bb_prop['fs'])
    print('# TDM FMCW receiver channel #')
    # FIX: the carrier-frequency literal was corrupted to `.0` in the dump,
    # making this a divide-by-zero. 24.125 GHz is the conventional K-band
    # radar carrier -- TODO confirm against the tdm_fmcw_rx fixture.
    half_wavelength = ((const.c / 24.125e9) / 2)
    assert (tdm.rxchannel_prop['size'] == 8)
    assert np.array_equal(tdm.rxchannel_prop['locations'], np.array([[0, 0, 0], [0, half_wavelength, 0], [0, (half_wavelength * 2), 0], [0, (half_wavelength * 3), 0], [0, (half_wavelength * 4), 0], [0, (half_wavelength * 5), 0], [0, (half_wavelength * 6), 0], [0, (half_wavelength * 7), 0]]))
    assert np.array_equal(tdm.rxchannel_prop['az_angles'], [np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180)])
    assert np.array_equal(tdm.rxchannel_prop['az_patterns'], [np.zeros(2), np.zeros(2), np.zeros(2), np.zeros(2), np.zeros(2), np.zeros(2), np.zeros(2), np.zeros(2)])
    assert np.array_equal(tdm.rxchannel_prop['el_angles'], [np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180), np.arange((- 90), 91, 180)])
    assert np.array_equal(tdm.rxchannel_prop['el_patterns'], [np.zeros(2), np.zeros(2), np.zeros(2), np.zeros(2), np.zeros(2), np.zeros(2), np.zeros(2), np.zeros(2)])
class LambdaWarmUpCosineScheduler2():
    """Cyclic LR-lambda schedule: linear warm-up, then cosine decay per cycle.

    Every per-cycle setting (warm-up steps, min/max/start multiplier, cycle
    length) is supplied as a parallel list with one entry per cycle.
    """

    def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
        assert (len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths))
        self.lr_warm_up_steps = warm_up_steps
        self.f_start = f_start
        self.f_min = f_min
        self.f_max = f_max
        self.cycle_lengths = cycle_lengths
        # cumulative cycle boundaries, beginning at step 0
        self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
        self.last_f = 0.0
        self.verbosity_interval = verbosity_interval

    def find_in_interval(self, n):
        """Return the index of the cycle containing global step n."""
        for interval, boundary in enumerate(self.cum_cycles[1:]):
            if n <= boundary:
                return interval

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]  # step relative to the current cycle
        if self.verbosity_interval > 0 and (n % self.verbosity_interval) == 0:
            print(f'current step: {n}, recent lr-multiplier: {self.last_f}, current cycle {cycle}')
        if n < self.lr_warm_up_steps[cycle]:
            # linear ramp from f_start up to f_max
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
        else:
            # cosine decay from f_max down to f_min over the rest of the cycle
            t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
            t = min(t, 1.0)
            f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (1 + np.cos(t * np.pi))
        self.last_f = f
        return f

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)
def main():
    """Authenticate against VK with an OAuth authorization code and print the token."""
    # NOTE(review): these look like redacted sample credentials; SECURITY:
    # real client secrets must not be committed to source control.
    code = '18dczc1337a63427fa'
    # FIX: the original line was an unterminated string literal (SyntaxError);
    # the redirect URL was lost in the dump -- TODO restore the real value.
    redirect_url = ''
    app = 0
    secret = 'dGbpoJdqNuMlGDECgO9I'
    vk_session = vk_api.VkApi(app_id=app, client_secret=secret)
    try:
        vk_session.code_auth(code, redirect_url)
    except vk_api.AuthError as error_msg:
        print(error_msg)
        return
    print(vk_session.token['user_id'])
    print(vk_session.token['access_token'])
class UperNetFCNHead(nn.Module):
    """FCN auxiliary head for UperNet: a small conv stack plus a 1x1 classifier."""

    def __init__(self, config, in_index: int=2, kernel_size: int=3, dilation: Union[(int, Tuple[(int, int)])]=1) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        layers = [UperNetConvModule(self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)]
        layers.extend(
            UperNetConvModule(self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)
            for _ in range(self.num_convs - 1)
        )
        self.convs = nn.Identity() if self.num_convs == 0 else nn.Sequential(*layers)
        if self.concat_input:
            # Fuses the head input with the conv-stack output before classifying.
            self.conv_cat = UperNetConvModule(self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2)
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Conv weights ~ N(0, initializer_range); biases zeroed.
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        return self.classifier(output)
class TestNonInjectiveLink(unittest.TestCase):
    """QDMR->SPARQL translation tests over concert_singer's many-to-many
    singer_in_concert link (a non-injective relation): each generated query is
    executed and compared against a hand-written reference query."""

    # NOTE(review): the bare `(ONE_TEST_TIMEOUT)` expressions below look like
    # decorators that lost their `@` (e.g. `@timeout(ONE_TEST_TIMEOUT)`) —
    # confirm against the original source.
    (ONE_TEST_TIMEOUT)
    def test_select_project_forward(self):
        """select concert -> project singer; rows may repeat (no DISTINCT)."""
        (rdf_graph, schema) = get_graph_and_schema('dev', 'concert_singer')
        correct_sparql_query = textwrap.dedent(' SELECT ?singer_name\n WHERE\n {\n ?pair_id arc:singer_in_concert:Singer_ID ?singer_in_pair_id.\n ?singer_in_pair_id arc:singer_in_concert:Singer_ID:singer:Singer_ID ?s_id.\n ?s_id arc:singer:Name ?singer_name.\n ?pair_id arc:singer_in_concert:concert_ID ?concert_in_pair_id.\n ?concert_in_pair_id arc:singer_in_concert:concert_ID:concert:concert_ID ?c_id.\n ?c_id arc:concert:concert_Name ?concert_name.\n }')
        correct_sparql_query = QueryToRdf(query=correct_sparql_query, output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding('singer', 'Name'))])
        qdmr = QdmrInstance(['select', 'project'], [['concert'], ['singer', '#1']])
        grounding = {GroundingIndex(0, 0, 'concert'): GroundingKey.make_column_grounding('concert', 'concert_Name'), GroundingIndex(1, 0, 'singer'): GroundingKey.make_column_grounding('singer', 'Name')}
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        # Row order is not required; column order is.
        (equal, message) = result.is_equal_to(result_correct, require_column_order=True, require_row_order=False, return_message=True)
        self.assertTrue(equal, message)

    (ONE_TEST_TIMEOUT)
    def test_select_project_forward_distinct(self):
        """Same as forward test but with a 'distinct' entry in the grounding."""
        (rdf_graph, schema) = get_graph_and_schema('dev', 'concert_singer')
        correct_sparql_query = textwrap.dedent(' SELECT DISTINCT ?singer_name\n WHERE\n {\n ?pair_id arc:singer_in_concert:Singer_ID ?singer_in_pair_id.\n ?singer_in_pair_id arc:singer_in_concert:Singer_ID:singer:Singer_ID ?s_id.\n ?s_id arc:singer:Name ?singer_name.\n ?pair_id arc:singer_in_concert:concert_ID ?concert_in_pair_id.\n ?concert_in_pair_id arc:singer_in_concert:concert_ID:concert:concert_ID ?c_id.\n ?c_id arc:concert:concert_Name ?concert_name.\n }')
        correct_sparql_query = QueryToRdf(query=correct_sparql_query, output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding('singer', 'Name'))])
        qdmr = QdmrInstance(['select', 'project'], [['concert'], ['singer', '#1']])
        grounding = {GroundingIndex(0, 0, 'concert'): GroundingKey.make_column_grounding('concert', 'concert_Name'), GroundingIndex(1, 0, 'singer'): GroundingKey.make_column_grounding('singer', 'Name'), 'distinct': ['#2']}
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        (equal, message) = result.is_equal_to(result_correct, require_column_order=True, require_row_order=False, return_message=True)
        self.assertTrue(equal, message)

    (ONE_TEST_TIMEOUT)
    def test_select_project_backward(self):
        """select singer -> project concert (link traversed in the other direction)."""
        (rdf_graph, schema) = get_graph_and_schema('dev', 'concert_singer')
        correct_sparql_query = textwrap.dedent(' SELECT ?concert_name\n WHERE\n {\n ?pair_id arc:singer_in_concert:Singer_ID ?singer_in_pair_id.\n ?singer_in_pair_id arc:singer_in_concert:Singer_ID:singer:Singer_ID ?s_id.\n ?s_id arc:singer:Name ?singer_name.\n ?pair_id arc:singer_in_concert:concert_ID ?concert_in_pair_id.\n ?concert_in_pair_id arc:singer_in_concert:concert_ID:concert:concert_ID ?c_id.\n ?c_id arc:concert:concert_Name ?concert_name.\n }')
        correct_sparql_query = QueryToRdf(query=correct_sparql_query, output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding('concert', 'concert_Name'))])
        qdmr = QdmrInstance(['select', 'project'], [['singer'], ['concert', '#1']])
        grounding = {GroundingIndex(0, 0, 'singer'): GroundingKey.make_column_grounding('singer', 'Name'), GroundingIndex(1, 0, 'concert'): GroundingKey.make_column_grounding('concert', 'concert_Name')}
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        (equal, message) = result.is_equal_to(result_correct, require_column_order=True, require_row_order=False, return_message=True)
        self.assertTrue(equal, message)

    (ONE_TEST_TIMEOUT)
    def test_select_with_intersect(self):
        """intersection of two filtered selections, expressed as nested sub-SELECTs."""
        (rdf_graph, schema) = get_graph_and_schema('dev', 'concert_singer')
        correct_sparql_query = textwrap.dedent(' SELECT ?singer_name\n WHERE\n {\n ?s_id arc:singer:Name ?singer_name.\n {\n SELECT ?singer_name\n WHERE\n {\n ?pair_id arc:singer_in_concert:Singer_ID ?singer_in_pair_id.\n ?singer_in_pair_id arc:singer_in_concert:Singer_ID:singer:Singer_ID ?s_id.\n ?s_id arc:singer:Name ?singer_name.\n ?pair_id arc:singer_in_concert:concert_ID ?concert_in_pair_id.\n ?concert_in_pair_id arc:singer_in_concert:concert_ID:concert:concert_ID ?c_id.\n ?c_id arc:concert:concert_Name ?concert_name.\n FILTER(?concert_name = "Super bootcamp"^^xsd:string)\n }\n }\n {\n SELECT ?singer_name\n WHERE\n {\n ?pair_id arc:singer_in_concert:Singer_ID ?singer_in_pair_id.\n ?singer_in_pair_id arc:singer_in_concert:Singer_ID:singer:Singer_ID ?s_id.\n ?s_id arc:singer:Name ?singer_name.\n ?pair_id arc:singer_in_concert:concert_ID ?concert_in_pair_id.\n ?concert_in_pair_id arc:singer_in_concert:concert_ID:concert:concert_ID ?c_id.\n ?c_id arc:concert:concert_Name ?concert_name.\n FILTER(?concert_name = "Week 1"^^xsd:string)\n }\n }\n }')
        correct_sparql_query = QueryToRdf(query=correct_sparql_query, output_cols=[OutputColumnId.from_grounding(GroundingKey.make_column_grounding('singer', 'Name'))])
        qdmr = QdmrInstance(['select', 'filter', 'filter', 'intersection'], [['singer'], ['#1', 'concert name Super bootcamp'], ['#1', 'concert name Week 1'], ['#1', '#2', '#3']])
        grounding = {GroundingIndex(0, 0, 'singer'): GroundingKey.make_column_grounding('singer', 'Name'), GroundingIndex(1, 2, 'concert name Super bootcamp'): GroundingKey.make_comparative_grounding('=', 'Super bootcamp', GroundingKey.make_column_grounding('concert', 'concert_Name')), GroundingIndex(2, 2, 'concert name Week 1'): GroundingKey.make_comparative_grounding('=', 'Week 1', GroundingKey.make_column_grounding('concert', 'concert_Name'))}
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        result_correct = QueryResult.execute_query_to_rdf(correct_sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        (equal, message) = result.is_equal_to(result_correct, require_column_order=True, require_row_order=False, return_message=True)
        self.assertTrue(equal, message)
def test_dual_basis_element():
    """Check DualBasisElement composition and the synthesized opdm->oqdm map."""
    first = DualBasisElement()
    second = DualBasisElement()
    combined = first + second
    assert isinstance(combined, DualBasis)
    doubled = combined + combined
    assert isinstance(doubled, DualBasis)

    dim = 2
    # Random symmetric matrix wrapped as the 'opdm' tensor.
    mat = np.random.random((dim, dim))
    mat = (mat.T + mat) / 2
    opdm = Tensor(tensor=mat, name='opdm')
    rdm = MultiTensor([opdm])

    def make_element(i, j):
        # Encodes delta_{ij} - opdm[i, j] (bias 1 on the diagonal).
        return DualBasisElement(tensor_names=['opdm'], tensor_elements=[(i, j)], tensor_coeffs=[-1.0], bias=1 if i == j else 0, scalar=0)

    mapping = DualBasis()
    for _, (i, j) in opdm.all_iterator():
        mapping += make_element(i, j)
    rdm.dual_basis = mapping

    A, b, _ = rdm.synthesize_dual_basis()
    dense_map = A.todense()
    flat_opdm = opdm.data.reshape((-1, 1))
    oqdm = dense_map.dot(flat_opdm)
    shifted = oqdm + b.todense()
    # The hole density must equal I - opdm.
    assert np.allclose(shifted.reshape((dim, dim)), np.eye(dim) - opdm.data)
def one_hot(indices, depth, no_cuda=False):
    """Return a one-hot encoding of `indices`.

    Args:
        indices: integer tensor of arbitrary shape; values must lie in [0, depth).
        depth: size of the appended one-hot dimension.
        no_cuda: when True allocate on CPU; otherwise on the current CUDA device.

    Returns:
        Float tensor of shape `indices.shape + (depth,)` with a 1.0 at each index.
    """
    shape = list(indices.size()) + [depth]
    indices_dim = len(indices.size())
    # Allocate directly on the target device instead of building on CPU and
    # copying over with .cuda() — avoids a redundant host allocation + transfer.
    device = torch.device('cpu') if no_cuda else torch.device('cuda')
    out = torch.zeros(shape, dtype=torch.float, device=device)
    return out.scatter_(indices_dim, indices.unsqueeze(indices_dim), 1)
def test_hookrelay_registry(pm: PluginManager) -> None:
    """Hook calls reach registered plugins and stop after unregister."""
    class Api():
        def hello(self, arg: object) -> None:
            """Hook specification taking a single `arg`."""
    pm.add_hookspecs(Api)
    hook = pm.hook
    assert hasattr(hook, 'hello')
    assert (repr(hook.hello).find('hello') != (- 1))
    class Plugin():
        def hello(self, arg):
            """Hook implementation: returns arg + 1."""
            return (arg + 1)
    plugin = Plugin()
    pm.register(plugin)
    out = hook.hello(arg=3)
    # Results are collected into a list, one entry per implementation.
    assert (out == [4])
    assert (not hasattr(hook, 'world'))
    pm.unregister(plugin)
    # No implementations left -> empty result list.
    assert (hook.hello(arg=3) == [])
_importer.add('(?:decoder|transformer)/logits/kernel(\\w*)')
# NOTE(review): the bare call above reads like a stripped decorator
# (`@_importer.add(...)`) registering final_logits for checkpoint keys that
# match the regex — confirm against the original source.
def final_logits(opts, key, val, slot):
    """Rename a source logits-kernel checkpoint entry to its target key.

    `opts` and `key` are unused; `val` passes through unchanged; `slot`
    selects an optimizer-slot key (via SLOT_MAP) vs. the raw parameter key.
    Returns the (new_key, value) pair.
    """
    del opts, key
    # Optimizer slots live under state/param_states, parameters under target.
    prefix = ('state/param_states' if slot else 'target')
    suffix = (('/' + SLOT_MAP[slot]) if slot else '')
    newkey = f'{prefix}/decoder/logits_dense/kernel{suffix}'
    return (newkey, val)
_task('sentence_prediction')
# NOTE(review): the bare call above looks like a stripped
# `@register_task('sentence_prediction')` decorator — confirm.
class SentencePredictionTask(FairseqTask):
    """Fairseq task for sentence-level prediction: classification over
    `--num-classes` labels, or scalar regression with `--regression-target`.

    NOTE(review): `add_args`, `load_dictionary` and `setup_task` are written
    without a bound `self` (they take `parser` / `cls`), and the accessors at
    the bottom look like stripped `@staticmethod` / `@classmethod` /
    `@property` decorators — confirm against the original source.
    """
    def add_args(parser):
        """Register task-specific command-line arguments."""
        parser.add_argument('data', metavar='FILE', help='file prefix for data')
        parser.add_argument('--num-classes', type=int, default=(- 1), help='number of classes')
        parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item')
        parser.add_argument('--separator-token', type=int, default=None, help='add separator token between inputs')
        parser.add_argument('--regression-target', action='store_true', default=False)
        parser.add_argument('--no-shuffle', action='store_true', default=False)
        parser.add_argument('--truncate-sequence', action='store_true', default=False, help='Truncate sequence to max_sequence_length')
        parser.add_argument('--add-prev-output-tokens', action='store_true', default=False, help='Add prev_output_tokens to sample, used for encoder-decoder arch')
    def __init__(self, args, data_dictionary, label_dictionary):
        """Store the input and label dictionaries and resolve max positions."""
        super().__init__(args)
        self.dictionary = data_dictionary
        self._label_dictionary = label_dictionary
        if (not hasattr(args, 'max_positions')):
            self._max_positions = (args.max_source_positions, args.max_target_positions)
        else:
            self._max_positions = args.max_positions
        args.tokens_per_sample = self._max_positions
    def load_dictionary(cls, args, filename, source=True):
        """Load a Dictionary from `filename` and add the <mask> symbol."""
        dictionary = Dictionary.load(filename)
        dictionary.add_symbol('<mask>')
        return dictionary
    def setup_task(cls, args, **kwargs):
        """Build the task: load input0 dict, and the label dict unless regressing."""
        assert (args.num_classes > 0), 'Must set --num-classes'
        data_dict = cls.load_dictionary(args, os.path.join(args.data, 'input0', 'dict.txt'), source=True)
        print('| [input] dictionary: {} types'.format(len(data_dict)))
        label_dict = None
        if (not args.regression_target):
            label_dict = cls.load_dictionary(args, os.path.join(args.data, 'label', 'dict.txt'), source=False)
            print('| [label] dictionary: {} types'.format(len(label_dict)))
        else:
            # Regression has no label vocabulary; reuse the data dictionary.
            label_dict = data_dict
        return SentencePredictionTask(args, data_dict, label_dict)
    def load_dataset(self, split, combine=False, **kwargs):
        """Load `split` (train/valid/test), assembling the nested batch dict."""
        def get_path(type, split):
            # Build the on-disk prefix for a dataset component.
            return os.path.join(self.args.data, type, split)
        def make_dataset(type, dictionary):
            split_path = get_path(type, split)
            dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine)
            return dataset
        input0 = make_dataset('input0', self.source_dictionary)
        # NOTE(review): `type` in the message below is the builtin, not a
        # component name — the assertion text will render oddly; 'input0' was
        # presumably intended. Confirm.
        assert (input0 is not None), 'could not find dataset: {}'.format(get_path(type, split))
        input1 = make_dataset('input1', self.source_dictionary)
        if (self.args.init_token is not None):
            input0 = PrependTokenDataset(input0, self.args.init_token)
        if (input1 is None):
            src_tokens = input0
        else:
            if (self.args.separator_token is not None):
                input1 = PrependTokenDataset(input1, self.args.separator_token)
            # Sentence-pair input: concatenate the two segments.
            src_tokens = ConcatSentencesDataset(input0, input1)
        with data_utils.numpy_seed(self.args.seed):
            shuffle = np.random.permutation(len(src_tokens))
        if self.args.truncate_sequence:
            src_tokens = TruncateDataset(src_tokens, self.args.max_positions)
        dataset = {'id': IdDataset(), 'net_input': {'src_tokens': RightPadDataset(src_tokens, pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_tokens, reduce=False)}, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens, reduce=True)}
        if self.args.add_prev_output_tokens:
            # Encoder-decoder models need shifted targets as decoder input.
            prev_tokens_dataset = RightPadDataset(RollDataset(src_tokens, 1), pad_idx=self.dictionary.pad())
            dataset['net_input'].update(prev_output_tokens=prev_tokens_dataset)
        if (not self.args.regression_target):
            label_dataset = make_dataset('label', self.target_dictionary)
            if (label_dataset is not None):
                # Strip EOS and shift label ids past the dictionary specials.
                dataset.update(target=OffsetTokensDataset(StripTokenDataset(label_dataset, id_to_strip=self.target_dictionary.eos()), offset=(- self.target_dictionary.nspecial)))
        else:
            # Regression targets are plain floats in a sidecar .label file.
            label_path = '{0}.label'.format(get_path('label', split))
            if os.path.exists(label_path):
                dataset.update(target=RawLabelDataset([float(x.strip()) for x in open(label_path).readlines()]))
        nested_dataset = NestedDictionaryDataset(dataset, sizes=[src_tokens.sizes])
        if self.args.no_shuffle:
            dataset = nested_dataset
        else:
            dataset = SortDataset(nested_dataset, sort_order=[shuffle])
        print('| Loaded {0} with #samples: {1}'.format(split, len(dataset)))
        self.datasets[split] = dataset
        return self.datasets[split]
    def build_model(self, args):
        """Build the model and attach the sentence-classification head."""
        from fairseq import models
        model = models.build_model(args, self)
        model.register_classification_head(getattr(args, 'classification_head_name', 'sentence_classification_head'), num_classes=self.args.num_classes)
        return model
    def max_positions(self):
        """Maximum input length(s) accepted by the task."""
        return self._max_positions
    def source_dictionary(self):
        """Dictionary for the input tokens."""
        return self.dictionary
    def target_dictionary(self):
        """Dictionary for the targets (same object as the source dictionary)."""
        return self.dictionary
    def label_dictionary(self):
        """Dictionary for the classification labels."""
        return self._label_dictionary
class CustomModel(openlm.BaseModel):
    """Minimal openlm model stub that always answers with a fixed completion."""

    def create_completion(self, model: Union[str, List[str]], prompt: Union[str, List[str]], suffix: Optional[str] = None, max_tokens: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, n: Optional[int] = None, stream: Optional[bool] = None, logprobs: Optional[int] = None, echo: Optional[bool] = None, stop: Optional[Union[str, List[str]]] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, best_of: Optional[int] = None, logit_bias: Optional[Dict[str, float]] = None, user: Optional[str] = None) -> Dict[str, Any]:
        """Return a canned completion; all sampling parameters are ignored."""
        return {'text': 'Hello world!'}

    def list_models(self) -> List[str]:
        # Fixed: was annotated `-> Dict[str, Any]` but has always returned a list.
        """Return the model names this backend serves."""
        return ['your_model_name']

    def namespace(self) -> str:
        """Return the namespace prefix used to route requests to this backend."""
        return 'your_namespace'
class DownloadStats(base.ScriptBaseWithConfig):
    """Print aggregate statistics about queued and active downloads."""
    ARGS_HELP = ''
    VERSION = '1.0'
    # Fields fetched per incomplete item; the dotted 'down.rate' is read back
    # as d.down_rate below — presumably the engine flattens dots to
    # underscores; confirm.
    FIELDS = ('is_active', 'left_bytes', 'size_bytes', 'down.rate', 'priority')
    # Absolute floor (bytes/s) under which an item always counts as stalled.
    MIN_STALLED_RATE = (5 * 1024)
    # Relative threshold: percent of the mean rate of non-stalled items.
    STALLED_PERCENT = 10
    def add_options(self):
        """No extra CLI options beyond the base class."""
        super(DownloadStats, self).add_options()
    def mainloop(self):
        """Fetch incomplete items and print queue, speed and ETA summaries."""
        proxy = config.engine.open()
        all_items = list(config.engine.multicall('incomplete', self.FIELDS))
        # Queued = inactive but not priority-off (priority 0).
        pending = [d for d in all_items if ((not d.is_active) and (d.priority > 0))]
        print('Queued items: ', fmt.human_size(sum((d.size_bytes for d in pending))), 'in', len(pending), 'item(s)', '[{} free]'.format(fmt.human_size(disk_free(proxy.directory.default())).strip()))
        items = [d for d in all_items if d.is_active]
        if (not items):
            print('No active downloads!')
            return
        # Stalled threshold: STALLED_PERCENT% of the average "good" rate,
        # but never below the absolute minimum.
        good_rates = [d.down_rate for d in items if (d.down_rate > self.MIN_STALLED_RATE)]
        stalled_rate = max(self.MIN_STALLED_RATE, ((((self.STALLED_PERCENT / 100) * sum(good_rates)) / len(good_rates)) if good_rates else 0))
        stalled_count = sum(((d.down_rate < stalled_rate) for d in items))
        global_down_rate = proxy.throttle.global_down.rate()
        total_size = sum((d.size_bytes for d in items))
        total_left = sum((d.left_bytes for d in items))
        # Per-item ETAs only for items downloading at a healthy rate.
        eta_list = [0]
        if (stalled_count < len(items)):
            eta_list = [(d.left_bytes / d.down_rate) for d in items if (d.down_rate >= stalled_rate)]
        # Pessimistic ETA from the global rate (guard against division by 0).
        eta_max = (total_left / (global_down_rate or 1))
        stalled_info = (', {} stalled below {}/s'.format(stalled_count, fmt.human_size(stalled_rate).strip()) if stalled_count else '')
        print('Size left to download: ', fmt.human_size(total_left), 'of', fmt.human_size(total_size).strip())
        print('Overall download speed:', (fmt.human_size(global_down_rate) + '/s'))
        print('ETA (min max): ', fmt_duration(min(eta_list)), '', fmt_duration(eta_max), '...', fmt_duration(max(eta_list)), '[{} item(s){}]'.format(len(items), stalled_info))
class InfoThread(PluginThread):
    """Background thread that fetches online-status info for a batch of URLs.

    Depending on the constructor flags it either updates the file database
    (pid >= 0), adds new packages (add=True), or fills the manager's
    info_results slot for request id `rid`.
    """
    def __init__(self, manager, data, pid=(- 1), rid=(- 1), add=False):
        """Start the worker immediately; `data` is a list of (url, plugin) pairs."""
        super().__init__(manager)
        self.data = data
        self.pid = pid
        self.rid = rid
        self.add = add
        # Accumulates (name, size, status, url) tuples until flushed.
        self.cache = []
        self.start()
    def run(self):
        """Group URLs by plugin, pre-decrypt containers, then dispatch fetches."""
        plugins = {}
        container = []
        for (url, plugin) in self.data:
            if (plugin in plugins):
                plugins[plugin].append(url)
            else:
                plugins[plugin] = [url]
        # Container plugins (e.g. DLC files) are handled separately below.
        for name in self.pyload.plugin_manager.container_plugins:
            if (name in plugins):
                container.extend(((name, url) for url in plugins[name]))
                del plugins[name]
        if (self.pid > (- 1)):
            # Mode 1: refresh info for files already in package `pid`.
            for (pluginname, urls) in plugins.items():
                plugin = self.pyload.plugin_manager.get_plugin(pluginname, True)
                if hasattr(plugin, 'get_info'):
                    self.fetch_for_plugin(pluginname, plugin, urls, self.update_db)
                    self.pyload.files.save()
        elif self.add:
            # Mode 2: fetch info and create packages from the results.
            for (pluginname, urls) in plugins.items():
                plugin = self.pyload.plugin_manager.get_plugin(pluginname, True)
                if hasattr(plugin, 'get_info'):
                    self.fetch_for_plugin(pluginname, plugin, urls, self.update_cache, True)
                else:
                    # No info support: status 3 (unknown), size 0.
                    result = [(url, 0, 3, url) for url in urls]
                    self.update_cache(pluginname, result)
            packs = parse_names(((name, url) for (name, x, y, url) in self.cache))
            self.pyload.log.debug(f'Fetched and generated {len(packs)} packages')
            for (k, v) in packs.items():
                self.pyload.api.add_package(k, v)
            del self.cache[:]
        else:
            # Mode 3: answer an interactive check request (`rid`). Containers
            # are decrypted first and their links merged into `plugins`.
            for (name, url) in container:
                try:
                    data = self.decrypt_container(name, url)
                except Exception:
                    self.pyload.log.warning('Could not decrypt container.', exc_info=(self.pyload.debug > 1), stack_info=(self.pyload.debug > 2))
                    data = []
                for (url, plugin) in data:
                    if (plugin in plugins):
                        plugins[plugin].append(url)
                    else:
                        plugins[plugin] = [url]
            self.m.info_results[self.rid] = {}
            for (pluginname, urls) in plugins.items():
                plugin = self.pyload.plugin_manager.get_plugin(pluginname, True)
                if hasattr(plugin, 'get_info'):
                    self.fetch_for_plugin(pluginname, plugin, urls, self.update_result, True)
                    if self.cache:
                        # Flush whatever is still buffered for this plugin.
                        self.update_result(pluginname, [], True)
                else:
                    result = [(url, 0, 3, url) for url in urls]
                    self.update_result(pluginname, result, True)
            # Sentinel entry telling pollers that the request is complete.
            self.m.info_results[self.rid]['ALL_INFO_FETCHED'] = {}
        # Results stay cached for 5 minutes.
        self.m.timestamp = (time.time() + timedelta(minutes=5).total_seconds())
    def update_db(self, plugin, result):
        """Write fetched info into the file database for package `pid`."""
        self.pyload.files.update_file_info(result, self.pid)
    def update_result(self, plugin, result, force=False):
        """Buffer results; flush to the manager every 20 entries or on `force`."""
        self.cache.extend(result)
        if ((len(self.cache) >= 20) or force):
            tmp = [(name, (url, OnlineStatus(name, plugin, 'unknown', status, int(size)))) for (name, size, status, url) in self.cache]
            data = parse_names(tmp)
            result = {}
            for (k, v) in data.items():
                for (url, status) in v:
                    status.packagename = k
                    result[url] = status
            self.m.set_info_results(self.rid, result)
            self.cache = []
    def update_cache(self, plugin, result):
        """Accumulate results for later package creation (add mode)."""
        self.cache.extend(result)
    def fetch_for_plugin(self, pluginname, plugin, urls, cb, err=None):
        """Resolve `urls` via cache or the plugin's get_info, feeding `cb`.

        On failure and with `err` set, reports all URLs with status 3.
        """
        try:
            result = []
            process = []
            for url in urls:
                if (url in self.m.info_cache):
                    result.append(self.m.info_cache[url])
                else:
                    process.append(url)
            if result:
                self.pyload.log.debug(f'Fetched {len(result)} values from cache for {pluginname}')
                cb(pluginname, result)
            if process:
                self.pyload.log.debug(f'Run Info Fetching for {pluginname}')
                for result in plugin.get_info(process):
                    if (not isinstance(result, list)):
                        result = [result]
                    for res in result:
                        # res[3] is the URL; memoize for later requests.
                        self.m.info_cache[res[3]] = res
                    cb(pluginname, result)
            self.pyload.log.debug(f'Finished Info Fetching for {pluginname}')
        except Exception as exc:
            self.pyload.log.warning(self._('Info Fetching for {name} failed | {err}').format(name=pluginname, err=exc), exc_info=(self.pyload.debug > 1), stack_info=(self.pyload.debug > 2))
            if err:
                result = [(url, 0, 3, url) for url in urls]
                cb(pluginname, result)
    def decrypt_container(self, plugin, url):
        """Run a container plugin on `url` and return its parsed (url, plugin) links."""
        data = []
        self.pyload.log.debug(f'Pre-decrypting {url} with {plugin}')
        pyfile = PyFile(self.pyload.files, (- 1), url, url, 0, 0, '', plugin, (- 1), (- 1))
        pyfile.init_plugin()
        try:
            pyfile.plugin.setup()
            pyfile.plugin.load_to_disk()
            pyfile.plugin.decrypt(pyfile)
            pyfile.plugin.delete_tmp()
            for pack in pyfile.plugin.packages:
                pyfile.plugin.urls.extend(pack[1])
            data = self.pyload.plugin_manager.parse_urls(pyfile.plugin.urls)
            self.pyload.log.debug(f'Got {len(data)} links.')
        except Exception as exc:
            self.pyload.log.debug(f'Pre decrypting error: {exc}', exc_info=(self.pyload.debug > 1), stack_info=(self.pyload.debug > 2))
        finally:
            # Always release the temporary PyFile.
            pyfile.release()
        return data
class FakeIterable(Iterable):
    """One-shot iterable/iterator over an indexable container.

    Iteration is not resettable: once exhausted, further iteration yields
    nothing (the cursor is never rewound).
    """

    def __init__(self, container):
        # Underlying indexable container plus a cursor into it.
        self.iterator = container
        self.size = len(self.iterator)
        self.step = 0

    def __iter__(self) -> Iterator:
        # Acts as its own iterator; resumes from the current cursor.
        return self

    def __next__(self):
        if self.step == self.size:
            raise StopIteration
        value = self.iterator[self.step]
        self.step += 1
        return value
class Lanes(XodrBase):
    """Container for the OpenDRIVE <lanes> element: lane sections, lane
    offsets, and helpers that adjust broken road marks so their dash pattern
    continues seamlessly across lane-section and road boundaries.
    """

    def __init__(self):
        super().__init__()
        self.lanesections = []  # ordered LaneSection objects
        self.laneoffsets = []   # LaneOffset polynomials
        self.roadmarks_adjusted = False  # guard: adjustment may run only once

    def __eq__(self, other):
        if isinstance(other, Lanes) and super().__eq__(other):
            if (self.laneoffsets == other.laneoffsets) and (self.lanesections == other.lanesections):
                return True
        return False

    def add_lanesection(self, lanesection, lanelinks=None):
        """Append a lane section, wiring up any provided lane links first."""
        if lanelinks:
            if not isinstance(lanelinks, list):
                lanelinks = [lanelinks]
            for lanelink in lanelinks:
                for link in lanelink.links:
                    # Each link is connected exactly once.
                    if not link.used:
                        link.predecessor.add_link('successor', link.successor.lane_id)
                        link.successor.add_link('predecessor', link.predecessor.lane_id)
                        link.used = True
        self.lanesections.append(lanesection)
        return self

    def add_laneoffset(self, laneoffset):
        """Append a LaneOffset; raises TypeError for anything else."""
        if not isinstance(laneoffset, LaneOffset):
            raise TypeError(('add_laneoffset requires a LaneOffset as input, not ' + str(type(laneoffset))))
        self.laneoffsets.append(laneoffset)
        return self

    def _check_valid_mark_type(self, lane):
        """Only broken / broken-broken marks have a dash pattern to adjust."""
        return ((lane.roadmark[0].marking_type == RoadMarkType.broken) or (lane.roadmark[0].marking_type == RoadMarkType.broken_broken))

    def _adjust_for_missing_line_offset(self, roadmark):
        """Fill visual gaps left by soffset shifts with explicit road lines."""
        for line in roadmark._line:
            if (line.soffset < 0) or (line.soffset > (line.length + line.soffset)):
                roadmark.add_explicit_road_line(ExplicitRoadLine(line.width, (line.length + line.soffset), line.toffset, 0, line.rule))
            elif line.soffset > line.space:
                roadmark.add_explicit_road_line(ExplicitRoadLine(line.width, (line.soffset - line.space), line.toffset, 0, line.rule))
            if line.soffset < 0:
                line.shift_soffset()

    def _validity_check_for_roadmark_adjustment(self):
        """Decide which lane groups (right/left/center) can be adjusted.

        A group is adjustable only when every section carries exactly one
        roadmark; the center lane additionally requires identical, broken-type
        marks across consecutive sections.
        """
        self._right_lanes_adjustable = (len(self.lanesections[0].rightlanes) > 0)
        self._left_lanes_adjustable = (len(self.lanesections[0].leftlanes) > 0)
        self._center_lane_adjustable = True
        for ls in range(len(self.lanesections) - 1):
            # BUG FIX: these three assignments previously wrote
            # `self.center_lane_adjustable` (missing leading underscore), a
            # different attribute from the `_center_lane_adjustable` flag read
            # by the adjust methods — so the center-lane validity check never
            # took effect.
            if len(self.lanesections[ls].centerlane.roadmark) != 1:
                self._center_lane_adjustable = False
            if self.lanesections[ls].centerlane.roadmark != self.lanesections[(ls + 1)].centerlane.roadmark:
                self._center_lane_adjustable = False
            if ((self.lanesections[ls].centerlane.roadmark[0].marking_type != RoadMarkType.broken) and (self.lanesections[ls].centerlane.roadmark[0].marking_type != RoadMarkType.broken_broken)):
                self._center_lane_adjustable = False
            for rl in range(len(self.lanesections[ls].rightlanes)):
                if self._right_lanes_adjustable:
                    if len(self.lanesections[ls].rightlanes[rl].roadmark) != 1:
                        self._right_lanes_adjustable = False
            for ll in range(len(self.lanesections[ls].leftlanes)):
                if self._left_lanes_adjustable:
                    if len(self.lanesections[ls].leftlanes[ll].roadmark) != 1:
                        self._left_lanes_adjustable = False

    def _get_previous_remainder(self, connected_lane_section, i_line, lane_side, contact_point, lane_index, lane_section_index, start_or_end):
        """Return the dash remainder/offset carried over from the neighboring
        lane section (or from `connected_lane_section` at a road edge) for
        line `i_line` of the given lane, so broken marks continue seamlessly.
        """
        active_lane_sec = self.lanesections[lane_section_index]
        neighbor_lane_sec = None
        if start_or_end == 'end':
            on_edge = (lane_section_index == (len(self.lanesections) - 1))
            connection = 'successor'
            if not on_edge:
                neighbor_lane_sec = self.lanesections[(lane_section_index + 1)]
        else:
            on_edge = (lane_section_index == 0)
            connection = 'predecessor'
            if not on_edge:
                neighbor_lane_sec = self.lanesections[(lane_section_index - 1)]
        linked_lane_id = 0
        found_linked_lane_id = None
        # NOTE(review): `neighboring_lane` is selected with linked_lane_id
        # still 0, *before* found_linked_lane_id updates it below — only the
        # on_edge branch uses the updated index. Possibly intentional; confirm.
        if lane_side == 'right':
            found_linked_lane_id = active_lane_sec.rightlanes[lane_index].get_linked_lane_id(connection)
            if neighbor_lane_sec:
                neighboring_lane = neighbor_lane_sec.rightlanes[linked_lane_id]
        elif lane_side == 'left':
            found_linked_lane_id = active_lane_sec.leftlanes[lane_index].get_linked_lane_id(connection)
            if neighbor_lane_sec:
                neighboring_lane = neighbor_lane_sec.leftlanes[linked_lane_id]
        elif neighbor_lane_sec:
            neighboring_lane = neighbor_lane_sec.centerlane
        if found_linked_lane_id:
            linked_lane_id = (abs(found_linked_lane_id) - 1)
        prev_remainder = 0
        if on_edge:
            # At a road edge the continuation comes from the connected road's
            # lane section; left/right swap when connecting at its start.
            if lane_side == 'right':
                if (contact_point == ContactPoint.end) and connected_lane_section.rightlanes[linked_lane_id].roadmark[0]._line:
                    prev_remainder = connected_lane_section.rightlanes[linked_lane_id].roadmark[0]._line[i_line]._remainder
                elif (contact_point == ContactPoint.start) and connected_lane_section.leftlanes[linked_lane_id].roadmark[0]._line:
                    prev_remainder = connected_lane_section.leftlanes[linked_lane_id].roadmark[0]._line[i_line].soffset
            if lane_side == 'left':
                if (contact_point == ContactPoint.end) and connected_lane_section.leftlanes[linked_lane_id].roadmark[0]._line:
                    prev_remainder = connected_lane_section.leftlanes[linked_lane_id].roadmark[0]._line[i_line]._remainder
                elif (contact_point == ContactPoint.start) and connected_lane_section.rightlanes[linked_lane_id].roadmark[0]._line:
                    prev_remainder = connected_lane_section.rightlanes[linked_lane_id].roadmark[0]._line[i_line].soffset
            if (lane_side == 'center') and connected_lane_section.centerlane.roadmark[0]._line:
                if contact_point == ContactPoint.end:
                    prev_remainder = connected_lane_section.centerlane.roadmark[0]._line[i_line]._remainder
                elif contact_point == ContactPoint.start:
                    prev_remainder = connected_lane_section.centerlane.roadmark[0]._line[i_line].soffset
        elif start_or_end == 'start':
            prev_remainder = neighboring_lane.roadmark[0]._line[i_line]._remainder
        else:
            prev_remainder = neighboring_lane.roadmark[0]._line[i_line].soffset
        return prev_remainder

    def _get_seg_length(self, total_road_length, lane_section_index):
        """Length of lane section `lane_section_index` along the road."""
        if len(self.lanesections) == 1:
            seg_length = total_road_length
        elif lane_section_index == 0:
            seg_length = self.lanesections[1].s
        elif lane_section_index == (len(self.lanesections) - 1):
            seg_length = (total_road_length - self.lanesections[lane_section_index].s)
        else:
            seg_length = (self.lanesections[(lane_section_index + 1)].s - self.lanesections[lane_section_index].s)
        return seg_length

    def adjust_road_marks_from_start(self, total_road_length, connected_lane_section=None, contact_point=ContactPoint.end):
        """Walk the lane sections front-to-back, adjusting each broken mark's
        remainder so the dash pattern continues from `connected_lane_section`
        (or starts at zero offset when none is given).
        """
        if not self.roadmarks_adjusted:
            self._validity_check_for_roadmark_adjustment()
            self.roadmarks_adjusted = True

            def set_zero_offset_to_lines(lane, seg_length):
                # No predecessor: start every line's pattern at soffset 0.
                for i_line in range(len(lane.roadmark[0]._line)):
                    lane.roadmark[0]._line[i_line].adjust_remainder(seg_length, soffset=0)

            for ls in range(0, len(self.lanesections)):
                seg_length = self._get_seg_length(total_road_length, ls)
                if self._right_lanes_adjustable:
                    for rl in range(len(self.lanesections[ls].rightlanes)):
                        if self._check_valid_mark_type(self.lanesections[ls].rightlanes[rl]):
                            if (ls == 0) and (connected_lane_section is None):
                                set_zero_offset_to_lines(self.lanesections[ls].rightlanes[rl], seg_length)
                            else:
                                for i_line in range(len(self.lanesections[ls].rightlanes[rl].roadmark[0]._line)):
                                    prev_remainder = self._get_previous_remainder(connected_lane_section, i_line, 'right', contact_point, rl, ls, 'start')
                                    self.lanesections[ls].rightlanes[rl].roadmark[0]._line[i_line].adjust_remainder(seg_length, previous_remainder=prev_remainder)
                            self._adjust_for_missing_line_offset(self.lanesections[ls].rightlanes[rl].roadmark[0])
                if self._left_lanes_adjustable:
                    for ll in range(len(self.lanesections[ls].leftlanes)):
                        if self._check_valid_mark_type(self.lanesections[ls].leftlanes[ll]):
                            if (ls == 0) and (connected_lane_section is None):
                                set_zero_offset_to_lines(self.lanesections[ls].leftlanes[ll], seg_length)
                            else:
                                for i_line in range(len(self.lanesections[ls].leftlanes[ll].roadmark[0]._line)):
                                    prev_remainder = self._get_previous_remainder(connected_lane_section, i_line, 'left', contact_point, ll, ls, 'start')
                                    self.lanesections[ls].leftlanes[ll].roadmark[0]._line[i_line].adjust_remainder(seg_length, previous_remainder=prev_remainder)
                            self._adjust_for_missing_line_offset(self.lanesections[ls].leftlanes[ll].roadmark[0])
                if self._center_lane_adjustable:
                    if self._check_valid_mark_type(self.lanesections[ls].centerlane):
                        if (ls == 0) and (connected_lane_section is None):
                            set_zero_offset_to_lines(self.lanesections[ls].centerlane, seg_length)
                        else:
                            for i_line in range(len(self.lanesections[ls].centerlane.roadmark[0]._line)):
                                prev_remainder = self._get_previous_remainder(connected_lane_section, i_line, 'center', contact_point, None, ls, 'start')
                                self.lanesections[ls].centerlane.roadmark[0]._line[i_line].adjust_remainder(seg_length, previous_remainder=prev_remainder)
                        self._adjust_for_missing_line_offset(self.lanesections[ls].centerlane.roadmark[0])

    def adjust_road_marks_from_end(self, total_road_length, connected_lane_section=None, contact_point=ContactPoint.end):
        """Walk the lane sections back-to-front, adjusting each broken mark's
        soffset so the dash pattern meets `connected_lane_section` (or ends
        with zero remainder when none is given).
        """
        if not self.roadmarks_adjusted:
            self._validity_check_for_roadmark_adjustment()
            self.roadmarks_adjusted = True

            def set_zero_remainder_to_lines(lane, seg_length):
                # No successor: end every line's pattern with remainder 0.
                for i_line in range(len(lane.roadmark[0]._line)):
                    lane.roadmark[0]._line[i_line].adjust_soffset(seg_length, remainder=0)

            for ls in range((len(self.lanesections) - 1), (- 1), (- 1)):
                seg_length = self._get_seg_length(total_road_length, ls)
                if self._right_lanes_adjustable:
                    for rl in range(len(self.lanesections[ls].rightlanes)):
                        if self._check_valid_mark_type(self.lanesections[ls].rightlanes[rl]):
                            if (ls == (len(self.lanesections) - 1)) and (connected_lane_section is None):
                                set_zero_remainder_to_lines(self.lanesections[ls].rightlanes[rl], seg_length)
                            else:
                                for i_line in range(len(self.lanesections[ls].rightlanes[rl].roadmark[0]._line)):
                                    prev_remainder = self._get_previous_remainder(connected_lane_section, i_line, 'right', contact_point, rl, ls, 'end')
                                    self.lanesections[ls].rightlanes[rl].roadmark[0]._line[i_line].adjust_soffset(seg_length, previous_offset=prev_remainder)
                            self._adjust_for_missing_line_offset(self.lanesections[ls].rightlanes[rl].roadmark[0])
                if self._left_lanes_adjustable:
                    for ll in range(len(self.lanesections[ls].leftlanes)):
                        if self._check_valid_mark_type(self.lanesections[ls].leftlanes[ll]):
                            if (ls == (len(self.lanesections) - 1)) and (connected_lane_section is None):
                                set_zero_remainder_to_lines(self.lanesections[ls].leftlanes[ll], seg_length)
                            else:
                                for i_line in range(len(self.lanesections[ls].leftlanes[ll].roadmark[0]._line)):
                                    prev_remainder = self._get_previous_remainder(connected_lane_section, i_line, 'left', contact_point, ll, ls, 'end')
                                    self.lanesections[ls].leftlanes[ll].roadmark[0]._line[i_line].adjust_soffset(seg_length, previous_offset=prev_remainder)
                            self._adjust_for_missing_line_offset(self.lanesections[ls].leftlanes[ll].roadmark[0])
                if self._center_lane_adjustable:
                    if self._check_valid_mark_type(self.lanesections[ls].centerlane):
                        if (ls == (len(self.lanesections) - 1)) and (connected_lane_section is None):
                            set_zero_remainder_to_lines(self.lanesections[ls].centerlane, seg_length)
                        else:
                            for i_line in range(len(self.lanesections[ls].centerlane.roadmark[0]._line)):
                                prev_remainder = self._get_previous_remainder(connected_lane_section, i_line, 'center', contact_point, None, ls, 'end')
                                self.lanesections[ls].centerlane.roadmark[0]._line[i_line].adjust_soffset(seg_length, previous_offset=prev_remainder)
                        self._adjust_for_missing_line_offset(self.lanesections[ls].centerlane.roadmark[0])

    def get_element(self):
        """Serialize to the <lanes> XML element (offsets first, then sections)."""
        element = ET.Element('lanes')
        self._add_additional_data_to_element(element)
        for offset in self.laneoffsets:
            element.append(offset.get_element())
        for section in self.lanesections:
            element.append(section.get_element())
        return element
def run_gamma(filepath_ref, filepath_eval, random_subset=None, max_gamma=1.1, dose_threshold=1, distance_threshold=1):
    """Compute a local gamma-index map between two DICOM RT dose files.

    Args:
        filepath_ref: path to the reference dose DICOM file.
        filepath_eval: path to the evaluated dose DICOM file.
        random_subset: optional number of random reference points to evaluate.
        max_gamma: cap on computed gamma values.
        dose_threshold: dose-difference criterion (percent).
        distance_threshold: distance-to-agreement criterion (mm).

    Returns:
        The gamma array produced by pymedphys.gamma.
    """
    if random_subset is not None:
        # Fixed seed so the random point subset is reproducible between runs.
        np.random.seed(42)
    # pydicom.read_file is deprecated (removed in pydicom 3.x); dcmread is the
    # supported spelling and behaves identically here.
    ds_ref = pydicom.dcmread(filepath_ref)
    ds_eval = pydicom.dcmread(filepath_eval)
    axes_reference = load_yx_from_dicom(ds_ref)
    dose_reference = dose_from_dataset(ds_ref)
    axes_evaluation = load_yx_from_dicom(ds_eval)
    dose_evaluation = dose_from_dataset(ds_eval)
    gamma = pymedphys.gamma(axes_reference, dose_reference, axes_evaluation, dose_evaluation, dose_threshold, distance_threshold, lower_percent_dose_cutoff=20, interp_fraction=10, max_gamma=max_gamma, local_gamma=True, skip_once_passed=True, random_subset=random_subset)
    return gamma
def write_train_pkl(obj_list_path, meta, output_dir, cate):
    """Dump the trained per-object latent codes of a DeformedImplicitField
    to `<output_dir>/<cate>_train.pkl` as {object_name: latent_code_ndarray}.

    Args:
        obj_list_path: path to the object-name list; order must match the
            model's latent-code embedding rows.
        meta: kwargs for DeformedImplicitField, including 'checkpoint_path'.
        output_dir: directory for the output pickle.
        cate: category name used in the output filename.
    """
    import os  # local import: the file-level import block is outside this chunk

    obj_list = read_obj_list(obj_list_path)
    model = DeformedImplicitField(**meta)
    # map_location keeps this runnable on CPU-only machines even when the
    # checkpoint was saved from GPU.
    model.load_state_dict(torch.load(meta['checkpoint_path'], map_location='cpu'))
    # One latent-code row per listed object, in the same order.
    assert len(obj_list) == model.latent_codes.weight.size()[0]
    pkl_info = {name: model.get_latent_code(torch.tensor(i)).detach().numpy() for i, name in enumerate(obj_list)}
    # os.path.join fixes silent filename corruption when output_dir lacks a
    # trailing separator (the old code used bare string concatenation).
    with open(os.path.join(output_dir, f'{cate}_train.pkl'), 'wb') as f:
        pkl.dump(pkl_info, f)
def convert_esm_checkpoint_to_pytorch(model: str, pytorch_dump_folder_path: str, classification_head: bool, push_to_repo: str, auth_token: str):
    """Convert an original ESM / ESMFold checkpoint into the HuggingFace format.

    Loads the original model named by ``model`` from MODEL_MAPPING, copies its
    weights tensor-by-tensor into a freshly constructed HF model, verifies that
    tokenization and forward outputs match the original, then saves (and
    optionally pushes) the converted model and tokenizer.

    Args:
        model: key into MODEL_MAPPING; names starting with 'esmfold' select the
            folding pipeline, everything else the plain language model.
        pytorch_dump_folder_path: output directory for the converted model.
        classification_head: also convert the 'mnli' sequence-classification head.
        push_to_repo: optional hub repo id to push the result to.
        auth_token: hub auth token used when pushing.

    Raises:
        Exception: if tokenization or model outputs do not match the original.
    """
    # --- Load the original model -------------------------------------------
    if model.startswith('esmfold'):
        esm = MODEL_MAPPING[model]()
    else:
        # Plain ESM checkpoints also return their alphabet (tokenizer).
        (esm, alphabet) = MODEL_MAPPING[model]()
    esm.eval()
    # --- Extract architecture hyperparameters ------------------------------
    # Three layouts exist: ESMFold (config on esm.cfg), ESM-1 style (esm.args),
    # and ESM-2 style (attributes directly on the model).
    if model.startswith('esmfold'):
        embed_dim = esm.esm.embed_dim
        num_layers = esm.esm.num_layers
        num_attention_heads = esm.esm.attention_heads
        intermediate_size = (4 * embed_dim)
        token_dropout = esm.esm.token_dropout
        emb_layer_norm_before = False
        position_embedding_type = 'rotary'
        is_folding_model = True
        esmfold_config = EsmFoldConfig()
        # Copy flat config entries; 'trunk' and its 'structure_module' are
        # nested configs handled separately below.
        for (key, val) in esm.cfg.items():
            if (hasattr(esmfold_config, key) and (key != 'trunk')):
                setattr(esmfold_config, key, val)
        for (key, val) in esm.cfg.trunk.items():
            if (hasattr(esmfold_config.trunk, key) and (key != 'structure_module')):
                setattr(esmfold_config.trunk, key, val)
        for (key, val) in esm.cfg.trunk.structure_module.items():
            if hasattr(esmfold_config.trunk.structure_module, key):
                setattr(esmfold_config.trunk.structure_module, key, val)
    elif hasattr(esm, 'args'):
        # Indicates an ESM-1b or ESM-1v model; config lives on an args namespace.
        embed_dim = esm.args.embed_dim
        num_layers = esm.args.layers
        num_attention_heads = esm.args.attention_heads
        intermediate_size = esm.args.ffn_embed_dim
        token_dropout = esm.args.token_dropout
        emb_layer_norm_before = (True if esm.emb_layer_norm_before else False)
        position_embedding_type = 'absolute'
        is_folding_model = False
        esmfold_config = None
    else:
        # Indicates an ESM-2 model; attributes live directly on the module.
        embed_dim = esm.embed_dim
        num_layers = esm.num_layers
        num_attention_heads = esm.attention_heads
        intermediate_size = (4 * embed_dim)
        token_dropout = esm.token_dropout
        emb_layer_norm_before = False
        position_embedding_type = 'rotary'
        is_folding_model = False
        esmfold_config = None
    if is_folding_model:
        # ESMFold wraps the language model; its alphabet hangs off esm.esm.
        alphabet = esm.esm.alphabet
    vocab_list = tuple(alphabet.all_toks)
    mask_token_id = alphabet.mask_idx
    pad_token_id = alphabet.padding_idx
    # The inner language model whose weights we transfer.
    if is_folding_model:
        original_esm_model = esm.esm
    else:
        original_esm_model = esm
    config = EsmConfig(vocab_size=original_esm_model.embed_tokens.num_embeddings, mask_token_id=mask_token_id, hidden_size=embed_dim, num_hidden_layers=num_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, max_position_embeddings=1026, layer_norm_eps=1e-05, attention_probs_dropout_prob=0.0, hidden_dropout_prob=0.0, pad_token_id=pad_token_id, emb_layer_norm_before=emb_layer_norm_before, token_dropout=token_dropout, position_embedding_type=position_embedding_type, is_folding_model=is_folding_model, esmfold_config=esmfold_config, vocab_list=vocab_list)
    if classification_head:
        config.num_labels = esm.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our ESM config:', config)
    # --- Build the empty HF model of the right flavor ----------------------
    if model.startswith('esmfold'):
        model_class = EsmForProteinFolding
    elif classification_head:
        model_class = EsmForSequenceClassification
    else:
        model_class = EsmForMaskedLM
    # NOTE: `model` is rebound from the checkpoint name (str) to the HF model here.
    model = model_class(config)
    model.eval()
    # --- Copy embedding weights --------------------------------------------
    model.esm.embeddings.word_embeddings.weight = original_esm_model.embed_tokens.weight
    if (position_embedding_type == 'absolute'):
        model.esm.embeddings.position_embeddings.weight = original_esm_model.embed_positions.weight
    if config.emb_layer_norm_before:
        model.esm.embeddings.layer_norm.weight = original_esm_model.emb_layer_norm_before.weight
        model.esm.embeddings.layer_norm.bias = original_esm_model.emb_layer_norm_before.bias
    model.esm.encoder.emb_layer_norm_after.weight = original_esm_model.emb_layer_norm_after.weight
    model.esm.encoder.emb_layer_norm_after.bias = original_esm_model.emb_layer_norm_after.bias
    # --- Copy transformer layers one by one --------------------------------
    for i in range(config.num_hidden_layers):
        layer: EsmLayer = model.esm.encoder.layer[i]
        esm_layer = original_esm_model.layers[i]
        self_attn: EsmSelfAttention = layer.attention.self
        # Sanity check: Q/K/V projections must all be square hidden_size matrices.
        assert (esm_layer.self_attn.k_proj.weight.data.shape == esm_layer.self_attn.q_proj.weight.data.shape == esm_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)))
        self_attn.query.weight.data = esm_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = esm_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = esm_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = esm_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = esm_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = esm_layer.self_attn.v_proj.bias
        if (getattr(esm_layer.self_attn, 'rot_emb', None) is not None):
            # Rotary embeddings only exist on ESM-2 style attention.
            self_attn.rotary_embeddings.inv_freq.data = esm_layer.self_attn.rot_emb.inv_freq
        layer.attention.LayerNorm.weight = esm_layer.self_attn_layer_norm.weight
        layer.attention.LayerNorm.bias = esm_layer.self_attn_layer_norm.bias
        layer.LayerNorm.weight = esm_layer.final_layer_norm.weight
        layer.LayerNorm.bias = esm_layer.final_layer_norm.bias
        self_output: EsmSelfOutput = layer.attention.output
        assert (self_output.dense.weight.shape == esm_layer.self_attn.out_proj.weight.shape)
        self_output.dense.weight = esm_layer.self_attn.out_proj.weight
        self_output.dense.bias = esm_layer.self_attn.out_proj.bias
        intermediate: EsmIntermediate = layer.intermediate
        assert (intermediate.dense.weight.shape == esm_layer.fc1.weight.shape)
        intermediate.dense.weight = esm_layer.fc1.weight
        intermediate.dense.bias = esm_layer.fc1.bias
        bert_output: EsmOutput = layer.output
        assert (bert_output.dense.weight.shape == esm_layer.fc2.weight.shape)
        bert_output.dense.weight = esm_layer.fc2.weight
        bert_output.dense.bias = esm_layer.fc2.bias
    # --- Copy head weights --------------------------------------------------
    if is_folding_model:
        model.esm_s_combine.data = esm.esm_s_combine.data
        model.af2_to_esm.data = esm.af2_to_esm.data
        transfer_and_check_weights(esm.embedding, model.embedding)
        transfer_and_check_weights(esm.esm_s_mlp, model.esm_s_mlp)
        transfer_and_check_weights(esm.trunk, model.trunk)
        transfer_and_check_weights(esm.distogram_head, model.distogram_head)
        transfer_and_check_weights(esm.ptm_head, model.ptm_head)
        transfer_and_check_weights(esm.lm_head, model.lm_head)
        transfer_and_check_weights(esm.lddt_head, model.lddt_head)
    elif classification_head:
        # NOTE(review): this first line reads `esm.esm.classification_heads`
        # while the next three (and the num_labels lookup above) use
        # `esm.classification_heads`, and the verification below uses
        # `esm.model.classification_heads` — three different accessors for what
        # looks like the same head. Confirm which attribute path is correct for
        # the targeted checkpoint layout.
        model.classifier.dense.weight = esm.esm.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = esm.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = esm.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = esm.classification_heads['mnli'].out_proj.bias
    else:
        model.lm_head.dense.weight = esm.lm_head.dense.weight
        model.lm_head.dense.bias = esm.lm_head.dense.bias
        model.lm_head.layer_norm.weight = esm.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = esm.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = esm.lm_head.weight
        model.lm_head.bias = esm.lm_head.bias
        transfer_and_check_weights(esm.contact_head, model.esm.contact_head)
    # --- Verify tokenization matches the original ---------------------------
    if is_folding_model:
        sample_data = SAMPLE_DATA[:2]
    else:
        sample_data = SAMPLE_DATA
    if is_folding_model:
        hf_tokenizer = get_esmfold_tokenizer()
        hf_tokens = hf_tokenizer([row[1] for row in sample_data], return_tensors='pt', padding=True, add_special_tokens=False)
        (esmfold_aas, esmfold_mask, _, _, _) = esmfold_encode_sequences([row[1] for row in sample_data])
        success = (torch.all((hf_tokens['input_ids'] == esmfold_aas)) and torch.all((hf_tokens['attention_mask'] == esmfold_mask)))
    else:
        batch_converter = alphabet.get_batch_converter()
        (batch_labels, batch_strs, batch_tokens) = batch_converter(sample_data)
        # Build an EsmTokenizer from the original alphabet's vocabulary.
        with TemporaryDirectory() as tempdir:
            vocab = '\n'.join(alphabet.all_toks)
            vocab_file = (Path(tempdir) / 'vocab.txt')
            vocab_file.write_text(vocab)
            hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file))
        hf_tokens = hf_tokenizer([row[1] for row in sample_data], return_tensors='pt', padding=True)
        success = torch.all((hf_tokens['input_ids'] == batch_tokens))
    # NOTE(review): both branches of this conditional print an empty string
    # (the success/failure markers appear to have been stripped) — the printed
    # line carries no information either way.
    print('Do both models tokenizers output the same tokens?', ('' if success else ''))
    if (not success):
        raise Exception('Tokenization does not match!')
    # --- Verify forward outputs match the original --------------------------
    with torch.no_grad():
        if is_folding_model:
            their_output = esm.cuda().infer([row[1] for row in sample_data])
            our_output = model.cuda()(input_ids=hf_tokens['input_ids'].cuda(), attention_mask=hf_tokens['attention_mask'].cuda())
        else:
            our_output = model(**hf_tokens, output_hidden_states=True)
            our_output = our_output['logits']
            if classification_head:
                # NOTE(review): accessor inconsistency — see the note in the
                # classification-head weight-copy block above.
                their_output = esm.model.classification_heads['mnli'](esm.extract_features(batch_tokens))
            else:
                # repr_layers well beyond the real depth requests every layer.
                their_output = esm(hf_tokens['input_ids'], repr_layers=list(range(999)))
                their_output = their_output['logits']
    if is_folding_model:
        max_absolute_diff = torch.max(torch.abs((our_output['positions'] - their_output['positions']))).item()
        success = torch.allclose(our_output['positions'], their_output['positions'], atol=1e-05)
    else:
        max_absolute_diff = torch.max(torch.abs((our_output - their_output))).item()
        success = torch.allclose(our_output, their_output, atol=1e-05)
    print(f'max_absolute_diff = {max_absolute_diff}')
    # NOTE(review): empty success/failure marker, as above.
    print('Do both models output the same tensors?', ('' if success else ''))
    if (not success):
        raise Exception('Something went wRoNg')
    # --- Also verify contact prediction for non-folding models --------------
    if (not is_folding_model):
        our_output = model.predict_contacts(hf_tokens['input_ids'], hf_tokens['attention_mask'])
        their_output = esm.predict_contacts(hf_tokens['input_ids'])
        max_absolute_diff = torch.max(torch.abs((our_output - their_output))).item()
        success = torch.allclose(our_output, their_output, atol=1e-05)
        print('Contact prediction testing:')
        print(f'max_absolute_diff = {max_absolute_diff}')
        print('Do both models output the same tensors?', ('' if success else ''))
        if (not success):
            raise Exception('Something went wRoNg')
    # --- Save and optionally push -------------------------------------------
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    # Free the original model before any upload work.
    del esm
    print(f'Saving tokenizer to {pytorch_dump_folder_path}')
    hf_tokenizer.save_pretrained(pytorch_dump_folder_path)
    if push_to_repo:
        model.push_to_hub(repo_id=push_to_repo, use_auth_token=auth_token)
        hf_tokenizer.push_to_hub(repo_id=push_to_repo, use_auth_token=auth_token)
class ErrorBox(QtWidgets.QWidget):
    """Transparent overlay that draws a red border around its parent widget.

    The overlay tracks the parent's size via an event filter, ignores all
    mouse input, and starts hidden; callers toggle visibility to flag an
    error state.
    """

    def __init__(self, parent):
        QtWidgets.QWidget.__init__(self, parent)
        # Watch the parent's events so the overlay always covers it.
        parent.installEventFilter(self)
        self.setAttribute(QtCore.Qt.WidgetAttribute.WA_TransparentForMouseEvents)
        self._resize()
        self.setVisible(False)

    def eventFilter(self, obj, ev):
        """Track parent resizes; never consume the event."""
        if ev.type() == QtCore.QEvent.Type.Resize:
            self._resize()
        return False

    def _resize(self):
        """Stretch the overlay across the parent's full area."""
        parent = self.parent()
        self.setGeometry(0, 0, parent.width(), parent.height())

    def paintEvent(self, ev):
        """Draw a 2px red rectangle along the widget's edges."""
        painter = QtGui.QPainter(self)
        painter.setPen(fn.mkPen(color='r', width=2))
        painter.drawRect(self.rect())
        painter.end()
class webvision_dataset(Dataset):
    """WebVision image dataset.

    Modes:
      - ``'test'``: validation images/labels from ``info/val_filelist.txt``.
      - ``'all'``: full Google (+Flickr when ``num_class == 1000``) training
        list, optionally subsampled to ``num_samples``.
      - ``'meta'``: the 20 lowest-loss images per class, chosen from ``losses``
        (a per-sample loss tensor — required in this mode).
    """

    def __init__(self, root_dir, transform, mode, num_class, num_samples=None, losses=None):
        # `losses` previously defaulted to a mutable [] — use a None sentinel.
        losses = [] if losses is None else losses
        self.root = root_dir
        self.transform = transform
        self.mode = mode
        if self.mode == 'test':
            with open(os.path.join(self.root, 'info/val_filelist.txt')) as f:
                lines = f.readlines()
            self.val_imgs = []
            self.val_labels = {}
            for line in lines:
                (img, target) = line.split()
                target = int(target)
                # Keep only labels inside the requested class subset.
                if target < num_class:
                    self.val_imgs.append(img)
                    self.val_labels[img] = target
        else:
            with open(os.path.join(self.root, 'info/train_filelist_google.txt')) as f:
                lines = f.readlines()
            if num_class == 1000:
                # The full 1000-class benchmark also includes the Flickr split.
                with open(os.path.join(self.root, 'info/train_filelist_flickr.txt')) as f:
                    lines += f.readlines()
            train_imgs = []
            self.train_labels = {}
            for line in lines:
                (img, target) = line.split()
                target = int(target)
                if target < num_class:
                    train_imgs.append(img)
                    self.train_labels[img] = target
            if self.mode == 'all':
                if num_samples is not None:
                    self.train_imgs = sample_traning_set(train_imgs, self.train_labels, num_class, num_samples)
                else:
                    self.train_imgs = train_imgs
            elif self.mode == 'meta':
                # Select the 20 lowest-loss samples from every class.
                idx_to_meta = []
                all_labels = [self.train_labels[imgs] for imgs in train_imgs]
                data_list = {}
                for j in range(num_class):
                    data_list[j] = [i for (i, label) in enumerate(all_labels) if label == j]
                for (cls_idx, img_id_list) in data_list.items():
                    (_, indexs) = torch.topk(losses[img_id_list], 20, largest=False)
                    idx_to_meta.extend(torch.tensor(img_id_list)[indexs].tolist())
                self.train_imgs = [train_imgs[i] for i in idx_to_meta]
            print(('%s data has a size of %d' % (self.mode, len(self.train_imgs))))

    def __getitem__(self, index):
        """Return (img, target) in 'meta'/'test' mode, (img, target, index) in 'all'."""
        if self.mode == 'meta':
            img_path = self.train_imgs[index]
            target = self.train_labels[img_path]
            image = Image.open(os.path.join(self.root, img_path)).convert('RGB')
            img = self.transform(image)
            # BUG FIX: removed an unreachable `return (img1, img2)` that
            # followed this return and referenced undefined names.
            return (img, target)
        elif self.mode == 'all':
            img_path = self.train_imgs[index]
            target = self.train_labels[img_path]
            image = Image.open(os.path.join(self.root, img_path)).convert('RGB')
            img = self.transform(image)
            return (img, target, index)
        elif self.mode == 'test':
            img_path = self.val_imgs[index]
            target = self.val_labels[img_path]
            image = Image.open(os.path.join(self.root, 'val_images_256/', img_path)).convert('RGB')
            img = self.transform(image)
            return (img, target)

    def __len__(self):
        if self.mode != 'test':
            return len(self.train_imgs)
        else:
            return len(self.val_imgs)
def test_dtype_rescaling_uint8_half(tmpdir, runner):
    """Converting a uint8 raster with --scale-ratio 0.5 maps bands onto 0..127."""
    output_path = str(tmpdir.join('test.tif'))
    cli_args = ['convert', 'tests/data/RGB.byte.tif', output_path, '--scale-ratio', '0.5']
    result = runner.invoke(main_group, cli_args)
    assert result.exit_code == 0
    with rasterio.open(output_path) as src:
        for band in src.read():
            # Each band's min should land on 0 and max on 127 (6 decimals).
            assert round(band.min() - 0, 6) == 0.0
            assert round(band.max() - 127, 6) == 0.0
def test_interactive_with_file_dependency(tester: CommandTester, repo: TestRepository, source_dir: Path, fixture_dir: FixtureDirGetter) -> None:
    """Interactive `init` accepts a local wheel file as a path dependency."""
    repo.add_package(get_package('pendulum', '2.0.0'))
    repo.add_package(get_package('pytest', '3.6.0'))
    # Copy the demo wheel next to the project so it can be referenced by a
    # relative path during the interactive prompts.
    demo = fixture_dir('distributions') / 'demo-0.1.0-py2.py3-none-any.whl'
    shutil.copyfile(str(demo), str(source_dir / demo.name))
    inputs = ['my-package', '1.2.3', 'This is a description', 'n', 'MIT', '~2.7 || ^3.6', '', './demo-0.1.0-py2.py3-none-any.whl', '', '', 'pytest', '0', '', '', '\n']
    tester.execute(inputs='\n'.join(inputs))
    expected = '[tool.poetry]\nname = "my-package"\nversion = "1.2.3"\ndescription = "This is a description"\nauthors = ["Your Name <>"]\nlicense = "MIT"\nreadme = "README.md"\n\n[tool.poetry.dependencies]\npython = "~2.7 || ^3.6"\ndemo = {path = "demo-0.1.0-py2.py3-none-any.whl"}\n\n[tool.poetry.group.dev.dependencies]\npytest = "^3.6.0"\n'
    assert expected in tester.io.fetch_output()
def dfs_visit(node: PackageNode, back_edges: dict[DFSNodeID, list[PackageNode]], visited: set[DFSNodeID], sorted_nodes: list[PackageNode]) -> None:
    """Depth-first visit from ``node``.

    For every reachable neighbor, records which node pointed at it in
    ``back_edges``, and prepends each finished node to ``sorted_nodes`` so the
    final list is topologically ordered. Nodes already in ``visited`` are
    skipped entirely.
    """
    if node.id not in visited:
        visited.add(node.id)
        for child in node.reachable():
            # Record the reverse edge before descending into the child.
            back_edges[child.id].append(node)
            dfs_visit(child, back_edges, visited, sorted_nodes)
        # All descendants are placed; this node goes in front of them.
        sorted_nodes.insert(0, node)
class TestRunner(InferenceRunner):
    """Inference runner that evaluates a model over a test dataloader.

    Supports distributed evaluation (results gathered across ranks, with
    padding samples from the last batch excluded) and optional test-time
    augmentation (multi-scale + horizontal flip).
    """

    def __init__(self, test_cfg, inference_cfg, base_cfg=None):
        super().__init__(inference_cfg, base_cfg)
        self.test_dataloader = self._build_dataloader(test_cfg['data'])
        # When the dataset size is not divisible by world_size, the sampler
        # pads with extra samples; remember how many to drop from the final
        # gathered batch so metrics are computed on real data only.
        extra_data = (len(self.test_dataloader.dataset) % self.world_size)
        self.test_exclude_num = ((self.world_size - extra_data) if (extra_data != 0) else 0)
        # `tta` is falsy (False) when disabled; otherwise a config mapping
        # with 'scales', 'biases' and 'flip' entries (see _tta_compute).
        self.tta = test_cfg.get('tta', False)

    def __call__(self):
        """Run evaluation over the whole test dataloader and return the
        accumulated metric results as a dict."""
        self.metric.reset()
        self.model.eval()
        res = {}
        self.logger.info('Start testing')
        with torch.no_grad():
            for (idx, (image, mask)) in enumerate(self.test_dataloader):
                if self.use_gpu:
                    image = image.cuda()
                    mask = mask.cuda()
                if self.tta:
                    output = self._tta_compute(image)
                else:
                    output = self.model(image)
                    output = self.compute(output)
                # Collect predictions/targets from all ranks before scoring.
                output = gather_tensor(output)
                mask = gather_tensor(mask)
                if (((idx + 1) == len(self.test_dataloader)) and (self.test_exclude_num > 0)):
                    # Drop the distributed-sampler padding from the last batch.
                    output = output[:(- self.test_exclude_num)]
                    mask = mask[:(- self.test_exclude_num)]
                self.metric(output.cpu().numpy(), mask.cpu().numpy())
                res = self.metric.accumulate()
                self.logger.info('Test, Iter {}, {}'.format((idx + 1), ', '.join(['{}: {}'.format(k, np.round(v, 4)) for (k, v) in res.items()])))
        self.logger.info('Test Result: {}'.format(', '.join(['{}: {}'.format(k, np.round(v, 4)) for (k, v) in res.items()])))
        return res

    def _tta_compute(self, image):
        """Test-time augmentation: average predictions over several rescaled
        (and optionally flipped) copies of `image`, then argmax/threshold.

        Returns the predicted label map at the original (h, w) resolution.
        """
        (b, c, h, w) = image.size()
        probs = []
        for (scale, bias) in zip(self.tta['scales'], self.tta['biases']):
            # Each augmented size is scale*dim + bias.
            (new_h, new_w) = (int(((h * scale) + bias)), int(((w * scale) + bias)))
            new_img = F.interpolate(image, size=(new_h, new_w), mode='bilinear', align_corners=True)
            output = self.model(new_img)
            probs.append(output)
            if self.tta['flip']:
                # Horizontally flip, predict, then flip the prediction back.
                flip_img = new_img.flip(3)
                flip_output = self.model(flip_img)
                prob = flip_output.flip(3)
                probs.append(prob)
        # Bring every augmented prediction back to the original resolution.
        for (idx, prob) in enumerate(probs):
            probs[idx] = F.interpolate(prob, size=(h, w), mode='bilinear', align_corners=True)
        if self.multi_label:
            # Multi-label: average sigmoids, then threshold at 0.5.
            prob = torch.stack(probs, dim=0).sigmoid().mean(dim=0)
            prob = torch.where((prob >= 0.5), torch.full_like(prob, 1), torch.full_like(prob, 0)).long()
        else:
            # Single-label: average softmaxes over classes, then argmax.
            prob = torch.stack(probs, dim=0).softmax(dim=2).mean(dim=0)
            (_, prob) = torch.max(prob, dim=1)
        return prob
class OptionalActions():
    """Optional conversion actions: restrict the alignments file to faces
    that exist in a user-supplied aligned-faces folder."""

    def __init__(self, args, input_images, alignments):
        logger.debug('Initializing %s', self.__class__.__name__)
        self.args = args
        self.input_images = input_images
        self.alignments = alignments
        self.remove_skipped_faces()
        logger.debug('Initialized %s', self.__class__.__name__)

    def remove_skipped_faces(self):
        """Remove alignments whose face hashes are absent from the aligned
        folder. A no-op when no usable aligned folder was supplied."""
        logger.debug('Filtering Faces')
        face_hashes = self.get_face_hashes()
        if not face_hashes:
            logger.debug('No face hashes. Not skipping any faces')
            return
        pre_face_count = self.alignments.faces_count
        self.alignments.filter_hashes(face_hashes, filter_out=False)
        logger.info('Faces filtered out: %s', (pre_face_count - self.alignments.faces_count))

    def get_face_hashes(self):
        """Return hashes for every image in the aligned folder.

        Returns an empty list (meaning "convert every face") when no aligned
        folder was specified or it does not exist.

        Raises:
            FaceswapError: a valid aligned folder exists but contains no images.
        """
        face_hashes = []
        input_aligned_dir = self.args.input_aligned_dir
        if input_aligned_dir is None:
            logger.verbose('Aligned directory not specified. All faces listed in the alignments file will be converted')
        elif not os.path.isdir(input_aligned_dir):
            logger.warning('Aligned directory not found. All faces listed in the alignments file will be converted')
        else:
            file_list = get_image_paths(input_aligned_dir)
            logger.info('Getting Face Hashes for selected Aligned Images')
            for face in tqdm(file_list, desc='Hashing Faces'):
                face_hashes.append(hash_image_file(face))
            logger.debug('Face Hashes: %s', len(face_hashes))
            # BUG FIX: these checks previously ran unconditionally, so the
            # "no aligned dir" branches above raised FaceswapError instead of
            # returning the empty list that remove_skipped_faces expects.
            # They only make sense when a valid folder was actually scanned.
            if not face_hashes:
                raise FaceswapError('Aligned directory is empty, no faces will be converted!')
            if len(face_hashes) <= len(self.input_images) / 3:
                logger.warning('Aligned directory contains far fewer images than the input directory, are you sure this is the right folder?')
        return face_hashes
class ucred_t(ctypes.Structure):
    """ctypes mirror of the XNU (macOS kernel) ``ucred`` credential structure.

    Field layout must match the in-memory kernel structure exactly; pointers
    are modeled with POINTER64, so this assumes a 64-bit guest — TODO confirm.
    Instances are bound to a guest-memory address (``base``) via the emulator
    handle (``ql``) and can be round-tripped with updateToMem/loadFromMem.
    """

    class cr_entry(ctypes.Structure):
        # TAILQ-style linkage (next/prev pointers) for the credential list.
        _fields_ = (('tqe_next', POINTER64), ('tqe_prev', POINTER64))

    class posix_cred_t(ctypes.Structure):
        # POSIX identity: effective/real/saved uid & gid, supplementary
        # groups (fixed 16 slots), group-management uid and flags.
        _fields_ = (('cr_uid', ctypes.c_uint32), ('cr_ruid', ctypes.c_uint32), ('cr_svuid', ctypes.c_uint32), ('cr_ngroups', ctypes.c_short), ('cr_groups', (ctypes.c_uint32 * 16)), ('cr_rgid', ctypes.c_uint32), ('cr_svgid', ctypes.c_uint32), ('cr_gmuid', ctypes.c_uint32), ('cr_flags', ctypes.c_int32))

    class au_session_t(ctypes.Structure):
        # Audit session info pointer plus preselection mask pointer.
        _fields_ = (('as_aia_p', POINTER64), ('as_mask', POINTER64))

    _fields_ = (('cr_link', cr_entry), ('cr_ref', ctypes.c_ulong), ('cr_posix', posix_cred_t), ('cr_label', POINTER64), ('cr_audit', au_session_t))

    def __init__(self, ql, base):
        # ql: emulator instance providing guest memory access.
        # base: guest address where this structure lives.
        self.ql = ql
        self.base = base

    def updateToMem(self):
        """Write this structure's bytes back to guest memory at ``base``."""
        self.ql.mem.write(self.base, bytes(self))

    def loadFromMem(self):
        """Read the structure from guest memory and return a new bound instance."""
        data = self.ql.mem.read(self.base, ctypes.sizeof(self))
        newObj = type(self).from_buffer(data)
        # Re-attach the emulator handle and address, which are not part of
        # the raw byte layout.
        newObj.ql = self.ql
        newObj.base = self.base
        return newObj
class QtileEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
    """Event loop policy that hands out the loop owned by a Qtile instance.

    ``get_event_loop`` never creates a loop of its own: the qtile object must
    already carry one, otherwise a RuntimeError is raised.
    """

    def __init__(self, qtile: Qtile) -> None:
        super().__init__()
        self.qtile = qtile

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        loop = self.qtile._eventloop
        if not isinstance(loop, asyncio.AbstractEventLoop):
            raise RuntimeError
        return loop
class YInflictedDamageMixin():
    """Mixin that maps a graph's y-axis onto inflicted damage, delegating all
    lookups to the graph's time cache."""

    def _getDamagePerKey(self, src, time):
        """Return the damage data point for ``src`` at ``time``.

        Raises ValueError when no concrete time is supplied, since damage is
        only defined at a specific point in time.
        """
        if time is None:
            raise ValueError
        return self._getTimeCacheDataPoint(src=src, time=time)

    def _prepareTimeCache(self, src, maxTime):
        """Ask the graph's cache to precompute damage data up to ``maxTime``."""
        self.graph._timeCache.prepareDmgData(src=src, maxTime=maxTime)

    def _getTimeCacheData(self, src):
        """Return all cached damage data for ``src``."""
        return self.graph._timeCache.getDmgData(src=src)

    def _getTimeCacheDataPoint(self, src, time):
        """Return the cached damage data point for ``src`` at ``time``."""
        return self.graph._timeCache.getDmgDataPoint(src=src, time=time)
class nyudepthv2(BaseDataset):
    """NYU Depth V2 dataset: RGB images paired with dense depth maps.

    Reads train/test file lists from ``filenames_path``; images live under
    ``<data_path>/nyu_depth_v2/sync`` (train) or
    ``.../official_splits/test/`` (test). Each sample also carries a scene
    class id derived from the filename.
    """

    def __init__(self, data_path, filenames_path='./dataset/filenames/', is_train=True, crop_size=(448, 576), scale_size=None):
        super().__init__(crop_size)
        if (crop_size[0] > 480):
            # For crops taller than the native 480px, pre-scale the images
            # keeping the original 640:480 aspect ratio.
            scale_size = (int(((crop_size[0] * 640) / 480)), crop_size[0])
        self.scale_size = scale_size
        self.is_train = is_train
        self.data_path = os.path.join(data_path, 'nyu_depth_v2')
        self.image_path_list = []
        self.depth_path_list = []
        # NOTE(review): loaded relative to the current working directory, not
        # to this file or data_path — verify the expected launch directory.
        with open('nyu_class_list.json', 'r') as f:
            self.class_list = json.load(f)
        txt_path = os.path.join(filenames_path, 'nyudepthv2')
        if is_train:
            txt_path += '/train_list.txt'
            self.data_path = (self.data_path + '/sync')
        else:
            txt_path += '/test_list.txt'
            self.data_path = (self.data_path + '/official_splits/test/')
        # Each line holds "<image_rel_path> <depth_rel_path>".
        self.filenames_list = self.readTXT(txt_path)
        phase = ('train' if is_train else 'test')
        print('Dataset: NYU Depth V2')
        print(('# of %s images: %d' % (phase, len(self.filenames_list))))

    def __len__(self):
        return len(self.filenames_list)

    def __getitem__(self, idx):
        """Return a dict with the (augmented) image, depth map, filename and
        scene class id for the sample at ``idx``."""
        img_path = (self.data_path + self.filenames_list[idx].split(' ')[0])
        gt_path = (self.data_path + self.filenames_list[idx].split(' ')[1])
        # e.g. ".../kitchen_0001/rgb_00000.jpg" -> "kitchen_0001_rgb_00000.jpg"
        filename = ((img_path.split('/')[(- 2)] + '_') + img_path.split('/')[(- 1)])
        class_id = (- 1)
        # Scene class = first class-list entry appearing in the filename.
        for (i, name) in enumerate(self.class_list):
            if (name in filename):
                class_id = i
                break
        assert (class_id >= 0)
        image = cv2.imread(img_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        depth = cv2.imread(gt_path, cv2.IMREAD_UNCHANGED).astype('float32')
        if self.scale_size:
            image = cv2.resize(image, (self.scale_size[0], self.scale_size[1]))
            depth = cv2.resize(depth, (self.scale_size[0], self.scale_size[1]))
        if self.is_train:
            (image, depth) = self.augment_training_data(image, depth)
        else:
            (image, depth) = self.augment_test_data(image, depth)
        # Depth appears to be stored at x1000 scale; divide to get the
        # working unit (presumably meters) — TODO confirm against the data.
        depth = (depth / 1000.0)
        return {'image': image, 'depth': depth, 'filename': filename, 'class_id': class_id}
# BUG FIX: the decorator line had lost its "@pytest.mark" prefix (a bare
# ".parametrize(...)" is a syntax error); restored the full decorator.
@pytest.mark.parametrize('version, part, expected', [('1.0.4-rc.1', 'major', '2.0.0'), ('1.1.0-rc.1', 'major', '2.0.0'), ('1.1.4-rc.1', 'major', '2.0.0'), ('1.2.3', 'major', '2.0.0'), ('1.0.0-rc.1', 'major', '1.0.0'), ('0.2.0-rc.1', 'minor', '0.2.0'), ('0.2.5-rc.1', 'minor', '0.3.0'), ('1.3.1', 'minor', '1.4.0'), ('1.3.2', 'patch', '1.3.3'), ('0.1.5-rc.2', 'patch', '0.1.5'), ('0.1.4', 'prerelease', '0.1.5-rc.1'), ('0.1.5-rc.1', 'prerelease', '0.1.5-rc.2'), ('0.2.0-rc.1', 'patch', '0.2.0'), ('1.0.0-rc.1', 'patch', '1.0.0'), ('1.0.0-rc.1', 'minor', '1.0.0')])
def test_next_version_with_versioninfo(version, part, expected):
    """next_version bumps each component correctly and finalizes pre-releases."""
    ver = Version.parse(version)
    next_version = ver.next_version(part)
    assert isinstance(next_version, Version)
    assert str(next_version) == expected
def get_norm_layer(opt, norm_type='instance'):
    """Return a callable that wraps a layer with the requested normalization.

    ``norm_type`` may carry a ``'spectral'`` prefix (e.g. ``'spectralinstance'``)
    to additionally apply spectral normalization. The returned
    ``add_norm_layer(layer, opt)``:
      - returns the layer untouched for subtype ``'none'`` / empty subtype;
      - otherwise removes the layer's own bias (the norm layer provides the
        affine term where applicable) and returns
        ``nn.Sequential(layer, norm_layer)`` with instance / sync-batch
        (when ``opt.mpdist``) / batch normalization.
    """
    def get_out_channel(layer):
        # Conv-style layers expose out_channels; otherwise fall back to the
        # first weight dimension (e.g. nn.Linear).
        if hasattr(layer, 'out_channels'):
            return layer.out_channels
        return layer.weight.size(0)

    def add_norm_layer(layer, opt):
        # NOTE: `norm_type` is only read here, so no nonlocal declaration is
        # needed (the original had a redundant one).
        if norm_type.startswith('spectral'):
            layer = spectral_norm(layer)
            subnorm_type = norm_type[len('spectral'):]
        else:
            subnorm_type = norm_type
        if (subnorm_type == 'none') or (len(subnorm_type) == 0):
            return layer
        # The norm layer supplies the bias term, so drop the layer's own.
        if getattr(layer, 'bias', None) is not None:
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)
        if subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
        elif (subnorm_type == 'sync_batch') and opt.mpdist:
            norm_layer = nn.SyncBatchNorm(get_out_channel(layer), affine=True)
        else:
            norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
        return nn.Sequential(layer, norm_layer)

    return add_norm_layer
# BUG FIX: the decorator line had been reduced to a bare "()" (syntax error);
# restored the pytest fixture decorator this fixture function requires.
@pytest.fixture()
def session_api(skip_qtbot):
    """Fixture: a MultiplayerSessionApi for session 1234 with a mocked network
    client (async server_call), debug logging, and a qtbot-managed root widget."""
    network_client = MagicMock()
    network_client.server_call = AsyncMock()
    root = QtWidgets.QWidget()
    skip_qtbot.addWidget(root)
    api = MultiplayerSessionApi(network_client, 1234)
    api.logger.setLevel(logging.DEBUG)
    api.widget_root = root
    return api
def get_temporary_copy(path: Union[(str, Path)]) -> Path:
    """Copy *path* to a uniquely-named sibling file and return the copy's path.

    The copy is deleted automatically at interpreter exit. Only regular files
    are supported (directories and symlinks are rejected).
    """
    path = env.get_path(path)
    assert ((not path.is_dir()) and (not path.is_symlink()))
    # e.g. "model.pt" -> "model___<32 hex chars>.pt"; uuid4().hex is the
    # hyphen-free form of str(uuid4()).replace('-', '').
    tmp_path = path.with_name(((path.stem + '___') + uuid.uuid4().hex) + path.suffix)
    shutil.copyfile(path, tmp_path)
    # missing_ok avoids an error at exit if the copy was already removed.
    atexit.register(tmp_path.unlink, missing_ok=True)
    return tmp_path
def read_and_store_bindings(f: Callable, bindings: Dict[(str, type)]) -> None:
    """Merge *bindings* into ``f.__bindings__`` (new keys win over existing).

    A sentinel value of ``'deferred'`` on the function counts as empty. For
    bound methods the attribute is stored on the underlying function object.
    """
    existing = getattr(f, '__bindings__', None) or {}
    if existing == 'deferred':
        existing = {}
    merged = dict(existing, **bindings)
    # Bound methods forward attribute reads but not writes; write through
    # to the underlying function when one exists.
    target = cast(Any, f).__func__ if hasattr(f, '__func__') else cast(Any, f)
    target.__bindings__ = merged
def test_handler_bad_request(api_gw_url):
    """POSTing a body missing required fields yields 400 plus an error payload."""
    payload = json.dumps({'order_item_count': 5})
    response = requests.post(api_gw_url, data=payload)
    assert response.status_code == HTTPStatus.BAD_REQUEST
    assert json.loads(response.text) == {'error': 'invalid input'}
class TestSources():
def test_default(self, isolation):
builder = MockBuilder(str(isolation))
assert (builder.config.sources == builder.config.sources == {})
assert (builder.config.get_distribution_path(pjoin('src', 'foo', 'bar.py')) == pjoin('src', 'foo', 'bar.py'))
def test_global_invalid_type(self, isolation):
config = {'tool': {'hatch': {'build': {'sources': ''}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match='Field `tool.hatch.build.sources` must be a mapping or array of strings'):
_ = builder.config.sources
def test_global_array(self, isolation):
config = {'tool': {'hatch': {'build': {'sources': ['src']}}}}
builder = MockBuilder(str(isolation), config=config)
assert (len(builder.config.sources) == 1)
assert (builder.config.sources[pjoin('src', '')] == '')
assert (builder.config.get_distribution_path(pjoin('src', 'foo', 'bar.py')) == pjoin('foo', 'bar.py'))
def test_global_array_source_not_string(self, isolation):
config = {'tool': {'hatch': {'build': {'sources': [0]}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match='Source #1 in field `tool.hatch.build.sources` must be a string'):
_ = builder.config.sources
def test_global_array_source_empty_string(self, isolation):
config = {'tool': {'hatch': {'build': {'sources': ['']}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(ValueError, match='Source #1 in field `tool.hatch.build.sources` cannot be an empty string'):
_ = builder.config.sources
def test_global_mapping(self, isolation):
config = {'tool': {'hatch': {'build': {'sources': {'src/foo': 'renamed'}}}}}
builder = MockBuilder(str(isolation), config=config)
assert (len(builder.config.sources) == 1)
assert (builder.config.sources[pjoin('src', 'foo', '')] == pjoin('renamed', ''))
assert (builder.config.get_distribution_path(pjoin('src', 'foo', 'bar.py')) == pjoin('renamed', 'bar.py'))
def test_global_mapping_source_empty_string(self, isolation):
config = {'tool': {'hatch': {'build': {'sources': {'': 'renamed'}}}}}
builder = MockBuilder(str(isolation), config=config)
assert (len(builder.config.sources) == 1)
assert (builder.config.sources[''] == pjoin('renamed', ''))
assert (builder.config.get_distribution_path('bar.py') == pjoin('renamed', 'bar.py'))
assert (builder.config.get_distribution_path(pjoin('foo', 'bar.py')) == pjoin('renamed', 'foo', 'bar.py'))
def test_global_mapping_path_empty_string(self, isolation):
config = {'tool': {'hatch': {'build': {'sources': {'src/foo': ''}}}}}
builder = MockBuilder(str(isolation), config=config)
assert (len(builder.config.sources) == 1)
assert (builder.config.sources[pjoin('src', 'foo', '')] == '')
assert (builder.config.get_distribution_path(pjoin('src', 'foo', 'bar.py')) == 'bar.py')
def test_global_mapping_replacement_not_string(self, isolation):
config = {'tool': {'hatch': {'build': {'sources': {'src/foo': 0}}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match='Path for source `src/foo` in field `tool.hatch.build.sources` must be a string'):
_ = builder.config.sources
def test_target_invalid_type(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'sources': ''}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
with pytest.raises(TypeError, match='Field `tool.hatch.build.targets.foo.sources` must be a mapping or array of strings'):
_ = builder.config.sources
def test_target_array(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'sources': ['src']}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
assert (len(builder.config.sources) == 1)
assert (builder.config.sources[pjoin('src', '')] == '')
assert (builder.config.get_distribution_path(pjoin('src', 'foo', 'bar.py')) == pjoin('foo', 'bar.py'))
def test_target_array_source_not_string(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'sources': [0]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
with pytest.raises(TypeError, match='Source #1 in field `tool.hatch.build.targets.foo.sources` must be a string'):
_ = builder.config.sources
def test_target_array_source_empty_string(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'sources': ['']}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
with pytest.raises(ValueError, match='Source #1 in field `tool.hatch.build.targets.foo.sources` cannot be an empty string'):
_ = builder.config.sources
def test_target_mapping(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'sources': {'src/foo': 'renamed'}}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
assert (len(builder.config.sources) == 1)
assert (builder.config.sources[pjoin('src', 'foo', '')] == pjoin('renamed', ''))
assert (builder.config.get_distribution_path(pjoin('src', 'foo', 'bar.py')) == pjoin('renamed', 'bar.py'))
def test_target_mapping_source_empty_string(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'sources': {'': 'renamed'}}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
assert (len(builder.config.sources) == 1)
assert (builder.config.sources[''] == pjoin('renamed', ''))
assert (builder.config.get_distribution_path(pjoin('bar.py')) == pjoin('renamed', 'bar.py'))
def test_target_mapping_path_empty_string(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'sources': {'src/foo': ''}}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
assert (len(builder.config.sources) == 1)
assert (builder.config.sources[pjoin('src', 'foo', '')] == '')
assert (builder.config.get_distribution_path(pjoin('src', 'foo', 'bar.py')) == 'bar.py')
def test_target_mapping_replacement_not_string(self, isolation):
config = {'tool': {'hatch': {'build': {'targets': {'foo': {'sources': {'src/foo': 0}}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
with pytest.raises(TypeError, match='Path for source `src/foo` in field `tool.hatch.build.targets.foo.sources` must be a string'):
_ = builder.config.sources
def test_target_overrides_global(self, isolation):
config = {'tool': {'hatch': {'build': {'sources': ['src'], 'targets': {'foo': {'sources': ['pkg']}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = 'foo'
assert (len(builder.config.sources) == 1)
assert (builder.config.sources[pjoin('pkg', '')] == '')
assert (builder.config.get_distribution_path(pjoin('pkg', 'foo', 'bar.py')) == pjoin('foo', 'bar.py'))
assert (builder.config.get_distribution_path(pjoin('src', 'foo', 'bar.py')) == pjoin('src', 'foo', 'bar.py'))
def test_no_source(self, isolation):
config = {'tool': {'hatch': {'build': {'sources': ['bar']}}}}
builder = MockBuilder(str(isolation), config=config)
assert (len(builder.config.sources) == 1)
assert (builder.config.sources[pjoin('bar', '')] == '')
assert (builder.config.get_distribution_path(pjoin('foo', 'bar.py')) == pjoin('foo', 'bar.py'))
def test_compatible_with_packages(self, isolation):
    """A sources mapping may coexist with the packages option for the same prefix."""
    build_options = {'sources': {'src/foo': 'renamed'}, 'packages': ['src/foo']}
    builder = MockBuilder(str(isolation), config={'tool': {'hatch': {'build': build_options}}})
    sources = builder.config.sources
    assert len(sources) == 1
    assert sources[pjoin('src', 'foo', '')] == pjoin('renamed', '')
    assert builder.config.get_distribution_path(pjoin('src', 'foo', 'bar.py')) == pjoin('renamed', 'bar.py')
class BinaryBinnedAUROC(Metric[Tuple[(torch.Tensor, torch.Tensor)]]):
    """Binned AUROC metric for binary classification.

    Buffers raw predictions and targets on each ``update`` and computes the
    AUROC over a fixed threshold tensor in ``compute``.
    """

    def __init__(self: TBinaryBinnedAUROC, *, num_tasks: int=1, threshold: Union[(int, List[float], torch.Tensor)]=DEFAULT_NUM_THRESHOLD, device: Optional[torch.device]=None) -> None:
        """Validate and store the task count and the normalized threshold tensor.

        ``threshold`` may be a count, a list of cut points, or a tensor; it is
        converted to a tensor on the metric's device.
        """
        super().__init__(device=device)
        threshold = _create_threshold_tensor(threshold, self.device)
        _binary_binned_auroc_param_check(num_tasks, threshold)
        self.num_tasks = num_tasks
        self.threshold = threshold
        # States are lists of raw tensors; concatenation is deferred to compute().
        self._add_state('inputs', [])
        self._add_state('targets', [])
    # NOTE(review): the bare `_mode()` calls below look like stripped decorators
    # (e.g. @torch.inference_mode()) — confirm against the upstream source.
    _mode()
    def update(self: TBinaryBinnedAUROC, input: torch.Tensor, target: torch.Tensor) -> TBinaryBinnedAUROC:
        """Buffer one batch of predictions and targets after validating shapes."""
        input = input.to(self.device)
        target = target.to(self.device)
        _binary_binned_auroc_update_input_check(input, target, self.num_tasks, self.threshold)
        self.inputs.append(input)
        self.targets.append(target)
        return self
    _mode()
    def compute(self: TBinaryBinnedAUROC) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Return the result computed over all buffered batches."""
        return _binary_binned_auroc_compute(torch.cat(self.inputs, (- 1)), torch.cat(self.targets, (- 1)), self.threshold)
    _mode()
    def merge_state(self: TBinaryBinnedAUROC, metrics: Iterable[TBinaryBinnedAUROC]) -> TBinaryBinnedAUROC:
        """Fold other metric instances' buffered state into this one."""
        for metric in metrics:
            if metric.inputs:
                # Concatenate the other metric's buffers once and store as one entry.
                metric_inputs = torch.cat(metric.inputs, (- 1)).to(self.device)
                metric_targets = torch.cat(metric.targets, (- 1)).to(self.device)
                self.inputs.append(metric_inputs)
                self.targets.append(metric_targets)
        return self
    _mode()
    def _prepare_for_merge_state(self: TBinaryBinnedAUROC) -> None:
        """Collapse buffered lists to single tensors before a state merge."""
        if (self.inputs and self.targets):
            self.inputs = [torch.cat(self.inputs, (- 1))]
            self.targets = [torch.cat(self.targets, (- 1))]
# NOTE(review): `_flags(...)` looks like a stripped decorator
# (e.g. @pytensor.config.change_flags) — confirm against upstream.
_flags(compute_test_value='raise')
def test_mvnormal_ShapeFeature():
    """ShapeFeature should infer multivariate_normal output shapes symbolically."""
    # Symbolic dimension with a concrete test value for shape checking.
    M_pt = iscalar('M')
    M_pt.tag.test_value = 2
    d_rv = multivariate_normal(pt.ones((M_pt,)), pt.eye(M_pt), size=2)
    fg = FunctionGraph([i for i in graph_inputs([d_rv]) if (not isinstance(i, Constant))], [d_rv], clone=False, features=[ShapeFeature()])
    (s1, s2) = fg.shape_feature.shape_of[d_rv]
    assert (get_test_value(s1) == 2)
    # The event dimension must remain expressed in terms of the symbolic M.
    assert (M_pt in graph_inputs([s2]))
    # Second case: broadcasting a (1, None) mean against a batched covariance.
    mean = tensor(dtype=config.floatX, shape=(1, None))
    mean.tag.test_value = np.array([[0, 1, 2]], dtype=config.floatX)
    test_covar = np.diag(np.array([1, 10, 100], dtype=config.floatX))
    test_covar = np.stack([test_covar, (test_covar * 10.0)])
    cov = pt.as_tensor(test_covar).type()
    cov.tag.test_value = test_covar
    d_rv = multivariate_normal(mean, cov, size=[2, 3, 2])
    fg = FunctionGraph(outputs=[d_rv], clone=False, features=[ShapeFeature()])
    (s1, s2, s3, s4) = fg.shape_feature.shape_of[d_rv]
    assert (s1.get_test_value() == 2)
    assert (s2.get_test_value() == 3)
    assert (s3.get_test_value() == 2)
    assert (s4.get_test_value() == 3)
def print_args(args):
    """Print parsed CLI arguments at the level given by ``args.args_verbosity``.

    Level 0 prints nothing, 1 prints the namespace repr, 2 prints one
    ``name=value`` line per argument (sorted); anything else raises ValueError.
    """
    verbosity = getattr(args, 'args_verbosity', 1)
    if verbosity == 0:
        return
    if verbosity == 1:
        print(args)
        return
    if verbosity == 2:
        for name, value in sorted(vars(args).items()):
            print(f'{name}={value}')
        return
    raise ValueError('Please specify an argument verbosity level between 0 and 2')
class TestBotCommandScopeWithoutRequest():
    """Offline tests for BotCommandScope de/serialization, slots and equality."""

    def test_slot_behaviour(self, bot_command_scope):
        for attr in bot_command_scope.__slots__:
            assert getattr(bot_command_scope, attr, 'err') != 'err', f"got extra slot '{attr}'"
        assert len(mro_slots(bot_command_scope)) == len(set(mro_slots(bot_command_scope))), 'duplicate slot'

    def test_de_json(self, bot, scope_class_and_type, chat_id):
        cls = scope_class_and_type[0]
        type_ = scope_class_and_type[1]
        assert cls.de_json({}, bot) is None
        json_dict = {'type': type_, 'chat_id': chat_id, 'user_id': 42}
        bot_command_scope = BotCommandScope.de_json(json_dict, bot)
        # Keys not consumed by the subclass's slots end up in api_kwargs.
        assert set(bot_command_scope.api_kwargs.keys()) == ({'chat_id', 'user_id'} - set(cls.__slots__))
        assert isinstance(bot_command_scope, BotCommandScope)
        assert isinstance(bot_command_scope, cls)
        assert bot_command_scope.type == type_
        if 'chat_id' in cls.__slots__:
            assert bot_command_scope.chat_id == chat_id
        if 'user_id' in cls.__slots__:
            assert bot_command_scope.user_id == 42

    def test_de_json_invalid_type(self, bot, chat_id):
        # FIX: `chat_id` was referenced without being requested as a fixture,
        # which raised NameError when the test ran.
        json_dict = {'type': 'invalid', 'chat_id': chat_id, 'user_id': 42}
        bot_command_scope = BotCommandScope.de_json(json_dict, bot)
        # An unknown type falls back to the plain base class.
        assert type(bot_command_scope) is BotCommandScope
        assert bot_command_scope.type == 'invalid'

    def test_de_json_subclass(self, scope_class, bot, chat_id):
        # Calling de_json on a concrete subclass keeps that subclass even for
        # an unknown type string.
        json_dict = {'type': 'invalid', 'chat_id': chat_id, 'user_id': 42}
        assert type(scope_class.de_json(json_dict, bot)) is scope_class

    def test_to_dict(self, bot_command_scope):
        bot_command_scope_dict = bot_command_scope.to_dict()
        assert isinstance(bot_command_scope_dict, dict)
        # FIX: the serialized dict (not the source object) must be inspected,
        # otherwise to_dict()'s output is never actually checked.
        assert bot_command_scope_dict['type'] == bot_command_scope.type
        if hasattr(bot_command_scope, 'chat_id'):
            assert bot_command_scope_dict['chat_id'] == bot_command_scope.chat_id
        if hasattr(bot_command_scope, 'user_id'):
            assert bot_command_scope_dict['user_id'] == bot_command_scope.user_id

    def test_equality(self, bot_command_scope, bot):
        a = BotCommandScope('base_type')
        b = BotCommandScope('base_type')
        c = bot_command_scope
        d = deepcopy(bot_command_scope)
        e = Dice(4, 'emoji')
        assert a == b
        assert hash(a) == hash(b)
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
        assert a != e
        assert hash(a) != hash(e)
        assert c == d
        assert hash(c) == hash(d)
        assert c != e
        assert hash(c) != hash(e)
        # Changing an identifying field must break equality.
        if hasattr(c, 'chat_id'):
            json_dict = c.to_dict()
            json_dict['chat_id'] = 0
            f = c.__class__.de_json(json_dict, bot)
            assert c != f
            assert hash(c) != hash(f)
        if hasattr(c, 'user_id'):
            json_dict = c.to_dict()
            json_dict['user_id'] = 0
            g = c.__class__.de_json(json_dict, bot)
            assert c != g
            assert hash(c) != hash(g)
.parametrize('dtype', ['u1', 'int64', 'float32', 'float64'])
def test_tofile(tmp_path, xp, dtype):
filepath = str((tmp_path / 'test_tofile'))
src = xp.arange(100, dtype=dtype)
tofile(src, filepath)
dst = xp.fromfile(filepath, dtype=dtype)
xp.testing.assert_array_equal(src, dst)
tofile(src[::2], filepath)
dst = xp.fromfile(filepath, dtype=dtype)
xp.testing.assert_array_equal(src[::2], dst) |
def reverse_by_epsilon(forward_process, predicted_noise, x, t):
    """One reverse-diffusion step: estimate x_{t-1} from x_t and predicted noise.

    Uses the schedule coefficients extracted for timestep ``t``; Gaussian noise
    is added except at the final step (t == 0).
    """
    schedule = forward_process.forward_schedule
    coeffs = schedule.extract(t, x.shape)
    posterior_mean = coeffs['sqrt_recip_alphas'] * (
        x - coeffs['betas'] * predicted_noise / coeffs['sqrt_one_minus_alphas_cumprod'])
    if t[0].item() > 0:
        noise = torch.randn_like(x)
    else:
        noise = 0
    return posterior_mean + torch.sqrt(coeffs['posterior_variance']) * noise
def anchor(parser, token):
    """Template tag: build a SortAnchorNode from a field name and optional title.

    Usage: ``{% anchor field [title] %}``; the title defaults to the
    capitalized field name.
    """
    bits = [bit.strip('"\'') for bit in token.split_contents()]
    if len(bits) < 2:
        raise template.TemplateSyntaxError('anchor tag takes at least 1 argument')
    field = bits[1]
    title = bits[2] if len(bits) > 2 else field.capitalize()
    return SortAnchorNode(field.strip(), title.strip())
.parametrize('method_name', ['waitExposed', 'waitActive'])
def test_wait_window_propagates_other_exception(method_name, qtbot):
method = getattr(qtbot, method_name)
widget = qt_api.QtWidgets.QWidget()
qtbot.add_widget(widget)
with pytest.raises(ValueError, match='some other error'):
with method(widget, timeout=100):
widget.show()
raise ValueError('some other error') |
def test_delitem() -> None:
    """MonkeyPatch.delitem/setitem changes are reverted by undo()."""
    mapping: Dict[str, object] = {'x': 1}
    mp = MonkeyPatch()
    mp.delitem(mapping, 'x')
    assert 'x' not in mapping
    mp.delitem(mapping, 'y', raising=False)
    pytest.raises(KeyError, mp.delitem, mapping, 'y')
    assert not mapping
    mp.setitem(mapping, 'y', 1700)
    assert mapping['y'] == 1700
    # Keys added outside the patcher must survive undo().
    mapping['hello'] = 'world'
    mp.setitem(mapping, 'x', 1500)
    assert mapping['x'] == 1500
    mp.undo()
    assert mapping == {'hello': 'world', 'x': 1}
class LeNet(nn.Module):
    """LeNet-5 style CNN for 1-channel 28x28 inputs with a T-revision layer.

    ``forward`` returns the penultimate features, the 10-way logits and —
    when ``revision`` is truthy — the weight of the bias-free T-revision
    correction matrix.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, stride=1, padding=2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(400, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        # Bias-free 10x10 matrix used as the learnable noise-transition correction.
        self.T_revision = nn.Linear(10, 10, False)

    def forward(self, x, revision=True):
        """Run the network.

        Args:
            x: (batch, 1, 28, 28) input images.
            revision: when truthy, also return the T-revision weight matrix.

        Returns:
            (features, logits, correction) when revision is truthy,
            otherwise (features, logits).
        """
        correction = self.T_revision.weight
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, 2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out_1 = F.relu(self.fc2(out))
        out_2 = self.fc3(out_1)
        # FIX: use truthiness instead of the `== True` anti-pattern.
        if revision:
            return (out_1, out_2, correction)
        return (out_1, out_2)
def ptb_raw_data(data_path=None):
    """Load PTB train/valid/test word-id sequences plus the vocabulary size.

    The vocabulary is built from the training split only and reused for all
    three splits.
    """
    splits = ('train', 'valid', 'test')
    paths = {name: os.path.join(data_path, f'ptb.{name}.txt') for name in splits}
    word_to_id, _ = _build_vocab(paths['train'])
    datasets = tuple(_file_to_word_ids(paths[name], word_to_id) for name in splits)
    return datasets + (len(word_to_id),)
class MockCapsNumLockIndicator():
    """Mock for subprocess calls to ``xset q``.

    ``info`` holds two canned outputs (Caps Lock off, then on); ``index``
    selects which one ``call_process`` returns; ``is_error`` makes the call
    fail like a broken xset binary.
    """
    # Placeholder so code under test can reference the exception attribute.
    CalledProcessError = None
    info: List[List[str]] = []
    is_error = False
    index = 0

    # NOTE(review): the methods below take ``cls`` but have no visible
    # @classmethod decorator — presumably stripped during extraction; confirm.
    def reset(cls):
        """Restore the canned xset outputs and clear the error/index state."""
        cls.info = [['Keyboard Control:', '  auto repeat:  on    key click percent:  0    LED mask:  ', '  XKB indicators:', '    00: Caps Lock:   off    01: Num Lock:    on     02: Scroll Lock: off', '    03: Compose:     off    04: Kana:        off    05: Sleep:       off'], ['Keyboard Control:', '  auto repeat:  on    key click percent:  0    LED mask:  ', '  XKB indicators:', '    00: Caps Lock:   on     01: Num Lock:    on     02: Scroll Lock: off', '    03: Compose:     off    04: Kana:        off    05: Sleep:       off']]
        cls.index = 0
        cls.is_error = False

    def call_process(cls, cmd):
        """Return the canned ``xset q`` output, or raise when is_error is set."""
        if cls.is_error:
            raise subprocess.CalledProcessError((- 1), cmd=cmd, output="Couldn't call xset.")
        # Only the `q` subcommand is simulated; other commands return None.
        if (cmd[1:] == ['q']):
            track = cls.info[cls.index]
            output = '\n'.join(track)
            return output
def test_setup_cfg_2line_description(tmpfolder):
    """A multi-line description must survive the setup.cfg round trip."""
    description = '2 line\ndescription'
    _, opts = actions.get_default_options({}, {'project_path': tmpfolder})
    opts['description'] = description
    rendered = templates.setup_cfg(opts)
    # The rendered file must parse and preserve the description verbatim.
    parser = ConfigParser()
    parser.read_string(rendered)
    assert parser['metadata']['description'].strip() == description
    # And re-reading the project config from disk must agree.
    Path(tmpfolder, 'setup.cfg').write_text(rendered)
    reread = info.project({})
    assert reread['description'].strip() == description
def patch(cls):
    """Wrap ``cls.__init__`` so a ``form`` keyword ends up in ``self.attrs['form']``.

    Idempotent: the original initializer is stashed on ``cls.__orig__init__``
    and a second call is a no-op.
    """
    if hasattr(cls, '__orig__init__'):
        return
    cls.__orig__init__ = cls.__init__

    def patched_init(self, form_, *args, **kwargs):
        form_value = kwargs.pop('form', None)
        cls.__orig__init__(self, form_, *args, **kwargs)
        if form_value:
            self.attrs['form'] = form_value

    cls.__init__ = patched_init
class MetaCUB(CUB):
    """Episodic (few-shot) wrapper around CUB: each item is one N-way/K-shot task."""

    def __init__(self, args, partition='base', train_transform=None, test_transform=None, fix_seed=True):
        """Build the per-class image index and the resize/augment pipelines.

        Args:
            args: namespace providing n_ways / n_shots / n_queries /
                n_test_runs / n_aug_support_samples (plus whatever
                CUB.__init__ consumes).
            partition: split name forwarded to CUB.
            train_transform / test_transform: optional overrides for the
                default support/query augmentation pipelines.
            fix_seed: seed numpy with the item index so episodes are
                reproducible.
        """
        super(MetaCUB, self).__init__(args, partition)
        self.fix_seed = fix_seed
        self.n_ways = args.n_ways
        self.n_shots = args.n_shots
        self.n_queries = args.n_queries
        # NOTE(review): self.classes is assigned here from the inherited
        # self.data and again after self.data is rebuilt below; this first
        # assignment appears redundant.
        self.classes = list(self.data.keys())
        self.n_test_runs = args.n_test_runs
        self.n_aug_support_samples = args.n_aug_support_samples
        # Support images: resize + random crop; query images: resize + center crop.
        self.resize_transform_train = transforms.Compose([(lambda x: Image.fromarray(x)), transforms.Resize([int((self.image_size * 1.15)), int((self.image_size * 1.15))]), transforms.RandomCrop(size=84)])
        self.resize_transform_test = transforms.Compose([(lambda x: Image.fromarray(x)), transforms.Resize([int((self.image_size * 1.15)), int((self.image_size * 1.15))]), transforms.CenterCrop(self.image_size)])
        if (train_transform is None):
            self.train_transform = transforms.Compose([(lambda x: Image.fromarray(x)), transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), transforms.RandomHorizontalFlip(), (lambda x: np.asarray(x).copy()), transforms.ToTensor(), self.normalize])
        else:
            self.train_transform = train_transform
        if (test_transform is None):
            self.test_transform = transforms.Compose([(lambda x: Image.fromarray(x)), transforms.ToTensor(), self.normalize])
        else:
            self.test_transform = test_transform
        # Rebuild the class -> image-paths index from the flat imgs/labels lists.
        self.data = {}
        for idx in range(len(self.imgs)):
            if (self.labels[idx] not in self.data):
                self.data[self.labels[idx]] = []
            self.data[self.labels[idx]].append(self.imgs[idx])
        self.classes = list(self.data.keys())

    def _load_imgs(self, img_paths, transform):
        """Load each path as RGB, apply ``transform`` and stack into a uint8 array."""
        imgs = []
        for image_path in img_paths:
            img = Image.open(image_path).convert('RGB')
            img = np.array(img).astype('uint8')
            img = transform(img)
            imgs.append(np.asarray(img).astype('uint8'))
        return np.asarray(imgs).astype('uint8')

    def __getitem__(self, item):
        """Sample one episode: (support_xs, support_ys, query_xs, query_ys)."""
        if self.fix_seed:
            # Deterministic episode composition per index.
            np.random.seed(item)
        cls_sampled = np.random.choice(self.classes, self.n_ways, False)
        support_xs = []
        support_ys = []
        query_xs = []
        query_ys = []
        for (idx, cls) in enumerate(cls_sampled):
            imgs_paths = self.data[cls]
            # Support and query sets are disjoint within each class.
            support_xs_ids_sampled = np.random.choice(range(len(imgs_paths)), self.n_shots, False)
            support_paths = [imgs_paths[i] for i in support_xs_ids_sampled]
            support_imgs = self._load_imgs(support_paths, transform=self.resize_transform_train)
            support_xs.append(support_imgs)
            support_ys.append(([idx] * self.n_shots))
            query_xs_ids = np.setxor1d(np.arange(len(imgs_paths)), support_xs_ids_sampled)
            query_xs_ids = np.random.choice(query_xs_ids, self.n_queries, False)
            query_paths = [imgs_paths[i] for i in query_xs_ids]
            query_imgs = self._load_imgs(query_paths, transform=self.resize_transform_test)
            query_xs.append(query_imgs)
            query_ys.append(([idx] * query_xs_ids.shape[0]))
        (support_xs, support_ys, query_xs, query_ys) = (np.array(support_xs), np.array(support_ys), np.array(query_xs), np.array(query_ys))
        (num_ways, n_queries_per_way, height, width, channel) = query_xs.shape
        # Flatten the (ways, per-way) structure into one batch dimension.
        query_xs = query_xs.reshape(((num_ways * n_queries_per_way), height, width, channel))
        query_ys = query_ys.reshape(((num_ways * n_queries_per_way),))
        support_xs = support_xs.reshape(((- 1), height, width, channel))
        if (self.n_aug_support_samples > 1):
            # Replicate the support set so each copy receives independent augmentation.
            support_xs = np.tile(support_xs, (self.n_aug_support_samples, 1, 1, 1))
            support_ys = np.tile(support_ys.reshape(((- 1),)), self.n_aug_support_samples)
        support_xs = np.split(support_xs, support_xs.shape[0], axis=0)
        query_xs = query_xs.reshape(((- 1), height, width, channel))
        query_xs = np.split(query_xs, query_xs.shape[0], axis=0)
        # Apply the torchvision pipelines image-by-image and stack to tensors.
        support_xs = torch.stack(list(map((lambda x: self.train_transform(x.squeeze())), support_xs)))
        query_xs = torch.stack(list(map((lambda x: self.test_transform(x.squeeze())), query_xs)))
        return (support_xs, support_ys, query_xs, query_ys)

    def __len__(self):
        """Number of episodes per epoch (fixed by args.n_test_runs)."""
        return self.n_test_runs
class TestPrinting(TestCase):
    """Tests for SMT-LIB serialization (tree form vs. daggified let-bindings)."""

    def print_to_string(self, formula):
        # Convenience helper for manual debugging; not used by the tests below.
        return formula.to_smtlib(daggify=False)

    def test_real(self):
        f = Plus([Real(1), Symbol('x', REAL), Symbol('y', REAL)])
        self.assertEqual(f.to_smtlib(daggify=False), '(+ 1.0 x y)')
        # daggify=True wraps shared subterms in let-bindings (.def_N names).
        self.assertEqual(f.to_smtlib(daggify=True), '(let ((.def_0 (+ 1.0 x y))) .def_0)')

    def test_boolean(self):
        (x, y, z) = (Symbol('x'), Symbol('y'), Symbol('z'))
        f = Or(And(Not(x), Iff(x, y)), Implies(x, z))
        self.assertEqual(f.to_smtlib(daggify=False), '(or (and (not x) (= x y)) (=> x z))')
        self.assertEqual(f.to_smtlib(daggify=True), '(let ((.def_0 (=> x z))) (let ((.def_1 (= x y))) (let ((.def_2 (not x))) (let ((.def_3 (and .def_2 .def_1))) (let ((.def_4 (or .def_3 .def_0))) .def_4)))))')

    def test_int(self):
        (p, q) = (Symbol('p', INT), Symbol('q', INT))
        f = Or(Equals(Times(p, Int(5)), Minus(p, q)), LT(p, q), LE(Int(6), Int(1)))
        self.assertEqual(f.to_smtlib(daggify=False), '(or (= (* p 5) (- p q)) (< p q) (<= 6 1))')
        self.assertEqual(f.to_smtlib(daggify=True), '(let ((.def_0 (<= 6 1))) (let ((.def_1 (< p q))) (let ((.def_2 (- p q))) (let ((.def_3 (* p 5))) (let ((.def_4 (= .def_3 .def_2))) (let ((.def_5 (or .def_4 .def_1 .def_0))) .def_5))))))')

    def test_ite(self):
        x = Symbol('x')
        (p, q) = (Symbol('p', INT), Symbol('q', INT))
        f = Ite(x, p, q)
        self.assertEqual(f.to_smtlib(daggify=False), '(ite x p q)')
        self.assertEqual(f.to_smtlib(daggify=True), '(let ((.def_0 (ite x p q))) .def_0)')

    def test_quantifiers(self):
        x = Symbol('x')
        fa = ForAll([x], And(x, Not(x)))
        fe = Exists([x], And(x, Not(x)))
        self.assertEqual(fa.to_smtlib(daggify=False), '(forall ((x Bool)) (and x (not x)))')
        self.assertEqual(fe.to_smtlib(daggify=False), '(exists ((x Bool)) (and x (not x)))')
        # Quantified bodies restart their own .def_N numbering inside the binder.
        self.assertEqual(fa.to_smtlib(daggify=True), '(let ((.def_0 (forall ((x Bool)) (let ((.def_0 (not x))) (let ((.def_1 (and x .def_0))) .def_1))))).def_0)')
        self.assertEqual(fe.to_smtlib(daggify=True), '(let ((.def_0 (exists ((x Bool)) (let ((.def_0 (not x))) (let ((.def_1 (and x .def_0))) .def_1))))).def_0)')

    def test_constant(self):
        # Constants serialize identically in both modes; non-integral reals
        # print as exact fractions.
        b1 = Bool(True)
        b2 = Bool(False)
        r1 = Real(5.5)
        r2 = Real(5)
        r3 = Real((- 5.5))
        i1 = Int(4)
        i2 = Int((- 4))
        self.assertEqual(b1.to_smtlib(daggify=True), 'true')
        self.assertEqual(b2.to_smtlib(daggify=True), 'false')
        self.assertEqual(r1.to_smtlib(daggify=True), '(/ 11 2)')
        self.assertEqual(r2.to_smtlib(daggify=True), '5.0')
        self.assertEqual(r3.to_smtlib(daggify=True), '(- (/ 11 2))')
        self.assertEqual(i1.to_smtlib(daggify=True), '4')
        self.assertEqual(i2.to_smtlib(daggify=True), '(- 4)')
        self.assertEqual(b1.to_smtlib(daggify=False), 'true')
        self.assertEqual(b2.to_smtlib(daggify=False), 'false')
        self.assertEqual(r1.to_smtlib(daggify=False), '(/ 11 2)')
        self.assertEqual(r2.to_smtlib(daggify=False), '5.0')
        self.assertEqual(r3.to_smtlib(daggify=False), '(- (/ 11 2))')
        self.assertEqual(i1.to_smtlib(daggify=False), '4')
        self.assertEqual(i2.to_smtlib(daggify=False), '(- 4)')

    def test_function(self):
        f1_type = FunctionType(REAL, [REAL, REAL])
        f2_type = FunctionType(REAL, [])
        (p, q) = (Symbol('p', REAL), Symbol('q', REAL))
        f1_symbol = Symbol('f1', f1_type)
        f2_symbol = Symbol('f2', f2_type)
        f1 = Function(f1_symbol, [p, q])
        f2 = Function(f2_symbol, [])
        self.assertEqual(f1.to_smtlib(daggify=False), '(f1 p q)')
        # A 0-ary function application prints as a bare symbol.
        self.assertEqual(f2.to_smtlib(daggify=False), 'f2')
        self.assertEqual(f1.to_smtlib(daggify=True), '(let ((.def_0 (f1 p q))) .def_0)')
        self.assertEqual(f2.to_smtlib(daggify=True), 'f2')

    def test_toreal(self):
        p = Symbol('p', INT)
        rp = ToReal(p)
        self.assertEqual(rp.to_smtlib(daggify=False), '(to_real p)')
        self.assertEqual(rp.to_smtlib(daggify=True), '(let ((.def_0 (to_real p))) .def_0)')

    def test_threshold_printing(self):
        # str() truncates huge formulas; serialize() is always complete.
        x = Symbol('x')
        f = And(x, x)
        for _ in range(10):
            f = And(f, f)
        short_f_str = str(f)
        long_f_str = f.serialize()
        self.assertTrue((len(short_f_str) < len(long_f_str)))

    def test_daggify(self):
        # On a highly shared formula the dag printer must be more compact.
        x = Symbol('x')
        f = And(x, x)
        for _ in range(10):
            f = And(f, f)
        tree_buf = StringIO()
        dag_buf = StringIO()
        tree_printer = SmtPrinter(tree_buf)
        dag_printer = SmtDagPrinter(dag_buf)
        dag_printer.printer(f)
        tree_printer.printer(f)
        short_f_str = dag_buf.getvalue()
        long_f_str = tree_buf.getvalue()
        self.assertTrue((len(short_f_str) < len(long_f_str)))

    def test_examples(self):
        # Round-trip every bundled example formula against its expected string.
        for (s, f, logic) in get_str_example_formulae(environment=None):
            str_f = f.serialize()
            self.assertTrue((len(str_f) >= 1), str_f)
            self.assertEqual(str_f, s)

    def test_smart_serialize(self):
        # smart_serialize substitutes user-provided labels for subformulas.
        (x, y) = (Symbol('x'), Symbol('y'))
        f1 = And(x, y)
        f = Implies(x, f1)
        substitutions = {f1: 'f1'}
        res = smart_serialize(f, subs=substitutions)
        self.assertEqual('(x -> f1)', res)
        # Without substitutions it degrades to the plain str() form.
        res = smart_serialize(f)
        self.assertIsNotNone(res)
        self.assertEqual(str(f), res)
        fvars = [Symbol(('x%d' % i)) for i in range(5)]
        ex = ExactlyOne(fvars)
        substitutions = {ex: ('ExactlyOne(%s)' % ','.join((str(v) for v in fvars)))}
        old_str = ex.serialize()
        smart_str = smart_serialize(ex, subs=substitutions)
        self.assertTrue((len(old_str) > len(smart_str)))
        self.assertEqual('ExactlyOne(x0,x1,x2,x3,x4)', smart_str)

    def test_stack_recursion(self):
        # Serialization must be iterative: a formula deeper than the Python
        # recursion limit must still print without RecursionError.
        import sys
        limit = sys.getrecursionlimit()
        f = FreshSymbol()
        p = FreshSymbol()
        for _ in range(limit):
            f = Or(p, And(f, p))
        self.assertTrue((f.size() >= limit))
        s = f.serialize()
        self.assertIsNotNone(s)

    def test_annotations(self):
        # Annotations print as (! term :key value) in both printers.
        x = Symbol('x')
        x_next = Symbol('x.next')
        f = Iff(x, Not(x_next))
        ann = Annotations()
        ann.add(x, 'next', x_next.symbol_name())
        ann.add(f, 'trans', 'true')
        ann.add(x, 'init', 'true')
        tree_buf = StringIO()
        dag_buf = StringIO()
        tree_printer = SmtPrinter(tree_buf, annotations=ann)
        dag_printer = SmtDagPrinter(dag_buf, annotations=ann)
        dag_printer.printer(f)
        tree_printer.printer(f)
        self.assertEqual(tree_buf.getvalue(), '(! (= (! x :next x.next :init true) (not x.next)) :trans true)')
        self.assertEqual(dag_buf.getvalue(), '(let ((.def_0 (not x.next))) (let ((.def_1 (= (! x :next x.next :init true) .def_0))) (! .def_1 :trans true)))')
def iterate_minibatches_generic(input_lst=None, batchsize=None, shuffle=False):
    """Yield aligned minibatches from several equal-length sequences.

    Args:
        input_lst: list of sequences (numpy arrays or lists), all of the same
            length.
        batchsize: items per batch; defaults to the full length (one batch).
        shuffle: when True, iterate in a random order. This requires inputs
            that support fancy indexing by an index array (e.g. numpy arrays).

    Yields:
        A list with one slice per input sequence, in the original order.
    """
    total = len(input_lst[0])
    if batchsize is None:
        batchsize = total
    assert all(len(seq) == total for seq in input_lst)
    if shuffle:
        indices = np.arange(total)
        np.random.shuffle(indices)
    for start in range(0, total, batchsize):
        if shuffle:
            excerpt = indices[start:start + batchsize]
        else:
            excerpt = slice(start, start + batchsize)
        # FIX: loop variable renamed from `input`, which shadowed the builtin.
        yield [seq[excerpt] for seq in input_lst]
def test_hook_auto_num_workers_arg(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
    """The pytest_xdist_auto_num_workers hook overrides -nauto/-nlogical counts."""
    from xdist.plugin import pytest_cmdline_main as check_options
    # Conftest hook returns a fixed worker count per numprocesses mode.
    pytester.makeconftest("\n def pytest_xdist_auto_num_workers(config):\n if config.option.numprocesses == 'auto':\n return 42\n if config.option.numprocesses == 'logical':\n return 8\n ")
    config = pytester.parseconfigure('-nauto')
    check_options(config)
    assert (config.getoption('numprocesses') == 42)
    config = pytester.parseconfigure('-nlogical')
    check_options(config)
    assert (config.getoption('numprocesses') == 8)
def parse_bool(arg):
    """Coerce a bool, None, or a common truthy/falsy string into a bool.

    Raises ValueError for a string that is neither recognizably true nor
    false.
    """
    if isinstance(arg, bool):
        return arg
    if arg is None:
        return False
    lowered = arg.lower()
    if lowered in {'1', 'true', 't', 'yes', 'y'}:
        return True
    if lowered in {'0', 'false', 'f', 'no', 'n', 'none', 'null'}:
        return False
    raise ValueError(f'`{arg}` cannot be converted to boolean!')
def compare_cfg(cfg_main, cfg_secondary, field_name, strict=False):
    """Check that a dotted config field agrees between two configs.

    Walks ``field_name`` (e.g. ``'model.dim'``) through both mappings.
    On mismatch: raises ValueError when ``strict``, otherwise logs a warning
    and the main config's value wins.
    """
    main_val = cfg_main
    secondary_val = cfg_secondary
    for key in field_name.split('.'):
        main_val = main_val[key]
        secondary_val = secondary_val[key]
    if main_val == secondary_val:
        return
    if strict:
        raise ValueError(f"Main and pretrained configs must match on '{field_name}'")
    logging.warning(f"Pretrained models '{field_name}' differs, using: {main_val}")
def test_PVSystem_sapm_celltemp_kwargs(mocker):
    """PVSystem.get_cell_temperature forwards SAPM params to temperature.sapm_cell."""
    temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm']['open_rack_glass_glass']
    system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
    mocker.spy(temperature, 'sapm_cell')
    temps = 25
    irrads = 1000
    winds = 1
    out = system.get_cell_temperature(irrads, temps, winds, model='sapm')
    # The spy pins the exact positional argument order expected by sapm_cell.
    temperature.sapm_cell.assert_called_once_with(irrads, temps, winds, temp_model_params['a'], temp_model_params['b'], temp_model_params['deltaT'])
    assert_allclose(out, 57, atol=1)
def masked_cross_entropy(a, b, mask):
    """Per-example cross-entropy of logits ``a`` vs targets ``b``, averaged over masked positions.

    The small epsilon in the denominator guards against fully-masked rows.
    """
    targets_onehot = torch.nn.functional.one_hot(b, num_classes=a.shape[(- 1)])
    log_probs = F.log_softmax(a, dim=2)
    token_loss = ((-log_probs) * targets_onehot).sum(axis=2)
    denom = sum_flat(mask)
    masked_total = sum_flat(token_loss * mask.float())
    return masked_total / (denom + 0.0001)
def true_and_pred(out, label_ids, label_mask):
    """Pair up gold and predicted label ids for the unmasked positions.

    Args:
        out: (batch, seq, num_labels) score/logit array.
        label_ids: (batch, seq) gold label ids.
        label_mask: (batch, seq) — truthy entries mark positions to keep.

    Returns:
        A list with one ``(trues, preds)`` pair of lists per batch element.
    """
    predictions = np.argmax(out, axis=2)
    tplist = []
    # Iterate the three aligned batch dimensions together instead of indexing
    # by range(len(...)).
    for golds, preds_row, masks in zip(label_ids, predictions, label_mask):
        trues = []
        preds = []
        for true, pred, mask in zip(golds, preds_row, masks):
            if mask:
                trues.append(true)
                preds.append(pred)
        tplist.append((trues, preds))
    return tplist
def test_load_nist_vectors():
    """load_nist_vectors flattens CAVS sections into one dict per COUNT block."""
    # Canned AESVS CBC file: two ENCRYPT and two DECRYPT entries; the KEY/IV
    # values are intentionally empty here.
    vector_data = textwrap.dedent('\n # CAVS 11.1\n # Config info for aes_values\n # AESVS GFSbox test data for CBC\n # State : Encrypt and Decrypt\n # Key Length : 128\n # Generated on Fri Apr 22 15:11:33 2011\n\n [ENCRYPT]\n\n COUNT = 0\n KEY = \n IV = \n PLAINTEXT = f34481ec3cc627bacd5dc3fb08f273e6\n CIPHERTEXT = 0336763e966d92595a567cc9ce537f5e\n\n COUNT = 1\n KEY = \n IV = \n PLAINTEXT = 9798c4640bad75c7c3227db910174e72\n CIPHERTEXT = a9a1631bf4996954ebc093957b234589\n\n [DECRYPT]\n\n COUNT = 0\n KEY = \n IV = \n CIPHERTEXT = 0336763e966d92595a567cc9ce537f5e\n PLAINTEXT = f34481ec3cc627bacd5dc3fb08f273e6\n\n COUNT = 1\n KEY = \n IV = \n CIPHERTEXT = a9a1631bf4996954ebc093957b234589\n PLAINTEXT = 9798c4640bad75c7c3227db910174e72\n ').splitlines()
    # Values are parsed as lowercase hex byte strings; section headers and
    # comments are dropped.
    assert (load_nist_vectors(vector_data) == [{'key': b'', 'iv': b'', 'plaintext': b'f34481ec3cc627bacd5dc3fb08f273e6', 'ciphertext': b'0336763e966d92595a567cc9ce537f5e'}, {'key': b'', 'iv': b'', 'plaintext': b'9798c4640bad75c7c3227db910174e72', 'ciphertext': b'a9a1631bf4996954ebc093957b234589'}, {'key': b'', 'iv': b'', 'plaintext': b'f34481ec3cc627bacd5dc3fb08f273e6', 'ciphertext': b'0336763e966d92595a567cc9ce537f5e'}, {'key': b'', 'iv': b'', 'plaintext': b'9798c4640bad75c7c3227db910174e72', 'ciphertext': b'a9a1631bf4996954ebc093957b234589'}])
class BNBeforeConvTranspose(torch.nn.Module):
    """Conv -> ReLU -> BatchNorm -> ConvTranspose topology (BN feeding a transposed conv)."""

    def __init__(self, padding=0, stride=1, dilation=1, groups=1, output_padding=0):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(10, 10, 3, bias=False)
        self.relu1 = torch.nn.ReLU()
        self.bn1 = torch.nn.BatchNorm2d(10)
        self.conv2 = torch.nn.ConvTranspose2d(10, 10, 3, padding=padding, stride=stride, dilation=dilation, groups=groups, output_padding=output_padding)

    def forward(self, x):
        """Apply conv -> relu -> bn, then the transposed convolution."""
        features = self.bn1(self.relu1(self.conv1(x)))
        return self.conv2(features)
def test_update_grant(graphql_client, user, conference_factory, grant_factory):
    """Updating an existing grant through the mutation succeeds for its owner."""
    graphql_client.force_login(user)
    conference = conference_factory(active_grants=True)
    grant = grant_factory(conference=conference, gender='female', user_id=user.id)
    # FIX: the original line was mangled — `website=' twitterHandle=''` was an
    # implicit string concatenation that silently dropped the twitterHandle
    # keyword (the website URL appears to have been scrubbed). Both kwargs are
    # restored; website uses an empty placeholder — TODO confirm the intended URL.
    response = _update_grant(graphql_client, grant, name='Marcotte', fullName='Marcotte B. A.', ageGroup='range_25_34', gender='male', occupation='student', grantType='diversity', pythonUsage='random', communityContribution='Soft toys meetups', beenToOtherEvents='no', interestedInVolunteering='yes', needsFundsForTravel=True, needVisa=True, needAccommodation=True, why='why not', notes='', travellingFrom='GB', website='', twitterHandle='', githubHandle='marcottebear', linkedinUrl='www.linkedin.com/in/marcotteba', mastodonHandle='')
    grant.refresh_from_db()
    assert not response.get('errors')
    assert response['data']['updateGrant']['__typename'] == 'Grant'
def collect_art_info(root_path, split, ratio, print_every=1000):
    """Collect COCO-style image/annotation infos for the ArT text dataset.

    Args:
        root_path: dataset root containing imgs/ and annotations/.
        split: 'train' or 'val' — which side of the ratio split to return.
        ratio: fraction of images reserved for validation (<= 0 disables the
            split and puts everything in train).
        print_every: progress-print interval.

    Returns:
        List of per-image dicts with file name, size and polygon annotations.
    """
    annotation_path = osp.join(root_path, 'annotations/train_labels.json')
    if (not osp.exists(annotation_path)):
        raise Exception(f'{annotation_path} not exists, please check and try again.')
    annotation = mmcv.load(annotation_path)
    img_prefixes = annotation.keys()
    (trn_files, val_files) = ([], [])
    if (ratio > 0):
        # Every floor(1/ratio)-th image (i % k == 0) goes to val, the rest to train.
        for (i, file) in enumerate(img_prefixes):
            if (i % math.floor((1 / ratio))):
                trn_files.append(file)
            else:
                val_files.append(file)
    else:
        (trn_files, val_files) = (img_prefixes, [])
    print(f'training #{len(trn_files)}, val #{len(val_files)}')
    if (split == 'train'):
        img_prefixes = trn_files
    elif (split == 'val'):
        img_prefixes = val_files
    else:
        raise NotImplementedError
    img_infos = []
    for (i, prefix) in enumerate(img_prefixes):
        if ((i > 0) and ((i % print_every) == 0)):
            print(f'{i}/{len(img_prefixes)}')
        img_file = osp.join(root_path, 'imgs', (prefix + '.jpg'))
        # Skip annotation entries whose image file is missing on disk.
        if (not osp.exists(img_file)):
            continue
        # Image is read only to obtain its height/width.
        img = mmcv.imread(img_file)
        img_info = dict(file_name=osp.join(osp.basename(img_file)), height=img.shape[0], width=img.shape[1], segm_file=osp.join(osp.basename(annotation_path)))
        anno_info = []
        for ann in annotation[prefix]:
            # Flatten the polygon, clamping negative coordinates to zero.
            segmentation = []
            for (x, y) in ann['points']:
                segmentation.append(max(0, x))
                segmentation.append(max(0, y))
            # Axis-aligned bounding box from the polygon extremes.
            (xs, ys) = (segmentation[::2], segmentation[1::2])
            (x, y) = (min(xs), min(ys))
            (w, h) = ((max(xs) - x), (max(ys) - y))
            bbox = [x, y, w, h]
            # '###' transcriptions and illegible boxes are marked as crowd regions.
            if ((ann['transcription'] == '###') or ann['illegibility']):
                iscrowd = 1
            else:
                iscrowd = 0
            anno = dict(iscrowd=iscrowd, category_id=1, bbox=bbox, area=(w * h), segmentation=[segmentation])
            anno_info.append(anno)
        img_info.update(anno_info=anno_info)
        img_infos.append(img_info)
    return img_infos
class Describe_BaseHeaderFooter():
.parametrize(('has_definition', 'expected_value'), [(False, True), (True, False)])
def it_knows_when_its_linked_to_the_previous_header_or_footer(self, has_definition: bool, expected_value: bool, _has_definition_prop_: Mock):
_has_definition_prop_.return_value = has_definition
header = _BaseHeaderFooter(None, None, None)
is_linked = header.is_linked_to_previous
assert (is_linked is expected_value)
.parametrize(('has_definition', 'value', 'drop_calls', 'add_calls'), [(False, True, 0, 0), (True, False, 0, 0), (True, True, 1, 0), (False, False, 0, 1)])
def it_can_change_whether_it_is_linked_to_previous_header_or_footer(self, has_definition: bool, value: bool, drop_calls: int, add_calls: int, _has_definition_prop_: Mock, _drop_definition_: Mock, _add_definition_: Mock):
_has_definition_prop_.return_value = has_definition
header = _BaseHeaderFooter(None, None, None)
header.is_linked_to_previous = value
assert (_drop_definition_.call_args_list == ([call(header)] * drop_calls))
assert (_add_definition_.call_args_list == ([call(header)] * add_calls))
def it_provides_access_to_the_header_or_footer_part_for_BlockItemContainer(self, _get_or_add_definition_: Mock, header_part_: Mock):
_get_or_add_definition_.return_value = header_part_
header = _BaseHeaderFooter(None, None, None)
header_part = header.part
_get_or_add_definition_.assert_called_once_with(header)
assert (header_part is header_part_)
def it_provides_access_to_the_hdr_or_ftr_element_to_help(self, _get_or_add_definition_: Mock, header_part_: Mock):
hdr = element('w:hdr')
_get_or_add_definition_.return_value = header_part_
header_part_.element = hdr
header = _BaseHeaderFooter(None, None, None)
hdr_elm = header._element
_get_or_add_definition_.assert_called_once_with(header)
assert (hdr_elm is hdr)
def it_gets_the_definition_when_it_has_one(self, _has_definition_prop_: Mock, _definition_prop_: Mock, header_part_: Mock):
_has_definition_prop_.return_value = True
_definition_prop_.return_value = header_part_
header = _BaseHeaderFooter(None, None, None)
header_part = header._get_or_add_definition()
assert (header_part is header_part_)
def but_it_gets_the_prior_definition_when_it_is_linked(self, _has_definition_prop_: Mock, _prior_headerfooter_prop_: Mock, prior_headerfooter_: Mock, header_part_: Mock):
_has_definition_prop_.return_value = False
_prior_headerfooter_prop_.return_value = prior_headerfooter_
prior_headerfooter_._get_or_add_definition.return_value = header_part_
header = _BaseHeaderFooter(None, None, None)
header_part = header._get_or_add_definition()
prior_headerfooter_._get_or_add_definition.assert_called_once_with()
assert (header_part is header_part_)
def and_it_adds_a_definition_when_it_is_linked_and_the_first_section(self, _has_definition_prop_: Mock, _prior_headerfooter_prop_: Mock, _add_definition_: Mock, header_part_: Mock):
_has_definition_prop_.return_value = False
_prior_headerfooter_prop_.return_value = None
_add_definition_.return_value = header_part_
header = _BaseHeaderFooter(None, None, None)
header_part = header._get_or_add_definition()
_add_definition_.assert_called_once_with(header)
assert (header_part is header_part_)
def _add_definition_(self, request: FixtureRequest):
return method_mock(request, _BaseHeaderFooter, '_add_definition')
def _definition_prop_(self, request: FixtureRequest):
return property_mock(request, _BaseHeaderFooter, '_definition')
def _drop_definition_(self, request: FixtureRequest):
return method_mock(request, _BaseHeaderFooter, '_drop_definition')
def _get_or_add_definition_(self, request: FixtureRequest):
return method_mock(request, _BaseHeaderFooter, '_get_or_add_definition')
def _has_definition_prop_(self, request: FixtureRequest):
return property_mock(request, _BaseHeaderFooter, '_has_definition')
def header_part_(self, request: FixtureRequest):
return instance_mock(request, HeaderPart)
def prior_headerfooter_(self, request: FixtureRequest):
    """Instance mock standing in for the prior section's _BaseHeaderFooter.

    NOTE(review): reads as a pytest fixture whose `@pytest.fixture` decorator
    was lost in extraction -- confirm against the original file.
    """
    return instance_mock(request, _BaseHeaderFooter)
def _prior_headerfooter_prop_(self, request: FixtureRequest):
    """Mock for the |_BaseHeaderFooter._prior_headerfooter| property.

    NOTE(review): reads as a pytest fixture whose `@pytest.fixture` decorator
    was lost in extraction -- confirm against the original file.
    """
    return property_mock(request, _BaseHeaderFooter, '_prior_headerfooter')
def test_dump_version_doesnt_bail_on_value_error(tmp_path: Path) -> None:
    """dump_version raises a clear ValueError for an unrecognized target format."""
    target = 'VERSION'
    tag = VERSIONS['exact'].tag
    scm_version = meta(tag, config=c)
    with pytest.raises(ValueError, match='^bad file format:'):
        dump_version(tmp_path, str(tag), target, scm_version=scm_version)
class Pizza(ABC):
    """Abstract base for pizzas: recipe data plus the common make-and-serve steps.

    Subclasses populate name/dough/sauce/toppings; prepare/bake/cut/box simply
    announce themselves on stdout.
    """
    name: str
    dough: str
    sauce: str
    toppings: List[str]

    def getName(self) -> str:
        """Return the pizza's display name."""
        return self.name

    def prepare(self) -> None:
        """Announce the preparation step."""
        print(f'Preparing {self.name}')

    def bake(self) -> None:
        """Announce the baking step."""
        print(f'Baking {self.name}')

    def cut(self) -> None:
        """Announce the cutting step."""
        print(f'Cutting {self.name}')

    def box(self) -> None:
        """Announce the boxing step."""
        print(f'Boxing {self.name}')

    def toString(self) -> str:
        """Render a multi-line description: header line, dough, sauce, toppings."""
        display: StringBuffer = StringBuffer()
        for line in [f'---- {self.name} ----', self.dough, self.sauce, *self.toppings]:
            display.append(f'{line}\n')
        return display.toString()
class ConfigWithFiles(Fixture):
    """Fixture supplying a temporary config directory with a reahl bootstrap file."""
    def new_config_dir(self):
        # Fresh temporary directory that holds the generated config files.
        return temp_dir()
    def new_config_bootstrap_file(self):
        # Minimal reahl.config.py pointing the system at our stub egg.
        contents = ("\nreahlsystem.root_egg = '%s'\nreahlsystem.connection_uri = None\nreahlsystem.debug = False\n" % self.root_egg_name)
        return self.new_config_file(filename='reahl.config.py', contents=contents)
    def new_root_egg_name(self):
        return easter_egg.as_requirement_string()
    def new_config_file_name(self):
        return 'some_file.py'
    def new_config_file(self, filename=None, contents=''):
        return self.config_dir.file_with((filename or self.config_file_name), contents)
    # FIX(review): the source showed a stray bare `_up` statement here -- a
    # NameError at class-creation time. It is almost certainly the tail of a
    # truncated `@set_up` decorator (reahl.tofu's set-up hook), restored as
    # such; confirm against the original file.
    @set_up
    def set_up_easter_egg(self):
        # Touching config_bootstrap_file forces its lazy creation.
        self.config_bootstrap_file
        easter_egg.clear()
        easter_egg.stubbed_metadata['reahl-component.toml'] = 'metadata_version = "1.0.0"'
        ReahlEgg.clear_cache()
    def set_config_spec(self, egg, code_locator_string):
        # Point the stub egg's component metadata at the given Configuration class.
        egg.stubbed_metadata['reahl-component.toml'] = ('metadata_version = "1.0.0"\nconfiguration = "%s"' % code_locator_string)
class FakeCounter():
    """Test double for a statsd-style counter; records datapoints on its batch.

    NOTE(review): assumes the FakeBatch exposes a `counters` list -- grounded
    only in the append performed by send().
    """
    def __init__(self, batch: FakeBatch, name: str, tags: Dict[(str, Any)]):
        """Remember the owning batch plus this counter's name and tags."""
        self.batch = batch
        self.name = name
        self.tags = tags
    def increment(self, delta: float=1.0, sample_rate: float=1.0) -> None:
        """Record a positive adjustment of `delta`."""
        self.send(delta, sample_rate)
    def decrement(self, delta: float=1.0, sample_rate: float=1.0) -> None:
        """Record a negative adjustment by forwarding -delta to increment()."""
        self.increment((- delta), sample_rate)
    def send(self, delta: float, sample_rate: float) -> None:
        """Append the raw datapoint onto the batch's counters list."""
        record = {'name': self.name, 'delta': delta, 'sample_rate': sample_rate, 'tags': self.tags}
        self.batch.counters.append(record)
class TerminusCopyCommand(sublime_plugin.TextCommand):
    """Copy the terminal view's selected text to the clipboard.

    Joins multiple selections with newlines and strips soft line-wrap
    continuation markers before copying.
    """
    def run(self, edit):
        view = self.view
        # Only act inside Terminus-managed views.
        if not view.settings().get('terminus_view'):
            return
        collected = ''
        for region in view.sel():
            # Separate already-collected text from the next selection.
            if collected:
                collected += '\n'
            collected += view.substr(region)
        # Drop wrap-continuation markers, with and without a trailing newline.
        collected = collected.replace(CONTINUATION + '\n', '')
        collected = collected.replace(CONTINUATION, '')
        sublime.set_clipboard(collected)
def convert_to_distributed_tensor(tensor: torch.Tensor) -> Tuple[(torch.Tensor, str)]:
    """Move `tensor` to GPU when the active process group uses the NCCL backend.

    NCCL can only collectively communicate CUDA tensors, so a CPU tensor must
    be copied to the current CUDA device before a collective op.

    Args:
        tensor: the tensor to (possibly) migrate.

    Returns:
        Tuple of (possibly-moved tensor, original device tag: 'cpu' or 'gpu')
        so the caller can restore the original placement afterwards.
    """
    orig_device = 'cpu' if not tensor.is_cuda else 'gpu'
    # Fix: also require an *initialized* default process group --
    # torch.distributed.get_backend() raises RuntimeError when called before
    # init_process_group(), which made this helper fail in non-distributed
    # runs even though is_available() was True.
    if (torch.distributed.is_available()
            and torch.distributed.is_initialized()
            and torch.distributed.get_backend() == torch.distributed.Backend.NCCL
            and not tensor.is_cuda):
        tensor = tensor.cuda()
    return (tensor, orig_device)
def gpg_command(*cmd, with_user_id=False, with_trustdb=False, quiet=True, minimum_version='2.1.15', log=None):
    """Run a GnuPG command and return its decoded stdout+stderr output.

    Lazily resolves the gpg executable (preferring ``gpg2`` over ``gpg``) and
    verifies it meets `minimum_version` on first use, caching the choice in
    the module-level ``gpg_exe``.

    Args:
        *cmd: gpg command-line arguments appended after the common flags.
        with_user_id: if True, pass ``-u <user id>`` for the current gpg_mode.
        with_trustdb: if True, use the real trustdb; otherwise pass
            ``--trust-model always``.
        quiet: if True, pass ``--quiet``.
        minimum_version: minimum acceptable GnuPG version string.
        log: optional logger; on failure the gpg output is logged to it.

    Returns:
        The command's combined output decoded as UTF-8.

    Raises:
        Exception: if gpg_mode is unset, gpg cannot be found, or its version
            is too old.
        subprocess.CalledProcessError: if the gpg invocation itself fails.
    """
    # Fix: the original declared `global gpg_exe, gpg_exe` (name duplicated);
    # only gpg_exe is assigned here -- gpg_mode/gpg_user_ids are read-only.
    global gpg_exe
    if not gpg_mode:
        raise Exception('Attempt to use GPG before setting mode')
    if not gpg_exe:
        # Prefer gpg2 where it exists; fall back to plain gpg.
        try:
            output = subprocess.check_output(('gpg2', '--version'), stderr=subprocess.STDOUT).decode('utf8')
        except Exception:
            output = subprocess.check_output(('gpg', '--version'), stderr=subprocess.STDOUT).decode('utf8')
            gpg_exe = 'gpg'
        else:
            gpg_exe = 'gpg2'
        match = re.search('^gpg \\(GnuPG\\) (\\d+(?:\\.\\d+(?:\\.\\d+)?)?)', output, re.MULTILINE)
        if not match:
            raise Exception('Could not determine GnuPG version in output:\n{}'.format(output))
        # NOTE: LooseVersion (distutils) is deprecated; kept to match the
        # file's existing imports.
        if LooseVersion(match.group(1)) < LooseVersion(minimum_version):
            raise Exception('GnuPG version {} or newer is required. You have version {}.'.format(minimum_version, match.group(1)))
    trustdb_args = () if with_trustdb else ('--trust-model', 'always')
    user_id_args = ('-u', gpg_user_ids[gpg_mode]) if with_user_id else ()
    quiet_args = ('--quiet',) if quiet else ()
    cmd = tuple(chain((gpg_exe, '--batch', '--yes'), quiet_args, trustdb_args, user_id_args, cmd))
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf8')
    except subprocess.CalledProcessError as e:
        if log:
            log.exception('Gpg command {} failed. Output:\n{}'.format(cmd, e.output.decode('utf8')))
        raise
def solve_metric_tsptw(distmat, timew, threads=0):
    """Solve a TSP with time windows to optimality using Gurobi.

    Args:
        distmat: (n, n) pairwise distances, indexed distmat[i, j]
            (assumed metric per the function name -- not verified here).
        timew: per-node time windows; timew[i, 0] / timew[i, 1] are the
            earliest / latest service times of node i (node 0 is the depot).
        threads: Gurobi thread limit (0 lets Gurobi choose).

    Returns:
        (objective value, tour) where tour lists all n node indices in visit
        order starting from the depot.
    """
    n = len(distmat)
    # Big-M constant for the time-precedence constraints; the depot's latest
    # time bounds any feasible arrival time.
    M = timew[(0, 1)]
    def subtour(edges):
        # Return the shortest cycle among the selected edges. Used below only
        # as a sanity check: the time constraints already forbid subtours, so
        # the single remaining cycle should cover all n nodes.
        unvisited = list(range(n))
        cycle = range((n + 1))  # dummy "cycle" longer than any real one
        while unvisited:
            thiscycle = []
            neighbors = unvisited
            while neighbors:
                current = neighbors[0]
                thiscycle.append(current)
                unvisited.remove(current)
                neighbors = [j for (i, j) in edges.select(current, '*') if (j in unvisited)]
            if (len(cycle) > len(thiscycle)):
                cycle = thiscycle
        return cycle
    # Binary edge-selection variables with distance objective (no self-loops).
    dist = {(i, j): distmat[(i, j)] for i in range(n) for j in range(n) if (i != j)}
    m = Model()
    m.Params.outputFlag = False
    vars = m.addVars(dist.keys(), obj=dist, vtype=GRB.BINARY, name='e')
    # Integer arrival-time variables bounded by each node's time window.
    lb = {i: timew[(i, 0)] for i in range(n)}
    ub = {i: timew[(i, 1)] for i in range(n)}
    t_vars = m.addVars(range(n), vtype=GRB.INTEGER, name='t', lb=lb, ub=ub)
    # Degree constraints: exactly one outgoing and one incoming edge per node.
    m.addConstrs(((vars.sum(i, '*') == 1) for i in range(n)))
    m.addConstrs(((vars.sum('*', j) == 1) for j in range(n)))
    # Big-M time precedence: if edge (i, j) is selected then t_j >= t_i + d_ij
    # (with the depot's departure time fixed at 0); otherwise inactive.
    m.addConstrs((((t_vars[j] - (t_vars[i] if (i != 0) else 0)) >= (((d + M) * vars[(i, j)]) - M)) for ((i, j), d) in dist.items()))
    m._vars = vars
    m.Params.threads = threads
    m.optimize()
    # Extract the chosen edges and reconstruct the (unique) tour.
    vals = m.getAttr('x', vars)
    selected = tuplelist(((i, j) for (i, j) in vals.keys() if (vals[(i, j)] > 0.5)))
    tour = subtour(selected)
    assert (len(tour) == n)
    return (m.objVal, tour)
def pool_feature(data, num_proposals=100, num_sample_bins=3, pool_type='mean'):
    """Pool a variable-length feature sequence into a fixed number of proposals.

    The sequence is linearly interpolated along axis 0, divided into
    `num_proposals` equal-width anchors, and each anchor is summarized by
    mean- or max-pooling `num_sample_bins` evenly spaced samples.

    Args:
        data: array-like of shape (T, ...) with T >= 1 time steps.
        num_proposals: number of fixed-size output anchors.
        num_sample_bins: interpolation samples taken per anchor.
        pool_type: 'mean' or 'max'.

    Returns:
        np.ndarray of shape (num_proposals, ...) of pooled features.

    Raises:
        NotImplementedError: for an unsupported `pool_type`.
    """
    # Fix: validate pool_type up front instead of failing on the first loop
    # iteration (the single-frame path previously skipped validation).
    if pool_type not in ('mean', 'max'):
        raise NotImplementedError('Unsupported pool type')
    if len(data) == 1:
        # A single frame cannot be interpolated; tile it across all proposals.
        return np.concatenate([data] * num_proposals)
    positions = list(range(len(data)))
    interp = scipy.interpolate.interp1d(positions, data, axis=0)
    # Shrink the sampling range by eps so queries stay strictly inside the
    # interpolator's domain.
    eps = 0.0001
    (start, end) = (eps, (len(data) - 1) - eps)
    anchor_size = (end - start) / num_proposals
    ptr = start
    feature = []
    for _ in range(num_proposals):
        # Fix: the inner sample index previously shadowed the outer loop's
        # `i`; renamed for clarity (behavior unchanged under Python 3 scoping).
        sample_points = [ptr + (b / num_sample_bins) * anchor_size for b in range(num_sample_bins)]
        samples = interp(sample_points)
        if pool_type == 'mean':
            feature.append(np.mean(samples, axis=0))
        else:
            feature.append(np.max(samples, axis=0))
        ptr += anchor_size
    return np.stack(feature)
def test_paint_when_debug_shapes(view):
    """paint() draws the debug shape when only debug_shapes is enabled."""
    with patch('beeref.selection.commandline_args') as args_mock, \
         patch('beeref.items.BeePixmapItem.draw_debug_shape') as draw_mock:
        args_mock.debug_shapes = True
        args_mock.debug_boundingrects = False
        args_mock.debug_handles = False
        item = BeePixmapItem(QtGui.QImage())
        transform = MagicMock(m11=MagicMock(return_value=0.5))
        painter = MagicMock(combinedTransform=MagicMock(return_value=transform))
        item.paint(painter, None, None)
        draw_mock.assert_called_once()
class SecurityContextTestCase(_GSSAPIKerberosTestCase):
    """High-level gssapi SecurityContext tests run against a live Kerberos realm.

    NOTE(review): the bare `_perms(...)`, `_minversion_test(...)` and
    `_provider_test(...)` statements in this class body read like test
    decorators whose leading characters were lost in extraction (as written
    they are NameErrors at class-creation time) -- confirm against the
    original file.
    """
    def setUp(self):
        """Create client/server names and credentials used by every test."""
        super(SecurityContextTestCase, self).setUp()
        # Raise step() errors immediately instead of deferring them.
        gssctx.SecurityContext.__DEFER_STEP_ERRORS__ = False
        self.client_name = gssnames.Name(self.USER_PRINC)
        self.client_creds = gsscreds.Credentials(name=None, usage='initiate')
        if (sys.platform == 'darwin'):
            # NOTE(review): b'' makes this a plain concatenation; a hostbased
            # service name is conventionally "service@host", so this looks
            # like a corrupted b'@' separator -- confirm.
            spn = ((TARGET_SERVICE_NAME + b'') + FQDN)
            self.target_name = gssnames.Name(spn, gb.NameType.hostbased_service)
        else:
            self.target_name = gssnames.Name(TARGET_SERVICE_NAME, gb.NameType.hostbased_service)
        self.server_name = gssnames.Name(SERVICE_PRINCIPAL)
        self.server_creds = gsscreds.Credentials(name=self.server_name, usage='accept')
    def _create_client_ctx(self, **kwargs):
        """Return an initiate-usage context aimed at the target service."""
        return gssctx.SecurityContext(name=self.target_name, **kwargs)
    def test_create_from_other(self):
        """A high-level context can wrap an existing raw context."""
        (raw_client_ctx, raw_server_ctx) = self._create_completed_contexts()
        high_level_ctx = gssctx.SecurityContext(raw_client_ctx)
        # Heimdal canonicalizes the target to the host principal.
        expected = self.target_name
        if (self.realm.provider.lower() == 'heimdal'):
            expected = gssnames.Name(self.realm.host_princ.encode('utf-8'), name_type=gb.NameType.kerberos_principal)
        self.assertEqual(high_level_ctx.target_name, expected)
    # NOTE(review): bare statement below looks like a stripped decorator
    # (e.g. an `exist_perms`-style kwargs-permutation decorator) -- confirm.
    _perms(lifetime=30, flags=[], mech=gb.MechType.kerberos, channel_bindings=None)
    def test_create_new_init(self, str_name, kwargs):
        """Contexts constructed with explicit creds/kwargs default to initiate."""
        client_ctx = gssctx.SecurityContext(name=self.target_name, creds=self.client_creds, **kwargs)
        self.assertEqual(client_ctx.usage, 'initiate')
        client_ctx = self._create_client_ctx(**kwargs)
        self.assertEqual(client_ctx.usage, 'initiate')
    def test_create_new_accept(self):
        """A context built with only acceptor creds defaults to accept."""
        server_ctx = gssctx.SecurityContext(creds=self.server_creds)
        self.assertEqual(server_ctx.usage, 'accept')
    def test_init_throws_error_on_invalid_args(self):
        """Mixing accept usage with a target name is rejected."""
        self.assertRaises(TypeError, gssctx.SecurityContext, usage='accept', name=self.target_name)
    def _create_completed_contexts(self):
        """Drive a full client/server handshake and return both contexts."""
        client_ctx = self._create_client_ctx(lifetime=400)
        client_token = client_ctx.step()
        self.assertIsInstance(client_token, bytes)
        server_ctx = gssctx.SecurityContext(creds=self.server_creds)
        server_token = server_ctx.step(client_token)
        self.assertIsInstance(server_token, bytes)
        client_ctx.step(server_token)
        return (client_ctx, server_ctx)
    def test_complete_on_partially_completed(self):
        """`complete` flips from False to True as the handshake finishes."""
        client_ctx = self._create_client_ctx()
        client_tok = client_ctx.step()
        self.assertFalse(client_ctx.complete)
        server_ctx = gssctx.SecurityContext(creds=self.server_creds)
        server_tok = server_ctx.step(client_tok)
        client_ctx.step(server_tok)
        self.assertTrue(client_ctx.complete)
        self.assertTrue(server_ctx.complete)
    def test_initiate_accept_steps(self):
        """Completed contexts expose consistent properties on both sides."""
        (client_ctx, server_ctx) = self._create_completed_contexts()
        # 400s requested lifetime plus slack -- presumably clock-skew
        # allowance; confirm against the library's test conventions.
        self.assertLessEqual(server_ctx.lifetime, (400 + 300))
        self.assertEqual(server_ctx.initiator_name, client_ctx.initiator_name)
        self.assertIsInstance(server_ctx.mech, gb.OID)
        self.assertIsInstance(server_ctx.actual_flags, gb.IntEnumFlagSet)
        self.assertFalse(server_ctx.locally_initiated)
        self.assertTrue(server_ctx.complete)
        self.assertLessEqual(client_ctx.lifetime, 400)
        expected = self.target_name
        if (self.realm.provider.lower() == 'heimdal'):
            expected = gssnames.Name(self.realm.host_princ.encode('utf-8'), name_type=gb.NameType.kerberos_principal)
        self.assertEqual(client_ctx.target_name, expected)
        self.assertIsInstance(client_ctx.mech, gb.OID)
        self.assertIsInstance(client_ctx.actual_flags, gb.IntEnumFlagSet)
        self.assertTrue(client_ctx.locally_initiated)
        self.assertTrue(client_ctx.complete)
    def test_channel_bindings(self):
        """Matching channel bindings on both sides allow the handshake."""
        bdgs = gb.ChannelBindings(application_data=b'abcxyz', initiator_address_type=gb.AddressType.ip, initiator_address=b'127.0.0.1', acceptor_address_type=gb.AddressType.ip, acceptor_address=b'127.0.0.1')
        client_ctx = self._create_client_ctx(lifetime=400, channel_bindings=bdgs)
        client_token = client_ctx.step()
        self.assertIsInstance(client_token, bytes)
        server_ctx = gssctx.SecurityContext(creds=self.server_creds, channel_bindings=bdgs)
        server_token = server_ctx.step(client_token)
        self.assertIsInstance(server_token, bytes)
        client_ctx.step(server_token)
    def test_bad_channel_bindings_raises_error(self):
        """Mismatched channel bindings fail the server's step."""
        if (sys.platform == 'darwin'):
            self.skipTest("macOS Heimdal doesn't fail as expected")
        bdgs = gb.ChannelBindings(application_data=b'abcxyz', initiator_address_type=gb.AddressType.ip, initiator_address=b'127.0.0.1', acceptor_address_type=gb.AddressType.ip, acceptor_address=b'127.0.0.1')
        client_ctx = self._create_client_ctx(lifetime=400, channel_bindings=bdgs)
        client_token = client_ctx.step()
        self.assertIsInstance(client_token, bytes)
        # Alter the acceptor address so the bindings no longer match.
        bdgs.acceptor_address = b'127.0.1.0'
        server_ctx = gssctx.SecurityContext(creds=self.server_creds, channel_bindings=bdgs)
        self.assertRaises(gb.BadChannelBindingsError, server_ctx.step, client_token)
    def test_export_create_from_token(self):
        """An exported context can be re-imported from its token."""
        (client_ctx, server_ctx) = self._create_completed_contexts()
        token = client_ctx.export()
        self.assertIsInstance(token, bytes)
        imported_ctx = gssctx.SecurityContext(token=token)
        self.assertEqual(imported_ctx.usage, 'initiate')
        expected = self.target_name
        if (self.realm.provider.lower() == 'heimdal'):
            expected = gssnames.Name(self.realm.host_princ.encode('utf-8'), name_type=gb.NameType.kerberos_principal)
        self.assertEqual(imported_ctx.target_name, expected)
    def test_pickle_unpickle(self):
        """Contexts survive a pickle round trip with properties intact."""
        (client_ctx, server_ctx) = self._create_completed_contexts()
        pickled_ctx = pickle.dumps(client_ctx)
        unpickled_ctx = pickle.loads(pickled_ctx)
        self.assertIsInstance(unpickled_ctx, gssctx.SecurityContext)
        self.assertEqual(unpickled_ctx.usage, 'initiate')
        expected = self.target_name
        if (self.realm.provider.lower() == 'heimdal'):
            expected = gssnames.Name(self.realm.host_princ.encode('utf-8'), name_type=gb.NameType.kerberos_principal)
        self.assertEqual(unpickled_ctx.target_name, expected)
    def test_encrypt_decrypt(self):
        """encrypt() output round-trips through the peer's decrypt()."""
        (client_ctx, server_ctx) = self._create_completed_contexts()
        encrypted_msg = client_ctx.encrypt(b'test message')
        self.assertIsInstance(encrypted_msg, bytes)
        decrypted_msg = server_ctx.decrypt(encrypted_msg)
        self.assertIsInstance(decrypted_msg, bytes)
        self.assertEqual(decrypted_msg, b'test message')
    def test_encrypt_decrypt_throws_error_on_no_encryption(self):
        """decrypt() refuses a message that was wrapped without encryption."""
        (client_ctx, server_ctx) = self._create_completed_contexts()
        wrap_res = client_ctx.wrap(b'test message', False)
        self.assertIsInstance(wrap_res, gb.WrapResult)
        self.assertFalse(wrap_res.encrypted)
        self.assertIsInstance(wrap_res.message, bytes)
        self.assertRaises(excs.EncryptionNotUsed, server_ctx.decrypt, wrap_res.message)
    def test_wrap_unwrap(self):
        """wrap() with encryption round-trips through unwrap()."""
        (client_ctx, server_ctx) = self._create_completed_contexts()
        wrap_res = client_ctx.wrap(b'test message', True)
        self.assertIsInstance(wrap_res, gb.WrapResult)
        self.assertTrue(wrap_res.encrypted)
        self.assertIsInstance(wrap_res.message, bytes)
        unwrap_res = server_ctx.unwrap(wrap_res.message)
        self.assertIsInstance(unwrap_res, gb.UnwrapResult)
        self.assertIsInstance(unwrap_res.message, bytes)
        self.assertEqual(unwrap_res.message, b'test message')
        self.assertTrue(unwrap_res.encrypted)
    def test_get_wrap_size_limit(self):
        """Wrap size limits never exceed the requested output size."""
        (client_ctx, server_ctx) = self._create_completed_contexts()
        with_conf = client_ctx.get_wrap_size_limit(100)
        without_conf = client_ctx.get_wrap_size_limit(100, encrypted=True)
        self.assertIsInstance(with_conf, int)
        self.assertIsInstance(without_conf, int)
        self.assertLessEqual(with_conf, 100)
        self.assertLessEqual(without_conf, 100)
    def test_get_signature(self):
        """get_signature() produces a non-empty MIC token."""
        (client_ctx, server_ctx) = self._create_completed_contexts()
        mic_token = client_ctx.get_signature(b'some message')
        self.assertIsInstance(mic_token, bytes)
        self.assertGreater(len(mic_token), 0)
    def test_verify_signature_raise(self):
        """verify_signature() accepts the signed message, rejects others."""
        (client_ctx, server_ctx) = self._create_completed_contexts()
        mic_token = client_ctx.get_signature(b'some message')
        server_ctx.verify_signature(b'some message', mic_token)
        self.assertRaises(gb.GSSError, server_ctx.verify_signature, b'other message', mic_token)
    # NOTE(review): the two bare statements below look like stripped
    # krb-version/provider gating decorators for the next test -- confirm.
    _minversion_test('1.11', 'returning tokens', provider='mit')
    _provider_test(['mit'], 'returning tokens')
    def test_defer_step_error_on_method(self):
        """With deferred step errors, the failure surfaces at first use."""
        gssctx.SecurityContext.__DEFER_STEP_ERRORS__ = True
        bdgs = gb.ChannelBindings(application_data=b'abcxyz')
        client_ctx = self._create_client_ctx(lifetime=400, channel_bindings=bdgs)
        client_token = client_ctx.step()
        self.assertIsInstance(client_token, bytes)
        bdgs.application_data = b'defuvw'
        server_ctx = gssctx.SecurityContext(creds=self.server_creds, channel_bindings=bdgs)
        # step() succeeds (error deferred); the error fires on encrypt().
        self.assertIsInstance(server_ctx.step(client_token), bytes)
        self.assertRaises(gb.BadChannelBindingsError, server_ctx.encrypt, b'test')
    # NOTE(review): likewise, these look like stripped gating decorators.
    _minversion_test('1.11', 'returning tokens', provider='mit')
    _provider_test(['mit'], 'returning tokens')
    def test_defer_step_error_on_complete_property_access(self):
        """With deferred step errors, accessing `complete` raises the error."""
        gssctx.SecurityContext.__DEFER_STEP_ERRORS__ = True
        bdgs = gb.ChannelBindings(application_data=b'abcxyz')
        client_ctx = self._create_client_ctx(lifetime=400, channel_bindings=bdgs)
        client_token = client_ctx.step()
        self.assertIsInstance(client_token, bytes)
        bdgs.application_data = b'defuvw'
        server_ctx = gssctx.SecurityContext(creds=self.server_creds, channel_bindings=bdgs)
        self.assertIsInstance(server_ctx.step(client_token), bytes)
        self.assertRaises(gb.BadChannelBindingsError, (lambda : server_ctx.complete))
def test_concise_attrib_metadata() -> None:
    """desert.ib() keeps user metadata alongside the marshmallow field."""
    class A():
        x: datetime.datetime = desert.ib(marshmallow.fields.NaiveDateTime(), metadata={'foo': 1})
    expected = datetime.datetime(2019, 10, 21, 10, 25, 0)
    schema = desert.schema(A)
    loaded = schema.load({'x': '2019-10-21T10:25:00'})
    assert loaded == A(x=expected)
    assert attr.fields(A).x.metadata['foo'] == 1
class ElementInfo(object):
    """Abstract wrapper describing a UI element for a concrete back-end.

    Concrete back-ends override the NotImplementedError members below.
    NOTE(review): many members here (handle, name, class_name, parent,
    control_id, children, rectangle, ...) are read as plain attributes
    elsewhere in this class (e.g. `self.parent`, `self.control_id`), so they
    were very likely `@property`-decorated in the original file, and
    `filter_with_depth` (which takes no `self`) a `@staticmethod`; the
    decorators appear to have been stripped during extraction -- confirm.
    """
    def __repr__(self):
        """Debug representation: the __str__ form plus the native handle."""
        return '<{0}, {1}>'.format(self.__str__(), self.handle)
    def __str__(self):
        """Human-readable form: short type name, element name, class name."""
        module = self.__class__.__module__
        # Keep only the last component of the module path.
        module = module[(module.rfind('.') + 1):]
        type_name = ((module + '.') + self.__class__.__name__)
        return "{0} - '{1}', {2}".format(type_name, self.name, self.class_name)
    def set_cache_strategy(self, cached):
        """Set whether element info values should be cached (back-end specific)."""
        raise NotImplementedError()
    def handle(self):
        """Native handle of the element."""
        raise NotImplementedError()
    def name(self):
        """Name/title of the element."""
        raise NotImplementedError()
    def rich_text(self):
        """Full text of the element."""
        raise NotImplementedError()
    def control_id(self):
        """Control identifier of the element."""
        raise NotImplementedError()
    def process_id(self):
        """Id of the process owning the element."""
        raise NotImplementedError()
    def framework_id(self):
        """Id of the UI framework providing the element."""
        raise NotImplementedError()
    def class_name(self):
        """Window class name of the element."""
        raise NotImplementedError()
    def enabled(self):
        """Whether the element is enabled."""
        raise NotImplementedError()
    def visible(self):
        """Whether the element is visible."""
        raise NotImplementedError()
    def parent(self):
        """Parent element info."""
        raise NotImplementedError()
    def top_level_parent(self):
        """Walk up the parent chain to the top-level window.

        Stops when the parent equals a default-constructed instance --
        presumably the desktop/root sentinel of the concrete back-end
        (relies on the subclass's __eq__); confirm per back-end.
        """
        parent = self.parent
        if (parent and (parent != self.__class__())):
            return parent.top_level_parent
        else:
            return self
    def children(self, **kwargs):
        """Return a list of only immediate children matching **kwargs."""
        raise NotImplementedError()
    def iter_children(self, **kwargs):
        """Iterate over immediate children matching **kwargs."""
        raise NotImplementedError()
    def has_depth(self, root, depth):
        """Return True if this element lies within `depth` levels below `root`.

        Equality is judged by control_id; recurses upward through parents.
        """
        if (self.control_id != root.control_id):
            if (depth > 0):
                parent = self.parent
                return parent.has_depth(root, (depth - 1))
            else:
                return False
        else:
            return True
    def filter_with_depth(elements, root, depth):
        """Filter `elements` to those within `depth` levels below `root`.

        NOTE(review): no `self` parameter -- likely a @staticmethod whose
        decorator was stripped; also raises a bare Exception for a bad depth.
        """
        if (depth is not None):
            if (isinstance(depth, integer_types) and (depth > 0)):
                return [element for element in elements if element.has_depth(root, depth)]
            else:
                raise Exception('Depth must be natural number')
        else:
            return elements
    def get_descendants_with_depth(self, depth=None, **kwargs):
        """Collect descendants depth-first, optionally limited to `depth` levels."""
        descendants = []
        def walk_the_tree(root, depth, **kwargs):
            # depth == 0 stops recursion; None means unlimited.
            if (depth == 0):
                return
            for child in root.children(**kwargs):
                descendants.append(child)
                next_depth = (None if (depth is None) else (depth - 1))
                walk_the_tree(child, next_depth, **kwargs)
        walk_the_tree(self, depth, **kwargs)
        return descendants
    def descendants(self, **kwargs):
        """Return all descendants matching **kwargs."""
        raise NotImplementedError()
    def iter_descendants(self, **kwargs):
        """Lazily yield descendants depth-first, honoring an optional `depth` kwarg."""
        depth = kwargs.pop('depth', None)
        if (depth == 0):
            return
        for child in self.iter_children(**kwargs):
            (yield child)
            if (depth is not None):
                kwargs['depth'] = (depth - 1)
            for c in child.iter_descendants(**kwargs):
                (yield c)
    def rectangle(self):
        """Rectangle (bounding box) of the element."""
        raise NotImplementedError()
    def dump_window(self):
        """Dump the element's properties as a dictionary (back-end specific)."""
        raise NotImplementedError()
# NOTE(review): the bare call below reads like a backbone-registry decorator
# (`@<NAME>_REGISTRY.register()`) whose leading characters were lost in
# extraction; as written it merely calls register() and discards the result
# -- confirm against the original file.
_REGISTRY.register()
def resnet18_ms_l12(pretrained=True, **kwargs):
    """Build a ResNet-18 backbone with MixStyle applied after layer1 and layer2.

    Args:
        pretrained: when True, load ImageNet weights from model_urls['resnet18'].
        **kwargs: accepted for registry-call compatibility; unused here.

    Returns:
        The constructed ResNet model.
    """
    # Imported lazily so the module loads without the ops package at import time.
    from dassl.modeling.ops import MixStyle
    model = ResNet(block=BasicBlock, layers=[2, 2, 2, 2], ms_class=MixStyle, ms_layers=['layer1', 'layer2'])
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet18'])
    return model
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.