code stringlengths 281 23.7M |
|---|
def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
    """Compute a normalized mel spectrogram of the waveform batch `y`.

    Mel filterbanks and Hann windows are memoized in the module-level
    `mel_basis` / `hann_window` dicts, keyed by fmax/win_size plus the
    tensor's dtype and device, so repeated calls reuse them.
    """
    if torch.min(y) < -1.0:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.0:
        print('max value is ', torch.max(y))
    global mel_basis, hann_window
    dtype_device = str(y.dtype) + '_' + str(y.device)
    mel_key = str(fmax) + '_' + dtype_device
    win_key = str(win_size) + '_' + dtype_device
    if mel_key not in mel_basis:
        # Build the mel filterbank once per (fmax, dtype, device) combo.
        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
        mel_basis[mel_key] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
    if win_key not in hann_window:
        hann_window[win_key] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
    # Reflect-pad so frames are centered the way the vocoder expects.
    pad = int((n_fft - hop_size) / 2)
    y = torch.nn.functional.pad(y.unsqueeze(1), (pad, pad), mode='reflect').squeeze(1)
    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[win_key], center=center, pad_mode='reflect', normalized=False, onesided=True)
    # Magnitude with a small epsilon for numerical stability.
    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-06)
    spec = torch.matmul(mel_basis[mel_key], spec)
    return spectral_normalize_torch(spec)
def search_func(model, column, key, iter_, handledirs):
    """Interactive-search match callback for a tree view.

    Returns False when the row at `iter_` matches `key` (GTK convention:
    False means "this row matches, stop here"), True otherwise.
    """
    text = model.get_value(iter_, 0)
    if text is None:
        # Rows with no value can never match.
        return True
    if not handledirs or os.sep not in key:
        # Compare against the basename only; a bare root path has an
        # empty basename, so fall back to the separator itself.
        text = os.path.basename(text) or os.sep
    # A match requires the key to appear either case-insensitively or
    # verbatim in the row text.
    return key not in text.lower() and key not in text
def main():
    """Survey a fixed set of Windows registry keys and directories.

    Each entry is (path, cache_tag, max_age_days, recursive); results are
    fetched through the ops caching layer.  Failures on individual items
    are tolerated so one missing key does not abort the whole sweep.
    """
    keylist = [
        ('system\\currentcontrolset\\control\\timezoneinformation', 'OPS_EXTRA_TIMEZONE_KEY', ONE_DAY, True),
        ('SYSTEM\\CurrentControlSet\\Enum\\USB', 'OPS_USB_USB_KEY', ONE_DAY, True),
        ('SYSTEM\\CurrentControlSet\\Enum\\USBSTOR', 'OPS_USB_USBSTOR_KEY', ONE_DAY, True),
        ('Software\\Policies\\Microsoft\\Windows\\WindowsUpdate', 'OPS_EXTRA_WUPOLICY_KEY', ONE_DAY, True),
        ('Software\\Microsoft\\Windows\\CurrentVersion\\WindowsUpdate', 'OPS_EXTRA_WU_KEY', ONE_DAY, True),
        ('SYSTEM\\CurrentControlSet\\Services\\Dhcp\\Parameters', 'OPS_EXTRA_DHCP_KEY', ONE_DAY, True),
        ('SYSTEM\\CurrentControlSet\\Services\\wuauserv', 'OPS_EXTRA_WUSERV_KEY', ONE_DAY, True),
        ('Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings\\Connection\\WinHttpSettings', 'OPS_EXTRA_WINHTTP_KEY', ONE_DAY, True),
        ('HARDWARE\\DESCRIPTION\\System\\CentralProcessor', 'OPS_EXTRA_CPU_KEY', ONE_DAY, True),
        ('SYSTEM\\CurrentControlSet\\Control\\LSA\\Data', 'OPS_SLRO_LSADATA', ONE_YEAR, False),
        ('SYSTEM\\CurrentControlSet\\Control\\LSA\\GBG', 'OPS_SLRO_LSAGBG', ONE_YEAR, False),
        ('SYSTEM\\CurrentControlSet\\Control\\LSA\\JD', 'OPS_SLRO_LSAJD', ONE_YEAR, False),
        ('SYSTEM\\CurrentControlSet\\Control\\LSA\\Skew1', 'OPS_SLRO_LSASKEWL', ONE_YEAR, False),
        ('SECURITY\\Policy\\Secrets\\DPAPI_SYSTEM\\CurrVal', 'OPS_SLRO_DPAPICURRVAL', ONE_YEAR, False),
        ('SECURITY\\Policy\\PolSecretEncryptionKey', 'OPS_SLRO_POLSECRET', ONE_YEAR, False),
        ('SECURITY\\Policy\\L$HYDRAENCKEY_28ada6da-d622-11d1-9cb9-00c04fb16e75\\CurrVal', 'OPS_SLRO_HYDRAKEY', ONE_YEAR, False),
        ('SYSTEM\\CurrentControlSet\\Control\\Terminal Server\\RCM\\Secrets\\L$HYDRAENCKEY_28ada6da-d622-11d1-9cb9-00c04fb16e75\\CurrVal', 'OPS_SLRO_RCMSECRETS', ONE_YEAR, False),
        ('SECURITY\\Policy\\PolEKList', 'OPS_SLRO_POLEKLIST', ONE_YEAR, False),
        ('system\\RAdmin\\v2.0\\Server\\Parameters', 'OPS_SLRO_RADMIN2', ONE_MONTH, False),
        ('SOFTWARE\\RAdmin\\v3.0\\Server\\Parameters\\Radmin Security', 'OPS_SLRO_RADMIN3', ONE_MONTH, False),
        ('SECURITY\\Policy\\Secrets', 'OPS_SLRO_POLICYSECRETS', ONE_MONTH, True),
        ('software\\pgp corporation\\pgp', 'OPS_SLRO_PGP', ONE_MONTH, True),
        ('software\\network associates\\pgp60', 'OPS_SLRO_NAPGP60', ONE_MONTH, True),
        ('software\\network associates\\pgp55', 'OPS_SLRO_NAPGP55', ONE_MONTH, True),
        ('software\\network associates\\pgp', 'OPS_SLRO_NAPGP', ONE_MONTH, True),
        ('Software\\TeamViewer', 'OPS_SLRO_TEAMVIEWER', ONE_DAY, True),
    ]
    winroot = ops.env.get('OPS_WINDOWSDIR')
    sysroot = ops.env.get('OPS_SYSTEMDIR')
    programdata = ops.system.environment.get_environment_var('ALLUSERSPROFILE', maxage=datetime.timedelta(seconds=14400)).value
    progfiles = ops.system.environment.get_environment_var('PROGRAMFILES', maxage=datetime.timedelta(seconds=14400)).value
    dirwalklist = [
        (os.path.join(sysroot, 'Microsoft\\Protect\\S-1-5-18'), 'OPS_SLRO_PROTECT18', ONE_DAY, True),
        (os.path.join(sysroot, 'Microsoft\\Protect\\S-1-5-20'), 'OPS_SLRO_PROTECT20', ONE_DAY, True),
        (os.path.join(programdata, 'Microsoft\\Crypto\\RSA'), 'OPS_SLRO_RSAKEYS', ONE_DAY, True),
        (os.path.join(progfiles, 'InfoTeCS\\ViPNet Client'), 'OPS_SLRO_VIPNET', ONE_DAY, False),
        (os.path.join(winroot, 'Fonts'), 'OPS_WINDOWS_FONTS', ONE_DAY, False),
    ]
    results = []
    for (path, tag, age_days, recursive) in keylist:
        try:
            results.append(ops.system.registry.get_registrykey('L', path, cache_tag=tag, cache_size=1, maxage=datetime.timedelta(age_days), dszquiet=False, dszlog=True, recursive=recursive))
        except Exception:
            # A bare except here would also swallow KeyboardInterrupt /
            # SystemExit; limit to ordinary errors so the sweep stays
            # interruptible while still tolerating per-key failures.
            pass
    for (path, tag, age_days, recursive) in dirwalklist:
        try:
            results.append(ops.files.dirs.get_dirlisting(path, cache_tag=tag, cache_size=1, maxage=datetime.timedelta(age_days), dszquiet=False, dszlog=True, recursive=recursive))
        except Exception:
            pass
class ExtensionTests(unittest.TestCase):
    """The abstract Extension base class must reject both directions."""

    def test_encode(self):
        """encode() on the base class raises NotImplementedError."""
        frame = Frame(Opcode.TEXT, b'')
        with self.assertRaises(NotImplementedError):
            Extension().encode(frame)

    def test_decode(self):
        """decode() on the base class raises NotImplementedError."""
        frame = Frame(Opcode.TEXT, b'')
        with self.assertRaises(NotImplementedError):
            Extension().decode(frame)
def get_crystal_class(cell, ops=None, tol=SYMPREC):
    """Determine the crystal class (point group) and Laue class of a cell.

    The rotation part of each space-group operation is classified by its
    (trace, determinant) pair into one of the ten rotation types
    (-6, -4, -3, -2, -1, 1, 2, 3, 4, 6); the per-type counts form a
    10-entry fingerprint that is matched against the tabulated
    CrystalClass data, and the Laue class is looked up from the found
    crystal class.

    Args:
        cell: periodic cell whose symmetry operations are analyzed.
        ops: optional precomputed space-group operations; searched from
            `cell` via search_space_group_ops when None.
        tol: tolerance for the symmetry search (default SYMPREC).

    Returns:
        Tuple (crystal_class, laue_class) of table key names.

    Raises:
        ValueError: if a rotation matrix has an impossible trace.
        RuntimeError: if no tabulated class matches the fingerprint.
    """
    if (ops is None):
        ops = search_space_group_ops(cell, tol=tol)
    rotations = []
    for op in ops:
        rotations.append(op.rot)
    # Only distinct rotation parts matter for the point group.
    rotations = np.unique(np.asarray(rotations), axis=0)
    # Rotation type -> index into the 10-entry fingerprint table.
    maps = {(- 6): 0, (- 4): 1, (- 3): 2, (- 2): 3, (- 1): 4, 1: 5, 2: 6, 3: 7, 4: 8, 6: 9}
    table = ([0] * 10)
    for rot in rotations:
        # For a proper rotation by angle t, trace = 1 + 2*cos(t); det
        # separates proper rotations (+1) from rotoinversions (-1).
        trace = np.trace(rot)
        det = np.linalg.det(rot)
        if (trace == 3):
            # Identity.
            assert (det == 1)
            table[maps[1]] += 1
        elif (trace == (- 3)):
            # Inversion.
            assert (det == (- 1))
            table[maps[(- 1)]] += 1
        elif (trace == 2):
            # 6-fold rotation.
            assert (det == 1)
            table[maps[6]] += 1
        elif (trace == (- 2)):
            # -6 rotoinversion.
            assert (det == (- 1))
            table[maps[(- 6)]] += 1
        elif (trace == 0):
            # det distinguishes a 3-fold rotation from -3.
            # NOTE(review): a det that is neither +1 nor -1 falls through
            # silently here (and in the two branches below) -- confirm
            # whether that is intentional.
            if (det == 1):
                table[maps[3]] += 1
            elif (det == (- 1)):
                table[maps[(- 3)]] += 1
        elif (trace == 1):
            # det distinguishes a 4-fold rotation from a mirror (-2).
            if (det == 1):
                table[maps[4]] += 1
            elif (det == (- 1)):
                table[maps[(- 2)]] += 1
        elif (trace == (- 1)):
            # det distinguishes a 2-fold rotation from -4.
            if (det == 1):
                table[maps[2]] += 1
            elif (det == (- 1)):
                table[maps[(- 4)]] += 1
        else:
            raise ValueError(('Input rotation matrix is wrong: %s' % rot))
    # Imported locally, presumably to avoid a module-level import cycle
    # -- TODO confirm.
    from pyscf.pbc.symm.tables import CrystalClass, LaueClass
    laue_class = None
    crystal_class = None
    for (k, v) in CrystalClass.items():
        # A class matches only when all ten counts agree exactly.
        count = 0
        for i in range(10):
            if (table[i] == v[i]):
                count += 1
        if (count == 10):
            crystal_class = k
            break
    for (k, v) in LaueClass.items():
        if (crystal_class in v):
            laue_class = k
            break
    if ((crystal_class is None) or (laue_class is None)):
        raise RuntimeError('Unable to determine crystal class.')
    return (crystal_class, laue_class)
('hyperlink.contains_page_break is {value}')
def then_hyperlink_contains_page_break_is_value(context: Context, value: str):
    """BDD then-step: assert hyperlink.contains_page_break matches `value`.

    `value` is the literal string 'True' or 'False' from the feature file.
    """
    expected = {'True': True, 'False': False}[value]
    actual = context.hyperlink.contains_page_break
    assert actual == expected, f'expected: {expected}, got: {actual}'
def weights_init_normal(m):
    """Weight initializer intended for use with ``Module.apply``.

    Conv*/Linear layers get N(0, 0.02) weights; BatchNorm* layers get
    N(1, 0.02) scale and zero bias (the DCGAN initialization scheme).

    Fix: ``init.normal``/``init.constant`` are deprecated aliases (removed
    in recent PyTorch docs in favor of the in-place variants); use
    ``init.normal_``/``init.constant_``, which have identical behavior.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        # Scale starts near identity, bias at zero.
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def _reduce_terms(terms, stabilizer_list, manual_input, fixed_positions):
    """Reduce `terms` by eliminating one qubit per stabilizer.

    For each stabilizer, a qubit position is fixed (automatically, or
    taken from `fixed_positions` when `manual_input` is True), every term
    is rewritten via fix_single_term, and the remaining stabilizers are
    updated the same way before the next pass.

    Args:
        terms: QubitOperator whose terms are reduced.
        stabilizer_list: list of single-term stabilizer QubitOperators;
            shrinks by one element per iteration.
        manual_input: when True, `fixed_positions` supplies the qubit to
            fix for each stabilizer; when False the list is built here.
        fixed_positions: qubit indices to fix (input when manual, output
            when automatic).

    Returns:
        Tuple (reduced terms, fixed_positions actually used).
    """
    if (manual_input is False):
        # Automatic mode chooses its own positions; start from scratch.
        fixed_positions = []
    for (i, _) in enumerate(stabilizer_list):
        # Always index 0: the list is rebuilt one element shorter at the
        # end of every pass, so position 0 is the current stabilizer.
        selected_stab = list(stabilizer_list[0].terms)[0]
        if (manual_input is False):
            # Pick the first qubit of this stabilizer not already fixed.
            for qubit_pauli in selected_stab:
                if (qubit_pauli[0] not in fixed_positions):
                    fixed_positions += [qubit_pauli[0]]
                    fixed_op = qubit_pauli[1]
                    break
        else:
            # Use the caller-specified position for this stabilizer.
            for qubit_pauli in selected_stab:
                if (qubit_pauli[0] == fixed_positions[i]):
                    fixed_op = qubit_pauli[1]
                    break
        # NOTE(review): if neither loop above finds a qubit, `fixed_op`
        # stays unbound and the next line raises NameError -- confirm the
        # inputs guarantee a match.
        # `other_op` is the remaining Pauli that anticommutes with both
        # fixed_op and the third axis.
        if (fixed_op in ['X', 'Z']):
            other_op = 'Y'
        else:
            other_op = 'X'
        new_terms = QubitOperator()
        for qubit_pauli in terms:
            new_terms += fix_single_term(qubit_pauli, fixed_positions[i], fixed_op, other_op, stabilizer_list[0])
        # Rewrite the not-yet-processed stabilizers with the same rule.
        updated_stabilizers = []
        for update_stab in stabilizer_list[1:]:
            updated_stabilizers += [fix_single_term(update_stab, fixed_positions[i], fixed_op, other_op, stabilizer_list[0])]
        terms = new_terms
        stabilizer_list = updated_stabilizers
        # Sanity checks on the remaining stabilizers after each pass.
        check_stabilizer_linearity(stabilizer_list, msg='Linearly dependent stabilizers.')
        check_commuting_stabilizers(stabilizer_list, msg='Stabilizers anti-commute.')
    return (terms, fixed_positions)
class ResNetShortCut(nn.Module):
    """Residual shortcut branch: a strided 1x1 convolution followed by
    batch normalization, projecting the input to the main branch's
    channel count and spatial resolution."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        # Project, then normalize; no activation on the shortcut path.
        return self.normalization(self.convolution(input))
.supported(only_if=(lambda backend: backend.pkcs7_supported()), skip_message='Requires OpenSSL with PKCS7 support')
class TestPKCS7Loading():
    """Tests for loading certificate-only PKCS#7 bundles (PEM and DER)."""
    def test_load_invalid_der_pkcs7(self, backend):
        """Garbage DER input must raise ValueError."""
        with pytest.raises(ValueError):
            pkcs7.load_der_pkcs7_certificates(b'nonsense')
    def test_load_invalid_pem_pkcs7(self, backend):
        """Garbage PEM input must raise ValueError."""
        with pytest.raises(ValueError):
            pkcs7.load_pem_pkcs7_certificates(b'nonsense')
    def test_not_bytes_der(self, backend):
        """Non-bytes DER input must raise TypeError."""
        with pytest.raises(TypeError):
            pkcs7.load_der_pkcs7_certificates(38)
    def test_not_bytes_pem(self, backend):
        """Non-bytes PEM input must raise TypeError."""
        with pytest.raises(TypeError):
            pkcs7.load_pem_pkcs7_certificates(38)
    def test_load_pkcs7_pem(self, backend):
        """A valid PEM bundle yields the expected single certificate."""
        certs = load_vectors_from_file(os.path.join('pkcs7', 'isrg.pem'), (lambda pemfile: pkcs7.load_pem_pkcs7_certificates(pemfile.read())), mode='rb')
        assert (len(certs) == 1)
        assert (certs[0].subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME) == [x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, 'ISRG Root X1')])
    # NOTE(review): the next line appears to be a stripped
    # `@pytest.mark.parametrize` decorator (the `@pytest.mark.` prefix is
    # missing); as written it is a syntax error and should be restored.
    .parametrize('filepath', [os.path.join('pkcs7', 'amazon-roots.der'), os.path.join('pkcs7', 'amazon-roots.p7b')])
    def test_load_pkcs7_der(self, filepath, backend):
        """Valid DER bundles yield both certificates in order."""
        certs = load_vectors_from_file(filepath, (lambda derfile: pkcs7.load_der_pkcs7_certificates(derfile.read())), mode='rb')
        assert (len(certs) == 2)
        assert (certs[0].subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME) == [x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, 'Amazon Root CA 3')])
        assert (certs[1].subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME) == [x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, 'Amazon Root CA 2')])
    def test_load_pkcs7_unsupported_type(self, backend):
        """Non-certificate PKCS#7 content (enveloped data) is unsupported."""
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_SERIALIZATION):
            load_vectors_from_file(os.path.join('pkcs7', 'enveloped.pem'), (lambda pemfile: pkcs7.load_pem_pkcs7_certificates(pemfile.read())), mode='rb')
    def test_load_pkcs7_empty_certificates(self):
        """A structurally valid PKCS#7 blob with no certificates raises."""
        # Minimal DER encoding of a signedData content-type with no content.
        der = b'0\x0b\x06\t*\x86H\x86\xf7\r\x01\x07\x02'
        with pytest.raises(ValueError):
            pkcs7.load_der_pkcs7_certificates(der)
def apply_find_items_viewed(df, item_mappings):
    """Find items viewed before each purchase of the target item.

    For every row marked as a purchase of the configured item, a kernel
    collects up to N previously viewed item ids for the same user; the
    collected ids are then inner-joined against `item_mappings`.  Works
    with either a cuDF DataFrame (GPU kernel) or a pandas DataFrame
    (CPU kernel).

    Args:
        df: clickstream frame with wcs_user_sk, tstamp, wcs_sales_sk and
            wcs_item_sk columns.
        item_mappings: frame with an i_item_sk column to join onto.

    Returns:
        Frame of prior_item_viewed rows joined with `item_mappings`.
    """
    import pandas as pd
    import numpy as np
    # Sort newest-first within user so rows viewed before a purchase
    # follow the purchase row positionally.
    df = df.sort_values(by=['wcs_user_sk', 'tstamp', 'wcs_sales_sk', 'wcs_item_sk'], ascending=[False, False, False, False])
    df.reset_index(drop=True, inplace=True)
    # A relevant row is an actual sale of the target item
    # (q03_purchased_item_IN is a module-level query parameter).
    df['relevant_flag'] = ((df.wcs_sales_sk != 0) & (df.wcs_item_sk == q03_purchased_item_IN))
    df['relevant_idx_pos'] = df.index.to_series()
    df.reset_index(drop=True, inplace=True)
    sample = df.loc[(df.relevant_flag == True)]
    sample.reset_index(drop=True, inplace=True)
    # Number of prior views collected per purchase (query parameter).
    N = q03_views_before_purchase
    size = len(sample)
    # One output slot per (purchase, prior view); `like=` dispatches the
    # allocation to the column's array type (NEP 35), so the buffer lives
    # on the GPU for cuDF input.
    out_arr = np.zeros((size * N), dtype=df['wcs_item_sk'].dtype, like=df['wcs_item_sk'].values)
    if isinstance(df, cudf.DataFrame):
        # Launch one GPU thread per purchase row.
        find_items_viewed_before_purchase_kernel_gpu.forall(size)(sample['relevant_idx_pos'], df['wcs_user_sk'], df['tstamp'], df['wcs_item_sk'], out_arr, N)
        result = cudf.DataFrame({'prior_item_viewed': out_arr})
    else:
        find_items_viewed_before_purchase_kernel_cpu(sample['relevant_idx_pos'].to_numpy(), df['wcs_user_sk'].to_numpy(), df['tstamp'].to_numpy(), df['wcs_item_sk'].to_numpy(), out_arr, N)
        result = pd.DataFrame({'prior_item_viewed': out_arr})
    # Release the large intermediates before the join.
    del out_arr
    del df
    del sample
    filtered = result.merge(item_mappings, how='inner', left_on=['prior_item_viewed'], right_on=['i_item_sk'])
    return filtered
class CICleanSetup(object):
    """Test mixin that moves the user's ~/.pysat directory aside so tests
    run against a clean parameter store, and restores it afterwards.
    Only active on CI (skips locally to protect the user's setup)."""

    def setup_method(self):
        """Stash the existing .pysat directory; skip outside CI."""
        # True only when the CI environment variable is exactly 'true'.
        self.ci_env = (os.environ.get('CI') == 'true')
        reload(pysat)
        self.saved_path = copy.deepcopy(pysat.params['data_dirs'])
        if (not self.ci_env):
            pytest.skip('Skipping local tests to avoid breaking user setup')
        else:
            self.root = os.path.join(os.path.expanduser('~'), '.pysat')
            self.new_root = os.path.join(os.path.expanduser('~'), '.saved_pysat')
            # Drop any stale backup left over from an aborted run.
            try:
                shutil.rmtree(self.new_root)
            except FileNotFoundError:
                pass
            shutil.move(self.root, self.new_root)
        return

    def teardown_method(self):
        """Remove the test .pysat dir, restore the backup and parameters."""
        if self.ci_env:
            shutil.rmtree(self.root)
            shutil.move(self.new_root, self.root)
        reload(pysat)
        pysat.params.restore_defaults()
        pysat.params['data_dirs'] = self.saved_path
        del self.ci_env, self.saved_path
        return
class CustomDatasetDataLoader(BaseDataLoader):
    """Data loader wrapping a created dataset in a torch DataLoader and
    capping iteration at opt.max_dataset_size samples."""

    def name(self):
        return 'CustomDatasetDataLoader'

    def initialize(self, opt):
        """Build the dataset and DataLoader from the options object.

        NOTE(review): __len__/__iter__ read self.opt, which is not set
        here -- presumably BaseDataLoader.initialize stores it; verify.
        """
        BaseDataLoader.initialize(self, opt)
        self.dataset = create_dataset(opt)
        self.dataloader = torch.utils.data.DataLoader(self.dataset, batch_size=opt.batchSize, shuffle=(not opt.serial_batches), num_workers=int(opt.nThreads))

    def load_data(self):
        # The loader itself is iterable; hand it back directly.
        return self

    def __len__(self):
        # Report at most max_dataset_size samples.
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        # Stop once max_dataset_size samples have been yielded.
        for (i, data) in enumerate(self.dataloader):
            if ((i * self.opt.batchSize) >= self.opt.max_dataset_size):
                break
            (yield data)
def evaluate(args):
    """Evaluate a grounding model checkpoint on a referring-expression split.

    Loads preprocessed data and a saved VGG16 Faster R-CNN snapshot for
    the dataset/split named in `args`, runs eval_split, prints the
    comprehension accuracy and appends it to experiments/det_results.txt.

    Fix: the results file was opened without ever being closed; use a
    `with` block so the handle is flushed and released deterministically.
    """
    opt = vars(args)
    opt['dataset_splitBy'] = ((opt['dataset'] + '_') + opt['splitBy'])
    # Optional config overrides from file and/or key=value list.
    if (args.cfg_file is not None):
        cfg_from_file(args.cfg_file)
    if (args.set_cfgs is not None):
        cfg_from_list(args.set_cfgs)
    print('Using config:')
    pprint.pprint(cfg)
    # Preprocessed annotations for this dataset/split combination.
    data_json = osp.join('cache/prepro', opt['dataset_splitBy'], 'data.json')
    data_h5 = osp.join('cache/prepro', opt['dataset_splitBy'], 'data.h5')
    loader = GtMRCNLoader(data_json, data_h5)
    opt['vocab_size'] = loader.vocab_size
    opt['C4_feat_dim'] = 512
    net = vgg16(opt, batch_size=1)
    net.create_architecture(81, tag='default', anchor_scales=cfg.ANCHOR_SCALES, anchor_ratios=cfg.ANCHOR_RATIOS)
    sfile = osp.join(opt['dataset_splitBy'], 'output_{}'.format(opt['output_postfix']), 'vgg16_faster_rcnn_iter_{}.pth'.format(opt['model_iter']))
    print('Restoring model snapshots from {:s}'.format(sfile))
    saved_state_dict = torch.load(str(sfile))
    # Copy over only parameters whose name and shape match; count and
    # report the mismatches instead of failing.
    count_1 = 0
    new_params = net.state_dict().copy()
    for (name, param) in new_params.items():
        if ((name in saved_state_dict) and (param.size() == saved_state_dict[name].size())):
            new_params[name].copy_(saved_state_dict[name])
        else:
            print(name, '----')
            count_1 += 1
    print('size not match:', count_1)
    net.load_state_dict(new_params)
    net.eval()
    net.cuda()
    split = opt['split']
    crit = None
    (acc, num_sent) = eval_split(loader, net, crit, split, opt)
    print(("Comprehension on %s's %s (%s sents) is %.2f%%" % (opt['dataset_splitBy'], split, num_sent, (acc * 100.0))))
    # Append the result; `with` guarantees the file is closed (the
    # original leaked the handle).
    with open('experiments/det_results.txt', 'a') as f:
        f.write(("[%s][%s], id[%s]'s acc is %.2f%%\n" % (opt['dataset_splitBy'], opt['split'], opt['id'], (acc * 100.0))))
def qdot_sideinfo_simple(qp: QP, env_sys: System):
    """Return (masked linear velocity, quaternion derivative) for `qp`.

    The linear part is qp.vel gated by the system's active-position mask;
    the rotational part converts the gated angular velocity to a
    quaternion rate via ang_to_quat and applies it to qp.rot.
    """
    masked_vel = qp.vel * env_sys.active_pos
    masked_ang = qp.ang * env_sys.active_rot
    dp_rot = jnp.matmul(ang_to_quat(masked_ang), qp.rot)
    return (masked_vel, dp_rot)
def _load_or_init_impl(session, method_order, allow_drop_layers, allow_lr_init=True):
    """Initialize model variables via the first method that succeeds.

    Supported methods, tried in the order given: 'best' (best-validating
    checkpoint), 'last' (most recent checkpoint), 'init' (fresh variable
    initialization).  The process exits with an error on an unknown
    method name or when every method fails.

    Args:
        session: session the checkpoint/variables are loaded into.
        method_order: iterable of method names to try in order.
        allow_drop_layers: forwarded to _load_checkpoint.
        allow_lr_init: forwarded to _load_checkpoint.

    Returns:
        Whatever the successful loader/initializer returns.
    """
    for method in method_order:
        if (method == 'best'):
            ckpt_path = _checkpoint_path_or_none('best_dev_checkpoint')
            if ckpt_path:
                log_info('Loading best validating checkpoint from {}'.format(ckpt_path))
                return _load_checkpoint(session, ckpt_path, allow_drop_layers, allow_lr_init=allow_lr_init)
            # Fall through to the next method in method_order.
            log_info('Could not find best validating checkpoint.')
        elif (method == 'last'):
            ckpt_path = _checkpoint_path_or_none('checkpoint')
            if ckpt_path:
                log_info('Loading most recent checkpoint from {}'.format(ckpt_path))
                return _load_checkpoint(session, ckpt_path, allow_drop_layers, allow_lr_init=allow_lr_init)
            log_info('Could not find most recent checkpoint.')
        elif (method == 'init'):
            # 'init' always succeeds, so it acts as a terminal fallback.
            log_info('Initializing all variables.')
            return _initialize_all_variables(session)
        else:
            log_error('Unknown initialization method: {}'.format(method))
            sys.exit(1)
    log_error('All initialization methods failed ({}).'.format(method_order))
    sys.exit(1)
def pytest_runtest_teardown(item, nextitem):
    """pytest hook: prepare a failed test item for a rerun.

    When the item has reruns remaining and any test phase failed, cached
    results of failed fixtures are dropped and all other entries are
    removed from the setup-state stack so only `item` itself is torn
    down before the rerun.
    """
    reruns = get_reruns_count(item)
    if (reruns is None):
        # Reruns are not enabled for this item.
        return
    if (not hasattr(item, 'execution_count')):
        # Item never entered the rerun protocol.
        return
    _test_failed_statuses = getattr(item, '_test_failed_statuses', {})
    if ((item.execution_count <= reruns) and any(_test_failed_statuses.values())):
        _remove_cached_results_from_failed_fixtures(item)
        # NOTE(review): assumes _setupstate.stack is a dict keyed by
        # items (newer pytest internals) -- confirm against the pinned
        # pytest version.  Keys are snapshotted with list() so deletion
        # during iteration is safe.
        if (item in item.session._setupstate.stack):
            for key in list(item.session._setupstate.stack.keys()):
                if (key != item):
                    del item.session._setupstate.stack[key]
class RagRetriever():
    """Retriever used by RAG models.

    Wraps a document index (legacy, custom, or canonical HF dataset
    index), retrieves the top documents for a batch of question hidden
    states, and formats generator inputs by concatenating each document
    with the question.

    Fix: ``_build_index`` is invoked both as ``self._build_index(config)``
    and ``cls._build_index(config)`` but was declared as a plain function
    taking only ``config`` -- it must be a ``@staticmethod``.  Likewise
    ``from_pretrained`` uses ``cls`` and must be a ``@classmethod``.
    (Both decorators match the upstream implementation.)
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True):
        self._init_retrieval = init_retrieval
        requires_backends(self, ['datasets', 'faiss'])
        super().__init__()
        # Build an index from the config when none is supplied.
        self.index = (index or self._build_index(config))
        self.generator_tokenizer = generator_tokenizer
        self.question_encoder_tokenizer = question_encoder_tokenizer
        self.n_docs = config.n_docs
        self.batch_size = config.retrieval_batch_size
        self.config = config
        if self._init_retrieval:
            self.init_retrieval()
        self.ctx_encoder_tokenizer = None
        self.return_tokenized_docs = False

    @staticmethod
    def _build_index(config):
        """Construct the document index selected by config.index_name."""
        if (config.index_name == 'legacy'):
            return LegacyIndex(config.retrieval_vector_size, (config.index_path or LEGACY_INDEX_PATH))
        elif (config.index_name == 'custom'):
            return CustomHFIndex.load_from_disk(vector_size=config.retrieval_vector_size, dataset_path=config.passages_path, index_path=config.index_path)
        else:
            return CanonicalHFIndex(vector_size=config.retrieval_vector_size, dataset_name=config.dataset, dataset_split=config.dataset_split, index_name=config.index_name, index_path=config.index_path, use_dummy_dataset=config.use_dummy_dataset)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        """Instantiate a retriever (config, tokenizers, index) from a
        pretrained model name or path.  When `indexed_dataset` is given
        it is wrapped as a custom index instead of loading one."""
        requires_backends(cls, ['datasets', 'faiss'])
        config = (kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs))
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if (indexed_dataset is not None):
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index)

    def save_pretrained(self, save_directory):
        """Save the config, tokenizers and (for a custom index) the FAISS
        index plus passages dataset under `save_directory`."""
        if isinstance(self.index, CustomHFIndex):
            if (self.config.index_path is None):
                index_path = os.path.join(save_directory, 'hf_dataset_index.faiss')
                self.index.dataset.get_index('embeddings').save(index_path)
                self.config.index_path = index_path
            if (self.config.passages_path is None):
                passages_path = os.path.join(save_directory, 'hf_dataset')
                # Datasets cannot be serialized with an attached FAISS
                # index, so detach it around save_to_disk and restore it.
                faiss_index = self.index.dataset._indexes.pop('embeddings')
                self.index.dataset.save_to_disk(passages_path)
                self.index.dataset._indexes['embeddings'] = faiss_index
                self.config.passages_path = passages_path
        self.config.save_pretrained(save_directory)
        rag_tokenizer = RagTokenizer(question_encoder=self.question_encoder_tokenizer, generator=self.generator_tokenizer)
        rag_tokenizer.save_pretrained(save_directory)

    def init_retrieval(self):
        """Load/initialize the underlying index for retrieval."""
        logger.info('initializing retrieval')
        self.index.init_index()

    def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):
        """Concatenate each retrieved doc with its question and tokenize.

        Returns (input_ids, attention_mask) for the generator, padded and
        truncated to config.max_combined_length.
        """
        def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
            # Strip surrounding quotes some titles carry.
            if doc_title.startswith('"'):
                doc_title = doc_title[1:]
            if doc_title.endswith('"'):
                doc_title = doc_title[:(- 1)]
            if (prefix is None):
                prefix = ''
            out = (((((prefix + doc_title) + self.config.title_sep) + doc_text) + self.config.doc_sep) + input_string).replace('  ', ' ')
            return out
        rag_input_strings = [cat_input_and_doc(docs[i]['title'][j], docs[i]['text'][j], input_strings[i], prefix) for i in range(len(docs)) for j in range(n_docs)]
        contextualized_inputs = self.generator_tokenizer.batch_encode_plus(rag_input_strings, max_length=self.config.max_combined_length, return_tensors=return_tensors, padding='max_length', truncation=True)
        return (contextualized_inputs['input_ids'], contextualized_inputs['attention_mask'])

    def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]:
        # Split `t` into consecutive slices of at most `chunk_size`.
        return [t[i:(i + chunk_size)] for i in range(0, len(t), chunk_size)]

    def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[(np.ndarray, np.ndarray)]:
        """Query the index batch-by-batch; returns (doc ids, doc vectors)."""
        question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size)
        ids_batched = []
        vectors_batched = []
        for question_hidden_states in question_hidden_states_batched:
            start_time = time.time()
            (ids, vectors) = self.index.get_top_docs(question_hidden_states, n_docs)
            logger.debug(f'index search time: {(time.time() - start_time)} sec, batch size {question_hidden_states.shape}')
            ids_batched.extend(ids)
            vectors_batched.extend(vectors)
        return (np.array(ids_batched), np.array(vectors_batched))

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[(np.ndarray, List[dict])]:
        """Retrieve the top `n_docs` documents per question.

        Returns (retrieved_doc_embeds, doc_ids, doc_dicts).
        """
        (doc_ids, retrieved_doc_embeds) = self._main_retrieve(question_hidden_states, n_docs)
        return (retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids))

    def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer):
        """Enable returning docs tokenized for a context encoder (end-to-end
        retriever training)."""
        self.ctx_encoder_tokenizer = ctx_encoder_tokenizer
        self.return_tokenized_docs = True

    def __call__(self, question_input_ids: List[List[int]], question_hidden_states: np.ndarray, prefix=None, n_docs=None, return_tensors=None) -> BatchEncoding:
        """Retrieve docs for a question batch and build generator inputs.

        Returns a BatchEncoding with context_input_ids,
        context_attention_mask, retrieved_doc_embeds and doc_ids (plus
        tokenized docs when return_tokenized_docs is enabled).
        """
        n_docs = (n_docs if (n_docs is not None) else self.n_docs)
        prefix = (prefix if (prefix is not None) else self.config.generator.prefix)
        (retrieved_doc_embeds, doc_ids, docs) = self.retrieve(question_hidden_states, n_docs)
        input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True)
        (context_input_ids, context_attention_mask) = self.postprocess_docs(docs, input_strings, prefix, n_docs, return_tensors=return_tensors)
        if self.return_tokenized_docs:
            # Flatten (batch, n_docs) into parallel title/text lists.
            retrieved_doc_text = []
            retrieved_doc_title = []
            for b_idx in range(len(docs)):
                for doc_idx in range(n_docs):
                    retrieved_doc_text.append(docs[b_idx]['text'][doc_idx])
                    retrieved_doc_title.append(docs[b_idx]['title'][doc_idx])
            tokenized_docs = self.ctx_encoder_tokenizer(retrieved_doc_title, retrieved_doc_text, truncation=True, padding='longest', return_tensors=return_tensors)
            return BatchEncoding({'context_input_ids': context_input_ids, 'context_attention_mask': context_attention_mask, 'retrieved_doc_embeds': retrieved_doc_embeds, 'doc_ids': doc_ids, 'tokenized_doc_ids': tokenized_docs['input_ids'], 'tokenized_doc_attention_mask': tokenized_docs['attention_mask']}, tensor_type=return_tensors)
        else:
            return BatchEncoding({'context_input_ids': context_input_ids, 'context_attention_mask': context_attention_mask, 'retrieved_doc_embeds': retrieved_doc_embeds, 'doc_ids': doc_ids}, tensor_type=return_tensors)
def generate_costing_table(pyscf_mf: scf.HF, cutoffs: np.ndarray, name: str='pbc', chi: int=10, beta: int=20, dE_for_qpe: float=0.0016, energy_method: str='MP2') -> pd.DataFrame:
    """Tabulate double-factorization resource estimates vs. DF cutoff.

    For each threshold in `cutoffs`, the Cholesky factors are
    double-factorized, the lambda parameters and qubitized-QPE costs are
    computed, and the correlation energy (MP2 or CCSD) of the truncated
    integrals is compared to the untruncated reference.

    Args:
        pyscf_mf: converged periodic mean-field object (with k-points).
        cutoffs: DF truncation thresholds to scan.
        name: label stored with the resource table.
        chi: number of bits for state preparation coefficients.
        beta: number of bits for rotation angles (recomputed when None).
        dE_for_qpe: target phase-estimation accuracy (Hartree).
        energy_method: 'MP2' or 'CCSD' correlation-energy check.

    Returns:
        DataFrame of resource estimates, one row per cutoff.

    Raises:
        ValueError: for an unrecognized `energy_method`.
    """
    kmesh = kpts_to_kmesh(pyscf_mf.cell, pyscf_mf.kpts)
    cc_inst = build_cc_inst(pyscf_mf)
    # Reference energy from the untruncated integrals.
    exact_eris = cc_inst.ao2mo()
    if (energy_method.lower() == 'mp2'):
        energy_function = (lambda x: cc_inst.init_amps(x))
        (reference_energy, _, _) = energy_function(exact_eris)
    elif (energy_method.lower() == 'ccsd'):
        energy_function = (lambda x: cc_inst.kernel(eris=x))
        (reference_energy, _, _) = energy_function(exact_eris)
    else:
        raise ValueError(f'Unknown value for energy_method: {energy_method}')
    (hcore, chol) = build_hamiltonian(pyscf_mf)
    num_spin_orbs = (2 * hcore[0].shape[(- 1)])
    num_kpts = np.prod(kmesh)
    num_aux = chol[(0, 0)].shape[0]
    # Auxiliary dimension after double factorization (factor of 2 for the
    # +/- combinations, times the number of k-points).
    num_aux_df = ((2 * num_aux) * num_kpts)
    df_resource_obj = DFResources(system_name=name, num_spin_orbitals=num_spin_orbs, num_kpts=num_kpts, dE=dE_for_qpe, chi=chi, beta=beta, energy_method=energy_method, exact_energy=np.real(reference_energy), num_aux=num_aux_df)
    approx_eris = exact_eris
    if (beta is None):
        num_kpts = np.prod(kmesh)
        beta = compute_beta_for_resources(num_spin_orbs, num_kpts, dE_for_qpe)
    for cutoff in cutoffs:
        df_helper = DFABKpointIntegrals(cholesky_factor=chol, kmf=pyscf_mf)
        df_helper.double_factorize(thresh=cutoff)
        df_lambda = compute_lambda(hcore, df_helper)
        # ROHF/UHF cells need the open-shell ERI builder.
        if (pyscf_mf.cell.spin == 0):
            approx_eris = build_approximate_eris(cc_inst, df_helper, eris=approx_eris)
        else:
            approx_eris = build_approximate_eris_rohf(cc_inst, df_helper, eris=approx_eris)
        (approx_energy, _, _) = energy_function(approx_eris)
        df_res_cost = compute_cost(num_spin_orbs, df_lambda.lambda_total, num_aux_df, df_lambda.num_eig, list(kmesh), chi=chi, beta=beta, dE_for_qpe=dE_for_qpe)
        df_resource_obj.add_resources(ham_properties=df_lambda, resource_estimates=df_res_cost, cutoff=cutoff, approx_energy=np.real(approx_energy))
    return df_resource_obj.to_dataframe()
class TaskRc(dict):
    """Immutable dict view of a taskwarrior .taskrc file.

    Dotted keys become nested dicts, ``include`` directives are merged in
    recursively, and ``overrides`` wins over file contents.

    Fix: the ``include`` branch split the line on every space, so an
    include path containing a space raised ValueError from tuple
    unpacking and then the exception handler's ``right.strip()`` raised
    NameError (``right`` was never bound).  Splitting with maxsplit=1
    always yields exactly two parts.
    """

    # taskwarrior UDA type name -> field class used by get_udas().
    UDA_TYPE_MAP = {'date': DateField, 'duration': DurationField, 'numeric': NumericField, 'string': StringField}

    def __init__(self, path=None, overrides=None):
        """Load `path` (if given) and apply `overrides` on top."""
        self.overrides = (overrides if overrides else {})
        if path:
            self.path = os.path.normpath(os.path.expanduser(path))
            config = self._read(self.path)
        else:
            self.path = None
            config = {}
        super(TaskRc, self).__init__(config)

    def _add_to_tree(self, config, key, value):
        """Set dotted `key` to `value` inside the nested dict `config`."""
        key_parts = key.split('.')
        cursor = config
        for part in key_parts[0:(- 1)]:
            if (part not in cursor):
                cursor[part] = {}
            if (not isinstance(cursor[part], dict)):
                # A scalar was stored at this prefix earlier; the deeper
                # key replaces it.
                cursor[part] = {}
            cursor = cursor[part]
        cursor[key_parts[(- 1)]] = value
        return config

    def _merge_trees(self, left, right):
        """Recursively merge `right` into `left`; `right` wins conflicts."""
        if (left is None):
            left = {}
        for (key, value) in right.items():
            if (not isinstance(left, dict)):
                left = {}
            if isinstance(value, dict):
                left[key] = self._merge_trees(left.get(key), value)
            else:
                left[key] = value
        return left

    def _read(self, path):
        """Parse a taskrc file into a nested dict, following includes."""
        config = {}
        with open(path, 'r') as config_file:
            for raw_line in config_file.readlines():
                line = sanitize(raw_line)
                if (not line):
                    continue
                if line.startswith('include '):
                    try:
                        # maxsplit=1 keeps paths with spaces intact and
                        # guarantees `right` is bound before any failure.
                        (left, right) = line.split(' ', 1)
                        config = self._merge_trees(config, TaskRc(right.strip()))
                    except ValueError:
                        logger.exception("Error encountered while adding TaskRc at '%s' (from TaskRc file at '%s')", right.strip(), self.path)
                else:
                    try:
                        (left, right) = line.split('=', 1)
                        key = left.strip()
                        value = right.strip()
                        config = self._add_to_tree(config, key, value)
                    except ValueError:
                        logger.exception("Error encountered while processing configuration setting '%s' (from TaskRc file at '%s')", line, self.path)
        return self._merge_trees(config, self.overrides)

    def __delitem__(self, *args):
        raise TypeError('TaskRc objects are immutable')

    def __setitem__(self, item, value):
        raise TypeError('TaskRc objects are immutable')

    def update(self, value):
        raise TypeError('TaskRc objects are immutable')

    def get_udas(self):
        """Build {name: field} for the user-defined attributes declared
        under the `uda.` prefix; a `values` list turns the UDA into a
        ChoiceField regardless of its declared type."""
        raw_udas = self.get('uda', {})
        udas = {}
        for (k, v) in raw_udas.items():
            tw_type = v.get('type', '')
            label = v.get('label', None)
            choices = v.get('values', None)
            kwargs = {}
            cls = self.UDA_TYPE_MAP.get(tw_type, StringField)
            if choices:
                cls = ChoiceField
                kwargs['choices'] = choices.split(',')
            if label:
                kwargs['label'] = label
            udas[k] = cls(**kwargs)
        return udas

    def __str__(self):
        return 'TaskRc file at {path}'.format(path=self.path)
class VmfsFileSystem(LoopbackFileSystemMixin, MountFileSystem):
    """VMFS (VMware) filesystem mounted via the userspace vmfs-fuse tool
    over a loopback device."""
    # Detection metadata consumed by the filesystem framework.
    type = 'vmfs'
    aliases = ['vmfs_volume_member']
    # Partition-type GUID(s) identifying VMFS volumes -- presumably the
    # mixed-endian on-disk form; verify against the detection code.
    guids = ['2AE031AA-0F40-DB11-9590-000C2911D1B8']

    def mount(self):
        """Mount via vmfs-fuse, rolling back the loopback device and
        mountpoint if the mount command fails."""
        self._make_mountpoint()
        self._find_loopback()
        try:
            _util.check_call_(['vmfs-fuse', self.loopback, self.mountpoint], stdout=subprocess.PIPE)
        except Exception:
            # Release both resources before propagating the failure.
            self._free_loopback()
            self._clear_mountpoint()
            raise
.parametrize('screenshot_manager', [{}, {'type': 'box'}, {'type': 'line'}, {'type': 'line', 'line_width': 1}, {'start_pos': 'top'}], indirect=True)
def ss_memorygraph(screenshot_manager):
    """Screenshot generator for the MemoryGraph widget: inject canned
    graph values, force a redraw, and capture the result."""
    widget = screenshot_manager.c.widget['memorygraph']
    # `values` is interpolated into the command evaluated inside the
    # widget -- presumably a module-level fixture list; verify.
    widget.eval(f'self.values={values}')
    widget.eval('self.draw()')
    screenshot_manager.take_screenshot()
class InteractivePolicy(Policy):
    """Keyboard-driven policy for a multi-agent environment: arrow keys
    set movement flags via the viewer window's key callbacks, and
    action() translates those flags into the env's action format."""

    def __init__(self, env, agent_index):
        super(InteractivePolicy, self).__init__()
        self.env = env
        # Movement flags indexed [left, right, up, down].
        self.move = [False for i in range(4)]
        # Communication flags (unused by action() below).
        self.comm = [False for i in range(env.world.dim_c)]
        # Hook this agent's viewer window for key events.
        env.viewers[agent_index].window.on_key_press = self.key_press
        env.viewers[agent_index].window.on_key_release = self.key_release

    def action(self, obs):
        """Map the current key flags to an action (ignores `obs`)."""
        if self.env.discrete_action_input:
            # Discrete mode: a single integer action id.
            # NOTE(review): up maps to 4 and down to 3 -- the mapping is
            # deliberately asymmetric with the one-hot branch below;
            # confirm against the env's action decoding.
            u = 0
            if self.move[0]:
                u = 1
            if self.move[1]:
                u = 2
            if self.move[2]:
                u = 4
            if self.move[3]:
                u = 3
        else:
            # Continuous mode: 5-slot vector, slot 0 = no-op.
            u = np.zeros(5)
            if self.move[0]:
                u[1] += 1.0
            if self.move[1]:
                u[2] += 1.0
            if self.move[3]:
                u[3] += 1.0
            if self.move[2]:
                u[4] += 1.0
            if (True not in self.move):
                u[0] += 1.0
        # Append a zero communication vector.
        return np.concatenate([u, np.zeros(self.env.world.dim_c)])

    def key_press(self, k, mod):
        """Set the movement flag for a pressed arrow key."""
        if (k == key.LEFT):
            self.move[0] = True
        if (k == key.RIGHT):
            self.move[1] = True
        if (k == key.UP):
            self.move[2] = True
        if (k == key.DOWN):
            self.move[3] = True

    def key_release(self, k, mod):
        """Clear the movement flag for a released arrow key."""
        if (k == key.LEFT):
            self.move[0] = False
        if (k == key.RIGHT):
            self.move[1] = False
        if (k == key.UP):
            self.move[2] = False
        if (k == key.DOWN):
            self.move[3] = False
class Projector():
    """Runs a projector model over a video's features chunk-by-chunk and
    stitches the per-chunk outputs back into one per-frame array."""

    def __init__(self, projector_model: Model, video_chunk_iterator_provider: VideoChunkIteratorProvider) -> None:
        self._projector_model = projector_model
        self._video_chunk_iterator_provider = video_chunk_iterator_provider

    def project(self, video_features: np.ndarray) -> np.ndarray:
        """Project `video_features` to the model's output space, one chunk
        at a time, and reassemble per-frame outputs."""
        (batch, valid_sizes) = self._prepare_input_batch(video_features)
        raw_outputs = self._projector_model.predict(batch)
        # Trim the padded tail of each chunk before reassembly.
        trimmed = [raw_outputs[idx][0:size] for (idx, size) in enumerate(valid_sizes)]
        return self._accumulate_outputs(trimmed, video_features)

    def _prepare_input_batch(self, features: np.ndarray) -> Tuple[(np.ndarray, List[int])]:
        # Delegate chunking to a fresh iterator over these features.
        iterator = self._video_chunk_iterator_provider.provide(features)
        return iterator.prepare_input_batch()

    def _accumulate_outputs(self, valid_chunk_outputs: List[np.ndarray], features: np.ndarray) -> np.ndarray:
        # A second iterator with identical chunking writes each chunk's
        # output into the right frame positions.
        iterator = self._video_chunk_iterator_provider.provide(features)
        frame_count = features.shape[0]
        feature_dim = valid_chunk_outputs[0].shape[2]
        accumulated = np.zeros((frame_count, 1, feature_dim))
        iterator.accumulate_chunk_outputs(accumulated, valid_chunk_outputs)
        return np.squeeze(accumulated, axis=1)
class _VCTKBaseDataSource(FileDataSource):
    """Base data source for the VCTK corpus.

    Validates speaker ids, checks txt/wav pairing, and collects either
    wav paths or transcript texts together with per-speaker labels.

    Fix: transcripts were read via ``open(s, 'rb').read()`` inside a
    lambda, leaking one file handle per transcript; reads now go through
    a context manager so handles are closed promptly.
    """

    def __init__(self, data_root, speakers, labelmap, max_files):
        """
        Args:
            data_root: VCTK root containing wav48/ and txt/ directories.
            speakers: list of speaker ids ('225' or 'p225'), or 'all'.
            labelmap: optional {speaker: label}; built from the speaker
                order when None.
            max_files: global cap on collected files (None = unlimited).
        """
        self.data_root = data_root
        # Normalize 'pNNN' -> 'NNN' in place (note: mutates the caller's
        # list, as the original did).
        for idx in range(len(speakers)):
            if (speakers[idx][0] == 'p'):
                speakers[idx] = speakers[idx][1:]
        if (speakers == 'all'):
            speakers = available_speakers
        for speaker in speakers:
            if (speaker not in available_speakers):
                raise ValueError("Unknown speaker '{}'. It should be one of {}".format(speaker, available_speakers))
        self.speakers = speakers
        if (labelmap is None):
            labelmap = {}
            for (idx, speaker) in enumerate(speakers):
                labelmap[speaker] = idx
        self.labelmap = labelmap
        self.labels = None
        self.max_files = max_files
        self.speaker_info = _parse_speaker_info(data_root)
        self._validate()

    def _validate(self):
        """Ensure every speaker has transcripts and txt/wav names pair up."""
        for (_, speaker) in enumerate(self.speakers):
            txt_files = sorted(glob(join(self.data_root, 'txt', ('p' + speaker), 'p{}_*.txt'.format(speaker))))
            wav_files = sorted(glob(join(self.data_root, 'wav48', ('p' + speaker), 'p{}_*.wav'.format(speaker))))
            assert (len(txt_files) > 0)
            for (txt_path, wav_path) in zip(txt_files, wav_files):
                assert (splitext(basename(txt_path))[0] == splitext(basename(wav_path))[0])

    def collect_files(self, is_wav):
        """Collect wav paths (is_wav=True) or transcript texts, filling
        self.labels with the matching speaker labels.

        Returns:
            List of wav paths or transcript strings.
        """
        if is_wav:
            root = join(self.data_root, 'wav48')
            ext = '.wav'
        else:
            root = join(self.data_root, 'txt')
            ext = '.txt'
        paths = []
        labels = []
        # Split the global cap evenly across speakers.
        if (self.max_files is None):
            max_files_per_speaker = None
        else:
            max_files_per_speaker = (self.max_files // len(self.speakers))
        for (idx, speaker) in enumerate(self.speakers):
            speaker_dir = join(root, ('p' + speaker))
            files = sorted(glob(join(speaker_dir, 'p{}_*{}'.format(speaker, ext))))
            files = files[:max_files_per_speaker]
            if (not is_wav):
                # Read transcripts eagerly, dropping the trailing newline;
                # the context manager closes each handle immediately.
                def _read_transcript(path):
                    with open(path, 'rb') as f:
                        return f.read().decode('utf-8')[:(- 1)]
                files = [_read_transcript(path) for path in files]
            for f in files:
                paths.append(f)
                labels.append(self.labelmap[self.speakers[idx]])
        self.labels = np.array(labels, dtype=np.int16)
        return paths
def create_model(feat_dim, num_classes=1000, scale=16, stage1_weights=False, dataset=None, shot_phase='stage1', test=False, *args):
    """Build a cosine-norm classifier head.

    When not in test mode and stage1_weights is set, the classifier weights
    are initialized from the stage-1 checkpoint of `dataset`; otherwise the
    random initialization is kept.  `*args` is accepted for factory-signature
    compatibility and ignored.
    """
    print('Loading Dot Product Classifier.')
    print(num_classes, feat_dim)
    clf = CosNorm_Classifier(num_classes, feat_dim, scale)
    if (not test):
        if stage1_weights:
            assert dataset
            print(('Loading %s Stage 1 Classifier Weights.' % dataset))
            # NOTE(review): assumes init_weights returns a parameter compatible
            # with clf.weight — confirm against the init_weights implementation.
            clf.weight = init_weights(model=clf.weight, weights_path=('./logs/%s/%s/final_model_checkpoint.pth' % (dataset, shot_phase)), classifier=True)
        else:
            print('Random initialized classifier weights.')
    return clf
# NOTE(review): the bare '_tf' below looks like a mangled decorator (probably
# '@require_tf' from transformers.testing_utils) — confirm against upstream.
_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for the TF Transfo-XL family (base model, LM head,
    sequence classification) plus pipeline compatibility checks."""
    all_model_classes = ((TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ())
    # Generation tests are deliberately disabled (empty either way).
    all_generative_model_classes = (() if is_tf_available() else ())
    pipeline_model_mapping = ({'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification} if is_tf_available() else {})
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        """Skip text-generation pipeline tests for this model family."""
        if (pipeline_test_casse_name == 'TextGenerationPipelineTests'):
            return True
        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        """Check input/output embedding and bias accessors for every model class."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if (model_class in list_other_models_with_output_ebd):
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert (name is None)
            else:
                x = model.get_output_embeddings()
                assert (x is None)
                name = model.get_bias()
                assert (name is None)

    def test_xla_mode(self):
        # Intentionally skipped for this model.
        pass

    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    # NOTE(review): the bare '(reason=...)' line below looks like a mangled
    # skip decorator (probably '@unittest.skip(reason=...)') — confirm upstream.
    (reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
class IntegratorDiag(Integrator):
    """Integrator for constant Hamiltonians that evolves in the eigenbasis:
    the state is transformed with U^-1 once on set_state, propagated
    elementwise by exp(diag * dt), and transformed back with U on read-out."""
    integrator_options = {'eigensolver_dtype': 'dense'}
    support_time_dependant = False
    supports_blackbox = False
    method = 'diag'

    def __init__(self, system, options):
        # Diagonalization is only valid for time-independent Hamiltonians.
        if (not system.isconstant):
            raise ValueError('Hamiltonian system must be constant to use diagonalized method')
        super().__init__(system, options)

    def _prepare(self):
        """Diagonalize H(0) once; cache eigenvalues, U and U^-1."""
        self._dt = 0.0
        self._expH = None
        H0 = self.system(0).to(self.options['eigensolver_dtype'])
        (self.diag, self.U) = _data.eigs(H0.data, False)
        # Column vector so it broadcasts against the state in integrate().
        self.diag = self.diag.reshape(((- 1), 1))
        self.Uinv = _data.inv(self.U)
        self.name = 'qutip diagonalized'

    def integrate(self, t, copy=True):
        """Advance the eigenbasis state to time t; exp(diag*dt) is cached per dt."""
        dt = (t - self._t)
        if (dt == 0):
            return self.get_state()
        elif (self._dt != dt):
            self._expH = np.exp((self.diag * dt))
            self._dt = dt
        self._y *= self._expH
        self._t = t
        return self.get_state(copy)

    def mcstep(self, t, copy=True):
        return self.integrate(t, copy=copy)

    def get_state(self, copy=True):
        # Transform back from the eigenbasis; `copy` is currently unused here.
        return (self._t, _data.matmul(self.U, _data.dense.Dense(self._y)))

    def set_state(self, t, state0):
        # Store the state in the eigenbasis (apply U^-1 once).
        self._t = t
        self._y = _data.matmul(self.Uinv, state0).to_array()
        self._is_set = True

    # NOTE(review): the two 'options' defs below read like a stripped
    # '@property' / '@options.setter' pair (the second delegates to
    # Integrator.options.fset) — confirm against the original qutip source.
    def options(self):
        return self._options

    def options(self, new_options):
        Integrator.options.fset(self, new_options)
# NOTE(review): the line below looks like a mangled fixture decorator
# (probably '@with_fixtures(WebFixture, SessionScopeFixture)' from reahl) —
# confirm against the original source.
_fixtures(WebFixture, SessionScopeFixture)
def test_logging_in(web_fixture, session_scope_fixture):
    """End-to-end check that a user can log in and stays logged in across pages."""
    browser = Browser(web_fixture.new_wsgi_app(site_root=SessionScopeUI))
    user = session_scope_fixture.user
    browser.open('/')
    browser.click(XPath.link().with_text('Log in'))
    # NOTE(review): an empty string is typed into the Email field; presumably
    # this should be the fixture user's email address — verify upstream.
    browser.type(XPath.input_labelled('Email'), '')
    browser.type(XPath.input_labelled('Password'), 'topsecret')
    browser.click(XPath.button_labelled('Log in'))
    browser.click(XPath.link().with_text('Home'))
    assert browser.is_element_present(XPath.paragraph().including_text('Welcome John Doe'))
class DeepAdversarialMetricLearning(TrainWithClassifier):
    """Trainer for deep adversarial metric learning: a generator network
    synthesizes hard negatives from (anchor, positive, negative) triplets and
    is trained adversarially against the metric embedder.

    Training phases, by epoch: metric-only for `metric_alone_epochs`, then
    generator-only for `g_alone_epochs`, then both jointly.
    """

    def __init__(self, metric_alone_epochs=0, g_alone_epochs=0, g_triplets_per_anchor=100, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Snapshot the configured weights; update_loss_weights() zeroes and
        # restores entries from this copy as phases change.
        self.original_loss_weights = copy.deepcopy(self.loss_weights)
        self.metric_alone_epochs = metric_alone_epochs
        self.g_alone_epochs = g_alone_epochs
        # The generator's adversarial objective is the negated triplet loss:
        # flip the margin sign and use a negated distance.
        assert isinstance(self.loss_funcs['g_adv_loss'], TripletMarginLoss)
        self.loss_funcs['g_adv_loss'].margin *= (- 1)
        self.loss_funcs['g_adv_loss'].distance = NegativeLpDistance()
        self.g_triplets_per_anchor = g_triplets_per_anchor

    def custom_setup(self):
        """Register the synthetic-triplet miner and the generator's extra loss names."""
        synth_packaged_as_triplets = EmbeddingsAlreadyPackagedAsTriplets()
        self.mining_funcs['synth_packaged_as_triplets'] = synth_packaged_as_triplets
        self.loss_names += ['g_hard_loss', 'g_reg_loss']

    def calculate_loss(self, curr_batch):
        """Compute metric/classifier losses and, when active, the generator losses."""
        (data, labels) = curr_batch
        penultimate_embeddings = self.get_trunk_output(data)
        if self.do_metric:
            authentic_final_embeddings = self.get_final_embeddings(penultimate_embeddings)
            indices_tuple = self.maybe_mine_embeddings(authentic_final_embeddings, labels)
            self.losses['metric_loss'] = self.loss_funcs['metric_loss'](authentic_final_embeddings, labels, indices_tuple)
            logits = self.maybe_get_logits(authentic_final_embeddings)
            self.losses['classifier_loss'] = self.maybe_get_classifier_loss(logits, labels)
        if self.do_adv:
            self.calculate_synth_loss(penultimate_embeddings, labels)

    def update_loss_weights(self):
        """Derive the current phase flags from the epoch and zero out the
        weights of losses that are inactive in this phase."""
        self.do_metric_alone = (self.epoch <= self.metric_alone_epochs)
        self.do_adv_alone = (self.metric_alone_epochs < self.epoch <= (self.metric_alone_epochs + self.g_alone_epochs))
        self.do_both = ((not self.do_adv_alone) and (not self.do_metric_alone))
        self.do_adv = (self.do_adv_alone or self.do_both)
        self.do_metric = (self.do_metric_alone or self.do_both)
        non_zero_weight_list = []
        if self.do_adv:
            non_zero_weight_list += ['g_hard_loss', 'g_reg_loss', 'g_adv_loss']
        if self.do_metric:
            non_zero_weight_list += ['metric_loss', 'classifier_loss']
        if self.do_both:
            non_zero_weight_list += ['synth_loss']
        for k in self.loss_weights.keys():
            if (k in non_zero_weight_list):
                self.loss_weights[k] = self.original_loss_weights[k]
            else:
                self.loss_weights[k] = 0
        self.maybe_exclude_networks_from_gradient()

    def maybe_exclude_networks_from_gradient(self):
        """Freeze (no grad + eval mode) the networks not trained in this phase."""
        self.set_to_train()
        if self.do_adv_alone:
            no_grad_list = ['trunk', 'classifier']
        elif self.do_metric_alone:
            no_grad_list = ['generator']
        else:
            no_grad_list = []
        for k in self.models.keys():
            if (k in no_grad_list):
                c_f.set_requires_grad(self.models[k], requires_grad=False)
                self.models[k].eval()
            else:
                c_f.set_requires_grad(self.models[k], requires_grad=True)

    def step_optimizers(self):
        """Step only the optimizers that belong to the active phase."""
        step_list = []
        if self.do_metric:
            step_list += ['trunk_optimizer', 'embedder_optimizer', 'classifier_optimizer']
        if self.do_adv:
            step_list += ['generator_optimizer']
        for k in self.optimizers.keys():
            if (k in step_list):
                self.optimizers[k].step()

    def calculate_synth_loss(self, penultimate_embeddings, labels):
        """Generate synthetic negatives and compute the generator's losses.

        g_adv_loss pushes synthetic negatives to be hard (negated triplet loss);
        g_hard_loss pulls them toward the anchors; g_reg_loss keeps them close
        to the real negatives; synth_loss trains the embedder on them.
        """
        (a_indices, p_indices, n_indices) = lmu.convert_to_triplets(None, labels, t_per_anchor=self.g_triplets_per_anchor)
        real_anchors = penultimate_embeddings[a_indices]
        real_positives = penultimate_embeddings[p_indices]
        real_negatives = penultimate_embeddings[n_indices]
        # Generator input: (anchor | positive | negative) concatenated per triplet.
        penultimate_embeddings_cat = torch.cat([real_anchors, real_positives, real_negatives], dim=1)
        synthetic_negatives = self.models['generator'](c_f.to_device(penultimate_embeddings_cat, device=self.data_device))
        penultimate_embeddings_with_negative_synth = c_f.unslice_by_n([real_anchors, real_positives, synthetic_negatives])
        final_embeddings = self.get_final_embeddings(penultimate_embeddings_with_negative_synth)
        # Interleave labels to match the (a, p, n) interleaving of unslice_by_n.
        labels = torch.tensor([val for tup in zip(*[labels[a_indices], labels[p_indices], labels[n_indices]]) for val in tup])
        indices_tuple = self.mining_funcs['synth_packaged_as_triplets'](final_embeddings, labels)
        if self.do_both:
            self.losses['synth_loss'] = self.loss_funcs['synth_loss'](final_embeddings, labels, indices_tuple)
        self.losses['g_adv_loss'] = self.loss_funcs['g_adv_loss'](final_embeddings, labels, indices_tuple)
        self.losses['g_hard_loss'] = torch.nn.functional.mse_loss(torch.nn.functional.normalize(synthetic_negatives, p=2, dim=1), torch.nn.functional.normalize(real_anchors, p=2, dim=1))
        self.losses['g_reg_loss'] = torch.nn.functional.mse_loss(torch.nn.functional.normalize(synthetic_negatives, p=2, dim=1), torch.nn.functional.normalize(real_negatives, p=2, dim=1))

    def modify_schema(self):
        """Declare the extra generator model, losses and miner in the schema."""
        super().modify_schema()
        self.schema['models'].keys += ['generator']
        self.schema['models'].essential += ['generator']
        self.schema['loss_funcs'].keys += ['synth_loss', 'g_adv_loss']
        self.schema['loss_funcs'].essential += ['synth_loss', 'g_adv_loss']
        self.schema['mining_funcs'].keys += ['synth_packaged_as_triplets']
def generate_summary_html(root_dir: Path):
    """Build the HTML summary table of per-type-checker conformance results.

    For each type checker, reads results/<name>/version.toml and one
    results/<name>/<test_case>.toml per test case; missing files degrade to
    'Unknown version' / empty results rather than failing.
    Returns the assembled HTML string.
    """
    test_groups = get_test_groups(root_dir)
    test_cases = get_test_cases(test_groups, (root_dir / 'tests'))
    summary_html = ''
    for type_checker in TYPE_CHECKERS:
        version_file = (((root_dir / 'results') / type_checker.name) / 'version.toml')
        try:
            with open(version_file, 'rb') as f:
                existing_info = tomli.load(f)
        except FileNotFoundError:
            existing_info = {}
        # BUG FIX: existing_info['version'] raised KeyError when version.toml
        # was missing (existing_info is then {}); .get() keeps the fallback.
        version = (existing_info.get('version') or 'Unknown version')
        test_duration = existing_info.get('test_duration')
        # NOTE(review): this <span class='tc-name'> is never closed — verify
        # against the rendered page whether that is intentional.
        summary_html += f"<div class='tc-header'><span class='tc-name'>{version}"
        if (test_duration is not None):
            summary_html += f'''<span class='tc-time'>({test_duration:.2f}sec)</span>
'''
        summary_html += '</div>\n'
        summary_html += '<div class="table_container"><table>\n'
        summary_html += '<tr><th class="column spacer" colspan="4"></th></tr>\n'
        for (test_group_name, test_group) in test_groups.items():
            tests_in_group = [case for case in test_cases if case.name.startswith(f'{test_group_name}_')]
            tests_in_group.sort(key=(lambda x: x.name))
            if (len(tests_in_group) > 0):
                summary_html += '<tr><th class="column" colspan="4">\n'
                summary_html += f'<a class="test_group" href="{test_group.href}">{test_group.name}</a>'
                summary_html += '</th></tr>\n'
                for test_case in tests_in_group:
                    test_case_name = test_case.stem
                    try:
                        results_file = (((root_dir / 'results') / type_checker.name) / f'{test_case_name}.toml')
                        with open(results_file, 'rb') as f:
                            results = tomli.load(f)
                    except FileNotFoundError:
                        results = {}
                    conformance = results.get('conformant', 'Unknown')
                    notes = results.get('notes', '').replace('\n', '<br>')
                    conformance_class = ('conformant' if (conformance == 'Pass') else ('partially-conformant' if (conformance == 'Partial') else 'not-conformant'))
                    summary_html += f'<tr><th> </th>'
                    summary_html += f'<th class="column col1">{test_case_name}</th>'
                    summary_html += f'<th class="column col2 {conformance_class}">{conformance}</th>'
                    summary_html += f'''<th class="column col3">{notes}</th></tr>
'''
                summary_html += '<tr><th class="column spacer" colspan="4"></th></tr>\n'
        summary_html += '</table></div>\n'
    return summary_html
def make_KeyPress_from_keydescr(keydescr):
    """Parse a readline-style key description (e.g. 'Control-x', '"\\C-a"',
    'Meta-Shift-F1') into a KeyPress with control/meta/shift flags set and
    either a single character or a named key."""
    keyinfo = KeyPress()
    # Strip surrounding double quotes, readline style: "x" -> x.
    if ((len(keydescr) > 2) and (keydescr[:1] == u'"') and (keydescr[(- 1):] == u'"')):
        keydescr = keydescr[1:(- 1)]
    # Repeatedly peel modifier prefixes off the front; the final iteration
    # hits the else branch and returns.
    while 1:
        lkeyname = keydescr.lower()
        if lkeyname.startswith(u'control-'):
            keyinfo.control = True
            keydescr = keydescr[8:]
        elif lkeyname.startswith(u'ctrl-'):
            keyinfo.control = True
            keydescr = keydescr[5:]
        elif keydescr.lower().startswith(u'\\c-'):
            keyinfo.control = True
            keydescr = keydescr[3:]
        elif keydescr.lower().startswith(u'\\m-'):
            keyinfo.meta = True
            keydescr = keydescr[3:]
        elif (keydescr in escape_sequence_to_special_key):
            # Translate a raw escape sequence to its special-key name and re-loop.
            keydescr = escape_sequence_to_special_key[keydescr]
        elif lkeyname.startswith(u'meta-'):
            keyinfo.meta = True
            keydescr = keydescr[5:]
        elif lkeyname.startswith(u'alt-'):
            keyinfo.meta = True
            keydescr = keydescr[4:]
        elif lkeyname.startswith(u'shift-'):
            keyinfo.shift = True
            keydescr = keydescr[6:]
        else:
            # No modifier prefix left: what remains is either a named key or
            # a single character.
            if (len(keydescr) > 1):
                if (keydescr.strip().lower() in validkey):
                    keyinfo.keyname = keydescr.strip().lower()
                    keyinfo.char = ''
                else:
                    raise IndexError((u"Not a valid key: '%s'" % keydescr))
            else:
                keyinfo.char = keydescr
            return keyinfo
class TaskbarTestCases(unittest.TestCase):
    """Windows taskbar / notification-area tests driven through pywinauto,
    using the MFC TrayMenu.exe sample as the tray application under test."""

    def setUp(self):
        """Start TrayMenu.exe and wait for its dialog, with the mouse parked
        off-screen so it cannot disturb the tray."""
        Timings.defaults()
        self.tm = _ready_timeout
        app = Application(backend='win32')
        app.start(os.path.join(mfc_samples_folder, u'TrayMenu.exe'), wait_for_idle=False)
        self.app = app
        self.dlg = app.top_window()
        # Park the cursor far left so hover effects don't interfere.
        mouse.move(((- 500), 200))
        self.dlg.wait('ready', timeout=self.tm)

    def tearDown(self):
        """Close the dialog, then kill any leftover TrayMenu.exe processes."""
        self.dlg.send_message(win32defines.WM_CLOSE)
        self.dlg.wait_not('ready')
        l = pywinauto.actionlogger.ActionLogger()
        try:
            # Up to two leftover instances may exist (some tests start a second app).
            for i in range(2):
                l.log('Look for unclosed sample apps')
                app = Application()
                app.connect(path='TrayMenu.exe')
                l.log('Forse closing a leftover app: {0}'.format(app))
                app.kill()
        except ProcessNotFoundError:
            l.log('No more leftovers. All good.')

    def testTaskbar(self):
        """The taskbar itself should be visible."""
        taskbar.TaskBar.wait('visible', timeout=self.tm)
    # The string literal below is a disabled test kept for reference
    # (testStartButton) — left untouched.
    '\n    def testStartButton(self): # TODO: fix it for AppVeyor\n        taskbar.StartButton.click_input()\n\n        sample_app_exe = os.path.join(mfc_samples_folder, u"TrayMenu.exe")\n        start_menu = taskbar.explorer_app.window(class_name=\'DV2ControlHost\')\n        start_menu.SearchEditBoxWrapperClass.click_input()\n        start_menu.SearchEditBoxWrapperClass.type_keys(\n            sample_app_exe() + \'{ENTER}\',\n            with_spaces=True, set_foreground=False\n        )\n\n        time.sleep(5)\n        app = Application.connect(path=sample_app_exe())\n        dlg = app.top_window()\n        Wait(\'ready\', timeout=self.tm)\n    '

    def testSystemTray(self):
        """The system tray (notification area) should be visible."""
        taskbar.SystemTray.wait('visible', timeout=self.tm)

    def testClock(self):
        """Clicking the clock opens the clock flyout; ESC closes it."""
        self.dlg.minimize()
        _wait_minimized(self.dlg)
        taskbar.Clock.click_input()
        ClockWindow = taskbar.explorer_app.window(class_name='ClockFlyoutWindow')
        ClockWindow.wait('visible', timeout=self.tm)
        taskbar.Clock.type_keys('{ESC}', set_foreground=False)
        ClockWindow.wait_not('visible', timeout=self.tm)

    def testClickVisibleIcon(self):
        """Right-click and double-click the (forced-visible) tray icon and
        check the popup menu / window behavior."""
        # Mixed-bitness (32/64) Python vs OS cannot drive the tray reliably.
        if (is_x64_Python() != is_x64_OS()):
            return
        orig_hid_state = _toggle_notification_area_icons(show_all=True, debug_img=('%s_01' % self.id()))
        self.dlg.minimize()
        _wait_minimized(self.dlg)
        menu_window = [None]

        def _show_popup_menu():
            # Retried by wait_until: right-click the icon and grab the popup menu.
            taskbar.explorer_app.wait_cpu_usage_lower(threshold=5, timeout=self.tm)
            taskbar.RightClickSystemTrayIcon('MFCTrayDemo')
            children = self.app.top_window().children()
            if (not children):
                menu = self.app.windows(visible=True)[0].children()[0]
            else:
                menu = children[0]
            res = (isinstance(menu, ToolbarWrapper) and menu.is_visible())
            menu_window[0] = menu
            return res
        wait_until(self.tm, _retry_interval, _show_popup_menu)
        menu_window[0].menu_bar_click_input('#2', self.app)
        popup_window = self.app.top_window()
        hdl = self.dlg.popup_window()
        self.assertEqual(popup_window.handle, hdl)
        taskbar.ClickSystemTrayIcon('MFCTrayDemo', double=True)
        self.dlg.wait('active', timeout=self.tm)
        # Restore the user's original "show all icons" setting.
        _toggle_notification_area_icons(show_all=orig_hid_state, debug_img=('%s_02' % self.id()))

    def testClickHiddenIcon(self):
        """Double-click a tray icon hidden in the overflow area."""
        if (is_x64_Python() != is_x64_OS()):
            return
        orig_hid_state = _toggle_notification_area_icons(show_all=False, debug_img=('%s_01' % self.id()))
        self.dlg.minimize()
        _wait_minimized(self.dlg)
        # A second instance ensures at least one icon ends up in the overflow.
        app2 = Application()
        app2.start(os.path.join(mfc_samples_folder, u'TrayMenu.exe'))
        dlg2 = app2.top_window()
        dlg2.wait('visible', timeout=self.tm)
        dlg2.minimize()
        _wait_minimized(dlg2)
        taskbar.explorer_app.wait_cpu_usage_lower(threshold=5, timeout=40)
        taskbar.ClickHiddenSystemTrayIcon('MFCTrayDemo', double=True)
        self.dlg.wait('visible', timeout=self.tm)
        _toggle_notification_area_icons(show_all=orig_hid_state, debug_img=('%s_02' % self.id()))
        dlg2.send_message(win32defines.WM_CLOSE)

    def testClickCustomizeButton(self):
        """Open the overflow chevron, follow the 'Customize...' link and make
        sure the Notification Area Icons control-panel page works."""
        self.dlg.minimize()
        _wait_minimized(self.dlg)
        orig_hid_state = _toggle_notification_area_icons(show_all=False, debug_img=('%s_01' % self.id()))
        app2 = Application()
        app2.start(os.path.join(mfc_samples_folder, u'TrayMenu.exe'))
        dlg2 = app2.top_window()
        dlg2.wait('visible', timeout=self.tm)
        dlg2.minimize()
        _wait_minimized(dlg2)
        taskbar.ShowHiddenIconsButton.click_input()
        niow_dlg = taskbar.explorer_app.window(class_name='NotifyIconOverflowWindow')
        niow_dlg.OverflowNotificationAreaToolbar.wait('ready', timeout=self.tm)
        niow_dlg.SysLink.click_input()
        nai = Desktop().window(name='Notification Area Icons', class_name='CabinetWClass')
        nai.wait('ready')
        origAlwaysShow = nai.CheckBox.get_check_state()
        if (not origAlwaysShow):
            nai.CheckBox.click_input()
        nai.OK.click()
        _toggle_notification_area_icons(show_all=orig_hid_state, debug_img=('%s_02' % self.id()))
        dlg2.send_message(win32defines.WM_CLOSE)
def test_retry_exec_iteration_raises_on_error_not_in_retryon():
    """exec_iteration must re-raise immediately (after logging) when the
    raised error type is not listed in retryOn."""
    rd = RetryDecorator({'max': 3, 'retryOn': ['KeyError', 'BlahError']})
    context = Context({})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with pytest.raises(ValueError) as err_info:
            rd.exec_iteration(2, context, mock, 3)
    assert (str(err_info.value) == 'arb')
    # The retry counter is mirrored on the context and on the decorator.
    assert (context['retryCounter'] == 2)
    assert (rd.retry_counter == 2)
    assert (len(context) == 1)
    mock.assert_called_once_with({'retryCounter': 2})
    mock_logger_error.assert_called_once_with('ValueError not in retryOn. Raising error and exiting retry.')
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
    """Return (mu, sigma) activation statistics for `path`.

    If `path` is a precomputed .npz archive, load 'mu'/'sigma' from it;
    otherwise treat it as an image directory (*.jpg / *.png) and compute the
    statistics with calculate_activation_statistics.
    """
    if path.endswith('.npz'):
        f = np.load(path)
        # [:] forces a copy out of the lazily-loaded npz arrays.
        (m, s) = (f['mu'][:], f['sigma'][:])
        f.close()
    else:
        path = pathlib.Path(path)
        files = (list(path.glob('*.jpg')) + list(path.glob('*.png')))
        (m, s) = calculate_activation_statistics(files, model, batch_size, dims, cuda)
    return (m, s)
def export_chunks_from_ultrastar_data(audio_filename: str, ultrastar_data: UltrastarTxtValue, folder_name: str) -> None:
    """Export each Ultrastar word as its own wav chunk into `folder_name`.

    Word start/end times come from the Ultrastar data and are converted to
    byte offsets into the source wav file.
    """
    print(f'{ULTRASINGER_HEAD} Export Ultrastar data as vocal chunks wav files')
    create_folder(folder_name)
    # BUG FIX: the wave file handle was never closed; the context manager
    # guarantees release even if a chunk export raises.
    with wave.open(audio_filename, 'rb') as wave_file:
        (sample_rate, n_channels) = (wave_file.getparams()[2], wave_file.getparams()[0])
        for (i, word) in enumerate(ultrastar_data.words):
            start_time = get_start_time_from_ultrastar(ultrastar_data, i)
            end_time = get_end_time_from_ultrastar(ultrastar_data, i)
            # NOTE(review): these offsets ignore the sample width (bytes per
            # sample) — verify get_chunk() compensates, otherwise the offsets
            # are off by a factor of sampwidth.
            start_byte = int(((start_time * sample_rate) * n_channels))
            end_byte = int(((end_time * sample_rate) * n_channels))
            chunk = get_chunk(end_byte, start_byte, wave_file)
            export_chunk_to_wav_file(chunk, folder_name, i, word, wave_file)
def validate_metrics_list(metrics_list):
    """Validate that metric names and their field names are all unique.

    Returns the list of metric names; raises TrackEvalException on duplicates.
    """
    names = [metric.get_name() for metric in metrics_list]
    if len(set(names)) != len(names):
        raise TrackEvalException('Code being run with multiple metrics of the same name')
    all_fields = [field for metric in metrics_list for field in metric.fields]
    if len(set(all_fields)) != len(all_fields):
        raise TrackEvalException('Code being run with multiple metrics with fields of the same name')
    return names
def get_padding_shape(filter_shape, stride):
    """Compute SAME-style (before, after) padding for each dimension.

    For every (filter_dim, stride) pair the total pad is
    max(filter_dim - stride, 0), split as evenly as possible with the extra
    unit going to the 'after' side.  The first dimension's pair is rotated to
    the end of the returned tuple (depth padding last).
    """
    def _split(total):
        # Even split; odd remainders go to the trailing side.
        before = total // 2
        return before, total - before

    pads = []
    for filt_dim, stride_val in zip(filter_shape, stride):
        before, after = _split(max(filt_dim - stride_val, 0))
        pads.extend((before, after))
    # Rotate the leading (depth) pair to the end.
    leading = (pads.pop(0), pads.pop(0))
    pads.extend(leading)
    return tuple(pads)
def get_all_pods(label_selector=None):
    """Return [name, namespace] pairs for every pod in the cluster.

    When `label_selector` is given (and truthy), only matching pods are listed.
    """
    if label_selector:
        response = cli.list_pod_for_all_namespaces(pretty=True, label_selector=label_selector)
    else:
        response = cli.list_pod_for_all_namespaces(pretty=True)
    return [[pod.metadata.name, pod.metadata.namespace] for pod in response.items]
class InlineQueryResultAudio(InlineQueryResult):
    """Inline query result: a link to an mp3 audio file (pyrogram type).

    audio_url and title are required; performer, duration, caption and markup
    options are optional and forwarded to Telegram when the result is sent.
    """
    def __init__(self, audio_url: str, title: str, id: str=None, performer: str='', audio_duration: int=0, caption: str='', parse_mode: Optional['enums.ParseMode']=None, caption_entities: List['types.MessageEntity']=None, reply_markup: 'types.InlineKeyboardMarkup'=None, input_message_content: 'types.InputMessageContent'=None):
        super().__init__('audio', id, input_message_content, reply_markup)
        self.audio_url = audio_url
        self.title = title
        self.performer = performer
        self.audio_duration = audio_duration
        self.caption = caption
        self.parse_mode = parse_mode
        self.caption_entities = caption_entities

    async def write(self, client: 'pyrogram.Client'):
        """Build the raw InputBotInlineResult for Telegram's MTProto API."""
        audio = raw.types.InputWebDocument(url=self.audio_url, size=0, mime_type='audio/mpeg', attributes=[raw.types.DocumentAttributeAudio(duration=self.audio_duration, title=self.title, performer=self.performer)])
        # NOTE(review): relies on parse_text_entities returning a dict whose
        # insertion order is exactly (message, entities) — fragile; confirm
        # against pyrogram's utils.parse_text_entities.
        (message, entities) = (await utils.parse_text_entities(client, self.caption, self.parse_mode, self.caption_entities)).values()
        return raw.types.InputBotInlineResult(id=self.id, type=self.type, title=self.title, content=audio, send_message=((await self.input_message_content.write(client, self.reply_markup)) if self.input_message_content else raw.types.InputBotInlineMessageMediaAuto(reply_markup=((await self.reply_markup.write(client)) if self.reply_markup else None), message=message, entities=entities)))
class TestNormalization():
    """Unit tests for the Normalize video-augmentation transform."""

    # BUG FIX: the method was defined without `self`; as a method of a pytest
    # test class it would fail at call time with a TypeError.
    def test_normalize(self):
        """Check type validation, RGB/Flow normalization and repr of Normalize."""
        # Invalid mean / std types must raise.
        with pytest.raises(TypeError):
            Normalize(dict(mean=[123.675, 116.28, 103.53]), [58.395, 57.12, 57.375])
        with pytest.raises(TypeError):
            Normalize([123.675, 116.28, 103.53], dict(std=[58.395, 57.12, 57.375]))
        target_keys = ['imgs', 'img_norm_cfg', 'modality']
        # RGB case.
        imgs = list(np.random.rand(2, 240, 320, 3).astype(np.float32))
        results = dict(imgs=imgs, modality='RGB')
        config = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
        normalize = Normalize(**config)
        normalize_results = normalize(results)
        assert assert_dict_has_keys(normalize_results, target_keys)
        check_normalize(imgs, normalize_results['imgs'], normalize_results['img_norm_cfg'])
        # Flow case: interleaved x/y components are stacked into the last axis.
        imgs = list(np.random.rand(4, 240, 320).astype(np.float32))
        results = dict(imgs=imgs, modality='Flow')
        config = dict(mean=[128, 128], std=[128, 128])
        normalize = Normalize(**config)
        normalize_results = normalize(results)
        assert assert_dict_has_keys(normalize_results, target_keys)
        assert (normalize_results['imgs'].shape == (2, 240, 320, 2))
        x_components = np.array(imgs[0::2])
        y_components = np.array(imgs[1::2])
        x_components = ((x_components - config['mean'][0]) / config['std'][0])
        y_components = ((y_components - config['mean'][1]) / config['std'][1])
        result_imgs = np.stack([x_components, y_components], axis=(- 1))
        assert np.all(np.isclose(result_imgs, normalize_results['imgs']))
        # RGB with channel swap (to_bgr=True).
        imgs = list(np.random.rand(2, 240, 320, 3).astype(np.float32))
        results = dict(imgs=imgs, modality='RGB')
        config = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=True)
        normalize = Normalize(**config)
        normalize_results = normalize(results)
        assert assert_dict_has_keys(normalize_results, target_keys)
        check_normalize(imgs, normalize_results['imgs'], normalize_results['img_norm_cfg'])
        assert (normalize.__repr__() == ((normalize.__class__.__name__ + f'(mean={np.array([123.675, 116.28, 103.53])}, ') + f'std={np.array([58.395, 57.12, 57.375])}, to_bgr={True}, adjust_magnitude={False})'))
class ThresholdValueTest(unittest.TestCase):
    """Tests AccuracyMetric (torchrec) with an explicit decision threshold."""
    # NOTE(review): the bare '_grad()' below looks like a mangled decorator
    # (probably '@torch.no_grad()') — confirm against the original source.
    _grad()
    def _test_accuracy_helper(self, labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor, expected_accuracy: torch.Tensor, threshold: float) -> None:
        """Run AccuracyMetric over per-task tensors and compare to expected values."""
        # NOTE(review): num_task and batch_size are both taken from
        # labels.shape[0] — presumably only valid for the square fixtures used
        # in these tests; verify.
        num_task = labels.shape[0]
        batch_size = labels.shape[0]
        task_list = []
        inputs: Dict[(str, Union[(Dict[(str, torch.Tensor)], torch.Tensor)])] = {'predictions': {}, 'labels': {}, 'weights': {}}
        for i in range(num_task):
            task_info = RecTaskInfo(name=f'Task:{i}', label_name='label', prediction_name='prediction', weight_name='weight')
            task_list.append(task_info)
            inputs['predictions'][task_info.name] = predictions[i]
            inputs['labels'][task_info.name] = labels[i]
            inputs['weights'][task_info.name] = weights[i]
        accuracy = AccuracyMetric(world_size=1, my_rank=0, batch_size=batch_size, tasks=task_list, threshold=threshold)
        accuracy.update(**inputs)
        actual_accuracy = accuracy.compute()
        for (task_id, task) in enumerate(task_list):
            cur_actual_accuracy = actual_accuracy[f'accuracy-{task.name}|window_accuracy']
            cur_expected_accuracy = expected_accuracy[task_id].unsqueeze(dim=0)
            torch.testing.assert_close(cur_actual_accuracy, cur_expected_accuracy, atol=0.0001, rtol=0.0001, check_dtype=False, msg=f'Actual: {cur_actual_accuracy}, Expected: {cur_expected_accuracy}')

    def test_accuracy(self) -> None:
        """Run the helper over all generated model-output cases, reporting the
        offending data set on failure."""
        test_data = generate_model_outputs_cases()
        for inputs in test_data:
            try:
                self._test_accuracy_helper(**inputs)
            except AssertionError:
                print('Assertion error caught with data set ', inputs)
                raise
class WithDescriptors(Serialisable):
    """Static-typing fixture: declares one attribute per nested-descriptor
    variant, then pins each descriptor's inferred generic parameters with
    assert_type.  Consumed by a type checker, not executed for behavior."""
    descriptor = Nested(expected_type=str)
    # set-like descriptors: tuple vs list values, with and without None
    set_tuple = NestedSet(values=('a', 1, 0.0))
    set_list = NestedSet(values=['a', 1, 0.0])
    set_tuple_none = NestedSet(values=('a', 1, 0.0, None))
    noneset_tuple = NestedNoneSet(values=('a', 1, 0.0))
    noneset_list = NestedNoneSet(values=['a', 1, 0.0])
    # value/text descriptors across expected_type x allow_none combinations
    convertible_default = NestedValue(expected_type=int)
    convertible_not_none = NestedValue(expected_type=int, allow_none=False)
    convertible_none = NestedValue(expected_type=int, allow_none=True)
    text_default = NestedText(expected_type=str)
    text_str_not_none = NestedText(expected_type=str, allow_none=False)
    text_str_none = NestedText(expected_type=str, allow_none=True)
    text_int_not_none = NestedText(expected_type=int, allow_none=False)
    text_int_none = NestedText(expected_type=int, allow_none=True)
    # min/max-bounded descriptors
    minmax_default = NestedMinMax(min=0, max=0)
    minmax_float = NestedMinMax(min=0, max=0, expected_type=float, allow_none=False)
    minmax_float_none = NestedMinMax(min=0, max=0, expected_type=float, allow_none=True)
    minmax_int = NestedMinMax(min=0.0, max=0.0, expected_type=int, allow_none=False)
    minmax_int_none = NestedMinMax(min=0.0, max=0.0, expected_type=int, allow_none=True)
    # simple scalar descriptors
    bool_default = NestedBool()
    bool_not_none = NestedBool(allow_none=False)
    bool_none = NestedBool(allow_none=True)
    emptytag_default = EmptyTag()
    emptytag_not_none = EmptyTag(allow_none=False)
    emptytag_none = EmptyTag(allow_none=True)
    string_default = NestedString()
    string_not_none = NestedString(allow_none=False)
    string_none = NestedString(allow_none=True)
    float_default = NestedFloat()
    float_not_none = NestedFloat(allow_none=False)
    float_none = NestedFloat(allow_none=True)
    integer_default = NestedInteger()
    integer_not_none = NestedInteger(allow_none=False)
    integer_none = NestedInteger(allow_none=True)
    # Pin the inferred generic parameters of every descriptor declared above.
    assert_type(descriptor, Nested[str])
    assert_type(set_tuple, NestedSet[Union[(Literal[('a', 1)], float)]])
    assert_type(set_list, NestedSet[Union[(str, int, float)]])
    assert_type(set_tuple_none, NestedSet[Union[(Literal[('a', 1, None)], float)]])
    assert_type(noneset_tuple, NestedNoneSet[Union[(Literal[('a', 1)], float)]])
    assert_type(noneset_list, NestedNoneSet[Union[(str, float)]])
    assert_type(convertible_default, NestedValue[(int, Literal[False])])
    assert_type(convertible_not_none, NestedValue[(int, Literal[False])])
    assert_type(convertible_none, NestedValue[(int, Literal[True])])
    assert_type(text_default, NestedText[(str, Literal[False])])
    assert_type(text_str_not_none, NestedText[(str, Literal[False])])
    assert_type(text_str_none, NestedText[(str, Literal[True])])
    assert_type(text_int_not_none, NestedText[(int, Literal[False])])
    assert_type(text_int_none, NestedText[(int, Literal[True])])
    assert_type(minmax_default, NestedMinMax[(float, Literal[False])])
    assert_type(minmax_float, NestedMinMax[(float, Literal[False])])
    assert_type(minmax_float_none, NestedMinMax[(float, Literal[True])])
    assert_type(minmax_int, NestedMinMax[(int, Literal[False])])
    assert_type(minmax_int_none, NestedMinMax[(int, Literal[True])])
    assert_type(bool_default, NestedBool[Literal[False]])
    assert_type(bool_not_none, NestedBool[Literal[False]])
    assert_type(bool_none, NestedBool[Literal[True]])
    assert_type(emptytag_default, EmptyTag[Literal[False]])
    assert_type(emptytag_not_none, EmptyTag[Literal[False]])
    assert_type(emptytag_none, EmptyTag[Literal[True]])
    assert_type(string_default, NestedString[Literal[False]])
    assert_type(string_not_none, NestedString[Literal[False]])
    assert_type(string_none, NestedString[Literal[True]])
    assert_type(float_default, NestedFloat[Literal[False]])
    assert_type(float_not_none, NestedFloat[Literal[False]])
    assert_type(float_none, NestedFloat[Literal[True]])
    assert_type(integer_default, NestedInteger[Literal[False]])
    assert_type(integer_not_none, NestedInteger[Literal[False]])
    assert_type(integer_none, NestedInteger[Literal[True]])
# NOTE(review): the bare '_ordering' below looks like a mangled decorator
# (probably '@functools.total_ordering') — confirm against the original source.
_ordering
class Condition():
    """Root class of conditions attached to constraints: an operator paired
    with a right operand (value, variable, interval or set)."""

    def __init__(self, operator):
        self.operator = operator
        assert isinstance(operator, TypeConditionOperator), (('the operator ' + str(operator)) + ' should be of type TypeConditionOperator')

    def _key(self):
        # Identity used by hashing/ordering: concrete subclass + operator.
        return (str(type(self)), str(self.operator))

    def __hash__(self):
        return hash(self._key())

    def __eq__(self, other):
        return ((type(self) == type(other)) and (self._key() == other._key()))

    def __lt__(self, other):
        # Conditions sort after plain ints/strs and the ANY marker.
        return (True if (isinstance(other, (int, str)) or (other == ANY)) else (self._key() < other._key()))

    # NOTE(review): defined without 'self' — presumably a stripped
    # '@staticmethod' factory that builds the right Condition subclass from an
    # (operator, right_operand) pair; confirm against the original source.
    def build_condition(condition):
        if (condition is None):
            return None
        condition = (tuple(condition) if isinstance(condition, list) else condition)
        assert (isinstance(condition, tuple) and (len(condition) == 2)), 'a condition must a pair, given as a tuple (or a list)'
        operator = (TypeConditionOperator.value_of(condition[0]) if isinstance(condition[0], str) else condition[0])
        # Sets/generators are materialized as lists; ranges with step != 1 too.
        right_operand = (list(condition[1]) if isinstance(condition[1], (set, frozenset, GeneratorType)) else condition[1])
        checkType(right_operand, (int, Variable, range, [int, Variable]))
        if (isinstance(right_operand, range) and (right_operand.step != 1)):
            right_operand = list(right_operand)
        if isinstance(right_operand, int):
            return ConditionValue(operator, right_operand)
        if isinstance(right_operand, Variable):
            return ConditionVariable(operator, right_operand)
        if isinstance(right_operand, range):
            return ConditionInterval(operator, right_operand.start, (right_operand.stop - 1))
        if isinstance(right_operand, list):
            return ConditionSet(operator, right_operand)

    def filtering(self, values):
        # Abstract hook: subclasses filter candidate values.
        pass

    def str_tuple(self):
        # Abstract hook: subclasses render the condition as a tuple string.
        pass

    def right_operand(self):
        # Abstract hook: subclasses expose their right operand.
        pass

    def infix_string(self):
        """Render the condition as '<op> <right_operand>'."""
        return ((self.operator.to_str() + ' ') + str(self.right_operand()))

    def __str__(self):
        return (((('(' + str(self.operator)) + ',') + str(self.right_operand())) + ')')
class EmpiricalAPSParams(KiteParameterGroup):
    """Parameter group exposing the scene's empirical APS correction settings."""

    def __init__(self, model, **kwargs):
        scene = model.getScene()
        kwargs['type'] = 'group'
        kwargs['name'] = 'Scene.APS (empirical)'
        KiteParameterGroup.__init__(self, model=model, model_attr='scene', **kwargs)

        # Checkbox mirroring whether the empirical APS correction is applied.
        self.applied = pTypes.SimpleParameter(
            name='applied',
            type='bool',
            value=scene.aps.config.applied,
            tip='detrend the scene')

        def toggle_applied(param, checked):
            # Forward UI toggles to the scene's APS component.
            self.model.getScene().aps.set_enabled(checked)

        self.applied.sigValueChanged.connect(toggle_applied)
        self.pushChild(self.applied)
class Lookahead(nn.Module):
    """Lookahead convolution layer (Wang et al., arXiv:1611.05935).

    For each timestep t, mixes the current frame with the next `context`
    frames using one learned weight per (feature, offset):
    out[t, b, f] = sum_j weight[f, j] * in[t + j, b, f], zero-padded at the end.

    Input/output shape: (seq_len, batch, n_features).
    """

    def __init__(self, n_features, context):
        super(Lookahead, self).__init__()
        assert context > 0
        self.n_features = n_features
        self.context = context
        # One weight per feature for the current frame plus `context` future frames.
        self.weight = Parameter(torch.Tensor(n_features, context + 1))
        # No bias term; registered as None for state_dict compatibility.
        self.register_parameter('bias', None)
        self.init_parameters()

    def init_parameters(self):
        """Uniform init in [-1/sqrt(context+1), 1/sqrt(context+1)]."""
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, input):
        seq_len = input.size(0)
        # Zero-pad `context` frames at the end so the last timesteps have
        # something to look ahead into. new_zeros matches input's dtype/device
        # and replaces the deprecated torch.autograd.Variable wrapping of the
        # original implementation.
        padding = input.new_zeros(self.context, *input.size()[1:])
        x = torch.cat((input, padding), 0)
        # Windows of length context+1 starting at each timestep:
        # shape (seq_len, context + 1, batch, n_features).
        x = torch.stack([x[i:i + self.context + 1] for i in range(seq_len)])
        # -> (seq_len, batch, n_features, context + 1) so weight broadcasts.
        x = x.permute(0, 2, 3, 1)
        # Weighted sum over the context dimension.
        return torch.mul(x, self.weight).sum(dim=3)

    def __repr__(self):
        return (self.__class__.__name__ + '(' + 'n_features=' + str(self.n_features)
                + ', context=' + str(self.context) + ')')
class IntelHex16bit(IntelHex):
    """View of IntelHex data as 16-bit little-endian words.

    All addresses taken/returned by this class are *word* addresses
    (byte address // 2).
    """

    def __init__(self, source=None):
        """Construct from a file name/object, an existing IntelHex, or None."""
        if isinstance(source, IntelHex):
            # Share state with the 8-bit view rather than re-parsing.
            self.padding = source.padding
            self.start_addr = source.start_addr
            self._buf = source._buf
            self._offset = source._offset
        elif isinstance(source, dict):
            raise IntelHexError('IntelHex16bit does not support initialization from dictionary yet.\nPatches are welcome.')
        else:
            IntelHex.__init__(self, source)
        # Widen the default 8-bit pad value (0xFF) to a 16-bit one (0xFFFF).
        if self.padding == 255:
            self.padding = 65535

    def __getitem__(self, addr16):
        """Return the 16-bit little-endian word at word address addr16.

        Returns self.padding when neither underlying byte exists; raises
        BadAccess16bit when only one of the two bytes is present.
        """
        addr1 = addr16 * 2
        addr2 = addr1 + 1
        byte1 = self._buf.get(addr1, None)
        byte2 = self._buf.get(addr2, None)
        if byte1 is not None and byte2 is not None:
            return byte1 | (byte2 << 8)
        if byte1 is None and byte2 is None:
            return self.padding
        # Only half of the word is present: refuse to fabricate the other half.
        raise BadAccess16bit(address=addr16)

    def __setitem__(self, addr16, word):
        """Store a 16-bit word at word address addr16 (little-endian)."""
        addr_byte = addr16 * 2
        hi, lo = divmod(word, 256)
        self._buf[addr_byte] = lo
        self._buf[addr_byte + 1] = hi

    def minaddr(self):
        """Minimum word address of the data, or 0 if empty."""
        aa = dict_keys(self._buf)
        if not aa:
            return 0
        return min(aa) >> 1

    def maxaddr(self):
        """Maximum word address of the data, or 0 if empty."""
        aa = dict_keys(self._buf)
        if not aa:
            return 0
        return max(aa) >> 1

    def tobinarray(self, start=None, end=None, size=None):
        """Convert data to an array.array('H') of unsigned 16-bit words.

        @param  start  start word address (inclusive); defaults to minaddr().
        @param  end    end word address (inclusive); defaults to maxaddr().
        @param  size   number of words; alternative to `end`.
        @raise  ValueError  if `size` is given but not positive.
        """
        words = array('H')
        if not self._buf and None in (start, end):
            return words
        if size is not None and size <= 0:
            raise ValueError('tobinarray: wrong value for size')
        start, end = self._get_start_end(start, end, size)
        for addr in range_g(start, end + 1):
            # __getitem__ supplies self.padding for absent words.
            words.append(self[addr])
        return words
def test_get_index(textpage):
    """The char nearest a point near the page's top-left area is char 0, an 'L'."""
    x = 60
    y = textpage.page.get_height() - 66
    index = textpage.get_index(x, y, 5, 5)
    assert index == 0 and index < textpage.count_chars()
    bounds = textpage.get_charbox(index)
    assert textpage.get_text_bounded(*bounds) == 'L'
class OutputList(RecycleView):
    """RecycleView listing transaction outputs as address/value rows."""

    def __init__(self, **kwargs):
        super(OutputList, self).__init__(**kwargs)
        self.app = App.get_running_app()

    def update(self, outputs: Sequence['TxOutput']):
        """Rebuild the view's rows from the given outputs."""
        self.data = [
            {'address': out.get_ui_address_str(),
             'value': self.app.format_amount_and_units(out.value)}
            for out in outputs
        ]
class MultiplyResponse(FrequencyResponse):
    """Frequency response defined as the product of a list of component responses."""

    responses = List.T(FrequencyResponse.T())

    def __init__(self, responses=None, **kwargs):
        # Avoid a shared mutable default: each instance gets its own list.
        if (responses is None):
            responses = []
        FrequencyResponse.__init__(self, responses=responses, **kwargs)

    def get_fmax(self):
        # The product is limited by the most restrictive component; components
        # reporting None impose no upper frequency limit.
        fmaxs = [resp.get_fmax() for resp in self.responses]
        fmaxs = [fmax for fmax in fmaxs if (fmax is not None)]
        if (not fmaxs):
            return None
        else:
            return min(fmaxs)

    def evaluate(self, freqs):
        # Elementwise product of all component transfer functions.
        a = num.ones(freqs.size, dtype=complex)
        for resp in self.responses:
            a *= resp.evaluate(freqs)
        return a

    def is_scalar(self):
        # Scalar iff every factor is scalar.
        return all((resp.is_scalar() for resp in self.responses))

    def get_scalar(self):
        """Return the overall gain if purely scalar; raise IsNotScalar otherwise."""
        if self.is_scalar():
            return num.prod((resp.get_scalar() for resp in self.responses))
        else:
            raise IsNotScalar()

    def simplify(self):
        # Collapse/merge the component responses in place.
        self.responses = simplify_responses(self.responses)

    def construction(self):
        breakpoints = []
        for resp in self.responses:
            breakpoints.extend(resp.construction())
        return finalize_construction(breakpoints)

    def summary(self):
        if self.is_scalar():
            return str_gain(self.get_scalar())
        else:
            # NOTE(review): 'x.summary' is read without calling it, yet 'summary'
            # is defined here as a method — presumably it is a @property on the
            # component classes and a decorator was lost in extraction; confirm
            # against the original source.
            xs = [x.summary for x in self.responses]
            return ('(%s)' % ('*'.join((x for x in xs if (x != 'one'))) or 'one'))
def to_vertex_format(format):
    """Translate a struct-style format string (e.g. 'f4', '3xf4') to a wgpu vertex format.

    Strings that already are wgpu VertexFormat / IndexFormat values pass through.
    """
    if format in wgpu.VertexFormat:
        return format
    if format in wgpu.IndexFormat:
        return format
    primitives = {'i1': 'sint8', 'u1': 'uint8', 'i2': 'sint16', 'u2': 'uint16',
                  'i4': 'sint32', 'u4': 'uint32', 'f2': 'float16', 'f4': 'float32'}
    # The last two characters always name the scalar type.
    primitive = primitives[format[-2:]]
    length = len(format)
    if length == 2:
        return primitive
    if length == 4 and format[1] == 'x':
        count = format[0]
        if count == '1':
            return primitive
        if count in '234':
            return primitive + 'x' + str(count)
        raise ValueError(f"Unexpected tuple size in index/vertex format '{format}'")
    raise ValueError(f"Unexpected length of index/vertex format '{format}'")
def rebuild_open(img: np.ndarray, kernel: np.ndarray, erode_time: int = 1) -> np.ndarray:
    """Morphological opening by reconstruction.

    Erodes `img` `erode_time` times, then repeatedly dilates the marker while
    clipping it to `img`'s foreground until it no longer changes.
    Returns a float array of 0.0/1.0 values.
    """
    marker = img.copy()
    for _ in range(erode_time):
        marker = cv2.erode(marker, kernel)
    rebuild_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    previous = marker.copy()
    while True:
        # Grow the marker, then keep only pixels that are also set in the
        # original image (geodesic dilation).
        grown = cv2.dilate(previous, rebuild_kernel)
        grown = np.logical_and(grown == 1, grown == img).astype(float)
        if (grown == previous).all():
            # Fixed point reached: reconstruction complete.
            return grown
        previous = grown
class TestContextManagerFixtureFuncs():
    """Integration tests for generator ("yield") fixtures, run via Pytester.

    NOTE(review): the embedded test sources contain bare fragments such as
    '(scope="module")' with no decorator name — the '@pytest.fixture'
    decorators appear to have been stripped from the string literals during
    extraction; confirm against upstream before relying on these sources.
    """

    def test_simple(self, pytester: Pytester) -> None:
        # Function-scoped fixture: setup/teardown wraps each test.
        pytester.makepyfile('\n        import pytest\n        \n        def arg1():\n            print("setup")\n            yield 1\n            print("teardown")\n        def test_1(arg1):\n            print("test1", arg1)\n        def test_2(arg1):\n            print("test2", arg1)\n            assert 0\n        ')
        result = pytester.runpytest('-s')
        result.stdout.fnmatch_lines('\n        *setup*\n        *test1 1*\n        *teardown*\n        *setup*\n        *test2 1*\n        *teardown*\n        ')

    def test_scoped(self, pytester: Pytester) -> None:
        # Module-scoped fixture: one setup/teardown around both tests.
        pytester.makepyfile('\n        import pytest\n        (scope="module")\n        def arg1():\n            print("setup")\n            yield 1\n            print("teardown")\n        def test_1(arg1):\n            print("test1", arg1)\n        def test_2(arg1):\n            print("test2", arg1)\n        ')
        result = pytester.runpytest('-s')
        result.stdout.fnmatch_lines('\n        *setup*\n        *test1 1*\n        *test2 1*\n        *teardown*\n        ')

    def test_setup_exception(self, pytester: Pytester) -> None:
        # A failure before the yield is reported as a fixture/setup error.
        pytester.makepyfile('\n        import pytest\n        (scope="module")\n        def arg1():\n            pytest.fail("setup")\n            yield 1\n        def test_1(arg1):\n            pass\n        ')
        result = pytester.runpytest('-s')
        result.stdout.fnmatch_lines('\n        *pytest.fail*setup*\n        *1 error*\n        ')

    def test_teardown_exception(self, pytester: Pytester) -> None:
        # A failure after the yield still lets the test pass, then errors.
        pytester.makepyfile('\n        import pytest\n        (scope="module")\n        def arg1():\n            yield 1\n            pytest.fail("teardown")\n        def test_1(arg1):\n            pass\n        ')
        result = pytester.runpytest('-s')
        result.stdout.fnmatch_lines('\n        *pytest.fail*teardown*\n        *1 passed*1 error*\n        ')

    def test_yields_more_than_one(self, pytester: Pytester) -> None:
        # Fixtures must yield exactly once; a second yield is an error.
        pytester.makepyfile('\n        import pytest\n        (scope="module")\n        def arg1():\n            yield 1\n            yield 2\n        def test_1(arg1):\n            pass\n        ')
        result = pytester.runpytest('-s')
        result.stdout.fnmatch_lines('\n        *fixture function*\n        *test_yields*:2*\n        ')

    def test_custom_name(self, pytester: Pytester) -> None:
        # The fixture's requestable name can differ from the function name.
        pytester.makepyfile("\n        import pytest\n        (name='meow')\n        def arg1():\n            return 'mew'\n        def test_1(meow):\n            print(meow)\n        ")
        result = pytester.runpytest('-s')
        result.stdout.fnmatch_lines(['*mew*'])
# NOTE(review): the line below looks like a truncated decorator (presumably
# '@init_environment_variables(model=MyHandlerEnvVars)'); as written it is a
# bare call expression — confirm against the original source.
_environment_variables(model=MyHandlerEnvVars)
def my_handler(event: dict[(str, Any)], context: LambdaContext) -> dict[(str, Any)]:
    """Lambda handler: load typed environment variables, return a 200 JSON response.

    The parsed env vars are fetched but not used in the response body here.
    """
    env_vars: MyHandlerEnvVars = get_environment_variables(model=MyHandlerEnvVars)
    return {'statusCode': HTTPStatus.OK, 'headers': {'Content-Type': 'application/json'}, 'body': json.dumps({'message': 'success'})}
def fast_inplace_check(fgraph, inputs):
    """Return the subset of `inputs` that may safely be overwritten in place.

    Excludes constants, variables that already have destroyers, inputs
    protected by a Supervisor feature, and the graph's outputs.
    """
    Supervisor = pytensor.compile.function.types.Supervisor
    # Flatten the protected lists of all Supervisor features.
    protected = [
        var
        for feature in fgraph._features
        if isinstance(feature, Supervisor)
        for var in feature.protected
    ]
    protected.extend(fgraph.outputs)
    return [
        inp
        for inp in inputs
        if not isinstance(inp, Constant)
        and not fgraph.has_destroyers([inp])
        and inp not in protected
    ]
# NOTE(review): the three lines below look like decorator remnants (presumably
# '@pytest.mark.end_to_end()' and two '@pytest.mark.skipif(...)'); as written
# they are not valid decorator syntax — confirm against upstream.
.end_to_end()
.skipif((not IS_PEXPECT_INSTALLED), reason='pexpect is not installed.')
.skipif((sys.platform == 'win32'), reason='pexpect cannot spawn on Windows.')
def test_trace(tmp_path):
    """End-to-end check that `pytask --trace` drops into Pdb inside the task."""
    # NOTE(review): this task source appears truncated ('i = ' has no value) —
    # the original presumably assigned a concrete integer; confirm.
    source = '\n    def task_example():\n        i = \n    '
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    child = pexpect.spawn(f'pytask --trace {tmp_path.as_posix()}')
    child.expect('Pdb')
    # Drive the debugger: step once, print i, i+1, i+2, then continue.
    child.sendline('n;; p i;; p i + 1;; p i + 2;; continue')
    rest = child.read().decode('utf-8')
    # NOTE(review): the tuple below lost its elements ('(, , )') during
    # extraction — it presumably held the three expected printed values.
    assert all(((str(i) in rest) for i in (, , )))
    _flush(child)
def data_loader(args, test_path=False, segmentation=False):
    """Build train / classification-test / localization-test loaders (200 classes).

    Returns (train_loader, valcls_loader, valloc_loader): a shuffled training
    loader and two deterministic test loaders. `segmentation` is unused.
    """
    # ImageNet normalization statistics.
    mean_vals = [0.485, 0.456, 0.406]
    std_vals = [0.229, 0.224, 0.225]
    input_size = (int(args.input_size), int(args.input_size))
    crop_size = (int(args.crop_size), int(args.crop_size))

    # Training augmentation: resize, random crop, horizontal flip, normalize.
    tsfm_train = transforms.Compose([
        transforms.Resize(input_size),
        transforms.RandomCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean_vals, std_vals),
    ])

    # Classification test: ten-crop evaluation when requested (note: the flag
    # is compared as the string 'True'), otherwise a plain center crop.
    if args.tencrop == 'True':
        cls_transforms = [
            transforms.Resize(input_size),
            transforms.TenCrop(crop_size),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.Normalize(mean_vals, std_vals)(transforms.ToTensor()(crop)) for crop in crops])),
        ]
    else:
        cls_transforms = [
            transforms.Resize(crop_size),
            transforms.CenterCrop(crop_size),
            transforms.ToTensor(),
            transforms.Normalize(mean_vals, std_vals),
        ]
    tsfm_clstest = transforms.Compose(cls_transforms)

    # Localization test: resize directly to the crop size, no cropping.
    tsfm_loctest = transforms.Compose([
        transforms.Resize(crop_size),
        transforms.ToTensor(),
        transforms.Normalize(mean_vals, std_vals),
    ])

    img_train = my_dataset(args.train_list, root_dir=args.img_dir, transform=tsfm_train, with_path=True,
                           num_classes=200, datalist_file_root=args.train_root_list,
                           datalist_file_parent=args.train_parent_list)
    img_clstest = my_dataset(args.test_list, root_dir=args.img_dir, transform=tsfm_clstest,
                             with_path=test_path, num_classes=200)
    img_loctest = my_dataset(args.test_list, root_dir=args.img_dir, transform=tsfm_loctest,
                             with_path=test_path, num_classes=200)

    train_loader = DataLoader(img_train, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    valcls_loader = DataLoader(img_clstest, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    valloc_loader = DataLoader(img_loctest, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    return (train_loader, valcls_loader, valloc_loader)
def camel_to_snake(naming, name):
    """Strip `naming`'s prefix and suffix from `name`, then CamelCase -> snake_case.

    Asserts that the configured prefix/suffix are actually present.
    """
    prefix, suffix = naming.prefix, naming.suffix
    if prefix:
        assert name.startswith(prefix)
        name = name[len(prefix):]
    if suffix:
        assert name.endswith(suffix)
        name = name[:-len(suffix)]
    # Insert '_' before every interior capital letter, then lowercase.
    return re.sub('(?<!^)(?=[A-Z])', '_', name).lower()
def glc_to_material(l, item=None):
    """Parse one glass-catalog line into a CoefficientsMaterial.

    Token layout: name, nd, vd, density, 6 skipped fields, 2 skipped fields,
    then (dispersion-formula id, coefficient count) followed by the
    coefficients themselves.

    NOTE(review): the original function returned right after building the
    material, leaving a second parsing stage (two further (id, count) token
    groups) unreachable; that dead code has been removed — behavior is
    unchanged.
    """
    tokens = l.strip().split()
    name = tokens.pop(0)
    nd = sfloat(tokens.pop(0))  # refractive index; consumed but currently unused
    vd = sfloat(tokens.pop(0))  # Abbe number; consumed but currently unused
    density = sfloat(tokens.pop(0))
    # Skip 6 + 2 fields we do not use.
    del tokens[:6]
    del tokens[:2]
    a, num = sint(tokens.pop(0)), sint(tokens.pop(0))
    coeff = np.array([sfloat(t) for t in tokens[:num]])
    del tokens[:num]
    try:
        # Formula id is 1-based; out-of-range ids fall back to 'unknown'.
        typ = 'schott sellmeier_squared_transposed conrady unknown unknown hikari'.split()[a - 1]
    except IndexError:
        typ = 'unknown'
    mat = CoefficientsMaterial(name=name, coefficients=coeff, typ=typ)
    mat.density = density
    return mat
class Ametek7270(Instrument):
    """Driver for the Ametek DSP 7270 lock-in amplifier.

    Communication uses NUL-terminated messages; setting a property is
    acknowledged by an empty reply (see check_set_errors).

    NOTE(review): the bare line '_gain.setter' below and the undecorated
    'id' / 'auto_gain' methods look like remnants of '@property' /
    '@auto_gain.setter' decorators lost in extraction; confirm upstream.
    """

    # Voltage-mode sensitivity steps, in Volts.
    SENSITIVITIES = [0.0, 2e-09, 5e-09, 1e-08, 2e-08, 5e-08, 1e-07, 2e-07, 5e-07, 1e-06, 2e-06, 5e-06, 1e-05, 2e-05, 5e-05, 0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0]
    # Sensitivity tables per input mode (0: voltage, 1: current, 2: low-noise current).
    SENSITIVITIES_IMODE = {0: SENSITIVITIES, 1: [(sen * 1e-06) for sen in SENSITIVITIES], 2: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2e-15, 5e-15, 1e-14, 2e-14, 5e-14, 1e-13, 2e-13, 5e-13, 1e-12, 2e-12]}
    # Time-constant steps, in seconds.
    TIME_CONSTANTS = [1e-05, 2e-05, 5e-05, 0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0]
    sensitivity = Instrument.control('SEN', 'SEN %d', ' A floating point property that controls the sensitivity\n range in Volts, which can take discrete values from 2 nV to\n 1 V. This property can be set. ', validator=truncated_discrete_set, values=SENSITIVITIES, map_values=True, check_set_errors=True, dynamic=True)
    slope = Instrument.control('SLOPE', 'SLOPE %d', ' A integer property that controls the filter slope in\n dB/octave, which can take the values 6, 12, 18, or 24 dB/octave.\n This property can be set. ', validator=truncated_discrete_set, values=[6, 12, 18, 24], map_values=True, check_set_errors=True)
    time_constant = Instrument.control('TC', 'TC %d', ' A floating point property that controls the time constant\n in seconds, which takes values from 10 microseconds to 100,000\n seconds. This property can be set. ', validator=truncated_discrete_set, values=TIME_CONSTANTS, map_values=True, check_set_errors=True)
    x = Instrument.measurement('X.', ' Reads the X value in Volts ', get_process=check_read_not_empty)
    y = Instrument.measurement('Y.', ' Reads the Y value in Volts ', get_process=check_read_not_empty)
    x1 = Instrument.measurement('X1.', ' Reads the first harmonic X value in Volts ', get_process=check_read_not_empty)
    y1 = Instrument.measurement('Y1.', ' Reads the first harmonic Y value in Volts ', get_process=check_read_not_empty)
    x2 = Instrument.measurement('X2.', ' Reads the second harmonic X value in Volts ', get_process=check_read_not_empty)
    y2 = Instrument.measurement('Y2.', ' Reads the second harmonic Y value in Volts ', get_process=check_read_not_empty)
    xy = Instrument.measurement('XY.', ' Reads both the X and Y values in Volts ', get_process=check_read_not_empty)
    mag = Instrument.measurement('MAG.', ' Reads the magnitude in Volts ', get_process=check_read_not_empty)
    theta = Instrument.measurement('PHA.', ' Reads the signal phase in degrees ', get_process=check_read_not_empty)
    harmonic = Instrument.control('REFN', 'REFN %d', ' An integer property that represents the reference\n harmonic mode control, taking values from 1 to 127.\n This property can be set. ', validator=truncated_discrete_set, values=list(range(1, 128)), check_set_errors=True)
    phase = Instrument.control('REFP.', 'REFP. %g', ' A floating point property that represents the reference\n harmonic phase in degrees. This property can be set. ', validator=modular_range, values=[0, 360], check_set_errors=True)
    voltage = Instrument.control('OA.', 'OA. %g', ' A floating point property that represents the voltage\n in Volts. This property can be set. ', validator=truncated_range, values=[0, 5], check_set_errors=True)
    frequency = Instrument.control('OF.', 'OF. %g', ' A floating point property that represents the lock-in\n frequency in Hz. This property can be set. ', validator=truncated_range, values=[0, 250000.0], check_set_errors=True)
    dac1 = Instrument.control('DAC. 1', 'DAC. 1 %g', ' A floating point property that represents the output\n value on DAC1 in Volts. This property can be set. ', validator=truncated_range, values=[(- 10), 10], check_set_errors=True)
    dac2 = Instrument.control('DAC. 2', 'DAC. 2 %g', ' A floating point property that represents the output\n value on DAC2 in Volts. This property can be set. ', validator=truncated_range, values=[(- 10), 10], check_set_errors=True)
    dac3 = Instrument.control('DAC. 3', 'DAC. 3 %g', ' A floating point property that represents the output\n value on DAC3 in Volts. This property can be set. ', validator=truncated_range, values=[(- 10), 10], check_set_errors=True)
    dac4 = Instrument.control('DAC. 4', 'DAC. 4 %g', ' A floating point property that represents the output\n value on DAC4 in Volts. This property can be set. ', validator=truncated_range, values=[(- 10), 10], check_set_errors=True)
    adc1 = Instrument.measurement('ADC. 1', ' Reads the input value of ADC1 in Volts ', get_process=check_read_not_empty)
    adc2 = Instrument.measurement('ADC. 2', ' Reads the input value of ADC2 in Volts ', get_process=check_read_not_empty)
    adc3 = Instrument.measurement('ADC. 3', ' Reads the input value of ADC3 in Volts ', get_process=check_read_not_empty)
    adc4 = Instrument.measurement('ADC. 4', ' Reads the input value of ADC4 in Volts ', get_process=check_read_not_empty)

    def __init__(self, adapter, name='Ametek DSP 7270', read_termination='\x00', write_termination='\x00', **kwargs):
        # The instrument terminates messages with NUL bytes.
        super().__init__(adapter, name, read_termination=read_termination, write_termination=write_termination, **kwargs)

    def check_set_errors(self):
        """A successful set is acknowledged with an empty reply; anything else is an error."""
        if (self.read() == ''):
            return []
        else:
            return ['Incorrect return from previously set property']

    def ask(self, command, query_delay=0):
        # Strip the trailing terminator/whitespace from replies.
        return super().ask(command, query_delay).strip()

    def set_reference_mode(self, mode: int=0):
        """Set the reference mode (0: single, 1: dual harmonic, 2: dual reference)."""
        if (mode not in [0, 1, 2]):
            raise ValueError('Invalid reference mode')
        self.ask(f'REFMODE {mode}')

    def set_voltage_mode(self):
        """Switch the input to voltage mode and select the matching sensitivity table."""
        self.ask('IMODE 0')
        self.sensitivity_values = self.SENSITIVITIES_IMODE[0]

    def set_differential_mode(self, lineFiltering=True):
        """Use differential (A-B) voltage input, optionally with line filtering."""
        self.ask('VMODE 3')
        self.ask((('LF %d 0' % 3) if lineFiltering else 0))

    def set_current_mode(self, low_noise=False):
        """Switch to current input mode; `low_noise` selects the low-noise preamp."""
        if low_noise:
            self.ask('IMODE 2')
            self.sensitivity_values = self.SENSITIVITIES_IMODE[2]
        else:
            self.ask('IMODE 1')
            self.sensitivity_values = self.SENSITIVITIES_IMODE[1]

    def set_channel_A_mode(self):
        """Use single-ended channel A voltage input."""
        self.ask('VMODE 1')

    def id(self):
        """Identification string: model id and firmware version."""
        return f"{self.ask('ID')}/{self.ask('VER')}"

    def auto_gain(self):
        """Whether automatic AC gain is enabled."""
        return (int(self.ask('AUTOMATIC')) == 1)
    # NOTE(review): '_gain.setter' below is presumably a mangled
    # '@auto_gain.setter' decorator; as written it is a NameError.
    _gain.setter
    def auto_gain(self, setval):
        if setval:
            self.ask('AUTOMATIC 1')
        else:
            self.ask('AUTOMATIC 0')

    def shutdown(self):
        """Set the output voltage to zero before shutting down."""
        log.info(('Shutting down %s' % self.name))
        self.voltage = 0.0
        super().shutdown()
def iter_benchmark(iterator, num_iter: int, warmup: int=5, max_time_seconds: float=60) -> Tuple[(float, List[float])]:
    """Benchmark an iterator: run `warmup` untimed iterations, then time `num_iter`.

    Stops early once `max_time_seconds` of wall time has elapsed, averaging
    over the iterations actually executed.
    Returns (average seconds per iteration, list of per-iteration timings).
    """
    num_iter, warmup = int(num_iter), int(warmup)
    iterator = iter(iterator)
    # Warm-up iterations are not timed.
    for _ in range(warmup):
        next(iterator)
    timer = Timer()
    per_iter_times: List[float] = []
    for done in tqdm.trange(num_iter):
        begin = timer.seconds()
        if begin > max_time_seconds:
            # Time budget exhausted: average over what was actually run.
            num_iter = done
            break
        next(iterator)
        per_iter_times.append(timer.seconds() - begin)
    average = timer.seconds() / num_iter
    return (average, per_iter_times)
def has_node_changed(task: PTask, node: (PTask | PNode)) -> bool:
    """Return True if `node`'s current state differs from the state recorded for `task`."""
    current_state = node.state()
    if current_state is None:
        # A node without any state counts as changed.
        return True
    with DatabaseSession() as session:
        recorded = session.get(State, (task.signature, node.signature))
        if recorded is None:
            # Nothing recorded yet for this (task, node) pair.
            return True
        return current_state != recorded.hash_
def main(unused_argv):
    """Entry point: resolve gin configuration, build a trainer, then train or evaluate."""
    # Parse command-line gin configs first so the checkpoint parameters below
    # are visible.
    parse_cmdline_gin_configurations()
    # Find a checkpoint to reload the operative config from, preferring the
    # eval checkpoint over the pretrained one; missing parameters raise
    # ValueError in gin.
    try:
        checkpoint_to_reload = gin.query_parameter('LearnerConfig.checkpoint_for_eval')
    except ValueError:
        try:
            checkpoint_to_reload = gin.query_parameter('LearnerConfig.pretrained_checkpoint')
        except ValueError:
            checkpoint_to_reload = None
    if (checkpoint_to_reload and FLAGS.reload_checkpoint_gin_config):
        # Load the operative config saved next to the checkpoint, then
        # re-apply command-line configs so they take precedence.
        reload_checkpoint_dir = os.path.dirname(checkpoint_to_reload)
        load_operative_gin_configurations(reload_checkpoint_dir)
        parse_cmdline_gin_configurations()
    try:
        learner_config = trainer.LearnerConfig()
        (train_datasets, eval_datasets, restrict_classes, restrict_num_per_class) = trainer.get_datasets_and_restrictions()
        # The train learner is only needed when training (or when evaluating
        # fine-grainedness on the train split).
        train_learner = None
        if (FLAGS.is_training or (FLAGS.eval_finegrainedness and (FLAGS.eval_finegrainedness_split == trainer.TRAIN_SPLIT))):
            train_learner = trainer.NAME_TO_LEARNER[learner_config.train_learner]
        eval_learner = trainer.NAME_TO_LEARNER[learner_config.eval_learner]
        trainer_kwargs = {'train_learner': train_learner, 'eval_learner': eval_learner, 'is_training': FLAGS.is_training, 'train_dataset_list': train_datasets, 'eval_dataset_list': eval_datasets, 'restrict_classes': restrict_classes, 'restrict_num_per_class': restrict_num_per_class, 'checkpoint_dir': FLAGS.train_checkpoint_dir, 'summary_dir': FLAGS.summary_dir, 'records_root_dir': FLAGS.records_root_dir, 'eval_finegrainedness': FLAGS.eval_finegrainedness, 'eval_finegrainedness_split': FLAGS.eval_finegrainedness_split, 'eval_imbalance_dataset': FLAGS.eval_imbalance_dataset, 'omit_from_saving_and_reloading': FLAGS.omit_from_saving_and_reloading}
        # Pick the trainer type matching the learner family and validate the
        # learner name against it.
        if learner_config.episodic:
            trainer_instance = trainer.EpisodicTrainer(**trainer_kwargs)
            if (learner_config.train_learner not in trainer.EPISODIC_LEARNER_NAMES):
                raise ValueError('When "episodic" is True, "train_learner" should be an episodic one, among {}.'.format(trainer.EPISODIC_LEARNER_NAMES))
        else:
            trainer_instance = trainer.BatchTrainer(**trainer_kwargs)
            if (learner_config.train_learner not in trainer.BATCH_LEARNER_NAMES):
                raise ValueError('When "episodic" is False, "train_learner" should be a batch one, among {}.'.format(trainer.BATCH_LEARNER_NAMES))
    except ValueError as e:
        # Dump the full config to help debug misconfiguration, then re-raise.
        logging.info('Full Gin configurations:\n%s', gin.config_str())
        raise e
    logging.info('Operative Gin configurations:\n%s', gin.operative_config_str())
    if (FLAGS.is_training and FLAGS.train_checkpoint_dir):
        # Persist the operative config alongside checkpoints for later reload.
        record_operative_gin_configurations(FLAGS.train_checkpoint_dir)
    datasets = (train_datasets if FLAGS.is_training else eval_datasets)
    logging.info('Starting %s for dataset(s) %s...', ('training' if FLAGS.is_training else 'evaluation'), datasets)
    if FLAGS.is_training:
        trainer_instance.train()
    elif set(datasets).intersection(trainer.DATASETS_WITH_EXAMPLE_SPLITS):
        if (not data.POOL_SUPPORTED):
            raise NotImplementedError('Example-level splits or pools not supported.')
    else:
        # Evaluation runs on exactly one dataset at a time.
        if (len(datasets) != 1):
            raise ValueError('Requested datasets {} for evaluation, but evaluation should be performed on individual datasets only.'.format(datasets))
        eval_split = trainer.TEST_SPLIT
        if FLAGS.eval_finegrainedness:
            eval_split = FLAGS.eval_finegrainedness_split
        trainer_instance.evaluate(eval_split)
    if trainer_instance.summary_writer:
        trainer_instance.summary_writer.close()
class TestDetermineAttribEqOrder():
    """Tests for _determine_attrib_eq_order (resolution of cmp/eq/order arguments).

    NOTE(review): the lines '.parametrize(...)' and '(cmp=booleans(), ...)'
    below look like decorator remnants ('@pytest.mark.parametrize' and
    hypothesis '@given') whose names were lost in extraction; as written they
    are not syntactically valid — confirm against upstream.
    """

    def test_default(self):
        # No explicit eq/order: both fall back to the default (42 here).
        assert ((42, None, 42, None) == _determine_attrib_eq_order(None, None, None, 42))

    def test_eq_callable_order_boolean(self):
        # A callable eq with boolean order keeps the callable only for eq.
        assert ((True, str.lower, False, None) == _determine_attrib_eq_order(None, str.lower, False, True))

    def test_eq_callable_order_callable(self):
        # Callables for both eq and order are kept as-is.
        assert ((True, str.lower, True, abs) == _determine_attrib_eq_order(None, str.lower, abs, True))

    def test_eq_boolean_order_callable(self):
        # Boolean eq with a callable order.
        assert ((True, None, True, str.lower) == _determine_attrib_eq_order(None, True, str.lower, True))
    .parametrize('eq', [True, False])
    def test_order_mirrors_eq_by_default(self, eq):
        # When order is unspecified it takes the value of eq.
        assert ((eq, None, eq, None) == _determine_attrib_eq_order(None, eq, None, True))

    def test_order_without_eq(self):
        # order=True with eq=False is rejected.
        with pytest.raises(ValueError, match='`order` can only be True if `eq` is True too.'):
            _determine_attrib_eq_order(None, False, True, True)
    (cmp=booleans(), eq=optional_bool, order=optional_bool)
    def test_mix(self, cmp, eq, order):
        # Mixing the legacy cmp argument with eq/order is rejected.
        assume(((eq is not None) or (order is not None)))
        with pytest.raises(ValueError, match="Don't mix `cmp` with `eq' and `order`."):
            _determine_attrib_eq_order(cmp, eq, order, True)
class NELDER_MEAD(Optimizer):
    """Nelder-Mead simplex optimizer, wrapping scipy.optimize.minimize."""

    # Constructor parameters forwarded verbatim to scipy's `options` dict.
    _OPTIONS = ['maxiter', 'maxfev', 'disp', 'xatol', 'adaptive']

    def __init__(self, maxiter: Optional[int]=None, maxfev: int=1000, disp: bool=False, xatol: float=0.0001, tol: Optional[float]=None, adaptive: bool=False) -> None:
        """Collect scipy options from the constructor arguments.

        `tol` is stored separately and passed as minimize()'s `tol` argument.
        """
        super().__init__()
        # Pick up the constructor arguments by name: any local whose name is
        # in _OPTIONS becomes a scipy 'options' entry. This relies on locals()
        # containing exactly the parameter names above, so do not rename them.
        for (k, v) in list(locals().items()):
            if (k in self._OPTIONS):
                self._options[k] = v
        self._tol = tol

    def get_support_level(self):
        """Gradient and bounds are ignored; an initial point is required."""
        return {'gradient': OptimizerSupportLevel.ignored, 'bounds': OptimizerSupportLevel.ignored, 'initial_point': OptimizerSupportLevel.required}

    def optimize(self, num_vars, objective_function, gradient_function=None, variable_bounds=None, initial_point=None):
        """Run Nelder-Mead; returns (solution point, objective value, evaluation count)."""
        super().optimize(num_vars, objective_function, gradient_function, variable_bounds, initial_point)
        res = minimize(objective_function, initial_point, tol=self._tol, method='Nelder-Mead', options=self._options)
        return (res.x, res.fun, res.nfev)
def start_tunnel(dsz_cmd=None):
    """Start a redirect tunnel, reusing an already-running matching one if possible.

    Returns the tunnel id (int) on success, failure information when the
    command ran but reported failure, and False otherwise.
    """
    assert verify_dsz_cmd_redirect_object(dsz_cmd), 'Given dsz_cmd must be a valid ops.cmd object for the redirect plugin.'
    # Check for an existing tunnel matching this command before spawning one.
    existing = verify_tunnel(id=None, dsz_cmd=dsz_cmd, return_status=False)
    if existing is not False:
        return int(existing.id)
    redir_obj = dsz_cmd.execute()
    if dsz_cmd.success == 1:
        return int(redir_obj.cmdid)
    if dsz_cmd.success == 0:
        return get_tunnel_failure_information(id=redir_obj.cmdid)
    return False
class TestPairingWithSymmetries(unittest.TestCase):
    """Counts the pairings produced by the simultaneous-pairing generators."""

    def test_two_fermions(self):
        """With 4 single-element bins / 2 modes, exactly one pairing of size 2 exists."""
        bins = [[1], [2], [3], [4]]
        count = 0
        for pairing in pair_within_simultaneously_binned(bins):
            count += 1
            self.assertEqual(len(pairing), 2)
            print(pairing)
        self.assertEqual(count, 1)
        # The symmetric variant yields the same single pairing regardless of
        # the number of symmetries (2, 1, or 0).
        count = 0
        for pairing in pair_within_simultaneously_symmetric(2, 2):
            count += 1
            self.assertEqual(len(pairing), 2)
        self.assertEqual(count, 1)
        count = 0
        for pairing in pair_within_simultaneously_symmetric(2, 1):
            count += 1
            self.assertEqual(len(pairing), 2)
        self.assertEqual(count, 1)
        count = 0
        for pairing in pair_within_simultaneously_symmetric(2, 0):
            count += 1
            self.assertEqual(len(pairing), 2)
        self.assertEqual(count, 1)

    def test_four_fermions(self):
        """Pairing counts for 4 fermions shrink as symmetries are added: 18/10/5/3."""
        print('Trying with 0 symmetries')
        count = 0
        for pairing in pair_within_simultaneously_symmetric(4, 0):
            print(pairing)
            count += 1
            self.assertEqual(len(pairing), 4)
        self.assertEqual(count, 18)
        print('Trying with 1 symmetry')
        count = 0
        for pairing in pair_within_simultaneously_symmetric(4, 1):
            print(pairing)
            count += 1
            self.assertEqual(len(pairing), 4)
        self.assertEqual(count, 10)
        print('Trying with 2 symmetries')
        count = 0
        for pairing in pair_within_simultaneously_symmetric(4, 2):
            print(pairing)
            count += 1
            self.assertEqual(len(pairing), 4)
        self.assertEqual(count, 5)
        print('Trying with 3 symmetries')
        count = 0
        for pairing in pair_within_simultaneously_symmetric(4, 3):
            print(pairing)
            count += 1
            self.assertEqual(len(pairing), 4)
        self.assertEqual(count, 3)

    def test_four_symmetries(self):
        """Smoke test: generation with 3 symmetries runs for odd/larger mode counts."""
        for num_fermions in [5, 8, 9]:
            for _ in pair_within_simultaneously_symmetric(num_fermions, 3):
                pass
def optimize(struc, ff, optimizations=['conp', 'conp'], exe='gulp', pstress=None, path='tmp', label='_', clean=True, adjust=False):
    """Run a sequence of GULP single-point optimizations on `struc`.

    Returns (structure, energy, total_time, error_flag); on any step failing,
    (None, None, 0, True) is returned instead.

    Note: the mutable default for `optimizations` is shared across calls; it
    is never mutated here, so this is currently harmless.
    """
    total_time = 0
    for opt in optimizations:
        struc, energy, step_time, error = single_optimize(
            struc, ff, pstress=pstress, opt=opt, exe=exe,
            path=path, label=label, clean=clean)
        total_time += step_time
        if error:
            return (None, None, 0, True)
        if adjust and abs(energy) < 1e-08:
            # A (near-)zero energy is suspicious: shrink the cell by 20% and
            # let the next optimization pass retry.
            struc.lattice.set_matrix(struc.lattice.matrix * 0.8)
    return (struc, energy, total_time, False)
class EGICheckinOpenIdConnect(OpenIdConnectAuth):
    """EGI Check-in OpenID Connect backend with optional entitlement gating."""

    name = 'egi-checkin'
    CHECKIN_ENV = 'prod'
    USERNAME_KEY = 'voperson_id'
    EXTRA_DATA = [('expires_in', 'expires_in', True), ('refresh_token', 'refresh_token', True), ('id_token', 'id_token', True)]
    DEFAULT_SCOPE = ['openid', 'profile', 'email', 'voperson_id', 'eduperson_entitlement', 'offline_access']
    # Empty list means "no entitlement filtering".
    ALLOWED_ENTITLEMENTS = []

    def oidc_endpoint(self):
        """An explicit OIDC_ENDPOINT setting wins; otherwise derive the endpoint from the Check-in environment."""
        configured = self.setting('OIDC_ENDPOINT', self.OIDC_ENDPOINT)
        if configured:
            return configured
        env = self.setting('CHECKIN_ENV', self.CHECKIN_ENV)
        return CHECKIN_ENV_ENDPOINTS.get(env, '')

    def get_user_details(self, response):
        """Map the Check-in claims onto the framework's user detail fields."""
        username_key = self.setting('USERNAME_KEY', default=self.USERNAME_KEY)
        fullname, first_name, last_name = self.get_user_names(
            response.get('name') or '',
            response.get('given_name') or '',
            response.get('family_name') or '')
        return {
            'username': response.get(username_key),
            'email': response.get('email'),
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name,
        }

    def entitlement_allowed(self, user_entitlements):
        """True when no filter is configured, or the user holds at least one allowed entitlement."""
        required = self.setting('ALLOWED_ENTITLEMENTS', self.ALLOWED_ENTITLEMENTS)
        if not required:
            return True
        return any(entitlement in user_entitlements for entitlement in required)

    def auth_allowed(self, response, details):
        """Extend the base check with the entitlement filter."""
        allowed = super().auth_allowed(response, details)
        if allowed:
            entitlements = response.get('eduperson_entitlement') or []
            allowed = self.entitlement_allowed(entitlements)
        return allowed
class PFS_No_Ksappend(ParserTest):
    """preprocessFromString on a kickstart without %ksappend must copy the input verbatim."""

    def __init__(self, *args, **kwargs):
        ParserTest.__init__(self, *args, **kwargs)
        # Minimal kickstart containing no %ksappend directives.
        self.ks = '\nlang en_US\nkeyboard us\nautopart\n'

    def setUp(self):
        ParserTest.setUp(self)
        # Path of the preprocessed temp file; set in runTest, removed in tearDown.
        self._path = None

    def runTest(self):
        self._path = preprocessFromString(self.ks)
        with open(self._path) as f:
            # With nothing to expand, the output must equal the input exactly.
            self.assertEqual(f.read(), self.ks)

    def tearDown(self):
        ParserTest.tearDown(self)
        if self._path:
            os.unlink(self._path)
class ScanInplaceOptimizer(GraphRewriter):
    """Graph rewriter that lets eligible Scan outputs reuse their input buffers in place."""

    # Ops whose outputs are freshly allocated buffers and thus safe to overwrite.
    alloc_ops = (Alloc, AllocEmpty)

    def add_requirements(self, fgraph):
        # Needed to validate replacements and to track destructive operations.
        fgraph.attach_feature(ReplaceValidate())
        fgraph.attach_feature(DestroyHandler())

    def attempt_scan_inplace(self, fgraph: FunctionGraph, node: Apply[Scan], output_indices: list[int]) -> Optional[Apply]:
        """Try to rebuild `node` with the outputs in `output_indices` computed in place.

        Returns the new Apply node on success, or None when the replacement is
        rejected by the validation machinery.
        """
        op = node.op
        # Partition the outer inputs: the recurrent-state inputs (mitmot,
        # mitsot, sitsot) in `ls` are the in-place candidates; everything
        # before/after them is kept as-is.
        ls_begin = node.inputs[:(1 + op.info.n_seqs)]
        ls = op.outer_mitmot(node.inputs)
        ls += op.outer_mitsot(node.inputs)
        ls += op.outer_sitsot(node.inputs)
        ls_end = op.outer_shared(node.inputs)
        ls_end += op.outer_nitsot(node.inputs)
        ls_end += op.outer_non_seqs(node.inputs)
        for i in range(len(ls)):
            inp = ls[i]
            # An allocation shared with other clients must be duplicated
            # before it may be destroyed.
            if ((len(fgraph.clients[inp]) > 1) and inp.owner and isinstance(inp.owner.op, self.alloc_ops)):
                new_lsi = inp.owner.op.make_node(*inp.owner.inputs)
                if (config.compute_test_value != 'off'):
                    compute_test_value(new_lsi)
                new_lsi_out = new_lsi.outputs
                if (len(new_lsi_out) == 1):
                    new_lsi_out = new_lsi_out[0]
                ls[i] = new_lsi_out
        n_outs = len(ls)
        # The same buffer must not back two different in-place outputs.
        for idx in range(n_outs):
            if (ls[idx] in ls[:idx]):
                ls[idx] = deep_copy_op(ls[idx])
        inputs = ((ls_begin + ls) + ls_end)
        new_op = op.clone()
        destroy_map = op.destroy_map.copy()
        for out_idx in output_indices:
            # Output out_idx destroys the corresponding recurrent-state input.
            destroy_map[out_idx] = [((out_idx + 1) + op.info.n_seqs)]
        new_op.destroy_map = destroy_map
        new_outs = new_op(*inputs, return_list=True)
        assert isinstance(new_outs, list)
        try:
            fgraph.replace_all_validate_remove(list(zip(node.outputs, new_outs)), remove=[node], reason='scan_make_inplace')
            return cast(Apply[Scan], new_outs[0].owner)
        except InconsistencyError:
            # Replacement rejected (it would violate a destroy/protection constraint).
            return None

    def apply(self, fgraph):
        """Visit every Scan node and try to make its recurrent outputs operate in place."""
        for (scan_idx, original_node) in enumerate(reversed(fgraph.toposort())):
            if (not isinstance(original_node.op, Scan)):
                continue
            op = original_node.op
            n_outs = ((op.info.n_mit_mot + op.info.n_mit_sot) + op.info.n_sit_sot)
            # Collect output indices whose input buffer is either a fresh
            # allocation or not already consumed destructively elsewhere.
            out_indices = []
            for out_idx in range(n_outs):
                inp_idx = ((1 + op.info.n_seqs) + out_idx)
                inp = original_node.inputs[inp_idx]
                if (inp.owner and isinstance(inp.owner.op, self.alloc_ops)):
                    out_indices.append(out_idx)
                    continue
                input_used_inplace = False
                for c in fgraph.clients[original_node.inputs[inp_idx]]:
                    client = c[0]
                    if client.op.destroy_map:
                        inplace_inp_indices = sum(client.op.destroy_map.values(), [])
                        inplace_inps = [client.inputs[i] for i in inplace_inp_indices]
                        if (original_node.inputs[inp_idx] in inplace_inps):
                            input_used_inplace = True
                            break
                if (not input_used_inplace):
                    out_indices.append(out_idx)
            if (len(out_indices) == 0):
                continue
            # Try all candidate outputs at once; on failure fall back to
            # attempting them one at a time.
            new_node = self.attempt_scan_inplace(fgraph, original_node, out_indices)
            if (new_node is None):
                new_node = original_node
                for pos in out_indices:
                    new_node = (self.attempt_scan_inplace(fgraph, new_node, [pos]) or new_node)
def _iter_params_for_processing(invocation_order: Sequence[Parameter], declaration_order: Sequence[Parameter]) -> list[Parameter]:
    """Order parameters for processing.

    'paths', 'config' and 'hook_module' always come first (in that order);
    the remaining parameters are sorted eager-first, then by their position
    in `invocation_order` (parameters that were not invoked sort last, in
    declaration order).
    """
    _PINNED_RANK = {'paths': -3.0, 'config': -2.0, 'hook_module': -1.0}

    def sort_key(param: Parameter) -> tuple[(bool, float)]:
        pinned = _PINNED_RANK.get(param.name)
        if pinned is not None:
            return (False, pinned)
        try:
            position: float = invocation_order.index(param)
        except ValueError:
            # Not invoked: sort after every invoked parameter.
            position = float('inf')
        return (not param.is_eager, position)

    # sorted() is stable, so ties keep declaration order.
    return sorted(declaration_order, key=sort_key)
class BasicBlock(nn.Module):
    """Minimal conv → ReLU building block.

    ``**block_kwargs`` is accepted for signature compatibility with sibling
    blocks but is not used here.
    """

    def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, **block_kwargs):
        super().__init__()
        # One convolution followed by a ReLU non-linearity.
        self.conv = layers.convnxn(in_channels, channels, kernel_size, stride=stride, padding=padding)
        self.relu = layers.relu()

    def forward(self, x):
        return self.relu(self.conv(x))
def parse_args():
parser = argparse.ArgumentParser(description='Finetune a transformers model on a text classification task')
parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
parser.add_argument('--predict_with_generate', type=bool, default=True, help='')
parser.add_argument('--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).')
parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
parser.add_argument('--num_beams', type=int, default=None, help='Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.')
parser.add_argument('--max_source_length', type=int, default=1024, help='The maximum total input sequence length after tokenization.Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--max_target_length', type=int, default=128, help='The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.during ``evaluate`` and ``predict``.')
parser.add_argument('--val_max_target_length', type=int, default=None, help='The maximum total sequence length for validation target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.')
parser.add_argument('--pad_to_max_length', type=bool, default=False, help='Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. Moreefficient on GPU but very bad for TPU.')
parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
parser.add_argument('--ignore_pad_token_for_loss', type=bool, default=True, help='Whether to ignore the tokens corresponding to padded labels in the loss computation or not.')
parser.add_argument('--source_lang', type=str, default=None, help='Source language id for translation.')
parser.add_argument('--target_lang', type=str, default=None, help='Target language id for translation.')
parser.add_argument('--source_prefix', type=str, default=None, help='A prefix to add before every source text (useful for T5 models).')
parser.add_argument('--preprocessing_num_workers', type=int, default=None, help='The number of processes to use for the preprocessing.')
parser.add_argument('--overwrite_cache', type=bool, default=None, help='Overwrite the cached training and evaluation sets')
parser.add_argument('--max_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_lengh` is passed.')
parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True)
parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', type=str, default=None, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--use_slow_tokenizer', action='store_true', help='If passed, will use a slow tokenizer (not backed by the Tokenizers library).')
parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
parser.add_argument('--model_type', type=str, default=None, help='Model type to use if training from scratch.', choices=MODEL_TYPES)
parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
args = parser.parse_args()
if ((args.dataset_name is None) and (args.train_file is None) and (args.validation_file is None)):
raise ValueError('Need either a task name or a training/validation file.')
if (args.train_file is not None):
extension = args.train_file.split('.')[(- 1)]
assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
if (args.validation_file is not None):
extension = args.validation_file.split('.')[(- 1)]
assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
if args.push_to_hub:
assert (args.output_dir is not None), 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
return args |
class VOTLTDataset(Dataset):
    """VOT long-term tracking benchmark dataset.

    Args:
        name: dataset name, used to locate ``<name>.json`` under the root.
        dataset_root: dataset root directory.
        load_img: accepted for interface compatibility; not used in this
            constructor.
    """

    def __init__(self, name, dataset_root, load_img=False):
        super(VOTLTDataset, self).__init__(name, dataset_root)
        # All per-video metadata lives in a single JSON file at the root.
        with open(os.path.join(dataset_root, (name + '.json')), 'r') as meta_file:
            meta_data = json.load(meta_file)
        progress = tqdm(meta_data.keys(), desc=('loading ' + name), ncols=100)
        self.videos = {}
        for video_name in progress:
            progress.set_postfix_str(video_name)
            info = meta_data[video_name]
            self.videos[video_name] = VOTLTVideo(video_name, dataset_root, info['video_dir'], info['init_rect'], info['img_names'], info['gt_rect'])
class SubcommandsExample(cmd2.Cmd):
    """cmd2 application demonstrating argparse subcommand dispatch.

    NOTE(review): the bare ``.with_argparser(...)`` lines and the
    ``parser_*`` / ``base_parser`` names referenced at class level look like
    decorators and parsers whose setup was lost during extraction — confirm
    against the original cmd2 subcommands example.
    """
    def __init__(self):
        super().__init__()

    # Handler for the 'foo' subcommand: prints x * y.
    def base_foo(self, args):
        self.poutput((args.x * args.y))

    # Handler for the 'bar' subcommand: prints z wrapped in double parens.
    def base_bar(self, args):
        self.poutput(('((%s))' % args.z))

    # Handler for the 'sport' subcommand.
    def base_sport(self, args):
        self.poutput('Sport is {}'.format(args.sport))

    # Wire each subparser to its handler so do_base/do_alternate can
    # dispatch on args.func.
    parser_foo.set_defaults(func=base_foo)
    parser_bar.set_defaults(func=base_bar)
    parser_sport.set_defaults(func=base_sport)

    .with_argparser(base_parser)
    def do_base(self, args):
        # Dispatch to the handler selected by the subparser, or show help
        # when no subcommand was given.
        func = getattr(args, 'func', None)
        if (func is not None):
            func(self, args)
        else:
            self.do_help('base')

    .with_argparser(base2_parser)
    def do_alternate(self, args):
        # Same dispatch pattern for the alternate top-level command.
        func = getattr(args, 'func', None)
        if (func is not None):
            func(self, args)
        else:
            self.do_help('alternate')
def main():
parser = argparse.ArgumentParser(description='symmetric alignment builer')
parser.add_argument('--fast_align_dir', help='path to fast_align build directory')
parser.add_argument('--mosesdecoder_dir', help='path to mosesdecoder root directory')
parser.add_argument('--sym_heuristic', help='heuristic to use for symmetrization', default='grow-diag-final-and')
parser.add_argument('--source_file', help='path to a file with sentences in the source language')
parser.add_argument('--target_file', help='path to a file with sentences in the target language')
parser.add_argument('--output_dir', help='output directory')
args = parser.parse_args()
fast_align_bin = os.path.join(args.fast_align_dir, 'fast_align')
symal_bin = os.path.join(args.mosesdecoder_dir, 'bin', 'symal')
sym_fast_align_bin = os.path.join(args.mosesdecoder_dir, 'scripts', 'ems', 'support', 'symmetrize-fast-align.perl')
joined_file = os.path.join(args.output_dir, 'text.joined')
with open(args.source_file, 'r', encoding='utf-8') as src, open(args.target_file, 'r', encoding='utf-8') as tgt:
with open(joined_file, 'w', encoding='utf-8') as joined:
for (s, t) in zip_longest(src, tgt):
print('{} ||| {}'.format(s.strip(), t.strip()), file=joined)
bwd_align_file = os.path.join(args.output_dir, 'align.backward')
fwd_align_file = os.path.join(args.output_dir, 'align.forward')
fwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v > {FWD}'.format(FASTALIGN=fast_align_bin, JOINED=joined_file, FWD=fwd_align_file)
assert (os.system(fwd_fast_align_cmd) == 0)
bwd_align_file = os.path.join(args.output_dir, 'align.backward')
bwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}'.format(FASTALIGN=fast_align_bin, JOINED=joined_file, BWD=bwd_align_file)
assert (os.system(bwd_fast_align_cmd) == 0)
sym_out_file = os.path.join(args.output_dir, 'aligned')
sym_cmd = '{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}'.format(SYMFASTALIGN=sym_fast_align_bin, FWD=fwd_align_file, BWD=bwd_align_file, SRC=args.source_file, TGT=args.target_file, OUT=sym_out_file, HEURISTIC=args.sym_heuristic, SYMAL=symal_bin)
assert (os.system(sym_cmd) == 0) |
def main(argv=None):
    """Interactive kickstart shell: parse kickstart lines typed at a prompt,
    optionally seeded from an input file, and emit the accumulated kickstart
    at exit.

    Args:
        argv: argument list for argparse (defaults to ``sys.argv[1:]``).

    Returns:
        0 on success, 1 when the requested syntax version is unsupported.
    """
    op = argparse.ArgumentParser()
    op.add_argument('-i', '--input', dest='input', help=_('a basis file to use for seeding the kickstart data (optional)'))
    op.add_argument('-o', '--output', dest='output', help=_('the location to write the finished kickstart file, or stdout if not given'))
    op.add_argument('-v', '--version', dest='version', default=DEVEL, help=_('version of kickstart syntax to validate against'))
    opts = op.parse_args(argv)
    # Build a handler for the requested kickstart syntax version.
    try:
        kshandler = makeVersion(opts.version)
    except KickstartVersionError:
        print((_('The version %s is not supported by pykickstart') % opts.version))
        return 1
    # Errors are reported but must not abort an interactive session.
    ksparser = KickstartParser(kshandler, followIncludes=True, errorsAreFatal=False)
    if opts.input:
        # Seed the handler state from an existing kickstart file.
        try:
            processedFile = preprocessKickstart(opts.input)
            ksparser.readKickstart(processedFile)
            # Remove the preprocessed copy once consumed.
            os.remove(processedFile)
        except KickstartError as e:
            print((_('Warning: The following error occurred when processing the input file:\n%s\n') % e))
    # Dot-commands handled by the shell itself rather than the parser.
    internalCommands = {'.clear': ClearCommand(), '.show': ShowCommand(), '.quit': QuitCommand()}
    readline.parse_and_bind('tab: complete')
    readline.set_completer(KickstartCompleter(kshandler, internalCommands).complete)
    # Kickstart options contain dashes; drop '-' from the completer's word
    # delimiters so whole options complete correctly.
    delims = readline.get_completer_delims()
    readline.set_completer_delims(delims.replace('-', ''))
    print('Press ^D to exit.')
    # Read-eval loop; ^D or ^C ends the session.
    while True:
        try:
            line = input('ks> ')
        except EOFError:
            break
        except KeyboardInterrupt:
            break
        if line.startswith('.'):
            words = line.split()
            if (words[0] in internalCommands):
                # Internal commands may signal shell exit by raising EOFError.
                try:
                    internalCommands[words[0]].execute(ksparser)
                except EOFError:
                    break
            else:
                print((_('Internal command %s not recognized.') % words[0]))
            continue
        # Everything else is fed to the kickstart parser incrementally.
        try:
            ksparser.readKickstartFromString(line)
        except KickstartError as e:
            print(e)
    # Dump the accumulated kickstart to the output file, or stdout.
    if opts.output:
        with open(opts.output, 'w') as fd:
            fd.write(str(ksparser.handler))
    else:
        print(('\n' + str(ksparser.handler)))
    return 0
class MobileNetV2ImageProcessor(BaseImageProcessor):
    """Image processor for MobileNetV2.

    Pipeline: resize (shortest edge, default 256) → center crop (default
    224x224) → rescale (default 1/255) → normalize (ImageNet mean/std).
    Each step can be toggled per instance or per ``preprocess`` call.
    """
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool=True, size: Optional[Dict[(str, int)]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=True, crop_size: Dict[(str, int)]=None, do_rescale: bool=True, rescale_factor: Union[(int, float)]=(1 / 255), do_normalize: bool=True, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, **kwargs) -> None:
        """Store the default preprocessing configuration.

        ``size`` / ``crop_size`` are normalized through ``get_size_dict``;
        mean/std default to the ImageNet standard values.
        """
        super().__init__(**kwargs)
        size = (size if (size is not None) else {'shortest_edge': 256})
        size = get_size_dict(size, default_to_square=False)
        crop_size = (crop_size if (crop_size is not None) else {'height': 224, 'width': 224})
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = (image_mean if (image_mean is not None) else IMAGENET_STANDARD_MEAN)
        self.image_std = (image_std if (image_std is not None) else IMAGENET_STANDARD_STD)

    def resize(self, image: np.ndarray, size: Dict[(str, int)], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Resize so the shortest edge equals ``size['shortest_edge']``,
        preserving aspect ratio.

        NOTE(review): this method's default resample is BICUBIC while
        ``__init__`` defaults to BILINEAR — confirm the mismatch is
        intentional upstream.
        """
        size = get_size_dict(size, default_to_square=False)
        if ('shortest_edge' not in size):
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[(str, int)], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Center-crop the image to exactly ``(size['height'], size['width'])``."""
        size = get_size_dict(size)
        if (('height' not in size) or ('width' not in size)):
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255 to map [0,255] → [0,1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[(float, List[float])], std: Union[(float, List[float])], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Per-channel normalize: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Dict[(str, int)]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[(str, int)]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, data_format: Union[(str, ChannelDimension)]=ChannelDimension.FIRST, **kwargs):
        """Run the configured pipeline over one or more images.

        Every per-call argument overrides the instance default when given;
        ``None`` falls back to the value stored at construction time.

        Returns:
            BatchFeature with key ``'pixel_values'``, optionally converted
            to the tensor type named by ``return_tensors``.

        Raises:
            ValueError: for invalid image types or for an enabled step whose
                required configuration is missing.
        """
        do_resize = (do_resize if (do_resize is not None) else self.do_resize)
        size = (size if (size is not None) else self.size)
        size = get_size_dict(size, default_to_square=False)
        resample = (resample if (resample is not None) else self.resample)
        do_center_crop = (do_center_crop if (do_center_crop is not None) else self.do_center_crop)
        crop_size = (crop_size if (crop_size is not None) else self.crop_size)
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        do_rescale = (do_rescale if (do_rescale is not None) else self.do_rescale)
        rescale_factor = (rescale_factor if (rescale_factor is not None) else self.rescale_factor)
        do_normalize = (do_normalize if (do_normalize is not None) else self.do_normalize)
        image_mean = (image_mean if (image_mean is not None) else self.image_mean)
        image_std = (image_std if (image_std is not None) else self.image_std)
        images = make_list_of_images(images)
        if (not valid_images(images)):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        # Validate that each enabled step has its configuration.
        if (do_resize and (size is None)):
            raise ValueError('Size must be specified if do_resize is True.')
        if (do_center_crop and (crop_size is None)):
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if (do_rescale and (rescale_factor is None)):
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if (do_normalize and ((image_mean is None) or (image_std is None))):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations operate on numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple]=None):
        """Convert model logits into per-image semantic segmentation maps
        (argmax over the class dimension), optionally resizing each map to
        the corresponding entry of ``target_sizes``."""
        logits = outputs.logits
        if (target_sizes is not None):
            if (len(logits) != len(target_sizes)):
                raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            # Upsample each logit map to its requested size before argmax.
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
def rename_key(orig_key):
    """Translate a Nystromformer checkpoint parameter name into the
    HuggingFace naming scheme.

    Rules fire in order whenever their needle occurs as a substring, so the
    more specific needles (``norm1``, ``ff1``, ``mlm_class``) are listed
    before their generic fallbacks (``norm``, ``ff``, ``mlm``).
    """
    key = orig_key
    if 'model' in key:
        key = key.replace('model.', '')
    # LayerNorm renames must run before the transformer-layer rewrite.
    for needle, old, new in (('norm1', 'norm1', 'attention.output.LayerNorm'),
                             ('norm2', 'norm2', 'output.LayerNorm'),
                             ('norm', 'norm', 'LayerNorm')):
        if needle in key:
            key = key.replace(old, new)
    if 'transformer' in key:
        # 'transformer_<n>.' prefixes become 'encoder.layer.<n>.'
        layer_num = key.split('.')[0].split('_')[-1]
        key = key.replace(f'transformer_{layer_num}', f'encoder.layer.{layer_num}')
    # Remaining substring rewrites; (needle, old, new) — the needle that is
    # checked may differ from the substring that is replaced (mlm_class).
    for needle, old, new in (('mha.attn', 'mha.attn', 'attention.self'),
                             ('mha', 'mha', 'attention'),
                             ('W_q', 'W_q', 'self.query'),
                             ('W_k', 'W_k', 'self.key'),
                             ('W_v', 'W_v', 'self.value'),
                             ('ff1', 'ff1', 'intermediate.dense'),
                             ('ff2', 'ff2', 'output.dense'),
                             ('ff', 'ff', 'output.dense'),
                             ('mlm_class', 'mlm.mlm_class', 'cls.predictions.decoder'),
                             ('mlm', 'mlm', 'cls.predictions.transform')):
        if needle in key:
            key = key.replace(old, new)
    # Everything outside the classification head lives under 'nystromformer.'.
    if 'cls' not in key:
        key = ('nystromformer.' + key)
    return key
# NOTE(review): the bare ``.skipif(...)`` below looks like a
# ``@pytest.mark.skipif`` decorator whose prefix was lost during
# extraction — confirm against the original test module.
.skipif((sys.platform == 'win32'), reason='Windows only applies R/O to files')
def test_destination_not_write_able(tmp_path, capsys):
    """Creating into a read-only destination must fail with a clear message."""
    # Root ignores file permission bits, so the restriction cannot be tested.
    if (hasattr(os, 'geteuid') and (os.geteuid() == 0)):
        pytest.skip('no way to check permission restriction when running under root')
    target = tmp_path
    prev_mod = target.stat().st_mode
    # Drop write permission: read-only for owner, group and others.
    target.chmod(((S_IREAD | S_IRGRP) | S_IROTH))
    try:
        err = _non_success_exit_code(capsys, str(target))
        msg = f'the destination . is not write-able at {target!s}'
        assert (msg in err), err
    finally:
        # Always restore the original mode so tmp_path can be cleaned up.
        target.chmod(prev_mod)
class W_ComposableContinuation(W_Procedure):
    """Procedure wrapper around a captured composable (delimited) continuation.

    NOTE: ``_attrs_`` / ``_immutable_fields_`` are RPython translation hints;
    the instance layout is deliberately fixed to these two fields.
    """
    errorname = 'composable-continuation'
    _attrs_ = _immutable_fields_ = ['cont', 'prompt_tag']

    def __init__(self, cont, prompt_tag=None):
        # cont: the captured continuation; prompt_tag: the delimiting prompt
        # tag (may be None).
        self.cont = cont
        self.prompt_tag = prompt_tag

    def get_arity(self, promote=False):
        # The number of values a continuation accepts is not statically known.
        from pycket.arity import Arity
        return Arity.unknown

    def call(self, args, env, cont):
        # extend=True composes the captured continuation onto the current
        # one instead of replacing it (the "composable" behaviour).
        from pycket.prims.control import install_continuation
        return install_continuation(self.cont, self.prompt_tag, args, env, cont, extend=True)

    def tostring(self):
        return '#<continuation>'
def optimize_module(quant_module: QcQuantizeWrapper, x: torch.Tensor, xq: torch.Tensor, params: SeqMseParams):
    """Sequential-MSE search for the best per-channel weight encoding range.

    For each candidate (min, max) range, quantize-dequantize the weights,
    accumulate the reconstruction loss over ``params.num_batches`` batches of
    (float input ``x``, quantized input ``xq``), then freeze the quantizer
    with the per-channel range that minimizes the loss.

    Args:
        quant_module: wrapped module whose 'weight' quantizer is optimized.
        x: float inputs, indexed by batch.
        xq: quantized counterparts of ``x``, indexed by batch.
        params: search configuration (num_candidates, num_batches, loss type).
    """
    # Candidate ranges derive from the observed per-channel weight range;
    # symmetric mode only needs the absolute maximum.
    if quant_module.param_quantizers['weight'].use_symmetric_encodings:
        per_channel_max = torch.max(quant_module.weight.abs(), dim=1)[0].detach()
        per_channel_min = None
    else:
        per_channel_max = torch.max(quant_module.weight, dim=1)[0].detach()
        per_channel_min = torch.min(quant_module.weight, dim=1)[0].detach()
    candidates = get_candidates(params.num_candidates, per_channel_max, per_channel_min)
    total_loss = []
    for (cand_max, cand_min) in candidates:
        compute_param_encodings(quant_module.param_quantizers['weight'], cand_min, cand_max)
        w = quant_module.weight
        wq = quant_module.param_quantizers['weight'].quantize_dequantize(w, libpymo.RoundingMode.ROUND_NEAREST)
        loss = torch.zeros(len(cand_max), device=w.device)
        with torch.no_grad():
            for batch_idx in range(params.num_batches):
                (xqwq, xw) = compute_outputs(quant_module, x[batch_idx], xq[batch_idx], w, wq)
                loss += compute_recon_loss(xqwq, xw, params)
        total_loss.append(loss)
    # Pick, per output channel, the candidate index with the minimum loss.
    # (A leftover debug print of best_indices was removed here.)
    best_indices = torch.stack(total_loss).min(0, keepdim=True)[1]
    best_max = torch.stack([cand_max for (cand_max, _) in candidates]).gather(0, best_indices)[0]
    best_min = torch.stack([cand_min for (_, cand_min) in candidates]).gather(0, best_indices)[0]
    # Re-freeze the quantizer with the winning per-channel range.
    compute_param_encodings(quant_module.param_quantizers['weight'], best_min, best_max)
    quant_module.param_quantizers['weight'].freeze_encoding()
class LoggedInteractiveConsole(code.InteractiveConsole):
    """Interactive console that logs every input line to a file in an
    RFC 5424 (syslog)-style format."""

    def __init__(self, _locals: Dict[str, Any], logpath: str) -> None:
        code.InteractiveConsole.__init__(self, _locals)
        self.output_file = logpath
        self.pid = os.getpid()
        # Syslog priority: facility LOG_USER, severity LOG_NOTICE.
        self.pri = (syslog.LOG_USER | syslog.LOG_NOTICE)
        self.hostname = os.uname().nodename
        self.log_event(message='Start InteractiveConsole logging', message_id='CSTR')

    def raw_input(self, prompt: Optional[str]='') -> str:
        """Read one line from the user and log it before returning it."""
        data = input(prompt)
        self.log_event(message=data, message_id='CEXC')
        return data

    def log_event(self, message: str, message_id: Optional[str]='-', structured: Optional[str]='-') -> None:
        """Append one syslog-style record to the log file.

        Bug fix: the file is opened in append mode ('a'); the previous 'w'
        mode truncated the log on every event, so only the most recent
        record ever survived.  The explicit flush was redundant — the
        ``with`` block closes (and flushes) the file.

        NOTE(review): ``datetime.utcnow()`` is deprecated in Python 3.12;
        switching to ``datetime.now(timezone.utc)`` keeps the same output —
        left unchanged to avoid touching the module imports.
        """
        timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        prompt = f'<{self.pri}>1 {timestamp} {self.hostname} baseplate-shell {self.pid} {message_id} {structured} {message}'
        with open(self.output_file, 'a', encoding='UTF-8') as f:
            print(prompt, file=f)
class PetitionCreateWizardViewTest(TestCase):
    """Tests for petition ownership transfer (model method and HTTP view).

    NOTE(review): ``setUpTestData`` is normally a ``@classmethod`` on
    Django's ``TestCase``; the decorator appears to have been lost in
    extraction — confirm against the original test module.
    """
    def setUpTestData(cls):
        # Populate organizations, users and per-organization memberships
        # from the module-level `orgs` / `users` / `org_members` fixtures.
        User = get_user_model()
        for org in orgs:
            o = Organization.objects.create(name=org)
            o.save()
        for user in users:
            # Password equals the username (see login() below).
            u = User.objects.create_user(user, password=user)
            u.first_name = user
            u.last_name = (user + 'Last')
            u.save()
        for orgname in org_members:
            org = Organization.objects.get(name=orgname)
            for username in org_members[orgname]:
                user = PytitionUser.objects.get(user__username=username)
                org.members.add(user)
                permission = Permission.objects.get(organization=org, user=user)
                permission.can_modify_permissions = True
                permission.save()
        # julia additionally gets petition-modification rights in one org.
        user = PytitionUser.objects.get(user__username='julia')
        org = Organization.objects.get(name='Les Amis de la Terre')
        perm = Permission.objects.get(organization=org, user=user)
        perm.can_modify_petitions = True
        perm.save()

    def login(self, name, password=None):
        # Log in via the test client; the password defaults to the username,
        # matching how users are created in setUpTestData.
        self.client.login(username=name, password=(password if password else name))
        self.pu = PytitionUser.objects.get(user__username=name)
        return self.pu

    def logout(self):
        self.client.logout()

    def test_model_transfer_method(self):
        """Petition.transfer_to must flip ownership between user and org."""
        org = Organization.objects.get(name='Les Amis de la Terre')
        user = PytitionUser.objects.get(user__username='julia')
        user_petition = Petition.objects.create(title='Petition 1', user=user)
        self.assertEqual(user_petition.user, user)
        self.assertEqual(user_petition.org, None)
        user_petition.transfer_to(org=org)
        self.assertEqual(user_petition.user, None)
        self.assertEqual(user_petition.org, org)
        user_petition.transfer_to(user=user)
        self.assertEqual(user_petition.user, user)
        self.assertEqual(user_petition.org, None)
        # Calling transfer_to with no target must raise.
        with self.assertRaises(ValueError):
            user_petition.transfer_to()
            # NOTE(review): unreachable if the previous call raises — likely
            # intended to be a separate assertRaises block; confirm upstream.
            user_petition.transfer_to(org=org, user=user)

    def test_transfer_view(self):
        """The transfer_petition view moves a petition user<->org via POST."""
        self.login('julia')
        org = Organization.objects.get(name='Les Amis de la Terre')
        user = PytitionUser.objects.get(user__username='julia')
        user_petition = Petition.objects.create(title='Petition 1', user=user)
        url = reverse('transfer_petition', args=[user_petition.id])
        # user → org transfer through the view.
        response = self.client.post(url, data={'new_owner_type': 'org', 'new_owner_name': org.slugname}, follow=True)
        self.assertEqual(response.status_code, 200)
        user_petition = Petition.objects.get(id=user_petition.id)
        self.assertEqual(user_petition.org, org)
        # org → user transfer back.
        response = self.client.post(url, data={'new_owner_type': 'user', 'new_owner_name': user.user.username}, follow=True)
        self.assertEqual(response.status_code, 200)
        user_petition = Petition.objects.get(id=user_petition.id)
        self.assertEqual(user_petition.user, user)
        # With user petitions disabled, transfer to a user must be refused.
        with override_settings(DISABLE_USER_PETITION=True):
            user_petition = Petition.objects.create(title='Petition 1', org=org)
            response = self.client.post(url, data={'new_owner_type': 'user', 'new_owner_name': user.user.username}, follow=True)
            self.assertContains(response, 'Users are not allowed to transfer petitions to organizations on this instance.')
            user_petition = Petition.objects.get(id=user_petition.id)
            self.assertIsNone(user_petition.user)
class TestFileMonitor():
    """End-to-end tests for BasicMonitor: file-system events must surface
    after the event loop has been pumped.

    Each test sleeps SLEEP_SECS and runs the GTK loop (run_gtk_loop)
    because events are delivered asynchronously.
    """
    def test_create_delete(self, temp_dir: Path):
        path = temp_dir
        monitor = BasicMonitor(path)
        some_file = (path / 'foo.txt')
        some_file.write_text('test')
        # Give the monitor time to observe the creation.
        sleep(SLEEP_SECS)
        run_gtk_loop()
        assert monitor.changed, 'No events after creation'
        assert (monitor.event_types >= {Event.CREATED})
        # Reset observed events before exercising deletion.
        monitor.changed.clear()
        some_file.unlink()
        sleep(SLEEP_SECS)
        run_gtk_loop()
        assert monitor.changed, 'No events after deletion'
        assert (monitor.event_types >= {Event.DELETED})

    def test_move(self, temp_dir: Path):
        monitor = BasicMonitor(temp_dir)
        with temp_filename(dir=temp_dir, suffix='.txt', as_path=True) as path:
            path.write_text('test\n')
            sleep(SLEEP_SECS)
            run_gtk_loop()
            assert monitor.changed, 'No events after creation'
            monitor.changed.clear()
            # A rename within the watched directory should surface as RENAMED.
            new_name = f'new-{time()}.txt'
            path.rename((path.parent / new_name))
            sleep(SLEEP_SECS)
            run_gtk_loop()
            assert monitor.changed
            assert (monitor.event_types >= {Event.RENAMED}), f'Got {monitor.changed}'
def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Build an EfficientNet-Lite model variant.

    Args:
        variant: model variant name used for config/checkpoint lookup.
        channel_multiplier: width scaling factor for channel counts.
        depth_multiplier: depth scaling factor for block repeats.
        pretrained: whether to load pretrained weights.
        **kwargs: extra model args; activation ('relu6' default) and BN
            overrides are resolved out of them here.
    """
    # Per-stage block definitions in the encoded arch-def string format.
    stage_defs = [
        ['ds_r1_k3_s1_e1_c16'],
        ['ir_r2_k3_s2_e6_c24'],
        ['ir_r2_k5_s2_e6_c40'],
        ['ir_r3_k3_s2_e6_c80'],
        ['ir_r3_k5_s1_e6_c112'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(stage_defs, depth_multiplier, fix_first_last=True),
        num_features=1280,
        stem_size=32,
        fix_stem=True,
        channel_multiplier=channel_multiplier,
        act_layer=resolve_act_layer(kwargs, 'relu6'),
        norm_kwargs=resolve_bn_args(kwargs),
        **kwargs,
    )
    return _create_effnet(model_kwargs, variant, pretrained)
class QuadraticProgram():
Status = QuadraticProgramStatus
def __init__(self, name: str='') -> None:
    """Create an empty quadratic program.

    Args:
        name: problem name; a non-printable name triggers a warning.

    NOTE(review): ``self.name = name`` relies on ``name`` being a property
    setter; the ``@property`` / ``@name.setter`` decorators appear to have
    been stripped from this extracted source — confirm upstream.
    """
    if (not name.isprintable()):
        warn('Problem name is not printable')
    self._name = ''
    self.name = name
    self._status = QuadraticProgram.Status.VALID
    # Variables and constraints are kept as ordered lists plus name→index maps.
    self._variables: List[Variable] = []
    self._variables_index: Dict[(str, int)] = {}
    self._linear_constraints: List[LinearConstraint] = []
    self._linear_constraints_index: Dict[(str, int)] = {}
    self._quadratic_constraints: List[QuadraticConstraint] = []
    self._quadratic_constraints_index: Dict[(str, int)] = {}
    # Default objective: minimize the zero function.
    self._objective = QuadraticObjective(self)
def __repr__(self) -> str:
    """Truncated one-line summary for interactive sessions."""
    # Local import avoids a circular dependency with the translators package.
    from ..translators.prettyprint import DEFAULT_TRUNCATE, expr2str
    objective = expr2str(constant=self._objective.constant, linear=self.objective.linear, quadratic=self._objective.quadratic, truncate=DEFAULT_TRUNCATE)
    num_constraints = (self.get_num_linear_constraints() + self.get_num_quadratic_constraints())
    return f"<{self.__class__.__name__}: {self.objective.sense.name.lower()} {objective}, {self.get_num_vars()} variables, {num_constraints} constraints, '{self._name}'>"
def __str__(self) -> str:
    """Full (untruncated) objective plus problem size summary."""
    num_constraints = (self.get_num_linear_constraints() + self.get_num_quadratic_constraints())
    return f"{str(self.objective)} ({self.get_num_vars()} variables, {num_constraints} constraints, '{self._name}')"
def clear(self) -> None:
    """Reset the problem to its freshly-constructed empty state: no name,
    no variables, no constraints, zero objective."""
    self._name = ''
    self._status = QuadraticProgram.Status.VALID
    self._variables.clear()
    self._variables_index.clear()
    self._linear_constraints.clear()
    self._linear_constraints_index.clear()
    self._quadratic_constraints.clear()
    self._quadratic_constraints_index.clear()
    self._objective = QuadraticObjective(self)
# NOTE(review): the five accessors below read like ``@property`` getters
# plus a ``@name.setter`` whose decorators were stripped during extraction;
# as written, the second ``name`` definition shadows the first — confirm
# against the original module.
def name(self) -> str:
    # Getter: the problem's name.
    return self._name

def name(self, name: str) -> None:
    # Setter: validate, then store the problem's name.
    self._check_name(name, 'Problem')
    self._name = name

def status(self) -> QuadraticProgramStatus:
    # Current validity status of the problem.
    return self._status

def variables(self) -> List[Variable]:
    # All variables, in insertion order.
    return self._variables

def variables_index(self) -> Dict[(str, int)]:
    # Mapping from variable name to its index in ``variables``.
    return self._variables_index
def _add_variable(self, lowerbound: Union[(float, int)], upperbound: Union[(float, int)], vartype: VarType, name: Optional[str]) -> Variable:
    """Create and register a single variable; returns the new Variable.

    Without an explicit name, the variable is auto-named 'x<index>'; an
    explicit name is used verbatim (empty key format, no index appended).
    """
    if (not name):
        name = 'x'
        key_format = '{}'
    else:
        key_format = ''
    return self._add_variables(1, lowerbound, upperbound, vartype, name, key_format)[1][0]
def _add_variables(self, keys: Union[(int, Sequence)], lowerbound: Union[(float, int)], upperbound: Union[(float, int)], vartype: VarType, name: Optional[str], key_format: str) -> Tuple[(List[str], List[Variable])]:
    """Create several variables at once.

    Args:
        keys: either the number of variables to create (auto-indexed names)
            or a sequence of keys substituted into ``key_format``.
        lowerbound: lower bound shared by all created variables.
        upperbound: upper bound shared by all created variables.
        vartype: variable type shared by all created variables.
        name: base name (defaults to 'x').
        key_format: suffix format appended to the base name; at most one
            '{}' substitution is allowed.

    Returns:
        Tuple of (generated names, created Variable objects).

    Raises:
        QiskitOptimizationError: for a non-positive integer ``keys``, a
            nested or repeated substitution in ``key_format``, or a name
            collision.
    """
    if (isinstance(keys, int) and (keys < 1)):
        raise QiskitOptimizationError(f'Cannot create non-positive number of variables: {keys}')
    if (not name):
        name = 'x'
    if ('{{}}' in key_format):
        raise QiskitOptimizationError(f'Formatter cannot contain nested substitutions: {key_format}')
    if (key_format.count('{}') > 1):
        raise QiskitOptimizationError(f'Formatter cannot contain more than one substitution: {key_format}')

    def _find_name(name, key_format, k):
        # Advance k until the formatted name is unused. If the format has no
        # '{}', the candidate never changes with k; detecting a repeat
        # (new_name == prev) turns that infinite loop into an error.
        prev = None
        while True:
            new_name = (name + key_format.format(k))
            if (new_name == prev):
                raise QiskitOptimizationError(f'Variable name already exists: {new_name}')
            if (new_name in self._variables_index):
                k += 1
                prev = new_name
            else:
                break
        return (new_name, (k + 1))
    names = []
    variables = []
    k = self.get_num_vars()
    # Integer `keys` means "create this many auto-indexed variables";
    # a sequence means one variable per key.
    lst = (keys if isinstance(keys, Sequence) else range(keys))
    for key in lst:
        if isinstance(keys, Sequence):
            indexed_name = (name + key_format.format(key))
        else:
            (indexed_name, k) = _find_name(name, key_format, k)
        if (indexed_name in self._variables_index):
            raise QiskitOptimizationError(f'Variable name already exists: {indexed_name}')
        self._check_name(indexed_name, 'Variable')
        names.append(indexed_name)
        self._variables_index[indexed_name] = self.get_num_vars()
        variable = Variable(self, indexed_name, lowerbound, upperbound, vartype)
        self._variables.append(variable)
        variables.append(variable)
    return (names, variables)
def _var_dict(self, keys: Union[int, Sequence], lowerbound: Union[float, int], upperbound: Union[float, int], vartype: VarType, name: Optional[str], key_format: str) -> Dict[str, Variable]:
    """Create variables and return them keyed by their generated names."""
    created_names, created_vars = self._add_variables(keys, lowerbound, upperbound, vartype, name, key_format)
    return dict(zip(created_names, created_vars))

def _var_list(self, keys: Union[int, Sequence], lowerbound: Union[float, int], upperbound: Union[float, int], vartype: VarType, name: Optional[str], key_format: str) -> List[Variable]:
    """Create variables and return only the variable objects."""
    _, created_vars = self._add_variables(keys, lowerbound, upperbound, vartype, name, key_format)
    return created_vars
def continuous_var(self, lowerbound: Union[float, int]=0, upperbound: Union[float, int]=INFINITY, name: Optional[str]=None) -> Variable:
    """Add one continuous variable and return it."""
    vtype = Variable.Type.CONTINUOUS
    return self._add_variable(lowerbound, upperbound, vtype, name)

def continuous_var_dict(self, keys: Union[int, Sequence], lowerbound: Union[float, int]=0, upperbound: Union[float, int]=INFINITY, name: Optional[str]=None, key_format: str='{}') -> Dict[str, Variable]:
    """Add continuous variables and return them as a name-to-variable dict."""
    vtype = Variable.Type.CONTINUOUS
    return self._var_dict(keys, lowerbound, upperbound, vtype, name, key_format)

def continuous_var_list(self, keys: Union[int, Sequence], lowerbound: Union[float, int]=0, upperbound: Union[float, int]=INFINITY, name: Optional[str]=None, key_format: str='{}') -> List[Variable]:
    """Add continuous variables and return them as a list."""
    vtype = Variable.Type.CONTINUOUS
    return self._var_list(keys, lowerbound, upperbound, vtype, name, key_format)

def binary_var(self, name: Optional[str]=None) -> Variable:
    """Add one binary (0/1) variable and return it."""
    return self._add_variable(0, 1, Variable.Type.BINARY, name)

def binary_var_dict(self, keys: Union[int, Sequence], name: Optional[str]=None, key_format: str='{}') -> Dict[str, Variable]:
    """Add binary variables and return them as a name-to-variable dict."""
    return self._var_dict(keys, 0, 1, Variable.Type.BINARY, name, key_format)

def binary_var_list(self, keys: Union[int, Sequence], name: Optional[str]=None, key_format: str='{}') -> List[Variable]:
    """Add binary variables and return them as a list."""
    return self._var_list(keys, 0, 1, Variable.Type.BINARY, name, key_format)

def integer_var(self, lowerbound: Union[float, int]=0, upperbound: Union[float, int]=INFINITY, name: Optional[str]=None) -> Variable:
    """Add one integer variable and return it."""
    vtype = Variable.Type.INTEGER
    return self._add_variable(lowerbound, upperbound, vtype, name)

def integer_var_dict(self, keys: Union[int, Sequence], lowerbound: Union[float, int]=0, upperbound: Union[float, int]=INFINITY, name: Optional[str]=None, key_format: str='{}') -> Dict[str, Variable]:
    """Add integer variables and return them as a name-to-variable dict."""
    vtype = Variable.Type.INTEGER
    return self._var_dict(keys, lowerbound, upperbound, vtype, name, key_format)

def integer_var_list(self, keys: Union[int, Sequence], lowerbound: Union[float, int]=0, upperbound: Union[float, int]=INFINITY, name: Optional[str]=None, key_format: str='{}') -> List[Variable]:
    """Add integer variables and return them as a list."""
    vtype = Variable.Type.INTEGER
    return self._var_list(keys, lowerbound, upperbound, vtype, name, key_format)
def get_variable(self, i: Union[int, str]) -> Variable:
    """Return a variable by position (int) or by name (str)."""
    position = i if isinstance(i, (int, np.integer)) else self._variables_index[i]
    return self.variables[position]
def get_num_vars(self, vartype: Optional[VarType]=None) -> int:
    """Return the total number of variables, or the count of one type.

    Fix: the original tested ``if vartype:`` — a truthiness check that would
    silently return the total for any enum member that evaluates falsy
    (e.g. a value-0 IntEnum member). Compare against None explicitly.
    """
    if vartype is None:
        return len(self._variables)
    return sum((variable.vartype == vartype) for variable in self._variables)

def get_num_continuous_vars(self) -> int:
    """Return the number of continuous variables."""
    return self.get_num_vars(Variable.Type.CONTINUOUS)

def get_num_binary_vars(self) -> int:
    """Return the number of binary variables."""
    return self.get_num_vars(Variable.Type.BINARY)

def get_num_integer_vars(self) -> int:
    """Return the number of integer variables."""
    return self.get_num_vars(Variable.Type.INTEGER)
@property
def linear_constraints(self) -> List[LinearConstraint]:
    """Return the list of linear constraints.

    Fix: restored the ``@property`` decorator. ``linear_constraint`` below
    appends to ``self.linear_constraints`` and assigns into
    ``self.linear_constraints_index[name]``; both fail on a plain method.
    """
    return self._linear_constraints

@property
def linear_constraints_index(self) -> Dict[str, int]:
    """Return a dict mapping each linear-constraint name to its position."""
    return self._linear_constraints_index
def linear_constraint(self, linear: Union[(ndarray, spmatrix, List[float], Dict[(Union[(int, str)], float)])]=None, sense: Union[(str, ConstraintSense)]='<=', rhs: float=0.0, name: Optional[str]=None) -> LinearConstraint:
    """Add a linear constraint ``linear . x  sense  rhs`` and return it.

    If ``name`` is omitted, an unused auto-name ``c{k}`` is generated.
    Raises ``QiskitOptimizationError`` on a duplicate explicit name.
    NOTE(review): uses ``self.linear_constraints`` / ``self.linear_constraints_index``
    as attributes — the bare accessor defs above look like stripped
    ``@property`` decorators; confirm.
    """
    if name:
        if (name in self.linear_constraints_index):
            raise QiskitOptimizationError(f"Linear constraint's name already exists: {name}")
        self._check_name(name, 'Linear constraint')
    else:
        # Auto-name: start at the current count and skip any taken names.
        k = self.get_num_linear_constraints()
        while (f'c{k}' in self.linear_constraints_index):
            k += 1
        name = f'c{k}'
    # Recorded before appending, so the index equals the new position.
    self.linear_constraints_index[name] = len(self.linear_constraints)
    if (linear is None):
        linear = {}
    constraint = LinearConstraint(self, name, linear, Constraint.Sense.convert(sense), rhs)
    self.linear_constraints.append(constraint)
    return constraint
def get_linear_constraint(self, i: Union[int, str]) -> LinearConstraint:
    """Return a linear constraint by position (int) or by name (str)."""
    position = i if isinstance(i, int) else self._linear_constraints_index[i]
    return self._linear_constraints[position]

def get_num_linear_constraints(self) -> int:
    """Return how many linear constraints the problem has."""
    return len(self._linear_constraints)
@property
def quadratic_constraints(self) -> List[QuadraticConstraint]:
    """Return the list of quadratic constraints.

    Fix: restored the ``@property`` decorator. ``quadratic_constraint`` below
    appends to ``self.quadratic_constraints`` and assigns into
    ``self.quadratic_constraints_index[name]``; both fail on a plain method.
    """
    return self._quadratic_constraints

@property
def quadratic_constraints_index(self) -> Dict[str, int]:
    """Return a dict mapping each quadratic-constraint name to its position."""
    return self._quadratic_constraints_index
def quadratic_constraint(self, linear: Union[(ndarray, spmatrix, List[float], Dict[(Union[(int, str)], float)])]=None, quadratic: Union[(ndarray, spmatrix, List[List[float]], Dict[(Tuple[(Union[(int, str)], Union[(int, str)])], float)])]=None, sense: Union[(str, ConstraintSense)]='<=', rhs: float=0.0, name: Optional[str]=None) -> QuadraticConstraint:
    """Add a quadratic constraint ``x.Q.x + linear.x  sense  rhs`` and return it.

    If ``name`` is omitted, an unused auto-name ``q{k}`` is generated.
    Raises ``QiskitOptimizationError`` on a duplicate explicit name.
    NOTE(review): relies on the accessor defs above being properties
    (decorators look stripped in this copy) — confirm.
    """
    if name:
        if (name in self.quadratic_constraints_index):
            raise QiskitOptimizationError(f'Quadratic constraint name already exists: {name}')
        self._check_name(name, 'Quadratic constraint')
    else:
        # Auto-name: start at the current count and skip any taken names.
        k = self.get_num_quadratic_constraints()
        while (f'q{k}' in self.quadratic_constraints_index):
            k += 1
        name = f'q{k}'
    # Recorded before appending, so the index equals the new position.
    self.quadratic_constraints_index[name] = len(self.quadratic_constraints)
    if (linear is None):
        linear = {}
    if (quadratic is None):
        quadratic = {}
    constraint = QuadraticConstraint(self, name, linear, quadratic, Constraint.Sense.convert(sense), rhs)
    self.quadratic_constraints.append(constraint)
    return constraint
def get_quadratic_constraint(self, i: Union[int, str]) -> QuadraticConstraint:
    """Return a quadratic constraint by position (int) or by name (str)."""
    position = i if isinstance(i, int) else self._quadratic_constraints_index[i]
    return self._quadratic_constraints[position]

def get_num_quadratic_constraints(self) -> int:
    """Return how many quadratic constraints the problem has."""
    return len(self._quadratic_constraints)
def remove_linear_constraint(self, i: Union[str, int]) -> None:
    """Delete linear constraint *i* (name or position) and rebuild the name index."""
    position = self._linear_constraints_index[i] if isinstance(i, str) else i
    del self._linear_constraints[position]
    self._linear_constraints_index = {constraint.name: idx for idx, constraint in enumerate(self._linear_constraints)}

def remove_quadratic_constraint(self, i: Union[str, int]) -> None:
    """Delete quadratic constraint *i* (name or position) and rebuild the name index."""
    position = self._quadratic_constraints_index[i] if isinstance(i, str) else i
    del self._quadratic_constraints[position]
    self._quadratic_constraints_index = {constraint.name: idx for idx, constraint in enumerate(self._quadratic_constraints)}
@property
def objective(self) -> QuadraticObjective:
    """Return the quadratic objective of the problem.

    Declared as a property for consistency with the ``variables`` and
    ``linear_constraints`` accessors, which this class's own code requires
    to be properties; the decorator appears stripped in this copy.
    """
    return self._objective
def minimize(self, constant: float=0.0, linear: Union[(ndarray, spmatrix, List[float], Dict[(Union[(str, int)], float)])]=None, quadratic: Union[(ndarray, spmatrix, List[List[float]], Dict[(Tuple[(Union[(int, str)], Union[(int, str)])], float)])]=None) -> None:
    """Replace the objective with a minimization of constant + linear.x + x.quadratic.x."""
    self._objective = QuadraticObjective(self, constant, linear, quadratic, QuadraticObjective.Sense.MINIMIZE)

def maximize(self, constant: float=0.0, linear: Union[(ndarray, spmatrix, List[float], Dict[(Union[(str, int)], float)])]=None, quadratic: Union[(ndarray, spmatrix, List[List[float]], Dict[(Tuple[(Union[(int, str)], Union[(int, str)])], float)])]=None) -> None:
    """Replace the objective with a maximization of constant + linear.x + x.quadratic.x."""
    self._objective = QuadraticObjective(self, constant, linear, quadratic, QuadraticObjective.Sense.MAXIMIZE)
def _copy_from(self, other: 'QuadraticProgram', include_name: bool) -> None:
    """Adopt all attributes of *other* into this problem in place.

    This is a shallow adoption: the attribute objects are shared with
    *other*, not copied, and their back-references are re-pointed here.
    """
    for (attr, val) in vars(other).items():
        if ((attr == '_name') and (not include_name)):
            continue  # optionally keep this problem's own name
        # Re-point parent references so adopted elements belong to self.
        if isinstance(val, QuadraticProgramElement):
            val.quadratic_program = self
        if isinstance(val, list):
            for elem in val:
                if isinstance(elem, QuadraticProgramElement):
                    elem.quadratic_program = self
        setattr(self, attr, val)
def export_as_lp_string(self) -> str:
    """Return the problem serialized in CPLEX LP format via docplex."""
    # Local import avoids a circular dependency with the translators package.
    from ..translators.docplex_mp import to_docplex_mp
    return to_docplex_mp(self).export_as_lp_string()
@_optionals.HAS_CPLEX.require_in_call
def read_from_lp_file(self, filename: str) -> None:
    """Load this problem from a CPLEX LP file, replacing its current contents.

    Fix: the line above the def was a bare ``_optionals.HAS_CPLEX.require_in_call``
    expression — a decorator whose ``@`` was lost — so the CPLEX availability
    check was silently a no-op. Reattached as a decorator.
    """
    def _parse_problem_name(filename: str) -> str:
        # LP files may carry the model name in a leading '\Problem name:' comment;
        # comments only appear before the first non-comment line.
        prefix = '\\Problem name:'
        model_name = ''
        with open(filename, encoding='utf8') as file:
            for line in file:
                if line.startswith(prefix):
                    model_name = line[len(prefix):].strip()
                if (not line.startswith('\\')):
                    break
        return model_name
    from ..translators.docplex_mp import from_docplex_mp
    model = ModelReader().read(filename, model_name=_parse_problem_name(filename))
    other = from_docplex_mp(model)
    self._copy_from(other, include_name=True)
def write_to_lp_file(self, filename: str) -> None:
    """Write the problem to *filename* in CPLEX LP format via docplex."""
    # Local import avoids a circular dependency with the translators package.
    from ..translators.docplex_mp import to_docplex_mp
    mdl = to_docplex_mp(self)
    mdl.export_as_lp(filename)
def substitute_variables(self, constants: Optional[Dict[(Union[(str, int)], float)]]=None, variables: Optional[Dict[(Union[(str, int)], Tuple[(Union[(str, int)], float)])]]=None) -> 'QuadraticProgram':
    """Return a new problem with variables fixed to constants or replaced by scaled variables."""
    from .substitute_variables import substitute_variables
    return substitute_variables(self, constants, variables)

def to_ising(self) -> Tuple[(SparsePauliOp, float)]:
    """Return the Ising-Hamiltonian operator and constant offset for this problem."""
    from ..translators.ising import to_ising
    return to_ising(self)

def from_ising(self, qubit_op: BaseOperator, offset: float=0.0, linear: bool=False) -> None:
    """Replace this problem's contents (except its name) with one built from an Ising operator."""
    from ..translators.ising import from_ising
    other = from_ising(qubit_op, offset, linear)
    self._copy_from(other, include_name=False)
def get_feasibility_info(self, x: Union[List[float], np.ndarray]) -> Tuple[bool, List[Variable], List[Constraint]]:
    """Evaluate *x* against variable bounds and all constraints.

    Returns ``(feasible, violated_variables, violated_constraints)``.
    Raises ``QiskitOptimizationError`` if ``len(x)`` differs from the
    number of variables.
    """
    if (len(x) != self.get_num_vars()):
        raise QiskitOptimizationError(f'The size of solution `x`: {len(x)}, does not match the number of problem variables: {self.get_num_vars()}')
    violated_variables = []
    for idx, value in enumerate(x):
        var = self.get_variable(idx)
        if value < var.lowerbound or var.upperbound < value:
            violated_variables.append(var)
    all_constraints = cast(List[Constraint], self._linear_constraints) + cast(List[Constraint], self._quadratic_constraints)
    violated_constraints = []
    for constraint in all_constraints:
        lhs = constraint.evaluate(x)
        sense = constraint.sense
        violated = (
            (sense == ConstraintSense.LE and lhs > constraint.rhs)
            or (sense == ConstraintSense.GE and lhs < constraint.rhs)
            or (sense == ConstraintSense.EQ and not isclose(lhs, constraint.rhs))
        )
        if violated:
            violated_constraints.append(constraint)
    feasible = not violated_variables and not violated_constraints
    return feasible, violated_variables, violated_constraints
def is_feasible(self, x: Union[List[float], np.ndarray]) -> bool:
    """Return True iff *x* satisfies every bound and constraint."""
    return self.get_feasibility_info(x)[0]
def prettyprint(self, wrap: int=80) -> str:
    """Return a human-readable rendering of the problem, wrapped at *wrap* columns."""
    # Local import avoids a circular dependency with the translators package.
    from qiskit_optimization.translators.prettyprint import prettyprint
    return prettyprint(self, wrap)
def _check_name(name: str, name_type: str) -> None:
if (not name.isprintable()):
warn(f'{name_type} name is not printable: {repr(name)}') |
def get_xml_type(val):
    """Return the XML type tag used for *val* when serializing to XML.

    Maps None/bool/str/int/float explicitly, then falls back to
    Number -> 'number', dict -> 'dict', iterable -> 'list', and finally
    the raw type name. Order matters: bool is tested before int because
    ``type(val).__name__`` distinguishes them where isinstance would not.
    """
    LOG.info(('Inside get_xml_type(). val = "%s", type(val) = "%s"' % (val, type(val).__name__)))
    if (type(val).__name__ == 'NoneType'):
        LOG.info("type(val).__name__ == 'NoneType', returning 'null'")
        return 'null'
    elif (type(val).__name__ == 'bool'):
        LOG.info("type(val).__name__ == 'bool', returning 'bool'")
        return 'bool'
    elif (type(val).__name__ in ('str', 'unicode')):
        # 'unicode' / 'long' branches are Python 2 leftovers; harmless on 3.
        LOG.info("type(val).__name__ in ('str', unicode'), returning 'str'")
        return 'str'
    elif (type(val).__name__ in ('int', 'long')):
        LOG.info("type(val).__name__ in ('int', long'), returning 'int'")
        return 'int'
    elif (type(val).__name__ == 'float'):
        LOG.info("type(val).__name__ == 'float', returning 'float'")
        return 'float'
    elif isinstance(val, numbers.Number):
        LOG.info("isinstance(val, numbers.Number), returning 'number'")
        return 'number'
    elif isinstance(val, dict):
        LOG.info("isinstance(val, dict), returning 'dict'")
        return 'dict'
    elif isinstance(val, iterable):
        # NOTE(review): 'iterable' is not defined in this chunk — presumably a
        # module-level alias for collections.abc.Iterable (or a type tuple);
        # confirm it exists at file top, otherwise this branch raises NameError.
        LOG.info("isinstance(val, iterable), returning 'list'")
        return 'list'
    LOG.info(("type not found, returning '%s'" % type(val).__name__))
    return type(val).__name__
def test_paintEvent(skip_qtbot, canvas, mocker):
    """Verify canvas.paintEvent constructs a QPainter on the canvas and draws.

    Patches the QPainter class so the real Qt paint machinery never runs;
    asserts two ellipses of the observed radii and at least one text draw.
    ``skip_qtbot`` / ``canvas`` are pytest fixtures defined elsewhere.
    """
    event = MagicMock()
    mock_painter: MagicMock = mocker.patch('PySide6.QtGui.QPainter')
    canvas.paintEvent(event)
    # The painter must be created for this widget specifically.
    mock_painter.assert_called_once_with(canvas)
    draw_ellipse: MagicMock = mock_painter.return_value.drawEllipse
    # Position arg is implementation-detail; only the radii are pinned.
    draw_ellipse.assert_any_call(ANY, 5, 5)
    draw_ellipse.assert_any_call(ANY, 7, 7)
    mock_painter.return_value.drawText.assert_called()
def basic_multivector_operations_2D():
    """Demo: 2D multivector products using the legacy galgebra ``MV`` API.

    Prints the metric, a generic vector X and spinor A, and their
    inner (|), left (<) and right (>) contraction products.
    NOTE(review): ``MV.setup``/``Fmt`` belong to the old sympy-galgebra
    interface; this demo only runs against that version.
    """
    Print_Function()
    (ex, ey) = MV.setup('e*x|y')
    print('g_{ij} =', MV.metric)
    X = MV('X', 'vector')
    A = MV('A', 'spinor')
    X.Fmt(1, 'X')
    A.Fmt(1, 'A')
    # Fmt(2, ...) prints the product with the given label.
    (X | A).Fmt(2, 'X|A')
    (X < A).Fmt(2, 'X<A')
    (A > X).Fmt(2, 'A>X')
    return
class TestRandomRegionEmpirical():
    """Regression tests for RandomRegion on the Mexico dataset.

    Expected region assignments are pinned to a fixed numpy seed
    (RANDOM_STATE); any change to the RNG call sequence inside
    RandomRegion will break these tests by design.
    """
    def setup_method(self):
        # Fresh copy per test: cardinalities per HANSON03 class, area ids,
        # and a Queen contiguity matrix.
        self.mexico = MEXICO.copy()
        self.cards = self.mexico.groupby(by='HANSON03').count().NAME.values.tolist()
        self.ids = self.mexico.index.values.tolist()
        self.w = libpysal.weights.Queen.from_dataframe(self.mexico)
    def test_random_region_6_card(self):
        """Six regions constrained only by cardinality."""
        known_regions = [[27, 12, 18, 3, 15, 8], [0, 25, 21, 20, 7, 6, 24], [23, 10, 13, 11, 19, 16, 26, 14, 17, 22], [28, 31], [30, 9, 4], [1, 29, 5, 2]]
        numpy.random.seed(RANDOM_STATE)
        kwargs = {'num_regions': 6, 'cardinality': self.cards}
        model = RandomRegion(self.ids, **kwargs)
        numpy.testing.assert_array_equal(numpy.array(known_regions, dtype=object), numpy.array(model.regions, dtype=object))
    def test_random_region_6_card_contig_compact(self):
        """Six regions with cardinality, contiguity, and compactness constraints."""
        known_regions = [[27, 29, 5, 26, 3, 24, 4, 30, 23, 2], [12, 31, 7, 15, 18, 10, 17], [8, 11], [21, 19, 20, 14, 13, 16], [0, 22, 1, 25], [28, 6, 9]]
        numpy.random.seed(RANDOM_STATE)
        kwargs = {'num_regions': 6, 'cardinality': self.cards, 'contiguity': self.w, 'compact': True}
        model = RandomRegion(self.ids, **kwargs)
        numpy.testing.assert_array_equal(numpy.array(known_regions, dtype=object), numpy.array(model.regions, dtype=object))
class LightningModel(pl.LightningModule):
    """Lightning wrapper adding cross-entropy training and accuracy logging
    to an arbitrary classification ``model``."""
    def __init__(self, model, learning_rate):
        super().__init__()
        self.learning_rate = learning_rate
        self.model = model
        # Mirror the wrapped model's dropout rate so it lands in the
        # hyperparameter log (the model object itself is excluded below).
        if hasattr(model, 'dropout_proba'):
            self.dropout_proba = model.dropout_proba
        self.save_hyperparameters(ignore=['model'])
        # Separate metric objects per split — torchmetrics accumulates state.
        self.train_acc = torchmetrics.Accuracy()
        self.valid_acc = torchmetrics.Accuracy()
        self.test_acc = torchmetrics.Accuracy()
    def forward(self, x):
        return self.model(x)
    def _shared_step(self, batch):
        """Run one forward pass; return (loss, true_labels, predicted_labels)."""
        (features, true_labels) = batch
        logits = self(features)
        loss = torch.nn.functional.cross_entropy(logits, true_labels)
        predicted_labels = torch.argmax(logits, dim=1)
        return (loss, true_labels, predicted_labels)
    def training_step(self, batch, batch_idx):
        (loss, true_labels, predicted_labels) = self._shared_step(batch)
        self.log('train_loss', loss)
        # Second forward pass in eval mode — presumably so train accuracy is
        # measured without dropout/batch-norm noise; costs an extra forward
        # per step. The loss above still comes from the train-mode pass.
        self.model.eval()
        with torch.no_grad():
            (_, true_labels, predicted_labels) = self._shared_step(batch)
        self.train_acc(predicted_labels, true_labels)
        self.log('train_acc', self.train_acc, on_epoch=True, on_step=False)
        self.model.train()
        return loss
    def validation_step(self, batch, batch_idx):
        (loss, true_labels, predicted_labels) = self._shared_step(batch)
        self.log('valid_loss', loss)
        self.valid_acc(predicted_labels, true_labels)
        self.log('valid_acc', self.valid_acc, on_epoch=True, on_step=False, prog_bar=True)
    def test_step(self, batch, batch_idx):
        (loss, true_labels, predicted_labels) = self._shared_step(batch)
        self.test_acc(predicted_labels, true_labels)
        self.log('test_acc', self.test_acc, on_epoch=True, on_step=False)
    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
        return optimizer
class untitled(App):
    """remi demo app: shows how ``justify-content`` and ``align-items`` affect
    a VBox and an HBox, driven live by two drop-down selectors."""
    def main(self):
        # Absolute-positioned root; the grey sub-container holds the two flex boxes.
        mainContainer = Container(width=706, height=445, margin='0px auto', style='position: relative')
        subContainer = HBox(width=630, height=277, style='position: absolute; left: 40px; top: 150px; background-color: #b6b6b6')
        # Vertical box with three buttons.
        vbox = VBox(width=300, height=250)
        bt1 = Button('bt1', width=100, height=30)
        vbox.append(bt1, 'bt1')
        bt3 = Button('bt3', width=100, height=30)
        vbox.append(bt3, 'bt3')
        bt2 = Button('bt2', width=100, height=30)
        vbox.append(bt2, 'bt2')
        subContainer.append(vbox, 'vbox')
        # Horizontal box with three colored labels.
        hbox = HBox(width=300, height=250)
        lbl1 = Label('lbl1', width=50, height=50, style='background-color: #ffb509')
        hbox.append(lbl1, 'lbl1')
        lbl2 = Label('lbl2', width=50, height=50, style='background-color: #40ff2b')
        hbox.append(lbl2, 'lbl2')
        lbl3 = Label('lbl3', width=50, height=50, style='background-color: #e706ff')
        hbox.append(lbl3, 'lbl3')
        subContainer.append(hbox, 'hbox')
        mainContainer.append(subContainer, 'subContainer')
        # Selectors for the two flexbox properties.
        comboJustifyContent = gui.DropDown.new_from_list(('flex-start', 'flex-end', 'center', 'space-between', 'space-around'), style='left: 160px; position: absolute; top: 60px; width: 148px; height: 30px')
        mainContainer.append(comboJustifyContent, 'comboJustifyContent')
        lblJustifyContent = Label('justify-content', style='left: 40px; position: absolute; top: 60px; width: 100px; height: 30px')
        mainContainer.append(lblJustifyContent, 'lblJustifyContent')
        comboAlignItems = gui.DropDown.new_from_list(('stretch', 'center', 'flex-start', 'flex-end', 'baseline'), style='left:160px; position:absolute; top:100px; width:152px; height: 30px')
        mainContainer.append(comboAlignItems, 'comboAlignItems')
        lblAlignItems = Label('align-items', style='left:40px; position:absolute; top:100px; width:100px; height:30px')
        mainContainer.append(lblAlignItems, 'lblAlignItems')
        # Both boxes are passed as extra userdata to each change handler.
        mainContainer.children['comboJustifyContent'].onchange.do(self.onchange_comboJustifyContent, vbox, hbox)
        mainContainer.children['comboAlignItems'].onchange.do(self.onchange_comboAlignItems, vbox, hbox)
        lblTitle = gui.Label('The following example shows the two main layout style properties for the VBox and HBox containers. Change the value of the two combo boxes.', style='position:absolute; left:0px; top:0px')
        mainContainer.append(lblTitle)
        self.mainContainer = mainContainer
        return self.mainContainer
    def onchange_comboJustifyContent(self, emitter, new_value, vbox, hbox):
        """Apply the selected justify-content to both boxes."""
        vbox.style['justify-content'] = new_value
        hbox.style['justify-content'] = new_value
    def onchange_comboAlignItems(self, emitter, new_value, vbox, hbox):
        """Apply the selected align-items to both boxes."""
        vbox.style['align-items'] = new_value
        hbox.style['align-items'] = new_value
def test_lineanchors_with_startnum():
    """HtmlFormatter: line anchors must honor linenostart.

    With lineanchors='foo' and linenostart=5, the first emitted anchor id
    must be 'foo-5', not 'foo-1'. ``tokensource`` is a module-level fixture.
    """
    optdict = dict(lineanchors='foo', linenostart=5)
    outfile = StringIO()
    fmt = HtmlFormatter(**optdict)
    fmt.format(tokensource, outfile)
    html = outfile.getvalue()
    assert re.search('<pre>\\s*<span>\\s*</span>\\s*<a id="foo-5" name="foo-5" href="#foo-5">', html)
def get_default_image_compressor(**kwargs):
    """Return the preferred lossy image codec instance.

    JPEG XL is used when the installed imagecodecs build supports it,
    otherwise JPEG 2000. Keyword arguments override the codec defaults.
    """
    if imagecodecs.JPEGXL:
        defaults = {'effort': 3, 'distance': 0.3, 'decodingspeed': 1}
        return JpegXl(**{**defaults, **kwargs})
    defaults = {'level': 50}
    return Jpeg2k(**{**defaults, **kwargs})
class CQADupstackPhysicsRetrieval(AbsTaskRetrieval, BeIRTask):
    """MTEB task wrapper for the CQADupStack physics retrieval subset."""
    def description(self):
        """Return the task metadata dict consumed by the benchmark harness.

        Fix: the original literal read ``'reference': ' 'type': ...`` — the
        reference URL was truncated out, leaving a syntax error. Restored
        the CQADupStack homepage as the reference value.
        """
        return {
            'name': 'CQADupstackPhysicsRetrieval',
            'beir_name': 'cqadupstack/physics',
            'description': 'CQADupStack: A Benchmark Data Set for Community Question-Answering Research',
            'reference': 'http://nlp.cis.unimelb.edu.au/resources/cqadupstack/',
            'type': 'Retrieval',
            'category': 's2p',
            'eval_splits': ['test'],
            'eval_langs': ['en'],
            'main_score': 'ndcg_at_10',
        }
def assert_gc_integrity(expect_storage_removed=True):
    """Context-manager-style generator verifying garbage collection integrity.

    Snapshots dangling storage/label/manifest counts and known blob digests
    before yielding to the test body, then asserts the counts are unchanged,
    that blobs not present beforehand were really deleted from storage, and
    that every remaining manifest's referenced blobs still exist.
    NOTE(review): the bare ``yield`` implies this is wrapped by
    ``@contextmanager`` (or pytest fixture machinery) at the definition site
    above this chunk — confirm.
    """
    # Collect the storage paths removed during the test via the cleanup hook.
    removed_image_storages = []
    remove_callback = model.config.register_image_cleanup_callback(removed_image_storages.extend)
    # Digests that exist before the body runs are exempt from the
    # "must be deleted" check below.
    existing_digests = set()
    for storage_row in ImageStorage.select():
        if storage_row.cas_path:
            existing_digests.add(storage_row.content_checksum)
    for blob_row in ApprBlob.select():
        existing_digests.add(blob_row.digest)
    existing_storage_count = _get_dangling_storage_count()
    existing_label_count = _get_dangling_label_count()
    existing_manifest_count = _get_dangling_manifest_count()
    with check_transitive_modifications():
        try:
            (yield)
        finally:
            # Always unregister the hook, even if the body raised.
            remove_callback()
    # Dangling-row counts must be unchanged by the body.
    updated_storage_count = _get_dangling_storage_count()
    assert (updated_storage_count == existing_storage_count)
    updated_label_count = _get_dangling_label_count()
    assert (updated_label_count == existing_label_count), _get_dangling_labels()
    updated_manifest_count = _get_dangling_manifest_count()
    assert (updated_manifest_count == existing_manifest_count)
    # Any blob created during the body must have been physically removed;
    # get_content raising for pre-existing digests is avoided via the skip.
    preferred = storage.preferred_locations[0]
    for storage_row in ImageStorage.select():
        if (storage_row.content_checksum in existing_digests):
            continue
        if storage_row.cas_path:
            storage.get_content({preferred}, storage.blob_path(storage_row.content_checksum))
    for blob_row in ApprBlob.select():
        if (blob_row.digest in existing_digests):
            continue
        storage.get_content({preferred}, storage.blob_path(blob_row.digest))
    # Every surviving tagged manifest must still reference exactly the blobs
    # recorded for it in the database.
    for manifest in {t.manifest for t in Tag.select()}:
        found_blobs = {b.blob.content_checksum for b in ManifestBlob.select().where((ManifestBlob.manifest == manifest))}
        parsed = parse_manifest_from_bytes(Bytes.for_string_or_unicode(manifest.manifest_bytes), manifest.media_type.name)
        assert (set(parsed.local_blob_digests) == found_blobs)
class unit_fusion(nn.Module):
    """Late-fusion classification head: sums two equally-shaped feature
    tensors and projects the result to class logits.

    Args:
        num_class: number of output classes (logit dimension).
        in_channels: dimensionality of each input feature vector.
    """

    def __init__(self, num_class, in_channels=256):
        super(unit_fusion, self).__init__()
        self.fc = nn.Linear(in_channels, num_class)
        # Fix: nn.init.normal is the long-deprecated alias; use the in-place
        # nn.init.normal_ (same semantics, no DeprecationWarning/removal risk).
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2.0 / num_class))

    def forward(self, x1, x2):
        """Return logits for the element-wise sum of the two streams."""
        fused = x1 + x2
        return self.fc(fused)
def get_core_candidates(pathtocheck):
    """List files matching *pathtocheck*'s basename whose sizes are known core sizes.

    Runs a remote 'dir' command over the containing directory; returns the
    matching non-directory file items, or [] when the command fails.
    """
    cmd = ops.cmd.getDszCommand('dir', path=('"%s"' % os.path.dirname(pathtocheck)), mask=('"%s"' % os.path.basename(pathtocheck)))
    obj = cmd.execute()
    if cmd.success:
        # Double 'if' in the comprehension: keep only regular files whose
        # size is in the CODE_CORE_KNOWN_SIZES allow-list.
        candidates = [f for d in obj.diritem for f in d.fileitem if (f.attributes.directory == 0) if (f.size in CODE_CORE_KNOWN_SIZES)]
        return candidates
    return []
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.