def main(train_file, valid_file, test_file, embeddings_file, target_dir, hidden_size=300, dropout=0.5, num_classes=2, epochs=64, batch_size=32, lr=0.0004, patience=5, max_grad_norm=10.0, checkpoint=None, proportion=1, output=None):
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
print((20 * '='), ' Preparing for training ', (20 * '='))
if (not os.path.exists(target_dir)):
os.makedirs(target_dir)
print('\t* Loading training data...')
with open(train_file, 'rb') as pkl:
train_data = NLIDataset(pickle.load(pkl), proportion, isRandom=True)
train_loader = DataLoader(train_data, shuffle=False, batch_size=batch_size, drop_last=False)
print('\t* Loading validation data...')
with open(valid_file, 'rb') as pkl:
valid_data = NLIDataset(pickle.load(pkl))
valid_loader = DataLoader(valid_data, shuffle=False, batch_size=batch_size, drop_last=False)
print('\t* Loading test data...')
with open(test_file, 'rb') as pkl:
test_data = NLIDataset(pickle.load(pkl))
test_loader = DataLoader(test_data, shuffle=False, batch_size=batch_size, drop_last=False)
print('\t* Building model...')
with open(embeddings_file, 'rb') as pkl:
embeddings = torch.tensor(pickle.load(pkl), dtype=torch.float).to(device)
model = ESIM(embeddings.shape[0], embeddings.shape[1], hidden_size, embeddings=embeddings, dropout=dropout, num_classes=num_classes, device=device, isSTS=True).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=0)
best_score = 0.0
start_epoch = 1
epochs_count = []
train_losses = []
valid_losses = []
if checkpoint:
checkpoint = torch.load(checkpoint)
start_epoch = (checkpoint['epoch'] + 1)
best_score = checkpoint['best_score']
print('\t* Training will continue on existing model from epoch {}...'.format(start_epoch))
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
epochs_count = checkpoint['epochs_count']
train_losses = checkpoint['train_losses']
valid_losses = checkpoint['valid_losses']
(_, valid_loss, p, s) = validate(model, valid_loader, criterion)
print('\t* Validation loss before training: {:.4f}, p correlation: {:.4f}, s correlation: {:.4f}'.format(valid_loss, (p * 100), (s * 100)))
print('\n', (20 * '='), 'Training ESIM model on device: {}'.format(device), (20 * '='))
patience_counter = 0
for epoch in range(start_epoch, (epochs + 1)):
epochs_count.append(epoch)
print('* Training epoch {}:'.format(epoch))
(epoch_time, epoch_loss, p, s) = train(model, train_loader, optimizer, criterion, epoch, max_grad_norm)
train_losses.append(epoch_loss)
print('-> Training time: {:.4f}s, loss = {:.4f}, p correlation: {:.4f}, s correlation: {:.4f}'.format(epoch_time, epoch_loss, (p * 100), (s * 100)))
print('* Validation for epoch {}:'.format(epoch))
(epoch_time, epoch_loss, val_p, s) = validate(model, valid_loader, criterion)
valid_losses.append(epoch_loss)
print('-> Valid. time: {:.4f}s, loss: {:.4f}, p correlation: {:.4f}, s correlation: {:.4f}'.format(epoch_time, epoch_loss, (val_p * 100), (s * 100)))
scheduler.step(val_p)
print('* Testing for epoch {}:'.format(epoch))
(batch_time, total_time, p, s) = test(model, test_loader)
print('-> Average batch processing time: {:.4f}s, total test time: {:.4f}s, p correlation: {:.4f}, s correlation: {:.4f}'.format(batch_time, total_time, (p * 100), (s * 100)))
print((40 * '=='))
if (epoch > 10):
if (val_p <= best_score):
patience_counter += 1
else:
best_score = val_p
patience_counter = 0
torch.save({'epoch': epoch, 'model': model.state_dict(), 'best_score': best_score, 'epochs_count': epochs_count, 'train_losses': train_losses, 'valid_losses': valid_losses}, os.path.join(target_dir, (((output + '_') + str(proportion)) + '_best.pth.tar')))
if (patience_counter >= patience):
print('-> Early stopping: patience limit reached, stopping...')
checkpoint = torch.load(os.path.join(target_dir, (((output + '_') + str(proportion)) + '_best.pth.tar')))
vocab_size = checkpoint['model']['_word_embedding.weight'].size(0)
embedding_dim = checkpoint['model']['_word_embedding.weight'].size(1)
hidden_size = checkpoint['model']['_projection.0.weight'].size(0)
num_classes = checkpoint['model']['_classification.4.weight'].size(0)
print('\t* Final test...')
model = ESIM(vocab_size, embedding_dim, hidden_size, num_classes=num_classes, device=device, isSTS=True).to(device)
model.load_state_dict(checkpoint['model'])
(batch_time, total_time, p, s) = test(model, test_loader)
print('-> Final p correlation: {:.4f}, s correlation: {:.4f}'.format((p * 100), (s * 100)))
os.remove(os.path.join(target_dir, (((output + '_') + str(proportion)) + '_best.pth.tar')))
break
if (epoch == 30):
checkpoint = torch.load(os.path.join(target_dir, (((output + '_') + str(proportion)) + '_best.pth.tar')))
vocab_size = checkpoint['model']['_word_embedding.weight'].size(0)
embedding_dim = checkpoint['model']['_word_embedding.weight'].size(1)
hidden_size = checkpoint['model']['_projection.0.weight'].size(0)
num_classes = checkpoint['model']['_classification.4.weight'].size(0)
print('\t* Final test...')
model = ESIM(vocab_size, embedding_dim, hidden_size, num_classes=num_classes, device=device, isSTS=True).to(device)
model.load_state_dict(checkpoint['model'])
(batch_time, total_time, p, s) = test(model, test_loader)
print('-> Final p correlation: {:.4f}, s correlation: {:.4f}'.format((p * 100), (s * 100)))
os.remove(os.path.join(target_dir, (((output + '_') + str(proportion)) + '_best.pth.tar')))
break |
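The loop above ties together three pieces: a ReduceLROnPlateau scheduler stepped on the validation correlation, a best-checkpoint save, and patience-based early stopping. A minimal, self-contained sketch of that pattern, with a toy linear model and random data standing in for ESIM and the NLI loaders (all names here are illustrative, not from the original script):

import torch
import torch.nn as nn

model = nn.Linear(8, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=4e-4)
criterion = nn.MSELoss()
# mode='max' because the monitored quantity (a correlation) should increase
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=0)

best_score, patience, patience_counter = float('-inf'), 5, 0
x, y = torch.randn(64, 8), torch.randn(64, 1)

for epoch in range(1, 65):
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()

    val_score = -loss.item()  # stand-in for the validation correlation
    scheduler.step(val_score)

    if val_score <= best_score:
        patience_counter += 1
    else:
        best_score, patience_counter = val_score, 0
        torch.save({'epoch': epoch, 'model': model.state_dict(), 'best_score': best_score}, 'best.pth.tar')
    if patience_counter >= patience:
        print('Early stopping: patience limit reached')
        break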
class SASLexer(RegexLexer):
name = 'SAS'
aliases = ['sas']
filenames = ['*.SAS', '*.sas']
mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas']
url = '
version_added = '2.2'
flags = (re.IGNORECASE | re.MULTILINE)
builtins_macros = ('bquote', 'nrbquote', 'cmpres', 'qcmpres', 'compstor', 'datatyp', 'display', 'do', 'else', 'end', 'eval', 'global', 'goto', 'if', 'index', 'input', 'keydef', 'label', 'left', 'length', 'let', 'local', 'lowcase', 'macro', 'mend', 'nrquote', 'nrstr', 'put', 'qleft', 'qlowcase', 'qscan', 'qsubstr', 'qsysfunc', 'qtrim', 'quote', 'qupcase', 'scan', 'str', 'substr', 'superq', 'syscall', 'sysevalf', 'sysexec', 'sysfunc', 'sysget', 'syslput', 'sysprod', 'sysrc', 'sysrput', 'then', 'to', 'trim', 'unquote', 'until', 'upcase', 'verify', 'while', 'window')
builtins_conditionals = ('do', 'if', 'then', 'else', 'end', 'until', 'while')
builtins_statements = ('abort', 'array', 'attrib', 'by', 'call', 'cards', 'cards4', 'catname', 'continue', 'datalines', 'datalines4', 'delete', 'delim', 'delimiter', 'display', 'dm', 'drop', 'endsas', 'error', 'file', 'filename', 'footnote', 'format', 'goto', 'in', 'infile', 'informat', 'input', 'keep', 'label', 'leave', 'length', 'libname', 'link', 'list', 'lostcard', 'merge', 'missing', 'modify', 'options', 'output', 'out', 'page', 'put', 'redirect', 'remove', 'rename', 'replace', 'retain', 'return', 'select', 'set', 'skip', 'startsas', 'stop', 'title', 'update', 'waitsas', 'where', 'window', 'x', 'systask')
builtins_sql = ('add', 'and', 'alter', 'as', 'cascade', 'check', 'create', 'delete', 'describe', 'distinct', 'drop', 'foreign', 'from', 'group', 'having', 'index', 'insert', 'into', 'in', 'key', 'like', 'message', 'modify', 'msgtype', 'not', 'null', 'on', 'or', 'order', 'primary', 'references', 'reset', 'restrict', 'select', 'set', 'table', 'unique', 'update', 'validate', 'view', 'where')
builtins_functions = ('abs', 'addr', 'airy', 'arcos', 'arsin', 'atan', 'attrc', 'attrn', 'band', 'betainv', 'blshift', 'bnot', 'bor', 'brshift', 'bxor', 'byte', 'cdf', 'ceil', 'cexist', 'cinv', 'close', 'cnonct', 'collate', 'compbl', 'compound', 'compress', 'cos', 'cosh', 'css', 'curobs', 'cv', 'daccdb', 'daccdbsl', 'daccsl', 'daccsyd', 'dacctab', 'dairy', 'date', 'datejul', 'datepart', 'datetime', 'day', 'dclose', 'depdb', 'depdbsl', 'depsl', 'depsyd', 'deptab', 'dequote', 'dhms', 'dif', 'digamma', 'dim', 'dinfo', 'dnum', 'dopen', 'doptname', 'doptnum', 'dread', 'dropnote', 'dsname', 'erf', 'erfc', 'exist', 'exp', 'fappend', 'fclose', 'fcol', 'fdelete', 'fetch', 'fetchobs', 'fexist', 'fget', 'fileexist', 'filename', 'fileref', 'finfo', 'finv', 'fipname', 'fipnamel', 'fipstate', 'floor', 'fnonct', 'fnote', 'fopen', 'foptname', 'foptnum', 'fpoint', 'fpos', 'fput', 'fread', 'frewind', 'frlen', 'fsep', 'fuzz', 'fwrite', 'gaminv', 'gamma', 'getoption', 'getvarc', 'getvarn', 'hbound', 'hms', 'hosthelp', 'hour', 'ibessel', 'index', 'indexc', 'indexw', 'input', 'inputc', 'inputn', 'int', 'intck', 'intnx', 'intrr', 'irr', 'jbessel', 'juldate', 'kurtosis', 'lag', 'lbound', 'left', 'length', 'lgamma', 'libname', 'libref', 'log', 'log10', 'log2', 'logpdf', 'logpmf', 'logsdf', 'lowcase', 'max', 'mdy', 'mean', 'min', 'minute', 'mod', 'month', 'mopen', 'mort', 'n', 'netpv', 'nmiss', 'normal', 'note', 'npv', 'open', 'ordinal', 'pathname', 'pdf', 'peek', 'peekc', 'pmf', 'point', 'poisson', 'poke', 'probbeta', 'probbnml', 'probchi', 'probf', 'probgam', 'probhypr', 'probit', 'probnegb', 'probnorm', 'probt', 'put', 'putc', 'putn', 'qtr', 'quote', 'ranbin', 'rancau', 'ranexp', 'rangam', 'range', 'rank', 'rannor', 'ranpoi', 'rantbl', 'rantri', 'ranuni', 'repeat', 'resolve', 'reverse', 'rewind', 'right', 'round', 'saving', 'scan', 'sdf', 'second', 'sign', 'sin', 'sinh', 'skewness', 'soundex', 'spedis', 'sqrt', 'std', 'stderr', 'stfips', 'stname', 'stnamel', 'substr', 'sum', 'symget', 'sysget', 'sysmsg', 'sysprod', 'sysrc', 'system', 'tan', 'tanh', 'time', 'timepart', 'tinv', 'tnonct', 'today', 'translate', 'tranwrd', 'trigamma', 'trim', 'trimn', 'trunc', 'uniform', 'upcase', 'uss', 'var', 'varfmt', 'varinfmt', 'varlabel', 'varlen', 'varname', 'varnum', 'varray', 'varrayx', 'vartype', 'verify', 'vformat', 'vformatd', 'vformatdx', 'vformatn', 'vformatnx', 'vformatw', 'vformatwx', 'vformatx', 'vinarray', 'vinarrayx', 'vinformat', 'vinformatd', 'vinformatdx', 'vinformatn', 'vinformatnx', 'vinformatw', 'vinformatwx', 'vinformatx', 'vlabel', 'vlabelx', 'vlength', 'vlengthx', 'vname', 'vnamex', 'vtype', 'vtypex', 'weekday', 'year', 'yyq', 'zipfips', 'zipname', 'zipnamel', 'zipstate')
tokens = {'root': [include('comments'), include('proc-data'), include('cards-datalines'), include('logs'), include('general'), ('.', Text)], 'comments': [('^\\s*\\*.*?;', Comment), ('/\\*.*?\\*/', Comment), ('^\\s*\\*(.|\\n)*?;', Comment.Multiline), ('/[*](.|\\n)*?[*]/', Comment.Multiline)], 'proc-data': [('(^|;)\\s*(proc \\w+|data|run|quit)[\\s;]', Keyword.Reserved)], 'cards-datalines': [('^\\s*(datalines|cards)\\s*;\\s*$', Keyword, 'data')], 'data': [('(.|\\n)*^\\s*;\\s*$', Other, '#pop')], 'logs': [('\\n?^\\s*%?put ', Keyword, 'log-messages')], 'log-messages': [('NOTE(:|-).*', Generic, '#pop'), ('WARNING(:|-).*', Generic.Emph, '#pop'), ('ERROR(:|-).*', Generic.Error, '#pop'), include('general')], 'general': [include('keywords'), include('vars-strings'), include('special'), include('numbers')], 'keywords': [(words(builtins_statements, prefix='\\b', suffix='\\b'), Keyword), (words(builtins_sql, prefix='\\b', suffix='\\b'), Keyword), (words(builtins_conditionals, prefix='\\b', suffix='\\b'), Keyword), (words(builtins_macros, prefix='%', suffix='\\b'), Name.Builtin), (words(builtins_functions, prefix='\\b', suffix='\\('), Name.Builtin)], 'vars-strings': [('&[a-z_]\\w{0,31}\\.?', Name.Variable), ('%[a-z_]\\w{0,31}', Name.Function), ("\\'", String, 'string_squote'), ('"', String, 'string_dquote')], 'string_squote': [("'", String, '#pop'), ('|\\\\"|\\\\\\n', String.Escape), ("[^$\\'\\\\]+", String), ("[$\\'\\\\]", String)], 'string_dquote': [('"', String, '#pop'), ('|\\\\"|\\\\\\n', String.Escape), ('&', Name.Variable, 'validvar'), ('[^$&"\\\\]+', String), ('[$"\\\\]', String)], 'validvar': [('[a-z_]\\w{0,31}\\.?', Name.Variable, '#pop')], 'numbers': [('\\b[+-]?([0-9]+(\\.[0-9]+)?|\\.[0-9]+|\\.)(E[+-]?[0-9]+)?i?\\b', Number)], 'special': [('(null|missing|_all_|_automatic_|_character_|_n_|_infile_|_name_|_null_|_numeric_|_user_|_webout_)', Keyword.Constant)]} |
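The lexer above plugs into the standard Pygments machinery; assuming a Pygments installation that registers it under the 'sas' alias declared above, a short usage sketch:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import get_lexer_by_name

sas_code = '''
data work.example;
    set sashelp.class;
    bmi = weight / (height * height) * 703;
run;
'''

# Look the lexer up by its 'sas' alias and render ANSI-highlighted output.
print(highlight(sas_code, get_lexer_by_name('sas'), TerminalFormatter()))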
class PointwiseSamplerV2(Sampler):
def __init__(self, dataset, batch_size=1024, shuffle=True, drop_last=False):
super(Sampler, self).__init__()
self.batch_size = batch_size
self.drop_last = drop_last
self.shuffle = shuffle
self.item_num = dataset.num_items
self.user_pos_dict = dataset.get_user_train_dict()
self.num_trainings = sum([len(item) for (u, item) in self.user_pos_dict.items()])
(self.user_pos_len, self.users_list, self.pos_items_list) = _generate_positive_items(self.user_pos_dict)
def __iter__(self):
data_iter = DataIterator(self.users_list, self.pos_items_list, batch_size=self.batch_size, shuffle=self.shuffle, drop_last=self.drop_last)
for (bat_users, bat_items) in data_iter:
(yield (bat_users, bat_items))
def __len__(self):
n_sample = len(self.users_list)
if self.drop_last:
return (n_sample // self.batch_size)
else:
return (((n_sample + self.batch_size) - 1) // self.batch_size) |
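__len__ here counts mini-batches rather than samples, using ceil division when the trailing partial batch is kept. A tiny standalone check of that arithmetic (hypothetical helper, not part of the sampler):

def num_batches(n_sample: int, batch_size: int, drop_last: bool) -> int:
    # Floor division drops the trailing partial batch; ceil division keeps it.
    if drop_last:
        return n_sample // batch_size
    return (n_sample + batch_size - 1) // batch_size

assert num_batches(2500, 1024, drop_last=True) == 2   # two full batches
assert num_batches(2500, 1024, drop_last=False) == 3  # plus one partial batch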
class TimeSimulation():
def __init__(self, hamiltonian, method='split-step'):
self.H = hamiltonian
implemented_solvers = ('split-step', 'split-step-cupy', 'crank-nicolson', 'crank-nicolson-cupy')
if (method == 'split-step'):
if (self.H.potential_type == 'grid'):
self.method = SplitStep(self)
else:
raise NotImplementedError(f'split-step can only be used with grid potential_type. Use crank-nicolson instead')
elif (method == 'split-step-cupy'):
if (self.H.potential_type == 'grid'):
self.method = SplitStepCupy(self)
else:
raise NotImplementedError('split-step-cupy can only be used with grid potential_type. Use crank-nicolson-cupy instead')
elif (method == 'crank-nicolson'):
self.method = CrankNicolson(self)
elif (method == 'crank-nicolson-cupy'):
self.method = CrankNicolsonCupy(self)
else:
raise NotImplementedError(f'{method} solver has not been implemented. Use one of {implemented_solvers}')
def run(self, initial_wavefunction, total_time, dt, store_steps=1):
self.method.run(initial_wavefunction, total_time, dt, store_steps) |
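TimeSimulation.__init__ is essentially a string-keyed strategy dispatch: the method name selects a solver object, and run delegates to it. A self-contained sketch of that pattern with dummy solver classes (not the actual SplitStep/CrankNicolson implementations):

class _DummySplitStep:
    def run(self, psi0, total_time, dt, store_steps=1):
        print(f'split-step: T={total_time}, dt={dt}')

class _DummyCrankNicolson:
    def run(self, psi0, total_time, dt, store_steps=1):
        print(f'crank-nicolson: T={total_time}, dt={dt}')

_SOLVERS = {'split-step': _DummySplitStep, 'crank-nicolson': _DummyCrankNicolson}

def make_solver(method: str):
    try:
        return _SOLVERS[method]()
    except KeyError:
        raise NotImplementedError(f'{method} solver has not been implemented. Use one of {tuple(_SOLVERS)}') from None

make_solver('split-step').run(None, total_time=1.0, dt=0.01)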
def allow_access_to_confdir(confdir, allow):
from errno import EEXIST
if allow:
try:
os.makedirs(confdir)
except OSError as err:
if (err.errno != EEXIST):
print('This configuration directory could not be created:')
print(confdir)
print('To run ranger without the need for configuration')
print('files, use the --clean option.')
raise SystemExit
else:
LOG.debug("Created config directory '%s'", confdir)
if (confdir not in sys.path):
sys.path[0:0] = [confdir]
elif (sys.path[0] == confdir):
del sys.path[0] |
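The EEXIST handling above predates exist_ok; on Python 3.2+ the same create-if-missing behaviour can be written more compactly. A hypothetical helper illustrating the equivalent (this is not ranger's API):

import os
import sys

def ensure_confdir(confdir: str) -> None:
    # exist_ok=True makes makedirs a no-op when the directory already exists,
    # replacing the explicit errno.EEXIST check.
    os.makedirs(confdir, exist_ok=True)
    if confdir not in sys.path:
        sys.path[0:0] = [confdir]  # prepend so user config shadows defaults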
class SampleClassIDsUniformlyTest(tf.test.TestCase):
def test_num_ways_respected(self):
num_classes = test_utils.MAX_WAYS_UPPER_BOUND
num_ways = test_utils.MIN_WAYS
for _ in range(10):
class_ids = sampling.sample_class_ids_uniformly(num_ways, num_classes)
self.assertLen(set(class_ids), num_ways)
self.assertLen(class_ids, num_ways)
def test_num_classes_respected(self):
num_classes = test_utils.MAX_WAYS_UPPER_BOUND
num_ways = test_utils.MIN_WAYS
for _ in range(10):
class_ids = sampling.sample_class_ids_uniformly(num_ways, num_classes)
self.assertContainsSubset(class_ids, list(range(num_classes)))
def test_unique_class_ids(self):
num_classes = test_utils.MAX_WAYS_UPPER_BOUND
num_ways = test_utils.MIN_WAYS
for _ in range(10):
class_ids = sampling.sample_class_ids_uniformly(num_ways, num_classes)
self.assertCountEqual(class_ids, set(class_ids)) |
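These tests pin down three properties of sample_class_ids_uniformly: it returns exactly num_ways ids, they fall inside range(num_classes), and they are unique. A plausible NumPy reference implementation with those properties (not the library's actual code):

import numpy as np

def sample_class_ids_uniformly(num_ways, num_classes, rng=None):
    # Uniform sampling without replacement guarantees uniqueness and the range bound.
    rng = np.random.default_rng() if rng is None else rng
    return rng.choice(num_classes, size=num_ways, replace=False)

ids = sample_class_ids_uniformly(5, 50)
assert len(ids) == 5 and len(set(ids.tolist())) == 5
assert all(0 <= i < 50 for i in ids)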
@pytest.mark.requires_internet
def test_pre_install_commands(hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins['default']['tests'] = False
config_file.save()
project_name = 'My.App'
with temp_dir.as_cwd():
result = hatch('new', project_name)
assert (result.exit_code == 0), result.output
project_path = (temp_dir / 'my-app')
data_path = (temp_dir / 'data')
data_path.mkdir()
project = Project(project_path)
helpers.update_project_environment(project, 'default', {'pre-install-commands': ['python -c "with open(\'test.txt\', \'w\') as f: f.write(\'content\')"'], **project.config.envs['default']})
helpers.update_project_environment(project, 'test', {})
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch('env', 'create', 'test')
assert (result.exit_code == 0), result.output
assert (result.output == helpers.dedent('\n Creating environment: test\n Running pre-installation commands\n Installing project in development mode\n Checking dependencies\n '))
assert (project_path / 'test.txt').is_file() |
def summary_bssids(pkts, res):
table_data = [['SSID', 'BSSID', 'CHANNEL', 'DBM', 'ENCRYPTED', 'ENCRYPTION TYPE']]
dot11beacon_packets_count = 0
for pkt in pkts:
if pkt.haslayer(Dot11Beacon):
dot11beacon_packets_count += 1
stats = pkt[Dot11Beacon].network_stats()
try:
dbm_signal = pkt.dBm_AntSignal
except Exception:
dbm_signal = 'N/A'
enc = red('x')
ssid = pkt[Dot11Elt].info.decode()
bssid = pkt[Dot11].addr3
try:
channel = int(ord(pkt[Dot11Elt:3].info))
except Exception:
channel = stats.get('channel')
capability = pkt.sprintf('{Dot11Beacon:%Dot11Beacon.cap%} {Dot11ProbeResp:%Dot11ProbeResp.cap%}')
if re.search('privacy', capability):
enc = green('+')
enc_type = stats.get('crypto')
table_data.append([ssid, bssid, channel, dbm_signal, enc, enc_type])
print(f'{s} AP discovery {s}')
if (len(pkts) == 0):
table = SingleTable([[red('No packets captured')]])
elif (dot11beacon_packets_count == 0):
table = SingleTable([[red('No Dot11Beacon packets found')]])
else:
table = SingleTable(table_data)
print(table.table)
if res.OUTPUT:
with open(res.OUTPUT, 'a') as out_file:
out_file.write(f'''
{table.table}
''')
print('') |
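Assuming SingleTable here comes from the terminaltables package, the rendering step reduces to building a list of rows (header first) and printing the .table attribute; a minimal sketch with made-up data:

from terminaltables import SingleTable

table_data = [
    ['SSID', 'BSSID', 'CHANNEL', 'DBM', 'ENCRYPTED', 'ENCRYPTION TYPE'],
    ['HomeWifi', 'aa:bb:cc:dd:ee:ff', 6, -42, '+', "{'WPA2/PSK'}"],
]
print(SingleTable(table_data).table)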
@pytest.mark.parametrize('sparse', [False, True], ids=['Dense', 'Sparse'])
def test_eigen_transform(sparse):
a = qutip.destroy(5)
f = (lambda t: t)
op = qutip.QobjEvo([(a * a.dag()), [(a + a.dag()), f]])
eigenT = _EigenBasisTransform(op, sparse=sparse)
evecs_qevo = eigenT.as_Qobj()
for t in [0, 1, 1.5]:
(eigenvals, ekets) = op(t).eigenstates()
np.testing.assert_allclose(eigenvals, eigenT.eigenvalues(t), rtol=1e-14, atol=1e-14)
np.testing.assert_allclose(np.abs(np.hstack([eket.full() for eket in ekets])), np.abs(eigenT.evecs(t).to_array()), rtol=1e-14, atol=1e-14)
np.testing.assert_allclose(np.abs(evecs_qevo(t).full()), np.abs(eigenT.evecs(t).to_array()), rtol=1e-14, atol=1e-14) |
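The comparisons above take absolute values because eigenvectors are only defined up to a global phase. A small NumPy illustration of the same check on a dense Hermitian matrix (independent of qutip):

import numpy as np

H = np.array([[1.0, 0.5], [0.5, 2.0]])
evals, evecs = np.linalg.eigh(H)  # columns of evecs are the eigenvectors

# Reconstruct H from its eigendecomposition; the decomposition itself is
# phase/sign-ambiguous, which is why the test compares np.abs of the vectors.
np.testing.assert_allclose(evecs @ np.diag(evals) @ evecs.T, H, atol=1e-12)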
def _tensor_test_case(dtype: torch.dtype, shape: List[int], logical_path: str, rank: int, replicated: bool) -> Tuple[(torch.Tensor, Entry, List[WriteReq])]:
tensor = rand_tensor(shape, dtype=dtype)
(entry, wrs) = prepare_write(obj=tensor, logical_path=logical_path, rank=rank, replicated=replicated)
return (tensor, entry, wrs) |
class PipelineIterator(IterableDataset):
def __init__(self, loader, infer, params, loader_batch_size=None):
self.loader = loader
self.infer = infer
self.params = params
if (loader_batch_size == 1):
loader_batch_size = None
self.loader_batch_size = loader_batch_size
self._loader_batch_index = None
self._loader_batch_data = None
def __len__(self):
return len(self.loader)
def __iter__(self):
self.iterator = iter(self.loader)
return self
def loader_batch_item(self):
if isinstance(self._loader_batch_data, torch.Tensor):
result = self._loader_batch_data[self._loader_batch_index]
else:
loader_batched = {}
for (k, element) in self._loader_batch_data.items():
if isinstance(element, ModelOutput):
element = element.to_tuple()
if isinstance(element[0], torch.Tensor):
loader_batched[k] = tuple((el[self._loader_batch_index].unsqueeze(0) for el in element))
elif isinstance(element[0], np.ndarray):
loader_batched[k] = tuple((np.expand_dims(el[self._loader_batch_index], 0) for el in element))
continue
if ((k in {'hidden_states', 'past_key_values', 'attentions'}) and isinstance(element, tuple)):
if isinstance(element[0], torch.Tensor):
loader_batched[k] = tuple((el[self._loader_batch_index].unsqueeze(0) for el in element))
elif isinstance(element[0], np.ndarray):
loader_batched[k] = tuple((np.expand_dims(el[self._loader_batch_index], 0) for el in element))
continue
if (element is None):
loader_batched[k] = None
elif isinstance(element[self._loader_batch_index], torch.Tensor):
loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index], np.ndarray):
loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
else:
loader_batched[k] = element[self._loader_batch_index]
result = self._loader_batch_data.__class__(loader_batched)
self._loader_batch_index += 1
return result
def __next__(self):
if ((self._loader_batch_index is not None) and (self._loader_batch_index < self.loader_batch_size)):
return self.loader_batch_item()
item = next(self.iterator)
processed = self.infer(item, **self.params)
if (self.loader_batch_size is not None):
if isinstance(processed, torch.Tensor):
first_tensor = processed
else:
key = list(processed.keys())[0]
first_tensor = processed[key]
if isinstance(first_tensor, list):
observed_batch_size = len(first_tensor)
else:
observed_batch_size = first_tensor.shape[0]
if (0 < observed_batch_size < self.loader_batch_size):
self.loader_batch_size = observed_batch_size
self._loader_batch_data = processed
self._loader_batch_index = 0
return self.loader_batch_item()
else:
return processed |
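loader_batch_item re-emits one element of a batched model output as a batch of size 1 (hence the unsqueeze(0) / expand_dims calls). A stripped-down sketch of that unbatching step for a plain dict of tensors (illustrative only, not the transformers implementation):

import torch

batch = {'logits': torch.randn(4, 3), 'mask': torch.ones(4, 7, dtype=torch.bool)}

def iter_items(batch):
    # Slice each tensor along the batch dimension and restore a leading dim of 1.
    n = next(iter(batch.values())).shape[0]
    for i in range(n):
        yield {k: v[i].unsqueeze(0) for k, v in batch.items()}

for item in iter_items(batch):
    assert item['logits'].shape == (1, 3) and item['mask'].shape == (1, 7)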
class DepthToNormalNode(topic_tools.LazyTransport):
def __init__(self):
super().__init__()
self._pub_normal = self.advertise('~output/normal', Image, queue_size=1)
self._pub_jet = self.advertise('~output/jet', Image, queue_size=1)
self._post_init()
def subscribe(self):
sub_cam = message_filters.Subscriber('~input/camera_info', CameraInfo, queue_size=1)
sub_depth = message_filters.Subscriber('~input/depth', Image, queue_size=1)
self._subscribers = [sub_cam, sub_depth]
sync = message_filters.TimeSynchronizer(self._subscribers, queue_size=5)
sync.registerCallback(self._callback)
def unsubscribe(self):
for sub in self._subscribers:
sub.unregister()
def _callback(self, cam_msg, depth_msg):
bridge = cv_bridge.CvBridge()
depth = bridge.imgmsg_to_cv2(depth_msg)
if (depth.dtype == np.uint16):
depth = (depth.astype(np.float32) / 1000)
depth[(depth == 0)] = np.nan
assert (depth.dtype == np.float32)
if (self._pub_normal.get_num_connections() > 0):
K = np.array(cam_msg.K).reshape(3, 3)
points = reorientbot.geometry.pointcloud_from_depth(depth, fx=K[(0, 0)], fy=K[(1, 1)], cx=K[(0, 2)], cy=K[(1, 2)])
normal = reorientbot.geometry.normals_from_pointcloud(points)
normal = np.uint8((((normal + 1) / 2) * 255))
out_msg = bridge.cv2_to_imgmsg(normal, 'rgb8')
out_msg.header = cam_msg.header
self._pub_normal.publish(out_msg)
if (self._pub_jet.get_num_connections() > 0):
jet = imgviz.depth2rgb(depth, min_value=0.3, max_value=1)
out_msg = bridge.cv2_to_imgmsg(jet, 'rgb8')
out_msg.header = cam_msg.header
self._pub_jet.publish(out_msg) |
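pointcloud_from_depth is standard pinhole back-projection from the camera intrinsics in K. A self-contained NumPy version of that formula (a sketch, not the reorientbot implementation):

import numpy as np

def pointcloud_from_depth(depth, fx, fy, cx, cy):
    # X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy, Z = depth
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    x = (u - cx) * depth / fx
    y = (v - cy) * depth / fy
    return np.stack([x, y, depth], axis=-1)

depth = np.full((4, 4), 1.5, dtype=np.float32)
points = pointcloud_from_depth(depth, fx=500.0, fy=500.0, cx=2.0, cy=2.0)
assert points.shape == (4, 4, 3)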
@specialize_function('mypy_extensions.i16')
def translate_i16(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> (Value | None):
if ((len(expr.args) != 1) or (expr.arg_kinds[0] != ARG_POS)):
return None
arg = expr.args[0]
arg_type = builder.node_type(arg)
if is_int16_rprimitive(arg_type):
return builder.accept(arg)
elif (is_int32_rprimitive(arg_type) or is_int64_rprimitive(arg_type)):
val = builder.accept(arg)
return builder.add(Truncate(val, int16_rprimitive, line=expr.line))
elif is_uint8_rprimitive(arg_type):
val = builder.accept(arg)
return builder.add(Extend(val, int16_rprimitive, signed=False, line=expr.line))
elif (is_int_rprimitive(arg_type) or is_bool_rprimitive(arg_type)):
val = builder.accept(arg)
val = truncate_literal(val, int16_rprimitive)
return builder.coerce(val, int16_rprimitive, expr.line)
return None |
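The Truncate path above keeps only the low 16 bits of a wider native int, i.e. two's-complement wrap-around. A plain-Python illustration of that truncation (my reading of the Truncate op, not mypyc code):

def truncate_to_i16(value: int) -> int:
    # Keep the low 16 bits, then reinterpret them as a signed 16-bit value.
    value &= 0xFFFF
    return value - 0x10000 if value & 0x8000 else value

assert truncate_to_i16(70000) == 4464      # 70000 mod 2**16
assert truncate_to_i16(-1) == -1
assert truncate_to_i16(0x8000) == -32768   # sign bit set -> negative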
class FixExec(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "\n exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >\n |\n exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >\n "
def transform(self, node, results):
assert results
syms = self.syms
a = results['a']
b = results.get('b')
c = results.get('c')
args = [a.clone()]
args[0].prefix = ''
if (b is not None):
args.extend([Comma(), b.clone()])
if (c is not None):
args.extend([Comma(), c.clone()])
return Call(Name('exec'), args, prefix=node.prefix) |
class TensorDictBase(MutableMapping):
_safe = False
_lazy = False
_inplace_set = False
is_meta = False
_is_locked = False
_cache = None
def __bool__(self) -> bool:
raise RuntimeError('Converting a tensordict to boolean value is not permitted')
def __ne__(self, other: object) -> T:
...
def __xor__(self, other):
...
def __or__(self, other):
...
def __eq__(self, other: object) -> T:
...
def __repr__(self) -> str:
fields = _td_fields(self)
field_str = indent(f'fields={{{fields}}}', (4 * ' '))
batch_size_str = indent(f'batch_size={self.batch_size}', (4 * ' '))
device_str = indent(f'device={self.device}', (4 * ' '))
is_shared_str = indent(f'is_shared={self.is_shared()}', (4 * ' '))
string = ',\n'.join([field_str, batch_size_str, device_str, is_shared_str])
return f'''{type(self).__name__}(
{string})'''
def __iter__(self) -> Generator:
if (not self.batch_dims):
raise StopIteration
(yield from self.unbind(0))
def __len__(self) -> int:
return (self.shape[0] if self.batch_dims else 0)
def __contains__(self, key: NestedKey) -> bool:
raise NotImplementedError('TensorDict does not support membership checks with the `in` keyword. If you want to check if a particular key is in your TensorDict, please use `key in tensordict.keys()` instead.')
def __getitem__(self, index: IndexType) -> T:
istuple = isinstance(index, tuple)
if (istuple or isinstance(index, str)):
idx_unravel = _unravel_key_to_tuple(index)
if idx_unravel:
return self._get_tuple(idx_unravel, NO_DEFAULT)
if ((istuple and (not index)) or ((not istuple) and (index is Ellipsis))):
return self
if (not istuple):
if isinstance(index, int):
return self._index_tensordict(index)
index = (index,)
if (istuple and any(((idx is Ellipsis) for idx in index))):
index = convert_ellipsis_to_idx(index, self.batch_size)
if all(((isinstance(idx, slice) and (idx == slice(None))) for idx in index)):
return self
return self._index_tensordict(index)
__getitems__ = __getitem__
def _get_sub_tensordict(self, idx: IndexType) -> T:
from tensordict._td import _SubTensorDict
return _SubTensorDict(source=self, idx=idx)
def get_sub_tensordict(self, idx: IndexType) -> T:
warnings.warn('get_sub_tensordict will be made private in v0.4.', category=DeprecationWarning)
return self._get_sub_tensordict(idx)
def __setitem__(self, index: IndexType, value: (((T | dict) | numbers.Number) | CompatibleType)) -> None:
...
def __delitem__(self, key: NestedKey) -> T:
return self.del_(key)
def __torch_function__(cls, func: Callable, types: tuple[(type, ...)], args: tuple[(Any, ...)]=(), kwargs: (dict[(str, Any)] | None)=None) -> Callable:
from tensordict._torch_func import TD_HANDLED_FUNCTIONS
if (kwargs is None):
kwargs = {}
if ((func not in TD_HANDLED_FUNCTIONS) or (not all((issubclass(t, (Tensor, TensorDictBase)) for t in types)))):
return NotImplemented
return TD_HANDLED_FUNCTIONS[func](*args, **kwargs)
def all(self, dim: int=None) -> (bool | TensorDictBase):
...
def any(self, dim: int=None) -> (bool | TensorDictBase):
...
def from_module(cls, module, as_module: bool=False, lock: bool=True, use_state_dict: bool=False):
...
def to_module(self, module: nn.Module, return_swap: bool=False, swap_dest=None, memo=None):
...
def shape(self) -> torch.Size:
return self.batch_size
def batch_size(self) -> torch.Size:
...
def size(self, dim: (int | None)=None) -> (torch.Size | int):
if (dim is None):
return self.batch_size
return self.batch_size[dim]
def _batch_size_setter(self, new_batch_size: torch.Size) -> None:
if (new_batch_size == self.batch_size):
return
if self._lazy:
raise RuntimeError('modifying the batch size of a lazy representation of a tensordict is not permitted. Consider instantiating the tensordict first by calling `td = td.to_tensordict()` before resetting the batch size.')
if (not isinstance(new_batch_size, torch.Size)):
new_batch_size = torch.Size(new_batch_size)
for key in self.keys():
if _is_tensor_collection(self.entry_class(key)):
tensordict = self.get(key)
if (len(tensordict.batch_size) < len(new_batch_size)):
tensordict.batch_size = new_batch_size
self._set_str(key, tensordict, inplace=True, validated=True)
self._check_new_batch_size(new_batch_size)
self._change_batch_size(new_batch_size)
if self._has_names():
names = self.names
if (len(names) < len(new_batch_size)):
self.names = (names + ([None] * (len(new_batch_size) - len(names))))
else:
self.names = names[:self.batch_dims]
def batch_dims(self) -> int:
return len(self.batch_size)
def ndimension(self) -> int:
return self.batch_dims
def ndim(self) -> int:
return self.batch_dims
def dim(self) -> int:
return self.batch_dims
def numel(self) -> int:
return max(1, self.batch_size.numel())
def expand(self, *shape: int) -> T:
...
def expand(self, shape: torch.Size) -> T:
...
def expand(self, *args: (int | torch.Size)) -> T:
...
def unbind(self, dim: int) -> tuple[(T, ...)]:
...
def chunk(self, chunks: int, dim: int=0) -> tuple[(TensorDictBase, ...)]:
if (chunks < 1):
raise ValueError(f'chunks must be a strictly positive integer, got {chunks}.')
split_size = (- (self.batch_size[dim] // (- chunks)))
return self.split(split_size, dim=dim)
def unsqueeze(self, dim: int) -> T:
...
def unsqueeze(self):
if lazy_legacy():
return self._legacy_unsqueeze
else:
return self._unsqueeze
def _unsqueeze(self, dim):
if (dim < 0):
newdim = ((self.batch_dims + dim) + 1)
else:
newdim = dim
if ((newdim > self.batch_dims) or (newdim < 0)):
raise RuntimeError(f'unsqueezing is allowed for dims comprised between `-td.batch_dims - 1` and `td.batch_dims` only. Got dim={dim} with a batch size of {self.batch_size}.')
batch_size = list(self.batch_size)
batch_size.insert(newdim, 1)
batch_size = torch.Size(batch_size)
names = copy(self.names)
names.insert(dim, None)
def _unsqueeze(tensor):
return tensor.unsqueeze(newdim)
return self._fast_apply(_unsqueeze, batch_size=batch_size, names=names, inplace=False, call_on_nested=True)
def _legacy_unsqueeze(self, dim: int) -> T:
if (dim < 0):
dim = ((self.batch_dims + dim) + 1)
if ((dim > self.batch_dims) or (dim < 0)):
raise RuntimeError(f'unsqueezing is allowed for dims comprised between `-td.batch_dims` and `td.batch_dims` only. Got dim={dim} with a batch size of {self.batch_size}.')
from tensordict._lazy import _UnsqueezedTensorDict
return _UnsqueezedTensorDict(source=self, custom_op='unsqueeze', inv_op='squeeze', custom_op_kwargs={'dim': dim}, inv_op_kwargs={'dim': dim})
def squeeze(self, dim: (int | None)=None) -> T:
...
def squeeze(self):
if lazy_legacy():
return self._legacy_squeeze
else:
return self._squeeze
def _squeeze(self, dim=None):
batch_size = self.batch_size
if (dim is None):
names = list(self.names)
(batch_size, names) = zip(*[(size, name) for (size, name) in zip(batch_size, names) if (size != 1)])
batch_size = torch.Size(batch_size)
if (batch_size == self.batch_size):
return self
def _squeeze(tensor):
return tensor.view(*batch_size, *tensor.shape[self.batch_dims:])
return self._fast_apply(_squeeze, batch_size=batch_size, names=names, inplace=False, call_on_nested=True)
if (dim < 0):
newdim = (self.batch_dims + dim)
else:
newdim = dim
if ((newdim >= self.batch_dims) or (newdim < 0)):
raise RuntimeError(f'squeezing is allowed for dims comprised between `-td.batch_dims` and `td.batch_dims - 1` only. Got dim={dim} with a batch size of {self.batch_size}.')
if (batch_size[dim] != 1):
return self
batch_size = list(batch_size)
batch_size.pop(dim)
batch_size = list(batch_size)
names = list(self.names)
names.pop(dim)
return self._fast_apply((lambda x: x.squeeze(newdim)), batch_size=batch_size, names=names, inplace=False, call_on_nested=True)
def _legacy_squeeze(self, dim: (int | None)=None) -> T:
from tensordict._lazy import _SqueezedTensorDict
if (dim is None):
size = self.size()
if ((len(self.size()) == 1) or (size.count(1) == 0)):
return self
first_singleton_dim = size.index(1)
squeezed_dict = _SqueezedTensorDict(source=self, custom_op='squeeze', inv_op='unsqueeze', custom_op_kwargs={'dim': first_singleton_dim}, inv_op_kwargs={'dim': first_singleton_dim})
return squeezed_dict.squeeze(dim=None)
if (dim < 0):
dim = (self.batch_dims + dim)
if (self.batch_dims and ((dim >= self.batch_dims) or (dim < 0))):
raise RuntimeError(f'squeezing is allowed for dims comprised between 0 and td.batch_dims only. Got dim={dim} and batch_size={self.batch_size}.')
if ((dim >= self.batch_dims) or (self.batch_size[dim] != 1)):
return self
return _SqueezedTensorDict(source=self, custom_op='squeeze', inv_op='unsqueeze', custom_op_kwargs={'dim': dim}, inv_op_kwargs={'dim': dim})
def reshape(self, *shape: int):
...
def reshape(self, shape: (list | tuple)):
...
def reshape(self, *args, **kwargs) -> T:
...
def split(self, split_size: (int | list[int]), dim: int=0) -> list[TensorDictBase]:
...
def gather(self, dim: int, index: Tensor, out: (T | None)=None) -> T:
return torch.gather(self, dim, index, out=out)
def view(self, *shape: int):
...
def view(self, shape: torch.Size):
...
def _view(self, *args, **kwargs) -> T:
...
def view(self):
if lazy_legacy():
return self._legacy_view
else:
return self._view
def _legacy_view(self, *shape: int, size: (((list | tuple) | torch.Size) | None)=None) -> T:
if ((len(shape) == 0) and (size is not None)):
return self.view(*size)
elif ((len(shape) == 1) and isinstance(shape[0], (list, tuple, torch.Size))):
return self.view(*shape[0])
elif (not isinstance(shape, torch.Size)):
shape = infer_size_impl(shape, self.numel())
shape = torch.Size(shape)
if (shape == self.shape):
return self
from tensordict._lazy import _ViewedTensorDict
return _ViewedTensorDict(source=self, custom_op='view', inv_op='view', custom_op_kwargs={'size': shape}, inv_op_kwargs={'size': self.batch_size})
def transpose(self, dim0, dim1):
...
def transpose(self):
if lazy_legacy():
return self._legacy_transpose
else:
return self._transpose
def _transpose(self, dim0, dim1):
...
def _legacy_transpose(self, dim0, dim1):
if (dim0 < 0):
dim0 = (self.ndim + dim0)
if (dim1 < 0):
dim1 = (self.ndim + dim1)
if any(((dim0 < 0), (dim1 < 0))):
raise ValueError('The provided dimensions are incompatible with the tensordict batch-size.')
if (dim0 == dim1):
return self
from tensordict._lazy import _TransposedTensorDict
return _TransposedTensorDict(source=self, custom_op='transpose', inv_op='transpose', custom_op_kwargs={'dim0': dim0, 'dim1': dim1}, inv_op_kwargs={'dim0': dim0, 'dim1': dim1})
def permute(self, *dims: int):
...
def permute(self, dims: (list | tuple)):
...
def permute(self):
if lazy_legacy():
return self._legacy_permute
else:
return self._permute
def _permute(self, *args, **kwargs):
...
def _legacy_permute(self, *dims_list: int, dims: (list[int] | None)=None) -> T:
if (len(dims_list) == 0):
dims_list = dims
elif ((len(dims_list) == 1) and (not isinstance(dims_list[0], int))):
dims_list = dims_list[0]
if (len(dims_list) != len(self.shape)):
raise RuntimeError(f"number of dims don't match in permute (got {len(dims_list)}, expected {len(self.shape)}")
if ((not len(dims_list)) and (not self.batch_dims)):
return self
if np.array_equal(dims_list, range(self.batch_dims)):
return self
(min_dim, max_dim) = ((- self.batch_dims), (self.batch_dims - 1))
seen = [False for dim in range((max_dim + 1))]
for idx in dims_list:
if ((idx < min_dim) or (idx > max_dim)):
raise IndexError(f'dimension out of range (expected to be in range of [{min_dim}, {max_dim}], but got {idx})')
if seen[idx]:
raise RuntimeError('repeated dim in permute')
seen[idx] = True
from tensordict._lazy import _PermutedTensorDict
return _PermutedTensorDict(source=self, custom_op='permute', inv_op='permute', custom_op_kwargs={'dims': list(map(int, dims_list))}, inv_op_kwargs={'dims': list(map(int, dims_list))})
def _erase_cache(self):
self._cache = None
def names(self):
...
def _erase_names(self):
...
def _rename_subtds(self, value):
...
def _check_dim_name(self, name):
if (name is None):
return False
if (self._has_names() and (name in self.names)):
return True
for key in self.keys():
if _is_tensor_collection(self.entry_class(key)):
if self._get_str(key, NO_DEFAULT)._check_dim_name(name):
return True
else:
return False
def refine_names(self, *names):
names_copy = copy(names)
if any(((name is Ellipsis) for name in names)):
ellipsis_name = [NO_DEFAULT for _ in range(((self.ndim - len(names)) + 1))]
names = []
for name in names_copy:
if (name is Ellipsis):
names += ellipsis_name
else:
names.append(name)
curr_names = self.names
for (i, name) in enumerate(names):
if (name is NO_DEFAULT):
names[i] = curr_names[i]
continue
else:
if (curr_names[i] is None):
continue
if (self.names[i] == name):
continue
else:
raise RuntimeError(f'refine_names: cannot coerce TensorDict names {self.names} with {names_copy}.')
self.names = names
return self
def rename(self, *names, **rename_map):
clone = self.clone(recurse=False)
if ((len(names) == 1) and (names[0] is None)):
clone.names = None
if (rename_map and names):
raise ValueError('Passed both a name map and a name list. Only one is accepted.')
elif ((not rename_map) and (not names)):
raise ValueError('Neither a name map nor a name list was passed. Only one is accepted.')
elif rename_map:
cnames = list(clone.names)
for (i, name) in enumerate(cnames):
new_name = rename_map.pop(name, NO_DEFAULT)
if (new_name is not NO_DEFAULT):
cnames[i] = new_name
clone.names = cnames
if rename_map:
raise ValueError(f'Some names to be renamed were not part of the tensordict names: {rename_map.keys()} vs {self.names}.')
else:
clone.names = names
return clone
def rename_(self, *names, **rename_map):
if ((len(names) == 1) and (names[0] is None)):
self.names = None
if (rename_map and names):
raise ValueError('Passed both a name map and a name list. Only one is accepted.')
elif ((not rename_map) and (not names) and self.batch_dims):
raise ValueError('Neither a name map nor a name list was passed. Only one is accepted.')
elif rename_map:
cnames = list(self.names)
for (i, name) in enumerate(cnames):
new_name = rename_map.pop(name, NO_DEFAULT)
if (new_name is not NO_DEFAULT):
cnames[i] = new_name
if rename_map:
raise ValueError(f'Some names to be renamed were not part of the tensordict names: {rename_map.keys()} vs {self.names}.')
self.names = cnames
else:
self.names = names
return self
def _has_names(self):
...
def device(self) -> (torch.device | None):
...
def device(self, value: DeviceType) -> None:
...
def clear_device_(self) -> T:
self._device = None
for value in self.values():
if _is_tensor_collection(value.__class__):
value.clear_device_()
return self
def pin_memory(self) -> T:
...
def cpu(self) -> T:
return self.to('cpu')
def cuda(self, device: int=None) -> T:
if (device is None):
return self.to(torch.device('cuda'))
return self.to(f'cuda:{device}')
def state_dict(self, destination=None, prefix='', keep_vars=False, flatten=False) -> OrderedDict[(str, Any)]:
out = collections.OrderedDict()
source = self
if flatten:
source = source.flatten_keys('.')
for (key, item) in source.items():
if (not _is_tensor_collection(item.__class__)):
if (not keep_vars):
out[(prefix + key)] = item.detach().clone()
else:
out[(prefix + key)] = item
else:
out[(prefix + key)] = item.state_dict(keep_vars=keep_vars)
if ('__batch_size' in out):
raise KeyError("Cannot retrieve the state_dict of a TensorDict with `'__batch_size'` key")
if ('__device' in out):
raise KeyError("Cannot retrieve the state_dict of a TensorDict with `'__device'` key")
out[(prefix + '__batch_size')] = source.batch_size
out[(prefix + '__device')] = source.device
if (destination is not None):
destination.update(out)
return destination
return out
def load_state_dict(self, state_dict: OrderedDict[(str, Any)], strict=True, assign=False, from_flatten=False) -> T:
if from_flatten:
self_flatten = self.flatten_keys('.')
self_flatten.load_state_dict(state_dict, strict=strict, assign=assign)
if (not assign):
return self
else:
DOT_ERROR = 'Cannot use load_state_dict(..., from_flatten=True, assign=True) when some keys contain a dot character.'
for key in self.keys(True, True):
if isinstance(key, tuple):
for subkey in key:
if ('.' in subkey):
raise RuntimeError(DOT_ERROR)
elif ('.' in key):
raise RuntimeError(DOT_ERROR)
return self.update(self_flatten.unflatten_keys('.'))
state_dict = copy(state_dict)
self.batch_size = state_dict.pop('__batch_size')
device = state_dict.pop('__device', None)
if ((device is not None) and (self.device is not None) and (device != self.device)):
raise RuntimeError('Loading data from another device is not yet supported.')
for (key, item) in state_dict.items():
if isinstance(item, dict):
dest = self.get(key, default=None)
if (dest is None):
dest = self.empty()
dest.load_state_dict(item, assign=assign, strict=strict)
self.set(key, dest, inplace=(not assign))
else:
self.set(key, item, inplace=(not assign))
if (strict and (set(state_dict.keys()) != set(self.keys()))):
set_sd = set(state_dict.keys())
set_td = set(self.keys())
raise RuntimeError(f'''Cannot load state-dict because the key sets don't match: got state_dict extra keys
{(set_sd - set_td)}
and tensordict extra keys
{(set_td - set_sd)}
''')
return self
def is_shared(self) -> bool:
if (self.device and (not self._is_memmap)):
return ((self.device.type == 'cuda') or self._is_shared)
return self._is_shared
def is_memmap(self) -> bool:
return self._is_memmap
def share_memory_(self) -> T:
...
def _memmap_(self, *, prefix: (str | None), copy_existing: bool, executor, futures, inplace, like) -> T:
...
def memmap_(self, prefix: (str | None)=None, copy_existing: bool=False, *, num_threads: int=0, return_early: bool=False) -> T:
if (num_threads > 1):
with (ThreadPoolExecutor(max_workers=num_threads) if (not return_early) else contextlib.nullcontext()) as executor:
if return_early:
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = []
result = self._memmap_(prefix=prefix, copy_existing=copy_existing, executor=executor, futures=futures, inplace=True, like=False)
if (not return_early):
concurrent.futures.wait(futures)
return result
else:
return TensorDictFuture(futures, result)
return self._memmap_(prefix=prefix, copy_existing=copy_existing, inplace=True, futures=None, executor=None, like=False).lock_()
def memmap(self, prefix: (str | None)=None, copy_existing: bool=False, *, num_threads: int=0, return_early: bool=False) -> T:
if (num_threads > 1):
with (ThreadPoolExecutor(max_workers=num_threads) if (not return_early) else contextlib.nullcontext()) as executor:
if return_early:
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = []
result = self._memmap_(prefix=prefix, copy_existing=copy_existing, executor=executor, futures=futures, inplace=False, like=False)
if (not return_early):
concurrent.futures.wait(futures)
return result
else:
return TensorDictFuture(futures, result)
return self._memmap_(prefix=prefix, copy_existing=copy_existing, inplace=False, executor=None, like=False, futures=None).lock_()
def memmap_like(self, prefix: (str | None)=None, copy_existing: bool=False, *, num_threads: int=0, return_early: bool=False) -> T:
if (num_threads > 1):
with (ThreadPoolExecutor(max_workers=num_threads) if (not return_early) else contextlib.nullcontext()) as executor:
if return_early:
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = []
result = self._memmap_(prefix=prefix, copy_existing=copy_existing, executor=executor, futures=futures, inplace=False, like=True)
if (not return_early):
concurrent.futures.wait(futures)
return result
else:
return TensorDictFuture(futures, result)
return self._memmap_(prefix=prefix, copy_existing=copy_existing, inplace=False, like=True, executor=None, futures=None).lock_()
def load_memmap(cls, prefix: (str | Path)) -> T:
prefix = Path(prefix)
def load_metadata(filepath):
with open(filepath) as json_metadata:
metadata = json.load(json_metadata)
return metadata
metadata = load_metadata((prefix / 'meta.json'))
type_name = metadata['_type']
if (type_name != str(cls)):
import tensordict
for other_cls in tensordict.base._ACCEPTED_CLASSES:
if (str(other_cls) == type_name):
return other_cls._load_memmap(prefix, metadata)
else:
raise RuntimeError(f'Could not find name {type_name} in {tensordict.base._ACCEPTED_CLASSES}. Did you call _register_tensor_class(cls) on {type_name}?')
return cls._load_memmap(prefix, metadata)
def _load_memmap(cls, prefix: Path, metadata: dict):
...
def entry_class(self, key: NestedKey) -> type:
...
def set(self, key: NestedKey, item: CompatibleType, inplace: bool=False, **kwargs: Any) -> T:
key = _unravel_key_to_tuple(key)
inplace = (BEST_ATTEMPT_INPLACE if inplace else False)
return self._set_tuple(key, item, inplace=inplace, validated=False)
def _set_str(self, key, value, *, inplace, validated):
...
def _set_tuple(self, key, value, *, inplace, validated):
...
@lock_blocked
def set_non_tensor(self, key: NestedKey, value: Any):
key = unravel_key(key)
return self._set_non_tensor(key, value)
def _set_non_tensor(self, key: NestedKey, value: Any):
if isinstance(key, tuple):
if (len(key) == 1):
return self._set_non_tensor(key[0], value)
sub_td = self._get_str(key[0], None)
if (sub_td is None):
sub_td = self._create_nested_str(key[0])
sub_td._set_non_tensor(key[1:], value)
return self
from tensordict.tensorclass import NonTensorData
self._set_str(key, NonTensorData(value, batch_size=self.batch_size, device=self.device, names=(self.names if self._has_names() else None)), validated=True, inplace=False)
return self
def get_non_tensor(self, key: NestedKey, default=NO_DEFAULT):
key = unravel_key(key)
return self._get_non_tensor(key, default=default)
def _get_non_tensor(self, key: NestedKey, default=NO_DEFAULT):
if isinstance(key, tuple):
if (len(key) == 1):
return self._get_non_tensor(key[0], default=default)
subtd = self._get_str(key[0], default=default)
if (subtd is default):
return subtd
return subtd._get_non_tensor(key[1:], default=default)
value = self._get_str(key, default=default)
from tensordict.tensorclass import NonTensorData
if isinstance(value, NonTensorData):
return value.data
return value
def filter_non_tensor_data(self) -> T:
from tensordict.tensorclass import NonTensorData
def _filter(x):
if (not isinstance(x, NonTensorData)):
if is_tensor_collection(x):
return x.filter_non_tensor_data()
return x
return self._apply_nest(_filter, call_on_nested=True)
def _convert_inplace(self, inplace, key):
if (inplace is not False):
has_key = (key in self.keys())
if ((inplace is True) and (not has_key)):
raise KeyError(_KEY_ERROR.format(key, self.__class__.__name__, sorted(self.keys())))
inplace = has_key
return inplace
def set_at_(self, key: NestedKey, value: CompatibleType, index: IndexType) -> T:
key = _unravel_key_to_tuple(key)
return self._set_at_tuple(key, value, index, validated=False)
def _set_at_str(self, key, value, idx, *, validated):
...
def _set_at_tuple(self, key, value, idx, *, validated):
...
def set_(self, key: NestedKey, item: CompatibleType) -> T:
key = _unravel_key_to_tuple(key)
return self._set_tuple(key, item, inplace=True, validated=False)
def _stack_onto_(self, list_item: list[CompatibleType], dim: int) -> T:
...
def _stack_onto_at_(self, key: str, list_item: list[CompatibleType], dim: int, idx: IndexType) -> T:
raise RuntimeError(f'Cannot call _stack_onto_at_ with {self.__class__.__name__}. Make sure your sub-classed tensordicts are turned into regular tensordicts by calling to_tensordict() before calling __getindex__ and stack.')
def _default_get(self, key: str, default: (str | CompatibleType)=NO_DEFAULT) -> CompatibleType:
if (default is not NO_DEFAULT):
return default
else:
raise KeyError(_KEY_ERROR.format(key, self.__class__.__name__, sorted(self.keys())))
def get(self, key: NestedKey, default: (str | CompatibleType)=NO_DEFAULT) -> CompatibleType:
key = _unravel_key_to_tuple(key)
if (not key):
raise KeyError(_GENERIC_NESTED_ERR.format(key))
return self._get_tuple(key, default=default)
def _get_str(self, key, default):
...
def _get_tuple(self, key, default):
...
def get_at(self, key: NestedKey, index: IndexType, default: CompatibleType=NO_DEFAULT) -> CompatibleType:
key = _unravel_key_to_tuple(key)
if (not key):
raise KeyError(_GENERIC_NESTED_ERR.format(key))
return self._get_at_tuple(key, index, default)
def _get_at_str(self, key, idx, default):
out = self._get_str(key, default)
if (out is default):
return out
return out[idx]
def _get_at_tuple(self, key, idx, default):
out = self._get_tuple(key, default)
if (out is default):
return out
return out[idx]
def get_item_shape(self, key: NestedKey):
return _shape(self.get(key))
def update(self, input_dict_or_td: (dict[(str, CompatibleType)] | T), clone: bool=False, inplace: bool=False, *, keys_to_update: (Sequence[NestedKey] | None)=None) -> T:
from tensordict._lazy import LazyStackedTensorDict
if (input_dict_or_td is self):
return self
if (keys_to_update is not None):
if (len(keys_to_update) == 0):
return self
keys_to_update = unravel_key_list(keys_to_update)
for (key, value) in input_dict_or_td.items():
key = _unravel_key_to_tuple(key)
(firstkey, subkey) = (key[0], key[1:])
if (keys_to_update and (not any((((firstkey == ktu) if isinstance(ktu, str) else (firstkey == ktu[0])) for ktu in keys_to_update)))):
continue
target = self._get_str(firstkey, None)
if (clone and hasattr(value, 'clone')):
value = value.clone()
elif clone:
value = tree_map(torch.clone, value)
if (target is not None):
if _is_tensor_collection(type(target)):
if subkey:
sub_keys_to_update = _prune_selected_keys(keys_to_update, firstkey)
target.update({subkey: value}, inplace=inplace, clone=clone, keys_to_update=sub_keys_to_update)
continue
elif (isinstance(value, (dict,)) or _is_tensor_collection(value.__class__)):
if (isinstance(value, LazyStackedTensorDict) and (not isinstance(target, LazyStackedTensorDict))):
sub_keys_to_update = _prune_selected_keys(keys_to_update, firstkey)
self._set_tuple(key, LazyStackedTensorDict(*target.unbind(value.stack_dim), stack_dim=value.stack_dim).update(value, inplace=inplace, clone=clone, keys_to_update=sub_keys_to_update), validated=True, inplace=False)
else:
sub_keys_to_update = _prune_selected_keys(keys_to_update, firstkey)
target.update(value, inplace=inplace, clone=clone, keys_to_update=sub_keys_to_update)
continue
self._set_tuple(key, value, inplace=(BEST_ATTEMPT_INPLACE if inplace else False), validated=False)
return self
def update_(self, input_dict_or_td: (dict[(str, CompatibleType)] | T), clone: bool=False, *, keys_to_update: (Sequence[NestedKey] | None)=None) -> T:
if (input_dict_or_td is self):
return self
if (keys_to_update is not None):
if (len(keys_to_update) == 0):
return self
keys_to_update = unravel_key_list(keys_to_update)
for (key, value) in input_dict_or_td.items():
(firstkey, *nextkeys) = _unravel_key_to_tuple(key)
if (keys_to_update and (not any((((firstkey == ktu) if isinstance(ktu, str) else (firstkey == ktu[0])) for ktu in keys_to_update)))):
continue
if clone:
value = value.clone()
self.set_((firstkey, *nextkeys), value)
return self
def update_at_(self, input_dict_or_td: (dict[(str, CompatibleType)] | T), idx: IndexType, clone: bool=False, *, keys_to_update: (Sequence[NestedKey] | None)=None) -> T:
if (keys_to_update is not None):
if (len(keys_to_update) == 0):
return self
keys_to_update = unravel_key_list(keys_to_update)
for (key, value) in input_dict_or_td.items():
(firstkey, *nextkeys) = _unravel_key_to_tuple(key)
if (keys_to_update and (not any((((firstkey == ktu) if isinstance(ktu, str) else (firstkey == ktu[0])) for ktu in keys_to_update)))):
continue
if (not isinstance(value, tuple(_ACCEPTED_CLASSES))):
raise TypeError(f'Expected value to be one of types {_ACCEPTED_CLASSES} but got {type(value)}')
if clone:
value = value.clone()
self.set_at_((firstkey, *nextkeys), value, idx)
return self
@lock_blocked
def create_nested(self, key):
key = _unravel_key_to_tuple(key)
self._create_nested_tuple(key)
return self
def _create_nested_str(self, key):
out = self.empty()
self._set_str(key, out, inplace=False, validated=True)
return out
def _create_nested_tuple(self, key):
td = self._create_nested_str(key[0])
if (len(key) > 1):
td._create_nested_tuple(key[1:])
def copy_(self, tensordict: T, non_blocking: bool=None) -> T:
if (non_blocking is False):
raise ValueError("non_blocking=False isn't supported in TensorDict.")
return self.update_(tensordict)
def copy_at_(self, tensordict: T, idx: IndexType) -> T:
return self.update_at_(tensordict, idx)
def is_empty(self) -> bool:
for _ in self.keys(True, True):
return False
return True
def setdefault(self, key: NestedKey, default: CompatibleType, inplace: bool=False) -> CompatibleType:
if (key not in self.keys(include_nested=isinstance(key, tuple))):
self.set(key, default, inplace=inplace)
return self.get(key)
def items(self, include_nested: bool=False, leaves_only: bool=False, is_leaf=None) -> Iterator[tuple[(str, CompatibleType)]]:
if (is_leaf is None):
is_leaf = _default_is_leaf
if (include_nested and leaves_only):
for k in self.keys():
val = self._get_str(k, NO_DEFAULT)
if (not is_leaf(val.__class__)):
(yield from ((_unravel_key_to_tuple((k, _key)), _val) for (_key, _val) in val.items(include_nested=include_nested, leaves_only=leaves_only, is_leaf=is_leaf)))
else:
(yield (k, val))
elif include_nested:
for k in self.keys():
val = self._get_str(k, NO_DEFAULT)
(yield (k, val))
if (not is_leaf(val.__class__)):
(yield from ((_unravel_key_to_tuple((k, _key)), _val) for (_key, _val) in val.items(include_nested=include_nested, leaves_only=leaves_only, is_leaf=is_leaf)))
elif leaves_only:
for k in self.keys():
val = self._get_str(k, NO_DEFAULT)
if is_leaf(val.__class__):
(yield (k, val))
else:
for k in self.keys():
(yield (k, self._get_str(k, NO_DEFAULT)))
def values(self, include_nested: bool=False, leaves_only: bool=False, is_leaf=None) -> Iterator[CompatibleType]:
if (is_leaf is None):
is_leaf = _default_is_leaf
if (include_nested and leaves_only):
for k in self.keys():
val = self._get_str(k, NO_DEFAULT)
if (not is_leaf(val.__class__)):
(yield from val.values(include_nested=include_nested, leaves_only=leaves_only, is_leaf=is_leaf))
else:
(yield val)
elif include_nested:
for k in self.keys():
val = self._get_str(k, NO_DEFAULT)
(yield val)
if (not is_leaf(val.__class__)):
(yield from val.values(include_nested=include_nested, leaves_only=leaves_only, is_leaf=is_leaf))
elif leaves_only:
for k in self.keys():
val = self._get_str(k, NO_DEFAULT)
if is_leaf(val.__class__):
(yield val)
else:
for k in self.keys():
(yield self._get_str(k, NO_DEFAULT))
def keys(self, include_nested: bool=False, leaves_only: bool=False, is_leaf: Callable[([Type], bool)]=None):
...
def pop(self, key: NestedKey, default: Any=NO_DEFAULT) -> CompatibleType:
key = _unravel_key_to_tuple(key)
if (not key):
raise KeyError(_GENERIC_NESTED_ERR.format(key))
try:
out = self.get(key, default)
self.del_(key)
except KeyError as err:
if (default == NO_DEFAULT):
raise KeyError(f'You are trying to pop key `{key}` which is not in dict without providing default value.') from err
return out
def sorted_keys(self) -> list[NestedKey]:
return sorted(self.keys())
def flatten(self, start_dim=0, end_dim=(- 1)):
if (end_dim < 0):
end_dim = (self.ndim + end_dim)
if (end_dim < 0):
raise ValueError(f'Incompatible end_dim {end_dim} for tensordict with shape {self.shape}.')
if (end_dim <= start_dim):
raise ValueError('The end dimension must be strictly greater than the start dim.')
def flatten(tensor):
return torch.flatten(tensor, start_dim, end_dim)
nelt = prod(self.batch_size[start_dim:(end_dim + 1)])
if (start_dim > 0):
batch_size = ((list(self.batch_size)[:start_dim] + [nelt]) + list(self.batch_size[(end_dim + 1):]))
else:
batch_size = ([nelt] + list(self.batch_size[(end_dim + 1):]))
out = self._fast_apply(flatten, batch_size=batch_size)
if self._has_names():
names = [name for (i, name) in enumerate(self.names) if ((i < start_dim) or (i > end_dim))]
names.insert(start_dim, None)
out.names = names
return out
def unflatten(self, dim, unflattened_size):
if (dim < 0):
dim = (self.ndim + dim)
if (dim < 0):
raise ValueError(f'Incompatible dim {dim} for tensordict with shape {self.shape}.')
def unflatten(tensor):
return torch.unflatten(tensor, dim, unflattened_size)
if (dim > 0):
batch_size = ((list(self.batch_size)[:dim] + list(unflattened_size)) + list(self.batch_size[(dim + 1):]))
else:
batch_size = (list(unflattened_size) + list(self.batch_size[1:]))
out = self._fast_apply(unflatten, batch_size=batch_size)
if self._has_names():
names = copy(self.names)
for _ in range((len(unflattened_size) - 1)):
names.insert(dim, None)
out.names = names
return out
def rename_key_(self, old_key: str, new_key: str, safe: bool=False) -> T:
...
def del_(self, key: NestedKey) -> T:
...
def gather_and_stack(self, dst: int) -> (T | None):
output = ([None for _ in range(dist.get_world_size())] if (dst == dist.get_rank()) else None)
dist.gather_object(self, output, dst=dst)
if (dst == dist.get_rank()):
output = [item for (i, item) in enumerate(output) if (i != dst)]
self.update(torch.stack(output, 0), inplace=True)
return self
return None
def send(self, dst: int, init_tag: int=0, pseudo_rand: bool=False) -> None:
self._send(dst, _tag=(init_tag - 1), pseudo_rand=pseudo_rand)
def _send(self, dst: int, _tag: int=(- 1), pseudo_rand: bool=False) -> int:
for key in self.sorted_keys:
value = self._get_str(key, NO_DEFAULT)
if isinstance(value, Tensor):
pass
elif _is_tensor_collection(value.__class__):
_tag = value._send(dst, _tag=_tag, pseudo_rand=pseudo_rand)
continue
else:
raise NotImplementedError(f'Type {type(value)} is not supported.')
if (not pseudo_rand):
_tag += 1
else:
_tag = int_generator((_tag + 1))
dist.send(value, dst=dst, tag=_tag)
return _tag
def recv(self, src: int, init_tag: int=0, pseudo_rand: bool=False) -> int:
return self._recv(src, _tag=(init_tag - 1), pseudo_rand=pseudo_rand)
def _recv(self, src: int, _tag: int=(- 1), pseudo_rand: bool=False) -> int:
for key in self.sorted_keys:
value = self._get_str(key, NO_DEFAULT)
if isinstance(value, Tensor):
pass
elif _is_tensor_collection(value.__class__):
_tag = value._recv(src, _tag=_tag, pseudo_rand=pseudo_rand)
continue
else:
raise NotImplementedError(f'Type {type(value)} is not supported.')
if (not pseudo_rand):
_tag += 1
else:
_tag = int_generator((_tag + 1))
dist.recv(value, src=src, tag=_tag)
self._set_str(key, value, inplace=True, validated=True)
return _tag
def isend(self, dst: int, init_tag: int=0, pseudo_rand: bool=False) -> int:
return self._isend(dst, (init_tag - 1), pseudo_rand=pseudo_rand)
def _isend(self, dst: int, _tag: int=(- 1), _futures: (list[torch.Future] | None)=None, pseudo_rand: bool=False) -> int:
root = False
if (_futures is None):
root = True
_futures = []
for key in self.sorted_keys:
value = self._get_str(key, NO_DEFAULT)
if _is_tensor_collection(value.__class__):
_tag = value._isend(dst, _tag=_tag, pseudo_rand=pseudo_rand, _futures=_futures)
continue
elif isinstance(value, Tensor):
pass
else:
raise NotImplementedError(f'Type {type(value)} is not supported.')
if (not pseudo_rand):
_tag += 1
else:
_tag = int_generator((_tag + 1))
_future = dist.isend(value, dst=dst, tag=_tag)
_futures.append(_future)
if root:
for _future in _futures:
_future.wait()
return _tag
def irecv(self, src: int, return_premature: bool=False, init_tag: int=0, pseudo_rand: bool=False) -> ((tuple[(int, list[torch.Future])] | list[torch.Future]) | None):
return self._irecv(src, return_premature, _tag=(init_tag - 1), pseudo_rand=pseudo_rand)
def _irecv(self, src: int, return_premature: bool=False, _tag: int=(- 1), _future_list: list[torch.Future]=None, pseudo_rand: bool=False) -> ((tuple[(int, list[torch.Future])] | list[torch.Future]) | None):
root = False
if (_future_list is None):
_future_list = []
root = True
for key in self.sorted_keys:
value = self._get_str(key, NO_DEFAULT)
if _is_tensor_collection(value.__class__):
(_tag, _future_list) = value._irecv(src, _tag=_tag, _future_list=_future_list, pseudo_rand=pseudo_rand)
continue
elif isinstance(value, Tensor):
pass
else:
raise NotImplementedError(f'Type {type(value)} is not supported.')
if (not pseudo_rand):
_tag += 1
else:
_tag = int_generator((_tag + 1))
_future_list.append(dist.irecv(value, src=src, tag=_tag))
if (not root):
return (_tag, _future_list)
elif return_premature:
return _future_list
else:
for future in _future_list:
future.wait()
return
def reduce(self, dst, op=dist.ReduceOp.SUM, async_op=False, return_premature=False):
return self._reduce(dst, op, async_op, return_premature)
def _reduce(self, dst, op=dist.ReduceOp.SUM, async_op=False, return_premature=False, _future_list=None):
root = False
if (_future_list is None):
_future_list = []
root = True
for key in self.sorted_keys:
value = self._get_str(key, NO_DEFAULT)
if _is_tensor_collection(value.__class__):
_future_list = value._reduce(dst=dst, op=op, async_op=async_op, _future_list=_future_list)
continue
elif isinstance(value, Tensor):
pass
else:
raise NotImplementedError(f'Type {type(value)} is not supported.')
_future_list.append(dist.reduce(value, dst=dst, op=op, async_op=async_op))
if (not root):
return _future_list
elif (async_op and return_premature):
return _future_list
elif async_op:
for future in _future_list:
future.wait()
return
def apply_(self, fn: Callable, *others) -> T:
return self.apply(fn, *others, inplace=True)
def apply(self, fn: Callable, *others: T, batch_size: (Sequence[int] | None)=None, device: (torch.device | None)=None, names: (Sequence[str] | None)=None, inplace: bool=False, default: Any=NO_DEFAULT, **constructor_kwargs) -> T:
return self._apply_nest(fn, *others, batch_size=batch_size, device=device, names=names, inplace=inplace, checked=False, default=default, **constructor_kwargs)
def named_apply(self, fn: Callable, *others: T, batch_size: (Sequence[int] | None)=None, device: (torch.device | None)=None, names: (Sequence[str] | None)=None, inplace: bool=False, default: Any=NO_DEFAULT, **constructor_kwargs) -> T:
return self._apply_nest(fn, *others, batch_size=batch_size, device=device, names=names, inplace=inplace, checked=False, default=default, named=True, **constructor_kwargs)
def _apply_nest(self, fn: Callable, *others: T, batch_size: (Sequence[int] | None)=None, device: (torch.device | None)=None, names: (Sequence[str] | None)=None, inplace: bool=False, checked: bool=False, call_on_nested: bool=False, default: Any=NO_DEFAULT, named: bool=False, **constructor_kwargs) -> T:
...
def _fast_apply(self, fn: Callable, *others: T, batch_size: (Sequence[int] | None)=None, device: (torch.device | None)=None, names: (Sequence[str] | None)=None, inplace: bool=False, call_on_nested: bool=False, default: Any=NO_DEFAULT, named: bool=False, **constructor_kwargs) -> T:
return self._apply_nest(fn, *others, batch_size=batch_size, device=device, names=names, inplace=inplace, checked=True, call_on_nested=call_on_nested, named=named, default=default, **constructor_kwargs)
def map(self, fn: Callable, dim: int=0, num_workers: (int | None)=None, chunksize: (int | None)=None, num_chunks: (int | None)=None, pool: (mp.Pool | None)=None, generator: (torch.Generator | None)=None, max_tasks_per_child: (int | None)=None):
from torch import multiprocessing as mp
if (pool is None):
if (num_workers is None):
num_workers = mp.cpu_count()
if (generator is None):
generator = torch.Generator()
seed = torch.empty((), dtype=torch.int64).random_(generator=generator).item()
queue = mp.Queue(maxsize=num_workers)
for i in range(num_workers):
queue.put(i)
with mp.Pool(processes=num_workers, initializer=_proc_init, initargs=(seed, queue), maxtasksperchild=max_tasks_per_child) as pool:
return self.map(fn, dim=dim, chunksize=chunksize, num_chunks=num_chunks, pool=pool)
num_workers = pool._processes
dim_orig = dim
if (dim < 0):
dim = (self.ndim + dim)
if ((dim < 0) or (dim >= self.ndim)):
raise ValueError(f'Got incompatible dimension {dim_orig}')
self_split = _split_tensordict(self, chunksize, num_chunks, num_workers, dim)
call_chunksize = 1
imap = pool.imap(fn, self_split, call_chunksize)
if (chunksize == 0):
out = torch.stack(list(imap), dim)
else:
out = torch.cat(list(imap), dim)
return out
def _add_batch_dim(self, *, in_dim, vmap_level):
...
def _remove_batch_dim(self, vmap_level, batch_size, out_dim):
...
def _convert_to_tensor(self, array: np.ndarray) -> Tensor:
if isinstance(array, np.bool_):
array = array.item()
if isinstance(array, list):
array = np.asarray(array)
if ((not isinstance(array, np.ndarray)) and hasattr(array, 'numpy')):
array = array.numpy()
try:
return torch.as_tensor(array, device=self.device)
except Exception:
if hasattr(array, 'shape'):
return torch.full(array.shape, float('NaN'))
from tensordict.tensorclass import NonTensorData
return NonTensorData(array, batch_size=self.batch_size, device=self.device, names=(self.names if self._has_names() else None))
def _convert_to_tensordict(self, dict_value: dict[(str, Any)]) -> T:
...
def _check_batch_size(self) -> None:
batch_dims = self.batch_dims
for value in self.values():
if _is_tensor_collection(type(value)):
value._check_batch_size()
if (_shape(value)[:batch_dims] != self.batch_size):
raise RuntimeError(f'batch_size is incongruent: got value with shape {_shape(value)}, expected leading dims {self.batch_size}.')
def _check_is_shared(self) -> bool:
...
def _check_new_batch_size(self, new_size: torch.Size) -> None:
batch_dims = len(new_size)
for (key, tensor) in self.items():
if (_shape(tensor)[:batch_dims] != new_size):
raise RuntimeError(f'the tensor {key} has shape {_shape(tensor)} which is incompatible with the batch-size {new_size}.')
def _check_device(self) -> None:
...
def _validate_key(self, key: NestedKey) -> NestedKey:
key = _unravel_key_to_tuple(key)
if (not key):
raise KeyError(_GENERIC_NESTED_ERR.format(key))
return key
def _validate_value(self, value: (CompatibleType | dict[(str, CompatibleType)]), *, check_shape: bool=True) -> (CompatibleType | dict[(str, CompatibleType)]):
cls = type(value)
is_tc = _is_tensor_collection(cls)
if (is_tc or issubclass(cls, tuple(_ACCEPTED_CLASSES))):
pass
elif issubclass(cls, dict):
value = self._convert_to_tensordict(value)
is_tc = True
else:
try:
value = self._convert_to_tensor(value)
except ValueError as err:
raise ValueError(f'TensorDict conversion only supports tensorclasses, tensordicts, numeric scalars and tensors. Got {type(value)}') from err
batch_size = self.batch_size
batch_dims = len(batch_size)
if (check_shape and batch_size and (_shape(value)[:batch_dims] != batch_size)):
if is_tc:
value = value.clone(recurse=False)
value.batch_size = self.batch_size
else:
raise RuntimeError(f'batch dimension mismatch, got self.batch_size={self.batch_size} and value.shape={_shape(value)}.')
device = self.device
if ((device is not None) and (value.device != device)):
value = value.to(device, non_blocking=True)
if (is_tc and check_shape):
has_names = self._has_names()
if (has_names and (value.names[:batch_dims] != self.names)):
value = value.clone(False).refine_names(*self.names)
elif ((not has_names) and value._has_names()):
self.names = value.names[:self.batch_dims]
return value
@property
def _last_op_queue(self):
last_op_queue = self.__dict__.get('__last_op_queue', None)
if (last_op_queue is None):
last_op_queue = collections.deque()
self.__dict__['__last_op_queue'] = last_op_queue
return last_op_queue
def __enter__(self):
self._last_op_queue.append(self._last_op)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if ((exc_type is not None) and issubclass(exc_type, Exception)):
return False
_last_op = self._last_op_queue.pop()
if (_last_op is not None):
(last_op, (args, kwargs, out)) = _last_op
if (last_op == self.__class__.lock_.__name__):
return self.unlock_()
elif (last_op == self.__class__.unlock_.__name__):
return self.lock_()
if (last_op == self.__class__.to_module.__name__):
if is_tensor_collection(out):
return self.to_module(*args, **kwargs, swap_dest=out)
else:
raise RuntimeError('to_module cannot be used as a decorator when return_swap=False.')
else:
raise NotImplementedError(f'Unrecognised function {last_op}.')
return self
def select(self, *keys: str, inplace: bool=False, strict: bool=True) -> T:
...
def exclude(self, *keys: str, inplace: bool=False) -> T:
target = (self if inplace else self.clone(recurse=False))
for key in keys:
if (key in self.keys(True)):
del target[key]
return target
def to_tensordict(self) -> T:
from tensordict import TensorDict
return TensorDict({key: (value.clone() if (not _is_tensor_collection(value.__class__)) else value.to_tensordict()) for (key, value) in self.items()}, device=self.device, batch_size=self.batch_size, names=(self.names if self._has_names() else None))
def clone(self, recurse: bool=True) -> T:
...
def copy(self):
return self.clone(recurse=False)
def as_tensor(self):
def as_tensor(tensor):
try:
return tensor.as_tensor()
except AttributeError:
return tensor
return self._fast_apply(as_tensor)
def to_dict(self) -> dict[(str, Any)]:
return {key: (value.to_dict() if _is_tensor_collection(type(value)) else value) for (key, value) in self.items()}
def to_h5(self, filename, **kwargs):
from tensordict.persistent import PersistentTensorDict
out = PersistentTensorDict.from_dict(self, filename=filename, **kwargs)
if self._has_names():
out.names = self.names
return out
def empty(self, recurse=False) -> T:
if (not recurse):
return self.select()
return self.exclude(*self.keys(True, True))
def zero_(self) -> T:
for key in self.keys():
self.fill_(key, 0)
return self
def fill_(self, key: NestedKey, value: (float | bool)) -> T:
key = _unravel_key_to_tuple(key)
data = self._get_tuple(key, NO_DEFAULT)
if _is_tensor_collection(type(data)):
data._fast_apply((lambda x: x.fill_(value)), inplace=True)
else:
data = data.fill_(value)
self._set_tuple(key, data, inplace=True, validated=True)
return self
def masked_fill_(self, mask: Tensor, value: (float | bool)) -> T:
...
def masked_fill(self, mask: Tensor, value: (float | bool)) -> T:
...
def where(self, condition, other, *, out=None, pad=None):
...
def masked_select(self, mask: Tensor) -> T:
...
def _change_batch_size(self, new_size: torch.Size) -> None:
...
def is_contiguous(self) -> bool:
...
def contiguous(self) -> T:
...
def flatten_keys(self, separator: str='.', inplace: bool=False, is_leaf: (Callable[([Type], bool)] | None)=None) -> T:
if (is_leaf is None):
is_leaf = _is_leaf_nontensor
all_leaves = list(self.keys(include_nested=True, leaves_only=True, is_leaf=is_leaf))
all_leaves_flat = [(separator.join(key) if isinstance(key, tuple) else key) for key in all_leaves]
if (len(set(all_leaves_flat)) < len(set(all_leaves))):
seen = set()
conflicts = []
for (leaf, leaf_flat) in zip(all_leaves, all_leaves_flat):
if (leaf_flat in seen):
conflicts.append(leaf)
else:
seen.add(leaf_flat)
raise KeyError(f'Flattening keys in tensordict causes keys {conflicts} to collide.')
if inplace:
root_keys = set(self.keys())
for (leaf, leaf_flat) in zip(all_leaves, all_leaves_flat):
self.rename_key_(leaf, leaf_flat)
if isinstance(leaf, str):
root_keys.discard(leaf)
self.exclude(*root_keys, inplace=True)
return self
else:
result = self.empty()
for (leaf, leaf_flat) in zip(all_leaves, all_leaves_flat):
result._set_str(leaf_flat, self.get(leaf), validated=True, inplace=False)
shared = result._is_shared = self._is_shared
mmap = result._is_memmap = self._is_memmap
if (shared or mmap):
result._is_locked = True
return result
def unflatten_keys(self, separator: str='.', inplace: bool=False) -> T:
if (not inplace):
return self.copy().unflatten_keys(separator=separator, inplace=True)
else:
for key in list(self.keys()):
if (separator in key):
new_key = tuple(key.split(separator))
try:
self.rename_key_(key, new_key, safe=True)
except KeyError:
raise KeyError(f'Unflattening key(s) in tensordict will override an existing entry for the unflattened key {new_key}.')
return self
def _index_tensordict(self, index: IndexType, new_batch_size: (torch.Size | None)=None, names: (List[str] | None)=None) -> T:
...
@property
def is_locked(self) -> bool:
return self._is_locked
@is_locked.setter
def is_locked(self, value: bool) -> None:
if value:
self.lock_()
else:
self.unlock_()
def _propagate_lock(self, lock_parents_weakrefs=None):
self._is_locked = True
is_root = (lock_parents_weakrefs is None)
if is_root:
lock_parents_weakrefs = []
self._lock_parents_weakrefs = (self._lock_parents_weakrefs + lock_parents_weakrefs)
lock_parents_weakrefs = (copy(lock_parents_weakrefs) + [weakref.ref(self)])
for value in self.values():
if _is_tensor_collection(type(value)):
value._propagate_lock(lock_parents_weakrefs)
@property
def _lock_parents_weakrefs(self):
_lock_parents_weakrefs = self.__dict__.get('__lock_parents_weakrefs', None)
if (_lock_parents_weakrefs is None):
self.__dict__['__lock_parents_weakrefs'] = []
_lock_parents_weakrefs = self.__dict__['__lock_parents_weakrefs']
return _lock_parents_weakrefs
@_lock_parents_weakrefs.setter
def _lock_parents_weakrefs(self, value: list):
self.__dict__['__lock_parents_weakrefs'] = value
@_decorator('is_locked')
def lock_(self) -> T:
if self.is_locked:
return self
self._propagate_lock()
return self
@_cache
def _propagate_unlock(self):
self._is_locked = False
self._is_shared = False
self._is_memmap = False
sub_tds = []
for value in self.values():
if _is_tensor_collection(type(value)):
sub_tds.extend(value._propagate_unlock())
sub_tds.append(value)
return sub_tds
def _check_unlock(self):
for ref in self._lock_parents_weakrefs:
obj = ref()
if ((obj is not None) and obj._is_locked):
raise RuntimeError(f'Cannot unlock a tensordict that is part of a locked graph. Unlock the root tensordict first. If the tensordict is part of multiple graphs, group the graphs under a common tensordict and unlock this root. self: {self}, obj: {obj}')
try:
self._lock_parents_weakrefs = []
except AttributeError:
pass
@_decorator('is_locked')
def unlock_(self) -> T:
try:
sub_tds = self._propagate_unlock()
for sub_td in sub_tds:
sub_td._check_unlock()
self._check_unlock()
except RuntimeError as err:
self.lock_()
raise err
return self
@overload
def to(self: T, device: Optional[Union[(int, device)]]=..., dtype: Optional[Union[(torch.device, str)]]=..., non_blocking: bool=...) -> T:
...
@overload
def to(self: T, dtype: Union[(torch.device, str)], non_blocking: bool=...) -> T:
...
@overload
def to(self: T, tensor: Tensor, non_blocking: bool=...) -> T:
...
@overload
def to(self: T, *, other: T, non_blocking: bool=...) -> T:
...
@overload
def to(self: T, *, batch_size: torch.Size) -> T:
...
def to(self, *args, **kwargs) -> T:
...
def is_floating_point(self):
for item in self.values(include_nested=True, leaves_only=True):
if (not item.is_floating_point()):
return False
else:
return True
def double(self):
return self._fast_apply((lambda x: x.double()))
def float(self):
return self._fast_apply((lambda x: x.float()))
def int(self):
return self._fast_apply((lambda x: x.int()))
def bool(self):
return self._fast_apply((lambda x: x.bool()))
def half(self):
return self._fast_apply((lambda x: x.half()))
def bfloat16(self):
return self._fast_apply((lambda x: x.bfloat16()))
def type(self, dst_type):
return self._fast_apply((lambda x: x.type(dst_type)))
def requires_grad(self) -> bool:
return any((v.requires_grad for v in self.values()))
def detach_(self) -> T:
...
def detach(self) -> T:
return self._fast_apply((lambda x: x.detach())) |
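# A minimal usage sketch, assuming the public `tensordict` package; not taken from the
# source above. It exercises flatten()/flatten_keys()/unflatten_keys() as defined in the
# mixin methods shown above.
import torch
from tensordict import TensorDict

td = TensorDict(
    {"a": torch.zeros(3, 4, 5), "nested": {"b": torch.ones(3, 4)}},
    batch_size=[3, 4],
)
flat = td.flatten_keys(separator=".")            # keys become "a" and "nested.b"
assert "nested.b" in flat.keys()
restored = flat.unflatten_keys(separator=".")    # brings back the nested layout
merged = td.flatten(0, 1)                        # batch dims (3, 4) -> (12,)
assert merged.batch_size == torch.Size([12])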
class FixtureManager():
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session: 'Session') -> None:
self.session = session
self.config: Config = session.config
self._arg2fixturedefs: Final[Dict[(str, List[FixtureDef[Any]])]] = {}
self._holderobjseen: Final[Set[object]] = set()
self._nodeid_autousenames: Final[Dict[(str, List[str])]] = {'': self.config.getini('usefixtures')}
session.config.pluginmanager.register(self, 'funcmanage')
def getfixtureinfo(self, node: nodes.Item, func: Optional[Callable[(..., object)]], cls: Optional[type]) -> FuncFixtureInfo:
if ((func is not None) and (not getattr(node, 'nofuncargs', False))):
argnames = getfuncargnames(func, name=node.name, cls=cls)
else:
argnames = ()
usefixturesnames = self._getusefixturesnames(node)
autousenames = self._getautousenames(node.nodeid)
initialnames = deduplicate_names(autousenames, usefixturesnames, argnames)
direct_parametrize_args = _get_direct_parametrize_args(node)
(names_closure, arg2fixturedefs) = self.getfixtureclosure(parentnode=node, initialnames=initialnames, ignore_args=direct_parametrize_args)
return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
nodeid = None
try:
p = absolutepath(plugin.__file__)
except AttributeError:
pass
else:
if (p.name == 'conftest.py'):
try:
nodeid = str(p.parent.relative_to(self.config.rootpath))
except ValueError:
nodeid = ''
if (nodeid == '.'):
nodeid = ''
if (os.sep != nodes.SEP):
nodeid = nodeid.replace(os.sep, nodes.SEP)
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid: str) -> Iterator[str]:
for parentnodeid in nodes.iterparentnodeids(nodeid):
basenames = self._nodeid_autousenames.get(parentnodeid)
if basenames:
(yield from basenames)
def _getusefixturesnames(self, node: nodes.Item) -> Iterator[str]:
for mark in node.iter_markers(name='usefixtures'):
(yield from mark.args)
def getfixtureclosure(self, parentnode: nodes.Node, initialnames: Tuple[(str, ...)], ignore_args: AbstractSet[str]) -> Tuple[(List[str], Dict[(str, Sequence[FixtureDef[Any]])])]:
parentid = parentnode.nodeid
fixturenames_closure = list(initialnames)
arg2fixturedefs: Dict[(str, Sequence[FixtureDef[Any]])] = {}
lastlen = (- 1)
while (lastlen != len(fixturenames_closure)):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if (argname in ignore_args):
continue
if (argname in arg2fixturedefs):
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
for arg in fixturedefs[(- 1)].argnames:
if (arg not in fixturenames_closure):
fixturenames_closure.append(arg)
def sort_by_scope(arg_name: str) -> Scope:
try:
fixturedefs = arg2fixturedefs[arg_name]
except KeyError:
return Scope.Function
else:
return fixturedefs[(- 1)]._scope
fixturenames_closure.sort(key=sort_by_scope, reverse=True)
return (fixturenames_closure, arg2fixturedefs)
def pytest_generate_tests(self, metafunc: 'Metafunc') -> None:
def get_parametrize_mark_argnames(mark: Mark) -> Sequence[str]:
(args, _) = ParameterSet._parse_parametrize_args(*mark.args, **mark.kwargs)
return args
for argname in metafunc.fixturenames:
fixture_defs = metafunc._arg2fixturedefs.get(argname)
if (not fixture_defs):
continue
if any(((argname in get_parametrize_mark_argnames(mark)) for mark in metafunc.definition.iter_markers('parametrize'))):
continue
for fixturedef in reversed(fixture_defs):
if (fixturedef.params is not None):
metafunc.parametrize(argname, fixturedef.params, indirect=True, scope=fixturedef.scope, ids=fixturedef.ids)
break
if (argname not in fixturedef.argnames):
break
def pytest_collection_modifyitems(self, items: List[nodes.Item]) -> None:
items[:] = reorder_items(items)
@overload
def parsefactories(self, node_or_obj: nodes.Node, *, unittest: bool=...) -> None:
raise NotImplementedError()
@overload
def parsefactories(self, node_or_obj: object, nodeid: Optional[str], *, unittest: bool=...) -> None:
raise NotImplementedError()
def parsefactories(self, node_or_obj: Union[(nodes.Node, object)], nodeid: Union[(str, NotSetType, None)]=NOTSET, *, unittest: bool=False) -> None:
if (nodeid is not NOTSET):
holderobj = node_or_obj
else:
assert isinstance(node_or_obj, nodes.Node)
holderobj = cast(object, node_or_obj.obj)
assert isinstance(node_or_obj.nodeid, str)
nodeid = node_or_obj.nodeid
if (holderobj in self._holderobjseen):
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
if (isinstance(holderobj, nodes.Node) and (name == 'fspath')):
continue
obj = safe_getattr(holderobj, name, None)
marker = getfixturemarker(obj)
if (not isinstance(marker, FixtureFunctionMarker)):
continue
if marker.name:
name = marker.name
obj = get_real_method(obj, holderobj)
fixture_def = FixtureDef(fixturemanager=self, baseid=nodeid, argname=name, func=obj, scope=marker.scope, params=marker.params, unittest=unittest, ids=marker.ids, _ispytest=True)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixture_def.has_location:
faclist.append(fixture_def)
else:
i = len([f for f in faclist if (not f.has_location)])
faclist.insert(i, fixture_def)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_autousenames.setdefault((nodeid or ''), []).extend(autousenames)
def getfixturedefs(self, argname: str, nodeid: str) -> Optional[Sequence[FixtureDef[Any]]]:
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs: Iterable[FixtureDef[Any]], nodeid: str) -> Iterator[FixtureDef[Any]]:
parentnodeids = set(nodes.iterparentnodeids(nodeid))
for fixturedef in fixturedefs:
if (fixturedef.baseid in parentnodeids):
(yield fixturedef) |
def show_array_list(array_list, title):
(fig, ax) = plt.subplots(1, 1)
if isinstance(array_list[0][0], str):
integers = []
for array in array_list:
integers.append(list(map((lambda s: ord(s)), array)))
ax.imshow(np.array(integers), cmap=plt.cm.Oranges)
else:
ax.imshow(np.array(array_list), cmap=plt.cm.Oranges)
for i in range(len(array_list)):
for j in range(len(array_list[0])):
ax.text(j, i, array_list[i][j], ha='center', va='center')
ax.tick_params(left=False, labelleft=False)
ax.set_title(title) |
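# Minimal usage sketch for show_array_list above, assuming the usual module-level
# imports (matplotlib.pyplot as plt, numpy as np) that the function body relies on.
import numpy as np
import matplotlib.pyplot as plt

show_array_list([[3, 1, 4], [1, 5, 9]], title='numeric grid')
show_array_list([list('abc'), list('xyz')], title='character grid')  # str cells are mapped through ord()
plt.show()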
class CheetahXmlLexer(DelegatingLexer):
name = 'XML+Cheetah'
aliases = ['xml+cheetah', 'xml+spitfire']
mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']
url = ''  # the URL string was truncated in the source
version_added = ''
def __init__(self, **options):
super().__init__(XmlLexer, CheetahLexer, **options) |
@pytest.mark.parametrize('multi_optimziers, max_iters', [(True, 10), (True, 2), (False, 10), (False, 2)])
def test_one_cycle_runner_hook(multi_optimziers, max_iters):
with pytest.raises(AssertionError):
OneCycleLrUpdaterHook(max_lr=0.1, by_epoch=True)
with pytest.raises(ValueError):
OneCycleLrUpdaterHook(max_lr=0.1, pct_start=(- 0.1))
with pytest.raises(ValueError):
OneCycleLrUpdaterHook(max_lr=0.1, anneal_strategy='sin')
sys.modules['pavi'] = MagicMock()
loader = DataLoader(torch.ones((10, 2)))
runner = _build_demo_runner(multi_optimziers=multi_optimziers)
hook_cfg = dict(type='OneCycleMomentumUpdaterHook', base_momentum=0.85, max_momentum=0.95, pct_start=0.5, anneal_strategy='cos', three_phase=False)
runner.register_hook_from_cfg(hook_cfg)
hook_cfg = dict(type='OneCycleLrUpdaterHook', max_lr=0.01, pct_start=0.5, anneal_strategy='cos', div_factor=25, final_div_factor=10000.0, three_phase=False)
runner.register_hook_from_cfg(hook_cfg)
runner.register_hook_from_cfg(dict(type='IterTimerHook'))
runner.register_hook(IterTimerHook())
hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
runner.register_hook(hook)
runner.run([loader], [('train', 1)])
shutil.rmtree(runner.work_dir)
assert hasattr(hook, 'writer')
if multi_optimziers:
calls = [call('train', {'learning_rate/model1': 0., 'learning_rate/model2': 0., 'momentum/model1': 0.95, 'momentum/model2': 0.95}, 1), call('train', {'learning_rate/model1': 0., 'learning_rate/model2': 0., 'momentum/model1': 0., 'momentum/model2': 0.}, 6), call('train', {'learning_rate/model1': 4e-08, 'learning_rate/model2': 4e-08, 'momentum/model1': 0.95, 'momentum/model2': 0.95}, 10)]
else:
calls = [call('train', {'learning_rate': 0., 'momentum': 0.95}, 1), call('train', {'learning_rate': 0., 'momentum': 0.}, 6), call('train', {'learning_rate': 4e-08, 'momentum': 0.95}, 10)]
hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
sys.modules['pavi'] = MagicMock()
loader = DataLoader(torch.ones((10, 2)))
runner = _build_demo_runner(runner_type='IterBasedRunner', max_epochs=None, max_iters=max_iters)
args = dict(max_lr=0.01, total_steps=5, pct_start=0.5, anneal_strategy='linear', div_factor=25, final_div_factor=10000.0)
hook = OneCycleLrUpdaterHook(**args)
runner.register_hook(hook)
if (max_iters == 10):
with pytest.raises(ValueError):
runner.run([loader], [('train', 1)])
else:
runner.run([loader], [('train', 1)])
lr_last = runner.current_lr()
t = torch.tensor([0.0], requires_grad=True)
optim = torch.optim.SGD([t], lr=0.01)
lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(optim, **args)
lr_target = []
for _ in range(max_iters):
optim.step()
lr_target.append(optim.param_groups[0]['lr'])
lr_scheduler.step()
assert (lr_target[(- 1)] == lr_last[0]) |
class Input():
def __init__(self, definition: (Definition | None)=None) -> None:
self._definition: Definition
self._stream: TextIO = None
self._options: dict[(str, Any)] = {}
self._arguments: dict[(str, Any)] = {}
self._interactive: (bool | None) = None
if (definition is None):
self._definition = Definition()
else:
self.bind(definition)
self.validate()
def arguments(self) -> dict[(str, Any)]:
return {**self._definition.argument_defaults, **self._arguments}
def options(self) -> dict[(str, Any)]:
return {**self._definition.option_defaults, **self._options}
def stream(self) -> TextIO:
return self._stream
def first_argument(self) -> (str | None):
raise NotImplementedError
def script_name(self) -> (str | None):
raise NotImplementedError
def read(self, length: int, default: str='') -> str:
if (not self.is_interactive()):
return default
return self._stream.read(length)
def read_line(self, length: int=(- 1), default: str='') -> str:
if (not self.is_interactive()):
return default
return self._stream.readline(length)
def close(self) -> None:
self._stream.close()
def is_closed(self) -> bool:
return self._stream.closed
def is_interactive(self) -> bool:
return (True if (self._interactive is None) else self._interactive)
def interactive(self, interactive: bool=True) -> None:
self._interactive = interactive
def bind(self, definition: Definition) -> None:
self._arguments = {}
self._options = {}
self._definition = definition
self._parse()
def validate(self) -> None:
missing_arguments = []
for argument in self._definition.arguments:
if ((argument.name not in self._arguments) and argument.is_required()):
missing_arguments.append(argument.name)
if missing_arguments:
raise CleoMissingArgumentsError(f"""Not enough arguments (missing: "{', '.join(missing_arguments)}")""")
def argument(self, name: str) -> Any:
if (not self._definition.has_argument(name)):
raise CleoValueError(f'The argument "{name}" does not exist')
if (name in self._arguments):
return self._arguments[name]
return self._definition.argument(name).default
def set_argument(self, name: str, value: Any) -> None:
if (not self._definition.has_argument(name)):
raise CleoValueError(f'The argument "{name}" does not exist')
self._arguments[name] = value
def has_argument(self, name: str) -> bool:
return self._definition.has_argument(name)
def option(self, name: str) -> Any:
if (not self._definition.has_option(name)):
raise CleoValueError(f'The option "--{name}" does not exist')
if (name in self._options):
return self._options[name]
return self._definition.option(name).default
def set_option(self, name: str, value: Any) -> None:
if (not self._definition.has_option(name)):
raise CleoValueError(f'The option "--{name}" does not exist')
self._options[name] = value
def has_option(self, name: str) -> bool:
return self._definition.has_option(name)
def escape_token(self, token: str) -> str:
if re.match('^[\\w-]+$', token):
return token
return shell_quote(token)
def set_stream(self, stream: TextIO) -> None:
self._stream = stream
def has_parameter_option(self, values: (str | list[str]), only_params: bool=False) -> bool:
raise NotImplementedError
def parameter_option(self, values: (str | list[str]), default: Any=False, only_params: bool=False) -> Any:
raise NotImplementedError
def _parse(self) -> None:
raise NotImplementedError |
class RestrictedImageNet(DataSet):
def __init__(self, data_path, **kwargs):
name = 'restricted_imagenet'
super(RestrictedImageNet, self).__init__(name)
self.data_path = data_path
self.mean = constants.IMAGENET_MEAN
self.std = constants.IMAGENET_STD
self.num_classes = len(constants.RESTRICTED_RANGES)
self.transform_train = constants.TRAIN_TRANSFORMS_224
self.transform_test = constants.TEST_TRANSFORMS_224
self.label_mapping = get_label_mapping(self.ds_name, constants.RESTRICTED_RANGES)
def get_model(self, arch):
return models.__dict__[arch](num_classes=self.num_classes) |
class FreshSeedPrioritizerWorklist(SeedScheduler):
def __init__(self, manager: 'SeedManager'):
self.manager = manager
self.fresh = []
self.worklist = dict()
def __len__(self) -> int:
s = set()
for seeds in list(self.worklist.values()):
s.update(seeds)
return (len(self.fresh) + len(s))
def has_seed_remaining(self) -> bool:
return (len(self) != 0)
def add(self, seed: Seed) -> None:
if seed.coverage_objectives:
for item in seed.coverage_objectives:
if (item in self.worklist):
self.worklist[item].add(seed)
else:
self.worklist[item] = {seed}
else:
self.fresh.append(seed)
def update_worklist(self, coverage: GlobalCoverage) -> None:
to_remove = [x for x in self.worklist if coverage.is_covered(x)]
for item in to_remove:
for seed in self.worklist.pop(item):
seed.coverage_objectives.remove(item)
if (not seed.coverage_objectives):
self.manager.drop_seed(seed)
def can_solve_models(self) -> bool:
return (not self.fresh)
def pick(self) -> Optional[Seed]:
if self.fresh:
return self.fresh.pop(0)
if (... in self.worklist):
it = self.worklist[...].pop()
if (not self.worklist[...]):
self.worklist.pop(...)
return it
if (not self.worklist):
return None
k = list(self.worklist.keys())[0]
seed = self.worklist[k].pop()
for it in seed.coverage_objectives:
if (it != k):
self.worklist[it].remove(seed)
if (not self.worklist[it]):
self.worklist.pop(it)
return seed
def post_execution(self) -> None:
logger.info(f'Seed Scheduler: worklist:{len(self)} Coverage objectives:{len(self.worklist)} (fresh:{len(self.fresh)})')
def post_exploration(self, workspace: Workspace) -> None:
workspace.save_metadata_file('coverage_objectives.json', json.dumps(list(self.worklist.keys()))) |
@pytest.mark.end_to_end()
def test_parametrization_in_for_loop_from_decorator(tmp_path, runner):
source = '\n import pytask\n\n for i in range(2):\n\n @pytask.task(name="deco_task", kwargs={"i": i, "produces": f"out_{i}.txt"})\n def example(produces, i):\n produces.write_text(str(i))\n '
tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix()])
assert (result.exit_code == ExitCode.OK)
assert ('deco_task[out_0.txt-0]' in result.output)
assert ('deco_task[out_1.txt-1]' in result.output) |
class TripletMarginLoss(BaseMetricLossFunction):
def __init__(self, margin=0.05, swap=False, smooth_loss=False, triplets_per_anchor='all', **kwargs):
super().__init__(**kwargs)
self.margin = margin
self.swap = swap
self.smooth_loss = smooth_loss
self.triplets_per_anchor = triplets_per_anchor
self.add_to_recordable_attributes(list_of_names=['margin'], is_stat=False)
def compute_loss(self, embeddings, labels, indices_tuple):
indices_tuple = lmu.convert_to_triplets(indices_tuple, labels, t_per_anchor=self.triplets_per_anchor)
(anchor_idx, positive_idx, negative_idx) = indices_tuple
if (len(anchor_idx) == 0):
return self.zero_losses()
mat = self.distance(embeddings)
ap_dists = mat[(anchor_idx, positive_idx)]
an_dists = mat[(anchor_idx, negative_idx)]
if self.swap:
pn_dists = mat[(positive_idx, negative_idx)]
an_dists = self.distance.smallest_dist(an_dists, pn_dists)
current_margins = self.distance.margin(ap_dists, an_dists)
violation = (current_margins + self.margin)
if self.smooth_loss:
loss = torch.nn.functional.softplus(violation)
else:
loss = torch.nn.functional.relu(violation)
return {'loss': {'losses': loss, 'indices': indices_tuple, 'reduction_type': 'triplet'}}
def get_default_reducer(self):
return AvgNonZeroReducer() |
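# Minimal usage sketch, assuming the pytorch-metric-learning style calling convention
# (loss_fn(embeddings, labels)) that the base class above appears to provide; distance
# and reducer objects come from BaseMetricLossFunction defaults.
import torch

loss_fn = TripletMarginLoss(margin=0.1)
embeddings = torch.randn(32, 128, requires_grad=True)  # batch of 32 embedding vectors
labels = torch.randint(0, 4, (32,))                    # 4 arbitrary classes
loss = loss_fn(embeddings, labels)                     # triplets mined internally ('all' per anchor)
loss.backward()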
class AioClient(BaseClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs, isasync=True)
self._closed = False
self._events = {}
async def register_event(self, event: str, func: callable, args=None):
if (args is None):
args = {}
if (not inspect.iscoroutinefunction(func)):
raise InvalidArgument('Coroutine', 'Subroutine', 'Event function must be a coroutine')
elif (len(inspect.signature(func).parameters) != 1):
raise ArgumentError
(await self.subscribe(event, args))
self._events[event.lower()] = func
async def unregister_event(self, event: str, args=None):
if (args is None):
args = {}
event = event.lower()
if (event not in self._events):
raise EventNotFound
(await self.unsubscribe(event, args))
del self._events[event]
async def on_event(self, data):
if self.sock_reader._eof:
raise PyPresenceException('feed_data after feed_eof')
if (not data):
return
self.sock_reader._buffer.extend(data)
self.sock_reader._wakeup_waiter()
if ((self.sock_reader._transport is not None) and (not self.sock_reader._paused) and (len(self.sock_reader._buffer) > (2 * self.sock_reader._limit))):
try:
self.sock_reader._transport.pause_reading()
except NotImplementedError:
self.sock_reader._transport = None
else:
self.sock_reader._paused = True
payload = json.loads(data[8:].decode('utf-8'))
if (payload['evt'] is not None):
evt = payload['evt'].lower()
if (evt in self._events):
(await self._events[evt](payload['data']))
elif (evt == 'error'):
raise DiscordError(payload['data']['code'], payload['data']['message'])
async def authorize(self, client_id: str, scopes: List[str]):
payload = Payload.authorize(client_id, scopes)
self.send_data(1, payload)
return (await self.read_output())
async def authenticate(self, token: str):
payload = Payload.authenticate(token)
self.send_data(1, payload)
return (await self.read_output())
async def get_guilds(self):
payload = Payload.get_guilds()
self.send_data(1, payload)
return (await self.read_output())
async def get_guild(self, guild_id: str):
payload = Payload.get_guild(guild_id)
self.send_data(1, payload)
return (await self.read_output())
async def get_channel(self, channel_id: str):
payload = Payload.get_channel(channel_id)
self.send_data(1, payload)
return (await self.read_output())
async def get_channels(self, guild_id: str):
payload = Payload.get_channels(guild_id)
self.send_data(1, payload)
return (await self.read_output())
async def set_user_voice_settings(self, user_id: str, pan_left: float=None, pan_right: float=None, volume: int=None, mute: bool=None):
payload = Payload.set_user_voice_settings(user_id, pan_left, pan_right, volume, mute)
self.send_data(1, payload)
return (await self.read_output())
async def select_voice_channel(self, channel_id: str):
payload = Payload.select_voice_channel(channel_id)
self.send_data(1, payload)
return (await self.read_output())
async def get_selected_voice_channel(self):
payload = Payload.get_selected_voice_channel()
self.send_data(1, payload)
return (await self.read_output())
async def select_text_channel(self, channel_id: str):
payload = Payload.select_text_channel(channel_id)
self.send_data(1, payload)
return (await self.read_output())
async def set_activity(self, pid: int=os.getpid(), state: str=None, details: str=None, start: int=None, end: int=None, large_image: str=None, large_text: str=None, small_image: str=None, small_text: str=None, party_id: str=None, party_size: list=None, join: str=None, spectate: str=None, buttons: list=None, match: str=None, instance: bool=True):
payload = Payload.set_activity(pid, state, details, start, end, large_image, large_text, small_image, small_text, party_id, party_size, join, spectate, match, buttons, instance, activity=True)
self.send_data(1, payload)
return (await self.read_output())
async def clear_activity(self, pid: int=os.getpid()):
payload = Payload.set_activity(pid, activity=None)
self.send_data(1, payload)
return (await self.read_output())
async def subscribe(self, event: str, args=None):
if (args is None):
args = {}
payload = Payload.subscribe(event, args)
self.send_data(1, payload)
return (await self.read_output())
async def unsubscribe(self, event: str, args=None):
if (args is None):
args = {}
payload = Payload.unsubscribe(event, args)
self.send_data(1, payload)
return (await self.read_output())
async def get_voice_settings(self):
payload = Payload.get_voice_settings()
self.send_data(1, payload)
return (await self.read_output())
async def set_voice_settings(self, _input: dict=None, output: dict=None, mode: dict=None, automatic_gain_control: bool=None, echo_cancellation: bool=None, noise_suppression: bool=None, qos: bool=None, silence_warning: bool=None, deaf: bool=None, mute: bool=None):
payload = Payload.set_voice_settings(_input, output, mode, automatic_gain_control, echo_cancellation, noise_suppression, qos, silence_warning, deaf, mute)
self.send_data(1, payload)
return (await self.read_output())
async def capture_shortcut(self, action: str):
payload = Payload.capture_shortcut(action)
self.send_data(1, payload)
return (await self.read_output())
async def send_activity_join_invite(self, user_id: str):
payload = Payload.send_activity_join_invite(user_id)
self.send_data(1, payload)
return (await self.read_output())
async def close_activity_request(self, user_id: str):
payload = Payload.close_activity_request(user_id)
self.send_data(1, payload)
return (await self.read_output())
def close(self):
self.send_data(2, {'v': 1, 'client_id': self.client_id})
self.sock_writer.close()
self._closed = True
self.loop.close()
async def start(self):
(await self.handshake())
async def read(self):
return (await self.read_output()) |
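# Rough usage sketch, assuming a pypresence-style flow (handshake, then Rich Presence
# update); the client id below is a placeholder and event-loop handling is simplified.
import asyncio
import time

async def demo():
    client = AioClient('000000000000000000')  # placeholder application id
    await client.start()                       # performs the IPC handshake
    await client.set_activity(state='Editing', details='demo', start=int(time.time()))
    await asyncio.sleep(10)
    client.close()

asyncio.run(demo())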
def test_direct(debug_ctx, debug_trail, extra_policy, trail_select):
loader_getter = make_loader_getter(shape=shape(TestField('a', ParamKind.POS_OR_KW, is_required=True), TestField('b', ParamKind.POS_OR_KW, is_required=True)), name_layout=InputNameLayout(crown=InpDictCrown({'a': InpFieldCrown('a'), 'b': InpFieldCrown('b')}, extra_policy=extra_policy), extra_move=None), debug_trail=debug_trail, debug_ctx=debug_ctx)
if (extra_policy == ExtraCollect()):
pytest.raises(ValueError, loader_getter).match('Cannot create loader that collect extra data if InputShape does not take extra data')
return
loader = loader_getter()
assert (loader({'a': 1, 'b': 2}) == gauge(1, 2))
if (extra_policy == ExtraSkip()):
assert (loader({'a': 1, 'b': 2, 'c': 3}) == gauge(1, 2))
if (extra_policy == ExtraForbid()):
data = {'a': 1, 'b': 2, 'c': 3}
raises_exc(trail_select(disable=ExtraFieldsError({'c'}, data), first=ExtraFieldsError({'c'}, data), all=AggregateLoadError(f'while loading model {Gauge}', [ExtraFieldsError({'c'}, data)])), (lambda : loader(data)))
raises_exc(trail_select(disable=LoadError(), first=with_trail(LoadError(), ['b']), all=AggregateLoadError(f'while loading model {Gauge}', [with_trail(LoadError(), ['b'])])), (lambda : loader({'a': 1, 'b': LoadError()})))
data = {'a': 1}
raises_exc(trail_select(disable=NoRequiredFieldsError({'b'}, data), first=NoRequiredFieldsError({'b'}, data), all=AggregateLoadError(f'while loading model {Gauge}', [NoRequiredFieldsError({'b'}, data)])), (lambda : loader({'a': 1})))
raises_exc(trail_select(disable=TypeLoadError(CollectionsMapping, 'bad input value'), first=TypeLoadError(CollectionsMapping, 'bad input value'), all=AggregateLoadError(f'while loading model {Gauge}', [TypeLoadError(CollectionsMapping, 'bad input value')])), (lambda : loader('bad input value'))) |
def grokverify(input):
storageSuccessFlag = True
success = True
if dsz.file.Exists('tm154d.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154d.da dump file exists ... this should not be here', dsz.ERROR)
if dsz.file.Exists('tm154p.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154p.da overflow file exists ... log may be full', dsz.ERROR)
if dsz.file.Exists('tm154_.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154_.da config file exists ... ', dsz.GOOD)
if dsz.file.Exists('tm154o.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154o.da storage file exists ... SUCCESSFUL', dsz.GOOD)
else:
dsz.ui.Echo('tm154o.da storage file missing ... FAILED', dsz.ERROR)
storageSuccessFlag = False
if (storageSuccessFlag == True):
dsz.ui.Echo('GROK should be installed on target... only way to confirm is with DOUBLEFEATURE', dsz.GOOD)
else:
dsz.ui.Echo("GROK doesn't look like it is on target... only way to confirm is with DOUBLEFEATURE", dsz.ERROR)
success = False
return success |
class OpenExecutor(ActionExecutor):
def __init__(self, close: bool):
self.close = close
def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo, char_index, modify=True, in_place=False):
current_line = script[0]
info.set_current_line(current_line)
node = state.get_state_node(current_line.object())
if (node is None):
info.object_found_error()
elif self.check_openable(state, node, info, char_index):
new_node = node.copy()
new_node.states.discard((State.OPEN if self.close else State.CLOSED))
new_node.states.add((State.CLOSED if self.close else State.OPEN))
if modify:
(yield state.change_state([ChangeNode(new_node)], in_place=in_place))
else:
(yield state)
def check_openable(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo, char_index):
if ((Property.CAN_OPEN not in node.properties) and (node.class_name not in ['desk', 'window'])):
info.error('{} can not be opened', node)
return False
if (not _is_character_close_to(state, node, char_index)):
char_node = _get_character_node(state, char_index)
info.error('{} is not close to {}', char_node, node)
return False
if ((not self.close) and (_find_free_hand(state, char_index) is None)):
char_node = _get_character_node(state, char_index)
info.error('{} does not have a free hand', char_node)
return False
s = (State.OPEN if self.close else State.CLOSED)
if (s not in node.states):
info.error('{} is not {}', node, s.name.lower())
return False
if ((not self.close) and (State.ON in node.states)):
info.error('{} is still on'.format(node))
return False
return True |
class SDE(abc.ABC):
def __init__(self, N):
super().__init__()
self.N = N
@property
@abc.abstractmethod
def T(self):
pass
@abc.abstractmethod
def sde(self, x, t):
pass
@abc.abstractmethod
def marginal_prob(self, x, t):
pass
@abc.abstractmethod
def prior_sampling(self, rng, shape):
pass
@abc.abstractmethod
def prior_logp(self, z):
pass
def discretize(self, x, t):
dt = (1 / self.N)
(drift, diffusion) = self.sde(x, t)
f = (drift * dt)
G = (diffusion * jnp.sqrt(dt))
return (f, G)
def reverse(self, score_fn, probability_flow=False):
N = self.N
T = self.T
sde_fn = self.sde
discretize_fn = self.discretize
class RSDE(self.__class__):
def __init__(self):
self.N = N
self.probability_flow = probability_flow
@property
def T(self):
return T
def sde(self, x, t):
(drift, diffusion) = sde_fn(x, t)
score = score_fn(x, t)
drift = (drift - batch_mul((diffusion ** 2), (score * (0.5 if self.probability_flow else 1.0))))
diffusion = (jnp.zeros_like(t) if self.probability_flow else diffusion)
return (drift, diffusion)
def discretize(self, x, t):
(f, G) = discretize_fn(x, t)
rev_f = (f - batch_mul((G ** 2), (score_fn(x, t) * (0.5 if self.probability_flow else 1.0))))
rev_G = (jnp.zeros_like(t) if self.probability_flow else G)
return (rev_f, rev_G)
return RSDE() |
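# Reference note for reverse() above: it implements the standard reverse-time SDE
# (Anderson 1982; used by score-based generative models). For a forward SDE
#     dx = f(x, t) dt + g(t) dw,
# the reverse-time SDE is
#     dx = [f(x, t) - g(t)^2 * score(x, t)] dt + g(t) dw_bar,
# while the probability-flow ODE keeps half of the score correction and no diffusion:
#     dx = [f(x, t) - 0.5 * g(t)^2 * score(x, t)] dt.
# This is exactly the (0.5 if self.probability_flow else 1.0) factor in RSDE.sde/discretize.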
class TestExactCover(QiskitOptimizationTestCase):
def setUp(self):
super().setUp()
input_file = self.get_resource_path('sample.exactcover')
with open(input_file, encoding='utf8') as file:
self.list_of_subsets = json.load(file)
(self.qubit_op, _) = exact_cover.get_operator(self.list_of_subsets)
def _brute_force(self):
has_sol = False
def bitfield(n, length):
result = np.binary_repr(n, length)
return [int(digit) for digit in result]
subsets = len(self.list_of_subsets)
maximum = (2 ** subsets)
for i in range(maximum):
cur = bitfield(i, subsets)
cur_v = exact_cover.check_solution_satisfiability(cur, self.list_of_subsets)
if cur_v:
has_sol = True
break
return has_sol
def test_exact_cover(self):
algo = NumPyMinimumEigensolver(self.qubit_op, aux_operators=[])
result = algo.run()
x = sample_most_likely(result.eigenstate)
ising_sol = exact_cover.get_solution(x)
np.testing.assert_array_equal(ising_sol, [0, 1, 1, 0])
oracle = self._brute_force()
self.assertEqual(exact_cover.check_solution_satisfiability(ising_sol, self.list_of_subsets), oracle)
def test_exact_cover_vqe(self):
aqua_globals.random_seed = 10598
result = VQE(self.qubit_op, EfficientSU2(self.qubit_op.num_qubits, reps=5), COBYLA(), max_evals_grouped=2).run(QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_simulator=aqua_globals.random_seed, seed_transpiler=aqua_globals.random_seed))
x = sample_most_likely(result.eigenstate)
ising_sol = exact_cover.get_solution(x)
oracle = self._brute_force()
self.assertEqual(exact_cover.check_solution_satisfiability(ising_sol, self.list_of_subsets), oracle) |
def _organize_tasks(tasks: list[PTaskWithPath]) -> dict[(Path, list[PTaskWithPath])]:
dictionary: dict[(Path, list[PTaskWithPath])] = defaultdict(list)
for task in tasks:
dictionary[task.path].append(task)
sorted_dict = {}
for k in sorted(dictionary):
sorted_dict[k] = sorted(dictionary[k], key=(lambda x: x.name))
return sorted_dict |
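# Illustration of _organize_tasks above with a tiny stand-in type; the real
# PTaskWithPath is assumed to expose .path and .name, which is all the function uses.
from collections import namedtuple
from pathlib import Path

FakeTask = namedtuple('FakeTask', ['path', 'name'])
tasks = [FakeTask(Path('b.py'), 't2'), FakeTask(Path('a.py'), 't1'), FakeTask(Path('b.py'), 't1')]
grouped = _organize_tasks(tasks)
# -> {Path('a.py'): [t1], Path('b.py'): [t1, t2]}: paths sorted, tasks sorted by name.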
def setUpModule():
global cell, kpts, disp
cell = gto.Cell()
cell.atom = [['C', [0.0, 0.0, 0.0]], ['C', [1., 1., 1.]]]
cell.a = '\n 0., 3., 3.\n 3., 0., 3.\n 3., 3., 0.'
cell.basis = [[0, [1.3, 1]], [1, [0.8, 1]]]
cell.verbose = 5
cell.pseudo = 'gth-pade'
cell.unit = 'bohr'
cell.mesh = ([13] * 3)
cell.output = '/dev/null'
cell.build()
kpts = cell.make_kpts([1, 1, 3])
disp = 1e-05 |
def test():
spi = SPI(1, baudrate=40000000, sck=Pin(14), mosi=Pin(13))  # baudrate value was missing in the source; 40 MHz is a placeholder, adjust for your display
display = Display(spi, dc=Pin(4), cs=Pin(16), rst=Pin(17))
display.draw_image('images/RaspberryPiWB128x128.raw', 0, 0, 128, 128)
sleep(2)
display.draw_image('images/MicroPython128x128.raw', 0, 129, 128, 128)
sleep(2)
display.draw_image('images/Tabby128x128.raw', 112, 0, 128, 128)
sleep(2)
display.draw_image('images/Tortie128x128.raw', 112, 129, 128, 128)
sleep(9)
display.cleanup() |
def ext_modules():
if have_c_files:
source_extension = 'c'
else:
source_extension = 'pyx'
ext_modules = [Extension('pymssql._mssql', [join('src', 'pymssql', ('_mssql.%s' % source_extension))], extra_compile_args=['-DMSDBLIB'], include_dirs=include_dirs, library_dirs=library_dirs), Extension('pymssql._pymssql', [join('src', 'pymssql', ('_pymssql.%s' % source_extension))], extra_compile_args=['-DMSDBLIB'], include_dirs=include_dirs, library_dirs=library_dirs)]
for e in ext_modules:
e.cython_directives = {'language_level': sys.version_info[0]}
return ext_modules |
class ScaledDotAttention(nn.Module):
def __init__(self):
super(ScaledDotAttention, self).__init__()
self.eps = np.finfo(float).eps
self.max = torch.finfo().max
self.act_fn = nn.Softmax(dim=(- 1))
def forward(self, Query, Key, Value, mask=None):
hidden_size = Key.size()[1]
if (mask is None):
return torch.mm(self.act_fn((torch.mm(Query, torch.transpose(Key, 0, 1)) / np.sqrt(hidden_size))), Value)
else:
assert ((mask.dim() == 2) and (mask.size(0) == Query.size(0)) and (mask.size(1) == Key.size(0))), f'masking size mismatch in attention layer: mask_dim: {mask.dim()} mask_size: {mask.size()} query_size: {Query.size()} Key_size: {Key.size()}'  # mask must match the (queries x keys) energy matrix it multiplies below
energy = (self.act_fn((torch.matmul(Query, Key.T) / np.sqrt(hidden_size))) * mask)
att = (energy / torch.sum(energy, dim=(- 1), keepdim=True))
return att.matmul(Value) |
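# Minimal usage sketch for ScaledDotAttention above; shapes are illustrative
# (sequence x hidden 2-D tensors, matching the torch.mm calls in forward()).
import torch

attn = ScaledDotAttention()
Q = torch.randn(4, 6)      # 4 query positions, hidden size 6
K = torch.randn(6, 6)      # 6 key positions (square here, so key count equals hidden size)
V = torch.randn(6, 3)      # values may carry a different feature size
out = attn(Q, K, V)        # -> (4, 3)
mask = torch.ones(4, 6)    # optional attention mask, 1 = keep
out_masked = attn(Q, K, V, mask=mask)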
def is_simple_literal(t: ProperType) -> bool:
if isinstance(t, LiteralType):
return (t.fallback.type.is_enum or (t.fallback.type.fullname == 'builtins.str'))
if isinstance(t, Instance):
return ((t.last_known_value is not None) and isinstance(t.last_known_value.value, str))
return False |
class PenParameterItem(GroupParameterItem):
def __init__(self, param, depth):
self.defaultBtn = self.makeDefaultButton()
super().__init__(param, depth)
self.itemWidget = QtWidgets.QWidget()
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(2)
self.penLabel = PenPreviewLabel(param)
for child in (self.penLabel, self.defaultBtn):
layout.addWidget(child)
self.itemWidget.setLayout(layout)
def optsChanged(self, param, opts):
if (('enabled' in opts) or ('readonly' in opts)):
self.updateDefaultBtn()
def treeWidgetChanged(self):
ParameterItem.treeWidgetChanged(self)
tw = self.treeWidget()
if (tw is None):
return
tw.setItemWidget(self, 1, self.itemWidget)
defaultClicked = WidgetParameterItem.defaultClicked
makeDefaultButton = WidgetParameterItem.makeDefaultButton
def valueChanged(self, param, val):
self.updateDefaultBtn()
def updateDefaultBtn(self):
self.defaultBtn.setEnabled(((not self.param.valueIsDefault()) and self.param.opts['enabled'] and self.param.writable())) |
@winsdkapi(cc=STDCALL, params={'hWnd': HWND, 'lpdwProcessId': LPDWORD})
def hook_GetWindowThreadProcessId(ql: Qiling, address: int, params):
target = params['hWnd']
if ((target == ql.os.profile.getint('KERNEL', 'pid')) or (target == ql.os.profile.getint('KERNEL', 'shell_pid'))):
pid = ql.os.profile.getint('KERNEL', 'parent_pid')
else:
raise QlErrorNotImplemented('API not implemented')
dst = params['lpdwProcessId']
if dst:
ql.mem.write_ptr(dst, pid, 4)
return pid |
class Effect6720(BaseEffect):
type = 'passive'
def handler(fit, src, context, projectionRange, **kwargs):
fit.drones.filteredItemBoost((lambda mod: mod.item.requiresSkill('Drones')), 'shieldBonus', src.getModifiedItemAttr('shipBonusMC'), skill='Minmatar Cruiser', **kwargs)
fit.drones.filteredItemBoost((lambda mod: mod.item.requiresSkill('Drones')), 'structureDamageAmount', src.getModifiedItemAttr('shipBonusMC'), skill='Minmatar Cruiser', **kwargs)
fit.drones.filteredItemBoost((lambda mod: mod.item.requiresSkill('Drones')), 'armorDamageAmount', src.getModifiedItemAttr('shipBonusMC'), skill='Minmatar Cruiser', **kwargs) |
class Isotope(Molecule):
def __init__(self, molecule_name, isotope, isotope_name='', abundance=None, **kwargs):
super(Isotope, self).__init__(name=molecule_name, **kwargs)
if (not isinstance(isotope, int)):
raise TypeError('Wrong format for isotope {0}: expected int, got {1}'.format(isotope, type(isotope)))
self.iso = isotope
self.isotope_name = isotope_name
self.abundance = abundance |
def render_pep8_errors_e116(msg, _node, source_lines=None):
line = msg.line
curr_idx = (len(source_lines[(line - 1)]) - len(source_lines[(line - 1)].lstrip()))
(yield from render_context((line - 2), line, source_lines))
(yield (line, slice(0, curr_idx), LineType.ERROR, source_lines[(line - 1)]))
(yield from render_context((line + 1), (line + 3), source_lines)) |
@patch('pypyr.steps.filewriteyaml.Path')
def test_filewriteyaml_pass_with_empty_payload(mock_path):
context = Context({'k1': 'v1', 'fileWriteYaml': {'path': '/arb/blah', 'payload': ''}})
with io.StringIO() as out_text:
with patch('pypyr.steps.filewriteyaml.open', mock_open()) as mock_output:
mock_output.return_value.write.side_effect = out_text.write
filewrite.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 2), 'context should have 2 items'
assert (context['k1'] == 'v1')
assert (context['fileWriteYaml']['path'] == '/arb/blah')
assert (context['fileWriteYaml']['payload'] == '')
mock_path.assert_called_once_with('/arb/blah')
mocked_path = mock_path.return_value
mocked_path.parent.mkdir.assert_called_once_with(parents=True, exist_ok=True)
mock_output.assert_called_once_with(mocked_path, 'w', encoding=None)
assert (out_text.getvalue() == "''\n") |
class TestBatchNormFoldToScale():
@pytest.fixture(autouse=True)
def clear_sessions(self):
tf.keras.backend.clear_session()
(yield)
@pytest.fixture(autouse=True)
def set_random_seed(self):
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(43)
if (version.parse(tf.version.VERSION) >= version.parse('2.10')):
tf.keras.utils.set_random_seed(43)
else:
np.random.seed(43)
(yield)
@pytest.fixture(scope='session', autouse=True)
def cleanup(request):
import shutil
try:
shutil.rmtree('/tmp/test_batch_norm_fold_to_scale', ignore_errors=True)
except FileNotFoundError:
pass
def _test_output_quantizers_enabled_and_quant_mode_is_quantize_dequantize(self, model, layer_nums_to_check):
for layer_num in layer_nums_to_check:
is_enabled = model.layers[layer_num].output_quantizers[0].is_enabled()
output_quantizers_quant_mode = model.layers[layer_num].output_quantizers[0].quant_mode
assert (is_enabled and (output_quantizers_quant_mode == int(libpymo.TensorQuantizerOpMode.quantizeDequantize))), 'Quantizer layer num {} -> Enabled: {}, quantMode: {}'.format(layer_num, is_enabled, output_quantizers_quant_mode)
def test_fold_bn_before_conv_no_bias(self):
input_shape = (20, 4, 4, 10)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Conv2D(20, 2, use_bias=False)(inp)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(40, 2, use_bias=False)(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert model.layers[3].output_quantizers[0].is_enabled()
assert np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[3].param_quantizers)))
layer_list = [(model.layers[3], model.layers[(- 1)])]
with pytest.raises(RuntimeError):
fold_given_batch_norms(model, layer_list)
def test_fold_bn_before_conv_with_bias(self):
input_shape = (2, 24, 24, 10)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Conv2D(20, 3)(inp)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(30, 3)(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert model.layers[3].output_quantizers[0].is_enabled()
assert np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[3].param_quantizers)))
layer_list = [(model.layers[3], model.layers[(- 1)])]
with pytest.raises(RuntimeError):
fold_given_batch_norms(model, layer_list)
def test_fold_bn_after_conv_no_bias(self):
input_shape = (2, 24, 24, 10)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Conv2D(20, 3)(inp)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert (not model.layers[1].output_quantizers[0].is_enabled())
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
assert (not model.layers[2].output_quantizers[0].is_enabled())
assert (not np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers))))
assert model.layers[(- 1)].output_quantizers[0].is_enabled()
baseline_output = model(random_input)
layer_list = [(model.layers[1], model.layers[2])]
model = fold_given_batch_norms(model, layer_list)
output_after_fold = model(random_input)
for wrapper in model.layers[1:]:
assert (not isinstance(wrapper._layer_to_wrap, tf.keras.layers.BatchNormalization))
assert (not model.layers[1].output_quantizers[0].is_enabled())
self._test_output_quantizers_enabled_and_quant_mode_is_quantize_dequantize(model, layer_nums_to_check=[(- 1)])
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
relu_output_encoding = model.layers[(- 1)].output_quantizers[0].encoding
delta = float(((relu_output_encoding.max - relu_output_encoding.min) / 255))
assert np.allclose(baseline_output, output_after_fold, atol=delta)
def test_fold_bn_after_conv_depthwise(self):
input_shape = (2, 24, 24, 10)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.DepthwiseConv2D(3)(inp)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert (not model.layers[1].output_quantizers[0].is_enabled())
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
assert (not model.layers[2].output_quantizers[0].is_enabled())
assert (not np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers))))
assert model.layers[(- 1)].output_quantizers[0].is_enabled()
baseline_output = model(random_input)
_ = fold_all_batch_norms_to_scale(sim)
model = sim.model
output_after_fold = model(random_input)
sim.export(path='/tmp', filename_prefix='temp_bn_fold_to_scale')
for wrapper in model.layers[1:]:
assert (not isinstance(wrapper._layer_to_wrap, tf.keras.layers.BatchNormalization))
assert (not model.layers[1].output_quantizers[0].is_enabled())
self._test_output_quantizers_enabled_and_quant_mode_is_quantize_dequantize(model, layer_nums_to_check=[(- 1)])
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
relu_output_encoding = model.layers[(- 1)].output_quantizers[0].encoding
delta = float(((relu_output_encoding.max - relu_output_encoding.min) / 255))
assert np.allclose(baseline_output, output_after_fold, atol=delta)
def test_fold_bn_after_conv_with_bias(self):
input_shape = (2, 24, 24, 10)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Conv2D(20, 3)(inp)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert (not model.layers[1].output_quantizers[0].is_enabled())
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
assert (not model.layers[2].output_quantizers[0].is_enabled())
assert (not np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers))))
assert model.layers[(- 1)].output_quantizers[0].is_enabled()
baseline_output = model(random_input)
layer_list = [(model.layers[1], model.layers[2])]
model = fold_given_batch_norms(model, layer_list)
output_after_fold = model(random_input)
for wrapper in model.layers[1:]:
assert (not isinstance(wrapper._layer_to_wrap, tf.keras.layers.BatchNormalization))
assert (not model.layers[1].output_quantizers[0].is_enabled())
self._test_output_quantizers_enabled_and_quant_mode_is_quantize_dequantize(model, layer_nums_to_check=[(- 1)])
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
relu_output_encoding = model.layers[(- 1)].output_quantizers[0].encoding
delta = float(((relu_output_encoding.max - relu_output_encoding.min) / 255))
assert np.allclose(baseline_output, output_after_fold, atol=delta)
def test_fold_bn_before_linear_layer_no_bias(self):
input_shape = (32, 10)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.BatchNormalization()(inp)
x = tf.keras.layers.Dense(20, use_bias=False)(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert model.layers[2].output_quantizers[0].is_enabled()
assert np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers)))
assert model.layers[1].output_quantizers[0].is_enabled()
assert np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[1].param_quantizers)))
layer_list = [(model.layers[1], model.layers[2])]
with pytest.raises(RuntimeError):
fold_given_batch_norms(model, layer_list)
def test_fold_bn_before_linear_layer_with_bias(self):
input_shape = (32, 10)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.BatchNormalization()(inp)
x = tf.keras.layers.Dense(20)(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert model.layers[2].output_quantizers[0].is_enabled()
assert np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers)))
assert model.layers[1].output_quantizers[0].is_enabled()
assert np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[1].param_quantizers)))
layer_list = [(model.layers[1], model.layers[2])]
with pytest.raises(RuntimeError):
fold_given_batch_norms(model, layer_list)
def test_fold_bn_after_linear_layer_no_bias(self):
input_shape = (32, 10)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Dense(20, use_bias=False)(inp)
x = tf.keras.layers.BatchNormalization()(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert (not model.layers[1].output_quantizers[0].is_enabled())
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
assert model.layers[2].output_quantizers[0].is_enabled()
assert (not np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers))))
baseline_output = model(random_input)
layer_list = [(model.layers[1], model.layers[2])]
model = fold_given_batch_norms(model, layer_list)
output_after_fold = model(random_input)
for wrapper in model.layers[1:]:
assert (not isinstance(wrapper._layer_to_wrap, tf.keras.layers.BatchNormalization))
self._test_output_quantizers_enabled_and_quant_mode_is_quantize_dequantize(model, layer_nums_to_check=[1])
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
fc_output_encoding = model.layers[1].output_quantizers[0].encoding
delta = float(((fc_output_encoding.max - fc_output_encoding.min) / 255))
assert np.allclose(baseline_output, output_after_fold, atol=delta)
def test_fold_bn_after_linear_layer_with_bias(self):
input_shape = (32, 10)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Dense(20)(inp)
x = tf.keras.layers.BatchNormalization()(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert (not model.layers[1].output_quantizers[0].is_enabled())
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
assert model.layers[2].output_quantizers[0].is_enabled()
assert (not np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers))))
baseline_output = model(random_input)
layer_list = [(model.layers[1], model.layers[2])]
model = fold_given_batch_norms(model, layer_list)
output_after_fold = model(random_input)
for wrapper in model.layers[1:]:
assert (not isinstance(wrapper._layer_to_wrap, tf.keras.layers.BatchNormalization))
self._test_output_quantizers_enabled_and_quant_mode_is_quantize_dequantize(model, layer_nums_to_check=[1])
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
fc_output_encoding = model.layers[1].output_quantizers[0].encoding
delta = float(((fc_output_encoding.max - fc_output_encoding.min) / 255))
assert np.allclose(baseline_output, output_after_fold, atol=delta)
def test_bn_fold_auto_mode_transposed_conv2d(self):
model = transposed_conv_model()
random_input = np.random.rand(10, *model.input_shape[1:])
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
baseline_output = model(random_input)
folded_pairs = fold_all_batch_norms_to_scale(sim)
model = sim.model
output_after_fold = model(random_input)
sim.export(path='/tmp', filename_prefix='temp_bn_fold_to_scale')
for wrapper in model.layers[1:]:
assert (not isinstance(wrapper._layer_to_wrap, tf.keras.layers.BatchNormalization))
self._test_output_quantizers_enabled_and_quant_mode_is_quantize_dequantize(model, layer_nums_to_check=[1, 3])
conv2_output_encoding = model.layers[3].output_quantizers[0].encoding
delta = float(((conv2_output_encoding.max - conv2_output_encoding.min) / 255))
assert np.allclose(baseline_output, output_after_fold, atol=(delta * 2))
assert (len(folded_pairs) == 2)
def test_bn_fold_auto_mode(self):
input_shape = (2, 24, 24, 10)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Conv2D(20, 3)(inp)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.Conv2D(15, 3)(x)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(20, 3)(x)
x = tf.keras.layers.Conv2D(20, 3)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dense(10)(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
with pytest.raises(RuntimeError):
fold_all_batch_norms_to_scale(sim)
@pytest.mark.skip('Possible Batch norms to fold is returning None?')
def test_fold_auto_mode_with_bn_after_Conv1d_layer(self):
input_shape = (2, 10, 32)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Conv1D(20, 2)(inp)
x = tf.keras.layers.BatchNormalization()(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert (not model.layers[1].output_quantizers[0].is_enabled())
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
assert model.layers[2].output_quantizers[0].is_enabled()
assert (not np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers))))
baseline_output = model(random_input)
bn_pairs = fold_all_batch_norms_to_scale(sim)
model = sim.model
output_after_fold = model(random_input)
sim.export(path='/tmp', filename_prefix='temp_bn_fold_to_scale')
for wrapper in model.layers[1:]:
assert (not isinstance(wrapper._layer_to_wrap, tf.keras.layers.BatchNormalization))
self._test_output_quantizers_enabled_and_quant_mode_is_quantize_dequantize(model, layer_nums_to_check=[1])
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
conv_output_encoding = model.layers[1].output_quantizers[0].encoding
delta = float(((conv_output_encoding.max - conv_output_encoding.min) / 255))
assert np.allclose(baseline_output, output_after_fold, atol=delta)
assert (len(bn_pairs) == 1)
@pytest.mark.skip('Conv1D not in _supported_layers')
def test_fold_manual_with_bn_after_Conv1d_layer_no_bias(self):
input_shape = (2, 10, 32)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Conv1D(20, 2, use_bias=False)(inp)
x = tf.keras.layers.BatchNormalization()(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert (not model.layers[1].output_quantizers[0].is_enabled())
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
assert model.layers[2].output_quantizers[0].is_enabled()
assert (not np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers))))
baseline_output = model(random_input)
layer_list = [(model.layers[1], model.layers[2])]
model = fold_given_batch_norms(model, layer_list)
output_after_fold = model(random_input)
for wrapper in model.layers[1:]:
assert (not isinstance(wrapper._layer_to_wrap, tf.keras.layers.BatchNormalization))
self._test_output_quantizers_enabled_and_quant_mode_is_quantize_dequantize(model, layer_nums_to_check=[1])
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
conv_output_encoding = model.layers[1].output_quantizers[0].encoding
delta = float(((conv_output_encoding.max - conv_output_encoding.min) / 255))
assert np.allclose(baseline_output, output_after_fold, atol=delta)
@pytest.mark.skip('Conv1D not found in potential BN layers')
def test_fold_bn_before_Conv1d_with_bias(self):
input_shape = (2, 10, 32)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.BatchNormalization()(inp)
x = tf.keras.layers.Conv1D(20, 2)(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert model.layers[2].output_quantizers[0].is_enabled()
assert np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers)))
assert model.layers[1].output_quantizers[0].is_enabled()
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
with pytest.raises(RuntimeError):
fold_all_batch_norms_to_scale(sim)
@pytest.mark.skip('Conv1D not in supported layers')
def test_fold_bn_before_Conv1d_no_bias(self):
input_shape = (2, 10, 32)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.BatchNormalization()(inp)
x = tf.keras.layers.Conv1D(20, 2, use_bias=False)(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
sim = create_quantsim_model_and_compute_encodings(model, random_input)
model = sim.model
assert model.layers[2].output_quantizers[0].is_enabled()
assert np.all(np.vectorize((lambda x: x.is_enabled()))(get_wrappers_weight_quantizer(model.layers[2].param_quantizers)))
assert model.layers[1].output_quantizers[0].is_enabled()
assert get_wrappers_weight_quantizer(model.layers[1].param_quantizers).is_enabled()
layer_list = [(model.layers[1], model.layers[2])]
with pytest.raises(RuntimeError):
fold_given_batch_norms(model, layer_list)
@pytest.mark.skip('Conv3D not found in possible layers')
def test_bn_fold_conv3d_fold_backward(self):
input_shape = (1, 24, 24, 24, 3)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Conv3D(6, 3)(inp)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.Conv3D(8, 3)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
baseline_output = model(random_input)
_ = fold_all_batch_norms(model)
output_after_fold = model(random_input)
assert np.allclose(baseline_output, output_after_fold, atol=1e-05)
for wrapper in model.layers[1:]:
assert (not isinstance(wrapper._layer_to_wrap, tf.keras.layers.BatchNormalization))
@pytest.mark.skip('Conv3D not found in possible layers')
def test_bn_fold_conv3d_fold_forward(self):
input_shape = (1, 24, 24, 24, 3)
inp = tf.keras.Input(input_shape[1:])
x = tf.keras.layers.Conv3D(6, 3)(inp)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv3D(8, 3)(x)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.BatchNormalization()(x)
model = tf.keras.Model(inputs=[inp], outputs=[x])
random_input = np.random.rand(*input_shape)
_ = model(random_input)
baseline_output = model(random_input)
(_, model) = fold_all_batch_norms(model)
output_after_fold = model(random_input)
assert np.allclose(baseline_output, output_after_fold, atol=1e-05)
for wrapper in model.layers[1:]:
assert (not isinstance(wrapper._layer_to_wrap, tf.keras.layers.BatchNormalization)) |
def setUpModule():
global cell, auxcell, auxcell1, cell_sr, auxcell_sr, basis, auxbasis, kpts, nkpts
basis = '\n He S\n 38.00 0.05\n 5.00 0.25\n 0.20 0.60\n He S\n 0.25 1.00\n He P\n 1.27 1.00\n '
auxbasis = '\n He S\n 50.60 0.06\n 12.60 0.21\n 3.80 0.37\n He S\n 1.40 0.29\n He S\n 0.30 0.06\n He P\n 4.00 1.00\n 1.00 1.00\n He D\n 4.00 1.00\n '
cell = pgto.M(a=(np.eye(3) * 3.5), atom='He 3. 2. 3.\n He 1. 1. 1.', basis=basis, verbose=7, output='/dev/null', max_memory=1000, precision=1e-09)
kpts = cell.make_kpts([3, 5, 6])[[0, 2, 3, 4, 6, 12, 20]]
kpts[3] = ((kpts[0] - kpts[1]) + kpts[2])
nkpts = len(kpts)
auxcell = df.make_auxcell(cell, auxbasis)
auxcell1 = make_auxcell(cell, auxbasis)
cell_sr = cell.copy()
cell_sr.omega = (- 1.2)
auxcell_sr = df.make_auxcell(cell_sr, auxbasis) |
@pytest.mark.parametrize(['method', 'kwargs'], [pytest.param('direct', {}, id='direct'), pytest.param('direct', {'sparse': False}, id='direct_dense'), pytest.param('eigen', {'sparse': False}, id='eigen'), pytest.param('power', {'power_tol': 1e-05}, id='power'), pytest.param('iterative-lgmres', {'tol': 1e-07, 'atol': 1e-07}, id='iterative-lgmres'), pytest.param('iterative-gmres', {'tol': 1e-07, 'atol': 1e-07}, id='iterative-gmres'), pytest.param('iterative-bicgstab', {'tol': 1e-07, 'atol': 1e-07}, id='iterative-bicgstab')])
def test_ho(method, kwargs):
a = qutip.destroy(30)
H = ((((0.5 * 2) * np.pi) * a.dag()) * a)
gamma1 = 0.05
wth_vec = np.linspace(0.1, 3, 11)
p_ss = np.zeros(np.shape(wth_vec))
for (idx, wth) in enumerate(wth_vec):
n_th = (1.0 / (np.exp((1.0 / wth)) - 1))
c_op_list = []
rate = (gamma1 * (1 + n_th))
c_op_list.append((np.sqrt(rate) * a))
rate = (gamma1 * n_th)
c_op_list.append((np.sqrt(rate) * a.dag()))
rho_ss = qutip.steadystate(H, c_op_list, method=method, **kwargs)
p_ss[idx] = np.real(qutip.expect((a.dag() * a), rho_ss))
p_ss_analytic = (1.0 / (np.exp((1.0 / wth_vec)) - 1))
np.testing.assert_allclose(p_ss_analytic, p_ss, rtol=0.001, atol=0.001) |
def get_list_of_files(dir_name):
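# Recursively collects the full paths of all files under dir_name; a missing directory simply yields an empty list.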
all_files = []
try:
file_list = os.listdir(dir_name)
for entry in file_list:
full_path = os.path.join(dir_name, entry)
if os.path.isdir(full_path):
all_files = (all_files + get_list_of_files(full_path))
else:
all_files.append(full_path)
except FileNotFoundError:
pass
return all_files |
class SplAtConv2d_dcn(Module):
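# Split-Attention convolution (ResNeSt-style) whose grouped convolution is routed through a deformable conv op; radix controls the number of feature splits that the learned attention weights are applied to.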
def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2, reduction_factor=4, rectify=False, rectify_avg=False, norm=None, dropblock_prob=0.0, deform_conv_op=None, deformable_groups=1, deform_modulated=False, **kwargs):
super(SplAtConv2d_dcn, self).__init__()
self.deform_modulated = deform_modulated
padding = _pair(padding)
self.rectify = (rectify and ((padding[0] > 0) or (padding[1] > 0)))
self.rectify_avg = rectify_avg
inter_channels = max(((in_channels * radix) // reduction_factor), 32)
self.radix = radix
self.cardinality = groups
self.channels = channels
self.dropblock_prob = dropblock_prob
if self.rectify:
from rfconv import RFConv2d
self.conv = RFConv2d(in_channels, (channels * radix), kernel_size, stride, padding, dilation, groups=(groups * radix), bias=bias, average_mode=rectify_avg, **kwargs)
else:
self.conv = deform_conv_op(in_channels, (channels * radix), kernel_size, stride, padding[0], dilation, groups=(groups * radix), bias=bias, deformable_groups=deformable_groups, **kwargs)
self.use_bn = (norm is not None)
if self.use_bn:
self.bn0 = get_norm(norm, (channels * radix))
self.relu = ReLU(inplace=True)
self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
if self.use_bn:
self.bn1 = get_norm(norm, inter_channels)
self.fc2 = Conv2d(inter_channels, (channels * radix), 1, groups=self.cardinality)
if (dropblock_prob > 0.0):
self.dropblock = DropBlock2D(dropblock_prob, 3)
self.rsoftmax = rSoftMax(radix, groups)
def forward(self, x, offset_input):
if self.deform_modulated:
(offset_x, offset_y, mask) = torch.chunk(offset_input, 3, dim=1)
offset = torch.cat((offset_x, offset_y), dim=1)
mask = mask.sigmoid()
x = self.conv(x, offset, mask)
else:
x = self.conv(x, offset_input)
if self.use_bn:
x = self.bn0(x)
if (self.dropblock_prob > 0.0):
x = self.dropblock(x)
x = self.relu(x)
(batch, rchannel) = x.shape[:2]
if (self.radix > 1):
splited = torch.split(x, (rchannel // self.radix), dim=1)
gap = sum(splited)
else:
gap = x
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
if self.use_bn:
gap = self.bn1(gap)
gap = self.relu(gap)
atten = self.fc2(gap)
atten = self.rsoftmax(atten).view(batch, (- 1), 1, 1)
if (self.radix > 1):
attens = torch.split(atten, (rchannel // self.radix), dim=1)
out = sum([(att * split) for (att, split) in zip(attens, splited)])
else:
out = (atten * x)
return out.contiguous() |
def _prepareconfig(args: Optional[Union[(List[str], 'os.PathLike[str]')]]=None, plugins: Optional[Sequence[Union[(str, _PluggyPlugin)]]]=None) -> 'Config':
if (args is None):
args = sys.argv[1:]
elif isinstance(args, os.PathLike):
args = [os.fspath(args)]
elif (not isinstance(args, list)):
msg = '`args` parameter expected to be a list of strings, got: {!r} (type: {})'
raise TypeError(msg.format(args, type(args)))
config = get_config(args, plugins)
pluginmanager = config.pluginmanager
try:
if plugins:
for plugin in plugins:
if isinstance(plugin, str):
pluginmanager.consider_pluginarg(plugin)
else:
pluginmanager.register(plugin)
config = pluginmanager.hook.pytest_cmdline_parse(pluginmanager=pluginmanager, args=args)
return config
except BaseException:
config._ensure_unconfigure()
raise |
def _borders_touch(window, x, y, snap_dist):
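# Returns the proposed move arguments with 'x' and/or 'y' removed when the window already touches a neighbouring border on that axis and the proposed coordinate stays within snap_dist of the window's edges (a snapping heuristic).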
overlap_args = {'x': x, 'y': y}
borders = _get_borders(window)
for b in borders:
if any(((i in [window.edges[0], (window.edges[2] + (2 * window.borderwidth))]) for i in [b[0], b[2]])):
if ((window.edges[1] < b[3]) and (window.edges[3] > b[1])):
if any(((abs((window.edges[i] - x)) < snap_dist) for i in [0, 2])):
try:
del overlap_args['x']
except Exception:
pass
if any(((i in [window.edges[1], (window.edges[3] + (2 * window.borderwidth))]) for i in [b[1], b[3]])):
if ((window.edges[0] < b[2]) and (window.edges[2] > b[0])):
if any(((abs((window.edges[i] - y)) < snap_dist) for i in [1, 3])):
try:
del overlap_args['y']
except Exception:
pass
return overlap_args |
def convert_list(items, ids, parent, attr_type, item_func, cdata):
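# Serialises a Python list to an XML fragment: one element per item (named via item_func), recursing into nested dicts and iterables and optionally attaching sequential ids and type attributes.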
LOG.info('Inside convert_list()')
output = []
addline = output.append
item_name = item_func(parent)
if ids:
this_id = get_unique_id(parent)
for (i, item) in enumerate(items):
LOG.info(('Looping inside convert_list(): item="%s", item_name="%s", type="%s"' % (unicode_me(item), item_name, type(item).__name__)))
attr = ({} if (not ids) else {'id': ('%s_%s' % (this_id, (i + 1)))})
if (isinstance(item, numbers.Number) or (type(item) in (str, unicode))):
addline(convert_kv(item_name, item, attr_type, cdata, attr))
elif hasattr(item, 'isoformat'):
addline(convert_kv(item_name, item.isoformat(), attr_type, cdata, attr))
elif (type(item) == bool):
addline(convert_bool(item_name, item, attr_type, cdata, attr))
elif isinstance(item, dict):
if (not attr_type):
addline(('<%s>%s</%s>' % (item_name, convert_dict(item, ids, parent, attr_type, item_func, cdata), item_name)))
else:
addline(('<%s type="dict">%s</%s>' % (item_name, convert_dict(item, ids, parent, attr_type, item_func, cdata), item_name)))
elif isinstance(item, iterable):
if (not attr_type):
addline(('<%s %s>%s</%s>' % (item_name, make_attrstring(attr), convert_list(item, ids, item_name, attr_type, item_func, cdata), item_name)))
else:
addline(('<%s type="list"%s>%s</%s>' % (item_name, make_attrstring(attr), convert_list(item, ids, item_name, attr_type, item_func, cdata), item_name)))
elif (item is None):
addline(convert_none(item_name, None, attr_type, cdata, attr))
else:
raise TypeError(('Unsupported data type: %s (%s)' % (item, type(item).__name__)))
return ''.join(output) |
class COCOClsDataset(Dataset):
def __init__(self, img_name_list_path, coco_root, label_file_path, train=True, transform=None, gen_attn=False):
img_name_list_path = os.path.join(img_name_list_path, f"{('train' if (train or gen_attn) else 'val')}_id.txt")
self.img_name_list = load_img_name_list(img_name_list_path)
self.label_list = load_image_label_list_from_npy(self.img_name_list, label_file_path)
self.coco_root = coco_root
self.transform = transform
self.train = train
self.gen_attn = gen_attn
def __getitem__(self, idx):
name = self.img_name_list[idx]
if (self.train or self.gen_attn):
img = PIL.Image.open(os.path.join(self.coco_root, 'train2014', (name + '.jpg'))).convert('RGB')
else:
img = PIL.Image.open(os.path.join(self.coco_root, 'val2014', (name + '.jpg'))).convert('RGB')
label = torch.from_numpy(self.label_list[idx])
if self.transform:
img = self.transform(img)
return (img, label)
def __len__(self):
return len(self.img_name_list) |
class SerializerMixin(object):
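# Serializer mixin: SingleTagField foreign keys are emitted as their string value and TagField m2m relations as lists of tag names; all other fields fall through to the parent serializer.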
def handle_fk_field(self, obj, field):
if isinstance(field, SingleTagField):
self._current[field.name] = str(getattr(obj, field.name))
else:
super(SerializerMixin, self).handle_fk_field(obj, field)
def handle_m2m_field(self, obj, field):
if isinstance(field, TagField):
self._current[field.name] = [tag.name for tag in getattr(obj, field.name).all()]
else:
super(SerializerMixin, self).handle_m2m_field(obj, field) |
def test_class_scope(fixture_path):
result = fixture_path.runpytest('-v', '--order-scope=class')
result.assert_outcomes(passed=10, failed=0)
result.stdout.fnmatch_lines(['test_classes.py::Test1::test_one PASSED', 'test_classes.py::Test1::test_two PASSED', 'test_classes.py::Test2::test_one PASSED', 'test_classes.py::Test2::test_two PASSED', 'test_classes.py::test_one PASSED', 'test_classes.py::test_two PASSED', 'test_functions1.py::test1_one PASSED', 'test_functions1.py::test1_two PASSED', 'test_functions2.py::test2_one PASSED', 'test_functions2.py::test2_two PASSED']) |
def compare_avgplaycount(a1, a2):
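# Comparator for album ordering: missing albums sort first and untitled albums last; otherwise orders by descending average play count, then date, sort name and key.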
(a1, a2) = (a1.album, a2.album)
if (a1 is None):
return (- 1)
if (a2 is None):
return 1
if (not a1.title):
return 1
if (not a2.title):
return (- 1)
return ((- cmp(a1('~#playcount:avg'), a2('~#playcount:avg'))) or cmpa(a1.date, a2.date) or cmpa(a1.sort, a2.sort) or cmp(a1.key, a2.key)) |
class TestBinaryConfusionMatrix(unittest.TestCase):
def _test_binary_confusion_matrix_with_input(self, input: torch.Tensor, target: torch.Tensor, normalize: Optional[str]=None) -> None:
sklearn_result = torch.tensor(skcm(target, input, labels=[0, 1], normalize=normalize)).to(torch.float32)
torch.testing.assert_close(bcm(input, target, normalize=normalize).to(torch.float32), sklearn_result, equal_nan=True, atol=1e-08, rtol=1e-05)
def test_binary_confusion_matrix_base(self) -> None:
num_classes = 2
input = torch.randint(high=num_classes, size=(BATCH_SIZE,))
target = torch.randint(high=num_classes, size=(BATCH_SIZE,))
self._test_binary_confusion_matrix_with_input(input, target)
self._test_binary_confusion_matrix_with_input(input, target, normalize='all')
self._test_binary_confusion_matrix_with_input(input, target, normalize='true')
self._test_binary_confusion_matrix_with_input(input, target, normalize='pred')
def test_binary_confusion_matrix_score_thresholding(self) -> None:
num_classes = 2
threshold = 0.7
input = torch.tensor([0.7, 0.6, 0.5, 0.3, 0.9, 0.1, 1.0, 0.95, 0.2])
input_thresholded = torch.tensor([1, 0, 0, 0, 1, 0, 1, 1, 0])
target = torch.randint(high=num_classes, size=(9,))
sklearn_result = torch.tensor(skcm(target, input_thresholded, labels=[0, 1])).to(torch.float32)
my_result = bcm(input, target, threshold=threshold).to(torch.float32)
torch.testing.assert_close(my_result, sklearn_result, equal_nan=True, atol=1e-08, rtol=1e-05)
def test_binary_confusion_matrix_invalid_input(self) -> None:
with self.assertRaisesRegex(ValueError, 'input should be a one-dimensional tensor for binary confusion matrix, got shape torch.Size\\(\\[5, 10\\]\\).'):
input = torch.randint(high=2, size=(5, 10))
target = torch.randint(high=2, size=(5, 10))
bcm(input, target)
with self.assertRaisesRegex(ValueError, 'target should be a one-dimensional tensor for binary confusion matrix, got shape torch.Size\\(\\[5, 10\\]\\).'):
input = torch.randint(high=2, size=(10,))
target = torch.randint(high=2, size=(5, 10))
bcm(input, target)
with self.assertRaisesRegex(ValueError, 'The `input` and `target` should have the same dimensions, got shapes torch.Size\\(\\[11\\]\\) and torch.Size\\(\\[10\\]\\).'):
input = torch.randint(high=2, size=(11,))
target = torch.randint(high=2, size=(10,))
bcm(input, target)
with self.assertRaisesRegex(ValueError, "normalize must be one of 'all', 'pred', 'true', or 'none'."):
input = torch.randint(high=2, size=(10,))
target = torch.randint(high=2, size=(10,))
bcm(input, target, normalize='this is not a valid option') |
class ProductDomain():
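# Cartesian product of one-dimensional domains: precomputes every combination of their values and records the combined shape, per-domain bounds and dtype.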
def __init__(self, domains):
self.vals = list(it.product(*(d.vals for d in domains)))
self.shape = ((len(domains),) + domains[0].shape)
self.lower = [d.lower for d in domains]
self.upper = [d.upper for d in domains]
self.dtype = domains[0].dtype |
@cuda.jit(device=True)
def imt_func_o21(value, other_value):
return ((((((0 + ((1.0 * value[28]) * other_value[2])) + (((- 1.0) * value[10]) * other_value[31])) + (((- 1.0) * value[2]) * other_value[28])) + (((- 1.0) * value[3]) * other_value[29])) + (((- 1.0) * value[31]) * other_value[10])) + ((1.0 * value[29]) * other_value[3])) |
def _tbl_bldr(rows, cols):
tblGrid_bldr = a_tblGrid()
for i in range(cols):
tblGrid_bldr.with_child(a_gridCol())
tbl_bldr = a_tbl().with_nsdecls().with_child(tblGrid_bldr)
for i in range(rows):
tr_bldr = _tr_bldr(cols)
tbl_bldr.with_child(tr_bldr)
return tbl_bldr |
class Downsample(nn.Module):
def __init__(self, in_embed_dim, out_embed_dim, patch_size):
super().__init__()
self.proj = nn.Conv2d(in_embed_dim, out_embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = x.permute(0, 3, 1, 2)
x = self.proj(x)
x = x.permute(0, 2, 3, 1)
return x |
def _find_line_qubits(size: int, origin: Tuple[(int, int)], rotation: int) -> Tuple[(List[cirq.GridQubit], List[cirq.GridQubit])]:
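# Builds two parallel lines of `size` grid qubits: the 'up' line starts at `origin` and extends in the direction selected by `rotation` (multiples of 90 degrees); the 'down' line is offset one step perpendicular to it.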
if ((rotation % 90) != 0):
raise ValueError('Layout rotation must be a multiple of 90 degrees')
def generate(row, col, drow, dcol) -> List[cirq.GridQubit]:
return [cirq.GridQubit((row + (i * drow)), (col + (i * dcol))) for i in range(size)]
rotations = [(1, 0), (0, 1), ((- 1), 0), (0, (- 1))]
(drow, dcol) = rotations[((rotation % 360) // 90)]
(up_row, up_col) = origin
up_qubits = generate(up_row, up_col, drow, dcol)
(down_drow, down_dcol) = rotations[(((rotation + 270) % 360) // 90)]
(down_row, down_col) = ((up_row + down_drow), (up_col + down_dcol))
down_qubits = generate(down_row, down_col, drow, dcol)
return (up_qubits, down_qubits) |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input_text', default='input_text.txt')
parser.add_argument('--length', default=10, type=int)
parser.add_argument('--batch_size', default=1, type=int)
parser.add_argument('--temperature', default=1, type=float)
parser.add_argument('--model_name', default='117M')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--nsamples', default=10, type=int)
parser.add_argument('--cutoffs', default='fairy', type=str)
parser.add_argument('--write_sfdp', default=False, type=bool)
parser.add_argument('--random', default=False, type=bool)
return parser.parse_args() |
class LxClkCoreLookup(gdb.Function):
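# gdb convenience function lx_clk_core_lookup(name): resolves a clock name to its clk_core by recursively searching clk_root_list and then clk_orphan_list.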
def __init__(self):
super(LxClkCoreLookup, self).__init__('lx_clk_core_lookup')
def lookup_hlist(self, hlist_head, name):
for child in clk_core_for_each_child(hlist_head):
if (child['name'].string() == name):
return child
result = self.lookup_hlist(child['children'], name)
if result:
return result
def invoke(self, name):
name = name.string()
return (self.lookup_hlist(gdb.parse_and_eval('clk_root_list'), name) or self.lookup_hlist(gdb.parse_and_eval('clk_orphan_list'), name)) |
def ltout(label, n, x, key, im, doinp=False, **kwargs):
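# Prints a packed lower-triangular matrix of dimension n (n*(n+1)/2 elements, optionally the im-th matrix within x) in blocks of 4 or 5 columns, formatting entries against a key-dependent threshold.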
if doinpprt(label, x, doinp=False, **kwargs):
return
if (key > 0):
thresh = 0.0
else:
thresh = (10.0 ** (key - 6))
ntt = ((n * (n + 1)) // 2)
if (im > 0):
print(('%s, matrix %6d:' % (label, im)), **kwargs)
imoff = ((im - 1) * ntt)
else:
imoff = 0
if (type(x[0]) == np.complex128):
nc = 4
fmthead = '%19i '
else:
nc = 5
fmthead = '%14i'
for ist in range(0, n, nc):
iend = min((ist + nc), n)
for irow in range(ist, iend):
print((fmthead % (irow + 1)), end='', **kwargs)
print(**kwargs)
for irow in range(ist, n):
ir = min(((irow - ist) + 1), nc)
l = ((((irow * (irow + 1)) // 2) + ist) + imoff)
print(('%4d' % (irow + 1)), end='', **kwargs)
for i in range(ir):
s = x[l]
l = (l + 1)
s = formatx('%14s', '', '%14.6e', thresh, s)
print(s, end='', **kwargs)
print(**kwargs) |
@pytest.mark.parametrize('version', [*stdlib_list.short_versions, *stdlib_list.long_versions])
def test_self_consistent(version):
list_path = f'lists/{stdlib_list.get_canonical_version(version)}.txt'
modules = pkgutil.get_data('stdlib_list', list_path).decode().splitlines()
for mod_name in modules:
assert stdlib_list.in_stdlib(mod_name, version)
assert (modules == stdlib_list.stdlib_list(version)) |
@pytest.mark.parametrize('lower, upper', [(2, np.inf), (2, 5), ((- np.inf), 5)])
@pytest.mark.parametrize('op_type', ['icdf', 'rejection'])
def test_truncation_discrete_logcdf(op_type, lower, upper):
p = 0.7
op = (icdf_geometric if (op_type == 'icdf') else rejection_geometric)
x = op(p, name='x')
xt = Truncated.dist(x, lower=lower, upper=upper)
assert isinstance(xt.owner.op, TruncatedRV)
xt_vv = xt.clone()
xt_logcdf_fn = pytensor.function([xt_vv], logcdf(xt, xt_vv))
ref_xt = scipy.stats.geom(p)
log_norm = np.log((ref_xt.cdf(upper) - ref_xt.cdf((lower - 1))))
def ref_xt_logcdf(value):
if (value < lower):
return (- np.inf)
elif (value > upper):
return 0.0
return (np.log((ref_xt.cdf(value) - ref_xt.cdf((lower - 1)))) - log_norm)
for bound in (lower, upper):
if np.isinf(bound):
continue
for offset in ((- 1), 0, 1):
test_xt_v = (bound + offset)
assert np.isclose(xt_logcdf_fn(test_xt_v), ref_xt_logcdf(test_xt_v)) |
def gen_srcs_dep_taken_test():
return [gen_br2_srcs_dep_test(5, 'bne', 1, 2, True), gen_br2_srcs_dep_test(4, 'bne', 2, 3, True), gen_br2_srcs_dep_test(3, 'bne', 3, 4, True), gen_br2_srcs_dep_test(2, 'bne', 4, 5, True), gen_br2_srcs_dep_test(1, 'bne', 5, 6, True), gen_br2_srcs_dep_test(0, 'bne', 6, 7, True)] |
def convert_bool(value):
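# Normalises boolean-like input for OpenSCENARIO output: accepts 'true'/'1'/'false'/'0' strings, passes through '$'-prefixed parameter references, and renders Python booleans as 'true'/'false'.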
if isinstance(value, str):
if ((value == 'true') or (value == '1')):
return True
elif ((value == 'false') or (value == '0')):
return False
elif (value[0] == '$'):
return value
else:
raise ValueError((value + ' is not a valid boolean input to openscenario; if a string is used as a boolean value (parameter or expression), it should have a $ as the first char.'))
if value:
return 'true'
else:
return 'false' |
class WhisperTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = WhisperTokenizer
rust_tokenizer_class = WhisperTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = False
test_seq2seq = False
def setUp(self):
super().setUp()
tokenizer = WhisperTokenizer.from_pretrained('openai/whisper-tiny')
tokenizer.pad_token_id = 50256
tokenizer.pad_token = '<|endoftext|>'
tokenizer.save_pretrained(self.tmpdirname)
def test_convert_token_and_id(self):
token = 'Where'
token_id = 14436
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '!')
self.assertEqual(vocab_keys[1], '"')
self.assertEqual(vocab_keys[(- 1)], '<|notimestamps|>')
self.assertEqual(len(vocab_keys), 50364)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 50258)
def test_full_tokenizer(self):
tokenizer = WhisperTokenizer.from_pretrained(self.tmpdirname)
tokens = tokenizer.tokenize('This is a test')
self.assertListEqual(tokens, ['This', 'Gis', 'Ga', 'G', 'test'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5723, 307, 257, 220, 31636])
tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
self.assertListEqual(tokens, ['I', 'Gwas', 'Gborn', 'Gin', 'G9', '2000', ',', 'Gand', 'G', 'this', 'Gis', 'Gfals', 'A', '.'])
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(ids, [40, 390, 4232, 294, 1722, 25743, 11, 293, 220, 11176, 307, 16720, 526, 13])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens, ['I', 'Gwas', 'Gborn', 'Gin', 'G9', '2000', ',', 'Gand', 'G', 'this', 'Gis', 'Gfals', 'A', '.'])
def test_tokenizer_slow_store_full_signature(self):
pass
def test_tokenizer_fast_store_full_signature(self):
pass
def test_special_tokens_initialization(self):
pass
def test_tokenizer_integration(self):
expected_encoding = {'input_ids': [[50257, 50362, 41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13, 50256], [50257, 50362, 13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13, 50256], [50257, 50362, 464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13, 50256]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='openai/whisper-tiny.en', padding=False)
def test_output_offsets(self):
tokenizer = self.get_tokenizer()
previous_sequence = [51492, 406, 3163, 1953, 466, 13, 51612, 51612]
self.assertEqual(tokenizer.decode(previous_sequence, output_offsets=True), {'text': ' not worth thinking about.', 'offsets': [{'text': ' not worth thinking about.', 'timestamp': (22.56, 24.96)}]})
next_sequences_1 = [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257]
self.assertEqual(tokenizer.decode(next_sequences_1, output_offsets=True), {'text': ' of spectators, retrievality is not worth thinking about. His instant panic was followed by a small, sharp blow high on his chest.<|endoftext|>', 'offsets': [{'text': ' of spectators, retrievality is not worth thinking about.', 'timestamp': (0.0, 5.0)}, {'text': ' His instant panic was followed by a small, sharp blow high on his chest.', 'timestamp': (5.0, 9.4)}]})
def test_find_longest_common_subsequence(self):
previous_sequence = [1, 2, 3]
next_sequence = [2, 3, 4, 5]
merge = _find_longest_common_sequence([previous_sequence, next_sequence])
self.assertEqual(merge, [1, 2, 3, 4, 5])
previous_sequence = [1, 2, 3, 4, 5, 6, 7]
next_sequence = [2, 3, 4, 5]
merge = _find_longest_common_sequence([previous_sequence, next_sequence])
self.assertEqual(merge, [1, 2, 3, 4, 5])
previous_sequence = [1, 2, 3]
next_sequence = [4, 5, 6]
merge = _find_longest_common_sequence([previous_sequence, next_sequence])
self.assertEqual(merge, [1, 2, 3, 4, 5, 6])
previous_sequence = [1, 2, 3, 4, 99]
next_sequence = [2, 98, 4, 5, 6]
merge = _find_longest_common_sequence([previous_sequence, next_sequence])
self.assertEqual(merge, [1, 2, 3, 4, 5, 6])
previous_sequence = [1, 2, 99, 4, 5]
next_sequence = [2, 3, 4, 98, 6]
merge = _find_longest_common_sequence([previous_sequence, next_sequence])
self.assertEqual(merge, [1, 2, 99, 4, 98, 6])
seq1 = [1, 2, 3]
seq2 = [2, 3, 4]
seq3 = [3, 4, 5]
merge = _find_longest_common_sequence([seq1, seq2, seq3])
self.assertEqual(merge, [1, 2, 3, 4, 5])
seq1 = [1, 2, 3, 98, 5]
seq2 = [2, 99, 4, 5, 6, 7]
seq3 = [4, 97, 6, 7, 8]
merge = _find_longest_common_sequence([seq1, seq2, seq3])
self.assertEqual(merge, [1, 2, 3, 4, 5, 6, 7, 8]) |
@pytest.mark.parametrize('reverse', (False, True))
def test_measurable_join_interdependent(reverse):
x = pt.random.normal(name='x')
y_rvs = []
prev_rv = x
for i in range(3):
next_rv = pt.random.normal((prev_rv + 1), name=f'y{i}', size=(1, 2))
y_rvs.append(next_rv)
prev_rv = next_rv
if reverse:
y_rvs = y_rvs[::(- 1)]
ys = pt.concatenate(y_rvs, axis=0)
ys.name = 'ys'
x_vv = x.clone()
ys_vv = ys.clone()
logp = conditional_logp({x: x_vv, ys: ys_vv})
logp_combined = pt.sum([pt.sum(factor) for factor in logp.values()])
assert_no_rvs(logp_combined)
y0_vv = y_rvs[0].clone()
y1_vv = y_rvs[1].clone()
y2_vv = y_rvs[2].clone()
ref_logp = conditional_logp({x: x_vv, y_rvs[0]: y0_vv, y_rvs[1]: y1_vv, y_rvs[2]: y2_vv})
ref_logp_combined = pt.sum([pt.sum(factor) for factor in ref_logp.values()])
rng = np.random.default_rng()
x_vv_test = rng.normal()
ys_vv_test = rng.normal(size=(3, 2))
np.testing.assert_allclose(logp_combined.eval({x_vv: x_vv_test, ys_vv: ys_vv_test}), ref_logp_combined.eval({x_vv: x_vv_test, y0_vv: ys_vv_test[0:1], y1_vv: ys_vv_test[1:2], y2_vv: ys_vv_test[2:3]})) |
def main():
enhance_print()
coords = symbols('x y z')
(ex, ey, ez, grad) = MV.setup('ex ey ez', metric='[1,1,1]', coords=coords)
mfvar = (u, v) = symbols('u v')
eu = (ex + ey)
ev = (ex - ey)
(eu_r, ev_r) = ReciprocalFrame([eu, ev])
oprint('Frame', (eu, ev), 'Reciprocal Frame', (eu_r, ev_r))
print('eu.eu_r =', (eu | eu_r))
print('eu.ev_r =', (eu | ev_r))
print('ev.eu_r =', (ev | eu_r))
print('ev.ev_r =', (ev | ev_r))
eu = ((ex + ey) + ez)
ev = (ex - ey)
(eu_r, ev_r) = ReciprocalFrame([eu, ev])
oprint('Frame', (eu, ev), 'Reciprocal Frame', (eu_r, ev_r))
print('eu.eu_r =', (eu | eu_r))
print('eu.ev_r =', (eu | ev_r))
print('ev.eu_r =', (ev | eu_r))
print('ev.ev_r =', (ev | ev_r))
print('eu =', eu)
print('ev =', ev)
def_prec(locals())
print(GAeval('eu^ev|ex', True))
print(GAeval('eu^ev|ex*eu', True))
return |
def _timed_hash_bucket(input: HashBucketInput):
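# Groups the annotated delta's records into primary-key hash buckets (optionally under a memray profiler), packs the buckets into hash groups in the object store, and reports record counts, sizes and peak memory in a HashBucketResult.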
task_id = get_current_ray_task_id()
worker_id = get_current_ray_worker_id()
with (memray.Tracker(f'hash_bucket_{worker_id}_{task_id}.bin') if input.enable_profiler else nullcontext()):
(delta_file_envelope_groups, total_record_count, total_size_bytes) = _group_file_records_by_pk_hash_bucket(annotated_delta=input.annotated_delta, num_hash_buckets=input.num_hash_buckets, primary_keys=input.primary_keys, read_kwargs_provider=input.read_kwargs_provider, deltacat_storage=input.deltacat_storage, deltacat_storage_kwargs=input.deltacat_storage_kwargs)
hash_bucket_group_to_obj_id_tuple = group_hash_bucket_indices(hash_bucket_object_groups=delta_file_envelope_groups, num_buckets=input.num_hash_buckets, num_groups=input.num_hash_groups, object_store=input.object_store)
peak_memory_usage_bytes = get_current_node_peak_memory_usage_in_bytes()
return HashBucketResult(hash_bucket_group_to_obj_id_tuple, np.int64(total_size_bytes), np.int64(total_record_count), np.double(peak_memory_usage_bytes), np.double(0.0), np.double(time.time())) |
class LWLBoxActor(BaseActor):
def __init__(self, net, objective, loss_weight=None):
super().__init__(net, objective)
if (loss_weight is None):
loss_weight = {'segm': 1.0}
self.loss_weight = loss_weight
def __call__(self, data):
train_imgs = data['train_images']
bb_train = data['train_anno']
num_sequences = train_imgs.shape[1]
num_train_frames = train_imgs.shape[0]
train_feat = self.net.extract_backbone_features(train_imgs.view((- 1), train_imgs.shape[(- 3)], train_imgs.shape[(- 2)], train_imgs.shape[(- 1)]))
train_feat_clf = self.net.extract_target_model_features(train_feat)
bb_train = bb_train.view((- 1), *bb_train.shape[(- 1):])
train_box_enc = self.net.box_label_encoder(bb_train, train_feat_clf, train_imgs.shape)
train_box_enc = train_box_enc.view(num_train_frames, num_sequences, *train_box_enc.shape[(- 3):])
(mask_pred_box_train, decoder_feat_train) = self.net.decoder(train_box_enc, train_feat, train_imgs.shape[(- 2):])
loss_segm_box = (self.loss_weight['segm_box'] * self.objective['segm'](mask_pred_box_train, data['train_masks'].view(mask_pred_box_train.shape)))
loss_segm_box = (loss_segm_box / num_train_frames)
stats = {}
loss = loss_segm_box
acc_box = 0
cnt_box = 0
acc_lbox = [davis_jaccard_measure((torch.sigmoid(rm.detach()).cpu().numpy() > 0.5), lb.cpu().numpy()) for (rm, lb) in zip(mask_pred_box_train.view((- 1), *mask_pred_box_train.shape[(- 2):]), data['train_masks'].view((- 1), *mask_pred_box_train.shape[(- 2):]))]
acc_box += sum(acc_lbox)
cnt_box += len(acc_lbox)
stats['Loss/total'] = loss.item()
stats['Stats/acc_box_train'] = (acc_box / cnt_box)
return (loss, stats) |
def test_hypersphere_log_exp_maps(sphere_dim):
sphere = HyperSphere(dim=sphere_dim)
pt_a = sphere.project(torch.randn((sphere_dim + 1)))
pt_b = sphere.project(torch.randn((sphere_dim + 1)))
log_ab = sphere.log_map(pt_a, pt_b)
assert torch.allclose(log_ab, sphere.to_tangent(pt_a, log_ab)), 'log map not close to tangent vector!'
assert torch.isclose(torch.linalg.norm(log_ab), sphere.length(pt_a, pt_b)), 'length of log map is not length(a,b)'
exp_log_ab = sphere.exp_map(pt_a, log_ab)
assert torch.allclose(pt_b, exp_log_ab), 'exp(a, log(a, b)) does not return pt_b' |
class PyreTypeChecker(TypeChecker):
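# Type-checker adapter for pyre: install() pip-installs pyre-check and writes a minimal .pyre_configuration, get_version() normalises the CLI version string, and run_tests() groups `pyre check` output lines by the file they mention.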
def name(self) -> str:
return 'pyre'
def install(self) -> bool:
try:
run(f'{sys.executable} -m pip install pyre-check --upgrade', check=True, shell=True)
pyre_config = '{"site_package_search_strategy": "pep561", "source_directories": ["."]}\n'
with open('.pyre_configuration', 'w') as f:
f.write(pyre_config)
return True
except:
print('Unable to install pyre')
return False
def get_version(self) -> str:
proc = run('pyre --version', stdout=PIPE, text=True, shell=True)
version = proc.stdout.strip()
version = version.replace('Client version:', 'pyre')
return version
def run_tests(self, test_files: Sequence[str]) -> dict[(str, str)]:
proc = run('pyre check', stdout=PIPE, text=True, shell=True)
lines = proc.stdout.split('\n')
results_dict: dict[(str, str)] = {}
for line in lines:
file_name = line.split(':')[0].strip()
results_dict[file_name] = ((results_dict.get(file_name, '') + line) + '\n')
return results_dict |
def get_optimizer(cfg: DictConfig, model: nn.Module) -> Tuple[(Optimizer, Optional[LambdaLR])]:
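# Builds the optimizer named in the config (resolved via eval), splitting parameters into weight-decay and no-decay groups, and optionally wraps it with a step-interval LR scheduler whose num_training_steps defaults to trainer.max_steps.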
args = dict(cfg[__key__].args)
args = {str(k).lower(): v for (k, v) in args.items()}
optimizer = eval(f'{cfg[__key__].version}')
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': cfg[__key__].weight_decay}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = optimizer(optimizer_grouped_parameters, **args)
if cfg[__key__].scheduler.use:
scheduler = eval(cfg[__key__].scheduler.version)
args = dict(cfg[__key__].scheduler.args)
args = {str(k).lower(): v for (k, v) in args.items()}
args['optimizer'] = optimizer
if ('num_training_steps' not in args):
args['num_training_steps'] = cfg['trainer'].max_steps
scheduler = scheduler(**args)
scheduler = {'scheduler': scheduler, 'interval': 'step'}
else:
scheduler = None
return (optimizer, scheduler) |
def on_error(stop_on_error: bool, exception: BaseException, package: str) -> None:
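# Shared download error handler: a KeyboardInterrupt logs that downloads were cancelled, TypeError/ValueError are ignored, and any other exception is logged for the offending package and, if stop_on_error is set, ends the process.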
if isinstance(exception, KeyboardInterrupt):
logger.info(('Cancelling, all downloads are forcibly stopped, data may be ' + 'corrupted.'))
elif (isinstance(exception, TypeError) or isinstance(exception, ValueError)):
pass
else:
if package:
logger.exception(f'Error syncing package: {package}')
if stop_on_error:
logger.error('Exiting early after error.')
sys.exit(1) |
class _GenericOpMixin():
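# Test mixin: subclasses declare shapes, bad_shapes and specialisations, and the generate_* hooks (looked up by test name in pytest_generate_tests) expand them into parametrized cases.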
def op_numpy(self, *args):
raise NotImplementedError
atol = 1e-10
rtol = 1e-07
shapes = []
bad_shapes = []
specialisations = []
def generate_mathematically_correct(self, metafunc):
parameters = ((['op'] + [x for x in metafunc.fixturenames if x.startswith('data_')]) + ['out_type'])
cases = []
for p_op in self.specialisations:
(op, *types, out_type) = p_op.values
args = (op, types, self.shapes, out_type)
cases.extend(cases_type_shape_product(_ALL_CASES, *args))
metafunc.parametrize(parameters, cases)
def generate_incorrect_shape_raises(self, metafunc):
parameters = (['op'] + [x for x in metafunc.fixturenames if x.startswith('data_')])
if (not self.bad_shapes):
reason = ''.join(["no shapes are 'incorrect' for ", metafunc.cls.__name__, '::', metafunc.function.__name__])
false_case = pytest.param(*([None] * len(parameters)), marks=pytest.mark.skip(reason), id='no test')
metafunc.parametrize(parameters, [false_case])
return
cases = []
for p_op in self.specialisations:
(op, *types, _) = p_op.values
args = (op, types, self.bad_shapes)
cases.extend(cases_type_shape_product(_RANDOM, *args))
metafunc.parametrize(parameters, cases)
def pytest_generate_tests(self, metafunc):
generator_name = ('generate_' + metafunc.function.__name__.replace('test_', ''))
try:
generator = getattr(self, generator_name)
except AttributeError:
return
generator(metafunc) |
def handle_holding_registers(client):
_logger.info('### write holding register and read holding registers')
client.write_register(1, 10, slave=SLAVE)
rr = client.read_holding_registers(1, 1, slave=SLAVE)
assert (not rr.isError())
assert (rr.registers[0] == 10)
client.write_registers(1, ([10] * 8), slave=SLAVE)
rr = client.read_holding_registers(1, 8, slave=SLAVE)
assert (not rr.isError())
assert (rr.registers == ([10] * 8))
_logger.info('### write read holding registers, using **kwargs')
arguments = {'read_address': 1, 'read_count': 8, 'write_address': 1, 'values': [256, 128, 100, 50, 25, 10, 5, 1]}
client.readwrite_registers(slave=SLAVE, **arguments)
rr = client.read_holding_registers(1, 8, slave=SLAVE)
assert (not rr.isError())
assert (rr.registers == arguments['values']) |
def open_with_os(path):
sys = platform.system()
if (sys == 'Darwin'):
os.system(('open "%s"' % path))
elif (sys == 'Windows'):
os.startfile(path)
elif (sys == 'Linux'):
os.system(('evince "%s"' % path))
else:
raise NotImplementedError(('Unable to open files in this particular system: %s' % sys)) |
def fix_missing_fields(ds: Dataset) -> Dataset:
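# Drops VCF-specific variables and attributes and replaces int32 missing sentinels with fill values, presumably to make the dataset comparable with one produced without that metadata.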
ds = ds.drop_vars('call_genotype_phased')
ds = ds.drop_vars('variant_filter')
ds = ds.drop_vars('filter_id')
del ds.attrs['filters']
del ds.attrs['max_alt_alleles_seen']
del ds.attrs['vcf_zarr_version']
del ds.attrs['vcf_header']
for var in ds.data_vars:
if (ds[var].dtype == np.int32):
ds[var] = ds[var].where((ds[var] != INT_MISSING), INT_FILL)
return ds |
def test_InterHand3D_dataset():
dataset = 'InterHand3DDataset'
dataset_info = Config.fromfile('configs/_base_/datasets/interhand3d.py').dataset_info
dataset_class = DATASETS.get(dataset)
channel_cfg = dict(num_output_channels=42, dataset_joints=42, dataset_channel=[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41]], inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41])
data_cfg = dict(image_size=[256, 256], heatmap_size=[64, 64, 64], heatmap3d_depth_bound=400.0, heatmap_size_root=64, root_depth_bound=400.0, num_output_channels=channel_cfg['num_output_channels'], num_joints=channel_cfg['dataset_joints'], dataset_channel=channel_cfg['dataset_channel'], inference_channel=channel_cfg['inference_channel'])
data_cfg_copy = copy.deepcopy(data_cfg)
_ = dataset_class(ann_file='tests/data/interhand2.6m/test_interhand2.6m_data.json', camera_file='tests/data/interhand2.6m/test_interhand2.6m_camera.json', joint_file='tests/data/interhand2.6m/test_interhand2.6m_joint_3d.json', img_prefix='tests/data/interhand2.6m/', data_cfg=data_cfg_copy, pipeline=[], dataset_info=dataset_info, test_mode=True)
custom_dataset = dataset_class(ann_file='tests/data/interhand2.6m/test_interhand2.6m_data.json', camera_file='tests/data/interhand2.6m/test_interhand2.6m_camera.json', joint_file='tests/data/interhand2.6m/test_interhand2.6m_joint_3d.json', img_prefix='tests/data/interhand2.6m/', data_cfg=data_cfg_copy, pipeline=[], dataset_info=dataset_info, test_mode=False)
assert (custom_dataset.dataset_name == 'interhand3d')
assert (custom_dataset.test_mode is False)
assert (custom_dataset.num_images == 4)
assert (len(custom_dataset.db) == 4)
_ = custom_dataset[0]
outputs = convert_db_to_output(custom_dataset.db, keys=['rel_root_depth', 'hand_type'], is_3d=True)
with tempfile.TemporaryDirectory() as tmpdir:
infos = custom_dataset.evaluate(outputs, tmpdir, ['MRRPE', 'MPJPE', 'Handedness_acc'])
assert_almost_equal(infos['MRRPE'], 0.0, decimal=5)
assert_almost_equal(infos['MPJPE_all'], 0.0, decimal=5)
assert_almost_equal(infos['MPJPE_single'], 0.0, decimal=5)
assert_almost_equal(infos['MPJPE_interacting'], 0.0, decimal=5)
assert_almost_equal(infos['Handedness_acc'], 1.0)
with pytest.raises(KeyError):
infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP') |
class BaseModel(torch.nn.Module):
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.Tensor = (torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor)
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
def set_input(self, input):
self.input = input
def forward(self):
pass
def test(self):
pass
def get_image_paths(self):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
return self.input
def get_current_errors(self):
return {}
def save(self, label):
pass
def save_network(self, network, network_label, epoch_label, gpu_ids):
save_filename = ('%s_net_%s.pth' % (epoch_label, network_label))
save_path = os.path.join(self.save_dir, save_filename)
torch.save(network.cpu().state_dict(), save_path)
if (len(gpu_ids) and torch.cuda.is_available()):
network.cuda()
def load_network(self, network, network_label, epoch_label, save_dir=''):
save_filename = ('%s_net_%s.pth' % (epoch_label, network_label))
if (not save_dir):
save_dir = self.save_dir
save_path = os.path.join(save_dir, save_filename)
if (not os.path.isfile(save_path)):
print(('%s not exists yet!' % save_path))
if (network_label == 'G'):
raise RuntimeError('Generator must exist!')
else:
print('Model {} is loaded.'.format(save_path))
try:
network.load_state_dict(torch.load(save_path))
except:
pretrained_dict = torch.load(save_path)
model_dict = network.state_dict()
try:
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in model_dict)}
network.load_state_dict(pretrained_dict)
if self.opt.verbose:
print(('Pretrained network %s has excessive layers; Only loading layers that are used' % network_label))
except:
print(('Pretrained network %s has fewer layers; The following are not initialized:' % network_label))
for (k, v) in pretrained_dict.items():
if (v.size() == model_dict[k].size()):
model_dict[k] = v
if (sys.version_info >= (3, 0)):
not_initialized = set()
else:
from sets import Set
not_initialized = Set()
for (k, v) in model_dict.items():
if ((k not in pretrained_dict) or (v.size() != pretrained_dict[k].size())):
not_initialized.add(k.split('.')[0])
print(sorted(not_initialized))
network.load_state_dict(model_dict)
def update_learning_rate(self):
pass |
def test_check_cask(bf, caplog, tmp_path):
os.chdir(tmp_path)
if (not brew_file.is_mac()):
with pytest.raises(RuntimeError) as excinfo:
bf.check_cask()
assert (str(excinfo.value) == 'Cask is not available on Linux!')
return
bf.check_cask()
assert ('# Starting to check applications for Cask...' in caplog.messages[0])
assert ('# Summary' in ''.join(caplog.messages))
assert Path('Caskfile').exists()
with open('Caskfile', 'r') as f:
lines = f.readlines()
assert (lines[0] == '# Cask applications\n') |
def clean_duplicates_mro(sequences: list[list[ClassDef]], cls: ClassDef, context: (InferenceContext | None)) -> list[list[ClassDef]]:
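# Scan every candidate MRO sequence and raise DuplicateBasesError if the same class (identified by line number and qualified name) appears twice; otherwise return the sequences unchanged.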
for sequence in sequences:
seen = set()
for node in sequence:
lineno_and_qname = (node.lineno, node.qname())
if (lineno_and_qname in seen):
raise DuplicateBasesError(message='Duplicates found in MROs {mros} for {cls!r}.', mros=sequences, cls=cls, context=context)
seen.add(lineno_and_qname)
return sequences |
def subdispatch_mediatortask(chain_state: ChainState, state_change: StateChange, token_network_address: TokenNetworkAddress, secrethash: SecretHash) -> TransitionResult[ChainState]:
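# Route the state change to the mediator task registered under this secrethash: validate the token network, run the mediator state machine, and update or remove the task mapping based on the resulting state.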
block_number = chain_state.block_number
block_hash = chain_state.block_hash
sub_task = chain_state.payment_mapping.secrethashes_to_task.get(secrethash)
if (not sub_task):
is_valid_subtask = True
mediator_state = None
elif (sub_task and isinstance(sub_task, MediatorTask)):
is_valid_subtask = (token_network_address == sub_task.token_network_address)
mediator_state = sub_task.mediator_state
else:
is_valid_subtask = False
events: List[Event] = []
if is_valid_subtask:
token_network_state = get_token_network_by_address(chain_state, token_network_address)
if token_network_state:
pseudo_random_generator = chain_state.pseudo_random_generator
iteration = mediator.state_transition(mediator_state=mediator_state, state_change=state_change, channelidentifiers_to_channels=token_network_state.channelidentifiers_to_channels, addresses_to_channel=chain_state.addresses_to_channel, pseudo_random_generator=pseudo_random_generator, block_number=block_number, block_hash=block_hash)
events = iteration.events
if iteration.new_state:
sub_task = MediatorTask(token_network_address, iteration.new_state)
if (sub_task is not None):
chain_state.payment_mapping.secrethashes_to_task[secrethash] = sub_task
elif (secrethash in chain_state.payment_mapping.secrethashes_to_task):
del chain_state.payment_mapping.secrethashes_to_task[secrethash]
return TransitionResult(chain_state, events) |
@pytest.mark.parametrize('data, msg', [(b'\x80\n', 'invalid utf-8'), (b'\n', 'invalid json'), (b'{"is this invalid json?": true\n', 'invalid json'), (b'{"valid json without args": true}\n', 'Missing args'), (b'{"args": []}\n', 'Missing target_arg'), (((b'{"args": [], "target_arg": null, "protocol_version": ' + OLD_VERSION) + b'}\n'), 'incompatible version'), (((b'{"args": [], "target_arg": null, "protocol_version": ' + NEW_VERSION) + b'}\n'), 'incompatible version'), (b'{"args": [], "target_arg": null, "protocol_version": "foo"}\n', 'invalid version'), (b'{"args": [], "target_arg": null}\n', 'invalid version')])
def test_invalid_data(qtbot, ipc_server, connected_socket, caplog, data, msg):
signals = [ipc_server.got_invalid_data, connected_socket.disconnected]
with caplog.at_level(logging.ERROR):
with qtbot.assert_not_emitted(ipc_server.got_args):
with qtbot.wait_signals(signals, order='strict'):
connected_socket.write(data)
invalid_msg = 'Ignoring invalid IPC data from socket '
assert caplog.messages[(- 1)].startswith(invalid_msg)
assert caplog.messages[(- 2)].startswith(msg) |
class TxsBTCAPPRSpider(TxsBTCSpider):
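# Scrapy spider that expands Bitcoin transaction subgraphs from seed nodes, using approximate personalized PageRank (APPR) with the given alpha/epsilon to decide which transactions to crawl next.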
name = 'txs.btc.appr'
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.task_map = dict()
self.alpha = float(kwargs.get('alpha', 0.15))
self.epsilon = float(kwargs.get('epsilon', 0.0001))
def start_requests(self):
source_nodes = set()
if (self.filename is not None):
with open(self.filename, 'r') as f:
for row in csv.reader(f):
source_nodes.add(row[0])
self.task_map[row[0]] = SyncSubgraphTask(strategy=APPR(source=row[0], alpha=self.alpha, epsilon=self.epsilon), source=row[0])
elif (self.source is not None):
source_nodes.add(self.source)
self.task_map[self.source] = SyncSubgraphTask(strategy=APPR(source=self.source, alpha=self.alpha, epsilon=self.epsilon), source=self.source)
for node in source_nodes:
now = time.time()
self.task_map[node].wait(now)
(yield self.get_tx_request(node, **{'source': node, 'residual': 1.0, 'wait_key': now}))
def parse_tx(self, response, **kwargs):
if (response.status != 200):
logging.warning(('On parse: got error status from %s' % response.url))
return
data = json.loads(response.text)
logging.info('On parse: Extend {} from seed of {}, residual {}'.format(kwargs['hash'], kwargs['source'], kwargs['residual']))
in_txs = self.parse_input_txs(data, **kwargs)
(yield from in_txs)
out_txs = self.parse_output_txs(data, **kwargs)
(yield from out_txs)
task = self.task_map[kwargs['source']]
task.push(node=kwargs['hash'], edges=[item['tx'] for item in (in_txs + out_txs) if (item['tx']['to'] != '')], wait_key=kwargs['wait_key'])
item = task.pop()
if (item is not None):
now = time.time()
task.wait(now)
(yield self.get_tx_request(item['node'], **{'source': kwargs['source'], 'residual': item['residual'], 'wait_key': now}))
else:
(yield ImportanceItem(source=kwargs['source'], importance=task.strategy.p)) |
class ScreenFeatures(collections.namedtuple('ScreenFeatures', ['height_map', 'visibility_map', 'creep', 'power', 'player_relative', 'unit_type', 'unit_density', 'unit_density_aa'])):
__slots__ = ()
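# Build a Feature for every screen layer from its (scale, type, palette, clip) spec, resolving callable palettes against the layer's scale.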
def __new__(cls, **kwargs):
feats = {}
for (name, (scale, type_, palette, clip)) in six.iteritems(kwargs):
feats[name] = Feature(index=ScreenFeatures._fields.index(name), name=name, layer_set='renders', full_name=('screen ' + name), scale=scale, type=type_, palette=(palette(scale) if callable(palette) else palette), clip=clip)
return super(ScreenFeatures, cls).__new__(cls, **feats) |
def test_bind_completion_no_binding(qtmodeltester, cmdutils_stub, config_stub, key_config_stub, configdata_stub, info):
model = configmodel.bind('x', info=info)
model.set_pattern('')
qtmodeltester.check(model)
_check_completions(model, {'Commands': [('open', 'open a url', ''), ('q', "Alias for 'quit'", ''), ('quit', 'quit qutebrowser', 'ZQ, <Ctrl+q>'), ('scroll', 'Scroll the current tab in the given direction.', ''), ('tab-close', 'Close the current tab.', '')]}) |
def get_wx_user_info(access_data: dict):
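# Exchange a WeChat OAuth openid/access_token pair for the user's profile; returns the parsed response on success, or None when the request fails or contains no openid.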
openid = access_data.get('openid')
access_token = access_data.get('access_token')
try:
fields = parse.urlencode({'access_token': access_token, 'openid': openid})
url = ('https://api.weixin.qq.com/sns/userinfo?' + fields)  # NOTE: the endpoint was truncated in the source; the standard WeChat sns/userinfo URL is assumed here
print(url)
req = request.Request(url=url, method='GET')
res = request.urlopen(req, timeout=10)
wx_user_info = json.loads(res.read().decode())
print(wx_user_info)
except Exception as e:
print(e)
return None
if ('openid' in wx_user_info):
return wx_user_info
else:
return None |
def calculate_js_div(R1, R2):
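# Compute the pairwise Jensen-Shannon divergence between corresponding distributions in R1 and R2, returning one list of divergences per subset.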
subset_overlap = []
for n in range(len(R1)):
sim_per_pair = []
for i in range(len(R1[n])):
s1 = R1[n][i]
s2 = R2[n][i]
try:
sim = js_divergence(s1, s2)
except TypeError:
print(s1)
print(s2)
print('---')
sim_per_pair.append(sim)
subset_overlap.append(sim_per_pair)
return subset_overlap |
class TNSR():
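# Tensor-algebra helpers for TensorFlow tensors: mode-n unfolding/folding, mode products, and reconstruction from Tucker, tensor-train and CP formats, plus NumPy counterparts prefixed with np_.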
def unfold(tensor, mode):
lst = list(range(0, len(tensor.get_shape().as_list())))
return tf.reshape(tensor=tf.transpose(tensor, (([mode] + lst[:mode]) + lst[(mode + 1):])), shape=[tensor.get_shape().as_list()[mode], (- 1)])
def fold(tensor, mode, shape):
full_shape = list(shape)
mode_dim = full_shape.pop(mode)
full_shape.insert(0, mode_dim)
if (None in full_shape):
full_shape[full_shape.index(None)] = (- 1)
lst = list(range(1, len(full_shape)))
lst.insert(mode, 0)
return tf.transpose(tf.reshape(tensor=tensor, shape=full_shape), lst)
def mode_dot(tensor, matrix, mode):
new_shape = tensor.get_shape().as_list()
if (matrix.get_shape().as_list()[1] != tensor.get_shape().as_list()[mode]):
raise ValueError("Shape error: {0} (the matrix's 2nd dimension) does not match {1} (the tensor's mode-{2} dimension)".format(matrix.get_shape().as_list()[1], tensor.get_shape().as_list()[mode], mode))
new_shape[mode] = matrix.get_shape().as_list()[0]
res = tf.matmul(matrix, TNSR.unfold(tensor, mode))
return TNSR.fold(res, mode, new_shape)
def tucker_to_tensor(core, factors):
for (i, factor) in enumerate(factors):
core = TNSR.mode_dot(core, factor, i)
return core
def tt_to_tensor(cores):
tensor_size = []
for c in cores:
tensor_size.append(c.get_shape().as_list()[1])
md = 2
new_shape = (cores[0].get_shape().as_list()[:(- 1)] + cores[1].get_shape().as_list()[1:])
t = tf.reshape(TNSR.mode_dot(cores[0], tf.transpose(TNSR.unfold(cores[1], 0)), md), new_shape)
for i in range(1, (len(tensor_size) - 1)):
md = (md + 1)
new_shape = (t.get_shape().as_list()[:(- 1)] + cores[(i + 1)].get_shape().as_list()[1:])
t = tf.reshape(TNSR.mode_dot(t, tf.transpose(TNSR.unfold(cores[(i + 1)], 0)), md), new_shape)
return tf.reshape(t, tensor_size)
def cp_to_tensor(rank1_tnsrs):
tnsr = rank1_tnsrs[0][0]
for i in range(1, len(rank1_tnsrs[0])):
if (tf.__version__ == '0.11.0rc2'):
tnsr = tf.mul(tf.reshape(tnsr, [(- 1), 1]), tf.reshape(rank1_tnsrs[0][i], [1, (- 1)]))
else:
tnsr = tf.multiply(tf.reshape(tnsr, [(- 1), 1]), tf.reshape(rank1_tnsrs[0][i], [1, (- 1)]))
for j in range(1, len(rank1_tnsrs)):
t = rank1_tnsrs[j][0]
for k in range(1, len(rank1_tnsrs[j])):
if (tf.__version__ == '0.11.0rc2'):
t = tf.mul(tf.reshape(t, [(- 1), 1]), tf.reshape(rank1_tnsrs[j][k], [1, (- 1)]))
else:
t = tf.multiply(tf.reshape(t, [(- 1), 1]), tf.reshape(rank1_tnsrs[j][k], [1, (- 1)]))
tnsr = tf.add(tnsr, t)
return tnsr
def np_unfold(tensor, mode):
return np.moveaxis(tensor, mode, 0).reshape((tensor.shape[mode], (- 1)))
def np_fold(unfolded_tensor, mode, shape):
full_shape = list(shape)
mode_dim = full_shape.pop(mode)
full_shape.insert(0, mode_dim)
return np.moveaxis(unfolded_tensor.reshape(full_shape), 0, mode)
def np_mode_dot(tensor, matrix, mode):
new_shape = list(tensor.shape)
if (matrix.ndim == 2):
if (matrix.shape[1] != tensor.shape[mode]):
raise ValueError('shapes {0} and {1} not aligned in mode-{2} multiplication: {3} (mode {2}) != {4} (dim 1 of matrix)'.format(tensor.shape, matrix.shape, mode, tensor.shape[mode], matrix.shape[1]))
new_shape[mode] = matrix.shape[0]
res = np.dot(matrix, TNSR.np_unfold(tensor, mode))
return TNSR.np_fold(res, mode, new_shape) |
class DrawMav():
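# Renders the MAV as a pyqtgraph GLMeshItem: body points are rotated by the Euler-angle attitude, translated to the NED position, then re-mapped into the plotting frame before being turned into a mesh.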
def __init__(self, state, window):
(self.mav_points, self.mav_meshColors) = self.get_points()
mav_position = np.array([[state.north], [state.east], [(- state.altitude)]])
R = Euler2Rotation(state.phi, state.theta, state.psi)
rotated_points = self.rotate_points(self.mav_points, R)
translated_points = self.translate_points(rotated_points, mav_position)
R = np.array([[0, 1, 0], [1, 0, 0], [0, 0, (- 1)]])
translated_points = (R @ translated_points)
mesh = self.points_to_mesh(translated_points)
self.mav_body = gl.GLMeshItem(vertexes=mesh, vertexColors=self.mav_meshColors, drawEdges=True, smooth=False, computeNormals=False)
window.addItem(self.mav_body)
def update(self, state):
mav_position = np.array([[state.north], [state.east], [(- state.altitude)]])
R = Euler2Rotation(state.phi, state.theta, state.psi)
rotated_points = self.rotate_points(self.mav_points, R)
translated_points = self.translate_points(rotated_points, mav_position)
R = np.array([[0, 1, 0], [1, 0, 0], [0, 0, (- 1)]])
translated_points = (R @ translated_points)
mesh = self.points_to_mesh(translated_points)
self.mav_body.setMeshData(vertexes=mesh, vertexColors=self.mav_meshColors)
def rotate_points(self, points, R):
rotated_points = (R @ points)
return rotated_points
def translate_points(self, points, translation):
translated_points = (points + np.dot(translation, np.ones([1, points.shape[1]])))
return translated_points
def get_points(self):
points = np.array([[0, 0, 0], [1, 1, 1], [1, 1, 0]]).T
scale = 20
points = (scale * points)
red = np.array([1.0, 0.0, 0.0, 1])
green = np.array([0.0, 1.0, 0.0, 1])
blue = np.array([0.0, 0.0, 1.0, 1])
yellow = np.array([1.0, 1.0, 0.0, 1])
meshColors = np.empty((13, 3, 4), dtype=np.float32)
meshColors[0] = yellow
return (points, meshColors)
def points_to_mesh(self, points):
points = points.T
mesh = np.array([[points[0], points[1], points[2]]])
return mesh |
def gcd(u, v):
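# Binary (Stein) GCD for rbigints: factor out the shared power of two, then repeatedly subtract the smaller operand from the larger and strip trailing zero bits, restoring the shared power of two at the end.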
from rpython.rlib.rbigint import _v_isub, _v_rshift, SHIFT
if (not u.tobool()):
return v.abs()
if (not v.tobool()):
return u.abs()
if ((v.sign == (- 1)) and (u.sign == (- 1))):
sign = (- 1)
else:
sign = 1
if ((u.size == 1) and (v.size == 1)):
result = gcd1(u.digit(0), v.digit(0))
return rbigint([result], sign, 1)
shiftu = count_trailing_zeros(u)
shiftv = count_trailing_zeros(v)
shift = min(shiftu, shiftv)
if shiftu:
u = u.rshift(shiftu, dont_invert=True)
if (u.sign == (- 1)):
u.sign = 1
else:
u = rbigint(u._digits[:], 1, u.numdigits())
if shiftv:
v = v.rshift(shiftv, dont_invert=True)
if (v.sign == (- 1)):
v.sign = 1
else:
v = rbigint(v._digits[:], 1, v.numdigits())
while True:
if ((u.size == 1) and (v.size == 1)):
digit = gcd1(u.digit(0), v.digit(0))
u = rbigint([digit], 1, 1)
break
if u.gt(v):
(u, v) = (v, u)
assert (_v_isub(v, 0, v.numdigits(), u, u.numdigits()) == 0)
v._normalize()
if (not v.tobool()):
break
rshift = count_trailing_zeros(v)
while (rshift >= SHIFT):
assert (_v_rshift(v, v, v.numdigits(), (SHIFT - 1)) == 0)
rshift -= (SHIFT - 1)
assert (_v_rshift(v, v, v.numdigits(), rshift) == 0)
v._normalize()
result = u.lshift(shift)
result.sign = sign
return result |
class Section():
def __init__(self, sectPr: CT_SectPr, document_part: DocumentPart):
super(Section, self).__init__()
self._sectPr = sectPr
self._document_part = document_part
@property
def bottom_margin(self) -> (Length | None):
return self._sectPr.bottom_margin
@bottom_margin.setter
def bottom_margin(self, value: ((int | Length) | None)):
self._sectPr.bottom_margin = value
@property
def different_first_page_header_footer(self) -> bool:
return self._sectPr.titlePg_val
@different_first_page_header_footer.setter
def different_first_page_header_footer(self, value: bool):
self._sectPr.titlePg_val = value
@property
def even_page_footer(self) -> _Footer:
return _Footer(self._sectPr, self._document_part, WD_HEADER_FOOTER.EVEN_PAGE)
@property
def even_page_header(self) -> _Header:
return _Header(self._sectPr, self._document_part, WD_HEADER_FOOTER.EVEN_PAGE)
@property
def first_page_footer(self) -> _Footer:
return _Footer(self._sectPr, self._document_part, WD_HEADER_FOOTER.FIRST_PAGE)
@property
def first_page_header(self) -> _Header:
return _Header(self._sectPr, self._document_part, WD_HEADER_FOOTER.FIRST_PAGE)
@property
def footer(self) -> _Footer:
return _Footer(self._sectPr, self._document_part, WD_HEADER_FOOTER.PRIMARY)
@property
def footer_distance(self) -> (Length | None):
return self._sectPr.footer
@footer_distance.setter
def footer_distance(self, value: ((int | Length) | None)):
self._sectPr.footer = value
@property
def gutter(self) -> (Length | None):
return self._sectPr.gutter
@gutter.setter
def gutter(self, value: ((int | Length) | None)):
self._sectPr.gutter = value
@property
def header(self) -> _Header:
return _Header(self._sectPr, self._document_part, WD_HEADER_FOOTER.PRIMARY)
@property
def header_distance(self) -> (Length | None):
return self._sectPr.header
@header_distance.setter
def header_distance(self, value: ((int | Length) | None)):
self._sectPr.header = value
def iter_inner_content(self) -> Iterator[(Paragraph | Table)]:
for element in self._sectPr.iter_inner_content():
(yield (Paragraph(element, self) if isinstance(element, CT_P) else Table(element, self)))
@property
def left_margin(self) -> (Length | None):
return self._sectPr.left_margin
@left_margin.setter
def left_margin(self, value: ((int | Length) | None)):
self._sectPr.left_margin = value
@property
def orientation(self) -> WD_ORIENTATION:
return self._sectPr.orientation
@orientation.setter
def orientation(self, value: (WD_ORIENTATION | None)):
self._sectPr.orientation = value
@property
def page_height(self) -> (Length | None):
return self._sectPr.page_height
@page_height.setter
def page_height(self, value: (Length | None)):
self._sectPr.page_height = value
@property
def page_width(self) -> (Length | None):
return self._sectPr.page_width
@page_width.setter
def page_width(self, value: (Length | None)):
self._sectPr.page_width = value
@property
def part(self) -> StoryPart:
return self._document_part
@property
def right_margin(self) -> (Length | None):
return self._sectPr.right_margin
@right_margin.setter
def right_margin(self, value: (Length | None)):
self._sectPr.right_margin = value
@property
def start_type(self) -> WD_SECTION_START:
return self._sectPr.start_type
@start_type.setter
def start_type(self, value: (WD_SECTION_START | None)):
self._sectPr.start_type = value
@property
def top_margin(self) -> (Length | None):
return self._sectPr.top_margin
@top_margin.setter
def top_margin(self, value: (Length | None)):
self._sectPr.top_margin = value |
def test_class_weights():
(X, Y) = make_blobs(n_samples=210, centers=3, random_state=1, cluster_std=3, shuffle=False)
X = np.hstack([X, np.ones((X.shape[0], 1))])
(X, Y) = (X[:170], Y[:170])
pbl = MultiClassClf(n_features=3, n_classes=3)
svm = OneSlackSSVM(pbl, C=10)
svm.fit(X, Y)
weights = (1.0 / np.bincount(Y))
weights *= (len(weights) / np.sum(weights))
pbl_class_weight = MultiClassClf(n_features=3, n_classes=3, class_weight=weights)
svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10)
svm_class_weight.fit(X, Y)
assert_greater(f1_score(Y, svm_class_weight.predict(X), average='macro'), f1_score(Y, svm.predict(X), average='macro')) |
def test(test_episodes):
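# Evaluate the supervised policy on the last test_episodes relation samples: restore the saved model, roll out each episode in the environment, and report the success rate.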
tf.reset_default_graph()
policy_nn = SupervisedPolicy()
f = open(relationPath)
test_data = f.readlines()
f.close()
test_num = len(test_data)
test_data = test_data[(- test_episodes):]
print(len(test_data))
success = 0
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, ('models/policy_supervised_' + relation))
print('Model reloaded')
for episode in range(len(test_data)):
print(('Test sample %d: %s' % (episode, test_data[episode][:(- 1)])))
env = Env(dataPath, test_data[episode])
sample = test_data[episode].split()
state_idx = [env.entity2id_[sample[0]], env.entity2id_[sample[1]], 0]
for t in count():
state_vec = env.idx_state(state_idx)
action_probs = policy_nn.predict(state_vec)
action_chosen = np.random.choice(np.arange(action_space), p=np.squeeze(action_probs))
(reward, new_state, done) = env.interact(state_idx, action_chosen)
if (done or (t == max_steps_test)):
if done:
print('Success')
success += 1
print('Episode ends\n')
break
state_idx = new_state
print('Success percentage:', (success / test_episodes)) |
class UtilsSplitStripTest(TestCase):
def test_empty(self):
split = tag_utils.split_strip(None)
self.assertEqual(split, [])
split = tag_utils.split_strip('')
self.assertEqual(split, [])
def test_spaceless(self):
split = tag_utils.split_strip('adam,brian')
self.assertEqual(len(split), 2)
self.assertEqual(split[0], 'adam')
self.assertEqual(split[1], 'brian')
def test_spaced(self):
split = tag_utils.split_strip(' adam , brian ')
self.assertEqual(len(split), 2)
self.assertEqual(split[0], 'adam')
self.assertEqual(split[1], 'brian') |
class ClassifiersFactory(AbstractFactory):
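# Ordered collection of classifiers trained and queried together; predictions for all members are dispatched through map_on_cluster, optionally across a parallel profile.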
def add_classifier(self, name, classifier):
if isinstance(classifier, Classifier):
self[name] = classifier
elif (isinstance(classifier, BaseEstimator) and isinstance(classifier, ClassifierMixin)):
self[name] = SklearnClassifier(classifier)
else:
raise NotImplementedError('Supports only instances of sklearn.base.BaseEstimator or rep.estimators.interface.Classifier')
def predict(self, X, parallel_profile=None):
return self._predict_method(X, parallel_profile=parallel_profile, prediction_type='classification')
def predict_proba(self, X, parallel_profile=None):
return self._predict_method(X, parallel_profile=parallel_profile, prediction_type='classification-proba')
def _predict_method(self, X, parallel_profile=None, prediction_type='classification'):
predictions = OrderedDict()
start_time = time.time()
result = utils.map_on_cluster(parallel_profile, predict_estimator, list(self.keys()), list(self.values()), ([X] * len(self)), ([prediction_type] * len(self)))
for (status, data) in result:
if (status == 'success'):
(name, prob, spent_time) = data
predictions[name] = prob
print('data was predicted by {:12} in {:.2f} seconds'.format(name, spent_time))
else:
print('Problem while predicting on the node, report:\n', data)
print('Totally spent {:.2f} seconds on prediction'.format((time.time() - start_time)))
return predictions
def staged_predict_proba(self, X):
generators_dict = OrderedDict()
for (name, classifier) in self.items():
try:
generators_dict[name] = classifier.staged_predict_proba(X)
except AttributeError:
pass
return generators_dict
def test_on_lds(self, lds):
return classification.ClassificationReport(self, lds) |
class Accumulator():
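# Accumulates embedding batches for later evaluation: embeddings() concatenates everything collected so far, set_filled() marks the accumulator complete, and reset() clears it.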
def __init__(self):
self._embeddings = []
self._filled = False
def state(self) -> Dict[(str, torch.Tensor)]:
return {'embeddings': self.embeddings()}
def filled(self) -> bool:
return self._filled
def set_filled(self):
self._filled = True
def embeddings(self):
return (torch.cat(self._embeddings) if len(self._embeddings) else torch.Tensor())
def update(self, **kwargs) -> None:
raise NotImplementedError()
def reset(self):
self._embeddings = []
self._filled = False |