code stringlengths 281 23.7M |
|---|
def dataloader():
    """Build a stub data loader for smoke tests.

    Returns an object whose iterator yields exactly one random
    (1, 3, 32, 32) float32 batch (NCHW, 32x32 RGB) and whose
    ``len()`` reports 4.
    """
    class DataLoader():
        def __init__(self, batch_size: int):
            self.batch_size = batch_size

        def __iter__(self):
            # One random dummy batch is enough for a smoke test.
            yield np.random.rand(1, 3, 32, 32).astype(np.float32)

        def __len__(self):
            return 4

    return DataLoader(batch_size=2)
class ZipReader(BaseReader):
    """Reader backed by zip archives, with a process-wide handle cache.

    NOTE(review): methods take no ``self``/``cls`` and are invoked as
    ``ZipReader.open(path)`` — presumably they were ``@staticmethod``
    before decorators were stripped; confirm against the base class.
    """
    # Cache of path -> open zipfile.ZipFile, shared by all call sites.
    reader_cache = dict()

    def open(path):
        """Return a cached ZipFile for ``path``, opening it on first use."""
        zip_files = ZipReader.reader_cache
        if (path not in zip_files):
            zip_files[path] = zipfile.ZipFile(path, 'r')
        return zip_files[path]

    def close(path):
        """Close and evict the cached ZipFile for ``path`` (no-op if absent)."""
        zip_files = ZipReader.reader_cache
        zip_file = zip_files.pop(path, None)
        if (zip_file is not None):
            zip_file.close()

    def open_anno_file(path, anno_filename=None):
        """Open ``anno_filename`` inside the archive.

        Returns None when no name is given or the entry does not exist.
        """
        zip_file = ZipReader.open(path)
        if (not anno_filename):
            return None
        if (anno_filename not in zip_file.namelist()):
            return None
        return zip_file.open(anno_filename, 'r')

    def _get_file_list(path):
        """List all regular-file entries (directories excluded)."""
        zip_file = ZipReader.open(path)
        return [info.filename for info in zip_file.infolist() if (not info.is_dir())]

    def fetch_file(path, filename):
        """Read and return the raw bytes of ``filename`` from the archive."""
        zip_file = ZipReader.open(path)
        return zip_file.read(filename)
('the reported {margin_side} margin is {inches} inches')
def then_the_reported_margin_is_inches(context: Context, margin_side: str, inches: str):
    """Behave step: assert the section reports the expected margin size."""
    side_to_prop = {
        'left': 'left_margin',
        'right': 'right_margin',
        'top': 'top_margin',
        'bottom': 'bottom_margin',
        'gutter': 'gutter',
        'header': 'header_distance',
        'footer': 'footer_distance',
    }
    prop_name = side_to_prop[margin_side]
    expected_value = Inches(float(inches))
    assert getattr(context.section, prop_name) == expected_value
def is_sat(formula, solver_name=None, logic=None, portfolio=None):
    """Check satisfiability of ``formula`` via the global environment.

    A formula built outside the global formula manager is first
    normalized into it (with a warning), then the query is delegated
    to the environment's solver factory.
    """
    env = get_env()
    manager = env.formula_manager
    if formula not in manager:
        # The formula belongs to another environment; pull it into ours.
        warnings.warn('Warning: Contextualizing formula during is_sat')
        formula = manager.normalize(formula)
    return env.factory.is_sat(formula,
                              solver_name=solver_name,
                              logic=logic,
                              portfolio=portfolio)
class TestLayerSelector(unittest.TestCase):
    """Tests for the conv / conv+FC layer selectors on a Keras model.

    NOTE(review): the TF-version guard is reconstructed as wrapping only
    ``clear_session()`` (indentation was lost in extraction) — confirm.
    """

    def test_select_all_conv_layers(self):
        """ConvNoDepthwiseLayerSelector picks the standard conv layers and
        honors the modules-to-ignore list."""
        if (version.parse(tf.version.VERSION) >= version.parse('2.00')):
            tf.keras.backend.clear_session()
        model = get_model()
        conv1_op = model.layers[1]
        conv2_op = model.layers[2]
        conv3_op = model.layers[3]
        matmul1_op = model.layers[5]
        conv1_op_output_shape = conv1_op.output_shape
        conv2_op_output_shape = conv2_op.output_shape
        conv3_op_output_shape = conv3_op.output_shape
        matmul1_op_output_shape = matmul1_op.output_shape
        layer1 = Layer(conv1_op, conv1_op.name, output_shape=conv1_op_output_shape)
        layer2 = Layer(conv2_op, conv2_op.name, output_shape=conv2_op_output_shape)
        layer3 = Layer(conv3_op, conv3_op.name, output_shape=conv3_op_output_shape)
        layer4 = Layer(matmul1_op, matmul1_op.name, output_shape=matmul1_op_output_shape)
        # Stand-in layer database: iteration yields the four layers above.
        layer_db = MagicMock()
        layer_db.__iter__.return_value = [layer1, layer2, layer3, layer4]
        layer_selector = ConvNoDepthwiseLayerSelector()
        layer_selector.select(layer_db, [])
        # conv3/matmul are expected to be filtered out by the selector.
        layer_db.mark_picked_layers.assert_called_once_with([layer1, layer2])
        layer_db.mark_picked_layers.reset_mock()
        # Excluding conv2's module should drop it from the picked set.
        layer_selector.select(layer_db, [layer2.module])
        layer_db.mark_picked_layers.assert_called_once_with([layer1])

    def test_select_all_conv_and_fc_layers(self):
        """ConvFcLayerSelector additionally picks the fully-connected layer."""
        if (version.parse(tf.version.VERSION) >= version.parse('2.00')):
            tf.keras.backend.clear_session()
        model = get_model()
        conv1_op = model.layers[1]
        conv2_op = model.layers[2]
        conv3_op = model.layers[3]
        matmul1_op = model.layers[5]
        conv1_op_output_shape = conv1_op.output_shape
        conv2_op_output_shape = conv2_op.output_shape
        conv3_op_output_shape = conv3_op.output_shape
        matmul1_op_output_shape = matmul1_op.output_shape
        layer1 = Layer(conv1_op, conv1_op.name, output_shape=conv1_op_output_shape)
        layer2 = Layer(conv2_op, conv2_op.name, output_shape=conv2_op_output_shape)
        layer3 = Layer(conv3_op, conv3_op.name, output_shape=conv3_op_output_shape)
        layer4 = Layer(matmul1_op, matmul1_op.name, output_shape=matmul1_op_output_shape)
        layer_db = MagicMock()
        layer_db.__iter__.return_value = [layer1, layer2, layer3, layer4]
        layer_selector = ConvFcLayerSelector()
        layer_selector.select(layer_db, [])
        layer_db.mark_picked_layers.assert_called_once_with([layer1, layer2, layer4])
        layer_db.mark_picked_layers.reset_mock()
        layer_selector.select(layer_db, [layer2.module])
        layer_db.mark_picked_layers.assert_called_once_with([layer1, layer4])
.parametrize('perturb_prob', [1.0, pytest.param(0.0, marks=pytest.mark.xfail)])
def test_perturbation_is_applied(perturb_prob: float, dmg: LocalDataManager, cfg: dict, zarr_dataset: ChunkedDataset) -> None:
    """Perturbing the ego dataset must change target positions and yaws.

    Parametrized over ``perturb_prob``; the 0.0 case never perturbs and
    is therefore expected to fail (xfail in the stripped decorator).
    """
    rasterizer = build_rasterizer(cfg, dmg)
    # Baseline sample with no perturbation.
    dataset = EgoDataset(cfg, zarr_dataset, rasterizer, None)
    data_no_perturb = dataset[0]
    # Replayed (fixed) random values keep the perturbation deterministic.
    perturb = AckermanPerturbation(ReplayRandomGenerator(np.asarray([[4.0, 1.0, 0.33]])), perturb_prob=perturb_prob)
    dataset = EgoDataset(cfg, zarr_dataset, rasterizer, perturb)
    data_perturb = dataset[0]
    assert (np.linalg.norm((data_no_perturb['target_positions'] - data_perturb['target_positions'])) > 0)
    assert (np.linalg.norm((data_no_perturb['target_yaws'] - data_perturb['target_yaws'])) > 0)
class WriteToConn():
    """File-like sink that forwards everything written to an IPC server.

    Each ``write`` sends a one-entry dict ``{output_key: text}`` to the
    server, so the receiver can tell stdout from stderr by key.
    """

    def __init__(self, server: IPCBase, output_key: str='stdout') -> None:
        self.server = server
        self.output_key = output_key

    def write(self, output: str) -> int:
        """Send ``output`` to the server and return its length."""
        payload: dict[(str, Any)] = {self.output_key: output}
        send(self.server, payload)
        return len(output)

    def writelines(self, lines: Iterable[str]) -> None:
        """Write each line in turn (no newline handling, like io streams)."""
        for line in lines:
            self.write(line)
def next_start_segment(str, is_segment):
    """For each character position, record the index of the next segment start.

    NOTE(review): the first parameter shadows the builtin ``str``; kept
    as-is to preserve the public (keyword-callable) signature.
    ``mark_start_segment_index`` is defined elsewhere and is assumed to
    yield segment-start indices in increasing order — confirm with callers.
    """
    str = ''.join(str)
    result = []
    for start in mark_start_segment_index(str, is_segment):
        # Pad every position before this segment start with ``start``.
        # (The slice end index is irrelevant when inserting at len(result).)
        result[len(result):start] = [start for x in range((start - len(result)))]
    # Remaining positions (at/after the last start) map to len(str), a
    # one-past-the-end sentinel; note the extra "+ 1" trailing entry.
    result[len(result):len(str)] = [len(str) for x in range(((len(str) - len(result)) + 1))]
    return result
def main():
    """Scrape AO3 fics to CSV, appending rows and logging errors.

    Reads CLI options via ``get_args``; fic IDs come either from a CSV
    file (first positional argument) or directly from the argument list.
    Output is opened in append mode so runs can be resumed; a ``restart``
    ID skips rows until that ID is seen again.
    """
    (fic_ids, csv_out, headers, restart, is_csv, only_first_chap, lang, include_bookmarks, metadata_only) = get_args()
    os.chdir(os.getcwd())
    output_directory = os.path.dirname(csv_out)
    print(output_directory)
    if (output_directory and (not os.path.isdir(output_directory))):
        print(('Creating output directory ' + output_directory))
        os.mkdir(output_directory)
    with open(csv_out, 'a', newline='') as f_out:
        writer = csv.writer(f_out)
        # Errors go to a sibling file: errors_<basename> in the same dir.
        with open(os.path.join(os.path.dirname(csv_out), ('errors_' + os.path.basename(csv_out))), 'a', newline='') as e_out:
            errorwriter = csv.writer(e_out)
            # Only write the header when the output file is brand new.
            if (os.stat(csv_out).st_size == 0):
                print('Writing a header row for the csv.')
                header = ['work_id', 'title', 'author', 'rating', 'category', 'fandom', 'relationship', 'character', 'additional tags', 'language', 'published', 'status', 'status date', 'words', 'chapters', 'comments', 'kudos', 'bookmarks', 'hits', 'all_kudos', 'all_bookmarks', 'body']
                writer.writerow(header)
            if is_csv:
                # IDs come from the first column of an input CSV.
                csv_fname = fic_ids[0]
                with open(csv_fname, 'r+', newline='') as f_in:
                    reader = csv.reader(f_in)
                    if (restart == ''):
                        for row in reader:
                            if (not row):
                                continue
                            write_fic_to_csv(row[0], only_first_chap, lang, include_bookmarks, metadata_only, writer, errorwriter, headers)
                            # ``delay`` is a module-level rate limit between requests.
                            time.sleep(delay)
                    else:
                        # Skip rows until the restart ID is found, then resume.
                        found_restart = False
                        for row in reader:
                            if (not row):
                                continue
                            found_restart = process_id(row[0], restart, found_restart)
                            if found_restart:
                                write_fic_to_csv(row[0], only_first_chap, lang, include_bookmarks, metadata_only, writer, errorwriter, headers)
                                time.sleep(delay)
                            else:
                                print('Skipping already processed fic')
            else:
                # IDs were passed directly on the command line.
                for fic_id in fic_ids:
                    write_fic_to_csv(fic_id, only_first_chap, lang, include_bookmarks, metadata_only, writer, errorwriter, headers)
                    time.sleep(delay)
def _worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
    """Vectorized-env worker loop that writes observations into shared memory.

    Runs in a child process: receives ``(command, data)`` tuples over
    ``pipe`` and replies with ``(result, success)`` pairs. Observations are
    never sent through the pipe; they are written into ``shared_memory`` at
    slot ``index`` instead. On any exception, ``(index, exc_type, exc_value)``
    is pushed onto ``error_queue`` and ``(None, False)`` is sent back.
    """
    assert (shared_memory is not None)
    env = env_fn()
    observation_space = env.observation_space
    parent_pipe.close()  # only the child end of the pipe is used here
    try:
        while True:
            (command, data) = pipe.recv()
            if (command == 'reset'):
                observation = env.reset()
                write_to_shared_memory(index, observation, shared_memory, observation_space)
                pipe.send((None, True))
            elif (command == 'step'):
                (observation, reward, done, info) = env.step(data)
                write_to_shared_memory(index, observation, shared_memory, observation_space)
                # Observation travels via shared memory, so None takes its place.
                pipe.send(((None, reward, done, info), True))
            elif (command == 'seed'):
                env.seed(data)
                pipe.send((None, True))
            elif (command == 'close'):
                pipe.send((None, True))
                break
            elif (command == '_call'):
                (name, args, kwargs) = data
                if (name in ['reset', 'step', 'seed', 'close']):
                    raise ValueError(f'Trying to call function `{name}` with `_call`. Use `{name}` directly instead.')
                function = getattr(env, name)
                if callable(function):
                    pipe.send((function(*args, **kwargs), True))
                else:
                    pipe.send((function, True))
            elif (command == '_setattr'):
                (name, value) = data
                setattr(env, name, value)
                pipe.send((None, True))
            elif (command == '_check_observation_space'):
                pipe.send(((data == observation_space), True))
            else:
                # BUG FIX: the literal braces around the command list must be
                # escaped ({{ }}) — str.format() on the unescaped string raised
                # ValueError instead of this RuntimeError.
                raise RuntimeError('Received unknown command `{0}`. Must be one of {{`reset`, `step`, `seed`, `close`, `_check_observation_space`}}.'.format(command))
    except (KeyboardInterrupt, Exception):
        error_queue.put(((index,) + sys.exc_info()[:2]))
        pipe.send((None, False))
    finally:
        env.close()
class CoordAtt(nn.Module):
    """Coordinate Attention block: attends separately along H and W axes."""

    def __init__(self, inp, oup, reduction=32):
        super(CoordAtt, self).__init__()
        # Pool width away -> (n, c, h, 1); pool height away -> (n, c, 1, w).
        self.pool_h = nn.AdaptiveAvgPool2d((None, 1))
        self.pool_w = nn.AdaptiveAvgPool2d((1, None))
        # Bottleneck width, floored at 8 channels.
        mip = max(8, (inp // reduction))
        self.conv1 = nn.Conv2d(inp, mip, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(mip)
        self.act = h_swish()
        # Separate 1x1 convs produce the per-axis attention maps.
        self.conv_h = nn.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)
        self.conv_w = nn.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        identity = x
        (n, c, h, w) = x.size()
        x_h = self.pool_h(x)                       # (n, c, h, 1)
        x_w = self.pool_w(x).permute(0, 1, 3, 2)   # (n, c, w, 1)
        # Process both axis descriptors with one shared bottleneck conv.
        y = torch.cat([x_h, x_w], dim=2)           # (n, c, h + w, 1)
        y = self.conv1(y)
        y = self.bn1(y)
        y = self.act(y)
        (x_h, x_w) = torch.split(y, [h, w], dim=2)
        x_w = x_w.permute(0, 1, 3, 2)              # back to (n, mip, 1, w)
        a_h = self.conv_h(x_h).sigmoid()           # attention over height
        a_w = self.conv_w(x_w).sigmoid()           # attention over width
        # Reweight the input by both attention maps (assumes oup == inp for
        # the elementwise product with identity — confirm at call sites).
        out = ((identity * a_w) * a_h)
        return out
def skipgram_cmn(filename, min_cnt, max_vocab, n_embedding, n_window, word_list=None, word_level=True):
    """Train skip-gram word embeddings and return ``(word_vector, idx2word)``.

    The vocabulary comes either from the trained model (capped to
    ``max_vocab`` with ``<unk>`` appended last) or from ``word_list``,
    a file whose first whitespace-separated token per line is a word.
    The final row of ``word_vector`` (the ``<unk>`` slot) is the mean of
    all other vectors.

    NOTE(review): uses ``xrange`` and the pre-1.0 gensim API
    (``model.index2word``, ``train()`` without epoch arguments) —
    this is Python 2 / legacy-gensim only code.
    """
    n_worker = multiprocessing.cpu_count()
    logger.info(("This machine has %d processors. We'll use %d of them" % (n_worker, n_worker)))
    # max_vocab*3 gives build_vocab headroom before pruning to max_vocab.
    model = gensim.models.Word2Vec(min_count=min_cnt, workers=n_worker, size=n_embedding, window=n_window, max_vocab_size=((max_vocab * 3) if (max_vocab is not None) else None), sg=1, negative=7)
    model.build_vocab(sentence_cmn(filename, word_level))
    # Seven passes over the corpus, re-reading the sentence stream each time.
    for _ in xrange(7):
        model.train(sentence_cmn(filename, word_level))
    if (word_list is None):
        idx2word = [w for w in model.index2word if (w != u'<unk>')]
        if (max_vocab is not None):
            idx2word = idx2word[:(max_vocab - 1)]
        idx2word.append(u'<unk>')
    else:
        with codecs.open(word_list, 'rb', 'utf8') as fp:
            idx2word = [line.strip().split()[0] for line in fp]
    word_vector = numpy.ndarray((len(idx2word), model.layer1_size), numpy.float32)
    for i in xrange((len(idx2word) - 1)):
        word_vector[i] = model[idx2word[i]]
    # <unk> (last slot) gets the mean of all known word vectors.
    word_vector[(len(idx2word) - 1)] = word_vector[:(len(idx2word) - 1)].mean(0)
    return (word_vector, idx2word)
def generate_from_asin_reg(docs: List[List[str]], samples_per_asin=3):
    """Sample negative item pairs from each document.

    A document with fewer than ``samples_per_asin`` distinct pairs
    (i.e. n*(n-1)/2 combinations) is skipped entirely.
    """
    negative_pairs = []
    for doc in docs:
        n_items = len(doc)
        possible_pairs = (n_items * (n_items - 1)) / 2
        if possible_pairs < samples_per_asin:
            continue
        sampled = utils.Rnd.random_pairs(doc, samples_per_asin)
        negative_pairs.extend(tuple(pair) for pair in sampled)
    return negative_pairs
class EsmTokenizer(PreTrainedTokenizer):
    """Tokenizer for ESM protein language models (whitespace tokenization).

    The vocabulary is a flat token-per-line file; tokens map to their
    line index. Sequences are wrapped as ``<cls> seq <eos>`` (the eos
    token doubles as the separator for pairs).
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for (ind, tok) in enumerate(self.all_tokens)}
        self.unk_token = '<unk>'
        self.cls_token = '<cls>'
        self.pad_token = '<pad>'
        self.mask_token = '<mask>'
        self.eos_token = '<eos>'
        # Every vocab token is treated as atomic (never split further).
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id to its token, falling back to the unk token."""
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token to its id, falling back to the unk id."""
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # Protein sequences are pre-tokenized; split on whitespace only.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for (i, token) in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Wrap one or two sequences with cls/eos special tokens."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.eos_token_id])
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]
        # Pair layout: <cls> A <eos> B <eos>
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a 1/0 mask marking special-token positions."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return [(1 if (token in self.all_special_ids) else 0) for token in token_ids_0]
        mask = (([1] + ([0] * len(token_ids_0))) + [1])
        if (token_ids_1 is not None):
            mask += (([0] * len(token_ids_1)) + [1])
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        """Write the vocabulary, one token per line, and return its path."""
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + 'vocab.txt'))
        with open(vocab_file, 'w') as f:
            f.write('\n'.join(self.all_tokens))
        return (vocab_file,)

    def vocab_size(self) -> int:
        # NOTE(review): upstream declares this as a @property; the decorator
        # appears to have been stripped in extraction — confirm.
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[(List[str], List[AddedToken])], special_tokens: bool=False) -> int:
        # All added tokens are forced to be "special" (never split).
        return super()._add_tokens(new_tokens, special_tokens=True)
class SbmlExporter(Exporter):
    """Export a PySB model to SBML (Level 3 Version 2, downconvertible)."""

    def __init__(self, *args, **kwargs):
        if (not libsbml):
            raise ImportError('The SbmlExporter requires the libsbml python package')
        super(SbmlExporter, self).__init__(*args, **kwargs)

    def _sympy_to_sbmlast(self, sympy_expr):
        """Convert a sympy expression to a libsbml AST via content MathML."""
        return _xml_to_ast(MathMLContentPrinter().to_xml(sympy_expr))

    def convert(self, level=(3, 2)):
        """Build and return a libsbml.SBMLDocument for ``self.model``.

        Generates the reaction network first, then emits compartments,
        expressions (as assignment rules), species, parameters,
        bidirectional reactions, and observables; finally downconverts
        if a ``level`` other than (3, 2) was requested.
        """
        doc = libsbml.SBMLDocument(3, 2)
        smodel = doc.createModel()
        _check(smodel)
        _check(smodel.setName(self.model.name))
        pysb.bng.generate_equations(self.model)
        if self.docstring:
            # NOTE(review): the xmlns attribute value in this template looks
            # truncated/garbled (should presumably be the XHTML namespace
            # URI) — confirm against upstream before relying on the notes.
            notes_str = ('\n <notes>\n <body xmlns=" <p>%s</p>\n </body>\n </notes>' % self.docstring.replace('\n', ('<br />\n' + (' ' * 20))))
            _check(smodel.setNotes(notes_str))
        # Compartments: mirror the model's, or a single 'default' one.
        if self.model.compartments:
            for cpt in self.model.compartments:
                c = smodel.createCompartment()
                _check(c)
                _check(c.setId(cpt.name))
                _check(c.setSpatialDimensions(cpt.dimension))
                _check(c.setSize((1 if (cpt.size is None) else cpt.size.value)))
                _check(c.setConstant(True))
        else:
            c = smodel.createCompartment()
            _check(c)
            _check(c.setId('default'))
            _check(c.setSpatialDimensions(3))
            _check(c.setSize(1))
            _check(c.setConstant(True))
        # Expressions become non-constant parameters with assignment rules.
        for expr in itertools.chain(self.model.expressions_constant(), self.model.expressions_dynamic(include_local=False), self.model._derived_expressions):
            e = smodel.createParameter()
            _check(e)
            _check(e.setId(expr.name))
            _check(e.setName(expr.name))
            _check(e.setConstant(False))
            expr_rule = smodel.createAssignmentRule()
            _check(expr_rule)
            _check(expr_rule.setVariable(e.getId()))
            expr_mathml = self._sympy_to_sbmlast(expr.expand_expr(expand_observables=True))
            _check(expr_rule.setMath(expr_mathml))
        # Initial conditions: remember which species are initialized/fixed.
        fixed_species_idx = set()
        initial_species_idx = set()
        for ic in self.model.initials:
            sp_idx = self.model.get_species_index(ic.pattern)
            ia = smodel.createInitialAssignment()
            _check(ia)
            _check(ia.setSymbol('__s{}'.format(sp_idx)))
            init_mathml = self._sympy_to_sbmlast(Symbol(ic.value.name))
            _check(ia.setMath(init_mathml))
            initial_species_idx.add(sp_idx)
            if ic.fixed:
                fixed_species_idx.add(sp_idx)
        # Species: ids are __s<i>; fixed species become boundary conditions.
        for (i, s) in enumerate(self.model.species):
            sp = smodel.createSpecies()
            _check(sp)
            _check(sp.setId('__s{}'.format(i)))
            if self.model.compartments:
                # Derive the species compartment from its monomer patterns;
                # SBML cannot express mixed monomer compartments.
                mon_cpt = set((mp.compartment for mp in s.monomer_patterns if (mp.compartment is not None)))
                if ((len(mon_cpt) == 0) and s.compartment):
                    compartment_name = s.compartment_name
                elif (len(mon_cpt) == 1):
                    mon_cpt = mon_cpt.pop()
                    if ((s.compartment is not None) and (mon_cpt != s.compartment)):
                        raise ValueError('Species {} has different monomer and species compartments, which is not supported in SBML'.format(s))
                    compartment_name = mon_cpt.name
                else:
                    raise ValueError('Species {} has more than one different monomer compartment, which is not supported in SBML'.format(s))
            else:
                compartment_name = 'default'
            _check(sp.setCompartment(compartment_name))
            _check(sp.setName(str(s).replace('% ', '._br_')))
            _check(sp.setBoundaryCondition((i in fixed_species_idx)))
            _check(sp.setConstant(False))
            _check(sp.setHasOnlySubstanceUnits(True))
            if (i not in initial_species_idx):
                _check(sp.setInitialAmount(0.0))
        # Model parameters are emitted as constant SBML parameters.
        for param in itertools.chain(self.model.parameters, self.model._derived_parameters):
            p = smodel.createParameter()
            _check(p)
            _check(p.setId(param.name))
            _check(p.setName(param.name))
            _check(p.setValue(param.value))
            _check(p.setConstant(True))
        # Reactions (bidirectional form), with modifiers for any species
        # symbols referenced inside expression-valued rates.
        for (i, reaction) in enumerate(self.model.reactions_bidirectional):
            rxn = smodel.createReaction()
            _check(rxn)
            _check(rxn.setId('r{}'.format(i)))
            _check(rxn.setName(' + '.join(reaction['rule'])))
            _check(rxn.setReversible(reaction['reversible']))
            for sp in reaction['reactants']:
                reac = rxn.createReactant()
                _check(reac)
                _check(reac.setSpecies('__s{}'.format(sp)))
                _check(reac.setConstant(True))
            for sp in reaction['products']:
                prd = rxn.createProduct()
                _check(prd)
                _check(prd.setSpecies('__s{}'.format(sp)))
                _check(prd.setConstant(True))
            for symbol in reaction['rate'].free_symbols:
                if isinstance(symbol, pysb.Expression):
                    expr = symbol.expand_expr(expand_observables=True)
                    for sym in expr.free_symbols:
                        if (not isinstance(sym, (pysb.Parameter, pysb.Expression))):
                            modifier = rxn.createModifier()
                            _check(modifier)
                            _check(modifier.setSpecies(str(sym)))
            rate = rxn.createKineticLaw()
            _check(rate)
            rate_mathml = self._sympy_to_sbmlast(reaction['rate'])
            _check(rate.setMath(rate_mathml))
        # Observables become parameters driven by assignment rules.
        for (i, observable) in enumerate(self.model.observables):
            obs = smodel.createParameter()
            _check(obs)
            _check(obs.setId('__obs{}'.format(i)))
            _check(obs.setName(observable.name))
            _check(obs.setConstant(False))
            obs_rule = smodel.createAssignmentRule()
            _check(obs_rule)
            _check(obs_rule.setVariable(obs.getId()))
            obs_mathml = self._sympy_to_sbmlast(observable.expand_obs())
            _check(obs_rule.setMath(obs_mathml))
        # Downconvert if a different SBML level/version was requested.
        if (level != (3, 2)):
            prop = libsbml.ConversionProperties(libsbml.SBMLNamespaces(*level))
            prop.addOption('strict', False)
            prop.addOption('setLevelAndVersion', True)
            prop.addOption('ignorePackages', True)
            _check(doc.convert(prop))
        return doc

    def export(self, level=(3, 2)):
        """Return the model serialized as an SBML XML string."""
        return libsbml.writeSBMLToString(self.convert(level=level))
class Templates(object):
    """Collection of utterance templates, finalized into a scored DataFrame."""

    def __init__(self, templates=None, finalized=False):
        # BUG FIX: the previous default ``templates=[]`` was a mutable default
        # argument shared across every instance constructed without one.
        self.templates = [] if templates is None else templates
        self.template_id = len(self.templates)
        self.finalized = finalized

    @classmethod
    def from_pickle(cls, path):
        """Alternate constructor: load already-finalized templates from disk.

        BUG FIX: the method uses ``cls`` but was missing @classmethod,
        so ``Templates.from_pickle(path)`` raised a TypeError.
        """
        templates = read_pickle(path)
        return cls(templates=templates, finalized=True)

    def add_template(self, utterance, dialogue_state):
        """Subclass hook for registering a new template."""
        raise NotImplementedError

    def finalize(self):
        """Freeze the template list into a DataFrame and score it."""
        self.templates = pd.DataFrame(self.templates)
        self.score_templates()
        self.finalized = True

    def save(self, output):
        """Pickle the finalized templates to ``output``."""
        assert self.finalized
        write_pickle(self.templates, output)

    def score_templates(self):
        """Attach a 'logp' column: length-weighted negative trigram entropy."""
        sequences = [s.split() for s in self.templates.template.values]
        vocab = build_vocabulary(1, *sequences)
        counter = count_ngrams(3, vocab, sequences, pad_left=True, pad_right=False)
        model = MLENgramModel(counter)
        scores = [(((- 1.0) * model.entropy(s)) * len(s)) for s in sequences]
        if (not ('logp' in self.templates.columns)):
            self.templates.insert(0, 'logp', 0)
        self.templates['logp'] = scores
class SUMMA_CrossEntropy(torch.autograd.Function):
    """Cross-entropy over vocab-parallel logits (SUMMA row groups).

    Each rank holds a [vocab_start, vocab_end) slice of the vocabulary;
    max/sum reductions run over the SUMMA row process group.

    NOTE(review): ``forward``/``backward`` lack @staticmethod — the older
    torch.autograd.Function style, or stripped decorators; confirm.
    """

    def forward(ctx, _vocab_parallel_logits, target, vocab_start, vocab_end):
        # Subtract the global max for numerical stability.
        logits_max = torch.max(_vocab_parallel_logits, dim=(- 1))[0]
        torch.distributed.all_reduce(logits_max, op=torch.distributed.ReduceOp.MAX, group=get_summa_row_group())
        vocab_parallel_logits = (_vocab_parallel_logits - logits_max.unsqueeze(dim=(- 1)))
        # Targets outside this rank's vocab slice contribute 0 locally;
        # the all_reduce below sums the one rank that owns each target.
        target_mask = ((target < vocab_start) | (target >= vocab_end))
        masked_target = (target.clone() - vocab_start)
        masked_target[target_mask] = 0
        logits_2d = vocab_parallel_logits.view((- 1), (vocab_end - vocab_start))
        masked_target_1d = masked_target.view((- 1))
        arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device)
        predicted_logits_1d = logits_2d[(arange_1d, masked_target_1d)]
        predicted_logits_1d = predicted_logits_1d.clone().contiguous()
        predicted_logits = predicted_logits_1d.view_as(target)
        predicted_logits[target_mask] = 0.0
        torch.distributed.all_reduce(predicted_logits, op=torch.distributed.ReduceOp.SUM, group=get_summa_row_group())
        # Softmax denominator; exp is computed in place into the shifted logits.
        exp_logits = vocab_parallel_logits
        torch.exp(vocab_parallel_logits, out=exp_logits)
        sum_exp_logits = exp_logits.sum(dim=(- 1))
        torch.distributed.all_reduce(sum_exp_logits, op=torch.distributed.ReduceOp.SUM, group=get_summa_row_group())
        # loss = log(sum(exp(logits))) - predicted_logit
        loss = (torch.log(sum_exp_logits) - predicted_logits)
        # Normalize in place so the saved tensor is the softmax for backward.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=(- 1)))
        ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
        return loss

    def backward(ctx, output_grad):
        (softmax, target_mask, masked_target_1d) = ctx.saved_tensors
        # grad = softmax - one_hot(target), scaled by the incoming grad;
        # computed in place on the saved softmax tensor.
        grad_input = softmax
        partition_vocab_size = softmax.size()[(- 1)]
        grad_2d = grad_input.view((- 1), partition_vocab_size)
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device)
        # Subtract 1 only where this rank owns the target index.
        grad_2d[(arange_1d, masked_target_1d)] -= (1.0 - target_mask.view((- 1)).float())
        grad_input.mul_(output_grad.unsqueeze(dim=(- 1)))
        return (grad_input, None, None, None)
def _convert_cropping(inexpr, keras_layer, _):
    """Convert a Keras Cropping2D layer into a TVM strided_slice.

    Only Cropping2D is supported; other cropping ops raise
    OpNotImplemented. Cropping amounts are expressed as a slice over the
    (N, C, H, W) layout with int32-max standing in for "to the end".
    """
    _check_data_format(keras_layer)
    crop_type = type(keras_layer).__name__
    if (crop_type == 'Cropping2D'):
        (_, in_h, in_w, _) = keras_layer.input_shape
        ((crop_t, crop_b), (crop_l, crop_r)) = keras_layer.cropping
    else:
        raise tvm.error.OpNotImplemented('Operator {} is not supported for frontend Keras.'.format(crop_type))
    # int32-max end index == "unbounded" for the batch/channel dimensions.
    int32_max = np.iinfo(np.int32).max
    return _op.strided_slice(inexpr, begin=[0, 0, crop_t, crop_l], end=[int32_max, int32_max, (in_h - crop_b), (in_w - crop_r)])
_ordering
class Ticker(metaclass=ABCMeta):
    """Abstract security identifier with an orderable (class, ticker) key.

    NOTE(review): decorators appear stripped in extraction — ``name``
    reads like a @property, ``from_string`` like an abstract/class
    method; confirm against the original module (see also the bare
    ``_ordering`` line above this class).
    """

    def __init__(self, ticker: str, security_type: SecurityType, point_value: int):
        self.ticker = ticker
        self.security_type = security_type
        self.point_value = point_value
        # Display name defaults to the raw ticker; see set_name().
        self._name = ticker
        self.logger = qf_logger.getChild(self.__class__.__name__)

    def __str__(self):
        return "{}('{}')".format(self.__class__.__name__, self.ticker)

    def __repr__(self):
        return "{}('{}')".format(self.__class__.__name__, self.ticker)

    def as_string(self) -> str:
        """Return the plain ticker string."""
        return self.ticker

    def name(self) -> str:
        """Return the display name (defaults to the ticker)."""
        return self._name

    def set_name(self, name: str):
        """Override the display name."""
        self._name = name

    def from_string(self, ticker_str: Union[(str, Sequence[str])]) -> Union[('Ticker', Sequence['Ticker'])]:
        """Parse one or many ticker strings into Ticker objects (subclass hook)."""
        pass

    def __eq__(self, other):
        # Equal when same object, or same-or-subclass type with equal ticker.
        return ((self is other) or (isinstance(self, type(other)) and (self.ticker == other.ticker)))

    def __lt__(self, other):
        if (not isinstance(other, Ticker)):
            raise TypeError('Cannot compare this object with a Ticker')
        # Order by class name first, then ticker, so sorts are stable
        # across mixed ticker types.
        class_name = self.__class__.__name__
        other_class_name = other.__class__.__name__
        return ((class_name, self.ticker) < (other_class_name, other.ticker))

    def __hash__(self):
        return hash((self.ticker, type(self)))

    def __getstate__(self):
        # Loggers are not picklable; drop before serialization.
        self.logger = None
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__ = state
        self.logger = qf_logger.getChild(self.__class__.__name__)
def Gtrain(train_loader, model, optimizer, criterion=nn.MSELoss()):
    """Run one training epoch over a graph data loader.

    Returns the dataset-size-weighted mean loss for the epoch.

    NOTE(review): ``data.to(device)`` has its return value discarded —
    this assumes the loader yields objects that move in place (e.g.
    torch_geometric batches); confirm. ``device`` is a module-level
    global. The mutable default ``criterion=nn.MSELoss()`` is shared
    across calls (harmless here since it is stateless).
    """
    model.train()
    loss_all = 0
    criterion = criterion  # no-op self-assignment; left as-is (review)
    for data in train_loader:
        data.to(device)
        optimizer.zero_grad()
        out = model(data.x, data.edge_index, data.edge_attr, data.batch)
        loss = criterion(out, data.y)
        loss.backward()
        # Weight by graphs-per-batch so the epoch mean is per-sample.
        loss_all += (loss.item() * data.num_graphs)
        optimizer.step()
    return (loss_all / len(train_loader.dataset))
class Effect7058(BaseEffect):
    """Warfare-buff beacon effect: applies buff slots 1-2 to the fit.

    ``handler`` has no ``self`` parameter by the effect framework's
    convention (handlers are invoked as plain functions).
    """
    runTime = 'early'
    type = ('projected', 'passive', 'gang')

    def handler(fit, beacon, context, projectionRange, **kwargs):
        """Register a command bonus for each populated warfare-buff slot.

        FIX: fetch each slot's buff ID once (it was previously looked up
        twice and truth-tested twice), and avoid shadowing the builtin
        ``id``.
        """
        for slot in range(1, 3):
            buff_id = beacon.getModifiedItemAttr('warfareBuff{}ID'.format(slot))
            if buff_id:
                value = beacon.getModifiedItemAttr('warfareBuff{}Value'.format(slot))
                fit.addCommandBonus(buff_id, value, beacon, kwargs['effect'], 'early')
class BPRMF(object):
    """BPR matrix factorization (TF1 graph mode).

    Builds the full graph in __init__: embedding lookups for user /
    positive item / negative item, BPR pairwise loss with L2
    regularization, and an Adam training op.
    """

    def __init__(self, data_config, pretrain_data, args):
        self.model_type = 'mf'
        self.pretrain_data = pretrain_data
        self.n_users = data_config['n_users']
        self.n_items = data_config['n_items']
        self.lr = args.lr
        self.emb_dim = args.embed_size
        self.batch_size = args.batch_size
        # args.regs is a string like "[1e-5]"; eval'd to a list of coeffs.
        self.regs = eval(args.regs)
        self.verbose = args.verbose
        # Placeholders for a batch of (user, pos item, neg item) triples.
        self.users = tf.placeholder(tf.int32, shape=[None], name='users')
        self.pos_items = tf.placeholder(tf.int32, shape=[None], name='pos_items')
        self.neg_items = tf.placeholder(tf.int32, shape=[None], name='neg_items')
        self.weights = self._init_weights()
        u_e = tf.nn.embedding_lookup(self.weights['user_embedding'], self.users)
        pos_i_e = tf.nn.embedding_lookup(self.weights['item_embedding'], self.pos_items)
        neg_i_e = tf.nn.embedding_lookup(self.weights['item_embedding'], self.neg_items)
        # All user-vs-pos-item dot products for evaluation.
        self.batch_predictions = tf.matmul(u_e, pos_i_e, transpose_a=False, transpose_b=True)
        (self.base_loss, self.reg_loss) = self._create_bpr_loss(u_e, pos_i_e, neg_i_e)
        # kge_loss is a constant 0 here; kept so the loss breakdown matches
        # sibling KG-augmented models.
        self.kge_loss = tf.constant(0.0, tf.float32, [1])
        self.loss = ((self.base_loss + self.kge_loss) + self.reg_loss)
        self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
        self._statistics_params()

    def _init_weights(self):
        """Create user/item embedding tables (Xavier or pretrained init)."""
        all_weights = dict()
        initializer = tf.contrib.layers.xavier_initializer()
        if (self.pretrain_data is None):
            all_weights['user_embedding'] = tf.Variable(initializer([self.n_users, self.emb_dim]), name='user_embedding')
            all_weights['item_embedding'] = tf.Variable(initializer([self.n_items, self.emb_dim]), name='item_embedding')
            print('using xavier initialization')
        else:
            all_weights['user_embedding'] = tf.Variable(initial_value=self.pretrain_data['user_embed'], trainable=True, name='user_embedding', dtype=tf.float32)
            all_weights['item_embedding'] = tf.Variable(initial_value=self.pretrain_data['item_embed'], trainable=True, name='item_embedding', dtype=tf.float32)
            print('using pretrained initialization')
        return all_weights

    def _create_bpr_loss(self, users, pos_items, neg_items):
        """Return (BPR pairwise loss, L2 regularization loss)."""
        pos_scores = tf.reduce_sum(tf.multiply(users, pos_items), axis=1)
        neg_scores = tf.reduce_sum(tf.multiply(users, neg_items), axis=1)
        regularizer = ((tf.nn.l2_loss(users) + tf.nn.l2_loss(pos_items)) + tf.nn.l2_loss(neg_items))
        # BPR: maximize log sigmoid(pos - neg) == minimize its negative mean.
        maxi = tf.log(tf.nn.sigmoid((pos_scores - neg_scores)))
        mf_loss = tf.negative(tf.reduce_mean(maxi))
        reg_loss = (self.regs[0] * regularizer)
        return (mf_loss, reg_loss)

    def _statistics_params(self):
        """Count trainable parameters and print the total when verbose."""
        total_parameters = 0
        for variable in self.weights.values():
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        if (self.verbose > 0):
            print(('#params: %d' % total_parameters))

    def train(self, sess, feed_dict):
        """Run one optimization step; returns (op, loss, base, kge, reg)."""
        return sess.run([self.opt, self.loss, self.base_loss, self.kge_loss, self.reg_loss], feed_dict)

    def eval(self, sess, feed_dict):
        """Return the batch prediction matrix for the fed users/items."""
        batch_predictions = sess.run(self.batch_predictions, feed_dict)
        return batch_predictions
# NOTE(review): stripped hypothesis @given(...) decorator, left verbatim.
((simple_typed_classes(min_attrs=1) | simple_typed_dataclasses(min_attrs=1)), data())
def test_renaming(cl_and_vals, data):
    """A field renamed to 'class' must round-trip through un/structure.

    Property-based: picks a random attribute of a generated attrs class
    or dataclass, renames it in both the unstructure and structure hooks,
    and checks the rename shows up in the raw dict and round-trips.
    """
    converter = Converter()
    (cl, vals, kwargs) = cl_and_vals
    attrs = fields(cl)
    to_replace = data.draw(sampled_from(attrs))
    u_fn = make_dict_unstructure_fn(cl, converter, **{to_replace.name: override(rename='class')})
    s_fn = make_dict_structure_fn(cl, converter, **{to_replace.name: override(rename='class')})
    # The structure fn must expose the overrides it was built with.
    assert (s_fn.overrides == {to_replace.name: override(rename='class')})
    converter.register_structure_hook(cl, s_fn)
    converter.register_unstructure_hook(cl, u_fn)
    inst = cl(*vals, **kwargs)
    raw = converter.unstructure(inst)
    assert ('class' in raw)
    new_inst = converter.structure(raw, cl)
    assert (inst == new_inst)
class OpenTests():
    """Mixin exercising Traversable.open on package resources.

    ``self.data`` (the anchor package) is supplied by the concrete
    TestCase subclass that mixes this in.
    """

    def test_open_binary(self):
        """Binary mode returns the raw bytes of the resource."""
        target = (resources.files(self.data) / 'binary.file')
        with target.open('rb') as fp:
            result = fp.read()
        self.assertEqual(result, b'\x00\x01\x02\x03')

    def test_open_text_default_encoding(self):
        """Text mode with no encoding decodes UTF-8 content correctly."""
        target = (resources.files(self.data) / 'utf-8.file')
        with target.open() as fp:
            result = fp.read()
        self.assertEqual(result, 'Hello, UTF-8 world!\n')

    def test_open_text_given_encoding(self):
        """An explicit encoding/errors pair is honored."""
        target = (resources.files(self.data) / 'utf-16.file')
        with target.open(encoding='utf-16', errors='strict') as fp:
            result = fp.read()
        self.assertEqual(result, 'Hello, UTF-16 world!\n')

    def test_open_text_with_errors(self):
        """Wrong encoding: 'strict' raises, 'ignore' yields mojibake."""
        target = (resources.files(self.data) / 'utf-16.file')
        with target.open(encoding='utf-8', errors='strict') as fp:
            self.assertRaises(UnicodeError, fp.read)
        with target.open(encoding='utf-8', errors='ignore') as fp:
            result = fp.read()
        # UTF-16 bytes decoded as UTF-8 with errors ignored: NULs remain.
        self.assertEqual(result, 'H\x00e\x00l\x00l\x00o\x00,\x00 \x00U\x00T\x00F\x00-\x001\x006\x00 \x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00')

    def test_open_binary_FileNotFoundError(self):
        target = (resources.files(self.data) / 'does-not-exist')
        self.assertRaises(FileNotFoundError, target.open, 'rb')

    def test_open_text_FileNotFoundError(self):
        target = (resources.files(self.data) / 'does-not-exist')
        self.assertRaises(FileNotFoundError, target.open)
def split_schrodinger_graph_potentials(schrodinger_result, trim_levels_beyond=0.01, linewidth=1, scale=0.3, suppress_invert=False, probability_density=False, wfalpha=0.8, potentialalpha=0.8, **kwargs):
    """Plot electron and hole potentials/wavefunctions on two stacked axes.

    Top axis: electron potential Ve and e wavefunctions; bottom axis:
    heavy/light-hole potentials and their wavefunctions. Energies are
    converted from J to eV by dividing by the elementary charge ``q``.

    NOTE(review): several oddities, left untouched —
    * every x-axis plot uses ``(x * .0)``, which collapses all abscissae
      to zero; given the "(nm)" axis label this looks like a garbled
      scale factor (e.g. ``1e9``) — confirm against upstream;
    * ``options`` is built and given a 'square' key but never used, while
      ``defaults`` is what kwargs update — likely a bug;
    * "Possition" is a typo in a runtime label string (not changed here).
    """
    defaults = {'step': 0.002, 'margin': 0.02, 'pdf': False, 'show': False, 'dpi': 100, 'fontsize': 12, 'figsize': (7, 6)}
    options = copy.copy(defaults)
    options['square'] = False
    defaults.update(kwargs)
    potentials = schrodinger_result['potentials']
    wavefunctions = schrodinger_result['wavefunctions']
    energy_levels = schrodinger_result['E']
    x = schrodinger_result['x']
    # Unconfined ('EU') results provide a single hole band: alias it to
    # both heavy- and light-hole entries so the plotting below is uniform.
    if ('EU' in energy_levels.keys()):
        energy_levels['Ehh'] = energy_levels['EU']
        energy_levels['Elh'] = energy_levels['EU']
        wavefunctions['psi_hh'] = wavefunctions['psi_g1']
        wavefunctions['psi_lh'] = wavefunctions['psi_g2']
    (fig, (ax1, ax2)) = plt.subplots(nrows=2, ncols=1, figsize=defaults['figsize'], dpi=defaults['dpi'])
    ax1.plot((x * .0), (potentials['Ve'] / q), 'k', linewidth=2, label='Ve')
    ax1.set_ylabel('Energy (eV)', fontsize=defaults['fontsize'])
    ax1.tick_params(labelsize=defaults['fontsize'])
    ax1.grid(color='grey', linestyle='--', linewidth=0.5)
    (ax2.plot((x * .0), (potentials['Vlh'] / q), 'k--', linewidth=2, label='Vlh'),)
    ax2.plot((x * .0), (potentials['Vhh'] / q), 'k', linewidth=2, label='Vhh')
    ax2.set_ylabel('Energy (eV)', fontsize=defaults['fontsize'])
    ax2.set_xlabel('Possition (nm)', fontsize=defaults['fontsize'])
    ax2.tick_params(labelsize=defaults['fontsize'])
    ax2.grid(color='grey', linestyle='--', linewidth=0.5)
    # Wavefunctions are offset to their energy level for display.
    e_data = prepare_wavefunction_data_only(x, (energy_levels['Ee'] / q), wavefunctions['psi_e'], trim_levels_beyond, linewidth, 'blue', (0.03 / q), suppress_invert, alpha=wfalpha, square=probability_density)
    hh_data = prepare_wavefunction_data_only(x, (energy_levels['Ehh'] / q), wavefunctions['psi_hh'], trim_levels_beyond, linewidth, 'green', (0.03 / q), suppress_invert, alpha=wfalpha, square=probability_density)
    lh_data = prepare_wavefunction_data_only(x, (energy_levels['Elh'] / q), wavefunctions['psi_lh'], trim_levels_beyond, linewidth, 'red', (0.03 / q), suppress_invert, alpha=wfalpha, square=probability_density)
    for (x, y) in e_data:
        ax1.plot((x * .0), y, 'blue', linewidth=2, label='e')
    for (x, y) in hh_data:
        ax2.plot((x * .0), y, 'green', linewidth=2, label='hh')
    for (x, y) in lh_data:
        ax2.plot((x * .0), y, 'red', linewidth=2, label='lh')
    if defaults['show']:
        plt.tight_layout()
        plt.show()
    if defaults['pdf']:
        # Render to a temp PDF and open it with the OS default viewer.
        (handle, path) = tempfile.mkstemp(prefix='tmp_solcore_', suffix=('.%s' % graph_defaults['format']))
        canvas = FigureCanvasPdf(fig)
        canvas.print_figure(path, dpi=defaults['dpi'], bbox_inches='tight')
        open_with_os(path)
class TestArchiveOffers(TestCase):
    """Tests for the ``archive_offers`` management command."""

    def setUp(self):
        # Fresh buffers per test so output assertions never see stale text.
        self.out = io.StringIO()
        self.err = io.StringIO()

    def test_archive_offers_errors(self):
        """An invalid ``-s`` date or non-existent ``-o`` dir raises CommandError."""
        with self.assertRaises(management.CommandError):
            management.call_command('archive_offers', '-s', 'not-valid-date', stdout=self.out, stderr=self.err)
        with self.assertRaises(management.CommandError):
            management.call_command('archive_offers', '-o', '/tmp/does/not/exist/', stdout=self.out, stderr=self.err)

    # FIX: this decorator had degraded into the bare string expression
    # ('django.db.connections'), i.e. the '@mock.patch' prefix was lost, so
    # conn_mock was never injected. Assumes `from unittest import mock` at the
    # top of the file -- TODO confirm against the module imports.
    @mock.patch('django.db.connections')
    def test_archive_offers(self, conn_mock):
        """Without ``-d`` archived offers are kept; with ``-d`` they are deleted."""
        management.call_command('archive_offers', stdout=self.out, stderr=self.err)
        output = self.out.getvalue()
        # assertIn/assertNotIn give the offending text in the failure message,
        # unlike assertTrue('x' in output).
        self.assertIn('Successfully archived', output)
        self.assertIn('Skipping copying offers', output)
        self.assertNotIn('Skipping deleting archived offers', output)
        management.call_command('archive_offers', '-d', stdout=self.out, stderr=self.err)
        output = self.out.getvalue()
        self.assertIn('Skipping deleting archived offers', output)

    # FIX: restored the mangled decorators. '_settings(...)' was the residue
    # of '@override_settings(...)' and the mock.patch line had lost its '@'
    # prefix -- TODO confirm override_settings is imported from django.test.
    @override_settings(BACKUPS_STORAGE='django.core.files.storage.FileSystemStorage')
    @mock.patch('django.db.connections')
    def test_archive_offers_storage(self, conn_mock):
        """With a configured backups storage, archived offers are copied then deleted."""
        management.call_command('archive_offers', '-d', stdout=self.out, stderr=self.err)
        output = self.out.getvalue()
        self.assertNotIn('Skipping deleting archived offers', output)
        self.assertIn('Copying offers', output)
        self.assertIn('Successfully copied', output)
        self.assertIn('Deleting archived offers', output)
        self.assertIn('Updating database statistics', output)
        # Running the command a second time must detect the existing backup.
        management.call_command('archive_offers', '-d', stdout=self.out, stderr=self.err)
        output = self.out.getvalue()
        self.assertIn('already exists in backups', output)
def save_weights(G, D, M, state_dict, weights_root, experiment_name, name_suffix=None, G_ema=None):
    """Save model and optimizer state dicts under weights_root/experiment_name.

    Args:
        G, D, M: modules with a ``state_dict()`` and an ``optim`` attribute.
        state_dict: training bookkeeping dict saved as-is.
        weights_root: base directory for all experiments.
        experiment_name: subdirectory for this run.
        name_suffix: optional tag appended to every filename (e.g. 'best' or
            an iteration count); falsy values are skipped by join_strings.
        G_ema: optional EMA copy of G; saved only when provided.
    """
    root = '/'.join([weights_root, experiment_name])
    # FIX: was `if not os.path.exists(root): os.mkdir(root)` -- racy (TOCTOU)
    # and fails when the parent directory does not exist yet.
    os.makedirs(root, exist_ok=True)
    if name_suffix:
        print('Saving weights to %s/%s...' % (root, name_suffix))
    else:
        print('Saving weights to %s...' % root)

    def _save(obj, label):
        # One place builds the '<root>/<label>_<suffix>.pth' path for all files.
        torch.save(obj, '%s/%s.pth' % (root, join_strings('_', [label, name_suffix])))

    _save(G.state_dict(), 'G')
    _save(G.optim.state_dict(), 'G_optim')
    _save(D.state_dict(), 'D')
    _save(D.optim.state_dict(), 'D_optim')
    _save(M.state_dict(), 'M')
    _save(M.optim.state_dict(), 'M_optim')
    _save(state_dict, 'state_dict')
    if G_ema is not None:
        _save(G_ema.state_dict(), 'G_ema')
def initializeModel(model):
    """Point *model* at the employee table, resolving FK columns to names."""
    model.setTable('employee')
    # Edits are queued until submitAll() is called explicitly.
    model.setEditStrategy(QSqlTableModel.OnManualSubmit)
    # Columns 2 and 3 hold foreign keys; show the referenced 'name' instead.
    for column, table in ((2, 'city'), (3, 'country')):
        model.setRelation(column, QSqlRelation(table, 'id', 'name'))
    for column, title in enumerate(('ID', 'Name', 'City', 'Country')):
        model.setHeaderData(column, Qt.Horizontal, title)
    model.select()
class DebuggingRegexLexer(ExtendedRegexLexer):
    """Lexer that exposes its context and last match for interactive debugging.

    Behaves like ExtendedRegexLexer.get_tokens_unprocessed, but stores the
    LexerContext on ``self.ctx`` and the last regex match on ``self.m`` so a
    debugger (or test harness) can inspect them between tokens.
    """

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # Compiled token definitions, built by the lexer metaclass.
        tokendefs = self._tokens
        # Expose the context object for debugging.
        self.ctx = ctx = LexerContext(text, 0)
        ctx.stack = list(stack)
        statetokens = tokendefs[ctx.stack[(- 1)]]
        while 1:
            for (rexmatch, action, new_state) in statetokens:
                # Expose the match object for debugging.
                self.m = m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if (action is not None):
                        if (type(action) is _TokenType):
                            # Plain token type: emit the whole match.
                            (yield (ctx.pos, action, m.group()))
                            ctx.pos = m.end()
                        elif (not isinstance(self, ExtendedRegexLexer)):
                            # NOTE(review): instances of this class are always
                            # ExtendedRegexLexer instances, so this branch looks
                            # unreachable here; presumably kept for parity with
                            # the plain RegexLexer loop -- confirm upstream.
                            (yield from action(self, m))
                            ctx.pos = m.end()
                        else:
                            # Extended callbacks receive ctx and may move
                            # ctx.pos themselves.
                            (yield from action(self, m, ctx))
                    if (not new_state):
                        # A callback may have altered the stack; resync.
                        statetokens = tokendefs[ctx.stack[(- 1)]]
                    if (new_state is not None):
                        # Apply the state transition declared by the rule.
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if (state == '#pop'):
                                    ctx.stack.pop()
                                elif (state == '#push'):
                                    ctx.stack.append(ctx.stack[(- 1)])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # Integer means: drop that many states (slice away).
                            del ctx.stack[new_state:]
                        elif (new_state == '#push'):
                            ctx.stack.append(ctx.stack[(- 1)])
                        else:
                            assert False, ('wrong state def: %r' % new_state)
                        statetokens = tokendefs[ctx.stack[(- 1)]]
                    break
            else:
                # No rule matched at the current position.
                try:
                    if (ctx.pos >= ctx.end):
                        break
                    if (text[ctx.pos] == '\n'):
                        # At a newline, fall back to the root state.
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        (yield (ctx.pos, Text, '\n'))
                        ctx.pos += 1
                        continue
                    # Emit the unmatched character as an Error token and advance.
                    (yield (ctx.pos, Error, text[ctx.pos]))
                    ctx.pos += 1
                except IndexError:
                    break
class MenuWrapperTests(unittest.TestCase):
    """Integration tests for menu wrappers, driven against a live Notepad."""

    def setUp(self):
        # Start a fresh Notepad instance per test; Timings.defaults() undoes
        # any global timing tweaks left by other tests.
        Timings.defaults()
        self.app = Application()
        self.app.start('Notepad.exe')
        self.dlg = self.app.Notepad

    def tearDown(self):
        # Kill (not close) so a modified document never blocks teardown.
        self.app.kill()

    def testInvalidHandle(self):
        pass

    def testItemCount(self):
        """Notepad's menu bar has five top-level items."""
        self.assertEqual(5, self.dlg.menu().item_count())

    def testItem(self):
        """Items are addressable by index, loose text, and exact text."""
        self.assertEqual(u'&File', self.dlg.menu().item(0).text())
        self.assertEqual(u'&File', self.dlg.menu().item(u'File').text())
        self.assertEqual(u'&File', self.dlg.menu().item(u'&File', exact=True).text())

    def testItems(self):
        """items() returns the top-level entries in menu order."""
        self.assertEqual([u'&File', u'&Edit', u'F&ormat', u'&View', u'&Help'], [item.text() for item in self.dlg.menu().items()])

    def testFriendlyClassName(self):
        self.assertEqual('MenuItem', self.dlg.menu().item(0).friendly_class_name())

    def testMenuItemNotEnabled(self):
        """Interacting with a disabled item raises MenuItemNotEnabled."""
        # 'Find Next' is disabled until a search has been performed.
        self.assertRaises(MenuItemNotEnabled, self.dlg.menu_select, 'Edit->Find Next')
        self.assertRaises(MenuItemNotEnabled, self.dlg.menu_item('Edit->Find Next').click)
        self.assertRaises(MenuItemNotEnabled, self.dlg.menu_item('Edit->Find Next').click_input)

    def testGetProperties(self):
        """get_properties() reflects the Help submenu's items verbatim."""
        self.assertEqual({u'menu_items': [{u'index': 0, u'state': 0, u'item_type': 0, u'item_id': 64, u'text': u'View &Help'}, {u'index': 1, u'state': 3, u'item_type': 2048, u'item_id': 0, u'text': u''}, {u'index': 2, u'state': 0, u'item_type': 0, u'item_id': 65, u'text': u'&About Notepad'}]}, self.dlg.menu().get_menu_path('Help')[0].sub_menu().get_properties())

    def testGetMenuPath(self):
        """Paths accept loose text, #index, $item_id and exact-text addressing."""
        self.assertEqual(u'&About Notepad', self.dlg.menu().get_menu_path(' Help -> #2 ')[(- 1)].text())
        self.assertEqual(u'&About Notepad', self.dlg.menu().get_menu_path('Help->$65')[(- 1)].text())
        self.assertEqual(u'&About Notepad', self.dlg.menu().get_menu_path('&Help->&About Notepad', exact=True)[(- 1)].text())
        self.assertRaises(IndexError, self.dlg.menu().get_menu_path, '&Help->About what?', exact=True)

    def test__repr__(self):
        # Only checks that __repr__ does not raise; output is not asserted.
        print(self.dlg.menu())
        print(self.dlg.menu().get_menu_path('&Help->&About Notepad', exact=True)[(- 1)])

    def testClick(self):
        """click() (programmatic) opens and dismisses the About dialog."""
        self.dlg.menu().get_menu_path('&Help->&About Notepad')[(- 1)].click()
        About = self.app.window(name='About Notepad')
        About.wait('ready')
        About.OK.click()
        About.wait_not('visible')

    def testClickInput(self):
        """click_input() (real mouse) opens and dismisses the About dialog."""
        self.dlg.menu().get_menu_path('&Help->&About Notepad')[(- 1)].click_input()
        About = self.app.window(name='About Notepad')
        About.wait('ready')
        About.OK.click()
        About.wait_not('visible')
# FIX: this decorator had lost its '@mock.patch' prefix and degraded into a
# bare tuple expression, so the patched version dict was never injected as the
# first test argument. Assumes `from unittest import mock` at module top --
# TODO confirm against the file's imports.
@mock.patch('pyorbital.version.get_versions', return_value=dict([('version', '1.9.1+1.some-futur.dirty'), ('full-revisionid', 'some-future-git-version-hash'), ('dirty', True), ('error', None), ('date', '2023-01-20T09:37:30+0100')]))
def test_get_config_path_ppp_config_set_but_not_pyorbital_future(mock, caplog, monkeypatch):
    """On future versions, PPP_CONFIG_DIR alone is ignored with a warning."""
    monkeypatch.setenv('SATPY_CONFIG_PATH', '/path/to/satpy/etc')
    monkeypatch.setenv('PPP_CONFIG_DIR', '/path/to/old/mpop/config/dir')
    with caplog.at_level(logging.WARNING):
        res = _get_config_path()
    log_output = ('The use of PPP_CONFIG_DIR is no longer supported! ' + 'Please use PYORBITAL_CONFIG_PATH if you need a custom config path for pyorbital!')
    assert (log_output in caplog.text)
    # With PPP_CONFIG_DIR rejected, the packaged config dir is the fallback.
    assert (res == PKG_CONFIG_DIR)
class MarketImpactTestCase(WithCreateBarData, ZiplineTestCase):
    """Tests for the VolatilityVolumeShare slippage model's window data."""

    ASSET_FINDER_EQUITY_SIDS = (1,)

    # FIX: restored the '@classmethod' decorator -- the method takes ``cls``
    # and is invoked on the class by the fixture machinery, so as a plain
    # function it would receive an instance (or fail) instead of the class.
    @classmethod
    def make_equity_minute_bar_data(cls):
        trading_calendar = cls.trading_calendars[Equity]
        return create_minute_bar_data(trading_calendar.minutes_for_sessions_in_range(cls.equity_minute_bar_days[0], cls.equity_minute_bar_days[(- 1)]), cls.asset_finder.equities_sids)

    def test_window_data(self):
        """Mean volume and annualized volatility come from the trailing window."""
        session = pd.Timestamp('2006-03-01')
        minute = self.trading_calendar.minutes_for_session(session)[1]
        data = self.create_bardata(simulation_dt_func=(lambda : minute))
        asset = self.asset_finder.retrieve_asset(1)
        (mean_volume, volatility) = VolatilityVolumeShare(0.0)._get_window_data(data, asset, window_length=20)
        self.assertEqual(mean_volume, 128.5)
        # Volatility is the std of daily pct changes, annualized by sqrt(252).
        reference_vol = (pd.Series(range(29, 49)).pct_change().std() * sqrt(252))
        self.assertEqual(volatility, reference_vol)
# FIX: the decorator had lost its '@pytest.mark' prefix, leaving the bare
# attribute access '.parametrize(...)' (a syntax-level continuation of the
# previous statement); restored so the test actually receives GET_query.
# Assumes `import pytest` at module top -- TODO confirm.
@pytest.mark.parametrize('GET_query', GET_queries)
def test_set_context_querystring_with_filter_and_page(GET_query):
    """The context querystring drops 'page' but keeps filter parameters."""
    querydict = QueryDict(GET_query)
    filter = ProjectFilter(querydict)
    context = {'filter': filter}
    context = set_context_querystring_with_filter_and_page(context)
    if (('page' in GET_query) and ('title' in GET_query)):
        # Both present: querystring is the filter params minus 'page'.
        assert ('querystring' in context)
        assert (context['querystring'] == 'title=project')
        querydict_copy = querydict.copy()
        del querydict_copy['page']
        assert (context['querystring'] == querydict_copy.urlencode())
    elif (('page' not in GET_query) and ('title' in GET_query)):
        # Filter only: querystring passes through unchanged.
        assert ('querystring' in context)
        assert (context['querystring'] == 'title=project')
    elif (('page' in GET_query) and ('title' not in GET_query)):
        # Page only: querystring is present but empty.
        assert (context.get('querystring', 'not-in-context') == '')
    else:
        # Neither: no querystring key is added at all.
        assert (context.get('querystring', 'not-in-context') == 'not-in-context')
def build_coordinator(hass, api):
    """Build a DataUpdateCoordinator that periodically refreshes *api* status.

    The timeout scales with the number of managed things so large
    installations are not cut off mid-refresh.
    """
    timeout = (BASE_TIMEOUT + (len(api.things) * 2))

    async def async_update_data():
        try:
            async with async_timeout.timeout(timeout):
                # refresh_status is blocking; run it in the executor pool.
                (await hass.async_add_executor_job(api.refresh_status))
                hass.data[DOMAIN][UPDATED_DATA] = api.get_status(legacy=True)
        except asyncio.TimeoutError as err:
            # FIX: was an f-string with no placeholders (F541); also chain the
            # cause so the original traceback is preserved for debugging.
            raise UpdateFailed('Command executed timed out when regularly fetching data.') from err
        except Exception as err:
            # FIX: chain the cause instead of discarding it.
            raise UpdateFailed(f'Error communicating with API: {err}') from err
        _LOGGER.debug(f'Latest data: {[(name, value.status) for (name, value) in hass.data[DOMAIN][UPDATED_DATA].items()]}')

    coordinator = DataUpdateCoordinator(hass, _LOGGER, name=DOMAIN, update_method=async_update_data, update_interval=DATA_UPDATE_INTERVAL)
    # Seed with None so the first refresh is not considered a failure.
    coordinator.async_set_updated_data(None)
    return coordinator
def main():
    """Run a moving-average alpha-model backtest over two dummy tickers."""
    initial_risk = 0.03
    backtest_start = str_to_date('2016-01-01')
    backtest_end = str_to_date('2017-12-31')

    # Configure the trading session before building it.
    builder = container.resolve(BacktestTradingSessionBuilder)
    builder.set_data_provider(daily_data_provider)
    builder.set_backtest_name('Moving Average Alpha Model Backtest')
    builder.set_position_sizer(InitialRiskPositionSizer, initial_risk=initial_risk)
    builder.set_commission_model(IBCommissionModel)
    builder.set_frequency(Frequency.DAILY)
    session = builder.build(backtest_start, backtest_end)

    alpha_model = MovingAverageAlphaModel(fast_time_period=5, slow_time_period=20, risk_estimation_factor=1.25, data_provider=session.data_handler)
    tickers = [DummyTicker('AAA'), DummyTicker('BBB')]
    session.use_data_preloading(tickers)

    # Wire the alpha model into the strategy and trigger it once per day.
    strategy = AlphaModelStrategy(session, {alpha_model: tickers}, use_stop_losses=True)
    CalculateAndPlaceOrdersRegularEvent.set_daily_default_trigger_time()
    strategy.subscribe(CalculateAndPlaceOrdersRegularEvent)
    session.start_trading()

    # Summarise end-of-day log returns of the resulting portfolio.
    log_returns = session.portfolio.portfolio_eod_series().to_log_returns()
    print('mean daily log return: {}'.format(log_returns.mean()))
    print('std of daily log returns: {}'.format(log_returns.std()))
    print('Finished! Go to output directory in order to consult the generated files with backtest results')
class ItemStatsContainer(wx.Panel):
    """Notebook panel aggregating all per-item info tabs (traits, attributes,
    effects, ...). Tabs are added conditionally depending on the item and the
    context, in a fixed display order."""

    def __init__(self, parent, stuff, item, context=None):
        wx.Panel.__init__(self, parent)
        sMkt = Market.getInstance()
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        self.nbContainer = wx.Notebook(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0)
        mainSizer.Add(self.nbContainer, 1, (wx.EXPAND | wx.ALL), 2)
        # Tabs below are appended in display order; each is optional except
        # Attributes, Requirements and Effects.
        if (item.traits is not None):
            self.traits = ItemTraits(self.nbContainer, stuff, item)
            self.nbContainer.AddPage(self.traits, _t('Traits'))
        if (isinstance(stuff, (Module, Drone)) and stuff.isMutated):
            # Mutated modules/drones get a tab to edit their mutated stats.
            self.mutator = ItemMutatorPanel(self.nbContainer, stuff)
            self.nbContainer.AddPage(self.mutator, _t('Mutations'))
        if item.description:
            self.desc = ItemDescription(self.nbContainer, stuff, item)
            self.nbContainer.AddPage(self.desc, _t('Description'))
        self.params = ItemParams(self.nbContainer, stuff, item, context)
        self.nbContainer.AddPage(self.params, _t('Attributes'))
        items = sMkt.getVariationsByItems([item])
        if (len(items) > 1):
            # Only meaningful when the item has market variations to compare.
            self.compare = ItemCompare(self.nbContainer, stuff, item, items, context)
            self.nbContainer.AddPage(self.compare, _t('Compare'))
        self.reqs = ItemRequirements(self.nbContainer, stuff, item)
        self.nbContainer.AddPage(self.reqs, _t('Requirements'))
        if (context == 'Skill'):
            self.dependents = ItemDependents(self.nbContainer, stuff, item)
            self.nbContainer.AddPage(self.dependents, _t('Dependents'))
        self.effects = ItemEffects(self.nbContainer, stuff, item)
        self.nbContainer.AddPage(self.effects, _t('Effects'))
        if (stuff is not None):
            self.affectedby = ItemAffectedBy(self.nbContainer, stuff, item)
            self.nbContainer.AddPage(self.affectedby, _t('Affected by'))
        if config.debug:
            # Raw property dump, only shown in debug builds.
            self.properties = ItemProperties(self.nbContainer, stuff, item, context)
            self.nbContainer.AddPage(self.properties, _t('Properties'))
        # Left-click anywhere on the tab strip selects the tab under the cursor.
        self.nbContainer.Bind(wx.EVT_LEFT_DOWN, self.mouseHit)
        self.SetSizer(mainSizer)
        self.Layout()

    def __del__(self):
        pass

    def mouseHit(self, event):
        # Select the tab the user clicked, if any (-1 means no tab hit).
        (tab, _) = self.nbContainer.HitTest(event.Position)
        if (tab != (- 1)):
            self.nbContainer.SetSelection(tab)

    def OnWindowClose(self):
        # The mutator tab only exists for mutated items; forward the close
        # notification so it can persist/cancel pending edits.
        mutaPanel = getattr(self, 'mutator', None)
        if (mutaPanel is not None):
            mutaPanel.OnWindowClose()
        self.params.OnWindowClose()
def pattern_exists(ordered_ops: List[Op], pattern: List[str]) -> Optional[List[MhaInfo]]:
    """Slide a window over *ordered_ops* and collect MhaInfo for every run of
    consecutive op types matching *pattern*.

    Returns the (possibly empty) list of matches.
    """
    matches = []
    window: deque = deque(maxlen=len(pattern))
    for op in ordered_ops:
        window.append(op)
        if [candidate.type for candidate in window] == pattern:
            # `op` is the last op of the matched run (window[-1]).
            _, parent_name = op.dotted_name.split('.', 1)
            module_qualified_name, _ = parent_name.rsplit('.', 1)
            matches.append(MhaInfo(type(op.residing_module), module_qualified_name))
    return matches
class Graph():
    """Declarative wrapper around a matplotlib figure/axis.

    Data objects (anything with ``line_format`` and a ``draw(axis)`` method)
    plus keyword options drive axis setup, colouring, legend and rendering.
    """

    # Class-wide switch to suppress opening rendered files with the OS viewer.
    suppress_show: bool = False
    # Class-wide counter of plots (shared across instances).
    plotted = 0

    def __init__(self, *data: Any, **options: Any) -> None:
        """Store flattened *data* and merge *options* over graph_defaults."""
        self.axis: Any = None
        self.options = copy(graph_defaults)
        self.options.update(options)
        self.data = list(flatten(data))
        self.extra_artists: List = []
        self.subplots: List = []
        self.create_figure()

    def main_drawing_flow(self, figure: Figure) -> None:
        """Draw subplots first, then this graph's axis, data and decorations."""
        for subplot in self.subplots:
            subplot.main_drawing_flow(figure)
        self.create_axis(figure)
        self.setup_matplotlib()
        self.render_data()
        if ('legend' in self.options):
            self.draw_legend()
            # Track artists that must be included in the tight bounding box.
            self.extra_artists.append(self.legend)
        if ('text' in self.options):
            self.extra_artists.append(self.text)
        if (('square' in self.options) and self.options['square']):
            make_square(self.figure, self.axis)

    def add_subplot(self, *data: Any, **options: Any) -> None:
        """Add a child Graph sharing this figure."""
        self.subplots.append(Graph(*data, figure=self.figure, **options))

    def create_figure(self):
        """Reuse a given figure, or create one (optionally with figsize)."""
        if ('figure' in self.options):
            self.figure = self.options['figure']
        elif ('figsize' in self.options):
            self.figure = Figure(figsize=self.options['figsize'])
        else:
            self.figure = Figure()
        # Only margin-direction options are forwarded to subplots_adjust.
        self.figure.subplots_adjust(**{k: self.options[k] for k in self.options.keys() if (k in directions)})

    def create_axis(self, figure: Figure) -> None:
        """Bind an axis: explicit option wins, else keep existing, else add one."""
        if ('axis' in self.options):
            self.axis = self.options['axis']
        elif (self.axis is not None):
            return
        else:
            subplot_args = (self.options['subplot_args'] if ('subplot_args' in self.options) else {})
            # 111 = single full-figure subplot when no position was requested.
            self.axis = figure.add_subplot((self.options['subplot'] if ('subplot' in self.options) else 111), **subplot_args)

    def setup_matplotlib(self) -> None:
        """Apply axis options: simple set_* properties, grid, visibility,
        label/tick colours and major formatters."""
        simple_setters = {k: self.options[k] for k in mpl_simple_properties if (k in self.options)}
        for (key, value) in simple_setters.items():
            # e.g. option 'title' -> axis.set_title(value)
            getattr(self.axis, 'set_{}'.format(key))(value)
        if (('grid' in self.options) and (self.options['grid'] is not None)):
            self.axis.grid(**self.options['grid'])
        if ('xhide' in self.options):
            self.axis.get_xaxis().set_visible(False)
        if ('yhide' in self.options):
            self.axis.get_yaxis().set_visible(False)
        if ('xlabelcolor' in self.options):
            self.axis.get_xaxis().label.set_color(self.options['xlabelcolor'])
        if ('ylabelcolor' in self.options):
            self.axis.get_yaxis().label.set_color(self.options['ylabelcolor'])
        if ('x_major_formatter' in self.options):
            self.axis.get_xaxis().set_major_formatter(self.options['x_major_formatter'])
        if ('y_major_formatter' in self.options):
            self.axis.get_yaxis().set_major_formatter(self.options['y_major_formatter'])
        if (('yticklabelcolor' in self.options) and (self.options['yticklabelcolor'] is not None)):
            for tl in self.axis.get_yticklabels():
                tl.set_color(self.options['yticklabelcolor'])
        if (('xticklabelcolor' in self.options) and (self.options['xticklabelcolor'] is not None)):
            for tl in self.axis.get_xticklabels():
                tl.set_color(self.options['xticklabelcolor'])

    def render_data(self) -> None:
        """Assign colours from the colormap to data lacking explicit colour
        options, then let each datum draw itself on the axis."""
        # Collect colour indexes for data without an explicit colour; data with
        # an explicit color_index share colours, others get a unique negative
        # slot. NOTE(review): intricate -- confirm intent before changing.
        all_color_indexes = [(d.color_index if (hasattr(d, 'color_index') and (d.color_index is not None) and (not ('color' in d.line_format))) else ((- i) - 1)) for (i, d) in enumerate(self.data) if (not (('color' in d.line_format) or ('facecolor' in d.line_format) or ('edgecolor' in d.line_format)))]
        unique_color_indexes = len(set(all_color_indexes))
        # Normalize indexes onto the colormap's range.
        cNorm = Normalize(vmin=0, vmax=unique_color_indexes)
        scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=self.options['colormap'])
        self.legend_additions: List = []
        for (i, datum) in enumerate(self.data):
            if (('color' not in datum.line_format) and ('edgecolor' not in datum.line_format) and ('facecolor' not in datum.line_format)):
                if (hasattr(datum, 'color_index') and (datum.color_index is not None)):
                    color = scalarMap.to_rgba(datum.color_index)
                else:
                    color = scalarMap.to_rgba(i)
                datum.line_format['color'] = color
            if ('edit' in self.options):
                datum.edit = self.options['edit']
            # draw() may return (handle, label) to be inserted into the legend.
            legend_entry = datum.draw(self.axis)
            if (legend_entry is not None):
                self.legend_additions.append((i, legend_entry[0], legend_entry[1]))

    def draw_legend(self) -> None:
        """Build the legend from axis handles plus any per-datum additions,
        honouring font and placement options ('outside' anchors beyond axes)."""
        font_args = {}
        if ('legend_font_size' in self.options):
            font_args['size'] = self.options['legend_font_size']
        if ('legend_font_name' in self.options):
            font_args['fontname'] = self.options['legend_font_name']
        if ('legend_font_weight' in self.options):
            font_args['weight'] = self.options['legend_font_weight']
        if ('legend_font_color' in self.options):
            font_args['color'] = self.options['legend_font_color']
        self.legend_font_properties = font_manager.FontProperties(**font_args)
        legend_location = self.options['legend']
        (handles, labels) = self.axis.get_legend_handles_labels()
        # Insert extra entries at their recorded data positions, in order.
        self.legend_additions.sort()
        for (i, (position, handle, label)) in enumerate(self.legend_additions):
            handles.insert(position, handle)
            labels.insert(position, label)
        (final_handles, final_labels) = ([], [])
        for (handle, label) in zip(handles, labels):
            # Entries labelled 'None'/None are deliberately hidden.
            if (label in ('None', None)):
                continue
            final_labels.append(label)
            final_handles.append(handle)
        legend_args = (final_handles, final_labels)
        legend_kwargs = {'frameon': (self.options['legendframe'] if ('legendframe' in self.options) else False), 'title': (self.options['legendtitle'] if ('legendtitle' in self.options) else False), 'ncol': (self.options['legendcolumns'] if ('legendcolumns' in self.options) else 1), 'numpoints': 4, 'prop': self.legend_font_properties, 'handlelength': (self.options['handlelength'] if ('handlelength' in self.options) else 2)}
        if (legend_location == 'outside'):
            # Anchor beyond the right edge of the axes.
            legend_kwargs['bbox_to_anchor'] = (self.options['legendbox'] if ('legendbox' in self.options) else (1.6, 1))
        else:
            legend_kwargs['loc'] = legend_location
        self.legend = self.axis.legend(*legend_args, **legend_kwargs)

    def draw(self, path: str=None, show: bool=True) -> None:
        """Render to *path* (a temp file when None) as PDF or raster, then
        optionally open the result with the OS default viewer."""
        self.main_drawing_flow(self.figure)
        if (path is None):
            (handle, path) = tempfile.mkstemp(prefix='tmp_solcore_', suffix=('.%s' % self.options['format']))
        if (self.options['format'].upper() == 'PDF'):
            self.canvas = FigureCanvasPdf(self.figure)
        else:
            self.canvas = FigureCanvasAgg(self.figure)
        self.canvas.print_figure(path, dpi=self.options['dpi'], bbox_extra_artists=self.extra_artists, bbox_inches='tight')
        if (show and (not self.suppress_show)):
            open_with_os(path)

    def show(self) -> None:
        """Render interactively into pylab's current figure."""
        import pylab
        self.main_drawing_flow(pylab.gcf())
        pylab.show()
class AnnualVirtualStorage(VirtualStorage):
    """Virtual storage whose volume is reset once per year, on a
    configurable month/day, optionally back to the initial volume."""

    def __init__(self, *args, **kwargs):
        # Pop our own options before delegating the rest to VirtualStorage.
        self.reset_day = kwargs.pop('reset_day', 1)
        self.reset_month = kwargs.pop('reset_month', 1)
        self.reset_to_initial_volume = kwargs.pop('reset_to_initial_volume', False)
        self._last_reset_year = None
        super(AnnualVirtualStorage, self).__init__(*args, **kwargs)

    def reset(self):
        """Clear the reset-year marker so the next run resets afresh."""
        super(AnnualVirtualStorage, self).reset()
        self._last_reset_year = None

    def before(self, ts):
        """Perform the annual reset the first timestep at/after the reset date."""
        super(AnnualVirtualStorage, self).before(ts)
        # Guard clauses: already reset this year, or reset date not reached yet.
        if ts.year == self._last_reset_year:
            return
        reached_reset_date = (ts.month > self.reset_month) or ((ts.month == self.reset_month) and (ts.day >= self.reset_day))
        if not reached_reset_date:
            return
        use_initial_volume = self.reset_to_initial_volume
        if (ts.index == 0) and isinstance(self.max_volume, Parameter):
            # On the very first timestep a Parameter-valued max_volume forces
            # a reset from the initial volume.
            use_initial_volume = True
        self._reset_storage_only(use_initial_volume=use_initial_volume)
        self._last_reset_year = ts.year
        self.active = True
class TableConnection():
    """Thin per-table facade over ``Connection``: every method forwards to the
    same-named ``Connection`` method with ``self.table_name`` pre-bound."""

    def __init__(self, table_name: str, region: Optional[str]=None, host: Optional[str]=None, connect_timeout_seconds: Optional[float]=None, read_timeout_seconds: Optional[float]=None, max_retry_attempts: Optional[int]=None, max_pool_connections: Optional[int]=None, extra_headers: Optional[Mapping[(str, str)]]=None, aws_access_key_id: Optional[str]=None, aws_secret_access_key: Optional[str]=None, aws_session_token: Optional[str]=None, *, meta_table: Optional[MetaTable]=None) -> None:
        """Create the underlying Connection; optionally seed its meta-table
        cache so the first operation avoids a DescribeTable round trip."""
        self.table_name = table_name
        self.connection = Connection(region=region, host=host, connect_timeout_seconds=connect_timeout_seconds, read_timeout_seconds=read_timeout_seconds, max_retry_attempts=max_retry_attempts, max_pool_connections=max_pool_connections, extra_headers=extra_headers, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token)
        if (meta_table is not None):
            self.connection.add_meta_table(meta_table)

    def get_meta_table(self) -> MetaTable:
        """Return the cached/fetched meta table for this table."""
        return self.connection.get_meta_table(self.table_name)

    def get_operation_kwargs(self, hash_key: str, range_key: Optional[str]=None, key: str=KEY, attributes: Optional[Any]=None, attributes_to_get: Optional[Any]=None, actions: Optional[Sequence[Action]]=None, condition: Optional[Condition]=None, consistent_read: Optional[bool]=None, return_values: Optional[str]=None, return_consumed_capacity: Optional[str]=None, return_item_collection_metrics: Optional[str]=None, return_values_on_condition_failure: Optional[str]=None) -> Dict:
        """Build the low-level request kwargs for an item operation."""
        return self.connection.get_operation_kwargs(self.table_name, hash_key, range_key=range_key, key=key, attributes=attributes, attributes_to_get=attributes_to_get, actions=actions, condition=condition, consistent_read=consistent_read, return_values=return_values, return_consumed_capacity=return_consumed_capacity, return_item_collection_metrics=return_item_collection_metrics, return_values_on_condition_failure=return_values_on_condition_failure)

    def delete_item(self, hash_key: str, range_key: Optional[str]=None, condition: Optional[Condition]=None, return_values: Optional[str]=None, return_consumed_capacity: Optional[str]=None, return_item_collection_metrics: Optional[str]=None) -> Dict:
        """Delete a single item by key (DeleteItem)."""
        return self.connection.delete_item(self.table_name, hash_key, range_key=range_key, condition=condition, return_values=return_values, return_consumed_capacity=return_consumed_capacity, return_item_collection_metrics=return_item_collection_metrics)

    def update_item(self, hash_key: str, range_key: Optional[str]=None, actions: Optional[Sequence[Action]]=None, condition: Optional[Condition]=None, return_consumed_capacity: Optional[str]=None, return_item_collection_metrics: Optional[str]=None, return_values: Optional[str]=None) -> Dict:
        """Apply update actions to a single item (UpdateItem)."""
        return self.connection.update_item(self.table_name, hash_key, range_key=range_key, actions=actions, condition=condition, return_consumed_capacity=return_consumed_capacity, return_item_collection_metrics=return_item_collection_metrics, return_values=return_values)

    def put_item(self, hash_key: str, range_key: Optional[str]=None, attributes: Optional[Any]=None, condition: Optional[Condition]=None, return_values: Optional[str]=None, return_consumed_capacity: Optional[str]=None, return_item_collection_metrics: Optional[str]=None) -> Dict:
        """Write a single item (PutItem)."""
        return self.connection.put_item(self.table_name, hash_key, range_key=range_key, attributes=attributes, condition=condition, return_values=return_values, return_consumed_capacity=return_consumed_capacity, return_item_collection_metrics=return_item_collection_metrics)

    def batch_write_item(self, put_items: Optional[Any]=None, delete_items: Optional[Any]=None, return_consumed_capacity: Optional[str]=None, return_item_collection_metrics: Optional[str]=None) -> Dict:
        """Put and/or delete multiple items in one call (BatchWriteItem)."""
        return self.connection.batch_write_item(self.table_name, put_items=put_items, delete_items=delete_items, return_consumed_capacity=return_consumed_capacity, return_item_collection_metrics=return_item_collection_metrics)

    def batch_get_item(self, keys: Sequence[str], consistent_read: Optional[bool]=None, return_consumed_capacity: Optional[str]=None, attributes_to_get: Optional[Any]=None) -> Dict:
        """Fetch multiple items by key in one call (BatchGetItem)."""
        return self.connection.batch_get_item(self.table_name, keys, consistent_read=consistent_read, return_consumed_capacity=return_consumed_capacity, attributes_to_get=attributes_to_get)

    def get_item(self, hash_key: str, range_key: Optional[str]=None, consistent_read: bool=False, attributes_to_get: Optional[Any]=None) -> Dict:
        """Fetch a single item by key (GetItem)."""
        return self.connection.get_item(self.table_name, hash_key, range_key=range_key, consistent_read=consistent_read, attributes_to_get=attributes_to_get)

    def scan(self, filter_condition: Optional[Any]=None, attributes_to_get: Optional[Any]=None, limit: Optional[int]=None, return_consumed_capacity: Optional[str]=None, segment: Optional[int]=None, total_segments: Optional[int]=None, exclusive_start_key: Optional[str]=None, consistent_read: Optional[bool]=None, index_name: Optional[str]=None) -> Dict:
        """Scan the table (or an index), optionally segmented for parallelism."""
        return self.connection.scan(self.table_name, filter_condition=filter_condition, attributes_to_get=attributes_to_get, limit=limit, return_consumed_capacity=return_consumed_capacity, segment=segment, total_segments=total_segments, exclusive_start_key=exclusive_start_key, consistent_read=consistent_read, index_name=index_name)

    def query(self, hash_key: str, range_key_condition: Optional[Condition]=None, filter_condition: Optional[Any]=None, attributes_to_get: Optional[Any]=None, consistent_read: bool=False, exclusive_start_key: Optional[Any]=None, index_name: Optional[str]=None, limit: Optional[int]=None, return_consumed_capacity: Optional[str]=None, scan_index_forward: Optional[bool]=None, select: Optional[str]=None) -> Dict:
        """Query items sharing *hash_key*, with optional range/filter conditions."""
        return self.connection.query(self.table_name, hash_key, range_key_condition=range_key_condition, filter_condition=filter_condition, attributes_to_get=attributes_to_get, consistent_read=consistent_read, exclusive_start_key=exclusive_start_key, index_name=index_name, limit=limit, return_consumed_capacity=return_consumed_capacity, scan_index_forward=scan_index_forward, select=select)

    def describe_table(self) -> Dict:
        """Return the table description (DescribeTable)."""
        return self.connection.describe_table(self.table_name)

    def delete_table(self) -> Dict:
        """Delete this table (DeleteTable)."""
        return self.connection.delete_table(self.table_name)

    def update_time_to_live(self, ttl_attr_name: str) -> Dict:
        """Enable TTL on the given attribute (UpdateTimeToLive)."""
        return self.connection.update_time_to_live(self.table_name, ttl_attr_name)

    def update_table(self, read_capacity_units: Optional[int]=None, write_capacity_units: Optional[int]=None, global_secondary_index_updates: Optional[Any]=None) -> Dict:
        """Update throughput and/or GSIs (UpdateTable)."""
        return self.connection.update_table(self.table_name, read_capacity_units=read_capacity_units, write_capacity_units=write_capacity_units, global_secondary_index_updates=global_secondary_index_updates)

    def create_table(self, attribute_definitions: Optional[Any]=None, key_schema: Optional[Any]=None, read_capacity_units: Optional[int]=None, write_capacity_units: Optional[int]=None, global_secondary_indexes: Optional[Any]=None, local_secondary_indexes: Optional[Any]=None, stream_specification: Optional[Dict]=None, billing_mode: str=DEFAULT_BILLING_MODE, tags: Optional[Dict[(str, str)]]=None) -> Dict:
        """Create this table with the given schema and indexes (CreateTable)."""
        return self.connection.create_table(self.table_name, attribute_definitions=attribute_definitions, key_schema=key_schema, read_capacity_units=read_capacity_units, write_capacity_units=write_capacity_units, global_secondary_indexes=global_secondary_indexes, local_secondary_indexes=local_secondary_indexes, stream_specification=stream_specification, billing_mode=billing_mode, tags=tags)
class ExactSumConstraint(Constraint):
    """Constraint enforcing that the (optionally weighted) sum of the
    variables equals an exact value.

    Example: ExactSumConstraint(3) on variables a, b requires a + b == 3.
    """

    def __init__(self, exactsum: Union[(int, float)], multipliers: Optional[Sequence]=None):
        """
        Args:
            exactsum: value the (weighted) sum must equal.
            multipliers: optional per-variable weights, aligned with the
                order the variables are registered in.
        """
        self._exactsum = exactsum
        self._multipliers = multipliers

    def preProcess(self, variables: Sequence, domains: dict, constraints: List[tuple], vconstraints: dict):
        """Prune values that on their own already exceed the target sum.

        NOTE(review): the pruning assumes non-negative values/multipliers;
        with negative weights a larger value could still be part of a valid
        solution -- confirm against the library's documented semantics.
        """
        Constraint.preProcess(self, variables, domains, constraints, vconstraints)
        multipliers = self._multipliers
        exactsum = self._exactsum
        if multipliers:
            for (variable, multiplier) in zip(variables, multipliers):
                domain = domains[variable]
                # Iterate over a copy so removal is safe.
                for value in domain[:]:
                    if ((value * multiplier) > exactsum):
                        domain.remove(value)
        else:
            for variable in variables:
                domain = domains[variable]
                for value in domain[:]:
                    if (value > exactsum):
                        domain.remove(value)

    def __call__(self, variables: Sequence, domains: dict, assignments: dict, forwardcheck=False):
        """Check the partial/total assignment against the exact sum.

        Returns True while the partial sum stays <= exactsum; once all
        variables are assigned, requires exact equality. With forwardcheck,
        hides domain values that would overshoot the target.
        """
        multipliers = self._multipliers
        exactsum = self._exactsum
        # FIX: renamed the accumulator from `sum` to `total` -- the original
        # shadowed the `sum` builtin.
        total = 0
        missing = False
        if multipliers:
            for (variable, multiplier) in zip(variables, multipliers):
                if (variable in assignments):
                    total += (assignments[variable] * multiplier)
                else:
                    missing = True
            if isinstance(total, float):
                # Round away accumulated floating-point noise before comparing.
                total = round(total, 10)
            if (total > exactsum):
                return False
            if (forwardcheck and missing):
                for (variable, multiplier) in zip(variables, multipliers):
                    if (variable not in assignments):
                        domain = domains[variable]
                        for value in domain[:]:
                            if ((total + (value * multiplier)) > exactsum):
                                domain.hideValue(value)
                        if (not domain):
                            return False
        else:
            for variable in variables:
                if (variable in assignments):
                    total += assignments[variable]
                else:
                    missing = True
            if isinstance(total, float):
                total = round(total, 10)
            if (total > exactsum):
                return False
            if (forwardcheck and missing):
                for variable in variables:
                    if (variable not in assignments):
                        domain = domains[variable]
                        for value in domain[:]:
                            if ((total + value) > exactsum):
                                domain.hideValue(value)
                        if (not domain):
                            return False
        if missing:
            # Partial assignment: feasible so long as we have not overshot.
            return (total <= exactsum)
        else:
            # Complete assignment: must hit the target exactly.
            return (total == exactsum)
def damp(sys, doprint=True):
    """Return (natural frequencies, damping ratios, poles) of *sys*.

    When *doprint* is true, also print one formatted row per pole; poles with
    negligible imaginary part are printed as real with damping 1.0.
    """
    wn, zeta, poles = sys.damp()
    if doprint:
        print(' Eigenvalue (pole) Damping Frequency')
        for pole, ratio, freq in zip(poles, zeta, wn):
            # Treat a tiny imaginary part as numerically real.
            if abs(pole.imag) < 1e-12:
                row = ' %10.4g %10.4g %10.4g' % (pole.real, 1.0, freq)
            else:
                row = '%10.4g%+10.4gj %10.4g %10.4g' % (pole.real, pole.imag, ratio, freq)
            print(row)
    return wn, zeta, poles
def check_average_voxelization_3d(origin, pitch, points, values, gpu, **kwargs):
    """Visually check average_voxelization_3d by rendering the voxel grid.

    Voxelizes *points*/*values* into a 32^3 grid and opens an interactive
    trimesh viewer that slowly rotates the result. Extra kwargs are passed to
    the SceneViewer. Runs on GPU when ``gpu >= 0``, otherwise on CPU.
    """
    # Single batch: every point belongs to batch 0.
    batch_indices = np.zeros((points.shape[0],), dtype=np.int32)
    if (gpu >= 0):
        cuda.get_device_from_id(gpu).use()
        values = cuda.to_gpu(values)
        points = cuda.to_gpu(points)
        batch_indices = cuda.to_gpu(batch_indices)
    y = morefusion.functions.average_voxelization_3d(values, points, batch_indices, batch_size=1, origin=origin, pitch=pitch, dimensions=(32, 32, 32))[0]
    # Move the channel axis last: (C, X, Y, Z) -> (X, Y, Z, C).
    y = y.transpose(1, 2, 3, 0)
    matrix_values = cuda.to_cpu(y.array)
    # A voxel is "filled" if any of its channels is non-zero.
    matrix_filled = (matrix_values != 0).any(axis=3)
    scene = trimesh.Scene()
    # Camera angles, mutated by the callback to animate rotation.
    scene.angles = np.zeros(3)
    geom = trimesh.voxel.VoxelGrid(matrix_filled, ttf.scale_and_translate(pitch, origin)).as_boxes()
    (I, J, K) = zip(*np.argwhere(matrix_filled))
    # Each voxel box has 12 triangular faces; repeat its colour for all of them.
    geom.visual.face_colors = matrix_values[(I, J, K)].repeat(12, axis=0)
    scene.add_geometry(geom)

    def callback(scene):
        # Rotate the camera by 1 degree around the Y axis per frame.
        scene.set_camera(angles=scene.angles)
        scene.angles += [0, np.deg2rad(1), 0]
    trimesh.viewer.SceneViewer(scene=scene, callback=callback, **kwargs)
def run_step(context):
    """pypyr step: archive and/or extract tars as configured under 'tar'.

    Requires a formatted 'tar' key in context with at least one of the
    sub-keys 'extract' or 'archive'; raises KeyNotInContextError otherwise.
    """
    logger.debug('started')
    assert context, f'context must have value for {__name__}'
    context.assert_key_has_value('tar', __name__)
    tar = context.get_formatted('tar')
    did_something = False
    # Extraction runs before archiving when both are requested.
    if tar.get('extract', None):
        did_something = True
        tar_extract(tar)
    if tar.get('archive', None):
        did_something = True
        tar_archive(tar)
    if not did_something:
        raise KeyNotInContextError('pypyr.steps.tar must have either extract or archive specified under the tar key. Or both of these. It has neither.')
    logger.debug('done')
def get_current_node_resource_key() -> str:
    """Return the Ray custom resource key ('node:<address>') of this node.

    Scans ``ray.nodes()`` for the entry matching the current runtime
    context's node id and returns its 'node:'-prefixed resource key.

    Raises:
        ValueError: if the current node is not present in ``ray.nodes()``.
    """
    current_node_id = ray.get_runtime_context().get_node_id()
    for node in ray.nodes():
        if node['NodeID'] == current_node_id:
            for key in node['Resources'].keys():
                if key.startswith('node:'):
                    return key
    # Fix: message previously read "Cannot found ..."; also replaced the
    # for/else with an explicit raise after the loop for clarity.
    raise ValueError('Cannot find the node dictionary for current node.')
def main():
    """Build the GuacaMol ChEMBL dataset: download, standardize, filter
    against a holdout set by ECFP4 similarity, split, and verify hashes.
    """
    setup_default_logger()
    argparser = get_argparser()
    args = argparser.parse_args()
    # Fixed seed so the shuffle (and therefore the splits) is reproducible.
    np.random.seed(1337)
    neutralization_rxns = initialise_neutralisation_reactions()
    smiles_dict = AllowedSmilesCharDictionary()
    print('Preprocessing ChEMBL molecules...')
    chembl_file = os.path.join(args.destination, CHEMBL_FILE_NAME)
    # Holdout SMILES ship inside the guacamol package; first token per line.
    data = pkgutil.get_data('guacamol.data', 'holdout_set_gcm_v1.smiles').decode('utf-8').splitlines()
    holdout_mols = [i.split(' ')[0] for i in data]
    holdout_set = set(canonicalize_list(holdout_mols, False))
    holdout_fps = get_fingerprints_from_smileslist(holdout_set)
    download_if_not_present(chembl_file, uri=CHEMBL_URL)
    raw_smiles = get_raw_smiles(chembl_file, smiles_char_dict=smiles_dict, open_fn=gzip.open, extract_fn=extract_chembl)
    file_prefix = 'chembl24_canon'
    # NOTE(review): this message starts mid-sentence ("and standardizing");
    # presumably it continues the "Preprocessing..." print above.
    print(f'and standardizing {len(raw_smiles)} molecules using {args.n_jobs} cores, and excluding molecules based on ECFP4 similarity of > {TANIMOTO_CUTOFF} to the holdout set.')
    runner = Parallel(n_jobs=args.n_jobs, verbose=2)
    joblist = (delayed(filter_and_canonicalize)(smiles_str, holdout_set, holdout_fps, neutralization_rxns, TANIMOTO_CUTOFF, False) for smiles_str in raw_smiles)
    output = runner(joblist)
    # Deduplicate, sort for determinism, then shuffle with the seeded RNG.
    all_good_mols = sorted(list(set([item[0] for item in output if item])))
    np.random.shuffle(all_good_mols)
    print(f'Ended up with {len(all_good_mols)} molecules. Preparing splits...')
    # 5% validation / 15% test / 80% train split.
    VALID_SIZE = int((0.05 * len(all_good_mols)))
    TEST_SIZE = int((0.15 * len(all_good_mols)))
    dev_set = all_good_mols[0:VALID_SIZE]
    dev_path = os.path.join(args.destination, f'{file_prefix}_dev-valid.smiles')
    write_smiles(dev_set, dev_path)
    test_set = all_good_mols[VALID_SIZE:(VALID_SIZE + TEST_SIZE)]
    test_path = os.path.join(args.destination, f'{file_prefix}_test.smiles')
    write_smiles(test_set, test_path)
    train_set = all_good_mols[(VALID_SIZE + TEST_SIZE):]
    train_path = os.path.join(args.destination, f'{file_prefix}_train.smiles')
    write_smiles(train_set, train_path)
    # Guard against silent changes: outputs must match known dataset hashes.
    valid_hashes = [compare_hash(train_path, TRAIN_HASH), compare_hash(dev_path, VALID_HASH), compare_hash(test_path, TEST_HASH)]
    if (not all(valid_hashes)):
        raise SystemExit(f'Invalid hashes for the dataset files')
    print('Dataset generation successful. You are ready to go.')
class Cheng2020Anchor(JointAutoregressiveHierarchicalPriors):
    """Anchor model variant from Cheng et al. 2020 (learned image compression
    with residual blocks), built on a joint autoregressive hierarchical prior.

    Args:
        N (int): channel count for all internal layers (M is tied to N).
    """

    def __init__(self, N=192, **kwargs):
        super().__init__(N=N, M=N, **kwargs)
        # Analysis transform g_a: image -> latent y (downsamples 16x).
        self.g_a = nn.Sequential(ResidualBlockWithStride(3, N, stride=2), ResidualBlock(N, N), ResidualBlockWithStride(N, N, stride=2), ResidualBlock(N, N), ResidualBlockWithStride(N, N, stride=2), ResidualBlock(N, N), conv3x3(N, N, stride=2))
        # Hyper-analysis h_a: latent -> hyper-latent z.
        self.h_a = nn.Sequential(conv3x3(N, N), nn.LeakyReLU(inplace=False), conv3x3(N, N), nn.LeakyReLU(inplace=False), conv3x3(N, N, stride=2), nn.LeakyReLU(inplace=False), conv3x3(N, N), nn.LeakyReLU(inplace=False), conv3x3(N, N, stride=2))
        # Hyper-synthesis h_s: hyper-latent -> entropy parameters (2N channels).
        self.h_s = nn.Sequential(conv3x3(N, N), nn.LeakyReLU(inplace=False), subpel_conv3x3(N, N, 2), nn.LeakyReLU(inplace=False), conv3x3(N, ((N * 3) // 2)), nn.LeakyReLU(inplace=False), subpel_conv3x3(((N * 3) // 2), ((N * 3) // 2), 2), nn.LeakyReLU(inplace=False), conv3x3(((N * 3) // 2), (N * 2)))
        # Synthesis transform g_s: latent -> reconstructed image.
        self.g_s = nn.Sequential(ResidualBlock(N, N), ResidualBlockUpsample(N, N, 2), ResidualBlock(N, N), ResidualBlockUpsample(N, N, 2), ResidualBlock(N, N), ResidualBlockUpsample(N, N, 2), ResidualBlock(N, N), subpel_conv3x3(N, 3, 2))

    @classmethod
    def from_state_dict(cls, state_dict):
        """Instantiate the model with N inferred from a checkpoint.

        Fix: the method takes ``cls`` but was missing its @classmethod
        decorator (stripped from this source), so instance-less calls broke.
        """
        N = state_dict['g_a.0.conv1.weight'].size(0)
        net = cls(N)
        net.load_state_dict(state_dict)
        return net
class TestInitialSOC(TestCase):
    """Smoke tests: solving SPM from a given initial SOC must work for every
    bundled parameter set, whether SOC is a fraction or a voltage string."""

    def test_interpolant_parameter_sets(self):
        model = pybamm.lithium_ion.SPM()
        parameter_set_names = ['Ai2020', 'Chen2020', 'Ecker2015', 'Marquis2019', 'Mohtat2020', 'OKane2022', 'ORegan2022']
        for name in parameter_set_names:
            with self.subTest(param=name):
                values = pybamm.ParameterValues(name)
                simulation = pybamm.Simulation(model=model, parameter_values=values)
                # Solve once from a fractional SOC, once from a voltage spec.
                simulation.solve([0, 600], initial_soc=0.2)
                simulation.solve([0, 600], initial_soc='3.7 V')
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_while_max(mock_invoke, mock_moduleloader):
    """A while decorator with only `max` set loops the step exactly `max` times.

    Fix: the two bare expression lines above the def were decorator residue
    (their '@patch' / '@patch.object' prefixes had been stripped); restored so
    the mocks are actually applied. The decorator closest to the def supplies
    the first mock argument (mock_invoke).
    """
    step = Step({'name': 'step1', 'while': {'max': 3}})
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    assert (mock_logger_info.mock_calls == [call('while decorator will loop 3 times at 0.0s intervals.'), call('while: running step with counter 1'), call('while: running step with counter 2'), call('while: running step with counter 3')])
    assert (mock_invoke.call_count == 3)
    # The while decorator adds exactly one key: whileCounter.
    assert (len(context) == (original_len + 1))
    assert (context['whileCounter'] == 3)
    assert (step.while_decorator.while_counter == 3)
class SponsorEmailNotificationTemplate(BaseEmailTemplate):
    """Email template used to notify a sponsorship's sponsor contacts."""

    class Meta:
        verbose_name = 'Sponsor Email Notification Template'
        verbose_name_plural = 'Sponsor Email Notification Templates'

    def get_email_context_data(self, **kwargs):
        """Build template context from the sponsorship plus caller overrides."""
        sponsorship = kwargs.pop('sponsorship')
        email_context = {
            'sponsor_name': sponsorship.sponsor.name,
            'sponsorship_start_date': sponsorship.start_date,
            'sponsorship_end_date': sponsorship.end_date,
            'sponsorship_status': sponsorship.status,
            'sponsorship_level': sponsorship.level_name,
        }
        # Remaining kwargs win over the defaults above.
        email_context.update(kwargs)
        return email_context

    def get_email_message(self, sponsorship, **kwargs):
        """Return an email to the selected contact types, or None if no
        matching contact exists."""
        wanted_types = {
            'primary': kwargs.get('to_primary'),
            'administrative': kwargs.get('to_administrative'),
            'accounting': kwargs.get('to_accounting'),
            'manager': kwargs.get('to_manager'),
        }
        contacts = sponsorship.sponsor.contacts.filter_by_contact_types(**wanted_types)
        if not contacts.exists():
            return
        recipients = contacts.values_list('email', flat=True)
        return self.get_email(from_email=settings.SPONSORSHIP_NOTIFICATION_FROM_EMAIL, to=recipients, context={'sponsorship': sponsorship})
class s13_predefined_component_TestCase(pyuvm_unittest.pyuvm_TestCase):
    """Tests for pyuvm's predefined components (IEEE 1800.2 section 13):
    uvm_component hierarchy, naming, lookup, depth, the ConfigDB, and
    uvm_agent's is_active configuration.

    Numeric suffixes on test names (e.g. 13_1_3_2) reference the
    corresponding clause of the UVM spec.
    """
    def setUp(self):
        # Start every test from a clean component tree and empty ConfigDB.
        super().setUp()
        ConfigDB().clear()
        uvm_root().clear_children()
    def test_uvm_component_no_parent(self):
        # A component created with parent=None is adopted by uvm_root().
        comp = uvm_component('test', None)
        self.assertTrue(('test' in uvm_component.component_dict))
        self.assertTrue((comp.parent == uvm_root()))
        self.assertTrue(comp.print_enabled)
    def test_do_execute_op_13_1_2_3(self):
        comp = uvm_component('test', None)
        with self.assertRaises(error_classes.UVMNotImplemented):
            comp.do_execute_op('op')
    def test_component_with_parent(self):
        parent = uvm_component('parent', None)
        child = uvm_component('child', parent)
        # Components register under their dotted full name.
        self.assertTrue(('parent' in uvm_component.component_dict))
        self.assertTrue(('parent.child' in uvm_component.component_dict))
        self.assertTrue((parent.parent == uvm_root()))
        self.assertTrue((child.parent == parent))
        self.assertEqual(list(parent.hierarchy), [parent, child])
    def test_hierarchy(self):
        # hierarchy yields a depth-first preorder traversal.
        parent = uvm_component('parent', None)
        child1 = uvm_component('child1', parent)
        child2 = uvm_component('child2', parent)
        child3 = uvm_component('child3', child1)
        golden_list = [parent, child1, child3, child2]
        self.assertEqual(list(parent.hierarchy), golden_list)
        hier = list(parent.hierarchy)
        hier.reverse()
        golden_list.reverse()
        self.assertEqual(hier, golden_list)
    def test_get_parent_13_1_3_1(self):
        parent = uvm_component('parent', None)
        child = uvm_component('child', parent)
        grandchild = uvm_component('grandchild', child)
        par = grandchild.get_parent()
        self.assertEqual(child, par)
        par = child.get_parent()
        self.assertEqual(par, parent)
        par = grandchild.get_parent().get_parent()
        self.assertEqual(parent, par)
    def test_get_full_name_13_1_3_2(self):
        parent = uvm_component('parent', None)
        child1 = uvm_component('child1', parent)
        child2 = uvm_component('child2', parent)
        child21 = uvm_component('child21', child2)
        parent_name = parent.get_full_name()
        self.assertEqual('parent', parent_name)
        self.assertEqual('parent.child1', child1.get_full_name())
        self.assertEqual('parent.child2', child2.get_full_name())
        self.assertEqual('parent.child2.child21', child21.get_full_name())
    def test_get_children_13_1_3_3(self):
        # get_children returns direct children only, not grandchildren.
        parent = uvm_component('parent', None)
        child1 = uvm_component('child1', parent)
        child2 = uvm_component('child2', parent)
        _ = uvm_component('child3', parent)
        child11 = uvm_component('child11', child1)
        _ = uvm_component('child111', child11)
        children = parent.get_children()
        self.assertTrue((len(children) == 3))
        children = child1.get_children()
        self.assertTrue((len(children) == 1))
        children = child2.get_children()
        self.assertTrue((len(children) == 0))
        children = list(parent.children)
        self.assertEqual(children, parent.get_children())
    def test_child_iterator_13_1_3_4(self):
        parent = uvm_component('parent', None)
        _ = uvm_component('child1', parent)
        _ = uvm_component('child2', parent)
        _ = uvm_component('child3', parent)
        cl = parent.get_children()
        for cc in parent.children:
            _ = cc
            self.assertIn(cc, cl)
    def test_get_child_13_1_3_4(self):
        parent = uvm_component('parent', None)
        child1 = uvm_component('child1', parent)
        _ = uvm_component('child2', parent)
        _ = uvm_component('child3', parent)
        self.assertEqual(parent.get_child('child1'), child1)
        self.assertIsNone(parent.get_child('orphan'))
    def test_get_num_children_13_1_3_5(self):
        parent = uvm_component('parent', None)
        child1 = uvm_component('child1', parent)
        _ = uvm_component('child2', parent)
        _ = uvm_component('child3', parent)
        cl = parent.get_children()
        self.assertEqual(parent.get_num_children(), len(cl))
        self.assertEqual(child1.get_num_children(), len(child1.get_children()))
    def test_has_child_13_1_3_6(self):
        parent = uvm_component('parent', None)
        child1 = uvm_component('child1', parent)
        _ = uvm_component('child2', parent)
        _ = uvm_component('child3', child1)
        self.assertTrue(child1.has_child('child3'))
        self.assertEqual(len(parent.get_children()), 2)
        self.assertEqual(parent.get_child('child1').get_name(), 'child1')
        self.assertEqual(2, parent.get_num_children())
        self.assertFalse(parent.has_child('orphan'))
    def test_lookup_13_1_3_7(self):
        # lookup resolves relative dotted paths; a leading '.' makes the
        # path absolute (rooted above the component).
        parent = uvm_component('parent', None)
        child1 = uvm_component('child1', parent)
        _ = uvm_component('child2', parent)
        child3 = uvm_component('child3', child1)
        child4 = uvm_component('child4', child3)
        self.assertEqual(child1, parent.lookup('child1'))
        self.assertEqual(child3, parent.lookup('child1.child3'))
        self.assertNotEqual(child1, parent.lookup('child2'))
        self.assertEqual(child3, parent.lookup('.parent.child1.child3'))
        self.assertEqual(child3, child1.lookup('child3'))
        self.assertEqual(child4, child1.lookup('child3.child4'))
    def test_get_depth_13_1_3_8(self):
        # Depth counts edges from uvm_root (which is depth 0).
        parent = uvm_component('parent', None)
        child1 = uvm_component('child1', parent)
        _ = uvm_component('child2', parent)
        child3 = uvm_component('child3', child1)
        _ = uvm_component('child4', child3)
        self.assertEqual(0, uvm_root().get_depth())
        self.assertEqual(1, parent.get_depth())
        self.assertEqual(2, child1.get_depth())
        self.assertEqual(3, child3.get_depth())
    class my_component(uvm_component):
        # Minimal component used to exercise the factory create() path.
        async def run_phase(self):
            ...
    def test_component_factory(self):
        mc = self.my_component('mc', None)
        mc2 = self.my_component.create('my_component', None)
        self.assertEqual(type(mc), type(mc2))
    def test_config_db(self):
        # cdb_set/cdb_get scope config items to the component's path;
        # items are not visible outside their configured context.
        aa = uvm_component('aa', None)
        bb = uvm_component('bb', aa)
        cc = uvm_component('cc', aa)
        _ = uvm_component('D', cc)
        ee = uvm_component('ee', bb)
        aa.cdb_set('FIVE', 5, '')
        datum = aa.cdb_get('FIVE', '')
        self.assertEqual(5, datum)
        with self.assertRaises(error_classes.UVMConfigItemNotFound):
            bb.cdb_get('FIVE', '')
        cc.cdb_set('TT', 33, 'aa.bb.cc.*')
        with self.assertRaises(error_classes.UVMConfigItemNotFound):
            cc.cdb_get('TT', '')
        ConfigDB().set(None, 'aa.*', 'TEN', 10)
        datum = ee.cdb_get('TEN', '')
        self.assertEqual(10, datum)
        ConfigDB().set(None, 'aa.cc', 'FF', 44)
        datum = cc.cdb_get('FF', '')
        self.assertEqual(44, datum)
    def test_wildcard_precedence(self):
        # A specific path ('bb') takes precedence over the wildcard '*'.
        aa = uvm_component('aa', None)
        bb = uvm_component('bb', aa)
        cc = uvm_component('cc', aa)
        aa.cdb_set('TEST', 11, '*')
        aa.cdb_set('TEST', 22, 'bb')
        ConfigDB().set(aa, 'aa', 'OTHER', 55)
        _ = aa.cdb_get('TEST', 'X')
        bb_int = bb.cdb_get('TEST', '')
        self.assertEqual(22, bb_int)
        cc_int = cc.cdb_get('TEST', '')
        self.assertEqual(11, cc_int)
        aao = aa.cdb_get('OTHER', 'aa')
        self.assertEqual(55, aao)
    def test_contextless_behavior_in_hierarchy(self):
        aa = uvm_component('aa', None)
        _ = uvm_component('B', aa)
        _ = uvm_component('C', aa)
        ConfigDB().set(aa, '*', 'OTHER', 55)
        aa = ConfigDB().get(aa, 'B', 'OTHER')
        self.assertEqual(55, aa)
    async def test_agent_config(self):
        # An explicit is_active setting on a child agent overrides the
        # UVM_ACTIVE default configured by the test.
        class bottom(uvm_agent):
            def build_phase(self):
                super().build_phase()
        class comp(uvm_agent):
            def build_phase(self):
                super().build_phase()
                ConfigDB().set(self, 'bot', 'is_active', 0)
                self.bot = bottom('bot', self)
        class test(uvm_test):
            def build_phase(self):
                self.cdb_set('is_active', uvm_active_passive_enum.UVM_ACTIVE)
                self.agent = comp('agent', self)
            async def run_phase(self):
                self.raise_objection()
                self.drop_objection()
        (await uvm_root().run_test('test', keep_singletons=True))
        utt = uvm_root().get_child('uvm_test_top')
        self.assertEqual(uvm_active_passive_enum.UVM_ACTIVE, utt.agent.get_is_active())
        self.assertEqual(uvm_active_passive_enum.UVM_PASSIVE, utt.agent.bot.get_is_active())
        self.assertTrue(utt.agent.active())
        self.assertFalse(utt.agent.bot.active())
    async def test_class_as_run_test_argument(self):
        # run_test accepts either the class name (string) or the class itself.
        class DataHolder(metaclass=Singleton):
            def __init__(self):
                self.call_count = 0
            def __str__(self):
                return f'DataHolder.call_count: {self.call_count}'
        class MyTest(uvm_test):
            async def run_phase(self):
                self.raise_objection()
                DataHolder().call_count += 1
                self.drop_objection()
        (await uvm_root().run_test('MyTest', keep_set={DataHolder}))
        (await uvm_root().run_test(MyTest, keep_set={DataHolder}))
        self.assertTrue((DataHolder().call_count == 2))
    async def test_default_agent_config(self):
        # Without any is_active override, agents default to UVM_ACTIVE.
        class bottom(uvm_agent):
            def build_phase(self):
                super().build_phase()
        class comp(uvm_agent):
            def build_phase(self):
                super().build_phase()
                self.bot = bottom('bot', self)
        class test(uvm_test):
            def build_phase(self):
                self.agent = comp('agent', self)
            async def run_phase(self):
                self.raise_objection()
                self.drop_objection()
        (await uvm_root().run_test('test', keep_singletons=True))
        utt = uvm_root().get_child('uvm_test_top')
        self.assertEqual(uvm_active_passive_enum.UVM_ACTIVE, utt.agent.get_is_active())
        self.assertEqual(uvm_active_passive_enum.UVM_ACTIVE, utt.agent.bot.get_is_active())
        self.assertTrue(utt.agent.active())
        self.assertTrue(utt.agent.bot.active())
def generate_score(args: argparse.Namespace, task: tasks.FairseqTask, dataset: data.FairseqDataset, models: List[FairseqEncoderDecoderModel], lang_pair: Optional[str]=None, modify_target_dict: bool=True):
    """Score `dataset`, unwrapping multilingual / dual-learning model wrappers.

    When `lang_pair` is given and the models are FairseqMultiModel or
    DualLearningModel wrappers, the per-language-pair sub-model is selected
    before delegating to _generate_score.
    """
    have_models = len(models) > 0
    if lang_pair and have_models and isinstance(models[0], FairseqMultiModel):
        # Round-robin zipped datasets hold one dataset per language pair.
        if isinstance(dataset, data.RoundRobinZipDatasets):
            dataset = dataset.datasets[lang_pair]
        selected = [wrapper.models[lang_pair] for wrapper in models]
        return _generate_score(models=selected, args=args, task=task, dataset=dataset, modify_target_dict=modify_target_dict)
    if lang_pair and have_models and isinstance(models[0], DualLearningModel):
        # Dual-learning wrappers expose exactly two sub-models.
        sub_key = 'primal' if lang_pair == 'primal_parallel' else 'dual'
        selected = [wrapper.models[sub_key] for wrapper in models]
        return _generate_score(models=selected, args=args, task=task, dataset=dataset, modify_target_dict=modify_target_dict)
    return _generate_score(models=models, args=args, task=task, dataset=dataset, modify_target_dict=modify_target_dict)
def expression_check(prog):
    """ESIL-check every unique instruction found in the functions of *prog*.

    Opens the binary with radare2 (r2pipe), runs auto-analysis, disassembles
    each discovered function, and runs ESILCheck on the first occurrence of
    each distinct instruction (skipping call/jump types and empty/TODO ESIL).
    Results are printed; per-instruction check failures are reported inline.
    """
    instr_dict = {}
    # Fix: removed unused `start_count` local (was len of the empty dict).
    r2p = r2pipe.open(prog)
    info = r2p.cmdj('ij')['bin']
    esilcheck = ESILCheck(info['arch'], bits=info['bits'])
    # 'aa' runs radare2's auto-analysis so 'aflj' can list functions.
    r2p.cmd('aa')
    funcs = r2p.cmdj('aflj')
    for func in funcs:
        try:
            instrs = r2p.cmdj(('pdfj %d' % func['offset']))['ops']
            for instr in instrs:
                if ((instr['esil'] not in ('', 'TODO')) and (instr['type'] not in ('call', 'cjmp', 'jmp'))):
                    op_key = get_op_key(instr)
                    if (op_key not in instr_dict):
                        print((('-' * 120) + '\n'))
                        print(('%016x:\t%16s\t%s ' % (instr['offset'], instr['bytes'], instr['opcode'])))
                        try:
                            esilcheck.check(code=unhexlify(instr['bytes']))
                        except Exception as e:
                            print(('error: %s' % str(e)))
                        print(('\n' + ('-' * 120)))
                        instr_dict[op_key] = instr
        except Exception:
            # Fix: was a bare `except:` which also swallowed KeyboardInterrupt
            # and SystemExit; a malformed function is skipped, nothing more.
            continue
def vectorised_transform_physical_point_to_index(image, point_array, rotate=True):
    """Map physical-space points to (fractional) voxel indices of *image*.

    With ``rotate=True`` the image's spacing and origin tuples are reversed,
    so the result axis order is flipped relative to the image's native
    ordering (presumably converting (x, y, z) to (z, y, x) for a
    SimpleITK-style image — TODO confirm with callers).
    """
    spacing = image.GetSpacing()
    origin = image.GetOrigin()
    if rotate:
        spacing = spacing[::-1]
        origin = origin[::-1]
    # Broadcasts over the points: continuous index = (p - origin) / spacing.
    return (point_array - origin) / spacing
class FlowRegressor(nn.Module):
    """Regress a 3-D flow vector per point from correlation features.

    Two PointNet set-abstraction layers refine 128-channel features, then a
    linear head maps each point's feature to a 3-vector.
    """

    def __init__(self, npoint, use_instance_norm):
        super(FlowRegressor, self).__init__()
        quarter = int(npoint / 4)
        self.sa1 = PointNetSetAbstraction(
            npoint=quarter, radius=None, nsample=32, in_channel=128,
            mlp=[128, 128, 128], group_all=False,
            use_instance_norm=use_instance_norm)
        self.sa2 = PointNetSetAbstraction(
            npoint=quarter, radius=None, nsample=32, in_channel=128,
            mlp=[128, 128, 128], group_all=False,
            use_instance_norm=use_instance_norm)
        self.fc = torch.nn.Linear(128, 3)

    def forward(self, pc1_l_loc, corr_feats):
        # Both abstraction layers sample around the same point locations
        # (level 2 of the pc1 location pyramid).
        _, feats = self.sa1(pc1_l_loc[2], corr_feats)
        _, feats = self.sa2(pc1_l_loc[2], feats)
        # (B, C, N) -> (B, N, C) for the linear layer, then back.
        feats = feats.permute(0, 2, 1).contiguous()
        projected = self.fc(feats)
        flow = projected.permute(0, 2, 1).contiguous()
        return flow
def read_lst(lst_file):
    """Parse a space-separated face-detection list file into a DataFrame.

    Each line is expected as:
        name face_id ymin xmin xmax ymax confidence emotion

    Returns a DataFrame with those eight columns, typed str/int/.../float/int.
    """
    column_order = ['name', 'face_id', 'ymin', 'xmin', 'xmax', 'ymax', 'confidence', 'emotion']
    records = {column: [] for column in column_order}
    with open(lst_file, 'r') as handle:
        stripped_lines = [line.strip() for line in handle.readlines()]
    for line in stripped_lines:
        parts = line.split(' ')
        records['name'].append(parts[0])
        records['face_id'].append(int(parts[1]))
        records['ymin'].append(int(parts[2]))
        records['xmin'].append(int(parts[3]))
        records['xmax'].append(int(parts[4]))
        records['ymax'].append(int(parts[5]))
        records['confidence'].append(float(parts[6]))
        records['emotion'].append(int(parts[7]))
    return pd.DataFrame.from_dict(records)
class UnsupportedClientError(BaseNetworkError):
    """Network error raised when the remote peer's client is not supported."""

    def __init__(self, message: str):
        # Human-readable description of why the client is unsupported.
        self.message = message

    @classmethod
    def code(cls):
        """Stable numeric code identifying this error type on the wire.

        Fix: takes ``cls`` but was missing its @classmethod decorator
        (stripped from this source), so calls through the class failed.
        """
        return 9

    def detail(self):
        """Payload sent along with the error code (the message itself)."""
        return self.message

    @classmethod
    def from_detail(cls, detail) -> Self:
        """Reconstruct the error from a received detail payload.

        Fix: same missing @classmethod as ``code`` above.
        """
        return cls(detail)

    def __str__(self):
        return f'Unsupported client: {self.message}'
class Soquet():
    """One endpoint of a wire in a bloq compute graph: a (bloq instance,
    register, index) triple.

    NOTE(review): the `field(converter=...)` default and the
    `(self, attribute, value)` signature of `_check_idx` strongly suggest
    this is an attrs class whose `@frozen`-style class decorator and
    `@idx.validator` decorator were stripped from this source — confirm
    against upstream before relying on validation running automatically.
    """
    binst: Union[(BloqInstance, DanglingT)]
    reg: 'Register'
    idx: Tuple[(int, ...)] = field(converter=_to_tuple, default=tuple())
    def _check_idx(self, attribute, value):
        # Validates that idx has one entry per register dimension and that
        # each entry is within bounds.
        if (len(value) != len(self.reg.shape)):
            raise ValueError(f'Bad index shape {value} for {self.reg}.')
        for (i, shape) in zip(value, self.reg.shape):
            if (i >= shape):
                raise ValueError(f'Bad index {i} for {self.reg}.')
    def pretty(self) -> str:
        """Human-readable label: 'reg' or 'reg[i, j]' when indexed."""
        label = self.reg.name
        if (len(self.idx) > 0):
            return f"{label}[{', '.join((str(i) for i in self.idx))}]"
        return label
    def __str__(self) -> str:
        return f'{self.binst}.{self.pretty()}'
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) CamemBERT tokenizer.

    Sequences are framed RoBERTa-style: single -> `<s> A </s>`,
    pair -> `<s> A </s></s> B </s>`. Saving the matching slow tokenizer's
    vocabulary requires the original sentencepiece `vocab_file`.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs):
        # The mask token behaves like a normal word preceded by a space
        # (lstrip=True), so "<mask>" consumes the space before it.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        # Without the sentencepiece file we cannot write a slow-tokenizer vocab.
        self.can_save_slow_tokenizer = (False if (not self.vocab_file) else True)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add `<s>`/`</s>` framing around one sequence or a pair."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        # Pair format uses a double separator between the two sequences.
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return all-zero token type ids (CamemBERT does not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into `save_directory`.

        Raises ValueError when this tokenizer was loaded without the original
        vocab file; returns None after logging when the target directory is
        invalid.
        """
        if (not self.can_save_slow_tokenizer):
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        # Skip the copy when source and destination are the same file.
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def step(engine, batch):
    """One training iteration for an ignite Engine: forward, loss, backward,
    optimizer update. Returns the scalar loss.

    Relies on `model`, `optimizer`, `loss_fn`, `DUE` and (for DUE)
    `likelihood` from the enclosing scope.
    """
    model.train()
    if DUE:
        # The DUE variant trains a separate likelihood module as well.
        likelihood.train()
    optimizer.zero_grad()
    inputs, targets = batch
    if torch.cuda.is_available():
        inputs = inputs.cuda()
        targets = targets.cuda()
    predictions = model(inputs)
    if not DUE:
        # Drop trailing singleton dims in-place for the plain model's output.
        predictions.squeeze_()
    loss = loss_fn(predictions, targets)
    loss.backward()
    optimizer.step()
    return loss.item()
def G_logistic(G, D, latents, latent_labels=None, augment=None, ada_augment=None, ada_aug_p=0.6, ada_aug_step=(500 * 1000), *args, **kwargs):
    """Saturating logistic generator loss.

    Generates fakes from `latents`, optionally augments them, scores them
    with the discriminator, and returns ``(loss, reg)`` where `reg` is
    always None for this loss.
    """
    fakes = G(latents, labels=latent_labels)
    if augment is not None:
        fakes, _ = augment(fakes, ada_aug_p)
    logits = D(fakes, labels=latent_labels).float()
    # -BCE(logits, 0) == -mean(softplus(logits)): minimizing pushes the
    # discriminator's fake logits upward.
    loss = -F.binary_cross_entropy_with_logits(logits, torch.zeros_like(logits))
    return (loss, None)
@torch.fx.wrap
def _maybe_compute_kjt_to_jt_dict(stride: int, stride_per_key: List[int], keys: List[str], length_per_key: List[int], values: torch.Tensor, lengths: torch.Tensor, variable_stride_per_key: bool, weights: Optional[torch.Tensor], jt_dict: Optional[Dict[(str, JaggedTensor)]]) -> Dict[(str, JaggedTensor)]:
    """Split a KeyedJaggedTensor's flat buffers into one JaggedTensor per key.

    Returns the cached `jt_dict` when provided; otherwise splits `values`
    (and `weights`, when present) by `length_per_key` and `lengths` by
    stride to construct the per-key jagged tensors.

    Fix: the bare `.wrap` line above the def was decorator residue — its
    `@torch.fx` prefix had been stripped, leaving a syntax error; restored
    as `@torch.fx.wrap` so FX tracing treats this as a leaf call.
    """
    if (not length_per_key):
        return {}
    if (jt_dict is None):
        _jt_dict: Dict[(str, JaggedTensor)] = {}
        if ((not torch.jit.is_scripting()) and is_torchdynamo_compiling()):
            # Help dynamo prove the split sizes cover `values` exactly.
            cat_size = 0
            total_size = values.size(0)
            for i in length_per_key:
                cat_size += i
                torch._check((cat_size <= total_size))
            torch._check((cat_size == total_size))
        values_list = torch.split(values, length_per_key)
        if variable_stride_per_key:
            split_lengths = torch.split(lengths, stride_per_key)
            split_offsets = [torch.ops.fbgemm.asynchronous_complete_cumsum(lengths) for lengths in split_lengths]
        else:
            # Fixed stride: reshape to (num_keys, stride) and unbind per key.
            split_lengths = torch.unbind((lengths.view((- 1), stride) if (lengths.numel() != 0) else lengths), dim=0)
            split_offsets = torch.unbind((_batched_lengths_to_offsets(lengths.view((- 1), stride)) if (lengths.numel() != 0) else lengths), dim=0)
        if (weights is not None):
            weights_list = torch.split(weights, length_per_key)
            for (idx, key) in enumerate(keys):
                length = split_lengths[idx]
                offset = split_offsets[idx]
                _jt_dict[key] = JaggedTensor(lengths=length, offsets=offset, values=values_list[idx], weights=weights_list[idx])
        else:
            for (idx, key) in enumerate(keys):
                length = split_lengths[idx]
                offset = split_offsets[idx]
                _jt_dict[key] = JaggedTensor(lengths=length, offsets=offset, values=values_list[idx])
        jt_dict = _jt_dict
    return jt_dict
@parametrize(all_backends)
def test_gmres_easy(backend):
    """GMRES on a small diagonal system converges to the exact inverse-scaled
    rhs, with and without the Householder/triangular variants.

    Fix: the bare `(all_backends)` line above the def was decorator residue —
    its `@parametrize` prefix had been stripped; restored so the test runs
    once per backend.
    """
    xnp = get_xnp(backend)
    dtype = xnp.float32
    A = xnp.diag(xnp.array([3.0, 4.0, 5.0], dtype=dtype, device=None))
    rhs = [[1], [1], [1]]
    rhs = xnp.array(rhs, dtype=dtype, device=None)
    soln = [[(1 / 3)], [(1 / 4)], [(1 / 5)]]
    soln = xnp.array(soln, dtype=dtype, device=None)
    (max_iters, tolerance) = (3, 1e-08)
    fn = gmres
    x0 = xnp.zeros_like(rhs)
    (approx, _) = fn(lazify(A), rhs, x0, max_iters, tolerance)
    rel_error = relative_error(soln, approx)
    assert (rel_error < 1e-06)
    # Exercise the non-Householder, triangular-solve code path as well.
    (approx, _) = fn(lazify(A), rhs, x0, max_iters, tolerance, use_householder=False, use_triangular=True)
    rel_error = relative_error(soln, approx)
    assert (rel_error < 1e-06)
class Vocab(collections.abc.Set):
    """Immutable bidirectional mapping between vocabulary elements and ids.

    Special elements (UNK/BOS/EOS by default) occupy the lowest ids.
    Lookups of unknown elements in index() fall back to UNK's id.
    """

    def __init__(self, iterable, special_elems=(UNK, BOS, EOS)):
        elements = list(special_elems)
        elements.extend(iterable)
        # Elements must be unique or elem->id would be ambiguous.
        assert (len(elements) == len(set(elements)))
        self.id_to_elem = {i: elem for (i, elem) in enumerate(elements)}
        self.elem_to_id = {elem: i for (i, elem) in enumerate(elements)}

    def __iter__(self):
        # Yield elements in id order (0..len-1).
        for i in range(len(self)):
            (yield self.id_to_elem[i])

    def __contains__(self, value):
        return (value in self.elem_to_id)

    def __len__(self):
        return len(self.elem_to_id)

    def __getitem__(self, key):
        if isinstance(key, slice):
            raise TypeError('Slices not supported.')
        return self.id_to_elem[key]

    def index(self, value):
        """Return the id of `value`, or UNK's id when absent."""
        try:
            return self.elem_to_id[value]
        except KeyError:
            return self.elem_to_id[UNK]

    def indices(self, values):
        return [self.index(value) for value in values]

    def __hash__(self):
        # Identity hash: Vocab instances compare/hash as distinct objects.
        return id(self)

    @staticmethod
    def load(in_path):
        """Load a Vocab previously written by save(); no implicit specials.

        Fixes: was an instance method that ignored `self` (now a
        staticmethod — still callable on instances), and the file handle
        from `json.load(open(...))` was never closed.
        """
        with open(in_path) as f:
            return Vocab(json.load(f), special_elems=())

    def save(self, out_path):
        """Write the elements in id order as a JSON list."""
        with open(out_path, 'w') as f:
            json.dump([self.id_to_elem[i] for i in range(len(self.id_to_elem))], f)
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for LayoutLM (WordPiece-based, BERT-style vocab).

    Fix: the bare `_tokenizers` line above the class was decorator residue —
    its `@require` prefix had been stripped; restored as @require_tokenizers
    so the suite is skipped when the `tokenizers` library is unavailable.
    """
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        # Minimal WordPiece vocab written to a temp dir for from_pretrained.
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwanted,running'
        output_text = 'unwanted, running'
        return (input_text, output_text)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwanted,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_special_tokens_as_you_expect(self):
        pass
def compute_knn(distance_matrix: np.array, k: int=100) -> Tuple[(np.array, np.array)]:
    """Return the k nearest neighbours of each column of a distance matrix.

    Assumes each point's self-distance (the diagonal) is its column minimum,
    so the closest entry is dropped as "self". Returns (distances, indices),
    each of shape (n_points, k), sorted ascending by distance.
    """
    neighbours_plus_self = k + 1  # include self, dropped after sorting
    # Partial partition first (cheap), then fully sort the candidate rows.
    part_idx = distance_matrix.argpartition(neighbours_plus_self, axis=0)
    part_dist = np.take_along_axis(distance_matrix, part_idx, axis=0)
    order = part_dist.argsort(axis=0)
    nn_idx = np.take_along_axis(part_idx, order, axis=0)[1:neighbours_plus_self]
    nn_dist = np.take_along_axis(distance_matrix, nn_idx, axis=0)
    return (np.transpose(nn_dist), np.transpose(nn_idx))
@pytest.mark.parametrize('b_func, b_size', [(pt.matrix, (5, 1)), (pt.matrix, (5, 5)), (pt.vector, (5,))], ids=['b_col_vec', 'b_matrix', 'b_vec'])
@pytest.mark.parametrize('lower', [True, False], ids=['lower=True', 'lower=False'])
@pytest.mark.parametrize('trans', [0, 1, 2], ids=['trans=N', 'trans=C', 'trans=T'])
@pytest.mark.parametrize('unit_diag', [True, False], ids=['unit_diag=True', 'unit_diag=False'])
@pytest.mark.parametrize('complex', [True, False], ids=['complex', 'real'])
@pytest.mark.filterwarnings('ignore:Cannot cache compiled function "numba_funcified_fgraph"')
def test_solve_triangular(b_func, b_size, lower, trans, unit_diag, complex):
    """solve_triangular compiled with NUMBA matches the defining relation
    transpose(A, trans) @ X == b for triangular A.

    Fixes: the six `.parametrize`/`.filterwarnings` lines above the def were
    decorator residue (their `@pytest.mark` prefix had been stripped) and
    were syntax errors; also restored the matmul `@` operators that were
    missing in `A_val @ A_val.conj().T` and `transpose_func(...) @ X_np`.
    """
    if complex:
        pytest.skip('Complex inputs currently not supported to solve_triangular')
    complex_dtype = ('complex64' if config.floatX.endswith('32') else 'complex128')
    dtype = (complex_dtype if complex else config.floatX)
    A = pt.matrix('A', dtype=dtype)
    b = b_func('b', dtype=dtype)
    X = pt.linalg.solve_triangular(A, b, lower=lower, trans=trans, unit_diagonal=unit_diag)
    f = pytensor.function([A, b], X, mode='NUMBA')
    A_val = np.random.normal(size=(5, 5))
    b = np.random.normal(size=b_size)
    if complex:
        A_val = (A_val + (np.random.normal(size=(5, 5)) * 1j))
        b = (b + (np.random.normal(size=b_size) * 1j))
    # Make a PSD matrix so the Cholesky factor exists.
    A_sym = (A_val @ A_val.conj().T)
    A_tri = np.linalg.cholesky(A_sym).astype(dtype)
    if unit_diag:
        # Rescale so the triangular factor has a unit diagonal.
        adj_mat = np.ones((5, 5))
        adj_mat[np.diag_indices(5)] = (1 / np.diagonal(A_tri))
        A_tri = (A_tri * adj_mat)
    A_tri = A_tri.astype(dtype)
    b = b.astype(dtype)
    if (not lower):
        A_tri = A_tri.T
    X_np = f(A_tri, b)
    np.testing.assert_allclose((transpose_func(A_tri, trans) @ X_np), b, atol=ATOL, rtol=RTOL)
@pytest.mark.parametrize('mock_release_id', range(3))
@pytest.mark.parametrize('prerelease', (True, False))
def test_create_or_update_release_when_create_succeeds(default_gitea_client, mock_release_id, prerelease):
    """When create_release succeeds, no update/lookup fallback is attempted.

    Fix: the two `.parametrize(...)` lines above the def were decorator
    residue (their `@pytest.mark` prefix had been stripped) and were syntax
    errors; restored so the test is parametrized as intended.
    """
    tag = 'v1.0.0'
    with mock.patch.object(default_gitea_client, 'create_release') as mock_create_release, mock.patch.object(default_gitea_client, 'get_release_id_by_tag') as mock_get_release_id_by_tag, mock.patch.object(default_gitea_client, 'edit_release_notes') as mock_edit_release_notes:
        mock_create_release.return_value = mock_release_id
        mock_get_release_id_by_tag.return_value = mock_release_id
        mock_edit_release_notes.return_value = mock_release_id
        assert (default_gitea_client.create_or_update_release(tag, RELEASE_NOTES, prerelease) == mock_release_id)
        mock_create_release.assert_called_once_with(tag, RELEASE_NOTES, prerelease)
        # The fallback path (lookup + edit) must not run on success.
        mock_get_release_id_by_tag.assert_not_called()
        mock_edit_release_notes.assert_not_called()
def test_search_for_directory_setup_read_setup(provider: Provider, mocker: MockerFixture, fixture_dir: FixtureDirGetter) -> None:
    """A directory dependency without pyproject metadata is resolved by
    reading its setup, yielding the expected version, requires and extras."""
    mocker.patch('poetry.utils.env.EnvManager.get', return_value=MockEnv())
    demo_path = fixture_dir('git') / 'github.com' / 'demo' / 'demo'
    dependency = DirectoryDependency('demo', demo_path)
    package = provider.search_for_direct_origin_dependency(dependency)
    assert package.name == 'demo'
    assert package.version.text == '0.1.2'
    mandatory = [req for req in package.requires if not req.is_optional()]
    extras_only = [req for req in package.requires if req.is_optional()]
    assert mandatory == [get_dependency('pendulum', '>=1.4.4')]
    assert extras_only == [get_dependency('tomlkit'), get_dependency('cleo')]
    assert package.extras == {'foo': [get_dependency('cleo')], 'bar': [get_dependency('tomlkit')]}
def _save_zero_checkpoint(self, save_path: str, tag: str) -> None:
    """Kick off an async snapshot of the optimizer state; rank 0 also copies
    the recovery script alongside it.

    NOTE(review): `tag` is accepted for interface compatibility but is not
    used in this implementation.
    """
    state = {
        'optimizer': self.optimizer,
        'objects': StateDict(ds_config=self.config, ds_version=version),
    }
    Snapshot.async_take(path=save_path, app_state=state)
    if self.global_rank == 0:
        self._copy_recovery_script(save_path)
def _temporal_scattered_matrix(H, psi0, n_emissions, c_ops, tlist, system_zero_state=None, construct_effective_hamiltonian=True):
    """Compute photon-scattering amplitudes in the temporal basis for a
    fixed number of emissions into the waveguides described by `c_ops`.

    Returns an `n_emissions`-dimensional complex array `phi_n`, each axis of
    size W*T (W waveguides, T time bins), holding one amplitude per
    combination of emission times/waveguides.
    """
    T = len(tlist)
    W = len(c_ops)
    # Even for zero emissions the output needs at least one axis.
    em_dims = max(n_emissions, 1)
    phi_n = np.zeros(([(W * T)] * em_dims), dtype=complex)
    if construct_effective_hamiltonian:
        # Non-Hermitian effective Hamiltonian: H - (i/2) * sum(c^dag c).
        Heff = (QobjEvo(H) - ((1j / 2) * sum([(op.dag() * op) for op in c_ops])))
    else:
        # Caller asserts H already includes the decay terms.
        Heff = H
    # Memoize propagators at the tlist points; they are reused heavily below.
    evolver = Propagator(Heff, memoize=len(tlist))
    # All multisets of emission time indices, |multiset| == n_emissions.
    all_emission_indices = combinations_with_replacement(range(T), n_emissions)
    if (system_zero_state is None):
        system_zero_state = psi0
    for emission_indices in all_emission_indices:
        # Distribute the chosen emission times over the W waveguides in every
        # possible way; dedupe identical assignments via the set.
        partition = tuple(set(set_partition(emission_indices, W)))
        for indices in partition:
            taus = [[tlist[i] for i in wg_indices] for wg_indices in indices]
            phi_n_amp = photon_scattering_amplitude(evolver, c_ops, tlist, taus, psi0, system_zero_state)
            # Store the amplitude at the flat temporal-basis position.
            idx = _temporal_basis_idx(indices, T)
            phi_n[idx] = phi_n_amp
    return phi_n
def dependencies_in_sync(requirements: list[Requirement], sys_path: list[str] | None = None, environment: dict[str, str] | None = None) -> bool:
    """Return True iff every requirement is satisfied by the installed distributions.

    ``sys_path`` defaults to the running interpreter's path and ``environment``
    to the default PEP 508 marker environment.
    """
    path = sys.path if sys_path is None else sys_path
    env = default_environment() if environment is None else environment
    installed = DistributionCache(path)
    return all(dependency_in_sync(req, env, installed) for req in requirements)
class Loader(jinja2.BaseLoader):
    """Jinja2 loader reading templates from a fixed resource subdirectory,
    substituting an inline error page when a template cannot be read."""

    def __init__(self, subdir: str) -> None:
        self._subdir = subdir

    def get_source(self, _env: jinja2.Environment, template: str) -> Tuple[str, str, Callable[[], bool]]:
        """Return ``(source, path, uptodate)`` for *template*.

        On OSError the HTML fallback page (with the error and template name
        injected) is returned instead, and the failure is logged.
        """
        path = os.path.join(self._subdir, template)
        try:
            return (resources.read_file(path), path, lambda: True)
        except OSError as e:
            fallback = html_fallback.replace('%ERROR%', html.escape(str(e)))
            fallback = fallback.replace('%FILE%', html.escape(template))
            log.misc.exception('The {} template could not be loaded from {}'.format(template, path))
            return (fallback, path, lambda: True)
def get_all_hardware_grid_problems(device_graph: nx.Graph, central_qubit: cirq.GridQubit, n_instances: int, rs: np.random.RandomState):
    """Generate random +/-1-weighted hardware-grid problems for every growing subgraph.

    For each subgraph size (grown outward from ``central_qubit``) creates
    ``n_instances`` random instances. Returns a dict keyed by
    ``(n_qubits, instance_index)``.
    """
    problems: Dict[Tuple[int, int], HardwareGridProblem] = {}
    subgraphs = get_growing_subgraphs(device_graph=device_graph, central_qubit=central_qubit)
    for n_qubits in sorted(subgraphs):
        node_subset = nx.subgraph(device_graph, subgraphs[n_qubits])
        for instance_i in range(n_instances):
            weighted = random_plus_minus_1_weights(node_subset, rs=rs)
            qubits = sorted(weighted.nodes)
            coordinates = [(q.row, q.col) for q in qubits]
            # Relabel qubit nodes to contiguous integer indices for the problem graph.
            relabeled = nx.relabel_nodes(weighted, {q: i for i, q in enumerate(qubits)})
            problems[n_qubits, instance_i] = HardwareGridProblem(graph=relabeled, coordinates=coordinates)
    return problems
class save_smplx(nn.Module):
    """Export SMPL-X meshes for input / ground-truth / predicted pose sequences.

    Each pose vector ``p`` is laid out as [global_orient(3), transl(3),
    vposer embedding(rest)]; the embedding is decoded to a body pose via
    VPoser, pushed through the SMPL-X body model, and exported as an .obj
    mesh. Gaze and scene point clouds are exported as .ply alongside.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.vposer, _ = load_vposer(config.vposer_path, vp_model='snapshot')
        self.vposer = self.vposer.to(self.device)
        self.body_mesh_model = smplx.create(config.smplx_path, model_type='smplx', gender='neutral', ext='npz', num_pca_comps=12, create_global_orient=True, create_body_pose=True, create_betas=True, create_left_hand_pose=True, create_right_hand_pose=True, create_expression=True, create_jaw_pose=True, create_leye_pose=True, create_reye_pose=True, create_transl=True, batch_size=1, num_betas=10, num_expression_coeffs=10)

    def _export_pose_mesh(self, p, out_path):
        """Decode one pose vector through VPoser + SMPL-X and export the mesh.

        Extracted from forward(), where this exact sequence was repeated
        verbatim for the input, ground-truth and predicted poses.
        """
        pose = {}
        body_pose = self.vposer.decode(p[6:], output_type='aa')
        pose['body_pose'] = body_pose.cpu().unsqueeze(0)
        pose['pose_embedding'] = p[6:].cpu().unsqueeze(0)
        pose['global_orient'] = p[:3].cpu().unsqueeze(0)
        pose['transl'] = p[3:6].cpu().unsqueeze(0)
        smplx_output = self.body_mesh_model(return_verts=True, **pose)
        body_verts = smplx_output.vertices
        out_mesh = trimesh.Trimesh(body_verts[0].cpu().numpy(), self.body_mesh_model.faces, process=False)
        out_mesh.export(out_path)

    def forward(self, poses_input, poses_label, poses_predict, gazes, scene_points, prefix='epoch0'):
        """Export meshes for the first batch element of each pose sequence,
        plus per-frame gaze point clouds and the scene point cloud, into
        ``<save_path>/<prefix>_smplx/``."""
        save_path = os.path.join(self.config.save_path, '{}_smplx'.format(prefix))
        os.makedirs(save_path, exist_ok=True)
        with torch.no_grad():
            for i, p in enumerate(poses_input[0]):
                self._export_pose_mesh(p, os.path.join(save_path, 'input_{}.obj'.format(i)))
                # NOTE(review): colors sized from gazes[0].shape[1] — assumes it
                # equals the number of gaze points per frame; confirm upstream.
                gaze_ply = trimesh.PointCloud(gazes[0, i].cpu().numpy(), colors=np.ones((gazes[0].shape[1], 3)))
                gaze_ply.export(os.path.join(save_path, 'input_{}_gaze.ply'.format(i)))
            for i, p in enumerate(poses_label[0]):
                self._export_pose_mesh(p, os.path.join(save_path, 'gt_{}.obj'.format(i)))
            for i, p in enumerate(poses_predict[0]):
                self._export_pose_mesh(p, os.path.join(save_path, 'predict_{}.obj'.format(i)))
            scene_ply = trimesh.PointCloud(scene_points[0].cpu().numpy(), colors=np.ones((scene_points.shape[1], 3)))
            scene_ply.export(os.path.join(save_path, 'scene.ply'))
class AsmCmdSolve(AsmCmdBase):
    """Workbench command that runs the asm3 constraint solver.

    Class attributes follow the AsmCmdBase registration convention
    (command id, menu label, icon, keyboard shortcut).
    """

    _id = 1
    _menuText = QT_TRANSLATE_NOOP('asm3', 'Solve constraints')
    _iconName = 'AssemblyWorkbench.svg'
    _accel = 'A, S'  # keyboard shortcut: A then S

    # NOTE(review): parameter is named `cls` — presumably the AsmCmdBase
    # metaclass binds this as a classmethod; confirm against the base class.
    def Activated(cls):
        """Run the solver inside a FreeCAD transaction so changes can be rolled back."""
        from . import solver
        FreeCAD.setActiveTransaction('Assembly solve')
        # logger.report wraps the call, logging (and reporting) any exception.
        logger.report('command "{}" exception'.format(cls.getName()), solver.solve, reportFailed=True)
        FreeCAD.closeActiveTransaction()
def window_sumsquare(window, n_frames, hop_length=200, win_length=800, n_fft=800, dtype=np.float32, norm=None):
    """Sum of squared, normalized window functions across overlapping frames.

    Mirrors librosa's ``window_sumsquare``: used to renormalize the
    overlap-added output of an inverse STFT. Returns an array of length
    ``n_fft + hop_length * (n_frames - 1)``.
    """
    if win_length is None:
        win_length = n_fft
    total_len = n_fft + hop_length * (n_frames - 1)
    envelope = np.zeros(total_len, dtype=dtype)
    # Square the normalized window once, padded/centered to n_fft samples.
    win_sq = get_window(window, win_length, fftbins=True)
    win_sq = librosa_util.normalize(win_sq, norm=norm) ** 2
    win_sq = librosa_util.pad_center(win_sq, n_fft)
    # Overlap-add the squared window at every frame offset, clipping at the end.
    for frame in range(n_frames):
        start = frame * hop_length
        stop = min(total_len, start + n_fft)
        envelope[start:stop] += win_sq[:max(0, min(n_fft, total_len - start))]
    return envelope
def test_shorthand_property():
    """Shorthand node attributes accept numbers, Parameters (except
    conversion_factor) and None, but reject strings."""
    model = Model()
    node = Node(model, 'node')
    for attr in ('min_flow', 'max_flow', 'cost', 'conversion_factor'):
        # Plain numbers are always accepted.
        setattr(node, attr, 123)
        if attr == 'conversion_factor':
            # conversion_factor must be a constant, never a Parameter.
            with pytest.raises(ValueError):
                setattr(node, attr, Parameter(model))
        else:
            setattr(node, attr, Parameter(model))
        # Strings are rejected outright.
        with pytest.raises(TypeError):
            setattr(node, attr, '123')
        # None resets the attribute.
        setattr(node, attr, None)
def test_jsonparse_no_json_raises():
    """run_step must fail loudly when the 'json' key is missing under jsonParse."""
    context = Context({'jsonParse': {'a': 'b'}})
    with pytest.raises(KeyNotInContextError) as err_info:
        jsonparse.run_step(context)
    expected = "context['jsonParse']['json'] doesn't exist. It must exist for pypyr.steps.jsonparse."
    assert str(err_info.value) == expected
class _cupy_channelizer_wrapper(object):
def __init__(self, grid, block, kernel):
if isinstance(grid, int):
grid = (grid,)
if isinstance(block, int):
block = (block,)
self.grid = grid
self.block = block
self.kernel = kernel
def __call__(self, n_chans, n_taps, n_pts, x, h, y):
kernel_args = (n_chans, n_taps, n_pts, x, h, y)
self.kernel(self.grid, self.block, kernel_args) |
def validate_component_args(func, *args, **kwargs):
    """Check that ``args``/``kwargs`` bind to ``func``'s signature.

    Raises ComponentParamError (chained from the TypeError) when they do not.
    """
    sig = inspect.signature(func)
    try:
        sig.bind(*args, **kwargs)
    except TypeError as exc:
        obj_name = generate_obj_name(func)
        raise ComponentParamError(f"Invalid args for '{obj_name}'. {str(exc).capitalize()}.") from exc
class PluginsListViewTestCase(TestCase):
    """Approved-plugins list view: rendering, pagination limits and sorting
    all expose the expected template context."""

    fixtures = ['fixtures/styles.json', 'fixtures/auth.json', 'fixtures/simplemenu.json', 'fixtures/plugins.json']

    # Context keys every list response must provide.
    CONTEXT_KEYS = ('current_sort_query', 'current_querystring', 'per_page_list', 'show_more_items_number')

    def setUp(self):
        pass

    def _get_and_check_context(self, params=None):
        """GET the approved-plugins page and assert the shared context keys."""
        response = self.client.get(reverse('approved_plugins'), params)
        self.assertEqual(response.status_code, 200)
        for key in self.CONTEXT_KEYS:
            self.assertTrue(key in response.context)
        return response

    def test_plugins_list_view(self):
        response = self._get_and_check_context()
        self.assertTemplateUsed(response, 'plugins/plugin_list.html')

    def test_plugins_list_pagination(self):
        # Below the largest page size, "show more" offers the next step up.
        response = self._get_and_check_context({'per_page': 20})
        self.assertEqual(response.context['show_more_items_number'], 50)
        # Beyond the available sizes it falls back to "all records + 1".
        response = self._get_and_check_context({'per_page': 110})
        records_count = Plugin.approved_objects.count()
        self.assertEqual(response.context['show_more_items_number'], records_count + 1)

    def test_plugins_list_sorting(self):
        self._get_and_check_context({'sort': 'name'})
def costFunctionDis1(outputStates, qnnArch):
    """Average overlap of the network output states with |0...0> on the last layer.

    Returns 1 when there are no output states (worst-case cost convention).
    """
    n_out = qnnArch[-1]
    # Build |0...0> with proper tensor-product dims for n_out qubits.
    zero_state = qt.basis(2 ** n_out, 0)
    zero_state.dims = [[2] * n_out, [1] * n_out]
    if len(outputStates) == 0:
        return 1
    total = 0
    for state in outputStates:
        total += (zero_state.dag() * state) * zero_state
    return total.tr() / len(outputStates)
def load(file, file_format=None, file_client_args=None, **kwargs):
    """Load a serialized object from a path, ``Path`` or file-like object.

    The format is given by ``file_format`` or inferred from the filename
    extension; a registered handler performs the actual parsing. Raises
    TypeError on unknown formats or unsupported ``file`` types.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None and is_str(file):
        # Infer from the extension, e.g. 'cfg.json' -> 'json'.
        file_format = file.split('.')[-1]
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')
    handler = file_handlers[file_format]
    if is_str(file):
        # Paths go through a FileClient so remote backends work transparently.
        file_client = FileClient.infer_client(file_client_args, file)
        if handler.str_like:
            stream = StringIO(file_client.get_text(file))
        else:
            stream = BytesIO(file_client.get(file))
        with stream as f:
            return handler.load_from_fileobj(f, **kwargs)
    if hasattr(file, 'read'):
        return handler.load_from_fileobj(file, **kwargs)
    raise TypeError('"file" must be a filepath str or a file-object')
@pytest.mark.skipif(not HAVE_DEPS_FOR_RESOURCE_ESTIMATES, reason='pyscf and/or jax not installed.')
def test_reiher_sf():
    """Resource estimate for the Reiher FeMoco Hamiltonian via single factorization.

    NOTE(review): the original decorator line was truncated to '.skipif(...)'
    and the expected tuple was corrupted to '(14184, , 3320)' (its middle
    element lost). The decorator is restored to the conventional
    @pytest.mark.skipif form and the surviving tuple elements are asserted
    individually — restore the full expected tuple from the reference data
    once confirmed.
    """
    DE = 0.001    # target accuracy
    CHI = 10      # bits of precision for coefficients
    N = 108       # number of spin-orbitals
    LAM = 4258.0  # Hamiltonian 1-norm (lambda)
    L = 200       # rank of the single factorization
    # First pass estimates the step count; second pass uses that estimate.
    output = sf.compute_cost(N, LAM, DE, L, CHI, stps=20000)
    stps1 = output[0]
    output = sf.compute_cost(N, LAM, DE, L, CHI, stps1)
    assert output[0] == 14184
    assert output[2] == 3320
class LBHinge(nn.Module):
    """Loss with a hinge on strongly negative labels.

    Labels below ``threshold`` are treated as background: their predictions
    pass through ReLU and are compared against zero, so only positive
    predictions on background are penalized. All other samples use
    ``error_metric`` directly. ``clip`` optionally caps the final loss.

    NOTE(review): the ``error_metric`` default is a shared module instance
    (mutable default argument); harmless for the stateless MSELoss but worth
    confirming no caller mutates it.
    """

    def __init__(self, error_metric=nn.MSELoss(), threshold=None, clip=None):
        super().__init__()
        self.error_metric = error_metric
        self.threshold = -100 if threshold is None else threshold
        self.clip = clip

    def forward(self, prediction, label, target_bb=None):
        is_negative = (label < self.threshold).float()
        is_positive = 1.0 - is_negative
        # Background predictions are rectified; foreground ones pass through.
        adjusted = is_negative * F.relu(prediction) + is_positive * prediction
        # Background targets are zeroed out by the positive mask.
        loss = self.error_metric(adjusted, is_positive * label)
        if self.clip is not None:
            loss = torch.min(loss, torch.tensor([self.clip], device=loss.device))
        return loss
class pair():
    """A labelled pair of numeric counters.

    Note: ``+`` is in-place — ``a + b`` mutates and returns ``a``; the
    right-hand side's labels are ignored.
    """

    def __init__(self, aval, bval, alabel=None, blabel=None):
        self.aval = aval
        self.bval = bval
        self.alabel = alabel
        self.blabel = blabel

    def __add__(self, rhs):
        # Deliberately mutates self rather than building a new pair.
        self.aval += rhs.aval
        self.bval += rhs.bval
        return self

    def __str__(self):
        return '%s=%d %s=%d' % (self.alabel, self.aval, self.blabel, self.bval)
def test_overriding_struct_hook(converter: BaseConverter) -> None:
    """A per-attribute ``struct_hook`` override wins over default structuring."""
    from math import ceil

    # NOTE(review): ``A`` is constructed positionally and compared by value
    # below, so it presumably carried an attrs ``@define`` decorator that was
    # lost in extraction — confirm against the original test module.
    class A():
        a: int
        b: str
    # 'a' is structured via ceil (0.5 -> 1); 'b' is structured normally (1 -> '1').
    converter.register_structure_hook(A, make_dict_structure_fn(A, converter, a=override(struct_hook=(lambda v, _: ceil(v))), _cattrs_detailed_validation=converter.detailed_validation))
    assert (converter.structure({'a': 0.5, 'b': 1}, A) == A(1, '1'))
class TaskBatchNorm2d(nn.BatchNorm2d):
    """BatchNorm2d tagged with the task it belongs to (e.g. 'shared' or a
    task-specific name); normalization behaviour is inherited unchanged."""

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, task='shared'):
        super().__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
        # Only extra state: the owning task's label.
        self.task = task
def test_for_freevar_step(do_test):
    """RTLIR generation for a for-loop whose step is a free variable."""
    a = CaseForFreeVarStepComp.DUT()
    # Expected RTLIR: one combinational upblk holding a for-loop over i in
    # [0, 2) stepped by the free variable 'freevar_at_upblk', whose body
    # assigns out = in_[0:8].
    a._rtlir_test_ref = {'upblk': CombUpblk('upblk', [For(LoopVarDecl('i'), Number(0), Number(2), FreeVar('freevar_at_upblk', 1), [Assign([Attribute(Base(a), 'out')], Slice(Attribute(Base(a), 'in_'), Number(0), Number(8)), True)])])}
    do_test(a)
class BenchmarkAll(Application):
    """Drive ``pyperformance compile`` for every configured revision and branch.

    Runs each compile+benchmark as a subprocess, records per-run wall-clock
    timing, and tracks skipped / tested / failed keys for the final report.
    """

    def __init__(self, config_filename, options):
        config_filename = os.path.abspath(config_filename)
        conf = parse_config(config_filename, 'compile_all')
        super().__init__(conf, options)
        self.config_filename = config_filename
        self.safe_makedirs(self.conf.directory)
        self.setup_log('compile_all')
        self.outputs = []   # (key, success) for runs that produced results
        self.skipped = []   # keys whose results already existed
        self.failed = []    # keys that failed outright
        self.timings = []   # duration of each run that produced results
        self.logger = logging.getLogger()

    def benchmark(self, revision, branch):
        """Compile and benchmark one revision (optionally qualified by branch)."""
        if branch:
            key = '%s-%s' % (branch, revision)
        else:
            key = revision
        cmd = [sys.executable, '-m', 'pyperformance', 'compile', self.config_filename, revision, branch]
        if not self.conf.update:
            cmd.append('--no-update')
        if not self.conf.system_tune:
            cmd.append('--no-tune')
        self.start = time.monotonic()
        exitcode = self.run_nocheck(*cmd, log_stdout=False)
        dt = time.monotonic() - self.start
        if exitcode:
            self.logger.error('Benchmark exit code: %s' % exitcode)
        if exitcode == EXIT_ALREADY_EXIST:
            # Result already exists: skip, not an error.
            self.skipped.append(key)
            return
        if exitcode >= EXIT_COMPILE_ERROR:
            # NOTE(review): tuning/updating is disabled after such an exit
            # code — presumably to avoid redoing it on later runs; confirm
            # against pyperformance's intent.
            if self.conf.system_tune:
                self.conf.system_tune = False
            if self.conf.update:
                self.conf.update = False
        if exitcode == 0 or exitcode == EXIT_BENCH_ERROR:
            # BUG FIX: success means exit code 0. The previous code stored
            # (exitcode == EXIT_BENCH_ERROR), which inverted the
            # "succeeded"/"failed" messages emitted by report().
            self.outputs.append((key, exitcode == 0))
            self.timings.append(dt)
        else:
            self.failed.append(key)

    def report(self):
        """Log a one-line summary for every skipped, tested and failed key."""
        for key in self.skipped:
            self.logger.error('Skipped: %s' % key)
        for key, success in self.outputs:
            if success:
                success_message = 'All benchmarks succeeded'
            else:
                success_message = 'Some benchmarks failed'
            self.logger.error('Tested: %s (%s)' % (key, success_message))
        for key in self.failed:
            text = 'FAILED: %s' % key
            self.logger.error(text)

    def report_timings(self):
        """Log min/avg/max (and stdev when >= 2 samples) of run durations."""
        def format_time(seconds):
            if seconds >= 100:
                return '%.0f min %.0f sec' % divmod(seconds, 60)
            else:
                return '%.0f sec' % math.ceil(seconds)
        self.logger.error('Timings:')
        self.logger.error('- min: %s' % format_time(min(self.timings)))
        text = '- avg: %s' % format_time(statistics.mean(self.timings))
        if len(self.timings) >= 2:
            stdev = statistics.stdev(self.timings)
            text = '%s -- std dev: %s' % (text, format_time(stdev))
        self.logger.error(text)
        self.logger.error('- max: %s' % format_time(max(self.timings)))

    def main(self):
        """Run every configured (revision, branch) and branch, then report.

        Exits non-zero when nothing is configured or any run failed.
        """
        self.safe_makedirs(self.conf.directory)
        self.logger.error('Compile and benchmark all')
        if self.log_filename:
            self.logger.error('Write logs into %s' % self.log_filename)
        self.logger.error('Revisions: %r' % (self.conf.revisions,))
        self.logger.error('Branches: %r' % (self.conf.branches,))
        if not self.conf.revisions and not self.conf.branches:
            self.logger.error('ERROR: no branches nor revisions configured for compile_all')
            sys.exit(1)
        try:
            for revision, branch in self.conf.revisions:
                self.benchmark(revision, branch)
            for branch in self.conf.branches:
                self.benchmark(branch, branch)
        finally:
            # Always report, even if a run raised.
            self.report()
            if self.timings:
                self.report_timings()
        if self.failed:
            sys.exit(1)
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    """Read-only Django admin for Discord users, rendering roles in their colours.

    NOTE(review): the original decorator line was corrupted to a bare
    '(User)'; the conventional '@admin.register(User)' is restored here.
    """

    def top_role_coloured(self, user: User) -> SafeString:
        """Render the user's top role name in that role's colour."""
        return format_html('<span style="color: {0}; font-weight: bold;">{1}</span>', f'#{user.top_role.colour:06X}', user.top_role.name)
    top_role_coloured.short_description = 'Top Role'

    def all_roles_coloured(self, user: User) -> SafeString:
        """Render every role the user holds, each coloured, separated by line breaks."""
        roles = Role.objects.filter(id__in=user.roles)
        return format_html('</br>'.join((f'<span style="color: #{r.colour:06X}; font-weight: bold;">{r.name}</span>' for r in roles)))
    all_roles_coloured.short_description = 'All Roles'

    search_fields = ('name', 'id', 'roles')
    list_filter = (UserRoleFilter, 'in_guild')
    list_display = ('username', 'top_role_coloured', 'in_guild')
    fields = ('username', 'id', 'in_guild', 'all_roles_coloured')
    sortable_by = ('username',)

    def has_add_permission(self, *args) -> bool:
        """Users are synced from Discord; they cannot be added via the admin."""
        return False

    def has_change_permission(self, *args) -> bool:
        """Users are synced from Discord; they cannot be edited via the admin."""
        return False
@dataclass(frozen=True)
class IdMaker:
    """Make unique string IDs for a sequence of parameter sets (parametrize).

    NOTE(review): the original decorator line was corrupted to a bare
    '(frozen=True)' (a syntax error); '@dataclass(frozen=True)' is restored —
    confirm the file imports ``dataclass`` (or adapt to the project's attrs
    equivalent).
    """

    __slots__ = ('argnames', 'parametersets', 'idfn', 'ids', 'config', 'nodeid', 'func_name')

    # Names of the parametrized arguments.
    argnames: Sequence[str]
    # The parameter sets to generate IDs for.
    parametersets: Sequence[ParameterSet]
    # Optional user callback producing an ID for a single value.
    idfn: Optional[Callable[[Any], Optional[object]]]
    # Optional explicit per-set IDs.
    ids: Optional[Sequence[Optional[object]]]
    config: Optional[Config]
    nodeid: Optional[str]
    func_name: Optional[str]

    def make_unique_parameterset_ids(self) -> List[str]:
        """Resolve one ID per parameter set, suffixing duplicates with indices."""
        resolved_ids = list(self._resolve_ids())
        if len(resolved_ids) != len(set(resolved_ids)):
            # Count occurrences, then suffix each duplicate with a running index.
            id_counts = Counter(resolved_ids)
            id_suffixes: Dict[str, int] = defaultdict(int)
            for index, id in enumerate(resolved_ids):
                if id_counts[id] > 1:
                    # Separate a numeric tail with '_' so 'x1' + 0 -> 'x1_0', not 'x10'.
                    suffix = ''
                    if id and id[-1].isdigit():
                        suffix = '_'
                    new_id = f'{id}{suffix}{id_suffixes[id]}'
                    while new_id in set(resolved_ids):
                        id_suffixes[id] += 1
                        new_id = f'{id}{suffix}{id_suffixes[id]}'
                    resolved_ids[index] = new_id
                    id_suffixes[id] += 1
            assert len(resolved_ids) == len(set(resolved_ids)), f'Internal error: resolved_ids={resolved_ids!r}'
        return resolved_ids

    def _resolve_ids(self) -> Iterable[str]:
        """Yield one (possibly non-unique) ID per parameter set."""
        for idx, parameterset in enumerate(self.parametersets):
            if parameterset.id is not None:
                # ID provided directly on the parameter set - pytest.param(..., id=...)
                yield parameterset.id
            elif self.ids and idx < len(self.ids) and self.ids[idx] is not None:
                # ID provided in the `ids` argument - parametrize(..., ids=[...]).
                yield self._idval_from_value_required(self.ids[idx], idx)
            else:
                # ID not provided - generate it from the parameter values.
                yield '-'.join(self._idval(val, argname, idx) for val, argname in zip(parameterset.values, self.argnames))

    def _idval(self, val: object, argname: str, idx: int) -> str:
        """Make an ID for one parameter value, trying each source in turn."""
        idval = self._idval_from_function(val, argname, idx)
        if idval is not None:
            return idval
        idval = self._idval_from_hook(val, argname)
        if idval is not None:
            return idval
        idval = self._idval_from_value(val)
        if idval is not None:
            return idval
        return self._idval_from_argname(argname, idx)

    def _idval_from_function(self, val: object, argname: str, idx: int) -> Optional[str]:
        """Try the user-provided `ids` callable, wrapping its failures with context."""
        if self.idfn is None:
            return None
        try:
            id = self.idfn(val)
        except Exception as e:
            prefix = f'{self.nodeid}: ' if self.nodeid is not None else ''
            msg = "error raised while trying to determine id of parameter '{}' at position {}"
            msg = prefix + msg.format(argname, idx)
            raise ValueError(msg) from e
        if id is None:
            return None
        return self._idval_from_value(id)

    def _idval_from_hook(self, val: object, argname: str) -> Optional[str]:
        """Try the pytest_make_parametrize_id hook, when a config is available."""
        if self.config:
            id: Optional[str] = self.config.hook.pytest_make_parametrize_id(config=self.config, val=val, argname=argname)
            return id
        return None

    def _idval_from_value(self, val: object) -> Optional[str]:
        """Derive an ID directly from the value, for supported types only."""
        if isinstance(val, STRING_TYPES):
            return _ascii_escaped_by_config(val, self.config)
        elif val is None or isinstance(val, (float, int, bool, complex)):
            return str(val)
        elif isinstance(val, Pattern):
            return ascii_escaped(val.pattern)
        elif val is NOTSET:
            # Fallback to default. Note that NOTSET is an enum.Enum.
            pass
        elif isinstance(val, enum.Enum):
            return str(val)
        elif isinstance(getattr(val, '__name__', None), str):
            # Name of a class, function, module, etc.
            name: str = getattr(val, '__name__')
            return name
        return None

    def _idval_from_value_required(self, val: object, idx: int) -> str:
        """Like _idval_from_value(), but fails the collection on unsupported values."""
        id = self._idval_from_value(val)
        if id is not None:
            return id
        # Fail.
        if self.func_name is not None:
            prefix = f'In {self.func_name}: '
        elif self.nodeid is not None:
            prefix = f'In {self.nodeid}: '
        else:
            prefix = ''
        msg = f'{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__.'
        fail(msg, pytrace=False)

    # BUG FIX: called as self._idval_from_argname(argname, idx) above, yet the
    # signature had no `self` — it must be a staticmethod (as in upstream pytest).
    @staticmethod
    def _idval_from_argname(argname: str, idx: int) -> str:
        """Last-resort ID: the argument name plus the parameter set index."""
        return str(argname) + str(idx)
def define_numeric_word_range(names: str, from_: int, to_: int = None, step: int = 1) -> pp.MatchFirst:
    """Build a MatchFirst of caseless number-word keywords mapped to integers.

    ``names`` is a whitespace-separated list of words assigned the values
    ``range(from_, to_ + 1, step)`` in order; ``to_`` defaults to ``from_``.
    """
    def as_value_keyword(word: str, value: int):
        # Parse action ignores tokens and yields the bound integer value.
        return pp.CaselessKeyword(word).add_parse_action(lambda: value)

    word_list = names.split()
    if to_ is None:
        to_ = from_
    value_range = range(from_, to_ + 1, step)
    expr = pp.MatchFirst(as_value_keyword(word, value) for word, value in zip(word_list, value_range))
    if len(word_list) == 1:
        expr.set_name(word_list[0])
    else:
        expr.set_name(f'{word_list[0]}-{word_list[-1]}')
    return expr
def prune_repo_by_creation_date(repo, policy_config, namespace, tag_page_limit=100):
    """Delete tags older than the policy's age threshold, one page at a time.

    Validates that the policy uses the CREATION_DATE method, then repeatedly
    fetches pages of stale tags and deletes them (logging each deletion) until
    no stale tags remain.
    """
    policy_method = policy_config.get('method', None)
    if policy_method != AutoPruneMethod.CREATION_DATE.value:
        raise InvalidNamespaceAutoPruneMethod(f'Expected prune method type {AutoPruneMethod.CREATION_DATE.value} but got {policy_method}')
    assert_valid_namespace_autoprune_policy(policy_config)
    # Policy value is a duration string; convert the cutoff to milliseconds.
    time_ms = int(convert_to_timedelta(policy_config['value']).total_seconds() * 1000)
    while True:
        tags = oci.tag.fetch_paginated_autoprune_repo_tags_older_than_ms(repo.id, time_ms, tag_page_limit)
        if not tags:
            break
        for tag in tags:
            try:
                tag = oci.tag.delete_tag(repo.id, tag.name)
                if tag is not None:
                    log.log_action('autoprune_tag_delete', namespace.username, repository=repo, metadata={'performer': 'autoprune worker', 'namespace': namespace.username, 'repo': repo.name, 'tag': tag.name})
            except Exception as err:
                raise Exception(f'Error deleting tag with name: {tag.name} with repository id: {repo.id} with error as: {str(err)}')
def test_kcut_equality(kcut_cause, kcut_effect):
    """KCuts compare by direction + partition, and hashes follow equality."""
    duplicate = KCut(Direction.CAUSE, KPartition(Part((0, 2), (0,)), Part((), (2,)), Part((3,), (3,))))
    assert kcut_cause == duplicate
    assert hash(kcut_cause) == hash(duplicate)
    # The cut's hash must not collapse to its partition's hash.
    assert hash(kcut_cause) != hash(kcut_cause.partition)
    # Differing direction makes cuts unequal (and differently hashed).
    assert kcut_cause != kcut_effect
    assert hash(kcut_cause) != hash(kcut_effect)
class F10_TestCase(FC6_TestCase):
    """Verify the F10 'monitor' command is marked as deprecated."""

    def runTest(self):
        parser = self.getParser('monitor')
        # In F10 'monitor' must be a DeprecatedCommand subclass...
        self.assertTrue(issubclass(parser.__class__, DeprecatedCommand))
        # ...that still exposes an underlying parser...
        parser = parser._getParser()
        self.assertIsNotNone(parser)
        # ...whose description carries the Fedora 10 deprecation notice.
        self.assertIn('deprecated:: Fedora10', parser.description)
def test_ae_forward():
    """Forward smoke test for the bottom-up AssociativeEmbedding detector.

    Builds a ResNet-18 backbone with an AESimpleHead, runs one training-mode
    forward pass (expects a loss dict) and one inference pass plus a dummy
    forward under no_grad.
    """
    model_cfg = dict(type='AssociativeEmbedding', pretrained=None, backbone=dict(type='ResNet', depth=18), keypoint_head=dict(type='AESimpleHead', in_channels=512, num_joints=17, num_deconv_layers=0, tag_per_joint=True, with_ae_loss=[True], extra=dict(final_conv_kernel=1), loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=1, ae_loss_type='exp', with_ae_loss=[True], push_loss_factor=[0.001], pull_loss_factor=[0.001], with_heatmaps_loss=[True], heatmaps_loss_factor=[1.0])), train_cfg=dict(), test_cfg=dict(num_joints=17, max_num_people=30, scale_factor=[1], with_heatmaps=[True], with_ae=[True], project2image=True, nms_kernel=5, nms_padding=2, tag_per_joint=True, detection_threshold=0.1, tag_threshold=1, use_detection_val=True, ignore_too_much=False, adjust=True, refine=True, soft_nms=False, flip_test=True, post_process=True, shift_heatmap=True, use_gt_bbox=True, flip_pairs=[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]))
    detector = AssociativeEmbedding(model_cfg['backbone'], model_cfg['keypoint_head'], model_cfg['train_cfg'], model_cfg['test_cfg'], model_cfg['pretrained'])
    detector.init_weights()
    # Synthetic inputs: a single 256x256 RGB image plus matching targets.
    input_shape = (1, 3, 256, 256)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    target = mm_inputs.pop('target')
    mask = mm_inputs.pop('mask')
    joints = mm_inputs.pop('joints')
    img_metas = mm_inputs.pop('img_metas')
    # Training mode must return a dict of losses.
    losses = detector.forward(imgs, target, mask, joints, img_metas, return_loss=True)
    assert isinstance(losses, dict)
    # Inference mode and the dummy forward must run without gradients.
    with torch.no_grad():
        _ = detector.forward(imgs, img_metas=img_metas, return_loss=False)
        _ = detector.forward_dummy(imgs)
def tool(*args: Union[str, Callable], return_direct: bool = False, args_schema: Optional[Type[BaseModel]] = None, infer_schema: bool = True) -> Callable:
    """Decorator turning a function into a Tool.

    Supports three spellings: ``@tool`` (bare), ``@tool("name")`` (custom
    name), and ``@tool(return_direct=..., ...)`` (keyword options only). The
    decorated function must have a docstring, which becomes part of the tool
    description.
    """
    def _make_with_name(tool_name: str) -> Callable:
        def _make_tool(func: Callable) -> Tool:
            assert func.__doc__, 'Function must have a docstring'
            # Description embeds the callable's signature and its docstring.
            description = f'{tool_name}{signature(func)} - {func.__doc__.strip()}'
            schema = args_schema
            if schema is None and infer_schema:
                # Infer a pydantic schema from the function signature.
                schema = validate_arguments(func).model
            return Tool(name=tool_name, func=func, args_schema=schema, description=description, return_direct=return_direct)
        return _make_tool

    if len(args) == 1 and isinstance(args[0], str):
        # @tool("custom-name"): return a decorator expecting the function.
        return _make_with_name(args[0])
    if len(args) == 1 and callable(args[0]):
        # Bare @tool: args[0] is the function itself.
        return _make_with_name(args[0].__name__)(args[0])
    if len(args) == 0:
        # @tool(...) with keyword options only.
        def _partial(func: Callable[[str], str]) -> BaseTool:
            return _make_with_name(func.__name__)(func)
        return _partial
    raise ValueError('Too many arguments for tool decorator')
def compute_tencrop(outputs, labels):
    """Compute top-1/top-5 error (%) for ten-crop evaluation.

    ``outputs`` has shape (batch*10, num_classes): ten crop scores per
    sample, which are summed before ranking. ``labels`` has shape (batch,).
    Returns ``(top1_error, 0, top5_error)``; the constant middle element
    mirrors the (top1, loss, top5) shape of sibling metric helpers.
    """
    output_size = outputs.size()
    # BUG FIX: '/' yields a float, which Tensor.view rejects (sizes must be
    # ints); each sample contributes exactly 10 crops, so use '//'.
    outputs = outputs.view(output_size[0] // 10, 10, output_size[1])
    outputs = outputs.sum(1).squeeze(1)

    def _topk_error(k):
        # Fraction of samples whose label is among the k highest scores.
        _, pred = outputs.topk(k, 1, True, True)
        pred = pred.t()
        correct = pred.eq(labels.data.view(1, -1).expand_as(pred)).view(-1).float().sum(0)
        error = 100.0 - (100.0 * correct) / labels.size(0)
        return float(error.cpu().numpy())

    return (_topk_error(1), 0, _topk_error(5))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.