code stringlengths 101 5.91M |
|---|
@test_utils.test(arch=get_host_arch_list())
def test_loop_var_life():
    """A ``ti.static`` loop variable must not be referenced after its loop.

    BUG FIX(review): the decorator line was mangled in this copy
    (``_utils.test(...)`` — the leading ``@test`` was lost); restored to
    ``@test_utils.test(...)``.  The inner function also appears to have lost
    its ``@ti.kernel`` decorator: without it the body is plain Python, ``i``
    legally survives the loop, no exception is raised, and the
    ``pytest.raises`` check would fail.  Restored as well — confirm both
    against the original test suite.
    """
    @ti.kernel
    def test():
        for i in ti.static(range(8)):
            pass
        print(i)

    # Compiling the kernel must fail: `i` is out of scope after the loop.
    with pytest.raises(Exception):
        test()
def ep_rule_condition1(memory_info: 'MemoryInfo', manager: 'MemoryManager', args):
    """Pairing condition for entanglement purification.

    If ``memory_info`` is an entangled memory within the configured index
    window whose fidelity is still below target, scan ``manager`` for a
    distinct entangled memory in the same window with the same remote node
    and the same fidelity, and return the pair; otherwise return [].
    """
    lower = args['index_lower']
    upper = args['index_upper']
    fidelity_goal = args['target_fidelity']

    eligible = (lower <= memory_info.index <= upper
                and memory_info.state == 'ENTANGLED'
                and memory_info.fidelity < fidelity_goal)
    if not eligible:
        return []

    for candidate in manager:
        if (candidate != memory_info
                and lower <= candidate.index <= upper
                and candidate.state == 'ENTANGLED'
                and candidate.remote_node == memory_info.remote_node
                and candidate.fidelity == memory_info.fidelity):
            # Sanity: a memory cannot be purified against its own remote memory.
            assert memory_info.remote_memo != candidate.remote_memo
            return [memory_info, candidate]
    return []
def basis_from_generators(gens, ords=None):
    """Compute an independent basis for the finite abelian group generated
    by ``gens``.

    Parameters:
        gens: elements generating a finite abelian group (Sage objects).
        ords: optional list of the orders of ``gens``; computed with
            ``g.order()`` when omitted.

    Returns:
        A pair ``(gammas, ms)`` of independent elements and their orders,
        so the group is the direct sum of the cyclic groups generated by
        ``gammas[i]`` with order ``ms[i]``.

    NOTE(review): relies on ``_discrete_log_pgroup`` and
    ``_expand_basis_pgroup`` defined elsewhere in this module; both signal
    "not in the span" by raising TypeError.
    """
    if (not gens):
        return ([], [])
    if (ords is None):
        ords = [g.order() for g in gens]
    # lam is the group exponent (lcm of all orders); handle each prime
    # factor separately, in order of increasing valuation of lam.
    from sage.arith.functions import lcm
    lam = lcm(ords)
    ps = sorted(lam.prime_factors(), key=lam.valuation)
    gammas = []
    ms = []
    for p in ps:
        # Project every generator whose order is divisible by p onto its
        # p-primary component, paired with its p-power order.
        pgens = [((o.prime_to_m_part(p) * g), o.p_primary_part(p)) for (g, o) in zip(gens, ords) if (not (o % p))]
        assert pgens
        pgens.sort(key=(lambda tup: tup[1]))
        # Seed the p-group basis with the element of largest p-power order.
        (alpha, ord_alpha) = pgens.pop()
        vals = [ord_alpha.valuation(p)]
        alphas = [alpha]
        while pgens:
            (beta, ord_beta) = pgens.pop()
            try:
                dlog = _discrete_log_pgroup(p, vals, alphas, beta)
            except TypeError:
                pass
            else:
                # beta already lies in the span of alphas: nothing to add.
                continue
            # beta is independent modulo some p-power: find the smallest
            # v with p^v * beta in the span, then enlarge the basis.
            val_beta = ord_beta.valuation(p)
            beta_q = beta
            for v in range(1, val_beta):
                beta_q *= p
                try:
                    e = _discrete_log_pgroup(p, vals, alphas, (- beta_q))
                except TypeError:
                    continue
                _expand_basis_pgroup(p, alphas, vals, beta, val_beta, (list(e) + [(p ** v)]))
                break
            else:
                # No multiple of beta lies in the span: beta itself is a
                # new independent basis element.
                alphas.append(beta)
                vals.append(val_beta)
        # Merge this prime's basis into the global basis (CRT-style):
        # pair the i-th largest p-part with the i-th accumulated element.
        for (i, (v, a)) in enumerate(sorted(zip(vals, alphas), reverse=True)):
            if (i < len(gammas)):
                gammas[i] += a
                ms[i] *= (p ** v)
            else:
                gammas.append(a)
                ms.append((p ** v))
    return (gammas, ms)
def uniform_quantize_tensor(tensor_data: np.ndarray, range_min: np.ndarray, range_max: np.ndarray, n_bits: int) -> np.ndarray:
    """Uniformly quantize ``tensor_data`` to ``2**n_bits`` levels.

    The quantization range is first adjusted (via
    ``fix_range_to_include_zero``, defined elsewhere in this module) so that
    zero is exactly representable; values are clipped to the range, snapped
    to the nearest level, and returned de-quantized (same scale as input).
    """
    low, high = fix_range_to_include_zero(range_min, range_max, n_bits)
    step = (high - low) / ((2 ** n_bits) - 1)
    clipped = np.clip(tensor_data, a_min=low, a_max=high)
    levels = np.round((clipped - low) / step)
    return step * levels + low
def correlation_coefficient_loss(y_true, y_pred):
    """Loss = 1 - r**2, where r is the Pearson correlation between
    ``y_true`` and ``y_pred`` (r clamped to [-1, 1] for numerical safety).

    Minimizing this loss maximizes the squared correlation.
    """
    centered_true = y_true - K.mean(y_true)
    centered_pred = y_pred - K.mean(y_pred)
    covariance = K.sum(tf.multiply(centered_true, centered_pred))
    norm_product = K.sqrt(tf.multiply(K.sum(K.square(centered_true)),
                                      K.sum(K.square(centered_pred))))
    r = covariance / norm_product
    # Guard against floating-point drift outside [-1, 1].
    r = K.maximum(K.minimum(r, 1.0), (- 1.0))
    return (1 - K.square(r))
class Conv2d_tf(nn.Conv2d):
    """nn.Conv2d with TensorFlow-style 'SAME'/'VALID' padding.

    'SAME' pads asymmetrically (extra row/col on the bottom/right when the
    total padding is odd) so the output spatial size is
    ceil(input / stride), exactly like TensorFlow.
    """

    def __init__(self, *args, **kwargs):
        """Accepts all nn.Conv2d arguments plus ``padding='SAME'|'VALID'``.

        BUG FIX: the original set ``kwargs['padding'] = 0`` AFTER calling
        ``super().__init__``, so the override never took effect and passing
        ``padding='SAME'`` crashed nn.Conv2d.  The TF-style padding string is
        now stripped from kwargs before the base constructor runs.
        """
        tf_padding = kwargs.pop('padding', 'SAME')
        # Base conv is built with zero padding; padding is applied manually
        # in forward().  (Positional `padding` args are not supported.)
        super(Conv2d_tf, self).__init__(*args, padding=0, **kwargs)
        self.padding = tf_padding
        # Normalize stride/dilation to 2-tuples for the arithmetic below.
        if not isinstance(self.stride, Iterable):
            self.stride = (self.stride, self.stride)
        if not isinstance(self.dilation, Iterable):
            self.dilation = (self.dilation, self.dilation)

    def forward(self, input):
        """Apply the convolution with TF-style padding semantics."""
        if self.padding == 'VALID':
            # No padding at all: output shrinks like TF's VALID mode.
            return F.conv2d(input, self.weight, self.bias, self.stride, padding=0, dilation=self.dilation, groups=self.groups)
        # 'SAME': compute total padding needed so out = ceil(in / stride).
        input_rows = input.size(2)
        filter_rows = self.weight.size(2)
        effective_filter_size_rows = (filter_rows - 1) * self.dilation[0] + 1
        out_rows = (input_rows + self.stride[0] - 1) // self.stride[0]
        padding_rows = max(0, (out_rows - 1) * self.stride[0] + effective_filter_size_rows - input_rows)
        rows_odd = (padding_rows % 2) != 0
        input_cols = input.size(3)
        filter_cols = self.weight.size(3)
        effective_filter_size_cols = (filter_cols - 1) * self.dilation[1] + 1
        out_cols = (input_cols + self.stride[1] - 1) // self.stride[1]
        padding_cols = max(0, (out_cols - 1) * self.stride[1] + effective_filter_size_cols - input_cols)
        cols_odd = (padding_cols % 2) != 0
        # Odd total padding: add the extra row/col on the bottom/right,
        # matching TensorFlow's convention.
        if rows_odd or cols_odd:
            input = F.pad(input, [0, int(cols_odd), 0, int(rows_odd)])
        return F.conv2d(input, self.weight, self.bias, self.stride, padding=(padding_rows // 2, padding_cols // 2), dilation=self.dilation, groups=self.groups)
def _persist_noise(noise, path):
with path.open(encoding='utf8', mode='w') as f:
f.write(' '.join(noise)) |
def test():
    """Run the compiled SDFG in-place on a single vec3d element and check
    the result against the expected (+1, +2, +3) offsets.

    NOTE(review): relies on module-level ``vec3d`` (a ctypes-backed struct
    type) and ``sdfg`` (the compiled program) — assumed defined elsewhere.
    """
    buffer = np.ndarray([1], dtype=np.dtype(vec3d.as_ctypes()))
    buffer[0] = (4.0, 5.0, 6.0)
    sdfg(A=buffer)
    target = (5.0, 7.0, 9.0)
    deltas = tuple(abs(got - want) for got, want in zip(buffer[0], target))
    print('Difference:', deltas)
    assert all(d <= 1e-05 for d in deltas)
def tf_idf_sim(claim, lines, freqs=None):
    """Rank ``lines`` (dicts with a 'sentence' key) by TF-IDF similarity to
    ``claim`` and return the top matches, each annotated in-place with a
    'score' key.

    NOTE(review): depends on the module-level ``args`` namespace
    (``args.max_sent``) and ``OnlineTfidfDocRanker`` defined elsewhere.
    """
    ranker = OnlineTfidfDocRanker(args, [entry['sentence'] for entry in lines], freqs)
    line_ids, scores = ranker.closest_docs(claim, args.max_sent)
    ranked = []
    for rank, line_id in enumerate(line_ids):
        selected = lines[line_id]
        ranked.append(selected)
        selected['score'] = scores[rank]
    return ranked
class BootstrapFewShot(Teleprompter):
    """Teleprompter that bootstraps few-shot demonstrations for a student
    program by running a teacher program over the trainset and keeping the
    traces accepted by ``metric``.

    Args:
        metric: callable(example, prediction, trace) used to accept a
            bootstrapped trace; ``None`` accepts everything.
        teacher_settings: settings applied to the teacher's LM context.
        max_bootstrapped_demos: max bootstrapped demos per predictor.
        max_labeled_demos: max raw labeled demos per predictor.
        max_rounds: passes over the trainset (temperature nudged after
            round 0 to diversify).
        max_errors: abort after this many example-level failures.
    """

    def __init__(self, metric=None, teacher_settings=None, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, max_errors=5):
        self.metric = metric
        # FIX: the default used to be a shared mutable dict ({}); use a
        # None sentinel instead (behavior for all callers unchanged).
        self.teacher_settings = ({} if teacher_settings is None else teacher_settings)
        self.max_bootstrapped_demos = max_bootstrapped_demos
        self.max_labeled_demos = max_labeled_demos
        self.max_rounds = max_rounds
        self.max_errors = max_errors
        self.error_count = 0
        # Protects error_count when metrics run in worker threads.
        self.error_lock = threading.Lock()

    def compile(self, student, *, teacher=None, trainset, valset=None):
        """Bootstrap demos from ``trainset`` and return the compiled student."""
        self.trainset = trainset
        self.valset = valset
        self._prepare_student_and_teacher(student, teacher)
        self._prepare_predictor_mappings()
        self._bootstrap()
        self.student = self._train()
        self.student._compiled = True
        return self.student

    def _prepare_student_and_teacher(self, student, teacher):
        """Reset the student; derive (and label-few-shot-compile) the teacher."""
        self.student = student.reset_copy()
        self.teacher = (teacher.deepcopy() if (teacher is not None) else student.reset_copy())
        assert (self.student._compiled is False), 'Student must be uncompiled.'
        # Give the teacher labeled demos first so its traces are richer.
        if (self.max_labeled_demos and (self.teacher._compiled is False)):
            teleprompter = LabeledFewShot(k=self.max_labeled_demos)
            self.teacher = teleprompter.compile(self.teacher.reset_copy(), trainset=self.trainset)

    def _prepare_predictor_mappings(self):
        """Build name<->predictor maps; student and teacher must be
        structurally identical but distinct objects."""
        (name2predictor, predictor2name) = ({}, {})
        (student, teacher) = (self.student, self.teacher)
        assert (len(student.predictors()) == len(teacher.predictors())), 'Student and teacher must have the same number of predictors.'
        for ((name1, predictor1), (name2, predictor2)) in zip(student.named_predictors(), teacher.named_predictors()):
            assert (name1 == name2), 'Student and teacher must have the same program structure.'
            assert (predictor1.signature == predictor2.signature), f'Student and teacher must have the same signatures. {type(predictor1.signature)} != {type(predictor2.signature)}'
            assert (id(predictor1) != id(predictor2)), 'Student and teacher must be different objects.'
            name2predictor[name1] = None
            predictor2name[id(predictor1)] = name1
            # A Retry wrapper's inner module shows up in traces too.
            if isinstance(predictor1, Retry):
                predictor2name[id(predictor1.module)] = name1
            predictor2name[id(predictor2)] = name2
        self.name2predictor = name2predictor
        self.predictor2name = predictor2name

    def _bootstrap(self, *, max_bootsraps=None):
        """Collect up to ``max_bootsraps`` successful traces from the
        trainset.  (The parameter's typo is preserved for API compatibility.)"""
        max_bootsraps = (max_bootsraps or self.max_bootstrapped_demos)
        bootstrapped = {}
        self.name2traces = {name: [] for name in self.name2predictor}
        for round_idx in range(self.max_rounds):
            for (example_idx, example) in enumerate(tqdm.tqdm(self.trainset)):
                if (len(bootstrapped) >= max_bootsraps):
                    break
                if (example_idx not in bootstrapped):
                    success = self._bootstrap_one_example(example, round_idx)
                    if success:
                        bootstrapped[example_idx] = True
        print(f'Bootstrapped {len(bootstrapped)} full traces after {(example_idx + 1)} examples in round {round_idx}.')
        # Everything not bootstrapped becomes the (shuffled) validation pool,
        # unless an explicit valset was supplied.
        self.validation = [x for (idx, x) in enumerate(self.trainset) if (idx not in bootstrapped)]
        random.Random(0).shuffle(self.validation)
        self.validation = (self.valset or self.validation)

    def _bootstrap_one_example(self, example, round_idx=0):
        """Run the teacher on one example; on metric success, record a demo
        for every predictor appearing in the trace.  Returns the metric
        outcome (False on failure)."""
        name2traces = self.name2traces
        teacher = self.teacher
        predictor_cache = {}
        try:
            with dsp.settings.context(trace=[], **self.teacher_settings):
                # After round 0, nudge the temperature so new rounds explore.
                lm = dsp.settings.lm
                lm = (lm.copy(temperature=(0.7 + (0.001 * round_idx))) if (round_idx > 0) else lm)
                new_settings = (dict(lm=lm) if (round_idx > 0) else {})
                with dsp.settings.context(**new_settings):
                    # Temporarily drop the example itself from each
                    # predictor's demos to avoid trivial leakage.
                    for (name, predictor) in teacher.named_predictors():
                        predictor_cache[name] = predictor.demos
                        predictor.demos = [x for x in predictor.demos if (x != example)]
                    prediction = teacher(**example.inputs())
                    trace = dsp.settings.trace
                    # Restore the cached demos.
                    for (name, predictor) in teacher.named_predictors():
                        predictor.demos = predictor_cache[name]
                success = ((self.metric is None) or self.metric(example, prediction, trace))
        except Exception as e:
            success = False
            with self.error_lock:
                self.error_count += 1
                current_error_count = self.error_count
            if (current_error_count >= self.max_errors):
                raise e
            print(f'Failed to run or to evaluate example {example} with {self.metric} due to {e}.')
        if success:
            # Turn every step of the successful trace into an augmented demo.
            for step in trace:
                (predictor, inputs, outputs) = step
                if ('dspy_uuid' in example):
                    demo = Example(augmented=True, dspy_uuid=example.dspy_uuid, **inputs, **outputs)
                else:
                    demo = Example(augmented=True, **inputs, **outputs)
                try:
                    predictor_name = self.predictor2name[id(predictor)]
                except KeyError as e:
                    continue
                    # NOTE(review): everything below is unreachable (dead
                    # code after `continue`); kept verbatim from the original.
                    print(f'Failed to find predictor {predictor} in {self.predictor2name}.')
                    print('Are you doing this in a notebook (Jupyter)? This might be caused by redefining values by rerunning cells.')
                    print('Try restarting the notebook, or open an issue.')
                    raise KeyError(f'Failed to find predictor {id(predictor)} {predictor} in {self.predictor2name}.') from e
                name2traces[predictor_name].append(demo)
        return success

    def _train(self):
        """Assign each student predictor its bootstrapped demos topped up
        with labeled demos sampled from the validation pool."""
        rng = random.Random(0)
        raw_demos = self.validation
        for (name, predictor) in self.student.named_predictors():
            augmented_demos = self.name2traces[name][:self.max_bootstrapped_demos]
            sample_size = min((self.max_labeled_demos - len(augmented_demos)), len(raw_demos))
            sample_size = max(0, sample_size)
            raw_demos = rng.sample(raw_demos, sample_size)
            import dspy
            # BUG FIX: this comparison had lost its right-hand operand in
            # this copy (`release >= ` — a SyntaxError).  Restored with the
            # dspy release cutoff that changed the demo ordering.
            # NOTE(review): confirm the exact cutoff constant against dspy's
            # version history.
            if (dspy.settings.release >= 20231003):
                predictor.demos = (raw_demos + augmented_demos)
            else:
                predictor.demos = (augmented_demos + raw_demos)
        return self.student
def test_epoch(flow, test_loader, epoch, device=None, add_noise=True, annealing=False):
if annealing:
anneal_exponent = anneal_schedule(epoch, quiet=True)
else:
anneal_exponent = 0.0
snr_threshold = (2 * anneal_exponent)
anneal_exponent = torch.tensor(anneal_exponent).to(device)
snr_threshold = torch.tensor(snr_threshold).to(device)
with torch.no_grad():
flow.eval()
test_loss = 0.0
total_weight = 0.0
for (h, x, w, snr) in test_loader:
if (device is not None):
h = h.to(device, non_blocking=True)
x = x.to(device, non_blocking=True)
w = w.to(device, non_blocking=True)
snr = snr.to(device, non_blocking=True)
if add_noise:
y = (h + torch.randn_like(h))
else:
y = h
loss = (- flow.log_prob(x, context=y))
if (anneal_exponent > 0.0):
anneal_factor = ((snr - snr_threshold) ** anneal_exponent)
else:
anneal_factor = torch.tensor(1.0).to(device)
loss = (loss * anneal_factor)
test_loss += (w * loss).sum()
total_weight += w.sum()
test_loss = (test_loss.item() / len(test_loader.dataset))
print('Test set: Average loss: {:.4f}\n'.format(test_loss))
return test_loss |
def Newton_polytope_vars_coeffs(polynomial, variables):
    """Group the terms of ``polynomial`` by their exponent vector in
    ``variables``.

    Returns a dict mapping each exponent tuple ``v`` (exponents of
    ``variables`` only) to the accumulated coefficient in the remaining
    variables, i.e. the sum of ``c * (monomial / variables**v)`` over all
    terms whose ``variables``-exponents equal ``v``.  The keys form the
    support of the Newton polytope with respect to ``variables``.

    NOTE(review): Sage-specific — iterating a polynomial yields
    (coefficient, monomial) pairs, and ``prod`` is assumed imported from
    Sage elsewhere in this module.
    """
    R = polynomial.parent()
    # Positions of the selected variables among the ring's generators.
    var_indices = [R.gens().index(x) for x in variables]
    result = {}
    for (c, m) in polynomial:
        # Full exponent vector of this monomial in all ring generators.
        e = m.exponents()[0]
        # Exponents restricted to the selected variables.
        v = tuple([e[i] for i in var_indices])
        # Strip the selected variables out of the monomial.
        m_red = (m // prod(((x ** i) for (x, i) in zip(variables, v))))
        result[v] = (result.get(v, R.zero()) + (c * m_red))
    return result
def main_30():
    """Plot pose error vs. noise level at 30% outliers (RANSAC EPnP vs Ours).

    Reads the module-level globals ``noise_sigmas``,
    ``ransanc_pnp_add_rel_errors_outlier_30``,
    ``cpnp_add_rel_errors_outlier_30``, plus the styling globals
    (``marker_size``, ``linewidth``, ``font_size``, ``handlelength``) and
    ``viewer``.  Saves the figure under output/sphere_synt_plot/ and opens
    it with the configured viewer.

    FIX: removed the dead local ``plot_i`` (assigned and incremented but
    never read).
    """
    print('outlier: 30%')
    fig = plt.figure(figsize=(5, 5), dpi=150)
    (h1,) = plt.plot(noise_sigmas, ransanc_pnp_add_rel_errors_outlier_30, marker='o', markersize=marker_size, markerfacecolor='none', label='RANSAC EPnP', linewidth=linewidth, color=((255 / 255.0), (150 / 255.0), (150 / 255.0)))
    (h2,) = plt.plot(noise_sigmas, cpnp_add_rel_errors_outlier_30, marker='d', markersize=marker_size, markerfacecolor='none', label='Ours', linewidth=linewidth, color=(0, (112 / 255.0), (68 / 255.0)))
    handles = [h1, h2]
    labels = ['RANSAC EPnP', 'Ours']
    plt.legend(handles, labels, loc='upper left', fontsize=font_size, fancybox=True, framealpha=0.5, handlelength=handlelength)
    plt.xlim([0, 0.062])
    plt.ylim([0.01, 0.45])
    plt.yscale('log')
    plt.grid(True)
    ax = plt.gca()
    ax.set_xlabel('noise level $\\sigma$ (outlier=30%)', fontsize=font_size)
    ax.set_ylabel('pose error', fontsize=font_size)
    # Explicit tick positions with plain-number formatting on the log axis.
    ax.set_yticks([0.01, 0.02, 0.03, 0.05, 0.1, 0.15, 0.2, 0.3, 0.4])
    ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
    ax.xaxis.set_tick_params(labelsize=font_size)
    ax.yaxis.set_tick_params(labelsize=font_size)
    save_path = 'output/sphere_synt_plot/sphere_outlier_30.png'
    mmcv.mkdir_or_exist(osp.dirname(save_path))
    plt.savefig(save_path, dpi=fig.dpi, bbox_inches='tight')
    print('save fig path: ', save_path)
    os.system(f'{viewer} {save_path}')
# NOTE(review): the line below is a decorator mangled during extraction
# (likely `@<something>_module()`, e.g. a registry decorator such as
# `@register_module()`); as written it calls an undefined name at import
# time.  Restore the original decorator from the source repository.
_module()
class CosineAnnealingLRWarmRestarts(scheduler.CosineAnnealingWarmRestarts):
    """Thin registry wrapper around CosineAnnealingWarmRestarts.

    ``max_epoch`` is accepted for interface uniformity with the other
    schedulers but is not used; all other arguments are forwarded unchanged.
    """
    def __init__(self, optimizer, T_0, max_epoch=(- 1), T_mult=1, eta_min=0, verbose=False):
        # max_epoch intentionally ignored; see class docstring.
        super(CosineAnnealingLRWarmRestarts, self).__init__(optimizer, T_0, T_mult=T_mult, eta_min=eta_min, verbose=verbose)
class MultiMarginLoss(_WeightedLoss):
    """Multi-class classification hinge loss.

    For each sample with logits ``input`` and true class ``target``, averages
    ``max(0, margin - input[target] + input[i]) ** p`` over all wrong classes
    ``i``.  Only ``p`` in {1, 2} is supported; an optional 1-D per-class
    ``weight`` rescales the contributions.
    """
    __constants__ = ['p', 'margin', 'reduction']
    margin: float
    p: int

    def __init__(self, p: int=1, margin: float=1.0, weight: Optional[Tensor]=None, size_average=None, reduce=None, reduction: str='mean') -> None:
        super().__init__(weight, size_average, reduce, reduction)
        # Only linear (p=1) and squared (p=2) hinge penalties exist.
        if p not in (1, 2):
            raise ValueError('only p == 1 and p == 2 supported')
        # Per-class weights, when given, must be a 1-D tensor.
        assert weight is None or weight.dim() == 1
        self.margin = margin
        self.p = p

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        """Delegate to the functional implementation with stored settings."""
        return F.multi_margin_loss(input, target, p=self.p, margin=self.margin, weight=self.weight, reduction=self.reduction)
def test_strides():
    """AnchorGenerator must honor scalar and anisotropic (w, h) strides."""
    from mmdet.core import AnchorGenerator
    # Scalar stride 10: anchors tile a 2x2 grid with 10px spacing both ways.
    generator = AnchorGenerator([10], [1.0], [1.0], [10])
    anchors = generator.grid_anchors([(2, 2)], device='cpu')
    expected = torch.tensor([[-5.0, -5.0, 5.0, 5.0],
                             [5.0, -5.0, 15.0, 5.0],
                             [-5.0, 5.0, 5.0, 15.0],
                             [5.0, 5.0, 15.0, 15.0]])
    assert torch.equal(anchors[0], expected)
    # Tuple stride (10, 20): vertical spacing doubles, horizontal unchanged.
    generator = AnchorGenerator([(10, 20)], [1.0], [1.0], [10])
    anchors = generator.grid_anchors([(2, 2)], device='cpu')
    expected = torch.tensor([[-5.0, -5.0, 5.0, 5.0],
                             [5.0, -5.0, 15.0, 5.0],
                             [-5.0, 15.0, 5.0, 25.0],
                             [5.0, 15.0, 15.0, 25.0]])
    assert torch.equal(anchors[0], expected)
def plot_reset_comparison(spk_in, mem_rec, spk_rec, mem_rec0, spk_rec0):
    """Show side-by-side spike rasters and membrane traces comparing
    reset-by-subtraction (left column) with reset-to-zero (right column).

    Rows: input spikes, membrane potential (with the 0.5 threshold drawn),
    output spikes.
    """
    fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(10, 6), sharex=True,
                           gridspec_kw={'height_ratios': [0.4, 1, 0.4], 'wspace': 0.05})
    spike_style = dict(s=400, c='black', marker='|')

    # --- Left column: reset by subtraction ---
    splt.raster(spk_in, ax[0][0], **spike_style)
    ax[0][0].set_ylabel('Input Spikes')
    ax[0][0].set_title('Reset by Subtraction')
    ax[0][0].set_yticks([])
    ax[1][0].plot(mem_rec)
    ax[1][0].set_ylim([0, 0.7])
    ax[1][0].set_ylabel('Membrane Potential ($U_{mem}$)')
    ax[1][0].axhline(y=0.5, alpha=0.25, linestyle='dashed', c='black', linewidth=2)
    splt.raster(spk_rec, ax[2][0], **spike_style)
    ax[2][0].set_yticks([])
    ax[2][0].set_xlabel('Time step')
    ax[2][0].set_ylabel('Output Spikes')

    # --- Right column: reset to zero ---
    splt.raster(spk_in, ax[0][1], **spike_style)
    ax[0][1].set_title('Reset to Zero')
    ax[0][1].set_yticks([])
    ax[1][1].plot(mem_rec0)
    ax[1][1].set_ylim([0, 0.7])
    ax[1][1].axhline(y=0.5, alpha=0.25, linestyle='dashed', c='black', linewidth=2)
    ax[1][1].set_yticks([])
    ax[2][1].set_xlabel('Time step')
    splt.raster(spk_rec0, ax[2][1], **spike_style)
    ax[2][1].set_yticks([])
    plt.show()
def get_abbr_impl():
    """Return the two-letter wheel-tag abbreviation of the running Python
    implementation: 'pp' (PyPy), 'jy' (Jython), 'ip' (IronPython) or
    'cp' (CPython, the default)."""
    if hasattr(sys, 'pypy_version_info'):
        return 'pp'
    if sys.platform.startswith('java'):
        return 'jy'
    if sys.platform == 'cli':
        return 'ip'
    return 'cp'
def _find_spacepy_dir():
if ('SPACEPY' in os.environ):
parentdir = os.path.abspath(os.path.expanduser(os.environ['SPACEPY']))
if (not os.path.exists(parentdir)):
try:
os.makedirs(parentdir)
except OSError as e:
if (e.errno != errno.EEXIST):
raise
elif ('HOME' in os.environ):
parentdir = os.environ['HOME']
elif (('HOMEDRIVE' in os.environ) and ('HOMEPATH' in os.environ)):
parentdir = os.path.join(os.environ['HOMEDRIVE'], os.environ['HOMEPATH'])
else:
parentdir = os.path.expanduser('~')
return os.path.join(parentdir, '.spacepy') |
class ReferenceDecoder(Decoder):
    """Decoder that force-decodes a given reference (target) sentence,
    scoring each reference token under the predictors."""
    name = 'reference'

    def __init__(self, decoder_args):
        """Forward configuration to the base Decoder."""
        super(ReferenceDecoder, self).__init__(decoder_args)

    def decode(self, src_sentence, trgt_sentence):
        """Score ``trgt_sentence`` (plus EOS) for ``src_sentence`` and
        return the resulting single full hypothesis."""
        self.trgt_sentence = (trgt_sentence + [utils.EOS_ID])
        self.initialize_predictor(src_sentence)
        hypo = PartialHypothesis(self.get_predictor_states(), self.calculate_stats)
        while (hypo.get_last_word() != utils.EOS_ID):
            self._expand_hypo(hypo)
        hypo.score = self.get_adjusted_score(hypo)
        self.add_full_hypo(hypo.generate_full_hypothesis())
        return self.get_full_hypos_sorted()

    def _expand_hypo(self, hypo):
        """Append the next reference token to ``hypo`` and accumulate its
        posterior score.

        BUG FIX: the posterior lookup searched for an undefined name ``k``
        (a NameError at runtime); it must search for ``next_word``.  Also
        removed the unused ``max_score = utils.max_(posterior)`` local.
        """
        self.set_predictor_states(hypo.predictor_states)
        next_word = self.trgt_sentence[len(hypo.trgt_sentence)]
        (ids, posterior, _) = self.apply_predictor()
        ind = utils.binary_search(ids, next_word)
        hypo.predictor_states = self.get_predictor_states()
        hypo.score += posterior[ind]
        hypo.score_breakdown.append(posterior[ind])
        hypo.trgt_sentence += [next_word]
        self.consume(next_word)
def worker_single(remote, parent_remote, env_fn_wrapper):
    """Subprocess worker loop for a single environment.

    Receives (command, payload) messages on ``remote`` and answers with the
    corresponding environment results until a 'close' command arrives.
    ``env_fn_wrapper.x`` is a zero-arg factory producing the environment.
    """
    parent_remote.close()
    env = env_fn_wrapper.x()
    while True:
        command, payload = remote.recv()
        if command == 'step':
            ob, select_opponent, reward, done, info = env.step(payload)
            # Auto-reset once every sub-agent reports done.
            if all(done):
                ob, select_opponent = env.reset()
            remote.send((ob, select_opponent, reward, done, info))
        elif command == 'reset':
            ob, select_opponent = env.reset()
            remote.send((ob, select_opponent))
        elif command == 'reset_task':
            remote.send(env.reset_task())
        elif command == 'close':
            remote.close()
            return
        elif command == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        else:
            raise NotImplementedError
# NOTE(review): the two lines below are decorators mangled during extraction
# (the `@word` prefix was stripped) — almost certainly
# `@pytest.mark.skipif(...)` and an instrumenter-parametrizing decorator
# (something like `@..._instrumenter`).  As written they are not valid
# Python; restore them from the original test suite.
.skipif((sys.version_info.major < 3), reason='not tested for python 2')
_instrumenter
def test_io(scorep_env, instrumenter):
    """End-to-end check that Score-P records POSIX I/O events when a traced
    script performs file I/O.

    Runs cases/file_io.py under Score-P with `--io=runtime:posix`, then
    scans the OTF2 trace for the expected ENTER/IO_*/LEAVE event patterns
    for open/seek/write/read/close on the test file.
    """
    trace_path = get_trace_path(scorep_env)
    print('start')
    (std_out, std_err) = utils.call_with_scorep('cases/file_io.py', ['--nocompiler', ('--instrumenter-type=' + instrumenter), '--noinstrumenter', '--io=runtime:posix'], env=scorep_env)
    # The traced script must run cleanly and print its marker line.
    assert (std_err == '')
    assert ('test\n' in std_out)
    trace = utils.OTF2_Trace(trace_path)
    # Regex matching the traced file's I/O handle name in the OTF2 dump.
    file_regex = '\\[POSIX I\\/O\\][ \\w:/]*test\\.txt'
    # Expected event triples per I/O operation.
    ops = {'open': {'ENTER': 'open64', 'IO_CREATE_HANDLE': file_regex, 'LEAVE': 'open64'}, 'seek': {'ENTER': 'lseek64', 'IO_SEEK': file_regex, 'LEAVE': 'lseek64'}, 'write': {'ENTER': 'write', 'IO_OPERATION_BEGIN': file_regex, 'IO_OPERATION_COMPLETE': file_regex, 'LEAVE': 'write'}, 'read': {'ENTER': 'read', 'IO_OPERATION_BEGIN': file_regex, 'IO_OPERATION_COMPLETE': file_regex, 'LEAVE': 'read'}, 'close': {'ENTER': 'close', 'IO_DESTROY_HANDLE': file_regex, 'LEAVE': 'close'}}
    # Slice the textual trace into the region between the two
    # "user_instrumenter:expect io" markers (and the region after it).
    io_trace = ''
    io_trace_after = ''
    in_expected_io = False
    after_expected_io = False
    for line in str(trace).split('\n'):
        if (('user_instrumenter:expect io' in line) and (in_expected_io is False)):
            in_expected_io = True
        elif (('user_instrumenter:expect io' in line) and (in_expected_io is True)):
            in_expected_io = False
            after_expected_io = True
        if in_expected_io:
            io_trace += (line + '\n')
        if after_expected_io:
            io_trace_after += (line + '\n')
    # Every expected event must appear inside the marked I/O region.
    for (_, details) in ops.items():
        for (event, data) in details.items():
            regex_str = '{event:}[ ]*[0-9 ]*[0-9 ]*(Region|Handle): "{data:}"'.format(event=event, data=data)
            print(regex_str)
            assert re.search(regex_str, io_trace)
def register_Ns3DeviceEnergyModel_methods(root_module, cls):
    """Register the Python bindings for ns3::DeviceEnergyModel on ``cls``.

    Generated-style pybindgen registration: declares the copy constructor,
    the default constructor, the public (mostly pure-virtual) API and the
    private virtual DoGetCurrentA hook.  ``root_module`` is unused here but
    kept for uniformity with the sibling register_* functions.
    ``param`` is the pybindgen parameter helper imported elsewhere in this
    module.
    """
    ## Copy constructor: DeviceEnergyModel(DeviceEnergyModel const &)
    cls.add_constructor([param('ns3::DeviceEnergyModel const &', 'arg0')])
    ## Default constructor.
    cls.add_constructor([])
    cls.add_method('ChangeState', 'void', [param('int', 'newState')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetCurrentA', 'double', [], is_const=True)
    cls.add_method('GetTotalEnergyConsumption', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('HandleEnergyDepletion', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('HandleEnergyRecharged', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetEnergySource', 'void', [param('ns3::Ptr< ns3::EnergySource >', 'source')], is_pure_virtual=True, is_virtual=True)
    ## Private virtual implementation hook behind GetCurrentA().
    cls.add_method('DoGetCurrentA', 'double', [], is_const=True, visibility='private', is_virtual=True)
    return
def _get_logger(filename='test_install.log'):
logger = logging.getLogger('test_install.py')
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
file_handler = logging.FileHandler(filename)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger |
class SegformerEncoder(nn.Module):
    """SegFormer hierarchical Transformer encoder.

    Stacks ``config.num_encoder_blocks`` stages, each consisting of an
    overlapping patch embedding, ``config.depths[i]`` Transformer layers and
    a LayerNorm, with stochastic-depth (drop path) rates increasing linearly
    across all layers.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        # Per-layer drop-path rates, linearly spaced from 0 to
        # config.drop_path_rate over the total layer count.
        drop_path_decays = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        # One overlap patch embedding per stage; stage i consumes the hidden
        # size of stage i-1 (raw image channels for stage 0).
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(SegformerOverlapPatchEmbeddings(patch_size=config.patch_sizes[i], stride=config.strides[i], num_channels=(config.num_channels if (i == 0) else config.hidden_sizes[(i - 1)]), hidden_size=config.hidden_sizes[i]))
        self.patch_embeddings = nn.ModuleList(embeddings)
        # Transformer layers grouped per stage; `cur` indexes into the flat
        # drop-path schedule above.
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            layers = []
            if (i != 0):
                cur += config.depths[(i - 1)]
            for j in range(config.depths[i]):
                layers.append(SegformerLayer(config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=drop_path_decays[(cur + j)], sequence_reduction_ratio=config.sr_ratios[i], mlp_ratio=config.mlp_ratios[i]))
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)
        # One post-stage LayerNorm per stage.
        self.layer_norm = nn.ModuleList([nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)])
    def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[(Tuple, BaseModelOutput)]:
        """Run all encoder stages over ``pixel_values``.

        Returns the last hidden state plus (optionally) all per-stage hidden
        states and attention maps, as a tuple or a BaseModelOutput depending
        on ``return_dict``.
        """
        all_hidden_states = (() if output_hidden_states else None)
        all_self_attentions = (() if output_attentions else None)
        batch_size = pixel_values.shape[0]
        hidden_states = pixel_values
        for (idx, x) in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)):
            (embedding_layer, block_layer, norm_layer) = x
            # 1. Embed into a token sequence; also yields the spatial dims.
            (hidden_states, height, width) = embedding_layer(hidden_states)
            # 2. This stage's Transformer layers.
            for (i, blk) in enumerate(block_layer):
                layer_outputs = blk(hidden_states, height, width, output_attentions)
                hidden_states = layer_outputs[0]
                if output_attentions:
                    all_self_attentions = (all_self_attentions + (layer_outputs[1],))
            # 3. Post-stage normalization.
            hidden_states = norm_layer(hidden_states)
            # 4. Reshape back to (batch, channels, H, W) for the next stage's
            # conv embedding; the final stage is reshaped only when
            # config.reshape_last_stage is set.
            if ((idx != (len(self.patch_embeddings) - 1)) or ((idx == (len(self.patch_embeddings) - 1)) and self.config.reshape_last_stage)):
                hidden_states = hidden_states.reshape(batch_size, height, width, (- 1)).permute(0, 3, 1, 2).contiguous()
            if output_hidden_states:
                all_hidden_states = (all_hidden_states + (hidden_states,))
        if (not return_dict):
            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if (v is not None)))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
# BUG FIX(review): the decorator below had lost its `@pytest.mark` prefix
# during extraction (it read `.parametrize(...)`, a SyntaxError); restored.
@pytest.mark.parametrize('task,use_bias', [(task, use_bias) for task in ['binary', 'regression'] for use_bias in [True, False]])
def test_PredictionLayer(task, use_bias):
    """Smoke-test PredictionLayer for every task/use_bias combination via
    the shared Keras layer_test harness."""
    with CustomObjectScope({'PredictionLayer': layers.PredictionLayer}):
        layer_test(layers.PredictionLayer, kwargs={'task': task, 'use_bias': use_bias}, input_shape=(BATCH_SIZE, 1))
def isEqual(account1, account2):
    """Return True iff the two accounts have the same length and identical
    elements position by position."""
    if len(account1) != len(account2):
        return False
    return all(a == b for a, b in zip(account1, account2))
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str]=None, add_base_to_relative: bool=False) -> List[Tuple[(str, str)]]:
    """Walk ``dir_path`` and return (absolute, relative) path pairs for all
    files, pruning directories and files matching any fnmatch pattern in
    ``ignores``.  With ``add_base_to_relative``, the base directory name is
    prepended to each relative path.
    """
    assert os.path.isdir(dir_path)
    base_name = os.path.basename(os.path.normpath(dir_path))
    patterns = [] if ignores is None else ignores
    pairs = []
    for root, dirs, files in os.walk(dir_path, topdown=True):
        for pattern in patterns:
            # Prune matching subdirectories in place so os.walk skips them.
            for skipped in [d for d in dirs if fnmatch.fnmatch(d, pattern)]:
                dirs.remove(skipped)
            files = [f for f in files if not fnmatch.fnmatch(f, pattern)]
        absolute_paths = [os.path.join(root, f) for f in files]
        relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
        if add_base_to_relative:
            relative_paths = [os.path.join(base_name, p) for p in relative_paths]
        assert len(absolute_paths) == len(relative_paths)
        pairs += zip(absolute_paths, relative_paths)
    return pairs
def precision_n(candidate, references, n):
    """Modified (clipped) n-gram precision as used in BLEU.

    Counts candidate n-grams, clips each count by the maximum count seen in
    any reference, and returns (precision, clipped_correct, total).  Relies
    on the module's ``ngram_count``, ``max_count`` and ``min_count`` helpers.
    """
    per_reference = [ngram_count(ref, n) for ref in references]
    reference_max = reduce(max_count, per_reference)
    candidate_counts = ngram_count(candidate, n)
    total = sum(candidate_counts.values())
    clipped = reduce(min_count, (reference_max, candidate_counts))
    correct = sum(clipped.values())
    score = (correct / total) if total else 0
    return (score, correct, total)
# BUG FIX(review): four decorator lines in this test had lost their `@word`
# prefixes during extraction (`.hypothesis_nested`, `_app.route(...)`,
# `(case=strategy)`, `(max_examples=3, ...)` — the last two are SyntaxErrors
# as written).  Restored to the conventional schemathesis test-suite forms:
# `@pytest.mark.hypothesis_nested`, `@flask_app.route`, `@given`, `@settings`.
@pytest.mark.hypothesis_nested
def test_cookies(flask_app):
    """A required cookie parameter declared in the schema must be sent by
    generated test cases and echoed back by the app."""
    @flask_app.route('/cookies', methods=['GET'])
    def cookies():
        return jsonify(request.cookies)

    schema = schemathesis.from_dict({'openapi': '3.0.2', 'info': {'title': 'Test', 'description': 'Test', 'version': '0.1.0'}, 'paths': {'/cookies': {'get': {'parameters': [{'name': 'token', 'in': 'cookie', 'required': True, 'schema': {'type': 'string', 'enum': ['test']}}], 'responses': {'200': {'description': 'OK'}}}}}}, app=flask_app)
    strategy = schema['/cookies']['GET'].as_strategy()

    @given(case=strategy)
    @settings(max_examples=3, suppress_health_check=[HealthCheck.filter_too_much], deadline=None)
    def test(case):
        response = case.call_wsgi()
        assert (response.status_code == 200)
        assert (response.json == {'token': 'test'})

    test()
def config_parser():
    """Build and return the configargparse parser for NeROIC training/testing.

    FIX: four ``add_argument`` calls were pointlessly wrapped in one-element
    tuples (``(parser.add_argument(...),)``) — unwrapped; behavior is
    unchanged.  NOTE(review): ``--decay_epoch`` uses ``nargs='+'`` but a
    scalar default (10) — callers should confirm whether a list default was
    intended.
    """
    import configargparse
    parser = configargparse.ArgumentParser()
    # --- general / experiment ---
    parser.add_argument('--config', is_config_file=True, help='config file path')
    parser.add_argument('--expname', type=str, help='experiment name')
    parser.add_argument('--basedir', type=str, default='./logs/', help='where to store ckpts and logs')
    parser.add_argument('--datadir', type=str, default='./data/llff/fern', help='input data directory')
    parser.add_argument('--num_epochs', type=int, default=200, help='train how many epoches')
    parser.add_argument('--num_gpus', type=int, default=1, help='use how many gpus')
    parser.add_argument('--white_bkgd', action='store_true', help='set to render synthetic data on a white bkgd (always use for dvoxels)')
    parser.add_argument('--debug_green_bkgd', action='store_true', help='set to render synthetic data on a green bkgd (need to set white bkgd true first)')
    # --- model architecture ---
    parser.add_argument('--model', type=str, choices=['NeROIC'], default='NeROIC', help='name of the model')
    parser.add_argument('--model_type', type=str, choices=['geometry', 'rendering'], required=True, help='Stage(Type) of the model')
    parser.add_argument('--netdepth', type=int, default=8, help='layers in network')
    parser.add_argument('--netwidth', type=int, default=256, help='channels per layer')
    parser.add_argument('--netdepth_fine', type=int, default=8, help='layers in fine network')
    parser.add_argument('--netwidth_fine', type=int, default=256, help='channels per layer in fine network')
    parser.add_argument('--use_viewdirs', action='store_true', help='use full 5D input instead of 3D')
    parser.add_argument('--i_embed', type=int, default=0, help='set 0 for default positional encoding, -1 for none')
    parser.add_argument('--multires', type=int, default=10, help='log2 of max freq for positional encoding (3D location)')
    parser.add_argument('--multires_views', type=int, default=4, help='log2 of max freq for positional encoding (2D direction)')
    parser.add_argument('--N_samples', type=int, default=64, help='number of coarse samples per ray')
    parser.add_argument('--N_importance', type=int, default=0, help='number of additional fine samples per ray')
    parser.add_argument('--perturb', type=float, default=1.0, help='set to 0. for no jitter, 1. for jitter')
    parser.add_argument('--raw_noise_std', type=float, default=0.0, help='std dev of noise added to regularize sigma_a output, 1e0 recommended')
    parser.add_argument('--encode_appearance', action='store_true', help='train nerf-w with appearance encoding')
    parser.add_argument('--encode_transient', action='store_true', help='train nerf-w with transient encoding')
    parser.add_argument('--N_vocab', type=int, default=1000, help='number of vocabulary (number of images) \n in the dataset for nn.Embedding')
    parser.add_argument('--N_a', type=int, default=48, help='number of embeddings for appearance')
    parser.add_argument('--N_tau', type=int, default=16, help='number of embeddings for transient objects')
    parser.add_argument('--beta_min', type=float, default=0.1, help='minimum color variance for each ray')
    parser.add_argument('--optimize_camera', action='store_true', help='optimize camera at the same time')
    parser.add_argument('--normal_smooth_alpha', type=float, default=1.0, help='smoothing parameter for normal extraction')
    parser.add_argument('--use_expected_depth', action='store_true', help='if specified, use expected depth instead of all sample points for sh model')
    parser.add_argument('--min_glossiness', type=float, default=1.0, help='minimum glossiness of BRDF')
    parser.add_argument('--use_specular', action='store_true', help='use specular shading in rendering')
    parser.add_argument('--transient_lerp_mode', action='store_true', help='use lerp to blend transient rgb and static rgb')
    # --- optimization ---
    parser.add_argument('--N_rand', type=int, default=((32 * 32) * 4), help='batch size (number of random rays per gradient step)')
    parser.add_argument('--lrate', type=float, default=0.0005, help='learning rate')
    parser.add_argument('--scheduler', type=str, choices=['cosine', 'multistep'], default='cosine', help='type of scheduler')
    parser.add_argument('--decay_epoch', type=int, nargs='+', default=10, help='epochs that needed for decaying')
    parser.add_argument('--decay_gamma', type=float, default=0.1, help='gamma value of lr decaying')
    parser.add_argument('--chunk', type=int, default=(1024 * 32), help='number of rays processed in parallel, decrease if running out of memory')
    parser.add_argument('--netchunk', type=int, default=(1024 * 32), help='number of pts sent through network in parallel, decrease if running out of memory')
    parser.add_argument('--ft_path', type=str, default=None, help='specific weights npy file to reload for coarse network')
    parser.add_argument('--load_prior', action='store_true', help='is specified, load model from ft_path as a prior, then train from scratch')
    # --- testing ---
    parser.add_argument('--test_img_id', type=int, default=0, help='the id (of transient and lighting) used for testing image')
    parser.add_argument('--test_split', type=str, choices=['val', 'testtrain'], default='testtrain', help='which split of poses is tested')
    parser.add_argument('--test_optimize_steps', type=int, default=0, help='Steps for lighting/camera optimization during testing')
    parser.add_argument('--test_env_filename', type=str, default='', help='path of the testing environment map')
    parser.add_argument('--test_env_maxthres', type=float, default=20, help='maximum radiance of the env map')
    # --- loss weights ---
    parser.add_argument('--lambda_sil', type=float, default=0, help='weight of silhouette loss')
    parser.add_argument('--lambda_tr', type=float, default=0.01, help='weight of transient regularity loss')
    parser.add_argument('--lambda_cam', type=float, default=0.01, help='weight of camera loss')
    parser.add_argument('--lambda_n', type=float, default=0, help='weight of normal loss')
    parser.add_argument('--lambda_smooth_n', type=float, default=0.5, help='weight of normal smoothiness loss')
    parser.add_argument('--lambda_spec', type=float, default=0, help='weight of specular regularity loss')
    parser.add_argument('--lambda_light', type=float, default=5, help='weight of light positive regularizaion loss')
    # --- dataset ---
    parser.add_argument('--dataset_type', type=str, choices=['llff', 'nerd_real'], default='llff', help='options: llff, nerd_real')
    parser.add_argument('--test_intv', type=int, default=8, help='will take every 1/N images as test set.')
    parser.add_argument('--test_offset', type=int, default=1, help='index of the first test image')
    parser.add_argument('--train_limit', type=int, default=(- 1), help='the limitation of training images')
    parser.add_argument('--multiple_far', type=float, default=1.2, help='multiple of far distance')
    parser.add_argument('--have_mask', action='store_true', help='if the dataset contains mask')
    parser.add_argument('--mask_ratio', type=float, default=0, help='ratio between foreground/background rays (fg:bg = 1:N)')
    parser.add_argument('--rays_path', type=str, default='', help='cached rays file(with normal, etc.)')
    parser.add_argument('--test_resolution', type=int, default=(- 1), help='resolution of the testing images. If set to -1, use the maximum resolution of training images')
    parser.add_argument('--factor', type=int, default=8, help='downsample factor for LLFF images')
    parser.add_argument('--width', type=int, default=0, help='downsample width for LLFF images. Dafault set to 0 (no downsampling)')
    parser.add_argument('--lindisp', action='store_true', help='sampling linearly in disparity rather than depth')
    parser.add_argument('--use_bbox', action='store_true', help='use bounding box of the point cloud from SfM')
    # --- logging / saving frequencies ---
    parser.add_argument('--i_testset', type=int, default=(- 1), help='frequency of testset saving. -1 means test on the end of every epoch')
    parser.add_argument('--i_testepoch', type=int, default=1, help='frequency of testset saving related to epoch')
    parser.add_argument('--i_video', type=int, default=50000, help='frequency of test video saving')
    parser.add_argument('--i_traintest', type=int, default=50000, help='frequency of testing one train poses')
    parser.add_argument('--N_test_pose', type=int, default=12, help='number of test poses')
    parser.add_argument('--verbose', action='store_true', help='output additional infos')
    return parser
class NullOptimizer(Optimizer):
    """No-op optimizer: every method invoked on it silently does nothing."""

    def __init__(self):
        super().__init__(None)

    def construct_from_pytorch(self, model_params):
        # Nothing to build; return self to keep the fluent interface.
        return self

    def __getattr__(self, item):
        # Any attribute not found normally resolves to a do-nothing callable.
        def _noop(*args, **kwargs):
            return None
        return _noop
def main(args):
    """Train a TD3 policy on a dm_control task (via dmc2gym) and periodically evaluate.

    Side effects: creates log/result/model directories, saves evaluation arrays
    with np.save, and optionally saves policy checkpoints.
    """
    # File stem used for both result arrays and model checkpoints.
    file_name = f'{args.policy}_{args.domain_name}_{args.seed}'
    print('')
    print(f'Policy: {args.policy}, Env: {args.domain_name}, Seed: {args.seed}')
    print('')
    log_path = safe_path(os.path.join(args.log_root, '{}_{}_damp1'.format(args.domain_name, args.task_name)))
    result_path = safe_path(os.path.join(log_path, 'results'))
    model_path = safe_path(os.path.join(log_path, 'models'))
    # NOTE(review): the env is built with seed=0 while env.seed(args.seed) is
    # called below — confirm whether dmc2gym's construction seed should also
    # be args.seed.
    env = dmc2gym.make(domain_name=args.domain_name, task_name=args.task_name, seed=0, visualize_reward=False, from_pixels=False, height=256, width=256, frame_skip=args.frame_skip)
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    kwargs = {'state_dim': state_dim, 'action_dim': action_dim, 'max_action': max_action, 'discount': args.discount, 'tau': args.tau}
    if (args.policy == 'TD3'):
        # TD3-specific target-policy smoothing parameters, scaled to action range.
        kwargs['policy_noise'] = (args.policy_noise * max_action)
        kwargs['noise_clip'] = (args.noise_clip * max_action)
        kwargs['policy_freq'] = args.policy_freq
    policy = TD3.TD3(**kwargs)
    replay_buffer = utils.ReplayBuffer(state_dim, action_dim)
    # Evaluate the untrained policy once so evaluations[0] is the baseline.
    evaluations = [eval_policy(policy, env, args.seed)]
    (state, done) = (env.reset(), False)
    episode_reward = 0
    episode_timesteps = 0
    episode_num = 0
    for t in range(int(args.max_timesteps)):
        episode_timesteps += 1
        if (t < args.start_timesteps):
            # Warm-up phase: act randomly to fill the replay buffer.
            action = env.action_space.sample()
        else:
            # Exploration: policy action plus clipped Gaussian noise.
            action = (policy.select_action(np.array(state)) + np.random.normal(0, (max_action * args.expl_noise), size=action_dim)).clip((- max_action), max_action)
        (next_state, reward, done, _) = env.step(action)
        # Treat time-limit terminations as non-terminal (done_bool = 0) so the
        # critic does not bootstrap incorrectly at the episode horizon.
        done_bool = (float(done) if (episode_timesteps < env._max_episode_steps) else 0)
        replay_buffer.add(state, action, next_state, reward, done_bool)
        state = next_state
        episode_reward += reward
        if (t >= args.start_timesteps):
            policy.train(replay_buffer, args.batch_size)
        if done:
            print(f'Total T: {(t + 1)} Episode Num: {(episode_num + 1)} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}')
            (state, done) = (env.reset(), False)
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1
        if (((t + 1) % args.eval_freq) == 0):
            # Periodic evaluation; results are overwritten on disk each time.
            evaluations.append(eval_policy(policy, env, args.seed))
            np.save(os.path.join(result_path, '{}'.format(file_name)), evaluations)
            if args.save_model:
                policy.save(os.path.join(model_path, '{}'.format(file_name)))
_function
def power(f, k):
    """Compute ``f ** k`` for a positive integer ``k`` by binary exponentiation.

    ``b`` holds the binary digits of ``k``, least-significant first.  When ``k``
    is a power of two the result is obtained by repeated squaring; otherwise the
    product of the cached power-of-two factors is taken.
    """
    if (k == 1):
        return f
    b = [int(a) for a in reversed(ZZ(k).binary())]
    if (sum(b) == 1):
        # k is an exact power of two.
        if (b[1] == 1):
            return (f ** 2)
        # BUG FIX: use floor division so the recursive exponent stays an
        # integer; the original `/` is Python 3 true division and produced a
        # non-integer (2**m / 2) exponent.
        return (power(f, ((2 ** b.index(1)) // 2)) ** 2)
    # General case: multiply the powers of two selected by the set bits.
    return prod((power(f, (2 ** i)) for (i, a) in enumerate(b) if a))
def get_loader(config):
    """Build train/test DataLoaders for SVHN and MNIST.

    Returns a tuple ``(svhn_loader, mnist_loader, svhn_test_loader,
    mnist_test_loader)``; training loaders optionally include augmentation.
    """
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    transform_list = []
    if config.use_augmentation:
        transform_list.append(transforms.RandomHorizontalFlip())
        # NOTE(review): RandomRotation(0.1) rotates by at most 0.1 *degrees*,
        # which is nearly a no-op — confirm whether e.g. 10 degrees was intended.
        transform_list.append(transforms.RandomRotation(0.1))
    # FIX: transforms.Scale was deprecated and later removed from torchvision;
    # transforms.Resize is the drop-in replacement with identical behavior.
    transform_list.append(transforms.Resize(config.image_size))
    transform_list.append(transforms.ToTensor())
    transform_list.append(normalize)
    transform_test = transforms.Compose([transforms.Resize(config.image_size), transforms.ToTensor(), normalize])
    transform_train = transforms.Compose(transform_list)
    svhn = datasets.SVHN(root=config.svhn_path, download=True, transform=transform_train, split='train')
    mnist = datasets.MNIST(root=config.mnist_path, download=True, transform=transform_train, train=True)
    svhn_test = datasets.SVHN(root=config.svhn_path, download=True, transform=transform_test, split='test')
    mnist_test = datasets.MNIST(root=config.mnist_path, download=True, transform=transform_test, train=False)
    svhn_loader = torch.utils.data.DataLoader(dataset=svhn, batch_size=config.batch_size, shuffle=config.shuffle, num_workers=config.num_workers)
    mnist_loader = torch.utils.data.DataLoader(dataset=mnist, batch_size=config.batch_size, shuffle=config.shuffle, num_workers=config.num_workers)
    # Test loaders are never shuffled so evaluation order is deterministic.
    svhn_test_loader = torch.utils.data.DataLoader(dataset=svhn_test, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
    mnist_test_loader = torch.utils.data.DataLoader(dataset=mnist_test, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
    return (svhn_loader, mnist_loader, svhn_test_loader, mnist_test_loader)
def vae_loss_function(recon_x, x, mu, logvar):
    """VAE loss: summed squared reconstruction error plus the analytic KL
    divergence from N(mu, exp(logvar)) to N(0, I), averaged over the batch."""
    recon_err = ((recon_x - x) ** 2).sum(-1)
    kl_div = (-0.5) * ((1 + logvar) - mu.pow(2) - logvar.exp()).sum(-1)
    return (recon_err + kl_div).mean()
def dist_location(dist):
    """Return the normalized install location of *dist*, preferring the
    egg-link path for develop-mode installs when one exists."""
    return normalize_path(egg_link_path(dist) or dist.location)
def load_index(input_path):
    """Load a two-column whitespace-separated file into index dictionaries.

    Each line is ``"<token> <ignored>"``; the token is mapped to its 0-based
    line number.  Returns ``(index, rev_index)`` where ``rev_index`` is the
    inverse mapping.
    """
    (index, rev_index) = ({}, {})
    with open(input_path) as f:
        # Iterate the file lazily instead of materializing it with readlines().
        for (i, line) in enumerate(f):
            (v, _) = line.strip().split()
            index[v] = i
            rev_index[i] = v
    return (index, rev_index)
class ModelExpEmbAttn(ModelTemplate):
    """Sentence-pair model: shared token embeddings, a traditional-attention
    sentence encoder applied to both sentences with shared weights, and an
    MLP classifier over the [s1; s2; s1-s2; s1*s2] feature vector."""

    def __init__(self, token_emb_mat, glove_emb_mat, tds, cds, tl, scope):
        super(ModelExpEmbAttn, self).__init__(token_emb_mat, glove_emb_mat, tds, cds, tl, scope)
        self.update_tensor_add_ema_and_opt()

    def build_network(self):
        """Build the TF graph and return the classification logits tensor."""
        _logger.add()
        _logger.add(('building %s neural network structure...' % cfg.network_type))
        (tds, cds) = (self.tds, self.cds)
        tl = self.tl
        (tel, cel, cos, ocd, fh) = (self.tel, self.cel, self.cos, self.ocd, self.fh)
        hn = self.hn
        (bs, sl1, sl2) = (self.bs, self.sl1, self.sl2)
        with tf.variable_scope('emb'):
            token_emb_mat = generate_embedding_mat(tds, tel, init_mat=self.token_emb_mat, extra_mat=self.glove_emb_mat, extra_trainable=self.finetune_emb, scope='gene_token_emb_mat')
            s1_emb = tf.nn.embedding_lookup(token_emb_mat, self.sent1_token)
            s2_emb = tf.nn.embedding_lookup(token_emb_mat, self.sent2_token)
            self.tensor_dict['s1_emb'] = s1_emb
            self.tensor_dict['s2_emb'] = s2_emb
        with tf.variable_scope('sent_enc_attn'):
            s1_rep = traditional_attention(s1_emb, self.sent1_token_mask, 'traditional_attention', cfg.dropout, self.is_train, cfg.wd, tensor_dict=self.tensor_dict, name='s1_attn')
            # Share encoder weights between the two sentences.
            tf.get_variable_scope().reuse_variables()
            s2_rep = traditional_attention(s2_emb, self.sent2_token_mask, 'traditional_attention', cfg.dropout, self.is_train, cfg.wd, tensor_dict=self.tensor_dict, name='s2_attn')
            self.tensor_dict['s1_rep'] = s1_rep
            self.tensor_dict['s2_rep'] = s2_rep
        with tf.variable_scope('output'):
            out_rep = tf.concat([s1_rep, s2_rep, (s1_rep - s2_rep), (s1_rep * s2_rep)], (- 1))
            pre_output = tf.nn.elu(linear([out_rep], hn, True, 0.0, scope='pre_output', squeeze=False, wd=cfg.wd, input_keep_prob=cfg.dropout, is_train=self.is_train))
            logits = linear([pre_output], self.output_class, True, 0.0, scope='logits', squeeze=False, wd=cfg.wd, input_keep_prob=cfg.dropout, is_train=self.is_train)
            # BUG FIX: the original used the tensor itself as the dict key
            # (self.tensor_dict[logits] = logits); key by name like the
            # other entries above.
            self.tensor_dict['logits'] = logits
        return logits
class MeanPool(nn.Module):
    """Pools a tensor by averaging over its second-to-last dimension."""

    def __init__(self, cfg):
        super().__init__()
        # Config is stored for interface parity; forward() does not read it.
        self.cfg = cfg

    def forward(self, x):
        # Average across the penultimate axis (e.g. sequence length).
        return x.mean(dim=-2)
def _asarray_square(A):
A = np.asarray(A)
if ((len(A.shape) != 2) or (A.shape[0] != A.shape[1])):
raise ValueError('expected square array_like input')
return A |
class JSONWriter(EventWriter):
    """Writes scalars from the EventStorage to a file, one JSON object per line.

    Opens in append mode so repeated runs extend the same log file.
    """

    def __init__(self, json_file, window_size=20):
        self._file_handle = PathManager.open(json_file, 'a')
        self._window_size = window_size
        # Highest iteration already written; used to avoid duplicate lines.
        self._last_write = -1

    def write(self):
        storage = get_event_storage()
        latest = storage.latest_with_smoothing_hint(self._window_size)
        # Group new scalars by iteration, skipping anything already written.
        pending = {}
        for key, (value, it) in latest.items():
            if it <= self._last_write:
                continue
            pending.setdefault(it, {})[key] = value
        if pending:
            self._last_write = max(pending)
        for it, scalars in pending.items():
            scalars['iteration'] = it
            self._file_handle.write(json.dumps(scalars, sort_keys=True) + '\n')
        self._file_handle.flush()
        try:
            os.fsync(self._file_handle.fileno())
        except AttributeError:
            # Some file-like objects (e.g. remote streams) have no fileno().
            pass

    def close(self):
        self._file_handle.close()
def generator_midinet(image, options, reuse=False, name='generator'):
    """MidiNet-style generator: two FC layers, a reshape to a 4-D feature map,
    then stacked transposed convolutions ending in tanh output."""
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            # Fresh scope must not silently reuse existing variables.
            assert (tf.get_variable_scope().reuse is False)
        net = tf.nn.relu(batch_norm(linear(image, (options.df_dim * 16), 'g_h0_lin'), name='g_h0_lin_bn'))
        net = tf.nn.relu(batch_norm(linear(net, (options.df_dim * 8), 'g_h1_lin'), name='g_h1_lin_bn'))
        net = tf.reshape(net, [options.batch_size, 2, 1, (options.gf_dim * 4)])
        net = tf.nn.relu(batch_norm(deconv2d(net, (options.df_dim * 2), [4, 1], [4, 1], name='g_h5_conv'), name='g_h5_conv_bn'))
        net = tf.nn.relu(batch_norm(deconv2d(net, (options.df_dim * 2), [4, 1], [4, 1], name='g_h6_conv'), name='g_h6_conv_bn'))
        net = tf.nn.relu(batch_norm(deconv2d(net, (options.df_dim * 2), [4, 1], [4, 1], name='g_h7_conv'), name='g_h7_conv_bn'))
        return tf.nn.tanh(batch_norm(deconv2d(net, options.output_c_dim, [1, 64], [1, 64], name='g_h8_conv'), name='g_h8_conv_bn'))
def screen_diversity(content_values, bins):
    """Shannon entropy (base 2) of the histogram of *content_values* over [-1, 1]."""
    counts, _edges = np.histogram(content_values, range=(-1, 1), bins=bins)
    # Laplace smoothing (+1) avoids zero-probability bins in the entropy.
    return stats.entropy(counts + 1, base=2)
class InferConfig():
    """Settings bag for running inference, declared with attrs fields.

    NOTE(review): an ``attr.s``/``attrs`` class decorator appears to have been
    stripped from this file; without it the ``attr.ib`` declarations are not
    materialized into a real __init__ — confirm against the original source.
    """
    config = attr.ib()  # experiment configuration object
    config_args = attr.ib()  # extra arguments used to build the config
    logdir = attr.ib()  # directory holding checkpoints/logs
    section = attr.ib()  # dataset section to run on (e.g. dev/test)
    beam_size = attr.ib()  # beam width for decoding
    output = attr.ib()  # path for the inference output
    step = attr.ib()  # checkpoint step to load
    use_heuristic = attr.ib(default=False)  # enable heuristic decoding tweaks
    mode = attr.ib(default='infer')  # inference mode selector
    limit = attr.ib(default=None)  # optional cap on number of examples
    output_history = attr.ib(default=False)  # also emit decoding history
def is_supported(method):
    """Return the object's ``is_supported`` flag, defaulting to True when absent."""
    return getattr(method, 'is_supported', True)
class Partition7(nn.Module):
    """Pipeline-parallel partition of a T5 model: encoder blocks 21-23 plus the
    final encoder layer norm, pinned to one device.

    This class is auto-generated by a model-partitioning tool: ``forward``
    replays a traced sequence of tensor ops, so the statement order and the
    aggressive variable reuse (t_0..t_7) are intentional — do not reorder.
    """
    # Fully-qualified trace scopes of the submodules owned by this partition.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]']
    # No free-standing parameters/buffers are assigned to this partition.
    TENSORS = []
    def __init__(self, layers, tensors, device='cuda:7'):
        super().__init__()
        # Register the owned layers under short names l_0 .. l_33.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register any partition-local tensors as parameters (p_i) or buffers (b_i).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Two pipeline inputs: hidden states and the additive attention bias/mask.
        self.input_structure = [1, 1]
        # Maps the short module names back to the original T5 state-dict keys.
        self.lookup = {'l_0': 'encoder.block.21.layer.0.layer_norm', 'l_1': 'encoder.block.21.layer.0.SelfAttention.q', 'l_2': 'encoder.block.21.layer.0.SelfAttention.k', 'l_3': 'encoder.block.21.layer.0.SelfAttention.v', 'l_4': 'encoder.block.21.layer.0.SelfAttention.o', 'l_5': 'encoder.block.21.layer.0.dropout', 'l_6': 'encoder.block.21.layer.1.layer_norm', 'l_7': 'encoder.block.21.layer.1.DenseReluDense.wi', 'l_8': 'encoder.block.21.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.21.layer.1.DenseReluDense.wo', 'l_10': 'encoder.block.21.layer.1.dropout', 'l_11': 'encoder.block.22.layer.0.layer_norm', 'l_12': 'encoder.block.22.layer.0.SelfAttention.q', 'l_13': 'encoder.block.22.layer.0.SelfAttention.k', 'l_14': 'encoder.block.22.layer.0.SelfAttention.v', 'l_15': 'encoder.block.22.layer.0.SelfAttention.o', 'l_16': 'encoder.block.22.layer.0.dropout', 'l_17': 'encoder.block.22.layer.1.layer_norm', 'l_18': 'encoder.block.22.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.22.layer.1.DenseReluDense.dropout', 'l_20': 'encoder.block.22.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.22.layer.1.dropout', 'l_22': 'encoder.block.23.layer.0.layer_norm', 'l_23': 'encoder.block.23.layer.0.SelfAttention.q', 'l_24': 'encoder.block.23.layer.0.SelfAttention.k', 'l_25': 'encoder.block.23.layer.0.SelfAttention.v', 'l_26': 'encoder.block.23.layer.0.SelfAttention.o', 'l_27': 'encoder.block.23.layer.0.dropout', 'l_28': 'encoder.block.23.layer.1.layer_norm', 'l_29': 'encoder.block.23.layer.1.DenseReluDense.wi', 'l_30': 'encoder.block.23.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.23.layer.1.DenseReluDense.wo', 'l_32': 'encoder.block.23.layer.1.dropout', 'l_33': 'encoder.final_layer_norm'}
        self.to(self.device)
    def forward(self, *args):
        # x0: hidden states entering block 21; x1: additive attention bias/mask.
        # The view(..., 32, 128) calls assume 32 heads x 128 head-dim = 4096 hidden.
        (x0, x1) = unflatten(args, self.input_structure)
        # ---- encoder block 21: self-attention sub-layer ----
        t_0 = self.l_0(x0)
        t_1 = self.l_1(t_0)
        t_2 = self.l_2(t_0)
        t_3 = self.l_3(t_0)
        t_0 = t_0.shape
        t_0 = t_0[slice(None, 2, None)]
        t_0 = t_0[0]
        t_1 = t_1.view(t_0, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_0, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_0, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += x1
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_0 = t_3.view(t_0, (- 1), 4096)
        t_0 = self.l_4(t_0)
        t_3 = self.l_5(t_0)
        t_3 = (x0 + t_3)
        t_0 = (t_0, None, x1)
        t_2 = t_0[0]
        t_3 = (t_3,)
        t_0 = t_0[slice(1, None, None)]
        t_0 = (t_3 + t_0)
        # ---- encoder block 21: feed-forward sub-layer ----
        t_3 = t_0[slice(None, 2, None)]
        t_1 = t_3[0]
        t_4 = self.l_6(t_1)
        t_3 = t_3[1]
        t_0 = t_0[slice(2, None, None)]
        t_4 = self.l_7(t_4)
        t_4 = torch.nn.functional.relu(t_4, inplace=False)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_4 = self.l_10(t_4)
        t_4 = (t_1 + t_4)
        t_3 = (t_4, t_3)
        t_0 = (t_3 + t_0)
        # ---- encoder block 22: self-attention sub-layer ----
        t_3 = t_0[slice(None, 2, None)]
        t_3 = t_3[0]
        t_4 = self.l_11(t_3)
        t_0 = t_0[2]
        t_1 = self.l_12(t_4)
        t_5 = self.l_13(t_4)
        t_6 = self.l_14(t_4)
        t_4 = t_4.shape
        t_4 = t_4[slice(None, 2, None)]
        t_4 = t_4[0]
        t_1 = t_1.view(t_4, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_5 = t_5.view(t_4, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_4, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_1, t_5)
        t_5 += t_0
        t_1 = t_5.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_5 = t_1.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_4 = t_6.view(t_4, (- 1), 4096)
        t_4 = self.l_15(t_4)
        t_6 = self.l_16(t_4)
        t_6 = (t_3 + t_6)
        t_0 = (t_4, None, t_0)
        t_4 = t_0[0]
        t_6 = (t_6,)
        t_0 = t_0[slice(1, None, None)]
        t_0 = (t_6 + t_0)
        # ---- encoder block 22: feed-forward sub-layer ----
        t_6 = t_0[slice(None, 2, None)]
        t_3 = t_6[0]
        t_5 = self.l_17(t_3)
        t_6 = t_6[1]
        t_0 = t_0[slice(2, None, None)]
        t_5 = self.l_18(t_5)
        t_5 = torch.nn.functional.relu(t_5, inplace=False)
        t_5 = self.l_19(t_5)
        t_5 = self.l_20(t_5)
        t_5 = self.l_21(t_5)
        t_5 = (t_3 + t_5)
        t_6 = (t_5, t_6)
        t_0 = (t_6 + t_0)
        # ---- encoder block 23: self-attention sub-layer ----
        t_6 = t_0[slice(None, 2, None)]
        t_6 = t_6[0]
        t_5 = self.l_22(t_6)
        t_0 = t_0[2]
        t_3 = self.l_23(t_5)
        t_1 = self.l_24(t_5)
        t_7 = self.l_25(t_5)
        t_5 = t_5.shape
        t_5 = t_5[slice(None, 2, None)]
        t_5 = t_5[0]
        t_3 = t_3.view(t_5, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_1 = t_1.view(t_5, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_7 = t_7.view(t_5, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_3, t_1)
        t_1 += t_0
        t_3 = t_1.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None)
        t_1 = t_3.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False)
        t_7 = torch.matmul(t_1, t_7)
        t_7 = t_7.transpose(1, 2)
        t_7 = t_7.contiguous()
        t_5 = t_7.view(t_5, (- 1), 4096)
        t_5 = self.l_26(t_5)
        t_7 = self.l_27(t_5)
        t_7 = (t_6 + t_7)
        t_0 = (t_5, None, t_0)
        t_5 = t_0[0]
        t_7 = (t_7,)
        t_0 = t_0[slice(1, None, None)]
        t_0 = (t_7 + t_0)
        # ---- encoder block 23: feed-forward sub-layer ----
        t_7 = t_0[slice(None, 2, None)]
        t_6 = t_7[0]
        t_1 = self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[slice(2, None, None)]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = (t_6 + t_1)
        t_7 = (t_1, t_7)
        t_0 = (t_7 + t_0)
        # ---- final encoder layer norm ----
        t_7 = t_0[slice(None, 2, None)]
        t_7 = t_7[0]
        t_7 = self.l_33(t_7)
        t_0 = t_0[2]
        return (t_7,)
    # The following delegate to partition-aware module helpers defined elsewhere
    # in the generated package (state_dict/load_state_dict/... free functions).
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)
    def cpu(self):
        return cpu(self)
    def cuda(self, device=None):
        return cuda(self, device=device)
    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class TestModelFromPaper1Config():
    """Regression tests for a SimulationState built from the paper-1 TARDIS config."""
    # NOTE(review): the line below looks like a stripped ``@pytest.fixture``
    # decorator — confirm against the original source.
    (autouse=True)
    def setup(self, example_configuration_dir, atomic_dataset):
        # Build the config and simulation state once per test (autouse fixture).
        self.config = Configuration.from_yaml((example_configuration_dir / 'paper1_tardis_configv1.yml'))
        self.simulation_state = SimulationState.from_config(self.config, atom_data=atomic_dataset)
    def test_abundances(self):
        # Oxygen (Z=8) abundance must match across all shells.
        oxygen_abundance = self.config.model.abundances.O
        assert_array_almost_equal(oxygen_abundance, self.simulation_state.abundance.loc[8].values)
    def test_velocities(self):
        # Inner/outer shell velocities must match the configured range and count.
        velocity = self.config.model.structure.velocity
        assert_almost_equal(velocity.start.cgs.value, self.simulation_state.v_inner[0].cgs.value)
        assert_almost_equal(velocity.stop.cgs.value, self.simulation_state.v_outer[(- 1)].cgs.value)
        assert (len(self.simulation_state.v_outer) == velocity.num)
    def test_densities(self):
        # Spot-check the first and last shell densities in cgs units.
        assert_almost_equal(self.simulation_state.density[0].cgs.value, (7.e-14 * u.Unit('g/cm^3')).value)
        assert_almost_equal(self.simulation_state.density[(- 1)].cgs.value, (1.e-15 * u.Unit('g/cm^3')).value)
    def test_time_explosion(self):
        assert_almost_equal(self.simulation_state.time_explosion.to(u.day).value, 13.0)
class NonNegativeIntegers(UniqueRepresentation, Parent):
    """The graded set of non-negative integers, as a facade over ZZ."""

    def __init__(self):
        Parent.__init__(self, category=SetsWithGrading().Infinite(), facade=IntegerRing())

    def an_element(self):
        # The smallest non-negative integer.
        return 0

    def _repr_(self):
        return 'Non negative integers'

    def graded_component(self, grade):
        # Each grade contains exactly one integer: the grade itself.
        return FiniteEnumeratedSet([grade])

    def grading(self, elt):
        # An integer is its own grade.
        return elt

    def generating_series(self, var='z'):
        """Return 1/(1 - z): one element in each degree."""
        from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
        from sage.rings.integer import Integer
        ring = PolynomialRing(IntegerRing(), var)
        gen = ring.gen()
        one = Integer(1)
        return one / (one - gen)
def _optimal_transportation_distance(x, y, d):
    """Exact Wasserstein distance between histograms *x* and *y* under cost matrix *d*."""
    start = time.time()
    distance = ot.emd2(x, y, d)
    logger.debug(('%8f secs for Wasserstein dist. \t#source_nbr: %d, #target_nbr: %d' % ((time.time() - start), len(x), len(y))))
    return distance
# NOTE(review): the call below looks like a stripped ``@validate_params``
# decorator — confirm against the original sklearn source.
_params({'data_home': [str, PathLike, None], 'shuffle': ['boolean'], 'random_state': ['random_state'], 'download_if_missing': ['boolean'], 'return_X_y': ['boolean']}, prefer_skip_nested_validation=True)
def fetch_olivetti_faces(*, data_home=None, shuffle=False, random_state=0, download_if_missing=True, return_X_y=False):
    """Load the Olivetti faces dataset, downloading and caching it if needed.

    Returns (data, target) when return_X_y is True, else a Bunch with
    flattened data, 64x64 images, targets, and the dataset description.
    """
    data_home = get_data_home(data_home=data_home)
    if (not exists(data_home)):
        makedirs(data_home)
    filepath = _pkl_filepath(data_home, 'olivetti.pkz')
    if (not exists(filepath)):
        # No local cache: download the .mat file, convert, and cache as joblib.
        if (not download_if_missing):
            raise OSError('Data not found and `download_if_missing` is False')
        print(('downloading Olivetti faces from %s to %s' % (FACES.url, data_home)))
        mat_path = _fetch_remote(FACES, dirname=data_home)
        mfile = loadmat(file_name=mat_path)
        remove(mat_path)
        faces = mfile['faces'].T.copy()
        joblib.dump(faces, filepath, compress=6)
        del mfile
    else:
        faces = joblib.load(filepath)
    # Scale pixel values to [0, 1] as float32.
    faces = np.float32(faces)
    faces = (faces - faces.min())
    faces /= faces.max()
    # Reshape to (400, 64, 64); transpose fixes the MATLAB column-major layout.
    faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)
    # 10 consecutive images per subject -> integer-divide index by 10.
    target = np.array([(i // 10) for i in range(400)])
    if shuffle:
        random_state = check_random_state(random_state)
        order = random_state.permutation(len(faces))
        faces = faces[order]
        target = target[order]
    faces_vectorized = faces.reshape(len(faces), (- 1))
    fdescr = load_descr('olivetti_faces.rst')
    if return_X_y:
        return (faces_vectorized, target)
    return Bunch(data=faces_vectorized, images=faces, target=target, DESCR=fdescr)
def GetHitsMP_PNGraph(Graph, NIdHubH, NIdAuthH, MaxIter=20):
    """Multi-threaded HITS on a directed graph (thin SWIG wrapper).

    Fills the NIdHubH / NIdAuthH hash tables in place with hub and authority
    scores; delegates entirely to the native _snap implementation.
    """
    return _snap.GetHitsMP_PNGraph(Graph, NIdHubH, NIdAuthH, MaxIter)
class GradientDescent(GradientOptimizer):
    """Plain (optionally gradient-normalized) gradient descent on a parametrization."""

    def __init__(self, objective: OptimizationFunction, parametrization: Parametrization, learning_rate: float, normalize_gradient: bool=False):
        super().__init__()
        # Step size for each descent update.
        self.alpha = learning_rate
        self.objective = objective
        self.param = parametrization
        self.normalize_gradient = normalize_gradient
        if self.normalize_gradient:
            logger.warning('Normalizing gradient: Convergence not guaranteed.')

    def iterate(self):
        """Take one descent step on the encoded parameters, then project."""
        logger.debug('Iterating...')
        gradient = self.objective.calculate_gradient(self.param)
        gradient_norm = np.linalg.norm(gradient)
        if self.normalize_gradient:
            # In-place division, matching the original side effect.
            gradient /= gradient_norm
        updated = self.param.encode() - self.alpha * gradient
        self.param.decode(updated)
        self.param.project()
        logger.debug(('Performed gradient descent step with step size {0} ' + 'and gradient norm: {1}').format(self.alpha, gradient_norm))
def count_degree(fname: str):
    """Count the degree of each node in a temporal edge-list CSV.

    The file has a header row; each data row is ``ts,src,dst,w``.  Every edge
    contributes one to both endpoints' counts.  Returns a dict mapping node id
    to its degree.
    """
    node_counts = {}
    with open(fname, 'r') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        # Skip the header row (the original tracked this with a line counter).
        next(csv_reader, None)
        for row in csv_reader:
            # Only the endpoint columns matter; ts (row[0]) and w (row[3])
            # were read but never used in the original.
            src = row[1]
            dst = row[2]
            # dict.get replaces the original insert-or-increment if/else.
            node_counts[src] = node_counts.get(src, 0) + 1
            node_counts[dst] = node_counts.get(dst, 0) + 1
    return node_counts
# NOTE(review): the bare name below looks like a stripped ``@make_properties``
# decorator — confirm against the original DaCe source.
_properties
class ExpandTransformation(PatternTransformation):
    """Base transformation that replaces a library node with its expansion.

    Subclasses set ``_match_node`` and implement ``expansion``; ``apply``
    splices the expansion (an SDFG or code node) into the state in place of
    the matched node.

    NOTE(review): several methods below take ``clc``/``node`` instead of
    ``self`` — their ``@classmethod``/``@staticmethod`` decorators appear to
    have been stripped; confirm against the original source.
    """
    def expressions(clc):
        # Match a single node: the library node this expansion targets.
        return [sdutil.node_path_graph(clc._match_node)]
    def can_be_applied(self, graph: gr.OrderedMultiDiConnectorGraph, expr_index: int, sdfg, permissive: bool=False):
        # Expansions are always applicable once the pattern matched.
        return True
    def match_to_str(self, graph: gr.OrderedMultiDiConnectorGraph):
        return str(self._match_node)
    def expansion(node: nd.LibraryNode, parent_state: SDFGState, parent_sdfg: SDFG, *args, **kwargs):
        # Subclasses must produce the replacement (SDFG or CodeNode).
        raise NotImplementedError('Must be implemented by subclass')
    def postprocessing(sdfg, state, expansion):
        # Optional hook run after the expansion is spliced in.
        pass
    def apply(self, state, sdfg, *args, **kwargs):
        """Replace the matched library node with its expansion in *state*."""
        node = state.node(self.subgraph[type(self)._match_node])
        expansion = type(self).expansion(node, state, sdfg, *args, **kwargs)
        if isinstance(expansion, SDFG):
            # Wrap a returned SDFG in a nested-SDFG node with matching connectors.
            expansion = state.add_nested_sdfg(expansion, sdfg, node.in_connectors, node.out_connectors, name=node.name, schedule=node.schedule, debuginfo=node.debuginfo)
        elif isinstance(expansion, nd.CodeNode):
            expansion.debuginfo = node.debuginfo
            if isinstance(expansion, nd.NestedSDFG):
                # Fix up parent pointers/schedule of a directly-returned nested SDFG.
                nsdfg = expansion.sdfg
                nsdfg.parent = state
                nsdfg.parent_sdfg = sdfg
                nsdfg.update_sdfg_list([])
                nsdfg.parent_nsdfg_node = expansion
                nsdfg.schedule = node.schedule
            elif isinstance(expansion, (nd.EntryNode, nd.LibraryNode)):
                if (expansion.schedule is ScheduleType.Default):
                    expansion.schedule = node.schedule
        else:
            raise TypeError('Node expansion must be a CodeNode or an SDFG')
        # Propagate the expansion's required environments by class path.
        expansion.environments = copy.copy(set(map((lambda a: a.full_class_path()), type(self).environments)))
        # Rewire all edges from the old node to the expansion, then drop it.
        sdutil.change_edge_dest(state, node, expansion)
        sdutil.change_edge_src(state, node, expansion)
        state.remove_node(node)
        if isinstance(expansion, nd.NestedSDFG):
            infer_types.set_default_schedule_and_storage_types(expansion.sdfg, [expansion.schedule], True)
        type(self).postprocessing(sdfg, state, expansion)
    def to_json(self, parent=None) -> Dict[(str, Any)]:
        props = serialize.all_properties_to_json(self)
        return {'type': 'ExpandTransformation', 'transformation': type(self).__name__, 'classpath': nd.full_class_path(self), **props}
    def from_json(json_obj: Dict[(str, Any)], context: Dict[(str, Any)]=None) -> 'ExpandTransformation':
        # Reconstruct the concrete subclass from its class path, then restore
        # the matched subgraph and serialized properties.
        xform = pydoc.locate(json_obj['classpath'])
        expr = xform.expressions()[json_obj.get('expr_index', 0)]
        subgraph = {expr.node(int(k)): int(v) for (k, v) in json_obj.get('_subgraph', {}).items()}
        ret = xform()
        ret.setup_match(None, json_obj.get('sdfg_id', 0), json_obj.get('state_id', 0), subgraph, json_obj.get('expr_index', 0))
        context = (context or {})
        context['transformation'] = ret
        serialize.set_properties_from_json(ret, json_obj, context=context, ignore_properties={'transformation', 'type', 'classpath'})
        return ret
def action_log_probs(policy_logits, actions):
    """Log-probability of each taken action under the policy logits.

    The leading two dimensions (e.g. time, batch) are flattened for the loss
    and the result is reshaped back to the shape of *actions*.
    """
    flat_logits = torch.flatten(policy_logits, 0, 1)
    flat_actions = torch.flatten(actions, 0, 1)
    nll = F.nll_loss(F.log_softmax(flat_logits, dim=-1), flat_actions, reduction='none')
    return -nll.view_as(actions)
def test_deep_string_string():
    """Broadcasting a flat string array against a nested one repeats each
    string across the matching sublist."""
    broadcasted = ak.broadcast_arrays([['x', 'yz'], ['hello', 'world', 'foo', 'bar']], ['x', 'y'])
    assert (broadcasted[1].to_list() == [['x', 'x'], ['y', 'y', 'y', 'y']])
def extract_sentences(dataset_files):
    """Run process_raw_file over each (text, token, sentence) triple and
    concatenate all resulting sentences into one list."""
    collected = []
    for entry in dataset_files:
        (text_file, token_file, sentence_file) = entry
        print(('Extracting sentences from %s and tokens from %s from the text file %s' % (sentence_file, token_file, text_file)))
        collected += process_raw_file(text_file, token_file, sentence_file)
    return collected
# NOTE(review): the tuple below looks like a stripped ``@pytest.mark.skipif``
# decorator — confirm against the original source.
((not have_sympy), 'SymPy not installed')
def test_beta():
    """SymEngine beta must round-trip through SymPy's beta in both directions."""
    x = Symbol('x')
    y = Symbol('y')
    e1 = sympy.beta(sympy.Symbol('y'), sympy.Symbol('x'))
    e2 = beta(y, x)
    # Converting the SymPy expression in yields the SymEngine one, and back.
    assert (sympify(e1) == e2)
    assert (e2._sympy_() == e1)
class Metropolis():
    """Metropolis acceptance criterion at temperature T (T == 0 means greedy)."""

    def __init__(self, T, random_gen=None):
        # Inverse temperature; infinite beta at T == 0 accepts only improvements.
        self.beta = float('inf') if T == 0 else 1.0 / T
        self.random_gen = check_random_state(random_gen)

    def accept_reject(self, res_new, res_old):
        """Accept with probability min(1, exp(-beta * (f_new - f_old))),
        additionally requiring the new result (or neither) to be successful."""
        with np.errstate(invalid='ignore'):
            exponent = (res_old.fun - res_new.fun) * self.beta
            # Cap the acceptance probability at 1 via min(0, .) in the exponent.
            w = math.exp(min(0, exponent))
            threshold = self.random_gen.uniform()
            return (w >= threshold) and (res_new.success or (not res_old.success))

    def __call__(self, *, res_new, res_old):
        return bool(self.accept_reject(res_new, res_old))
def dimension_eis(X, k=2):
    """Dimension of the weight-k Eisenstein subspace for *X*, which may be an
    arithmetic subgroup, a Dirichlet character (via Gamma1), or a level (via
    Gamma0).  Raises TypeError for anything else."""
    if is_ArithmeticSubgroup(X):
        return X.dimension_eis(k)
    if isinstance(X, dirichlet.DirichletCharacter):
        return Gamma1(X.modulus()).dimension_eis(k, X)
    if isinstance(X, (int, Integer)):
        return Gamma0(X).dimension_eis(k)
    raise TypeError(f'argument in dimension_eis must be an integer, a Dirichlet character, or a finite index subgroup of SL2Z (got {X})')
def definite_meek(cg, background_knowledge=None):
    """Apply definite-collider variants of Meek's orientation rules to a copy
    of the causal graph *cg* until no more edges can be oriented.

    Background knowledge, when given, vetoes any orientation it forbids or
    whose reverse it requires.  Returns the new (possibly more oriented) graph;
    *cg* itself is not modified.
    """
    cg_new = deepcopy(cg)
    Tri = cg_new.find_triangles()
    Kite = cg_new.find_kites()
    Loop = True
    # Iterate to a fixed point: each pass may enable further orientations.
    while Loop:
        Loop = False
        # Rule using definite non-colliders: if i -> j and j - k (and i,j,k is
        # a definite non-collider), orient j -> k; symmetric case orients j -> i.
        for (i, j, k) in cg_new.definite_non_UC:
            if (cg_new.is_fully_directed(i, j) and cg_new.is_undirected(j, k) and (not ((background_knowledge is not None) and (background_knowledge.is_forbidden(cg_new.G.nodes[j], cg_new.G.nodes[k]) or background_knowledge.is_required(cg_new.G.nodes[k], cg_new.G.nodes[j]))))):
                edge1 = cg_new.G.get_edge(cg_new.G.nodes[j], cg_new.G.nodes[k])
                if (edge1 is not None):
                    cg_new.G.remove_edge(edge1)
                cg_new.G.add_edge(Edge(cg_new.G.nodes[j], cg_new.G.nodes[k], Endpoint.TAIL, Endpoint.ARROW))
                Loop = True
            elif (cg_new.is_fully_directed(k, j) and cg_new.is_undirected(j, i) and (not ((background_knowledge is not None) and (background_knowledge.is_forbidden(cg_new.G.nodes[j], cg_new.G.nodes[i]) or background_knowledge.is_required(cg_new.G.nodes[i], cg_new.G.nodes[j]))))):
                edge1 = cg_new.G.get_edge(cg_new.G.nodes[j], cg_new.G.nodes[i])
                if (edge1 is not None):
                    cg_new.G.remove_edge(edge1)
                cg_new.G.add_edge(Edge(cg_new.G.nodes[j], cg_new.G.nodes[i], Endpoint.TAIL, Endpoint.ARROW))
                Loop = True
        # Triangle rule (Meek rule 2): i -> j -> k with i - k orients i -> k
        # to avoid creating a directed cycle.
        for (i, j, k) in Tri:
            if (cg_new.is_fully_directed(i, j) and cg_new.is_fully_directed(j, k) and cg_new.is_undirected(i, k)):
                if ((background_knowledge is not None) and (background_knowledge.is_forbidden(cg_new.G.nodes[i], cg_new.G.nodes[k]) or background_knowledge.is_required(cg_new.G.nodes[k], cg_new.G.nodes[i]))):
                    pass
                else:
                    edge1 = cg_new.G.get_edge(cg_new.G.nodes[i], cg_new.G.nodes[k])
                    if (edge1 is not None):
                        cg_new.G.remove_edge(edge1)
                    cg_new.G.add_edge(Edge(cg_new.G.nodes[i], cg_new.G.nodes[k], Endpoint.TAIL, Endpoint.ARROW))
                    Loop = True
        # Kite rule: a definite unshielded collider at l together with a
        # definite non-collider at i forces i -> l when i - l is undirected.
        for (i, j, k, l) in Kite:
            if ((((j, l, k) in cg_new.definite_UC) or ((k, l, j) in cg_new.definite_UC)) and (((j, i, k) in cg_new.definite_non_UC) or ((k, i, j) in cg_new.definite_non_UC)) and cg_new.is_undirected(i, l)):
                if ((background_knowledge is not None) and (background_knowledge.is_forbidden(cg_new.G.nodes[i], cg_new.G.nodes[l]) or background_knowledge.is_required(cg_new.G.nodes[l], cg_new.G.nodes[i]))):
                    pass
                else:
                    edge1 = cg_new.G.get_edge(cg_new.G.nodes[i], cg_new.G.nodes[l])
                    if (edge1 is not None):
                        cg_new.G.remove_edge(edge1)
                    cg_new.G.add_edge(Edge(cg_new.G.nodes[i], cg_new.G.nodes[l], Endpoint.TAIL, Endpoint.ARROW))
                    Loop = True
    return cg_new
def list_secular_terms(min_order, max_order, eccentricities=True, inclinations=True):
    """Enumerate secular disturbing-function arguments with order between
    `min_order` and `max_order` (both rounded down to even; secular terms
    only occur at even order).

    Parameters
    ----------
    min_order, max_order : int
        Inclusive range of term orders to generate.
    eccentricities : bool
        If False, drop terms that depend on the eccentricities.
    inclinations : bool
        If False, drop terms that depend on the inclinations.

    Returns
    -------
    list of (k_vec, nu_vec) tuples describing each secular argument; the
    first two entries of every k_vec are 0 (no mean-longitude dependence).
    """
    args_dict = df_arguments_dictionary(max_order)
    args = []
    # Round both bounds down to the nearest even order.
    Nmax1 = (max_order // 2) * 2
    Nmin1 = (min_order // 2) * 2
    for N in range(0, Nmax1 + 1, 2):
        argsN = args_dict[N][0]
        nutot_min = max((Nmin1 - N) // 2, 0)
        nutot_max = (Nmax1 - N) // 2
        for nutot in range(nutot_min, nutot_max + 1):
            for nu_vec in _nucombos(nutot):
                for arg in argsN:
                    k_vec = (0, 0, *arg)
                    # Idiom fix: test the booleans directly instead of the
                    # original `== False` / `== True` comparisons.
                    if not inclinations and k_nu_depend_on_inclinations(k_vec, nu_vec):
                        continue
                    if not eccentricities and k_nu_depend_on_eccentricities(k_vec, nu_vec):
                        continue
                    args.append((k_vec, nu_vec))
    return args
def version():
    """Return the package version parsed from spectralDNS/__init__.py."""
    init_file = os.path.join(cwd, 'spectralDNS', '__init__.py')
    with open(init_file) as fh:
        match = re.search(r"__version__\s*=\s*'(.*)'", fh.read())
    return match.group(1)
class PAN(FPN):
    """Path Aggregation Network neck: an FPN followed by an extra bottom-up
    aggregation pass over the FPN outputs."""
    def __init__(self, in_channels, out_channels, add_extra_levels=False, extra_levels=2):
        super().__init__(in_channels, out_channels, add_extra_levels, extra_levels)
        self.init_weights()
    def forward(self, x):
        # One input feature map is expected per configured backbone level.
        assert (len(x) == len(self.in_channels))
        # 1x1 lateral convs (built by the FPN parent) align channel counts.
        laterals = [lateral_conv(x[i]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        used_backbone_levels = len(laterals)
        # Top-down path (standard FPN): add each coarser map, upsampled to
        # the finer map's spatial size.
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            prev_shape = laterals[(i - 1)].shape[2:]
            laterals[(i - 1)] += F.interpolate(laterals[i], size=prev_shape, mode='bilinear')
        inter_outs = [laterals[i] for i in range(used_backbone_levels)]
        # Bottom-up path: fold each finer map into the next coarser one,
        # resized via interpolation (rather than a strided conv).
        for i in range(0, (used_backbone_levels - 1)):
            prev_shape = inter_outs[(i + 1)].shape[2:]
            inter_outs[(i + 1)] += F.interpolate(inter_outs[i], size=prev_shape, mode='bilinear')
        outs = []
        outs.append(inter_outs[0])
        outs.extend([inter_outs[i] for i in range(1, used_backbone_levels)])
        return outs
class InteractionNoise(AbstractNoise):
    """Injects human-like disfluencies into simulated user output: hesitation
    fillers and self-restarts on the word level, and self-correction tags on
    INFORM actions. Noise rates come from ``self.complexity``."""

    def transmit(self, actions):
        """Apply self-correction noise to a list of dialog actions."""
        return self.add_self_correct(actions)

    def transmit_words(self, utt):
        """Apply hesitation noise, then self-restart noise, to an utterance."""
        return self.add_self_restart(self.add_hesitation(utt))

    def add_hesitation(self, utt):
        """Maybe insert a filler word at a random interior position.

        Only utterances longer than four tokens are perturbed, with
        probability ``self.complexity.hesitation``.
        """
        words = utt.split(' ')
        if len(words) > 4 and np.random.rand() < self.complexity.hesitation:
            idx = np.random.randint(1, len(words) - 1)
            words.insert(idx, np.random.choice(['hmm', 'uhm', 'hmm ...']))
            return ' '.join(words)
        return utt

    def add_self_restart(self, utt):
        """Maybe prefix the utterance with its first 1-2 words plus 'uhm yeah',
        simulating a restarted sentence."""
        words = utt.split(' ')
        if len(words) > 4 and np.random.rand() < self.complexity.self_restart:
            prefix_len = np.random.randint(1, 3)
            words = words[0:prefix_len] + ['uhm yeah'] + words
            return ' '.join(words)
        return utt

    def add_self_correct(self, actions):
        """For each INFORM action, maybe tag it with a SELF_CORRECT parameter
        (probability ``self.complexity.self_correct``)."""
        for action in actions:
            if action.act == UserAct.INFORM and np.random.rand() < self.complexity.self_correct:
                action.parameters.append((BaseUsrSlot.SELF_CORRECT, True))
        return actions
class TestFileIO(unittest.TestCase):
    """Checks fairseq.file_io.PathManager round-trips a temp file, both with
    and without a (mocked) fvcore installation."""

    _tmpdir: Optional[str] = None
    _tmpfile: Optional[str] = None
    _tmpfile_contents = 'Hello, World'

    @classmethod
    def setUpClass(cls) -> None:
        # BUGFIX: setUpClass must be a classmethod — unittest invokes it as
        # cls.setUpClass(); as a plain function that call raises TypeError.
        cls._tmpdir = tempfile.mkdtemp()
        with open(os.path.join(cls._tmpdir, 'test.txt'), 'w') as f:
            cls._tmpfile = f.name
            f.write(cls._tmpfile_contents)
            f.flush()

    @classmethod
    def tearDownClass(cls) -> None:
        # Same fix as setUpClass; also guard against setup having failed.
        if (cls._tmpdir is not None):
            shutil.rmtree(cls._tmpdir)

    def test_file_io(self):
        from fairseq.file_io import PathManager
        with PathManager.open(os.path.join(self._tmpdir, 'test.txt'), 'r') as f:
            s = f.read()
            self.assertEqual(s, self._tmpfile_contents)

    def test_file_io_oss(self):
        # Simulate fvcore being importable so PathManager exercises that path.
        sys.modules['fvcore'] = MagicMock()
        from fairseq.file_io import PathManager
        with PathManager.open(os.path.join(self._tmpdir, 'test.txt'), 'r') as f:
            s = f.read()
            self.assertEqual(s, self._tmpfile_contents)
def cosine_similarity(a, b, eps=1e-08):
    """Cosine similarity between two arrays, flattened to 1-D vectors.

    Defined as 1.0 when both inputs are all-zero; otherwise `eps` guards the
    denominator against division by zero.
    """
    if np.all(b == 0) and np.all(a == 0):
        return 1.0
    dot = np.sum(a.flatten() * b.flatten())
    denominator = tensor_norm(a) * tensor_norm(b) + eps
    return dot / denominator
def test_dice_loss():
    """DiceLoss must reject a non-numeric eps and return tensor losses both
    with and without a mask."""
    with pytest.raises(AssertionError):
        DiceLoss(eps='1')
    criterion = DiceLoss()
    prediction = torch.rand(1, 1, 32, 32)
    target = torch.rand(1, 1, 32, 32)
    # No mask supplied.
    assert isinstance(criterion(prediction, target, None), torch.Tensor)
    # Broadcastable mask supplied.
    assert isinstance(criterion(prediction, target, torch.rand(1, 1, 1, 1)), torch.Tensor)
class RandomLowLight(object):
    """Synthesizes a random low-light version of a BGR image by darkening its
    LAB lightness channel with a low-light network."""

    def __init__(self, low_light_net, exp_ranges=[0.05, 0.3]):
        # Lightness (L/100) above this is treated as saturated and kept as-is.
        self.threshold = 0.97
        self.exp_range = exp_ranges
        self.low_light_net = low_light_net

    def __call__(self, img):
        # Target exposure level sampled uniformly from the configured range.
        exposure = random.uniform(*self.exp_range)
        h, w, _ = img.shape
        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        l_channel, a_channel, b_channel = cv2.split(lab)
        lightness = torch.from_numpy(l_channel).view(1, 1, h, w).cuda() / 100.0
        # Per-pixel exposure map: the sampled exposure everywhere except
        # saturated pixels, which keep their own lightness.
        exp_map = exposure * torch.ones_like(lightness)
        saturated_mask = (lightness > self.threshold).int()
        exp_map = exp_map * (1 - saturated_mask) + lightness * saturated_mask
        darkened_l = (self.low_light_net(lightness, exp_map) * 100).squeeze().cpu().detach().numpy()
        # Apply the lightness ratio uniformly to all three BGR channels.
        gain = darkened_l / (l_channel + 1e-08)
        gain = np.dstack([gain] * 3)
        return img * gain
# NOTE(review): the line below looks like a truncated pytest decorator —
# presumably `@pytest.mark.parametrize(...)`; confirm against the original file.
.parametrize('gzip_response', [True, False])
def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
    """fetch_openml must serve repeat requests from the local cache.

    The first call populates the cache via mocked web functions; urlopen is
    then replaced with a raiser, so the second call can only succeed if it
    reads from the cache. Both results must match exactly.
    """
    def _mock_urlopen_raise(request, *args, **kwargs):
        # Any network access after the cache is warm is a test failure.
        raise ValueError(('This mechanism intends to test correct cachehandling. As such, urlopen should never be accessed. URL: %s' % request.get_full_url()))
    data_id = 61
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # First fetch: downloads (mocked) and writes to cache_directory.
    (X_fetched, y_fetched) = fetch_openml(data_id=data_id, cache=True, data_home=cache_directory, return_X_y=True, as_frame=False, parser='liac-arff')
    monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen_raise)
    # Second fetch: must hit the cache only.
    (X_cached, y_cached) = fetch_openml(data_id=data_id, cache=True, data_home=cache_directory, return_X_y=True, as_frame=False, parser='liac-arff')
    np.testing.assert_array_equal(X_fetched, X_cached)
    np.testing.assert_array_equal(y_fetched, y_cached)
def main():
    """Entry point of the interactive PySS3 command line.

    Configures the prompt, discovers locally saved models in the
    ``ss3_models`` folder, and runs the REPL until exit or Ctrl-C.
    """
    global MODELS
    prompt = SS3Prompt()
    prompt.prompt = '(pyss3) >>> '
    prompt.doc_header = 'Documented commands (type help <command>):'
    Print.set_verbosity(VERBOSITY.VERBOSE)
    Print.info(('PySS3 Command Line v%s | Sergio Burdisso (sergio.).\nPySS3 comes with ABSOLUTELY NO WARRANTY. This is free software,\nand you are welcome to redistribute it under certain conditions\n(Type "license" for more details).\nType "help" or "help <command>" for more information.\n' % __version__), decorator=False)
    try:
        # Collect the names (without extension) of saved models so they can
        # be offered as completions for the `load` command.
        MODELS = [path.splitext(model_file)[0] for model_file in listdir(SS3.__models_folder__) if (path.splitext(model_file)[1][1:] == STR_MODEL_EXT)]
        ARGS['load'] = MODELS
    except OSError:
        # Models folder missing — continue with no preloaded model names.
        Print.warn("No local 'ss3_models' folder was found.")
        Print.warn('Suggestion: either create a new model and train it or open this\n* command line from a different folder (with models).\n')
    try:
        prompt.cmdloop()
    except KeyboardInterrupt:
        print('\nKeyboardInterrupt')
        prompt.do_exit()
def pformat(obj: Any) -> str:
    """Return `obj` pretty-printed as a string, with a trailing newline.

    Equivalent to the original StringIO round-trip: ``pprint(obj, file=buf)``
    writes ``pprint.pformat(obj)`` followed by a single newline, so use the
    stdlib helper directly.
    """
    import pprint as _pprint
    return _pprint.pformat(obj) + '\n'
class PpmImageFile(ImageFile.ImageFile):
    """PIL plugin that decodes Pbmplus (PBM/PGM/PPM) image headers."""
    format = 'PPM'
    format_description = 'Pbmplus image'
    def _token(self, s=b''):
        # Accumulate ASCII token bytes until whitespace or EOF; optionally
        # seeded with already-read bytes via `s`.
        while True:
            c = self.fp.read(1)
            if ((not c) or (c in b_whitespace)):
                break
            # Bytes above ASCII 'y' cannot appear in a valid header token.
            if (c > b'y'):
                raise ValueError('Expected ASCII value, found binary')
            s = (s + c)
            # Header integers never need more than 9 digits.
            if (len(s) > 9):
                raise ValueError('Expected int, got > 9 digits')
        return s
    def _open(self):
        # Header layout: magic number (P1..P6), then width, height and —
        # except for bitmaps — the maximum sample value; '#' starts a comment.
        s = self.fp.read(1)
        if (s != b'P'):
            raise SyntaxError('not a PPM file')
        magic_number = self._token(s)
        mode = MODES[magic_number]
        self.custom_mimetype = {b'P4': 'image/x-portable-bitmap', b'P5': 'image/x-portable-graymap', b'P6': 'image/x-portable-pixmap'}.get(magic_number)
        if (mode == '1'):
            # Bilevel data is stored inverted relative to PIL's convention.
            self.mode = '1'
            rawmode = '1;I'
        else:
            self.mode = rawmode = mode
        for ix in range(3):
            # Skip whitespace and comment lines before each header token.
            while True:
                while True:
                    s = self.fp.read(1)
                    if (s not in b_whitespace):
                        break
                if (s == b''):
                    raise ValueError('File does not extend beyond magic number')
                if (s != b'#'):
                    break
                s = self.fp.readline()
            s = int(self._token(s))
            if (ix == 0):
                xsize = s
            elif (ix == 1):
                ysize = s
                # Bitmaps have no maxval token, so the header ends here.
                if (mode == '1'):
                    break
            elif (ix == 2):
                # Third token is maxval; >255 needs 16- or 32-bit storage.
                if (s > 255):
                    if (not (mode == 'L')):
                        raise ValueError(('Too many colors for band: %s' % s))
                    if (s < (2 ** 16)):
                        self.mode = 'I'
                        rawmode = 'I;16B'
                    else:
                        self.mode = 'I'
                        rawmode = 'I;32B'
        self._size = (xsize, ysize)
        # Pixel data starts at the current file offset.
        self.tile = [('raw', (0, 0, xsize, ysize), self.fp.tell(), (rawmode, 0, 1))]
class Distribution_parse_config_files():
    """Replacement for distutils' Distribution.parse_config_files that skips
    install-location options when running under a virtual environment."""
    def parse_config_files(self, filenames=None):
        from configparser import ConfigParser
        # sys.prefix != sys.base_prefix indicates a virtual environment;
        # ignore options that would redirect installation paths there.
        if (sys.prefix != sys.base_prefix):
            ignore_options = ['install-base', 'install-platbase', 'install-lib', 'install-platlib', 'install-purelib', 'install-headers', 'install-scripts', 'install-data', 'prefix', 'exec-prefix', 'home', 'user', 'root']
        else:
            ignore_options = []
        ignore_options = frozenset(ignore_options)
        if (filenames is None):
            filenames = self.find_config_files()
        if DEBUG:
            self.announce('Distribution.parse_config_files():')
        # interpolation=None: config values are taken verbatim, no %-expansion.
        parser = ConfigParser(interpolation=None)
        for filename in filenames:
            if DEBUG:
                self.announce(('  reading %s' % filename))
            parser.read(filename)
            for section in parser.sections():
                options = parser.options(section)
                opt_dict = self.get_option_dict(section)
                for opt in options:
                    if ((opt != '__name__') and (opt not in ignore_options)):
                        val = parser.get(section, opt)
                        opt = opt.replace('-', '_')
                        # Record the source filename together with the value.
                        opt_dict[opt] = (filename, val)
            # Reset the parser so options from this file don't leak into the next.
            parser.__init__()
        # Mirror [global] options onto Distribution attributes, honoring
        # negative-option aliases and boolean conversion where appropriate.
        if ('global' in self.command_options):
            for (opt, (src, val)) in self.command_options['global'].items():
                alias = self.negative_opt.get(opt)
                try:
                    if alias:
                        setattr(self, alias, (not strtobool(val)))
                    elif (opt in ('verbose', 'dry_run')):
                        setattr(self, opt, strtobool(val))
                    else:
                        setattr(self, opt, val)
                except ValueError as msg:
                    raise DistutilsOptionError(msg)
# NOTE(review): the line below looks like a truncated pytest decorator —
# presumably `@pytest.mark.parametrize(...)`; confirm against the original file.
.parametrize('device', ['cpu', 'cuda'])
def test_differentiable(device, fl=5, fp=3, B=2, N=4):
    """Unframe must be differentiable for a [B, N, fl] input on each device."""
    unframe = diffsptk.Unframe(fl, fp)
    U.check_differentiable(device, unframe, [B, N, fl])
def get_single_monitor_data(log_df: pd.DataFrame, monitor_names: Union[(str, List[str])], transformation_name: Optional[str]=None, iteration: Optional[int]=None, event_name: Optional[str]=None) -> Union[(float, List)]:
    """Fetch one monitor reading from a log DataFrame.

    Unless pinned by the caller, transformations and events are scanned in
    reverse (most recently logged first). The value at `iteration` is
    returned; when that iteration is absent the last recorded one is used.
    Returns [] when no matching data exists.
    """
    if (transformation_name is None):
        # Consider every transformation present in the log.
        all_transformation_names = log_df[LOG_TRANSFORMATION_KEY].unique().tolist()
    else:
        all_transformation_names = [transformation_name]
    if (event_name is None):
        # NOTE(review): event names are read from LOG_TRANSFORMATION_INFO_KEY —
        # confirm this column indeed holds event identifiers.
        all_event_names = log_df[LOG_TRANSFORMATION_INFO_KEY].unique().tolist()
    else:
        all_event_names = [event_name]
    for name_transformation in reversed(all_transformation_names):
        for name_event in reversed(all_event_names):
            data_all = get_monitor_data(log_df, monitor_names, name_event)
            if ((not data_all) or (name_transformation not in data_all.keys())):
                continue
            transformation_key = name_transformation
            # Fall back to the most recent iteration when the requested one
            # (or None) is not recorded.
            if (iteration in data_all[transformation_key].iteration):
                iteration_index = data_all[transformation_key].iteration.index(iteration)
            else:
                iteration_index = (- 1)
            return data_all[transformation_key].data[iteration_index]
    return []
class Batch3dceCollator(object):
    """Collates (image, target, info) triples into a 3DCE mini-batch.

    Each stacked image volume is split into ``cfg.INPUT.NUM_IMAGES_3DCE``
    chunks along dim 0 before being packed into an image list.
    """

    def __init__(self, size_divisible=0):
        self.size_divisible = size_divisible
        self.num_slice = cfg.INPUT.NUM_SLICES
        self.num_image = cfg.INPUT.NUM_IMAGES_3DCE

    def __call__(self, batch):
        images = ()
        targets = []
        infos = []
        for im, target, info in batch:
            # Split the stacked volume into num_image equal chunks.
            images = images + im.split(int(im.shape[0] / self.num_image))
            targets.append(target)
            infos.append(info)
        image_list = to_image_list(images, self.size_divisible)
        return (image_list, tuple(targets), tuple(infos))
class BaselineTrain(nn.Module):
    """Transfer-learning baseline: a backbone feature extractor followed by a
    linear ('softmax') or cosine-distance ('dist') classifier, trained with
    cross-entropy."""
    def __init__(self, model_func, num_class, loss_type='softmax'):
        super(BaselineTrain, self).__init__()
        self.feature = model_func()
        if (loss_type == 'softmax'):
            self.classifier = nn.Linear(self.feature.final_feat_dim, num_class)
            self.classifier.bias.data.fill_(0)
        elif (loss_type == 'dist'):
            # Cosine-distance classifier head (Baseline++ style).
            self.classifier = backbone.distLinear(self.feature.final_feat_dim, num_class)
        self.loss_type = loss_type
        self.num_class = num_class
        self.loss_fn = nn.CrossEntropyLoss()
        # When True, validation computes the Davies-Bouldin index instead of
        # returning a dummy value.
        self.DBval = False
    def forward(self, x):
        # NOTE(review): torch.autograd.Variable is a deprecated no-op wrapper
        # in modern PyTorch; .cuda() alone would suffice.
        x = Variable(x.cuda())
        out = self.feature.forward(x)
        scores = self.classifier.forward(out)
        return scores
    def forward_loss(self, x, y):
        """Cross-entropy loss of the classifier scores against labels y."""
        scores = self.forward(x)
        y = Variable(y.cuda())
        return self.loss_fn(scores, y)
    def train_loop(self, epoch, train_loader, optimizer, tf_writer):
        """One training epoch; logs the running average loss."""
        print_freq = 10
        avg_loss = 0
        for (i, (x, y)) in enumerate(train_loader):
            optimizer.zero_grad()
            loss = self.forward_loss(x, y)
            loss.backward()
            optimizer.step()
            avg_loss = (avg_loss + loss.item())
            if ((i % print_freq) == 0):
                print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), (avg_loss / float((i + 1)))))
        tf_writer.add_scalar('loss/train', (avg_loss / float((i + 1))), epoch)
    def test_loop(self, val_loader):
        # Returns -1 (no validation metric) unless DBval analysis is enabled.
        if self.DBval:
            return self.analysis_loop(val_loader)
        else:
            return (- 1)
    def analysis_loop(self, val_loader, record=None):
        """Group validation features by class and score them with the
        Davies-Bouldin index; returns 1/DB (higher is better)."""
        class_file = {}
        for (i, (x, y)) in enumerate(val_loader):
            x = x.cuda()
            x_var = Variable(x)
            feats = self.feature.forward(x_var).data.cpu().numpy()
            labels = y.cpu().numpy()
            for (f, l) in zip(feats, labels):
                if (l not in class_file.keys()):
                    class_file[l] = []
                class_file[l].append(f)
        for cl in class_file:
            class_file[cl] = np.array(class_file[cl])
        DB = DBindex(class_file)
        print(('DB index = %4.2f' % DB))
        return (1 / DB)
# NOTE(review): the line below looks like a truncated decorator — presumably
# `@_SEG_HEADS_REGISTRY.register()`; confirm against the original source.
_SEG_HEADS_REGISTRY.register()
class SemSegFPNHead(nn.Module):
    """Semantic segmentation head that fuses multi-scale FPN features into a
    single map at `common_stride` and predicts per-pixel class scores."""
    def __init__(self, input_shape: Dict[(str, ShapeSpec)], *, num_classes: int, conv_dims: int, common_stride: int, loss_weight: float=1.0, norm: Optional[Union[(str, Callable)]]=None, ignore_value: int=(- 1)):
        super().__init__()
        # Process features in increasing stride order.
        input_shape = sorted(input_shape.items(), key=(lambda x: x[1].stride))
        self.in_features = [k for (k, v) in input_shape]
        feature_strides = [v.stride for (k, v) in input_shape]
        feature_channels = [v.channels for (k, v) in input_shape]
        self.ignore_value = ignore_value
        self.common_stride = common_stride
        self.loss_weight = loss_weight
        self.scale_heads = []
        for (in_feature, stride, channels) in zip(self.in_features, feature_strides, feature_channels):
            head_ops = []
            # Number of conv(+2x upsample) stages needed to reach common_stride.
            head_length = max(1, int((np.log2(stride) - np.log2(self.common_stride))))
            for k in range(head_length):
                norm_module = get_norm(norm, conv_dims)
                conv = Conv2d((channels if (k == 0) else conv_dims), conv_dims, kernel_size=3, stride=1, padding=1, bias=(not norm), norm=norm_module, activation=F.relu)
                weight_init.c2_msra_fill(conv)
                head_ops.append(conv)
                if (stride != self.common_stride):
                    head_ops.append(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False))
            self.scale_heads.append(nn.Sequential(*head_ops))
            # Register under the feature name so parameters are tracked.
            self.add_module(in_feature, self.scale_heads[(- 1)])
        self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
        weight_init.c2_msra_fill(self.predictor)
    # NOTE(review): this is conventionally a @classmethod in detectron2; the
    # decorator appears stripped here — confirm against the original source.
    def from_config(cls, cfg, input_shape: Dict[(str, ShapeSpec)]):
        return {'input_shape': {k: v for (k, v) in input_shape.items() if (k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES)}, 'ignore_value': cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, 'num_classes': cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, 'conv_dims': cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM, 'common_stride': cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE, 'norm': cfg.MODEL.SEM_SEG_HEAD.NORM, 'loss_weight': cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT}
    def forward(self, features, targets=None):
        """Training: return (None, losses). Inference: return (logits, {})."""
        x = self.layers(features)
        if self.training:
            return (None, self.losses(x, targets))
        else:
            x = F.interpolate(x, scale_factor=self.common_stride, mode='bilinear', align_corners=False)
            return (x, {})
    def layers(self, features):
        # Sum the per-scale head outputs, then predict class scores.
        for (i, f) in enumerate(self.in_features):
            if (i == 0):
                x = self.scale_heads[i](features[f])
            else:
                x = (x + self.scale_heads[i](features[f]))
        x = self.predictor(x)
        return x
    def losses(self, predictions, targets):
        predictions = predictions.float()
        # Upsample logits to target resolution before the pixel-wise loss.
        predictions = F.interpolate(predictions, scale_factor=self.common_stride, mode='bilinear', align_corners=False)
        loss = F.cross_entropy(predictions, targets, reduction='mean', ignore_index=self.ignore_value)
        losses = {'loss_sem_seg': (loss * self.loss_weight)}
        return losses
def get_requires_for_build_wheel(config_settings=None):
    """PEP 517 hook: building a wheel needs setuptools and wheel, plus any
    extra requirements derived from the (normalized) config settings."""
    settings = _fix_config(config_settings)
    return _get_build_requires(settings, requirements=['setuptools', 'wheel'])
def get_random_affine():
    """Build a random 2x3 translation matrix (for cv2.warpAffine-style use).

    NOTE(review): np.random.randint silently truncates the float bounds
    (-1.7, 1.8) to integers, so the shifts are small integer offsets —
    confirm the float bounds are intentional.
    """
    tx, ty = np.random.randint((- 1.7), 1.8, 2)
    matrix = np.float32([[1, 0, tx], [0, 1, ty]])
    return matrix
def unwrap_checkpoint(m: torch.nn.Module):
    """Undo activation-checkpoint wrapping on a module tree.

    Any submodule carrying a saved `precheckpoint_forward` gets its original
    forward restored and the saved attribute removed. Returns `m`.
    """
    for submodule in m.modules():
        try:
            submodule.forward = submodule.precheckpoint_forward
        except AttributeError:
            continue  # this submodule was never wrapped
        del submodule.precheckpoint_forward
    return m
# NOTE(review): the line below looks like a truncated pytest decorator —
# presumably `@pytest.mark.parametrize(...)`; confirm against the original file.
.parametrize(('r_plot', 'end'), [[[1, 2, 2.1, 2.2, 4, 8, 8, np.inf], 6], [[1, 2, 2.1, 2.2, 2.3, 4, 8, 8, np.inf], 0], [[1, 2, 2.1, 2, np.inf], 0], [[1, 2, 2.1, np.inf], 2]])
def test_extend_upward(r_plot, end):
    """_extend_region starting at index 0 must stop at the expected `end`."""
    r_plot = np.array(r_plot)
    # Successive-value ratios classify each step as steeply upward (<= 0.9)
    # or downward (> 1).
    ratio = (r_plot[:(- 1)] / r_plot[1:])
    steep_upward = (ratio <= 0.9)
    downward = (ratio > 1)
    e = _extend_region(steep_upward, downward, 0, 2)
    assert (e == end)
def read_planar(planar_path, fmt=((1080, 1920), (1080, 1920), (1080, 1920))):
    """Read a raw planar uint8 file and split it into per-plane 2-D arrays.

    `fmt` lists the (height, width) of each consecutive plane stored in the
    file; returns a list with one (h, w) uint8 array per plane.
    """
    raw = np.fromfile(planar_path, dtype=np.uint8)
    planes = []
    offset = 0
    for h, w in fmt:
        count = h * w
        planes.append(raw[offset:offset + count].reshape(h, w))
        offset += count
    return planes
def all_reduce_max(tensor_list):
    """In-place element-wise MAX all-reduce of each tensor across workers.

    No-op when running single-process (world size 1).
    """
    if (get_world_size() == 1):
        return
    for tensor in tensor_list:
        # FIX: dist.reduce_op is a long-deprecated alias removed in recent
        # PyTorch releases; dist.ReduceOp is the supported spelling.
        dist.all_reduce(tensor, op=dist.ReduceOp.MAX)
def get_attentiveFP_idx(df, file='./split_and_data/05_BACE_attentiveFP.data'):
    """Map the AttentiveFP train/valid/test SMILES splits onto row indices of `df`.

    Prints split sizes before and after the index lookup so dropped molecules
    are visible, then returns (train_idx, valid_idx, test_idx).
    """
    train, valid, test = load(file)
    print('training set: %s, valid set: %s, test set %s' % (len(train), len(valid), len(test)))
    # Select df rows whose SMILES string appears in each split.
    train_idx, valid_idx, test_idx = (df[df.smiles.isin(part.mol)].index for part in (train, valid, test))
    print('training set: %s, valid set: %s, test set %s' % (len(train_idx), len(valid_idx), len(test_idx)))
    return (train_idx, valid_idx, test_idx)
class DensePoseDataPointsUVisualizer(DensePoseDataPointsVisualizer):
    """Visualizes DensePose point data colored by the U coordinate."""

    def __init__(self, **kwargs):
        # Same as the base point visualizer, with the value function fixed to
        # the U-channel colormap mapping.
        super().__init__(densepose_data_to_value_fn=_densepose_data_u_for_cmap, **kwargs)
def generate_partition_state_methods() -> str:
    """Concatenate the generated source of all partition state-management
    methods (state_dict, load_state_dict, named_parameters, named_buffers,
    cpu/cuda/to), separated by newlines and ending with a blank line."""
    sections = [
        generate_state_dict_method(),
        generate_load_state_dict_method(),
        generate_named_parameters_method(),
        generate_named_buffers_method(),
    ]
    sections.extend(generate_cpu_cuda_to_methods())
    return '\n'.join(sections) + '\n\n'
def df_to_fc(df: pd.DataFrame, lat_colname: str='lat', lon_colname: str='lon') -> ee.FeatureCollection:
    """Convert a DataFrame with latitude/longitude columns into an Earth
    Engine FeatureCollection of point features carrying all row values as
    properties."""
    # Cast to object dtype so property values survive Earth Engine serialization.
    df = df.astype('object')
    features = []
    for _, row in df.iterrows():
        props = row.to_dict()
        point = ee.Geometry.Point([props[lon_colname], props[lat_colname]])
        features.append(ee.Feature(point, props))
    return ee.FeatureCollection(features)
# Register the supported section-numbering styles: each call pairs a style
# name with its pattern table and a converter from matched text to an int
# ordinal (Arabic, Roman, alphabetic, kana, iroha and kanji variants).
_section_pattern('arabic', PATS_NUM, int)
_section_pattern('roman_upper', PATS_ROMAN_UPPER, en.roman_to_int)
_section_pattern('roman_lower', PATS_ROMAN_LOWER, en.roman_to_int)
_section_pattern('alph_upper', PATS_ALPH_UPPER, en.alphabet_to_int)
_section_pattern('alph_lower', PATS_ALPH_LOWER, en.alphabet_to_int)
_section_pattern('arabic_multilevel', PATS_NUM_MULTILEVEL, int)
_section_pattern('hiragana', PATS_HIRAGANA, hiragana_to_int)
_section_pattern('katakana', PATS_KATAKANA, katakana_to_int)
_section_pattern('hiragana_iroha', PATS_HIRAGANA_IROHA, hiragana_iroha_to_int)
_section_pattern('katakana_iroha', PATS_KATAKANA_IROHA, katakana_iroha_to_int)
_section_pattern('kansuji', PATS_KANSUJI, kansuji_to_int)
_section_pattern('kakoimoji', PATS_KAKOIMOJI_SUJI, kakoimoji_to_int)
class SectionNumberJa(BaseSectionNumber):
    """Japanese section-number parser.

    NOTE(review): the bare ``_pattern()`` line below looks like a truncated
    decorator for ``bullet_point`` — confirm against the original source.
    """
    _pattern()
    def bullet_point(text: str):
        # Return the leading bullet-point marker of `text`, or None if absent.
        m = en.PAT_BULLET_POINTS.match(text)
        return (None if (m is None) else m.group(0))
def modrelu(input: Tensor, bias: Tensor, inplace: bool=False) -> Tensor:
    """modReLU activation.

    For complex input z: relu(|z| + bias) * z/|z| (the phase is preserved
    while the magnitude is shifted and rectified). For real input, falls
    back to a plain ReLU; `inplace` only applies to the real path.
    """
    if not input.is_complex():
        return F.relu(input, inplace=inplace)
    magnitude = torch.abs(input)
    phase = input / magnitude
    return F.relu(magnitude + bias) * phase
_spec([HookScope.GLOBAL])
def before_load_schema(context: HookContext, raw_schema: dict[(str, Any)]) -> None:
    """Hook point invoked before the raw schema is processed; no-op by default.

    NOTE(review): the line above looks like a truncated decorator (likely a
    hook-spec registration) and the original body appears stripped — confirm
    against the source.
    """
class NottinghamDatabase(RemoteABCFolderDataset):
    """Remote ABC-notation folder dataset for the Nottingham Music Database."""
    _info = DatasetInfo(_NAME, _DESCRIPTION, _HOMEPAGE)
    # NOTE(review): the 'url' entry below is garbled (the URL string appears
    # truncated by extraction) — restore it from the original source before use.
    _sources = {'nmd': {'filename': 'nottingham_database.zip', 'url': ' 'archive': True, 'size': 142934, 'md5': 'f55c354aaf08bcb6e9b2b3b8d52e4df3', 'sha256': 'f79a4bffe78b16d630d4d69f9c62775a7aa246d0973c4d8714ab6c5139ff5a3b'}}
def cut(src, tgt, l):
    """Trim an audio file to at most `l` seconds and save it to `tgt`.

    Requires 16 kHz input. Returns 1 if the clip was long enough to be
    truncated, 0 if it was saved unmodified.
    """
    waveform, sample_rate = torchaudio.load(str(src))
    assert sample_rate == 16000
    waveform = waveform.squeeze()
    target_frames = int(l * sample_rate)
    truncated = 0
    if target_frames <= waveform.size(0):
        waveform = waveform[:target_frames]
        truncated = 1
    torchaudio.save(str(tgt), waveform.unsqueeze(0), sample_rate)
    return truncated
class TyposPerturbation(TextPerturbation):
    """Robustness perturbation simulating keyboard typos: each letter is
    replaced, with probability `prob`, by a neighboring QWERTY key."""
    # NOTE(review): the bare '(frozen=True)' line below looks like a truncated
    # '@dataclass(frozen=True)' decorator for Description — confirm.
    (frozen=True)
    class Description(PerturbationDescription):
        # Per-letter replacement probability.
        prob: float = 0.0
    name: str = 'typos'
    def __init__(self, prob: float):
        self.prob: float = prob
    def description(self) -> PerturbationDescription:
        """Describe this perturbation (marked as a robustness perturbation)."""
        return TyposPerturbation.Description(name=self.name, robustness=True, prob=self.prob)
    def perturb(self, text: str, rng: Random) -> str:
        """Return `text` with each letter independently replaced, with
        probability self.prob, by an adjacent key on a QWERTY keyboard;
        original letter case is preserved."""
        # QWERTY adjacency map: each key -> its physically neighboring keys.
        key_approx = {}
        key_approx['q'] = 'was'
        key_approx['w'] = 'qesad'
        key_approx['e'] = 'wsdfr'
        key_approx['r'] = 'edfgt'
        key_approx['t'] = 'rfghy'
        key_approx['y'] = 'tghju'
        key_approx['u'] = 'yhjki'
        key_approx['i'] = 'ujklo'
        key_approx['o'] = 'iklp'
        key_approx['p'] = 'ol'
        key_approx['a'] = 'qwsz'
        key_approx['s'] = 'weadzx'
        key_approx['d'] = 'erfcxs'
        key_approx['f'] = 'rtgvcd'
        key_approx['g'] = 'tyhbvf'
        key_approx['h'] = 'yujnbg'
        key_approx['j'] = 'uikmnh'
        key_approx['k'] = 'iolmj'
        key_approx['l'] = 'opk'
        key_approx['z'] = 'asx'
        key_approx['x'] = 'sdcz'
        key_approx['c'] = 'dfvx'
        key_approx['v'] = 'fgbc'
        key_approx['b'] = 'ghnv'
        key_approx['n'] = 'hjmb'
        key_approx['m'] = 'jkn'
        perturbed_texts = ''
        for letter in text:
            lcletter = letter.lower()
            # Non-letter characters pass through unchanged.
            if (lcletter not in key_approx.keys()):
                new_letter = lcletter
            elif (rng.random() < self.prob):
                new_letter = rng.choice(list(key_approx[lcletter]))
            else:
                new_letter = lcletter
            # Restore upper case when the original letter was upper case.
            if (not (lcletter == letter)):
                new_letter = new_letter.upper()
            perturbed_texts += new_letter
        return perturbed_texts
def BatchNorm_reader(reader, version, obj):
    """Backward-compat loader hook for serialized BatchNorm modules.

    For checkpoints older than version 2 that carry a legacy `running_std`,
    rebuilds `running_var` and drops the old attribute.

    NOTE(review): the formula reads running_var.pow(-2) - eps even though the
    legacy flag is `running_std` — verify against the matching serializer that
    `running_var` already holds the std value at this point.
    """
    if ((version < 2) and hasattr(obj, 'running_std')):
        obj.running_var = obj.running_var.pow((- 2)).add((- obj.eps))
        del obj.running_std
# NOTE(review): the line below looks like a truncated decorator — presumably
# `@register_model`; confirm against the original timm source.
_model
def skresnet50(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct a Selective-Kernel ResNet-50.

    Uses split_input=True in the SK kernel args and disables zero-init of the
    last BN per the SKNet paper configuration; optionally loads pretrained
    weights.
    """
    sk_kwargs = dict(split_input=True)
    default_cfg = default_cfgs['skresnet50']
    model = ResNet(SelectiveKernelBottleneck, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def process_paths(args):
    """Resolve path-like entries of a (possibly nested) argument dict in place.

    Values stored under the key 'path' or under keys ending in '_file' or
    '_dir' are converted to absolute `pathlib.Path` objects; None values are
    left untouched and nested dicts are processed recursively. Returns the
    same dict object.
    """
    suffixes = ('_file', '_dir')

    def _resolve(node):
        # Convert a bare 'path' entry when present and not None.
        if node.get('path') is not None:
            node['path'] = Path(node['path']).resolve()
        for key, value in node.items():
            if value is not None and any(key.endswith(sfx) for sfx in suffixes):
                node[key] = Path(value).resolve()
            if isinstance(value, dict):
                node[key] = _resolve(value)
        return node

    return _resolve(args)
def test_dlrep_wrong_secrets(group):
    """Proving a DLRep statement with incorrect secret values must fail
    verification."""
    g = group.generator()
    base1 = 2 * g
    base2 = 5 * g
    x1 = Secret()
    x2 = Secret()
    statement = DLRep(g, x1 * base1 + x2 * base2)
    # Deliberately wrong secret assignments: 10*base1 + 15*base2 != g.
    prover = statement.get_prover({x1: 10, x2: 15})
    verifier = statement.get_verifier()
    protocol = SigmaProtocol(verifier, prover)
    assert not protocol.verify()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.