def dnorm_problem(dim):
(X, constraints) = initialize_constraints_on_dnorm_problem(dim)
Jr = cvxpy.Parameter(((dim ** 2), (dim ** 2)))
Ji = cvxpy.Parameter(((dim ** 2), (dim ** 2)))
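# Objective: maximize Re tr(J^dag X), split into real and imaginary parts so the
# solver works over real-valued matrices (Watrous-style diamond-norm SDP).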
objective = cvxpy.Maximize(cvxpy.trace(((Jr.T @ X.re) + (Ji.T @ X.im))))
problem = cvxpy.Problem(objective, constraints)
return (problem, Jr, Ji) |
@pytest.mark.parametrize('min_len', [0, 3])
@pytest.mark.parametrize('num_chars', [5, 9])
@pytest.mark.parametrize('num_elements', itertools.chain(range(1, 26), [125]))
def test_scattered_hints_count(min_len, num_chars, num_elements):
manager = qutebrowser.browser.hints.HintManager(win_id=0)
chars = string.ascii_lowercase[:num_chars]
hints = manager._hint_scattered(min_len, chars, list(range(num_elements)))
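# The generated hints must be unique and no shorter than min_len.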
assert (len(hints) == len(set(hints)))
assert (not any((x for x in hints if (len(x) < min_len))))
hint_lens = {len(h) for h in hints}
assert (len(hint_lens) <= 2)
if (len(hint_lens) == 2):
assert (abs(functools.reduce(operator.sub, hint_lens)) <= 1)
longest_hint_len = max(hint_lens)
shortest_hint_len = min(hint_lens)
longest_hints = [x for x in hints if (len(x) == longest_hint_len)]
if (min_len < (max(hint_lens) - 1)):
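# Count how many of the longest hints share each one-character-shorter prefix;
# a prefix used only once would mean that hint could have been shortened.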
count_map = {}
for x in longest_hints:
prefix = x[:(- 1)]
count_map[prefix] = (count_map.get(prefix, 0) + 1)
assert all(((e != 1) for e in count_map.values()))
if ((longest_hint_len > min_len) and (longest_hint_len > 1)):
assert ((num_chars ** (longest_hint_len - 1)) < num_elements)
assert ((num_chars ** longest_hint_len) >= num_elements)
if (shortest_hint_len == longest_hint_len):
assert (((num_chars ** longest_hint_len) - num_elements) < (len(chars) - 1)) |
@ddt
class TestLowRankTwoBodyDecomposition(QiskitNatureTestCase):
@data(4, 5)
def test_double_factorized_random(self, dim: int):
two_body_tensor = random_two_body_tensor_real(dim, seed=25257)
(diag_coulomb_mats, orbital_rotations) = double_factorized(two_body_tensor)
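# Reconstruct T[p,q,r,s] = sum_{t,k,l} U[t,p,k] U[t,q,k] Z[t,k,l] U[t,r,l] U[t,s,l],
# with U the orbital rotations and Z the diagonal Coulomb matrices.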
reconstructed = np.einsum('tpk,tqk,tkl,trl,tsl->pqrs', orbital_rotations, orbital_rotations, diag_coulomb_mats, orbital_rotations, orbital_rotations)
np.testing.assert_allclose(reconstructed, two_body_tensor, atol=1e-08)
@unittest.skipIf(not _optionals.HAS_PYSCF, 'pyscf not available.')
def test_double_factorized_error_threshold_max_vecs(self):
driver = PySCFDriver(atom='Li 0 0 0; H 0 0 1.6')
driver_result = driver.run()
electronic_energy = driver_result.hamiltonian
two_body_tensor = unfold(electronic_energy.electronic_integrals.alpha['++--'])
with self.subTest('max rank'):
max_vecs = 20
(diag_coulomb_mats, orbital_rotations) = double_factorized(two_body_tensor, max_vecs=max_vecs)
reconstructed = np.einsum('tpk,tqk,tkl,trl,tsl->pqrs', orbital_rotations, orbital_rotations, diag_coulomb_mats, orbital_rotations, orbital_rotations)
self.assertEqual(len(orbital_rotations), max_vecs)
np.testing.assert_allclose(reconstructed, two_body_tensor, atol=1e-05)
with self.subTest('error threshold'):
error_threshold = 0.0001
(diag_coulomb_mats, orbital_rotations) = double_factorized(two_body_tensor, error_threshold=error_threshold)
reconstructed = np.einsum('tpk,tqk,tkl,trl,tsl->pqrs', orbital_rotations, orbital_rotations, diag_coulomb_mats, orbital_rotations, orbital_rotations)
self.assertLessEqual(len(orbital_rotations), 18)
np.testing.assert_allclose(reconstructed, two_body_tensor, atol=error_threshold)
with self.subTest('error threshold and max rank'):
(diag_coulomb_mats, orbital_rotations) = double_factorized(two_body_tensor, error_threshold=error_threshold, max_vecs=max_vecs)
reconstructed = np.einsum('tpk,tqk,tkl,trl,tsl->pqrs', orbital_rotations, orbital_rotations, diag_coulomb_mats, orbital_rotations, orbital_rotations)
self.assertLessEqual(len(orbital_rotations), 18)
np.testing.assert_allclose(reconstructed, two_body_tensor, atol=error_threshold) |
class GraphLearner(nn.Module):
def __init__(self, input_size, hidden_size, topk=None, epsilon=None, num_pers=16, metric_type='attention', device=None):
super(GraphLearner, self).__init__()
self.device = device
self.topk = topk
self.epsilon = epsilon
self.metric_type = metric_type
if (metric_type == 'attention'):
self.linear_sims = nn.ModuleList([nn.Linear(input_size, hidden_size, bias=False) for _ in range(num_pers)])
print('[ Multi-perspective {} GraphLearner: {} ]'.format(metric_type, num_pers))
elif (metric_type == 'weighted_cosine'):
self.weight_tensor = torch.Tensor(num_pers, input_size)
self.weight_tensor = nn.Parameter(nn.init.xavier_uniform_(self.weight_tensor))
print('[ Multi-perspective {} GraphLearner: {} ]'.format(metric_type, num_pers))
elif (metric_type == 'gat_attention'):
self.linear_sims1 = nn.ModuleList([nn.Linear(input_size, 1, bias=False) for _ in range(num_pers)])
self.linear_sims2 = nn.ModuleList([nn.Linear(input_size, 1, bias=False) for _ in range(num_pers)])
self.leakyrelu = nn.LeakyReLU(0.2)
print('[ GAT_Attention GraphLearner]')
elif (metric_type == 'kernel'):
self.precision_inv_dis = nn.Parameter(torch.Tensor(1, 1))
self.precision_inv_dis.data.uniform_(0, 1.0)
self.weight = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(input_size, hidden_size)))
elif (metric_type == 'transformer'):
self.linear_sim1 = nn.Linear(input_size, hidden_size, bias=False)
self.linear_sim2 = nn.Linear(input_size, hidden_size, bias=False)
elif (metric_type == 'cosine'):
pass
else:
raise ValueError('Unknown metric_type: {}'.format(metric_type))
print('[ Graph Learner metric type: {} ]'.format(metric_type))
def forward(self, context, ctx_mask=None):
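# Build a dense pairwise similarity ("attention") matrix with the configured
# metric; markoff_value is the fill value used for masked or pruned entries.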
if (self.metric_type == 'attention'):
attention = 0
for _ in range(len(self.linear_sims)):
context_fc = torch.relu(self.linear_sims[_](context))
attention += torch.matmul(context_fc, context_fc.transpose((- 1), (- 2)))
attention /= len(self.linear_sims)
markoff_value = (- INF)
elif (self.metric_type == 'weighted_cosine'):
expand_weight_tensor = self.weight_tensor.unsqueeze(1)
if (len(context.shape) == 3):
expand_weight_tensor = expand_weight_tensor.unsqueeze(1)
context_fc = (context.unsqueeze(0) * expand_weight_tensor)
context_norm = F.normalize(context_fc, p=2, dim=(- 1))
attention = torch.matmul(context_norm, context_norm.transpose((- 1), (- 2))).mean(0)
markoff_value = 0
elif (self.metric_type == 'transformer'):
Q = self.linear_sim1(context)
attention = (torch.matmul(Q, Q.transpose((- 1), (- 2))) / math.sqrt(Q.shape[(- 1)]))
markoff_value = (- INF)
elif (self.metric_type == 'gat_attention'):
attention = []
for _ in range(len(self.linear_sims1)):
a_input1 = self.linear_sims1[_](context)
a_input2 = self.linear_sims2[_](context)
attention.append(self.leakyrelu((a_input1 + a_input2.transpose((- 1), (- 2)))))
attention = torch.mean(torch.stack(attention, 0), 0)
markoff_value = (- INF)
elif (self.metric_type == 'kernel'):
dist_weight = torch.mm(self.weight, self.weight.transpose((- 1), (- 2)))
attention = self.compute_distance_mat(context, dist_weight)
attention = torch.exp((((- 0.5) * attention) * (self.precision_inv_dis ** 2)))
markoff_value = 0
elif (self.metric_type == 'cosine'):
context_norm = context.div(torch.norm(context, p=2, dim=(- 1), keepdim=True))
attention = torch.mm(context_norm, context_norm.transpose((- 1), (- 2))).detach()
markoff_value = 0
if (ctx_mask is not None):
attention = attention.masked_fill_((1 - ctx_mask.byte().unsqueeze(1)), markoff_value)
attention = attention.masked_fill_((1 - ctx_mask.byte().unsqueeze((- 1))), markoff_value)
if (self.epsilon is not None):
attention = self.build_epsilon_neighbourhood(attention, self.epsilon, markoff_value)
if (self.topk is not None):
attention = self.build_knn_neighbourhood(attention, self.topk, markoff_value)
return attention
def build_knn_neighbourhood(self, attention, topk, markoff_value):
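# Keep only the top-k entries per row; all other entries become markoff_value.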
topk = min(topk, attention.size((- 1)))
(knn_val, knn_ind) = torch.topk(attention, topk, dim=(- 1))
weighted_adjacency_matrix = to_cuda((markoff_value * torch.ones_like(attention)).scatter_((- 1), knn_ind, knn_val), self.device)
return weighted_adjacency_matrix
def build_epsilon_neighbourhood(self, attention, epsilon, markoff_value):
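# Keep entries above the epsilon threshold; all other entries become markoff_value.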
mask = (attention > epsilon).detach().float()
weighted_adjacency_matrix = ((attention * mask) + (markoff_value * (1 - mask)))
return weighted_adjacency_matrix
def compute_distance_mat(self, X, weight=None):
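# Pairwise squared distances via ||x_i - x_j||^2 = <x_i, x_i> - 2<x_i, x_j> + <x_j, x_j>,
# optionally under the learned metric given by `weight`.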
if (weight is not None):
trans_X = torch.mm(X, weight)
else:
trans_X = X
norm = torch.sum((trans_X * X), dim=(- 1))
dists = ((((- 2) * torch.matmul(trans_X, X.transpose((- 1), (- 2)))) + norm.unsqueeze(0)) + norm.unsqueeze(1))
return dists |
class DomainThermalParameters(BaseParameters):
def __init__(self, domain, main_param):
self.domain = domain
self.main_param = main_param
def _set_parameters(self):
Domain = self.domain.capitalize()
self.h_tab = pybamm.Parameter(f'{Domain} tab heat transfer coefficient [W.m-2.K-1]')
def h_cc(self, y, z):
inputs = {'Distance across electrode width [m]': y, 'Distance across electrode height [m]': z}
Domain = self.domain.capitalize()
return pybamm.FunctionParameter(f'{Domain} current collector surface heat transfer coefficient [W.m-2.K-1]', inputs)
def c_p(self, T):
inputs = {'Temperature [K]': T}
if (self.domain == 'separator'):
name = 'Separator specific heat capacity [J.kg-1.K-1]'
else:
Domain = self.domain.capitalize()
name = f'{Domain} electrode specific heat capacity [J.kg-1.K-1]'
return pybamm.FunctionParameter(name, inputs)
def c_p_cc(self, T):
inputs = {'Temperature [K]': T}
Domain = self.domain.capitalize()
return pybamm.FunctionParameter(f'{Domain} current collector specific heat capacity [J.kg-1.K-1]', inputs)
def lambda_(self, T):
inputs = {'Temperature [K]': T}
if (self.domain == 'separator'):
name = 'Separator thermal conductivity [W.m-1.K-1]'
else:
Domain = self.domain.capitalize()
name = f'{Domain} electrode thermal conductivity [W.m-1.K-1]'
return pybamm.FunctionParameter(name, inputs)
def lambda_cc(self, T):
inputs = {'Temperature [K]': T}
Domain = self.domain.capitalize()
return pybamm.FunctionParameter(f'{Domain} current collector thermal conductivity [W.m-1.K-1]', inputs)
def rho(self, T):
inputs = {'Temperature [K]': T}
if (self.domain == 'separator'):
name = 'Separator density [kg.m-3]'
else:
Domain = self.domain.capitalize()
name = f'{Domain} electrode density [kg.m-3]'
return pybamm.FunctionParameter(name, inputs)
def rho_cc(self, T):
inputs = {'Temperature [K]': T}
Domain = self.domain.capitalize()
return pybamm.FunctionParameter(f'{Domain} current collector density [kg.m-3]', inputs)
def rho_c_p(self, T):
return (self.rho(T) * self.c_p(T))
def rho_c_p_cc(self, T):
return (self.rho_cc(T) * self.c_p_cc(T)) |
def default_regression_model(num_values, num_anchors, pyramid_feature_size=256, regression_feature_size=256, name='regression_submodel'):
options = {'kernel_size': 3, 'strides': 1, 'padding': 'same', 'kernel_initializer': keras.initializers.normal(mean=0.0, stddev=0.01, seed=None), 'bias_initializer': 'zeros'}
inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size))
outputs = inputs
for i in range(4):
outputs = keras.layers.Conv2D(filters=regression_feature_size, activation='relu', name='pyramid_regression_{}'.format(i), **options)(outputs)
outputs = keras.layers.Conv2D((num_anchors * num_values), name='pyramid_regression', **options)(outputs)
outputs = keras.layers.Reshape(((- 1), num_values), name='pyramid_regression_reshape')(outputs)
return keras.models.Model(inputs=inputs, outputs=outputs, name=name) |
class Pile(TracesGroup):
def __init__(self):
TracesGroup.__init__(self, None)
self.subpiles = {}
self.open_files = {}
self.listeners = []
self.abspaths = set()
def add_listener(self, obj):
self.listeners.append(util.smart_weakref(obj))
def notify_listeners(self, what, content):
for ref in self.listeners:
obj = ref()
if obj:
obj(what, content)
def load_files(self, filenames, filename_attributes=None, fileformat='mseed', cache=None, show_progress=True, update_progress=None):
load = loader(filenames, fileformat, cache, filename_attributes, show_progress=show_progress, update_progress=update_progress)
self.add_files(load)
def add_files(self, files):
for file in files:
self.add_file(file)
def add_file(self, file):
if ((file.abspath is not None) and (file.abspath in self.abspaths)):
logger.warning(('File already in pile: %s' % file.abspath))
return
if (file.deltatmin is None):
logger.warning(('Sampling rates of all traces are zero in file: %s' % file.abspath))
return
subpile = self.dispatch(file)
subpile.add_file(file)
if (file.abspath is not None):
self.abspaths.add(file.abspath)
def remove_file(self, file):
subpile = file.get_parent()
if (subpile is not None):
subpile.remove_file(file)
if (file.abspath is not None):
self.abspaths.remove(file.abspath)
def remove_files(self, files):
subpile_files = {}
for file in files:
subpile = file.get_parent()
if (subpile not in subpile_files):
subpile_files[subpile] = []
subpile_files[subpile].append(file)
for (subpile, files) in subpile_files.items():
subpile.remove_files(files)
for file in files:
if (file.abspath is not None):
self.abspaths.remove(file.abspath)
def dispatch_key(self, file):
dt = int(math.floor(math.log(file.deltatmin)))
return dt
def dispatch(self, file):
k = self.dispatch_key(file)
if (k not in self.subpiles):
self.subpiles[k] = SubPile(self)
return self.subpiles[k]
def get_deltats(self):
return list(self.deltats.keys())
def chop(self, tmin, tmax, group_selector=None, trace_selector=None, snap=(round, round), include_last=False, load_data=True):
chopped = []
used_files = set()
traces = self.relevant(tmin, tmax, group_selector, trace_selector)
if load_data:
files_changed = False
for tr in traces:
if (tr.file and (tr.file not in used_files)):
if tr.file.load_data():
files_changed = True
if (tr.file is not None):
used_files.add(tr.file)
if files_changed:
traces = self.relevant(tmin, tmax, group_selector, trace_selector)
for tr in traces:
if ((not load_data) and (tr.ydata is not None)):
tr = tr.copy(data=False)
tr.ydata = None
try:
chopped.append(tr.chop(tmin, tmax, inplace=False, snap=snap, include_last=include_last))
except trace.NoData:
pass
return (chopped, used_files)
def _process_chopped(self, chopped, degap, maxgap, maxlap, want_incomplete, wmax, wmin, tpad):
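# Sort and optionally degap the chopped traces, then (unless incomplete traces
# are wanted) weed out any that do not cover the full padded window.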
chopped.sort(key=(lambda a: a.full_id))
if degap:
chopped = degapper(chopped, maxgap=maxgap, maxlap=maxlap)
if (not want_incomplete):
chopped_weeded = []
for tr in chopped:
emin = (tr.tmin - (wmin - tpad))
emax = ((tr.tmax + tr.deltat) - (wmax + tpad))
if ((abs(emin) <= (0.5 * tr.deltat)) and (abs(emax) <= (0.5 * tr.deltat))):
chopped_weeded.append(tr)
elif degap:
if ((0.0 < emin <= (5.0 * tr.deltat)) and (((- 5.0) * tr.deltat) <= emax < 0.0)):
tr.extend((wmin - tpad), ((wmax + tpad) - tr.deltat), fillmethod='repeat')
chopped_weeded.append(tr)
chopped = chopped_weeded
for tr in chopped:
tr.wmin = wmin
tr.wmax = wmax
return chopped
def chopper(self, tmin=None, tmax=None, tinc=None, tpad=0.0, group_selector=None, trace_selector=None, want_incomplete=True, degap=True, maxgap=5, maxlap=None, keep_current_files_open=False, accessor_id=None, snap=(round, round), include_last=False, load_data=True, style=None):
if (tmin is None):
if (self.tmin is None):
logger.warning("Pile's tmin is not set - pile may be empty.")
return
tmin = (self.tmin + tpad)
if (tmax is None):
if (self.tmax is None):
logger.warning("Pile's tmax is not set - pile may be empty.")
return
tmax = (self.tmax - tpad)
if (not self.is_relevant((tmin - tpad), (tmax + tpad), group_selector)):
return
if (accessor_id not in self.open_files):
self.open_files[accessor_id] = set()
open_files = self.open_files[accessor_id]
if (tinc is None):
tinc = (tmax - tmin)
nwin = 1
else:
eps = (tinc * 1e-06)
if (tinc != 0.0):
nwin = (int((((tmax - eps) - tmin) / tinc)) + 1)
else:
nwin = 1
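# Walk through the time windows, chopping padded traces for each one and
# tracking which files hold data so unused ones can be released afterwards.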
for iwin in range(nwin):
(wmin, wmax) = ((tmin + (iwin * tinc)), min((tmin + ((iwin + 1) * tinc)), tmax))
(chopped, used_files) = self.chop((wmin - tpad), (wmax + tpad), group_selector, trace_selector, snap, include_last, load_data)
for file in (used_files - open_files):
file.use_data()
open_files.update(used_files)
processed = self._process_chopped(chopped, degap, maxgap, maxlap, want_incomplete, wmax, wmin, tpad)
if (style == 'batch'):
(yield Batch(tmin=wmin, tmax=wmax, i=iwin, n=nwin, traces=processed))
else:
(yield processed)
unused_files = (open_files - used_files)
while unused_files:
file = unused_files.pop()
file.drop_data()
open_files.remove(file)
if (not keep_current_files_open):
while open_files:
file = open_files.pop()
file.drop_data()
def all(self, *args, **kwargs):
alltraces = []
for traces in self.chopper(*args, **kwargs):
alltraces.extend(traces)
return alltraces
def iter_all(self, *args, **kwargs):
for traces in self.chopper(*args, **kwargs):
for tr in traces:
(yield tr)
def chopper_grouped(self, gather, progress=None, *args, **kwargs):
keys = self.gather_keys(gather)
if (len(keys) == 0):
return
outer_group_selector = None
if ('group_selector' in kwargs):
outer_group_selector = kwargs['group_selector']
outer_trace_selector = None
if ('trace_selector' in kwargs):
outer_trace_selector = kwargs['trace_selector']
gather_cache = {}
pbar = None
try:
if (progress is not None):
pbar = util.progressbar(progress, len(keys))
for (ikey, key) in enumerate(keys):
def tsel(tr):
return ((gather(tr) == key) and ((outer_trace_selector is None) or outer_trace_selector(tr)))
def gsel(gr):
if (gr not in gather_cache):
gather_cache[gr] = gr.gather_keys(gather)
return ((key in gather_cache[gr]) and ((outer_group_selector is None) or outer_group_selector(gr)))
kwargs['trace_selector'] = tsel
kwargs['group_selector'] = gsel
for traces in self.chopper(*args, **kwargs):
(yield traces)
if pbar:
pbar.update((ikey + 1))
finally:
if pbar:
pbar.finish()
def gather_keys(self, gather, selector=None):
keys = set()
for subpile in self.subpiles.values():
keys |= subpile.gather_keys(gather, selector)
return sorted(keys)
def iter_traces(self, load_data=False, return_abspath=False, group_selector=None, trace_selector=None):
for subpile in self.subpiles.values():
if ((not group_selector) or group_selector(subpile)):
for tr in subpile.iter_traces(load_data, return_abspath, group_selector, trace_selector):
(yield tr)
def iter_files(self):
for subpile in self.subpiles.values():
for file in subpile.iter_files():
(yield file)
def reload_modified(self):
modified = False
for subpile in self.subpiles.values():
modified |= subpile.reload_modified()
return modified
def get_tmin(self):
return self.tmin
def get_tmax(self):
return self.tmax
def get_deltatmin(self):
return self.deltatmin
def get_deltatmax(self):
return self.deltatmax
def is_empty(self):
return ((self.tmin is None) and (self.tmax is None))
def __str__(self):
if ((self.tmin is not None) and (self.tmax is not None)):
tmin = util.time_to_str(self.tmin)
tmax = util.time_to_str(self.tmax)
s = 'Pile\n'
s += ('number of subpiles: %i\n' % len(self.subpiles))
s += ('timerange: %s - %s\n' % (tmin, tmax))
s += ('networks: %s\n' % ', '.join(sl(self.networks.keys())))
s += ('stations: %s\n' % ', '.join(sl(self.stations.keys())))
s += ('locations: %s\n' % ', '.join(sl(self.locations.keys())))
s += ('channels: %s\n' % ', '.join(sl(self.channels.keys())))
s += ('deltats: %s\n' % ', '.join(sl(self.deltats.keys())))
else:
s = 'empty Pile'
return s
def snuffle(self, **kwargs):
from pyrocko.gui.snuffler.snuffler import snuffle
snuffle(self, **kwargs) |
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
levelname = record.levelname
if (self.use_color and (levelname in COLORS)):
levelname_color = (((COLOR_SEQ % (30 + COLORS[levelname])) + levelname) + RESET_SEQ)
record.levelname = levelname_color
return logging.Formatter.format(self, record) |
def post_release_work():
current_version = get_version()
dev_version = f'{current_version.major}.{(current_version.minor + 1)}.0.dev0'
current_version = current_version.base_version
version = input(f'Which version are we developing now? [{dev_version}]')
if (len(version) == 0):
version = dev_version
print(f'Updating version to {version}.')
global_version_update(version) |
def taxids_at_ranks(qid, ranks, taxdump):
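# Walk up the taxonomy from qid towards the root, recording the taxid at every
# requested rank; stops when a node is its own parent or the parent is '0'.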
cid = qid
pid = ''
res = {x: None for x in ranks}
rankset = set(ranks)
while True:
taxon = _get_taxon(cid, taxdump)
rank = taxon['rank']
if (rank in rankset):
res[rank] = cid
pid = taxon['parent']
if ((pid == cid) or (pid == '0')):
break
cid = pid
return res |
class InitiatorSetup(NamedTuple):
current_state: State
block_number: typing.BlockNumber
channel: NettingChannelState
channel_map: typing.Dict[(typing.ChannelID, NettingChannelState)]
channels: ChannelSet
available_routes: typing.List[RouteState]
prng: random.Random
lock: HashTimeLockState |
class Test_pep440_branch(unittest.TestCase, Testing_branch_renderer_case_mixin):
style = 'pep440-branch'
expected = {'tagged_0_commits_clean': 'v1.2.3', 'tagged_0_commits_dirty': 'v1.2.3+0.g.dirty', 'tagged_1_commits_clean': 'v1.2.3+1.gabc', 'tagged_1_commits_dirty': 'v1.2.3+1.gabc.dirty', 'untagged_0_commits_clean': '0+untagged.0.g', 'untagged_0_commits_dirty': '0+untagged.0.g.dirty', 'untagged_1_commits_clean': '0+untagged.1.gabc', 'untagged_1_commits_dirty': '0+untagged.1.gabc.dirty', 'branch_tagged_0_commits_clean': 'v1.2.3', 'branch_tagged_0_commits_dirty': 'v1.2.3.dev0+0.g.dirty', 'branch_tagged_1_commits_clean': 'v1.2.3.dev0+1.gabc', 'branch_tagged_1_commits_dirty': 'v1.2.3.dev0+1.gabc.dirty', 'branch_untagged_0_commits_clean': '0.dev0+untagged.0.g', 'branch_untagged_0_commits_dirty': '0.dev0+untagged.0.g.dirty', 'branch_untagged_1_commits_clean': '0.dev0+untagged.1.gabc', 'branch_untagged_1_commits_dirty': '0.dev0+untagged.1.gabc.dirty', 'error_getting_parts': 'unknown'} |
class CfdRunnableFoam(_CfdRunnable):
def __init__(self, solver=None):
super(CfdRunnableFoam, self).__init__(solver)
self.writer = CfdCaseWriterFoam.CfdCaseWriterFoam(self.analysis)
if using_freecad_plot:
from FoamCaseBuilder import FoamResidualPloter
self.ploter = FoamResidualPloter.FoamResidualPloter()
else:
pass
def check_prerequisites(self):
return ''
def write_case(self):
return self.writer.write_case()
def edit_case(self):
case_path = ((self.solver.WorkingDir + os.path.sep) + self.solver.InputCaseName)
FreeCAD.Console.PrintMessage('Please edit the case input files externally at: {}'.format(case_path))
self.writer.builder.editCase()
def get_solver_cmd(self):
cmd = self.writer.builder.getSolverCommand()
FreeCAD.Console.PrintMessage((('Solver run command: ' + cmd) + '\n'))
return cmd
def solve(self):
pass
def view_result_externally(self):
self.writer.builder.viewResult()
def view_result(self):
result = self.writer.builder.exportResult()
from importCfdResultFoamVTK import importCfdResult
importCfdResult(result, self.analysis)
def process_output(self, text):
if using_freecad_plot:
self.ploter.process_text(text)
self.ploter.refresh() |
class GCN3D(nn.Module):
def __init__(self, class_num, support_num, neighbor_num):
super().__init__()
self.neighbor_num = neighbor_num
self.conv_0 = gcn3d.Conv_surface(kernel_num=128, support_num=support_num)
self.conv_1 = gcn3d.Conv_layer(128, 128, support_num=support_num)
self.pool_1 = gcn3d.Pool_layer(pooling_rate=4, neighbor_num=4)
self.conv_2 = gcn3d.Conv_layer(128, 256, support_num=support_num)
self.conv_3 = gcn3d.Conv_layer(256, 256, support_num=support_num)
self.pool_2 = gcn3d.Pool_layer(pooling_rate=4, neighbor_num=4)
self.conv_4 = gcn3d.Conv_layer(256, 512, support_num=support_num)
dim_fuse = sum([128, 128, 256, 256, 512, 512, 16])
self.conv1d_block = nn.Sequential(nn.Conv1d(dim_fuse, 512, 1), nn.ReLU(inplace=True), nn.Conv1d(512, 512, 1), nn.ReLU(inplace=True), nn.Conv1d(512, class_num, 1))
def forward(self, vertices: 'tensor (bs, vertice_num, 3)', onehot: 'tensor (bs, cat_num)'):
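# Two conv/pool stages over k-NN graphs (rebuilt after each pooling); features
# from every stage are upsampled back to the input points and fused with a
# global max-pooled descriptor and the category one-hot before the 1D conv head.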
(bs, vertice_num, _) = vertices.size()
neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)
fm_0 = F.relu(self.conv_0(neighbor_index, vertices), inplace=True)
fm_1 = F.relu(self.conv_1(neighbor_index, vertices, fm_0), inplace=True)
(v_pool_1, fm_pool_1) = self.pool_1(vertices, fm_1)
neighbor_index = gcn3d.get_neighbor_index(v_pool_1, self.neighbor_num)
fm_2 = F.relu(self.conv_2(neighbor_index, v_pool_1, fm_pool_1), inplace=True)
fm_3 = F.relu(self.conv_3(neighbor_index, v_pool_1, fm_2), inplace=True)
(v_pool_2, fm_pool_2) = self.pool_2(v_pool_1, fm_3)
neighbor_index = gcn3d.get_neighbor_index(v_pool_2, self.neighbor_num)
fm_4 = self.conv_4(neighbor_index, v_pool_2, fm_pool_2)
f_global = fm_4.max(1)[0]
nearest_pool_1 = gcn3d.get_nearest_index(vertices, v_pool_1)
nearest_pool_2 = gcn3d.get_nearest_index(vertices, v_pool_2)
fm_2 = gcn3d.indexing_neighbor(fm_2, nearest_pool_1).squeeze(2)
fm_3 = gcn3d.indexing_neighbor(fm_3, nearest_pool_1).squeeze(2)
fm_4 = gcn3d.indexing_neighbor(fm_4, nearest_pool_2).squeeze(2)
f_global = f_global.unsqueeze(1).repeat(1, vertice_num, 1)
onehot = onehot.unsqueeze(1).repeat(1, vertice_num, 1)
fm_fuse = torch.cat([fm_0, fm_1, fm_2, fm_3, fm_4, f_global, onehot], dim=2)
conv1d_input = fm_fuse.permute(0, 2, 1)
conv1d_out = self.conv1d_block(conv1d_input)
pred = conv1d_out.permute(0, 2, 1)
return pred |
class ParallelPoolPerformerTests(TestCase, ParallelPerformerTestsMixin):
def setUp(self):
super(ParallelPoolPerformerTests, self).setUp()
self.pool = ThreadPool()
self.p_performer = partial(perform_parallel_with_pool, self.pool)
self.dispatcher = ComposedDispatcher([base_dispatcher, TypeDispatcher({ParallelEffects: self.p_performer})]) |
def model_composited(t_imgs_dict, t_labels_dict, params=dict()):
net = Parameters()
net.inputs = t_imgs_dict
net.imgs = dict()
net.resi_imgs = dict()
net.resi_imgs_noaug = dict()
net.latent = dict()
net.logits = dict()
net.instr = dict()
net.resi_outs = dict()
net.activations = dict()
is_train = params['is_train']
def store_act(name, target, activations):
if (name not in net.activations):
net.activations[name] = dict()
net.activations[name][target] = activations
coords_res = int(params.get('coords_res', 20))
batch_size = net.inputs['real'].get_shape()[0]
(t_canonical_coords, blk_size) = create_canonical_coordinates(batch_size, 160, coords_res)
coords_sigma = ((params.get('coords_sigma', 1.0) * blk_size) * 0.2)
for (key, t_img) in net.inputs.items():
net.resi_imgs_noaug[key] = t_img
if (is_train and params.get('local_warping', 0)):
with tf.variable_scope('input'):
(net.imgs[key], _, __) = oper_random_geo_perturb(t_img, t_canonical_coords, coords_sigma)
else:
net.imgs[key] = t_img
net.mean_imgs = dict()
for (key, t_img) in net.imgs.items():
value = params.get(('mean_' + key), 0.5)
if isinstance(value, str):
value = read_image(value)
value = np.expand_dims(value, axis=0)
print('mean image', value.shape)
net.mean_imgs[key] = value
net.resi_imgs[key] = (t_img - value)
if is_train:
noise_sigma = params.get('noise_sigma', (3.0 / 255.0))
t_noise = tf.random_normal(tf.shape(t_img), stddev=noise_sigma)
net.resi_imgs[key] = (net.resi_imgs[key] + t_noise)
net.resi_imgs_noaug[key] = (net.resi_imgs_noaug[key] - value)
fakes = filter((lambda name: ((name != 'real') and (name != 'unsup'))), t_imgs_dict.keys())
with tf.variable_scope('generator'):
def encoder(t_input, name):
with runits('relu') as activations:
t_logits = oper_img2img(t_input, prog_ch, params=params, name='img2prog')
t_logits = tf.contrib.layers.avg_pool2d(t_logits, [8, 8], 8)
t_instr = tf.argmax(t_logits, axis=3, name='prediction')
net.latent[name] = t_logits
net.logits[name] = t_logits
net.instr[name] = tf.expand_dims(t_instr, axis=3)
store_act(name, 'img2prog', activations)
return t_logits
feedback = (params.get('feedback', 0) if is_train else 0)
decoding = params.get('decoder', 0)
for (name, t_resi_inp) in net.resi_imgs.items():
t_latent = encoder(t_resi_inp, name)
if (not decoding):
continue
return net |
@dataclass(init=False)
class TokenNetworkRegistryState(State):
class Meta():
unknown = marshmallow.EXCLUDE
fields = ['address', 'token_network_list', 'tokennetworkaddresses_to_tokennetworks']
load_only = ['tokennetworkaddresses_to_tokennetworks']
address: TokenNetworkRegistryAddress
token_network_list: List[TokenNetworkState]
tokenaddresses_to_tokennetworkaddresses: Dict[(TokenAddress, TokenNetworkAddress)] = field(repr=False)
tokennetworkaddresses_to_tokennetworks: Dict[(TokenNetworkAddress, TokenNetworkState)] = field(repr=False, default_factory=dict)
def __init__(self, address: TokenNetworkRegistryAddress, token_network_list: List[TokenNetworkState], tokennetworkaddresses_to_tokennetworks: Dict[(Any, TokenNetworkState)]=None) -> None:
if ((not token_network_list) and tokennetworkaddresses_to_tokennetworks):
token_network_list = list(tokennetworkaddresses_to_tokennetworks.values())
self.address = address
self.token_network_list = []
self.tokennetworkaddresses_to_tokennetworks = {}
self.tokenaddresses_to_tokennetworkaddresses = {}
for tn in token_network_list:
self.add_token_network(tn)
def add_token_network(self, token_network: TokenNetworkState) -> None:
self.token_network_list.append(token_network)
self.tokennetworkaddresses_to_tokennetworks[token_network.address] = token_network
self.tokenaddresses_to_tokennetworkaddresses[token_network.token_address] = token_network.address |
def main(data_dir, client, bc, config):
benchmark(read_tables, data_dir, bc, dask_profile=config['dask_profile'])
query = ' \n\t\tSELECT CASE WHEN pmc > 0.0 THEN CAST (amc AS DOUBLE) / CAST (pmc AS DOUBLE) ELSE -1.0 END AS am_pm_ratio\n\t\tFROM \n\t\t(\n\t\t\tSELECT SUM(amc1) AS amc, SUM(pmc1) AS pmc\n\t\t\tFROM\n\t\t\t(\n\t\t\t\tSELECT\n\t\t\t\t\tCASE WHEN t_hour BETWEEN 7 AND 8 THEN COUNT(1) ELSE 0 END AS amc1,\n\t\t\t\t\tCASE WHEN t_hour BETWEEN 19 AND 20 THEN COUNT(1) ELSE 0 END AS pmc1\n\t\t\t\tFROM web_sales ws\n\t\t\t\tJOIN household_demographics hd ON (hd.hd_demo_sk = ws.ws_ship_hdemo_sk and hd.hd_dep_count = 5)\n\t\t\t\tJOIN web_page wp ON (wp.wp_web_page_sk = ws.ws_web_page_sk and wp.wp_char_count BETWEEN 5000 AND 6000)\n\t\t\t\tJOIN time_dim td ON (td.t_time_sk = ws.ws_sold_time_sk and td.t_hour IN (7,8,19,20))\n\t\t\t\tGROUP BY t_hour\n\t\t\t) cnt_am_pm\n\t\t) sum_am_pm\n\t'
result = bc.sql(query)
return result |
def pad_sequences(sequences, pad_mark=0):
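# Pad every sequence with pad_mark up to the longest length in the batch,
# returning the padded sequences together with their true lengths.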
max_len = max(map((lambda x: len(x)), sequences))
(seq_list, seq_len_list) = ([], [])
for seq in sequences:
seq = list(seq)
seq_ = (seq[:max_len] + ([pad_mark] * max((max_len - len(seq)), 0)))
seq_list.append(seq_)
seq_len_list.append(min(len(seq), max_len))
return (seq_list, seq_len_list) |
class SNConv2d(nn.Conv2d):
Ip = 1
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
super(SNConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
u = Parameter(torch.FloatTensor(1, self.weight.size(0)).normal_(), requires_grad=False)
self.register_parameter('u', u)
@property
def W_bar(self):
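# Spectral normalization: rescale W by an estimate of its largest singular
# value obtained by power iteration (Ip steps); u persists across calls.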
(sigma, _u) = max_singular_value(self.weight, u=self.u, Ip=self.Ip)
if self.training:
self.u = Parameter(_u.data, requires_grad=False)
return (self.weight / sigma)
def forward(self, input):
return F.conv2d(input, self.W_bar, self.bias, self.stride, self.padding, self.dilation, self.groups) |
def change_nvfancontrol_default(name, value):
with open('/etc/nvfancontrol.conf', 'r') as f:
lines = f.readlines()
with open('/etc/nvfancontrol.conf', 'w') as f:
for line in lines:
match_defaults = re.search(FAN_NVFAN_DEFAULT_RE, line.strip())
if match_defaults:
parsed_line = match_defaults.groupdict()
if (name.upper() == parsed_line['type']):
line = line.replace(parsed_line['value'], value)
f.write(line) |
def _send_invitations(*, queryset, invited_only: bool=False, uninvited_only: bool=False, is_reminder: bool=False):
queryset = queryset.filter(status=ScheduleItem.STATUS.waiting_confirmation, submission__isnull=False, type__in=[ScheduleItem.TYPES.talk, ScheduleItem.TYPES.submission, ScheduleItem.TYPES.training])
if uninvited_only:
queryset = queryset.filter(speaker_invitation_sent_at__isnull=True)
elif invited_only:
queryset = queryset.filter(speaker_invitation_sent_at__isnull=False)
for schedule_item in queryset:
schedule_item.speaker_invitation_sent_at = timezone.now()
send_schedule_invitation_email(schedule_item, is_reminder=is_reminder)
schedule_item.save() |
def _get_build_status(build_obj):
phase = build_obj.phase
status = {}
error = None
if (not database.BUILD_PHASE.is_terminal_phase(phase)):
try:
status = build_logs.get_status(build_obj.uuid)
except BuildStatusRetrievalError as bsre:
phase = 'cannot_load'
if SuperUserPermission().can():
error = str(bsre)
else:
error = 'Redis may be down. Please contact support.'
if (phase != 'cannot_load'):
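# A heartbeat older than one minute means the build worker died; flag an
# internal error, downgraded to 'expired' when no queue retries remain.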
if ((status is not None) and ('heartbeat' in status) and status['heartbeat']):
heartbeat = datetime.datetime.utcfromtimestamp(status['heartbeat'])
if ((datetime.datetime.utcnow() - heartbeat) > datetime.timedelta(minutes=1)):
phase = database.BUILD_PHASE.INTERNAL_ERROR
if (phase == database.BUILD_PHASE.INTERNAL_ERROR):
retry = (build_obj.queue_id and dockerfile_build_queue.has_retries_remaining(build_obj.queue_id))
if (not retry):
phase = 'expired'
return (phase, status, error) |
class Effect6794(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, src, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Shield Command')), 'warfareBuff4Value', src.getModifiedItemAttr('shipBonusORECapital3'), skill='Capital Industrial Ships', **kwargs)
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Shield Command')), 'buffDuration', src.getModifiedItemAttr('shipBonusORECapital3'), skill='Capital Industrial Ships', **kwargs)
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Shield Command')), 'warfareBuff1Value', src.getModifiedItemAttr('shipBonusORECapital3'), skill='Capital Industrial Ships', **kwargs)
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Shield Command')), 'warfareBuff3Value', src.getModifiedItemAttr('shipBonusORECapital3'), skill='Capital Industrial Ships', **kwargs)
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Shield Command')), 'warfareBuff2Value', src.getModifiedItemAttr('shipBonusORECapital3'), skill='Capital Industrial Ships', **kwargs) |
def Frame(name, widget):
def hx(value):
return hex(int((value * 255)))[2:]
f = Gtk.Frame()
qltk.add_css(f, '* {opacity: 0.9}')
l = Gtk.Label()
l.set_markup(util.escape(name))
qltk.add_css(l, ' * {opacity: 0.6; padding: 0px 2px;}')
f.set_label_widget(l)
a = Align(top=6, left=12, bottom=6, right=6)
f.add(a)
a.add(widget)
return f |
def test_TMM_dielectric_model():
drud = Drude(An=24.317, Brn=0.12574)
model = DielectricConstantModel(e_inf=3.4837, oscillators=[drud])
wavelength = (2 * np.logspace(3, 4, 10))
n = model.n_and_k(wavelength)
data = ((0.3737771 + 2.0726883j), (0. + 3.j), (0. + 4.j), (1. + 5.j), (1. + 6.8504966j), (2. + 8.j), (4. + 10.j), (5. + 12.j), (7. + 13.j), (9.9538966 + 15.j))
assert all([(d == approx(o)) for (d, o) in zip(data, n)]) |
@strawberry.type
class SpecialGuestSection():
id: strawberry.ID
title: str
guest_name: str
guest_job_title: str
event_date: datetime.date
cta: (CTA | None)
_block: strawberry.Private[Any]
@strawberry.field
def guest_photo(self) -> str:
guest_photo = self._block.value['guest_photo']
return guest_photo.get_rendition('fill-600x600|jpegquality-60').full_url
@classmethod
def from_block(cls, block):
cta = block.value['cta']
return cls(id=block.id, title=block.value['title'], guest_name=block.value['guest_name'], guest_job_title=block.value['guest_job_title'], event_date=block.value['event_date'], cta=(CTA.from_block(cta) if cta['label'] else None), _block=block) |
def _check_service_key(app):
if (not app.config.get('SETUP_COMPLETE', False)):
return (True, 'Stack not fully setup; skipping check')
try:
kid = instance_keys.local_key_id
except IOError as ex:
return (True, 'Stack not fully setup; skipping check')
try:
key_is_valid = bool(instance_keys.get_service_key_public_key(kid))
message = (('Could not find valid instance service key %s' % kid) if (not key_is_valid) else None)
return (key_is_valid, message)
except Exception as ex:
logger.exception('Got exception when trying to retrieve the instance key')
return (True, 'Failed to get instance key due to a database issue; skipping check') |
def check_dicom_agrees(ds1, ds2):
assert (ds1.SOPInstanceUID == ds2.SOPInstanceUID)
assert (ds1.SeriesInstanceUID == ds2.SeriesInstanceUID)
assert (ds1.StudyInstanceUID == ds2.StudyInstanceUID)
assert (ds1.PatientID == ds2.PatientID)
assert (ds1.Modality == ds2.Modality)
assert (ds1.Manufacturer == ds2.Manufacturer)
assert (len(ds1.BeamSequence) == len(ds2.BeamSequence))
assert (ds1.BeamSequence[0].Manufacturer == ds2.BeamSequence[0].Manufacturer) |
@click.command(help='Try ./bin/projects.py docs/data/projects.yml')
@click.argument('input', type=click.File('r'))
@click.option('--online/--no-online', default=True, help='Get info from GitHub')
@click.option('--auth', help='GitHub authentication token')
@click.option('--dry-run', default=False, help='Print the output, rather than writing it to files in the repo')
def projects(input: TextIO, online: bool, auth: (str | None), dry_run: bool) -> None:
config = yaml.safe_load(input)
projects = get_projects(config, online=online, auth=auth)
if dry_run:
output = render_projects(projects, dest_path=README_FILE)
print(output)
else:
insert_projects_table(README_FILE, projects=projects[:10], input_filename=input.name, include_info=False)
insert_projects_table(DOCS_PAGE, projects=projects, input_filename=input.name, include_info=False) |
class ChecklistParameterItem(GroupParameterItem):
def __init__(self, param, depth):
self.btnGrp = QtWidgets.QButtonGroup()
self.btnGrp.setExclusive(False)
self._constructMetaBtns()
super().__init__(param, depth)
def _constructMetaBtns(self):
self.metaBtnWidget = QtWidgets.QWidget()
self.metaBtnLayout = lay = QtWidgets.QHBoxLayout(self.metaBtnWidget)
lay.setContentsMargins(0, 0, 0, 0)
lay.setSpacing(2)
self.metaBtns = {}
lay.addStretch(0)
for title in ('Clear', 'Select'):
self.metaBtns[title] = btn = QtWidgets.QPushButton(f'{title} All')
self.metaBtnLayout.addWidget(btn)
btn.clicked.connect(getattr(self, f'{title.lower()}AllClicked'))
self.metaBtns['default'] = self.makeDefaultButton()
self.metaBtnLayout.addWidget(self.metaBtns['default'])
def treeWidgetChanged(self):
ParameterItem.treeWidgetChanged(self)
tw = self.treeWidget()
if (tw is None):
return
tw.setItemWidget(self, 1, self.metaBtnWidget)
def selectAllClicked(self):
self.param.valChangingProxy.timer.stop()
self.param.setValue(self.param.reverse[0])
def clearAllClicked(self):
self.param.valChangingProxy.timer.stop()
self.param.setValue([])
def insertChild(self, pos, item):
ret = super().insertChild(pos, item)
self.btnGrp.addButton(item.widget)
return ret
def addChild(self, item):
ret = super().addChild(item)
self.btnGrp.addButton(item.widget)
return ret
def takeChild(self, i):
child = super().takeChild(i)
self.btnGrp.removeButton(child.widget)
return child
def optsChanged(self, param, opts):
super().optsChanged(param, opts)
if ('expanded' in opts):
for btn in self.metaBtns.values():
btn.setVisible(opts['expanded'])
exclusive = opts.get('exclusive', param.opts['exclusive'])
enabled = opts.get('enabled', param.opts['enabled'])
for (name, btn) in self.metaBtns.items():
if (name != 'default'):
btn.setDisabled((exclusive or (not enabled)))
self.btnGrp.setExclusive(exclusive)
if (('limits' not in opts) and (('enabled' in opts) or ('readonly' in opts))):
self.updateDefaultBtn()
def expandedChangedEvent(self, expanded):
for btn in self.metaBtns.values():
btn.setVisible(expanded)
def valueChanged(self, param, val):
self.updateDefaultBtn()
def updateDefaultBtn(self):
self.metaBtns['default'].setEnabled(((not self.param.valueIsDefault()) and self.param.opts['enabled'] and self.param.writable()))
return
makeDefaultButton = WidgetParameterItem.makeDefaultButton
defaultClicked = WidgetParameterItem.defaultClicked |
class FatigueModel(ABC):
def __init__(self, scaling: float=1, state_only: bool=None, apply_to_joint_dynamics: bool=None):
self.scaling = scaling
self.state_only = (self.default_state_only() if (state_only is None) else state_only)
self.apply_to_joint_dynamics = (self.default_apply_to_joint_dynamics() if (apply_to_joint_dynamics is None) else apply_to_joint_dynamics)
# Abstract interface to be implemented by concrete fatigue models:
@staticmethod
def type() -> str: ...
@staticmethod
def suffix(variable_type: VariableType) -> tuple: ...
@staticmethod
def color() -> tuple: ...
def default_state_only(self) -> bool: ...
def default_apply_to_joint_dynamics(self) -> bool: ...
def default_initial_guess(self) -> tuple: ...
def default_bounds(self, variable_type: VariableType) -> tuple: ...
@staticmethod
def dynamics_suffix() -> str: ...
@staticmethod
def fatigue_suffix() -> str: ...
def dynamics(self, dxdt, nlp, index, states, controls) -> MX: ...
def multi_type(self): ...
class TestSequentialNodeRewriter():
def test_optimizer_verbose(self, capsys):
x = MyVariable('x')
y = MyVariable('y')
o1 = op1(x, y)
fgraph = FunctionGraph([x, y], [o1], clone=False)
@node_rewriter(None)
def local_rewrite_1(fgraph, node):
if (node.inputs[0] == x):
res = op2(y, *node.inputs[1:])
return [res]
@node_rewriter(None)
def local_rewrite_2(fgraph, node):
if (node.inputs[0] == y):
res = op2(x, *node.inputs[1:])
return [res]
seq_rewriter = SequentialNodeRewriter(local_rewrite_1, local_rewrite_2)
with config.change_flags(optimizer_verbose=True):
(new_res,) = seq_rewriter.transform(fgraph, o1.owner)
_ = seq_rewriter.transform(fgraph, new_res.owner)
capres = capsys.readouterr()
assert (capres.err == '')
assert ('rewriting: rewrite local_rewrite_1 replaces node Op1(x, y) with [Op2.0]' in capres.out)
assert ('rewriting: rewrite local_rewrite_2 replaces node Op2(y, y) with [Op2.0]' in capres.out) |
def test_set_pos_center_when_scaled(qapp, item):
item.setScale(2)
with patch.object(item, 'bounding_rect_unselected', return_value=QtCore.QRectF(0, 0, 200, 100)):
item.set_pos_center(QtCore.QPointF(0, 0))
assert (item.pos().x() == (- 200))
assert (item.pos().y() == (- 100)) |
class Migration(migrations.Migration):
dependencies = [('conditions', '0017_data_migration')]
operations = [migrations.AlterField(model_name='condition', name='comment', field=models.TextField(blank=True, help_text='Additional internal information about this condition.', verbose_name='Comment')), migrations.AlterField(model_name='condition', name='key', field=models.SlugField(blank=True, help_text='The internal identifier of this condition.', max_length=128, verbose_name='Key')), migrations.AlterField(model_name='condition', name='target_text', field=models.CharField(blank=True, help_text='If using a regular value, the text value this condition is checking against (for boolean values use 1 and 0).', max_length=256, verbose_name='Target (Text)')), migrations.AlterField(model_name='condition', name='uri', field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this condition (auto-generated).', max_length=640, verbose_name='URI')), migrations.AlterField(model_name='condition', name='uri_prefix', field=models.URLField(blank=True, help_text='The prefix for the URI of this condition.', max_length=256, verbose_name='URI Prefix'))] |
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(nn.Linear(((512 * 7) * 7), 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes))
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if (m.bias is not None):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0) |
@patch('PyQt6.QtWidgets.QGraphicsScene.mousePressEvent')
def test_mouse_press_event_when_left_click_over_no_item_in_crop_mode(mouse_mock, view, item):
view.scene.addItem(item)
view.scene.cancel_crop_mode = MagicMock()
view.scene.crop_item = item
view.scene.itemAt = MagicMock(return_value=None)
event = MagicMock(button=MagicMock(return_value=Qt.MouseButton.LeftButton))
view.scene.mousePressEvent(event)
event.accept.assert_not_called()
mouse_mock.assert_called_once_with(event)
view.scene.cancel_crop_mode.assert_called_once_with()
assert (view.scene.move_active is False)
assert (view.scene.rubberband_active is True) |
def evaluate_annotation(key2refs, scorer):
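# Leave-one-out scoring over the reference captions: each caption takes a turn
# as the hypothesis and is scored against the remaining references.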
if (scorer.method() == 'Bleu'):
scores = np.array([0.0 for n in range(4)])
else:
scores = 0
num_cap_per_audio = len(next(iter(key2refs.values())))
for i in range(num_cap_per_audio):
if (i > 0):
for key in key2refs:
key2refs[key].insert(0, res[key][0])
res = {key: [refs.pop()] for (key, refs) in key2refs.items()}
(score, _) = scorer.compute_score(key2refs, res)
if (scorer.method() == 'Bleu'):
scores += np.array(score)
else:
scores += score
score = (scores / num_cap_per_audio)
return score |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input_text', default='input_text.txt')
parser.add_argument('--length', default=50, type=int)
parser.add_argument('--batch_size', default=1, type=int)
parser.add_argument('--temperature', default=0.7, type=float)
parser.add_argument('--top_k', default=1, type=int)
parser.add_argument('--run_name', default='117M')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--nsamples', default=5, type=int)
return parser.parse_args() |
class InputOutputOracleREST(InputOutputOracle):
def __init__(self, grammar: TritonGrammar, inputs: List[Input], f_name: str=''):
super(InputOutputOracleREST, self).__init__(grammar, inputs, f_name)
self.session = requests.Session()
self._size = 0
@staticmethod
def create(filename: Union[(str, Path)], grammar: TritonGrammar, inputs: List[Input], constants: List[int]=[]) -> 'InputOutputOracleREST':
raise RuntimeError('REST Lookup tables cannot be created only loaded')
@staticmethod
def load(file: Union[(Path, str)]) -> 'InputOutputOracleREST':
res = requests.get(file)
if (res.status_code == 200):
data = res.json()
g = TritonGrammar.from_dict(data['grammar'])
lkp = InputOutputOracleREST(g, data['inputs'], file)
lkp._size = data['size']
lkp.session.headers['Host'] = file
lkp._name = file
return lkp
else:
raise ConnectionAbortedError(f'Cannot reach remote server (code:{res.status_code})')
def add_entry(self, hash: Hash, value: str):
raise NotImplementedError('REST Lookup Table are read-only at the moment')
def add_entries(self, entries: List[Tuple[(Hash, str)]], chunk_size: int=10000) -> None:
raise NotImplementedError('REST Lookup tables are read-only at the moment')
def __iter__(self) -> Iterator[Tuple[(Hash, str)]]:
raise NotImplementedError('Entries iteration is not implemented')
def size(self) -> int:
return self._size
def _get_item(self, h: Hash) -> Optional[str]:
hex_hash = binascii.hexlify(h).decode()
res = self.session.get(((str(self.name) + '/entry/') + hex_hash))
if (res.status_code == 200):
data = res.json()
return (data['expression'] if data else None)
else:
raise ConnectionError('REST query did not succeed')
def parse_init(init_file):
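# Parse a transformers-style __init__.py: collect the names registered in
# _import_structure and those imported under TYPE_CHECKING, grouped by backend.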
with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
lines = f.readlines()
line_index = 0
while ((line_index < len(lines)) and (not lines[line_index].startswith('_import_structure = {'))):
line_index += 1
if (line_index >= len(lines)):
return None
objects = []
while ((not lines[line_index].startswith('if TYPE_CHECKING')) and (find_backend(lines[line_index]) is None)):
line = lines[line_index]
single_line_import_search = _re_import_struct_key_value.search(line)
if (single_line_import_search is not None):
imports = [obj[1:(- 1)] for obj in single_line_import_search.groups()[0].split(', ') if (len(obj) > 0)]
objects.extend(imports)
elif line.startswith(((' ' * 8) + '"')):
objects.append(line[9:(- 3)])
line_index += 1
import_dict_objects = {'none': objects}
while (not lines[line_index].startswith('if TYPE_CHECKING')):
backend = find_backend(lines[line_index])
if (backend is not None):
line_index += 1
objects = []
while ((len(lines[line_index]) <= 1) or lines[line_index].startswith((' ' * 4))):
line = lines[line_index]
if (_re_import_struct_add_one.search(line) is not None):
objects.append(_re_import_struct_add_one.search(line).groups()[0])
elif (_re_import_struct_add_many.search(line) is not None):
imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
imports = [obj[1:(- 1)] for obj in imports if (len(obj) > 0)]
objects.extend(imports)
elif (_re_between_brackets.search(line) is not None):
imports = _re_between_brackets.search(line).groups()[0].split(', ')
imports = [obj[1:(- 1)] for obj in imports if (len(obj) > 0)]
objects.extend(imports)
elif (_re_quote_object.search(line) is not None):
objects.append(_re_quote_object.search(line).groups()[0])
elif line.startswith(((' ' * 8) + '"')):
objects.append(line[9:(- 3)])
elif line.startswith(((' ' * 12) + '"')):
objects.append(line[13:(- 3)])
line_index += 1
import_dict_objects[backend] = objects
else:
line_index += 1
objects = []
while ((line_index < len(lines)) and (find_backend(lines[line_index]) is None) and (not lines[line_index].startswith('else'))):
line = lines[line_index]
single_line_import_search = _re_import.search(line)
if (single_line_import_search is not None):
objects.extend(single_line_import_search.groups()[0].split(', '))
elif line.startswith((' ' * 8)):
objects.append(line[8:(- 2)])
line_index += 1
type_hint_objects = {'none': objects}
while (line_index < len(lines)):
backend = find_backend(lines[line_index])
if (backend is not None):
line_index += 1
objects = []
while ((len(lines[line_index]) <= 1) or lines[line_index].startswith((' ' * 8))):
line = lines[line_index]
single_line_import_search = _re_import.search(line)
if (single_line_import_search is not None):
objects.extend(single_line_import_search.groups()[0].split(', '))
elif line.startswith((' ' * 12)):
objects.append(line[12:(- 2)])
line_index += 1
type_hint_objects[backend] = objects
else:
line_index += 1
return (import_dict_objects, type_hint_objects) |
class CodeLogger():
def __init__(self, name):
self.logger = logging.getLogger(name)
self.logger.setLevel(logging.INFO)
if (not self.logger.handlers):
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(CustomFormatter('%(message)s'))
self.logger.addHandler(handler)
@staticmethod
def colorize_code(code, language):
if ((not language) or (language.lower() == 'python')):
lexer = PythonLexer()
elif (language.lower() == 'sql'):
lexer = SqlLexer()
else:
raise ValueError(f'Unsupported language: {language}')
return highlight(code, lexer, TerminalFormatter())
def info(self, message):
pattern = '```(python|sql)?(.*?)```'
parts = re.split(pattern, message, flags=re.DOTALL)
colored_message = ''
for i in range(0, len(parts), 3):
colored_message += parts[i]
if ((i + 2) < len(parts)):
colored_message += (('\n```\n' + self.colorize_code(parts[(i + 2)], parts[(i + 1)])) + '```')
self.logger.info(colored_message)
def warning(self, message):
self.logger.warning(message) |
class AdvertiserTopicReportView(AdvertiserAccessMixin, BaseReportView):
template_name = 'adserver/reports/advertiser-topic.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
advertiser_slug = kwargs.get('advertiser_slug', '')
advertiser = get_object_or_404(Advertiser, slug=advertiser_slug)
context.update({'advertiser': advertiser, 'metabase_advertiser_topics': settings.METABASE_QUESTIONS.get('ADVERTISER_TOPIC_PERFORMANCE')})
return context |
class RuleCommandTests(unittest.IsolatedAsyncioTestCase):
def setUp(self) -> None:
self.bot = helpers.MockBot()
self.cog = information.Information(self.bot)
self.ctx = helpers.MockContext(author=helpers.MockMember(id=1, name='Bellaluma'))
self.full_rules = [('First rule', ['first', 'number_one']), ('Second rule', ['second', 'number_two']), ('Third rule', ['third', 'number_three'])]
self.bot.api_client.get.return_value = self.full_rules
async def test_return_none_if_one_rule_number_is_invalid(self):
test_cases = [('1 6 7 8', (6, 7, 8)), ('10 first', (10,)), ('first 10', (10,))]
for (raw_user_input, extracted_rule_numbers) in test_cases:
with self.subTest(identifier=raw_user_input):
invalid = ', '.join((str(rule_number) for rule_number in extracted_rule_numbers if ((rule_number < 1) or (rule_number > len(self.full_rules)))))
final_rule_numbers = (await self.cog.rules(self.cog, self.ctx, args=raw_user_input))
self.assertEqual(self.ctx.send.call_args, unittest.mock.call(shorten((':x: Invalid rule indices: ' + invalid), 75, placeholder=' ...')))
self.assertEqual(None, final_rule_numbers)
async def test_return_correct_rule_numbers(self):
test_cases = [('1 2 first', {1, 2}), ('1 hello 2 second', {1}), ('second third unknown 999', {2, 3})]
for (raw_user_input, expected_matched_rule_numbers) in test_cases:
with self.subTest(identifier=raw_user_input):
final_rule_numbers = (await self.cog.rules(self.cog, self.ctx, args=raw_user_input))
self.assertEqual(expected_matched_rule_numbers, final_rule_numbers)
async def test_return_default_rules_when_no_input_or_no_match_are_found(self):
test_cases = [('', None), ('hello 2 second', None), ('hello 999', None)]
for (raw_user_input, expected_matched_rule_numbers) in test_cases:
with self.subTest(identifier=raw_user_input):
final_rule_numbers = (await self.cog.rules(self.cog, self.ctx, args=raw_user_input))
embed = self.ctx.send.call_args.kwargs['embed']
self.assertEqual(information.DEFAULT_RULES_DESCRIPTION, embed.description)
self.assertEqual(expected_matched_rule_numbers, final_rule_numbers) |
def test_edge_edge_degenerate_first_edge(test, device):
p1_h = np.array([[0, 0, 0]])
q1_h = np.array([[0, 0, 0]])
p2_h = np.array([[0, 1, 0]])
q2_h = np.array([[1, 0, 0]])
res = run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device)
st0 = res[0]
test.assertAlmostEqual(st0[0], 0.0)
test.assertAlmostEqual(st0[1], 0.5) |
def test_filter_end_block_inclusive(deploy_client: JSONRPCClient) -> None:
(contract_proxy, _) = deploy_rpc_test_contract(deploy_client, 'RpcTest')
estimated_transaction1 = deploy_client.estimate_gas(contract_proxy, 'createEvent', {}, 1)
assert estimated_transaction1
transaction_1 = deploy_client.transact(estimated_transaction1)
deploy_client.poll_transaction(transaction_1)
estimated_transaction2 = deploy_client.estimate_gas(contract_proxy, 'createEvent', {}, 1)
assert estimated_transaction2
transaction_2 = deploy_client.transact(estimated_transaction2)
deploy_client.poll_transaction(transaction_2)
contract_proxy_address = to_canonical_address(contract_proxy.address)
result_1 = deploy_client.get_filter_events(contract_proxy_address)
block_number_events = get_list_of_block_numbers(result_1)
block_number_event_1 = block_number_events[0]
block_number_event_2 = block_number_events[1]
result_2 = deploy_client.get_filter_events(contract_proxy_address, to_block=block_number_event_1)
assert (get_list_of_block_numbers(result_2) == [block_number_event_1])
result_3 = deploy_client.get_filter_events(contract_proxy_address, to_block=block_number_event_2)
assert (get_list_of_block_numbers(result_3) == block_number_events) |
def cyclic_learning_rate(global_step, learning_rate=0.01, max_lr=0.1, step_size=20.0, gamma=0.99994, mode='triangular', name=None):
if (global_step is None):
raise ValueError('global_step is required for cyclic_learning_rate.')
with ops.name_scope(name, 'CyclicLearningRate', [learning_rate, global_step]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name='learning_rate')
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
step_size = math_ops.cast(step_size, dtype)
def cyclic_lr():
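# Triangular policy (cf. Smith's cyclical learning rates): x measures the
# position within the current cycle and lr = learning_rate +
# (max_lr - learning_rate) * max(0, 1 - x), with optional
# 'triangular2' / 'exp_range' decay applied below.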
double_step = math_ops.multiply(2.0, step_size)
global_div_double_step = math_ops.divide(global_step, double_step)
cycle = math_ops.floor(math_ops.add(1.0, global_div_double_step))
double_cycle = math_ops.multiply(2.0, cycle)
global_div_step = math_ops.divide(global_step, step_size)
tmp = math_ops.subtract(global_div_step, double_cycle)
x = math_ops.abs(math_ops.add(1.0, tmp))
a1 = math_ops.maximum(0.0, math_ops.subtract(1.0, x))
a2 = math_ops.subtract(max_lr, learning_rate)
clr = math_ops.multiply(a1, a2)
if (mode == 'triangular2'):
clr = math_ops.divide(clr, math_ops.cast(math_ops.pow(2, math_ops.cast((cycle - 1), tf.int32)), tf.float32))
if (mode == 'exp_range'):
clr = math_ops.multiply(math_ops.pow(gamma, global_step), clr)
return math_ops.add(clr, learning_rate, name=name)
if (not context.executing_eagerly()):
cyclic_lr = cyclic_lr()
return cyclic_lr |
def extract_pairs(pair_text, phashes_dict):
pairs = []
els = pair_text.split('-')
if (len(els) > 2):
for i in range(len(els)):
pair = '-'.join(els[0:i])
if (pair in phashes_dict):
pairs.append(pair)
break
pair2 = '-'.join(els[i:])
if (pair2 in phashes_dict):
pairs.append(pair2)
else:
return els
return pairs |
class TCompileMatch(TestCase):
def test_basics_default(self):
assert compile('foo')('foo')
assert compile('foo')('fooo')
assert (not compile('foo')('fo'))
def test_ignore_case(self):
assert compile('foo', ignore_case=True)('Foo')
assert (not compile('foo', ignore_case=False)('Foo'))
def test_assert_dot_all(self):
assert compile('a.b', dot_all=True)('a\nb')
assert (not compile('a.b', dot_all=False)('a\nb'))
assert compile('a.b', dot_all=False)('a b')
    def test_unicode_equivalence(self):
        # The non-ASCII characters in these assertions were mangled to plain
        # ASCII; the pairs below assume precomposed vs. decomposed Unicode forms.
        assert compile('\u00c5')('\u00c5')
        assert compile('\u00c5')('A\u030a')
        assert compile('A\u030a')('\u00c5')
        assert compile('A\u030a')('A\u030a')
        assert compile('\u00f6')('\u00f6')
        assert compile('\u00f6')('o\u0308')
        assert compile('o\u0308')('\u00f6')
    def test_assert_asym(self):
        # Assumed: asym enables asymmetric loose matching of ASCII against accented forms.
        assert compile('o', asym=True)('\u00f6')
        assert (not compile('o', asym=False)('\u00f6'))
    def test_assert_asym_unicode_equivalence(self):
        assert compile('A', asym=True)('\u00c5')
        assert compile('A', asym=True)('A\u030a')
        assert compile('\u00c5', asym=True)('\u00c5')
        assert compile('\u00c5', asym=True)('A\u030a')
def test_invalid(self):
with self.assertRaises(ValueError):
compile('(F', asym=False)
with self.assertRaises(ValueError):
compile('(F', asym=True) |
class GroupOverSampleKaiming(object):
def __init__(self, crop_size, scale_size=None):
self.crop_size = (crop_size if (not isinstance(crop_size, int)) else (crop_size, crop_size))
if (scale_size is not None):
self.scale_worker = GroupScale(scale_size)
else:
self.scale_worker = None
def __call__(self, img_group):
if (self.scale_worker is not None):
img_group = self.scale_worker(img_group)
(image_w, image_h) = img_group[0].size
(crop_w, crop_h) = self.crop_size
offsets = self.fill_fix_offset(image_w, image_h, crop_w, crop_h)
oversample_group = list()
for (o_w, o_h) in offsets:
normal_group = list()
for (i, img) in enumerate(img_group):
crop = img.crop((o_w, o_h, (o_w + crop_w), (o_h + crop_h)))
normal_group.append(crop)
oversample_group.extend(normal_group)
return oversample_group
def fill_fix_offset(self, image_w, image_h, crop_w, crop_h):
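        # Kaiming-style fixed offsets: three crops along the longer axis (both
        # ends plus the center), assuming the shorter side was scaled to 256.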
ret = list()
if (image_w == 256):
h_step = ((image_h - crop_h) // 4)
ret.append((0, 0))
ret.append((0, (4 * h_step)))
ret.append((0, (2 * h_step)))
elif (image_h == 256):
w_step = ((image_w - crop_w) // 4)
ret.append((0, 0))
ret.append(((4 * w_step), 0))
ret.append(((2 * w_step), 0))
else:
raise ValueError('Either image_w or image_h should be equal to 256')
return ret |
@require_torch
@require_retrieval
class RagModelSaveLoadTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_rag_config(self):
question_encoder_config = AutoConfig.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
generator_config = AutoConfig.from_pretrained('facebook/bart-large-cnn')
return RagConfig.from_question_encoder_generator_configs(question_encoder_config, generator_config, bos_token_id=0, decoder_start_token_id=2, eos_token_id=2, is_encoder_decoder=True, pad_token_id=1, vocab_size=50264, title_sep=' / ', doc_sep=' // ', n_docs=5, max_combined_length=300, dataset='wiki_dpr', dataset_split='train', index_name='exact', index_path=None, use_dummy_dataset=True, retrieval_vector_size=768, retrieval_batch_size=8)
def test_rag_sequence_from_pretrained(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='pt').input_ids
decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='pt').input_ids
input_ids = input_ids.to(torch_device)
decoder_input_ids = decoder_input_ids.to(torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
rag_sequence = RagSequenceForGeneration.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', 'facebook/bart-large-cnn', retriever=rag_retriever, config=rag_config).to(torch_device)
rag_sequence.save_pretrained(tmp_dirname)
rag_sequence.from_pretrained(tmp_dirname, retriever=rag_retriever)
rag_sequence.to(torch_device)
with torch.no_grad():
output = rag_sequence(input_ids, labels=decoder_input_ids)
loss_pretrained = output.loss
del rag_sequence
question_encoder = AutoModel.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
generator = AutoModelForSeq2SeqLM.from_pretrained('facebook/bart-large-cnn')
rag_sequence = RagSequenceForGeneration(config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever)
rag_sequence.to(torch_device)
with torch.no_grad():
output = rag_sequence(input_ids, labels=decoder_input_ids)
loss_init = output.loss
self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4)
def test_rag_token_from_pretrained(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='pt').input_ids
decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='pt').input_ids
input_ids = input_ids.to(torch_device)
decoder_input_ids = decoder_input_ids.to(torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
rag_token = RagTokenForGeneration.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', 'facebook/bart-large-cnn', retriever=rag_retriever, config=rag_config, question_encoder_max_length=200, generator_max_length=200).to(torch_device)
rag_token.save_pretrained(tmp_dirname)
rag_token.from_pretrained(tmp_dirname, retriever=rag_retriever)
rag_token.to(torch_device)
self.assertTrue((rag_token.question_encoder.config.max_length == 200))
self.assertTrue((rag_token.generator.config.max_length == 200))
with torch.no_grad():
output = rag_token(input_ids, labels=decoder_input_ids)
loss_pretrained = output.loss
del rag_token
question_encoder = AutoModel.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
generator = AutoModelForSeq2SeqLM.from_pretrained('facebook/bart-large-cnn')
rag_token = RagTokenForGeneration(config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever)
rag_token.to(torch_device)
with torch.no_grad():
output = rag_token(input_ids, labels=decoder_input_ids)
loss_init = output.loss
self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4) |
def _get_cached_and_pending_stats(discover_deltas_pending: List[ObjectRef[DeltaStatsCacheResult]], deltacat_storage=unimplemented_deltacat_storage) -> Tuple[(List[DeltaStats], List[ObjectRef[DeltaStats]])]:
delta_stats_processed: List[DeltaStats] = []
delta_stats_pending: List[ObjectRef[DeltaStats]] = []
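    # Drain pending cache lookups with ray.wait(); cache hits are kept locally,
    # while misses are re-dispatched as remote get_delta_stats tasks.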
while discover_deltas_pending:
(ready, discover_deltas_pending) = ray.wait(discover_deltas_pending)
cached_results: List[DeltaStatsCacheResult] = ray.get(ready)
for cached_result in cached_results:
if cached_result.hits:
delta_stats_processed.append(cached_result.hits)
if cached_result.misses:
missed_column_names: List[str] = cached_result.misses.column_names
delta_locator: DeltaLocator = cached_result.misses.delta_locator
delta_stats_pending.append(get_delta_stats.remote(delta_locator, missed_column_names, deltacat_storage))
return (delta_stats_processed, delta_stats_pending) |
def test_force_locale_with_threading_and_app_context():
app = flask.Flask(__name__)
babel.Babel(app, locale_selector=(lambda : 'de_DE'))
semaphore = Semaphore(value=0)
def first_app_context():
with app.app_context():
with babel.force_locale('en_US'):
assert (str(babel.get_locale()) == 'en_US')
semaphore.acquire()
thread = Thread(target=first_app_context)
thread.start()
try:
with app.app_context():
assert (str(babel.get_locale()) == 'de_DE')
finally:
semaphore.release()
thread.join() |
def RCISD(mf, frozen=None, mo_coeff=None, mo_occ=None):
from pyscf.df.df_jk import _DFHF
mf = mf.remove_soscf()
if (not mf.istype('RHF')):
mf = mf.to_rhf()
if (isinstance(mf, _DFHF) and mf.with_df):
from pyscf import lib
lib.logger.warn(mf, f'DF-RCISD for DFHF method {mf} is not available. Normal RCISD method is called.')
return cisd.RCISD(mf, frozen, mo_coeff, mo_occ)
else:
return cisd.RCISD(mf, frozen, mo_coeff, mo_occ) |
class RerankerTokenizer():
def __init__(self, total_maxlen, base):
self.total_maxlen = total_maxlen
self.tok = AutoTokenizer.from_pretrained(base)
def tensorize(self, questions, passages):
assert (type(questions) in [list, tuple]), type(questions)
assert (type(passages) in [list, tuple]), type(passages)
encoding = self.tok(questions, passages, padding='longest', truncation='longest_first', return_tensors='pt', max_length=self.total_maxlen, add_special_tokens=True)
return encoding |
class CSRNet_LCM(nn.Module):
def __init__(self, load_weights=True):
super(CSRNet_LCM, self).__init__()
self.seen = 0
self.frontend_feat = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512]
self.backend_feat = ['M', 512, 512, 'M', 512, 256, 'M', 128, 64]
self.frontend = make_layers(self.frontend_feat)
self.backend = make_layers(self.backend_feat, in_channels=512, dilation=2)
self.output_layer = nn.Conv2d(64, 1, kernel_size=1)
if load_weights:
mod = models.vgg16(pretrained=False)
pretrain_path = './models/Pretrain_Model/vgg16-397923af.pth'
mod.load_state_dict(torch.load(pretrain_path))
print(('loaded pretrain model: ' + pretrain_path))
self._initialize_weights()
self.frontend.load_state_dict(mod.features[0:23].state_dict())
def forward(self, x):
x = self.frontend(x)
x = self.backend(x)
x = self.output_layer(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if (m.bias is not None):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0) |
def test_regression_mediator_task_no_routes():
pseudo_random_generator = random.Random()
channels = make_channel_set([NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=0), partner_state=NettingChannelEndStateProperties(balance=UNIT_TRANSFER_AMOUNT, address=HOP2, privatekey=HOP2_KEY))])
payer_transfer = factories.make_signed_transfer_for(channels[0], factories.LockedTransferSignedStateProperties(sender=HOP2, pkey=HOP2_KEY, expiration=30))
init_state_change = ActionInitMediator(from_hop=channels.get_hop(0), candidate_route_states=channels.get_routes(), from_transfer=payer_transfer, balance_proof=payer_transfer.balance_proof, sender=payer_transfer.balance_proof.sender)
init_iteration = mediator.state_transition(mediator_state=None, state_change=init_state_change, channelidentifiers_to_channels=channels.channel_map, addresses_to_channel=channels.addresses_to_channel(), pseudo_random_generator=pseudo_random_generator, block_number=5, block_hash=factories.make_block_hash())
msg = 'The task must not be cleared, even if there is no route to forward the transfer'
assert (init_iteration.new_state is not None), msg
assert (init_iteration.new_state.waiting_transfer.transfer == payer_transfer)
assert (search_for_item(init_iteration.events, SendLockedTransfer, {}) is None)
secrethash = UNIT_SECRETHASH
lock = channels[0].partner_state.secrethashes_to_lockedlocks[secrethash]
(send_lock_expired, _) = channel.create_sendexpiredlock(sender_end_state=channels[0].partner_state, locked_lock=lock, pseudo_random_generator=pseudo_random_generator, chain_id=channels[0].chain_id, token_network_address=channels[0].token_network_address, channel_identifier=channels[0].identifier, recipient=channels[0].our_state.address)
assert send_lock_expired
lock_expired_message = message_from_sendevent(send_lock_expired)
lock_expired_message.sign(LocalSigner(channels.partner_privatekeys[0]))
balance_proof = balanceproof_from_envelope(lock_expired_message)
message_identifier = message_identifier_from_prng(pseudo_random_generator)
expired_block_number = channel.get_sender_expiration_threshold(lock.expiration)
block_hash = factories.make_block_hash()
expire_block_iteration = mediator.state_transition(mediator_state=init_iteration.new_state, state_change=Block(block_number=expired_block_number, gas_limit=0, block_hash=block_hash), channelidentifiers_to_channels=channels.channel_map, addresses_to_channel=channels.addresses_to_channel(), pseudo_random_generator=pseudo_random_generator, block_number=expired_block_number, block_hash=block_hash)
assert (expire_block_iteration.new_state is not None)
receive_expired_iteration = mediator.state_transition(mediator_state=expire_block_iteration.new_state, state_change=ReceiveLockExpired(sender=balance_proof.sender, balance_proof=balance_proof, secrethash=secrethash, message_identifier=message_identifier), channelidentifiers_to_channels=channels.channel_map, addresses_to_channel=channels.addresses_to_channel(), pseudo_random_generator=pseudo_random_generator, block_number=expired_block_number, block_hash=block_hash)
msg = 'The only used channel had the lock cleared, the task must be cleared'
assert (receive_expired_iteration.new_state is None), msg
assert (secrethash not in channels[0].partner_state.secrethashes_to_lockedlocks) |
@require_vision
@require_torch
class AlignModelIntegrationTest(unittest.TestCase):
def test_inference(self):
model_name = 'kakaobrain/align-base'
model = AlignModel.from_pretrained(model_name).to(torch_device)
processor = AlignProcessor.from_pretrained(model_name)
image = prepare_img()
texts = ['a photo of a cat', 'a photo of a dog']
inputs = processor(text=texts, images=image, return_tensors='pt').to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
self.assertEqual(outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])))
self.assertEqual(outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])))
expected_logits = torch.tensor([[9.7093, 3.4679]], device=torch_device)
self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=0.001)) |
def _schedule_item_status_to_message(status: str):
from schedule.models import ScheduleItem
if (status == ScheduleItem.STATUS.confirmed):
return 'I am happy with the time slot.'
if (status == ScheduleItem.STATUS.maybe):
return 'I can make this time slot work if it is not possible to change'
if (status == ScheduleItem.STATUS.rejected):
return 'The time slot does not work for me'
if (status == ScheduleItem.STATUS.cant_attend):
return "I can't attend the conference anymore"
return 'Undefined' |
def test_cwd(tmp_path):
project_dir = (tmp_path / 'project')
test_projects.new_c_project().generate(project_dir)
actual_wheels = utils.cibuildwheel_run(project_dir, add_env={'CIBW_BEFORE_ALL': f'python -c "import os; assert os.getcwd() == {str(project_dir)!r}"', 'CIBW_BEFORE_ALL_LINUX': 'python -c "import os; assert os.getcwd() == \'/project\'"'})
expected_wheels = utils.expected_wheels('spam', '0.1.0')
assert (set(actual_wheels) == set(expected_wheels)) |
class FC():
_activations = {None: tf.identity, 'ReLU': tf.nn.relu, 'tanh': tf.tanh, 'sigmoid': tf.sigmoid, 'softmax': tf.nn.softmax, 'swish': (lambda x: (x * tf.sigmoid(x)))}
def __init__(self, output_dim, input_dim=None, activation=None, weight_decay=None, ensemble_size=1):
(self.input_dim, self.output_dim) = (input_dim, output_dim)
self.activation = activation
self.weight_decay = weight_decay
self.ensemble_size = ensemble_size
self.variables_constructed = False
(self.weights, self.biases) = (None, None)
self.decays = None
def __repr__(self):
return 'FC(output_dim={!r}, input_dim={!r}, activation={!r}, weight_decay={!r}, ensemble_size={!r})'.format(self.output_dim, self.input_dim, self.activation, self.weight_decay, self.ensemble_size)
def get_model_vars(self, idx, sess):
(weights, biases) = sess.run([self.weights, self.biases])
weight = weights[idx].copy()
bias = biases[idx].copy()
return {'weights': weight, 'biases': bias}
def set_model_vars(self, idx, sess, variables):
for (attr, var) in variables.items():
tensor = getattr(self, attr)
op = tensor[idx].assign(var)
sess.run(op)
    def set_model_vars_ops(self, variables):
        # Builds assign ops for every variable without running them; assumed
        # rename of a second `set_model_vars` that shadowed the session-based
        # variant above.
        ops = [getattr(self, attr).assign(var) for (attr, var) in variables.items()]
        return ops
def reset(self, sess):
sess.run(self.weights.initializer)
sess.run(self.biases.initializer)
def compute_output_tensor(self, input_tensor):
weights = self.weights
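        # 2-D inputs are broadcast across the ensemble via einsum; 3-D inputs must
        # already carry a leading ensemble dimension for a batched matmul.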
if (len(input_tensor.shape) == 2):
raw_output = (tf.einsum('ij,ajk->aik', input_tensor, weights) + self.biases)
elif ((len(input_tensor.shape) == 3) and (input_tensor.shape[0].value == self.ensemble_size)):
raw_output = (tf.matmul(input_tensor, weights) + self.biases)
else:
raise ValueError('Invalid input dimension.')
return FC._activations[self.activation](raw_output)
def get_decays(self):
return self.decays
def copy(self, sess=None):
new_layer = eval(repr(self))
return new_layer
def construct_vars(self):
if self.variables_constructed:
return
if ((self.input_dim is None) or (self.output_dim is None)):
raise RuntimeError('Cannot construct variables without fully specifying input and output dimensions.')
self.weights = tf.get_variable('FC_weights', shape=[self.ensemble_size, self.input_dim, self.output_dim], initializer=tf.truncated_normal_initializer(stddev=(1 / (2 * np.sqrt(self.input_dim)))))
self.biases = tf.get_variable('FC_biases', shape=[self.ensemble_size, 1, self.output_dim], initializer=tf.constant_initializer(0.0))
if (self.weight_decay is not None):
self.decays = [tf.multiply(self.weight_decay, tf.nn.l2_loss(self.weights), name='weight_decay')]
self.variables_constructed = True
def get_vars(self):
return [self.weights, self.biases]
def get_input_dim(self):
return self.input_dim
def set_input_dim(self, input_dim):
if self.variables_constructed:
raise RuntimeError('Variables already constructed.')
self.input_dim = input_dim
def get_output_dim(self):
return self.output_dim
def set_output_dim(self, output_dim):
if self.variables_constructed:
raise RuntimeError('Variables already constructed.')
self.output_dim = output_dim
def get_activation(self, as_func=True):
if as_func:
return FC._activations[self.activation]
else:
return self.activation
def set_activation(self, activation):
if self.variables_constructed:
raise RuntimeError('Variables already constructed.')
self.activation = activation
def unset_activation(self):
if self.variables_constructed:
raise RuntimeError('Variables already constructed.')
self.set_activation(None)
def get_weight_decay(self):
return self.weight_decay
def set_weight_decay(self, weight_decay):
self.weight_decay = weight_decay
if self.variables_constructed:
if (self.weight_decay is not None):
self.decays = [tf.multiply(self.weight_decay, tf.nn.l2_loss(self.weights), name='weight_decay')]
def unset_weight_decay(self):
self.set_weight_decay(None)
if self.variables_constructed:
self.decays = []
def set_ensemble_size(self, ensemble_size):
if self.variables_constructed:
raise RuntimeError('Variables already constructed.')
self.ensemble_size = ensemble_size
def get_ensemble_size(self):
return self.ensemble_size |
def _get_boundaries(x_values, y_values, round_val):
x1 = np.min((np.floor(((x_values - 0.5) / round_val)) * round_val))
x2 = np.max((np.ceil(((x_values + 0.5) / round_val)) * round_val))
y1 = np.min((np.floor(((y_values - 0.5) / round_val)) * round_val))
y2 = np.max((np.ceil(((y_values + 0.5) / round_val)) * round_val))
x_range = (x2 - x1)
y_range = (y2 - y1)
max_range = max(x_range, y_range)
x_center = ((x1 + x2) / 2)
y_center = ((y1 + y2) / 2)
min_x = max((x_center - (max_range / 2)), 0)
max_x = min((x_center + (max_range / 2)), 100)
min_y = max((y_center - (max_range / 2)), 0)
max_y = min((y_center + (max_range / 2)), 100)
return (min_x, max_x, min_y, max_y) |
def test_simulationtimecondition():
cond = OSC.SimulationTimeCondition(1.2, OSC.Rule.greaterThan)
prettyprint(cond.get_element())
cond2 = OSC.SimulationTimeCondition(1.2, OSC.Rule.greaterThan)
cond3 = OSC.SimulationTimeCondition(1.3, OSC.Rule.greaterThan)
assert (cond == cond2)
assert (cond != cond3)
cond4 = OSC.SimulationTimeCondition.parse(cond.get_element())
assert (cond == cond4)
assert (version_validation('SimulationTimeCondition', cond, 0) == ValidationResponse.OK)
assert (version_validation('SimulationTimeCondition', cond, 1) == ValidationResponse.OK)
assert (version_validation('SimulationTimeCondition', cond, 2) == ValidationResponse.OK) |
def ssh(function=None, **kwargs):
def decorator(func, *args, **kwargs):
hostname = kwargs['host']
username = kwargs['user']
sshkey = kwargs['key']
python = (kwargs['python'] if ('python' in kwargs) else 'python3.8')
logging.debug('ssh: func: %s', func.func)
if (not func.source):
func.source = inspect.getsource(func.func)
func.func.userfunc = True
logging.debug('ssh: func source:\n%s', func.source)
_ast = ast.parse(func.source)
decorators = _ast.body[0].decorator_list
ssh_decorator = None
for decorator in decorators:
if (hasattr(decorator, 'func') and (decorator.func.id == 'ssh')):
logging.debug('REMOVE SSH DECORATOR:')
ssh_decorator = decorator
if ssh_decorator:
decorators.remove(ssh_decorator)
logging.debug('ssh: func new source: %s', astunparse.unparse(_ast))
logging.debug('ssh args: %s %s', str(args), str(kwargs))
logging.debug('SSH: Calling function: %s', str(func))
def wrapper(f_func, *args, **kwargs):
def find_func(pfunc):
logging.debug('find_func: %s', pfunc.__name__)
if (hasattr(pfunc, 'userfunc') and pfunc.userfunc):
return pfunc.userfunc
elif isinstance(pfunc, partial):
return find_func(pfunc.func)
return pfunc
if ('SOURCE' in os.environ):
sourcefile = os.environ['SOURCE']
else:
sourcefile = sys.argv[0]
logging.debug('ssh: SOURCE:%s', sourcefile)
with open(sourcefile) as source:
logging.debug('ssh: SOURCE:%s', source.read())
logging.debug('SSH: user:%s host:%s key: %s', kwargs['user'], kwargs['host'], kwargs['key'])
def setup_virtualenv(host, user, key, env):
_ssh = paramiko.SSHClient()
_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
_ssh.connect(hostname=hostname, username=username, key_filename=sshkey)
command = 'python3.8 -m venv {}; export LLVM_CONFIG=/usr/bin/llvm-config-10; {}/bin/pip install --upgrade py-entangle'.format(env, env)
(_, stdout, _) = _ssh.exec_command(command)
for line in stdout.read().splitlines():
logging.debug('SSH: setup_virtualenv: stdout: %s', line)
if ('env' in kwargs):
setup_virtualenv(kwargs['host'], kwargs['user'], kwargs['key'], kwargs['env'])
del kwargs['env']
del kwargs['user']
del kwargs['host']
del kwargs['key']
logging.debug('Run function: %s(%s,%s)', func.__name__, args, kwargs)
vargs = []
for arg in args:
if callable(arg):
vargs += [arg()]
else:
vargs += [arg]
sourceuuid = ('sshsource' + hashlib.md5(uuid4().bytes).hexdigest())
with open(sourcefile) as source:
_source = source.read()
logging.debug('Parsing SOURCE')
_ast = ast.parse(_source)
funcdefs = [funcdef for funcdef in _ast.body if isinstance(funcdef, ast.FunctionDef)]
logging.debug('Removing decorators from SOURCE')
for funcdef in funcdefs:
__funcname__ = funcdef.name
if (__funcname__ == func.__name__):
decorators = funcdef.decorator_list
remove_decorators = []
for decorator in decorators:
if (hasattr(decorator, 'func') and ((decorator.func.id == 'ssh') or (decorator.func.id == 'dataflow'))):
logging.debug('REMOVE SSH DECORATOR:')
remove_decorators += [decorator]
[decorators.remove(dec) for dec in remove_decorators]
'\n for funcdef in funcdefs:\n __funcname__ = funcdef.name\n if __funcname__ == func.__name__:\n try:\n\n funcdef.decorator_list.clear()\n pass\n except:\n import traceback\n logging.error(traceback.format_exc())\n '
logging.debug('UNparsing SOURCE')
_source = astunparse.unparse(_ast)
logging.debug('Attempting to write SOURCE')
with open('{}.py'.format(sourceuuid), 'w') as appsource:
appsource.write(_source)
logging.debug('Wrote SOURCE')
appuuid = ('sshapp' + hashlib.md5(uuid4().bytes).hexdigest())
__func = find_func(func)
with open('{}.py'.format(appuuid), 'w') as app:
pargs = codecs.encode(pickle.dumps(vargs), 'base64').decode()
pargs = re.sub('\\n', '', pargs).strip()
app.write('import logging\n\nlogger=logging.getLogger()\nlogging.disabled=False\nlogger.disabled=False\n')
app.write('import pickle, codecs, re\n')
app.write('from {} import {}\n\n'.format(sourceuuid, __func.__name__))
app.write("pargs = '{}'\n".format(pargs))
app.write('args = pickle.loads(codecs.decode(pargs.encode(), "base64"))\n')
app.write('print("ARGS: {}".format(args))\n')
app.write('result = {}(*args)()\n'.format(__func.__name__))
app.write('print("RESULT:", result)\n')
app.write('resultp = codecs.encode(pickle.dumps(result), "base64").decode()\n')
app.write("print('===BEGIN===')\n")
app.write('print(resultp)\n')
p_func = partial(f_func, *args, **kwargs)
p_func.__name__ = f_func.__name__
logging.debug('args: %s kwargs: %s', args, kwargs)
def ssh_function(remotefunc, username, hostname, sshkey, appuuid, sourceuuid, *args):
logging.debug('ssh_function: remote function args: %s', *args)
files = [(appuuid + '.py'), (sourceuuid + '.py')]
                logging.debug('SCP files: %s to %s@%s:%s', files, username, hostname, '/tmp')
_ssh = paramiko.SSHClient()
_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
_ssh.connect(hostname=hostname, username=username, key_filename=sshkey)
logging.debug('SSH FUNCTION: %s', remotefunc)
scp = SCPClient(_ssh.get_transport())
try:
scp.put(files, remote_path='/tmp')
finally:
pass
command = 'export SOURCE={}.py; cd /tmp; {} /tmp/{}.py'.format(sourceuuid, python, appuuid)
result = None
with open('/tmp/command', 'w') as cmd:
cmd.write(command)
                logging.debug('SSH: executing %s on %s@%s', command, username, hostname)
(_, stdout, _) = _ssh.exec_command(command)
logging.debug('SSH: CWD %s', os.getcwd())
logging.debug('SSH: importing module %s', sourceuuid)
sys.path.append(os.getcwd())
try:
importlib.import_module(sourceuuid)
except Exception:
pass
result_next = False
resultlines = []
for line in stdout.read().splitlines():
logging.debug('SSH: command stdout: %s', line)
if result_next:
if (len(line.strip()) > 0):
resultlines += [line]
logging.debug('SSH: got result line: %s', line)
if (line == b'===BEGIN==='):
result_next = True
logging.debug('Unpickle: %s', b''.join(resultlines))
if (len(resultlines) > 0):
result = pickle.loads(codecs.decode(b''.join(resultlines), 'base64'))
else:
result = None
_ssh.close()
return result
_p_func = find_func(p_func)
logging.debug('_p_func: %s', _p_func)
logging.debug('p_func: %s', p_func)
ssh_p = partial(ssh_function, p_func, username, hostname, sshkey, appuuid, sourceuuid)
ssh_p.__name__ = p_func.__name__
ssh_p.userfunc = f_func.func
frame = sys._getframe(1)
if ('dataflow' in frame.f_locals):
logging.debug('DATAFLOW detected!')
result = ssh_p()
else:
logging.debug('DATAFLOW NOT detected!')
result = ProcessMonitor(ssh_p, timeout=None, wait=None, cache=False, shared_memory=False, sleep=0)
if callable(result):
_result = result()
else:
_result = result
if isinstance(_result, ProcessMonitor):
_result = _result()
logging.debug('SSH RESULT2: %s', _result)
return _result
p_func = partial(wrapper, func, **kwargs)
if isinstance(func, (ProcessMonitor, ThreadMonitor)):
p_func.__name__ = func.func.__name__
else:
p_func.__name__ = func.__name__
return p_func
if (function is not None):
logging.debug('ssh: function source: %s', function.source)
return decorator(function, **kwargs)
return partial(decorator, **kwargs) |
@pytest.mark.unit()
@pytest.mark.parametrize(('markers', 'expected'), [(None, []), ([], []), ([pytask.mark.produces(), pytask.mark.depends_on()], [pytask.mark.produces(), pytask.mark.depends_on()]), ([pytask.mark.produces(), pytask.mark.produces(), pytask.mark.depends_on()], [pytask.mark.produces(), pytask.mark.produces(), pytask.mark.depends_on()])])
def test_get_all_marks_from_obj(markers, expected):
def func():
...
if (markers is not None):
func.pytask_meta = CollectionMetadata(markers=markers)
result = get_all_marks(func)
assert (result == expected) |
class ChatAnthropic(BaseChatModel, _AnthropicCommon):
stop: Optional[List[str]] = None
class Config():
extra = Extra.ignore
    @property
    def _llm_type(self) -> str:
return 'anthropic-chat'
def _convert_one_message_to_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f'''
{message.role.capitalize()}: {message.content}'''
elif isinstance(message, HumanMessage):
message_text = f'{self.HUMAN_PROMPT} {message.content}'
elif isinstance(message, AIMessage):
message_text = f'{self.AI_PROMPT} {message.content}'
elif isinstance(message, SystemMessage):
message_text = f'{self.HUMAN_PROMPT} <admin>{message.content}</admin>'
else:
raise ValueError(f'Got unknown type {message}')
return message_text
def _convert_messages_to_text(self, messages: List[BaseMessage]) -> str:
return ''.join((self._convert_one_message_to_text(message) for message in messages))
def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str:
if (not self.AI_PROMPT):
raise NameError('Please ensure the anthropic package is loaded')
if (not isinstance(messages[(- 1)], AIMessage)):
messages.append(AIMessage(content=''))
text = self._convert_messages_to_text(messages)
return text.rstrip()
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[(str, Any)] = {'prompt': prompt, **self._default_params}
if (self.stop is not None):
if (stop is None):
stop = self.stop
else:
stop.extend(self.stop)
if stop:
params['stop_sequences'] = stop
if self.streaming:
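            # Streaming responses carry the full completion so far; emit only the
            # new suffix as the token delta to the callback manager.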
completion = ''
stream_resp = self.client.completion_stream(**params)
for data in stream_resp:
delta = data['completion'][len(completion):]
completion = data['completion']
if run_manager:
run_manager.on_llm_new_token(delta)
else:
response = self.client.completion(**params)
completion = response['completion']
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(self, messages: List[BaseMessage], stop: Optional[List[str]]=None, run_manager: Optional[AsyncCallbackManagerForLLMRun]=None) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[(str, Any)] = {'prompt': prompt, **self._default_params}
if stop:
params['stop_sequences'] = stop
if self.streaming:
completion = ''
stream_resp = (await self.client.acompletion_stream(**params))
async for data in stream_resp:
delta = data['completion'][len(completion):]
completion = data['completion']
if run_manager:
(await run_manager.on_llm_new_token(delta))
else:
response = (await self.client.acompletion(**params))
completion = response['completion']
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)]) |
class CasadiAlgebraicSolver(pybamm.BaseSolver):
def __init__(self, tol=1e-06, extra_options=None):
super().__init__()
self.tol = tol
self.name = 'CasADi algebraic solver'
self.algebraic_solver = True
self.extra_options = (extra_options or {})
pybamm.citations.register('Andersson2019')
    @property
    def tol(self):
        return self._tol
    @tol.setter
    def tol(self, value):
        self._tol = value
def _integrate(self, model, t_eval, inputs_dict=None):
inputs_dict = (inputs_dict or {})
inputs = casadi.vertcat(*[v for v in inputs_dict.values()])
y0 = model.y0
if (model.rhs == {}):
len_rhs = 0
y0_diff = casadi.DM()
y0_alg = y0
else:
if (model.len_rhs_and_alg == y0.shape[0]):
len_rhs = model.len_rhs
else:
len_rhs = (model.len_rhs + model.len_rhs_sens)
y0_diff = y0[:len_rhs]
y0_alg = y0[len_rhs:]
y_alg = None
t_sym = casadi.MX.sym('t')
y_alg_sym = casadi.MX.sym('y_alg', y0_alg.shape[0])
y_sym = casadi.vertcat(y0_diff, y_alg_sym)
alg = model.casadi_algebraic(t_sym, y_sym, inputs)
constraints = np.zeros_like(model.bounds[0], dtype=int)
constraints[(model.bounds[0] >= 0)] = 1
constraints[(model.bounds[1] <= 0)] = (- 1)
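        # Map variable bounds onto the 'newton' rootfinder's constraints option:
        # 1 forces a component to stay >= 0, -1 forces <= 0, 0 leaves it free.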
roots = casadi.rootfinder('roots', 'newton', dict(x=y_alg_sym, p=t_sym, g=alg), {**self.extra_options, 'abstol': self.tol, 'constraints': list(constraints[len_rhs:])})
timer = pybamm.Timer()
integration_time = 0
for (idx, t) in enumerate(t_eval):
try:
timer.reset()
y_alg_sol = roots(y0_alg, t)
integration_time += timer.time()
success = True
message = None
y_sol = casadi.vertcat(y0_diff, y_alg_sol)
fun = model.casadi_algebraic(t, y_sol, inputs)
except RuntimeError as err:
success = False
message = err.args[0]
fun = None
if (success and ((not any(np.isnan(fun))) and np.all((casadi.fabs(fun) < self.tol)))):
y0_alg = y_alg_sol
y0 = casadi.vertcat(y0_diff, y0_alg)
if (y_alg is None):
y_alg = y_alg_sol
else:
y_alg = casadi.horzcat(y_alg, y_alg_sol)
elif (not success):
raise pybamm.SolverError(f'Could not find acceptable solution: {message}')
elif any(np.isnan(fun)):
raise pybamm.SolverError('Could not find acceptable solution: solver returned NaNs')
else:
raise pybamm.SolverError('\n Could not find acceptable solution: solver terminated\n successfully, but maximum solution error ({})\n above tolerance ({})\n '.format(casadi.mmax(casadi.fabs(fun)), self.tol))
y_diff = casadi.horzcat(*([y0_diff] * len(t_eval)))
y_sol = casadi.vertcat(y_diff, y_alg)
try:
explicit_sensitivities = bool(model.calculate_sensitivities)
except AttributeError:
explicit_sensitivities = False
sol = pybamm.Solution([t_eval], y_sol, model, inputs_dict, termination='final time', sensitivities=explicit_sensitivities)
sol.integration_time = integration_time
return sol |
class EventLoopManager():
current = None
exceptions = []
exceptionLock = threading.RLock()
waitingLock = threading.RLock()
def __init__(self):
self.threads = []
self.loops = []
self.separateLoops = []
self.waiting = {}
self.pending = []
self.updates = []
self.mainLoop = None
self.mainWaitFor = None
self.running = False
def schedule(self, *funcs, main=False, ups=None, waitFor=None):
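        # Wrap callbacks that do not accept a `loop` argument, then either register
        # them as main-thread updates or run them on a worker thread paced at `ups`.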
functions = list(funcs)
for i in range(len(functions)):
sig = inspect.signature(functions[i])
if ('loop' not in sig.parameters):
functions[i] = wrap(functions[i])
if main:
self.updates.extend(functions)
self.mainWaitFor = waitFor
else:
if (ups is None):
raise PyUnityException('ups argument is required if main is False')
self.waiting[waitFor] = []
loop = EventLoop()
self.loops.append(loop)
def inner():
clock = Clock()
clock.Start(ups)
while self.running:
with EventLoopManager.waitingLock:
for waiter in self.waiting[waitFor]:
waiter.loop.call_soon_threadsafe(waiter.event.set)
for func in functions:
try:
func(loop)
except Exception as e:
with EventLoopManager.exceptionLock:
EventLoopManager.exceptions.append(e)
break
loop.call_soon(loop.stop)
loop.run_forever()
clock.Maintain()
t = threading.Thread(target=inner, daemon=True)
self.threads.append(t)
def addLoop(self, loop):
def inner():
while self.running:
loop.call_soon(loop.stop)
loop.run_forever()
self.loops.append(loop)
self.separateLoops.append(loop)
t = threading.Thread(target=inner, daemon=True)
self.threads.append(t)
def start(self):
self.setup()
while self.running:
self.update()
def setup(self):
if (EventLoopManager.current is not None):
raise PyUnityException('Only one EventLoopManager can be running')
EventLoopManager.current = self
self.waiting[self.mainWaitFor] = []
for loop in self.separateLoops:
loop.call_soon(loop.stop)
loop.run_forever()
self.separateLoops.clear()
self.running = True
for thread in self.threads:
thread.start()
self.mainLoop = EventLoop()
asyncio.set_event_loop(self.mainLoop)
    @classmethod
    def handleExceptions(cls):
with cls.exceptionLock:
if len(cls.exceptions):
from . import SceneManager
from .scenes.runner import ChangeScene
if isinstance(cls.exceptions[0], ChangeScene):
exc = cls.exceptions.pop()
cls.exceptions.clear()
raise exc
elif config.exitOnError:
Logger.LogLine(Logger.ERROR, f'Exception in Scene: {SceneManager.CurrentScene().name!r}')
exc = cls.exceptions.pop()
cls.exceptions.clear()
raise exc
else:
for exception in cls.exceptions:
Logger.LogLine(Logger.ERROR, f'Exception ignored in Scene: {SceneManager.CurrentScene().name!r}')
Logger.LogException(exception)
cls.exceptions.clear()
def updateEvents(self):
with EventLoopManager.waitingLock:
for waiter in self.waiting[self.mainWaitFor]:
waiter.loop.call_soon_threadsafe(waiter.event.set)
for func in self.updates:
func(self.mainLoop)
for event in self.pending:
event.trigger()
self.pending.clear()
def update(self):
EventLoopManager.handleExceptions()
self.updateEvents()
self.mainLoop.call_soon(self.mainLoop.stop)
self.mainLoop.run_forever()
def quit(self):
self.running = False
for thread in self.threads:
thread.join()
self.threads = []
self.loops = []
self.separateLoops = []
self.waiting = {}
self.pending = []
self.updates = []
self.mainLoop = None
EventLoopManager.current = None |
def SynthesizeAddSecondOrder(NetworkPrefixCounter):
trajectories = []
for vessel in range(vessels):
trajectory = []
for step in range(steps):
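            # Second-order walk: the first port is uniform, later ports depend on the
            # previous port and, from the third step, on the (pprev, prev) pair.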
if (len(trajectory) == 0):
port = random.randint(0, 99)
elif (len(trajectory) == 1):
prev = trajectory[(- 1)]
if (prev in [0, 3, 6]):
port = AltBiasedNextStep(prev)
else:
port = NextStep(prev)
else:
prev = trajectory[(- 1)]
pprev = trajectory[(- 2)]
if (prev in [0, 3, 6]):
port = AltBiasedNextStep(prev)
elif ((pprev, prev) in [(27, 28)]):
port = BiasedNextStep(prev)
else:
port = NextStep(prev)
trajectory.append(port)
trajectories.append(trajectory)
WriteTrajectories(trajectories, NetworkPrefixCounter, 'AddSecondOrder') |
def main():
parser = argparse.ArgumentParser(description='significant test')
parser.add_argument('-d', '--Domain', required=True, type=str, help='which domain to work on?')
parser.add_argument('-fn', '--FolderName', required=True, type=str, help='base name of the folder to store result?')
parser.add_argument('-s', '--Split', required=True, type=str, help='what split to test?', choices=['train', 'dev', 'test'])
parser.add_argument('-c', '--Compare', action='store_true', help='compare with another model?')
parser.add_argument('-fn2', '--FolderName2', type=str, help='base name of the folder to store another result?')
parser.add_argument('-df', '--DrawFigure', action='store_true', help='draw figure?')
parser.add_argument('-ps', '--PathStorage', type=str, default='../..', help='Path of storage which stores domains (with data), logs, results, etc. Must be local (e.g. no HDFS allowed)')
parser.add_argument('-sd', '--Seed', default=12345, type=int, help='random seed')
args = parser.parse_args()
path_storage = getAbsPath(args.PathStorage)
args.PathDomain = os.path.join(path_storage, 'domains', args.Domain)
bs = Bootstrapping()
if (args.Domain == args.FolderName):
args.Database = 'gen'
args.PathResult = os.path.join(args.PathDomain, f'results_gen_{args.Split}')
else:
path_logs = os.path.join(args.PathDomain, 'Logs', args.FolderName)
assert os.path.exists(path_logs)
args.PathResult = os.path.join(path_logs, f'results_{args.Split}')
with open(args.PathResult, 'rb') as f:
allres = pickle.load(f)
bs.run(allres['loglik'])
if args.Compare:
if (args.Domain == args.FolderName2):
args.Database2 = 'gen'
args.PathResult2 = os.path.join(args.PathDomain, f'results_gen_{args.Split}')
else:
path_logs = os.path.join(args.PathDomain, 'Logs', args.FolderName2)
assert os.path.exists(path_logs)
args.PathResult2 = os.path.join(path_logs, f'results_{args.Split}')
with open(args.PathResult2, 'rb') as f:
allres2 = pickle.load(f)
print()
bs.run(allres2['loglik'])
print()
pp = PairPerm()
pp.run(allres['loglik'], allres2['loglik'])
if args.DrawFigure:
dr = Drawer()
def _getName(folder_name):
if ('single' in folder_name):
return 'NHP'
elif ('struct' in folder_name):
return 'structured NHP'
else:
raise Exception(f'Unparsable name {folder_name}')
path_save = os.path.join(args.PathDomain, 'figures')
if (not os.path.exists(path_save)):
os.makedirs(path_save)
dr.draw(allres['loglik'], allres2['loglik'], name1=_getName(args.PathResult), name2=_getName(args.PathResult2), figname=f'{args.FolderName}_vs_{args.FolderName2}', path_save=path_save) |
def splitZip(path):
components = os.path.normpath(path).split(os.sep)
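    # Note: the 'else' below is attached to the 'for' loop, so (path, None) is
    # returned only when no component ends with '.zip'.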
for (index, component) in enumerate(components):
if component.endswith('.zip'):
zipPath = os.sep.join(components[0:(index + 1)])
archivePath = ''.join([(x + '/') for x in components[(index + 1):]])
return (zipPath, archivePath)
else:
return (path, None) |
class Issue(DataClassDictMixin):
id: int
node_id: str
url: str
repository_url: str
labels_url: str
comments_url: str
events_url: str
html_url: str
number: int
state: IssueState
state_reason: Optional[StateReason]
title: str
user: Optional[SimpleUser]
labels: List[Label]
assignee: Optional[SimpleUser]
assignees: Optional[List[SimpleUser]]
locked: bool
active_lock_reason: Optional[str]
comments: int
closed_at: Optional[datetime]
created_at: Optional[datetime]
updated_at: Optional[datetime]
author_association: AuthorAssociation
reactions: Optional[Reactions] = None
pull_request: Optional[PullRequest] = None
body_html: Optional[str] = None
body_text: Optional[str] = None
timeline_url: Optional[str] = None
body: Optional[str] = None
class Config(BaseConfig):
omit_default = True |
class SimpleWire(ComponentLevel4):
def construct(s):
s.read = CalleePort(method=s.rd)
s.write = CalleePort(method=s.wr)
s.v = 0
s.add_constraints((M(s.rd) > M(s.wr)))
def wr(s, v):
s.v = v
def rd(s):
return s.v
def line_trace(s):
return ('%d' % s.v) |
def main():
parser = argparse.ArgumentParser(description='Networks')
parser.add_argument('--modelname', default='SETR_ConvFormer', type=str, help='type of model')
parser.add_argument('--task', default='ICH', help='task or dataset name')
args = parser.parse_args()
opt = get_config(args.task)
opt.save_path_code = '_'
opt.mode = 'eval'
print(opt.load_path)
device = torch.device(opt.device)
if (opt.gray == 'yes'):
from utils.utils_gray import JointTransform2D, ImageToImage2D
else:
from utils.utils_rgb import JointTransform2D, ImageToImage2D
seed_value = 300
np.random.seed(seed_value)
random.seed(seed_value)
os.environ['PYTHONHASHSEED'] = str(seed_value)
torch.manual_seed(seed_value)
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
torch.backends.cudnn.deterministic = True
tf_test = JointTransform2D(img_size=opt.img_size, crop=opt.crop, p_flip=0, color_jitter_params=None, long_mask=True)
test_dataset = ImageToImage2D(opt.data_path, opt.test_split, tf_test, opt.classes)
testloader = DataLoader(test_dataset, batch_size=1, shuffle=False)
model = get_model(modelname=args.modelname, img_size=opt.img_size, img_channel=opt.img_channel, classes=opt.classes)
model.to(device)
model.load_state_dict(torch.load(opt.load_path))
pytorch_total_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
print('Total_params: {}'.format(pytorch_total_params))
criterion = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-05, 'do_bg': False}, {}, weight_ce=1)
if (opt.mode == 'train'):
(dices, mean_dice, hds, val_losses) = get_eval(testloader, model, criterion, opt)
print(dices, mean_dice, hds)
else:
(dice, iou, acc, se, sp) = get_eval(testloader, model, criterion, opt)
print(dice, iou, acc, se, sp)
'\n timestr0 = time.strftime(\'%m%d%H%M\')\n record_path = "./records/ACDC/" + args.modelname + opt.save_path_code + timestr0 + \'/\'\n if not os.path.exists(record_path):\n os.mkdir(record_path)\n for i in range(6):\n os.mkdir(record_path + "sample" + str(i) + "/")\n for i in range(6): # the sample id \n with open(record_path + "sample" + str(i) + "/" + "rtoken1.txt", \'a\') as f:\n for j in range (rtoken1.shape[1]): \n f.write(str(rtoken1[i, j]) + " ")\n f.write(\'\n\')\n with open(record_path + "sample" + str(i) + "/" + "rtoken2.txt", \'a\') as f:\n for j in range (rtoken2.shape[1]): \n f.write(str(rtoken2[i, j]) + " ")\n f.write(\'\n\')\n with open(record_path + "sample" + str(i) + "/" + "rtoken3.txt", \'a\') as f:\n for j in range (rtoken3.shape[1]): \n f.write(str(rtoken3[i, j]) + " ")\n f.write(\'\n\')\n with open(record_path + "sample" + str(i) + "/" + "rmap1.txt", \'a\') as f:\n for j in range (rmap1.shape[1]): \n f.write(str(rmap1[i, j]) + " ")\n f.write(\'\n\')\n with open(record_path + "sample" + str(i) + "/" + "rmap2.txt", \'a\') as f:\n for j in range (rmap2.shape[1]): \n f.write(str(rmap2[i, j]) + " ")\n f.write(\'\n\')\n with open(record_path + "sample" + str(i) + "/" + "rmap3.txt", \'a\') as f:\n for j in range (rmap3.shape[1]): \n f.write(str(rmap3[i, j]) + " ")\n f.write(\'\n\')\n ' |
def test_unionize_dataframe_categories_single(uniontest_df1, uniontest_df2, uniontest_df3):
(udf1, udf2, udf3) = janitor.unionize_dataframe_categories(uniontest_df1, uniontest_df2, uniontest_df3, column_names='fruits')
assert (set(udf1['fruits'].dtype.categories) == set(udf2['fruits'].dtype.categories))
assert (set(udf1['fruits'].dtype.categories) == set(udf3['fruits'].dtype.categories))
assert (not (set(udf1['jerbs'].dtype.categories) == set(udf2['jerbs'].dtype.categories))) |
def unet_resnext_50_lovasz(input_shape, freeze_encoder):
(resnet_base, hyper_list) = Unet(backbone_name='resnext50', input_shape=input_shape, input_tensor=None, encoder_weights='imagenet', freeze_encoder=freeze_encoder, skip_connections='default', decoder_block_type='transpose', decoder_filters=(128, 64, 32, 16, 8), decoder_use_batchnorm=True, n_upsample_blocks=5, upsample_rates=(2, 2, 2, 2, 2), classes=1, activation='sigmoid')
x = SpatialDropout2D(0.2)(resnet_base.output)
x = Conv2D(1, (1, 1), name='prediction')(x)
model = Model(resnet_base.input, x)
return model |
class Migration(migrations.Migration):
dependencies = [('options', '0012_meta')]
operations = [migrations.AlterModelOptions(name='option', options={'ordering': ('optionset__order', 'optionset__key', 'order', 'key'), 'permissions': (('view_option', 'Can view Option'),), 'verbose_name': 'Option', 'verbose_name_plural': 'Options'})] |
class TestGetOrganization(ApiTestCase):
def test_unknownorg(self):
self.login(ADMIN_ACCESS_USER)
self.getResponse(Organization, params=dict(orgname='notvalid'), expected_code=404)
def test_cannotaccess(self):
self.login(NO_ACCESS_USER)
self.getResponse(Organization, params=dict(orgname=ORGANIZATION), expected_code=200)
def test_getorganization(self):
self.login(READ_ACCESS_USER)
json = self.getJsonResponse(Organization, params=dict(orgname=ORGANIZATION))
self.assertEqual(ORGANIZATION, json['name'])
self.assertEqual(False, json['is_admin'])
def test_getorganization_asadmin(self):
self.login(ADMIN_ACCESS_USER)
json = self.getJsonResponse(Organization, params=dict(orgname=ORGANIZATION))
self.assertEqual(ORGANIZATION, json['name'])
self.assertEqual(True, json['is_admin']) |
class _composite_rays(Function):
    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
def forward(ctx, n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, deltas, weights_sum, depth, image, T_thresh=0.01):
_backend.composite_rays(n_alive, n_step, T_thresh, rays_alive, rays_t, sigmas, rgbs, deltas, weights_sum, depth, image)
return tuple() |
class VcfSpeedSuite():
def setup(self) -> None:
asv_env_dir = os.environ['ASV_ENV_DIR']
path = Path(asv_env_dir, 'project/sgkit/tests/io/vcf/data/1000G.phase3.broad.withGenotypes.chr20..vcf.gz')
tmp_path = Path(tempfile.mkdtemp())
self.input_vcf = tmp_path.joinpath('1000G.in.vcf').as_posix()
self.input_zarr = tmp_path.joinpath('1000G.in.zarr').as_posix()
self.output_zarr = tmp_path.joinpath('1000G.out.zarr').as_posix()
self.output_vcf = tmp_path.joinpath('1000G.out.vcf').as_posix()
_gunzip(path, self.input_vcf)
self.field_defs = {'FORMAT/AD': {'Number': 'R'}}
vcf_to_zarr(self.input_vcf, self.input_zarr, fields=['INFO/*', 'FORMAT/*'], field_defs=self.field_defs, chunk_length=1000, target_part_size=None)
def track_vcf_to_zarr_speed(self) -> None:
duration = _time_func(vcf_to_zarr, self.input_vcf, self.output_zarr, fields=['INFO/*', 'FORMAT/*'], field_defs=self.field_defs, chunk_length=1000, target_part_size=None)
return _to_mb_per_s(os.path.getsize(self.input_vcf), duration)
def track_zarr_to_vcf_speed(self) -> None:
for _ in range(2):
duration = _time_func(zarr_to_vcf, self.input_zarr, self.output_vcf)
return _to_mb_per_s(os.path.getsize(self.output_vcf), duration) |
class PoseHighResolutionNet(nn.Module):
def __init__(self, cfg, **kwargs):
self.inplanes = 64
extra = cfg['MODEL']['EXTRA']
self.cfg = cfg
super(PoseHighResolutionNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(Bottleneck, 64, 4)
self.stage2_cfg = extra['STAGE2']
num_channels = self.stage2_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage2_cfg['BLOCK']]
num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer([256], num_channels)
(self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
self.stage3_cfg = extra['STAGE3']
num_channels = self.stage3_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage3_cfg['BLOCK']]
num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
(self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
self.stage4_cfg = extra['STAGE4']
num_channels = self.stage4_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage4_cfg['BLOCK']]
num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
(self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=False)
self.is_prm = False
if (('prm' in kwargs) and (kwargs['prm'] == True)):
self.prm = PRM(pre_stage_channels[0])
self.is_prm = True
self.final_layer = nn.Conv2d(in_channels=pre_stage_channels[0], out_channels=cfg['MODEL']['NUM_JOINTS'], kernel_size=extra['FINAL_CONV_KERNEL'], stride=1, padding=(1 if (extra['FINAL_CONV_KERNEL'] == 3) else 0))
self.pretrained_layers = extra['PRETRAINED_LAYERS']
def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
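        # For branches that already exist, a 3x3 conv adapts channel counts when
        # they differ; new lower-resolution branches are created via strided 3x3
        # conv chains.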
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if (i < num_branches_pre):
if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
transition_layers.append(nn.Sequential(nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), nn.BatchNorm2d(num_channels_cur_layer[i]), nn.ReLU(inplace=True)))
else:
transition_layers.append(None)
else:
conv3x3s = []
for j in range(((i + 1) - num_branches_pre)):
inchannels = num_channels_pre_layer[(- 1)]
outchannels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else inchannels)
conv3x3s.append(nn.Sequential(nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), nn.BatchNorm2d(outchannels), nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion), momentum=BN_MOMENTUM))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):
num_modules = layer_config['NUM_MODULES']
num_branches = layer_config['NUM_BRANCHES']
num_blocks = layer_config['NUM_BLOCKS']
num_channels = layer_config['NUM_CHANNELS']
block = blocks_dict[layer_config['BLOCK']]
fuse_method = layer_config['FUSE_METHOD']
modules = []
for i in range(num_modules):
if ((not multi_scale_output) and (i == (num_modules - 1))):
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output))
num_inchannels = modules[(- 1)].get_num_inchannels()
return (nn.Sequential(*modules), num_inchannels)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['NUM_BRANCHES']):
if (self.transition1[i] is not None):
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['NUM_BRANCHES']):
if (self.transition2[i] is not None):
x_list.append(self.transition2[i](y_list[(- 1)]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['NUM_BRANCHES']):
if (self.transition3[i] is not None):
x_list.append(self.transition3[i](y_list[(- 1)]))
else:
x_list.append(y_list[i])
y_list = self.stage4(x_list)
x = y_list[0]
if self.is_prm:
x = self.prm(x)
x = self.final_layer(x)
if (self.cfg.MODEL.NAME in ['pose_hrnet', 'pose_multi']):
return x
else:
return (y_list[0], x)
def init_weights(self, pretrained=''):
logger.info('=> init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
for (name, _) in m.named_parameters():
if (name in ['bias']):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, std=0.001)
for (name, _) in m.named_parameters():
if (name in ['bias']):
nn.init.constant_(m.bias, 0)
if os.path.isfile(pretrained):
pretrained_state_dict = torch.load(pretrained, map_location='cpu')
logger.info('=> loading pretrained model {}'.format(pretrained))
need_init_state_dict = {}
for (name, m) in pretrained_state_dict.items():
            if ((name.split('.')[0] in self.pretrained_layers) or (self.pretrained_layers[0] == '*')):
need_init_state_dict[name] = m
self.load_state_dict(need_init_state_dict, strict=False)
elif pretrained:
logger.error('=> please download pre-trained models first!')
            raise ValueError('{} does not exist!'.format(pretrained))
def download_file_from_google_drive(id, destination):
    URL = 'https://docs.google.com/uc?export=download'
session = requests.Session()
response = session.get(URL, params={'id': id}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
return True |
@pytest.mark.parametrize('size1, size2, axis, concatenate', [((5,), (3,), 0, True), ((5,), (3,), (- 1), True), ((5, 2), (3, 2), 0, True), ((2, 5), (2, 3), 1, True), ((2, 5), (2, 5), 0, False), ((2, 5), (2, 5), 1, False), ((2, 5), (2, 5), 2, False)])
def test_measurable_join_univariate(size1, size2, axis, concatenate):
base1_rv = pt.random.normal(size=size1, name='base1')
base2_rv = pt.random.exponential(size=size2, name='base2')
if concatenate:
y_rv = pt.concatenate((base1_rv, base2_rv), axis=axis)
else:
y_rv = pt.stack((base1_rv, base2_rv), axis=axis)
y_rv.name = 'y'
base1_vv = base1_rv.clone()
base2_vv = base2_rv.clone()
y_vv = y_rv.clone()
base_logps = list(conditional_logp({base1_rv: base1_vv, base2_rv: base2_vv}).values())
if concatenate:
base_logps = pt.concatenate(base_logps, axis=axis)
else:
base_logps = pt.stack(base_logps, axis=axis)
y_logp = logp(y_rv, y_vv)
assert_no_rvs(y_logp)
base1_testval = base1_rv.eval()
base2_testval = base2_rv.eval()
if concatenate:
y_testval = np.concatenate((base1_testval, base2_testval), axis=axis)
else:
y_testval = np.stack((base1_testval, base2_testval), axis=axis)
np.testing.assert_allclose(base_logps.eval({base1_vv: base1_testval, base2_vv: base2_testval}), y_logp.eval({y_vv: y_testval})) |
class OctaveMatrixGenerator(MatrixGenerator):
_idx_start = 1
_idx_delim = '()'
_base_printer = OctaveCodePrinter
_type_declar = ''
_line_contin = ' ...'
_comment_char = '%'
_m_template = 'function [{output_args}] = {prefix}({input_args})\n% function [{output_args}] = {prefix}({input_args})\n%\n{docstring}\n\n{subexprs}\n\n{outputs}\n\nend\n'
def doprint(self, prefix='eval_mats'):
self.code_blocks['prefix'] = prefix
return self._m_template.format(**self.code_blocks)
def write(self, prefix='eval_mats', path=None):
if (path is None):
path = os.getcwd()
text = self.doprint(prefix=prefix)
with open(os.path.join(path, (prefix + '.m')), 'w') as f:
f.write(text) |
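A quick illustration of how doprint fills the Octave function template, assuming the class and its dependencies (MatrixGenerator, OctaveCodePrinter) are importable; the code_blocks keys shown here are hypothetical stand-ins for what the base class would assemble:
blocks = {
    'prefix': 'eval_mats',
    'output_args': 'M',
    'input_args': 'q',
    'docstring': '% Evaluate the mass matrix M at coordinates q.',
    'subexprs': 'x0 = q(1);',
    'outputs': 'M = [x0];',
}
print(OctaveMatrixGenerator._m_template.format(**blocks)) |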
class ReduceLRWDOnPlateau(ReduceLROnPlateau):
def epoch_step(self, metrics, epoch):
current = metrics
if (current is None):
warnings.warn('Learning Rate Plateau Reducing requires metrics available!', RuntimeWarning)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif (not self.in_cooldown()):
if (self.wait >= self.patience):
for (i, param_group) in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
if (old_lr > (self.min_lr + self.eps)):
new_lr = (old_lr * self.factor)
new_lr = max(new_lr, self.min_lr)
param_group['lr'] = new_lr
if (self.verbose > 0):
print(('\nEpoch %d: reducing learning rate to %s.' % (epoch, new_lr)))
if (param_group['weight_decay'] != 0):
old_weight_decay = float(param_group['weight_decay'])
new_weight_decay = max((old_weight_decay * self.factor), self.min_lr)
if (old_weight_decay > (new_weight_decay + self.eps)):
param_group['weight_decay'] = new_weight_decay
if self.verbose:
print(f'\nEpoch {epoch}: reducing weight decay factor of group {i} to {new_weight_decay:.4e}.')
self.cooldown_counter = self.cooldown
self.wait = 0
self.wait += 1 |
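A hedged usage sketch, assuming the custom ReduceLROnPlateau base accepts torch-style constructor arguments (optimizer, factor, patience, ...) and that epoch_step is called once per epoch with the monitored metric:
import torch

model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, weight_decay=1e-4)
scheduler = ReduceLRWDOnPlateau(optimizer, factor=0.5, patience=3)  # constructor args assumed
for epoch in range(20):
    val_loss = 1.0 / (epoch + 1)  # synthetic stand-in for a real validation metric
    scheduler.epoch_step(val_loss, epoch) |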
def test_default_image_optimizer():
torch.manual_seed(0)
image = torch.rand(1, 3, 128, 128)
optimizer = optim.default_image_optimizer(image)
assert isinstance(optimizer, torch.optim.Optimizer)
actual = optimizer.param_groups[0]['params'][0]
desired = image
ptu.assert_allclose(actual, desired) |
def parse_args():
parser = argparse.ArgumentParser(description='AB3DMOT')
parser.add_argument('--dataset', type=str, default='nuScenes', help='KITTI, nuScenes')
parser.add_argument('--split', type=str, default='val', help='train, val, test')
parser.add_argument('--det_name', type=str, default='centerpoint', help='name of the detection method')
args = parser.parse_args()
return args |
def _validate_pickup_pool_size(item_pool: list[PickupEntry], game: GameDescription, configuration: BaseConfiguration) -> None:
min_starting_pickups = configuration.standard_pickup_configuration.minimum_random_starting_pickups
if (len(item_pool) > (game.region_list.num_pickup_nodes + min_starting_pickups)):
raise InvalidConfiguration('Item pool has {} items, which is more than {} (game) + {} (minimum starting items)'.format(len(item_pool), game.region_list.num_pickup_nodes, min_starting_pickups))
max_starting_pickups = configuration.standard_pickup_configuration.maximum_random_starting_pickups
if (min_starting_pickups > max_starting_pickups):
raise InvalidConfiguration(f'Preset has {min_starting_pickups} minimum starting items, which is more than the maximum of {max_starting_pickups}.') |
def can_symlink(local_resource_dir: Path) -> bool:
if (not WINDOWS):
return True
if (local_resource_dir not in _can_symlink_cache):
with TemporaryDirectory(dir=local_resource_dir) as d:
p = Path(d)
target = (p / 'a')
target.touch()
lnk = (p / 'b')
try:
lnk.symlink_to(target)
_can_symlink_cache[local_resource_dir] = True
except (OSError, NotImplementedError):
_can_symlink_cache[local_resource_dir] = False
return _can_symlink_cache[local_resource_dir] |
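can_symlink leans on two module-level names that fall outside the snippet; their likely definitions, inferred from how they are used:
import sys
from pathlib import Path

WINDOWS = (sys.platform == 'win32')   # assumed platform flag
_can_symlink_cache: dict[Path, bool] = {}  # assumed per-directory result cache |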
class YosysBehavioralTranslatorL2(YosysBehavioralTranslatorL1, VBehavioralTranslatorL2):
def _get_rtlir2v_visitor(s):
return YosysBehavioralRTLIRToVVisitorL2
def rtlir_tr_behavioral_tmpvars(s, tmpvars):
_tmpvars = []
for tmpvar in tmpvars:
_tmpvars += tmpvar
make_indent(_tmpvars, 1)
return '\n'.join(_tmpvars) |
class AndRequestChecker(RequestChecker):
def __init__(self, request_checkers: Iterable[RequestChecker]):
self._request_checkers = request_checkers
def check_request(self, mediator: DirectMediator, request: Request) -> None:
for checker in self._request_checkers:
checker.check_request(mediator, request) |
class LearningSchedulesTest(tf.test.TestCase):
def testExponentialDecayWithBurnin(self):
global_step = tf.placeholder(tf.int32, [])
learning_rate_base = 1.0
learning_rate_decay_steps = 3
learning_rate_decay_factor = 0.1
burnin_learning_rate = 0.5
burnin_steps = 2
exp_rates = [0.5, 0.5, 1, 0.1, 0.1, 0.1, 0.01, 0.01]
learning_rate = learning_schedules.exponential_decay_with_burnin(global_step, learning_rate_base, learning_rate_decay_steps, learning_rate_decay_factor, burnin_learning_rate, burnin_steps)
with self.test_session() as sess:
output_rates = []
for input_global_step in range(8):
output_rate = sess.run(learning_rate, feed_dict={global_step: input_global_step})
output_rates.append(output_rate)
self.assertAllClose(output_rates, exp_rates)
def testManualStepping(self):
global_step = tf.placeholder(tf.int64, [])
boundaries = [2, 3, 7]
rates = [1.0, 2.0, 3.0, 4.0]
exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]
learning_rate = learning_schedules.manual_stepping(global_step, boundaries, rates)
with self.test_session() as sess:
output_rates = []
for input_global_step in range(10):
output_rate = sess.run(learning_rate, feed_dict={global_step: input_global_step})
output_rates.append(output_rate)
self.assertAllClose(output_rates, exp_rates) |
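The expected values in testExponentialDecayWithBurnin encode a simple rule: a flat burn-in rate, then a staircase decay. A pure-Python sketch that reproduces the test's exp_rates (this mirrors the behavior the test asserts, not the TF implementation itself):
import math

def decay_with_burnin(step, base, decay_steps, decay_factor, burnin_rate, burnin_steps):
    if step < burnin_steps:
        return burnin_rate
    return base * decay_factor ** (step // decay_steps)

rates = [decay_with_burnin(s, 1.0, 3, 0.1, 0.5, 2) for s in range(8)]
expected = [0.5, 0.5, 1.0, 0.1, 0.1, 0.1, 0.01, 0.01]
assert all(math.isclose(r, e) for (r, e) in zip(rates, expected)) |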
def test_update_questionset_error_section(db):
questionset = QuestionSet.objects.exclude(pages=None).first()
page = questionset.pages.first()
section = page.sections.first()
section.locked = True
section.save()
question = Question.objects.exclude(questionsets=questionset).first()
with pytest.raises(ValidationError):
QuestionLockedValidator(question)({'questionsets': [questionset], 'locked': False}) |
def build_dataset():
noise_label_path = os.path.join('noisy_labels', args.noise_label_file)
noise_y = np.load(noise_label_path)
print('Load noisy label from {}'.format(noise_label_path))
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
if (args.dataset == 'cifar10'):
trainset = CIFAR10(root='data', split='train', train_ratio=0.9, trust_ratio=0, download=True, transform=transform_train)
valset = CIFAR10(root='data', split='val', train_ratio=0.9, trust_ratio=0, download=True, transform=transform_test)
testset = CIFAR10(root='data', split='test', download=True, transform=transform_test)
num_class = 10
elif (args.dataset == 'cifar100'):
trainset = CIFAR100(root='data', split='train', train_ratio=0.9, trust_ratio=0, download=True, transform=transform_train)
valset = CIFAR100(root='data', split='val', train_ratio=0.9, trust_ratio=0, download=True, transform=transform_test)
testset = CIFAR100(root='data', split='test', download=True, transform=transform_test)
num_class = 100
else:
raise ValueError('Dataset should be cifar10 or cifar100.')
print('train data size:', len(trainset))
print('validation data size:', len(valset))
print('test data size:', len(testset))
num_noise_class = len(np.unique(noise_y))
assert (num_noise_class == num_class)
assert (len(noise_y) == len(trainset))
gt_clean_y = deepcopy(trainset.get_data_labels())
y_train = noise_y.copy()
noise_y_train = None
p = None
if (args.corruption_type == 'unif'):
(noise_y_train, p, _) = noisify_with_P(y_train, nb_classes=num_class, noise=args.corruption_prob, random_state=args.seed)
trainset.update_corrupted_label(noise_y_train)
print('apply uniform noise')
else:
if (args.dataset == 'cifar10'):
(noise_y_train, p, _) = noisify_cifar10_asymmetric(y_train, noise=args.corruption_prob, random_state=args.seed)
elif (args.dataset == 'cifar100'):
(noise_y_train, p, _) = noisify_cifar100_asymmetric(y_train, noise=args.corruption_prob, random_state=args.seed)
else:
raise ValueError('Dataset should be cifar10 or cifar100.')
trainset.update_corrupted_label(noise_y_train)
print('apply asymmetric noise')
print('probability transition matrix:\n{}\n'.format(p))
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.prefetch, pin_memory=True)
train_meta_loader = torch.utils.data.DataLoader(valset, batch_size=args.batch_size, shuffle=True, num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.prefetch, pin_memory=True)
return (train_loader, train_meta_loader, test_loader, gt_clean_y, np.asarray(trainset.targets)) |
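For reference, the uniform ('unif') corruption applied above corresponds to a transition matrix where each label keeps its class with probability 1 - p and flips uniformly to one of the other classes otherwise; this is the standard construction in the noisy-label literature, and that noisify_with_P builds exactly this matrix is an assumption here:
import numpy as np

def uniform_transition_matrix(num_classes, p):
    # Off-diagonal mass p is split evenly across the other num_classes - 1 labels.
    P = np.full((num_classes, num_classes), p / (num_classes - 1))
    np.fill_diagonal(P, 1.0 - p)
    return P

print(uniform_transition_matrix(10, 0.4).round(3)) |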
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, (planes * 2), kernel_size=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
return out |
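A minimal smoke test for the pre-activation Bottleneck above: with inplanes == planes * expansion and stride 1, the identity shortcut works without a downsample branch (shapes only; values are random):
import torch

block = Bottleneck(inplanes=64, planes=32)
x = torch.randn(2, 64, 56, 56)
assert block(x).shape == (2, 64, 56, 56)  # channels preserved: 32 * expansion == 64 |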
@pytest.mark.parametrize('text,result', [('1', PEP440Version(release=Release.from_parts(1))), ('1.2.3', PEP440Version(release=Release.from_parts(1, 2, 3))), ('1.2.3-1', PEP440Version(release=Release.from_parts(1, 2, 3), post=ReleaseTag('post', 1))), ('1.2.3.dev1', PEP440Version(release=Release.from_parts(1, 2, 3), dev=ReleaseTag('dev', 1))), ('1.2.3-1.dev1', PEP440Version(release=Release.from_parts(1, 2, 3), post=ReleaseTag('post', 1), dev=ReleaseTag('dev', 1))), ('1.2.3+local', PEP440Version(release=Release.from_parts(1, 2, 3), local='local')), ('1.2.3+local.1', PEP440Version(release=Release.from_parts(1, 2, 3), local=('local', 1))), ('1.2.3+local1', PEP440Version(release=Release.from_parts(1, 2, 3), local='local1')), ('1.2.3+1', PEP440Version(release=Release.from_parts(1, 2, 3), local=1)), ('1.2.3a1', PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag('alpha', 1))), ('1.2.3.a1', PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag('alpha', 1))), ('1.2.3alpha1', PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag('alpha', 1))), ('1.2.3b1', PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag('beta', 1))), ('1.2.3.b1', PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag('beta', 1))), ('1.2.3beta1', PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag('beta', 1))), ('1.2.3rc1', PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag('rc', 1))), ('1.2.3.rc1', PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag('rc', 1))), ('2.2.0dev0+build.', PEP440Version(release=Release.from_parts(2, 2, 0), dev=ReleaseTag('dev', 0), local=('build', '')))])
def test_pep440_parse_text(text: str, result: PEP440Version) -> None:
assert (PEP440Version.parse(text) == result) |
class TestEntryPoints(unittest.TestCase):
def __init__(self, *args):
super().__init__(*args)
self.ep = importlib_metadata.EntryPoint(name='name', value='value', group='group')
def test_entry_point_pickleable(self):
revived = pickle.loads(pickle.dumps(self.ep))
assert (revived == self.ep)
def test_positional_args(self):
EntryPoint('name', 'value', 'group')
def test_immutable(self):
with self.assertRaises(AttributeError):
self.ep.name = 'badactor'
def test_repr(self):
assert ('EntryPoint' in repr(self.ep))
assert ('name=' in repr(self.ep))
assert ("'name'" in repr(self.ep))
def test_hashable(self):
hash(self.ep)
def test_module(self):
assert (self.ep.module == 'value')
def test_attr(self):
assert (self.ep.attr is None)
def test_sortable(self):
sorted([EntryPoint(name='b', value='val', group='group'), EntryPoint(name='a', value='val', group='group')]) |
def _check_chain(r, chain):
chain = list(reversed(chain))
while chain:
elem = chain.pop()
if (elem is None):
if (r.owner is not None):
return False
elif (r.owner is None):
return False
elif isinstance(elem, Op):
if (r.owner.op != elem):
return False
else:
try:
if (issubclass(elem, Op) and (not isinstance(r.owner.op, elem))):
return False
except TypeError:
return False
if chain:
r = r.owner.inputs[chain.pop()]
return (r is not None) |
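An illustration of the chain format, assuming this helper lives alongside pytensor (whose Op it references): entries alternate between an Op class or instance to match and the index of the owner input to descend into, with None asserting the variable is a root:
import pytensor.tensor as pt
from pytensor.tensor.elemwise import Elemwise

x = pt.scalar('x')
y = x + 1
# y's owner op is an Elemwise; descending into input 0 reaches x, a root variable.
assert _check_chain(y, [Elemwise, 0, None]) |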
class _PreparedIterableCursor():
def __init__(self, prepared, params, kwargs):
self._prepared = prepared
self._params = params
self._kwargs = kwargs
def __aiter__(self):
return getattr(self._prepared, '_get_iterator')(*self._params, **self._kwargs)
def __await__(self):
return getattr(self._prepared, '_get_cursor')(*self._params, **self._kwargs).__await__() |
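A self-contained sketch of the dual-protocol wrapper in action, using a stub in place of the real prepared statement (whose _get_cursor and _get_iterator methods the class delegates to):
import asyncio

class _StubPrepared:
    async def _get_cursor(self, *params, **kwargs):
        return list(params)  # pretend the cursor materializes all rows at once
    def _get_iterator(self, *params, **kwargs):
        async def gen():
            for p in params:
                yield p  # pretend rows stream one at a time
        return gen()

async def main():
    cur = _PreparedIterableCursor(_StubPrepared(), (1, 2, 3), {})
    assert (await cur) == [1, 2, 3]              # awaited: materialized cursor
    assert [row async for row in cur] == [1, 2, 3]  # iterated: streamed rows

asyncio.run(main()) |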
@wp.kernel
def test_vector_arg_types(v2: wp.vec2, v3: wp.vec3, v4: wp.vec4, m22: wp.mat22, m33: wp.mat33, m44: wp.mat44):
wp.expect_eq(v2, wp.vec2(1.0, 2.0))
wp.expect_eq(v3, wp.vec3(1.0, 2.0, 3.0))
wp.expect_eq(v4, wp.vec4(1.0, 2.0, 3.0, 4.0))
wp.expect_eq(m22, wp.mat22(1.0, 2.0, 3.0, 4.0))
wp.expect_eq(m33, wp.mat33(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0))
wp.expect_eq(m44, wp.mat44(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0)) |
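A hedged launch sketch: wp.expect_eq only runs inside kernels, which is why the @wp.kernel decorator is restored above; wp.init() and the device choice are the usual boilerplate, not taken from this source:
import warp as wp

wp.init()
wp.launch(test_vector_arg_types, dim=1, device='cpu',
          inputs=[wp.vec2(1.0, 2.0), wp.vec3(1.0, 2.0, 3.0), wp.vec4(1.0, 2.0, 3.0, 4.0),
                  wp.mat22(1.0, 2.0, 3.0, 4.0),
                  wp.mat33(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0),
                  wp.mat44(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
                           9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0)])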