code stringlengths 281 23.7M |
|---|
class DCGANGenerator(object):
    """DCGAN generator: maps a noise batch z to a (batch, 32, 32, 3) image tensor
    via a linear projection followed by three strided deconvolutions."""

    def __init__(self, hidden_dim=128, batch_size=64, hidden_activation=tf.nn.relu, output_activation=tf.nn.tanh, use_batch_norm=True, z_distribution='normal', scope='generator', **kwargs):
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.hidden_activation = hidden_activation
        self.output_activation = output_activation
        self.use_batch_norm = use_batch_norm
        self.z_distribution = z_distribution
        self.scope = scope

    def __call__(self, z, is_training=True, **kwargs):
        """Build the generator graph for noise batch ``z``; returns the image tensor."""
        def normalize(tensor, bn_name):
            # Apply batch norm only when the generator was configured with it;
            # layer/op names are identical in both configurations.
            if self.use_batch_norm:
                return batch_norm(tensor, name=bn_name, is_training=is_training)
            return tensor

        with tf.variable_scope(self.scope):
            net = self.hidden_activation(normalize(linear(z, ((4 * 4) * 512), name='l0', stddev=0.02), 'bn0'))
            net = tf.reshape(net, [self.batch_size, 4, 4, 512])
            net = self.hidden_activation(normalize(deconv2d(net, [self.batch_size, 8, 8, 256], name='dc1', stddev=0.02), 'bn1'))
            net = self.hidden_activation(normalize(deconv2d(net, [self.batch_size, 16, 16, 128], name='dc2', stddev=0.02), 'bn2'))
            net = self.hidden_activation(normalize(deconv2d(net, [self.batch_size, 32, 32, 64], name='dc3', stddev=0.02), 'bn3'))
            # Final layer: 3x3 kernel, stride 1, no batch norm, output activation.
            net = self.output_activation(deconv2d(net, [self.batch_size, 32, 32, 3], 3, 3, 1, 1, name='dc4', stddev=0.02))
            return net

    def generate_noise(self):
        """Sample a (batch_size, hidden_dim) float32 noise batch from the
        configured distribution ('normal' or 'uniform')."""
        shape = (self.batch_size, self.hidden_dim)
        if self.z_distribution == 'normal':
            return np.random.randn(*shape).astype(np.float32)
        if self.z_distribution == 'uniform':
            return np.random.uniform((- 1), 1, shape).astype(np.float32)
        raise NotImplementedError
# NOTE(review): the two bare expressions below look like stripped decorators,
# most likely ``@patch('pypyr.moduleloader.get_module')`` and
# ``@patch.object(Step, 'invoke_step')`` — without them the two ``mock_*``
# parameters are never injected. Confirm against the original test module.
('pypyr.moduleloader.get_module')
(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_str_false(mock_invoke_step, mock_get_module):
    """A step whose 'run' key is the string 'False' must be skipped entirely."""
    step = Step({'name': 'step1', 'run': 'False'})
    context = get_test_context()
    original_len = len(context)
    # The skip decision should be announced at INFO level.
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call('step1 not running because run is False.')
    mock_invoke_step.assert_not_called()
    # Skipping must leave the context untouched.
    assert (len(context) == original_len)
class TDictMixin(TestCase):
    """Verify that FDict behaves exactly like a built-in dict.

    Each test mirrors the same operation on ``self.fdict`` (the implementation
    under test) and ``self.rdict`` (a plain-dict oracle); ``tearDown``
    additionally checks the two still compare equal in both directions.

    The legacy ``failUnless*``/``failIf`` aliases previously used here were
    deprecated since Python 3.1 and removed in Python 3.12, so the modern
    ``assert*`` methods are used throughout.
    """

    def setUp(self):
        self.fdict = FDict()
        self.rdict = {}
        self.fdict['foo'] = self.rdict['foo'] = 'bar'

    def test_getsetitem(self):
        self.assertEqual(self.fdict['foo'], 'bar')
        self.assertRaises(KeyError, self.fdict.__getitem__, 'bar')

    def test_has_key_contains(self):
        self.assertTrue(('foo' in self.fdict))
        self.assertFalse(('bar' in self.fdict))

    def test_iter(self):
        self.assertEqual(list(iter(self.fdict)), ['foo'])

    def test_clear(self):
        self.fdict.clear()
        self.rdict.clear()
        self.assertFalse(self.fdict)

    def test_keys(self):
        self.assertEqual(list(self.fdict.keys()), list(self.rdict.keys()))
        self.assertEqual(list(self.fdict.keys()), list(self.rdict.keys()))

    def test_values(self):
        self.assertEqual(list(self.fdict.values()), list(self.rdict.values()))
        self.assertEqual(list(self.fdict.values()), list(self.rdict.values()))

    def test_items(self):
        self.assertEqual(list(self.fdict.items()), list(self.rdict.items()))
        self.assertEqual(list(self.fdict.items()), list(self.rdict.items()))

    def test_pop(self):
        self.assertEqual(self.fdict.pop('foo'), self.rdict.pop('foo'))
        self.assertRaises(KeyError, self.fdict.pop, 'woo')

    def test_pop_bad(self):
        # pop() accepts at most one default argument.
        self.assertRaises(TypeError, self.fdict.pop, 'foo', 1, 2)

    def test_popitem(self):
        self.assertEqual(self.fdict.popitem(), self.rdict.popitem())
        self.assertRaises(KeyError, self.fdict.popitem)

    def test_update_other(self):
        other = {'a': 1, 'b': 2}
        self.fdict.update(other)
        self.rdict.update(other)

    def test_update_other_is_list(self):
        other = [('a', 1), ('b', 2)]
        self.fdict.update(other)
        self.rdict.update(dict(other))

    def test_update_kwargs(self):
        self.fdict.update(a=1, b=2)
        other = {'a': 1, 'b': 2}
        self.rdict.update(other)

    def test_setdefault(self):
        self.fdict.setdefault('foo', 'baz')
        self.rdict.setdefault('foo', 'baz')
        self.fdict.setdefault('bar', 'baz')
        self.rdict.setdefault('bar', 'baz')

    def test_get(self):
        self.assertEqual(self.rdict.get('a'), self.fdict.get('a'))
        self.assertEqual(self.rdict.get('a', 'b'), self.fdict.get('a', 'b'))
        self.assertEqual(self.rdict.get('foo'), self.fdict.get('foo'))

    def test_repr(self):
        self.assertEqual(repr(self.rdict), repr(self.fdict))

    def test_len(self):
        self.assertEqual(len(self.rdict), len(self.fdict))

    def tearDown(self):
        # Equality must hold both ways after every test.
        self.assertEqual(self.fdict, self.rdict)
        self.assertEqual(self.rdict, self.fdict)
class CustomCallback(TrainerCallback):
    """Trainer callback that also runs evaluation on the *training* set at epoch end."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if not control.should_evaluate:
            return None
        # Snapshot the control flags before evaluate() can mutate them.
        preserved_control = deepcopy(control)
        self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix='train')
        return preserved_control
class OrgProfileViewTest(TestCase):
    """View tests for the organisation profile page."""

    @classmethod
    def setUpTestData(cls):
        # setUpTestData must be a classmethod: Django calls it once on the
        # class to create fixtures shared by all tests in this TestCase.
        add_default_data()

    def test_OrgProfileViewOk(self):
        """A known org slug renders the profile template with HTTP 200."""
        response = self.client.get(reverse('org_profile', args=['rap']))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'petition/org_profile.html')

    def test_OrgProfileViewKo(self):
        """An unknown org slug returns HTTP 404."""
        response = self.client.get(reverse('org_profile', args=['not_existing_org']))
        self.assertEqual(response.status_code, 404)
def spectral_response_all_junctions(solar_cell, incident_light=None, energy=None, V=0, verbose=False):
    """Calculate the spectral response of every junction in a solar cell stack.

    The photon flux is attenuated layer by layer (Beer-Lambert) as it travels
    down the stack; each Junction's quantum efficiency is computed with the
    flux that actually reaches it.

    :param solar_cell: iterable of Layer/Junction objects, front to back.
    :param incident_light: optional (energy, spectrum) pair of arrays.
    :param energy: optional photon energy grid; when omitted it is taken from
        incident_light or defaults to a 0.5-3.5 eV grid of 450 points.
    :param V: voltage forwarded to the junction calculation.
    :param verbose: forwarded as printParameters to calculate_junction_sr.
    :return: dict with per-junction results plus transmitted / reflected /
        passive-loss spectra (fractions relative to the initial spectrum).
    """
    science_reference('Nelson pin spectral response', 'Jenny: (Nelson. The Physics of Solar Cells. Imperial College Press (2003))')
    # Resolve the energy grid and incident spectrum from whatever was provided.
    if (energy is None):
        if (incident_light is not None):
            energy = incident_light[0]
            bs = np.copy(incident_light[1])
        else:
            energy = siUnits(np.linspace(0.5, 3.5, 450), 'eV')
            bs = np.ones_like(energy)
    elif (incident_light is not None):
        # Interpolate the supplied spectrum onto the requested energy grid.
        bs = np.interp(energy, incident_light[0], incident_light[1])
    else:
        bs = np.ones_like(energy)
    bs_initial = np.copy(bs)
    # Front-surface losses: shading, then optional energy-dependent reflectivity.
    if hasattr(solar_cell, 'shading'):
        bs *= (1 - solar_cell.shading)
    if (hasattr(solar_cell, 'reflectivity') and (solar_cell.reflectivity is not None)):
        ref = solar_cell.reflectivity(energy)
        bs *= (1 - ref)
        reflected = (ref * bs_initial)
    else:
        reflected = np.zeros_like(bs)
    qe_result = []
    passive_loss = np.ones_like(bs)
    for (layer_index, layer_object) in enumerate(solar_cell):
        if (type(layer_object) is Layer):
            # Passive layer: pure absorption, no carrier collection.
            bs = (bs * np.exp(((- layer_object.material.alphaE(energy)) * layer_object.width)))
            passive_loss *= np.exp(((- layer_object.material.alphaE(energy)) * layer_object.width))
        elif (type(layer_object) is Junction):
            # Sub-layers in front of the emitter attenuate passively; idx marks
            # where the active part (emitter onwards) of the junction begins.
            idx = 0
            for junction_layer_object in layer_object:
                if (junction_layer_object.role != 'emitter'):
                    bs = (bs * np.exp(((- junction_layer_object.material.alphaE(energy)) * junction_layer_object.width)))
                    passive_loss *= np.exp(((- junction_layer_object.material.alphaE(energy)) * junction_layer_object.width))
                    idx += 1
                else:
                    break
            output = calculate_junction_sr(layer_object, energy, bs, bs_initial, V, printParameters=verbose)
            qe_result.append(output)
            # Attenuate the flux through the junction from the emitter down
            # before moving on to the next element of the stack.
            for junction_layer_object in layer_object[idx:]:
                bs *= np.exp(((- junction_layer_object.material.alphaE(energy)) * junction_layer_object.width))
        else:
            raise ValueError('Strange layer-like object discovered in structure stack: {}'.format(type(layer_object)))
    return {'junctions': qe_result, 'transmitted': bs, 'transmitted_fraction': (bs / bs_initial), 'passive_loss': (1 - passive_loss), 'reflected': reflected, 'reflected_fraction': (reflected / bs_initial), 'e': energy}
def _wraps(orig, glmfunc=None):
if (glmfunc is None):
glmfunc = orig
def decorator(func):
if ('PYUNITY_SPHINX_CHECK' in os.environ):
return func
if isinstance(orig, str):
if GLM_SUPPORT:
return getattr(glm, glmfunc)
else:
return getattr(math, orig)
return orig
return decorator |
def getDroneMult(drone, src, tgt, atkSpeed, atkAngle, distance, tgtSpeed, tgtAngle, tgtSigRadius):
    """Damage multiplier a drone applies against a target at the given geometry.

    Returns 0 when the target sits beyond drone control range or lock range,
    unless the corresponding graph settings say to ignore those limits.
    """
    if ((distance is not None) and (((not GraphSettings.getInstance().get('ignoreDCR')) and (distance > src.item.extraAttributes['droneControlRange'])) or ((not GraphSettings.getInstance().get('ignoreLockRange')) and (distance > src.item.maxTargetRange)))):
        return 0
    droneSpeed = drone.getModifiedItemAttr('maxVelocity')
    droneOpt = GraphSettings.getInstance().get('mobileDroneMode')
    # A mobile drone that can keep up with the target (auto mode) or is forced
    # to follow it is treated as always hitting; otherwise fall back to the
    # turret chance-to-hit formula.
    if ((droneSpeed > 1) and (((droneOpt == GraphDpsDroneMode.auto) and (droneSpeed >= tgtSpeed)) or (droneOpt == GraphDpsDroneMode.followTarget))):
        cth = 1
    else:
        droneRadius = drone.getModifiedItemAttr('radius')
        if (distance is None):
            cthDistance = None
        else:
            # Distance is shifted from the source ship's hull to the drone's
            # position — presumably the drone sits at the source ship;
            # TODO(review): confirm the geometry convention.
            cthDistance = ((distance + src.getRadius()) - droneRadius)
        cth = _calcTurretChanceToHit(atkSpeed=min(atkSpeed, droneSpeed), atkAngle=atkAngle, atkRadius=droneRadius, atkOptimalRange=(drone.maxRange or 0), atkFalloffRange=(drone.falloff or 0), atkTracking=drone.getModifiedItemAttr('trackingSpeed'), atkOptimalSigRadius=drone.getModifiedItemAttr('optimalSigRadius'), distance=cthDistance, tgtSpeed=tgtSpeed, tgtAngle=tgtAngle, tgtRadius=tgt.getRadius(), tgtSigRadius=tgtSigRadius)
    mult = _calcTurretMult(cth)
    return mult
class JointLoss(nn.Module):
    """Joint training loss combining binary, type, pointer, tagger and MLM terms.

    ``criterion`` must provide ready-made loss modules under the keys ``'ce'``,
    ``'ce_tag'``, ``'ce_ops'`` and ``'nll'``.
    """

    def __init__(self, args: Namespace, device: torch.device, criterion, size_average: bool=True, weights: dict=None, denomitor: float=1e-08):
        super(JointLoss, self).__init__()
        self.args = args
        self.device = device
        self.cross_entropy = criterion['ce']
        self.ce_tagger = criterion['ce_tag']
        self.ce_operate = criterion['ce_ops']
        self.nll_loss = criterion['nll']
        self.average = size_average  # True: mean-reduce the order loss; False: sum
        self.max_gen = args.max_generate
        self.weights = weights
        self.softmax = nn.LogSoftmax(dim=(- 1))
        # Numerical-stability epsilon (name kept as-is for interface compatibility).
        self.denomitor = denomitor
        # Tagger-loss weight; default to 0.01 when args lacks the option.
        # Narrowed from a bare ``except:`` so unrelated errors are not swallowed.
        try:
            self.gamma = args.swloss_gamma
        except AttributeError:
            self.gamma = 0.01

    def pointer_loss(self, logits: torch.Tensor, gts: torch.Tensor, masks: torch.Tensor=None) -> torch.Tensor:
        """Cross-entropy over pointer logits, plus an ordering penalty when masks are given."""
        label_loss = self.cross_entropy(logits, gts)
        if (masks is not None):
            mask_logits = (softmax_logits(logits) * masks)
            # Entries on the first sub-diagonal correspond to in-order pointer moves.
            order_logits = torch.cat([torch.diag_embed(torch.diag(mask_logits[ins], (- 1)), offset=(- 1)).unsqueeze(0) for ins in range(mask_logits.shape[0])], dim=0)
            irorder_logits = (mask_logits - order_logits)
            # Penalise out-of-order mass relative to in-order mass.
            order_loss = (torch.sum(torch.exp(irorder_logits), dim=[1, 2]) / (torch.sum(torch.exp(order_logits), dim=[1, 2]) + self.denomitor))
            if self.average:
                order_loss = torch.mean(order_loss)
            else:
                order_loss = torch.sum(order_loss)
            combine_loss = (label_loss + order_loss)
        else:
            combine_loss = label_loss
        return combine_loss

    def tagger_loss(self, tagger_preds: list, tagger_truth: list) -> torch.Tensor:
        """Tagging loss; insert/modify terms are added only when those labels occur."""
        (tagger_logits, ins_logits, mod_logits) = tagger_preds
        (tagger_gts, ins_gts, mod_gts) = tagger_truth
        # CrossEntropy modules expect (batch, classes, seq) layout.
        tagger_logits = tagger_logits.permute(0, 2, 1)
        insert_logits = ins_logits.permute(0, 2, 1)
        modify_logits = mod_logits.permute(0, 2, 1)
        tagger_loss = self.ce_tagger(tagger_logits, tagger_gts)
        tag_combine_loss = tagger_loss
        if (torch.max(ins_gts) > 0):
            insert_loss = self.ce_operate(insert_logits, ins_gts)
            tag_combine_loss += insert_loss
        if (torch.max(mod_gts) > 0):
            modify_loss = self.ce_operate(modify_logits, mod_gts)
            tag_combine_loss += modify_loss
        return tag_combine_loss

    def forward(self, preds: dict, gts: dict, switch_mask: torch.Tensor=None, need_info: bool=False):
        """Total loss over all heads; optionally also returns per-head scalars."""
        (binary_preds, binary_gts) = (preds['binary'], gts['binary'])
        binary_loss = self.cross_entropy(binary_preds, binary_gts)
        (logits_iwo, logits_ip, logits_sc, logits_ill, logits_cm, logits_cr, logits_um) = preds['type']
        (gts_iwo, gts_ip, gts_sc, gts_ill, gts_cm, gts_cr, gts_um) = gts['type'].T
        type_loss = ((((((self.cross_entropy(logits_iwo, gts_iwo) + self.cross_entropy(logits_ip, gts_ip)) + self.cross_entropy(logits_sc, gts_sc)) + self.cross_entropy(logits_cm, gts_cm)) + self.cross_entropy(logits_ill, gts_ill)) + self.cross_entropy(logits_cr, gts_cr)) + self.cross_entropy(logits_um, gts_um))
        (switch_preds, switch_gts) = (preds['switch'], gts['switch'])
        pointer_loss = self.pointer_loss(switch_preds, switch_gts, None)
        (tagger_preds, tagger_gts) = (preds['tagger'], gts['tagger'])
        tagger_loss = (self.gamma * self.tagger_loss(tagger_preds, tagger_gts))
        (mlm_logits, mlm_tgts) = (preds['generate'], gts['generate'])
        output_mlm = self.softmax(mlm_logits)
        loss_mlm = self.nll_loss(output_mlm, mlm_tgts)
        total_loss = (((binary_loss + type_loss) + pointer_loss) + tagger_loss)
        # NLL can come back NaN (presumably when every generate target is
        # ignored); skip the term and report it as zero in that case.
        if (not torch.isnan(loss_mlm)):
            total_loss = (total_loss + loss_mlm)
        else:
            loss_mlm = torch.zeros(loss_mlm.shape, dtype=loss_mlm.dtype, device=loss_mlm.device)
        if need_info:
            loss_info = {'binary': binary_loss.item(), 'type': type_loss.item(), 'switch': pointer_loss.item(), 'tagger': tagger_loss.item(), 'generate': loss_mlm.item()}
            return (total_loss, loss_info)
        else:
            return total_loss
class GlobalAttention(nn.Module):
    """Global attention over a memory bank of source states.

    ``attn_type`` selects the score function ('dot', 'general' or 'mlp');
    ``attn_func`` selects the normalisation ('softmax' or 'sparsemax').
    With ``coverage`` enabled, an extra linear term folds the running
    coverage vector into the memory bank before scoring.
    """

    def __init__(self, dim, coverage=False, attn_type='dot', attn_func='softmax'):
        super(GlobalAttention, self).__init__()
        self.dim = dim
        assert (attn_type in ['dot', 'general', 'mlp']), 'Please select a valid attention type (got {:s}).'.format(attn_type)
        self.attn_type = attn_type
        assert (attn_func in ['softmax', 'sparsemax']), 'Please select a valid attention function.'
        self.attn_func = attn_func
        if (self.attn_type == 'general'):
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif (self.attn_type == 'mlp'):
            self.linear_context = nn.Linear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = nn.Linear(dim, 1, bias=False)
        # mlp uses a bias on the output projection; dot/general do not.
        out_bias = (self.attn_type == 'mlp')
        self.linear_out = nn.Linear((dim * 2), dim, bias=out_bias)
        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)

    def score(self, h_t, h_s):
        """Raw (unnormalised) attention scores.

        h_t: target states (batch, tgt_len, dim); h_s: source states
        (batch, src_len, dim). Returns (batch, tgt_len, src_len).
        """
        (src_batch, src_len, src_dim) = h_s.size()
        (tgt_batch, tgt_len, tgt_dim) = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)
        if (self.attn_type in ['general', 'dot']):
            if (self.attn_type == 'general'):
                h_t_ = h_t.view((tgt_batch * tgt_len), tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
            h_s_ = h_s.transpose(1, 2)
            # Batched dot product: (b, t, d) x (b, d, s) -> (b, t, s).
            return torch.bmm(h_t, h_s_)
        else:
            # mlp score: v^T tanh(W q + U h), broadcast over every (tgt, src) pair.
            dim = self.dim
            wq = self.linear_query(h_t.view((- 1), dim))
            wq = wq.view(tgt_batch, tgt_len, 1, dim)
            wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
            uh = self.linear_context(h_s.contiguous().view((- 1), dim))
            uh = uh.view(src_batch, 1, src_len, dim)
            uh = uh.expand(src_batch, tgt_len, src_len, dim)
            wquh = torch.tanh((wq + uh))
            return self.v(wquh.view((- 1), dim)).view(tgt_batch, tgt_len, src_len)

    def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
        """Compute the attentional hidden state and alignment distribution.

        source: query states, (batch, dim) for a single decode step or
        (batch, tgt_len, dim) for a whole sequence; memory_bank: source states
        (batch, src_len, dim); memory_lengths masks padded source positions.
        Returns (attn_h, align_vectors), batch-major in one-step mode and
        time-major otherwise.
        """
        # One-step input (decoding) arrives without the length dimension.
        if (source.dim() == 2):
            one_step = True
            source = source.unsqueeze(1)
        else:
            one_step = False
        (batch, source_l, dim) = memory_bank.size()
        (batch_, target_l, dim_) = source.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        if (coverage is not None):
            (batch_, source_l_) = coverage.size()
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        if (coverage is not None):
            cover = coverage.view((- 1)).unsqueeze(1)
            # NOTE(review): the += mutates the caller's memory_bank tensor in
            # place before re-binding — confirm that is intended.
            memory_bank += self.linear_cover(cover).view_as(memory_bank)
            memory_bank = torch.tanh(memory_bank)
        align = self.score(source, memory_bank)
        if (memory_lengths is not None):
            # Mask padded source positions before normalising.
            mask = sequence_mask(memory_lengths, max_len=align.size((- 1)))
            mask = mask.unsqueeze(1)
            align.masked_fill_((~ mask), (- float('inf')))
        if (self.attn_func == 'softmax'):
            align_vectors = F.softmax(align.view((batch * target_l), source_l), (- 1))
        else:
            align_vectors = sparsemax(align.view((batch * target_l), source_l), (- 1))
        align_vectors = align_vectors.view(batch, target_l, source_l)
        # Context vector: attention-weighted sum of the memory bank.
        c = torch.bmm(align_vectors, memory_bank)
        concat_c = torch.cat([c, source], 2).view((batch * target_l), (dim * 2))
        attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
        if (self.attn_type in ['general', 'dot']):
            attn_h = torch.tanh(attn_h)
        if one_step:
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)
            (batch_, dim_) = attn_h.size()
            aeq(batch, batch_)
            aeq(dim, dim_)
            (batch_, source_l_) = align_vectors.size()
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        else:
            # Sequence mode: return time-major tensors.
            attn_h = attn_h.transpose(0, 1).contiguous()
            align_vectors = align_vectors.transpose(0, 1).contiguous()
            (target_l_, batch_, dim_) = attn_h.size()
            aeq(target_l, target_l_)
            aeq(batch, batch_)
            aeq(dim, dim_)
            (target_l_, batch_, source_l_) = align_vectors.size()
            aeq(target_l, target_l_)
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        return (attn_h, align_vectors)
class DirLayout():
    """Materialise a small directory tree (described by LAYOUT) for tests."""

    # '/'-separated paths; single-component entries are top-level files.
    LAYOUT = ['folder0/file00', 'folder0/file01', 'folder1/folder10/file100', 'folder1/file10', 'folder1/file11', 'file0', 'file1']

    @classmethod
    def layout_folders(cls):
        """Return the sorted top-level folder names of LAYOUT.

        Declared as a classmethod (it only reads cls.LAYOUT); this also keeps
        instance calls working unchanged.
        """
        folders = set()
        for path in cls.LAYOUT:
            parts = path.split('/')
            if (len(parts) > 1):
                folders.add(parts[0])
        folders = list(folders)
        folders.sort()
        return folders

    @classmethod
    def get_folder_content(cls, name):
        """Return (sorted subfolders, sorted files) directly under folder `name`."""
        folders = set()
        files = set()
        for path in cls.LAYOUT:
            if (not path.startswith((name + '/'))):
                continue
            parts = path.split('/')
            if (len(parts) == 2):
                files.add(parts[1])
            else:
                folders.add(parts[1])
        folders = list(folders)
        folders.sort()
        files = list(files)
        files.sort()
        return (folders, files)

    def __init__(self, factory):
        self._factory = factory
        self.base = factory.getbasetemp()
        self.layout = factory.mktemp('layout')
        self._mklayout()

    def _mklayout(self):
        # Create every file in LAYOUT (and its parent folders) under self.layout.
        for filename in self.LAYOUT:
            path = (self.layout / filename)
            path.parent.mkdir(exist_ok=True, parents=True)
            path.touch()

    def file_url(self):
        """file:// URL of the layout root."""
        return urlutils.file_url(str(self.layout))

    def path(self, *parts):
        """Path under the layout root built from `parts`."""
        return self.layout.joinpath(*parts)

    def base_path(self):
        """Base temporary directory the layout lives in."""
        return self.base
def random_lil(shape, dtype, nnz):
    """Return a scipy.sparse LIL matrix of `shape` with about `nnz` random entries.

    Coordinates are drawn uniformly (duplicates overwrite, so the final number
    of stored values may be below `nnz`). Integer dtypes get values scaled to
    ints in [0, 100).
    """
    sp = pytest.importorskip('scipy')
    rval = sp.sparse.lil_matrix(shape, dtype=dtype)
    huge = (2 ** 30)
    # One generator for the whole loop instead of re-seeding per entry.
    rng = np.random.default_rng()
    for _ in range(nnz):
        # Random coordinate, wrapped into the matrix via modulo.
        idx = (rng.integers(1, (huge + 1), size=2) % shape)
        value = np.random.random()
        if (dtype in integer_dtypes):
            value = int((value * 100))
        # Plain item assignment instead of calling __setitem__ directly.
        rval[tuple(idx)] = value
    return rval
def main():
    """Parse the command line and run the requested conflict-name test."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--module', choices=['all', 'base', 'dplyr'], required=True, help='The module to test')
    arg_parser.add_argument('--allow-conflict-names', action='store_true', default=False, help='Whether to allow conflict names')
    arg_parser.add_argument('--getattr', action='store_true', default=False, help='Whether to test datar.all.sum, otherwise test from datar.all import sum.')
    arg_parser.add_argument('--fun', choices=['sum', 'filter', '_'], default='_', help='The function to test. If _ then sum for all/base, slice for dplyr')
    arg_parser.add_argument('--error', help='The error to expect')
    opts = arg_parser.parse_args()
    make_test(opts.module, opts.allow_conflict_names, opts.getattr, opts.fun, opts.error)
class FakeNotificationAdapter(notification.AbstractNotificationAdapter):
    """In-memory notification adapter that records presented notifications for tests."""

    NAME = 'fake'

    def __init__(self) -> None:
        super().__init__()
        self.presented = []  # every QWebEngineNotification passed to present()
        self.id_gen = itertools.count(1)  # monotonically increasing notification ids

    def present(self, qt_notification: 'QWebEngineNotification', *, replaces_id: Optional[int]) -> int:
        """Record the notification and hand out the next id (replaces_id is ignored)."""
        self.presented.append(qt_notification)
        return next(self.id_gen)

    # NOTE(review): the bare ``(int)`` below is a no-op expression and looks
    # like the remnant of a stripped decorator (possibly ``@pyqtSlot(int)``);
    # confirm against the original source.
    (int)
    def on_web_closed(self, notification_id: int) -> None:
        """Not supported by the fake adapter."""
        raise NotImplementedError
class TestTag():
    """Tests for Tag.expired around the lifetime-end boundary."""

    @staticmethod
    def _build_tag(start_ms, end_ms):
        # Construct a Tag whose lifetime spans [start_ms, end_ms] in ms,
        # deriving the matching second-resolution timestamps.
        end_ts = None if end_ms is None else (end_ms // 1000)
        return Tag(name='latest', reversion=False, manifest_digest='abc123', lifetime_start_ts=(start_ms // 1000), lifetime_start_ms=start_ms, lifetime_end_ts=end_ts, lifetime_end_ms=end_ms)

    def test_expired_with_tag_expired_a_minute_ago(self):
        now_ms = get_epoch_timestamp_ms()
        tag = self._build_tag(now_ms - (3600 * 1000), now_ms - (60 * 1000))
        assert tag.expired

    def test_expired_with_tag_expired_now(self):
        now_ms = get_epoch_timestamp_ms()
        tag = self._build_tag(now_ms - (3600 * 1000), now_ms)
        assert tag.expired

    def test_expired_before_tag_expiration(self):
        now_ms = get_epoch_timestamp_ms()
        tag = self._build_tag(now_ms - (3600 * 1000), now_ms + (3600 * 1000))
        assert not tag.expired

    def test_expired_with_tag_lifetime_end_none(self):
        # A tag without a lifetime end never expires.
        now_ms = get_epoch_timestamp_ms()
        tag = self._build_tag(now_ms - (3600 * 1000), None)
        assert not tag.expired
def clip_graphs_to_size(data, size_limit=5000):
    """Truncate a graph to its first `size_limit` nodes; small graphs pass through."""
    if hasattr(data, 'num_nodes'):
        num_nodes = data.num_nodes
    else:
        num_nodes = data.x.shape[0]
    if num_nodes <= size_limit:
        return data
    logging.info(f' ...clip to {size_limit} a graph of size: {num_nodes}')
    # Restrict edges to the kept node range, carrying edge attributes along.
    edge_attr = data.edge_attr if hasattr(data, 'edge_attr') else None
    (edge_index, edge_attr) = subgraph(list(range(size_limit)), data.edge_index, edge_attr)
    if hasattr(data, 'x'):
        data.x = data.x[:size_limit]
    data.num_nodes = size_limit
    # Trim per-node auxiliary arrays when present.
    if hasattr(data, 'node_is_attributed'):
        data.node_is_attributed = data.node_is_attributed[:size_limit]
        data.node_dfs_order = data.node_dfs_order[:size_limit]
        data.node_depth = data.node_depth[:size_limit]
    data.edge_index = edge_index
    if hasattr(data, 'edge_attr'):
        data.edge_attr = edge_attr
    return data
def test_newtype_optionals(genconverter):
    """An unstructure hook registered for a NewType must also apply to Optional[NewType]."""
    Foo = NewType('Foo', str)
    genconverter.register_unstructure_hook(Foo, (lambda v: v.replace('foo', 'bar')))
    # NOTE(review): this class is instantiated positionally below, which only
    # works if an attrs/dataclass decorator (e.g. ``@define``) was stripped
    # from it — confirm against the original test file.
    class ModelWithFoo():
        total_foo: Foo
        maybe_foo: Optional[Foo]
    assert (genconverter.unstructure(ModelWithFoo(Foo('foo'), Foo('is it a foo?'))) == {'total_foo': 'bar', 'maybe_foo': 'is it a bar?'})
class TestHRITDecompress(unittest.TestCase):
    """Tests for locating and running the external XRIT decompression tool."""

    def test_xrit_cmd(self):
        """get_xritdecompress_cmd validates XRIT_DECOMPRESS_PATH and returns it."""
        old_env = os.environ.get('XRIT_DECOMPRESS_PATH', None)
        # A non-existing path must raise.
        os.environ['XRIT_DECOMPRESS_PATH'] = '/path/to/my/bin'
        with pytest.raises(IOError, match='.* does not exist!'):
            get_xritdecompress_cmd()
        # A directory is not a valid executable either.
        os.environ['XRIT_DECOMPRESS_PATH'] = gettempdir()
        with pytest.raises(IOError, match='.* is a directory!.*'):
            get_xritdecompress_cmd()
        with NamedTemporaryFile() as fd:
            os.environ['XRIT_DECOMPRESS_PATH'] = fd.name
            fname = fd.name
            res = get_xritdecompress_cmd()
        # Restore the caller's environment before asserting.
        if (old_env is not None):
            os.environ['XRIT_DECOMPRESS_PATH'] = old_env
        else:
            os.environ.pop('XRIT_DECOMPRESS_PATH')
        assert (fname == res)

    def test_xrit_outfile(self):
        """The decompressed file name is parsed from the tool's stdout."""
        stdout = [b'Decompressed file: bla.__\n']
        outfile = get_xritdecompress_outfile(stdout)
        assert (outfile == b'bla.__')

    # NOTE(review): the bare string below looks like a stripped decorator,
    # most likely ``@mock.patch('satpy.readers.hrit_base.Popen')`` — without
    # it the ``popen`` argument is never injected. Confirm upstream.
    ('satpy.readers.hrit_base.Popen')
    def test_decompress(self, popen):
        """decompress() runs the tool and returns the output file path."""
        popen.return_value.returncode = 0
        popen.return_value.communicate.return_value = [b'Decompressed file: bla.__\n']
        old_env = os.environ.get('XRIT_DECOMPRESS_PATH', None)
        with NamedTemporaryFile() as fd:
            os.environ['XRIT_DECOMPRESS_PATH'] = fd.name
            res = decompress('bla.C_')
        if (old_env is not None):
            os.environ['XRIT_DECOMPRESS_PATH'] = old_env
        else:
            os.environ.pop('XRIT_DECOMPRESS_PATH')
        assert (res == os.path.join('.', 'bla.__'))
def tdm_fmcw_tx():
    """Build a two-channel TDM FMCW Transmitter (channel 2 delayed by 100 us).

    NOTE(review): several numeric literals in this function look corrupted —
    bare ``.0`` appears where carrier-frequency/bandwidth constants are
    expected, and ``const.c / .0`` divides by zero as written. Recover the
    original constants from the upstream source before using this code.
    """
    wavelength = (const.c / .0)
    # Channel 1 offset 4 wavelengths along -y; channel 2 at the origin with a delay.
    tx_channel_1 = {'location': (0, ((- 4) * wavelength), 0), 'delay': 0}
    tx_channel_2 = {'location': (0, 0, 0), 'delay': 0.0001}
    return Transmitter(f=[(.0 - .0), (.0 + .0)], t=8e-05, tx_power=20, prp=0.0002, pulses=2, channels=[tx_channel_1, tx_channel_2])
def _lambert_conformal_conic__to_cf(conversion):
    """Translate a Lambert Conformal Conic CRS conversion to CF grid-mapping parameters."""
    params = _to_dict(conversion)
    if conversion.method_name.lower().endswith('(2sp)'):
        # 2SP variant: two standard parallels, origin expressed as a "false origin".
        return {
            'grid_mapping_name': 'lambert_conformal_conic',
            'standard_parallel': (params['latitude_of_1st_standard_parallel'], params['latitude_of_2nd_standard_parallel']),
            'latitude_of_projection_origin': params['latitude_of_false_origin'],
            'longitude_of_central_meridian': params['longitude_of_false_origin'],
            'false_easting': params['easting_at_false_origin'],
            'false_northing': params['northing_at_false_origin'],
        }
    # 1SP variant: single standard parallel at the natural origin.
    return {
        'grid_mapping_name': 'lambert_conformal_conic',
        'standard_parallel': params['latitude_of_natural_origin'],
        'longitude_of_central_meridian': params['longitude_of_natural_origin'],
        'false_easting': params['false_easting'],
        'false_northing': params['false_northing'],
    }
class DarkBlock(nn.Module):
    """DarkNet residual block: 1x1 bottleneck conv, 3x3 conv, optional attention
    and drop-path, then an identity shortcut."""

    def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.5, groups=1, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
        super(DarkBlock, self).__init__()
        bottleneck_chs = int(round(out_chs * bottle_ratio))
        conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block)
        self.conv1 = ConvBnAct(in_chs, bottleneck_chs, kernel_size=1, **conv_kwargs)
        self.conv2 = ConvBnAct(bottleneck_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups, **conv_kwargs)
        self.attn = create_attn(attn_layer, channels=out_chs)
        self.drop_path = drop_path

    def zero_init_last_bn(self):
        # Zeroing the last BN gamma makes the block start as an identity mapping.
        nn.init.zeros_(self.conv2.bn.weight)

    def forward(self, x):
        shortcut = x
        out = self.conv2(self.conv1(x))
        if self.attn is not None:
            out = self.attn(out)
        if self.drop_path is not None:
            out = self.drop_path(out)
        return out + shortcut
class UncaughtError(Redirect):
    """Redirect to the root UI's error page for an unhandled exception."""

    def __init__(self, view, root_ui, target_ui, exception):
        # The bookmark of the place the error came from, when a view is available.
        if view:
            source_bookmark = view.as_bookmark(target_ui)
        else:
            source_bookmark = None
        error_bookmark = root_ui.get_bookmark_for_error(str(exception), source_bookmark)
        super().__init__(error_bookmark)
def test_bitstruct_signals():
    """A bitstruct field can be read and added to a plain Bits32 port."""
    bs = mk_bitstruct('BitStructType', {'foo': Bits1, 'bar': Bits32})
    class A2(Component):
        def construct(s):
            s.in0 = InPort(bs)
            s.in1 = InPort(Bits32)
            s.out = OutPort(Bits32)
            # NOTE(review): PyMTL update blocks are normally registered with an
            # ``@update`` decorator; it appears to have been stripped here, so
            # add_upblk is defined but never scheduled — confirm upstream.
            def add_upblk():
                s.out = (s.in0.bar + s.in1)
    def tv_in(m, tv):
        # Drive the DUT inputs from one test-vector row.
        m.in0 = tv[0]
        m.in1 = tv[1]
    def tv_out(m, tv):
        # Compare the DUT output against the expected column.
        assert (m.out == tv[2])
    run_test(A2(), [[bs(0, 0), b32((- 1)), b32((- 1))], [bs(0, 1), b32(1), b32(2)], [bs(0, (- 1)), b32(0), b32((- 1))], [bs(0, 42), b32(42), b32(84)]], tv_in, tv_out)
def plot_amplitudes_zpk(zpks, filename_pdf, fmin=0.001, fmax=100.0, nf=100, fnorm=None):
    """Plot the amplitude response of each (zeros, poles, constant) triple on a
    log-log plot and save it to ``filename_pdf``; responses are optionally
    normalised by their value at ``fnorm``."""
    from pyrocko.plot import gmtpy
    plot = gmtpy.LogLogPlot(width=(30 * gmtpy.cm), yexp=0)
    for index, (zeros, poles, constant) in enumerate(zpks):
        freqs, response = evaluate(zeros, poles, constant, fmin, fmax, nf)
        if fnorm is not None:
            # Normalise by the response at the reference frequency.
            response /= evaluate_at(zeros, poles, constant, fnorm)
        plot.plot((freqs, num.abs(response)), ('-W2p,%s' % gmtpy.color(index)))
    plot.save(filename_pdf)
def squad_build_drqa_doc_encodings(out_dir, encoder_model, num_workers, all_squad=False):
    """Encode the paragraphs of all SQuAD-relevant DrQA documents and save them.

    Writes ``docs.json`` (paragraph texts per title) and ``encodings.npz`` (one
    vector per paragraph, keyed ``<title>_<i>``) into ``out_dir``.
    """
    print('loading data...')
    corpus = SquadRelevanceCorpus()
    questions = corpus.get_dev()
    if all_squad:
        questions.extend(corpus.get_train())
    relevant_titles = list(set([q.paragraph.doc_title for q in questions]))
    conn = sqlite3.connect(DRQA_DOC_DB)
    c = conn.cursor()
    titles = list(set([q.paragraph.doc_title for q in questions]))
    # Patch a title that is spelled differently in the DrQA document DB.
    for (i, t) in enumerate(titles):
        if (t == 'Sky (United Kingdom)'):
            titles[i] = 'Sky UK'
    # Map DB titles back to the SQuAD titles (both lists come from the same
    # set expression, so within one run they pair up element-wise).
    title_to_doc_id = {t1: t2 for (t1, t2) in zip(titles, relevant_titles)}
    # Fetch only the documents we need via a temporary id table.
    c.execute('CREATE TEMPORARY TABLE squad_docs(id)')
    c.executemany('INSERT INTO squad_docs VALUES (?)', [(x,) for x in titles])
    c.execute('SELECT id, text FROM documents WHERE id IN squad_docs')
    out = c.fetchall()
    conn.close()
    out = [(title_to_doc_id[title], text) for (title, text) in out]
    spec = QuestionAndParagraphsSpec(batch_size=None, max_num_contexts=1, max_num_question_words=None, max_num_context_words=None)
    voc = corpus.get_vocab()
    encoder = SentenceEncoderSingleContext(model_dir_path=encoder_model, vocabulary=voc, spec=spec, loader=ResourceLoader())
    workers = ProcessPool(num_workers, initializer=init, initargs=[])
    documents = {}
    tokenized_documents = {}
    print('Tokenizing...')
    # Tokenize documents in parallel; results arrive in arbitrary order.
    with tqdm(total=len(out)) as pbar:
        for (doc, tok_doc) in tqdm(workers.imap_unordered(get_document_paragraphs, out)):
            documents.update(doc)
            tokenized_documents.update(tok_doc)
            pbar.update()
    encodings = {}
    print('Encoding...')
    for (title, paragraphs) in tqdm(tokenized_documents.items()):
        # The encoder interface requires a question; use a throwaway placeholder.
        dummy_question = 'Hello Hello'.split()
        model_paragraphs = [BinaryQuestionAndParagraphs(question=dummy_question, paragraphs=[x], label=1, num_distractors=0, question_id='dummy') for x in paragraphs]
        encodings.update({f'{title}_{i}': rep for (i, rep) in enumerate(encoder.encode_paragraphs(model_paragraphs))})
    with open(join(out_dir, 'docs.json'), 'w') as f:
        json.dump(documents, f)
    np.savez_compressed(join(out_dir, 'encodings.npz'), **encodings)
class TestQObjRepr():
    """Tests for qtutils.qobj_repr formatting of QObject instances."""

    # NOTE(review): the line below starts with a dot and is a syntax error as
    # written; it is almost certainly a stripped ``@pytest.mark.parametrize``
    # decorator. Restore it from the original source.
    .parametrize('obj', [QObject(), object(), None])
    def test_simple(self, obj):
        """Objects without name/class info fall back to plain repr()."""
        assert (qtutils.qobj_repr(obj) == repr(obj))

    def _py_repr(self, obj):
        # Strip the surrounding angle brackets from a default repr, if present.
        r = repr(obj)
        if (r.startswith('<') and r.endswith('>')):
            return r[1:(- 1)]
        return r

    def test_object_name(self):
        """An objectName is appended to the repr."""
        obj = QObject()
        obj.setObjectName('Tux')
        expected = f"<{self._py_repr(obj)}, objectName='Tux'>"
        assert (qtutils.qobj_repr(obj) == expected)

    def test_class_name(self):
        """The real class name is shown when the object is cast to a base class."""
        obj = QTimer()
        hidden = sip.cast(obj, QObject)
        expected = f"<{self._py_repr(hidden)}, className='QTimer'>"
        assert (qtutils.qobj_repr(hidden) == expected)

    def test_both(self):
        """objectName and className can both appear."""
        obj = QTimer()
        obj.setObjectName('Pomodoro')
        hidden = sip.cast(obj, QObject)
        expected = f"<{self._py_repr(hidden)}, objectName='Pomodoro', className='QTimer'>"
        assert (qtutils.qobj_repr(hidden) == expected)

    def test_rich_repr(self):
        """A custom __repr__ without angle brackets is used as-is."""
        class RichRepr(QObject):
            def __repr__(self):
                return 'RichRepr()'
        obj = RichRepr()
        assert (repr(obj) == 'RichRepr()')
        expected = "<RichRepr(), className='RichRepr'>"
        assert (qtutils.qobj_repr(obj) == expected)
def test_phased_gaussian_single_particle():
    """Amplitudes of a phased Gaussian single-particle chain match reference
    values and form a normalised state vector."""
    particle_chain = PhasedGaussianSingleParticle(k=(1.2 * 7), sigma=(1.2 / 7), position=(1.5 / 7))
    amps = particle_chain.get_amplitudes(sites_count=8)
    expected = [((- 0.) - 0.3883731j), (0. - 0.3186606j), (0. + 0.3186606j), ((- 0.) + 0.3883731j), ((- 0.) + 0.j), ((- 0.034451) - 0.j), (0.0111212 - 0.j), (0. + 0.j)]
    np.testing.assert_allclose(amps, expected, rtol=1e-05)
    # The amplitude vector must be normalised to 1.
    assert np.isclose(np.linalg.norm(amps), 1)
class ThermalBuilder(BasicBuilder):
    """Case builder for heat-transfer (thermal) OpenFOAM cases.

    Extends BasicBuilder with the thermal fields (T, p_rgh, alphat), the
    transport/thermophysical property dicts and thermal boundary conditions.
    """

    def __init__(self, casePath, solverSettings=None, templatePath=None,
                 fluidProperties=None, turbulenceProperties=None,
                 boundarySettings=None, internalFields=None,
                 transientSettings=None, paralleSettings=None):
        # Defaults are materialised per call instead of being mutable default
        # arguments (shared dict/list defaults would leak state between
        # instances).  The effective default values are unchanged.
        if solverSettings is None:
            solverSettings = getDefaultHeatTransferSolverSettings()
        if fluidProperties is None:
            fluidProperties = {'name': 'air', 'compressible': False, 'kinematicViscosity': 100000.0}
        if turbulenceProperties is None:
            turbulenceProperties = {'name': 'kEpsilon'}
        if boundarySettings is None:
            boundarySettings = []
        if internalFields is None:
            internalFields = {}
        if transientSettings is None:
            transientSettings = {'startTime': 0.0, 'endTime': 1.0, 'timeStep': 0.001, 'writeInterval': 100}
        if paralleSettings is None:
            paralleSettings = {'method': 'simple', 'numberOfSubdomains': multiprocessing.cpu_count()}
        super(ThermalBuilder, self).__init__(casePath, solverSettings, templatePath,
                                             fluidProperties, turbulenceProperties,
                                             boundarySettings, internalFields,
                                             transientSettings, paralleSettings)

    def build(self):
        """Build the case via the base class; no thermal-specific build steps yet."""
        super(ThermalBuilder, self).build()

    def check(self):
        """Validate the case setup; return the accumulated warning/error text."""
        msg = super(ThermalBuilder, self).check()
        case = self._casePath
        settings = self._solverSettings
        if settings['heatTransfering']:
            # Heat-transfer solvers additionally need gravity and the T/p_rgh fields.
            flist = ['constant/g', '0/T', '0/p_rgh']
            for f in flist:
                if not os.path.exists((case + os.path.sep) + f):
                    return msg + 'Error: {} file not found\n'.format(f)
        return msg

    def setupFluidProperties(self, value=None):
        """Optionally replace self.fluidProperties with *value*, then write the
        property dict file matching the compressible/incompressible setting."""
        if value and isinstance(value, dict):
            self.fluidProperties = value
        if self._solverSettings['compressible']:
            self.setupThermophysicalProperties()
        else:
            self.setupTransportProperties()

    def setupTransportProperties(self):
        """Write constant/transportProperties for incompressible (Boussinesq) cases."""
        case = self._casePath
        solver_settings = self._solverSettings
        if solver_settings['nonNewtonian']:
            print('Warning: nonNewtonian case setup is not implemented, please edit dict file directly')
            return
        fluid_name = self.fluidProperties['name']
        if fluid_name == 'air':
            lines = air_Boussinesq_thermophysicalProperties
        elif fluid_name == 'water':
            # was a second `if`, which made 'air' also hit the error branch below
            lines = water_Boussinesq_thermophysicalProperties
        else:
            print('Error: unrecoginsed fluid name: {}'.format(fluid_name))
            return  # nothing to write; previously crashed on unbound `lines`
        if _debug:
            print('Info: Fluid properties is written to constant/transportProperties')
            print(lines)
        createRawFoamFile(case, 'constant', 'transportProperties', lines)

    def setupThermophysicalProperties(self):
        """Write constant/thermophysicalProperties for compressible cases."""
        case = self._casePath
        solver_settings = self._solverSettings
        if solver_settings['nonNewtonian']:
            print('Warning: nonNewtonian case setup is not implemented, please edit dict file directly')
            return
        if self.fluidProperties['name'] == 'air':
            # hePsiThermo for compressible flow without heat transfer,
            # heRhoThermo otherwise (renamed from `type`, which shadowed the builtin).
            if solver_settings['compressible'] and (not solver_settings['heatTransfering']):
                thermo_type = 'hePsiThermo'
            else:
                thermo_type = 'heRhoThermo'
            lines = air_thermophysicalProperties % thermo_type
        else:
            print('Error: unrecoginsed fluid name: {}'.format(self.fluidProperties['name']))
            return  # nothing to write; previously crashed on unbound `lines`
        if _debug:
            print('Infor:Fluid properties is written to constant/thermophysicalProperties')
            print(lines)
        createRawFoamFile(case, 'constant', 'thermophysicalProperties', lines)

    def initBoundaryConditions(self):
        """Initialise all boundaries, then default the thermal fields to walls."""
        bc_names = listBoundaryNames(self._casePath)
        super(ThermalBuilder, self).initBoundaryConditions()
        self.initThermalBoundaryAsWall(bc_names)

    def initThermalBoundaryAsWall(self, bc_names):
        """Default every T boundary to zeroGradient (adiabatic wall)."""
        f = ParsedParameterFile(self._casePath + '/0/T')
        for bc in bc_names:
            f['boundaryField'][bc] = {}
            f['boundaryField'][bc]['type'] = 'zeroGradient'
        f.writeFile()
        if 'alphat' in self._solverCreatedVariables:
            self.initThermalTurbulenceBoundaryAsWall(bc_names)

    def initThermalTurbulenceBoundaryAsWall(self, bc_names):
        """Default every alphat boundary to the matching wall function."""
        f = ParsedParameterFile(self._casePath + '/0/alphat')
        for bc in bc_names:
            f['boundaryField'][bc] = {}
            if self._solverSettings['heatTransfering']:
                if self._solverSettings['compressible']:
                    f['boundaryField'][bc]['type'] = 'compressible::alphatWallFunction'
                    f['boundaryField'][bc]['Prt'] = 0.85
                    f['boundaryField'][bc]['value'] = '$internalField'
                else:
                    f['boundaryField'][bc]['type'] = 'alphatJayatillekeWallFunction'
                    f['boundaryField'][bc]['Prt'] = 0.85
                    f['boundaryField'][bc]['value'] = '$internalField'
            elif self._solverSettings['compressible']:
                f['boundaryField'][bc]['type'] = 'compressible::alphatkeWallFunction'
                f['boundaryField'][bc]['value'] = 'uniform 0'
            else:
                f['boundaryField'][bc]['type'] = 'alphatkeWallFunction'
                f['boundaryField'][bc]['value'] = 'uniform 0'
        f.writeFile()

    def setupThermalBoundary(self):
        """Write the T boundary entries from self._boundarySettings.

        NOTE(review): the elif chain mixes value-subtype checks with
        boundary-type checks, so e.g. a wall whose subtype is 'fixedValue'
        is handled by the generic branch above the wall branch — this
        ordering is preserved as-is.
        """
        f = ParsedParameterFile(self._casePath + '/0/T')
        for boundary in self._boundarySettings:
            bType = boundary['type']
            s = boundary['thermalSettings']
            bc = boundary['name']
            f['boundaryField'][bc] = {}
            f['boundaryField'][bc]['type'] = s['subtype']
            vType = s['subtype']
            if vType == 'fixedValue':
                f['boundaryField'][bc]['value'] = formatValue(s['temperature'])
            elif vType == 'zeroGradient':
                pass
            elif vType == 'fixedGradient':
                f['boundaryField'][bc]['gradient'] = formatValue(s['heatFlux'])
            elif vType == 'mixed':
                f['boundaryField'][bc]['value'] = formatValue(s['temperature'])
                f['boundaryField'][bc]['gradient'] = formatValue(s['heatFlux'])
            elif vType == 'coupled':
                print('{} wall boundary value type is not supproted'.format(vType))
                raise NotImplementedError()
            elif bType == 'wall':
                if vType == 'heatFlux':
                    # fixed `self._solver_settings` -> `self._solverSettings` (AttributeError)
                    if self._solverSettings['compressible']:
                        f['boundaryField'][bc]['type'] = 'compressible::externalWallHeatFluxTemperature'
                    else:
                        f['boundaryField'][bc]['type'] = 'externalWallHeatFluxTemperature'
                    f['boundaryField'][bc]['mode'] = 'flux'
                    f['boundaryField'][bc]['q'] = formatValue(s['heatFlux'])
                    f['boundaryField'][bc]['kappaMethod'] = 'fluidThermo'
                    f['boundaryField'][bc]['relaxation'] = '1'
                elif vType == 'HTC':
                    # fixed `self._solver_settings` -> `self._solverSettings` (AttributeError)
                    if self._solverSettings['compressible']:
                        f['boundaryField'][bc]['type'] = 'compressible::externalWallHeatFluxTemperature'
                    else:
                        f['boundaryField'][bc]['type'] = 'externalWallHeatFluxTemperature'
                    f['boundaryField'][bc]['mode'] = 'coefficient'
                    f['boundaryField'][bc]['kappaMethod'] = 'fluidThermo'
                    f['boundaryField'][bc]['Ta'] = formatValue(s['temperature'])
                    f['boundaryField'][bc]['h'] = formatValue(s['HTC'])
                    f['boundaryField'][bc]['relaxation'] = '1'
                else:
                    print('wall thermal boundary value type: {} is not defined'.format(vType))
            elif (bType == 'inlet') and (vType == 'totalTemperature'):
                f['boundaryField'][bc] = {'type': 'totalTemperature', 'T0': formatValue(s['temperature'])}
            elif bType == 'outlet':
                assert vType == 'fixedValue'
                f['boundaryField'][bc] = {'type': 'inletOutlet', 'inletValue': formatValue(s['temperature']), 'value': formatValue(s['temperature'])}
            elif bType == 'freestream':
                f['boundaryField'][bc] = {'type': 'zeroGradient'}
            elif bType == 'interface':
                f['boundaryField'][bc] = {'type': boundary['subtype']}
            else:
                print('boundary value type: {} is not defined'.format(bType))
        f.writeFile()
        if 'alphat' in self._solverCreatedVariables:
            self._setupThermalTurbulenceDiffusivity()

    def _setupThermalTurbulenceDiffusivity(self):
        """Write the alphat boundary entries matching the boundary settings."""
        f = ParsedParameterFile(self._casePath + '/0/alphat')
        for bcDict in self._boundarySettings:
            bc = bcDict['name']
            subtype = bcDict['subtype']
            if bcDict['type'] in set(['inlet', 'outlet', 'freestream']):
                f['boundaryField'][bc] = {'type': 'calculated', 'value': '$internalField'}
            elif bcDict['type'] == 'interface':
                # fixed missing `f` (was a no-op subscript-assignment on a bare list literal)
                f['boundaryField'][bc] = {'type': subtype}
            else:
                # walls keep the wall-function defaults from init
                pass
        f.writeFile()

    def setupBoundaryConditions(self):
        """Set up base boundary conditions, then the thermal ones."""
        super(ThermalBuilder, self).setupBoundaryConditions()
        self.setupThermalBoundary()

    def setupInletBoundary(self, bcDict):
        super(ThermalBuilder, self).setupInletBoundary(bcDict)
        if self._solverSettings['compressible']:
            # compressible inlet thermal setup is not implemented yet
            raise NotImplementedError()

    def setupOutletBoundary(self, bcDict):
        super(ThermalBuilder, self).setupOutletBoundary(bcDict)

    def setupFreestreamBoundary(self, bcDict):
        super(ThermalBuilder, self).setupFreestreamBoundary(bcDict)

    def setupInterfaceBoundary(self, bcDict):
        super(ThermalBuilder, self).setupInterfaceBoundary(bcDict)
class CLIPVisionConfig(PretrainedConfig):
    """Configuration for the CLIP vision (ViT-style) encoder.

    Stores the architecture hyper-parameters; defaults correspond to the
    base CLIP vision tower (768 hidden, 12 layers/heads, 224px, 32px patches).
    """

    model_type = 'clip_vision_model'

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12,
                 num_attention_heads=12, image_size=224, patch_size=32,
                 hidden_act='quick_gelu', layer_norm_eps=1e-05, dropout=0.0,
                 attention_dropout=0.0, initializer_range=0.02,
                 initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        """Load the vision config from a checkpoint.

        If the checkpoint is a full CLIP config, the nested `vision_config`
        section is extracted.  Fixed: this factory takes `cls` and must be
        decorated with @classmethod — without the decorator it could not be
        called on the class at all.
        """
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == 'clip':
            # full CLIP checkpoints nest the vision settings
            config_dict = config_dict['vision_config']
        if ('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class CustomDatasetDataLoader(BaseDataLoader):
    """Wraps the dataset produced by CreateDataset in a torch DataLoader
    configured from the options object."""

    def name(self):
        return 'CustomDatasetDataLoader'

    def initialize(self, opt):
        BaseDataLoader.initialize(self, opt)
        self.dataset = CreateDataset(opt)
        # Shuffle unless serial (deterministic) batching was requested.
        use_shuffle = not opt.serial_batches
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batchSize,
            shuffle=use_shuffle,
            num_workers=int(opt.nThreads),
        )

    def load_data(self):
        return self.dataloader

    def __len__(self):
        # Dataset size, capped by the configured maximum.
        return min(len(self.dataset), self.opt.max_dataset_size)
def _replace_shared_variables(graph: List[TensorVariable]) -> List[TensorVariable]:
    """Clone *graph*, substituting every SharedVariable input with a constant
    holding its current value.

    Raises ValueError for shared RandomType variables or shared variables
    carrying a default_update, since freezing those would change semantics.
    """
    shared = [v for v in graph_inputs(graph) if isinstance(v, SharedVariable)]
    if any(isinstance(v.type, RandomType) for v in shared):
        raise ValueError('Graph contains shared RandomType variables which cannot be safely replaced')
    if any(v.default_update is not None for v in shared):
        raise ValueError('Graph contains shared variables with default_update which cannot be safely replaced.')
    # Snapshot each shared variable's value (borrow=True avoids a copy).
    constants = {v: pt.constant(v.get_value(borrow=True)) for v in shared}
    return clone_replace(graph, replace=constants)
def test_complete_package_does_not_merge_different_source_type_and_name(provider: Provider, root: ProjectPackage, fixture_dir: FixtureDirGetter) -> None:
    """Two 'demo' requirements that differ in source type/name must be
    reported as incompatible rather than silently merged."""
    project_dir = fixture_dir('with_conditional_path_deps')
    demo_path = (project_dir / 'demo_one').as_posix()
    # One requirement pinned to a named source, one to a local path.
    named_source_dep = Factory.create_dependency('demo', '>=1.0')
    named_source_dep.source_name = 'source'
    path_dep = Factory.create_dependency('demo', {'path': demo_path})
    root.add_dependency(named_source_dep)
    root.add_dependency(path_dep)
    with pytest.raises(IncompatibleConstraintsError) as e:
        provider.complete_package(DependencyPackage(root.to_dependency(), root))
    expected = f'''Incompatible constraints in requirements of root (1.2.3):
demo {project_dir.as_uri()}/demo_one (1.2.3)
demo (>=1.0) ; source=source'''
    assert str(e.value) == expected
def setup(app):
    """Sphinx extension entry point: register a 'versionremoved' directive
    mirroring the built-in versionadded/versionchanged ones."""
    directive = 'versionremoved'
    # Register the label only once, preserving any existing entry.
    versionlabels.setdefault(directive, 'Removed in version %s')
    if versionlabel_classes is not None:
        # guard: versionlabel_classes may be absent/None — presumably on
        # some Sphinx versions; verify against the import site.
        versionlabel_classes[directive] = 'removed'
    app.add_directive(directive, VersionChange)
    return {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
class NnetLatticeBiglmFasterRecognizer(NnetRecognizer):
    """Lattice-generating big-LM faster recognizer for nnet3 models.

    Decodes with a graph compiled against an old LM while rescoring
    on-the-fly with the composition of the (negated) old LM and a new LM.
    """

    def __init__(self, transition_model, acoustic_model, decoder, symbols=None,
                 allow_partial=True, decodable_opts=None, online_ivector_period=10):
        if not isinstance(decoder, _dec.LatticeBiglmFasterDecoder):
            raise TypeError('decoder argument should be a LatticeBiglmFasterDecoder')
        super(NnetLatticeBiglmFasterRecognizer, self).__init__(
            transition_model, acoustic_model, decoder, symbols,
            allow_partial, decodable_opts, online_ivector_period)

    @classmethod
    def from_files(cls, model_rxfilename, graph_rxfilename, old_lm_rxfilename,
                   new_lm_rxfilename, symbols_filename=None, allow_partial=True,
                   decoder_opts=None, decodable_opts=None, online_ivector_period=10):
        """Construct a recognizer from files on disk.

        Fixed: this alternate constructor takes `cls` but was neither
        decorated with @classmethod nor free of `self` references, so every
        call raised NameError.  The intermediate FSTs are now locals, and
        references are attached to the returned instance so they outlive
        construction while the decoder still uses them.
        """
        transition_model, acoustic_model = cls.read_model(model_rxfilename)
        graph = _fst.read_fst_kaldi(graph_rxfilename)
        old_lm = _fst.read_fst_kaldi(old_lm_rxfilename)
        # Scale old-LM scores by -1 so composing with the new LM rescores
        # (subtracts old LM scores, adds new LM scores).
        _fst_utils.apply_probability_scale(-1.0, old_lm)
        new_lm = _fst.read_fst_kaldi(new_lm_rxfilename)
        old_lm_dfst = _fst_spec.StdBackoffDeterministicOnDemandFst(old_lm)
        new_lm_dfst = _fst_spec.StdBackoffDeterministicOnDemandFst(new_lm)
        compose_dfst = _fst_spec.StdComposeDeterministicOnDemandFst(old_lm_dfst, new_lm_dfst)
        cache_compose_dfst = _fst_spec.StdCacheDeterministicOnDemandFst(compose_dfst)
        if not decoder_opts:
            decoder_opts = _dec.LatticeFasterDecoderOptions()
        decoder = _dec.LatticeBiglmFasterDecoder(graph, decoder_opts, cache_compose_dfst)
        if symbols_filename is None:
            symbols = None
        else:
            symbols = _fst.SymbolTable.read_text(symbols_filename)
        recognizer = cls(transition_model, acoustic_model, decoder, symbols,
                         allow_partial, decodable_opts, online_ivector_period)
        # Keep the FST wrappers referenced so they are not garbage collected
        # while the decoder still points at them.
        recognizer._biglm_fsts = (old_lm, new_lm, old_lm_dfst, new_lm_dfst,
                                  compose_dfst, cache_compose_dfst)
        return recognizer
def train(data_dir='./data/', embedding_size=300, skipgram=False, siter=10, diter=10,
          negative_samples=10, window_size=5, output_path='./model',
          overwrite_compass=True, streamlit=False, component=None):
    """Train a TWEC compass on `<data_dir>/compass.txt`, then one aligned
    slice per remaining file in *data_dir*.

    Parameters mirror TWEC's: embedding size, skip-gram vs CBOW, compass and
    slice iteration counts, negative samples and window size.  When
    *streamlit* is True, progress is reported through *component*.

    Returns:
        dict mapping each slice name (file stem) to its trained slice model.
        (Previously the dict was built but never returned.)

    Raises:
        ValueError: if *streamlit* is True but *component* is None.
    """
    if streamlit and (component is None):
        raise ValueError('`component` cannot be `None` when `streamlit` is `True`.')
    aligner = TWEC(size=embedding_size, sg=int(skipgram), siter=siter, diter=diter,
                   workers=4, ns=negative_samples, window=window_size, opath=output_path)
    if streamlit:
        component.write('Training')
        progress = 0.0
        progress_bar = component.progress(progress)
        output = component.beta_expander('Output')
    all_files = sorted(os.listdir(data_dir))
    num_files = len(all_files)
    start = time.time()
    aligner.train_compass(os.path.join(data_dir, 'compass.txt'), overwrite=overwrite_compass)
    end = time.time()
    # time.time() differences are seconds; the old message mislabelled them 'ms'
    compass_out = f'Time Taken for TWEC Pre-Training: {(end - start)} s'
    if not streamlit:
        print(compass_out)
    else:
        # Clamp instead of rounding so progress updates are consistent with
        # the per-slice updates below (the old code rounded only here).
        progress = min(progress + (1 / num_files), 1.0)
        progress_bar.progress(progress)
        with output:
            st.write(compass_out)
    slices = {}
    for file in all_files:
        if file == 'compass.txt':
            continue
        start = time.time()
        slice_name = file.split('.')[0]
        slices[slice_name] = aligner.train_slice(os.path.join(data_dir, file), save=True)
        end = time.time()
        year_out = f"Time Taken for TWEC Fine-tuning for {slice_name}: {(end - start)} s"
        if not streamlit:
            print(year_out)
        else:
            progress = min(progress + (1 / num_files), 1.0)
            progress_bar.progress(progress)
            with output:
                st.write(year_out)
    return slices
def test_root_count(root, testapp, add_file_to_root):
    """The index page's package count reflects files added to the root."""
    page = testapp.get('/')
    page.mustcontain('PyPI compatible package index serving 0 packages')
    # Adding one package file bumps the advertised count.
    add_file_to_root(root, 'Twisted-11.0.0.tar.bz2')
    page = testapp.get('/')
    page.mustcontain('PyPI compatible package index serving 1 packages')
class TestMultiCorpusDataset(unittest.TestCase):
    """Checks that MultiCorpusDataset samples from its member datasets in the
    requested proportions."""

    def setUp(self):
        # Two disjoint token streams: dataset_1 holds odd tokens, dataset_2
        # even tokens, so a sample's parity identifies its source dataset.
        d = mock_dict()
        tokens_1 = torch.LongTensor([i for i in range(1, 5000, 2)]).view(1, (- 1))
        tokens_ds1 = TokenBlockDataset(tokens_1, sizes=[tokens_1.size((- 1))], block_size=1, pad=0, eos=1, include_targets=False)
        self.dataset_1 = LanguagePairDataset(tokens_ds1, tokens_ds1.sizes, d, shuffle=False)
        tokens_2 = torch.LongTensor([i for i in range(0, 5000, 2)]).view(1, (- 1))
        tokens_ds2 = TokenBlockDataset(tokens_2, sizes=[tokens_2.size((- 1))], block_size=1, pad=0, eos=1, include_targets=False)
        self.dataset_2 = LanguagePairDataset(tokens_ds2, tokens_ds2.sizes, d, shuffle=False)

    def _test_sample_helper(self, distribution):
        # Build the multi-corpus dataset and verify the realised sampling
        # ratio for dataset_1 is within 1% of distribution[0].
        m = MultiCorpusDataset(OrderedDict({0: self.dataset_1, 1: self.dataset_2}), distribution=distribution, seed=0, sort_indices=True)
        m.set_epoch(1)
        indices = m.ordered_indices()
        count_sample_from_first_dataset = 0
        items = set()
        for i in indices:
            item = m[i]['source'].item()
            if ((item % 2) == 1):
                # odd token => drawn from dataset_1
                count_sample_from_first_dataset += 1
            items.add(item)
        sample_from_first_ds_percentage = ((1.0 * count_sample_from_first_dataset) / len(indices))
        self.assertLess(abs((sample_from_first_ds_percentage - distribution[0])), 0.01)
        # NOTE(review): both min() terms use len(self.dataset_1); the second
        # looks like it should be len(self.dataset_2).  The two datasets have
        # equal sizes here, so the assertion is unaffected — confirm intent.
        self.assertEqual(len(items), int((min(len(self.dataset_1), (len(indices) * distribution[0])) + min(len(self.dataset_1), (len(indices) * distribution[1])))))
        print(distribution)

    def test_multi_corpus_dataset(self):
        # Exercise a balanced and two skewed distributions.
        for distribution in [[0.5, 0.5], [0.1, 0.9], [0.9, 0.1]]:
            self._test_sample_helper(distribution=distribution)
class Solution():
    def minDepth(self, root: Optional[TreeNode]) -> int:
        """Return the minimum depth of a binary tree (number of nodes on the
        shortest root-to-leaf path); 0 for an empty tree.

        Uses true breadth-first search so it can return at the first leaf
        encountered, instead of the previous stack-based scan that visited
        every node while tracking a running minimum.
        """
        if not root:
            return 0
        from collections import deque
        queue = deque([(root, 1)])
        while queue:
            node, depth = queue.popleft()
            if not node.left and not node.right:
                # First leaf reached in BFS order is the shallowest one.
                return depth
            if node.left:
                queue.append((node.left, depth + 1))
            if node.right:
                queue.append((node.right, depth + 1))
def find_organization_invites(organization, user_obj):
    """Return the TeamMemberInvite rows in *organization* that target
    *user_obj* (by user reference, or by email for verified users)."""
    matches_user = TeamMemberInvite.user == user_obj
    if user_obj.verified:
        # Verified users also match invites addressed to their email.
        matches_user = matches_user | (TeamMemberInvite.email == user_obj.email)
    return (TeamMemberInvite
            .select()
            .join(Team)
            .where(matches_user, Team.organization == organization))
def parse_option():
    """Build and parse the command-line options for 3D detection training.

    Returns the parsed args namespace; unknown arguments are ignored
    (parse_known_args), so the script tolerates launcher-injected flags.
    """
    parser = argparse.ArgumentParser()
    # --- model / transformer architecture ---
    parser.add_argument('--width', default=1, type=int, help='backbone width')
    parser.add_argument('--num_target', type=int, default=256, help='Proposal number [default: 256]')
    parser.add_argument('--sampling', default='kps', type=str, help='Query points sampling method (kps, fps)')
    parser.add_argument('--nhead', default=8, type=int, help='multi-head number')
    parser.add_argument('--num_decoder_layers', default=6, type=int, help='number of decoder layers')
    parser.add_argument('--dim_feedforward', default=2048, type=int, help='dim_feedforward')
    parser.add_argument('--transformer_dropout', default=0.1, type=float, help='transformer_dropout')
    parser.add_argument('--transformer_activation', default='relu', type=str, help='transformer_activation')
    parser.add_argument('--self_position_embedding', default='loc_learned', type=str, help='position_embedding in self attention (none, xyz_learned, loc_learned)')
    parser.add_argument('--cross_position_embedding', default='xyz_learned', type=str, help='position embedding in cross attention (none, xyz_learned)')
    # --- loss weights and loss shapes ---
    parser.add_argument('--query_points_generator_loss_coef', default=0.8, type=float)
    parser.add_argument('--obj_loss_coef', default=0.1, type=float, help='Loss weight for objectness loss')
    parser.add_argument('--box_loss_coef', default=1, type=float, help='Loss weight for box loss')
    parser.add_argument('--sem_cls_loss_coef', default=0.1, type=float, help='Loss weight for classification loss')
    parser.add_argument('--center_loss_type', default='smoothl1', type=str, help='(smoothl1, l1)')
    parser.add_argument('--center_delta', default=1.0, type=float, help='delta for smoothl1 loss in center loss')
    parser.add_argument('--size_loss_type', default='smoothl1', type=str, help='(smoothl1, l1)')
    parser.add_argument('--size_delta', default=1.0, type=float, help='delta for smoothl1 loss in size loss')
    parser.add_argument('--heading_loss_type', default='smoothl1', type=str, help='(smoothl1, l1)')
    parser.add_argument('--heading_delta', default=1.0, type=float, help='delta for smoothl1 loss in heading loss')
    parser.add_argument('--query_points_obj_topk', default=4, type=int, help='query_points_obj_topk')
    # --- dataset / input features ---
    parser.add_argument('--batch_size', type=int, default=8, help='Batch Size during training [default: 8]')
    parser.add_argument('--dataset', default='scannet', help='Dataset name. sunrgbd or scannet. [default: scannet]')
    parser.add_argument('--num_point', type=int, default=50000, help='Point Number [default: 50000]')
    parser.add_argument('--use_height', action='store_true', help='Use height signal in input.')
    parser.add_argument('--use_color', action='store_true', help='Use RGB color in input.')
    parser.add_argument('--use_sunrgbd_v2', action='store_true', help='Use V2 box labels for SUN RGB-D dataset')
    parser.add_argument('--num_workers', type=int, default=4, help='num of workers to use')
    # --- optimization schedule ---
    parser.add_argument('--start_epoch', type=int, default=1, help='Epoch to run [default: 1]')
    parser.add_argument('--max_epoch', type=int, default=400, help='Epoch to run [default: 180]')
    parser.add_argument('--optimizer', type=str, default='adamW', help='optimizer')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum for SGD')
    parser.add_argument('--weight_decay', type=float, default=0.0005, help='Optimization L2 weight decay [default: 0.0005]')
    parser.add_argument('--learning_rate', type=float, default=0.004, help='Initial learning rate for all except decoder [default: 0.004]')
    parser.add_argument('--decoder_learning_rate', type=float, default=0.0004, help='Initial learning rate for decoder [default: 0.0004]')
    parser.add_argument('--lr-scheduler', type=str, default='step', choices=['step', 'cosine'], help='learning rate scheduler')
    parser.add_argument('--warmup-epoch', type=int, default=(- 1), help='warmup epoch')
    parser.add_argument('--warmup-multiplier', type=int, default=100, help='warmup multiplier')
    parser.add_argument('--lr_decay_epochs', type=int, default=[280, 340], nargs='+', help='for step scheduler. where to decay lr, can be a list')
    parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='for step scheduler. decay rate for learning rate')
    parser.add_argument('--clip_norm', default=0.1, type=float, help='gradient clipping max norm')
    parser.add_argument('--bn_momentum', type=float, default=0.1, help='Default bn momeuntum')
    parser.add_argument('--syncbn', action='store_true', help='whether to use sync bn')
    # --- checkpointing, logging and distributed setup ---
    parser.add_argument('--checkpoint_path', default=None, help='Model checkpoint path [default: None]')
    parser.add_argument('--log_dir', default='log', help='Dump dir to save model checkpoint [default: log]')
    parser.add_argument('--print_freq', type=int, default=10, help='print frequency')
    parser.add_argument('--save_freq', type=int, default=100, help='save frequency')
    parser.add_argument('--val_freq', type=int, default=50, help='val frequency')
    parser.add_argument('--local_rank', type=int, help='local rank for DistributedDataParallel')
    parser.add_argument('--ap_iou_thresholds', type=float, default=[0.25, 0.5], nargs='+', help='A list of AP IoU thresholds [default: 0.25,0.5]')
    parser.add_argument('--rng_seed', type=int, default=0, help='manual seed')
    (args, unparsed) = parser.parse_known_args()
    return args
def find_all_i2c_power_monitor(i2c_path):
    """Scan *i2c_path* for INA3221 power-monitor devices and return a dict
    of all their I2C power channels (empty if the folder is missing)."""
    power_sensor = {}
    if not os.path.isdir(i2c_path):
        logger.error("Folder {root_dir} doesn't exist".format(root_dir=i2c_path))
        return power_sensor
    # Map device entry -> list of driver power folders for each INA3221 found.
    power_i2c_sensors = {}
    for entry in os.listdir(i2c_path):
        device_path = '{base_path}/{item}'.format(base_path=i2c_path, item=entry)
        name_file = '{path}/name'.format(path=device_path)
        if not os.path.isfile(name_file):
            continue
        if 'ina3221' in cat(name_file).strip():
            power_i2c_sensors[entry] = find_driver_power_folders(device_path)
    # Flatten every driver folder's channel listing into one dict.
    for folders in power_i2c_sensors.values():
        for folder in folders:
            power_sensor.update(list_all_i2c_ports(folder))
    if power_sensor:
        logger.info('Found I2C power monitor')
    return power_sensor
def install_nvfancontrol(args):
    """Install the fake nvfancontrol binary/config/service used by the test
    suite, then reload systemd and start the service.

    Aborts the whole pytest session if a real /usr/bin/nvfancontrol already
    exists, to avoid clobbering a live installation.
    """
    if (not os.path.isfile('/usr/bin/nvfancontrol')):
        shutil.copy('tests/nvfancontrol', '/usr/bin/nvfancontrol')
        # NOTE(review): message says 'test/' but the source path is 'tests/' — confirm which is intended.
        print('Copied test/nvfancontrol')
    else:
        print('/usr/bin/nvfancontrol already exists')
        # Abort the entire session rather than overwrite a real binary.
        pytest.exit('I cannot install a fake nvfancontrol! nvfancontrol already exist')
    if (not os.path.isfile('/etc/nvfancontrol.conf')):
        shutil.copy('tests/nvfancontrol.conf', '/etc/nvfancontrol.conf')
        print('Copied nvfancontrol.conf')
    if (not os.path.isfile('/etc/systemd/system/nvfancontrol.service')):
        shutil.copy('tests/nvfancontrol.service', '/etc/systemd/system/nvfancontrol.service')
        print('Copy a fake /etc/systemd/system/nvfancontrol.service')
    # Pick up the new unit file and start the (fake) service.
    os.system('systemctl daemon-reload')
    os.system('systemctl start nvfancontrol.service')
class Processor(Iface, TProcessor):
    """Thrift service processor: dispatches incoming RPC messages to the
    wrapped handler.  (Generated-style code; logic left byte-identical.)"""

    def __init__(self, handler):
        self._handler = handler
        # method name -> unbound dispatch function
        self._processMap = {}
        self._processMap['example'] = Processor.process_example
        self._on_message_begin = None

    def on_message_begin(self, func):
        # Register a callback invoked with (name, type, seqid) for each message.
        self._on_message_begin = func

    def process(self, iprot, oprot):
        """Read one message from *iprot* and dispatch it; unknown methods get
        a TApplicationException reply on *oprot*.

        NOTE(review): returns None for unknown methods but True after a
        successful dispatch — inconsistent, though typical of generated code.
        """
        (name, type, seqid) = iprot.readMessageBegin()
        if self._on_message_begin:
            self._on_message_begin(name, type, seqid)
        if (name not in self._processMap):
            # Unknown method: drain the payload, then reply with an exception.
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, ('Unknown function %s' % name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
            return True

    def process_example(self, seqid, iprot, oprot):
        """Decode an `example` call, invoke the handler, and write the reply.

        Declared (expected) exceptions are returned inside the result struct
        as a normal REPLY; anything unexpected becomes a
        TApplicationException(INTERNAL_ERROR).
        """
        args = example_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = example_result()
        try:
            result.success = self._handler.example()
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            # Transport failures must propagate so the server can tear down the connection.
            raise
        except ExpectedException as exc:
            msg_type = TMessageType.REPLY
            result.exc = exc
        except baseplate.thrift.ttypes.Error as err:
            msg_type = TMessageType.REPLY
            result.err = err
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            # Never leak internal details to the client.
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin('example', msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
class TFC_RNN(nn.Module):
    """A TFC block followed by a (bi)GRU over the last (frequency) axis,
    with an optional residual (skip) connection between the two paths."""

    def __init__(self, in_channels, num_layers_tfc, gr, kt, kf, f, bn_factor_rnn, num_layers_rnn, bidirectional=True, min_bn_units_rnn=16, bias_rnn=True, bn_factor_tif=16, bias_tif=True, skip_connection=True, activation=nn.ReLU):
        super(TFC_RNN, self).__init__()
        self.skip_connection = skip_connection
        self.tfc = TFC(in_channels, num_layers_tfc, gr, kt, kf, activation)
        self.bn = nn.BatchNorm2d(gr)
        # Bottleneck the GRU width by bn_factor_rnn, but never below the minimum.
        hidden_units_rnn = max((f // bn_factor_rnn), min_bn_units_rnn)
        self.rnn = nn.GRU(f, hidden_units_rnn, num_layers_rnn, bias=bias_rnn, batch_first=True, bidirectional=bidirectional)
        # Bidirectional GRUs emit 2x hidden units; map back to f output bins.
        f_from = ((hidden_units_rnn * 2) if bidirectional else hidden_units_rnn)
        f_to = f
        self.tif_f1_to_f2 = TIF_f1_to_f2(gr, f_from, f_to, bn_factor=bn_factor_tif, bias=bias_tif, activation=activation)

    def forward(self, x):
        # assumes x is (batch, channels, time, freq) given the 4-way unpack below — TODO confirm with callers
        x = self.tfc(x)
        x = self.bn(x)
        tfc_output = x
        (B, C, T, F) = x.shape
        # Fold batch and channel together so the GRU processes B*C sequences
        # of T steps with F features each.
        x = x.view((- 1), T, F)
        self.rnn.flatten_parameters()
        (x, _) = self.rnn(x)
        x = x.view(B, C, T, (- 1))
        rnn_output = self.tif_f1_to_f2(x)
        # Residual combination of the two paths when skip_connection is set.
        return ((tfc_output + rnn_output) if self.skip_connection else rnn_output)
class TestQueryBestSize(EndianTest):
    """Pack/unpack round-trip tests for the QueryBestSize request and reply."""

    def setUp(self):
        # The drawable XID is reconstructed from req_bin_0: bytes 4..8
        # (b'5\x8a\xb4u') are the little-endian 32-bit word 0x75b48a35 ==
        # 1974766133 (the width/height/sequence fields confirm little-endian).
        # The original source had an empty value here — a syntax error.
        self.req_args_0 = {'drawable': 1974766133, 'height': 64528, 'item_class': 1, 'width': 8620}
        self.req_bin_0 = b'a\x01\x03\x005\x8a\xb4u\xac!\x10\xfc'
        self.reply_args_0 = {'height': 2023, 'sequence_number': 41036, 'width': 35260}
        self.reply_bin_0 = b'\x01\x00L\xa0\x00\x00\x00\x00\xbc\x89\xe7\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'

    def testPackRequest0(self):
        # Packing the args must reproduce the reference wire bytes.
        packed = request.QueryBestSize._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(packed, self.req_bin_0)

    def testUnpackRequest0(self):
        # Parsing the wire bytes must yield the args with nothing left over.
        (args, remain) = request.QueryBestSize._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)

    def testPackReply0(self):
        packed = request.QueryBestSize._reply.to_binary(*(), **self.reply_args_0)
        self.assertBinaryEqual(packed, self.reply_bin_0)

    def testUnpackReply0(self):
        (args, remain) = request.QueryBestSize._reply.parse_binary(self.reply_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.reply_args_0)
class CTOTrainer(NetworkTrainer):
    """Trainer for the CTO segmentation network: mixed-precision training
    with multi-scale structure losses plus an edge (boundary) dice loss."""

    def __init__(self, opt):
        super().__init__(opt)

    def set_network(self):
        # Build CTO and wrap it in DataParallel over the configured GPUs.
        self.net = CTO(self.opt.train['num_class'])
        self.net = torch.nn.DataParallel(self.net, device_ids=self.opt.train['gpus'])
        self.net = self.net.cuda()

    def train(self, scaler, dice_loss):
        """Run one training epoch; return the epoch's average total loss."""
        self.net.train()
        losses = AverageMeter()
        for (i_batch, sampled_batch) in enumerate(self.train_loader):
            self.optimizer.zero_grad()
            (volume_batch, label_batch) = (sampled_batch['image'], sampled_batch['label'])
            # Ground-truth boundary maps derived from the labels (computed on CPU).
            edges = torch.from_numpy(get_gt_bnd(label_batch.numpy())).cuda()
            (volume_batch, label_batch) = (volume_batch.cuda(), label_batch.cuda())
            with autocast():
                (lateral_map_3, lateral_map_2, lateral_map_1, edge_map) = self.net(volume_batch)
                loss3 = structure_loss(lateral_map_3, label_batch)
                loss2 = structure_loss(lateral_map_2, label_batch)
                loss1 = structure_loss(lateral_map_1, label_batch)
                losse = dice_loss(edge_map, edges)
                # Edge loss weighted 3x relative to each structure loss.
                loss = (((loss3 + loss2) + loss1) + (3 * losse))
            scaler.scale(loss).backward()
            # Unscale before clipping so the clip threshold applies to true gradients.
            scaler.unscale_(self.optimizer)
            torch.nn.utils.clip_grad_value_(self.net.parameters(), self.opt.train['clip'])
            scaler.step(self.optimizer)
            scaler.update()
            losses.update(loss.item(), volume_batch.size(0))
        return losses.avg

    def val(self, dice_loss):
        """Run validation; return the average dice loss on the finest map."""
        self.net.eval()
        val_losses = AverageMeter()
        with torch.no_grad():
            for (i_batch, sampled_batch) in enumerate(self.val_loader):
                (volume_batch, label_batch) = (sampled_batch['image'], sampled_batch['label'])
                (volume_batch, label_batch) = (volume_batch.cuda(), label_batch.cuda())
                (lateral_map_3, lateral_map_2, lateral_map_1, edge_map) = self.net(volume_batch)
                # Only the finest lateral map is scored during validation.
                loss = dice_loss(lateral_map_1, label_batch)
                val_losses.update(loss.item(), volume_batch.size(0))
        return val_losses.avg

    def run(self):
        """Full training loop: poly LR schedule, per-epoch train/val, best-
        and periodic-checkpoint saving."""
        num_epoch = self.opt.train['train_epochs']
        scaler = GradScaler()
        dice_loss = DiceLoss()
        self.logger.info('=> Initial learning rate: {:g}'.format(self.opt.train['lr']))
        self.logger.info('=> Batch size: {:d}'.format(self.opt.train['batch_size']))
        self.logger.info('=> Number of training iterations: {:d} * {:d}'.format(num_epoch, int(len(self.train_loader))))
        self.logger.info('=> Training epochs: {:d}'.format(self.opt.train['train_epochs']))
        dataprocess = tqdm(range(self.opt.train['start_epoch'], num_epoch))
        best_val_loss = 100.0
        print('=> start training!')
        for epoch in dataprocess:
            poly_lr(self.optimizer, self.opt.train['lr'], epoch, num_epoch)
            # state_dict() holds references to the live parameter tensors, so the
            # checkpoints saved below reflect post-training weights for this epoch.
            state = {'epoch': (epoch + 1), 'state_dict': self.net.state_dict(), 'optimizer': self.optimizer.state_dict()}
            train_loss = self.train(scaler, dice_loss)
            val_loss = self.val(dice_loss)
            self.logger_results.info('{:d}\t{:.4f}\t{:.4f}'.format((epoch + 1), train_loss, val_loss))
            if (val_loss < best_val_loss):
                best_val_loss = val_loss
                save_bestcheckpoint(state, self.opt.train['save_dir'])
                print(f'save best checkpoint at epoch {epoch}')
            # Periodic checkpoints only in the second half of training.
            if ((epoch > (self.opt.train['train_epochs'] / 2.0)) and ((epoch % self.opt.train['checkpoint_freq']) == 0)):
                save_checkpoint(state, epoch, self.opt.train['save_dir'], True)
        logging.info('training finished')
def test_float_image():
    """FloatImage plugin: the rendered map contains the <img> element and its
    CSS positioning block, and the image does not affect map bounds.

    NOTE(review): the original source had an unterminated string literal for
    `url` and a template with the URL stripped out (data-cleaning damage);
    both are reconstructed here with a representative image URL — confirm
    against the upstream test fixture.
    """
    m = folium.Map([45.0, 3.0], zoom_start=4)
    url = 'https://raw.githubusercontent.com/SECOORA/static_assets/master/maps/img/rose.png'
    szt = plugins.FloatImage(url, bottom=60, left=70, width='20%')
    m.add_child(szt)
    m._repr_html_()
    out = normalize(m._parent.render())
    # The <img> tag with the configured id and source must be rendered.
    tmpl = Template('\n <img id="{{this.get_name()}}" alt="float_image"\n src="https://raw.githubusercontent.com/SECOORA/static_assets/master/maps/img/rose.png" style="z-index: 999999">\n </img>\n ')
    assert (normalize(tmpl.render(this=szt)) in out)
    # The CSS block anchoring the image at bottom/left with the given width.
    tmpl = Template('\n <style>\n #{{this.get_name()}} {\n position: absolute;\n bottom: 60%;\n left: 70%;\n width: 20%;\n }\n </style>\n ')
    assert (normalize(tmpl.render(this=szt)) in out)
    # A floating image must not contribute to the map's data bounds.
    bounds = m.get_bounds()
    assert (bounds == [[None, None], [None, None]]), bounds
def check_accuracy(model, test, device):
    """Evaluate *model* on *test* and print per-attribute accuracies.

    Each test item is a (body, shirt, pant, hair, action, image) tuple;
    the model returns one prediction tensor per attribute, argmaxed over
    dim 1 to obtain class labels.
    """
    total = 0
    # Correct-prediction counters, in (body, shirt, pant, hair, action) order.
    correct = [0, 0, 0, 0, 0]
    with torch.no_grad():
        for (body, shirt, pant, hair, action, image) in test:
            image = image.to(device)
            targets = [t.to(device) for t in (body, shirt, pant, hair, action)]
            outputs = model(image)
            total += targets[0].size(0)
            for k, (out, tgt) in enumerate(zip(outputs, targets)):
                (_, predicted) = torch.max(out.data, 1)
                correct[k] += (predicted == tgt).sum().item()
    print('Accuracy, Body : {} Shirt : {} Pant : {} Hair : {} Action {}'.format(
        (correct[0] / total), (correct[1] / total), (correct[2] / total),
        (correct[3] / total), (correct[4] / total)))
class TestPolicyInformation():
    """Unit tests for x509.PolicyInformation construction, equality, repr
    and hashing."""

    def test_invalid_policy_identifier(self):
        # The identifier must be an ObjectIdentifier, not a plain string.
        with pytest.raises(TypeError):
            x509.PolicyInformation('notanoid', None)

    def test_none_policy_qualifiers(self):
        info = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), None)
        assert info.policy_identifier == x509.ObjectIdentifier('1.2.3')
        assert info.policy_qualifiers is None

    def test_policy_qualifiers(self):
        qualifiers = ['string']
        info = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), qualifiers)
        assert info.policy_identifier == x509.ObjectIdentifier('1.2.3')
        assert info.policy_qualifiers == qualifiers

    def test_invalid_policy_identifiers(self):
        # Qualifier entries must be str or UserNotice, not ints.
        with pytest.raises(TypeError):
            x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), [1, 2])

    def test_iter_input(self):
        # Any iterable of qualifiers is accepted and materialised.
        qualifiers = ['foo', 'bar']
        info = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), iter(qualifiers))
        assert info.policy_qualifiers is not None
        assert list(info.policy_qualifiers) == qualifiers

    def test_repr(self):
        qualifiers: typing.List[typing.Union[str, x509.UserNotice]] = ['string', x509.UserNotice(None, 'hi')]
        info = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), qualifiers)
        assert repr(info) == "<PolicyInformation(policy_identifier=<ObjectIdentifier(oid=1.2.3, name=Unknown OID)>, policy_qualifiers=['string', <UserNotice(notice_reference=None, explicit_text='hi')>])>"

    def test_eq(self):
        first = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), ['string', x509.UserNotice(None, 'hi')])
        second = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), ['string', x509.UserNotice(None, 'hi')])
        assert first == second

    def test_ne(self):
        base = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), ['string'])
        other_qualifier = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), ['string2'])
        other_oid = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3.4'), ['string'])
        assert base != other_qualifier
        assert base != other_oid
        assert base != object()

    def test_hash(self):
        first = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), ['string', x509.UserNotice(None, 'hi')])
        second = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), ['string', x509.UserNotice(None, 'hi')])
        without_qualifiers = x509.PolicyInformation(x509.ObjectIdentifier('1.2.3'), None)
        assert hash(first) == hash(second)
        assert hash(first) != hash(without_qualifiers)
def import_catalog(element, save=False, user=None):
    """Create or update a Catalog instance from an imported element dict.

    Looks up an existing Catalog by URI (falling back to a fresh instance),
    copies common/lang fields from the element, then validates and checks
    permissions.  NOTE(review): ``validate_instance``/``check_permissions``
    presumably record problems on ``element['errors']`` rather than raising —
    confirm; the save step below is skipped when that key is non-empty.

    Args:
        element: mapping holding the catalog's imported attributes.
        save: when True and no errors were recorded, persist the catalog.
        user: user whose permissions are checked for this element.

    Returns:
        The (possibly unsaved) Catalog instance.
    """
    try:
        catalog = Catalog.objects.get(uri=element.get('uri'))
    except Catalog.DoesNotExist:
        catalog = Catalog()
    set_common_fields(catalog, element)
    catalog.order = (element.get('order') or 0)
    set_lang_field(catalog, 'title', element)
    set_lang_field(catalog, 'help', element)
    catalog.available = element.get('available', True)
    validate_instance(catalog, element, CatalogLockedValidator, CatalogUniqueURIValidator)
    check_permissions(catalog, element, user)
    if (save and (not element.get('errors'))):
        if catalog.id:
            # an existing primary key means the catalog was fetched, not created
            element['updated'] = True
            logger.info('Catalog %s updated.', element.get('uri'))
        else:
            element['created'] = True
            logger.info('Catalog created with uri %s.', element.get('uri'))
        catalog.save()
        set_m2m_through_instances(catalog, 'sections', element, 'catalog', 'section', 'catalog_sections')
        # make the imported catalog visible and editable on the current site
        catalog.sites.add(Site.objects.get_current())
        catalog.editors.add(Site.objects.get_current())
    return catalog
def parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse CLI arguments for the fsspec file-copy utility.

    Args:
        argv: argument vector, excluding the program name.

    Returns:
        Namespace with ``src``, ``dst`` and ``bufsize`` attributes.
    """
    parser = argparse.ArgumentParser(description='copies a file between fsspec locations')
    # both endpoints are mandatory string locations
    for flag, help_text in (('--src', 'fsspec location of the file to read from'),
                            ('--dst', 'fsspec location of where to copy the file to')):
        parser.add_argument(flag, type=str, help=help_text, required=True)
    parser.add_argument('--bufsize', type=int, help='bufsize to use for copying', default=64 * 1024)
    return parser.parse_args(argv)
def do_EQU(op, stack, state):
    """ESIL '=' (assignment): pop a register name and a value, write the value.

    Also updates the ``state.esil`` bookkeeping ('old', 'cur', 'lastsz') that
    subsequent flag operations read.
    """
    reg = stack.pop()
    (val,) = pop_values(stack, state)
    tmp = get_value(reg, state)
    # Idiom fix: compare to None with `is not` instead of `!=` (z3 expressions
    # special-case None in __ne__, so behavior is unchanged).  Under a
    # conditional, build an If-expression instead of unconditionally writing.
    if state.condition is not None:
        val = z3.If(state.condition, val, tmp)
    state.registers[reg] = val
    state.esil['old'] = tmp
    state.esil['cur'] = val
    state.esil['lastsz'] = state.registers[reg].size()
def test_debug_false_by_default(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
    """DEBUG must be False inside the test run even when configured as True.

    The generated conftest configures settings with DEBUG=True via
    pytest_configure; the spawned test then asserts settings.DEBUG is False,
    i.e. the plugin's debug override wins by default.
    """
    # make sure settings come from pytest_configure, not the environment
    monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
    pytester.makeconftest("\n        from django.conf import settings\n\n        def pytest_configure():\n            settings.configure(SECRET_KEY='set from pytest_configure',\n                               DEBUG=True,\n                               DATABASES={'default': {\n                                   'ENGINE': 'django.db.backends.sqlite3',\n                                   'NAME': ':memory:'}},\n                               INSTALLED_APPS=['django.contrib.auth',\n                                               'django.contrib.contenttypes',])\n        ")
    pytester.makepyfile('\n        from django.conf import settings\n        def test_debug_is_false():\n            assert settings.DEBUG is False\n        ')
    r = pytester.runpytest_subprocess()
    assert (r.ret == 0)
class CocoDistEvalRecallHook(DistEvalHook):
    """Distributed evaluation hook logging COCO proposal average recall."""

    def __init__(self, dataset, interval=1, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)):
        super(CocoDistEvalRecallHook, self).__init__(dataset, interval=interval)
        # store the evaluation settings as typed numpy arrays
        self.proposal_nums = np.array(proposal_nums, dtype=np.int32)
        self.iou_thrs = np.array(iou_thrs, dtype=np.float32)

    def evaluate(self, runner, results):
        """Compute AR for every proposal budget and push it to the log buffer."""
        recalls = fast_eval_recall(results, self.dataset.coco, self.proposal_nums, self.iou_thrs)
        for num, recall in zip(self.proposal_nums, recalls):
            runner.log_buffer.output['{}'.format(num)] = recall
        runner.log_buffer.ready = True
def parse_versioned_line(line):
    """Parse one requirements.txt-style line into a (name, version) pair.

    Handles commented-out requirements, trailing comments, environment
    markers, pinned specifiers and editable VCS requirements.  ``version``
    is ``'?'`` when the line carries no pin.

    BUGFIX: the editable branch used ``rest.split('')`` which always raises
    ValueError (empty separator); it now extracts the first 7 characters of
    the VCS ref after '@'.  The '-e' branch also runs *before* comment
    stripping, since '#egg=name' would otherwise be cut off as a comment.
    """
    if line[0] == '#':
        # commented-out requirement: drop the leading '#' and parse the rest
        line = line[1:].strip()
    if line.startswith('-e'):
        # editable VCS requirement, e.g. '-e git+https://host/repo@<ref>#egg=name'
        (rest, name) = line.split('#egg=')
        name = name.split('#')[0].strip()
        version = rest.split('@')[1][:7] if ('@' in rest) else '?'
        return (name, version)
    line = line.rsplit('#', maxsplit=1)[0]  # strip trailing comment
    line = line.split(';')[0].strip()       # strip environment markers
    ops = ['==', '~=', '!=', '>', '<', '>=', '<=']
    (name, version) = (line, '?')
    for op in ops:
        if op in line:
            # later two-char operators overwrite the partial single-char
            # split, so 'pkg>=1.0' ends up split on '>=' rather than '>'
            (name, version) = line.split(op)
    if name.startswith('#'):
        name = name[1:].strip()
    return (name.strip(), version.strip())
class Telemetry():
    """Container of telemetry events parsed from a list of raw event dicts.

    The shard ('xbox' or 'pc') selects the per-shard data wrapper class from
    ``SHARD_DATA_MAP``.
    """

    def __init__(self, data, url, shard=None):
        # infer the shard from the source URL when not given explicitly
        self.shard = (shard or ('xbox' if ('xbox-' in url) else 'pc'))
        self.events = [Event.instance(event_data) for event_data in self.generate_events_data(data)]

    def generate_events_data(self, data):
        """Yield each raw event wrapped in the shard-specific data class."""
        data_class = SHARD_DATA_MAP[self.shard]
        for event in data:
            (yield data_class(event))

    def events_from_type(self, _type):
        """Return all events whose concrete class name equals *_type*."""
        return [ev for ev in self.events if (type(ev).__name__ == _type)]

    @classmethod
    def from_json(cls, path, shard='pc'):
        """Alternate constructor: load telemetry events from a JSON file.

        BUGFIX: this factory takes ``cls`` but was missing its
        ``@classmethod`` decorator, so ``Telemetry.from_json(path)`` would
        have bound *path* to ``cls`` and failed.
        """
        with open(path, 'r') as telemetry_file:
            data = json.load(telemetry_file)
        return cls(data, path, shard)
def completions(config: Config, autoimport_workspace: Workspace, request):
    """Yield autoimport completions for a parametrized (document, position).

    NOTE(review): this reads ``request.param`` and yields, which is the shape
    of a parametrized ``@pytest.fixture`` — the decorator appears to have
    been lost; confirm and restore.
    """
    (document, position) = request.param
    com_position = {'line': 0, 'character': position}
    autoimport_workspace.put_document(DOC_URI, source=document)
    doc = autoimport_workspace.get_document(DOC_URI)
    (yield pylsp_autoimport_completions(config, autoimport_workspace, doc, com_position, None))
    # teardown: remove the temporary document once the test has run
    autoimport_workspace.rm_document(DOC_URI)
class TestErrorTree(TestCase):
    """Tests for exceptions.ErrorTree: error counting, containment by path,
    nested subtree construction, iteration and repr."""

    def test_it_knows_how_many_total_errors_it_contains(self):
        errors = [exceptions.ValidationError('Something', validator=i) for i in range(8)]
        tree = exceptions.ErrorTree(errors)
        self.assertEqual(tree.total_errors, 8)

    def test_it_contains_an_item_if_the_item_had_an_error(self):
        errors = [exceptions.ValidationError('a message', path=['bar'])]
        tree = exceptions.ErrorTree(errors)
        self.assertIn('bar', tree)

    def test_it_does_not_contain_an_item_if_the_item_had_no_error(self):
        errors = [exceptions.ValidationError('a message', path=['bar'])]
        tree = exceptions.ErrorTree(errors)
        self.assertNotIn('foo', tree)

    def test_keywords_that_failed_appear_in_errors_dict(self):
        # errors with an empty path land in the root's errors dict keyed by validator
        error = exceptions.ValidationError('a message', validator='foo')
        tree = exceptions.ErrorTree([error])
        self.assertEqual(tree.errors, {'foo': error})

    def test_it_creates_a_child_tree_for_each_nested_path(self):
        errors = [exceptions.ValidationError('a bar message', path=['bar']), exceptions.ValidationError('a bar -> 0 message', path=['bar', 0])]
        tree = exceptions.ErrorTree(errors)
        self.assertIn(0, tree['bar'])
        self.assertNotIn(1, tree['bar'])

    def test_children_have_their_errors_dicts_built(self):
        (e1, e2) = (exceptions.ValidationError('1', validator='foo', path=['bar', 0]), exceptions.ValidationError('2', validator='quux', path=['bar', 0]))
        tree = exceptions.ErrorTree([e1, e2])
        self.assertEqual(tree['bar'][0].errors, {'foo': e1, 'quux': e2})

    def test_multiple_errors_with_instance(self):
        # construction alone must not raise when instances are attached
        (e1, e2) = (exceptions.ValidationError('1', validator='foo', path=['bar', 'bar2'], instance='i1'), exceptions.ValidationError('2', validator='quux', path=['foobar', 2], instance='i2'))
        exceptions.ErrorTree([e1, e2])

    def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self):
        error = exceptions.ValidationError('123', validator='foo', instance=[])
        tree = exceptions.ErrorTree([error])
        with self.assertRaises(IndexError):
            tree[0]

    def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
        error = exceptions.ValidationError('a message', validator='foo', instance={}, path=['foo'])
        tree = exceptions.ErrorTree([error])
        self.assertIsInstance(tree['foo'], exceptions.ErrorTree)

    def test_iter(self):
        # iterating a tree yields the top-level path elements that had errors
        (e1, e2) = (exceptions.ValidationError('1', validator='foo', path=['bar', 'bar2'], instance='i1'), exceptions.ValidationError('2', validator='quux', path=['foobar', 2], instance='i2'))
        tree = exceptions.ErrorTree([e1, e2])
        self.assertEqual(set(tree), {'bar', 'foobar'})

    def test_repr_single(self):
        error = exceptions.ValidationError('1', validator='foo', path=['bar', 'bar2'], instance='i1')
        tree = exceptions.ErrorTree([error])
        self.assertEqual(repr(tree), '<ErrorTree (1 total error)>')

    def test_repr_multiple(self):
        (e1, e2) = (exceptions.ValidationError('1', validator='foo', path=['bar', 'bar2'], instance='i1'), exceptions.ValidationError('2', validator='quux', path=['foobar', 2], instance='i2'))
        tree = exceptions.ErrorTree([e1, e2])
        self.assertEqual(repr(tree), '<ErrorTree (2 total errors)>')

    def test_repr_empty(self):
        tree = exceptions.ErrorTree([])
        self.assertEqual(repr(tree), '<ErrorTree (0 total errors)>')
def simple_loads(explode: bool, name: str, schema_type: str, location: Mapping[(str, Any)]) -> Any:
    """Deserialize an OpenAPI 'simple'-style parameter taken from *location*.

    Arrays split on commas; objects become dicts, either from 'k=v,k=v'
    (exploded) or 'k,v,k,v' (non-exploded).  Anything else passes through.
    """
    raw = location[name]
    if schema_type == 'array':
        return split(raw, separator=',')
    if (explode, schema_type) == (True, 'object'):
        # 'a=1,b=2' -> {'a': '1', 'b': '2'}
        return dict(split(item, separator='=') for item in split(raw, separator=','))
    if (explode, schema_type) == (False, 'object'):
        # 'a,1,b,2' -> {'a': '1', 'b': '2'}
        return dict(split(chunk) for chunk in split(raw, separator=',', step=2))
    return raw
class _BaseCore():
__metaclass__ = abc.ABCMeta
def __init__(self, hash_func, default_params):
self.default_params = default_params
self.hash_func = hash_func
def set_func(self, func):
func_params = list(inspect.signature(func).parameters)
self.func_is_method = (func_params and (func_params[0] == 'self'))
self.func = func
def get_key(self, args, kwds):
if (self.hash_func is not None):
return self.hash_func(args, kwds)
else:
return self.default_params['hash_func'](args, kwds)
def get_entry(self, args, kwds):
key = self.get_key(args, kwds)
return self.get_entry_by_key(key)
def precache_value(self, args, kwds, value_to_cache):
key = self.get_key(args, kwds)
self.set_entry(key, value_to_cache)
return value_to_cache
def check_calc_timeout(self, time_spent):
if (self.wait_for_calc_timeout is not None):
calc_timeout = self.wait_for_calc_timeout
else:
calc_timeout = self.default_params['wait_for_calc_timeout']
if ((calc_timeout > 0) and (time_spent >= calc_timeout)):
raise RecalculationNeeded()
def get_entry_by_key(self, key):
def set_entry(self, key, func_res):
def mark_entry_being_calculated(self, key):
def mark_entry_not_calculated(self, key):
def wait_on_entry_calc(self, key):
def clear_cache(self):
def clear_being_calculated(self): |
def get_diff(repo, base_commit, commits):
    """Collect the .py files whose code changed between commits and a base.

    Added/deleted files are always included; modified or renamed files are
    skipped when the diff only touches docstrings or comments.
    """
    print('\n### DIFF ###\n')
    changed = []
    for commit in commits:
        for diff_obj in commit.diff(base_commit):
            a_path, b_path = diff_obj.a_path, diff_obj.b_path
            change = diff_obj.change_type
            if change == 'A' and b_path.endswith('.py'):
                changed.append(b_path)
            elif change == 'D' and a_path.endswith('.py'):
                changed.append(a_path)
            elif change in ('M', 'R') and b_path.endswith('.py'):
                if a_path != b_path:
                    # renamed: both the old and the new location count
                    changed.extend([a_path, b_path])
                elif diff_is_docstring_only(repo, commit, b_path):
                    print(f'Ignoring diff in {b_path} as it only concerns docstrings or comments.')
                else:
                    changed.append(a_path)
    return changed
class TestSubtype(unittest.TestCase):
    """Subtyping relations between mypyc's primitive runtime types."""

    def test_bit(self) -> None:
        # bit is a subtype of every integer-like primitive
        assert is_subtype(bit_rprimitive, bool_rprimitive)
        assert is_subtype(bit_rprimitive, int_rprimitive)
        assert is_subtype(bit_rprimitive, short_int_rprimitive)
        for rt in native_int_types:
            assert is_subtype(bit_rprimitive, rt)

    def test_bool(self) -> None:
        # bool is NOT a bit, but is a subtype of the integer primitives
        assert (not is_subtype(bool_rprimitive, bit_rprimitive))
        assert is_subtype(bool_rprimitive, int_rprimitive)
        assert is_subtype(bool_rprimitive, short_int_rprimitive)
        for rt in native_int_types:
            assert is_subtype(bool_rprimitive, rt)

    def test_int64(self) -> None:
        # fixed-width ints are subtypes of int, but not of each other
        assert is_subtype(int64_rprimitive, int64_rprimitive)
        assert is_subtype(int64_rprimitive, int_rprimitive)
        assert (not is_subtype(int64_rprimitive, short_int_rprimitive))
        assert (not is_subtype(int64_rprimitive, int32_rprimitive))
        assert (not is_subtype(int64_rprimitive, int16_rprimitive))

    def test_int32(self) -> None:
        assert is_subtype(int32_rprimitive, int32_rprimitive)
        assert is_subtype(int32_rprimitive, int_rprimitive)
        assert (not is_subtype(int32_rprimitive, short_int_rprimitive))
        assert (not is_subtype(int32_rprimitive, int64_rprimitive))
        assert (not is_subtype(int32_rprimitive, int16_rprimitive))

    def test_int16(self) -> None:
        assert is_subtype(int16_rprimitive, int16_rprimitive)
        assert is_subtype(int16_rprimitive, int_rprimitive)
        assert (not is_subtype(int16_rprimitive, short_int_rprimitive))
        assert (not is_subtype(int16_rprimitive, int64_rprimitive))
        assert (not is_subtype(int16_rprimitive, int32_rprimitive))
class TestVSCF(QiskitChemistryTestCase):
    """Tests for the VSCF initial-state circuit and its reference bitstring."""

    def test_bitstring(self):
        # two 2-level modes -> reversed bitstring [True, False, True, False]
        bitstr = vscf_bitstring([2, 2])
        self.assertTrue(all((bitstr[::(- 1)] == np.array([True, False, True, False]))))

    def test_qubits_4(self):
        # basis [2, 2] -> 4-qubit circuit with X gates on qubits 0 and 2
        basis = [2, 2]
        vscf = VSCF(basis)
        ref = QuantumCircuit(4)
        ref.x([0, 2])
        self.assertEqual(ref, vscf)

    def test_qubits_5(self):
        # basis [2, 3] -> 5-qubit circuit, X gates still on qubits 0 and 2
        basis = [2, 3]
        vscf = VSCF(basis)
        ref = QuantumCircuit(5)
        ref.x([0, 2])
        self.assertEqual(ref, vscf)
@pytest.mark.parametrize('testfile', ['bsrn-pay0616.dat.gz', 'bsrn-lr0100-pay0616.dat'])
def test_read_bsrn(testfile, expected_index):
    """read_bsrn parses both gzipped and plain BSRN files into a dataframe.

    BUGFIX: the decorator was a bare '.parametrize(...)' line (a syntax
    error) -- the '@pytest.mark' prefix had been lost and is restored here.
    """
    (data, metadata) = read_bsrn((DATA_DIR / testfile))
    assert_index_equal(expected_index, data.index)
    assert ('ghi' in data.columns)
    assert ('dni_std' in data.columns)
    assert ('dhi_min' in data.columns)
    assert ('lwd_max' in data.columns)
    assert ('relative_humidity' in data.columns)
class TestLogFilter():
    """Tests for log.LogFilter: category filtering, negation, debug-only mode,
    the :debug-log-filter command, and filter-string parsing.

    BUGFIX: all parametrize decorators in this class were bare
    '.parametrize(...)' lines (syntax errors) -- the '@pytest.mark' prefixes
    had been lost and are restored here.
    """

    def _make_record(self, logger, name, level=logging.DEBUG):
        """Create a minimal log record with the given logger *name* and *level*."""
        return logger.makeRecord(name, level=level, fn=None, lno=0, msg='', args=None, exc_info=None)

    @pytest.mark.parametrize('filters, negated, category, logged', [(set(), False, 'eggs.bacon.spam', True), (set(), False, 'eggs', True), (set(), True, 'ham', True), ({'eggs', 'bacon'}, False, 'eggs', True), ({'eggs', 'bacon'}, False, 'bacon', True), ({'eggs'}, False, 'eggs.fried', True), ({'eggs', 'bacon'}, False, 'spam', False), ({'eggs'}, False, 'eggsauce', False), ({'fried'}, False, 'eggs.fried', False), ({'eggs', 'bacon'}, True, 'eggs', False), ({'eggs', 'bacon'}, True, 'bacon', False), ({'eggs', 'bacon'}, True, 'spam', True), ({'eggs'}, True, 'eggsauce', True)])
    def test_logfilter(self, logger, filters, negated, category, logged):
        logfilter = log.LogFilter(filters, negated=negated)
        record = self._make_record(logger, category)
        assert (logfilter.filter(record) == logged)

    def test_logfilter_benchmark(self, logger, benchmark):
        record = self._make_record(logger, 'unfiltered')
        filters = set(log.LOGGER_NAMES)
        logfilter = log.LogFilter(filters, negated=False)
        benchmark((lambda : logfilter.filter(record)))

    @pytest.mark.parametrize('only_debug', [True, False])
    def test_debug(self, logger, only_debug):
        # non-DEBUG records pass the filter only when only_debug is set
        logfilter = log.LogFilter({'eggs'}, only_debug=only_debug)
        record = self._make_record(logger, 'bacon', level=logging.INFO)
        assert (logfilter.filter(record) == only_debug)

    @pytest.mark.parametrize('category, filter_str, logged_before, logged_after', [('init', 'url,js', True, False), ('url', 'url,js', False, True), ('js', 'url,js', False, True), ('js', 'none', False, True)])
    def test_debug_log_filter_cmd(self, monkeypatch, logger, category, filter_str, logged_before, logged_after):
        logfilter = log.LogFilter({'init'})
        monkeypatch.setattr(log, 'console_filter', logfilter)
        record = self._make_record(logger, category)
        assert (logfilter.filter(record) == logged_before)
        utilcmds.debug_log_filter('url,js')
        assert (logfilter.filter(record) == logged_after)

    def test_debug_log_filter_cmd_invalid(self, monkeypatch):
        logfilter = log.LogFilter(set())
        monkeypatch.setattr(log, 'console_filter', logfilter)
        with pytest.raises(cmdutils.CommandError, match='Invalid log category blabla'):
            utilcmds.debug_log_filter('blabla')

    @pytest.mark.parametrize('filter_str, expected_names, negated', [('!js,misc', {'js', 'misc'}, True), ('js,misc', {'js', 'misc'}, False), ('js, misc', {'js', 'misc'}, False), ('JS, Misc', {'js', 'misc'}, False), (None, set(), False), ('none', set(), False)])
    def test_parsing(self, filter_str, expected_names, negated):
        logfilter = log.LogFilter.parse(filter_str)
        assert (logfilter.names == expected_names)
        assert (logfilter.negated == negated)

    @pytest.mark.parametrize('filter_str, invalid', [('js,!misc', '!misc'), ('blabla,js,blablub', 'blabla, blablub')])
    def test_parsing_invalid(self, filter_str, invalid):
        with pytest.raises(log.InvalidLogFilterError, match='Invalid log category {} - valid categories: statusbar, .*'.format(invalid)):
            log.LogFilter.parse(filter_str)
def get_mult_function(mult_table, n_dims):
    """Build a multivector product function from a sparse 3D multiplication table.

    Args:
        mult_table: (n_dims, n_dims, n_dims) array of structure coefficients.
        n_dims: length of the multivector value arrays.

    Returns:
        mv_mult(value, other_value) computing
        ``output[l] = sum_k,m value[k] * mult_table[k, l, m] * other_value[m]``.
    """
    # precompute only the nonzero entries of the table
    k_list, l_list, m_list = mult_table.nonzero()
    mult_table_vals = np.array([mult_table[k, l, m] for k, l, m in zip(k_list, l_list, m_list)], dtype=int)

    def mv_mult(value, other_value):
        output = np.zeros(n_dims)
        for k, l, m, coeff in zip(k_list, l_list, m_list, mult_table_vals):
            output[l] += (value[k] * coeff) * other_value[m]
        return output

    return mv_mult
@pytest.mark.parametrize('from_json', [True, False])
def test_from_json(from_json):
    """Exporting the demand-saving model to d3 JSON works from file and model.

    BUGFIX: the decorator was a bare '.parametrize(...)' line (a syntax
    error) -- the '@pytest.mark' prefix had been lost and is restored here.
    """
    json_path = os.path.join(os.path.dirname(__file__), 'models', 'demand_saving2_with_variables.json')
    if from_json:
        json_dict = pywr_json_to_d3_json(json_path, attributes=True)
    else:
        model = load_model('demand_saving2_with_variables.json')
        json_dict = pywr_model_to_d3_json(model, attributes=True)
    assert ('nodes' in json_dict.keys())
    assert ('links' in json_dict.keys())
    node_names = ['Inflow', 'Reservoir', 'Demand', 'Spill']
    for node in json_dict['nodes']:
        assert (node['name'] in node_names)
        if (node['name'] == 'Reservoir'):
            assert_array_equal(node['position'], [1, 1])
    demand = get_node(json_dict['nodes'], 'Demand')
    demand_max_flow = get_node_attribute(demand, 'max_flow')
    assert (demand_max_flow['value'] == 'demand_max_flow - AggregatedParameter')
def test_do_deterministic():
    """do() must replace z's distribution with the deterministic x - 105.

    The base model has z ~ Normal(y, 0.001) with y = x + 105; after
    intervening with do(z := x - 105), a draw of z must land well below 100
    since x is drawn from Normal(0, 0.001).
    """
    rng = np.random.default_rng(seed=435)
    with pm.Model() as m:
        x = pm.Normal('x', 0, 0.001)
        y = pm.Deterministic('y', (x + 105))
        z = pm.Normal('z', y, 0.001)
    do_m = do(m, {'z': (x - 105)})
    assert (pm.draw(do_m['z'], random_seed=rng) < 100)
def run(config: EvalConfig):
    """Evaluate generated images against their prompts with CLIP similarity.

    For every prompt directory under ``config.output_path``, computes the
    CLIP similarity of each image to the full prompt and to each half of the
    prompt (split on ' and ' / ' with '), then writes raw and aggregated
    metrics as JSON under ``config.metrics_save_path``.

    NOTE(review): a decorator was evidently stripped from this definition
    (only a bare ``()`` line remained above it); restore it (e.g. a config
    wrapper such as ``@pyrallis.wrap()``) if this is the CLI entry point.
    """
    print('Loading CLIP model...')
    device = torch.device(('cuda' if (torch.cuda.is_available() and (torch.cuda.device_count() > 0)) else 'cpu'))
    (model, preprocess) = clip.load('ViT-B/16', device)
    model.eval()
    print('Done.')
    prompts = [p.name for p in config.output_path.glob('*') if p.is_dir()]
    print(f'Running on {len(prompts)} prompts...')
    results_per_prompt = {}
    for prompt in tqdm(prompts):
        print(f'Running on: "{prompt}"')
        image_paths = [p for p in (config.output_path / prompt).rglob('*') if (p.suffix in ['.png', '.jpg'])]
        images = [Image.open(p) for p in image_paths]
        image_names = [p.name for p in image_paths]
        queries = [preprocess(image).unsqueeze(0).to(device) for image in images]
        with torch.no_grad():
            if (' and ' in prompt):
                prompt_parts = prompt.split(' and ')
            elif (' with ' in prompt):
                prompt_parts = prompt.split(' with ')
            else:
                print(f"Unable to split prompt: {prompt}. Looking for 'and' or 'with' for splitting! Skipping!")
                continue
            full_text_features = get_embedding_for_prompt(model, prompt, templates=imagenet_templates)
            first_half_features = get_embedding_for_prompt(model, prompt_parts[0], templates=imagenet_templates)
            second_half_features = get_embedding_for_prompt(model, prompt_parts[1], templates=imagenet_templates)
            images_features = [model.encode_image(image) for image in queries]
            # normalize image embeddings so the dot products below are cosine similarities
            images_features = [(feats / feats.norm(dim=(- 1), keepdim=True)) for feats in images_features]
            # BUGFIX: the matrix-multiplication operator '@' was missing from the
            # three similarity computations below (a syntax error as written).
            full_text_similarities = [(feat.float() @ full_text_features.T).item() for feat in images_features]
            first_half_similarities = [(feat.float() @ first_half_features.T).item() for feat in images_features]
            second_half_similarities = [(feat.float() @ second_half_features.T).item() for feat in images_features]
            results_per_prompt[prompt] = {'full_text': full_text_similarities, 'first_half': first_half_similarities, 'second_half': second_half_similarities, 'image_names': image_names}
    aggregated_results = {'full_text_aggregation': aggregate_by_full_text(results_per_prompt), 'min_first_second_aggregation': aggregate_by_min_half(results_per_prompt)}
    with open((config.metrics_save_path / 'clip_raw_metrics.json'), 'w') as f:
        json.dump(results_per_prompt, f, sort_keys=True, indent=4)
    with open((config.metrics_save_path / 'clip_aggregated_metrics.json'), 'w') as f:
        json.dump(aggregated_results, f, sort_keys=True, indent=4)
def node_location(caller):
    """Menu node for choosing where the spawned object will be placed.

    Builds the (body, help) text pair for the 'location' step of the build
    wizard and wires the default option to store the raw input string under
    the 'location' property.

    Args:
        caller: the menu user; its key and current 'location' value are
            interpolated into the node text.

    Returns:
        (text, options) tuple consumed by the menu framework.
    """
    text = '\n The |cLocation|n of this object in the world. If not given, the object will spawn in the\n inventory of |c{caller}|n by default.\n\n {current}\n '.format(caller=caller.key, current=_get_current_value(caller, 'location'))
    helptext = '\n You get the most control by not specifying the location - you can then teleport the spawned\n objects as needed later. Setting the location may be useful for quickly populating a given\n location. One could also consider randomizing the location using a $protfunc.\n\n |c$protfuncs|n\n {pfuncs}\n '.format(pfuncs=_format_protfuncs())
    text = (text, helptext)
    options = _wizard_options('location', 'permissions', 'home', search=True)
    # free-form input: store the stripped string as the 'location' property
    options.append({'key': '_default', 'goto': (_set_property, dict(prop='location', processor=(lambda s: s.strip())))})
    return (text, options)
class PreviousStateRecorder():
    """Records dialogue slot-values per turn and serves the previous turn's state."""

    def __init__(self):
        # {dialogue_ID: {turn_id: slot_values}}
        self.states = {}

    def add_state(self, data_item, slot_values):
        """Store *slot_values* for the dialogue/turn identified by *data_item*."""
        turns = self.states.setdefault(data_item['dialogue_ID'], {})
        turns[data_item['turn_id']] = slot_values

    def state_retrieval(self, data_item):
        """Return the state recorded for the previous turn ({} at turn 0)."""
        turn_id = data_item['turn_id']
        if turn_id == 0:
            return {}
        return self.states[data_item['dialogue_ID']][turn_id - 1]
def rdiff_backup_action(source_local, dest_local, src_dir, dest_dir, generic_opts, action, specific_opts, std_input=None, return_stdout=False, return_stderr=False):
    """Run one rdiff-backup command, locally or via a simulated remote server.

    When a side is flagged non-local, its directory argument is rewritten
    into a remote-schema command that cd's into the remote test directory and
    starts ``rdiff-backup server``.  NOTE(review): relies on module-level
    ``CMD_SEP``, ``RBBin``, ``abs_remote1_dir``/``abs_remote2_dir`` and
    ``os_system`` — confirm they are defined at import time.

    Returns:
        Captured output (bytes, CRLF-normalized on Windows) when
        return_stdout/return_stderr is set; otherwise the exit status from
        ``os_system``.
    """
    remote_exec = CMD_SEP.join([b'cd %s', b'%s server::%s'])
    is_remote = False
    if (src_dir and (not source_local)):
        src_dir = (remote_exec % (abs_remote1_dir, RBBin, src_dir))
        is_remote = True
    if (dest_dir and (not dest_local)):
        dest_dir = (remote_exec % (abs_remote2_dir, RBBin, dest_dir))
        is_remote = True
    if is_remote:
        # '{h}' is presumably the host placeholder of --remote-schema — confirm
        generic_opts = (list(generic_opts) + [b'--remote-schema', b'{h}'])
    cmdargs = ((([RBBin] + list(generic_opts)) + [action]) + list(specific_opts))
    if src_dir:
        cmdargs.append(src_dir)
    if dest_dir:
        cmdargs.append(dest_dir)
    print('Executing: ', ' '.join(map(shlex.quote, map(os.fsdecode, cmdargs))))
    if (return_stdout or return_stderr):
        try:
            if return_stderr:
                ret_val = subprocess.check_output(cmdargs, input=std_input, stderr=subprocess.STDOUT, universal_newlines=False)
            else:
                ret_val = subprocess.check_output(cmdargs, input=std_input, universal_newlines=False)
        except subprocess.CalledProcessError as exc:
            # a failing command still yields its (partial) output
            ret_val = exc.output
        if (os.name == 'nt'):
            ret_val = ret_val.replace(b'\r\n', b'\n')
    else:
        ret_val = os_system(cmdargs, input=std_input, universal_newlines=False)
    return ret_val
class Environment(Singleton):
    """Singleton exposing the application's directory layout.

    BUGFIX: the path accessors below are consumed attribute-style inside this
    very class (``self.temp_dir`` is handed to ``os.path.isdir``/``os.listdir``
    and ``self._create_dir``), so they must be properties; the ``@property``
    decorators had evidently been lost and are restored here.
    """

    def __init__(self, conf_dir=None, data_path=None):
        # Singleton: __init__ may run again on the same instance, so only
        # fill attributes that have not been set yet.
        if (not hasattr(self, 'conf_dir')):
            if conf_dir:
                self.conf_dir = conf_dir
            else:
                self.conf_dir = get_platform().get_default_conf_dir()
        if (not hasattr(self, 'data_path')):
            if data_path:
                self.data_path = data_path
            else:
                self.data_path = get_platform().get_default_data_path()

    @property
    def conf_file(self):
        return os.path.join(self.conf_dir, 'conf.xml')

    @property
    def log_file(self):
        return os.path.join(self.conf_dir, 'log.out')

    @property
    def temp_dir(self):
        return os.path.join(self.conf_dir, 'tmp')

    @property
    def gpx_dir(self):
        return os.path.join(self.conf_dir, 'gpx')

    @property
    def extension_dir(self):
        return os.path.join(self.conf_dir, 'extensions')

    @property
    def plugin_dir(self):
        return os.path.join(self.conf_dir, 'plugins')

    @property
    def glade_dir(self):
        return os.path.join(self.data_path, 'glade')

    def clear_temp_dir(self):
        """Delete all regular files inside the temp directory, if it exists."""
        if (not os.path.isdir(self.temp_dir)):
            return
        else:
            files = os.listdir(self.temp_dir)
            for name in files:
                fullname = os.path.join(self.temp_dir, name)
                if os.path.isfile(fullname):
                    os.remove(os.path.join(self.temp_dir, name))

    def create_directories(self):
        """Create the full directory tree this environment relies on."""
        self._create_dir(self.conf_dir)
        self._create_dir(self.temp_dir)
        self._create_dir(self.extension_dir)
        self._create_dir(self.plugin_dir)
        self._create_dir(self.gpx_dir)

    def _create_dir(self, dir_name):
        # os.mkdir (not makedirs): parents are expected to exist already
        if (not os.path.isdir(dir_name)):
            os.mkdir(dir_name)
def interpret_opcodes(iterable):
    """Interpret a stream of (kind, data) collectd-style protocol parts.

    Header parts accumulate onto shared Values/Notification objects; a deep
    copy of the completed object is yielded whenever a terminating part
    arrives: TYPE_MESSAGE completes a Notification, TYPE_VALUES completes a
    Values sample.

    Yields:
        Independent (deep-copied) Notification and Values snapshots.
    """
    vl = Values()
    nt = Notification()
    for (kind, data) in iterable:
        if (kind == TYPE_TIME):
            vl.time = nt.time = data
        elif (kind == TYPE_TIME_HR):
            # high-resolution timestamp: scale down to seconds
            vl.time = nt.time = (data / HR_TIME_DIV)
        elif (kind == TYPE_INTERVAL):
            vl.interval = data
        elif (kind == TYPE_INTERVAL_HR):
            vl.interval = (data / HR_TIME_DIV)
        elif (kind == TYPE_HOST):
            vl.host = nt.host = data
        elif (kind == TYPE_PLUGIN):
            vl.plugin = nt.plugin = data
        elif (kind == TYPE_PLUGIN_INSTANCE):
            vl.plugininstance = nt.plugininstance = data
        elif (kind == TYPE_TYPE):
            vl.type = nt.type = data
        elif (kind == TYPE_TYPE_INSTANCE):
            vl.typeinstance = nt.typeinstance = data
        elif (kind == TYPE_SEVERITY):
            nt.severity = data
        elif (kind == TYPE_MESSAGE):
            # message terminates a notification record
            nt.message = data
            (yield deepcopy(nt))
        elif (kind == TYPE_VALUES):
            # values terminate a sample record
            vl[:] = data
            (yield deepcopy(vl))
def test_db_access_with_repr_in_report(django_pytester: DjangoPytester) -> None:
    """Failure reports must render cleanly even when a frame repr touches the DB blocker.

    Both generated tests raise DoesNotExist; the run must show the two
    failures (including the RuntimeError repr from blocked DB access) without
    producing an INTERNALERROR.
    """
    django_pytester.create_test_module("\n        import pytest\n\n        from .app.models import Item\n\n        def test_via_db_blocker(django_db_setup, django_db_blocker):\n            with django_db_blocker.unblock():\n                Item.objects.get(name='This one is not there')\n\n        def test_via_db_fixture(db):\n            Item.objects.get(name='This one is not there')\n        ")
    result = django_pytester.runpytest_subprocess('--tb=auto')
    result.stdout.fnmatch_lines(['tpkg/test_the_test.py FF', 'E *DoesNotExist: Item matching query does not exist.', 'tpkg/test_the_test.py:8: ', 'self = *RuntimeError*Database access not allowed*', 'E *DoesNotExist: Item matching query does not exist.', '* 2 failed*'])
    assert ('INTERNALERROR' not in (str(result.stdout) + str(result.stderr)))
    assert (result.ret == 1)
def _safe_attr(attr, camel_killer=False, replacement_char='x'):
    """Turn *attr* into a safe Python attribute name.

    Disallowed characters become underscores, runs of underscores collapse,
    and names that start with a digit or collide with a keyword get prefixed
    with *replacement_char*.
    """
    allowed = string.ascii_letters + string.digits + '_'
    attr = _safe_key(attr)
    if camel_killer:
        attr = _camel_killer(attr)
    attr = attr.replace(' ', '_')
    cleaned = ''.join(ch if ch in allowed else '_' for ch in attr)
    cleaned = cleaned.strip('_')
    # names cannot begin with a digit; EAFP also covers the empty string
    try:
        int(cleaned[0])
    except (ValueError, IndexError):
        pass
    else:
        cleaned = '{0}{1}'.format(replacement_char, cleaned)
    if cleaned in kwlist:
        cleaned = '{0}{1}'.format(replacement_char, cleaned)
    return re.sub('_+', '_', cleaned)
class Effect3036(BaseEffect):
    """Passive skill effect boosting reactivation delay of 'Missile Launcher Bomb' modules.

    NOTE(review): ``handler`` takes no ``self`` — handlers in this effect
    framework are presumably invoked as plain functions; confirm before
    adding a decorator.
    """
    type = 'passive'

    def handler(fit, skill, context, projectionRange, **kwargs):
        # scale the skill's reactivationDelayBonus attribute by trained level
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Missile Launcher Bomb')), 'moduleReactivationDelay', (skill.getModifiedItemAttr('reactivationDelayBonus') * skill.level), **kwargs)
def might_extract_gz(path):
    """Decompress a .gz file into its parent directory, unless already done.

    The output file name is the archive name minus its final extension;
    nothing happens when that file already exists.
    """
    path = Path(path)
    target = path.parent / '.'.join(path.name.split('.')[:-1])
    if target.exists():
        return
    logging.info('Extracting %s ...\n', path.name)
    with gzip.open(str(path), 'rb') as f_in:
        with open(str(target), 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
def get_all_preds_for_execution(gold: str, pred: str) -> Tuple[(int, Iterator[str])]:
    """Enumerate candidate predictions with gold values plugged into value slots.

    Returns:
        (number of alternatives, iterator of candidate queries); the count is
        ``len(gold_values) ** num_slots``.
    """
    _, gold_values = extract_query_values(gold)
    pred_template, _ = extract_query_values(pred)
    # count the placeholder slots left in the value-replaced prediction
    num_slots = sum(1 for v in pred_template if v == VALUE_NUM_SYMBOL.lower())
    num_alternatives = len(gold_values) ** num_slots
    return (num_alternatives, plugin_all_permutations(pred_template, gold_values))
def test_pickups_to_solve_list_multiple(echoes_game_description, echoes_pickup_database, echoes_game_patches):
    """A single Missile Expansion should satisfy the remaining requirement.

    The state starts with the launcher and 5 missiles while the requirement
    asks for 10; one expansion (created with ammo count [5]) from the pool of
    five identical ones closes the gap.
    """
    db = echoes_game_description.resource_database
    missile_expansion = pickup_creator.create_ammo_pickup(echoes_pickup_database.ammo_pickups['Missile Expansion'], [5], False, db)
    pool = ([missile_expansion] * 5)
    requirement = RequirementList([ResourceRequirement.create(db.get_item('Missile'), 10, False)])
    resources = ResourceCollection.with_database(db)
    resources.set_resource(db.get_item('MissileLauncher'), 1)
    resources.set_resource(db.get_item('Missile'), 5)
    state = State(resources, (), 99, MagicMock(), echoes_game_patches, None, StateGameData(db, echoes_game_description.region_list, 100, 99))
    result = pickup_list.pickups_to_solve_list(pool, requirement, state)
    assert (result == [missile_expansion])
@register_loss('weighted_cross_entropy')
def weighted_cross_entropy(pred, true):
    """Inverse-frequency class-weighted cross-entropy loss.

    BUGFIX: the decorator line read bare ``_loss('weighted_cross_entropy')``
    -- the ``@register`` prefix had evidently been stripped; restored as
    ``@register_loss(...)`` so the loss is registered by name.

    Returns:
        (loss, predictions) tuple; predictions are log-softmax scores for the
        multiclass path and sigmoid probabilities for the binary path.
    """
    if (cfg.model.loss_fun == 'weighted_cross_entropy'):
        V = true.size(0)
        n_classes = (pred.shape[1] if (pred.ndim > 1) else 2)
        label_count = torch.bincount(true)
        label_count = label_count[label_count.nonzero(as_tuple=True)].squeeze()
        cluster_sizes = torch.zeros(n_classes, device=pred.device).long()
        cluster_sizes[torch.unique(true)] = label_count
        # weight ~ (N - count(class)) / N; classes absent from the batch get 0
        weight = ((V - cluster_sizes).float() / V)
        weight *= (cluster_sizes > 0).float()
        if (pred.ndim > 1):
            pred = F.log_softmax(pred, dim=(- 1))
            return (F.nll_loss(pred, true, weight=weight), pred)
        else:
            loss = F.binary_cross_entropy_with_logits(pred, true.float(), weight=weight[true])
            return (loss, torch.sigmoid(pred))
class MovingAverage():
    """Exponential moving average over dictionaries of tensors.

    Each update folds new data into the stored EMA; when
    ``oneminusema_correction`` is on, returned values are divided by
    ``(1 - ema)``.
    """

    def __init__(self, ema, oneminusema_correction=True):
        self.ema = ema
        self.ema_data = {}
        self._updates = 0
        self._oneminusema_correction = oneminusema_correction

    def update(self, dict_data):
        """Fold *dict_data* into the running average and return the results."""
        is_first = (self._updates == 0)
        result = {}
        for name, tensor in dict_data.items():
            flat = tensor.view(1, -1)
            # before the first update the running average is all zeros
            prev = torch.zeros_like(flat) if is_first else self.ema_data[name]
            averaged = self.ema * prev + (1 - self.ema) * flat
            if self._oneminusema_correction:
                # scale by 1/(1-ema) to keep early updates on a useful scale
                result[name] = averaged / (1 - self.ema)
            else:
                result[name] = averaged
            # store the uncorrected average, detached from the graph
            self.ema_data[name] = averaged.clone().detach()
        self._updates += 1
        return result
def search_molecules_in_crystal(struc, tol=0.2, once=False, ignore_HH=True):
    """Extract complete molecules from a crystal/cluster structure.

    Grows connected components of covalently bonded sites breadth-first,
    starting from each not-yet-visited site.

    Args:
        struc: pymatgen Structure (periodic) or molecule-like object with
            ``sites`` and ``get_neighbors``.
        tol: bond tolerance forwarded to CovalentBond.is_bonded.
        once: stop after the first molecule has been assembled.
        ignore_HH: skip H-H contacts when deciding bonds.

    Returns:
        list of pymatgen Molecule objects built from cartesian coordinates.
    """
    def check_one_layer(struc, sites0, visited):
        # expand the current BFS frontier: bonded neighbors of all frontier sites
        new_members = []
        for site0 in sites0:
            (sites_add, visited) = check_one_site(struc, site0, visited)
            new_members.extend(sites_add)
        return (new_members, visited)
    def check_one_site(struc, site0, visited, rmax=2.8):
        # collect unvisited neighbors of site0 (within rmax) that are bonded to it
        neigh_sites = struc.get_neighbors(site0, rmax)
        ids = [m.index for m in visited]
        sites_add = []
        ids_add = []
        # periodic structures need image shifts so molecules come out unwrapped
        pbc = isinstance(struc, Structure)
        for site1 in neigh_sites:
            if (site1.index not in (ids + ids_add)):
                try:
                    if CovalentBond.is_bonded(site0, site1, tol):
                        if pbc:
                            (d, image) = site0.distance_and_image(site1)
                        else:
                            d = site0.distance(site1)
                        (val1, val2) = (site1.specie.value, site0.specie.value)
                        key = '{:s}-{:s}'.format(val1, val2)
                        if (key == 'H-H'):
                            # H-H contacts only count when not ignored
                            if (not ignore_HH):
                                if pbc:
                                    site1.frac_coords += image
                                sites_add.append(site1)
                                ids_add.append(site1.index)
                        elif (d < bonds[key]):
                            if pbc:
                                site1.frac_coords += image
                            sites_add.append(site1)
                            ids_add.append(site1.index)
                except ValueError:
                    # species pair unknown to CovalentBond: fall back to the
                    # distance cutoff from the module-level `bonds` table
                    if pbc:
                        (d, image) = site0.distance_and_image(site1)
                    else:
                        d = site0.distance(site1)
                    (val1, val2) = (site1.specie.value, site0.specie.value)
                    key = '{:s}-{:s}'.format(val1, val2)
                    if (d < bonds[key]):
                        if pbc:
                            site1.frac_coords += image
                        sites_add.append(site1)
                        ids_add.append(site1.index)
        if (len(sites_add) > 0):
            visited.extend(sites_add)
        return (sites_add, visited)
    molecules = []
    visited_ids = []
    for (id, site) in enumerate(struc.sites):
        if (id not in visited_ids):
            first_site = site
            visited = [first_site]
            # remember the site's position in struc.sites for visited-tracking
            first_site.index = id
            (n_iter, max_iter) = (0, (len(struc) - len(visited_ids)))
            while (n_iter < max_iter):
                # BFS: first step expands the seed, later steps expand whole layers
                if (n_iter == 0):
                    (new_sites, visited) = check_one_site(struc, first_site, visited)
                else:
                    (new_sites, visited) = check_one_layer(struc, new_sites, visited)
                n_iter += 1
                if (len(new_sites) == 0):
                    break
            coords = [s.coords for s in visited]
            coords = np.array(coords)
            numbers = [s.specie.number for s in visited]
            molecules.append(Molecule(numbers, coords))
            visited_ids.extend([s.index for s in visited])
            if (once and (len(molecules) == 1)):
                break
    return molecules
def load_img_info(files):
    """Build an image-info dict for one (image, ground-truth) file pair.

    Args:
        files (tuple): ``(img_file, gt_file)`` paths.

    Returns:
        dict: file name, image height/width, segmentation file name, plus
        annotations parsed from ``gt_file`` by ``load_txt_info``.

    Raises:
        NotImplementedError: if the ground-truth file is not a ``.txt`` file.
    """
    assert isinstance(files, tuple)
    (img_file, gt_file) = files
    # Sanity check: the numeric ids embedded in both file names must match
    # (e.g. ``im_123.jpg`` <-> ``gt_123.txt``).
    assert (int(osp.basename(gt_file)[3:(- 4)]) == int(osp.basename(img_file)[2:(- 4)]))
    # 'unchanged' keeps the raw channel layout; only the shape is used here.
    img = mmcv.imread(img_file, 'unchanged')
    # FIX: ``osp.join`` with a single argument was a no-op wrapper;
    # use the basename directly.
    img_info = dict(file_name=osp.basename(img_file), height=img.shape[0], width=img.shape[1], segm_file=osp.basename(gt_file))
    if (osp.splitext(gt_file)[1] == '.txt'):
        img_info = load_txt_info(gt_file, img_info)
    else:
        raise NotImplementedError
    return img_info
class DummyLM(LM):
    """Stub language model returning random/constant results for testing harnesses."""
    def __init__(self):
        pass
    # BUG FIX: the method takes ``cls`` but was missing @classmethod, so
    # ``DummyLM.create_from_arg_string(s)`` would have bound ``s`` to ``cls``.
    @classmethod
    def create_from_arg_string(cls, arg_string, additional_config=None):
        """Ignore the arg string/config and return a bare instance."""
        return cls()
    def loglikelihood(self, requests):
        """Return one (random negative loglikelihood, is_greedy=False) pair per request."""
        res = []
        for _ in requests:
            res.append(((- random.random()), False))
        return res
    def greedy_until(self, requests):
        """Return the constant continuation 'lol' for every (ctx, until) request."""
        res = []
        for (ctx, _) in requests:
            res.append('lol')
            # Contexts are expected to be non-blank.
            assert (ctx.strip() != '')
        return res
    def loglikelihood_rolling(self, requests):
        """Return one random negative loglikelihood per request."""
        res = []
        for _ in requests:
            res.append((- random.random()))
        return res
def make_exe(filename):
    """Mark ``filename`` (a ``pathlib.Path``) executable, degrading gracefully.

    Tries to add user+group+other execute bits first; if ``chmod`` fails
    (e.g. a restrictive filesystem), retries with progressively fewer
    levels (user+group, then user only).
    """
    base_mode = filename.stat().st_mode
    exec_bits = [S_IXUSR, S_IXGRP, S_IXOTH]
    for count in (3, 2, 1):
        wanted = base_mode
        for bit in exec_bits[:count]:
            wanted |= bit
        try:
            filename.chmod(wanted)
        except OSError:
            # Could not apply this combination; try a narrower one.
            continue
        break
def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
    """Export a TensorFlow-backed pipeline's model to ONNX using tf2onnx.

    Raises an ``Exception`` if TensorFlow (or tf2onnx) is not installed.
    """
    if not is_tf_available():
        raise Exception('Cannot convert because TF is not installed. Please install tensorflow first.')
    print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")
    try:
        import tensorflow as tf
        import tf2onnx
        from tf2onnx import __version__ as tf2onnx_version
        print(f'Using framework TensorFlow: {tf.version.VERSION}, tf2onnx: {tf2onnx_version}')
        # Derive the model's input/output signature from a sample encoding.
        (input_names, output_names, dynamic_axes, tokens) = infer_shapes(nlp, 'tf')
        # Run one forward pass so Keras builds the model's internal graph.
        nlp.model.predict(tokens.data)
        input_signature = [tf.TensorSpec.from_tensor(tensor, name=key) for key, tensor in tokens.items()]
        model_proto, _ = tf2onnx.convert.from_keras(nlp.model, input_signature, opset=opset, output_path=output.as_posix())
    except ImportError as e:
        raise Exception(f'Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first. {e}')
# BUG FIX: the decorator line was garbled to '_fixtures(...)'; restored the
# Reahl '@with_fixtures' form — assumes ``with_fixtures`` is imported at
# file level (TODO confirm against the module's imports).
@with_fixtures(WebFixture, CSRFFixture)
def test_submit_form_with_expired_csrf_token(web_fixture, csrf_fixture):
    """A POST with an expired CSRF token shows the 'page expired' message; a retry (with the refreshed token) succeeds."""
    fixture = csrf_fixture
    wsgi_app = web_fixture.new_wsgi_app(child_factory=fixture.MyForm.factory(), enable_js=True)
    web_fixture.reahl_server.set_app(wsgi_app)
    browser = web_fixture.driver_browser
    browser.open('/')
    select_widget_path = XPath.select_labelled('choice')
    assert (browser.get_value(select_widget_path) == '1')
    browser.select(select_widget_path, '2')
    # Shrink the token lifetime, wait past it, then submit the stale form.
    wsgi_app.config.web.csrf_timeout_seconds = 0.5
    time.sleep((wsgi_app.config.web.csrf_timeout_seconds + 0.1))
    browser.click(XPath.button_labelled('Submit'))
    # Restore a sane timeout so the re-rendered page's token stays valid.
    wsgi_app.config.web.csrf_timeout_seconds = 300
    error_message = XPath.paragraph().including_text('This page has expired. For security reasons, please review your input and retry.')
    assert browser.is_element_present(error_message)
    browser.click(XPath.button_labelled('Submit'))
    assert (not browser.is_element_present(error_message))
class EfficientNetMetrics():
    """Bundle of ignite metrics (loss/accuracy/precision/recall/F1) for EfficientNet training.

    When ``multiclass`` is True and a ``weight`` tensor is given, an
    additional class-weighted F1 is tracked. NOTE(review): the metrics are
    constructed with ``is_multilabel=(not multiclass)`` — i.e. the
    non-multiclass path is treated as multilabel; confirm this matches the
    caller's labels.
    """
    def __init__(self, multiclass=True, weight=None, **kwargs):
        self.multiclass = multiclass
        # Only remember whether a weight was supplied; the tensor itself is
        # used immediately below to build the weighted F1.
        self.weight = (weight is not None)
        self.metrics = {}
        self.metrics['loss'] = Average()
        self.metrics['accuracy'] = Accuracy(is_multilabel=(not multiclass))
        # average=False keeps per-class vectors so F1 can be averaged later.
        self.metrics['precision'] = Precision(average=False, is_multilabel=(not multiclass))
        self.metrics['recall'] = Recall(average=False, is_multilabel=(not multiclass))
        # Per-class F1 from the precision/recall metric graph; 1e-20 avoids
        # division by zero.
        F1 = (((self.metrics['precision'] * self.metrics['recall']) * 2) / ((self.metrics['precision'] + self.metrics['recall']) + 1e-20))
        self.metrics['f1'] = MetricsLambda((lambda t: torch.mean(t).item()), F1)
        if (self.multiclass and self.weight):
            # Normalize the weights so the weighted F1 stays comparable.
            F1 = (F1 * (weight / weight.sum()))
            self.metrics['weighted-f1'] = MetricsLambda((lambda t: torch.mean(t).item()), F1)
    def reset(self):
        """Reset every tracked metric's accumulator."""
        for key in self.metrics:
            self.metrics[key].reset()
    def update(self, loss, y_pred, y_true):
        """Fold one batch (loss, predictions, targets) into all metrics."""
        self.metrics['loss'].update(loss[0].item())
        if (not self.multiclass):
            # Multilabel path: threshold sigmoid activations at 0.5.
            y_pred = torch.sigmoid(y_pred)
            y_pred = torch.round(y_pred)
        y_pred = y_pred.cpu().float()
        y_true = y_true.cpu().float()
        for key in self.metrics:
            if (key == 'loss'):
                # Loss was already updated with the scalar above.
                continue
            self.metrics[key].update((y_pred, y_true))
    def compute(self):
        """Return a dict of scalar metric values computed from the accumulators."""
        result = {}
        result['loss'] = self.metrics['loss'].compute().item()
        result['accuracy'] = self.metrics['accuracy'].compute()
        result['precision'] = self.metrics['precision'].compute().mean().item()
        result['recall'] = self.metrics['recall'].compute().mean().item()
        result['f1'] = self.metrics['f1'].compute()
        if ('weighted-f1' in self.metrics):
            result['weighted-f1'] = self.metrics['weighted-f1'].compute()
        return result
    def log_tensorboard(self, writer, step, results=None, loss=None, train=True):
        """Write the (possibly precomputed) metrics to TensorBoard and return them."""
        results = (self.compute() if (results is None) else results)
        mode_str = ('train' if train else 'val')
        # If a raw loss tensor is given, log it instead of the running average.
        writer.add_scalar(('Loss/' + mode_str), (results['loss'] if (loss is None) else loss[0].item()), step)
        writer.add_scalar(('Accuracy/' + mode_str), results['accuracy'], step)
        writer.add_scalar(('Precision/' + mode_str), results['precision'], step)
        writer.add_scalar(('Recall/' + mode_str), results['recall'], step)
        writer.add_scalar(('F1/' + mode_str), results['f1'], step)
        if ('weighted-f1' in results):
            writer.add_scalar(('Weighted-F1/' + mode_str), results['weighted-f1'], step)
        return results |
def operate_vocab(vocab_root_path, vocab_a_name, vocab_b_name, operator):
    """Intersect or subtract two vocab files and write the result alongside them.

    ``operator`` must be 'intersect' (A & B) or 'sub' (A - B); the output
    file name combines the operator with the first and last '_'-separated
    parts of the input names.
    """
    assert (operator in ['intersect', 'sub'])
    vocab_a = load_vocab((vocab_root_path / vocab_a_name))
    vocab_b = load_vocab((vocab_root_path / vocab_b_name))
    vocab_a_set = set(vocab_a)
    vocab_b_set = set(vocab_b)
    # Report raw and deduplicated sizes of both vocabularies.
    print(f'''{vocab_a_name}: {len(vocab_a)}
{vocab_b_name}: {len(vocab_b)}''')
    print(f'''{vocab_a_name} set: {len(vocab_a_set)}
{vocab_b_name} set: {len(vocab_b_set)}''')
    if (operator == 'intersect'):
        result = (vocab_a_set & vocab_b_set)
    else:
        result = (vocab_a_set - vocab_b_set)
    print(f'{operator} of {vocab_a_name} {vocab_b_name} size: {len(result)}')
    out_name = f"{operator}_{vocab_a_name.split('_')[0]}_{vocab_b_name.split('_')[0]}_{vocab_a_name.split('_')[(- 1)]}"
    with (vocab_root_path / out_name).open('w', encoding='utf-8') as w_f:
        for word in tqdm(result):
            w_f.write((word + '\n'))
class DatoidCz(SimpleDownloader):
    """Datoid.cz downloader plugin for pyLoad."""
    __name__ = 'DatoidCz'
    __type__ = 'downloader'
    __version__ = '0.02'
    __status__ = 'testing'
    # BUG FIX: the original '__pattern__' string literal was truncated to a
    # lone quote (a syntax error). Restored with a plausible Datoid URL
    # regex — TODO(review): verify against the upstream pyLoad plugin source.
    __pattern__ = r'https?://(?:www\.)?datoid\.(?:cz|sk|pl)/.+'
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Datoid.cz downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]
    # Patterns used by SimpleDownloader to scrape name/size/availability.
    NAME_PATTERN = 'Nazev souboru: (?P<N>.+)'
    SIZE_PATTERN = 'Velikost: (?P<S>[\\d.,]+) (?P<U>[\\w^_]+)'
    OFFLINE_PATTERN = 'Tento soubor neexistuje'
    # Normalize the .sk/.pl mirrors onto the .cz domain.
    URL_REPLACEMENTS = [('datoid.sk', 'datoid.cz'), ('datoid.pl', 'datoid.cz')]
    def handle_free(self, pyfile):
        """Ask the site's JSON endpoint for a free-download redirect link."""
        url = self.req.last_effective_url
        urlp = urllib.parse.urlparse(url)
        # Endpoint is '/f/<path><epoch-millis>'; the timestamp busts caching.
        json_data = json.loads(self.load(urllib.parse.urljoin(url, (('/f/' + urlp.path) + str(int((time.time() * 1000)))))))
        self.log_debug(json_data)
        if ('error' in json_data):
            self.fail(json_data['error'])
        self.link = json_data['redirect']
    def handle_premium(self, pyfile):
        """Premium accounts use the '/f/' endpoint directly as the download link."""
        url = self.req.last_effective_url
        urlp = urllib.parse.urlparse(url)
        self.link = urllib.parse.urljoin(url, (('/f/' + urlp.path) + str(int((time.time() * 1000)))))
# BUG FIX: the decorator line was garbled to '.parametrize(...)'; restored
# the standard pytest form — assumes ``pytest`` is imported at file level
# (TODO confirm against the module's imports).
@pytest.mark.parametrize('game', [RandovaniaGame.METROID_PRIME, RandovaniaGame.METROID_PRIME_ECHOES, RandovaniaGame.METROID_PRIME_CORRUPTION])
def test_on_preset_changed(skip_qtbot, preset_manager, game):
    """on_preset_changed must create one starting-location entry per area node of the game."""
    base = preset_manager.default_preset_for_game(game).get_preset()
    # Pin the uuid so the preset copy is deterministic.
    preset = dataclasses.replace(base, uuid=uuid.UUID('b41fde84-1f57-4b79-8cd6-3e5a78077fa6'))
    options = MagicMock()
    editor = PresetEditor(preset, options)
    window = PresetStartingArea(editor, default_database.game_description_for(preset.game), MagicMock())
    window.on_preset_changed(editor.create_custom_preset_with())
    num_areas = len(StartingLocationList.nodes_list(preset.game))
    assert (len(window._starting_location_for_area) == num_areas)
class CosineDecayScheduler(object):
    """Cosine-annealing decay factor over ``T_max`` steps.

    The rate starts at ``base_lr`` at step 0, follows half a cosine period
    down to 0 at ``step == T_max``, and is 0 for any later step.
    """
    def __init__(self, base_lr=1.0, last_iter=0, T_max=50):
        self.base_lr = base_lr
        self.last_iter = last_iter
        self.T_max = T_max
        # Counter kept for interface compatibility; not used by decay_rate.
        self.cnt = 0
    def decay_rate(self, step):
        """Record ``step`` and return the cosine decay multiplier for it."""
        self.last_iter = step
        if self.last_iter > self.T_max:
            return 0
        cosine = math.cos((math.pi * self.last_iter) / self.T_max)
        return (self.base_lr * (1 + cosine)) / 2.0
class uvm_tlm_fifo(uvm_tlm_fifo_base):
    """Bounded TLM FIFO: thin convenience accessors over the base class's queue."""
    def __init__(self, name=None, parent=None, size=1):
        super().__init__(name, parent, size)
    def size(self):
        # Maximum capacity of the underlying queue.
        return self.queue.maxsize
    def used(self):
        # Number of entries currently held.
        return self.queue.qsize()
    def is_empty(self):
        return self.queue.empty()
    def is_full(self):
        return self.queue.full()
    def flush(self):
        # NOTE(review): reaches into the queue's private deque because the
        # queue type exposes no public clear(); not safe against concurrent
        # put/get — confirm the queue implementation before relying on this.
        self.queue._queue.clear() |
class SlopePupil(Pupil):
    """Pupil defined by the marginal-ray slope rather than an explicit radius."""
    _type = 'slope'
    # Class-level default; set per instance in __init__.
    slope = None
    def __init__(self, slope, **kwargs):
        super().__init__(**kwargs)
        self.slope = slope
    def dict(self):
        """Serialize to a dict, adding this class's 'slope' entry."""
        dat = super().dict()
        dat['slope'] = float(self.slope)
        return dat
    def text(self):
        """Yield the human-readable description lines."""
        (yield from super().text())
        (yield ('Slope: %g' % self.slope))
    # BUG FIX: the two plain ``def radius`` definitions shadowed each other —
    # the setter-shaped one silently replaced the getter. They are clearly a
    # property getter/setter pair (assumes the base class also exposes
    # ``radius`` as a property — TODO confirm).
    @property
    def radius(self):
        # Radius follows from the slope at the pupil distance.
        return (self.slope * self.distance)
    @radius.setter
    def radius(self, r):
        self.slope = (r / self.distance)
class Validator(Feature):
    """FunctionGraph feature that installs ``validate``/``consistent`` callbacks with timing."""
    # Attributes removed before pickling; re-installed by ``unpickle``.
    pickle_rm_attr = ['validate', 'consistent']
    def on_attach(self, fgraph):
        # Refuse to attach twice or next to a conflicting feature.
        for attr in ('validate', 'validate_time'):
            if hasattr(fgraph, attr):
                raise AlreadyThere('Validator feature is already present or in conflict with another plugin.')
        fgraph.validate = partial(self.validate_, fgraph)
        fgraph.consistent = partial(self.consistent_, fgraph)
    def unpickle(self, fgraph):
        # Re-bind the callables dropped via ``pickle_rm_attr``.
        fgraph.validate = partial(self.validate_, fgraph)
        fgraph.consistent = partial(self.consistent_, fgraph)
    def on_detach(self, fgraph):
        del fgraph.validate
        del fgraph.consistent
    def validate_(self, fgraph):
        """Run all registered 'validate' callbacks, timing them; on failure,
        print diagnostics gleaned from the *caller's* stack frame, then re-raise."""
        t0 = time.perf_counter()
        try:
            ret = fgraph.execute_callbacks('validate')
        except Exception as e:
            # Inspect the calling frame to decide how loudly to fail.
            cf = inspect.currentframe()
            uf = cf.f_back
            uf_info = inspect.getframeinfo(uf)
            if (uf_info.function == 'replace_all_validate'):
                # That caller handles validation failures itself.
                raise
            else:
                # Honor the caller's local ``verbose`` flag, if present.
                verbose = uf.f_locals.get('verbose', False)
                if verbose:
                    r = uf.f_locals.get('r', '')
                    reason = uf_info.function
                    print(f'''validate failed on node {r}.
Reason: {reason}, {e}''')
                raise
        t1 = time.perf_counter()
        if fgraph.profile:
            fgraph.profile.validate_time += (t1 - t0)
        return ret
    def consistent_(self, fgraph):
        """Return True iff ``fgraph.validate()`` raises nothing."""
        try:
            fgraph.validate()
            return True
        except Exception:
            return False |
class StackedResidualBlocks(nn.Module):
    def __init__(self, n_blocks: int, conv_op: Type[_ConvNd], input_channels: int, output_channels: Union[(int, List[int], Tuple[(int, ...)])], kernel_size: Union[(int, List[int], Tuple[(int, ...)])], initial_stride: Union[(int, List[int], Tuple[(int, ...)])], conv_bias: bool=False, norm_op: Union[(None, Type[nn.Module])]=None, norm_op_kwargs: dict=None, dropout_op: Union[(None, Type[_DropoutNd])]=None, dropout_op_kwargs: dict=None, nonlin: Union[(None, Type[torch.nn.Module])]=None, nonlin_kwargs: dict=None, block: Union[(Type[BasicBlockD], Type[BottleneckD])]=BasicBlockD, bottleneck_channels: Union[(int, List[int], Tuple[(int, ...)])]=None, stochastic_depth_p: float=0.0, squeeze_excitation: bool=False, squeeze_excitation_reduction_ratio: float=(1.0 / 16)):
        """Stack of ``n_blocks`` residual blocks (BasicBlockD or BottleneckD).

        Only the first block receives ``initial_stride`` (and downsamples);
        all following blocks use stride 1. Scalar ``output_channels`` /
        ``bottleneck_channels`` are broadcast to one value per block.
        """
        super().__init__()
        assert (n_blocks > 0), 'n_blocks must be > 0'
        assert (block in [BasicBlockD, BottleneckD]), 'block must be BasicBlockD or BottleneckD'
        if (not isinstance(output_channels, (tuple, list))):
            output_channels = ([output_channels] * n_blocks)
        if (not isinstance(bottleneck_channels, (tuple, list))):
            # For BasicBlockD this stays a list of Nones and is unused.
            bottleneck_channels = ([bottleneck_channels] * n_blocks)
        if (block == BasicBlockD):
            # First block maps input_channels -> output_channels[0] with the
            # initial stride; the rest chain channel counts at stride 1.
            blocks = nn.Sequential(block(conv_op, input_channels, output_channels[0], kernel_size, initial_stride, conv_bias, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, nonlin, nonlin_kwargs, stochastic_depth_p, squeeze_excitation, squeeze_excitation_reduction_ratio), *[block(conv_op, output_channels[(n - 1)], output_channels[n], kernel_size, 1, conv_bias, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, nonlin, nonlin_kwargs, stochastic_depth_p, squeeze_excitation, squeeze_excitation_reduction_ratio) for n in range(1, n_blocks)])
        else:
            # Bottleneck blocks additionally take per-block bottleneck widths.
            blocks = nn.Sequential(block(conv_op, input_channels, bottleneck_channels[0], output_channels[0], kernel_size, initial_stride, conv_bias, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, nonlin, nonlin_kwargs, stochastic_depth_p, squeeze_excitation, squeeze_excitation_reduction_ratio), *[block(conv_op, output_channels[(n - 1)], bottleneck_channels[n], output_channels[n], kernel_size, 1, conv_bias, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, nonlin, nonlin_kwargs, stochastic_depth_p, squeeze_excitation, squeeze_excitation_reduction_ratio) for n in range(1, n_blocks)])
        self.blocks = blocks
        self.initial_stride = maybe_convert_scalar_to_list(conv_op, initial_stride)
        self.output_channels = output_channels[(- 1)]
    def forward(self, x):
        return self.blocks(x)
    def compute_conv_feature_map_size(self, input_size):
        """Sum the feature-map sizes of all blocks for a spatial ``input_size``."""
        assert (len(input_size) == len(self.initial_stride)), 'just give the image size without color/feature channels or batch channel. Do not give input_size=(b, c, x, y(, z)). Give input_size=(x, y(, z))!'
        output = self.blocks[0].compute_conv_feature_map_size(input_size)
        # After the first block the spatial size is reduced by initial_stride.
        size_after_stride = [(i // j) for (i, j) in zip(input_size, self.initial_stride)]
        for b in self.blocks[1:]:
            output += b.compute_conv_feature_map_size(size_after_stride)
        return output |
def display_constraints(ctr_entities, separator=''):
    """Pretty-print a constraint tree, indenting one tab per nesting level.

    Entities with an ``entities`` attribute are group nodes (printed, then
    recursed into); the rest are leaves whose ``constraint`` is printed.
    ``None`` entries are skipped.
    """
    for entity in ctr_entities:
        if entity is None:
            continue
        if hasattr(entity, 'entities'):
            # Group node: show it, then recurse one level deeper.
            print(separator + str(entity))
            display_constraints(entity.entities, separator + '\t')
        else:
            # Leaf node: show the wrapped constraint.
            print(separator + str(entity.constraint))
def test_saving_the_same_answer_does_not_trigger_event(submission_factory, graphql_client, user, schedule_item_factory, slot_factory, day_factory, mocker):
    """Re-submitting an unchanged schedule-invitation answer must not fire the notification event."""
    mock_event = mocker.patch('api.schedule.mutations.send_new_schedule_invitation_answer')
    graphql_client.force_login(user)
    submission = submission_factory(speaker_id=user.id)
    # Item is already confirmed with the same notes the mutation will send,
    # so the update is a no-op from the notification's point of view.
    schedule_item = schedule_item_factory(status=ScheduleItem.STATUS.confirmed, speaker_invitation_notes='notes', submission=submission, type=ScheduleItem.TYPES.submission, conference=submission.conference, slot=slot_factory(day=day_factory(conference=submission.conference), hour='10:00', duration=30))
    response = graphql_client.query('mutation($input: UpdateScheduleInvitationInput!) {\n updateScheduleInvitation(input: $input) {\n __typename\n ... on ScheduleInvitation {\n option\n notes\n }\n }\n }', variables={'input': {'submissionId': submission.hashid, 'option': 'CONFIRM', 'notes': 'notes'}})
    assert (not response.get('errors'))
    assert (response['data']['updateScheduleInvitation'] == {'__typename': 'ScheduleInvitation', 'option': 'CONFIRM', 'notes': 'notes'})
    schedule_item.refresh_from_db()
    # State is unchanged and, crucially, no event was emitted.
    assert (schedule_item.status == ScheduleItem.STATUS.confirmed)
    assert (schedule_item.speaker_invitation_notes == 'notes')
    mock_event.assert_not_called() |
def resolve_variants(node, rng: np.random.RandomState, variant_config, output_config):
    """Recursively resolve ``_variants`` markers in a nested config tree.

    A dict containing ``_variants`` (type 'options') is replaced by one of
    its options: the one named in ``variant_config`` (keyed by the variant's
    ``global_id``) if present, otherwise one drawn uniformly from ``rng``.
    The choice made is recorded in ``output_config``. Dicts and lists are
    modified in place; scalar option values replace their parent dict, which
    must then have no other keys.
    """
    if isinstance(node, list):
        for pos, item in enumerate(node):
            node[pos] = resolve_variants(item, rng, variant_config, output_config)
        return node
    if not isinstance(node, dict):
        # Scalars pass through untouched.
        return node
    if '_variants' in node:
        spec = node['_variants']
        gid = spec['global_id']
        if spec['type'] != 'options':
            raise NotImplementedError
        options = spec['options']
        option_keys = list(options.keys())
        if gid in variant_config:
            chosen = option_keys.index(variant_config[gid])
        else:
            chosen = rng.choice(len(option_keys))
        chosen_key = option_keys[chosen]
        chosen_value = options[chosen_key]
        output_config[gid] = {'type': 'options', 'key': chosen_key, 'index': chosen}
        if isinstance(chosen_value, dict):
            # Splice the chosen option's keys into this node.
            node.update(chosen_value)
            del node['_variants']
        else:
            assert len(node) == 1, 'parent node of scalar variant should not have other keys'
            return chosen_value
    for key in node:
        node[key] = resolve_variants(node[key], rng, variant_config, output_config)
    return node
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.