# (dataset artifact — not part of the original source)
# code stringlengths 281 23.7M
def find_constraint_latent(model, x, y, w, relaxed=True):
    """Find the most violated latent constraint for a single example.

    Imputes the latent variables for (x, y) under weights w, runs
    loss-augmented inference against that imputation, and returns the
    resulting cutting plane.

    Returns a tuple (h_hat, delta_joint_feature, slack, loss) where
    delta_joint_feature is joint_feature(x, h) - joint_feature(x, h_hat)
    and slack is the hinge value max(loss - <w, delta_joint_feature>, 0).
    """
    latent_h = model.latent(x, y, w)
    augmented_h = model.loss_augmented_inference(x, latent_h, w, relaxed=relaxed)
    # Feature difference between the imputed labeling and the augmented one.
    feature_gap = model.joint_feature(x, latent_h) - model.joint_feature(x, augmented_h)
    current_loss = model.loss(y, augmented_h)
    # Hinge: the constraint is only violated when the margin is not met.
    slack = max(current_loss - np.dot(w, feature_gap), 0)
    return (augmented_h, feature_gap, slack, current_loss)
class TestUVCCVSCF(QiskitChemistryTestCase):
    """Regression test: UVCC variational form with a VSCF initial state."""

    def setUp(self):
        super().setUp()
        # Fix the global seed so the VQE optimisation is reproducible.
        aqua_globals.random_seed = 8
        # Expected ground-state vibrational energy for the CO2 toy model.
        self.reference_energy = 592.

    def test_uvcc_vscf(self):
        """VQE with UVCC + VSCF must reproduce the reference energy."""
        # One- and two-body vibrational coefficients for a CO2 model with
        # 2 modes and 2 modals per mode.
        co2_2modes_2modals_2body = [[[[[0, 0, 0]], 320.], [[[0, 1, 1]], 1760.], [[[1, 0, 0]], 342.], [[[1, 1, 1]], 1032.]], [[[[0, 0, 0], [1, 0, 0]], (- 57.)], [[[0, 0, 1], [1, 0, 0]], (- 56.)], [[[0, 1, 0], [1, 0, 0]], (- 56.)], [[[0, 1, 1], [1, 0, 0]], (- 60.)], [[[0, 0, 0], [1, 0, 1]], (- 65.)], [[[0, 0, 1], [1, 0, 1]], (- 62.)], [[[0, 1, 0], [1, 0, 1]], (- 62.)], [[[0, 1, 1], [1, 0, 1]], (- 121.)], [[[0, 0, 0], [1, 1, 0]], (- 65.)], [[[0, 0, 1], [1, 1, 0]], (- 62.)], [[[0, 1, 0], [1, 1, 0]], (- 62.)], [[[0, 1, 1], [1, 1, 0]], (- 121.)], [[[0, 0, 0], [1, 1, 1]], (- 170.)], [[[0, 0, 1], [1, 1, 1]], (- 167.)], [[[0, 1, 0], [1, 1, 1]], (- 167.)], [[[0, 1, 1], [1, 1, 1]], (- 179.)]]]
        basis = [2, 2]
        bosonic_op = BosonicOperator(co2_2modes_2modals_2body, basis)
        # 'direct' maps each modal onto its own qubit.
        qubit_op = bosonic_op.mapping('direct', threshold=1e-05)
        with warnings.catch_warnings():
            # VSCF emits a DeprecationWarning in this qiskit version.
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            init_state = VSCF(basis)
        num_qubits = sum(basis)
        uvcc_varform = UVCC(num_qubits, basis, [0, 1], initial_state=init_state)
        # Seeded statevector simulation for determinism.
        q_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_transpiler=90, seed_simulator=12)
        optimizer = COBYLA(maxiter=1000)
        algo = VQE(qubit_op, uvcc_varform, optimizer)
        vqe_result = algo.run(q_instance)
        energy = vqe_result['optimal_value']
        self.assertAlmostEqual(energy, self.reference_energy, places=4)
class _CookieCacheManager():
    """Lazily creates and hands out a single shared _CookieCache instance.

    Bug fix: both methods take ``cls`` as their first parameter but the
    ``@classmethod`` decorators were missing, so calling them on the class
    (or an instance) would have bound the wrong first argument.
    """

    # Shared cache instance, created on first access.
    _Cookie_cache = None

    @classmethod
    def get_cookie_cache(cls):
        """Return the shared cookie cache, initialising it on first use."""
        if cls._Cookie_cache is None:
            # Initialisation is guarded by the module-level lock so that
            # concurrent first calls do not race on construction.
            with _cache_init_lock:
                cls._initialise()
        return cls._Cookie_cache

    @classmethod
    def _initialise(cls, cache_dir=None):
        # NOTE(review): cache_dir is accepted but unused here — confirm
        # whether _CookieCache should receive it.
        cls._Cookie_cache = _CookieCache()
def howAboutNow():
    """Compare the clock to the global RUNTIME/READYTIME deadlines.

    Returns 'T_RUN' once RUNTIME has passed, 'T_READY' once READYTIME has
    passed, and '' otherwise.  As a side effect, sets the global delayTime
    to an appropriate polling interval (shorter the closer RUNTIME is).
    """
    global delayTime, RUNTIME, READYTIME
    now = int(time.time())
    if now >= RUNTIME:
        return 'T_RUN'
    if now >= READYTIME:
        # In the ready window: poll fast in the last 15 seconds.
        delayTime = 0.2 if (RUNTIME - now) < 15 else 8
        return 'T_READY'
    remaining = RUNTIME - now
    if remaining < 7200:
        # Tighten the polling interval as the deadline approaches.
        if remaining < 15:
            delayTime = 0.2
        elif remaining < 300:
            delayTime = 10
        elif remaining < 3600:
            delayTime = 200
        else:
            delayTime = 2400
    else:
        delayTime = 7200 if remaining > 10800 else 4800
    print((u'[I]: %s , %s ' % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now)), str((READYTIME - now) / 60))))
    return ''
def dice(gt, pred, label=None):
    """Dice similarity coefficient between a ground-truth and predicted mask.

    With label=None both inputs are binarised (any non-zero is foreground);
    otherwise voxels equal to *label* are compared.  Returns
    2*|gt ∩ pred| / (|gt| + |pred|).
    """
    (gt, pred) = to_numpy(gt, pred)
    if label is None:
        # Bug fix: `np.bool` was removed in NumPy 1.24 — use builtin bool.
        (gt, pred) = (gt.astype(bool), pred.astype(bool))
    else:
        (gt, pred) = ((gt == label), (pred == label))
    intersection = np.logical_and(gt, pred)
    # NOTE(review): both masks empty yields 0/0 (nan with a warning), as in
    # the original — confirm callers never pass two empty masks.
    return (2.0 * intersection.sum()) / (gt.sum() + pred.sum())
class UniterEncoder(nn.Module):
    """Wraps a pretrained UNITER model behind a sentence+features interface."""

    def __init__(self, args):
        super().__init__()
        # Maximum token length used when converting sentences to features.
        self.max_seq_length = args.max_seq_length
        set_visual_config(args)
        # NOTE(review): do_lower_case=True with 'bert-base-cased' is unusual —
        # confirm this combination is intentional.
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=True)
        self.model = UFE.from_pretrained('bert-base-cased')
        if args.from_scratch:
            # Discard pretrained weights and reinitialise everything.
            print('initializing all the weights')
            self.model.apply(self.model.init_bert_weights)

    def dim(self):
        """Hidden size of the encoder output (BERT-base)."""
        return 768

    def forward(self, sents, feats, boxes, visual_attention_mask=None):
        """Encode sentences plus visual features; returns the model output."""
        train_features = convert_sents_to_features(sents, self.max_seq_length, self.tokenizer)
        input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long).cuda()
        input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long).cuda()
        segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long).cuda()
        # The visual branch is hard-wired to 36 regions of interest.
        assert (feats.shape[1] == 36), 'Not Using 36 ROIs, please change the following 2 lines'
        # All visual tokens share one segment id and are fully attended.
        visual_segment_ids = torch.ones(input_ids.shape[0], feats.shape[1], dtype=torch.long).cuda()
        v_mask = torch.ones(input_mask.shape[0], feats.shape[1], dtype=torch.long).cuda()
        output = self.model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, visual_feats=feats, visual_token_type_ids=visual_segment_ids, visual_attention_mask=v_mask, img_pos_feat=boxes)
        return output

    def load(self, path):
        """Load a pretrained UNITER state dict, reporting key mismatches."""
        state_dict = torch.load(path)
        print(('Load UNITER PreTrained Model from %s' % path))
        load_keys = set(state_dict.keys())
        model_keys = set(self.model.state_dict().keys())
        print()
        print('Weights in loaded but not in model:')
        for key in sorted(load_keys.difference(model_keys)):
            print(key)
        print()
        print('Weights in model but not in loaded:')
        for key in sorted(model_keys.difference(load_keys)):
            print(key)
        print()
        # strict=False: tolerate the mismatches listed above.
        self.model.load_state_dict(state_dict, strict=False)
class TabToolButton(QtWidgets.QToolButton):
    """Borderless 16x16 tool button for tab bars that swallows mouse presses."""

    SIZE = (16, 16)

    def __init__(self, *args):
        super().__init__(*args)
        width, height = self.SIZE
        self.setIconSize(QtCore.QSize(width, height))
        self.setStyleSheet('QToolButton{ border: none; }')

    def mousePressEvent(self, event):
        # Ignore the press so the event propagates to the parent widget.
        event.ignore()
class StridedBottomUpStack(BottomUpStackInterface):
    """Bottom-up feature stack built from repeated (optionally strided) blocks."""

    def __init__(self, strided_block: StridedBlockInterface, layer_num_blocks: List[int], strided_reduction: bool) -> None:
        self._strided_block = strided_block
        self._layer_num_blocks = layer_num_blocks
        self._strided_reduction = strided_reduction
        # Prefix used for every generated block name.
        self._name = 'bu'

    def convolve(self, bottom_up: Tensor, num_filters: int, layer_index: int) -> Tensor:
        """Run all blocks of the layer at unchanged spatial resolution."""
        out = bottom_up
        for block_index in range(self._layer_num_blocks[layer_index]):
            block_name = f'{self._name}_layer{layer_index}_block{block_index}'
            out = self._strided_block.convolve(out, num_filters, name=block_name)
        return out

    def downsample_and_convolve(self, bottom_up: Tensor, num_filters_in: int, num_filters_out: int, layer_index: int) -> Tensor:
        """Downsample via a strided block, then run the remaining blocks."""
        blocks_in_layer = self._layer_num_blocks[layer_index]
        # With explicit strided reduction (or a single-block layer), the
        # strided block itself widens the channel count; otherwise the
        # widening is left to the later blocks.
        if self._strided_reduction or blocks_in_layer < 2:
            stride_filters = num_filters_out
        else:
            stride_filters = num_filters_in
        out = self._strided_block.strided_convolve(bottom_up, stride_filters, name=f'{self._name}_layer{layer_index}_block0')
        for block_index in range(1, blocks_in_layer):
            block_name = f'{self._name}_layer{layer_index}_block{block_index}'
            out = self._strided_block.convolve(out, num_filters_out, name=block_name)
        return out
def validate(model, dataloader, criterion):
    """Run one evaluation epoch for a similarity-regression model.

    Returns (epoch_time, mean_loss, pearson_r, spearman_r) computed over
    the whole dataloader with gradients disabled.
    """
    model.eval()
    device = model.device
    start_time = time.time()
    total_loss = 0.0
    predictions = []
    targets = []
    with torch.no_grad():
        for batch in dataloader:
            premises = batch['premise'].to(device)
            premise_lens = batch['premise_length'].to(device)
            hypotheses = batch['hypothesis'].to(device)
            hypothesis_lens = batch['hypothesis_length'].to(device)
            labels = batch['label'].to(device)
            similarity = batch['similarity'].to(device)
            logits = model(premises, premise_lens, hypotheses, hypothesis_lens, similarity)
            scores = logits.squeeze(1)
            total_loss += criterion(scores, labels).item()
            predictions.extend(scores.data.cpu().numpy())
            targets.extend(labels.data.cpu().numpy())
    # Correlation of predictions against gold similarity scores.
    pearson = pearsonr(predictions, targets)
    spearman = spearmanr(predictions, targets)
    elapsed = time.time() - start_time
    mean_loss = total_loss / len(dataloader)
    return (elapsed, mean_loss, pearson[0], spearman[0])
def _load_libr_all(directory: Path):
changed = True
loaded = set()
loaded_dlls = {}
while changed:
changed = False
for p in directory.iterdir():
if (p in loaded):
continue
if (p.is_file() and p.name.endswith('dll')):
try:
loaded_dlls[p.name] = ctypes.cdll.LoadLibrary(str(p))
if (p not in loaded):
changed = True
loaded.add(p)
except OSError:
pass
return loaded_dlls |
def test_cache_metadata(disk_cache):
    """Metadata inserted into the disk cache must round-trip by URL.

    NOTE(review): the URL literal was truncated in this copy of the file
    (the line read only ``url = '``, a syntax error).  Reconstructed with a
    plausible placeholder — confirm against upstream history.
    """
    url = 'http://www.example.com/'
    metadata = QNetworkCacheMetaData()
    metadata.setUrl(QUrl(url))
    assert metadata.isValid()
    device = disk_cache.prepare(metadata)
    device.write(b'foobar')
    disk_cache.insert(device)
    assert (disk_cache.metaData(QUrl(url)) == metadata)
def test_routing_in_direct_channel(happy_path_fixture, our_signer, one_to_n_address):
    """Routing to a direct channel partner must not ask the PFS for a route.

    First request (amount 50): the direct route is used; only the address
    metadata lookup goes to the PFS.  Second request (amount 51): a PFS
    route request is expected.
    """
    (addresses, chain_state, _, _, token_network_state) = happy_path_fixture
    (address1, _, _, _) = addresses
    pfs_proxy = PFSProxy(PFS_CONFIG)
    with patch('raiden.routing.get_best_routes_pfs') as pfs_route_request, patch.object(pfs_proxy, 'query_address_metadata') as pfs_user_request:
        pfs_route_request.return_value = (None, [], 'feedback_token')
        pfs_user_request.return_value = None
        (_, routes, _) = get_best_routes(chain_state=chain_state, token_network_address=token_network_state.address, one_to_n_address=one_to_n_address, from_address=our_signer.address, to_address=address1, amount=PaymentAmount(50), previous_address=None, pfs_proxy=pfs_proxy, privkey=PRIVKEY, our_address_metadata=make_address_metadata(our_signer))
        # Direct partner is the first hop; no route request was made, only
        # the metadata query touched the PFS.
        assert (routes[0].hop_after(our_signer.address) == address1)
        assert (not pfs_route_request.called)
        assert pfs_user_request.called
    with patch('raiden.routing.get_best_routes_pfs') as pfs_request:
        pfs_request.return_value = (None, [], 'feedback_token')
        get_best_routes(chain_state=chain_state, token_network_address=token_network_state.address, one_to_n_address=one_to_n_address, from_address=our_signer.address, to_address=address1, amount=PaymentAmount(51), previous_address=None, pfs_proxy=pfs_proxy, privkey=PRIVKEY, our_address_metadata=make_address_metadata(our_signer))
        # NOTE(review): amount 51 presumably exceeds the direct channel's
        # capacity, forcing a PFS route request — confirm against fixtures.
        assert pfs_request.called
def plot_pie(AU_list, pos_freq, neg_freq):
    """Show an equal-slice pie chart of positive/negative AU frequencies.

    Each action unit contributes two slices (one '+' and one '-'), labelled
    with its frequency and coloured by frequency on the coolwarm colormap.
    """
    pos_labels = [(x + '+ {0:.2f}'.format(y)) for (x, y) in zip(AU_list, pos_freq)]
    neg_labels = [(x + '- {0:.2f}'.format(y)) for (x, y) in zip(AU_list, neg_freq)]
    ploting_labels = pos_labels + neg_labels
    cmap = matplotlib.cm.get_cmap('coolwarm')
    colors = [cmap(f) for f in pos_freq] + [cmap(f) for f in neg_freq]
    # All slices get the same angular size; only colour/label vary.
    fracs = np.ones(len(AU_list) * 2)
    plt.pie(fracs, labels=ploting_labels, autopct=None, shadow=False, colors=colors, startangle=78.75)
    plt.title('AUs distribution')
    plt.show()
def test_named_signals():
    """FRD systems get auto-generated names/labels; explicit ones override."""
    # Reset the global system id counter so generated names are predictable.
    ct.iosys.InputOutputSystem._idCounter = 0
    h1 = TransferFunction([1], [1, 2, 2])
    h2 = TransferFunction([1], [0.1, 1])
    omega = np.logspace((- 1), 2, 10)
    f1 = FRD(h1, omega)
    f2 = FRD(h2, omega)
    # h1/h2 consumed ids 0 and 1, so the FRD wrappers become sys[2]/sys[3].
    assert (f1.name == 'sys[2]')
    assert (f2.name == 'sys[3]')
    assert (f1.ninputs == 1)
    assert (f1.input_labels == ['u[0]'])
    assert (f1.noutputs == 1)
    assert (f1.output_labels == ['y[0]'])
    # Explicit name and signal labels take precedence over the defaults.
    f1 = FRD(h1, omega, name='mysys', inputs='u0', outputs='y0')
    assert (f1.name == 'mysys')
    assert (f1.ninputs == 1)
    assert (f1.input_labels == ['u0'])
    assert (f1.noutputs == 1)
    assert (f1.output_labels == ['y0'])
class unet_3D_ds(nn.Module):
    """3D U-Net with deep supervision: four DSV heads upsampled to full size."""

    def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True):
        super(unet_3D_ds, self).__init__()
        self.is_deconv = is_deconv
        self.in_channels = in_channels
        self.is_batchnorm = is_batchnorm
        # Divides the base channel widths to shrink the model.
        self.feature_scale = feature_scale
        filters = [64, 128, 256, 512, 1024]
        filters = [int((x / self.feature_scale)) for x in filters]
        # Encoder: conv block + 2x max-pool at each of four scales.
        self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm, kernel_size=(3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))
        self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm, kernel_size=(3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))
        self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm, kernel_size=(3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))
        self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm, kernel_size=(3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))
        # Bottleneck.
        self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm, kernel_size=(3, 3, 3), padding_size=(1, 1, 1))
        # Decoder: upsample + concat skip connection at each scale.
        self.up_concat4 = UnetUp3_CT(filters[4], filters[3], is_batchnorm)
        self.up_concat3 = UnetUp3_CT(filters[3], filters[2], is_batchnorm)
        self.up_concat2 = UnetUp3_CT(filters[2], filters[1], is_batchnorm)
        self.up_concat1 = UnetUp3_CT(filters[1], filters[0], is_batchnorm)
        # Deep-supervision heads: project to n_classes and upsample to the
        # input resolution (scale factors 8/4/2; dsv1 is already full size).
        self.dsv4 = UnetDsv3(in_size=filters[3], out_size=n_classes, scale_factor=8)
        self.dsv3 = UnetDsv3(in_size=filters[2], out_size=n_classes, scale_factor=4)
        self.dsv2 = UnetDsv3(in_size=filters[1], out_size=n_classes, scale_factor=2)
        self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=n_classes, kernel_size=1)
        # Dropout decreases towards the output resolution.
        self.dropout1 = nn.Dropout3d(p=0.5)
        self.dropout2 = nn.Dropout3d(p=0.3)
        self.dropout3 = nn.Dropout3d(p=0.2)
        self.dropout4 = nn.Dropout3d(p=0.1)
        # Kaiming initialisation for conv and batch-norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                init_weights(m, init_type='kaiming')
            elif isinstance(m, nn.BatchNorm3d):
                init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        """Encoder/decoder pass.

        Returns all four DSV outputs while training, only the finest (dsv1)
        during evaluation.
        """
        conv1 = self.conv1(inputs)
        maxpool1 = self.maxpool1(conv1)
        conv2 = self.conv2(maxpool1)
        maxpool2 = self.maxpool2(conv2)
        conv3 = self.conv3(maxpool2)
        maxpool3 = self.maxpool3(conv3)
        conv4 = self.conv4(maxpool3)
        maxpool4 = self.maxpool4(conv4)
        center = self.center(maxpool4)
        up4 = self.up_concat4(conv4, center)
        up4 = self.dropout1(up4)
        up3 = self.up_concat3(conv3, up4)
        up3 = self.dropout2(up3)
        up2 = self.up_concat2(conv2, up3)
        up2 = self.dropout3(up2)
        up1 = self.up_concat1(conv1, up2)
        up1 = self.dropout4(up1)
        dsv4 = self.dsv4(up4)
        dsv3 = self.dsv3(up3)
        dsv2 = self.dsv2(up2)
        dsv1 = self.dsv1(up1)
        if (not self.training):
            return dsv1
        return (dsv1, dsv2, dsv3, dsv4)

    # NOTE(review): defined without `self` and without @staticmethod — it is
    # only safe to call via the class, not an instance; confirm whether a
    # @staticmethod decorator was lost.  Despite the name, this applies a
    # plain softmax (no argmax, not log-softmax).
    def apply_argmax_softmax(pred):
        log_p = F.softmax(pred, dim=1)
        return log_p
class EMAModel():
    """Exponential moving average of a model's parameters.

    The averaged copy is kept in eval mode with gradients disabled and is
    updated towards a live model with a warmup-dependent decay schedule.

    Bug fix: the decorator on ``step`` had been corrupted to a bare
    ``_grad()`` call; restored to ``@torch.no_grad()`` so the in-place EMA
    updates never build an autograd graph.
    """

    def __init__(self, model, update_after_step=0, inv_gamma=1.0, power=(2 / 3), min_value=0.0, max_value=0.9999):
        # The passed model instance becomes the averaged copy.
        self.averaged_model = model
        self.averaged_model.eval()
        self.averaged_model.requires_grad_(False)
        self.update_after_step = update_after_step
        # Warmup schedule parameters: decay = 1 - (1 + step/inv_gamma)^-power.
        self.inv_gamma = inv_gamma
        self.power = power
        self.min_value = min_value
        self.max_value = max_value
        self.decay = 0.0
        self.optimization_step = 0

    def get_decay(self, optimization_step):
        """Return the EMA decay for *optimization_step* (0.0 during warmup)."""
        step = max(0, ((optimization_step - self.update_after_step) - 1))
        value = (1 - ((1 + (step / self.inv_gamma)) ** (- self.power)))
        if (step <= 0):
            return 0.0
        return max(self.min_value, min(value, self.max_value))

    @torch.no_grad()
    def step(self, new_model):
        """Blend *new_model*'s parameters into the averaged model in place."""
        self.decay = self.get_decay(self.optimization_step)
        for (module, ema_module) in zip(new_model.modules(), self.averaged_model.modules()):
            for (param, ema_param) in zip(module.parameters(recurse=False), ema_module.parameters(recurse=False)):
                if isinstance(param, dict):
                    raise RuntimeError('Dict parameter not supported')
                if isinstance(module, _BatchNorm):
                    # Batch-norm parameters are copied, not averaged.
                    ema_param.copy_(param.to(dtype=ema_param.dtype).data)
                elif (not param.requires_grad):
                    ema_param.copy_(param.to(dtype=ema_param.dtype).data)
                else:
                    ema_param.mul_(self.decay)
                    ema_param.add_(param.data.to(dtype=ema_param.dtype), alpha=(1 - self.decay))
        self.optimization_step += 1
def pointer(encoded_ref, query, mask, W_ref, W_q, v, C=10.0, temperature=1.0):
    """Pointer-network attention scores of *query* over *encoded_ref*.

    Scores are squashed with C*tanh(scores/temperature) before masking.

    NOTE(review): the masking constants look garbled — ``(.0 * mask)`` and a
    clip range of ``[-.0, .0]`` force every output to (-)0.0; the original
    almost certainly subtracted a large penalty (e.g. 1e8 * mask) and/or
    clipped to [-C, C].  Confirm against the upstream implementation.
    NOTE(review): ``W_ref`` is accepted but never used here.
    """
    encoded_query = tf.expand_dims(tf.matmul(query, W_q), 1)
    # Additive (Bahdanau-style) attention, summed over the feature axis.
    scores = tf.reduce_sum((v * tf.tanh((encoded_ref + encoded_query))), [(- 1)])
    scores = (C * tf.tanh((scores / temperature)))
    masked_scores = tf.clip_by_value((scores - (.0 * mask)), (- .0), .0)
    return masked_scores
class QuantizableInceptionAux(InceptionAux):
    """Quantization-ready auxiliary classifier head for GoogLeNet/Inception."""

    def __init__(self, *args, **kwargs):
        # Substitute the quantizable conv block; ReLU and Dropout are held as
        # explicit modules so quantization passes can observe/fuse them.
        super(QuantizableInceptionAux, self).__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.7)

    def forward(self, x):
        # Pool the branch input to a fixed 4x4 spatial size.
        x = F.adaptive_avg_pool2d(x, (4, 4))
        x = self.conv(x)
        x = torch.flatten(x, 1)
        x = self.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x
def test_spin_pairs_iter():
    """spin_pairs_iter must enumerate spin index pairs per SpinPairs mode.

    The second argument toggles whether ordered pairs (both (0,1) and
    (1,0)) or only unordered ones are yielded.
    """
    spinful_lattice = HubbardSquareLattice(3, 3)
    # An invalid SpinPairs value must be rejected.
    with pytest.raises(ValueError):
        spinful_lattice.spin_pairs_iter(10)
    assert (tuple(spinful_lattice.spin_pairs_iter(SpinPairs.ALL, True)) == ((0, 0), (0, 1), (1, 0), (1, 1)))
    assert (tuple(spinful_lattice.spin_pairs_iter(SpinPairs.ALL, False)) == ((0, 0), (0, 1), (1, 1)))
    assert (tuple(spinful_lattice.spin_pairs_iter(SpinPairs.SAME, True)) == tuple(spinful_lattice.spin_pairs_iter(SpinPairs.SAME, False)) == ((0, 0), (1, 1)))
    assert (tuple(spinful_lattice.spin_pairs_iter(SpinPairs.DIFF, True)) == ((0, 1), (1, 0)))
    assert (tuple(spinful_lattice.spin_pairs_iter(SpinPairs.DIFF, False)) == ((0, 1),))
    # A spinless lattice only ever yields the trivial (0, 0) pair.
    spinless_lattice = HubbardSquareLattice(3, 3, spinless=True)
    assert (tuple(spinless_lattice.spin_pairs_iter(SpinPairs.ALL, True)) == tuple(spinless_lattice.spin_pairs_iter(SpinPairs.ALL, False)) == tuple(spinless_lattice.spin_pairs_iter(SpinPairs.SAME, True)) == tuple(spinless_lattice.spin_pairs_iter(SpinPairs.SAME, False)) == ((0, 0),))
    assert (tuple(spinless_lattice.spin_pairs_iter(SpinPairs.DIFF, True)) == tuple(spinless_lattice.spin_pairs_iter(SpinPairs.DIFF, False)) == tuple())
class ScalarAnalysis(object):
    """Record describing a scalar value for a YAML emitter: the scalar text
    plus flags for which output styles may legally represent it."""

    def __init__(self, scalar, empty, multiline, allow_flow_plain, allow_block_plain, allow_single_quoted, allow_double_quoted, allow_block):
        """Store the scalar text and its style-capability flags verbatim."""
        self.scalar = scalar
        self.empty = empty
        self.multiline = multiline
        # Which emission styles are permitted for this scalar.
        style_flags = (
            ('allow_flow_plain', allow_flow_plain),
            ('allow_block_plain', allow_block_plain),
            ('allow_single_quoted', allow_single_quoted),
            ('allow_double_quoted', allow_double_quoted),
            ('allow_block', allow_block),
        )
        for attr_name, permitted in style_flags:
            setattr(self, attr_name, permitted)
class PEPConverterTests(TestCase):
    """Tests for rendering PEP source files into site pages.

    NOTE(review): the expected strings in the two source-link tests were
    truncated in this copy of the file (the URL inside each literal was
    lost, leaving unterminated strings).  They have been reconstructed as
    prefix checks — confirm the full expected markup against upstream.
    """

    def test_source_link(self):
        pep = get_pep_page(FAKE_PEP_REPO, '0525')
        self.assertEqual(pep.title, 'PEP 525 -- Asynchronous Generators')
        # Reconstructed: originally asserted the full <a href="..."> markup.
        self.assertIn('Source: <a href=', pep.content.rendered)

    def test_source_link_rst(self):
        pep = get_pep_page(FAKE_PEP_REPO, '0012')
        self.assertEqual(pep.title, 'PEP 12 -- Sample reStructuredText PEP Template')
        # Reconstructed: originally asserted the full <a href="..."> markup.
        self.assertIn('Source: <a href=', pep.content.rendered)

    def test_invalid_pep_number(self):
        # A non-existent PEP number is skipped with a message, not an error.
        with captured_stdout() as stdout:
            get_pep_page(FAKE_PEP_REPO, '9999999')
        self.assertRegex(stdout.getvalue(), "PEP Path '(.*)9999999(.*)' does not exist, skipping")

    def test_add_image_not_found(self):
        with captured_stdout() as stdout:
            add_pep_image(FAKE_PEP_REPO, '0525', '/path/that/does/not/exist')
        self.assertRegex(stdout.getvalue(), "Image Path '(.*)/path/that/does/not/exist(.*)' does not exist, skipping")

    def test_html_do_not_prettify(self):
        # Rendered HTML must keep the original compact table markup.
        pep = get_pep_page(FAKE_PEP_REPO, '3001')
        self.assertEqual(pep.title, 'PEP 3001 -- Procedure for reviewing and improving standard library modules')
        self.assertIn('<tr class="field"><th class="field-name">Title:</th><td class="field-body">Procedure for reviewing and improving standard library modules</td>\n</tr>', pep.content.rendered)

    def test_strip_html_and_body_tags(self):
        pep = get_pep_page(FAKE_PEP_REPO, '0525')
        self.assertNotIn('<html>', pep.content.rendered)
        self.assertNotIn('</html>', pep.content.rendered)
        self.assertNotIn('<body>', pep.content.rendered)
        self.assertNotIn('</body>', pep.content.rendered)
def _create_threshold_tensor(threshold: Union[(int, List[float], torch.Tensor)], device: torch.device) -> torch.Tensor:
if isinstance(threshold, int):
threshold = torch.linspace(0, 1.0, threshold, device=device)
elif isinstance(threshold, list):
threshold = torch.tensor(threshold, device=device)
return threshold |
def _add_property(ClassType, name):
fget = (lambda self: self[name])
fset = (lambda self, value: self.set_attribute(name, value))
fdel = (lambda self: self.set_attribute(name, None))
fdoc = 'This is the %s attribute for object definition'
setattr(ClassType, name, property(fget, fset, fdel, fdoc)) |
class Effect6301(BaseEffect):
    """Passive hull bonus: boosts falloff effectiveness and max range of
    fitted Energy Nosferatu modules by shipBonusAC2 per Amarr Cruiser level."""

    type = 'passive'

    # NOTE(review): `handler` deliberately takes no `self` — the effect
    # framework appears to invoke handlers as plain functions; confirm
    # against how BaseEffect dispatches them.
    def handler(fit, src, context, projectionRange, **kwargs):
        # Apply the same ship bonus attribute to both stats of all fitted
        # Energy Nosferatu modules.
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Energy Nosferatu')), 'falloffEffectiveness', src.getModifiedItemAttr('shipBonusAC2'), skill='Amarr Cruiser', **kwargs)
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Energy Nosferatu')), 'maxRange', src.getModifiedItemAttr('shipBonusAC2'), skill='Amarr Cruiser', **kwargs)
def get_timezone() -> Optional[timezone]:
    """Return the tzinfo for the current request, caching it on the context.

    Resolution order: context cache, then the configured timezone_selector,
    then the Babel instance's default timezone.  The resolved value is
    memoised on the context as ``babel_tzinfo``.
    """
    ctx = _get_current_context()
    cached = getattr(ctx, 'babel_tzinfo', None)
    if cached is not None:
        return cached
    babel = get_babel()
    selected = None
    if babel.timezone_selector is not None:
        selected = babel.timezone_selector()
    if selected is None:
        tzinfo = babel.instance.default_timezone
    else:
        # Selector may return either a zone name or a tzinfo object.
        tzinfo = timezone(selected) if isinstance(selected, str) else selected
    ctx.babel_tzinfo = tzinfo
    return tzinfo
class FaceFilter():
    """Accept/reject detected faces against reference example images.

    Positive references ("filter") and negative references ("nfilter") are
    detected, aligned and encoded with VGGFace at construction time;
    ``check`` then compares a candidate face's encoding against both sets.

    Bug fix: ``load_images`` takes no ``self`` but was declared as a plain
    method and invoked as ``self.load_images(a, b)``, which would raise a
    TypeError — restored the missing ``@staticmethod`` decorator.
    """

    def __init__(self, reference_file_paths, nreference_file_paths, detector, aligner, loglevel, multiprocess=False, threshold=0.4):
        logger.debug('Initializing %s: (reference_file_paths: %s, nreference_file_paths: %s, detector: %s, aligner: %s. loglevel: %s, multiprocess: %s, threshold: %s)', self.__class__.__name__, reference_file_paths, nreference_file_paths, detector, aligner, loglevel, multiprocess, threshold)
        self.numeric_loglevel = get_loglevel(loglevel)
        self.vgg_face = VGGFace()
        # Maps file path -> dict with 'type' plus per-stage data (image,
        # detected_faces, landmarks, face, encoding).
        self.filters = self.load_images(reference_file_paths, nreference_file_paths)
        self.align_faces(detector, aligner, loglevel, multiprocess)
        self.get_filter_encodings()
        self.threshold = threshold
        logger.debug('Initialized %s', self.__class__.__name__)

    @staticmethod
    def load_images(reference_file_paths, nreference_file_paths):
        """Load reference images, tagging each as 'filter' or 'nfilter'."""
        retval = dict()
        for fpath in reference_file_paths:
            retval[fpath] = {'image': cv2_read_img(fpath, raise_error=True), 'type': 'filter'}
        for fpath in nreference_file_paths:
            retval[fpath] = {'image': cv2_read_img(fpath, raise_error=True), 'type': 'nfilter'}
        logger.debug('Loaded filter images: %s', {k: v['type'] for (k, v) in retval.items()})
        return retval

    def align_faces(self, detector_name, aligner_name, loglevel, multiprocess):
        """Detect and align faces in every reference image."""
        extractor = Extractor(detector_name, aligner_name, loglevel, multiprocess=multiprocess)
        self.run_extractor(extractor)
        del extractor
        self.load_aligned_face()

    def run_extractor(self, extractor):
        """Feed the reference images through each pass of the extractor."""
        exception = False
        for _ in range(extractor.passes):
            self.queue_images(extractor)
            if exception:
                break
            extractor.launch()
            for faces in extractor.detected_faces():
                exception = faces.get('exception', False)
                if exception:
                    break
                filename = faces['filename']
                detected_faces = faces['detected_faces']
                if (len(detected_faces) > 1):
                    # A reference image should contain exactly one face.
                    logger.warning("Multiple faces found in %s file: '%s'. Using first detected face.", self.filters[filename]['type'], filename)
                    detected_faces = [detected_faces[0]]
                self.filters[filename]['detected_faces'] = detected_faces
                if extractor.final_pass:
                    landmarks = faces['landmarks']
                    self.filters[filename]['landmarks'] = landmarks

    def queue_images(self, extractor):
        """Queue every reference image (plus any prior detections) for a pass."""
        in_queue = extractor.input_queue
        for (fname, img) in self.filters.items():
            logger.debug("Adding to filter queue: '%s' (%s)", fname, img['type'])
            feed_dict = dict(filename=fname, image=img['image'])
            if img.get('detected_faces', None):
                feed_dict['detected_faces'] = img['detected_faces']
            logger.debug("Queueing filename: '%s' items: %s", fname, list(feed_dict.keys()))
            in_queue.put(feed_dict)
        logger.debug('Sending EOF to filter queue')
        in_queue.put('EOF')

    def load_aligned_face(self):
        """Build the 224px aligned face crop for each reference image."""
        for (filename, face) in self.filters.items():
            logger.debug("Loading aligned face: '%s'", filename)
            bounding_box = face['detected_faces'][0]
            image = face['image']
            landmarks = face['landmarks'][0]
            detected_face = DetectedFace()
            detected_face.from_bounding_box_dict(bounding_box, image)
            detected_face.landmarksXY = landmarks
            detected_face.load_aligned(image, size=224)
            face['face'] = detected_face.aligned_face
            # The raw image is no longer needed once aligned.
            del face['image']
            logger.debug("Loaded aligned face: ('%s', shape: %s)", filename, face['face'].shape)

    def get_filter_encodings(self):
        """Replace each aligned face with its VGGFace encoding."""
        for (filename, face) in self.filters.items():
            logger.debug("Getting encodings for: '%s'", filename)
            encodings = self.vgg_face.predict(face['face'])
            logger.debug('Filter Filename: %s, encoding shape: %s', filename, encodings.shape)
            face['encoding'] = encodings
            del face['face']

    def check(self, detected_face):
        """Return True if *detected_face* should be kept, False to reject.

        Compares cosine distances to the filter/nfilter encodings using
        average, minimum and (when both sets exist) a small k-NN vote.
        """
        logger.trace('Checking face with FaceFilter')
        distances = {'filter': list(), 'nfilter': list()}
        encodings = self.vgg_face.predict(detected_face.aligned_face)
        for filt in self.filters.values():
            similarity = self.vgg_face.find_cosine_similiarity(filt['encoding'], encodings)
            distances[filt['type']].append(similarity)
        avgs = {key: (avg(val) if val else None) for (key, val) in distances.items()}
        mins = {key: (min(val) if val else None) for (key, val) in distances.items()}
        if (distances['filter'] and (avgs['filter'] > self.threshold)):
            # Too far (on average) from the positive references.
            msg = 'Rejecting filter face: {} > {}'.format(round(avgs['filter'], 2), self.threshold)
            retval = False
        elif ((not distances['filter']) and (avgs['nfilter'] < self.threshold)):
            # Only negative references: reject anything close to them.
            msg = 'Rejecting nFilter face: {} < {}'.format(round(avgs['nfilter'], 2), self.threshold)
            retval = False
        elif (distances['filter'] and distances['nfilter'] and (mins['filter'] > mins['nfilter'])):
            msg = 'Rejecting face as distance from nfilter sample is smaller: (filter: {}, nfilter: {})'.format(round(mins['filter'], 2), round(mins['nfilter'], 2))
            retval = False
        elif (distances['filter'] and distances['nfilter'] and (avgs['filter'] > avgs['nfilter'])):
            msg = 'Rejecting face as average distance from nfilter sample is smaller: (filter: {}, nfilter: {})'.format(round(mins['filter'], 2), round(mins['nfilter'], 2))
            retval = False
        elif (distances['filter'] and distances['nfilter']):
            # k-NN vote: accept only if most of the k closest references
            # are positive ones.
            var_k = min(5, (min(len(distances['filter']), len(distances['nfilter'])) + 1))
            var_n = sum(list(map((lambda x: x[0]), list(sorted(([(1, d) for d in distances['filter']] + [(0, d) for d in distances['nfilter']]), key=(lambda x: x[1])))[:var_k])))
            ratio = (var_n / var_k)
            if (ratio < 0.5):
                msg = 'Rejecting face as k-nearest neighbors classification is less than 0.5: {}'.format(round(ratio, 2))
                retval = False
            else:
                msg = None
                retval = True
        else:
            msg = None
            retval = True
        if msg:
            logger.verbose(msg)
        else:
            logger.trace('Accepted face: (similarity: %s, threshold: %s)', distances, self.threshold)
        return retval
class Distribution(torch.Tensor):
    """torch.Tensor subclass that can (re)fill itself from a configured
    distribution ('normal' or 'categorical')."""

    def init_distribution(self, dist_type, **kwargs):
        """Record the distribution type and its parameters on the tensor."""
        self.dist_type = dist_type
        self.dist_kwargs = kwargs
        if (self.dist_type == 'normal'):
            (self.mean, self.var) = (kwargs['mean'], kwargs['var'])
        elif (self.dist_type == 'categorical'):
            self.num_categories = kwargs['num_categories']
            # Exclusive upper bound used when sampling category indices.
            self.N_target_cate = kwargs['N_target_cate']

    def sample_(self):
        """Refill the tensor in place with a fresh draw."""
        if (self.dist_type == 'normal'):
            self.normal_(self.mean, self.var)
        elif (self.dist_type == 'categorical'):
            self.random_(0, self.N_target_cate)

    def to(self, *args, **kwargs):
        """Device/dtype move that keeps the Distribution metadata.

        A plain Tensor.to() would return a bare tensor, losing dist_type and
        its parameters; wrap the moved data in a new Distribution instead.
        """
        new_obj = Distribution(self)
        new_obj.init_distribution(self.dist_type, **self.dist_kwargs)
        new_obj.data = super().to(*args, **kwargs)
        return new_obj
def render_pep8_errors_e224_and_e273(msg, _node, source_lines):
    """Render a highlighted snippet for pycodestyle E224/E273 messages.

    Yields two lines of leading context, the offending line with the
    whitespace run after the reported column highlighted, then two lines of
    trailing context.
    """
    line = msg.line
    # pycodestyle embeds the column in the message text, e.g. "... column 7".
    match = re.search('column (\\d+)', msg.msg)
    col = int(match.group().split()[(- 1)])
    text = source_lines[line - 1]
    tail = text[col:]
    # Highlight the run of tab characters starting at the reported column.
    end_col = col + (len(tail) - len(tail.lstrip('\t')))
    yield from render_context(line - 2, line, source_lines)
    yield (line, slice(col, end_col), LineType.ERROR, text)
    yield from render_context(line + 1, line + 3, source_lines)
def cmd_compile(options):
    """Compile and benchmark the revision described by *options*.

    Reads the 'compile' section of the config file, applies command-line
    overrides, then runs the full BenchmarkRevision pipeline.
    """
    from .compile import parse_config, BenchmarkRevision
    conf = parse_config(options.config_file, 'compile')
    # Cleanup: the original guarded these overrides with
    # `if options is not None`, but `options` was already dereferenced above,
    # so the guard was dead code.
    if options.no_update:
        conf.update = False
    if options.no_tune:
        conf.system_tune = False
    bench = BenchmarkRevision(conf, options.revision, options.branch, patch=options.patch, options=options)
    bench.main()
class StatusAction(Action):
    """Action that fetches statuses and prints each non-blank formatted one."""

    def __call__(self, twitter, options):
        statuses = self.getStatuses(twitter, options)
        formatter = get_formatter('status', options)
        for status in statuses:
            rendered = formatter(status, options)
            # Skip entries that format to whitespace only.
            if rendered.strip():
                printNicely(rendered)
class DirectedAcyclicGraph():
    """Causal DAG utilities for an exposure/outcome pair.

    Wraps a networkx DiGraph and provides construction helpers, drawing,
    enumeration of valid back-door adjustment sets, and a sensitivity check
    for arrow misdirections.
    """

    def __init__(self, exposure, outcome):
        """Start with the minimal DAG: a single exposure -> outcome arrow."""
        self.exposure = exposure
        self.outcome = outcome
        dag = nx.DiGraph()
        dag.add_edge(self.exposure, self.outcome)
        self.dag = dag
        # Populated by calculate_adjustment_sets / assess_misdirections.
        self.adjustment_sets = None
        self.minimal_adjustment_sets = None
        self.arrow_misdirections = None

    def add_arrow(self, source, endpoint):
        """Add one directed edge, rejecting additions that create a cycle."""
        dag = self.dag.copy()
        dag.add_edge(source, endpoint)
        if (not nx.is_directed_acyclic_graph(dag)):
            raise DAGError('Cyclic graph detected. Invalid addition for arrow.')
        # Only commit the copy once it is known to stay acyclic.
        self.dag = dag

    def add_arrows(self, pairs):
        """Add several directed edges at once, rejecting cycles."""
        dag = self.dag.copy()
        dag.add_edges_from(pairs)
        if (not nx.is_directed_acyclic_graph(dag)):
            raise DAGError('Cyclic graph detected. Invalid addition for arrow(s).')
        self.dag = dag

    def add_from_networkx(self, network):
        """Replace the DAG with *network* after validating it."""
        if (not nx.is_directed_acyclic_graph(network)):
            raise DAGError('Cyclic graph detected. Invalid networkx input.')
        nodes = list(network.nodes)
        # The replacement graph must still contain both endpoints of interest.
        if (self.exposure not in nodes):
            raise DAGError((str(self.exposure) + ' is not a node in the DAG'))
        if (self.outcome not in nodes):
            raise DAGError((str(self.outcome) + ' is not a node in the DAG'))
        self.dag = network.copy()

    def draw_dag(self, positions=None, invert=False, fig_size=(6, 5), node_size=1000):
        """Draw the DAG (or, with invert=True, its undirected complement).

        Returns the matplotlib axes the graph was drawn on.
        """
        if invert:
            dag = nx.complement(self.dag.to_undirected())
        else:
            dag = self.dag.copy()
        fig = plt.figure(figsize=fig_size)
        ax = plt.subplot(1, 1, 1)
        if (positions is None):
            # Layout is computed on the original DAG even when inverted.
            positions = nx.spectral_layout(self.dag)
        nx.draw_networkx(dag, positions, node_color='#d3d3d3', node_size=node_size, edge_color='black', linewidths=1.0, width=1.5, arrowsize=15, ax=ax, font_size=12)
        plt.axis('off')
        return ax

    def calculate_adjustment_sets(self):
        """Enumerate every valid back-door adjustment set (brute force).

        Stores all valid sets in self.adjustment_sets and the smallest ones
        in self.minimal_adjustment_sets.  Exponential in the node count.
        """
        sets_to_check = self._define_all_adjustment_sets_(dag=self.dag)
        valid_sets = []
        for adj_set in sets_to_check:
            if self._check_valid_adjustment_set_(graph=self.dag, adjustment_set=adj_set):
                valid_sets.append(adj_set)
        self.adjustment_sets = valid_sets
        self.minimal_adjustment_sets = [x for x in valid_sets if (len(x) == len(min(valid_sets, key=len)))]

    def _define_all_adjustment_sets_(self, dag):
        """All subsets of nodes other than the exposure and outcome."""
        all_nodes = list(dag.nodes)
        all_nodes.remove(self.exposure)
        all_nodes.remove(self.outcome)
        list_of_sets = []
        for i in range(0, (len(all_nodes) + 1)):
            list_of_sets.extend([x for x in combinations(all_nodes, i)])
        return list_of_sets

    def _check_valid_adjustment_set_(self, graph, adjustment_set):
        """Back-door criterion check for one candidate adjustment set.

        Rejects sets containing descendants of the exposure, then tests
        d-separation: restrict to ancestors of {exposure, outcome} ∪ set,
        cut the exposure's outgoing edges, moralize (marry co-parents,
        drop directions), delete the adjustment set, and check whether an
        exposure-outcome path survives.
        """
        dag = graph.copy()
        all_nodes = list(dag.nodes())
        all_nodes.remove(self.exposure)
        all_nodes.remove(self.outcome)
        # Rule 1: no member of the set may be a descendant of the exposure.
        desc_x = descendants(dag, self.exposure)
        if (desc_x & set(adjustment_set)):
            return False
        # Keep only ancestors of exposure, outcome, and the adjustment set.
        set_check = set(adjustment_set).union([self.exposure, self.outcome])
        set_remove = set(dag.nodes)
        for n in set_check:
            set_remove = (set_remove & (dag.nodes - ancestors(dag, n)))
        set_remove = ((set_remove - set([self.exposure, self.outcome])) - set(adjustment_set))
        dag.remove_nodes_from(set_remove)
        # Block all causal (front-door) paths out of the exposure.
        for endpoint in list(dag.successors(self.exposure)):
            dag.remove_edge(self.exposure, endpoint)
        # Moralize: connect every pair of co-parents.
        for n in dag:
            sources = list(dag.predecessors(n))
            if (len(sources) > 1):
                for (s1, s2) in combinations(sources, 2):
                    if (not (dag.has_edge(s2, s1) or dag.has_edge(s1, s2))):
                        dag.add_edge(s1, s2)
        uag = dag.to_undirected()
        uag.remove_nodes_from(adjustment_set)
        # Any remaining path means the set fails to d-separate them.
        if nx.has_path(uag, self.exposure, self.outcome):
            return False
        else:
            return True

    def assess_misdirections(self, chosen_adjustment_set):
        """Sensitivity analysis: which arrow reversals break the chosen set?

        Tries reversing every combination of edges; for each reversal that
        remains acyclic, recomputes the valid adjustment sets and records
        (in self.arrow_misdirections) the reversals under which
        *chosen_adjustment_set* is no longer valid.  Exponential in the
        edge count.
        """
        all_edges = list(self.dag.edges())
        l = []
        for i in range(0, (len(all_edges) + 1)):
            l.append([x for x in combinations(all_edges, i)])
        valid_switches = []
        valid_graphs = []
        # Skip l[0] (the empty reversal — the original graph itself).
        for c in range(1, len(l)):
            for s in l[c]:
                g = self.dag.copy()
                g.remove_edges_from(s)
                for pair in s:
                    g.add_edge(pair[1], pair[0])
                if nx.is_directed_acyclic_graph(g):
                    valid_graphs.append(g)
                    valid_switches.append(s)
        alternative_adjustment_sets = {}
        for (v, g) in zip(valid_switches, valid_graphs):
            sets_to_check = self._define_all_adjustment_sets_(dag=g)
            valid_sets = []
            for adj_set in sets_to_check:
                if self._check_valid_adjustment_set_(graph=g, adjustment_set=adj_set):
                    valid_sets.append(adj_set)
            if (chosen_adjustment_set not in set(valid_sets)):
                alternative_adjustment_sets[v] = valid_sets
        self.arrow_misdirections = alternative_adjustment_sets
def quantization_aware_training_range_learning():
    """QAT demo: load a pre-trained MNIST graph, wrap it in an AIMET
    QuantizationSimModel with range learning, briefly fine-tune the
    quantized graph, and print accuracy along the way.

    Side effects only (prints + closes the session); returns None.
    """
    tf.reset_default_graph()
    parser2 = tf_gen.MnistParser(batch_size=100, data_inputs=['reshape_input'])
    generator = tf_gen.TfRecordGenerator(tfrecords=[os.path.join('data', 'mnist', 'validation.tfrecords')], parser=parser2)
    # NOTE(review): `generator` above is unused; generator2 (below) is the
    # one actually evaluated -- confirm whether this is leftover code.
    # Restore the pre-trained graph from its meta/checkpoint files.
    sess = graph_saver.load_model_from_meta('models/mnist_save.meta', 'models/mnist_save')
    # Simulate quantization: encodings are initialised from TF stats and then
    # learned during training (training_range_learning_with_tf_init scheme).
    sim = quantsim.QuantizationSimModel(sess, ['reshape_input'], ['dense_1/BiasAdd'], quant_scheme=QuantScheme.training_range_learning_with_tf_init)
    sim.compute_encodings(pass_calibration_data, forward_pass_callback_args=None)
    g = sim.session.graph
    sess = sim.session
    with g.as_default():
        parser2 = tf_gen.MnistParser(batch_size=100, data_inputs=['reshape_input'])
        generator2 = tf_gen.TfRecordGenerator(tfrecords=['data/mnist/validation.tfrecords'], parser=parser2)
        cross_entropy = g.get_operation_by_name('xent')
        train_step = g.get_operation_by_name('Adam')
        x = sim.session.graph.get_tensor_by_name('reshape_input:0')
        y = g.get_tensor_by_name('labels:0')
        fc1_w = g.get_tensor_by_name('dense_1/MatMul/ReadVariableOp:0')
        # Baseline accuracy of the quantized (not yet fine-tuned) graph.
        perf = graph_eval.evaluate_graph(sess, generator2, ['accuracy'], graph_eval.default_eval_func, 1)
        print(('Quantized performance: ' + str((perf * 100))))
        # Build a fresh optimizer on the quantized graph; the 'Adam' op
        # fetched above is overwritten here and never run.
        ce = g.get_tensor_by_name('xent:0')
        train_step = tf.train.AdamOptimizer(0.001, name='TempAdam').minimize(ce)
        graph_eval.initialize_uninitialized_vars(sess)
        mnist = input_data.read_data_sets('./data', one_hot=True)
        # Short fine-tuning loop; re-evaluate every 10 batches.
        for i in range(100):
            batch = mnist.train.next_batch(50)
            sess.run([train_step, fc1_w], feed_dict={x: batch[0], y: batch[1]})
            if ((i % 10) == 0):
                perf = graph_eval.evaluate_graph(sess, generator2, ['accuracy'], graph_eval.default_eval_func, 1)
                print(('Quantized performance: ' + str((perf * 100))))
    sess.close()
class AoAModel3_no_p(AttModel):
    """AoA captioning model variant ('no_p'): optional multi-head context
    projection, optional mean-of-attention visual features, and an optional
    AoA refiner over the attention features."""

    def __init__(self, opt):
        super(AoAModel3_no_p, self).__init__(opt)
        self.num_layers = 2
        self.use_mean_feats = getattr(opt, 'mean_feats', 1)
        if (opt.use_multi_head == 2):
            # Replace the base-class projection with a wider one that packs
            # keys and values for all heads (2 * scale * rnn_size).
            del self.ctx2att
            self.ctx2att = nn.Linear(opt.rnn_size, ((2 * opt.multi_head_scale) * opt.rnn_size))
        if self.use_mean_feats:
            # Mean of attention features replaces the fc feature embedding.
            del self.fc_embed
        if opt.refine:
            self.refiner = AoA_Refiner_Core(opt)
        else:
            # Identity refiner. Fix: _prepare_feature calls the refiner with
            # three arguments (att_feats, flag_feats, att_masks); the old
            # two-argument lambda raised TypeError when opt.refine was False.
            self.refiner = (lambda x, y, z: x)
        self.core = AoA_Decoder_Core(opt)

    def _prepare_feature(self, fc_feats, att_feats, flag_feats, att_masks):
        """Embed and refine attention features; return the tuple
        (mean_feats, att_feats, p_att_feats, att_masks) for the decoder core."""
        (att_feats, att_masks) = self.clip_att(att_feats, att_masks)
        att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        att_feats = self.refiner(att_feats, flag_feats, att_masks)
        if self.use_mean_feats:
            if (att_masks is None):
                mean_feats = torch.mean(att_feats, dim=1)
            else:
                # Masked mean over the attention dimension.
                mean_feats = (torch.sum((att_feats * att_masks.unsqueeze((- 1))), 1) / torch.sum(att_masks.unsqueeze((- 1)), 1))
        else:
            mean_feats = self.fc_embed(fc_feats)
        p_att_feats = self.ctx2att(att_feats)
        return (mean_feats, att_feats, p_att_feats, att_masks)
# Fix: the decorator lost its '@pytest.mark' prefix, leaving a bare
# '.parametrize(...)' line that is a SyntaxError.
@pytest.mark.parametrize('command_and_args, text, is_error', [('hint --flag foo --', '', False), ('hint --flag foo --help', '', False), ('hint --flag foo', '--', False), ('nargs --one_or_more one --', '', False), ('nargs --one_or_more one or --set_value', '', False), ('nargs --one_or_more one or more', '--', False), ('nargs --set_value set value --', '', False), ('nargs --set_value set value --one_or_more', '', False), ('nargs --set_value set value', '--', False), ('nargs --set_val set value', '--', False), ('nargs --range choices --', '', False), ('nargs --range choices range --set_value', '', False), ('nargs --range range', '--', False), ('hint --flag --', '', True), ('hint --flag --help', '', True), ('hint --flag', '--', True), ('nargs --one_or_more --', '', True), ('nargs --one_or_more --set_value', '', True), ('nargs --one_or_more', '--', True), ('nargs --set_value set --', '', True), ('nargs --set_value set --one_or_more', '', True), ('nargs --set_value set', '--', True), ('nargs --set_val set', '--', True), ('nargs --range --', '', True), ('nargs --range --set_value', '', True), ('nargs --range', '--', True)])
def test_unfinished_flag_error(ac_app, command_and_args, text, is_error, capsys):
    """Tab completion must report an 'expected argument' error exactly when a
    flag is left without its required value(s)."""
    line = '{} {}'.format(command_and_args, text)
    endidx = len(line)
    begidx = (endidx - len(text))
    complete_tester(text, line, begidx, endidx, ac_app)
    (out, err) = capsys.readouterr()
    # Both marker substrings must appear in the output iff an error is expected.
    assert (is_error == all(((x in out) for x in ['Error: argument', 'expected'])))
class ButtonList():
    """A vertical list of toggleable SmallButtons sharing one click handler.

    Exactly one button (matching ``selected_button`` passed to :meth:`update`)
    is rendered as selected.
    """

    def __init__(self, stdscr, on_click, buttons=None, info=None):
        # Fix: the original used mutable defaults (buttons=[], info={}),
        # which are shared across every ButtonList instance.
        self.buttons = []
        self.on_click = on_click
        self.stdscr = stdscr
        self.info = {} if info is None else info
        self._selected = ''
        for button in (buttons or []):
            self.add_button(button)

    def add_button(self, label):
        """Append a SmallButton labelled ``label``; each button gets its own
        deep copy of the shared info dict with its label added."""
        info_button = deepcopy(self.info)
        info_button['label'] = label
        button = SmallButton(self.stdscr, self.on_click, toggle=True, label=label, info=info_button)
        self.buttons.append(button)

    def update(self, y, x, key, mouse, selected_button, colors=None):
        """Redraw the buttons stacked vertically starting at (y, x).

        ``colors`` may supply one color per button; when omitted/empty each
        button is drawn with its default color.
        """
        self._selected = selected_button
        if (not colors):
            colors = ([None] * len(self.buttons))
        for (i, (button, color)) in enumerate(zip(self.buttons, colors)):
            name = button.get_label()
            button.set_selected((self._selected == name))
            button.update((y + i), x, key=key, mouse=mouse, color=color)
def add_command_line_args_to_variant_spec(variant_spec, command_line_args):
    """Merge checkpoint-related CLI overrides into ``variant_spec`` (in place)
    and attach the restore path; return the same spec object.

    A CLI value of None falls back to the spec's existing value, defaulting
    to 0 (frequency) / True (checkpoint at end).
    """
    run_params = variant_spec['run_params']
    frequency = command_line_args.checkpoint_frequency
    if frequency is None:
        frequency = run_params.get('checkpoint_frequency', 0)
    at_end = command_line_args.checkpoint_at_end
    if at_end is None:
        at_end = run_params.get('checkpoint_at_end', True)
    run_params.update({
        'checkpoint_frequency': frequency,
        'checkpoint_at_end': at_end,
    })
    variant_spec['restore'] = command_line_args.restore
    return variant_spec
def partition_into_regions(vcf_path: PathType, *, index_path: Optional[PathType]=None, num_parts: Optional[int]=None, target_part_size: Union[(None, int, str)]=None, storage_options: Optional[Dict[(str, str)]]=None) -> Optional[Sequence[str]]:
    """Split a bgzipped VCF into a list of genomic region strings whose
    compressed sizes are roughly equal.

    Exactly one of ``num_parts`` or ``target_part_size`` must be given. The
    file's .tbi or .csi index maps byte offsets to (contig, position) region
    boundaries. Returns None when a single part suffices.

    Raises:
        ValueError: on inconsistent/non-positive arguments or when no index
            file can be found.
    """
    if ((num_parts is None) and (target_part_size is None)):
        raise ValueError('One of num_parts or target_part_size must be specified')
    if ((num_parts is not None) and (target_part_size is not None)):
        raise ValueError('Only one of num_parts or target_part_size may be specified')
    if ((num_parts is not None) and (num_parts < 1)):
        raise ValueError('num_parts must be positive')
    if (target_part_size is not None):
        # Accept human-readable sizes like "100MB".
        target_part_size_bytes: int = dask.utils.parse_bytes(target_part_size)
        if (target_part_size_bytes < 1):
            raise ValueError('target_part_size must be positive')
    file_length = get_file_length(vcf_path, storage_options=storage_options)
    # Derive whichever of (part size, part count) was not given.
    if (num_parts is not None):
        target_part_size_bytes = (file_length // num_parts)
    elif (target_part_size_bytes is not None):
        num_parts = ceildiv(file_length, target_part_size_bytes)
    if (num_parts == 1):
        return None
    # Desired byte offsets of the part boundaries within the compressed file.
    part_lengths = np.array([(i * target_part_size_bytes) for i in range(num_parts)])
    if (index_path is None):
        index_path = get_tabix_path(vcf_path, storage_options=storage_options)
        if (index_path is None):
            index_path = get_csi_path(vcf_path, storage_options=storage_options)
            if (index_path is None):
                raise ValueError('Cannot find .tbi or .csi file.')
    index = read_index(index_path, storage_options=storage_options)
    sequence_names = get_sequence_names(vcf_path, index)
    (file_offsets, region_contig_indexes, region_positions) = index.offsets()
    # Snap each desired boundary to the nearest indexed offset at or after it,
    # dropping out-of-range and duplicate boundaries.
    ind = np.searchsorted(file_offsets, part_lengths)
    ind = np.delete(ind, (ind >= len(file_offsets)))
    ind = np.unique(ind)
    region_contigs = region_contig_indexes[ind]
    region_starts = region_positions[ind]
    # Build region strings; each region ends just before the next one starts.
    regions = []
    for i in range(len(region_starts)):
        contig = sequence_names[region_contigs[i]]
        start = region_starts[i]
        if (i == (len(region_starts) - 1)):
            # Last region: open-ended to the end of the contig.
            regions.append(region_string(contig, start))
        else:
            next_contig = sequence_names[region_contigs[(i + 1)]]
            next_start = region_starts[(i + 1)]
            end = (next_start - 1)
            if (next_contig == contig):
                regions.append(region_string(contig, start, end))
            else:
                # Region spans a contig boundary: close out the current
                # contig, emit whole intermediate contigs, then open the next.
                regions.append(region_string(contig, start))
                for ri in range((region_contigs[i] + 1), region_contigs[(i + 1)]):
                    regions.append(sequence_names[ri])
                regions.append(region_string(next_contig, 1, end))
    # Contigs after the last indexed boundary are emitted whole.
    for ri in range((region_contigs[(- 1)] + 1), len(sequence_names)):
        regions.append(sequence_names[ri])
    return regions
_ASSIGNERS.register_module()
# NOTE(review): the statement above looks like a class decorator that lost
# its leading '@' (and possibly a registry-name prefix) during extraction --
# confirm against the original source.
class LaneHungarianAssigner(HungarianAssigner):
    """One-to-one (Hungarian) assigner matching predicted lanes to ground
    truth lanes using a combined classification + regression cost."""

    def assign(self, lane_pred, cls_pred, gt_lanes, gt_labels, img_meta, gt_lanes_ignore=None, eps=1e-07):
        """Compute the assignment between predictions and ground truths.

        Returns an AssignResult where assigned_gt_inds is 0 for unmatched
        predictions and (gt index + 1) for matched ones.
        """
        # NOTE(review): eps and img_meta are currently unused here.
        assert (gt_lanes_ignore is None), 'Only case when gt_lanes_ignore is None is supported.'
        (num_gts, num_lanes) = (gt_lanes.size(0), lane_pred.size(0))
        # -1 marks "not computed yet"; 0 will mean "no match / background".
        assigned_gt_inds = lane_pred.new_full((num_lanes,), (- 1), dtype=torch.long)
        assigned_labels = lane_pred.new_full((num_lanes,), (- 1), dtype=torch.long)
        if ((num_gts == 0) or (num_lanes == 0)):
            if (num_gts == 0):
                # No ground truth: every prediction is background.
                assigned_gt_inds[:] = 0
            return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        reg_cost = self.reg_cost(lane_pred, gt_lanes)
        cost = (cls_cost + reg_cost)
        # The Hungarian solver runs on CPU numpy data.
        cost = cost.detach().cpu()
        (matched_row_inds, matched_col_inds) = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(lane_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(lane_pred.device)
        assigned_gt_inds[:] = 0
        # Store 1-based GT indices for matched predictions.
        assigned_gt_inds[matched_row_inds] = (matched_col_inds + 1)
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
class EntityTrigger(_TriggerType):
    """An OpenSCENARIO entity-based trigger: a named Condition wrapping an
    EntityCondition, a triggering-entity set, a delay and a condition edge,
    emitted under either a StartTrigger or StopTrigger element."""

    def __init__(self, name, delay, conditionedge, entitycondition, triggerentity, triggeringrule=TriggeringEntitiesRule.any, triggeringpoint='start'):
        self.name = name
        if (triggeringpoint not in ['start', 'stop']):
            raise ValueError('not a valid triggering point, valid start or stop')
        # 'start' -> <StartTrigger>, 'stop' -> <StopTrigger> wrapper element.
        if (triggeringpoint == 'start'):
            self._triggerpoint = 'StartTrigger'
        else:
            self._triggerpoint = 'StopTrigger'
        self.delay = convert_float(delay)
        self.conditionedge = convert_enum(conditionedge, ConditionEdge)
        if (not isinstance(entitycondition, _EntityTriggerType)):
            raise TypeError('entitycondition is not a valid EntityCondition')
        self.entitycondition = entitycondition
        self.triggerentity = TriggeringEntities(triggeringrule)
        self.triggerentity.add_entity(triggerentity)
        # Set by _set_used_by_parent when this condition is owned by a
        # ConditionGroup/Trigger, which then supplies the wrapper element.
        self._used_by_parent = False

    def __eq__(self, other):
        """Equal to another EntityTrigger with the same content, or to a
        Trigger/ConditionGroup that wraps exactly this single condition."""
        if isinstance(other, EntityTrigger):
            if ((self.get_attributes() == other.get_attributes()) and (self.triggerentity == other.triggerentity) and (self.entitycondition == other.entitycondition) and (self._triggerpoint == other._triggerpoint)):
                return True
        elif isinstance(other, Trigger):
            if ((len(other.conditiongroups) == 1) and (len(other.conditiongroups[0].conditions) == 1)):
                if ((self._triggerpoint == other._triggerpoint) and (other.conditiongroups[0].conditions[0] == self)):
                    return True
        elif isinstance(other, ConditionGroup):
            if (len(other.conditions) == 1):
                if ((self._triggerpoint == other._triggerpoint) and (other.conditions[0] == self)):
                    return True
        return False

    def parse(element):
        """Build an EntityTrigger from a <Condition> XML element.

        NOTE(review): defined without @staticmethod; works in Python 3 when
        called via the class (EntityTrigger.parse(element)) -- confirm the
        intended call style.
        """
        if (element.tag != 'Condition'):
            raise NotAValidElement('ValueTrigger only parses a Condition, not ', element)
        name = element.attrib['name']
        delay = convert_float(element.attrib['delay'])
        conditionedge = convert_enum(element.attrib['conditionEdge'], ConditionEdge)
        entityconditionelement = element.find('ByEntityCondition')
        triggering_entities = TriggeringEntities.parse(entityconditionelement.find('TriggeringEntities'))
        condition = _EntityConditionFactory.parse_entity_condition(entityconditionelement.find('EntityCondition'))
        # A placeholder '' entity is passed here; the real triggering
        # entities (parsed above) replace it on the next line.
        enttrig = EntityTrigger(name, delay, conditionedge, condition, '')
        enttrig.triggerentity = triggering_entities
        return enttrig

    def _set_used_by_parent(self):
        """Mark this condition as owned by a parent trigger/condition group."""
        self._used_by_parent = True

    def add_triggering_entity(self, triggerentity):
        """Add another triggering entity; returns self for chaining."""
        self.triggerentity.add_entity(triggerentity)
        return self

    def get_attributes(self):
        """Attributes of the <Condition> element."""
        return {'name': self.name, 'delay': str(self.delay), 'conditionEdge': self.conditionedge.get_name()}

    def get_element(self):
        """Return the XML element tree: the bare <Condition> when owned by a
        parent, otherwise wrapped in <Start/StopTrigger><ConditionGroup>."""
        condition = ET.Element('Condition', attrib=self.get_attributes())
        byentity = ET.SubElement(condition, 'ByEntityCondition')
        byentity.append(self.triggerentity.get_element())
        byentity.append(self.entitycondition.get_element())
        if self._used_by_parent:
            return condition
        else:
            element = ET.Element(self._triggerpoint)
            condgroup = ET.SubElement(element, 'ConditionGroup')
            condgroup.append(condition)
            return element
class ToggleTheme(Mutation):
    """GraphQL mutation that flips the requesting user's theme between
    Author.Theme.DARK and Author.Theme.LIGHT and returns the new value."""
    theme = String()
    _required
    # NOTE(review): the bare expression above appears to be a decorator that
    # lost its '@' and name prefix (e.g. '@login_required') during
    # extraction -- restore from version control.
    def mutate(_root, info):
        user = info.context.user
        if (user.theme == Author.Theme.DARK):
            user.theme = Author.Theme.LIGHT
            # Persist only the changed column.
            user.save(update_fields=['theme'])
            return ToggleTheme(user.theme)
        user.theme = Author.Theme.DARK
        user.save(update_fields=['theme'])
        return ToggleTheme(user.theme)
class LogReader(object):
    """Parse a training log file: run arguments, completion status and the
    best checkpoint (score, episode) recorded in it."""

    def __init__(self, path):
        self.path = path
        with open(self.path, 'r') as handle:
            self.doc = handle.read()

    def isfloat(self, str):
        """Return True when the token parses as a float."""
        try:
            float(str)
        except ValueError:
            return False
        return True

    def casttype(self, str):
        """Cast a string token to int, float or bool when possible; otherwise
        return it unchanged."""
        if str.isdigit():
            return int(str)
        if self.isfloat(str):
            return float(str)
        if str in ('True', 'False'):
            return str == 'True'
        return str

    def finished(self):
        """Whether the log records a completed training run."""
        return 'training finished' in self.doc

    def getArgs(self):
        """Return the run arguments listed between the 'Specifications' and
        'Checkpoints:' sections as a {name: value} dict."""
        section = self.doc.split('Specifications\n')[-1].split('Checkpoints:\n')[0]
        parsed = {}
        for entry in section.split('\n'):
            pieces = entry.split(' : ')
            parsed[pieces[0]] = self.casttype(pieces[-1])
        return parsed

    def getBest(self):
        """Return (best_score, best_episode) from the checkpoint section;
        the last 'current best loglik' line wins. ('', '') when absent."""
        best_score = ''
        best_episode = ''
        for entry in self.doc.split('Checkpoints:\n')[-1].split('\n'):
            if 'current best loglik is' in entry:
                best_score = entry.split('current best loglik is ')[-1].split(' (updated at')[0]
                best_episode = entry.split('(updated at episode-')[-1].split(')')[0]
        return (self.casttype(best_score), self.casttype(best_episode))

    def getAll(self):
        """Run arguments plus '_best_score' / '_best_episode' in one dict."""
        merged = self.getArgs()
        (best_score, best_episode) = self.getBest()
        merged['_best_score'] = best_score
        merged['_best_episode'] = best_episode
        return merged
class MBConvBlock(nn.Module):
    """Mobile Inverted Residual Bottleneck (MBConv) block with a
    squeeze-and-excitation stage, as used in EfficientNet-style models.

    Pipeline: optional 1x1 expansion -> depthwise conv -> SE gating ->
    1x1 projection, with a residual skip when stride is 1 and the input and
    output channel counts match.
    """

    def __init__(self, ksize, input_filters, output_filters, expand_ratio=1, stride=1, image_size=224):
        super().__init__()
        # BatchNorm hyperparameters shared by all BN layers in the block.
        self._bn_mom = 0.1
        self._bn_eps = 0.01
        # SE reduction ratio relative to the block's input channels.
        self._se_ratio = 0.25
        self._input_filters = input_filters
        self._output_filters = output_filters
        self._expand_ratio = expand_ratio
        self._kernel_size = ksize
        self._stride = stride
        inp = self._input_filters
        # Expanded (inner) channel count.
        oup = (self._input_filters * self._expand_ratio)
        # Expansion phase: only needed when the block actually expands.
        if (self._expand_ratio != 1):
            Conv2d = get_same_padding_conv2d(image_size=image_size)
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        # Depthwise convolution (groups == channels).
        k = self._kernel_size
        s = self._stride
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._depthwise_conv = Conv2d(in_channels=oup, out_channels=oup, groups=oup, kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        # Spatial size shrinks by the stride before the projection conv.
        image_size = calculate_output_image_size(image_size, s)
        # Squeeze-and-excitation: operates on 1x1 pooled features.
        Conv2d = get_same_padding_conv2d(image_size=(1, 1))
        num_squeezed_channels = max(1, int((self._input_filters * self._se_ratio)))
        self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
        self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
        # Pointwise projection back to the block's output channels.
        final_oup = self._output_filters
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._swish = MemoryEfficientSwish()

    def forward(self, inputs, drop_connect_rate=None):
        """Run the block; ``drop_connect_rate`` (stochastic depth) is applied
        only on the residual path."""
        x = inputs
        if (self._expand_ratio != 1):
            expand = self._expand_conv(inputs)
            bn0 = self._bn0(expand)
            x = self._swish(bn0)
        depthwise = self._depthwise_conv(x)
        bn1 = self._bn1(depthwise)
        x = self._swish(bn1)
        # Squeeze-and-excitation: global pool -> reduce -> expand -> gate.
        x_squeezed = F.adaptive_avg_pool2d(x, 1)
        x_squeezed = self._se_reduce(x_squeezed)
        x_squeezed = self._swish(x_squeezed)
        x_squeezed = self._se_expand(x_squeezed)
        x = (torch.sigmoid(x_squeezed) * x)
        x = self._bn2(self._project_conv(x))
        # Residual connection only when shapes are guaranteed to match.
        (input_filters, output_filters) = (self._input_filters, self._output_filters)
        if ((self._stride == 1) and (input_filters == output_filters)):
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = (x + inputs)
        return x
def _resolve_transformers_model(model_family):
    """Map a model-family identifier to its transformers model class.

    Raises ValueError (listing the supported families) for unknown ids.
    """
    registry = {'mbart50': transformers.MBartForConditionalGeneration, 'm2m100': transformers.M2M100ForConditionalGeneration, 'nllb200': transformers.AutoModelForSeq2SeqLM}
    if (model_family not in registry):
        error_msg = f'{model_family} is not a valid value for model_family. Please choose model_family to be equal to one of the following values: {list(registry.keys())}'
        raise ValueError(error_msg)
    return registry[model_family]
def verify_model_dtype(model):
    """Print a per-dtype summary of a model's parameters.

    For each parameter dtype, reports the total element count and its share,
    first over all parameters and then over trainable ones, followed by the
    per-dtype parameter name lists. Diagnostic only; returns None.

    Args:
        model: an object exposing ``named_parameters()`` (e.g. torch.nn.Module).
    """
    dtype2param_num = defaultdict(int)
    dtype2param_name = defaultdict(list)
    dtype2trainable_param_num = defaultdict(int)
    dtype2trainable_param_name = defaultdict(list)
    for (name, p) in model.named_parameters():
        dtype = p.dtype
        dtype2param_num[dtype] += p.numel()
        dtype2param_name[dtype].append(name)
        if p.requires_grad:
            dtype2trainable_param_num[dtype] += p.numel()
            dtype2trainable_param_name[dtype].append(name)
    # --- all parameters ---
    total = 0
    print('verify all params of the model')
    for (k, v) in dtype2param_num.items():
        total += v
    for (k, v) in dtype2param_num.items():
        print(k, v, (v / total))
    for (k, v) in dtype2trainable_param_name.items():
        print(k, v)
    print()
    # --- trainable parameters only ---
    print('verify trainable params the model')
    total_trainable = 0
    for (k, v) in dtype2trainable_param_num.items():
        total_trainable += v
    for (k, v) in dtype2trainable_param_num.items():
        print(k, v, (v / total_trainable))
    # Fix: the original re-iterated dtype2trainable_param_num here, printing
    # the same counts twice; the parallel section above prints name lists,
    # so print the trainable parameter names instead.
    for (k, v) in dtype2trainable_param_name.items():
        print(k, v)
# Fix: the decorator lost its '@pytest.mark' prefix, leaving a bare
# '.parametrize(...)' line that is a SyntaxError.
@pytest.mark.parametrize('manager', [BareConfig, ManagerConfig], indirect=True)
def test_setgroup(manager):
    """Moving groups between screens keeps group/screen bookkeeping
    consistent, and toscreen(toggle=True) returns to the previous group."""
    manager.test_window('one')
    manager.c.group['b'].toscreen()
    manager.groupconsistency()
    if (len(manager.c.get_screens()) == 1):
        # Single screen: group 'a' was displaced and has no screen.
        assert (manager.c.get_groups()['a']['screen'] is None)
    else:
        assert (manager.c.get_groups()['a']['screen'] == 1)
    assert (manager.c.get_groups()['b']['screen'] == 0)
    manager.c.group['c'].toscreen()
    manager.groupconsistency()
    assert (manager.c.get_groups()['c']['screen'] == 0)
    # Toggling back to 'c' (already current) switches to the previous group.
    manager.c.group['c'].toscreen(toggle=True)
    manager.groupconsistency()
    assert (manager.c.group.info()['name'] == 'b')
def cmdparser(raw_string, cmdset, caller, match_index=None):
    """Parse ``raw_string`` against ``cmdset`` and return candidate matches.

    Filters matches by the caller's 'cmd' access lock, then successively
    narrows multiple matches: exact-prefix matches first, then by the two
    match-quality fields (tuple indices 3 and 4), and finally by an optional
    1-based ``match_index`` (e.g. produced by a '2-look'-style prefix).
    """
    if (not raw_string):
        return []
    matches = build_matches(raw_string, cmdset, include_prefixes=True)
    if (not matches):
        # No hit: maybe a numeric-prefix disambiguation like '2-look'.
        (mindex, new_raw_string) = try_num_prefixes(raw_string)
        if (mindex is not None):
            return cmdparser(new_raw_string, cmdset, caller, match_index=int(mindex))
        if _CMD_IGNORE_PREFIXES:
            # Retry once with ignorable prefix characters stripped (but never
            # strip a single-character input down to nothing).
            raw_string = (raw_string.lstrip(_CMD_IGNORE_PREFIXES) if (len(raw_string) > 1) else raw_string)
            matches = build_matches(raw_string, cmdset, include_prefixes=False)
    # Drop matches the caller is not allowed to execute.
    matches = [match for match in matches if match[2].access(caller, 'cmd')]
    if (len(matches) > 1):
        # Prefer matches whose command name is an exact prefix of the input.
        trimmed = [match for match in matches if raw_string.startswith(match[0])]
        if trimmed:
            matches = trimmed
    if (len(matches) > 1):
        # Keep only the top-quality group on field 3, then field 4.
        matches = sorted(matches, key=(lambda m: m[3]))
        quality = [mat[3] for mat in matches]
        matches = matches[(- quality.count(quality[(- 1)])):]
        if (len(matches) > 1):
            matches = sorted(matches, key=(lambda m: m[4]))
            quality = [mat[4] for mat in matches]
            matches = matches[(- quality.count(quality[(- 1)])):]
    if ((len(matches) > 1) and (match_index is not None) and (0 < match_index <= len(matches))):
        # Caller picked one of several matches by 1-based index.
        matches = [matches[(match_index - 1)]]
    return matches
class F9_RaidData(F7_RaidData):
    """RaidData for Fedora 9: drops the bytesPerInode option and adds the
    fsprofile / encryption / passphrase options."""
    removedKeywords = (F7_RaidData.removedKeywords + ['bytesPerInode'])
    removedAttrs = (F7_RaidData.removedAttrs + ['bytesPerInode'])

    def __init__(self, *args, **kwargs):
        F7_RaidData.__init__(self, *args, **kwargs)
        self.deleteRemovedAttrs()
        # Options new in F9.
        self.fsprofile = kwargs.get('fsprofile', '')
        self.encrypted = kwargs.get('encrypted', False)
        self.passphrase = kwargs.get('passphrase', '')

    def _getArgsAsStr(self):
        """Serialize to kickstart arguments, appending the F9-only flags."""
        pieces = [F7_RaidData._getArgsAsStr(self)]
        if self.fsprofile:
            pieces.append(' --fsprofile="%s"' % self.fsprofile)
        if self.encrypted:
            pieces.append(' --encrypted')
        if self.passphrase:
            pieces.append(' --passphrase="%s"' % self.passphrase)
        return ''.join(pieces)
# Fix: the decorator lost its '@' and name prefix during extraction; the
# remaining '_element(invalid_data_behavior={...})' matches zipline's
# @expect_element argument-validation decorator.
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def winsorise_uint32(df, invalid_data_behavior, column, *columns):
    """Clamp the given columns of ``df`` so they fit in uint32.

    Values above UINT32_MAX (and, unless behavior is 'ignore', NaNs) are set
    to 0; depending on ``invalid_data_behavior`` the offending rows either
    raise, warn, or are silently zeroed. Returns the (mutated) frame.
    """
    columns = list(((column,) + columns))
    mask = (df[columns] > UINT32_MAX)
    if (invalid_data_behavior != 'ignore'):
        # NaNs count as invalid too when we are going to warn/raise.
        mask |= df[columns].isnull()
    else:
        # 'ignore': coerce NaNs to 0 without reporting them.
        df[columns] = np.nan_to_num(df[columns])
    mv = mask.values
    if mv.any():
        if (invalid_data_behavior == 'raise'):
            raise ValueError(('%d values out of bounds for uint32: %r' % (mv.sum(), df[mask.any(axis=1)])))
        if (invalid_data_behavior == 'warn'):
            warnings.warn(('Ignoring %d values because they are out of bounds for uint32: %r' % (mv.sum(), df[mask.any(axis=1)])), stacklevel=3)
    df[mask] = 0
    return df
def test_create_spawn_point_field(echoes_game_description, echoes_pickup_database, empty_patches):
    # Build patches with Morph Ball as an extra starting pickup at a known
    # spawn node, then check the generated spawn-point patch data.
    resource_db = echoes_game_description.resource_database
    morph = pickup_creator.create_standard_pickup(echoes_pickup_database.get_pickup_with_name('Morph Ball'), StandardPickupState(), resource_db, None, False)
    loc = NodeIdentifier.create('Temple Grounds', 'Hive Chamber B', 'Door to Hive Storage')
    patches = empty_patches.assign_starting_location(loc).assign_extra_starting_pickups([morph])
    # Expected capacities: 1 for MorphBall, 0 for every other item id < 1000.
    capacities = [{'amount': (1 if (item.short_name == 'MorphBall') else 0), 'index': item.extra['item_id']} for item in resource_db.item if (item.extra['item_id'] < 1000)]
    result = patch_data_factory._create_spawn_point_field(patches, echoes_game_description)
    # NOTE(review): the expected asset ids below were lost during extraction
    # ("'world_asset_id': ," / "'area_asset_id': ,"), leaving invalid
    # syntax -- restore the literal values from version control.
    assert (result == {'location': {'world_asset_id': , 'area_asset_id': }, 'amount': capacities, 'capacity': capacities})
def test_kw_only_decorator() -> None:
    """dataclass(kw_only=True) moves fields into __init__'s keyword-only
    section on Python >= 3.10; inherited kw_only fields stay keyword-only."""
    # Fix: the '@dataclass' decorators and the '#@' astroid extraction
    # markers were stripped from the snippet, so extract_node returned
    # nothing and the classes were not dataclasses; reconstructed below.
    (foodef, bardef, cee, dee) = astroid.extract_node(
        """
    from dataclasses import dataclass

    @dataclass(kw_only=True)
    class Foo:
        a: int
        e: str


    @dataclass(kw_only=False)
    class Bar(Foo):
        c: int


    @dataclass(kw_only=False)
    class Cee(Bar):
        d: int


    @dataclass(kw_only=True)
    class Dee(Cee):
        ee: int


    Foo.__init__  #@
    Bar.__init__  #@
    Cee.__init__  #@
    Dee.__init__  #@
    """
    )
    foo_init: bases.UnboundMethod = next(foodef.infer())
    if PY310_PLUS:
        assert ([a.name for a in foo_init.args.args] == ['self'])
        assert ([a.name for a in foo_init.args.kwonlyargs] == ['a', 'e'])
    else:
        # Before 3.10 kw_only is ignored: everything is positional-or-keyword.
        assert ([a.name for a in foo_init.args.args] == ['self', 'a', 'e'])
        assert ([a.name for a in foo_init.args.kwonlyargs] == [])
    bar_init: bases.UnboundMethod = next(bardef.infer())
    if PY310_PLUS:
        assert ([a.name for a in bar_init.args.args] == ['self', 'c'])
        assert ([a.name for a in bar_init.args.kwonlyargs] == ['a', 'e'])
    else:
        assert ([a.name for a in bar_init.args.args] == ['self', 'a', 'e', 'c'])
        assert ([a.name for a in bar_init.args.kwonlyargs] == [])
    cee_init: bases.UnboundMethod = next(cee.infer())
    if PY310_PLUS:
        assert ([a.name for a in cee_init.args.args] == ['self', 'c', 'd'])
        assert ([a.name for a in cee_init.args.kwonlyargs] == ['a', 'e'])
    else:
        assert ([a.name for a in cee_init.args.args] == ['self', 'a', 'e', 'c', 'd'])
        assert ([a.name for a in cee_init.args.kwonlyargs] == [])
    dee_init: bases.UnboundMethod = next(dee.infer())
    if PY310_PLUS:
        assert ([a.name for a in dee_init.args.args] == ['self', 'c', 'd'])
        assert ([a.name for a in dee_init.args.kwonlyargs] == ['a', 'e', 'ee'])
    else:
        assert ([a.name for a in dee_init.args.args] == ['self', 'a', 'e', 'c', 'd', 'ee'])
        assert ([a.name for a in dee_init.args.kwonlyargs] == [])
def ground_union(head_grounding, sql_unit, i_op, qdmr, sql_spider_data, table_data, grounding_out):
    """Propagate a grounding through a QDMR 'union' op to its arguments.

    Two supported shapes: one grounding per union argument (dispatched
    pairwise), or a single 'none'-aggregated grounding broadcast to every
    argument ("vertical" union). Dispatches recursively through the global
    ``op_grounder`` table; results accumulate in ``grounding_out``.
    """
    assert (qdmr.ops[i_op] == 'union')
    args = qdmr.args[i_op]
    if (len(head_grounding) == len(args)):
        # One grounding per union argument: dispatch pairwise.
        for (grnd, arg) in zip(head_grounding, args):
            i_op_arg = QdmrInstance.ref_to_index(arg, max_index=i_op)
            op_grounder[qdmr.ops[i_op_arg]]([grnd], sql_unit, i_op_arg, qdmr, sql_spider_data, table_data, grounding_out)
    elif (len(head_grounding) == 1):
        # Single grounding broadcast to all arguments; only a 'none'
        # aggregator is meaningful in this vertical case.
        assert (head_grounding[0][0] == 'none'), 'Non none aggregator in vertical grounding'
        grnd = ('none', head_grounding[0][1])
        for arg in args:
            i_op_arg = QdmrInstance.ref_to_index(arg, max_index=i_op)
            op_grounder[qdmr.ops[i_op_arg]]([grnd], sql_unit, i_op_arg, qdmr, sql_spider_data, table_data, grounding_out)
    else:
        raise NotImplementedError('Unknown type of union op')
class DevDataset(Dataset):
    """GrailQA dev split: extends each raw example with linearised model
    inputs (text_in, struct_in) and the target s-expression (seq_out),
    caching the extended examples on disk."""

    def __init__(self, args, raw_datasets, cache_root):
        self.raw_datasets = raw_datasets
        cache_path = os.path.join(cache_root, 'grailqa_dev.cache')
        if (os.path.exists(cache_path) and args.dataset.use_cache):
            # Warm cache: load the fully-extended examples directly.
            self.extended_data = torch.load(cache_path)
        else:
            self.extended_data = []
            for raw_data in tqdm(self.raw_datasets):
                # Copy so the raw dataset entries stay unmodified.
                extend_data = copy.deepcopy(raw_data)
                question = raw_data['question']
                entity = grailqa_get_entity(raw_data)
                schema = grailqa_get_schema(raw_data)
                # Linearise question + entities + schema into model inputs.
                (text_in, struct_in) = grailqa_get_input(question, entity, schema)
                # The target sequence is the gold s-expression.
                seq_out = raw_data['s_expression']
                extend_data.update({'text_in': text_in, 'struct_in': struct_in, 'seq_out': seq_out})
                self.extended_data.append(extend_data)
            if args.dataset.use_cache:
                torch.save(self.extended_data, cache_path)

    def __getitem__(self, index) -> T_co:
        return self.extended_data[index]

    def __len__(self):
        return len(self.extended_data)
# Fix: the decorator lost its '@' and name prefix during extraction; the
# '_fixtures(GitFixture)' remnant plus the 'git_fixture' parameter match the
# reahl '@with_fixtures' fixture-injection decorator.
@with_fixtures(GitFixture)
def test_is_version_controlled(git_fixture):
    """A directory reports as version controlled only after `git init`."""
    fixture = git_fixture
    non_initialised_directory = fixture.new_git_directory(initialised=False)
    git = Git(non_initialised_directory.name)
    assert (not git.is_version_controlled())
    git = Git(fixture.git_directory.name)
    assert git.is_version_controlled()
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename OpenAI Jukebox checkpoint keys to the HF transformers layout.

    Each compiled regex matches one family of original module paths
    (encoder/decoder conv blocks, resnet blocks, projections, prior
    conditioner blocks) and is rewritten to the corresponding transformers
    submodule path. Keys whose converted name is missing from the target
    model's state dict, or whose tensor shape mismatches, are reported and
    kept under their original name.

    Returns the renamed state dict; also records new_key -> original_key in
    ``mapping`` (mutated in place).
    """
    new_dict = {}
    import re
    # Regexes for the original Jukebox module-path families.
    re_encoder_block_conv_in = re.compile('encoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(\\d).(bias|weight)')
    re_encoder_block_resnet = re.compile('encoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(\\d).model.(\\d*).model.(\\d*).(bias|weight)')
    re_encoder_block_proj_out = re.compile('encoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(bias|weight)')
    re_decoder_block_conv_out = re.compile('decoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(\\d).(bias|weight)')
    re_decoder_block_resnet = re.compile('decoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(\\d).model.(\\d*).model.(\\d*).(bias|weight)')
    re_decoder_block_proj_in = re.compile('decoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(bias|weight)')
    re_prior_cond_conv_out = re.compile('conditioner_blocks.(\\d*).cond.model.(\\d*).(\\d).(bias|weight)')
    re_prior_cond_resnet = re.compile('conditioner_blocks.(\\d*).cond.model.(\\d*).(\\d).model.(\\d*).model.(\\d*).(bias|weight)')
    re_prior_cond_proj_in = re.compile('conditioner_blocks.(\\d*).cond.model.(\\d*).(bias|weight)')
    for (original_key, value) in state_dict.items():
        # NOTE: the *_resnet patterns are longer/more specific than the
        # *_conv patterns, and the elif order determines which one wins.
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            # Flatten the two nested model indices into one block index.
            block_index = ((int(groups[2]) * 2) + int(groups[3]))
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[(- 1)]}'
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = ((int(groups[2]) * 2) + int(groups[3]))
            # Original conv position 1 -> conv1d_1, position 3 -> conv1d_2.
            conv_index = {'1': 1, '3': 2}[groups[(- 2)]]
            prefix = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[(- 3)]}.conv1d_{conv_index}.{groups[(- 1)]}'
            re_new_key = (prefix + resnet_block)
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[(- 1)]}'
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            # Decoder indices are shifted by 2 relative to the encoder.
            block_index = (((int(groups[2]) * 2) + int(groups[3])) - 2)
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[(- 1)]}'
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = (((int(groups[2]) * 2) + int(groups[3])) - 2)
            conv_index = {'1': 1, '3': 2}[groups[(- 2)]]
            prefix = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[(- 3)]}.conv1d_{conv_index}.{groups[(- 1)]}'
            re_new_key = (prefix + resnet_block)
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[(- 1)]}'
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = (((int(groups[1]) * 2) + int(groups[2])) - 2)
            re_new_key = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[(- 1)]}'
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = (((int(groups[1]) * 2) + int(groups[2])) - 2)
            conv_index = {'1': 1, '3': 2}[groups[(- 2)]]
            prefix = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[(- 3)]}.conv1d_{conv_index}.{groups[(- 1)]}'
            re_new_key = (prefix + resnet_block)
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'conditioner_blocks.upsampler.proj_in.{groups[(- 1)]}'
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        else:
            key = original_key
        # Project helper applies any remaining (non-regex) renames.
        key = replace_key(key)
        if ((f'{key_prefix}.{key}' not in model_state_dict) or (key is None)):
            # NOTE(review): the 'key is None' test only fires if replace_key
            # can return None, and is evaluated after key was already used in
            # the f-string above -- confirm the intended order.
            print(f'failed converting {original_key} to {key}, does not match')
        elif (value.shape != model_state_dict[f'{key_prefix}.{key}'].shape):
            val = model_state_dict[f'{key_prefix}.{key}']
            print(f'''{original_key}-> {key} :
shape {val.shape} and {value.shape}, do not match''')
            # Shape mismatch: fall back to the original key rather than
            # silently dropping the weight.
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
def insert_cylinder(arr, cyl_radius=4, cyl_height=2, cyl_centre=(0, 0, 0)):
    """Return a copy of the 3-D array ``arr`` with a solid cylinder of ones.

    The cross-section is elliptical in the (axis-2, axis-1) plane with
    semi-axes ``cyl_radius`` (scalar or pair), the full extent along axis 0
    is ``cyl_height``, and ``cyl_centre`` supplies the centre as
    (axis-2 centre, axis-1 centre, axis-0 centre) matching the conditions
    below. The input array is left untouched.
    """
    # Fix: arr[:] returns a *view* for numpy arrays, so the original code
    # mutated the caller's array; take an explicit copy instead.
    arr_copy = arr.copy()
    (x, y, z) = np.indices(arr.shape)
    if (not hasattr(cyl_radius, '__iter__')):
        # Scalar radius -> circular cross-section (equal semi-axes).
        cyl_radius = ([cyl_radius] * 2)
    # Elliptical cross-section condition in the (z, y) plane.
    condition_radial = (((((z - cyl_centre[0]) / cyl_radius[0]) ** 2) + (((y - cyl_centre[1]) / cyl_radius[1]) ** 2)) <= 1)
    # Height window of +/- cyl_height/2 along axis 0 around cyl_centre[2].
    condition_height = (np.abs(((x - cyl_centre[2]) / (0.5 * cyl_height))) <= 1)
    arr_copy[(condition_radial & condition_height)] = 1
    return arr_copy
def _get_dataclass_attributes(node: nodes.ClassDef, init: bool=False) -> Iterator[nodes.AnnAssign]:
    """Yield the annotated assignments in *node*'s body that count as
    dataclass attributes.

    ClassVar annotations and KW_ONLY sentinels are always skipped; InitVar
    annotations are skipped unless *init* is True.
    """
    for stmt in node.body:
        is_attribute = (
            isinstance(stmt, nodes.AnnAssign)
            and isinstance(stmt.target, nodes.AssignName)
            and not _is_class_var(stmt.annotation)
            and not _is_keyword_only_sentinel(stmt.annotation)
            and (init or not _is_init_var(stmt.annotation))
        )
        if is_attribute:
            yield stmt
# Fix: the decorator lost its '@' and name prefix during extraction; the
# '_2_unicode_compatible' remnant matches @python_2_unicode_compatible
# (Django/six py2+py3 __str__ compatibility shim).
@python_2_unicode_compatible
class ConferenceModerator(AuditModel):
    """Through-model linking a moderator (User) to a Conference; a user can
    moderate a given conference at most once."""
    conference = models.ForeignKey(Conference, related_name='moderators', on_delete=models.CASCADE)
    moderator = models.ForeignKey(User, on_delete=models.CASCADE)
    active = models.BooleanField(default=True, verbose_name='Is Active?')

    class Meta():
        unique_together = ('conference', 'moderator')
        verbose_name = 'moderator'
        verbose_name_plural = 'moderators'

    def __str__(self):
        return '{}[{}]'.format(self.moderator.get_full_name(), self.conference)
class MCSls(object):
    """Minimal correction subset (MCS) enumerator using basic linear search.

    A SAT oracle is bootstrapped with the hard clauses of the input formula;
    each soft clause is relaxed with a fresh selector literal so that it can
    be switched on/off through solver assumptions.
    """
    def __init__(self, formula, use_cld=False, solver_name='m22', use_timer=False):
        """Set up the oracle and relax the soft clauses of *formula*.

        :param formula: input (W)CNF(+) formula
        :param use_cld: enable the CLD ("clause D") heuristic in _compute
        :param solver_name: name of the underlying SAT solver
        :param use_timer: accumulate solver running time
        """
        self.oracle = Solver(name=solver_name, bootstrap_with=formula.hard, use_timer=use_timer)
        self.solver = solver_name
        if (isinstance(formula, WCNFPlus) and formula.atms):
            # Native at-most-k constraints require solver support.
            assert self.oracle.supports_atmost(), '{0} does not support native cardinality constraints. Make sure you use the right type of formula.'.format(solver_name)
            for atm in formula.atms:
                self.oracle.add_atmost(*atm)
        self.topv = formula.nv
        self.sels = []
        self.ucld = use_cld
        self.smap = {}
        # External<->internal variable maps; start as identity for original vars.
        VariableMap = collections.namedtuple('VariableMap', ['e2i', 'i2e'])
        self.vmap = VariableMap(e2i={}, i2e={})
        for v in range(1, (formula.nv + 1)):
            self.vmap.e2i[v] = v
            self.vmap.i2e[v] = v
        # Relax every soft clause with a selector literal; a positive unit
        # soft clause serves as its own selector.
        for cl in formula.soft:
            new_cl = cl[:]
            if ((len(cl) > 1) or (cl[0] < 0)):
                self.topv += 1
                sel = self.topv
                new_cl.append((- sel))
                self.oracle.add_clause(new_cl)
            else:
                sel = cl[0]
            self.sels.append(sel)
            self.smap[sel] = len(self.sels)
    def __del__(self):
        """Destructor: release the SAT oracle."""
        self.delete()
    def __enter__(self):
        """Context-manager entry."""
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Context-manager exit: release the SAT oracle."""
        self.delete()
    def delete(self):
        """Explicitly free the underlying SAT oracle (idempotent)."""
        if self.oracle:
            self.oracle.delete()
            self.oracle = None
    def add_clause(self, clause, soft=False):
        """Add a new (hard or soft) clause; soft clauses get a selector.

        A value of the form ``[lits, bound]`` is treated as a native
        at-most constraint when hard.
        """
        # Map external literals to internal ones first.
        cl = list(map((lambda l: self._map_extlit(l)), (clause if ((not (len(clause) == 2)) or (not (type(clause[0]) in (list, tuple, set)))) else clause[0])))
        if (not soft):
            if ((not (len(clause) == 2)) or (not (type(clause[0]) in (list, tuple, set)))):
                self.oracle.add_clause(cl)
            else:
                assert self.oracle.supports_atmost(), '{0} does not support native cardinality constraints. Make sure you use the right type of formula.'.format(self.solver)
                self.oracle.add_atmost(cl, clause[1])
        else:
            # Soft clause: relax with a selector unless it is a positive unit.
            sel = cl[0]
            if ((len(cl) > 1) or (cl[0] < 0)):
                self.topv += 1
                sel = self.topv
                self.oracle.add_clause((cl + [(- sel)]))
            self.sels.append(sel)
            self.smap[sel] = len(self.sels)
    def compute(self, enable=[]):
        """Compute and return one MCS (as 1-based soft-clause ids) or None.

        :param enable: soft-clause ids forced to be satisfied
        """
        self.setd = []
        self.solution = None
        self.bb_assumps = []
        self.ss_assumps = []
        if self.oracle.solve(assumptions=[self.sels[(cl_id - 1)] for cl_id in enable]):
            self._overapprox()
            self._compute()
            self.solution = [self.smap[(- l)] for l in self.bb_assumps]
        return self.solution
    def enumerate(self):
        """Generator over all MCSes; caller must block() each one to advance."""
        done = False
        while (not done):
            mcs = self.compute()
            if (mcs != None):
                (yield mcs)
            else:
                done = True
    def block(self, mcs):
        """Block a previously computed MCS so it is not reported again."""
        self.oracle.add_clause([self.sels[(cl_id - 1)] for cl_id in mcs])
    def _overapprox(self):
        """Split selectors into satisfied (ss) and undecided (setd) sets
        based on the last model."""
        model = self.oracle.get_model()
        for sel in self.sels:
            if ((len(model) < sel) or (model[(sel - 1)] > 0)):
                self.ss_assumps.append(sel)
            else:
                self.setd.append(sel)
    def _compute(self):
        """Linear search refining the over-approximation into an exact MCS."""
        i = 0
        while (i < len(self.setd)):
            if self.ucld:
                self.do_cld_check(self.setd[i:])
                i = 0
            if self.setd:
                # Try to additionally satisfy the next undecided selector.
                self.ss_assumps.append(self.setd[i])
                if (not self.oracle.solve(assumptions=(self.ss_assumps + self.bb_assumps))):
                    # Unsatisfiable -> the clause belongs to the MCS.
                    self.ss_assumps.pop()
                    self.bb_assumps.append((- self.setd[i]))
            i += 1
    def do_cld_check(self, cld):
        """CLD heuristic: try to satisfy a disjunction of undecided selectors
        in one oracle call and classify them all at once."""
        self.topv += 1
        sel = self.topv
        cld.append((- sel))
        self.oracle.add_clause(cld)
        self.ss_assumps.append(sel)
        self.setd = []
        self.oracle.solve(assumptions=(self.ss_assumps + self.bb_assumps))
        self.ss_assumps.pop()
        if (self.oracle.get_status() == True):
            model = self.oracle.get_model()
            for l in cld[:(- 1)]:
                if (model[(abs(l) - 1)] > 0):
                    self.ss_assumps.append(l)
                else:
                    self.setd.append(l)
        else:
            # None of them can be satisfied -> all go to the MCS.
            self.bb_assumps.extend([(- l) for l in cld[:(- 1)]])
        # Deactivate the auxiliary selector permanently.
        self.oracle.add_clause([(- sel)])
    def _map_extlit(self, l):
        """Map an external literal to its internal representation, creating
        a fresh internal variable if needed."""
        v = abs(l)
        if (v in self.vmap.e2i):
            return int(copysign(self.vmap.e2i[v], l))
        else:
            self.topv += 1
            self.vmap.e2i[v] = self.topv
            self.vmap.i2e[self.topv] = v
            return int(copysign(self.topv, l))
    def oracle_time(self):
        """Return the accumulated SAT-solver time."""
        return self.oracle.time_accum()
def order_and_id_of_texts(found_polygons_text_region, found_polygons_text_region_h, matrix_of_orders, indexes_sorted, index_of_types, kind_of_texts, ref_point):
    """Assign region ids and reading-order positions to text and header regions.

    Regions of kind 1 are plain text regions, kind 2 are header regions;
    *indexes_sorted* gives the global reading order. Returns a pair
    ``(order_of_texts, id_of_texts)``. *matrix_of_orders* is accepted but
    not used here.
    """
    indexes_sorted = np.array(indexes_sorted)
    index_of_types = np.array(index_of_types)
    kind_of_texts = np.array(kind_of_texts)
    id_of_texts = []
    order_of_texts = []
    # Split the sorted order data by region kind (1 = text, 2 = header).
    index_of_types_1 = index_of_types[(kind_of_texts == 1)]
    indexes_sorted_1 = indexes_sorted[(kind_of_texts == 1)]
    index_of_types_2 = index_of_types[(kind_of_texts == 2)]
    indexes_sorted_2 = indexes_sorted[(kind_of_texts == 2)]
    counter = EynollahIdCounter(region_idx=ref_point)
    for (idx_textregion, _) in enumerate(found_polygons_text_region):
        id_of_texts.append(counter.next_region_id)
        # Boolean mask lookup; guarded because the match may be empty.
        # NOTE(review): when empty, an id is appended without a matching
        # order entry — confirm that callers tolerate mismatched lengths.
        interest = indexes_sorted_1[(indexes_sorted_1 == index_of_types_1[idx_textregion])]
        if (len(interest) > 0):
            order_of_texts.append(interest[0])
    for (idx_headerregion, _) in enumerate(found_polygons_text_region_h):
        id_of_texts.append(counter.next_region_id)
        # Direct positional lookup (assumes the index is always valid here).
        interest = indexes_sorted_2[index_of_types_2[idx_headerregion]]
        order_of_texts.append(interest)
    return (order_of_texts, id_of_texts)
def iload_stationxml(sx, segment, content):
    """Yield squirrel nuts (station, channel, response) from a StationXML tree.

    :param sx: parsed StationXML document
    :param segment: file segment identifier for the produced nuts
    :param content: set of content kinds to fully materialize
        ('station', 'channel', 'response'); others yield index-only nuts
    """
    inut = 0
    # End dates absurdly far in the future are treated as open-ended.
    far_future = (time.time() + (20 * Y))
    from pyrocko.io import stationxml
    value_or_none = stationxml.value_or_none
    for network in sx.network_list:
        for station in network.station_list:
            net = network.code
            sta = station.code
            tmin = station.start_date
            tmax = station.end_date
            if ((tmax is not None) and (tmax > far_future)):
                tmax = None
            station_nut = model.make_station_nut(file_segment=segment, file_element=inut, codes=model.CodesNSL(net, sta, '*'), tmin=tmin, tmax=tmax)
            if ('station' in content):
                station_nut.content = model.Station(lat=station.latitude.value, lon=station.longitude.value, elevation=value_or_none(station.elevation), **station_nut.station_kwargs)
                # Keep the raw element around, minus the channels (bulky).
                station_copy = copy.copy(station)
                station_copy.channel_list = []
                station_nut.raw_content['stationxml'] = station_copy
            (yield station_nut)
            inut += 1
            for channel in station.channel_list:
                cha = channel.code
                loc = channel.location_code.strip()
                tmin = channel.start_date
                tmax = channel.end_date
                if ((tmax is not None) and (tmax > far_future)):
                    tmax = None
                deltat = None
                if ((channel.sample_rate is not None) and (channel.sample_rate.value != 0.0)):
                    deltat = (1.0 / channel.sample_rate.value)
                if ((deltat is None) and channel.response):
                    # Fall back to the response's output sample rate.
                    out_rate_resp = channel.response.output_sample_rate
                    if out_rate_resp:
                        deltat = (1.0 / out_rate_resp)
                nut = model.make_channel_nut(file_segment=segment, file_element=inut, codes=model.CodesNSLCE(net, sta, loc, cha, ''), tmin=tmin, tmax=tmax, deltat=deltat)
                if ('channel' in content):
                    nut.content = model.Channel(lat=channel.latitude.value, lon=channel.longitude.value, elevation=value_or_none(channel.elevation), depth=value_or_none(channel.depth), azimuth=value_or_none(channel.azimuth), dip=value_or_none(channel.dip), **nut.channel_kwargs)
                    # Raw element kept without the response (stored separately).
                    channel_copy = copy.copy(channel)
                    channel_copy.response = None
                    nut.raw_content['stationxml'] = channel_copy
                (yield nut)
                inut += 1
                context = ('%s.%s.%s.%s' % (net, sta, loc, cha))
                if channel.response:
                    nut = model.make_response_nut(file_segment=segment, file_element=inut, codes=model.CodesNSLCE(net, sta, loc, cha, ''), tmin=tmin, tmax=tmax, deltat=deltat)
                    try:
                        resp = channel.response.get_squirrel_response(context, **nut.response_kwargs)
                        if ('response' in content):
                            nut.content = resp
                            nut.raw_content['stationxml'] = channel.response
                        (yield nut)
                        inut += 1
                    except stationxml.StationXMLError as e:
                        # Skip malformed responses but keep going.
                        logger.debug(('Bad instrument response: %s' % str(e)))
class BasicLayer(nn.Module):
    """One Swin-Transformer stage: optional downsampling followed by a stack
    of SwinTransformerBlocks (alternating regular / shifted windows)."""
    def __init__(self, dim, out_dim, input_resolution, depth, num_heads, window_size, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, norm_layer=nn.LayerNorm, downsample=None):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        # Even-indexed blocks use regular windows, odd-indexed use shifted ones.
        self.blocks = nn.ModuleList([SwinTransformerBlock(dim=out_dim, input_resolution=self.input_resolution, num_heads=num_heads, window_size=window_size, shift_size=(0 if ((i % 2) == 0) else (window_size // 2)), mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop, attn_drop=attn_drop, drop_path=(drop_path[i] if isinstance(drop_path, list) else drop_path), norm_layer=norm_layer) for i in range(depth)])
        if (downsample is not None):
            self.downsample = downsample(input_resolution, dim=dim, out_dim=out_dim, norm_layer=norm_layer)
        else:
            self.downsample = None
    def forward(self, x):
        """Apply downsampling (if any) then all transformer blocks."""
        if (self.downsample is not None):
            x = self.downsample(x)
        for (_, blk) in enumerate(self.blocks):
            x = blk(x)
        return x
    def extra_repr(self) -> str:
        """Summary string shown in the module repr."""
        return f'dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}'
    def flops(self):
        """Total FLOPs of the blocks plus the downsampling layer."""
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if (self.downsample is not None):
            flops += self.downsample.flops()
        return flops
    def update_resolution(self, H, W):
        """Propagate a new input resolution to the blocks and attention masks.

        NOTE(review): blocks get (H//2, W//2) while the layer keeps (H, W) —
        presumably because blocks run after downsampling; confirm.
        """
        self.input_resolution = (H, W)
        for (_, blk) in enumerate(self.blocks):
            blk.input_resolution = ((H // 2), (W // 2))
            blk.update_mask()
        if (self.downsample is not None):
            self.downsample.input_resolution = (H, W)
class RTStructBuilder():
    """Factory helpers for creating RTStruct objects, either fresh or loaded
    from an existing RTStruct file, validated against a DICOM image series."""

    # FIX: these methods take no `self` and are invoked on the class
    # (see validate_rtstruct calls below) — they were clearly meant to be
    # static; without the decorator, instance-bound calls would misbind args.
    @staticmethod
    def create_new(dicom_series_path: str) -> RTStruct:
        """Create a new, empty RTStruct for the series at *dicom_series_path*."""
        series_data = image_helper.load_sorted_image_series(dicom_series_path)
        ds = ds_helper.create_rtstruct_dataset(series_data)
        return RTStruct(series_data, ds)

    @staticmethod
    def create_from(dicom_series_path: str, rt_struct_path: str, warn_only: bool=False) -> RTStruct:
        """Load an existing RTStruct file and validate it against the series.

        :param warn_only: emit warnings instead of raising for reference
            mismatches between the RTStruct and the series
        """
        series_data = image_helper.load_sorted_image_series(dicom_series_path)
        ds = dcmread(rt_struct_path)
        RTStructBuilder.validate_rtstruct(ds)
        RTStructBuilder.validate_rtstruct_series_references(ds, series_data, warn_only)
        return RTStruct(series_data, ds)

    @staticmethod
    def validate_rtstruct(ds: Dataset):
        """Raise if *ds* is not a structurally complete RTStruct dataset."""
        if ((ds.SOPClassUID != SOPClassUID.RTSTRUCT) or (not hasattr(ds, 'ROIContourSequence')) or (not hasattr(ds, 'StructureSetROISequence')) or (not hasattr(ds, 'RTROIObservationsSequence'))):
            raise Exception('Please check that the existing RTStruct is valid')

    @staticmethod
    def validate_rtstruct_series_references(ds: Dataset, series_data: List[Dataset], warn_only: bool=False):
        """Check that every image referenced by *ds* exists in *series_data*."""
        for refd_frame_of_ref in ds.ReferencedFrameOfReferenceSequence:
            # Some RTStructs omit the study sequence entirely; nothing to check.
            if ('RTReferencedStudySequence' not in refd_frame_of_ref):
                return
            for rt_refd_study in refd_frame_of_ref.RTReferencedStudySequence:
                for rt_refd_series in rt_refd_study.RTReferencedSeriesSequence:
                    for contour_image in rt_refd_series.ContourImageSequence:
                        RTStructBuilder.validate_contour_image_in_series_data(contour_image, series_data, warn_only)

    @staticmethod
    def validate_contour_image_in_series_data(contour_image: Dataset, series_data: List[Dataset], warning_only: bool=False):
        """Warn or raise when *contour_image* is missing from *series_data*."""
        for series in series_data:
            if (contour_image.ReferencedSOPInstanceUID == series.SOPInstanceUID):
                return
        msg = f'Loaded RTStruct references image(s) that are not contained in input series data. Problematic image has SOP Instance Id: {contour_image.ReferencedSOPInstanceUID}'
        if warning_only:
            warnings.warn(msg)
        else:
            raise Exception(msg)
class HARSave(BaseHandler):
    """Handler that saves an uploaded HAR-derived template (create or update)."""
    env = Fetcher().jinja_env

    # FIX: this is called as `self.get_variables(self.env, ...)` but was
    # defined without `self`; without @staticmethod, `self` would be bound
    # to the `env` parameter, breaking the call.
    @staticmethod
    def get_variables(env, tpl):
        """Return the set of undeclared Jinja variables used by *tpl* entries,
        excluding globals, loop helpers, and variables extracted upstream."""
        variables = set()
        extracted = set(utils.jinja_globals.keys())
        loop_extracted = set(('loop_index0', 'loop_index', 'loop_first', 'loop_last', 'loop_length', 'loop_revindex0', 'loop_revindex', 'loop_depth', 'loop_depth0'))
        for entry in tpl:
            req = entry['request']
            rule = entry['rule']
            var = set()

            def _get(obj, key):
                # Best-effort: unparseable fragments contribute no variables.
                if (not obj.get(key)):
                    return
                try:
                    ast = env.parse(obj[key])
                except Exception:
                    return
                var.update(meta.find_undeclared_variables(ast))
            _get(req, 'method')
            _get(req, 'url')
            _get(req, 'data')
            for header in req['headers']:
                _get(header, 'name')
                _get(header, 'value')
            for cookie in req['cookies']:
                _get(cookie, 'name')
                _get(cookie, 'value')
            variables.update(((var - extracted) - loop_extracted))
            # Variables extracted by this rule are available to later entries.
            extracted.update(set((x['name'] for x in rule.get('extract_variables', []))))
        return variables

    # NOTE(review): restored from a mangled '.authenticated' line — confirm
    # the decorator module path against VCS history.
    @tornado.web.authenticated
    async def post(self, id):
        """Save the posted HAR/template; update tpl *id* if given, else create."""
        self.evil((+ 1))
        reponame = self.get_argument('reponame', '')
        harname = self.get_argument('name', '')
        userid = self.current_user['id']
        try:
            # Strip non-breaking spaces that break JSON parsing.
            if ('json' in self.request.headers['Content-Type']):
                self.request.body = self.request.body.replace(b'\xc2\xa0', b' ')
        except Exception as e:
            # FIX: the original bare `except:` referenced an undefined `e`,
            # which raised NameError inside the handler.
            logger_Web_Handler.debug(('HARSave Replace error: %s' % e))
        data = json.loads(self.request.body)
        async with self.db.transaction() as sql_session:
            har = (await self.db.user.encrypt(userid, data['har'], sql_session=sql_session))
            tpl = (await self.db.user.encrypt(userid, data['tpl'], sql_session=sql_session))
            variables = list(self.get_variables(self.env, data['tpl']))
            init_env = {}
            try:
                # Pre-seed defaults declared via the `default` filter in the tpl.
                ast = self.env.parse(data['tpl'])
                for x in ast.find_all(Filter):
                    if ((x.name == 'default') and isinstance(x.node, Name) and (len(x.args) > 0) and (x.node.name in variables) and (x.node.name not in init_env)):
                        try:
                            init_env[x.node.name] = x.args[0].as_const()
                        except Exception as e:
                            logger_Web_Handler.debug(('HARSave init_env error: %s' % e))
            except Exception as e:
                logger_Web_Handler.debug(('HARSave ast error: %s' % e))
            variables = json.dumps(variables)
            init_env = json.dumps(init_env)
            groupName = 'None'
            if id:
                # Updating an existing template: require write permission
                # and an unlocked record.
                _tmp = self.check_permission((await self.db.tpl.get(id, fields=('id', 'userid', 'lock'), sql_session=sql_session)), 'w')
                if (not _tmp['userid']):
                    self.set_status(403)
                    (await self.finish(u''))
                    return
                if _tmp['lock']:
                    self.set_status(403)
                    (await self.finish(u''))
                    return
                (await self.db.tpl.mod(id, har=har, tpl=tpl, variables=variables, init_env=init_env, sql_session=sql_session))
                groupName = (await self.db.tpl.get(id, fields=('_groups',), sql_session=sql_session))['_groups']
            else:
                try:
                    id = (await self.db.tpl.add(userid, har, tpl, variables, init_env=init_env, sql_session=sql_session))
                except Exception as e:
                    if ('max_allowed_packet' in str(e)):
                        raise Exception(('harMySQL max_allowed_packet ; \n' + str(e)))
                if (not id):
                    raise Exception('create tpl error')
            setting = data.get('setting', {})
            (await self.db.tpl.mod(id, tplurl='{0}|{1}'.format(harname, reponame), sitename=setting.get('sitename'), siteurl=setting.get('siteurl'), note=setting.get('note'), interval=(setting.get('interval') or None), mtime=time.time(), updateable=0, _groups=groupName, sql_session=sql_session))
        (await self.finish({'id': id}))
def main():
    """CLI entry point: parse CommonMark from a file or STDIN and emit HTML,
    a formatted AST dump, or a JSON AST."""
    # Renamed from `parser` to avoid shadowing the commonmark parser below.
    arg_parser = argparse.ArgumentParser(description='Process Markdown according to the CommonMark specification.')
    if (sys.version_info < (3, 0)):
        # Python 2 only: force UTF-8 default encoding for I/O.
        reload(sys)
        sys.setdefaultencoding('utf-8')
    arg_parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help='Input Markdown file to parse, defaults to STDIN')
    arg_parser.add_argument('-o', nargs='?', type=argparse.FileType('w'), default=sys.stdout, help='Output HTML/JSON file, defaults to STDOUT')
    arg_parser.add_argument('-a', action='store_true', help='Print formatted AST')
    arg_parser.add_argument('-aj', action='store_true', help='Output JSON AST')
    args = arg_parser.parse_args()
    # Read the whole input at once instead of accumulating line by line.
    ast = commonmark.Parser().parse(args.infile.read())
    out = args.o
    if ((not args.a) and (not args.aj)):
        out.write(commonmark.HtmlRenderer().render(ast))
    elif args.a:
        commonmark.dumpAST(ast)
    else:
        out.write(commonmark.dumpJSON(ast))
    # Use sys.exit rather than the site-injected exit() builtin.
    sys.exit()
def update_hints_text(game: RandovaniaGame, hint_item_names_tree_widget: QtWidgets.QTableWidget):
    """Fill the hint-item table with (name, precise, general, broad) hint
    detail strings for every pickup of *game*, then auto-size the columns."""

    pickup_database = default_database.pickup_database_for_game(game)

    def _row(name, category, broad):
        # One table row: name plus the three hint-detail variants.
        return (name, category.hint_details[1], category.general_details[1], broad.hint_details[1])

    rows = [
        _row(item.name, item.pickup_category, item.broad_category)
        for item in pickup_database.standard_pickups.values()
    ]
    rows.extend(
        _row(name, category, broad)
        for (name, category, broad) in _GAME_SPECIFIC.get(game, list)()
    )
    rows.extend(
        _row(ammo.name, ammo.pickup_category, ammo.broad_category)
        for ammo in pickup_database.ammo_pickups.values()
    )

    widget = hint_item_names_tree_widget
    # Disable sorting while repopulating so row indices stay stable.
    widget.setSortingEnabled(False)
    widget.setRowCount(len(rows))
    for row_index, row in enumerate(rows):
        for column_index, text in enumerate(row):
            widget.setItem(row_index, column_index, QtWidgets.QTableWidgetItem(text))
    for column_index in range(4):
        widget.resizeColumnToContents(column_index)
    widget.setSortingEnabled(True)
def test_imbalance_penalty_with_barely_sufficient_balance():
    """A forward transfer pair must still be found when both channel balances
    (9 tokens) barely cover the 10-token transfer minus fee adjustments,
    with an imbalance penalty schedule applied on both channels."""
    imbalance_penalty = calculate_imbalance_fees(channel_capacity=TokenAmount(20), proportional_imbalance_fee=ProportionalFeeAmount(1))
    (pair, _) = _foward_transfer_pair(TokenAmount(10), NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=TokenAmount(9)), fee_schedule=FeeScheduleState(flat=FeeAmount(0), imbalance_penalty=imbalance_penalty)), NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=TokenAmount(9)), fee_schedule=FeeScheduleState(flat=FeeAmount(1), imbalance_penalty=imbalance_penalty)))
    assert pair
def to_starred_table_for_no_overlap2(x1, x2, y1, y2, w1, w2, h1, h2):
    """Build the starred tuple table encoding the no-overlap constraint for
    two rectangles: each entry set covers one relative placement (left/right
    along x, above/below along y)."""
    tuples = []
    # (first domain, second domain, extent, is-first-rectangle, is-horizontal)
    cases = (
        (x1.dom, x2.dom, w1, True, True),
        (x2.dom, x1.dom, w2, False, True),
        (y1.dom, y2.dom, h1, True, False),
        (y2.dom, y1.dom, h2, False, False),
    )
    for dom_a, dom_b, extent, first, horizontal in cases:
        _non_overlapping_tuples_for(tuples, dom_a, dom_b, extent, first, horizontal)
    return tuples
class AmmoPickerContents(wx.ScrolledCanvas):
    """Scrollable panel listing ammo choices for a fit's damage-dealing
    modules, drones, and fighters; one radio button per group with
    checkboxes for the individual ammo options."""
    # Pixels of horizontal indent per nesting level.
    indent = 15
    def __init__(self, parent, fit):
        wx.ScrolledCanvas.__init__(self, parent)
        self.SetScrollRate(0, 15)
        mods = self.getMods(fit)
        drones = self.getDrones(fit)
        fighters = self.getFighters(fit)
        # Map radio button -> dependent labels / checkboxes, used to
        # enable/disable widgets when the selection changes.
        self.rbLabelMap = {}
        self.rbCheckboxMap = {}
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        moduleSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer.Add(moduleSizer, 0, wx.ALL, 0)
        self.droneSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer.Add(self.droneSizer, 0, wx.ALL, 0)
        fighterSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer.Add(fighterSizer, 0, wx.ALL, 0)
        firstRadio = True
        for (modInfo, modAmmo) in mods:
            # Group label, e.g. "2x Heavy Missile Launcher II".
            text = '\n'.join(('{}x {}'.format(amount, item.name) for (item, amount) in modInfo))
            modRb = self.addRadioButton(moduleSizer, text, firstRadio)
            firstRadio = False
            mod = next((m for m in fit.modules if (m.itemID == next(iter(modInfo))[0].ID)), None)
            (_, ammoTree) = Ammo.getInstance().getModuleStructuredAmmo(mod)
            if (len(ammoTree) == 1):
                # Single category: flat list, no category headers.
                for (ammoCatName, ammos) in ammoTree.items():
                    for ammo in ammos:
                        self.addCheckbox(moduleSizer, ammo.name, modRb, indentLvl=1)
            else:
                for (ammoCatName, ammos) in ammoTree.items():
                    if (len(ammos) == 1):
                        ammo = next(iter(ammos))
                        self.addCheckbox(moduleSizer, ammo.name, modRb, indentLvl=1)
                    else:
                        # Category header, then its ammo options indented below.
                        self.addLabel(moduleSizer, '{}:'.format(ammoCatName), modRb, indentLvl=1)
                        for ammo in ammos:
                            self.addCheckbox(moduleSizer, ammo.name, modRb, indentLvl=2)
        if drones:
            droneRb = self.addRadioButton(self.droneSizer, 'Drones', firstRadio)
            from gui.builtinAdditionPanes.droneView import DroneView
            for drone in sorted(drones, key=DroneView.droneKey):
                self.addCheckbox(self.droneSizer, '{}x {}'.format(drone.amount, drone.item.name), droneRb, indentLvl=1)
            addBtn = wx.Button(self, wx.ID_ANY, '+', style=wx.BU_EXACTFIT)
            addBtn.Bind(wx.EVT_BUTTON, self.OnDroneGroupAdd)
            mainSizer.Add(addBtn, 0, wx.LEFT, self.indent)
        if fighters:
            fighterRb = self.addRadioButton(fighterSizer, 'Fighters', firstRadio)
            from gui.builtinAdditionPanes.fighterView import FighterDisplay
            for fighter in sorted(fighters, key=FighterDisplay.fighterKey):
                self.addCheckbox(fighterSizer, '{}x {}'.format(fighter.amount, fighter.item.name), fighterRb, indentLvl=1)
        self.SetSizer(mainSizer)
        self.refreshStatus()
    def addRadioButton(self, sizer, text, firstRadio=False):
        """Create a radio button; the first one starts a new group and is
        pre-selected."""
        if firstRadio:
            rb = wx.RadioButton(self, wx.ID_ANY, text, style=wx.RB_GROUP)
            rb.SetValue(True)
        else:
            rb = wx.RadioButton(self, wx.ID_ANY, text)
            rb.SetValue(False)
        rb.Bind(wx.EVT_RADIOBUTTON, self.rbSelected)
        sizer.Add(rb, 0, (wx.EXPAND | wx.ALL), 0)
        return rb
    def addCheckbox(self, sizer, text, currentRb, indentLvl=0):
        """Add a checkbox tied to *currentRb* for enable/disable handling."""
        cb = wx.CheckBox(self, (- 1), text)
        sizer.Add(cb, 0, (wx.EXPAND | wx.LEFT), (self.indent * indentLvl))
        if (currentRb is not None):
            self.rbCheckboxMap.setdefault(currentRb, []).append(cb)
    def addLabel(self, sizer, text, currentRb, indentLvl=0):
        """Add a static label (capitalized) tied to *currentRb*."""
        text = (text[0].capitalize() + text[1:])
        label = wx.StaticText(self, wx.ID_ANY, text)
        sizer.Add(label, 0, (wx.EXPAND | wx.LEFT), (self.indent * indentLvl))
        if (currentRb is not None):
            self.rbLabelMap.setdefault(currentRb, []).append(label)
    def getMods(self, fit):
        """Return [(sorted [(item, count)], charge set)] for every group of
        damage-dealing modules that can load charges, sorted by market group."""
        sMkt = Market.getInstance()
        sAmmo = Ammo.getInstance()
        loadableChargesCache = {}
        modsPrelim = {}
        if (fit is not None):
            for mod in fit.modules:
                if (not mod.canDealDamage()):
                    continue
                typeID = mod.item.ID
                # Cache per item type: charge lookup is relatively expensive.
                if (typeID not in loadableChargesCache):
                    loadableChargesCache[typeID] = sAmmo.getModuleFlatAmmo(mod)
                charges = loadableChargesCache[typeID]
                if charges:
                    # Group modules by the exact set of loadable charges.
                    data = modsPrelim.setdefault(frozenset(charges), {})
                    if (mod.item not in data):
                        data[mod.item] = 0
                    data[mod.item] += 1
        modsFinal = []
        for (charges, itemCounts) in modsPrelim.items():
            modsFinal.append((sorted(itemCounts.items(), key=(lambda i: sMkt.itemSort(i[0], reverseMktGrp=True)), reverse=True), charges))
        modsFinal.sort(key=(lambda i: sMkt.itemSort(i[0][0][0], reverseMktGrp=True)), reverse=True)
        return modsFinal
    def getDrones(self, fit):
        """Return drones that deal damage or web/paint targets."""
        drones = []
        if (fit is not None):
            for drone in fit.drones:
                if (drone.item is None):
                    continue
                if drone.canDealDamage(ignoreState=True):
                    drones.append(drone)
                    continue
                if {'remoteWebifierEntity', 'remoteTargetPaintEntity'}.intersection(drone.item.effects):
                    drones.append(drone)
                    continue
        return drones
    def getFighters(self, fit):
        """Return fighters that deal damage or have an active webifier ability."""
        fighters = []
        if (fit is not None):
            for fighter in fit.fighters:
                if (fighter.item is None):
                    continue
                if fighter.canDealDamage(ignoreState=True):
                    fighters.append(fighter)
                    continue
                for ability in fighter.abilities:
                    if (not ability.active):
                        continue
                    if (ability.effect.name == 'fighterAbilityStasisWebifier'):
                        fighters.append(fighter)
                        break
        return fighters
    def OnDroneGroupAdd(self, event):
        """Handler for the '+' button next to the drone list.

        NOTE(review): this looks unfinished — the StaticText is created
        without a parent and the sizer added here stays empty; confirm
        intended behavior before relying on it.
        """
        event.Skip()
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText()
        self.droneSizer.Add(sizer, 0, (wx.EXPAND | wx.LEFT), self.indent)
    def refreshStatus(self):
        """Enable widgets belonging to the selected radio group, disable others."""
        for map in (self.rbLabelMap, self.rbCheckboxMap):
            for (rb, items) in map.items():
                for item in items:
                    item.Enable(rb.GetValue())
    def rbSelected(self, event):
        """Radio selection changed: re-evaluate widget enablement."""
        event.Skip()
        self.refreshStatus()
def trashicra_to_detectwaste(label):
    """Map a trash-ICRA label to its detect-waste category name.

    Labels outside the known mapping are reported on stdout and mapped to
    'unknown' (matching the original behavior).

    :param label: trash-ICRA label string
    :return: detect-waste category string
    """
    # Category -> trash-ICRA labels belonging to it.
    labels_by_category = {
        'metals_and_plastics': ('plastic', 'metal', 'rubber'),
        'non-recyclable': ('cloth', 'paper'),
        'bio': ('wood',),
        'unknown': ('unknown',),
    }
    for category, labels in labels_by_category.items():
        if label in labels:
            return category
    # Not a trash-ICRA label at all: report and fall back to 'unknown'.
    print(label, 'is non-trashicra label')
    return 'unknown'
class ResNetBasicblock(nn.Module):
    """Two-convolution residual block (3x3 -> 3x3) with optional downsample
    on the shortcut and an optional final ReLU."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, last_relu=True):
        super(ResNetBasicblock, self).__init__()
        # First conv may reduce spatial size via `stride`.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        # Whether to apply ReLU after adding the shortcut.
        self.last_relu = last_relu

    def forward(self, x):
        """Residual forward pass: main path plus (possibly downsampled) input."""
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + shortcut
        return self.relu(y) if self.last_relu else y
# NOTE(review): the three lines below look like mangled decorators (likely a
# route registration, a repository-name parser, and an anonymous-access
# guard) — confirm against VCS history; as written they are no-op/broken
# expressions.
('/repository/<repopath:repository>/status', methods=['GET'])
_repository_name()
_protect
def build_status_badge(namespace_name, repo_name):
    """Return an SVG badge reflecting the most recent build of a repository.

    Private repositories require a matching badge token; responses carry an
    ETag equal to the status name so clients can poll cheaply with
    If-None-Match.
    """
    token = request.args.get('token', None)
    repo = model.repository.get_repository(namespace_name, repo_name)
    # Badges only exist for image repositories.
    if (repo and (repo.kind.name != 'image')):
        abort(404)
    is_public = model.repository.repository_is_public(namespace_name, repo_name)
    if (not is_public):
        # Private repo: the caller must present the repo's badge token.
        if ((not repo) or (token != repo.badge_token)):
            abort(404)
    recent_build = model.build.get_recent_repository_build(namespace_name, repo_name)
    # Map the most recent build phase onto a badge status name.
    if (recent_build and (recent_build.phase == 'complete')):
        status_name = 'ready'
    elif (recent_build and (recent_build.phase == 'error')):
        status_name = 'failed'
    elif (recent_build and (recent_build.phase == 'cancelled')):
        status_name = 'cancelled'
    elif (recent_build and (recent_build.phase != 'complete')):
        status_name = 'building'
    else:
        status_name = 'none'
    # Honor the conditional request if the status did not change.
    if (request.headers.get('If-None-Match') == status_name):
        return Response(status=304)
    response = make_response(STATUS_TAGS[status_name])
    response.content_type = 'image/svg+xml'
    response.headers['Cache-Control'] = 'no-cache'
    response.headers['ETag'] = status_name
    return response
def match_graph(loc_dict, max_loc, max_obj_perloc):
    """Randomly subsample a location graph.

    Keep at most *max_loc* locations (chosen uniformly without replacement,
    re-indexed 0..k-1 in original iteration order) and at most
    *max_obj_perloc* objects per kept location (also sampled without
    replacement). Uses numpy's global RNG.
    """
    num_locations = len(loc_dict)
    chosen_locs = np.random.choice(range(num_locations), min(max_loc, num_locations), replace=False)

    # Keep only chosen locations, assigning fresh consecutive indices.
    kept = {}
    next_index = 0
    for name, attributes in loc_dict.items():
        if attributes['index'] in chosen_locs:
            kept[name] = {'objects': attributes['objects'], 'index': next_index}
            next_index += 1

    # Subsample the object list of every kept location.
    result = kept.copy()
    for name, attributes in kept.items():
        objects = attributes['objects']
        sampled = list(np.random.choice(objects, min(max_obj_perloc, len(objects)), replace=False))
        result[name]['objects'] = sampled
    return result
class ResNet(nn.Module):
    """ResNet backbone with four stages and staged feature extraction.

    The `lin`/`lout` arguments of features/forward select which stages run:
    stage k executes when lin < k+1 and lout > k-1 (stage 0 = stem conv,
    stages 1-4 = residual layers, pooling/flatten after stage 4, the linear
    classifier last).
    """

    def __init__(self, block, num_blocks, num_classes=10, drop=False):
        super(ResNet, self).__init__()
        self.drop = drop
        self.in_planes = 64
        self.conv1 = conv3x3(3, 64)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        # Dropout (if enabled) is applied only in the last two stages.
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, drop=drop, block_size=5)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, drop=drop, block_size=3)
        self.linear = nn.Linear((512 * block.expansion), num_classes)

    def _make_layer(self, block, planes, num_blocks, stride, drop=False, block_size=2):
        """Stack *num_blocks* residual blocks; only the first one strides."""
        layers = []
        for layer_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, layer_stride, drop=drop, block_size=block_size))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def features(self, x, lin=0, lout=5):
        """Run the convolutional stages selected by (lin, lout)."""
        out = x
        if lin < 1 and lout > -1:
            out = F.relu(self.bn1(self.conv1(out)))
        for k, stage in enumerate((self.layer1, self.layer2, self.layer3, self.layer4), start=1):
            if lin < k + 1 and lout > k - 1:
                out = stage(out)
        if lout > 4:
            out = F.avg_pool2d(out, 4)
            out = out.view(out.size(0), -1)
        return out

    def classifier(self, x, lin=0, lout=5):
        """Apply the final linear layer when the full pipeline is requested."""
        return self.linear(x) if lout > 4 else x

    def forward(self, x, lin=0, lout=5):
        """Features followed by the classifier, both gated by (lin, lout)."""
        return self.classifier(self.features(x, lin, lout), lin, lout)
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    """Fast tokenizer for the DPR question encoder.

    Behaves exactly like BertTokenizerFast; only the pretrained resource
    tables and the matching slow tokenizer class are overridden.
    """
    # Names of the vocabulary files bundled with pretrained checkpoints.
    vocab_files_names = VOCAB_FILES_NAMES
    # Checkpoint-name -> vocab-file-URL mapping.
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    # Maximum input lengths supported by each pretrained checkpoint.
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Default init kwargs per pretrained checkpoint.
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    # Slow tokenizer counterpart used for conversion.
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
class Migration(migrations.Migration):
    """Add an optional 'max_length' integer field to RequiredTextAsset and
    RequiredTextAssetConfiguration (null/empty means unlimited input)."""
    dependencies = [('sponsors', '0071_auto__1843')]
    operations = [migrations.AddField(model_name='requiredtextasset', name='max_length', field=models.IntegerField(blank=True, default=None, help_text='Limit to length of the input, empty means unlimited', null=True)), migrations.AddField(model_name='requiredtextassetconfiguration', name='max_length', field=models.IntegerField(blank=True, default=None, help_text='Limit to length of the input, empty means unlimited', null=True))]
class KombuThriftSerializer(KombuSerializer[T]):
    """Kombu serializer that (de)serializes Thrift structs of one fixed class."""
    def __init__(self, thrift_class: Type[T], protocol_factory: TProtocolFactory=TBinaryProtocolAcceleratedFactory()):
        # NOTE(review): the default protocol factory instance is shared across
        # all serializers created without an explicit factory — confirm it is
        # stateless/thread-safe.
        self.thrift_class = thrift_class
        self.factory = protocol_factory
    # NOTE(review): this looks like it was meant to be a @property (callers
    # typically read `serializer.name`) — confirm against the KombuSerializer
    # base class before changing.
    def name(self) -> str:
        """Identifier for registration, e.g. 'thrift-MyStruct'."""
        return f'thrift-{self.thrift_class.__name__}'
    def serialize(self, obj: T) -> bytes:
        """Serialize *obj* to bytes; raises TypeError for other types."""
        if (not isinstance(obj, self.thrift_class)):
            raise TypeError(f'object to serialize must be of {self.thrift_class.__name__} type')
        return TSerialization.serialize(obj, self.factory)
    def deserialize(self, message: bytes) -> T:
        """Deserialize *message* into a fresh instance of the thrift class."""
        return TSerialization.deserialize(self.thrift_class(), message, self.factory)
def process(data):
    """Score one worker pool's candidate summaries against references with
    ROUGE-155 via pyrouge, using a unique scratch directory.

    :param data: tuple (candidates, references, pool_id)
    :return: dict of ROUGE scores from pyrouge's output
    """
    (candidates, references, pool_id) = data
    cnt = len(candidates)
    current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
    # Timestamp + pool id keep concurrent workers from colliding.
    tmp_dir = 'rouge-tmp-{}-{}'.format(current_time, pool_id)
    if (not os.path.isdir(tmp_dir)):
        os.mkdir(tmp_dir)
        os.mkdir((tmp_dir + '/candidate'))
        os.mkdir((tmp_dir + '/reference'))
    try:
        for i in range(cnt):
            # Skip pairs with an empty reference.
            if (len(references[i]) < 1):
                continue
            with open((tmp_dir + '/candidate/cand.{}.txt'.format(i)), 'w', encoding='utf-8') as f:
                f.write(candidates[i])
            with open((tmp_dir + '/reference/ref.{}.txt'.format(i)), 'w', encoding='utf-8') as f:
                f.write(references[i])
        r = pyrouge.Rouge155()
        r.model_dir = (tmp_dir + '/reference/')
        r.system_dir = (tmp_dir + '/candidate/')
        r.model_filename_pattern = 'ref.#ID#.txt'
        r.system_filename_pattern = 'cand.(\\d+).txt'
        rouge_results = r.convert_and_evaluate()
        print(rouge_results)
        results_dict = r.output_to_dict(rouge_results)
    finally:
        # FIX: cleanup used to sit outside the try (with a no-op `finally:
        # pass`), leaking the scratch directory whenever pyrouge raised.
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
    return results_dict
def deprecated(value: object, module_name: str, message: str, warning_class: type[Warning], name: (str | None)=None) -> _DeprecatedValue:
    """Wrap *value* in a _DeprecatedValue and install it on module *module_name*.

    The target module is replaced with a _ModuleWithDeprecations proxy on
    first use. When *name* is given, the wrapped value is also set as an
    attribute of that module. Returns the wrapper.
    """
    module = sys.modules[module_name]
    if not isinstance(module, _ModuleWithDeprecations):
        # First deprecation in this module: swap in the warning proxy.
        module = _ModuleWithDeprecations(module)
        sys.modules[module_name] = module
    wrapped = _DeprecatedValue(value, message, warning_class)
    if name is not None:
        setattr(module, name, wrapped)
    return wrapped
def validate_branch_ops(conn_graph: ConnectedGraph):
    """Validate that every multi-consumer op in *conn_graph* was rewired
    through a synthetic 'branch' op. Returns True iff all checks pass."""
    def check_for_branch_op(op_info: ModuleIdentifierOpInfo):
        """Check one op's branch structure; logs each violation found."""
        op = conn_graph.get_all_ops()[op_info.module_name]
        return_bool = True
        product = op.output
        if ('branch' not in product.name):
            logger.error('branch not in product name')
            return_bool = False
        if (len(product.consumers) > 1):
            # The parent's only consumer should be the branch op itself.
            logger.error("branch op is not parent op's only consumer")
            return_bool = False
        branch_op = product.consumers[0]
        if (branch_op.type != 'branch'):
            logger.error("parent op's child op is not of type branch")
            return_bool = False
        branch_product = branch_op.output
        if ('multiple_ops' not in branch_product.name):
            logger.error("multiple_ops not in branch op's product's name")
            return_bool = False
        if (len(branch_product.consumers) <= 1):
            logger.error("branch op's product has one or fewer consumers")
            return_bool = False
        for consumer in branch_product.consumers:
            # Consumers must now read from the branch op, not the parent.
            for input_product in consumer.inputs:
                if (input_product.producer == op):
                    logger.error("parent op is still one of child op's inputs (as opposed to branch op)")
                    return_bool = False
        return return_bool
    module_identifier = StructureModuleIdentifier(conn_graph.graph, conn_graph._starting_op_names, conn_graph._valid_ops)
    num_branches_found = 0
    for tf_op in conn_graph.graph.get_operations():
        if (tf_op not in module_identifier.processed_ops):
            continue
        # An op "branches" when one of its outputs feeds more than one
        # distinct identified module.
        found_branch = False
        for output_tensor in tf_op.outputs:
            if (len(output_tensor.consumers()) > 1):
                child_module_set = set()
                for consumer_op in output_tensor.consumers():
                    if (consumer_op in module_identifier._valid_ops):
                        child_module_info = module_identifier.get_op_info(consumer_op)
                        child_module_set.add(child_module_info.module_name)
                if (len(child_module_set) > 1):
                    found_branch = True
                    break
        if found_branch:
            num_branches_found += 1
            tf_op_info = module_identifier.get_op_info(tf_op)
            if (not check_for_branch_op(tf_op_info)):
                return False
    logger.info('Found %s branches', num_branches_found)
    return True
def timebase(sys, strict=True):
    """Return the timebase (dt) of a system.

    Scalar constants have no timebase and yield None. For systems, a dt of
    None (undetermined) is returned as-is; otherwise dt is returned, coerced
    to float when *strict* is true.

    Note: the parameter is named `sys` (shadowing the stdlib module) to match
    the public API; do not rename.

    :raises ValueError: if *sys* is neither a scalar nor an InputOutputSystem
    """
    if isinstance(sys, (int, float, complex, np.number)):
        return None
    elif (not isinstance(sys, InputOutputSystem)):
        raise ValueError('Timebase not defined')
    # FIX: use identity comparison with None instead of '== None'.
    if sys.dt is None:
        return None
    elif strict:
        return float(sys.dt)
    return sys.dt
def start_all_dummy_clients(nclients):
    """Launch ``nclients`` dummy telnet clients against the local server.

    Reads the action table from ``DUMMYRUNNER_SETTINGS.ACTIONS`` (first two
    entries are the login/logout callables, the rest are ``(weight, func)``
    pairs), normalizes the weights into cumulative probabilities, then
    connects all clients and runs the reactor (blocking).
    """
    from itertools import accumulate
    global NCLIENTS
    NCLIENTS = int(nclients)
    actions = DUMMYRUNNER_SETTINGS.ACTIONS
    if len(actions) < 2:
        print(ERROR_FEW_ACTIONS)
        return
    # Normalize the raw weights so they sum to 1.
    pratio = 1.0 / sum(tup[0] for tup in actions[2:])
    flogin, flogout = actions[0], actions[1]
    probs = [tup[0] * pratio for tup in actions[2:]]
    cfuncs = [tup[1] for tup in actions[2:]]
    # Cumulative probabilities; accumulate() adds left-to-right exactly like
    # the previous nested-sum version but in O(n) instead of O(n^2).
    cprobs = list(accumulate(probs))
    actions = (flogin, flogout) + tuple(zip(cprobs, cfuncs))
    factory = DummyFactory(actions)
    for _ in range(NCLIENTS):
        reactor.connectTCP('localhost', TELNET_PORT, factory)
    # Blocks until the reactor is stopped.
    reactor.run()
class ReplaceDataset(BaseWrapperDataset):
    """Dataset wrapper that rewrites token ids in place.

    For each source tensor of an item, the region selected by the matching
    offset (``src[offset:]`` for non-negative offsets, ``src[:offset]``
    otherwise) has every occurrence of a key in ``replace_map`` overwritten
    with its mapped value.
    """

    def __init__(self, dataset, replace_map, offsets):
        super().__init__(dataset)
        assert len(replace_map) > 0
        self.replace_map = replace_map
        self.offsets = offsets

    def __getitem__(self, index):
        item = self.dataset[index]
        as_tuple = isinstance(item, tuple)
        sources = item if as_tuple else [item]
        for offset, source in zip(self.offsets, sources):
            # Slicing yields a view, so the in-place fill mutates `source`.
            region = source[offset:] if offset >= 0 else source[:offset]
            for old_id, new_id in self.replace_map.items():
                region.masked_fill_(region == old_id, new_id)
        return sources if as_tuple else sources[0]
class HP6634A(HP6632A):
    """Hewlett-Packard HP6634A system power supply; thin variant of the
    HP6632A driver with HP6634A-specific current/OVP/voltage limits."""

    def __init__(self, adapter, name='Hewlett Packard HP6634A', **kwargs):
        super().__init__(adapter, name, **kwargs)

    # NOTE(review): indentation was lost in this copy; these three lists are
    # reconstructed here as class attributes (range limits pulled from the
    # module-level `limits` table, presumably consumed by validators in the
    # HP6632A base class). If they were meant to live inside __init__ they
    # would be unused locals -- confirm against the original source.
    current_values = [0, limits['HP6634A']['Cur_lim']]
    OVP_values = [0, limits['HP6634A']['OVP_lim']]
    voltage_values = [0, limits['HP6634A']['Volt_lim']]
def convert(framework: str, model: str, output: Path, opset: int, tokenizer: Optional[str]=None, use_external_format: bool=False, pipeline_name: str='feature-extraction', **model_kwargs):
    """Convert a transformers pipeline to an ONNX graph.

    Loads the pipeline for ``model`` under the given ``framework`` ('pt' or
    anything else for TensorFlow), ensures the output folder exists and is
    empty, then dispatches to the framework-specific exporter. Raises if the
    destination folder already contains files.
    """
    warnings.warn('The `transformers.convert_graph_to_onnx` package is deprecated and will be removed in version 5 of Transformers', FutureWarning)
    print(f'ONNX opset version set to: {opset}')
    nlp = load_graph_from_args(pipeline_name, framework, model, tokenizer, **model_kwargs)
    parent = output.parent
    if not parent.exists():
        print(f'Creating folder {parent}')
        makedirs(parent.as_posix())
    elif len(listdir(parent.as_posix())) > 0:
        # Refuse to overwrite anything in a non-empty destination.
        raise Exception(f'Folder {parent.as_posix()} is not empty, aborting conversion')
    if framework == 'pt':
        convert_pytorch(nlp, opset, output, use_external_format)
    else:
        convert_tensorflow(nlp, opset, output)
def annotation_difference(first, second):
    """Break down the symmetric difference of two annotation collections.

    Parameters
    ----------
    first, second : iterable of hashables

    Returns
    -------
    tuple
        ``(first_not_second, second_not_first, total)`` where the first two
        are sets and ``total`` is the size of the symmetric difference.
    """
    first_set, second_set = set(first), set(second)
    # (A | B) - B == A - B, so the union round-trip of the old version is
    # unnecessary; compute the two one-sided differences directly.
    first_not_second = first_set - second_set
    second_not_first = second_set - first_set
    total = len(first_not_second) + len(second_not_first)
    return (first_not_second, second_not_first, total)
def pytorch2onnx(model, input_shape, opset_version=11, show=False, output_file='tmp.onnx', verify=False):
    """Export a PyTorch model to ONNX and optionally verify numerical parity.

    Args:
        model: PyTorch module to export (moved to CPU, set to eval mode).
        input_shape: shape of the random dummy input used for tracing.
        opset_version (int): ONNX opset to target.
        show (bool): passed as ``verbose`` to ``torch.onnx.export``.
        output_file (str): path of the resulting .onnx file.
        verify (bool): if True, run both models on the dummy input and
            assert the outputs agree within atol=1e-5.
    """
    model.cpu().eval()
    # Single random input: the export is trace-based on this one tensor.
    one_img = torch.randn(input_shape)
    # Registers extra symbolic ops for this opset -- presumably the mmcv/mmdet
    # helper imported at module level; confirm against the file's imports.
    register_extra_symbolics(opset_version)
    torch.onnx.export(model, one_img, output_file, export_params=True, keep_initializers_as_inputs=True, verbose=show, opset_version=opset_version)
    print(f'Successfully exported ONNX model: {output_file}')
    if verify:
        # Structural validity check of the exported graph.
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)
        # Reference forward pass; normalize the result to a list of tensors.
        pytorch_results = model(one_img)
        if (not isinstance(pytorch_results, (list, tuple))):
            assert isinstance(pytorch_results, torch.Tensor)
            pytorch_results = [pytorch_results]
        # Real graph inputs = declared inputs minus initializers (weights).
        input_all = [node.name for node in onnx_model.graph.input]
        input_initializer = [node.name for node in onnx_model.graph.initializer]
        net_feed_input = list((set(input_all) - set(input_initializer)))
        assert (len(net_feed_input) == 1)
        sess = rt.InferenceSession(output_file)
        onnx_results = sess.run(None, {net_feed_input[0]: one_img.detach().numpy()})
        assert (len(pytorch_results) == len(onnx_results))
        # Element-wise comparison with loose absolute tolerance.
        for (pt_result, onnx_result) in zip(pytorch_results, onnx_results):
            assert np.allclose(pt_result.detach().cpu(), onnx_result, atol=1e-05), 'The outputs are different between Pytorch and ONNX'
        print('The numerical values are same between Pytorch and ONNX')
def test():
    """CLI sub-command: evaluate a trained bi-LM on a raw text file.

    Parses its own flags from ``sys.argv[2:]`` (argv[1] is the sub-command),
    reloads the saved config and char/word dictionaries from ``--model``,
    rebuilds the model, batches the ``--input`` corpus and logs perplexity.
    """
    cmd = argparse.ArgumentParser('The testing components of')
    cmd.add_argument('--gpu', default=(- 1), type=int, help='use id of gpu, -1 if cpu.')
    cmd.add_argument('--input', help='the path to the raw text file.')
    cmd.add_argument('--model', required=True, help='path to save model')
    cmd.add_argument('--batch_size', '--batch', type=int, default=1, help='the batch size.')
    args = cmd.parse_args(sys.argv[2:])
    if (args.gpu >= 0):
        torch.cuda.set_device(args.gpu)
    use_cuda = ((args.gpu >= 0) and torch.cuda.is_available())
    # Training-time options were serialized next to the model weights.
    args2 = dict2namedtuple(json.load(codecs.open(os.path.join(args.model, 'config_rnn.json'), 'r', encoding='utf-8')))
    with open(args2.config_path, 'r') as fin:
        config = json.load(fin)
    if (config['token_embedder']['char_dim'] > 0):
        # Rebuild the character lexicon; dictionary lines are "token\tid".
        char_lexicon = {}
        with codecs.open(os.path.join(args.model, 'char.dic'), 'r', encoding='utf-8') as fpi:
            for line in fpi:
                tokens = line.strip().split('\t')
                if (len(tokens) == 1):
                    # Only one field means the token itself was whitespace;
                    # restore it as the ideographic-space placeholder.
                    tokens.insert(0, '\u3000')
                (token, i) = tokens
                char_lexicon[token] = int(i)
        char_emb_layer = EmbeddingLayer(config['token_embedder']['char_dim'], char_lexicon, fix_emb=False)
        logging.info(('char embedding size: ' + str(len(char_emb_layer.word2id))))
    else:
        char_lexicon = None
        char_emb_layer = None
    # The word lexicon is always needed for batching, even without word embeddings.
    word_lexicon = {}
    with codecs.open(os.path.join(args.model, 'word.dic'), 'r', encoding='utf-8') as fpi:
        for line in fpi:
            tokens = line.strip().split('\t')
            if (len(tokens) == 1):
                tokens.insert(0, '\u3000')
            (token, i) = tokens
            word_lexicon[token] = int(i)
    if (config['token_embedder']['word_dim'] > 0):
        word_emb_layer = EmbeddingLayer(config['token_embedder']['word_dim'], word_lexicon, fix_emb=False, embs=None)
        logging.info(('word embedding size: ' + str(len(word_emb_layer.word2id))))
    else:
        word_emb_layer = None
    model = Model(config, word_emb_layer, char_emb_layer, len(word_lexicon), use_cuda)
    if use_cuda:
        model.cuda()
    logging.info(str(model))
    model.load_model(args.model)
    # Pick the corpus reader matching the embedder used at training time.
    if (config['token_embedder']['name'].lower() == 'cnn'):
        test = read_corpus(args.input, config['token_embedder']['max_characters_per_token'], max_sent_len=10000)
    elif (config['token_embedder']['name'].lower() == 'lstm'):
        test = read_corpus(args.input, max_sent_len=10000)
    else:
        raise ValueError('')
    # NOTE(review): the local `test` shadows this function's own name.
    (test_w, test_c, test_lens, test_masks) = create_batches(test, args.batch_size, word_lexicon, char_lexicon, config, sort=False, shuffle=False, use_cuda=use_cuda)
    test_result = eval_model(model, (test_w, test_c, test_lens, test_masks))
    logging.info('test_ppl={:.6f}'.format(test_result))
# NOTE(review): the bare `.xfail` below is almost certainly the tail of a
# stripped decorator (presumably `@pytest.mark.xfail`) -- confirm against the
# original test module; as written this line is a syntax error.
.xfail
def test_tell_in_random_order(first_add_33=False):
    """Telling points in shuffled order must yield the same learner state.

    For several integrands, builds two IntegratorLearners -- one told the
    requested points in order, one shuffled -- and checks that their data,
    approximating intervals and integral estimates coincide, and that the
    error bounds are consistent with a reference from ``algorithm_4``.
    """
    import random
    from operator import attrgetter
    tol = 1e-10
    for (f, a, b) in ([f0, 0, 3], [f21, 0, 1], [f24, 0, 3], [f7, 0, 1]):
        learners = []
        for shuffle in [True, False]:
            learner = IntegratorLearner(f, bounds=(a, b), tol=tol)
            if first_add_33:
                # Optionally pre-seed the learner with a small batch first.
                (xs, _) = learner.ask(33)
                for x in xs:
                    learner.tell(x, f(x))
            (xs, _) = learner.ask(10000)
            if shuffle:
                random.shuffle(xs)
            for x in xs:
                learner.tell(x, f(x))
            learners.append(learner)
        # Same set of points seen regardless of order.
        assert (set(learners[0].data) == set(learners[1].data))
        # Each learner's intervals must tile the domain without gaps.
        for learner in learners:
            ivals = sorted(learner.approximating_intervals, key=(lambda lrn: lrn.a))
            for i in range((len(ivals) - 1)):
                assert (ivals[i].b == ivals[(i + 1)].a), (ivals[i], ivals[(i + 1)])
        # Both learners end up with identical subdivisions.
        ivals = [sorted(ival, key=attrgetter('a')) for ival in [lrn.approximating_intervals for lrn in learners]]
        assert all(((ival.a == other_ival.a) for (ival, other_ival) in zip(*ivals)))
        ivals = [{(i.a, i.b) for i in lrn.approximating_intervals} for lrn in learners]
        assert (ivals[0] == ivals[1])
        # Integral estimates agree; error bounds cover the reference value.
        assert np.allclose(learners[0].igral, learners[1].igral), f
        (igral, err, *_) = algorithm_4(f, a, b, tol=tol)
        assert all((((lrn.err + err) >= abs((lrn.igral - igral))) for lrn in learners))
        for learner in learners:
            assert np.isfinite(learner.err)
def get_ghz_simple(n: int, measure: bool=True, full_measurement: bool=True) -> QuantumCircuit:
    """Build a GHZ state on ``n`` qubits via a linear CNOT chain.

    A Hadamard on qubit 0 followed by CNOTs q[i-1] -> q[i]. If ``measure``
    is True, a measurement circuit (full or single-qubit, per
    ``full_measurement``) is appended.
    """
    qreg = QuantumRegister(n, 'q')
    ghz = QuantumCircuit(qreg)
    ghz.h(qreg[0])
    for target in range(1, n):
        ghz.cx(qreg[target - 1], qreg[target])
    if measure:
        ghz = ghz + get_measurement_circ(n, 'q', 'c', full_measurement)
    return ghz
class MultilingualLinear(torch.nn.Module):
    """Linear layer whose shared weight is adapted per factor (e.g. language).

    A shared (output_size x input_size) weight is modulated by per-factor
    low-rank additive masks (r, s) and, optionally, rank-1 multiplicative
    factors (rm, sm). ``forward`` only supports a single factor index per
    call (a 1-D ``indices`` tensor of length 1).
    """

    def __init__(self, input_size, output_size, n_factors=1, rank=1, use_multiplicative=False, weight_drop=0.0, mfw_activation='none', no_bias=False):
        super().__init__()
        self.use_multiplicative = use_multiplicative
        self.weight_drop = weight_drop
        self.no_bias = no_bias
        # A bias-free configuration is only allowed together with
        # multiplicative factors (otherwise the layer has no adaptation).
        assert ((not self.no_bias) or self.use_multiplicative)
        self.weight = torch.nn.Parameter(torch.Tensor(output_size, input_size))
        # NOTE(review): self.bias is allocated even when no_bias is set;
        # get_weight always returns it.
        self.bias = torch.nn.Parameter(torch.Tensor(output_size))
        if (not self.no_bias):
            # Additive low-rank factorization: mask = sum_k r_k s_k^T per factor.
            self.r = torch.nn.Parameter(torch.Tensor(n_factors, rank, output_size))
            self.s = torch.nn.Parameter(torch.Tensor(n_factors, rank, input_size))
        if use_multiplicative:
            # Rank-1 multiplicative factors per factor index.
            self.rm = torch.nn.Parameter(torch.Tensor(n_factors, 1, output_size))
            self.sm = torch.nn.Parameter(torch.Tensor(n_factors, 1, input_size))
        self.reset_parameters()
        self.mfw_activation = mfw_activation.lower()

    def reset_parameters(self, init='normal'):
        """Initialize the shared weight (Xavier) and factor parameters
        (multiplicative factors start at the identity value 1.0)."""
        if (init == 'normal'):
            torch.nn.init.xavier_normal_(self.weight)
        else:
            torch.nn.init.xavier_uniform_(self.weight)
        if self.use_multiplicative:
            torch.nn.init.constant_(self.rm, 1.0)
            torch.nn.init.constant_(self.sm, 1.0)
        if (not self.no_bias):
            torch.nn.init.normal_(self.r, 0.0, 0.02)
            torch.nn.init.normal_(self.s, 0.0, 0.02)

    def freeze(self):
        """Stop gradient flow into the per-factor parameters (shared weight unaffected)."""
        if self.use_multiplicative:
            self.rm.requires_grad = False
            self.sm.requires_grad = False
        if (not self.no_bias):
            self.r.requires_grad = False
            self.s.requires_grad = False

    def unfreeze(self):
        """Re-enable gradient flow into the per-factor parameters."""
        if self.use_multiplicative:
            self.rm.requires_grad = True
            self.sm.requires_grad = True
        if (not self.no_bias):
            self.r.requires_grad = True
            self.s.requires_grad = True

    def get_weight(self, indices, factorize=True):
        """Return ``(weight, bias)``, with the weight adapted for the factor in ``indices``.

        ``indices`` must be None (raw shared weight) or a 1-D tensor with a
        single factor id. NOTE(review): the nesting of the index block under
        ``if factorize:`` was reconstructed from a copy with stripped
        indentation -- confirm against the original source.
        """
        weight_ = self.weight
        if (indices is None):
            return (weight_, self.bias)
        if factorize:
            weight_ = F.dropout(self.weight, p=self.weight_drop, training=self.training)
            if ((indices.size(0) == 1) and (len(indices.shape) == 1)):
                if self.use_multiplicative:
                    # Element-wise rescaling by the rank-1 outer product rm sm^T.
                    rm = torch.index_select(self.rm, 0, indices).squeeze(0)
                    sm = torch.index_select(self.sm, 0, indices).squeeze(0)
                    weight_ = (weight_ * torch.sum(torch.bmm(rm.unsqueeze((- 1)), sm.unsqueeze(1)), dim=0))
                # Optional nonlinearity applied to the (scaled) weight.
                if (self.mfw_activation == 'none'):
                    weight_ = weight_
                elif (self.mfw_activation == 'gelu'):
                    weight_ = F.gelu(weight_)
                elif (self.mfw_activation == 'silu'):
                    weight_ = F.silu(weight_)
                else:
                    raise NotImplementedError
                if (not self.no_bias):
                    # Additive low-rank mask: sum over rank of r s^T.
                    r = torch.index_select(self.r, 0, indices).squeeze(0)
                    s = torch.index_select(self.s, 0, indices).squeeze(0)
                    weight_mask = torch.bmm(r.unsqueeze((- 1)), s.unsqueeze(1))
                    weight_mask = torch.sum(weight_mask, dim=0)
                    weight_ = (weight_ + weight_mask)
        return (weight_, self.bias)

    def forward(self, input, indices=None, factorize=True):
        """Apply the factor-adapted linear map. Only a single factor index is supported.

        NOTE(review): calling with ``indices=None`` raises AttributeError here
        (``None.size``) even though get_weight handles None -- confirm intent.
        """
        if ((indices.size(0) == 1) and (len(indices.shape) == 1)):
            (weight_, bias) = self.get_weight(indices, factorize=factorize)
            # self.bias is identical to the `bias` returned by get_weight.
            input = F.linear(input, weight_, self.bias)
            return input
        else:
            print(indices.size(), input.size())
            raise NotImplementedError
class TaskPool(TaskPoolBase):
    """Aggregates incoming tasks into minibatches and exchanges them with a runtime process.

    Handler processes submit tasks via :meth:`submit_task`; tasks are grouped
    into batches bounded by ``min_batch_size``/``max_batch_size``, shipped to
    the runtime over a pipe, and the runtime's outputs are split back into
    per-task chunks and delivered through each task's future.
    """

    def __init__(self, process_func: callable, max_batch_size: int, name: str, min_batch_size=1, timeout=None, pool_size=None, prefetch_batches=1, daemon=True, start=False):
        """
        :param process_func: callable the runtime applies to each batch
        :param max_batch_size: hard cap on the summed task sizes in one batch
        :param min_batch_size: a batch may be flushed early only once it holds at least this much
        :param timeout: how long to wait for a new task before logging a warning
        :param pool_size: max number of queued tasks (falsy = unbounded)
        :param start: if True, start the pool process immediately
        """
        super().__init__(process_func, daemon=daemon, name=name)
        (self.min_batch_size, self.max_batch_size, self.timeout) = (min_batch_size, max_batch_size, timeout)
        self.prefetch_batches = prefetch_batches
        self.tasks = mp.Queue(maxsize=(pool_size or 0))
        # Submission timestamps of not-yet-dispatched tasks; drives scheduling priority.
        self.undispatched_task_timestamps = mp.SimpleQueue()
        (self.batch_receiver, self.batch_sender) = mp.Pipe(duplex=False)
        (self.outputs_receiver, self.outputs_sender) = mp.Pipe(duplex=False)
        if start:
            self.start()

    def submit_task(self, *args: torch.Tensor) -> Future:
        """Enqueue one task; returns a future resolved with that task's outputs."""
        task = Task(MPFuture(), args)
        if (self.get_task_size(task) > self.max_batch_size):
            # A task that can never fit into any batch fails immediately.
            exc = ValueError(f"Task size greater than max_batch_size ({self.max_batch_size}), it can't be processed")
            task.future.set_exception(exc)
        else:
            self.tasks.put(task)
            self.undispatched_task_timestamps.put(time.time())
        return task.future

    def iterate_minibatches(self, *args, **kwargs):
        """Infinite generator of task lists whose summed sizes respect the batch bounds."""
        batch = []
        total_size = 0
        while True:
            # Flush early once the minimum is met and no further task is waiting.
            if ((total_size >= self.min_batch_size) and self.tasks.empty()):
                (yield batch)
                batch = []
                total_size = 0
            try:
                logger.debug(f'{self.name} getting next task')
                task = self.tasks.get(timeout=self.timeout)
            except Empty:
                logger.warning(f"Timeout reached but batch doesn't contain >={self.min_batch_size} elements yet")
                continue
            task_size = self.get_task_size(task)
            # This task would overflow the batch: flush what we have first.
            if ((total_size + task_size) > self.max_batch_size):
                (yield batch)
                batch = []
                total_size = 0
            try:
                # Skip tasks whose futures were cancelled by the caller meanwhile.
                if task.future.set_running_or_notify_cancel():
                    batch.append(task)
                    total_size += task_size
            except InvalidStateError as e:
                logger.debug(f'Failed to add task to batch: {task.future} raised {e}')

    def run(self, *args, **kwargs):
        """Process entry point: feed batches to the runtime while a thread collects outputs."""
        torch.set_num_threads(1)
        logger.info(f'{self.name} starting, pid={os.getpid()}')
        pending_batches = {}
        output_thread = threading.Thread(target=self._pool_output_loop, args=[pending_batches], name=f'{self.name}_output', daemon=True)
        try:
            output_thread.start()
            self._pool_input_loop(pending_batches, *args, **kwargs)
        except KeyboardInterrupt:
            logger.debug('Caught KeyboardInterrupt, shutting down')
        finally:
            output_thread.join()

    def _pool_input_loop(self, pending_batches: Dict[(Any, List[Task])], *args, **kwargs):
        """Form batches, record them in ``pending_batches`` and send inputs to the runtime."""
        prev_num_tasks = 0
        batch_index = max(pending_batches.keys(), default=0)
        batch_iterator = self.iterate_minibatches(*args, **kwargs)
        while True:
            # Pop one timestamp per task dispatched last round; the newest one
            # becomes this pool's scheduling priority.
            for skip_i in range(prev_num_tasks):
                finished_task_timestamp = self.undispatched_task_timestamps.get()
                if (skip_i == (prev_num_tasks - 1)):
                    self.priority = finished_task_timestamp
            logger.debug(f'{self.name} getting next batch')
            batch_tasks = next(batch_iterator)
            pending_batches[batch_index] = batch_tasks
            logger.debug(f'{self.name}, batch {batch_index}: aggregating inputs')
            # Concatenate the i-th argument of every task along dim 0, then
            # move the tensors into shared memory for the runtime process.
            batch_inputs = [torch.cat([task.args[i] for task in batch_tasks]) for i in range(len(batch_tasks[0].args))]
            batch_inputs = [inp.detach().requires_grad_(inp.requires_grad).share_memory_() for inp in batch_inputs]
            logger.debug(f'{self.name}, batch {batch_index}: sending to runtime')
            self.batch_sender.send((batch_index, batch_inputs))
            logger.debug(f'{self.name}, batch {batch_index}: sent to runtime')
            prev_num_tasks = len(batch_tasks)
            batch_index += 1

    def _pool_output_loop(self, pending_batches: Dict[(Any, List[Task])]):
        """Receive batched outputs from the runtime and resolve each task's future."""
        while True:
            logger.debug(f'{self.name} waiting for results from runtime')
            (batch_index, batch_outputs) = self.outputs_receiver.recv()
            logger.debug(f'{self.name}, batch {batch_index}: got results')
            batch_tasks = pending_batches.pop(batch_index)
            # Split each output tensor back into per-task chunks along dim 0.
            task_sizes = [self.get_task_size(task) for task in batch_tasks]
            outputs_per_task = zip(*(torch.split_with_sizes(tensor, task_sizes, dim=0) for tensor in batch_outputs))
            logger.debug(f'{self.name}, batch {batch_index}: sending outputs to handlers')
            for (task, task_outputs) in zip(batch_tasks, outputs_per_task):
                try:
                    task.future.set_result(tuple(task_outputs))
                except InvalidStateError as e:
                    logger.debug(f'Failed to send task result due to an exception: {e}')

    def empty(self):
        """True if no batch is currently waiting for the runtime."""
        return (not self.batch_receiver.poll())

    def load_batch_to_runtime(self, timeout=None, device=None) -> Tuple[(Any, List[torch.Tensor])]:
        """Runtime side: fetch the next (batch_index, inputs) pair, moved to ``device``."""
        if (not self.batch_receiver.poll(timeout)):
            raise TimeoutError()
        (batch_index, batch_inputs) = self.batch_receiver.recv()
        batch_inputs = [tensor.to(device, non_blocking=True) for tensor in batch_inputs]
        return (batch_index, batch_inputs)

    def send_outputs_from_runtime(self, batch_index: int, batch_outputs: List[torch.Tensor]):
        """Runtime side: ship outputs back (on CPU, in shared memory) for delivery."""
        batch_outputs = [tensor.to(device='cpu').share_memory_().detach().requires_grad_(tensor.requires_grad) for tensor in batch_outputs]
        self.outputs_sender.send((batch_index, batch_outputs))

    def get_task_size(self, task: Task) -> int:
        """Task size = length (dim 0) of its first argument, or 1 if it has no args."""
        return (len(task.args[0]) if task.args else 1)
def sample_function(user_train, usernum, itemnum, batch_size, maxlen, threshold_user, threshold_item, result_queue, SEED):
    """Worker loop producing SASRec-style (user, seq, pos, neg) batches forever.

    With probability (1 - threshold_item) an interaction and its target are
    replaced by random items, and with probability (1 - threshold_user) the
    emitted user id is randomized -- a noise/augmentation scheme. Batches are
    pushed into ``result_queue``; this function never returns.
    """
    def sample():
        """Draw one training example from a random user with >1 interactions."""
        user = np.random.randint(1, (usernum + 1))
        while (len(user_train[user]) <= 1):
            user = np.random.randint(1, (usernum + 1))
        seq = np.zeros([maxlen], dtype=np.int32)
        pos = np.zeros([maxlen], dtype=np.int32)
        neg = np.zeros([maxlen], dtype=np.int32)
        nxt = user_train[user][(- 1)]
        idx = (maxlen - 1)
        ts = set(user_train[user])
        # Walk the history right-to-left, filling seq (inputs) and pos (targets).
        for i in reversed(user_train[user][:(- 1)]):
            if (random.random() > threshold_item):
                # Item-level noise: replace both the input and its target.
                i = np.random.randint(1, (itemnum + 1))
                nxt = np.random.randint(1, (itemnum + 1))
            seq[idx] = i
            pos[idx] = nxt
            if (nxt != 0):
                # Negative sample: an item this user never interacted with.
                neg[idx] = random_neq(1, (itemnum + 1), ts)
            nxt = i
            idx -= 1
            if (idx == (- 1)):
                break
        if (random.random() > threshold_user):
            # User-level noise: report a random user id for this example.
            user = np.random.randint(1, (usernum + 1))
        return (user, seq, pos, neg)
    np.random.seed(SEED)
    while True:
        one_batch = []
        for i in range(batch_size):
            one_batch.append(sample())
        # NOTE(review): this enqueues a lazy `zip` object; if result_queue is a
        # multiprocessing queue, a zip object cannot be pickled -- confirm the
        # consumer/queue type (original code likely predates Python 3).
        result_queue.put(zip(*one_batch))
class NvidiaGPUCollector(diamond.collector.ProcessCollector):
    """Diamond collector publishing per-GPU metrics for NVIDIA hardware.

    Uses the pynvml bindings when available (``USE_PYTHON_BINDING``),
    otherwise shells out to ``nvidia-smi``. Metrics are published as
    ``gpu_<index>.<stat_name>``. Note: this file uses Python 2 idioms
    (``izip``/``xrange``) consistent with the rest of the module.
    """

    def get_default_config_help(self):
        """Describe the collector's configuration keys."""
        config_help = super(NvidiaGPUCollector, self).get_default_config_help()
        config_help.update({'bin': 'The path to the nvidia-smi binary', 'stats': 'A list of Nvidia GPU stats to collect. Use `nvidia-smi --help-query-gpu` for more information'})
        return config_help

    def get_default_config(self):
        """Default configuration: metric path, nvidia-smi location and stat list."""
        config = super(NvidiaGPUCollector, self).get_default_config()
        config.update({'path': 'nvidia', 'bin': '/usr/bin/nvidia-smi', 'stats': ['index', 'memory.total', 'memory.used', 'memory.free', 'utilization.gpu', 'utilization.memory', 'temperature.gpu']})
        return config

    def collect_via_nvidia_smi(self, stats_config):
        """Collect by parsing ``nvidia-smi --query-gpu`` CSV output.

        The first stat in ``stats_config`` must be ``index``; the remaining
        columns are published one metric per GPU.
        """
        raw_output = self.run_command(['--query-gpu={query_gpu}'.format(query_gpu=','.join(stats_config)), '--format=csv,nounits,noheader'])
        if raw_output is None:
            return
        results = raw_output[0].strip().split('\n')
        for result in results:
            stats = result.strip().split(',')
            assert len(stats) == len(stats_config)
            index = stats[0]
            for (stat_name, metric) in izip(stats_config[1:], stats[1:]):
                metric_name = 'gpu_{index}.{stat_name}'.format(index=str(index), stat_name=stat_name)
                self.publish(metric_name, metric)

    def collect_via_pynvml(self, stats_config):
        """Collect through the NVML Python bindings (memory in MiB)."""
        try:
            NVML_TEMPERATURE_GPU = 0
            pynvml.nvmlInit()
            device_count = pynvml.nvmlDeviceGetCount()
            for device_index in xrange(device_count):
                handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
                memoryInfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
                utilizationRates = pynvml.nvmlDeviceGetUtilizationRates(handle)
                metrics = {
                    'memory.total': ((memoryInfo.total / 1024) / 1024),
                    # BUG FIX: previously reported memoryInfo.total here, so
                    # memory.used always equaled memory.total.
                    'memory.used': ((memoryInfo.used / 1024) / 1024),
                    'memory.free': ((memoryInfo.free / 1024) / 1024),
                    'utilization.gpu': utilizationRates.gpu,
                    'utilization.memory': utilizationRates.memory,
                    'temperature.gpu': pynvml.nvmlDeviceGetTemperature(handle, NVML_TEMPERATURE_GPU),
                }
                for stat_name in stats_config[1:]:
                    metric = metrics.get(stat_name)
                    # NOTE(review): this falsy check also drops legitimate
                    # zero readings (e.g. 0% utilization) -- kept as-is to
                    # preserve existing behavior, but consider `is not None`.
                    if metric:
                        metric_name = 'gpu_{index}.{stat_name}'.format(index=str(device_index), stat_name=stat_name)
                        self.publish(metric_name, metric)
        finally:
            # Always release NVML, even if a query above raised.
            pynvml.nvmlShutdown()

    def collect(self):
        """Entry point: dispatch to pynvml or nvidia-smi backed collection."""
        stats_config = self.config['stats']
        if USE_PYTHON_BINDING:
            collect_metrics = self.collect_via_pynvml
        else:
            collect_metrics = self.collect_via_nvidia_smi
        collect_metrics(stats_config)
class CpmTokenizerFast(XLNetTokenizerFast):
    """Fast CPM tokenizer: XLNet tokenization preceded by jieba word segmentation.

    Whitespace inside segmented words is mapped to the placeholder characters
    U+2582/U+2583 before encoding, and mapped back to space/newline in
    ``_decode``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__('You need to install jieba to use CpmTokenizer or CpmTokenizerFast. See https://pypi.org/project/jieba/ for installation.')
        self.jieba = jieba
        # BUG FIX: the table previously was str.maketrans(' \n', ''), which
        # raises ValueError (the two string arguments must be equal length).
        # The intended mapping encodes ' ' -> U+2582 and '\n' -> U+2583 so the
        # underlying sentencepiece model never sees raw whitespace.
        self.translator = str.maketrans(' \n', '\u2582\u2583')

    def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs):
        """Segment each text with jieba, translate whitespace placeholders, then encode."""
        batch_text_or_text_pairs = [' '.join([x.translate(self.translator) for x in self.jieba.cut(text, cut_all=False)]) for text in batch_text_or_text_pairs]
        return super()._batch_encode_plus(batch_text_or_text_pairs, *args, **kwargs)

    def _decode(self, *args, **kwargs):
        """Decode and undo the whitespace-placeholder encoding."""
        text = super()._decode(*args, **kwargs)
        # Drop the spaces jieba-joining introduced, then restore the real
        # whitespace from the placeholder characters.
        text = text.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')
        return text
class CustomCircuitOracle(Oracle):
    """Oracle built from a user-supplied QuantumCircuit.

    Wraps an existing circuit plus its variable/output (and optional
    ancillary) registers, optionally with a classical evaluation callback
    used e.g. by Grover's search to verify measurement outcomes.
    """

    def __init__(self, variable_register: QuantumRegister, output_register: QuantumRegister, circuit: QuantumCircuit, ancillary_register: Optional[QuantumRegister]=None, evaluate_classically_callback: Optional[Callable[([str], Tuple[(bool, List[int])])]]=None):
        """
        Args:
            variable_register: register holding the oracle's input variables.
            output_register: register the oracle writes its result into.
            circuit: the custom circuit implementing the oracle.
            ancillary_register: optional scratch register.
            evaluate_classically_callback: optional callback mapping a
                measurement bitstring to (is_solution, assignment).
        Raises:
            AquaError: if any of the three mandatory arguments is missing.
        """
        super().__init__()
        if variable_register is None:
            raise AquaError('Missing QuantumRegister for variables.')
        if output_register is None:
            raise AquaError('Missing QuantumRegister for output.')
        if circuit is None:
            raise AquaError('Missing custom QuantumCircuit for the oracle.')
        self._variable_register = variable_register
        self._output_register = output_register
        self._circuit = circuit
        self._ancillary_register = ancillary_register
        if evaluate_classically_callback is not None:
            self.evaluate_classically = evaluate_classically_callback

    # These accessors are properties (the @property decorators were stripped
    # in this copy): callers access them as attributes, matching the Oracle
    # interface.
    @property
    def variable_register(self):
        return self._variable_register

    @property
    def output_register(self):
        return self._output_register

    @property
    def ancillary_register(self):
        return self._ancillary_register

    @property
    def circuit(self):
        return self._circuit

    def construct_circuit(self):
        """Return the wrapped circuit (already constructed by the user)."""
        return self._circuit
def convert(src, dst):
    """Extract backbone weights from a detector checkpoint into a plain ResNet state dict.

    Reads the checkpoint at ``src`` (using its 'state_dict' entry if present),
    keeps only keys starting with 'backbone', renames them to the standard
    torchvision-style layout (conv1/bn1/gn1, layerX.Y.convZ/bnZ/gnZ,
    layerX.Y.downsample.0/1) and saves ``{'state_dict': ..., 'meta': {}}``
    to ``dst``. Unknown key shapes raise RuntimeError.
    """
    state_dict = OrderedDict()
    src_dict = torch.load(src)
    src_state_dict = src_dict.get('state_dict', src_dict)
    for key, value in src_state_dict.items():
        if not key.startswith('backbone'):
            continue
        stripped = key.replace('backbone.', '')
        parts = stripped.split('.')
        tail = parts[-1]
        if stripped.startswith('conv1'):
            # Stem: conv/bn/gn all map to <kind>1.<tail>.
            kind = parts[1]
            if kind in ('conv', 'bn', 'gn'):
                name = f'{kind}1.{tail}'
            else:
                raise RuntimeError(stripped)
        elif stripped.startswith('layer'):
            layer_idx = int(parts[0][-1])
            block_idx = int(parts[1])
            if parts[2] == 'downsample':
                # Downsample branch: conv -> .0, norm (bn/gn) -> .1.
                if parts[3] == 'conv':
                    name = f'layer{layer_idx}.{block_idx}.downsample.0.{tail}'
                elif parts[3] in ('bn', 'gn'):
                    name = f'layer{layer_idx}.{block_idx}.downsample.1.{tail}'
                else:
                    raise RuntimeError(stripped)
            elif parts[3] in ('conv', 'bn', 'gn'):
                # Regular block module: convN.conv -> convN, convN.bn -> bnN, etc.
                module_idx = int(parts[2][-1])
                name = f'layer{layer_idx}.{block_idx}.{parts[3]}{module_idx}.{tail}'
            else:
                raise RuntimeError(stripped)
        else:
            raise RuntimeError(f'{stripped}')
        state_dict[name] = value
        print(f'{key} --> {name}')
    checkpoint = {'state_dict': state_dict, 'meta': dict()}
    torch.save(checkpoint, dst)
class GatedConv(nn.Module):
    """Gated (GLU-style) convolution: a weight-normalized conv produces twice
    the input channels, which are split into values and a sigmoid gate."""

    def __init__(self, input_size, width=3, dropout=0.2, nopad=False):
        super(GatedConv, self).__init__()
        # With nopad, vertical padding collapses to 0; otherwise "same" padding.
        pad_h = (width // 2) * (1 - nopad)
        self.conv = onmt.modules.WeightNormConv2d(input_size, 2 * input_size, kernel_size=(width, 1), stride=(1, 1), padding=(pad_h, 0))
        init.xavier_uniform_(self.conv.weight, gain=(4 * (1 - dropout)) ** 0.5)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x_var):
        """Dropout -> conv -> split channels in half -> values * sigmoid(gate)."""
        hidden = self.conv(self.dropout(x_var))
        values, gates = hidden.split(hidden.size(1) // 2, 1)
        return values * torch.sigmoid(gates)
def _applyActionSide1(state, act):
    """Apply side 1's action and return a probability distribution over successor states.

    'Super Potion' deterministically heals 50 HP. Any other action is looked
    up in ``attack_data`` and resolved through the crit/damage distribution,
    yielding a defaultdict mapping (me, them, extra) states to probabilities.
    """
    me, them, extra = state
    if act == 'Super Potion':
        return {(applyHPChange(me, 50), them, extra): Fraction(1)}
    mdata = attack_data[act]
    # Special moves use the special stat (index 3); physical use atk/def (0/1).
    atk_idx = 3 if mdata.isspec else 0
    def_idx = 3 if mdata.isspec else 1
    # High-crit moves divide base speed by 64 instead of 512.
    crit_div = 64 if mdata.crit else 512
    dmg_dist = getCritDist(me.fixed.lvl, Fraction(me.fixed.basespeed, crit_div), me.stats[atk_idx], me.fixed.stats[atk_idx], them.stats[def_idx], them.fixed.stats[def_idx], mdata.power, mdata.stab, mdata.te)
    outcome = defaultdict(Fraction)
    for dmg, prob in dmg_dist.items():
        outcome[(me, applyHPChange(them, -dmg), extra)] += prob
    return outcome
def exceptions2exit(exception_list):
    """Decorator factory converting the listed exceptions into a clean exit.

    The wrapped function's exceptions matching ``exception_list`` are caught,
    optionally traced (when the CLI log level is DEBUG or lower), printed as
    ``ERROR: ...`` and turned into ``sys.exit(1)``.

    Args:
        exception_list: iterable of exception classes to intercept.

    Returns:
        A decorator preserving the wrapped function's metadata.
    """
    from functools import wraps

    def exceptions2exit_decorator(func):
        # The bare `(func)` expression here was a no-op left over from a
        # stripped decorator; restore functools.wraps so the wrapper keeps
        # func's name/docstring.
        @wraps(func)
        def func_wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except tuple(exception_list) as ex:
                from .cli import get_log_level
                if get_log_level() <= logging.DEBUG:
                    traceback.print_exc()
                print(f'ERROR: {ex}')
                sys.exit(1)
        return func_wrapper
    return exceptions2exit_decorator
def _validate_jwk(jwk):
    """Validate that *jwk* looks like a usable JSON Web Key.

    Requires a 'kty' field; EC keys must carry 'x' and 'y', RSA keys 'e' and
    'n'. Any other key type, or a missing field, triggers ``abort(400)``.
    """
    if 'kty' not in jwk:
        abort(400)
    kty = jwk['kty']
    if kty == 'EC':
        required = ('x', 'y')
    elif kty == 'RSA':
        required = ('e', 'n')
    else:
        abort(400)
        return
    if any(field not in jwk for field in required):
        abort(400)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.