@pytest.mark.end_to_end()
def test_collect_string_product_raises_error_with_annotation(runner, tmp_path):
source = '\n from pytask import Product\n from typing_extensions import Annotated\n\n def task_write_text(out: Annotated[str, Product] = "out.txt") -> None:\n out.touch()\n '
tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.FAILED)
def test_inp_tags_getter_and_setter():
common_data = {'ElementType': (['Subcatch'] * 4), 'Name': ['CA-1', 'CA-7', 'CA-8', 'CA-11'], 'Tag': (['CA'] * 4)}
expected_output = pd.DataFrame(common_data)
expected_output.set_index(['ElementType'], inplace=True)
common_data['Tag'] = (['Modified'] * 4)
tags_to_set = pd.DataFrame(common_data)
tags_to_set.set_index(['ElementType'], inplace=True)
model = Model(MODEL_GREEN_AMPT)
with tempfile.TemporaryDirectory() as tempdir:
temp_inp_path = os.path.join(tempdir, f'{model.inp.name}.inp')
model.inp.save(temp_inp_path)
temp_model = Model(temp_inp_path)
assert expected_output.equals(temp_model.inp.tags.sort_index())
temp_model.inp.tags['Tag'] = (['Modified'] * 4)
        assert tags_to_set.equals(temp_model.inp.tags.sort_index())
def save_h5_output(h5_filename, seg, segrefine, group, grouppred, label_dtype='uint8'):
print(h5_filename)
    h5_fout = h5py.File(h5_filename, 'w')  # open explicitly for writing; modern h5py defaults to read-only
h5_fout.create_dataset('seglabel', data=seg, compression='gzip', compression_opts=1, dtype=label_dtype)
h5_fout.create_dataset('segrefine', data=segrefine, compression='gzip', compression_opts=1, dtype=label_dtype)
h5_fout.create_dataset('pid', data=group, compression='gzip', compression_opts=1, dtype=label_dtype)
h5_fout.create_dataset('predpid', data=grouppred, compression='gzip', compression_opts=1, dtype=label_dtype)
    h5_fout.close()
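
# A hedged usage sketch for save_h5_output above, assuming h5py is importable
# as in the function body. The filename and the dummy label array are
# illustrative only; in practice the four datasets hold the ground-truth and
# refined segmentation labels plus the true and predicted part-instance ids.
import numpy as np

labels = np.zeros((1024,), dtype=np.uint8)
save_h5_output('demo_labels.h5', seg=labels, segrefine=labels, group=labels, grouppred=labels)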
def hsic_gam(X=None, Y=None, alph=None, width_x=None, width_y=None, K=None, Kc=None, L=None, Lc=None, mode=None, kwdth='mdbs'):
n = X.shape[0]
if (kwdth == 'scott'):
width_x = bw_scott(X)
width_y = bw_scott(Y)
elif (kwdth == 'silverman'):
width_x = bw_silverman(X)
width_y = bw_silverman(Y)
elif isinstance(kwdth, float):
width_x = kwdth
width_y = kwdth
else:
if ((width_x is None) and ((K is None) or (Kc is None))):
width_x = get_width(X)
if ((width_y is None) and ((L is None) or (Lc is None))):
width_y = get_width(Y)
bone = np.ones((n, 1), dtype=float)
H = (np.identity(n) - (np.ones((n, n), dtype=float) / n))
if ((K is None) or (Kc is None)):
K = rbf_dot(X, X, width_x)
Kc = np.dot(np.dot(H, K), H)
if ((L is None) or (Lc is None)):
L = rbf_dot(Y, Y, width_y)
Lc = np.dot(np.dot(H, L), H)
testStat = (np.sum((Kc.T * Lc)) / n)
if (mode == 'testStat'):
return testStat
varHSIC = (((Kc * Lc) / 6) ** 2)
varHSIC = (((np.sum(varHSIC) - np.trace(varHSIC)) / n) / (n - 1))
varHSIC = (((((((varHSIC * 72) * (n - 4)) * (n - 5)) / n) / (n - 1)) / (n - 2)) / (n - 3))
K = (K - np.diag(np.diag(K)))
L = (L - np.diag(np.diag(L)))
muX = ((np.dot(np.dot(bone.T, K), bone) / n) / (n - 1))
muY = ((np.dot(np.dot(bone.T, L), bone) / n) / (n - 1))
mHSIC = ((((1 + (muX * muY)) - muX) - muY) / n)
al = ((mHSIC ** 2) / varHSIC)
bet = ((varHSIC * n) / mHSIC)
if (mode == 'pvalue'):
p_value = (1 - gamma.cdf(testStat, al, scale=bet))
return p_value
thresh = gamma.ppf((1 - alph), al, scale=bet)[0][0]
if (mode == 'testStatMinusThres'):
return (testStat - thresh)
    return (testStat < thresh)
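
# A hedged usage sketch for hsic_gam above, assuming its kernel helpers
# (rbf_dot, get_width) and scipy.stats.gamma are importable in this module.
# The toy data is illustrative only.
import numpy as np

rng = np.random.RandomState(0)
x = rng.normal(size=(200, 1))
y = x + 0.1 * rng.normal(size=(200, 1))  # y depends on x
p_value = hsic_gam(X=x, Y=y, alph=0.05, mode='pvalue')
below_thresh = hsic_gam(X=x, Y=y, alph=0.05)  # True when testStat < gamma threshold, i.e. independence is not rejected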
class GeodesicLengthSpace(LengthSpace):
def geodesic(self, pt_a: Point, pt_b: Point, frac: float=0.5, **kwargs) -> Point:
if (len(kwargs) > 0):
warnings.warn(f'{self.__class__.__name__}.geodesic takes no kwargs, but got {kwargs.keys()}')
if torch.allclose(pt_a, pt_b):
return pt_a.clone()
elif (frac == 0.0):
return pt_a.clone()
elif (frac == 1.0):
return pt_b.clone()
else:
return self._geodesic_impl(pt_a, pt_b, frac)
    def _geodesic_impl(self, pt_a: Point, pt_b: Point, frac: float=0.5) -> Point:
        # Subclasses supply the actual geodesic computation.
        raise NotImplementedError
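
# A hedged sketch of a concrete subclass: in flat Euclidean space the geodesic
# is a straight line, so _geodesic_impl reduces to linear interpolation.
# EuclideanLengthSpace is an illustrative name, not from the source.
class EuclideanLengthSpace(GeodesicLengthSpace):
    def _geodesic_impl(self, pt_a: Point, pt_b: Point, frac: float=0.5) -> Point:
        return (1.0 - frac) * pt_a + frac * pt_b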
def get_imagenet_dataloader_sample(data_folder, args, is_sample=True):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transform = transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])
test_transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])
train_data_folder = os.path.join(data_folder, 'train')
test_data_folder = os.path.join(data_folder, 'val')
if (args.evaluate is False):
train_set = ImageFolderSample(train_data_folder, transform=train_transform, args=args, is_sample=is_sample)
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
test_set = datasets.ImageFolder(test_data_folder, transform=test_transform)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)
if (args.evaluate is False):
print('num_samples', len(train_set.samples))
print('num_class', len(train_set.classes))
return (len(train_set), train_loader, test_loader)
else:
        return test_loader
def basic_blocks(dim, index, layers, segment_dim, mlp_ratio=3.0, qkv_bias=False, qk_scale=None, attn_drop=0, drop_path_rate=0.0, skip_lam=1.0, mlp_fn=WeightedPermuteMLP, **kwargs):
blocks = []
for block_idx in range(layers[index]):
block_dpr = ((drop_path_rate * (block_idx + sum(layers[:index]))) / (sum(layers) - 1))
blocks.append(PermutatorBlock(dim, segment_dim, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, drop_path=block_dpr, skip_lam=skip_lam, mlp_fn=mlp_fn))
blocks = nn.Sequential(*blocks)
    return blocks
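
# A hedged illustration of the stochastic-depth schedule used in basic_blocks
# above: per-block drop-path rates ramp linearly from 0 for the first block of
# the network to drop_path_rate for the very last one. Stage depths are illustrative.
layers = [4, 3, 8, 3]
drop_path_rate = 0.1
total_blocks = sum(layers)
rates = [drop_path_rate * i / (total_blocks - 1) for i in range(total_blocks)]
# rates[0] == 0.0 and rates[-1] == drop_path_rate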
@pytest.mark.parametrize('remote_config, expected_token', [({'type': HvcsClient.GITHUB.value}, EnvConfigVar(env='GH_TOKEN')), ({'type': HvcsClient.GITLAB.value}, EnvConfigVar(env='GITLAB_TOKEN')), ({'type': HvcsClient.GITEA.value}, EnvConfigVar(env='GITEA_TOKEN')), ({}, EnvConfigVar(env='GH_TOKEN'))])
def test_load_hvcs_default_token(remote_config: dict[(str, Any)], expected_token):
raw_config = RawConfig.model_validate({'remote': remote_config})
    assert (expected_token == raw_config.remote.token)
class DeletedMessagesLogURLTests(AuthenticatedAPITestCase):
    @classmethod
    def setUpTestData(cls):
cls.author = cls.actor = User.objects.create(id=324888, name='Black Knight', discriminator=1975)
cls.deletion_context = MessageDeletionContext.objects.create(actor=cls.actor, creation=datetime.now(tz=UTC))
def test_valid_log_url(self):
expected_url = reverse('staff:logs', args=(1,))
[context] = MessageDeletionContext.objects.all()
        self.assertEqual(context.log_url, expected_url)
class DataLoaderTrain(IterableDataset):
def __init__(self, data_dir, filename_pat, args, world_size, worker_rank, cuda_device_idx, news_index, news_combined, word_dict, enable_prefetch=True, enable_shuffle=False, enable_gpu=True):
self.data_dir = data_dir
self.filename_pat = filename_pat
self.npratio = args.npratio
self.user_log_length = args.user_log_length
self.batch_size = args.batch_size
self.worker_rank = worker_rank
self.world_size = world_size
self.cuda_device_idx = cuda_device_idx
self.sampler = None
self.shuffle_buffer_size = args.shuffle_buffer_size
self.enable_prefetch = enable_prefetch
self.enable_shuffle = enable_shuffle
self.enable_gpu = enable_gpu
self.epoch = (- 1)
self.news_combined = news_combined
self.news_index = news_index
self.word_dict = word_dict
def start(self):
self.epoch += 1
self.sampler = StreamSampler(data_dir=self.data_dir, filename_pat=self.filename_pat, batch_size=self.batch_size, worker_rank=self.worker_rank, world_size=self.world_size, enable_shuffle=self.enable_shuffle, shuffle_buffer_size=self.shuffle_buffer_size, shuffle_seed=self.epoch)
self.sampler.__iter__()
def trans_to_nindex(self, nids):
return [(self.news_index[i] if (i in self.news_index) else 0) for i in nids]
def pad_to_fix_len(self, x, fix_length, padding_front=True, padding_value=0):
if padding_front:
pad_x = (([padding_value] * (fix_length - len(x))) + x[(- fix_length):])
mask = (([0] * (fix_length - len(x))) + ([1] * min(fix_length, len(x))))
else:
pad_x = (x[:fix_length] + ([padding_value] * (fix_length - len(x))))
mask = (([1] * min(fix_length, len(x))) + ([0] * (len(x) - fix_length)))
return (pad_x, mask)
def _produce(self):
if self.enable_gpu:
torch.cuda.set_device(self.cuda_device_idx)
try:
self.epoch += 1
self.sampler = StreamSampler(data_dir=self.data_dir, filename_pat=self.filename_pat, batch_size=self.batch_size, worker_rank=self.worker_rank, world_size=self.world_size, enable_shuffle=self.enable_shuffle, shuffle_seed=self.epoch)
for batch in self.sampler:
if self.stopped:
break
context = self._process(batch)
self.outputs.put(context)
self.aval_count += 1
except:
traceback.print_exc(file=sys.stdout)
self.pool.shutdown(wait=False)
raise
def start_async(self):
self.aval_count = 0
self.stopped = False
self.outputs = Queue(10)
self.pool = ThreadPoolExecutor(1)
self.pool.submit(self._produce)
def parse_sent(self, sent, fix_length):
sent = [(self.word_dict[w] if (w in self.word_dict) else 0) for w in utils.word_tokenize(sent)]
(sent, _) = self.pad_to_fix_len(sent, fix_length, padding_front=False)
return sent
def parse_sents(self, sents, max_sents_num, max_sent_length, padding_front=True):
(sents, sents_mask) = self.pad_to_fix_len(sents, max_sents_num, padding_value='')
sents = [self.parse_sent(s, max_sent_length) for s in sents]
sents = np.stack(sents, axis=0)
sents_mask = np.array(sents_mask)
return (sents, sents_mask)
def _process(self, batch):
batch_size = len(batch)
(batch_poss, batch) = batch
batch_poss = [x.decode(encoding='utf-8') for x in batch_poss]
batch = [x.decode(encoding='utf-8').split('\t') for x in batch]
label = 0
(user_feature_batch, log_mask_batch, news_feature_batch, label_batch) = ([], [], [], [])
for (poss, line) in zip(batch_poss, batch):
click_docs = line[3].split()
(click_docs, log_mask) = self.pad_to_fix_len(self.trans_to_nindex(click_docs), self.user_log_length)
user_feature = self.news_combined[click_docs]
sess_news = [i.split('-') for i in line[4].split()]
sess_neg = [i[0] for i in sess_news if (i[(- 1)] == '0')]
poss = self.trans_to_nindex([poss])
sess_neg = self.trans_to_nindex(sess_neg)
if (len(sess_neg) > 0):
neg_index = news_sample(list(range(len(sess_neg))), self.npratio)
sam_negs = [sess_neg[i] for i in neg_index]
else:
sam_negs = ([0] * self.npratio)
sample_news = (poss + sam_negs)
news_feature = self.news_combined[sample_news]
user_feature_batch.append(user_feature)
log_mask_batch.append(log_mask)
news_feature_batch.append(news_feature)
label_batch.append(label)
if self.enable_gpu:
user_feature_batch = torch.LongTensor(user_feature_batch).cuda()
log_mask_batch = torch.FloatTensor(log_mask_batch).cuda()
news_feature_batch = torch.LongTensor(news_feature_batch).cuda()
label_batch = torch.LongTensor(label_batch).cuda()
else:
user_feature_batch = torch.LongTensor(user_feature_batch)
log_mask_batch = torch.FloatTensor(log_mask_batch)
news_feature_batch = torch.LongTensor(news_feature_batch)
label_batch = torch.LongTensor(label_batch)
return (user_feature_batch, log_mask_batch, news_feature_batch, label_batch)
def __iter__(self):
logging.info('DataLoader __iter__()')
if self.enable_prefetch:
self.join()
self.start_async()
else:
self.start()
return self
def __next__(self):
if (self.sampler and self.sampler.reach_end() and (self.aval_count == 0)):
raise StopIteration
if self.enable_prefetch:
next_batch = self.outputs.get()
self.outputs.task_done()
self.aval_count -= 1
else:
next_batch = self._process(self.sampler.__next__())
return next_batch
def join(self):
self.stopped = True
if self.sampler:
if self.enable_prefetch:
while (self.outputs.qsize() > 0):
self.outputs.get()
self.outputs.task_done()
self.outputs.join()
self.pool.shutdown(wait=True)
logging.info('shut down pool.')
            self.sampler = None
class DuringEvent(Event):
def _trigger(self, model, *args, **kwargs):
res = super(DuringEvent, self)._trigger(model, *args, **kwargs)
if (res is False):
state = self.machine.get_state(model.state)
event_data = EventData(state, self, self.machine, model, args=args, kwargs=kwargs)
event_data.result = res
state.during(event_data)
        return res
class WorkGroup(ContentManageable, NameSlugModel):
active = models.BooleanField(default=True, db_index=True)
approved = models.BooleanField(default=False, db_index=True)
short_description = models.TextField(blank=True, help_text='Short description used on listing pages')
purpose = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE, help_text='State what the mission of the group is. List all (if any) common goals that will be shared amongst the workgroup.')
active_time = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE, help_text='How long will this workgroup exist? If the mission is not complete by the stated time, is it extendable? Is so, for how long?')
core_values = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE, help_text='List the core values that the workgroup will adhere to throughout its existence. Will the workgroup adopt any statements? If so, which statement?')
rules = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE, help_text='Give a comprehensive explanation of how the decision making will work within the workgroup and list the rules that accompany these procedures.')
communication = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE, help_text='How will the team communicate? How often will the team communicate?')
support = MarkupField(blank=True, default_markup_type=DEFAULT_MARKUP_TYPE, help_text='What resources will you need from the PSF in order to have a functional and effective workgroup?')
url = models.URLField('URL', blank=True, help_text='Main URL for Group')
organizers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='+')
    members = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='working_groups')
class _SavedCmd2Env:
def __init__(self) -> None:
self.readline_settings = _SavedReadlineSettings()
self.readline_module: Optional[ModuleType] = None
self.history: List[str] = []
self.sys_stdout: Optional[TextIO] = None
        self.sys_stdin: Optional[TextIO] = None
class SetEmojiStatus:
async def set_emoji_status(self: 'pyrogram.Client', emoji_status: Optional['types.EmojiStatus']=None) -> bool:
(await self.invoke(raw.functions.account.UpdateEmojiStatus(emoji_status=(emoji_status.write() if emoji_status else raw.types.EmojiStatusEmpty()))))
        return True
class PresetEchoesBeamConfiguration(PresetTab, Ui_PresetEchoesBeamConfiguration):
def __init__(self, editor: PresetEditor, game_description: GameDescription, window_manager: WindowManager):
super().__init__(editor, game_description, window_manager)
self.setupUi(self)
def _add_header(text: str, col: int):
label = QtWidgets.QLabel(self.beam_configuration_group)
label.setText(text)
self.beam_configuration_layout.addWidget(label, 0, col)
_add_header('Ammo A', 1)
_add_header('Ammo B', 2)
_add_header('Uncharged', 3)
_add_header('Charged', 4)
_add_header('Combo', 5)
_add_header('Missiles for Combo', 6)
self._beam_ammo_a = {}
self._beam_ammo_b = {}
self._beam_uncharged = {}
self._beam_charged = {}
self._beam_combo = {}
self._beam_missile = {}
def _create_ammo_combo():
combo = QComboBox(self.beam_configuration_group)
combo.addItem('None', (- 1))
combo.addItem('Power Bomb', 43)
combo.addItem('Missile', 44)
combo.addItem('Dark Ammo', 45)
combo.addItem('Light Ammo', 46)
return combo
row = 1
for (beam, beam_name) in _BEAMS.items():
label = QtWidgets.QLabel(self.beam_configuration_group)
label.setText(beam_name)
self.beam_configuration_layout.addWidget(label, row, 0)
ammo_a = _create_ammo_combo()
ammo_a.currentIndexChanged.connect(functools.partial(self._on_ammo_type_combo_changed, beam, ammo_a, False))
self._beam_ammo_a[beam] = ammo_a
self.beam_configuration_layout.addWidget(ammo_a, row, 1)
ammo_b = _create_ammo_combo()
ammo_b.currentIndexChanged.connect(functools.partial(self._on_ammo_type_combo_changed, beam, ammo_b, True))
self._beam_ammo_b[beam] = ammo_b
self.beam_configuration_layout.addWidget(ammo_b, row, 2)
spin_box = QtWidgets.QSpinBox(self.beam_configuration_group)
spin_box.setSuffix(' ammo')
spin_box.setMaximum(250)
spin_box.valueChanged.connect(functools.partial(self._on_ammo_cost_spin_changed, beam, 'uncharged_cost'))
self._beam_uncharged[beam] = spin_box
self.beam_configuration_layout.addWidget(spin_box, row, 3)
spin_box = QtWidgets.QSpinBox(self.beam_configuration_group)
spin_box.setSuffix(' ammo')
spin_box.setMaximum(250)
spin_box.valueChanged.connect(functools.partial(self._on_ammo_cost_spin_changed, beam, 'charged_cost'))
self._beam_charged[beam] = spin_box
self.beam_configuration_layout.addWidget(spin_box, row, 4)
spin_box = QtWidgets.QSpinBox(self.beam_configuration_group)
spin_box.setSuffix(' ammo')
spin_box.setMaximum(250)
spin_box.valueChanged.connect(functools.partial(self._on_ammo_cost_spin_changed, beam, 'combo_ammo_cost'))
self._beam_combo[beam] = spin_box
self.beam_configuration_layout.addWidget(spin_box, row, 5)
spin_box = QtWidgets.QSpinBox(self.beam_configuration_group)
spin_box.setSuffix(' missile')
spin_box.setMaximum(250)
spin_box.setMinimum(1)
spin_box.valueChanged.connect(functools.partial(self._on_ammo_cost_spin_changed, beam, 'combo_missile_cost'))
self._beam_missile[beam] = spin_box
self.beam_configuration_layout.addWidget(spin_box, row, 6)
row += 1
    @classmethod
    def tab_title(cls) -> str:
return 'Beam Configuration'
    @classmethod
    def uses_patches_tab(cls) -> bool:
return True
def _on_ammo_type_combo_changed(self, beam: str, combo: QComboBox, is_ammo_b: bool, _):
with self._editor as editor:
beam_configuration = editor.configuration.beam_configuration
old_config: BeamAmmoConfiguration = getattr(beam_configuration, beam)
if is_ammo_b:
new_config = dataclasses.replace(old_config, ammo_b=combo.currentData())
else:
new_config = dataclasses.replace(old_config, ammo_a=combo.currentData())
editor.set_configuration_field('beam_configuration', dataclasses.replace(beam_configuration, **{beam: new_config}))
def _on_ammo_cost_spin_changed(self, beam: str, field_name: str, value: int):
with self._editor as editor:
beam_configuration = editor.configuration.beam_configuration
new_config = dataclasses.replace(getattr(beam_configuration, beam), **{field_name: value})
editor.set_configuration_field('beam_configuration', dataclasses.replace(beam_configuration, **{beam: new_config}))
def on_preset_changed(self, preset: Preset):
beam_configuration = preset.configuration.beam_configuration
for beam in _BEAMS:
config: BeamAmmoConfiguration = getattr(beam_configuration, beam)
set_combo_with_value(self._beam_ammo_a[beam], config.ammo_a)
set_combo_with_value(self._beam_ammo_b[beam], config.ammo_b)
self._beam_ammo_b[beam].setEnabled((config.ammo_a != (- 1)))
self._beam_uncharged[beam].setValue(config.uncharged_cost)
self._beam_charged[beam].setValue(config.charged_cost)
self._beam_combo[beam].setValue(config.combo_ammo_cost)
            self._beam_missile[beam].setValue(config.combo_missile_cost)
class WrappedChecksumCL(Component):
def construct(s, DutType=ChecksumCL):
s.recv = CalleeIfcCL(Type=Bits128)
s.give = CalleeIfcCL(Type=Bits32)
s.checksum_unit = DutType()
s.out_q = BypassQueueCL(num_entries=1)
        connect_pairs(s.recv, s.checksum_unit.recv, s.checksum_unit.send, s.out_q.enq, s.out_q.deq, s.give)
def _fill_flatten_shape_if_needed(op: Op):
if ((op.type == 'Flatten') and op.output_shape):
dims = op.output_shape.as_list()
if dims:
if (dims[(- 1)] is None):
output_size = 1
input_shape = op.inputs[0].shape.as_list()
for dim in input_shape:
if (dim is not None):
output_size *= dim
new_output_shape = tf.TensorShape([tf.compat.v1.Dimension(None), tf.compat.v1.Dimension(output_size)])
op.output_shape = new_output_shape
                op.output.shape = new_output_shape
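
# A hedged illustration of the fix above: a Flatten op whose input shape is
# (None, 7, 7, 64) gets its unknown output width filled in as 7 * 7 * 64.
import tensorflow as tf

input_shape = tf.TensorShape([None, 7, 7, 64])
output_size = 1
for dim in input_shape.as_list():
    if dim is not None:
        output_size *= dim
print(output_size)  # 3136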
def test_v1_event_payment_sent_failed_schema():
event = EventPaymentSentFailed(token_network_registry_address=UNIT_TOKEN_NETWORK_REGISTRY_ADDRESS, token_network_address=UNIT_TOKEN_NETWORK_ADDRESS, identifier=PaymentID(1), target=TargetAddress(factories.make_address()), reason='whatever')
log_time = datetime.datetime.now()
timestamped = TimestampedEvent(event, log_time)
dumped = EventPaymentSentFailedSchema().dump(timestamped)
expected = {'event': 'EventPaymentSentFailed', 'log_time': log_time.isoformat(), 'reason': 'whatever'}
    assert all(((dumped.get(key) == value) for (key, value) in expected.items()))
@winsdkapi(cc=STDCALL, params={'pSecDesc': PSECURITY_DESCRIPTOR, 'cAuthSvc': LONG, 'asAuthSvc': POINTER, 'pReserved1': PVOID, 'dwAuthnLevel': DWORD, 'dwImpLevel': DWORD, 'pAuthList': PVOID, 'dwCapabilities': DWORD, 'pReserved3': PVOID})
def hook_CoInitializeSecurity(ql: Qiling, address: int, params):
    return S_OK
class ESRI_ArcGIS(WMSBase):
layer_prefix = 'ESRI'
name = 'ESRI_ArcGIS'
def __init__(self, m=None):
self.m = m
self.m.add_wms.ESRI_ArcGIS
self.wmslayers = []
for servicename in self.m.add_wms.ESRI_ArcGIS._layers:
self.wmslayers.extend([((servicename + '__') + key) for key in getattr(self.m.add_wms.ESRI_ArcGIS, servicename).__dict__.keys() if (not ((key in ['m']) or key.startswith('_')))])
def do_add_layer(self, wmslayer, layer):
wms = None
for servicename in self.m.add_wms.ESRI_ArcGIS._layers:
prefix = f'{servicename}__'
layers = [i for i in self.wmslayers if i.startswith(prefix)]
if (wmslayer in layers):
service = getattr(self.m.add_wms.ESRI_ArcGIS, servicename, None)
wms = getattr(service, remove_prefix(wmslayer, prefix)).add_layer.xyz_layer
wms.name = f'{self.name}_{wmslayer}'
break
if (wms is None):
            _log.error(f'EOmaps: WebMap layer {wmslayer}, {layer} not found')
return
wms(layer=layer, transparent=True)
        self.ask_for_legend(wms, wmslayer)
def setup_regularizer(opt):
    reg_name = opt.get('regularizer')
    if reg_name is None:
        raise ValueError('Please specify the regularizer type')
    # 'alter_mf_higher' is included in the allowed list so its branch below is reachable.
    assert reg_name in ['fixed', 'alter_mf', 'alter_mf_higher'], 'Invalid {} regularizer'.format(reg_name)
    if reg_name == 'fixed':
        regularizer = FixedRegularizer(opt)
    elif reg_name == 'alter_mf':
        regularizer = MFAlterRegularizer(opt)
    else:
        regularizer = MFARHigher(opt)
    return regularizer
def _clean_text(text):
plm_special_tokens = '(\\<pad\\>)|(\\<s\\>)|(\\<\\/s\\>)|(\\<unk\\>)|(\\<\\|endoftext\\|\\>)'
text = re.sub(plm_special_tokens, '', text)
moses_norm = MosesPunctNormalizer()
text = moses_norm.normalize(text)
text = _tokenization_norm(text)
text = clean(text, fix_unicode=True, to_ascii=True, lower=False, no_line_breaks=True, no_urls=True, no_emails=True, no_phone_numbers=True, no_numbers=False, no_digits=False, no_currency_symbols=False, no_punct=False, replace_with_punct='', replace_with_url='', replace_with_email='', replace_with_phone_number='', replace_with_number='<NUMBER>', replace_with_digit='<DIGIT>', replace_with_currency_symbol='<CUR>', lang='en')
punct_pattern = '[^ A-Za-z0-9.?!,:;\\-\\[\\]\\{\\}\\(\\)\\\'\\"]'
text = re.sub(punct_pattern, '', text)
spe_pattern = '[-\\[\\]\\{\\}\\(\\)\\\'\\"]{2,}'
text = re.sub(spe_pattern, '', text)
text = ' '.join(text.split())
    return text
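
# A hedged usage sketch for _clean_text above, assuming this module's own
# imports (re, sacremoses' MosesPunctNormalizer, cleantext's clean, and
# _tokenization_norm) are available. The sample string is illustrative.
raw = 'Visit <|endoftext|> our page!!!   Email me at someone@example.com'
print(_clean_text(raw))  # PLM special tokens, e-mail addresses and disallowed punctuation are stripped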
def test_auto_fp16():
with pytest.raises(TypeError):
class ExampleObject(object):
            @auto_fp16()
def __call__(self, x):
return x
model = ExampleObject()
input_x = torch.ones(1, dtype=torch.float32)
model(input_x)
class ExampleModule(nn.Module):
        @auto_fp16()
def forward(self, x, y):
return (x, y)
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
(output_x, output_y) = model(input_x, input_y)
assert (output_x.dtype == torch.float32)
assert (output_y.dtype == torch.float32)
model.fp16_enabled = True
(output_x, output_y) = model(input_x, input_y)
assert (output_x.dtype == torch.half)
assert (output_y.dtype == torch.half)
if torch.cuda.is_available():
model.cuda()
(output_x, output_y) = model(input_x.cuda(), input_y.cuda())
assert (output_x.dtype == torch.half)
assert (output_y.dtype == torch.half)
class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x',))
def forward(self, x, y):
return (x, y)
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
(output_x, output_y) = model(input_x, input_y)
assert (output_x.dtype == torch.float32)
assert (output_y.dtype == torch.float32)
model.fp16_enabled = True
(output_x, output_y) = model(input_x, input_y)
assert (output_x.dtype == torch.half)
assert (output_y.dtype == torch.float32)
if torch.cuda.is_available():
model.cuda()
(output_x, output_y) = model(input_x.cuda(), input_y.cuda())
assert (output_x.dtype == torch.half)
assert (output_y.dtype == torch.float32)
class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'))
def forward(self, x, y=None, z=None):
return (x, y, z)
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
(output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
assert (output_x.dtype == torch.float32)
assert (output_y.dtype == torch.float32)
assert (output_z.dtype == torch.float32)
model.fp16_enabled = True
(output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
assert (output_x.dtype == torch.half)
assert (output_y.dtype == torch.half)
assert (output_z.dtype == torch.float32)
if torch.cuda.is_available():
model.cuda()
(output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert (output_x.dtype == torch.half)
assert (output_y.dtype == torch.half)
assert (output_z.dtype == torch.float32)
class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'), out_fp32=True)
def forward(self, x, y=None, z=None):
return (x, y, z)
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
(output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
assert (output_x.dtype == torch.half)
assert (output_y.dtype == torch.float32)
assert (output_z.dtype == torch.float32)
model.fp16_enabled = True
(output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
assert (output_x.dtype == torch.float32)
assert (output_y.dtype == torch.float32)
assert (output_z.dtype == torch.float32)
if torch.cuda.is_available():
model.cuda()
(output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert (output_x.dtype == torch.float32)
assert (output_y.dtype == torch.float32)
        assert (output_z.dtype == torch.float32)
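
# A hedged, simplified sketch of the casting contract the test above
# exercises: cast the named floating-point tensor arguments to half precision
# only when the module's fp16_enabled flag is set, optionally casting outputs
# back to fp32. This is an illustrative stand-in, not mmcv's implementation.
import functools
import torch

def simple_auto_fp16(apply_to=None, out_fp32=False):
    def decorator(fn):
        arg_names = fn.__code__.co_varnames[1:fn.__code__.co_argcount]  # skip self
        targets = arg_names if apply_to is None else apply_to
        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            if not getattr(self, 'fp16_enabled', False):
                return fn(self, *args, **kwargs)
            def cast(name, value):
                if name in targets and torch.is_tensor(value) and value.is_floating_point():
                    return value.half()
                return value
            args = tuple(cast(n, v) for n, v in zip(arg_names, args))
            kwargs = {k: cast(k, v) for k, v in kwargs.items()}
            out = fn(self, *args, **kwargs)
            if out_fp32:
                out = tuple(o.float() if torch.is_tensor(o) else o for o in out)
            return out
        return wrapper
    return decorator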
def test_default_changelog_template(repo_with_git_flow_and_release_channels_angular_commits, default_angular_parser):
version = Version.parse('1.1.0-alpha.3')
repo = repo_with_git_flow_and_release_channels_angular_commits
env = environment(trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=True)
rh = ReleaseHistory.from_git_history(repo=repo, translator=VersionTranslator(), commit_parser=default_angular_parser)
context = make_changelog_context(hvcs_client=Github(remote_url=repo.remote().url), release_history=rh)
context.bind_to_environment(env)
release = rh.released[version]
actual_content = env.from_string(default_release_notes_template).render(version=version, release=release)
    assert (actual_content == EXPECTED_CONTENT)
class Delivery(Object):
def __init__(self, payload=None, log=None, errors=None, error=None):
if (payload is None):
payload = []
if (log is None):
log = []
if (errors is None):
errors = []
if (error is not None):
errors.append(error)
Object.__init__(self, payload=payload, log=log, errors=errors)
payload = List.T(Any.T())
log = List.T(Tuple.T(3, String.T()))
errors = List.T(Tuple.T(3, String.T()))
def extend(self, other):
self.payload.extend(other.payload)
self.log.extend(other.log)
self.errors.extend(other.errors)
def extend_without_payload(self, other):
self.log.extend(other.log)
self.errors.extend(other.errors)
return other.payload
def emit_log(self):
for (name, message, context) in self.log:
message = ('%s: %s' % (context, message))
_logs[name](message)
def expect(self, quiet=False):
if (not quiet):
self.emit_log()
if self.errors:
(name, message, context) = self.errors[0]
if context:
message += (' (%s)' % context)
if (len(self.errors) > 1):
message += ' Additional errors pending.'
raise _exceptions[name](message)
return self.payload
def expect_one(self, quiet=False):
payload = self.expect(quiet=quiet)
if (len(payload) != 1):
raise DeliveryError(('Expected 1 element but got %i.' % len(payload)))
        return payload[0]
def test_remote_pipe_closed():
master_pid = os.getpid()
with pm.Model():
x = pm.Normal('x', shape=2, mu=0.1)
at_pid = pt.as_tensor_variable(np.array(master_pid, dtype='int32'))
pm.Normal('y', mu=_crash_remote_process(x, at_pid), shape=2)
step = pm.Metropolis()
with pytest.raises(ps.ParallelSamplingError, match='Chain [0-9] failed with') as ex:
        pm.sample(step=step, mp_ctx='spawn', tune=2, draws=2, cores=2, chains=2)
class LocaldriveTests(MusicTest):
def setUp(self) -> None:
super().setUp()
self._setup_test_library()
def test_suggested_song(self) -> None:
suggestion = json.loads(self.client.get(reverse('offline-suggestions'), {'term': 'backbeat', 'playlist': 'false'}).content)[(- 1)]
self._request_suggestion(suggestion['key'])
state = self._poll_musiq_state((lambda state: state['musiq']['currentSong']))
current_song = state['musiq']['currentSong']
self.assertEqual(current_song['externalUrl'], 'local_library/other/Backbeat.mp3')
self.assertAlmostEqual(current_song['duration'], 46, delta=1)
self.assertEqual(current_song['artist'], 'Kevin MacLeod')
self.assertEqual(current_song['title'], 'Backbeat')
def test_suggested_playlist(self) -> None:
state = self._add_local_playlist()
self.assertEqual(state['musiq']['currentSong']['externalUrl'], 'local_library/other/Backbeat.mp3')
self.assertEqual(state['musiq']['songQueue'][0]['externalUrl'], 'local_library/other/Forest Frolic Loop.mp3')
self.assertEqual(state['musiq']['songQueue'][1]['externalUrl'], 'local_library/other/Village Tarantella.mp3')
self.assertEqual(state['musiq']['songQueue'][2]['externalUrl'], 'local_library/heroes/Gothamlicious.mp3')
self.assertEqual(state['musiq']['songQueue'][3]['externalUrl'], 'local_library/heroes/New Hero in Town.mp3')
def test_autoplay(self) -> None:
suggestion = json.loads(self.client.get(reverse('offline-suggestions'), {'term': 'backbeat', 'playlist': 'false'}).content)[(- 1)]
self.client.post(reverse('request-music'), {'key': suggestion['key'], 'query': '', 'playlist': 'false', 'platform': 'local'})
self._poll_current_song()
self.client.post(reverse('set-autoplay'), {'value': 'true'})
state = self._poll_musiq_state((lambda state: ((len(state['musiq']['songQueue']) == 1) and state['musiq']['songQueue'][0]['internalUrl'])))
old_id = state['musiq']['songQueue'][0]['id']
self.client.post(reverse('skip'))
self._wait_for_new_song(old_id)
def test_radio(self) -> None:
suggestion = json.loads(self.client.get(reverse('offline-suggestions'), {'term': 'backbeat', 'playlist': 'false'}).content)[(- 1)]
self._request_suggestion(suggestion['key'])
self._poll_current_song()
self.client.post(reverse('request-radio'))
        self._poll_musiq_state((lambda state: ((len(state['musiq']['songQueue']) >= 1) and all((song['internalUrl'] for song in state['musiq']['songQueue'])))), timeout=3)
def locate_files(file, subdir=None):
if (subdir is None):
subdir = '.'
resdirs = util.listdir(ops.RESDIR, includeFiles=False)
files = []
for resdir in resdirs:
if ((resdir.lower() == 'ops') or (resdir.lower() == 'dsz')):
continue
fullpath = os.path.normpath(os.path.join(ops.RESDIR, resdir, subdir, file))
if os.path.exists(fullpath):
files.append((resdir, fullpath))
    files.sort(key=lambda entry: entry[0].lower())  # key-based sort; the Python 2 cmp= argument is gone in Python 3
    return files
def get_dial(dialogue):
dial = []
d_orig = analyze_dialogue(dialogue, MAX_LENGTH)
if (d_orig is None):
return None
usr = [t['text'] for t in d_orig['usr_log']]
sys = [t['text'] for t in d_orig['sys_log']]
sys_a = [t['dialogue_acts'] for t in d_orig['sys_log']]
bvs = [t['belief_value_summary'] for t in d_orig['sys_log']]
domain = [t['domain'] for t in d_orig['usr_log']]
for item in zip(usr, sys, sys_a, domain, bvs):
dial.append({'usr': item[0], 'sys': item[1], 'sys_a': item[2], 'domain': item[3], 'bvs': item[4]})
    return dial
def default_fsaf_classification_model(num_classes, pyramid_feature_size=256, prior_probability=0.01, classification_feature_size=256, name='fsaf_classification_model'):
options = {'kernel_size': 3, 'strides': 1, 'padding': 'same'}
inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size))
outputs = inputs
for i in range(4):
outputs = keras.layers.Conv2D(filters=classification_feature_size, activation='relu', name='pyramid_classification_{}'.format(i), kernel_initializer=keras.initializers.normal(mean=0.0, stddev=0.01, seed=None), bias_initializer='zeros', **options)(outputs)
outputs = keras.layers.Conv2D(filters=num_classes, kernel_initializer=keras.initializers.normal(mean=0.0, stddev=0.01, seed=None), bias_initializer=initializers.PriorProbability(probability=prior_probability), name='pyramid_classification', **options)(outputs)
outputs = keras.layers.Reshape(((- 1), num_classes), name='fsaf_classification_reshape')(outputs)
outputs = keras.layers.Activation('sigmoid', name='fsaf_classification_sigmoid')(outputs)
    return keras.models.Model(inputs=inputs, outputs=outputs, name=name)
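
# A hedged usage sketch: building the FSAF classification head above for a
# 20-class problem, assuming keras and this module's PriorProbability
# initializer are importable as in the function body.
head = default_fsaf_classification_model(num_classes=20)
head.summary()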
class BertTokenizerMismatchTest(unittest.TestCase):
def test_tokenizer_mismatch_warning(self):
EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers', level='WARNING') as cm:
BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
self.assertTrue(cm.records[0].message.startswith('The tokenizer class you load from this checkpoint is not the same type as the class this function is called from.'))
EXAMPLE_BERT_ID = 'bert-base-cased'
with self.assertLogs('transformers', level='WARNING') as cm:
BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(cm.records[0].message.startswith('The tokenizer class you load from this checkpoint is not the same type as the class this function is called from.'))
def parse_args():
special_args = [{'name': ['-s', '--size'], 'default': '10000', 'metavar': 'n', 'type': int, 'help': "The array size n in n^2 (default 10000). For 'svd' operation the second dimension is given by --second-size."}, {'name': ['-2', '--second-size'], 'default': '1000', 'type': int, 'help': "The second dimension size for 'svd' operation (default 1000)."}, {'name': ['-t', '--type'], 'choices': ['cpu', 'gpu'], 'default': 'gpu', 'type': str, 'help': 'Do merge with GPU or CPU dataframes.'}, {'name': ['-o', '--operation'], 'default': 'transpose_sum', 'type': str, 'help': "The operation to run, valid options are: 'transpose_sum' (default), 'dot', 'fft', 'svd', 'sum', 'mean', 'slice'."}, {'name': ['-c', '--chunk-size'], 'default': '2500', 'type': int, 'help': 'Chunk size (default 2500).'}, {'name': '--ignore-size', 'default': '1 MiB', 'metavar': 'nbytes', 'type': parse_bytes, 'help': "Ignore messages smaller than this (default '1 MB')."}, {'name': '--runs', 'default': 3, 'type': int, 'help': 'Number of runs (default 3).'}, {'name': ['-b', '--backend'], 'choices': ['dask', 'dask-noop'], 'default': 'dask', 'type': str, 'help': 'Compute backend to use.'}]
    return parse_benchmark_args(description='Transpose on LocalCUDACluster benchmark', args_list=special_args)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm3d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv3d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
        self.add_module('pool', nn.AvgPool3d(kernel_size=2, stride=2))
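
# A hedged usage sketch for the 3D transition layer above: it reduces the
# channel count via the 1x1x1 conv and halves every spatial dimension via the
# stride-2 average pool. Shapes below are illustrative.
import torch

trans = _Transition(num_input_features=64, num_output_features=32)
x = torch.randn(1, 64, 16, 16, 16)
print(trans(x).shape)  # torch.Size([1, 32, 8, 8, 8])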
class TestNetcdfEncodingKwargs:
    @pytest.fixture()
def scene(self):
scn = Scene()
attrs = {'start_time': datetime(2018, 5, 30, 10, 0), 'end_time': datetime(2018, 5, 30, 10, 15)}
scn['test-array'] = xr.DataArray([1.0, 2, 3], attrs=attrs)
return scn
    @pytest.fixture(params=[True, False])
def compression_on(self, request):
return request.param
    @pytest.fixture()
def encoding(self, compression_on):
enc = {'test-array': {'dtype': 'int8', 'scale_factor': 0.1, 'add_offset': 0.0, '_FillValue': 3}}
if compression_on:
comp_params = _get_compression_params(complevel=7)
enc['test-array'].update(comp_params)
return enc
    @pytest.fixture()
def filename(self, tmp_path):
return str((tmp_path / 'test.nc'))
    @pytest.fixture()
def complevel_exp(self, compression_on):
if compression_on:
return 7
return 0
    @pytest.fixture()
def expected(self, complevel_exp):
return {'data': [10, 20, 30], 'scale_factor': 0.1, 'fill_value': 3, 'dtype': np.int8, 'complevel': complevel_exp}
def test_encoding_kwarg(self, scene, encoding, filename, expected):
scene.save_datasets(filename=filename, encoding=encoding, writer='cf')
self._assert_encoding_as_expected(filename, expected)
def _assert_encoding_as_expected(self, filename, expected):
with xr.open_dataset(filename, mask_and_scale=False) as f:
np.testing.assert_array_equal(f['test-array'][:], expected['data'])
assert (f['test-array'].attrs['scale_factor'] == expected['scale_factor'])
assert (f['test-array'].attrs['_FillValue'] == expected['fill_value'])
assert (f['test-array'].dtype == expected['dtype'])
assert (f['test-array'].encoding['complevel'] == expected['complevel'])
def test_warning_if_backends_dont_match(self, scene, filename, monkeypatch):
import netCDF4
with monkeypatch.context() as m:
m.setattr(netCDF4, '__version__', '1.5.0')
m.setattr(netCDF4, '__netcdf4libversion__', '4.9.1')
with pytest.warns(UserWarning, match='Backend version mismatch'):
scene.save_datasets(filename=filename, writer='cf')
def test_no_warning_if_backends_match(self, scene, filename, monkeypatch):
import netCDF4
with monkeypatch.context() as m:
m.setattr(netCDF4, '__version__', '1.6.0')
m.setattr(netCDF4, '__netcdf4libversion__', '4.9.0')
m.setattr(xr, '__version__', '2022.12.0')
            with warnings.catch_warnings():
                # Escalate warnings to errors *before* saving, so an unexpected warning fails the test.
                warnings.simplefilter('error')
                scene.save_datasets(filename=filename, writer='cf')
def _dynamic_dict(example, src_field, tgt_field):
src = src_field.tokenize(example['src'])
unk = src_field.unk_token
pad = src_field.pad_token
src_ex_vocab = Vocab(Counter(src), specials=[unk, pad])
unk_idx = src_ex_vocab.stoi[unk]
src_map = torch.LongTensor([src_ex_vocab.stoi[w] for w in src])
example['src_map'] = src_map
example['src_ex_vocab'] = src_ex_vocab
if ('tgt' in example):
tgt = tgt_field.tokenize(example['tgt'])
mask = torch.LongTensor((([unk_idx] + [src_ex_vocab.stoi[w] for w in tgt]) + [unk_idx]))
example['alignment'] = mask
    return (src_ex_vocab, example)
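
# A hedged walk-through of _dynamic_dict above on a toy example, assuming
# torchtext-style fields with whitespace tokenization and <unk>/<pad> specials:
#   example = {'src': 'the cat sat', 'tgt': 'cat sat'}
#   src_ex_vocab -> the <unk>/<pad> specials followed by the three source tokens
#   src_map      -> the per-position ids of the source tokens in src_ex_vocab
#   alignment    -> the target token ids in that vocab, wrapped with unk_idx
# so a copy-attention decoder can score copying each source position.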
def test_module_metadata_is_fixed_up() -> None:
import trio
import trio.testing
assert (trio.Cancelled.__module__ == 'trio')
assert (trio.open_nursery.__module__ == 'trio')
assert (trio.abc.Stream.__module__ == 'trio.abc')
assert (trio.lowlevel.wait_task_rescheduled.__module__ == 'trio.lowlevel')
assert (trio.testing.trio_test.__module__ == 'trio.testing')
assert (trio.lowlevel.ParkingLot.__init__.__module__ == 'trio.lowlevel')
assert (trio.abc.Stream.send_all.__module__ == 'trio.abc')
assert (trio.Cancelled.__name__ == 'Cancelled')
assert (trio.Cancelled.__qualname__ == 'Cancelled')
assert (trio.abc.SendStream.send_all.__name__ == 'send_all')
assert (trio.abc.SendStream.send_all.__qualname__ == 'SendStream.send_all')
assert (trio.to_thread.__name__ == 'trio.to_thread')
assert (trio.to_thread.run_sync.__name__ == 'run_sync')
    assert (trio.to_thread.run_sync.__qualname__ == 'run_sync')
@with_fixtures(WebFixture)
def test_adding_items_with_captions(web_fixture):
carousel = Carousel(web_fixture.view, 'my_carousel_id')
caption_widget = Widget(web_fixture.view)
carousel_item = carousel.add_slide(Img(web_fixture.view), caption_widget=caption_widget)
[image, div_containing_caption] = carousel_item.children
assert (div_containing_caption.get_attribute('class') == 'carousel-caption')
[actual_caption_widget] = div_containing_caption.children
    assert (actual_caption_widget is caption_widget)
def _train_labeler(args):
if (args.data_setup == 'joint'):
(train_gen_list, val_gen_list, crowd_dev_gen, elmo, bert, vocab) = get_joint_datasets(args)
else:
train_fname = args.train_data
dev_fname = args.dev_data
print(train_fname, dev_fname)
(data_gens, elmo) = get_datasets([(train_fname, 'train', args.goal), (dev_fname, 'dev', args.goal)], args)
train_gen_list = [(args.goal, data_gens[0])]
val_gen_list = [(args.goal, data_gens[1])]
train_log = SummaryWriter(os.path.join(constant.EXP_ROOT, args.model_id, 'log', 'train'))
validation_log = SummaryWriter(os.path.join(constant.EXP_ROOT, args.model_id, 'log', 'validation'))
tensorboard = TensorboardWriter(train_log, validation_log)
if (args.model_type == 'labeler'):
print('==> Labeler')
model = denoising_models.Labeler(args, constant.ANSWER_NUM_DICT[args.goal])
elif (args.model_type == 'filter'):
print('==> Filter')
model = denoising_models.Filter(args, constant.ANSWER_NUM_DICT[args.goal])
else:
print(('Invalid model type: -model_type ' + args.model_type))
raise NotImplementedError
model.cuda()
total_loss = 0
batch_num = 0
best_macro_f1 = 0.0
start_time = time.time()
init_time = time.time()
if args.bert:
if args.bert_param_path:
print(('==> Loading BERT from ' + args.bert_param_path))
model.bert.load_state_dict(torch.load(args.bert_param_path, map_location='cpu'))
no_decay = ['bias', 'gamma', 'beta']
optimizer_parameters = [{'params': [p for (n, p) in model.named_parameters() if (n not in no_decay)], 'weight_decay_rate': 0.01}, {'params': [p for (n, p) in model.named_parameters() if (n in no_decay)], 'weight_decay_rate': 0.0}]
optimizer = BERTAdam(optimizer_parameters, lr=args.bert_learning_rate, warmup=args.bert_warmup_proportion, t_total=(- 1))
else:
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
if args.load:
load_model(args.reload_model_name, constant.EXP_ROOT, args.model_id, model, optimizer)
for (idx, m) in enumerate(model.modules()):
logging.info(((str(idx) + '->') + str(m)))
while True:
batch_num += 1
for (type_name, data_gen) in train_gen_list:
try:
batch = next(data_gen)
(batch, _) = to_torch(batch)
except StopIteration:
logging.info(((type_name + ' finished at ') + str(batch_num)))
print('Done!')
torch.save({'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, '{0:s}/{1:s}.pt'.format(constant.EXP_ROOT, args.model_id))
return
optimizer.zero_grad()
(loss, output_logits, cls_logits) = model(batch, type_name)
loss.backward()
total_loss += loss.item()
optimizer.step()
if (((batch_num % args.log_period) == 0) and (batch_num > 0)):
gc.collect()
cur_loss = float((1.0 * loss.clone().item()))
elapsed = (time.time() - start_time)
train_loss_str = '|loss {0:3f} | at {1:d}step | {2:.2f} ms/batch'.format(cur_loss, batch_num, ((elapsed * 1000) / args.log_period))
start_time = time.time()
print(train_loss_str)
logging.info(train_loss_str)
tensorboard.add_train_scalar(('train_loss_' + type_name), cur_loss, batch_num)
if (((batch_num % args.eval_period) == 0) and (batch_num > 0)):
output_index = get_output_index(output_logits, threshold=args.threshold)
gold_pred_train = get_gold_pred_str(output_index, batch['y'].data.cpu().clone(), args.goal)
print(gold_pred_train[:10])
accuracy = ((sum([(set(y) == set(yp)) for (y, yp) in gold_pred_train]) * 1.0) / len(gold_pred_train))
train_acc_str = '{1:s} Train accuracy: {0:.1f}%'.format((accuracy * 100), type_name)
if (cls_logits is not None):
cls_accuracy = (sum([((1.0 if (pred > 0.0) else 0.0) == gold) for (pred, gold) in zip(cls_logits, batch['y_cls'].data.cpu().numpy())]) / float(cls_logits.size()[0]))
cls_tp = sum([(((1.0 if (pred > 0.0) else 0.0) == 1.0) and (gold == 1.0)) for (pred, gold) in zip(cls_logits, batch['y_cls'].data.cpu().numpy())])
cls_precision = (cls_tp / float(sum([(1.0 if (pred > 0.0) else 0.0) for pred in cls_logits])))
cls_recall = (cls_tp / float(sum(batch['y_cls'].data.cpu().numpy())))
cls_f1 = f1(cls_precision, cls_recall)
train_cls_acc_str = '{1:s} Train cls accuracy: {0:.2f}% P: {2:.3f} R: {3:.3f} F1: {4:.3f}'.format((cls_accuracy * 100), type_name, cls_precision, cls_recall, cls_f1)
print(train_acc_str)
if (cls_logits is not None):
print(train_cls_acc_str)
logging.info(train_acc_str)
tensorboard.add_train_scalar(('train_acc_' + type_name), accuracy, batch_num)
if (args.goal != 'onto'):
for (val_type_name, val_data_gen) in val_gen_list:
if (val_type_name == type_name):
(eval_batch, _) = to_torch(next(val_data_gen))
evaluate_batch(batch_num, eval_batch, model, tensorboard, val_type_name, args, args.goal)
if (((batch_num % args.eval_period) == 0) and (batch_num > 0) and (args.data_setup == 'joint')):
print('---- eval at step {0:d} ---'.format(batch_num))
(crowd_eval_loss, macro_f1) = evaluate_data(batch_num, 'crowd/dev_tree.json', model, tensorboard, 'open', args, elmo, bert, vocab=vocab)
if (best_macro_f1 < macro_f1):
best_macro_f1 = macro_f1
save_fname = '{0:s}/{1:s}_best.pt'.format(constant.EXP_ROOT, args.model_id)
torch.save({'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, save_fname)
print('Total {0:.2f} minutes have passed, saving at {1:s} '.format(((time.time() - init_time) / 60), save_fname))
if (((batch_num % args.eval_period) == 0) and (batch_num > 0) and (args.goal == 'onto')):
print('---- OntoNotes: eval at step {0:d} ---'.format(batch_num))
(crowd_eval_loss, macro_f1) = evaluate_data(batch_num, args.dev_data, model, tensorboard, args.goal, args, elmo)
if (((batch_num % args.save_period) == 0) and (batch_num > 0)):
save_fname = '{0:s}/{1:s}_{2:d}.pt'.format(constant.EXP_ROOT, args.model_id, batch_num)
torch.save({'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, save_fname)
print('Total {0:.2f} minutes have passed, saving at {1:s} '.format(((time.time() - init_time) / 60), save_fname))
    torch.save({'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, '{0:s}/{1:s}.pt'.format(constant.EXP_ROOT, args.model_id))
class ResNet(nn.Module):
def __init__(self, depth, num_classes=1000, block_name='BasicBlock'):
super(ResNet, self).__init__()
if (block_name.lower() == 'basicblock'):
            assert (((depth - 2) % 6) == 0), 'When using BasicBlock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
n = ((depth - 2) // 6)
block = BasicBlock
elif (block_name.lower() == 'bottleneck'):
            assert (((depth - 2) % 9) == 0), 'When using Bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
n = ((depth - 2) // 9)
block = Bottleneck
else:
            raise ValueError('block_name should be BasicBlock or Bottleneck')
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, n)
self.layer2 = self._make_layer(block, 32, n, stride=2)
self.layer3 = self._make_layer(block, 64, n, stride=2)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear((64 * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc(x)
        return x
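
# A hedged usage sketch: the class above is the CIFAR-style ResNet (3x32x32
# inputs, 16 base channels, 8x8 final average pool), so depth=20 with
# BasicBlock gives ResNet-20. Assumes BasicBlock is defined in this module.
import torch

model = ResNet(depth=20, num_classes=10, block_name='BasicBlock')
logits = model(torch.randn(2, 3, 32, 32))
print(logits.shape)  # torch.Size([2, 10])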
class TestTopLevelCodeChecker(pylint.testutils.CheckerTestCase):
CHECKER_CLASS = TopLevelCodeChecker
CONFIG = {}
def test_message_simple(self):
src = '\n print("testing code")\n '
mod = astroid.parse(src)
with self.assertAddsMessages(pylint.testutils.MessageTest(msg_id='forbidden-top-level-code', node=mod.body[0], args=2), ignore_position=True):
self.checker.visit_module(mod)
def test_message_complex(self):
src = '\n if __name__ == "__main__":\n print("I\'m in main")\n print("testing code")\n '
mod = astroid.parse(src)
with self.assertAddsMessages(pylint.testutils.MessageTest(msg_id='forbidden-top-level-code', node=mod.body[1], args=4), ignore_position=True):
self.checker.visit_module(mod)
def test_no_message_import(self):
src = '\n import test\n '
mod = astroid.parse(src)
with self.assertNoMessages():
self.checker.visit_module(mod)
def test_no_message_import_from(self):
src = '\n from test import unittest\n '
mod = astroid.parse(src)
with self.assertNoMessages():
self.checker.visit_module(mod)
def test_no_message_function_def(self):
src = '\n def print_hello():\n print("hello")\n '
mod = astroid.parse(src)
with self.assertNoMessages():
self.checker.visit_module(mod)
def test_no_message_class_def(self):
src = '\n class Printer:\n def print_hello():\n print("hello")\n '
mod = astroid.parse(src)
with self.assertNoMessages():
self.checker.visit_module(mod)
def test_no_message_constant_assignment(self):
src = '\n MAX_DURATION = 30\n '
mod = astroid.parse(src)
with self.assertNoMessages():
self.checker.visit_module(mod)
def test_message_regular_assignment(self):
src = '\n name = "George"\n '
mod = astroid.parse(src)
with self.assertAddsMessages(pylint.testutils.MessageTest(msg_id='forbidden-top-level-code', node=mod.body[0], args=2), ignore_position=True):
self.checker.visit_module(mod)
def test_no_message_type_alias_assignment(self):
src = '\n MyType = list[list[list[int]]]\n '
mod = astroid.parse(src)
with self.assertNoMessages():
self.checker.visit_module(mod)
def test_message_type_alias_assignment(self):
src = '\n TypeName = list[int]\n '
mod = astroid.parse(src)
with self.assertAddsMessages(pylint.testutils.MessageTest(msg_id='forbidden-top-level-code', node=mod.body[0], args=2), ignore_position=True):
self.checker.visit_module(mod)
def test_message_regular_assignment_unpacking(self):
src = '\n name, CONST = "George", 3\n '
mod = astroid.parse(src)
with self.assertAddsMessages(pylint.testutils.MessageTest(msg_id='forbidden-top-level-code', node=mod.body[0], args=2), ignore_position=True):
self.checker.visit_module(mod)
def test_message_regular_assignment_starred(self):
src = '\n NAME, *nums = ["George", 3, 4]\n '
mod = astroid.parse(src)
with self.assertAddsMessages(pylint.testutils.MessageTest(msg_id='forbidden-top-level-code', node=mod.body[0], args=2), ignore_position=True):
self.checker.visit_module(mod)
def test_no_message_is_main(self):
src = '\n if __name__ == "__main__":\n print("I\'m in main")\n '
mod = astroid.parse(src)
with self.assertNoMessages():
            self.checker.visit_module(mod)
def predict(input_dict, output_stride=16):
image = input_dict['image']
org_shape = tf.shape(image)
affinity = affinity_seg(image, output_stride)
curr_shape = tf.shape(affinity)
affinity = tf.image.resize_bilinear(affinity, [(curr_shape[1] * output_stride), (curr_shape[2] * output_stride)])
affinity = tf.slice(affinity, [0, 0, 0, 0], [org_shape[0], org_shape[1], org_shape[2], (7 * 8)])
    return affinity
@pytest.mark.parametrize('url, rev', [('git+ None), ('git+ 'master')])
def test_add_git_constraint_with_subdirectory(url: str, rev: (str | None), app: PoetryTestApplication, tester: CommandTester) -> None:
tester.execute(url)
expected = 'Updating dependencies\nResolving dependencies...\n\nPackage operations: 1 install, 0 updates, 0 removals\n\n - Installing two (2.0.0 9cf87a2)\n\nWriting lock file\n'
assert (tester.io.fetch_output().strip() == expected.strip())
assert isinstance(tester.command, InstallerCommand)
assert (tester.command.installer.executor.installations_count == 1)
pyproject: dict[(str, Any)] = app.poetry.file.read()
content = pyproject['tool']['poetry']
constraint = {'git': ' 'subdirectory': 'two'}
if rev:
constraint['rev'] = rev
assert ('two' in content['dependencies'])
    assert (content['dependencies']['two'] == constraint)
class VectorInputWidget(QWidget):
def __init__(self, velocity, obj=None, parent=None):
super(VectorInputWidget, self).__init__(parent)
        self.valueTypes = ['Cartesian components']
NumberOfComponents = 3
vector_config = [['vector'], ['vector'], ['vector'], ['Vx', 'Vy', 'Vz'], (['m/s'] * NumberOfComponents), [([True] * NumberOfComponents)]]
self.componentWidget = InputWidget(None, vector_config)
self.forms = [self.componentWidget]
if within_FreeCADGui:
self.valueTypes += ['Magnitude and normal']
self.magNormalWidget = MagnitudeNormalWidget(velocity, obj, self)
self.forms += [self.magNormalWidget.form]
valueTypeTips = self.valueTypes
(self.buttonGroupValueType, _buttonGroupLayout) = _createChoiceGroup(self.valueTypes, valueTypeTips)
self.buttonGroupValueType.buttonClicked.connect(self.valueTypeChanged)
self.currentValueType = self.valueTypes[0]
_layout = QVBoxLayout()
_layout.addLayout(_buttonGroupLayout)
for w in self.forms:
_layout.addWidget(w)
self.setLayout(_layout)
self.setVector(velocity)
def valueTypeChanged(self):
self.currentValueType = self.valueTypes[self.buttonGroupValueType.checkedId()]
self.updateUi()
    def vectorChanged(self):
        _vector = self.vector()
        self.setVector(_vector)
def updateUi(self):
for (i, form) in enumerate(self.forms):
if (i == self.buttonGroupValueType.checkedId()):
form.setVisible(True)
form.updateUi()
else:
form.setVisible(False)
def vector(self):
if (self.buttonGroupValueType.checkedId() == 0):
_inputs = self.componentWidget.inputSettings()
vector = [_inputs[k] for k in ['Vx', 'Vy', 'Vz']]
elif within_FreeCADGui:
vector = self.magNormalWidget.vector()
return vector
def setVector(self, vector):
_inputs = self.componentWidget.inputSettings()
for (i, k) in enumerate(['Vx', 'Vy', 'Vz']):
_inputs[k] = vector[i]
self.componentWidget.setInputSettings(_inputs)
if within_FreeCADGui:
self.magNormalWidget.setVector(vector)
        self.updateUi()
def test_variables__multiple_specs(dummy_ds: xr.Dataset) -> None:
spec = ArrayLikeSpec('baz', 'baz doc', kind='i', ndim=1)
invalid_spec = ArrayLikeSpec('baz', 'baz doc', kind='i', ndim=2)
variables.validate(dummy_ds, {'foo': spec, 'bar': spec})
variables.validate(dummy_ds, {'foo': spec})
variables.validate(dummy_ds, {'bar': spec})
with pytest.raises(ValueError, match='bar does not match the spec'):
variables.validate(dummy_ds, {'bar': invalid_spec})
with pytest.raises(ValueError, match='bar does not match the spec'):
        variables.validate(dummy_ds, {'foo': spec}, {'bar': invalid_spec})
def subdivide_edges(bm, edges, direction, widths):
dir = direction.copy().normalized()
cuts = (len(widths) - 1)
res = bmesh.ops.subdivide_edges(bm, edges=edges, cuts=cuts)
inner_edges = filter_geom(res.get('geom_inner'), BMEdge)
distance = (sum(widths) / len(widths))
final_position = 0.0
for (i, edge) in enumerate(sort_edges(inner_edges, dir)):
original_position = ((i + 1) * distance)
final_position += widths[i]
diff = (final_position - original_position)
bmesh.ops.translate(bm, verts=edge.verts, vec=(diff * dir))
    return inner_edges
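
# A hedged illustration of the offset math in subdivide_edges above: with
# widths [1, 2, 1] on an edge of total length 4, bmesh places the two cuts
# uniformly (at 4/3 and 8/3), and each inner edge is then translated by the
# difference between its cumulative width and its uniform position.
widths = [1.0, 2.0, 1.0]
distance = sum(widths) / len(widths)  # uniform spacing: 4/3
final_positions = [sum(widths[:i + 1]) for i in range(len(widths) - 1)]  # [1.0, 3.0]
offsets = [p - (i + 1) * distance for i, p in enumerate(final_positions)]  # [-1/3, +1/3]
print(offsets)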
class TestDataPrep(unittest.TestCase):
def test_process_rxn_templates(self):
path_to_rxn_templates = './data/rxn_set_hb_test.txt'
path_to_building_blocks = './data/building_blocks_matched.csv.gz'
building_blocks = pd.read_csv(path_to_building_blocks, compression='gzip')['SMILES'].tolist()
rxn_templates = []
with open(path_to_rxn_templates, 'rt') as rxn_template_file:
for line in rxn_template_file:
rxn = Reaction(line.split('|')[1].strip())
rxn.set_available_reactants(building_block_list=building_blocks)
rxn_templates.append(rxn)
r = ReactionSet(rxn_templates)
r.save('./data/rxns_hb.json.gz')
path_to_ref_rxn_templates = './data/ref/rxns_hb.json.gz'
r_ref = ReactionSet()
r_ref.load(path_to_ref_rxn_templates)
for (rxn_idx, rxn) in enumerate(r.rxns):
rxn = rxn.__dict__
ref_rxn = r_ref.rxns[rxn_idx].__dict__
self.assertTrue((rxn == ref_rxn))
def test_synthetic_tree_prep(self):
np.random.seed(6)
path_to_rxns = './data/ref/rxns_hb.json.gz'
r_ref = ReactionSet()
r_ref.load(path_to_rxns)
rxns = r_ref.rxns
path_to_building_blocks = './data/building_blocks_matched.csv.gz'
building_blocks = pd.read_csv(path_to_building_blocks, compression='gzip')['SMILES'].tolist()
num_trials = 25
num_finish = 0
num_error = 0
num_unfinish = 0
trees = []
for _ in tqdm(range(num_trials)):
(tree, action) = synthetic_tree_generator(building_blocks, rxns, max_step=5)
if (action == 3):
trees.append(tree)
num_finish += 1
elif (action == (- 1)):
num_error += 1
else:
num_unfinish += 1
synthetic_tree_set = SyntheticTreeSet(sts=trees)
synthetic_tree_set.save('./data/st_data.json.gz')
self.assertEqual(num_finish, 3)
self.assertEqual(num_unfinish, 0)
sts_ref = SyntheticTreeSet()
sts_ref.load('./data/ref/st_data.json.gz')
# compare the freshly generated trees against the reference set (the original
# enumerated the reference set and compared it with itself)
for (st_idx, st) in enumerate(synthetic_tree_set.sts):
st = st.__dict__
ref_st = sts_ref.sts[st_idx].__dict__
self.assertTrue((st == ref_st))
def test_featurization(self):
embedding = 'fp'
radius = 2
nbits = 4096
dataset_type = 'train'
path_st = './data/ref/st_data.json.gz'
save_dir = './data/'
reference_data_dir = './data/ref/'
st_set = SyntheticTreeSet()
st_set.load(path_st)
data = st_set.sts
del st_set
states = []
steps = []
save_idx = 0
for st in tqdm(data):
try:
(state, step) = organize(st, target_embedding=embedding, radius=radius, nBits=nbits)
except Exception as e:
print(e)
continue
states.append(state)
steps.append(step)
del data
if (len(steps) != 0):
states = sparse.vstack(states)
steps = sparse.vstack(steps)
if (not os.path.exists(save_dir)):
os.makedirs(save_dir)
sparse.save_npz(f'{save_dir}states_{save_idx}_{dataset_type}.npz', states)
sparse.save_npz(f'{save_dir}steps_{save_idx}_{dataset_type}.npz', steps)
states_ref = sparse.load_npz(f'{reference_data_dir}states_{save_idx}_{dataset_type}.npz')
steps_ref = sparse.load_npz(f'{reference_data_dir}steps_{save_idx}_{dataset_type}.npz')
self.assertEqual(states.toarray().all(), states_ref.toarray().all())
self.assertEqual(steps.toarray().all(), steps_ref.toarray().all())
def test_dataprep(self):
main_dir = './data/'
ref_dir = './data/ref/'
copyfile(f'{ref_dir}states_0_train.npz', f'{main_dir}states_0_train.npz')
copyfile(f'{ref_dir}steps_0_train.npz', f'{main_dir}steps_0_train.npz')
prep_data(main_dir=main_dir, num_rxn=3, out_dim=300)
X_act = sparse.load_npz(f'{main_dir}X_act_train.npz')
y_act = sparse.load_npz(f'{main_dir}y_act_train.npz')
X_act_ref = sparse.load_npz(f'{ref_dir}X_act_train.npz')
y_act_ref = sparse.load_npz(f'{ref_dir}y_act_train.npz')
self.assertEqual(X_act.toarray().all(), X_act_ref.toarray().all())
self.assertEqual(y_act.toarray().all(), y_act_ref.toarray().all())
X_rt1 = sparse.load_npz(f'{main_dir}X_rt1_train.npz')
y_rt1 = sparse.load_npz(f'{main_dir}y_rt1_train.npz')
X_rt1_ref = sparse.load_npz(f'{ref_dir}X_rt1_train.npz')
y_rt1_ref = sparse.load_npz(f'{ref_dir}y_rt1_train.npz')
self.assertEqual(X_rt1.toarray().all(), X_rt1_ref.toarray().all())
self.assertEqual(y_rt1.toarray().all(), y_rt1_ref.toarray().all())
X_rxn = sparse.load_npz(f'{main_dir}X_rxn_train.npz')
y_rxn = sparse.load_npz(f'{main_dir}y_rxn_train.npz')
X_rxn_ref = sparse.load_npz(f'{ref_dir}X_rxn_train.npz')
y_rxn_ref = sparse.load_npz(f'{ref_dir}y_rxn_train.npz')
self.assertEqual(X_rxn.toarray().all(), X_rxn_ref.toarray().all())
self.assertEqual(y_rxn.toarray().all(), y_rxn_ref.toarray().all())
X_rt2 = sparse.load_npz(f'{main_dir}X_rt2_train.npz')
y_rt2 = sparse.load_npz(f'{main_dir}y_rt2_train.npz')
X_rt2_ref = sparse.load_npz(f'{ref_dir}X_rt2_train.npz')
y_rt2_ref = sparse.load_npz(f'{ref_dir}y_rt2_train.npz')
self.assertEqual(X_rt2.toarray().all(), X_rt2_ref.toarray().all())
self.assertEqual(y_rt2.toarray().all(), y_rt2_ref.toarray().all())
def test_bb_emb(self):
main_dir = './data/'
ref_dir = './data/ref/'
model_type = 'gin_supervised_contextpred'
device = 'cpu'
model = load_pretrained(model_type).to(device)
model.eval()
path_to_building_blocks = './data/building_blocks_matched.csv.gz'
building_blocks = pd.read_csv(path_to_building_blocks, compression='gzip')['SMILES'].tolist()
embeddings = []
for smi in tqdm(building_blocks):
embeddings.append(get_mol_embedding(smi, model=model))
embeddings = np.array(embeddings)
np.save(f'{main_dir}building_blocks_emb.npy', embeddings)
embeddings_ref = np.load(f'{ref_dir}building_blocks_emb.npy')
self.assertEqual(embeddings.all(), embeddings_ref.all()) |
def replace_registered_tbes_with_mock_tbes(M: torch.nn.Module, path: str='') -> None:
for (child_name, child) in M.named_children():
child_path = (f'{path}.{child_name}' if path else child_name)
if isinstance(child, IntNBitTableBatchedEmbeddingBagsCodegen):
M.register_module(child_name, mock_tbe_from_tbe(child))
else:
replace_registered_tbes_with_mock_tbes(child, child_path) |
class OpenRole(ScrimsButton):
def __init__(self, ctx: Context, letter: str):
super().__init__(emoji=ri(letter))
self.ctx = ctx
async def callback(self, interaction: Interaction):
(await interaction.response.defer())
m = (await self.ctx.simple('Mention the role you want to open registration for.'))
role = (await inputs.role_input(self.ctx, delete_after=True))
(await self.ctx.safe_delete(m))
self.view.record.open_role_id = role.id
(await self.view.refresh_view()) |
def test_set_size_2w() -> None:
instance = printer.Dummy()
instance.set_with_default(double_width=True)
expected_sequence = (TXT_NORMAL, TXT_STYLE['size']['2w'], TXT_STYLE['flip'][False], TXT_STYLE['smooth'][False], TXT_STYLE['bold'][False], TXT_STYLE['underline'][0], SET_FONT(b'\x00'), TXT_STYLE['align']['left'], TXT_STYLE['invert'][False])
assert (instance.output == b''.join(expected_sequence)) |
@pytest.mark.skipif((python_implementation() == 'PyPy'), reason='no orjson on PyPy')
# integer bounds were elided in the source; 64-bit limits assumed here, since
# orjson only handles 64-bit integers
@given(everythings(min_int=(- (2 ** 63)), max_int=((2 ** 63) - 1), allow_inf=False), booleans())
def test_orjson_converter(everything: Everything, detailed_validation: bool):
from cattrs.preconf.orjson import make_converter as orjson_make_converter
converter = orjson_make_converter(detailed_validation=detailed_validation)
raw = converter.dumps(everything)
assert (converter.loads(raw, Everything) == everything) |
def flatten(*args, keep_none=False):
t = []
for arg in args:
if (arg is None):
if keep_none:
t.append(arg)
elif isinstance(arg, (str, range, Domain)):
t.append(arg)
elif isinstance(arg, types.GeneratorType):
res = list(arg)
if (len(res) > 0):
t.extend(flatten(*res, keep_none=keep_none))
elif isinstance(arg, Iterable):
t.extend(flatten(*arg, keep_none=keep_none))
else:
t.append(arg)
return tools.curser.cp_array(t) |
def _parse_pylint_stdio_result(document, stdout):
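# Parse pylint's stdout into LSP diagnostics. Each line is expected to look
# like "path:line:col: CODE: message" (pylint's parseable/msg-template style);
# lines that do not match are logged and skipped.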
diagnostics = []
lines = stdout.splitlines()
for raw_line in lines:
parsed_line = re.match('(.*):(\\d*):(\\d*): (\\w*): (.*)', raw_line)
if (not parsed_line):
log.debug("Pylint output parser can't parse line '%s'", raw_line)
continue
parsed_line = parsed_line.groups()
if (len(parsed_line) != 5):
log.debug("Pylint output parser can't parse line '%s'", raw_line)
continue
(_, line, character, code, msg) = parsed_line
line = (int(line) - 1)
character = int(character)
severity_map = {'C': lsp.DiagnosticSeverity.Information, 'E': lsp.DiagnosticSeverity.Error, 'F': lsp.DiagnosticSeverity.Error, 'I': lsp.DiagnosticSeverity.Information, 'R': lsp.DiagnosticSeverity.Hint, 'W': lsp.DiagnosticSeverity.Warning}
severity = severity_map[code[0]]
diagnostic = {'source': 'pylint', 'code': code, 'range': {'start': {'line': line, 'character': character}, 'end': {'line': line, 'character': (len(document.lines[line]) - 1)}}, 'message': msg, 'severity': severity}
if (code in UNNECESSITY_CODES):
diagnostic['tags'] = [lsp.DiagnosticTag.Unnecessary]
if (code in DEPRECATION_CODES):
diagnostic['tags'] = [lsp.DiagnosticTag.Deprecated]
diagnostics.append(diagnostic)
return diagnostics |
@override_settings(PRETIX_WEBHOOK_SECRET='secret')
def test_cannot_call_pretix_webhook_with_incorrect_basic_auth(rest_api_client):
rest_api_client.basic_auth('pretix', 'incorrect')
response = rest_api_client.post(reverse('pretix-webhook'))
assert (response.status_code == 401)
assert ('Incorrect authentication credentials.' in response.json()['detail']) |
class Amm():
def __init__(self, x_1: Token, x_2: Token):
if (x_1.name != 'x_1'):
raise Exception('must be 1')
if (x_2.name != 'x_2'):
raise Exception('must be 2')
self.x_1 = x_1.qty
self.x_2 = x_2.qty
self.invariant = (self.x_1 * self.x_2)
self.liquidity = []
self.total_supply_liquidity = (- 1)
self.x1s = []
l.pool_created(self)
def add_liquidity(self, x_i: Token):
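# Uniswap-v2-style liquidity minting: the first deposit mints
# sqrt(x_i * x_j) - MINIMUM_LIQUIDITY (the minimum stays locked); later
# deposits mint min(dx * L / X, dy * L / Y) so shares remain proportional
# to reserves. `quote` derives the matching amount of the complementary
# token at the current reserve ratio.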
self._update_prev()
l.log.info(self)
_liquidity = 0
reserve_x_i = self._get(x_i.name)
reserve_x_j = self._get(x_i.complement)
x_j = quote(x_i.qty, reserve_x_i, reserve_x_j)
if (self.total_supply_liquidity == (- 1)):
_liquidity = (np.sqrt((x_i.qty * x_j)) - MINIMUM_LIQUIDITY)
if (_liquidity < 0):
_liquidity = 0
self.total_supply_liquidity = MINIMUM_LIQUIDITY
else:
_liquidity = np.min([(x_i.qty * (self.total_supply_liquidity / reserve_x_i)), (x_j * (self.total_supply_liquidity / reserve_x_j))])
self.total_supply_liquidity += _liquidity
self.liquidity.append(_liquidity)
self.prev_invariant = (self.x_1 * self.x_2)
self._set(x_i.name, (self._get(x_i.name) + x_i.qty))
self._set(x_i.complement, (self._get(x_i.complement) + x_j))
self.invariant = (self.x_1 * self.x_2)
self.x1s.append(x_i.qty)
l.liquidity_event(self.liquidity[(- 1)])
l.added_liquidity(x_i, x_j, self)
l.log.info(self)
def remove_liquidity(self, remove_ix: int):
if (not (remove_ix <= len(self.liquidity))):
return
l.log.info(self)
remove_this_liquidity = self.liquidity[remove_ix]
x_1 = (self.x_1 * (remove_this_liquidity / self.total_supply_liquidity))
x_2 = (self.x_2 * (remove_this_liquidity / self.total_supply_liquidity))
self.total_supply_liquidity -= remove_this_liquidity
self.prev_invariant = self.invariant
self.x_1 -= x_1
self.x_2 -= x_2
self.invariant = (self.x_1 * self.x_2)
del self.liquidity[remove_ix]
del self.x1s[remove_ix]
l.removed_liquidity(x_1, x_2, remove_this_liquidity)
l.log.info(self)
def trade(self, x_i: Token):
self._update_prev()
l.log.info(self)
x_j = get_amount_out(x_i, self._get(x_i.name), self._get(x_i.complement))
self._set(x_i.name, (self._get(x_i.name) + x_i.qty))
self._set(x_i.complement, (self._get(x_i.complement) - x_j))
l.trade_executed(x_i, x_j, self)
l.log.info(self)
def _divergence_loss(self, pct_move: float):
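# Divergence (impermanent) loss for a constant-product pool: with invariant
# k = x_1 * x_2 and new price p' = (1 + pct_move) * (x_2 / x_1), the pool
# rebalances to x_1' = sqrt(k / p') and x_2' = sqrt(k * p'). The loss is
# 1 - (value of removable reserves / value if the deposit had been held),
# both valued in units of token 1 at p'.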
if (not ((- 1) <= pct_move <= 5.01)):
raise Exception('invalid pct price move. pct_move in [-1, 5]')
curr_exchange_rate = (self.x_2 / self.x_1)
new_price = ((1 + pct_move) * curr_exchange_rate)
x_1 = np.sqrt((self.invariant / new_price))
x_2 = np.sqrt((self.invariant * new_price))
value_if_held = (self.x_1 + (self.x_2 / new_price))
value_removable = (x_1 + (x_2 / new_price))
imperm_loss = (1 - (value_removable / value_if_held))
return imperm_loss
def _get(self, name: str):
return self.__getattribute__(name)
def _set(self, name: str, value: Any):
self.__setattr__(name, value)
def _update_prev(self):
(self.prev_x_1, self.prev_x_2, self.prev_invariant) = (self.x_1, self.x_2, self.invariant)
def _plot_divergence_loss(self):
x = np.arange((- 0.9999), 5.0001, 0.0001)
y = []
for pct_change in x:
loss = self._divergence_loss(pct_change)
y.append(loss)
domain = [(_x * 100) for _x in x]
plt.plot(domain, [(_y * (- 100)) for _y in y], linewidth=2)
plt.title('Uniswap divergence loss')
plt.xlabel('% change in ratio x_2 / x_1')
plt.ylabel('divergence loss % = hold value / pool value - 1')
return plt
def __repr__(self):
return (json.dumps({'x_1': self.x_1, 'x_2': self.x_2, 'invariant': self.invariant, 'lps': self.liquidity, 'total_supply_liquidity': self.total_supply_liquidity}, indent=4) + '\n') |
class TestTrainingExtensionsChannelPruningCostCalculator(unittest.TestCase):
def test_calculate_channel_pruning_cost_all_layers(self):
model = mnist_torch_model.Net().to('cpu')
print(model)
input_shape = (1, 1, 28, 28)
dummy_input = create_rand_tensors_given_shapes(input_shape, get_device(model))
layer_db = LayerDatabase(model, dummy_input)
layer_ratio_list = []
for layer in layer_db:
if (layer.module is model.conv2):
layer_ratio_list.append(LayerCompRatioPair(layer, Decimal('0.5')))
else:
layer_ratio_list.append(LayerCompRatioPair(layer, None))
dataset_size = 1000
batch_size = 10
data_loader = create_fake_data_loader(dataset_size=dataset_size, batch_size=batch_size)
pruner = InputChannelPruner(data_loader=data_loader, input_shape=(1, 1, 28, 28), num_reconstruction_samples=10, allow_custom_downsample_ops=True)
cost_calculator = ChannelPruningCostCalculator(pruner)
compressed_cost = cost_calculator.calculate_compressed_cost(layer_db, layer_ratio_list, CostMetric.mac)
self.assertEqual(8552704, compressed_cost.mac) |
def main():
try:
session = rs.Session()
js = rs.job.Service('pbspro://localhost/', session=session)
jd = rs.job.Description()
jd.wall_time_limit = 1
jd.executable = '/bin/date'
jd.total_cpu_count = 36
jd.queue = 'regular'
jd.project = 'URTG0014'
jd.output = 'examplejob.out'
jd.error = 'examplejob.err'
job = js.create_job(jd)
job.add_callback(rs.STATE, job_state_change_cb)
print(('Job ID : %s' % job.id))
print(('Job State : %s' % job.state))
print('\n...starting job...\n')
job.run()
print(('Job ID : %s' % job.id))
print('\nListing active jobs: ')
for jid in js.list():
print((' * %s' % jid))
print('\n...waiting for job...\n')
job.wait()
print(('Job State : %s' % job.state))
print(('Exitcode : %s' % job.exit_code))
print(('Exec. hosts : %s' % job.execution_hosts))
print(('Create time : %s' % job.created))
print(('Start time : %s' % job.started))
print(('End time : %s' % job.finished))
js.close()
return 0
except rs.SagaException as ex:
print(('An exception occurred: (%s) %s ' % (ex.type, str(ex))))
print((' \n*** Backtrace:\n %s' % ex.traceback))
return (- 1) |
def infer_metric_tags_from_eval_results(eval_results):
if (eval_results is None):
return {}
result = {}
for key in eval_results.keys():
if (key.lower().replace(' ', '_') in METRIC_TAGS):
result[key.lower().replace(' ', '_')] = key
elif (key.lower() == 'rouge1'):
result['rouge'] = key
return result |
class DeclineChatJoinRequest():
async def decline_chat_join_request(self: 'pyrogram.Client', chat_id: Union[(int, str)], user_id: int) -> bool:
(await self.invoke(raw.functions.messages.HideChatJoinRequest(peer=(await self.resolve_peer(chat_id)), user_id=(await self.resolve_peer(user_id)), approved=False)))
return True |
def ria(phi=1.0, direction=None, mechanism=(), purview=(), partition=None, repertoire=None, partitioned_repertoire=None):
return models.RepertoireIrreducibilityAnalysis(phi=phi, direction=direction, mechanism=mechanism, purview=purview, partition=partition, repertoire=repertoire, partitioned_repertoire=partitioned_repertoire) |
def detect_first_second_person(text):
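# Normalize curly quotes, then split on '"': even-indexed segments fall
# outside quoted dialogue, so only narration (not quoted speech) is scanned
# for first/second-person pronouns and direct-address punctuation.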
text = text.replace('\u201c', '"').replace('\u201d', '"').replace('\n', ' ')  # curly quotes (stripped in the source) restored as escapes
text = text.split('"')
for i in range(0, len(text), 2):
if any([(s in ((' ' + text[i]) + ' ')) for s in ['I ', "I'", ' my ', 'My ', ' me ', 'Me.', 'Me ', ' you ', " you'", 'You ', "You'", ' we ', 'We ', "We'", '?', '!']]):
return True
return False |
class TestResultsModifyingParseAction(PyparsingExpressionTestCase):
def compute_stats_parse_action(t):
t['sum'] = sum(t)
t['ave'] = (sum(t) / len(t))
t['min'] = min(t)
t['max'] = max(t)
tests = [PpTestSpec(desc='A parse action that adds new key-values', expr=pp.pyparsing_common.integer[...].addParseAction(compute_stats_parse_action), text='27 1 14 22 89', expected_list=[27, 1, 14, 22, 89], expected_dict={'ave': 30.6, 'max': 89, 'min': 1, 'sum': 153})] |
class Color(Adjustment):
def process(self, old_face, new_face, raw_mask):
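# Reinhard-style color transfer in LAB space: shift the swapped face's
# channel means to the original face's and rescale each channel's standard
# deviation, so the new face picks up the original's color statistics.
# `preserve_paper` selects which direction the std ratio is applied, as in
# the classic implementation of the technique.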
clip = self.config.get('clip', True)
preserve_paper = self.config.get('preserve_paper', True)
source = cv2.cvtColor(np.rint(((old_face * raw_mask) * 255.0)).astype('uint8'), cv2.COLOR_BGR2LAB).astype('float32')
target = cv2.cvtColor(np.rint(((new_face * raw_mask) * 255.0)).astype('uint8'), cv2.COLOR_BGR2LAB).astype('float32')
(l_mean_src, l_std_src, a_mean_src, a_std_src, b_mean_src, b_std_src) = self.image_stats(source)
(l_mean_tar, l_std_tar, a_mean_tar, a_std_tar, b_mean_tar, b_std_tar) = self.image_stats(target)
(light, col_a, col_b) = cv2.split(target)
light -= l_mean_tar
col_a -= a_mean_tar
col_b -= b_mean_tar
if preserve_paper:
light = ((l_std_tar / l_std_src) * light)
col_a = ((a_std_tar / a_std_src) * col_a)
col_b = ((b_std_tar / b_std_src) * col_b)
else:
light = ((l_std_src / l_std_tar) * light)
col_a = ((a_std_src / a_std_tar) * col_a)
col_b = ((b_std_src / b_std_tar) * col_b)
light += l_mean_src
col_a += a_mean_src
col_b += b_mean_src
light = self._scale_array(light, clip=clip)
col_a = self._scale_array(col_a, clip=clip)
col_b = self._scale_array(col_b, clip=clip)
transfer = cv2.merge([light, col_a, col_b])
transfer = (cv2.cvtColor(transfer.astype('uint8'), cv2.COLOR_LAB2BGR).astype('float32') / 255.0)
background = (new_face * (1 - raw_mask))
merged = (transfer + background)
return merged
@staticmethod
def image_stats(image):
(light, col_a, col_b) = cv2.split(image)
(l_mean, l_std) = (light.mean(), light.std())
(a_mean, a_std) = (col_a.mean(), col_a.std())
(b_mean, b_std) = (col_b.mean(), col_b.std())
return (l_mean, l_std, a_mean, a_std, b_mean, b_std)
@staticmethod
def _min_max_scale(arr, new_range=(0, 255)):
arr_min = arr.min()
arr_max = arr.max()
if ((arr_min < new_range[0]) or (arr_max > new_range[1])):
scaled = ((((new_range[1] - new_range[0]) * (arr - arr_min)) / (arr_max - arr_min)) + new_range[0])
else:
scaled = arr
return scaled
def _scale_array(self, arr, clip=True):
if clip:
scaled = np.clip(arr, 0, 255)
else:
scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))
scaled = self._min_max_scale(arr, new_range=scale_range)
return scaled |
def get_args():
parser = argparse.ArgumentParser(description='process the textgrid files')
parser.add_argument('--path', type=str, required=True, help='Data path')
parser.add_argument('--no-overlap', type=strtobool, default=False, help='Whether to ignore the overlapping utterances.')
parser.add_argument('--max_length', default=100000, type=float, help='Maximum duration of overlapping speech; longer segments are cut.')
parser.add_argument('--overlap_length', default=1, type=float, help='Overlap kept between pieces when a segment longer than max_length is cut.')
parser.add_argument('--mars', type=strtobool, default=False, help='Whether to process mars data set.')
args = parser.parse_args()
return args |
class MutableByteHashmapStrategy(HashmapStrategy):
import_from_mixin(UnwrappedHashmapStrategyMixin)
(erase, unerase) = rerased.new_static_erasing_pair('byte-hashmap-strategy')
def is_correct_type(self, w_obj):
return isinstance(w_obj, values.W_MutableBytes)
def wrap(self, val):
return val
def unwrap(self, w_val):
assert isinstance(w_val, values.W_MutableBytes)
return w_val
def _create_empty_dict(self):
return r_dict(cmp_mutable_bytes, hash_mutable_bytes) |
# decorator restored; auto_attribs is assumed since the fields use annotations
@attr.s(auto_attribs=True, on_setattr=attr.setters.validate)
class ValidatedSetter():
a: int
b: str = attr.ib(on_setattr=attr.setters.NO_OP)
c: bool = attr.ib(on_setattr=attr.setters.frozen)
d: int = attr.ib(on_setattr=[attr.setters.convert, attr.setters.validate])
e: bool = attr.ib(on_setattr=attr.setters.pipe(attr.setters.convert, attr.setters.validate)) |
def intersect(ifst1, ifst2, connect=True, compose_filter='auto'):
try:
compose_filter = _getters.GetComposeFilter(compose_filter)
except ValueError:
raise ValueError('Unknown compose filter: {!r}'.format(compose_filter))
ofst = ifst1._mutable_fst_type()
ifst1._ops.intersect(ifst1, ifst2, ofst, connect, compose_filter)
return ofst |
class Program(Loader):
EXTERN_SYM_BASE = 0x01000000  # original constant elided in the source; placeholder value
EXTERN_SYM_SIZE = 4096
BASE_STACK = 0x70000000  # original constant elided in the source; placeholder (must exceed END_STACK)
END_STACK = 0x60000000  # original constant elided in the source; placeholder value
def __init__(self, path: PathLike):
super(Program, self).__init__(path)
self.path: Path = Path(path)
if (not self.path.is_file()):
raise FileNotFoundError(f'file {path} not found (or not a file)')
self._binary = lief.parse(str(self.path))
if (self._binary is None):
raise FileNotFoundError(f'file {path} not recognised by lief')
self._arch = self._load_arch()
if (self._arch is None):
raise FileNotFoundError(f'binary {path} architecture unsupported {self._binary.abstract.header.architecture}')
try:
self._plfm = _plfm_mapper[self._binary.format]
except KeyError:
self._plfm = None
self._funs = {f.name: f for f in self._binary.concrete.functions}
# @property decorators restored: the accessors are read attribute-style
# elsewhere in the class (e.g. self.format, self.relocation_enum)
@property
def name(self) -> str:
return f'Program({self.path})'
@property
def endianness(self) -> Endian:
return {lief.ENDIANNESS.LITTLE: Endian.LITTLE, lief.ENDIANNESS.BIG: Endian.BIG}[self._binary.abstract.header.endianness]
@property
def entry_point(self) -> Addr:
return self._binary.entrypoint
@property
def architecture(self) -> Architecture:
return self._arch
@property
def platform(self) -> Optional[Platform]:
return self._plfm
@property
def format(self) -> lief.EXE_FORMATS:
return self._binary.format
def _load_arch(self) -> Optional[Architecture]:
arch = self._binary.abstract.header.architecture
if (arch in _arch_mapper):
arch = _arch_mapper[arch]
if (arch == Architecture.X86):
arch = (Architecture.X86 if self._binary.abstract.header.is_32 else Architecture.X86_64)
return arch
else:
return None
@property
def relocation_enum(self):
rel_map = {lief.ELF.ARCH.AARCH64: lief.ELF.RELOCATION_AARCH64, lief.ELF.ARCH.ARM: lief.ELF.RELOCATION_ARM, lief.ELF.ARCH.PPC64: lief.ELF.RELOCATION_PPC64, lief.ELF.ARCH.PPC: lief.ELF.RELOCATION_PPC, lief.ELF.ARCH.i386: lief.ELF.RELOCATION_i386, lief.ELF.ARCH.x86_64: lief.ELF.RELOCATION_X86_64}
return rel_map[self._binary.concrete.header.machine_type]
def _is_glob_dat(self, rel: lief.ELF.Relocation) -> bool:
rel_enum = self.relocation_enum
if hasattr(rel_enum, 'GLOB_DAT'):
return (rel_enum(rel.type) == getattr(rel_enum, 'GLOB_DAT'))
else:
return False
def memory_segments(self) -> Generator[(LoadableSegment, None, None)]:
if (self.format == lief.EXE_FORMATS.ELF):
for (i, seg) in enumerate(self._binary.concrete.segments):
if (seg.type == lief.ELF.SEGMENT_TYPES.LOAD):
content = bytearray(seg.content)
if (seg.virtual_size != len(seg.content)):
content += (bytearray([0]) * (seg.virtual_size - seg.physical_size))
(yield LoadableSegment(seg.virtual_address, perms=Perm(int(seg.flags)), content=bytes(content), name=f'seg{i}'))
else:
raise NotImplementedError(f'memory segments not implemented for: {self.format.name}')
(yield LoadableSegment(self.EXTERN_SYM_BASE, self.EXTERN_SYM_SIZE, (Perm.R | Perm.W), name='[extern]'))
(yield LoadableSegment(self.END_STACK, (self.BASE_STACK - self.END_STACK), (Perm.R | Perm.W), name='[stack]'))
def imported_functions_relocations(self) -> Generator[(Tuple[(str, Addr)], None, None)]:
if (self.format == lief.EXE_FORMATS.ELF):
try:
for rel in self._binary.concrete.pltgot_relocations:
(yield (rel.symbol.name, rel.address))
for rel in self._binary.dynamic_relocations:
if (self._is_glob_dat(rel) and rel.has_symbol and (not rel.symbol.is_variable)):
(yield (rel.symbol.name, rel.address))
except Exception:
logger.error('Something wrong with the pltgot relocations')
else:
raise NotImplementedError(f'Imported functions relocations not implemented for: {self.format.name}')
def imported_variable_symbols_relocations(self) -> Generator[(Tuple[(str, Addr)], None, None)]:
if (self.format == lief.EXE_FORMATS.ELF):
rel_enum = self.relocation_enum
for rel in self._binary.dynamic_relocations:
if rel.has_symbol:
if rel.symbol.is_variable:
(yield (rel.symbol.name, rel.address))
else:
raise NotImplementedError(f'Imported symbols relocations not implemented for: {self.format.name}')
def find_function_addr(self, name: str) -> Optional[Addr]:
f = self._funs.get(name)
return (f.address if f else None)
@property
def arch_mode(self) -> ArchMode:
pass |
class MonkeyPatch():
def __init__(self) -> None:
self._setattr: List[Tuple[(object, str, object)]] = []
self._setitem: List[Tuple[(Mapping[(Any, Any)], object, object)]] = []
self._cwd: Optional[str] = None
self._savesyspath: Optional[List[str]] = None
@classmethod
@contextmanager
def context(cls) -> Generator[('MonkeyPatch', None, None)]:
m = cls()
try:
(yield m)
finally:
m.undo()
@overload
def setattr(self, target: str, name: object, value: Notset=..., raising: bool=...) -> None:
...
@overload
def setattr(self, target: object, name: str, value: object, raising: bool=...) -> None:
...
def setattr(self, target: Union[(str, object)], name: Union[(object, str)], value: object=notset, raising: bool=True) -> None:
__tracebackhide__ = True
import inspect
if isinstance(value, Notset):
if (not isinstance(target, str)):
raise TypeError('use setattr(target, name, value) or setattr(target, value) with target being a dotted import string')
value = name
(name, target) = derive_importpath(target, raising)
elif (not isinstance(name, str)):
raise TypeError('use setattr(target, name, value) with name being a string or setattr(target, value) with target being a dotted import string')
oldval = getattr(target, name, notset)
if (raising and (oldval is notset)):
raise AttributeError(f'{target!r} has no attribute {name!r}')
if inspect.isclass(target):
oldval = target.__dict__.get(name, notset)
self._setattr.append((target, name, oldval))
setattr(target, name, value)
def delattr(self, target: Union[(object, str)], name: Union[(str, Notset)]=notset, raising: bool=True) -> None:
__tracebackhide__ = True
import inspect
if isinstance(name, Notset):
if (not isinstance(target, str)):
raise TypeError('use delattr(target, name) or delattr(target) with target being a dotted import string')
(name, target) = derive_importpath(target, raising)
if (not hasattr(target, name)):
if raising:
raise AttributeError(name)
else:
oldval = getattr(target, name, notset)
if inspect.isclass(target):
oldval = target.__dict__.get(name, notset)
self._setattr.append((target, name, oldval))
delattr(target, name)
def setitem(self, dic: Mapping[(K, V)], name: K, value: V) -> None:
self._setitem.append((dic, name, dic.get(name, notset)))
dic[name] = value
def delitem(self, dic: Mapping[(K, V)], name: K, raising: bool=True) -> None:
if (name not in dic):
if raising:
raise KeyError(name)
else:
self._setitem.append((dic, name, dic.get(name, notset)))
del dic[name]
def setenv(self, name: str, value: str, prepend: Optional[str]=None) -> None:
if (not isinstance(value, str)):
warnings.warn(PytestWarning('Value of environment variable {name} type should be str, but got {value!r} (type: {type}); converted to str implicitly'.format(name=name, value=value, type=type(value).__name__)), stacklevel=2)
value = str(value)
if (prepend and (name in os.environ)):
value = ((value + prepend) + os.environ[name])
self.setitem(os.environ, name, value)
def delenv(self, name: str, raising: bool=True) -> None:
environ: MutableMapping[(str, str)] = os.environ
self.delitem(environ, name, raising=raising)
def syspath_prepend(self, path) -> None:
if (self._savesyspath is None):
self._savesyspath = sys.path[:]
sys.path.insert(0, str(path))
if ('pkg_resources' in sys.modules):
from pkg_resources import fixup_namespace_packages
fixup_namespace_packages(str(path))
from importlib import invalidate_caches
invalidate_caches()
def chdir(self, path: Union[(str, 'os.PathLike[str]')]) -> None:
if (self._cwd is None):
self._cwd = os.getcwd()
os.chdir(path)
def undo(self) -> None:
for (obj, name, value) in reversed(self._setattr):
if (value is not notset):
setattr(obj, name, value)
else:
delattr(obj, name)
self._setattr[:] = []
for (dictionary, key, value) in reversed(self._setitem):
if (value is notset):
try:
del dictionary[key]
except KeyError:
pass
else:
dictionary[key] = value
self._setitem[:] = []
if (self._savesyspath is not None):
sys.path[:] = self._savesyspath
self._savesyspath = None
if (self._cwd is not None):
os.chdir(self._cwd)
self._cwd = None |
class TestDefaultEnvFile(EnvironmentTestCase):
def test_run_with_default_env_file(self, runner, target, env1):
env = self.run_environ(runner, *target, '--default-env-file', env1)
assert (env.get('SECRET') == 'unknown')
assert (env.get('PASSWORD') == 'sweet')
assert (env.get('PATH') == '/usr/bin')
def test_run_with_multiple_default_env_files(self, runner, target, env1, env2):
env = self.run_environ(runner, *target, '--default-env-file', env1, '--default-env-file', env2)
assert (env.get('SECRET') == 'unknown')
assert (env.get('PASSWORD') == 'sweet')
assert (env.get('PATH') == '/usr/bin')
env = self.run_environ(runner, *target, '--default-env-file', env2, '--default-env-file', env1)
assert (env.get('SECRET') == 'unknown')
assert (env.get('PASSWORD') == 'bitter')
assert (env.get('PATH') == '/usr/bin') |
def build_roi_heads(cfg):
roi_heads = []
if (not cfg.MODEL.RPN_ONLY):
roi_heads.append(('box', build_roi_box_head(cfg)))
if cfg.MODEL.MASK_ON:
roi_heads.append(('mask', build_roi_mask_head(cfg)))
if cfg.MODEL.KEYPOINT_ON:
roi_heads.append(('keypoint', build_roi_keypoint_head(cfg)))
if roi_heads:
roi_heads = CombinedROIHeads(cfg, roi_heads)
return roi_heads |
def checkThisFile(f):
if isinstance(f, git.Diff):
if (f.deleted_file or (f.b_blob.size == 0)):
return False
f = f.b_path
elif ((not os.path.exists(f)) or (os.stat(f).st_size == 0)):
return False
for exempt in ExemptFiles:
if exempt.search(f):
return False
for checker in FilesToCheck:
if checker.search(f):
return True
return False |
class WebEngineAudio(browsertab.AbstractAudio):
_widget: webview.WebEngineView
def __init__(self, tab, parent=None):
super().__init__(tab, parent)
self._overridden = False
delay_ms = 2000
self._silence_timer = QTimer(self)
self._silence_timer.setSingleShot(True)
self._silence_timer.setInterval(delay_ms)
def _connect_signals(self):
page = self._widget.page()
page.audioMutedChanged.connect(self.muted_changed)
page.recentlyAudibleChanged.connect(self._delayed_recently_audible_changed)
self._tab.url_changed.connect(self._on_url_changed)
config.instance.changed.connect(self._on_config_changed)
self._silence_timer.timeout.connect(functools.partial(self.recently_audible_changed.emit, False))
def _delayed_recently_audible_changed(self, recently_audible):
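# Debounce the audible indicator: "became audible" is forwarded immediately,
# while "became silent" is delayed by the silence timer so short pauses
# (e.g. between tracks) do not make the indicator flicker.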
timer = self._silence_timer
if recently_audible:
if timer.isActive():
timer.stop()
self.recently_audible_changed.emit(recently_audible)
else:
if timer.isActive():
return
timer.start()
def set_muted(self, muted: bool, override: bool=False) -> None:
self._overridden = override
assert (self._widget is not None)
page = self._widget.page()
page.setAudioMuted(muted)
def is_muted(self):
page = self._widget.page()
return page.isAudioMuted()
def is_recently_audible(self):
page = self._widget.page()
return page.recentlyAudible()
@pyqtSlot(QUrl)
def _on_url_changed(self, url):
if (self._overridden or (not url.isValid())):
return
mute = config.instance.get('content.mute', url=url)
self.set_muted(mute)
@config.change_filter('content.mute')
def _on_config_changed(self):
self._on_url_changed(self._tab.url()) |
class TestPersistence(TestCase):
def test_unitquantity_persistence(self):
x = pq.m
y = pickle.loads(pickle.dumps(x))
self.assertQuantityEqual(x, y)
x = pq.CompoundUnit('pc/cm**3')
y = pickle.loads(pickle.dumps(x))
self.assertQuantityEqual(x, y)
def test_quantity_persistence(self):
x = (20 * pq.m)
y = pickle.loads(pickle.dumps(x))
self.assertQuantityEqual(x, y)
def test_uncertainquantity_persistence(self):
x = UncertainQuantity(20, 'm', 0.2)
y = pickle.loads(pickle.dumps(x))
self.assertQuantityEqual(x, y)
def test_unitconstant_persistence(self):
x = constants.m_e
y = pickle.loads(pickle.dumps(x))
self.assertQuantityEqual(x, y)
def test_quantity_object_dtype(self):
x = Quantity(1, dtype=object)
y = pickle.loads(pickle.dumps(x))
self.assertQuantityEqual(x, y)
def test_uncertainquantity_object_dtype(self):
x = UncertainQuantity(20, 'm', 0.2, dtype=object)
y = pickle.loads(pickle.dumps(x))
self.assertQuantityEqual(x, y)
def test_backward_compat(self):
orig = [pq.m, (20 * pq.m), UncertainQuantity(20, 'm', 0.2), constants.m_e]
data = [b'\x80\x02cquantities.unitquantity\nUnitLength\nq\x00(X\x05\x00\x00\x00meterq\x01NX\x01\x00\x00\x00mq\x02N]q\x03(X\x06\x00\x00\x00metersq\x04X\x05\x00\x00\x00metreq\x05X\x06\x00\x00\x00metresq\x06eNtq\x07Rq\x08K\x01K\x02K\x02\x86q\t\x86q\nb.', b'\x80\x02cquantities.quantity\n_reconstruct_quantity\nq\x00(cquantities.quantity\nQuantity\nq\x01cnumpy\nndarray\nq\x02K\x00\x85q\x03X\x01\x00\x00\x00bq\x04tq\x05Rq\x06(K\x01)cnumpy\ndtype\nq\x07X\x02\x00\x00\x00f8q\x08K\x00K\x01\x87q\tRq\n(K\x03X\x01\x00\x00\x00<q\x0bNNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00tq\x0cb\x89c_codecs\nencode\nq\rX\x08\x00\x00\x00\x00\x00\x00\x00\x00\\x0eX\x06\x00\x00\x00latin1q\x0f\x86q\x10Rq\x11cquantities.dimensionality\nDimensionality\nq\x12)\x81q\x13cquantities.unitquantity\nUnitLength\nq\x14(X\x05\x00\x00\x00meterq\x15NX\x01\x00\x00\x00mq\x16N]q\x17(X\x06\x00\x00\x00metersq\x18X\x05\x00\x00\x00metreq\x19X\x06\x00\x00\x00metresq\x1aeNtq\x1bRq\x1cK\x01K\x02K\x02\x86q\x1d\x86q\x1ebK\x01stq\x1fb.', b'\x80\x02cquantities.quantity\n_reconstruct_quantity\nq\x00(cquantities.uncertainquantity\nUncertainQuantity\nq\x01cnumpy\nndarray\nq\x02K\x00\x85q\x03X\x01\x00\x00\x00bq\x04tq\x05Rq\x06(K\x01)cnumpy\ndtype\nq\x07X\x02\x00\x00\x00f8q\x08K\x00K\x01\x87q\tRq\n(K\x03X\x01\x00\x00\x00<q\x0bNNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00tq\x0cb\x89c_codecs\nencode\nq\rX\x08\x00\x00\x00\x00\x00\x00\x00\x00\\x0eX\x06\x00\x00\x00latin1q\x0f\x86q\x10Rq\x11cquantities.dimensionality\nDimensionality\nq\x12)\x81q\x13cquantities.unitquantity\nUnitLength\nq\x14(X\x05\x00\x00\x00meterq\x15NX\x01\x00\x00\x00mq\x16N]q\x17(X\x06\x00\x00\x00metersq\x18X\x05\x00\x00\x00metreq\x19X\x06\x00\x00\x00metresq\x1aeNtq\x1bRq\x1cK\x01K\x02K\x02\x86q\x1d\x86q\x1ebK\x01sh\x00(cquantities.quantity\nQuantity\nq\x1fh\x02h\x03h\x04tq Rq!(K\x01)h\n\x89h\rX\x0f\x00\x00\x00\xc2\x9a\xc2\x99\xc2\x99\xc2\x99\xc2\x99\xc2\x99\xc3\x89?q"h\x0f\x86q#Rq$h\x12)\x81q%h\x1cK\x01stq&btq\'b.', b'\x80\x02cquantities.unitquantity\nUnitConstant\nq\x00(X\r\x00\x00\x00electron_massq\x01cquantities.quantity\n_reconstruct_quantity\nq\x02(cquantities.quantity\nQuantity\nq\x03cnumpy\nndarray\nq\x04K\x00\x85q\x05X\x01\x00\x00\x00bq\x06tq\x07Rq\x08(K\x01)cnumpy\ndtype\nq\tX\x02\x00\x00\x00f8q\nK\x00K\x01\x87q\x0bRq\x0c(K\x03X\x01\x00\x00\x00<q\rNNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00tq\x0eb\x89c_codecs\nencode\nq\x0fX\x0c\x00\x00\x00N?\xc3\xab\xc2\x93\xc3\x9cy\xc2\xb29q\x10X\x06\x00\x00\x00latin1q\x11\x86q\x12Rq\x13cquantities.dimensionality\nDimensionality\nq\x14)\x81q\x15cquantities.unitquantity\nUnitMass\nq\x16(X\x08\x00\x00\x00kilogramq\x17NX\x02\x00\x00\x00kgq\x18N]q\x19X\t\x00\x00\x00kilogramsq\x1aaNtq\x1bRq\x1cK\x01K\x01K?\x86q\x1d\x86q\x1ebK\x01stq\x1fbX\x03\x00\x00\x00m_eq X\x04\x00\x00\x00m\xe2\x82\x91q!]q"Ntq#Rq$K\x01K\x00M!\x01\x86q%\x86q&b.']
for (x, d) in zip(orig, data):
y = pickle.loads(d)
self.assertQuantityEqual(x, y)
def test_copy_quantity(self):
for dtype in [float, object]:
x = (20 * pq.m).astype(dtype)
y = copy.copy(x)
self.assertQuantityEqual(x, y)
def test_copy_uncertainquantity(self):
for dtype in [float, object]:
x = UncertainQuantity(20, 'm', 0.2).astype(dtype)
y = copy.copy(x)
self.assertQuantityEqual(x, y) |
class GarbageCollectorTest(unittest.TestCase):
def test_garbage_collector_call_count_train(self) -> None:
input_dim = 2
dataset_len = 10
batch_size = 2
max_epochs = 2
expected_num_total_steps = ((dataset_len / batch_size) * max_epochs)
my_unit = DummyTrainUnit(2)
gc_callback_mock = MagicMock(spec=GarbageCollector)
dataloader = generate_random_dataloader(dataset_len, input_dim, batch_size)
train(my_unit, dataloader, max_epochs=max_epochs, callbacks=[gc_callback_mock])
self.assertEqual(gc_callback_mock.on_train_start.call_count, 1)
self.assertEqual(gc_callback_mock.on_train_step_end.call_count, expected_num_total_steps)
self.assertEqual(gc_callback_mock.on_train_end.call_count, 1)
def test_garbage_collector_enabled_train(self) -> None:
input_dim = 2
dataset_len = 10
batch_size = 2
max_epochs = 2
my_unit = DummyTrainUnit(2)
gc_callback = GarbageCollector(2)
dataloader = generate_random_dataloader(dataset_len, input_dim, batch_size)
self.assertTrue(gc.isenabled())
train(my_unit, dataloader, max_epochs=max_epochs, callbacks=[gc_callback])
self.assertTrue(gc.isenabled())
def test_garbage_collector_call_count_evaluate(self) -> None:
input_dim = 2
dataset_len = 10
batch_size = 2
expected_num_total_steps = (dataset_len / batch_size)
my_unit = DummyEvalUnit(2)
gc_callback_mock = MagicMock(spec=GarbageCollector)
dataloader = generate_random_dataloader(dataset_len, input_dim, batch_size)
evaluate(my_unit, dataloader, callbacks=[gc_callback_mock])
self.assertEqual(gc_callback_mock.on_eval_start.call_count, 1)
self.assertEqual(gc_callback_mock.on_eval_step_end.call_count, expected_num_total_steps)
self.assertEqual(gc_callback_mock.on_eval_end.call_count, 1)
def test_garbage_collector_enabled_evaluate(self) -> None:
input_dim = 2
dataset_len = 10
batch_size = 2
my_unit = DummyEvalUnit(2)
gc_callback = GarbageCollector(2)
dataloader = generate_random_dataloader(dataset_len, input_dim, batch_size)
self.assertTrue(gc.isenabled())
evaluate(my_unit, dataloader, callbacks=[gc_callback])
self.assertTrue(gc.isenabled())
def test_garbage_collector_call_count_predict(self) -> None:
input_dim = 2
dataset_len = 10
batch_size = 2
expected_num_total_steps = (dataset_len / batch_size)
my_unit = DummyPredictUnit(2)
gc_callback_mock = MagicMock(spec=GarbageCollector)
dataloader = generate_random_dataloader(dataset_len, input_dim, batch_size)
predict(my_unit, dataloader, callbacks=[gc_callback_mock])
self.assertEqual(gc_callback_mock.on_predict_start.call_count, 1)
self.assertEqual(gc_callback_mock.on_predict_step_end.call_count, expected_num_total_steps)
self.assertEqual(gc_callback_mock.on_predict_end.call_count, 1)
def test_garbage_collector_enabled_predict(self) -> None:
input_dim = 2
dataset_len = 10
batch_size = 2
my_unit = DummyPredictUnit(2)
gc_callback = GarbageCollector(2)
dataloader = generate_random_dataloader(dataset_len, input_dim, batch_size)
self.assertTrue(gc.isenabled())
predict(my_unit, dataloader, callbacks=[gc_callback])
self.assertTrue(gc.isenabled())
def test_garbage_collector_call_count_fit(self) -> None:
input_dim = 2
train_dataset_len = 10
eval_dataset_len = 6
batch_size = 2
max_epochs = 2
evaluate_every_n_epochs = 1
expected_num_total_steps = (((train_dataset_len / batch_size) * max_epochs) + ((eval_dataset_len / batch_size) * max_epochs))
gc_step_interval = 4
my_unit = DummyFitUnit(2)
gc_callback = GarbageCollector(gc_step_interval)
train_dataloader = generate_random_dataloader(train_dataset_len, input_dim, batch_size)
eval_dataloader = generate_random_dataloader(eval_dataset_len, input_dim, batch_size)
expected_num_calls_to_gc_collect = (expected_num_total_steps + (expected_num_total_steps / gc_step_interval))
with mock.patch('torchtnt.framework.callbacks.garbage_collector.gc.collect') as gc_collect_mock:
fit(my_unit, train_dataloader=train_dataloader, eval_dataloader=eval_dataloader, max_epochs=max_epochs, evaluate_every_n_epochs=evaluate_every_n_epochs, callbacks=[gc_callback])
self.assertEqual(gc_collect_mock.call_count, expected_num_calls_to_gc_collect)
def test_garbage_collector_enabled_fit(self) -> None:
input_dim = 2
train_dataset_len = 10
eval_dataset_len = 6
batch_size = 2
max_epochs = 2
evaluate_every_n_epochs = 1
my_unit = DummyFitUnit(2)
gc_callback = GarbageCollector(2)
train_dataloader = generate_random_dataloader(train_dataset_len, input_dim, batch_size)
eval_dataloader = generate_random_dataloader(eval_dataset_len, input_dim, batch_size)
self.assertTrue(gc.isenabled())
fit(my_unit, train_dataloader=train_dataloader, eval_dataloader=eval_dataloader, max_epochs=max_epochs, evaluate_every_n_epochs=evaluate_every_n_epochs, callbacks=[gc_callback])
self.assertTrue(gc.isenabled()) |
class PFSError(enum.IntEnum):
INVALID_REQUEST = 2000
INVALID_SIGNATURE = 2001
REQUEST_OUTDATED = 2002
BAD_IOU = 2100
MISSING_IOU = 2101
WRONG_IOU_RECIPIENT = 2102
IOU_EXPIRED_TOO_EARLY = 2103
INSUFFICIENT_SERVICE_PAYMENT = 2104
IOU_ALREADY_CLAIMED = 2105
USE_THIS_IOU = 2106
DEPOSIT_TOO_LOW = 2107
NO_ROUTE_FOUND = 2201
@staticmethod
def is_iou_rejected(error_code: int) -> bool:
return ((error_code >= 2100) and (error_code < 2200)) |
class ELBO_NF(object):
def __init__(self, criterion, num_samples, temperature=1.0):
self.criterion = criterion
self.num_samples = num_samples
self.temperature = temperature
def __call__(self, model, input, target):
t = model.flow.sample()
output = model(input, t=t)
(nll, _) = self.criterion(output, target)
kl = model.compute_kl_mc(t)
loss = (nll + ((kl * self.temperature) / self.num_samples))
return (loss, output, {'nll': nll.item(), 'kl': kl.item()}) |
class UseThenDisconnect(object):
def __init__(self, config_object):
self.config_object = config_object
def __enter__(self):
pass
def __exit__(self, typ, value, traceback):
if (self.config_object.get('TESTING') is True):
return
close_db_filter(None) |
class ListRefresher(QThread):
infos = pyqtSignal(object)
err_msg = pyqtSignal(str, int)
def __init__(self, parent=None):
super(ListRefresher, self).__init__(parent)
self._disk = None
self._fid = (- 1)
self.r_files = True
self.r_folders = True
self.r_path = True
self._mutex = QMutex()
self._is_work = False
def set_disk(self, disk):
self._disk = disk
def set_values(self, fid, r_files=True, r_folders=True, r_path=True):
if (not self._is_work):
self._fid = fid
self.r_files = r_files
self.r_folders = r_folders
self.r_path = r_path
self.start()
else:
self.err_msg.emit('A refresh task is already running, please try again later!', 3100)  # original non-ASCII message stripped in the source; English approximation
def __del__(self):
self.wait()
def stop(self):
self._mutex.lock()
self._is_work = False
self._mutex.unlock()
def goto_root_dir(self):
self._fid = (- 1)
self.run()
def run(self):
if (not self._is_work):
self._mutex.lock()
self._is_work = True
emit_infos = {}
emit_infos['r'] = {'fid': self._fid, 'files': self.r_files, 'folders': self.r_folders, 'path': self.r_path}
try:
if self.r_files:
info = {i.name: i for i in self._disk.get_file_list(self._fid)}
emit_infos['file_list'] = {key: info.get(key) for key in sorted(info.keys())}
if self.r_folders:
(folders, full_path) = self._disk.get_dir_list(self._fid)
if ((not full_path) and (not folders) and (self._fid != (- 1))):
self.err_msg.emit(f'Folder id {self._fid} not found, returning to the root directory', 2900)  # original non-ASCII message stripped in the source; English approximation
self._is_work = False
self._mutex.unlock()
return self.goto_root_dir()
info = {i.name: i for i in folders}
emit_infos['folder_list'] = {key: info.get(key) for key in sorted(info.keys())}
emit_infos['path_list'] = full_path
except TimeoutError:
self.err_msg.emit('Request timed out, please check the network and retry!', 7000)  # original non-ASCII message stripped in the source; English approximation
except Exception as e:
self.err_msg.emit('Refresh failed, please check the network and retry!', 7000)  # original non-ASCII message stripped in the source; English approximation
logger.error(f'ListRefresher error: e={e}')
else:
self.infos.emit(emit_infos)
self._is_work = False
self._mutex.unlock() |
class TToggledPlayOrderMenu(TestCase):
def setUp(self):
self.orders = Orders([OrderShuffle, OrderWeighted, FakeOrder])
self.tpom = ToggledPlayOrderMenu(Icons.AUDIO_X_GENERIC, orders=self.orders, current_order=OrderShuffle, enabled=True)
def tearDown(self):
self.tpom.destroy()
def test_enabled_initially(self):
self.assertTrue(self.tpom.enabled)
def test_setting_enabled(self):
self.tpom.enabled = False
self.assertFalse(self.tpom.enabled)
self.tpom.enabled = True
self.assertTrue(self.tpom.enabled)
def test_initial(self):
self.assertEqual(self.tpom.current, OrderShuffle)
def test_unknown_name(self):
self.assertRaises(ValueError, self.tpom.set_active_by_name, 'foobar')
def test_set_by_name(self):
self.tpom.set_active_by_name('fake')
self.assertEqual(self.tpom.current.name, 'fake')
def test_get_name(self):
for order in self.orders:
self.tpom.current = order
self.assertEqual(self.tpom.current, order)
def test_set_orders(self):
self.tpom.set_orders([])
self.assertFalse(self.tpom.current)
def test_playorder_disables_when_order_disappears(self):
self.tpom.orders = Orders([OrderWeighted, FakeOrder])
self.assertFalse(self.tpom.enabled) |
class Color(SObject):
name__ = String.T(optional=True)
r__ = Component.T(default=0.0, help='Red component ``[0., 1.]``.')
g__ = Component.T(default=0.0, help='Green component ``[0., 1.]``.')
b__ = Component.T(default=0.0, help='Blue component ``[0., 1.]``.')
a__ = Component.T(default=1.0, help='Alpha (opacity) component ``[0., 1.]``.')
def __init__(self, *args, **kwargs):
if (len(args) == 1):
SObject.__init__(self, init_props=False)
self.name = args[0]
elif (len(args) in (3, 4)):
SObject.__init__(self, init_props=False)
if all((isinstance(x, int) for x in args)):
if (len(args) == 3):
args = (args + (255,))
self.RGBA = args
elif all((isinstance(x, float) for x in args)):
if (len(args) == 3):
args = (args + (1.0,))
self.rgba = args
else:
SObject.__init__(self, init_props=False)
self.name__ = kwargs.get('name', None)
self.r__ = kwargs.get('r', 0.0)
self.g__ = kwargs.get('g', 0.0)
self.b__ = kwargs.get('b', 0.0)
self.a__ = kwargs.get('a', 1.0)
def __eq__(self, other):
return ((self.name__ == other.name__) and (self.r__ == other.r__) and (self.g__ == other.g__) and (self.b__ == other.b__) and (self.a__ == other.a__))
# property/setter decorators restored: the duplicate def names and the
# attribute-style access below (e.g. self.RGBA, self.str_hex) imply they
# were stripped during extraction
@property
def name(self):
return (self.name__ or '')
@name.setter
def name(self, name):
(self.r__, self.g__, self.b__, self.a__) = parse_color(name)
self.name__ = name
@property
def r(self):
return self.r__
@r.setter
def r(self, r):
self.name__ = None
self.r__ = r
@property
def g(self):
return self.g__
@g.setter
def g(self, g):
self.name__ = None
self.g__ = g
@property
def b(self):
return self.b__
@b.setter
def b(self, b):
self.name__ = None
self.b__ = b
@property
def a(self):
return self.a__
@a.setter
def a(self, a):
self.name__ = None
self.a__ = a
@property
def rgb(self):
return (self.r__, self.g__, self.b__)
@rgb.setter
def rgb(self, rgb):
(self.r__, self.g__, self.b__) = rgb
self.name__ = None
@property
def rgba(self):
return (self.r__, self.g__, self.b__, self.a__)
@rgba.setter
def rgba(self, rgba):
(self.r__, self.g__, self.b__, self.a__) = rgba
self.name__ = None
@property
def RGB(self):
return tuple((to_int_255(x) for x in self.rgb))
@RGB.setter
def RGB(self, RGB):
(self.r__, self.g__, self.b__) = (to_float_1(x) for x in RGB)
self.name__ = None
@property
def RGBA(self):
return tuple((to_int_255(x) for x in self.rgba))
@RGBA.setter
def RGBA(self, RGBA):
(self.r__, self.g__, self.b__, self.a__) = (to_float_1(x) for x in RGBA)
self.name__ = None
@property
def str_hex(self):
return simplify_hex(('#%02x%02x%02x%02x' % self.RGBA))
def use_hex_name(self):
self.name__ = simplify_hex(('#%02x%02x%02x%02x' % self.RGBA))
@property
def str_rgb(self):
return ('rgb(%5.3f, %5.3f, %5.3f)' % self.rgb)
@property
def str_RGB(self):
return ('RGB(%i, %i, %i)' % self.RGB)
@property
def str_rgba(self):
return ('rgba(%5.3f, %5.3f, %5.3f, %5.3f)' % self.rgba)
@property
def str_RGBA(self):
return ('RGBA(%i, %i, %i, %i)' % self.RGBA)
def describe(self):
return ('\n name: %s\n hex: %s\n RGBA: %s\n rgba: %s\n str: %s\n' % (self.name, self.str_hex, self.str_RGBA, self.str_rgba, str(self)))
def __str__(self):
return (self.name__ if (self.name__ is not None) else self.str_rgba)
class BBCodeLexer(RegexLexer):
name = 'BBCode'
aliases = ['bbcode']
mimetypes = ['text/x-bbcode']
url = 'https://www.bbcode.org/'
version_added = '0.6'
tokens = {'root': [('[^[]+', Text), ('\\[/?\\w+', Keyword, 'tag'), ('\\[', Text)], 'tag': [('\\s+', Text), ('(\\w+)(=)("?[^\\s"\\]]+"?)', bygroups(Name.Attribute, Operator, String)), ('(=)("?[^\\s"\\]]+"?)', bygroups(Operator, String)), ('\\]', Keyword, '#pop')]} |
class Objdict(dict):
def __getattr__(self, name):
if (name in self):
return self[name]
else:
raise AttributeError(('No such attribute: ' + name))
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if (name in self):
del self[name]
else:
raise AttributeError(('No such attribute: ' + name)) |
class SignedDataEntityHandler(ContextEntityHandler):
def credential_username(self, entity_reference):
return None
def get_serialized_entity_reference(self, entity_reference):
raise NotImplementedError
def deserialize_entity_reference(self, serialized_entity_reference):
raise NotImplementedError
def description(self, entity_reference):
return 'signed'
def analytics_id_and_public_metadata(self, entity_reference):
return ('signed', {'signed': entity_reference}) |
def train(training_data_loader, G_optimizer, D_optimizer, model, discr, criterion, epoch):
lr = adjust_learning_rate(D_optimizer, (epoch - 1))
mse = []
Gloss = []
Dloss = []
for param_group in G_optimizer.param_groups:
param_group['lr'] = (lr / 2)
for param_group in D_optimizer.param_groups:
param_group['lr'] = lr
print('Epoch={}, lr={}'.format(epoch, D_optimizer.param_groups[0]['lr']))
for (iteration, batch) in enumerate(training_data_loader, 1):
target = Variable(batch[1])
input = Variable(batch[0])
if opt.cuda:
target = target.cuda()
input = input.cuda()
discr.zero_grad()
D_result = discr(target).squeeze()
D_real_loss = (- D_result.mean())
G_result = model(input)
D_result = discr(G_result.data).squeeze()
D_fake_loss = D_result.mean()
D_train_loss = (D_real_loss + D_fake_loss)
Dloss.append(D_train_loss.data)
D_train_loss.backward()
D_optimizer.step()
discr.zero_grad()
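# WGAN-GP gradient penalty: sample x_hat = alpha * real + (1 - alpha) * fake
# and penalize lambda * E[(||grad_{x_hat} D(x_hat)||_2 - 1)^2] (lambda = 10)
# to enforce the critic's 1-Lipschitz constraint.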
alpha = torch.rand(target.size(0), 1, 1, 1)
alpha1 = alpha.cuda().expand_as(target)
interpolated1 = Variable(((alpha1 * target.data) + ((1 - alpha1) * G_result.data)), requires_grad=True)
out = discr(interpolated1).squeeze()
grad = torch.autograd.grad(outputs=out, inputs=interpolated1, grad_outputs=torch.ones(out.size()).cuda(), retain_graph=True, create_graph=True, only_inputs=True)[0]
grad = grad.view(grad.size(0), (- 1))
grad_l2norm = torch.sqrt(torch.sum((grad ** 2), dim=1))
d_loss_gp = torch.mean(((grad_l2norm - 1) ** 2))
gp_loss = (10 * d_loss_gp)
gp_loss.backward()
D_optimizer.step()
discr.zero_grad()
model.zero_grad()
G_result = model(input)
D_result = discr(G_result).squeeze()
mse_loss = (torch.mean(((G_result - input) ** 2)) ** 0.5)
mse.append(mse_loss.data)
G_train_loss = ((- D_result.mean()) + (opt.sigma * mse_loss))
Gloss.append(G_train_loss)
G_train_loss.backward()
G_optimizer.step()
if ((iteration % 10) == 0):
print('===> Epoch[{}]({}/{}): Loss_G: {:.5}, Loss_mse: {:.5}'.format(epoch, iteration, len(training_data_loader), G_train_loss.data, mse_loss.data))
save_image(G_result.data, './checksample/output.png')
save_image(input.data, './checksample/input.png')
save_image(target.data, './checksample/gt.png')
return (torch.mean(torch.FloatTensor(mse)), torch.mean(torch.FloatTensor(Gloss))) |
class NLLModel(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.models = nn.ModuleList()
self.device = [(i % args.n_gpu) for i in range(args.n_model)]
self.loss_fnt = nn.CrossEntropyLoss()
for i in range(args.n_model):
model = NERModel(args)
model.to(self.device[i])
self.models.append(model)
def forward(self, input_ids, attention_mask, labels=None):
if (labels is None):
return self.models[0](input_ids=input_ids, attention_mask=attention_mask)
else:
num_models = len(self.models)
outputs = []
for i in range(num_models):
output = self.models[i](input_ids=input_ids.to(self.device[i]), attention_mask=attention_mask.to(self.device[i]), labels=(labels.to(self.device[i]) if (labels is not None) else None))
output = tuple([o.to(0) for o in output])
outputs.append(output)
model_output = outputs[0]
loss = (sum([output[0] for output in outputs]) / num_models)
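# Co-regularization across the ensemble: average the per-model softmax
# distributions and add a KL-divergence term between each model's
# distribution and that average (masked to valid, non-padding labels),
# weighted by alpha_t, to encourage the models to agree.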
logits = [output[1] for output in outputs]
probs = [F.softmax(logit, dim=(- 1)) for logit in logits]
avg_prob = torch.stack(probs, dim=0).mean(0)
mask = (labels.view((- 1)) != (- 1)).to(logits[0])
reg_loss = (sum([(kl_div(avg_prob, prob) * mask) for prob in probs]) / num_models)
reg_loss = (reg_loss.sum() / (mask.sum() + 0.001))
loss = (loss + (self.args.alpha_t * reg_loss))
model_output = ((loss,) + model_output[1:])
return model_output |
class AttrVI_ATTR_MANF_NAME(Attribute):
resources = [(constants.InterfaceType.pxi, 'INSTR'), (constants.InterfaceType.pxi, 'BACKPLANE'), (constants.InterfaceType.usb, 'INSTR'), (constants.InterfaceType.usb, 'RAW'), (constants.InterfaceType.vxi, 'INSTR')]
py_name = 'manufacturer_name'
visa_name = 'VI_ATTR_MANF_NAME'
visa_type = 'ViString'
default = NotAvailable
(read, write, local) = (True, False, False) |
def test_missile_cosmetic_dropdown(skip_qtbot: pytestqt.qtbot.QtBot) -> None:
cosmetic_patches = DreadCosmeticPatches(missile_cosmetic=DreadMissileCosmeticType.NONE)
dialog = DreadCosmeticPatchesDialog(None, cosmetic_patches)
skip_qtbot.addWidget(dialog)
set_combo_with_value(dialog.missile_cosmetic_dropdown, DreadMissileCosmeticType.TRANS)
assert (dialog.cosmetic_patches == DreadCosmeticPatches(missile_cosmetic=DreadMissileCosmeticType.TRANS)) |
def elf_references_PyFPE_jbuf(elf: ELFFile) -> bool:
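# Check whether the ELF imports CPython's PyFPE_* symbols (undefined dynamic
# symbols of function/notype kind). Extensions referencing these only load on
# interpreters built with the old --with-fpectl option, which is why wheel
# auditing tools flag them.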
offending_symbol_names = ('PyFPE_jbuf', 'PyFPE_dummy', 'PyFPE_counter')
section = elf.get_section_by_name('.dynsym')
if (section is not None):
for sym in section.iter_symbols():
if ((sym.name in offending_symbol_names) and (sym['st_shndx'] == 'SHN_UNDEF') and (sym['st_info']['type'] in ('STT_FUNC', 'STT_NOTYPE'))):
return True
return False |
class Object3d(object):
def __init__(self, line):
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.cls_id = cls_type_to_id(self.cls_type)
self.truncation = float(label[1])
self.occlusion = float(label[2])
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[8])
self.w = float(label[9])
self.l = float(label[10])
self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
self.dis_to_cam = np.linalg.norm(self.loc)
self.ry = float(label[14])
self.score = (float(label[15]) if (len(label) == 16) else (- 1.0))
self.level_str = None
self.level = self.get_kitti_obj_level()
def get_kitti_obj_level(self):
height = ((float(self.box2d[3]) - float(self.box2d[1])) + 1)
if ((height >= 40) and (self.truncation <= 0.15) and (self.occlusion <= 0)):
self.level_str = 'Easy'
return 0
elif ((height >= 25) and (self.truncation <= 0.3) and (self.occlusion <= 1)):
self.level_str = 'Moderate'
return 1
elif ((height >= 25) and (self.truncation <= 0.5) and (self.occlusion <= 2)):
self.level_str = 'Hard'
return 2
else:
self.level_str = 'UnKnown'
return (- 1)
def generate_corners3d(self):
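# Build the 8 box corners in the object frame (KITTI camera convention:
# origin at the bottom-face center, x along length l, y pointing down so the
# top face sits at y = -h, z along width w), rotate by yaw R_y(ry) about the
# camera y-axis, then translate to the box location.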
(l, h, w) = (self.l, self.h, self.w)
x_corners = [(l / 2), (l / 2), ((- l) / 2), ((- l) / 2), (l / 2), (l / 2), ((- l) / 2), ((- l) / 2)]
y_corners = [0, 0, 0, 0, (- h), (- h), (- h), (- h)]
z_corners = [(w / 2), ((- w) / 2), ((- w) / 2), (w / 2), (w / 2), ((- w) / 2), ((- w) / 2), (w / 2)]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)], [0, 1, 0], [(- np.sin(self.ry)), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners])
corners3d = np.dot(R, corners3d).T
corners3d = (corners3d + self.loc)
return corners3d
def to_str(self):
print_str = ('%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' % (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l, self.loc, self.ry))
return print_str
def to_kitti_format(self):
kitti_str = ('%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' % (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1], self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2], self.ry))
return kitti_str |
def test_pylsp_format_document_with_config(config, config_document):
result = pylsp_format_document(config, config_document)
assert (result == [{'range': {'start': {'line': 0, 'character': 0}, 'end': {'line': 1, 'character': 0}}, 'newText': 'run(\n these,\n arguments,\n should,\n be,\n wrapped,\n)\n'}]) |
class TestRArray(unittest.TestCase):
def test_basics(self) -> None:
a = RArray(int_rprimitive, 10)
assert (a.item_type == int_rprimitive)
assert (a.length == 10)
def test_str_conversion(self) -> None:
a = RArray(int_rprimitive, 10)
assert (str(a) == 'int[10]')
assert (repr(a) == '<RArray <RPrimitive builtins.int>[10]>')
def test_eq(self) -> None:
a = RArray(int_rprimitive, 10)
assert (a == RArray(int_rprimitive, 10))
assert (a != RArray(bool_rprimitive, 10))
assert (a != RArray(int_rprimitive, 9))
def test_hash(self) -> None:
assert (hash(RArray(int_rprimitive, 10)) == hash(RArray(int_rprimitive, 10)))
assert (hash(RArray(bool_rprimitive, 5)) == hash(RArray(bool_rprimitive, 5)))
def test_alignment(self) -> None:
a = RArray(int_rprimitive, 10)
assert (compute_rtype_alignment(a) == PLATFORM_SIZE)
b = RArray(bool_rprimitive, 55)
assert (compute_rtype_alignment(b) == 1)
def test_size(self) -> None:
a = RArray(int_rprimitive, 9)
assert (compute_rtype_size(a) == (9 * PLATFORM_SIZE))
b = RArray(bool_rprimitive, 3)
assert (compute_rtype_size(b) == 3) |
def _merge_a_into_b(a: CfgNode, b: CfgNode, root: CfgNode, key_list: list):
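# Recursively merge config a into b (yacs-style): values are deep-copied and
# type-coerced against b; nested CfgNodes recurse; unknown keys are accepted
# only when b allows new keys, deprecated keys are skipped, and renamed keys
# raise a descriptive error.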
_assert_with_logging(isinstance(a, CfgNode), '`a` (cur type {}) must be an instance of {}'.format(type(a), CfgNode))
_assert_with_logging(isinstance(b, CfgNode), '`b` (cur type {}) must be an instance of {}'.format(type(b), CfgNode))
for (k, v_) in a.items():
full_key = '.'.join((key_list + [k]))
v = copy.deepcopy(v_)
v = b._decode_cfg_value(v)
if (k in b):
v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
if isinstance(v, CfgNode):
try:
_merge_a_into_b(v, b[k], root, (key_list + [k]))
except BaseException:
raise
else:
b[k] = v
elif b.is_new_allowed():
b[k] = v
elif root.key_is_deprecated(full_key):
continue
elif root.key_is_renamed(full_key):
root.raise_key_rename_error(full_key)
else:
raise KeyError('Non-existent config key: {}'.format(full_key)) |
def autolink(pattern: str, prefix: str):
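# Role factory for docutils/Sphinx: returns a role that expands its text into
# a reference node labelled `prefix + text`, linked to `pattern.format(text)`.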
def role(name, rawtext, text: str, lineno, inliner, options=None, content=None):
if (options is None):
options = {}
url = pattern.format(text)
node = nodes.reference(rawtext, f'{prefix}{text}', refuri=url, **options)
return ([node], [])
return role |
# decorator restored: bind=True and the `self` parameter imply a Celery task;
# the application object name (`app`) is an assumption
@app.task(base=CrawlerTask, bind=True)
def crawl_ptt_post(self, url: str, board: str) -> Optional[Dict]:
logger.info('Crawl %s', url)
try:
post = ptt.crawl_post(url, board)
except (IndexError, HTTPError):
logger.error('Could not crawl %s', url)
return
if (not post):
logger.error('Post is invalid %s', url)
return
exist_row = self.sess.query(PttPost).filter((PttPost.id == post.id)).first()
if exist_row:
exist_row.author = post.author
exist_row.title = post.title
exist_row.content = post.content
new_comments = []
        for (exist_comment, crawled_comment) in zip_longest(sorted(exist_row.comments, key=(lambda x: x.comment_id)), sorted(post.comments, key=(lambda x: x.comment_id))):
            if (crawled_comment is None):
                # zip_longest pads with None when the crawled post has fewer
                # comments than the stored row; keep the stored comment as-is.
                continue
            if exist_comment:
                exist_comment.reaction = crawled_comment.reaction
                exist_comment.author = crawled_comment.author
                exist_comment.content = crawled_comment.content
                exist_comment.updated_at = datetime.utcnow()
            else:
                new_comments.append(crawled_comment.to_orm())
exist_row.comments.extend(new_comments)
exist_row.updated_at = datetime.utcnow()
self.sess.merge(exist_row)
        try:
            self.sess.commit()
        except Exception:
            logger.warning('Commit failed on %s, will call session.rollback()', url)
            self.sess.rollback()
    else:
        self.sess.add(post.to_orm())
        try:
            self.sess.commit()
        except Exception:
            logger.warning('Commit failed on %s, will call session.rollback()', url)
            self.sess.rollback()
return {'id': post.id, 'title': post.title} |
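The task above reads self.sess from its CrawlerTask base, which is not shown; a common Celery pattern for that is a Task subclass that lazily builds one SQLAlchemy session per worker process. A hedged sketch (the DSN and names are assumptions, not the original base class):

from celery import Task
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

class CrawlerTask(Task):
    _session = None

    @property
    def sess(self):
        # Created on first use and reused across task invocations.
        if self._session is None:
            engine = create_engine('sqlite:///crawler.db')  # placeholder DSN
            self._session = sessionmaker(bind=engine)()
        return self._session |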
def apply_adaround_and_find_quantized_accuracy(model: torch.nn.Module, evaluator: aimet_common.defs.EvalFunction, data_loader: torch_data.DataLoader, use_cuda: bool=False, logdir: str='') -> float:
    bn_folded_model = copy.deepcopy(model)
    input_shape = (1, image_net_config.dataset['image_channels'], image_net_config.dataset['image_width'], image_net_config.dataset['image_height'])
    _ = fold_all_batch_norms(bn_folded_model, input_shapes=input_shape)
if use_cuda:
dummy_input = torch.rand(input_shape).cuda()
else:
dummy_input = torch.rand(input_shape)
iterations = 5
params = AdaroundParameters(data_loader=data_loader, num_batches=5)
ada_model = Adaround.apply_adaround(bn_folded_model, dummy_input, params, path=logdir, filename_prefix='adaround', default_param_bw=8, default_quant_scheme=QuantScheme.post_training_tf_enhanced)
quantsim = QuantizationSimModel(model=ada_model, dummy_input=dummy_input, quant_scheme=QuantScheme.post_training_tf_enhanced, rounding_mode='nearest', default_output_bw=8, default_param_bw=8, in_place=False)
quantsim.set_and_freeze_param_encodings(encoding_path=os.path.join(logdir, 'adaround.encodings'))
quantsim.compute_encodings(forward_pass_callback=partial(evaluator, use_cuda=use_cuda), forward_pass_callback_args=iterations)
quantsim.export(path=logdir, filename_prefix='adaround_resnet', dummy_input=dummy_input.cpu())
accuracy = evaluator(quantsim.model, use_cuda=use_cuda)
return accuracy |
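For clarity, the evaluator contract this function assumes, derived from partial(evaluator, use_cuda=...) being invoked with `iterations` and from the final call that omits it; a minimal stand-in (a real evaluator would return measured accuracy):

import torch

def dummy_evaluator(model: torch.nn.Module, iterations: int = 1, use_cuda: bool = False) -> float:
    # compute_encodings calls the partial with `iterations` positionally;
    # the final accuracy call passes only the model, so `iterations`
    # needs a default.
    device = torch.device('cuda' if use_cuda else 'cpu')
    model.to(device).eval()
    with torch.no_grad():
        for _ in range(iterations):
            model(torch.rand(1, 3, 224, 224, device=device))
    return 0.0 |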
class SmilesRnnDistributionLearner():
def __init__(self, output_dir: str, n_epochs=10, hidden_size=512, n_layers=3, max_len=100, batch_size=64, rnn_dropout=0.2, lr=0.001, valid_every=100) -> None:
self.n_epochs = n_epochs
self.output_dir = output_dir
self.hidden_size = hidden_size
self.n_layers = n_layers
self.max_len = max_len
self.batch_size = batch_size
self.rnn_dropout = rnn_dropout
self.lr = lr
self.valid_every = valid_every
self.print_every = 10
self.seed = 42
    def train(self, training_set: List[str], validation_set: List[str]) -> None:
cuda_available = torch.cuda.is_available()
device_str = ('cuda' if cuda_available else 'cpu')
device = torch.device(device_str)
logger.info(f'CUDA enabled: {cuda_available}')
set_random_seed(self.seed, device)
(train_seqs, _) = load_smiles_from_list(training_set, self.max_len)
(valid_seqs, _) = load_smiles_from_list(validation_set, self.max_len)
train_set = get_tensor_dataset(train_seqs)
test_set = get_tensor_dataset(valid_seqs)
sd = SelfiesCharDictionary()
n_characters = sd.get_char_num()
smiles_model = SmilesRnn(input_size=n_characters, hidden_size=self.hidden_size, output_size=n_characters, n_layers=self.n_layers, rnn_dropout=self.rnn_dropout)
optimizer = torch.optim.Adam(smiles_model.parameters(), lr=self.lr)
criterion = torch.nn.CrossEntropyLoss(ignore_index=sd.pad_idx)
trainer = SmilesRnnTrainer(model=smiles_model, criteria=[criterion], optimizer=optimizer, device=device, log_dir=self.output_dir)
trainer.fit(train_set, test_set, batch_size=self.batch_size, print_every=self.print_every, valid_every=self.valid_every, n_epochs=self.n_epochs) |
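A hedged usage sketch, assuming the helper modules this class imports are available (the SMILES strings and output path are placeholders):

train_smiles = ['CCO', 'c1ccccc1', 'CC(=O)O']
valid_smiles = ['CCN']
learner = SmilesRnnDistributionLearner(output_dir='/tmp/smiles_rnn', n_epochs=1, batch_size=2)
learner.train(training_set=train_smiles, validation_set=valid_smiles) |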
class AdornedRetort(OperatingRetort):
def __init__(self, *, recipe: Iterable[Provider]=(), strict_coercion: bool=True, debug_trail: DebugTrail=DebugTrail.ALL):
self._strict_coercion = strict_coercion
self._debug_trail = debug_trail
super().__init__(recipe)
def _calculate_derived(self):
super()._calculate_derived()
self._loader_cache = {}
self._dumper_cache = {}
def replace(self: AR, *, strict_coercion: Optional[bool]=None, debug_trail: Optional[DebugTrail]=None) -> AR:
with self._clone() as clone:
if (strict_coercion is not None):
clone._strict_coercion = strict_coercion
if (debug_trail is not None):
clone._debug_trail = debug_trail
return clone
def extend(self: AR, *, recipe: Iterable[Provider]) -> AR:
with self._clone() as clone:
clone._inc_instance_recipe = (tuple(recipe) + clone._inc_instance_recipe)
return clone
def _get_config_recipe(self) -> VarTuple[Provider]:
return (ValueProvider(StrictCoercionRequest, self._strict_coercion), ValueProvider(DebugTrailRequest, self._debug_trail))
def get_loader(self, tp: Type[T]) -> Loader[T]:
try:
return self._loader_cache[tp]
except KeyError:
pass
loader_ = self._make_loader(tp)
self._loader_cache[tp] = loader_
return loader_
def _make_loader(self, tp: Type[T]) -> Loader[T]:
loader_ = self._facade_provide(LoaderRequest(loc_map=LocMap(TypeHintLoc(type=tp))), error_message=f'Cannot produce loader for type {tp!r}')
if (self._debug_trail == DebugTrail.FIRST):
def trail_rendering_wrapper(data):
try:
return loader_(data)
except Exception as e:
render_trail_as_note(e)
raise
return trail_rendering_wrapper
return loader_
def get_dumper(self, tp: Type[T]) -> Dumper[T]:
try:
return self._dumper_cache[tp]
except KeyError:
pass
dumper_ = self._make_dumper(tp)
self._dumper_cache[tp] = dumper_
return dumper_
def _make_dumper(self, tp: Type[T]) -> Dumper[T]:
dumper_ = self._facade_provide(DumperRequest(loc_map=LocMap(TypeHintLoc(type=tp))), error_message=f'Cannot produce dumper for type {tp!r}')
if (self._debug_trail == DebugTrail.FIRST):
def trail_rendering_wrapper(data):
try:
return dumper_(data)
except Exception as e:
render_trail_as_note(e)
raise
return trail_rendering_wrapper
return dumper_
    @overload
    def load(self, data: Any, tp: Type[T], /) -> T:
        ...
    @overload
    def load(self, data: Any, tp: TypeHint, /) -> Any:
        ...
    def load(self, data: Any, tp: TypeHint, /):
        return self.get_loader(tp)(data)
    @overload
    def dump(self, data: T, tp: Type[T], /) -> Any:
        ...
    @overload
    def dump(self, data: Any, tp: Optional[TypeHint]=None, /) -> Any:
        ...
def dump(self, data: Any, tp: Optional[TypeHint]=None, /) -> Any:
if (tp is None):
tp = type(data)
if is_generic_class(tp):
raise ValueError(f'Can not infer the actual type of generic class instance ({tp!r}), you have to explicitly pass the type of object')
return self.get_dumper(tp)(data) |
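A hedged usage sketch of the load/dump facade, assuming a concrete retort subclass like the Retort that adaptix builds on this class:

from dataclasses import dataclass

@dataclass
class User:
    id: int
    name: str

retort = Retort()  # assumed concrete subclass of AdornedRetort
user = retort.load({'id': 1, 'name': 'Ada'}, User)  # -> User(id=1, name='Ada')
data = retort.dump(user)                            # -> {'id': 1, 'name': 'Ada'} |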
class Process(nn.Module):
    def __init__(self, feature, norm_layer, bn_momentum, dilations=(1, 2, 3)):
        super().__init__()
        self.main = nn.Sequential(*[Bottleneck3D(feature, (feature // 4), bn_momentum=bn_momentum, norm_layer=norm_layer, dilation=[i, i, i]) for i in dilations])
def forward(self, x):
return self.main(x) |
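A usage sketch, under the assumption that Bottleneck3D preserves the channel count so the dilated blocks can be chained:

import torch
from torch import nn

proc = Process(feature=64, norm_layer=nn.BatchNorm3d, bn_momentum=0.1)
out = proc(torch.rand(2, 64, 8, 16, 16))  # expected shape: (2, 64, 8, 16, 16) |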
class Effect6507(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, src, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Capital Hybrid Turret')), 'speed', src.getModifiedItemAttr('shipBonusDreadnoughtG2'), skill='Gallente Dreadnought', **kwargs) |
import numpy as np
import PIL.Image
import PIL.ImageDraw

def CutoutAbs(img, v, **kwargs):
    # Paint one gray square of side at most `v` pixels at a random position,
    # clipped to the image bounds; the caller's image is left untouched.
    (w, h) = img.size
    x0 = np.random.uniform(0, w)
    y0 = np.random.uniform(0, h)
    x0 = int(max(0, (x0 - (v / 2.0))))
    y0 = int(max(0, (y0 - (v / 2.0))))
    x1 = int(min(w, (x0 + v)))
    y1 = int(min(h, (y0 + v)))
    xy = (x0, y0, x1, y1)
    color = (127, 127, 127)
    img = img.copy()
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img |
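A quick usage sketch on a synthetic image (the output path is a placeholder):

import PIL.Image

img = PIL.Image.new('RGB', (224, 224), (255, 255, 255))
aug = CutoutAbs(img, 40)  # one gray square, side <= 40 px
aug.save('/tmp/cutout_demo.png')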