code stringlengths 281 23.7M |
|---|
def send_subscription_change(change_description, customer_id, customer_email, quay_username):
    """Email a notification about a customer's subscription change.

    Args:
        change_description: human-readable summary of the change.
        customer_id: Stripe customer id, rendered as a Stripe-dashboard link.
        customer_email: customer's email address, rendered as a mailto link.
        quay_username: Quay user or organization name.
    """
    SUBSCRIPTION_CHANGE_TITLE = 'Subscription Change - {0} {1}'
    # FIX: the customer-id anchor was truncated in the template
    # ('<a href=" Customer email:'), producing broken HTML; restore the
    # Stripe dashboard link for the customer id.
    SUBSCRIPTION_CHANGE = '\n Change: {0}<br>\n Customer id: <a href="https://dashboard.stripe.com/customers/{1}">{1}</a><br>\n Customer email: <a href="mailto:{2}">{2}</a><br>\n Quay user or org name: {3}<br>\n '
    title = SUBSCRIPTION_CHANGE_TITLE.format(quay_username, change_description)
    # NOTE(review): recipient list is empty — presumably redacted; confirm the
    # intended support address before relying on this path.
    msg = Message(title, recipients=[''])
    msg.html = SUBSCRIPTION_CHANGE.format(change_description, customer_id, customer_email, quay_username)
    if features.FIPS:
        # FIPS mode: plain SMTP login uses MD5-based auth, so it is patched out;
        # TLS is mandatory.
        assert app.config['MAIL_USE_TLS'], 'MAIL_USE_TLS must be enabled to use SMTP in FIPS mode.'
        with mock.patch('smtplib.SMTP.login', login_fips_safe):
            mail.send(msg)
    else:
        mail.send(msg)
def create_video_from_containers(in_container: InputContainer, out_container: OutputContainer, draw_on_av_frame: DrawOnAvFrame, add_border: bool) -> None:
    """Transcode ``in_container`` into ``out_container``, drawing on each video frame.

    Video frames are filtered, drawn on, and buffered in a small min-heap keyed
    by pts so they are written out in presentation order. Audio frames are
    re-encoded as-is; when the input has no audio stream, silence is added so
    the output still carries an audio track.
    """
    video_in = in_container.streams.video[0]
    video_in.thread_type = 'AUTO'
    sizes = _compute_transformation_sizes(video_in, add_border)
    video_out = _set_up_out_video_stream(out_container, sizes, video_in)
    audio_in = in_container.streams.audio[0] if in_container.streams.audio else None
    audio_out = out_container.add_stream(OUT_AUDIO_CODEC)
    apply_filters = _create_filters(sizes.resizing_image_size, video_in)
    frame_heap: list = []

    def _emit_earliest() -> None:
        # Write out the buffered frame with the smallest pts.
        (_, buffered) = heappop(frame_heap)
        add_video_frame(buffered, video_out, out_container)

    for frame in in_container.decode(video_in, audio_in):
        if isinstance(frame, av.VideoFrame):
            drawn = draw_on_av_frame(apply_filters(frame), sizes)
            heappush(frame_heap, (drawn.pts, drawn))
            # Keep the reorder buffer bounded.
            while len(frame_heap) > MAX_VIDEO_FRAME_QUEUE_LEN:
                _emit_earliest()
        elif isinstance(frame, av.AudioFrame):
            frame.pts = None  # let the muxer assign timestamps
            add_audio_frame(frame, audio_out, out_container)
    # Drain any frames still buffered.
    while frame_heap:
        _emit_earliest()
    if not audio_in:
        add_silent_audio(audio_out, out_container)
    flush_stream(video_out, out_container)
    flush_stream(audio_out, out_container)
class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for FSMT (FairSeq machine translation)."""

    tokenizer_class = FSMTTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny BPE vocab, merges file and config into tmpdirname."""
        super().setUp()
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.langs = ['en', 'ru']
        config = {'langs': self.langs, 'src_vocab_size': 10, 'tgt_vocab_size': 20}
        self.src_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['src_vocab_file'])
        self.tgt_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['tgt_vocab_file'])
        config_file = os.path.join(self.tmpdirname, 'tokenizer_config.json')
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.src_vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.tgt_vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
        with open(config_file, 'w') as fp:
            fp.write(json.dumps(config))

    # FIX: these two were corrupted to a bare ``_property`` name, which raises
    # NameError at class-definition time. They must be properties, since callers
    # access them as attributes (e.g. ``self.tokenizer_ru_en`` without a call).
    @property
    def tokenizer_ru_en(self):
        return FSMTTokenizer.from_pretrained('facebook/wmt19-ru-en')

    @property
    def tokenizer_en_ru(self):
        return FSMTTokenizer.from_pretrained('facebook/wmt19-en-ru')

    def test_online_tokenizer_config(self):
        """The tokenizer must pick up src/tgt languages and vocab sizes from its config."""
        tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2)
        self.assertListEqual([tokenizer.src_lang, tokenizer.tgt_lang], ['en', 'ru'])
        self.assertEqual(tokenizer.src_vocab_size, 21)
        self.assertEqual(tokenizer.tgt_vocab_size, 21)

    def test_full_tokenizer(self):
        # FIX: this docstring was an unterminated string literal in the
        # corrupted source; restored as a proper one-line docstring.
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = (tokens + ['<unk>'])
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_sequence_builders(self):
        """Single sequences get one trailing </s> (id 2); pairs get one after each segment."""
        tokenizer = self.tokenizer_ru_en
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert (encoded_sentence == (text + [2]))
        assert (encoded_pair == (((text + [2]) + text_2) + [2]))

    def test_match_encode_decode(self):
        """encode() must hit the pinned ids and decode() must round-trip the text."""
        tokenizer_enc = self.tokenizer_en_ru
        tokenizer_dec = self.tokenizer_ru_en
        targets = [["Here's a little song I wrote. Don't worry, be happy.", [2470, 39, 11, 2349, 7222, 70, 5979, 7, 8450, 1050, 13160, 5, 26, 6445, 7, 2]], ["This is it. No more. I'm done!", [132, 21, 37, 7, 1434, 86, 7, 70, 6476, 1305, 427, 2]]]
        for (src_text, tgt_input_ids) in targets:
            encoded_ids = tokenizer_enc.encode(src_text, return_tensors=None)
            self.assertListEqual(encoded_ids, tgt_input_ids)
            decoded_text = tokenizer_dec.decode(encoded_ids, skip_special_tokens=True)
            self.assertEqual(decoded_text, src_text)

    def test_tokenizer_lower(self):
        tokenizer = FSMTTokenizer.from_pretrained('facebook/wmt19-ru-en', do_lower_case=True)
        tokens = tokenizer.tokenize('USA is United States of America')
        expected = ['us', 'a</w>', 'is</w>', 'un', 'i', 'ted</w>', 'st', 'ates</w>', 'of</w>', 'am', 'er', 'ica</w>']
        self.assertListEqual(tokens, expected)

    # FIX: the skip-reason strings below were bare expressions (no effect);
    # they must be ``unittest.skip`` decorators for the tests to be skipped.
    @unittest.skip('FSMTConfig.__init__ requires non-optional args')
    def test_torch_encode_plus_sent_to_model(self):
        pass

    @unittest.skip('FSMTConfig.__init__ requires non-optional args')
    def test_np_encode_plus_sent_to_model(self):
        pass
def test_service_registry_random_pfs(service_registry_address, private_keys, web3, contract_manager):
    """The registry must expose deposits/URLs, and PFS selection must honor the fee cap."""
    expected_addresses = [privatekey_to_address(key) for key in private_keys]
    (registry, registered_urls) = deploy_service_registry_and_set_urls(private_keys=private_keys, web3=web3, contract_manager=contract_manager, service_registry_address=service_registry_address)
    assert registry.ever_made_deposits_len(BLOCK_ID_LATEST) == 3
    # Each registered service resolves to the URL it was set up with.
    for position, address in enumerate(expected_addresses):
        assert registry.get_service_url(BLOCK_ID_LATEST, address) == registered_urls[position]
    # An address that never registered has no URL.
    assert registry.get_service_url(BLOCK_ID_LATEST, HOP1) is None
    for position, address in enumerate(expected_addresses):
        assert registry.ever_made_deposits(BLOCK_ID_LATEST, position) == address
    # Out-of-range index yields a falsy result rather than an address.
    assert not registry.ever_made_deposits(BLOCK_ID_LATEST, 9999)
    pfs_info_mock = Mock()
    pfs_info_mock.return_value.price = 100
    with patch('raiden.network.pathfinding.get_pfs_info', pfs_info_mock):
        # A PFS priced above the caller's cap is rejected ...
        assert not get_valid_pfs_url(registry, 0, BLOCK_ID_LATEST, pathfinding_max_fee=FeeAmount(99))
        # ... while one at exactly the cap is accepted.
        assert get_valid_pfs_url(registry, 0, BLOCK_ID_LATEST, pathfinding_max_fee=FeeAmount(100)) == registered_urls[0]
        assert get_random_pfs(registry, BLOCK_ID_LATEST, pathfinding_max_fee=FeeAmount(100)) in registered_urls
def get_triplet_mask(labels: torch.Tensor) -> torch.Tensor:
    """Return a boolean mask of valid (anchor, positive, negative) triplets.

    ``mask[i, j, k]`` is True iff ``i``, ``j``, ``k`` are pairwise-distinct
    indices, ``labels[i] == labels[j]`` (anchor/positive share a label) and
    ``labels[i] != labels[k]`` (anchor/negative differ).

    Args:
        labels: 1-D tensor of class labels, one per sample.

    Returns:
        Boolean tensor of shape ``(n, n, n)`` where ``n = labels.size(0)``.
    """
    n = labels.size(0)
    eye = torch.eye(n, dtype=torch.bool, device=labels.device)
    off_diagonal = ~eye
    # Pairwise-distinct index triples: i != j, i != k, and j != k.
    distinct = off_diagonal.unsqueeze(2) & off_diagonal.unsqueeze(1) & off_diagonal.unsqueeze(0)
    same_label = labels.unsqueeze(0) == labels.unsqueeze(1)
    # Valid label pattern: anchor/positive equal, anchor/negative different.
    valid = same_label.unsqueeze(2) & ~same_label.unsqueeze(1)
    return distinct & valid
class ChannelSpatialSELayer3D(nn.Module):
    """Concurrent spatial and channel squeeze-and-excitation for 3D feature maps.

    Runs a channel-SE branch and a spatial-SE branch on the same input and
    combines them with an element-wise maximum.
    """

    def __init__(self, num_channels, reduction_ratio=2):
        # num_channels: number of channels of the input feature map.
        # reduction_ratio: bottleneck reduction used by the channel-SE branch.
        super(ChannelSpatialSELayer3D, self).__init__()
        self.cSE = ChannelSELayer3D(num_channels, reduction_ratio)
        self.sSE = SpatialSELayer3D(num_channels)

    def forward(self, input_tensor):
        """Return ``max(cSE(x), sSE(x))`` element-wise."""
        channel_excited = self.cSE(input_tensor)
        spatially_excited = self.sSE(input_tensor)
        return torch.max(channel_excited, spatially_excited)
class Material():
    """Base surface material with an optional tangent-space normal map.

    Normal maps are loaded from ``sightpy/normalmaps/``. Subclasses are
    expected to implement ``get_color``.
    """

    def __init__(self, normalmap=None):
        # FIX: ``self.repeat`` was previously assigned only in set_normalmap(),
        # so constructing with ``normalmap=...`` and then calling get_Normal()
        # raised AttributeError. Default the tiling factor to 1.0 here.
        self.repeat = 1.0
        if normalmap is not None:
            normalmap = load_image(('sightpy/normalmaps/' + normalmap))
        self.normalmap = normalmap

    def get_Normal(self, hit):
        """Return the shading normal at ``hit``.

        With a normal map set, perturbs the collider normal using the map
        sampled at the hit's UV coordinates (tiled by ``self.repeat``);
        otherwise returns the collider normal, oriented by the hit side.
        """
        N_coll = hit.collider.get_Normal(hit)
        if (self.normalmap is not None):
            (u, v) = hit.get_uv()
            # Sample the map with wrap-around indexing; v is flipped (image
            # rows grow downward).
            im = self.normalmap[((- (((v * self.normalmap.shape[0]) * self.repeat).astype(int) % self.normalmap.shape[0])), (((u * self.normalmap.shape[1]) * self.repeat).astype(int) % self.normalmap.shape[1]))].T
            # Remap RGB in [0, 1] to a direction in [-1, 1].
            N_map = (vec3((im[0] - 0.5), (im[1] - 0.5), (im[2] - 0.5)) * 2.0)
            return (N_map.matmul(hit.collider.inverse_basis_matrix).normalize() * hit.orientation)
        else:
            return (N_coll * hit.orientation)

    def set_normalmap(self, normalmap, repeat=1.0):
        """Load a normal map from ``sightpy/normalmaps/`` and set its tiling factor."""
        self.normalmap = load_image(('sightpy/normalmaps/' + normalmap))
        self.repeat = repeat

    def get_color(self, scene, ray, hit):
        """Shade the hit point; overridden by concrete materials."""
        pass
class SolveModel():
    """Benchmark helper: build, parameterize and discretise a PyBaMM model, then solve it."""

    solver: pybamm.BaseSolver
    model: pybamm.BaseModel
    t_eval: np.ndarray

    def solve_setup(self, parameter, model_, option, value, solver_class):
        """Prepare solver, model, parameters and mesh for a later solve_model() call."""
        import importlib

        # Probe for the optional compiled IDA/KLU solver; failure is non-fatal.
        idaklu_spec = importlib.util.find_spec('pybamm.solvers.idaklu')
        if idaklu_spec is not None:
            try:
                idaklu = importlib.util.module_from_spec(idaklu_spec)
                idaklu_spec.loader.exec_module(idaklu)
            except ImportError as e:
                print('XXXXX cannot find klu', e)
                idaklu_spec = None
        self.solver = solver_class()
        self.model = model_({option: value})
        # Time grid: 4000 s at a 1C rate, sampled at 500 points.
        c_rate = 1
        t_max = (4000 / c_rate)
        n_points = 500
        self.t_eval = np.linspace(0, t_max, n_points)
        geometry = self.model.default_geometry
        parameter_values = pybamm.ParameterValues(parameter)
        parameter_values.process_model(self.model)
        parameter_values.process_geometry(geometry)
        grid_points = {'x_n': 20, 'x_s': 20, 'x_p': 20, 'r_n': 30, 'r_p': 30, 'y': 10, 'z': 10}
        mesh = pybamm.Mesh(geometry, self.model.default_submesh_types, grid_points)
        discretisation = pybamm.Discretisation(mesh, self.model.default_spatial_methods)
        discretisation.process_model(self.model)

    def solve_model(self, _model, _params):
        """Solve the prepared model over the precomputed time grid.

        The arguments are unused; they exist to satisfy the benchmark harness API.
        """
        self.solver.solve(self.model, t_eval=self.t_eval)
def scale_jitter(tensor, target, jitter_factor, jitter_size=None, mask=None):
    """Rescale an image tensor and its detection targets by a jitter factor.

    When ``jitter_size`` is given it overrides ``jitter_factor`` and the
    per-axis factors are derived from the requested output size. Boxes are
    scaled in place on a deep copy of ``target``; polygon masks (from
    ``target['gt_masks']`` or the ``mask`` argument) are scaled into a new
    list. Returns ``(scaled_tensor, scaled_target, scaled_polygons)``.
    """
    if jitter_size is None:
        (_, h, w) = tensor.shape
        new_h = int(h * jitter_factor)
        new_w = int(w * jitter_factor)
        jitter_factor_x = jitter_factor_y = jitter_factor
    else:
        (new_h, new_w) = jitter_size
        (_, h, w) = tensor.shape
        jitter_factor_y = new_h / h
        jitter_factor_x = new_w / w
    tensor_out = torch.nn.functional.interpolate(tensor.unsqueeze(0), size=(new_h, new_w), mode='nearest').squeeze(0)
    target_out = copy.deepcopy(target)
    target_mask = []
    # Masks attached to the target take precedence over the ``mask`` argument.
    if 'gt_masks' in target:
        mask = target['gt_masks']
    if mask is not None:
        for polys in mask.polygons:
            scaled_polys = copy.deepcopy(polys)
            for poly in scaled_polys:
                poly[0::2] *= jitter_factor_x  # x coordinates
                poly[1::2] *= jitter_factor_y  # y coordinates
            target_mask.append(scaled_polys)
    if isinstance(target, dict):
        target_out['gt_boxes'].scale(jitter_factor_x, jitter_factor_y)
        if 'gt_masks' in target:
            target_out['gt_masks'] = PolygonMasks(target_mask)
    elif isinstance(target, Boxes):
        target_out.scale(jitter_factor_x, jitter_factor_y)
    else:
        raise ValueError(('Unsupported target %s' % str(target)))
    return (tensor_out, target_out, target_mask)
class ReleaseFile(ContentManageable, NameSlugModel):
    """A downloadable artifact (installer, tarball, signature set) of a Release."""

    # Field definitions are kept exactly as-is: they define the DB schema.
    os = models.ForeignKey(OS, related_name='releases', verbose_name='OS', on_delete=models.CASCADE)
    release = models.ForeignKey(Release, related_name='files', on_delete=models.CASCADE)
    description = models.TextField(blank=True)
    is_source = models.BooleanField('Is Source Distribution', default=False)
    url = models.URLField('URL', unique=True, db_index=True, help_text='Download URL')
    gpg_signature_file = models.URLField('GPG SIG URL', blank=True, help_text='GPG Signature URL')
    sigstore_signature_file = models.URLField('Sigstore Signature URL', blank=True, help_text='Sigstore Signature URL')
    sigstore_cert_file = models.URLField('Sigstore Cert URL', blank=True, help_text='Sigstore Cert URL')
    sigstore_bundle_file = models.URLField('Sigstore Bundle URL', blank=True, help_text='Sigstore Bundle URL')
    md5_sum = models.CharField('MD5 Sum', max_length=200, blank=True)
    filesize = models.IntegerField(default=0)
    download_button = models.BooleanField(default=False, help_text='Use for the supernav download button for this OS')

    def validate_unique(self, exclude=None):
        """Allow at most one file per (release, OS) pair to carry the download button."""
        if self.download_button:
            conflicts = ReleaseFile.objects.filter(release=self.release, os=self.os, download_button=True).exclude(pk=self.id)
            if conflicts.count() > 0:
                raise ValidationError('Only one Release File per OS can have "Download button" enabled')
        super().validate_unique(exclude=exclude)

    class Meta():
        verbose_name = 'Release File'
        verbose_name_plural = 'Release Files'
        ordering = ('-release__is_published', 'release__name', 'os__name', 'name')
        # DB-level backstop for the validate_unique() rule above.
        constraints = [models.UniqueConstraint(fields=['os', 'release'], condition=models.Q(download_button=True), name='only_one_download_per_os_per_release')]
class AM2RBasePatchesFactory(BasePatchesFactory):
    """Base-patches factory for AM2R.

    Extends the generic factory by optionally downgrading save-station and
    labs doors to plain 'Normal Door' weaknesses.
    """

    def create_base_patches(self, configuration: BaseConfiguration, rng: Random, game: GameDescription, is_multiworld: bool, player_index: int, rng_required: bool=True) -> GamePatches:
        """Create base patches, forcing blue doors where the configuration asks for it."""
        assert isinstance(configuration, AM2RConfiguration)
        patches = super().create_base_patches(configuration, rng, game, is_multiworld, player_index, rng_required)
        node_for = game.region_list.typed_node_by_identifier
        weaknesses: list[tuple[(DockNode, DockWeakness)]] = []
        normal_door = game.dock_weakness_database.get_by_weakness('door', 'Normal Door')
        if configuration.blue_save_doors or configuration.force_blue_labs:
            for area in game.region_list.all_areas:
                wants_blue = (configuration.blue_save_doors and area.extra.get('unlocked_save_station')) or (configuration.force_blue_labs and area.extra.get('force_blue_labs'))
                if not wants_blue:
                    continue
                for node in area.nodes:
                    if isinstance(node, DockNode) and (node.dock_type.short_name == 'door'):
                        weaknesses.append((node, normal_door))
                        # Also patch the matching door on the other side of the dock.
                        weaknesses.append((node_for(node.default_connection, DockNode), normal_door))
        return patches.assign_dock_weakness(weaknesses)
class Effect7233(BaseEffect):
    """Passive implant effect boosting damageMultiplierBonusPerCycle on Precursor Weapons."""

    type = 'passive'

    # NOTE: effect handlers follow the project convention of no ``self`` parameter.
    def handler(fit, implant, context, projectionRange, **kwargs):
        def _is_precursor_weapon(mod):
            return mod.item.group.name == 'Precursor Weapon'
        bonus = implant.getModifiedItemAttr('damageMultiplierBonusPerCycleModifier')
        fit.modules.filteredItemBoost(_is_precursor_weapon, 'damageMultiplierBonusPerCycle', bonus, **kwargs)
def main():
    """Build vocabularies and binarize training/validation data for OpenNMT-style training.

    Three modes, selected by ``opt``:
      * ``opt.lm``  - language-model data (target side only),
      * ``opt.asr`` - speech data (audio source, text target),
      * otherwise   - text-to-text translation data.
    Data is either accumulated in ``train``/``valid`` dicts and saved at the
    end, or (with ``opt.multi_dataset``) saved per language pair as produced.
    """
    dicts = {}
    tokenizer = onmt.Tokenizer(opt.input_type, opt.lower)
    if ((opt.load_dict is not None) and (len(opt.load_dict) > 0)):
        print(('[INFO] Loading dictionary from ... %s' % opt.load_dict))
        dicts = torch.load(opt.load_dict)
    src_langs = opt.train_src_lang.split('|')
    tgt_langs = opt.train_tgt_lang.split('|')
    langs = (src_langs + tgt_langs)
    langs = sorted(list(set(langs)))
    if (len(opt.train_src_atbs) > 0):
        src_atbs = opt.train_src_atbs.split('|')
        tgt_atbs = opt.train_tgt_atbs.split('|')
        atbs = (src_atbs + tgt_atbs)
        atbs = sorted(list(set(atbs)))
    else:
        atbs = []
    if (not opt.load_dict):
        # Fresh language/attribute index maps.
        dicts['langs'] = dict()
        for lang in langs:
            idx = len(dicts['langs'])
            dicts['langs'][lang] = idx
        dicts['atbs'] = dict()
        for atb in atbs:
            idx = len(dicts['atbs'])
            dicts['atbs'][atb] = idx
    else:
        # Extend the loaded dictionary with any new languages/attributes.
        if ('langs' not in dicts):
            dicts['langs'] = dict()
        else:
            print(dicts['langs'])
            print('Adding languages to existing dictionary ...')
        for lang in langs:
            idx = len(dicts['langs'])
            if (lang not in dicts['langs']):
                dicts['langs'][lang] = idx
        if ('atbs' not in dicts):
            dicts['atbs'] = dict()
        else:
            print('Adding attributes to existing dictionary ...')
        for atb in atbs:
            idx = len(dicts['atbs'])
            if (atb not in dicts['atbs']):
                dicts['atbs'][atb] = idx
    print('Languages: ', dicts['langs'])
    print('Attributes: ', dicts['atbs'])
    start = time.time()
    src_train_files = opt.train_src.split('|')
    tgt_train_files = opt.train_tgt.split('|')
    # Vocabulary: LM/ASR need only a target vocab; join_vocab shares one vocab
    # over both sides.
    if (opt.asr or opt.lm):
        dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab, opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
    elif opt.join_vocab:
        dicts['src'] = init_vocab('source', set((src_train_files + tgt_train_files)), opt.src_vocab, opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
        dicts['tgt'] = dicts['src']
    else:
        dicts['src'] = init_vocab('source', src_train_files, opt.src_vocab, opt.src_vocab_size, tokenizer, num_workers=opt.num_threads)
        dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab, opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
    elapse = str(datetime.timedelta(seconds=int((time.time() - start))))
    print(('Vocabulary generated after %s' % elapse))
    if opt.lm:
        print('Preparing training language model ...')
        train = dict()
        train['tgt'] = make_lm_data(opt.train_tgt, dicts['tgt'])
        train['src'] = None
        valid = dict()
        valid['tgt'] = make_lm_data(opt.valid_tgt, dicts['tgt'])
        valid['src'] = None
        train['src_sizes'] = None
        train['tgt_sizes'] = None
        valid['src_sizes'] = None
        valid['tgt_sizes'] = None
    elif opt.asr:
        print('Preparing training acoustic model ...')
        src_input_files = opt.train_src.split('|')
        tgt_input_files = opt.train_tgt.split('|')
        src_langs = opt.train_src_lang.split('|')
        tgt_langs = opt.train_tgt_lang.split('|')
        src_atbs = (opt.train_src_atbs.split('|') if (len(atbs) > 0) else ([None] * len(src_input_files)))
        tgt_atbs = (opt.train_tgt_atbs.split('|') if (len(atbs) > 0) else ([None] * len(tgt_input_files)))
        assert (len(src_input_files) == len(src_langs))
        assert (len(src_input_files) == len(src_atbs))
        assert (len(src_input_files) == len(tgt_input_files))
        assert (len(tgt_input_files) == len(tgt_langs))
        assert (len(tgt_input_files) == len(tgt_atbs))
        past_src_files = opt.past_train_src.split('|')
        idx = 0
        n_input_files = len(src_input_files)
        train = dict()
        (train['src'], train['tgt']) = (list(), list())
        (train['src_sizes'], train['tgt_sizes']) = (list(), list())
        (train['src_atb'], train['tgt_atb']) = (list(), list())
        (train['src_lang'], train['tgt_lang']) = (list(), list())
        data = dict()
        if (opt.past_train_src and (len(past_src_files) == len(src_input_files))):
            train['past_src'] = list()
            train['past_src_sizes'] = list()
        for (i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb)) in enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
            data_name = ('train.%i.%s-%s' % (idx, src_lang, tgt_lang))
            dataset_path = os.path.join(dirname(opt.save_data), data_name)
            if (opt.multi_dataset and opt.resume):
                print(('Checking existing path %s ...' % dataset_path))
                if os.path.exists(dataset_path):
                    print(('[INFO] Found data %s in the savedir ... Ignoring' % data_name))
                    idx = (idx + 1)
                    continue
            (src_data, tgt_data, src_sizes, tgt_sizes) = make_asr_data(src_file, tgt_file, dicts['tgt'], tokenizer, max_src_length=opt.src_seq_length, max_tgt_length=opt.tgt_seq_length, input_type=opt.input_type, stride=opt.stride, concat=opt.concat, prev_context=opt.previous_context, fp16=opt.fp16, add_bos=(not opt.no_bos), asr_format=opt.asr_format, output_format=opt.format, num_workers=opt.num_threads, external_tokenizer=opt.external_tokenizer, tgt_lang=tgt_lang, verbose=opt.verbose, lang_list=dicts['langs'])
            n_samples = len(src_data)
            (src_atb_data, tgt_atb_data) = (None, None)
            # With a single input file (or per-pair datasets) one language/attribute
            # marker per dataset suffices; otherwise one marker per sample.
            if ((n_input_files == 1) or opt.multi_dataset):
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
                if (len(atbs) > 0):
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
            else:
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
                if (len(atbs) > 0):
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]
            if (opt.past_train_src and (len(past_src_files) == len(src_input_files))):
                past_src_file = past_src_files[i]
                (past_src_data, _, past_src_sizes, _) = make_asr_data(past_src_file, None, None, None, input_type=opt.input_type, stride=opt.stride, concat=opt.concat, prev_context=opt.previous_context, add_bos=(not opt.no_bos), fp16=opt.fp16, asr_format=opt.asr_format, output_format=opt.format, num_workers=opt.num_threads, external_tokenizer=opt.external_tokenizer, tgt_lang=tgt_lang, verbose=opt.verbose, lang_list=dicts['langs'])
                if opt.multi_dataset:
                    # FIX: was ``prev_src_data``, an undefined name (NameError).
                    data['prev_src'] = past_src_data
                else:
                    train['past_src'] += past_src_data
                    train['past_src_sizes'] += past_src_sizes
            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data
                if (len(atbs) > 0):
                    data['src_atb'] = src_atb_data
                    data['tgt_atb'] = tgt_atb_data
                print(('Saving training set %i %s-%s to disk ...' % (idx, src_lang, tgt_lang)))
                path = os.path.join(dirname(opt.save_data), ('train.%i.%s-%s' % (idx, src_lang, tgt_lang)))
                os.makedirs(path, exist_ok=True)
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                idx = (idx + 1)
                del data
                data = dict()
            else:
                train['src'] += src_data
                train['tgt'] += tgt_data
                train['src_sizes'] += src_sizes
                train['tgt_sizes'] += tgt_sizes
                train['src_lang'] += src_lang_data
                train['tgt_lang'] += tgt_lang_data
                if (len(atbs) > 0):
                    train['src_atb'] += src_atb_data
                    train['tgt_atb'] += tgt_atb_data
        print('Preparing validation ...')
        src_input_files = opt.valid_src.split('|')
        tgt_input_files = opt.valid_tgt.split('|')
        past_src_files = opt.past_valid_src.split('|')
        src_langs = opt.valid_src_lang.split('|')
        tgt_langs = opt.valid_tgt_lang.split('|')
        src_atbs = (opt.valid_src_atbs.split('|') if (len(atbs) > 0) else ([None] * len(src_input_files)))
        tgt_atbs = (opt.valid_tgt_atbs.split('|') if (len(atbs) > 0) else ([None] * len(tgt_input_files)))
        assert (len(src_input_files) == len(src_langs))
        assert (len(src_input_files) == len(tgt_input_files))
        assert (len(tgt_input_files) == len(tgt_langs))
        idx = 0
        n_input_files = len(src_input_files)
        data = dict()
        valid = dict()
        (valid['src'], valid['tgt']) = (list(), list())
        (valid['src_sizes'], valid['tgt_sizes']) = (list(), list())
        (valid['src_lang'], valid['tgt_lang']) = (list(), list())
        (valid['src_atb'], valid['tgt_atb']) = (list(), list())
        # NOTE(review): this gate checks opt.past_train_src while the file list
        # comes from opt.past_valid_src — confirm that asymmetry is intended.
        if (opt.past_train_src and (len(past_src_files) == len(src_input_files))):
            valid['past_src'] = list()
            valid['past_src_sizes'] = list()
        for (i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb)) in enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
            data_name = ('valid.%i.%s-%s' % (idx, src_lang, tgt_lang))
            dataset_path = os.path.join(dirname(opt.save_data), data_name)
            if (opt.multi_dataset and opt.resume):
                if os.path.exists(dataset_path):
                    print(('[INFO] Found data %s in the savedir ... Ignoring' % data_name))
                    idx = (idx + 1)
                    continue
            (src_data, tgt_data, src_sizes, tgt_sizes) = make_asr_data(src_file, tgt_file, dicts['tgt'], tokenizer, max_src_length=max(1024, opt.src_seq_length), max_tgt_length=max(1024, opt.tgt_seq_length), input_type=opt.input_type, stride=opt.stride, concat=opt.concat, prev_context=opt.previous_context, fp16=opt.fp16, add_bos=(not opt.no_bos), asr_format=opt.asr_format, output_format=opt.format, external_tokenizer=opt.external_tokenizer, tgt_lang=tgt_lang, verbose=opt.verbose, lang_list=dicts['langs'])
            n_samples = len(src_data)
            if ((n_input_files == 1) or opt.multi_dataset):
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
                if (len(atbs) > 0):
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
            else:
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
                if (len(atbs) > 0):
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]
            if (opt.past_train_src and (len(past_src_files) == len(src_input_files))):
                past_src_file = past_src_files[i]
                (past_src_data, _, past_src_sizes, _) = make_asr_data(past_src_file, None, None, None, input_type=opt.input_type, stride=opt.stride, concat=opt.concat, prev_context=opt.previous_context, fp16=opt.fp16, add_bos=(not opt.no_bos), asr_format=opt.asr_format, output_format=opt.format, num_workers=opt.num_threads, external_tokenizer=opt.external_tokenizer, tgt_lang=tgt_lang, verbose=opt.verbose, lang_list=dicts['langs'])
                valid['past_src'] += past_src_data
                valid['past_src_sizes'] += past_src_sizes
            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data
                if (len(atbs) > 0):
                    data['src_atb'] = src_atb_data
                    data['tgt_atb'] = tgt_atb_data
                print(('Saving validation set %i %s-%s to disk ...' % (idx, src_lang, tgt_lang)))
                path = os.path.join(dirname(opt.save_data), ('valid.%i.%s-%s' % (idx, src_lang, tgt_lang)))
                os.makedirs(path, exist_ok=True)
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                idx = (idx + 1)
                del data
                data = dict()
            else:
                valid['src'] += src_data
                valid['tgt'] += tgt_data
                valid['src_sizes'] += src_sizes
                valid['tgt_sizes'] += tgt_sizes
                valid['src_lang'] += src_lang_data
                valid['tgt_lang'] += tgt_lang_data
                if (len(atbs) > 0):
                    valid['src_atb'] += src_atb_data
                    valid['tgt_atb'] += tgt_atb_data
    else:
        src_input_files = opt.train_src.split('|')
        tgt_input_files = opt.train_tgt.split('|')
        src_langs = opt.train_src_lang.split('|')
        tgt_langs = opt.train_tgt_lang.split('|')
        assert (len(src_input_files) == len(src_langs))
        assert (len(src_input_files) == len(tgt_input_files))
        assert (len(tgt_input_files) == len(tgt_langs))
        past_src_files = opt.past_train_src.split('|')
        n_input_files = len(src_input_files)
        idx = 0
        data = dict()
        train = dict()
        (train['src'], train['tgt']) = (list(), list())
        (train['src_sizes'], train['tgt_sizes']) = (list(), list())
        (train['src_lang'], train['tgt_lang']) = (list(), list())
        if (opt.past_train_src and (len(past_src_files) == len(src_input_files))):
            train['past_src'] = list()
            train['past_src_sizes'] = list()
        start = time.time()
        print('Binarizing data to train translation models...')
        for (i, (src_file, tgt_file, src_lang, tgt_lang)) in enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
            # With multi_mirror each pair occupies two dataset slots: forward
            # (dataset_idx) and mirrored (dataset_idx + 1).
            dataset_idx = (idx if (not opt.multi_mirror) else (2 * idx))
            data_name = ('train.%i.%s-%s' % (dataset_idx, src_lang, tgt_lang))
            mirrored_data_name = ('train.%i.%s-%s' % ((dataset_idx + 1), tgt_lang, src_lang))
            dataset_path = os.path.join(dirname(opt.save_data), data_name)
            mirrored_dataset_path = os.path.join(dirname(opt.save_data), mirrored_data_name)
            if (opt.multi_dataset and opt.resume):
                print(('Checking existing path %s ...' % dataset_path))
                if os.path.exists(dataset_path):
                    print(('[INFO] Found data %s in the savedir ... Ignoring' % data_name))
                    idx = (idx + 1)
                    continue
                else:
                    os.makedirs(dataset_path, exist_ok=True)
            (src_data, tgt_data, src_sizes, tgt_sizes) = make_translation_data(src_file, tgt_file, dicts['src'], dicts['tgt'], tokenizer, max_src_length=opt.src_seq_length, max_tgt_length=opt.tgt_seq_length, add_bos=(not opt.no_bos), data_type=opt.data_type, num_workers=opt.num_threads, verbose=opt.verbose, external_tokenizer=opt.external_tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, lang_list=dicts['langs'], early_save=opt.multi_dataset, savedir=dataset_path, mirror=opt.multi_mirror, mirror_savedir=mirrored_dataset_path)
            if ((n_input_files == 1) or opt.multi_dataset):
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
            else:
                assert (src_data is not None)
                n_samples = len(src_data)
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
            if (opt.past_train_src and (len(past_src_files) == len(src_input_files))):
                past_src_file = past_src_files[i]
                (past_src_data, _, past_src_sizes, _) = make_translation_data(past_src_file, '/dev/null', dicts['src'], dicts['src'], tokenizer, max_src_length=opt.src_seq_length, max_tgt_length=opt.tgt_seq_length, add_bos=(not opt.no_bos), data_type=opt.data_type, num_workers=opt.num_threads, verbose=opt.verbose, external_tokenizer=opt.external_tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, lang_list=dicts['langs'])
                if opt.multi_dataset:
                    # FIX: was ``prev_src_data``, an undefined name (NameError).
                    data['prev_src'] = past_src_data
                else:
                    train['past_src'] += past_src_data
                    train['past_src_sizes'] += past_src_sizes
            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data
                print(('Saving training set %i %s-%s to disk ...' % (dataset_idx, src_lang, tgt_lang)))
                path = dataset_path
                os.makedirs(path, exist_ok=True)
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                if opt.multi_mirror:
                    # Save the swapped (tgt -> src) direction as its own dataset.
                    mdata = dict()
                    mdata['src'] = tgt_data
                    mdata['tgt'] = src_data
                    mdata['tgt_sizes'] = src_sizes
                    mdata['src_sizes'] = tgt_sizes
                    mdata['tgt_lang'] = src_lang_data
                    mdata['src_lang'] = tgt_lang_data
                    print(('Saving training set %i %s-%s to disk ...' % ((dataset_idx + 1), tgt_lang, src_lang)))
                    path = mirrored_dataset_path
                    os.makedirs(path, exist_ok=True)
                    save_dataset(path, mdata, opt.format, dicts, opt.src_type)
                idx = (idx + 1)
                del data
                data = dict()
            else:
                train['src'] += src_data
                train['tgt'] += tgt_data
                train['src_sizes'] += src_sizes
                train['tgt_sizes'] += tgt_sizes
                train['src_lang'] += src_lang_data
                train['tgt_lang'] += tgt_lang_data
        print('Preparing validation ...')
        src_input_files = opt.valid_src.split('|')
        tgt_input_files = opt.valid_tgt.split('|')
        past_src_files = opt.past_valid_src.split('|')
        src_langs = opt.valid_src_lang.split('|')
        tgt_langs = opt.valid_tgt_lang.split('|')
        assert (len(src_input_files) == len(src_langs))
        assert (len(src_input_files) == len(tgt_input_files))
        assert (len(tgt_input_files) == len(tgt_langs))
        n_input_files = len(src_input_files)
        idx = 0
        data = dict()
        valid = dict()
        (valid['src'], valid['tgt']) = (list(), list())
        (valid['src_sizes'], valid['tgt_sizes']) = (list(), list())
        (valid['src_lang'], valid['tgt_lang']) = (list(), list())
        if (opt.past_train_src and (len(past_src_files) == len(src_input_files))):
            valid['past_src'] = list()
            valid['past_src_sizes'] = list()
        # FIX: this loop used ``past_src_files[i]`` below without defining ``i``
        # (it silently reused the leftover index from the training loop);
        # iterate with enumerate so each validation file gets its own past file.
        for (i, (src_file, tgt_file, src_lang, tgt_lang)) in enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
            (src_data, tgt_data, src_sizes, tgt_sizes) = make_translation_data(src_file, tgt_file, dicts['src'], dicts['tgt'], tokenizer, max_src_length=max(1024, opt.src_seq_length), max_tgt_length=max(1024, opt.tgt_seq_length), add_bos=(not opt.no_bos), data_type=opt.data_type, num_workers=opt.num_threads, verbose=opt.verbose, external_tokenizer=opt.external_tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, lang_list=dicts['langs'])
            n_samples = len(src_data)
            if ((n_input_files == 1) or opt.multi_dataset):
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
            else:
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
            if (opt.past_train_src and (len(past_src_files) == len(src_input_files))):
                past_src_file = past_src_files[i]
                (past_src_data, _, past_src_sizes, _) = make_translation_data(past_src_file, '/dev/null', dicts['src'], dicts['src'], tokenizer, max_src_length=max(1024, opt.src_seq_length), max_tgt_length=max(1024, opt.tgt_seq_length), add_bos=(not opt.no_bos), data_type=opt.data_type, num_workers=opt.num_threads, verbose=opt.verbose, external_tokenizer=opt.external_tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, lang_list=dicts['langs'])
                valid['past_src'] += past_src_data
                valid['past_src_sizes'] += past_src_sizes
            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data
                print(('Saving validation set %i %s-%s to disk ...' % (idx, src_lang, tgt_lang)))
                path = os.path.join(dirname(opt.save_data), ('valid.%i.%s-%s' % (idx, src_lang, tgt_lang)))
                os.makedirs(path, exist_ok=True)
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                idx = (idx + 1)
            else:
                valid['src'] += src_data
                valid['tgt'] += tgt_data
                valid['src_sizes'] += src_sizes
                valid['tgt_sizes'] += tgt_sizes
                valid['src_lang'] += src_lang_data
                valid['tgt_lang'] += tgt_lang_data
    elapse = str(datetime.timedelta(seconds=int((time.time() - start))))
    print(('Binarization finished after %s' % elapse))
    # Persist vocabularies (source vocab only exists outside ASR/LM modes).
    if ((opt.src_vocab is None) and (opt.asr == False) and (opt.lm == False)):
        save_vocabulary('source', dicts['src'], (opt.save_data + '.src.dict'))
    if (opt.tgt_vocab is None):
        save_vocabulary('target', dicts['tgt'], (opt.save_data + '.tgt.dict'))
    if opt.multi_dataset:
        # Per-pair datasets were already written; only the dictionary remains.
        print(('Saving dictionary to %s' % (opt.save_data + '.dict.pt')))
        torch.save(dicts, (opt.save_data + '.dict.pt'))
        if ((opt.src_vocab is None) and (opt.asr == False) and (opt.lm == False)):
            save_vocabulary('source', dicts['src'], (opt.save_data + '.src.dict'))
        if (opt.tgt_vocab is None):
            save_vocabulary('target', dicts['tgt'], (opt.save_data + '.tgt.dict'))
        print('Finished.')
    elif (opt.format in ['raw', 'bin']):
        print((("Saving data to '" + opt.save_data) + ".train.pt'..."))
        save_data = {'dicts': dicts, 'type': opt.src_type, 'train': train, 'valid': valid}
        torch.save(save_data, (opt.save_data + '.train.pt'))
        print('Done')
    elif (opt.format in ['scp', 'scpmem', 'wav']):
        print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        assert opt.asr, 'ASR data format is required for this memory indexed format'
        torch.save(dicts, (opt.save_data + '.dict.pt'))
        # Tensor-valued fields go into mmap-indexed .bin/.idx files.
        for set_ in ['tgt', 'src_lang', 'tgt_lang']:
            if (train[set_] is None):
                continue
            if (opt.data_type == 'int64'):
                dtype = np.int64
            else:
                dtype = np.int32
            train_data = MMapIndexedDatasetBuilder((opt.save_data + ('.train.%s.bin' % set_)), dtype=dtype)
            for tensor in train[set_]:
                train_data.add_item(tensor)
            train_data.finalize((opt.save_data + ('.train.%s.idx' % set_)))
            del train_data
            if (valid[set_] is None):
                continue
            valid_data = MMapIndexedDatasetBuilder((opt.save_data + ('.valid.%s.bin' % set_)), dtype=dtype)
            for tensor in valid[set_]:
                valid_data.add_item(tensor)
            valid_data.finalize((opt.save_data + ('.valid.%s.idx' % set_)))
            del valid_data
        # Size arrays are saved as plain .npy files.
        for set_ in ['src_sizes', 'tgt_sizes']:
            if (train[set_] is not None):
                np_array = np.asarray(train[set_])
                np.save((opt.save_data + ('.train.%s.npy' % set_)), np_array)
            else:
                print(('Training %s not found ' % set_))
            if (valid[set_] is not None):
                np_array = np.asarray(valid[set_])
                np.save((opt.save_data + ('.valid.%s.npy' % set_)), np_array)
            else:
                print(('Validation %s not found ' % set_))
        if (('past_src' in train) and (len(train['past_src']) > 0)):
            set_ = 'past_src_sizes'
            if (train[set_] is not None):
                np_array = np.asarray(train[set_])
                np.save((opt.save_data + ('.train.%s.npy' % set_)), np_array)
            else:
                print(('Training %s not found ' % set_))
            if (valid[set_] is not None):
                np_array = np.asarray(valid[set_])
                np.save((opt.save_data + ('.valid.%s.npy' % set_)), np_array)
            else:
                print(('Validation %s not found ' % set_))
        # Source audio is stored only as scp/wav paths.
        save_data = {'train': train['src'], 'valid': valid['src']}
        if (('past_src' in train) and (len(train['past_src']) > 0)):
            save_data['train_past'] = train['past_src']
            save_data['valid_past'] = valid['past_src']
        if (opt.format in ['wav']):
            torch.save(save_data, (opt.save_data + '.wav_path.pt'))
        else:
            torch.save(save_data, (opt.save_data + '.scp_path.pt'))
print('Done')
elif (opt.format in ['mmap', 'mmem']):
print('Saving data to memory indexed data files')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
torch.save(dicts, (opt.save_data + '.dict.pt'))
for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'past_src']:
if ((set_ not in train) or (train[set_] is None)):
continue
if (opt.data_type == 'int64'):
dtype = np.int64
else:
dtype = np.int32
train_data = MMapIndexedDatasetBuilder((opt.save_data + ('.train.%s.bin' % set_)), dtype=dtype)
for tensor in train[set_]:
train_data.add_item(tensor)
train_data.finalize((opt.save_data + ('.train.%s.idx' % set_)))
del train_data
if (valid[set_] is None):
continue
valid_data = MMapIndexedDatasetBuilder((opt.save_data + ('.valid.%s.bin' % set_)), dtype=dtype)
for tensor in valid[set_]:
valid_data.add_item(tensor)
valid_data.finalize((opt.save_data + ('.valid.%s.idx' % set_)))
del valid_data
for set_ in ['src_sizes', 'tgt_sizes']:
if ((set_ not in train) or (train[set_] is not None)):
np_array = np.asarray(train[set_])
np.save((opt.save_data + ('.train.%s.npy' % set_)), np_array)
else:
print(('Training %s not found ' % set_))
if (('past_src' in train) and (len(train['past_src']) > 0)):
set_ = 'past_src_sizes'
if (train[set_] is not None):
np_array = np.asarray(train[set_])
np.save((opt.save_data + ('.train.%s.npy' % set_)), np_array)
else:
print(('Training %s not found ' % set_))
if (valid[set_] is not None):
np_array = np.asarray(valid[set_])
np.save((opt.save_data + ('.valid.%s.npy' % set_)), np_array)
else:
print(('Validation %s not found ' % set_))
else:
raise NotImplementedError |
class Effect5778(BaseEffect):
    """Passive hull effect: boosts the 'speed' attribute of missile launchers.

    Applies the hull attribute ``shipBonusMF2``, scaled by the Minmatar
    Frigate skill, to every fitted module that requires the Missile Launcher
    Operation skill. (On launcher modules 'speed' is the cycle time, so this
    is presumably a rate-of-fire bonus — confirm against game data.)
    """

    type = 'passive'

    # FIX: handler takes no `self`, so it must be a staticmethod; otherwise a
    # lookup through an instance would mis-bind `fit` to the effect object.
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Missile Launcher Operation')), 'speed', ship.getModifiedItemAttr('shipBonusMF2'), skill='Minmatar Frigate', **kwargs)
class TestTfWinnower(unittest.TestCase):
    """Tests for channel-mask propagation in the TF MaskPropagationWinnower.

    Each test builds a small Keras/TF graph inside a tf.compat.v1 session,
    requests that specific input channels of a chosen Conv2D op be winnowed,
    runs ``_propagate_masks``, and then inspects the per-op input/output
    channel masks recorded by the winnower's mask propagator.
    """

    # NOTE(review): the bare '.tf1' / '.tf2' lines below look like remnants of
    # stripped decorators (possibly '@pytest.mark.tf1' etc.) — as written they
    # are not valid Python; confirm against the original test suite.
    .tf1
    def test_mask_propagation_on_keras_model(self):
        """Winnow 3 input channels of conv2d_1; check masks on the upstream
        conv, the batch-norm in between, and conv2d_1 itself."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        module_zero_channels_list = []
        _ = keras_model()
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D')
        input_channels_to_winnow = [3, 5, 7]
        input_op_names = ['conv2d_input']
        output_op_names = ['keras_model/Softmax']
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        mask_winnower._propagate_masks()
        first_conv2d_opname = 'conv2d/Conv2D'
        middle_batchnorm_opname = 'batch_normalization'
        second_conv2d_opname = 'conv2d_1/Conv2D'
        ops_dict = mask_winnower._conn_graph.get_all_ops()
        first_conv2d_mask = mask_winnower._mask_propagator.op_to_mask_dict[ops_dict[first_conv2d_opname]]
        # 3 of the upstream conv's input channels remain unwinnowed.
        self.assertEqual(3, sum(first_conv2d_mask.input_channel_masks[0]))
        self._check_mask_indices(input_channels_to_winnow, 'output', first_conv2d_mask)
        middle_batchnorm_mask = mask_winnower._mask_propagator.op_to_mask_dict[ops_dict[middle_batchnorm_opname]]
        self._check_mask_indices(input_channels_to_winnow, 'input', middle_batchnorm_mask)
        self._check_mask_indices(input_channels_to_winnow, 'output', middle_batchnorm_mask)
        second_conv2d_mask = mask_winnower._mask_propagator.op_to_mask_dict[ops_dict[second_conv2d_opname]]
        self._check_mask_indices(input_channels_to_winnow, 'input', second_conv2d_mask)
        self.assertEqual(4, sum(second_conv2d_mask.output_channel_masks[0]))
        sess.close()
    .tf1
    def test_mask_propagation_on_single_residual_model(self):
        """Winnow channels on three convs of the single-residual model and
        verify the masks propagated through batch-norm and ReLU ops."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        module_zero_channels_list = []
        _ = single_residual()
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        input_op_names = ['input_1']
        output_op_names = ['Relu_2']
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_3/Conv2D')
        input_channels_to_winnow = [3, 5, 7]
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_2/Conv2D')
        input_channels_to_winnow_2 = [13, 15]
        module_mask_pair = (tf_op, input_channels_to_winnow_2)
        module_zero_channels_list.append(module_mask_pair)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D')
        input_channels_to_winnow_3 = [13, 15]
        module_mask_pair = (tf_op, input_channels_to_winnow_3)
        module_zero_channels_list.append(module_mask_pair)
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        mask_winnower._propagate_masks()
        ops_dict = mask_winnower._conn_graph.get_all_ops()
        self._check_mask_indices(input_channels_to_winnow, 'input', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['conv2d_3/Conv2D']])
        self._check_mask_indices(input_channels_to_winnow_2, 'output', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['batch_normalization']])
        self._check_mask_indices(input_channels_to_winnow_2, 'input', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['batch_normalization']])
        self._check_mask_indices(input_channels_to_winnow, 'output', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['conv2d_2/Conv2D']])
        self._check_mask_indices(input_channels_to_winnow_2, 'output', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['Relu']])
        self._check_mask_indices(input_channels_to_winnow_2, 'input', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['Relu']])
        self._check_mask_indices(input_channels_to_winnow_2, 'output', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['conv2d/Conv2D']])
        sess.close()
    .tf2
    def test_mask_propagation_on_single_residual_model_for_tf2(self):
        """Same as the single-residual test above but on the TF2 variant of
        the model."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        module_zero_channels_list = []
        _ = single_residual_for_tf2()
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        input_op_names = ['input_1']
        output_op_names = ['Relu_2']
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_3/Conv2D')
        input_channels_to_winnow = [3, 5, 7]
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_2/Conv2D')
        input_channels_to_winnow_2 = [13, 15]
        module_mask_pair = (tf_op, input_channels_to_winnow_2)
        module_zero_channels_list.append(module_mask_pair)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D')
        input_channels_to_winnow_3 = [13, 15]
        module_mask_pair = (tf_op, input_channels_to_winnow_3)
        module_zero_channels_list.append(module_mask_pair)
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        mask_winnower._propagate_masks()
        ops_dict = mask_winnower._conn_graph.get_all_ops()
        self._check_mask_indices(input_channels_to_winnow, 'input', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['conv2d_3/Conv2D']])
        self._check_mask_indices(input_channels_to_winnow_2, 'output', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['batch_normalization']])
        self._check_mask_indices(input_channels_to_winnow_2, 'input', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['batch_normalization']])
        self._check_mask_indices(input_channels_to_winnow, 'output', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['conv2d_2/Conv2D']])
        self._check_mask_indices(input_channels_to_winnow_2, 'output', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['Relu']])
        self._check_mask_indices(input_channels_to_winnow_2, 'input', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['Relu']])
        self._check_mask_indices(input_channels_to_winnow_2, 'output', mask_winnower._mask_propagator.op_to_mask_dict[ops_dict['conv2d/Conv2D']])
        sess.close()
    def test_mask_propagation_with_maxpool_as_last_layer(self):
        """Smoke test: propagation must not fail when the graph ends in a
        MaxPool op."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        module_zero_channels_list = []
        inputs = tf.keras.Input(shape=(64, 32, 3))
        x = tf.keras.layers.Conv2D(64, (3, 3))(inputs)
        x = tf.keras.layers.Conv2D(16, (3, 3))(x)
        _ = tf.keras.layers.MaxPool2D()(x)
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D')
        input_channels_to_winnow = [1]
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        input_op_names = ['input_1']
        output_op_names = ['max_pooling2d/MaxPool']
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        mask_winnower._propagate_masks()
        # No mask assertions; reaching here without an exception is the test.
        self.assertEqual(0, 0)
        sess.close()
    def test_mask_propagation_with_conv_as_last_layer(self):
        """Smoke test: propagation must not fail when the graph ends in a
        Conv2D op."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        module_zero_channels_list = []
        inputs = tf.keras.Input(shape=(8, 8, 3))
        x = tf.keras.layers.Conv2D(4, (2, 2))(inputs)
        _ = tf.keras.layers.Conv2D(2, (1, 1))(x)
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D')
        input_channels_to_winnow = [1]
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        input_op_names = ['input_1']
        output_op_names = ['conv2d_1/BiasAdd']
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        mask_winnower._propagate_masks()
        self.assertEqual(0, 0)
        sess.close()
    def test_mask_propagation_with_dense_as_last_layer(self):
        """Smoke test: propagation must not fail when the graph ends in a
        Dense layer fed through Flatten."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        module_zero_channels_list = []
        inputs = tf.keras.Input(shape=(8, 8, 3))
        x = tf.keras.layers.Conv2D(4, (2, 2))(inputs)
        x = tf.keras.layers.Conv2D(2, (1, 1))(x)
        x = tf.keras.layers.Flatten()(x)
        _ = tf.keras.layers.Dense(2)(x)
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D')
        input_channels_to_winnow = [1]
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        input_op_names = ['input_1']
        output_op_names = ['dense/BiasAdd']
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        mask_winnower._propagate_masks()
        self.assertEqual(0, 0)
        sess.close()
    def test_mask_propagation_with_concat(self):
        """Winnow channels downstream of a concat and check that the masks
        split correctly across the concat's producer convs."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        module_zero_channels_list = []
        _ = concat_model()
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_3/Conv2D')
        input_channels_to_winnow = [2, 3, 6, 7, 17]
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_4/Conv2D')
        input_channels_to_winnow_1 = [2, 3, 6, 7, 8, 17]
        module_mask_pair = (tf_op, input_channels_to_winnow_1)
        module_zero_channels_list.append(module_mask_pair)
        input_op_names = ['input_1']
        output_op_names = ['concat_model/Softmax']
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        mask_winnower._propagate_masks()
        modified_op_list = mask_winnower._mask_propagator.get_ops_with_non_default_ip_op_masks()
        self.assertEqual(6, len(modified_op_list))
        conv2d_1_op = mask_winnower._conn_graph.get_all_ops()['conv2d_1/Conv2D']
        conv2d_1_op_mask = mask_winnower._mask_propagator.op_to_mask_dict[conv2d_1_op]
        self.assertEqual([1, 1, 0, 0, 1, 1], conv2d_1_op_mask.output_channel_masks[0])
        conv2d_op = mask_winnower._conn_graph.get_all_ops()['conv2d/Conv2D']
        conv2d_op_mask = mask_winnower._mask_propagator.op_to_mask_dict[conv2d_op]
        self.assertEqual([0, 0, 1, 1, 1], conv2d_op_mask.output_channel_masks[0])
        conv2d_2_op = mask_winnower._conn_graph.get_all_ops()['conv2d_2/Conv2D']
        conv2d_2_op_mask = mask_winnower._mask_propagator.op_to_mask_dict[conv2d_2_op]
        self.assertEqual([1, 1, 1, 1, 1, 1, 0], conv2d_2_op_mask.output_channel_masks[0])
        conv2d_3_op = mask_winnower._conn_graph.get_all_ops()['conv2d_3/Conv2D']
        conv2d_3_op_mask = mask_winnower._mask_propagator.op_to_mask_dict[conv2d_3_op]
        self.assertEqual([1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], conv2d_3_op_mask.input_channel_masks[0])
        conv2d_4_op = mask_winnower._conn_graph.get_all_ops()['conv2d_4/Conv2D']
        conv2d_4_op_mask = mask_winnower._mask_propagator.op_to_mask_dict[conv2d_4_op]
        self.assertEqual([1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0], conv2d_4_op_mask.input_channel_masks[0])
        sess.close()
    def test_mask_propagation_for_add_with_split_parent(self):
        """With a split feeding the Add, the Add's masks must stay all-ones
        while the non-split conv parent is winnowed."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        _ = upsample_model()
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        input_op_names = ['input_1']
        output_op_names = ['upsample_model/Softmax']
        module_zero_channels_list = []
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_3/Conv2D')
        input_channels_to_winnow = [3, 5, 7]
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        mask_winnower._propagate_masks()
        conv2d_2_op = mask_winnower._conn_graph.get_all_ops()['conv2d_2/Conv2D']
        conv2d_2_op_mask = mask_winnower._mask_propagator.op_to_mask_dict[conv2d_2_op]
        self.assertEqual([1, 1, 1, 0, 1, 0, 1, 0], conv2d_2_op_mask.output_channel_masks[0])
        add_op = mask_winnower._conn_graph.get_all_ops()['Add']
        add_mask = mask_winnower._mask_propagator.op_to_mask_dict[add_op]
        # All 8 channels survive on both Add inputs and its output.
        self.assertEqual(8, sum(add_mask.input_channel_masks[0]))
        self.assertEqual(8, sum(add_mask.input_channel_masks[1]))
        self.assertEqual(8, sum(add_mask.output_channel_masks[0]))
        sess.close()
    def test_mask_propagation_for_add_with_non_split_parents(self):
        """Both non-split parents of the Add must receive the same winnowed
        output mask as the Add itself."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        _ = single_residual()
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        input_op_names = ['input_1']
        output_op_names = ['conv2d_4/Conv2D']
        module_zero_channels_list = []
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_4/Conv2D')
        input_channels_to_winnow = [3, 5, 7]
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        mask_winnower._propagate_masks()
        conv2d_1_op = mask_winnower._conn_graph.get_all_ops()['conv2d_1/Conv2D']
        conv2d_1_op_mask = mask_winnower._mask_propagator.op_to_mask_dict[conv2d_1_op]
        self.assertEqual([1, 1, 1, 0, 1, 0, 1, 0], conv2d_1_op_mask.output_channel_masks[0])
        conv2d_3_op = mask_winnower._conn_graph.get_all_ops()['conv2d_3/Conv2D']
        conv2d_3_op_mask = mask_winnower._mask_propagator.op_to_mask_dict[conv2d_3_op]
        self.assertEqual([1, 1, 1, 0, 1, 0, 1, 0], conv2d_3_op_mask.output_channel_masks[0])
        add_op = mask_winnower._conn_graph.get_all_ops()['Add']
        add_mask = mask_winnower._mask_propagator.op_to_mask_dict[add_op]
        self.assertEqual([1, 1, 1, 0, 1, 0, 1, 0], add_mask.input_channel_masks[0])
        self.assertEqual([1, 1, 1, 0, 1, 0, 1, 0], add_mask.input_channel_masks[1])
        self.assertEqual([1, 1, 1, 0, 1, 0, 1, 0], add_mask.output_channel_masks[0])
    def test_mask_propagation_set_downstream_masks(self):
        """Downstream ReLU masks must stay all-ones when winnowing happens
        upstream of them."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        _ = model_to_test_downstream_masks()
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        input_op_names = ['input_1']
        output_op_names = ['model_to_test_downstream_masks/Softmax']
        module_zero_channels_list = []
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_2/Conv2D')
        input_channels_to_winnow = [3, 5, 7]
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_3/Conv2D')
        input_channels_to_winnow = [3, 5, 7]
        module_mask_pair = (tf_op, input_channels_to_winnow)
        module_zero_channels_list.append(module_mask_pair)
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        mask_winnower._propagate_masks()
        relu_op = mask_winnower._conn_graph.get_all_ops()['Relu']
        relu_op_mask = mask_winnower._mask_propagator.op_to_mask_dict[relu_op]
        self.assertEqual(8, sum(relu_op_mask.output_channel_masks[0]))
        relu_1_op = mask_winnower._conn_graph.get_all_ops()['Relu_1']
        relu_1_op_mask = mask_winnower._mask_propagator.op_to_mask_dict[relu_1_op]
        self.assertEqual(8, sum(relu_1_op_mask.output_channel_masks[0]))
        sess.close()
    def test_create_masks_with_postprocessing_ops(self):
        """Postprocessing ops (accuracy metrics etc.) must not get masks
        created for them."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        module_zero_channels_list = []
        model_with_postprocessing_nodes()
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        input_op_names = ['input_1']
        output_op_names = ['top1-acc', 'top5-acc']
        mask_winnower = MaskPropagationWinnower(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True)
        flatten_op = mask_winnower._conn_graph.get_all_ops()['flatten/Reshape']
        self.assertTrue((flatten_op not in mask_winnower._mask_propagator.op_to_mask_dict.keys()))
        self.assertEqual(3, len(mask_winnower._mask_propagator.op_to_mask_dict))
    def _check_mask_indices(self, winnowed_channels: List, channel_type: str, op_mask: Mask):
        """Assert each winnowed channel index is 0 in the op's first
        input ('input') or output ('output') channel mask."""
        if (channel_type == 'input'):
            for channel in winnowed_channels:
                self.assertEqual(0, op_mask.input_channel_masks[0][channel])
        elif (channel_type == 'output'):
            for channel in winnowed_channels:
                self.assertEqual(0, op_mask.output_channel_masks[0][channel])
class BaselineYNet(nn.Module):
    """Baseline classifier: a y-net feature extractor at time t=0 followed by
    a flatten + linear projection head.

    The second element of ``forward``'s return is a constant zero scalar,
    presumably a placeholder regularization term matching a sibling model's
    interface.
    """

    def __init__(self, input_size=(3, 32, 32), num_classes=10, activation='softplus', residual=False, hidden_width=128, aug=0):
        super(BaselineYNet, self).__init__()
        y_net, output_size = make_y_net(input_size=input_size, explicit_params=False, activation=activation, hidden_width=hidden_width)
        # `aug` extra feature dims are appended by the caller's augmentation.
        feature_dim = int(np.prod(output_size)) + aug
        self.projection = nn.Sequential(nn.Flatten(), nn.Linear(feature_dim, num_classes))
        self.y_net = y_net
        self.residual = residual

    def forward(self, y, *args, **kwargs):
        # Evaluate the y-net at a fixed time of zero.
        time_zero = y.new_tensor(0.0)
        features = self.y_net(time_zero, y).flatten(start_dim=1)
        if self.residual:
            features += y.flatten(start_dim=1)
        return self.projection(features), torch.tensor(0.0, device=y.device)
class MissingDependencies(RuntimeError):
    """RuntimeError that carries the collection of unresolved dependencies.

    The string form is the base message followed by a comma-separated list of
    the missing dependencies.
    """

    def __init__(self, missing_dependencies, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Keep the raw collection accessible to callers for programmatic use.
        self.missing_dependencies = missing_dependencies

    def __str__(self):
        base_message = super().__str__()
        missing_list = ', '.join(str(dep) for dep in self.missing_dependencies)
        return f'{base_message} {missing_list}'
def test_search(requests_mock):
    """Smoke-test the combined /search endpoint response conversion.

    Mocks the HTTP layer with a canned JSON sample and checks that each
    result type (Taxon, Place, Project, User) comes back with its timestamp
    and coordinate fields converted to native Python types.
    """
    requests_mock.get(f'{API_V1}/search', json=load_sample_data('get_search.json'), status_code=200)
    response = search([8348, 6432])
    results = response['results']
    taxon_result, place_result, project_result, user_result = results[:4]
    # Scores should be converted to floats for every result.
    assert all(isinstance(entry['score'], float) for entry in results)
    assert taxon_result['type'] == 'Taxon'
    assert isinstance(taxon_result['record']['created_at'], datetime)
    assert place_result['type'] == 'Place'
    assert isinstance(place_result['record']['location'][0], float)
    assert isinstance(place_result['record']['location'][1], float)
    assert project_result['type'] == 'Project'
    assert isinstance(project_result['record']['last_post_at'], datetime)
    assert user_result['type'] == 'User'
    assert isinstance(user_result['record']['created_at'], datetime)
class CurrentUserGPGKeyManager(RetrieveMixin, CreateMixin, DeleteMixin, RESTManager):
    """Manager for the current user's GPG keys (``/user/gpg_keys``)."""

    _path = '/user/gpg_keys'
    _obj_cls = CurrentUserGPGKey
    _create_attrs = RequiredOptional(required=('key',))

    def get(self, id: Union[str, int], lazy: bool = False, **kwargs: Any) -> CurrentUserGPGKey:
        """Retrieve a single GPG key, narrowing the return type for mypy."""
        gpg_key = super().get(id=id, lazy=lazy, **kwargs)
        return cast(CurrentUserGPGKey, gpg_key)
class BTOOLS_OT_material_group_assign(bpy.types.Operator):
    """Assign the selected faces of the active mesh to the active material group."""

    bl_idname = 'btools.material_group_assign'
    bl_label = 'Assign Faces to Group'
    bl_options = {'REGISTER', 'UNDO'}

    # FIX: Blender invokes `poll` on the class, so it must be a classmethod;
    # without the decorator `context` would be bound to `cls`.
    @classmethod
    def poll(cls, context):
        obj = context.object
        matgroup = obj.bt_materials[obj.bt_materials_active_index]
        return matgroup

    def execute(self, context):
        """Stamp the active group's index onto each selected face and sync the
        face's material slot to the group's material (slot 0 if none)."""
        obj = context.object
        active_matgroup = obj.bt_materials[obj.bt_materials_active_index]
        with bmesh_from_active_object(context) as bm:
            # Integer face layer storing the per-face material-group index.
            # NOTE(review): assumes the '.bt_material_group_index' layer was
            # already created elsewhere; `get` returns None otherwise.
            layer = bm.faces.layers.int.get('.bt_material_group_index')
            for face in bm.faces:
                if face.select:
                    face[layer] = active_matgroup.index
                    if active_matgroup.material:
                        face.material_index = obj.data.materials.find(active_matgroup.material.name)
                    else:
                        face.material_index = 0
        return {'FINISHED'}
def test_solver_does_not_return_prereleases_if_not_requested(solver: Solver, repo: Repository, package: ProjectPackage) -> None:
    """The solver must install C 1.0 rather than the 1.1-beta.1 prerelease
    when the constraint '*' does not explicitly allow prereleases."""
    for dep_name in ('A', 'B', 'C'):
        package.add_dependency(Factory.create_dependency(dep_name, '*'))
    package_a = get_package('A', '1.0')
    package_b = get_package('B', '1.0')
    package_c = get_package('C', '1.0')
    package_c_dev = get_package('C', '1.1-beta.1')
    for candidate in (package_a, package_b, package_c, package_c_dev):
        repo.add_package(candidate)
    transaction = solver.solve()
    expected_ops = [{'job': 'install', 'package': pkg} for pkg in (package_a, package_b, package_c)]
    check_solver_result(transaction, expected_ops)
def mc_elbo(z0, t0, t1, prior_params, post_params, log_likelihood_params, prior_drift, diffusion, posterior_drift, log_likelihood, rng):
    """Single-sample Monte Carlo ELBO estimate for a latent SDE model.

    Integrates the state augmented with the running log(q/p) term from t0 to
    t1 under the posterior dynamics, then adds the terminal log-likelihood.
    (Fixed integrator step dt=0.1; presumably an Ito Euler-Maruyama scheme —
    see `sdeint_ito`.)
    """
    drift_aug, diffusion_aug = make_aug_dynamics(prior_drift, diffusion, posterior_drift)
    # Augment the initial state with a zero-initialized log-ratio accumulator.
    augmented_init = pack(z0, 0.0)
    trajectory = sdeint_ito(drift_aug, diffusion_aug, augmented_init, np.array([t0, t1]), rng, (prior_params, post_params), dt=0.1)
    # Only the terminal state (index 1, i.e. time t1) is needed.
    z_final, logpq = unpack(trajectory[1])
    return logpq + log_likelihood(z_final, log_likelihood_params)
def save_churns(churns, path='./results/code_churns_features_multithread.csv'):
    """Write per-commit code-churn feature rows to a CSV file.

    Parameters
    ----------
    churns : iterable of rows. Each truthy row is a sequence whose first five
        elements are (commit, lines_added, lines_deleted, files_churned,
        old_loc); falsy entries (e.g. None from failed workers) are skipped.
    path : destination CSV path; the parent directory must already exist.
    """
    # FIX: csv.writer requires the file opened with newline='' — without it,
    # blank lines appear between rows on Windows (see csv module docs).
    with open(path, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['commit', 'lines_of_code_added', 'lines_of_code_deleted', 'files_churned', 'line_of_code_old'])
        for row in churns:
            if row:
                writer.writerow(row[:5])
class _FindExecutor(ActionExecutor):
    """Executor for the 'find' script action: the character locates an object.

    ``execute`` is a generator yielding one candidate successor state per
    graph node that matches the scripted object and passes ``check_find``.
    """

    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo, char_index, modify=True, in_place=False):
        # The current script line names the object the character should find.
        current_line = script[0]
        info.set_current_line(current_line)
        current_obj = current_line.object()
        for node in state.select_nodes(current_obj):
            if self.check_find(state, node, info, char_index):
                if modify:
                    # Re-orient the character: drop its FACING edges and add a
                    # bidirectional CLOSE edge to the found node.
                    (yield state.change_state([DeleteEdges(CharacterNode(char_index), [Relation.FACING], AnyNode()), AddEdges(CharacterNode(char_index), Relation.CLOSE, NodeInstance(node), add_reverse=True)], node, current_obj, in_place=in_place))
                else:
                    # Read-only mode: yield the unchanged state.
                    (yield state)

    def check_find(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo, char_index):
        """Return True when the character is close to *node*; otherwise record
        an error on *info* and return False."""
        if (not _is_character_close_to(state, node, char_index)):
            char_node = _get_character_node(state, char_index)
            info.error('{} is not close to {}', char_node, node)
            return False
        return True
def zero_scale_fix(model, device):
    """Replace zero-valued entries of every quantized conv layer's weight
    `amax` with 1.0, in place.

    A zero amax would yield a zero quantization scale (division by zero /
    degenerate quantization), so each zero is substituted with 1.0 before
    being copied back onto the quantizer's buffer on *device*.
    """
    quantized_conv_types = (quant_nn.QuantConv2d, quant_nn.QuantConvTranspose2d)
    for name, module in model.named_modules():
        if not isinstance(module, quantized_conv_types):
            continue
        amax = module._weight_quantizer._amax.detach().cpu().numpy()
        print(name)
        print('zero scale number = {}'.format(np.sum(amax == 0.0)))
        amax = np.where(amax == 0.0, np.ones_like(amax), amax)
        module._weight_quantizer._amax.copy_(torch.from_numpy(amax).to(device))
def monotonically_increasing_and_bounded(iterable, min=None, max=None):
    """Return True iff *iterable* is strictly increasing and every element
    lies within the optional [min, max] bounds.

    An empty iterable is vacuously True. Raises TypeError when *iterable* is
    not an Iterable.

    FIX: the original checked ``isinstance(iterable, Iterable)`` but then used
    ``len()`` and indexing, so genuine iterables (generators, sets) crashed;
    this version iterates once and works for any iterable. Sequence behavior
    is unchanged.
    """
    if not isinstance(iterable, Iterable):
        raise TypeError('Expected iterable to be of type Iterable, got ({})'.format(iterable.__class__.__name__))
    previous = None
    for index, value in enumerate(iterable):
        if min is not None and value < min:
            return False
        if max is not None and value > max:
            return False
        # Strictly increasing: each element must exceed its predecessor.
        if index > 0 and value <= previous:
            return False
        previous = value
    return True
class Gumbel(Continuous):
    """Univariate Gumbel (type-I extreme value) distribution with location
    ``mu`` and positive scale ``beta``.

    NOTE(review): in PyMC, ``dist`` is normally decorated with @classmethod
    and the remaining methods are dispatch targets; decorators appear to have
    been stripped here — confirm against the original source.
    """

    rv_op = gumbel

    def dist(cls, mu, beta, **kwargs):
        """Build the RV from location *mu* and scale *beta* (cast to the
        configured float dtype)."""
        mu = pt.as_tensor_variable(floatX(mu))
        beta = pt.as_tensor_variable(floatX(beta))
        return super().dist([mu, beta], **kwargs)

    def moment(rv, size, mu, beta):
        """Distribution mean: mu + beta * Euler-Mascheroni constant,
        broadcast to *size* when a size is given."""
        mean = (mu + (beta * np.euler_gamma))
        if (not rv_size_is_none(size)):
            mean = pt.full(size, mean)
        return mean

    def logp(value, mu, beta):
        """Log-density: -z - exp(-z) - log(beta), with z = (value - mu)/beta;
        invalid for beta <= 0."""
        z = ((value - mu) / beta)
        res = (((- z) - pt.exp((- z))) - pt.log(beta))
        return check_parameters(res, (beta > 0), msg='beta > 0')

    def logcdf(value, mu, beta):
        """Log-CDF: -exp(-(value - mu)/beta); invalid for beta <= 0."""
        res = (- pt.exp(((- (value - mu)) / beta)))
        return check_parameters(res, (beta > 0), msg='beta > 0')

    def icdf(value, mu, beta):
        """Quantile function: mu - beta * log(-log(value)) for value in
        (0, 1); invalid for beta <= 0."""
        res = (mu - (beta * pt.log((- pt.log(value)))))
        res = check_icdf_value(res, value)
        return check_parameters(res, (beta > 0), msg='beta > 0')
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 conv+BN stages with a skip
    connection.

    When ``is_last`` is True, ``forward`` also returns the pre-activation
    tensor (the sum before the final ReLU), presumably for distillation-style
    losses — confirm against callers.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Optional projection applied to the skip path when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x
        # Main path: conv -> BN -> ReLU -> conv -> BN.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        if self.downsample is not None:
            shortcut = self.downsample(x)
        y += shortcut
        pre_activation = y
        y = F.relu(y)
        if self.is_last:
            return y, pre_activation
        return y
class TextReporter(Reporter):
    """Reporter that renders check results as human-readable text via click.

    Verbosity levels: < 1 is silent; 1 prints summaries and best-match
    errors; > 1 additionally lists checked files and all underlying errors;
    >= 3 shows full parse-error detail.
    """

    def __init__(self, *, verbosity: int, stream: (t.TextIO | None)=None) -> None:
        # stream=None lets click.echo fall back to stdout.
        super().__init__(verbosity=verbosity)
        self.stream = stream

    def _echo(self, s: str, *, indent: int=0) -> None:
        """Write one line to the output stream with the given indent."""
        click.echo(((' ' * indent) + s), file=self.stream)

    def report_success(self, result: CheckResult) -> None:
        """Print the success summary; list the checked files at verbosity > 1."""
        if (self.verbosity < 1):
            return
        ok = click.style('ok', fg='green')
        self._echo(f'{ok} -- validation done')
        if (self.verbosity > 1):
            self._echo('The following files were checked:')
            for filename in result.successes:
                # FIX: previously echoed a literal placeholder instead of the
                # checked filename (the loop variable was unused).
                self._echo(f'  {filename}')

    def _format_validation_error_message(self, err: jsonschema.ValidationError, filename: (str | None)=None) -> str:
        """Render one validation error as '<file>::<json_path>: <message>'."""
        error_loc = err.json_path
        if filename:
            # FIX: previously interpolated a literal placeholder rather than
            # the filename prefix (the parameter was unused).
            error_loc = f'{filename}::{error_loc}'
        error_loc = click.style(error_loc, fg='yellow')
        return f'{error_loc}: {err.message}'

    def _show_validation_error(self, filename: str, err: jsonschema.ValidationError) -> None:
        """Print one validation error plus its best-matching sub-errors."""
        self._echo(self._format_validation_error_message(err, filename=filename), indent=2)
        if err.context:
            best_match = jsonschema.exceptions.best_match(err.context)
            self._echo('Underlying errors caused this.', indent=2)
            self._echo('')
            self._echo('Best Match:', indent=2)
            self._echo(self._format_validation_error_message(best_match), indent=4)
            best_deep_match = find_best_deep_match(err)
            if (best_deep_match != best_match):
                self._echo('Best Deep Match:', indent=2)
                self._echo(self._format_validation_error_message(best_deep_match), indent=4)
            if (self.verbosity > 1):
                self._echo('All Errors:', indent=2)
                for e in iter_validation_error(err):
                    self._echo(self._format_validation_error_message(e), indent=4)
            else:
                # Count the sub-errors not already shown above.
                num_other_errors = (len(list(iter_validation_error(err))) - 1)
                if (best_deep_match != best_match):
                    num_other_errors -= 1
                if (num_other_errors > 0):
                    self._echo('')
                    self._echo(f"{click.style(str(num_other_errors), fg='yellow')} other errors were produced. Use '--verbose' to see all errors.", indent=2)

    def _show_parse_error(self, filename: str, err: ParseError) -> None:
        """Print a parse error with detail scaled to the verbosity level."""
        if (self.verbosity < 2):
            self._echo(click.style(str(err), fg='yellow'), indent=2)
        elif (self.verbosity < 3):
            self._echo(textwrap.indent(format_error(err, mode='short'), '  '))
        else:
            self._echo(textwrap.indent(format_error(err, mode='full'), '  '))

    def report_errors(self, result: CheckResult) -> None:
        """Print every parse and schema-validation error collected in *result*."""
        if (self.verbosity < 1):
            return
        if result.parse_errors:
            self._echo('Several files failed to parse.')
            for (filename, errors) in result.parse_errors.items():
                for err in errors:
                    self._show_parse_error(filename, err)
        if result.validation_errors:
            self._echo('Schema validation errors were encountered.')
            for (filename, parse_errors) in result.validation_errors.items():
                for parse_err in parse_errors:
                    self._show_validation_error(filename, parse_err)
class Win32Window(BaseWindow):
_window_class = None
_hwnd = None
_dc = None
_wgl_context = None
_tracking = False
_hidden = False
_has_focus = False
_exclusive_keyboard = False
_exclusive_keyboard_focus = True
_exclusive_mouse = False
_exclusive_mouse_focus = True
_exclusive_mouse_screen = None
_exclusive_mouse_lpos = None
_exclusive_mouse_buttons = 0
_mouse_platform_visible = True
_pending_click = False
_in_title_bar = False
_keyboard_state = {42: False, 54: False}
_ws_style = 0
_ex_ws_style = 0
_minimum_size = None
_maximum_size = None
def __init__(self, *args, **kwargs):
self._event_handlers = {}
self._view_event_handlers = {}
for func_name in self._platform_event_names:
if (not hasattr(self, func_name)):
continue
func = getattr(self, func_name)
for message in func._platform_event_data:
if hasattr(func, '_view'):
self._view_event_handlers[message] = func
else:
self._event_handlers[message] = func
self._always_dwm = (sys.getwindowsversion() >= (6, 2))
self._interval = 0
super(Win32Window, self).__init__(*args, **kwargs)
    def _recreate(self, changes):
        """Rebuild the native window after attribute *changes*."""
        if ('context' in changes):
            # A new context will be attached by _create(); drop the stale handle.
            self._wgl_context = None
        self._create()
    def _create(self):
        """(Re)create the frame and view windows, apply styles, attach GL context."""
        if self._fullscreen:
            # Fullscreen is a borderless popup sized to the screen.
            self._ws_style = WS_POPUP
            self._ex_ws_style = 0
        else:
            # Map pyglet window styles to (WS_*, WS_EX_*) pairs.
            styles = {self.WINDOW_STYLE_DEFAULT: (WS_OVERLAPPEDWINDOW, 0), self.WINDOW_STYLE_DIALOG: (((WS_OVERLAPPED | WS_CAPTION) | WS_SYSMENU), WS_EX_DLGMODALFRAME), self.WINDOW_STYLE_TOOL: (((WS_OVERLAPPED | WS_CAPTION) | WS_SYSMENU), WS_EX_TOOLWINDOW), self.WINDOW_STYLE_BORDERLESS: (WS_POPUP, 0), self.WINDOW_STYLE_TRANSPARENT: (WS_OVERLAPPEDWINDOW, WS_EX_LAYERED), self.WINDOW_STYLE_OVERLAY: (WS_POPUP, (WS_EX_LAYERED | WS_EX_TRANSPARENT))}
            (self._ws_style, self._ex_ws_style) = styles[self._style]
        if (self._resizable and (not self._fullscreen)):
            self._ws_style |= WS_THICKFRAME
        else:
            self._ws_style &= (~ (WS_THICKFRAME | WS_MAXIMIZEBOX))
        if self._fullscreen:
            width = self.screen.width
            height = self.screen.height
        else:
            # Outer window size that yields the requested client size.
            (width, height) = self._client_to_window_size(self._width, self._height)
        if (not self._window_class):
            module = _kernel32.GetModuleHandleW(None)
            white = _gdi32.GetStockObject(WHITE_BRUSH)
            black = _gdi32.GetStockObject(BLACK_BRUSH)
            # Register a unique window class per instance, keyed on id(self).
            self._window_class = WNDCLASS()
            self._window_class.lpszClassName = (u'GenericAppClass%d' % id(self))
            # Keep a reference to the WNDPROC wrapper so ctypes does not GC it.
            self._window_class.lpfnWndProc = WNDPROC(self._get_window_proc(self._event_handlers))
            self._window_class.style = ((CS_VREDRAW | CS_HREDRAW) | CS_OWNDC)
            self._window_class.hInstance = 0
            self._window_class.hIcon = _user32.LoadImageW(module, MAKEINTRESOURCE(1), IMAGE_ICON, 0, 0, (LR_DEFAULTSIZE | LR_SHARED))
            self._window_class.hbrBackground = black
            self._window_class.lpszMenuName = None
            self._window_class.cbClsExtra = 0
            self._window_class.cbWndExtra = 0
            _user32.RegisterClassW(byref(self._window_class))
            # Separate class for the child "view" window that hosts the GL surface.
            self._view_window_class = WNDCLASS()
            self._view_window_class.lpszClassName = (u'GenericViewClass%d' % id(self))
            self._view_window_class.lpfnWndProc = WNDPROC(self._get_window_proc(self._view_event_handlers))
            self._view_window_class.style = 0
            self._view_window_class.hInstance = 0
            self._view_window_class.hIcon = 0
            self._view_window_class.hbrBackground = white
            self._view_window_class.lpszMenuName = None
            self._view_window_class.cbClsExtra = 0
            self._view_window_class.cbWndExtra = 0
            _user32.RegisterClassW(byref(self._view_window_class))
        if (not self._hwnd):
            self._hwnd = _user32.CreateWindowExW(self._ex_ws_style, self._window_class.lpszClassName, u'', self._ws_style, CW_USEDEFAULT, CW_USEDEFAULT, width, height, 0, 0, self._window_class.hInstance, 0)
            self._view_hwnd = _user32.CreateWindowExW(0, self._view_window_class.lpszClassName, u'', (WS_CHILD | WS_VISIBLE), 0, 0, 0, 0, self._hwnd, 0, self._view_window_class.hInstance, 0)
            self._dc = _user32.GetDC(self._view_hwnd)
            if self._file_drops:
                if WINDOWS_7_OR_GREATER:
                    # Allow drag-drop messages through UIPI filtering.
                    _user32.ChangeWindowMessageFilterEx(self._hwnd, WM_DROPFILES, MSGFLT_ALLOW, None)
                    _user32.ChangeWindowMessageFilterEx(self._hwnd, WM_COPYDATA, MSGFLT_ALLOW, None)
                    _user32.ChangeWindowMessageFilterEx(self._hwnd, WM_COPYGLOBALDATA, MSGFLT_ALLOW, None)
                _shell32.DragAcceptFiles(self._hwnd, True)
            # Raw keyboard input (usage page 1, usage 6 = keyboard).
            raw_keyboard = RAWINPUTDEVICE(1, 6, 0, None)
            if (not _user32.RegisterRawInputDevices(byref(raw_keyboard), 1, sizeof(RAWINPUTDEVICE))):
                # NOTE(review): message says "unregister" but this is a
                # *registration* failure — confirm wording upstream.
                print('Warning: Failed to unregister raw input keyboard.')
        else:
            # Reusing an existing window: hide, re-apply styles, reposition.
            _user32.ShowWindow(self._hwnd, SW_HIDE)
            _user32.SetWindowLongW(self._hwnd, GWL_STYLE, self._ws_style)
            _user32.SetWindowLongW(self._hwnd, GWL_EXSTYLE, self._ex_ws_style)
        if self._fullscreen:
            hwnd_after = (HWND_TOPMOST if (self.style == 'overlay') else HWND_NOTOPMOST)
            _user32.SetWindowPos(self._hwnd, hwnd_after, self._screen.x, self._screen.y, width, height, SWP_FRAMECHANGED)
        elif False:
            # Dead branch (never taken); also references an undefined 'factory'.
            (x, y) = self._client_to_window_pos(*factory.get_location())
            _user32.SetWindowPos(self._hwnd, HWND_NOTOPMOST, x, y, width, height, SWP_FRAMECHANGED)
        elif ((self.style == 'transparent') or (self.style == 'overlay')):
            # Alpha 254 keeps the layered window effectively opaque but composited.
            _user32.SetLayeredWindowAttributes(self._hwnd, 0, 254, LWA_ALPHA)
            if (self.style == 'overlay'):
                _user32.SetWindowPos(self._hwnd, HWND_TOPMOST, 0, 0, width, height, (SWP_NOMOVE | SWP_NOSIZE))
        else:
            _user32.SetWindowPos(self._hwnd, HWND_NOTOPMOST, 0, 0, width, height, (SWP_NOMOVE | SWP_FRAMECHANGED))
        self._update_view_location(self._width, self._height)
        if (not self._wgl_context):
            self.canvas = Win32Canvas(self.display, self._view_hwnd, self._dc)
            self.context.attach(self.canvas)
            self._wgl_context = self.context._context
        self.switch_to()
        self.set_caption(self._caption)
        self.set_vsync(self._vsync)
        if self._visible:
            self.set_visible()
            # Initial resize/expose so listeners can lay out and draw.
            self.dispatch_event('on_resize', self._width, self._height)
            self.dispatch_event('on_expose')
def _update_view_location(self, width, height):
if self._fullscreen:
x = ((self.screen.width - width) // 2)
y = ((self.screen.height - height) // 2)
else:
x = y = 0
_user32.SetWindowPos(self._view_hwnd, 0, x, y, width, height, (SWP_NOZORDER | SWP_NOOWNERZORDER))
    def close(self):
        """Destroy the native window, unregister its classes, release handles."""
        if (not self._hwnd):
            # Never created (or already closed) — only base bookkeeping remains.
            super(Win32Window, self).close()
            return
        self.set_mouse_platform_visible(True)
        _user32.DestroyWindow(self._hwnd)
        _user32.UnregisterClassW(self._view_window_class.lpszClassName, 0)
        _user32.UnregisterClassW(self._window_class.lpszClassName, 0)
        self._window_class = None
        self._view_window_class = None
        self._view_event_handlers.clear()
        self._event_handlers.clear()
        self._hwnd = None
        self._dc = None
        self._wgl_context = None
        super(Win32Window, self).close()
def _dwm_composition_enabled(self):
is_enabled = c_int()
_dwmapi.DwmIsCompositionEnabled(byref(is_enabled))
return is_enabled.value
    def _get_vsync(self):
        """Return True when a non-zero swap interval is set."""
        return bool(self._interval)
    # Read-only property; use set_vsync() to change the interval.
    vsync = property(_get_vsync)
    def set_vsync(self, vsync):
        """Set the swap interval, honouring a global pyglet.options override."""
        if (pyglet.options['vsync'] is not None):
            vsync = pyglet.options['vsync']
        self._interval = vsync
        if (not self._fullscreen):
            # When DWM composites, DwmFlush() in flip() does the pacing, so
            # the GL swap interval is forced off to avoid double throttling.
            if (self._always_dwm or self._dwm_composition_enabled()):
                vsync = 0
        self.context.set_vsync(vsync)
    def switch_to(self):
        """Make this window's GL context the current one."""
        self.context.set_current()
    def update_transparency(self):
        """Enable DWM blur-behind so the layered window composites transparently."""
        # (-1, -1) region: blur applies to the whole window surface.
        region = _gdi32.CreateRectRgn(0, 0, (- 1), (- 1))
        bb = DWM_BLURBEHIND()
        bb.dwFlags = (DWM_BB_ENABLE | DWM_BB_BLURREGION)
        bb.hRgnBlur = region
        bb.fEnable = True
        _dwmapi.DwmEnableBlurBehindWindow(self._hwnd, ctypes.byref(bb))
        # DWM copies the region; release our GDI handle to avoid a leak.
        _gdi32.DeleteObject(region)
    def flip(self):
        """Swap buffers, syncing with DWM composition when it is active."""
        self.draw_mouse_cursor()
        if (not self._fullscreen):
            if (self._always_dwm or self._dwm_composition_enabled()):
                if self._interval:
                    # Pace frames against the compositor instead of GL vsync.
                    _dwmapi.DwmFlush()
            if (self.style in ('overlay', 'transparent')):
                self.update_transparency()
        self.context.flip()
def set_location(self, x, y):
(x, y) = self._client_to_window_pos(x, y)
_user32.SetWindowPos(self._hwnd, 0, x, y, 0, 0, ((SWP_NOZORDER | SWP_NOSIZE) | SWP_NOOWNERZORDER))
def get_location(self):
rect = RECT()
_user32.GetClientRect(self._hwnd, byref(rect))
point = POINT()
point.x = rect.left
point.y = rect.top
_user32.ClientToScreen(self._hwnd, byref(point))
return (point.x, point.y)
    def set_size(self, width, height):
        """Resize the client area to *width* x *height* and notify listeners."""
        super().set_size(width, height)
        # Convert to the outer window size that yields this client size.
        (width, height) = self._client_to_window_size(width, height)
        _user32.SetWindowPos(self._hwnd, 0, 0, 0, width, height, ((SWP_NOZORDER | SWP_NOMOVE) | SWP_NOOWNERZORDER))
        self.dispatch_event('on_resize', self._width, self._height)
def get_size(self):
return (self._width, self._height)
    def set_minimum_size(self, width, height):
        """Record the minimum client size; enforced in WM_GETMINMAXINFO."""
        self._minimum_size = (width, height)
    def set_maximum_size(self, width, height):
        """Record the maximum client size; enforced in WM_GETMINMAXINFO."""
        self._maximum_size = (width, height)
    def activate(self):
        """Bring the window to the foreground and give it keyboard focus."""
        _user32.SetForegroundWindow(self._hwnd)
    def set_visible(self, visible=True):
        """Show or hide the window, dispatching on_show/on_hide accordingly."""
        if visible:
            insertAfter = HWND_TOP
            _user32.SetWindowPos(self._hwnd, insertAfter, 0, 0, 0, 0, ((SWP_NOMOVE | SWP_NOSIZE) | SWP_SHOWWINDOW))
            # Listeners may not have seen the size yet; resend it on show.
            self.dispatch_event('on_resize', self._width, self._height)
            self.activate()
            self.dispatch_event('on_show')
        else:
            _user32.ShowWindow(self._hwnd, SW_HIDE)
            self.dispatch_event('on_hide')
        self._visible = visible
        self.set_mouse_platform_visible()
    def minimize(self):
        """Minimize (iconify) the window."""
        _user32.ShowWindow(self._hwnd, SW_MINIMIZE)
    def maximize(self):
        """Maximize the window."""
        _user32.ShowWindow(self._hwnd, SW_MAXIMIZE)
    def set_caption(self, caption):
        """Set the window title bar text."""
        self._caption = caption
        _user32.SetWindowTextW(self._hwnd, c_wchar_p(caption))
    def set_mouse_platform_visible(self, platform_visible=None):
        """Show or hide the OS cursor, deriving visibility from window state
        when *platform_visible* is None."""
        if (platform_visible is None):
            # Visible when: cursor enabled, not exclusive, and not drawn in GL;
            # or whenever the pointer is outside the window / window unfocused.
            platform_visible = ((self._mouse_visible and (not self._exclusive_mouse) and ((not self._mouse_cursor.gl_drawable) or self._mouse_cursor.hw_drawable)) or ((not self._mouse_in_window) or (not self._has_focus)))
        if (platform_visible and self._mouse_cursor.hw_drawable):
            # Select the HCURSOR to install for this window class.
            if isinstance(self._mouse_cursor, Win32MouseCursor):
                cursor = self._mouse_cursor.cursor
            elif isinstance(self._mouse_cursor, DefaultMouseCursor):
                cursor = _user32.LoadCursorW(None, MAKEINTRESOURCE(IDC_ARROW))
            else:
                cursor = self._create_cursor_from_image(self._mouse_cursor)
            _user32.SetClassLongPtrW(self._view_hwnd, GCL_HCURSOR, cursor)
            _user32.SetCursor(cursor)
        if (platform_visible == self._mouse_platform_visible):
            return
        self._set_cursor_visibility(platform_visible)
        self._mouse_platform_visible = platform_visible
    def _set_cursor_visibility(self, platform_visible):
        """Toggle the OS cursor. ShowCursor keeps a global counter, so the
        last state is tracked in a module-level flag to call it only on change."""
        global _win32_cursor_visible
        if (_win32_cursor_visible != platform_visible):
            _user32.ShowCursor(platform_visible)
            _win32_cursor_visible = platform_visible
    def _update_clipped_cursor(self):
        """Confine the cursor to the view's client rectangle (exclusive mouse)."""
        # Don't clip while the user drags the title bar or a click is pending;
        # doing so would fight the system's own capture.
        if (self._in_title_bar or self._pending_click):
            return
        rect = RECT()
        _user32.GetClientRect(self._view_hwnd, byref(rect))
        _user32.MapWindowPoints(self._view_hwnd, HWND_DESKTOP, byref(rect), 2)
        # Shrink by 1px on each side so the cursor cannot touch the border.
        rect.top += 1
        rect.left += 1
        rect.right -= 1
        rect.bottom -= 1
        _user32.ClipCursor(byref(rect))
    def set_exclusive_mouse(self, exclusive=True):
        """Capture (or release) the mouse: raw input + cursor clipping."""
        if ((self._exclusive_mouse == exclusive) and (self._exclusive_mouse_focus == self._has_focus)):
            return
        # Usage page 1, usage 2 = mouse; default flags register for raw input.
        raw_mouse = RAWINPUTDEVICE(1, 2, 0, None)
        if (not exclusive):
            raw_mouse.dwFlags = RIDEV_REMOVE
            raw_mouse.hwndTarget = None
        if (not _user32.RegisterRawInputDevices(byref(raw_mouse), 1, sizeof(RAWINPUTDEVICE))):
            if exclusive:
                raise WindowException('Cannot enter mouse exclusive mode.')
        self._exclusive_mouse_buttons = 0
        if (exclusive and self._has_focus):
            self._update_clipped_cursor()
        else:
            # NULL rect releases any cursor confinement.
            _user32.ClipCursor(None)
        self._exclusive_mouse = exclusive
        self._exclusive_mouse_focus = self._has_focus
        self.set_mouse_platform_visible((not exclusive))
    def set_mouse_position(self, x, y, absolute=False):
        """Warp the cursor; coordinates are window-relative unless *absolute*."""
        if (not absolute):
            rect = RECT()
            _user32.GetClientRect(self._view_hwnd, byref(rect))
            _user32.MapWindowPoints(self._view_hwnd, HWND_DESKTOP, byref(rect), 2)
            x = (x + rect.left)
            # Flip y: window coordinates are bottom-up, screen is top-down.
            y = ((rect.top + (rect.bottom - rect.top)) - y)
        _user32.SetCursorPos(x, y)
    def set_exclusive_keyboard(self, exclusive=True):
        """Grab Alt+Tab as a hotkey so it reaches the app instead of the OS."""
        if ((self._exclusive_keyboard == exclusive) and (self._exclusive_keyboard_focus == self._has_focus)):
            return
        if (exclusive and self._has_focus):
            _user32.RegisterHotKey(self._hwnd, 0, WIN32_MOD_ALT, VK_TAB)
        elif (self._exclusive_keyboard and (not exclusive)):
            _user32.UnregisterHotKey(self._hwnd, 0)
        self._exclusive_keyboard = exclusive
        self._exclusive_keyboard_focus = self._has_focus
    def get_system_mouse_cursor(self, name):
        """Return a platform cursor object for the named system cursor."""
        if (name == self.CURSOR_DEFAULT):
            return DefaultMouseCursor()
        # Map pyglet cursor names to Win32 IDC_* resource ids.
        names = {self.CURSOR_CROSSHAIR: IDC_CROSS, self.CURSOR_HAND: IDC_HAND, self.CURSOR_HELP: IDC_HELP, self.CURSOR_NO: IDC_NO, self.CURSOR_SIZE: IDC_SIZEALL, self.CURSOR_SIZE_UP: IDC_SIZENS, self.CURSOR_SIZE_UP_RIGHT: IDC_SIZENESW, self.CURSOR_SIZE_RIGHT: IDC_SIZEWE, self.CURSOR_SIZE_DOWN_RIGHT: IDC_SIZENWSE, self.CURSOR_SIZE_DOWN: IDC_SIZENS, self.CURSOR_SIZE_DOWN_LEFT: IDC_SIZENESW, self.CURSOR_SIZE_LEFT: IDC_SIZEWE, self.CURSOR_SIZE_UP_LEFT: IDC_SIZENWSE, self.CURSOR_SIZE_UP_DOWN: IDC_SIZENS, self.CURSOR_SIZE_LEFT_RIGHT: IDC_SIZEWE, self.CURSOR_TEXT: IDC_IBEAM, self.CURSOR_WAIT: IDC_WAIT, self.CURSOR_WAIT_ARROW: IDC_APPSTARTING}
        if (name not in names):
            raise RuntimeError(('Unknown cursor name "%s"' % name))
        cursor = _user32.LoadCursorW(None, MAKEINTRESOURCE(names[name]))
        return Win32MouseCursor(cursor)
def set_icon(self, *images):
def best_image(width, height):
image = images[0]
for img in images:
if ((img.width == width) and (img.height == height)):
return img
elif ((img.width >= width) and ((img.width * img.height) > (image.width * image.height))):
image = img
return image
def get_icon(image):
format = 'BGRA'
pitch = (len(format) * image.width)
header = BITMAPV5HEADER()
header.bV5Size = sizeof(header)
header.bV5Width = image.width
header.bV5Height = image.height
header.bV5Planes = 1
header.bV5BitCount = 32
header.bV5Compression = BI_BITFIELDS
header.bV5RedMask =
header.bV5GreenMask = 65280
header.bV5BlueMask = 255
header.bV5AlphaMask =
hdc = _user32.GetDC(None)
dataptr = c_void_p()
bitmap = _gdi32.CreateDIBSection(hdc, byref(header), DIB_RGB_COLORS, byref(dataptr), None, 0)
_user32.ReleaseDC(None, hdc)
image = image.get_image_data()
data = image.get_data(format, pitch)
memmove(dataptr, data, len(data))
mask = _gdi32.CreateBitmap(image.width, image.height, 1, 1, None)
iconinfo = ICONINFO()
iconinfo.fIcon = True
iconinfo.hbmMask = mask
iconinfo.hbmColor = bitmap
icon = _user32.CreateIconIndirect(byref(iconinfo))
_gdi32.DeleteObject(mask)
_gdi32.DeleteObject(bitmap)
return icon
image = best_image(_user32.GetSystemMetrics(SM_CXICON), _user32.GetSystemMetrics(SM_CYICON))
icon = get_icon(image)
_user32.SetClassLongPtrW(self._hwnd, GCL_HICON, icon)
image = best_image(_user32.GetSystemMetrics(SM_CXSMICON), _user32.GetSystemMetrics(SM_CYSMICON))
icon = get_icon(image)
_user32.SetClassLongPtrW(self._hwnd, GCL_HICONSM, icon)
_cache()
    def _create_cursor_from_image(self, cursor):
        """Convert an image-backed cursor to a native HCURSOR (HICON with
        fIcon=False and a hotspot)."""
        fmt = 'BGRA'
        image = cursor.texture
        pitch = (len(fmt) * image.width)
        header = BITMAPINFOHEADER()
        header.biSize = sizeof(header)
        header.biWidth = image.width
        header.biHeight = image.height
        header.biPlanes = 1
        header.biBitCount = 32
        hdc = _user32.GetDC(None)
        dataptr = c_void_p()
        bitmap = _gdi32.CreateDIBSection(hdc, byref(header), DIB_RGB_COLORS, byref(dataptr), None, 0)
        _user32.ReleaseDC(None, hdc)
        image = image.get_image_data()
        data = image.get_data(fmt, pitch)
        memmove(dataptr, data, len(data))
        # 1-bit mask bitmap; the colour bitmap's alpha channel is used.
        mask = _gdi32.CreateBitmap(image.width, image.height, 1, 1, None)
        iconinfo = ICONINFO()
        iconinfo.fIcon = False
        iconinfo.hbmMask = mask
        iconinfo.hbmColor = bitmap
        iconinfo.xHotspot = int(cursor.hot_x)
        # Hotspot y is flipped: bitmap rows are bottom-up, hotspot is top-down.
        iconinfo.yHotspot = int((image.height - cursor.hot_y))
        icon = _user32.CreateIconIndirect(byref(iconinfo))
        # CreateIconIndirect copies the bitmaps; release ours.
        _gdi32.DeleteObject(mask)
        _gdi32.DeleteObject(bitmap)
        return icon
    def set_clipboard_text(self, text: str):
        """Place *text* on the Windows clipboard as CF_UNICODETEXT."""
        valid = _user32.OpenClipboard(self._view_hwnd)
        if (not valid):
            # Another process holds the clipboard; silently give up.
            return
        _user32.EmptyClipboard()
        # +1 for the terminating NUL, in UTF-16 code units.
        size = ((len(text) + 1) * sizeof(WCHAR))
        cb_data = _kernel32.GlobalAlloc(GMEM_MOVEABLE, size)
        locked_data = _kernel32.GlobalLock(cb_data)
        memmove(locked_data, text, size)
        _kernel32.GlobalUnlock(cb_data)
        # Ownership of cb_data passes to the clipboard on success.
        _user32.SetClipboardData(CF_UNICODETEXT, cb_data)
        _user32.CloseClipboard()
    def get_clipboard_text(self) -> str:
        """Return the clipboard's CF_UNICODETEXT contents, or '' if unavailable."""
        text = ''
        valid = _user32.OpenClipboard(self._view_hwnd)
        if (not valid):
            print('Could not open clipboard')
            return ''
        cb_obj = _user32.GetClipboardData(CF_UNICODETEXT)
        if cb_obj:
            # Lock the global memory handle to get a readable pointer.
            locked_data = _kernel32.GlobalLock(cb_obj)
            if locked_data:
                text = ctypes.wstring_at(locked_data)
                _kernel32.GlobalUnlock(cb_obj)
        _user32.CloseClipboard()
        return text
    def _client_to_window_size(self, width, height):
        """Return the outer window size whose client area is *width* x *height*."""
        rect = RECT()
        rect.left = 0
        rect.top = 0
        rect.right = width
        rect.bottom = height
        # AdjustWindowRectEx grows the rect by the frame/caption for our styles.
        _user32.AdjustWindowRectEx(byref(rect), self._ws_style, False, self._ex_ws_style)
        return ((rect.right - rect.left), (rect.bottom - rect.top))
    def _client_to_window_pos(self, x, y):
        """Return the outer window origin that puts the client origin at (x, y)."""
        rect = RECT()
        rect.left = x
        rect.top = y
        # Only left/top are used; AdjustWindowRectEx shifts them by the frame.
        _user32.AdjustWindowRectEx(byref(rect), self._ws_style, False, self._ex_ws_style)
        return (rect.left, rect.top)
    def dispatch_events(self):
        """Pump the Win32 message queue and dispatch queued pyglet events."""
        from pyglet import app
        app.platform_event_loop.start()
        # Allow handlers to dispatch immediately instead of queueing.
        self._allow_dispatch_event = True
        self.dispatch_pending_events()
        msg = MSG()
        while _user32.PeekMessageW(byref(msg), 0, 0, 0, PM_REMOVE):
            _user32.TranslateMessage(byref(msg))
            _user32.DispatchMessageW(byref(msg))
        self._allow_dispatch_event = False
    def dispatch_pending_events(self):
        """Flush the queue: named events go through EventDispatcher, stored
        handler callables are invoked directly."""
        while self._event_queue:
            event = self._event_queue.pop(0)
            if (type(event[0]) is str):
                # (event_name, *args) tuple queued by a handler.
                EventDispatcher.dispatch_event(self, *event)
            else:
                # (handler, msg, wParam, lParam) queued by the window proc.
                event[0](*event[1:])
    def _get_window_proc(self, event_handlers):
        """Return a WNDPROC closure dispatching messages via *event_handlers*."""
        def f(hwnd, msg, wParam, lParam):
            event_handler = event_handlers.get(msg, None)
            result = None
            if event_handler:
                if (self._allow_dispatch_event or (not self._enable_event_queue)):
                    result = event_handler(msg, wParam, lParam)
                else:
                    # Not inside dispatch_events: queue for later, claim handled.
                    result = 0
                    self._event_queue.append((event_handler, msg, wParam, lParam))
            if (result is None):
                # Unhandled (or handler deferred): fall back to default proc.
                result = _user32.DefWindowProcW(hwnd, msg, wParam, lParam)
            return result
        return f
    def _get_modifiers(self, key_lParam=0):
        """Build the pyglet modifier bitmask from tracked and live key state."""
        modifiers = 0
        # Shift is tracked via raw-input scancodes (42/54), not GetKeyState.
        if (self._keyboard_state[54] or self._keyboard_state[42]):
            modifiers |= key.MOD_SHIFT
        # High byte set => key currently down.
        if (_user32.GetKeyState(VK_CONTROL) & 65280):
            modifiers |= key.MOD_CTRL
        if (_user32.GetKeyState(VK_LWIN) & 65280):
            modifiers |= key.MOD_WINDOWS
        # Low byte set => lock toggle active.
        if (_user32.GetKeyState(VK_CAPITAL) & 255):
            modifiers |= key.MOD_CAPSLOCK
        if (_user32.GetKeyState(VK_NUMLOCK) & 255):
            modifiers |= key.MOD_NUMLOCK
        if (_user32.GetKeyState(VK_SCROLL) & 255):
            modifiers |= key.MOD_SCROLLLOCK
        if key_lParam:
            # Bit 29 of a key-message lParam: ALT was down for this key.
            if (key_lParam & (1 << 29)):
                modifiers |= key.MOD_ALT
        elif (_user32.GetKeyState(VK_MENU) < 0):
            modifiers |= key.MOD_ALT
        return modifiers
def _get_location(lParam):
x = c_int16((lParam & 65535)).value
y = c_int16((lParam >> 16)).value
return (x, y)
(WM_KEYDOWN)
(WM_KEYUP)
(WM_SYSKEYDOWN)
(WM_SYSKEYUP)
    def _event_key(self, msg, wParam, lParam):
        """Handle WM_(SYS)KEYDOWN/UP: key events and text-motion dispatch."""
        repeat = False
        # Bit 30: key was already down before this message (auto-repeat).
        if (lParam & (1 << 30)):
            if (msg not in (WM_KEYUP, WM_SYSKEYUP)):
                repeat = True
            ev = 'on_key_release'
        else:
            ev = 'on_key_press'
        symbol = keymap.get(wParam, None)
        if (symbol is None):
            # Fall back to the character the virtual key produces.
            ch = _user32.MapVirtualKeyW(wParam, MAPVK_VK_TO_CHAR)
            symbol = chmap.get(ch)
        if (symbol is None):
            symbol = key.user_key(wParam)
        # Bit 24: extended key => right-hand variant of CTRL/ALT.
        elif ((symbol == key.LCTRL) and (lParam & (1 << 24))):
            symbol = key.RCTRL
        elif ((symbol == key.LALT) and (lParam & (1 << 24))):
            symbol = key.RALT
        if (wParam == VK_SHIFT):
            # Shift is handled via raw input (see _event_raw_input).
            return
        modifiers = self._get_modifiers(lParam)
        if (not repeat):
            self.dispatch_event(ev, symbol, modifiers)
        ctrl = ((modifiers & key.MOD_CTRL) != 0)
        # Text-motion events fire on key-down (including repeats).
        if (((symbol, ctrl) in _motion_map) and (msg not in (WM_KEYUP, WM_SYSKEYUP))):
            motion = _motion_map[(symbol, ctrl)]
            if (modifiers & key.MOD_SHIFT):
                self.dispatch_event('on_text_motion_select', motion)
            else:
                self.dispatch_event('on_text_motion', motion)
        if self._exclusive_keyboard:
            # Swallow the message so system shortcuts don't trigger.
            return 0
        else:
            return None
(WM_NCLBUTTONDOWN)
    def _event_ncl_button_down(self, msg, wParam, lParam):
        # WM_NCLBUTTONDOWN: the user clicked the non-client area (title bar);
        # suspend cursor clipping until capture changes.
        self._in_title_bar = True
(WM_CAPTURECHANGED)
    def _event_capture_changed(self, msg, wParam, lParam):
        """WM_CAPTURECHANGED: title-bar drag or mouse capture ended."""
        self._in_title_bar = False
        if self._exclusive_mouse:
            state = _user32.GetAsyncKeyState(VK_LBUTTON)
            # 0x8000: left button still physically down.
            if (not (state & 32768)):
                if self._pending_click:
                    self._pending_click = False
            if (self._has_focus or (not self._hidden)):
                # Re-apply cursor clipping now that capture is back.
                self._update_clipped_cursor()
(WM_CHAR)
    def _event_char(self, msg, wParam, lParam):
        """WM_CHAR: dispatch printable text (control chars filtered, CR allowed)."""
        text = chr(wParam)
        # 'Cc' = Unicode control category; '\r' passes so Enter yields text.
        if ((unicodedata.category(text) != 'Cc') or (text == '\r')):
            self.dispatch_event('on_text', text)
        return 0
(WM_INPUT)
    def _event_raw_input(self, msg, wParam, lParam):
        """WM_INPUT: raw mouse deltas (exclusive mode) and shift-key tracking."""
        hRawInput = cast(lParam, HRAWINPUT)
        inp = RAWINPUT()
        size = UINT(sizeof(inp))
        _user32.GetRawInputData(hRawInput, RID_INPUT, byref(inp), byref(size), sizeof(RAWINPUTHEADER))
        if (inp.header.dwType == RIM_TYPEMOUSE):
            if (not self._exclusive_mouse):
                return 0
            rmouse = inp.data.mouse
            if ((rmouse.usFlags & 1) == MOUSE_MOVE_RELATIVE):
                # Device reports relative deltas directly.
                if ((rmouse.lLastX != 0) or (rmouse.lLastY != 0)):
                    # Motion event (x, y) is (0, 0): position is meaningless
                    # in exclusive mode; y delta is flipped to bottom-up.
                    if self._exclusive_mouse_buttons:
                        self.dispatch_event('on_mouse_drag', 0, 0, rmouse.lLastX, (- rmouse.lLastY), self._exclusive_mouse_buttons, self._get_modifiers())
                    else:
                        self.dispatch_event('on_mouse_motion', 0, 0, rmouse.lLastX, (- rmouse.lLastY))
            else:
                # Absolute device (e.g. tablet/RDP): derive deltas ourselves.
                if (self._exclusive_mouse_lpos is None):
                    self._exclusive_mouse_lpos = (rmouse.lLastX, rmouse.lLastY)
                (last_x, last_y) = self._exclusive_mouse_lpos
                rel_x = (rmouse.lLastX - last_x)
                rel_y = (rmouse.lLastY - last_y)
                if ((rel_x != 0) or (rel_y != 0.0)):
                    if self._exclusive_mouse_buttons:
                        self.dispatch_event('on_mouse_drag', 0, 0, rmouse.lLastX, (- rmouse.lLastY), self._exclusive_mouse_buttons, self._get_modifiers())
                    else:
                        self.dispatch_event('on_mouse_motion', 0, 0, rel_x, rel_y)
                    self._exclusive_mouse_lpos = (rmouse.lLastX, rmouse.lLastY)
        elif (inp.header.dwType == RIM_TYPEKEYBOARD):
            # VKey 255 marks fake/extended key sequences; ignore.
            if (inp.data.keyboard.VKey == 255):
                return 0
            key_up = (inp.data.keyboard.Flags & RI_KEY_BREAK)
            # Raw scancodes distinguish left (42) from right (54) shift, which
            # WM_KEYDOWN cannot; track state to emit press/release exactly once.
            if (inp.data.keyboard.MakeCode == 42):
                if ((not key_up) and (not self._keyboard_state[42])):
                    self._keyboard_state[42] = True
                    self.dispatch_event('on_key_press', key.LSHIFT, self._get_modifiers())
                elif (key_up and self._keyboard_state[42]):
                    self._keyboard_state[42] = False
                    self.dispatch_event('on_key_release', key.LSHIFT, self._get_modifiers())
            elif (inp.data.keyboard.MakeCode == 54):
                if ((not key_up) and (not self._keyboard_state[54])):
                    self._keyboard_state[54] = True
                    self.dispatch_event('on_key_press', key.RSHIFT, self._get_modifiers())
                elif (key_up and self._keyboard_state[54]):
                    self._keyboard_state[54] = False
                    self.dispatch_event('on_key_release', key.RSHIFT, self._get_modifiers())
        return 0
(WM_MOUSEMOVE)
    def _event_mousemove(self, msg, wParam, lParam):
        """WM_MOUSEMOVE: dispatch on_mouse_motion/on_mouse_drag and enter events."""
        if (self._exclusive_mouse and self._has_focus):
            # Exclusive mode uses raw input instead (see _event_raw_input).
            return 0
        (x, y) = self._get_location(lParam)
        # Flip to bottom-up window coordinates.
        y = (self._height - y)
        dx = (x - self._mouse_x)
        dy = (y - self._mouse_y)
        if (not self._tracking):
            # First movement inside the window: fire on_mouse_enter and arm
            # TrackMouseEvent so we receive WM_MOUSELEAVE.
            self._mouse_in_window = True
            self.set_mouse_platform_visible()
            self.dispatch_event('on_mouse_enter', x, y)
            self._tracking = True
            track = TRACKMOUSEEVENT()
            track.cbSize = sizeof(track)
            track.dwFlags = TME_LEAVE
            track.hwndTrack = self._view_hwnd
            _user32.TrackMouseEvent(byref(track))
        if ((self._mouse_x == x) and (self._mouse_y == y)):
            # Spurious move (e.g. after SetCursor); nothing changed.
            return 0
        self._mouse_x = x
        self._mouse_y = y
        buttons = 0
        if (wParam & MK_LBUTTON):
            buttons |= mouse.LEFT
        if (wParam & MK_MBUTTON):
            buttons |= mouse.MIDDLE
        if (wParam & MK_RBUTTON):
            buttons |= mouse.RIGHT
        if (wParam & MK_XBUTTON1):
            buttons |= mouse.MOUSE4
        if (wParam & MK_XBUTTON2):
            buttons |= mouse.MOUSE5
        if buttons:
            # Any button held => this is a drag.
            modifiers = self._get_modifiers()
            self.dispatch_event('on_mouse_drag', x, y, dx, dy, buttons, modifiers)
        else:
            self.dispatch_event('on_mouse_motion', x, y, dx, dy)
        return 0
(WM_MOUSELEAVE)
    def _event_mouseleave(self, msg, wParam, lParam):
        """WM_MOUSELEAVE: the cursor left the view; dispatch on_mouse_leave."""
        point = POINT()
        _user32.GetCursorPos(byref(point))
        _user32.ScreenToClient(self._view_hwnd, byref(point))
        x = point.x
        # Flip to bottom-up window coordinates.
        y = (self._height - point.y)
        # Re-arm TrackMouseEvent on the next WM_MOUSEMOVE.
        self._tracking = False
        self._mouse_in_window = False
        self.set_mouse_platform_visible()
        self.dispatch_event('on_mouse_leave', x, y)
        return 0
    def _event_mousebutton(self, ev, button, lParam):
        """Shared press/release logic: manage capture, flip y, dispatch *ev*."""
        if (ev == 'on_mouse_press'):
            # Capture so we still get the matching release outside the window.
            _user32.SetCapture(self._view_hwnd)
        else:
            _user32.ReleaseCapture()
        (x, y) = self._get_location(lParam)
        y = (self._height - y)
        self.dispatch_event(ev, x, y, button, self._get_modifiers())
        return 0
(WM_LBUTTONDOWN)
    def _event_lbuttondown(self, msg, wParam, lParam):
        # WM_LBUTTONDOWN
        return self._event_mousebutton('on_mouse_press', mouse.LEFT, lParam)
(WM_LBUTTONUP)
    def _event_lbuttonup(self, msg, wParam, lParam):
        # WM_LBUTTONUP
        return self._event_mousebutton('on_mouse_release', mouse.LEFT, lParam)
(WM_MBUTTONDOWN)
    def _event_mbuttondown(self, msg, wParam, lParam):
        # WM_MBUTTONDOWN
        return self._event_mousebutton('on_mouse_press', mouse.MIDDLE, lParam)
(WM_MBUTTONUP)
    def _event_mbuttonup(self, msg, wParam, lParam):
        # WM_MBUTTONUP
        return self._event_mousebutton('on_mouse_release', mouse.MIDDLE, lParam)
(WM_RBUTTONDOWN)
    def _event_rbuttondown(self, msg, wParam, lParam):
        # WM_RBUTTONDOWN
        return self._event_mousebutton('on_mouse_press', mouse.RIGHT, lParam)
(WM_RBUTTONUP)
    def _event_rbuttonup(self, msg, wParam, lParam):
        # WM_RBUTTONUP
        return self._event_mousebutton('on_mouse_release', mouse.RIGHT, lParam)
(WM_XBUTTONDOWN)
    def _event_xbuttondown(self, msg, wParam, lParam):
        """WM_XBUTTONDOWN: high word of wParam selects XBUTTON1 or XBUTTON2."""
        if (c_short((wParam >> 16)).value == 1):
            button = mouse.MOUSE4
        if (c_short((wParam >> 16)).value == 2):
            button = mouse.MOUSE5
        # NOTE(review): if the high word is neither 1 nor 2, ``button`` is
        # unbound and the call below raises — confirm this cannot occur.
        return self._event_mousebutton('on_mouse_press', button, lParam)
(WM_XBUTTONUP)
    def _event_xbuttonup(self, msg, wParam, lParam):
        """WM_XBUTTONUP: high word of wParam selects XBUTTON1 or XBUTTON2."""
        if (c_short((wParam >> 16)).value == 1):
            button = mouse.MOUSE4
        if (c_short((wParam >> 16)).value == 2):
            button = mouse.MOUSE5
        # NOTE(review): ``button`` may be unbound for other values — see
        # _event_xbuttondown.
        return self._event_mousebutton('on_mouse_release', button, lParam)
(WM_MOUSEWHEEL)
    def _event_mousewheel(self, msg, wParam, lParam):
        """WM_MOUSEWHEEL: scroll in multiples of WHEEL_DELTA (120 per notch)."""
        delta = c_short((wParam >> 16)).value
        self.dispatch_event('on_mouse_scroll', self._mouse_x, self._mouse_y, 0, (delta / float(WHEEL_DELTA)))
        return 0
(WM_CLOSE)
    def _event_close(self, msg, wParam, lParam):
        # WM_CLOSE: let the application decide; returning 0 suppresses the
        # default DestroyWindow behaviour.
        self.dispatch_event('on_close')
        return 0
(WM_PAINT)
    def _event_paint(self, msg, wParam, lParam):
        # WM_PAINT: returning None lets DefWindowProc validate the region.
        self.dispatch_event('on_expose')
        return None
(WM_SIZING)
    def _event_sizing(self, msg, wParam, lParam):
        # WM_SIZING: the user is dragging a resize border; tell the event
        # loop it will be blocked inside the modal size loop.
        from pyglet import app
        if (app.event_loop is not None):
            app.event_loop.enter_blocking()
        # TRUE: we processed the message.
        return 1
(WM_SIZE)
    def _event_size(self, msg, wParam, lParam):
        """WM_SIZE: track minimize/restore and propagate the new client size."""
        if (not self._dc):
            # Window not fully created yet; let DefWindowProc handle it.
            return None
        if (wParam == SIZE_MINIMIZED):
            self._hidden = True
            self.dispatch_event('on_hide')
            return 0
        if self._hidden:
            # Restored from minimized state.
            self._hidden = False
            self.dispatch_event('on_show')
        # lParam packs the new client width/height like a mouse location.
        (w, h) = self._get_location(lParam)
        if (not self._fullscreen):
            (self._width, self._height) = (w, h)
        self._update_view_location(self._width, self._height)
        if self._exclusive_mouse:
            self._update_clipped_cursor()
        self.switch_to()
        self.dispatch_event('on_resize', self._width, self._height)
        return 0
(WM_SYSCOMMAND)
    def _event_syscommand(self, msg, wParam, lParam):
        """WM_SYSCOMMAND: swallow the Alt menu; flag modal move/size loops."""
        # NOTE(review): (1 >> 16) evaluates to 0, so this swallows SC_KEYMENU
        # whenever lParam & 0 <= 0 (always) — confirm the intended mask.
        if ((wParam == SC_KEYMENU) and ((lParam & (1 >> 16)) <= 0)):
            return 0
        # Low 4 bits of wParam are used internally by the system; mask off.
        if ((wParam & 65520) in (SC_MOVE, SC_SIZE)):
            from pyglet import app
            if (app.event_loop is not None):
                app.event_loop.enter_blocking()
(WM_MOVE)
    def _event_move(self, msg, wParam, lParam):
        # WM_MOVE: lParam packs the new client-area position.
        (x, y) = self._get_location(lParam)
        self.dispatch_event('on_move', x, y)
        return 0
(WM_SETCURSOR)
    def _event_setcursor(self, msg, wParam, lParam):
        """WM_SETCURSOR: hide the cursor over the client area in exclusive
        mode, but show it over the caption and caption buttons."""
        if (self._exclusive_mouse and (not self._mouse_platform_visible)):
            # Low word of lParam is the hit-test code for the cursor position.
            (lo, hi) = self._get_location(lParam)
            if (lo == HTCLIENT):
                self._set_cursor_visibility(False)
                return 1
            elif (lo in (HTCAPTION, HTCLOSE, HTMAXBUTTON, HTMINBUTTON)):
                self._set_cursor_visibility(True)
                return 1
(WM_ENTERSIZEMOVE)
    def _event_entersizemove(self, msg, wParam, lParam):
        # WM_ENTERSIZEMOVE: a modal move/size loop begins.
        self._moving = True
        from pyglet import app
        if (app.event_loop is not None):
            # NOTE(review): exits blocking here while WM_SIZING enters it —
            # confirm this enter/exit pairing is intentional.
            app.event_loop.exit_blocking()
(WM_EXITSIZEMOVE)
    def _event_exitsizemove(self, msg, wParam, lParam):
        # WM_EXITSIZEMOVE: the modal move/size loop ended; resume the event
        # loop and restore cursor clipping if the mouse is exclusive.
        self._moving = False
        from pyglet import app
        if (app.event_loop is not None):
            app.event_loop.exit_blocking()
        if self._exclusive_mouse:
            self._update_clipped_cursor()
(WM_SETFOCUS)
    def _event_setfocus(self, msg, wParam, lParam):
        """WM_SETFOCUS: reacquire exclusive modes that were suspended."""
        self.dispatch_event('on_activate')
        self._has_focus = True
        if self._exclusive_mouse:
            if _user32.GetAsyncKeyState(VK_LBUTTON):
                # A button is held during focus change (e.g. title-bar click):
                # delay cursor clipping until it is released.
                self._pending_click = True
        self.set_exclusive_keyboard(self._exclusive_keyboard)
        self.set_exclusive_mouse(self._exclusive_mouse)
        return 0
(WM_KILLFOCUS)
    def _event_killfocus(self, msg, wParam, lParam):
        """WM_KILLFOCUS: suspend exclusive modes but remember they were set."""
        self.dispatch_event('on_deactivate')
        self._has_focus = False
        exclusive_keyboard = self._exclusive_keyboard
        exclusive_mouse = self._exclusive_mouse
        # Release grabs while unfocused...
        self.set_exclusive_keyboard(False)
        self.set_exclusive_mouse(False)
        # ...and forget tracked key state, since we won't see releases.
        for symbol in self._keyboard_state:
            self._keyboard_state[symbol] = False
        # Restore the flags so _event_setfocus can re-enable the grabs.
        self._exclusive_keyboard = exclusive_keyboard
        self._exclusive_keyboard_focus = False
        self._exclusive_mouse = exclusive_mouse
        self._exclusive_mouse_focus = False
        return 0
(WM_GETMINMAXINFO)
    def _event_getminmaxinfo(self, msg, wParam, lParam):
        """WM_GETMINMAXINFO: impose the configured min/max client sizes."""
        info = MINMAXINFO.from_address(lParam)
        if self._minimum_size:
            # Convert from client size to outer (tracked) window size.
            (info.ptMinTrackSize.x, info.ptMinTrackSize.y) = self._client_to_window_size(*self._minimum_size)
        if self._maximum_size:
            (info.ptMaxTrackSize.x, info.ptMaxTrackSize.y) = self._client_to_window_size(*self._maximum_size)
        return 0
(WM_ERASEBKGND)
    def _event_erasebkgnd(self, msg, wParam, lParam):
        # WM_ERASEBKGND (frame window): let the default erase happen only
        # in fullscreen; otherwise claim it to avoid flicker.
        if self._fullscreen:
            return 0
        else:
            return 1
(WM_ERASEBKGND)
    def _event_erasebkgnd_view(self, msg, wParam, lParam):
        # WM_ERASEBKGND (view window): GL repaints everything; never erase.
        return 1
(WM_DROPFILES)
def _event_drop_files(self, msg, wParam, lParam):
drop = wParam
file_count = _shell32.DragQueryFileW(drop, , None, 0)
point = POINT()
_shell32.DragQueryPoint(drop, ctypes.byref(point))
paths = []
for i in range(file_count):
length = _shell32.DragQueryFileW(drop, i, None, 0)
buffer = create_unicode_buffer((length + 1))
_shell32.DragQueryFileW(drop, i, buffer, (length + 1))
paths.append(buffer.value)
_shell32.DragFinish(drop)
self.dispatch_event('on_file_drop', point.x, (self._height - point.y), paths)
return 0 |
class VGG(nn.Module):
    """VGG-style feature extractor followed by a 1x1-conv classifier head."""

    def __init__(self, builder, features):
        super(VGG, self).__init__()
        self.features = features
        # 10 classes for CIFAR10; any other configured dataset gets 100.
        if parser_args.set == 'CIFAR10':
            num_classes = 10
        else:
            num_classes = 100
        self.linear = builder.conv1x1(512, num_classes)

    def forward(self, x):
        feats = self.features(x)
        logits = self.linear(feats)
        # Drop the singleton spatial dimensions left by the 1x1 conv.
        return logits.squeeze()
class Bobby(Configurable):
    """Configurable with two pluggable handler methods and two options."""
    handler = Method()
    handler2 = Method()
    foo = Option(positional=True)   # positional option
    bar = Option(required=False)    # optional keyword option

    def think(self, context):
        # Generator hook yielding a single marker value.
        (yield 'different')

    def __call__(self, think, *args, **kwargs):
        # Invoke both handlers with distinguishing first arguments,
        # forwarding any extra call arguments.
        self.handler('1', *args, **kwargs)
        self.handler2('2', *args, **kwargs)
class MLPRegression(nn.Module):
    """Three-layer MLP producing a single scalar regression output per sample."""

    def __init__(self, input_dim=86):
        super(MLPRegression, self).__init__()
        # input_dim -> 256 -> 128 -> 1
        self.fc1 = nn.Linear(input_dim, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 1)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
.parametrize('fun', [ct.series, ct.parallel, ct.feedback])
.parametrize('ltype', bd_types)
.parametrize('rtype', bd_types)
def test_bdalg_type_conversions(fun, ltype, rtype, sys_dict):
    """Check result types of series/parallel/feedback across system types.

    (The pytest.mark.parametrize decorators for fun/ltype/rtype were
    stripped in this copy; sys_dict is a fixture of sample systems.)
    """
    leftsys = sys_dict[ltype]
    rightsys = sys_dict[rtype]
    # Expected result code from the type-combination table; None => untested.
    expected = bd_expect[bd_types.index(ltype)][1][bd_types.index(rtype)]
    if (expected is None):
        return None
    if (isinstance(leftsys, ct.NonlinearIOSystem) and (leftsys == rightsys)):
        # Nonlinear systems can't be combined with themselves; use a copy.
        rightsys = leftsys.copy()
    if ((expected == 'E') or (expected[0] == 'x')):
        # 'E'/'x…' codes mark combinations that must raise.
        with pytest.raises((TypeError, ValueError)):
            fun(leftsys, rightsys)
    else:
        if (fun == ct.series):
            # NOTE(review): series swaps the operand order here — presumably
            # to match series' right-to-left composition; confirm intent.
            result = fun(rightsys, leftsys)
        else:
            result = fun(leftsys, rightsys)
        assert isinstance(result, type_dict[expected])
        # Label bookkeeping must stay consistent with the signal counts.
        if isinstance(result, ct.InputOutputSystem):
            assert (len(result.input_labels) == result.ninputs)
            assert (len(result.output_labels) == result.noutputs)
            if (result.nstates is not None):
                assert (len(result.state_labels) == result.nstates)
class TransformerSentenceEncoderLayer(nn.Module):
def __init__(self, embedding_dim: float=768, ffn_embedding_dim: float=3072, num_attention_heads: float=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', add_bias_kv: bool=False, add_zero_attn: bool=False, export: bool=False) -> None:
super().__init__()
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True)
self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
def forward(self, x: torch.Tensor, self_attn_mask: torch.Tensor=None, self_attn_padding_mask: torch.Tensor=None):
residual = x
(x, attn) = self.self_attn(query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.final_layer_norm(x)
return (x, attn) |
def print_table(rows, headers, nicks, order):
    """Render *rows* as an aligned, colorised text table headed by *headers*.

    Rows are filtered/ordered via filter_table(); nothing is printed when
    no rows survive filtering.
    """
    rows.insert(0, headers)
    rows = filter_table(rows, nicks, order)
    if not rows:
        return
    # Column widths: the widest cell in each column (header row included).
    widths = [max(len(row[col]) for row in rows) for col in range(len(rows[0]))]
    separator = ' %s ' % Colorise.gray('|')
    format_string = separator.join('%%-%ds' % w for w in widths)
    header = []
    for i, h in enumerate(rows.pop(0)):
        header.append(h.ljust(widths[i], ' '))
    line_width = len(' '.join(header)) + 2
    header = [Colorise.bold(h) for h in header]
    header_line = ' ' + (' %s ' % Colorise.gray('|')).join(header)
    print_(header_line.rstrip())
    print_(Colorise.gray('-' * line_width))
    for row in rows:
        print_(' ' + (format_string % tuple(row)).rstrip())
class TestCustomScripts:
    """``hatch fmt`` with user-defined scripts in the ``hatch-static-analysis`` env.

    Every test creates a fresh project whose environment config overrides the
    default format/lint scripts, runs ``hatch fmt`` with one flag combination,
    and verifies exactly which shell commands were executed.  The six original
    tests shared ~90% of their bodies; the common parts now live in the two
    private helpers below (test names and signatures are unchanged).
    """

    def _set_up_project(self, hatch, temp_dir, config_file, mocker):
        """Create a project configured with custom static-analysis scripts.

        Returns ``(project_path, data_path, run)`` where ``run`` is the
        patched ``subprocess.run`` recording every executed command.
        """
        config_file.model.template.plugins['default']['tests'] = False
        config_file.save()

        project_name = 'My.App'
        with temp_dir.as_cwd():
            result = hatch('new', project_name)
        assert result.exit_code == 0, result.output

        project_path = temp_dir / 'my-app'
        data_path = temp_dir / 'data'
        data_path.mkdir()

        project = Project(project_path)
        config = dict(project.raw_config)
        # Custom scripts: ``lint-fix`` is an alias for ``lint-check``.
        config['tool']['hatch']['envs'] = {
            'hatch-static-analysis': {
                'config-path': 'none',
                'dependencies': ['black', 'flake8', 'isort'],
                'scripts': {
                    'format-check': ['black --check --diff {args:.}', 'isort --check-only --diff {args:.}'],
                    'format-fix': ['isort {args:.}', 'black {args:.}'],
                    'lint-check': 'flake8 {args:.}',
                    'lint-fix': 'lint-check',
                },
            }
        }
        project.save_config(config)

        run = mocker.patch('subprocess.run', return_value=CompletedProcess([], 0, stdout=b''))
        mocker.patch('hatch.env.virtual.VirtualEnvironment.exists', return_value=True)
        mocker.patch('hatch.env.virtual.VirtualEnvironment.dependency_hash', return_value='')
        mocker.patch('hatch.env.virtual.VirtualEnvironment.command_context')
        return project_path, data_path, run

    def _run_fmt(self, hatch, project_path, data_path, *args):
        """Run ``hatch fmt *args`` in the project and apply the shared checks."""
        with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
            result = hatch('fmt', *args)
        assert result.exit_code == 0, result.output
        assert not result.output
        # With a custom ``config-path`` of 'none', no internal default
        # configuration directory must be materialized.
        root_data_path = data_path / 'env' / '.internal' / 'hatch-static-analysis' / '.config'
        assert not root_data_path.is_dir()

    def test_only_linter_fix(self, hatch, temp_dir, config_file, mocker):
        project_path, data_path, run = self._set_up_project(hatch, temp_dir, config_file, mocker)
        self._run_fmt(hatch, project_path, data_path, '--linter')
        # ``lint-fix`` aliases ``lint-check``, so only flake8 runs.
        assert run.call_args_list == [mocker.call('flake8 .', shell=True)]

    def test_only_linter_check(self, hatch, temp_dir, config_file, mocker):
        project_path, data_path, run = self._set_up_project(hatch, temp_dir, config_file, mocker)
        self._run_fmt(hatch, project_path, data_path, '--check', '--linter')
        assert run.call_args_list == [mocker.call('flake8 .', shell=True)]

    def test_only_formatter_fix(self, hatch, temp_dir, config_file, mocker):
        project_path, data_path, run = self._set_up_project(hatch, temp_dir, config_file, mocker)
        self._run_fmt(hatch, project_path, data_path, '--formatter')
        assert run.call_args_list == [
            mocker.call('isort .', shell=True),
            mocker.call('black .', shell=True),
        ]

    def test_only_formatter_check(self, hatch, temp_dir, config_file, mocker):
        project_path, data_path, run = self._set_up_project(hatch, temp_dir, config_file, mocker)
        self._run_fmt(hatch, project_path, data_path, '--check', '--formatter')
        assert run.call_args_list == [
            mocker.call('black --check --diff .', shell=True),
            mocker.call('isort --check-only --diff .', shell=True),
        ]

    def test_fix(self, hatch, temp_dir, config_file, mocker):
        project_path, data_path, run = self._set_up_project(hatch, temp_dir, config_file, mocker)
        self._run_fmt(hatch, project_path, data_path)
        # Default run: lint fix first, then the formatters.
        assert run.call_args_list == [
            mocker.call('flake8 .', shell=True),
            mocker.call('isort .', shell=True),
            mocker.call('black .', shell=True),
        ]

    def test_check(self, hatch, temp_dir, config_file, mocker):
        project_path, data_path, run = self._set_up_project(hatch, temp_dir, config_file, mocker)
        self._run_fmt(hatch, project_path, data_path, '--check')
        assert run.call_args_list == [
            mocker.call('flake8 .', shell=True),
            mocker.call('black --check --diff .', shell=True),
            mocker.call('isort --check-only --diff .', shell=True),
        ]
@pytest.mark.parametrize('prefer_grpc', [False, True])
@pytest.mark.parametrize('numpy_upload', [False, True])
@pytest.mark.parametrize('local_mode', [False, True])
def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode):
    """End-to-end smoke test of the Qdrant client.

    Exercises collection creation, uploads, aliases, payload indexes, search
    (plain, filtered, batched), recommendations, scrolling, and point/payload
    CRUD, in both server and ``:memory:`` (local) mode, over REST and gRPC.

    Fix: the three ``parametrize`` decorator lines had lost their
    ``@pytest.mark`` prefix (bare ``.parametrize(...)`` is a syntax error);
    they are restored here.  The body is otherwise unchanged.
    """
    vectors_path = create_random_vectors()
    if numpy_upload:
        vectors = np.memmap(vectors_path, dtype='float32', mode='r', shape=(NUM_VECTORS, DIM))
        vectors_2 = vectors[2].tolist()
    else:
        vectors = [np.random.rand(DIM).tolist() for _ in range(NUM_VECTORS)]
        vectors_2 = vectors[2]
    payload = random_payload(NUM_VECTORS)
    if local_mode:
        client = QdrantClient(location=':memory:', prefer_grpc=prefer_grpc)
    else:
        client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT)
    client.recreate_collection(collection_name=COLLECTION_NAME, vectors_config=VectorParams(size=DIM, distance=Distance.DOT), timeout=TIMEOUT)
    collections = client.get_collections().collections
    for collection in collections:
        print(to_dict(collection))
    test_collection = client.get_collection(COLLECTION_NAME)
    pprint(to_dict(test_collection))
    client.upload_collection(collection_name=COLLECTION_NAME, vectors=vectors, payload=payload, ids=None, parallel=2)
    sleep(1)
    result_count = client.count(COLLECTION_NAME, count_filter=Filter(must=[FieldCondition(key='rand_number', range=Range(gte=0.5))]))
    assert (result_count.count < 900)
    assert (result_count.count > 100)
    client.update_collection_aliases(change_aliases_operations=[CreateAliasOperation(create_alias=CreateAlias(collection_name=COLLECTION_NAME, alias_name=COLLECTION_NAME_ALIAS))])
    version = os.getenv('QDRANT_VERSION')
    collection_aliases = client.get_collection_aliases(COLLECTION_NAME)
    assert (collection_aliases.aliases[0].collection_name == COLLECTION_NAME)
    assert (collection_aliases.aliases[0].alias_name == COLLECTION_NAME_ALIAS)
    all_aliases = client.get_aliases()
    assert (all_aliases.aliases[0].collection_name == COLLECTION_NAME)
    assert (all_aliases.aliases[0].alias_name == COLLECTION_NAME_ALIAS)
    index_create_result = client.create_payload_index(COLLECTION_NAME, field_name='rand_number', field_schema=PayloadSchemaType.FLOAT)
    pprint(to_dict(index_create_result))
    test_collection = client.get_collection(COLLECTION_NAME_ALIAS)
    pprint(to_dict(test_collection))
    query_vector = np.random.rand(DIM)
    query_vector_1: List[float] = list(np.random.rand(DIM))
    query_vector_2: List[float] = list(np.random.rand(DIM))
    query_vector_3: List[float] = list(np.random.rand(DIM))
    hits = client.search(collection_name=COLLECTION_NAME, query_vector=query_vector, query_filter=None, with_payload=True, limit=5)
    assert (len(hits) == 5)
    print('Search result:')
    for hit in hits:
        print(hit)
    client.create_payload_index(COLLECTION_NAME, 'id_str', field_schema=PayloadSchemaType.KEYWORD)
    hits = client.search(collection_name=COLLECTION_NAME, query_vector=query_vector, query_filter=Filter(must=[FieldCondition(key='id_str', match=MatchValue(value='11'))]), with_payload=True, limit=5)
    assert ('11' in hits[0].payload['id_str'])
    hits_should = client.search(collection_name=COLLECTION_NAME, query_vector=query_vector, query_filter=Filter(should=[FieldCondition(key='id_str', match=MatchValue(value='10')), FieldCondition(key='id_str', match=MatchValue(value='11'))]), with_payload=True, limit=5)
    hits_match_any = client.search(collection_name=COLLECTION_NAME, query_vector=query_vector, query_filter=Filter(must=[FieldCondition(key='id_str', match=MatchAny(any=['10', '11']))]), with_payload=True, limit=5)
    assert (hits_should == hits_match_any)
    hits = client.search(collection_name=COLLECTION_NAME, query_vector=query_vector, query_filter=Filter(must=[FieldCondition(key='rand_number', range=Range(gte=0.5))]), append_payload=True, limit=5)
    print('Filtered search result (`rand_number` >= 0.5):')
    for hit in hits:
        print(hit)
    got_points = client.retrieve(collection_name=COLLECTION_NAME, ids=[1, 2, 3], with_payload=True, with_vectors=True)
    # Full-text index: every scrolled hit must contain the queried word.
    client.create_payload_index(COLLECTION_NAME, 'words', field_schema=TextIndexParams(type='text', tokenizer=TokenizerType.WORD, min_token_len=2, max_token_len=15, lowercase=True))
    for i in range(10):
        query_word = random_real_word()
        (hits, _offset) = client.scroll(collection_name=COLLECTION_NAME, scroll_filter=Filter(must=[FieldCondition(key='words', match=MatchText(text=query_word))]), with_payload=True, limit=10)
        assert (len(hits) > 0)
        for hit in hits:
            assert (query_word in hit.payload['words'])
    # Batched search/recommend must agree with the equivalent single calls.
    filter_1 = Filter(must=[FieldCondition(key='rand_number', range=Range(gte=0.3))])
    filter_2 = Filter(must=[FieldCondition(key='rand_number', range=Range(gte=0.5))])
    filter_3 = Filter(must=[FieldCondition(key='rand_number', range=Range(gte=0.7))])
    search_queries = [SearchRequest(vector=query_vector_1, filter=filter_1, limit=5, with_payload=True), SearchRequest(vector=query_vector_2, filter=filter_2, limit=5, with_payload=True), SearchRequest(vector=query_vector_3, filter=filter_3, limit=5, with_payload=True)]
    single_search_result_1 = client.search(collection_name=COLLECTION_NAME, query_vector=query_vector_1, query_filter=filter_1, limit=5)
    single_search_result_2 = client.search(collection_name=COLLECTION_NAME, query_vector=query_vector_2, query_filter=filter_2, limit=5)
    single_search_result_3 = client.search(collection_name=COLLECTION_NAME, query_vector=query_vector_3, query_filter=filter_3, limit=5)
    batch_search_result = client.search_batch(collection_name=COLLECTION_NAME, requests=search_queries)
    assert (len(batch_search_result) == 3)
    assert (batch_search_result[0] == single_search_result_1)
    assert (batch_search_result[1] == single_search_result_2)
    assert (batch_search_result[2] == single_search_result_3)
    recommend_queries = [RecommendRequest(positive=[1], negative=[], filter=filter_1, limit=5, with_payload=True), RecommendRequest(positive=[2], negative=[], filter=filter_2, limit=5, with_payload=True), RecommendRequest(positive=[3], negative=[], filter=filter_3, limit=5, with_payload=True)]
    reco_result_1 = client.recommend(collection_name=COLLECTION_NAME, positive=[1], query_filter=filter_1, limit=5)
    reco_result_2 = client.recommend(collection_name=COLLECTION_NAME, positive=[2], query_filter=filter_2, limit=5)
    reco_result_3 = client.recommend(collection_name=COLLECTION_NAME, positive=[3], query_filter=filter_3, limit=5)
    batch_reco_result = client.recommend_batch(collection_name=COLLECTION_NAME, requests=recommend_queries)
    assert (len(batch_reco_result) == 3)
    assert (batch_reco_result[0] == reco_result_1)
    assert (batch_reco_result[1] == reco_result_2)
    assert (batch_reco_result[2] == reco_result_3)
    assert (len(got_points) == 3)
    # Point CRUD: delete two, re-insert one, then payload set/delete/clear.
    client.delete(collection_name=COLLECTION_NAME, wait=True, points_selector=PointIdsList(points=[2, 3]))
    got_points = client.retrieve(collection_name=COLLECTION_NAME, ids=[1, 2, 3], with_payload=True, with_vectors=True)
    assert (len(got_points) == 1)
    client.upsert(collection_name=COLLECTION_NAME, wait=True, points=[PointStruct(id=2, payload={'hello': 'world'}, vector=vectors_2)])
    got_points = client.retrieve(collection_name=COLLECTION_NAME, ids=[1, 2, 3], with_payload=True, with_vectors=True)
    assert (len(got_points) == 2)
    client.set_payload(collection_name=COLLECTION_NAME, payload={'new_key': 123}, points=[1, 2], wait=True)
    got_points = client.retrieve(collection_name=COLLECTION_NAME, ids=[1, 2], with_payload=True, with_vectors=True)
    for point in got_points:
        assert (point.payload.get('new_key') == 123)
    client.delete_payload(collection_name=COLLECTION_NAME, keys=['new_key'], points=[1])
    got_points = client.retrieve(collection_name=COLLECTION_NAME, ids=[1], with_payload=True, with_vectors=True)
    for point in got_points:
        assert ('new_key' not in point.payload)
    client.clear_payload(collection_name=COLLECTION_NAME, points_selector=PointIdsList(points=[1, 2]))
    got_points = client.retrieve(collection_name=COLLECTION_NAME, ids=[1, 2], with_payload=True, with_vectors=True)
    for point in got_points:
        assert (not point.payload)
    # Raw-vector positives in recommend() require server >= v1.6.0.
    positive = [1, 2, query_vector.tolist()]
    negative = []
    if ((version is not None) and (version < 'v1.6.0')):
        positive = [1, 2]
        negative = []
    recommended_points = client.recommend(collection_name=COLLECTION_NAME, positive=positive, negative=negative, query_filter=Filter(must=[FieldCondition(key='rand_number', range=Range(lte=0.5))]), limit=5, with_payload=True, with_vectors=False)
    assert (len(recommended_points) == 5)
    (scrolled_points, next_page) = client.scroll(collection_name=COLLECTION_NAME, scroll_filter=Filter(must=[FieldCondition(key='rand_number', range=Range(lte=0.5))]), limit=5, offset=None, with_payload=True, with_vectors=False)
    assert isinstance(next_page, (int, str))
    assert (len(scrolled_points) == 5)
    (_, next_page) = client.scroll(collection_name=COLLECTION_NAME, scroll_filter=Filter(must=[FieldCondition(key='rand_number', range=Range(lte=0.5))]), limit=1000, offset=None, with_payload=True, with_vectors=False)
    assert (next_page is None)
    if ((version is None) or ((version >= 'v1.5.0') or (version == 'dev'))):
        client.batch_update_points(collection_name=COLLECTION_NAME, ordering=models.WriteOrdering.STRONG, update_operations=[models.UpsertOperation(upsert=models.PointsList(points=[models.PointStruct(id=1, payload={'new_key': 123}, vector=vectors_2), models.PointStruct(id=2, payload={'new_key': 321}, vector=vectors_2)])), models.DeleteOperation(delete=models.PointIdsList(points=[2])), models.SetPayloadOperation(set_payload=models.SetPayload(payload={'new_key2': 321}, points=[1])), models.OverwritePayloadOperation(overwrite_payload=models.SetPayload(payload={'new_key3': 321, 'new_key4': 321}, points=[1])), models.DeletePayloadOperation(delete_payload=models.DeletePayload(keys=['new_key3'], points=[1])), models.ClearPayloadOperation(clear_payload=models.PointIdsList(points=[1])), models.UpdateVectorsOperation(update_vectors=models.UpdateVectors(points=[models.PointVectors(id=1, vector=vectors_2)])), models.DeleteVectorsOperation(delete_vectors=models.DeleteVectors(points=[1], vector=['']))])
class NonStructured_Encoder():
    """Hierarchical TF1 sentence/dialog encoder with externally supplied gradients.

    Encodes each utterance with a bidirectional GRU, then encodes the sequence
    of utterance vectors with a unidirectional GRU.  Gradients flow in and out
    through placeholders (``grad_enc_text`` / ``grad_in``) so an external
    training loop can stitch this module into a larger computation graph.
    Built on TF1 graph mode and ``tf.contrib`` (placeholders, sessions).
    """
    def __init__(self, sess, FLAGS, embed, num_units=None, scope='Sentence_Encoder'):
        # sess: tf.Session used for all runs; FLAGS: hyperparameter namespace;
        # embed: initial word-embedding matrix; num_units overrides FLAGS.num_units.
        self.sess = sess
        self.dim_embed_word = FLAGS.dim_embed_word
        self.num_units = (num_units if (num_units is not None) else FLAGS.num_units)
        self.num_layers = FLAGS.num_layers
        self.train_keep_prob = FLAGS.keep_prob
        # fixed_noise toggles fixed recurrent-dropout noise (see DropoutWrapper).
        self.fixed_noise = tf.placeholder(tf.int32)
        self.keep_prob = tf.placeholder_with_default(1.0, ())
        self.learning_rate = tf.placeholder(tf.float32)
        if FLAGS.use_adam:
            self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
        else:
            self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
        # NOTE(review): recurrent_noise stays None here, yet get_gradients()
        # calls len(self.recurrent_noise) — callers appear to assign it before
        # that method runs; confirm against the training loop.
        (self.recurrent_noise_in, self.recurrent_noise_out, self.recurrent_noise) = ([], [], None)
        with tf.variable_scope(scope):
            self._build_embedding(embed)
            self._build_input()
            self._build_encoders()
            # Collect only the variables created under this scope.
            self.params = []
            for var in tf.trainable_variables():
                if (var.name.find(os.path.join(tf.contrib.framework.get_name_scope(), scope)) == 0):
                    self.params.append(var)
            # Backprop the externally supplied output gradients to parameters.
            self.grad_out = tf.gradients(tf.concat([self.enc_text, self.enc_text_cont], axis=(- 1)), self.params, tf.concat([self.grad_enc_text, self.grad_enc_text_cont], axis=(- 1)))
            self.grad_out[0] = tf.convert_to_tensor(self.grad_out[0])
            # Parameter updates are applied from gradients fed via grad_in.
            self.grad_in = [tf.placeholder(tf.float32, param.shape) for param in self.params]
            self.train_op = self.optimizer.apply_gradients(zip(self.grad_in, self.params))
    def _build_embedding(self, embed):
        """Create word<->index lookup tables and the embedding variable."""
        self.symbol2index = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string, value_dtype=tf.int64, default_value=0, shared_name='in_table', name='in_table', checkpoint=True)
        self.index2symbol = tf.contrib.lookup.MutableHashTable(key_dtype=tf.int64, value_dtype=tf.string, default_value='UNK', shared_name='out_table', name='out_table', checkpoint=True)
        self.embed = tf.get_variable('word_embedding', dtype=tf.float32, initializer=embed)
    def _build_input(self):
        """Placeholders: text_string is (dialog, utterance, token) strings."""
        with tf.variable_scope('input'):
            self.num_posts = tf.placeholder(tf.int32, (None,), 'num_posts')
            self.text_string = tf.placeholder(tf.string, (None, None, None), 'text_string')
            self.text = tf.nn.embedding_lookup(self.embed, self.symbol2index.lookup(self.text_string))
            self.text_len = tf.placeholder(tf.int32, (None, None), 'text_len')
    def _build_encoders(self):
        """Two-level encoding: per-utterance biGRU, then dialog-level GRU."""
        with tf.variable_scope('encoders'):
            # Flatten (dialogs, utterances) into one batch for the word-level encoder.
            self.enc_text = self._build_encoder(tf.reshape(self.text, [(tf.shape(self.text)[0] * tf.shape(self.text)[1]), tf.shape(self.text)[2], self.dim_embed_word]), tf.reshape(self.text_len, [(- 1)]), self.dim_embed_word, True, 'enc_text')
            self.enc_text_cont = tf.reshape(self._build_encoder(tf.reshape(self.enc_text, [tf.shape(self.text)[0], tf.shape(self.text)[1], self.num_units]), self.num_posts, self.num_units, False, 'enc_text_cont'), [(- 1), self.num_units])
            # Gradients for both encoder outputs are fed from outside.
            self.grad_enc_text = tf.placeholder(tf.float32, self.enc_text.shape)
            self.grad_enc_text_cont = tf.placeholder(tf.float32, self.enc_text_cont.shape)
    def _build_encoder(self, inputs, length, input_size, use_biencoder, scope, reuse=None):
        """Run a (bi)GRU; biencoder returns last-layer state, else all outputs."""
        with tf.variable_scope(scope, reuse=reuse):
            if use_biencoder:
                (cell_fw, cell_bw) = self._build_biencoder_cell(input_size)
                (outputs, states) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw, cell_bw=cell_bw, inputs=inputs, sequence_length=length, dtype=tf.float32)
                enc_state = []
                for i in range(self.num_layers):
                    enc_state.append(tf.concat([states[0][i], states[1][i]], axis=(- 1)))
                return enc_state[(- 1)]
            else:
                cell = self._build_cell(self.num_units, input_size)
                (outputs, states) = tf.nn.dynamic_rnn(cell=cell, inputs=inputs, sequence_length=length, dtype=tf.float32)
                return outputs
    def _build_cell(self, num_units, input_size):
        """Stack GRU cells wrapped in dropout with externally fed input noise."""
        cells = []
        for i in range(self.num_layers):
            self.recurrent_noise_in.append(tf.placeholder(tf.float32, (1, input_size)))
            dropout = DropoutWrapper(tf.contrib.rnn.GRUCell(num_units), self.keep_prob, input_size=input_size, dtype=tf.float32, noise_input=self.recurrent_noise_in[(- 1)], fixed_noise=self.fixed_noise)
            self.recurrent_noise_out.append(dropout.recurrent_input_noise)
            cells.append(dropout)
        return tf.contrib.rnn.MultiRNNCell(cells)
    def _build_biencoder_cell(self, input_size):
        # Each direction gets half the units so the concat matches num_units.
        cell_fw = self._build_cell((self.num_units / 2), input_size)
        cell_bw = self._build_cell((self.num_units / 2), input_size)
        return (cell_fw, cell_bw)
    def initialize(self, vocab):
        """Populate the word<->index tables from the vocabulary list."""
        op_in = self.symbol2index.insert(tf.constant(vocab), tf.constant(range(len(vocab)), dtype=tf.int64))
        op_out = self.index2symbol.insert(tf.constant(range(len(vocab)), dtype=tf.int64), tf.constant(vocab))
        self.sess.run([op_in, op_out])
    def format_data(self, data):
        """Pad every utterance (plus EOS) to the longest length in the batch."""
        def padding(sent, l):
            return ((sent + ['EOS']) + (['PAD'] * ((l - len(sent)) - 1)))
        length = 0
        for dialog in data:
            for text in dialog:
                length = max(length, len(text))
        length += 1
        (text_string, text_len) = ([], [])
        for dialog in data:
            text_string.append([])
            text_len.append([])
            for text in dialog:
                text_string[(- 1)].append(padding(text, length))
                # +1 accounts for the appended EOS token.
                text_len[(- 1)].append((len(text) + 1))
        return {'text_string': np.array(text_string), 'text_len': np.array(text_len)}
    def get_gradients(self, data, num_posts, grad_enc_text, grad_enc_text_cont, buffered=False):
        """Compute parameter gradients from externally supplied output grads.

        When ``buffered`` is True, returns (fetches, feed_dict) for batched
        session runs instead of running immediately.
        """
        data = self.format_data(data)
        input_feed = {self.text_string: data['text_string'], self.text_len: data['text_len'], self.num_posts: num_posts, self.grad_enc_text: grad_enc_text, self.grad_enc_text_cont: grad_enc_text_cont, self.keep_prob: self.train_keep_prob, self.fixed_noise: 1}
        # Replays the recorded dropout noise so gradients match the forward pass.
        for i in range(len(self.recurrent_noise)):
            input_feed[self.recurrent_noise_in[i]] = self.recurrent_noise[i]
        if buffered:
            return ([self.grad_out], input_feed)
        else:
            return self.sess.run(self.grad_out, input_feed)
    def train(self, grad, learning_rate, buffered=False):
        """Apply externally computed gradients via the optimizer."""
        input_feed = {}
        for i in range(len(grad)):
            input_feed[self.grad_in[i]] = grad[i]
        input_feed[self.learning_rate] = learning_rate
        if buffered:
            return ([self.train_op], input_feed)
        else:
            self.sess.run(self.train_op, input_feed)
    def infer(self, data, num_posts, is_train, buffered=False):
        """Forward pass; returns encoder outputs and sampled dropout noise."""
        data = self.format_data(data)
        input_feed = {self.text_string: data['text_string'], self.text_len: data['text_len'], self.num_posts: num_posts, self.fixed_noise: 0}
        # Zero placeholders: fresh noise is sampled because fixed_noise == 0.
        for noise in self.recurrent_noise_in:
            input_feed[noise] = np.zeros(noise.shape)
        if is_train:
            input_feed[self.keep_prob] = self.train_keep_prob
        output_feed = [self.enc_text, self.enc_text_cont, self.recurrent_noise_out]
        if buffered:
            return (output_feed, input_feed)
        else:
            return self.sess.run(output_feed, input_feed)
class DistWorker(CovController):
    """Coverage controller for a pytest-xdist worker process.

    Fix: ``_ensure_topdir`` appeared as a bare expression statement above
    ``start`` and ``finish`` (a no-op), evidently a decorator whose ``@`` was
    lost — it is restored so both methods run under the project top dir.
    """

    @_ensure_topdir
    def start(self):
        cleanup()
        # "Collocated" = same host AND same rootdir as the master, so no
        # path translation between master and worker is needed.
        self.is_collocated = ((socket.gethostname() == self.config.workerinput['cov_master_host']) and (self.topdir == self.config.workerinput['cov_master_topdir']))
        if (not self.is_collocated):
            # Rewrite master paths into this worker's filesystem layout.
            master_topdir = self.config.workerinput['cov_master_topdir']
            worker_topdir = self.topdir
            if (self.cov_source is not None):
                self.cov_source = [source.replace(master_topdir, worker_topdir) for source in self.cov_source]
            self.cov_config = self.cov_config.replace(master_topdir, worker_topdir)
        self.cov = coverage.Coverage(source=self.cov_source, branch=self.cov_branch, data_suffix=True, config_file=self.cov_config)
        self.cov.start()
        self.set_env()

    @_ensure_topdir
    def finish(self):
        """Stop coverage and hand results back to the master."""
        self.unset_env()
        self.cov.stop()
        if self.is_collocated:
            # Same filesystem: the master can read the data file directly.
            self.cov.save()
            self.config.workeroutput['cov_worker_node_id'] = self.nodeid
        else:
            # Different host/rootdir: serialize the data into workeroutput.
            self.cov.combine()
            self.cov.save()
            if (coverage.version_info < (5, 0)):
                buff = StringIO()
                self.cov.data.write_fileobj(buff)
                data = buff.getvalue()
            else:
                data = self.cov.get_data().dumps()
            self.config.workeroutput.update({'cov_worker_path': self.topdir, 'cov_worker_node_id': self.nodeid, 'cov_worker_data': data})

    def summary(self, stream):
        """Workers report nothing; the master prints the combined summary."""
        pass
def freeze_bn(model):
    """Freeze every ``BatchNorm2d`` in ``model``.

    Disables gradient flow through the affine parameters and switches each
    batch-norm layer to eval mode so running statistics stop updating.
    Other module types are left untouched.
    """
    for layer in model.modules():
        if not isinstance(layer, torch.nn.BatchNorm2d):
            continue
        for param_name in ('weight', 'bias'):
            if hasattr(layer, param_name):
                getattr(layer, param_name).requires_grad_(False)
        layer.eval()
def rtn_ftell(se: 'SymbolicExecutor', pstate: 'ProcessState'):
    """Hook for libc ``ftell``: report the current position of the stream.

    Returns the underlying file object's offset, -1 when the stream is not
    seekable, and ``None`` (no concretized return) when the descriptor given
    as the first argument is unknown to the process state.
    """
    logger.debug('ftell hooked')
    fd_id = pstate.get_argument_value(0)
    if not pstate.file_descriptor_exists(fd_id):
        return None
    descriptor = pstate.get_file_descriptor(fd_id)
    return descriptor.fd.tell() if descriptor.fd.seekable() else (-1)
def get_commandline(server=False, description=None, extras=None, cmdline=None):
    """Parse the command line shared by the pymodbus example programs.

    ``server`` selects server-only vs client-only options, ``extras`` is a
    list of ``(flag, kwargs)`` pairs for caller-specific arguments, and
    ``cmdline`` overrides ``sys.argv`` (used by tests).  Returns the parsed
    namespace with framer/port/host defaults resolved per communication type.
    """
    parser = argparse.ArgumentParser(description=description)
    # Options common to clients and servers.
    shared_options = [
        (('-c', '--comm'), dict(choices=['tcp', 'udp', 'serial', 'tls'], help='set communication, default is tcp', dest='comm', default='tcp', type=str)),
        (('-f', '--framer'), dict(choices=['ascii', 'binary', 'rtu', 'socket', 'tls'], help='set framer, default depends on --comm', dest='framer', type=str)),
        (('-l', '--log'), dict(choices=['critical', 'error', 'warning', 'info', 'debug'], help='set log level, default is info', dest='log', default='info', type=str)),
        (('-p', '--port'), dict(help='set port', dest='port', type=str)),
        (('--baudrate',), dict(help='set serial device baud rate', default=9600, type=int)),
        (('--host',), dict(help='set host, default is 127.0.0.1', dest='host', default=None, type=str)),
    ]
    for flags, options in shared_options:
        parser.add_argument(*flags, **options)
    if server:
        parser.add_argument('--store', choices=['sequential', 'sparse', 'factory', 'none'], help='set type of datastore', default='sequential', type=str)
        parser.add_argument('--slaves', help='set number of slaves, default is 0 (any)', default=0, type=int, nargs='+')
        parser.add_argument('--context', help='ADVANCED USAGE: set datastore context object', default=None)
    else:
        parser.add_argument('--timeout', help='ADVANCED USAGE: set client timeout', default=10, type=float)
    if extras:
        for extra in extras:
            parser.add_argument(extra[0], **extra[1])
    args = parser.parse_args(cmdline)
    # Per-comm defaults: (framer name, default port).
    comm_defaults = {'tcp': ('socket', 5020), 'udp': ('socket', 5020), 'serial': ('rtu', '/dev/ptyp0'), 'tls': ('tls', 5020)}
    pymodbus_apply_logging_config(args.log.upper())
    _logger.setLevel(args.log.upper())
    default_framer, default_port = comm_defaults[args.comm]
    if not args.framer:
        args.framer = default_framer
    args.port = args.port or default_port
    if args.comm != 'serial' and args.port:
        # Network ports are numeric; serial "ports" are device paths.
        args.port = int(args.port)
    if not args.host:
        args.host = '' if server else '127.0.0.1'
    return args
def multiplicative_jitter(x, device: torch.device, epsilon=0.01):
    """Multiply ``x`` elementwise by noise drawn uniformly from [1-eps, 1+eps].

    With ``epsilon == 0`` the input tensor is returned unchanged.  Samplers
    are memoized per device in the module-level ``uniform_map``.
    NOTE(review): the cached sampler bakes in the first ``epsilon`` seen for a
    device; later calls with a different epsilon reuse it — confirm intended.
    """
    if epsilon == 0:
        return x
    sampler = uniform_map.get(device)
    if sampler is None:
        low = torch.tensor(1.0 - epsilon, device=device)
        high = torch.tensor(1.0 + epsilon, device=device)
        sampler = torch.distributions.uniform.Uniform(low=low, high=high).rsample
        uniform_map[device] = sampler
    return x * sampler(x.shape)
def test_pattern_should_be_used2():
    """A custom converter with ``pattern``/``name`` attributes drives matching."""
    def parse_yesno(text):
        return parse_yesno.mapping[text.lower()]

    parse_yesno.mapping = {'yes': True, 'no': False, 'on': True, 'off': False, 'true': True, 'false': False}
    parse_yesno.pattern = '|'.join(parse_yesno.mapping.keys())
    parse_yesno.name = 'YesNo'

    parser = parse.Parser('Answer: {answer:YesNo}', {parse_yesno.name: parse_yesno})
    # Every spelling in the mapping converts to its boolean value.
    for spelling, expected in parse_yesno.mapping.items():
        assert_match(parser, 'Answer: %s' % spelling, 'answer', expected)
    # Matching is case-insensitive, but extra surrounding text must not match.
    assert_match(parser, 'Answer: YES', 'answer', True)
    assert_mismatch(parser, 'Answer: __YES__', 'answer')
class ELF32_Phdr(ELF_Phdr):
    """32-bit ELF program header parsed from its raw 32-byte representation."""

    # Eight 4-byte fields: p_type..p_align.
    Phdr_SIZE = (4 * 8)

    def __init__(self, buf, endian=0):
        """Unpack ``buf`` as an ELF32 Phdr.

        Args:
            buf: exactly ``Phdr_SIZE`` bytes of header data.
            endian: 0 for little-endian, anything else for big-endian.

        Raises:
            ValueError: if ``buf`` is not exactly ``Phdr_SIZE`` bytes.  (The
                previous code used a bare ``raise`` outside any ``except``,
                which surfaces as an unhelpful RuntimeError.)
        """
        if (len(buf) != self.Phdr_SIZE):
            raise ValueError('ELF32 Phdr requires %d bytes, got %d' % (self.Phdr_SIZE, len(buf)))
        fmt = ('<IIIIIIII' if (endian == 0) else '>IIIIIIII')
        (p_type, p_offset, p_vaddr, p_paddr, p_filesz, p_memsz, p_flags, p_align) = struct.unpack(fmt, buf)
        super(ELF32_Phdr, self).__init__(p_type, p_offset, p_vaddr, p_paddr, p_filesz, p_memsz, p_flags, p_align)
class TestDateField(TestCase):
    """Round-trip (de)serialization tests for ``fields.DateField``."""

    # Fixed arbitrary timestamp components shared by every test:
    # year, month, day, hour, minute, second.
    _STAMP = (2014, 3, 2, 9, 10, 3)

    def setUp(self):
        self.field = fields.DateField()

    def test_deserialize_none(self):
        self.assertEqual(self.field.deserialize(None), None)

    def test_deserialize_naive(self):
        # A timestamp without a zone suffix is interpreted as UTC.
        year, month, day, hour, minute, second = self._STAMP
        naive_date_string = '{year}-{month}-{day}T{hour}:{minute}:{second}'.format(year=year, month=month, day=day, hour=hour, minute=minute, second=second)
        expected_value = datetime.datetime(year, month, day, hour, minute, second, tzinfo=UTC)
        self.assertEqual(self.field.deserialize(naive_date_string), expected_value)

    def test_deserialize_nonnaive(self):
        # An explicit 'Z' suffix also yields a UTC-aware datetime.
        year, month, day, hour, minute, second = self._STAMP
        nonnaive_date_string = '{year}-{month}-{day}T{hour}:{minute}:{second}Z'.format(year=year, month=month, day=day, hour=hour, minute=minute, second=second)
        expected_value = datetime.datetime(year, month, day, hour, minute, second, tzinfo=UTC)
        self.assertEqual(self.field.deserialize(nonnaive_date_string), expected_value)

    def test_serialize_none(self):
        self.assertEqual(self.field.serialize(None), None)

    def test_serialize_naive(self):
        # A naive datetime is assumed to be local time and normalized to UTC.
        arbitrary_date = datetime.datetime(*self._STAMP)
        expected_value = UTC.normalize(arbitrary_date.replace(tzinfo=tzlocal())).strftime('%Y%m%dT%H%M%SZ')
        self.assertEqual(self.field.serialize(arbitrary_date), expected_value)

    def test_serialize_nonnaive(self):
        # An aware datetime keeps its zone and is converted to UTC on output.
        arbitrary_date = datetime.datetime(*self._STAMP, tzinfo=timezone('America/Los_Angeles'))
        expected_value = UTC.normalize(arbitrary_date).strftime('%Y%m%dT%H%M%SZ')
        self.assertEqual(self.field.serialize(arbitrary_date), expected_value)
class Client(Iface):
    """Thrift-generated RPC client exposing the `example` service method.

    Wraps an input/output protocol pair; when only one protocol is supplied
    it is used for both directions.
    """

    def __init__(self, iprot, oprot=None):
        # Default both directions to the input protocol, then override the
        # output side only when a distinct protocol is given.
        self._iprot = self._oprot = iprot
        if (oprot is not None):
            self._oprot = oprot
        # Sequence id used to tag outgoing messages.
        self._seqid = 0

    def example(self):
        """Synchronously invoke the remote `example` call and return its result."""
        self.send_example()
        return self.recv_example()

    def send_example(self):
        # Serialize the (empty) argument struct and flush it to the transport.
        self._oprot.writeMessageBegin('example', TMessageType.CALL, self._seqid)
        args = example_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_example(self):
        """Read the reply for `example`, raising any transported exception."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if (mtype == TMessageType.EXCEPTION):
            # Server reported a protocol-level failure instead of a result.
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = example_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if (result.success is not None):
            return result.success
        if (result.exc is not None):
            raise result.exc
        if (result.err is not None):
            raise result.err
        # Neither a success value nor a declared exception came back.
        raise TApplicationException(TApplicationException.MISSING_RESULT, 'example failed: unknown result')
class Playlist(BasePathMixin):
    """A single variant-stream entry of an HLS master playlist.

    Parses EXT-X-STREAM-INF attributes into a StreamInfo and links the
    rendition groups (audio/video/subtitles) referenced by this variant.
    """

    def __init__(self, uri, stream_info, media, base_uri):
        self.uri = uri
        self.base_uri = base_uri
        resolution = stream_info.get('resolution')
        # Idiom fix: compare against None with `is not None`, not `!=`.
        if resolution is not None:
            # Resolution arrives quoted, e.g. '"1280x720"'.
            resolution = resolution.strip('"')
            values = resolution.split('x')
            resolution_pair = (int(values[0]), int(values[1]))
        else:
            resolution_pair = None
        self.stream_info = StreamInfo(bandwidth=stream_info['bandwidth'], video=stream_info.get('video'), audio=stream_info.get('audio'), subtitles=stream_info.get('subtitles'), closed_captions=stream_info.get('closed_captions'), average_bandwidth=stream_info.get('average_bandwidth'), program_id=stream_info.get('program_id'), resolution=resolution_pair, codecs=stream_info.get('codecs'), frame_rate=stream_info.get('frame_rate'))
        self.media = []
        for media_type in ('audio', 'video', 'subtitles'):
            group_id = stream_info.get(media_type)
            if not group_id:
                continue
            # Attach every rendition belonging to this variant's group
            # (extend with a generator instead of `+= filter(...)`).
            self.media.extend(m for m in media if m.group_id == group_id)

    def __str__(self):
        media_types = []
        stream_inf = [str(self.stream_info)]
        for media in self.media:
            # Emit at most one GROUP-ID attribute per media type.
            if media.type in media_types:
                continue
            media_types.append(media.type)
            media_type = media.type.upper()
            stream_inf.append('%s="%s"' % (media_type, media.group_id))
        return '#EXT-X-STREAM-INF:' + ','.join(stream_inf) + '\n' + self.uri
def main(OPTS):
    """Run SQuAD-v2-style evaluation: score predictions, apply the
    no-answer threshold, and write or print the aggregated metrics.

    OPTS is an argparse-style namespace with data_file, pred_file,
    na_prob_file, na_prob_thresh, out_image_dir and out_file attributes.
    Returns the evaluation dict.
    """
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['data']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # Without a no-answer probability file, treat every question as answerable.
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)
    has_ans_qids = [k for (k, v) in qid_to_has_ans.items() if v]
    no_ans_qids = [k for (k, v) in qid_to_has_ans.items() if (not v)]
    (exact_raw, f1_raw) = get_raw_scores(dataset, preds)
    # Zero out scores for questions whose no-answer probability exceeds the threshold.
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, 'NoAns')
    if OPTS.na_prob_file:
        # Also report the best achievable scores over all candidate thresholds.
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if (OPTS.na_prob_file and OPTS.out_image_dir):
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file, 'w') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
    return out_eval
def apply_ccx(circuit, a, b, c, use_basis_gates=True):
    """Append a Toffoli (CCX) gate on qubits (a, b, c) to *circuit*.

    When use_basis_gates is true the gate is emitted as the standard 15-gate
    decomposition over {H, T, Tdg, CX}; otherwise the native ccx instruction
    is used.
    """
    if not use_basis_gates:
        circuit.ccx(a, b, c)
        return
    # Standard Toffoli decomposition into basis gates, applied in order.
    decomposition = (
        ('h', (c,)),
        ('cx', (b, c)),
        ('tdg', (c,)),
        ('cx', (a, c)),
        ('t', (c,)),
        ('cx', (b, c)),
        ('tdg', (c,)),
        ('cx', (a, c)),
        ('t', (b,)),
        ('t', (c,)),
        ('h', (c,)),
        ('cx', (a, b)),
        ('t', (a,)),
        ('tdg', (b,)),
        ('cx', (a, b)),
    )
    for gate, qubits in decomposition:
        getattr(circuit, gate)(*qubits)
def resnet50_v1b(pretrained=False, local_rank=None, **kwargs):
    """Build a ResNet-50 v1b model, optionally loading pretrained weights.

    Args:
        pretrained: path to a checkpoint file, or a falsy value / the string
            'None' to skip weight loading.
        local_rank: optional device index used to map the checkpoint onto
            the correct GPU in distributed training.
        **kwargs: forwarded to the ResNetV1b constructor.

    Returns:
        The constructed model, with matching checkpoint weights loaded.
    """
    model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], **kwargs)
    # Bug fix: the old `pretrained != 'None'` test let the default
    # `pretrained=False` fall through into torch.load(False).
    if pretrained and (pretrained != 'None'):
        if (local_rank is not None):
            old_dict = torch.load(pretrained, map_location=torch.device(local_rank))
        else:
            old_dict = torch.load(pretrained)
        model_dict = model.state_dict()
        # Keep only checkpoint entries whose keys exist in this architecture.
        old_dict = {k: v for (k, v) in old_dict.items() if (k in model_dict)}
        model_dict.update(old_dict)
        model.load_state_dict(model_dict)
    return model
def _get_quicklook(area_def, data, vmin=None, vmax=None, label='Variable (units)', num_meridians=45, num_parallels=10, coast_res='110m', cmap='RdBu_r'):
    """Render *data* on the projection described by *area_def* and return the
    matplotlib pyplot module with the figure prepared.

    Falls back to the basemap-based renderer when the requested coastline
    resolution cannot be translated to a cartopy one.

    Raises:
        ValueError: if area_def and data shapes disagree.
    """
    import matplotlib.pyplot as plt
    (coast_res, is_cartopy) = _translate_coast_resolution_to_cartopy(coast_res)
    if (not is_cartopy):
        # Legacy path: delegate to the basemap implementation.
        return _basemap_get_quicklook(area_def, data, vmin, vmax, label, num_meridians, num_parallels, coast_res=coast_res, cmap=cmap)
    if (area_def.shape != data.shape):
        raise ValueError(('area_def shape %s does not match data shape %s' % (list(area_def.shape), list(data.shape))))
    crs = area_def.to_cartopy_crs()
    ax = plt.axes(projection=crs)
    ax.coastlines(resolution=coast_res)
    ax.set_global()
    if (num_meridians or num_parallels):
        _ = _add_gridlines(ax, num_meridians, num_parallels)
    # Skip plotting entirely when the array is fully masked — imshow would
    # have nothing to draw and colorbar creation would fail.
    if (not (np.ma.isMaskedArray(data) and data.mask.all())):
        col = ax.imshow(data, transform=crs, extent=crs.bounds, origin='upper', vmin=vmin, vmax=vmax, cmap=cmap)
        plt.colorbar(col, shrink=0.5, pad=0.05).set_label(label)
    return plt
class GradientDescent(OptimizationAlgorithm):
    """Plain gradient-descent update rule: step = -learning_rate * grad."""

    def __init__(self, **kwargs):
        default_parameters = {'learning_rate': 1.0}
        restart_variables = {}
        # Bug fix: name the class explicitly. `super(self.__class__, self)`
        # recurses infinitely as soon as this class is subclassed.
        super(GradientDescent, self).__init__(alg_default_parameters=default_parameters, alg_restart_variables=restart_variables, **kwargs)

    def _step(self, grad):
        """Return the parameter update for the given gradient."""
        grad = np.squeeze(grad)
        return ((- grad) * self.parameters['learning_rate'])
def news_articles(hostname: str, language: str) -> list[NewsArticle]:
    """Return the site's news articles in the given language, newest first.

    Raises:
        ValueError: when no site matches *hostname*.
    """
    site = Site.objects.filter(hostname=hostname).first()
    if (not site):
        raise ValueError(f'Site {hostname} not found')
    queryset = (NewsArticleModel.objects
                .in_site(site)
                .order_by('-first_published_at')
                .filter(locale__language_code=language))
    return [NewsArticle.from_model(model) for model in queryset]
class STTHandler():
    """Base handler for a speech-to-text backend.

    Per-backend settings are stored as a JSON blob under the 'stt-settings'
    settings key, namespaced by self.key.
    """

    def __init__(self, settings, pip_path, stt):
        self.settings = settings
        self.pip_path = pip_path
        self.stt = stt
        # Namespace under which this backend's settings live in the blob.
        self.key = ''

    def install(self):
        """Install every extra pip requirement declared by the backend."""
        for requirement in self.stt['extra_requirements']:
            install_module(requirement, self.pip_path)

    def is_installed(self):
        """Return True when every extra requirement can be found."""
        return all(find_module(requirement) is not None
                   for requirement in self.stt['extra_requirements'])

    def recognize_file(self, path):
        """Transcribe the audio file at *path*; the base class recognizes nothing."""
        return None

    def set_setting(self, name, value):
        """Persist one setting under this backend's namespace."""
        blob = json.loads(self.settings.get_string('stt-settings'))
        namespace = blob.setdefault(self.key, {})
        namespace[name] = value
        self.settings.set_string('stt-settings', json.dumps(blob))

    def get_setting(self, name):
        """Return a stored setting, falling back to the declared default."""
        blob = json.loads(self.settings.get_string('stt-settings'))
        namespace = blob.get(self.key, {})
        if name not in namespace:
            return self.get_default_setting(name)
        return namespace[name]

    def get_default_setting(self, name):
        """Return the default declared in extra_settings, or None if absent."""
        return next((entry['default'] for entry in self.stt['extra_settings']
                     if entry['key'] == name), None)
@pytest.mark.filterwarnings('default')
def test_nose_deprecated_with_setup(pytester: Pytester) -> None:
    """nose's with_setup decorator should emit PytestRemovedIn8Warning for
    both the setup and the teardown hook."""
    pytest.importorskip('nose')
    # NOTE(review): restored the mangled decorators — the bare
    # `.filterwarnings('default')` line lacked `@pytest.mark`, and the
    # generated module used `_setup(...)` where `@with_setup(...)` is needed
    # (otherwise the generated test fails with NameError, not the warning).
    pytester.makepyfile(
        '\n        from nose.tools import with_setup\n\n        def setup_fn_no_op():\n            ...\n\n        def teardown_fn_no_op():\n            ...\n\n        @with_setup(setup_fn_no_op, teardown_fn_no_op)\n        def test_omits_warnings():\n            ...\n        '
    )
    output = pytester.runpytest('-Wdefault::pytest.PytestRemovedIn8Warning')
    message = ['*PytestRemovedIn8Warning: Support for nose tests is deprecated and will be removed in a future release.', '*test_nose_deprecated_with_setup.py::test_omits_warnings is using nose method: `setup_fn_no_op` (setup)', '*PytestRemovedIn8Warning: Support for nose tests is deprecated and will be removed in a future release.', '*test_nose_deprecated_with_setup.py::test_omits_warnings is using nose method: `teardown_fn_no_op` (teardown)']
    output.stdout.fnmatch_lines(message)
    output.assert_outcomes(passed=1)
def test_change_truncated_size():
    """change_dist_size should resize a Truncated RV and preserve its op type."""
    x = Truncated.dist(icdf_normal(0, [1, 2, 3]), lower=(- 1), size=(2, 3))
    # Bug fix: these comparisons were bare expressions, so they could never fail.
    assert (x.eval().shape == (2, 3))

    new_x = change_dist_size(x, (4, 3))
    assert isinstance(new_x.owner.op, TruncatedRV)
    assert (new_x.eval().shape == (4, 3))

    new_x = change_dist_size(x, (4, 3), expand=True)
    assert isinstance(new_x.owner.op, TruncatedRV)
    # expand=True prepends the new dims to the original (2, 3) shape.
    assert (new_x.eval().shape == (4, 3, 2, 3))
class DbmsXslprocessor(DirectoryManagement):
    """File upload/download on the database host via the DBMS_XSLPROCESSOR
    package (clob2file / read2clob), using a temporary Oracle DIRECTORY.
    """

    def __init__(self, args):
        logging.debug('DbmsXslprocessor object created')
        DirectoryManagement.__init__(self, args)

    def putFile(self, remotePath, remoteNameFile, data=None, localFile=None):
        """Write a remote file from either raw *data* bytes or a *localFile*.

        Exactly one of data/localFile must be given. Returns True on success
        or an Exception object describing the failure.
        """
        if (((localFile == None) and (data == None)) or ((localFile != None) and (data != None))):
            logging.critical('To put a file, choose between a localFile or data')
        if (data == None):
            logging.info('Copy the {0} file to the {1} remote path like {2}'.format(localFile, remotePath, remoteNameFile))
        else:
            logging.info('Copy this data : `{0}` in the {2} in the {1} remote path'.format(data, remotePath, remoteNameFile))
        # Create (or replace) the Oracle DIRECTORY object pointing at remotePath.
        self.__setDirectoryName__()
        status = self.__createOrRemplaceDirectory__(remotePath)
        if isinstance(status, Exception):
            return status
        if (localFile != None):
            data = self.__loadFile__(localFile)
        logging.debug('Decoding bytes as {0} before executing dbms_xslprocessor.clob2file'.format(self.encoding))
        try:
            # clob2file takes a character CLOB, so the payload must decode cleanly.
            dataStr = data.decode(self.encoding)
        except Exception as e:
            logging.error('Impossible to decode as {0} bytes: {1} ({2})'.format(self.encoding, repr(data), str(e)))
            return Exception(e)
        response = self.__execProc__('dbms_xslprocessor.clob2file', options=(dataStr, self.directoryName, remoteNameFile))
        if isinstance(response, Exception):
            logging.info('Impossible to create a file with dbms_xslprocessor: {0}'.format(self.cleanError(response)))
            return response
        return True

    def getFile(self, remotePath, remoteNameFile, localFile):
        """Download remotePath/remoteNameFile into *localFile* by reading it
        as a CLOB and draining DBMS_OUTPUT line by line.

        Returns True on success or an ErrorSQLRequest on query failure.
        """
        READ2CLOB_GET_FILE = "\n\t\tDECLARE\n\t\t\tclob_value\tCLOB\t\t\tDEFAULT NULL;\n\t\t\tutlfile_directory VARCHAR2 (100) DEFAULT '{0}'; \n\t\t\tfilename VARCHAR2 (100) DEFAULT '{1}';\n\t\tBEGIN\n\t\t\tclob_value := DBMS_XSLPROCESSOR.read2clob (flocation => utlfile_directory, fname => filename);\n \t\t\tDBMS_OUTPUT.put_line (clob_value);\n\t\tEND;\n\t\t"
        data = ''
        logging.info('Trying to download the file `{0}` stored in {1}...'.format(remoteNameFile, remotePath))
        self.__setDirectoryName__()
        status = self.__createOrRemplaceDirectory__(remotePath)
        if isinstance(status, Exception):
            return status
        cursor = cx_Oracle.Cursor(self.args['dbcon'])
        # DBMS_OUTPUT must be enabled before put_line output can be fetched.
        cursor.callproc('dbms_output.enable')
        try:
            cursor.execute(READ2CLOB_GET_FILE.format(self.directoryName, remoteNameFile))
        except Exception as e:
            logging.info('Impossible to execute the query `{0}`: {1}'.format(READ2CLOB_GET_FILE, self.cleanError(e)))
            self.__dropDirectory__()
            return ErrorSQLRequest(e)
        else:
            # Drain the DBMS_OUTPUT buffer: status becomes non-zero at EOF.
            statusVar = cursor.var(cx_Oracle.NUMBER)
            lineVar = cursor.var(cx_Oracle.STRING)
            while True:
                cursor.callproc('dbms_output.get_line', (lineVar, statusVar))
                if (statusVar.getvalue() != 0):
                    break
                line = lineVar.getvalue()
                if (line == None):
                    line = ''
                data += line
                logging.info(repr(line))
            cursor.close()
        logging.info('Creating local file {0}...'.format(localFile))
        f = open(localFile, 'w')
        f.write(data)
        f.close()
        return True

    def testAll(self):
        """Probe whether DBMS_XSLPROCESSOR-based file writes are possible
        for the current user, reporting the outcome via self.args['print']."""
        folder = self.__generateRandomString__()
        self.args['print'].subtitle('DBMS_XSLPROCESSOR library ?')
        logging.info('Simulate the file creation in the {0} folder with DBMS_XSLPROCESSOR'.format(folder))
        logging.info('The file is not created remotly because the folder should not exist')
        status = self.putFile(folder, 'temp.txt', data=b'data in file')
        # A missing-folder / missing-privilege error still proves the package is callable.
        if ((status == True) or (self.ERROR_BAD_FOLDER_OR_BAD_SYSTEM_PRIV in str(status)) or (self.ERROR_FILEOPEN_FAILED in str(status))):
            self.args['print'].goodNews('OK')
        else:
            self.args['print'].badNews('KO')
def _even_ext(x, n, axis=(- 1)):
    """Extend *x* by n samples at both ends of *axis* using even (mirror)
    extension, i.e. reflection about the edge samples.

    Raises:
        ValueError: when n exceeds x.shape[axis] - 1.
    """
    x = cp.asarray(x)
    if (n < 1):
        return x
    max_ext = (x.shape[axis] - 1)
    if (n > max_ext):
        raise ValueError((('The extension length n (%d) is too big. ' + 'It must not exceed x.shape[axis]-1, which is %d.') % (n, max_ext)))
    # Mirror the first n samples (excluding the edge sample) in reverse order
    # for the head, and the last n samples for the tail.
    head = _axis_slice(x, start=n, stop=0, step=(- 1), axis=axis)
    tail = _axis_slice(x, start=(- 2), stop=(- (n + 2)), step=(- 1), axis=axis)
    return cp.concatenate((head, x, tail), axis=axis)
@patch('PyQt6.QtGui.QAction.triggered')
@patch('beeref.actions.mixin.menu_structure')
@patch('beeref.actions.mixin.actions')
def test_update_recent_files(actions_mock, menu_mock, triggered_mock, qapp):
    """Updating menus after the recent-files list changes should replace the
    old recent-file QAction with one for the new file."""
    # NOTE(review): the three decorators above were mangled to bare '(...)'
    # lines; restored as mock patch decorators (bottom-up order matches the
    # actions_mock/menu_mock/triggered_mock parameters) — confirm whether
    # this file imports `patch` directly or uses `mock.patch`.
    widget = FooWidget()
    widget.settings.get_recent_files.return_value = [os.path.abspath('foo.bee')]
    menu_mock.__iter__.return_value = [{'menu': 'Open &Recent', 'items': '_build_recent_files'}]
    widget.build_menu_and_actions()
    triggered_mock.connect.reset_mock()
    assert (len(widget.actions()) == 1)
    qaction1 = widget.actions()[0]
    assert (qaction1.text() == 'foo.bee')
    widget.settings.get_recent_files.return_value = [os.path.abspath('bar.bee')]
    widget.update_menu_and_actions()
    triggered_mock.connect.assert_called()
    assert (len(widget.actions()) == 1)
    qaction1 = widget.actions()[0]
    assert (qaction1.text() == 'bar.bee')
def main():
    """Entry point: configure file and console logging, then loop forever
    receiving payloads from the xApp control socket on port 4200.

    Exits the loop only when the socket reports a negative length
    (treated as an error); zero-length reads are ignored.
    """
    # Log everything to a file; mirror INFO+ to the console.
    logging.basicConfig(level=logging.DEBUG, filename='/home/xapp-logger.log', filemode='a+', format='%(asctime)-15s %(levelname)-8s %(message)s')
    formatter = logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s')
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    control_sck = open_control_socket(4200)
    while True:
        data_sck = receive_from_socket(control_sck)
        if (len(data_sck) <= 0):
            if (len(data_sck) == 0):
                # Empty read: nothing to process, keep listening.
                continue
            else:
                logging.info('Negative value for socket')
                break
        else:
            logging.info(('Received data: ' + repr(data_sck)))
def dataset_walker(datasets):
    """Yield (dataset, parent) pairs for each dataset and its ancillary datasets.

    Top-level datasets are yielded with parent None; entries listed under the
    'ancillary_variables' attribute are yielded with their owning dataset as
    parent. Ancillary entries lacking an `.attrs` attribute (e.g. plain name
    strings) are skipped.
    """
    for parent in datasets:
        (yield (parent, None))
        for candidate in parent.attrs.get('ancillary_variables', []):
            try:
                # Probe for dataset-ness; plain strings have no .attrs.
                candidate.attrs
            except AttributeError:
                continue
            (yield (candidate, parent))
def progress(items, desc='', total=None, min_delay=0.1, displaytype='s1k'):
    """Yield items unchanged while printing a throttled progress line.

    desc prefixes each line; total defaults to len(items); min_delay limits
    how often (in seconds) a line is printed. displaytype 's1k' estimates
    time to the next multiple of 1000 items; anything else estimates time
    to completion.
    """
    total = (total or len(items))
    started_at = time.time()
    last_print = 0
    for (idx, element) in enumerate(items):
        now = time.time()
        if ((now - last_print) > min_delay):
            # The counter shows idx+1 while the percentage is computed from
            # idx, matching the original output exactly.
            print(('\r%s%d/%d (%6.2f%%)' % (desc, (idx + 1), total, ((idx / float(total)) * 100))), end=' ')
            if (idx > 0):
                elapsed = (now - started_at)
                if (displaytype == 's1k'):
                    # Estimate time until the next multiple of 1000 items.
                    next_1000 = (idx + (1000 - (idx % 1000)))
                    eta_1k = ((elapsed / idx) * next_1000)
                    fields = (list(divmod(elapsed, 60)) + list(divmod((eta_1k - elapsed), 60)))
                    print(('(TE/ET1k: %d:%02d / %d:%02d)' % tuple(fields)), end=' ')
                else:
                    # Estimate time until all `total` items are processed.
                    projected = ((elapsed / idx) * total)
                    fields = (list(divmod(elapsed, 60)) + list(divmod((projected - elapsed), 60)))
                    print(('(TE/ETA: %d:%02d / %d:%02d)' % tuple(fields)), end=' ')
            sys.stdout.flush()
            last_print = now
        (yield element)
    elapsed = (time.time() - started_at)
    print(('\r%s%d/%d (100.00%%) (took %d:%02d)' % ((desc, total, total) + divmod(elapsed, 60))))
class Logger():
    """Tabular key/value logger that prints on the MPI root process and can
    mirror each row to a text file.

    Usage: call configure_output_file() once, then per iteration call
    log_tabular() for each key and dump_tabular() to flush the row. Column
    order is fixed by the first row.
    """

    def print(str):
        """Print only on the MPI root process (called as Logger.print(msg))."""
        if MPIUtil.is_root_proc():
            print(str)
        return

    def __init__(self):
        self.output_file = None       # open file handle, or None when not logging to disk
        self.first_row = True         # True until the first dump fixes the headers
        self.log_headers = []         # column names, in output order
        self.log_current_row = {}     # values accumulated for the pending row
        self._dump_str_template = ''
        return

    def reset(self):
        """Clear accumulated headers/rows and truncate the output file, if any."""
        self.first_row = True
        self.log_headers = []
        self.log_current_row = {}
        if (self.output_file is not None):
            # Bug fix: previously referenced an undefined `output_path`
            # (NameError); reopen the same file, truncating its contents.
            self.output_file = open(self.output_file.name, 'w')
        return

    def configure_output_file(self, filename=None):
        """Open *filename* (default: output/log_<timestamp>.txt) for row output.

        Only the MPI root process opens the file; it is closed automatically
        at interpreter exit.
        """
        self.first_row = True
        self.log_headers = []
        self.log_current_row = {}
        output_path = (filename or ('output/log_%i.txt' % int(time.time())))
        out_dir = os.path.dirname(output_path)
        if ((not os.path.exists(out_dir)) and MPIUtil.is_root_proc()):
            os.makedirs(out_dir)
        if MPIUtil.is_root_proc():
            self.output_file = open(output_path, 'w')
            assert osp.exists(output_path)
            atexit.register(self.output_file.close)
            Logger.print(('Logging data to ' + self.output_file.name))
        return

    def log_tabular(self, key, val):
        """Record one value for the pending row; new keys are only allowed
        while the first row is being assembled."""
        if (self.first_row and (key not in self.log_headers)):
            self.log_headers.append(key)
        else:
            assert (key in self.log_headers), ("Trying to introduce a new key %s that you didn't include in the first iteration" % key)
        self.log_current_row[key] = val
        return

    def get_num_keys(self):
        """Return the number of registered columns."""
        return len(self.log_headers)

    def print_tabular(self):
        """Pretty-print the pending row to stdout (root process only)."""
        if MPIUtil.is_root_proc():
            Logger.print(('-' * 39))
            for key in self.log_headers:
                val = self.log_current_row.get(key, '')
                if isinstance(val, float):
                    valstr = ('%8.3g' % val)
                elif isinstance(val, int):
                    valstr = str(val)
                else:
                    valstr = val
                Logger.print(('| %16s | %16s |' % (key, valstr)))
            Logger.print(('-' * 39))
        return

    def dump_tabular(self):
        """Flush the pending row to the output file and clear it.

        On the first dump, also writes the header line and caches the
        fixed-width format template.
        """
        if MPIUtil.is_root_proc():
            if self.first_row:
                self._dump_str_template = self._build_str_template()
            vals = []
            for key in self.log_headers:
                val = self.log_current_row.get(key, '')
                vals.append(val)
            if (self.output_file is not None):
                if self.first_row:
                    header_str = self._dump_str_template.format(*self.log_headers)
                    self.output_file.write((header_str + '\n'))
                val_str = self._dump_str_template.format(*map(str, vals))
                self.output_file.write((val_str + '\n'))
                self.output_file.flush()
        self.log_current_row.clear()
        self.first_row = False
        return

    def _build_str_template(self):
        """Return a left-aligned fixed-width format template, one slot per column."""
        num_keys = self.get_num_keys()
        template = ('{:<25}' * num_keys)
        return template
class WebDriverHandler():
    """Monkey-patches a Selenium command executor so that every WebDriver
    command runs on a worker thread while the Reahl test server keeps
    serving requests until the command completes.
    """

    def __init__(self, command_executor):
        self.command_executor = command_executor
        # Keep the unpatched execute so it can be restored and delegated to.
        self.original_execute = command_executor.execute
        self.reahl_server = None

    def uninstall(self):
        """Restore the executor's original execute method."""
        self.command_executor.execute = self.original_execute

    def reinstall(self):
        """Re-apply the patch using the server recorded by a prior install()."""
        assert self.reahl_server, 'A handler can only be reinstalled if it was installed previously'
        self.install(self.reahl_server)

    def install(self, reahl_server):
        """Patch the executor so commands run concurrently with *reahl_server*."""
        self.reahl_server = reahl_server

        def wrapped_execute(command, params):
            exceptions = []
            results = []
            started = Event()

            def doit():
                try:
                    started.set()
                    try:
                        r = self.original_execute(command, params)
                    except CannotSendRequest:
                        # Connection got into a bad state; retry the command once.
                        r = self.original_execute(command, params)
                    results.append(r)
                except Exception as e:
                    exceptions.append(e)
                    raise
                finally:
                    # Always append so results[0] exists even on failure.
                    results.append(None)
            command_thread = Thread(target=doit)
            command_thread.start()
            started.wait()
            # Pump the server until the command thread finishes and no
            # connection remains pending.
            self.reahl_server.serve_until((lambda : ((not command_thread.is_alive()) and (not self.reahl_server.connection_is_pending(0.01)))))
            if exceptions:
                raise Exception(exceptions[0])
            command_thread.join(5)
            return results[0]
        self.command_executor.execute = wrapped_execute
def get_doc_input_bert(news, news_index, category_dict, domain_dict, subcategory_dict, args):
    """Pack tokenized news fields into fixed-size int32 matrices for BERT.

    Row 0 of every matrix stays zero (padding document); article `key` is
    written at row news_index[key]. Attributes absent from
    args.news_attributes yield None in the corresponding return slots.
    Unknown category/domain/subcategory labels map to index 0.

    Returns a 12-tuple:
        (title ids/type/attmask, abstract ids/type/attmask,
         body ids/type/attmask, category, domain, subcategory)
    """
    news_num = len(news) + 1  # +1 for the zero padding row at index 0
    attrs = args.news_attributes

    def _token_buffers(width):
        # input_ids / token_type_ids / attention_mask matrices of one width.
        return [np.zeros((news_num, width), dtype='int32') for _ in range(3)]

    if 'title' in attrs:
        news_title, news_title_type, news_title_attmask = _token_buffers(args.num_words_title)
    else:
        news_title = news_title_type = news_title_attmask = None
    if 'abstract' in attrs:
        news_abstract, news_abstract_type, news_abstract_attmask = _token_buffers(args.num_words_abstract)
    else:
        news_abstract = news_abstract_type = news_abstract_attmask = None
    if 'body' in attrs:
        news_body, news_body_type, news_body_attmask = _token_buffers(args.num_words_body)
    else:
        news_body = news_body_type = news_body_attmask = None

    news_category = np.zeros((news_num, 1), dtype='int32') if 'category' in attrs else None
    news_domain = np.zeros((news_num, 1), dtype='int32') if 'domain' in attrs else None
    news_subcategory = np.zeros((news_num, 1), dtype='int32') if 'subcategory' in attrs else None

    for key in tqdm(news):
        title, abstract, body, category, domain, subcategory = news[key]
        row = news_index[key]
        if 'title' in attrs:
            news_title[row] = title['input_ids']
            news_title_type[row] = title['token_type_ids']
            news_title_attmask[row] = title['attention_mask']
        if 'abstract' in attrs:
            news_abstract[row] = abstract['input_ids']
            news_abstract_type[row] = abstract['token_type_ids']
            news_abstract_attmask[row] = abstract['attention_mask']
        if 'body' in attrs:
            news_body[row] = body['input_ids']
            news_body_type[row] = body['token_type_ids']
            news_body_attmask[row] = body['attention_mask']
        if 'category' in attrs:
            news_category[row, 0] = category_dict.get(category, 0)
        if 'subcategory' in attrs:
            news_subcategory[row, 0] = subcategory_dict.get(subcategory, 0)
        if 'domain' in attrs:
            news_domain[row, 0] = domain_dict.get(domain, 0)

    return (news_title, news_title_type, news_title_attmask, news_abstract, news_abstract_type, news_abstract_attmask, news_body, news_body_type, news_body_attmask, news_category, news_domain, news_subcategory)
class double_conv(nn.Module):
    """Two stacked (Conv1d -> BatchNorm1d -> ReLU) blocks.

    Both convolutions use kernel size 3 with padding 1, so the temporal
    length is preserved while channels go in_ch -> out_ch -> out_ch.
    """

    def __init__(self, in_ch, out_ch):
        super(double_conv, self).__init__()
        layers = []
        # First block maps in_ch -> out_ch, second keeps out_ch -> out_ch.
        for channels_in in (in_ch, out_ch):
            layers.append(nn.Conv1d(channels_in, out_ch, 3, padding=1))
            layers.append(nn.BatchNorm1d(out_ch))
            layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply both conv blocks to x of shape (N, in_ch, L)."""
        return self.conv(x)
def _template_online_dataset(**kwargs):
    """Render the squirrel FDSN dataset YAML template.

    The kwargs network/station/channel, when present, are moved into the
    template's query_args mapping; remaining kwargs (e.g. site) are
    substituted directly. Relies on the module-level `path_prefix`.
    """
    # Collect recognized FDSN query arguments, removing them from kwargs.
    qargs_lines = [
        (" %s: '%s'" % (key, kwargs.pop(key)))
        for key in ['network', 'station', 'channel']
        if key in kwargs
    ]
    kwargs['qargs'] = (('\n' + '\n'.join(qargs_lines)) if qargs_lines else '{}')
    return '\n--- !squirrel.Dataset\n\n{path_prefix}\n\n# Data sources to be added (LocalData, FDSNSource, CatalogSource, ...)\nsources:\n- !squirrel.FDSNSource\n\n # URL or alias of FDSN site.\n site: {site}\n\n # Uncomment to let metadata expire in 10 days:\n #expires: 10d\n\n # Waveforms can be optionally shared with other FDSN client configurations,\n # so that data is not downloaded multiple times. The downside may be that in\n # some cases more data than expected is available (if data was previously\n # downloaded for a different application).\n #shared_waveforms: true\n\n # FDSN query arguments to make metadata queries.\n # See # Time span arguments should not be added here, because they are handled\n # automatically by Squirrel.\n query_args: {qargs}\n'.format(path_prefix=path_prefix, **kwargs).strip()
class Effect6928(BaseEffect):
    """Passive subsystem effect: boosts overloaded propulsion-module speed
    bonus based on Caldari Propulsion Systems skill."""
    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        def _is_propulsion_module(mod):
            # Applies to afterburners and microwarpdrives alike.
            return (mod.item.requiresSkill('Afterburner') or mod.item.requiresSkill('High Speed Maneuvering'))
        fit.modules.filteredItemBoost(_is_propulsion_module, 'overloadSpeedFactorBonus', src.getModifiedItemAttr('subsystemBonusCaldariPropulsion2'), skill='Caldari Propulsion Systems', **kwargs)
def repeat_tensors(n, x):
    """Repeat each batch row of *x* n times, consecutively.

    A tensor of shape (B, ...) becomes (B*n, ...) with every row repeated n
    times in a row; lists and tuples are processed element-wise (returning a
    list); anything else is returned unchanged.
    """
    if torch.is_tensor(x):
        # Insert a repeat axis, broadcast it to n, then fold it into the batch.
        expanded = x.unsqueeze(1).expand((- 1), n, *([(- 1)] * (x.dim() - 1)))
        return expanded.reshape((expanded.shape[0] * n), *expanded.shape[2:])
    if type(x) in (list, tuple):
        return [repeat_tensors(n, element) for element in x]
    return x
def calc_tf_padding(x, kernel_size, stride=1, dilation=1):
    """Compute TensorFlow-style 'SAME' padding for a 2-D convolution.

    *x* is an NCHW tensor; returns (pad_top, pad_bottom, pad_left, pad_right)
    such that the output spatial size equals ceil(input / stride).
    """
    def _pad_amount(size):
        # Total padding needed along one spatial dimension.
        out_size = math.ceil(size / stride)
        return max((((out_size - 1) * stride) + ((kernel_size - 1) * dilation) + 1) - size, 0)

    (height, width) = x.size()[2:]
    pad_h = _pad_amount(height)
    pad_w = _pad_amount(width)
    # Any odd remainder goes on the bottom/right, matching TF semantics.
    return ((pad_h // 2), (pad_h - (pad_h // 2)), (pad_w // 2), (pad_w - (pad_w // 2)))
class Inferer():
    """Runs beam-search inference with a trained model and writes one JSON
    line per example to an output file.
    """

    def __init__(self, config):
        self.config = config
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
            # Keep CPU inference single-threaded for reproducibility/footprint.
            torch.set_num_threads(1)
        # Preprocessor is loaded eagerly; the model itself comes via load_model().
        self.model_preproc = registry.instantiate(registry.lookup('model', config['model']).Preproc, config['model'])
        self.model_preproc.load()

    def load_model(self, logdir, step):
        """Construct the model and restore the checkpoint at *step* from *logdir*.

        Raises:
            Exception: when no checkpoint could be restored (untrained model).
        """
        model = registry.construct('model', self.config['model'], preproc=self.model_preproc, device=self.device)
        model.to(self.device)
        model.eval()
        saver = saver_mod.Saver({'model': model})
        last_step = saver.restore(logdir, step=step, map_location=self.device, item_keys=['model'])
        if (not last_step):
            raise Exception(f'Attempting to infer on untrained model in {logdir}, step={step}')
        return model

    def infer(self, model, output_path, args):
        """Run inference over every requested data section, honoring the
        optional shuffle/limit arguments, and stream results to output_path."""
        output = open(output_path, 'w')
        with torch.no_grad():
            for section in args.section:
                if ((self.config.get('full_data') is not None) and (args.part != 'spider')):
                    orig_data = registry.construct('dataset', self.config['full_data'][section])
                else:
                    orig_data = args.data[section]
                preproc_data = self.model_preproc.dataset(section, two_datasets=self.config.get('full_data'))
                preproc_data.part = args.part
                if args.shuffle:
                    # Sample a random subset of indices, then keep the original
                    # relative order while slicing both datasets in lockstep.
                    idx_shuffle = list(range(len(orig_data)))
                    random.shuffle(idx_shuffle)
                    if args.limit:
                        idx_shuffle = idx_shuffle[:args.limit]
                    (sliced_orig_data, sliced_preproc_data) = ([], [])
                    for (i, (orig_item, preproc_item)) in enumerate(zip(orig_data, preproc_data)):
                        if (i in idx_shuffle):
                            sliced_orig_data.append(orig_item)
                            sliced_preproc_data.append(preproc_item)
                elif args.limit:
                    sliced_orig_data = list(itertools.islice(orig_data, args.limit))
                    sliced_preproc_data = list(itertools.islice(preproc_data, args.limit))
                else:
                    sliced_orig_data = orig_data
                    sliced_preproc_data = preproc_data
                self._inner_infer(model, args.beam_size, args.output_history, sliced_orig_data, sliced_preproc_data, output, args.strict_decoding, section)

    def _inner_infer(self, model, beam_size, output_history, sliced_orig_data, sliced_preproc_data, output, strict_decoding=False, section='val'):
        """Decode every (original, preprocessed) pair and write one JSON line each."""
        for (orig_item, preproc_item) in tqdm.tqdm(zip(sliced_orig_data, sliced_preproc_data), total=len(sliced_orig_data)):
            # Sanity check that both streams stayed aligned.
            assert (orig_item.full_name == preproc_item[0]['full_name']), (orig_item.full_name, preproc_item[0]['full_name'])
            decoded = self._infer_one(model, orig_item, preproc_item, beam_size, output_history, strict_decoding, section)
            output.write((json.dumps({'name': orig_item.full_name, 'part': section, 'beams': decoded}, cls=ComplexEncoder) + '\n'))
            output.flush()

    def init_decoder_infer(self, model, data_item, section, strict_decoding):
        """Prime the decoder with per-example state (schema, value/grounding
        maps, and — under strict decoding — column/value constraints)."""
        model.decoder.schema = data_item.schema
        (_, validation_info) = model.preproc.validate_item(data_item, section)
        model.decoder.value_unit_dict = validation_info[0]
        model.decoder.ids_to_grounding_choices = model.decoder.preproc.grammar.get_ids_to_grounding_choices(data_item.schema, validation_info[0])
        # Locate the rule index used to emit a SELECT step.
        for (rule, idx) in model.decoder.rules_index.items():
            if (rule[1] == 'NextStepSelect'):
                model.decoder.select_index = idx
        if strict_decoding:
            assert (len(data_item.column_data) == 1)
            model.decoder.column_data = data_item.column_data[0]
            model.decoder.no_vals = (len(model.decoder.value_unit_dict) == 0)
            model.decoder.required_column = False
            model.decoder.value_columns = set()
            model.decoder.val_types_wo_cols = set()
            model.decoder.no_column = False
            if (not model.decoder.no_vals):
                (model.decoder.no_column, model.decoder.required_column) = (True, True)
                # A column is required only if every value unit carries one.
                for val_units in model.decoder.value_unit_dict.values():
                    model.decoder.required_column = (model.decoder.required_column and all((val_unit.column for val_unit in val_units)))
                # Gather all columns that may carry values, either directly or
                # via value types that lack an explicit column.
                for grnd_choice in model.decoder.ids_to_grounding_choices.values():
                    if (grnd_choice.choice_type == 'value'):
                        for val_unit in grnd_choice.choice:
                            if val_unit.column:
                                model.decoder.value_columns.add((val_unit.table, val_unit.column))
                            else:
                                model.decoder.val_types_wo_cols.add(val_unit.value_type)
                for table in model.decoder.column_data.keys():
                    for (column, col_type) in model.decoder.column_data[table].items():
                        if (col_type in model.decoder.val_types_wo_cols):
                            model.decoder.value_columns.add((table, column))
                model.decoder.no_column = (len(model.decoder.value_columns) == 0)
        else:
            model.decoder.column_data = None
        return model

    def _infer_one(self, model, data_item, preproc_item, beam_size, output_history=False, strict_decoding=False, section='val'):
        """Beam-search one example and return its decoded beams as dicts."""
        model = self.init_decoder_infer(model, data_item, section, strict_decoding)
        beams = decoder_utils.beam_search(model, preproc_item, beam_size=beam_size, max_steps=1000, strict_decoding=strict_decoding)
        decoded = []
        for beam in beams:
            (model_output, inferred_code) = beam.inference_state.finalize()
            decoded.append({'orig_question': data_item.text, 'model_output': model_output, 'inferred_code': inferred_code, 'score': beam.score, **({'choice_history': beam.choice_history, 'score_history': beam.score_history} if output_history else {})})
        return decoded
class TargetAssigner(object):
def __init__(self, similarity_calc: IouSimilarity, matcher: ArgMaxMatcher, box_coder: FasterRcnnBoxCoder, negative_class_weight: float=1.0, unmatched_cls_target: Optional[float]=None, keypoints_field_name: str=KEYPOINTS_FIELD_NAME):
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
if (unmatched_cls_target is not None):
self._unmatched_cls_target = unmatched_cls_target
else:
self._unmatched_cls_target = 0.0
self._keypoints_field_name = keypoints_field_name
def assign(self, anchors: BoxList, groundtruth_boxes: BoxList, groundtruth_labels=None, groundtruth_weights=None):
if (not isinstance(anchors, box_list.BoxList)):
raise ValueError('anchors must be an BoxList')
if (not isinstance(groundtruth_boxes, box_list.BoxList)):
raise ValueError('groundtruth_boxes must be an BoxList')
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors)
match = self._matcher.match(match_quality_matrix)
reg_targets = self._create_regression_targets(anchors, groundtruth_boxes, match)
cls_targets = self._create_classification_targets(groundtruth_labels, match)
return (cls_targets, reg_targets, match)
def _create_regression_targets(self, anchors: BoxList, groundtruth_boxes: BoxList, match: Match):
device = anchors.device()
zero_box = torch.zeros((1, 4), device=device)
matched_gt_boxes = match.gather_based_on_match(groundtruth_boxes.boxes(), unmatched_value=zero_box, ignored_value=zero_box)
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(self._keypoints_field_name):
groundtruth_keypoints = groundtruth_boxes.get_field(self._keypoints_field_name)
zero_kp = torch.zeros(((1,) + groundtruth_keypoints.shape[1:]), device=device)
matched_keypoints = match.gather_based_on_match(groundtruth_keypoints, unmatched_value=zero_kp, ignored_value=zero_kp)
matched_gt_boxlist.add_field(self._keypoints_field_name, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
unmatched_ignored_reg_targets = self._default_regression_target(device).repeat(match.match_results.shape[0], 1)
matched_anchors_mask = match.matched_column_indicator()
reg_targets = torch.where(matched_anchors_mask.unsqueeze(1), matched_reg_targets, unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self, device: torch.device):
    """Return a single all-zero regression target row of shape (1, code_size)."""
    code_size = self._box_coder.code_size()
    return torch.zeros(1, code_size, device=device)
def _create_classification_targets(self, groundtruth_labels, match: Match):
    """Gather per-anchor class targets; unmatched and ignored anchors both get the default target."""
    default_target = self._unmatched_cls_target
    return match.gather_based_on_match(groundtruth_labels, ignored_value=default_target, unmatched_value=default_target)
def _create_regression_weights(self, match: Match, groundtruth_weights):
    """Gather regression weights; anchors with no ground-truth match contribute zero."""
    return match.gather_based_on_match(groundtruth_weights, unmatched_value=0.0, ignored_value=0.0)
def _create_classification_weights(self, match: Match, groundtruth_weights):
    """Gather classification weights; unmatched anchors get the negative-class weight, ignored anchors zero."""
    return match.gather_based_on_match(groundtruth_weights, unmatched_value=self._negative_class_weight, ignored_value=0.0)
def box_coder(self):
    # Accessor for the box coder used to encode regression targets.
    return self._box_coder
def _choose_chains(traces: Sequence[S], tune: int) -> Tuple[(List[S], int)]:
if (not traces):
raise ValueError('No traces to slice.')
lengths = [max(0, (len(trace) - tune)) for trace in traces]
if (not sum(lengths)):
raise ValueError('Not enough samples to build a trace.')
idxs = np.argsort(lengths)
l_sort = np.array(lengths)[idxs]
use_until = cast(int, np.argmax((l_sort * np.arange(1, (l_sort.shape[0] + 1))[::(- 1)])))
final_length = l_sort[use_until]
take_idx = cast(Sequence[int], idxs[use_until:])
sliced_traces = [traces[idx] for idx in take_idx]
return (sliced_traces, (final_length + tune)) |
def retry(exception_cls, max_tries=10, sleep=0.05):
    """Decorator factory: retry a function (or generator function) on ``exception_cls``.

    The wrapped callable is invoked up to ``max_tries`` times, sleeping
    ``sleep`` seconds between attempts; the final failure is re-raised.
    For generator functions only the retrieval of the *first* item is
    retried (so start-up failures are retried); after that the generator is
    drained without further retries.

    Args:
        exception_cls: exception type (or tuple of types) that triggers a retry.
        max_tries: maximum number of attempts; must be positive.
        sleep: seconds to wait between failed attempts.
    """
    import functools
    assert (max_tries > 0)

    def with_max_retries_call(delegate):
        # Run delegate(), retrying on exception_cls; re-raise on the last try.
        for i in range(max_tries):
            try:
                return delegate()
            except exception_cls:
                if (i + 1) == max_tries:
                    raise
                time.sleep(sleep)

    def outer(fn):
        is_generator = inspect.isgeneratorfunction(fn)

        # The original source had bare no-op `(fn)` statements here — almost
        # certainly mangled `@wraps(fn)` decorators; restored as functools.wraps
        # so the wrappers keep fn's name/docstring.
        @functools.wraps(fn)
        def retry_fun(*args, **kwargs):
            return with_max_retries_call(lambda: fn(*args, **kwargs))

        @functools.wraps(fn)
        def retry_generator_fun(*args, **kwargs):
            def get_first_item():
                results = fn(*args, **kwargs)
                # Pull the first item inside the retry wrapper so exceptions
                # raised when the generator starts are retried too.
                for first_result in results:
                    return ([first_result], results)
                return ([], results)

            (cache, generator) = with_max_retries_call(get_first_item)
            for item in cache:
                yield item
            for item in generator:
                yield item

        wrapper = retry_generator_fun if is_generator else retry_fun
        wrapper.fn = fn
        wrapper.__reduce__ = (lambda: fn.__name__)
        return wrapper

    return outer
class BaseClean():
    """Applies a configurable pipeline of text-cleaning functions.

    Each entry in ``clean_fns`` is the *name* of a module-level function that
    maps a list of strings to a list of strings; names are resolved at call
    time so subclasses/configs can reorder or replace steps.
    """

    # Default pipeline, applied in order.
    clean_fns = ['to_lower', 'to_symbol', 'remove_emoji', 'clean_contractions', 'common_us_word', 'query_clean_v1', 'remove_control_char', 'remove_duplicate', 'remove_ending_underscore', 'remove_starting_underscore', 'clean_multiple_form', 'leet_clean']

    def __init__(self, clean_fns=None):
        if clean_fns:
            self.clean_fns = clean_fns

    def __call__(self, input_texts):
        """Clean a list of strings (returns a list) or a single string (returns a string).

        Any other input type is returned unchanged.
        """
        # isinstance (instead of `type(...) ==`) also accepts list/str subclasses.
        if isinstance(input_texts, list):
            for fn_name in self.clean_fns:
                # NOTE(review): names are resolved with eval(); safe only while
                # clean_fns comes from trusted code, never from user input.
                fn = eval(fn_name)
                input_texts = fn(input_texts)
        elif isinstance(input_texts, str):
            # Wrap the single string, run the list path, then unwrap.
            input_texts = self([input_texts])[0]
        return input_texts
def main():
    """Copy the image subset and attach HD-camera calibration to each annotation.

    Reads clean_train.pkl / clean_valid.pkl, looks up the matching 'hd' camera
    entry for every image's view, copies the image under data_hmor/, and writes
    the augmented annotations to data_hmor/{train,valid}_cam.pkl.
    """
    (train_annot, val_annot) = (pickle.load(open('clean_train.pkl', 'rb')), pickle.load(open('clean_valid.pkl', 'rb')))
    data_path = 'data_hmor'
    os.makedirs(data_path, exist_ok=True)
    for (annot_name, annot) in zip(('train', 'valid'), (train_annot, val_annot)):
        for term in tqdm(annot):
            img_path = term['image_name']
            # Path layout assumed: .../<subject>/.../<view>/<file> — TODO confirm.
            (subject, view) = (img_path.split('/')[(- 4)], img_path.split('/')[(- 2)])
            calibration = json.load(open(osp.join('data', *img_path.split('/')[:(- 3)], ('calibration_%s.json' % subject))))
            for cal_term in calibration['cameras']:
                # Merge in the HD camera entry matching this view.
                if ((cal_term['type'] == 'hd') and (cal_term['name'] == view)):
                    term.update(cal_term)
                    break
            os.makedirs(osp.join(data_path, *img_path.split('/')[:(- 1)]), exist_ok=True)
            shutil.copyfile(osp.join('data', img_path), osp.join(data_path, img_path))
        pickle.dump(annot, open(('data_hmor/%s_cam.pkl' % annot_name), 'wb'))
class TranslationTestMixin(object):
    """Test mixin: provisions a mock backend plus a helper for language-tagged connections."""

    def setUp(self):
        super(TranslationTestMixin, self).setUp()
        self.backend = self.create_backend(data={'name': 'mockbackend'})

    def create_lang_connection(self, identity, language):
        """Create a contact speaking ``language`` and a connection to it on the mock backend."""
        lang_contact = self.create_contact(data={'language': language})
        return self.create_connection(data={'identity': identity, 'contact': lang_contact, 'backend': self.backend})
class FeedForward(nn.Module):
    """Two-layer MLP: f -> activation -> dropout -> f -> dropout.

    ``f`` is the linear-layer factory (default ``nn.Linear``); the output
    width defaults to the input width. Dropout layers collapse to
    ``nn.Identity`` when ``dropout == 0``.
    """

    def __init__(self, dim_in, hidden_dim, dim_out=None, *, dropout=0.0, f=nn.Linear, activation=nn.GELU):
        super().__init__()
        if dim_out is None:
            dim_out = dim_in
        layers = [f(dim_in, hidden_dim), activation()]
        layers.append(nn.Dropout(dropout) if dropout > 0.0 else nn.Identity())
        layers.append(f(hidden_dim, dim_out))
        layers.append(nn.Dropout(dropout) if dropout > 0.0 else nn.Identity())
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
def update_diffs(module, is_similar, img, stored_img):
    """Write per-channel diff images for a failing comparison, or remove stale ones.

    When ``is_similar`` is False, writes <module>-rgb.png and <module>-alpha.png
    under the module-level ``diffs_dir``; when True, deletes any leftover diff
    files from earlier failures.
    """
    diffs_dir.mkdir(exist_ok=True)
    cached = None

    def channel_diff(slicer):
        # Lazily compute the gamma-boosted absolute difference exactly once.
        nonlocal cached
        if cached is None:
            delta = np.abs(stored_img.astype('f4') - img)
            cached = (((delta / 255) ** 0.25) * 255).astype('u1')
        return cached[..., slicer]

    targets = {diffs_dir / f'{module}-rgb.png': slice(0, 3), diffs_dir / f'{module}-alpha.png': 3}
    for out_path, slicer in targets.items():
        if not is_similar:
            iio.imwrite(out_path, channel_diff(slicer))
        elif out_path.exists():
            out_path.unlink()
class Tuple(Type):
    """A composite type: a fixed-length sequence of element types.

    ``from_str`` accepts ';', ',' or ' ' as the segment separator, tried in
    that order.
    """

    def __init__(self, *elem_types):
        self.elem_types = elem_types

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        return self.elem_types == other.elem_types

    def from_str(self, s):
        """Parse ``s`` into a tuple, delegating each segment to its element type."""
        for sep in (';', ','):
            if sep in s:
                segments = s.split(sep)
                break
        else:
            segments = s.split(' ')
        if len(segments) != len(self.elem_types):
            raise ValueError(('Length mismatch: expected a tuple of length %d; got %s instead' % (len(self.elem_types), s)))
        return tuple(typ.from_str(seg) for (typ, seg) in zip(self.elem_types, segments))
class BertFeatExtractor(object):
    """Extracts the [CLS]-token embedding of a text with a CUDA-resident BERT model."""

    def __init__(self, model_name):
        # model_name: pretrained BERT identifier understood by from_pretrained().
        self.tokenizer = BertTokenizer.from_pretrained(model_name)
        self.model = BertModel.from_pretrained(model_name).eval()
        self.model.cuda()

    def get_bert_embedding(self, text):
        """Return the final-layer embedding of the [CLS] token for ``text``."""
        tokenized_text = self.tokenizer.tokenize(text)
        tokenized_text = ((['[CLS]'] + tokenized_text) + ['[SEP]'])
        indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
        tokens_tensor = torch.Tensor([indexed_tokens]).long()
        # Single-segment input: every token gets segment id 0.
        segments_tensor = torch.Tensor(([0] * len(tokenized_text))).long()
        with torch.no_grad():
            (encoded_layers, _) = self.model(tokens_tensor.cuda(), segments_tensor.cuda(), output_all_encoded_layers=False)
        # encoded_layers is (1, seq_len, hidden); squeeze the batch dim and
        # take position 0, i.e. the [CLS] token.
        return encoded_layers.squeeze()[0]
def gmetric_read(msg):
    """Decode a Ganglia gmetric XDR packet into a dict of metric fields.

    Raises if the message has trailing undecoded bytes (``unpacker.done()``).
    """
    unpacker = Unpacker(msg)
    unpacker.unpack_int()  # leading message-id word, discarded
    # Dict values are evaluated left-to-right, preserving the XDR field order.
    values = {
        'TYPE': unpacker.unpack_string(),
        'NAME': unpacker.unpack_string(),
        'VAL': unpacker.unpack_string(),
        'UNITS': unpacker.unpack_string(),
        'SLOPE': slope_int2str[unpacker.unpack_int()],
        'TMAX': unpacker.unpack_uint(),
        'DMAX': unpacker.unpack_uint(),
    }
    unpacker.done()
    return values
def geometry_window(dataset, shapes, pad_x=0, pad_y=0, north_up=None, rotated=None, pixel_precision=None, boundless=False):
    """Compute the pixel Window of ``dataset`` covering all GeoJSON ``shapes``.

    Shape bounds are mapped to pixel space through the inverse dataset
    transform, padded by ``pad_x``/``pad_y`` pixels, and snapped outward to
    whole pixels. Unless ``boundless``, the window is clipped to the raster
    extent. ``north_up``/``rotated``/``pixel_precision`` are accepted for API
    compatibility but unused.
    """
    inv_transform = ~dataset.transform
    col_coords = []
    row_coords = []
    for shape in shapes:
        left, bottom, right, top = bounds(shape, transform=inv_transform)
        # Four corner x/y values per shape, padding applied outward.
        col_coords.extend((left - pad_x, right + pad_x, right + pad_x, left - pad_x))
        row_coords.extend((top - pad_y, top - pad_y, bottom + pad_y, bottom + pad_y))
    row_start = int(math.floor(min(row_coords)))
    row_stop = int(math.ceil(max(row_coords)))
    col_start = int(math.floor(min(col_coords)))
    col_stop = int(math.ceil(max(col_coords)))
    window = Window(col_off=col_start, row_off=row_start, width=max(col_stop - col_start, 0.0), height=max(row_stop - row_start, 0.0))
    if not boundless:
        window = window.intersection(Window(0, 0, dataset.width, dataset.height))
    return window
def train_one_epoch(train_loader, model, criterion, optimizer, scheduler, epoch, logger, config, scaler=None):
    """Run one training epoch (optionally under AMP) and step the LR scheduler.

    Args:
        train_loader: yields (images, targets) batches.
        model: network returning (out, x0..x4); only ``out`` feeds the loss.
        criterion: loss function applied to (out, targets).
        optimizer: torch optimizer.
        scheduler: LR scheduler, stepped once per epoch.
        epoch: current epoch index (logging only).
        logger: object with an ``info`` method.
        config: needs ``amp`` (bool) and ``print_interval`` (int) attributes.
        scaler: GradScaler; required when ``config.amp`` is True.
    """
    model.train()
    loss_list = []
    # Loop variable renamed from `iter`, which shadowed the builtin.
    for step, data in enumerate(train_loader):
        optimizer.zero_grad()
        (images, targets) = data
        images = images.cuda(non_blocking=True).float()
        targets = targets.cuda(non_blocking=True).float()
        if config.amp:
            # Forward under autocast; backward/step through the GradScaler.
            with autocast():
                (out, x0, x1, x2, x3, x4) = model(images)
                loss = criterion(out, targets)
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            (out, x0, x1, x2, x3, x4) = model(images)
            loss = criterion(out, targets)
            loss.backward()
            optimizer.step()
        loss_list.append(loss.item())
        now_lr = optimizer.state_dict()['param_groups'][0]['lr']
        if (step % config.print_interval) == 0:
            log_info = f'train: epoch {epoch}, iter:{step}, loss: {np.mean(loss_list):.4f}, lr: {now_lr}'
            print(log_info)
            logger.info(log_info)
    scheduler.step()
def get_user_emails(list_id):
    """Return email addresses of all active, email-verified subscribers of a list.

    Follows the API's pagination via the ``next`` link until it is null.
    The original re-parsed ``response.json()`` up to three times per page and
    duplicated the filter; each page is now parsed once and filtered by one
    helper.
    """
    def _verified(page):
        # Keep only active subscribers whose email address is verified.
        return [u for u in page['results'] if u['is_active'] and u['is_email_verified']]

    users = []
    page = req(get_url(f'sub/lists/{list_id}/subscribers/')).json()
    users.extend(_verified(page))
    while page['next'] is not None:
        page = req(page['next']).json()
        users.extend(_verified(page))
    return [u['email'] for u in users if u]
class Effect2882(BaseEffect):
    """Passive effect: boosts explosive damage of cruise-missile charges.

    Applies the container's damageMultiplierBonus attribute to the explosive
    damage of any loaded charge that requires the 'Cruise Missiles' skill.
    """

    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Cruise Missiles')), 'explosiveDamage', container.getModifiedItemAttr('damageMultiplierBonus'), **kwargs)
def eval_callback(model: torch.nn.Module, num_samples: Optional[int]=None) -> float:
    """Measure top-1 accuracy of ``model`` on a sampled ImageNet subset.

    Args:
        model: network producing class logits.
        num_samples: number of images to evaluate; defaults to EVAL_DATASET_SIZE.

    Returns:
        Top-1 accuracy in [0, 1].
    """
    if num_samples is None:
        num_samples = EVAL_DATASET_SIZE
    loader = _create_sampled_data_loader(imagenet_dataset, num_samples)
    device = get_device(model)
    num_correct = 0
    with in_eval_mode(model), torch.no_grad():
        for batch, labels in tqdm(loader):
            batch = batch.to(device)
            labels = labels.to(device)
            logits = model(batch)
            predictions = logits.topk(k=1).indices
            num_correct += (predictions == labels.view_as(predictions)).sum()
    return int(num_correct) / num_samples
class ProjectWindow(tk.Frame):
    """Main Tk window for the Raspberry Pi Pico project generator.

    Collects project name/location, board type, SDK library features,
    console/code/build/IDE options, then hands everything to DoEverything().
    """

    def __init__(self, parent, sdkpath, args):
        tk.Frame.__init__(self, parent)
        self.master = parent
        self.sdkpath = sdkpath
        self.init_window(args)
        self.configs = dict()
        self.ssid = str()
        self.password = str()

    def setState(self, thing, state):
        # Enable/disable every direct child widget of `thing`.
        for child in thing.winfo_children():
            child.configure(state=state)

    def boardtype_change_callback(self, event):
        # Wireless options only apply to the Pico W board.
        boardtype = self.boardtype.get()
        if (boardtype == 'pico_w'):
            self.setState(self.picowSubframe, 'enabled')
        else:
            self.setState(self.picowSubframe, 'disabled')

    def wirelessSettings(self):
        # Modal dialog returning (ssid, password).
        result = WirelessSettingsWindow(self)
        (self.ssid, self.password) = result.get()

    def init_window(self, args):
        """Build the full widget tree, pre-filling controls from CLI ``args``."""
        self.master.title('Raspberry Pi Pico Project Generator')
        self.master.configure(bg=GetBackground())
        optionsRow = 0
        # NOTE(review): .grid() returns None, so mainFrame is None and the
        # widgets below are parented to the default root — confirm before
        # refactoring this.
        mainFrame = tk.Frame(self, bg=GetBackground()).grid(row=optionsRow, column=0, columnspan=6, rowspan=12)
        # Keep a reference to the logo image so it is not garbage-collected.
        self.logo = tk.PhotoImage(file=GetFilePath('logo_alpha.gif'))
        logowidget = ttk.Label(mainFrame, image=self.logo, borderwidth=0, relief='solid').grid(row=0, column=0, columnspan=5, pady=10)
        optionsRow += 2
        namelbl = ttk.Label(mainFrame, text='Project Name :').grid(row=optionsRow, column=0, sticky=tk.E)
        self.projectName = tk.StringVar()
        if (args.name != None):
            self.projectName.set(args.name)
        else:
            self.projectName.set('ProjectName')
        nameEntry = ttk.Entry(mainFrame, textvariable=self.projectName).grid(row=optionsRow, column=1, sticky=(tk.W + tk.E), padx=5)
        optionsRow += 1
        locationlbl = ttk.Label(mainFrame, text='Location :').grid(row=optionsRow, column=0, sticky=tk.E)
        self.locationName = tk.StringVar()
        self.locationName.set((os.getcwd() if (not args.projectRoot) else args.projectRoot))
        locationEntry = ttk.Entry(mainFrame, textvariable=self.locationName).grid(row=optionsRow, column=1, columnspan=3, sticky=(tk.W + tk.E), padx=5)
        locationBrowse = ttk.Button(mainFrame, text='Browse', command=self.browse).grid(row=3, column=4)
        optionsRow += 1
        ttk.Label(mainFrame, text='Board Type :').grid(row=optionsRow, column=0, padx=4, sticky=tk.E)
        self.boardtype = ttk.Combobox(mainFrame, values=boardtype_list)
        self.boardtype.grid(row=4, column=1, padx=4, sticky=(tk.W + tk.E))
        self.boardtype.set('pico')
        self.boardtype.bind('<<ComboboxSelected>>', self.boardtype_change_callback)
        optionsRow += 1
        # Feature checkboxes laid out in three roughly equal columns.
        featuresframe = ttk.LabelFrame(mainFrame, text='Library Options', relief=tk.RIDGE, borderwidth=2)
        featuresframe.grid(row=optionsRow, column=0, columnspan=5, rowspan=5, ipadx=5, padx=5, pady=5, sticky=(tk.E + tk.W))
        s = (len(features_list) / 3)
        self.feature_checkbox_vars = []
        row = 0
        col = 0
        for i in features_list:
            var = tk.StringVar(value='')
            c = features_list[i][GUI_TEXT]
            # onvalue=i means a checked box's StringVar holds the feature key.
            cb = ttk.Checkbutton(featuresframe, text=c, var=var, onvalue=i, offvalue='')
            cb.grid(row=row, column=col, padx=15, pady=2, ipadx=1, ipady=1, sticky=(tk.E + tk.W))
            self.feature_checkbox_vars.append(var)
            row += 1
            if (row >= s):
                col += 1
                row = 0
        optionsRow += 5
        # Pico W wireless options; disabled until a pico_w board is selected.
        self.picowSubframe = ttk.LabelFrame(mainFrame, relief=tk.RIDGE, borderwidth=2, text='Pico Wireless Options')
        self.picowSubframe.grid(row=optionsRow, column=0, columnspan=5, rowspan=2, padx=5, pady=5, ipadx=5, ipady=3, sticky=(tk.E + tk.W))
        self.pico_wireless = tk.StringVar()
        col = 0
        row = 0
        for i in picow_options_list:
            rb = ttk.Radiobutton(self.picowSubframe, text=picow_options_list[i][GUI_TEXT], variable=self.pico_wireless, val=i)
            rb.grid(row=row, column=col, padx=15, pady=1, sticky=(tk.E + tk.W))
            col += 1
            if (col == 3):
                col = 0
                row += 1
        self.setState(self.picowSubframe, 'disabled')
        optionsRow += 3
        ooptionsSubframe = ttk.LabelFrame(mainFrame, relief=tk.RIDGE, borderwidth=2, text='Console Options')
        ooptionsSubframe.grid(row=optionsRow, column=0, columnspan=5, rowspan=2, padx=5, pady=5, ipadx=5, ipady=3, sticky=(tk.E + tk.W))
        self.wantUART = tk.IntVar()
        self.wantUART.set(args.uart)
        ttk.Checkbutton(ooptionsSubframe, text='Console over UART', variable=self.wantUART).grid(row=0, column=0, padx=4, sticky=tk.W)
        self.wantUSB = tk.IntVar()
        self.wantUSB.set(args.usb)
        ttk.Checkbutton(ooptionsSubframe, text='Console over USB (Disables other USB use)', variable=self.wantUSB).grid(row=0, column=1, padx=4, sticky=tk.W)
        optionsRow += 2
        coptionsSubframe = ttk.LabelFrame(mainFrame, relief=tk.RIDGE, borderwidth=2, text='Code Options')
        coptionsSubframe.grid(row=optionsRow, column=0, columnspan=5, rowspan=3, padx=5, pady=5, ipadx=5, ipady=3, sticky=(tk.E + tk.W))
        self.wantExamples = tk.IntVar()
        self.wantExamples.set(args.examples)
        ttk.Checkbutton(coptionsSubframe, text='Add examples for Pico library', variable=self.wantExamples).grid(row=0, column=0, padx=4, sticky=tk.W)
        self.wantRunFromRAM = tk.IntVar()
        self.wantRunFromRAM.set(args.runFromRAM)
        ttk.Checkbutton(coptionsSubframe, text='Run from RAM', variable=self.wantRunFromRAM).grid(row=0, column=1, padx=4, sticky=tk.W)
        self.wantCPP = tk.IntVar()
        self.wantCPP.set(args.cpp)
        ttk.Checkbutton(coptionsSubframe, text='Generate C++', variable=self.wantCPP).grid(row=0, column=3, padx=4, sticky=tk.W)
        ttk.Button(coptionsSubframe, text='Advanced...', command=self.config).grid(row=0, column=4, sticky=tk.E)
        self.wantCPPExceptions = tk.IntVar()
        self.wantCPPExceptions.set(args.cppexceptions)
        ttk.Checkbutton(coptionsSubframe, text='Enable C++ exceptions', variable=self.wantCPPExceptions).grid(row=1, column=0, padx=4, sticky=tk.W)
        self.wantCPPRTTI = tk.IntVar()
        self.wantCPPRTTI.set(args.cpprtti)
        ttk.Checkbutton(coptionsSubframe, text='Enable C++ RTTI', variable=self.wantCPPRTTI).grid(row=1, column=1, padx=4, sticky=tk.W)
        optionsRow += 3
        boptionsSubframe = ttk.LabelFrame(mainFrame, relief=tk.RIDGE, borderwidth=2, text='Build Options')
        boptionsSubframe.grid(row=optionsRow, column=0, columnspan=5, rowspan=2, padx=5, pady=5, ipadx=5, ipady=3, sticky=(tk.E + tk.W))
        self.wantBuild = tk.IntVar()
        self.wantBuild.set(args.build)
        ttk.Checkbutton(boptionsSubframe, text='Run build after generation', variable=self.wantBuild).grid(row=0, column=0, padx=4, sticky=tk.W)
        self.wantOverwrite = tk.IntVar()
        self.wantOverwrite.set(args.overwrite)
        ttk.Checkbutton(boptionsSubframe, text='Overwrite existing projects', variable=self.wantOverwrite).grid(row=0, column=1, padx=4, sticky=tk.W)
        optionsRow += 2
        vscodeoptionsSubframe = ttk.LabelFrame(mainFrame, relief=tk.RIDGE, borderwidth=2, text='IDE Options')
        vscodeoptionsSubframe.grid(row=optionsRow, column=0, columnspan=5, rowspan=2, padx=5, pady=5, ipadx=5, ipady=3, sticky=(tk.E + tk.W))
        self.wantVSCode = tk.IntVar()
        if (args.project is None):
            self.wantVSCode.set(False)
        else:
            self.wantVSCode.set(('vscode' in args.project))
        ttk.Checkbutton(vscodeoptionsSubframe, text='Create VSCode project', variable=self.wantVSCode).grid(row=0, column=0, padx=4, sticky=tk.W)
        ttk.Label(vscodeoptionsSubframe, text='       Debugger:').grid(row=0, column=1, padx=4, sticky=tk.W)
        self.debugger = ttk.Combobox(vscodeoptionsSubframe, values=debugger_list, state='readonly')
        self.debugger.grid(row=0, column=2, padx=4, sticky=tk.W)
        self.debugger.current(args.debugger)
        optionsRow += 2
        QuitButton = ttk.Button(mainFrame, text='Quit', command=self.quit).grid(row=optionsRow, column=4, stick=tk.E, padx=10, pady=5)
        OKButton = ttk.Button(mainFrame, text='OK', command=self.OK).grid(row=optionsRow, column=3, padx=4, pady=5, sticky=tk.E)

    def GetFeatures(self):
        """Return the list of selected feature keys (plus any Pico W wireless choice)."""
        features = []
        i = 0
        for cb in self.feature_checkbox_vars:
            s = cb.get()
            # Empty string means the checkbox is unticked (offvalue).
            if (s != ''):
                features.append(s)
        picow_extra = self.pico_wireless.get()
        if (picow_extra != 'picow_none'):
            features.append(picow_extra)
        return features

    def quit(self):
        sys.exit(ExitCodes.SUCCESS)

    def OK(self):
        """Gather every control's value into a params dict and run generation."""
        projectPath = self.locationName.get()
        features = self.GetFeatures()
        projects = list()
        if self.wantVSCode.get():
            projects.append('vscode')
        params = {'sdkPath': self.sdkpath, 'projectRoot': Path(projectPath), 'projectName': self.projectName.get(), 'wantGUI': True, 'wantOverwrite': self.wantOverwrite.get(), 'wantBuild': self.wantBuild.get(), 'boardtype': self.boardtype.get(), 'features': features, 'projects': projects, 'configs': self.configs, 'wantRunFromRAM': self.wantRunFromRAM.get(), 'wantExamples': self.wantExamples.get(), 'wantUART': self.wantUART.get(), 'wantUSB': self.wantUSB.get(), 'wantCPP': self.wantCPP.get(), 'debugger': self.debugger.current(), 'exceptions': self.wantCPPExceptions.get(), 'rtti': self.wantCPPRTTI.get(), 'ssid': self.ssid, 'password': self.password}
        DoEverything(self, params)

    def browse(self):
        # Directory chooser for the project location.
        name = fd.askdirectory()
        self.locationName.set(name)

    def help(self):
        print('Help TODO')

    def config(self):
        # Advanced CMake configuration dialog; result cached on the window.
        self.configs = ConfigurationWindow(self, self.configs).get()
class CumOp(COp):
    """Cumulative sum/product Op along ``axis`` (flattens input when axis is None)."""

    __props__ = ('axis', 'mode')
    check_input = False
    params_type = ParamsType(c_axis=int_t, mode=EnumList(('MODE_ADD', 'add'), ('MODE_MUL', 'mul')))

    def __init__(self, axis: Optional[int]=None, mode='add'):
        """axis: axis to accumulate over (None flattens); mode: 'add' or 'mul'."""
        if (mode not in ('add', 'mul')):
            raise ValueError(f'{type(self).__name__}: Unknown mode "{mode}"')
        self.axis = axis
        self.mode = mode

    # NPY_MAXDIMS is the C-side sentinel for "no axis" (flattened input).
    c_axis = property((lambda self: (np.MAXDIMS if (self.axis is None) else self.axis)))

    def make_node(self, x):
        x = ptb.as_tensor_variable(x)
        out_type = x.type()
        if (self.axis is None):
            # Flattened accumulation always yields a vector.
            out_type = vector(dtype=x.dtype)
        elif ((self.axis >= x.ndim) or (self.axis < (- x.ndim))):
            raise ValueError(f'axis(={self.axis}) out of bounds')
        return Apply(self, [x], [out_type])

    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        z = output_storage[0]
        if (self.mode == 'add'):
            z[0] = np.cumsum(x, axis=self.axis)
        else:
            z[0] = np.cumprod(x, axis=self.axis)

    def grad(self, inputs, output_gradients):
        """Gradient: reversed cumulative sum of the output gradient (divided by x for 'mul')."""
        (x,) = inputs
        (gi,) = output_gradients
        if (self.axis is None):
            if (self.mode == 'add'):
                return [cumsum(gi[::(- 1)])[::(- 1)].reshape(x.shape)]
            elif (self.mode == 'mul'):
                fx = cumprod(x, axis=self.axis)
                return [(cumsum((fx * gi)[::(- 1)])[::(- 1)].reshape(x.shape) / x)]
            else:
                raise NotImplementedError(f'{type(self).__name__}: unknown gradient for mode "{self.mode}"')
        # Build a slicing tuple that reverses only self.axis.
        reverse_slicing = ([slice(None, None, None)] * gi.ndim)
        reverse_slicing[self.axis] = slice(None, None, (- 1))
        reverse_slicing = tuple(reverse_slicing)
        if (self.mode == 'add'):
            return [cumsum(gi[reverse_slicing], self.axis)[reverse_slicing]]
        elif (self.mode == 'mul'):
            fx = cumprod(x, axis=self.axis)
            return [(cumsum((fx * gi)[reverse_slicing], self.axis)[reverse_slicing] / x)]
        else:
            raise NotImplementedError(f'{type(self).__name__}: unknown gradient for mode "{self.mode}"')

    def infer_shape(self, fgraph, node, shapes):
        if (self.axis is None):
            # Flattened output has as many elements as the input.
            return [(prod(shapes[0]),)]
        return shapes

    def c_code(self, node, name, inames, onames, sub):
        """Emit C delegating to PyArray_CumSum / PyArray_CumProd."""
        (x,) = inames
        (z,) = onames
        axis = self.axis
        fail = sub['fail']
        params = sub['params']
        code = ('\n                int axis = %(params)s->c_axis;\n                if (axis == 0 && PyArray_NDIM(%(x)s) == 1)\n                    axis = NPY_MAXDIMS;\n                npy_intp shape[1] = { PyArray_SIZE(%(x)s) };\n                if(axis == NPY_MAXDIMS && !(%(z)s && PyArray_DIMS(%(z)s)[0] == shape[0]))\n                {\n                    Py_XDECREF(%(z)s);\n                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, PyArray_TYPE((PyArrayObject*) py_%(x)s));\n                }\n\n                else if(axis != NPY_MAXDIMS && !(%(z)s && PyArray_CompareLists(PyArray_DIMS(%(z)s), PyArray_DIMS(%(x)s), PyArray_NDIM(%(x)s))))\n                {\n                    Py_XDECREF(%(z)s);\n                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM(%(x)s), PyArray_DIMS(%(x)s), PyArray_TYPE(%(x)s));\n                }\n\n                if (!%(z)s)\n                    %(fail)s;\n                {\n\n                    PyObject * t = NULL;\n                    if(%(params)s->mode == MODE_ADD)\n                        t = PyArray_CumSum(\n                            %(x)s, axis,\n                            PyArray_TYPE(%(x)s), %(z)s);\n                    else if(%(params)s->mode == MODE_MUL)\n                        t = PyArray_CumProd(\n                            %(x)s, axis,\n                            PyArray_TYPE(%(x)s), %(z)s);\n\n                    if (!t){\n                        %(fail)s;\n                    }\n                    // Because PyArray_CumSum/CumProd returns a newly created reference on t.\n                    Py_XDECREF(t);\n                }\n            ' % locals())
        return code

    def c_code_cache_version(self):
        return (8,)

    def __str__(self):
        return f'{self.__class__.__name__}{{{self.axis}, {self.mode}}}'
def log_args_to_txt(log_txt, args):
    """Write argparse args to ``log_txt`` as 'key:value,' lines — once only.

    Does nothing if the file already exists, so the first run's configuration
    is preserved. Replaces the original quadratic ``+=`` string build with a
    single join; output bytes are unchanged.
    """
    if os.path.exists(log_txt):
        return
    content = ''.join(f'{k}:{v},\t\n' for k, v in vars(args).items()) + '\n'
    with open(log_txt, 'w') as txtfile:
        txtfile.write(content)
def rounding_numerical_components():
    """Demo: rounding numerical multivector coefficients with Nga().

    Builds two Euclidean 3D multivectors and prints them alongside their
    2-significant-digit roundings, and likewise for their geometric product.
    """
    Print_Function()
    # Orthonormal Euclidean basis (metric diag(1,1,1)).
    (ex, ey, ez) = MV.setup('e_x e_y e_z', metric='[1,1,1]')
    X = (((1.2 * ex) + (2.34 * ey)) + (0.555 * ez))
    Y = (((0.333 * ex) + (4 * ey)) + (5.3 * ez))
    print('X =', X)
    print('Nga(X,2) =', Nga(X, 2))
    print('X*Y =', (X * Y))
    print('Nga(X*Y,2) =', Nga((X * Y), 2))
    return
def preprocess_for_train(image, output_height, output_width, resize_side_min=_RESIZE_SIDE_MIN, resize_side_max=_RESIZE_SIDE_MAX):
    """VGG-style training preprocessing: random resize, crop, flip, mean-subtract.

    Args:
        image: 3-D image tensor.
        output_height: crop height.
        output_width: crop width.
        resize_side_min: lower bound for the random shorter-side resize.
        resize_side_max: upper bound (inclusive) for the random shorter-side resize.

    Returns:
        Float image tensor of shape [output_height, output_width, 3] with the
        per-channel means subtracted.
    """
    # Scale augmentation: sample the shorter-side length uniformly
    # (maxval is exclusive, hence the +1).
    resize_side = tf.random_uniform([], minval=resize_side_min, maxval=(resize_side_max + 1), dtype=tf.int32)
    image = _aspect_preserving_resize(image, resize_side)
    image = _random_crop([image], output_height, output_width)[0]
    image.set_shape([output_height, output_width, 3])
    image = tf.to_float(image)
    image = tf.image.random_flip_left_right(image)
    return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
class ExtractPathTest(object):
    """Checks conversion of Flask-style route paths into Swagger-style '{param}' paths."""

    def test_extract_static_path(self):
        assert extract_path('/test') == '/test'

    def test_extract_path_with_a_single_simple_parameter(self):
        assert extract_path('/test/<parameter>') == '/test/{parameter}'

    def test_extract_path_with_a_single_typed_parameter(self):
        assert extract_path('/test/<string:parameter>') == '/test/{parameter}'

    def test_extract_path_with_a_single_typed_parameter_with_arguments(self):
        assert extract_path('/test/<string(length=2):parameter>') == '/test/{parameter}'

    def test_extract_path_with_multiple_parameters(self):
        assert extract_path('/test/<parameter>/<string:other>/') == '/test/{parameter}/{other}/'
def decoder_rnn(cell, inputs, enc_outputs, enc_final_states, seq_length, hidden_dim, num_glimpse, batch_size, is_train, end_of_sequence_id=0, initializer=None, max_length=None):
    """Pointer-network decoder attending over encoder outputs.

    At train time runs teacher forcing over ``inputs``; at inference greedily
    feeds back the arg-max encoder output until ``end_of_sequence_id`` or
    ``max_length`` is reached.

    Returns:
        (outputs, final_state, final_context_state) from dynamic_rnn_decoder.
    """
    with tf.variable_scope('decoder_rnn') as scope:

        def attention(ref, query, with_softmax, scope='attention'):
            # Additive (Bahdanau-style) attention of `query` over `ref`.
            with tf.variable_scope(scope):
                W_ref = tf.get_variable('W_ref', [1, hidden_dim, hidden_dim], initializer=initializer)
                W_q = tf.get_variable('W_q', [hidden_dim, hidden_dim], initializer=initializer)
                v = tf.get_variable('v', [hidden_dim], initializer=initializer)
                encoded_ref = tf.nn.conv1d(ref, W_ref, 1, 'VALID', name='encoded_ref')
                encoded_query = tf.expand_dims(tf.matmul(query, W_q, name='encoded_query'), 1)
                # NOTE(review): tiled_encoded_Query is computed but unused — the
                # addition below relies on broadcasting instead; confirm before
                # removing.
                tiled_encoded_Query = tf.tile(encoded_query, [1, tf.shape(encoded_ref)[1], 1], name='tiled_encoded_query')
                scores = tf.reduce_sum((v * tf.tanh((encoded_ref + encoded_query))), [(- 1)])
                if with_softmax:
                    return tf.nn.softmax(scores)
                else:
                    return scores

        def glimpse(ref, query, scope='glimpse'):
            # One glimpse: attention-weighted sum over the encoder outputs.
            p = attention(ref, query, with_softmax=True, scope=scope)
            alignments = tf.expand_dims(p, 2)
            return tf.reduce_sum((alignments * ref), [1])

        def output_fn(ref, query, num_glimpse):
            if (query is None):
                # First decode step: no cell output yet, emit zeros.
                return tf.zeros([max_length], tf.float32)
            else:
                for idx in range(num_glimpse):
                    query = glimpse(ref, query, 'glimpse_{}'.format(idx))
                return attention(ref, query, with_softmax=False, scope='attention')

        def input_fn(sampled_idx):
            # Feed back the encoder output at the sampled position (no gradient).
            return tf.stop_gradient(tf.gather_nd(enc_outputs, index_matrix_to_pairs(sampled_idx)))

        if is_train:
            decoder_fn = simple_decoder_fn_train(enc_final_states)
        else:
            maximum_length = tf.convert_to_tensor(max_length, tf.int32)

            def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
                # Greedy inference step for dynamic_rnn_decoder.
                cell_output = output_fn(enc_outputs, cell_output, num_glimpse)
                if (cell_state is None):
                    cell_state = enc_final_states
                    next_input = cell_input
                    done = tf.zeros([batch_size], dtype=tf.bool)
                else:
                    sampled_idx = tf.cast(tf.argmax(cell_output, 1), tf.int32)
                    next_input = input_fn(sampled_idx)
                    done = tf.equal(sampled_idx, end_of_sequence_id)
                    # Force termination once the maximum length is exceeded.
                    done = tf.cond(tf.greater(time, maximum_length), (lambda : tf.ones([batch_size], dtype=tf.bool)), (lambda : done))
                return (done, cell_state, next_input, cell_output, context_state)

        (outputs, final_state, final_context_state) = dynamic_rnn_decoder(cell, decoder_fn, inputs=inputs, sequence_length=seq_length, scope=scope)
        if is_train:
            # Project each step's raw cell output through the pointer attention head.
            transposed_outputs = tf.transpose(outputs, [1, 0, 2])
            fn = (lambda x: output_fn(enc_outputs, x, num_glimpse))
            outputs = tf.transpose(tf.map_fn(fn, transposed_outputs), [1, 0, 2])
        return (outputs, final_state, final_context_state)
def get_cfg_tree(nsql: str):
    """Parse an NSQL string into a tree of nested 'QA(...)' sub-expressions.

    The root node carries the full string; every nested ``QA(...)`` call that
    does not start at position 0 becomes a child node named with its exact
    source span.
    """
    stack: List = []              # indices of currently-unmatched '('
    expression_stack: List = []   # parent nodes while inside nested QA(...) spans
    current_tree_node = TreeNode(name=nsql)
    for idx in range(len(nsql)):
        if (nsql[idx] == '('):
            stack.append(idx)
            # A 'QA(' opening (ignoring one at the very start, which is the root).
            if ((idx > 1) and (nsql[(idx - 2):(idx + 1)] == 'QA(') and ((idx - 2) != 0)):
                tree_node = TreeNode()
                current_tree_node.add_child(tree_node)
                expression_stack.append(current_tree_node)
                current_tree_node = tree_node
        elif (nsql[idx] == ')'):
            left_clause_idx = stack.pop()
            if ((idx > 1) and (nsql[(left_clause_idx - 2):(left_clause_idx + 1)] == 'QA(') and ((left_clause_idx - 2) != 0)):
                # Closing a nested QA( ... ): record its exact source span and
                # pop back to the parent node.
                nsql_span = nsql[(left_clause_idx - 2):(idx + 1)]
                current_tree_node.set_name(nsql_span)
                current_tree_node = expression_stack.pop()
    return current_tree_node
def data_masks(all_usr_pois, item_tail):
    """Right-pad each session sequence to the max length and build binary masks.

    Args:
        all_usr_pois: list of item-id sequences (lists), one per session.
        item_tail: padding token as a one-element list, e.g. ``[0]``.

    Returns:
        Tuple ``(padded sequences, masks with 1 for real items and 0 for
        padding, max sequence length)``. An empty input yields ``([], [], 0)``.
    """
    us_lens = [len(upois) for upois in all_usr_pois]
    # default=0 keeps an empty batch from raising ValueError on max([]).
    len_max = max(us_lens, default=0)
    us_pois = [(upois + item_tail * (len_max - le)) for (upois, le) in zip(all_usr_pois, us_lens)]
    us_msks = [([1] * le + [0] * (len_max - le)) for le in us_lens]
    return (us_pois, us_msks, len_max)
_module()
class UNet(BaseModule):
    """UNet backbone returning the feature maps of every decoder stage.

    The encoder has ``num_stages`` stages; each decoder stage upsamples and
    fuses the matching encoder skip connection. ``forward`` returns
    ``[bottleneck, dec_{n-2}, ..., dec_0]``.
    """

    # NOTE(review): init_cfg default is a mutable list shared across calls —
    # confirm callers never mutate it.
    def __init__(self, in_channels=3, base_channels=64, num_stages=5, strides=(1, 1, 1, 1, 1), enc_num_convs=(2, 2, 2, 2, 2), dec_num_convs=(2, 2, 2, 2), downsamples=(True, True, True, True), enc_dilations=(1, 1, 1, 1, 1), dec_dilations=(1, 1, 1, 1), with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), upsample_cfg=dict(type='InterpConv'), norm_eval=False, dcn=None, plugins=None, init_cfg=[dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', layer=['_BatchNorm', 'GroupNorm'], val=1)]):
        super().__init__(init_cfg=init_cfg)
        assert (dcn is None), 'Not implemented yet.'
        assert (plugins is None), 'Not implemented yet.'
        # Per-stage config tuples must match the number of stages.
        assert (len(strides) == num_stages), f'The length of strides should be equal to num_stages, while the strides is {strides}, the length of strides is {len(strides)}, and the num_stages is {num_stages}.'
        assert (len(enc_num_convs) == num_stages), f'The length of enc_num_convs should be equal to num_stages, while the enc_num_convs is {enc_num_convs}, the length of enc_num_convs is {len(enc_num_convs)}, and the num_stages is {num_stages}.'
        assert (len(dec_num_convs) == (num_stages - 1)), f'The length of dec_num_convs should be equal to (num_stages-1), while the dec_num_convs is {dec_num_convs}, the length of dec_num_convs is {len(dec_num_convs)}, and the num_stages is {num_stages}.'
        assert (len(downsamples) == (num_stages - 1)), f'The length of downsamples should be equal to (num_stages-1), while the downsamples is {downsamples}, the length of downsamples is {len(downsamples)}, and the num_stages is {num_stages}.'
        assert (len(enc_dilations) == num_stages), f'The length of enc_dilations should be equal to num_stages, while the enc_dilations is {enc_dilations}, the length of enc_dilations is {len(enc_dilations)}, and the num_stages is {num_stages}.'
        assert (len(dec_dilations) == (num_stages - 1)), f'The length of dec_dilations should be equal to (num_stages-1), while the dec_dilations is {dec_dilations}, the length of dec_dilations is {len(dec_dilations)}, and the num_stages is {num_stages}.'
        self.num_stages = num_stages
        self.strides = strides
        self.downsamples = downsamples
        self.norm_eval = norm_eval
        self.base_channels = base_channels
        self.encoder = nn.ModuleList()
        self.decoder = nn.ModuleList()
        for i in range(num_stages):
            enc_conv_block = []
            if (i != 0):
                # Downsampling via explicit MaxPool when the stage stride is 1.
                if ((strides[i] == 1) and downsamples[(i - 1)]):
                    enc_conv_block.append(nn.MaxPool2d(kernel_size=2))
                # Decoder upsamples only where the encoder reduced resolution.
                upsample = ((strides[i] != 1) or downsamples[(i - 1)])
                self.decoder.append(UpConvBlock(conv_block=BasicConvBlock, in_channels=(base_channels * (2 ** i)), skip_channels=(base_channels * (2 ** (i - 1))), out_channels=(base_channels * (2 ** (i - 1))), num_convs=dec_num_convs[(i - 1)], stride=1, dilation=dec_dilations[(i - 1)], with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, upsample_cfg=(upsample_cfg if upsample else None), dcn=None, plugins=None))
            enc_conv_block.append(BasicConvBlock(in_channels=in_channels, out_channels=(base_channels * (2 ** i)), num_convs=enc_num_convs[i], stride=strides[i], dilation=enc_dilations[i], with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, dcn=None, plugins=None))
            self.encoder.append(nn.Sequential(*enc_conv_block))
            # Channels double at every stage.
            in_channels = (base_channels * (2 ** i))

    def forward(self, x):
        """Run encoder then decoder; returns all decoder-stage outputs (coarse to fine)."""
        self._check_input_divisible(x)
        enc_outs = []
        for enc in self.encoder:
            x = enc(x)
            enc_outs.append(x)
        dec_outs = [x]
        # Decode from the deepest stage back up, fusing encoder skips.
        for i in reversed(range(len(self.decoder))):
            x = self.decoder[i](enc_outs[i], x)
            dec_outs.append(x)
        return dec_outs

    def train(self, mode=True):
        super().train(mode)
        # Optionally freeze BatchNorm statistics during training.
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()

    def _check_input_divisible(self, x):
        # Input H/W must be divisible by the cumulative downsample factor.
        (h, w) = x.shape[(- 2):]
        whole_downsample_rate = 1
        for i in range(1, self.num_stages):
            if ((self.strides[i] == 2) or self.downsamples[(i - 1)]):
                whole_downsample_rate *= 2
        assert (((h % whole_downsample_rate) == 0) and ((w % whole_downsample_rate) == 0)), f'The input image size {(h, w)} should be divisible by the whole downsample rate {whole_downsample_rate}, when num_stages is {self.num_stages}, strides is {self.strides}, and downsamples is {self.downsamples}.'
()
_options(dbt_flags)
_tracking
def detect(**kwargs):
    """Run the re_data dbt models that detect monitored tables and columns.

    Builds a ``dbt run`` invocation for the re_data_columns /
    re_data_monitored models, forwarding any --vars and dbt flags, and raises
    if dbt exits non-zero.
    """
    # f-prefixes dropped: the strings contain no placeholders (same output).
    print('Detecting tables', 'RUN')
    dbt_vars = parse_dbt_vars(kwargs.get('dbt_vars'))
    run_list = ['dbt', 'run', '--models', 're_data_columns', 're_data_monitored']
    if dbt_vars:
        # dbt expects --vars as a YAML mapping string.
        run_list.extend(['--vars', yaml.dump(dbt_vars)])
    add_dbt_flags(run_list, kwargs)
    completed_process = subprocess.run(run_list)
    # Surface a non-zero dbt exit code as CalledProcessError.
    completed_process.check_returncode()
    print('Detecting tables', 'SUCCESS')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.