code stringlengths 281 23.7M |
|---|
def convert_to_nominalization(thought):
    """Map a known chain-of-thought string to its nominalized rephrasing.

    Scans *thought* for one of the known marker substrings and returns the
    corresponding canned nominalization; returns ``None`` when no marker
    matches.
    """
    # (marker substring, nominalized rephrasing) pairs; markers are disjoint.
    canned_rephrasings = (
        ('Shawn started', "Shawn's initial possession of 5 toys and his receipt of 4 more toys from his parents results in a total of 9 toys. 5 + 4 = 9."),
        ('There are originally 3 cars', 'The original count of 3 cars, with the addition of 2 more, leads to a total of 5 cars. 3 + 2 = 5.'),
        ('Jason started', "Jason's commencement with 20 lollipops and the subtraction of some for Denny leaves a remainder of 12. 20 - 12 = 8."),
        ('There were originally 9 computers', 'The initial count of 9 computers, combined with the addition of 20 over 4 days, results in 29 computers. 9 + 20 is 29.'),
        ('There are 15 trees originally', "The grove's initial count of 15 trees and the subsequent plantation increases the total to 21 trees. So there must have been 21 - 15 = 6."),
        ('Originally, Leah had', "Leah's starting count of 32 chocolates, combined with her sister's 42, and the subtraction of 35 eaten ones, gives a remainder of 39. 74 - 35 = 39."),
        ('Olivia had', "Olivia's original possession of 23 dollars and the expenditure of 15 dollars on bagels leaves a balance of 8 dollars. 23 - 15 is 8."),
        ('Michael started', "Michael's beginning with 58 golf balls and the subsequent loss of 25 over two days results in a count of 33 golf balls. 58 - 25 = 33."),
    )
    for marker, rephrasing in canned_rephrasings:
        if marker in thought:
            return rephrasing
    return None
class IBMCloudStorage(_CloudStorage):
    """Storage engine for IBM Cloud Object Storage, driven through the
    S3-compatible boto3 session API."""

    def __init__(self, context, hostname, is_secure, storage_path, access_key, secret_key, bucket_name, port=None, maximum_chunk_size_mb=None):
        """Configure the boto3-backed storage engine.

        :param maximum_chunk_size_mb: multipart chunk size in megabytes;
            defaults to 100 MB when not given.
        """
        endpoint = _build_endpoint_url(hostname, port=port, is_secure=is_secure)
        super(IBMCloudStorage, self).__init__(
            context,
            boto3.session.Session,
            {'endpoint_url': endpoint},
            {},
            storage_path,
            bucket_name,
            access_key,
            secret_key,
        )
        if maximum_chunk_size_mb is None:
            maximum_chunk_size_mb = 100
        # Stored in bytes.
        self.maximum_chunk_size = maximum_chunk_size_mb * 1024 * 1024

    def setup(self):
        """Install CORS rules on the bucket allowing browser GET and PUT."""
        read_rule = {'AllowedOrigins': ['*'], 'AllowedMethods': ['GET'], 'MaxAgeSeconds': 3000, 'AllowedHeaders': ['Authorization']}
        write_rule = {'AllowedOrigins': ['*'], 'AllowedMethods': ['PUT'], 'MaxAgeSeconds': 3000, 'AllowedHeaders': ['Content-Type', 'x-amz-acl', 'origin']}
        self.get_cloud_bucket().Cors().put(CORSConfiguration={'CORSRules': [read_rule, write_rule]})
# NOTE(review): the bare "()" below is almost certainly the residue of a
# stripped decorator (e.g. a Celery "@app.task()") — confirm against upstream.
()
def daily_update_keywords(day=None):
    """Rebuild the per-keyword ad impression aggregates for one day.

    Deletes the day's existing ``KeywordImpression`` rows, re-aggregates
    ``Offer`` rows from the replica database, then upserts the totals into
    the default database.

    :param day: day to aggregate; resolved by ``get_day`` (semantics of
        ``None`` depend on ``get_day`` — confirm there).
    """
    (start_date, end_date) = get_day(day)
    log.info('Updating KeywordImpression for %s-%s', start_date, end_date)
    # Rebuild from scratch: drop the day's existing aggregates first.
    KeywordImpression.objects.using('default').filter(date__gte=start_date, date__lt=end_date).delete()
    # "ad:publisher:keyword" -> running counter totals.
    keyword_mapping = defaultdict((lambda : {'decisions': 0, 'offers': 0, 'views': 0, 'clicks': 0}))
    queryset = Offer.objects.using(settings.REPLICA_SLUG).filter(date__gte=start_date, date__lt=end_date)
    all_topics = Topic.load_from_cache()
    # iterator() avoids materializing the whole day's offers in memory.
    for values in queryset.values('publisher', 'advertisement', 'keywords', 'viewed', 'clicked').annotate(total_decisions=Count('keywords'), total_offers=Count('keywords', filter=Q(advertisement__isnull=False)), total_views=Count('keywords', filter=Q(viewed=True)), total_clicks=Count('keywords', filter=Q(clicked=True))).order_by('-total_decisions').values('publisher', 'advertisement', 'keywords', 'advertisement__flight__targeting_parameters', 'total_decisions', 'total_offers', 'total_views', 'total_clicks').iterator():
        # Skip rows with no page keywords or no flight targeting to match on.
        if (not (values['keywords'] and values['advertisement__flight__targeting_parameters'])):
            continue
        page_keywords = set(values['keywords'])
        flight_targeting = values['advertisement__flight__targeting_parameters']
        flight_keywords = set(flight_targeting.get('include_keywords', {}))
        flight_topics = set(flight_targeting.get('include_topics', {}))
        # Expand targeted topics into their member keywords.
        for topic in flight_topics:
            if (topic in all_topics):
                for kw in all_topics[topic]:
                    flight_keywords.add(kw)
        matched_keywords = (page_keywords & flight_keywords)
        # Credit the row's totals to every matched keyword.
        for keyword in matched_keywords:
            advertisement_id = values['advertisement']
            publisher_id = values['publisher']
            index = f'{advertisement_id}:{publisher_id}:{keyword}'
            keyword_mapping[index]['decisions'] += values['total_decisions']
            keyword_mapping[index]['offers'] += values['total_offers']
            keyword_mapping[index]['views'] += values['total_views']
            keyword_mapping[index]['clicks'] += values['total_clicks']
    # Upsert the aggregated totals.
    for (data, value) in keyword_mapping.items():
        (ad, publisher, keyword) = data.split(':')
        # 'None' appears when the offer row had no advertisement.
        if (ad == 'None'):
            ad = None
        (impression, _) = KeywordImpression.objects.using('default').get_or_create(date=start_date, publisher_id=publisher, advertisement_id=ad, keyword=keyword)
        # F() expressions make the increment atomic at the database level.
        KeywordImpression.objects.using('default').filter(pk=impression.pk).update(decisions=(F('decisions') + value['decisions']), offers=(F('offers') + value['offers']), views=(F('views') + value['views']), clicks=(F('clicks') + value['clicks']))
class ResNet_Auxiliary(nn.Module):
    """CIFAR ResNet backbone with per-stage auxiliary classifiers plus a
    final ensemble classifier over (possibly mixup-combined) features."""

    def __init__(self, block, num_blocks, num_classes=100):
        super(ResNet_Auxiliary, self).__init__()
        self.backbone = CIFAR_ResNet(block, num_blocks, num_classes)
        self.auxiliary_classifier = Auxiliary_Classifier(block, num_blocks, num_classes)
        self.final_aux_classifier = ResNet_Final_Auxiliary_Classifer(block, num_classes)

    def forward(self, x, lam=0.5, index=None):
        """Run backbone + auxiliary heads.

        Eval mode returns ``(aux_logits, aux_feats)``; training mode also
        returns ensemble logits for lam-mixed and pre-mixed feature halves.
        """
        logits, features = self.backbone(x)
        # Auxiliary heads consume every stage except the last.
        aux_logits, stage_feats = self.auxiliary_classifier(features[:-1])
        stage_feats.append(features[-1])
        aux_logits.append(logits)
        if not self.training:
            return (aux_logits, stage_feats)
        batch = features[0].size(0)
        half = batch // 2
        # First half is mixed here with weight lam against index-permuted rows;
        # second half is assumed to be mixed already upstream.
        mixed_feats = [lam * feat[:half] + (1 - lam) * feat[index] for feat in stage_feats]
        premixed_feats = [feat[half:] for feat in stage_feats]
        ensemble_logits = self.final_aux_classifier(mixed_feats)
        ensemble_mixup_logits = self.final_aux_classifier(premixed_feats)
        return (aux_logits, stage_feats, ensemble_logits, ensemble_mixup_logits)
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
    """Return paths of the *n* most recent checkpoint files in ``paths[0]``.

    :param paths: single-element list containing the checkpoint directory.
    :param n: number of checkpoints to return (most recent first).
    :param update_based: match ``checkpoint_<epoch>_<update>.pt`` (sorted by
        update number) instead of ``checkpoint<epoch>.pt``.
    :param upper_bound: if given, ignore checkpoints whose sort key exceeds it.
    :raises Exception: when fewer than *n* matching checkpoints are found.
    """
    assert len(paths) == 1, 'last_n_checkpoints expects exactly one path'
    path = paths[0]
    if update_based:
        pt_regexp = re.compile(r'checkpoint_\d+_(\d+)\.pt')
    else:
        pt_regexp = re.compile(r'checkpoint(\d+)\.pt')
    files = PathManager.ls(path)
    entries = []
    for f in files:
        m = pt_regexp.fullmatch(f)
        if m is not None:
            sort_key = int(m.group(1))
            if upper_bound is None or sort_key <= upper_bound:
                entries.append((sort_key, m.group(0)))
    if len(entries) < n:
        # BUG FIX: the message was previously passed as two extra Exception
        # args instead of being formatted into the string.
        raise Exception(
            'Found {} checkpoint files but need at least {}'.format(len(entries), n)
        )
    # Most recent first.
    return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def _check_new_episode_scores(config, db, update_db):
    """Fetch and store scores for each enabled show's latest episode.

    Only episodes that currently have no scores are queried; every registered
    link handler is polled for a score.

    :param config: application config (supplies the HTTP user agent).
    :param db: database facade.
    :param update_db: when False, run without committing (dry run).
    """
    info('Checking for new episode scores')
    shows = db.get_shows(enabled=True)
    for show in shows:
        latest_episode = db.get_latest_episode(show)
        if (latest_episode is not None):
            info('For show {} ({}), episode {}'.format(show.name, show.id, latest_episode.number))
            scores = db.get_episode_scores(show, latest_episode)
            if (len(scores) == 0):
                # No scores yet: ask every known link handler for one.
                for handler in services.get_link_handlers().values():
                    info('  Checking {} ({})'.format(handler.name, handler.key))
                    site = db.get_link_site(key=handler.key)
                    link = db.get_link(show, site)
                    if (link is None):
                        # Best-effort: skip this handler and keep going.
                        error('Failed to create link')
                        continue
                    new_score = handler.get_show_score(show, link, useragent=config.useragent)
                    if (new_score is not None):
                        info('  Score: {}'.format(new_score))
                        # Defer the commit so dry runs leave the DB untouched.
                        db.add_episode_score(show, latest_episode, site, new_score, commit=False)
                        if update_db:
                            db.commit()
            else:
                info('  Already has scores, ignoring')
def get_config():
    """Build the NCSN++ / VE-SDE configuration for 320x320 single-channel CT.

    Starts from ``get_default_configs()`` and overrides the training,
    evaluation, sampling, data, model, and optimizer groups for the
    ``ct2d_320`` dataset with a predictor-corrector sampler
    (sparse-view MAR task).
    """
    config = get_default_configs()
    # --- training: continuous-time VE-SDE ---
    training = config.training
    training.batch_size = 64
    training.n_iters = 2400001
    training.snapshot_sampling = True
    training.sde = 'vesde'
    training.continuous = True
    # --- evaluation ---
    evaluate = config.eval
    evaluate.num_samples = 50000
    evaluate.ckpt_id = 101
    evaluate.batch_size = 128
    # --- sampling: predictor-corrector (reverse diffusion + Langevin) ---
    sampling = config.sampling
    sampling.method = 'pc'
    sampling.predictor = 'reverse_diffusion'
    sampling.corrector = 'langevin'
    sampling.iradon_K = 1.8
    sampling.snr = 0.261
    sampling.coeff = 0.865
    sampling.task = 'sparse_mar'
    sampling.n_projections = 23
    sampling.expansion = 4
    sampling.cs_solver = 'projection'
    # --- data: 320x320 grayscale CT, no augmentation ---
    data = config.data
    data.dataset = 'ct2d_320'
    data.image_size = 320
    data.num_channels = 1
    data.centered = False
    data.random_flip = False
    data.uniform_dequantization = False
    # --- model: NCSN++ architecture ---
    model = config.model
    model.name = 'ncsnpp'
    model.scale_by_sigma = True
    model.sigma_max = 128.0
    model.num_scales = 1000
    model.ema_rate = 0.999
    model.sigma_min = 0.01
    model.beta_min = 0.1
    model.beta_max = 20.0
    model.normalization = 'GroupNorm'
    model.nonlinearity = 'swish'
    model.nf = 32
    model.ch_mult = (1, 1, 2, 2, 2, 2, 2)
    model.num_res_blocks = 2
    model.attn_resolutions = (20,)
    model.dropout = 0.0
    model.resamp_with_conv = True
    model.conditional = True
    model.fir = True
    model.fir_kernel = [1, 3, 3, 1]
    model.skip_rescale = True
    model.resblock_type = 'biggan'
    model.progressive = 'output_skip'
    model.progressive_input = 'input_skip'
    model.progressive_combine = 'sum'
    model.attention_type = 'ddpm'
    model.init_scale = 0.0
    model.fourier_scale = 16
    model.conv_size = 3
    # --- optimizer: Adam with warmup and gradient clipping ---
    optim = config.optim
    optim.weight_decay = 0
    optim.optimizer = 'Adam'
    optim.lr = 0.0002
    optim.beta1 = 0.9
    optim.amsgrad = False
    optim.eps = 1e-08
    optim.warmup = 5000
    optim.grad_clip = 1.0
    config.seed = 42
    return config
def importGetMutationData(lines):
    """Extract mutation records from *lines*, consuming them in place.

    Scans for mutant header lines (``mutantHeaderPattern``); each header
    starts a new record whose body lines accumulate until a blank line or
    the next header. Consumed lines are removed from *lines*; blank lines
    are left in place.

    :param lines: list of strings; mutated (consumed entries deleted).
    :returns: dict mapping mutant ref (int) to ``(mutaType, mutaAttrs)``
        as produced by ``parseMutant``.
    """
    mutaLinesMap = {}          # ref -> accumulated body lines
    currentMutaRef = None      # ref of the record in progress, if any
    currentMutaLines = []
    consumedIndices = set()    # indices in *lines* to delete afterwards

    def completeMutaLines():
        # Flush the record in progress (if any) into mutaLinesMap.
        if ((currentMutaRef is not None) and currentMutaLines):
            mutaLinesMap[currentMutaRef] = currentMutaLines

    for (i, line) in enumerate(lines):
        m = mutantHeaderPattern.match(line)
        if m:
            # New header: close the previous record, start a fresh one.
            completeMutaLines()
            currentMutaRef = int(m.group('ref'))
            currentMutaLines = []
            currentMutaLines.append(m.group('tail'))
            consumedIndices.add(i)
        elif (not line):
            # Blank line terminates the current record (line not consumed).
            completeMutaLines()
            currentMutaRef = None
            currentMutaLines = []
        elif (currentMutaRef is not None):
            currentMutaLines.append(line)
            consumedIndices.add(i)
        else:
            # No record in progress: nothing to flush (no-op call).
            completeMutaLines()
    # NOTE(review): a record still open at end of input is never flushed —
    # possibly intentional if input always ends with a blank line; confirm.
    # Delete consumed lines from the end so earlier indices stay valid.
    for i in sorted(consumedIndices, reverse=True):
        del lines[i]
    data = {}
    for (ref, mutaLines) in mutaLinesMap.items():
        (_, mutaType, mutaAttrs) = parseMutant(mutaLines)
        data[ref] = (mutaType, mutaAttrs)
    return data
def train_video_predictor(cfg):
    """Train a frame-conditioned NextNet diffusion model on a single video.

    Reads frames from ``./images/video/<cfg.image_name>``, crops to 95% of
    the frame size, and trains for 200k steps with PyTorch Lightning on one
    GPU, logging to TensorBoard under ``<run_name>_predictor``.
    """
    training_steps = 200000
    frames = read_frames_from_dir(f'./images/video/{cfg.image_name}')
    # Random crops at 95% of the full frame resolution (H, W are the last
    # two dims of the frame tensors).
    crop_size = (int((frames[0].shape[(- 2)] * 0.95)), int((frames[0].shape[(- 1)] * 0.95)))
    train_dataset = FrameSet(frames=frames, crop_size=crop_size)
    train_loader = DataLoader(train_dataset, batch_size=1, num_workers=4, shuffle=True)
    # in_channels=6: presumably current + conditioning frame stacked — confirm.
    model = NextNet(in_channels=6, filters_per_layer=cfg.network_filters, depth=cfg.network_depth, frame_conditioned=True)
    diffusion = ConditionalDiffusion(model, training_target='noise', noise_schedule='cosine', timesteps=cfg.diffusion_timesteps)
    # Keep the last checkpoint plus the 3 best by training loss.
    model_callbacks = [pl.callbacks.ModelSummary(max_depth=(- 1)), pl.callbacks.ModelCheckpoint(filename='single-level-{step}', save_last=True, save_top_k=3, monitor='train_loss', mode='min')]
    tb_logger = pl.loggers.TensorBoardLogger('lightning_logs/', name=cfg.image_name, version=(cfg.run_name + '_predictor'))
    trainer = pl.Trainer(max_steps=training_steps, gpus=1, auto_select_gpus=True, logger=tb_logger, log_every_n_steps=10, callbacks=model_callbacks)
    trainer.fit(diffusion, train_loader)
class GaussianMLPPolicy(StochasticPolicy, LasagnePowered, Serializable):
    """MLP policy with a state-conditioned diagonal Gaussian action
    distribution.

    The mean is produced by an MLP over the flattened observation; the
    log-std is either a trainable parameter vector, a second MLP sharing
    the observation input (``adaptive_std``), or a user-supplied network.
    """

    def __init__(self, env_spec, hidden_sizes=(32, 32), learn_std=True, init_std=1.0, adaptive_std=False, std_share_network=False, std_hidden_sizes=(32, 32), min_std=1e-06, std_hidden_nonlinearity=NL.tanh, hidden_nonlinearity=NL.tanh, output_nonlinearity=None, mean_network=None, std_network=None, dist_cls=DiagonalGaussian):
        """
        :param env_spec: environment spec; the action space must be a Box.
        :param hidden_sizes: hidden layer sizes of the mean MLP.
        :param learn_std: whether the parameterized log-std is trainable.
        :param init_std: initial action standard deviation.
        :param adaptive_std: learn the std as a second MLP of the observation.
        :param std_share_network: NOTE(review): accepted but never read in
            this body — confirm against upstream.
        :param std_hidden_sizes: hidden sizes of the std MLP (adaptive_std).
        :param min_std: lower clip for the std; None disables clipping.
        :param mean_network: optional custom mean network.
        :param std_network: optional custom std network (takes precedence
            over adaptive_std).
        :param dist_cls: distribution class, defaults to DiagonalGaussian.
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)
        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim
        if (mean_network is None):
            mean_network = MLP(input_shape=(obs_dim,), output_dim=action_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity)
        self._mean_network = mean_network
        l_mean = mean_network.output_layer
        obs_var = mean_network.input_layer.input_var
        if (std_network is not None):
            l_log_std = std_network.output_layer
        elif adaptive_std:
            # Std MLP shares the observation input layer with the mean MLP.
            std_network = MLP(input_shape=(obs_dim,), input_layer=mean_network.input_layer, output_dim=action_dim, hidden_sizes=std_hidden_sizes, hidden_nonlinearity=std_hidden_nonlinearity, output_nonlinearity=None)
            l_log_std = std_network.output_layer
        else:
            # State-independent trainable vector of log-stds.
            l_log_std = ParamLayer(mean_network.input_layer, num_units=action_dim, param=lasagne.init.Constant(np.log(init_std)), name='output_log_std', trainable=learn_std)
        self.min_std = min_std
        (mean_var, log_std_var) = L.get_output([l_mean, l_log_std])
        if (self.min_std is not None):
            # Clip the log-std from below for numerical stability.
            log_std_var = TT.maximum(log_std_var, np.log(min_std))
        (self._mean_var, self._log_std_var) = (mean_var, log_std_var)
        self._l_mean = l_mean
        self._l_log_std = l_log_std
        self._dist = dist_cls(action_dim)
        LasagnePowered.__init__(self, [l_mean, l_log_std])
        super(GaussianMLPPolicy, self).__init__(env_spec)
        # Compiled function: observations -> (mean, log_std).
        self._f_dist = ext.compile_function(inputs=[obs_var], outputs=[mean_var, log_std_var])

    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic distribution parameters (mean, log_std) for *obs_var*."""
        (mean_var, log_std_var) = L.get_output([self._l_mean, self._l_log_std], obs_var)
        if (self.min_std is not None):
            log_std_var = TT.maximum(log_std_var, np.log(self.min_std))
        return dict(mean=mean_var, log_std=log_std_var)

    def get_action(self, observation):
        """Sample one action; returns ``(action, dist_info)``."""
        flat_obs = self.observation_space.flatten(observation)
        (mean, log_std) = [x[0] for x in self._f_dist([flat_obs])]
        # Reparameterized sample: mean + std * N(0, I).
        rnd = np.random.normal(size=mean.shape)
        action = ((rnd * np.exp(log_std)) + mean)
        return (action, dict(mean=mean, log_std=log_std))

    def get_actions(self, observations):
        """Vectorized :meth:`get_action` over a batch of observations."""
        flat_obs = self.observation_space.flatten_n(observations)
        (means, log_stds) = self._f_dist(flat_obs)
        rnd = np.random.normal(size=means.shape)
        actions = ((rnd * np.exp(log_stds)) + means)
        return (actions, dict(mean=means, log_std=log_stds))

    def get_reparam_action_sym(self, obs_var, action_var, old_dist_info_vars):
        """Re-express *action_var* under the current distribution.

        Recovers the noise epsilon implied by the old (mean, log_std) and
        replays it through the new parameters (reparameterization trick).
        """
        new_dist_info_vars = self.dist_info_sym(obs_var, action_var)
        (new_mean_var, new_log_std_var) = (new_dist_info_vars['mean'], new_dist_info_vars['log_std'])
        (old_mean_var, old_log_std_var) = (old_dist_info_vars['mean'], old_dist_info_vars['log_std'])
        # Small constant guards against division by a zero std.
        epsilon_var = ((action_var - old_mean_var) / (TT.exp(old_log_std_var) + 1e-08))
        new_action_var = (new_mean_var + (epsilon_var * TT.exp(new_log_std_var)))
        return new_action_var

    def log_diagnostics(self, paths):
        """Record the average policy std over the sampled paths."""
        log_stds = np.vstack([path['agent_infos']['log_std'] for path in paths])
        logger.record_tabular('AveragePolicyStd', np.mean(np.exp(log_stds)))

    # NOTE(review): upstream rllab defines this as a @property; the decorator
    # may have been lost in this dump — confirm.
    def distribution(self):
        """The policy's action distribution object."""
        return self._dist
def test_win_vars_set(windows, xdg_envs):
    """On Windows with XDG env vars set, platform paths follow the env vars.

    Relies on the ``windows`` and ``xdg_envs`` fixtures to fake the platform
    and the /ch, /cc, /cc2, /dh, /dc, /dc2 environment locations.
    """
    pp = platform.get_platform_paths('pypyr', 'config.yaml')
    assert (pp == platform.PlatformPaths(config_user=Path('/ch//pypyr/config.yaml'), config_common=[Path('/cc/pypyr/config.yaml'), Path('/cc2/pypyr/config.yaml')], data_dir_user=Path('/dh/pypyr'), data_dir_common=[Path('/dc/pypyr'), Path('/dc2/pypyr')]))
def test_finalize_strict_too_many_args():
    """finalize() must reject soquets for registers that were never declared.

    After disabling implicit register allocation, passing an extra 'z'
    soquet should raise a BloqError naming the offending register.
    """
    (bb, x, y) = _get_bb()
    (x2, y2) = bb.add(TestTwoBitOp(), ctrl=x, target=y)
    # Disallow auto-creating registers so the extra arg is an error.
    bb.add_register_allowed = False
    with pytest.raises(BloqError, match='Finalizing does not accept Soquets.*z.*'):
        bb.finalize(x=x2, y=y2, z=Soquet(RightDangle, Register('asdf', 1)))
# NOTE(review): the decorator below was reconstructed from a mangled
# '.parametrize(...)' residue — confirm against the original module.
@pytest.mark.parametrize('filename, expected', [('foo.bar', 'foo.bar'), ('foo"bar', 'foo%22bar'), ('foo\x00bar', 'foo%00bar'), ('foobar");alert("attack!");', 'foobar%22);alert(%22attack!%22);')])
def test_generate_pdfjs_script(filename, expected):
    """Filenames are percent-escaped before being embedded in the pdf.js
    bootstrap script, so quote/NUL injection cannot break out of the URL."""
    expected_open = 'open("qute://pdfjs/file?filename={}");'.format(expected)
    actual = pdfjs._generate_pdfjs_script(filename)
    assert (expected_open in actual)
    assert ('PDFView' in actual)
def downsample_conv(in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):
    """Build a conv + norm downsample path for a residual shortcut.

    Collapses to a 1x1 convolution when neither stride nor dilation is
    active; otherwise keeps *kernel_size* with the appropriate padding.
    """
    if norm_layer is None:
        norm_layer = nn.BatchNorm2d
    # A plain projection (1x1) suffices when no spatial change is needed.
    if stride == 1 and dilation == 1:
        kernel_size = 1
    if kernel_size > 1:
        conv_dilation = first_dilation or dilation
    else:
        conv_dilation = 1
    padding = get_padding(kernel_size, stride, conv_dilation)
    conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=conv_dilation, bias=False)
    return nn.Sequential(conv, norm_layer(out_channels))
def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult:
    """Boot the KUnit kernel, run the requested tests, and time the run.

    :param linux: the configured/built kernel source tree to execute.
    :param request: run options (kernel args, timeout, filter, build dir).
    :returns: a SUCCESS-status KunitResult wrapping the raw kernel output
        and the elapsed wall time.
    """
    kunit_parser.print_with_timestamp('Starting KUnit Kernel ...')
    test_start = time.time()
    # No timeout when running all tests; they may take arbitrarily long.
    result = linux.run_kernel(args=request.kernel_args, timeout=(None if request.alltests else request.timeout), filter_glob=request.filter_glob, build_dir=request.build_dir)
    test_end = time.time()
    return KunitResult(KunitStatus.SUCCESS, result, (test_end - test_start))
# NOTE(review): both the parametrizing decorator and the '@model_spec.decorator'
# lines below were reconstructed from a mangled source dump — confirm the
# exact decorator names against the original test module.
@parametrize_model_spec('named_tuple', 'attrs')
def test_generic_mixed_inheritance(model_spec):
    """Fields from generic and non-generic parents combine in MRO order,
    with the generic parameter resolved to its concrete argument."""
    @model_spec.decorator
    class Parent1(*model_spec.bases):
        a: int

    @model_spec.decorator
    class Parent2(*model_spec.bases, Generic[T]):
        b: T

    @model_spec.decorator
    class Child12(Parent1, Parent2[str]):
        c: bool

    assert_fields_types(Child12, {'a': int, 'b': str, 'c': bool})

    @model_spec.decorator
    class Child21(Parent2[str], Parent1):
        c: bool

    assert_fields_types(Child21, {'b': str, 'a': int, 'c': bool})
class _Definition():
    """A group of settings that share a key prefix and map onto Chromium
    command-line switches."""

    def __init__(self, *args: _Setting, mandatory: Set[str], prefix: str, switch_names: Optional[Mapping[Optional[str], str]] = None) -> None:
        """
        :param args: the settings belonging to this definition.
        :param mandatory: option names that must always be present.
        :param prefix: key prefix applied to every setting.
        :param switch_names: option -> switch-name mapping; the ``None`` key
            is the fallback switch. Defaults to blink-settings only.
        """
        self._settings = args
        self.mandatory = mandatory
        self.prefix = prefix
        if (switch_names is not None):
            self._switch_names = switch_names
        else:
            # By default every setting is routed to the blink-settings switch.
            self._switch_names = {None: _BLINK_SETTINGS}

    def prefixed_settings(self) -> Iterator[Tuple[(str, _Setting)]]:
        """Yield ``(switch_name, prefixed_setting)`` pairs for all settings."""
        for setting in self._settings:
            switch = self._switch_names.get(setting.option, self._switch_names[None])
            (yield (switch, setting.with_prefix(self.prefix)))

    def copy_with(self, attr: str, value: Any) -> '_Definition':
        """Return a shallow copy with *attr* set to *value*."""
        new = copy.copy(self)
        setattr(new, attr, value)
        return new

    def copy_add_setting(self, setting: _Setting) -> '_Definition':
        """Return a shallow copy with *setting* appended."""
        new = copy.copy(self)
        new._settings = (self._settings + (setting,))
        return new

    def copy_replace_setting(self, option: str, chromium_key: str) -> '_Definition':
        """Return a deep copy with *option*'s chromium_key replaced.

        :raises ValueError: if no setting matches *option*.
        """
        # Deep copy so mutating the matched setting leaves `self` untouched.
        new = copy.deepcopy(self)
        for setting in new._settings:
            if (setting.option == option):
                setting.chromium_key = chromium_key
                return new
        raise ValueError(f'Setting {option} not found in {self}')
# NOTE(review): the three decorators below were reconstructed from a mangled
# source dump ('_on_failure', '.parametrize') — confirm against the original
# raiden test module.
@raise_on_failure
@pytest.mark.parametrize('number_of_nodes', [3])
@pytest.mark.parametrize('channels_per_node', [CHAIN])
def test_receive_lockedtransfer_invalidnonce(raiden_network: List[RaidenService], number_of_nodes, deposit, token_addresses, reveal_timeout, network_wait):
    """A LockedTransfer that reuses an already-consumed nonce must be
    ignored: the valid payment still completes and the app0/app1 channel
    balances stay in sync."""
    (app0, app1, app2) = raiden_network
    token_address = token_addresses[0]
    token_network_address = views.get_token_network_address_by_token_address(views.state_from_raiden(app0), app0.default_registry.address, token_address)
    assert token_network_address
    channel0 = get_channelstate(app0, app1, token_network_address)
    amount = 10
    payment_identifier = PaymentID(1)
    # A successful mediated transfer consumes nonce 1 on the channel.
    secrethash = transfer(initiator_app=app0, target_app=app2, token_address=token_address, amount=PaymentAmount(10), identifier=payment_identifier, timeout=(network_wait * number_of_nodes), routes=[[app0, app1, app2]])
    repeated_nonce = Nonce(1)
    expiration = (reveal_timeout * 2)
    # Hand-craft a transfer that replays the already-used nonce.
    mediated_transfer_message = LockedTransfer(chain_id=UNIT_CHAIN_ID, message_identifier=make_message_identifier(), payment_identifier=payment_identifier, nonce=repeated_nonce, token_network_address=token_network_address, token=token_address, channel_identifier=channel0.identifier, transferred_amount=TokenAmount(amount), locked_amount=LockedAmount(amount), recipient=app1.address, locksroot=make_locksroot(), lock=Lock(amount=PaymentWithFeeAmount(amount), expiration=expiration, secrethash=UNIT_SECRETHASH), target=TargetAddress(app2.address), initiator=InitiatorAddress(app0.address), signature=EMPTY_SIGNATURE, metadata=Metadata(routes=[RouteMetadata(route=[app1.address, app2.address], address_metadata={})]))
    sign_and_inject(mediated_transfer_message, app0.signer, app1)
    with block_timeout_for_transfer_by_secrethash(app1, secrethash):
        wait_assert(assert_synced_channel_state, token_network_address, app0, (deposit - amount), [], app1, (deposit + amount), [])
def get_cast_class(orig_cls, new_base_cls):
    """Return a subclass of *orig_cls* that also inherits *new_base_cls*.

    The generated class is cached on *orig_cls*'s module under a prefixed
    name, so repeated calls with the same original class reuse one type.
    """
    module = inspect.getmodule(orig_cls)
    cast_name = f'{CAST_CLASS_PREFIX}{orig_cls.__name__}'
    # Reuse a previously generated cast class if the module already has one.
    if hasattr(module, cast_name):
        return getattr(module, cast_name)
    cast_cls = type(cast_name, (new_base_cls, orig_cls), {})
    cast_cls.__module__ = module.__name__
    setattr(module, cast_name, cast_cls)
    return cast_cls
class Image(SensorData):
    """Camera sensor frame holding a raw 32-bit-per-pixel buffer."""

    def __init__(self, frame_number, width, height, image_type, fov, raw_data):
        super(Image, self).__init__(frame_number=frame_number)
        # 4 bytes per pixel.
        assert (len(raw_data) == ((4 * width) * height))
        self.width = width
        self.height = height
        self.type = image_type
        self.fov = fov
        self.raw_data = raw_data
        # Lazily-built numpy representation, populated on first data() call.
        self._converted_data = None

    # NOTE(review): in upstream CARLA this is a @property; the decorator may
    # have been stripped from this dump — confirm.
    def data(self):
        """Convert (once) and cache the image as a numpy array.

        Depth and SemanticSegmentation frames are decoded specially; all
        other types are converted to an RGB array.
        """
        if (self._converted_data is None):
            # Deferred import keeps numpy optional until data is requested.
            from . import image_converter
            if (self.type == 'Depth'):
                self._converted_data = image_converter.depth_to_array(self)
            elif (self.type == 'SemanticSegmentation'):
                self._converted_data = image_converter.labels_to_array(self)
            else:
                self._converted_data = image_converter.to_rgb_array(self)
        return self._converted_data

    def save_to_disk(self, filename, format='.png'):
        """Write the frame to *filename* (extension appended if missing),
        creating parent directories as needed.

        :raises RuntimeError: when Pillow is not installed.
        """
        filename = _append_extension(filename, format)
        try:
            from PIL import Image as PImage
        except ImportError:
            raise RuntimeError('cannot import PIL, make sure pillow package is installed')
        image = PImage.frombytes(mode='RGBA', size=(self.width, self.height), data=self.raw_data, decoder_name='raw')
        # Reverse the first three channels and drop alpha — presumably the
        # raw buffer is BGRA, yielding RGB here; confirm.
        color = image.split()
        image = PImage.merge('RGB', color[2::(- 1)])
        folder = os.path.dirname(filename)
        if (not os.path.isdir(folder)):
            os.makedirs(folder)
        image.save(filename, quality=100)
# NOTE(review): '@register_model' reconstructed from a mangled '_model'
# residue (timm's model-registry convention) — confirm against upstream.
@register_model
def resmlp_big_24_224(pretrained=False, **kwargs):
    """ResMLP-B/24: patch size 8, 24 blocks, embed dim 768.

    From 'ResMLP: Feedforward networks for image classification...'
    """
    model_args = dict(patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-06), norm_layer=Affine, **kwargs)
    model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args)
    return model
class Solution():
    """LeetCode 523 — Continuous Subarray Sum."""

    def checkSubarraySum(self, nums: List[int], k: int) -> bool:
        """Return True iff *nums* has a subarray of length >= 2 whose sum is
        a multiple of *k*.

        Two prefix sums with equal remainder mod k bracket such a subarray;
        we only need the earliest position of each remainder.
        """
        # first_seen[r] = number of elements consumed when prefix remainder r
        # first appeared; seeding 0 -> 0 covers prefixes divisible by k.
        first_seen = {0: 0}
        running = 0
        for consumed, value in enumerate(nums, start=1):
            running += value
            remainder = running % k
            earliest = first_seen.setdefault(remainder, consumed)
            # A gap of at least two elements means a qualifying subarray.
            if earliest <= consumed - 2:
                return True
        return False
class Spinner(tqdm):
    """tqdm-based spinner that cycles a prefix character next to a title.

    A background thread advances the spinner every *refresh_interval*
    seconds between ``__enter__`` and ``__exit__``.
    """
    # Characters cycled through on each refresh tick.
    prefixes = ['/', '-', '\\', '|']

    def __init__(self, title: str, refresh_interval: float=0.5):
        def refresh_in_loop():
            # Background loop: advance and redraw until __exit__ sets _stop.
            while (not self._stop.is_set()):
                # NOTE(review): self._lock is presumably provided by tqdm
                # (hence refresh(nolock=True) inside it) — confirm.
                with self._lock:
                    self._index = ((self._index + 1) % len(self.prefixes))
                    self.refresh(nolock=True)
                time.sleep(refresh_interval)
        self._index = 0
        self._stop = threading.Event()
        self._refresh_thread = threading.Thread(target=refresh_in_loop)
        # Pre-render one message per spinner state.
        self._messages = [f'{prefix} {title}' for prefix in self.prefixes]
        super(Spinner, self).__init__()

    def __str__(self):
        # tqdm renders str(self); show the current spinner frame.
        return self._messages[self._index]

    def __enter__(self):
        self._refresh_thread.start()
        return super(Spinner, self).__enter__()

    def __exit__(self, *args, **kwargs):
        # Stop and join the refresh thread before tqdm's own cleanup.
        self._stop.set()
        self._refresh_thread.join()
        super(Spinner, self).__exit__(*args, **kwargs)
class Logger(object):
    """Tee-style logger mirroring writes to the console and, optionally, a file.

    Captures the current ``sys.stdout`` at construction; when *fpath* is
    given, every write also goes to that file (created along with its
    parent directory).
    """

    def __init__(self, fpath=None):
        # Capture the stream to mirror to (usually sys.stdout).
        self.console = sys.stdout
        self.file = None
        if (fpath is not None):
            mkdir_if_missing(osp.dirname(fpath))
            self.file = open(fpath, 'w')

    def __del__(self):
        self.close()

    def __enter__(self):
        # BUG FIX: previously returned None, making
        # ``with Logger(...) as log:`` bind nothing.
        return self

    def __exit__(self, *args):
        self.close()

    def write(self, msg):
        """Write *msg* to the console and, if open, the log file."""
        self.console.write(msg)
        if (self.file is not None):
            self.file.write(msg)

    def flush(self):
        """Flush both sinks; fsync the file so the log survives crashes."""
        self.console.flush()
        if (self.file is not None):
            self.file.flush()
            os.fsync(self.file.fileno())

    def close(self):
        """Close the log file. Safe to call multiple times.

        BUG FIX: previously also closed the console stream, which is shared
        (usually ``sys.stdout``) and must stay open for the rest of the
        program. getattr() guards against a partially-constructed instance
        being finalized via __del__.
        """
        f = getattr(self, 'file', None)
        if f is not None:
            f.close()
            self.file = None
def dumpAST(obj, ind=0, topnode=False):
    """Print a debug dump of a CommonMark AST node and its children.

    Only attributes that are set / non-empty are printed, each indented by
    *ind* tab levels.

    :param obj: AST node to dump.
    :param ind: current indentation depth (in tabs).
    :param topnode: recursion flag — children are only walked when the
        caller is itself a recursive invocation's parent; see the walker
        loop condition below.
    """
    indChar = ((('\t' * ind) + '-> ') if ind else '')
    print((((indChar + '[') + obj.t) + ']'))
    if (not (obj.title == '')):
        print(((('\t' + indChar) + 'Title: ') + (obj.title or '')))
    if (not (obj.info == '')):
        print(((('\t' + indChar) + 'Info: ') + (obj.info or '')))
    if (not (obj.destination == '')):
        print(((('\t' + indChar) + 'Destination: ') + (obj.destination or '')))
    if obj.is_open:
        print(((('\t' + indChar) + 'Open: ') + str(obj.is_open)))
    if obj.last_line_blank:
        print(((('\t' + indChar) + 'Last line blank: ') + str(obj.last_line_blank)))
    if obj.sourcepos:
        print(((('\t' + indChar) + 'Sourcepos: ') + str(obj.sourcepos)))
    if (not (obj.string_content == '')):
        print(((('\t' + indChar) + 'String content: ') + (obj.string_content or '')))
    # NOTE(review): Info is printed twice (also above) — possibly
    # unintentional duplication; confirm.
    if (not (obj.info == '')):
        print(((('\t' + indChar) + 'Info: ') + (obj.info or '')))
    if (not (obj.literal == '')):
        print(((('\t' + indChar) + 'Literal: ') + (obj.literal or '')))
    # List metadata is printed field-by-field, one extra tab deep.
    if obj.list_data.get('type'):
        print((('\t' + indChar) + 'List Data: '))
        print(((('\t\t' + indChar) + '[type] = ') + obj.list_data.get('type')))
        if obj.list_data.get('bullet_char'):
            print(((('\t\t' + indChar) + '[bullet_char] = ') + obj.list_data['bullet_char']))
        if obj.list_data.get('start'):
            print(((('\t\t' + indChar) + '[start] = ') + str(obj.list_data.get('start'))))
        if obj.list_data.get('delimiter'):
            print(((('\t\t' + indChar) + '[delimiter] = ') + obj.list_data.get('delimiter')))
        if obj.list_data.get('padding'):
            print(((('\t\t' + indChar) + '[padding] = ') + str(obj.list_data.get('padding'))))
        if obj.list_data.get('marker_offset'):
            print(((('\t\t' + indChar) + '[marker_offset] = ') + str(obj.list_data.get('marker_offset'))))
    if obj.walker:
        print((('\t' + indChar) + 'Children:'))
        walker = obj.walker()
        nxt = walker.nxt()
        # Recurse with topnode=True so grandchildren are not re-walked here.
        while ((nxt is not None) and (topnode is False)):
            dumpAST(nxt['node'], (ind + 2), topnode=True)
            nxt = walker.nxt()
def concat_into_splits(dl_dataset, src, tgt, extracted_folders, to_folder, debug):
    """Assemble train/valid/test split files from extracted download folders.

    The train split is first concatenated into a temporary folder and then
    passed through a language-id filter into *to_folder*; valid and test
    are concatenated directly.
    """
    to_folder_tmp = f'{to_folder}_tmp'
    os.makedirs(to_folder_tmp, exist_ok=True)
    concat_files('train', src, tgt, extracted_folders, split_urls=dl_dataset.train_urls, path_patterns=dl_dataset.train_files_patterns, to_folder=to_folder_tmp, debug=debug)
    # Train data additionally goes through language-id filtering.
    lid_filter('train', src, tgt, to_folder_tmp, to_folder, debug)
    concat_files('valid', src, tgt, extracted_folders, split_urls=dl_dataset.valid_urls, path_patterns=dl_dataset.valid_files_patterns, to_folder=to_folder, debug=debug)
    concat_files('test', src, tgt, extracted_folders, split_urls=dl_dataset.test_urls, path_patterns=dl_dataset.test_files_patterns, to_folder=to_folder, debug=debug)
class TestObject(TestCase):
    def test_dump_object(self):
        """Default dump keeps class vars, privates, properties and nulls."""
        obj = AllDumpable(AllDumpable())
        exp = {'_par_c': 10, 'par_v': None, 'par_p': 12, 'c': 1, '_c': 2, 'c_n': None, '_c_n': None, 'child': None, 'v': 3, '_v': 4, 'v_n': None, '_v_n': None, 'p': 5, '_p': 5, 'p_n': None, '_p_n': None}
        # The nested child dumps to the same shape (its own child is None).
        exp['child'] = exp.copy()
        dump = jsons.dump(obj)
        self.assertDictEqual(exp, dump)
    def test_dump_object_verbose(self):
        """Verbosity flags control which '-meta' entries a dump carries."""
        class A():
            def __init__(self, x):
                self.x = x
        class B():
            def __init__(self, a: A):
                self.a = a
        class C():
            def __init__(self, b: B):
                self.b = b
        c = C(B(A(42)))
        # WITH_CLASS_INFO records the class of every nested path.
        dumped = jsons.dump(c, verbose=jsons.Verbosity.WITH_CLASS_INFO)
        expectation = {'classes': {'/': '{}.C'.format(__name__), '/b': '{}.B'.format(__name__), '/b/a': '{}.A'.format(__name__)}}
        self.assertDictEqual(expectation, dumped['-meta'])
        # WITH_NOTHING yields a plain dump without any '-meta'.
        dumped2 = jsons.dump(c, verbose=jsons.Verbosity.WITH_NOTHING)
        self.assertDictEqual({'b': {'a': {'x': 42}}}, dumped2)
        dumped3 = jsons.dump(c, verbose=jsons.Verbosity.WITH_DUMP_TIME)
        self.assertTrue(('dump_time' in dumped3['-meta']))
        self.assertTrue(('classes' not in dumped3['-meta']))
        dumped4 = jsons.dump(c, verbose=jsons.Verbosity.WITH_EVERYTHING)
        self.assertTrue(('dump_time' in dumped4['-meta']))
        self.assertTrue(('classes' in dumped4['-meta']))
    def test_dump_object_verbose_with_dict(self):
        """Typed dict attributes are recorded with their typing form and round-trip."""
        class C():
            def __init__(self, d: Dict[(int, float)]):
                self.d = d
        c = C({42: 42.0})
        expectation = {'classes': {'/': '{}.C'.format(__name__), '/d': 'typing.Dict[int, float]'}}
        dumped = jsons.dump(c, verbose=jsons.Verbosity.WITH_CLASS_INFO)
        self.assertDictEqual(expectation, dumped['-meta'])
        # The meta info allows loading without an explicit target class.
        loaded = jsons.load(dumped)
        self.assertDictEqual({42: 42.0}, loaded.d)
    def test_dump_object_strip_properties(self):
        """strip_properties=True removes all property-derived entries."""
        obj = AllDumpable(AllDumpable())
        exp = {'_par_c': 10, 'par_v': None, 'c': 1, '_c': 2, 'c_n': None, '_c_n': None, 'child': None, 'v': 3, '_v': 4, 'v_n': None, '_v_n': None}
        exp['child'] = exp.copy()
        dump = jsons.dump(obj, strip_properties=True)
        self.assertDictEqual(exp, dump)
    def test_dump_object_strip_nulls(self):
        """strip_nulls=True drops every None-valued entry (recursively)."""
        obj = AllDumpable(AllDumpable())
        exp = {'_par_c': 10, 'par_p': 12, 'c': 1, '_c': 2, 'child': None, 'v': 3, '_v': 4, 'p': 5, '_p': 5}
        exp['child'] = exp.copy()
        # The inner child's own 'child' is None, so it gets stripped too.
        exp['child'].pop('child')
        dump = jsons.dump(obj, strip_nulls=True)
        self.assertDictEqual(exp, dump)
    def test_dump_object_strip_privates(self):
        """strip_privates=True drops every underscore-prefixed attribute."""
        obj = AllDumpable(AllDumpable())
        exp = {'par_v': None, 'par_p': 12, 'c': 1, 'c_n': None, 'child': None, 'v': 3, 'v_n': None, 'p': 5, 'p_n': None}
        exp['child'] = exp.copy()
        dump = jsons.dump(obj, strip_privates=True)
        self.assertDictEqual(exp, dump)
    def test_dump_object_strip_class_variables(self):
        """strip_class_variables=True drops class-level attributes."""
        obj = AllDumpable(AllDumpable())
        exp = {'par_v': None, 'par_p': 12, 'child': None, 'v': 3, '_v': 4, 'v_n': None, '_v_n': None, 'p': 5, '_p': 5, 'p_n': None, '_p_n': None}
        exp['child'] = exp.copy()
        dump = jsons.dump(obj, strip_class_variables=True)
        self.assertDictEqual(exp, dump)
    def test_dump_object_strip_attr(self):
        """strip_attr removes the named attribute(s): single name or tuple."""
        obj = AllDumpable(AllDumpable())
        dump1 = jsons.dump(obj, strip_attr='v')
        dump2 = jsons.dump(obj, strip_attr=('v', '_v'))
        exp1 = {'_par_c': 10, 'par_v': None, 'par_p': 12, 'c': 1, '_c': 2, 'c_n': None, '_c_n': None, 'child': None, '_v': 4, 'v_n': None, '_v_n': None, 'p': 5, '_p': 5, 'p_n': None, '_p_n': None}
        exp1['child'] = exp1.copy()
        exp2 = {'_par_c': 10, 'par_v': None, 'par_p': 12, 'c': 1, '_c': 2, 'c_n': None, '_c_n': None, 'child': None, 'v_n': None, '_v_n': None, 'p': 5, '_p': 5, 'p_n': None, '_p_n': None}
        exp2['child'] = exp2.copy()
        self.assertDictEqual(exp1, dump1)
        self.assertDictEqual(exp2, dump2)
    def test_dump_abc_class(self):
        """Instances of concrete subclasses of an ABC dump normally."""
        class A(ABC):
            pass
        class B(A):
            def __init__(self, x: int):
                self.x = x
        dumped = jsons.dump(B(42))
        self.assertDictEqual({'x': 42}, dumped)
    def test_dump_with_slots(self):
        """__slots__ classes dump all slot attributes, ctor params or not."""
        class C():
            __slots__ = ('x', 'y')
            def __init__(self, x):
                self.x = x
                self.y = 'This is no parameter'
        c = C('something')
        dumped = jsons.dump(c)
        self.assertDictEqual(dumped, {'x': 'something', 'y': 'This is no parameter'})
    def test_dump_as_parent_type(self):
        """cls=Parent with strict=True limits the dump to parent attributes."""
        class Parent():
            __slots__ = ['parent_name']
            def __init__(self, pname):
                self.parent_name = pname
        class Child(Parent):
            def __init__(self, cname, pname):
                Parent.__init__(self, pname)
                self.child_name = cname
        c = Child('John', 'William')
        dumped1 = jsons.dump(c)
        dumped2 = jsons.dump(c, cls=Parent, strict=True)
        self.assertDictEqual({'child_name': 'John', 'parent_name': 'William'}, dumped1)
        self.assertDictEqual({'parent_name': 'William'}, dumped2)
    def test_dump_with_error(self):
        """An exception raised while dumping surfaces as SerializationError."""
        class C():
            def x(self):
                raise KeyError('Some bug this is!')
        with self.assertRaises(SerializationError):
            jsons.dump(C())
    def test_load_too_many_args(self):
        """strict load rejects extra keys and reports them on the error."""
        class C():
            def __init__(self, x: int):
                self.x = x
        with self.assertRaises(SignatureMismatchError):
            jsons.load({'x': 1, 'y': 2}, C, strict=True)
        # The error exposes the offending argument, target class and source.
        try:
            jsons.load({'x': 1, 'y': 2}, C, strict=True)
        except SignatureMismatchError as err:
            self.assertEqual(err.argument, 'y')
            self.assertEqual(err.target, C)
            self.assertDictEqual(err.source, {'x': 1, 'y': 2})
    def test_load_object(self):
        """Nested objects load recursively using constructor type hints."""
        class A():
            def __init__(self):
                self.name = 'A'
        class B():
            def __init__(self, a: A):
                self.a = a
                self.name = 'B'
        b = B(A())
        loaded_b = jsons.load({'name': 'B', 'a': {'name': 'A'}}, B)
        self.assertEqual(b.name, loaded_b.name)
        self.assertEqual(b.a.name, loaded_b.a.name)
    def test_load_object_verbose(self):
        """A verbose dump of announced classes loads back into the right subclass."""
        class BarBase():
            pass
        class BarA(BarBase):
            def __init__(self, a: int):
                self.a = a
        class BarB(BarBase):
            def __init__(self, b: int):
                self.b = b
        class Foo(BarBase):
            def __init__(self, bar: BarBase):
                self.bar = bar
        # Announce so jsons can resolve the dumped class names on load.
        jsons.announce_class(Foo)
        jsons.announce_class(BarA)
        jsons.announce_class(BarB)
        jsons.announce_class(BarBase)
        foo = Foo(bar=BarA(a=5))
        dumped = jsons.dump(foo, verbose=True)
        loaded = jsons.load(dumped)
        self.assertTrue(isinstance(loaded, Foo))
        # The concrete subclass (BarA, not BarBase) must be restored.
        self.assertTrue(isinstance(loaded.bar, BarA))
def test_load_other_attributes(self):
    """Attributes outside __init__ hints are typed via __annotations__ or left raw."""
    class C():
        __annotations__ = {'y': float}

        def __init__(self, x: int):
            self.x = x
            self.y = 0
            self.z = 0

    loaded = jsons.load({'x': '42', 'y': '42', 'z': '42'}, C)
    self.assertEqual(42, loaded.x)    # cast via the __init__ hint
    self.assertEqual(42.0, loaded.y)  # cast via __annotations__
    self.assertEqual('42', loaded.z)  # no hint anywhere: stays a string
def test_load_object_without_type_hints_verbose(self):
    """Loading from '-meta' class info resolves dotted class names, raises
    UnknownClassError for unresolvable names, and honors announce_class aliases."""
    class A():
        def __init__(self, x):
            self.x = x

    class B():
        def __init__(self, a: A):
            self.a = a

    class C():
        def __init__(self, b: B):
            self.b = b

    # '-meta' maps JSON paths to dotted class names; the dotted names are
    # resolved by attribute lookup, so temporarily attach the local classes
    # to the jsons module itself to make 'jsons.A' etc. importable.
    dumped1 = {'b': {'a': {'x': 42}}, '-meta': {'classes': {'/': 'jsons.C', '/b': 'jsons.B', '/b/a': 'jsons.A'}}}
    jsons.A = A
    jsons.B = B
    jsons.C = C
    loaded = jsons.load(dumped1)
    del jsons.A
    del jsons.B
    del jsons.C
    self.assertEqual(42, loaded.b.a.x)
    # A non-importable, unannounced name must fail with UnknownClassError...
    dumped2 = {'x': 100, '-meta': {'classes': {'/': 'custom_class'}}}
    with self.assertRaises(UnknownClassError):
        jsons.load(dumped2)
    try:
        jsons.load(dumped2)
    except UnknownClassError as err:
        self.assertEqual('custom_class', err.target_name)
    # ...until it is announced as an alias for a real class.
    jsons.announce_class(A, 'custom_class')
    loaded2 = jsons.load(dumped2)
    self.assertEqual(100, loaded2.x)
def test_dump_load_object_verbose_without_announcing(self):
    """A verbose dump alone carries enough class info for a full reload."""
    class A():
        def __init__(self, x):
            self.x = x

    class B():
        def __init__(self, a: A):
            self.a = a

    class C():
        def __init__(self, b: B):
            self.b = b

    original = C(B(A(42)))
    reloaded = jsons.load(jsons.dump(original, verbose=True))
    self.assertEqual(42, reloaded.b.a.x)
def test_load_object_with_attr_getters(self):
    """attr_getters supply values for attributes absent from the source dict."""
    class A():
        def __init__(self, x, y):
            self.x = x
            self.y = y

    class B():
        def __init__(self, x):
            self.x = x

    getters = {'y': (lambda : 2)}
    expected_a = A(1, 2)
    loaded_a = jsons.load({'x': 1}, A, attr_getters=getters)
    self.assertEqual(expected_a.x, loaded_a.x)
    self.assertEqual(expected_a.y, loaded_a.y)
    # Even when __init__ has no 'y' parameter, the getter attaches one.
    expected_b = B(1)
    loaded_b = jsons.load({'x': 1}, B, attr_getters=getters)
    self.assertEqual(expected_b.x, loaded_b.x)
    self.assertEqual(2, loaded_b.y)
def test_load_object_with_default_value(self):
    """Parameters absent from the dict fall back to their __init__ defaults."""
    class A():
        def __init__(self, x, y=2):
            self.x = x
            self.y = y

    expected = A(1)
    loaded = jsons.load({'x': 1}, A)
    self.assertEqual(expected.x, loaded.x)
    self.assertEqual(expected.y, loaded.y)
def test_dump_load_object_deep(self):
    """Round-trip a structure with nested typed lists of objects and datetimes."""
    class A():
        def __init__(self):
            self.name = 'A'

    class B():
        def __init__(self, list_a: List[A], list_dates: List[datetime.datetime]):
            self.list_a = list_a
            self.list_dates = list_dates
            self.name = 'B'

    class C():
        def __init__(self, list_b: List[B]):
            self.list_b = list_b

    c = C([B([A(), A()], []), B([], [datetime.datetime.now(), datetime.datetime.now()])])
    dumped_c = jsons.dump(c)
    loaded_c = jsons.load(dumped_c, C)
    self.assertEqual(loaded_c.list_b[0].name, 'B')
    self.assertEqual(loaded_c.list_b[0].list_a[0].name, 'A')
    self.assertEqual(loaded_c.list_b[0].list_a[1].name, 'A')
    # Compare the datetime field-by-field down to whole seconds; sub-second
    # precision is deliberately not asserted (serialization may truncate it).
    self.assertEqual(loaded_c.list_b[1].list_dates[0].year, c.list_b[1].list_dates[0].year)
    self.assertEqual(loaded_c.list_b[1].list_dates[0].month, c.list_b[1].list_dates[0].month)
    self.assertEqual(loaded_c.list_b[1].list_dates[0].day, c.list_b[1].list_dates[0].day)
    self.assertEqual(loaded_c.list_b[1].list_dates[0].hour, c.list_b[1].list_dates[0].hour)
    self.assertEqual(loaded_c.list_b[1].list_dates[0].minute, c.list_b[1].list_dates[0].minute)
    self.assertEqual(loaded_c.list_b[1].list_dates[0].second, c.list_b[1].list_dates[0].second)
def test_dump_load_object_verbose(self):
    """A verbose dump of a StateHolder reloads as the very same type."""
    holder = StateHolder()
    reloaded = jsons.load(jsons.dump(holder, verbose=True))
    self.assertEqual(type(holder), type(reloaded))
def test_load_object_properties(self):
    """Read-only properties are left alone on load; settable properties are set.

    Fix: the ``@property``/``@x.setter`` decorators were missing from the
    source, leaving ``WithSetter.x`` as three plain methods (each ``def``
    silently overwriting the previous) and defeating the purpose of this
    test; the decorators are restored here.
    """
    class WithoutSetter():
        @property
        def x(self):
            return 123

    class WithSetter():
        def __init__(self):
            self.__x = 123

        @property
        def x(self):
            return self.__x

        @x.setter
        def x(self, x):
            self.__x = x

    # No setter: the property keeps its computed value.
    loaded1 = jsons.load({'x': 123}, WithoutSetter)
    self.assertEqual(loaded1.x, 123)
    # With a setter: the loaded value replaces the default.
    loaded2 = jsons.load({'x': 456}, WithSetter)
    self.assertEqual(loaded2.x, 456)
def test_load_slots(self):
    """With __slots__, only slotted names load; without, every key lands."""
    class ClassWithSlots():
        __slots__ = ('x', 'y')

        def __init__(self, x):
            self.x = x
            self.y = 'This is not a parameter'

    class ClassWithoutSlots():
        def __init__(self, x):
            self.x = x
            self.y = 'This is not a parameter'

    raw = {'x': 'something', 'y': 'something else', 'z': 'uh oh...'}
    slotted = jsons.load(raw, cls=ClassWithSlots)
    unslotted = jsons.load(raw, cls=ClassWithoutSlots)
    for attr in ('x', 'y'):
        self.assertTrue(hasattr(slotted, attr))
        self.assertTrue(hasattr(unslotted, attr))
    # 'z' is not a slot, so only the slotless class may receive it.
    self.assertTrue((not hasattr(slotted, 'z')))
    self.assertTrue(hasattr(unslotted, 'z'))
def test_dump_with_attr_fail(self):
    """Non-strict dump skips a failing attribute with a warning; strict raises."""
    class FailingClass():
        def i_will_fail(self):
            # NOTE(review): this looks like it was a @property whose decorator
            # got lost -- as a plain method it would not be evaluated on dump;
            # confirm against the project history.
            raise KeyError('Told you so')

    class C():
        def __init__(self, x: int, y: FailingClass):
            self.x = x
            self.y = y

    c = C(42, FailingClass())
    # Non-strict: the failing attribute is dropped and a warning recorded.
    with warnings.catch_warnings(record=True) as w:
        dumped1 = jsons.dump(c)
        self.assertTrue(('y' not in dumped1))
        warn_msg = w[0].message.args[0]
        self.assertTrue(('y' in warn_msg))
        self.assertTrue(('Told you so' in warn_msg))
    # Strict: the same failure must escalate to a SerializationError whose
    # message names the offending attribute and the original cause.
    with self.assertRaises(SerializationError):
        jsons.dump(c, strict=True)
    try:
        jsons.dump(c, strict=True)
    except SerializationError as err:
        self.assertTrue(('y' in err.message))
        self.assertTrue(('Told you so' in err.message))
def test_dump_object_with_str_hint(self):
    """A string-literal type hint ('str') is honored when dumping."""
    class C():
        def __init__(self, x: 'str'):
            self.x = x

    self.assertDictEqual({'x': 'test'}, jsons.dump(C('test'), cls=C))
def test_dump_and_load_with_innerclass(self):
    """Nested inner classes (including an inner Enum) round-trip correctly."""
    class Outer():
        class Inner():
            class InnerInner(Enum):
                A = 1
                B = 2
                C = 3

            def __init__(self, inner_inner: InnerInner):
                self.inner_inner = inner_inner

        # Class-level reference to the inner class; must survive a round-trip.
        attr1 = Inner

        def __init__(self, inner: Inner):
            self.inner = inner

    outer = Outer(Outer.Inner(Outer.Inner.InnerInner.B))
    dumped = jsons.dump(outer, strict=True)
    # Enum members serialize by name.
    self.assertEqual('B', dumped['inner']['inner_inner'])
    loaded = jsons.load(dumped, Outer)
    self.assertEqual(Outer.Inner.InnerInner.B, loaded.inner.inner_inner)
    self.assertEqual(Outer.Inner, loaded.attr1)
def test_dump_nested_object_roundtrip(self):
    """Self-nested objects survive a verbose dump/load round-trip."""
    class A():
        def __init__(self, inner):
            self.inner = inner

    class B():
        pass

    roundtrip = jsons.load(jsons.dump(A(A(B())), verbose=True))
    self.assertTrue(isinstance(roundtrip, A))
    self.assertTrue(isinstance(roundtrip.inner, A))
    self.assertTrue(isinstance(roundtrip.inner.inner, B))
def test_load_object_with_hashed_keys(self):
    """Loading a dict containing hashed keys ('-keys') into a plain class warns."""
    class C():
        ...

    with warnings.catch_warnings(record=True) as caught:
        jsons.load({'additional_attr': {'-keys': {}}}, C)
    self.assertEqual(1, len(caught))
    self.assertIn('additional_attr', str(caught[-1].message))
class BertCrfForSequenceLabeling(BertPreTrainedModel):
    """BERT encoder + linear emission layer + CRF head for sequence labeling."""

    def __init__(self, config):
        super(BertCrfForSequenceLabeling, self).__init__(config)
        self.bert = BertModel(config)
        if self.config.use_freezing:
            # Optionally freeze the language-model weights.
            self.bert = freezer.freeze_lm(self.bert)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.crf = CRF(num_tags=config.num_labels, batch_first=True)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None, return_dict=False):
        """Compute per-token emission logits and, when labels are given, the
        negative CRF log-likelihood.

        Returns a ``(loss?, logits)`` tuple, or a ``TokenClassifierOutput``
        when ``return_dict=True``.
        """
        # Keep the encoder outputs separate from the returned tuple; the
        # original code rebound `outputs` to a tuple and then accessed
        # `.hidden_states` on it, which raised AttributeError.
        bert_outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        sequence_output = self.dropout(bert_outputs[0])
        logits = self.classifier(sequence_output)
        # Default loss to None so the return_dict path does not hit a
        # NameError when no labels are provided (bug in the original).
        loss = None
        outputs = (logits,)
        if (labels is not None):
            # CRF returns the log-likelihood; negate it to obtain a loss.
            loss = ((- 1) * self.crf(emissions=logits, tags=labels, mask=attention_mask))
            outputs = ((loss,) + outputs)
        if (not return_dict):
            return outputs
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=getattr(bert_outputs, 'hidden_states', None), attentions=getattr(bert_outputs, 'attentions', None))
class Logger(object):
    """Tee sys.stdout/sys.stderr through this object, optionally mirroring
    everything to a log file; restores the original streams on close()."""

    def __init__(self, file_name: str=None, file_mode: str='w', should_flush: bool=True):
        # Open the mirror file only when a name was given.
        self.file = None
        if (file_name is not None):
            self.file = open(file_name, file_mode)
        self.should_flush = should_flush
        # Remember the real streams so close() can restore them.
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        sys.stdout = self
        sys.stderr = self

    def __enter__(self) -> 'Logger':
        return self

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        self.close()

    def write(self, text: str) -> None:
        """Forward text to the real stdout and, if configured, the log file."""
        if not text:
            # Avoid workarounds for tools that probe with empty writes.
            return
        if self.file is not None:
            self.file.write(text)
        self.stdout.write(text)
        if self.should_flush:
            self.flush()

    def flush(self) -> None:
        """Flush both sinks."""
        if self.file is not None:
            self.file.flush()
        self.stdout.flush()

    def close(self) -> None:
        """Flush, restore the original streams, and close the log file."""
        self.flush()
        # Only undo the redirection if we are still the active stream.
        if sys.stdout is self:
            sys.stdout = self.stdout
        if sys.stderr is self:
            sys.stderr = self.stderr
        if self.file is not None:
            self.file.close()
class File():
    """Descriptor describing a file attached to an owner object.

    Accessed on the class it returns itself; accessed on an instance it
    returns a BoundFile pairing this descriptor with that instance.
    """

    def __init__(self, pathspec, *, format=None, optional=False):
        if format is None:
            raise TypeError('Must provide a format.')
        self.pathspec = pathspec
        self.format = format
        self.optional = optional

    def __get__(self, obj, cls=None):
        # Class-level access yields the descriptor itself.
        return self if (obj is None) else BoundFile(self, obj)
class Test_ab13bd():
A = np.array([[0.0, 1.0], [(- 0.5), (- 0.1)]])
B = np.array([[0.0], [1.0]])
C = np.eye(2)
D = np.zeros((2, 1))
(Ad, Bd, Cd, Dd, dt) = signal.cont2discrete((A, B, C, D), 0.1, method='zoh')
def test_no_change_args_ccase(self):
acopy = self.A.copy()
bcopy = self.B.copy()
ccopy = self.C.copy()
dcopy = self.D.copy()
dico = 'C'
jobn = 'H'
(n, m) = self.B.shape
p = self.C.shape[0]
analysis.ab13bd(dico, jobn, n, m, p, self.A, self.B, self.C, self.D)
assert_array_equal(self.A, acopy)
assert_array_equal(self.B, bcopy)
assert_array_equal(self.C, ccopy)
assert_array_equal(self.D, dcopy)
def test_no_change_args_dcase(self):
acopy = self.Ad.copy()
bcopy = self.Bd.copy()
ccopy = self.Cd.copy()
dcopy = self.Dd.copy()
dico = 'D'
jobn = 'H'
(n, m) = self.Bd.shape
p = self.Cd.shape[0]
analysis.ab13bd(dico, jobn, n, m, p, self.Ad, self.Bd, self.Cd, self.Dd)
assert_array_equal(self.Ad, acopy)
assert_array_equal(self.Bd, bcopy)
assert_array_equal(self.Cd, ccopy)
assert_array_equal(self.Dd, dcopy)
def test_ab13bd_2norm_ccase(self):
A = self.A
B = self.B
C = self.C
D = self.D
(n, m) = self.B.shape
p = self.C.shape[0]
dico = 'C'
jobn = 'H'
h2norm = analysis.ab13bd(dico, jobn, n, m, p, A, B, C, D)
Lc = linalg.solve_continuous_lyapunov(A, ((- B) B.T))
h2norm_Lc = np.sqrt(np.trace(((C Lc) C.T)))
print(h2norm_Lc, h2norm)
assert_allclose(h2norm_Lc, h2norm, atol=1e-05)
Lo = linalg.solve_continuous_lyapunov(A.T, ((- C.T) C))
h2norm_Lo = np.sqrt(np.trace(((B.T Lo) B)))
print(h2norm_Lo, h2norm)
assert_allclose(h2norm_Lo, h2norm, atol=1e-05)
def test_ab13bd_2norm_dcase(self):
Ad = self.Ad
Bd = self.Bd
Cd = self.Cd
Dd = self.Dd
(n, m) = Bd.shape
p = Cd.shape[0]
dico = 'D'
jobn = 'H'
h2norm = analysis.ab13bd(dico, jobn, n, m, p, Ad, Bd, Cd, Dd)
Lc = linalg.solve_discrete_lyapunov(Ad, (Bd Bd.T))
h2norm_Lc = np.sqrt(np.trace((((Cd Lc) Cd.T) + (Dd Dd.T))))
print(h2norm, h2norm_Lc)
assert_allclose(h2norm_Lc, h2norm, atol=1e-05)
Lo = linalg.solve_discrete_lyapunov(Ad.T, (Cd.T Cd))
h2norm_Lo = np.sqrt(np.trace((((Bd.T Lo) Bd) + (Dd.T Dd))))
print(h2norm, h2norm_Lo)
assert_allclose(h2norm_Lo, h2norm, atol=1e-05) |
class LuksFileSystem(LoopbackFileSystemMixin, FileSystem):
    """Mounts a LUKS-encrypted volume: binds the image to a loopback device,
    opens it with cryptsetup, and exposes the decrypted /dev/mapper device as
    a single subvolume."""
    type = 'luks'
    # Partition-type GUID used for detection; value looks truncated -- TODO confirm.
    guids = ['CA7D7CCB-63ED-4C53-861C-CC']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Name of the /dev/mapper entry while open; None when the volume is closed.
        self.luks_name = None

    def detect(cls, source, description):
        # NOTE(review): first parameter is `cls` but no @classmethod decorator
        # is visible -- presumably stripped; confirm against the repository.
        res = super().detect(source, description)
        if (description == 'LUKS Volume'):
            res.update({cls: 0})
        return res
    (dependencies.cryptsetup)  # NOTE(review): likely a stripped decorator (e.g. @dependencies.require) -- confirm.
    def mount(self):
        """Open the LUKS container and return the decrypted volume.

        Raises IncorrectFilesystemError if the loopback device is not a LUKS
        volume, ArgumentError for malformed key material, KeyInvalidError for
        a rejected passphrase, and SubsystemError for other cryptsetup
        failures. The loopback device is freed on every error path.
        """
        self._find_loopback()
        try:
            _util.check_call_(['cryptsetup', 'isLuks', self.loopback], stderr=subprocess.STDOUT)
        except Exception:
            logger.warning('Not a LUKS volume')
            self._free_loopback()
            raise IncorrectFilesystemError()
        try:
            # Key material is encoded as "<type>:<value>":
            #   p: passphrase, f: key file, m: master key file.
            extra_args = []
            key = None
            if self.volume.key:
                (t, v) = self.volume.key.split(':', 1)
                if (t == 'p'):
                    key = v
                elif (t == 'f'):
                    extra_args = ['--key-file', v]
                elif (t == 'm'):
                    extra_args = ['--master-key-file', v]
            else:
                logger.warning('No key material provided for %s', self.volume)
        except ValueError:
            logger.exception('Invalid key material provided (%s) for %s. Expecting [arg]:[value]', self.volume.key, self.volume)
            self._free_loopback()
            raise ArgumentError()
        # Randomized mapper name to avoid collisions between mounts.
        self.luks_name = ('image_mounter_luks_' + str(random.randint(10000, 99999)))
        try:
            cmd = ['cryptsetup', 'luksOpen', self.loopback, self.luks_name]
            cmd.extend(extra_args)
            if (not self.volume.disk.read_write):
                # Open read-only when the disk image is read-only.
                cmd.insert(1, '-r')
            if (key is not None):
                logger.debug('$ {0}'.format(' '.join(cmd)))
                # Feed the passphrase over stdin rather than the command line.
                p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                p.communicate(key.encode('utf-8'))
                p.wait()
                retcode = p.poll()
                if retcode:
                    raise KeyInvalidError()
            else:
                _util.check_call_(cmd)
        except ImageMounterError:
            self.luks_name = None
            self._free_loopback()
            raise
        except Exception as e:
            self.luks_name = None
            self._free_loopback()
            raise SubsystemError(e)
        # Best-effort: parse the decrypted size (in sectors) from cryptsetup status.
        size = None
        try:
            result = _util.check_output_(['cryptsetup', 'status', self.luks_name])
            for line in result.splitlines():
                if (('size:' in line) and ('key' not in line)):
                    size = (int(line.replace('size:', '').replace('sectors', '').strip()) * self.volume.disk.block_size)
        except Exception:
            pass
        container = self.volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=size)
        container.info['fsdescription'] = 'LUKS Volume'
        container._real_path = ('/dev/mapper/' + self.luks_name)
        return container

    def unmount(self, allow_lazy=False):
        """Close the LUKS mapping (if open) and release the loopback device."""
        if (self.luks_name is not None):
            _util.check_call_(['cryptsetup', 'luksClose', self.luks_name], wrap_error=True, stdout=subprocess.PIPE)
            self.luks_name = None
        super().unmount(allow_lazy=allow_lazy)
def test_error_message_with_parametrized_fixtures(pytester: Pytester) -> None:
    """unittest TestCase methods must error clearly when they request parametrized fixtures."""
    pytester.copy_example('unittest/test_parametrized_fixture_error_message.py')
    outcome = pytester.runpytest()
    outcome.stdout.fnmatch_lines([
        '*test_two does not support fixtures*',
        '*TestSomethingElse::test_two',
        '*Function type: TestCaseFunction',
    ])
class TFMobileViTInvertedResidual(tf.keras.layers.Layer):
    """Inverted residual block (MobileNetV2 style): expand 1x1 -> depthwise 3x3 -> reduce 1x1."""

    def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int=1, **kwargs) -> None:
        super().__init__(**kwargs)
        # Expanded width, rounded to a multiple of 8.
        expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
        if stride not in [1, 2]:
            raise ValueError(f'Invalid stride {stride}.')
        # The skip connection is only valid when the shape is preserved.
        self.use_residual = (stride == 1) and (in_channels == out_channels)
        self.expand_1x1 = TFMobileViTConvLayer(config, out_channels=expanded_channels, kernel_size=1, name='expand_1x1')
        self.conv_3x3 = TFMobileViTConvLayer(config, out_channels=expanded_channels, kernel_size=3, stride=stride, groups=expanded_channels, dilation=dilation, name='conv_3x3')
        self.reduce_1x1 = TFMobileViTConvLayer(config, out_channels=out_channels, kernel_size=1, use_activation=False, name='reduce_1x1')

    def call(self, features: tf.Tensor, training: bool=False) -> tf.Tensor:
        shortcut = features
        out = self.expand_1x1(features, training=training)
        out = self.conv_3x3(out, training=training)
        out = self.reduce_1x1(out, training=training)
        if self.use_residual:
            return shortcut + out
        return out
class EDMLoss(nn.Module):
    """Squared Earth Mover's Distance between two discrete distributions.

    Both inputs are (batch, bins) probability vectors over ordered bins.
    The loss is the per-sample RMS of the CDF difference, averaged over
    the batch.
    """

    def __init__(self):
        super(EDMLoss, self).__init__()

    def forward(self, p_target, p_estimate):
        assert (p_target.shape == p_estimate.shape)
        # CDFs along the bin axis.
        cdf_target = torch.cumsum(p_target, dim=1)
        cdf_estimate = torch.cumsum(p_estimate, dim=1)
        cdf_diff = (cdf_estimate - cdf_target)
        # Fix: reduce per sample (dim=-1) before the sqrt. The original took
        # the mean over the whole batch first, which is not the per-sample
        # EMD and made the trailing .mean() a no-op on a scalar.
        samplewise_emd = torch.sqrt(torch.mean(torch.pow(torch.abs(cdf_diff), 2), dim=-1))
        return samplewise_emd.mean()
class SymmetricButlerVolmer(BaseKinetics):
    """Butler-Volmer kinetics with a symmetric charge-transfer coefficient (0.5)."""

    def __init__(self, param, domain, reaction, options, phase='primary'):
        super().__init__(param, domain, reaction, options, phase)

    def _get_kinetics(self, j0, ne, eta_r, T, u):
        """Return the interfacial current density 2*u*j0*sinh(ne*F*eta_r/(2*R*T))."""
        # Dimensionless overpotential F*eta_r / (R*T).
        Feta_RT = (self.param.F * eta_r) / (self.param.R * T)
        prefactor = (2 * u) * j0
        return prefactor * pybamm.sinh((ne * 0.5) * Feta_RT)
# Fix: the decorator head was truncated to a bare `.parametrize(...)`, a
# syntax error; restored as `@pytest.mark.parametrize` (the shared_datadir
# fixture confirms this is a pytest test).
@pytest.mark.parametrize('dtype', ['f2', 'f4', 'f8'])
def test_read_bgen__gp_dtype(shared_datadir, dtype):
    """The requested gp_dtype must propagate to the probability and dosage arrays."""
    path = (shared_datadir / 'example.bgen')
    ds = read_bgen(path, gp_dtype=dtype)
    dtype = np.dtype(dtype)
    assert (ds['call_genotype_probability'].dtype == dtype)
    assert (ds['call_dosage'].dtype == dtype)
class TargetExtractor():
    """Parses a raw string or file of scan targets into categorized buckets
    (IPv4/IPv6 addresses and networks, domains), rejecting multicast,
    loopback, link-local, optionally private, and optionally CDN-hosted IPs."""

    def __init__(self, delimiter='', targets_string=None, targets_file=None, exclude_private_ips=False, sort_targets=False, exclude_cdn_ip_networks=False, retrieve_new_cdn_ip_data=False, write_to_disk=False):
        self.delimiter = delimiter
        # str() guards against non-string input; note None becomes 'None'.
        self.targets_string = str(targets_string).strip()
        self.targets_file = targets_file
        self.exclude_private_ips = exclude_private_ips
        self.sort_targets = sort_targets
        self.exclude_cdn_ip_networks = exclude_cdn_ip_networks
        self.retrieve_new_cdn_ip_data = retrieve_new_cdn_ip_data
        self.write_to_disk = write_to_disk
        if self.exclude_cdn_ip_networks:
            # Load (and optionally refresh) the CDN network list once, up front.
            self.cdn_ip_networks = retrieve_cdn_ip_networks(self.retrieve_new_cdn_ip_data)
        if self.targets_file:
            # A targets file overrides any directly supplied targets_string.
            with open(self.targets_file, 'r') as fh:
                self.targets_string = fh.read().strip()
        self.targets_dict = self.extract_targets(self.targets_string)

    def update_disallowed_target(self, targets_dict, target):
        """Record a target rejected by one of the filters."""
        targets_dict['disallowed_targets'].add(target)

    def extract_targets(self, targets_string):
        """Split, validate, and bucket every target; return the summary dict.

        Each bucket carries the targets as a list, CSV string, nmap-style
        space-separated string, and a total count (network totals count every
        address in the subnet).
        """
        targets_dict = {'ipv4_addresses': {'as_list': set(), 'as_csv': '', 'as_nmap': '', 'total': 0}, 'ipv4_networks': {'as_list': set(), 'as_csv': '', 'as_nmap': '', 'total': 0}, 'ipv6_addresses': {'as_list': set(), 'as_csv': '', 'as_nmap': '', 'total': 0}, 'ipv6_networks': {'as_list': set(), 'as_csv': '', 'as_nmap': '', 'total': 0}, 'domains': {'as_list': set(), 'as_csv': '', 'as_nmap': '', 'total': 0}, 'invalid_targets': set(), 'invalid_targets_total': 0, 'disallowed_targets': set(), 'disallowed_targets_total': 0, 'as_list': [], 'as_csv': '', 'as_nmap': '', 'total': 0}
        # Split on the explicit delimiter when given, else any whitespace.
        if self.delimiter:
            print(f'Using delimiter: "{self.delimiter}"')
            target_list = targets_string.split(self.delimiter)
        else:
            target_list = targets_string.split()
        for target in target_list:
            if is_ip_address(target):
                ip_address = ipaddress.ip_address(target)
                # Unconditional rejections: multicast, loopback, link-local.
                if ip_address.is_multicast:
                    print(f'IP address is a multicast IP: {ip_address}')
                    self.update_disallowed_target(targets_dict, ip_address)
                    continue
                elif ip_address.is_loopback:
                    print(f'IP address is a loopback IP: {ip_address}')
                    self.update_disallowed_target(targets_dict, ip_address)
                    continue
                elif ip_address.is_link_local:
                    print(f'IP address is a link local IP: {ip_address}')
                    self.update_disallowed_target(targets_dict, ip_address)
                    continue
                # Optional rejections: private / non-global addresses.
                if (ip_address.is_private and self.exclude_private_ips):
                    print(f'IP address is private IP: {ip_address}')
                    self.update_disallowed_target(targets_dict, ip_address)
                    continue
                if ((not ip_address.is_global) and self.exclude_private_ips):
                    print(f'IP address is not a public IP: {ip_address}')
                    self.update_disallowed_target(targets_dict, ip_address)
                    continue
                # Optional rejection: addresses inside known CDN networks.
                if self.exclude_cdn_ip_networks:
                    cdn_ip_found = False
                    for cdn_ip_network in self.cdn_ip_networks:
                        if (ip_address in cdn_ip_network):
                            print(f'IP address {ip_address} is in CDN network: {cdn_ip_network}')
                            self.update_disallowed_target(targets_dict, ip_address)
                            cdn_ip_found = True
                            break
                    if cdn_ip_found:
                        continue
                if is_ipv4_address(ip_address):
                    targets_dict['ipv4_addresses']['as_list'].add(ip_address)
                elif is_ipv6_address(ip_address):
                    targets_dict['ipv6_addresses']['as_list'].add(ip_address)
                else:
                    print(f'Unknown IP address type: {ip_address}')
            elif is_ip_network(target):
                # strict=False accepts host bits set in the network notation.
                ip_network = ipaddress.ip_network(target, strict=False)
                if (ip_network.is_private and self.exclude_private_ips):
                    print(f'IP network is private: {target}')
                    self.update_disallowed_target(targets_dict, ip_network)
                    continue
                if (type(ip_network) == ipaddress.IPv4Network):
                    targets_dict['ipv4_networks']['as_list'].add(target)
                else:
                    targets_dict['ipv6_networks']['as_list'].add(target)
            elif (is_valid_fqdn(target) and domain_has_valid_fqdn(target)):
                # Strip a trailing root dot from fully qualified names.
                targets_dict['domains']['as_list'].add(target.strip('.'))
            else:
                targets_dict['invalid_targets'].add(target)
        print(('=' * 10))
        # Normalize every bucket: optional sort, then list/CSV/nmap renderings
        # and totals (networks contribute their full address count).
        for target_type in ['ipv4_addresses', 'ipv4_networks', 'ipv6_addresses', 'ipv6_networks', 'domains']:
            temp_list_of_objects = targets_dict[target_type]['as_list']
            if self.sort_targets:
                try:
                    temp_list_of_objects = sorted(temp_list_of_objects)
                except Exception as e:
                    print(f"Exception sorting targets in '{target_type}': {e}")
            temp_list_of_strings = [str(obj) for obj in temp_list_of_objects]
            targets_dict[target_type]['as_list'] = temp_list_of_strings
            targets_dict[target_type]['as_csv'] = ','.join(temp_list_of_strings)
            targets_dict[target_type]['as_nmap'] = ' '.join(temp_list_of_strings)
            if (target_type in ['ipv4_networks', 'ipv6_networks']):
                for ip_network in temp_list_of_objects:
                    ip_network = ipaddress.ip_network(ip_network, strict=False)
                    if (type(ip_network) == ipaddress.IPv4Network):
                        targets_in_ip_subnet = ip_network.num_addresses
                    else:
                        targets_in_ip_subnet = ipaddress.IPv6Network(ip_network).num_addresses
                    targets_dict[target_type]['total'] += targets_in_ip_subnet
                    targets_dict['total'] += targets_in_ip_subnet
            else:
                targets_dict[target_type]['total'] = len(temp_list_of_strings)
                targets_dict['total'] += len(temp_list_of_strings)
            targets_dict['as_list'].extend(temp_list_of_strings)
        targets_dict['as_csv'] = ','.join(targets_dict['as_list'])
        targets_dict['as_nmap'] = ' '.join(targets_dict['as_list'])
        # Convert the rejection sets to (stringified) lists for JSON output.
        targets_dict['invalid_targets'] = list(targets_dict['invalid_targets'])
        targets_dict['invalid_targets_total'] = len(targets_dict['invalid_targets'])
        targets_dict['invalid_targets'] = [str(obj) for obj in targets_dict['invalid_targets']]
        targets_dict['disallowed_targets'] = list(targets_dict['disallowed_targets'])
        targets_dict['disallowed_targets_total'] = len(targets_dict['disallowed_targets'])
        targets_dict['disallowed_targets'] = [str(obj) for obj in targets_dict['disallowed_targets']]
        if self.sort_targets:
            for target_type in ['invalid_targets', 'disallowed_targets']:
                try:
                    targets_dict[target_type].sort()
                except Exception as e:
                    print(f"Exception sorting targets in '{target_type}': {e}")
        if self.write_to_disk:
            print('Writing targets_dict to disk')
            with open('targets_dict.json', 'w') as fh:
                fh.write(json.dumps(targets_dict, indent=4))
        return targets_dict
def test_logq_mini_2_sample_2_var(parametric_grouped_approxes, three_var_model):
    """Symbolic logq with mini-batch size 2 over two grouped variables must evaluate."""
    approx_cls, approx_kwargs = parametric_grouped_approxes
    approx = approx_cls([three_var_model.one, three_var_model.two], model=three_var_model, **approx_kwargs)
    sized_logq = approx.set_size_and_deterministic(approx.logq, 2, 0)
    sized_logq.eval()
_module()  # NOTE(review): likely residue of a stripped registry decorator (e.g. @HEADS.register_module()) -- confirm.
class ConvFCBBoxHead(BBoxHead):
    """BBox head with configurable shared / classification / regression
    conv+fc branches:

        shared convs -> shared fcs -> (cls convs -> cls fcs -> fc_cls)
                                   -> (reg convs -> reg fcs -> fc_reg)
    """

    def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, conv_out_channels=256, fc_out_channels=1024, conv_cfg=None, norm_cfg=None, *args, **kwargs):
        super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
        # At least one layer must be requested overall.
        assert ((((((num_shared_convs + num_shared_fcs) + num_cls_convs) + num_cls_fcs) + num_reg_convs) + num_reg_fcs) > 0)
        # Branch-specific convs are incompatible with shared fcs.
        if ((num_cls_convs > 0) or (num_reg_convs > 0)):
            assert (num_shared_fcs == 0)
        if (not self.with_cls):
            assert ((num_cls_convs == 0) and (num_cls_fcs == 0))
        if (not self.with_reg):
            assert ((num_reg_convs == 0) and (num_reg_fcs == 0))
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Shared trunk, then the two task-specific branches.
        (self.shared_convs, self.shared_fcs, last_layer_dim) = self._add_conv_fc_branch(self.num_shared_convs, self.num_shared_fcs, self.in_channels, True)
        self.shared_out_channels = last_layer_dim
        (self.cls_convs, self.cls_fcs, self.cls_last_dim) = self._add_conv_fc_branch(self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
        (self.reg_convs, self.reg_fcs, self.reg_last_dim) = self._add_conv_fc_branch(self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
        # Without shared fcs or pooling, the branch inputs are still spatial
        # maps, so the first fc input dim must be scaled by the RoI area.
        if ((self.num_shared_fcs == 0) and (not self.with_avg_pool)):
            if (self.num_cls_fcs == 0):
                self.cls_last_dim *= self.roi_feat_area
            if (self.num_reg_fcs == 0):
                self.reg_last_dim *= self.roi_feat_area
        self.relu = nn.ReLU(inplace=True)
        if self.with_cls:
            # +1 output for the background class.
            self.fc_cls = nn.Linear(self.cls_last_dim, (self.num_classes + 1))
        if self.with_reg:
            # 4 deltas per box, or 4 per class when class-specific regression.
            out_dim_reg = (4 if self.reg_class_agnostic else (4 * self.num_classes))
            self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)

    def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False):
        """Build one conv+fc branch; returns (convs, fcs, output_dim)."""
        last_layer_dim = in_channels
        branch_convs = nn.ModuleList()
        if (num_branch_convs > 0):
            for i in range(num_branch_convs):
                conv_in_channels = (last_layer_dim if (i == 0) else self.conv_out_channels)
                branch_convs.append(ConvModule(conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        branch_fcs = nn.ModuleList()
        if (num_branch_fcs > 0):
            # The first fc after a spatial map must flatten the RoI area.
            if ((is_shared or (self.num_shared_fcs == 0)) and (not self.with_avg_pool)):
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
                branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return (branch_convs, branch_fcs, last_layer_dim)

    def init_weights(self):
        """Xavier-init every fc layer on top of the base class init."""
        super(ConvFCBBoxHead, self).init_weights()
        for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
            for m in module_list.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_uniform_(m.weight)
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Run the shared trunk, then both branches; returns (cls_score, bbox_pred)."""
        if (self.num_shared_convs > 0):
            for conv in self.shared_convs:
                x = conv(x)
        if (self.num_shared_fcs > 0):
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.flatten(1)
            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        x_cls = x
        x_reg = x
        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if (x_cls.dim() > 2):
            # Still spatial: pool and/or flatten before the fcs.
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))
        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if (x_reg.dim() > 2):
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))
        cls_score = (self.fc_cls(x_cls) if self.with_cls else None)
        bbox_pred = (self.fc_reg(x_reg) if self.with_reg else None)
        return (cls_score, bbox_pred)
def test_dataframes():
    """Batch.to_dataframe yields a streaming DataFrame whose partitions concat correctly."""
    pd = pytest.importorskip('pandas')
    from streamz.dataframe import DataFrame
    data = [{'x': i, 'y': (2 * i)} for i in range(10)]
    source = Batch(example=[{'x': 0, 'y': 0}])
    sdf = source.map((lambda d: toolz.assoc(d, 'z', (d['x'] + d['y'])))).to_dataframe()
    assert isinstance(sdf, DataFrame)
    collected = sdf.stream.sink_to_list()
    for chunk in toolz.partition_all(3, data):
        source.emit(chunk)
    combined = pd.concat(collected)
    assert combined.z.tolist() == [(3 * i) for i in range(10)]
def test_pype_args_with_out(mock_pipe):
    """pype with 'args' and 'out' runs the child without the parent context and
    copies the named key back into the parent context afterwards."""
    context = Context({'parentkey': 'parentvalue', 'pype': {'name': 'pipe name', 'args': {'a': 'b'}, 'out': 'a'}})
    context.parent = 'arb/dir'
    with patch_logger('pypyr.steps.pype', logging.INFO) as mock_logger_info:
        with get_arb_pipeline_scope(context):
            pype.run_step(context)
    # The child pipeline is resolved by name only; no parent context args.
    mock_pipe.assert_called_once_with(name='pipe name', context_args=None, parse_input=False, loader=None, groups=None, success_group=None, failure_group=None, py_dir=None)
    # It runs with a fresh context built from 'args', not the parent context.
    mocked_runner = mock_pipe.return_value.load_and_run_pipeline
    mocked_runner.assert_called_once_with({'a': 'b'}, None)
    assert (mock_logger_info.mock_calls == [call('pyping pipe name, without parent context.'), call('pyped pipe name.')])
    # 'out': 'a' copies only key 'a' from the child back into the parent.
    assert (context == {'parentkey': 'parentvalue', 'a': 'b', 'pype': {'name': 'pipe name', 'args': {'a': 'b'}, 'out': 'a'}})
def kafka_service():
    """Fixture-style generator (decorator presumably stripped -- confirm):
    ensures a local Kafka broker is reachable and yields (producer, topic).

    The producer is created once and cached in the module-level _kafka list;
    a fresh random topic name is generated per call.
    """
    TOPIC = ('test-%i' % random.randint(0, 10000))
    if (_kafka[0] is None):
        if LAUNCH_KAFKA:
            launch_kafka()
        else:
            raise pytest.skip.Exception('Kafka not available. To launch kafka use `export STREAMZ_LAUNCH_KAFKA=true`')
        producer = ck.Producer({'bootstrap.servers': 'localhost:9092', 'topic.metadata.refresh.interval.ms': '5000'})
        # Probe the broker with a throwaway message; flush() returns the
        # number of messages still queued, so nonzero means timeout.
        producer.produce('test-start-kafka', b'test')
        out = producer.flush(10)
        if (out > 0):
            raise RuntimeError('Timeout waiting for kafka')
        _kafka[0] = producer
    (yield (_kafka[0], TOPIC))
class BpfHeaderExtractor(FileExtractor):
    """Extracts enum member lists from the kernel's UAPI bpf.h header."""
    # Canonical UAPI header under the kernel source root.
    filename = os.path.join(LINUX_ROOT, 'tools/include/uapi/linux/bpf.h')

    def get_prog_types(self):
        """Return the members of ``enum bpf_prog_type``."""
        return self.get_enum('bpf_prog_type')

    def get_map_types(self):
        """Return the members of ``enum bpf_map_type``."""
        return self.get_enum('bpf_map_type')

    def get_attach_types(self):
        """Return the members of ``enum bpf_attach_type``."""
        return self.get_enum('bpf_attach_type')
def analyze_results(lm_results: Dict, patterns_graph) -> None:
    """Score LM prediction consistency along the entailment edges of
    patterns_graph and publish all metrics to wandb.run.summary.

    lm_results maps pattern -> {subject: (prediction, gold_object)}.
    For every graph edge (pattern -> entailed pattern), a point is scored
    when both patterns yield the same prediction for the same subject;
    results are broken down by edge type (syntactic / lexical / both / none).
    """
    total = 0
    points = 0
    total_syn = 0
    total_lex = 0
    total_both = 0
    total_no = 0
    points_syn = 0
    points_lex = 0
    points_both = 0
    points_no = 0
    points_by_edge = defaultdict(list)
    edges_out = defaultdict(list)
    avg_entropy = []
    consistent_subjects = defaultdict(list)
    correct_subjects_per_pattern = defaultdict(int)
    correct_patterns_per_subject = defaultdict(int)
    consistency_performance = defaultdict(list)
    for (pattern, vals) in lm_results.items():
        for (subj, (pred, gold_obj)) in vals.items():
            graph_node = get_node(patterns_graph, pattern)
            if (graph_node is None):
                continue
            # Skip pattern/object pairs with an a/an article mismatch.
            if filter_a_an_vowel_mismatch(pattern, gold_obj):
                continue
            correct_patterns_per_subject[subj] += int((pred == gold_obj))
            correct_subjects_per_pattern[pattern] += int((pred == gold_obj))
            consistent_subjects[subj].append(pred)
            base_pattern_success = []
            # Compare this prediction against every pattern entailed by it.
            for ent_node in patterns_graph.successors(graph_node):
                if ([graph_node, ent_node] not in patterns_graph.edges):
                    continue
                entailment_type = patterns_graph.edges[(graph_node, ent_node)]
                ent_pattern = ent_node.lm_pattern
                if filter_a_an_vowel_mismatch(ent_pattern, gold_obj):
                    continue
                # Consistency: same prediction on both ends of the edge.
                success = (pred == lm_results[ent_pattern][subj][0])
                if success:
                    points += 1
                total += 1
                base_pattern_success.append(int(success))
                consistency_performance[subj].append(success)
                points_by_edge[((graph_node.lm_pattern + '_') + ent_node.lm_pattern)].append(int(success))
                edges_out[graph_node.lm_pattern].append(int(success))
                # Bucket by which kind of change the edge represents.
                if (entailment_type['edge_type'].syntactic_change and (not entailment_type['edge_type'].lexical_change) and (not entailment_type['edge_type'].determiner_change)):
                    if success:
                        points_syn += 1
                    total_syn += 1
                elif (entailment_type['edge_type'].lexical_change and (not entailment_type['edge_type'].syntactic_change) and (not entailment_type['edge_type'].determiner_change)):
                    if success:
                        points_lex += 1
                    total_lex += 1
                elif (entailment_type['edge_type'].lexical_change and entailment_type['edge_type'].syntactic_change and (not entailment_type['edge_type'].determiner_change)):
                    if success:
                        points_both += 1
                    total_both += 1
                if ((not entailment_type['edge_type'].syntactic_change) and (not entailment_type['edge_type'].lexical_change) and (not entailment_type['edge_type'].determiner_change)):
                    if success:
                        points_no += 1
                    total_no += 1
            # Binary entropy of the per-pattern success rate.
            base_success = (sum(base_pattern_success) / len(base_pattern_success))
            ent = entropy([base_success, (1.0 - base_success)], base=2)
            avg_entropy.append(ent)
    # Report each breakdown; -1 marks an empty (undefined) bucket.
    if (total > 0):
        print('overall', points, total, (points / total))
        wandb.run.summary['consistency'] = (points / total)
    else:
        wandb.run.summary['consistency'] = (- 1)
    if (total_syn > 0):
        wandb.run.summary['syntactic_consistency'] = (points_syn / total_syn)
        print('syntactic', points_syn, total_syn, (points_syn / total_syn))
    else:
        wandb.run.summary['syntactic_consistency'] = (- 1)
    if (total_lex > 0):
        wandb.run.summary['lexical_consistency'] = (points_lex / total_lex)
        print('lexical', points_lex, total_lex, (points_lex / total_lex))
    else:
        wandb.run.summary['lexical_consistency'] = (- 1)
    if (total_no > 0):
        wandb.run.summary['no_change_consistency'] = (points_no / total_no)
        print('no change', points_no, total_no, (points_no / total_no))
    else:
        wandb.run.summary['no_change_consistency'] = (- 1)
    if (total_both > 0):
        print('both', points_both, total_both, (points_both / total_both))
        wandb.run.summary['both_consistency'] = (points_both / total_both)
    else:
        wandb.run.summary['both_consistency'] = (- 1)
    # Consistency per edge, weighted by the source pattern's out-edge success.
    avg_out_normalized = []
    out_edges_total = 0
    for (k, vals) in points_by_edge.items():
        eo = (sum(edges_out[k.split('_')[0]]) / len(edges_out[k.split('_')[0]]))
        avg_out_normalized.append((eo * (sum(vals) / len(vals))))
        out_edges_total += eo
    wandb.run.summary['avg_consistency_by_edge_out'] = (sum(avg_out_normalized) / out_edges_total)
    # Fraction of subjects that got the identical prediction on every pattern.
    all_consistent = 0
    for (subj, preds) in consistent_subjects.items():
        preds_set = set(preds)
        if (len(preds_set) == 1):
            all_consistent += 1
    wandb.run.summary['consistent_subjects'] = (all_consistent / len(consistent_subjects))
    # Fraction of subjects / patterns with at least one correct prediction.
    successful_subjects = 0
    for (subj, success) in correct_patterns_per_subject.items():
        if (success > 0):
            successful_subjects += 1
    wandb.run.summary['successful_subjects'] = (successful_subjects / len(correct_patterns_per_subject))
    successful_patterns = 0
    for (pattern, success) in correct_subjects_per_pattern.items():
        if (success > 0):
            successful_patterns += 1
    wandb.run.summary['successful_patterns'] = (successful_patterns / len(correct_subjects_per_pattern))
    # Split consistency by whether the model "knows" the subject at all.
    (success_for_knowledgable_patterns, total_for_knowledgable_patterns) = (0, 0)
    (success_for_unknowledgable_patterns, total_for_unknowledgable_patterns) = (0, 0)
    for (subj, success) in consistency_performance.items():
        if (correct_patterns_per_subject[subj] > 0):
            success_for_knowledgable_patterns += sum(success)
            total_for_knowledgable_patterns += len(success)
        else:
            success_for_unknowledgable_patterns += sum(success)
            total_for_unknowledgable_patterns += len(success)
    if (total_for_knowledgable_patterns > 0):
        wandb.run.summary['knowledgable_consistency'] = (success_for_knowledgable_patterns / total_for_knowledgable_patterns)
    else:
        wandb.run.summary['knowledgable_consistency'] = 0
    if (total_for_unknowledgable_patterns > 0):
        wandb.run.summary['unknowledgable_consistency'] = (success_for_unknowledgable_patterns / total_for_unknowledgable_patterns)
    else:
        wandb.run.summary['unknowledgable_consistency'] = 0
    wandb.run.summary['total'] = total
    wandb.run.summary['total_syn'] = total_syn
    wandb.run.summary['total_lex'] = total_lex
    wandb.run.summary['total_both'] = total_both
    wandb.run.summary['total_no'] = total_no
    wandb.run.summary['avg_entropy'] = np.average(avg_entropy)
    wandb.run.summary['std_entropy'] = np.std(avg_entropy)
def _match_list(module_rule: Tuple[(List[str], str)], logger_name: str) -> Tuple[(int, Optional[str])]:
logger_modules_split = (logger_name.split('.') if logger_name else [])
modules_split: List[str] = module_rule[0]
level: str = module_rule[1]
if (logger_modules_split == modules_split):
return (sys.maxsize, level)
else:
num_modules = len(modules_split)
if (logger_modules_split[:num_modules] == modules_split):
return (num_modules, level)
else:
return (0, None) |
# NOTE(review): this bare attribute access was almost certainly an
# ``@_manager.tracked`` decorator whose leading ``@`` was lost — confirm
# against version control before relying on tracking behavior.
_manager.tracked
def index(request: WSGIRequest) -> HttpResponse:
    """Render the musiq page with keyword settings, streaming flags and
    per-platform suggestion counts."""
    # Local import — presumably to avoid a circular import with core.urls;
    # confirm.
    from core import urls
    context = base.context(request)
    context['urls'] = urls.musiq_paths
    context['additional_keywords'] = storage.get('additional_keywords')
    context['forbidden_keywords'] = storage.get('forbidden_keywords')
    # Client-side streaming is active when the configured output is 'client'.
    context['client_streaming'] = (storage.get('output') == 'client')
    # Show the stream link only for stream-capable outputs, and only when the
    # stream is not privileged or the requester already has control rights.
    context['show_stream'] = ((storage.get('output') in ['client', 'icecast']) and ((not storage.get('privileged_stream')) or context['controls_enabled']))
    for platform in ['youtube', 'spotify', 'soundcloud', 'jamendo']:
        # Suggestions require: online suggestions on, "new music only" off,
        # and the specific platform enabled.
        if (storage.get('online_suggestions') and (not storage.get('new_music_only')) and storage.get(cast(PlatformEnabled, f'{platform}_enabled'))):
            suggestion_count = storage.get(cast(PlatformSuggestions, f'{platform}_suggestions'))
        else:
            suggestion_count = 0
        context[f'{platform}_suggestions'] = suggestion_count
    return render(request, 'musiq.html', context)
class ObjectModel(BaseModel):
    """Pydantic model of an HMM-like object: a state count plus
    emission/transition/start arrays and an optional display name."""
    # Number of states; the reshape validators below derive shapes from it.
    states: int
    emission: npt.NDArray
    transition: npt.NDArray
    start: npt.NDArray
    name: str = 'Default'
    # NOTE(review): the three bare ``('emission', 'transition', 'start', pre=True)``
    # lines below are not valid expressions — they look like pydantic-v1
    # ``@validator(...)`` decorators whose ``@validator`` prefix was lost.
    # Upstream they most likely targeted different field subsets (square
    # reshape for emission/transition only, row reshape for start only);
    # confirm against the original source before repairing.
    ('emission', 'transition', 'start', pre=True)
    def parse_array(cls, v, values):
        # Coerce any array-like input to a float ndarray.
        return np.asarray(v, dtype=float)
    ('emission', 'transition', 'start', pre=True)
    def reshape_emission_transition(cls, v, values):
        # Square (states x states) matrix.
        shape = (values['states'], values['states'])
        return np.reshape(v, shape)
    ('emission', 'transition', 'start', pre=True)
    def reshape_start(cls, v, values):
        # Row vector (1 x states).
        shape = (1, values['states'])
        return np.reshape(v, shape)
    class Config():
        # Permit numpy array fields and re-run validation on assignment.
        arbitrary_types_allowed = True
        validate_assignment = True
def test_window_by_interval__multiple_contigs():
    """window_by_interval derives per-contig variant windows from interval bounds."""
    ds = simulate_genotype_call_dataset(n_variant=10, n_sample=3, n_contig=2)
    # Five variant positions on each of the two contigs.
    ds['variant_position'] = (['variants'], np.array([1, 4, 6, 8, 12, 1, 21, 25, 40, 55]))
    assert (not has_windows(ds))
    # Two intervals per contig.
    ds['interval_contig_name'] = (['intervals'], np.array(['0', '0', '1', '1']))
    ds['interval_start'] = (['intervals'], np.array([1, 7, 2, 30]))
    ds['interval_stop'] = (['intervals'], np.array([5, 10, 22, 50]))
    ds = window_by_interval(ds)
    assert has_windows(ds)
    # Expected window bounds are variant *indices*, not genomic positions.
    np.testing.assert_equal(ds[window_contig].values, [0, 0, 1, 1])
    np.testing.assert_equal(ds[window_start].values, [0, 3, 6, 8])
    np.testing.assert_equal(ds[window_stop].values, [2, 4, 7, 9])
class WindowWriteTest(unittest.TestCase):
    """Writing a sub-window of a GTiff leaves the rest of the raster at zero,
    which is reflected in the gdalinfo statistics."""
    def setUp(self):
        # Fresh scratch directory per test.
        self.tempdir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.tempdir)
    # NOTE(review): the bare ``.gdalbin`` below looks like a decorator whose
    # leading ``@<marker>`` was lost (presumably a "requires gdal binaries"
    # mark, since the test shells out to gdalinfo) — confirm against VCS.
    .gdalbin
    def test_write_window(self):
        """A 50x50 block of 127s written at (col=10, row=30) of a 100x100 file
        yields min 0 / max 127 and the expected mean/stddev from gdalinfo."""
        name = os.path.join(self.tempdir, 'test_write_window.tif')
        a = (np.ones((50, 50), dtype=rasterio.ubyte) * 127)
        with rasterio.open(name, 'w', driver='GTiff', width=100, height=100, count=1, dtype=a.dtype) as s:
            s.write(a, indexes=1, window=windows.Window(10, 30, 50, 50))
        # Validate via the external gdalinfo tool's computed statistics.
        info = subprocess.check_output(['gdalinfo', '-stats', name])
        self.assertTrue(('Minimum=0.000, Maximum=127.000, Mean=31.750, StdDev=54.993' in info.decode('utf-8')), info)
def create_datasets(args):
    """Build the DIV2K training dataloader plus the requested benchmark eval loaders.

    Args:
        args: parsed CLI namespace; uses data_path, data_augment, scale,
            colors, patch_size, data_repeat, threads, batch_size and
            eval_sets (iterable of benchmark names).

    Returns:
        (train_dataloader, valid_dataloaders) where valid_dataloaders is a
        list of {'name': str, 'dataloader': DataLoader} dicts, one per
        selected benchmark set.
    """
    div2k = DIV2K(os.path.join(args.data_path, 'DIV2K/DIV2K_train_HR'), os.path.join(args.data_path, 'DIV2K/DIV2K_train_LR_bicubic'), os.path.join(args.data_path, 'div2k_cache'), train=True, augment=args.data_augment, scale=args.scale, colors=args.colors, patch_size=args.patch_size, repeat=args.data_repeat)
    train_dataloader = DataLoader(dataset=div2k, num_workers=args.threads, batch_size=args.batch_size, shuffle=True, pin_memory=True, drop_last=True)
    # (option name in args.eval_sets, dataloader key) — directories follow
    # the benchmark/<Name>/{HR,LR_bicubic} layout.
    benchmark_specs = [
        ('Set5', 'set5'),
        ('Set14', 'set14'),
        ('B100', 'b100'),
        ('Urban100', 'u100'),
        ('Manga109', 'manga109'),
    ]
    valid_dataloaders = []
    for set_name, loader_name in benchmark_specs:
        if set_name in args.eval_sets:
            hr_path = os.path.join(args.data_path, 'benchmark/{}/HR'.format(set_name))
            lr_path = os.path.join(args.data_path, 'benchmark/{}/LR_bicubic'.format(set_name))
            bench = Benchmark(hr_path, lr_path, scale=args.scale, colors=args.colors)
            valid_dataloaders.append({'name': loader_name, 'dataloader': DataLoader(dataset=bench, batch_size=1, shuffle=False)})
    if not valid_dataloaders:
        print('select no dataset for evaluation!')
    else:
        # Bug fix: the original loop started at index 1 and always dropped the
        # first selected dataset's name from the message.
        selected = ', '.join(d['name'] for d in valid_dataloaders)
        print('select {} for evaluation! '.format(selected))
    return (train_dataloader, valid_dataloaders)
class TranslationRoutingTest(TranslationTestMixin, RapidTest):
    """Outgoing messages are rendered in each connection's own language."""
    apps = [app.TranslationApp]
    def test_translation_override(self):
        """Each incoming connection gets its reply in the connection's language."""
        en_conn = self.create_lang_connection('', 'en')
        es_conn = self.create_lang_connection('', 'es')
        self.receive('lang-hello', en_conn)
        self.receive('lang-hello', es_conn)
        self.assertEqual(len(self.outbound), 2)
        self.assertEqual(self.outbound[0].text, 'hello')
        self.assertEqual(self.outbound[1].text, 'hola')
    def test_broadcast(self):
        """Broadcast batches connections by language: one message per language,
        each addressed to all connections sharing that language."""
        self.create_lang_connection('', 'en')
        self.create_lang_connection('', 'en')
        self.create_lang_connection('', 'en')
        self.create_lang_connection('', 'es')
        self.create_lang_connection('', 'es')
        app.lang_broadcast()
        self.assertEqual(2, len(self.outbound))
        for message in self.outbound:
            if (message.text == 'hello'):
                self.assertEqual(3, len(message.connections))
            elif (message.text == 'hola'):
                self.assertEqual(2, len(message.connections))
    # NOTE(review): 'langauge' is a typo for 'language'; left as-is so any
    # external test selectors targeting this name keep working.
    def test_contact_settings_langauge(self):
        """The contact's stored language wins over the site-wide LANGUAGE_CODE."""
        en_conn = self.create_lang_connection('', 'en')
        with override_settings(LANGUAGE_CODE='es'):
            self.receive('lang-hello', en_conn)
        self.assertEqual(self.outbound[0].text, 'hello')
class AttrVI_ATTR_PXI_DEST_TRIG_BUS(RangeAttribute):
    """Range attribute descriptor for VI_ATTR_PXI_DEST_TRIG_BUS on PXI
    BACKPLANE resources (ViInt16; valid range 1..3, -1 also accepted)."""

    resources = [(constants.InterfaceType.pxi, 'BACKPLANE')]

    py_name = ''
    visa_name = 'VI_ATTR_PXI_DEST_TRIG_BUS'
    visa_type = 'ViInt16'
    default = -1

    # Readable and writable, both locally and remotely.
    read = True
    write = True
    local = True

    # Accepted numeric range plus the explicit sentinel value.
    min_value = 1
    max_value = 3
    values = [-1]
class _NameAttributeMapping(MutableMapping):
    """Dict-like view over the RFC 6680 attributes of a GSSAPI name.

    Keys may be ``str`` (encoded with the process encoding) or ``bytes``.
    Reads return ``GetNameAttributeResult`` tuples; writes accept a full
    result tuple, a ``(value, complete)`` pair, a single raw value, or an
    iterable of raw values.
    """
    def __init__(self, name: Name) -> None:
        self._name = name
    def __getitem__(self, key: t.Union[(bytes, str)]) -> tuples.GetNameAttributeResult:
        """Return the attribute for *key*, with copied value lists so callers
        cannot mutate the underlying result."""
        if isinstance(key, str):
            key = key.encode(_utils._get_encoding())
        res = rname_rfc6680.get_name_attribute(self._name, key)
        res = t.cast(tuples.GetNameAttributeResult, res)
        return tuples.GetNameAttributeResult(list(res.values), list(res.display_values), res.authenticated, res.complete)
    def __setitem__(self, key: t.Union[(bytes, str)], value: t.Union[(tuples.GetNameAttributeResult, t.Tuple[(bytes, bool)], bytes)]) -> None:
        """Replace the attribute's values (set_name_attribute appends, so any
        existing values are deleted first)."""
        if isinstance(key, str):
            key = key.encode(_utils._get_encoding())
        rname_rfc6680.delete_name_attribute(self._name, key)
        attr_value: t.List[bytes]
        if isinstance(value, tuples.GetNameAttributeResult):
            complete = value.complete
            attr_value = value.values
        elif (isinstance(value, tuple) and (len(value) == 2)):
            complete = t.cast(bool, value[1])
            attr_value = [t.cast(bytes, value[0])]
        else:
            complete = False
            if (isinstance(value, (str, bytes)) or (not isinstance(value, Iterable))):
                attr_value = [value]
            else:
                # Bug fix: an iterable of raw values previously fell through
                # without assigning attr_value, raising UnboundLocalError on
                # the set_name_attribute call below.
                attr_value = list(value)
        rname_rfc6680.set_name_attribute(self._name, key, attr_value, complete=complete)
    def __delitem__(self, key: t.Union[(bytes, str)]) -> None:
        """Remove all values of the attribute *key*."""
        if isinstance(key, str):
            key = key.encode(_utils._get_encoding())
        rname_rfc6680.delete_name_attribute(self._name, key)
    def __iter__(self) -> t.Iterator[bytes]:
        # Attribute names come from an inquiry on the underlying name.
        return iter(self._name._inquire(attrs=True).attrs)
    def __len__(self) -> int:
        return len(self._name._inquire(attrs=True).attrs)
def test_create_right_lane_split_second_lane():
    """A right-lane split (1 -> 2 lanes between s=10 and s=20) yields three
    lane sections, with the new lane widening from zero width in the middle
    section and reaching full width (a=3) in the last one."""
    lanedef = xodr.LaneDef(10, 20, 1, 2, 2)
    lanes = xodr.create_lanes_merge_split([lanedef], 0, 30, xodr.std_roadmark_solid_solid(), 3, 3)
    # Three sections: before, during, and after the split.
    assert (len(lanes.lanesections) == 3)
    assert (lanes.lanesections[0].s == 0)
    assert (lanes.lanesections[1].s == 10)
    assert (lanes.lanesections[2].s == 20)
    # Only right lanes are involved.
    assert (len(lanes.lanesections[0].leftlanes) == 0)
    assert (len(lanes.lanesections[1].leftlanes) == 0)
    assert (len(lanes.lanesections[2].leftlanes) == 0)
    assert (len(lanes.lanesections[0].rightlanes) == 1)
    assert (len(lanes.lanesections[1].rightlanes) == 2)
    assert (len(lanes.lanesections[2].rightlanes) == 2)
    # Before the split: a single full-width lane with a solid mark.
    assert (lanes.lanesections[0].rightlanes[0].roadmark[0].marking_type == xodr.RoadMarkType.solid)
    assert (lanes.lanesections[0].rightlanes[0].widths[0].a == 3)
    assert (lanes.lanesections[0].rightlanes[0].widths[0].c == 0)
    # During the split: inner lane broken, new outer lane grows from width 0
    # (non-zero cubic coefficient c).
    assert (lanes.lanesections[1].rightlanes[0].roadmark[0].marking_type == xodr.RoadMarkType.broken)
    assert (lanes.lanesections[1].rightlanes[0].widths[0].a == 3)
    assert (lanes.lanesections[1].rightlanes[0].widths[0].c == 0)
    assert (lanes.lanesections[1].rightlanes[1].roadmark[0].marking_type == xodr.RoadMarkType.solid)
    assert (lanes.lanesections[1].rightlanes[1].widths[0].a == 0)
    assert (lanes.lanesections[1].rightlanes[1].widths[0].c != 0)
    # After the split: both lanes at constant full width.
    assert (lanes.lanesections[2].rightlanes[0].roadmark[0].marking_type == xodr.RoadMarkType.broken)
    assert (lanes.lanesections[2].rightlanes[1].roadmark[0].marking_type == xodr.RoadMarkType.solid)
    assert (lanes.lanesections[2].rightlanes[0].widths[0].a == 3)
    assert (lanes.lanesections[2].rightlanes[0].widths[0].c == 0)
    assert (lanes.lanesections[2].rightlanes[1].widths[0].a == 3)
    assert (lanes.lanesections[2].rightlanes[1].widths[0].c == 0)
def test_add_timer(bot):
    """Component timers fire at their configured intervals.

    Simulates ten one-second scheduler ticks and checks call counts:
    every tick for the 1s timer (10), ticks 0/3/6/9 for the 3s timer (4),
    and only the first tick for the 30s timer (1), which also receives the
    bot instance as an argument.
    """
    import time
    global_bot = bot
    timer1_calls = 0
    timer2_calls = 0
    timer3_calls = 0
    def timer1():
        nonlocal timer1_calls
        timer1_calls += 1
    def timer2():
        nonlocal timer2_calls
        timer2_calls += 1
    def timer3(bot):
        nonlocal timer3_calls
        timer3_calls += 1
        # Timers accepting a ``bot`` parameter get the owning bot injected.
        assert (bot == global_bot)
    comp = botogram.Component('test')
    comp.add_timer(1, timer1)
    comp.add_timer(3, timer2)
    comp.add_timer(30, timer3)
    bot.use(comp)
    # Bug fix: the original line was a bare ``now =`` (syntax error from a
    # truncated assignment). Presumably the current timestamp — confirm
    # against the upstream test suite.
    now = time.time()
    for i in range(10):
        scheduled = bot.scheduled_tasks((now + i))
        for job in scheduled:
            job()
    assert (timer1_calls == 10)
    assert (timer2_calls == 4)
    assert (timer3_calls == 1)
class _ZipPkgWriter(PhysPkgWriter):
    """Physical package writer that stores parts in a deflate-compressed zip
    archive (the OPC on-disk format)."""

    def __init__(self, pkg_file):
        super(_ZipPkgWriter, self).__init__()
        # *pkg_file* may be a path or a file-like object; ZipFile accepts both.
        self._zipf = ZipFile(pkg_file, 'w', compression=ZIP_DEFLATED)

    def close(self):
        """Finalize the central directory and release the archive."""
        self._zipf.close()

    def write(self, pack_uri, blob):
        """Store *blob* in the archive under *pack_uri*'s member name."""
        self._zipf.writestr(pack_uri.membername, blob)
# NOTE(review): the line below looks like a ``@pytest.mark.filterwarnings``
# decorator whose ``@pytest.mark`` prefix was lost — confirm against VCS.
.filterwarnings('ignore:Constructing a DIA matrix')
class TestExpm(UnaryOpMixin):
    """Matrix-exponential specialisations checked against scipy.linalg.expm."""
    def op_numpy(self, matrix):
        # Reference implementation the mixin compares each specialisation to.
        return scipy.linalg.expm(matrix)
    # Only square matrices are valid inputs for expm.
    shapes = shapes_square()
    bad_shapes = shapes_not_square()
    # One parametrisation per (implementation, input layout, output layout).
    specialisations = [pytest.param(data.expm_csr, CSR, CSR), pytest.param(data.expm_csr_dense, CSR, Dense), pytest.param(data.expm_dense, Dense, Dense), pytest.param(data.expm_dia, Dia, Dia)]
def gen_property_setter_ir(builder: IRBuilder, func_decl: FuncDecl, cdef: ClassDef, is_trait: bool) -> FuncIR:
    """Generate IR for an auto-derived property setter.

    The generated function stores ``value`` into the attribute whose name is
    the setter's name with PROPSET_PREFIX stripped, then returns None.  For
    traits no store is emitted — presumably because traits have no attribute
    storage of their own (confirm against the class-ir docs).
    """
    name = func_decl.name
    builder.enter(name)
    self_reg = builder.add_argument('self', func_decl.sig.args[0].type)
    value_reg = builder.add_argument('value', func_decl.sig.args[1].type)
    # Setter names are always generated with the known prefix; strip it to
    # recover the underlying attribute name.
    assert name.startswith(PROPSET_PREFIX)
    attr_name = name[len(PROPSET_PREFIX):]
    if (not is_trait):
        # -1: no source line number is associated with this op.
        builder.add(SetAttr(self_reg, attr_name, value_reg, (- 1)))
    builder.add(Return(builder.none()))
    (args, _, blocks, ret_type, fn_info) = builder.leave()
    return FuncIR(func_decl, args, blocks)
def merge_sim_episode_with_object_config(sim_config, episode):
    """Merge an episode's object placements into the simulator config.

    Resolves each placed object's template via the episode's
    object_key -> object_template mapping (mutating the episode objects in
    place) and stores the resulting list on the (unfrozen, then refrozen)
    config.
    """
    sim_config = merge_sim_episode_config(sim_config, episode)
    sim_config.defrost()
    key_to_template = {entry['object_key']: entry['object_template'] for entry in episode.object_templates}
    placed_objects = []
    for placed in episode.objects:
        # Attach the resolved template to the episode object itself.
        placed.object_template = key_to_template[placed.object_key]
        placed_objects.append(placed)
    sim_config.objects = placed_objects
    sim_config.freeze()
    return sim_config
class MlpWithDepthwiseConv(nn.Module):
    """Transformer MLP block with a 3x3 depthwise conv between the two linear
    layers, applied on the token sequence reshaped to its 2D feature grid."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0, extra_relu=False):
        super().__init__()
        # Default hidden/output widths to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        # Optional ReLU before the depthwise conv; Identity keeps it a no-op.
        self.relu = nn.ReLU() if extra_relu else nn.Identity()
        self.dwconv = nn.Conv2d(hidden_features, hidden_features, 3, 1, 1, bias=True, groups=hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x, feat_size: List[int]):
        """x: (B, N, C) token sequence with N == feat_size[0] * feat_size[1]."""
        hidden = self.fc1(x)
        batch, _, channels = hidden.shape
        # Tokens -> 2D grid for the depthwise convolution.
        grid = hidden.transpose(1, 2).view(batch, channels, feat_size[0], feat_size[1])
        grid = self.dwconv(self.relu(grid))
        # Grid -> tokens.
        hidden = grid.flatten(2).transpose(1, 2)
        hidden = self.drop(self.act(hidden))
        return self.drop(self.fc2(hidden))
def _summary(self, to_stdout=True, return_df=False):
    """Summarize interval lengths of this object and of its merged coverage.

    Builds describe() statistics for the raw ranges and for the merged
    ("coverage") ranges — per strand when stranded — appends a 'sum' row
    with total lengths, optionally prints a psql-style table and/or returns
    the summary DataFrame.
    """
    lengths = {}
    total_lengths = {}
    lengths['pyrange'] = self.lengths(as_dict=True)
    total_lengths['pyrange'] = [self.length]
    if self.stranded:
        # Merge per strand so forward/reverse coverage is reported separately.
        c = self.merge(strand=True)
        lengths['coverage_forward'] = c['+'].lengths(as_dict=True)
        lengths['coverage_reverse'] = c['-'].lengths(as_dict=True)
        total_lengths['coverage_forward'] = [c['+'].length]
        total_lengths['coverage_reverse'] = [c['-'].length]
    else:
        c = self
        c = c.merge(strand=False)
        lengths['coverage_unstranded'] = c.lengths(as_dict=True)
        total_lengths['coverage_unstranded'] = [c.length]
    summaries = OrderedDict()
    # describe() each non-empty group of per-chromosome lengths.
    for (summary, d) in lengths.items():
        if d:
            summaries[summary] = pd.concat(d.values()).describe()
    summary = pd.concat(summaries.values(), axis=1)
    summary.columns = list(summaries)
    # Append the total lengths as an extra 'sum' row.
    df = pd.DataFrame.from_dict(total_lengths)
    df.index = ['sum']
    summary = pd.concat([summary, df])
    if to_stdout:
        str_repr = tabulate(summary, headers=summary.columns, tablefmt='psql')
        print(str_repr)
    if return_df:
        return summary
class FDCapture(FDCaptureBinary):
    """File-descriptor capture that exposes captured output as text
    (the binary base class works in bytes)."""

    EMPTY_BUFFER = ''

    def snap(self) -> str:
        """Return everything captured since the last snap and reset the buffer."""
        self._assert_state('snap', ('started', 'suspended'))
        self.tmpfile.seek(0)
        captured = self.tmpfile.read()
        # Rewind and truncate so the next snap starts from an empty buffer.
        self.tmpfile.seek(0)
        self.tmpfile.truncate()
        return captured

    def writeorg(self, data: str) -> None:
        """Encode *data* as UTF-8 and write it to the original descriptor."""
        super().writeorg(data.encode('utf-8'))
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
    """Scatter positional and keyword arguments across target GPUs.

    The shorter of the two scattered lists is padded (with empty tuples or
    empty dicts) so both tuples returned have equal length, one entry per
    device replica.
    """
    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
    pad = len(scattered_kwargs) - len(scattered_inputs)
    if pad > 0:
        scattered_inputs.extend(() for _ in range(pad))
    elif pad < 0:
        scattered_kwargs.extend({} for _ in range(-pad))
    return (tuple(scattered_inputs), tuple(scattered_kwargs))
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
try:
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result |
class fashion200k():
    """Fashion200k dataset loader.

    Reads per-split label files (image path + caption), then derives
    "modifiable" image pairs whose captions differ by exactly one word,
    producing "replace X with Y" modification queries for text-guided
    image retrieval.

    NOTE(review): the lower-case class name follows the upstream convention
    and is kept for backward compatibility with existing callers.
    """

    def __init__(self, path, split='train'):
        # NOTE(review): bare ``super()`` is a no-op (no method is invoked on
        # it); kept as-is since the class has no meaningful base.
        super()
        self.split = split
        self.path = path
        label_path = 'datasets/fashion200k/labels/'
        print('Processing {} set'.format(split))
        label_files = glob.glob((((label_path + '*_') + split) + '_*.txt'))
        label_files.sort()

        def caption_post_process(s):
            # Replace punctuation with word-like tokens so captions split
            # cleanly on whitespace.
            return s.strip().replace('.', 'dotmark').replace('?', 'questionmark').replace('&', 'andmark').replace('*', 'starmark')
        self.imgs = []
        self.filenames = []
        self.texts = []
        for label_file in label_files:
            print(('read ' + label_file))
            with io.open(label_file, 'r', encoding='utf8') as fd:
                for line in fd.readlines():
                    # Tab-separated: file path, <second column unused here>, caption.
                    line = line.split('\t')
                    img = {'file_path': line[0], 'captions': [caption_post_process(line[2])], 'modifiable': False}
                    self.filenames += [os.path.join(self.path, img['file_path'])]
                    self.texts += img['captions']
                    self.imgs += [img]

    def get_all_texts(self, get_modifiable=False):
        """Return all captions; only those of modifiable images if requested."""
        texts = []
        if (get_modifiable is False):
            for img in self.imgs:
                for c in img['captions']:
                    texts.append(c)
        else:
            imgs_mod = [self.imgs[i] for i in range(len(self.imgs)) if self.imgs[i]['modifiable']]
            for img in imgs_mod:
                for c in img['captions']:
                    texts.append(c)
        return texts

    def get_different_word(self, source_caption, target_caption):
        """Find the single word differing between two captions.

        Returns (source_word, target_word, "replace <source> with <target>").
        Assumes the captions differ in exactly one word.
        """
        source_words = source_caption.split()
        target_words = target_caption.split()
        for source_word in source_words:
            if (source_word not in target_words):
                break
        for target_word in target_words:
            if (target_word not in source_words):
                break
        mod_str = ((('replace ' + source_word) + ' with ') + target_word)
        return (source_word, target_word, mod_str)

    def generate_test_queries_(self):
        """Load the fixed (source, target) test pairs and build query records."""
        file2imgid = {}
        for (i, img) in enumerate(self.imgs):
            file2imgid[img['file_path']] = i
        with open('datasets/fashion200k/test_queries.txt') as f:
            lines = f.readlines()
        self.test_queries = []
        self.query_filenames = []
        self.modify_texts = []
        for line in lines:
            (source_file, target_file) = line.split()
            idx = file2imgid[source_file]
            target_idx = file2imgid[target_file]
            source_caption = self.imgs[idx]['captions'][0]
            target_caption = self.imgs[target_idx]['captions'][0]
            (source_word, target_word, mod_str) = self.get_different_word(source_caption, target_caption)
            self.test_queries += [{'source_img_id': idx, 'source_caption': source_caption, 'target_caption': target_caption, 'mod': {'str': mod_str}}]
            self.query_filenames += [os.path.join(self.path, self.imgs[idx]['file_path'])]
            self.modify_texts += [mod_str]

    def caption_index_init_(self):
        """Group captions by "parent" (caption minus one word) and mark images
        whose caption shares a parent with another caption as modifiable."""
        caption2id = {}
        id2caption = {}
        caption2imgids = {}
        for (i, img) in enumerate(self.imgs):
            for c in img['captions']:
                if (not (c in caption2id)):
                    id2caption[len(caption2id)] = c
                    caption2id[c] = len(caption2id)
                    caption2imgids[c] = []
                caption2imgids[c].append(i)
        self.caption2imgids = caption2imgids
        # Bug fix: message previously read 'unique cations'.
        print(('unique captions = %d' % len(caption2imgids)))
        parent2children_captions = {}
        for c in caption2id.keys():
            for w in c.split():
                p = c.replace(w, '')
                # Bug fix: collapse the double space left by removing a word.
                # The original called replace(' ', ' ') — a no-op — so parents
                # kept stray double spaces and grouped inconsistently.
                p = p.replace('  ', ' ').strip()
                if (not (p in parent2children_captions)):
                    parent2children_captions[p] = []
                if (c not in parent2children_captions[p]):
                    parent2children_captions[p].append(c)
        self.parent2children_captions = parent2children_captions
        for img in self.imgs:
            img['modifiable'] = False
            img['parent_captions'] = []
        # An image is modifiable when its caption's parent has >= 2 children.
        for p in parent2children_captions:
            if (len(parent2children_captions[p]) >= 2):
                for c in parent2children_captions[p]:
                    for imgid in caption2imgids[c]:
                        self.imgs[imgid]['modifiable'] = True
                        self.imgs[imgid]['parent_captions'] += [p]
        num_modifiable_imgs = 0
        for img in self.imgs:
            if img['modifiable']:
                num_modifiable_imgs += 1
        self.num_modifiable_imgs = num_modifiable_imgs
        print(('Modifiable images = %d' % num_modifiable_imgs))

    def caption_index_sample_(self, idx):
        """Sample a target image sharing a parent caption with image *idx*.

        Returns (idx, target_idx, source_word, target_word, mod_str).
        """
        # Resample until we land on a modifiable image.
        while (not self.imgs[idx]['modifiable']):
            idx = np.random.randint(0, len(self.imgs))
        img = self.imgs[idx]
        # Pick a sibling caption (same parent, different caption).
        while True:
            p = random.choice(img['parent_captions'])
            c = random.choice(self.parent2children_captions[p])
            if (c not in img['captions']):
                break
        target_idx = random.choice(self.caption2imgids[c])
        source_caption = self.imgs[idx]['captions'][0]
        target_caption = self.imgs[target_idx]['captions'][0]
        (source_word, target_word, mod_str) = self.get_different_word(source_caption, target_caption)
        return (idx, target_idx, source_word, target_word, mod_str)

    def filter_to_get_modifiable_images_(self):
        """Restrict self.filenames / self.texts to modifiable images only."""
        self.filenames = [os.path.join(self.path, self.imgs[i]['file_path']) for i in range(len(self.imgs)) if self.imgs[i]['modifiable']]
        self.texts = [self.imgs[i]['captions'][0] for i in range(len(self.imgs)) if self.imgs[i]['modifiable']]

    def get_unmodifiable_images_(self):
        """Collect filenames/captions of images that are NOT modifiable."""
        print('get unmodifiable images')
        self.filenames_um = []
        self.texts_um = []
        for (i, img) in enumerate(self.imgs):
            if (img['modifiable'] is False):
                self.filenames_um += [os.path.join(self.path, img['file_path'])]
                self.texts_um += img['captions']
        print(('The amount of unmodifiable images is %d.' % len(self.texts_um)))

    def get_item_(self):
        """Derive the item id (second-to-last path component) per filename."""
        self.items = [x.split('/')[(- 2)] for x in self.filenames]

    def generate_random_train_queries_(self, n_modifications_per_image=3):
        """Sample random, de-duplicated (source, target, modification) training
        triples from modifiable images, then shuffle them in unison."""
        self.source_files = []
        self.target_files = []
        self.modify_texts = []
        self.source_texts = []
        self.target_texts = []
        already_visited = set()
        for (i, img) in enumerate(self.imgs):
            if img['modifiable']:
                for j in range(n_modifications_per_image):
                    (idx, target_idx, source_word, target_word, mod_str) = self.caption_index_sample_(i)
                    set1 = set(self.imgs[idx]['captions'][0].split(' '))
                    set2 = set(self.imgs[target_idx]['captions'][0].split(' '))
                    if (set1 != set2):
                        # Skip pairs already emitted in either direction.
                        key = '{}-{}'.format(target_idx, idx)
                        inv_key = '{}-{}'.format(idx, target_idx)
                        if (not ((key in already_visited) or (inv_key in already_visited))):
                            self.source_files += [os.path.join(self.path, self.imgs[idx]['file_path'])]
                            self.target_files += [os.path.join(self.path, self.imgs[target_idx]['file_path'])]
                            self.modify_texts += [mod_str]
                            self.source_texts += self.imgs[idx]['captions']
                            self.target_texts += self.imgs[target_idx]['captions']
                            already_visited.add(key)
        # Shuffle all five parallel lists with the same permutation.
        shuffle_idx = list(range(len(self.source_files)))
        random.shuffle(shuffle_idx)
        self.source_files = [self.source_files[i] for i in shuffle_idx]
        self.target_files = [self.target_files[i] for i in shuffle_idx]
        self.modify_texts = [self.modify_texts[i] for i in shuffle_idx]
        self.source_texts = [self.source_texts[i] for i in shuffle_idx]
        self.target_texts = [self.target_texts[i] for i in shuffle_idx]
        print(('shuffling the random source-target pairs. It gives %d pairs.' % len(self.source_files)))
class MAGNA(nn.Module):
    """Stack of MAGNALayer blocks over a fixed DGL graph with an optional
    input projection, per-layer random edge dropping (pairwise for undirected
    graphs so both directions of an edge drop together), and a final linear
    classifier producing per-node logits."""
    def __init__(self, g: DGLGraph, num_layers: int, input_dim: int, hidden_dim: int, hop_num: int, alpha: float, num_classes: int, heads: list, top_k: int, feat_drop: float, attn_drop: float, negative_slope: float, edge_drop: float, topk_type: str, self_loop_number: int, undirected_graph=True, self_loop=True, layer_norm=True, feed_forward=True, head_tail_shared=True, project_dim=(- 1)):
        super(MAGNA, self).__init__()
        self.g = g
        self.gdt_layers = nn.ModuleList()
        self.self_loop = self_loop
        # Self-loop edges are assumed to sit at the end of the edge list and
        # are excluded from edge dropping (see forward()).
        self.number_self_loops = self_loop_number
        self.undirected_graph = undirected_graph
        self.layer_norm = layer_norm
        self.feed_forward = feed_forward
        if (project_dim > 1):
            # Optional linear projection of the raw input features.
            self.project = nn.Linear(in_features=input_dim, out_features=project_dim)
            self.input_features = project_dim
        else:
            # Registered as a None buffer so ``self.project`` always exists
            # and ``self.project is None`` cleanly selects the no-projection
            # path below.
            self.register_buffer('project', None)
            self.input_features = input_dim
        self.num_layers = num_layers
        self.edge_drop = edge_drop
        # First layer maps input features to hidden_dim; subsequent layers
        # are hidden -> hidden, each with its own head count.
        self.gdt_layers.append(MAGNALayer(in_feats=self.input_features, hop_num=hop_num, top_k=top_k, num_heads=heads[0], hidden_dim=hidden_dim, topk_type=topk_type, layer_norm=self.layer_norm, feed_forward=self.feed_forward, head_tail_shared=head_tail_shared, alpha=alpha, negative_slope=negative_slope, feat_drop=feat_drop, attn_drop=attn_drop))
        for l in range(1, self.num_layers):
            self.gdt_layers.append(MAGNALayer(in_feats=hidden_dim, hop_num=hop_num, hidden_dim=hidden_dim, num_heads=heads[l], top_k=top_k, layer_norm=self.layer_norm, feed_forward=self.feed_forward, head_tail_shared=head_tail_shared, topk_type=topk_type, alpha=alpha, negative_slope=negative_slope, feat_drop=feat_drop, attn_drop=attn_drop))
        self.classifier = nn.Linear(in_features=hidden_dim, out_features=num_classes)
        self.feat_drop_out = nn.Dropout(p=feat_drop)
        self.reset_parameters()
    def reset_parameters(self):
        # Xavier-initialize the classifier and (if present) the projection.
        if isinstance(self.classifier, nn.Linear):
            nn.init.xavier_normal_(self.classifier.weight.data)
        if ((self.project is not None) and isinstance(self.project, nn.Linear)):
            nn.init.xavier_normal_(self.project.weight.data)
    def forward(self, inputs):
        """Return per-node classification logits for node features *inputs*."""
        number_edges = self.g.number_of_edges()
        if (self.project is not None):
            h = self.project(self.feat_drop_out(inputs))
        else:
            h = inputs
        for l in range(self.num_layers):
            # Resample dropped edges at every layer; self-loops are spared.
            if self.undirected_graph:
                drop_edge_ids = self.get_drop_edge_pair_ids((number_edges - self.number_self_loops))
            else:
                drop_edge_ids = self.get_drop_edge_ids((number_edges - self.number_self_loops))
            h = self.gdt_layers[l](self.g, h, drop_edge_ids)
        logits = self.classifier(h)
        return logits
    def get_drop_edge_ids(self, number_edges):
        """Sample edge ids to drop (directed graphs); None when not training
        or when the drop count rounds to zero."""
        drop_edge_num = int((number_edges * self.edge_drop))
        if self.training:
            if (drop_edge_num > 0):
                drop_edges_ids = np.random.choice(number_edges, drop_edge_num, replace=False)
                drop_edges_ids = torch.from_numpy(drop_edges_ids)
            else:
                drop_edges_ids = None
        else:
            drop_edges_ids = None
        return drop_edges_ids
    def get_drop_edge_pair_ids(self, number_edges):
        """Sample edge ids to drop in mirrored pairs (undirected graphs).

        Assumes the edge list stores each direction in one contiguous half,
        with edge i mirrored by edge i + number_edges/2 — confirm this layout
        against the graph construction code.  The 0.5 factor halves the
        per-direction count so the total dropped matches edge_drop.
        """
        one_direct_number_edge = (number_edges // 2)
        drop_edge_num = int(((one_direct_number_edge * self.edge_drop) * 0.5))
        if self.training:
            if (drop_edge_num > 0):
                drop_edges_ids = np.random.choice(one_direct_number_edge, drop_edge_num, replace=False)
                inv_drop_edges_ids = (drop_edges_ids + one_direct_number_edge)
                drop_edges_ids = np.concatenate([drop_edges_ids, inv_drop_edges_ids])
                drop_edges_ids = torch.from_numpy(drop_edges_ids)
            else:
                drop_edges_ids = None
        else:
            drop_edges_ids = None
        return drop_edges_ids
    def layer_attention_node_features(self, inputs):
        """Like forward(), but also collect each layer's node features and
        attention tensors for inspection.

        Returns (logits, layer_node_features, layer_attentions).
        """
        number_edges = self.g.number_of_edges()
        (layer_node_features, layer_attentions) = ([], [])
        if (self.project is not None):
            h = self.project(self.feat_drop_out(inputs))
        else:
            h = inputs
        for l in range(self.num_layers):
            if self.undirected_graph:
                drop_edge_ids = self.get_drop_edge_pair_ids((number_edges - self.number_self_loops))
            else:
                drop_edge_ids = self.get_drop_edge_ids((number_edges - self.number_self_loops))
            (h, attentions) = self.gdt_layers[l].forward_for_evaluataion(self.g, h, drop_edge_ids)
            layer_node_features.append(h)
            layer_attentions.append(attentions)
        logits = self.classifier(h)
        return (logits, layer_node_features, layer_attentions)
def main():
    """Launch the learner, the parameter/priority sender processes, and the
    batch receiver processes, then wait for all of them to exit."""
    (n_actors, replay_ip) = get_environ()
    args = argparser()
    # Bounded queues connecting the learner to the replay-side processes.
    batch_queue = Queue(maxsize=args.queue_size)
    prios_queue = Queue(maxsize=args.prios_queue_size)
    param_queue = Queue(maxsize=3)
    workers = [
        Process(target=train, args=(args, n_actors, batch_queue, prios_queue, param_queue)),
        Process(target=send_param, args=(param_queue,)),
        Process(target=send_prios, args=(prios_queue, replay_ip)),
    ]
    # Several receivers can pull batches from the replay server in parallel.
    workers.extend(
        Process(target=recv_batch, args=(batch_queue, replay_ip, args.device))
        for _ in range(args.n_recv_batch_process)
    )
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
# NOTE(review): the bare ``()`` below looks like a decorator call (e.g. a
# task-registration decorator) whose name and ``@`` were lost — confirm
# against version control.
()
def notify_of_completed_flights():
    """Deactivate finished ad flights and notify advertisers.

    Hard-stops live flights past their end date, marks fully-delivered
    flights complete, posts Slack notices for both, and (when Front email is
    enabled) sends a per-advertiser wrap-up email listing completed, current
    and upcoming flights.
    """
    cutoff = (get_ad_day() - datetime.timedelta(days=1))
    completed_flights_by_advertiser = defaultdict(list)
    for flight in Flight.objects.filter(live=True).select_related():
        if (flight.hard_stop and (flight.end_date <= cutoff.date())):
            # Hard stop: deactivate regardless of remaining value.
            log.info('Flight %s is being hard stopped.', flight)
            value_remaining = round(flight.value_remaining(), 2)
            flight_url = generate_absolute_url(flight.get_absolute_url())
            slack_message('adserver/slack/generic-message.slack', {'text': f'Flight {flight.name} was hard stopped. There was ${value_remaining:.2f} value remaining. {flight_url}'})
            flight.live = False
            flight.save()
            update_change_reason(flight, f'Hard stopped with ${value_remaining} value remaining.')
            completed_flights_by_advertiser[flight.campaign.advertiser.slug].append(flight)
        elif ((flight.clicks_remaining() == 0) and (flight.views_remaining() == 0) and AdImpression.objects.filter(date__gte=cutoff, advertisement__flight=flight).exists()):
            # Fully delivered, and it actually served within the last day.
            log.info('Flight %s finished in the last day.', flight)
            slack_message('adserver/slack/flight-complete.slack', {'flight': flight, 'flight_url': generate_absolute_url(flight.get_absolute_url())})
            flight.live = False
            flight.save()
            completed_flights_by_advertiser[flight.campaign.advertiser.slug].append(flight)
    if settings.FRONT_ENABLED:
        site = get_current_site(request=None)
        for (advertiser_slug, completed_flights) in completed_flights_by_advertiser.items():
            advertiser = Advertiser.objects.get(slug=advertiser_slug)
            # Only users who opted into completed-flight notifications.
            to_addresses = [u.email for u in advertiser.user_set.all() if u.notify_on_completed_flights]
            if (not to_addresses):
                log.debug('No recipients for the wrapup email. Skipping...')
                continue
            context = {'advertiser': advertiser, 'site': site, 'completed_flights': completed_flights, 'current_flights': [f for f in Flight.objects.filter(campaign__advertiser=advertiser) if (f.state == FLIGHT_STATE_CURRENT)], 'upcoming_flights': [f for f in Flight.objects.filter(campaign__advertiser=advertiser) if (f.state == FLIGHT_STATE_UPCOMING)]}
            with mail.get_connection(settings.FRONT_BACKEND, sender_name=f'{site.name} Flight Tracker') as connection:
                message = mail.EmailMessage((_('Advertising flight wrapup - %(name)s') % {'name': site.name}), render_to_string('adserver/email/flight_wrapup.html', context), from_email=settings.DEFAULT_FROM_EMAIL, to=to_addresses, connection=connection)
                message.send()
def _wasserstein_compute(x: torch.Tensor, y: torch.Tensor, x_weights: Optional[torch.Tensor], y_weights: Optional[torch.Tensor]) -> torch.Tensor:
device = x.device
x_sorter = torch.argsort(x)
y_sorter = torch.argsort(y)
all_values = torch.concatenate((x, y))
(all_values, _) = torch.sort(all_values)
deltas = torch.diff(all_values)
x_cdf_indices = torch.searchsorted(x[x_sorter], all_values[:(- 1)], right=True)
y_cdf_indices = torch.searchsorted(y[y_sorter], all_values[:(- 1)], right=True)
if (x_weights is None):
x_cdf = (x_cdf_indices.to(device) / x.size(0))
else:
x_sorted_cum_weights = torch.cat((torch.Tensor([0]).to(device), torch.cumsum(x_weights[x_sorter], dim=0)))
x_cdf = (x_sorted_cum_weights[x_cdf_indices] / x_sorted_cum_weights[(- 1)])
if (y_weights is None):
y_cdf = (y_cdf_indices.to(device) / y.size(0))
else:
y_sorted_cum_weights = torch.cat((torch.Tensor([0]).to(device), torch.cumsum(y_weights[y_sorter], dim=0)))
y_cdf = (y_sorted_cum_weights[y_cdf_indices] / y_sorted_cum_weights[(- 1)])
return torch.sum(torch.multiply(torch.abs((x_cdf - y_cdf)), deltas), dim=0, keepdim=True).to(device) |
class GradientEditorItem(TickSliderItem):
    """
    Graphics widget for interactively editing a color gradient.

    Extends TickSliderItem with a gradient preview rectangle, a color dialog
    for editing individual tick colors, and a context menu of preset
    colormaps and color-interpolation modes.
    """

    # Emitted continuously while the gradient changes (tick drag, live color edit).
    sigGradientChanged = QtCore.Signal(object)
    # Emitted once when a change is committed (drag finished / dialog accepted).
    sigGradientChangeFinished = QtCore.Signal(object)

    def __init__(self, *args, **kargs):
        """All arguments are passed through to TickSliderItem.__init__."""
        self.currentTick = None
        self.currentTickColor = None
        self.rectSize = 15
        # Preview rectangle showing the current gradient.
        self.gradRect = QtWidgets.QGraphicsRectItem(QtCore.QRectF(0, self.rectSize, 100, self.rectSize))
        # Cross-hatched background so transparency in the gradient is visible.
        self.backgroundRect = QtWidgets.QGraphicsRectItem(QtCore.QRectF(0, (- self.rectSize), 100, self.rectSize))
        self.backgroundRect.setBrush(QtGui.QBrush(QtCore.Qt.BrushStyle.DiagCrossPattern))
        self.colorMode = 'rgb'
        TickSliderItem.__init__(self, *args, **kargs)
        self.colorDialog = QtWidgets.QColorDialog()
        self.colorDialog.setOption(QtWidgets.QColorDialog.ColorDialogOption.ShowAlphaChannel, True)
        self.colorDialog.setOption(QtWidgets.QColorDialog.ColorDialogOption.DontUseNativeDialog, True)
        self.colorDialog.currentColorChanged.connect(self.currentColorChanged)
        self.colorDialog.rejected.connect(self.currentColorRejected)
        self.colorDialog.accepted.connect(self.currentColorAccepted)
        self.backgroundRect.setParentItem(self)
        self.gradRect.setParentItem(self)
        self.setMaxDim((self.rectSize + self.tickSize))
        self.rgbAction = QtGui.QAction(translate('GradiantEditorItem', 'RGB'), self)
        self.rgbAction.setCheckable(True)
        self.rgbAction.triggered.connect(self._setColorModeToRGB)
        self.hsvAction = QtGui.QAction(translate('GradiantEditorItem', 'HSV'), self)
        self.hsvAction.setCheckable(True)
        self.hsvAction.triggered.connect(self._setColorModeToHSV)
        self.menu = ColorMapMenu(showGradientSubMenu=True)
        self.menu.triggered.connect(self.contextMenuClicked)
        self.menu.addSeparator()
        self.menu.addAction(self.rgbAction)
        self.menu.addAction(self.hsvAction)
        # Start from a clean two-tick black -> red gradient.
        for t in list(self.ticks.keys()):
            self.removeTick(t)
        self.addTick(0, QtGui.QColor(0, 0, 0), True)
        self.addTick(1, QtGui.QColor(255, 0, 0), True)
        self.setColorMode('rgb')
        self.updateGradient()
        self.linkedGradients = {}
        self.sigTicksChanged.connect(self._updateGradientIgnoreArgs)
        self.sigTicksChangeFinished.connect(self.sigGradientChangeFinished)

    def showTicks(self, show=True):
        """Show or hide all ticks; hiding also disables adding new ticks."""
        for tick in self.ticks.keys():
            if show:
                tick.show()
                # Restore the allowAdd flag saved when ticks were hidden.
                orig = getattr(self, '_allowAdd_backup', None)
                if orig:
                    self.allowAdd = orig
            else:
                self._allowAdd_backup = self.allowAdd
                self.allowAdd = False
                tick.hide()

    def setOrientation(self, orientation):
        """Set slider orientation, then shift down to make room for the gradient strip."""
        TickSliderItem.setOrientation(self, orientation)
        tr = QtGui.QTransform.fromTranslate(0, self.rectSize)
        self.setTransform(tr, True)

    def showMenu(self, ev):
        # Open the colormap context menu at the mouse position.
        self.menu.popup(ev.screenPos().toQPoint())

    def contextMenuClicked(self, action):
        """Handle a selection from the colormap context menu."""
        if (action in [self.rgbAction, self.hsvAction]):
            # Mode toggles are handled by their own triggered slots.
            return
        (name, source) = action.data()
        if (source == 'preset-gradient'):
            self.loadPreset(name)
        else:
            if (name is None):
                cmap = colormap.ColorMap(None, [0.0, 1.0])
            else:
                cmap = colormap.get(name, source=source)
            self.setColorMap(cmap)
            self.showTicks(False)
        ()  # no-op expression; looks like a decompilation artifact — kept as-is

    def loadPreset(self, name):
        """Load a predefined gradient from the Gradients registry."""
        self.restoreState(Gradients[name])

    def setColorMode(self, cm):
        """Set color interpolation mode: 'rgb' or 'hsv'."""
        if (cm not in ['rgb', 'hsv']):
            raise Exception(("Unknown color mode %s. Options are 'rgb' and 'hsv'." % str(cm)))
        try:
            # Update menu check-marks without re-triggering this method.
            self.rgbAction.blockSignals(True)
            self.hsvAction.blockSignals(True)
            self.rgbAction.setChecked((cm == 'rgb'))
            self.hsvAction.setChecked((cm == 'hsv'))
        finally:
            self.rgbAction.blockSignals(False)
            self.hsvAction.blockSignals(False)
        self.colorMode = cm
        self.sigTicksChanged.emit(self)
        self.sigGradientChangeFinished.emit(self)

    def _setColorModeToRGB(self):
        self.setColorMode('rgb')

    def _setColorModeToHSV(self):
        self.setColorMode('hsv')

    def colorMap(self):
        """Return a ColorMap describing the current gradient (rgb mode only)."""
        if (self.colorMode == 'hsv'):
            raise NotImplementedError('hsv colormaps not yet supported')
        pos = []
        color = []
        for (t, x) in self.listTicks():
            pos.append(x)
            c = t.color
            color.append(c.getRgb())
        return ColorMap(np.array(pos), np.array(color, dtype=np.ubyte))

    def updateGradient(self):
        """Rebuild the QLinearGradient, repaint the preview and notify listeners."""
        self.gradient = self.getGradient()
        self.gradRect.setBrush(QtGui.QBrush(self.gradient))
        self.sigGradientChanged.emit(self)

    def _updateGradientIgnoreArgs(self, *args, **kwargs):
        # Signal adapter: discard the emitting object argument.
        self.updateGradient()

    def setLength(self, newLen):
        """Resize the slider and the gradient/background rectangles."""
        TickSliderItem.setLength(self, newLen)
        self.backgroundRect.setRect(1, (- self.rectSize), newLen, self.rectSize)
        self.gradRect.setRect(1, (- self.rectSize), newLen, self.rectSize)
        self.sigTicksChanged.emit(self)

    def currentColorChanged(self, color):
        # Live preview while the color dialog is open.
        if (color.isValid() and (self.currentTick is not None)):
            self.setTickColor(self.currentTick, color)

    def currentColorRejected(self):
        # Dialog cancelled: restore the tick's original color.
        self.setTickColor(self.currentTick, self.currentTickColor)

    def currentColorAccepted(self):
        self.sigGradientChangeFinished.emit(self)

    def tickClicked(self, tick, ev):
        """Left click edits the tick color; right click opens the tick menu."""
        if (ev.button() == QtCore.Qt.MouseButton.LeftButton):
            self.raiseColorDialog(tick)
        elif (ev.button() == QtCore.Qt.MouseButton.RightButton):
            self.raiseTickContextMenu(tick, ev)

    def raiseColorDialog(self, tick):
        """Open the color dialog for *tick* if its color may be changed."""
        if (not tick.colorChangeAllowed):
            return
        self.currentTick = tick
        self.currentTickColor = tick.color
        self.colorDialog.setCurrentColor(tick.color)
        self.colorDialog.open()

    def raiseTickContextMenu(self, tick, ev):
        self.tickMenu = TickMenu(tick, self)
        self.tickMenu.popup(ev.screenPos().toQPoint())

    def tickMoveFinished(self, tick):
        self.sigGradientChangeFinished.emit(self)

    def getGradient(self):
        """Return a QLinearGradient built from the current ticks."""
        g = QtGui.QLinearGradient(QtCore.QPointF(0, 0), QtCore.QPointF(self.length, 0))
        if (self.colorMode == 'rgb'):
            ticks = self.listTicks()
            g.setStops([(x, QtGui.QColor(t.color)) for (t, x) in ticks])
        elif (self.colorMode == 'hsv'):
            # Qt interpolates stops in RGB, so approximate HSV interpolation by
            # inserting 10 intermediate stops between each pair of ticks.
            ticks = self.listTicks()
            stops = []
            stops.append((ticks[0][1], ticks[0][0].color))
            for i in range(1, len(ticks)):
                x1 = ticks[(i - 1)][1]
                x2 = ticks[i][1]
                dx = ((x2 - x1) / 10.0)
                for j in range(1, 10):
                    x = (x1 + (dx * j))
                    stops.append((x, self.getColor(x)))
                stops.append((x2, self.getColor(x2)))
            g.setStops(stops)
        return g

    def getColor(self, x, toQColor=True):
        """
        Return the gradient color at position x (0.0-1.0).

        Returns a QColor when toQColor is True, otherwise an (r, g, b, a) tuple.
        """
        ticks = self.listTicks()
        # Clamp to the first/last tick color outside the tick range.
        if (x <= ticks[0][1]):
            c = ticks[0][0].color
            if toQColor:
                return QtGui.QColor(c)
            else:
                return c.getRgb()
        if (x >= ticks[(- 1)][1]):
            c = ticks[(- 1)][0].color
            if toQColor:
                return QtGui.QColor(c)
            else:
                return c.getRgb()
        # Locate the pair of ticks bracketing x.
        x2 = ticks[0][1]
        for i in range(1, len(ticks)):
            x1 = x2
            x2 = ticks[i][1]
            if ((x1 <= x) and (x2 >= x)):
                break
        dx = (x2 - x1)
        if (dx == 0):
            f = 0.0
        else:
            f = ((x - x1) / dx)
        c1 = ticks[(i - 1)][0].color
        c2 = ticks[i][0].color
        if (self.colorMode == 'rgb'):
            # Linear interpolation per RGBA channel.
            r = ((c1.red() * (1.0 - f)) + (c2.red() * f))
            g = ((c1.green() * (1.0 - f)) + (c2.green() * f))
            b = ((c1.blue() * (1.0 - f)) + (c2.blue() * f))
            a = ((c1.alpha() * (1.0 - f)) + (c2.alpha() * f))
            if toQColor:
                return QtGui.QColor(int(r), int(g), int(b), int(a))
            else:
                return (r, g, b, a)
        elif (self.colorMode == 'hsv'):
            # Linear interpolation in HSV space (alpha is not interpolated here).
            (h1, s1, v1, _) = c1.getHsv()
            (h2, s2, v2, _) = c2.getHsv()
            h = ((h1 * (1.0 - f)) + (h2 * f))
            s = ((s1 * (1.0 - f)) + (s2 * f))
            v = ((v1 * (1.0 - f)) + (v2 * f))
            c = QtGui.QColor.fromHsv(int(h), int(s), int(v))
            if toQColor:
                return c
            else:
                return c.getRgb()

    def getLookupTable(self, nPts, alpha=None):
        """
        Return an (nPts, 3 or 4) ubyte lookup table sampled from the gradient.

        If alpha is None, the alpha column is included only when some tick
        uses a non-opaque color.
        """
        if (alpha is None):
            alpha = self.usesAlpha()
        if alpha:
            table = np.empty((nPts, 4), dtype=np.ubyte)
        else:
            table = np.empty((nPts, 3), dtype=np.ubyte)
        for i in range(nPts):
            x = (float(i) / (nPts - 1))
            color = self.getColor(x, toQColor=False)
            table[i] = color[:table.shape[1]]
        return table

    def usesAlpha(self):
        """Return True if any tick color has alpha < 255."""
        ticks = self.listTicks()
        for t in ticks:
            if (t[0].color.alpha() < 255):
                return True
        return False

    def isLookupTrivial(self):
        """Return True if the gradient is exactly black(0.0) -> white(1.0)."""
        ticks = self.listTicks()
        if (len(ticks) != 2):
            return False
        if ((ticks[0][1] != 0.0) or (ticks[1][1] != 1.0)):
            return False
        c1 = ticks[0][0].color.getRgb()
        c2 = ticks[1][0].color.getRgb()
        if ((c1 != (0, 0, 0, 255)) or (c2 != (255, 255, 255, 255))):
            return False
        return True

    def addTick(self, x, color=None, movable=True, finish=True):
        """Add a tick at x; the default color is sampled from the current gradient."""
        if (color is None):
            color = self.getColor(x)
        t = TickSliderItem.addTick(self, x, color=color, movable=movable, finish=finish)
        t.colorChangeAllowed = True
        return t

    def saveState(self):
        """Return a dict describing the gradient state (see restoreState)."""
        ticks = []
        for t in self.ticks:
            c = t.color
            ticks.append((self.ticks[t], c.getRgb()))
        state = {'mode': self.colorMode, 'ticks': ticks, 'ticksVisible': next(iter(self.ticks)).isVisible()}
        return state

    def restoreState(self, state):
        """Restore a gradient previously captured with saveState()."""
        # Suppress intermediate signals; emit once at the end.
        signalsBlocked = self.blockSignals(True)
        self.setColorMode(state['mode'])
        for t in list(self.ticks.keys()):
            self.removeTick(t, finish=False)
        for t in state['ticks']:
            c = QtGui.QColor(*t[1])
            self.addTick(t[0], c, finish=False)
        self.showTicks(state.get('ticksVisible', next(iter(self.ticks)).isVisible()))
        self.blockSignals(signalsBlocked)
        self.sigTicksChanged.emit(self)
        self.sigGradientChangeFinished.emit(self)

    def setColorMap(self, cm):
        """Replace all ticks with the stops of the given ColorMap (rgb mode)."""
        signalsBlocked = self.blockSignals(True)
        self.setColorMode('rgb')
        for t in list(self.ticks.keys()):
            self.removeTick(t, finish=False)
        colors = cm.getColors(mode='qcolor')
        for i in range(len(cm.pos)):
            x = cm.pos[i]
            c = colors[i]
            self.addTick(x, c, finish=False)
        self.blockSignals(signalsBlocked)
        self.sigTicksChanged.emit(self)
        self.sigGradientChangeFinished.emit(self)

    def linkGradient(self, slaveGradient, connect=True):
        """Keep another GradientEditorItem synchronized with this one (or unlink it)."""
        if connect:
            fn = (lambda g, slave=slaveGradient: slave.restoreState(g.saveState()))
            self.linkedGradients[id(slaveGradient)] = fn
            self.sigGradientChanged.connect(fn)
            self.sigGradientChanged.emit(self)
        else:
            fn = self.linkedGradients.get(id(slaveGradient), None)
            if fn:
                self.sigGradientChanged.disconnect(fn)
class BugzillaError(Exception):
    """Error raised in the Bugzilla client API.

    Has a ``code`` attribute containing the numeric error code from the
    server, or None if no code was present or parsed.
    """

    @staticmethod
    def get_bugzilla_error_string(exc):
        """Return the error string from an XML-RPC fault, or str(exc) otherwise."""
        return getattr(exc, 'faultString', str(exc))

    @staticmethod
    def get_bugzilla_error_code(exc):
        """Return the numeric error code carried by *exc*, or None if absent."""
        for propname in ['faultCode', 'code']:
            if hasattr(exc, propname):
                return getattr(exc, propname)
        return None

    def __init__(self, message, code=None):
        self.code = code
        # Append the code to the message for display; note a falsy code (0/None)
        # is intentionally not shown.
        if self.code:
            message += (' (code=%s)' % self.code)
        Exception.__init__(self, message)
class BaseDataset(data.Dataset):
    """Abstract base class for datasets; subclasses override the hooks below."""

    def __init__(self):
        super(BaseDataset, self).__init__()

    def name(self):
        """Return a human-readable dataset name."""
        return 'BaseDataset'

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add dataset-specific CLI options to *parser*; base adds none.

        Declared static (it takes no self/cls) so it can be called on the
        class or an instance alike.
        """
        return parser

    def initialize(self, opt):
        """Load/prepare data according to the parsed options; base is a no-op."""
        pass

    def __len__(self):
        # Subclasses return the number of samples; the base dataset is empty.
        return 0
def construct_jacobian(y, x, retain_graph=False):
    """Compute the full Jacobian dy/dx by one backward pass per element of y.

    Args:
        y: tensor computed from *x* (requires a grad graph).
        x: leaf tensor with requires_grad=True.
        retain_graph: keep the graph alive after the final backward pass.

    Returns:
        Tensor of shape (*y.shape, *x.shape).
    """
    x_grads = []
    for idx, y_element in enumerate(y.flatten()):
        # Clear accumulated gradient so each pass yields d(y_element)/dx alone.
        if x.grad is not None:
            x.grad.zero_()
        # Retain the graph for all but the last element (unless asked to keep it).
        y_element.backward(retain_graph=(retain_graph or idx < y.numel() - 1))
        # BUGFIX: previously appended x.grad.clone()[1], discarding all but one
        # slice of the gradient and breaking the reshape below.
        x_grads.append(x.grad.clone())
    return torch.stack(x_grads).reshape(*y.shape, *x.shape)
@pytest.mark.skipif((not JSON5_ENABLED), reason='test requires json5')
@pytest.mark.parametrize('passing_data', [True, False])
def test_json5_reference(run_line, tmp_path, passing_data):
    """check-jsonschema resolves a $ref to a .json5 schema file and validates
    the document against it (exit 0 on valid data, 1 on invalid)."""
    # Main schema references the .json5 file; both are written as plain JSON text.
    main_schemafile = (tmp_path / 'main_schema.json')
    main_schemafile.write_text(json.dumps(JSON5_REF_MAIN_SCHEMA))
    ref_schema = (tmp_path / 'title_schema.json5')
    ref_schema.write_text(json.dumps(TITLE_SCHEMA))
    doc = (tmp_path / 'doc.json')
    if passing_data:
        doc.write_text(json.dumps(PASSING_DOCUMENT))
    else:
        doc.write_text(json.dumps(FAILING_DOCUMENT))
    result = run_line(['check-jsonschema', '--schemafile', str(main_schemafile), str(doc)])
    assert (result.exit_code == (0 if passing_data else 1))
class BatchNorm(nn.BatchNorm2d):
    """BatchNorm2d with constant (re)initialization and optional freezing of
    the affine parameters.

    Passing None for weight_init/bias_init keeps the default initialization;
    weight_freeze/bias_freeze exclude the corresponding parameter from
    gradient updates. Extra **kwargs are accepted and ignored.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, weight_freeze=False, bias_freeze=False, weight_init=1.0, bias_init=0.0, **kwargs):
        super().__init__(num_features, eps=eps, momentum=momentum)
        # Handle weight and bias symmetrically: optional constant init, then freeze flag.
        for param, init_value, frozen in (
            (self.weight, weight_init, weight_freeze),
            (self.bias, bias_init, bias_freeze),
        ):
            if init_value is not None:
                nn.init.constant_(param, init_value)
            param.requires_grad_(not frozen)
class RuleEnvironment(object):
    """Pairs a rule index with per-property lists of (value2 - value1) deltas."""

    __slots__ = ('idx', 'property_value_lists')

    def __init__(self, idx, property_value_lists):
        self.idx = idx
        self.property_value_lists = property_value_lists

    def append_pair_properties(self, property_values1, property_values2):
        """Record the delta of each aligned property pair, skipping None values.

        Does nothing when either argument is empty/falsy. Lazily creates one
        empty bucket per property on first use.
        """
        if not property_values1 or not property_values2:
            return
        if self.property_value_lists is None:
            self.property_value_lists = [[] for _ in property_values1]
        for bucket, first, second in zip(self.property_value_lists, property_values1, property_values2):
            if first is None or second is None:
                continue
            bucket.append(second - first)
class UnpackType(ProperType):
    """Type operator Unpack from PEP 646; wraps the unpacked inner type.

    from_star_syntax records whether the unpack was written with ``*``.
    """

    __slots__ = ['type', 'from_star_syntax']

    def __init__(self, typ: Type, line: int = -1, column: int = -1, from_star_syntax: bool = False) -> None:
        super().__init__(line, column)
        self.type = typ
        self.from_star_syntax = from_star_syntax

    def accept(self, visitor: TypeVisitor[T]) -> T:
        return visitor.visit_unpack_type(self)

    def serialize(self) -> JsonDict:
        # from_star_syntax is presentation-only and deliberately not serialized.
        return {'.class': 'UnpackType', 'type': self.type.serialize()}

    @classmethod
    def deserialize(cls, data: JsonDict) -> UnpackType:
        # BUGFIX: this alternate constructor takes ``cls`` and must be a
        # classmethod; without the decorator, UnpackType.deserialize(data)
        # would bind *data* to ``cls``.
        assert data['.class'] == 'UnpackType'
        typ = data['type']
        return UnpackType(deserialize_type(typ))

    def __hash__(self) -> int:
        return hash(self.type)

    def __eq__(self, other: object) -> bool:
        return isinstance(other, UnpackType) and self.type == other.type
def derivatives_in_toroidal_coordinates():
    """Demonstrate geometric-algebra derivatives in toroidal coordinates (galgebra demo)."""
    Print_Function()
    a = symbols('a', real=True)
    coords = (u, v, phi) = symbols('u v phi', real=True)
    # Build a 3D geometric algebra from the toroidal embedding (x, y, z);
    # norm=True yields an orthonormal basis (e_u, e_v, e_phi).
    (t3d, eu, ev, ephi) = Ga.build('e_u e_v e_phi', X=[(((a * sinh(v)) * cos(phi)) / (cosh(v) - cos(u))), (((a * sinh(v)) * sin(phi)) / (cosh(v) - cos(u))), ((a * sin(u)) / (cosh(v) - cos(u)))], coords=coords, norm=True)
    grad = t3d.grad
    # Scalar, vector, and bivector fields of the coordinates (f=True).
    f = t3d.mv('f', 'scalar', f=True)
    A = t3d.mv('A', 'vector', f=True)
    B = t3d.mv('B', 'bivector', f=True)
    print('f =', f)
    print('A =', A)
    print('B =', B)
    # Gradient, divergence, curl (via the dual) and exterior derivative.
    print('grad*f =', (grad * f))
    print('grad|A =', (grad | A))
    print('-I*(grad^A) =', ((- t3d.i) * (grad ^ A)))
    print('grad^B =', (grad ^ B))
    return
def get_sentiment(df, emotions, other_emotions, min_len=1):
    """Collect normalized texts from *df* that contain an emotion keyword.

    For each keyword in *emotions*, rows whose 'text' contains the keyword are
    normalized; rows are dropped when they mention any of *other_emotions*,
    are shorter than *min_len* tokens, or consist solely of mention tokens.

    Returns a list of normalized token sequences.
    """
    data = []
    for sentiment in tqdm(emotions):
        res = df[df['text'].str.contains(sentiment, na=False)]
        for ind in range(len(res)):
            try:
                t = normalize_text(res.iloc[ind].text)
            except Exception:
                # Best effort: skip rows whose text cannot be normalized.
                # (Previously a bare ``except: pass`` wrapped the whole body,
                # silently hiding unrelated errors.)
                continue
            # Exclude texts mentioning any conflicting emotion.
            if not set(t).isdisjoint(other_emotions):
                continue
            if len(t) < min_len:
                continue
            # Exclude texts that are nothing but a repeated mention token.
            if len(set(t)) == 1 and t[0] == MENTION:
                continue
            data.append(t)
    return data
class FlaxCodeGenRLForCausalLMModule(FlaxCodeGenForCausalLMModule):
    """
    CodeGen causal-LM module extended for RL fine-tuning.

    Depending on the config it acts as a reward model (scalar head instead of
    LM logits) or as a policy, optionally with a value head next to the LM head.
    """

    # Model configuration (flax module dataclass field).
    config: CodeGenRLConfig
    # Computation dtype.
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.transformer = FlaxCodeGenModule(self.config, dtype=self.dtype)
        # LM head: hidden states -> vocabulary logits, sharded over (embed, vocab).
        self.lm_head = pnn.Dense(self.config.vocab_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), shard_axes={'kernel': ('embed', 'vocab'), 'bias': ('vocab',)})
        if self.config.is_reward_model:
            # Scalar reward head used in place of the LM logits.
            self.reward_head = pnn.Dense(1, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), shard_axes={'kernel': ('embed', None), 'bias': (None,)})
        # Value head only for the policy, never together with the reward head.
        self.use_value_head = (self.config.has_value_head and (not self.config.is_reward_model))
        if self.use_value_head:
            self.value_head = FlaxValueHead(self.config, dtype=self.dtype)

    def __call__(self, input_ids, attention_mask, position_ids, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True):
        """Run the transformer, then the LM or reward head (plus optional value head)."""
        outputs = self.transformer(input_ids, attention_mask, position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = outputs[0]
        if (not self.config.is_reward_model):
            if self.config.tie_word_embeddings:
                # Reuse the (transposed) input embedding matrix as the LM head kernel.
                shared_kernel = self.transformer.variables['params']['wte']['embedding'].T
                lm_logits = self.lm_head.apply({'params': {'kernel': shared_kernel}}, hidden_states)
            else:
                lm_logits = self.lm_head(hidden_states)
        else:
            lm_logits = self.reward_head(hidden_states)
        if self.use_value_head:
            values = self.value_head(hidden_states)
        if (not return_dict):
            # Tuple outputs mirror the transformer's extra outputs.
            if self.use_value_head:
                return ((lm_logits, values) + outputs[1:])
            else:
                return ((lm_logits,) + outputs[1:])
        elif self.use_value_head:
            return CodeGenRLOutput(logits=lm_logits, values=values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
        else:
            return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@with_fixtures(MigrateFixture)
def test_how_migration_works(fixture):
    """Migration changes are run phase-by-phase: all 'drop_fk' operations of all
    Migrations (in scheduling order) execute before any 'data' operations."""
    some_object = fixture.some_object

    class Migration1(Migration):
        def schedule_upgrades(self):
            self.schedule('drop_fk', some_object.do_something, 'drop_fk-1')
            self.schedule('data', some_object.do_something, 'data-1')
            self.schedule('drop_fk', some_object.do_something, 'drop_fk-2')

    class Migration2(Migration):
        def schedule_upgrades(self):
            self.schedule('drop_fk', some_object.do_something, 'drop_fk-3')

    egg = ReahlEggStub('my_egg', {'1.0': [], '1.1': [Migration1, Migration2]})
    # Pretend the schema is at the oldest version so both migrations apply.
    fixture.orm_control.set_schema_version_for(egg.get_versions()[0])
    fixture.orm_control.migrate_db(egg)
    expected_order = ['drop_fk-1', 'drop_fk-2', 'drop_fk-3', 'data-1']
    assert (some_object.calls_made == expected_order)
@dataclasses.dataclass(frozen=True)
class Mark:
    """A pytest mark: its name plus the args/kwargs it was applied with."""

    # Name of the mark (e.g. 'parametrize', 'skipif').
    name: str
    # Positional arguments of the mark decorator.
    args: Tuple[Any, ...]
    # Keyword arguments of the mark decorator.
    kwargs: Mapping[str, Any]
    # Source Mark for parameter ids; resolved in combined_with().
    _param_ids_from: Optional['Mark'] = dataclasses.field(default=None, repr=False)
    # Resolved/generated parameter ids.
    _param_ids_generated: Optional[Sequence[str]] = dataclasses.field(default=None, repr=False)

    def __init__(self, name: str, args: Tuple[Any, ...], kwargs: Mapping[str, Any], param_ids_from: Optional['Mark']=None, param_ids_generated: Optional[Sequence[str]]=None, *, _ispytest: bool=False) -> None:
        """:meta private:"""
        check_ispytest(_ispytest)
        # The dataclass is frozen, so fields are set via object.__setattr__.
        object.__setattr__(self, 'name', name)
        object.__setattr__(self, 'args', args)
        object.__setattr__(self, 'kwargs', kwargs)
        object.__setattr__(self, '_param_ids_from', param_ids_from)
        object.__setattr__(self, '_param_ids_generated', param_ids_generated)

    def _has_param_ids(self) -> bool:
        """True if this parametrize mark supplies ids (kwarg or 4th positional arg)."""
        return ('ids' in self.kwargs) or (len(self.args) >= 4)

    def combined_with(self, other: 'Mark') -> 'Mark':
        """Return a new Mark merging *self* and *other* (same name required).

        For parametrize marks, ids from *other* take precedence over our own.
        """
        assert (self.name == other.name)
        param_ids_from: Optional[Mark] = None
        if (self.name == 'parametrize'):
            if other._has_param_ids():
                param_ids_from = other
            elif self._has_param_ids():
                param_ids_from = self
        return Mark(self.name, (self.args + other.args), dict(self.kwargs, **other.kwargs), param_ids_from=param_ids_from, _ispytest=True)
class DistillationTrainer(Trainer):
    """Trainer computing a soft-target distillation loss.

    inputs['labels'] holds the teacher's probability distribution; the loss is
    the soft cross-entropy between that distribution and the student logits.
    """

    def compute_loss(self, model, inputs, return_outputs=False):
        soft_targets = inputs['labels']
        outputs = model(inputs['input_ids'], attention_mask=inputs['attention_mask'])
        log_probs = outputs[0].log_softmax(dim=(- 1))
        # Soft cross-entropy, averaged over the batch.
        per_example = torch.sum((soft_targets * log_probs), axis=(- 1))
        loss = (- per_example.mean())
        return (loss, outputs) if return_outputs else loss
def add_kernel_test(cls, kernel, dim, name=None, expect=None, inputs=None, devices=('cpu',)):
    """Attach one test method per device to test class *cls*.

    Each generated test launches *kernel* on its device, asserts it prints
    nothing, and (when *expect* is given) compares the output array to the
    expected values. Test methods are named '<name>_<device>', with *name*
    defaulting to kernel.key.
    """
    # Resolve the test name once, outside the loop.
    if name is None:
        name = kernel.key
    for device in devices:
        # BUGFIX: bind the loop variable as a default argument; a plain closure
        # would late-bind and run every test on the final device in the list.
        def test_func(self, device=device):
            args = list(inputs) if inputs else []
            if expect:
                # Allocate an output array and append it to the launch arguments.
                result = wp.array(expect, dtype=int, device=device)
                output = wp.zeros_like(result)
                args.append(output)
            kernel.module.load(device)
            # The kernel is expected to produce no stdout output.
            capture = StdOutCapture()
            capture.begin()
            with CheckOutput(self):
                wp.launch(kernel, dim=dim, inputs=args, device=device)
            s = capture.end()
            self.assertEqual(s, '')
            if expect:
                assert_array_equal(output, result)
        setattr(cls, ((name + '_') + device), test_func)
class StreamInfo(MetadataBlock, mutagen.StreamInfo):
    """FLAC STREAMINFO metadata block: stream parameters and MD5 signature."""

    # FLAC metadata block type for STREAMINFO.
    code = 0

    # FLAC is lossless; bitrate is reported as 0.
    bitrate = 0

    def __eq__(self, other):
        try:
            return ((self.min_blocksize == other.min_blocksize) and
                    (self.max_blocksize == other.max_blocksize) and
                    (self.sample_rate == other.sample_rate) and
                    (self.channels == other.channels) and
                    (self.bits_per_sample == other.bits_per_sample) and
                    (self.total_samples == other.total_samples))
        except Exception:
            # Comparing against an object lacking these attributes.
            return False

    __hash__ = MetadataBlock.__hash__

    def load(self, data):
        """Parse the 34-byte STREAMINFO payload from the file-like *data*."""
        self.min_blocksize = int(to_int_be(data.read(2)))
        self.max_blocksize = int(to_int_be(data.read(2)))
        self.min_framesize = int(to_int_be(data.read(3)))
        self.max_framesize = int(to_int_be(data.read(3)))
        # The next 8 bytes pack: 20-bit sample rate, 3-bit channels-1,
        # 5-bit bps-1, and a 36-bit total-sample count.
        sample_first = to_int_be(data.read(2))
        sample_channels_bps = to_int_be(data.read(1))
        bps_total = to_int_be(data.read(5))
        sample_tail = (sample_channels_bps >> 4)
        self.sample_rate = int(((sample_first << 4) + sample_tail))
        if (not self.sample_rate):
            raise error('A sample rate value of 0 is invalid')
        self.channels = int((((sample_channels_bps >> 1) & 7) + 1))
        bps_tail = (bps_total >> 36)
        bps_head = ((sample_channels_bps & 1) << 4)
        self.bits_per_sample = int(((bps_head + bps_tail) + 1))
        # BUGFIX: restore the missing 36-bit mask for the total sample count.
        self.total_samples = (bps_total & 0xFFFFFFFFF)
        self.length = (self.total_samples / float(self.sample_rate))
        self.md5_signature = to_int_be(data.read(16))

    def write(self):
        """Serialize this block back into its 34-byte STREAMINFO payload."""
        f = BytesIO()
        f.write(struct.pack('>I', self.min_blocksize)[(- 2):])
        f.write(struct.pack('>I', self.max_blocksize)[(- 2):])
        f.write(struct.pack('>I', self.min_framesize)[(- 3):])
        f.write(struct.pack('>I', self.max_framesize)[(- 3):])
        # Upper 16 of the 20 sample-rate bits.
        f.write(struct.pack('>I', (self.sample_rate >> 4))[(- 2):])
        # One byte: low 4 sample-rate bits, 3 channel bits, top bps bit.
        byte = ((self.sample_rate & 15) << 4)
        byte += (((self.channels - 1) & 7) << 1)
        byte += (((self.bits_per_sample - 1) >> 4) & 1)
        f.write(bchr(byte))
        # One byte: low 4 bps bits plus the top 4 of the 36 total-sample bits.
        byte = (((self.bits_per_sample - 1) & 15) << 4)
        byte += ((self.total_samples >> 32) & 15)
        f.write(bchr(byte))
        # BUGFIX: restore the missing 32-bit masks dropped from these writes.
        f.write(struct.pack('>I', (self.total_samples & 0xFFFFFFFF)))
        sig = self.md5_signature
        f.write(struct.pack('>4I', ((sig >> 96) & 0xFFFFFFFF), ((sig >> 64) & 0xFFFFFFFF), ((sig >> 32) & 0xFFFFFFFF), (sig & 0xFFFFFFFF)))
        return f.getvalue()

    def pprint(self):
        return (u'FLAC, %.2f seconds, %d Hz' % (self.length, self.sample_rate))
class GeoLocalizedModel(models.Model):
    """Abstract mixin adding geographic coordinates and a map link to a model."""

    # Decimal degrees; 6 decimal places gives roughly 0.1 m precision.
    latitude = models.DecimalField(_('latitude'), max_digits=9, decimal_places=6, blank=True, null=True)
    longitude = models.DecimalField(_('longitude'), max_digits=9, decimal_places=6, blank=True, null=True)
    # Optional external map URL.
    map_link = models.URLField(_('map link'), blank=True)

    class Meta():
        # Abstract: contributes fields to subclasses, creates no table itself.
        abstract = True
class bdist_rpm(orig.bdist_rpm):
    """Setuptools wrapper of distutils bdist_rpm (deprecated command)."""

    def run(self):
        # BUGFIX: the deprecation call had a mangled argument list — the
        # see_url string was broken and swallowed the due_date keyword.
        SetuptoolsDeprecationWarning.emit(
            'Deprecated command',
            '\n bdist_rpm is deprecated and will be removed in a future version.\n Use bdist_wheel (wheel packages) instead.\n ',
            see_url='https://github.com/pypa/setuptools/issues/1988',
            due_date=(2023, 10, 30),
        )
        # Ensure metadata is generated before delegating to distutils.
        self.run_command('egg_info')
        orig.bdist_rpm.run(self)

    def _make_spec_file(self):
        """Patch the distutils spec file for setuptools-style installs."""
        spec = orig.bdist_rpm._make_spec_file(self)
        spec = [line.replace('setup.py install ', 'setup.py install --single-version-externally-managed ').replace('%setup', '%setup -n %{name}-%{unmangled_version}') for line in spec]
        return spec
class DSAKey(common.PK):
    """DSA public/private key wrapper (key type 0) for the OTR protocol."""

    keyType = 0

    def __init__(self, key=None, private=False):
        """Construct from a 5-tuple (private) or 4-tuple (public) of DSA parameters."""
        self.priv = self.pub = None
        if (not isinstance(key, tuple)):
            raise TypeError('4/5-tuple required for key')
        if ((len(key) == 5) and private):
            self.priv = DSA.construct(key)
            self.pub = self.priv.publickey()
        elif ((len(key) == 4) and (not private)):
            self.pub = DSA.construct(key)
        else:
            raise TypeError('wrong number of arguments for private={0!r}: got {1} '.format(private, len(key)))

    def getPublicPayload(self):
        return (self.pub.p, self.pub.q, self.pub.g, self.pub.y)

    def getPrivatePayload(self):
        return (self.priv.p, self.priv.q, self.priv.g, self.priv.y, self.priv.x)

    def fingerprint(self):
        """SHA-1 digest of the serialized public payload."""
        return SHA1(self.getSerializedPublicPayload())

    def sign(self, data):
        """Sign *data*; returns r||s, each fixed at 20 bytes."""
        # NOTE(review): K is the DSA per-signature nonce; ``randrange`` here
        # must be cryptographically strong or the private key can leak —
        # confirm its source in this module.
        K = randrange(2, self.priv.q)
        (r, s) = self.priv.sign(data, K)
        return (long_to_bytes(r, 20) + long_to_bytes(s, 20))

    def verify(self, data, sig):
        (r, s) = (bytes_to_long(sig[:20]), bytes_to_long(sig[20:]))
        return self.pub.verify(data, (r, s))

    def __hash__(self):
        return bytes_to_long(self.fingerprint())

    def __eq__(self, other):
        if (not isinstance(other, type(self))):
            return False
        return (self.fingerprint() == other.fingerprint())

    def __ne__(self, other):
        return (not (self == other))

    @classmethod
    def generate(cls):
        """Generate a fresh 1024-bit private DSA key.

        BUGFIX: takes ``cls`` and is used as an alternate constructor, so it
        must be a classmethod.
        """
        privkey = DSA.generate(1024)
        return cls((privkey.key.y, privkey.key.g, privkey.key.p, privkey.key.q, privkey.key.x), private=True)

    @classmethod
    def parsePayload(cls, data, private=False):
        """Parse a serialized key payload; returns (key, remaining_bytes).

        BUGFIX: alternate constructor taking ``cls`` — must be a classmethod.
        """
        (p, data) = read_mpi(data)
        (q, data) = read_mpi(data)
        (g, data) = read_mpi(data)
        (y, data) = read_mpi(data)
        if private:
            (x, data) = read_mpi(data)
            return (cls((y, g, p, q, x), private=True), data)
        return (cls((y, g, p, q), private=False), data)
def test_inheritance_then_decorate():
    """Inheriting from MethodBasedConfigurable and then decorating keeps the handler callable."""
    calls = []

    class Inheriting(MethodBasedConfigurable):
        pass

    # NOTE(review): the assertions below access ``Concrete.handler`` and call
    # ``t()``, which a plain function defined this way cannot satisfy (calling
    # Concrete would also append to ``calls``, breaking the len == 0 assert).
    # A decorator line on ``Concrete`` appears to have been lost from this
    # source — confirm against the original test before relying on it.
    def Concrete(*args, **kwargs):
        calls.append((args, kwargs))

    assert callable(Concrete.handler)
    t = Concrete('foo', bar='baz')
    assert callable(t.handler)
    # Construction alone must not invoke the handler.
    assert (len(calls) == 0)
    t()
    assert (len(calls) == 1)
class Power_Widgets(object):
    """Provides the powerline-style separator images used in the qtile bar."""

    def _image(self, path):
        # All separators are scaled Image widgets loaded from the power directory.
        return Image(scale=True, filename=path)

    def left_grey(self):
        return self._image('~/.config/qtile/power/bar01.png')

    def right_grey(self):
        return self._image('~/.config/qtile/power/bar06.png')

    def black_red(self):
        return self._image('~/.config/qtile/power/bar02.png')

    def grey_red(self):
        return self._image('~/.config/qtile/power/bar02-b.png')

    def red_magenta(self):
        return self._image('~/.config/qtile/power/bar03.png')

    def magenta_green(self):
        return self._image('~/.config/qtile/power/bar04.png')

    def green_blue(self):
        return self._image('~/.config/qtile/power/bar05.png')

    def blue_orange(self):
        return self._image('~/.config/qtile/power/bar07.png')
@mock.patch(PATCH_METHOD)
@mock.patch('pynamodb.connection.base.uuid')
def test_signal_exception_post_signal(mock_uuid, mock_req):
    """An exception raised by a post_dynamodb_send receiver must not break
    dispatch, and pre_dynamodb_send must still fire with the request uuid."""
    pre_recorded = []
    UUID = '123-abc'

    def record_pre_dynamodb_send(sender, operation_name, table_name, req_uuid):
        pre_recorded.append((operation_name, table_name, req_uuid))

    def record_post_dynamodb_send(sender, operation_name, table_name, req_uuid):
        # Deliberately faulty receiver.
        raise ValueError()

    pre_dynamodb_send.connect(record_pre_dynamodb_send)
    post_dynamodb_send.connect(record_post_dynamodb_send)
    try:
        mock_uuid.uuid4.return_value = UUID
        mock_req.return_value = {'TableDescription': {'TableName': 'table', 'TableStatus': 'Creating'}}
        c = Connection()
        c.dispatch('CreateTable', {'TableName': 'MyTable'})
        assert (('CreateTable', 'MyTable', UUID) == pre_recorded[0])
    finally:
        # Always disconnect so other tests see a clean signal state.
        pre_dynamodb_send.disconnect(record_pre_dynamodb_send)
        post_dynamodb_send.disconnect(record_post_dynamodb_send)
def response_factory():
    """Return a factory that builds a requests.Response wrapping raw bytes."""

    def create_response(data, status_code=200, content_type='application/json'):
        # Wrap the payload in an unread urllib3 response so .raw behaves
        # like a real streamed body.
        raw = HTTPResponse(BytesIO(data), preload_content=False)
        response = Response()
        response.status_code = status_code
        response.raw = raw
        response.headers = CaseInsensitiveDict({'Content-Type': content_type})
        return response

    return create_response
class Request(object):
    """
    Wrapper around a Chrome DevTools Protocol network request.

    Supports inspection of the request and, when interception is enabled,
    continuing, fulfilling (respond) or aborting it.

    NOTE(review): upstream pyppeteer exposes url/resourceType/method/postData/
    headers/response/frame/isNavigationRequest/redirectChain as @property;
    the decorators appear to have been stripped here — confirm against callers
    before relying on the call style.
    """

    def __init__(self, client: CDPSession, requestId: Optional[str], interceptionId: Optional[str], isNavigationRequest: bool, allowInterception: bool, url: str, resourceType: str, payload: dict, frame: Optional[Frame], redirectChain: List['Request']) -> None:
        self._client = client
        self._requestId = requestId
        self._isNavigationRequest = isNavigationRequest
        self._interceptionId = interceptionId
        self._allowInterception = allowInterception
        self._interceptionHandled = False
        self._response: Optional[Response] = None
        self._failureText: Optional[str] = None
        self._url = url
        self._resourceType = resourceType.lower()
        self._method = payload.get('method')
        self._postData = payload.get('postData')
        # Header names are normalized to lower case for case-insensitive lookup.
        headers = payload.get('headers', {})
        self._headers = {k.lower(): v for (k, v) in headers.items()}
        self._frame = frame
        self._redirectChain = redirectChain
        self._fromMemoryCache = False

    def url(self) -> str:
        """Return the request URL."""
        return self._url

    def resourceType(self) -> str:
        """Return the resource type (lower-cased, e.g. 'document', 'xhr')."""
        return self._resourceType

    def method(self) -> Optional[str]:
        """Return the HTTP method, if present in the CDP payload."""
        return self._method

    def postData(self) -> Optional[str]:
        """Return the request POST body, if any."""
        return self._postData

    def headers(self) -> Dict:
        """Return request headers with lower-cased names."""
        return self._headers

    def response(self) -> Optional['Response']:
        """Return the matching Response, once one has been received."""
        return self._response

    def frame(self) -> Optional[Frame]:
        """Return the frame that initiated the request, if known."""
        return self._frame

    def isNavigationRequest(self) -> bool:
        """Return True if this request drives a frame navigation."""
        return self._isNavigationRequest

    def redirectChain(self) -> List['Request']:
        """Return a copy of the redirect chain leading to this request."""
        return copy.copy(self._redirectChain)

    def failure(self) -> Optional[Dict]:
        """Return {'errorText': ...} if the request failed, else None."""
        if (not self._failureText):
            return None
        return {'errorText': self._failureText}

    async def continue_(self, overrides: Dict=None) -> None:
        """Continue an intercepted request, optionally overriding its fields."""
        if (overrides is None):
            overrides = {}
        if (not self._allowInterception):
            raise NetworkError('Request interception is not enabled.')
        if self._interceptionHandled:
            raise NetworkError('Request is already handled.')
        self._interceptionHandled = True
        opt = {'interceptionId': self._interceptionId}
        opt.update(overrides)
        try:
            (await self._client.send('Network.continueInterceptedRequest', opt))
        except Exception as e:
            # The page may already be gone; log instead of raising.
            debugError(logger, e)

    async def respond(self, response: Dict) -> None:
        """Fulfill an intercepted request with a synthetic HTTP response."""
        # Mocking responses for data: URLs is not supported.
        if self._url.startswith('data:'):
            return
        if (not self._allowInterception):
            raise NetworkError('Request interception is not enabled.')
        if self._interceptionHandled:
            raise NetworkError('Request is already handled.')
        self._interceptionHandled = True
        if (response.get('body') and isinstance(response['body'], str)):
            responseBody: Optional[bytes] = response['body'].encode('utf-8')
        else:
            responseBody = response.get('body')
        # Normalize header names to lower case.
        responseHeaders = {}
        if response.get('headers'):
            for header in response['headers']:
                responseHeaders[header.lower()] = response['headers'][header]
        if response.get('contentType'):
            responseHeaders['content-type'] = response['contentType']
        if (responseBody and ('content-length' not in responseHeaders)):
            responseHeaders['content-length'] = len(responseBody)
        statusCode = response.get('status', 200)
        statusText = statusTexts.get(statusCode, '')
        statusLine = f'HTTP/1.1 {statusCode} {statusText}'
        # Build the raw HTTP response (status line, headers, blank line, body).
        CRLF = '\r\n'
        text = (statusLine + CRLF)
        for header in responseHeaders:
            text = f'{text}{header}: {responseHeaders[header]}{CRLF}'
        text = (text + CRLF)
        responseBuffer = text.encode('utf-8')
        if responseBody:
            responseBuffer = (responseBuffer + responseBody)
        # CDP expects the full raw response base64-encoded.
        rawResponse = base64.b64encode(responseBuffer).decode('ascii')
        try:
            (await self._client.send('Network.continueInterceptedRequest', {'interceptionId': self._interceptionId, 'rawResponse': rawResponse}))
        except Exception as e:
            # The page may already be gone; log instead of raising.
            debugError(logger, e)

    async def abort(self, errorCode: str='failed') -> None:
        """Abort an intercepted request with the given error code."""
        errorReason = errorReasons[errorCode]
        if (not errorReason):
            raise NetworkError('Unknown error code: {}'.format(errorCode))
        if (not self._allowInterception):
            raise NetworkError('Request interception is not enabled.')
        if self._interceptionHandled:
            raise NetworkError('Request is already handled.')
        self._interceptionHandled = True
        try:
            (await self._client.send('Network.continueInterceptedRequest', dict(interceptionId=self._interceptionId, errorReason=errorReason)))
        except Exception as e:
            # The page may already be gone; log instead of raising.
            debugError(logger, e)
def handle_action_init_mediator(chain_state: ChainState, state_change: ActionInitMediator) -> TransitionResult[ChainState]:
    """Start a mediator task for the incoming transfer of *state_change*."""
    incoming_transfer = state_change.from_transfer
    return subdispatch_mediatortask(
        chain_state,
        state_change,
        incoming_transfer.balance_proof.token_network_address,
        incoming_transfer.lock.secrethash,
    )
def main(args):
    """Translate each line of args.in_file with a Marian model whose decoding is
    conditioned on a formality classifier, printing one result per input line."""
    with open(args.dataset_info, 'rb') as rf:
        dataset_info = pickle.load(rf)
    tokenizer = MarianTokenizer.from_pretrained(args.model_string)
    tokenizer.add_special_tokens({'pad_token': PAD_TOKEN})
    pad_id = tokenizer.encode(PAD_TOKEN)[0]
    model = MarianMTModel.from_pretrained(args.model_string, return_dict=True).to(args.device)
    if (args.model_path is not None):
        if os.path.isdir(args.model_path):
            # A directory was given: locate a .ckpt file somewhere inside it.
            for (_, _, files) in os.walk(args.model_path):
                for fname in files:
                    if fname.endswith('.ckpt'):
                        args.model_path = os.path.join(args.model_path, fname)
                        break
        ckpt = torch.load(args.model_path)
        try:
            model.load_state_dict(ckpt['state_dict'])
        except RuntimeError:
            # Checkpoint saved from a wrapper: keys carry a 'model.' prefix.
            # Strip it and retry. (Previously a bare ``except:`` hid all errors.)
            state_dict = {}
            for key in ckpt['state_dict'].keys():
                assert key.startswith('model.')
                state_dict[key[6:]] = ckpt['state_dict'][key]
            model.load_state_dict(state_dict)
    model.eval()
    # Load the formality conditioning model.
    checkpoint = torch.load(args.ckpt, map_location=args.device)
    model_args = checkpoint['args']
    conditioning_model = Model(model_args, pad_id, len(dataset_info.index2word))
    conditioning_model.load_state_dict(checkpoint['state_dict'])
    conditioning_model = conditioning_model.to(args.device)
    conditioning_model.eval()
    if args.verbose:
        print("=> loaded checkpoint '{}' (epoch {})".format(args.ckpt, checkpoint['epoch']))
        print('num params', num_params(conditioning_model))
    inputs = []
    with open(args.in_file, 'r') as rf:
        for line in rf:
            inputs.append(line.strip())
    for inp in tqdm(inputs, total=len(inputs)):
        results = predict_formality(model, tokenizer, conditioning_model, [inp], dataset_info, precondition_topk=args.precondition_topk, do_sample=args.do_sample, length_cutoff=args.length_cutoff, condition_lambda=args.condition_lambda, device=args.device)
        print(results[0])
def backupJson(gen_data_dir, sp):
    """Copy traj_data.json to traj_data_backup.json for every trial of split *sp*.

    Expects the layout gen_data_dir/sp/<task>/<trial>/traj_data.json.
    Uses os.path.join instead of fragile string concatenation (the original
    required a trailing '/' on gen_data_dir to form valid paths).
    """
    sp_dir = os.path.join(gen_data_dir, sp)
    for task in os.listdir(sp_dir):
        task_dir = os.path.join(sp_dir, task)
        for trial in os.listdir(task_dir):
            old_fn = os.path.join(task_dir, trial, 'traj_data.json')
            new_fn = os.path.join(task_dir, trial, 'traj_data_backup.json')
            shutil.copy(old_fn, new_fn)
class KJTInputWrapper(torch.nn.Module):
    """
    Wraps a module expecting a KeyedJaggedTensor so it can be called with the
    KJT's raw components instead (useful at tracing/serialization boundaries).
    """

    def __init__(self, module_kjt_input: torch.nn.Module) -> None:
        super().__init__()
        self._module_kjt_input = module_kjt_input
        # NOTE: the attribute assignment above already registers the submodule;
        # this explicit add_module call is redundant but harmless.
        self.add_module('_module_kjt_input', self._module_kjt_input)

    def forward(self, keys: List[str], values: torch.Tensor, weights: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, offsets: Optional[torch.Tensor]=None):
        """Assemble a KeyedJaggedTensor from components and forward it."""
        kjt = KeyedJaggedTensor(keys=keys, values=values, weights=weights, lengths=lengths, offsets=offsets)
        return self._module_kjt_input(kjt)
class MovingBatchNormNd(nn.Module):
    def __init__(self, num_features, eps=0.0001, decay=0.1, bn_lag=0.0, affine=True, sync=False):
        """
        Batch norm driven by moving-average statistics, invertible for flows.

        Args:
            num_features: number of channels (last dimension in _forward).
            eps: numerical stabilizer added to the variance.
            decay: update rate for the running mean/variance.
            bn_lag: lag factor blending batch and running statistics.
            affine: learn a per-channel log-scale (weight) and shift (bias).
            sync: all-reduce batch statistics across workers.
        """
        super(MovingBatchNormNd, self).__init__()
        self.num_features = num_features
        self.sync = sync
        self.affine = affine
        self.eps = eps
        self.decay = decay
        self.bn_lag = bn_lag
        # Step counter, used for the bn_lag bias correction.
        self.register_buffer('step', torch.zeros(1))
        if self.affine:
            # weight acts as a log-scale: outputs are multiplied by exp(weight).
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()
def shape(self):
raise NotImplementedError
    def reset_parameters(self):
        """Reset running stats to (0, 1) and affine params to the identity map."""
        self.running_mean.zero_()
        self.running_var.fill_(1)
        if self.affine:
            # weight is a log-scale, so zero means multiply-by-one.
            self.weight.data.zero_()
            self.bias.data.zero_()
    def forward(self, x, c=None, logpx=None, reverse=False):
        """Apply normalization (or its inverse when reverse=True).

        c is accepted for interface compatibility but unused. When logpx is
        given, the log-density adjusted by the log-det-Jacobian is returned too.
        """
        if reverse:
            return self._reverse(x, logpx)
        else:
            return self._forward(x, logpx)
def _forward(self, x, logpx=None):
num_channels = x.size((- 1))
used_mean = self.running_mean.clone().detach()
used_var = self.running_var.clone().detach()
if self.training:
x_t = x.transpose(0, 2).reshape(num_channels, (- 1))
batch_mean = torch.mean(x_t, dim=1)
if self.sync:
batch_ex2 = torch.mean((x_t ** 2), dim=1)
batch_mean = reduce_tensor(batch_mean)
batch_ex2 = reduce_tensor(batch_ex2)
batch_var = (batch_ex2 - (batch_mean ** 2))
else:
batch_var = torch.var(x_t, dim=1)
if (self.bn_lag > 0):
used_mean = (batch_mean - ((1 - self.bn_lag) * (batch_mean - used_mean.detach())))
used_mean /= (1.0 - (self.bn_lag ** (self.step[0] + 1)))
used_var = (batch_var - ((1 - self.bn_lag) * (batch_var - used_var.detach())))
used_var /= (1.0 - (self.bn_lag ** (self.step[0] + 1)))
self.running_mean -= (self.decay * (self.running_mean - batch_mean.data))
self.running_var -= (self.decay * (self.running_var - batch_var.data))
self.step += 1
used_mean = used_mean.view(*self.shape).expand_as(x)
used_var = used_var.view(*self.shape).expand_as(x)
y = ((x - used_mean) * torch.exp(((- 0.5) * torch.log((used_var + self.eps)))))
if self.affine:
weight = self.weight.view(*self.shape).expand_as(x)
bias = self.bias.view(*self.shape).expand_as(x)
y = ((y * torch.exp(weight)) + bias)
if (logpx is None):
return y
else:
return (y, (logpx - self._logdetgrad(x, used_var).sum((- 1), keepdim=True)))
def _reverse(self, y, logpy=None):
used_mean = self.running_mean
used_var = self.running_var
if self.affine:
weight = self.weight.view(*self.shape).expand_as(y)
bias = self.bias.view(*self.shape).expand_as(y)
y = ((y - bias) * torch.exp((- weight)))
used_mean = used_mean.view(*self.shape).expand_as(y)
used_var = used_var.view(*self.shape).expand_as(y)
x = ((y * torch.exp((0.5 * torch.log((used_var + self.eps))))) + used_mean)
if (logpy is None):
return x
else:
return (x, (logpy + self._logdetgrad(x, used_var).sum((- 1), keepdim=True)))
def _logdetgrad(self, x, used_var):
logdetgrad = ((- 0.5) * torch.log((used_var + self.eps)))
if self.affine:
weight = self.weight.view(*self.shape).expand(*x.size())
logdetgrad += weight
return logdetgrad
def __repr__(self):
return '{name}({num_features}, eps={eps}, decay={decay}, bn_lag={bn_lag}, affine={affine})'.format(name=self.__class__.__name__, **self.__dict__) |
class DisplayOptionalPage(DisplayPage):
    """A DisplayPage that can be toggled on/off and refreshes itself periodically.

    Adds a save button and an enable checkbox to the options frame, then polls
    ``update_page`` every ``waittime`` milliseconds while the parent task runs.
    Subclasses implement ``save_items``, ``display_item_set`` and
    ``display_item_process``.
    """

    def __init__(self, parent, tabname, helptext, waittime, command=None):
        """
        Args:
            parent: notebook widget this page is added to (``parent.select``).
            tabname: display name of the tab, used in info/tooltip text.
            helptext: help text forwarded to DisplayPage.
            waittime: refresh interval in milliseconds.
            command: optional command identifier kept for subclasses.
        """
        logger.debug('%s: OptionalPage args: (waittime: %s, command: %s)', self.__class__.__name__, waittime, command)
        DisplayPage.__init__(self, parent, tabname, helptext)
        self.command = command
        self.display_item = None
        self.set_info_text()
        self.add_options()
        parent.select(self)
        self.update_idletasks()
        self.update_page(waittime)

    @staticmethod
    def set_vars():
        """Build and return the tk control variables for this page.

        Fix: the original definition took no ``self`` parameter yet was a plain
        instance method, so any ``self.set_vars()`` call raised TypeError.
        Declaring it ``@staticmethod`` keeps the zero-argument signature while
        making it callable on instances (presumably DisplayPage stores the
        returned dict as ``self.vars`` — TODO confirm against DisplayPage).
        """
        enabled = tk.BooleanVar()
        enabled.set(True)
        ready = tk.BooleanVar()
        ready.set(False)
        modified = tk.DoubleVar()
        # NOTE(review): DoubleVar.set(None) stores a non-float sentinel — confirm intended.
        modified.set(None)
        tk_vars = {'enabled': enabled, 'ready': ready, 'modified': modified}
        logger.debug(tk_vars)
        return tk_vars

    def set_info_text(self):
        """Update the info label to reflect the enabled/ready state."""
        if (not self.vars['enabled'].get()):
            msg = '{} disabled'.format(self.tabname.title())
        elif (self.vars['enabled'].get() and (not self.vars['ready'].get())):
            msg = 'Waiting for {}...'.format(self.tabname)
        else:
            msg = 'Displaying {}'.format(self.tabname)
        logger.debug(msg)
        self.set_info(msg)

    def add_options(self):
        """Add the save and enable controls to the options frame."""
        self.add_option_save()
        self.add_option_enable()

    def add_option_save(self):
        """Add the save-to-file button with its tooltip."""
        logger.debug('Adding save option')
        btnsave = ttk.Button(self.optsframe, image=get_images().icons['save'], command=self.save_items)
        btnsave.pack(padx=2, side=tk.RIGHT)
        Tooltip(btnsave, text='Save {}(s) to file'.format(self.tabname), wraplength=200)

    def add_option_enable(self):
        """Add the enable/disable checkbox bound to the 'enabled' variable."""
        logger.debug('Adding enable option')
        chkenable = ttk.Checkbutton(self.optsframe, variable=self.vars['enabled'], text='Enable {}'.format(self.tabname), command=self.on_chkenable_change)
        chkenable.pack(side=tk.RIGHT, padx=5, anchor=tk.W)
        Tooltip(chkenable, text='Enable or disable {} display'.format(self.tabname), wraplength=200)

    def save_items(self):
        """Save the displayed item(s); must be provided by the subclass."""
        raise NotImplementedError()

    def on_chkenable_change(self):
        """Show or hide the sub-notebook when the enable checkbox toggles."""
        logger.debug('Enabled checkbox changed')
        if self.vars['enabled'].get():
            self.subnotebook_show()
        else:
            self.subnotebook_hide()
        self.set_info_text()

    def update_page(self, waittime):
        """Refresh the display and re-schedule itself while the task runs."""
        if (not self.runningtask.get()):
            # Task finished: stop the polling loop.
            return
        if self.vars['enabled'].get():
            logger.trace('Updating page')
            self.display_item_set()
            self.load_display()
        # Re-arm the timer; default arg binds waittime for the callback.
        self.after(waittime, (lambda t=waittime: self.update_page(t)))

    def display_item_set(self):
        """Set self.display_item; must be provided by the subclass."""
        raise NotImplementedError()

    def load_display(self):
        """Process and show the current display item, if any."""
        if (not self.display_item):
            return
        logger.debug('Loading display')
        self.display_item_process()
        self.vars['ready'].set(True)
        self.set_info_text()

    def display_item_process(self):
        """Render self.display_item; must be provided by the subclass."""
        raise NotImplementedError()

    def close(self):
        """Destroy all child widgets of this page."""
        for child in self.winfo_children():
            logger.debug('Destroying child: %s', child)
            child.destroy()
# NOTE(review): the lines below are dataset-viewer boilerplate accidentally
# captured with the code; commented out so the file remains valid Python.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.