code stringlengths 281 23.7M |
|---|
def _row_column_layout(content, flow, size, scope=None, position=OutputPosition.BOTTOM) -> Output:
    """Arrange ``content`` in a CSS grid flowing along ``flow`` ('row' or 'column').

    A single item is wrapped in a list. ``None`` entries become empty 10px
    placeholder cells, and when ``size`` is not given each real item gets a
    '1fr' track while placeholders get '10px'. Returns the composed widget.
    """
    if not isinstance(content, (list, tuple, OutputList)):
        content = [content]
    if not size:
        # One flexible track per real item, a thin track per placeholder.
        size = ' '.join('1fr' if item is not None else '10px' for item in content)
    content = [put_html('<div></div>') if item is None else item for item in content]
    for item in content:
        assert isinstance(item, Output), "put_row()/put_column()'s content must be list of put_xxx()"
    style = 'grid-auto-flow: {flow}; grid-template-{flow}s: {size};'.format(flow=flow, size=size)
    # Escape the style so it is safe inside the double-quoted HTML attribute.
    tpl = ('\n <div style="display: grid; %s">\n {{#contents}}\n {{& pywebio_output_parse}}\n {{/contents}}\n </div>'.strip() % html.escape(style, quote=True))
    return put_widget(template=tpl, data=dict(contents=content), scope=scope, position=position)
class Vector(QtWidgets.QGraphicsItem):
    """Graphics item that draws a 2-D arrow from the item origin.

    Direction and length are set via :meth:`setOrientation` with east/north
    components, scaled by the configured relative length percentage.
    """
    # Shared drawing resources, built once from the user configuration.
    arrow_color = QtGui.QColor(*getConfig().vector_color)
    arrow_brush = QtGui.QBrush(arrow_color, QtCore.Qt.SolidPattern)
    arrow_pen = QtGui.QPen(arrow_brush, 1, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)
    # Percentage scale applied to the (dEast, dNorth) components.
    relative_length = getConfig().vector_relative_length
    def __init__(self, parent):
        QtWidgets.QGraphicsItem.__init__(self, parent=parent)
        # p1 stays at the origin; p2 carries the arrow tip. QLineF stores
        # copies of the points, so setOrientation pushes p2 back via setP2.
        self.p1 = QtCore.QPointF()
        self.p2 = QtCore.QPointF()
        self.line = QtCore.QLineF(self.p1, self.p2)
        self.setOrientation(0.0, 0.0)
        # Draw on top of other scene items.
        self.setZValue(10000)
    def boundingRect(self):
        # NOTE(review): the rect only spans the shaft endpoints; the arrow-head
        # polyline drawn in paint() may extend slightly outside it — confirm
        # whether repaint artifacts matter here.
        return QtCore.QRectF(self.p1, self.p2)
    def setOrientation(self, dEast, dNorth):
        """Point the arrow at (dEast, dNorth), scaled by relative_length percent."""
        dEast *= (self.relative_length / 100)
        dNorth *= (self.relative_length / 100)
        self.p2.setX(dEast)
        self.p2.setY(dNorth)
        self.line.setP2(self.p2)
    def paint(self, painter, option, parent):
        # Nothing to draw for a zero-length vector.
        if (self.line.length() == 0.0):
            return
        painter.setPen(self.arrow_pen)
        painter.setRenderHint(QtGui.QPainter.Antialiasing)
        painter.setCompositionMode(QtGui.QPainter.CompositionMode_SourceOver)
        # Head size scales with both shaft length and the configured percentage.
        arrow_length = ((self.line.length() * 0.3) * (self.relative_length / 100.0))
        d = self.line.angle()
        # Two head points offset +/- 60 degrees around the tip (d2r: deg->rad).
        head_p1 = (self.p2 - QtCore.QPointF((np.sin(((d * d2r) + (np.pi / 3))) * arrow_length), (np.cos(((d * d2r) + (np.pi / 3))) * arrow_length)))
        head_p2 = (self.p2 - QtCore.QPointF((np.sin((((d * d2r) + np.pi) - (np.pi / 3))) * arrow_length), (np.cos((((d * d2r) + np.pi) - (np.pi / 3))) * arrow_length)))
        painter.drawLine(self.line)
        painter.drawPolyline(*[head_p1, self.p2, head_p2])
def pytest_runtest_setup(item):
    """Per-test setup hook: show a header for interactive runs and push
    run-mode flags onto the test's class (when it has one)."""
    is_sanity = item.config.getoption('--sanity', False)
    is_non_interactive = item.config.getoption('--non-interactive', False)
    # Interactive unless either opt-out flag was given.
    is_interactive = not (is_sanity or is_non_interactive)
    if is_interactive:
        _show_test_header(item)
    _try_set_class_attribute(item, 'interactive', is_interactive)
    _try_set_class_attribute(item, 'allow_missing_screenshots', is_sanity)
def test_monkeypatch_ini(testdir: Any, mocker: MockerFixture) -> None:
    """With mock_traceback_monkeypatch disabled via ini, stubs keep the plain
    Mock assert methods in the subprocess run."""
    # Sanity check: in *this* process the monkeypatch is active.
    outer_stub = mocker.stub()
    assert outer_stub.assert_called_with.__module__ != outer_stub.__module__
    testdir.makepyfile('\n def test_foo(mocker):\n stub = mocker.stub()\n assert stub.assert_called_with.__module__ == stub.__module__\n ')
    testdir.makeini('\n [pytest]\n mock_traceback_monkeypatch = false\n ')
    outcome = testdir.runpytest_subprocess()
    assert outcome.ret == 0
def test_initialization():
    """MultiLabelClf infers states/labels/features from data and rejects
    an explicit feature count that contradicts the data."""
    features = np.random.normal(size=(13, 5))
    labels = np.random.randint(2, size=(13, 3))
    clf = MultiLabelClf()
    clf.initialize(features, labels)
    assert_equal(clf.n_states, 2)
    assert_equal(clf.n_labels, 3)
    assert_equal(clf.n_features, 5)
    assert_equal(clf.size_joint_feature, 5 * 3)
    # Matching explicit sizes are accepted.
    clf = MultiLabelClf(n_features=5, n_labels=3)
    clf.initialize(features, labels)
    # A feature-count mismatch must raise ValueError.
    clf = MultiLabelClf(n_features=3, n_labels=3)
    assert_raises(ValueError, clf.initialize, X=features, Y=labels)
class Blip2Config(PretrainedConfig):
    """Composite configuration for BLIP-2: a vision encoder, a Q-Former bridge
    and a language model, plus the number of learned query tokens.

    Missing sub-configs fall back to defaults; the text model defaults to OPT.
    """
    model_type = 'blip-2'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if (vision_config is None):
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')
        if (qformer_config is None):
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')
        if (text_config is None):
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        # The text backbone is resolved dynamically; default to OPT.
        text_model_type = (text_config['model_type'] if ('model_type' in text_config) else 'opt')
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over vision features.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = (self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: PretrainedConfig, **kwargs):
        """Alternate constructor from already-built sub-config objects.

        FIX: this takes ``cls`` as its first parameter but was missing the
        ``@classmethod`` decorator, so calling it on the class would bind the
        vision config to ``cls``.
        """
        return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, recursing into the three sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
def test_save_unequal_chunks_error():
    """save_dataset must reject stores with existing arrays and any
    non-uniform chunking (a Zarr constraint)."""
    ds = simulate_genotype_call_dataset(n_variant=10, n_sample=10, n_ploidy=10, n_allele=10, n_contig=10)
    # A store already containing array metadata is rejected outright.
    with pytest.raises(ValueError, match="path '' contains an array"):
        save_dataset(ds, {'.zarray': ''})
    uneven_chunks = {dim: (1, 3, 5, 1) for dim in ds.sizes}
    ds = ds.chunk(uneven_chunks)
    with pytest.raises(ValueError, match='Zarr requires uniform chunk sizes. Use the `auto_rechunk`'):
        save_dataset(ds, {})
    # Even just two unequal chunks are not allowed.
    ds = ds.chunk({dim: (4, 6) for dim in ds.sizes})
    with pytest.raises(ValueError, match='Zarr requires uniform chunk sizes. Use the `auto_rechunk`'):
        save_dataset(ds, {})
def test_links_1():
    """Link C1:C2 is a conduit with the expected endpoints.

    FIX: replaced the `== True` / `== False` comparisons with direct truth
    asserts (idiomatic, and gives better failure messages).
    """
    with Simulation(MODEL_WEIR_SETTING_PATH) as sim:
        print('\n\n\nLINKS\n')
        c1c2 = Links(sim)['C1:C2']
        assert c1c2.linkid == 'C1:C2'
        # Type predicates: C1:C2 is a conduit and nothing else.
        assert c1c2.is_conduit()
        assert not c1c2.is_pump()
        assert not c1c2.is_orifice()
        assert not c1c2.is_weir()
        assert not c1c2.is_outlet()
        assert c1c2.connections == ('J1', 'J2')
        assert c1c2.inlet_node == 'J1'
        assert c1c2.outlet_node == 'J2'
class AppDefStatusTest(unittest.TestCase):
    """Tests for AppStatus: terminal-state detection, repr serialization,
    raise_for_status behavior and human-readable formatting."""
    def test_is_terminal(self) -> None:
        # Every AppState must classify consistently with _TERMINAL_STATES.
        for s in AppState:
            is_terminal = AppStatus(state=s).is_terminal()
            if (s in _TERMINAL_STATES):
                self.assertTrue(is_terminal)
            else:
                self.assertFalse(is_terminal)
    def test_serialize(self) -> None:
        # repr() renders a YAML-like block with all fields.
        status = AppStatus(AppState.FAILED)
        serialized = repr(status)
        self.assertEqual(serialized, "AppStatus:\n msg: ''\n num_restarts: 0\n roles: []\n state: FAILED (5)\n structured_error_msg: <NONE>\n ui_url: null\n")
    def test_serialize_embed_json(self) -> None:
        # A JSON structured_error_msg is expanded inline rather than kept as a string.
        status = AppStatus(AppState.FAILED, structured_error_msg='{"message": "test error"}')
        serialized = repr(status)
        self.assertEqual(serialized, "AppStatus:\n msg: ''\n num_restarts: 0\n roles: []\n state: FAILED (5)\n structured_error_msg:\n message: test error\n ui_url: null\n")
    def test_raise_on_status(self) -> None:
        # SUCCEEDED is silent; FAILED/CANCELLED/RUNNING raise AppStatusError.
        AppStatus(state=AppState.SUCCEEDED).raise_for_status()
        with self.assertRaisesRegex(AppStatusError, '(?s)job did not succeed:.*FAILED.*'):
            AppStatus(state=AppState.FAILED).raise_for_status()
        with self.assertRaisesRegex(AppStatusError, '(?s)job did not succeed:.*CANCELLED.*'):
            AppStatus(state=AppState.CANCELLED).raise_for_status()
        with self.assertRaisesRegex(AppStatusError, '(?s)job did not succeed:.*RUNNING.*'):
            AppStatus(state=AppState.RUNNING).raise_for_status()
    def test_format_error_message(self) -> None:
        # The long RPC traceback is truncated/wrapped to the requested width.
        rpc_error_message = 'RuntimeError(\'On WorkerInfo(id=1, name=trainer:0:0):\nRuntimeError(ShardingError(\'Table of size 715.26GB cannot be added to any rank\'))\nTraceback (most recent call last):\n..\n\')\nTraceback (most recent call last):\n File "/dev/shm/uid-0/360e3568-seed-nspid-ns-/torch/distributed/rpc/internal.py", line 190, in _run_function\n'
        expected_error_message = "RuntimeError('On WorkerInfo(id=1, name=trainer:0:0):\nRuntimeError(ShardingError('Table\n of size 715.26GB cannot be added to any rank'))\nTraceback (most recent call last):\n..\n')"
        status = AppStatus(state=AppState.FAILED)
        actual_message = status._format_error_message(rpc_error_message, header='', width=80)
        self.assertEqual(expected_error_message, actual_message)
    def _get_test_app_status(self) -> AppStatus:
        # Helper: one failed replica (with structured error) and one running replica.
        error_msg = '{"message":{"message":"error","errorCode":-1,"extraInfo":{"timestamp":1293182}}}'
        replica1 = ReplicaStatus(id=0, state=AppState.FAILED, role='worker', hostname='localhost', structured_error_msg=error_msg)
        replica2 = ReplicaStatus(id=1, state=AppState.RUNNING, role='worker', hostname='localhost')
        role_status = RoleStatus(role='worker', replicas=[replica1, replica2])
        return AppStatus(state=AppState.RUNNING, roles=[role_status])
    def test_format_app_status(self) -> None:
        # Pin the timezone so the rendered timestamp is deterministic.
        os.environ['TZ'] = 'Europe/London'
        time.tzset()
        app_status = self._get_test_app_status()
        actual_message = app_status.format()
        expected_message = 'AppStatus:\n State: RUNNING\n Num Restarts: 0\n Roles:\n *worker[0]:FAILED (exitcode: -1)\n timestamp: 1970-01-16 00:13:02\n hostname: localhost\n error_msg: error\n worker[1]:RUNNING\n Msg:\n Structured Error Msg: <NONE>\n UI URL: None\n '
        # Compare whitespace-insensitively to tolerate formatting-only drift.
        self.assertEqual(expected_message.split(), actual_message.split())
@pytest.mark.skipif(IS_PYPY, reason='Test run with coverage on PyPy sometimes raises a RecursionError')
def test_recursion_on_inference_tip() -> None:
    """Regression test: parsing this class/decorator pattern must not blow the
    recursion limit in the inference tips.

    FIX: the decorator line was garbled to a bare `.skipif(...)` (a syntax
    error); restored the `@pytest.mark.skipif` form.
    """
    code = '\n class MyInnerClass:\n ...\n\n\n class MySubClass:\n inner_class = MyInnerClass\n\n\n class MyClass:\n sub_class = MySubClass()\n\n\n def get_unpatched_class(cls):\n return cls\n\n\n def get_unpatched(item):\n lookup = get_unpatched_class if isinstance(item, type) else lambda item: None\n return lookup(item)\n\n\n _Child = get_unpatched(MyClass.sub_class.inner_class)\n\n\n class Child(_Child):\n def patch(cls):\n MyClass.sub_class.inner_class = cls\n '
    module = parse(code)
    assert module
class DistributedGroupSampler(Sampler):
    """Distributed sampler that keeps samples of the same group (dataset
    ``flag``) together within each ``samples_per_gpu``-sized chunk.

    Each group's sample count is padded up so it divides evenly by
    ``samples_per_gpu * num_replicas``; every rank then receives the same
    number of samples.
    """
    def __init__(self, dataset, samples_per_gpu=1, num_replicas=None, rank=None):
        # Fall back to the current distributed context when not given explicitly.
        (_rank, _num_replicas) = get_dist_info()
        if (num_replicas is None):
            num_replicas = _num_replicas
        if (rank is None):
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # 'flag' assigns each sample to a group; group_sizes[i] = #samples in group i.
        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        # Per-rank sample count: each group padded to a multiple of
        # samples_per_gpu per replica.
        for (i, j) in enumerate(self.group_sizes):
            self.num_samples += (int(math.ceil((((self.group_sizes[i] * 1.0) / self.samples_per_gpu) / self.num_replicas))) * self.samples_per_gpu)
        self.total_size = (self.num_samples * self.num_replicas)
    def __iter__(self):
        # Seed with the epoch so all replicas generate the identical permutation.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = []
        for (i, size) in enumerate(self.group_sizes):
            if (size > 0):
                indice = np.where((self.flag == i))[0]
                assert (len(indice) == size)
                # Shuffle within the group, then pad by repeating from the front
                # so the group length divides samples_per_gpu * num_replicas.
                indice = indice[list(torch.randperm(int(size), generator=g))].tolist()
                extra = (((int(math.ceil((((size * 1.0) / self.samples_per_gpu) / self.num_replicas))) * self.samples_per_gpu) * self.num_replicas) - len(indice))
                indice += indice[:extra]
                indices += indice
        assert (len(indices) == self.total_size)
        # Shuffle whole samples_per_gpu-sized chunks (keeps groups intact per chunk).
        indices = [indices[j] for i in list(torch.randperm((len(indices) // self.samples_per_gpu), generator=g)) for j in range((i * self.samples_per_gpu), ((i + 1) * self.samples_per_gpu))]
        # Contiguous per-rank slice.
        offset = (self.num_samples * self.rank)
        indices = indices[offset:(offset + self.num_samples)]
        assert (len(indices) == self.num_samples)
        return iter(indices)
    def __len__(self):
        return self.num_samples
    def set_epoch(self, epoch):
        # Called by the training loop so each epoch reshuffles deterministically.
        self.epoch = epoch
@_LOSSES.register_module()
class SmoothFocalLoss(nn.Module):
    """Smooth focal loss module wrapping :func:`smooth_focal_loss`.

    FIX: the registry decorator had been garbled to a bare
    ``_LOSSES.register_module()`` statement; restored the ``@`` form so the
    loss is actually registered.
    """

    def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0):
        super(SmoothFocalLoss, self).__init__()
        self.gamma = gamma          # focusing parameter
        self.alpha = alpha          # class-balance weight
        self.reduction = reduction  # 'none' | 'mean' | 'sum'
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None):
        """Compute the weighted smooth focal loss; reduction_override wins over self.reduction."""
        assert (reduction_override in (None, 'none', 'mean', 'sum'))
        reduction = (reduction_override if reduction_override else self.reduction)
        loss_cls = (self.loss_weight * smooth_focal_loss(pred, target, weight, gamma=self.gamma, alpha=self.alpha, reduction=reduction, avg_factor=avg_factor))
        return loss_cls
class Transparent(BaseProtocol):
    """Transparent-proxy protocol: the destination is read off the socket
    (via query_remote) rather than negotiated with the client."""

    async def guess(self, reader, sock, **kw):
        # Transparent traffic is detected when an original destination exists
        # and differs from the socket's own local address.
        original_dst = self.query_remote(sock)
        if original_dst is None:
            return False
        return sock is None or sock.getsockname() != original_dst

    async def accept(self, reader, user, sock, **kw):
        original_dst = self.query_remote(sock)
        return (user, original_dst[0], original_dst[1])

    def udp_accept(self, data, sock, **kw):
        original_dst = self.query_remote(sock)
        return (True, original_dst[0], original_dst[1], data)
def print_loaded_dict_info(model_state_dict: Dict[str, Any], state_dict: Dict[str, Any], skip_layers: List[str], model_config: "AttrDict"):
    """Log, for every model layer, whether it was loaded from the checkpoint,
    skipped, or missing; finally log checkpoint layers the model doesn't have.

    FIX: flattened the nesting with guard clauses and simplified the redundant
    triple-clause head condition — it reduces exactly to
    ``not heads or not EVAL_MODE_ON or EVAL_TRUNK_AND_HEAD``.
    """
    extra_layers = []
    max_len_model = max(len(key) for key in model_state_dict.keys())
    eval_settings = model_config.FEATURE_EVAL_SETTINGS
    for layername in model_state_dict.keys():
        # Explicitly skipped layers.
        if skip_layers and any(item in layername for item in skip_layers):
            logging.info(f'Ignored layer: {layername}')
            continue
        # Present in the model but absent from the checkpoint.
        if layername not in state_dict:
            logging.info(f'Not found: {layername}, not initialized')
            continue
        # Head layers only count as loaded outside feature-eval mode, or when
        # eval mode explicitly evaluates trunk + head.
        if ('heads' not in layername) or (not eval_settings.EVAL_MODE_ON) or eval_settings.EVAL_TRUNK_AND_HEAD:
            logging.info(f'Loaded: {layername: <{max_len_model}} of shape: {model_state_dict[layername].size()} from checkpoint')
        else:
            logging.info(f'Ignored layer: {layername}')
    for layername in state_dict.keys():
        if layername not in model_state_dict:
            extra_layers.append(layername)
    logging.info(f'Extra layers not loaded from checkpoint: {extra_layers}')
def get_value_counts(values: List[Any]) -> List[int]:
    """Return a per-value count: 0 for None, len() for sized non-string
    values, and 1 for everything else.

    Quirk preserved from the original: when every entry is None (including an
    empty input list) the result collapses to a single ``[0]``.
    """
    if all(value is None for value in values):
        return [0]
    counts: List[int] = []
    for value in values:
        if value is None:
            counts.append(0)
            continue
        is_sized = hasattr(value, '__len__') and not isinstance(value, str)
        counts.append(len(value) if is_sized else 1)
    return counts
class NIREmissivePartFromReflectance(NIRReflectance):
    """Compositor producing the emissive (thermal) part of a 3.x um NIR
    channel, derived from its reflectance calculation."""
    def __init__(self, sunz_threshold=None, **kwargs):
        # Kept as an attribute and also forwarded to the parent compositor.
        self.sunz_threshold = sunz_threshold
        super(NIREmissivePartFromReflectance, self).__init__(sunz_threshold=sunz_threshold, **kwargs)
    def __call__(self, projectables, optional_datasets=None, **info):
        """Resample/align the inputs and return the emissive part as a DataArray."""
        projectables = self.match_data_arrays(projectables)
        inputs = self._get_nir_inputs(projectables, optional_datasets)
        return self._get_emissivity_as_dataarray(*inputs)
    def _get_emissivity_as_dataarray(self, nir, da_tb11, da_tb13_4, da_sun_zenith):
        """Wrap the dask emissivity result in a DataArray with units of Kelvin."""
        logger.info('Getting emissive part of %s', nir.attrs['name'])
        emissivity = self._get_emissivity_as_dask(nir.data, da_tb11, da_tb13_4, da_sun_zenith, nir.attrs)
        proj = self._create_modified_dataarray(emissivity, base_dataarray=nir)
        proj.attrs['units'] = 'K'
        return proj
    def _get_emissivity_as_dask(self, da_nir, da_tb11, da_tb13_4, da_sun_zenith, metadata):
        """Run the reflectance calculator and extract the emissive 3.x um part."""
        reflectance_3x_calculator = self._init_reflectance_calculator(metadata)
        # reflectance_from_tbs must run first; emissive_part_3x reads its state.
        reflectance_3x_calculator.reflectance_from_tbs(da_sun_zenith, da_nir, da_tb11, tb_ir_co2=da_tb13_4)
        return reflectance_3x_calculator.emissive_part_3x()
def ssl_server(request, qapp):
    """Fixture-style generator: run the SSL test webserver for one test.

    The server's captured log is attached to the test node (under
    ``_server_logs``) so failures can display it. Cleanup runs after the test.
    """
    server = WebserverProcess(request, 'webserver_sub_ssl')
    if (not hasattr(request.node, '_server_logs')):
        request.node._server_logs = []
    request.node._server_logs.append(('SSL server', server.captured_log))
    server.start()
    # Hand the running server to the test body.
    (yield server)
    server.after_test()
    server.terminate()
class main(list):
    """Entry point for the interactive 'godaddy' sub-shell.

    NOTE(review): state is published through module-level globals
    (campaign_list, domain_list, module) — presumably read by cmd_main;
    confirm before refactoring. The cmd loop blocks until the user exits.
    """
    def __init__(self, campaign, domains, mod, project_id):
        # Publish selections globally for the command loop to consume.
        global campaign_list
        campaign_list = campaign
        global domain_list
        domain_list = domains
        if (mod is not None):
            global module
            module = mod
        i = cmd_main()
        # Colored prompt of the form: (Overlord : <project>/godaddy)$>
        i.prompt = (((((('(' + cmd2.ansi.style('Overlord', fg=Fg.RED, bg=None, bold=True, underline=False)) + ' : ') + cmd2.ansi.style(project_id, fg=Fg.DARK_GRAY, bg=None, bold=True, underline=False)) + cmd2.ansi.style('/godaddy', fg=Fg.BLUE, bg=None, bold=True, underline=False)) + ')') + '$> ')
        i.cmdloop()
class Effect6404(BaseEffect):
    """Passive structure-rig effect: boosts optimal range and falloff of
    fitted Structure Energy Neutralizer modules."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # Both boosts target the same module group; share one predicate.
        def is_structure_neut(mod):
            return mod.item.group.name == 'Structure Energy Neutralizer'
        fit.modules.filteredItemBoost(is_structure_neut, 'maxRange', src.getModifiedItemAttr('structureRigEwarOptimalBonus'), stackingPenalties=True, **kwargs)
        fit.modules.filteredItemBoost(is_structure_neut, 'falloffEffectiveness', src.getModifiedItemAttr('structureRigEwarFalloffBonus'), stackingPenalties=True, **kwargs)
def test_find_files_to_add() -> None:
    """The sdist builder for the 'complete' fixture project picks up exactly
    the expected source, data and metadata files."""
    poetry = Factory().create_poetry(project('complete'))
    builder = SdistBuilder(poetry)
    # Normalize to paths relative to the source root for a stable comparison.
    result = {f.relative_to_source_root() for f in builder.find_files_to_add()}
    assert (result == {Path('AUTHORS'), Path('COPYING'), Path('LICENCE'), Path('LICENSE'), Path('README.rst'), Path('bin/script.sh'), Path('my_package/__init__.py'), Path('my_package/data1/test.json'), Path('my_package/sub_pkg1/__init__.py'), Path('my_package/sub_pkg2/__init__.py'), Path('my_package/sub_pkg2/data2/data.json'), Path('my_package/sub_pkg3/foo.py'), Path('pyproject.toml')})
def _fallback_property(func):
name = func.__name__
(func)
def new_func(self):
out = getattr(self._param_td, name)
if (out is self._param_td):
return self
return out
def setter(self, value):
return getattr(type(self._param_td), name).fset(self._param_td, value)
return property(new_func, setter) |
class TestTotalVariationLoss():
    """Tests for loss.TotalVariationLoss against the functional reference."""

    def test_call(self):
        # Deterministic input for a reproducible comparison.
        torch.manual_seed(0)
        input_image = torch.rand(1, 3, 128, 128)
        exponent = 3.0
        tv_loss = loss.TotalVariationLoss(exponent=exponent)
        # The module must agree with the functional reference implementation.
        expected = F.total_variation_loss(input_image, exponent=exponent)
        ptu.assert_allclose(tv_loss(input_image), expected)

    def test_repr_smoke(self, encoder):
        assert isinstance(repr(loss.TotalVariationLoss()), str)
def get_optim_and_schedulers(model, args):
    """Build the GAM optimizer stack.

    Returns a 5-tuple: (GAM optimizer, base optimizer, cosine LR scheduler,
    grad-rho proportion scheduler, grad-norm-rho proportion scheduler).
    Raises NotImplementedError for an unknown ``args.base_opt``.
    """
    opt_name = args.base_opt
    if opt_name == 'SGD':
        base_optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif opt_name == 'Adam':
        base_optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    elif opt_name == 'AdamW':
        base_optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    else:
        raise NotImplementedError
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(base_optimizer, T_max=args.epochs)

    # Both rho schedulers follow the LR schedule; max_value == min_value keeps
    # the respective rho constant across training.
    def _constant_rho_scheduler(value):
        return ProportionScheduler(pytorch_lr_scheduler=lr_scheduler, max_lr=args.lr, min_lr=0.0, max_value=value, min_value=value)

    grad_rho_scheduler = _constant_rho_scheduler(args.grad_rho)
    grad_norm_rho_scheduler = _constant_rho_scheduler(args.grad_norm_rho)
    optimizer = GAM(params=model.parameters(), base_optimizer=base_optimizer, model=model, grad_rho_scheduler=grad_rho_scheduler, grad_norm_rho_scheduler=grad_norm_rho_scheduler, adaptive=args.adaptive, args=args)
    return (optimizer, base_optimizer, lr_scheduler, grad_rho_scheduler, grad_norm_rho_scheduler)
@_response(prefix='')
def request_word(word):
    """Fetch the dictionary page for ``word``.

    Returns the response body, or None when the entry does not exist (404).
    Raises on connection failure (with a user-facing message) and on any
    other HTTP error status.

    FIX: the decorator line had been garbled to a bare ``_response(prefix='')``
    statement; restored the ``@`` form.
    """
    url = URL_FORM.format(word=word)
    try:
        response = requests.get(url, timeout=DEFAULT_TIMEOUT)
    except requests.exceptions.ConnectionError as exc:
        raise Exception(_('Connection could not be established. Check your internet connection.')) from exc
    if (response.status_code == 404):
        # Missing entry: signal "no such word" instead of raising.
        return None
    response.raise_for_status()
    return response.text
def main(opts):
    """Pre-training entry point: set up model, data and optimizer, then run
    the training loop with periodic logging, validation and checkpointing.

    Only the default GPU (rank 0) writes logs, tensorboard and checkpoints;
    other ranks use NoOp stand-ins.

    FIX: the gradient-accumulation scaling referenced an undefined name
    ``args`` (the parameter is ``opts``), which raised NameError whenever
    ``gradient_accumulation_steps > 1``. Also dropped the unused
    ``delay_unscale`` local.
    """
    (default_gpu, n_gpu, device) = set_cuda(opts)
    if default_gpu:
        LOGGER.info('device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.format(device, n_gpu, bool((opts.local_rank != (- 1))), opts.fp16))
    # Distinct seed per rank so shuffling differs across distributed workers.
    seed = opts.seed
    if (opts.local_rank != (- 1)):
        seed += opts.rank
    set_random_seed(seed)
    if default_gpu:
        save_training_meta(opts)
        TB_LOGGER.create(os.path.join(opts.output_dir, 'logs'))
        pbar = tqdm(total=opts.num_train_steps)
        model_saver = ModelSaver(os.path.join(opts.output_dir, 'ckpts'))
        add_log_to_file(os.path.join(opts.output_dir, 'logs', 'log.txt'))
    else:
        # Non-default ranks stay silent and skip checkpointing.
        LOGGER.disabled = True
        pbar = NoOp()
        model_saver = NoOp()
    # Collect the union of pretraining tasks across all train datasets.
    model_config = PretrainedConfig.from_json_file(opts.model_config)
    model_config.pretrain_tasks = []
    for train_dataset_config in opts.train_datasets.values():
        model_config.pretrain_tasks.extend(train_dataset_config['tasks'])
    model_config.pretrain_tasks = set(model_config.pretrain_tasks)
    tokenizer = AutoTokenizer.from_pretrained(model_config.lang_bert_name)
    # Build the initial state dict: explicit checkpoint, or a pretrained
    # language/vision-language backbone remapped to this model's names.
    if opts.checkpoint:
        checkpoint = torch.load(opts.checkpoint, map_location=(lambda storage, loc: storage))
    else:
        checkpoint = {}
        if (opts.init_pretrained == 'bert'):
            tmp = AutoModel.from_pretrained(model_config.lang_bert_name)
            for (param_name, param) in tmp.named_parameters():
                checkpoint[param_name] = param
            if (model_config.lang_bert_name == 'xlm-roberta-base'):
                # XLM-R only has one token type; duplicate it to cover two.
                checkpoint['embeddings.token_type_embeddings.weight'] = torch.cat(([checkpoint['embeddings.token_type_embeddings.weight']] * 2), 0)
            del tmp
        elif (opts.init_pretrained == 'lxmert'):
            tmp = torch.load('../datasets/pretrained/LXMERT/model_LXRT.pth', map_location=(lambda storage, loc: storage))
            for (param_name, param) in tmp.items():
                param_name = param_name.replace('module.', '')
                if ('bert.encoder.layer' in param_name):
                    param_name = param_name.replace('bert.encoder.layer', 'bert.lang_encoder.layer')
                    checkpoint[param_name] = param
                elif ('bert.encoder.x_layers' in param_name):
                    # Cross-modal layers initialize both local and global encoders.
                    param_name1 = param_name.replace('bert.encoder.x_layers', 'bert.local_encoder.encoder.x_layers')
                    param_name2 = param_name.replace('bert.encoder.x_layers', 'bert.global_encoder.encoder.x_layers')
                    checkpoint[param_name1] = checkpoint[param_name2] = param
                elif ('cls.predictions' in param_name):
                    param_name = param_name.replace('cls.predictions', 'mlm_head.predictions')
                    checkpoint[param_name] = param
                else:
                    checkpoint[param_name] = param
            del tmp
    model_class = GlocalTextPathCMTPreTraining
    model = model_class.from_pretrained(pretrained_model_name_or_path=None, config=model_config, state_dict=checkpoint)
    model.train()
    set_dropout(model, opts.dropout)
    model = wrap_model(model, device, opts.local_rank)
    del checkpoint
    # R2R trajectory datasets: train plus seen/unseen validation splits.
    data_cfg = EasyDict(opts.train_datasets['R2R'])
    train_nav_db = R2RTextPathData(data_cfg.train_traj_files, data_cfg.img_ft_file, data_cfg.scanvp_cands_file, data_cfg.connectivity_dir, image_prob_size=model_config.image_prob_size, image_feat_size=model_config.image_feat_size, angle_feat_size=model_config.angle_feat_size, max_txt_len=opts.max_txt_len, in_memory=True)
    val_nav_db = R2RTextPathData(data_cfg.val_seen_traj_files, data_cfg.img_ft_file, data_cfg.scanvp_cands_file, data_cfg.connectivity_dir, image_prob_size=model_config.image_prob_size, image_feat_size=model_config.image_feat_size, angle_feat_size=model_config.angle_feat_size, max_txt_len=opts.max_txt_len, in_memory=True)
    val2_nav_db = R2RTextPathData(data_cfg.val_unseen_traj_files, data_cfg.img_ft_file, data_cfg.scanvp_cands_file, data_cfg.connectivity_dir, image_prob_size=model_config.image_prob_size, image_feat_size=model_config.image_feat_size, angle_feat_size=model_config.angle_feat_size, max_txt_len=opts.max_txt_len, in_memory=True)
    train_dataloaders = create_dataloaders(data_cfg, train_nav_db, tokenizer, True, device, opts)
    val_dataloaders = create_dataloaders(data_cfg, val_nav_db, tokenizer, False, device, opts)
    val2_dataloaders = create_dataloaders(data_cfg, val2_nav_db, tokenizer, False, device, opts)
    meta_loader = MetaLoader(train_dataloaders, accum_steps=opts.gradient_accumulation_steps, distributed=(opts.local_rank != (- 1)), device=device)
    meta_loader = PrefetchLoader(meta_loader, device)
    optimizer = build_optimizer(model, opts)
    task2scaler = {t: i for (i, t) in enumerate(train_dataloaders.keys())}
    if opts.fp16:
        grad_scaler = amp.GradScaler()
    global_step = 0
    LOGGER.info(f'***** Running training with {opts.world_size} GPUs *****')
    LOGGER.info(' Batch size = %d', (opts.train_batch_size if (opts.local_rank == (- 1)) else (opts.train_batch_size * opts.world_size)))
    LOGGER.info(' Accumulate steps = %d', opts.gradient_accumulation_steps)
    LOGGER.info(' Num steps = %d', opts.num_train_steps)
    task2loss = {task: RunningMeter(f'loss/{task}') for task in train_dataloaders.keys()}
    n_examples = defaultdict(int)
    n_in_units = defaultdict(int)
    n_loss_units = defaultdict(int)
    grad_norm = 0
    start_time = time.time()
    optimizer.zero_grad()
    optimizer.step()
    for (step, (name, batch)) in enumerate(meta_loader):
        n_examples[name] += batch['txt_ids'].size(0)
        n_in_units[name] += batch['txt_lens'].sum().item()
        task = name.split('_')[0]
        if opts.fp16:
            with amp.autocast():
                loss = model(batch, task=task, compute_loss=True)
        else:
            loss = model(batch, task=task, compute_loss=True)
        n_loss_units[name] += loss.size(0)
        loss = loss.mean()
        # FIX: was `args.gradient_accumulation_steps` — NameError at runtime.
        if (opts.gradient_accumulation_steps > 1):
            loss = (loss / opts.gradient_accumulation_steps)
        if opts.fp16:
            grad_scaler.scale(loss).backward()
        else:
            loss.backward()
        task2loss[name](loss.item())
        # Optimizer step only at accumulation boundaries.
        if (((step + 1) % opts.gradient_accumulation_steps) == 0):
            global_step += 1
            lr_this_step = get_lr_sched(global_step, opts)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_this_step
            TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
            TB_LOGGER.log_scalar_dict({ll.name: ll.val for ll in task2loss.values() if (ll.val is not None)})
            TB_LOGGER.step()
            if (opts.grad_norm != (- 1)):
                # Unscale before clipping so the threshold applies to true grads.
                if opts.fp16:
                    grad_scaler.unscale_(optimizer)
                grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), opts.grad_norm)
                TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
            if opts.fp16:
                grad_scaler.step(optimizer)
                grad_scaler.update()
            else:
                optimizer.step()
            optimizer.zero_grad()
            pbar.update(1)
            if ((global_step % opts.log_steps) == 0):
                # Throughput logging per task.
                LOGGER.info(f'Step {global_step}')
                for t in train_dataloaders.keys():
                    tot_ex = n_examples[t]
                    ex_per_sec = int((tot_ex / (time.time() - start_time)))
                    tot_in = n_in_units[t]
                    in_per_sec = int((tot_in / (time.time() - start_time)))
                    tot_l = n_loss_units[t]
                    l_per_sec = int((tot_l / (time.time() - start_time)))
                    LOGGER.info(f'{t}: {tot_ex} examples trained at {ex_per_sec} ex/s')
                    TB_LOGGER.add_scalar(f'perf/{t}_ex_per_s', ex_per_sec, global_step)
                    TB_LOGGER.add_scalar(f'perf/{t}_in_per_s', in_per_sec, global_step)
                    TB_LOGGER.add_scalar(f'perf/{t}_loss_per_s', l_per_sec, global_step)
                LOGGER.info('')
            if ((global_step % opts.valid_steps) == 0):
                LOGGER.info(f'------Step {global_step}: start validation seen------')
                validate(model, val_dataloaders, setname='_seen')
                LOGGER.info(f'------Step {global_step}: start validation unseen------')
                validate(model, val2_dataloaders, setname='_unseen')
                model_saver.save(model, global_step)
        if (global_step >= opts.num_train_steps):
            break
    # Final validation/checkpoint unless the loop ended exactly on a boundary.
    if ((global_step % opts.valid_steps) != 0):
        LOGGER.info(f'------Step {global_step}: start validation seen------')
        validate(model, val_dataloaders, setname='_seen')
        LOGGER.info(f'------Step {global_step}: start validation unseen------')
        validate(model, val2_dataloaders, setname='_unseen')
        model_saver.save(model, global_step)
def test_generate_system_pyproject_carriage_returns(example_system_pyproject: str) -> None:
    """Regenerating the system pyproject must not introduce doubled
    carriage returns."""
    cmd = SelfCommand()
    cmd.system_pyproject.write_text(example_system_pyproject + '\n')
    cmd.generate_system_pyproject()
    # newline='' disables newline translation so any '\r' survives inspection.
    with open(cmd.system_pyproject, newline='') as fh:
        content = fh.read()
    assert '\r\r' not in content
class ResultsTable(QtCore.QObject):
    """Qt adapter around a results object; emits ``data_changed`` whenever new
    rows appear after an update.

    FIX: ``data`` was defined twice as plain methods — clearly a stripped
    ``@property`` / ``@data.setter`` pair (``__init__`` assigns ``self.data``
    and the rest of the class reads ``self._data``, which was never set
    without the property). The decorators are restored here.
    """
    # (start_row, start_col, end_row, end_col) of the newly appended region.
    data_changed = QtCore.Signal(int, int, int, int)

    def __init__(self, results, color, column_index=None, force_reload=False, wdg=None, **kwargs):
        super().__init__()
        self.results = results
        self.color = color
        self.force_reload = force_reload
        self.last_row_count = 0
        self.wdg = wdg
        self.column_index = column_index
        # Goes through the property setter below (applies the index).
        self.data = self.results.data
        self._started = False

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        self._data = value
        if (self.column_index is not None):
            self._data = self._data.set_index(self.column_index)
        else:
            self._data.reset_index()

    def rows(self):
        return self._data.shape[0]

    def columns(self):
        return self._data.shape[1]

    def init(self):
        # Forget previously reported rows so the next update re-emits from 0.
        self.last_row_count = 0

    def start(self):
        self._started = True

    def stop(self):
        self._started = False

    def update_data(self):
        """Reload (when configured) and emit data_changed for appended rows."""
        if (not self._started):
            return
        if self.force_reload:
            self.results.reload()
        self.data = self.results.data
        (current_row_count, columns) = self._data.shape
        if (self.last_row_count < current_row_count):
            self.data_changed.emit(self.last_row_count, 0, (current_row_count - 1), (columns - 1))
            self.last_row_count = current_row_count

    def set_color(self, color):
        self.color = color

    def set_index(self, index):
        self.column_index = index
class SourceGenerator(LocationGenerator):
    """Scenario source generator: draws event locations/magnitudes or loads
    them from a source file, and writes them out for a scenario directory."""
    nevents = Int.T(default=2)
    avoid_water = Bool.T(default=False, help='Avoid sources offshore under the ocean / lakes.')
    time_min = Timestamp.T(default=Timestamp.D('2017-01-01 00:00:00'))
    time_max = Timestamp.T(default=Timestamp.D('2017-01-03 00:00:00'))
    magnitude_min = Float.T(default=4.0, help='minimum moment magnitude')
    magnitude_max = Float.T(optional=True, help='if set, maximum moment magnitude for a uniform distribution. If set to ``None``, magnitudes are drawn using a Gutenberg-Richter distribution, see :gattr:`b_value`.')
    b_value = Float.T(optional=True, help='b-value for Gutenberg-Richter magnitude distribution. If unset, a value of 1 is assumed.')
    source_file = String.T(optional=True, help='Path to source file. Sources are used as scenario events')
    def __init__(self, *args, **kwargs):
        super(SourceGenerator, self).__init__(*args, **kwargs)
        # The two options select incompatible magnitude distributions.
        if ((self.b_value is not None) and (self.magnitude_max is not None)):
            raise ScenarioError(('%s: b_value and magnitude_max are mutually exclusive.' % self.__class__.__name__))
    def draw_magnitude(self, rstate):
        """Draw one magnitude: uniform when magnitude_max is set, otherwise
        Gutenberg-Richter with b_value (default 1.0)."""
        if ((self.b_value is None) and (self.magnitude_max is None)):
            b_value = 1.0
        else:
            b_value = self.b_value
        if (b_value is None):
            # magnitude_max given: uniform draw in [magnitude_min, magnitude_max].
            return rstate.uniform(self.magnitude_min, self.magnitude_max)
        else:
            return moment_tensor.rand_to_gutenberg_richter(rstate.rand(), b_value, magnitude_min=self.magnitude_min)
    def get_sources(self):
        """Return the scenario sources, named 'scenario_evNNN' (1-based).

        From source_file when set (this also overrides nevents), otherwise
        generated via get_source().
        """
        sources = []
        if (self.source_file is not None):
            sources = load_all(filename=self.source_file)
            self.nevents = len(sources)
            for ievent in range(self.nevents):
                sources[ievent].name = ('scenario_ev%03d' % (ievent + 1))
            return sources
        else:
            for ievent in range(self.nevents):
                src = self.get_source(ievent)
                src.name = ('scenario_ev%03d' % (ievent + 1))
                sources.append(src)
            return sources
    def ensure_data(self, path):
        """Write sources.yml and events.txt into ``path`` if not present yet."""
        fn_sources = op.join(path, 'sources.yml')
        if (not op.exists(fn_sources)):
            with open(fn_sources, 'w') as f:
                for src in self.get_sources():
                    f.write(src.dump())
        fn_events = op.join(path, 'events.txt')
        if (not op.exists(fn_events)):
            with open(fn_events, 'w') as f:
                for (isrc, src) in enumerate(self.get_sources()):
                    f.write(src.pyrocko_event().dump())
    def add_map_artists(self, automap):
        # Sources draw nothing on the overview map by default.
        pass
class Meteor(object):
    """Thread-safe wrapper around the METEOR scoring jar running in -stdio
    mode; one persistent Java subprocess is shared by all calls."""

    def __init__(self):
        assert (_METEOR_PATH is not None)
        cmd = 'java -Xmx2G -jar {} - - -l en -norm -stdio'.format(_METEOR_PATH)
        # Line-buffered text pipes for the SCORE/EVAL request-response protocol.
        self._meteor_proc = sp.Popen(cmd.split(), stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True, encoding='utf-8', bufsize=1)
        self._lock = threading.Lock()

    def __call__(self, summ, ref):
        """Score ``summ`` against ``ref`` (token lists joined with spaces).

        FIX: the lock was acquired/released manually, so an exception during
        the subprocess round-trip leaked the lock and deadlocked later calls;
        ``with`` guarantees release.
        """
        with self._lock:
            score_line = 'SCORE ||| {} ||| {}\n'.format(' '.join(ref), ' '.join(summ))
            self._meteor_proc.stdin.write(score_line)
            stats = self._meteor_proc.stdout.readline().strip()
            eval_line = 'EVAL ||| {}\n'.format(stats)
            self._meteor_proc.stdin.write(eval_line)
            score = float(self._meteor_proc.stdout.readline().strip())
        return score

    def __del__(self):
        # Best-effort shutdown of the Java subprocess.
        with self._lock:
            self._meteor_proc.stdin.close()
            self._meteor_proc.kill()
            self._meteor_proc.wait()
class BertPredictionHeadTransform(nn.Module):
    """Dense projection -> activation -> LayerNorm, applied before the LM head."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # A string activation name is resolved through ACT2FN; a callable
        # is used as-is.
        if isinstance(config.hidden_act, str):
            act_fn = ACT2FN[config.hidden_act]
        else:
            act_fn = config.hidden_act
        self.transform_act_fn = act_fn
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        """Return the transformed hidden states (same shape as the input)."""
        projected = self.dense(hidden_states)
        activated = self.transform_act_fn(projected)
        return self.LayerNorm(activated)
class Effect6559(BaseEffect):
    """Passive boosts to drone/fighter range, tracking and missile
    explosion attributes, driven by the source item's bonus attributes."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # (fit container, required skill, boosted attribute, source bonus attr),
        # in the same order as the original explicit call list.
        boosts = (
            ('fighters', 'Fighters', 'fighterAbilityAttackMissileExplosionRadius', 'aoeCloudSizeBonus'),
            ('fighters', 'Fighters', 'fighterAbilityAttackMissileRangeOptimal', 'maxRangeBonus'),
            ('fighters', 'Fighters', 'fighterAbilityAttackTurretRangeFalloff', 'falloffBonus'),
            ('fighters', 'Fighters', 'fighterAbilityMissilesExplosionRadius', 'aoeCloudSizeBonus'),
            ('drones', 'Drones', 'falloff', 'falloffBonus'),
            ('fighters', 'Fighters', 'fighterAbilityAttackMissileRangeFalloff', 'falloffBonus'),
            ('fighters', 'Fighters', 'fighterAbilityAttackTurretTrackingSpeed', 'trackingSpeedBonus'),
            ('drones', 'Drones', 'maxRange', 'maxRangeBonus'),
            ('drones', 'Drones', 'trackingSpeed', 'trackingSpeedBonus'),
            ('fighters', 'Fighters', 'fighterAbilityAttackTurretRangeOptimal', 'maxRangeBonus'),
            ('fighters', 'Fighters', 'fighterAbilityMissilesExplosionVelocity', 'aoeVelocityBonus'),
            ('fighters', 'Fighters', 'fighterAbilityAttackMissileExplosionVelocity', 'aoeVelocityBonus'),
            ('fighters', 'Fighters', 'fighterAbilityMissilesRange', 'maxRangeBonus'),
        )
        for container, skill, attr, bonus in boosts:
            # Bind `skill` as a default to avoid the late-binding closure trap.
            getattr(fit, container).filteredItemBoost(
                (lambda mod, _skill=skill: mod.item.requiresSkill(_skill)),
                attr,
                src.getModifiedItemAttr(bonus),
                stackingPenalties=True, **kwargs)
class WordInformationPreserved(Metric[torch.Tensor]):
    """Word Information Preserved (WIP) metric, accumulated across updates."""

    def __init__(self: TWordInformationPreserved, *, device: Optional[torch.device] = None) -> None:
        super().__init__(device=device)
        # Running totals; float64 avoids precision loss over many batches.
        self._add_state('correct_total', torch.tensor(0, dtype=torch.float64, device=self.device))
        self._add_state('input_total', torch.tensor(0, dtype=torch.float64, device=self.device))
        self._add_state('target_total', torch.tensor(0, dtype=torch.float64, device=self.device))

    # BUG FIX: the bare `_mode()` statements in the original were corrupted
    # `@torch.inference_mode()` decorators (NameError as written); restored
    # on the three methods below, matching torcheval's metric convention.
    @torch.inference_mode()
    def update(self: TWordInformationPreserved, input: Union[(str, List[str])], target: Union[(str, List[str])]) -> TWordInformationPreserved:
        """Accumulate WIP statistics for one batch of transcripts."""
        (correct_total, target_total, input_total) = _word_information_preserved_update(input, target)
        self.correct_total += correct_total.to(self.device)
        self.target_total += target_total.to(self.device)
        self.input_total += input_total.to(self.device)
        return self

    @torch.inference_mode()
    def compute(self: TWordInformationPreserved) -> torch.Tensor:
        """Return the word information preserved over all seen batches."""
        return _word_information_preserved_compute(self.correct_total, self.target_total, self.input_total)

    @torch.inference_mode()
    def merge_state(self: TWordInformationPreserved, metrics: Iterable[TWordInformationPreserved]) -> TWordInformationPreserved:
        """Fold peer metric states into this one (distributed merge)."""
        for metric in metrics:
            self.correct_total += metric.correct_total.to(self.device)
            self.target_total += metric.target_total.to(self.device)
            self.input_total += metric.input_total.to(self.device)
        return self
# BUG FIX: the original first line `(python=PYTHON_ALL_VERSIONS)` was a
# corrupted decorator missing its `@nox.session` prefix; without it the
# session is never registered with nox.
@nox.session(python=PYTHON_ALL_VERSIONS)
def tests(session: nox.Session) -> None:
    """Run the test suite; on CI, add coverage and pre-pull docker images."""
    posargs = session.posargs
    extras = 'coverage' if RUNNING_CI else 'test'
    session.install('-e', f'.[{extras}]')
    if RUNNING_CI:
        posargs.extend(['--cov', 'auditwheel', '--cov-branch'])
        # Pre-pull the docker images used by the tests to avoid timeouts.
        for image in _docker_images(session):
            session.run('docker', 'pull', image, external=True)
    session.run('pytest', '-s', *posargs)
    if RUNNING_CI:
        session.run('auditwheel', 'lddtree', sys.executable)
        session.run('coverage', 'xml', '-ocoverage.xml')
class TestAppConfig(AppConfig):
    """Registers the compatibility views as iframes when the app loads."""

    name = 'test_app'

    def ready(self):
        # One entry per legacy view that must remain embeddable; order
        # matches the original explicit registration sequence.
        iframe_targets = (
            'test_app.views.view_to_component_sync_func_compatibility',
            views.view_to_component_async_func_compatibility,
            views.ViewToComponentSyncClassCompatibility,
            views.ViewToComponentAsyncClassCompatibility,
            views.ViewToComponentTemplateViewClassCompatibility,
            views.view_to_iframe_args,
        )
        for target in iframe_targets:
            register_iframe(target)
def generate_meas_calibration(results_file_path: str, runs: int):
    """Run measurement-calibration experiments and dump the results as JSON.

    Each run executes a fresh calibration (seeded with ``SEED + run``) and
    stores the calibration matrix, readout fidelity, the raw counts, and
    counts mitigated with both pseudo-inverse and least-squares filters.
    """
    collected = []
    for run_index in range(runs):
        cal_results, state_labels, circuit_results = meas_calibration_circ_execution(1000, SEED + run_index)
        fitter = CompleteMeasFitter(cal_results, state_labels)
        mitigation_filter = MeasurementFilter(fitter.cal_matrix, state_labels)
        collected.append({
            'cal_matrix': convert_ndarray_to_list_in_data(fitter.cal_matrix),
            'fidelity': fitter.readout_fidelity(),
            'results': circuit_results,
            'results_pseudo_inverse': mitigation_filter.apply(circuit_results, method='pseudo_inverse'),
            'results_least_square': mitigation_filter.apply(circuit_results, method='least_squares'),
        })
    with open(results_file_path, 'w') as results_file:
        json.dump(collected, results_file)
class ViewProviderAsmElementGroup(ViewProviderAsmGroup):
    # View provider for the group that holds an assembly's elements:
    # context menu, name syncing, sorting, and drag-and-drop of geometry
    # references into new AsmElement objects.
    _iconName = 'Assembly_Assembly_Element_Tree.svg'

    def setupContextMenu(self, vobj, menu):
        # Add the sort actions plus the shared "sync names" menu entry.
        setupSortMenu(menu, self.sort, self.sortReverse)
        ViewProviderAsmElement.setupSyncNameMenu('Sync elements names', menu, vobj)

    def syncElementName(self, check=False):
        # With check=True: only report (True/False) whether any child
        # element would be renamed.  With check=False: perform the renames
        # inside a FreeCAD transaction and recompute.
        if (not check):
            FreeCAD.setActiveTransaction('Sync elements names')
        try:
            for element in self.ViewObject.Object.Group:
                if (element.ViewObject.Proxy.syncElementName(check, False) and check):
                    return True
            if check:
                return False
            FreeCAD.ActiveDocument.recompute()
            FreeCAD.closeActiveTransaction()
        except Exception:
            # Abort the transaction on failure; errors are deliberately
            # swallowed here (best-effort GUI action).
            if (not check):
                FreeCAD.closeActiveTransaction(True)

    def sortReverse(self):
        # Sort children in descending order.
        sortChildren(self.ViewObject.Object, True)

    def sort(self):
        # Sort children in ascending order.
        sortChildren(self.ViewObject.Object, False)

    def canDropObjectEx(self, obj, owner, subname, elements):
        # Accept: objects already inside one of our plain groups, or
        # geometry element references coming from this assembly's own
        # part group.
        if AsmPlainGroup.contains(self.ViewObject.Object, obj):
            return True
        if (not owner):
            return False
        if ((not elements) and (not utils.isElement((owner, subname)))):
            return False
        proxy = self.ViewObject.Object.Proxy
        return (proxy.getAssembly().getPartGroup() == owner)

    def dropObjectEx(self, vobj, obj, owner, subname, elements):
        # Try an in-tree move first; otherwise create new elements from
        # the dropped references.  If the drop target was the only current
        # selection, re-select the newly created elements afterwards.
        if AsmPlainGroup.tryMove(obj, self.ViewObject.Object):
            return
        sels = FreeCADGui.Selection.getSelectionEx('*', False)
        if ((len(sels) == 1) and (len(sels[0].SubElementNames) == 1) and (sels[0].Object.getSubObject(sels[0].SubElementNames[0], 1) == vobj.Object)):
            sel = sels[0]
        else:
            sel = None
        FreeCADGui.Selection.clearSelection()
        res = self._drop(obj, owner, subname, elements)
        if sel:
            for element in res:
                FreeCADGui.Selection.addSelection(sel.Object, ((sel.SubElementNames[0] + element.Name) + '.'))

    def _drop(self, obj, owner, subname, elements):
        # Create one AsmElement per dropped (sub)element; an empty string
        # stands for the whole reference.  Returns the created objects.
        if (not elements):
            elements = ['']
        res = []
        for element in elements:
            obj = AsmElement.make(AsmElement.Selection(SelObj=None, SelSubname=None, Element=None, Group=owner, Subname=(subname + element)))
            if obj:
                res.append(obj)
        return res

    def onDelete(self, _vobj, _subs):
        # The element group itself is never deleted through the GUI.
        return False

    def canDelete(self, obj):
        # Only plain sub-groups may be deleted from within this group.
        return isTypeOf(obj, AsmPlainGroup)
class Disc_feat(nn.Module):
    """MLP discriminator over 10240-d feature vectors.

    Two ReLU+dropout hidden layers of width 4096, then a single sigmoid
    output unit.
    """

    def __init__(self):
        super(Disc_feat, self).__init__()
        self.fc11 = nn.Linear(10240, 4096)
        self.fc12 = nn.Linear(4096, 4096)
        self.fc13 = nn.Linear(4096, 1)
        # Shared dropout module applied after each hidden activation.
        self.d = nn.Dropout(0.5)

    def forward(self, x_feat):
        """Return a probability in (0, 1) for each input feature vector."""
        hidden = self.d(F.relu(self.fc11(x_feat)))
        hidden = self.d(F.relu(self.fc12(hidden)))
        logits = self.fc13(hidden)
        return torch.sigmoid(logits)
def create_sweeptx_for_their_revoked_htlc(chan: 'Channel', ctx: Transaction, htlc_tx: Transaction, sweep_address: str) -> Optional[SweepInfo]:
    # Build a SweepInfo for the output of a *revoked* counterparty HTLC
    # transaction, spendable via the revocation key derived from the
    # leaked per-commitment secret.  Returns None whenever the commitment
    # is not revoked or the HTLC-tx output does not match the expected
    # script.
    x = analyze_ctx(chan, ctx)
    if (not x):
        return
    (ctn, their_pcp, is_revocation, per_commitment_secret) = x
    if (not is_revocation):
        # Only applicable when we actually hold the revocation secret.
        return
    # Per-commitment point derived from the leaked secret.
    pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
    # for_us=False: configs ordered from the counterparty's perspective.
    (this_conf, other_conf) = get_ordered_channel_configs(chan=chan, for_us=False)
    other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey, per_commitment_secret)
    to_self_delay = other_conf.to_self_delay
    this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
    revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
    # Reconstruct the to_local-style witness script of the HTLC-tx output
    # and verify the first output actually pays to it.
    witness_script = bh2u(make_commitment_output_to_local_witness_script(revocation_pubkey, to_self_delay, this_delayed_pubkey))
    htlc_address = redeem_script_to_address('p2wsh', witness_script)
    if (htlc_tx.outputs()[0].address != htlc_address):
        return
    # Revocation path: no CSV/CLTV constraints on the sweep.
    gen_tx = (lambda : create_sweeptx_ctx_to_local(sweep_address=sweep_address, ctx=htlc_tx, output_idx=0, witness_script=witness_script, privkey=other_revocation_privkey, is_revocation=True, config=chan.lnworker.config))
    return SweepInfo(name='redeem_htlc2', csv_delay=0, cltv_expiry=0, gen_tx=gen_tx)
class ParentProperty():
    """Descriptor guarding a ``_parent`` back-reference.

    The parent may be assigned while unset (``None``) and may always be
    cleared; overwriting one non-None parent with another raises
    ``ValueError``.
    """

    def __get__(self, inst, owner):
        # Default to None when the attribute was never assigned.
        return getattr(inst, '_parent', None)

    def __set__(self, inst, value):
        current = getattr(inst, '_parent', None)
        if current is not None and value is not None:
            raise ValueError("Cannot set parent property without first setting it to 'None'.")
        inst._parent = value
def clean_value(val: Any) -> str:
    """Render *val* as a parameter string.

    Container types are rejected with ``ValueError``.  Dates/datetimes,
    bools and enums are delegated to their dedicated cleaners; anything
    else falls back to ``str``.
    """
    if isinstance(val, (Mapping, list, set, tuple)):
        raise ValueError(('Cannot clean parameter value of type %s' % str(type(val))))
    if isinstance(val, (datetime.datetime, datetime.date)):
        result = clean_date(val)
    elif isinstance(val, bool):
        # bool must be handled before the generic fallback (it is an int).
        result = clean_bool(val)
    elif isinstance(val, Enum):
        result = clean_enum(val)
    else:
        result = str(val)
    return result
def _add_encryption(field_class, requires_length_check=True):
class indexed_class(field_class):
def __init__(self, default_token_length=None, *args, **kwargs):
def _generate_default():
return DecryptedValue(random_string(default_token_length))
if (default_token_length is not None):
kwargs['default'] = _generate_default
field_class.__init__(self, *args, **kwargs)
assert (not self.index)
def db_value(self, value):
if (value is None):
return None
if isinstance(value, LazyEncryptedValue):
return value.encrypted_value
if isinstance(value, DecryptedValue):
value = value.value
meta = self.model._meta
return meta.encrypter.encrypt_value(value, (self.max_length if requires_length_check else None))
def python_value(self, value):
if (value is None):
return None
return LazyEncryptedValue(value, self)
def __hash__(self):
return field_class.__hash__(self)
def __eq__(self, _):
raise Exception('Disallowed operation; use `matches`')
def __mod__(self, _):
raise Exception('Disallowed operation; use `matches`')
def __pow__(self, _):
raise Exception('Disallowed operation; use `matches`')
def __contains__(self, _):
raise Exception('Disallowed operation; use `matches`')
def contains(self, _):
raise Exception('Disallowed operation; use `matches`')
def startswith(self, _):
raise Exception('Disallowed operation; use `matches`')
def endswith(self, _):
raise Exception('Disallowed operation; use `matches`')
return indexed_class |
def _generate_WS_to_parallel(i, num_nodes, num_signals, graph_hyper, weighted, weight_scale=False):
    """Generate one Watts-Strogatz graph and smooth signals on it.

    Parameters: *i* is the (unused) worker index; *graph_hyper* provides
    the WS parameters ``k`` and ``p``; *weighted* selects an optional edge
    weighting scheme ('uniform', 'gaussian' or 'lognormal'); with
    *weight_scale* the weights are normalized to sum to ``num_nodes``.
    Returns ``(z, W_GT)`` where z is the half-vectorized pairwise distance
    of signals sampled from the graph-Laplacian Gaussian and W_GT is the
    sparse ground-truth adjacency matrix.
    """
    G = nx.watts_strogatz_graph(num_nodes, k=graph_hyper['k'], p=graph_hyper['p'])
    W_GT = nx.adjacency_matrix(G).A
    if weighted == 'uniform':
        weights = np.random.uniform(0, 2, (num_nodes, num_nodes))
        weights = (weights + weights.T) / 2  # symmetrize
        W_GT = W_GT * weights
    if weighted == 'gaussian':
        weights = np.random.normal(1, 0.05, (num_nodes, num_nodes))
        weights = np.abs(weights)
        weights = (weights + weights.T) / 2
        W_GT = W_GT * weights
    if weighted == 'lognormal':
        weights = np.random.lognormal(0, 0.1, (num_nodes, num_nodes))
        weights = (weights + weights.T) / 2
        W_GT = W_GT * weights
    if weight_scale:
        W_GT = W_GT * num_nodes / np.sum(W_GT)
    # BUG FIX: the original read `np.diag((W_GT np.ones(num_nodes)))`,
    # which is a syntax error — the matrix-multiply operator was lost.
    # The degree vector is W_GT @ 1, giving the combinatorial Laplacian
    # L = D - W.
    L_GT = np.diag(W_GT @ np.ones(num_nodes)) - W_GT
    W_GT = scipy.sparse.csr_matrix(W_GT)
    # Regularize the singular Laplacian before inverting to get a valid
    # covariance for the smooth-signal model.
    cov = np.linalg.inv(L_GT + 0.0001 * np.eye(num_nodes))
    z = get_distance_halfvector(np.random.multivariate_normal(np.zeros(num_nodes), cov, num_signals))
    return (z, W_GT)
def annotate_pymodbus_logs(file: (str | os.PathLike)) -> None:
with open(file, encoding='utf-8') as in_file, tempfile.NamedTemporaryFile(mode='w', encoding='utf-8', delete=False) as out_file:
for (i, line) in enumerate(in_file):
if (('Running transaction' in line) and (i > 0)):
out_file.write('\n')
out_file.write(line)
if ('SEND:' in line):
explained = explain_with_rapid_scada(packet=line.split('SEND:')[1].strip())
out_file.write(f'''Send explained: {explained}
Send summary: {explained.summarize()}
''')
if ('RECV:' in line):
explained = explain_with_rapid_scada(packet=line.split('RECV:')[1].strip(), is_receive=True)
out_file.write(f'''Receive explained: {explained}
Receive summary: {explained.summarize()}
''')
shutil.copyfile(out_file.name, file)
with contextlib.suppress(FileNotFoundError):
os.remove(out_file.name) |
# BUG FIX: the two decorator lines had lost their `@pytest.mark` prefix
# (they read `.parametrize(...)`, a syntax error); restored so the test
# parametrizes again.
@pytest.mark.parametrize('environment', [{}, {'something': 'value'}, {'something': 'value', 'something_else': 'other_value'}])
@pytest.mark.parametrize('platform_specific', [False, True])
def test_environment(environment, platform_specific, platform, intercepted_build_args, monkeypatch):
    """CIBW_ENVIRONMENT (optionally platform-suffixed) must reach build options."""
    env_string = ' '.join((f'{k}={v}' for (k, v) in environment.items()))
    if platform_specific:
        monkeypatch.setenv(('CIBW_ENVIRONMENT_' + platform.upper()), env_string)
        # The platform-specific value must win over the generic one.
        monkeypatch.setenv('CIBW_ENVIRONMENT', 'overwritten')
    else:
        monkeypatch.setenv('CIBW_ENVIRONMENT', env_string)
    main()
    build_options = intercepted_build_args.args[0].build_options(identifier=None)
    intercepted_environment = build_options.environment
    assert isinstance(intercepted_environment, ParsedEnvironment)
    assert (intercepted_environment.as_dictionary(prev_environment={}) == environment)
def test_pass_through_equal_m_constraint():
    # Verify that method-equal constraints survive a pass-through adapter:
    # Top inserts a PassThroughPlus100 component between its own push
    # interface and the inner test module's push.
    class Top(Component):
        def construct(s):
            s.push = CalleeIfcCL()
            s.pull = CalleeIfcCL()
            s.pass1 = PassThroughPlus100()
            s.pass1.push //= s.push
            s.inner = TestModuleNonBlockingIfc()
            s.inner.push //= s.pass1.real_push
            s.inner.pull //= s.pull
        def line_trace(s):
            return s.inner.line_trace()
        def done(s):
            return True
    num_cycles = _test_TestModuleNonBlockingIfc(Top)
    # presumably 3 base cycles plus a 10-cycle adapter overhead — confirm
    # against the _test helper's expectations.
    assert (num_cycles == (3 + 10))
class ParallelLinearQubitOperatorTest(unittest.TestCase):
    # Tests for ParallelLinearQubitOperator: construction defaults, matvec
    # correctness, and that the multiprocessing pool is not retained.
    def setUp(self):
        # Operator Z3 + Y0 + X1 on 4 qubits; the expected matvec below is
        # built term by term for vec = [0..15].
        self.qubit_operator = ((QubitOperator('Z3') + QubitOperator('Y0')) + QubitOperator('X1'))
        self.n_qubits = 4
        self.linear_operator = ParallelLinearQubitOperator(self.qubit_operator)
        self.vec = numpy.array(range((2 ** self.n_qubits)))
        # Contribution of the first term (alternating signs).
        expected_matvec = numpy.array([0, (- 1), 2, (- 3), 4, (- 5), 6, (- 7), 8, (- 9), 10, (- 11), 12, (- 13), 14, (- 15)])
        # Contribution of the second term (imaginary components).
        expected_matvec = (expected_matvec + numpy.array([(- 8j), (- 9j), (- 10j), (- 11j), (- 12j), (- 13j), (- 14j), (- 15j), 0j, 1j, 2j, 3j, 4j, 5j, 6j, 7j]))
        # Contribution of the third term (block swaps).
        expected_matvec += numpy.array([4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11])
        self.expected_matvec = expected_matvec

    def test_init(self):
        # Constructor records operator/qubit count, groups terms across at
        # most min(cpu_count, 3) workers, and leaves the pool unset.
        self.assertEqual(self.linear_operator.qubit_operator, self.qubit_operator)
        self.assertEqual(self.linear_operator.n_qubits, self.n_qubits)
        self.assertIsNone(self.linear_operator.options.pool)
        cpu_count = multiprocessing.cpu_count()
        default_processes = min(cpu_count, 10)
        self.assertEqual(self.linear_operator.options.processes, default_processes)
        self.assertEqual(len(self.linear_operator.qubit_operator_groups), min(multiprocessing.cpu_count(), 3))
        # The term groups must re-sum to the original operator.
        self.assertEqual(QubitOperator.accumulate(self.linear_operator.qubit_operator_groups), self.qubit_operator)
        for linear_operator in self.linear_operator.linear_operators:
            self.assertEqual(linear_operator.n_qubits, self.n_qubits)
            self.assertTrue(isinstance(linear_operator, LinearQubitOperator))
        self.assertTrue(isinstance(self.linear_operator, scipy.sparse.linalg.LinearOperator))

    def test_matvec(self):
        # Parallel matvec must match the hand-computed expectation.
        self.assertIsNone(self.linear_operator.options.pool)
        self.assertTrue(numpy.allclose((self.linear_operator * self.vec), self.expected_matvec))

    def test_matvec_0(self):
        # The zero operator maps any vector to the zero vector.
        qubit_operator = QubitOperator.zero()
        vec = numpy.array([1, 2, 3, 4, 5, 6, 7, 8])
        matvec_expected = numpy.zeros(vec.shape)
        self.assertTrue(numpy.allclose((ParallelLinearQubitOperator(qubit_operator, 3) * vec), matvec_expected))
        self.assertIsNone(self.linear_operator.options.pool)

    def test_closed_workers_not_reused(self):
        # Two successive dot() calls must each get a fresh pool; no closed
        # pool may be cached on the options.
        qubit_operator = QubitOperator('X0')
        parallel_qubit_op = ParallelLinearQubitOperator(qubit_operator, 1, options=LinearQubitOperatorOptions(processes=2))
        state = [1.0, 0.0]
        parallel_qubit_op.dot(state)
        parallel_qubit_op.dot(state)
        self.assertIsNone(parallel_qubit_op.options.pool)
def test_is_generator_for_yield_in_while() -> None:
    """A ``yield`` in a nested while-condition marks the function as a generator."""
    code = """
    def paused_iter(iterable):
        while True:
            # Continue to yield the same item until `next(i)` or `i.send(False)`
            while (yield value):
                pass
    """
    func_node = astroid.extract_node(code)
    assert bool(func_node.is_generator())
class LazyFrames(object):
    """Lazily-concatenated stack of observation frames.

    The frame list is kept until first access, at which point it is
    concatenated once along axis 0 and cached; the list is then dropped so
    memory is held only by the cached array.
    """

    def __init__(self, frames):
        self._frames = frames
        self._out = None  # cache, filled on first access

    def _force(self):
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=0)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        result = self._force()
        return result if dtype is None else result.astype(dtype)

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        # Index along the *last* axis of the concatenated array.
        return self._force()[..., i]
def _create_or_update_runtime(task_signature: str, start: float, end: float) -> None:
    """Persist a task's runtime, inserting a new row or updating the old one."""
    duration = end - start
    with DatabaseSession() as session:
        runtime = session.get(Runtime, task_signature)
        if not runtime:
            # First observation of this task: insert a fresh row.
            session.add(Runtime(task=task_signature, date=start, duration=duration))
        else:
            runtime.date = start
            runtime.duration = duration
        session.commit()
class RoIAlignFunction(Function):
    """Autograd Function wrapping the ``roi_align_cuda`` extension."""

    # BUG FIX: the bare `_differentiable` line in the original was a
    # corrupted `@once_differentiable` decorator (NameError as written);
    # restored, together with the `@staticmethod` decorators that
    # torch.autograd.Function requires on forward/backward.
    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0):
        """Pool each RoI of *features* to an (out_h, out_w) grid (CUDA only)."""
        (out_h, out_w) = _pair(out_size)
        assert (isinstance(out_h, int) and isinstance(out_w, int))
        # Stash everything backward needs on the context.
        ctx.spatial_scale = spatial_scale
        ctx.sample_num = sample_num
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()
        (batch_size, num_channels, data_height, data_width) = features.size()
        num_rois = rois.size(0)
        output = features.new_zeros(num_rois, num_channels, out_h, out_w)
        if features.is_cuda:
            roi_align_cuda.forward(features, rois, out_h, out_w, spatial_scale, sample_num, output)
        else:
            # This extension ships no CPU implementation.
            raise NotImplementedError
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Propagate gradients to the feature map; RoIs get no gradient."""
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        sample_num = ctx.sample_num
        rois = ctx.saved_tensors[0]
        assert ((feature_size is not None) and grad_output.is_cuda)
        (batch_size, num_channels, data_height, data_width) = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = rois.new_zeros(batch_size, num_channels, data_height, data_width)
            roi_align_cuda.backward(grad_output.contiguous(), rois, out_h, out_w, spatial_scale, sample_num, grad_input)
        # One None per non-tensor forward argument (out_size, spatial_scale,
        # sample_num).
        return (grad_input, grad_rois, None, None, None)
def create_line_trotter_step_circuit(parameters: FermiHubbardParameters) -> cirq.Circuit:
    # Build one Trotter step of the line-topology Fermi-Hubbard model:
    # even then odd hopping layers, then on-site and nearest-neighbour
    # interaction layers (skipped when their angles vanish), and finally
    # the local-field layer.
    layout = parameters.layout
    hamiltonian = parameters.hamiltonian
    dt = parameters.dt
    # Hopping angles, split over even/odd bonds.
    j_theta = (dt * hamiltonian.j_array)
    (j_theta_even, j_theta_odd) = (j_theta[0::2], j_theta[1::2])
    # Interaction phases; note the opposite sign convention to hopping.
    u_phi = ((- dt) * hamiltonian.u_array)
    v_phi = ((- dt) * hamiltonian.v_array)
    (v_phi_even, v_phi_odd) = (v_phi[0::2], v_phi[1::2])
    # Local chemical-potential phases per spin species.
    local_up = ((- dt) * hamiltonian.local_up_array)
    local_down = ((- dt) * hamiltonian.local_down_array)
    circuit = cirq.Circuit()
    # Hopping on even bonds of both spin chains, then on odd bonds.
    circuit += cirq.Circuit((_create_hopping_ops(j_theta_even, layout.up_even_pairs), _create_hopping_ops(j_theta_even, layout.down_even_pairs)))
    circuit += cirq.Circuit((_create_hopping_ops(j_theta_odd, layout.up_odd_pairs), _create_hopping_ops(j_theta_odd, layout.down_odd_pairs)))
    if (not np.allclose(u_phi, 0.0)):
        # On-site (up-down) interaction.
        circuit += cirq.Circuit(_create_interaction_ops(u_phi, layout.interaction_pairs))
    if (not np.allclose(v_phi_even, 0.0)):
        # Nearest-neighbour interaction on even bonds.
        circuit += cirq.Circuit((_create_interaction_ops(v_phi_even, layout.up_even_pairs), _create_interaction_ops(v_phi_even, layout.down_even_pairs)))
    if (not np.allclose(v_phi_odd, 0.0)):
        # Nearest-neighbour interaction on odd bonds.
        circuit += cirq.Circuit((_create_interaction_ops(v_phi_odd, layout.up_odd_pairs), _create_interaction_ops(v_phi_odd, layout.down_odd_pairs)))
    if ((not np.allclose(local_up, 0.0)) or (not np.allclose(local_down, 0.0))):
        # Local fields applied to both spin chains.
        circuit += cirq.Circuit((_create_local_field_ops(local_up, layout.up_qubits), _create_local_field_ops(local_down, layout.down_qubits)))
    return circuit
class SerializerTests(AuthenticatedAPITestCase):
    """Validation behaviour of InfractionSerializer around the `active` flag."""

    # BUG FIX: Django invokes setUpTestData on the *class*
    # (cls.setUpTestData()); without @classmethod that call raised a
    # missing-argument TypeError.
    @classmethod
    def setUpTestData(cls):
        cls.user = User.objects.create(id=5, name='james', discriminator=1)

    def create_infraction(self, _type: str, active: bool):
        """Create an infraction of *_type* for the shared test user."""
        return Infraction.objects.create(user_id=self.user.id, actor_id=self.user.id, type=_type, reason='A reason.', expires_at=dt(5018, 11, 20, 15, 52, tzinfo=UTC), active=active)

    def test_is_valid_if_active_infraction_with_same_fields_exists(self):
        # A partial update of an inactive infraction must not trip the
        # unique-active-infraction validation.
        self.create_infraction('ban', active=True)
        instance = self.create_infraction('ban', active=False)
        data = {'reason': 'hello'}
        serializer = InfractionSerializer(instance, data=data, partial=True)
        self.assertTrue(serializer.is_valid(), msg=serializer.errors)

    def test_is_valid_for_new_active_infraction(self):
        # A new active infraction is valid when only inactive ones exist.
        self.create_infraction('ban', active=False)
        data = {'user': self.user.id, 'actor': self.user.id, 'type': 'ban', 'reason': 'A reason.', 'active': True}
        serializer = InfractionSerializer(data=data)
        self.assertTrue(serializer.is_valid(), msg=serializer.errors)

    def test_validation_error_if_missing_active_field(self):
        # `active` is required; its absence must fail with code 'required'.
        data = {'user': self.user.id, 'actor': self.user.id, 'type': 'ban', 'reason': 'A reason.'}
        serializer = InfractionSerializer(data=data)
        if not serializer.is_valid():
            self.assertIn('active', serializer.errors)
            code = serializer.errors['active'][0].code
            msg = f'Expected failure on required active field but got {serializer.errors}'
            self.assertEqual(code, 'required', msg=msg)
        else:
            self.fail('Validation unexpectedly succeeded.')
class PickupObjectAction(BaseAction):
    """Reward for pickup-style subgoals: positive (and done) once the goal
    object is the held inventory item."""

    valid_actions = {'PickupObject', 'OpenObject', 'CloseObject'}

    def get_reward(self, state, prev_state, expert_plan, goal_idx, low_idx=None):
        """Return ``(reward, done)`` for the current simulator state."""
        planner_action = expert_plan[goal_idx]['planner_action']
        # low_idx selects a low-level parameter of the subgoal, when given.
        if low_idx is None:
            subgoal = planner_action
        else:
            subgoal = planner_action['parameter'][low_idx]
        inventory_objects = state.metadata['inventoryObjects']
        if not inventory_objects:
            # Nothing held yet: neutral reward, keep going.
            return (self.rewards['neutral'], False)
        held_object_id = state.metadata['inventoryObjects'][0]['objectId']
        if held_object_id == subgoal['objectId']:
            return (self.rewards['positive'], True)
        return (self.rewards['negative'], False)
class BareReport(FormatterAPI):
    """Bare formatter: announcements and affected items as one space-separated line."""

    def render_vulnerabilities(self, announcements, vulnerabilities, remediations, full, packages, fixes=()):
        """Return normalized announcement slugs followed by affected package names."""
        Announcement = namedtuple('Announcement', ['name'])
        parsed_announcements = [
            Announcement(name='-'.join(item.get('message', 'none').lower().split()))
            for item in get_basic_announcements(announcements, include_local=False)
        ]
        announcements_to_render = [item.name for item in parsed_announcements]
        # De-duplicate package names of non-ignored vulnerabilities.
        affected_packages = list({v.package_name for v in vulnerabilities if not v.ignored})
        return ' '.join(announcements_to_render + affected_packages)

    def render_licenses(self, announcements, packages_licenses):
        """Return normalized announcement slugs followed by sorted license names."""
        parsed_announcements = [
            {'license': '-'.join(item.get('message', 'none').lower().split())}
            for item in get_basic_announcements(announcements)
        ]
        announcements_to_render = [item.get('license') for item in parsed_announcements]
        sorted_licenses = sorted({pkg_li.get('license') for pkg_li in packages_licenses})
        return ' '.join(announcements_to_render + sorted_licenses)

    def render_announcements(self, announcements):
        print('render_announcements bare')
class KnownValues(unittest.TestCase):
    # Regression tests for state-averaged / state-specific CASSCF and
    # CASCI, checked against the module-level reference calculation
    # `mc_ref` and hard-coded energies.
    def test_nosymm_sa4_newton(self):
        # 4-state-average CASSCF(4,4) with the newton solver, no symmetry.
        mc = mcscf.CASSCF(m, 4, 4).state_average_(([0.25] * 4)).newton()
        mo = mc.sort_mo([4, 5, 6, 10], base=1)
        mc.kernel(mo)
        self.assertAlmostEqual(mc.e_tot, mc_ref.e_tot, 8)
        for (e1, e0) in zip(mc.e_states, mc_ref.e_states):
            self.assertAlmostEqual(e1, e0, 5)

    def test_spin_sa4(self):
        # Mixed-spin state average: one singlet and one triplet solver,
        # two roots each.
        fcisolvers = [fci.solver(mol, singlet=(not bool(i)), symm=False) for i in range(2)]
        fcisolvers[0].nroots = fcisolvers[1].nroots = 2
        fcisolvers[1].spin = 2
        mc = mcscf.addons.state_average_mix(mcscf.CASSCF(m, 4, 4), fcisolvers, ([0.25] * 4))
        mo = mc.sort_mo([4, 5, 6, 10], base=1)
        mc.kernel(mo)
        self.assertAlmostEqual(mc.e_tot, mc_ref.e_tot, 8)
        # Sort: mixed solvers do not guarantee the reference state order.
        for (e1, e0) in zip(numpy.sort(mc.e_states), mc_ref.e_states):
            self.assertAlmostEqual(e1, e0, 5)

    def test_spin_sa4_newton(self):
        # Same as test_spin_sa4 but through the newton solver.
        fcisolvers = [fci.solver(mol, singlet=(not bool(i)), symm=False) for i in range(2)]
        fcisolvers[0].nroots = fcisolvers[1].nroots = 2
        fcisolvers[1].spin = 2
        mc = mcscf.addons.state_average_mix(mcscf.CASSCF(m, 4, 4), fcisolvers, ([0.25] * 4)).newton()
        mo = mc.sort_mo([4, 5, 6, 10], base=1)
        mc.kernel(mo)
        self.assertAlmostEqual(mc.e_tot, mc_ref.e_tot, 8)
        for (e1, e0) in zip(numpy.sort(mc.e_states), mc_ref.e_states):
            self.assertAlmostEqual(e1, e0, 5)

    def test_pointgroup_sa4(self):
        # Point-group-resolved state average: A1 and B1 solvers, two roots
        # each, using the symmetric molecule/SCF objects.
        fcisolvers = [fci.solver(molsym, symm=True, singlet=False) for i in range(2)]
        fcisolvers[0].nroots = fcisolvers[1].nroots = 2
        fcisolvers[0].wfnsym = 'A1'
        fcisolvers[1].wfnsym = 'B1'
        mc = mcscf.addons.state_average_mix(mcscf.CASSCF(msym, 4, 4), fcisolvers, ([0.25] * 4))
        mo = mc.sort_mo([4, 5, 6, 10], base=1)
        mc.kernel(mo)
        self.assertAlmostEqual(mc.e_tot, mc_ref.e_tot, 8)
        for (e1, e0) in zip(numpy.sort(mc.e_states), mc_ref.e_states):
            self.assertAlmostEqual(e1, e0, 5)

    def test_pointgroup_sa4_newton(self):
        # Same as test_pointgroup_sa4 but through the newton solver.
        fcisolvers = [fci.solver(molsym, symm=True, singlet=False) for i in range(2)]
        fcisolvers[0].nroots = fcisolvers[1].nroots = 2
        fcisolvers[0].wfnsym = 'A1'
        fcisolvers[1].wfnsym = 'B1'
        mc = mcscf.addons.state_average_mix(mcscf.CASSCF(msym, 4, 4), fcisolvers, ([0.25] * 4)).newton()
        mo = mc.sort_mo([4, 5, 6, 10], base=1)
        mc.kernel(mo)
        self.assertAlmostEqual(mc.e_tot, mc_ref.e_tot, 8)
        for (e1, e0) in zip(numpy.sort(mc.e_states), mc_ref.e_states):
            self.assertAlmostEqual(e1, e0, 5)

    def test_spin_and_pointgroup_sa4(self):
        # Combined spin + point-group mix: four solvers covering
        # {singlet, triplet} x {B1, A1}, one state each.
        fcisolvers = [fci.solver(molsym, singlet=(not bool((i % 2)))) for i in range(4)]
        fcisolvers[0].wfnsym = fcisolvers[1].wfnsym = 'B1'
        fcisolvers[2].wfnsym = fcisolvers[3].wfnsym = 'A1'
        fcisolvers[1].spin = fcisolvers[3].spin = 2
        mc = mcscf.addons.state_average_mix(mcscf.CASSCF(msym, 4, 4), fcisolvers, ([0.25] * 4))
        mo = mc.sort_mo([4, 5, 6, 10], base=1)
        mc.kernel(mo)
        self.assertAlmostEqual(mc.e_tot, mc_ref.e_tot, 8)
        for (e1, e0) in zip(numpy.sort(mc.e_states), mc_ref.e_states):
            self.assertAlmostEqual(e1, e0, 5)

    def test_spin_and_pointgroup_sa4_newton(self):
        # Same as test_spin_and_pointgroup_sa4 but through the newton solver.
        fcisolvers = [fci.solver(molsym, singlet=(not bool((i % 2)))) for i in range(4)]
        fcisolvers[0].wfnsym = fcisolvers[1].wfnsym = 'B1'
        fcisolvers[2].wfnsym = fcisolvers[3].wfnsym = 'A1'
        fcisolvers[1].spin = fcisolvers[3].spin = 2
        mc = mcscf.addons.state_average_mix(mcscf.CASSCF(msym, 4, 4), fcisolvers, ([0.25] * 4)).newton()
        mo = mc.sort_mo([4, 5, 6, 10], base=1)
        mc.kernel(mo)
        self.assertAlmostEqual(mc.e_tot, mc_ref.e_tot, 8)
        for (e1, e0) in zip(numpy.sort(mc.e_states), mc_ref.e_states):
            self.assertAlmostEqual(e1, e0, 5)

    def test_casci(self):
        # Plain CASCI on a locally-built water molecule, both with natural
        # orbitals and with an explicit (3,1) electron split.
        mol = gto.Mole()
        mol.verbose = 0
        mol.atom = [['O', (0.0, 0.0, 0.0)], ['H', (0.0, (- 0.757), 0.587)], ['H', (0.0, 0.757, 0.587)]]
        mol.basis = {'H': 'sto-3g', 'O': '6-31g'}
        mol.build()
        m = scf.RHF(mol).run()
        mc = mcscf.CASCI(m, 4, 4)
        mc.fcisolver = fci.solver(mol)
        mc.natorb = 1
        emc = mc.kernel()[0]
        self.assertAlmostEqual(emc, (- 75.), 7)
        mc = mcscf.CASCI(m, 4, (3, 1))
        mc.fcisolver = fci.solver(mol, False)
        emc = mc.casci()[0]
        self.assertAlmostEqual(emc, (- 75.), 6)

    def test_addons(self):
        # state_average_ / state_specific_ addons: reuse the converged
        # state-average orbitals for a follow-up CASCI.
        mc = mcscf.CASSCF(msym, 4, 4)
        mc.fcisolver = fci.solver(molsym, False)
        mc = mc.state_average_((0.64, 0.36))
        (emc, e_ci, fcivec, mo, mo_energy) = mc.mc1step()[:5]
        self.assertAlmostEqual(emc, (- 75.), 8)
        mc = mcscf.CASCI(msym, 4, 4)
        emc = mc.casci(mo)[0]
        self.assertAlmostEqual(emc, (- 75.), 8)
        mc = mcscf.CASSCF(msym, 4, 4)
        mc = mc.state_specific_(2)
        emc = mc.kernel()[0]
        self.assertAlmostEqual(emc, (- 75.), 8)
class ins(object):
    # Installer helper: sets up localtunnel's `lt` binary via Node.js/npm
    # for the detected platform.  Relies on module globals `system`
    # (platform id) and `pac` (package-manager command), plus the Mylogo
    # and lt helpers defined elsewhere in this file.
    def localin(self):
        Mylogo()
        print('\n\x1b[01;32mInstalling Localtunnel .......\x1b[00m\n')
        if (system == 'termux'):
            # Termux cannot install lt this way; report it as unavailable.
            lt().notinl()
        elif (system == 'ubuntu'):
            os.system((pac + ' update'))
            os.system((pac + ' upgrade -y'))
            os.system((pac + ' install nodejs -y'))
            os.system((pac + ' install npm -y'))
            os.system('sudo npm install -g localtunnel')
            # Probe the usual binary locations to confirm the install.
            if (os.path.exists('/data/data/com.termux/files/usr/bin/lt') or os.path.exists('/usr/bin/lt') or os.path.exists('/usr/local/bin/lt') or os.path.exists('/usr/sbin/lt')):
                lt().locals()
            else:
                lt().notinl()
        else:
            # Generic Debian-like fallback path.
            os.system((pac + ' install sudo -y'))
            os.system((pac + ' update'))
            os.system((pac + ' upgrade -y'))
            os.system((pac + ' install curl python-software-properties -y'))
            # NOTE(review): this command is missing its URL — it pipes
            # *nothing* into `sudo bash` (likely a NodeSource setup script
            # lost in transit).  Confirm against the upstream project; also
            # note that piping remote scripts into sudo bash is a security
            # risk by design here.
            os.system('curl -sL | sudo bash -')
            os.system((pac + ' install nodejs -y'))
            os.system((pac + ' install npm -y'))
            os.system('npm install -g localtunnel')
            if (os.path.exists('/data/data/com.termux/files/usr/bin/lt') or os.path.exists('/usr/bin/lt') or os.path.exists('/usr/local/bin/lt') or os.path.exists('/usr/sbin/lt')):
                lt().locals()
            else:
                lt().notinl()
class ImageNetConverter(DatasetConverter):
    # Converts ILSVRC-2012 into per-class tfrecords following the
    # meta-dataset hierarchical split specification, excluding images that
    # duplicate other datasets.
    def _create_data_spec(self):
        # Collect file names to skip because they duplicate images in
        # Caltech101 / Caltech256 / CUBirds, then build the hierarchical
        # dataset specification.
        self.files_to_skip = set()
        for other_dataset in ('Caltech101', 'Caltech256', 'CUBirds'):
            duplicates_file = os.path.join(AUX_DATA_PATH, 'ImageNet_{}_duplicates.txt'.format(other_dataset))
            with tf.io.gfile.GFile(duplicates_file) as fd:
                duplicates = fd.read()
            lines = duplicates.splitlines()
            for l in lines:
                l = l.strip()
                # Lines starting with '#' are comments.
                if l.startswith('#'):
                    continue
                # Anything after '#' on a data line is a trailing comment.
                file_path = l.split('#')[0].strip()
                file_name = os.path.basename(file_path)
                self.files_to_skip.add(file_name)
        ilsvrc_2012_num_leaf_images_path = FLAGS.ilsvrc_2012_num_leaf_images_path
        if (not ilsvrc_2012_num_leaf_images_path):
            # Default cache location for the per-leaf image counts.
            ilsvrc_2012_num_leaf_images_path = os.path.join(self.records_path, 'num_leaf_images.json')
        specification = imagenet_specification.create_imagenet_specification(learning_spec.Split, self.files_to_skip, ilsvrc_2012_num_leaf_images_path)
        (split_subgraphs, images_per_class, _, _, _, _) = specification
        # class_names is filled in create_dataset_specification_and_records;
        # the spec holds a reference to this dict.
        self.class_names = {}
        self.dataset_spec = ds_spec.HierarchicalDatasetSpecification(self.name, split_subgraphs, images_per_class, self.class_names, self.records_path, '{}.tfrecords')

    def _get_synset_ids(self, split):
        # WordNet ids of the leaf synsets of a split's subgraph, sorted
        # for deterministic class numbering.
        return sorted([synset.wn_id for synset in imagenet_specification.get_leaves(self.dataset_spec.split_subgraphs[split])])

    def create_dataset_specification_and_records(self):
        # Number classes in TRAIN, VALID, TEST order and write one
        # tfrecord file per synset directory under data_root.
        train_synset_ids = self._get_synset_ids(learning_spec.Split.TRAIN)
        valid_synset_ids = self._get_synset_ids(learning_spec.Split.VALID)
        test_synset_ids = self._get_synset_ids(learning_spec.Split.TEST)
        all_synset_ids = ((train_synset_ids + valid_synset_ids) + test_synset_ids)
        set_of_directories = set((entry for entry in tf.io.gfile.listdir(self.data_root) if tf.io.gfile.isdir(os.path.join(self.data_root, entry))))
        assert (set_of_directories == set(all_synset_ids)), "self.data_root should contain a directory whose name is the WordNet id of each synset that is a leaf of any split's subgraph."
        for (class_label, synset_id) in enumerate(all_synset_ids):
            self.class_names[class_label] = synset_id
            class_path = os.path.join(self.data_root, synset_id)
            class_records_path = os.path.join(self.records_path, self.dataset_spec.file_pattern.format(class_label))
            # Skip known duplicates; tolerate unreadable/corrupt images.
            write_tfrecord_from_directory(class_path, class_label, class_records_path, files_to_skip=self.files_to_skip, skip_on_error=True)
@define(kw_only=True)
class Session:
    """Global state of a single pytask run.

    NOTE(review): the original source had a bare ``(kw_only=True)`` expression
    (a SyntaxError) where the attrs ``@define(kw_only=True)`` decorator
    belongs, and ``from_config`` took ``cls`` without ``@classmethod``; both
    are restored here. ``define`` must be imported from ``attrs`` at module
    top — verify.
    """

    # Resolved configuration for this run.
    config: dict[str, Any] = field(factory=dict)
    # Reports produced while collecting tasks.
    collection_reports: list[CollectionReport] = field(factory=list)
    # Task dependency graph.
    dag: nx.DiGraph = field(factory=nx.DiGraph)
    # Plugin hook relay used to dispatch hook calls.
    hook: HookRelay = field(factory=HookRelay)
    # All collected tasks.
    tasks: list[PTask] = field(factory=list)
    dag_report: DagReport | None = None
    execution_reports: list[ExecutionReport] = field(factory=list)
    exit_code: ExitCode = ExitCode.OK
    # Timing bookkeeping; inf means "not started/finished yet".
    collection_start: float = float('inf')
    collection_end: float = float('inf')
    execution_start: float = float('inf')
    execution_end: float = float('inf')
    n_tasks_failed: int = 0
    scheduler: Any = None
    # Set when execution should be aborted early.
    should_stop: bool = False
    warnings: list[WarningReport] = field(factory=list)

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> Session:
        """Construct a session from a configuration mapping, wiring in the
        plugin manager's hook relay when one is present."""
        hook = config['pm'].hook if 'pm' in config else HookRelay()
        return cls(config=config, hook=hook)
class TestUserAgent(BaseTestCase):
    """Checks overriding of the browser's user-agent string."""

    async def test_user_agent(self):
        # The browser default is a Mozilla-style UA string.
        default_ua = await self.page.evaluate('() => navigator.userAgent')
        self.assertIn('Mozilla', default_ua)
        # After overriding, new navigations must report the custom value.
        await self.page.setUserAgent('foobar')
        await self.page.goto(self.url)
        overridden_ua = await self.page.evaluate('() => navigator.userAgent')
        self.assertEqual('foobar', overridden_ua)

    async def test_user_agent_mobile_emulate(self):
        await self.page.goto(self.url + 'static/mobile.html')
        desktop_ua = await self.page.evaluate('navigator.userAgent')
        self.assertIn('Chrome', desktop_ua)
        await self.page.setUserAgent('Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1')
        mobile_ua = await self.page.evaluate('navigator.userAgent')
        self.assertIn('Safari', mobile_ua)
def sort_by_keywords(keywords, args):
    """Split *args* into positional arguments and keyword-tagged groups.

    ``keywords`` maps a keyword token to the number of following arguments it
    consumes: ``0`` marks a boolean flag, a negative count consumes everything
    up to the next keyword.  Returns ``(flat, groups)`` where ``flat`` holds
    the untagged arguments and ``groups`` maps each seen keyword to ``True``
    (flags) or to the list of its arguments.
    """
    positional = []
    grouped = {}
    active = None
    remaining = -1
    for token in args:
        if token in keywords:
            remaining = keywords[token]
            if remaining == 0:
                # Zero-arity keyword: a plain boolean flag.
                grouped[token] = True
                active = None
                remaining = -1
            else:
                active = token
            continue
        if remaining == 0:
            # The active keyword consumed its quota; back to positionals.
            active = None
            remaining = -1
        if active:
            grouped.setdefault(active, []).append(token)
            remaining -= 1
        else:
            positional.append(token)
    return (positional, grouped)
class CLIPFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
    """Feature extractor for CLIP: optionally converts to RGB, resizes,
    center-crops and normalizes images into ``pixel_values``.

    NOTE(review): the original defaults for ``image_mean``/``image_std`` were
    corrupted (``[0., 0.4578275, 0.]`` / ``[0., 0., 0.]`` — a zero std would
    divide by zero in ``normalize``). The canonical CLIP statistics are
    restored below.
    """

    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size=224, resample=Image.BICUBIC, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        # Per-channel statistics CLIP was trained with (OpenAI release).
        self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
        self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
        self.do_convert_rgb = do_convert_rgb

    def __call__(self, images: Union[(Image.Image, np.ndarray, 'torch.Tensor', List[Image.Image], List[np.ndarray], List['torch.Tensor'])], return_tensors: Optional[Union[(str, TensorType)]]=None, **kwargs) -> BatchFeature:
        """Preprocess a single image or a batch of images.

        Returns a :class:`BatchFeature` with key ``pixel_values``; the tensor
        type of the values is controlled by ``return_tensors``.
        """
        # Validate the input: a single image/array/tensor, or a (possibly
        # empty) list of them.
        valid_images = False
        if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images):
            valid_images = True
        elif isinstance(images, (list, tuple)):
            if (len(images) == 0) or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]):
                valid_images = True
        if not valid_images:
            raise ValueError('Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), `List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples).')
        is_batched = bool(isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])))
        if not is_batched:
            # Normalize to a batch of one so the pipeline below is uniform.
            images = [images]
        if self.do_convert_rgb:
            images = [self.convert_rgb(image) for image in images]
        if self.do_resize and (self.size is not None) and (self.resample is not None):
            # default_to_square=False: the short side is resized to `size`.
            images = [self.resize(image=image, size=self.size, resample=self.resample, default_to_square=False) for image in images]
        if self.do_center_crop and (self.crop_size is not None):
            images = [self.center_crop(image, self.crop_size) for image in images]
        if self.do_normalize:
            images = [self.normalize(image=image, mean=self.image_mean, std=self.image_std) for image in images]
        data = {'pixel_values': images}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
class EntryChangeNotificationControl(ResponseControl):
    """Decodes the Entry Change Notification response control sent with
    persistent-search results."""

    controlType = '2.16.840.1.113730.3.4.7'

    def decodeControlValue(self, encodedControlValue):
        """Parse the BER-encoded value into changeType, previousDN and
        changeNumber attributes; the optional fields default to None."""
        value, _ = decoder.decode(encodedControlValue, asn1Spec=EntryChangeNotificationValue())
        self.changeType = int(value.getComponentByName('changeType'))
        prev_dn = value.getComponentByName('previousDN')
        self.previousDN = str(prev_dn) if prev_dn.hasValue() else None
        change_number = value.getComponentByName('changeNumber')
        self.changeNumber = int(change_number) if change_number.hasValue() else None
        return (self.changeType, self.previousDN, self.changeNumber)
def test_index_report(api, initialized_db):
    """An index report only exists after the manifest has been indexed, and
    then it matches the report returned by the index call itself."""
    with fake_security_scanner() as security_scanner:
        manifest = manifest_for('devtable', 'simple', 'latest')
        layers = registry_model.list_manifest_layers(manifest, storage, True)
        # Nothing indexed yet: no report on either side.
        assert manifest.digest not in security_scanner.index_reports
        assert api.index_report(manifest.digest) is None
        # Index the manifest; the stored report must now be retrievable.
        report, state = api.index(manifest, layers)
        assert report is not None
        assert manifest.digest in security_scanner.index_reports
        assert api.index_report(manifest.digest) == report
def test_TrioToken_run_sync_soon_idempotent_requeue() -> None:
    """An idempotent callback that re-queues itself keeps running — it must
    fire at least twice across the checkpoints before the run finishes."""
    calls: list[None] = []

    def requeue(token: _core.TrioToken) -> None:
        calls.append(None)
        # Once the run has finished the token refuses new work; ignore that.
        with suppress(_core.RunFinishedError):
            token.run_sync_soon(requeue, token, idempotent=True)

    async def main() -> None:
        token = _core.current_trio_token()
        token.run_sync_soon(requeue, token, idempotent=True)
        await _core.checkpoint()
        await _core.checkpoint()
        await _core.checkpoint()

    _core.run(main)
    assert len(calls) >= 2
class XDistanceMixin(SmoothPointGetter):
    """Smooth point getter whose X axis is interpreted as distance."""

    _baseResolution = 50
    _extraDepth = 2

    def _getCommonData(self, miscParams, src, tgt):
        # Warm the time cache before sampling any points.
        self._prepareTimeCache(src=src, ancReload=miscParams['ancReload'], maxTime=miscParams['time'])
        rr_map = self._getRepsPerKey(src=src, ancReload=miscParams['ancReload'], time=miscParams['time'])
        return {'rrMap': rr_map}

    def _calculatePoint(self, x, miscParams, src, tgt, commonData):
        # X is the distance to the target.
        application_map = getApplicationPerKey(src=src, distance=x)
        return applyReps(rrMap=commonData['rrMap'], applicationMap=application_map)
class SpatialDropout2D(Dropout):
    """Spatial 2D dropout: drops entire feature maps instead of individual
    elements, which is more effective when adjacent pixels are correlated.

    NOTE(review): the original class body contained a bare
    ``_spatialdropoutNd_support`` expression, which raises NameError when the
    class is created; it has been removed.
    """

    def __init__(self, rate, data_format=None, **kwargs):
        """rate: fraction of feature maps to drop; data_format: one of
        'channels_last' / 'channels_first' (defaults to the backend setting)."""
        super(SpatialDropout2D, self).__init__(rate, **kwargs)
        if data_format is None:
            data_format = K.image_data_format()
        if data_format not in {'channels_last', 'channels_first'}:
            raise ValueError('`data_format` must be in {`"channels_last"`, `"channels_first"`}')
        self.data_format = data_format
        self.input_spec = InputSpec(ndim=4)

    def _get_noise_shape(self, inputs):
        """Broadcast the dropout mask over the spatial dims so each feature
        map is kept or dropped as a whole."""
        input_shape = K.shape(inputs)
        if self.data_format == 'channels_first':
            return (input_shape[0], input_shape[1], 1, 1)
        return (input_shape[0], 1, 1, input_shape[3])
class ConvNextOnnxConfig(OnnxConfig):
    """ONNX export configuration for ConvNext models.

    NOTE(review): ``inputs`` and ``atol_for_validation`` are properties in the
    ``OnnxConfig`` API; the ``@property`` decorators appear to have been lost
    during extraction and are restored here.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for the model's ``pixel_values`` input."""
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-05
def require_digest_auth(resource):
    """Return an HTTP resource guarded by MD5 Digest authentication.

    NOTE(review): the ``resource`` parameter is unused — the realm always
    wraps DIGEST_AUTH_PAGE; confirm this is intended.
    """
    auth_portal = portal.Portal(TestAuthRealm(DIGEST_AUTH_PAGE))
    # Single in-memory test credential: digestuser/digestuser.
    auth_portal.registerChecker(checkers.InMemoryUsernamePasswordDatabaseDontUse(digestuser=b'digestuser'))
    factory = DigestCredentialFactory('md5', b'Digest Auth protected area')
    return HTTPAuthSessionWrapper(auth_portal, [factory])
class Bottleneck(nn.Module):
    """Residual bottleneck block: 1x1 reduce -> 3x3 (stride/dilation) -> 1x1
    expand, with an identity skip connection."""

    expansion = 2

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(Bottleneck, self).__init__()
        # Channel count of the narrow middle of the bottleneck.
        reduced = planes // Bottleneck.expansion
        self.conv1 = nn.Conv2d(inplanes, reduced, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(reduced, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(reduced, reduced, kernel_size=3, stride=stride,
                               padding=dilation, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(reduced, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(reduced, planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        """Apply the block; ``residual`` defaults to the input (identity skip)."""
        if residual is None:
            residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + residual
        return self.relu(out)
def test_jsonparse_scalar_with_key_empty():
    """An empty json string with a key set must raise with a helpful message."""
    context = Context({'ok1': 'ov1', 'jsonParse': {'json': '', 'key': 'out'}})
    expected = 'jsonParse.json exists but is empty. It should be a valid json string for pypyr.steps.jsonparse. For example: \'{"key1": "value1", "key2": "value2"}\''
    with pytest.raises(KeyInContextHasNoValueError) as err_info:
        jsonparse.run_step(context)
    assert str(err_info.value) == expected
@dataclass
class RegNetCfg:
    """Configuration of a RegNet model (widths, depth, block options).

    NOTE(review): restores the ``@dataclass`` decorator dropped during
    extraction — without it ``RegNetCfg(depth=...)`` raises TypeError;
    requires ``from dataclasses import dataclass`` at module top.
    """

    depth: int = 21
    # RegNet width-generation parameters: initial width, slope, multiplier.
    w0: int = 80
    wa: float = 42.63
    wm: float = 2.66
    group_size: int = 24
    bottle_ratio: float = 1.0
    # 0.0 disables squeeze-and-excitation.
    se_ratio: float = 0.0
    stem_width: int = 32
    downsample: Optional[str] = 'conv1x1'
    linear_out: bool = False
    preact: bool = False
    num_features: int = 0
    act_layer: Union[str, Callable] = 'relu'
    norm_layer: Union[str, Callable] = 'batchnorm'
def _test():
    """Smoke-test the ShaResNet family: build, count parameters, forward and
    backward on a dummy batch.

    NOTE(review): the expected parameter counts for every model except
    sharesnet18 were lost in the original (empty ``==`` comparisons made this
    a SyntaxError). Until the exact counts are restored, those models only get
    a positivity sanity check.
    """
    import torch
    pretrained = False
    models = [sharesnet18, sharesnet34, sharesnet50, sharesnet50b,
              sharesnet101, sharesnet101b, sharesnet152, sharesnet152b]
    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        assert (model != sharesnet18) or (weight_count == 8556072)
        # TODO: restore exact per-model parameter counts.
        assert weight_count > 0
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
class LinearAttention(nn.Module):
    """Linear (efficient) attention over spatial positions of a 2D feature map."""

    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.heads = heads
        inner_dim = dim_head * heads
        # Single conv producing q, k and v stacked along channels.
        self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(inner_dim, dim, 1)

    def forward(self, x):
        batch, _, height, width = x.shape
        qkv = self.to_qkv(x)
        q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3)
        k = k.softmax(dim=-1)
        # Contract keys with values first, so cost is linear in the number of
        # spatial positions rather than quadratic.
        ctx = torch.einsum('bhdn,bhen->bhde', k, v)
        attended = torch.einsum('bhde,bhdn->bhen', ctx, q)
        attended = rearrange(attended, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=height, w=width)
        return self.to_out(attended)
class BoundMethod(UnboundMethod):
    """A method bound to an instance (or to a class, for classmethods)."""

    special_attributes = objectmodel.BoundMethodModel()

    def __init__(self, proxy: ((nodes.FunctionDef | nodes.Lambda) | UnboundMethod), bound: SuccessfulInferenceResult) -> None:
        super().__init__(proxy)
        # The object the method is bound to (what becomes `self`/`cls`).
        self.bound = bound

    def implicit_parameters(self) -> Literal[(0, 1)]:
        # `__new__` is an implicit staticmethod, so no argument is implicit.
        if (self.name == '__new__'):
            return 0
        return 1

    def is_bound(self) -> Literal[True]:
        return True

    def _infer_type_new_call(self, caller: nodes.Call, context: InferenceContext) -> (nodes.ClassDef | None):
        """Infer a `type.__new__(mcs, name, bases, attrs)` call and synthesize
        the resulting ClassDef; returns None when any argument does not infer
        to the expected shape, raises InferenceError when inference yields
        nothing at all."""
        from astroid.nodes import Pass
        try:
            mcs = next(caller.args[0].infer(context=context))
        except StopIteration as e:
            raise InferenceError(context=context) from e
        # First argument must be a metaclass (a subtype of builtins.type).
        if (not isinstance(mcs, nodes.ClassDef)):
            return None
        if (not mcs.is_subtype_of('builtins.type')):
            return None
        try:
            name = next(caller.args[1].infer(context=context))
        except StopIteration as e:
            raise InferenceError(context=context) from e
        # Second argument: the class name, a string constant.
        if (not isinstance(name, nodes.Const)):
            return None
        if (not isinstance(name.value, str)):
            return None
        try:
            bases = next(caller.args[2].infer(context=context))
        except StopIteration as e:
            raise InferenceError(context=context) from e
        # Third argument: a tuple whose elements all infer to classes.
        if (not isinstance(bases, nodes.Tuple)):
            return None
        try:
            inferred_bases = [next(elt.infer(context=context)) for elt in bases.elts]
        except StopIteration as e:
            raise InferenceError(context=context) from e
        if any(((not isinstance(base, nodes.ClassDef)) for base in inferred_bases)):
            return None
        try:
            attrs = next(caller.args[3].infer(context=context))
        except StopIteration as e:
            raise InferenceError(context=context) from e
        # Fourth argument: the class namespace, a dict literal.
        if (not isinstance(attrs, nodes.Dict)):
            return None
        # Build the new class's locals from the string-keyed dict entries;
        # non-string (or non-constant) keys are silently skipped.
        cls_locals: dict[(str, list[InferenceResult])] = collections.defaultdict(list)
        for (key, value) in attrs.items:
            try:
                key = next(key.infer(context=context))
            except StopIteration as e:
                raise InferenceError(context=context) from e
            try:
                value = next(value.infer(context=context))
            except StopIteration as e:
                raise InferenceError(context=context) from e
            if (isinstance(key, nodes.Const) and isinstance(key.value, str)):
                cls_locals[key.value].append(value)
        # Synthesize the ClassDef at the call site's location.
        cls = mcs.__class__(name=name.value, lineno=(caller.lineno or 0), col_offset=(caller.col_offset or 0), parent=caller, end_lineno=caller.end_lineno, end_col_offset=caller.end_col_offset)
        # A ClassDef body cannot be empty; use a synthetic `pass`.
        empty = Pass(parent=cls, lineno=caller.lineno, col_offset=caller.col_offset, end_lineno=caller.end_lineno, end_col_offset=caller.end_col_offset)
        cls.postinit(bases=bases.elts, body=[empty], decorators=None, newstyle=True, metaclass=mcs, keywords=[])
        cls.locals = cls_locals
        return cls

    def infer_call_result(self, caller: (SuccessfulInferenceResult | None), context: (InferenceContext | None)=None) -> Iterator[InferenceResult]:
        """Infer the result of calling the bound method; special-cases the
        three-argument form of `type.__new__`."""
        context = bind_context_to_node(context, self.bound)
        if (isinstance(self.bound, nodes.ClassDef) and (self.bound.name == 'type') and (self.name == '__new__') and isinstance(caller, nodes.Call) and (len(caller.args) == 4)):
            new_cls = self._infer_type_new_call(caller, context)
            if new_cls:
                return iter((new_cls,))
        return super().infer_call_result(caller, context)

    def bool_value(self, context: (InferenceContext | None)=None) -> Literal[True]:
        # A bound method is always truthy.
        return True
class LoopNonlocalControl(NonlocalControl):
    """Nonlocal control for a loop body: `break`/`continue` jump to the
    loop's blocks, while `return` is delegated to the enclosing control."""

    def __init__(self, outer: NonlocalControl, continue_block: BasicBlock, break_block: BasicBlock) -> None:
        self.outer = outer
        self.continue_block = continue_block
        self.break_block = break_block

    def gen_break(self, builder: IRBuilder, line: int) -> None:
        # Jump past the end of the loop.
        builder.add(Goto(self.break_block))

    def gen_continue(self, builder: IRBuilder, line: int) -> None:
        # Jump back to the loop's condition/step block.
        builder.add(Goto(self.continue_block))

    def gen_return(self, builder: IRBuilder, value: Value, line: int) -> None:
        # `return` is not loop-specific; defer to the surrounding control.
        self.outer.gen_return(builder, value, line)
def test_verbose_output(testdir):
    # With -v, nested describe blocks must appear as '::'-joined node ids,
    # with one PASSED and one FAILED leaf in declaration order.
    testdir.makepyfile('\n def describe_something():\n def describe_nested_ok():\n def passes():\n assert True\n def describe_nested_bad():\n def fails():\n assert False\n ')
    result = testdir.runpytest('-v')
    result.assert_outcomes(passed=1, failed=1)
    # Keep only the lines for this test module and normalize whitespace.
    output = [' '.join(line.split('::', 2)[2].split()) for line in result.outlines if line.startswith('test_verbose_output.py::describe_something::')]
    assert (output == ['describe_nested_ok::passes PASSED [ 50%]', 'describe_nested_bad::fails FAILED [100%]'])
class TestDicke():
def test_num_dicke_states(self):
N_list = [1, 2, 3, 4, 5, 6, 9, 10, 20, 100, 123]
dicke_states = [num_dicke_states(i) for i in N_list]
assert_array_equal(dicke_states, [2, 4, 6, 9, 12, 16, 30, 36, 121, 2601, 3906])
N = (- 1)
assert_raises(ValueError, num_dicke_states, N)
N = 0.2
assert_raises(ValueError, num_dicke_states, N)
def test_num_tls(self):
N_dicke = [2, 4, 6, 9, 12, 16, 30, 36, 121, 2601, 3906]
N = [1, 2, 3, 4, 5, 6, 9, 10, 20, 100, 123]
calculated_N = [num_tls(i) for i in N_dicke]
assert_array_equal(calculated_N, N)
def test_num_dicke_ladders(self):
ndl_true = [1, 2, 2, 3, 3, 4, 4, 5, 5]
ndl = [num_dicke_ladders(N) for N in range(1, 10)]
assert_array_equal(ndl, ndl_true)
def test_get_blocks(self):
N_list = [1, 2, 5, 7]
blocks = [np.array([2]), np.array([3, 4]), np.array([6, 10, 12]), np.array([8, 14, 18, 20])]
calculated_blocks = [get_blocks(i) for i in N_list]
for (i, j) in zip(calculated_blocks, blocks):
assert_array_equal(i, j)
def test_j_vals(self):
N_list = [1, 2, 3, 4, 7]
j_vals_real = [np.array([0.5]), np.array([0.0, 1.0]), np.array([0.5, 1.5]), np.array([0.0, 1.0, 2.0]), np.array([0.5, 1.5, 2.5, 3.5])]
j_vals_calc = [j_vals(i) for i in N_list]
for (i, j) in zip(j_vals_calc, j_vals_real):
assert_array_equal(i, j)
def test_m_vals(self):
j_list = [0.5, 1, 1.5, 2, 2.5]
m_real = [np.array([(- 0.5), 0.5]), np.array([(- 1), 0, 1]), np.array([(- 1.5), (- 0.5), 0.5, 1.5]), np.array([(- 2), (- 1), 0, 1, 2]), np.array([(- 2.5), (- 1.5), (- 0.5), 0.5, 1.5, 2.5])]
m_calc = [m_vals(i) for i in j_list]
for (i, j) in zip(m_real, m_calc):
assert_array_equal(i, j)
def test_dicke_blocks(self):
N = 3
true_matrix = (excited(N) + dicke(N, 0.5, 0.5)).unit()
test_blocks = dicke_blocks(true_matrix)
test_matrix = Qobj(block_diag(test_blocks))
assert (test_matrix == true_matrix)
N = 4
true_matrix = Qobj(block_matrix(N)).unit()
test_blocks = dicke_blocks(true_matrix)
test_matrix = Qobj(block_diag(test_blocks))
assert (test_matrix == true_matrix)
def test_dicke_blocks_full(self):
N = 3
test_blocks = dicke_blocks_full(excited(3))
test_matrix = Qobj(block_diag(test_blocks))
true_expanded = np.zeros((8, 8))
true_expanded[(0, 0)] = 1.0
assert (test_matrix == Qobj(true_expanded))
def test_dicke_function_trace(self):
N = 3
rho_mixed = (excited(N) + dicke(N, 0.5, 0.5)).unit()
f = (lambda x: x)
test_val = dicke_function_trace(f, rho_mixed)
true_val = Qobj(block_diag(dicke_blocks_full(rho_mixed))).tr()
true_val2 = rho_mixed.tr()
assert_almost_equal(test_val, true_val)
assert_almost_equal(test_val, true_val2)
f = (lambda rho: (rho ** 3))
test_val3 = dicke_function_trace(f, rho_mixed)
true_val3 = (Qobj(block_diag(dicke_blocks_full(rho_mixed))) ** 3).tr()
assert_almost_equal(test_val3, true_val3)
N = 4
rho_mixed = (excited(N) + dicke(N, 0, 0)).unit()
f = (lambda x: x)
test_val = dicke_function_trace(f, rho_mixed)
true_val = Qobj(block_diag(dicke_blocks_full(rho_mixed))).tr()
true_val2 = rho_mixed.tr()
assert_almost_equal(test_val, true_val)
assert_almost_equal(test_val, true_val2)
f = (lambda rho: (rho ** 3))
test_val3 = dicke_function_trace(f, rho_mixed)
true_val3 = (Qobj(block_diag(dicke_blocks_full(rho_mixed))) ** 3).tr()
assert np.allclose(test_val3, true_val3)
def test_entropy_vn_dicke(self):
N = 3
rho_mixed = (excited(N) + dicke(N, 0.5, 0.5)).unit()
test_val = entropy_vn_dicke(rho_mixed)
true_val = entropy_vn(Qobj(block_diag(dicke_blocks_full(rho_mixed))))
assert np.allclose(test_val, true_val)
def test_purity_dicke(self):
N = 3
rho_mixed = (excited(N) + dicke(N, 0.5, 0.5)).unit()
test_val = purity_dicke(rho_mixed)
true_val = Qobj(block_diag(dicke_blocks_full(rho_mixed))).purity()
assert np.allclose(test_val, true_val)
def test_get_index(self):
N = 1
jmm1_list = [(0.5, 0.5, 0.5), (0.5, 0.5, (- 0.5)), (0.5, (- 0.5), 0.5), (0.5, (- 0.5), (- 0.5))]
indices = [(0, 0), (0, 1), (1, 0), (1, 1)]
blocks = get_blocks(N)
calculated_indices = [get_index(N, jmm1[0], jmm1[1], jmm1[2], blocks) for jmm1 in jmm1_list]
assert_array_almost_equal(calculated_indices, indices)
N = 2
blocks = get_blocks(N)
jmm1_list = [(1, 1, 1), (1, 1, 0), (1, 1, (- 1)), (1, 0, 1), (1, 0, 0), (1, 0, (- 1)), (1, (- 1), 1), (1, (- 1), 0), (1, (- 1), (- 1)), (0, 0, 0)]
indices = [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2), (3, 3)]
calculated_indices = [get_index(N, jmm1[0], jmm1[1], jmm1[2], blocks) for jmm1 in jmm1_list]
assert_array_almost_equal(calculated_indices, indices)
N = 3
blocks = get_blocks(N)
jmm1_list = [(1.5, 1.5, 1.5), (1.5, 1.5, 0.5), (1.5, 1.5, (- 0.5)), (1.5, 1.5, (- 1.5)), (1.5, 0.5, 0.5), (1.5, (- 0.5), (- 0.5)), (1.5, (- 1.5), (- 1.5)), (1.5, (- 1.5), 1.5), (0.5, 0.5, 0.5), (0.5, 0.5, (- 0.5)), (0.5, (- 0.5), 0.5), (0.5, (- 0.5), (- 0.5))]
indices = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 1), (2, 2), (3, 3), (3, 0), (4, 4), (4, 5), (5, 4), (5, 5)]
calculated_indices = [get_index(N, jmm1[0], jmm1[1], jmm1[2], blocks) for jmm1 in jmm1_list]
assert_array_almost_equal(calculated_indices, indices)
def test_jmm1_dictionary(self):
(d1, d2, d3, d4) = jmm1_dictionary(1)
d1_correct = {(0, 0): (0.5, 0.5, 0.5), (0, 1): (0.5, 0.5, (- 0.5)), (1, 0): (0.5, (- 0.5), 0.5), (1, 1): (0.5, (- 0.5), (- 0.5))}
d2_correct = {(0.5, (- 0.5), (- 0.5)): (1, 1), (0.5, (- 0.5), 0.5): (1, 0), (0.5, 0.5, (- 0.5)): (0, 1), (0.5, 0.5, 0.5): (0, 0)}
d3_correct = {0: (0.5, 0.5, 0.5), 1: (0.5, 0.5, (- 0.5)), 2: (0.5, (- 0.5), 0.5), 3: (0.5, (- 0.5), (- 0.5))}
d4_correct = {(0.5, (- 0.5), (- 0.5)): 3, (0.5, (- 0.5), 0.5): 2, (0.5, 0.5, (- 0.5)): 1, (0.5, 0.5, 0.5): 0}
assert_equal(d1, d1_correct)
assert_equal(d2, d2_correct)
assert_equal(d3, d3_correct)
assert_equal(d4, d4_correct)
(d1, d2, d3, d4) = jmm1_dictionary(2)
d1_correct = {(3, 3): (0.0, (- 0.0), (- 0.0)), (2, 2): (1.0, (- 1.0), (- 1.0)), (2, 1): (1.0, (- 1.0), 0.0), (2, 0): (1.0, (- 1.0), 1.0), (1, 2): (1.0, 0.0, (- 1.0)), (1, 1): (1.0, 0.0, 0.0), (1, 0): (1.0, 0.0, 1.0), (0, 2): (1.0, 1.0, (- 1.0)), (0, 1): (1.0, 1.0, 0.0), (0, 0): (1.0, 1.0, 1.0)}
d2_correct = {(0.0, (- 0.0), (- 0.0)): (3, 3), (1.0, (- 1.0), (- 1.0)): (2, 2), (1.0, (- 1.0), 0.0): (2, 1), (1.0, (- 1.0), 1.0): (2, 0), (1.0, 0.0, (- 1.0)): (1, 2), (1.0, 0.0, 0.0): (1, 1), (1.0, 0.0, 1.0): (1, 0), (1.0, 1.0, (- 1.0)): (0, 2), (1.0, 1.0, 0.0): (0, 1), (1.0, 1.0, 1.0): (0, 0)}
d3_correct = {15: (0.0, (- 0.0), (- 0.0)), 10: (1.0, (- 1.0), (- 1.0)), 9: (1.0, (- 1.0), 0.0), 8: (1.0, (- 1.0), 1.0), 6: (1.0, 0.0, (- 1.0)), 5: (1.0, 0.0, 0.0), 4: (1.0, 0.0, 1.0), 2: (1.0, 1.0, (- 1.0)), 1: (1.0, 1.0, 0.0), 0: (1.0, 1.0, 1.0)}
d4_correct = {(0.0, (- 0.0), (- 0.0)): 15, (1.0, (- 1.0), (- 1.0)): 10, (1.0, (- 1.0), 0.0): 9, (1.0, (- 1.0), 1.0): 8, (1.0, 0.0, (- 1.0)): 6, (1.0, 0.0, 0.0): 5, (1.0, 0.0, 1.0): 4, (1.0, 1.0, (- 1.0)): 2, (1.0, 1.0, 0.0): 1, (1.0, 1.0, 1.0): 0}
assert_equal(d1, d1_correct)
assert_equal(d2, d2_correct)
assert_equal(d3, d3_correct)
assert_equal(d4, d4_correct)
def test_lindbladian(self):
N = 1
gCE = 0.5
gCD = 0.5
gCP = 0.5
gE = 0.1
gD = 0.1
gP = 0.1
system = Dicke(N=N, emission=gE, pumping=gP, dephasing=gD, collective_emission=gCE, collective_pumping=gCP, collective_dephasing=gCD)
lindbladian = system.lindbladian()
Ldata = np.zeros((4, 4), dtype='complex')
Ldata[0] = [(- 0.6), 0, 0, 0.6]
Ldata[1] = [0, (- 0.9), 0, 0]
Ldata[2] = [0, 0, (- 0.9), 0]
Ldata[3] = [0.6, 0, 0, (- 0.6)]
lindbladian_correct = Qobj(Ldata, dims=[[[2], [2]], [[2], [2]]])
assert_array_almost_equal(lindbladian.full(), Ldata)
N = 2
gCE = 0.5
gCD = 0.5
gCP = 0.5
gE = 0.1
gD = 0.1
gP = 0.1
system = Dicke(N=N, emission=gE, pumping=gP, dephasing=gD, collective_emission=gCE, collective_pumping=gCP, collective_dephasing=gCD)
lindbladian = system.lindbladian()
Ldata = np.zeros((16, 16), dtype='complex')
(Ldata[0][0], Ldata[0][5], Ldata[0][15]) = ((- 1.2), 1.1, 0.1)
(Ldata[(1, 1)], Ldata[(1, 6)]) = ((- 2), 1.1)
Ldata[(2, 2)] = (- 2.3)
(Ldata[(4, 4)], Ldata[(4, 9)]) = ((- 2), 1.1)
(Ldata[(5, 0)], Ldata[(5, 5)], Ldata[(5, 10)], Ldata[(5, 15)]) = (1.1, (- 2.25), 1.1, 0.05)
(Ldata[(6, 1)], Ldata[(6, 6)]) = (1.1, (- 2))
Ldata[(8, 8)] = (- 2.3)
(Ldata[(9, 4)], Ldata[(9, 9)]) = (1.1, (- 2))
(Ldata[(10, 5)], Ldata[(10, 10)], Ldata[(10, 15)]) = (1.1, (- 1.2), 0.1)
(Ldata[(15, 0)], Ldata[(15, 5)], Ldata[(15, 10)], Ldata[(15, 15)]) = (0.1, 0.05, 0.1, (- 0.25))
lindbladian_correct = Qobj(Ldata, dims=[[[4], [4]], [[4], [4]]])
assert_array_almost_equal(lindbladian.full(), Ldata)
def test_gamma(self):
N = 6
collective_emission = 1.0
emission = 1.0
dephasing = 1.0
pumping = 1.0
collective_pumping = 1.0
model = _Dicke(N, collective_emission=collective_emission, emission=emission, dephasing=dephasing, pumping=pumping, collective_pumping=collective_pumping)
tau_calculated = [model.gamma3((3, 1, 1)), model.gamma2((2, 1, 1)), model.gamma4((1, 1, 1)), model.gamma5((3, 0, 0)), model.gamma1((2, 0, 0)), model.gamma6((1, 0, 0)), model.gamma7((3, (- 1), (- 1))), model.gamma8((2, (- 1), (- 1))), model.gamma9((1, (- 1), (- 1)))]
tau_real = [2.0, 8.0, 0.333333, 1.5, (- 19.5), 0.666667, 2.0, 8.0, 0.333333]
assert_array_almost_equal(tau_calculated, tau_real)
def test_jspin(self):
N_list = [1, 2, 3, 4, 7]
for nn in N_list:
[jx, jy, jz] = jspin(nn)
(jp, jm) = (jspin(nn, '+'), jspin(nn, '-'))
test_jxjy = ((jx * jy) - (jy * jx))
true_jxjy = (1j * jz)
test_jpjm = ((jp * jm) - (jm * jp))
true_jpjm = (2 * jz)
assert_array_almost_equal(test_jxjy.full(), true_jxjy.full())
assert_array_almost_equal(test_jpjm.full(), true_jpjm.full())
[jx, jy, jz] = jspin(nn)
(jp, jm) = (jspin(nn, '+'), jspin(nn, '-'))
test_jxjy = ((jx * jy) - (jy * jx))
true_jxjy = (1j * jz)
test_jpjm = ((jp * jm) - (jm * jp))
true_jpjm = (2 * jz)
assert_array_almost_equal(test_jxjy.full(), true_jxjy.full())
assert_array_almost_equal(test_jpjm.full(), true_jpjm.full())
assert_array_almost_equal(jspin(nn, 'x').full(), jx.full())
assert_array_almost_equal(jspin(nn, 'y').full(), jy.full())
assert_array_almost_equal(jspin(nn, 'z').full(), jz.full())
assert_array_almost_equal(jspin(nn, '+').full(), jp.full())
assert_array_almost_equal(jspin(nn, '-').full(), jm.full())
assert_raises(TypeError, jspin, nn, 'q')
def test_j_min_(self):
even = [2, 4, 6, 8]
odd = [1, 3, 5, 7]
for i in even:
assert (j_min(i) == 0)
for i in odd:
assert (j_min(i) == 0.5)
def test_energy_degeneracy(self):
true_en_deg = [1, 1, 1, 1, 1]
true_en_deg_even = [2, 6, 20]
true_en_deg_odd = [1, 1, 3, 3, 35, 35]
test_en_deg = []
test_en_deg_even = []
test_en_deg_odd = []
for nn in [1, 2, 3, 4, 7]:
test_en_deg.append(energy_degeneracy(nn, (nn / 2)))
for nn in [2, 4, 6]:
test_en_deg_even.append(energy_degeneracy(nn, 0))
for nn in [1, 3, 7]:
test_en_deg_odd.append(energy_degeneracy(nn, (1 / 2)))
test_en_deg_odd.append(energy_degeneracy(nn, ((- 1) / 2)))
assert_array_equal(test_en_deg, true_en_deg)
assert_array_equal(test_en_deg_even, true_en_deg_even)
assert_array_equal(test_en_deg_odd, true_en_deg_odd)
def test_state_degeneracy(self):
true_state_deg = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 14, 14, 42, 42]
state_deg = []
state_deg = []
for nn in [1, 2, 3, 4, 7, 8, 9, 10]:
state_deg.append(state_degeneracy(nn, (nn / 2)))
for nn in [1, 2, 3, 4, 7, 8, 9, 10]:
state_deg.append(state_degeneracy(nn, ((nn / 2) % 1)))
assert_array_equal(state_deg, true_state_deg)
assert_raises(ValueError, state_degeneracy, 2, (- 1))
def test_m_degeneracy(self):
true_m_deg = [1, 2, 2, 3, 4, 5, 5, 6]
m_deg = []
for nn in [1, 2, 3, 4, 7, 8, 9, 10]:
m_deg.append(m_degeneracy(nn, ((- (nn / 2)) % 1)))
assert_array_equal(m_deg, true_m_deg)
assert_raises(ValueError, m_degeneracy, 6, (- 6))
def test_ap(self):
true_ap_list = [110, 108, 104, 98, 90, 54, 38, 20, 0]
ap_list = []
for m in [0, 1, 2, 3, 4, 7, 8, 9, 10]:
ap_list.append((ap(10, m) ** 2))
assert_almost_equal(ap_list, true_ap_list)
def test_am(self):
true_am_list = [110, 110, 108, 104, 98, 68, 54, 38, 20]
am_list = []
for m in [0, 1, 2, 3, 4, 7, 8, 9, 10]:
am_list.append((am(10, m) ** 2))
assert_almost_equal(am_list, true_am_list)
def test_spin_algebra(self):
sx1 = [[(0.0 + 0j), (0.0 + 0j), (0.5 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.5 + 0j)], [(0.5 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.5 + 0j), (0.0 + 0j), (0.0 + 0j)]]
sx2 = [[(0.0 + 0j), (0.5 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.5 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.5 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.5 + 0j), (0.0 + 0j)]]
sy1 = [[(0.0 + 0j), (0.0 + 0j), (0.0 - 0.5j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 - 0.5j)], [(0.0 + 0.5j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0.5j), (0.0 + 0j), (0.0 + 0j)]]
sy2 = [[(0.0 + 0j), (0.0 - 0.5j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0.5j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 - 0.5j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0.5j), (0.0 + 0j)]]
sz1 = [[(0.5 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.5 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), ((- 0.5) + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), ((- 0.5) + 0j)]]
sz2 = [[(0.5 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), ((- 0.5) + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.5 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), ((- 0.5) + 0j)]]
sp1 = [[(0.0 + 0j), (0.0 + 0j), (1.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)]]
sp2 = [[(0.0 + 0j), (1.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)]]
sm1 = [[(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (1.0 + 0j), (0.0 + 0j), (0.0 + 0j)]]
sm2 = [[(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (1.0 + 0j), (0.0 + 0j)]]
assert_array_equal(spin_algebra(2, 'x')[0].full(), sx1)
assert_array_equal(spin_algebra(2, 'x')[1].full(), sx2)
assert_array_equal(spin_algebra(2, 'y')[0].full(), sy1)
assert_array_equal(spin_algebra(2, 'y')[1].full(), sy2)
assert_array_equal(spin_algebra(2, 'z')[0].full(), sz1)
assert_array_equal(spin_algebra(2, 'z')[1].full(), sz2)
assert_array_equal(spin_algebra(2, '+')[0].full(), sp1)
assert_array_equal(spin_algebra(2, '+')[1].full(), sp2)
assert_array_equal(spin_algebra(2, '-')[0].full(), sm1)
assert_array_equal(spin_algebra(2, '-')[1].full(), sm2)
assert_raises(TypeError, spin_algebra, 2, 'q')
def test_collective_algebra(self):
    """Test the collective spin operators `jspin` in the uncoupled basis.

    Compares the N=2 collective operators (x, y, z, +, -) against explicit
    4x4 matrices, and checks that an invalid operator label raises.
    """
    jx_n2 = [[(0.0 + 0j), (0.5 + 0j), (0.5 + 0j), (0.0 + 0j)], [(0.5 + 0j), (0.0 + 0j), (0.0 + 0j), (0.5 + 0j)], [(0.5 + 0j), (0.0 + 0j), (0.0 + 0j), (0.5 + 0j)], [(0.0 + 0j), (0.5 + 0j), (0.5 + 0j), (0.0 + 0j)]]
    jy_n2 = [[(0.0 + 0j), (0.0 - 0.5j), (0.0 - 0.5j), (0.0 + 0j)], [(0.0 + 0.5j), (0.0 + 0j), (0.0 + 0j), (0.0 - 0.5j)], [(0.0 + 0.5j), (0.0 + 0j), (0.0 + 0j), (0.0 - 0.5j)], [(0.0 + 0j), (0.0 + 0.5j), (0.0 + 0.5j), (0.0 + 0j)]]
    jz_n2 = [[(1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), ((- 1.0) + 0j)]]
    jp_n2 = [[(0.0 + 0j), (1.0 + 0j), (1.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)]]
    jm_n2 = [[(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)], [(0.0 + 0j), (1.0 + 0j), (1.0 + 0j), (0.0 + 0j)]]
    assert_array_equal(jspin(2, 'x', basis='uncoupled').full(), jx_n2)
    assert_array_equal(jspin(2, 'y', basis='uncoupled').full(), jy_n2)
    assert_array_equal(jspin(2, 'z', basis='uncoupled').full(), jz_n2)
    assert_array_equal(jspin(2, '+', basis='uncoupled').full(), jp_n2)
    assert_array_equal(jspin(2, '-', basis='uncoupled').full(), jm_n2)
    # Fix: this test exercises jspin, but the original asserted on
    # spin_algebra (copy-paste from test_spin_algebra, which already
    # covers that case on its own last line).
    assert_raises(TypeError, jspin, 2, 'q')
def test_block_matrix(self):
    """Test `block_matrix` against explicit block-diagonal matrices for N=1..3."""
    expected = {
        1: [[1.0, 1.0], [1.0, 1.0]],
        2: [[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]],
        3: [[1.0, 1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 1.0, 1.0]],
    }
    for n, matrix in expected.items():
        assert_equal(Qobj(matrix), Qobj(block_matrix(n)))
def test_dicke_basis(self):
    """Build a density matrix from |j, m><j, m1| weights with `dicke_basis`
    and compare it against an explicitly constructed matrix (N=2).
    """
    N = 2
    true_dicke_basis = np.zeros((4, 4))
    true_dicke_basis[(1, 1)] = 0.5
    # -1 wraps to the last row/column: the |j=0, m=0> block for N=2.
    true_dicke_basis[((- 1), (- 1))] = 0.5
    # Off-diagonal coherences |1, 1><1, -1| and its conjugate position.
    true_dicke_basis[(0, 2)] = 0.3
    true_dicke_basis[(2, 0)] = 0.3
    true_dicke_basis = Qobj(true_dicke_basis)
    # Each dict maps a (j, m, m1) tuple to the weight of |j, m><j, m1|.
    jmm1_1 = {((N / 2), 0, 0): 0.5}
    jmm1_2 = {(0, 0, 0): 0.5}
    jmm1_3 = {((N / 2), (N / 2), ((N / 2) - 2)): 0.3}
    jmm1_4 = {((N / 2), ((N / 2) - 2), (N / 2)): 0.3}
    db1 = dicke_basis(2, jmm1_1)
    db2 = dicke_basis(2, jmm1_2)
    db3 = dicke_basis(2, jmm1_3)
    db4 = dicke_basis(2, jmm1_4)
    # The four contributions together should reproduce the full matrix.
    test_dicke_basis = (((db1 + db2) + db3) + db4)
    assert_equal(test_dicke_basis, true_dicke_basis)
    # The jmm1 dict argument is mandatory; None must fail.
    assert_raises(AttributeError, dicke_basis, N, None)
def test_dicke(self):
    """Test the `dicke` state constructor for N=2.

    Checks the excited, superradiant and subradiant |j, m> projectors
    against one-hot matrices.
    """
    cases = [
        ((1, 1), (0, 0)),    # excited state |1, 1>
        ((1, 0), (1, 1)),    # superradiant state |1, 0>
        ((0, 0), (-1, -1)),  # subradiant state |0, 0> (last diagonal slot)
    ]
    for (j, m), index in cases:
        expected = np.zeros((4, 4))
        expected[index] = 1
        assert_equal(dicke(2, j, m), Qobj(expected))
def test_excited(self):
    """Test the fully `excited` state in the Dicke and uncoupled bases."""
    # In the Dicke basis the excited state occupies the first diagonal slot;
    # the reduced dimension is 6 for N=3 and 9 for N=4.
    for n_tls, dim in ((3, 6), (4, 9)):
        expected = np.zeros((dim, dim))
        expected[0, 0] = 1
        assert_equal(excited(n_tls), Qobj(expected))
    # Uncoupled basis: full 2^N space with tensor-product dims.
    state = excited(2, basis='uncoupled')
    assert_array_equal(state.dims, [[2, 2], [2, 2]])
    assert_array_equal(state.shape, (4, 4))
    assert_almost_equal(state.full()[0, 0], 1 + 0j)
def test_superradiant(self):
    """Test the `superradiant` state in the Dicke and uncoupled bases."""
    # The superradiant state |N/2, 0> (or |N/2, 1/2> for odd N) sits at
    # diagonal index 1 for N=3 and index 2 for N=4.
    for n_tls, dim, slot in ((3, 6, 1), (4, 9, 2)):
        expected = np.zeros((dim, dim))
        expected[slot, slot] = 1
        assert_equal(superradiant(n_tls), Qobj(expected))
    # Uncoupled basis: full 2^N space with tensor-product dims.
    state = superradiant(2, basis='uncoupled')
    assert_array_equal(state.dims, [[2, 2], [2, 2]])
    assert_array_equal(state.shape, (4, 4))
    assert_almost_equal(state.full()[1, 1], 1 + 0j)
def test_ghz(self):
    """Test the N=2 GHZ state in the Dicke and uncoupled bases."""
    expected_dicke = Qobj([[0.5, 0, 0.5, 0], [0, 0, 0, 0], [0.5, 0, 0.5, 0], [0, 0, 0, 0]])
    assert_equal(ghz(2), expected_dicke)
    expected_uncoupled = Qobj([[0.5, 0, 0, 0.5], [0, 0, 0, 0], [0, 0, 0, 0], [0.5, 0, 0, 0.5]])
    # The uncoupled-basis state carries tensor-product dims.
    expected_uncoupled.dims = [[2, 2], [2, 2]]
    assert_equal(ghz(2, 'uncoupled'), expected_uncoupled)
def test_ground(self):
    """Test the `ground` state for N=2 in the Dicke and uncoupled bases."""
    # Dicke basis: ground state |1, -1> is the third diagonal slot.
    expected_dicke = np.zeros((4, 4), dtype=np.complex128)
    expected_dicke[2, 2] = 1
    # Uncoupled basis: all spins down is the last diagonal slot.
    expected_uncoupled = np.zeros((4, 4), dtype=np.complex128)
    expected_uncoupled[3, 3] = 1
    state_dicke = ground(2)
    state_uncoupled = ground(2, 'uncoupled')
    assert_array_equal(state_dicke.full(), expected_dicke)
    assert_array_equal(state_dicke.dims, [[4], [4]])
    assert_array_equal(state_uncoupled.full(), expected_uncoupled)
    assert_array_equal(state_uncoupled.dims, [[2, 2], [2, 2]])
def test_identity_uncoupled(self):
    """Test the uncoupled-basis identity operator for N=4 spins."""
    identity = identity_uncoupled(4)
    # dims must be the N-fold tensor product of qubits; diagonal all ones.
    assert_equal(identity.dims, [[2] * 4, [2] * 4])
    assert_array_equal(np.diag(identity.full()), np.ones(16, np.complex128))
def test_css(self):
    """Test the coherent spin state (CSS) for N=2 in both bases."""
    # Uncoupled basis: uniform superposition gives a flat 0.25 matrix.
    expected_uncoupled = 0.25 * np.ones((4, 4), dtype=np.complex128)
    expected_dicke = np.array([[(0.25 + 0j), (0. + 0j), (0.25 + 0j), (0.0 + 0j)], [(0. + 0j), (0.5 + 0j), (0. + 0j), (0.0 + 0j)], [(0.25 + 0j), (0. + 0j), (0.25 + 0j), (0.0 + 0j)], [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)]])
    assert_array_almost_equal(css(2, basis='uncoupled').full(), expected_uncoupled)
    assert_array_almost_equal(css(2).full(), expected_dicke)
def test_collapse_uncoupled(self):
    """Test uncoupled-basis collapse operators for N=2 with local emission."""
    dims = [[2, 2], [2, 2]]
    sigma_minus_1 = Qobj([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0]], dims=dims)
    sigma_minus_2 = Qobj([[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0]], dims=dims)
    expected = [sigma_minus_1, sigma_minus_2]
    # Both the free function and the Dicke helper must return the same list.
    assert_equal(expected, collapse_uncoupled(N=2, emission=1))
    assert_equal(expected, Dicke(N=2, emission=1).c_ops())
def test_get_blocks(self):
    """Test `get_blocks` block-boundary lists for N = 1..4."""
    trueb1 = [2]
    trueb2 = [3, 4]
    trueb3 = [4, 6]
    trueb4 = [5, 8, 9]
    test_b1 = get_blocks(1)
    test_b2 = get_blocks(2)
    test_b3 = get_blocks(3)
    test_b4 = get_blocks(4)
    assert_equal(test_b1, trueb1)
    assert_equal(test_b2, trueb2)
    assert_equal(test_b3, trueb3)
    # Fix: the N=4 case was computed but its assertion was missing.
    assert_equal(test_b4, trueb4)
def test_lindbladian_dims(self):
    """Test the Lindbladian superoperator values and dims for N=1."""
    true_L = [[(- 4), 0, 0, 3], [0, (- 3.), 0, 0], [0, 0, (- 3.), 0], [4, 0, 0, (- 3)]]
    true_L = Qobj(true_L)
    # Superoperator dims: [[[2], [2]], [[2], [2]]] for a single qubit.
    true_L.dims = [[[2], [2]], [[2], [2]]]
    N = 1
    test_dicke = _Dicke(N=N, pumping=1, collective_pumping=2, emission=1, collective_emission=3, dephasing=0.1)
    test_L = test_dicke.lindbladian()
    assert_array_almost_equal(test_L.full(), true_L.full())
    assert_array_equal(test_L.dims, true_L.dims)
def test_liouvillian(self):
    """Test the total Liouvillian (Hamiltonian + dissipation) for N=1.

    Also checks that with no Hamiltonian the Liouvillian equals the
    bare Lindbladian.
    """
    true_L = [[(- 4), 0, 0, 3], [0, (- 3.), 0, 0], [0, 0, (- 3.), 0], [4, 0, 0, (- 3)]]
    true_L = Qobj(true_L)
    true_L.dims = [[[2], [2]], [[2], [2]]]
    true_H = [[(1.0 + 0j), (1.0 + 0j)], [(1.0 + 0j), ((- 1.0) + 0j)]]
    true_H = Qobj(true_H)
    true_H.dims = [[2], [2]]
    true_liouvillian = [[(- 4), (- 1j), 1j, 3], [(- 1j), ((- 3.) + 2j), 0, 1j], [1j, 0, ((- 3.) - 2j), (- 1j)], [4, (+ 1j), (- 1j), (- 3)]]
    true_liouvillian = Qobj(true_liouvillian)
    true_liouvillian.dims = [[[2], [2]], [[2], [2]]]
    N = 1
    test_piqs = Dicke(hamiltonian=(sigmaz() + sigmax()), N=N, pumping=1, collective_pumping=2, emission=1, collective_emission=3, dephasing=0.1)
    test_liouvillian = test_piqs.liouvillian()
    test_hamiltonian = test_piqs.hamiltonian
    assert_array_almost_equal(test_liouvillian.full(), true_liouvillian.full())
    assert_array_almost_equal(test_hamiltonian.full(), true_H.full())
    # Fix: the original compared test_liouvillian.dims with itself,
    # which is always true; compare against the expected dims instead.
    assert_array_equal(test_liouvillian.dims, true_liouvillian.dims)
    # Without a Hamiltonian the Liouvillian reduces to the Lindbladian.
    test_piqs = Dicke(N=N, pumping=1, collective_pumping=2, emission=1, collective_emission=3, dephasing=0.1)
    liouv = test_piqs.liouvillian()
    lindblad = test_piqs.lindbladian()
    assert_equal(liouv, lindblad)
def test_gamma1(self):
    """Test the gamma1 rate coefficient at (j, m, m1) = (1, 1, 1) for N=4."""
    N = 4
    # (constructor kwargs, expected gamma1 value)
    cases = [
        (dict(collective_emission=1), (- 2)),
        (dict(emission=1), (- 3)),
        (dict(emission=1, collective_emission=2), (- 7)),
        (dict(dephasing=4), (- 1)),
        (dict(collective_pumping=2), 0),
        (dict(collective_dephasing=2), 0),
    ]
    for kwargs, expected in cases:
        system = _Dicke(N=N, **kwargs)
        assert_almost_equal(expected, system.gamma1((1, 1, 1)))
def test_gamma2(self):
    """Test the gamma2 rate coefficient at (j, m, m1) = (1, 1, 1) for N=4."""
    N = 4
    # (constructor kwargs, expected gamma2 value)
    cases = [
        (dict(collective_emission=1), 2),
        (dict(emission=1), 1.5),
        (dict(emission=1, collective_emission=2), 5.5),
        (dict(dephasing=4), 0),
        (dict(collective_pumping=2), 0),
        (dict(collective_dephasing=2), 0),
    ]
    for kwargs, expected in cases:
        system = _Dicke(N=N, **kwargs)
        assert_almost_equal(expected, system.gamma2((1, 1, 1)))
def test_gamma3(self):
    """Test the gamma3 rate coefficient at (j, m, m1) = (1, 1, 1) for N=4."""
    N = 4
    # (constructor kwargs, expected gamma3 value)
    cases = [
        (dict(collective_emission=1), 0),
        (dict(emission=1), 1.),
        (dict(emission=1, collective_emission=2), 1.),
        (dict(dephasing=4), 0),
        (dict(collective_pumping=2), 0),
        (dict(collective_dephasing=2), 0),
    ]
    for kwargs, expected in cases:
        system = _Dicke(N=N, **kwargs)
        assert_almost_equal(expected, system.gamma3((1, 1, 1)))
def test_gamma4(self):
    """Test the gamma4 rate coefficient at several (j, m, m1) points for N=4."""
    system = _Dicke(N=4, emission=1, collective_emission=2)
    # ((j, m, m1), expected gamma4 value)
    cases = [
        ((1, 1, 1), 0.),
        ((0, 0, 0), 2),
        ((2, 1, 1), 0),
        ((1, (- 1), 1), 0.),
    ]
    for jmm1, expected in cases:
        assert_almost_equal(expected, system.gamma4(jmm1))
def test_gamma5(self):
    """Test the gamma5 rate coefficient at several (j, m, m1) points for N=4."""
    system = _Dicke(N=4, dephasing=1)
    # ((j, m, m1), expected gamma5 value)
    cases = [
        ((1, 1, 1), 0),
        ((0, 0, 0), 0),
        ((2, 1, 1), 0.75),
        ((1, (- 1), 1), 0),
    ]
    for jmm1, expected in cases:
        assert_almost_equal(expected, system.gamma5(jmm1))
def test_gamma6(self):
    """Test the gamma6 rate coefficient at several (j, m, m1) points for N=4."""
    system = _Dicke(N=4, dephasing=1)
    # ((j, m, m1), expected gamma6 value)
    cases = [
        ((1, 1, 1), 0.25),
        ((0, 0, 0), 1),
        ((2, 1, 1), 0),
        ((1, (- 1), 1), 0.25),
    ]
    for jmm1, expected in cases:
        assert_almost_equal(expected, system.gamma6(jmm1))
def test_gamma7(self):
    """Test the gamma7 rate coefficient at several (j, m, m1) points for N=4."""
    system = _Dicke(N=4, pumping=1)
    # ((j, m, m1), expected gamma7 value)
    cases = [
        ((1, 1, 1), 0),
        ((2, 0, 0), 0.5),
        ((1, 0, 0), 0),
        ((2, (- 1), (- 1)), 1.5),
    ]
    for jmm1, expected in cases:
        assert_almost_equal(expected, system.gamma7(jmm1))
def test_gamma8(self):
    """Test the gamma8 rate coefficient at several (j, m, m1) points for N=4."""
    system = _Dicke(N=4, pumping=1, collective_pumping=2)
    # ((j, m, m1), expected gamma8 value)
    cases = [
        ((1, 1, 1), 0),
        ((2, 0, 0), 13.5),
        ((1, 0, 0), 5.5),
        ((2, (- 1), (- 1)), 13.5),
    ]
    for jmm1, expected in cases:
        assert_almost_equal(expected, system.gamma8(jmm1))
def test_gamma9(self):
    """Test the gamma9 rate coefficient at several (j, m, m1) points for N=4."""
    system = _Dicke(N=4, pumping=1, collective_pumping=2)
    # ((j, m, m1), expected gamma9 value)
    cases = [
        ((1, 1, 1), 1),
        ((2, 0, 0), 0),
        ((1, 0, 0), 0.5),
        ((2, (- 1), (- 1)), 0),
    ]
    for jmm1, expected in cases:
        assert_almost_equal(expected, system.gamma9(jmm1))
def get_train_features(cfg, temp_dir, train_dataset_name, resize_img, spatial_levels, image_helper, train_dataset, model):
    """Extract (or load from an on-disk cache) per-image features for the
    whole training dataset, then post-process them according to
    cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE ('rmac' per image, 'gem'
    over the stacked features, or raw activation maps otherwise).

    Returns a 2D numpy array of stacked feature vectors.
    """
    train_features = []
    def process_train_image(i, out_dir):
        # Appends the i-th image's descriptor to the enclosing
        # `train_features` list, loading a cached .npy file if present.
        if ((i % LOG_FREQUENCY) == 0):
            (logging.info(f'Train Image: {i}'),)
        fname_out = f'{out_dir}/{i}.npy'
        if PathManager.exists(fname_out):
            # Cache hit: reuse the previously extracted descriptor.
            feat = load_file(fname_out)
            train_features.append(feat)
        else:
            fname_in = train_dataset.get_filename(i)
            # Dataset-specific preprocessing pipelines.
            if is_revisited_dataset(train_dataset_name):
                img = image_helper.load_and_prepare_revisited_image(fname_in)
            elif is_whiten_dataset(train_dataset_name):
                img = image_helper.load_and_prepare_whitening_image(fname_in)
            else:
                img = image_helper.load_and_prepare_image(fname_in, roi=None)
            v = torch.autograd.Variable(img.unsqueeze(0))
            vc = v.cuda()
            # Forward pass; take the first output and move it back to CPU.
            activation_map = model(vc)[0].cpu()
            if (cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == 'rmac'):
                descriptors = get_rmac_descriptors(activation_map, spatial_levels)
            else:
                descriptors = activation_map
            # Persist to the cache, then record for this run.
            save_file(descriptors.data.numpy(), fname_out)
            train_features.append(descriptors.data.numpy())
    num_images = train_dataset.get_num_images()
    out_dir = f'{temp_dir}/{train_dataset_name}_S{resize_img}_features_train'
    makedir(out_dir)
    for i in range(num_images):
        process_train_image(i, out_dir)
    # GeM pooling operates on the full stacked feature tensor, not per image.
    if (cfg.IMG_RETRIEVAL.FEATS_PROCESSING_TYPE == 'gem'):
        gem_out_fname = f'{out_dir}/{train_dataset_name}_GeM.npy'
        train_features = torch.tensor(np.concatenate(train_features))
        train_features = gem_pool_and_save_features(train_features, p=cfg.IMG_RETRIEVAL.GEM_POOL_POWER, add_bias=True, gem_out_fname=gem_out_fname)
    # Flatten each feature to (n, dim) rows and stack into one matrix.
    train_features = np.vstack([x.reshape((- 1), x.shape[(- 1)]) for x in train_features])
    logging.info(f'Train features size: {train_features.shape}')
    return train_features
class MemcacheContextFactory(ContextFactory):
    """Context factory that hands out monitored memcache connections from a
    pooled client, reporting pool statistics to both Prometheus and a
    metrics batch.
    """

    PROM_PREFIX = 'memcached_client_pool'
    PROM_LABELS = ['memcached_pool']
    # Class-level gauges: registered once per process, labeled per pool name.
    pool_size_gauge = Gauge(f'{PROM_PREFIX}_max_size', 'Maximum number of connections allowed in this pool', PROM_LABELS)
    used_connections_gauge = Gauge(f'{PROM_PREFIX}_active_connections', 'Number of connections in this pool currently in use', PROM_LABELS)
    free_connections_gauge = Gauge(f'{PROM_PREFIX}_idle_connections', 'Number of free connections in this pool', PROM_LABELS)

    def __init__(self, pooled_client: PooledClient, name: str = 'default'):
        self.pooled_client = pooled_client
        self.name = name

    def report_memcache_runtime_metrics(self, batch: metrics.Client) -> None:
        """Publish current pool utilization to Prometheus and the batch client."""
        pool = self.pooled_client.client_pool
        in_use = len(pool.used)
        available = len(pool.free)
        # Prometheus gauges, labeled by this factory's pool name.
        self.pool_size_gauge.labels(self.name).set(pool.max_size)
        self.free_connections_gauge.labels(self.name).set(available)
        self.used_connections_gauge.labels(self.name).set(in_use)
        # Batch (statsd-style) gauges mirroring the same values.
        batch.gauge('pool.in_use').replace(in_use)
        batch.gauge('pool.open_and_available').replace(available)
        batch.gauge('pool.size').replace(pool.max_size)

    def make_object_for_context(self, name: str, span: Span) -> 'MonitoredMemcacheConnection':
        """Wrap the pooled client in a span-aware monitored connection."""
        return MonitoredMemcacheConnection(name, span, self.pooled_client)
class STFTLoss(torch.nn.Module):
    """Single-resolution STFT loss.

    Combines a spectral convergence loss and a log STFT magnitude loss
    computed on the magnitude spectrograms of the two input signals.
    """

    def __init__(self, fft_size=1024, shift_size=120, win_length=600, window='hann_window'):
        """Initialize with FFT size, hop size, window length, and the name
        of a torch window function (e.g. 'hann_window')."""
        super(STFTLoss, self).__init__()
        self.fft_size = fft_size
        self.shift_size = shift_size
        self.win_length = win_length
        # Materialize the named torch window as a buffer so it follows the
        # module across devices and is saved with the state dict.
        self.register_buffer('window', getattr(torch, window)(win_length))
        self.spectral_convergence_loss = SpectralConvergenceLoss()
        self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()

    def forward(self, x, y):
        """Return (spectral_convergence_loss, log_stft_magnitude_loss)
        between predicted signal x and groundtruth signal y."""
        x_mag, y_mag = (
            stft(signal, self.fft_size, self.shift_size, self.win_length, self.window)
            for signal in (x, y)
        )
        sc = self.spectral_convergence_loss(x_mag, y_mag)
        mag = self.log_stft_magnitude_loss(x_mag, y_mag)
        return (sc, mag)
class SentimentTriple(BaseModel):
    """An (aspect, opinion, sentiment) triple for aspect-based sentiment analysis."""
    aspect: List
    opinion: List
    sentiment: Text

    @classmethod
    def from_sentiment_triple(cls, labels: Tuple[(List, List, Text)]):
        """Build a SentimentTriple from an (aspect, opinion, sentiment) tuple.

        Fix: this alternate constructor takes `cls` and was clearly intended
        to be a classmethod, but the @classmethod decorator was missing —
        without it, calling SentimentTriple.from_sentiment_triple(labels)
        binds `labels` to `cls` and fails.
        """
        # NOTE(review): all three keys are empty strings here, which means
        # only the last ('' -> 'NEU') survives — the original labels were
        # likely non-ASCII strings lost in transit; confirm against the
        # source data before relying on this mapping.
        relation = {'': 'POS', '': 'NEG', '': 'NEU'}
        assert (len(labels) == 3)
        return cls(aspect=labels[0], opinion=labels[1], sentiment=(relation[labels[2]] if (labels[2] in relation.keys()) else labels[2]))
class LilyPondStyle(Style):
    """Pygments style for LilyPond source highlighting."""

    name = 'lilypond'
    # Not meant to appear in the public style gallery.
    web_style_gallery_exclude = True
    styles = {
        Token.Text: '',
        Token.Keyword: 'bold',
        Token.Comment: 'italic #A3AAB2',
        Token.String: '#AB0909',
        Token.String.Escape: '#C46C6C',
        Token.String.Symbol: 'noinherit',
        Token.Pitch: '',
        Token.Number: '#976806',
        Token.ChordModifier: '#976806',
        Token.Name.Lvalue: '#08547A',
        Token.Name.BackslashReference: '#08547A',
        Token.Name.Builtin.MusicCommand: 'bold #08547A',
        Token.Name.Builtin.PaperVariable: 'bold #6C5A05',
        Token.Name.Builtin.HeaderVariable: 'bold #6C5A05',
        Token.Name.Builtin.MusicFunction: 'bold #08547A',
        Token.Name.Builtin.Clef: 'bold #08547A',
        Token.Name.Builtin.Scale: 'bold #08547A',
        Token.Name.Builtin.RepeatType: '#08547A',
        Token.Name.Builtin.Dynamic: '#68175A',
        Token.Name.Builtin.Articulation: '#68175A',
        Token.Name.Builtin.SchemeFunction: 'bold #A83401',
        Token.Name.Builtin.SchemeBuiltin: 'bold',
        Token.Name.Builtin.MarkupCommand: 'bold #831E71',
        Token.Name.Builtin.Context: 'bold #038B8B',
        Token.Name.Builtin.ContextProperty: '#038B8B',
        Token.Name.Builtin.Grob: 'bold #0C7441',
        Token.Name.Builtin.GrobProperty: '#0C7441',
        Token.Name.Builtin.Translator: 'bold #6200A4',
    }
class ResourceRequirementEditor:
    """Widget group for editing a single ResourceRequirement: a resource
    type combo, a resource name combo, negate controls, and amount controls
    whose visibility depends on the selected resource type.
    """

    def __init__(self, parent: QWidget, layout: QHBoxLayout, resource_database: ResourceDatabase, item: ResourceRequirement):
        self.parent = parent
        self.layout = layout
        self.resource_database = resource_database
        self.resource_type_combo = _create_resource_type_combo(item.resource.resource_type, parent, resource_database)
        self.resource_type_combo.setMinimumWidth(75)
        self.resource_type_combo.setMaximumWidth(75)
        self.resource_name_combo = _create_resource_name_combo(self.resource_database, item.resource.resource_type, item.resource, self.parent)
        # Two negate widgets: a combo (items/damage) and a checkbox
        # (events/version/misc); only one is visible at a time.
        self.negate_combo = ScrollProtectedComboBox(parent)
        self.negate_combo.addItem('', False)
        self.negate_combo.addItem('<', True)
        self.negate_combo.setCurrentIndex(int(item.negate))
        self.negate_combo.setMinimumWidth(40)
        self.negate_combo.setMaximumWidth(40)
        self.negate_check = QtWidgets.QCheckBox(parent)
        self.negate_check.setChecked(item.negate)
        # Free-form amount for items/damage; trick-level combo for tricks.
        self.amount_edit = QLineEdit(parent)
        self.amount_edit.setValidator(QIntValidator(1, 10000))
        self.amount_edit.setText(str(item.amount))
        self.amount_edit.setMinimumWidth(45)
        self.amount_edit.setMaximumWidth(45)
        self.amount_combo = ScrollProtectedComboBox(parent)
        for trick_level in iterate_enum(LayoutTrickLevel):
            self.amount_combo.addItem(trick_level.long_name, userData=trick_level.as_number)
        signal_handling.set_combo_with_value(self.amount_combo, item.amount)
        for widget in self._all_widgets:
            self.layout.addWidget(widget)
        self.resource_type_combo.currentIndexChanged.connect(self._update_type)
        self._update_visible_elements_by_type()

    # Fix: resource_type is read as an attribute (self.resource_type) in
    # _update_visible_elements_by_type and current_requirement, so it must
    # be a property — without the decorator those comparisons silently
    # compare a bound method against an enum and are always False.
    @property
    def resource_type(self) -> ResourceType:
        """Currently selected resource type."""
        return self.resource_type_combo.currentData()

    def _update_visible_elements_by_type(self):
        """Show/hide the negate and amount widgets for the current type."""
        resource_type = self.resource_type
        if (resource_type == ResourceType.DAMAGE):
            self.negate_combo.setCurrentIndex(0)
        self.negate_check.setText(('Before' if (resource_type == ResourceType.EVENT) else 'Not'))
        self.negate_check.setVisible((resource_type in {ResourceType.EVENT, ResourceType.VERSION, ResourceType.MISC}))
        self.negate_combo.setVisible((resource_type in {ResourceType.ITEM, ResourceType.DAMAGE}))
        self.negate_combo.setEnabled((resource_type == ResourceType.ITEM))
        self.amount_edit.setVisible((resource_type in {ResourceType.ITEM, ResourceType.DAMAGE}))
        self.amount_combo.setVisible((resource_type == ResourceType.TRICK))

    def _update_type(self):
        """Rebuild the resource-name combo when the type selection changes."""
        old_combo = self.resource_name_combo
        self.resource_name_combo = _create_resource_name_combo(self.resource_database, self.resource_type_combo.currentData(), None, self.parent)
        self.layout.replaceWidget(old_combo, self.resource_name_combo)
        old_combo.deleteLater()
        self._update_visible_elements_by_type()

    def deleteLater(self):
        """Schedule all child widgets for deletion."""
        for widget in self._all_widgets:
            widget.deleteLater()

    # Fix: _all_widgets is iterated directly (for widget in self._all_widgets)
    # in __init__ and deleteLater, so it must be a property yielding widgets;
    # iterating a bound method object would raise TypeError.
    @property
    def _all_widgets(self) -> typing.Iterable[QWidget]:
        (yield self.resource_type_combo)
        (yield self.negate_check)
        (yield self.resource_name_combo)
        (yield self.negate_combo)
        (yield self.amount_edit)
        (yield self.amount_combo)

    def current_requirement(self) -> ResourceRequirement:
        """Build a ResourceRequirement from the current widget state."""
        resource_type = self.resource_type
        if (resource_type == ResourceType.TRICK):
            quantity: int = self.amount_combo.currentData()
        elif (resource_type == ResourceType.EVENT):
            quantity = 1
        else:
            quantity = int(self.amount_edit.text())
        if (resource_type == ResourceType.ITEM):
            negate: bool = self.negate_combo.currentData()
        elif (resource_type in {ResourceType.EVENT, ResourceType.MISC}):
            negate = self.negate_check.isChecked()
        else:
            negate = False
        return ResourceRequirement.create(self.resource_name_combo.currentData(), quantity, negate)
class Decoderv2(nn.Module):
    """Decoder block: transposed-conv upsampling of the deep path plus a 1x1
    projection of the skip path, concatenated and refined with spatial and
    channel attention.
    """

    def __init__(self, up_in, x_in, n_out):
        """up_in/x_in: channels of the upsampled and skip inputs; n_out: output channels."""
        super(Decoderv2, self).__init__()
        # Each branch contributes half of the output channels.
        half = n_out // 2
        self.x_conv = nn.Conv2d(x_in, half, 1, bias=False)
        self.tr_conv = nn.ConvTranspose2d(up_in, half, 2, stride=2)
        self.bn = nn.BatchNorm2d(n_out)
        self.relu = nn.ReLU(True)
        self.s_att = SpatialAttention2d(n_out)
        self.c_att = GAB(n_out, 16)

    def forward(self, up_p, x_p):
        """Fuse the upsampled deep features with the skip features."""
        upsampled = self.tr_conv(up_p)
        skip = self.x_conv(x_p)
        fused = self.relu(self.bn(torch.cat([upsampled, skip], 1)))
        # Sum of spatial- and channel-attention refined features.
        return self.s_att(fused) + self.c_att(fused)
# NOTE(review): this line reads like a fixture decorator that lost its '@'
# (and possibly part of its name, e.g. '@with_fixtures(...)') — as written it
# is a bare call whose result is discarded; confirm against the original file.
_fixtures(SqlAlchemyFixture, DeferredActionFixture)
def test_deferred_action_times_out_with_shared_requirements(sql_alchemy_fixture, deferred_action_fixture):
    """When two DeferredActions share Requirements, expiring one action must
    fire its deadline handling and release only the Requirements no other
    action still needs; expiring the second cleans up everything.
    """
    fixture = deferred_action_fixture
    with sql_alchemy_fixture.persistent_test_classes(fixture.MyDeferredAction, fixture.SomeObject):
        # action1 depends on requirements2; action2 on requirements1 + requirements2.
        requirements1 = [Requirement()]
        requirements2 = [Requirement(), Requirement()]
        deferred_action1 = fixture.MyDeferredAction(fixture.one_object, requirements=requirements2, deadline=fixture.future_time)
        Session.add(deferred_action1)
        deferred_action2 = fixture.MyDeferredAction(fixture.another_object, requirements=(requirements1 + requirements2), deadline=fixture.future_time)
        Session.add(deferred_action2)
        Session.flush()
        # Expire only the first action and run maintenance.
        deferred_action1.deadline = fixture.past_time
        ReahlEgg.do_daily_maintenance_for_egg('reahl-domain')
        assert fixture.one_object.deadline_flag
        assert (not fixture.another_object.deadline_flag)
        # Shared requirements survive because action2 still references them.
        assert (Session.query(Requirement).count() == 3)
        assert (Session.query(DeferredAction).count() == 1)
        for requirement in (requirements1 + requirements2):
            assert (set(requirement.deferred_actions) == {deferred_action2})
        # Expire the second action; now everything is cleaned up.
        deferred_action2.deadline = fixture.past_time
        ReahlEgg.do_daily_maintenance_for_egg('reahl-domain')
        assert fixture.one_object.deadline_flag
        assert fixture.another_object.deadline_flag
        assert (Session.query(Requirement).count() == 0)
        assert (Session.query(DeferredAction).count() == 0)
def get_datasets(data_cfg: DataConfig) -> Tuple[(Subset[CharDataset], Subset[CharDataset], CharDataset)]:
    """Build a CharDataset from the config and split it into train/eval
    subsets according to data_cfg.train_split.

    Returns (train_subset, eval_subset, full_dataset).
    """
    full_dataset = CharDataset(data_cfg)
    n_train = int(len(full_dataset) * data_cfg.train_split)
    n_eval = len(full_dataset) - n_train
    train_set, eval_set = random_split(full_dataset, [n_train, n_eval])
    return (train_set, eval_set, full_dataset)
class ApacheRole(Role):
    """Provision role that installs and manages the Apache2 web server.

    Sites are created in sites-available and enabled/disabled by symlinking
    into sites-enabled; a restart is deferred until cleanup.
    """

    def __available_site_for(self, name):
        # All configured vhosts live here, enabled or not.
        return ('/etc/apache2/sites-available/%s' % name)

    def __enabled_site_for(self, name):
        # Symlink target directory for active vhosts.
        return ('/etc/apache2/sites-enabled/%s' % name)

    def __init__(self, prov, context):
        super(ApacheRole, self).__init__(prov, context)
        # Set by ensure_restart(); consumed by cleanup().
        self.must_restart = False

    def provision(self):
        """Install the apache2 package."""
        with self.using(AptitudeRole) as aptitude:
            aptitude.ensure_package_installed('apache2')

    def cleanup(self):
        """Restart Apache once, at the end, if any change requested it."""
        super(ApacheRole, self).cleanup()
        if self.must_restart:
            self.restart()

    def ensure_mod(self, mod):
        """Install and enable an Apache module (libapache2-mod-<mod>)."""
        with self.using(AptitudeRole) as aptitude:
            aptitude.ensure_package_installed(('libapache2-mod-%s' % mod))
        self.execute(('a2enmod %s' % mod), sudo=True)
        self.ensure_restart()

    def create_site(self, site, template, options=None):
        """Render `template` with `options` into sites-available/<site>.

        Fix: `options` previously defaulted to a shared mutable dict
        (options={}); use the None sentinel instead.
        """
        if options is None:
            options = {}
        self.update_file(template, self.__available_site_for(site), options=options, sudo=True)
        self.ensure_restart()

    def ensure_site_enabled(self, site):
        """Symlink the site into sites-enabled (ignoring failures)."""
        with settings(warn_only=True):
            self.remote_symlink(from_file=self.__available_site_for(site), to_file=self.__enabled_site_for(site), sudo=True)
        self.ensure_restart()

    def ensure_site_disabled(self, site):
        """Remove the site's symlink from sites-enabled (ignoring failures)."""
        with settings(warn_only=True):
            self.remove_file(self.__enabled_site_for(site), sudo=True)
        self.ensure_restart()

    def ensure_restart(self):
        """Mark Apache as needing a restart during cleanup."""
        self.must_restart = True

    def restart(self):
        """Restart Apache immediately and clear the pending-restart flag."""
        self.execute('service apache2 restart', sudo=True)
        self.must_restart = False
class HeaderChecker:
    """Test helper that parses a Content-Disposition header from a fake
    network reply and asserts on the resulting (inline, filename) pair.
    """

    def __init__(self, caplog, stubs):
        self.caplog = caplog
        self.stubs = stubs

    def check_filename(self, header, filename, expected_inline=False):
        """Assert the header parses to the given filename/inline flag."""
        reply = self.stubs.FakeNetworkReply(headers={'Content-Disposition': header})
        # FIXME(review): the right-hand side of this unpacking was missing in
        # the source as received; restored as the content-disposition parser —
        # confirm the module name against the original file.
        (cd_inline, cd_filename) = http.parse_content_disposition(reply)
        assert (cd_filename is not None)
        assert (cd_filename == filename)
        assert (cd_inline == expected_inline)

    def check_ignored(self, header):
        """Assert an invalid header is ignored (logged, defaults used)."""
        reply = self.stubs.FakeNetworkReply(headers={'Content-Disposition': header})
        with self.caplog.at_level(logging.ERROR, 'network'):
            # FIXME(review): restored missing right-hand side, see above.
            (cd_inline, cd_filename) = http.parse_content_disposition(reply)
        assert (cd_filename == DEFAULT_NAME)
        assert cd_inline

    def check_unnamed(self, header):
        """Assert a header without a filename yields the default, non-inline."""
        reply = self.stubs.FakeNetworkReply(headers={'Content-Disposition': header})
        # FIXME(review): restored missing right-hand side, see above.
        (cd_inline, cd_filename) = http.parse_content_disposition(reply)
        assert (cd_filename == DEFAULT_NAME)
        assert (not cd_inline)
class PosTransformerEncoderLayerNoFFN(TransformerEncoderLayerNoFFN):
    """Encoder layer without a feed-forward block that adds positional
    encodings to the attention queries and keys (values stay position-free).
    """

    def __init__(self, d_model, nhead, dropout):
        super().__init__(d_model, nhead, dropout)

    def forward(self, src, pos, src_mask=None, src_key_padding_mask=None):
        """Self-attention with positions injected into queries/keys, followed
        by a residual connection and layer norm."""
        query = key = src + pos
        attn_out = self.self_attn(query, key, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        return self.norm1(src + self.dropout1(attn_out))
def test_basename_natural2():
    """Natural sort must order numeric suffixes numerically, not lexically.

    Fix: several adjacent filename literals had been fused together
    (e.g. 'hello3.txthello10.txt'), breaking the intended hello0..hello113
    progression; they are split back into separate entries here.
    """
    names = (
        'hello', 'hello.txt',
        'hello0.txt', 'hello1.txt', 'hello2.txt', 'hello3.txt',
        'hello10.txt', 'hello11.txt', 'hello12.txt', 'hello13.txt',
        'hello100.txt', 'hello101.txt', 'hello102.txt', 'hello103.txt',
        'hello110.txt', 'hello111.txt', 'hello112.txt', 'hello113.txt',
    )
    fsos = [create_filesystem_object(path) for path in names]
    # Sorting the reversed list by the natural keys must restore the order.
    assert (fsos == sorted(fsos[::(- 1)], key=operator.attrgetter('basename_natural')))
    assert (fsos == sorted(fsos[::(- 1)], key=operator.attrgetter('basename_natural_lower')))
class BeachballView(qw.QWidget):
    """Qt widget that renders the lower-hemisphere beachball diagram of a
    moment tensor.

    NOTE(review): QPainter/QBrush/QPen/QColor are normally QtGui classes;
    here they are accessed through the `qw` alias — confirm the import alias
    actually exposes them (it may need to be a QtGui alias instead).
    """
    def __init__(self, *args):
        qw.QWidget.__init__(self, *args)
        # Default example tensor shown until set_moment_tensor() is called.
        mt = mtm.MomentTensor(m=mtm.symmat6(1.0, (- 1.0), 2.0, 0.0, (- 2.0), 1.0))
        self._mt = mt
        self.set_moment_tensor(mt)
    def set_moment_tensor(self, mt):
        """Store a new moment tensor and request a repaint."""
        self._mt = mt
        self.update()
    def paintEvent(self, paint_ev):
        """Qt paint hook: delegate drawing to drawit()."""
        painter = qw.QPainter(self)
        painter.setRenderHint(qw.QPainter.Antialiasing)
        self.drawit(painter)
    def drawit(self, p):
        """Draw the beachball with painter `p`.

        Projects the eigensystem patches of the deviatoric part of the
        tensor into a square region centered in the widget.
        """
        h = self.height()
        w = self.width()
        # Use 90% of the smaller widget dimension as the beachball diameter.
        s = (min(h, w) * 0.9)
        xproj = Projection()
        xproj.set_in_range((- 1.0), 1.0)
        xproj.set_out_range(((w - s) / 2.0), (w - ((w - s) / 2.0)))
        yproj = Projection()
        yproj.set_in_range((- 1.0), 1.0)
        # y output range is inverted: Qt's y axis grows downward.
        yproj.set_out_range((h - ((h - s) / 2.0)), ((h - s) / 2.0))
        mt = self._mt
        mt_devi = mt.deviatoric()
        eig = mt_devi.eigensystem()
        # Pressure/Tension quadrants get distinct colors.
        group_to_color = {'P': plot.graph_colors[0], 'T': plot.graph_colors[1]}
        for (group, patches, patches_lower, patches_upper, lines, lines_lower, lines_upper) in beachball.eig2gx(eig):
            color = group_to_color[group]
            brush = qw.QBrush(qw.QColor(*color))
            p.setBrush(brush)
            pen = qw.QPen(qw.QColor(*color))
            pen.setWidth(1)
            p.setPen(pen)
            # Filled quadrant patches (lower hemisphere only).
            for poly in patches_lower:
                (px, py, pz) = poly.T
                points = make_QPolygonF(xproj(px), yproj(py))
                p.drawPolygon(points)
            # Outline the patch borders in black.
            color = (0, 0, 0)
            pen = qw.QPen(qw.QColor(*color))
            pen.setWidth(2)
            p.setPen(pen)
            for poly in lines_lower:
                (px, py, pz) = poly.T
                points = make_QPolygonF(xproj(px), yproj(py))
                p.drawPolyline(points)
class Xception65(nn.Module):
    """Modified Aligned Xception backbone (entry / middle / exit flow).

    The requested output stride (from cfg.MODEL.OUTPUT_STRIDE: 8, 16 or 32)
    selects strides and dilations for the entry block 3, the 16 middle-flow
    blocks and the exit flow. forward() returns the four feature maps
    (c1, c2, c3, c4) used by downstream decoders.
    """

    def __init__(self, norm_layer=nn.BatchNorm2d):
        super().__init__()
        output_stride = cfg.MODEL.OUTPUT_STRIDE
        # Map output stride -> (entry block3 stride, middle dilation,
        # exit dilations, exit stride).
        if output_stride == 32:
            entry_block3_stride, middle_block_dilation = 2, 1
            exit_block_dilations, exit_block_stride = (1, 1), 2
        elif output_stride == 16:
            entry_block3_stride, middle_block_dilation = 2, 1
            exit_block_dilations, exit_block_stride = (1, 2), 1
        elif output_stride == 8:
            entry_block3_stride, middle_block_dilation = 1, 2
            exit_block_dilations, exit_block_stride = (2, 4), 1
        else:
            raise NotImplementedError
        # Entry flow stem.
        self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False)
        self.bn1 = norm_layer(32)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
        self.bn2 = norm_layer(64)
        self.block1 = XceptionBlock([64, 128, 128, 128], stride=2, norm_layer=norm_layer)
        self.block2 = XceptionBlock([128, 256, 256, 256], stride=2, low_feat=True, norm_layer=norm_layer)
        self.block3 = XceptionBlock([256, 728, 728, 728], stride=entry_block3_stride, low_feat=True, norm_layer=norm_layer)
        # Middle flow: 16 identical residual blocks, block4..block19.
        # Attribute names are preserved so existing checkpoints still load.
        for idx in range(4, 20):
            setattr(self, 'block%d' % idx, XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, skip_connection_type='sum', norm_layer=norm_layer))
        # Exit flow.
        self.block20 = XceptionBlock([728, 728, 1024, 1024], stride=exit_block_stride, dilation=exit_block_dilations[0], norm_layer=norm_layer)
        self.block21 = XceptionBlock([1024, 1536, 1536, 2048], dilation=exit_block_dilations[1], skip_connection_type='none', relu_first=False, norm_layer=norm_layer)

    def forward(self, x):
        """Return the (c1, c2, c3, c4) feature maps for input image batch x."""
        # Stem.
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        # Entry flow; block2/block3 also emit low-level features.
        x = self.block1(x)
        x, c1 = self.block2(x)
        x, c2 = self.block3(x)
        # Middle flow: block4..block18 chained; block19's output is c3.
        for idx in range(4, 19):
            x = getattr(self, 'block%d' % idx)(x)
        c3 = self.block19(x)
        # Exit flow.
        c4 = self.block21(self.block20(c3))
        return (c1, c2, c3, c4)
class WBLogger():
    """Weights & Biases (wandb) logging helper for pixel2style2pixel training.

    BUG FIX: the logging methods originally took no ``self`` parameter and had
    no decorator, so calling them on an instance raised TypeError. They are
    stateless, so they are now declared as ``@staticmethod``.
    """

    def __init__(self, opts):
        # Name the run after the experiment directory so runs are easy to match
        # with their on-disk outputs.
        wandb_run_name = os.path.basename(opts.exp_dir)
        wandb.init(project='pixel2style2pixel', config=vars(opts), name=wandb_run_name)

    @staticmethod
    def log_best_model():
        """Record in the run summary when the best model was saved."""
        wandb.run.summary['best-model-save-time'] = datetime.datetime.now()

    @staticmethod
    def log(prefix, metrics_dict, global_step):
        """Log every metric in ``metrics_dict`` under ``prefix``, tagged with the step."""
        log_dict = {f'{prefix}_{key}': value for (key, value) in metrics_dict.items()}
        log_dict['global_step'] = global_step
        wandb.log(log_dict)

    @staticmethod
    def log_dataset_wandb(dataset, dataset_name, n_images=16):
        """Log ``n_images`` randomly chosen source images from ``dataset``."""
        idxs = np.random.choice(a=range(len(dataset)), size=n_images, replace=False)
        data = [wandb.Image(dataset.source_paths[idx]) for idx in idxs]
        wandb.log({f'{dataset_name} Data Samples': data})

    @staticmethod
    def log_images_to_wandb(x, y, y_hat, id_logs, prefix, step, opts):
        """Log a table of (source, target, output[, ID diff]) rows for a batch."""
        im_data = []
        column_names = ['Source', 'Target', 'Output']
        if (id_logs is not None):
            column_names.append('ID Diff Output to Target')
        for i in range(len(x)):
            cur_im_data = [
                wandb.Image(common.log_input_image(x[i], opts)),
                wandb.Image(common.tensor2im(y[i])),
                wandb.Image(common.tensor2im(y_hat[i])),
            ]
            if (id_logs is not None):
                cur_im_data.append(id_logs[i]['diff_target'])
            im_data.append(cur_im_data)
        outputs_table = wandb.Table(data=im_data, columns=column_names)
        wandb.log({f'{prefix.title()} Step {step} Output Samples': outputs_table})
class DevhostSt(SimpleDownloader):
    """d-h.st (Dev-Host) downloader plugin."""
    __name__ = 'DevhostSt'
    __type__ = 'downloader'
    __version__ = '0.11'
    __status__ = 'testing'
    # BUG FIX: __pattern__ was an unterminated string literal (syntax error).
    # Restored from the upstream pyLoad plugin; NOTE(review): verify against
    # current d-h.st URL shapes.
    __pattern__ = r'http://(?:www\.)?d-h\.st/(?!users|api|search)\w{3}'
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'D-h.st downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('zapp-brannigan', 'fuerst.')]

    # Regexes used by SimpleDownloader to scrape the file page.
    NAME_PATTERN = '<span title="(?P<N>.*?)"'
    SIZE_PATTERN = '</span> \\((?P<S>[\\d.,]+) (?P<U>[\\w^_]+)\\)<br'
    HASHSUM_PATTERN = '>(?P<H>.*?) Sum</span>: (?P<D>.*?)<br'
    OFFLINE_PATTERN = '>File Not Found'
    LINK_FREE_PATTERN = "var product_download_url= \\'(.+?)\\'"

    def setup(self):
        """Configure download behavior: parallel downloads allowed, single chunk."""
        self.multi_dl = True
        self.chunk_limit = 1
_config
def test_fullscreen_on_top(xmanager):
    """A focused fullscreen window must sit on top of floating windows."""
    conn = xcbq.Connection(xmanager.display)

    def window_by_name(name):
        # Resolve a qtile window name to its command object.
        ids_by_name = {w['name']: w['id'] for w in xmanager.c.windows()}
        return xmanager.c.window[ids_by_name[name]]

    def stacking_order():
        # Ask the X server for the root's children in bottom-to-top order and
        # return the managed windows' names sorted by that order.
        root_wid = conn.default_screen.root.wid
        reply = conn.conn.core.QueryTree(root_wid).reply()
        bottom_to_top = list(reply.children)
        named = [(w['name'], bottom_to_top.index(w['id'])) for w in xmanager.c.windows()]
        named.sort(key=lambda pair: pair[1])
        return [name for (name, _) in named]

    xmanager.test_window('one', floating=True)
    xmanager.test_window('two')
    assert stacking_order() == ['two', 'one']

    # Fullscreen + focus raises 'two' above the floating window.
    window_by_name('two').enable_fullscreen()
    window_by_name('two').focus()
    assert stacking_order() == ['one', 'two']

    # Focusing the floating window raises it again.
    window_by_name('one').focus()
    assert stacking_order() == ['two', 'one']

    # Leaving fullscreen drops 'two' back below the floating window.
    window_by_name('two').focus()
    window_by_name('two').toggle_fullscreen()
    assert stacking_order() == ['two', 'one']
class KTZ1(DataElementGroup):
    """FinTS data element group for a SEPA-capable account reference (KTZ, v1)."""
    is_sepa = DataElementField(type='jn', _d='Kontoverwendung SEPA')
    iban = DataElementField(type='an', max_length=34, _d='IBAN')
    bic = DataElementField(type='an', max_length=11, _d='BIC')
    account_number = DataElementField(type='id', _d='Konto-/Depotnummer')
    subaccount_number = DataElementField(type='id', _d='Unterkontomerkmal')
    bank_identifier = DataElementGroupField(type=BankIdentifier, _d='Kreditinstitutskennung')

    def as_sepa_account(self):
        """Return this group as a SEPAAccount, or None if it is not SEPA-capable."""
        from fints.models import SEPAAccount
        if (not self.is_sepa):
            return None
        return SEPAAccount(self.iban, self.bic, self.account_number,
                           self.subaccount_number, self.bank_identifier.bank_code)

    @classmethod
    def from_sepa_account(cls, acc):
        """Build a KTZ1 from a SEPAAccount.

        BUG FIX: this alternate constructor takes ``cls`` as its first argument
        but was missing the ``@classmethod`` decorator, so any call received
        the wrong first argument.
        """
        return cls(
            is_sepa=True,
            iban=acc.iban,
            bic=acc.bic,
            account_number=acc.accountnumber,
            subaccount_number=acc.subaccount,
            # The BIC's 5th/6th characters are the ISO country code; map it to
            # the numeric identifier expected by BankIdentifier.
            bank_identifier=BankIdentifier(
                country_identifier=BankIdentifier.COUNTRY_ALPHA_TO_NUMERIC[acc.bic[4:6]],
                bank_code=acc.blz,
            ),
        )
class random_crystal():
    """Randomly generate an atomic crystal with a target symmetry group and composition.

    Generation is attempted immediately in ``__init__``; on success
    ``self.valid`` is True and ``self.lattice`` / ``self.atom_sites`` describe
    the structure. NOTE(review): relies on helpers defined elsewhere in this
    module (Group, Lattice, Element, Tol_matrix, atom_site, choose_wyckoff,
    Comp_CompatibilityError, VolumeError).
    """

    def __init__(self, dim=3, group=227, species=['C'], numIons=8, factor=1.1,
                 thickness=None, area=None, lattice=None, sites=None,
                 conventional=True, tm=Tol_matrix(prototype='atomic'),
                 use_hall=False):
        """Store generation parameters and attempt to build the crystal.

        Raises Comp_CompatibilityError when the composition cannot satisfy the
        requested symmetry group.
        """
        self.source = 'Random'
        self.valid = False
        self.factor = factor          # overall volume scaling factor
        self.min_density = 0.75       # floor on volume per atom
        self.dim = dim
        self.area = area              # cross-section area (1D systems)
        self.thickness = thickness    # slab thickness (2D systems)
        self.lattice0 = lattice       # optional user-supplied lattice
        # Periodicity pattern depends on the dimensionality.
        if (dim == 3):
            self.PBC = [1, 1, 1]
        elif (dim == 2):
            self.PBC = [1, 1, 0]
        elif (dim == 1):
            self.PBC = [0, 0, 1]
        elif (dim == 0):
            self.PBC = [0, 0, 0]
        if (type(group) == Group):
            self.group = group
        else:
            self.group = Group(group, dim=self.dim, use_hall=use_hall)
        self.number = self.group.number
        self.symbol = self.group.symbol
        numIons = np.array(numIons)
        # Primitive settings need ion counts scaled up to the conventional cell.
        if (not conventional):
            mul = self.group.cellsize()
        else:
            mul = 1
        self.numIons = (numIons * mul)
        self.species = species
        if (type(tm) == Tol_matrix):
            self.tol_matrix = tm
        else:
            self.tol_matrix = Tol_matrix(prototype=tm)
        self.set_sites(sites)
        (compat, self.degrees) = self.group.check_compatible(self.numIons)
        if (not compat):
            self.valid = False
            # BUG FIX: corrected 'Compoisition' typo in the error message.
            msg = ('Composition ' + str(self.numIons))
            msg += ' not compatible with symmetry '
            msg += str(self.group.number)
            raise Comp_CompatibilityError(msg)
        else:
            self.set_elemental_volumes()
            self.set_crystal()

    def __str__(self):
        """Human-readable summary of the structure (or a failure note)."""
        if self.valid:
            s = '------Crystal from {:s}------'.format(self.source)
            s += '\nDimension: {}'.format(self.dim)
            s += '\nGroup: {} ({})'.format(self.symbol, self.number)
            s += '\n{}'.format(self.lattice)
            s += '\nWyckoff sites:'
            for wyc in self.atom_sites:
                s += '\n\t{}'.format(wyc)
        else:
            s = '\nStructure not available.'
        return s

    def __repr__(self):
        return str(self)

    def set_sites(self, sites):
        """Normalize the per-species Wyckoff site specification into ``self.sites``."""
        self.sites = {}
        for (i, specie) in enumerate(self.species):
            if ((sites is not None) and (sites[i] is not None) and (len(sites[i]) > 0)):
                self.sites[specie] = []
                self._check_consistency(sites[i], self.numIons[i])
                if (type(sites[i]) is dict):
                    # {wyckoff letter: coordinates} mapping.
                    for item in sites[i].items():
                        id = self.group.get_index_by_letter(item[0])
                        self.sites[specie].append((id, item[1]))
                elif (type(sites[i]) is list):
                    for site in sites[i]:
                        if (type(site) is tuple):
                            # (letter, x, y, z): fixed position on a given site.
                            (letter, x, y, z) = site
                            id = self.group.get_index_by_letter(letter)
                            self.sites[specie].append((id, (x, y, z)))
                        else:
                            # Bare letter: coordinates will be sampled later.
                            id = self.group.get_index_by_letter(site)
                            self.sites[specie].append(id)
            else:
                self.sites[specie] = None

    def set_elemental_volumes(self):
        """Estimate [min, max] atomic volumes per element from covalent/vdW radii."""
        self.elemental_volumes = []
        for specie in self.species:
            sp = Element(specie)
            (vol1, vol2) = ((sp.covalent_radius ** 3), (sp.vdw_radius ** 3))
            self.elemental_volumes.append([(((4 / 3) * np.pi) * vol1), (((4 / 3) * np.pi) * vol2)])

    def set_volume(self):
        """Sample a target cell volume, enforcing the minimum density floor."""
        volume = 0
        for (i, numIon) in enumerate(self.numIons):
            [vmin, vmax] = self.elemental_volumes[i]
            volume += (numIon * random.uniform(vmin, vmax))
        self.volume = (self.factor * volume)
        if ((self.volume / sum(self.numIons)) < self.min_density):
            self.volume = (sum(self.numIons) * self.min_density)

    def set_lattice(self, lattice):
        """Adopt the given lattice, or sample one compatible with the group."""
        if (lattice is not None):
            self.lattice = lattice
            self.volume = lattice.volume
            if (lattice.PBC != self.PBC):
                self.lattice.PBC = self.PBC
        else:
            # Unique-axis convention varies with dimensionality and group number.
            if (self.dim == 2):
                if (self.number in range(3, 8)):
                    unique_axis = 'c'
                else:
                    unique_axis = 'a'
            elif (self.dim == 1):
                if (self.number in range(3, 8)):
                    unique_axis = 'a'
                else:
                    unique_axis = 'c'
            else:
                unique_axis = 'c'
            good_lattice = False
            # Retry with a progressively larger volume on failure.
            for cycle in range(10):
                try:
                    self.lattice = Lattice(self.group.lattice_type, self.volume,
                                           PBC=self.PBC, unique_axis=unique_axis,
                                           thickness=self.thickness, area=self.area)
                    good_lattice = True
                    break
                except VolumeError:
                    self.volume *= 1.1
                    msg = 'Warning: increase the volume by 1.1 times: '
                    msg += '{:.2f}'.format(self.volume)
                    print(msg)
            if (not good_lattice):
                msg = 'Volume estimation {:.2f} is very bad'.format(self.volume)
                msg += ' with the given composition '
                msg += str(self.numIons)
                raise RuntimeError(msg)

    def set_crystal(self):
        """Main generation loop: sample lattices and coordinates until valid."""
        self.numattempts = 0
        # With no free degrees of freedom there is little point retrying much.
        if (not self.degrees):
            self.lattice_attempts = 5
            self.coord_attempts = 5
        else:
            self.lattice_attempts = 40
            self.coord_attempts = 10
        for cycle1 in range(self.lattice_attempts):
            self.set_volume()
            self.set_lattice(self.lattice0)
            self.cycle1 = cycle1
            for cycle2 in range(self.coord_attempts):
                self.cycle2 = cycle2
                output = self._set_coords()
                if output:
                    self.atom_sites = output
                    break
            if self.valid:
                return
            else:
                # This lattice failed; reset it before sampling a new one.
                self.lattice.reset_matrix()
        return

    def _set_coords(self):
        """Place Wyckoff sites for every species; return the list or None on failure."""
        wyks = []
        cell = self.lattice.matrix
        for (numIon, specie) in zip(self.numIons, self.species):
            output = self._set_ion_wyckoffs(numIon, specie, cell, wyks)
            if (output is not None):
                wyks.extend(output)
            else:
                return None
        self.valid = True
        return wyks

    def _set_ion_wyckoffs(self, numIon, specie, cell, wyks):
        """Place ``numIon`` atoms of ``specie``, honoring pre-specified sites.

        Returns the list of placed atom_site objects, or None when the attempt
        budget is exhausted.
        """
        numIon_added = 0
        tol = self.tol_matrix.get_tol(specie, specie)
        wyckoff_sites_tmp = []
        # Work on a copy so the stored specification is not consumed.
        sites_list = deepcopy(self.sites[specie])
        if (sites_list is not None):
            wyckoff_attempts = max((len(sites_list) * 2), 10)
        else:
            # Rough estimate of how many placements may be needed.
            min_wyckoffs = int((numIon / len(self.group.wyckoffs_organized[0][0])))
            wyckoff_attempts = max((2 * min_wyckoffs), 10)
        cycle = 0
        while (cycle < wyckoff_attempts):
            if ((sites_list is not None) and (len(sites_list) > 0)):
                site = sites_list[0]
            else:
                site = None
            new_site = None
            if (type(site) is tuple):
                # Fully specified (wyckoff index, coordinates).
                (index, xyz) = site
                wp = self.group[index]
                new_site = atom_site(wp, xyz, specie)
            else:
                if (site is not None):
                    # Wyckoff letter given; sample coordinates on that position.
                    wp = self.group[site]
                    pt = self.lattice.generate_point()
                    if (len(wp.short_distances(pt, cell, tol)) > 0):
                        cycle += 1
                        continue
                    else:
                        pt = wp.project(pt, cell, self.PBC)
                else:
                    # Pick any Wyckoff position able to host the remaining atoms.
                    wp = choose_wyckoff(self.group, (numIon - numIon_added), site, self.dim)
                    if (wp is not False):
                        # (removed unused `passed_wp_check` flag)
                        pt = self.lattice.generate_point()
                        (pt, wp, _) = wp.merge(pt, cell, tol, group=self.group)
                if (wp is not False):
                    if ((self.dim == 2) and (self.thickness is not None) and (self.thickness < 0.1)):
                        # Very thin slabs: pin atoms to the mid-plane.
                        pt[-1] = 0.5
                    new_site = atom_site(wp, pt, specie)
            if self.check_wp(wyckoff_sites_tmp, wyks, cell, new_site):
                if ((sites_list is not None) and (len(sites_list) > 0)):
                    sites_list.pop(0)
                wyckoff_sites_tmp.append(new_site)
                numIon_added += new_site.multiplicity
                if (numIon_added == numIon):
                    return wyckoff_sites_tmp
            cycle += 1
            self.numattempts += 1
        return None

    def check_wp(self, wyckoff_sites_tmp, wyks, cell, new_site):
        """Return True when ``new_site`` passes distance checks against all placed sites."""
        if (new_site is None):
            return False
        for ws in (wyckoff_sites_tmp + wyks):
            if (not new_site.check_with_ws2(ws, cell, self.tol_matrix)):
                return False
        return True

    def _check_consistency(self, site, numIon):
        """Verify the site specification can account for ``numIon`` atoms.

        Raises ValueError when the counts are incompatible.
        """
        num = 0
        for s in site:
            if (type(s) is dict):
                for key in s.keys():
                    num += int(key[:(- 1)])
            else:
                num += int(s[:(- 1)])
        if (numIon == num):
            return True
        else:
            diff = (numIon - num)
            if (diff > 0):
                # Remaining atoms may still fit on freely chosen Wyckoff sites.
                (compat, self.degrees) = self.group.check_compatible([diff])
                if compat:
                    return True
                else:
                    msg = '\nfrom numIons: {:d}'.format(numIon)
                    msg += '\nfrom Wyckoff list: {:d}'.format(num)
                    msg += '\nThe number is incompatible with composition: '
                    # BUG FIX: was `mse += str(site)`, a NameError on this path.
                    msg += str(site)
                    raise ValueError(msg)
            else:
                msg = '\nfrom numIons: {:d}'.format(numIon)
                msg += '\nfrom Wyckoff list: {:d}'.format(num)
                msg += '\nThe requested number is greater than composition: '
                msg += str(site)
                raise ValueError(msg)
def system_details_to_str(d: Dict[(str, Union[(str, Dict[(str, DebugInfo)])])], indent: str='') -> str:
    """Render the machine/Python/backends details mapping as an indented report."""

    def _render(key, value, depth=0):
        # Three spaces of indentation per nesting level.
        pad = (' ' * depth) * 3
        if isinstance(value, str):
            if key:
                return ['%s%s: %s' % (pad, key, value)]
            return ['%s%s' % (pad, value)]
        if isinstance(value, dict):
            out = ['%s%s:' % (pad, key)] if key else []
            for sub_key, sub_value in value.items():
                out.extend(_render(sub_key, sub_value, depth + 1))
            return out
        if isinstance(value, (tuple, list)):
            out = ['%s%s:' % (pad, key)] if key else []
            for item in value:
                out.extend(_render(None, item, depth + 1))
            return out
        # Fallback: stringify anything else on a single line.
        return ['%s' % value]

    details = [
        'Machine Details:',
        ' Platform ID: %s' % d.get('platform', 'n/a'),
        ' Processor: %s' % d.get('processor', 'n/a'),
        '',
        'Python:',
        ' Implementation: %s' % d.get('implementation', 'n/a'),
        ' Executable: %s' % d.get('executable', 'n/a'),
        ' Version: %s' % d.get('python', 'n/a'),
        ' Compiler: %s' % d.get('compiler', 'n/a'),
        ' Architecture: %s' % d.get('architecture', 'n/a'),
        ' Build: %s (#%s)' % (d.get('builddate', 'n/a'), d.get('buildno', 'n/a')),
        ' Unicode: %s' % d.get('unicode', 'n/a'),
        '',
        'PyVISA Version: %s' % d.get('pyvisa', 'n/a'),
        '',
    ]
    details.extend(_render('Backends', d['backends']))
    separator = '\n' + indent
    return indent + separator.join(details) + '\n'
def test_notify_exception(pytester: Pytester, capfd) -> None:
    """notify_exception writes to stderr unless a plugin claims the internal error."""
    config = pytester.parseconfig()
    with pytest.raises(ValueError) as excinfo:
        raise ValueError(1)
    config.notify_exception(excinfo, config.option)
    _, err = capfd.readouterr()
    assert 'ValueError' in err

    class SilencingPlugin:
        # Returning True claims the internal error, suppressing terminal output.
        def pytest_internalerror(self):
            return True

    config.pluginmanager.register(SilencingPlugin())
    config.notify_exception(excinfo, config.option)
    _, err = capfd.readouterr()
    assert not err

    # Without the terminal plugin, a fallback still writes the error to stderr.
    config = pytester.parseconfig('-p', 'no:terminal')
    with pytest.raises(ValueError) as excinfo:
        raise ValueError(1)
    config.notify_exception(excinfo, config.option)
    _, err = capfd.readouterr()
    assert 'ValueError' in err
class ShardedIterator(CountingIterator):
    """Wrap an iterable so each shard sees every num_shards-th element.

    Shorter shards are padded with ``fill_value`` so all shards yield the same
    number of items.
    """

    def __init__(self, iterable, num_shards, shard_id, fill_value=None):
        if not (0 <= shard_id < num_shards):
            raise ValueError('shard_id must be between 0 and num_shards')
        # Length of the longest shard (others are padded up to it).
        sharded_len = int(math.ceil(len(iterable) / float(num_shards)))
        # Elements belonging to this shard: every num_shards-th item,
        # starting at shard_id.
        shard_items = itertools.islice(iterable, shard_id, len(iterable), num_shards)
        # Pad with fill_value by zipping against a range of the target length.
        padded = itertools.zip_longest(range(sharded_len), shard_items, fillvalue=fill_value)
        itr = map(operator.itemgetter(1), padded)
        super().__init__(
            itr,
            start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))),
            total=sharded_len,
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.