code stringlengths 281 23.7M |
|---|
def load(weights_path='/Users/xiaofeng/Code/Github/dataset/CHINESE_OCR/angle/modelAngle.h5'):
    """Build the VGG16-based text-angle classifier and load pretrained weights.

    Args:
        weights_path: Path to the HDF5 weights file. Defaults to the original
            hard-coded location for backward compatibility; callers can now
            supply their own path instead of editing the source.

    Returns:
        A compiled Keras ``Model`` mapping a (224, 224, 3) image to 4
        softmax angle classes.
    """
    vgg = VGG16(weights=None, input_shape=(224, 224, 3))
    # Take the second-to-last layer's output: drop VGG's own classifier head.
    x = vgg.layers[-2].output
    predictions_class = Dense(4, activation='softmax', name='predictions_class')(x)
    model = Model(inputs=vgg.input, outputs=[predictions_class])
    # NOTE(review): `lr=` is deprecated in newer Keras (`learning_rate=`);
    # kept as-is to match the Keras version this file targets.
    sgd = SGD(lr=1e-05, momentum=0.9)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    model.load_weights(weights_path)
    return model
def run(dataset_dir):
    """Convert the raw MNIST files in ``dataset_dir`` into TFRecord datasets."""
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)
    training_filename = _get_output_filename(dataset_dir, 'train')
    testing_filename = _get_output_filename(dataset_dir, 'test')
    # Nothing to do when both record files are already present.
    if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
        print('Dataset files already exist. Exiting without re-creating them.')
        return
    _download_dataset(dataset_dir)
    # (output file, images file, labels file, sample count) for each split.
    splits = (
        (training_filename, _TRAIN_DATA_FILENAME, _TRAIN_LABELS_FILENAME, 60000),
        (testing_filename, _TEST_DATA_FILENAME, _TEST_LABELS_FILENAME, 10000),
    )
    for out_name, data_name, labels_name, count in splits:
        with tf.python_io.TFRecordWriter(out_name) as tfrecord_writer:
            _add_to_tfrecord(os.path.join(dataset_dir, data_name),
                             os.path.join(dataset_dir, labels_name),
                             count, tfrecord_writer)
    labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
    _clean_up_temporary_files(dataset_dir)
    print('\nFinished converting the MNIST dataset!')
class FakeSubscriptionApi(object):
    """In-memory stand-in for the subscription service, for use in tests.

    Records whether an entitlement was created or a subscription extended so
    tests can assert on the interactions.
    """

    def __init__(self):
        self.subscription_extended = False  # flipped by extend_subscription()
        self.subscription_created = False   # flipped by create_entitlement()

    def lookup_subscription(self, customer_id, sku_id):
        """Pretend no existing subscription is ever found."""
        return None

    def create_entitlement(self, customer_id, sku_id):
        """Record that an entitlement was created."""
        self.subscription_created = True

    def extend_subscription(self, subscription_id, end_date):
        """Record that a subscription was extended."""
        self.subscription_extended = True

    def get_subscription_sku(self, subscription_id):
        """Return the fake SKU for the known test subscription id, else None."""
        # BUG FIX: the original compared the builtin `id` function against
        # 12345 (always False); compare the `subscription_id` argument.
        if subscription_id == 12345:
            return 'FakeSku'
        return None

    def get_list_of_subscriptions(self, account_number, filter_out_org_bindings=False, convert_to_stripe_plans=False):
        """Return one fake subscription for the dev account, else nothing."""
        if account_number == DEV_ACCOUNT_NUMBER:
            return [{'id': 12345, 'sku': 'FakeSku', 'privateRepos': 0}]
        return []
class Migration(migrations.Migration):
    """Make Project.catalog nullable and detach (SET_NULL) when the catalog is deleted."""

    dependencies = [
        ('projects', '0042_allow_site_null'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='catalog',
            field=models.ForeignKey(
                help_text='The catalog which will be used for this project.',
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name='projects',
                to='questions.Catalog',
                verbose_name='Catalog',
            ),
        ),
    ]
# BUG FIX: the strategy tuple below was a bare expression; it must be applied
# as a hypothesis `@given(...)` decorator for the test to receive arguments.
@given(
    st.builds(Download, timestamp=st.shared(st.dates(), key='extract-item-data').map(lambda i: arrow.Arrow.fromdate(i))),
    st.shared(st.dates(), key='extract-item-data').map(lambda i: f'{i.year:04}{i.month:02}{i.day:02}'),
)
def test_extract_item_data(download, expected):
    """Property test: extract_item_date renders a Download's timestamp as YYYYMMDD."""
    assert extract_item_date(download) == expected
class CornerPool(nn.Module):
    """Corner pooling layer (bottom/left/right/top) from CornerNet.

    On PyTorch >= 1.5 the pooling is expressed as flip + ``torch.cummax`` +
    flip along the mode's spatial dimension; otherwise (or on parrots) it
    falls back to the custom autograd Function for the selected mode.
    """

    pool_functions = {'bottom': BottomPoolFunction, 'left': LeftPoolFunction, 'right': RightPoolFunction, 'top': TopPoolFunction}
    # mode -> (dimension to scan along, whether to flip before/after cummax)
    cummax_dim_flip = {'bottom': (2, False), 'left': (3, True), 'right': (3, False), 'top': (2, True)}

    def __init__(self, mode):
        super(CornerPool, self).__init__()
        assert mode in self.pool_functions
        self.mode = mode
        self.corner_pool = self.pool_functions[mode]

    @staticmethod
    def _version_at_least(version_str, minimum):
        """Numeric comparison of a dotted version prefix against a tuple.

        Parses leading digits of each dot-separated token (local/dev suffixes
        such as '+git...' or 'a0' are ignored) so '1.10.0' > '1.5.0'.
        """
        parts = []
        for token in version_str.split('+')[0].split('.'):
            digits = ''
            for ch in token:
                if ch.isdigit():
                    digits += ch
                else:
                    break
            if not digits:
                break
            parts.append(int(digits))
        return tuple(parts) >= minimum

    def forward(self, x):
        # BUG FIX: the original compared torch.__version__ lexicographically
        # ('1.10.0' < '1.5.0' as strings); use numeric comparison instead.
        if torch.__version__ != 'parrots' and self._version_at_least(torch.__version__, (1, 5)):
            if torch.onnx.is_in_onnx_export():
                assert self._version_at_least(torch.__version__, (1, 7)), "When `cummax` serves as an intermediate component whose outputs is used as inputs for another modules, it's expected that pytorch version must be >= 1.7.0, otherwise Error appears like: `RuntimeError: tuple appears in op that does not forward tuples, unsupported kind: prim::PythonOp`."
            dim, flip = self.cummax_dim_flip[self.mode]
            if flip:
                x = x.flip(dim)
            pool_tensor, _ = torch.cummax(x, dim=dim)
            if flip:
                pool_tensor = pool_tensor.flip(dim)
            return pool_tensor
        else:
            return self.corner_pool.apply(x)
class EigenQuaternionPrinter():
    """GDB pretty-printer for Eigen::Quaternion values.

    Exposes the four coefficients (x, y, z, w) as children and prints a
    summary string containing the scalar type and the raw data pointer.
    """

    def __init__(self, val):
        # `val` is the gdb.Value being printed.
        type = val.type
        # Strip references, qualifiers and typedefs to reach the real type.
        if (type.code == gdb.TYPE_CODE_REF):
            type = type.target()
        self.type = type.unqualified().strip_typedefs()
        # The first template argument is the quaternion's scalar type.
        self.innerType = self.type.template_argument(0)
        self.val = val
        # Walk into the coefficient storage (m_coeffs is an Eigen Matrix whose
        # plain array holds the four coefficients) and cast the array to a
        # scalar pointer so it can be stepped element by element.
        self.data = self.val['m_coeffs']['m_storage']['m_data']['array']
        self.data = self.data.cast(self.innerType.pointer())

    class _iterator():
        """Iterates the four coefficients, yielding ('[name]', value) pairs."""

        def __init__(self, dataPtr):
            self.dataPtr = dataPtr
            self.currentElement = 0
            # Eigen stores quaternion coefficients in (x, y, z, w) order.
            self.elementNames = ['x', 'y', 'z', 'w']

        def __iter__(self):
            return self

        def next(self):
            # Python 2 compatibility shim delegating to __next__.
            return self.__next__()

        def __next__(self):
            element = self.currentElement
            if (self.currentElement >= 4):
                raise StopIteration
            self.currentElement = (self.currentElement + 1)
            # Dereference the current coefficient, then advance the pointer.
            item = self.dataPtr.dereference()
            self.dataPtr = (self.dataPtr + 1)
            return (('[%s]' % (self.elementNames[element],)), item)

    def children(self):
        # GDB calls this to enumerate the printed value's sub-items.
        return self._iterator(self.data)

    def to_string(self):
        return ('Eigen::Quaternion<%s> (data ptr: %s)' % (self.innerType, self.data))
def test_one_hot():
    """one_hot should return a binary indicator matrix plus the sorted labels."""
    y = np.repeat([0.0, 1.0, 2.0], 10)
    Y, labels = one_hot(y)
    # The encoding may contain only zeros and ones.
    assert set(np.unique(Y)).issubset({0, 1})
    # Labels are exactly the distinct values of y.
    assert np.all(labels == np.unique(y))
    # One row per sample, one column per distinct label.
    assert Y.shape[0] == y.shape[0]
    assert Y.shape[1] == len(labels)
def init_lmhead_dense_buffer():
    """Allocate the global memory buffer used by the LM-head dense layer.

    The buffer holds one SUMMA partition of the activations:
    (batch / summa_dim) * seq_length * (hidden / summa_dim) elements of
    args.params_dtype. May only be called once per process.
    """
    global _LMHEAD_DENSE_BUFFER
    args = get_args()
    rows = args.batch_size // args.summa_dim
    cols = args.hidden_size // args.summa_dim
    assert _LMHEAD_DENSE_BUFFER is None, '_LMHEAD_DENSE_BUFFER is already initialized'
    numel = rows * args.seq_length * cols
    _LMHEAD_DENSE_BUFFER = allocate_mem_buff('lm-head dense buffer', numel, args.params_dtype, track_usage=False)
class FxDialog(Factory.Popup):
    """Kivy popup for configuring fiat exchange-rate settings.

    Lets the user pick a fiat currency and a rate source (exchange) and
    toggle fetching of historical rates; all changes are pushed into the
    app's ``fx`` manager.
    """

    def __init__(self, app, plugins, config, callback):
        self.app = app
        self.config = config
        self.callback = callback
        self.fx = self.app.fx
        # First run: no stored preference yet, so default history rates to on.
        if (self.fx.get_history_config(allow_none=True) is None):
            self.fx.set_history_config(True)
        self.has_history_rates = self.fx.get_history_config()
        Factory.Popup.__init__(self)
        self.add_currencies()

    def add_exchanges(self):
        """Populate the exchange spinner with sources for the active currency."""
        ex = self.ids.exchanges
        if self.fx.is_enabled():
            exchanges = sorted(self.fx.get_exchanges_by_ccy(self.fx.get_currency(), self.has_history_rates))
            mx = self.fx.exchange.name()
            # Keep the currently configured exchange when it supports the
            # selected currency; otherwise fall back to the first available.
            if (mx in exchanges):
                ex.text = mx
            elif exchanges:
                ex.text = exchanges[0]
            else:
                ex.text = ''
        else:
            exchanges = []
            ex.text = ''
        ex.values = exchanges

    def on_exchange(self, text):
        """Spinner callback: switch the rate source when it actually changed."""
        if (not text):
            return
        if (self.fx.is_enabled() and (text != self.fx.exchange.name())):
            self.fx.set_exchange(text)

    def add_currencies(self):
        """Populate the currency spinner; _('None') disables fiat display."""
        currencies = ([_('None')] + self.fx.get_currencies(self.has_history_rates))
        my_ccy = (self.fx.get_currency() if self.fx.is_enabled() else _('None'))
        self.ids.ccy.values = currencies
        self.ids.ccy.text = my_ccy

    def on_checkbox_history(self, checked):
        """Checkbox callback: toggle historical rates and refresh the lists."""
        self.fx.set_history_config(checked)
        self.has_history_rates = checked
        self.add_currencies()
        self.on_currency(self.ids.ccy.text)

    def on_currency(self, ccy):
        """Spinner callback: enable/disable fiat display and set the currency."""
        b = (ccy != _('None'))
        self.fx.set_enabled(b)
        if b:
            if (ccy != self.fx.get_currency()):
                self.fx.set_currency(ccy)
            self.app.fiat_unit = ccy
        else:
            self.app.is_fiat = False
        # Refresh the exchange list on the next frame, after fx state settles.
        Clock.schedule_once((lambda dt: self.add_exchanges()))
class Adafactor(Optimizer):
    """Adafactor optimizer (Shazeer & Stern, 2018) with factored second moments.

    For parameters with >= 2 dims, stores row/column statistics instead of the
    full second-moment tensor, giving sublinear memory cost. Supports relative
    step sizes, parameter scaling, warmup initialization and optional
    first-moment (momentum) tracking via ``beta1``.
    """

    def __init__(self, params, lr=None, eps=(1e-30, 0.001), clip_threshold=1.0, decay_rate=(-0.8), beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False):
        require_version('torch>=1.5.0')
        if lr is not None and relative_step:
            raise ValueError('Cannot combine manual `lr` and `relative_step=True` options')
        if warmup_init and not relative_step:
            raise ValueError('`warmup_init=True` requires `relative_step=True`')
        defaults = dict(lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init)
        super().__init__(params, defaults)

    # BUG FIX: the four helpers below take no `self`, yet were declared as
    # plain methods; calls such as `self._get_lr(group, state)` would have
    # bound `self` to the first parameter. Restore @staticmethod.
    @staticmethod
    def _get_lr(param_group, param_state):
        """Effective step size for one parameter (relative step + RMS scaling)."""
        rel_step_sz = param_group['lr']
        if param_group['relative_step']:
            min_step = (1e-06 * param_state['step']) if param_group['warmup_init'] else 0.01
            rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state['step']))
        param_scale = 1.0
        if param_group['scale_parameter']:
            param_scale = max(param_group['eps'][1], param_state['RMS'])
        return param_scale * rel_step_sz

    @staticmethod
    def _get_options(param_group, param_shape):
        """Return (use factored second moment, track first moment)."""
        factored = len(param_shape) >= 2
        use_first_moment = param_group['beta1'] is not None
        return (factored, use_first_moment)

    @staticmethod
    def _rms(tensor):
        """Root-mean-square of a tensor."""
        return tensor.norm(2) / (tensor.numel() ** 0.5)

    @staticmethod
    def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):
        """Reconstruct the squared-gradient estimate from the row/col factors."""
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
        return torch.mul(r_factor, c_factor)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                # Work in fp32 even for fp16/bf16 grads for numeric stability.
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError('Adafactor does not support sparse gradients.')
                state = self.state[p]
                grad_shape = grad.shape
                factored, use_first_moment = self._get_options(group, grad_shape)
                if len(state) == 0:
                    # Lazy state initialization on the first step.
                    state['step'] = 0
                    if use_first_moment:
                        state['exp_avg'] = torch.zeros_like(grad)
                    if factored:
                        state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)
                        state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(grad)
                    state['RMS'] = 0
                else:
                    # Keep state tensors on the grad's device/dtype.
                    if use_first_moment:
                        state['exp_avg'] = state['exp_avg'].to(grad)
                    if factored:
                        state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
                        state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
                    else:
                        state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
                p_data_fp32 = p.data
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()
                state['step'] += 1
                state['RMS'] = self._rms(p_data_fp32)
                lr = self._get_lr(group, state)
                beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
                update = (grad ** 2) + group['eps'][0]
                if factored:
                    exp_avg_sq_row = state['exp_avg_sq_row']
                    exp_avg_sq_col = state['exp_avg_sq_col']
                    # Exponential moving averages of row/column means of grad^2.
                    exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t))
                    exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t))
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state['exp_avg_sq']
                    exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
                    update = exp_avg_sq.rsqrt().mul_(grad)
                # Clip the update's RMS at clip_threshold, then scale by lr.
                update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
                update.mul_(lr)
                if use_first_moment:
                    exp_avg = state['exp_avg']
                    exp_avg.mul_(group['beta1']).add_(update, alpha=(1 - group['beta1']))
                    update = exp_avg
                if group['weight_decay'] != 0:
                    # Decoupled weight decay, scaled by the effective lr.
                    p_data_fp32.add_(p_data_fp32, alpha=(-group['weight_decay'] * lr))
                p_data_fp32.add_(-update)
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)
        return loss
def remove_spectral_norm(module):
    """Remove the spectral-normalization pre-hook named 'weight' from `module`.

    Returns the module with the hook removed. Raises ValueError when no such
    hook is registered on the module.
    """
    name = 'weight'
    for key, hook in module._forward_pre_hooks.items():
        if not isinstance(hook, SpectralNorm):
            continue
        if hook.name != name:
            continue
        # Let the hook restore the original parameter, then unregister it.
        hook.remove(module)
        del module._forward_pre_hooks[key]
        return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
class QuantizableBasicConv2d(inception_module.BasicConv2d):
    """BasicConv2d variant whose conv/bn/relu stack can be fused for quantization."""

    def __init__(self, *args, **kwargs):
        super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)
        # Explicit ReLU module (instead of functional relu) so it is fusable.
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))

    def fuse_model(self):
        """Fuse conv + bn + relu in place for quantized inference."""
        torch.quantization.fuse_modules(self, ['conv', 'bn', 'relu'], inplace=True)
class InlineInputLocation():
    """A geographic point (latitude/longitude) for inline queries.

    `live_period`, when given, is how long the location may be updated.
    """

    def __init__(self, latitude, longitude, live_period=None):
        self.latitude = latitude
        self.longitude = longitude
        self.live_period = live_period

    def _serialize(self):
        """Return the API payload dict; live_period is included only when set."""
        payload = {'latitude': self.latitude, 'longitude': self.longitude}
        if self.live_period is None:
            return payload
        payload['live_period'] = self.live_period
        return payload
def write_color_old(text, attr=None):
    """Split `text` on ANSI escape sequences and attach attribute codes.

    Returns a list of ('0xNN', chunk) tuples where NN is the accumulated
    attribute bitmask in effect for that chunk of plain text.
    NOTE(review): the bit layout (8 = bold/bright, 16384 = reverse video,
    BGR-swapped color bits) appears to target a console attribute word —
    confirm against the consumer of this list.
    """
    res = []
    chunks = terminal_escape.split(text)
    n = 0
    # Default attribute when none given (0xF).
    if (attr is None):
        attr = 15
    for chunk in chunks:
        m = escape_parts.match(chunk)
        if m:
            # Chunk is an escape sequence: fold each ';'-separated SGR code
            # into the attribute word instead of emitting text.
            for part in m.group(1).split(u';'):
                if (part == u'0'):
                    # SGR 0: reset all attributes.
                    attr = 0
                elif (part == u'7'):
                    # SGR 7: reverse video.
                    attr |= 16384
                if (part == u'1'):
                    # SGR 1: bold / bright.
                    attr |= 8
                elif ((len(part) == 2) and (u'30' <= part <= u'37')):
                    # SGR 30-37 foreground color: ANSI orders bits R,G,B while
                    # the target word expects B,G,R, so bits 0 and 2 are swapped.
                    part = (int(part) - 30)
                    attr = ((((attr & (~ 7)) | ((part & 1) << 2)) | (part & 2)) | ((part & 4) >> 2))
                elif ((len(part) == 2) and (u'40' <= part <= u'47')):
                    # SGR 40-47 background color into bits 4-6, same bit swap.
                    part = (int(part) - 40)
                    attr = ((((attr & (~ 112)) | ((part & 1) << 6)) | ((part & 2) << 4)) | ((part & 4) << 2))
            continue
        # Plain-text chunk: emit it with the current attribute value.
        n += len(chunk)
        if chunk:
            res.append(((u'0x%x' % attr), chunk))
    return res
class MBConv(nn.Module):
    """Mobile inverted bottleneck block (MBConv) as used in EfficientNet.

    Structure: 1x1 expansion -> kxk depthwise conv -> squeeze-excitation ->
    1x1 projection, with a residual connection when stride is 1 and the
    input/output channel counts match.
    """

    def __init__(self, cnf: MBConvConfig, norm_layer: Callable[(..., nn.Module)], se_layer: Callable[(..., nn.Module)]=SqueezeExcitation) -> None:
        super().__init__()
        if (not (1 <= cnf.stride <= 2)):
            raise ValueError('illegal stride value')
        # Residual only when the spatial size and channel count are preserved.
        self.use_res_connect = ((cnf.stride == 1) and (cnf.input_channels == cnf.out_channels))
        layers: List[nn.Module] = []
        activation_layer = nn.SiLU
        # 1x1 expansion conv, skipped when the expand ratio leaves channels unchanged.
        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
        if (expanded_channels != cnf.input_channels):
            layers.append(Conv2dNormActivation(cnf.input_channels, expanded_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=activation_layer))
        # Depthwise conv: groups == channels.
        layers.append(Conv2dNormActivation(expanded_channels, expanded_channels, kernel_size=cnf.kernel, stride=cnf.stride, groups=expanded_channels, norm_layer=norm_layer, activation_layer=activation_layer))
        # Squeeze-and-excitation on the expanded features; squeeze width is
        # derived from the block's *input* channels.
        squeeze_channels = max(1, (cnf.input_channels // 4))
        layers.append(se_layer(expanded_channels, squeeze_channels, activation=partial(nn.SiLU, inplace=True)))
        # 1x1 projection back to the output width, no activation.
        layers.append(Conv2dNormActivation(expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None))
        self.block = nn.Sequential(*layers)
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result += input
        return result
class Properties():
    """Container of configuration fields with helpers to slice/convert them."""

    DEFAULTS: ClassVar['Properties'] = None
    TARGET_TYPE: ClassVar[Type] = None

    def kwargs(self):
        """Return every field whose value has been set (i.e. is not EMPTY)."""
        result = {}
        for key, value in self.__dict__.items():
            if value is not EMPTY:
                result[key] = value
        return result

    def extract(self, subset_type: Type) -> 'Properties':
        """Build a `subset_type` instance from this object's matching fields."""
        names = [f.name for f in fields(subset_type)]
        return subset_type(**_partial_dict(self.__dict__, *names))

    def partial_dict(self, *args) -> Dict[(str, Any)]:
        """Return only the requested keys from this object's field dict."""
        return _partial_dict(self.__dict__, *args)
class Aggregate(Function):
    """Autograd Function computing residual aggregation (encoding layer).

    forward: E[b, k, :] = sum_i A[b, i, k] * (X[b, i, :] - C[k, :]) with
    A: (B, N, K) assignment weights, X: (B, N, D) features, C: (K, D)
    codewords, producing E: (B, K, D).
    """

    # BUG FIX: `forward`/`backward` of a torch.autograd.Function must be
    # staticmethods; the stripped decorators are restored here.
    @staticmethod
    def forward(ctx, A, X, C):
        ctx.save_for_backward(A, X, C)
        return (X.unsqueeze(2).expand(X.size(0), X.size(1), C.size(0), C.size(1)) - C.unsqueeze(0).unsqueeze(0)).mul_(A.unsqueeze(3)).sum(1)

    @staticmethod
    def backward(ctx, GE):
        # FIX: ctx.saved_variables is the long-deprecated alias of
        # ctx.saved_tensors (removed in modern PyTorch).
        (A, X, C) = ctx.saved_tensors
        gradA = (X.unsqueeze(2).expand(X.size(0), X.size(1), C.size(0), C.size(1)) - C.unsqueeze(0).unsqueeze(0)).mul_(GE.unsqueeze(1)).sum(3)
        gradX = torch.bmm(A, GE)
        gradC = A.sum(1).unsqueeze(2).mul(GE).mul_((- 1)).sum(0)
        return (gradA, gradX, gradC)
# BUG FIX: the rewriter registration was a bare call; it must be applied as a
# decorator for this function to be registered and invoked on Blockwise nodes.
@_rewriter([Blockwise])
def local_useless_unbatched_blockwise(fgraph, node):
    """Replace a Blockwise with its core op when no real batching happens.

    If every input's leading batch dimensions are all broadcastable, those
    dims are squeezed away, the core op is applied directly, and the outputs
    are left-padded back so output shapes are preserved.
    """
    op = node.op
    inputs = node.inputs
    batch_ndims = node.op.batch_ndim(node)
    if all(all(inp.type.broadcastable[:batch_ndims]) for inp in inputs):
        if batch_ndims:
            axis = tuple(range(batch_ndims))
            inputs = [inp.squeeze(axis) for inp in inputs]
        new_outs = op.core_op.make_node(*inputs).outputs
        if batch_ndims:
            new_outs = [shape_padleft(out, batch_ndims) for out in new_outs]
        return copy_stack_trace(node.outputs, new_outs)
def intersectionAndUnion(output, target, K, ignore_index=255):
    """Per-class intersection/union/target pixel counts for segmentation metrics.

    `output` and `target` are integer label arrays of identical shape
    (1-3 dims) with class ids in [0, K); positions where target equals
    `ignore_index` are excluded from all counts.

    Returns (area_intersection, area_union, area_target), each of length K.
    """
    assert output.ndim in (1, 2, 3)
    assert output.shape == target.shape
    output = output.reshape(-1).copy()
    target = target.reshape(-1)
    # Mask ignored positions so they never count as predictions of a real class.
    output[target == ignore_index] = ignore_index
    matched = output[output == target]
    bins = np.arange(K + 1)
    area_intersection = np.histogram(matched, bins=bins)[0]
    area_output = np.histogram(output, bins=bins)[0]
    area_target = np.histogram(target, bins=bins)[0]
    area_union = area_output + area_target - area_intersection
    return (area_intersection, area_union, area_target)
def fit_transform(x_text, words_dict, max_sen_len, max_doc_len):
    """Convert documents into padded word-id matrices.

    Each document is split into sentences on the '<sssss>' marker; words are
    mapped through `words_dict` (unknown words skipped), sentences truncated
    to `max_sen_len` words and documents to `max_doc_len` sentences.

    Returns (x, sen_len, doc_len):
      x: (num_docs, max_doc_len, max_sen_len) int array of word ids,
      sen_len: (num_docs, max_doc_len) words kept per sentence,
      doc_len: (num_docs,) sentences kept per document.
    """
    docs, sentence_lengths, document_lengths = [], [], []
    for doc in x_text:
        lengths = [0] * max_doc_len
        ids = np.zeros((max_doc_len, max_sen_len), dtype=int)
        n_sentences = 0
        for sentence in doc.split('<sssss>'):
            col = 0
            for word in sentence.strip().split():
                if col >= max_sen_len:
                    break
                word_id = words_dict.get(word)
                if word_id is None:
                    # Out-of-vocabulary words are dropped silently.
                    continue
                ids[n_sentences, col] = word_id
                col += 1
            lengths[n_sentences] = col
            n_sentences += 1
            if n_sentences >= max_doc_len:
                break
        document_lengths.append(n_sentences)
        sentence_lengths.append(lengths)
        docs.append(ids)
    return (np.asarray(docs), np.asarray(sentence_lengths), np.asarray(document_lengths))
class TwoRoundDeterministicRewardEnv(gym.Env):
    """Two-step environment with a deterministic payoff on the second action.

    The agent acts twice; the reward of the second step depends on both
    actions via a fixed table. Observation 2 means "no action taken yet",
    otherwise the observation is the first action (0 or 1).
    """

    def __init__(self):
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Discrete(3)
        self._reset()

    def _step(self, action):
        assert self.action_space.contains(action)
        # Payoff table indexed as [first_action][second_action].
        payoffs = [[0, 3], [1, 2]]
        if self.firstAction is None:
            # First round: remember the action, no reward yet.
            self.firstAction = action
            reward, done = 0, False
        else:
            # Second round: pay out according to both actions and finish.
            reward, done = payoffs[self.firstAction][action], True
        return (self._get_obs(), reward, done, {})

    def _get_obs(self):
        # 2 encodes "no first action yet".
        return 2 if self.firstAction is None else self.firstAction

    def _reset(self):
        self.firstAction = None
        return self._get_obs()
class MultiRC(Task):
    """SuperGLUE MultiRC: judge whether a candidate answer to a question is correct."""

    VERSION = 1
    DATASET_PATH = 'super_glue'
    DATASET_NAME = 'multirc'

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        # Cache the training split; it is iterated repeatedly.
        if self._training_docs is None:
            self._training_docs = list(self.dataset['train'])
        return self._training_docs

    def validation_docs(self):
        return self.dataset['validation']

    def doc_to_text(self, doc):
        """Render the paragraph + question prompt."""
        return f'''{doc['paragraph']}
Question: {doc['question']}
Answer:'''

    def doc_to_target(self, doc):
        return ' ' + self.format_answer(answer=doc['answer'], label=doc['label'])

    @staticmethod
    def format_answer(answer, label):
        # BUG FIX: this is invoked as `self.format_answer(answer=..., label=...)`;
        # without @staticmethod the bound call would also pass `self` into
        # `answer` and raise a TypeError.
        label_str = 'yes' if label else 'no'
        return f'''{answer}
Is the answer correct? {label_str}'''

    def construct_requests(self, doc, ctx):
        """Request log-likelihoods of the 'yes' and 'no' continuations."""
        true_choice = self.format_answer(answer=doc['answer'], label=True)
        false_choice = self.format_answer(answer=doc['answer'], label=False)
        ll_true_choice, _ = rf.loglikelihood(ctx, f' {true_choice}')
        ll_false_choice, _ = rf.loglikelihood(ctx, f' {false_choice}')
        return (ll_true_choice, ll_false_choice)

    def process_results(self, doc, results):
        ll_true_choice, ll_false_choice = results
        # Predict "yes" when it is the more likely continuation.
        pred = ll_true_choice > ll_false_choice
        return {'acc': (pred, doc)}

    def higher_is_better(self):
        return {'acc': True}

    def aggregation(self):
        return {'acc': acc_all}
class Layer():
    """Descriptor of a single network layer considered for compression."""

    def __init__(self, module, name, weight_shape, output_shape):
        self.module = module
        self.name = str(name)
        self.weight_shape = weight_shape
        self.output_shape = output_shape
        # Flipped externally when this layer is selected for compression.
        self.picked_for_compression = False
        self.type_specific_params = None
        self._set_type_specific_params(module)

    def _set_type_specific_params(self, module):
        """Populate self.type_specific_params for layer types that need it.

        BUG FIX: the original method had no body at all (a syntax error);
        the base implementation is an intentional no-op hook for subclasses.
        """
def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs):
    """Build a HardCoReNAS model (MobileNetV3 family) from an arch definition.

    Args:
        pretrained: whether to load pretrained weights.
        variant: model variant name used to look up the pretrained config.
        arch_def: textual block-architecture definition to decode.
        **kwargs: forwarded model kwargs; `features_only=True` switches to the
            feature-extraction variant.

    Returns:
        The constructed model (MobileNetV3 or MobileNetV3Features).
    """
    num_features = 1280
    se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        num_features=num_features,
        stem_size=32,
        norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'hard_swish'),
        se_layer=se_layer,
        **kwargs,
    )
    features_only = False
    model_cls = MobileNetV3
    kwargs_filter = None
    if model_kwargs.pop('features_only', False):
        features_only = True
        # FIX: 'global_pool' was listed twice in the original filter tuple.
        kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias')
        model_cls = MobileNetV3Features
    model = build_model_with_cfg(model_cls, variant, pretrained, pretrained_strict=(not features_only), kwargs_filter=kwargs_filter, **model_kwargs)
    if features_only:
        # Feature-extraction models need their config trimmed to match.
        model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
    return model
class JointTestOptions(BoxToMaskOptions):
    """Test-time options for the joint box-to-mask + pix2pixHD pipeline."""

    def initialize(self):
        """Register test-phase and pix2pixHD arguments on top of the base options."""
        BoxToMaskOptions.initialize(self)
        self.parser.add_argument('--ntest', type=int, default=float('inf'))
        # BUG FIX: was `add_arugment`, which raises AttributeError at runtime.
        self.parser.add_argument('--results_dir', type=str, default='results/')
        self.parser.add_argument('--aspect_ratio', type=float, default=1.0)
        self.parser.add_argument('--phase', type=str, default='test')
        self.parser.add_argument('--how_many', type=int, default=50)
        self.parser.add_argument('--num_samples', type=int, default=1)
        self.parser.add_argument('--which_epoch', type=str, default='latest')
        self.parser.add_argument('--pix2pix_name', type=str, default='label2city')
        self.parser.add_argument('--pix2pix_model', type=str, default='CVAE_imggen')
        self.parser.add_argument('--pix2pix_norm', type=str, default='instance')
        self.parser.add_argument('--pix2pix_use_dropout', action='store_true', help='use dropout for the generator')
        self.parser.add_argument('--pix2pix_input_layout', action='store_true', help='input the layout in recognition model')
        self.parser.add_argument('--pix2pix_batchSize', type=int, default=1, help='input batch size')
        self.parser.add_argument('--pix2pix_loadSize', type=int, default=1024, help='scale images to this size')
        self.parser.add_argument('--pix2pix_fineSize', type=int, default=512, help='then crop to this size')
        self.parser.add_argument('--pix2pix_label_nc', type=int, default=35, help='# of input image channels')
        self.parser.add_argument('--pix2pix_output_nc', type=int, default=3, help='# of output image channels')
        self.parser.add_argument('--pix2pix_netG', type=str, default='global', help='selects model to use for netG')
        self.parser.add_argument('--pix2pix_ngf', type=int, default=64, help='# of gen filters in first conv layer')
        self.parser.add_argument('--pix2pix_n_downsample_global', type=int, default=4, help='number of downsampling layers in netG')
        self.parser.add_argument('--pix2pix_n_blocks_global', type=int, default=9, help='number of residual blocks in the global generator network')
        self.parser.add_argument('--pix2pix_n_blocks_local', type=int, default=3, help='number of residual blocks in the local enhancer network')
        self.parser.add_argument('--pix2pix_n_local_enhancers', type=int, default=1, help='number of local enhancers to use')
        self.parser.add_argument('--pix2pix_niter_fix_global', type=int, default=0, help='number of epochs that we only train the outmost local enhancer')
        self.parser.add_argument('--pix2pix_instance_feat', action='store_true', help='if specified, add encoded instance features as input')
        self.parser.add_argument('--pix2pix_label_feat', action='store_true', help='if specified, add encoded label features as input')
        self.parser.add_argument('--pix2pix_feat_num', type=int, default=3, help='vector length for encoded features')
        self.parser.add_argument('--pix2pix_load_features', action='store_true', help='if specified, load precomputed feature maps')
        self.parser.add_argument('--pix2pix_n_downsample_E', type=int, default=3, help='# of downsampling layers in encoder')
        self.parser.add_argument('--pix2pix_nef', type=int, default=16, help='# of encoder filters in the first conv layer')
        self.parser.add_argument('--pix2pix_n_clusters', type=int, default=10, help='number of clusters for features')
        self.parser.add_argument('--pix2pix_z_dim', type=int, default=32, help='size of latent vector z')
        self.parser.add_argument('--pix2pix_z_embed_dim', type=int, default=64, help='size of embedding vector for z')
        # Mark these options as test-phase options.
        self.isTrain = False
class NonchalantHttpxRequest(HTTPXRequest):
    """HTTPXRequest for tests that converts flaky network errors into xfails.

    RetryAfter (flood control) and TimedOut are turned into pytest.xfail so
    unrelated network hiccups don't fail the suite.
    """

    async def _request_wrapper(self, method: str, url: str, request_data: Optional[RequestData]=None, read_timeout: ODVInput[float]=DEFAULT_NONE, connect_timeout: ODVInput[float]=DEFAULT_NONE, write_timeout: ODVInput[float]=DEFAULT_NONE, pool_timeout: ODVInput[float]=DEFAULT_NONE) -> bytes:
        try:
            return (await super()._request_wrapper(method=method, url=url, request_data=request_data, read_timeout=read_timeout, write_timeout=write_timeout, connect_timeout=connect_timeout, pool_timeout=pool_timeout))
        except RetryAfter as e:
            # The API asked us to back off; skip rather than sleep in tests.
            pytest.xfail(f'Not waiting for flood control: {e}')
        except TimedOut as e:
            pytest.xfail(f'Ignoring TimedOut error: {e}')
def to_string(decorated_class):
    """Class decorator installing a generic __str__ listing public attributes.

    Attribute values are read from the instance dict when present, otherwise
    from the class dict; callables and underscore-prefixed names are skipped.
    """
    def __str__(self):
        def lookup(attr):
            # Instance attributes shadow class attributes.
            if attr in self.__dict__:
                return self.__dict__[attr]
            return decorated_class.__dict__[attr]
        names = [
            attr for attr in dir(self)
            if not attr.startswith('_') and not hasattr(lookup(attr), '__call__')
        ]
        rendered = ', '.join(f'{attr}={lookup(attr)}' for attr in names)
        return f'{decorated_class.__name__}[{rendered}]'
    decorated_class.__str__ = __str__
    return decorated_class
def test_postcmd(capsys):
    """Each registered postcmd hook adds one counted call per command.

    Runs the same command with 0, 1, and 2 extra hooks registered; the
    built-in postcmd always fires once, so counts go 1, 2, 3.
    """
    app = PluggedApp()
    for expected_calls in (1, 2, 3):
        if expected_calls > 1:
            app.reset_counters()
            app.register_postcmd_hook(app.postcmd_hook)
        app.onecmd_plus_hooks('say hello')
        out, err = capsys.readouterr()
        assert out == 'hello\n'
        assert not err
        assert app.called_postcmd == expected_calls
def define_D(input_nc, ndf, which_model_netD, n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=None):
    """Create and initialize a GAN discriminator.

    Args:
        input_nc: number of input image channels.
        ndf: number of filters in the first conv layer.
        which_model_netD: 'basic' (fixed 3-layer), 'n_layers' (uses
            n_layers_D), or 'pixel' (1x1 PatchGAN).
        n_layers_D: conv layers for the 'n_layers' variant.
        norm: normalization layer type name.
        use_sigmoid: append a sigmoid to the output (for vanilla GAN loss).
        init_type: weight-initialization scheme.
        gpu_ids: GPU ids to place the model on; None/empty means CPU.

    Returns:
        The initialized discriminator network.

    Raises:
        NotImplementedError: for an unrecognized which_model_netD.
    """
    # FIX: avoid a mutable list as a default argument.
    if gpu_ids is None:
        gpu_ids = []
    netD = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert torch.cuda.is_available()
    if which_model_netD == 'basic':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'pixel':
        netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError(('Discriminator model name [%s] is not recognized' % which_model_netD))
    if use_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
def _create_pr_per_tolerance_graph(pr_data_frame: DataFrame, methods: List[str]) -> Figure:
    """Build a precision/recall line figure with a dropdown to switch tolerance.

    The figure initially shows the first tolerance found in `pr_data_frame`;
    each dropdown button restyles the per-method traces with the precision,
    recall and threshold values of the selected tolerance.
    """
    tolerances = _extract_tolerances(pr_data_frame, methods)
    # Render the first tolerance by default.
    active_tolerance = tolerances[0]
    active_pr_data_frame = pr_data_frame[(pr_data_frame[SpottingEvaluation.TOLERANCE] == active_tolerance)]
    fig = px.line(active_pr_data_frame, y=SpottingEvaluation.PRECISION, x=SpottingEvaluation.RECALL, text=SpottingEvaluation.THRESHOLD, color=METHOD, category_orders={METHOD: methods})
    buttons = []
    for tolerance in tolerances:
        (tolerance_precisions, tolerance_recalls, tolerance_thresholds) = _tolerance_precision_recall(pr_data_frame, tolerance, methods)
        # 'restyle' swaps the per-trace data arrays of the existing figure.
        args = [{'y': tolerance_precisions, 'x': tolerance_recalls, 'text': tolerance_thresholds, 'labels': methods}]
        button_label = f'Tolerance: {tolerance}'
        new_button = dict(method='restyle', label=button_label, visible=True, args=args)
        buttons.append(new_button)
    fig.update_traces(mode='markers+lines')
    update_menus = [dict(buttons=buttons, direction='down', showactive=True, active=0)]
    fig.update_layout(title_text=PR_PER_TOLERANCE_GRAPH_TITLE, xaxis_title=RECALL_AXIS_TITLE, yaxis_title=PRECISION_AXIS_TITLE, height=PR_PER_TOLERANCE_GRAPH_HEIGHT, width=PR_PER_TOLERANCE_GRAPH_WIDTH, legend_title_text='', updatemenus=update_menus, xaxis_range=PR_RECALL_RANGE, yaxis_range=PR_PRECISION_RANGE)
    return fig
def _get_heuristic_col_headers(adjusted_table, row_index, col_index):
adjusted_cell = adjusted_table[row_index][col_index]
adjusted_col_start = adjusted_cell['adjusted_col_start']
adjusted_col_end = adjusted_cell['adjusted_col_end']
col_headers = []
for r in range(0, row_index):
row = adjusted_table[r]
for cell in row:
if ((cell['adjusted_col_start'] < adjusted_col_end) and (cell['adjusted_col_end'] > adjusted_col_start)):
if cell['is_header']:
col_headers.append(cell)
return col_headers |
def main():
    """CLI: power VMs on/off on an ESXi hypervisor, filtered by a name keyword."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-k', '--keyword', help='VM search parameter')
    parser.add_argument('-p1', '--powerOn', help='power on', action='store_true')
    parser.add_argument('-p0', '--powerOff', help='power off', action='store_true')
    parser.add_argument('hypervisorConfig', help='json hypervisor config')
    args = parser.parse_args()
    vmServer = vm_automation.esxiServer.createFromFile(args.hypervisorConfig, './power.log')
    # FIX: compare against None with `is`/`is not` instead of equality.
    if vmServer is not None:
        vmServer.enumerateVms()
        for vm in vmServer.vmList:
            # No keyword means "all VMs"; otherwise substring match on the name.
            if args.keyword is None or args.keyword in vm.vmName:
                if args.powerOn:
                    vm.powerOn()
                if args.powerOff:
                    # Prefer a clean guest shutdown when VMware tools are up.
                    if vm.checkTools() == 'TOOLS_READY' and vm.isPoweredOn():
                        vm.vmObject.ShutdownGuest()
                    else:
                        vm.powerOff()
def measure_time(net, input, n_times):
    """Average forward-pass latency of `net` on `input`, in milliseconds.

    Runs 20 warm-up iterations (excluded from timing), then `n_times` timed
    iterations with CUDA synchronization around each forward pass.
    """
    net.eval()
    warm_up = 20
    total = 0
    for iteration in range(warm_up + n_times):
        torch.cuda.synchronize()
        start = time.perf_counter()
        net(input)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start
        if iteration >= warm_up:
            total += elapsed
    return total * 1000 / n_times
def test_gitlab_attribute_get():
    """GitlabAttribute: get/set round-trip and API key/value rendering."""
    attr = types.GitlabAttribute('whatever')
    assert attr.get() == 'whatever'
    # CLI-sourced values replace the stored one.
    attr.set_from_cli('whatever2')
    assert attr.get() == 'whatever2'
    assert attr.get_for_api(key='spam') == ('spam', 'whatever2')
    # A fresh attribute with no value starts out as None.
    empty = types.GitlabAttribute()
    assert empty._value is None
def test_yield_logs_for_export(first_model, second_model, combined_model, initialized_db):
    """Combined-model export yields exactly the union of both sub-models' logs.

    NOTE(review): the final equality assert assumes the combined model
    preserves sub-model order (first, then second) — confirm in the model.
    """
    now = datetime.now()
    # Freeze time so every entry falls inside the [now, now+60min) window.
    with freeze_time(now):
        # Three entries in the first backing model, two in the second.
        first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple', ip='1.2.3.4')
        first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple', ip='1.2.3.4')
        first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple', ip='1.2.3.4')
        second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple', ip='1.2.3.4')
        second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple', ip='1.2.3.4')
    later = (now + timedelta(minutes=60))
    # yield_logs_for_export yields batches; take the first batch of each.
    first_logs = list(first_model.yield_logs_for_export(now, later))[0]
    second_logs = list(second_model.yield_logs_for_export(now, later))[0]
    combined = list(combined_model.yield_logs_for_export(now, later))
    full_combined = []
    for subset in combined:
        full_combined.extend(subset)
    assert (len(full_combined) == (len(first_logs) + len(second_logs)))
    assert (full_combined == (first_logs + second_logs))
class LazyI18nString():
    """A string that may carry several language variants and localizes lazily.

    ``data`` may be a plain string, a dict mapping language codes to strings,
    or a JSON-encoded dict (decoded on construction). Rendering picks the
    best match for the requested language with sensible fallbacks.
    """

    def __init__(self, data: Optional[Union[(str, Dict[(str, str)])]]):
        self.data = data
        # A JSON-encoded dict of translations is accepted and decoded;
        # anything that fails to parse is kept as a plain string.
        if isinstance(self.data, str):
            try:
                j = json.loads(self.data)
            except ValueError:
                pass
            else:
                self.data = j

    def __str__(self) -> str:
        return self.localize(translation.get_language() or settings.LANGUAGE_CODE)

    def __bool__(self) -> bool:
        # Falsy when there is no data, or a dict with only empty values.
        if not self.data:
            return False
        if isinstance(self.data, dict):
            return any(self.data.values())
        return True

    def localize(self, lng: str) -> str:
        """Return the best translation for `lng`.

        Fallback order: exact code, base language ('de' for 'de-AT'), a
        similar regional variant, settings.LANGUAGE_CODE, then any non-empty
        value, finally ''.
        """
        if self.data is None:
            return ''
        if isinstance(self.data, dict):
            firstpart = lng.split('-')[0]
            similar = [l for l in self.data.keys() if ((l.startswith((firstpart + '-')) or (firstpart == l)) and (l != lng))]
            if self.data.get(lng):
                return self.data[lng]
            elif self.data.get(firstpart):
                return self.data[firstpart]
            elif similar and any([self.data.get(s) for s in similar]):
                for s in similar:
                    if self.data.get(s):
                        return self.data.get(s)
            elif self.data.get(settings.LANGUAGE_CODE):
                return self.data[settings.LANGUAGE_CODE]
            else:
                filled = [f for f in self.data.values() if f]
                if filled:
                    return filled[0]
                else:
                    return ''
        else:
            return str(self.data)

    def map(self, f):
        """Apply `f` to every translation in place (dict data only)."""
        self.data = {k: f(v) for (k, v) in self.data.items()}

    def __repr__(self) -> str:
        return '<LazyI18nString: %s>' % repr(self.data)

    def __lt__(self, other) -> bool:
        # Compare by localized string representation.
        return str(self) < str(other)

    def __format__(self, format_spec):
        return self.__str__()

    def __eq__(self, other):
        if other is None:
            return False
        # Compare underlying data when the other side has it, else directly.
        if hasattr(other, 'data'):
            return self.data == other.data
        return self.data == other

    class LazyGettextProxy():
        """Dict-like wrapper exposing a lazy gettext string in any language."""

        def __init__(self, lazygettext):
            self.lazygettext = lazygettext

        def __getitem__(self, item):
            # Translate under the requested language's context.
            with override(item):
                return str(gettext(self.lazygettext))

        def __contains__(self, item):
            # gettext can render any language (falling back to the msgid).
            return True

        def __str__(self):
            return str(gettext(self.lazygettext))

        def __repr__(self):
            return '<LazyGettextProxy: %s>' % repr(self.lazygettext)

    # BUG FIX: restored the stripped @classmethod decorator — the body uses
    # `cls` and the method is meant to be called on the class.
    @classmethod
    def from_gettext(cls, lazygettext) -> 'LazyI18nString':
        """Build a LazyI18nString backed by a lazy gettext translation."""
        l = LazyI18nString({})
        l.data = cls.LazyGettextProxy(lazygettext)
        return l
class MyInnerGraphOp(Op, HasInnerGraph):
    """Minimal Op wrapping an inner FunctionGraph built from the given
    inner inputs/outputs; non-constant inputs become NominalVariables."""

    __props__ = ()

    def __init__(self, inner_inputs, inner_outputs):
        # Replace every non-constant inner input with a NominalVariable so
        # the inner graph is independent of the caller's variables.
        input_replacements = [(v, NominalVariable(n, v.type)) for (n, v) in enumerate(inner_inputs) if (not isinstance(v, Constant))]
        outputs = clone_replace(inner_outputs, replace=input_replacements)
        # When every input was a Constant there are no replacements and the
        # inner graph ends up with no inputs.
        (_, inputs) = (zip(*input_replacements) if input_replacements else (None, []))
        self.fgraph = FunctionGraph(inputs, outputs, clone=False)

    def make_node(self, *inputs):
        # Single output, typed like the first input.
        outputs = [inputs[0].type()]
        return Apply(self, list(inputs), outputs)

    def perform(self, *args, **kwargs):
        # This Op only exists to carry an inner graph; it is never executed.
        raise NotImplementedError('No Python implementation available.')

    def fn(self):
        # NOTE(review): likely intended as a @property in the original
        # source; decorators may have been stripped — confirm.
        raise NotImplementedError('No Python implementation available.')

    def inner_inputs(self):
        # NOTE(review): presumably a @property in the original source.
        return self.fgraph.inputs

    def inner_outputs(self):
        # NOTE(review): presumably a @property in the original source.
        return self.fgraph.outputs

    def clone(self):
        # Rebuild an equivalent Op from the current inner graph.
        return type(self)(self.fgraph.inputs, self.fgraph.outputs)
class HvcsBase():
    """Base interface for hosted-VCS integrations (GitHub/GitLab/etc.).

    Subclasses override the stubbed operations below; every stub calls
    `_not_supported` and returns a neutral default value.
    """

    # Default environment variable consulted for the API token.
    DEFAULT_ENV_TOKEN_NAME = 'HVCS_TOKEN'

    def __init__(self, remote_url: str, hvcs_domain: (str | None)=None, hvcs_api_domain: (str | None)=None, token: (str | None)=None) -> None:
        self.hvcs_domain = hvcs_domain
        self.hvcs_api_domain = hvcs_api_domain
        self.token = token
        # Only authenticate the HTTP session when a token was supplied.
        auth = (None if (not self.token) else TokenAuth(self.token))
        self._remote_url = remote_url
        self.session = build_requests_session(auth=auth)

    # NOTE(review): this bare call looks like a stripped decorator
    # (`@_cache(maxsize=1)`) memoizing the parse below — confirm against
    # the original source.
    _cache(maxsize=1)
    def _get_repository_owner_and_name(self) -> tuple[(str, str)]:
        """Parse (owner, repo) out of the configured git remote URL."""
        parsed_git_url = parse_git_url(self._remote_url)
        return (parsed_git_url.namespace, parsed_git_url.repo_name)

    def repo_name(self) -> str:
        # Repository name portion of the remote URL.
        # NOTE(review): presumably a @property in the original source.
        (_, _name) = self._get_repository_owner_and_name()
        return _name

    def owner(self) -> str:
        # Owner/namespace portion of the remote URL.
        # NOTE(review): presumably a @property in the original source.
        (_owner, _) = self._get_repository_owner_and_name()
        return _owner

    def compare_url(self, from_rev: str, to_rev: str) -> str:
        # Stub: URL comparing two revisions; '' when unsupported.
        _not_supported(self, 'compare_url')
        return ''

    def upload_dists(self, tag: str, dist_glob: str) -> int:
        # Stub: upload distribution artifacts; 0 uploads when unsupported.
        _not_supported(self, 'upload_dists')
        return 0

    def create_release(self, tag: str, release_notes: str, prerelease: bool=False) -> (int | str):
        # Stub: create a release; -1 signals "not created".
        _not_supported(self, 'create_release')
        return (- 1)

    def get_release_id_by_tag(self, tag: str) -> (int | None):
        # Stub: look up a release id; None when unsupported.
        _not_supported(self, 'get_release_id_by_tag')
        return None

    def edit_release_notes(self, release_id: int, release_notes: str) -> int:
        # Stub: edit release notes; -1 signals "not edited".
        _not_supported(self, 'edit_release_notes')
        return (- 1)

    def create_or_update_release(self, tag: str, release_notes: str, prerelease: bool=False) -> (int | str):
        # Stub: upsert a release; -1 signals "no release".
        _not_supported(self, 'create_or_update_release')
        return (- 1)

    def asset_upload_url(self, release_id: str) -> (str | None):
        # Stub: URL for uploading release assets; None when unsupported.
        _not_supported(self, 'asset_upload_url')
        return None

    def upload_asset(self, release_id: (int | str), file: str, label: (str | None)=None) -> bool:
        # Stub: upload a single asset; reports success unconditionally.
        _not_supported(self, 'upload_asset')
        return True

    def remote_url(self, use_token: bool) -> str:
        # Stub: remote URL, optionally with embedded credentials.
        _not_supported(self, 'remote_url')
        return ''

    def commit_hash_url(self, commit_hash: str) -> str:
        # Stub: web URL for a commit.
        _not_supported(self, 'commit_hash_url')
        return ''

    def pull_request_url(self, pr_number: str) -> str:
        # Stub: web URL for a pull/merge request.
        _not_supported(self, 'pull_request_url')
        return ''
def save_json_yaml(encoding_file_path: str, encodings_dict: dict):
    """Persist *encodings_dict* both as JSON and as YAML.

    The JSON document is written to *encoding_file_path* itself; the YAML
    copy goes to the same path with an extra ``.yaml`` suffix.

    Args:
        encoding_file_path: Target path for the JSON dump.
        encodings_dict: Mapping to serialize.
    """
    encoding_file_path_json = encoding_file_path
    encoding_file_path_yaml = (encoding_file_path + '.yaml')
    # Explicit UTF-8 avoids platform-dependent default encodings, which
    # matters because the YAML dump uses allow_unicode=True.
    with open(encoding_file_path_json, 'w', encoding='utf-8') as encoding_fp_json:
        json.dump(encodings_dict, encoding_fp_json, sort_keys=True, indent=4)
    with open(encoding_file_path_yaml, 'w', encoding='utf-8') as encoding_fp_yaml:
        yaml.dump(encodings_dict, encoding_fp_yaml, default_flow_style=False, allow_unicode=True)
def test_list_project_deploy_tokens(gitlab_cli, deploy_token):
    """Listing a project's deploy tokens via the CLI shows every field."""
    ret = gitlab_cli(
        ['-v', 'project-deploy-token', 'list', '--project-id', deploy_token.project_id]
    )
    assert ret.success
    # Every identifying field of the token must show up in the output.
    expected_fragments = (
        deploy_token.name,
        str(deploy_token.id),
        deploy_token.username,
        deploy_token.expires_at,
        deploy_token.scopes[0],
    )
    for fragment in expected_fragments:
        assert fragment in ret.stdout
class _PrivateActionFactory():
    """Factory that parses a PrivateAction XML element into the matching
    action class based on which child element path is present."""

    def parse_privateaction(element):
        """Dispatch *element* to the parser of the first matching action type.

        Raises:
            NotAValidElement: when no known PrivateAction child is present.
        """
        # Ordered (xpath, action class) pairs; the first xpath with any
        # match wins, mirroring the original if/elif cascade exactly.
        dispatch = (
            ('LongitudinalAction/SpeedAction/SpeedActionTarget/AbsoluteTargetSpeed', AbsoluteSpeedAction),
            ('LongitudinalAction/SpeedAction/SpeedActionTarget/RelativeTargetSpeed', RelativeSpeedAction),
            ('LongitudinalAction/LongitudinalDistanceAction', LongitudinalDistanceAction),
            ('LateralAction/LaneChangeAction/LaneChangeTarget/AbsoluteTargetLane', AbsoluteLaneChangeAction),
            ('LateralAction/LaneChangeAction/LaneChangeTarget/RelativeTargetLane', RelativeLaneChangeAction),
            ('LateralAction/LaneOffsetAction/LaneOffsetTarget/AbsoluteTargetLaneOffset', AbsoluteLaneOffsetAction),
            ('LateralAction/LaneOffsetAction/LaneOffsetTarget/RelativeTargetLaneOffset', RelativeLaneOffsetAction),
            ('LateralAction/LateralDistanceAction', LateralDistanceAction),
            ('VisibilityAction', VisibilityAction),
            ('SynchronizeAction', SynchronizeAction),
            ('ActivateControllerAction', ActivateControllerAction),
            ('ControllerAction', ControllerAction),
            ('TeleportAction', TeleportAction),
            ('RoutingAction/AssignRouteAction', AssignRouteAction),
            ('RoutingAction/FollowTrajectoryAction', FollowTrajectoryAction),
            ('RoutingAction/AcquirePositionAction', AcquirePositionAction),
            ('AppearanceAction/AnimationAction', AnimationAction),
            ('LongitudinalAction/SpeedProfileAction', SpeedProfileAction),
            ('AppearanceAction/LightStateAction', LightStateAction),
        )
        for xpath, action_cls in dispatch:
            if element.findall(xpath):
                return action_cls.parse(element)
        raise NotAValidElement('element ', element, 'is not a valid PrivateAction')
class DataPrep(object):
    """Prepares a raw dataframe for tabular GAN training.

    Normalizes missing values to a numeric sentinel, optionally
    log-transforms skewed columns, label-encodes categorical columns, and
    records everything needed to invert the transformations via
    `inverse_prep`.
    """

    def __init__(self, raw_df: pd.DataFrame, categorical: list, log: list, mixed: dict, integer: list, type: dict, test_ratio: float):
        """Preprocess *raw_df* in place on ``self.df``.

        Args:
            raw_df: Input data including the target column.
            categorical: Names of categorical columns.
            log: Names of columns to log-transform.
            mixed: Mapping of mixed-type column name -> list of categorical modes.
            integer: Names of columns rounded to integers on inversion.
            type: Single-entry mapping whose value is the target column name
                (shadows the builtin `type`; kept for interface compatibility).
            test_ratio: Unused here; kept for interface compatibility.
        """
        self.categorical_columns = categorical
        self.log_columns = log
        self.mixed_columns = mixed
        self.integer_columns = integer
        # Indices of categorical / mixed columns, filled below.
        self.column_types = dict()
        self.column_types['categorical'] = []
        self.column_types['mixed'] = {}
        # Per-log-column minimum, needed to invert the shifted log transform.
        self.lower_bounds = {}
        self.label_encoder_list = []
        # Re-attach the target column so it is processed with the rest.
        target_col = list(type.values())[0]
        y_real = raw_df[target_col]
        X_real = raw_df.drop(columns=[target_col])
        (X_train_real, y_train_real) = (X_real, y_real)
        X_train_real[target_col] = y_train_real
        self.df = X_train_real
        # Normalize missing values (blank strings / NaN) to 'empty' first.
        self.df = self.df.replace(' ', np.nan)
        self.df = self.df.fillna('empty')
        all_columns = set(self.df.columns)
        irrelevant_missing_columns = set(self.categorical_columns)
        relevant_missing_columns = list(all_columns - irrelevant_missing_columns)
        # For non-categorical columns, turn 'empty' into the numeric sentinel
        # -9999999 and register it as a categorical mode of a mixed column.
        for i in relevant_missing_columns:
            if i in self.log_columns:
                if 'empty' in list(self.df[i].values):
                    self.df[i] = self.df[i].apply(lambda x: (-9999999) if x == 'empty' else x)
                    self.mixed_columns[i] = [-9999999]
            elif i in list(self.mixed_columns.keys()):
                if 'empty' in list(self.df[i].values):
                    self.df[i] = self.df[i].apply(lambda x: (-9999999) if x == 'empty' else x)
                    self.mixed_columns[i].append(-9999999)
            elif 'empty' in list(self.df[i].values):
                self.df[i] = self.df[i].apply(lambda x: (-9999999) if x == 'empty' else x)
                self.mixed_columns[i] = [-9999999]
        if self.log_columns:
            for log_column in self.log_columns:
                # Only non-sentinel entries participate in the log transform.
                valid_indices = []
                for (idx, val) in enumerate(self.df[log_column].values):
                    if val != (-9999999):
                        valid_indices.append(idx)
                eps = 1
                lower = np.min(self.df[log_column].iloc[valid_indices].values)
                self.lower_bounds[log_column] = lower
                # Shift so the argument of log stays strictly positive.
                if lower > 0:
                    self.df[log_column] = self.df[log_column].apply(lambda x: np.log(x) if x != (-9999999) else (-9999999))
                elif lower == 0:
                    self.df[log_column] = self.df[log_column].apply(lambda x: np.log(x + eps) if x != (-9999999) else (-9999999))
                else:
                    self.df[log_column] = self.df[log_column].apply(lambda x: np.log((x - lower) + eps) if x != (-9999999) else (-9999999))
        # Label-encode categorical columns and remember each encoder so the
        # encoding can be inverted later.
        for (column_index, column) in enumerate(self.df.columns):
            if column in self.categorical_columns:
                label_encoder = preprocessing.LabelEncoder()
                self.df[column] = self.df[column].astype(str)
                label_encoder.fit(self.df[column])
                current_label_encoder = dict()
                current_label_encoder['column'] = column
                current_label_encoder['label_encoder'] = label_encoder
                transformed_column = label_encoder.transform(self.df[column])
                self.df[column] = transformed_column
                self.label_encoder_list.append(current_label_encoder)
                self.column_types['categorical'].append(column_index)
            elif column in self.mixed_columns:
                self.column_types['mixed'][column_index] = self.mixed_columns[column]
        super().__init__()

    def inverse_prep(self, data, eps=1):
        """Invert the preprocessing on generated *data*.

        Args:
            data: Array-like with the same column layout as ``self.df``.
            eps: Epsilon used when the log transform was shifted.

        Returns:
            DataFrame with label encoding, log transforms, and missing-value
            sentinels undone (sentinels become NaN).
        """
        df_sample = pd.DataFrame(data, columns=self.df.columns)
        # Undo label encoding.
        for i in range(len(self.label_encoder_list)):
            le = self.label_encoder_list[i]['label_encoder']
            df_sample[self.label_encoder_list[i]['column']] = df_sample[self.label_encoder_list[i]['column']].astype(int)
            df_sample[self.label_encoder_list[i]['column']] = le.inverse_transform(df_sample[self.label_encoder_list[i]['column']])
        # Undo the log transform, mirroring the shift applied in __init__.
        if self.log_columns:
            for i in df_sample:
                if i in self.log_columns:
                    lower_bound = self.lower_bounds[i]
                    if lower_bound > 0:
                        # BUGFIX: the exponentiated result was previously
                        # discarded (missing assignment), leaving the column
                        # in log space.
                        df_sample[i] = df_sample[i].apply(lambda x: np.exp(x))
                    elif lower_bound == 0:
                        df_sample[i] = df_sample[i].apply(lambda x: np.ceil(np.exp(x) - eps) if (np.exp(x) - eps) < 0 else (np.exp(x) - eps))
                    else:
                        df_sample[i] = df_sample[i].apply(lambda x: (np.exp(x) - eps) + lower_bound)
        if self.integer_columns:
            for column in self.integer_columns:
                df_sample[column] = np.round(df_sample[column].values)
                df_sample[column] = df_sample[column].astype(int)
        # Restore missing values.
        df_sample.replace(-9999999, np.nan, inplace=True)
        df_sample.replace('empty', np.nan, inplace=True)
        return df_sample
class Block(Action, Mutation):
    """Toggle blocking of another user; blocking also severs mutual follows
    and drops favorites on the blocked user's entries."""

    def mutate(_root, info, sender, subject):
        already_blocked = sender.blocked.filter(pk=subject.pk).exists()
        if already_blocked:
            # Second invocation acts as an unblock.
            sender.blocked.remove(subject)
            return Block(feedback=_('removed blockages'))
        # Blocking first severs the follow relationship in both directions.
        sender.following.remove(subject)
        subject.following.remove(sender)
        sender.blocked.add(subject)
        favorites_by_subject = sender.favorite_entries.filter(author__in=[subject])
        sender.favorite_entries.remove(*favorites_by_subject)
        return Block(
            feedback=_('the person is now blocked'),
            redirect=info.context.build_absolute_uri(reverse('home')),
        )
def _expand_manifest_paths(paths: List[str], filesystem: Optional[Union[(S3FileSystem, s3fs.S3FileSystem)]], content_type_provider: Callable[([str], ContentType)]) -> Tuple[(Dict[(ContentType, List[str])], CachedFileMetadataProvider)]:
    """Load the single Redshift manifest at ``paths[0]`` and expand it into
    per-content-type entry paths plus a cached file metadata provider."""
    assert (len(paths) == 1), f'Expected 1 manifest path, found {len(paths)}.'
    path = paths[0]
    with filesystem.open_input_file(path) as f:
        manifest = Manifest(json.loads(f.read()))
    if manifest.entries:
        (content_type_to_paths, meta_provider) = _read_manifest_entry_paths(
            manifest.entries,
            (manifest.meta.content_type if manifest.meta else None),
            content_type_provider,
        )
    else:
        # An empty manifest yields empty results rather than failing.
        logger.warning(f'No entries to read in Redshift Manifest: {path}')
        content_type_to_paths = {}
        meta_provider = CachedFileMetadataProvider({})
    return (content_type_to_paths, meta_provider)
def GaussianNoising(tensor, sigma, mean=0.0, noise_size=None, min=(- 1.0), max=1.0):
    """Add i.i.d. Gaussian noise to *tensor* and clamp to [min, max].

    Args:
        tensor: Input tensor the noise is added to.
        sigma: Standard deviation of the noise.
        mean: Mean of the noise distribution.
        noise_size: Optional explicit noise shape; defaults to tensor's shape.
        min, max: Clamp bounds (names kept for interface compatibility).
    """
    size = tensor.size() if noise_size is None else noise_size
    noise = torch.FloatTensor(np.random.normal(loc=mean, scale=sigma, size=size))
    return torch.clamp(tensor + noise, min=min, max=max)
def test_add_constraint_with_optional(app: PoetryTestApplication, repo: TestRepository, tester: CommandTester) -> None:
    """Adding a pinned optional dependency records it in pyproject without
    installing anything."""
    repo.add_package(get_package('cachy', '0.2.0'))
    tester.execute('cachy=0.2.0 --optional')
    expected = '\nUpdating dependencies\nResolving dependencies...\n\nNo dependencies to install or update\n\nWriting lock file\n'
    assert tester.io.fetch_output() == expected
    # Nothing should actually be installed for an optional dependency.
    assert isinstance(tester.command, InstallerCommand)
    assert tester.command.installer.executor.installations_count == 0
    # The dependency is written back with the optional flag set.
    pyproject: dict[(str, Any)] = app.poetry.file.read()
    dependencies = pyproject['tool']['poetry']['dependencies']
    assert 'cachy' in dependencies
    assert dependencies['cachy'] == {'version': '0.2.0', 'optional': True}
def attr_hook(ctx: FunctionContext) -> Type:
    """Plugin hook: wrap the Attr type argument in Optional unless the call's
    last argument contains the literal name True."""
    default = get_proper_type(ctx.default_return_type)
    assert isinstance(default, Instance)
    if default.type.fullname == 'mod.Attr':
        attr_base = default
    else:
        # Otherwise search the direct bases for mod.Attr.
        attr_base = next(
            (base for base in default.type.bases if base.type.fullname == 'mod.Attr'),
            None,
        )
    assert attr_base is not None
    # A literal True anywhere in the last argument keeps the plain type.
    last_arg_exprs = ctx.args[-1]
    if any(isinstance(expr, NameExpr) and expr.name == 'True' for expr in last_arg_exprs):
        return attr_base
    assert len(attr_base.args) == 1
    arg_type = attr_base.args[0]
    return Instance(attr_base.type, [UnionType([arg_type, NoneType()])], line=default.line, column=default.column)
def pblock_061(content):
    """Parse SEED blockette 061 (FIR response) into (stage_number, FIR)."""
    stage_number = int(get1(content, b'03'))
    fir = sxml.FIR(
        name=get1(content, b'04', optional=True),
        input_units=sxml.Units(name=punit(get1(content, b'06'))),
        output_units=sxml.Units(name=punit(get1(content, b'07'))),
        symmetry=psymmetry(get1(content, b'05')),
        numerator_coefficient_list=[pnc(field) for field in getn(content, b'09')],
    )
    return (stage_number, fir)
def listen(ip, data_dir):
    """Continuously receive iCom data from `ip` and archive it.

    The byte stream is split into batches at timestamp markers; each
    complete batch is saved under ``<data_dir>/live/<ip>`` and also
    recorded per patient. Runs until interrupted; the socket is always
    closed on exit.
    """
    # Timestamps like b'2020-01-0112:34:56' delimit batches in the stream.
    date_pattern = re.compile(b'\\d\\d\\d\\d-\\d\\d-\\d\\d\\d\\d:\\d\\d:\\d\\d')
    data_dir = pathlib.Path(data_dir)
    live_dir = data_dir.joinpath('live')
    patients_dir = data_dir.joinpath('patients')
    patient_icom_data = patients.PatientIcomData(patients_dir)
    def archive_by_patient(ip, data):
        # Route the completed batch into the per-patient archive.
        patient_icom_data.update_data(ip, data)
    ip_directory = live_dir.joinpath(ip)
    ip_directory.mkdir(exist_ok=True, parents=True)
    s = initialise_socket(ip)
    try:
        data = b''
        while True:
            try:
                data += s.recv(BUFFER_SIZE)
            except socket.timeout:
                # Reconnect on timeouts instead of aborting the listener.
                logging.warning('Socket connection timed out, retrying connection')
                logging.info(s)
                s.close()
                logging.info(s)
                s = initialise_socket(ip)
                continue
            matches = date_pattern.finditer(data)
            try:
                span = next(matches).span()
            except StopIteration:
                # No complete timestamp in the buffer yet; keep receiving.
                continue
            previous_start_location = get_start_location_from_date_span(span)
            # Every subsequent timestamp closes the previous batch.
            for match in matches:
                new_start_location = get_start_location_from_date_span(match.span())
                data_to_save = data[previous_start_location:new_start_location]
                save_an_icom_batch(date_pattern, ip_directory, data_to_save)
                archive_by_patient(ip, data_to_save)
                previous_start_location = new_start_location
            # Keep the (possibly incomplete) trailing batch in the buffer.
            data = data[previous_start_location:]
    finally:
        s.close()
        logging.info(s)
def test_yamlrepresenter_dumps(temp_file_creator):
    """YamlRepresenter.dump serializes a mapping with lists as block items."""
    representer = filesystem.YamlRepresenter()
    payload = {'key1': 'value1', 'key2': 'value2', 'key3': [0, 1, 2]}
    target = temp_file_creator()
    with open(target, representer.write_mode) as handle:
        representer.dump(handle, payload)
    expected = 'key1: value1\nkey2: value2\nkey3:\n - 0\n - 1\n - 2\n'
    assert target.read_text() == expected
def test_validate_well_structured_too_many():
    """Two consecutive PhasedX moments on the same qubit must be rejected."""
    (q0, q1) = cirq.LineQubit.range(2)
    # Circuit intentionally stacks two PhX layers before the two-qubit gate.
    circuit = cirq.Circuit([cirq.Moment([cirq.PhasedXPowGate(phase_exponent=0).on(q0)]), cirq.Moment([cirq.PhasedXPowGate(phase_exponent=0.5).on(q0)]), cirq.Moment([cg.SYC(q0, q1)]), cirq.measure(q0, q1, key='z')])
    with pytest.raises(BadlyStructuredCircuitError) as e:
        validate_well_structured(circuit)
    assert e.match('Too many PhX')
class TestRFC4514():
    """Tests for parsing RFC 4514 distinguished-name strings into Name objects."""

    def test_invalid(self, subtests):
        # Malformed DN strings (bad separators, unknown attribute names)
        # must raise ValueError.
        for value in ['C=US,CN=Joe , Smith,DC=example', ',C=US,CN=Joe , Smith,DC=example', 'C=US,UNKNOWN=Joe , Smith,DC=example', 'C=US,CN,DC=example', 'C=US,FOOBAR=example']:
            with subtests.test():
                with pytest.raises(ValueError):
                    Name.from_rfc4514_string(value)

    def test_valid(self, subtests):
        # Each RFC 4514 string must parse to the exact expected Name,
        # covering escaping, hex-encoded values, multi-valued RDNs, dotted
        # OIDs, and empty attribute values.
        for (value, expected) in [
            ('CN=James \\"Jim\\" Smith\\, III', Name([NameAttribute(NameOID.COMMON_NAME, 'James "Jim" Smith, III')])),
            ('UID=\\# escape\\+\\,\\;\\00this\\ ', Name([NameAttribute(NameOID.USER_ID, '# escape+,;\x00this ')])),
            ('2.5.4.3=James \\"Jim\\" Smith\\, III', Name([NameAttribute(NameOID.COMMON_NAME, 'James "Jim" Smith, III')])),
            ('ST=', Name([NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, '')])),
            ('OU=Sales+CN=J. Smith,DC=example,DC=net', Name([RelativeDistinguishedName([NameAttribute(NameOID.DOMAIN_COMPONENT, 'net')]), RelativeDistinguishedName([NameAttribute(NameOID.DOMAIN_COMPONENT, 'example')]), RelativeDistinguishedName([NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, 'Sales'), NameAttribute(NameOID.COMMON_NAME, 'J. Smith')])])),
            ('CN=cryptography.io,O=PyCA,L=,ST=,C=US', Name([NameAttribute(NameOID.COUNTRY_NAME, 'US'), NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ''), NameAttribute(NameOID.LOCALITY_NAME, ''), NameAttribute(NameOID.ORGANIZATION_NAME, 'PyCA'), NameAttribute(NameOID.COMMON_NAME, 'cryptography.io')])),
            ('C=US,CN=Joe \\, Smith,DC=example', Name([NameAttribute(NameOID.DOMAIN_COMPONENT, 'example'), NameAttribute(NameOID.COMMON_NAME, 'Joe , Smith'), NameAttribute(NameOID.COUNTRY_NAME, 'US')])),
            ('C=US,CN=Jane \\"J\\,S\\" Smith,DC=example', Name([NameAttribute(NameOID.DOMAIN_COMPONENT, 'example'), NameAttribute(NameOID.COMMON_NAME, 'Jane "J,S" Smith'), NameAttribute(NameOID.COUNTRY_NAME, 'US')])),
            ('C=US,CN=\\"Jane J\\,S Smith\\",DC=example', Name([NameAttribute(NameOID.DOMAIN_COMPONENT, 'example'), NameAttribute(NameOID.COMMON_NAME, '"Jane J,S Smith"'), NameAttribute(NameOID.COUNTRY_NAME, 'US')])),
            ('C=US,CN=\\"Jane \\"J\\,S\\" Smith\\",DC=example', Name([NameAttribute(NameOID.DOMAIN_COMPONENT, 'example'), NameAttribute(NameOID.COMMON_NAME, '"Jane "J,S" Smith"'), NameAttribute(NameOID.COUNTRY_NAME, 'US')])),
            ('C=US,CN=Jane=Smith,DC=example', Name([NameAttribute(NameOID.DOMAIN_COMPONENT, 'example'), NameAttribute(NameOID.COMMON_NAME, 'Jane=Smith'), NameAttribute(NameOID.COUNTRY_NAME, 'US')])),
            ('CN=#616263', Name([NameAttribute(NameOID.COMMON_NAME, 'abc')])),
            ('CN=', Name([NameAttribute(NameOID.COMMON_NAME, '')])),
            ('CN=\\\\123', Name([NameAttribute(NameOID.COMMON_NAME, '\\123')])),
            ('CN=\\\\\\;', Name([NameAttribute(NameOID.COMMON_NAME, '\\;')])),
            ('CN=\\\\#123', Name([NameAttribute(NameOID.COMMON_NAME, '\\#123')])),
            ('2.5.4.10=abc', Name([NameAttribute(NameOID.ORGANIZATION_NAME, 'abc')])),
        ]:
            with subtests.test():
                result = Name.from_rfc4514_string(value)
                assert (result == expected)

    def test_attr_name_override(self):
        # Custom attribute-name mappings take precedence over the defaults.
        assert (Name.from_rfc4514_string('CN=Santa Claus,E=', {'E': NameOID.EMAIL_ADDRESS}) == Name([NameAttribute(NameOID.EMAIL_ADDRESS, ''), NameAttribute(NameOID.COMMON_NAME, 'Santa Claus')]))
        assert (Name.from_rfc4514_string('CN=Santa Claus', {'CN': NameOID.EMAIL_ADDRESS}) == Name([NameAttribute(NameOID.EMAIL_ADDRESS, 'Santa Claus')]))

    def test_generate_parse(self):
        # Round-trips: Name -> string -> Name and string -> Name -> string.
        name_value = Name([NameAttribute(NameOID.COMMON_NAME, 'Common Name 1'), NameAttribute(NameOID.LOCALITY_NAME, 'City for Name 1'), NameAttribute(NameOID.ORGANIZATION_NAME, 'Name 1 Organization')])
        assert (Name.from_rfc4514_string(name_value.rfc4514_string()) == name_value)
        name_string = 'O=Organization,L=City,CN=Common Name'
        assert (Name.from_rfc4514_string(name_string).rfc4514_string() == name_string)
class SesquialteralHourglass(nn.Module):
    """A 1.5-pass hourglass: a full down/up hourglass followed by a second
    downsampling half, with skip connections merged at matching scales.

    Parameters:
        down1_seq: modules of the first downsampling path (one per scale).
        skip1_seq: skip modules for the first pass (depth + 1 entries).
        up_seq: modules of the upsampling path.
        skip2_seq: skip modules for the second pass (depth + 1 entries).
        down2_seq: modules of the second downsampling path.
        merge_type: 'cat' concatenates skips on the channel axis,
            'add' sums them.
    """

    def __init__(self, down1_seq, skip1_seq, up_seq, skip2_seq, down2_seq, merge_type='cat'):
        super(SesquialteralHourglass, self).__init__()
        # All paths must have one stage per scale; skip sequences carry one
        # extra module for the base resolution.
        assert (len(down1_seq) == len(up_seq))
        assert (len(down1_seq) == len(down2_seq))
        assert (len(skip1_seq) == len(skip2_seq))
        assert (len(down1_seq) == (len(skip1_seq) - 1))
        assert (merge_type in ['cat', 'add'])
        self.merge_type = merge_type
        self.depth = len(down1_seq)
        self.down1_seq = down1_seq
        self.skip1_seq = skip1_seq
        self.up_seq = up_seq
        self.skip2_seq = skip2_seq
        self.down2_seq = down2_seq

    def _merge(self, x, y):
        # Combine a path tensor with a skip tensor (no-op when y is None).
        if (y is not None):
            if (self.merge_type == 'cat'):
                x = torch.cat((x, y), dim=1)
            elif (self.merge_type == 'add'):
                x = (x + y)
        return x

    def forward(self, x, **kwargs):
        # First downsampling pass, collecting a skip output per scale.
        y = self.skip1_seq[0](x)
        skip1_outs = [y]
        for i in range(self.depth):
            x = self.down1_seq[i](x)
            y = self.skip1_seq[(i + 1)](x)
            skip1_outs.append(y)
        # Upsampling pass starts from the deepest first-pass skip output.
        x = skip1_outs[self.depth]
        y = self.skip2_seq[0](x)
        skip2_outs = [y]
        for i in range(self.depth):
            x = self.up_seq[i](x)
            # Merge with the first-pass skip at the matching scale.
            y = skip1_outs[((self.depth - 1) - i)]
            x = self._merge(x, y)
            y = self.skip2_seq[(i + 1)](x)
            skip2_outs.append(y)
        x = self.skip2_seq[self.depth](x)
        # Second downsampling pass, merging the second-pass skips.
        for i in range(self.depth):
            x = self.down2_seq[i](x)
            y = skip2_outs[((self.depth - 1) - i)]
            x = self._merge(x, y)
        return x
def readPFM(file):
    """Read a PFM (Portable Float Map) image file.

    Args:
        file: Path to the PFM file.

    Returns:
        (data, scale): `data` is a float numpy array of shape
        (height, width, 3) for color ('PF') or (height, width) for
        grayscale ('Pf'), flipped to top-down row order; `scale` is the
        absolute value of the header scale factor.

    Raises:
        Exception: if the file is not a PFM file or the header is malformed.
    """
    color = None
    width = None
    height = None
    scale = None
    endian = None
    # Context manager guarantees the file handle is closed (the original
    # leaked it). Header lines are bytes in 'rb' mode, so they must be
    # decoded before comparing against str literals — the old code compared
    # bytes to 'PF'/'Pf', which always failed under Python 3.
    with open(file, 'rb') as fh:
        header = fh.readline().decode('latin-1').rstrip()
        if (header == 'PF'):
            color = True
        elif (header == 'Pf'):
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match('^(\\d+)\\s(\\d+)\\s$', fh.readline().decode('latin-1'))
        if dim_match:
            (width, height) = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(fh.readline().decode('latin-1').rstrip())
        # A negative scale marks little-endian sample data.
        if (scale < 0):
            endian = '<'
            scale = (- scale)
        else:
            endian = '>'
        data = np.fromfile(fh, (endian + 'f'))
    shape = ((height, width, 3) if color else (height, width))
    data = np.reshape(data, shape)
    # PFM stores rows bottom-up; flip to conventional top-down order.
    data = np.flipud(data)
    return (data, scale)
def _non_fully_commuting_terms(hamiltonian: QubitOperator) -> List[QubitOperator]:
    """Return the Hamiltonian terms that fail to commute with at least one
    other term of the Hamiltonian."""
    terms = [QubitOperator(key) for key in hamiltonian.terms.keys()]
    # A term is kept as soon as any pairwise commutator check fails.
    return [
        term
        for term in terms
        if any(not _commutes(term, other) for other in terms)
    ]
class Export(Plugin):
    """Base plugin for exporting a project snapshot's attribute values.

    Subclasses implement `render`/`submit`; the helpers below query values
    by attribute path, set prefix, set index, and collection index.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Set by the caller before the export runs.
        self.project = None
        self.snapshot = None

    def render(self):
        # Produce the export output; must be implemented by subclasses.
        raise NotImplementedError

    def submit(self):
        # Handle a submitted export form; must be implemented by subclasses.
        raise NotImplementedError

    def get_set(self, path, set_prefix=''):
        # All values for `path` across every set, ordered by set then
        # collection index.
        return self.project.values.filter(snapshot=self.snapshot, attribute__path=path, set_prefix=set_prefix).order_by('set_index', 'collection_index')

    def get_values(self, path, set_prefix='', set_index=0):
        # All values for `path` within one specific set.
        return self.project.values.filter(snapshot=self.snapshot, attribute__path=path, set_prefix=set_prefix, set_index=set_index).order_by('collection_index')

    def get_value(self, path, set_prefix='', set_index=0, collection_index=0):
        # Single value object, or None when the collection index is absent.
        try:
            return self.get_values(path, set_prefix=set_prefix, set_index=set_index)[collection_index]
        except IndexError:
            return None

    def get_text(self, path, set_prefix='', set_index=0, collection_index=0):
        # Text of a single value, or None when absent.
        try:
            return self.get_values(path, set_prefix=set_prefix, set_index=set_index)[collection_index].text
        except IndexError:
            return None

    def get_timestamp(self, path, set_prefix='', set_index=0, collection_index=0):
        # ISO-formatted datetime value; AttributeError is caught too since
        # non-datetime values lack isoformat().
        try:
            return self.get_values(path, set_prefix=set_prefix, set_index=set_index)[collection_index].value.isoformat()
        except (IndexError, AttributeError):
            return None

    def get_year(self, path, set_prefix='', set_index=0, collection_index=0):
        # Year of a datetime value, or None when absent/not a datetime.
        try:
            return self.get_values(path, set_prefix=set_prefix, set_index=set_index)[collection_index].value.year
        except (IndexError, AttributeError):
            return None

    def get_list(self, path, set_prefix='', set_index=0):
        # Non-empty text values for `path` as a plain list.
        values = self.get_values(path, set_prefix=set_prefix, set_index=set_index)
        return [value.text for value in values if value.text]

    def get_bool(self, path, set_prefix='', set_index=0, collection_index=0):
        # Tri-state boolean: True for '1', False for any other stored text,
        # None when no value exists.
        value = self.get_value(path, set_prefix=set_prefix, set_index=set_index, collection_index=collection_index)
        if value:
            return (True if (value.text == '1') else False)
        else:
            return None

    def get_option(self, options, path, set_prefix='', set_index=0, collection_index=0, default=None):
        # Map the stored option's path through `options`, falling back to
        # `default` when no value/option is present or unmapped.
        value = self.get_value(path, set_prefix=set_prefix, set_index=set_index, collection_index=collection_index)
        if (value and value.option):
            return options.get(value.option.path, default)
        else:
            return default
def compare(start, end):
    """Return True when the current local (hour, minute) lies inside the
    window from `start` to `end` (tuples of hour/minute, possibly strings).

    Equal endpoints mean "always"; windows may wrap past midnight.
    """
    start = tuple(int(n) for n in start)
    end = tuple(int(n) for n in end)
    if start == end:
        # Degenerate window covers the whole day.
        return True
    now = time.localtime()[3:5]
    # Plain window, or `now` past both endpoints of a wrapped window.
    if start < now < end:
        return True
    if start < now and now > end and end < start:
        return True
    # Wrapped (overnight) window: before midnight or after it.
    if start > end and (now > start or now < end):
        return True
    return False
# NOTE(review): the five lines below look like stripped decorators —
# presumably `@pytest.mark.parametrize(...)` followed by fixture/patch
# decorators (`@mock.patch(...)`) — confirm against the original source.
.parametrize('rollback_enabled, expected_delete_calls, expected_retarget_tag_calls', [(True, ['deleted', 'zzerror', 'updated', 'created'], ['updated']), (False, ['deleted', 'zzerror'], [])])
_existing_mirrors
('util.repomirror.skopeomirror.SkopeoMirror.run_skopeo')
('workers.repomirrorworker.retarget_tag')
('workers.repomirrorworker.delete_tag')
('workers.repomirrorworker.app')
def test_rollback(mock_app, delete_tag_mock, retarget_tag_mock, run_skopeo_mock, expected_retarget_tag_calls, expected_delete_calls, rollback_enabled, initialized_db, app):
    """Mirror sync that fails mid-run should (when rollback is enabled)
    delete tags it created and re-target tags it moved."""
    mock_app.config = {'REPO_MIRROR_ROLLBACK': rollback_enabled, 'REPO_MIRROR': True, 'REPO_MIRROR_SERVER_HOSTNAME': 'localhost:5000', 'TESTING': True}
    # 'zzerror' sorts last so its failing copy happens after the others.
    (mirror, repo) = create_mirror_repo_robot(['updated', 'created', 'zzerror'])
    _create_tag(repo, 'updated')
    _create_tag(repo, 'deleted')
    # Scripted skopeo interactions: tag listing succeeds, two copies
    # succeed, and the final 'zzerror' copy fails to trigger rollback.
    skopeo_calls = [{'args': ['/usr/bin/skopeo', 'list-tags', '--tls-verify=True', 'docker://registry.example.com/namespace/repository'], 'results': SkopeoResults(True, [], '{"Tags": ["latest", "zzerror", "created", "updated"]}', '')}, {'args': ['/usr/bin/skopeo', 'copy', '--all', '--remove-signatures', '--src-tls-verify=True', '--dest-tls-verify=True', '--dest-creds', ('%s:%s' % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot))), 'docker://registry.example.com/namespace/repository:created', 'docker://localhost:5000/mirror/repo:created'], 'results': SkopeoResults(True, [], 'Success', '')}, {'args': ['/usr/bin/skopeo', 'copy', '--all', '--remove-signatures', '--src-tls-verify=True', '--dest-tls-verify=True', '--dest-creds', ('%s:%s' % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot))), 'docker://registry.example.com/namespace/repository:updated', 'docker://localhost:5000/mirror/repo:updated'], 'results': SkopeoResults(True, [], 'Success', '')}, {'args': ['/usr/bin/skopeo', 'copy', '--all', '--remove-signatures', '--src-tls-verify=True', '--dest-tls-verify=True', '--dest-creds', ('%s:%s' % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot))), 'docker://registry.example.com/namespace/repository:zzerror', 'docker://localhost:5000/mirror/repo:zzerror'], 'results': SkopeoResults(False, [], '', 'ERROR')}]
    def skopeo_test(args, proxy):
        # Pop the next scripted call, verify the invocation, and emulate
        # the tag side effects a successful copy would have.
        try:
            skopeo_call = skopeo_calls.pop(0)
            assert (args == skopeo_call['args'])
            assert (proxy == {})
            if ((args[1] == 'copy') and args[8].endswith(':updated')):
                _create_tag(repo, 'updated')
            elif ((args[1] == 'copy') and args[8].endswith(':created')):
                _create_tag(repo, 'created')
            elif ((args[1] == 'copy') and args[8].endswith(':zzerror')):
                _create_tag(repo, 'zzerror')
            return skopeo_call['results']
        except Exception as e:
            # Put the call back so the final bookkeeping assertion fails
            # visibly instead of masking the error.
            skopeo_calls.append(skopeo_call)
            raise e
    def retarget_tag_test(name, manifest, is_reversion=False):
        # Rollback must re-target tags in the expected order, as reversions.
        assert (expected_retarget_tag_calls.pop(0) == name)
        assert is_reversion
    def delete_tag_test(repository_id, tag_name):
        # Rollback must delete tags in the expected order.
        assert (expected_delete_calls.pop(0) == tag_name)
    run_skopeo_mock.side_effect = skopeo_test
    retarget_tag_mock.side_effect = retarget_tag_test
    delete_tag_mock.side_effect = delete_tag_test
    worker = RepoMirrorWorker()
    worker._process_mirrors()
    # Every scripted call and every expected rollback action must be consumed.
    assert ([] == skopeo_calls)
    assert ([] == expected_retarget_tag_calls)
    assert ([] == expected_delete_calls)
def add_new_game_command(sub_parsers):
    """Register the `add-new-game` sub-command and its required arguments."""
    # NOTE(review): the sub-command help text below looks copied from a
    # different command — confirm the intended wording.
    parser: ArgumentParser = sub_parsers.add_parser('add-new-game', help='Loads the preset files and saves then again with the latest version')
    # All four identifiers for the new game are mandatory.
    required_arguments = (
        ('--enum-name', 'The name of the RandovaniaGame enum, used in code.'),
        ('--enum-value', 'The value of the RandovaniaGame enum, used in all data formats.'),
        ('--short-name', "Used as class prefix, and for user-facing strings that can't fit the long name."),
        ('--long-name', 'The full name of your game.'),
    )
    for flag, help_text in required_arguments:
        parser.add_argument(flag, help=help_text, required=True)
    parser.set_defaults(func=new_game_command_logic)
# NOTE(review): this bare call looks like a stripped registration decorator
# (`@_subclass('low_rank_gaussian')`) for the class below — confirm against
# the original source.
_subclass('low_rank_gaussian')
class LRGaussian(Inference):
    """Low-rank-plus-diagonal Gaussian posterior (SWAG-style sampling)."""

    def __init__(self, base, base_args, base_kwargs, var_clamp=1e-06):
        # NOTE(review): `base`, `base_args`, `base_kwargs` are accepted but
        # unused here — presumably consumed elsewhere or kept for interface
        # parity with other Inference subclasses; confirm.
        super(LRGaussian, self).__init__()
        # Lower bound on per-parameter variance, keeping the covariance PD.
        self.var_clamp = var_clamp
        # Distribution is built lazily by fit().
        self.dist = None

    def fit(self, mean, variance, cov_factor):
        # Clamp the diagonal from below; cov_factor is transposed to match
        # LowRankMultivariateNormal's (dim, rank) convention.
        variance = torch.clamp(variance, self.var_clamp)
        self.dist = LowRankMultivariateNormal(loc=mean, cov_diag=variance, cov_factor=cov_factor.t())

    def sample(self, scale=0.5, seed=None):
        # Draw a sample and shrink its deviation from the mean by
        # sqrt(scale); optional seeding makes draws reproducible.
        if (seed is not None):
            torch.manual_seed(seed)
        unscaled_sample = self.dist.rsample()
        scaled_sample = (((scale ** 0.5) * (unscaled_sample - self.dist.loc)) + self.dist.loc)
        return scaled_sample

    def log_prob(self, sample):
        # Log density under the fitted low-rank Gaussian.
        return self.dist.log_prob(sample)
def test_incorrect_reshape_motion_model():
    """read_motion_model must reject configs whose declared matrix sizes do
    not match the provided data."""
    with open(CONFIG_FILE, 'r') as fh:
        config = json.load(fh)['TrackerConfig']
    measurements = config['MotionModel']['measurements']
    states = config['MotionModel']['states']
    # Inflate the measurement count.
    with pytest.raises(ValueError):
        config['MotionModel']['measurements'] = measurements + 1
        _ = utils.read_motion_model(config)
    # Restore measurements, then inflate the state count.
    with pytest.raises(ValueError):
        config['MotionModel']['measurements'] = measurements
        config['MotionModel']['states'] = states + 1
        _ = utils.read_motion_model(config)
class ResNet18(chainer.Chain):
    """ResNet-18-style backbone; forward pass returns globally
    average-pooled features."""

    def __init__(self):
        super(ResNet18, self).__init__(
            conv1_relu=ConvolutionBlock(3, 32),
            res2a_relu=ResidualBlock(32, 32),
            res2b_relu=ResidualBlock(32, 32),
            res3a_relu=ResidualBlockB(32, 64),
            res3b_relu=ResidualBlock(64, 64),
            res4a_relu=ResidualBlockB(64, 128),
            res4b_relu=ResidualBlock(128, 128),
            res5a_relu=ResidualBlockB(128, 256),
            res5b_relu=ResidualBlock(256, 256),
        )

    def __call__(self, TEST, x):
        h = self.conv1_relu(TEST, x)
        h = chainer.functions.max_pooling_2d(h, 3, 2, 1)
        # Four stages of two residual blocks each, in order.
        for stage_name in ('res2a_relu', 'res2b_relu', 'res3a_relu', 'res3b_relu',
                           'res4a_relu', 'res4b_relu', 'res5a_relu', 'res5b_relu'):
            h = getattr(self, stage_name)(TEST, h)
        # Global average pooling over the remaining spatial dimensions.
        return chainer.functions.average_pooling_2d(h, h.data.shape[2:])
class HaskellLexer(RegexLexer):
    """Pygments lexer for Haskell source code."""
    name = 'Haskell'
    # BUGFIX: the url attribute was an unterminated string literal
    # (`url = '`) — restored to the project's canonical value.
    url = 'https://www.haskell.org/'
    aliases = ['haskell', 'hs']
    filenames = ['*.hs']
    mimetypes = ['text/x-haskell']
    version_added = '0.8'
    # Haskell reserved words (infix[lr]? covers infix/infixl/infixr).
    reserved = ('case', 'class', 'data', 'default', 'deriving', 'do', 'else', 'family', 'if', 'in', 'infix[lr]?', 'instance', 'let', 'newtype', 'of', 'then', 'type', 'where', '_')
    # ASCII control-character escape names usable in character literals.
    ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK', 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE', 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN', 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')
    tokens = {'root': [('\\s+', Whitespace), ('--(?![!#$%&*+./<=>?^|_~:\\\\]).*?$', Comment.Single), ('\\{-', Comment.Multiline, 'comment'), ('\\bimport\\b', Keyword.Reserved, 'import'), ('\\bmodule\\b', Keyword.Reserved, 'module'), ('\\berror\\b', Name.Exception), (("\\b(%s)(?!\\')\\b" % '|'.join(reserved)), Keyword.Reserved), ("'[^\\\\]'", String.Char), ((('^[_' + uni.Ll) + "][\\w\\']*"), Name.Function), ((("'?[_" + uni.Ll) + "][\\w']*"), Name), ((("('')?[" + uni.Lu) + "][\\w\\']*"), Keyword.Type), ((("(')[" + uni.Lu) + "][\\w\\']*"), Keyword.Type), ("(')\\[[^\\]]*\\]", Keyword.Type), ("(')\\([^)]*\\)", Keyword.Type), ("(')[:!#$%&*+.\\\\/<=>?^|~-]+", Keyword.Type), ('\\\\(?![:!#$%&*+.\\\\/<=>?^|~-]+)', Name.Function), ('(<-|::|->|=>|=)(?![:!#$%&*+.\\\\/<=>?^|~-]+)', Operator.Word), (':[:!#$%&*+.\\\\/<=>?^|~-]*', Keyword.Type), ('[:!#$%&*+.\\\\/<=>?^|~-]+', Operator), ('0[xX]_*[\\da-fA-F](_*[\\da-fA-F])*_*[pP][+-]?\\d(_*\\d)*', Number.Float), ('0[xX]_*[\\da-fA-F](_*[\\da-fA-F])*\\.[\\da-fA-F](_*[\\da-fA-F])*(_*[pP][+-]?\\d(_*\\d)*)?', Number.Float), ('\\d(_*\\d)*_*[eE][+-]?\\d(_*\\d)*', Number.Float), ('\\d(_*\\d)*\\.\\d(_*\\d)*(_*[eE][+-]?\\d(_*\\d)*)?', Number.Float), ('0[bB]_*[01](_*[01])*', Number.Bin), ('0[oO]_*[0-7](_*[0-7])*', Number.Oct), ('0[xX]_*[\\da-fA-F](_*[\\da-fA-F])*', Number.Hex), ('\\d(_*\\d)*', Number.Integer), ("'", String.Char, 'character'), ('"', String, 'string'), ('\\[\\]', Keyword.Type), ('\\(\\)', Name.Builtin), ('[][(),;`{}]', Punctuation)], 'import': [('\\s+', Whitespace), ('"', String, 'string'), ('\\)', Punctuation, '#pop'), ('qualified\\b', Keyword), ((((('([' + uni.Lu) + '][\\w.]*)(\\s+)(as)(\\s+)([') + uni.Lu) + '][\\w.]*)'), bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Name), '#pop'), ((('([' + uni.Lu) + '][\\w.]*)(\\s+)(hiding)(\\s+)(\\()'), bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Punctuation), 'funclist'), ((('([' + uni.Lu) + '][\\w.]*)(\\s+)(\\()'), bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'), ('[\\w.]+', Name.Namespace, '#pop')], 'module': [('\\s+', Whitespace), ((('([' + uni.Lu) + '][\\w.]*)(\\s+)(\\()'), bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'), ((('[' + uni.Lu) + '][\\w.]*'), Name.Namespace, '#pop')], 'funclist': [('\\s+', Whitespace), ((('[' + uni.Lu) + ']\\w*'), Keyword.Type), ((("(_[\\w\\']+|[" + uni.Ll) + "][\\w\\']*)"), Name.Function), ('--(?![!#$%&*+./<=>?^|_~:\\\\]).*?$', Comment.Single), ('\\{-', Comment.Multiline, 'comment'), (',', Punctuation), ('[:!#$%&*+.\\\\/<=>?^|~-]+', Operator), ('\\(', Punctuation, ('funclist', 'funclist')), ('\\)', Punctuation, '#pop:2')], 'comment': [('[^-{}]+', Comment.Multiline), ('\\{-', Comment.Multiline, '#push'), ('-\\}', Comment.Multiline, '#pop'), ('[-{}]', Comment.Multiline)], 'character': [("[^\\\\']'", String.Char, '#pop'), ('\\\\', String.Escape, 'escape'), ("'", String.Char, '#pop')], 'string': [('[^\\\\"]+', String), ('\\\\', String.Escape, 'escape'), ('"', String, '#pop')], 'escape': [('[abfnrtv"\\\'&\\\\]', String.Escape, '#pop'), ((('\\^[][' + uni.Lu) + '^_]'), String.Escape, '#pop'), ('|'.join(ascii), String.Escape, '#pop'), ('o[0-7]+', String.Escape, '#pop'), ('x[\\da-fA-F]+', String.Escape, '#pop'), ('\\d+', String.Escape, '#pop'), ('(\\s+)(\\\\)', bygroups(Whitespace, String.Escape), '#pop')]}
def agestring(delta):
    """Format a ``datetime.timedelta`` as a short human-readable age.

    Output shapes (matching the original intent):
      * ``'N days, HH:MM:SS'`` when days are present,
      * ``'HH:MM:SS'`` for an hour or more,
      * ``'MM:SS'`` for a minute or more,
      * ``'SS seconds'`` under a minute.

    BUG FIX: the original used strict ``>`` comparisons, so exactly one hour
    rendered as ``'00:00'`` and exactly one minute as a bare ``'00'``.
    """
    retval = ''
    if delta.days > 0:
        retval += '%d days, ' % delta.days
    total_seconds = delta.total_seconds()
    if total_seconds >= 3600:
        # Hours within the current day only; full days are printed above.
        retval += '%02d:' % (delta.seconds // 3600)
    if total_seconds >= 60:
        retval += '%02d:' % ((delta.seconds % 3600) // 60)
    retval += '%02d' % (delta.seconds % 60)
    if total_seconds < 60:
        retval += ' seconds'
    return retval
class SegmentDataset(object):
    """View of a sequence dataset as fixed-length, possibly overlapping segments.

    ``seq_d`` must expose ``seqlist``, ``feats``, ``lens``, ``labs_d``,
    ``talabseqs_d``, an ``iterator(...)`` method and ``get_shape()``.
    """

    def __init__(self, seq_d, seg_len=20, seg_shift=8, rand_seg=False):
        # seg_len: frames per segment; seg_shift: hop between segment starts;
        # rand_seg: sample segment starts randomly instead of a fixed grid.
        self.seq_d = seq_d
        self.seg_len = seg_len
        self.seg_shift = seg_shift
        self.rand_seg = rand_seg
        # Mirror the underlying dataset's bookkeeping for convenient access.
        self.seqlist = self.seq_d.seqlist
        self.feats = self.seq_d.feats
        self.lens = self.seq_d.lens
        self.labs_d = self.seq_d.labs_d
        self.talabseqs_d = self.seq_d.talabseqs_d

    def seq_iterator(self, bs, lab_names=[], talab_names=[], seqs=None, shuffle=False, rem=True, mapper=None):
        """Delegate whole-sequence batching to the underlying dataset."""
        return self.seq_d.iterator(bs, lab_names, talab_names, seqs, shuffle, rem, mapper)

    def iterator(self, seg_bs, seg_shift=None, rand_seg=None, seg_shuffle=False, seg_rem=True, seq_bs=(- 1), lab_names=[], talab_names=[], seqs=None, seq_shuffle=False, seq_rem=True, seq_mapper=None):
        """Yield ``(keys, feats, nsegs, labs, talabs)`` batches of segments.

        ``seg_bs`` is the segment batch size; ``seq_bs == -1`` loads all
        sequences at once.  ``seg_rem``/``seq_rem`` control whether a final
        partial batch is emitted.
        """
        seqs = self.seqlist if seqs is None else seqs
        # BUG FIX: the else-branch previously read `seg_bs`, silently
        # replacing an explicitly requested sequence batch size with the
        # segment batch size.
        seq_bs = len(seqs) if seq_bs == (- 1) else seq_bs
        seg_shift = self.seg_shift if seg_shift is None else seg_shift
        rand_seg = self.rand_seg if rand_seg is None else rand_seg
        seq_iterator = self.seq_iterator(seq_bs, lab_names, talab_names, seqs, seq_shuffle, seq_rem, seq_mapper)
        for seq_keys, seq_feats, seq_lens, seq_labs, seq_talabs in seq_iterator:
            segs, seq_nsegs = make_segs(seq_keys, seq_lens, seq_labs, seq_talabs, self.seg_len, seg_shift, rand_seg)
            if seg_shuffle:
                np.random.shuffle(segs)
            keys, feats, nsegs, labs, talabs = [], [], [], [], []
            seq2idx = {seq: i for i, seq in enumerate(seq_keys)}
            for seg in segs:
                idx = seq2idx[seg.seq]
                keys.append(seq_keys[idx])
                feats.append(seq_feats[idx][seg.start:seg.end])
                nsegs.append(seq_nsegs[idx])
                labs.append(seg.lab)
                talabs.append(seg.talab)
                if len(keys) == seg_bs:
                    yield (keys, feats, nsegs, labs, talabs)
                    keys, feats, nsegs, labs, talabs = [], [], [], [], []
            if seg_rem and bool(keys):
                # Flush the final partial batch for this sequence chunk.
                yield (keys, feats, nsegs, labs, talabs)

    def lab2nseg(self, lab_name, seg_shift=None):
        """Return ``{label: total segment count}`` for one label family."""
        lab2nseg = defaultdict(int)
        seg_shift = self.seg_shift if seg_shift is None else seg_shift
        for seq in self.seqlist:
            # Number of full segments a sequence of this length yields.
            nseg = ((self.lens[seq] - self.seg_len) // seg_shift) + 1
            lab = self.labs_d[lab_name][seq]
            lab2nseg[lab] += nseg
        return lab2nseg

    def get_shape(self):
        """Segment shape: seg_len frames by the per-frame feature shape."""
        seq_shape = self.seq_d.get_shape()
        return (self.seg_len,) + seq_shape[1:]
class CategoricalRV(RandomVariable):
    """Categorical random variable over the last axis of ``p``."""

    name = 'categorical'
    ndim_supp = 0
    ndims_params = [1]
    dtype = 'int64'
    _print_name = ('Categorical', '\\operatorname{Categorical}')

    def __call__(self, p, size=None, **kwargs):
        """Create a categorical RV with probability vector(s) ``p``."""
        return super().__call__(p, size=size, **kwargs)

    def rng_fn(cls, rng, p, size):
        # NOTE(review): signature takes `cls` but no @classmethod decorator is
        # visible here — presumably stripped upstream; verify against the base.
        if size is None:
            # Default to one draw per probability vector.
            size = p.shape[:(- 1)]
        else:
            if len(size) < p.ndim - 1:
                raise ValueError('`size` is incompatible with the shape of `p`')
            # `size` may not broadcast a non-singleton batch dim down to 1.
            for requested, actual in zip(reversed(size), reversed(p.shape[:(- 1)])):
                if requested == 1 and actual != 1:
                    raise ValueError('`size` is incompatible with the shape of `p`')
        # Inverse-CDF sampling: locate uniform draws in the cumulative probs.
        uniform_draws = rng.uniform(size=size)
        return vsearchsorted(p.cumsum(axis=(- 1)), uniform_draws)
class ChildWindowSpecificationFromWrapperTests(unittest.TestCase):
    """Tests for child WindowSpecification objects derived from wrappers."""

    def setUp(self):
        # Launch a fresh Notepad instance and grab its Edit control spec.
        Timings.defaults()
        self.app = Application(backend='win32').start(_notepad_exe())
        self.ctrlspec = self.app.window(found_index=0).find().by(class_name='Edit')

    def tearDown(self):
        self.app.kill()

    def test_wrapper_object(self):
        self.assertEqual(True, isinstance(self.ctrlspec, WindowSpecification))
        found = self.ctrlspec.find()
        self.assertEqual(True, isinstance(found, hwndwrapper.HwndWrapper))

    def test_parent(self):
        # A spec built from the parent must resolve to the same control.
        parent_dlg = self.ctrlspec.parent()
        child_spec = parent_dlg.by(class_name='Edit')
        self.assertEqual(True, isinstance(child_spec, WindowSpecification))
        self.assertEqual(child_spec.class_name(), 'Edit')
        self.assertEqual(self.ctrlspec.handle, child_spec.handle)

    def test_dump_tree_file_output(self):
        dump_path = 'test_dump_tree.txt'
        self.ctrlspec.dump_tree(filename=dump_path)
        if not os.path.isfile(dump_path):
            self.fail("dump_tree can't create a file")
        with open(dump_path, 'r') as dump_file:
            dumped = str(dump_file.readlines())
        self.assertTrue(".by(class_name='Edit')" in dumped)
        os.remove(dump_path)

    def test_properties(self):
        self.assertEqual(self.ctrlspec.class_name(), 'Edit')
        self.assertTrue(self.ctrlspec.exists())
def get_pad_articulation_state(art, max_dof):
    """Return the articulation state as a flat vector with qpos/qvel
    zero-padded to ``max_dof`` entries each, so states of articulations
    with different DoF counts share one fixed layout."""
    base_pos, base_quat, base_vel, base_ang_vel, qpos, qvel = get_articulation_state(art)
    ndof = len(qpos)
    # Layout: [qpos (max_dof slots) | qvel (max_dof slots)], unused slots zero.
    internal = np.zeros(2 * max_dof)
    internal[:ndof] = qpos
    internal[max_dof:max_dof + ndof] = qvel
    return np.concatenate([base_pos, base_quat, base_vel, base_ang_vel, internal])
class BiEncoder(nn.Module):
    """Stack of cross-modal (vision/text) encoder layers applied in sequence."""

    def __init__(self, bi_layer, num_layers):
        super().__init__()
        self.layers = _get_clones(bi_layer, num_layers)
        self.num_layers = num_layers

    def forward(self, vis_feats, pos_feats, padding_mask, text_feats, text_padding_mask, end_points=None, detected_feats=None, detected_mask=None):
        """Run all layers; returns the final ``(vis_feats, text_feats)``.

        ``end_points`` collects per-layer attention maps when layers write an
        'lv_attention' entry into it.
        """
        # BUG FIX: `end_points={}` was a mutable default argument — the same
        # dict was shared (and mutated) across every call that omitted it.
        if end_points is None:
            end_points = {}
        for i, layer in enumerate(self.layers):
            vis_feats, text_feats = layer(vis_feats, pos_feats, padding_mask, text_feats, text_padding_mask, end_points, detected_feats=detected_feats, detected_mask=detected_mask)
            if 'lv_attention' in end_points:
                # Snapshot this layer's attention under a per-layer key.
                end_points['lv_attention%d' % i] = end_points['lv_attention']
        return (vis_feats, text_feats)
class MultiDirectionBaseEnv(Serializable):
    """Base env that rewards movement in any planar (xy) direction.

    NOTE(review): `step`/`done` access velocity_reward, survive_reward,
    contact_cost, velocity_deviation_cost and is_healthy as attributes, so
    these are restored as properties here — without the decorators, `step`
    added bound methods together and raised TypeError at runtime.
    """

    def __init__(self, velocity_reward_weight=1.0, survive_reward=0, ctrl_cost_coeff=0, contact_cost_coeff=0, velocity_deviation_cost_coeff=0, *args, **kwargs):
        self._velocity_reward_weight = velocity_reward_weight
        self._survive_reward = survive_reward
        self._ctrl_cost_coeff = ctrl_cost_coeff
        self._contact_cost_coeff = contact_cost_coeff
        self._velocity_deviation_cost_coeff = velocity_deviation_cost_coeff
        Serializable.quick_init(self, locals())

    @property
    def velocity_reward(self):
        """Reward proportional to the torso's planar speed (direction-free)."""
        xy_velocities = self.get_body_comvel('torso')[:2]
        xy_velocity = np.linalg.norm(xy_velocities)
        return self._velocity_reward_weight * xy_velocity

    @property
    def survive_reward(self):
        return self._survive_reward

    def control_cost(self, action):
        """Quadratic action penalty, normalized by the action bounds."""
        lb, ub = self.action_bounds
        scaling = (ub - lb) / 2.0
        return 0.5 * self._ctrl_cost_coeff * np.sum(np.square(action / scaling))

    @property
    def contact_cost(self):
        # BUG FIX: the original returned a 1-tuple (stray trailing comma),
        # which broke the reward arithmetic in `step`.
        return 0.5 * self._contact_cost_coeff * np.sum(np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))

    @property
    def is_healthy(self):
        # Subclasses override with an actual health check.
        return True

    @property
    def velocity_deviation_cost(self):
        """Penalty on out-of-plane (z) center-of-mass velocity."""
        return 0.5 * self._velocity_deviation_cost_coeff * np.sum(np.square(self.get_body_comvel('torso')[2:]))

    @property
    def done(self):
        return not self.is_healthy

    def step(self, action):
        self.forward_dynamics(action)
        reward = ((self.velocity_reward + self.survive_reward) - self.control_cost(action) - self.contact_cost - self.velocity_deviation_cost)
        next_observation = self.get_current_obs()
        return Step(next_observation, float(reward), self.done)

    def log_diagnostics(self, paths, *args, **kwargs):
        logs = get_multi_direction_logs(paths)
        for row in logs:
            logger.record_tabular(*row)
class TestRequestMixin(object):
    """Mixin providing a ready-to-use mocked Django HttpRequest."""

    def mock_request(self, GET=None, POST=None):
        """Build an HttpRequest; POST data (if given) makes it a POST."""
        request = HttpRequest()
        request.path = MOCK_PATH
        request.method = 'POST' if POST is not None else 'GET'
        # Fall back to empty QueryDicts so views can read them safely.
        request.GET = GET or QueryDict('')
        request.POST = POST or QueryDict('')
        request._messages = CookieStorage(request)
        request.user = User.objects.get_or_create(username='test_user')[0]
        return request
def test_photo():
    """Photo file ids and unique ids of all three sizes must decode as PHOTO."""
    cases = [
        ('AgACAgIAAx0CAAGgr9AAAgmZX7b7IPLRl8NcV3EJkzHwI1gwT-oAAq2nMRuBpLlJPJY-URZfhTkgfeqKEAADAQADAgADbQADAZ8BAAEeBA',
         'AQADIH3qihAAAwGfAQAB'),
        ('AgACAgIAAx0CAAGgr9AAAgmZX7b7IPLRl8NcV3EJkzHwI1gwT-oAAq2nMRuBpLlJPJY-URZfhTkgfeqKEAADAQADAgADeAADAp8BAAEeBA',
         'AQADIH3qihAAAwKfAQAB'),
        ('AgACAgIAAx0CAAGgr9AAAgmZX7b7IPLRl8NcV3EJkzHwI1gwT-oAAq2nMRuBpLlJPJY-URZfhTkgfeqKEAADAQADAgADeQAD_54BAAEeBA',
         'AQADIH3qihAAA_-eAQAB'),
    ]
    # small, medium, big — in that order, as the original asserted.
    for file_id, unique_id in cases:
        check(file_id, FileType.PHOTO)
        check_unique(unique_id, FileUniqueType.PHOTO)
def DescriptorChecksum(desc: str) -> str:
    """Compute the 8-character descriptor checksum of ``desc``.

    Returns '' if ``desc`` contains a character outside the input charset.
    Characters feed the polynomial in two parts: the low 5 bits immediately,
    and the high bits (the "class") accumulated in groups of three.
    """
    checksum = 1
    group_acc = 0
    group_len = 0
    for ch in desc:
        if ch not in _INPUT_CHARSET_INV:
            return ''
        pos = _INPUT_CHARSET_INV[ch]
        checksum = PolyMod(checksum, pos & 31)
        group_acc = group_acc * 3 + (pos >> 5)
        group_len += 1
        if group_len == 3:
            # Fold a full group of high-bit classes into the checksum.
            checksum = PolyMod(checksum, group_acc)
            group_acc = 0
            group_len = 0
    if group_len > 0:
        checksum = PolyMod(checksum, group_acc)
    # Shift in 8 zero symbols to make room for the checksum itself.
    for _ in range(8):
        checksum = PolyMod(checksum, 0)
    checksum ^= 1
    return ''.join(_CHECKSUM_CHARSET[(checksum >> (5 * (7 - j))) & 31] for j in range(8))
def main():
    """Render coverage-summary.json as a Markdown table with a badge and
    per-package / total percentages (truncated via ROUND_DOWN)."""
    project_root = Path(__file__).resolve().parent.parent
    coverage_summary = project_root / 'coverage-summary.json'
    coverage_data = json.loads(coverage_summary.read_text(encoding='utf-8'))
    # Pull the aggregate row out so the loop covers only real packages.
    total_data = coverage_data.pop('total')
    lines = ['\n', 'Package | Statements\n', '--- | ---\n']
    for package, data in sorted(coverage_data.items()):
        statements_covered = data['statements_covered']
        statements = data['statements']
        rate = (Decimal(statements_covered) / Decimal(statements)) * 100
        rate = rate.quantize(PRECISION, rounding=ROUND_DOWN)
        # Show a clean "100" (no decimals) when fully covered.
        lines.append(f'{package} | {100 if rate == 100 else rate}% ({statements_covered} / {statements})\n')
    total_statements_covered = total_data['statements_covered']
    total_statements = total_data['statements']
    total_rate = (Decimal(total_statements_covered) / Decimal(total_statements)) * 100
    total_rate = total_rate.quantize(PRECISION, rounding=ROUND_DOWN)
    color = 'ok' if float(total_rate) >= 95 else 'critical'
    # BUG FIX: the original `lines.insert(0, f'''` was truncated mid-string
    # (a syntax error). Reconstructed as a shields.io badge line, the only
    # plausible consumer of `color` — TODO confirm exact badge label.
    lines.insert(0, f'![Coverage](https://img.shields.io/badge/coverage-{total_rate}%25-{color})\n')
    lines.append(f'**Summary** | {100 if total_rate == 100 else total_rate}% ({total_statements_covered} / {total_statements})\n')
    coverage_report = project_root / 'coverage-report.md'
    with coverage_report.open('w', encoding='utf-8') as f:
        f.write(''.join(lines))
class TestErrorsWarnings:
    """Error and warning behaviour of LSCPB model construction."""

    def setup_method(self) -> None:
        # Facilities: a triangle and two unit squares (mixed geometry sizes).
        triangle = Polygon([(0, 0), (1, 0), (1, 1)])
        unit_square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
        shifted_square = Polygon([(2, 0), (3, 0), (3, 1), (2, 1)])
        self.gdf_fac = geopandas.GeoDataFrame({'geometry': [triangle, unit_square, shifted_square]}, crs='EPSG:4326')
        # Single weighted demand point, plus a reprojected and a buffered copy.
        demand_point = Point(10, 10)
        self.gdf_dem = geopandas.GeoDataFrame({'weight': 4, 'geometry': [demand_point]}, crs='EPSG:4326')
        self.gdf_dem_crs = self.gdf_dem.to_crs('EPSG:3857')
        self.gdf_dem_buffered = self.gdf_dem.copy()
        with warnings.catch_warnings():
            # Buffering in a geographic CRS is intentional for this fixture.
            warnings.filterwarnings('ignore', category=UserWarning, message='Geometry is in a geographic CRS')
            self.gdf_dem_buffered['geometry'] = self.gdf_dem.buffer(2)

    def test_error_lscpb_different_crs(self):
        solver = pulp.PULP_CBC_CMD(msg=False)
        with pytest.warns(UserWarning, match='Facility geodataframe contains mixed type'):
            with pytest.raises(ValueError, match='Geodataframes crs are different: '):
                LSCPB.from_geodataframe(self.gdf_dem_crs, self.gdf_fac, 'geometry', 'geometry', 10, solver)

    def test_warning_lscpb_demand_geodataframe(self):
        solver = pulp.PULP_CBC_CMD(msg=False)
        with pytest.warns(UserWarning, match='Demand geodataframe contains mixed type'):
            LSCPB.from_geodataframe(self.gdf_dem_buffered, self.gdf_fac, 'geometry', 'geometry', 100, solver)

    def test_attribute_error_add_backup_covering_constraint(self):
        with pytest.raises(AttributeError, match='Before setting backup coverage'):
            dummy_class = LSCPB('dummy', pulp.LpProblem('name'), pulp.PULP_CBC_CMD(msg=False))
            FacilityModelBuilder.add_backup_covering_constraint(dummy_class, 0, 0)
class Trainer(object):
    """Training loop for latent-direction discovery: trains a `deformator`
    (direction model) and a `shift_predictor` against a frozen generator G,
    with tensorboard/json/image logging and checkpointing."""

    def __init__(self, params=None, out_dir='', verbose=False):
        # BUG FIX: `params=Params()` was a mutable default evaluated once and
        # shared by every Trainer constructed without explicit params.
        params = Params() if params is None else params
        if verbose:
            print('Trainer inited with:\n{}'.format(str(params.__dict__)))
        self.p = params
        self.log_dir = os.path.join(out_dir, 'logs')
        os.makedirs(self.log_dir, exist_ok=True)
        self.cross_entropy = nn.CrossEntropyLoss()
        tb_dir = os.path.join(out_dir, 'tensorboard')
        self.models_dir = os.path.join(out_dir, 'models')
        self.images_dir = os.path.join(self.log_dir, 'images')
        os.makedirs(tb_dir, exist_ok=True)
        os.makedirs(self.models_dir, exist_ok=True)
        os.makedirs(self.images_dir, exist_ok=True)
        self.checkpoint = os.path.join(out_dir, 'checkpoint.pt')
        self.writer = SummaryWriter(tb_dir)
        self.out_json = os.path.join(self.log_dir, 'stat.json')
        self.fixed_test_noise = None

    def make_shifts(self, latent_dim):
        """Sample a batch of (direction index, signed magnitude, shift vector).

        Returns (target_indices, shifts, z_shift) where z_shift[i] has
        shifts[i] placed at coordinate target_indices[i].
        """
        target_indices = torch.randint(0, self.p.directions_count, [self.p.batch_size], device='cuda')
        if self.p.shift_distribution == ShiftDistribution.NORMAL:
            shifts = torch.randn(target_indices.shape, device='cuda')
        elif self.p.shift_distribution == ShiftDistribution.UNIFORM:
            shifts = 2.0 * torch.rand(target_indices.shape, device='cuda') - 1.0
        else:
            # Previously fell through to a NameError on `shifts`; fail loudly.
            raise ValueError('unknown shift_distribution: {}'.format(self.p.shift_distribution))
        shifts = self.p.shift_scale * shifts
        # Push tiny magnitudes away from zero so every shift is perceptible.
        shifts[(shifts < self.p.min_shift) & (shifts > 0)] = self.p.min_shift
        shifts[(shifts > -self.p.min_shift) & (shifts < 0)] = -self.p.min_shift
        try:
            latent_dim[0]
            latent_dim = list(latent_dim)
        except Exception:
            # Scalar dimensionality -> treat as a 1-element shape.
            latent_dim = [latent_dim]
        z_shift = torch.zeros([self.p.batch_size] + latent_dim, device='cuda')
        for i, (index, val) in enumerate(zip(target_indices, shifts)):
            z_shift[i][index] += val
        return (target_indices, shifts, z_shift)

    def log_train(self, step, should_print=True, stats=()):
        """Print, tensorboard-log and json-dump the given (name, value) stats."""
        if should_print:
            out_text = '{}% [step {}]'.format(int((100 * step) / self.p.n_steps), step)
            for named_value in stats:
                out_text += ' | {}: {:.2f}'.format(*named_value)
            print(out_text)
        for named_value in stats:
            self.writer.add_scalar(named_value[0], named_value[1], step)
        with open(self.out_json, 'w') as out:
            # Overwrites each time: the json always holds the latest stats.
            stat_dict = {named_value[0]: named_value[1] for named_value in stats}
            json.dump(stat_dict, out)

    def log_interpolation(self, G, deformator, step):
        """Save interpolation charts for a random and a fixed noise vector."""
        noise = make_noise(1, G.dim_z, self.p.truncation).cuda()
        if self.fixed_test_noise is None:
            self.fixed_test_noise = noise.clone()
        for z, prefix in zip([noise, self.fixed_test_noise], ['rand', 'fixed']):
            fig = make_interpolation_chart(G, deformator, z=z, shifts_r=3 * self.p.shift_scale, shifts_count=3, dims_count=15, dpi=500)
            self.writer.add_figure('{}_deformed_interpolation'.format(prefix), fig, step)
            fig_to_image(fig).convert('RGB').save(os.path.join(self.images_dir, '{}_{}.jpg'.format(prefix, step)))

    def start_from_checkpoint(self, G, deformator, shift_predictor):
        """Restore all model states from the checkpoint; return resume step."""
        step = 0
        if os.path.isfile(self.checkpoint):
            state_dict = torch.load(self.checkpoint)
            step = state_dict['step']
            deformator.load_state_dict(state_dict['deformator'])
            shift_predictor.load_state_dict(state_dict['shift_predictor'])
            G.load_state_dict(state_dict['generator'])
            print('starting from step {}'.format(step))
        return step

    def save_checkpoint(self, G, deformator, shift_predictor, step):
        state_dict = {'step': step, 'deformator': deformator.state_dict(), 'shift_predictor': shift_predictor.state_dict(), 'generator': G.state_dict()}
        torch.save(state_dict, self.checkpoint)

    def save_models(self, deformator, shift_predictor, step):
        """Persist per-step snapshots (separate from the rolling checkpoint)."""
        torch.save(deformator.state_dict(), os.path.join(self.models_dir, 'deformator_{}.pt'.format(step)))
        torch.save(shift_predictor.state_dict(), os.path.join(self.models_dir, 'shift_predictor_{}.pt'.format(step)))

    def log_accuracy(self, G, deformator, shift_predictor, step):
        """Evaluate direction-classification accuracy in eval mode."""
        deformator.eval()
        shift_predictor.eval()
        accuracy = validate_classifier(G, deformator, shift_predictor, trainer=self)
        self.writer.add_scalar('accuracy', accuracy.item(), step)
        deformator.train()
        shift_predictor.train()
        return accuracy

    def log(self, G, deformator, shift_predictor, step, avgs, is_latent):
        """Dispatch periodic logging / checkpointing / model saving."""
        if (step % self.p.steps_per_log) == 0:
            self.log_train(step, True, [avg.flush() for avg in avgs])
        if is_latent and ((step % self.p.steps_per_img_log) == 0):
            self.log_interpolation(G, deformator, step)
        if ((step % self.p.steps_per_backup) == 0) and (step > 0):
            self.save_checkpoint(G, deformator, shift_predictor, step)
            if is_latent:
                accuracy = self.log_accuracy(G, deformator, shift_predictor, step)
                print('Step {} accuracy: {:.3}'.format(step, accuracy.item()))
        if ((step % self.p.steps_per_save) == 0) and (step > 0):
            self.save_models(deformator, shift_predictor, step)

    def train(self, G, deformator, shift_predictor, multi_gpu=False):
        """Main optimization loop. G stays frozen (eval); only the deformator
        (unless ID/RANDOM) and the shift predictor are optimized."""
        G.cuda().eval()
        deformator.cuda().train()
        shift_predictor.cuda().train()
        should_gen_classes = is_conditional(G)
        if multi_gpu:
            G = DataParallelPassthrough(G)
        # ID/RANDOM deformators have nothing to learn — no optimizer for them.
        deformator_opt = (torch.optim.Adam(deformator.parameters(), lr=self.p.deformator_lr) if deformator.type not in [DeformatorType.ID, DeformatorType.RANDOM] else None)
        shift_predictor_opt = torch.optim.Adam(shift_predictor.parameters(), lr=self.p.shift_predictor_lr)
        avgs = (MeanTracker('percent'), MeanTracker('loss'), MeanTracker('direction_loss'), MeanTracker('shift_loss'))
        avg_correct_percent, avg_loss, avg_label_loss, avg_shift_loss = avgs
        recovered_step = self.start_from_checkpoint(G, deformator, shift_predictor)
        for step in range(recovered_step, self.p.n_steps, 1):
            G.zero_grad()
            deformator.zero_grad()
            shift_predictor.zero_grad()
            z = make_noise(self.p.batch_size, G.dim_z, self.p.truncation).cuda()
            if self.p.deformator_target == 'latent':
                target_indices, shifts, basis_shift = self.make_shifts(deformator.input_dim)
            else:
                target_indices, shifts, basis_shift = self.make_shifts(G.dim_z)
            if should_gen_classes:
                classes = G.mixed_classes(z.shape[0])
            if self.p.deformator_target == 'latent':
                shift = deformator(basis_shift)
            # Generate the original and the shifted image pair, either by
            # shifting the latent or by temporarily deforming G's weights.
            if should_gen_classes:
                if self.p.deformator_target == 'latent':
                    imgs = G(z, classes)
                    imgs_shifted = G.gen_shifted(z, shift, classes)
                elif self.p.deformator_target.startswith('weight'):
                    imgs = G(z, classes)
                    deformator.deformate(target_indices, shifts)
                    imgs_shifted = G(z, classes)
                    deformator.disable_deformation()
            elif self.p.deformator_target == 'latent':
                imgs = G(z)
                imgs_shifted = G.gen_shifted(z, shift)
            elif self.p.deformator_target.startswith('weight'):
                imgs = G(z)
                deformator.deformate(target_indices, shifts)
                imgs_shifted = G(z)
                deformator.disable_deformation()
            logits, shift_prediction = shift_predictor(imgs, imgs_shifted)
            logit_loss = self.p.label_weight * self.cross_entropy(logits, target_indices)
            shift_loss = self.p.shift_weight * torch.mean(torch.abs(shift_prediction - shifts))
            loss = logit_loss + shift_loss
            loss.backward()
            if deformator_opt is not None:
                deformator_opt.step()
            shift_predictor_opt.step()
            avg_correct_percent.add(torch.mean((torch.argmax(logits, dim=1) == target_indices).to(torch.float32)).detach())
            avg_loss.add(loss.item())
            avg_label_loss.add(logit_loss.item())
            # FIX: use .item() like the other trackers — the original stored
            # the tensor itself, needlessly keeping it alive.
            avg_shift_loss.add(shift_loss.item())
            self.log(G, deformator, shift_predictor, step, avgs, is_latent=(self.p.deformator_target == 'latent'))
def rm_handler(args):
    """Queue a remove of args.target_path on the selected hosts (or all),
    exiting with an error message on any failure."""
    try:
        if args.host:
            host_desc = ', '.join(sorted(args.host))
        else:
            host_desc = 'all hosts'
        log.info('Queuing rm of {} on {}', args.target_path, host_desc)
        # hosts=None means "every host" downstream.
        patch_hosts(args.target_path, patch_mode=0, hosts=(args.host or None))
    except Exception as e:
        sys.exit('Error: ' + str(e))
class ObjectBlock():
    """Named container grouping function blocks and their I/O points."""

    # Class-level placeholders; real (per-instance) values set in __init__.
    name = None
    FBs = None
    inputs = None
    outputs = None

    def __init__(self, name):
        self.name = name
        self.FBs = {}
        self.inputs = {}
        self.outputs = {}

    def add_io(self, io):
        """Register an I/O point, keyed by its name, as input or output."""
        bucket = self.inputs if issubclass(type(io), Input) else self.outputs
        bucket[io.name] = io
def parse_args(argv=None):
    """Parse command-line options for keypoint-network training.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (backward-compatible generalization — existing no-arg calls are
            unchanged, while tests can pass arguments explicitly).

    Returns:
        argparse.Namespace with cfg, opts, modelDir, logDir, dataDir,
        prevModelDir.
    """
    parser = argparse.ArgumentParser(description='Train keypoints network')
    parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
    # Everything after the named options is captured verbatim for the config.
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    parser.add_argument('--modelDir', help='model directory', type=str, default='')
    parser.add_argument('--logDir', help='log directory', type=str, default='')
    parser.add_argument('--dataDir', help='data directory', type=str, default='')
    parser.add_argument('--prevModelDir', help='prev Model directory', type=str, default='')
    return parser.parse_args(argv)
class VCSDependency(Dependency):
    """A dependency resolved from a version-control system (git, hg, ...).

    NOTE(review): the accessor decorators and the PEP 508 separators were
    missing from the original. `_base_pep_508_name` used `elif
    self.reference:` (attribute access), which is only meaningful if
    `reference` is a property — so the `@property` decorators are restored,
    and the PEP 508 " @ " / "@" separators are reinstated.
    """

    def __init__(self, name: str, vcs: str, source: str, branch: (str | None) = None, tag: (str | None) = None, rev: (str | None) = None, resolved_rev: (str | None) = None, directory: (str | None) = None, groups: (Iterable[str] | None) = None, optional: bool = False, develop: bool = False, extras: (Iterable[str] | None) = None) -> None:
        self._vcs = vcs
        self._source = source
        self._branch = branch
        self._tag = tag
        self._rev = rev
        self._directory = directory
        self._develop = develop
        # VCS deps have no version constraint ('*'); the reference (branch /
        # tag / rev, falling back to HEAD) pins the source instead.
        super().__init__(name, '*', groups=groups, optional=optional, allows_prereleases=True, source_type=self._vcs.lower(), source_url=self._source, source_reference=(branch or tag or rev or 'HEAD'), source_resolved_reference=resolved_rev, source_subdirectory=directory, extras=extras)

    @property
    def vcs(self) -> str:
        return self._vcs

    @property
    def source(self) -> str:
        return self._source

    @property
    def branch(self) -> (str | None):
        return self._branch

    @property
    def tag(self) -> (str | None):
        return self._tag

    @property
    def rev(self) -> (str | None):
        return self._rev

    @property
    def directory(self) -> (str | None):
        return self._directory

    @property
    def develop(self) -> bool:
        return self._develop

    @property
    def reference(self) -> str:
        """The user-specified reference: branch, tag or rev ('' if none)."""
        return self._branch or self._tag or self._rev or ''

    @property
    def pretty_constraint(self) -> str:
        """Human-readable constraint, e.g. 'branch main' or 'tag v1.0'."""
        if self._branch:
            what = 'branch'
            version = self._branch
        elif self._tag:
            what = 'tag'
            version = self._tag
        elif self._rev:
            what = 'rev'
            version = self._rev
        else:
            return ''
        return f'{what} {version}'

    def _base_pep_508_name(self, *, resolved: bool = False) -> str:
        """Build the PEP 508 direct-reference string: name @ vcs+url@ref."""
        from poetry.core.vcs import git

        requirement = self.complete_pretty_name
        parsed_url = git.ParsedUrl.parse(self._source)
        # FIX: the " @ " separator between name and URL was missing.
        if parsed_url.protocol is not None:
            requirement += f' @ {self._vcs}+{self._source}'
        else:
            requirement += f' @ {self._vcs}+ssh://{parsed_url.format()}'
        # FIX: the "@" separator before the revision/reference was missing.
        if resolved and self.source_resolved_reference:
            requirement += f'@{self.source_resolved_reference}'
        elif self.reference:
            requirement += f'@{self.reference}'
        if self._directory:
            requirement += f'#subdirectory={self._directory}'
        return requirement

    @property
    def base_pep_508_name(self) -> str:
        return self._base_pep_508_name()

    @property
    def base_pep_508_name_resolved(self) -> str:
        return self._base_pep_508_name(resolved=True)

    def is_vcs(self) -> bool:
        return True
def create_tree_items_for_requirement(tree: QtWidgets.QTreeWidget, root: (QtWidgets.QTreeWidget | QtWidgets.QTreeWidgetItem), requirement: Requirement) -> QtWidgets.QTreeWidgetItem:
    """Populate ``tree`` with a pretty-printed ``requirement`` and return the
    top item created.

    ``pretty_print_requirement`` yields (depth, text) pairs; "of the
    following" grouping lines become expandable nodes, everything else
    becomes a QLabel leaf (comments styled green with linkified URLs, long
    lines soft-wrapped at and/or boundaries).
    """
    parents: list[(QtWidgets.QTreeWidget | QtWidgets.QTreeWidgetItem)] = [root]
    result = None
    for depth, text in pretty_print.pretty_print_requirement(requirement):
        item = QtWidgets.QTreeWidgetItem(parents[depth])
        item.setExpanded(True)
        if result is None:
            result = item
        if 'of the following' in text:
            # Grouping line: plain item text, and it parents deeper entries.
            item.setText(0, text)
            if len(parents) == depth + 1:
                parents.append(item)
            else:
                parents[depth + 1] = item
        else:
            label = QtWidgets.QLabel()
            if text.startswith('# '):
                # BUG FIX: the original re.sub call was garbled (truncated
                # pattern, syntax error). Reconstructed to linkify URLs in
                # comment lines — TODO confirm pattern against upstream.
                text = re.sub(r'(https?://\S+)', '<a href="\\1">\\1</a>', text[2:])
                label.setStyleSheet('font-weight: bold; color: green')
                label.setOpenExternalLinks(True)
            else:
                max_size = 100
                if len(text) > max_size:
                    # Greedily break long lines at the last ' and '/' or '
                    # before max_size; stop if no break point exists.
                    lines = [text]
                    while len(lines[-1]) > max_size:
                        and_i = lines[-1].rfind(' and ', 0, max_size)
                        or_i = lines[-1].rfind(' or ', 0, max_size)
                        i = max(and_i, or_i)
                        if i == -1:
                            break
                        lines.append(lines[-1][i:])
                        lines[-2] = lines[-2][:i]
                    text = '\n'.join(lines)
            label.setText(text)
            tree.setItemWidget(item, 0, label)
    return result
class CrocLexer(RegexLexer):
    """Pygments lexer for the Croc programming language."""

    name = 'Croc'
    # BUG FIX: the original `url = '` was an unterminated string literal
    # (syntax error) — the URL text was stripped from the source.
    # Reconstructed as the Croc language homepage; TODO confirm upstream.
    url = 'http://croc-lang.org'
    filenames = ['*.croc']
    aliases = ['croc']
    mimetypes = ['text/x-crocsrc']
    version_added = ''

    # NOTE(review): several string rules below look garbled too (duplicate
    # quote patterns that upstream prefixes with '@', and a first empty
    # alternative in the last double-quote rule); left untouched here since
    # only the syntax error is being fixed.
    tokens = {
        'root': [
            ('\\n', Whitespace),
            ('\\s+', Whitespace),
            ('(//.*?)(\\n)', bygroups(Comment.Single, Whitespace)),
            ('/\\*', Comment.Multiline, 'nestedcomment'),
            (words(('as', 'assert', 'break', 'case', 'catch', 'class', 'continue', 'default', 'do', 'else', 'finally', 'for', 'foreach', 'function', 'global', 'namespace', 'if', 'import', 'in', 'is', 'local', 'module', 'return', 'scope', 'super', 'switch', 'this', 'throw', 'try', 'vararg', 'while', 'with', 'yield'), suffix='\\b'), Keyword),
            ('(false|true|null)\\b', Keyword.Constant),
            ('([0-9][0-9_]*)(?=[.eE])(\\.[0-9][0-9_]*)?([eE][+\\-]?[0-9_]+)?', Number.Float),
            ('0[bB][01][01_]*', Number.Bin),
            ('0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex),
            ('([0-9][0-9_]*)(?![.eE])', Number.Integer),
            ('\'(\\\\[\'"\\\\nrt]|\\\\x[0-9a-fA-F]{2}|\\\\[0-9]{1,3}|\\\\u[0-9a-fA-F]{4}|\\\\U[0-9a-fA-F]{8}|.)\'', String.Char),
            ('"(""|[^"])*"', String),
            ('`(``|[^`])*`', String),
            ("'(''|[^'])*'", String),
            ('"(|\\\\[^\\\\]|[^"\\\\])*"', String),
            ('(~=|\\^=|%=|\\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\\?=|-\\>|<<=|<<|<=|\\+\\+|\\+=|--|-=|\\|\\||\\|=|&&|&=|\\.\\.|/=)|[-/.&$|\\+<>!()\\[\\]{}?,;:=*%^~#\\\\]', Punctuation),
            ('[a-zA-Z_]\\w*', Name),
        ],
        'nestedcomment': [
            ('[^*/]+', Comment.Multiline),
            ('/\\*', Comment.Multiline, '#push'),
            ('\\*/', Comment.Multiline, '#pop'),
            ('[*/]', Comment.Multiline),
        ],
    }
def convert_ecp_to_nwchem(symb, ecp):
    """Render an internal ECP representation in NWChem text format.

    ``ecp`` is (nelec, blocks); each block is (l, coeff_table) where l == -1
    denotes the local ('ul') channel and coeff_table[r_order] holds
    (exponent, coefficient) pairs.
    """
    symb = _std_symbol(symb)
    out = ['%-2s nelec %d' % (symb, ecp[0])]
    for angmom, coeff_table in ecp[1]:
        if angmom == -1:
            out.append('%-2s ul' % symb)
        else:
            out.append('%-2s %s' % (symb, SPDF[angmom].lower()))
        for r_order, pairs in enumerate(coeff_table):
            out.extend('%d %15.9f %15.9f' % (r_order, e, c) for e, c in pairs)
    return '\n'.join(out)
def _generate_supported_model_classes(model_name: Type[PretrainedConfig], supported_tasks: Optional[Union[(str, List[str])]] = None) -> List[Type[PreTrainedModel]]:
    """Return the model classes registered for ``model_name`` across the
    requested tasks (all known tasks when ``supported_tasks`` is None)."""
    model_config_class = CONFIG_MAPPING[model_name]
    task_mapping = {
        'default': MODEL_MAPPING,
        'pretraining': MODEL_FOR_PRETRAINING_MAPPING,
        'next-sentence-prediction': MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
        'masked-lm': MODEL_FOR_MASKED_LM_MAPPING,
        'causal-lm': MODEL_FOR_CAUSAL_LM_MAPPING,
        'seq2seq-lm': MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        'multiple-choice': MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        'question-answering': MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        'sequence-classification': MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        'token-classification': MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        'image-classification': MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
    }
    if supported_tasks is None:
        supported_tasks = task_mapping.keys()
    elif isinstance(supported_tasks, str):
        # A single task name is accepted for convenience.
        supported_tasks = [supported_tasks]
    # Not every architecture supports every task — drop missing entries.
    candidates = (task_mapping[task].get(model_config_class, None) for task in supported_tasks)
    return [model_class for model_class in candidates if model_class]
def pin_memory_fn(data, device=None):
    """Recursively pin the memory of ``data``.

    Objects exposing ``pin_memory`` are pinned directly; mappings and
    sequences are rebuilt with pinned elements (falling back to a plain
    dict/list when the original type cannot be reconstructed, e.g.
    namedtuples); strings, bytes and everything else pass through unchanged.
    """
    if hasattr(data, 'pin_memory'):
        return data.pin_memory(device)
    # str/bytes are Sequences but must not be iterated element-wise.
    if isinstance(data, (str, bytes)):
        return data
    if isinstance(data, collections.abc.Mapping):
        pinned = {key: pin_memory_fn(value, device) for key, value in data.items()}
    elif isinstance(data, collections.abc.Sequence):
        pinned = [pin_memory_fn(item, device) for item in data]
    else:
        return data
    try:
        # Try to preserve the container type (tuple, OrderedDict, ...).
        return type(data)(pinned)
    except TypeError:
        return pinned
def parse_genia() -> None:
    """Split the GENIA corpus into train/dev/test files and print tag stats.

    The corpus file is opened once and its iterator is deliberately resumed
    across the outer loop: each dataset consumes the next `dataset_size`
    sentences, so train/dev/test partition the file in order.
    """
    output_dir_path = 'data/genia/'
    # mode=493 is 0o755 (rwxr-xr-x).
    os.makedirs(output_dir_path, mode=493, exist_ok=True)
    output_file_list = ['genia.train', 'genia.dev', 'genia.test']
    dataset_size_list = [15022, 1669, 1855]
    # Lowercase tokens unless a cased BERT model is configured.
    do_lower_case = ('-cased' not in config.bert_model)
    with open(CORPUS_FILE_PATH, 'r') as f:
        for (output_file, dataset_size) in zip(output_file_list, dataset_size_list):
            output_lines = []
            sent_count = 0
            token_count = 0
            # Reset per-tag statistics for this split.
            for tag in TAG_SET:
                TAG_SET[tag] = Stat()
            # Resumes where the previous split stopped (shared file iterator).
            for line in f:
                line = line.strip()
                if (line.find(SENTENCE_BEGIN_TAG) > (- 1)):
                    # Sentences are expected on a single line (begin+end tags).
                    assert (line.find(SENTENCE_END_TAG) > (- 1))
                    (words, labels) = parse_line(line, do_lower_case)
                    # Emit token line, label line, then a blank separator.
                    output_lines.append((words + '\n'))
                    output_lines.append((labels + '\n'))
                    output_lines.append('\n')
                    sent_count += 1
                    token_count += len(words.split(' '))
                    if (sent_count == dataset_size):
                        with open((output_dir_path + output_file), 'w') as f2:
                            f2.writelines(output_lines)
                        print('')
                        print('--- {}'.format(output_file))
                        print('# of sentences:\t{:6d}'.format(sent_count))
                        print('# of tokens:\t{:6d}'.format(token_count))
                        # Aggregate mention counts across all tags, per
                        # nesting layer, plus the ignored count.
                        total = 0
                        total_layer = []
                        total_ignored = 0
                        for (_, stat) in TAG_SET.items():
                            total += stat.total
                            for (depth, num) in enumerate(stat.layer):
                                if (len(total_layer) == depth):
                                    total_layer.append(0)
                                total_layer[depth] += num
                            total_ignored += stat.ignored
                        print('total # of mentions:\t{}\t(layer:\t{},\tignored:\t{})'.format(total, total_layer, total_ignored))
                        for (tag, stat) in TAG_SET.items():
                            print('\t{}:\t{:5d}\t(layer:\t{},\tignored:\t{})'.format(tag, stat.total, stat.layer, stat.ignored))
                        # Mean number of labels per token per tag family.
                        ave_labels = 0
                        for (_, stat) in TAG_SET.items():
                            ave_labels += stat.num_labels
                        ave_labels /= (token_count * len(TAG_SET))
                        print('average # of labels:\t{:.2f}'.format(ave_labels))
                        # Move on to the next split; the file iterator keeps
                        # its position for the next outer iteration.
                        break
class TimingSuite(TestSuite):
    """A unittest TestSuite that records each test's wall-clock duration.

    NOTE(review): `run` mirrors the private internals of
    `unittest.TestSuite.run` (class/module fixture handling); keep it in sync
    with the unittest version in use.
    """

    def save_test_time(self, test_name, duration):
        """Append '<test name>,<seconds>' to a per-process report file."""
        file_prefix = getattr(settings, 'TESTS_REPORT_TMP_FILES_PREFIX', '_tests_report_')
        # One file per worker process so parallel runs do not clobber each other.
        file_name = '{}{}.txt'.format(file_prefix, os.getpid())
        with open(file_name, 'a+') as f:
            f.write('{name},{duration:.6f}\n'.format(name=test_name, duration=duration))

    def run(self, result, debug=False):
        """Run the suite like TestSuite.run, timing each non-suite test."""
        topLevel = False
        if (getattr(result, '_testRunEntered', False) is False):
            # We are the outermost suite; responsible for final teardown.
            result._testRunEntered = topLevel = True
        for test in self:
            if result.shouldStop:
                break
            start_time = _time()
            if _isnotsuite(test):
                # Handle class/module setup transitions, as TestSuite.run does.
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__
                if (getattr(test.__class__, '_classSetupFailed', False) or getattr(result, '_moduleSetUpFailed', False)):
                    # Fixture failed: skip the test (its duration is not saved).
                    continue
            if (not debug):
                test(result)
            else:
                test.debug()
            # Duration includes any class/module fixture work done above.
            self.save_test_time(str(test), (_time() - start_time))
        if topLevel:
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result
class RCElement(pybamm.BaseSubModel):
    """One RC (resistor-capacitor) element of an equivalent-circuit model."""

    def __init__(self, param, element_number, options=None):
        super().__init__(param)
        # Index of this element within the circuit (used in variable names).
        self.element_number = element_number
        self.model_options = options

    def get_fundamental_variables(self):
        """Declare this element's overpotential state variable."""
        n = self.element_number
        vrc = pybamm.Variable(f'Element-{n} overpotential [V]')
        return {f'Element-{n} overpotential [V]': vrc}

    def get_coupled_variables(self, variables):
        """Compute R, C, tau and the irreversible heat from the inputs."""
        n = self.element_number
        T_cell = variables['Cell temperature [degC]']
        current = variables['Current [A]']
        soc = variables['SoC']
        # R and C are lookup functions of temperature, current and SoC.
        resistance = self.param.rcr_element(f'R{n} [Ohm]', T_cell, current, soc)
        capacitance = self.param.rcr_element(f'C{n} [F]', T_cell, current, soc)
        time_constant = resistance * capacitance
        vrc = variables[f'Element-{n} overpotential [V]']
        # Joule heating across this element.
        Q_irr = (-current) * vrc
        variables.update({
            f'R{n} [Ohm]': resistance,
            f'C{n} [F]': capacitance,
            f'tau{n} [s]': time_constant,
            (f'Element-{n} ' + 'irreversible heat generation [W]'): Q_irr,
        })
        return variables

    def set_rhs(self, variables):
        """First-order RC dynamics: dv/dt = -v/tau - i*R/tau."""
        n = self.element_number
        vrc = variables[f'Element-{n} overpotential [V]']
        current = variables['Current [A]']
        resistance = variables[f'R{n} [Ohm]']
        time_constant = variables[f'tau{n} [s]']
        self.rhs = {vrc: ((-vrc) / time_constant) - ((current * resistance) / time_constant)}

    def set_initial_conditions(self, variables):
        n = self.element_number
        vrc = variables[f'Element-{n} overpotential [V]']
        self.initial_conditions = {vrc: self.param.initial_rc_overpotential(n)}
class MockRole(CustomMockMixin, unittest.mock.Mock, ColourMixin, HashableMixin):
    """Mock of a discord Role, with sensible defaults and position ordering."""

    spec_set = role_instance

    def __init__(self, **kwargs) -> None:
        defaults = {
            'id': next(self.discord_id),
            'name': 'role',
            'position': 1,
            'colour': discord.Colour(),
            'permissions': discord.Permissions(),
        }
        # Caller-provided kwargs override the defaults.
        super().__init__(**{**defaults, **kwargs})
        # Accept plain ints for colour/permissions and wrap them.
        if isinstance(self.colour, int):
            self.colour = discord.Colour(self.colour)
        if isinstance(self.permissions, int):
            self.permissions = discord.Permissions(self.permissions)
        if 'mention' not in kwargs:
            self.mention = f'&{self.name}'

    def __lt__(self, other):
        # Roles order by their position in the hierarchy.
        return self.position < other.position

    def __ge__(self, other):
        return self.position >= other.position
# The decorator below had lost its '@pytest.mark' prefix, which made the
# module syntactically invalid; restored here.
@pytest.mark.parametrize('blacklist, expected', [
    # 'ab*' removes every hint starting with 'ab'; only 'aa' remains.
    (['ab*'], expected_text(('a', 'yellow', 'a', 'message-info cmd-aa'))),
    # '*' removes everything, so the hint text is empty.
    (['*'], ''),
])
def test_blacklist(keyhint, config_stub, blacklist, expected):
    """Blacklisted keychains must be excluded from the keyhint text."""
    config_stub.val.keyhint.blacklist = blacklist
    bindings = {'normal': {
        'aa': 'message-info cmd-aa',
        'ab': 'message-info cmd-ab',
        'aba': 'message-info cmd-aba',
        'abb': 'message-info cmd-abb',
        'xd': 'message-info cmd-xd',
        'xe': 'message-info cmd-xe',
    }}
    config_stub.val.bindings.default = {}
    config_stub.val.bindings.commands = bindings
    keyhint.update_keyhint(usertypes.KeyMode.normal, 'a')
    assert (keyhint.text() == expected)
class LatentGridCRF(GridCRF, LatentGraphCRF):
    """Latent-variable CRF on a 2-d grid.

    Each of the ``n_labels`` observable labels is expanded into
    ``n_states_per_label`` hidden states (handled by ``LatentGraphCRF``),
    while the grid-specific concerns — edge construction and reshaping
    between grid-shaped and flattened labelings — come from ``GridCRF``.
    """

    def __init__(self, n_labels=None, n_features=None, n_states_per_label=2, inference_method=None, neighborhood=4):
        # LatentGraphCRF.__init__ derives self.n_states / self.n_features,
        # which GridCRF.__init__ then consumes; the order matters.
        LatentGraphCRF.__init__(self, n_labels, n_features, n_states_per_label, inference_method=inference_method)
        GridCRF.__init__(self, n_states=self.n_states, n_features=self.n_features, neighborhood=neighborhood, inference_method=inference_method)

    def _set_size_joint_feature(self):
        # Joint-feature size depends on the latent state count, so defer
        # to the latent base class rather than GridCRF.
        LatentGraphCRF._set_size_joint_feature(self)

    def initialize(self, X, Y):
        LatentGraphCRF.initialize(self, X, Y)

    def init_latent(self, X, Y):
        """Initialize latent states H by clustering features within each label."""
        # One edge list per sample; every sample shares the grid neighborhood.
        edges = [[make_grid_edges(x, neighborhood=self.neighborhood, return_lists=False)] for x in X]
        # Flatten the spatial axes for kmeans_init, then restore Y's grid shape.
        H = kmeans_init(X.reshape(X.shape[0], (- 1), self.n_features), Y.reshape(Y.shape[0], (- 1)), edges, n_labels=self.n_labels, n_states_per_label=self.n_states_per_label)
        return np.array(H).reshape(Y.shape)

    def loss_augmented_inference(self, x, h, w, relaxed=False, return_energy=False):
        # Delegate on the flattened labeling, then reshape back to the grid.
        h = LatentGraphCRF.loss_augmented_inference(self, x, h.ravel(), w, relaxed, return_energy)
        return self._reshape_y(h, x.shape, return_energy)

    def latent(self, x, y, w):
        res = LatentGraphCRF.latent(self, x, y.ravel(), w)
        return res.reshape(y.shape)

    def continuous_loss(self, y, y_hat):
        # y_hat is a relaxed labeling whose last axis indexes states;
        # flatten all leading axes before delegating.
        return LatentGraphCRF.continuous_loss(self, y.ravel(), y_hat.reshape((- 1), y_hat.shape[(- 1)]))
# The two decorators below had lost their '@pytest.mark' prefix, which made
# the module syntactically invalid; restored here.
@pytest.mark.slow
@pytest.mark.parametrize('alg', algos_disc)
def test_discrete_identity(alg):
    """Each discrete-action algorithm should reach >= 0.9 reward on DiscreteIdentityEnv."""
    # Copy before updating: the original mutated the shared learn_kwargs[alg]
    # dict in place, leaking common_kwargs into later parametrized runs.
    kwargs = dict(learn_kwargs[alg])
    kwargs.update(common_kwargs)
    learn_fn = (lambda e: get_learn_function(alg)(env=e, **kwargs))
    env_fn = (lambda: DiscreteIdentityEnv(10, episode_len=100))
    simple_test(env_fn, learn_fn, 0.9)
def test_serializer_create_text(db, settings):
    """Creating a text value must pass quota validation even with a zero quota."""
    class FakeProject():
        file_size = 1

    class FakeView():
        action = 'create'
        project = FakeProject()

    settings.PROJECT_FILE_QUOTA = '0'
    serializer = ValueSerializer()
    serializer.context['view'] = FakeView()
    # Text values carry no file payload, so this must not raise.
    ValueQuotaValidator()({'value_type': 'text'}, serializer)
def visible(widget, width=None, height=None):
    """Context-manager generator: show *widget* in a realized window, yield it,
    then hide and clean up.

    If the widget is not already inside a toplevel Gtk.Window, a temporary
    POPUP window is created to host it and destroyed again afterwards.
    Presumably used with @contextlib.contextmanager — the decorator is
    outside this view; TODO confirm.
    """
    own_window = False
    toplevel = widget.get_toplevel()
    if not isinstance(toplevel, Gtk.Window):
        window = Gtk.Window(type=Gtk.WindowType.POPUP)
        window.add(widget)
        own_window = True
    else:
        window = toplevel
    if (width is not None) and (height is not None):
        window.resize(width, height)
    window.show_all()
    # Drain the event loop so the window is actually mapped before asserting.
    while Gtk.events_pending():
        Gtk.main_iteration()
    assert widget.get_visible()
    assert window.get_visible()
    try:
        yield widget
    finally:
        # Fix: run cleanup even when the with-body raises, so the temporary
        # popup window never leaks into subsequent tests.
        while Gtk.events_pending():
            Gtk.main_iteration()
        window.hide()
        if own_window:
            window.remove(widget)
            window.destroy()
        while Gtk.events_pending():
            Gtk.main_iteration()
def map(y_true, y_pred, rel_threshold=0):
    """Mean average precision for a single query.

    Args:
        y_true: relevance labels; items with label > rel_threshold count as positive.
        y_pred: predicted scores, aligned with y_true.
        rel_threshold: labels strictly above this value are relevant.

    Returns:
        Average precision over the positives, or 0.0 when there are none.

    NOTE: this function shadows the builtin ``map``; the name is kept for
    API compatibility with existing callers.
    """
    y_true = _to_list(np.squeeze(y_true).tolist())
    y_pred = _to_list(np.squeeze(y_pred).tolist())
    # Fix: zip() returns an iterator in Python 3, and random.shuffle requires
    # a mutable sequence — the original raised TypeError here. Materialize
    # the pairs first. The shuffle randomizes tie-breaking before the stable
    # sort by descending score.
    pairs = list(zip(y_true, y_pred))
    random.shuffle(pairs)
    pairs.sort(key=(lambda pair: pair[1]), reverse=True)
    s = 0.0
    ipos = 0
    for j, (gold, _score) in enumerate(pairs):
        if gold > rel_threshold:
            ipos += 1.0
            # Precision at this rank, accumulated over the positives.
            s += (ipos / (j + 1.0))
    if ipos == 0:
        s = 0.0
    else:
        s /= ipos
    return s
def test_extract_link_hrefs(app, client):
    """Hyperlink-only crawling should surface exactly one <link> node: the stylesheet."""
    rules = (PERMISSIVE_HYPERLINKS_ONLY_RULE_SET
             + REQUEST_EXTERNAL_RESOURCE_LINKS_RULE_SET)
    crawler = Crawler(client=client, initial_paths=['/'], rules=rules)
    crawler.crawl()
    nodes = crawler.graph.get_nodes_by_source('link')
    assert len(nodes) == 1
    stylesheet = nodes[0]
    assert stylesheet.path == '/style.css'
    assert stylesheet.status_code == 200
class TestFermionicTransformation(QiskitChemistryTestCase):
    """FermionicTransformation tests on the H2 molecule (sto3g basis)."""

    def setUp(self):
        super().setUp()
        try:
            # H2 near its equilibrium bond length (0.735 angstrom).
            driver = PySCFDriver(atom='H .0 .0 .0; H .0 .0 0.735', unit=UnitsType.ANGSTROM, charge=0, spin=0, basis='sto3g')
        except QiskitChemistryError:
            self.skipTest('PYSCF driver does not appear to be installed')
        self.driver = driver

    def _validate_vars(self, fermionic_transformation, energy_shift=0.0, ph_energy_shift=0.0):
        """Check the HF energy and the energy shifts stored on the transformation."""
        self.assertAlmostEqual(fermionic_transformation._hf_energy, (- 1.117), places=3)
        self.assertAlmostEqual(fermionic_transformation._energy_shift, energy_shift)
        self.assertAlmostEqual(fermionic_transformation._ph_energy_shift, ph_energy_shift)

    def _validate_info(self, fermionic_transformation, num_particles=None, num_orbitals=4, actual_two_qubit_reduction=False):
        """Check molecule_info contents; z2 symmetries are popped and must be empty."""
        num_particles = (num_particles if (num_particles is not None) else (1, 1))
        z2symmetries = fermionic_transformation.molecule_info.pop('z2_symmetries')
        self.assertEqual(z2symmetries.is_empty(), True)
        self.assertEqual(fermionic_transformation.molecule_info, {'num_particles': num_particles, 'num_orbitals': num_orbitals, 'two_qubit_reduction': actual_two_qubit_reduction})

    def _validate_input_object(self, qubit_op, num_qubits=4, num_paulis=15):
        """Check the produced qubit operator's type, width and Pauli-term count."""
        self.assertTrue(isinstance(qubit_op, OperatorBase))
        self.assertIsNotNone(qubit_op)
        self.assertEqual(qubit_op.num_qubits, num_qubits)
        self.assertEqual(len(qubit_op.oplist), num_paulis)

    def test_output(self):
        """Parity mapping with two-qubit reduction: 2 qubits, 5 Pauli terms."""
        fermionic_transformation = FermionicTransformation(transformation=TransformationType.FULL, qubit_mapping=QubitMappingType.PARITY, two_qubit_reduction=True, freeze_core=False, orbital_reduction=[])
        (qubit_op, _) = fermionic_transformation.transform(self.driver)
        self._validate_vars(fermionic_transformation)
        self._validate_info(fermionic_transformation, actual_two_qubit_reduction=True)
        self._validate_input_object(qubit_op, num_qubits=2, num_paulis=5)

    def test_jordan_wigner(self):
        """Jordan-Wigner mapping without reduction keeps the default 4 qubits."""
        fermionic_transformation = FermionicTransformation(transformation=TransformationType.FULL, qubit_mapping=QubitMappingType.JORDAN_WIGNER, two_qubit_reduction=False, freeze_core=False, orbital_reduction=[])
        (qubit_op, _) = fermionic_transformation.transform(self.driver)
        self._validate_vars(fermionic_transformation)
        self._validate_info(fermionic_transformation)
        self._validate_input_object(qubit_op)

    def test_jordan_wigner_2q(self):
        """Requesting two-qubit reduction with JW must be ignored (not applicable)."""
        fermionic_transformation = FermionicTransformation(transformation=TransformationType.FULL, qubit_mapping=QubitMappingType.JORDAN_WIGNER, two_qubit_reduction=True, freeze_core=False, orbital_reduction=[])
        (qubit_op, _) = fermionic_transformation.transform(self.driver)
        self._validate_vars(fermionic_transformation)
        # The reduction flag is requested but JW cannot apply it.
        self._validate_info(fermionic_transformation, actual_two_qubit_reduction=False)
        self._validate_input_object(qubit_op)

    def test_parity(self):
        """Parity mapping without two-qubit reduction."""
        fermionic_transformation = FermionicTransformation(transformation=TransformationType.FULL, qubit_mapping=QubitMappingType.PARITY, two_qubit_reduction=False, freeze_core=False, orbital_reduction=[])
        (qubit_op, _) = fermionic_transformation.transform(self.driver)
        self._validate_vars(fermionic_transformation)
        self._validate_info(fermionic_transformation)
        self._validate_input_object(qubit_op)

    def test_bravyi_kitaev(self):
        """Bravyi-Kitaev mapping without reduction."""
        fermionic_transformation = FermionicTransformation(transformation=TransformationType.FULL, qubit_mapping=QubitMappingType.BRAVYI_KITAEV, two_qubit_reduction=False, freeze_core=False, orbital_reduction=[])
        (qubit_op, _) = fermionic_transformation.transform(self.driver)
        self._validate_vars(fermionic_transformation)
        self._validate_info(fermionic_transformation)
        self._validate_input_object(qubit_op)

    def test_particle_hole(self):
        """Particle-hole transformation shifts the energy by the HF reference."""
        fermionic_transformation = FermionicTransformation(transformation=TransformationType.PARTICLE_HOLE, qubit_mapping=QubitMappingType.JORDAN_WIGNER, two_qubit_reduction=False, freeze_core=False, orbital_reduction=[])
        (qubit_op, _) = fermionic_transformation.transform(self.driver)
        self._validate_vars(fermionic_transformation, ph_energy_shift=(- 1.))
        self._validate_info(fermionic_transformation)
        self._validate_input_object(qubit_op)

    def test_freeze_core(self):
        """freeze_core on H2 is a no-op (no core orbitals to freeze)."""
        fermionic_transformation = FermionicTransformation(transformation=TransformationType.FULL, qubit_mapping=QubitMappingType.JORDAN_WIGNER, two_qubit_reduction=False, freeze_core=True, orbital_reduction=[])
        (qubit_op, _) = fermionic_transformation.transform(self.driver)
        self._validate_vars(fermionic_transformation)
        self._validate_info(fermionic_transformation)
        self._validate_input_object(qubit_op)

    def test_orbital_reduction(self):
        """Removing the highest orbital halves the problem to 2 orbitals / 2 qubits."""
        fermionic_transformation = FermionicTransformation(transformation=TransformationType.FULL, qubit_mapping=QubitMappingType.JORDAN_WIGNER, two_qubit_reduction=False, freeze_core=False, orbital_reduction=[(- 1)])
        qmolecule = self.driver.run()
        fer_op = FermionicOperator(h1=qmolecule.one_body_integrals, h2=qmolecule.two_body_integrals)
        # Auxiliary operator: total particle number, passed through transform().
        dummy = fer_op.total_particle_number()
        expected = (((I ^ I) - (0.5 * (I ^ Z))) - (0.5 * (Z ^ I)))
        (qubit_op, aux_ops) = fermionic_transformation.transform(self.driver, [dummy])
        self._validate_vars(fermionic_transformation)
        self._validate_info(fermionic_transformation, num_orbitals=2)
        self._validate_input_object(qubit_op, num_qubits=2, num_paulis=4)
        # NOTE(review): index 6 is presumably where the supplied dummy operator
        # lands after the built-in aux operators — verify against transform().
        self.assertEqual(aux_ops[6], expected)
# The two decorators below had lost their '@patch' prefix, which made the
# module syntactically invalid; restored here. Patch order matters: the
# bottom decorator (centerOn) supplies the first mock argument.
@patch('beeref.view.BeeGraphicsView.fitInView')
@patch('beeref.view.BeeGraphicsView.centerOn')
def test_fit_rect_toggle_when_previous(center_mock, fit_mock, view):
    """Toggling with a stored transform restores center/scale instead of fitting."""
    item = MagicMock()
    view.previous_transform = {
        'toggle_item': item,
        'transform': QtGui.QTransform.fromScale(2, 2),
        'center': QtCore.QPointF(30, 40),
    }
    view.setSceneRect(QtCore.QRectF((- 2000), (- 2000), 4000, 4000))
    rect = QtCore.QRectF(30, 40, 100, 80)
    view.fit_rect(rect, toggle_item=item)
    fit_mock.assert_not_called()
    center_mock.assert_called_once_with(QtCore.QPointF(30, 40))
    assert (view.get_scale() == 2)
# The decorator below had lost its '@patch' prefix, which made the module
# syntactically invalid; restored here.
@patch('pypyr.config.config.default_encoding', new='utf-16')
def test_json_pass_with_encoding(fs):
    """The jsonfile parser should read files using the configured default encoding."""
    in_path = './tests/testfiles/test.json'
    # pyfakefs file written as utf-16 to match the patched default encoding.
    fs.create_file(in_path, contents='{\n  "key1": "value1",\n  "key2": "value2",\n  "key3": "value3"\n}\n', encoding='utf-16')
    context = pypyr.parser.jsonfile.get_parsed_context([in_path])
    assert context, "context shouldn't be None"
    assert (len(context) == 3), 'context should have 3 items'
    assert (context['key2'] == 'value2'), 'key2 should be value2'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.