code stringlengths 101 5.91M |
|---|
def Xor(a, b, ctx=None):
    """Create a Z3 xor expression over two Boolean operands.

    The context is resolved from the arguments (or taken from ``ctx``),
    both operands are coerced to that context's Boolean sort, and the
    underlying Z3 AST node is wrapped in a ``BoolRef``.
    """
    ctx = _get_ctx(_ctx_from_ast_arg_list([a, b], ctx))
    bool_sort = BoolSort(ctx)
    lhs = bool_sort.cast(a)
    rhs = bool_sort.cast(b)
    return BoolRef(Z3_mk_xor(ctx.ref(), lhs.as_ast(), rhs.as_ast()), ctx)
def evaluate(j, e, solver, scores1, scores2, data_loader, logdir, reference_point, split, result_dict):
    """Evaluate ``solver`` on ``data_loader`` along a set of preference rays.

    Per-batch scores from ``scores1``/``scores2`` are averaged over the
    loader, the hypervolume of the loss-based scores is computed against
    ``reference_point``, a Pareto-front plot is written, and the results for
    epoch ``e`` of run ``j`` are stored in ``result_dict`` and dumped to
    ``<logdir>/<split>_results.json``.

    Args:
        j: index of the training run/start being evaluated.
        e: current epoch (used in result keys and plot filenames).
        solver: object exposing ``eval_step(batch, test_rays)`` and ``log()``.
        scores1, scores2: equal-length lists of callables scoring a batch.
        data_loader: iterable of CUDA-movable batches.
        logdir: output directory for plots and the results JSON.
        reference_point: reference point for the hypervolume computation.
        split: one of 'train', 'val', 'test'.
        result_dict: nested dict, updated in place and returned.
    """
    assert (split in ['train', 'val', 'test'])
    # 'pf': evaluate along rays spanning the Pareto front; 'mcr': a single
    # uniform ray (equal weight on every objective).
    mode = 'pf'
    if (mode == 'pf'):
        # Fixed: typo in the assertion message ("cirlce" -> "circle").
        assert (len(scores1) == len(scores2) <= 3), 'Cannot generate circle points for more than 3 dimensions.'
        n_test_rays = 25
        test_rays = utils.circle_points(n_test_rays, dim=len(scores1))
    elif (mode == 'mcr'):
        test_rays = np.ones((1, len(scores1)))
        test_rays /= test_rays.sum(axis=1).reshape(1, 1)
    else:
        raise ValueError()
    print(test_rays[0])
    score_values1 = np.array([])
    score_values2 = np.array([])
    for (k, batch) in enumerate(data_loader):
        print(f'eval batch {(k + 1)} of {len(data_loader)}')
        batch = utils.dict_to_cuda(batch)
        s1 = []
        s2 = []
        # One eval step per test ray; each step yields model outputs that the
        # score callables consume via the updated batch dict.
        for l in solver.eval_step(batch, test_rays):
            batch.update(l)
            s1.append([s(**batch) for s in scores1])
            s2.append([s(**batch) for s in scores2])
        if (score_values1.size == 0):
            score_values1 = np.array(s1)
            score_values2 = np.array(s2)
        else:
            score_values1 += np.array(s1)
            score_values2 += np.array(s2)
    # Average the accumulated per-batch scores.
    score_values1 /= len(data_loader)
    score_values2 /= len(data_loader)
    hv = HyperVolume(reference_point)
    if (mode == 'pf'):
        pareto_front = utils.ParetoFront([s.__class__.__name__ for s in scores1], logdir, '{}_{:03d}'.format(split, e))
        pareto_front.append(score_values1)
        pareto_front.plot()
        volume = hv.compute(score_values1)
    else:
        # Hypervolume is undefined for the single-ray 'mcr' mode.
        volume = (- 1)
    result = {'scores_loss': score_values1.tolist(), 'scores_mcr': score_values2.tolist(), 'hv': volume, 'task': j, 'max_epoch_so_far': (- 1), 'max_volume_so_far': (- 1), 'training_time_so_far': (- 1)}
    result.update(solver.log())
    result_dict[f'start_{j}'][f'epoch_{e}'] = result
    with open((pathlib.Path(logdir) / f'{split}_results.json'), 'w') as file:
        json.dump(result_dict, file)
    return result_dict
def _make_pretrained_resnext101_wsl(use_pretrained):
    """Load the WSL-pretrained ResNeXt-101 32x8d via torch.hub and wrap it
    as a backbone.

    ``use_pretrained`` is accepted for interface parity with the sibling
    factory functions; the hub checkpoint is always used.
    """
    model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl')
    return _make_resnet_backbone(model)
class BenchmarkingZeroShotDataDataset(datasets.GeneratorBasedBuilder):
    """HF ``datasets`` builder for the "Benchmarking Zero-Shot" suite.

    Three configs: 'topic' (Yahoo news groups, single-label), 'emotion'
    (Unified emotions, single-label) and 'situation' (emergency situations,
    multi-label sequence of strings).
    """
    VERSION = datasets.Version('1.1.0')
    # Fixed: typo in the 'topic' description ("classifcation" -> "classification").
    BUILDER_CONFIGS = [datasets.BuilderConfig(name='topic', version=VERSION, description='Topic classification dataset based on Yahoo news groups.'), datasets.BuilderConfig(name='emotion', version=VERSION, description='An emotion detection dataset (based on Unified emotions).'), datasets.BuilderConfig(name='situation', version=VERSION, description='An emergency situation dataset.')]
    def _info(self):
        """Return config-specific DatasetInfo; 'situation' labels are a
        sequence of strings, the other configs use a ClassLabel."""
        if (self.config.name == 'topic'):
            features = datasets.Features({'text': datasets.Value('string'), 'label': datasets.ClassLabel(names=['Society & Culture', 'Science & Mathematics', 'Health', 'Education & Reference', 'Computers & Internet', 'Sports', 'Business & Finance', 'Entertainment & Music', 'Family & Relationships', 'Politics & Government'])})
        elif (self.config.name == 'emotion'):
            features = datasets.Features({'text': datasets.Value('string'), 'label': datasets.ClassLabel(names=['anger', 'disgust', 'fear', 'guilt', 'joy', 'love', 'noemo', 'sadness', 'shame', 'surprise'])})
        elif (self.config.name == 'situation'):
            features = datasets.Features({'text': datasets.Value('string'), 'label': datasets.Sequence(datasets.Value('string'))})
        else:
            raise NotImplementedError(self.config.name)
        return datasets.DatasetInfo(description=_DESCRIPTION, features=features, supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION)
    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare train/test/dev splits."""
        data_dir = dl_manager.download_and_extract(_URL)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'data_dir': data_dir, 'split': 'train'}), datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'data_dir': data_dir, 'split': 'test'}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'data_dir': data_dir, 'split': 'dev'})]
    def _generate_examples(self, data_dir, split):
        """Yield examples by delegating to the shared ``read`` helper with
        config-specific options (multi-label splitting, field order)."""
        data_path = Path(data_dir).joinpath('BenchmarkingZeroShot', self.config.name)
        if (self.config.name == 'topic'):
            (yield from read(data_path, split, split_label=False))
        elif (self.config.name == 'situation'):
            # Multi-label: raw label field is split into a list.
            (yield from read(data_path, split, split_label=True))
        elif (self.config.name == 'emotion'):
            # Emotion files carry an extra 'tag' column before the text.
            (yield from read(data_path, split, split_label=False, fieldnames=['label', 'tag', 'text']))
        else:
            raise NotImplementedError(self.config.name)
# NOTE(review): this bare call looks like a decorator whose '@' was lost in
# extraction -- presumably ``@register_criterion('masked_lm')``; confirm
# against the original fairseq source.
_criterion('masked_lm')
class MaskedLmLoss(FairseqCriterion):
    """Masked-LM (BERT-style) criterion: summed NLL over the positions whose
    target token is not padding."""
    def __init__(self, args, task):
        super().__init__(args, task)
    def forward(self, model, sample, reduce=True):
        """Compute masked-LM loss for one sample.

        Returns ``(loss, sample_size, logging_output)`` where ``sample_size``
        is the number of masked (non-pad) target tokens.
        """
        # Positions that actually carry a prediction target (non-padding).
        masked_tokens = sample['target'].ne(self.padding_idx)
        sample_size = masked_tokens.int().sum().item()
        # Rare case: no masked tokens in the batch -- score all positions
        # instead of indexing with an empty mask.
        if (sample_size == 0):
            masked_tokens = None
        logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]
        targets = model.get_targets(sample, [logits])
        if (sample_size != 0):
            targets = targets[masked_tokens]
        # Summed NLL in float32 for numerical stability; pad targets ignored.
        loss = F.nll_loss(F.log_softmax(logits.view((- 1), logits.size((- 1))), dim=(- 1), dtype=torch.float32), targets.view((- 1)), reduction='sum', ignore_index=self.padding_idx)
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'nll_loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['nsentences'], 'sample_size': sample_size}
        return (loss, sample_size, logging_output)
    # NOTE(review): no ``self`` parameter -- presumably a ``@staticmethod``
    # decorator was stripped during extraction; calling this on an instance
    # would mis-bind ``logging_outputs``. Confirm against upstream.
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate per-worker logging outputs into per-token averages
        (losses reported in base-2 log units)."""
        loss = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        agg_output = {'loss': ((loss / sample_size) / math.log(2)), 'nll_loss': (((sum((log.get('nll_loss', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (ntokens > 0) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        return agg_output
# NOTE(review): bare call -- likely a stripped decorator such as
# ``@test_utils.test(arch=supported_archs_taichi_ndarray)``; confirm upstream.
_utils.test(arch=supported_archs_taichi_ndarray)
def test_compiled_functions():
    # Thin test wrapper delegating to the shared implementation.
    _test_compiled_functions()
def main(N, bc):
    """Solve the 1-D Poisson problem -u'' = f on a Laguerre function space of
    size ``N`` with boundary condition ``bcs[bc]`` (0: Dirichlet, 1: Neumann),
    report the L2 error, then either plot (interactive run) or assert
    accuracy (pytest run).
    """
    SD = FunctionSpace(N, 'Laguerre', bc=bcs[bc])
    u = TrialFunction(SD)
    v = TestFunction(SD)
    # Right-hand side sampled on the quadrature grid, then projected.
    fj = Array(SD, buffer=fe)
    f_hat = Function(SD)
    f_hat = inner(v, fj, output_array=f_hat)
    # Weak-form stiffness matrix for -div(grad(u)).
    A = inner(v, (- div(grad(u))))
    sol = la.Solver(A)
    u_hat = Function(SD)
    u_hat = sol(f_hat, u_hat)
    uj = u_hat.backward()
    ua = Array(SD, buffer=ue)
    # Quadrature-weighted L2 error against the exact solution ``ue``.
    error = np.sqrt(inner(1, ((uj - ua) ** 2)))
    d = {0: 'Dirichlet', 1: 'Neumann'}
    print(f'laguerre_poisson1D {d[bc]:10s} L2 error {error:2.6e}')
    # NOTE(review): this tests membership among os.environ *keys*, i.e.
    # whether a 'pytest' environment variable is set -- confirm intended.
    if ('pytest' not in os.environ):
        import matplotlib.pyplot as plt
        xx = np.linspace(0, 16, 100)
        plt.plot(xx, lambdify(x, ue)(xx), 'r', xx, u_hat.eval(xx), 'bo', markersize=2)
        plt.show()
    else:
        assert (error < 1e-05)
        point = np.array([0.1, 0.2])
        p = SD.eval(point, u_hat)
        assert np.allclose(p, lambdify(x, ue)(point), atol=1e-05)
class PhotometricAug(object):
    """Randomly applies one of several photometric augmentations to a PIL
    image, falling back to a user-supplied transform for most draws."""
    def __init__(self, transform=None):
        # Transform applied when no special branch is selected.
        self.transform = transform
    def __call__(self, img):
        # n is drawn from {0, 1, 2}. NOTE(review): the n == -1 (invert) and
        # n == -2 (black rectangle) branches below are unreachable with this
        # range -- they look like deliberately disabled augmentations;
        # confirm this is intentional.
        n = random.randint(0, 2)
        if (n == (- 1)):
            # (unreachable) color inversion
            transformed_image = TF.invert(img.copy())
        elif (n == 0):
            # Edge map: grayscale -> FIND_EDGES filter -> back to RGB.
            transformed_image = img.copy().convert('L').filter(ImageFilter.FIND_EDGES).convert('RGB')
        elif (n == (- 2)):
            # (unreachable) occlude a random rectangle with black
            transformed_image = img.copy()
            draw = ImageDraw.Draw(transformed_image)
            (width, height) = img.size
            x0 = random.randint(0, (width - 1))
            y0 = random.randint(0, (height - 1))
            # Rectangle extent capped at a quarter of the remaining span.
            wl = ((width - x0) // 4)
            hl = ((height - y0) // 4)
            if ((wl > 5) and (hl > 5)):
                x1 = min(width, (x0 + random.randint(1, wl)))
                y1 = min(height, (y0 + random.randint(1, hl)))
                draw.rectangle(((x0, y0), (x1, y1)), fill='black')
        else:
            # n in {1, 2}: delegate to the configured transform.
            transformed_image = self.transform(img)
        return transformed_image
def write_ranking(corpus_indices, corpus_scores, q_lookup, ranking_save_file):
    """Write a plain-text ranking file for a batch of queries.

    For each query id in ``q_lookup`` the corresponding document scores and
    indices are sorted by descending score and written one per line in the
    form ``<qid> <doc_idx> <rank> <score>``.
    """
    with open(ranking_save_file, 'w') as out:
        for (qid, doc_scores, doc_indices) in zip(q_lookup, corpus_scores, corpus_indices):
            ranked = sorted(zip(doc_scores, doc_indices), key=(lambda pair: pair[0]), reverse=True)
            for rank, (score, doc_idx) in enumerate(ranked, start=1):
                out.write(f'{qid} {doc_idx} {rank} {score}\n')
class ResNet101(nn.Module):
    """DeepLab-style dilated ResNet-101 for semantic segmentation.

    layer3/layer4 keep stride 1 and use dilation 2/4 to preserve spatial
    resolution; the prediction head is an ASPP-style ``Classifier_Module2``.
    ``BatchNorm`` selects BatchNorm2d (True) vs GroupNorm (False); ``bn_clr``
    inserts an extra normalisation over the 2048-d features before the head.
    """

    def __init__(self, block, layers, num_classes, BatchNorm, bn_clr=False):
        super(ResNet101, self).__init__()
        self.inplanes = 64
        self.bn_clr = bn_clr
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        if BatchNorm:
            self.bn1 = nn.BatchNorm2d(64, affine=affine_par)
        else:
            self.bn1 = nn.GroupNorm(4, 64, affine=affine_par)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
        self.layer1 = self._make_layer(block, 64, layers[0], BatchNorm=BatchNorm)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, BatchNorm=BatchNorm)
        # Dilated, stride-1 stages preserve feature-map resolution.
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, BatchNorm=BatchNorm)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, BatchNorm=BatchNorm)
        self.layer5 = self._make_pred_layer(Classifier_Module2, 2048, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
        if self.bn_clr:
            if BatchNorm:
                self.bn_pretrain = nn.BatchNorm2d(2048, affine=affine_par)
            else:
                self.bn_pretrain = nn.GroupNorm(32, 2048, affine=affine_par)
        # Weight init: small normal for convs, unit scale / zero bias for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=True):
        """Stack ``blocks`` residual blocks; a 1x1 projection shortcut is
        added whenever stride, channel count, or dilation changes."""
        downsample = None
        groups_per_layer = int(np.minimum(32, ((planes * block.expansion) / 16)))
        if BatchNorm:
            norm = nn.BatchNorm2d((planes * block.expansion), affine=affine_par)
        else:
            norm = nn.GroupNorm(groups_per_layer, (planes * block.expansion), affine=affine_par)
        if ((stride != 1) or (self.inplanes != (planes * block.expansion)) or (dilation == 2) or (dilation == 4)):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), norm)
        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample, BatchNorm=BatchNorm))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))
        return nn.Sequential(*layers)

    def _make_pred_layer(self, block, inplanes, dilation_series, padding_series, num_classes):
        """Build the classifier head (ASPP-style module)."""
        return block(inplanes, dilation_series, padding_series, num_classes)

    def forward(self, x, lbl=None):
        """Forward pass; if ``lbl`` is given, also return the CE loss.

        The head output is upsampled back to the input resolution.
        """
        (_, _, h, w) = x.size()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        if self.bn_clr:
            x = self.bn_pretrain(x)
        out = self.layer5(x, get_feat=False)
        out = F.interpolate(out, size=(h, w), mode='bilinear', align_corners=True)
        if (lbl is not None):
            loss = self.CrossEntropy2d(out, lbl)
            return (out, loss)
        return out

    def get_1x_lr_params(self):
        """Yield backbone parameters (trained at the base learning rate)."""
        backbone = [self.conv1, self.bn1, self.layer1, self.layer2, self.layer3, self.layer4]
        for module in backbone:
            # NOTE: iterating ``modules()`` combined with the recursive
            # ``parameters()`` yields nested parameters multiple times; kept
            # as-is to preserve the original optimizer behaviour.
            for sub in module.modules():
                for param in sub.parameters():
                    if param.requires_grad:
                        (yield param)

    def get_10x_lr_params(self):
        """Yield classifier-head (and optional bn_pretrain) parameters,
        trained at 10x the base learning rate."""
        param_gens = []
        if self.bn_clr:
            param_gens.append(self.bn_pretrain.parameters())
        param_gens.append(self.layer5.parameters())
        for gen in param_gens:
            for p in gen:
                (yield p)

    def optim_parameters(self, args):
        """Parameter groups with 1x (backbone) and 10x (head) learning rates."""
        return [{'params': self.get_1x_lr_params(), 'lr': args.lr_semseg}, {'params': self.get_10x_lr_params(), 'lr': (10 * args.lr_semseg)}]

    def adjust_learning_rate(self, args, optimizer, i):
        """Polynomial learning-rate decay at step ``i``; the second group
        (head) keeps its 10x multiplier."""
        lr = (args.lr_semseg * ((1 - (float(i) / args.num_steps)) ** args.power))
        optimizer.param_groups[0]['lr'] = lr
        if (len(optimizer.param_groups) > 1):
            optimizer.param_groups[1]['lr'] = (lr * 10)

    def CrossEntropy2d(self, predict, target):
        """2-D cross-entropy between (N, C, H, W) logits and (N, H, W)
        integer labels, ignoring ``IGNORE_LABEL``."""
        assert (not target.requires_grad)
        assert (predict.dim() == 4)
        assert (target.dim() == 3)
        assert (predict.size(0) == target.size(0)), '{0} vs {1} '.format(predict.size(0), target.size(0))
        assert (predict.size(2) == target.size(1)), '{0} vs {1} '.format(predict.size(2), target.size(1))
        # Fixed: message previously read target.size(3), which itself raises
        # IndexError on a 3-D target exactly when the assertion fails.
        assert (predict.size(3) == target.size(2)), '{0} vs {1} '.format(predict.size(3), target.size(2))
        ce_loss = nn.CrossEntropyLoss(ignore_index=IGNORE_LABEL)
        loss = ce_loss(predict, target.type(torch.long))
        return loss
class FeedForward(nn.Module):
    """Transformer MLP block: Linear -> GELU -> Dropout -> Linear -> Dropout.

    When ``search`` is True, a learnable per-channel gate ``alpha``
    (initialised to ones) scales the hidden activations before the
    non-linearity.
    """

    def __init__(self, dim, hidden_dim, dropout, out_dim=None, search=False):
        super().__init__()
        # Module creation order is kept stable so parameter initialisation
        # consumes the RNG in the same sequence.
        self.fc1 = nn.Linear(dim, hidden_dim)
        self.act = nn.GELU()
        self.fc2 = nn.Linear(hidden_dim, dim if out_dim is None else out_dim)
        self.drop = nn.Dropout(dropout)
        if search:
            self.alpha = nn.Parameter(torch.ones(1, 1, hidden_dim))

    def unwrapped(self):
        """Return the module itself (nothing to unwrap)."""
        return self

    def forward(self, x):
        hidden = self.fc1(x)
        # The gate exists only when the block was built with search=True.
        if hasattr(self, 'alpha'):
            hidden = (hidden * self.alpha)
        hidden = self.drop(self.act(hidden))
        return self.drop(self.fc2(hidden))
class Net(nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel inputs, 10 classes,
    returning log-probabilities."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 3)
        # 28 -> conv 26 -> pool 13 -> conv 11 -> pool 5, hence 32*5*5.
        self.fc1 = nn.Linear(32 * 5 * 5, 32)
        self.fc2 = nn.Linear(32, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        x = x.view(x.size(0), -1)
        for fc in (self.fc1, self.fc2):
            x = F.relu(fc(x))
        return F.log_softmax(self.fc3(x), dim=1)

    def train_epoch(self, batch_generator):
        """Run one training epoch via openfl's PyTorchTaskRunner, after
        snapshotting the current weights into the optimizer."""
        from openfl.federated.task import PyTorchTaskRunner
        self.optimizer.set_old_weights([p for p in self.parameters()])
        return PyTorchTaskRunner.train_epoch(self, batch_generator)
class SelectFromModel(MetaEstimatorMixin, SelectorMixin, BaseEstimator):
    """Meta-transformer that selects features whose importances (taken from
    a fitted sub-``estimator``) exceed ``threshold``, optionally capped at
    ``max_features``.

    NOTE(review): several bare statements in this class body
    (``_fit_context(...)`` and ``_if(_estimator_has('partial_fit'))``) look
    like decorators whose ``@`` was lost in extraction (presumably
    ``@_fit_context`` / ``@available_if``); likewise ``threshold_`` and
    ``n_features_in_`` read like stripped ``@property`` definitions.
    Confirm against the scikit-learn source before relying on this copy.
    """
    # Declarative parameter validation used by scikit-learn's fit context.
    _parameter_constraints: dict = {'estimator': [HasMethods('fit')], 'threshold': [Interval(Real, None, None, closed='both'), str, None], 'prefit': ['boolean'], 'norm_order': [Interval(Integral, None, (- 1), closed='right'), Interval(Integral, 1, None, closed='left'), Options(Real, {np.inf, (- np.inf)})], 'max_features': [Interval(Integral, 0, None, closed='left'), callable, None], 'importance_getter': [str, callable]}
    def __init__(self, estimator, *, threshold=None, prefit=False, norm_order=1, max_features=None, importance_getter='auto'):
        # Store constructor args untouched (scikit-learn convention: no
        # validation or copying in __init__).
        self.estimator = estimator
        self.threshold = threshold
        self.prefit = prefit
        self.importance_getter = importance_getter
        self.norm_order = norm_order
        self.max_features = max_features
    def _get_support_mask(self):
        """Boolean mask of selected features, from importances + threshold,
        optionally keeping only the top ``max_features``."""
        # Prefer the fitted copies when available (post-fit state).
        estimator = getattr(self, 'estimator_', self.estimator)
        max_features = getattr(self, 'max_features_', self.max_features)
        if self.prefit:
            try:
                check_is_fitted(self.estimator)
            except NotFittedError as exc:
                raise NotFittedError('When `prefit=True`, `estimator` is expected to be a fitted estimator.') from exc
            if callable(max_features):
                # A callable max_features needs X, which only fit() sees.
                raise NotFittedError('When `prefit=True` and `max_features` is a callable, call `fit` before calling `transform`.')
            elif ((max_features is not None) and (not isinstance(max_features, Integral))):
                raise ValueError(f'`max_features` must be an integer. Got `max_features={max_features}` instead.')
        scores = _get_feature_importances(estimator=estimator, getter=self.importance_getter, transform_func='norm', norm_order=self.norm_order)
        threshold = _calculate_threshold(estimator, scores, self.threshold)
        if (self.max_features is not None):
            mask = np.zeros_like(scores, dtype=bool)
            # Stable sort keeps the original order among tied scores.
            candidate_indices = np.argsort((- scores), kind='mergesort')[:max_features]
            mask[candidate_indices] = True
        else:
            mask = np.ones_like(scores, dtype=bool)
        mask[(scores < threshold)] = False
        return mask
    def _check_max_features(self, X):
        """Resolve and validate ``max_features`` (callable or int) against
        the number of features in X, caching the result as max_features_."""
        if (self.max_features is not None):
            n_features = _num_features(X)
            if callable(self.max_features):
                max_features = self.max_features(X)
            else:
                max_features = self.max_features
            check_scalar(max_features, 'max_features', Integral, min_val=0, max_val=n_features)
            self.max_features_ = max_features
    # NOTE(review): likely a stripped ``@_fit_context(...)`` decorator for fit().
    _fit_context(prefer_skip_nested_validation=False)
    def fit(self, X, y=None, **fit_params):
        """Fit (or copy, when prefit) the sub-estimator and record feature
        names. Returns self."""
        self._check_max_features(X)
        if self.prefit:
            try:
                check_is_fitted(self.estimator)
            except NotFittedError as exc:
                raise NotFittedError('When `prefit=True`, `estimator` is expected to be a fitted estimator.') from exc
            # Deep-copy so later transform state is independent of the input.
            self.estimator_ = deepcopy(self.estimator)
        elif _routing_enabled():
            routed_params = process_routing(self, 'fit', **fit_params)
            self.estimator_ = clone(self.estimator)
            self.estimator_.fit(X, y, **routed_params.estimator.fit)
        else:
            self.estimator_ = clone(self.estimator)
            self.estimator_.fit(X, y, **fit_params)
        if hasattr(self.estimator_, 'feature_names_in_'):
            self.feature_names_in_ = self.estimator_.feature_names_in_
        else:
            self._check_feature_names(X, reset=True)
        return self
    # NOTE(review): reads like a stripped ``@property``.
    def threshold_(self):
        """Threshold actually used for selection, derived from the fitted
        estimator's importances and the ``threshold`` parameter."""
        scores = _get_feature_importances(estimator=self.estimator_, getter=self.importance_getter, transform_func='norm', norm_order=self.norm_order)
        return _calculate_threshold(self.estimator, scores, self.threshold)
    # NOTE(review): likely stripped ``@available_if(...)`` + ``@_fit_context``
    # decorators for partial_fit().
    _if(_estimator_has('partial_fit'))
    _fit_context(prefer_skip_nested_validation=False)
    def partial_fit(self, X, y=None, **partial_fit_params):
        """Incrementally fit the sub-estimator; validation and cloning only
        happen on the first call. Returns self."""
        first_call = (not hasattr(self, 'estimator_'))
        if first_call:
            self._check_max_features(X)
        if self.prefit:
            if first_call:
                try:
                    check_is_fitted(self.estimator)
                except NotFittedError as exc:
                    raise NotFittedError('When `prefit=True`, `estimator` is expected to be a fitted estimator.') from exc
                self.estimator_ = deepcopy(self.estimator)
            return self
        if first_call:
            self.estimator_ = clone(self.estimator)
        if _routing_enabled():
            routed_params = process_routing(self, 'partial_fit', **partial_fit_params)
            self.estimator_ = clone(self.estimator)
            self.estimator_.partial_fit(X, y, **routed_params.estimator.partial_fit)
        else:
            self.estimator_.partial_fit(X, y, **partial_fit_params)
        if hasattr(self.estimator_, 'feature_names_in_'):
            self.feature_names_in_ = self.estimator_.feature_names_in_
        else:
            self._check_feature_names(X, reset=first_call)
        return self
    # NOTE(review): reads like a stripped ``@property``.
    def n_features_in_(self):
        """Number of features seen during fit; raises AttributeError when
        the selector is not fitted (property convention)."""
        try:
            check_is_fitted(self)
        except NotFittedError as nfe:
            raise AttributeError('{} object has no n_features_in_ attribute.'.format(self.__class__.__name__)) from nfe
        return self.estimator_.n_features_in_
    def get_metadata_routing(self):
        """Declare metadata routing for fit/partial_fit to the sub-estimator."""
        router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self.estimator, method_mapping=MethodMapping().add(callee='partial_fit', caller='partial_fit').add(callee='fit', caller='fit'))
        return router
    def _more_tags(self):
        # Inherit NaN tolerance from the wrapped estimator.
        return {'allow_nan': _safe_tags(self.estimator, key='allow_nan')}
def RenderRegion2(points, points2, lines, region, filename):
    """Render points and line segments of ``region`` into a 1000x1000 SVG.

    ``region`` is (xmin, ymin, xmax, ymax). The first coordinate is flipped
    and the axes are swapped when drawing (y is used as the SVG x position).
    ``points2`` is accepted for interface parity but is not drawn.
    """
    def to_x(value):
        # Flipped horizontal position in [0, 1000].
        return 1000 - int(((value - region[0]) / (region[2] - region[0])) * 1000)

    def to_y(value):
        return int(((value - region[1]) / (region[3] - region[1])) * 1000)

    dwg = svgwrite.Drawing(filename, profile='tiny')
    for seg in lines:
        dwg.add(dwg.line((to_y(seg[1]), to_x(seg[0])), (to_y(seg[3]), to_x(seg[2])), stroke='blue'))
    for pt in points:
        dwg.add(dwg.circle(center=(to_y(pt[1]), to_x(pt[0])), r=2, stroke='green', fill='green'))
    dwg.save()
class ResNet(object):
    """ResNet model built against the legacy TensorFlow 0.x graph API
    (``tf.merge_all_summaries``, ``tf.scalar_summary``, ``tf.select``,
    ``tf.mul`` are pre-1.0 names).

    NOTE(review): ``xrange`` implies Python 2; under Python 3 the
    ``out_filter / 4`` divisions in ``_bottleneck_residual`` would also
    produce floats. Confirm the intended runtime before reuse.
    """
    def __init__(self, hps, images, labels, mode):
        """Store hyperparameters, input/label tensors and 'train'/'eval' mode."""
        self.hps = hps
        self._images = images
        self.labels = labels
        self.mode = mode
        # Extra ops (moving-average updates) grouped with each train step.
        self._extra_train_ops = []
    def build_graph(self):
        """Build the inference graph and, in train mode, the training op."""
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self._build_model()
        if (self.mode == 'train'):
            self._build_train_op()
        self.summaries = tf.merge_all_summaries()
    def _stride_arr(self, stride):
        """Map a scalar stride to the 4-D NHWC stride list conv2d expects."""
        return [1, stride, stride, 1]
    def _build_model(self):
        """Core network: init conv, three residual-unit groups, final
        BN/ReLU, global average pool, softmax classifier and the
        cross-entropy + weight-decay cost."""
        with tf.variable_scope('init'):
            x = self._images
            x = self._conv('init_conv', x, 3, 3, 16, self._stride_arr(1))
        # First unit of each group may downsample; later units keep shape.
        strides = [1, 2, 2]
        activate_before_residual = [True, False, False]
        if self.hps.use_bottleneck:
            res_func = self._bottleneck_residual
            filters = [16, 64, 128, 256]
        else:
            res_func = self._residual
            filters = [16, 16, 32, 64]
        with tf.variable_scope('unit_1_0'):
            x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]), activate_before_residual[0])
        for i in xrange(1, self.hps.num_residual_units):
            with tf.variable_scope(('unit_1_%d' % i)):
                x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
        with tf.variable_scope('unit_2_0'):
            x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]), activate_before_residual[1])
        for i in xrange(1, self.hps.num_residual_units):
            with tf.variable_scope(('unit_2_%d' % i)):
                x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
        with tf.variable_scope('unit_3_0'):
            x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]), activate_before_residual[2])
        for i in xrange(1, self.hps.num_residual_units):
            with tf.variable_scope(('unit_3_%d' % i)):
                x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
        with tf.variable_scope('unit_last'):
            x = self._batch_norm('final_bn', x)
            x = self._relu(x, self.hps.relu_leakiness)
            x = self._global_avg_pool(x)
        with tf.variable_scope('logit'):
            logits = self._fully_connected(x, self.hps.num_classes)
            self.predictions = tf.nn.softmax(logits)
        with tf.variable_scope('costs'):
            xent = tf.nn.softmax_cross_entropy_with_logits(logits, self.labels)
            self.cost = tf.reduce_mean(xent, name='xent')
            self.cost += self._decay()
            tf.scalar_summary('cost', self.cost)
    def _build_train_op(self):
        """SGD or momentum optimizer over the cost, grouped with the
        moving-average update ops collected during model construction."""
        self.lrn_rate = tf.constant(self.hps.lrn_rate, tf.float32)
        tf.scalar_summary('learning rate', self.lrn_rate)
        trainable_variables = tf.trainable_variables()
        grads = tf.gradients(self.cost, trainable_variables)
        if (self.hps.optimizer == 'sgd'):
            optimizer = tf.train.GradientDescentOptimizer(self.lrn_rate)
        elif (self.hps.optimizer == 'mom'):
            optimizer = tf.train.MomentumOptimizer(self.lrn_rate, 0.9)
        apply_op = optimizer.apply_gradients(zip(grads, trainable_variables), global_step=self.global_step, name='train_step')
        train_ops = ([apply_op] + self._extra_train_ops)
        self.train_op = tf.group(*train_ops)
    def _batch_norm(self, name, x):
        """Hand-rolled batch norm: batch moments plus moving-average updates
        in train mode; stored moving statistics in eval mode."""
        with tf.variable_scope(name):
            params_shape = [x.get_shape()[(- 1)]]
            beta = tf.get_variable('beta', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32))
            gamma = tf.get_variable('gamma', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32))
            if (self.mode == 'train'):
                (mean, variance) = tf.nn.moments(x, [0, 1, 2], name='moments')
                moving_mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False)
                moving_variance = tf.get_variable('moving_variance', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32), trainable=False)
                # Updates run alongside the train op (see _build_train_op).
                self._extra_train_ops.append(moving_averages.assign_moving_average(moving_mean, mean, 0.9))
                self._extra_train_ops.append(moving_averages.assign_moving_average(moving_variance, variance, 0.9))
            else:
                mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False)
                variance = tf.get_variable('moving_variance', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32), trainable=False)
                tf.histogram_summary(mean.op.name, mean)
                tf.histogram_summary(variance.op.name, variance)
            y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
            y.set_shape(x.get_shape())
            return y
    def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
        """Two-conv residual unit with pre-activation; when filter counts
        change, the identity shortcut is avg-pooled and zero-padded."""
        if activate_before_residual:
            with tf.variable_scope('shared_activation'):
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, self.hps.relu_leakiness)
                orig_x = x
        else:
            with tf.variable_scope('residual_only_activation'):
                orig_x = x
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, self.hps.relu_leakiness)
        with tf.variable_scope('sub1'):
            x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
        with tf.variable_scope('sub2'):
            x = self._batch_norm('bn2', x)
            x = self._relu(x, self.hps.relu_leakiness)
            x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
        with tf.variable_scope('sub_add'):
            if (in_filter != out_filter):
                # Parameter-free shortcut: spatial avg-pool + channel padding.
                orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
                orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [((out_filter - in_filter) // 2), ((out_filter - in_filter) // 2)]])
            x += orig_x
        tf.logging.info('image after unit %s', x.get_shape())
        return x
    def _bottleneck_residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
        """1-3-1 bottleneck residual unit; the shortcut is a 1x1 projection
        conv when the filter counts differ."""
        if activate_before_residual:
            with tf.variable_scope('common_bn_relu'):
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, self.hps.relu_leakiness)
                orig_x = x
        else:
            with tf.variable_scope('residual_bn_relu'):
                orig_x = x
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, self.hps.relu_leakiness)
        with tf.variable_scope('sub1'):
            x = self._conv('conv1', x, 1, in_filter, (out_filter / 4), stride)
        with tf.variable_scope('sub2'):
            x = self._batch_norm('bn2', x)
            x = self._relu(x, self.hps.relu_leakiness)
            x = self._conv('conv2', x, 3, (out_filter / 4), (out_filter / 4), [1, 1, 1, 1])
        with tf.variable_scope('sub3'):
            x = self._batch_norm('bn3', x)
            x = self._relu(x, self.hps.relu_leakiness)
            x = self._conv('conv3', x, 1, (out_filter / 4), out_filter, [1, 1, 1, 1])
        with tf.variable_scope('sub_add'):
            if (in_filter != out_filter):
                orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride)
            x += orig_x
        tf.logging.info('image after unit %s', x.get_shape())
        return x
    def _decay(self):
        """L2 weight decay over variables whose name contains 'DW'.

        NOTE(review): ``find('DW') > 0`` would miss a name that *starts*
        with 'DW' (find() == 0); presumably safe because all kernels are
        created inside variable scopes, but confirm.
        """
        costs = []
        for var in tf.trainable_variables():
            if (var.op.name.find('DW') > 0):
                costs.append(tf.nn.l2_loss(var))
        return tf.mul(self.hps.weight_decay_rate, tf.add_n(costs))
    def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
        """Convolution with He-style (sqrt(2/n)) random-normal init, SAME pad."""
        with tf.variable_scope(name):
            n = ((filter_size * filter_size) * out_filters)
            kernel = tf.get_variable('DW', [filter_size, filter_size, in_filters, out_filters], tf.float32, initializer=tf.random_normal_initializer(stddev=np.sqrt((2.0 / n))))
            return tf.nn.conv2d(x, kernel, strides, padding='SAME')
    def _relu(self, x, leakiness=0.0):
        """Leaky ReLU implemented via the legacy tf.select."""
        return tf.select(tf.less(x, 0.0), (leakiness * x), x, name='leaky_relu')
    def _fully_connected(self, x, out_dim):
        """Flatten and apply a single dense layer (x @ W + b)."""
        x = tf.reshape(x, [self.hps.batch_size, (- 1)])
        w = tf.get_variable('DW', [x.get_shape()[1], out_dim], initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        b = tf.get_variable('biases', [out_dim], initializer=tf.constant_initializer())
        return tf.nn.xw_plus_b(x, w, b)
    def _global_avg_pool(self, x):
        """Spatial mean over H and W of an NHWC tensor."""
        assert (x.get_shape().ndims == 4)
        return tf.reduce_mean(x, [1, 2])
def test_string_operations_unary_with_arg_slice():
    """Check that ak.str.slice propagates/overrides the ``attrs`` mapping of
    highlevel results (requires pyarrow >= 13)."""
    pyarrow = pytest.importorskip('pyarrow')
    if (packaging.version.Version(pyarrow.__version__) < packaging.version.Version('13')):
        pytest.xfail('pyarrow<13 fails to perform this slice')
    # No input attrs and none passed -> empty attrs on the result.
    assert (ak.str.slice([['hello', 'world!'], [], ["it's a beautiful day!"]], 1, highlevel=True).attrs == {})
    # An explicit attrs argument is used verbatim (identity, not a copy).
    assert (ak.str.slice([['hello', 'world!'], [], ["it's a beautiful day!"]], 1, highlevel=True, attrs=SOME_ATTRS).attrs is SOME_ATTRS)
    array = ak.Array([['hello', 'world!'], [], ["it's a beautiful day!"]], attrs=SOME_ATTRS)
    # Input-array attrs are inherited unless explicitly overridden.
    assert (ak.str.slice(array, 1, highlevel=True).attrs is SOME_ATTRS)
    assert (ak.str.slice(array, 1, highlevel=True, attrs=OTHER_ATTRS).attrs is OTHER_ATTRS)
def train():
    """Train a SciANN physics-informed model for 2-D linear elasticity.

    Builds displacement (Uxy, Vxy) and stress (Sxx, Syy, Sxy) networks with
    learnable Lame parameters, enforces data targets, constitutive ties and
    equilibrium residuals on a uniform grid, then saves loss histories,
    weights and field predictions under ``args.outputpath``.
    """
    if (not os.path.isdir(args.outputpath[0])):
        os.mkdir(args.outputpath[0])
    output_file_name = os.path.join(args.outputpath[0], args.outputprefix[0])
    # Output basename encodes the activation and layer sizes, e.g. _tanh_40x40.
    fname = ((output_file_name + '_{}_'.format(args.actf[0])) + 'x'.join([str(x) for x in args.layers]))
    x = Variable('x', dtype=args.dtype[0])
    y = Variable('y', dtype=args.dtype[0])
    # Either one network per field, or a single multi-output network split
    # into the five fields.
    if args.independent_networks[0]:
        Uxy = Functional('Uxy', [x, y], args.layers, args.actf[0])
        Vxy = Functional('Vxy', [x, y], args.layers, args.actf[0])
        Sxx = Functional('Sxx', [x, y], args.layers, args.actf[0])
        Syy = Functional('Syy', [x, y], args.layers, args.actf[0])
        Sxy = Functional('Sxy', [x, y], args.layers, args.actf[0])
    else:
        (Uxy, Vxy, Sxx, Syy, Sxy) = Functional(['Uxy', 'Vxy', 'Sxx', 'Syy', 'Sxy'], [x, y], args.layers, args.actf[0]).split()
    # Learnable Lame parameters (inverse identification), initialised at 2.0.
    lame1 = Parameter(2.0, inputs=[x, y], name='lame1')
    lame2 = Parameter(2.0, inputs=[x, y], name='lame2')
    # Isotropic constitutive constants.
    C11 = ((2 * lame2) + lame1)
    C12 = lame1
    C33 = (2 * lame2)
    # Small-strain components from the displacement fields.
    Exx = diff(Uxy, x)
    Eyy = diff(Vxy, y)
    Exy = ((diff(Uxy, y) + diff(Vxy, x)) * 0.5)
    # Data-fitting targets for each field.
    d1 = Data(Uxy)
    d2 = Data(Vxy)
    d3 = Data(Sxx)
    d4 = Data(Syy)
    d5 = Data(Sxy)
    # Constitutive-law constraints tying stresses to strains.
    c1 = Tie(Sxx, ((Exx * C11) + (Eyy * C12)))
    c2 = Tie(Syy, ((Eyy * C11) + (Exx * C12)))
    c3 = Tie(Sxy, (Exy * C33))
    # Momentum-balance (equilibrium) residuals.
    Lx = (diff(Sxx, x) + diff(Sxy, y))
    Ly = (diff(Sxy, x) + diff(Syy, y))
    model = SciModel(inputs=[x, y], targets=[d1, d2, d3, d4, d5, c1, c2, c3, Lx, Ly], loss_func='mse')
    with open('{}_summary'.format(fname), 'w') as fobj:
        model.summary(print_fn=(lambda x: fobj.write((x + '\n'))))
    # Uniform training grid over the unit square.
    (XMIN, XMAX) = (0.0, 1.0)
    (YMIN, YMAX) = (0.0, 1.0)
    Xmesh = np.linspace(XMIN, XMAX, args.numx[0]).reshape(((- 1), 1))
    Ymesh = np.linspace(YMIN, YMAX, args.numy[0]).reshape(((- 1), 1))
    (X, Y) = np.meshgrid(Xmesh, Ymesh)
    input_data = [X.reshape((- 1), 1), Y.reshape((- 1), 1)]
    data_d1 = dispx(input_data)
    data_d2 = dispy(input_data)
    data_d3 = stressxx(input_data)
    data_d4 = stressyy(input_data)
    data_d5 = stressxy(input_data)
    # Tie constraints are driven to zero.
    data_c1 = 'zeros'
    data_c2 = 'zeros'
    data_c3 = 'zeros'
    data_Lx = bodyfx(input_data)
    data_Ly = bodyfy(input_data)
    target_data = [data_d1, data_d2, data_d3, data_d4, data_d5, data_c1, data_c2, data_c3, data_Lx, data_Ly]
    training_time = time.time()
    history = model.train(x_true=input_data, y_true=target_data, epochs=args.epochs[0], batch_size=args.batchsize[0], shuffle=args.shuffle[0], learning_rate=args.learningrate[0], stop_after=args.stopafter[0], verbose=args.verbose[0], save_weights_to='{}_WEIGHTS'.format(fname), save_weights_freq=args.savefreq[0])
    training_time = (time.time() - training_time)
    # Persist every per-target loss history plus a matching time axis.
    for loss in history.history:
        np.savetxt((fname + '_{}'.format('_'.join(loss.split('/')))), np.array(history.history[loss]).reshape((- 1), 1))
    time_steps = np.linspace(0, training_time, len(history.history['loss']))
    np.savetxt((fname + '_Time'), time_steps.reshape((- 1), 1))
    # Denser plotting grid for the predicted fields.
    Xmesh_plot = np.linspace(XMIN, XMAX, args.numxplot[0]).reshape(((- 1), 1))
    Ymesh_plot = np.linspace(YMIN, YMAX, args.numyplot[0]).reshape(((- 1), 1))
    (X_plot, Y_plot) = np.meshgrid(Xmesh_plot, Ymesh_plot)
    input_plot = [X_plot.reshape((- 1), 1), Y_plot.reshape((- 1), 1)]
    lame1_pred = lame1.eval(model, input_plot)
    lame2_pred = lame2.eval(model, input_plot)
    Uxy_pred = Uxy.eval(model, input_plot)
    Vxy_pred = Vxy.eval(model, input_plot)
    Exx_pred = Exx.eval(model, input_plot)
    Eyy_pred = Eyy.eval(model, input_plot)
    Exy_pred = Exy.eval(model, input_plot)
    Sxx_pred = Sxx.eval(model, input_plot)
    Syy_pred = Syy.eval(model, input_plot)
    Sxy_pred = Sxy.eval(model, input_plot)
    np.savetxt((fname + '_Xmesh'), X_plot, delimiter=', ')
    np.savetxt((fname + '_Ymesh'), Y_plot, delimiter=', ')
    np.savetxt((fname + '_lame1'), lame1_pred, delimiter=', ')
    np.savetxt((fname + '_lame2'), lame2_pred, delimiter=', ')
    np.savetxt((fname + '_Uxy'), Uxy_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt((fname + '_Vxy'), Vxy_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt((fname + '_Exx'), Exx_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt((fname + '_Eyy'), Eyy_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt((fname + '_Exy'), Exy_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt((fname + '_Sxx'), Sxx_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt((fname + '_Syy'), Syy_pred.reshape(X_plot.shape), delimiter=', ')
    np.savetxt((fname + '_Sxy'), Sxy_pred.reshape(X_plot.shape), delimiter=', ')
def check_tree(tree, layer):
    """Render a parse tree as an indented multi-line string, one node per
    line in the form
    ``<tabs><non_leaf|leaf>: <tag>:<token>, <node_index>:<parent_index>``.
    """
    indent = ('\t' * layer)
    if (len(tree.children_nodes) == 0):
        return ('%sleaf: %s:%s, %s:%s\n' % (indent, tree.tag, tree.token, tree.node_index, tree.parent_index))
    header = ('%snon_leaf: %s:%s, %s:%s\n' % (indent, tree.tag, tree.token, tree.node_index, tree.parent_index))
    children_text = ''.join([check_tree(child, (layer + 1)) for child in tree.children_nodes])
    return (header + children_text)
class SBDSegmentation(data.Dataset):
    """Semantic Boundaries Dataset (SBD) instance-segmentation dataset.

    Yields one sample per (image, object) pair, with binary object masks
    taken from the benchmark's MATLAB annotation files. Can optionally
    download and extract the archive and pre-compute a per-image list of
    object categories (filtered by ``area_thres``) to a JSON-ish text file.
    """
    # BUG FIX: the URL literal was truncated to an unterminated string in
    # this copy (a syntax error). Restored to the canonical Berkeley SBD
    # archive whose MD5 matches self.MD5 below.
    URL = 'http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz'
    FILE = 'benchmark.tgz'
    MD5 = '82b4d87ceb2ed10f6038a1cba92111cb'

    def __init__(self, root=Path.db_root_dir('sbd'), split='val', transform=None, download=False, preprocess=False, area_thres=0, retname=True):
        """
        Args:
            root: dataset root directory (contains ``benchmark_RELEASE``).
            split: split name or list of split names ('train'/'val').
            transform: optional callable applied to each sample dict.
            download: download and extract the archive if set.
            preprocess: force regeneration of the object list file.
            area_thres: objects with fewer pixels than this are dropped.
            retname: include a ``meta`` entry in each sample.
        """
        self.root = root
        self.transform = transform
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.area_thres = area_thres
        self.retname = retname
        self.dataset_dir = os.path.join(self.root, 'benchmark_RELEASE', 'dataset')
        _mask_dir = os.path.join(self.dataset_dir, 'inst')
        _image_dir = os.path.join(self.dataset_dir, 'img')
        # The cached object list file name encodes the splits and threshold.
        if (self.area_thres != 0):
            self.obj_list_file = os.path.join(self.dataset_dir, ((('_'.join(self.split) + '_instances_area_thres-') + str(area_thres)) + '.txt'))
        else:
            self.obj_list_file = os.path.join(self.dataset_dir, (('_'.join(self.split) + '_instances') + '.txt'))
        if download:
            self._download()
            if (not self._check_integrity()):
                raise RuntimeError('Dataset file downloaded is corrupted.')
        # Collect image ids and the matching image/mask file paths per split.
        self.im_ids = []
        self.images = []
        self.masks = []
        for splt in self.split:
            with open(os.path.join(self.dataset_dir, (splt + '.txt')), 'r') as f:
                lines = f.read().splitlines()
            for line in lines:
                _image = os.path.join(_image_dir, (line + '.jpg'))
                _mask = os.path.join(_mask_dir, (line + '.mat'))
                assert os.path.isfile(_image)
                assert os.path.isfile(_mask)
                self.im_ids.append(line)
                self.images.append(_image)
                self.masks.append(_mask)
        assert (len(self.images) == len(self.masks))
        # Build (or rebuild) the per-image category list if needed.
        if ((not self._check_preprocess()) or preprocess):
            print('Preprocessing SBD dataset, this will take long, but it will be done only once.')
            self._preprocess()
        # Flatten to a list of (image_index, object_index) pairs, skipping
        # objects whose category was set to -1 (below the area threshold).
        self.obj_list = []
        num_images = 0
        for ii in range(len(self.im_ids)):
            if (self.im_ids[ii] in self.obj_dict.keys()):
                flag = False
                for jj in range(len(self.obj_dict[self.im_ids[ii]])):
                    if (self.obj_dict[self.im_ids[ii]][jj] != (- 1)):
                        self.obj_list.append([ii, jj])
                        flag = True
                if flag:
                    num_images += 1
        print('Number of images: {:d}\nNumber of objects: {:d}'.format(num_images, len(self.obj_list)))

    def __getitem__(self, index):
        """Return a sample dict with image, binary mask, and void-pixel mask."""
        (_img, _target) = self._make_img_gt_point_pair(index)
        _void_pixels = (_target == 255).astype(np.float32)
        sample = {'image': _img, 'gt': _target, 'void_pixels': _void_pixels}
        if self.retname:
            _im_ii = self.obj_list[index][0]
            _obj_ii = self.obj_list[index][1]
            sample['meta'] = {'image': str(self.im_ids[_im_ii]), 'object': str(_obj_ii), 'im_size': (_img.shape[0], _img.shape[1]), 'category': self.obj_dict[self.im_ids[_im_ii]][_obj_ii]}
        if (self.transform is not None):
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.obj_list)

    def _check_integrity(self):
        """Verify the downloaded archive exists and its MD5 matches."""
        _fpath = os.path.join(self.root, self.FILE)
        if (not os.path.isfile(_fpath)):
            print('{} does not exist'.format(_fpath))
            return False
        _md5c = hashlib.md5(open(_fpath, 'rb').read()).hexdigest()
        if (_md5c != self.MD5):
            print(' MD5({}) did not match MD5({}) expected for {}'.format(_md5c, self.MD5, _fpath))
            return False
        return True

    def _check_preprocess(self):
        """Load the cached object list and check it covers exactly our image ids."""
        _obj_list_file = self.obj_list_file
        if (not os.path.isfile(_obj_list_file)):
            return False
        else:
            self.obj_dict = json.load(open(_obj_list_file, 'r'))
            return (list(np.sort([str(x) for x in self.obj_dict.keys()])) == list(np.sort(self.im_ids)))

    def _preprocess(self):
        """Scan every annotation file and write the per-image category list."""
        self.obj_dict = {}
        obj_counter = 0
        for ii in range(len(self.im_ids)):
            tmp = scipy.io.loadmat(self.masks[ii])
            _mask = tmp['GTinst'][0]['Segmentation'][0]
            _cat_ids = tmp['GTinst'][0]['Categories'][0].astype(int)
            _mask_ids = np.unique(_mask)
            # Instances are labelled 1..n_obj, so the max label is the count.
            n_obj = _mask_ids[(- 1)]
            assert (n_obj == len(_cat_ids))
            for jj in range(n_obj):
                temp = np.where((_mask == (jj + 1)))
                obj_area = len(temp[0])
                # Mark too-small objects with category -1 so they are skipped.
                if (obj_area < self.area_thres):
                    _cat_ids[jj] = (- 1)
                obj_counter += 1
            self.obj_dict[self.im_ids[ii]] = np.squeeze(_cat_ids, 1).tolist()
        with open(self.obj_list_file, 'w') as outfile:
            outfile.write('{{\n\t"{:s}": {:s}'.format(self.im_ids[0], json.dumps(self.obj_dict[self.im_ids[0]])))
            for ii in range(1, len(self.im_ids)):
                outfile.write(',\n\t"{:s}": {:s}'.format(self.im_ids[ii], json.dumps(self.obj_dict[self.im_ids[ii]])))
            outfile.write('\n}\n')
        print('Pre-processing finished')

    def _download(self):
        """Download the SBD archive (if not already verified) and extract it."""
        _fpath = os.path.join(self.root, self.FILE)
        try:
            os.makedirs(self.root)
        except OSError as e:
            if (e.errno == errno.EEXIST):
                pass
            else:
                raise
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        else:
            print(((('Downloading ' + self.URL) + ' to ') + _fpath))

            def _progress(count, block_size, total_size):
                sys.stdout.write(('\r>> %s %.1f%%' % (_fpath, ((float((count * block_size)) / float(total_size)) * 100.0))))
                sys.stdout.flush()
            urllib.request.urlretrieve(self.URL, _fpath, _progress)
        cwd = os.getcwd()
        print('Extracting tar file')
        tar = tarfile.open(_fpath)
        os.chdir(self.root)
        tar.extractall()
        tar.close()
        os.chdir(cwd)
        print('Done!')

    def _make_img_gt_point_pair(self, index):
        """Load the RGB image and the binary mask of one (image, object) pair."""
        _im_ii = self.obj_list[index][0]
        _obj_ii = self.obj_list[index][1]
        _img = np.array(Image.open(self.images[_im_ii]).convert('RGB')).astype(np.float32)
        _tmp = scipy.io.loadmat(self.masks[_im_ii])['GTinst'][0]['Segmentation'][0]
        _target = (_tmp == (_obj_ii + 1)).astype(np.float32)
        return (_img, _target)

    def __str__(self):
        return (((('SBDSegmentation(split=' + str(self.split)) + ', area_thres=') + str(self.area_thres)) + ')')
def test_error_handling():
    """Check that an SDFGConvertible whose ``__sdfg__`` raises propagates the error.

    NOTE(review): ``testprogram`` appears to have lost its ``@dace.program``
    decorator in this copy -- without it ``nc(A)`` would never attempt SDFG
    conversion and the expected exception would not be raised. Confirm
    against the original file.
    """
    class NotConvertible(SDFGConvertible):
        def __call__(self, a):
            import numpy as np
            print('A very pythonic method', a)
        def __sdfg__(self, *args, **kwargs):
            # Simulate a conversion failure with a distinctive exception type.
            raise NotADirectoryError('I am not really convertible')
        def __sdfg_signature__(self):
            return ([], [])
    A = np.random.rand(20)
    # The conversion failure must surface to the caller.
    with pytest.raises(NotADirectoryError):
        def testprogram(A, nc: dace.compiletime):
            nc(A)
        testprogram(A, NotConvertible())
class TransformsConfig(object):
    """Abstract base for data-transform configurations.

    Subclasses are expected to override :meth:`get_transforms` and return
    their transform pipeline; the base class intentionally provides none.
    """

    def __init__(self):
        """The base configuration holds no state."""

    def get_transforms(self):
        """Return the configured transforms (``None`` for the base class)."""
class RationalCuspidalSubgroup(CuspidalSubgroup_generic):
    """Subgroup generated by rational cusps, viewed as a group over QQ."""

    def _repr_(self):
        """Return the string representation of this rational cuspidal subgroup."""
        return ('Rational cuspidal subgroup %sover QQ of %s' % (self._invariants_repr(), self.abelian_variety()))

    def lattice(self):
        """Return the lattice of this subgroup, computing it on first access.

        The result is cached on the instance, so the (expensive) lattice
        computation runs at most once.
        """
        try:
            return self.__lattice
        except AttributeError:
            self.__lattice = self._compute_lattice(rational_subgroup=True)
            return self.__lattice
# NOTE(review): the bare `_model` line below looks like a truncated decorator
# (likely `@register_model` from timm) -- confirm against the original file.
_model
def SReT_T_wo_slice(pretrained=False, **kwargs):
    """Build the SReT-Tiny (without slice) recursive transformer.

    Args:
        pretrained: if True, load weights from the local file
            'SReT_T_wo_slice.pth' (expects a dict with a 'model' entry).
        **kwargs: forwarded to ``RecursiveTransformer``.
    Returns:
        The constructed model.
    """
    model = RecursiveTransformer(image_size=224, patch_size=16, stride=8, base_dims=[32, 32, 32], depth=[4, 10, 6], recursive_num=[2, 5, 3], heads=[2, 4, 8], mlp_ratio=3.6, **kwargs)
    if pretrained:
        # Checkpoint is loaded on CPU; caller moves the model to device.
        state_dict = torch.load('SReT_T_wo_slice.pth', map_location='cpu')
        model.load_state_dict(state_dict['model'])
    return model
class JointExtractionDecoderConfig(Config, JointExtractionDecoderMixin):
    """Configuration for a joint entity/attribute/relation extraction decoder.

    Each sub-decoder (chunk, attribute, relation) may be given either as a
    ready ``SingleDecoderConfigBase`` instance or as a string prefix that
    selects a default config class.

    NOTE(review): ``valid``, ``name``, ``in_dim`` and ``max_span_size`` read
    like properties, and the bare ``_dim.setter`` line looks like a truncated
    ``@in_dim.setter`` decorator -- the ``@property`` decorators appear to
    have been stripped from this copy; confirm against the original file.
    """
    def __init__(self, ck_decoder: Union[(SingleDecoderConfigBase, str)]='span_classification', attr_decoder: Union[(SingleDecoderConfigBase, str)]=None, rel_decoder: Union[(SingleDecoderConfigBase, str)]='span_rel_classification', **kwargs):
        # Chunk (entity) decoder: instance passed through, or chosen by prefix.
        if isinstance(ck_decoder, SingleDecoderConfigBase):
            self.ck_decoder = ck_decoder
        elif ck_decoder.lower().startswith('sequence_tagging'):
            self.ck_decoder = SequenceTaggingDecoderConfig()
        elif ck_decoder.lower().startswith('span_classification'):
            self.ck_decoder = SpanClassificationDecoderConfig()
        elif ck_decoder.lower().startswith('boundary'):
            self.ck_decoder = BoundarySelectionDecoderConfig()
        elif ck_decoder.lower().startswith('specific_span_cls'):
            self.ck_decoder = SpecificSpanClsDecoderConfig()
        # Attribute decoder: optional (None disables it).
        if (isinstance(attr_decoder, SingleDecoderConfigBase) or (attr_decoder is None)):
            self.attr_decoder = attr_decoder
        elif attr_decoder.lower().startswith('span_attr'):
            self.attr_decoder = SpanAttrClassificationDecoderConfig()
        # Relation decoder: optional (None disables it).
        if (isinstance(rel_decoder, SingleDecoderConfigBase) or (rel_decoder is None)):
            self.rel_decoder = rel_decoder
        elif rel_decoder.lower().startswith('span_rel'):
            self.rel_decoder = SpanRelClassificationDecoderConfig()
        elif rel_decoder.lower().startswith('specific_span_rel'):
            self.rel_decoder = SpecificSpanRelClsDecoderConfig()
        elif rel_decoder.lower().startswith('specific_span_sparse_rel'):
            self.rel_decoder = SpecificSpanSparseRelClsDecoderConfig()
        # Per-task loss weights and embedding-sharing flag (popped so they
        # are not forwarded to the base Config).
        self.ck_loss_weight = kwargs.pop('ck_loss_weight', 1.0)
        self.attr_loss_weight = kwargs.pop('attr_loss_weight', 1.0)
        self.rel_loss_weight = kwargs.pop('rel_loss_weight', 1.0)
        self.share_embeddings = kwargs.pop('share_embeddings', False)
        super().__init__(**kwargs)
    def valid(self):
        # Valid only if every configured sub-decoder is valid and at least
        # two sub-decoders are present (it is a *joint* decoder).
        return (all((decoder.valid for decoder in self.decoders)) and (len(list(self.decoders)) >= 2))
    def name(self):
        # Joint name: sub-decoder names joined by the config separator.
        return self._name_sep.join([decoder.name for decoder in self.decoders])
    def __repr__(self):
        return self._repr_config_attrs(self.__dict__)
    def in_dim(self):
        # All sub-decoders share the chunk decoder's input dimension.
        return self.ck_decoder.in_dim
    _dim.setter
    def in_dim(self, dim: int):
        # Propagate the input dimension to every sub-decoder.
        for decoder in self.decoders:
            decoder.in_dim = dim
    def max_span_size(self):
        return self.ck_decoder.max_span_size
    def build_vocab(self, *partitions):
        """Build vocabularies for every sub-decoder from the data partitions."""
        for decoder in self.decoders:
            decoder.build_vocab(*partitions)
    def instantiate(self):
        """Construct the runtime decoder from this configuration."""
        return JointExtractionDecoder(self)
def test_net_on_dataset(args, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0):
    """Run relation-detection inference over a dataset and evaluate the results.

    Dispatches to the multi-GPU or single-GPU inference path, logs total
    inference time, then picks the evaluator based on the dataset name
    ('vg'/'vrd' datasets vs. scene-graph datasets). Returns all results.
    """
    dataset = JsonDatasetRel(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb(gt=args.do_val))
        all_results = multi_gpu_test_net_on_dataset(args, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_results = test_net(args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    logger.info('Starting evaluation now...')
    is_vg_or_vrd = ((dataset_name.find('vg') >= 0) or (dataset_name.find('vrd') >= 0))
    if is_vg_or_vrd:
        task_evaluation_vg_and_vrd.eval_rel_results(all_results, output_dir, args.do_val)
    else:
        task_evaluation_sg.eval_rel_results(all_results, output_dir, args.do_val, args.do_vis, args.do_special)
    return all_results
def concepts2adj(node_ids):
    """Build the per-relation adjacency matrix for a set of concept nodes.

    For every ordered pair of concepts, each ConceptNet edge whose relation
    id falls in [0, n_rel) sets a 1 in the corresponding relation slice.
    Returns ``(adj, cids)`` where ``adj`` is a sparse COO matrix of shape
    (n_rel * n_node, n_node) and ``cids`` the int32 concept-id array.
    """
    global id2relation
    concept_ids = np.array(node_ids, dtype=np.int32)
    num_relations = len(id2relation)
    num_nodes = concept_ids.shape[0]
    dense = np.zeros((num_relations, num_nodes, num_nodes), dtype=np.uint8)
    for src_idx, src_cid in enumerate(concept_ids):
        for dst_idx, dst_cid in enumerate(concept_ids):
            if not cpnet.has_edge(src_cid, dst_cid):
                continue
            # A node pair may carry several parallel edges (multigraph).
            for edge_attr in cpnet[src_cid][dst_cid].values():
                rel = edge_attr['rel']
                if (0 <= rel < num_relations):
                    dense[rel][src_idx][dst_idx] = 1
    sparse_adj = coo_matrix(dense.reshape(-1, num_nodes))
    return (sparse_adj, concept_ids)
# NOTE(review): the leading `.parametrize(...)` line looks like a truncated
# `@pytest.mark.parametrize(...)` decorator -- confirm against the original file.
.parametrize('media_type, content, definition', (('application/json', b'{"random": "text"}', {'responses': {'200': {'description': 'text', 'content': {'application/json': {'schema': SUCCESS_SCHEMA}}}}}), ('application/json', b'{"random": "text"}', {'responses': {'default': {'description': 'text', 'content': {'application/json': {'schema': SUCCESS_SCHEMA}}}}}), ('application/problem+json', b'{"random": "text"}', {'responses': {'default': {'description': 'text', 'content': {'application/problem+json': {'schema': SUCCESS_SCHEMA}}}}})))
def test_response_schema_conformance_invalid_openapi(openapi_30, media_type, content, definition, response_factory):
    """A response whose body does not match the declared schema must fail conformance."""
    response = response_factory.requests(content=content, content_type=media_type)
    case = make_case(openapi_30, definition)
    # Both the check helper and the convenience predicate must reject it.
    with pytest.raises(AssertionError):
        response_schema_conformance(response, case)
    assert (not case.operation.is_response_valid(response))
def U_15(params, wires):
    """Two-qubit variational block (circuit 15): RY layers with alternating CNOTs.

    Applies RY(params[0..1]) on the two wires, a CNOT from wire 1 to wire 0,
    RY(params[2..3]), then a CNOT from wire 0 to wire 1.
    """
    (q0, q1) = (wires[0], wires[1])
    qml.RY(params[0], wires=q0)
    qml.RY(params[1], wires=q1)
    qml.CNOT(wires=[q1, q0])
    qml.RY(params[2], wires=q0)
    qml.RY(params[3], wires=q1)
    qml.CNOT(wires=[q0, q1])
class SympyAdam(SympyPredictingOptimizer):
    """Symbolic (sympy) model of the Adam optimizer.

    ``step`` advances the symbolic update rule one iteration at a time;
    ``prediction`` approximates the parameter value ``nsteps`` iterations
    ahead, assuming the moment estimates stay frozen at their current
    symbolic values.
    """
    # Order in which collected state symbols are reported.
    collect_order = ['v', 'm', 'theta']

    def __init__(self):
        # Symbolic stand-ins for parameters, gradient, moments and hyperparameters.
        self.theta = Symbol('theta')
        self.grad = Symbol('g')
        self.weight_decay = 0
        (self.exp_avg, self.exp_avg_sq) = (Symbol('m'), Symbol('v'))
        (self.beta1, self.beta2) = (Symbol('\\beta_{1}'), Symbol('\\beta_{2}'))
        self.eps = Symbol('\\epsilon')
        self.lr = Symbol('\\eta')
        self.timestep = 0

    def step(self):
        """Apply one symbolic Adam update to theta and the moment estimates."""
        d_p = tplus_time(self.grad, self.timestep)
        self.timestep += 1
        # Standard Adam bias corrections for the first and second moments.
        bias_correction1 = (1 - (self.beta1 ** self.timestep))
        bias_correction2 = (1 - (self.beta2 ** self.timestep))
        self.exp_avg = ((self.beta1 * self.exp_avg) + ((1 - self.beta1) * d_p))
        self.exp_avg_sq = ((self.beta2 * self.exp_avg_sq) + ((1 - self.beta2) * (d_p ** 2)))
        denom = ((sympy.sqrt(self.exp_avg_sq) / sympy.sqrt(bias_correction2)) + self.eps)
        step_size = (self.lr / bias_correction1)
        self.theta = (self.theta - (step_size * (self.exp_avg / denom)))

    def prediction(self, nsteps):
        """Predict (theta, exp_avg) after ``nsteps`` further iterations.

        Accumulates the per-step coefficient sqrt(1-beta2^t)/(1-beta1^t) * beta1^i
        into ``momentum_coeff`` and applies it once to the frozen update
        direction ``exp_avg / (sqrt(exp_avg_sq) + eps)``.
        """
        timestep = self.timestep
        beta1 = self.beta1
        # BUG FIX: this previously read ``self.beta1``, so the second-moment
        # bias correction used beta1 instead of beta2 (inconsistent with step()).
        beta2 = self.beta2
        exp_avg = self.exp_avg
        exp_avg_sq = self.exp_avg_sq
        eps = self.eps
        lr = self.lr
        theta = self.theta
        momentum_coeff = 0
        for i in range(1, (nsteps + 1)):
            timestep += 1
            bias_correction1 = (1 - (beta1 ** timestep))
            bias_correction2 = (1 - (beta2 ** timestep))
            momentum_coeff += ((sympy.sqrt(bias_correction2) / bias_correction1) * (beta1 ** i))
        a = (exp_avg / (sympy.sqrt(exp_avg_sq) + eps))
        theta = (theta - ((lr * momentum_coeff) * a))
        return (theta, exp_avg)
# NOTE(review): `_numpy_output(...)` looks like a truncated decorator
# (likely `@compare_numpy_output(non_zero=True, check_dtype=True)` from the
# dace test utilities) -- confirm against the original file.
_numpy_output(non_zero=True, check_dtype=True)
def test_ufunc_degrees_u(A: dace.uint32[10]):
    # Element-wise radians-to-degrees conversion on an unsigned 32-bit array.
    return np.degrees(A)
def conv1d(inputs, num_output_channels, kernel_size, scope, stride=1, padding='SAME', use_xavier=True, stddev=0.001, weight_decay=0.0, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
    """1-D convolution layer with optional batch normalization and activation.

    Args:
        inputs: input tensor; the last dimension is the channel count
            (assumed 3-D (batch, length, channels) -- confirm with callers).
        num_output_channels: number of output channels.
        kernel_size: length of the 1-D convolution kernel.
        scope: variable scope name for the layer's weights.
        stride: convolution stride.
        padding: 'SAME' or 'VALID'.
        use_xavier: use Xavier initialization for the kernel.
        stddev: stddev for truncated-normal init when not using Xavier.
        weight_decay: L2 weight-decay coefficient for the kernel.
        activation_fn: activation applied last (None to skip).
        bn: apply batch normalization before the activation.
        bn_decay: decay parameter forwarded to the batch-norm helper.
        is_training: training-mode flag required when ``bn`` is True.

    Returns:
        The output tensor of the layer.
    """
    # The `as sc` binding was unused and has been removed (idiom cleanup).
    with tf.variable_scope(scope):
        num_in_channels = inputs.get_shape()[(- 1)].value
        kernel_shape = [kernel_size, num_in_channels, num_output_channels]
        kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay)
        outputs = tf.nn.conv1d(inputs, kernel, stride=stride, padding=padding)
        biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)
        if bn:
            outputs = batch_norm_for_conv1d(outputs, is_training, bn_decay=bn_decay, scope='bn')
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return outputs
# NOTE(review): the bare lines below look like truncated click decorators --
# likely `@click.command()`, `@click.argument('configfile')`,
# `@click.argument('reads', nargs=1)`, `@click.option('--queries', nargs=-1)`
# and `@click.option('-f', '--force', is_flag=True)`. Confirm against the
# original file.
()
('configfile')
('reads', nargs=1)
('--queries', nargs=(- 1))
('-f', '--force', is_flag=True)
def init(configfile, force, reads, __queries):
    """Create a project config file, refusing to overwrite unless forced."""
    # Derive the project stub name from the config filename (minus '.conf').
    stubname = os.path.basename(configfile)
    if configfile.endswith('.conf'):
        stubname = stubname[:(- 5)]
    else:
        configfile += '.conf'
    if (os.path.exists(configfile) and (not force)):
        print(f"** ERROR: configfile '{configfile}' already exists.")
        return (- 1)
    # Render the optional query genome list as YAML bullet items.
    if __queries:
        query_str = ('- ' + '\n- '.join(__queries))
    else:
        query_str = '- # <-- put query genome list HERE'
    print(f"creating configfile '{configfile}' for project '{stubname}'")
    # NOTE(review): with nargs=1 `reads` is presumably a 1-tuple, so
    # `- {reads}` writes the tuple repr into the YAML -- verify intended.
    with open(configfile, 'wt') as fp:
        fp.write(f'''# basic configuration:
catlas_base: {stubname}
input_sequences:
- {reads}
ksize: 31
radius: 1
search:
{query_str}
# END
''')
def _add_conv(out, channels=1, kernel=1, stride=1, pad=0, num_group=1, active=True, relu6=False, num_sync_bn_devices=(- 1)):
    """Append a conv + batch-norm (+ optional activation) stack to ``out``.

    Uses SyncBatchNorm across ``num_sync_bn_devices`` devices when that is
    not -1, plain BatchNorm otherwise; the activation is ReLU6 or ReLU.
    """
    out.add(nn.Conv2D(channels, kernel, stride, pad, groups=num_group, use_bias=False))
    use_sync_bn = (num_sync_bn_devices != (- 1))
    if use_sync_bn:
        out.add(gluon.contrib.nn.SyncBatchNorm(epsilon=1e-05, momentum=0.9, num_devices=num_sync_bn_devices))
    else:
        out.add(nn.BatchNorm(scale=True))
    if not active:
        return
    activation = (RELU6() if relu6 else nn.Activation('relu'))
    out.add(activation)
class TestOpTreeEvaluation():
    """Tests for OpTree evaluation with Qiskit Estimator/Sampler primitives.

    NOTE(review): the bare ``(scope='module')`` lines below look like
    truncated ``@pytest.fixture(scope='module')`` decorators -- without them
    the fixture arguments of the test methods will not resolve. Confirm
    against the original file.
    """
    (scope='module')
    def _create_random_circuits(self) -> OpTreeList:
        """Fixture: two decomposed random 2-qubit circuits wrapped in an OpTreeList."""
        circuit1 = random_circuit(2, 2, seed=2).decompose(reps=1)
        circuit2 = random_circuit(2, 2, seed=0).decompose(reps=1)
        return OpTreeList([circuit1, circuit2])
    (scope='module')
    def _create_param_circuits(self) -> Tuple[(OpTreeList, List[dict])]:
        """Fixture: two parameterized circuits plus two parameter dictionaries."""
        p = ParameterVector('p', 2)
        circuit1 = QuantumCircuit(2)
        circuit1.rx(p[0], 0)
        circuit1.rx(p[1], 1)
        circuit2 = QuantumCircuit(2)
        circuit2.ry(p[0], 0)
        circuit2.ry(p[1], 1)
        dictionary1 = {p[0]: 0.25, p[1]: 0.5}
        dictionary2 = {p[0]: 0.33, p[1]: 0.44}
        return (OpTreeList([circuit1, circuit2]), [dictionary1, dictionary2])
    (scope='module')
    def _create_operator_z(self) -> Tuple[(OpTreeSum, List[dict])]:
        """Fixture: a Z-basis observable sum with two coefficient dictionaries."""
        x = ParameterVector('x', 2)
        observable1 = SparsePauliOp(['IZ', 'ZI'], [x[0], x[1]])
        observable2 = SparsePauliOp(['II', 'ZZ'], [x[0], x[1]])
        observable = OpTreeSum([observable1, observable2])
        dictionary1 = {x[0]: 1.0, x[1]: 0.5}
        dictionary2 = {x[0]: 0.3, x[1]: 0.2}
        return (observable, [dictionary1, dictionary2])
    (scope='module')
    def _create_operator_xy(self) -> Tuple[(OpTreeSum, dict)]:
        """Fixture: an observable with non-diagonal (X/Y) Paulis and one dictionary."""
        x = ParameterVector('x', 2)
        observable1 = SparsePauliOp(['XY', 'YX'], [x[0], x[1]])
        observable2 = SparsePauliOp(['ZZ', 'YY'], [x[0], x[1]])
        observable = OpTreeSum([observable1, observable2])
        dictionary = {x[0]: 1.0, x[1]: 0.5}
        return (observable, dictionary)
    def test_estimator_z(self, _create_random_circuits, _create_operator_z):
        """Estimator evaluation of Z observables matches the known reference values."""
        reference_values = np.array([1., 1.])
        val = OpTree.evaluate.evaluate_with_estimator(_create_random_circuits, _create_operator_z[0], {}, _create_operator_z[1][0], Estimator())
        assert np.allclose(val, reference_values)
        # The pre-built expectation tree must give the same result.
        expectation_tree = OpTree.gen_expectation_tree(_create_random_circuits, _create_operator_z[0])
        val = OpTree.evaluate.evaluate_tree_with_estimator(expectation_tree, _create_operator_z[1][0], Estimator())
        assert np.allclose(val, reference_values)
    def test_sampler_z(self, _create_random_circuits, _create_operator_z):
        """Sampler evaluation of Z observables matches the estimator reference."""
        reference_values = np.array([1., 1.])
        val = OpTree.evaluate.evaluate_with_sampler(_create_random_circuits, _create_operator_z[0], {}, _create_operator_z[1][0], Sampler())
        assert np.allclose(val, reference_values)
        expectation_tree = OpTree.gen_expectation_tree(_create_random_circuits, _create_operator_z[0])
        val = OpTree.evaluate.evaluate_tree_with_sampler(expectation_tree, _create_operator_z[1][0], Sampler())
        assert np.allclose(val, reference_values)
    def test_estimator_xy(self, _create_random_circuits, _create_operator_xy):
        """Estimator handles non-diagonal (X/Y) observables directly."""
        reference_values = np.array([(- 0.), (- 0.)])
        val = OpTree.evaluate.evaluate_with_estimator(_create_random_circuits, _create_operator_xy[0], {}, _create_operator_xy[1], Estimator())
        assert np.allclose(val, reference_values)
        expectation_tree = OpTree.gen_expectation_tree(_create_random_circuits, _create_operator_xy[0])
        val = OpTree.evaluate.evaluate_tree_with_estimator(expectation_tree, _create_operator_xy[1], Estimator())
        assert np.allclose(val, reference_values)
    def test_sampler_xy(self, _create_random_circuits, _create_operator_xy):
        """Sampler rejects non-diagonal observables unless transformed to the Z basis."""
        reference_values = np.array([(- 0.), (- 0.)])
        with pytest.raises(ValueError):
            OpTree.evaluate.evaluate_with_sampler(_create_random_circuits, _create_operator_xy[0], {}, _create_operator_xy[1], Sampler())
        # After Z-basis transformation, sampler evaluation must succeed.
        op_in_z_base = OpTree.evaluate.transform_to_zbasis(_create_operator_xy[0])
        val = OpTree.evaluate.evaluate_with_sampler(_create_random_circuits, op_in_z_base, {}, _create_operator_xy[1], Sampler())
        assert np.allclose(val, reference_values)
        expectation_tree = OpTree.gen_expectation_tree(_create_random_circuits, _create_operator_xy[0])
        with pytest.raises(ValueError):
            OpTree.evaluate.evaluate_tree_with_sampler(expectation_tree, _create_operator_xy[1], Sampler())
        expectation_tree_in_z_base = OpTree.evaluate.transform_to_zbasis(expectation_tree)
        val = OpTree.evaluate.evaluate_tree_with_sampler(expectation_tree_in_z_base, _create_operator_xy[1], Sampler())
        assert np.allclose(val, reference_values)
    def test_estimator_multi_dict(self, _create_param_circuits, _create_operator_z):
        """Estimator supports cross-product and combined dictionary evaluation."""
        reference_values = np.array([[[2., 2.], [0., 0.]], [[2., 2.], [0., 0.]]])
        val = OpTree.evaluate.evaluate_with_estimator(_create_param_circuits[0], _create_operator_z[0], _create_param_circuits[1], _create_operator_z[1], Estimator())
        assert np.allclose(val, reference_values)
        # dictionaries_combined=True pairs circuit/operator dicts elementwise.
        reference_values = np.array([[2., 2.], [0., 0.]])
        val = OpTree.evaluate.evaluate_with_estimator(_create_param_circuits[0], _create_operator_z[0], _create_param_circuits[1], _create_operator_z[1], Estimator(), dictionaries_combined=True)
        assert np.allclose(val, reference_values)
    def test_sampler_multi_dict(self, _create_param_circuits, _create_operator_z):
        """Sampler supports cross-product and combined dictionary evaluation."""
        reference_values = np.array([[[2., 2.], [0., 0.]], [[2., 2.], [0., 0.]]])
        val = OpTree.evaluate.evaluate_with_sampler(_create_param_circuits[0], _create_operator_z[0], _create_param_circuits[1], _create_operator_z[1], Sampler())
        assert np.allclose(val, reference_values)
        reference_values = np.array([[2., 2.], [0., 0.]])
        val = OpTree.evaluate.evaluate_with_sampler(_create_param_circuits[0], _create_operator_z[0], _create_param_circuits[1], _create_operator_z[1], Sampler(), dictionaries_combined=True)
        assert np.allclose(val, reference_values)
def create_or_update_issue(body=''):
    """Create a CI-failure tracking issue, or update it if one already exists.

    NOTE(review): relies on module-level names (args, get_issue, issue_repo,
    title, date_str) defined elsewhere in the script -- confirm against the
    full file. Exits the process after creating/updating the issue.
    """
    link = f'[{args.ci_name}]({args.link_to_ci_run})'
    issue = get_issue()
    # GitHub rejects very long bodies; truncate with an explanatory footer.
    max_body_length = 60000
    original_body_length = len(body)
    if (original_body_length > max_body_length):
        body = f'''{body[:max_body_length]}
...
Body was too long ({original_body_length} characters) and was shortened'''
    if (issue is None):
        # First failure: open a new tracking issue.
        header = f'**CI failed on {link}** ({date_str})'
        issue = issue_repo.create_issue(title=title, body=f'''{header}
{body}''')
        print(f'Created issue in {args.issue_repo}#{issue.number}')
        sys.exit()
    else:
        # Subsequent failures: overwrite the existing issue body.
        header = f'**CI is still failing on {link}** ({date_str})'
        issue.edit(body=f'''{header}
{body}''')
        print(f'Commented on issue: {args.issue_repo}#{issue.number}')
        sys.exit()
def info_arrow(source, target, data, keys):
    """Format a one-line description of an edge between two nodes.

    The arrow direction reflects ``data['direction']`` ('fwd' points from
    source to target, anything else is rendered backwards). Each entry of
    ``keys`` appends a ``key=value`` segment: 'a' is printed with three
    decimals, 'b_size' reports ``get_size(data['b'])``, and any other key
    is looked up with ``data.get``.
    """
    if (data['direction'] == 'fwd'):
        head = f'{source.id}->{target.id}'
    else:
        head = f'{target.id}<-{source.id}'
    parts = [head]
    for key in keys:
        if (key == 'a'):
            parts.append(f" a={data['a']:.3f}")
        elif (key == 'b_size'):
            parts.append(f" b_size={get_size(data['b'])}")
        else:
            parts.append(f' {key}={data.get(key)}')
    return ''.join(parts)
def applyrules(rules, d, var={}):
    """Recursively apply f2py code-generation rules to dictionary ``d``.

    ``rules`` may be a list (applied in order, stopping at '_break'), or a
    dict whose values are strings (template-substituted via ``replace``),
    lists, or nested dicts keyed by predicate functions. Returns the merged
    result dictionary.

    NOTE(review): the mutable default ``var={}`` is a known Python hazard;
    it appears to be used read-only here (passed to '_check' predicates) --
    confirm before changing.
    """
    ret = {}
    # A list of rule dicts is applied sequentially and merged.
    if isinstance(rules, list):
        for r in rules:
            rr = applyrules(r, d, var)
            ret = dictappend(ret, rr)
            # '_break' stops further rule application.
            if ('_break' in rr):
                break
        return ret
    # '_check' gates the whole rule dict on a predicate over `var`.
    if (('_check' in rules) and (not rules['_check'](var))):
        return ret
    # 'need' registers required C helper functions as a side effect.
    if ('need' in rules):
        res = applyrules({'needs': rules['need']}, d, var)
        if ('needs' in res):
            cfuncs.append_needs(res['needs'])
    for k in rules.keys():
        # 'separatorsfor' is copied through verbatim.
        if (k == 'separatorsfor'):
            ret[k] = rules[k]
            continue
        if isinstance(rules[k], str):
            # Strings are template-substituted against `d`.
            ret[k] = replace(rules[k], d)
        elif isinstance(rules[k], list):
            # Lists: apply each element as its own single-key rule.
            ret[k] = []
            for i in rules[k]:
                ar = applyrules({k: i}, d, var)
                if (k in ar):
                    ret[k].append(ar[k])
        elif (k[0] == '_'):
            # Meta keys (leading underscore) are not emitted.
            continue
        elif isinstance(rules[k], dict):
            # Dict values: keys are predicate functions over `var`; matching
            # predicates contribute their (possibly 'supertext'-expanded) values.
            ret[k] = []
            for k1 in rules[k].keys():
                if (isinstance(k1, types.FunctionType) and k1(var)):
                    if isinstance(rules[k][k1], list):
                        for i in rules[k][k1]:
                            if isinstance(i, dict):
                                res = applyrules({'supertext': i}, d, var)
                                if ('supertext' in res):
                                    i = res['supertext']
                                else:
                                    i = ''
                            ret[k].append(replace(i, d))
                    else:
                        i = rules[k][k1]
                        if isinstance(i, dict):
                            res = applyrules({'supertext': i}, d)
                            if ('supertext' in res):
                                i = res['supertext']
                            else:
                                i = ''
                        ret[k].append(replace(i, d))
        else:
            errmess(('applyrules: ignoring rule %s.\n' % repr(rules[k])))
        # Collapse single-element lists and drop empty results.
        if isinstance(ret[k], list):
            if (len(ret[k]) == 1):
                ret[k] = ret[k][0]
            if (ret[k] == []):
                del ret[k]
    return ret
class Tokenizer():
    """Moses + BPE tokenizer with an integer vocabulary.

    Wraps sacremoses (word tokenization/detokenization) and subword-nmt
    (BPE segmentation), and maps tokens to indices using a vocabulary file
    padded to a multiple of ``pad``.
    """
    def __init__(self, vocab_fname=None, bpe_fname=None, lang=None, pad=1, separator=''):
        """
        Args:
            vocab_fname: path to the vocabulary file (one token per line).
            bpe_fname: path to the learned BPE codes file.
            lang: dict with 'src'/'tgt' language codes for Moses.
            pad: pad the vocabulary size to a multiple of this value.
            separator: BPE continuation marker stripped during detokenization.
        """
        self.separator = separator
        self.lang = lang
        if bpe_fname:
            with open(bpe_fname, 'r') as bpe_codes:
                self.bpe = subword_nmt.apply_bpe.BPE(bpe_codes)
        if vocab_fname:
            self.build_vocabulary(vocab_fname, pad)
        if lang:
            self.init_moses(lang)
    def init_moses(self, lang):
        """Create Moses tokenizer (source lang) and detokenizer (target lang)."""
        self.moses_tokenizer = sacremoses.MosesTokenizer(lang['src'])
        self.moses_detokenizer = sacremoses.MosesDetokenizer(lang['tgt'])
    def build_vocabulary(self, vocab_fname, pad):
        """Load the vocabulary file and build token<->index maps.

        Special tokens (PAD/UNK/BOS/EOS) come first; unknown tokens map to
        ``config.UNK`` via the defaultdict.
        """
        logging.info(f'Building vocabulary from {vocab_fname}')
        vocab = [config.PAD_TOKEN, config.UNK_TOKEN, config.BOS_TOKEN, config.EOS_TOKEN]
        with open(vocab_fname) as vfile:
            for line in vfile:
                vocab.append(line.strip())
        self.pad_vocabulary(vocab, pad)
        self.vocab_size = len(vocab)
        logging.info(f'Size of vocabulary: {self.vocab_size}')
        self.tok2idx = defaultdict(partial(int, config.UNK))
        for (idx, token) in enumerate(vocab):
            self.tok2idx[token] = idx
        self.idx2tok = {}
        for (key, value) in self.tok2idx.items():
            self.idx2tok[value] = key
    def pad_vocabulary(self, vocab, pad):
        """Append made-up tokens until len(vocab) is a multiple of ``pad``."""
        vocab_size = len(vocab)
        padded_vocab_size = ((((vocab_size + pad) - 1) // pad) * pad)
        for i in range(0, (padded_vocab_size - vocab_size)):
            token = f'madeupword{i:04d}'
            vocab.append(token)
        assert ((len(vocab) % pad) == 0)
    def get_state(self):
        """Return a picklable snapshot of the tokenizer state."""
        logging.info(f'Saving state of the tokenizer')
        state = {'lang': self.lang, 'separator': self.separator, 'vocab_size': self.vocab_size, 'bpe': self.bpe, 'tok2idx': self.tok2idx, 'idx2tok': self.idx2tok}
        return state
    def set_state(self, state):
        """Restore tokenizer state produced by :meth:`get_state`."""
        logging.info(f'Restoring state of the tokenizer')
        self.lang = state['lang']
        self.separator = state['separator']
        self.vocab_size = state['vocab_size']
        self.bpe = state['bpe']
        self.tok2idx = state['tok2idx']
        self.idx2tok = state['idx2tok']
        self.init_moses(self.lang)
    def segment(self, line):
        """Map a whitespace-tokenized line to indices, wrapped in BOS/EOS."""
        line = line.strip().split()
        entry = [self.tok2idx[i] for i in line]
        entry = (([config.BOS] + entry) + [config.EOS])
        return entry
    def tokenize(self, line):
        """Full pipeline: Moses tokenize -> BPE -> indices -> torch tensor."""
        tokenized = self.moses_tokenizer.tokenize(line, return_str=True)
        bpe = self.bpe.process_line(tokenized)
        segmented = self.segment(bpe)
        tensor = torch.tensor(segmented)
        return tensor
    def detokenize_bpe(self, inp, delim=' '):
        """Map indices back to text, undoing BPE splits and special tokens."""
        detok = delim.join([self.idx2tok[idx] for idx in inp])
        detok = detok.replace((self.separator + ' '), '')
        detok = detok.replace(self.separator, '')
        detok = detok.replace(config.BOS_TOKEN, '')
        detok = detok.replace(config.EOS_TOKEN, '')
        detok = detok.replace(config.PAD_TOKEN, '')
        detok = detok.strip()
        return detok
    def detokenize_moses(self, inp):
        """Apply Moses detokenization to a whitespace-joined string."""
        output = self.moses_detokenizer.detokenize(inp.split())
        return output
    def detokenize(self, inp):
        """Full inverse pipeline: indices -> BPE-undone text -> Moses detok."""
        detok_bpe = self.detokenize_bpe(inp)
        output = self.detokenize_moses(detok_bpe)
        return output
def benchmark(clf, custom_name=False):
    """Fit ``clf`` on the module-level train split, score it on the test split,
    and print timing/accuracy diagnostics.

    Returns a ``(description, accuracy, train_time, test_time)`` tuple, where
    description is ``custom_name`` (stringified) if given, else the class name.
    """
    print(('_' * 80))
    print('Training: ')
    print(clf)
    start = time()
    clf.fit(X_train, y_train)
    train_time = (time() - start)
    print(f'train time: {train_time:.3}s')
    start = time()
    pred = clf.predict(X_test)
    test_time = (time() - start)
    print(f'test time: {test_time:.3}s')
    score = metrics.accuracy_score(y_test, pred)
    print(f'accuracy: {score:.3}')
    # Linear models expose their coefficient matrix; report its shape/sparsity.
    if hasattr(clf, 'coef_'):
        print(f'dimensionality: {clf.coef_.shape[1]}')
        print(f'density: {density(clf.coef_)}')
        print()
    print()
    clf_descr = (str(custom_name) if custom_name else clf.__class__.__name__)
    return (clf_descr, score, train_time, test_time)
class InceptionResNetV2(nn.Module):
    """Inception-ResNet-v2 backbone (299x299x3 inputs, ``num_classes`` outputs)."""

    def __init__(self, num_classes=1001):
        super(InceptionResNetV2, self).__init__()
        # Metadata slots used by pretrained-model wrappers.
        self.input_space = None
        self.input_size = (299, 299, 3)
        self.mean = None
        self.std = None
        # Stem convolutions.
        self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
        self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.maxpool_3a = nn.MaxPool2d(3, stride=2)
        self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
        self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
        self.maxpool_5a = nn.MaxPool2d(3, stride=2)
        # Inception-ResNet block stacks (10x Block35, 20x Block17, 9x Block8).
        self.mixed_5b = Mixed_5b()
        self.repeat = nn.Sequential(*[Block35(scale=0.17) for _ in range(10)])
        self.mixed_6a = Mixed_6a()
        self.repeat_1 = nn.Sequential(*[Block17(scale=0.1) for _ in range(20)])
        self.mixed_7a = Mixed_7a()
        self.repeat_2 = nn.Sequential(*[Block8(scale=0.2) for _ in range(9)])
        self.block8 = Block8(noReLU=True)
        self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)
        # Classification head.
        self.avgpool_1a = nn.AvgPool2d(8, count_include_pad=False)
        self.last_linear = nn.Linear(1536, num_classes)

    def features(self, input):
        """Run the convolutional trunk and return the 1536-channel feature map."""
        x = input
        for layer in (self.conv2d_1a, self.conv2d_2a, self.conv2d_2b,
                      self.maxpool_3a, self.conv2d_3b, self.conv2d_4a,
                      self.maxpool_5a, self.mixed_5b, self.repeat,
                      self.mixed_6a, self.repeat_1, self.mixed_7a,
                      self.repeat_2, self.block8, self.conv2d_7b):
            x = layer(x)
        return x

    def logits(self, features):
        """Global average pool, flatten, and apply the final linear classifier."""
        pooled = self.avgpool_1a(features)
        flat = pooled.view(pooled.size(0), (- 1))
        return self.last_linear(flat)

    def forward(self, input):
        """Full forward pass: features then logits."""
        return self.logits(self.features(input))
class TestDistBackend(MultiProcessTestCase):
    """Multi-process distributed test harness: each test runs in spawned ranks.

    NOTE(review): ``setUpClass`` and ``_run`` are conventionally
    ``@classmethod``s and ``init_method``/``world_size`` ``@property``s in
    this harness; the decorators appear to have been stripped from this
    copy -- confirm against the original file.
    """
    def setUpClass(cls):
        # Rendezvous configuration for init_process_group via environment.
        os.environ['MASTER_ADDR'] = str(MASTER_ADDR)
        os.environ['MASTER_PORT'] = str(MASTER_PORT)
        os.environ['NCCL_ASYNC_ERROR_HANDLING'] = '1'
        super().setUpClass()
    def setUp(self):
        super().setUp()
        initialize_temp_directories()
        Barrier.init()
        # Tests listed here are allowed to exit with a non-standard code.
        self.skip_return_code_checks = []
    def tearDown(self):
        cleanup_temp_dir()
        super().tearDown()
    def init_method(self):
        # File-based rendezvous URL for this test's process group.
        return '{}{file_name}'.format(FILE_SCHEMA, file_name=self.file_name)
    def _run(cls, rank, test_name, file_name, pipe):
        """Entry point executed in each spawned child process."""
        if ((BACKEND == 'nccl') and (not torch.cuda.is_available())):
            sys.exit(TEST_SKIPS['no_cuda'].exit_code)
        self = cls(test_name)
        self.rank = rank
        self.file_name = file_name
        # Skip if the machine has fewer GPUs than the requested world size.
        if (torch.cuda.is_available() and (torch.cuda.device_count() < int(self.world_size))):
            sys.exit(TEST_SKIPS[f'multi-gpu-{self.world_size}'].exit_code)
        try:
            # Some tests override the default process-group timeout.
            pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout)
            timeout = timedelta(seconds=pg_timeout_seconds)
            dist.init_process_group(init_method=self.init_method, backend=BACKEND, world_size=int(self.world_size), rank=self.rank, timeout=timeout)
        except RuntimeError as e:
            if ('recompile' in e.args[0]):
                sys.exit(TEST_SKIPS['backend_unavailable'].exit_code)
            raise
        # Barriers ensure every rank enters and leaves the test together.
        self._barrier()
        self.run_test(test_name, pipe)
        self._barrier()
        dist.destroy_process_group()
        sys.exit(0)
    def world_size(self):
        return os.environ['WORLD_SIZE']
# NOTE(review): `_spec_function('ice')` looks like a truncated decorator
# (likely `@run_spec_function('ice')` from HELM) -- confirm against the
# original file.
_spec_function('ice')
def get_ice_spec(**kwargs) -> RunSpec:
    """Build the RunSpec for the ICE language-modeling scenario.

    Keyword arguments are forwarded to the scenario and encoded into the
    run name as sorted `k=v` pairs.
    """
    scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.ice_scenario.ICEScenario', args=kwargs)
    return RunSpec(name=(('ice' + (':' if (len(kwargs) > 0) else '')) + ','.join((f'{k}={v}' for (k, v) in sorted(kwargs.items())))), scenario_spec=scenario_spec, adapter_spec=get_language_modeling_adapter_spec(), metric_specs=get_basic_metric_specs([]), groups=['ice'])
def register_Ns3Icmpv4L4Protocol_methods(root_module, cls):
    """Register Python-binding constructors/methods for ns3::Icmpv4L4Protocol.

    NOTE(review): this reads like pybindgen-generated binding code -- the
    type-signature strings must match the C++ declarations exactly, so do
    not edit them by hand.
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Icmpv4L4Protocol const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetDownTarget', 'ns3::IpL4Protocol::DownTargetCallback', [], is_const=True, is_virtual=True)
    cls.add_method('GetDownTarget6', 'ns3::IpL4Protocol::DownTargetCallback6', [], is_const=True, is_virtual=True)
    cls.add_method('GetProtocolNumber', 'int', [], is_const=True, is_virtual=True)
    cls.add_method('GetStaticProtocolNumber', 'uint16_t', [], is_static=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Receive is overloaded for IPv4 and IPv6 headers.
    cls.add_method('Receive', 'ns3::IpL4Protocol::RxStatus', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::Ipv4Interface >', 'incomingInterface')], is_virtual=True)
    cls.add_method('Receive', 'ns3::IpL4Protocol::RxStatus', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv6Header const &', 'header'), param('ns3::Ptr< ns3::Ipv6Interface >', 'incomingInterface')], is_virtual=True)
    cls.add_method('SendDestUnreachFragNeeded', 'void', [param('ns3::Ipv4Header', 'header'), param('ns3::Ptr< ns3::Packet const >', 'orgData'), param('uint16_t', 'nextHopMtu')])
    cls.add_method('SendDestUnreachPort', 'void', [param('ns3::Ipv4Header', 'header'), param('ns3::Ptr< ns3::Packet const >', 'orgData')])
    cls.add_method('SendTimeExceededTtl', 'void', [param('ns3::Ipv4Header', 'header'), param('ns3::Ptr< ns3::Packet const >', 'orgData')])
    cls.add_method('SetDownTarget', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Address, ns3::Ipv4Address, unsigned char, ns3::Ptr< ns3::Ipv4Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetDownTarget6', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv6Address, ns3::Ipv6Address, unsigned char, ns3::Ptr< ns3::Ipv6Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_static_attribute('PROT_NUMBER', 'uint8_t const', is_const=True)
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return
class MobileNetV2(nn.Module):
 """MobileNetV2 backbone returning multi-scale feature maps.

 ``key_block`` marks which entries of ``features`` end a resolution level so
 that ``forward`` can return one feature map per level.  Optional extra stems
 fuse a previous-frame image (``opt.pre_img``) and/or a prior heatmap
 (``opt.pre_hm``) into the first feature map by addition.
 """
 def __init__(self, opt, width_mult=1.0, round_nearest=8, block=None):
  """Build the backbone.

  Args:
   opt: options object; only ``opt.pre_img`` and ``opt.pre_hm`` are read.
   width_mult: channel width multiplier applied to every stage.
   round_nearest: round channel counts to a multiple of this value.
   block: residual block class; defaults to ``InvertedResidual``.
  """
  super().__init__()
  if (block is None):
   block = InvertedResidual
  input_channel = 32
  # NOTE(review): last_channel is assigned but never used in this class.
  last_channel = 1280
  # Each entry is [expand_ratio t, channels c, repeats n, stride s].
  inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
  if ((len(inverted_residual_setting) == 0) or (len(inverted_residual_setting[0]) != 4)):
   raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting))
  input_channel = _make_divisible((input_channel * width_mult), round_nearest)
  if opt.pre_img:
   print('adding pre_img layer...')
   # Extra stem whose output is summed with the main stem in forward().
   self.pre_img_layer = nn.Sequential(nn.Conv2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False), nn.BatchNorm2d(input_channel))
  if opt.pre_hm:
   print('adding pre_hm layer...')
   self.pre_hm_layer = nn.Sequential(nn.Conv2d(1, input_channel, kernel_size=3, padding=1, stride=2, bias=False), nn.BatchNorm2d(input_channel))
  features = [ConvBNReLU(3, input_channel, stride=2)]
  # Initially: key_block[i] is True when features[i] downsamples (stride 2).
  self.key_block = [True]
  all_channels = [input_channel]
  self.channels = [input_channel]
  for (t, c, n, s) in inverted_residual_setting:
   output_channel = _make_divisible((c * width_mult), round_nearest)
   for i in range(n):
    stride = (s if (i == 0) else 1)
    features.append(block(input_channel, output_channel, stride, expand_ratio=t))
    input_channel = output_channel
    if (stride == 2):
     self.key_block.append(True)
    else:
     self.key_block.append(False)
    all_channels.append(output_channel)
  # Shift the markers back by one: record the *last* block of each
  # resolution level (the block just before a downsampling one) and the
  # channel count produced there.
  for i in range((len(self.key_block) - 1)):
   if self.key_block[(i + 1)]:
    self.key_block[i] = True
    self.key_block[(i + 1)] = False
    self.channels.append(all_channels[i])
  self.key_block[(- 1)] = True
  self.channels.append(all_channels[(- 1)])
  print('channels', self.channels)
  self.features = nn.ModuleList(features)
  print('len(self.features)', len(self.features))
  # Standard torchvision-style init, applied before the pretrained load.
  for m in self.modules():
   if isinstance(m, nn.Conv2d):
    nn.init.kaiming_normal_(m.weight, mode='fan_out')
    if (m.bias is not None):
     nn.init.zeros_(m.bias)
   elif isinstance(m, nn.BatchNorm2d):
    nn.init.ones_(m.weight)
    nn.init.zeros_(m.bias)
   elif isinstance(m, nn.Linear):
    nn.init.normal_(m.weight, 0, 0.01)
    nn.init.zeros_(m.bias)
  # strict=False: the checkpoint's classifier head has no counterpart here.
  state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'])
  self.load_state_dict(state_dict, strict=False)
 def forward(self, inputs, pre_img=None, pre_hm=None):
  """Return a list of feature maps, one per key block (index 0 is the stem
  output, optionally fused with the pre_img/pre_hm stems)."""
  x = self.features[0](inputs)
  if (pre_img is not None):
   x = (x + self.pre_img_layer(pre_img))
  if (pre_hm is not None):
   x = (x + self.pre_hm_layer(pre_hm))
  y = [x]
  for i in range(1, len(self.features)):
   x = self.features[i](x)
   if self.key_block[i]:
    y.append(x)
  return y
class FlaxWav2Vec2ForPreTraining(metaclass=DummyObject):
 """Placeholder class used when the ``flax`` backend is unavailable."""
 _backends = ['flax']
 def __init__(self, *args, **kwargs):
  # Delegates to the backend checker, which raises an informative error
  # when flax is not installed.
  requires_backends(self, self._backends)
# NOTE(review): the decorator prefix (likely ``@pytest.mark``) appears to have
# been stripped by extraction; as written this line is not valid Python.
.parametrize('input_meters, expected_resolution', [(5000, 6), (50000, 3)])
# Checks that the tessellation picks the expected H3 resolution for a target
# cell size in meters (``h3_tess`` is a fixture; ``bbox`` a module global).
def test__get_resolution(h3_tess, input_meters, expected_resolution):
 assert (h3_tess._get_resolution(base_shape=bbox, meters=input_meters) == expected_resolution)
def mad(values, n):
 """Return ``(median, mean_absolute_deviation)`` of ``values`` as a sample of size ``n``.

 The input is treated as a sample of fixed size ``n``: if fewer than ``n``
 values are given, the sample is zero-padded.  The median of an even-length
 sample is the lower middle element.

 Args:
  values: sequence of numbers.  Unlike the previous version, the caller's
   list is no longer padded and sorted in place.
  n: nominal sample size (>= 1).

 Returns:
  Tuple ``(m, sd)``: the (lower) median ``m`` and the mean absolute
  deviation from ``m``.  For ``n == 2`` the historical behaviour of
  returning ``(min, min)`` is preserved.
 """
 # Work on a copy so the caller's list is not mutated.
 sample = list(values)
 if (len(sample) < n):
  sample.extend([0] * (n - len(sample)))
 sample.sort()
 if (n == 2):
  # Preserved quirk: the second tuple element is the smaller value here,
  # not a deviation.
  return (sample[0], sample[0])
 # Lower median index for even n, exact middle for odd n.
 mid = (n // 2) if (n % 2) else ((n // 2) - 1)
 m = sample[mid]
 sd = sum(abs(m - v) for v in sample) / float(n)
 return (m, sd)
def mobilenet_v2(pretrained=False, progress=True, filter_size=1, **kwargs):
 """Construct a ``MobileNetV2`` with the given ``filter_size``.

 NOTE(review): ``pretrained`` and ``progress`` are accepted but ignored — no
 checkpoint is ever loaded here.  Confirm whether pretrained-weight loading
 was intentionally dropped.
 """
 model = MobileNetV2(filter_size=filter_size, **kwargs)
 return model
def register_Ns3BoxValue_methods(root_module, cls):
 """Register Python-binding metadata for ``ns3::BoxValue`` (constructors plus
 the AttributeValue interface).  Appears to be auto-generated binding code —
 prefer regenerating over hand-editing."""
 cls.add_constructor([])
 cls.add_constructor([param('ns3::Box const &', 'value')])
 cls.add_constructor([param('ns3::BoxValue const &', 'arg0')])
 cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
 cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
 cls.add_method('Get', 'ns3::Box', [], is_const=True)
 cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
 cls.add_method('Set', 'void', [param('ns3::Box const &', 'value')])
 return
def fully_connected(x, units, use_bias=True, scope='fully_connected'):
 """Flatten ``x`` and apply a dense layer inside ``scope``, using the
 module-level ``weight_init`` / ``weight_regularizer``."""
 with tf.variable_scope(scope):
  flat = flatten(x)
  return tf.layers.dense(flat, units=units, use_bias=use_bias, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer)
def tmp_git_index() -> T.Iterator[str]:
 """Yield the path of a throwaway temporary file (named for use as a git
 index, per the ``gitindex`` prefix).

 The file is created eagerly with ``delete=False`` so consumers can reopen
 it by name; it is removed when the generator is closed.  Removal errors are
 ignored since the file may already be gone.

 Fix over the previous version: the file is created *outside* the
 ``try``/``finally`` so a failing creation no longer triggers an
 ``UnboundLocalError`` in the cleanup path that masked the real exception.
 """
 tmp = tempfile.NamedTemporaryFile(prefix='gitindex', delete=False)
 tmp.close()
 try:
  (yield tmp.name)
 finally:
  try:
   os.remove(tmp.name)
  except OSError:
   # Best-effort cleanup: the consumer may have deleted the file already.
   pass
class Validator():
 """Computes validation metrics (perplexity, translations) for a translator
 over a parallel source/reference corpus.

 Sentences are processed in length-sorted order (longest first) for
 efficient batching; ``sorted2true`` restores the original order afterwards.
 """
 def __init__(self, translator, source, reference, batch_size=3, beam_size=0):
  self.translator = translator
  self.source = source
  self.reference = reference
  self.sentence_count = len(source)
  # +1 per sentence — presumably an end-of-sentence token; confirm against
  # data.tokenize / the scoring convention.
  self.reference_word_count = sum([(len(data.tokenize(sentence)) + 1) for sentence in self.reference])
  self.batch_size = batch_size
  # beam_size <= 0 selects greedy decoding in translate().
  self.beam_size = beam_size
  lengths = [len(data.tokenize(sentence)) for sentence in self.source]
  # Permutation ordering sentences by decreasing token length ...
  self.true2sorted = sorted(range(self.sentence_count), key=(lambda x: (- lengths[x])))
  # ... and its inverse (the argsort of a permutation inverts it).
  self.sorted2true = sorted(range(self.sentence_count), key=(lambda x: self.true2sorted[x]))
  self.sorted_source = [self.source[i] for i in self.true2sorted]
  self.sorted_reference = [self.reference[i] for i in self.true2sorted]
 def perplexity(self):
  """Return corpus perplexity: exp(total score / reference word count)."""
  loss = 0
  for i in range(0, self.sentence_count, self.batch_size):
   j = min((i + self.batch_size), self.sentence_count)
   loss += self.translator.score(self.sorted_source[i:j], self.sorted_reference[i:j], train=False).data
  return np.exp((loss / self.reference_word_count))
 def translate(self):
  """Translate the corpus (greedy or beam search) and return the
  translations in the original sentence order."""
  translations = []
  for i in range(0, self.sentence_count, self.batch_size):
   j = min((i + self.batch_size), self.sentence_count)
   batch = self.sorted_source[i:j]
   if (self.beam_size <= 0):
    translations += self.translator.greedy(batch, train=False)
   else:
    translations += self.translator.beam_search(batch, train=False, beam_size=self.beam_size)
  return [translations[i] for i in self.sorted2true]
def test_forward_beam_seq_lens():
 """Forward pass where the time dim carries a per-(batch, beam) dynamic
 sequence-length tensor; verifies per-sequence outputs expose those
 beam-dependent lengths."""
 from returnn.tensor import Dim, batch_dim
 def _get_model(**_kwargs):
  # The model itself is irrelevant; only the forward step matters.
  return torch.nn.Module()
 def _forward_step(*, extern_data: TensorDict, **_kwargs):
  data = extern_data['data']
  assert (data.dims[0] == batch_dim)
  time_dim = data.dims[1]
  feat_dim = data.dims[2]
  beam_dim = Dim(dimension=5, name='beam')
  with rf.set_default_device_ctx(time_dim.dyn_size_ext.device):
   # Shrink each sequence length by its beam index, clamped at 0.
   ext_seq_lens = rf.relu(rf.combine_bc(time_dim.dyn_size_ext, '-', rf.range_over_dim(beam_dim, dtype=time_dim.dyn_size_ext.dtype)))
  assert (set(ext_seq_lens.dims) == {batch_dim, beam_dim})
  ext_time_dim = Dim(ext_seq_lens, name='time_with_beam')
  ext_data = rf.expand_dim(data, beam_dim)
  (ext_data, _) = rf.replace_dim(ext_data, in_dim=time_dim, out_dim=ext_time_dim)
  assert (set(ext_data.dims) == {batch_dim, beam_dim, ext_time_dim, feat_dim})
  rf.get_run_ctx().mark_as_output(ext_data, 'ext_data', dims=(batch_dim, beam_dim, ext_time_dim, feat_dim))
 max_sizes = set()
 class _ForwardCallback(ForwardCallbackIface):
  def process_seq(self, *, seq_tag: str, outputs: TensorDict):
   out: Tensor = outputs['ext_data']
   (beam_dim, ext_time_dim, feat_dim) = out.dims
   assert isinstance(ext_time_dim.dyn_size_ext.raw_tensor, numpy.ndarray)
   assert (ext_time_dim.dyn_size_ext.dims == (beam_dim,))
   max_size = max(ext_time_dim.dyn_size_ext.raw_tensor)
   # Per-beam lengths must be max_size, max_size-1, ..., clamped at 0.
   assert (set(ext_time_dim.dyn_size_ext.raw_tensor) == set(range(max(((max_size - beam_dim.dimension) + 1), 0), (max_size + 1))))
   max_sizes.add(max_size)
 config = Config(dict(task='forward', batch_size=500, extern_data={'data': {'dim': 9}}, get_model=_get_model, forward_step=_forward_step))
 dataset = init_dataset({'class': 'Task12AXDataset', 'num_seqs': 100, 'name': 'dev', 'fixed_random_seed': 1})
 callback = _ForwardCallback()
 with global_config_ctx(config):
  dataset.init_seq_order(epoch=1)
  engine = Engine(config=config)
  engine.init_network_from_config()
  engine.forward_with_callback(callback=callback, dataset=dataset)
 # Different sequences must have produced different maximum lengths.
 assert (len(max_sizes) > 1)
def test_creat_from_J(spectrum):
 """A spectrum rebuilt from its luminosity converted to J/s must compare
 equal to the original (unit round-trip check)."""
 luminosity_si = spectrum.luminosity.to('J / s')
 rebuilt = TARDISSpectrum(spectrum._frequency, luminosity_si)
 compare_spectra(rebuilt, spectrum)
# NOTE(review): the decorator prefix (likely ``@pytest.mark``) appears to have
# been stripped by extraction; as written this line is not valid Python.
.parametrize(['energy', 'theta_C'], [(511000.0, 1.0), (255500.0, np.pi), (0.0, (2.0 * np.pi)), (.0, (np.pi / 2.0))])
# Compares util.klein_nishina against the Klein-Nishina differential
# cross-section formula written out explicitly in terms of kappa.
def test_klein_nishina(energy, theta_C):
 actual = util.klein_nishina(energy, theta_C)
 kappa = util.kappa_calculation(energy)
 expected = (((R_ELECTRON_SQUARED / 2) * ((1.0 + (kappa * (1.0 - np.cos(theta_C)))) ** (- 2.0))) * ((1.0 + (np.cos(theta_C) ** 2.0)) + (((kappa ** 2.0) * ((1.0 - np.cos(theta_C)) ** 2.0)) / (1.0 + (kappa * (1.0 - np.cos(theta_C)))))))
 npt.assert_almost_equal(actual, expected)
class TestGenerateIndices(TestCase):
 """Unit tests for ``generate_indices``.

 Fix: the original assertions compared ``a.all()`` with ``b.all()``, which
 only compares two booleans — and since ``np.arange(6)`` contains 0, both
 sides were ``False`` and the tests could never fail.  They are replaced
 with element-wise equality checks.
 """
 def test_make_range_if_int(self):
  # An int n should expand to arange(n).
  ind = generate_indices(6, [])
  self.assertTrue(np.array_equal(ind, np.arange(6)))
 def test_pass_through_index_array(self):
  # An explicit index array should pass through unchanged.
  ind = generate_indices(np.arange(6), [])
  self.assertTrue(np.array_equal(ind, np.arange(6)))
 def test_exclude(self):
  # Excluded indices must not appear in the result.
  ind = generate_indices(6, [3])
  self.assertFalse((3 in ind))
def boost_get_toolset(self, cc):
 """Map a compiler name to a Boost toolset.

 When ``cc`` is empty, falls back to the build platform if it is a known
 toolset, and otherwise to the configured ``CXX_NAME``.  Entries of
 ``BOOST_TOOLSETS`` may be a toolset string or a callable taking the
 environment.
 """
 if not cc:
  platform = Utils.unversioned_sys_platform()
  if platform in BOOST_TOOLSETS:
   cc = platform
  else:
   cc = self.env.CXX_NAME
 # Unknown compilers map to themselves.
 toolset = BOOST_TOOLSETS.get(cc, cc)
 return ((isinstance(toolset, str) and toolset) or toolset(self.env))
def make_weights(distribution: str, adjacency: sparse.csr_matrix) -> np.ndarray:
 """Return a per-node weight vector for ``adjacency``.

 Args:
  distribution: ``'degree'`` (weighted row sums) or ``'uniform'`` (all
   ones); case-insensitive.
  adjacency: adjacency matrix of the graph.

 Returns:
  1D array of length ``adjacency.shape[0]``.

 Raises:
  ValueError: for any other distribution name.
 """
 kind = distribution.lower()
 if (kind == 'uniform'):
  return np.ones(adjacency.shape[0])
 if (kind == 'degree'):
  # Row sums computed as A @ 1 so the result is a plain 1D ndarray.
  return adjacency.dot(np.ones(adjacency.shape[1]))
 raise ValueError('Unknown distribution of node weights.')
# NOTE(review): the decorator prefix (likely ``@pytest.fixture``) appears to
# have been stripped by extraction; as written this line is not valid Python.
(scope='module')
def fake_embeddings(tmp_path_factory):
 """Write a random text embedding file covering all but one word of the
 SENTENCES vocabulary, convert it via ``Pretrain``, and return the .pt path."""
 words = sorted(set([x.lower() for y in SENTENCES for x in y]))
 # Drop the last word so one vocabulary item is deliberately missing.
 words = words[:(- 1)]
 embedding_dir = tmp_path_factory.mktemp('data')
 embedding_txt = (embedding_dir / 'embedding.txt')
 embedding_pt = (embedding_dir / 'embedding.pt')
 embedding = np.random.random((len(words), EMB_DIM))
 with open(embedding_txt, 'w', encoding='utf-8') as fout:
  # One line per word: word, then tab-separated vector components.
  for (word, emb) in zip(words, embedding):
   fout.write(word)
   fout.write('\t')
   fout.write('\t'.join((str(x) for x in emb)))
   fout.write('\n')
 # Loading the Pretrain object materialises the .pt cache file.
 pt = pretrain.Pretrain(str(embedding_pt), str(embedding_txt))
 pt.load()
 assert os.path.exists(embedding_pt)
 return embedding_pt
class GelfandTsetlinPattern(ClonableArray, metaclass=InheritComparisonClasscallMetaclass):
 """A Gelfand-Tsetlin pattern: a triangular array whose rows interlace,
 i.e. ``self[i-1][j] >= self[i][j] >= self[i-1][j+1]`` for every entry.

 NOTE(review): the bare ``_map(...)`` / ``_method`` lines below look like
 decorators whose ``@`` prefix (and module path) was stripped by extraction;
 confirm against the original file.
 """
 def __classcall_private__(self, gt):
  """Route construction through the parent so elements are normalised."""
  return GelfandTsetlinPatterns()(gt)
 def check(self):
  """Validate the interlacing condition on every adjacent pair of rows."""
  assert all(((self[(i - 1)][j] >= self[i][j] >= self[(i - 1)][(j + 1)]) for i in range(1, len(self)) for j in range(len(self[i]))))
 def _hash_(self) -> int:
  # Hash on the tuple-of-tuples form of the rows.
  return hash(tuple(map(tuple, self)))
 def _repr_diagram(self) -> str:
  """Render the pattern as a staircase-indented text triangle."""
  ret = ''
  for (i, row) in enumerate(self):
   if (i != 0):
    ret += '\n'
   ret += (' ' * i)
   ret += ' '.join((('%3s' % val) for val in row))
  return ret
 def pp(self):
  """Pretty-print the diagram to stdout."""
  print(self._repr_diagram())
 def _latex_(self) -> str:
  """Return a LaTeX ``array`` rendering of the triangle."""
  n = len(self)
  if (n == 0):
   return '\\emptyset'
  ret = (('\\begin{array}{' + ('c' * ((n * 2) - 1))) + '}\n')
  for (i, row) in enumerate(self):
   if (i > 0):
    ret += ' \\\\\n'
   ret += ('& ' * i)
   ret += ' & & '.join((repr(val) for val in row))
   ret += (' &' * i)
  return (ret + '\n\\end{array}')
 _map(name='to semistandard tableau')
 def to_tableau(self):
  """Return the corresponding semistandard tableau: reading rows bottom-up,
  the value at column ``j`` of the ``i``-th row (from the bottom) extends
  tableau row ``j`` with letter ``i + 1`` up to that length."""
  ret = []
  for (i, row) in enumerate(reversed(self)):
   for (j, val) in enumerate(row):
    if (j >= len(ret)):
     if (val == 0):
      break
     ret.append(([(i + 1)] * val))
    else:
     ret[j].extend(([(i + 1)] * (val - len(ret[j]))))
  S = SemistandardTableaux(max_entry=len(self))
  return S(ret)
 _method
 def boxed_entries(self) -> tuple:
  """Positions ``(i, j)`` where the entry equals its upper bound
  ``self[i-1][j]`` (Tokuyama 'boxed' entries)."""
  ret = []
  for i in range(1, len(self)):
   for j in range(len(self[i])):
    if (self[i][j] == self[(i - 1)][j]):
     ret.append((i, j))
  return tuple(ret)
 _method
 def circled_entries(self) -> tuple:
  """Positions ``(i, j)`` where the entry equals its lower bound
  ``self[i-1][j+1]`` ('circled' entries)."""
  ret = []
  for i in range(1, len(self)):
   for j in range(len(self[i])):
    if (self[i][j] == self[(i - 1)][(j + 1)]):
     ret.append((i, j))
  return tuple(ret)
 _method
 def special_entries(self) -> tuple:
  """Positions strictly between both bounds from the row above."""
  ret = []
  for i in range(1, len(self)):
   for j in range(len(self[i])):
    if ((self[(i - 1)][j] > self[i][j]) and (self[i][j] > self[(i - 1)][(j + 1)])):
     ret.append((i, j))
  return tuple(ret)
 def number_of_boxes(self) -> int:
  """Count of boxed entries."""
  return len(self.boxed_entries())
 def number_of_circles(self) -> int:
  """Count of circled entries."""
  return len(self.circled_entries())
 def number_of_special_entries(self) -> int:
  """Count of special entries."""
  return len(self.special_entries())
 def is_strict(self) -> bool:
  """True when no row contains two equal adjacent entries."""
  return (not any(((row[i] == row[(i + 1)]) for row in self for i in range((len(row) - 1)))))
 def row_sums(self) -> list:
  """Sum of the entries of each row, top to bottom."""
  return [sum((self[i][j] for j in range(len(self[i])))) for i in range(len(self))]
 def weight(self) -> tuple:
  """Weight of the pattern: successive differences of row sums, read from
  the bottom row upward."""
  wt = ([self.row_sums()[(- 1)]] + [(self.row_sums()[(i - 1)] - self.row_sums()[i]) for i in reversed(range(1, len(self[0])))])
  return tuple(wt)
 def Tokuyama_coefficient(self, name='t'):
  """Return ``(t+1)^s * t^b`` over ZZ[name] (s = special entries, b = boxed
  entries), or zero when the pattern is not strict."""
  R = PolynomialRing(ZZ, name)
  t = R.gen(0)
  if (not self.is_strict()):
   return R.zero()
  return (((t + 1) ** self.number_of_special_entries()) * (t ** self.number_of_boxes()))
 _map(order=2, name='Bender-Knuth involution')
 def bender_knuth_involution(self, i) -> GelfandTsetlinPattern:
  """Apply the ``i``-th Bender-Knuth involution: every entry of row
  ``n - i`` is reflected within the interval its neighbours allow."""
  n = len(self)
  def toggle(i, j):
   # Reflect entry (i, j): new value = left_bound + right_bound - old.
   if (i == (n - 1)):
    # Bottom row: bounded only by the two entries of the row above.
    return ((self[(n - 2)][0] + self[(n - 2)][1]) - self[(n - 1)][0])
   if (j == 0):
    left = self[(i - 1)][0]
   else:
    left = min(self[(i - 1)][j], self[(i + 1)][(j - 1)])
   if (j == ((n - i) - 1)):
    right = self[(i - 1)][(j + 1)]
   else:
    right = max(self[(i - 1)][(j + 1)], self[(i + 1)][j])
   return ((left + right) - self[i][j])
  if (not (0 < i < n)):
   raise ValueError(f'must have 0 < {i} < {n}')
  r = (n - i)
  P = self.parent()
  data = [list(row) for row in self]
  data[r] = [toggle(r, s) for s in range(i)]
  return P.element_class(P, data)
class lapack_atlas_threads_info(atlas_threads_info):
 # Same detection as atlas_threads_info, but additionally searching for the
 # 'lapack_atlas' library first.
 _lib_names = (['lapack_atlas'] + atlas_threads_info._lib_names)
def dirichlet_coefficients(redshift, alpha0, alpha1, z1=1.0, weight=None):
 """Sample Dirichlet-like coefficients with redshift-interpolated
 concentration parameters.

 The concentration vector is geometrically interpolated between ``alpha0``
 (at redshift 0) and ``alpha1`` (at redshift ``z1``); one gamma variate per
 component is drawn, optionally scaled by ``weight``, and each row is
 normalised to sum to one.

 Args:
  redshift: scalar or array of redshifts.
  alpha0, alpha1: 1D concentration arrays of equal length.
  z1: reference redshift at which ``alpha1`` applies.
  weight: optional 1D multiplicative weights matching ``alpha0``.

 Returns:
  Array of shape ``np.shape(redshift) + alpha0.shape`` normalised over the
  last axis.

 Raises:
  ValueError: on inconsistent input dimensions.
 """
 if ((np.ndim(alpha0) != 1) or (np.ndim(alpha1) != 1)):
  raise ValueError('alpha0, alpha1 must be 1D arrays')
 if (len(alpha0) != len(alpha1)):
  raise ValueError('alpha0 and alpha1 must have the same length')
 if ((weight is not None) and ((np.ndim(weight) != 1) or (len(weight) != len(alpha0)))):
  raise ValueError('weight must be 1D and match alpha0, alpha1')
 z = np.expand_dims(redshift, (- 1))
 frac = (z / z1)
 # Geometric interpolation: alpha0^(1-f) * alpha1^f.
 alpha = (np.power(alpha0, (1 - frac)) * np.power(alpha1, frac))
 coeff = np.random.gamma(alpha)
 if (weight is not None):
  coeff *= weight
 coeff /= coeff.sum(axis=(- 1))[(..., np.newaxis)]
 return coeff
def test(epoch):
 """Evaluate ``net`` on ``testloader`` and checkpoint when accuracy improves.

 Relies on module-level globals: ``net``, ``testloader``, ``criterion``,
 ``device``, ``best_acc``, ``save_path`` and helpers ``progress_bar`` /
 ``save_model``.  NOTE(review): ``epoch`` is accepted but unused here.
 """
 global best_acc
 net.eval()
 test_loss = 0
 correct = 0
 total = 0
 with torch.no_grad():
  for (batch_idx, (inputs, targets)) in enumerate(testloader):
   (inputs, targets) = (inputs.to(device), targets.to(device))
   outputs = net(inputs)
   loss = criterion(outputs, targets)
   test_loss += loss.item()
   (_, predicted) = outputs.max(1)
   total += targets.size(0)
   correct += predicted.eq(targets).sum().item()
   progress_bar(batch_idx, len(testloader), ('Loss: %.3f | Acc: %.3f%% (%d/%d)' % ((test_loss / (batch_idx + 1)), ((100.0 * correct) / total), correct, total)))
 acc = ((100.0 * correct) / total)
 # Save only when the best accuracy so far is beaten.
 if (acc > best_acc):
  print('Saving..')
  if (not os.path.isdir('checkpoint')):
   os.mkdir('checkpoint')
  save_model(net, save_path)
  best_acc = acc
class TestDistances(unittest.TestCase):
 """Tests for ``get_distances`` on graphs, digraphs and bipartite graphs
 (the expected arrays use -1 for unreachable nodes)."""
 def test_input(self):
  # Invalid parameter combinations must raise.
  adjacency = test_graph()
  with self.assertRaises(ValueError):
   get_distances(adjacency)
  with self.assertRaises(ValueError):
   get_distances(adjacency, source=0, source_row=5)
 def test_algo(self):
  # Undirected graph, single source then multiple sources.
  adjacency = test_graph()
  distances = get_distances(adjacency, 0)
  distances_ = np.array([0, 1, 3, 2, 2, 3, 2, 3, 4, 4])
  self.assertTrue(all((distances == distances_)))
  distances = get_distances(adjacency, [0, 5])
  distances_ = np.array([0, 1, 3, 2, 1, 0, 1, 2, 4, 3])
  self.assertTrue(all((distances == distances_)))
  # Edgeless graph: only the sources themselves are reachable.
  adjacency = test_graph_empty()
  source = [0, 3]
  distances = get_distances(adjacency, source)
  distances_ = (- np.ones(len(distances), dtype=int))
  distances_[source] = 0
  self.assertTrue(all((distances == distances_)))
  # Directed graph: nodes with no directed path stay at -1.
  adjacency = test_digraph()
  distances = get_distances(adjacency, [0])
  distances_ = np.array([0, 1, 3, 2, 2, 3, (- 1), (- 1), (- 1), (- 1)])
  self.assertTrue(all((distances == distances_)))
  # Bipartite graph: distances returned separately for rows and columns.
  biadjacency = test_bigraph()
  (distances_row, distances_col) = get_distances(biadjacency, [0])
  (distances_row_, distances_col_) = (np.array([0, (- 1), 2, (- 1), (- 1), (- 1)]), np.array([3, 1, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]))
  self.assertTrue(all((distances_row == distances_row_)))
  self.assertTrue(all((distances_col == distances_col_)))
  # Column-side source: row/column distances have alternating parity.
  adjacency = test_graph()
  (distances_row, distances_col) = get_distances(adjacency, source_col=[0])
  self.assertTrue(all((distances_row % 2)))
  self.assertTrue(all(((distances_col + 1) % 2)))
  # Mixed row and column sources.
  biadjacency = test_bigraph()
  (distances_row, distances_col) = get_distances(biadjacency, source=0, source_col=[1, 2])
  (distances_row_, distances_col_) = (np.array([0, 1, 1, (- 1), (- 1), (- 1)]), np.array([2, 0, 0, 2, (- 1), (- 1), (- 1), (- 1)]))
  self.assertTrue(all((distances_row == distances_row_)))
  self.assertTrue(all((distances_col == distances_col_)))
def resnest200(pretrained=False, root='~/.encoding/models', **kwargs):
 """Build a ResNeSt-200 model (radix 2, deep stem, average-pool downsample)
 and optionally load released weights from the model-url table.

 Note: ``root`` is accepted for API compatibility but not used here.
 """
 net = ResNet(Bottleneck, [3, 24, 36, 3], radix=2, groups=1, bottleneck_width=64, deep_stem=True, stem_width=64, avg_down=True, avd=True, avd_first=False, **kwargs)
 if pretrained:
  url = resnest_model_urls['resnest200']
  net.load_state_dict(torch.hub.load_state_dict_from_url(url, progress=True, check_hash=True))
 return net
class IDS(object):
 """Stateful dynamics simulator (appears to implement the Siemens
 "Industrial Benchmark" dynamics — setpoint, velocity, gain and shift
 actions drive fatigue, operational cost and a Goldstone-potential
 mis-calibration term that combine into the reward; confirm against the
 reference implementation).

 All state lives in ``self.state`` (an OrderedDict); ``observable_keys``
 selects the subset exposed by :meth:`visibleState`.

 Fix: ``np.float`` (removed in NumPy 1.24) replaced by the builtin
 ``float`` in :meth:`updateFatigue`.
 """
 def __init__(self, p=50, stationary_p=True, inital_seed=None):
  """Create the simulator and take one zero-action bootstrap step.

  Args:
   p: initial setpoint in [0, 100].
   stationary_p: when False, the setpoint drifts along random sequences.
   inital_seed: seed for the global NumPy RNG (parameter keeps its
    historical spelling for caller compatibility).
  """
  np.random.seed(inital_seed)
  self.maxRequiredStep = np.sin(((15.0 / 180.0) * np.pi))
  self.gsBound = 1.5
  self.gsSetPointDependency = 0.02
  self.gsScale = ((2.0 * self.gsBound) + (100.0 * self.gsSetPointDependency))
  # Reward weights: fatigue (CRF), consumption (CRC), Goldstone scale (CRGS).
  self.CRF = 3.0
  self.CRC = 1.0
  self.CRGS = 25.0
  self.stationary_p = stationary_p
  self.gsEnvironment = GoldstoneEnvironment(24, self.maxRequiredStep, (self.maxRequiredStep / 2.0))
  self.state = OrderedDict()
  self.state['o'] = np.zeros(10)
  self.state['coc'] = 0
  self.state['fb'] = 0.0
  self.state['oc'] = 0
  self.state['hg'] = 0.0
  self.state['hv'] = 0.0
  self.state['he'] = 0.0
  self.state['gs_domain'] = self.gsEnvironment._dynamics.Domain.positive.value
  self.state['gs_sys_response'] = self.gsEnvironment._dynamics.System_Response.advantageous.value
  self.state['gs_phi_idx'] = 0
  self.state['ge'] = 0.0
  self.state['ve'] = 0.0
  self.state['MC'] = 0.0
  self.observable_keys = ['p', 'v', 'g', 'h', 'f', 'c', 'cost', 'reward']
  self.state['p'] = p
  self.state['v'] = 50.0
  self.state['g'] = 50.0
  self.state['h'] = 50.0
  self.state['f'] = 0.0
  self.state['c'] = 0.0
  self.state['cost'] = 0.0
  self.state['reward'] = 0.0
  self.init = True
  self.defineNewSequence()
  self.step(np.zeros(3))
 def visibleState(self):
  """Return the observable part of the state as one flat array."""
  return np.concatenate([np.array(self.state[k]).ravel() for k in self.observable_keys])
 def markovState(self):
  """Return the full (Markov) state as one flat array."""
  return np.concatenate([np.array(self.state[k]).ravel() for k in self.state.keys()])
 def step(self, delta):
  """Advance one time step with action ``delta`` = (d_velocity, d_gain, d_shift)."""
  self.updateSetPoint()
  self.addAction(delta)
  self.updateFatigue()
  self.updateCurrentOperationalCost()
  self.updateOperationalCostConvolution()
  self.updateGS()
  self.updateOperationalCosts()
  self.updateCost()
 def updateSetPoint(self):
  """Drift the setpoint along the current random sequence (no-op when stationary)."""
  if self.stationary_p:
   return
  if (self._p_step == self._p_steps):
   self.defineNewSequence()
  new_p = (self.state['p'] + self._p_ch)
  if ((new_p > 100) or (new_p < 0)):
   # At the [0, 100] boundary, reverse the drift direction half the time.
   if (np.random.rand() > 0.5):
    self._p_ch *= (- 1)
   new_p = np.clip(new_p, 0, 100)
  self.state['p'] = new_p
  self._p_step += 1
 def addAction(self, delta):
  """Apply the clipped action to velocity/gain/shift and recompute the
  effective shift ``he``."""
  delta = np.clip(delta, (- 1), 1)
  self.state['v'] = np.clip((self.state['v'] + delta[0]), 0.0, 100.0)
  self.state['g'] = np.clip((self.state['g'] + (10 * delta[1])), 0.0, 100.0)
  self.state['h'] = np.clip((self.state['h'] + ((((self.maxRequiredStep / 0.9) * 100.0) / self.gsScale) * delta[2])), 0.0, 100.0)
  self.state['he'] = np.clip(((((self.gsScale * self.state['h']) / 100.0) - (self.gsSetPointDependency * self.state['p'])) - self.gsBound), (- self.gsBound), self.gsBound)
 def updateFatigue(self):
  """Update the stochastic fatigue dynamics: hidden gain/velocity states
  amplify a base fatigue term ``fb`` into ``f``."""
  expLambda = 0.1
  actionTolerance = 0.05
  fatigueAmplification = 1.1
  fatigueAmplificationMax = 5.0
  fatigueAmplificationStart = 1.2
  velocity = self.state['v']
  gain = self.state['g']
  setpoint = self.state['p']
  hidden_gain = self.state['hg']
  hidden_velocity = self.state['hv']
  effAct = EffectiveAction(velocity, gain, setpoint)
  effAct_velocity = effAct.getEffectiveVelocity()
  effAct_gain = effAct.getEffectiveGain()
  self.state['ge'] = effAct_gain
  self.state['ve'] = effAct_velocity
  noise_e_g = np.random.exponential(expLambda)
  noise_e_v = np.random.exponential(expLambda)
  noise_u_g = np.random.rand()
  noise_u_v = np.random.rand()
  # float(...) replaces np.float, which was removed in NumPy >= 1.24.
  noise_b_g = float(np.random.binomial(1, np.clip(effAct_gain, 0.001, 0.999)))
  noise_b_v = float(np.random.binomial(1, np.clip(effAct_velocity, 0.001, 0.999)))
  noise_gain = (2.0 * ((1.0 / (1.0 + np.exp((- noise_e_g)))) - 0.5))
  noise_velocity = (2.0 * ((1.0 / (1.0 + np.exp((- noise_e_v)))) - 0.5))
  noise_gain += ((((1 - noise_gain) * noise_u_g) * noise_b_g) * effAct_gain)
  noise_velocity += ((((1 - noise_velocity) * noise_u_v) * noise_b_v) * effAct_velocity)
  if (effAct_gain <= actionTolerance):
   hidden_gain = effAct_gain
  elif (hidden_gain >= fatigueAmplificationStart):
   hidden_gain = np.minimum(fatigueAmplificationMax, (fatigueAmplification * hidden_gain))
  else:
   hidden_gain = ((0.9 * hidden_gain) + (noise_gain / 3.0))
  if (effAct_velocity <= actionTolerance):
   hidden_velocity = effAct_velocity
  elif (hidden_velocity >= fatigueAmplificationStart):
   hidden_velocity = np.minimum(fatigueAmplificationMax, (fatigueAmplification * hidden_velocity))
  else:
   hidden_velocity = ((0.9 * hidden_velocity) + (noise_velocity / 3.0))
  if (np.maximum(hidden_velocity, hidden_gain) == fatigueAmplificationMax):
   alpha = (1.0 / (1.0 + np.exp((- np.random.normal(2.4, 0.4)))))
  else:
   alpha = np.maximum(noise_velocity, noise_gain)
  fb = np.maximum(0, ((30000.0 / ((5 * velocity) + 100)) - (0.01 * (gain ** 2))))
  self.state['hv'] = hidden_velocity
  self.state['hg'] = hidden_gain
  self.state['f'] = ((fb * (1 + (2 * alpha))) / 3.0)
  self.state['fb'] = fb
 def updateCurrentOperationalCost(self):
  """Recompute the instantaneous cost ``coc`` and push it into the 10-step
  cost history buffer ``o``."""
  CostSetPoint = 2.0
  CostVelocity = 4.0
  CostGain = 2.5
  gain = self.state['g']
  velocity = self.state['v']
  setpoint = self.state['p']
  costs = (((CostSetPoint * setpoint) + (CostGain * gain)) + (CostVelocity * velocity))
  o = np.exp((costs / 100.0))
  self.state['coc'] = o
  if self.init:
   # First step: fill the entire history with the initial cost.
   self.state['o'] += o
   self.init = False
  else:
   self.state['o'][:(- 1)] = self.state['o'][1:]
   self.state['o'][(- 1)] = o
 def updateOperationalCostConvolution(self):
  """Convolve the cost history with a fixed kernel into ``oc``."""
  ConvArray = np.array([0.11111, 0.22222, 0.33333, 0.22222, 0.11111, 0.0, 0.0, 0.0, 0.0, 0.0])
  self.state['oc'] = np.dot(self.state['o'], ConvArray)
 def updateGS(self):
  """Advance the Goldstone mis-calibration dynamics; store the penalty in ``MC``."""
  effective_shift = self.state['he']
  domain = self.state['gs_domain']
  phi_idx = self.state['gs_phi_idx']
  system_response = self.state['gs_sys_response']
  (reward, domain, phi_idx, system_response) = self.gsEnvironment.state_transition(self.gsEnvironment._dynamics.Domain(domain), phi_idx, self.gsEnvironment._dynamics.System_Response(system_response), effective_shift)
  self.state['MC'] = (- reward)
  self.state['gs_domain'] = domain.value
  self.state['gs_sys_response'] = system_response.value
  self.state['gs_phi_idx'] = phi_idx
 def updateOperationalCosts(self):
  """Blend convolved cost and mis-calibration into the noisy consumption ``c``."""
  rGS = self.state['MC']
  eNewHidden = (self.state['oc'] - (self.CRGS * (rGS - 1.0)))
  operationalcosts = (eNewHidden - (np.random.randn() * (1 + (0.005 * eNewHidden))))
  self.state['c'] = operationalcosts
 def updateCost(self):
  """Combine fatigue and consumption into ``cost``; reward is its negative."""
  fatigue = self.state['f']
  consumption = self.state['c']
  cost = ((self.CRF * fatigue) + (self.CRC * consumption))
  self.state['cost'] = cost
  self.state['reward'] = (- cost)
 def defineNewSequence(self):
  """Draw a new random setpoint drift sequence (length and per-step change)."""
  length = np.random.randint(1, 100)
  self._p_steps = length
  self._p_step = 0
  p_ch = ((2 * np.random.rand()) - 1)
  # 10% of sequences keep the setpoint constant.
  if (np.random.rand() < 0.1):
   p_ch *= 0.0
  self._p_ch = p_ch
class _BaseCurveDisplay():
 """Shared plotting logic for curve displays (scikit-learn style): draws
 mean train/test scores against ``x_data`` with fill-between bands, error
 bars, or bare lines."""
 def _plot_curve(self, x_data, *, ax=None, negate_score=False, score_name=None, score_type='test', log_scale='deprecated', std_display_style='fill_between', line_kw=None, fill_between_kw=None, errorbar_kw=None):
  """Plot onto ``ax`` (created when None), recording the artists on
  ``self.lines_`` / ``self.errorbar_`` / ``self.fill_between_`` and setting
  ``self.ax_`` / ``self.figure_``.

  Reads ``self.train_scores``, ``self.test_scores`` and ``self.score_name``
  (each scores array is aggregated over axis 1, i.e. per CV split).
  """
  check_matplotlib_support(f'{self.__class__.__name__}.plot')
  import matplotlib.pyplot as plt
  if (ax is None):
   (_, ax) = plt.subplots()
  if negate_score:
   # E.g. for error metrics stored as negated scores.
   (train_scores, test_scores) = ((- self.train_scores), (- self.test_scores))
  else:
   (train_scores, test_scores) = (self.train_scores, self.test_scores)
  if (std_display_style not in ('errorbar', 'fill_between', None)):
   raise ValueError(f"Unknown std_display_style: {std_display_style}. Should be one of 'errorbar', 'fill_between', or None.")
  if (score_type not in ('test', 'train', 'both')):
   raise ValueError(f"Unknown score_type: {score_type}. Should be one of 'test', 'train', or 'both'.")
  if (score_type == 'train'):
   scores = {'Train': train_scores}
  elif (score_type == 'test'):
   scores = {'Test': test_scores}
  else:
   scores = {'Train': train_scores, 'Test': test_scores}
  if (std_display_style in ('fill_between', None)):
   if (line_kw is None):
    line_kw = {}
   self.lines_ = []
   for (line_label, score) in scores.items():
    # ax.plot returns a one-element list; unpack it into append.
    self.lines_.append(*ax.plot(x_data, score.mean(axis=1), label=line_label, **line_kw))
   self.errorbar_ = None
   self.fill_between_ = None
  if (std_display_style == 'errorbar'):
   if (errorbar_kw is None):
    errorbar_kw = {}
   self.errorbar_ = []
   for (line_label, score) in scores.items():
    self.errorbar_.append(ax.errorbar(x_data, score.mean(axis=1), score.std(axis=1), label=line_label, **errorbar_kw))
   (self.lines_, self.fill_between_) = (None, None)
  elif (std_display_style == 'fill_between'):
   if (fill_between_kw is None):
    fill_between_kw = {}
   default_fill_between_kw = {'alpha': 0.5}
   # User-supplied kwargs override the defaults.
   fill_between_kw = {**default_fill_between_kw, **fill_between_kw}
   self.fill_between_ = []
   for (line_label, score) in scores.items():
    # Band of +/- one standard deviation around the mean.
    self.fill_between_.append(ax.fill_between(x_data, (score.mean(axis=1) - score.std(axis=1)), (score.mean(axis=1) + score.std(axis=1)), **fill_between_kw))
  score_name = (self.score_name if (score_name is None) else score_name)
  ax.legend()
  if (log_scale != 'deprecated'):
   warnings.warn('The `log_scale` parameter is deprecated as of version 1.3 and will be removed in 1.5. You can use display.ax_.set_xscale and display.ax_.set_yscale instead.', FutureWarning)
   xscale = ('log' if log_scale else 'linear')
  elif (_interval_max_min_ratio(x_data) > 5):
   # Heuristic: a wide max/min interval ratio suggests log-spaced x values.
   xscale = ('symlog' if (x_data.min() <= 0) else 'log')
  else:
   xscale = 'linear'
  ax.set_xscale(xscale)
  ax.set_ylabel(f'{score_name}')
  self.ax_ = ax
  self.figure_ = ax.figure
class STVQAAccuracyEvaluator():
 """Exact-match accuracy for ST-VQA-style predictions.

 Answers are normalised through ``EvalAIAnswerProcessor`` before comparison;
 a prediction scores 1.0 when it matches any ground-truth answer.
 """
 def __init__(self):
  self.answer_processor = EvalAIAnswerProcessor()
 def eval_pred_list(self, pred_list):
  """Return mean exact-match accuracy over ``pred_list``.

  Args:
   pred_list: iterable of dicts with keys 'pred_answer' (string) and
    'gt_answers' (list of strings).

  Returns:
   Accuracy in [0, 1]; 0.0 for an empty list (the previous version raised
   ZeroDivisionError).
  """
  pred_scores = []
  for entry in pred_list:
   pred_answer = self.answer_processor(entry['pred_answer'])
   gts = [self.answer_processor(a) for a in entry['gt_answers']]
   pred_scores.append(1.0 if (pred_answer in gts) else 0.0)
  if not pred_scores:
   # Guard against division by zero on an empty prediction list.
   return 0.0
  return (sum(pred_scores) / len(pred_scores))
def run_coco_eval(anno_json, pred_json, name):
 """Run COCO bounding-box evaluation of ``pred_json`` against ``anno_json``,
 persist the summary stats under ``name`` and return them."""
 coco_gt = COCO(anno_json)
 coco_dt = coco_gt.loadRes(pred_json)
 coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
 # Standard COCOeval pipeline: evaluate -> accumulate -> summarize.
 coco_eval.evaluate()
 coco_eval.accumulate()
 coco_eval.summarize()
 save_eval_results(coco_eval.stats, name)
 return coco_eval.stats
def get_opt():
 """Parse the command-line options for GMM-stage training.

 Returns:
  argparse.Namespace with all options (defaults apply when flags are
  omitted).
 """
 p = argparse.ArgumentParser()
 # Experiment identity and hardware.
 p.add_argument('--name', default='GMM')
 p.add_argument('--gpu_ids', default='')
 p.add_argument('-j', '--workers', type=int, default=1)
 p.add_argument('-b', '--batch-size', type=int, default=4)
 # Dataset layout and geometry.
 p.add_argument('--dataroot', default='data')
 p.add_argument('--datamode', default='train')
 p.add_argument('--stage', default='GMM')
 p.add_argument('--data_list', default='train_pairs.txt')
 p.add_argument('--fine_width', type=int, default=192)
 p.add_argument('--fine_height', type=int, default=256)
 p.add_argument('--radius', type=int, default=5)
 p.add_argument('--grid_size', type=int, default=5)
 # Optimisation, checkpointing and logging.
 p.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
 p.add_argument('--tensorboard_dir', type=str, default='tensorboard', help='save tensorboard infos')
 p.add_argument('--checkpoint_dir', type=str, default='checkpoints', help='save checkpoint infos')
 p.add_argument('--checkpoint', type=str, default='', help='model checkpoint for initialization')
 p.add_argument('--display_count', type=int, default=20)
 p.add_argument('--save_count', type=int, default=100)
 p.add_argument('--keep_step', type=int, default=100000)
 p.add_argument('--decay_step', type=int, default=100000)
 p.add_argument('--shuffle', action='store_true', help='shuffle input data')
 return p.parse_args()
def test():
 """CLI entry point: dispatch a run based on positional ``sys.argv``.

 Expected argv: mode clusters beta input old_assignments output.
 ``mode == 1`` runs the hyper-parameter tests; any other mode runs the
 non-motif TICC variant.
 """
 mode = int(sys.argv[1])
 clusters = int(sys.argv[2])
 beta = float(sys.argv[3])
 inputName = sys.argv[4]
 old_assignmentsName = sys.argv[5]
 outputName = sys.argv[6]
 if (mode == 1):
  runHyperParameterTests(inputName, outputName, clusters, beta, old_assignmentsName)
 else:
  runNonMotifTICC(inputName, outputName, clusters, beta, old_assignmentsName)
class TBool(object):
 """SWIG-generated Python proxy for the SNAP ``TBool`` C++ class.

 Every method delegates to the ``_snap`` extension module; edit the SWIG
 interface and regenerate rather than hand-editing this wrapper.
 """
 thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
 __repr__ = _swig_repr
 Val = _swig_property(_snap.TBool_Val_get, _snap.TBool_Val_set)
 Rnd = _swig_property(_snap.TBool_Rnd_get, _snap.TBool_Rnd_set)
 def __nonzero__(self):
  return _snap.TBool___nonzero__(self)
 # Python 3 truthiness alias for the Python 2 __nonzero__ protocol.
 __bool__ = __nonzero__
 def __init__(self, *args):
  _snap.TBool_swiginit(self, _snap.new_TBool(*args))
 def Load(self, SIn):
  return _snap.TBool_Load(self, SIn)
 def Save(self, SOut):
  return _snap.TBool_Save(self, SOut)
 def __eq__(self, Bool):
  return _snap.TBool___eq__(self, Bool)
 def __lt__(self, Bool):
  return _snap.TBool___lt__(self, Bool)
 def __call__(self):
  return _snap.TBool___call__(self)
 def GetMemUsed(self):
  return _snap.TBool_GetMemUsed(self)
 def GetPrimHashCd(self):
  return _snap.TBool_GetPrimHashCd(self)
 def GetSecHashCd(self):
  return _snap.TBool_GetSecHashCd(self)
 # The following defs are rebound as staticmethods immediately below —
 # SWIG's way of exposing C++ static members.
 def GetRnd():
  return _snap.TBool_GetRnd()
 GetRnd = staticmethod(GetRnd)
 def GetStr(*args):
  return _snap.TBool_GetStr(*args)
 GetStr = staticmethod(GetStr)
 def GetYNStr(Val):
  return _snap.TBool_GetYNStr(Val)
 GetYNStr = staticmethod(GetYNStr)
 def GetYesNoStr(Val):
  return _snap.TBool_GetYesNoStr(Val)
 GetYesNoStr = staticmethod(GetYesNoStr)
 def Get01Str(Val):
  return _snap.TBool_Get01Str(Val)
 Get01Str = staticmethod(Get01Str)
 def IsValStr(Str):
  return _snap.TBool_IsValStr(Str)
 IsValStr = staticmethod(IsValStr)
 def GetValFromStr(*args):
  return _snap.TBool_GetValFromStr(*args)
 GetValFromStr = staticmethod(GetValFromStr)
 __swig_destroy__ = _snap.delete_TBool
def intmod_gap_to_sage(x):
 """Convert a GAP integer-mod element to a Sage element.

 Handles two GAP string forms: ``Z(q)``-style finite-field elements (routed
 through ``gfq_gap_to_sage``) and ``ZmodnZObj``/``ZmodpZObj`` residues
 (returned as ``Mod(a, n)``).

 Raises:
  ValueError: when the string form is not recognised.
 """
 from sage.rings.finite_rings.finite_field_constructor import FiniteField
 from sage.rings.finite_rings.integer_mod import Mod
 from sage.rings.integer import Integer
 s = str(x)
 # Z(q)-style output indicates a finite-field element.
 m = re.search('Z\\(([0-9]*)\\)', s)
 if m:
  return gfq_gap_to_sage(x, FiniteField(Integer(m.group(1))))
 # ZmodnZObj( a, n ) / ZmodpZObj( a, n ) residue classes.
 m = re.match('Zmod[np]ZObj\\( ([0-9]*), ([0-9]*) \\)', s)
 if m:
  return Mod(Integer(m.group(1)), Integer(m.group(2)))
 raise ValueError(("Unable to convert Gap element '%s'" % s))
def digraph_logistic_regression():
 """Build a small LocalClassifierPerLevel fixture for unit tests: a
 two-level hierarchy a -> {b, c} with two training samples and the fitted
 attributes (``hierarchy_``, ``y_``, ...) populated by hand."""
 digraph = LocalClassifierPerLevel(local_classifier=LogisticRegression())
 digraph.hierarchy_ = nx.DiGraph([('a', 'b'), ('a', 'c')])
 digraph.y_ = np.array([['a', 'b'], ['a', 'c']])
 digraph.X_ = np.array([[1, 2], [3, 4]])
 digraph.logger_ = logging.getLogger('LCPL')
 digraph.root_ = 'a'
 digraph.sample_weight_ = None
 digraph.separator_ = '::HiClass::Separator::'
 digraph.masks_ = [[True, True], [True, True]]
 return digraph
def b(tableau, star=0, base_ring=QQ):
    """Return the column antisymmetrizer of *tableau* in the symmetric group
    algebra over *base_ring*.

    When ``star`` is nonzero the tableau is first restricted to its
    ``size() - star`` smallest entries.  The result is the signed sum over
    the column stabilizer; for the empty tableau it is the identity.
    """
    shape = Tableau(tableau)
    if star:
        shape = shape.restrict((shape.size() - star))
    stabilizer = shape.column_stabilizer().list()
    algebra = SymmetricGroupAlgebra(base_ring, shape.size())
    if (len(tableau) == 0):
        return algebra.one()
    unit = base_ring.one()
    coefficients = {Permutation(w): (w.sign() * unit) for w in stabilizer}
    return algebra._from_dict(coefficients)
class TestComputeAverageFeaturesFromImages():
    """Tests for ``compute_average_features_from_images`` across dtypes,
    batch sizes and label types.

    Fix: the parametrize line was a bare ``.parametrize(...)`` expression (a
    syntax error — the ``@pytest.mark`` prefix had been lost), and the test
    method was missing ``self``, which pytest requires for methods inside a
    test class.
    """

    # (dataloader, expected mean feature) pairs: 3 images with constant values
    # 0/1/2 always average to 1 regardless of batch size, dtype or label type.
    cases_grid = [(DataLoader(DummyDataset(torch.tensor([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]], dtype=torch.float64), ['class_1', 'class_2', 'class_2']), batch_size=3), torch.tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=torch.float64)), (DataLoader(DummyDataset(torch.tensor([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]], dtype=torch.float64), ['class_1', 'class_2', 'class_2']), batch_size=2), torch.tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype=torch.float64)), (DataLoader(DummyDataset(torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], dtype=torch.float32), ['class_1', 'class_2', 'class_2']), batch_size=2), torch.tensor([1.0, 1.0, 1.0], dtype=torch.float32)), (DataLoader(DummyDataset(torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], dtype=torch.float32), [1, 2, 2]), batch_size=2), torch.tensor([1.0, 1.0, 1.0], dtype=torch.float32))]

    @pytest.mark.parametrize('dataloader, expected_average', cases_grid)
    def test_returns_expected_average(self, dataloader, expected_average):
        # With an identity model the average feature is the average image.
        output_tensor = compute_average_features_from_images(dataloader, nn.Identity())
        torch.testing.assert_close(output_tensor, expected_average)
class RANSACRegressor(_RoutingNotSupportedMixin, MetaEstimatorMixin, RegressorMixin, MultiOutputMixin, BaseEstimator):
    """RANSAC (RANdom SAmple Consensus) meta-estimator.

    Repeatedly fits ``estimator`` on random subsets of the data and keeps the
    model whose consensus (inlier) set is best, making the final fit robust
    to outliers.  The final refit on the best inlier set is exposed as
    ``estimator_`` and the inlier mask as ``inlier_mask_``.

    Fix applied: ``_fit_context(...)`` was a bare class-body expression — the
    decorator object was built at class-creation time and discarded, leaving
    ``fit`` undecorated so parameter validation never ran.  It is restored as
    a proper ``@_fit_context(...)`` decorator on ``fit``.
    """

    _parameter_constraints: dict = {'estimator': [HasMethods(['fit', 'score', 'predict']), None], 'min_samples': [Interval(Integral, 1, None, closed='left'), Interval(RealNotInt, 0, 1, closed='both'), None], 'residual_threshold': [Interval(Real, 0, None, closed='left'), None], 'is_data_valid': [callable, None], 'is_model_valid': [callable, None], 'max_trials': [Interval(Integral, 0, None, closed='left'), Options(Real, {np.inf})], 'max_skips': [Interval(Integral, 0, None, closed='left'), Options(Real, {np.inf})], 'stop_n_inliers': [Interval(Integral, 0, None, closed='left'), Options(Real, {np.inf})], 'stop_score': [Interval(Real, None, None, closed='both')], 'stop_probability': [Interval(Real, 0, 1, closed='both')], 'loss': [StrOptions({'absolute_error', 'squared_error'}), callable], 'random_state': ['random_state']}

    def __init__(self, estimator=None, *, min_samples=None, residual_threshold=None, is_data_valid=None, is_model_valid=None, max_trials=100, max_skips=np.inf, stop_n_inliers=np.inf, stop_score=np.inf, stop_probability=0.99, loss='absolute_error', random_state=None):
        self.estimator = estimator
        self.min_samples = min_samples
        self.residual_threshold = residual_threshold
        self.is_data_valid = is_data_valid
        self.is_model_valid = is_model_valid
        self.max_trials = max_trials
        self.max_skips = max_skips
        self.stop_n_inliers = stop_n_inliers
        self.stop_score = stop_score
        self.stop_probability = stop_probability
        self.random_state = random_state
        self.loss = loss

    @_fit_context(prefer_skip_nested_validation=False)
    def fit(self, X, y, sample_weight=None):
        """Fit the estimator using the RANSAC algorithm.

        Parameters
        ----------
        X, y : training data; X may be a sparse CSR matrix and may contain
            non-finite values (the base estimator must cope with them).
        sample_weight : optional per-sample weights, forwarded to the base
            estimator's ``fit`` (which must accept ``sample_weight``).

        Raises
        ------
        ValueError : when no valid consensus set is found within
            ``max_trials`` / ``max_skips``, or on inconsistent parameters.
        """
        _raise_for_unsupported_routing(self, 'fit', sample_weight=sample_weight)
        check_X_params = dict(accept_sparse='csr', force_all_finite=False)
        check_y_params = dict(ensure_2d=False)
        (X, y) = self._validate_data(X, y, validate_separately=(check_X_params, check_y_params))
        check_consistent_length(X, y)
        if (self.estimator is not None):
            estimator = clone(self.estimator)
        else:
            estimator = LinearRegression()
        if (self.min_samples is None):
            # Default only makes sense for linear models: n_features + 1 points.
            if (not isinstance(estimator, LinearRegression)):
                raise ValueError('`min_samples` needs to be explicitly set when estimator is not a LinearRegression.')
            min_samples = (X.shape[1] + 1)
        elif (0 < self.min_samples < 1):
            # Fractional value: interpret as a ratio of the sample count.
            min_samples = np.ceil((self.min_samples * X.shape[0]))
        elif (self.min_samples >= 1):
            min_samples = self.min_samples
        if (min_samples > X.shape[0]):
            raise ValueError(('`min_samples` may not be larger than number of samples: n_samples = %d.' % X.shape[0]))
        if (self.residual_threshold is None):
            # Default threshold: median absolute deviation of the targets.
            residual_threshold = np.median(np.abs((y - np.median(y))))
        else:
            residual_threshold = self.residual_threshold
        if (self.loss == 'absolute_error'):
            if (y.ndim == 1):
                loss_function = (lambda y_true, y_pred: np.abs((y_true - y_pred)))
            else:
                loss_function = (lambda y_true, y_pred: np.sum(np.abs((y_true - y_pred)), axis=1))
        elif (self.loss == 'squared_error'):
            if (y.ndim == 1):
                loss_function = (lambda y_true, y_pred: ((y_true - y_pred) ** 2))
            else:
                loss_function = (lambda y_true, y_pred: np.sum(((y_true - y_pred) ** 2), axis=1))
        elif callable(self.loss):
            loss_function = self.loss
        random_state = check_random_state(self.random_state)
        try:
            # Seed the base estimator when it supports a random_state param.
            estimator.set_params(random_state=random_state)
        except ValueError:
            pass
        estimator_fit_has_sample_weight = has_fit_parameter(estimator, 'sample_weight')
        estimator_name = type(estimator).__name__
        if ((sample_weight is not None) and (not estimator_fit_has_sample_weight)):
            raise ValueError(('%s does not support sample_weight. Samples weights are only used for the calibration itself.' % estimator_name))
        if (sample_weight is not None):
            sample_weight = _check_sample_weight(sample_weight, X)
        n_inliers_best = 1
        score_best = (- np.inf)
        inlier_mask_best = None
        X_inlier_best = None
        y_inlier_best = None
        inlier_best_idxs_subset = None
        self.n_skips_no_inliers_ = 0
        self.n_skips_invalid_data_ = 0
        self.n_skips_invalid_model_ = 0
        n_samples = X.shape[0]
        sample_idxs = np.arange(n_samples)
        self.n_trials_ = 0
        max_trials = self.max_trials
        while (self.n_trials_ < max_trials):
            self.n_trials_ += 1
            if (((self.n_skips_no_inliers_ + self.n_skips_invalid_data_) + self.n_skips_invalid_model_) > self.max_skips):
                break
            # Draw a random minimal subset and fit a candidate model on it.
            subset_idxs = sample_without_replacement(n_samples, min_samples, random_state=random_state)
            X_subset = X[subset_idxs]
            y_subset = y[subset_idxs]
            if ((self.is_data_valid is not None) and (not self.is_data_valid(X_subset, y_subset))):
                self.n_skips_invalid_data_ += 1
                continue
            if (sample_weight is None):
                estimator.fit(X_subset, y_subset)
            else:
                estimator.fit(X_subset, y_subset, sample_weight=sample_weight[subset_idxs])
            if ((self.is_model_valid is not None) and (not self.is_model_valid(estimator, X_subset, y_subset))):
                self.n_skips_invalid_model_ += 1
                continue
            # Score the candidate on the full data set via its residuals.
            y_pred = estimator.predict(X)
            residuals_subset = loss_function(y, y_pred)
            inlier_mask_subset = (residuals_subset <= residual_threshold)
            n_inliers_subset = np.sum(inlier_mask_subset)
            if (n_inliers_subset < n_inliers_best):
                self.n_skips_no_inliers_ += 1
                continue
            inlier_idxs_subset = sample_idxs[inlier_mask_subset]
            X_inlier_subset = X[inlier_idxs_subset]
            y_inlier_subset = y[inlier_idxs_subset]
            score_subset = estimator.score(X_inlier_subset, y_inlier_subset)
            # Same consensus size but worse score: keep the current best.
            if ((n_inliers_subset == n_inliers_best) and (score_subset < score_best)):
                continue
            n_inliers_best = n_inliers_subset
            score_best = score_subset
            inlier_mask_best = inlier_mask_subset
            X_inlier_best = X_inlier_subset
            y_inlier_best = y_inlier_subset
            inlier_best_idxs_subset = inlier_idxs_subset
            # Shrink the trial budget as the inlier ratio improves.
            max_trials = min(max_trials, _dynamic_max_trials(n_inliers_best, n_samples, min_samples, self.stop_probability))
            if ((n_inliers_best >= self.stop_n_inliers) or (score_best >= self.stop_score)):
                break
        if (inlier_mask_best is None):
            if (((self.n_skips_no_inliers_ + self.n_skips_invalid_data_) + self.n_skips_invalid_model_) > self.max_skips):
                raise ValueError('RANSAC skipped more iterations than `max_skips` without finding a valid consensus set. Iterations were skipped because each randomly chosen sub-sample failed the passing criteria. See estimator attributes for diagnostics (n_skips*).')
            else:
                raise ValueError('RANSAC could not find a valid consensus set. All `max_trials` iterations were skipped because each randomly chosen sub-sample failed the passing criteria. See estimator attributes for diagnostics (n_skips*).')
        elif (((self.n_skips_no_inliers_ + self.n_skips_invalid_data_) + self.n_skips_invalid_model_) > self.max_skips):
            warnings.warn('RANSAC found a valid consensus set but exited early due to skipping more iterations than `max_skips`. See estimator attributes for diagnostics (n_skips*).', ConvergenceWarning)
        # Refit on the best consensus set to produce the final model.
        if (sample_weight is None):
            estimator.fit(X_inlier_best, y_inlier_best)
        else:
            estimator.fit(X_inlier_best, y_inlier_best, sample_weight=sample_weight[inlier_best_idxs_subset])
        self.estimator_ = estimator
        self.inlier_mask_ = inlier_mask_best
        return self

    def predict(self, X):
        """Predict with the final (consensus-set) estimator."""
        check_is_fitted(self)
        X = self._validate_data(X, force_all_finite=False, accept_sparse=True, reset=False)
        return self.estimator_.predict(X)

    def score(self, X, y):
        """Return the final estimator's score on (X, y)."""
        check_is_fitted(self)
        X = self._validate_data(X, force_all_finite=False, accept_sparse=True, reset=False)
        return self.estimator_.score(X, y)

    def _more_tags(self):
        # sklearn test-suite annotations; weighted refit differs from removal.
        return {'_xfail_checks': {'check_sample_weights_invariance': 'zero sample_weight is not equivalent to removing samples'}}
class GiraffeLayer(nn.Module):
    """One Giraffe FPN layer: builds the fusion nodes described by
    ``fpn_config`` and runs them sequentially in ``forward``.

    Fix applied: the running ``fpn_in_channels`` / ``fpn_strides`` lists are
    now copies of the input lists.  Previously they aliased the caller's
    ``in_channels`` / ``strides`` arguments (and ``self.in_channels`` /
    ``self.strides``), so the per-node ``append`` calls mutated the caller's
    data in place.
    """

    def __init__(self, in_channels, strides, fpn_config, inner_fpn_channels, outer_fpn_channels, separable_conv=False, merge_type='conv'):
        super(GiraffeLayer, self).__init__()
        self.in_channels = in_channels
        self.strides = strides
        self.num_levels = len(in_channels)
        self.conv_bn_relu_pattern = False
        # Copies: the loop below appends one entry per fusion node.
        fpn_in_channels = list(in_channels)
        fpn_strides = list(strides)
        self.fnode = nn.ModuleList()
        reduction_base = strides[0]
        for (i, fnode_cfg) in fpn_config.items():
            # Output nodes use the outer channel table, inner nodes the inner one.
            if (fnode_cfg['is_out'] == 1):
                fpn_channels = outer_fpn_channels
            else:
                fpn_channels = inner_fpn_channels
            reduction = fnode_cfg['reduction']
            # Level index from the log2 ratio to the base stride.
            fpn_channels_idx = int(math.log((reduction // reduction_base), 2))
            combine = GiraffeCombine(fpn_in_channels, fpn_strides, fpn_config, fpn_channels, tuple(fnode_cfg['inputs_offsets']), target_reduction=reduction, weight_method=fnode_cfg['weight_method'])
            after_combine = nn.Sequential()
            in_channel_sum = 0
            out_channels = 0
            for input_offset in fnode_cfg['inputs_offsets']:
                in_channel_sum += fpn_in_channels[input_offset]
            out_channels = fpn_channels[fpn_channels_idx]
            if (merge_type == 'csp'):
                after_combine.add_module('CspLayer', CSPLayer(in_channel_sum, out_channels, 2, shortcut=True, depthwise=False, act='silu'))
            elif (merge_type == 'shuffle'):
                after_combine.add_module('shuffleBlock', InvertedResidual(in_channel_sum, in_channel_sum, stride=1))
                after_combine.add_module('conv1x1', nn.Conv2d(in_channel_sum, out_channels, 1, 1, 0))
            elif (merge_type == 'conv'):
                after_combine.add_module('conv1x1', nn.Conv2d(in_channel_sum, out_channels, 1, 1, 0))
                after_combine.add_module('conv', (DepthwiseConvModule(out_channels, out_channels, 3, 1, 1, norm_cfg=dict(type='BN'), activation='Swish') if separable_conv else ConvModule(out_channels, out_channels, 3, 1, 1, norm_cfg=dict(type='BN'), activation='Swish')))
            self.fnode.append(GiraffeNode(combine=combine, after_combine=after_combine))
            # Register the new node's output as an input candidate for later nodes.
            fpn_in_channels.append(fpn_channels[fpn_channels_idx])
            fpn_strides.append(reduction)

    def forward(self, x):
        """Run every fusion node, appending its output to the feature list,
        and return the last ``num_levels`` feature maps."""
        for fn in self.fnode:
            x.append(fn(x))
        return x[(- self.num_levels):]
def test_array_num():
    """array_num should add 5 element-wise to a random int64 input array."""
    source = np.random.randint(10, size=(N.get(),), dtype=np.int64)
    result = array_num(source)
    assert np.array_equal((source + 5), result)
def read_and_decode1(filename_queue):
    """Read one TFRecord example and return an augmented (image, label) pair.

    TF1 queue-based input pipeline: decodes a JPEG, rescales pixels to
    [-1, 1], pads both spatial dims up to at least 96, random-crops a
    96x96xCHANNELS patch, resizes to IMAGE_SIZE and random-flips.  The label
    vector is truncated to the first NUM_TAGS_TO_USE entries.
    """
    reader = tf.TFRecordReader()
    (_, serialized_example) = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example, features={'file_bytes': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([NUM_TAGS], tf.float32)})
    image = tf.image.decode_jpeg(features['file_bytes'], channels=3, try_recover_truncated=True)
    # Map uint8 [0, 255] pixel values to float [-1, 1].
    image = ((tf.cast(image, tf.float32) * (2.0 / 255)) - 1)
    if (CHANNELS == 1):
        # Collapse RGB to a single grayscale channel by averaging.
        image = tf.reduce_mean(image, reduction_indices=[2], keep_dims=True)
    image.set_shape([None, None, None])
    shape = tf.cast(tf.shape(image), tf.float32)
    # Symmetric padding so height and width each reach at least 96 pixels.
    height_pad = tf.maximum(tf.ceil(((96 - shape[0]) / 2)), 0)
    height_pad = tf.reshape(height_pad, [1, 1])
    width_pad = tf.maximum(tf.ceil(((96 - shape[1]) / 2)), 0)
    width_pad = tf.reshape(width_pad, [1, 1])
    height_pad = tf.tile(height_pad, [1, 2])
    width_pad = tf.tile(width_pad, [1, 2])
    # NOTE(review): pre-TF-1.0 tf.concat signature (axis as first argument).
    paddings = tf.concat(0, [height_pad, width_pad, tf.zeros([1, 2])])
    paddings = tf.cast(paddings, tf.int32)
    image = tf.pad(image, paddings)
    image = tf.random_crop(image, [96, 96, CHANNELS])
    # NOTE(review): legacy resize_images signature with positional height/width.
    image = tf.image.resize_images(image, IMAGE_SIZE, IMAGE_SIZE, method=tf.image.ResizeMethod.AREA)
    image = tf.image.random_flip_left_right(image)
    label = features['label']
    label = tf.slice(label, [0], [NUM_TAGS_TO_USE])
    return (image, label)
class geom_gen(rv_discrete):
    """Geometric distribution on k = 1, 2, ... with success probability p.

    pmf(k) = (1 - p)**(k - 1) * p, implemented against scipy's
    ``rv_discrete`` private hook interface.
    """
    def _rvs(self, p):
        # Sample via numpy's geometric generator.
        return self._random_state.geometric(p, size=self._size)
    def _argcheck(self, p):
        # NOTE(review): p == 0 is accepted here although the distribution is
        # degenerate there (scipy's own geom requires 0 < p <= 1) — confirm
        # this relaxation is intentional.
        return ((p <= 1) & (p >= 0))
    def _pmf(self, k, p):
        return (np.power((1 - p), (k - 1)) * p)
    def _logpmf(self, k, p):
        # (k - 1) * log1p(-p) + log(p), computed stably via xlog1py.
        return (special.xlog1py((k - 1), (- p)) + log(p))
    def _cdf(self, x, p):
        k = floor(x)
        # 1 - (1 - p)**k, computed as -expm1(k * log1p(-p)) for accuracy.
        return (- expm1((log1p((- p)) * k)))
    def _sf(self, x, p):
        return np.exp(self._logsf(x, p))
    def _logsf(self, x, p):
        k = floor(x)
        return (k * log1p((- p)))
    def _ppf(self, q, p):
        # Invert the CDF analytically, then step back one where ceil overshot.
        vals = ceil((log1p((- q)) / log1p((- p))))
        temp = self._cdf((vals - 1), p)
        return np.where(((temp >= q) & (vals > 0)), (vals - 1), vals)
    def _stats(self, p):
        # mean 1/p, variance (1-p)/p**2, skew (2-p)/sqrt(1-p),
        # excess kurtosis (p**2 - 6p + 6)/(1-p).
        mu = (1.0 / p)
        qr = (1.0 - p)
        var = ((qr / p) / p)
        g1 = ((2.0 - p) / sqrt(qr))
        g2 = (np.polyval([1, (- 6), 6], p) / (1.0 - p))
        return (mu, var, g1, g2)
def analyze_corpus_in_numbers(lengths, dict_paragraphs, labels_train, output_dir):
    """Print corpus statistics and plot the paragraph-length histogram.

    Parameters
    ----------
    lengths : dict mapping file id -> {'intro': int, 'lengths_paragraphs': list}
    dict_paragraphs : unused here; kept for interface compatibility.
    labels_train : dict mapping query -> list of relevant documents.
    output_dir : directory the histogram plot is written to (via plot_hist).

    Fixes applied: the builtin name ``list`` is no longer shadowed, and a
    missing/None ``'lengths_paragraphs'`` entry no longer crashes the
    paragraph-count average (it is treated as an empty list, matching how the
    other loops already guard it).
    """
    print('number of files in corpus {}'.format(len(lengths.keys())))
    avg_length = []
    for value in lengths.values():
        intro_len = value.get('intro') or 0
        para_lengths = value.get('lengths_paragraphs') or []
        # Some paragraph lengths may be None/0; only count truthy entries.
        para_len = sum(x for x in para_lengths if x)
        avg_length.append(intro_len + para_len)
    print('the documents have an average length of {}'.format(np.mean(avg_length)))
    print('the documents have an min length of {}'.format(np.min(avg_length)))
    print('the documents have an max length of {}'.format(np.max(avg_length)))
    print('average number of paragraphs per document {}'.format(np.mean([len(value.get('lengths_paragraphs') or []) for value in lengths.values()])))
    para = []
    for value in lengths.values():
        para_lengths = value.get('lengths_paragraphs')
        if para_lengths:
            para.extend(x for x in para_lengths if x)
    print('the paragraphs have an average length of {}'.format(np.mean(para)))
    print('the shortest paragraph has {} words'.format(np.min(para)))
    print('the longest paragraph has {} words'.format(np.max(para)))
    # Truncate at 1000 words so the histogram tail stays readable.
    plot_hist(np.array([x for x in para if (x < 1000)]), 'number of words', 'Paragraph length distribution', output_dir)
    print('there are in total {} paragraphs'.format(len(para)))
    print('average number of relevant documents for train {}'.format(np.mean([len(value) for value in labels_train.values()])))
class BNFoldingNet(nn.Module):
    """Conv -> BatchNorm -> ReLU toy network used to exercise BN folding.

    When ``functional`` is set, batch norm is applied through
    ``nn.functional.batch_norm`` instead of the module call; when
    ``fold_applied`` is False the pre-BN activation is added back after ReLU.
    """

    def __init__(self, test_layer, functional, fold_applied):
        super(BNFoldingNet, self).__init__()
        self.conv1 = test_layer
        self.bn = nn.BatchNorm2d(test_layer.out_channels)
        self.fold_applied = fold_applied
        self.functional = functional

    def forward(self, inp):
        conv_out = self.conv1(inp)
        if self.functional:
            bn = self.bn
            normed = nn.functional.batch_norm(conv_out, bn.running_mean, bn.running_var, bn.weight, bn.bias, training=bn.training, momentum=bn.momentum, eps=bn.eps)
        else:
            normed = self.bn(conv_out)
        activated = torch.relu(normed)
        # Residual add only when folding has NOT been applied.
        return activated if self.fold_applied else (activated + conv_out)
def _construct_sparse_coder(Estimator):
dictionary = np.array([[0, 1, 0], [(- 1), (- 1), 2], [1, 1, 1], [0, 1, 1], [0, 2, 1]], dtype=np.float64)
return Estimator(dictionary=dictionary) |
def loadGloveModel(gloveFile):
    """Load GloVe word vectors from *gloveFile*.

    Returns a dict mapping each word to a 1-D ``np.ndarray`` of floats.

    Fixes applied: the file is now explicitly read as UTF-8 (the encoding
    GloVe releases use) so loading no longer fails on non-ASCII tokens under
    locales with a narrower default encoding, and blank lines are skipped
    instead of raising IndexError.
    """
    print('Loading pretrained word vectors...')
    model = {}
    with open(gloveFile, 'r', encoding='utf-8') as f:
        for line in f:
            splitLine = line.split()
            if not splitLine:
                continue  # tolerate blank lines
            word = splitLine[0]
            model[word] = np.array([float(val) for val in splitLine[1:]])
    print('Done.', len(model), ' words loaded!')
    return model
class DoxyClass(DoxyCompound):
    """Representation of a Doxygen 'class' compound parsed from doxyxml."""
    __module__ = 'gnuradio.utils.doxyxml'
    kind = 'class'
    def _parse(self):
        """Lazily parse the class XML: descriptions, parameters and members."""
        if self._parsed:
            return
        super(DoxyClass, self)._parse()
        self.retrieve_data()
        if self._error:
            # Retrieval failed; leave the object only partially populated.
            return
        self.set_descriptions(self._retrieved_data.compounddef)
        self.set_parameters(self._retrieved_data.compounddef)
        self.process_memberdefs()
    # Convenience accessors; data() presumably triggers _parse on first use.
    brief_description = property((lambda self: self.data()['brief_description']))
    detailed_description = property((lambda self: self.data()['detailed_description']))
    params = property((lambda self: self.data()['params']))
class DictGatherDataParallel(nn.DataParallel):
    """DataParallel variant that gathers dict-structured replica outputs."""
    def gather(self, outputs, output_device):
        # Delegate to the project-level dict_gather helper instead of the
        # default gather so dictionary outputs are supported.
        return dict_gather(outputs, output_device, dim=self.dim)
def aggregate_metrics(questions):
    """Print aggregate exact-match and F1 percentages for the two compared models.

    *questions* maps question ids to objects exposing ``em`` and ``f1``
    two-element sequences (one entry per model) and ``model_names``.
    """
    total = len(questions)
    entries = list(questions.values())
    exact_match = np.zeros(2)
    f1_scores = np.zeros(2)
    for mc in range(2):
        exact_match[mc] = (100 * np.sum(np.array([q.em[mc] for q in entries]))) / total
        f1_scores[mc] = (100 * np.sum(np.array([q.f1[mc] for q in entries]))) / total
    model_names = next(iter(questions.values())).model_names
    print('\nAggregate Scores:')
    for model_count in range(2):
        print('Model {0} EM = {1:.2f}'.format(model_names[model_count], exact_match[model_count]))
        print('Model {0} F1 = {1:.2f}'.format(model_names[model_count], f1_scores[model_count]))
def CVFT(x_sat, x_grd, keep_prob, trainable):
    """Cross-View Feature Transport network (TF1 graph construction).

    Embeds the satellite image *x_sat* and ground image *x_grd* through
    separate VGG16 backbones, learns an optimal-transport alignment matrix
    for the ground branch via a Sinkhorn layer, and returns a pair of
    L2-normalized global descriptors ``(sat_global, grd_global)``.
    """
    def conv_layer(x, kernel_dim, input_dim, output_dim, stride, trainable, activated, name='ot_conv', activation_function=tf.nn.relu):
        # Plain conv + bias (+ optional activation) inside a reusable scope.
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            weight = tf.get_variable(name='weights', shape=[kernel_dim, kernel_dim, input_dim, output_dim], trainable=trainable, initializer=tf.contrib.layers.xavier_initializer())
            bias = tf.get_variable(name='biases', shape=[output_dim], trainable=trainable, initializer=tf.contrib.layers.xavier_initializer())
            out = (tf.nn.conv2d(x, weight, strides=[1, stride, stride, 1], padding='SAME') + bias)
            if activated:
                out = activation_function(out)
            return out
    def fc_layer(x, trainable, name='ot_fc'):
        # Fully-connected layer producing an (hw x hw) matrix per example;
        # the bias is initialized to a flattened identity matrix.
        (height, width, channel) = x.get_shape().as_list()[1:]
        assert (channel == 1)
        in_dimension = (height * width)
        out_dimension = (in_dimension ** 2)
        input_feature = tf.reshape(x, [(- 1), (height * width)])
        with tf.variable_scope(name):
            weight = tf.get_variable(name='weights', shape=[in_dimension, out_dimension], trainable=trainable, initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.005), regularizer=tf.contrib.layers.l2_regularizer(0.01))
            bias = tf.get_variable(name='biases', shape=[out_dimension], trainable=trainable, initializer=tf.constant_initializer(np.eye(in_dimension).reshape((in_dimension ** 2))))
            out = (tf.matmul(input_feature, weight) + bias)
            out = tf.reshape(out, [(- 1), in_dimension, in_dimension])
            return out
    def ot(input_feature, trainable, name='ot'):
        # Predict a cost matrix, then run Sinkhorn to obtain the transport plan.
        (height, width, channel) = input_feature.get_shape().as_list()[1:]
        conv_feature = conv_layer(input_feature, kernel_dim=1, input_dim=channel, output_dim=1, stride=1, trainable=trainable, activated=True, name=(name + 'ot_conv'))
        fc_feature = fc_layer(conv_feature, trainable, name=(name + 'ot_fc'))
        ot_matrix = sinkhorn((fc_feature * (- 100.0)))
        return ot_matrix
    def apply_ot(input_feature, ot_matrix):
        # Transport the flattened spatial positions and restore the feature map.
        (height, width, channel) = input_feature.get_shape().as_list()[1:]
        in_dimension = ot_matrix.get_shape().as_list()[1]
        reshape_input = tf.transpose(tf.reshape(input_feature, [(- 1), in_dimension, channel]), [0, 2, 1])
        out = tf.einsum('bci, bio -> bco', reshape_input, ot_matrix)
        output_feature = tf.reshape(tf.transpose(out, [0, 2, 1]), [(- 1), height, width, channel])
        return output_feature
    # Ground branch: VGG backbone followed by a strided reduction conv.
    vgg_grd = VGG16()
    grd_vgg = vgg_grd.VGG16_conv(x_grd, keep_prob, trainable, 'VGG_grd')
    grd_vgg = conv_layer(grd_vgg, kernel_dim=3, input_dim=512, output_dim=64, stride=2, trainable=trainable, activated=True, name='grd_conv')
    # Satellite branch: same structure with separate weights.
    vgg_sat = VGG16()
    sat_vgg = vgg_sat.VGG16_conv(x_sat, keep_prob, trainable, 'VGG_sat')
    sat_vgg = conv_layer(sat_vgg, kernel_dim=3, input_dim=512, output_dim=64, stride=2, trainable=trainable, activated=True, name='sat_conv')
    (height, width, channel) = sat_vgg.get_shape().as_list()[1:]
    # Match the ground feature map to the satellite spatial resolution.
    grd_vgg = tf.image.resize_bilinear(grd_vgg, [height, width])
    # Optimal transport is applied to the ground branch only.
    ot_matrix_grd_branch = ot(grd_vgg, trainable, name='ot_grd_branch')
    grd_ot = apply_ot(grd_vgg, ot_matrix_grd_branch)
    sat_ot = sat_vgg
    (grd_height, grd_width, grd_channel) = grd_ot.get_shape().as_list()[1:]
    grd_global = tf.reshape(grd_ot, [(- 1), ((grd_height * grd_width) * grd_channel)])
    (sat_height, sat_width, sat_channel) = sat_ot.get_shape().as_list()[1:]
    sat_global = tf.reshape(sat_ot, [(- 1), ((sat_height * sat_width) * sat_channel)])
    return (tf.nn.l2_normalize(sat_global, dim=1), tf.nn.l2_normalize(grd_global, dim=1))
class NAS_FPN():
    """Placeholder for a NAS-FPN neck; no computation is implemented yet."""

    def __init__(self):
        super(NAS_FPN, self).__init__()

    def forward(self, x):
        # Stub: intentionally returns None.
        pass
class Decoder():
    """CTC decoding helpers (greedy and beam search) plus WER/CER metrics.

    Fix applied: ``wer`` previously allocated its dynamic-programming table
    as ``np.uint8``, silently wrapping distances modulo 256 for sequences
    longer than 255 elements; the table is now int64.
    """

    def __init__(self, labels, lm_path=None, alpha=1, beta=1.5, cutoff_top_n=40, cutoff_prob=0.99, beam_width=200, num_processes=24, blank_id=0):
        # '_' at index 0 acts as the CTC blank symbol.
        # NOTE(review): the beam decoder is built over labels[1:] while
        # vocab_list keeps all labels — presumably labels[0] duplicates the
        # blank; confirm against the caller.
        self.vocab_list = (['_'] + labels)
        self._decoder = CTCBeamDecoder((['_'] + labels[1:]), lm_path, alpha, beta, cutoff_top_n, cutoff_prob, beam_width, num_processes, blank_id)

    def convert_to_string(self, tokens, seq_len=None):
        """Collapse repeated ids and drop blanks (id 0), then map to chars."""
        if (not seq_len):
            seq_len = tokens.size(0)
        out = []
        for i in range(seq_len):
            if (len(out) == 0):
                if (tokens[i] != 0):
                    out.append(tokens[i])
            elif ((tokens[i] != 0) and (tokens[i] != tokens[(i - 1)])):
                out.append(tokens[i])
        return ''.join((self.vocab_list[i] for i in out))

    def decode_beam(self, logits, seq_lens):
        """Beam-search decode a (time, batch, vocab) logits tensor."""
        decoded = []
        tlogits = logits.transpose(0, 1)
        (beam_result, beam_scores, timesteps, out_seq_len) = self._decoder.decode(tlogits.softmax((- 1)), seq_lens)
        for i in range(tlogits.size(0)):
            # Take the top beam for each batch element.
            output_str = ''.join(map((lambda x: self.vocab_list[x]), beam_result[i][0][:out_seq_len[i][0]]))
            decoded.append(output_str)
        return decoded

    def decode_greedy(self, logits, seq_lens):
        """Greedy (argmax) decode a (time, batch, vocab) logits tensor."""
        decoded = []
        tlogits = logits.transpose(0, 1)
        (_, tokens) = torch.max(tlogits, 2)
        for i in range(tlogits.size(0)):
            output_str = self.convert_to_string(tokens[i], seq_lens[i])
            decoded.append(output_str)
        return decoded

    def get_mean(self, decoded, gt, individual_length, func):
        """Average *func*(decoded, gt) over the batch, normalized by
        *individual_length* (the mean reference length)."""
        total_norm = 0.0
        length = len(decoded)
        for i in range(0, length):
            val = float(func(decoded[i], gt[i]))
            total_norm += (val / individual_length)
        return (total_norm / length)

    def wer(self, r, h):
        """Levenshtein edit distance between sequences *r* and *h*."""
        # int64 table: the old uint8 table overflowed for len > 255.
        d = np.zeros(((len(r) + 1), (len(h) + 1)), dtype=np.int64)
        # Border: transforming to/from the empty prefix costs its length.
        d[0, :] = np.arange((len(h) + 1))
        d[:, 0] = np.arange((len(r) + 1))
        for i in range(1, (len(r) + 1)):
            for j in range(1, (len(h) + 1)):
                if (r[(i - 1)] == h[(j - 1)]):
                    d[i][j] = d[(i - 1)][(j - 1)]
                else:
                    substitution = (d[(i - 1)][(j - 1)] + 1)
                    insertion = (d[i][(j - 1)] + 1)
                    deletion = (d[(i - 1)][j] + 1)
                    d[i][j] = min(substitution, insertion, deletion)
        return d[len(r)][len(h)]

    def wer_sentence(self, r, h):
        """Word-level edit distance between two sentences."""
        return self.wer(r.split(), h.split())

    def cer_batch(self, decoded, gt):
        """Mean character error rate over the batch."""
        assert (len(decoded) == len(gt)), 'batch size mismatch: {}!={}'.format(len(decoded), len(gt))
        mean_indiv_len = np.mean([len(s) for s in gt])
        return self.get_mean(decoded, gt, mean_indiv_len, editdistance.eval)

    def wer_batch(self, decoded, gt):
        """Mean word error rate over the batch."""
        assert (len(decoded) == len(gt)), 'batch size mismatch: {}!={}'.format(len(decoded), len(gt))
        mean_indiv_len = np.mean([len(s.split()) for s in gt])
        return self.get_mean(decoded, gt, mean_indiv_len, self.wer_sentence)
def Empty(s):
    """Return the empty value of sequence or regular-expression sort *s*.

    Raises ``Z3Exception`` for any other sort.
    """
    if isinstance(s, SeqSortRef):
        ast = Z3_mk_seq_empty(s.ctx_ref(), s.ast)
        return SeqRef(ast, s.ctx)
    if isinstance(s, ReSortRef):
        ast = Z3_mk_re_empty(s.ctx_ref(), s.ast)
        return ReRef(ast, s.ctx)
    raise Z3Exception('Non-sequence, non-regular expression sort passed to Empty')
class SawyerBoxCloseEnv(SawyerXYZEnv):
    """Sawyer 'box close' task: grasp the lid by its handle, lift it, and
    place it onto the box.

    Fixes applied: ``model_name`` is restored as a ``@property`` (``__init__``
    passes ``self.model_name`` to the parent as the XML path, which would be
    a bound method otherwise), and the bare ``_assert_task_is_set`` line is
    restored as the decorator on ``step``.
    """

    def __init__(self):
        liftThresh = 0.12
        goal_low = ((- 0.1), 0.85, 0.1329)
        goal_high = (0.1, 0.95, 0.1331)
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.05), 0.55, 0.02)
        obj_high = (0.05, 0.6, 0.02)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_angle': 0.3, 'obj_init_pos': np.array([0, 0.6, 0.02], dtype=np.float32), 'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32)}
        self.goal = np.array([0.0, 0.9, 0.133])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self.liftThresh = liftThresh
        self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        """Path to the MuJoCo XML model for this task."""
        return full_v1_path_for('sawyer_xyz/sawyer_box.xml')

    @_assert_task_is_set
    def step(self, action):
        """Advance one control step; returns (obs, reward, done, info)."""
        ob = super().step(action)
        (reward, _, reachDist, pickRew, _, placingDist) = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {'reachDist': reachDist, 'pickRew': pickRew, 'epRew': reward, 'goalDist': placingDist, 'success': float((placingDist <= 0.08))}
        return (ob, reward, False, info)

    def _get_pos_objects(self):
        # The manipulated object is tracked via its handle geom.
        return self.data.get_geom_xpos('handle').copy()

    def reset_model(self):
        """Reset hand, object and (optionally randomized) goal placement."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.objHeight = self.data.get_geom_xpos('handle')[2]
        self.boxheight = self.get_body_com('box')[2]
        self.heightTarget = (self.objHeight + self.liftThresh)
        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            # Resample until object and goal are at least 0.25 apart in XY.
            while (np.linalg.norm((goal_pos[:2] - goal_pos[(- 3):(- 1)])) < 0.25):
                goal_pos = self._get_state_rand_vec()
            self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[(- 1)]]))
            self._target_pos = goal_pos[(- 3):]
        self.sim.model.body_pos[self.model.body_name2id('box')] = np.concatenate((self._target_pos[:2], [self.boxheight]))
        self._set_obj_xyz(self.obj_init_pos)
        self.maxPlacingDist = (np.linalg.norm((np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._target_pos))) + self.heightTarget)
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)
        self.pickCompleted = False

    def compute_reward(self, actions, obs):
        """Shaped reward = reach + pick + place terms; also returns the
        distances used by ``step``'s info dict."""
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        heightTarget = self.heightTarget
        placeGoal = self._target_pos
        placingDist = np.linalg.norm((objPos - placeGoal))
        reachDist = np.linalg.norm((objPos - fingerCOM))
        def reachReward():
            # Approach in XY first; descend once roughly above the object.
            reachRew = (- reachDist)
            reachDistxy = np.linalg.norm((objPos[:(- 1)] - fingerCOM[:(- 1)]))
            zRew = np.linalg.norm((fingerCOM[(- 1)] - self.init_fingerCOM[(- 1)]))
            if (reachDistxy < 0.05):
                reachRew = (- reachDist)
            else:
                reachRew = ((- reachDistxy) - (2 * zRew))
            if (reachDist < 0.05):
                # Small bonus for closing the gripper when near the object.
                reachRew = ((- reachDist) + (max(actions[(- 1)], 0) / 50))
            return (reachRew, reachDist)
        def pickCompletionCriteria():
            tolerance = 0.01
            if (objPos[2] >= (heightTarget - tolerance)):
                return True
            else:
                return False
        if pickCompletionCriteria():
            self.pickCompleted = True
        def objDropped():
            return ((objPos[2] < (self.objHeight + 0.005)) and (placingDist > 0.02) and (reachDist > 0.02))
        def orig_pickReward():
            hScale = 100
            if (self.pickCompleted and (not objDropped())):
                return (hScale * heightTarget)
            elif ((reachDist < 0.1) and (objPos[2] > (self.objHeight + 0.005))):
                return (hScale * min(heightTarget, objPos[2]))
            else:
                return 0
        def placeReward():
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            cond = (self.pickCompleted and (reachDist < 0.1) and (not objDropped()))
            if cond:
                placeRew = ((1000 * (self.maxPlacingDist - placingDist)) + (c1 * (np.exp(((- (placingDist ** 2)) / c2)) + np.exp(((- (placingDist ** 2)) / c3)))))
                placeRew = max(placeRew, 0)
                return [placeRew, placingDist]
            else:
                return [0, placingDist]
        (reachRew, reachDist) = reachReward()
        pickRew = orig_pickReward()
        (placeRew, placingDist) = placeReward()
        assert ((placeRew >= 0) and (pickRew >= 0))
        reward = ((reachRew + pickRew) + placeRew)
        return [reward, reachRew, reachDist, pickRew, placeRew, placingDist]
class MyTestCase(unittest.TestCase):
    """np.eye construction tests; each inner helper takes the size N explicitly.

    Fix applied: the inner ``eyetest`` helpers referenced an undefined global
    ``N`` yet were invoked as ``eyetest(N=5)`` (so they raised TypeError).
    Presumably a decorator that injected ``N`` was stripped during
    extraction; making ``N`` an explicit parameter is the equivalent
    standalone fix.
    """

    def test_simple(self):
        def eyetest(N):
            return np.eye(N)
        self.assertTrue(np.allclose(eyetest(N=5), np.eye(5)))

    def test_rect(self):
        def eyetest(N):
            return np.eye(N, (N + 1))
        self.assertTrue(np.allclose(eyetest(N=5), np.eye(5, 6)))

    def test_rect_subdiagonal(self):
        def eyetest(N):
            return np.eye(N, (N + 1), (- 1))
        self.assertTrue(np.allclose(eyetest(N=5), np.eye(5, 6, (- 1))))

    def test_superdiagonal(self):
        def eyetest(N):
            return np.eye(N, k=2)
        self.assertTrue(np.allclose(eyetest(N=5), np.eye(5, k=2)))
def torch_recovery(obj, path, end_of_epoch, device=None):
    """Restore *obj* from the checkpoint file at *path*.

    *end_of_epoch* is accepted for interface compatibility and ignored.
    Tries a strict ``load_state_dict`` first and falls back to the plain
    call for objects whose ``load_state_dict`` does not accept ``strict``.

    Improvement: the checkpoint is deserialized once and reused on the
    fallback path (previously ``torch.load`` re-read the file on TypeError).
    """
    del end_of_epoch
    state = torch.load(path, map_location=device)
    try:
        obj.load_state_dict(state, strict=True)
    except TypeError:
        # Object's load_state_dict has no `strict` keyword.
        obj.load_state_dict(state)
def manual_seed(args_or_seed: Union[(int, argparse.Namespace)], fix_cudnn=False):
    """Seed the python, numpy and torch RNGs (and PYTHONHASHSEED).

    Accepts either a bare integer seed or an argparse Namespace carrying a
    ``seed`` attribute.  When *fix_cudnn* is set, cuDNN is additionally
    pinned to deterministic, non-benchmarked kernels.
    """
    seed = args_or_seed.seed if hasattr(args_or_seed, 'seed') else args_or_seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if fix_cudnn:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def revert_sync_batchnorm(module):
    """Recursively replace SyncBatchNorm layers in *module* with _BatchNormXd.

    Both torch's SyncBatchNorm and (when available) mmcv's variant are
    converted; running statistics, affine parameters, training flag and
    qconfig are carried over.  Returns the converted module tree.
    """
    module_output = module
    module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm]
    if hasattr(mmcv, 'ops'):
        # mmcv ships its own SyncBatchNorm when built with ops support.
        module_checklist.append(mmcv.ops.SyncBatchNorm)
    if isinstance(module, tuple(module_checklist)):
        module_output = _BatchNormXd(module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats)
        if module.affine:
            # Reassign the Parameter objects directly, without autograd tracking.
            with torch.no_grad():
                module_output.weight = module.weight
                module_output.bias = module.bias
        module_output.running_mean = module.running_mean
        module_output.running_var = module.running_var
        module_output.num_batches_tracked = module.num_batches_tracked
        module_output.training = module.training
        if hasattr(module, 'qconfig'):
            # Preserve the quantization config when present.
            module_output.qconfig = module.qconfig
    for (name, child) in module.named_children():
        module_output.add_module(name, revert_sync_batchnorm(child))
    # Drop the local reference to the original module.
    del module
    return module_output
def test_multi_objective_max_loss_negative():
    """Negative max empirical losses must be rejected with a ValueError."""
    with pytest.raises(ValueError):
        MultiObjectiveCDV(analytical, max_empirical_losses=[max_empirical_loss_neg, max_empirical_loss_neg])
class LPPool1d(_LPPoolNd):
    """1-D power-average (LP) pooling for complex-valued tensors.

    Presumably ``cF.complex_fcaller`` applies ``F.lp_pool1d`` through the
    complex-dispatch wrapper — confirm against the helper's definition.
    """
    # Pooling geometry, inherited semantics from _LPPoolNd.
    kernel_size: _size_1_t
    stride: _size_1_t
    def forward(self, input: Tensor) -> Tensor:
        return cF.complex_fcaller(F.lp_pool1d, input, float(self.norm_type), self.kernel_size, self.stride, self.ceil_mode)
def count_paren_parity(tree):
    """Return the count of '(' minus the count of ')' in *tree*.

    Zero means the parentheses are balanced in number (though not
    necessarily well-nested).
    """
    return tree.count('(') - tree.count(')')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.