code stringlengths 101 5.91M |
|---|
def load_user_goal(filename):
    """Fetch a JSON document from S3 and return the value stored under its first key."""
    payload = read_s3_json(S3_BUCKET_NAME, filename)
    # Dicts preserve insertion order, so list(payload)[0] is the first key;
    # an empty document raises IndexError, same as before.
    first_key = list(payload)[0]
    return payload[first_key]
def auto_decode(data):
    """Decode raw bytes to str: BOM first, then a coding comment, then the locale default.

    Detection order:
    1. A known byte-order mark (stripped before decoding).
    2. A '#'-prefixed encoding declaration in the first two lines.
    3. The preferred locale encoding, falling back to the interpreter default.
    """
    for bom, bom_encoding in BOMS:
        if data.startswith(bom):
            return data[len(bom):].decode(bom_encoding)
    for line in data.split(b'\n')[:2]:
        if line[:1] == b'#':
            match = ENCODING_RE.search(line)
            if match:
                declared = match.groups()[0].decode('ascii')
                return data.decode(declared)
    fallback = locale.getpreferredencoding(False) or sys.getdefaultencoding()
    return data.decode(fallback)
def test_maxpool1d_padding_valid():
    """Check rf.max_pool1d with padding='valid' pools over the time dim and runs end-to-end."""
    # Dynamic time dim (per-batch lengths) and a fixed 7-channel feature dim.
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[(Tensor, Dim)]:
            # 'valid' padding: the returned spatial dim is shorter than the input one.
            return rf.max_pool1d(x, pool_size=3, padding='valid', in_spatial_dim=in_spatial_dim)
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        (out, out_spatial_dim) = model(extern_data['data'], in_spatial_dim=time_dim)
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, in_dim))
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step)
def save_json(json_file, filename):
    """Serialize *json_file* (any JSON-compatible object) to *filename*.

    Pretty-prints with 4-space indents and preserves key order
    (``sort_keys=False``). UTF-8 is requested explicitly so the output
    does not depend on the platform's locale encoding (the original
    relied on the default, which varies across systems).
    """
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(json_file, f, indent=4, sort_keys=False)
def test_no_branchless_code_object_register_multiple():
    """Predicates registered for code object 0 should leave only object 1 branch-less."""
    tracer = ExecutionTracer()
    tracer.register_code_object(MagicMock())  # receives id 0
    tracer.register_code_object(MagicMock())  # receives id 1
    # Both predicates belong to code object 0, so object 0 has branches.
    tracer.register_predicate(MagicMock(code_object_id=0))
    tracer.register_predicate(MagicMock(code_object_id=0))
    assert (tracer.get_subject_properties().branch_less_code_objects == OrderedSet([1]))
def get_mutualinfo_obs_network_args(env, embedding_dim):
    """Build the kwargs dict for the mutual-information observation network.

    `env` is accepted for signature compatibility with sibling builders but
    is not read here; the network maps an embedding of size `embedding_dim`
    to a single scalar through two ReLU hidden layers.
    """
    return dict(
        name='mutualinfo_obs_network',
        input_shape=(embedding_dim,),
        output_dim=1,
        hidden_sizes=(64, 64),
        hidden_nonlinearity=tf.nn.relu,
        output_nonlinearity=None,
        batch_normalization=False,
    )
def coco_eval_with_return(result_files, result_types, coco, max_dets=(100, 300, 1000)):
    """Run COCO evaluation and return the metrics per result type.

    Parameters:
        result_files: mapping of result type -> path to a '.json' results file.
        result_types: iterable of types among 'proposal', 'proposal_fast',
            'bbox', 'segm', 'keypoints'.
        coco: a COCO object or a path to an annotation file.
        max_dets: detection-count thresholds used for proposal recall.

    Returns:
        dict mapping result type to either a {metric_name: value} dict
        (for 'bbox'/'segm') or the raw cocoEval.stats array. The special
        ['proposal_fast'] path only prints recall values and returns None.
    """
    for res_type in result_types:
        assert (res_type in ['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'])
    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)
    if (result_types == ['proposal_fast']):
        # Fast recall path: print AR for each max_dets value, no return value.
        ar = fast_eval_recall(result_files, coco, np.array(max_dets))
        for (i, num) in enumerate(max_dets):
            print('{}\t= {:.4f}'.format(num, ar[i]))
        return
    eval_results = {}
    for res_type in result_types:
        result_file = result_files[res_type]
        assert result_file.endswith('.json')
        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        # Proposals are scored with the 'bbox' IoU type, class-agnostic.
        iou_type = ('bbox' if (res_type == 'proposal') else res_type)
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if (res_type == 'proposal'):
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        if ((res_type == 'segm') or (res_type == 'bbox')):
            # Name the 12 standard COCO summary statistics in stats order.
            metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', 'ARs', 'ARm', 'ARl']
            eval_results[res_type] = {metric_names[i]: cocoEval.stats[i] for i in range(len(metric_names))}
        else:
            eval_results[res_type] = cocoEval.stats
    return eval_results
def sample_gumbel(shape, eps=1e-20):
    """Draw Gumbel(0, 1) noise of the given shape on the module-level `device`.

    Uses the inverse-CDF trick g = -log(-log(U)) with U ~ Uniform(0, 1).
    `eps` now guards BOTH logarithms: U == 0 would give log(0) = -inf, and
    U close enough to 1 makes the inner -log(U + eps) round to 0, so the
    outer log of the original returned -inf as well.
    """
    unif = torch.rand(*shape).to(device)
    return -torch.log(-torch.log(unif + eps) + eps)
class RunningAvg(object):
    """Exponential moving average: value <- gamma * value + (1 - gamma) * new."""

    def __init__(self, gamma, init_value=None):
        # A None initial value means "adopt the first observation as-is".
        self._value = init_value
        self._gamma = gamma

    def update(self, new_val):
        """Fold one observation into the running average."""
        if self._value is None:
            self._value = new_val
        else:
            self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val

    def __float__(self):
        # Bug fix: __float__ must return an actual float. The original
        # returned self._value unchanged, which raises
        # "TypeError: __float__ returned non-float" whenever the stored
        # value is an int (e.g. an int init_value or first update).
        return float(self._value)
class DecisionTransformerGPT2PreTrainedModel(metaclass=DummyObject):
    """Placeholder class used when torch is not installed.

    DummyObject-backed stubs fail with an informative error instead of an
    opaque ImportError; instantiating raises via requires_backends.
    """
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Raises immediately, pointing the user at the missing 'torch' backend.
        requires_backends(self, ['torch'])
class BASE_USE(nn.Module):
    """U-shaped segmentation network with MHSA encoder stages and SE adapters.

    Pipeline: conv stem (4x downsample) -> num_stages of
    (depthwise-conv patch embed + MHSA stage + SEBlock adapter) ->
    conv bridge (doubles channels) -> four transformer U-Net decoders with
    skip connections -> 1x1 conv head, upsampled back to the input size.

    NOTE(review): exact tensor shapes depend on DWCPatchEmbed/MHSA_stage/
    UnetDecodingBlockTransformer, which are defined elsewhere -- confirm there.
    """
    def __init__(self, img_size=512, in_chans=3, num_stages=4, num_layers=[2, 2, 2, 2], embed_dims=[64, 128, 320, 512], mlp_ratios=[8, 8, 4, 4], num_heads=[8, 8, 8, 8], qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=partial(nn.LayerNorm, eps=1e-06), conv_norm=nn.BatchNorm2d, **kwargs):
        super(BASE_USE, self).__init__()
        self.num_stages = num_stages
        # Stem: two stride-2 Conv-BN-Hardswish blocks -> 1/4 resolution.
        self.stem = nn.Sequential(Conv2d_BN(in_chans, (embed_dims[0] // 2), kernel_size=3, stride=2, pad=1, act_layer=nn.Hardswish), Conv2d_BN((embed_dims[0] // 2), embed_dims[0], kernel_size=3, stride=2, pad=1, act_layer=nn.Hardswish))
        # Stage 0 keeps resolution (stride 1); later stages downsample by 2.
        self.patch_embed_stages = nn.ModuleList([DWCPatchEmbed(in_chans=(embed_dims[idx] if (idx == 0) else embed_dims[(idx - 1)]), embed_dim=embed_dims[idx], patch_size=3, stride=(1 if (idx == 0) else 2), conv_norm=conv_norm) for idx in range(self.num_stages)])
        self.mhsa_stages = nn.ModuleList([MHSA_stage(embed_dims[idx], num_layers=num_layers[idx], num_heads=num_heads[idx], mlp_ratio=mlp_ratios[idx], qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate, norm_layer=norm_layer) for idx in range(self.num_stages)])
        # Squeeze-excitation adapters on every encoder output, the bridge, and
        # every decoder output.
        self.encoder_adapters = nn.ModuleList([SEBlock(embed_dims[idx], reduction=8) for idx in range(self.num_stages)])
        self.bridge_adapter = SEBlock((embed_dims[3] * 2), reduction=8)
        self.decoder_adapters = nn.ModuleList([SEBlock(embed_dims[((- idx) - 1)], reduction=8) for idx in range(self.num_stages)])
        # Bridge: two conv blocks that double the deepest channel count.
        self.bridge = nn.Sequential(nn.Conv2d(embed_dims[3], embed_dims[3], kernel_size=3, stride=1, padding=1), conv_norm(embed_dims[3]), nn.ReLU(inplace=True), nn.Conv2d(embed_dims[3], (embed_dims[3] * 2), kernel_size=3, stride=1, padding=1), conv_norm((embed_dims[3] * 2)), nn.ReLU(inplace=True))
        # Fresh MHSA stages for the decoder path (not shared with the encoder).
        self.mhsa_list = []
        for idx in range(self.num_stages):
            self.mhsa_list.append(MHSA_stage(embed_dims[idx], num_layers=num_layers[idx], num_heads=num_heads[idx], mlp_ratio=mlp_ratios[idx], qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate, norm_layer=norm_layer))
        # Decoders run deepest-to-shallowest, mirroring the encoder stages.
        self.decoder1 = UnetDecodingBlockTransformer((embed_dims[3] * 2), embed_dims[3], self.mhsa_list[3], conv_norm=conv_norm)
        self.decoder2 = UnetDecodingBlockTransformer(embed_dims[3], embed_dims[2], self.mhsa_list[2], conv_norm=conv_norm)
        self.decoder3 = UnetDecodingBlockTransformer(embed_dims[2], embed_dims[1], self.mhsa_list[1], conv_norm=conv_norm)
        self.decoder4 = UnetDecodingBlockTransformer(embed_dims[1], embed_dims[0], self.mhsa_list[0], conv_norm=conv_norm)
        # Single-channel segmentation head (logits; no activation here).
        self.finalconv = nn.Sequential(nn.Conv2d(embed_dims[0], 1, kernel_size=1))
        self.apply(self._init_weights)
    def _init_weights(self, m):
        """Standard trunc-normal / constant initialization per layer type."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if (isinstance(m, nn.Linear) and (m.bias is not None)):
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            # Kaiming-style fan-out initialization for conv weights.
            fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
            if (m.bias is not None):
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
    def forward(self, x, out_feat=False, out_seg=True):
        """Run the U-Net.

        Returns the segmentation logits, or a {'seg', 'feat'} dict when
        out_feat is True (feat = pooled deepest encoder features); with
        out_seg=False only the pooled features are computed.
        """
        img_size = x.size()[2:]
        x = self.stem(x)
        encoder_outs = []
        for idx in range(self.num_stages):
            x = self.patch_embed_stages[idx](x)
            (B, C, H, W) = x.shape
            # MHSA stages consume token sequences; flatten spatial dims.
            x = rearrange(x, 'b c h w -> b (h w) c')
            x = self.mhsa_stages[idx](x, H, W)
            x = rearrange(x, 'b (h w) c -> b c h w', w=W, h=H).contiguous()
            encoder_outs.append(self.encoder_adapters[idx](x))
        if (out_seg == False):
            # Feature-only mode: skip the whole decoder path.
            return {'seg': None, 'feat': nn.functional.adaptive_avg_pool2d(encoder_outs[3], 1).reshape(B, (- 1))}
        out = self.bridge(encoder_outs[3])
        out = self.bridge_adapter(out)
        # Decode with skip connections, deepest encoder output first.
        out = self.decoder1(out, encoder_outs[3])
        out = self.decoder_adapters[0](out)
        out = self.decoder2(out, encoder_outs[2])
        out = self.decoder_adapters[1](out)
        out = self.decoder3(out, encoder_outs[1])
        out = self.decoder_adapters[2](out)
        out = self.decoder4(out, encoder_outs[0])
        out = self.decoder_adapters[3](out)
        # Upsample back to the original input resolution.
        out = nn.functional.interpolate(out, size=img_size, mode='bilinear', align_corners=False)
        out = self.finalconv(out)
        if out_feat:
            return {'seg': out, 'feat': nn.functional.adaptive_avg_pool2d(encoder_outs[3], 1).reshape(B, (- 1))}
        else:
            return out
def report_gen(target, data):
    """Write the PROFILE/DEBUG boilerplate section of a generated Makefile.

    Parameters:
        target: writable text stream receiving the Makefile text.
        data: config dict; when data['testinfo']['profile'] == 'no' the
            default ``PROFILE := no`` assignment is suppressed.
    """
    target.write('#+\n')
    target.write('# The following parameters are assigned with default values. These parameters can\n')
    target.write('# be overridden through the make command line\n')
    target.write('#+\n')
    target.write('\n')
    # The original expressed this as an if-branch containing only `pass`
    # with the real work in the else; inverted here for clarity.
    profile_disabled = (('testinfo' in data) and ('profile' in data['testinfo'])
                        and (data['testinfo']['profile'] == 'no'))
    if not profile_disabled:
        target.write('PROFILE := no\n')
    target.write('\n')
    target.write('#Generates profile summary report\n')
    target.write('ifeq ($(PROFILE), yes)\n')
    target.write('LDCLFLAGS += --profile_kernel data:all:all:all\n')
    target.write('endif\n')
    target.write('\n')
    target.write('DEBUG := no\n')
    target.write('B_TEMP = `$(ABS_COMMON_REPO)/common/utility/parse_platform_list.py $(DEVICE)`\n')
    target.write('\n')
    target.write('#Generates debug summary report\n')
    target.write('ifeq ($(DEBUG), yes)\n')
    target.write('LDCLFLAGS += --dk list_ports\n')
    target.write('endif\n')
    target.write('\n')
def prefix(*args: str):
    """Push the stringified *args* onto PREFIX_STACK for the duration of a block.

    NOTE(review): this is a single-yield generator that restores the stack
    afterwards -- it was almost certainly decorated with
    @contextlib.contextmanager in the original source; confirm upstream.
    """
    global PREFIX_STACK
    l = list(map(str, args))
    try:
        PREFIX_STACK.insert(0, l)
        (yield)
    finally:
        # Sanity check: nothing else may have popped or reordered our entry.
        assert (PREFIX_STACK[0] is l)
        PREFIX_STACK.pop(0)
def mk_auto_src():
    """Regenerate all auto-generated sources; no-op in makefiles-only mode."""
    if ONLY_MAKEFILES:
        return
    exec_pyg_scripts()
    mk_pat_db()
    mk_all_install_tactic_cpps()
    mk_all_mem_initializer_cpps()
    mk_all_gparams_register_modules()
def bias_variable(shape, name=None):
    """Create a zero-initialized TF bias variable of the given shape.

    Returns an anonymous tf.Variable when `name` is None; otherwise a
    (scope-reusable) tf.get_variable registered under that name.
    """
    zeros = tf.constant(0.0, shape=shape)
    if name is not None:
        return tf.get_variable(name, initializer=zeros)
    return tf.Variable(zeros)
def entity_coverage_with_elq(split):
    """Measure how well ELQ entity linking covers gold WebQSP entities.

    Loads gold s-expressions for `split` plus the ELQ linking output, then
    prints coverage (some gold entity set is a subset of the linked set),
    exact-set-equality rates, and aggregate F1/precision/recall.
    """
    # Score threshold baked into the ELQ output filename.
    threshold = (- 5)
    dataset = load_json(f'outputs/WebQSP.{split}.expr.json')
    linking_result = load_json('misc/webqsp_{}_elq{}_mid.json'.format(split, threshold))
    linking_result = dict([(x['id'], x) for x in linking_result])
    counted = 0
    covered_cnt = 0
    equal_cnt = 0
    parsable_cnt = 0
    fpr_values = []
    for (i, data) in enumerate(dataset):
        # Keep only questions that have at least one Good/Complete parse.
        skip = True
        for pidx in range(0, len(data['Parses'])):
            np = data['Parses'][pidx]  # NOTE: shadows any module-level numpy 'np'
            if ((np['AnnotatorComment']['QuestionQuality'] == 'Good') and (np['AnnotatorComment']['ParseQuality'] == 'Complete')):
                skip = False
        if ((len(data['Parses']) == 0) or skip):
            continue
        counted += 1
        gt_s_expr = [parse['SExpr'] for parse in data['Parses']]
        gt_s_expr = [x for x in gt_s_expr if (x != 'null')]
        if (not gt_s_expr):
            continue
        parsable_cnt += 1
        # One gold entity set per parse; covered if ANY set is fully linked.
        gt_entities_sets = [set(extract_entities(x)) for x in gt_s_expr]
        lnk_result = linking_result[data['QuestionId']]
        extracted_entities = set(lnk_result['freebase_ids'])
        is_covered = any([x.issubset(extracted_entities) for x in gt_entities_sets])
        covered_cnt += is_covered
        is_equal = any([(x == extracted_entities) for x in gt_entities_sets])
        equal_cnt += is_equal
        fpr_values.append(fpr_evaluate(extracted_entities, gt_entities_sets))
    print((parsable_cnt / counted), parsable_cnt, counted)
    print((covered_cnt / parsable_cnt), (equal_cnt / parsable_cnt), len(dataset))
    print((covered_cnt / counted), (equal_cnt / counted), len(dataset))
    # fpr_evaluate appears to return (f1, precision, recall) per question.
    agg_f1 = sum([x[0] for x in fpr_values])
    agg_pre = sum([x[1] for x in fpr_values])
    agg_rec = sum([x[2] for x in fpr_values])
    print('F1', (agg_f1 / parsable_cnt), (agg_f1 / counted))
    print('Pre', (agg_pre / parsable_cnt), (agg_pre / counted))
    print('Rec', (agg_rec / parsable_cnt), (agg_rec / counted))
class WolpertWolf(EntropyEstimator):
    """Wolpert-Wolf Bayesian entropy estimator with a fixed Dirichlet alpha."""
    def __init__(self, alpha):
        super().__init__()
        self.alpha = self.check_alpha(alpha)
    # NOTE(review): the bare name below is almost certainly the residue of a
    # stripped decorator on fit(); restore it from the upstream source.
    _function
    def fit(self, nk, k=None, zk=None):
        """Estimate entropy from counts `nk`; alphabet size `k` is required.

        Sets self.estimate_ and self.err_, and returns self (sklearn style).
        `zk`, when given, is the multiplicity representation of the counts.
        """
        if (k is None):
            raise NddError('Wolper-Wolf estimator needs k')
        if (k == 1):
            # Degenerate one-symbol alphabet: entropy and error are both zero.
            (self.estimate_, self.err_) = (PZERO, PZERO)
            return self
        if (zk is not None):
            # Multiplicities available: use the dedicated Fortran routine.
            (self.estimate_, self.err_) = ndd.fnsb.ww_from_multiplicities(nk, zk, k, self.alpha)
        else:
            (self.estimate_, self.err_) = ndd.fnsb.ww(nk, k, self.alpha)
        return self
def gen_nsml_report(acc_train, aux_out_train, acc_dev, aux_out_dev):
    """Report one epoch of train/dev metrics to NSML.

    NOTE(review): `epoch`, `args`, and `nsml` are not parameters -- they must
    resolve as globals (or an enclosing scope) at call time; confirm against
    the caller. `aux_out_dev` is accepted but never read here.
    """
    (ave_loss, acc_lx, acc_x) = acc_train
    (grad_abs_mean_mean, grad_abs_mean_sig, grad_abs_sig_mean) = aux_out_train
    (ave_loss_t, acc_lx_t, acc_x_t) = acc_dev
    nsml.report(step=epoch, epoch=epoch, epochs_total=args.tepoch, train__loss=ave_loss, train__acc_lx=acc_lx, train__acc_x=acc_x, train_grad_abs_mean_mean=float(grad_abs_mean_mean), train_grad_abs_mean_sig=float(grad_abs_mean_sig), train_grad_abs_sig_mean=float(grad_abs_sig_mean), dev__loss=ave_loss_t, dev__acc_lx_t=acc_lx_t, dev__acc_x_t=acc_x_t, scope=locals())
class BiaffineScorer(nn.Module):
    """Biaffine attention scorer: two MLP projections feeding a Biaffine layer."""

    def __init__(self, n_in=800, n_out=400, n_out_label=1, bias_x=True, bias_y=False, scaling=False, dropout=0.33):
        super(BiaffineScorer, self).__init__()
        self.l = MLP(n_in=n_in, n_out=n_out, dropout=dropout)
        self.r = MLP(n_in=n_in, n_out=n_out, dropout=dropout)
        self.attn = Biaffine(n_in=n_out, n_out=n_out_label, bias_x=bias_x, bias_y=bias_y)
        # Temperature used to scale both projections; 0 (falsy) disables it.
        self.scaling = n_out ** (1 / 4) if scaling else 0
        self.n_in = n_in

    def _project(self, h, q):
        """Run h through the left MLP and q through the right MLP."""
        return self.l(h), self.r(q)

    def forward(self, h):
        """Score h against itself."""
        left, right = self._project(h, h)
        if self.scaling:
            left = left / self.scaling
            right = right / self.scaling
        return self.attn(left, right)

    def forward_v2(self, h, q):
        """Score h against q through the standard biaffine path."""
        left, right = self._project(h, q)
        if self.scaling:
            left = left / self.scaling
            right = right / self.scaling
        return self.attn(left, right)

    def forward_v3(self, h, q):
        """Score h against q via the Biaffine layer's forward_v2 (unscaled)."""
        src, dec = self._project(h, q)
        return self.attn.forward_v2(src, dec)

    def forward_linear(self, h, q):
        """Score h against q via the Biaffine layer's forward2 (unscaled)."""
        src, dec = self._project(h, q)
        return self.attn.forward2(src, dec)
class Net(nn.Module):
    """Frozen pretrained MobileNetV2 with a fresh 200-way classifier head."""

    def __init__(self):
        # Fixed seed so the new classifier head initializes reproducibly.
        torch.manual_seed(0)
        super(Net, self).__init__()
        backbone = torchvision.models.mobilenet_v2(pretrained=True)
        backbone.requires_grad_(False)  # freeze all pretrained weights
        # Replacing the head AFTER freezing leaves the new layer trainable.
        backbone.classifier[1] = torch.nn.Linear(in_features=1280, out_features=200, bias=True)
        self.model = backbone

    def forward(self, x):
        return self.model.forward(x)
# NOTE(review): the bare tuple below is the residue of a stripped decorator,
# almost certainly '@unittest.skipIf(not workspace.C.use_mkldnn, ...)'.
((not workspace.C.use_mkldnn), 'No MKLDNN support.')
class DropoutTest(hu.HypothesisTestCase):
    """Hypothesis-driven checks for the Caffe2 Dropout operator.

    NOTE(review): the '(X=..., **mu.gcs)' lines and the '(True, ...)' line
    below are residues of stripped '@given(...)' / '@unittest.skipIf(...)'
    decorators; as written they are not valid Python and must be restored
    from the upstream source before this file can run.
    """
    (X=hu.tensor(), in_place=st.booleans(), ratio=st.floats(0, 0.999), **mu.gcs)
    def test_dropout_is_test(self, X, in_place, ratio, gc, dc):
        """In test mode, Dropout is the identity and the mask is all ones."""
        op = core.CreateOperator('Dropout', ['X'], [('X' if in_place else 'Y')], ratio=ratio, is_test=True)
        self.assertDeviceChecks(dc, op, [X], [0])
        def reference_dropout_test(x):
            return (x, np.ones(x.shape, dtype=np.bool))
        self.assertReferenceChecks(gc, op, [X], reference_dropout_test, outputs_to_check=[0])
    (X=hu.tensor(), in_place=st.booleans(), output_mask=st.booleans(), **mu.gcs)
    (True, 'Skip duo to different rand seed.')
    def test_dropout_ratio0(self, X, in_place, output_mask, gc, dc):
        """With ratio 0, Dropout passes the input through unchanged."""
        is_test = (not output_mask)
        op = core.CreateOperator('Dropout', ['X'], ([('X' if in_place else 'Y')] + (['mask'] if output_mask else [])), ratio=0.0, is_test=is_test)
        self.assertDeviceChecks(dc, op, [X], [0])
        def reference_dropout_ratio0(x):
            return ((x,) if is_test else (x, np.ones(x.shape, dtype=np.bool)))
        self.assertReferenceChecks(gc, op, [X], reference_dropout_ratio0, outputs_to_check=[0])
def get_chord_sequence(ev_seq, chord_evs):
    """Extract complete chord triplets from an event sequence.

    Keeps only chord-related events, then collects every run of exactly
    three events that ends on a 'Chord-Slash' event; each such run is one
    chord. Returns a list of 3-element chord lists.
    """
    # Drop everything that is not a chord event of any known type.
    filtered = [ev for ev in ev_seq
                if any(ev in chord_evs[typ] for typ in chord_evs.keys())]
    legal = []
    run_len = 0
    for idx, ev in enumerate(filtered):
        run_len += 1
        # A slash event arriving exactly third closes a well-formed chord.
        if ev in chord_evs['Chord-Slash'] and run_len == 3:
            run_len = 0
            legal.extend(filtered[idx - 2:idx + 1])
    assert len(legal) % 3 == 0
    return [legal[i:i + 3] for i in range(0, len(legal), 3)]
def reduce_lr_on_platu_step_fn(scheduler: Any, i_iter: int, loss_value: float):
    """Step a ReduceLROnPlateau-style scheduler with the current loss.

    `i_iter` is unused; it is kept so all step-fns share one signature.
    (The 'platu' typo in the name is preserved -- callers reference it.)
    """
    scheduler.step(loss_value)
class Function_factorial(GinacFunction):
    """Sage symbolic factorial function, backed by GiNaC with conversions
    registered for Maxima, Mathematica, SymPy, FriCAS and Giac."""
    def __init__(self):
        GinacFunction.__init__(self, 'factorial', latex_name='{\\rm factorial}', conversions=dict(maxima='factorial', mathematica='Factorial', sympy='factorial', fricas='factorial', giac='factorial'))
    def _eval_(self, x):
        """Evaluate factorial(x) when possible; implicitly return None to stay symbolic."""
        if isinstance(x, (int, Integer)):
            try:
                return x.factorial()
            except OverflowError:
                # Too large for an exact integer factorial: leave unevaluated.
                return
        elif isinstance(x, Rational):
            # Non-integer rational: factorial(x) = gamma(x + 1).
            from sage.functions.gamma import gamma
            return gamma((x + 1))
        elif (isinstance(x, Element) and hasattr(x.parent(), 'precision')):
            # Inexact ring element (e.g. RealField): use the parent's own gamma.
            return (x + 1).gamma()
        elif self._is_numerical(x):
            from sage.functions.gamma import gamma
            return gamma((x + 1))
def get_param_space(trial):
    """Declare the Optuna hyperparameter search space on `trial`.

    Called purely for its side effects on `trial`; returns None. Several
    entries are single-choice categoricals, which pins those values while
    keeping them visible in the study record.
    """
    trial.suggest_float('learning_rate', 0.0001, 0.001, log=True)
    trial.suggest_float('lr_decay_rate', 0.7, 1.0, log=True)
    trial.suggest_categorical('weight_decay', [1e-06, 1e-07, 0])
    trial.suggest_int('layers', 1, 6)
    trial.suggest_int('set_layers', 0, 0)
    trial.suggest_int('pe_embed_k', 0, 20)
    trial.suggest_float('dropout', 0, 0.3, step=0.1)
    trial.suggest_categorical('hidden_size', [128, 256, 512])
    trial.suggest_categorical('batch_size', [8, 16, 32, 64])
    trial.suggest_categorical('mpnn_type', ['GGNN'])
    trial.suggest_categorical('pool_op', ['avg', 'attn'])
    trial.suggest_categorical('root_encode', ['gnn'])
    trial.suggest_categorical('embed_adduct', [True])
    trial.suggest_categorical('inject_early', [False, True])
class CUDATestBase(DeviceTypeTestBase):
    """Device-type test base for CUDA, with leak checks and non-default
    streams enabled for every test."""
    device_type = 'cuda'
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True
    # Populated once in setUpClass.
    primary_device: ClassVar[str]
    cudnn_version: ClassVar[Any]
    no_magma: ClassVar[bool]
    no_cudnn: ClassVar[bool]
    def has_cudnn(self):
        """True when cuDNN is usable on this device."""
        return (not self.no_cudnn)
    # NOTE(review): the three methods below take `cls` and are surely
    # @classmethod in the original source -- the decorators appear to have
    # been stripped from this copy.
    def get_primary_device(cls):
        return cls.primary_device
    def get_all_devices(cls):
        """Return the primary device first, then every other visible CUDA device."""
        primary_device_idx = int(cls.get_primary_device().split(':')[1])
        num_devices = torch.cuda.device_count()
        prim_device = cls.get_primary_device()
        cuda_str = 'cuda:{0}'
        non_primary_devices = [cuda_str.format(idx) for idx in range(num_devices) if (idx != primary_device_idx)]
        return ([prim_device] + non_primary_devices)
    def setUpClass(cls):
        """Probe CUDA/cuDNN/magma availability once for the whole class."""
        # Forces CUDA initialization and provides a probe tensor for cuDNN.
        t = torch.ones(1).cuda()
        cls.no_magma = (not torch.cuda.has_magma)
        cls.no_cudnn = (not torch.backends.cudnn.is_acceptable(t))
        cls.cudnn_version = (None if cls.no_cudnn else torch.backends.cudnn.version())
        cls.primary_device = 'cuda:{0}'.format(torch.cuda.current_device())
def get_caffe_resolver():
    """Lazily create and return the process-wide CaffeResolver singleton."""
    global SHARED_CAFFE_RESOLVER
    if SHARED_CAFFE_RESOLVER is not None:
        return SHARED_CAFFE_RESOLVER
    SHARED_CAFFE_RESOLVER = CaffeResolver()
    return SHARED_CAFFE_RESOLVER
class TestResNetForward():
    """Forward-shape checks for BasicBlock ResNets without the FC head."""
    # NOTE(review): the line below is the residue of a stripped
    # '@pytest.mark.parametrize(...)' decorator; as written it is not valid
    # Python -- restore the decorator from the upstream source.
    .parametrize('layers,planes,output_size', [([1, 1, 1, 1], [16, 32, 14, 8], 8), ([1, 1, 3, 4], [16, 32, 14, 8], 8), ([1, 1, 1, 1], [16, 32, 14, 1], 1), ([1, 1, 1, 1], [16, 32, 14, 8], 8), ([1, 1, 1, 1], [4, 4, 4, 4], 4)])
    def test_basicblock_resnets_output_vector_of_correct_size_without_fc(layers, planes, output_size):
        """Pooled, FC-less ResNet should map (N, 3, 84, 84) -> (N, output_size)."""
        n_images = 5
        model = ResNet(block=BasicBlock, layers=layers, planes=planes, use_fc=False, use_pooling=True)
        input_images = torch.ones((n_images, 3, 84, 84))
        assert (model(input_images).shape == (n_images, output_size))
def from_rank(r, n, k):
    """Unrank: return the k-combination of {0, ..., n-1} with rank r, as a tuple.

    NOTE(review): the exact ordering convention (lex vs. co-lex) is not
    obvious from this code alone -- confirm against the ranking function
    this inverts.

    Raises ValueError when k < 0, k > n, or r is outside [0, binomial(n, k)).
    """
    if (k < 0):
        raise ValueError('k must be > 0')
    if (k > n):
        raise ValueError('k must be <= n')
    if ((n == 0) or (k == 0)):
        return ()
    if (n < 0):
        # NOTE(review): unreachable -- any n < 0 already failed 'k <= n' above.
        raise ValueError('n must be >= 0')
    B = binomial(n, k)
    if ((r < 0) or (r >= B)):
        raise ValueError('r must satisfy 0 <= r < binomial(n, k)')
    if (k == 1):
        return (r,)
    n0 = n
    D = ([0] * k)
    inverse = False
    # For small k, unrank the complementary (n - k)-combination of the
    # mirrored rank instead, and invert when filling D below.
    if (k < (n0 / 2)):
        inverse = True
        k = (n - k)
        r = ((B - 1) - r)
    # Incrementally maintained binomial coefficient (avoids recomputation).
    B = ((B * k) // n0)
    m = 0
    i = 0
    j = 0
    m2 = 0
    d = 0
    # Walk the elements: each step either takes the current candidate
    # (B > r) or skips it, adjusting r and the running binomial B.
    while (d < (k - 1)):
        if (B > r):
            if (i < (k - 2)):
                if (((n0 - 1) - m) == 0):
                    B = 1
                else:
                    B = ((B * ((k - 1) - i)) // ((n0 - 1) - m))
            d += 1
            if inverse:
                # Record the *gap* elements (complement) instead.
                for e in range(m2, (m + i)):
                    D[j] = e
                    j += 1
                m2 = ((m + i) + 1)
            else:
                D[i] = (m + i)
                i += 1
            n0 -= 1
        else:
            r -= B
            if (((n0 - 1) - m) == 0):
                B = 1
            else:
                B = ((B * (((n0 - m) - k) + i)) // ((n0 - 1) - m))
            m += 1
    # Close out the final element (and, when inverting, the remaining gaps).
    if inverse:
        for e in range(m2, (((n0 + r) + i) - B)):
            D[j] = e
            j += 1
        for e in range(((((n0 + r) + i) + 1) - B), n):
            D[j] = e
            j += 1
    else:
        D[(k - 1)] = ((((n0 + r) + k) - 1) - B)
    return tuple(D)
class Generator(nn.Module):
    """Image generator built from transposed residue blocks.

    Maps a latent code of size `code_size` through a weight-normalized FC
    layer into a coarse feature map, then upsamples level by level to a
    w_out x h_out RGB image passed through a sigmoid.
    """
    def __init__(self, w_out, h_out, num_features, num_blocks, code_size):
        """`num_features` / `num_blocks` are per-level lists, coarsest level last."""
        super(Generator, self).__init__()
        # Work backwards from the target size: each level halves the spatial
        # size, and a pad of 1 is recorded whenever the size is 2 mod 4 so
        # the transposed path can reconstruct it exactly.
        pad_w = []
        pad_h = []
        w = w_out
        h = h_out
        for i in range((len(num_features) - 1)):
            if ((w % 4) == 2):
                pad_w.append(1)
                w = ((w + 2) // 2)
            else:
                pad_w.append(0)
                w = (w // 2)
            if ((h % 4) == 2):
                pad_h.append(1)
                h = ((h + 2) // 2)
            else:
                pad_h.append(0)
                h = (h // 2)
        # Final halving for the coarsest level (no padding recorded for it).
        w = (w // 2)
        h = (h // 2)
        pad_w.append(0)
        pad_h.append(0)
        self.net = nn.Sequential()
        # FC expands the code into the coarsest (C, h, w) feature volume.
        self.initial_fc = modules.WeightNormalizedLinear(code_size, ((num_features[(- 1)] * h) * w), scale=True, bias=True, init_factor=0.01)
        self.initial_size = (num_features[(- 1)], h, w)
        self.initial_prelu = nn.PReLU(num_features[(- 1)])
        # Build levels coarsest-to-finest; the last block of each level
        # upsamples (stride 2) and switches to the next level's width.
        for i in range(len(num_features)):
            level = ((len(num_features) - 1) - i)
            f = num_features[level]
            if (level == 0):
                f_next = 3  # final level emits RGB
            else:
                f_next = num_features[(level - 1)]
            for j in range(num_blocks[level]):
                if (j == (num_blocks[level] - 1)):
                    self.net.add_module('level_{0}_block_{1}'.format(level, j), modules.ResidueBlockTranspose(f, f_next, 2, pad_h[level], pad_w[level], gen_last_block=(level == 0)))
                else:
                    self.net.add_module('level_{0}_block_{1}'.format(level, j), modules.ResidueBlockTranspose(f, f, 1, 0, 0))
    def forward(self, input):
        """code -> FC -> PReLU -> reshape to initial volume -> upsampling net -> sigmoid image."""
        return F.sigmoid(self.net(self.initial_prelu(self.initial_fc(input).contiguous().view(input.size(0), *self.initial_size))))
def lift_for_SL(A, N=None):
    """Lift A to an integer matrix of determinant 1.

    NOTE(review): from the structure (Smith normal form, recursive lift,
    final change_ring(ZZ)) this appears to lift a matrix over Z/NZ to a
    matrix in SL(ZZ) congruent to A mod N -- confirm against the original
    Sage source. When A is already over ZZ, the modulus N must be given
    explicitly; otherwise it defaults to the ring characteristic.
    """
    from sage.matrix.special import identity_matrix, diagonal_matrix, block_diagonal_matrix
    from sage.misc.misc_c import prod
    ring = A.parent().base_ring()
    if (N is None):
        if (ring is ZZ):
            raise ValueError('you must choose the modulus')
        else:
            N = ring.characteristic()
    m = A.nrows()
    if (m <= 1):
        # Base case of the recursion: 0x0 / 1x1 lifts trivially.
        return identity_matrix(ZZ, m)
    AZZ = A.change_ring(ZZ)
    # Smith form: D = U * AZZ * V with U, V unimodular.
    (D, U, V) = AZZ.smith_form()
    # Flip a sign if needed so both transforms have determinant +1.
    diag = diagonal_matrix(([(- 1)] + ([1] * (m - 1))))
    if (U.det() == (- 1)):
        U = (diag * U)
    if (V.det() == (- 1)):
        V = (V * diag)
    # Diagonal entries of the Smith form under the fixed-up transforms.
    a = [((U.row(i) * AZZ) * V.column(i)) for i in range(m)]
    b = prod(a[1:])
    # Elementary corrections folding the first diagonal entry into the rest.
    Winv = identity_matrix(m)
    Winv[(1, 0)] = (1 - b)
    Winv[(0, 1)] = (- 1)
    Winv[(1, 1)] = b
    Xinv = identity_matrix(m)
    Xinv[(0, 1)] = a[1]
    Cp = diagonal_matrix(a[1:])
    Cp[(0, 0)] *= a[0]
    # Recurse on the (m-1)x(m-1) problem.
    C = lift_for_SL(Cp, N)
    Cpp = block_diagonal_matrix(identity_matrix(1), C)
    Cpp[(1, 0)] = (1 - a[0])
    return (((((~ U) * Winv) * Cpp) * Xinv) * (~ V)).change_ring(ZZ)
def get_pssm_for_file(filename):
    """Parse a PSSM file into (scop_id, rows of 20 mutation scores).

    The scop_id is the file's base name without extension. The first two
    whitespace-separated tokens are header fields; every later token has
    the form 'X:score', and tokens are grouped 20 per sequence position.
    """
    scop_id = filename.split('/')[-1].split('.')[0]
    with open(filename, 'r') as handle:
        tokens = handle.read().split()
    pssm = []
    current_position = []
    for i, token in enumerate(tokens[2:]):
        # Every 20 scores start a new position row (except at i == 0).
        if i % 20 == 0 and i != 0:
            pssm.append(current_position)
            current_position = []
        current_position.append(int(token.split(':')[1]))
    pssm.append(current_position)
    return (scop_id, pssm)
def StarGraph(n):
    """Return the star graph with center vertex 0 and leaves 1..n.

    The center is pinned at the origin and the leaves are laid out on a
    circle starting at angle pi/2.
    """
    G = Graph({0: list(range(1, (n + 1)))}, name='Star graph', format='dict_of_lists')
    G.set_pos({0: (0, 0)})
    G._circle_embedding(list(range(1, (n + 1))), angle=(pi / 2))
    return G
def test_toms748_scan(tmp_path, hypotest_args):
    """The TOMS-748 upper-limit scan should return limits where CLs == 0.05."""
    (_, data, model) = hypotest_args
    results = pyhf.infer.intervals.upper_limits.toms748_scan(data, model, 0, 5, rtol=1e-08)
    assert (len(results) == 2)
    (observed_limit, expected_limits) = results
    # Re-evaluating CLs at each returned limit must reproduce the 0.05 test size.
    observed_cls = pyhf.infer.hypotest(observed_limit, data, model, model.config.suggested_init(), model.config.suggested_bounds())
    expected_cls = np.array([pyhf.infer.hypotest(expected_limits[i], data, model, model.config.suggested_init(), model.config.suggested_bounds(), return_expected_set=True)[1][i] for i in range(5)])
    assert (observed_cls == pytest.approx(0.05))
    assert (expected_cls == pytest.approx(0.05))
def to_f16(t):
    """Cast every float32 leaf of pytree `t` to float16; other leaves pass through.

    Uses jax.tree_util.tree_map: the jax.tree_map alias was deprecated in
    JAX 0.4.26 and later removed, while tree_util.tree_map works across
    versions, so this keeps the helper running on current JAX.
    """
    return jax.tree_util.tree_map(
        (lambda x: (x.astype(jnp.float16) if (x.dtype == jnp.float32) else x)), t
    )
def main():
    """Preprocess parallel train/valid corpora into a torch .pt data bundle.

    Reads source/target text files, truncates pairs to equal counts, builds
    (or loads) vocabularies, converts tokens to index sequences, and saves
    everything (settings, dicts, train/valid splits) via torch.save.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-train_src', required=True)
    parser.add_argument('-train_tgt', required=True)
    parser.add_argument('-valid_src', required=True)
    parser.add_argument('-valid_tgt', required=True)
    parser.add_argument('-save_data', required=True)
    parser.add_argument('-max_len', '--max_word_seq_len', type=int, default=50)
    parser.add_argument('-min_word_count', type=int, default=5)
    parser.add_argument('-keep_case', action='store_true')
    parser.add_argument('-share_vocab', action='store_true')
    parser.add_argument('-vocab', default=None)
    opt = parser.parse_args()
    # +2 leaves room for the BOS/EOS tokens added around each sequence.
    opt.max_token_seq_len = (opt.max_word_seq_len + 2)
    # --- Training data ---
    train_src_word_insts = read_instances_from_file(opt.train_src, opt.max_word_seq_len, opt.keep_case)
    train_tgt_word_insts = read_instances_from_file(opt.train_tgt, opt.max_word_seq_len, opt.keep_case)
    if (len(train_src_word_insts) != len(train_tgt_word_insts)):
        # Mismatched corpora: truncate both sides to the shorter length.
        print('[Warning] The training instance count is not equal.')
        min_inst_count = min(len(train_src_word_insts), len(train_tgt_word_insts))
        train_src_word_insts = train_src_word_insts[:min_inst_count]
        train_tgt_word_insts = train_tgt_word_insts[:min_inst_count]
    # Drop pairs where either side is empty.
    (train_src_word_insts, train_tgt_word_insts) = list(zip(*[(s, t) for (s, t) in zip(train_src_word_insts, train_tgt_word_insts) if (s and t)]))
    # --- Validation data (same treatment) ---
    valid_src_word_insts = read_instances_from_file(opt.valid_src, opt.max_word_seq_len, opt.keep_case)
    valid_tgt_word_insts = read_instances_from_file(opt.valid_tgt, opt.max_word_seq_len, opt.keep_case)
    if (len(valid_src_word_insts) != len(valid_tgt_word_insts)):
        print('[Warning] The validation instance count is not equal.')
        min_inst_count = min(len(valid_src_word_insts), len(valid_tgt_word_insts))
        valid_src_word_insts = valid_src_word_insts[:min_inst_count]
        valid_tgt_word_insts = valid_tgt_word_insts[:min_inst_count]
    (valid_src_word_insts, valid_tgt_word_insts) = list(zip(*[(s, t) for (s, t) in zip(valid_src_word_insts, valid_tgt_word_insts) if (s and t)]))
    # --- Vocabularies: pre-defined file > shared > separate src/tgt ---
    if opt.vocab:
        predefined_data = torch.load(opt.vocab)
        assert ('dict' in predefined_data)
        print('[Info] Pre-defined vocabulary found.')
        src_word2idx = predefined_data['dict']['src']
        tgt_word2idx = predefined_data['dict']['tgt']
    elif opt.share_vocab:
        print('[Info] Build shared vocabulary for source and target.')
        word2idx = build_vocab_idx((train_src_word_insts + train_tgt_word_insts), opt.min_word_count)
        src_word2idx = tgt_word2idx = word2idx
    else:
        print('[Info] Build vocabulary for source.')
        src_word2idx = build_vocab_idx(train_src_word_insts, opt.min_word_count)
        print('[Info] Build vocabulary for target.')
        tgt_word2idx = build_vocab_idx(train_tgt_word_insts, opt.min_word_count)
    # --- Token -> index conversion and serialization ---
    print('[Info] Convert source word instances into sequences of word index.')
    train_src_insts = convert_instance_to_idx_seq(train_src_word_insts, src_word2idx)
    valid_src_insts = convert_instance_to_idx_seq(valid_src_word_insts, src_word2idx)
    print('[Info] Convert target word instances into sequences of word index.')
    train_tgt_insts = convert_instance_to_idx_seq(train_tgt_word_insts, tgt_word2idx)
    valid_tgt_insts = convert_instance_to_idx_seq(valid_tgt_word_insts, tgt_word2idx)
    data = {'settings': opt, 'dict': {'src': src_word2idx, 'tgt': tgt_word2idx}, 'train': {'src': train_src_insts, 'tgt': train_tgt_insts}, 'valid': {'src': valid_src_insts, 'tgt': valid_tgt_insts}}
    print('[Info] Dumping the processed data to pickle file', opt.save_data)
    torch.save(data, opt.save_data)
    print('[Info] Finish.')
class MockGlorotInitializer(Initializer):
    """Test double for a Glorot initializer: tiny uniform noise in [-5e-5, 5e-5)."""
    def __init__(self):
        pass
    def __call__(self, shape, dtype=None, partition_info=None, verify_shape=None):
        # Uniform in [-0.5, 0.5), scaled down by 1e-4; the extra keyword
        # arguments mirror the TF1 Initializer call signature and are unused.
        return tf.constant(((np.random.rand(*shape) - 0.5) * 0.0001), dtype=dtype)
class ExtIndexObject():
    """Normalize one extended index entry (int, float/complex value,
    name-based slice, plain slice, tuple, or Ellipsis) into a numpy-
    compatible index plus a string type tag."""
    def __init__(self, idx_entry: ExtIndex, parameters: 'Parameters', slot: Optional[int]=None) -> None:
        self.idx_entry = idx_entry          # the raw entry as given
        self._parameters = parameters       # parameter lookup for value-based indexing
        self.slot = slot                    # positional slot into parameters
        self.name: Optional[str] = None     # set when the entry names a parameter
        (self.type, self.std_idx_entry) = self.convert_to_np_idx_entry(idx_entry)
    def convert_to_np_slice_entry(self, slice_entry: ExtSliceEntry) -> NpSliceEntry:
        """Normalize one slice component: ints pass through, None stays None,
        float/complex values are resolved to the nearest parameter index."""
        if isinstance(slice_entry, (int, np.integer)):
            return slice_entry
        if (slice_entry is None):
            return None
        if isinstance(slice_entry, (float, complex)):
            # Value-based component: requires a parameter name set earlier.
            assert isinstance(self.name, str)
            return idx_for_value(slice_entry, self._parameters.paramvals_by_name[self.name])
        raise TypeError('Invalid slice entry: {}'.format(slice_entry))
    def convert_to_np_idx_entry(self, idx_entry: ExtIndex) -> Tuple[(str, NpIndex)]:
        """Return (type_tag, numpy-compatible index) for the entry."""
        if isinstance(idx_entry, (int, np.integer)):
            return ('int', idx_entry)
        if (idx_entry is Ellipsis):
            return ('ellipsis', idx_entry)
        if isinstance(idx_entry, (tuple, list)):
            return ('tuple', idx_entry)
        if isinstance(idx_entry, (float, complex)):
            # Bare value: resolve against the parameter in this slot.
            return ('val', idx_for_value(idx_entry, self._parameters[self.slot]))
        if (isinstance(idx_entry, slice) and isinstance(idx_entry.start, str)):
            # Name-based slice: slice('param_name', start, stop) -- the slice
            # fields are shifted one position relative to a normal slice.
            self.name = idx_entry.start
            start = self.convert_to_np_slice_entry(idx_entry.stop)
            if isinstance(start, (complex, float)):
                start = idx_for_value(start, self._parameters[self.slot])
            stop = self.convert_to_np_slice_entry(idx_entry.step)
            if isinstance(stop, (complex, float)):
                stop = idx_for_value(stop, self._parameters[self.slot])
            if (isinstance(start, (int, np.integer)) and (stop is None)):
                # Single-point name-based selection collapses to an int index.
                return ('slice.name', start)
            return ('slice.name', slice(start, stop, None))
        if isinstance(idx_entry, slice):
            start = self.convert_to_np_slice_entry(idx_entry.start)
            stop = self.convert_to_np_slice_entry(idx_entry.stop)
            if ((idx_entry.step is None) or isinstance(idx_entry.step, (int, np.integer))):
                step = self.convert_to_np_slice_entry(idx_entry.step)
            else:
                raise TypeError('slice.step can only be int or None. Found {} instead.'.format(idx_entry.step))
            return ('slice', slice(start, stop, step))
        raise TypeError('Invalid index: {}'.format(idx_entry))
class InvalidRegularExpression(OperationSchemaError):
    """Raised when a schema-supplied regular expression cannot be handled."""
    # Makes tracebacks show the bare class name, builtin-style.
    __module__ = 'builtins'
    # NOTE(review): takes `cls` -- surely @classmethod in the original
    # source; the decorator appears stripped from this copy.
    def from_hypothesis_jsonschema_message(cls, message: str) -> InvalidRegularExpression:
        """Build a friendlier error from a hypothesis-jsonschema message string."""
        match = re.search("pattern='(.*?)'.*?\\((.*?)\\)", message)
        if match:
            message = f'Invalid regular expression. Pattern `{match.group(1)}` is not recognized - `{match.group(2)}`'
        return cls(message)
class FastRCNNTest(unittest.TestCase):
    """Unit tests for FastRCNN output layers (axis-aligned and rotated)."""
    def test_fast_rcnn(self):
        """Losses on a tiny hand-built batch must match the pinned reference values."""
        torch.manual_seed(132)  # fixes the random head weights
        box_head_output_size = 8
        box_predictor = FastRCNNOutputLayers(ShapeSpec(channels=box_head_output_size), box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)), num_classes=5)
        feature_pooled = torch.rand(2, box_head_output_size)
        predictions = box_predictor(feature_pooled)
        proposal_boxes = torch.tensor([[0.8, 1.1, 3.2, 2.8], [2.3, 2.5, 7, 8]], dtype=torch.float32)
        gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
        proposal = Instances((10, 10))
        proposal.proposal_boxes = Boxes(proposal_boxes)
        proposal.gt_boxes = Boxes(gt_boxes)
        proposal.gt_classes = torch.tensor([1, 2])
        with EventStorage():
            losses = box_predictor.losses(predictions, [proposal])
        # Reference values pinned for the seeded weights above.
        expected_losses = {'loss_cls': torch.tensor(1.), 'loss_box_reg': torch.tensor(4.)}
        for name in expected_losses.keys():
            assert torch.allclose(losses[name], expected_losses[name])
    def test_fast_rcnn_empty_batch(self, device='cpu'):
        """An empty proposal batch yields zero losses but still backpropagates."""
        box_predictor = FastRCNNOutputLayers(ShapeSpec(channels=10), box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)), num_classes=8).to(device=device)
        logits = torch.randn(0, 100, requires_grad=True, device=device)
        deltas = torch.randn(0, 4, requires_grad=True, device=device)
        losses = box_predictor.losses([logits, deltas], [])
        for value in losses.values():
            self.assertTrue(torch.allclose(value, torch.zeros_like(value)))
        # Backward must succeed and populate (zero) gradients.
        sum(losses.values()).backward()
        self.assertTrue((logits.grad is not None))
        self.assertTrue((deltas.grad is not None))
        (predictions, _) = box_predictor.inference([logits, deltas], [])
        self.assertEqual(len(predictions), 0)
    # NOTE(review): the bare tuple below is the residue of a stripped
    # '@unittest.skipIf(...)' decorator for the CUDA variant; restore upstream.
    ((not torch.cuda.is_available()), 'CUDA not available')
    def test_fast_rcnn_empty_batch_cuda(self):
        """Empty-batch behavior must also hold on CUDA."""
        self.test_fast_rcnn_empty_batch(device=torch.device('cuda'))
    def test_fast_rcnn_rotated(self):
        """Rotated-box variant: losses on a tiny batch match pinned values."""
        torch.manual_seed(132)
        box_head_output_size = 8
        box_predictor = RotatedFastRCNNOutputLayers(ShapeSpec(channels=box_head_output_size), box2box_transform=Box2BoxTransformRotated(weights=(10, 10, 5, 5, 1)), num_classes=5)
        feature_pooled = torch.rand(2, box_head_output_size)
        predictions = box_predictor(feature_pooled)
        # Rotated boxes: (cx, cy, w, h, angle).
        proposal_boxes = torch.tensor([[2, 1.95, 2.4, 1.7, 0], [4.65, 5.25, 4.7, 5.5, 0]], dtype=torch.float32)
        gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
        proposal = Instances((10, 10))
        proposal.proposal_boxes = RotatedBoxes(proposal_boxes)
        proposal.gt_boxes = RotatedBoxes(gt_boxes)
        proposal.gt_classes = torch.tensor([1, 2])
        with EventStorage():
            losses = box_predictor.losses(predictions, [proposal])
        expected_losses = {'loss_cls': torch.tensor(1.), 'loss_box_reg': torch.tensor(4.)}
        for name in expected_losses.keys():
            assert torch.allclose(losses[name], expected_losses[name])
def test_visualize_graph_for_single_table():
    """Metadata visualization/validation and model fit must tolerate a column
    name made entirely of special characters."""
    frame = pd.DataFrame({'\\|=/#$324%^,"&*()><...': ['a', 'b', 'c']})
    table_metadata = SingleTableMetadata()
    table_metadata.detect_from_dataframe(frame)
    synthesizer = GaussianCopulaSynthesizer(table_metadata)
    # Neither rendering nor validation should choke on the odd column name.
    table_metadata.visualize()
    table_metadata.validate()
    synthesizer.fit(frame)
    synthesizer.sample(10)
class ScraperSpiderMiddleware(object):
    """Scrapy spider middleware (template implementation).

    Fixed to match the Scrapy middleware contract: ``from_crawler`` is a
    classmethod, and every ``process_*`` hook takes ``self`` — the original
    definitions bound the first real argument (``response``/``crawler``) as
    the instance, breaking every call Scrapy makes.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Instantiate the middleware and hook it to the spider_opened signal."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Returning None lets the response continue through the middleware chain.
        return None

    def process_spider_output(self, response, result, spider):
        # Pass every item/request through unchanged.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Returning None (implicitly) lets other middlewares handle it.
        pass

    def process_start_requests(self, start_requests, spider):
        # Must yield Request objects only (no items), per Scrapy's contract.
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
def get_test_data(sample_size=1000, embedding_size=4, sparse_feature_num=1, dense_feature_num=1, sequence_feature=('sum', 'mean', 'max'), classification=True, include_length=False, hash_flag=False, prefix=''):
    """Generate random feature columns, model inputs, and targets for tests.

    Args:
        sample_size: number of rows to generate.
        embedding_size: embedding dimension for sparse/sequence features.
        sparse_feature_num / dense_feature_num: how many of each to create.
        sequence_feature: pooling modes for var-len features; may include
            'weight' to add a weighted sequence feature.
        classification: binary targets if True, else continuous.
        include_length: also emit explicit sequence-length inputs.
        prefix: prepended to every feature name.

    Returns:
        (model_input, y, feature_columns).
    """
    # Work on a local copy: the original popped 'weight' from the argument in
    # place, mutating the caller's list — and the default was a shared mutable
    # list.  The default is now an immutable tuple.
    sequence_feature = list(sequence_feature)
    feature_columns = []
    model_input = {}
    if 'weight' in sequence_feature:
        feature_columns.append(VarLenSparseFeat(SparseFeat((prefix + 'weighted_seq'), vocabulary_size=2, embedding_dim=embedding_size), maxlen=3, length_name=((prefix + 'weighted_seq') + '_seq_length'), weight_name=(prefix + 'weight')))
        (s_input, s_len_input) = gen_sequence(2, 3, sample_size)
        model_input[(prefix + 'weighted_seq')] = s_input
        model_input[(prefix + 'weight')] = np.random.randn(sample_size, 3, 1)
        model_input[((prefix + 'weighted_seq') + '_seq_length')] = s_len_input
        sequence_feature.remove('weight')
    for i in range(sparse_feature_num):
        dim = np.random.randint(1, 10)
        feature_columns.append(SparseFeat(((prefix + 'sparse_feature_') + str(i)), dim, embedding_size, dtype=torch.int32))
    for i in range(dense_feature_num):
        feature_columns.append(DenseFeat(((prefix + 'dense_feature_') + str(i)), 1, dtype=torch.float32))
    for (i, mode) in enumerate(sequence_feature):
        dim = np.random.randint(1, 10)
        maxlen = np.random.randint(1, 10)
        feature_columns.append(VarLenSparseFeat(SparseFeat(((prefix + 'sequence_') + mode), vocabulary_size=dim, embedding_dim=embedding_size), maxlen=maxlen, combiner=mode))
    for fc in feature_columns:
        if isinstance(fc, SparseFeat):
            model_input[fc.name] = np.random.randint(0, fc.vocabulary_size, sample_size)
        elif isinstance(fc, DenseFeat):
            model_input[fc.name] = np.random.random(sample_size)
        else:
            (s_input, s_len_input) = gen_sequence(fc.vocabulary_size, fc.maxlen, sample_size)
            model_input[fc.name] = s_input
            if include_length:
                # NOTE(review): `i` here is the stale final index from the
                # enumerate loop above, so multiple sequence features would all
                # share one length key — preserved as-is, confirm intent.
                fc.length_name = (((prefix + 'sequence_') + str(i)) + '_seq_length')
                model_input[(((prefix + 'sequence_') + str(i)) + '_seq_length')] = s_len_input
    if classification:
        y = np.random.randint(0, 2, sample_size)
    else:
        y = np.random.random(sample_size)
    return (model_input, y, feature_columns)
class TFCvtOutput(tf.keras.layers.Layer):
    """Projection + dropout sub-layer of a CvT attention block.

    Projects the attention output back to ``embed_dim``, applies dropout, and
    adds the residual ``input_tensor``.
    """

    def __init__(self, config: CvtConfig, embed_dim: int, drop_rate: int, **kwargs):
        super().__init__(**kwargs)
        # Output projection; initializer range comes from the model config.
        self.dense = tf.keras.layers.Dense(units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.dropout = tf.keras.layers.Dropout(drop_rate)

    def call(self, hidden_state: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor:
        projected = self.dense(inputs=hidden_state)
        regularized = self.dropout(inputs=projected, training=training)
        # Residual connection.
        return regularized + input_tensor
def is_response_abstained(generation, fn_type):
    """Return True if *generation* is an abstention, per the detector *fn_type*.

    Unknown detector names are treated as "did not abstain".
    """
    if fn_type == 'perplexity_ai':
        return perplexity_ai_abstain_detect(generation)
    if fn_type == 'generic':
        return generic_abstain_detect(generation)
    return False
def to_categorical(mask, num_classes, channel='channel_first'):
    """One-hot encode an integer label mask.

    Args:
        mask: integer ndarray of class indices; singleton channel axes at
            position 1 or -1 are squeezed away first.
        num_classes: number of classes (> 1); all mask values must be < this.
        channel: 'channel_first' puts the one-hot axis at position 1,
            'channel_last' leaves it as the trailing axis.

    Returns:
        uint8 ndarray with a one-hot axis of length ``num_classes``.

    Raises:
        ValueError: on a bad ``channel``/``num_classes`` or out-of-range mask.
            (Input validation previously used ``assert``, which is stripped
            under ``python -O``; it now raises explicitly.)
    """
    if channel != 'channel_first' and channel != 'channel_last':
        raise ValueError("channel should be either 'channel_first' or 'channel_last'")
    if num_classes <= 1:
        raise ValueError('num_classes should be greater than 1')
    unique = np.unique(mask)
    if len(unique) > num_classes:
        raise ValueError('number of unique values should be smaller or equal to the num_classes')
    if np.max(unique) >= num_classes:
        raise ValueError('maximum value in the mask should be smaller than the num_classes')
    # Drop a singleton channel axis if present; the ndim guards also fix an
    # IndexError the original raised on 1-D masks.
    if mask.ndim > 1 and mask.shape[1] == 1:
        mask = np.squeeze(mask, axis=1)
    if mask.ndim > 1 and mask.shape[-1] == 1:
        mask = np.squeeze(mask, axis=-1)
    # Fancy-indexing the identity matrix yields the one-hot encoding with the
    # class axis last.
    eye = np.eye(num_classes, dtype='uint8')
    output = eye[mask]
    if channel == 'channel_first':
        output = np.moveaxis(output, -1, 1)
    return output
def tv_loss_on_voxel_hash(query, feature, G0=16, growth_factor=1.5, T0=(2 ** 15), L=16, D=2, min_=[(- 1), (- 1), (- 1)], max_=[1, 1, 1], boundary_check=False, ctx=None):
    """Total-variation loss over a multi-resolution voxel-hash feature grid.

    Thin functional wrapper: builds a ``TVLossOnVoxelHash`` with the given hash
    configuration (base resolution ``G0``, per-level ``growth_factor``, table
    size ``T0``, ``L`` levels, ``D`` features) and applies it.
    """
    loss_op = TVLossOnVoxelHash(ctx, G0, growth_factor, T0, L, D, min_, max_, boundary_check)
    return loss_op(query, feature)
def extract_value(string, key):
    """Return the first float-looking value following ``<key> : `` in *string*.

    The value must contain a decimal point (optional sign and exponent are
    accepted); it is returned as the matched string, or None if absent.
    """
    escaped = re.escape(key)
    pattern = '(?P<key>{})\\s*:\\s*(?P<value>[-+]?\\d*\\.\\d+([eE][-+]?\\d+)?)'.format(escaped)
    found = re.search(pattern, string)
    if found is None:
        return None
    return found.group('value')
# Restored stripped decorator: the bare `.parametrize(...)` line was a
# SyntaxError and the test was never parametrized.
@pytest.mark.parametrize('func_module', PARAM_VALIDATION_FUNCTION_LIST)
def test_function_param_validation(func_module):
    """Every public function's _skl_parameter_constraints must cover exactly
    its parameters, and each constrained parameter must reject bad values with
    the standard error message."""
    (module_name, func_name) = func_module.rsplit('.', 1)
    module = import_module(module_name)
    func = getattr(module, func_name)
    func_sig = signature(func)
    func_params = [p.name for p in func_sig.parameters.values() if (p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD))]
    parameter_constraints = getattr(func, '_skl_parameter_constraints')
    required_params = [p.name for p in func_sig.parameters.values() if ((p.default is p.empty) and (p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)))]
    # Build one valid value per required parameter so each call below only
    # fails because of the deliberately-bad parameter under test.
    valid_required_params = {}
    for param_name in required_params:
        if (parameter_constraints[param_name] == 'no_validation'):
            valid_required_params[param_name] = 1
        else:
            valid_required_params[param_name] = generate_valid_param(make_constraint(parameter_constraints[param_name][0]))
    if func_params:
        validation_params = parameter_constraints.keys()
        unexpected_params = (set(validation_params) - set(func_params))
        missing_params = (set(func_params) - set(validation_params))
        err_msg = f'''Mismatch between _parameter_constraints and the parameters of {func_name}.
Consider the unexpected parameters {unexpected_params} and expected but missing parameters {missing_params}
'''
        assert (set(validation_params) == set(func_params)), err_msg
    # An instance of a throwaway type satisfies no constraint.
    param_with_bad_type = type('BadType', (), {})()
    for param_name in func_params:
        constraints = parameter_constraints[param_name]
        if (constraints == 'no_validation'):
            continue
        match = f"The '{param_name}' parameter of {func_name} must be .* Got .* instead."
        with pytest.raises(ValueError, match=match):
            func(**{**valid_required_params, param_name: param_with_bad_type})
        constraints = [make_constraint(constraint) for constraint in constraints]
        for constraint in constraints:
            try:
                bad_value = generate_invalid_param_val(constraint)
            except NotImplementedError:
                # Some constraints cannot synthesize an invalid value; skip.
                continue
            with pytest.raises(ValueError, match=match):
                func(**{**valid_required_params, param_name: bad_value})
def matched(s, c1, c2):
    """Return True if every ``c2`` in *s* closes an earlier ``c1`` and all
    ``c1`` are eventually closed (balanced-delimiter check)."""
    depth = 0
    for ch in s:
        if ch == c1:
            depth += 1
        elif ch == c2:
            depth -= 1
            # A closer with no matching opener fails immediately.
            if depth < 0:
                return False
    return depth == 0
class SwitchTransformersForConditionalGeneration(metaclass=DummyObject):
    # Import-time placeholder: stands in for the real model class when torch
    # is not installed, so that `from transformers import ...` still works.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Raises a helpful ImportError explaining that torch is required.
        requires_backends(self, ['torch'])
def max_pool(bottom, ks=2, stride=2):
    """Attach a Caffe MAX-pooling layer to *bottom* (kernel ``ks``, given stride)."""
    pool_layer = L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
    return pool_layer
# Restored stripped decorator: the bare `.torch` line was a SyntaxError.
@pytest.mark.torch
def test_tensor_feature_setters(some_num_tensor_feature, some_cat_tensor_feature):
    """The private _set_* mutators must be reflected by the public read-only
    properties of tensor features."""
    some_num_tensor_feature._set_feature_hint(FeatureHint.RATING)
    some_num_tensor_feature._set_feature_sources([TensorFeatureSource(FeatureSource.INTERACTIONS, 'fake1')])
    some_num_tensor_feature._set_tensor_dim(42)
    some_cat_tensor_feature._set_cardinality(42)
    some_cat_tensor_feature._set_embedding_dim(42)
    assert some_num_tensor_feature.feature_hint == FeatureHint.RATING
    # NOTE(review): sources were set on the *num* feature above but asserted on
    # the *cat* feature here — preserved as-is, confirm which was intended.
    assert len(some_cat_tensor_feature.feature_sources) == 1
    assert some_num_tensor_feature.tensor_dim == 42
    assert some_cat_tensor_feature.cardinality == 42
    assert some_cat_tensor_feature.embedding_dim == 42
def show_image_labels(image, label, img, lbl):
    """Tile two (image, label) pairs into one PIL image.

    Images go side by side on the top row, labels side by side on the bottom;
    tensors are assumed CHW and are converted to HWC for PIL.
    """
    top_row = torch.cat([image, img], dim=2)
    bottom_row = torch.cat([label, lbl], dim=2)
    grid = torch.cat([top_row, bottom_row], dim=1)
    return Image.fromarray(grid.permute(1, 2, 0).numpy())
class FunctionalExpression():
    """Base node for numeric (functional) expressions in a parsed task.

    Children are frozen into a tuple at construction time.
    """

    def __init__(self, parts):
        self.parts = tuple(parts)

    def dump(self, indent=' '):
        """Print this node, then recursively its children one level deeper."""
        print('%s%s' % (indent, self._dump()))
        for child in self.parts:
            child.dump(indent + ' ')

    def _dump(self):
        # Subclasses are identified by their class name alone.
        return self.__class__.__name__

    def instantiate(self, var_mapping, init_facts):
        """Functional expressions must be normalized away before instantiation."""
        raise ValueError('Cannot instantiate condition: not normalized')
class SpecifierSet(BaseSpecifier):
    """A set of version specifiers (e.g. ">=1.0,<2.0"), combined with AND.

    Each comma-separated clause is parsed as a PEP 440 ``Specifier``, falling
    back to ``LegacySpecifier`` for non-PEP-440 syntax.

    Restored the stripped ``@property`` / ``@prereleases.setter`` decorators on
    ``prereleases``: without them the second ``def prereleases`` silently
    shadowed the first, and every ``self.prereleases`` read (in ``__repr__``,
    ``contains`` and ``filter``) returned a bound method instead of a bool.
    """

    def __init__(self, specifiers='', prereleases=None):
        # Split on commas and drop empty / whitespace-only clauses.
        specifiers = [s.strip() for s in specifiers.split(',') if s.strip()]
        parsed = set()
        for specifier in specifiers:
            try:
                parsed.add(Specifier(specifier))
            except InvalidSpecifier:
                parsed.add(LegacySpecifier(specifier))
        self._specs = frozenset(parsed)
        # None means "decide automatically from the individual specifiers".
        self._prereleases = prereleases

    def __repr__(self):
        pre = (', prereleases={0!r}'.format(self.prereleases) if (self._prereleases is not None) else '')
        return '<SpecifierSet({0!r}{1})>'.format(str(self), pre)

    def __str__(self):
        return ','.join(sorted((str(s) for s in self._specs)))

    def __hash__(self):
        return hash(self._specs)

    def __and__(self, other):
        """Intersection: a new set with the clauses of both operands."""
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif (not isinstance(other, SpecifierSet)):
            return NotImplemented
        specifier = SpecifierSet()
        specifier._specs = frozenset((self._specs | other._specs))
        # Prerelease overrides must agree, or only one side may set one.
        if ((self._prereleases is None) and (other._prereleases is not None)):
            specifier._prereleases = other._prereleases
        elif ((self._prereleases is not None) and (other._prereleases is None)):
            specifier._prereleases = self._prereleases
        elif (self._prereleases == other._prereleases):
            specifier._prereleases = self._prereleases
        else:
            raise ValueError('Cannot combine SpecifierSets with True and False prerelease overrides.')
        return specifier

    def __eq__(self, other):
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif isinstance(other, _IndividualSpecifier):
            other = SpecifierSet(str(other))
        elif (not isinstance(other, SpecifierSet)):
            return NotImplemented
        return (self._specs == other._specs)

    def __ne__(self, other):
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif isinstance(other, _IndividualSpecifier):
            other = SpecifierSet(str(other))
        elif (not isinstance(other, SpecifierSet)):
            return NotImplemented
        return (self._specs != other._specs)

    def __len__(self):
        return len(self._specs)

    def __iter__(self):
        return iter(self._specs)

    @property
    def prereleases(self):
        """Whether prereleases are allowed: explicit override, else inferred
        from the individual specifiers (None when the set is empty)."""
        if (self._prereleases is not None):
            return self._prereleases
        if (not self._specs):
            return None
        return any((s.prereleases for s in self._specs))

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value

    def __contains__(self, item):
        return self.contains(item)

    def contains(self, item, prereleases=None):
        """Return True if *item* satisfies every specifier in the set."""
        if (not isinstance(item, (LegacyVersion, Version))):
            item = parse(item)
        if (prereleases is None):
            prereleases = self.prereleases
        if ((not prereleases) and item.is_prerelease):
            return False
        return all((s.contains(item, prereleases=prereleases) for s in self._specs))

    def filter(self, iterable, prereleases=None):
        """Filter *iterable* down to the versions satisfying the set.

        With no specifiers at all, legacy versions are dropped and prereleases
        are returned only if nothing else matched (PEP 440 semantics).
        """
        if (prereleases is None):
            prereleases = self.prereleases
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iterable
        else:
            filtered = []
            found_prereleases = []
            for item in iterable:
                if (not isinstance(item, (LegacyVersion, Version))):
                    parsed_version = parse(item)
                else:
                    parsed_version = item
                if isinstance(parsed_version, LegacyVersion):
                    continue
                if (parsed_version.is_prerelease and (not prereleases)):
                    # Remember prereleases: they are the fallback result if no
                    # final releases are present.
                    if (not filtered):
                        found_prereleases.append(item)
                else:
                    filtered.append(item)
            if ((not filtered) and found_prereleases and (prereleases is None)):
                return found_prereleases
            return filtered
def register_Ns3MgtAssocResponseHeader_methods(root_module, cls):
    """Register constructors and methods of ns3::MgtAssocResponseHeader on the
    PyBindGen wrapper class *cls* (auto-generated binding registration)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::MgtAssocResponseHeader const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    # Const getters for the header's information elements.
    cls.add_method('GetCapabilities', 'ns3::CapabilityInformation', [], is_const=True)
    cls.add_method('GetEdcaParameterSet', 'ns3::EdcaParameterSet', [], is_const=True)
    cls.add_method('GetErpInformation', 'ns3::ErpInformation', [], is_const=True)
    cls.add_method('GetExtendedCapabilities', 'ns3::ExtendedCapabilities', [], is_const=True)
    cls.add_method('GetHeCapabilities', 'ns3::HeCapabilities', [], is_const=True)
    cls.add_method('GetHeOperation', 'ns3::HeOperation', [], is_const=True)
    cls.add_method('GetHtCapabilities', 'ns3::HtCapabilities', [], is_const=True)
    cls.add_method('GetHtOperation', 'ns3::HtOperation', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    # Non-const getters (match the underlying C++ signatures).
    cls.add_method('GetStatusCode', 'ns3::StatusCode', [])
    cls.add_method('GetSupportedRates', 'ns3::SupportedRates', [])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetVhtCapabilities', 'ns3::VhtCapabilities', [], is_const=True)
    cls.add_method('GetVhtOperation', 'ns3::VhtOperation', [], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Setters for the header's fields and information elements.
    cls.add_method('SetAssociationId', 'void', [param('uint16_t', 'aid')])
    cls.add_method('SetCapabilities', 'void', [param('ns3::CapabilityInformation', 'capabilities')])
    cls.add_method('SetEdcaParameterSet', 'void', [param('ns3::EdcaParameterSet', 'edcaParameterSet')])
    cls.add_method('SetErpInformation', 'void', [param('ns3::ErpInformation', 'erpInformation')])
    cls.add_method('SetExtendedCapabilities', 'void', [param('ns3::ExtendedCapabilities', 'extendedcapabilities')])
    cls.add_method('SetHeCapabilities', 'void', [param('ns3::HeCapabilities', 'hecapabilities')])
    cls.add_method('SetHeOperation', 'void', [param('ns3::HeOperation', 'heoperation')])
    cls.add_method('SetHtCapabilities', 'void', [param('ns3::HtCapabilities', 'htcapabilities')])
    cls.add_method('SetHtOperation', 'void', [param('ns3::HtOperation', 'htoperation')])
    cls.add_method('SetStatusCode', 'void', [param('ns3::StatusCode', 'code')])
    cls.add_method('SetSupportedRates', 'void', [param('ns3::SupportedRates', 'rates')])
    cls.add_method('SetVhtCapabilities', 'void', [param('ns3::VhtCapabilities', 'vhtcapabilities')])
    cls.add_method('SetVhtOperation', 'void', [param('ns3::VhtOperation', 'vhtoperation')])
    return
def test_two_connectivity_holes():
    """With connectivity=2, diagonally-connected holes reach the border and
    must NOT be filled by remove_small_holes."""
    filled = remove_small_holes(test_holes_image, area_threshold=3, connectivity=2)
    reference = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 1, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], bool)
    assert_array_equal(filled, reference)
def build_tfrecord_input(training=True):
    """Build a TF1 queue-based input pipeline reading TFRecord files.

    Splits the files in FLAGS.data_dir into train/val by FLAGS.train_val_split,
    decodes per-step JPEG images (center-cropped then bicubic-resized to
    IMG_HEIGHT x IMG_WIDTH, scaled to [0, 1]) and, when FLAGS.use_state is set,
    the commanded-action and end-effector state vectors.

    Returns:
        (image_batch, action_batch, state_batch); when FLAGS.use_state is
        False the action/state slots are zero tensors.

    Raises:
        RuntimeError: if no files match FLAGS.data_dir.
        ValueError: if IMG_HEIGHT != IMG_WIDTH (unsupported).
    """
    filenames = gfile.Glob(os.path.join(FLAGS.data_dir, '*'))
    if (not filenames):
        raise RuntimeError('No data files found.')
    # First `index` files are training data, the remainder validation.
    index = int(np.floor((FLAGS.train_val_split * len(filenames))))
    if training:
        filenames = filenames[:index]
    else:
        filenames = filenames[index:]
    filename_queue = tf.train.string_input_producer(filenames, shuffle=True)
    reader = tf.TFRecordReader()
    (_, serialized_example) = reader.read(filename_queue)
    (image_seq, state_seq, action_seq) = ([], [], [])
    for i in range(FLAGS.sequence_length):
        # Per-timestep feature keys inside each serialized example.
        image_name = (('move/' + str(i)) + '/image/encoded')
        action_name = (('move/' + str(i)) + '/commanded_pose/vec_pitch_yaw')
        state_name = (('move/' + str(i)) + '/endeffector/vec_pitch_yaw')
        if FLAGS.use_state:
            features = {image_name: tf.FixedLenFeature([1], tf.string), action_name: tf.FixedLenFeature([STATE_DIM], tf.float32), state_name: tf.FixedLenFeature([STATE_DIM], tf.float32)}
        else:
            features = {image_name: tf.FixedLenFeature([1], tf.string)}
        features = tf.parse_single_example(serialized_example, features=features)
        image_buffer = tf.reshape(features[image_name], shape=[])
        image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)
        image.set_shape([ORIGINAL_HEIGHT, ORIGINAL_WIDTH, COLOR_CHAN])
        if (IMG_HEIGHT != IMG_WIDTH):
            raise ValueError('Unequal height and width unsupported')
        # Center-crop to a square, then resize to the target resolution.
        crop_size = min(ORIGINAL_HEIGHT, ORIGINAL_WIDTH)
        image = tf.image.resize_image_with_crop_or_pad(image, crop_size, crop_size)
        image = tf.reshape(image, [1, crop_size, crop_size, COLOR_CHAN])
        image = tf.image.resize_bicubic(image, [IMG_HEIGHT, IMG_WIDTH])
        # Normalize pixel values to [0, 1].
        image = (tf.cast(image, tf.float32) / 255.0)
        image_seq.append(image)
        if FLAGS.use_state:
            state = tf.reshape(features[state_name], shape=[1, STATE_DIM])
            state_seq.append(state)
            action = tf.reshape(features[action_name], shape=[1, STATE_DIM])
            action_seq.append(action)
    # TF1-style concat: axis is the first positional argument.
    image_seq = tf.concat(0, image_seq)
    if FLAGS.use_state:
        state_seq = tf.concat(0, state_seq)
        action_seq = tf.concat(0, action_seq)
        [image_batch, action_batch, state_batch] = tf.train.batch([image_seq, action_seq, state_seq], FLAGS.batch_size, num_threads=FLAGS.batch_size, capacity=(100 * FLAGS.batch_size))
        return (image_batch, action_batch, state_batch)
    else:
        image_batch = tf.train.batch([image_seq], FLAGS.batch_size, num_threads=FLAGS.batch_size, capacity=(100 * FLAGS.batch_size))
        # Placeholder zeros keep the return arity identical either way.
        zeros_batch = tf.zeros([FLAGS.batch_size, FLAGS.sequence_length, STATE_DIM])
        return (image_batch, zeros_batch, zeros_batch)
class ImageFolderCustomClass(data.Dataset):
    """ImageFolder-style dataset that accepts a caller-supplied class->index map.

    When ``custom_class_to_idx`` is given it is used verbatim (class names are
    its keys); otherwise classes are discovered from the directory layout.
    """

    def __init__(self, root, transform=None, target_transform=None, loader=default_loader, custom_class_to_idx=None):
        if custom_class_to_idx is None:
            (classes, class_to_idx) = find_classes(root)
        else:
            class_to_idx = custom_class_to_idx
            classes = list(class_to_idx.keys())
        imgs = make_dataset(root, class_to_idx)
        if not imgs:
            raise RuntimeError((('Found 0 images in subfolders of: ' + root) + '\nSupported image extensions are: ') + ','.join(IMG_EXTENSIONS))
        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        """Load sample *index*, applying the optional transforms."""
        (path, target) = self.imgs[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (sample, target)

    def __len__(self):
        return len(self.imgs)

    def __repr__(self):
        lines = [('Dataset ' + self.__class__.__name__) + '\n']
        lines.append('    Number of datapoints: {}\n'.format(self.__len__()))
        lines.append('    Root Location: {}\n'.format(self.root))
        tmp = '    Transforms (if any): '
        lines.append('{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + (' ' * len(tmp)))))
        tmp = '    Target Transforms (if any): '
        lines.append('{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + (' ' * len(tmp)))))
        return ''.join(lines)
class ConditionGen(nn.Module):
    """Concatenate a latent vector with an L2-normalized label embedding.

    Integer (int64) labels are looked up in an embedding table; float inputs
    are treated as precomputed embeddings.  Either way the embedding is
    normalized to unit length before concatenation with ``z``.
    """

    def __init__(self, z_dim, nlabels, embed_size=256):
        super().__init__()
        self.embedding = nn.Embedding(nlabels, embed_size)
        # Dimensionality of the concatenated (z, embedding) vector.
        self.latent_dim = z_dim + embed_size
        self.z_dim = z_dim
        self.nlabels = nlabels
        self.embed_size = embed_size

    def forward(self, z, y):
        assert z.size(0) == y.size(0)
        if y.dtype is torch.int64:
            embedded = self.embedding(y)
        else:
            # Float input: caller already provides the embedding.
            embedded = y
        unit = embedded / torch.norm(embedded, p=2, dim=1, keepdim=True)
        return torch.cat([z, unit], dim=1)
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
    """Normalize a Monaco TVA (VAT) number into a one-element list.

    Null-like or unparseable values yield [nan] (or the raw value with
    errors='ignore'; errors='raise' raises ValueError).  Any output format
    other than 'compact'/'standard' yields an empty list.
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_mc_tva(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val] if errors == 'ignore' else [np.nan]
    formatted: Any = []
    if output_format in {'compact', 'standard'}:
        formatted = [tva.compact(val)] + formatted
    return formatted
class LxmertTokenizer(PreTrainedTokenizer):
    """WordPiece tokenizer for LXMERT (BERT-style vocabulary and special tokens).

    Restored the stripped ``@property`` decorators on ``do_lower_case`` and
    ``vocab_size``: the upstream contract exposes both as attributes, and as
    plain methods every attribute-style read returned a bound method.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = LxmertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping for id -> token lookups.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    @property
    def do_lower_case(self):
        """Whether the basic tokenizer lowercases its input."""
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        """Size of the base vocabulary (added tokens excluded)."""
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Basic-tokenize then WordPiece each token (never_split passes through)."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                if (token in self.basic_tokenizer.never_split):
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        # Undo WordPiece '##' continuation markers.
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """[CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """0 for the first segment (incl. [CLS]/[SEP]), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocabulary, one token per line in index order; returns the path."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        else:
            vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    # Gaps in indices would corrupt a line-number-based vocab.
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)
class Block(nn.Module):
    """Generator residual block with optional upsampling and, when
    num_classes > 0, class-conditional batch normalization.

    NOTE(review): _initialize reads `self.c1.weight.tensor`; a stock nn.Conv2d
    weight exposes `.data`, not `.tensor`, so this would raise AttributeError
    unless a custom parameter wrapper is in use — confirm before calling it.
    """
    def __init__(self, in_ch, out_ch, h_ch=None, ksize=3, pad=1, activation=F.relu, upsample=False, num_classes=0):
        super(Block, self).__init__()
        self.activation = activation
        self.upsample = upsample
        # A learnable 1x1 shortcut is needed whenever channels or spatial size change.
        self.learnable_sc = ((in_ch != out_ch) or upsample)
        if (h_ch is None):
            h_ch = out_ch
        self.num_classes = num_classes
        self.c1 = nn.Conv2d(in_ch, h_ch, ksize, 1, pad)
        self.c2 = nn.Conv2d(h_ch, out_ch, ksize, 1, pad)
        if (self.num_classes > 0):
            # Conditional BN: per-class affine parameters selected by label y.
            self.b1 = CategoricalConditionalBatchNorm2d(num_classes, in_ch)
            self.b2 = CategoricalConditionalBatchNorm2d(num_classes, h_ch)
        else:
            self.b1 = nn.BatchNorm2d(in_ch)
            self.b2 = nn.BatchNorm2d(h_ch)
        if self.learnable_sc:
            self.c_sc = nn.Conv2d(in_ch, out_ch, 1)
    def _initialize(self):
        # Xavier init with gain sqrt(2) for the main path, 1 for the shortcut.
        init.xavier_uniform_(self.c1.weight.tensor, gain=math.sqrt(2))
        init.xavier_uniform_(self.c2.weight.tensor, gain=math.sqrt(2))
        if self.learnable_sc:
            init.xavier_uniform_(self.c_sc.weight.tensor, gain=1)
    def forward(self, x, y=None, z=None, **kwargs):
        # Standard pre-activation residual sum.
        return (self.shortcut(x) + self.residual(x, y, z))
    def shortcut(self, x, **kwargs):
        if self.learnable_sc:
            if self.upsample:
                h = _upsample(x)
            h = self.c_sc(h)
            return h
        else:
            return x
    def residual(self, x, y=None, z=None, **kwargs):
        # BN -> activation -> (upsample) -> conv, twice; y selects the
        # conditional BN statistics when provided.
        if (y is not None):
            h = self.b1(x, y, **kwargs)
        else:
            h = self.b1(x)
        h = self.activation(h)
        if self.upsample:
            h = _upsample(h)
        h = self.c1(h)
        if (y is not None):
            h = self.b2(h, y, **kwargs)
        else:
            h = self.b2(h)
        return self.c2(self.activation(h))
def _get_keys_from_observation_space(observation_space: GymnasiumDictSpace) -> Sequence[str]:
return sorted(list(observation_space.keys())) |
def _calc_df_coefficient_C_l1_l2_Taylor_coeff(l1, l2, p1, p2, derivs_list):
    """Accumulate the (l1, l2) Taylor coefficient of the C coefficient.

    Sums prefactor * derivative * Psi over all admissible (m1, r1, m2, r2)
    with r1 <= l1 - m1 and r2 <= l2 - m2; derivs_list is indexed by r1 + r2.
    """
    # 1/(l1! l2!) prefactor, computed exactly as two reciprocals (order kept
    # to preserve floating-point behavior).
    prefactor = (1 / factorial(l1)) * (1 / factorial(l2))
    total = 0
    for m1 in range(l1 + 1):
        for r1 in range(l1 - m1 + 1):
            for m2 in range(l2 + 1):
                for r2 in range(l2 - m2 + 1):
                    term = (prefactor * derivs_list[r1 + r2]) * _Psi_coeff(l1, l2, p1, p2, m1, m2, r1, r2)
                    total = total + term
    return total
class Ok(Generic[T]):
    """Success variant of a Result type, wrapping a single value."""
    # __slots__ keeps instances small (no per-instance __dict__).
    __slots__ = ('_value',)
    def __init__(self, value: T):
        self._value = value
    def ok(self) -> T:
        """Return the wrapped success value."""
        return self._value
def get_lisp_from_graph_query(graph_query):
    """Convert a graph-structured query into an s-expression (lisp) program.

    Builds a bidirectional MultiDiGraph from the query's nodes/edges (reverse
    edges are tagged ``reverse=True``), then dispatches on the question node's
    aggregation function: 'count' uses count_function, everything else (incl.
    argmax/argmin) uses none_function.

    Args:
        graph_query: dict with 'nodes' (nid, id, node_type, question_node,
            function, class) and 'edges' (start, end, relation).

    Returns:
        The lisp program produced by count_function / none_function.
    """
    G = nx.MultiDiGraph()
    aggregation = 'none'
    arg_node = None
    for node in graph_query['nodes']:
        G.add_node(node['nid'], id=node['id'], type=node['node_type'], question=node['question_node'], function=node['function'], cla=node['class'])
        if node['question_node'] == 1:
            qid = node['nid']
        if node['function'] != 'none':
            aggregation = node['function']
            # 'arg' covers argmax/argmin-style superlatives.
            if 'arg' in node['function']:
                arg_node = node['nid']
    for edge in graph_query['edges']:
        # Forward edge plus an explicit reverse edge for traversal.
        G.add_edge(edge['start'], edge['end'], relation=edge['relation'], reverse=False, visited=False)
        G.add_edge(edge['end'], edge['start'], relation=edge['relation'], reverse=True, visited=False)
    if aggregation == 'count':
        return count_function(G, qid)
    return none_function(G, qid, arg_node=arg_node)
def get_new_exemplars(dict_of_features, normalised_features_dict, dict_of_means, exemp_size_dict):
    """Select per-class exemplar sets in three passes: overlap-region samples
    first, then edge-region samples, then interior samples to fill each class's
    quota (exemp_size_dict).  Selected samples are removed from the candidate
    pools between passes.

    Returns a dict mapping class label -> stacked exemplar feature array.
    """
    # Pass 1: samples lying in the inter-class overlap region, capped at the quota.
    overlapping_exemplars_indices = get_overlap_region_exemplars(normalised_features_dict)
    overlapping_exemplars = {label: np.array(features)[overlapping_exemplars_indices[label][:exemp_size_dict[label]]] for (label, features) in dict_of_features.items()}
    # Remove the chosen samples from both the raw and normalised pools.
    filtered_features = {label: np.delete(features, overlapping_exemplars_indices[label][:exemp_size_dict[label]], axis=0) for (label, features) in dict_of_features.items()}
    normalised_features_dict = {label: np.delete(features, overlapping_exemplars_indices[label][:exemp_size_dict[label]], axis=0) for (label, features) in normalised_features_dict.items()}
    # Pass 2: edge-region samples, used to top up what pass 1 left unfilled.
    edge_exemplar_indices = get_edge_region_exemplars(normalised_features_dict)
    edge_exemplars = {label: np.array(features)[edge_exemplar_indices[label]] for (label, features) in filtered_features.items()}
    reqd_exemplars_per_class = {key: (exemp_size_dict[key] - len(features)) for (key, features) in overlapping_exemplars.items()}
    total_exemplars = {label: np.vstack((overlapping_exemplars[label], edge_exemplars[label][:reqd_exemplars_per_class[label]])) for label in dict_of_features.keys()}
    filtered_features = {label: np.delete(features, edge_exemplar_indices[label][:exemp_size_dict[label]], axis=0) for (label, features) in filtered_features.items()}
    normalised_features_dict = {label: np.delete(features, edge_exemplar_indices[label][:exemp_size_dict[label]], axis=0) for (label, features) in normalised_features_dict.items()}
    # Pass 3: interior samples (closest to the class mean) fill any remaining quota.
    reqd_exemplars_per_class = {key: (exemp_size_dict[key] - len(features)) for (key, features) in total_exemplars.items()}
    interior_exemplar_indices = get_interior_region_exemplars(normalised_features_dict, dict_of_means, reqd_exemplars_per_class)
    interior_exemplars = {label: np.array(features)[interior_exemplar_indices[label]] for (label, features) in filtered_features.items() if (reqd_exemplars_per_class[label] > 0)}
    total_exemplars = {label: (np.vstack((interior_exemplars[label], total_exemplars[label])) if (reqd_exemplars_per_class[label] > 0) else total_exemplars[label]) for label in dict_of_features.keys()}
    return total_exemplars
def test_check_clustering_error():
    """check_clusterings must warn when given continuous labels/targets."""
    rng = np.random.RandomState(42)
    continuous_labels = rng.rand(500)
    continuous_targets = np.linspace(0.01, 1, 500) * 1e-06
    msg = 'Clustering metrics expects discrete values but received continuous values for label, and continuous values for target'
    with pytest.warns(UserWarning, match=msg):
        check_clusterings(continuous_targets, continuous_labels)
class ResNet101(nn.Module):
    """DeepLab-style ResNet-101 backbone for semantic segmentation.

    Layers 3 and 4 use dilated convolutions with stride 1 (output stride 1/8);
    ``layer5`` is an ASPP-style classifier head built from ``Classifier_Module2``.
    When ``bn_clr`` is set, an extra BatchNorm normalizes the backbone output
    before the head.
    """

    def __init__(self, block, layers, num_classes, BatchNorm, bn_clr=False):
        self.inplanes = 64
        self.bn_clr = bn_clr
        super(ResNet101, self).__init__()
        # Shared criterion; reused by CrossEntropy2d below.
        self.ce_loss = nn.CrossEntropyLoss(ignore_index=IGNORE_LABEL)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = BatchNorm(64, affine=affine_par)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
        self.layer1 = self._make_layer(block, 64, layers[0], BatchNorm=BatchNorm)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, BatchNorm=BatchNorm)
        # Dilated residual stages keep spatial resolution (stride 1).
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, BatchNorm=BatchNorm)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, BatchNorm=BatchNorm)
        self.layer5 = self._make_pred_layer(Classifier_Module2, 2048, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
        if self.bn_clr:
            self.bn_pretrain = BatchNorm(2048, affine=affine_par)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Small gaussian init (DeepLab convention). The fan-out count the
                # original computed here was never used, so it has been dropped.
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
        """Build one residual stage of `blocks` bottleneck blocks."""
        downsample = None
        # A projection shortcut is needed on stride/width changes, and is also
        # forced for the dilated stages to match pretrained checkpoints.
        if (stride != 1) or (self.inplanes != planes * block.expansion) or (dilation == 2) or (dilation == 4):
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                BatchNorm(planes * block.expansion, affine=affine_par),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample, BatchNorm=BatchNorm))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))
        return nn.Sequential(*layers)

    def _make_pred_layer(self, block, inplanes, dilation_series, padding_series, num_classes):
        """Instantiate the segmentation head."""
        return block(inplanes, dilation_series, padding_series, num_classes)

    def forward(self, x, lbl=None):
        """Forward pass.

        Returns logits upsampled to the input resolution; if `lbl` is given,
        returns `(logits, loss)` instead.
        """
        (_, _, h, w) = x.size()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        if self.bn_clr:
            x = self.bn_pretrain(x)
        out = self.layer5(x, get_feat=False)
        out = F.interpolate(out, size=(h, w), mode='bilinear', align_corners=True)
        if lbl is not None:
            loss = self.CrossEntropy2d(out, lbl)
            return (out, loss)
        return out

    def get_1x_lr_params(self):
        """Yield backbone parameters (trained at the base learning rate)."""
        b = []
        b.append(self.conv1)
        b.append(self.bn1)
        b.append(self.layer1)
        b.append(self.layer2)
        b.append(self.layer3)
        b.append(self.layer4)
        for i in range(len(b)):
            for j in b[i].modules():
                for k in j.parameters():
                    if k.requires_grad:
                        yield k

    def get_10x_lr_params(self):
        """Yield head (and optional bn_pretrain) parameters, trained at 10x LR."""
        b = []
        if self.bn_clr:
            b.append(self.bn_pretrain.parameters())
        b.append(self.layer5.parameters())
        for j in range(len(b)):
            for i in b[j]:
                yield i

    def optim_parameters(self, args):
        """Parameter groups for the optimizer: backbone at lr, head at 10x lr."""
        return [
            {'params': self.get_1x_lr_params(), 'lr': args.lr_semseg},
            {'params': self.get_10x_lr_params(), 'lr': 10 * args.lr_semseg},
        ]

    def adjust_learning_rate(self, args, optimizer, i):
        """Polynomial LR decay at step `i`; keeps the 10x head ratio."""
        lr = args.lr_semseg * ((1 - (float(i) / args.num_steps)) ** args.power)
        optimizer.param_groups[0]['lr'] = lr
        if len(optimizer.param_groups) > 1:
            optimizer.param_groups[1]['lr'] = lr * 10
def train(train_loader, models, CE, optimizers, epoch, logger, logging):
    """Run one training epoch of the three-model pipeline.

    models[0] consumes (inputs, depth) with gumbel sampling, models[1] consumes
    the pseudo-depth, and models[2] fuses both feature stacks into the final
    prediction. Loss/accuracy are all-reduced across distributed workers and
    logged every 50 steps on rank 0. Relies on module-level `args`,
    `AverageMeter`, `accuracy`, `reduce_tensor`, `to_python_float`.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    for m in models:
        m.train()
    end = time.time()
    for (i, (inputs, target)) in enumerate(train_loader):
        global_step = ((epoch * len(train_loader)) + i)
        target = target.cuda()
        inputs = inputs.cuda()
        (b, c, h, w) = inputs.size()
        # Pseudo-depth: per-pixel channel mean, replicated back to c channels.
        depth = torch.mean(inputs, dim=1).view(b, 1, h, w).repeat(1, c, 1, 1)
        (h1, h2, h3, h4, h5) = models[0](inputs, depth, gumbel=True)
        (d0, d1, d2, d3, d4) = models[1](depth)
        output = models[2](h1, h2, h3, h4, h5, d0, d1, d2, d3, d4)
        loss = (CE(output, target) * 1.0)
        (prec1, prec5) = accuracy(output.data, target, topk=(1, 5))
        # Average metrics across workers before accumulating local meters.
        reduced_loss = reduce_tensor(loss.data)
        prec1 = reduce_tensor(prec1)
        prec5 = reduce_tensor(prec5)
        losses.update(to_python_float(reduced_loss), inputs.size(0))
        top1.update(to_python_float(prec1), inputs.size(0))
        top5.update(to_python_float(prec5), inputs.size(0))
        # Standard step: clear all grads, backprop once, step every optimizer.
        for op in optimizers:
            op.zero_grad()
        loss.backward()
        for op in optimizers:
            op.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if (((i % 50) == 0) and (args.rank == 0)):
            # NOTE(review): the format string contains literal '\ ' sequences —
            # looks like mangled line continuations; left untouched to preserve
            # the exact log output.
            logging.info('Epoch: [{0}][{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} \ {top1.val:.3f} ({top1.avg:.3f})\ {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5))
            logger.add_scalar('train/losses', losses.avg, global_step=global_step)
            logger.add_scalar('train/top1', top1.avg, global_step=global_step)
            logger.add_scalar('train/top5', top5.avg, global_step=global_step)
            logger.add_scalar('train/lr', optimizers[0].param_groups[0]['lr'], global_step=global_step)
def register_Ns3RrComponentCarrierManager_methods(root_module, cls):
    """Register ns3::RrComponentCarrierManager constructors and methods on the
    PyBindGen class wrapper `cls` (generated-style binding registration)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::RrComponentCarrierManager const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Protected virtual SAP callbacks overridden by this carrier manager.
    cls.add_method('DoReportBufferStatus', 'void', [param('ns3::LteMacSapProvider::ReportBufferStatusParameters', 'params')], visibility='protected', is_virtual=True)
    cls.add_method('DoUlReceiveMacCe', 'void', [param('ns3::MacCeListElement_s', 'bsr'), param('uint8_t', 'componentCarrierId')], visibility='protected', is_virtual=True)
    cls.add_method('DoUlReceiveSr', 'void', [param('uint16_t', 'rnti'), param('uint8_t', 'componentCarrierId')], visibility='protected', is_virtual=True)
    return
def auread(path, channel_first=False, raw_format_param=None, **kwargs):
    """Read an audio resource via the active backend.

    path: file path / resource accepted by ResourceFileReader.
    channel_first: forwarded to the backend reader.
    raw_format_param: backend-specific raw-format options, forwarded as-is.
    """
    from nnabla.utils.data_source_loader import ResourceFileReader
    source = ResourceFileReader(path)
    # Bug fix: previously raw_format_param was hard-coded to None here, so the
    # caller's value was silently dropped.
    return backend_manager.module.auread(source, channel_first=channel_first, raw_format_param=raw_format_param, **kwargs)
def test_broadcast_float_int_union():
    """Broadcasting a NumpyArray against a union array keeps each side's parameters."""
    def _mask():
        return ak.index.Index8(np.array([0, 1, 0, 1], dtype='int8'))

    base = ak.contents.NumpyArray(np.arange(4), parameters={'name': 'this'})
    masked_ints = ak.contents.ByteMaskedArray(
        _mask(), ak.contents.NumpyArray(np.arange(4)), valid_when=True, parameters={'name': 'that'}
    )
    masked_strs = ak.contents.ByteMaskedArray(
        _mask(), ak.from_iter(['1', '2', '3', '4'], highlevel=False), valid_when=True, parameters={'name': 'other'}
    )
    union = ak.contents.UnionArray(
        _mask(), ak.index.Index32(np.array([0, 0, 1, 1], dtype='int32')), [masked_ints, masked_strs]
    )
    left, right = ak.operations.ak_broadcast_arrays.broadcast_arrays(base, union, highlevel=False)
    assert base.parameters == left.parameters
    assert union.parameters == right.parameters
class Cartpole(_Cartpole):
    """Cartpole environment wrapper that tracks episode id and running return."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Episode bookkeeping read by external loggers.
        self.episode_id = 0
        self.episode_return = 0
        self.bsuite_id = 'cartpole/0'

    def reset(self) -> dm_env.TimeStep:
        """Begin a new episode: bump the id and clear the running return."""
        self.episode_id += 1
        self.episode_return = 0
        return super().reset()

    def step(self, action: int) -> dm_env.TimeStep:
        """Forward `action` and accumulate the reward when one is emitted."""
        timestep = super().step(action)
        reward = timestep.reward
        if reward is not None:
            self.episode_return += reward
        return timestep
def score_nights_dataset(model, test_loader, device):
    """Compute the 2AFC score of `model` on the NIGHTS test set.

    For each triplet the model scores ref-vs-left and ref-vs-right distances;
    a prediction is correct when the smaller distance matches the human label
    (ties count 0.5). Returns the mean score as a tensor.
    """
    logging.info('Evaluating NIGHTS dataset.')
    left_dists, right_dists, labels = [], [], []
    with torch.no_grad():
        for _, (ref, left, right, label, _idx) in tqdm(enumerate(test_loader), total=len(test_loader)):
            ref = ref.to(device)
            left = left.to(device)
            right = right.to(device)
            label = label.to(device)
            d_left = model(ref, left)
            d_right = model(ref, right)
            # Scalar outputs are promoted to 1-D so the later cat works.
            if d_left.dim() < 1:
                d_left = d_left.unsqueeze(0)
                d_right = d_right.unsqueeze(0)
            left_dists.append(d_left.unsqueeze(1))
            right_dists.append(d_right.unsqueeze(1))
            labels.append(label.unsqueeze(1))
    d0s = torch.cat(left_dists, dim=0)
    d1s = torch.cat(right_dists, dim=0)
    targets = torch.cat(labels, dim=0)
    # target==1 means "right is closer"; ties contribute half credit.
    scores = (d0s < d1s) * (1.0 - targets) + (d1s < d0s) * targets + (d1s == d0s) * 0.5
    twoafc_score = torch.mean(scores, dim=0)
    logging.info(f'2AFC score: {str(twoafc_score)}')
    return twoafc_score
()
def test_write_file(file_system_agents: List[Agent], patched_api_requestor: None, monkeypatch: pytest.MonkeyPatch, level_to_run: int, challenge_name: str) -> None:
    """Run the write-file challenge at `level_to_run` and verify the expected
    lines appear in each produced workspace file."""
    idx = level_to_run - 1
    agent = file_system_agents[idx]
    run_interaction_loop(monkeypatch, agent, CYCLE_COUNT_PER_LEVEL[idx], challenge_name, level_to_run)
    for file_name, expected_lines in EXPECTED_OUTPUTS_PER_LEVEL[idx].items():
        content = read_file(get_workspace_path(agent, file_name), agent)
        for expected_line in expected_lines:
            assert expected_line in content, f"Expected '{expected_line}' in file {file_name}, but it was not found"
_pytesseract
_tokenizers
class LayoutLMv2ProcessorTest(unittest.TestCase):
    """Tests for LayoutLMv2Processor: save/load round-trips, model input names,
    and overflowing-token handling."""
    # Slow and fast tokenizer classes under test.
    tokenizer_class = LayoutLMv2Tokenizer
    rust_tokenizer_class = LayoutLMv2TokenizerFast

    def setUp(self):
        """Write a tiny WordPiece vocab and an image-processor config to a temp dir."""
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        image_processor_map = {'do_resize': True, 'size': 224, 'apply_ocr': True}
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
        self.image_processing_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processing_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(image_processor_map) + '\n'))

    def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
        """Slow tokenizer loaded from the temp dir."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
        """Fast tokenizer loaded from the temp dir."""
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]:
        """Both tokenizer variants, for parametrized-style loops."""
        return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]

    def get_image_processor(self, **kwargs):
        """Image processor loaded from the temp dir."""
        return LayoutLMv2ImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """One random CHW uint8 image, converted to a PIL image (HWC)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, (- 1))) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        """save_pretrained/from_pretrained round-trips both components unchanged."""
        image_processor = self.get_image_processor()
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
            processor.save_pretrained(self.tmpdirname)
            processor = LayoutLMv2Processor.from_pretrained(self.tmpdirname)
            self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
            self.assertIsInstance(processor.tokenizer, (LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast))
            self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
            self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to from_pretrained override the saved config."""
        processor = LayoutLMv2Processor(image_processor=self.get_image_processor(), tokenizer=self.get_tokenizer())
        processor.save_pretrained(self.tmpdirname)
        # Slow-tokenizer variant (use_fast=False).
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30)
        processor = LayoutLMv2Processor.from_pretrained(self.tmpdirname, use_fast=False, bos_token='(BOS)', eos_token='(EOS)', do_resize=False, size=30)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, LayoutLMv2Tokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)
        # Fast-tokenizer variant (default).
        tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30)
        processor = LayoutLMv2Processor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_resize=False, size=30)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, LayoutLMv2TokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)

    def test_model_input_names(self):
        """Processor output keys match the declared model_input_names."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = LayoutLMv2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_overflowing_tokens(self):
        """With return_overflowing_tokens, one image is kept per produced window."""
        from datasets import load_dataset
        datasets = load_dataset('nielsr/funsd')
        processor = LayoutLMv2Processor.from_pretrained('microsoft/layoutlmv2-base-uncased', revision='no_ocr')

        def preprocess_data(examples):
            images = [Image.open(path).convert('RGB') for path in examples['image_path']]
            words = examples['words']
            boxes = examples['bboxes']
            word_labels = examples['ner_tags']
            encoded_inputs = processor(images, words, boxes=boxes, word_labels=word_labels, padding='max_length', truncation=True, return_overflowing_tokens=True, stride=50, return_offsets_mapping=True, return_tensors='pt')
            return encoded_inputs

        train_data = preprocess_data(datasets['train'])
        self.assertEqual(len(train_data['image']), len(train_data['input_ids']))
def _seg_26():
    # Segment 26 of a generated IDNA/UTS#46 code-point table:
    # (codepoint, status[, mapping]) tuples, where 'V' = valid,
    # 'M' = mapped, 'X' = disallowed.
    # NOTE(review): every 'M' entry's mapping string is empty (u'') — in the
    # upstream table these carry non-ASCII replacement characters, which look
    # to have been lost in transcoding. TODO confirm against the generator.
    return [(11472, 'M', u''), (11473, 'V'), (11474, 'M', u''), (11475, 'V'), (11476, 'M', u''), (11477, 'V'), (11478, 'M', u''), (11479, 'V'), (11480, 'M', u''), (11481, 'V'), (11482, 'M', u''), (11483, 'V'), (11484, 'M', u''), (11485, 'V'), (11486, 'M', u''), (11487, 'V'), (11488, 'M', u''), (11489, 'V'), (11490, 'M', u''), (11491, 'V'), (11499, 'M', u''), (11500, 'V'), (11501, 'M', u''), (11502, 'V'), (11506, 'M', u''), (11507, 'V'), (11508, 'X'), (11513, 'V'), (11558, 'X'), (11559, 'V'), (11560, 'X'), (11565, 'V'), (11566, 'X'), (11568, 'V'), (11624, 'X'), (11631, 'M', u''), (11632, 'V'), (11633, 'X'), (11647, 'V'), (11671, 'X'), (11680, 'V'), (11687, 'X'), (11688, 'V'), (11695, 'X'), (11696, 'V'), (11703, 'X'), (11704, 'V'), (11711, 'X'), (11712, 'V'), (11719, 'X'), (11720, 'V'), (11727, 'X'), (11728, 'V'), (11735, 'X'), (11736, 'V'), (11743, 'X'), (11744, 'V'), (11850, 'X'), (11904, 'V'), (11930, 'X'), (11931, 'V'), (11935, 'M', u''), (11936, 'V'), (12019, 'M', u''), (12020, 'X'), (12032, 'M', u''), (12033, 'M', u''), (12034, 'M', u''), (12035, 'M', u''), (12036, 'M', u''), (12037, 'M', u''), (12038, 'M', u''), (12039, 'M', u''), (12040, 'M', u''), (12041, 'M', u''), (12042, 'M', u''), (12043, 'M', u''), (12044, 'M', u''), (12045, 'M', u''), (12046, 'M', u''), (12047, 'M', u''), (12048, 'M', u''), (12049, 'M', u''), (12050, 'M', u''), (12051, 'M', u''), (12052, 'M', u''), (12053, 'M', u''), (12054, 'M', u''), (12055, 'M', u''), (12056, 'M', u''), (12057, 'M', u''), (12058, 'M', u''), (12059, 'M', u''), (12060, 'M', u''), (12061, 'M', u''), (12062, 'M', u''), (12063, 'M', u''), (12064, 'M', u''), (12065, 'M', u''), (12066, 'M', u'')]
class Length(object):
    """Validate that a field's length lies within [min, max].

    min/max of -1 mean "unbounded" on that side; at least one bound is
    required. `message` overrides the default error text.
    """

    def __init__(self, min=(- 1), max=(- 1), message=None):
        assert ((min != (- 1)) or (max != (- 1))), 'At least one of `min` or `max` must be specified.'
        assert ((max == (- 1)) or (min <= max)), '`min` cannot be more than `max`.'
        self.min = min
        self.max = max
        self.message = message

    def __call__(self, form, field):
        length = len(field.data) if field.data else 0
        # Guard clause: nothing to do when the length is within bounds.
        if length >= self.min and (self.max == (- 1) or length <= self.max):
            return
        message = self.message
        if message is None:
            # Pick the most specific default message for the configured bounds.
            if self.max == (- 1):
                message = field.ngettext('Field must be at least %(min)d character long.', 'Field must be at least %(min)d characters long.', self.min)
            elif self.min == (- 1):
                message = field.ngettext('Field cannot be longer than %(max)d character.', 'Field cannot be longer than %(max)d characters.', self.max)
            elif self.min == self.max:
                message = field.ngettext('Field must be exactly %(max)d character long.', 'Field must be exactly %(max)d characters long.', self.max)
            else:
                message = field.gettext('Field must be between %(min)d and %(max)d characters long.')
        raise ValidationError(message % dict(min=self.min, max=self.max, length=length))
.parametrize('decorators, expected', [pytest.param('property', False), pytest.param(['property', 'contextmanager'], True)])
def test__has_decorator(comments_tree, decorators, expected):
    """has_decorator should report whether `public_function` carries the given decorator(s)."""
    node = get_function_node_from_ast(comments_tree, 'public_function')
    result = has_decorator(astroid_to_ast(node), decorators)
    assert result == expected
def build_rnnt(num_classes: int, input_dim: int, num_encoder_layers: int=4, num_decoder_layers: int=1, encoder_hidden_state_dim: int=320, decoder_hidden_state_dim: int=512, output_dim: int=512, rnn_type: str='lstm', bidirectional: bool=True, encoder_dropout_p: float=0.2, decoder_dropout_p: float=0.2, sos_id: int=1, eos_id: int=2) -> nn.DataParallel:
    """Build an RNN-Transducer with the given topology and wrap it in DataParallel."""
    model = RNNTransducer(
        num_classes=num_classes,
        input_dim=input_dim,
        num_encoder_layers=num_encoder_layers,
        num_decoder_layers=num_decoder_layers,
        encoder_hidden_state_dim=encoder_hidden_state_dim,
        decoder_hidden_state_dim=decoder_hidden_state_dim,
        output_dim=output_dim,
        rnn_type=rnn_type,
        bidirectional=bidirectional,
        encoder_dropout_p=encoder_dropout_p,
        decoder_dropout_p=decoder_dropout_p,
        sos_id=sos_id,
        eos_id=eos_id,
    )
    return nn.DataParallel(model)
def Get_dataloader(path, batch):
    """Build the training DataLoader: resize to 256x256, map pixels to [-1, 1]."""
    preprocessing = [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
    dataset = ImageDataset(path, transforms_=preprocessing)
    return DataLoader(dataset, batch_size=batch, shuffle=True, num_workers=2, drop_last=True)
def parse_inputs(args):
    """Parse CLI arguments and load a whitespace-separated dataset file.

    args: argv-style list — [prog, filename, lamb, alpha, gamma, max_norm, max_steps].
    The file has one sample per line; all columns but the last are features.
    Returns (X, Y, lamb, alpha, gamma, max_norm, max_steps); on any parse or
    I/O error prints `usage_str` and exits.
    """
    try:
        filename = args[1]
        lamb = float(args[2])
        alpha = float(args[3])
        gamma = float(args[4])
        max_norm = float(args[5])
        max_steps = int(args[6])
        X, Y = [], []
        with open(filename) as f:
            for line in f:
                nums = [float(x) for x in line.split()]
                X.append(nums[:(- 1)])
                Y.append(nums[(- 1)])
        X = np.array(X)
        Y = np.array(Y)
    except (IndexError, ValueError, OSError):
        # Narrowed from a bare `except:` so genuine bugs (NameError, etc.) and
        # KeyboardInterrupt/SystemExit are no longer silently swallowed.
        print(usage_str)
        exit(0)
    return (X, Y, lamb, alpha, gamma, max_norm, max_steps)
class BModel():
    """Parsed representation of a .bmodel file.

    File layout: a fixed-size numpy-structured header, then a flatbuffers
    model description of `flatbuffers_size` bytes, then `binary_size` bytes
    of raw weight/instruction payload.
    """

    def __init__(self, bmodel_file):
        with bmodel_context(self):
            self.head = None
            binary_desc = None
            binary = None
            self.file_name = bmodel_file
            with open(bmodel_file, 'rb') as file_obj:
                file_obj.seek(0, 0)
                # Header first; its fields give the sizes of the two sections that follow.
                self.head = np.frombuffer(file_obj.read(bmodel_header_type.itemsize), dtype=bmodel_header_type)
                binary_desc = file_obj.read(self.head['flatbuffers_size'][0])
                # memoryview over a mutable copy so sub-readers can slice without copying.
                binary = memoryview(bytearray(file_obj.read(self.head['binary_size'][0])))
            bmodel: bmodel_fbs.Model = bmodel_fbs.Model.GetRootAsModel(binary_desc, 0)
            self.binary = binary
            self.chip = bmodel.Chip().decode()
            self.version = bmodel.Version().decode()
            self.type = bmodel.Type().decode()
            self.kernel_module = KernelModule(bmodel.KernelModule(), binary)
            self.neuron_size = bmodel.NeuronSize()
            self.time = bmodel.Time().decode()
            self.net: List[Net] = FBSArray(bmodel, ('Net', Net), binary)
            self.device_num = bmodel.DeviceNum()
            self.core_num = self.net[0].parameter[0].core_num
            self.context = get_target_context(self.chip)

    def __repr__(self):
        return pformat(self.__dict__)

    def serialize(self, file_name):
        """Re-serialize this model to `file_name` (header + flatbuffers + payload)."""
        import flatbuffers
        builder = flatbuffers.Builder(1024)
        payload = []

        def save_binary(data):
            # Append `data` to the shared payload and return its (offset, size).
            start = len(payload)
            size = len(data)
            payload.extend(data)
            return (start, size)

        module = bmodel_fbs.Model
        chip = builder.CreateString(self.chip)
        version = builder.CreateString(self.version)
        type_ = builder.CreateString(self.type)
        time = builder.CreateString(self.time)
        net = self.net.serialize(builder, save_binary)
        kernel_module = self.kernel_module.serialize(builder, save_binary)
        module.Start(builder)
        module.AddType(builder, type_)
        module.AddVersion(builder, version)
        module.AddTime(builder, time)
        module.AddChip(builder, chip)
        module.AddNet(builder, net)
        module.AddNeuronSize(builder, self.neuron_size)
        if kernel_module:
            module.AddKernelModule(builder, kernel_module)
        module.AddDeviceNum(builder, self.device_num)
        model = bmodel_fbs.Model.End(builder)
        builder.Finish(model)
        buffer = builder.Output()
        # Rebuild the header with the new section sizes; magic/reserved carried over.
        magic = self.head['magic']
        header_size = self.head['header_size']
        reserved = self.head['reserved']
        header = np.array((magic, header_size, len(buffer), len(payload), reserved), dtype=bmodel_header_type)
        # NOTE(review): file opened in text mode ('w') although only binary is
        # written via ndarray.tofile — 'wb' looks intended; TODO confirm.
        with open(file_name, 'w') as f:
            header.tofile(f)
            np.array(buffer).tofile(f)
            np.array(payload, np.uint8).tofile(f)

    def decode_cpu_op(self, cpu_param: CpuParam):
        # Pass-through hook; subclasses/targets may override.
        return cpu_param

    def decode_dynamic_ir(self, ir_buffer):
        # Pass-through hook; wraps the raw IR buffer in a list.
        return [ir_buffer]
def l2_loss(pred, target, reduction='mean'):
    """Per-sample L2 (Euclidean) distance between `pred` and `target`.

    Each sample is flattened; `reduction` may be 'mean', 'sum', or anything
    else to get the unreduced (batch, 1) distances.
    """
    assert pred.size() == target.size() and target.numel() > 0
    assert pred.size()[0] == target.size()[0]
    n_samples = pred.size()[0]
    flat_diff = (pred - target).view(n_samples, (- 1))
    loss = torch.norm(flat_diff, p=2, dim=1, keepdim=True)
    if reduction == 'sum':
        return loss.sum()
    if reduction == 'mean':
        return loss.mean()
    return loss
class MaximumAngleCalculator(MeshQualityCalculator):
    """Mesh-quality calculator collecting per-cell maximum angles on MPI rank 0."""

    def compute(self, mesh: fenics.Mesh) -> np.ndarray:
        """Gather the per-cell maximum interior angles of `mesh` onto rank 0.

        Returns the concatenated array on rank 0; every other rank gets a
        np.zeros(1) placeholder.
        """
        comm = mesh.mpi_comm()
        # Restrict to locally-owned cells: ghost cells sit past the ghost offset.
        ghost_offset = mesh.topology().ghost_offset(mesh.topology().dim())
        maximum_angle_array = self._quality_object.maximum_angle(mesh).array()[:ghost_offset]
        # NOTE(review): comm.gather yields a list of arrays on root and None on
        # other ranks, so the ndarray annotation only holds after the branch below.
        maximum_angle_list: np.ndarray = comm.gather(maximum_angle_array, root=0)
        if (comm.rank == 0):
            maximum_angle_list = np.concatenate(maximum_angle_list, axis=None)
        else:
            maximum_angle_list = np.zeros(1)
        return maximum_angle_list
class BdfFontFile(FontFile.FontFile):
    """Font file reader for BDF (glyph bitmap distribution format) fonts."""

    def __init__(self, fp):
        super().__init__()
        s = fp.readline()
        # Only BDF version 2.1 files are accepted.
        if (s[:13] != b'STARTFONT 2.1'):
            raise SyntaxError('not a valid BDF file')
        props = {}
        comments = []
        # Property section: "KEY value" lines up to ENDPROPERTIES (or EOF).
        while True:
            s = fp.readline()
            if ((not s) or (s[:13] == b'ENDPROPERTIES')):
                break
            i = s.find(b' ')
            # Value keeps everything after the first space, minus the trailing newline.
            props[s[:i].decode('ascii')] = s[(i + 1):(- 1)].decode('ascii')
            if (s[:i] in [b'COMMENT', b'COPYRIGHT']):
                if (s.find(b'LogicalFontDescription') < 0):
                    comments.append(s[(i + 1):(- 1)].decode('ascii'))
        # Glyph section: read characters until bdf_char signals end-of-font.
        while True:
            c = bdf_char(fp)
            if (not c):
                break
            (id, ch, (xy, dst, src), im) = c
            # Keep only glyphs whose codepoint fits the fixed-size glyph table.
            if (0 <= ch < len(self.glyph)):
                self.glyph[ch] = (xy, dst, src, im)
def write_temp_2tag(filename, bio_data):
    """Convert two-column BIO data to JSON and write it to `filename`.

    bio_data: sentences separated by blank lines; each token line is
    "<text>\\t<tag1> [<tag2> ...]". The output is a list of sentences, each a
    list of {'text': ..., 'multi_ner': [...]} dicts.
    """
    doc = []
    for sentence in bio_data.split('\n\n'):
        # Robustness fix: skip empty segments (e.g. trailing newlines), which
        # previously produced an empty token line and crashed the tab-split.
        if not sentence.strip():
            continue
        tokens = []
        for word in sentence.split('\n'):
            if not word:
                continue
            (text, tags) = word.split('\t', maxsplit=1)
            tokens.append({'text': text, 'multi_ner': tags.split()})
        doc.append(tokens)
    with open(filename, 'w', encoding='utf-8') as fout:
        json.dump(doc, fout)
_builder('textcaps_caption')
class TextCapsCapBuilder(BaseDatasetBuilder):
    """Dataset builder for TextCaps image captioning."""
    # Dataset classes used for the train and eval splits.
    train_dataset_cls = TextCapsCapDataset
    eval_dataset_cls = TextCapsCapEvalDataset
    # Builder variant name -> default config path.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/textcaps/defaults.yaml'}
class PairBasicEvaluator(BasicEvaluator):
    """BasicEvaluator variant that drops STEP_IDX sentinel entries before scoring."""

    def evaluate(self, predict, ground_truth):
        """Filter the step-index sentinel out of both sequences, then defer to the base class."""
        filtered_pred = [item for item in predict if item != STEP_IDX]
        filtered_gold = [item for item in ground_truth if item != STEP_IDX]
        return super().evaluate(filtered_pred, filtered_gold)
def hashing_trick(word, n, hash_function=None):
    """Map `word` to a bucket index in [1, n-1] via the hashing trick.

    hash_function may be None (builtin `hash`; NOTE: salted per process for
    str, so not stable across runs), the string 'md5' for a digest-based
    stable hash, or any callable returning an int. Index 0 is reserved.
    """
    if hash_function == 'md5':
        def hash_function(w):
            return int(md5(w.encode()).hexdigest(), 16)
    elif hash_function is None:
        hash_function = hash
    return hash_function(word) % (n - 1) + 1
def move_billing_codes(patient: RawPatient) -> RawPatient:
    """Shift billing-table events stamped at their visit's start to the visit's end.

    Billing codes (condition/procedure/observation rows) are often recorded at
    the visit start even though they describe the whole visit; this re-stamps
    them to the visit end and resorts the event list.
    """
    # First pass: index each visit's start/end by visit_id.
    visit_starts: Dict[(int, datetime.datetime)] = {}
    visit_ends: Dict[(int, datetime.datetime)] = {}
    tables_w_billing_codes: List[str] = ['condition_occurrence', 'procedure_occurrence', 'observation']
    for event in patient.events:
        if ((event.omop_table == 'visit_occurrence') and (event.visit_id is not None)):
            if ((event.start is None) or (event.end is None)):
                raise RuntimeError(f'Missing visit start/end time for visit_occurrence_id {event.visit_id}')
            visit_starts[event.visit_id] = event.start
            visit_ends[event.visit_id] = event.end
    # Second pass: re-stamp billing events whose start equals their visit's start.
    new_events = []
    for event in patient.events:
        if ((event.omop_table in tables_w_billing_codes) and (event.visit_id is not None)):
            visit_start = visit_starts.get(event.visit_id)
            if (event.start == visit_start):
                # NOTE(review): if this visit_id has no visit_occurrence row,
                # .get returns None for both start and end — event.start would
                # then become None and max() below would raise. Presumably every
                # referenced visit_id resolves; TODO confirm upstream guarantee.
                visit_end = visit_ends.get(event.visit_id)
                event.start = visit_end
                if (event.end is not None):
                    # Never move the end earlier than the (new) start.
                    event.end = max(event.end, visit_end)
                new_events.append(event)
            else:
                new_events.append(event)
        else:
            new_events.append(event)
    patient.events = new_events
    # Timestamps changed, so restore chronological ordering.
    patient.resort()
    return patient
def resnet152_retinanet(num_classes, inputs=None, **kwargs):
    """Convenience constructor: RetinaNet with a ResNet-152 backbone."""
    backbone_name = 'resnet152'
    return resnet_retinanet(num_classes=num_classes, backbone=backbone_name, inputs=inputs, **kwargs)
class TestCoder(unittest.TestCase):
    """Round-trip tests for HuffmanCoder serialization and coding."""

    def test_coder_can_io(self):
        """A coder written to disk and read back compares equal."""
        corpus = make_data()
        coder = make_code_builder(corpus).build_code()
        with NamedTemporaryFile() as tmp_fp:
            coder.to_file(tmp_fp.name)
            reloaded = HuffmanCoder.from_file(tmp_fp.name)
            self.assertEqual(coder, reloaded)

    def test_coder_can_encode_decode(self):
        """encode/decode round-trips both the training corpus and unseen data."""
        corpus = make_data()
        coder = make_code_builder(corpus).build_code()

        def roundtrip(sentences):
            encoded = [coder.encode(sentence) for sentence in sentences]
            return [[node.symbol for node in coder.decode(enc)] for enc in encoded]

        self.assertEqual(roundtrip(corpus), corpus)
        unseen = make_data()
        self.assertEqual(roundtrip(unseen), unseen)
def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
    """Inception v1 (GoogLeNet) feature extractor, TF-slim style.

    Builds the network up to and including `final_endpoint` and returns
    (net, end_points) where end_points maps every constructed endpoint name
    to its tensor. Raises ValueError for an unknown final_endpoint.
    """
    end_points = {}
    with tf.variable_scope(scope, 'InceptionV1', [inputs]):
        # Defaults: truncated-normal init, stride 1, SAME padding everywhere
        # unless a layer overrides them.
        with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_initializer=trunc_normal(0.01)):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d], stride=1, padding='SAME'):
                # ---- Stem ----
                end_point = 'Conv2d_1a_7x7'
                net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'MaxPool_2a_3x3'
                net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'Conv2d_2b_1x1'
                net = slim.conv2d(net, 64, [1, 1], scope=end_point)
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'Conv2d_2c_3x3'
                net = slim.conv2d(net, 192, [3, 3], scope=end_point)
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'MaxPool_3a_3x3'
                net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                # ---- Inception blocks: four parallel branches (1x1 / 1x1->3x3 /
                # 1x1->3x3 / pool->1x1) concatenated on the channel axis. ----
                end_point = 'Mixed_3b'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
                    net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'Mixed_3c'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                    net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'MaxPool_4a_3x3'
                net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'Mixed_4b'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                    net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'Mixed_4c'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                    net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'Mixed_4d'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                    net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'Mixed_4e'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                    net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'Mixed_4f'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
                    net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'MaxPool_5a_2x2'
                net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'Mixed_5b'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
                        # NOTE(review): scope 'Conv2d_0a_3x3' (not 0b) breaks the
                        # naming pattern — presumably kept for checkpoint
                        # compatibility with released weights; do not "fix".
                        branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
                    net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
                end_point = 'Mixed_5c'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                        branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
                        branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                        branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
                    net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if (final_endpoint == end_point):
                    return (net, end_points)
        raise ValueError(('Unknown final endpoint %s' % final_endpoint))
def normalize(audio, target_level=(- 25)):
    """Scale `audio` so its RMS level equals `target_level` dBFS.

    EPS (module-level) guards against division by zero on silent input.
    """
    rms_level = ((audio ** 2).mean()) ** 0.5
    gain = (10 ** (target_level / 20)) / (rms_level + EPS)
    return audio * gain
def normalize_adj_torch(adj):
    """Symmetrically normalize an adjacency matrix: D^{-1/2} A D^{-1/2}.

    Accepts a 2-D matrix, or a 4-D batch in which case each slice along dim 1
    of batch element 0 is normalized (matching the original behaviour: only
    index 0 of the leading dim is processed).
    """
    def _sym_norm(mat):
        # Degree vector, inverse square root (NaNs from zero-degree nodes -> 0).
        degree = mat.sum(1)
        inv_sqrt = degree.pow_((- 0.5))
        inv_sqrt[torch.isnan(inv_sqrt)] = 0
        d_half = torch.diag(inv_sqrt)
        return torch.matmul(torch.matmul(d_half, mat), d_half)

    if len(adj.size()) == 4:
        normalized = torch.zeros(adj.size()).type_as(adj)
        for idx in range(adj.size(1)):
            normalized[(0, idx, ...)] = _sym_norm(adj[(0, idx)])
        return normalized
    return _sym_norm(adj)
def _get_video_feat_by_vid(_feat_dir, vid):
    """Load and L2-normalize the precomputed features for video `vid`."""
    feat_path = os.path.join(_feat_dir, f'{vid}.npz')
    features = np.load(feat_path)['features'].astype(np.float32)
    return l2_normalize_np_array(features)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.