code stringlengths 101 5.91M |
|---|
class OrderedSetPartition(ClonableArray, metaclass=InheritComparisonClasscallMetaclass):
    # An ordered set partition: an ordered sequence of pairwise-disjoint,
    # non-empty blocks (stored as frozensets) whose union is the base set.
    # SageMath element class (ClonableArray + classcall construction dispatch).
    # NOTE(review): bare calls such as ``_map(name='to composition')`` below look
    # like decorators whose ``@`` lines were stripped when this code was
    # extracted -- confirm against the original SageMath source.
    def __classcall_private__(cls, parts=None, from_word=None, check=True):
        """Dispatch ``OrderedSetPartition(...)`` construction.

        Accepts nothing (empty partition), a packed word via ``from_word``,
        a word-like flat sequence, or an explicit iterable of blocks.
        """
        if ((parts is None) and (from_word is None)):
            P = OrderedSetPartitions([])
            return P.element_class(P, [])
        W = Words(infinite=False)
        if from_word:
            return OrderedSetPartitions().from_finite_word(W(from_word))
        # A finite word, or a flat sequence of integers/strings, is treated
        # as a packed word rather than as a list of blocks.
        if ((parts in W) or (parts and ((parts[0] in ZZ) or isinstance(parts[0], str)))):
            return OrderedSetPartitions().from_finite_word(W(parts))
        P = OrderedSetPartitions(set((x for p in parts for x in p)))
        return P.element_class(P, parts, check=check)
    def __init__(self, parent, s, check=True):
        """Store the blocks of ``s`` as frozensets."""
        ClonableArray.__init__(self, parent, [frozenset(part) for part in s], check=check)
    def _repr_(self):
        """Sage repr: each block printed as a sorted ``{...}`` set, in order."""
        return (('[' + ', '.join(((('{' + repr(sorted(x))[1:(- 1)]) + '}') for x in self))) + ']')
    def check(self):
        """Validate this element by membership in its parent."""
        par = parent(self)
        # NOTE(review): assert is stripped under -O; validation only.
        assert (self in par), ('%s not in %s' % (self, par))
    def base_set(self):
        """Return the underlying (unordered) base set."""
        try:
            # Fast path: the parent already knows its ground set.
            return parent(self)._set
        except AttributeError:
            return frozenset((x for part in self for x in part))
    def base_set_cardinality(self):
        """Return the number of elements of the base set."""
        try:
            return len(parent(self)._set)
        except AttributeError:
            return sum((len(part) for part in self))
    # Alias: the "size" of an ordered set partition is its base-set cardinality.
    size = base_set_cardinality
    def length(self):
        """Return the number of blocks."""
        return len(self)
    # NOTE(review): presumably a stripped ``@combinatorial_map`` decorator.
    _map(name='to composition')
    def to_composition(self):
        """Return the composition of block sizes."""
        return Composition([len(p) for p in self])
    def sum(osps):
        """Concatenate several ordered set partitions (blocks in sequence)."""
        lset = set((x for osp in osps for x in osp.base_set()))
        return OrderedSetPartitions(lset)(sum((list(i) for i in osps), []))
    def reversed(self):
        """Return the partition with the block order reversed."""
        par = parent(self)
        return par(list(reversed(self)))
    def complement(self):
        """Apply the complement map i -> min + max - i to every element."""
        if (len(self) <= 1):
            # Shortcut: with at most one block the partition is returned unchanged.
            return self
        base_set = self.base_set()
        m = min(base_set)
        M = max(base_set)
        mM = (m + M)
        par = parent(self)
        return par([[(mM - i) for i in part] for part in self])
    def finer(self):
        """Return the set of ordered set partitions refining each block in order."""
        par = parent(self)
        if (not self):
            return FiniteEnumeratedSet([self])
        # Refine every block independently, then splice the refinements together.
        return FiniteEnumeratedSet([par(sum((list(i) for i in C), [])) for C in product(*[OrderedSetPartitions(X) for X in self])])
    def is_finer(self, co2):
        """Return whether ``self`` refines ``co2`` (same base set required)."""
        co1 = self
        if (co1.base_set() != co2.base_set()):
            raise ValueError(('ordered set partitions self (= %s) and co2 (= %s) must be of the same set' % (self, co2)))
        i1 = 0
        for j2 in co2:
            # Consume consecutive blocks of co1 until their union reaches
            # the size of j2; that union must then lie inside j2.
            sum1 = set()
            while (len(sum1) < len(j2)):
                sum1 = sum1.union(co1[i1])
                i1 += 1
            if (not sum1.issubset(j2)):
                return False
        return True
    def fatten(self, grouping):
        """Merge consecutive blocks according to the composition ``grouping``."""
        result = ([None] * len(grouping))
        j = 0
        for i in range(len(grouping)):
            # Union the next grouping[i] consecutive blocks into one.
            result[i] = set().union(*self[j:(j + grouping[i])])
            j += grouping[i]
        return parent(self)(result)
    def fatter(self):
        """Return all coarsenings obtained by merging consecutive blocks."""
        return Compositions(len(self)).map(self.fatten)
    def bottom_up_osp(X, comp):
        """Split the sorted elements of ``X`` into consecutive runs of sizes ``comp``."""
        xs = sorted(X)
        result = ([None] * len(comp))
        j = 0
        for i in range(len(comp)):
            result[i] = set(xs[j:(j + comp[i])])
            j += comp[i]
        return OrderedSetPartitions(X)(result)
    def strongly_finer(self):
        """Return the refinements whose consecutive new blocks are increasing runs."""
        par = parent(self)
        if (not self):
            return FiniteEnumeratedSet([self])
        # NOTE(review): works because accessing a function through the class
        # yields a plain function in Python 3; likely a stripped @staticmethod.
        buo = OrderedSetPartition.bottom_up_osp
        return FiniteEnumeratedSet([par(sum((list(P) for P in C), [])) for C in product(*[[buo(X, comp) for comp in Compositions(len(X))] for X in self])])
    def is_strongly_finer(self, co2):
        """Return whether ``self`` strongly refines ``co2``."""
        co1 = self
        if (co1.base_set() != co2.base_set()):
            raise ValueError(('ordered set partitions self (= %s) and co2 (= %s) must be of the same set' % (self, co2)))
        i1 = 0
        for j2 in co2:
            sum1 = set()
            while (len(sum1) < len(j2)):
                next = co1[i1]
                # Strong refinement additionally requires the consumed blocks
                # to be increasing: each block must lie above the previous ones.
                if (sum1 and (max(sum1) >= min(next))):
                    return False
                sum1 = sum1.union(next)
                i1 += 1
            if (not sum1.issubset(j2)):
                return False
        return True
    def strongly_fatter(self):
        """Return all coarsenings obtained by merging blocks that stay increasing."""
        c = [sorted(X) for X in self]
        l = (len(c) - 1)
        # Block boundaries after which merging is impossible (a descent between
        # consecutive blocks); -1 and l act as sentinels.
        g = (([(- 1)] + [i for i in range(l) if (c[i][(- 1)] > c[(i + 1)][0])]) + [l])
        # Fatten each mergeable stretch independently, then combine the choices.
        subcomps = [OrderedSetPartition(c[(g[i] + 1):(g[(i + 1)] + 1)]) for i in range((len(g) - 1))]
        fattenings = [list(subcomp.fatter()) for subcomp in subcomps]
        return FiniteEnumeratedSet([OrderedSetPartition(sum([list(gg) for gg in fattening], [])) for fattening in product(*fattenings)])
    # NOTE(review): presumably a stripped ``@combinatorial_map`` decorator.
    _map(name='to packed word')
    def to_packed_word(self):
        """Return the packed word w with w[x] = 1-based index of the block containing x."""
        X = sorted(self.base_set())
        out = {}
        for i in range(len(self)):
            for letter in self[i]:
                out[letter] = i
        W = Words(infinite=False)
        return W([(out[letter] + 1) for letter in X])
    def number_of_inversions(self):
        """Return the inversion statistic: pairs (j, min(part)) with j in an earlier block and min(part) < j."""
        num_invs = 0
        for (m, part) in enumerate(self):
            i = min(part)
            for ell in range(m):
                num_invs += sum((1 for j in self[ell] if (i < j)))
        return ZZ(num_invs) |
class Normalize(object):
    """Normalize a tensor with the given per-channel mean and std.

    Thin callable wrapper that delegates the arithmetic to ``F.normalize``;
    ``inplace`` controls whether the input tensor is modified in place.
    """

    def __init__(self, mean, std, inplace=False):
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def __call__(self, tensor):
        # All work happens in the functional helper.
        return F.normalize(tensor, self.mean, self.std, self.inplace)

    def __repr__(self):
        return '{}(mean={}, std={})'.format(type(self).__name__, self.mean, self.std)
def group_dicts_by_first_key(list_of_dicts: List[Dict[str, float]]) -> Dict[str, List[Dict[str, float]]]:
    """Bucket the dicts by the value each holds under the first dict's first key.

    The grouping key name is taken from the first dictionary in the list;
    every dictionary is then grouped by its value for that key.
    """
    key_name = get_first_key_of_dictionary(list_of_dicts[0])
    grouped = defaultdict(list)
    for record in list_of_dicts:
        grouped[record[key_name]].append(record)
    return dict(grouped)
def yield_top_down_sequence(tree, transition_scheme=TransitionScheme.TOP_DOWN_UNARY):
    # Recursively yield the top-down transition sequence (Shift / Open /
    # Close / CompoundUnary) that rebuilds ``tree`` under the given scheme.
    if tree.is_preterminal():
        # A preterminal corresponds to exactly one Shift of its word.
        (yield Shift())
        return
    if tree.is_leaf():
        # Bare leaves produce no transitions of their own.
        return
    if (transition_scheme is TransitionScheme.TOP_DOWN_UNARY):
        if (len(tree.children) == 1):
            # Collapse a unary chain into a single CompoundUnary, emitted
            # AFTER the subtree below the chain has been generated.
            labels = []
            while ((not tree.is_preterminal()) and (len(tree.children) == 1)):
                labels.append(tree.label)
                tree = tree.children[0]
            for transition in yield_top_down_sequence(tree, transition_scheme):
                (yield transition)
            (yield CompoundUnary(*labels))
            return
    if (transition_scheme is TransitionScheme.TOP_DOWN_COMPOUND):
        # Compound scheme: fold a chain of single-child internal nodes into
        # one OpenConstituent carrying all of their labels.
        labels = [tree.label]
        while ((len(tree.children) == 1) and (not tree.children[0].is_preterminal())):
            tree = tree.children[0]
            labels.append(tree.label)
        (yield OpenConstituent(*labels))
    else:
        (yield OpenConstituent(tree.label))
    # Children in order, then close the constituent opened above.
    for child in tree.children:
        for transition in yield_top_down_sequence(child, transition_scheme):
            (yield transition)
    (yield CloseConstituent()) |
def run_export_bbox_cams(args, cfg, data_dict, save_path=None):
    # Export the scene bounding box and per-training-view camera frustums to
    # a compressed .npz (for visualization tooling).
    verbose = (args.block_num <= 1)
    if verbose:
        print('Export bbox and cameras...')
    if (save_path is None):
        # Fall back to the CLI-provided export path.
        save_path = args.export_bbox_and_cams_only
    (xyz_min, xyz_max) = compute_bbox_by_cam_frustrm(args=args, cfg=cfg, **data_dict)
    (poses, HW, Ks, i_train) = (data_dict['poses'], data_dict['HW'], data_dict['Ks'], data_dict['i_train'])
    (near, far) = (data_dict['near'], data_dict['far'])
    if (data_dict['near_clip'] is not None):
        # Prefer the explicit near clip plane when the loader provides one.
        near = data_dict['near_clip']
    cam_lst = []
    for (c2w, (H, W), K) in zip(poses[i_train], HW[i_train], Ks[i_train]):
        (rays_o, rays_d, viewdirs) = dvgo.get_rays_of_a_view(H, W, K, c2w, cfg.data.ndc, inverse_y=cfg.data.inverse_y, flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        # Camera origin plus the rays through the four image corners define a
        # small visualization frustum (5 points per camera).
        cam_o = rays_o[(0, 0)].cpu().numpy()
        cam_d = rays_d[([0, 0, (- 1), (- 1)], [0, (- 1), 0, (- 1)])].cpu().numpy()
        frustrum_height = (max(near, (far * 0.05)) * cfg.vis.height_rate)
        cam_lst.append(np.array([cam_o, *(cam_o + (cam_d * frustrum_height))]))
    dir_name = os.path.dirname(save_path)
    Path(dir_name).mkdir(parents=True, exist_ok=True)
    np.savez_compressed(save_path, xyz_min=xyz_min.cpu().numpy(), xyz_max=xyz_max.cpu().numpy(), cam_lst=np.array(cam_lst)) |
class DIPNet(nn.Module):
    """Encoder/decoder 3D CNN with a bias-free 1x1x1 conv output head."""

    def __init__(self, depth, base, decoder_block_num, norm=nn.InstanceNorm3d, encoder_norm=nn.Identity, use_skip=False):
        super(DIPNet, self).__init__()
        # Encoder yields a bottleneck plus skip features; the decoder
        # consumes both and the head maps the result to a single channel.
        self.encoder = CNNEncoder(depth, base, encoder_norm)
        self.decoder = CNNDecoder(depth, base, decoder_block_num, norm=norm, use_skip=use_skip)
        self.output = nn.Conv3d(base, 1, 1, 1, 0, bias=False)

    def forward(self, x):
        bottleneck, skip_feats = self.encoder(x)
        decoded = self.decoder(bottleneck, skip_feats)
        return self.output(decoded)
def load_filepaths_and_text(filename, split='|'):
    """Read *filename* (UTF-8) and split every stripped line on *split*.

    Returns a list with one entry per line; each entry is the list of
    fields obtained by splitting the stripped line on the separator.
    """
    entries = []
    with open(filename, encoding='utf-8') as handle:
        for line in handle:
            entries.append(line.strip().split(split))
    return entries
def test_dependent_symbol():
    # Regression test for MapFission when nested-SDFG symbols depend on the
    # outer map parameter: ``first``/``last`` are mapped from ``i`` and must
    # survive the fission + inlining unchanged.
    outer_sdfg = dace.SDFG('map_fission_with_dependent_symbol')
    outer_sdfg.add_symbol('fidx', dace.int32)
    outer_sdfg.add_symbol('lidx', dace.int32)
    outer_sdfg.add_array('A', (2, 10), dtype=dace.int32)
    outer_sdfg.add_array('B', (2, 10), dtype=dace.int32)
    # First nested SDFG: elementwise addition over the symbolic j-range.
    inner_sdfg = dace.SDFG('inner')
    inner_sdfg.add_symbol('first', dace.int32)
    inner_sdfg.add_symbol('last', dace.int32)
    inner_sdfg.add_array('A0', (10,), dtype=dace.int32)
    inner_sdfg.add_array('A1', (10,), dtype=dace.int32)
    inner_sdfg.add_array('B0', (10,), dtype=dace.int32)
    inner_sdfg.add_array('B1', (10,), dtype=dace.int32)
    inner_state = inner_sdfg.add_state('inner_state', is_start_state=True)
    inner_state.add_mapped_tasklet(name='plus', map_ranges={'j': 'first:last'}, inputs={'__a0': dace.Memlet(data='A0', subset='j'), '__a1': dace.Memlet(data='A1', subset='j')}, outputs={'__b0': dace.Memlet(data='B0', subset='j')}, code='__b0 = __a0 + __a1', external_edges=True)
    # Second nested SDFG (subtraction), itself nested inside the first one.
    inner_sdfg2 = dace.SDFG('inner2')
    inner_sdfg2.add_symbol('first', dace.int32)
    inner_sdfg2.add_symbol('last', dace.int32)
    inner_sdfg2.add_array('A0', (10,), dtype=dace.int32)
    inner_sdfg2.add_array('A1', (10,), dtype=dace.int32)
    inner_sdfg2.add_array('B1', (10,), dtype=dace.int32)
    inner_state2 = inner_sdfg2.add_state('inner_state2', is_start_state=True)
    inner_state2.add_mapped_tasklet(name='minus', map_ranges={'j': 'first:last'}, inputs={'__a0': dace.Memlet(data='A0', subset='j'), '__a1': dace.Memlet(data='A1', subset='j')}, outputs={'__b1': dace.Memlet(data='B1', subset='j')}, code='__b1 = __a0 - __a1', external_edges=True)
    nsdfg = inner_state.add_nested_sdfg(inner_sdfg2, None, {'A0', 'A1'}, {'B1'})
    a0 = inner_state.add_access('A0')
    a1 = inner_state.add_access('A1')
    b1 = inner_state.add_access('B1')
    inner_state.add_edge(a0, None, nsdfg, 'A0', dace.Memlet(data='A0', subset='0:10'))
    inner_state.add_edge(a1, None, nsdfg, 'A1', dace.Memlet(data='A1', subset='0:10'))
    inner_state.add_edge(nsdfg, 'B1', b1, None, dace.Memlet(data='B1', subset='0:10'))
    # Outer state: a map over i whose nested SDFG gets i-dependent symbols.
    outer_state = outer_sdfg.add_state('outer_state', is_start_state=True)
    a = outer_state.add_access('A')
    b = outer_state.add_access('B')
    (me, mx) = outer_state.add_map('map', {'i': '0:2'})
    inner_sdfg_node = outer_state.add_nested_sdfg(inner_sdfg, None, {'A0', 'A1'}, {'B0', 'B1'}, symbol_mapping={'first': 'max(0, i - fidx)', 'last': 'min(10, i + lidx)'})
    outer_state.add_memlet_path(a, me, inner_sdfg_node, memlet=dace.Memlet(data='A', subset='0, 0:10'), dst_conn='A0')
    outer_state.add_memlet_path(a, me, inner_sdfg_node, memlet=dace.Memlet(data='A', subset='1, 0:10'), dst_conn='A1')
    outer_state.add_memlet_path(inner_sdfg_node, mx, b, memlet=dace.Memlet(data='B', subset='0, 0:10'), src_conn='B0')
    outer_state.add_memlet_path(inner_sdfg_node, mx, b, memlet=dace.Memlet(data='B', subset='1, 0:10'), src_conn='B1')
    sdutils.consolidate_edges(outer_sdfg)
    # Reference: run an untransformed deep copy, then compare against the
    # fissioned + inlined SDFG on the same inputs.
    A = np.arange(20, dtype=np.int32).reshape((2, 10)).copy()
    ref = np.zeros_like(A)
    ref_sdfg = copy.deepcopy(outer_sdfg)
    ref_sdfg.name = f'{ref_sdfg.name}_ref'
    ref_sdfg(A=A, B=ref, fidx=1, lidx=5)
    MapFission.apply_to(outer_sdfg, expr_index=1, map_entry=me, nested_sdfg=inner_sdfg_node)
    outer_sdfg.apply_transformations_repeated(InlineSDFG)
    val = np.zeros_like(A)
    outer_sdfg(A=A, B=val, fidx=1, lidx=5)
    assert np.array_equal(val, ref) |
def main(args):
    # Script entry point: build the datasets, set up wandb logging and the
    # optimizer, then hand everything to the training loop.
    params = set_params(args.data, args.task)
    train_dataset = UncertainTripleDataset(params.data_dir, 'train.tsv')
    # Second view of the training split -- presumably used to evaluate on
    # train data during training (TODO confirm against run_train).
    train_test_dataset = UncertainTripleDataset(params.data_dir, 'train.tsv')
    dev_dataset = UncertainTripleDataset(params.data_dir, 'val.tsv')
    test_dataset = UncertainTripleDataset(params.data_dir, 'test.tsv')
    print(params.whichmodel)
    print(params.early_stop)
    run = wandb_initialize(params)
    if (not os.path.exists(params.model_dir)):
        # Ensure the checkpoint directory exists before training starts.
        os.makedirs(params.model_dir)
    model = get_new_model(params)
    wandb.watch(model)
    optimizer = torch.optim.Adam(model.parameters(), lr=params.LR)
    run_train(model, run, train_dataset, train_test_dataset, dev_dataset, test_dataset, optimizer, params)
    print('done') |
def _get_codegen_gemm_opts(ashape, astride, bshape, bstride, cshape, cstride):
    """Assemble the GEMM codegen options dict for C = A @ B.

    Combines the plain GEMM options with the batched-matmul options, fills
    in M/N/K from the operand shapes, applies the operand swap requested by
    ``opt['swap']``, and records the batch strides (``BATCH`` is ``None``
    for the non-batched case).
    """
    opt = get_gemm_opts(astride, bstride, cstride)
    bopt = get_batchmm_opts(ashape, astride, bshape, bstride, cshape, cstride)
    # (M x K) @ (K x N) -> (M x N); shapes may carry leading batch dims.
    opt['M'], opt['K'] = ashape[-2], ashape[-1]
    opt['N'] = bshape[-1]
    if opt['swap']:
        # Swapping A and B: exchange batch strides, leading dimensions,
        # transpose flags, and the M/N extents accordingly.
        if bopt:
            bopt['sa'], bopt['sb'] = bopt['sb'], bopt['sa']
        opt['lda'], opt['ldb'] = opt['ldb'], opt['lda']
        opt['ta'], opt['tb'] = opt['tb'], opt['ta']
        opt['M'], opt['N'] = opt['N'], opt['M']
    if not bopt:
        opt['BATCH'] = None
    else:
        opt['stride_a'] = bopt['sa']
        opt['stride_b'] = bopt['sb']
        opt['stride_c'] = bopt['sc']
        opt['BATCH'] = bopt['b']
    return opt
def replace_message_content(content: str, replacements: List[Dict[str, str]]) -> str:
    """Apply each regex substitution in *replacements* to *content*, in order.

    Each entry must provide a ``'regex'`` pattern and a ``'replacement'``
    string; substitutions run sequentially, so later rules see the output
    of earlier ones.
    """
    for rule in replacements:
        content = re.sub(rule['regex'], rule['replacement'], content)
    return content
def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope:
    """Open *scope* as an absolute (root-anchored) TF variable scope.

    Wrapping the name in a ``tf.VariableScope`` and disabling the auxiliary
    name scope prevents TensorFlow from nesting it under the current scope.
    """
    absolute_scope = tf.VariableScope(name=scope, **kwargs)
    return tf.variable_scope(absolute_scope, auxiliary_name_scope=False)
class ResBlock(nn.Module):
    """Residual block: two bias-free 3x3 convs with a ReLU between, plus identity skip.

    Requires ``in_channels == out_channels`` for the final addition.
    """

    def __init__(self, in_channels, out_channels):
        super(ResBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1, bias=False)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        # y = x + conv2(relu(conv1(x)))
        residual = self.conv1(x)
        residual = self.relu(residual)
        residual = self.conv2(residual)
        return x + residual
class ControlC(Callback):
    """Callback that reacts to Ctrl-C (KeyboardInterrupt) during a run.

    When ``quit_and_do`` is True, the configured ``action`` (by default a
    hard ``sys.exit(0)``) runs on KeyboardInterrupt; otherwise the interrupt
    is swallowed. Any other exception is re-raised unchanged.
    """

    def quit_all():
        # Default action: terminate the whole process. Deliberately takes no
        # arguments -- it is stored as a plain function and invoked as
        # ``self.action()``.
        import sys
        sys.exit(0)

    def __init__(self, quit_and_do, action=quit_all):
        super(ControlC, self).__init__()
        # isinstance() is the idiomatic type check (was ``type(...) != bool``);
        # error-message typo "arguemnt" fixed.
        if not isinstance(quit_and_do, bool):
            raise ValueError('In KeyBoardInterrupt, quit_and_do argument must be a bool.')
        self.quit_and_do = quit_and_do
        self.action = action

    def on_exception(self, exception):
        """Swallow KeyboardInterrupt (optionally running the action); re-raise anything else."""
        if isinstance(exception, KeyboardInterrupt):
            if self.quit_and_do is True:
                self.action()
        else:
            raise exception
def _seg_21():
    # One segment of an auto-generated IDNA/UTS-46 code-point mapping table
    # (idna-package style): tuples of (codepoint, status[, mapping]) where the
    # status codes presumably mean 'M' mapped, 'V' valid, 'X' disallowed,
    # 'I' ignored, 'D' deviation, '3' disallowed_STD3_mapped -- confirm
    # against the generator. NOTE(review): generated data; do not edit by hand.
    return [(8178, 'M', u''), (8179, 'M', u''), (8180, 'M', u''), (8181, 'X'), (8182, 'V'), (8183, 'M', u''), (8184, 'M', u''), (8185, 'M', u''), (8186, 'M', u''), (8187, 'M', u''), (8188, 'M', u''), (8189, '3', u' '), (8190, '3', u' '), (8191, 'X'), (8192, '3', u' '), (8203, 'I'), (8204, 'D', u''), (8206, 'X'), (8208, 'V'), (8209, 'M', u''), (8210, 'V'), (8215, '3', u' '), (8216, 'V'), (8228, 'X'), (8231, 'V'), (8232, 'X'), (8239, '3', u' '), (8240, 'V'), (8243, 'M', u''), (8244, 'M', u''), (8245, 'V'), (8246, 'M', u''), (8247, 'M', u''), (8248, 'V'), (8252, '3', u'!!'), (8253, 'V'), (8254, '3', u' '), (8255, 'V'), (8263, '3', u'??'), (8264, '3', u'?!'), (8265, '3', u'!?'), (8266, 'V'), (8279, 'M', u''), (8280, 'V'), (8287, '3', u' '), (8288, 'I'), (8289, 'X'), (8292, 'I'), (8293, 'X'), (8304, 'M', u'0'), (8305, 'M', u'i'), (8306, 'X'), (8308, 'M', u'4'), (8309, 'M', u'5'), (8310, 'M', u'6'), (8311, 'M', u'7'), (8312, 'M', u'8'), (8313, 'M', u'9'), (8314, '3', u'+'), (8315, 'M', u''), (8316, '3', u'='), (8317, '3', u'('), (8318, '3', u')'), (8319, 'M', u'n'), (8320, 'M', u'0'), (8321, 'M', u'1'), (8322, 'M', u'2'), (8323, 'M', u'3'), (8324, 'M', u'4'), (8325, 'M', u'5'), (8326, 'M', u'6'), (8327, 'M', u'7'), (8328, 'M', u'8'), (8329, 'M', u'9'), (8330, '3', u'+'), (8331, 'M', u''), (8332, '3', u'='), (8333, '3', u'('), (8334, '3', u')'), (8335, 'X'), (8336, 'M', u'a'), (8337, 'M', u'e'), (8338, 'M', u'o'), (8339, 'M', u'x'), (8340, 'M', u''), (8341, 'M', u'h'), (8342, 'M', u'k'), (8343, 'M', u'l'), (8344, 'M', u'm'), (8345, 'M', u'n'), (8346, 'M', u'p'), (8347, 'M', u's'), (8348, 'M', u't'), (8349, 'X'), (8352, 'V'), (8360, 'M', u'rs'), (8361, 'V'), (8384, 'X'), (8400, 'V'), (8433, 'X')] |
class Speech2Text2Processor():
    # Bundles a speech feature extractor and a Speech2Text2 tokenizer behind
    # one object; ``current_processor`` selects which one ``__call__`` uses.
    def __init__(self, feature_extractor, tokenizer):
        """Validate and store the two sub-processors (extractor is the default)."""
        if (not isinstance(feature_extractor, SequenceFeatureExtractor)):
            # NOTE(review): ``SequenceFeatureExtractor.__class__`` is ``type``,
            # so this message prints "<class 'type'>"; ``__name__`` was likely
            # intended (same for the tokenizer message below).
            raise ValueError(f'`feature_extractor` has to be of type {SequenceFeatureExtractor.__class__}, but is {type(feature_extractor)}')
        if (not isinstance(tokenizer, Speech2Text2Tokenizer)):
            raise ValueError(f'`tokenizer` has to be of type {Speech2Text2Tokenizer.__class__}, but is {type(tokenizer)}')
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer
        # Start in audio mode: plain calls go to the feature extractor.
        self.current_processor = self.feature_extractor
    def save_pretrained(self, save_directory):
        """Save both sub-processors into ``save_directory``."""
        self.feature_extractor.save_pretrained(save_directory)
        self.tokenizer.save_pretrained(save_directory)
    # NOTE(review): takes ``cls`` -- presumably a stripped @classmethod decorator.
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both sub-processors from a pretrained checkpoint or hub name."""
        feature_extractor = AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
        tokenizer = Speech2Text2Tokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
        return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        """Forward to whichever sub-processor is currently active."""
        return self.current_processor(*args, **kwargs)
    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)
    # NOTE(review): a generator that temporarily switches calls to the
    # tokenizer; presumably a stripped @contextmanager decorator.
    def as_target_processor(self):
        self.current_processor = self.tokenizer
        (yield)
        self.current_processor = self.feature_extractor |
# NOTE(review): presumably a stripped registration decorator
# (e.g. ``@DATASETS.register_module()``) -- confirm against the original.
_module()
class PascalContextDataset59(CustomDataset):
    # PASCAL-Context dataset with the 59-class label set; the background
    # label is dropped via ``reduce_zero_label=True``.
    # 59 semantic class names, index-aligned with PALETTE below.
    CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower', 'food', 'grass', 'ground', 'horse', 'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood')
    # Per-class RGB colours for visualization, one triple per class.
    PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]
    def __init__(self, split, **kwargs):
        """Configure image/annotation suffixes and forward the rest to CustomDataset."""
        super(PascalContextDataset59, self).__init__(img_suffix='.jpg', seg_map_suffix='.png', split=split, reduce_zero_label=True, **kwargs)
        # NOTE(review): assert is stripped under -O; sanity validation only.
        assert (self.file_client.exists(self.img_dir) and (self.split is not None)) |
class D_NLayers(nn.Module):
    """PatchGAN-style discriminator with ``n_layers`` strided conv blocks.

    An initial conv + LeakyReLU, ``n_layers - 1`` stride-2 conv/norm/LeakyReLU
    blocks with channel counts doubling up to 8x ``ndf``, one stride-1 block,
    and a final conv producing a single-channel map of patch logits.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        super(D_NLayers, self).__init__()
        # BatchNorm already carries a learnable shift, so convs that feed it
        # can drop their bias; any other norm keeps the conv bias.
        if (type(norm_layer) == functools.partial):
            use_bias = (norm_layer.func != nn.BatchNorm2d)
        else:
            use_bias = (norm_layer != nn.BatchNorm2d)
        kernel_size, pad = 4, 1
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kernel_size, stride=2, padding=pad), nn.LeakyReLU(0.2, True)]
        mult = 1
        for n in range(1, n_layers):
            prev_mult, mult = mult, min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kernel_size, stride=2, padding=pad, bias=use_bias),
                norm_layer(ndf * mult),
                nn.LeakyReLU(0.2, True),
            ]
        # One stride-1 block before the logit head.
        prev_mult, mult = mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kernel_size, stride=1, padding=pad, bias=use_bias),
            norm_layer(ndf * mult),
            nn.LeakyReLU(0.2, True),
        ]
        layers += [nn.Conv2d(ndf * mult, 1, kernel_size=kernel_size, stride=1, padding=pad)]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Run the discriminator; returns a map of patch logits."""
        return self.model(input)
# NOTE(review): presumably a stripped ``@register_task`` decorator line.
_task('speech_pretraining')
class AudioPretrainingTask(FairseqTask):
    # Fairseq task for unsupervised pretraining on raw audio waveforms.
    # NOTE(review): takes ``parser`` without self -- presumably stripped @staticmethod.
    def add_args(parser):
        """Register the task's command-line arguments."""
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--sample-rate', default=16000, type=int, help='target sample rate. audio files will be up/down sampled to this rate')
        parser.add_argument('--max-sample-size', default=None, type=int, help='max sample size to crop to for batching. default = min sample length')
        parser.add_argument('--min-sample-size', default=None, type=int, help='min sample size to crop to for batching. default = same as --max-sample-size')
    def __init__(self, args):
        super().__init__(args)
    # NOTE(review): takes ``cls`` -- presumably a stripped @classmethod.
    def setup_task(cls, args, **kwargs):
        """Build the task from parsed arguments."""
        return cls(args)
    def load_dataset(self, split, **kwargs):
        """Load the raw-audio dataset listed in the ``<split>_a.tsv`` manifest."""
        manifest = os.path.join(self.args.data, '{}.tsv'.format((split + '_a')))
        self.datasets[split] = RawAudioDataset(manifest, sample_rate=self.args.sample_rate, max_sample_size=self.args.max_sample_size, min_sample_size=self.args.min_sample_size)
    def target_dictionary(self):
        """Unsupervised pretraining has no target dictionary."""
        return None |
# NOTE(review): presumably a stripped ``@register_task`` decorator line.
_task('sentence_ranking')
class SentenceRankingTask(FairseqTask):
    # Fairseq task: rank ``--num-classes`` candidate sentences against a
    # shared context (input0) with a single-logit classification head.
    # NOTE(review): takes ``parser`` without self -- presumably stripped @staticmethod.
    def add_args(parser):
        """Register the task's command-line arguments."""
        parser.add_argument('data', metavar='FILE', help='file prefix for data')
        parser.add_argument('--num-classes', type=int, help='number of sentences to be ranked')
        parser.add_argument('--init-token', type=int, help='add token at the beginning of each batch item')
        parser.add_argument('--separator-token', type=int, help='add separator token between inputs')
        parser.add_argument('--no-shuffle', action='store_true')
        parser.add_argument('--truncate-sequence', action='store_true', help='Truncate sequence to max_positions')
        parser.add_argument('--max-option-length', type=int, help='max length for each option')
    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
    # NOTE(review): takes ``cls`` -- presumably a stripped @classmethod.
    def load_dictionary(cls, args, filename, source=True):
        """Load a fairseq Dictionary and add the ``<mask>`` symbol."""
        dictionary = Dictionary.load(filename)
        dictionary.add_symbol('<mask>')
        return dictionary
    # NOTE(review): takes ``cls`` -- presumably a stripped @classmethod.
    def setup_task(cls, args, **kwargs):
        """Build the task, loading the shared input dictionary from input0."""
        assert (args.criterion == 'sentence_ranking'), 'Must set --criterion=sentence_ranking'
        data_dict = cls.load_dictionary(args, os.path.join(args.data, 'input0', 'dict.txt'), source=True)
        logger.info('[input] dictionary: {} types'.format(len(data_dict)))
        return SentenceRankingTask(args, data_dict)
    def load_dataset(self, split, combine=False, **kwargs):
        """Build the ranking dataset for ``split`` (context + options + optional labels)."""
        def get_path(type, split):
            # Data layout: <data>/<input type>/<split>.
            return os.path.join(self.args.data, type, split)
        def make_dataset(type, dictionary):
            split_path = get_path(type, split)
            dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine)
            return dataset
        input0 = make_dataset('input0', self.source_dictionary)
        # One dataset per candidate option: input1 .. input<num_classes>.
        input_options = [make_dataset('input{idx}'.format(idx=(idx + 1)), self.source_dictionary) for idx in range(self.args.num_classes)]
        if (self.args.separator_token is not None):
            input0 = PrependTokenDataset(input0, self.args.separator_token)
        src_tokens = []
        for input_option in input_options:
            if (self.args.init_token is not None):
                input_option = PrependTokenDataset(input_option, self.args.init_token)
            if (self.args.max_option_length is not None):
                input_option = TruncateDataset(input_option, self.args.max_option_length)
            # Each model input is <option> followed by the shared context.
            src_token = ConcatSentencesDataset(input_option, input0)
            if self.args.truncate_sequence:
                src_token = TruncateDataset(src_token, self.args.max_positions)
            src_tokens.append(src_token)
        # Deterministic shuffle order, seeded by the run's seed.
        with data_utils.numpy_seed(self.args.seed):
            shuffle = np.random.permutation(len(src_tokens[0]))
        dataset = {'id': IdDataset(), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens[0], reduce=True)}
        for src_token_idx in range(len(src_tokens)):
            dataset.update({'net_input{idx}'.format(idx=(src_token_idx + 1)): {'src_tokens': RightPadDataset(src_tokens[src_token_idx], pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_tokens[src_token_idx], reduce=False)}})
        label_path = '{}.label'.format(get_path('label', split))
        if os.path.exists(label_path):
            # Gold option indices are optional (absent at inference time).
            with open(label_path) as h:
                dataset.update(target=RawLabelDataset([int(x.strip()) for x in h.readlines()]))
        nested_dataset = NestedDictionaryDataset(dataset, sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])])
        if self.args.no_shuffle:
            dataset = nested_dataset
        else:
            dataset = SortDataset(nested_dataset, sort_order=[shuffle])
        logger.info('Loaded {0} with #samples: {1}'.format(split, len(dataset)))
        self.datasets[split] = dataset
        return self.datasets[split]
    def build_model(self, args):
        """Build the model and attach the single-logit ranking head."""
        from fairseq import models
        model = models.build_model(args, self)
        model.register_classification_head(getattr(args, 'ranking_head_name', 'sentence_classification_head'), num_classes=1)
        return model
    def max_positions(self):
        return self.args.max_positions
    # NOTE(review): the two accessors below likely had stripped @property decorators.
    def source_dictionary(self):
        return self.dictionary
    def target_dictionary(self):
        return self.dictionary |
class TestSuiteLineCoverageFunction(TestSuiteCoverageFunction):
    """Coverage function scoring a test-suite chromosome by line coverage."""

    def compute_coverage(self, individual) -> float:
        # Execute the suite, merge the per-test execution traces, and score
        # line coverage against the tracer's known subject properties.
        execution_results = self._run_test_suite_chromosome(individual)
        combined_trace = analyze_results(execution_results)
        subject_properties = self._executor.tracer.get_subject_properties()
        return compute_line_coverage(combined_trace, subject_properties)
def register_types(module):
    """Register ns-3 C++ classes, enums, and containers on the pybindgen *module*.

    NOTE(review): this looks like auto-generated pybindgen binding code for an
    ns-3 module (DSDV routing, given the dsdv namespace at the end). The
    registration order is load-bearing: later entries look up earlier ones via
    ``root_module['ns3::...']`` (e.g. for ``outer_class`` / ``parent``), so do
    not reorder these calls by hand.
    """
    root_module = module.get_root()
    # --- Core address / buffer / tag value types (from ns.network, ns.core) ---
    module.add_class('Address', import_from_module='ns.network')
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    module.add_class('Buffer', import_from_module='ns.network')
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    module.add_class('ByteTagList', import_from_module='ns.network')
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    module.add_class('CallbackBase', import_from_module='ns.core')
    # --- DefaultDeleter<T> template instantiations used by SimpleRefCount ---
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Ipv4Route'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
    module.add_class('EventId', import_from_module='ns.core')
    module.add_class('Hasher', import_from_module='ns.core')
    # Socket address types that implicitly convert to the generic Address.
    module.add_class('Inet6SocketAddress', import_from_module='ns.network')
    root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('InetSocketAddress', import_from_module='ns.network')
    root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
    # --- IntToType<N> template instantiations (compile-time int dispatch) ---
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0'])
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core')
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1'])
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core')
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2'])
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core')
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3'])
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core')
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['4'])
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core')
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5'])
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core')
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6'])
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core')
    # --- IPv4/IPv6/MAC address and routing-helper value types ---
    module.add_class('Ipv4Address', import_from_module='ns.network')
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
    module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet')
    module.add_class('Ipv6Address', import_from_module='ns.network')
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    module.add_class('Mac48Address', import_from_module='ns.network')
    root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('NodeContainer', import_from_module='ns.network')
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    module.add_class('ObjectFactory', import_from_module='ns.core')
    # --- Packet metadata / tag bookkeeping types ---
    module.add_class('PacketMetadata', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    module.add_class('PacketTagList', import_from_module='ns.network')
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    # SimpleRefCount base for ns3::Object (intrusive ref counting via Ref/Unref).
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    module.add_class('TagBuffer', import_from_module='ns.network')
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    module.add_class('Timer', import_from_module='ns.core')
    module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
    module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
    module.add_class('TimerImpl', allow_subclassing=True, import_from_module='ns.core')
    module.add_class('TypeId', import_from_module='ns.core')
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    module.add_class('empty', import_from_module='ns.core')
    module.add_class('int64x64_t', import_from_module='ns.core')
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    # --- Header/packet chunk hierarchy and the DSDV helper itself ---
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    module.add_class('DsdvHelper', parent=root_module['ns3::Ipv4RoutingHelper'])
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
    module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
    module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
    module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    # --- SimpleRefCount<T, empty, DefaultDeleter<T>> instantiations ---
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    # --- Socket hierarchy, Time, random variables, attribute machinery ---
    module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
    module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    module.add_enum('SocketPriority', ['NS3_PRIO_BESTEFFORT', 'NS3_PRIO_FILLER', 'NS3_PRIO_BULK', 'NS3_PRIO_INTERACTIVE_BULK', 'NS3_PRIO_INTERACTIVE', 'NS3_PRIO_CONTROL'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    module.add_enum('Ipv6MulticastFilterMode', ['INCLUDE', 'EXCLUDE'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    module.add_class('SocketPriorityTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    module.add_class('Time', import_from_module='ns.core')
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
    module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    # --- IPv4 protocol stack objects ---
    module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv4Interface', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4'])
    module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet')
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    # --- CallbackImpl<R, T1..T9> instantiations (9-ary callback templates) ---
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['bool', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'const ns3::Ipv4Header &', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ipv4L3Protocol::DropReason', 'ns3::Ptr<ns3::Ipv4>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'const ns3::Ipv4Header &', 'ns3::Ptr<const ns3::Packet>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'const ns3::Ipv4Header &', 'ns3::Socket::SocketErrno', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ptr<ns3::Ipv4>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::Ptr<const ns3::Packet>', 'const ns3::Ipv4Header &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('Ipv4ListRouting', import_from_module='ns.internet', parent=root_module['ns3::Ipv4RoutingProtocol'])
    # --- STL container wrappers ---
    module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type=u'vector')
    module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map')
    # --- Nested C++ namespaces, each registered by its own generated helper ---
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)
    nested_module = module.add_cpp_namespace('dsdv')
    register_types_ns3_dsdv(nested_module)
class PackagingTest(TestCase):
    """Round-trip tests for package export/import."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Temp-file handles opened by temp(); closed in tearDown.
        self._temporary_files = []
def temp(self):
    """Return the path of a fresh temporary file.

    On Windows the handle is closed right away (an open NamedTemporaryFile
    cannot be reopened there); otherwise the handle is kept so tearDown
    can close it and let the file be removed.
    """
    handle = NamedTemporaryFile()
    path = handle.name
    if IS_WINDOWS:
        handle.close()
    else:
        self._temporary_files.append(handle)
    return path
def tearDown(self):
    """Close every temp-file handle accumulated by temp() and reset the list."""
    for handle in self._temporary_files:
        handle.close()
    self._temporary_files = []
def test_saving_source(self):
    """save_source_file round-trips both a single module and a package directory."""
    pkg_path = self.temp()
    with PackageExporter(pkg_path, verbose=False) as exporter:
        exporter.save_source_file('foo', str(packaging_directory / 'module_a.py'))
        exporter.save_source_file('foodir', str(packaging_directory / 'package_a'))
    importer = PackageImporter(pkg_path)
    foo_mod = importer.import_module('foo')
    sub_mod = importer.import_module('foodir.subpackage')
    self.assertEqual(foo_mod.result, 'module_a')
    self.assertEqual(sub_mod.result, 'package_a.subpackage')
def test_saving_string(self):
    """Source saved as a string is importable; stdlib imports resolve to the real modules."""
    pkg_path = self.temp()
    with PackageExporter(pkg_path, verbose=False) as exporter:
        exporter.save_source_string('my_mod', 'import math\nthe_math = math\n')
    importer = PackageImporter(pkg_path)
    import math
    # Stdlib modules are not packaged; the importer hands back the real one.
    self.assertIs(importer.import_module('math'), math)
    self.assertIs(importer.import_module('my_mod').math, math)
def test_save_module(self):
    """Modules saved by name re-import as fresh copies, not aliases of the originals."""
    pkg_path = self.temp()
    with PackageExporter(pkg_path, verbose=False) as exporter:
        import module_a
        import package_a
        exporter.save_module(module_a.__name__)
        exporter.save_module(package_a.__name__)
    importer = PackageImporter(pkg_path)
    module_a_copy = importer.import_module('module_a')
    self.assertEqual(module_a_copy.result, 'module_a')
    self.assertIsNot(module_a, module_a_copy)
    package_a_copy = importer.import_module('package_a')
    self.assertEqual(package_a_copy.result, 'package_a')
    self.assertIsNot(package_a_copy, package_a)
def test_pickle(self):
    """Pickling captures only the modules the object needs; loading rebinds to package copies."""
    import package_a.subpackage
    inner = package_a.subpackage.PackageASubpackageObject()
    outer = package_a.PackageAObject(inner)
    pkg_path = self.temp()
    with PackageExporter(pkg_path, verbose=False) as exporter:
        exporter.save_pickle('obj', 'obj.pkl', outer)
    importer = PackageImporter(pkg_path)
    sub_copy = importer.import_module('package_a.subpackage')
    # module_a was never a dependency of the pickled object, so it is absent.
    with self.assertRaises(ImportError):
        importer.import_module('module_a')
    loaded = importer.load_pickle('obj', 'obj.pkl')
    self.assertIsNot(outer, loaded)
    self.assertIsInstance(loaded.obj, sub_copy.PackageASubpackageObject)
    self.assertIsNot(package_a.subpackage.PackageASubpackageObject, sub_copy.PackageASubpackageObject)
def test_resources(self):
    """Text and binary resources saved into a package are readable from inside it."""
    pkg_path = self.temp()
    with PackageExporter(pkg_path, verbose=False) as exporter:
        exporter.save_text('main', 'main', 'my string')
        exporter.save_binary('main', 'main_binary', 'my string'.encode('utf-8'))
        body = "import resources\nt = resources.load_text('main', 'main')\nb = resources.load_binary('main', 'main_binary')\n"
        exporter.save_source_string('main', body, is_package=True)
    importer = PackageImporter(pkg_path)
    main_mod = importer.import_module('main')
    self.assertEqual(main_mod.t, 'my string')
    self.assertEqual(main_mod.b, 'my string'.encode('utf-8'))
def test_extern(self):
    """Externed modules resolve to the host interpreter's copies, not packaged ones."""
    pkg_path = self.temp()
    with PackageExporter(pkg_path, verbose=False) as exporter:
        exporter.extern_modules(['package_a.subpackage', 'module_a'])
        exporter.save_module('package_a')
    importer = PackageImporter(pkg_path)
    import package_a.subpackage
    import module_a
    module_a_copy = importer.import_module('module_a')
    importer.import_module('package_a.subpackage')
    package_a_copy = importer.import_module('package_a')
    # Extern: same objects as the host; non-extern package_a is a fresh copy.
    self.assertIs(module_a, module_a_copy)
    self.assertIsNot(package_a, package_a_copy)
    self.assertIs(package_a.subpackage, package_a_copy.subpackage)
# NOTE(review): this bare tuple is a no-op statement. It reads like the
# argument list of a mangled @unittest.skipIf(...) decorator whose call was
# lost in a refactor -- confirm against upstream and restore the decorator.
# Also note the condition `(major < 3) or (minor < 7)` would wrongly trigger
# on a hypothetical Python 4.0.
(((version_info.major < 3) or (version_info.minor < 7)), 'mock uses __getattr__ a 3.7 feature')
def test_mock(self):
    """Mocked-out modules import successfully but raise when actually used."""
    filename = self.temp()
    with PackageExporter(filename, verbose=False) as he:
        # Replace these modules with mock stubs inside the package.
        he.mock_modules(['package_a.subpackage', 'module_a'])
        he.save_module('package_a')
    hi = PackageImporter(filename)
    import package_a.subpackage
    _ = package_a.subpackage  # silence unused-import lint
    import module_a
    _ = module_a  # silence unused-import lint
    # The packaged copy is the mock stub; touching an attribute works,
    # but calling it raises.
    m = hi.import_module('package_a.subpackage')
    r = m.result
    with self.assertRaisesRegex(NotImplementedError, 'was mocked out'):
        r()
# NOTE(review): no-op tuple; like the one above, this appears to be a mangled
# @unittest.skipIf(...) decorator -- confirm against upstream and restore it.
(((version_info.major < 3) or (version_info.minor < 7)), 'mock uses __getattr__ a 3.7 feature')
def test_custom_requires(self):
    """A PackageExporter subclass can override require_module to customize dependency handling."""
    filename = self.temp()

    class Custom(PackageExporter):
        def require_module(self, name, dependencies):
            # Mock out module_a, inline a replacement for package_a,
            # and reject any other dependency.
            if (name == 'module_a'):
                self.mock_module('module_a')
            elif (name == 'package_a'):
                self.save_source_string('package_a', 'import module_a\nresult = 5\n')
            else:
                raise NotImplementedError('wat')

    with Custom(filename, verbose=False) as he:
        he.save_source_string('main', 'import package_a\n')
    hi = PackageImporter(filename)
    # module_a was mocked: attribute access works on the stub.
    hi.import_module('module_a').should_be_mocked
    bar = hi.import_module('package_a')
    self.assertEqual(bar.result, 5)
def test_resnet(self):
    """Round-trip a torchvision resnet18 through save_pickle/load_pickle."""
    resnet = resnet18()
    f1 = self.temp()
    with PackageExporter(f1, verbose=False) as e:
        # Pickling the model walks its dependencies and packages them too.
        e.save_pickle('model', 'model.pkl', resnet)
        # The debug dependency graph should mention the resnet module.
        buf = StringIO()
        e._write_dep_graph(failing_module='torch', output_file=buf)
        self.assertIn('torchvision.models.resnet', buf.getvalue())
    i = PackageImporter(f1)
    r2 = i.load_pickle('model', 'model.pkl')
    input = torch.rand(1, 3, 224, 224)
    ref = resnet(input)
    # The loaded copy must produce the same outputs as the original model.
    self.assertTrue(torch.allclose(r2(input), ref))
    torchvision = i.import_module('torchvision')
    # Re-package the already-packaged model, preferring the first package's
    # importer when resolving modules.
    f2 = self.temp()
    with PackageExporter(f2, verbose=False) as e:
        e.importers.insert(0, i.import_module)
        e.save_pickle('model', 'model.pkl', r2)
    i2 = PackageImporter(f2)
    r3 = i2.load_pickle('model', 'model.pkl')
    self.assertTrue(torch.allclose(r3(input), ref))
    # A package extracted to a directory must load the same as the zip.
    import zipfile
    zf = zipfile.ZipFile(f1, 'r')
    with TemporaryDirectory() as td:
        zf.extractall(path=td)
        iz = PackageImporter(str((Path(td) / Path(f1).name)))
        r4 = iz.load_pickle('model', 'model.pkl')
        self.assertTrue(torch.allclose(r4(input), ref))
def test_model_save(self):
    """Two packaging idioms: whole-object pickle vs. source + state_dict."""
    resnet = resnet18()
    # Idiom 1: pickle the full model; model.load() just unpickles it.
    f1 = self.temp()
    with PackageExporter(f1, verbose=False) as e:
        e.save_pickle('model', 'pickled', resnet)
        src = "import resources # gives you access to the importer from within the package\n\n# server knows to call model.load() to get the model,\n# maybe in the future it passes options as arguments by convension\ndef load():\n return resources.load_pickle('model', 'pickled')\n "
        e.save_source_string('main', src, is_package=True)
    # Idiom 2: save only the state_dict and rebuild the model in load().
    f2 = self.temp()
    with PackageExporter(f2, verbose=False) as e:
        e.save_pickle('model', 'state_dict', resnet.state_dict())
        src = "import resources # gives you access to the importer from within the package\nfrom torchvision.models.resnet import resnet18\ndef load():\n # if you want, you can later edit how resnet is constructed here\n # to edit the model in the package, while still loading the original\n # state dict weights\n r = resnet18()\n state_dict = resources.load_pickle('model', 'state_dict')\n r.load_state_dict(state_dict)\n return r\n "
        e.save_source_string('model', src, is_package=True)
    # Both idioms must yield models with identical outputs.
    input = torch.rand(1, 3, 224, 224)
    results = []
    for m in [f1, f2]:
        importer = PackageImporter(m)
        the_model = importer.import_module('model').load()
        r = the_model(input)
        results.append(r)
    self.assertTrue(torch.allclose(*results))
def convert_clone_examples_to_features(item):
    """Tokenize one clone-detection example into a CloneInputFeatures record.

    ``item`` is a tuple (example, example_index, tokenizer, args). The two
    code snippets are encoded independently (each padded/truncated to
    ``args.max_source_length``) and concatenated into one id sequence.
    """
    example, example_index, tokenizer, args = item
    use_prefix = args.model_type in ['t5', 'codet5'] and args.add_task_prefix
    if use_prefix:
        # T5-style models get an explicit task prefix on both snippets.
        source_str = '{}: {}'.format(args.task, example.source)
        target_str = '{}: {}'.format(args.task, example.target)
    else:
        source_str = example.source
        target_str = example.target

    def _encode(text):
        return tokenizer.encode(text, max_length=args.max_source_length,
                                padding='max_length', truncation=True)

    source_ids = _encode(source_str) + _encode(target_str)
    return CloneInputFeatures(example_index, source_ids, example.label,
                              example.url1, example.url2)
def _to_complete_list(poly, length):
L = poly.coefficients(sparse=False)
return (L + ([poly.base_ring().zero()] * (length - len(L)))) |
# The bare '.parametrize(...)' lines were stripped '@pytest.mark.parametrize'
# decorators (a syntax error as they stood); restored here.
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('seed_num_arrays', [314])
@pytest.mark.parametrize('ij_indexing', [True, False])
@pytest.mark.parametrize('num_arrays', [2, 3, 4, 5])
@pytest.mark.parametrize('ctx, func_name', list_context('Meshgrid'))
def test_meshgrid(seed, seed_num_arrays, ij_indexing, num_arrays, ctx, func_name):
    """Check F.meshgrid against ref_meshgrid on random 1-D inputs."""
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    rng_num_arrays = np.random.RandomState(seed_num_arrays)
    # num_arrays random 1-D arrays with random lengths in [1, 7).
    inputs = [rng.randn(rng_num_arrays.randint(1, 7)) for _ in range(num_arrays)]
    function_tester(rng, F.meshgrid, ref_meshgrid, inputs,
                    func_kwargs=dict(ij_indexing=ij_indexing),
                    backward=([True] * num_arrays), ctx=ctx,
                    func_name=func_name, disable_half_test=True,
                    atol_b=0.03, atol_accum=1e-05)
class A000108(SloaneSequence):
    """Sloane sequence A000108: the Catalan numbers."""

    def __init__(self):
        # Catalan numbers are indexed starting from n = 0.
        super().__init__(offset=0)

    def _repr_(self):
        return 'Catalan numbers: C(n) = binomial(2n,n)/(n+1) = (2n)!/(n!(n+1)!). Also called Segner numbers.'

    def _eval(self, n):
        # Delegate the actual computation to the combinat library.
        return combinat.catalan_number(n)
def test_edvr_model():
    """End-to-end test of the EDVR restorer: build, train_step, inference, eval."""
    model_cfg = dict(type='EDVR', generator=dict(type='EDVRNet', in_channels=3, out_channels=3, mid_channels=8, num_frames=5, deform_groups=2, num_blocks_extraction=1, num_blocks_reconstruction=1, center_frame_idx=2, with_tsa=False), pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='sum'))
    train_cfg = None
    test_cfg = None
    # Build the restorer and check its components.
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    assert (restorer.__class__.__name__ == 'EDVR')
    assert isinstance(restorer.generator, EDVRNet)
    assert isinstance(restorer.pixel_loss, L1Loss)
    # 5-frame low-res clip and the 4x-scale ground-truth frame.
    inputs = torch.rand(1, 5, 3, 8, 8)
    targets = torch.rand(1, 3, 32, 32)
    # The training/inference paths are only exercised when CUDA is available.
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
        # Optimizer built from config over the generator's parameters.
        optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
        optimizer = {'generator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(restorer, 'generator').parameters()))}
        # One train step must return losses, sample count and detached results.
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert (outputs['num_samples'] == 1)
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert (outputs['results']['output'].size() == (1, 3, 32, 32))
        # With TSA enabled, train_cfg must provide 'tsa_iter'.
        model_cfg['generator']['with_tsa'] = True
        with pytest.raises(KeyError):
            # A train_cfg without 'tsa_iter' (or None) must fail in train_step.
            train_cfg = dict(other_conent='xxx')
            restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
            outputs = restorer.train_step(data_batch, optimizer)
            train_cfg = None
            restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
            outputs = restorer.train_step(data_batch, optimizer)
        # Proper TSA config: two steps cover the warm-up and normal phases.
        train_cfg = mmcv.ConfigDict(tsa_iter=1)
        restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
        optimizer = {'generator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(restorer, 'generator').parameters()))}
        outputs = restorer.train_step(data_batch, optimizer)
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert (outputs['num_samples'] == 1)
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert (outputs['results']['output'].size() == (1, 3, 32, 32))
        # forward_dummy: plain tensor in, restored tensor out.
        with torch.no_grad():
            output = restorer.forward_dummy(data_batch['lq'])
        assert torch.is_tensor(output)
        assert (output.size() == (1, 3, 32, 32))
        # forward in test mode with ground truth present.
        with torch.no_grad():
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert (outputs['output'].size() == (1, 3, 32, 32))
        # forward in test mode without ground truth.
        with torch.no_grad():
            outputs = restorer(inputs.cuda(), test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert (outputs['output'].size() == (1, 3, 32, 32))
    # Evaluation with metrics and image saving (also CUDA-only).
    if torch.cuda.is_available():
        train_cfg = mmcv.ConfigDict(tsa_iter=1)
        test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
        test_cfg = mmcv.Config(test_cfg)
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda(), 'meta': [{'gt_path': 'fake_path/fake_name.png', 'key': '000/'}]}
        restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
        with pytest.raises(AssertionError):
            # Evaluation with metrics requires ground-truth images.
            restorer(lq=inputs.cuda(), test_mode=True)
        with tempfile.TemporaryDirectory() as tmpdir:
            outputs = restorer(**data_batch, test_mode=True, save_image=True, save_path=tmpdir, iteration=None)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['eval_result'], dict)
            assert isinstance(outputs['eval_result']['PSNR'], float)
            assert isinstance(outputs['eval_result']['SSIM'], float)
            outputs = restorer(**data_batch, test_mode=True, save_image=True, save_path=tmpdir, iteration=100)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['eval_result'], dict)
            assert isinstance(outputs['eval_result']['PSNR'], float)
            assert isinstance(outputs['eval_result']['SSIM'], float)
            with pytest.raises(ValueError):
                # 'iteration' must be an int or None.
                restorer(**data_batch, test_mode=True, save_image=True, save_path=tmpdir, iteration='100')
def test_multiple_modes_sequentially():
    """A per-axis mode sequence must equal chaining the 1-D filters per axis."""
    arr = np.array([[1.0, 0.0, 0.0],
                    [1.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0]])
    modes = ['reflect', 'wrap']

    def chained(filter1d, **kw):
        # Apply axis 0 with modes[0], then axis 1 with modes[1].
        partial = filter1d(arr, axis=0, mode=modes[0], **kw)
        return filter1d(partial, axis=1, mode=modes[1], **kw)

    assert_equal(chained(sndi.gaussian_filter1d, sigma=1),
                 sndi.gaussian_filter(arr, 1, mode=modes))
    assert_equal(chained(sndi.uniform_filter1d, size=5),
                 sndi.uniform_filter(arr, 5, mode=modes))
    assert_equal(chained(sndi.maximum_filter1d, size=5),
                 sndi.maximum_filter(arr, size=5, mode=modes))
    assert_equal(chained(sndi.minimum_filter1d, size=5),
                 sndi.minimum_filter(arr, size=5, mode=modes))
# NOTE(review): the bare '_config' below looks like a stripped decorator
# (e.g. sacred's '@ex.named_config') — confirm against the original file.
_config
def task_mlm_itm_webvid():
    """Config: joint MLM + ITM pretraining on the WebVid dataset."""
    exp_name = 'mlm_itm'
    datasets = ['webvid']
    # Enable only the image-text-matching and masked-LM losses.
    loss_names = _loss_names({'itm': 1, 'mlm': 1})
    batch_size = 1024
    max_epoch = 10
    # -1 appears to mean "no cap on image token length" — TODO confirm.
    max_image_len = (- 1)
class FairseqDecoder(nn.Module):
    """Base class for decoders: subclasses must implement ``forward``."""

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary

    def forward(self, prev_output_tokens, encoder_out):
        raise NotImplementedError

    def get_normalized_probs(self, net_output, log_probs, sample):
        """Return (log-)probabilities over the vocabulary from ``net_output``."""
        # An adaptive softmax computes log-probs for the sampled targets
        # directly instead of materializing the full distribution.
        adaptive = getattr(self, 'adaptive_softmax', None)
        if adaptive is not None:
            assert sample is not None and 'target' in sample
            result = adaptive.get_log_prob(net_output[0], sample['target'])
            if log_probs:
                return result
            return result.exp_()
        logits = net_output[0].float()
        if log_probs:
            return F.log_softmax(logits, dim=-1)
        return F.softmax(logits, dim=-1)

    def max_positions(self):
        """Maximum input length supported (effectively unbounded)."""
        return 1000000.0

    def upgrade_state_dict(self, state_dict):
        """Checkpoint-migration hook; the base class is a no-op."""
        return state_dict
class NonStaticControlFlowGuard:
    """Context manager that flags non-static control flow on a status object.

    On entry the flag is forced True; on exit the previous value is restored,
    so guards nest correctly.
    """

    def __init__(self, status: 'NonStaticControlFlowStatus'):
        self.status = status

    def __enter__(self):
        # Remember the old flag so nested guards restore it correctly.
        self.prev = self.status.is_in_non_static_control_flow
        self.status.is_in_non_static_control_flow = True

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.status.is_in_non_static_control_flow = self.prev
class CNNLayer(nn.Module):
    """Small conv encoder: Conv2d -> flatten -> two Linear layers.

    ``obs_shape`` is (channels, width, height); inputs are expected to be raw
    pixel values in [0, 255] (scaled to [0, 1] in ``forward``).
    """

    def __init__(self, obs_shape, hidden_size, use_orthogonal, activation_id, kernel_size=3, stride=1):
        super(CNNLayer, self).__init__()
        # Activation, weight-init scheme, and gain are selected by integer ids.
        active_func = [nn.Tanh(), nn.ReLU(), nn.LeakyReLU(), nn.ELU()][activation_id]
        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
        gain = nn.init.calculate_gain(['tanh', 'relu', 'leaky_relu', 'leaky_relu'][activation_id])

        def init_(m):
            # Apply the selected weight init with zeroed biases.
            return init(m, init_method, (lambda x: nn.init.constant_(x, 0)), gain=gain)

        input_channel = obs_shape[0]
        input_width = obs_shape[1]
        input_height = obs_shape[2]
        # NOTE(review): the flattened-size formula (w - k + s) * (h - k + s)
        # matches the conv output only for stride=1 — confirm for stride > 1.
        self.cnn = nn.Sequential(init_(nn.Conv2d(in_channels=input_channel, out_channels=(hidden_size // 2), kernel_size=kernel_size, stride=stride)), active_func, Flatten(), init_(nn.Linear((((hidden_size // 2) * ((input_width - kernel_size) + stride)) * ((input_height - kernel_size) + stride)), hidden_size)), active_func, init_(nn.Linear(hidden_size, hidden_size)), active_func)

    def forward(self, x):
        # Scale raw pixels to [0, 1] before the conv stack.
        x = (x / 255.0)
        x = self.cnn(x)
        return x
def batch_fc_normalization_layer(input_layer, dimension):
    """Batch-normalize a fully-connected layer's activations (TF1).

    Creates trainable shift ('beta', init 0) and scale ('gamma', init 1)
    variables of size ``dimension`` and normalizes ``input_layer`` using its
    per-batch mean/variance with epsilon BN_EPSILON.
    """
    mean, variance = tf.nn.moments(input_layer, axes=[0])
    beta = tf.get_variable('beta', dimension, tf.float32,
                           initializer=tf.constant_initializer(0.0, tf.float32))
    gamma = tf.get_variable('gamma', dimension, tf.float32,
                            initializer=tf.constant_initializer(1.0, tf.float32))
    return tf.nn.batch_normalization(input_layer, mean, variance, beta, gamma, BN_EPSILON)
def get_model_name(cfg):
    """Build (short_name, full_name) strings for a pose-resnet model config."""
    base = '{model}_{num_layers}'.format(model=cfg.MODEL,
                                         num_layers=cfg.POSE_RESNET.NUM_LAYERS)
    # Encode the deconv head, e.g. 'd256d256d256'.
    deconv_tag = ''.join('d{}'.format(f) for f in cfg.POSE_RESNET.NUM_DECONV_FILTERS)
    full = '{height}x{width}_{name}_{deconv_suffix}'.format(
        height=cfg.NETWORK.IMAGE_SIZE[1],
        width=cfg.NETWORK.IMAGE_SIZE[0],
        name=base,
        deconv_suffix=deconv_tag)
    return (base, full)
def calculate_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args):
    """Dispatch to the per-datapoint loss matching ``args.input_type``.

    Raises:
        ValueError: for any input type other than 'binary' or 'multinomial'.
    """
    kind = args.input_type
    if kind == 'binary':
        return binary_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj)
    if kind == 'multinomial':
        return multinomial_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args)
    raise ValueError('Invalid input type for calculate loss: %s.' % kind)
def do_env(env, text, titleline, counter, format):
    """Render a numbered environment as a wiki-style '=====' section.

    NOTE: the Mako template below is rendered with ``**vars()``, so the local
    names (env, counter, titleline, label, text) are part of the template
    contract — do not rename them.
    """
    # Split an optional 'label{...}' out of the title line.
    (label, titleline) = get_label(titleline)
    titleline = titleline.strip()
    if titleline:
        # Non-empty titles are appended after a colon.
        titleline = (': ' + titleline)
    template = '\n===== ${env.capitalize()} ${counter} ${titleline} =====\n% if label:\nlabel{${label}}\n% endif\n${text}\n\n'
    return Template(template).render(**vars())
class Siamese(pl.LightningModule):
    """Siamese comparison network.

    Two linear encoders (optionally weight-shared) embed a pair of inputs;
    the embeddings are compared by cosine / dot-product / L2, the raw score
    is passed through a learned affine map (w_out, b_out), and training uses
    BCE-with-logits against binary match labels.
    """

    def __init__(self, train_dataset: Dataset, dev_dataset: Dataset, input_dim, hidden_dim, batch_size, verbose=True, same_weights=True, compare_by: str='cosine'):
        super().__init__()
        self.l1 = torch.nn.Linear(input_dim, hidden_dim, bias=True).double()
        if (not same_weights):
            # Independent weights for the second branch.
            self.l2 = torch.nn.Linear(input_dim, hidden_dim, bias=True).double()
        else:
            # Share the same Linear module between both branches.
            self.l2 = self.l1.double()
        self.same_weights = same_weights
        self.verbose = verbose
        self.compare_by = compare_by
        self.cosine_sim = torch.nn.CosineSimilarity(dim=1)
        # Learned scale/shift applied to the raw similarity score.
        (self.w_out, self.b_out) = (torch.nn.Parameter(torch.rand(1)), torch.nn.Parameter(torch.zeros(1)))
        self.train_data = train_dataset
        self.dev_data = dev_dataset
        self.train_gen = torch.utils.data.DataLoader(self.train_data, batch_size=batch_size, drop_last=False, shuffle=True)
        self.dev_gen = torch.utils.data.DataLoader(self.dev_data, batch_size=batch_size, drop_last=False, shuffle=True)
        self.acc = None
        self.loss_fn = torch.nn.BCEWithLogitsLoss()

    def forward(self, x1, x2):
        """Encode both inputs; return the pair of hidden representations."""
        h1 = self.l1(x1)
        h2 = self.l2(x2)
        return (h1, h2)

    def train_network(self, num_epochs):
        """Fit with pytorch-lightning; return the last validation accuracy."""
        trainer = Trainer(max_nb_epochs=num_epochs, min_nb_epochs=num_epochs, show_progress_bar=self.verbose)
        trainer.fit(self)
        return self.acc

    def get_final_representaton_for_sigmoid(self, h1, h2):
        """Scaled similarity score for a pair of encodings.

        (The 'representaton' typo is kept — the name is part of the API.)
        """
        if (self.compare_by == 'cosine'):
            scores = self.cosine_sim(h1, h2)
        elif (self.compare_by == 'dot_product'):
            scores = torch.sum((h1 * h2), axis=1)
        elif (self.compare_by == 'l2'):
            scores = torch.sum(((h1 - h2) ** 2), axis=1)
        else:
            raise Exception('Unsupported comparison method')
        scores = ((self.w_out * scores) + self.b_out)
        return scores

    def training_step(self, batch, batch_nb):
        """One training batch: BCE loss plus batch accuracy."""
        (x1, x2, y) = batch
        (h1, h2) = self.forward(x1, x2)
        similarity_scores = self.get_final_representaton_for_sigmoid(h1, h2)
        loss_val = self.loss_fn(similarity_scores, y)
        # A positive logit is interpreted as a predicted match.
        correct = ((similarity_scores > 0).int() == y.int()).int()
        acc = (torch.sum(correct).float() / len(y))
        return {'loss': loss_val, 'val_acc': acc}

    def validation_step(self, batch, batch_nb):
        """One validation batch: same computation as training_step."""
        (x1, x2, y) = batch
        (h1, h2) = self.forward(x1, x2)
        similarity_scores = self.get_final_representaton_for_sigmoid(h1, h2)
        loss_val = self.loss_fn(similarity_scores, y)
        correct = ((similarity_scores > 0).int() == y.int()).int()
        acc = (torch.sum(correct).float() / len(y))
        return {'val_loss': loss_val, 'val_acc': acc}

    def validation_end(self, outputs):
        """Aggregate validation batches; cache mean accuracy on self.acc."""
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        avg_acc = torch.stack([x['val_acc'] for x in outputs]).mean()
        self.acc = avg_acc
        return {'avg_val_loss': avg_loss}

    def configure_optimizers(self):
        """Adam over all parameters with small weight decay."""
        return torch.optim.Adam(self.parameters(), weight_decay=0.0001)

    # NOTE(review): the bare '_loader' lines below look like stripped
    # decorators (old pytorch-lightning '@pl.data_loader') — confirm.
    _loader
    def train_dataloader(self):
        return self.train_gen

    _loader
    def val_dataloader(self):
        return self.dev_gen
def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, algo, logger):
    """One epoch of NAS search, alternating weight and architecture updates.

    Each batch provides (base_inputs, base_targets, arch_inputs, arch_targets):
    the base split trains network weights via ``w_optimizer``; the arch split
    trains architecture parameters via ``a_optimizer`` (except for 'random'
    and 'enas', which only measure the arch loss here).

    Returns:
        (base_loss_avg, base_top1_avg, base_top5_avg,
         arch_loss_avg, arch_top1_avg, arch_top5_avg)
    """
    (data_time, batch_time) = (AverageMeter(), AverageMeter())
    (base_losses, base_top1, base_top5) = (AverageMeter(), AverageMeter(), AverageMeter())
    (arch_losses, arch_top1, arch_top5) = (AverageMeter(), AverageMeter(), AverageMeter())
    end = time.time()
    network.train()
    for (step, (base_inputs, base_targets, arch_inputs, arch_targets)) in enumerate(xloader):
        # Fractional epoch progress drives the LR schedule.
        scheduler.update(None, ((1.0 * step) / len(xloader)))
        base_inputs = base_inputs.cuda(non_blocking=True)
        arch_inputs = arch_inputs.cuda(non_blocking=True)
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        data_time.update((time.time() - end))
        # Select the candidate-evaluation mode for the weight update.
        if (algo == 'setn'):
            sampled_arch = network.dync_genotype(True)
            network.set_cal_mode('dynamic', sampled_arch)
        elif (algo == 'gdas'):
            network.set_cal_mode('gdas', None)
        elif algo.startswith('darts'):
            network.set_cal_mode('joint', None)
        elif (algo == 'random'):
            network.set_cal_mode('urs', None)
        elif (algo == 'enas'):
            # Sample an architecture from the controller without gradients.
            with torch.no_grad():
                network.controller.eval()
                (_, _, sampled_arch) = network.controller()
            network.set_cal_mode('dynamic', sampled_arch)
        else:
            raise ValueError('Invalid algo name : {:}'.format(algo))
        # --- update network weights on the base split ---
        network.zero_grad()
        (_, logits) = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        w_optimizer.step()
        (base_prec1, base_prec5) = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        base_top1.update(base_prec1.item(), base_inputs.size(0))
        base_top5.update(base_prec5.item(), base_inputs.size(0))
        # Select the evaluation mode for the architecture update.
        if (algo == 'setn'):
            network.set_cal_mode('joint')
        elif (algo == 'gdas'):
            network.set_cal_mode('gdas', None)
        elif algo.startswith('darts'):
            network.set_cal_mode('joint', None)
        elif (algo == 'random'):
            network.set_cal_mode('urs', None)
        elif (algo != 'enas'):
            raise ValueError('Invalid algo name : {:}'.format(algo))
        # --- update architecture parameters on the arch split ---
        network.zero_grad()
        if (algo == 'darts-v2'):
            # Second-order DARTS: unrolled one-step lookahead.
            (arch_loss, logits) = backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets)
            a_optimizer.step()
        elif ((algo == 'random') or (algo == 'enas')):
            # No gradient-based architecture update; only measure the loss.
            with torch.no_grad():
                (_, logits) = network(arch_inputs)
                arch_loss = criterion(logits, arch_targets)
        else:
            (_, logits) = network(arch_inputs)
            arch_loss = criterion(logits, arch_targets)
            arch_loss.backward()
            a_optimizer.step()
        (arch_prec1, arch_prec5) = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
        arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        # Periodic progress logging.
        if (((step % print_freq) == 0) or ((step + 1) == len(xloader))):
            Sstr = (('*SEARCH* ' + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader)))
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
            Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) {top1.val:.2f} ({top1.avg:.2f}) {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)
            Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) {top1.val:.2f} ({top1.avg:.2f}) {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)
            logger.log(((((((Sstr + ' ') + Tstr) + ' ') + Wstr) + ' ') + Astr))
    return (base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg)
def add_distributed_training_args(parser):
    """Register distributed-training options on ``parser``.

    Args:
        parser: an ``argparse.ArgumentParser``-compatible object to extend.

    Returns:
        The created 'Distributed training' argument group.
    """
    group = parser.add_argument_group('Distributed training')
    group.add_argument('--distributed-world-size', type=int, metavar='N',
                       default=max(1, torch.cuda.device_count()),
                       help='total number of GPUs across all nodes (default: all visible GPUs)')
    group.add_argument('--distributed-rank', default=0, type=int,
                       help='rank of the current worker')
    group.add_argument('--distributed-backend', default='nccl', type=str,
                       help='distributed backend')
    # Typo fix: 'connetion' -> 'connection'.
    group.add_argument('--distributed-init-method', default=None, type=str,
                       help='typically tcp://hostname:port that will be used to '
                            'establish initial connection')
    group.add_argument('--distributed-port', default=(- 1), type=int,
                       help='port number (not required if using --distributed-init-method)')
    group.add_argument('--device-id', '--local_rank', default=0, type=int,
                       help='which GPU to use (usually configured automatically)')
    group.add_argument('--distributed-no-spawn', action='store_true',
                       help='do not spawn multiple processes even if multiple GPUs are visible')
    group.add_argument('--ddp-backend', default='c10d', type=str,
                       choices=['c10d', 'no_c10d'],
                       help='DistributedDataParallel backend')
    group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',
                       help='bucket size for reduction')
    group.add_argument('--fix-batches-to-gpus', action='store_true',
                       help="don't shuffle batches between GPUs; this reduces overall "
                            'randomness and may affect precision but avoids the cost of '
                            're-reading the data')
    # Typo fix: help string was missing its closing ')'.
    # NOTE(review): the help says 'disable' although the flag *sets*
    # find_unused_parameters=True downstream — confirm wording against usage.
    group.add_argument('--find-unused-parameters', default=False, action='store_true',
                       help='disable unused parameter detection (not applicable to '
                            'no_c10d ddp-backend)')
    group.add_argument('--fast-stat-sync', default=False, action='store_true',
                       help='Enable fast sync of stats between nodes, this hardcodes to '
                            'sync only some default stats from logging_output.')
    return group
class Triangle:
    """A triangle defined by three vertices and an integer color (default 0)."""

    def __init__(self, a, b, c, color=0):
        self._a = a
        self._b = b
        self._c = c
        self._color = color

    def str(self):
        """Return 'a b c color' (kept for backward compatibility)."""
        return ('%s %s %s %s' % (self._a, self._b, self._c, self._color))

    # Backward-compatible improvement: make builtin str()/print work too.
    __str__ = str

    def set_color(self, color):
        """Set the triangle's color value."""
        self._color = color

    def get_vertices(self):
        """Return the vertices as a tuple (a, b, c)."""
        return (self._a, self._b, self._c)
def get_keyframe_data(boxes_and_labels):
    """Collect AVA keyframe indices and their box/label annotations.

    Returns (keyframe_indices, keyframe_boxes_and_labels) where each index is
    a tuple (video_idx, sec_idx, sec, frame); only seconds that are in
    AVA_VALID_FRAMES and have at least one box are kept.
    """

    def sec_to_frame(sec):
        # AVA clips start at second 900; map an absolute second to a frame.
        return (sec - 900) * FPS

    keyframe_indices = []
    keyframe_boxes_and_labels = []
    count = 0
    for video_idx, sec_to_boxes in enumerate(boxes_and_labels):
        keyframe_boxes_and_labels.append([])
        sec_idx = 0
        for sec in sec_to_boxes.keys():
            if sec not in AVA_VALID_FRAMES:
                continue
            if len(sec_to_boxes[sec]) > 0:
                keyframe_indices.append((video_idx, sec_idx, sec, sec_to_frame(sec)))
                keyframe_boxes_and_labels[video_idx].append(sec_to_boxes[sec])
                sec_idx += 1
                count += 1
    logger.info('%d keyframes used.' % count)
    return (keyframe_indices, keyframe_boxes_and_labels)
class UniversalCyclotomicFieldElement(FieldElement):
    """Element of the universal cyclotomic field, backed by a GAP cyclotomic.

    All arithmetic delegates to the wrapped GAP object ``self._obj``; results
    are rewrapped via ``parent().element_class``.
    """

    def __init__(self, parent, obj):
        # obj: the underlying GAP cyclotomic value.
        self._obj = obj
        FieldElement.__init__(self, parent)

    def __bool__(self):
        return bool(self._obj)

    def __reduce__(self):
        # Pickle via the string representation, reparsed by the parent.
        return (self.parent(), (str(self),))

    def __eq__(self, other):
        if (parent(self) is not parent(other)):
            # Different parents: try to coerce both into a common parent.
            from sage.structure.element import coercion_model as cm
            try:
                (self, other) = cm.canonical_coercion(self, other)
            except TypeError:
                return False
            return (self == other)
        return (self._obj == other._obj)

    def __ne__(self, other):
        return (not (self == other))

    def real(self):
        """Real part, as a universal cyclotomic field element."""
        P = self.parent()
        return P.element_class(P, self._obj.RealPart())
    real_part = real

    def imag(self):
        """Imaginary part, as a universal cyclotomic field element."""
        P = self.parent()
        return P.element_class(P, self._obj.ImaginaryPart())
    imag_part = imag

    def is_real(self):
        return (self._obj.RealPart() == self._obj)

    def is_integral(self):
        """Whether this element is an algebraic integer (per GAP)."""
        return self._obj.IsIntegralCyclotomic().sage()

    def conductor(self):
        """Smallest n such that this element lies in QQ(zeta_n)."""
        return ZZ(self._obj.Conductor())

    def _symbolic_(self, R):
        """Symbolic expression as a QQ-combination of exp(2*pi*I*a/k)."""
        from sage.symbolic.constants import pi, I
        k = ZZ(self._obj.Conductor())
        coeffs = self._obj.CoeffsCyc(k).sage()
        s = R.zero()
        for a in range(k):
            if coeffs[a]:
                s += (coeffs[a] * ((((2 * a) * I) * pi) / k).exp())
        return s

    def to_cyclotomic_field(self, R=None):
        """Image in the (finite) cyclotomic field of this element's conductor,
        or in ``R`` if given."""
        from sage.rings.number_field.number_field import CyclotomicField
        k = ZZ(self._obj.Conductor())
        Rcan = CyclotomicField(k)
        if (R is None):
            R = Rcan
        obj = self._obj
        if obj.IsRat():
            return R(obj.sage())
        zeta = Rcan.gen()
        coeffs = obj.CoeffsCyc(k).sage()
        return R(sum(((coeffs[a] * (zeta ** a)) for a in range(k))))

    def __hash__(self):
        # Hash on (conductor, coefficient vector); rationals hash as in QQ.
        k = ZZ(self._obj.Conductor())
        coeffs = self._obj.CoeffsCyc(k).sage()
        if (k == 1):
            return hash(coeffs[0])
        else:
            return hash(((k,) + tuple(coeffs)))

    def _algebraic_(self, R):
        return R(QQbar(self))

    def __float__(self):
        from sage.rings.real_mpfr import RR
        return float(RR(self))

    def __complex__(self):
        f = self.parent().coerce_embedding()
        return complex(f(self))

    def _eval_complex_(self, R):
        """Evaluate numerically in the complex field/interval ``R``."""
        if self._obj.IsRat():
            return R(self._obj.sage())
        k = ZZ(self._obj.Conductor())
        coeffs = self._obj.CoeffsCyc(k).sage()
        zeta = R.zeta(k)
        s = sum(((coeffs[i] * (zeta ** i)) for i in range(k)))
        if self.is_real():
            # Drop the (numerically tiny) imaginary part of a real element.
            return R(s.real())
        return s
    _complex_mpfi_ = _eval_complex_
    _complex_mpfr_field_ = _eval_complex_

    def _eval_real_(self, R):
        """Evaluate a real element in the real field/interval ``R``
        via sum(c_i * cos(2*pi*i/k))."""
        if (not self.is_real()):
            raise TypeError('self is not real')
        if self._obj.IsRat():
            return R(self._obj.sage())
        k = ZZ(self._obj.Conductor())
        coeffs = self._obj.CoeffsCyc(k).sage()
        t = ((2 * R.pi()) / k)
        return sum(((coeffs[i] * (i * t).cos()) for i in range(k)))
    _mpfr_ = _eval_real_

    def _richcmp_(self, other, op):
        """Compare lexicographically by (real part, imaginary part), refining
        interval precision until the parts are separated."""
        if (self._obj == other._obj):
            return rich_to_bool(op, 0)
        s = self.real_part()
        o = other.real_part()
        if (s == o):
            # Equal real parts: fall back to the imaginary parts.
            s = self.imag_part()
            o = other.imag_part()
        from sage.rings.real_mpfi import RealIntervalField
        prec = 53
        R = RealIntervalField(prec)
        sa = s._eval_real_(R)
        oa = o._eval_real_(R)
        while sa.overlaps(oa):
            # Intervals overlap: quadruple the precision and retry.
            prec <<= 2
            R = RealIntervalField(prec)
            sa = s._eval_real_(R)
            oa = o._eval_real_(R)
        return sa._richcmp_(oa, op)

    def denominator(self):
        return ZZ(self._obj.DenominatorCyc())

    def multiplicative_order(self):
        return self._obj.Order().sage()

    def additive_order(self):
        # Characteristic zero: any nonzero element has infinite order.
        return (Infinity if self else ZZ.zero())

    def is_rational(self):
        return self._obj.IsRat().sage()

    def _rational_(self):
        if (not self._obj.IsRat()):
            raise TypeError('Unable to coerce to a rational')
        return Rational(self._obj.sage())

    def _repr_(self):
        # Space out '+'/'-' in GAP's output, preserving a leading sign.
        s = str(self._obj)
        first_char = s[0]
        s = s[1:].replace('+', ' + ').replace('-', ' - ')
        return (first_char + s)

    def _add_(self, other):
        P = self.parent()
        return P.element_class(P, (self._obj + other._obj))

    def _sub_(self, other):
        P = self.parent()
        return P.element_class(P, (self._obj - other._obj))

    def __neg__(self):
        P = self.parent()
        return P.element_class(P, (- self._obj))

    def _mul_(self, other):
        P = self.parent()
        return P.element_class(P, (self._obj * other._obj))

    def _div_(self, other):
        P = self.parent()
        try:
            return P.element_class(P, (self._obj / other._obj))
        except ValueError:
            # GAP reports division by zero as a ValueError.
            raise ZeroDivisionError('division by zero')

    def __invert__(self):
        P = self.parent()
        return P.element_class(P, (~ self._obj))
    inverse = __invert__

    def _pow_(self, other):
        """Power by a rational exponent: integers directly; half-integers of
        rationals via sqrt; anything else is not implemented."""
        if other._obj.IsRat():
            other = other._obj.sage()
            num = other.numerator()
            den = other.denominator()
            if den.is_one():
                return (self ** num)
            if ((den == 2) and self._obj.IsRat()):
                return (self.sqrt() ** num)
            else:
                raise NotImplementedError('no powering implemented beyond square root of rationals')
        raise NotImplementedError('no powering implemented for non-rational exponents')

    def is_square(self):
        """True for rationals and single-term zeta multiples; otherwise
        NotImplementedError (the general case is not handled)."""
        if self._obj.IsRat():
            return True
        k = self._obj.Conductor()
        coeffs = self._obj.CoeffsCyc(k).sage()
        if (sum((bool(x) for x in coeffs)) == 1):
            return True
        raise NotImplementedError('is_square() not fully implemented for elements of Universal Cyclotomic Field')

    def sqrt(self, extend=True, all=False):
        """Square root: rationals via UCF_sqrt_int, single-term zeta
        multiples exactly, otherwise fall back to QQbar when ``extend``."""
        if all:
            s = self.sqrt(all=False)
            return [s, (- s)]
        UCF = self.parent()
        if self._obj.IsRat():
            D = self._obj.sage()
            if self._obj.IsInt():
                return UCF_sqrt_int(D, UCF)
            else:
                # sqrt(p/q) = sqrt(p) / sqrt(q).
                return (UCF_sqrt_int(D.numerator(), UCF) / UCF_sqrt_int(D.denominator(), UCF))
        k = self._obj.Conductor()
        coeffs = self._obj.CoeffsCyc(k).sage()
        if (sum((bool(x) for x in coeffs)) == 1):
            # Single term x * zeta_k^i: sqrt is sqrt(x) * zeta_{2k}^i.
            for (i, x) in enumerate(coeffs):
                if x:
                    break
            return (UCF(x).sqrt() * UCF.zeta((2 * k), i))
        if extend:
            return QQbar(self).sqrt()
        else:
            raise NotImplementedError('sqrt() not fully implemented for elements of Universal Cyclotomic Field')

    def conjugate(self):
        """Complex conjugate."""
        P = self.parent()
        return P.element_class(P, self._obj.ComplexConjugate())

    def galois_conjugates(self, n=None):
        """All Galois conjugates over QQ, computed in QQ(zeta_n); ``n`` must
        be a multiple of the conductor (defaults to the conductor)."""
        P = self.parent()
        obj = self._obj
        k = obj.Conductor().sage()
        n = (k if (n is None) else ZZ(n))
        if (not k.divides(n)):
            raise ValueError('n = {} must be a multiple of the conductor ({})'.format(n, k))
        return [P.element_class(P, obj.GaloisCyc(i)) for i in n.coprime_integers(n)]

    def __abs__(self):
        # |z| = sqrt(z * conj(z)), evaluated in the real algebraic field.
        square = (self * self.conjugate())
        return AA(square).sqrt()
    abs = __abs__

    def norm_of_galois_extension(self):
        """Product of all Galois conjugates (norm down to QQ)."""
        obj = self._obj
        k = obj.Conductor().sage()
        return libgap.Product(libgap([obj.GaloisCyc(i) for i in range(k) if (k.gcd(i) == 1)])).sage()

    def minpoly(self, var='x'):
        """Minimal polynomial over QQ in variable ``var`` (computed by GAP)."""
        gap_p = libgap.MinimalPolynomial(libgap.eval('Rationals'), self._obj)
        # GAP prints the polynomial in 'x_1'; reparse then rename the variable.
        return QQ[var](QQ['x_1'](str(gap_p)))
def list_all_keys(client, bucket, prefix, max_keys=None):
    """List object keys under ``prefix`` in an S3 ``bucket``, following pagination.

    Args:
        client: boto3-style S3 client exposing ``list_objects``.
        bucket: bucket name.
        prefix: key prefix to filter on.
        max_keys: optional soft cap — pagination stops once at least this many
            keys have been collected (the result may slightly exceed it).

    Returns:
        List of non-empty key names.
    """
    # NOTE(review): Delimiter=prefix is unusual (Delimiter is normally a
    # separator such as '/'); preserved as-is — confirm it is intentional.
    objects = client.list_objects(Bucket=bucket, Prefix=prefix, Delimiter=prefix)
    contents = objects.get('Contents')
    if contents is None:  # fixed: was '== None'
        return []
    keys = [obj['Key'] for obj in contents]
    truncated = objects['IsTruncated']
    next_marker = objects.get('NextMarker')
    while truncated:
        objects = client.list_objects(Bucket=bucket, Prefix=prefix,
                                      Delimiter=prefix, Marker=next_marker)
        truncated = objects['IsTruncated']
        next_marker = objects.get('NextMarker')
        # Robustness fix: a page without 'Contents' no longer raises KeyError.
        keys += [obj['Key'] for obj in objects.get('Contents', [])]
        if (max_keys is not None) and (len(keys) >= max_keys):
            break
    # Drop empty key names (e.g. the prefix "directory" placeholder itself).
    return [k for k in keys if len(k) > 0]
# NOTE(review): the bare call below looks like a stripped decorator
# ('@..._LAYERS.register_module()') — confirm against the original file.
_LAYERS.register_module()
class ConvAudio(nn.Module):
    """Audio conv block: parallel (k, 1) and (1, k) convolutions whose
    outputs are either concatenated ('concat') or summed ('sum')."""

    def __init__(self, in_channels, out_channels, kernel_size, op='concat', stride=1, padding=0, dilation=1, groups=1, bias=False):
        super().__init__()
        # Normalize scalar arguments to (h, w) pairs.
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        assert (op in ['concat', 'sum'])
        self.op = op
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.output_padding = (0, 0)
        self.transposed = False
        # Branch 1: (kernel_size[0], 1) kernel, padded to preserve dim 0.
        self.conv_1 = ConvModule(in_channels, out_channels, kernel_size=(kernel_size[0], 1), stride=stride, padding=((kernel_size[0] // 2), 0), bias=bias, conv_cfg=dict(type='Conv'), norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'))
        # Branch 2: (1, kernel_size[1]) kernel, padded to preserve dim 1.
        self.conv_2 = ConvModule(in_channels, out_channels, kernel_size=(1, kernel_size[1]), stride=stride, padding=(0, (kernel_size[1] // 2)), bias=bias, conv_cfg=dict(type='Conv'), norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'))
        self.init_weights()

    def forward(self, x):
        x_1 = self.conv_1(x)
        x_2 = self.conv_2(x)
        if (self.op == 'concat'):
            # Channel-wise concatenation of the two branches.
            out = torch.cat([x_1, x_2], 1)
        else:
            out = (x_1 + x_2)
        return out

    def init_weights(self):
        """Kaiming-init both convs; BN weights to 1, biases to 0."""
        kaiming_init(self.conv_1.conv)
        kaiming_init(self.conv_2.conv)
        constant_init(self.conv_1.bn, 1, bias=0)
        constant_init(self.conv_2.bn, 1, bias=0)
def nodes_builder(model: GraphModule, module_dict: Dict, to_numpy: Callable) -> Tuple[(List, List, List, Dict)]:
    """Convert a traced torch.fx graph into framework-agnostic graph nodes.

    Args:
        model: traced fx GraphModule to convert.
        module_dict: mapping from fx node targets to the underlying nn.Modules.
        to_numpy: converter from torch tensors to numpy arrays.

    Returns:
        Tuple of (all graph nodes, model input nodes, output OutTensors,
        fx-node -> graph-node mapping).

    Raises:
        Exception: for unsupported call_method targets or unknown node ops.
    """
    inputs = []
    outputs = []
    nodes = []
    output_nodes = []
    fx_node_2_graph_node = {}
    for node in model.graph.nodes:
        # --- Resolve the node's layer type and static framework attributes ---
        framework_attr = dict(node.kwargs)
        node_has_activation = True
        if node.target in module_dict.keys():
            # Node backed by an nn.Module: keep only constructor arguments.
            node_module = module_dict[node.target]
            node_type = type(node_module)
            framework_attr = node_module.__dict__
            fullargspec = inspect.getfullargspec(node_type.__init__).args
            framework_attr = {k: v for (k, v) in framework_attr.items() if (k in fullargspec)}
            if hasattr(node_module, BIAS) and (BIAS in fullargspec):
                # Normalize the bias attribute to a plain bool flag.
                framework_attr[BIAS] = (False if (node_module.bias is None) else True)
        elif node.op == CALL_FUNCTION:
            node_type = node.target
            if node_type == getattr:
                node_has_activation = False
                Logger.warning('Pytorch model has a parameter or constant Tensor value. This can cause unexpected behaviour when converting the model.')
        elif node.op == PLACEHOLDER:
            node_type = DummyPlaceHolder
        elif node.op == OUTPUT:
            # Model outputs are collected and materialized after the loop.
            output_nodes += node.all_input_nodes
            continue
        elif node.op == CALL_METHOD:
            if hasattr(torch, node.target):
                node_type = getattr(torch, node.target)
            elif hasattr(torch.Tensor, node.target):
                node_type = getattr(torch.Tensor, node.target)
            else:
                raise Exception(f"Call method of type '{node.target}' is currently not supported.")
        elif node.op == GET_ATTR:
            if node.meta[TYPE] == torch.Tensor:
                node_type = BufferHolder
            else:
                node_type = ConstantHolder
            node_has_activation = False
            Logger.warning('Pytorch model has a parameter or constant Tensor value. This can cause unexpected behaviour when converting the model.')
        else:
            raise Exception(f'Unknown node type: {node.name}')
        # --- Collect module weights (parameters + non-scalar buffers) --------
        weights = {}
        if node.target in module_dict.keys():
            named_parameters_weights = {name: to_numpy(parameter) for (name, parameter) in module_dict[node.target].named_parameters()}
            named_buffer_weights = {name: to_numpy(parameter) for (name, parameter) in module_dict[node.target].named_buffers() if (len(parameter.shape) > 0)}
            weights.update(named_parameters_weights)
            weights.update(named_buffer_weights)
        if node.op == GET_ATTR:
            if node_type == ConstantHolder:
                weights = extract_holder_weights(CONSTANT, node.target, model, weights, to_numpy)
                framework_attr.update(const_size=weights.get(CONSTANT).shape)
            elif node_type == BufferHolder:
                weights = extract_holder_weights(BUFFER, node.target, model, weights, to_numpy)
                framework_attr.update(name=node.name)
        # --- Infer input/output shapes from fx meta information --------------
        input_shape = []
        if node.op != PLACEHOLDER:
            for input_node in node.all_input_nodes:
                tensor_meta = input_node.meta
                if tensor_meta[TYPE] == torch.Tensor:
                    input_shape += [list(tensor_meta[TENSOR_META].shape)]
                elif tensor_meta[TYPE] == tuple:
                    input_shape += [list(n.shape) for n in tensor_meta[TENSOR_META]]
                elif tensor_meta[TYPE] == int:
                    # Scalar int inputs are modelled as shape [1].
                    input_shape += [[1]]
        if node.meta[TYPE] == torch.Tensor:
            output_shape = [list(node.meta[TENSOR_META].shape)]
        elif node.meta[TYPE] in (list, tuple):
            output_shape = [list(m.shape) for m in node.meta[TENSOR_META]]
        elif node.meta[TYPE] == int:
            output_shape = [[1]]
        else:
            output_shape = []
        # Drop attributes/kwargs that reference other fx nodes: those are graph
        # edges, not static attributes.
        framework_attr = {k: v for (k, v) in framework_attr.items() if not isinstance(v, torch.fx.node.Node)}
        node_kwargs = {k: v for (k, v) in node.kwargs.items() if not isinstance(v, torch.fx.node.Node)}
        # --- Build the graph node --------------------------------------------
        if node.op in [CALL_METHOD, CALL_FUNCTION]:
            graph_node_type = FunctionalNode
            inputs_as_list1 = ((len(node.args) > 0) and isinstance(node.args[0], (list, tuple)) and all([isinstance(n, torch.fx.node.Node) for n in node.args[0]]))
            inputs_as_list = (inputs_as_list1 or ((len(node.args) > 0) and (node.args[0].op == PLACEHOLDER) and (node.args[0].meta[TYPE] in (list, tuple))))
            if inputs_as_list:
                num_inputs = 1
            else:
                input_counter = 0
                for in_node in node.all_input_nodes:
                    for arg in node.args:
                        if arg == in_node:
                            input_counter += 1
                num_inputs = max(len(node.all_input_nodes), input_counter)
            # BUG FIX: the original removed Node entries from op_call_args while
            # iterating the same list, which skips the element following each
            # removal (adjacent Node args survived the filter). Build a
            # filtered copy instead.
            op_call_args = [arg for arg in node.args[num_inputs:] if not isinstance(arg, torch.fx.node.Node)]
            kwargs = {FUNCTIONAL_OP: node_type, OP_CALL_ARGS: op_call_args, OP_CALL_KWARGS: node_kwargs, INPUTS_AS_LIST: inputs_as_list}
        else:
            graph_node_type = BaseNode
            kwargs = {}
        graph_node = graph_node_type(name=node.name, framework_attr=framework_attr, input_shape=input_shape, output_shape=output_shape, weights=weights, layer_class=node_type, has_activation=node_has_activation, **kwargs)
        if node.op == PLACEHOLDER:
            # A placeholder producing several tensors registers once per tensor.
            for _ in range(len(output_shape)):
                inputs.append(graph_node)
        fx_node_2_graph_node[node] = graph_node
        nodes.append(graph_node)
    # BUG FIX: the original used output_nodes.index(node), which returns the
    # FIRST occurrence and therefore mis-numbered duplicated outputs (and is
    # O(n^2)); enumerate preserves each output's true position.
    for out_index, out_node in enumerate(output_nodes):
        outputs.append(OutTensor(fx_node_2_graph_node[out_node], out_index))
    return (nodes, inputs, outputs, fx_node_2_graph_node)
class Distributed(object):
    """Thin wrapper holding a joblib ``Parallel`` pool for distributed work.

    Args:
        num_workers (int): number of parallel jobs.
        backend (str): joblib backend name (e.g. 'multiprocessing', 'loky').
        verbose (bool): if True, print the constructed Parallel client.
    """

    def __init__(self, num_workers=1, backend='multiprocessing', verbose=False):
        # BUG FIX: the `backend` argument was previously ignored —
        # 'multiprocessing' was hard-coded in the Parallel(...) call, so
        # callers could never select a different backend.
        self.client = Parallel(n_jobs=num_workers, backend=backend, prefer='processes')
        self.num_workers = num_workers
        self.verbose = verbose
        if self.verbose:
            print(self.client)
def TrivialBundle(X, rank=1):
    """Return the trivial Klyachko bundle of the given rank over ``X``.

    Every ray of the fan is assigned the same filtration of a rank-``rank``
    vector space over the base ring of the toric variety.
    """
    if not is_ToricVariety(X):
        raise ValueError('not a toric variety')
    from . import klyachko
    R = X.base_ring()
    trivial_filtration = {
        ray: FilteredVectorSpace(rank, 0, base_ring=R)
        for ray in X.fan().rays()
    }
    return klyachko.Bundle(X, trivial_filtration, check=True)
class MethodAveragePrecision(Enum):
    """Average-precision interpolation methods (Pascal VOC styles)."""

    # All-points interpolation (post-2010 VOC metric).
    EVERY_POINT_INTERPOLATION = 1
    # Classic 11-point interpolation (pre-2010 VOC metric).
    ELEVEN_POINT_INTERPOLATION = 2
def parse_args():
    """Parse command-line options for the wikipedia -> silver-tree converter."""
    parser = argparse.ArgumentParser(
        description='Script that converts part of a wikipedia dump to silver standard trees'
    )
    parser.add_argument('--output_file', default='vi_wiki_tokenized.txt',
                        help='Where to write the tokenized lines')
    parser.add_argument('--lang', default='vi',
                        help='Which language tools to use for tokenization and POS')
    parser.add_argument('--input_dir', default='extern_data/vietnamese/wikipedia/text/AA',
                        help='Path to the wikipedia dump after processing by wikiextractor')
    parser.add_argument('--bert_tokenizer', default=None,
                        help='Which bert tokenizer (if any) to use to filter long sentences')
    # Shared sentence-length options live in a helper used by sibling scripts.
    add_length_args(parser)
    return parser.parse_args()
def use_original_bracket(text: str):
    """Restore literal brackets from PTB-style escape tokens.

    Replaces ``-lrb-``/``-rrb-``/``-lsb-``/``-rsb-`` (both cases) with the
    actual '(' ')' '[' ']' characters.
    """
    token_to_bracket = {
        '-lrb-': '(', '-rrb-': ')',
        '-LRB-': '(', '-RRB-': ')',
        '-lsb-': '[', '-rsb-': ']',
        '-LSB-': '[', '-RSB-': ']',
    }
    # Tokens never overlap, so the replacement order is immaterial.
    for token, bracket in token_to_bracket.items():
        text = text.replace(token, bracket)
    return text
class AutoModelForVision2Seq(metaclass=DummyObject):
    """Import-time placeholder for the real AutoModelForVision2Seq.

    Instantiating it only delegates to requires_backends, which reports the
    missing backend instead of constructing a model.
    """

    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def _int64_list_feature(values):
    """Wrap an int (or iterable of ints) in a tf.train.Feature of Int64List.

    A scalar argument is promoted to a one-element list before wrapping.
    """
    # BUG FIX: `collections.Iterable` was a deprecated alias removed in
    # Python 3.10; the ABC lives in collections.abc.
    from collections.abc import Iterable
    if not isinstance(values, Iterable):
        values = [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def check_slot_inform(value_label, inform_label, label_maps):
    """Reconcile an informed slot value with the ground-truth value label.

    Returns ``value_label`` when it matches the informed value directly, by
    containment (via is_in_list), or through any variant listed in
    ``label_maps``; otherwise returns ``inform_label`` unchanged.
    """

    def _matches(candidate, reference):
        # Exact equality, then containment in either direction — the same
        # short-circuit order as the original elif chain.
        return (candidate == reference
                or is_in_list(reference, candidate)
                or is_in_list(candidate, reference))

    if _matches(value_label, inform_label):
        return value_label
    if inform_label in label_maps:
        # Try every known variant of the informed value.
        for variant in label_maps[inform_label]:
            if _matches(value_label, variant):
                return value_label
    elif value_label in label_maps:
        # Otherwise try every known variant of the ground-truth value.
        for variant in label_maps[value_label]:
            if _matches(variant, inform_label):
                return value_label
    return inform_label
class mumps_struc_c_4(ctypes.Structure):
    """ctypes mirror of the MUMPS version-4 control structure (complex arithmetic).

    Field names, order, and types must match the C struct layout in the MUMPS
    headers exactly — do not reorder or retype entries.
    NOTE(review): the array sizes (icntl*40, cntl*15, info/infog/rinfo/rinfog*40)
    and the version_number length presumably follow the MUMPS 4.x c_mumps headers;
    verify against the linked libmumps version before changing anything here.
    """
    _fields_ = [('sym', mumps_int), ('par', mumps_int), ('job', mumps_int), ('comm_fortran', mumps_int), ('icntl', (mumps_int * 40)), ('cntl', (mumps_real * 15)), ('n', mumps_int), ('nz_alloc', mumps_int), ('nz', mumps_int), ('irn', mumps_pint), ('jcn', mumps_pint), ('a', mumps_pcomplex), ('nz_loc', mumps_int), ('irn_loc', mumps_pint), ('jcn_loc', mumps_pint), ('a_loc', mumps_pcomplex), ('nelt', mumps_int), ('eltptr', mumps_pint), ('eltvar', mumps_pint), ('a_elt', mumps_pcomplex), ('perm_in', mumps_pint), ('sym_perm', mumps_pint), ('uns_perm', mumps_pint), ('colsca', mumps_preal), ('rowsca', mumps_preal), ('rhs', mumps_pcomplex), ('redrhs', mumps_pcomplex), ('rhs_sparse', mumps_pcomplex), ('sol_loc', mumps_pcomplex), ('irhs_sparse', mumps_pint), ('irhs_ptr', mumps_pint), ('isol_loc', mumps_pint), ('nrhs', mumps_int), ('lrhs', mumps_int), ('lredrhs', mumps_int), ('nz_rhs', mumps_int), ('lsol_loc', mumps_int), ('schur_mloc', mumps_int), ('schur_nloc', mumps_int), ('schur_lld', mumps_int), ('mblock', mumps_int), ('nblock', mumps_int), ('nprow', mumps_int), ('npcol', mumps_int), ('info', (mumps_int * 40)), ('infog', (mumps_int * 40)), ('rinfo', (mumps_real * 40)), ('rinfog', (mumps_real * 40)), ('deficiency', mumps_int), ('pivnul_list', mumps_pint), ('mapping', mumps_pint), ('size_schur', mumps_int), ('listvar_schur', mumps_pint), ('schur', mumps_pcomplex), ('instance_number', mumps_int), ('wk_user', mumps_pcomplex), ('version_number', (ctypes.c_char * ((14 + 1) + 1))), ('ooc_tmpdir', (ctypes.c_char * 256)), ('ooc_prefix', (ctypes.c_char * 64)), ('write_problem', (ctypes.c_char * 256)), ('lwk_user', mumps_int)]
class ConcatCell(BaseMergeCell):
    """Merge cell that fuses two feature maps by channel-wise concatenation."""

    def __init__(self, in_channels, out_channels, **kwargs):
        # Concatenation doubles the channel count seen by the parent cell.
        super().__init__(in_channels * 2, out_channels, **kwargs)

    def _binary_op(self, x1, x2):
        """Concatenate the two inputs along the channel dimension."""
        return torch.cat([x1, x2], dim=1)
def bench3():
    """Benchmark large-rational arithmetic; return (description, cpu seconds)."""
    desc = "Some basic arithmetic with very large Rational numbers: '(2/3)^100001 * (17/19)^100001"
    start = cputime()
    # The result itself is discarded — only the elapsed CPU time matters.
    _ = (QQ((2, 3)) ** 100001) * (QQ((17, 19)) ** 100001)
    return (desc, cputime(start))
def test_ListOffsetArray_NumpyArray():
    """Exercise basic indexing/slicing behavior of a ListOffsetArray.

    Offsets [1, 4, 4, 6] over 7 values give three sublists:
    [1.1, 2.2, 3.3], [], [4.4, 5.5] (value 6.6 before offset 1 and 7.7 after
    offset 6 are unreachable).
    """
    a = ak.contents.listoffsetarray.ListOffsetArray(ak.index.Index(np.array([1, 4, 4, 6])), ak.contents.numpyarray.NumpyArray(np.array([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7])))
    # The typetracer (shape-only) view must agree with the concrete form.
    assert (a.to_typetracer().form == a.form)
    assert (a.to_typetracer().form.type == a.form.type)
    assert (len(a) == 3)
    # Out-of-range integer indices raise, both positive and negative.
    with pytest.raises(IndexError):
        a[3]
    with pytest.raises(IndexError):
        a[(- 4)]
    assert isinstance(a[2], ak.contents.numpyarray.NumpyArray)
    assert (a.to_typetracer()[2].form == a[2].form)
    # Sublist lengths, by positive and by negative index.
    assert (len(a[0]) == 3)
    assert (len(a[1]) == 0)
    assert (len(a[2]) == 2)
    assert (len(a[(- 3)]) == 3)
    assert (len(a[(- 2)]) == 0)
    assert (len(a[(- 1)]) == 2)
    assert (a[0][(- 1)] == 3.3)
    assert (a[2][(- 1)] == 5.5)
    # Slicing yields another ListOffsetArray and clamps out-of-range stops.
    assert isinstance(a[1:], ak.contents.listoffsetarray.ListOffsetArray)
    assert (a.to_typetracer()[1:].form == a[1:].form)
    assert (len(a[1:]) == 2)
    assert (len(a[(- 2):]) == 2)
    assert (len(a[1:100]) == 2)
    assert (len(a[(- 2):100]) == 2)
    # Field-name indexing is invalid for a non-record array.
    with pytest.raises(IndexError):
        a['bad']
    with pytest.raises(IndexError):
        a[['bad', 'good', 'ok']]
def Res101_Deeplab(num_classes=21):
    """Build a DeepLab network on a ResNet-101 backbone.

    The [3, 4, 23, 3] block counts select the 101-layer ResNet variant;
    num_classes defaults to 21 (Pascal VOC: 20 classes + background).
    """
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes)
def format_checker_each_file(category, input_data_path):
    """Validate a .jsonl prediction file for one event category.

    Checks that the file parses, contains exactly 500 lines, that every line
    has 'id' and 'predicted_annotation' fields, that every predicted slot is
    a list, and that the slot count matches the category.

    Raises:
        AssertionError: on any format violation.
    """
    # Expected number of predicted slots per event category.
    expected_slot_counts = {'positive': 9, 'negative': 7, 'can_not_test': 5, 'death': 5, 'cure': 3}
    print('[I] Checking', category.upper(), 'category')
    try:
        input_data = read_json_line(input_data_path)
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; keep the best-effort message but narrow the catch.
    except Exception:
        input_data = None
        print('[ERROR] check your file format, should be .jsonl')
    assert (len(input_data) == 500), 'check the number of predictions, should be 500'
    for each_line in input_data:
        curr_keys = each_line.keys()
        assert ('id' in curr_keys), 'input missing id field'
        assert ('predicted_annotation' in curr_keys), 'input missing predicted annotations'
        for (slot_name, slot_pred) in each_line['predicted_annotation'].items():
            assert isinstance(slot_pred, list), (slot_name + ' contains prediction with no list format')
        if category in expected_slot_counts:
            assert (len(each_line['predicted_annotation']) == expected_slot_counts[category]), 'check number of slots'
    print('[I] You have passed the format checker for', category.upper(), 'category')
    return None
class Generator(nn.Module):
    """SNGAN/BigGAN-style generator: linear stem -> 3 upsampling GenBlocks
    (optional self-attention) -> 3x3 conv -> tanh image output.

    Conditional batch-norm is enabled for class-conditional training
    strategies; spectral norm is applied when ``g_spectral_norm`` is set.
    """
    def __init__(self, z_dim, shared_dim, img_size, g_conv_dim, g_spectral_norm, attention, attention_after_nth_gen_block, activation_fn, conditional_strategy, num_classes, initialize, G_depth, mixed_precision):
        super(Generator, self).__init__()
        # Fixed channel schedule for the three generator stages.
        self.in_dims = [512, 256, 128]
        self.out_dims = [256, 128, 64]
        self.z_dim = z_dim
        self.num_classes = num_classes
        self.mixed_precision = mixed_precision
        # Class-conditional BN only for strategies that condition the generator.
        conditional_bn = (True if (conditional_strategy in ['ACGAN', 'ProjGAN', 'ContraGAN', 'Proxy_NCA_GAN', 'NT_Xent_GAN', 'ECGAN']) else False)
        # Stem: project z to a 4x4 spatial tensor with in_dims[0] channels.
        if g_spectral_norm:
            self.linear0 = snlinear(in_features=self.z_dim, out_features=((self.in_dims[0] * 4) * 4))
        else:
            self.linear0 = linear(in_features=self.z_dim, out_features=((self.in_dims[0] * 4) * 4))
        # Each stage is a sub-list so self-attention can be appended as its
        # own stage right after the chosen block.
        self.blocks = []
        for index in range(len(self.in_dims)):
            self.blocks += [[GenBlock(in_channels=self.in_dims[index], out_channels=self.out_dims[index], g_spectral_norm=g_spectral_norm, activation_fn=activation_fn, conditional_bn=conditional_bn, num_classes=self.num_classes)]]
            if (((index + 1) == attention_after_nth_gen_block) and (attention is True)):
                self.blocks += [[Self_Attn(self.out_dims[index], g_spectral_norm)]]
        self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
        # Final RGB projection.
        if g_spectral_norm:
            self.conv4 = snconv2d(in_channels=self.out_dims[(- 1)], out_channels=3, kernel_size=3, stride=1, padding=1)
        else:
            self.conv4 = conv2d(in_channels=self.out_dims[(- 1)], out_channels=3, kernel_size=3, stride=1, padding=1)
        self.tanh = nn.Tanh()
        if (initialize is not False):
            init_weights(self.modules, initialize)
    def forward(self, z, label, evaluation=False):
        """Generate images from latent ``z`` and class ``label``.

        Autocast (mixed precision) is active only during training.
        """
        with (torch.cuda.amp.autocast() if ((self.mixed_precision is True) and (evaluation is False)) else dummy_context_mgr()) as mp:
            act = self.linear0(z)
            # Reshape the stem output to (N, C, 4, 4).
            act = act.view((- 1), self.in_dims[0], 4, 4)
            for (index, blocklist) in enumerate(self.blocks):
                for block in blocklist:
                    # Self-attention takes no label; GenBlocks are conditional.
                    if isinstance(block, Self_Attn):
                        act = block(act)
                    else:
                        act = block(act, label)
            act = self.conv4(act)
            # Map to [-1, 1] image range.
            out = self.tanh(act)
        return out
class SingularFunctionFactory():
    """Attribute-driven factory for Singular interpreter functions.

    ``factory.foo`` resolves the Singular function ``foo``; the special
    suffix ``__lib`` loads the corresponding library and returns a fresh
    factory so the newly loaded functions can be resolved.
    """

    def __getattr__(self, name):
        # Dunder/private lookups must fail fast so Python protocols work.
        if name.startswith('_'):
            raise AttributeError("Singular Function Factory has no attribute '%s'" % name)
        try:
            return singular_function(name)
        except NameError:
            if not name.endswith('__lib'):
                raise NameError("function or package '%s' unknown." % name)
            # 'foo__lib' -> load 'foo.lib', then hand back a new factory.
            lib(name[:(- 5)] + '.lib')
            return SingularFunctionFactory()

    def __dir__(self):
        """Expose the known Singular functions to introspection."""
        return list_of_functions()
def generate_test(filename):
    """Generate and visualize spectral (sp) and aperiodic (ap) envelopes for
    one utterance, then synthesize a wav with WORLD for listening tests.

    Side effects: opens four matplotlib windows (generated vs. reference sp
    and ap) and writes ./data/gen_wav/<filename>.wav.
    """
    # Normalization bounds saved during training.
    [sp_min, sp_max, ap_min, ap_max] = np.load('data/timbre_model/min_max_record.npy')
    condi = get_condition(filename)
    # NOTE(review): the first argument to generate_timbre (0 vs 1) presumably
    # selects the sp vs ap model branch — confirm against its definition.
    (sp, raw_sp) = generate_timbre(0, sp_max, sp_min, condi, None)
    plt.imshow(np.log(np.transpose(sp)), aspect='auto', origin='bottom', interpolation='none')
    plt.show()
    # Reference spectral envelope for visual comparison.
    sp1 = load_timbre((('data/timbre_model/test/sp/' + filename) + '_sp.npy'), 0, sp_max, sp_min)
    plt.imshow(np.log(np.transpose(sp1)), aspect='auto', origin='bottom', interpolation='none')
    plt.show()
    # The ap model is conditioned on the raw generated sp.
    (ap, raw_ap) = generate_timbre(1, ap_max, ap_min, condi, raw_sp)
    plt.imshow(np.log(np.transpose(ap)), aspect='auto', origin='bottom', interpolation='none')
    plt.show()
    ap1 = load_timbre((('data/timbre_model/test/ap/' + filename) + '_ap.npy'), 1, ap_max, ap_min)
    plt.imshow(np.log(np.transpose(ap1)), aspect='auto', origin='bottom', interpolation='none')
    plt.show()
    path = (('data/raw/' + filename) + '.raw')
    # Reuse the original recording's f0 track; only sp/ap are generated.
    (_f0, _sp, code_sp, _ap, code_ap) = process_wav(path)
    synthesized = pw.synthesize(_f0, sp, ap, 32000, pw.default_frame_period)
    sf.write((('./data/gen_wav/' + filename) + '.wav'), synthesized, 32000)
def preprocess_function(examples, tokenizer, lowercase, **kwargs):
    """Tokenize input/output pairs into model-ready tensors.

    Inputs are (optionally lowercased and) padded/truncated to MAX_LENGTH;
    targets are padded/truncated to 256 tokens and attached as 'labels'.
    Note: lowercasing mutates examples['input'] in place.
    """
    if lowercase:
        examples['input'] = [text.lower() for text in examples['input']]
    model_inputs = tokenizer(
        text=examples['input'],
        max_length=MAX_LENGTH,
        padding='max_length',
        truncation=True,
        return_tensors='pt',
    )
    targets = tokenizer(
        text=examples['output'],
        max_length=256,
        padding='max_length',
        truncation=True,
        return_tensors='pt',
    )
    model_inputs['labels'] = targets['input_ids']
    return model_inputs
def test_yolact_head_loss():
    """YOLACT head losses: with no GT boxes, box/segm/mask losses must be
    zero (cls loss still positive from negatives); with one GT box, every
    loss must be positive.
    """
    # 550x550 input, the canonical YOLACT resolution.
    s = 550
    img_metas = [{'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_shape': (s, s, 3)}]
    train_cfg = mmcv.Config(dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=(- 1), gt_max_assign_all=False), smoothl1_beta=1.0, allowed_border=(- 1), pos_weight=(- 1), neg_pos_ratio=3, debug=False, min_gt_box_wh=[4.0, 4.0]))
    bbox_head = YOLACTHead(num_classes=80, in_channels=256, feat_channels=256, anchor_generator=dict(type='AnchorGenerator', octave_base_scale=3, scales_per_octave=1, base_sizes=[8, 16, 32, 64, 128], ratios=[0.5, 1.0, 2.0], strides=[(550.0 / x) for x in [69, 35, 18, 9, 5]], centers=[(((550 * 0.5) / x), ((550 * 0.5) / x)) for x in [69, 35, 18, 9, 5]]), bbox_coder=dict(type='DeltaXYWHBBoxCoder', target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, reduction='none', loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5), num_head_convs=1, num_protos=32, use_ohem=True, train_cfg=train_cfg)
    segm_head = YOLACTSegmHead(in_channels=256, num_classes=80, loss_segm=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    mask_head = YOLACTProtonet(num_classes=80, in_channels=256, num_protos=32, max_masks_to_train=100, loss_mask_weight=6.125)
    # One dummy FPN level per stride (batch of 1).
    feat = [torch.rand(1, 256, feat_size, feat_size) for feat_size in [69, 35, 18, 9, 5]]
    (cls_score, bbox_pred, coeff_pred) = bbox_head.forward(feat)
    # --- Case 1: empty ground truth ------------------------------------
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_masks = [torch.empty((0, 550, 550))]
    gt_bboxes_ignore = None
    (empty_gt_losses, sampling_results) = bbox_head.loss(cls_score, bbox_pred, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=gt_bboxes_ignore)
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    assert (empty_cls_loss.item() > 0), 'cls loss should be non-zero'
    assert (empty_box_loss.item() == 0), 'there should be no box loss when there are no true boxes'
    segm_head_outs = segm_head(feat[0])
    empty_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
    mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas, sampling_results)
    empty_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas, sampling_results)
    empty_segm_loss = sum(empty_segm_loss['loss_segm'])
    empty_mask_loss = sum(empty_mask_loss['loss_mask'])
    assert (empty_segm_loss.item() == 0), 'there should be no segm loss when there are no true boxes'
    assert (empty_mask_loss == 0), 'there should be no mask loss when there are no true boxes'
    # --- Case 2: a single ground-truth box ------------------------------
    gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])]
    gt_labels = [torch.LongTensor([2])]
    gt_masks = [(torch.rand((1, 550, 550)) > 0.5).float()]
    (one_gt_losses, sampling_results) = bbox_head.loss(cls_score, bbox_pred, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=gt_bboxes_ignore)
    one_gt_cls_loss = sum(one_gt_losses['loss_cls'])
    one_gt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert (one_gt_cls_loss.item() > 0), 'cls loss should be non-zero'
    assert (one_gt_box_loss.item() > 0), 'box loss should be non-zero'
    one_gt_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
    mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas, sampling_results)
    one_gt_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas, sampling_results)
    one_gt_segm_loss = sum(one_gt_segm_loss['loss_segm'])
    one_gt_mask_loss = sum(one_gt_mask_loss['loss_mask'])
    assert (one_gt_segm_loss.item() > 0), 'segm loss should be non-zero'
    assert (one_gt_mask_loss.item() > 0), 'mask loss should be non-zero'
def clean_ro_cui(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean Romanian CUI (company-ID) codes in a DataFrame column.

    The column is formatted per ``output_format`` ('compact' or 'standard')
    into a new ``<column>_clean`` column; with ``inplace=True`` the cleaned
    values replace the original column, which is then renamed to
    ``<column>_clean``. ``errors`` controls how invalid values are handled
    by the per-value formatter. Computation runs through dask with an
    optional progress bar and returns a concrete pandas DataFrame.
    """
    if (output_format not in {'compact', 'standard'}):
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    # Work on a dask frame so large inputs are cleaned partition-by-partition.
    df = to_dask(df)
    # _format returns a tuple per value; only element 0 (the cleaned code)
    # is surfaced below.
    df['clean_code_tup'] = df[column].map_partitions((lambda srs: [_format(x, output_format, errors) for x in srs]), meta=object)
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # Overwrite the source column with the cleaned values, drop the
        # helper column, then rename the source column to <column>_clean.
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    with ProgressBar(minimum=1, disable=(not progress)):
        df = df.compute()
    return df
def data_generator(train_arguments, test_arguments):
    """Build the (train, test) data generators from keyword-argument dicts."""
    return (
        datagenerator(**train_arguments),
        datagenerator(**test_arguments),
    )
def determineMaxWindowSize(dtype, limit=None):
    """Largest square-window edge length that fits in available RAM.

    Computed as floor(sqrt(available_bytes / itemsize)) for the given numpy
    dtype, optionally capped by ``limit``.
    """
    available_bytes = psutil.virtual_memory().available
    max_size = math.floor(math.sqrt(available_bytes / np.dtype(dtype).itemsize))
    # Apply the cap only when it is actually smaller than what memory allows.
    if limit is not None and limit < max_size:
        return limit
    return max_size
def test_columnar_convert_column_default_selected_columns():
    """Selected-column renaming must also apply to column_defaults entries."""
    converter = ColumnarConverter(
        name='x',
        default_type='foo',
        type_column=None,
        column_defaults={'before': 123},
        selected_columns={'before': 'after'},
        transform_columns={},
    )
    _, columns, type_info = converter.convert({'x': _EMPTY_DF, 'y': _EMPTY_DF})
    _check_type_info(type_info, [('x', _empty_array(2)), ('y', _empty_array(2))])
    # The original name is gone; the default surfaces under the new name.
    assert 'before' not in columns
    np.testing.assert_array_equal(columns['after'], 123)
def combine_dicts(d1, d2):
    """Merge two dict-of-list mappings into a new dict.

    Keys unique to either input are kept; for shared keys, values from d2
    are appended after d1's values, preserving order and skipping values
    already present.

    BUG FIX: the original returned d1 itself and aliased d2's lists into it,
    so calling the function mutated BOTH inputs; the lists are now copied.
    """
    combined = {key: list(values) for key, values in d1.items()}
    for key, values in d2.items():
        if key not in combined:
            combined[key] = list(values)
        else:
            for val in values:
                if val not in combined[key]:
                    combined[key].append(val)
    return combined
def convert_to_cancer_stage(row):
    """Map nodule volumes to cancer stages 1-4 via equivalent sphere diameter.

    Inverts volume = (pi/6) * d^3 to recover the diameter d (cm), then bins:
    d < 3 -> 1, 3 <= d < 4 -> 2, 4 <= d < 5 -> 3, d >= 5 -> 4.

    Args:
        row: iterable of nodule volumes.

    Returns:
        list of int stages, one per input volume.
    """
    stage_list = []
    for number in row:
        diameter_cm = (number / (math.pi / 6)) ** (1.0 / 3.0)
        if diameter_cm < 3:
            stage = 1
        elif diameter_cm < 4:
            stage = 2
        elif diameter_cm < 5:
            stage = 3
        else:
            # BUG FIX: the last branch was `elif diameter_cm >= 5`, leaving
            # `stage` unbound (or stale from the previous iteration) when no
            # comparison held, e.g. for NaN input. A final `else` makes the
            # binning total. Unused `enumerate` index also removed.
            stage = 4
        stage_list.append(stage)
    return stage_list
def getTreeBuilder(treeType, implementation=None, **kwargs):
    """Return a TreeBuilder class for the requested tree type.

    Supported types (case-insensitive): 'dom', 'lxml', 'etree'. Only the
    'lxml' builder is cached; 'dom' and 'etree' builders are constructed per
    call because they depend on ``implementation`` and ``kwargs``.
    """
    treeType = treeType.lower()
    if treeType in treeBuilderCache:
        return treeBuilderCache.get(treeType)
    if treeType == 'dom':
        from . import dom
        if implementation is None:
            from xml.dom import minidom
            implementation = minidom
        return dom.getDomModule(implementation, **kwargs).TreeBuilder
    if treeType == 'etree':
        from . import etree
        if implementation is None:
            implementation = default_etree
        return etree.getETreeModule(implementation, **kwargs).TreeBuilder
    if treeType == 'lxml':
        from . import etree_lxml
        treeBuilderCache[treeType] = etree_lxml.TreeBuilder
        return treeBuilderCache.get(treeType)
    raise ValueError(('Unrecognised treebuilder "%s" ' % treeType))
def separate_branch(config_path: str) -> Tuple[(str, str)]:
    """Split a config path of the form '<path>@<branch>' into (path, branch).

    A path without a branch suffix defaults to the 'master' branch.

    Raises:
        ValueError: if more than one '@' separator is present.
    """
    # BUG FIX: this was `config_path.split('')`, and str.split raises
    # ValueError('empty separator') for every input, so the function could
    # never succeed. '@' is the conventional path@branch delimiter —
    # TODO(review): confirm the intended separator against callers.
    segments = config_path.split('@')
    if len(segments) == 1:
        return (segments[0], 'master')
    if len(segments) == 2:
        return (segments[0], segments[1])
    raise ValueError(f'Multiple branches in the config path {config_path}')
def WeakTableaux(k, shape, weight, representation='core'):
    """Dispatch to the weak-tableaux parent class for the chosen representation.

    ``representation`` selects between the 'core', 'bounded', and
    'factorized_permutation' realizations.
    """
    if representation == 'core':
        return WeakTableaux_core(k, shape, weight)
    if representation == 'bounded':
        return WeakTableaux_bounded(k, shape, weight)
    if representation == 'factorized_permutation':
        return WeakTableaux_factorized_permutation(k, shape, weight)
    raise NotImplementedError("The representation option needs to be 'core', 'bounded', or 'factorized_permutation'")
def _write_template(path, template, mapping):
    """Render *template* with %-substitution and write it to *path*,
    closing the file handle promptly."""
    with open(path, 'wt') as fh:
        fh.write(template % mapping)


def main(argv):
    """Create the directory/file skeleton for a new ns-3 module.

    Expects exactly one positional argument: the module name (alphanumeric
    words separated by dashes). Returns 0 on success, 1 for usage errors,
    2 for an invalid name or an already-existing module directory.
    """
    parser = OptionParser(usage='Usage: %prog [options] modulename\nUtility script to create a basic template for a new ns-3 module')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        return 1
    modname = args[0].lower()
    if False in [word.isalnum() for word in modname.split('-')]:
        print('Module name should only contain alphanumeric characters and dashes', file=sys.stderr)
        return 2
    assert os.path.sep not in modname
    moduledir = os.path.join(os.path.dirname(__file__), modname)
    if os.path.exists(moduledir):
        print(('Module %r already exists' % (modname,)), file=sys.stderr)
        return 2
    print(("Creating module %r, run './waf configure' to include it in the build" % (modname,)))
    # NOTE: file handles were previously opened without `with` and closed
    # manually; _write_template now guarantees closure even on write errors.
    os.mkdir(moduledir)
    _write_template(os.path.join(moduledir, 'wscript'), WSCRIPT_TEMPLATE, dict(MODULE=modname))
    # model/ — the module's core sources.
    modeldir = os.path.join(moduledir, 'model')
    os.mkdir(modeldir)
    _write_template(os.path.join(moduledir, 'model', ('%s.cc' % modname)), MODEL_CC_TEMPLATE, dict(MODULE=modname))
    _write_template(os.path.join(moduledir, 'model', ('%s.h' % modname)), MODEL_H_TEMPLATE, dict(MODULE=modname, INCLUDE_GUARD=('%s_H' % modname.replace('-', '_').upper())))
    # test/ — the test suite, with CamelCase/camelCase identifier variants.
    testdir = os.path.join(moduledir, 'test')
    os.mkdir(testdir)
    words = modname.split('-')
    _write_template(os.path.join(moduledir, 'test', ('%s-test-suite.cc' % modname)), TEST_CC_TEMPLATE, dict(MODULE=modname, CAPITALIZED=''.join([word.capitalize() for word in words]), COMPOUND=''.join(([words[0]] + [word.capitalize() for word in words[1:]]))))
    # helper/ — user-facing helper API.
    helperdir = os.path.join(moduledir, 'helper')
    os.mkdir(helperdir)
    _write_template(os.path.join(moduledir, 'helper', ('%s-helper.cc' % modname)), HELPER_CC_TEMPLATE, dict(MODULE=modname))
    _write_template(os.path.join(moduledir, 'helper', ('%s-helper.h' % modname)), HELPER_H_TEMPLATE, dict(MODULE=modname, INCLUDE_GUARD=('%s_HELPER_H' % modname.replace('-', '_').upper())))
    # examples/ — example program plus its build script.
    examplesdir = os.path.join(moduledir, 'examples')
    os.mkdir(examplesdir)
    _write_template(os.path.join(examplesdir, 'wscript'), EXAMPLES_WSCRIPT_TEMPLATE, dict(MODULE=modname))
    _write_template(os.path.join(moduledir, 'examples', ('%s-example.cc' % modname)), EXAMPLE_CC_TEMPLATE, dict(MODULE=modname))
    # doc/ — reStructuredText documentation stub.
    docdir = os.path.join(moduledir, 'doc')
    os.mkdir(docdir)
    _write_template(os.path.join(moduledir, 'doc', ('%s.rst' % modname)), DOC_RST_TEMPLATE, dict(MODULE=modname))
    return 0
def _test_pow_int_base_int_exp(dt_base, dt_exp):
    """Check integer exponentiation against Python's ``**`` over a small grid."""
    z = ti.field(dt_base, shape=())

    def func(x: dt_base, y: dt_exp):
        z[None] = x ** y

    # Bases -5..4 crossed with non-negative exponents 0..9.
    for base in range(-5, 5):
        for exponent in range(10):
            func(base, exponent)
            assert z[None] == base ** exponent
def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
    """Return a new structured array keeping only the fields in keep_names.

    The output dtype preserves the order of ``keep_names``; values are copied
    field-by-field from ``base`` and the result is post-processed for the
    requested mask/recarray flavor.
    """
    trimmed_dtype = [(name, base.dtype[name]) for name in keep_names]
    filled = recursive_fill_fields(base, np.empty(base.shape, dtype=trimmed_dtype))
    return _fix_output(filled, usemask=usemask, asrecarray=asrecarray)
class MultiPolynomialFunctor(ConstructionFunctor):
    """Construction functor for multivariate polynomial rings.

    Applying it to a ring R yields R[vars] with the stored term order.
    """
    # Rank in the construction-functor pushout lattice.
    rank = 9

    def __init__(self, vars, term_order):
        # A functor from Rings to Rings: R |-> R[vars].
        Functor.__init__(self, Rings(), Rings())
        self.vars = vars
        self.term_order = term_order

    def _apply_functor(self, R):
        """Apply the functor: build the polynomial ring over ``R``."""
        from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
        return PolynomialRing(R, self.vars)

    def __eq__(self, other):
        """Equal when variables and term order agree; a univariate
        PolynomialFunctor matches when its single variable equals ours."""
        if isinstance(other, MultiPolynomialFunctor):
            return ((self.vars == other.vars) and (self.term_order == other.term_order))
        elif isinstance(other, PolynomialFunctor):
            return (self.vars == (other.var,))
        else:
            return False

    def __ne__(self, other):
        return (not (self == other))

    # Defining __eq__ suppresses inherited hashing; restore the parent's.
    __hash__ = ConstructionFunctor.__hash__

    def __mul__(self, other):
        """Compose with another functor; adjacent polynomial functors merge
        into one multivariate functor when orders agree and variables are
        disjoint."""
        if isinstance(other, IdentityConstructionFunctor):
            return self
        if isinstance(other, MultiPolynomialFunctor):
            if (self.term_order != other.term_order):
                raise CoercionException(('Incompatible term orders (%s,%s).' % (self.term_order, other.term_order)))
            if set(self.vars).intersection(other.vars):
                raise CoercionException(('Overlapping variables (%s,%s)' % (self.vars, other.vars)))
            # other is applied first, so its variables come first.
            return MultiPolynomialFunctor((other.vars + self.vars), self.term_order)
        elif (isinstance(other, CompositeConstructionFunctor) and isinstance(other.all[(- 1)], MultiPolynomialFunctor)):
            # Fold into the outermost polynomial functor of the composite.
            return CompositeConstructionFunctor(other.all[:(- 1)], (self * other.all[(- 1)]))
        else:
            return CompositeConstructionFunctor(other, self)

    def merge(self, other):
        """Merge with an equal functor; incompatible functors don't merge."""
        if (self == other):
            return self
        else:
            return None

    def expand(self):
        """Decompose into one single-variable functor per variable (reversed,
        so that re-composing with __mul__ restores the original order)."""
        if (len(self.vars) <= 1):
            return [self]
        else:
            return [MultiPolynomialFunctor((x,), self.term_order) for x in reversed(self.vars)]

    def _repr_(self):
        return ('MPoly[%s]' % ','.join(self.vars))
class LaionDataset(BaseDataset):
    """Streaming LAION image-caption dataset backed by webdataset shards."""

    def __init__(self, vis_processor, text_processor, location):
        super().__init__(vis_processor=vis_processor, text_processor=text_processor)
        # Pipeline: resample shards -> unpack tars -> shuffle -> decode
        # images -> pick (jpg, json) -> preprocess image -> build sample dict.
        # Every stage warns and continues on malformed samples.
        pipeline_stages = [
            wds.ResampledShards(location),
            wds.tarfile_to_samples(handler=wds.warn_and_continue),
            wds.shuffle(1000, handler=wds.warn_and_continue),
            wds.decode('pilrgb', handler=wds.warn_and_continue),
            wds.to_tuple('jpg', 'json', handler=wds.warn_and_continue),
            wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),
            wds.map(self.to_dict, handler=wds.warn_and_continue),
        ]
        self.inner_dataset = wds.DataPipeline(*pipeline_stages)

    def to_dict(self, sample):
        """Convert a decoded (image, metadata) pair into a training sample."""
        image, meta = sample[0], sample[1]
        return {'image': image, 'answer': self.text_processor(meta['caption'])}
def get_parser():
    """Build the CLI parser for language-ID-based text filtering."""
    parser = argparse.ArgumentParser(
        description='reads text from stdin and outputs normalized, lid-filtered version to stdout'
    )
    parser.add_argument('--fasttext-model', default='lid.187.bin',
                        help='path to fasttext model')
    parser.add_argument('--lang', required=True, help='language id')
    parser.add_argument('--lid-threshold', type=float, default=0.4,
                        help='threshold for this lang id probability')
    return parser
class FusedBatchNormalizationBackward(PythonFunction):
    """Backward of FusedBatchNormalization expressed as a PythonFunction.

    Computes (dx, dbeta, dgamma[, dz]) from dy by replaying the underlying
    fused batch-normalization function's backward pass, and supports double
    backward through ``double_backward``.

    Input layout expected by setup/forward/backward:
        inputs[0]: dy, inputs[1]: x, inputs[2]: beta, inputs[3]: gamma,
        inputs[4]: running mean, inputs[5]: running variance, inputs[6]: y,
        inputs[7]: z (residual input; present only when ``is_add`` is True).
    """

    def __init__(self, ctx, axes=[], decay_rate=0.9, eps=1e-05, batch_stat=True, nonlinearity='relu'):
        super(FusedBatchNormalizationBackward, self).__init__(ctx)
        # Underlying fused function whose backward pass implements this
        # function's forward computation.
        self._func = _F.FusedBatchNormalization(ctx, axes, decay_rate, eps, batch_stat, nonlinearity)
        self.axes = axes
        self.decay_rate = decay_rate
        self.eps = eps
        self.batch_stat = batch_stat
        self.nonlinearity = nonlinearity
        self._is_add = False

    def name(self):
        return self.__class__.__name__

    def args(self):
        return self._func.args

    @property
    def is_add(self):
        # True when the fused op includes the residual-add input z.
        return self._is_add

    # Fix: the setter decorator had been corrupted into the bare expression
    # `_add.setter` (a NameError at class-definition time) and the getter had
    # lost its @property; restore the property/setter pair.
    @is_add.setter
    def is_add(self, is_add):
        self._is_add = is_add

    def _create_fwd_inputs_outputs(self, inputs, outputs):
        """Create Variables mirroring the shapes of the original forward call."""
        x0 = nn.Variable(inputs[1].shape).apply(need_grad=True)
        b0 = nn.Variable(inputs[2].shape).apply(need_grad=True)
        g0 = nn.Variable(inputs[3].shape).apply(need_grad=True)
        rm = nn.Variable(inputs[4].shape).apply(need_grad=False)
        rv = nn.Variable(inputs[5].shape).apply(need_grad=False)
        z0 = nn.Variable(inputs[7].shape).apply(need_grad=True) if self._is_add else None
        inputs_fwd = [x0, b0, g0, rm, rv, z0] if z0 else [x0, b0, g0, rm, rv]
        # The replayed forward's output has the same shape as dy (inputs[0]).
        outputs_fwd = [nn.Variable(inputs[0].shape)]
        return inputs_fwd, outputs_fwd

    def min_inputs(self):
        return 7

    def min_outputs(self):
        return 4 if self._is_add else 3

    def grad_depends_output_data(self, i, o):
        return False

    def grad_depends_input_data(self, i, j):
        return True

    def setup_impl(self, inputs, outputs):
        inputs_fwd, outputs_fwd = self._create_fwd_inputs_outputs(inputs, outputs)
        self._func.setup(inputs_fwd, outputs_fwd)
        # Output gradients take the shapes of the corresponding forward inputs.
        outputs[0].reset_shape(inputs_fwd[0].shape, True)  # dx
        outputs[1].reset_shape(inputs_fwd[1].shape, True)  # dbeta
        outputs[2].reset_shape(inputs_fwd[2].shape, True)  # dgamma
        if self._is_add:
            outputs[3].reset_shape(inputs_fwd[5].shape, True)  # dz

    def forward_impl(self, inputs, outputs):
        inputs_fwd, outputs_fwd = self._create_fwd_inputs_outputs(inputs, outputs)
        # Feed the data of the original forward call.
        inputs_fwd[0].data = inputs[1].data  # x
        inputs_fwd[1].data = inputs[2].data  # beta
        inputs_fwd[2].data = inputs[3].data  # gamma
        if self._is_add:
            inputs_fwd[5].data = inputs[7].data  # z
        if not self.batch_stat:
            # Global-stats mode additionally reads the running mean/variance.
            inputs_fwd[3].data = inputs[4].data
            inputs_fwd[4].data = inputs[5].data
        # Alias this function's outputs onto the replayed inputs' grad arrays
        # so the underlying backward writes dx/dbeta/dgamma[/dz] in place.
        inputs_fwd[0].grad = outputs[0].data
        inputs_fwd[1].grad = outputs[1].data
        inputs_fwd[2].grad = outputs[2].data
        if self._is_add:
            inputs_fwd[5].grad = outputs[3].data
        # dy drives the backward pass of the replayed forward.
        outputs_fwd[0].grad = inputs[0].data
        # In batch-stat mode (non-cuDNN backends) the forward pass must run
        # first to recompute the batch mean/variance used by backward.
        if self.batch_stat and 'cudnn' not in self.ctx.backend:
            self._func.forward(inputs_fwd, outputs_fwd)
        self._func.backward(inputs_fwd, outputs_fwd, [False] * len(inputs_fwd))

    def backward_impl(self, inputs, outputs, propagate_down, accum):
        # Gradients arriving at this function's outputs (w.r.t. dx, db, dg[, dz]).
        g_dx0 = nn.Variable(outputs[0].shape).apply(data=outputs[0].grad)
        g_db0 = nn.Variable(outputs[1].shape).apply(data=outputs[1].grad)
        g_dg0 = nn.Variable(outputs[2].shape).apply(data=outputs[2].grad)
        g_dz0 = nn.Variable(outputs[3].shape).apply(data=outputs[3].grad) if self._is_add else None
        # This function's inputs wrapped as Variables for the double backward.
        dy = nn.Variable(inputs[0].shape).apply(data=inputs[0].data, need_grad=True)
        x0 = nn.Variable(inputs[1].shape).apply(data=inputs[1].data, need_grad=True)
        b0 = nn.Variable(inputs[2].shape).apply(data=inputs[2].data, need_grad=True)
        g0 = nn.Variable(inputs[3].shape).apply(data=inputs[3].data, need_grad=True)
        rm = nn.Variable(inputs[4].shape).apply(data=inputs[4].data)
        rv = nn.Variable(inputs[5].shape).apply(data=inputs[5].data)
        y0 = nn.Variable(inputs[6].shape).apply(data=inputs[6].data)
        z0 = nn.Variable(inputs[7].shape).apply(data=inputs[7].data, need_grad=True) if self._is_add else None
        with nn.auto_forward():
            g_dy_, g_x0_, g_b0_, g_g0_ = double_backward(
                g_dx0, g_db0, g_dg0, g_dz0, dy, x0, b0, g0, rm, rv, y0, z0,
                self.axes, self.decay_rate, self.eps, self.nonlinearity,
                self.batch_stat)
        # NOTE(review): g_b0_ (grad w.r.t. beta) and the grad w.r.t. z are
        # available but never propagated here, mirroring the original code;
        # confirm they are intentionally unused.
        g_dy = inputs[0].grad
        g_x0 = inputs[1].grad
        g_g0 = inputs[3].grad
        if propagate_down[0]:
            if accum[0]:
                g_dy += g_dy_.data
            else:
                g_dy.copy_from(g_dy_.data)
        if propagate_down[1]:
            if accum[1]:
                g_x0 += g_x0_.data
            else:
                g_x0.copy_from(g_x0_.data)
        if propagate_down[3]:
            if accum[3]:
                g_g0 += g_g0_.data
            else:
                g_g0.copy_from(g_g0_.data)
def check_compatibility(urllib3_version, chardet_version):
    """Assert that bundled urllib3 and chardet versions are in supported ranges.

    Raises AssertionError (caught by the caller to emit a warning) when
    urllib3 is not 1.21–1.23 or chardet is not 3.0.2+ (below 3.1).
    """
    parts = urllib3_version.split('.')
    assert parts != ['dev']  # development builds are rejected outright
    # Pad a missing patch component, e.g. '1.21' -> '1.21.0'.
    if len(parts) == 2:
        parts.append('0')
    major, minor, patch = (int(p) for p in parts)
    # Supported urllib3: >= 1.21, <= 1.23
    assert major == 1
    assert 21 <= minor <= 23
    # Supported chardet: >= 3.0.2, < 3.1.0
    major, minor, patch = (int(p) for p in chardet_version.split('.')[:3])
    assert major == 3
    assert minor < 1
    assert patch >= 2
def tensor_init_for_desc(name: str, desc: data.Data, zeros=False) -> str:
    """Emit C++ source declaring a torch Tensor matching `desc`.

    Uses torch::zeros when `zeros` is set, otherwise torch::empty; shape,
    dtype and device are taken from the data descriptor.
    """
    alloc = 'zeros' if zeros else 'empty'
    dims = ', '.join(str(s) for s in desc.shape)
    cpp_dtype = typeclass_to_torch_cpp_type(desc.dtype)
    device = 'kCUDA' if is_cuda(desc.storage) else 'kCPU'
    return f'''Tensor {name} = torch::{alloc}(
    {{{dims}}},
    torch::TensorOptions()
        .dtype(torch::{cpp_dtype})
        .device(torch::{device})
        .layout(torch::kStrided));
'''
@pytest.mark.core  # NOTE(review): original decorator was garbled to '.core'; presumed pytest marker — confirm
def test_sum_pandas(df):
    """The KFolds test folds must partition `df` exactly (each row once)."""
    cv = KFolds(n_folds=2, seed=1337, session_id_column='session_id', query_column='user_id')
    # Fix: DataFrame.append was removed in pandas 2.0; collect folds, concat once.
    folds = [test for _, test in cv.split(df)]
    res = pd.concat(folds, ignore_index=True)
    res = res.sort_values(['user_id', 'item_id']).reset_index(drop=True)
    # Fix: `all(res == df)` iterated the boolean frame's COLUMN LABELS (all
    # truthy strings), so it always passed; compare element-wise instead.
    assert (res == df).all().all()
def spawn_2D_maze(map, border_tile, border_size=(1, 1), base_pos=5, maze_height=3):
    """Spawn a 2D maze (plus a surrounding border) via the game client.

    `map` is a 2D grid of tile names laid out in the x/z plane; the maze is
    extruded `maze_height` blocks upward starting at y == base_pos.
    (Parameter name `map` shadows the builtin but is kept for compatibility.)
    """
    blocks = []
    border_item = get_tile(border_tile)
    depth = len(map)       # extent along z
    width = len(map[0])    # extent along x
    for level in range(maze_height):
        y = base_pos + level
        # Border strips in front of (z < 0) and behind (z >= depth) the maze.
        for z in range(-border_size[0], 0):
            for x in range(-border_size[1], width + border_size[1]):
                blocks.append(Block(position=Point(x=x, y=y, z=z), type=border_item))
        for z in range(depth, depth + border_size[0]):
            for x in range(-border_size[1], width + border_size[1]):
                blocks.append(Block(position=Point(x=x, y=y, z=z), type=border_item))
        # Border strips to the left (x < 0) and right (x >= width) of the maze.
        for x in range(-border_size[1], 0):
            for z in range(0, depth):
                blocks.append(Block(position=Point(x=x, y=y, z=z), type=border_item))
        for x in range(width, width + border_size[1]):
            for z in range(0, depth):
                blocks.append(Block(position=Point(x=x, y=y, z=z), type=border_item))
    # The maze cells themselves, extruded over the full height.
    for z in range(depth):
        for x in range(len(map[z])):
            cell_item = get_tile(map[z][x])
            for level in range(maze_height):
                blocks.append(Block(position=Point(x=x, y=base_pos + level, z=z), type=cell_item, orientation=NORTH))
    CLIENT.spawnBlocks(Blocks(blocks=blocks))
def receive_user_input(config_generator: YamlGenerator):
    """Interactively collect bot name, tasks/entities and optional FAQs,
    then write the YAML config file."""
    config_generator.add_bot_name(input('Input bot name: '))
    while True:
        task_name = input('Input a task name: ')
        if task_name:
            config_generator.add_task(task_name)
            entities = input("Input entity names, separated by '||' (entity_1||entity_2||entity_3): ").split('||')
            for entity in entities:
                if entity:
                    config_generator.add_entity(task_name, entity)
        if input('Continue adding tasks? (Input yes or no): ') == 'no':
            # Optionally collect FAQs before finishing.
            if input('Do you want to also add FAQs? (Input yes or no): ') == 'yes':
                for faq_name in input("Input FAQ names, separated by '||' (FAQ_1||FAQ_2||FAQ_3): ").split('||'):
                    config_generator.add_faq(faq_name)
            break
    config_generator.generate_yaml_file()
def my_py_nested_call(t1, t2, dst, world_size, hops):
    """Hop the (t1, t2) addition around a ring of RPC workers.

    Forwards the call to the next worker while hops remain; the final hop
    performs the add remotely via my_py_add.
    """
    next_dst = (dst + 1) % world_size
    if hops <= 0:
        return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))
    return rpc.rpc_sync(
        worker_name(next_dst),
        my_py_nested_call,
        args=(t1, t2, next_dst, world_size, hops - 1),
    )
def conv3d_args_preprocessor(args, kwargs):
    """Convert legacy Keras-1 style Conv3D arguments to the Keras-2 signature.

    Collapses separate kernel_dim1/kernel_dim2/kernel_dim3 values (positional
    or keyword) into a single ``kernel_size`` tuple.

    Args:
        args: positional arguments; args[0] is the layer instance.
        kwargs: keyword arguments; consumed kernel_dim* keys are popped.

    Returns:
        Tuple ``(args, kwargs, converted)`` where ``converted`` lists
        (new_name, old_name) pairs for the conversions performed.

    Raises:
        TypeError: if more than 4 positional arguments are given.
        ValueError: if kernel dims are passed positionally together with
            Keras-2 keyword arguments that make the call ambiguous.
    """
    converted = []
    if len(args) > 5:
        raise TypeError('Layer can receive at most 4 positional arguments.')
    if len(args) == 5:
        # filters plus all three kernel dims given positionally.
        if isinstance(args[2], int) and isinstance(args[3], int) and isinstance(args[4], int):
            kernel_size = (args[2], args[3], args[4])
            args = [args[0], args[1], kernel_size]
            converted.append(('kernel_size', 'kernel_dim*'))
    elif (len(args) == 4) and isinstance(args[3], int):
        if isinstance(args[2], int) and isinstance(args[3], int):
            # Two dims positional; the third must come from kwargs. Reject
            # calls that also pass Keras-2 keywords — they are ambiguous.
            new_keywords = ['padding', 'strides', 'data_format']
            for kwd in new_keywords:
                if kwd in kwargs:
                    raise ValueError('It seems that you are using the Keras 2 and you are passing both `kernel_size` and `strides` as integer positional arguments. For safety reasons, this is disallowed. Pass `strides` as a keyword argument instead.')
            if 'kernel_dim3' in kwargs:
                kernel_size = (args[2], args[3], kwargs.pop('kernel_dim3'))
                args = [args[0], args[1], kernel_size]
                converted.append(('kernel_size', 'kernel_dim*'))
    elif len(args) == 3:
        if ('kernel_dim2' in kwargs) and ('kernel_dim3' in kwargs):
            kernel_size = (args[2], kwargs.pop('kernel_dim2'), kwargs.pop('kernel_dim3'))
            args = [args[0], args[1], kernel_size]
            converted.append(('kernel_size', 'kernel_dim*'))
    elif len(args) == 2:
        if ('kernel_dim1' in kwargs) and ('kernel_dim2' in kwargs) and ('kernel_dim3' in kwargs):
            kernel_size = (kwargs.pop('kernel_dim1'), kwargs.pop('kernel_dim2'), kwargs.pop('kernel_dim3'))
            args = [args[0], args[1], kernel_size]
            converted.append(('kernel_size', 'kernel_dim*'))
    elif len(args) == 1:
        if ('kernel_dim1' in kwargs) and ('kernel_dim2' in kwargs) and ('kernel_dim3' in kwargs):
            kernel_size = (kwargs.pop('kernel_dim1'), kwargs.pop('kernel_dim2'), kwargs.pop('kernel_dim3'))
            kwargs['kernel_size'] = kernel_size
            # Fix: label this conversion consistently with the other branches
            # (was 'nb_row/nb_col', a copy-paste from the Conv2D preprocessor).
            converted.append(('kernel_size', 'kernel_dim*'))
    return (args, kwargs, converted)
def multihead_callback_re_init(model):
    """Re-initialize bias and kernel of every layer whose name contains 'multihead'."""
    for layer in model.layers:
        if 'multihead' not in layer.name:
            continue
        # Re-draw both parameter tensors from their original initializers.
        layer.bias.assign(layer.bias_initializer(layer.bias.shape))
        layer.kernel.assign(layer.kernel_initializer(layer.kernel.shape))
def default_collate(batch):
    """Merge a list of samples into a single batched sample.

    Tensors are stacked along a new first dimension; numpy arrays and scalars,
    floats and ints become tensors; strings pass through; mappings,
    namedtuples and sequences are collated recursively, preserving structure.
    """
    first = batch[0]
    first_type = type(first)
    if isinstance(first, torch.Tensor):
        out = None
        if torch.utils.data.get_worker_info() is not None:
            # Inside a DataLoader worker: stack straight into shared memory so
            # the batch is not copied again when sent to the main process.
            total_numel = sum([t.numel() for t in batch])
            shared_storage = first.storage()._new_shared(total_numel)
            out = first.new(shared_storage)
        return torch.stack(batch, 0, out=out)
    elif first_type.__module__ == 'numpy' and first_type.__name__ != 'str_' and first_type.__name__ != 'string_':
        if first_type.__name__ == 'ndarray' or first_type.__name__ == 'memmap':
            # Object/string arrays cannot be turned into tensors.
            if np_str_obj_array_pattern.search(first.dtype.str) is not None:
                raise TypeError(default_collate_err_msg_format.format(first.dtype))
            return default_collate([torch.as_tensor(sample) for sample in batch])
        elif first.shape == ():
            # numpy scalar values
            return torch.as_tensor(batch)
    elif isinstance(first, float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(first, int_classes):
        return torch.tensor(batch)
    elif isinstance(first, string_classes):
        return batch
    elif isinstance(first, container_abcs.Mapping):
        return {key: default_collate([sample[key] for sample in batch]) for key in first}
    elif isinstance(first, tuple) and hasattr(first, '_fields'):
        # namedtuple: collate field-wise and rebuild the same namedtuple type.
        return first_type(*(default_collate(field) for field in zip(*batch)))
    elif isinstance(first, container_abcs.Sequence):
        rest = iter(batch)
        expected_len = len(next(rest))
        if not all(len(item) == expected_len for item in rest):
            raise RuntimeError('each element in list of batch should be of equal size')
        return [default_collate(group) for group in zip(*batch)]
    raise TypeError(default_collate_err_msg_format.format(first_type))
class CksumTestCase(unittest.TestCase):
    """Tests for CksumAlgorithm over bytes and str input.

    NOTE(review): both tests assert an EMPTY hexdigest; the expected digest
    strings look truncated or lost — confirm against a known `cksum`
    reference value for this input and restore the real expected digests.
    """

    def test_cksum_bytes(self):
        # Feed raw bytes and check the final digest.
        cksum = CksumAlgorithm()
        cksum.update(b'The quick brown fox jumps over the lazy dog\n')
        self.assertEqual(cksum.hexdigest(), '')

    def test_cksum_string(self):
        # Same input passed as str; presumably should yield the same digest
        # as the bytes case — confirm how CksumAlgorithm encodes str input.
        cksum = CksumAlgorithm()
        cksum.update('The quick brown fox jumps over the lazy dog\n')
        self.assertEqual(cksum.hexdigest(), '')
def download_glue():
    """Download all GLUE tasks into DATA_DIR/glue_data via the download script."""
    target_dir = os.path.join(DATA_DIR, 'glue_data')
    cmd = ['python', 'data/download/download_glue_data.py',
           '--data_dir', target_dir, '--tasks', 'all']
    subprocess.call(cmd)
class GradientCheckerOptimizer(torch.optim.AdamW):
    """AdamW that asserts every parameter has a gradient before stepping.

    Useful for catching detached-graph / missing-backward bugs early: the
    update refuses to run if any parameter's ``.grad`` is still ``None``.
    """

    def step(self, *args, **kwargs):
        """Validate all gradients, then delegate to ``AdamW.step``.

        Returns the base class's result (the loss when a closure is passed).
        """
        for group in self.param_groups:
            for param in group['params']:
                assert param.grad is not None, f'grad is None for: {param}'
        # Fix: propagate the base-class return value so `step(closure)`
        # still hands the evaluated loss back to the caller (the original
        # silently returned None).
        return super().step(*args, **kwargs)
def join_lines(lines_enum):
    """Join physical lines ending in a backslash into logical lines.

    ``lines_enum`` yields (line_number, line) pairs; continuation lines are
    merged and reported under the number of their first physical line.
    A comment line terminates a continuation and is kept, prefixed with a
    space so the joined result still reads as a comment.
    """
    first_number = None
    pending = []
    for number, text in lines_enum:
        is_comment = bool(COMMENT_RE.match(text))
        if text.endswith('\\') and not is_comment:
            # Accumulate a continuation; remember where it started.
            if not pending:
                first_number = number
            pending.append(text.strip('\\'))
            continue
        if is_comment:
            # Insert a space so '...foo\' + '#comment' doesn't glue tokens.
            text = ' ' + text
        if pending:
            pending.append(text)
            assert first_number is not None
            yield first_number, ''.join(pending)
            pending = []
        else:
            yield number, text
    if pending:
        # Input ended mid-continuation; flush what we have.
        assert first_number is not None
        yield first_number, ''.join(pending)
class Block(nn.Module):
    """Transformer encoder block: self-attention then MLP, each applied with
    pre-LayerNorm, a residual connection and (optional) stochastic depth."""

    def __init__(self, dim, head, reduction_ratio=1, mlp_ratio=4, dpr=0.0):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = Attention(dim, head, reduction_ratio)
        # Identity when dpr == 0, i.e. a plain residual block.
        self.drop_path = DropPath(dpr) if dpr > 0.0 else nn.Identity()
        self.norm2 = nn.LayerNorm(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = MLP(dim, hidden_dim)

    def forward(self, x: Tensor, H, W) -> Tensor:
        attn_out = self.attn(self.norm1(x), H, W)
        x = x + self.drop_path(attn_out)
        mlp_out = self.mlp(self.norm2(x), H, W)
        return x + self.drop_path(mlp_out)
def load_mimic_dataset(diag_or_proc_param, note_category_param, icd_seq_num_param):
    """Build the merged MIMIC notes/outcomes dataframe.

    Keeps either diagnosis or procedure ICD-9 codes (renamed to 'ICD9_CODE')
    depending on `diag_or_proc_param` ('diag' or 'proc'); any other value
    returns the merged frame untouched.
    """
    notes_df = generate_notes_df(note_category_param)
    diagnoses_icd, procedures_icd = load_diag_procs(icd_seq_num_param)
    diagnoses_dict, procedures_dict = generate_dicts(diagnoses_icd, procedures_icd)
    diagnoses_df, procedures_df, codes_df = generate_outcomes_dfs(diagnoses_dict, procedures_dict)
    merged_df = generate_merged_df(notes_df, diagnoses_df, procedures_df, codes_df)
    # Map requested code type to (column to drop, column to rename).
    selection = {
        'diag': ('PROC_CODES', 'DIAG_CODES'),
        'proc': ('DIAG_CODES', 'PROC_CODES'),
    }
    if diag_or_proc_param in selection:
        drop_col, keep_col = selection[diag_or_proc_param]
        merged_df = merged_df.drop(drop_col, axis=1)
        merged_df = merged_df.rename(columns={keep_col: 'ICD9_CODE'})
    return merged_df
def test_sieve():
    """Sanity-check Sieve.generate_primes against known prime values/counts."""
    primes_up_to_50 = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
    assert Sieve.generate_primes(50) == primes_up_to_50
    # There are 168 primes below 1000 and 1009 is itself prime -> 169.
    assert len(Sieve.generate_primes(1009)) == 169
class MixtureBatchNorm2d(nn.BatchNorm2d):
    """Attentive/mixture batch norm.

    Applies BatchNorm2d without affine parameters, then an input-dependent
    affine transform obtained by mixing k learned (weight, bias) component
    vectors with per-sample attention coefficients.
    """

    def __init__(self, k, num_channels, eps=1e-05, momentum=0.1, track_running_stats=True):
        # affine=False: the affine transform is supplied by the mixture below.
        super(MixtureBatchNorm2d, self).__init__(num_channels, eps=eps, momentum=momentum,
                                                 affine=False, track_running_stats=track_running_stats)
        self.k = k
        # k candidate affine parameter vectors, one row per mixture component.
        self.weight_ = nn.Parameter(torch.Tensor(k, num_channels))
        self.bias_ = nn.Parameter(torch.Tensor(k, num_channels))
        # Produces per-sample mixture coefficients; presumably shape (N, k)
        # given the matmul below — confirm against AttentionWeights.
        self.attention_weights = AttentionWeights(k, num_channels, norm='BN')
        self._init_params()

    def _init_params(self):
        # Scales near 1 and shifts near 0, like a standard BN affine init.
        nn.init.normal_(self.weight_, 1, 0.1)
        nn.init.normal_(self.bias_, 0, 0.1)

    def forward(self, x):
        output = super(MixtureBatchNorm2d, self).forward(x)
        size = output.size()
        y = self.attention_weights(x)
        # Fix: the original lines were missing the operator ('y self.weight_');
        # mix the k component parameters per sample via matrix multiplication.
        weight = y @ self.weight_
        bias = y @ self.bias_
        # Broadcast the per-sample (N, C) affine params over spatial dims.
        weight = weight.unsqueeze(-1).unsqueeze(-1).expand(size)
        bias = bias.unsqueeze(-1).unsqueeze(-1).expand(size)
        return (weight * output) + bias
def get_dtype_size(dtype):
    """Return the per-element size in bytes of a torch dtype.

    torch.bool is counted as one bit (1/8 byte); every other dtype's size is
    derived from the trailing bit-width in its name (e.g. 'torch.float32').
    """
    if dtype == torch.bool:
        # Booleans are accounted as a single bit here.
        return 1 / 8
    match = re.search('[^\\d](\\d+)$', str(dtype))
    if match is None:
        raise ValueError(f'`dtype` is not a valid dtype: {dtype}.')
    bits = int(match.group(1))
    return bits // 8
class GotoLocationAction(BaseAction):
    """Reward shaping for navigating to a target location.

    Rewards progress measured as the reduction in shortest-path length to the
    goal pose; penalizes actions outside the navigation action set.
    """

    # Navigation actions allowed while executing this subgoal.
    valid_actions = {'MoveAhead', 'RotateLeft', 'RotateRight', 'LookUp', 'LookDown', 'Teleport', 'TeleportFull'}

    def get_reward(self, state, prev_state, expert_plan, goal_idx):
        if state.metadata['lastAction'] not in self.valid_actions:
            return self.rewards['invalid_action'], False
        subgoal = expert_plan[goal_idx]['planner_action']
        # Target pose is encoded after the first '|' in the location string.
        target_pose = tuple(int(part) for part in subgoal['location'].split('|')[1:])
        prev_path, _ = self.gt_graph.get_shortest_path(prev_state.pose_discrete, target_pose)
        curr_path, _ = self.gt_graph.get_shortest_path(state.pose_discrete, target_pose)
        # Positive when the agent moved closer to the target, negative otherwise.
        reward = (len(prev_path) - len(curr_path)) * 0.2
        done = len(curr_path) < self.rewards['min_reach_distance']
        if done:
            reward += self.rewards['positive']
        return reward, done
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.