code stringlengths 101 5.91M |
|---|
def read_image_npy(img_path):
    """Load a binary mask stored as ``.npy``, fill its holes and extract the
    first contour.

    Parameters
    ----------
    img_path : str or Path
        Path to a ``.npy`` file containing a binary (boolean / 0-1) mask.

    Returns
    -------
    tuple
        ``(thresh, cnt)`` — the binarized uint8 image and the first contour
        reported by OpenCV.
    """
    img = np.load(img_path)
    img = binary_fill_holes(img)
    mask = img.copy()
    img = img.astype(np.uint8)
    # Paint filled foreground white so thresholding yields a clean mask.
    img[mask] = 255
    (ret, thresh) = cv2.threshold(img, 127, 255, 0)
    # OpenCV 3.x returns (image, contours, hierarchy) while OpenCV >= 4.x
    # returns (contours, hierarchy); taking the second-to-last element works
    # on both, whereas the original 3-tuple unpacking crashes on OpenCV 4.
    contours = cv2.findContours(thresh, 1, 2)[-2]
    cnt = contours[0]
    return (thresh, cnt)
class docParamNameList(GeneratedsSuper):
    """Data binding for the Doxygen ``docParamNameList`` XML element.

    generateDS.py-style generated class: holds the list of ``parametername``
    child elements and knows how to (de)serialize itself to/from XML.
    """
    # Hooks allowing users to substitute their own subclass via factory().
    subclass = None
    superclass = None
    def __init__(self, parametername=None):
        # NOTE: a caller-supplied list is stored by reference, not copied.
        if (parametername is None):
            self.parametername = []
        else:
            self.parametername = parametername
    def factory(*args_, **kwargs_):
        """Instantiate the registered subclass if any, else this class."""
        if docParamNameList.subclass:
            return docParamNameList.subclass(*args_, **kwargs_)
        else:
            return docParamNameList(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_parametername(self):
        return self.parametername
    def set_parametername(self, parametername):
        self.parametername = parametername
    def add_parametername(self, value):
        self.parametername.append(value)
    def insert_parametername(self, index, value):
        # NOTE(review): despite the name this *replaces* the item at `index`
        # (standard generateDS-generated behavior); it does not list.insert().
        self.parametername[index] = value
    def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''):
        """Write this element (and its children) as XML to `outfile`."""
        showIndent(outfile, level)
        outfile.write(('<%s%s %s' % (namespace_, name_, namespacedef_)))
        self.exportAttributes(outfile, level, namespace_, name_='docParamNameList')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, (level + 1), namespace_, name_)
            showIndent(outfile, level)
            outfile.write(('</%s%s>\n' % (namespace_, name_)))
        else:
            # Self-closing tag when there is no content.
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'):
        # This element carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'):
        for parametername_ in self.parametername:
            parametername_.export(outfile, level, namespace_, name_='parametername')
    def hasContent_(self):
        # NOTE(review): an empty list is not None, so this returns True even
        # with no children; export() then emits an open/close tag pair.
        if (self.parametername is not None):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docParamNameList'):
        """Write this element as Python literal (constructor-call) code."""
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        # No attributes to emit.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('parametername=[\n')
        level += 1
        for parametername in self.parametername:
            showIndent(outfile, level)
            outfile.write('model_.parametername(\n')
            parametername.exportLiteral(outfile, level, name_='parametername')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        """Populate this object from a minidom element node."""
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            # Strip any namespace prefix (e.g. 'ns:parametername').
            nodeName_ = child_.nodeName.split(':')[(- 1)]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        pass
    def buildChildren(self, child_, nodeName_):
        if ((child_.nodeType == Node.ELEMENT_NODE) and (nodeName_ == 'parametername')):
            obj_ = docParamName.factory()
            obj_.build(child_)
            self.parametername.append(obj_)
_module()
class ResNet(nn.Module):
    """ResNet backbone (mmcv/mmseg style) with optional deep stem, DCN,
    plugins, multi-grid dilation and stage freezing.

    NOTE(review): the bare ``_module()`` call above this class looks like
    decorator residue (e.g. ``@BACKBONES.register_module()``) whose prefix
    was lost during extraction — confirm against the original source.

    Args:
        depth (int): one of 18/34/50/101/152 (see ``arch_settings``).
        in_channels (int): channels of the input image.
        stem_channels (int): output channels of the stem.
        base_channels (int): channels of the first stage; doubled each stage.
        num_stages (int): number of residual stages, 1..4.
        strides (tuple): stride of each stage's first block.
        dilations (tuple): dilation of each stage.
        out_indices (tuple[int]): stage indices whose outputs are returned.
        style (str): bottleneck stride placement ('pytorch' or 'caffe').
        deep_stem (bool): use three 3x3 convs instead of one 7x7 stem conv.
        avg_down (bool): use AvgPool in downsampling shortcuts.
        frozen_stages (int): freeze stem + stages 1..frozen_stages; -1 = none.
        norm_eval (bool): keep BatchNorm layers in eval mode during train().
        dcn / stage_with_dcn: deformable-conv config and per-stage switches.
        plugins: optional per-stage plugin configs (see make_stage_plugins).
        multi_grid / contract_dilation: dilation tweaks for the last stage.
        with_cp (bool): use gradient checkpointing in the residual layers.
        zero_init_residual (bool): zero-init each block's last norm layer.
    """
    # depth -> (block class, per-stage block counts)
    arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}
    def __init__(self, depth, in_channels=3, stem_channels=64, base_channels=64, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=False, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, multi_grid=None, contract_dilation=False, with_cp=False, zero_init_residual=True):
        super(ResNet, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for resnet')
        self.depth = depth
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert ((num_stages >= 1) and (num_stages <= 4))
        self.strides = strides
        self.dilations = dilations
        assert (len(strides) == len(dilations) == num_stages)
        self.out_indices = out_indices
        assert (max(out_indices) < num_stages)
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if (dcn is not None):
            assert (len(stage_with_dcn) == num_stages)
        self.plugins = plugins
        self.multi_grid = multi_grid
        self.contract_dilation = contract_dilation
        self.zero_init_residual = zero_init_residual
        (self.block, stage_blocks) = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = stem_channels
        self._make_stem_layer(in_channels, stem_channels)
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # DCN is enabled per stage according to stage_with_dcn.
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            if (plugins is not None):
                stage_plugins = self.make_stage_plugins(plugins, i)
            else:
                stage_plugins = None
            # multi_grid applies only to the last stage.
            stage_multi_grid = (multi_grid if (i == (len(self.stage_blocks) - 1)) else None)
            planes = (base_channels * (2 ** i))
            res_layer = self.make_res_layer(block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, plugins=stage_plugins, multi_grid=stage_multi_grid, contract_dilation=contract_dilation)
            self.inplanes = (planes * self.block.expansion)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        self.feat_dim = ((self.block.expansion * base_channels) * (2 ** (len(self.stage_blocks) - 1)))
    def make_stage_plugins(self, plugins, stage_idx):
        """Return the plugin configs that apply to stage `stage_idx`.

        A plugin without a 'stages' key applies to all stages; otherwise
        'stages' must be a bool per stage.
        """
        stage_plugins = []
        for plugin in plugins:
            plugin = plugin.copy()
            stages = plugin.pop('stages', None)
            assert ((stages is None) or (len(stages) == self.num_stages))
            if ((stages is None) or stages[stage_idx]):
                stage_plugins.append(plugin)
        return stage_plugins
    def make_res_layer(self, **kwargs):
        """Build one residual stage; overridable by subclasses."""
        return ResLayer(**kwargs)
    @property
    def norm1(self):
        """The stem's norm layer.

        Must be a property: _freeze_stages() uses ``self.norm1.eval()`` and
        forward() calls ``self.norm1(x)`` — both fail on a plain bound
        method, so the original was missing this decorator.
        """
        return getattr(self, self.norm1_name)
    def _make_stem_layer(self, in_channels, stem_channels):
        """Build either the deep (3x 3x3 convs) or classic (7x7 conv) stem."""
        if self.deep_stem:
            self.stem = nn.Sequential(build_conv_layer(self.conv_cfg, in_channels, (stem_channels // 2), kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, (stem_channels // 2))[1], nn.ReLU(inplace=True), build_conv_layer(self.conv_cfg, (stem_channels // 2), (stem_channels // 2), kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, (stem_channels // 2))[1], nn.ReLU(inplace=True), build_conv_layer(self.conv_cfg, (stem_channels // 2), stem_channels, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels)[1], nn.ReLU(inplace=True))
        else:
            self.conv1 = build_conv_layer(self.conv_cfg, in_channels, stem_channels, kernel_size=7, stride=2, padding=3, bias=False)
            (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def _freeze_stages(self):
        """Put frozen stages in eval mode and stop their gradients."""
        if (self.frozen_stages >= 0):
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False
        for i in range(1, (self.frozen_stages + 1)):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False
    def init_weights(self, pretrained=None):
        """Load a checkpoint if `pretrained` is a path, else random init
        (Kaiming for convs, constant for norms, optional zero residual)."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if (self.dcn is not None):
                for m in self.modules():
                    if (isinstance(m, Bottleneck) and hasattr(m, 'conv2_offset')):
                        constant_init(m.conv2_offset, 0)
            if self.zero_init_residual:
                # Zero-init the last norm of each block so residual branches
                # start as identity.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        """Return a tuple of feature maps for the stages in out_indices."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)
    def train(self, mode=True):
        """Switch train/eval, re-freeze stages and optionally keep BN eval."""
        super(ResNet, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
def check_tangent_matrix(conf, vec_x0, fun, fun_grad):
    """Verify an analytical tangent (Jacobian) matrix against central finite
    differences of the residual function.

    Parameters:
        conf: configuration object; uses ``conf.delta`` (finite-difference
            step) and ``conf.check`` (option forwarded to plot_matrix_diff).
        vec_x0: state vector at which the check is performed (not modified).
        fun: residual function, vector -> vector.
        fun_grad: analytical gradient function, vector -> sparse matrix.

    Returns the elapsed time reported by the Timer (covers only the final
    output/plot bookkeeping, not the finite-difference loop).
    """
    vec_x = vec_x0.copy()
    delta = conf.delta
    vec_r = fun(vec_x)
    mtx_a0 = fun_grad(vec_x)
    mtx_a = mtx_a0.tocsc()
    # mtx_d holds the finite-difference approximation, restricted to the
    # sparsity pattern of the analytical matrix.
    mtx_d = mtx_a.copy()
    mtx_d.data[:] = 0.0
    vec_dx = nm.zeros_like(vec_r)
    for ic in range(vec_dx.shape[0]):
        # Central difference along the ic-th coordinate direction.
        vec_dx[ic] = delta
        xx = (vec_x.copy() - vec_dx)
        vec_r1 = fun(xx)
        vec_dx[ic] = (- delta)
        xx = (vec_x.copy() - vec_dx)
        vec_r2 = fun(xx)
        vec_dx[ic] = 0.0
        vec = ((0.5 * (vec_r2 - vec_r1)) / delta)
        # Scatter the FD column into the nonzeros of analytical column ic.
        ir = mtx_a.indices[mtx_a.indptr[ic]:mtx_a.indptr[(ic + 1)]]
        mtx_d.data[mtx_a.indptr[ic]:mtx_a.indptr[(ic + 1)]] = vec[ir]
    # Re-evaluate the residual at the unperturbed state — presumably to
    # restore any internal state of `fun` after the probing above; verify
    # before removing this seemingly redundant call.
    vec_r = fun(vec_x)
    timer = Timer(start=True)
    output(mtx_a, '.. analytical')
    output(mtx_d, '.. difference')
    import sfepy.base.plotutils as plu
    plu.plot_matrix_diff(mtx_d, mtx_a, delta, ['difference', 'analytical'], conf.check)
    return timer.stop()
class PeakLocalMaxSuite():
    """Benchmark fixture: a lattice of disks for timing peak_local_max."""
    def setup(self):
        # Boolean image with radius-8 disks centered on a 20-pixel lattice.
        side = 500
        spacing = 20
        radius = 8
        mask = np.zeros([side, side], dtype=bool)
        (rows, cols) = np.indices((side, side))
        # Nearest lattice center for every pixel.
        center_r = (((rows // spacing) * spacing) + (spacing // 2))
        center_c = (((cols // spacing) * spacing) + (spacing // 2))
        inside_disk = ((((rows - center_r) ** 2) + ((cols - center_c) ** 2)) < (radius ** 2))
        mask[inside_disk] = True
        (self.labels, num_objs) = ndi.label(mask)
        self.dist = ndi.distance_transform_edt(mask)
    def time_peak_local_max(self):
        peak_local_max(self.dist, labels=self.labels, min_distance=20, exclude_border=False, **peak_kwargs)
class FiniteWordPath_2d_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_2d, FiniteWord_class):
    """Finite 2d word path backed by a caching-iterator datatype.

    Pure mix-in composition: all behavior comes from the listed bases
    (iterator storage with caching + 2d path semantics + finite word API).
    """
    pass
def convert_conll03_file(filename, short_name):
    """Rewrite an ``en_conll03`` NER json file for another language code.

    Reads `filename` (which must contain 'en_conll03.' in its name), adds a
    ``multi_ner`` triple ('-', '-', <ner>) to every word, and writes the
    result alongside the original with 'en_conll03.' replaced by
    '<short_name>.conll03.'.

    Raises:
        AssertionError: if `filename` does not contain 'en_conll03.'.
        FileNotFoundError: if `filename` does not exist.
    """
    assert ('en_conll03.' in filename)
    if (not os.path.exists(filename)):
        raise FileNotFoundError(('Cannot convert missing file %s' % filename))
    new_filename = filename.replace('en_conll03.', (short_name + '.conll03.'))
    # Explicit encoding: the default is platform-dependent, but these data
    # files are UTF-8.
    with open(filename, encoding='utf-8') as fin:
        doc = json.load(fin)
    for sentence in doc:
        for word in sentence:
            ner = word['ner']
            word['multi_ner'] = ('-', '-', ner)
    with open(new_filename, 'w', encoding='utf-8') as fout:
        json.dump(doc, fout, indent=2)
def check_constituents(train_constituents, trees, treebank_name):
    """Raise if `trees` uses a constituent label absent from the train set."""
    seen_labels = parse_tree.Tree.get_unique_constituent_labels(trees)
    unknown = next((label for label in seen_labels if (label not in train_constituents)), None)
    if (unknown is not None):
        raise RuntimeError("Found label {} in the {} set which don't exist in the train set".format(unknown, treebank_name))
def load_detectron_weight(net, detectron_weight_file):
    """Copy Detectron (caffe2) blob weights into a PyTorch model in place.

    `net` must expose ``detectron_weight_mapping`` returning (dict mapping
    PyTorch param names to Detectron blob names, list of Detectron-only
    blob names).

    SECURITY NOTE: uses pickle.load on the weight file — only load files
    from trusted sources.
    """
    (name_mapping, orphan_in_detectron) = net.detectron_weight_mapping
    with open(detectron_weight_file, 'rb') as fp:
        src_blobs = pickle.load(fp, encoding='latin1')
    if ('blobs' in src_blobs):
        # Some Detectron pickles nest the weights under a 'blobs' key.
        src_blobs = src_blobs['blobs']
    params = net.state_dict()
    for (p_name, p_tensor) in params.items():
        d_name = name_mapping[p_name]
        # Non-string mapping entries mark params with no Detectron
        # counterpart; those are left untouched.
        if isinstance(d_name, str):
            p_tensor.copy_(torch.Tensor(src_blobs[d_name]))
def test_geterr():
    """Every (category, action) reported by sc.geterr() must be known."""
    current = sc.geterr()
    for (category, action) in current.items():
        assert_((category in _sf_error_code_map))
        assert_((action in _sf_error_actions))
def update_config(config, *, impossible_strategy, class_loss_weight):
    """Return a copy of `config` extended with identification/classification
    settings.

    Dynamically derives a subclass of ``type(config)`` carrying two extra
    fields, then rebuilds the config from its dict form.

    Args:
        config: config object exposing ``to_dict()`` and a kwargs-based
            constructor (e.g. a transformers PretrainedConfig).
        impossible_strategy: strategy name stored on the new config.
        class_loss_weight: loss weight stored on the new config.
    """
    class IdentificationClassificationConfig(type(config)):
        def __init__(self, impossible_strategy='ignore', class_loss_weight=1.0, **kwargs):
            super().__init__(**kwargs)
            self.impossible_strategy = impossible_strategy
            self.class_loss_weight = class_loss_weight
        # @classmethod is required: from_config is invoked on the class
        # below, so without the decorator `config` would be bound to `cls`
        # and the call would raise TypeError (missing 'config').
        @classmethod
        def from_config(cls, config, *, impossible_strategy, class_loss_weight):
            kwargs = config.to_dict()
            assert ('impossible_strategy' not in kwargs)
            kwargs['impossible_strategy'] = impossible_strategy
            assert ('class_loss_weight' not in kwargs)
            kwargs['class_loss_weight'] = class_loss_weight
            return cls(**kwargs)
    return IdentificationClassificationConfig.from_config(config, impossible_strategy=impossible_strategy, class_loss_weight=class_loss_weight)
class SchemeMorphism_point(SchemeMorphism):
    """Base class for rational points on schemes.

    Coordinates are stored as ``self._coords`` in the ambient space of
    ``self._codomain``.
    """
    def _repr_(self):
        """String representation delegating to the ambient space."""
        return self._codomain.ambient_space()._repr_generic_point(self._coords)
    def _latex_(self):
        """LaTeX representation delegating to the ambient space."""
        return self._codomain.ambient_space()._latex_generic_point(self._coords)
    def __getitem__(self, n):
        """Return the n-th coordinate."""
        return self._coords[n]
    def __iter__(self):
        """Iterate over the coordinates."""
        return iter(self._coords)
    def __tuple__(self):
        # NOTE(review): `__tuple__` is not a protocol Python itself invokes
        # (tuple(pt) goes through __iter__); presumably kept for explicit
        # calls elsewhere — verify before removing.
        return self._coords
    def __len__(self):
        """Number of coordinates."""
        return len(self._coords)
    def _richcmp_(self, other, op):
        """Compare coordinate tuples, coercing `other` into this point's
        ambient space first when it is not already a scheme point."""
        if (not isinstance(other, SchemeMorphism_point)):
            try:
                other = self._codomain.ambient_space()(other)
            except TypeError:
                return NotImplemented
        return richcmp(self._coords, other._coords, op)
    def scheme(self):
        """Return the scheme this point lies on (its codomain)."""
        return self._codomain
    def change_ring(self, R, check=True):
        """Return this point with each coordinate mapped into the ring R."""
        S = self.codomain().change_ring(R)
        Q = [R(t) for t in self]
        return S.point(Q, check=check)
    def __copy__(self):
        # check=False: coordinates are already known to be valid.
        return self._codomain.point(self._coords, check=False)
    def specialization(self, D=None, phi=None, ambient=None):
        """Specialize parameters via the dictionary `D` or the morphism
        `phi`, optionally landing in a given `ambient` scheme.

        Raises:
            ValueError: if neither `D` nor `phi` is provided.
        """
        if (D is None):
            if (phi is None):
                raise ValueError('either the dictionary or the specialization must be provided')
        else:
            from sage.rings.polynomial.flatten import SpecializationMorphism
            phi = SpecializationMorphism(self.codomain().ambient_space().coordinate_ring(), D)
        if (ambient is None):
            ambient = self.codomain()
            if isinstance(ambient, AlgebraicScheme_subscheme):
                ambient = ambient.specialization(phi=phi)
            else:
                ambient = ambient.change_ring(phi.codomain().base_ring())
        # Map every coordinate through phi, killing remaining generators.
        psi = ambient.ambient_space().coordinate_ring().hom([0 for i in range(ambient.ambient_space().ngens())], ambient.base_ring())
        return ambient([psi(phi(t)) for t in self])
def is_whole(x):
    """Return True if `x` can be interpreted as a whole number.

    Accepts anything ``numpy.float64()`` can convert (ints, floats, numeric
    strings); returns False when conversion fails.
    """
    try:
        x = numpy.float64(x)
    except (TypeError, ValueError, OverflowError):
        # TypeError covers non-numeric objects such as None; the original
        # caught only ValueError (bad strings) and crashed on those inputs.
        return False
    return x.is_integer()
def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, plan=None):
    """2-D FFT of a Hermitian-symmetric array; thin wrapper over hfftn.

    All arguments are forwarded unchanged.  `plan` is reserved for future
    use and must be None.
    """
    if plan is not None:
        raise NotImplementedError('Passing a precomputed plan is not yet supported by scipy.fft functions')
    return hfftn(x, s=s, axes=axes, norm=norm, overwrite_x=overwrite_x, workers=workers)
# NOTE(review): `_start_docstrings(...)` here and the two bare calls before
# forward() below look like transformers decorator residue
# (@add_start_docstrings / @add_start_docstrings_to_model_forward /
# @add_code_sample_docstrings) whose '@' prefixes were lost — confirm
# against the original transformers source.
_start_docstrings('The bare MobileViT model outputting raw hidden-states without any specific head on top.', MOBILEVIT_START_DOCSTRING)
class MobileViTModel(MobileViTPreTrainedModel):
    """Bare MobileViT backbone: conv stem -> MobileViT encoder -> optional
    1x1 expansion conv with spatial mean pooling."""
    def __init__(self, config: MobileViTConfig, expand_output: bool=True):
        super().__init__(config)
        self.config = config
        self.expand_output = expand_output
        # 3x3 stride-2 stem mapping raw pixels to the first neck width.
        self.conv_stem = MobileViTConvLayer(config, in_channels=config.num_channels, out_channels=config.neck_hidden_sizes[0], kernel_size=3, stride=2)
        self.encoder = MobileViTEncoder(config)
        if self.expand_output:
            # 1x1 conv expanding the last neck width to the output width.
            self.conv_1x1_exp = MobileViTConvLayer(config, in_channels=config.neck_hidden_sizes[5], out_channels=config.neck_hidden_sizes[6], kernel_size=1)
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; `heads_to_prune` maps layer index -> heads."""
        for (layer_index, heads) in heads_to_prune.items():
            mobilevit_layer = self.encoder.layer[layer_index]
            # Only MobileViTLayer blocks contain transformer attention.
            if isinstance(mobilevit_layer, MobileViTLayer):
                for transformer_layer in mobilevit_layer.transformer.layer:
                    transformer_layer.attention.prune_heads(heads)
    _start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
    _code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[(tuple, BaseModelOutputWithPoolingAndNoAttention)]:
        """Run the backbone; returns the last hidden state plus a mean-pooled
        output when expand_output is enabled.

        Raises:
            ValueError: if `pixel_values` is None.
        """
        # Fall back to config defaults when flags are not supplied.
        output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        if (pixel_values is None):
            raise ValueError('You have to specify pixel_values')
        embedding_output = self.conv_stem(pixel_values)
        encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if self.expand_output:
            last_hidden_state = self.conv_1x1_exp(encoder_outputs[0])
            # Global average pool over the two spatial dims.
            pooled_output = torch.mean(last_hidden_state, dim=[(- 2), (- 1)], keepdim=False)
        else:
            last_hidden_state = encoder_outputs[0]
            pooled_output = None
        if (not return_dict):
            output = ((last_hidden_state, pooled_output) if (pooled_output is not None) else (last_hidden_state,))
            return (output + encoder_outputs[1:])
        return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
def is_even(self, allow_rescaling_flag=True) -> bool:
    """Return True iff this form's parity() is 'even'.

    `allow_rescaling_flag` is forwarded verbatim to parity().
    """
    return (self.parity(allow_rescaling_flag) == 'even')
def write_syscall_consts(f, arch, mode):
    """Emit autogenerated Rust syscall-number constants for `arch` in the
    style selected by `mode` (const-asserts, pub consts, trait declarations
    or trait impls), followed by the valid/invalid syscall counts."""
    f.write('// This file has been autogenerated. DO NOT MODIFY!\n')
    # Rust type name per architecture; CONST_ASSERTS emits nothing for
    # other arch values, matching the original if/elif chain.
    arch_type = {'x86': 'X86Arch', 'x64': 'X64Arch'}
    next_undefined = -1
    valid_syscalls = 0
    invalid_syscalls = 0
    for sc_name, sc_obj in sorted(syscalls.all(), key=(lambda x: arch_syscall_number(arch, x))):
        number = getattr(sc_obj, arch)
        if number is None:
            # Syscalls absent on this arch get unique negative placeholders.
            enum_number = next_undefined
            next_undefined -= 1
            invalid_syscalls += 1
        else:
            enum_number = number
            valid_syscalls += 1
        if mode == SyscallGen.CONST_ASSERTS:
            rust_ty = arch_type.get(arch)
            if rust_ty is not None:
                f.write('const_assert_eq!(%s::%s, %d);\n' % (rust_ty, sc_name.upper(), enum_number))
        elif mode == SyscallGen.DEFAULT:
            f.write('pub const %s: i32 = %d;\n' % (sc_name.upper(), enum_number))
        elif mode == SyscallGen.TRAIT:
            f.write('const %s: i32;\n' % sc_name.upper())
        elif mode == SyscallGen.TRAIT_IMPL:
            f.write('const %s: i32 = %d;\n' % (sc_name.upper(), enum_number))
    if mode == SyscallGen.CONST_ASSERTS:
        rust_ty = arch_type.get(arch)
        if rust_ty is not None:
            f.write('const_assert_eq!(%s::VALID_SYSCALL_COUNT, %d);\n' % (rust_ty, valid_syscalls))
            f.write('const_assert_eq!(%s::INVALID_SYSCALL_COUNT, %d);\n' % (rust_ty, invalid_syscalls))
    elif mode == SyscallGen.DEFAULT:
        f.write('pub const VALID_SYSCALL_COUNT: i32 = %d;\n' % valid_syscalls)
        f.write('pub const INVALID_SYSCALL_COUNT: i32 = %d;\n' % invalid_syscalls)
    elif mode == SyscallGen.TRAIT:
        f.write('const VALID_SYSCALL_COUNT: i32;\n')
        f.write('const INVALID_SYSCALL_COUNT: i32;\n')
    elif mode == SyscallGen.TRAIT_IMPL:
        f.write('const VALID_SYSCALL_COUNT: i32 = %d;\n' % valid_syscalls)
        f.write('const INVALID_SYSCALL_COUNT: i32 = %d;\n' % invalid_syscalls)
class LempDecisionRuleStats(IndexDecisionRuleStats):
    """Decision-rule statistics specialized for the LEMP index.

    Helpers operate on dataframe rows; LEMP's measured runtime stands in
    for the generic index runtime of the base class.
    """
    def __init__(self, sample_time_col, dec_rule_df, lemp_truth_df, index_truth_df, blocked_mm_truth_df):
        super(LempDecisionRuleStats, self).__init__(sample_time_col, dec_rule_df, lemp_truth_df, index_truth_df, blocked_mm_truth_df)
    def correct(self, row):
        """True when the rule's lemp_wins flag matches ground truth."""
        lemp_actually_wins = (self.blocked_mm_true_runtime(row['model'], row['K']) > self.index_true_runtime(row))
        return (lemp_actually_wins == row['lemp_wins'])
    def overhead_runtime(self, row):
        """Extra runtime incurred by the decision rule for this row."""
        if row['lemp_wins']:
            return (row[self.BLOCKED_MM_COL] * row['num_users'])
        return (self.preproc_runtime(row) + (row[self.sample_time_col] * row['num_users']))
    def preproc_runtime(self, row):
        """Preprocessing plus index-construction time."""
        return (row['preproc_time'] + row['index_time'])
    def estimate_index_runtime(self, row):
        """Sampled per-user time scaled to the model's full user count."""
        return (row[self.sample_time_col] * self.num_users(row['model']))
    def index_true_runtime(self, row):
        """For LEMP, the true index runtime is LEMP's measured runtime."""
        return self.lemp_true_runtime(row['model'], row['K'])
    def optimizer_runtime_no_overhead(self, row):
        """Runtime of whichever method the rule picked, without overhead."""
        if row['lemp_wins']:
            return self.index_true_runtime(row)
        return self.blocked_mm_true_runtime(row['model'], row['K'])
    def estimate_blocked_mm_runtime(self, row):
        """Sampled blocked matrix-multiply time scaled by user count."""
        return (row[self.BLOCKED_MM_COL] * row['num_users'])
class EagerBatcher():
    """Reads (query, positive, negative) training triples from a TSV file
    and yields tokenized batches, sharding lines across `nranks` workers."""
    def __init__(self, args, rank=0, nranks=1):
        (self.rank, self.nranks) = (rank, nranks)
        (self.bsize, self.accumsteps) = (args.bsize, args.accumsteps)
        self.query_tokenizer = QueryTokenizer(args.query_maxlen)
        self.doc_tokenizer = DocTokenizer(args.doc_maxlen)
        # Pre-bind the tokenizers so collate() only supplies the raw triples.
        self.tensorize_triples = partial(tensorize_triples, self.query_tokenizer, self.doc_tokenizer)
        self.triples_path = args.triples
        self._reset_triples()
    def _reset_triples(self):
        # NOTE(review): a previously opened reader is not closed here; the
        # stale handle is left to the garbage collector — verify intent.
        self.reader = open(self.triples_path, mode='r', encoding='utf-8')
        self.position = 0
    def __iter__(self):
        return self
    def __next__(self):
        """Read this rank's share of the next bsize*nranks lines and return
        them collated; raises StopIteration on a partial batch."""
        (queries, positives, negatives) = ([], [], [])
        for (line_idx, line) in zip(range((self.bsize * self.nranks)), self.reader):
            # Round-robin sharding: keep only lines assigned to this rank.
            if (((self.position + line_idx) % self.nranks) != self.rank):
                continue
            (query, pos, neg) = line.strip().split('\t')
            queries.append(query)
            positives.append(pos)
            negatives.append(neg)
        # Advance by the number of lines actually consumed from the reader.
        # NOTE(review): if the reader is already exhausted the loop body
        # never runs and `line_idx` is unbound (NameError) — presumably the
        # triples file is non-empty; verify.
        self.position += (line_idx + 1)
        if (len(queries) < self.bsize):
            # Partial batch means the file is exhausted.
            raise StopIteration
        return self.collate(queries, positives, negatives)
    def collate(self, queries, positives, negatives):
        """Tokenize/tensorize a full batch, split for gradient accumulation."""
        assert (len(queries) == len(positives) == len(negatives) == self.bsize)
        return self.tensorize_triples(queries, positives, negatives, (self.bsize // self.accumsteps))
    def skip_to_batch(self, batch_idx, intended_batch_size):
        """Rewind the file and consume batch_idx * intended_batch_size lines
        so training resumes at the given batch."""
        self._reset_triples()
        Run.warn(f'Skipping to batch #{batch_idx} (with intended_batch_size = {intended_batch_size}) for training.')
        _ = [self.reader.readline() for _ in range((batch_idx * intended_batch_size))]
        return None
class RefCosine(object):
    """Cosine-annealing learning-rate schedule.

    The rate starts at `init_lr`, follows half a cosine wave, and reaches
    zero at `max_iter`.
    """
    def __init__(self, init_lr, max_iter):
        self.init_lr = init_lr
        self.max_iter = max_iter
    def get_learning_rate(self, iter):
        """Learning rate at step `iter` (expected 0 <= iter <= max_iter)."""
        phase = (((iter * 1.0) / self.max_iter) * math.pi)
        return (self.init_lr * ((math.cos(phase) + 1.0) * 0.5))
def test_state_adjoint_problems(CG1, geometry, rng, u, y_d, ocp, bcs, y, p):
    """Check that ocp's state/adjoint solutions match direct FEniCS solves.

    Builds the grad-grad (weak Laplacian) state problem with source `u` and
    its adjoint with source -(state - y_d) on CG1, randomizes the data,
    solves both directly, and compares against the state `y` and adjoint `p`
    produced by the optimal-control object `ocp`.
    """
    trial = TrialFunction(CG1)
    test = TestFunction(CG1)
    state = Function(CG1)
    adjoint = Function(CG1)
    # Bilinear form shared by the state and adjoint problems.
    a = (inner(grad(trial), grad(test)) * geometry.dx)
    L_state = ((u * test) * geometry.dx)
    # Adjoint RHS references `state`, so the state solve must come first.
    L_adjoint = (((- (state - y_d)) * test) * geometry.dx)
    # Randomize desired state and control, then sync the PETSc vectors.
    y_d.vector().set_local(rng.rand(y_d.vector().local_size()))
    y_d.vector().apply('')
    u.vector().set_local(rng.rand(u.vector().local_size()))
    u.vector().apply('')
    ocp.compute_state_variables()
    ocp.compute_adjoint_variables()
    solve((a == L_state), state, bcs)
    solve((a == L_adjoint), adjoint, bcs)
    assert np.allclose(state.vector()[:], y.vector()[:])
    assert np.allclose(adjoint.vector()[:], p.vector()[:])
class GTN(nn.Module):
    """Graph Transformer Network: learns meta-path adjacencies through
    stacked GT layers, runs a shared GCN per channel on the learned graphs,
    and classifies the target nodes."""
    def __init__(self, num_edge, num_channels, w_in, w_out, num_class, num_nodes, num_layers):
        super(GTN, self).__init__()
        self.num_edge = num_edge
        self.num_channels = num_channels
        self.num_nodes = num_nodes
        self.w_in = w_in
        self.w_out = w_out
        self.num_class = num_class
        self.num_layers = num_layers
        layers = []
        for i in range(num_layers):
            # Only the first GT layer consumes the raw adjacency stack alone.
            if (i == 0):
                layers.append(GTLayer(num_edge, num_channels, num_nodes, first=True))
            else:
                layers.append(GTLayer(num_edge, num_channels, num_nodes, first=False))
        self.layers = nn.ModuleList(layers)
        self.loss = nn.CrossEntropyLoss()
        # One GCN shared across channels; channel outputs are concatenated.
        self.gcn = GCNConv(in_channels=self.w_in, out_channels=w_out)
        self.linear1 = nn.Linear((self.w_out * self.num_channels), self.w_out)
        self.linear2 = nn.Linear(self.w_out, self.num_class)
    def normalization(self, H):
        """Degree-normalize each channel's (edge_index, edge_weight) pair
        after removing self-loops."""
        norm_H = []
        for i in range(self.num_channels):
            (edge, value) = H[i]
            (edge, value) = remove_self_loops(edge, value)
            (deg_row, deg_col) = self.norm(edge.detach(), self.num_nodes, value)
            value = (deg_col * value)
            norm_H.append((edge, value))
        return norm_H
    def norm(self, edge_index, num_nodes, edge_weight, improved=False, dtype=None):
        """Per-endpoint degree factors for `edge_index`.

        NOTE(review): despite the name `deg_inv_sqrt`, this computes
        deg**-1 (plain inverse), i.e. one-sided normalization rather than
        the symmetric deg**-0.5 GCN normalization — confirm this is the
        intended behavior.
        """
        if (edge_weight is None):
            edge_weight = torch.ones((edge_index.size(1),), dtype=dtype, device=edge_index.device)
        edge_weight = edge_weight.view((- 1))
        assert (edge_weight.size(0) == edge_index.size(1))
        (row, col) = edge_index
        # Weighted in-degree per node (summed over target column indices).
        deg = scatter_add(edge_weight.clone(), col, dim=0, dim_size=num_nodes)
        deg_inv_sqrt = deg.pow((- 1))
        # Isolated nodes have degree 0; zero out the resulting infs.
        deg_inv_sqrt[(deg_inv_sqrt == float('inf'))] = 0
        return (deg_inv_sqrt[row], deg_inv_sqrt[col])
    def forward(self, A, X, target_x, target):
        """Return (loss, logits for target_x, per-layer GT weight list)."""
        Ws = []
        for i in range(self.num_layers):
            if (i == 0):
                (H, W) = self.layers[i](A)
            else:
                (H, W) = self.layers[i](A, H)
            H = self.normalization(H)
            Ws.append(W)
        # Run the shared GCN on each learned meta-path graph and concat.
        for i in range(self.num_channels):
            if (i == 0):
                (edge_index, edge_weight) = (H[i][0], H[i][1])
                X_ = self.gcn(X, edge_index=edge_index.detach(), edge_weight=edge_weight)
                X_ = F.relu(X_)
            else:
                (edge_index, edge_weight) = (H[i][0], H[i][1])
                X_ = torch.cat((X_, F.relu(self.gcn(X, edge_index=edge_index.detach(), edge_weight=edge_weight))), dim=1)
        X_ = self.linear1(X_)
        X_ = F.relu(X_)
        y = self.linear2(X_[target_x])
        loss = self.loss(y, target)
        return (loss, y, Ws)
# NOTE(review): the bare `_handler` below looks like decorator residue
# (its '@' and possible prefix were lost during extraction) — confirm
# against the original source.
_handler
def value_analysis(term, smt, name, exact, restrict):
    """Return an SMT analysis value for `term`'s first argument under the
    analysis `name`, reusing a cached one when available.

    Evaluates the argument under locally-collected definedness/non-poison
    conditions, computes the exact analysis value ``z = exact(x, ty)``, and
    either returns it directly (constant arguments) or introduces a fresh
    analysis variable constrained by ``restrict(r, z)``.
    """
    arg = term._args[0]
    try:
        return smt.get_analysis(name, arg)
    except KeyError:
        # No cached analysis for this argument yet; compute one below.
        pass
    ty = smt.type(term)
    with smt.local_defined(), smt.local_nonpoison() as nx:
        x = smt.eval(arg)
        z = exact(x, ty)
    if isinstance(arg, Constant):
        # Constants need no auxiliary variable; the exact value suffices.
        return z
    r = smt.new_analysis(name, arg, type=ty)
    # r is constrained only under the non-poison conditions nx.
    smt.add_aux(*mk_implies(nx, [restrict(r, z)]))
    return r
def gaussian_beam_z_axis_x_pol(x_grid, y_grid, z_grid, w0, center, R, omega, polarity, eps_val) -> complex:
    """Complex field amplitude of an x-polarized Gaussian beam propagating
    along the (rotated) z axis.

    Args:
        x_grid, y_grid, z_grid: coordinate arrays (common shape).
        w0: beam waist.
        center: 3-sequence, beam focus position.
        R: 3x3 rotation mapping lab coordinates into the beam frame.
        omega: angular frequency; together with eps_val fixes the wavelength.
        polarity: +1/-1 propagation sign in the phase term.
        eps_val: permittivity.

    Returns the complex field sampled on the grid.
    """
    # Offsets from the beam center, rotated into the beam frame.
    dx = (x_grid - center[0])
    dy = (y_grid - center[1])
    dz = (z_grid - center[2])
    x = (((R[(0, 0)] * dx) + (R[(0, 1)] * dy)) + (R[(0, 2)] * dz))
    y = (((R[(1, 0)] * dx) + (R[(1, 1)] * dy)) + (R[(1, 2)] * dz))
    z = (((R[(2, 0)] * dx) + (R[(2, 1)] * dy)) + (R[(2, 2)] * dz))
    wlen = ((2.0 * np.pi) / (omega * np.sqrt(eps_val)))
    k = ((2.0 * np.pi) / wlen)
    # Rayleigh range and beam width along z.
    z_r = ((np.pi * (w0 ** 2)) / wlen)
    w_z = (w0 * ((1 + ((z / z_r) ** 2)) ** 0.5))
    # Inverse radius of curvature; zero on the focal plane (z == 0).
    inv_R_z = np.zeros_like(z_grid)
    off_focus = (z != 0)
    inv_R_z[off_focus] = np.power((z[off_focus] * (1 + ((z_r / z[off_focus]) ** 2))), (- 1))
    gouy_z = np.arctan((z / z_r))
    r2 = ((x ** 2) + (y ** 2))
    imp = np.sqrt((1 / eps_val))
    # Transverse Gaussian envelope with normalization ...
    envelope = ((((np.sqrt(imp) * 2) / (np.sqrt(np.pi) * w0)) * (w0 / w_z)) * np.exp(((- r2) / (w_z ** 2))))
    # ... times propagation phase, curvature phase and Gouy phase.
    phase = np.exp((((- 1j) * polarity) * (((k * z) + ((k * inv_R_z) * (r2 / 2))) - gouy_z)))
    return (envelope * phase)
def write_dataset_best(documents, test_documents, output_dir, dataset_name):
    """Shuffle `documents`, split them roughly 85/15 into train/dev, and
    write the train, dev and test sections under `output_dir`.

    Notes:
        - `documents` is shuffled IN PLACE (the caller's list is reordered)
          and the split depends on the current `random` state.
        - The dev section takes everything after the train cut.  The
          original computed ``num_dev = int(0.15 * len)`` separately, so the
          two int() truncations could silently drop the last document.
    """
    random.shuffle(documents)
    num_train = int((len(documents) * 0.85))
    os.makedirs(output_dir, exist_ok=True)
    write_section(output_dir, dataset_name, 'train', documents[:num_train])
    write_section(output_dir, dataset_name, 'dev', documents[num_train:])
    write_section(output_dir, dataset_name, 'test', test_documents)
class FastaDataset(torch.utils.data.Dataset):
    """Random-access dataset over a FASTA file.

    Byte offsets and sequence lengths of every record are indexed once
    (optionally cached to ``<path>.fasta.idx.npy``); file handles are kept
    per thread so multi-worker reads don't share a seek position.
    """
    def __init__(self, path: str, cache_indices=False):
        self.fn = fasta_file_path(path)
        # One open file handle per thread (see _get_file).
        self.threadlocal = threading.local()
        self.cache = Path(f'{path}.fasta.idx.npy')
        if cache_indices:
            if self.cache.exists():
                (self.offsets, self.sizes) = np.load(self.cache)
            else:
                (self.offsets, self.sizes) = self._build_index(path)
                np.save(self.cache, np.stack([self.offsets, self.sizes]))
        else:
            (self.offsets, self.sizes) = self._build_index(path)
    def _get_file(self):
        """Return this thread's file handle, opening it on first use."""
        if (not hasattr(self.threadlocal, 'f')):
            self.threadlocal.f = open(self.fn, 'r')
        return self.threadlocal.f
    def __getitem__(self, idx):
        """Return (description line, concatenated sequence) of record idx."""
        f = self._get_file()
        f.seek(self.offsets[idx])
        desc = f.readline().strip()
        line = f.readline()
        seq = ''
        # Accumulate sequence lines until EOF or the next '>' header.
        while ((line != '') and (line[0] != '>')):
            seq += line.strip()
            line = f.readline()
        return (desc, seq)
    def __len__(self):
        return self.offsets.size
    def _build_index(self, path: str):
        """Scan the FASTA once with shell pipelines, returning
        (byte offsets of headers, sequence lengths).

        NOTE(review): `path` is interpolated into `shell=True` commands —
        unsafe for untrusted paths — and the output is parsed with
        np.fromstring, whose text mode is deprecated in NumPy.
        """
        path = fasta_file_path(path)
        bytes_offsets = subprocess.check_output(f"cat {path} | tqdm --bytes --total $(wc -c < {path})| grep --byte-offset '^>' -o | cut -d: -f1", shell=True)
        fasta_lengths = subprocess.check_output(f"""cat {path} | tqdm --bytes --total $(wc -c < {path})| awk '/^>/ {{print "";next;}} {{ printf("%s",$0);}}' | tail -n+2 | awk '{{print length($1)}}'""", shell=True)
        bytes_np = np.fromstring(bytes_offsets, dtype=np.int64, sep=' ')
        sizes_np = np.fromstring(fasta_lengths, dtype=np.int64, sep=' ')
        return (bytes_np, sizes_np)
    def __setstate__(self, state):
        # Thread-local handles cannot be pickled; recreate on unpickle.
        self.__dict__ = state
        self.threadlocal = threading.local()
    def __getstate__(self):
        # Drop the thread-local handles when pickling.
        d = {}
        for (i, v) in self.__dict__.items():
            if (i != 'threadlocal'):
                d[i] = v
        return d
    def __del__(self):
        # Close the current thread's handle, if any.
        if hasattr(self.threadlocal, 'f'):
            self.threadlocal.f.close()
            del self.threadlocal.f
    def exists(path):
        # NOTE(review): takes `path`, not `self` — effectively a static
        # method missing @staticmethod; only safe to call as
        # FastaDataset.exists(path), not on an instance.
        return os.path.exists(fasta_file_path(path))
def get_init_sup_samples(args, sampler, COMMON, train_samples, OUTD):
    """Build the labeled training set for the current active-learning round.

    At round 0 (for true AL methods), draws the initial random labeled set
    and persists it as train_0.csv; at later rounds, reloads all per-round
    CSVs accumulated so far (and, for label-propagation segmentation, the
    previous round's pairs pickle), marks every sample as labeled, and
    rewrites cluster-local paths when running under CC_CLUSTER.

    Returns:
        (train_samples, previous_pairs, previous_errors)
    """
    previous_pairs = dict()
    previous_errors = False
    # Dropping normal-class samples applies only to CAM16 + non-WSL AL.
    cnd_drop_n = (args.dataset == constants.CAM16)
    cnd_drop_n &= (args.al_type != constants.AL_WSL)
    # Round 0 of a true AL method: sample and persist the initial set.
    cnd = (args.al_type not in [constants.AL_FULL_SUP, constants.AL_WSL])
    cnd &= (args.al_it == 0)
    if cnd:
        set_default_seed()
        train_samples = sampler.sample_init_random_samples(train_samples)
        set_default_seed()
        base_f = 'train_{}.csv'.format(args.al_it)
        al_outf = join(COMMON, base_f)
        csv_writer(clear_rootpath(train_samples, args), al_outf)
        shutil.copyfile(al_outf, join(OUTD, base_f))
    # Later rounds: accumulate every per-round CSV written so far.
    cnd = (args.al_type not in [constants.AL_FULL_SUP, constants.AL_WSL])
    cnd &= (args.al_it > 0)
    if cnd:
        lfiles = [join(COMMON, 'train_{}.csv'.format(t)) for t in range((args.al_it + 1))]
        if ((args.al_type == constants.AL_LP) and (args.task == constants.SEG)):
            # Label propagation + segmentation: reload previous-round pairs.
            fz = join(COMMON, 'train_pairs_{}.pkl'.format((args.al_it - 1)))
            with open(fz, 'rb') as fp:
                previous_pairs = pkl.load(fp)
        train_samples = []
        rootpath = get_rootpath_2_dataset(args)
        for fx in lfiles:
            train_samples.extend(csv_loader(fx, rootpath, drop_normal=cnd_drop_n))
        # Mark every reloaded sample as labeled.
        for tt in range(len(train_samples)):
            train_samples[tt][4] = constants.L
        if ('CC_CLUSTER' in os.environ.keys()):
            # Compute-cluster run: sample paths must live under SLURM_TMPDIR.
            for i in range(len(train_samples)):
                front = os.sep.join(train_samples[i][1].split(os.sep)[:3])
                cnd = (front != os.environ['SLURM_TMPDIR'])
                if cnd:
                    train_samples[i][1] = train_samples[i][1].replace(front, os.environ['SLURM_TMPDIR'])
                    if (args.task == constants.SEG):
                        train_samples[i][2] = train_samples[i][2].replace(front, os.environ['SLURM_TMPDIR'])
                    # Any rewritten path is treated as a fatal inconsistency:
                    # the assert below aborts once one has been found.
                    previous_errors = True
            assert (not previous_errors), 'ERROR.'
    set_default_seed()
    # Repeated seeded shuffles of the final sample order.
    for i in range(100):
        random.shuffle(train_samples)
    set_default_seed()
    return (train_samples, previous_pairs, previous_errors)
class TestClarksonWoodruffTransform():
    """Tests for the Clarkson-Woodruff sketching transform.

    NOTE(review): the RNG seed and the `seeds` list literals were lost in
    this file (empty literals made the class a SyntaxError); they are
    restored here with fixed arbitrary values — any deterministic choice is
    valid for these property-style tests.
    """
    rng = np.random.RandomState(seed=1179103485)
    n_rows = 2000
    n_cols = 100
    density = 0.1
    n_sketch_rows = 200
    # Ten fixed seeds so each property is checked on several transforms.
    seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431,
             1302443994, 1521083269, 1501189312, 1126232505, 1533465685]
    # One dense and three sparse-format test matrices.
    A_dense = rng.randn(n_rows, n_cols)
    A_csc = rand(n_rows, n_cols, density=density, format='csc', random_state=rng)
    A_csr = rand(n_rows, n_cols, density=density, format='csr', random_state=rng)
    A_coo = rand(n_rows, n_cols, density=density, format='coo', random_state=rng)
    test_matrices = [A_dense, A_csc, A_csr, A_coo]
    # Unit-ish vector for the vector-norm preservation test.
    x = (rng.randn(n_rows, 1) / np.sqrt(n_rows))
    def test_sketch_dimensions(self):
        """The sketch has n_sketch_rows rows and the input's column count."""
        for A in self.test_matrices:
            for seed in self.seeds:
                sketch = clarkson_woodruff_transform(A, self.n_sketch_rows, seed=seed)
                assert_((sketch.shape == (self.n_sketch_rows, self.n_cols)))
    def test_seed_returns_identical_transform_matrix(self):
        """Equal seeds produce bit-identical sketching matrices."""
        for A in self.test_matrices:
            for seed in self.seeds:
                S1 = cwt_matrix(self.n_sketch_rows, self.n_rows, seed=seed).toarray()
                S2 = cwt_matrix(self.n_sketch_rows, self.n_rows, seed=seed).toarray()
                assert_equal(S1, S2)
    def test_seed_returns_identically(self):
        """Equal seeds produce identical sketches of the same matrix."""
        for A in self.test_matrices:
            for seed in self.seeds:
                sketch1 = clarkson_woodruff_transform(A, self.n_sketch_rows, seed=seed)
                sketch2 = clarkson_woodruff_transform(A, self.n_sketch_rows, seed=seed)
                if issparse(sketch1):
                    sketch1 = sketch1.toarray()
                if issparse(sketch2):
                    sketch2 = sketch2.toarray()
                assert_equal(sketch1, sketch2)
    def test_sketch_preserves_frobenius_norm(self):
        """Frobenius norm is preserved within 10% for every seed."""
        n_errors = 0
        for A in self.test_matrices:
            if issparse(A):
                true_norm = norm(A)
            else:
                true_norm = np.linalg.norm(A)
            for seed in self.seeds:
                sketch = clarkson_woodruff_transform(A, self.n_sketch_rows, seed=seed)
                if issparse(sketch):
                    sketch_norm = norm(sketch)
                else:
                    sketch_norm = np.linalg.norm(sketch)
                if (np.abs((true_norm - sketch_norm)) > (0.1 * true_norm)):
                    n_errors += 1
        assert_((n_errors == 0))
    def test_sketch_preserves_vector_norm(self):
        """Vector 2-norm is preserved within 50% at the theoretical sketch
        size 2 / (delta * eps**2) with delta=0.01, eps=0.5."""
        n_errors = 0
        n_sketch_rows = int(np.ceil((2.0 / (0.01 * (0.5 ** 2)))))
        true_norm = np.linalg.norm(self.x)
        for seed in self.seeds:
            sketch = clarkson_woodruff_transform(self.x, n_sketch_rows, seed=seed)
            sketch_norm = np.linalg.norm(sketch)
            if (np.abs((true_norm - sketch_norm)) > (0.5 * true_norm)):
                n_errors += 1
        assert_((n_errors == 0))
def _group_params(agg, df):
params = [re.sub('\\[\\d+\\]$', '', x, 1) for x in df.keys() if x.endswith(']')]
param_counts = Counter(params)
for (param_name, count) in param_counts.items():
df[param_name] = agg([df['{}[{}]'.format(param_name, i)] for i in range(1, count)])
return df |
def test_set_action_space(as_custom):
    """Custom bounds must be stored and normalization must map them correctly.

    Fix: the last assertion never called ``.all`` — asserting the bound method
    object itself, which is always truthy, so the lower-bound normalization was
    effectively unchecked.
    """
    as_custom.set_action_space(custom_action_lower_bound, custom_action_upper_bound)
    assert (as_custom.get_action_space().low == custom_action_lower_bound).all()
    assert (as_custom.get_action_space().high == custom_action_upper_bound).all()
    assert (as_custom.normalize_action(custom_action_upper_bound) == custom_action_norm_upper_bound).all()
    assert (as_custom.normalize_action(custom_action_lower_bound) == custom_action_norm_lower_bound).all()
class FlaxXLMRobertaForMaskedLM(metaclass=DummyObject):
    """Placeholder class that raises an informative error when Flax is absent."""

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Fails fast with an import hint unless the flax backend is installed.
        requires_backends(self, ['flax'])
class Virus(Cell):
 """Spiked virus cell: feeds on ejected mass and pops oversized player cells."""
 def __init__(self, gameServer, owner, position, radius):
  Cell.__init__(self, gameServer, owner, position, radius)
  # cellType 2 marks this node as a virus to the rest of the engine.
  self.cellType = 2
  self.isSpiked = True
  self.isMotherCell = False
  self.color = Color(42, 255, 42)
 def canEat(self, cell):
  # Only feed while the global virus count is below the configured cap.
  # Viruses eat only ejected mass (cellType 3); otherwise falls through and
  # returns None (falsy).
  if (len(self.gameServer.nodesVirus) < self.gameServer.config.virusMaxAmount):
   return (cell.cellType == 3)
 def onEat(self, prey):
  # New radius is sqrt of the summed sizes (``size`` presumably holds a
  # squared-radius/mass quantity — TODO confirm against Cell.setRadius).
  self.setRadius(math.sqrt((self.size + prey.size)))
  if (self.radius >= self.gameServer.config.virusMaxRadius):
   # Grown past the limit: shrink back and shoot a new virus along the
   # direction the prey was travelling.
   self.setRadius(self.gameServer.config.virusMinRadius)
   self.gameServer.shootVirus(self, prey.boostDirection.angle())
 def onEaten(self, cell):
  # Called when a player cell eats this virus: pop the eater into pieces.
  if (not cell.owner):
   return
  config = self.gameServer.config
  # Remaining split capacity; virusMaxCells falls back to playerMaxCells
  # when unset (0/None).
  cellsLeft = ((config.virusMaxCells or config.playerMaxCells) - len(cell.owner.cells))
  if (cellsLeft <= 0):
   return
  # Minimum mass for a popped piece, derived from the configured radius.
  splitMin = ((config.virusMaxPoppedRadius ** 2) / 100)
  cellMass = cell.mass
  splits = []
  if config.virusEqualPopRadius:
   # Equal-size pop: as many equal pieces as mass/capacity allow; one
   # extra share stays with the popped cell (hence 1 + splitCount).
   splitCount = min(math.floor((cellMass / splitMin)), cellsLeft)
   splitMass = (cellMass / (1 + splitCount))
   splits = (splits + [splitMass for _ in range(splitCount)])
   return self.explodeCell(cell, splits)
  if ((cellMass / cellsLeft) < splitMin):
   # Not enough mass for the full capacity: pick the largest power-of-two
   # piece count whose pieces still exceed splitMin.
   splitCount = 2
   splitMass = (cellMass / splitCount)
   while ((splitMass > splitMin) and ((splitCount * 2) < cellsLeft)):
    splitCount *= 2
    splitMass = (cellMass / splitCount)
   splitMass = (cellMass / (splitCount + 1))
   splits = (splits + [splitMass for _ in range(splitCount)])
   splitCount = 0
   return self.explodeCell(cell, splits)
  # Default pop: start from half the mass and hand out progressively
  # smaller (halved) pieces until capacity or mass runs out.
  splitMass = (cellMass / 2)
  massLeft = (cellMass / 2)
  while (cellsLeft > 1):
   cellsLeft -= 1
   if ((massLeft / cellsLeft) < splitMin):
    # Remainder too small to keep halving: spread it evenly instead.
    splitMass = (massLeft / cellsLeft)
    splits = (splits + [splitMass for _ in range(cellsLeft)])
    cellsLeft = 0
   # Halve the piece until it fits in the remaining mass.
   while ((splitMass >= massLeft) and (cellsLeft > 0)):
    splitMass /= 2
   splits.append(splitMass)
   massLeft -= splitMass
  self.explodeCell(cell, splits)
 def explodeCell(self, cell, splits):
  # Perform one split per computed piece, each at a random angle.
  for s in splits:
   self.gameServer.splitPlayerCell(cell.owner, cell, ((2 * math.pi) * random.random()), s)
 def onAdd(self, gameServer):
  # Track this virus in the server's virus node list.
  gameServer.nodesVirus.append(self)
 def onRemove(self, gameServer):
  # Drop from the virus node list if still present.
  if (self in gameServer.nodesVirus):
   gameServer.nodesVirus.remove(self)
def export_meta_graph(export_dir, worker_id):
    """Write worker *worker_id*'s TensorFlow metagraph (as text) under *export_dir*."""
    target_path = os.path.join(export_dir, ('worker-%d_metagraph' % worker_id))
    parallax_log.debug(('Exporting graph of worker %d to %s' % (worker_id, target_path)))
    tf.train.export_meta_graph(target_path, as_text=True)
def register_Ns3SimpleRefCount__Ns3PbbAddressBlock_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbAddressBlock__gt___methods(root_module, cls):
    """Register constructors for the SimpleRefCount<PbbAddressBlock> binding."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor taking a const reference.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter< ns3::PbbAddressBlock > > const &', 'o')])
def test_interpretation():
    """Checks for ``pyhf.compat.interpret_rootname`` across modifier kinds.

    Fix: removed a duplicated 'Lumi' assertion block (exact copy-paste of the
    two preceding lines).
    """
    # Per-element gamma modifier: constrained-ness is not a simple boolean.
    interp = pyhf.compat.interpret_rootname('gamma_foo_0')
    assert (interp['constrained'] == 'n/a')
    assert (not interp['is_scalar'])
    assert (interp['name'] == 'foo')
    assert (interp['element'] == 0)
    # Constrained scalar alpha modifier.
    interp = pyhf.compat.interpret_rootname('alpha_foo')
    assert interp['constrained']
    assert interp['is_scalar']
    assert (interp['name'] == 'foo')
    assert (interp['element'] == 'n/a')
    # Unconstrained scalar (e.g. the POI).
    interp = pyhf.compat.interpret_rootname('mu')
    assert (not interp['constrained'])
    assert interp['is_scalar']
    assert (interp['name'] == 'mu')
    assert (interp['element'] == 'n/a')
    # ROOT's 'Lumi' maps to pyhf's lowercase 'lumi'.
    interp = pyhf.compat.interpret_rootname('Lumi')
    assert (interp['name'] == 'lumi')
    # Malformed rootnames must raise.
    with pytest.raises(ValueError):
        pyhf.compat.interpret_rootname('gamma_foo')
    with pytest.raises(ValueError):
        pyhf.compat.interpret_rootname('alpha_')
def ground_formulas(match_parse, formulas, references=None):
    """Ground the singular variables of *formulas* and keep the best grounding.

    Fix: ``references`` defaulted to a shared mutable ``{}`` (the classic
    mutable-default pitfall); it now defaults to ``None`` and a fresh dict is
    created per call. All existing call patterns behave identically.

    Returns the list of grounded formulas for the variable assignment whose
    summed evaluation confidence is highest.
    """
    if (references is None):
        references = {}
    core_parse = match_parse.graph_parse.core_parse
    # All variables that must each be grounded to a single entity.
    singular_variables = set(itertools.chain(*[_get_singular_variables(formula) for formula in formulas]))
    # For each variable, collect its candidate groundings.
    grounded_variable_sets = []
    for variable in singular_variables:
        grounded_variable = _ground_variable(match_parse, variable, references)
        if isinstance(grounded_variable, FormulaNode):
            grounded_variable_sets.append([grounded_variable])
        else:
            grounded_variable_sets.append(grounded_variable.children)
    scores = []
    grounded_formulas_list = []
    # Evaluate every combination of candidate groundings and score it by the
    # sum of evaluation confidences (None evaluations contribute nothing).
    combinations = list(itertools.product(*grounded_variable_sets))
    for combination in combinations:
        grounded_formulas = _combination_to_grounded_formulas(match_parse, formulas, combination, singular_variables)
        local_scores = [core_parse.evaluate(f) for f in grounded_formulas]
        scores.append(sum((s.conf for s in local_scores if (s is not None))))
        grounded_formulas_list.append(grounded_formulas)
    (max_score, max_gf) = max(zip(scores, grounded_formulas_list), key=(lambda pair: pair[0]))
    return max_gf
def simGetDoubleSignal(signalName):
    """Read a double-valued named signal from the simulator.

    Returns a ``(return_code, value)`` tuple; the return code is also checked
    via ``_check_return`` before returning.
    """
    out_ptr = ffi.new('double*')
    code = lib.simGetDoubleSignal(signalName.encode('ascii'), out_ptr)
    _check_return(code)
    return (code, out_ptr[0])
def eval_np(module, *args, **kwargs):
    """Run a torch module on numpy inputs and convert the outputs back to numpy."""
    tensor_args = [torch_ify(arg) for arg in args]
    tensor_kwargs = {key: torch_ify(value) for (key, value) in kwargs.items()}
    result = module(*tensor_args, **tensor_kwargs)
    return elem_or_tuple_to_numpy(result)
def register_methods(root_module):
 """Register Python bindings for every wrapped ns-3 type in *root_module*.

 Auto-generated PyBindGen glue: each call binds the methods of one C++
 class/struct; registration order follows the type hierarchy (value types
 first, then Object-derived and callback template instantiations).
 """
 register_Ns3Address_methods(root_module, root_module['ns3::Address'])
 register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
 register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
 register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
 register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
 register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
 register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
 register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
 register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
 register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
 register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
 register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
 register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
 register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
 register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
 register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
 register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
 register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
 register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
 register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
 register_Ns3DeviceEnergyModelContainer_methods(root_module, root_module['ns3::DeviceEnergyModelContainer'])
 register_Ns3DeviceEnergyModelHelper_methods(root_module, root_module['ns3::DeviceEnergyModelHelper'])
 register_Ns3EnergySourceHelper_methods(root_module, root_module['ns3::EnergySourceHelper'])
 register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
 register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
 register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
 register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
 register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
 register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
 register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
 register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
 register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
 register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
 register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
 register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
 register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
 register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
 register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
 register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
 register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
 register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
 register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
 register_Ns3Reservation_methods(root_module, root_module['ns3::Reservation'])
 register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
 register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
 register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
 register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
 register_Ns3Tap_methods(root_module, root_module['ns3::Tap'])
 register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
 register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
 register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
 register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
 register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
 register_Ns3UanAddress_methods(root_module, root_module['ns3::UanAddress'])
 register_Ns3UanHelper_methods(root_module, root_module['ns3::UanHelper'])
 register_Ns3UanModesList_methods(root_module, root_module['ns3::UanModesList'])
 register_Ns3UanPacketArrival_methods(root_module, root_module['ns3::UanPacketArrival'])
 register_Ns3UanPdp_methods(root_module, root_module['ns3::UanPdp'])
 register_Ns3UanPhyListener_methods(root_module, root_module['ns3::UanPhyListener'])
 register_Ns3UanTxMode_methods(root_module, root_module['ns3::UanTxMode'])
 register_Ns3UanTxModeFactory_methods(root_module, root_module['ns3::UanTxModeFactory'])
 register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
 register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
 register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
 register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
 register_Ns3AcousticModemEnergyModelHelper_methods(root_module, root_module['ns3::AcousticModemEnergyModelHelper'])
 register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
 register_Ns3Header_methods(root_module, root_module['ns3::Header'])
 register_Ns3Object_methods(root_module, root_module['ns3::Object'])
 register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
 register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
 register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
 register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
 register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
 register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
 register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
 register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
 register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
 register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
 register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
 register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
 register_Ns3Time_methods(root_module, root_module['ns3::Time'])
 register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
 register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
 register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
 register_Ns3UanHeaderCommon_methods(root_module, root_module['ns3::UanHeaderCommon'])
 register_Ns3UanHeaderRcAck_methods(root_module, root_module['ns3::UanHeaderRcAck'])
 register_Ns3UanHeaderRcCts_methods(root_module, root_module['ns3::UanHeaderRcCts'])
 register_Ns3UanHeaderRcCtsGlobal_methods(root_module, root_module['ns3::UanHeaderRcCtsGlobal'])
 register_Ns3UanHeaderRcData_methods(root_module, root_module['ns3::UanHeaderRcData'])
 register_Ns3UanHeaderRcRts_methods(root_module, root_module['ns3::UanHeaderRcRts'])
 register_Ns3UanMac_methods(root_module, root_module['ns3::UanMac'])
 register_Ns3UanMacAloha_methods(root_module, root_module['ns3::UanMacAloha'])
 register_Ns3UanMacCw_methods(root_module, root_module['ns3::UanMacCw'])
 register_Ns3UanMacRc_methods(root_module, root_module['ns3::UanMacRc'])
 register_Ns3UanMacRcGw_methods(root_module, root_module['ns3::UanMacRcGw'])
 register_Ns3UanNoiseModel_methods(root_module, root_module['ns3::UanNoiseModel'])
 register_Ns3UanNoiseModelDefault_methods(root_module, root_module['ns3::UanNoiseModelDefault'])
 register_Ns3UanPhy_methods(root_module, root_module['ns3::UanPhy'])
 register_Ns3UanPhyCalcSinr_methods(root_module, root_module['ns3::UanPhyCalcSinr'])
 register_Ns3UanPhyCalcSinrDefault_methods(root_module, root_module['ns3::UanPhyCalcSinrDefault'])
 register_Ns3UanPhyCalcSinrDual_methods(root_module, root_module['ns3::UanPhyCalcSinrDual'])
 register_Ns3UanPhyCalcSinrFhFsk_methods(root_module, root_module['ns3::UanPhyCalcSinrFhFsk'])
 register_Ns3UanPhyDual_methods(root_module, root_module['ns3::UanPhyDual'])
 register_Ns3UanPhyGen_methods(root_module, root_module['ns3::UanPhyGen'])
 register_Ns3UanPhyPer_methods(root_module, root_module['ns3::UanPhyPer'])
 register_Ns3UanPhyPerCommonModes_methods(root_module, root_module['ns3::UanPhyPerCommonModes'])
 register_Ns3UanPhyPerGenDefault_methods(root_module, root_module['ns3::UanPhyPerGenDefault'])
 register_Ns3UanPhyPerUmodem_methods(root_module, root_module['ns3::UanPhyPerUmodem'])
 register_Ns3UanPropModel_methods(root_module, root_module['ns3::UanPropModel'])
 register_Ns3UanPropModelIdeal_methods(root_module, root_module['ns3::UanPropModelIdeal'])
 register_Ns3UanPropModelThorp_methods(root_module, root_module['ns3::UanPropModelThorp'])
 register_Ns3UanTransducer_methods(root_module, root_module['ns3::UanTransducer'])
 register_Ns3UanTransducerHd_methods(root_module, root_module['ns3::UanTransducerHd'])
 register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
 register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
 register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
 register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
 register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
 register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
 register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
 register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
 register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
 register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
 register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
 register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
 register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
 register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
 register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
 register_Ns3DeviceEnergyModel_methods(root_module, root_module['ns3::DeviceEnergyModel'])
 register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
 register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
 register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
 register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
 register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
 register_Ns3EnergyHarvester_methods(root_module, root_module['ns3::EnergyHarvester'])
 register_Ns3EnergySource_methods(root_module, root_module['ns3::EnergySource'])
 register_Ns3EnergySourceContainer_methods(root_module, root_module['ns3::EnergySourceContainer'])
 register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
 register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
 register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
 register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
 register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
 register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
 register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
 register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
 register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
 register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
 register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
 register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
 register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
 register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
 register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
 register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
 register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
 register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
 register_Ns3MobilityModel_methods(root_module, root_module['ns3::MobilityModel'])
 register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
 register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
 register_Ns3Node_methods(root_module, root_module['ns3::Node'])
 register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
 register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
 register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
 register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
 register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
 register_Ns3PointerChecker_methods(root_module, root_module['ns3::PointerChecker'])
 register_Ns3PointerValue_methods(root_module, root_module['ns3::PointerValue'])
 register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
 register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
 register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
 register_Ns3UanChannel_methods(root_module, root_module['ns3::UanChannel'])
 register_Ns3UanModesListChecker_methods(root_module, root_module['ns3::UanModesListChecker'])
 register_Ns3UanModesListValue_methods(root_module, root_module['ns3::UanModesListValue'])
 register_Ns3UanNetDevice_methods(root_module, root_module['ns3::UanNetDevice'])
 register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
 register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
 register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
 register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
 register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
 register_Ns3AcousticModemEnergyModel_methods(root_module, root_module['ns3::AcousticModemEnergyModel'])
 register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
 register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
 register_Ns3CallbackImpl__Bool_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Double_Double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3MobilityModel__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::MobilityModel>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Double_Ns3UanTxMode_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3UanAddress_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::UanAddress, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3UanTxMode_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Packet__gt___Const_ns3UanAddress___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Packet>, const ns3::UanAddress &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Packet__gt___Double_Ns3UanTxMode_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Packet>, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Packet__gt___Double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Packet>, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Time_Ns3Time_Unsigned_int_Unsigned_int_Double_Unsigned_int_Double_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Time, ns3::Time, unsigned int, unsigned int, double, unsigned int, double, ns3::empty, ns3::empty >'])
 register_Ns3CallbackImpl__Void_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
 register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
 register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
 register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
 register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
 register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
 return
def _get_uid(name):
if ((getpwnam is None) or (name is None)):
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if (result is not None):
return result[2]
return None |
class BernoulliMLPRegressor(StochasticRegressor):
 """MLP regressor modeling binary outputs with a Bernoulli distribution.

 Fits p(y|x) by maximizing the log likelihood; after the first fit an
 optional KL trust region constrains each refit to stay close to the
 previous predictions.
 """
 def __init__(self, input_shape, output_dim, name='BernoulliMLPRegressor', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=tf.nn.sigmoid, output_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), optimizer=None, optimizer_args=None, tr_optimizer=None, tr_optimizer_args=None, use_trust_region=True, max_kl_step=0.01, normalize_inputs=True, layer_normalization=False):
  """Construct the regressor, its MLP model, and both optimizers."""
  super().__init__(input_shape, output_dim, name)
  self._use_trust_region = use_trust_region
  self._max_kl_step = max_kl_step
  self._normalize_inputs = normalize_inputs
  with tf.compat.v1.variable_scope(self._name, reuse=False) as vs:
   self._variable_scope = vs
  optimizer_args = (optimizer_args or dict())
  tr_optimizer_args = (tr_optimizer_args or dict())
  # Plain optimizer for unconstrained fits; trust-region optimizer is used
  # once a previous fit exists (see ``fit``).
  if (optimizer is None):
   self._optimizer = make_optimizer(LbfgsOptimizer, **optimizer_args)
  else:
   self._optimizer = make_optimizer(optimizer, **optimizer_args)
  if (tr_optimizer is None):
   self._tr_optimizer = make_optimizer(ConjugateGradientOptimizer, **tr_optimizer_args)
  else:
   self._tr_optimizer = make_optimizer(tr_optimizer, **tr_optimizer_args)
  self._first_optimized = False
  self.model = NormalizedInputMLPModel(input_shape, output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, layer_normalization=layer_normalization)
  self._dist = Bernoulli(output_dim)
  self._network = None
  self._initialize()
def _initialize(self):
input_var = tf.compat.v1.placeholder(tf.float32, shape=((None,) + self._input_shape))
with tf.compat.v1.variable_scope(self._variable_scope):
self._network = self.model.build(input_var)
ys_var = tf.compat.v1.placeholder(dtype=tf.float32, name='ys', shape=(None, self._output_dim))
old_prob_var = tf.compat.v1.placeholder(dtype=tf.float32, name='old_prob', shape=(None, self._output_dim))
y_hat = self._network.y_hat
old_info_vars = dict(p=old_prob_var)
info_vars = dict(p=y_hat)
mean_kl = tf.reduce_mean(self._dist.kl_sym(old_info_vars, info_vars))
loss = (- tf.reduce_mean(self._dist.log_likelihood_sym(ys_var, info_vars)))
predicted = (y_hat >= 0.5)
self._f_predict = tensor_utils.compile_function([input_var], predicted)
self._f_prob = tensor_utils.compile_function([input_var], y_hat)
self._optimizer.update_opt(loss=loss, target=self, inputs=[input_var, ys_var])
self._tr_optimizer.update_opt(loss=loss, target=self, inputs=[input_var, ys_var, old_prob_var], leq_constraint=(mean_kl, self._max_kl_step))
def fit(self, xs, ys):
if self._normalize_inputs:
self._network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._network.x_std.load((np.std(xs, axis=0, keepdims=True) + 1e-08))
if (self._use_trust_region and self._first_optimized):
old_prob = self._f_prob(xs)
inputs = [xs, ys, old_prob]
optimizer = self._tr_optimizer
else:
inputs = [xs, ys]
optimizer = self._optimizer
loss_before = optimizer.loss(inputs)
tabular.record('{}/LossBefore'.format(self._name), loss_before)
optimizer.optimize(inputs)
loss_after = optimizer.loss(inputs)
tabular.record('{}/LossAfter'.format(self._name), loss_after)
tabular.record('{}/dLoss'.format(self._name), (loss_before - loss_after))
self._first_optimized = True
def predict(self, xs):
return self._f_predict(xs)
def sample_predict(self, xs):
p = self._f_prob(xs)
return self._dist.sample(dict(p=p))
def predict_log_likelihood(self, xs, ys):
p = self._f_prob(xs)
return self._dist.log_likelihood(ys, dict(p=p))
def log_likelihood_sym(self, x_var, y_var, name=None):
with tf.compat.v1.variable_scope(self._variable_scope):
(prob, _, _) = self.model.build(x_var, name=name).outputs
return self._dist.log_likelihood_sym(y_var, dict(p=prob))
def dist_info_sym(self, input_var, state_info_vars=None, name=None):
with tf.compat.v1.variable_scope(self._variable_scope):
(prob, _, _) = self.model.build(input_var, name=name).outputs
return dict(prob=prob)
def recurrent(self):
return False
def vectorized(self):
return True
def distribution(self):
return self._dist
def __getstate__(self):
new_dict = super().__getstate__()
del new_dict['_f_predict']
del new_dict['_f_prob']
del new_dict['_network']
return new_dict
def __setstate__(self, state):
super().__setstate__(state)
self._initialize() |
def boost_get_version(self, dir):
    """Detect the Boost version string (e.g. '1_65_1') under *dir*.

    First tries to parse BOOST_LIB_VERSION out of the version header; if
    that fails for any reason (missing file, unreadable content, no match)
    falls back to compiling and running a small detection program.
    """
    re_but = re.compile('^#define\\s+BOOST_LIB_VERSION\\s+"(.*)"$', re.M)
    try:
        val = re_but.search(self.__boost_get_version_file(dir).read()).group(1)
    except Exception:
        # was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; Exception keeps the intended best-effort fallback
        # (search() returning None raises AttributeError; I/O errors too)
        val = self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[dir], execute=True, define_ret=True)
    return val
# NOTE(review): decorators restored from stripped residue (`_utils.test(...)`);
# confirm the module alias used for test_utils in this repo.
@test_utils.test(require=ti.extension.adstack)
def test_mixed_inner_loops():
    """Autodiff through a parallel outer loop with a serial inner loop."""
    x = ti.field(dtype=ti.f32, shape=(), needs_grad=True)
    arr = ti.field(dtype=ti.f32, shape=5)
    loss = ti.field(dtype=ti.f32, shape=(), needs_grad=True)

    # must be a Taichi kernel: `.grad()` below only exists on kernels
    @ti.kernel
    def mixed_inner_loops():
        for i in arr:
            loss[None] += ti.sin(x[None])
            for j in range(2):
                loss[None] += (ti.sin(x[None]) + 1.0)

    loss.grad[None] = 1.0
    x[None] = 0.0
    mixed_inner_loops()
    mixed_inner_loops.grad()
    # loss = 5 * (sin(0) + 2*(sin(0) + 1)) = 10; d(loss)/dx = 15*cos(0) = 15
    assert (loss[None] == 10.0)
    assert (x.grad[None] == 15.0)
# NOTE(review): `.parametrize(...)` residue lines were SyntaxErrors — restored
# the conventional `@pytest.mark.parametrize` decorators.
@pytest.mark.parametrize('estimator', [LinearRegression(), DumbEstimator()])
@pytest.mark.parametrize('sample_weight', [None, np.ones_like(y_toy)])
def test_fit_estimator(estimator: Any, sample_weight: Optional[NDArray]) -> None:
    """fit_estimator must return a fitted estimator for every estimator /
    sample_weight combination (including weightless fitting)."""
    estimator = fit_estimator(estimator, X_toy, y_toy, sample_weight)
    check_is_fitted(estimator)
def test_capacitor_error_massages():
    """A capacitor constructed with a non-farad unit must raise ValueError
    with the documented message."""
    expected = 'The input unit for the capacitor is not correct. Look at the documentation for the correct input format.'
    with pytest.raises(ValueError, match=expected):
        Capacitor(10, 'H')
def test_tokenizer():
    """Character tokenizer must round-trip text through encode/decode."""
    char_tokenizer = CharacterTokenizer()
    # constructing the default phoneme tokenizer must also succeed
    phone_tokenizer = default_phoneme_tokenizer()
    text = 'HELLO WORLD'
    encoded = char_tokenizer.encode(text)
    decoded = char_tokenizer.decode(encoded)
    assert isinstance(encoded, list)
    assert decoded == text
def cast_to_iterable(value):
    """Return *value* unchanged if it is already a list or tuple,
    otherwise wrap it in a single-element list."""
    return value if isinstance(value, (list, tuple)) else [value]
def simRMLPos(dofs, smallestTimeStep, flags, currentPosVelAccel, maxVelAccelJerk, selection, targetPosVel):
    # Thin cffi wrapper around the C-side simRMLPos call (Reflexxes-style
    # position-based motion planning).  The time step is coerced to a C
    # double; the trailing auxiliary-data pointer is always NULL here.
    # Returns the handle produced by the C call after _check_return has
    # validated it (presumably raising on negative/error codes — confirm
    # against _check_return's definition).
    smallestTimeStep = ffi.cast('double', smallestTimeStep)
    handle = lib.simRMLPos(dofs, smallestTimeStep, flags, currentPosVelAccel, maxVelAccelJerk, selection, targetPosVel, ffi.NULL)
    _check_return(handle)
    return handle
def get_evaluation_metrics(fs, fp, engine):
    """Score predicted logical forms against gold ones.

    Returns (lf_acc, ex_acc): exact-match (logical-form) accuracy and
    execution (correctness) accuracy, both as fractions in [0, 1].
    Raises ZeroDivisionError on empty input, as before.
    """
    assert (len(fs) == len(fp))
    # fix: these accumulators were referenced without ever being
    # initialized, which raised NameError on the first append
    grades = []
    exact_match = []
    for (ls, lp) in tqdm(zip(fs, fp), total=len(fs)):
        (correct, match) = eval_fun(ls, lp, engine)
        grades.append(correct)
        exact_match.append(match)
    lf_acc = (sum(exact_match) / len(exact_match))
    ex_acc = (sum(grades) / len(grades))
    return (lf_acc, ex_acc)
def get_loader(data_args, transform_args, split, task_sequence, su_frac, nih_frac, batch_size, is_training=False, shuffle=False, study_level=False, frontal_lateral=False, return_info_dict=False):
    """Build a DataLoader over the SU and/or NIH chest-x-ray datasets.

    su_frac / nih_frac select the fraction of each dataset to use; a value
    of 0 excludes that dataset entirely.  When both datasets are included
    they are concatenated, which is only supported for image-level (not
    study-level) loading.
    """
    if is_training:
        # training always follows the configured study-level policy
        study_level = data_args.train_on_studies
    datasets = []
    if (su_frac != 0):
        datasets.append(SUDataset(data_args.su_data_dir, transform_args, split=split, is_training=is_training, tasks_to=task_sequence, frac=su_frac, study_level=study_level, frontal_lateral=frontal_lateral, toy=data_args.toy, return_info_dict=return_info_dict))
    if (nih_frac != 0):
        datasets.append(NIHDataset(data_args.nih_data_dir, transform_args, split=split, is_training=is_training, tasks_to=task_sequence, frac=nih_frac, toy=data_args.toy))
    if (len(datasets) == 2):
        assert (study_level is False), "Currently, you can't create concatenated datasets when training on studies"
        dataset = ConcatDataset(datasets)
    else:
        dataset = datasets[0]
    if study_level:
        # studies have a variable number of images, so pad them per batch
        collate_fn = PadCollate(dim=0)
        loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=8, collate_fn=collate_fn)
    else:
        loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=8)
    return loader
class VectorFieldDualFreeModule(DiffFormFreeModule):
    """Free module of 1-forms, the dual of a free module of vector fields.

    Specialization of DiffFormFreeModule to degree 1.
    """

    def __init__(self, vector_field_module):
        # 1-forms are exactly degree-1 differential forms over the
        # given vector-field module
        DiffFormFreeModule.__init__(self, vector_field_module, 1)

    def tensor_type(self):
        # a 1-form is a tensor field of type (0, 1)
        return (0, 1)
def test_getSubscription10():
    """POSTing an NGSI10 updateContext payload must yield HTTP 200 and a
    JSON-decodable body."""
    endpoint = brokerIp + '/ngsi10/updateContext'
    headers = {'Content-Type': 'application/json'}
    response = requests.post(endpoint, data=json.dumps(data_ngsi10.subdata17), headers=headers)
    normalized = response.content.decode('utf8').replace("'", '"')
    # parsing implicitly checks the broker returned valid JSON
    parsed = json.loads(normalized)
    assert response.status_code == 200
class Evaluator(abc.ABC):
    """Base class for evaluators that apply a list of pymia metrics to
    prediction/reference pairs and accumulate outcomes in ``results``."""

    def __init__(self, metrics: typing.List[pymia_metric.Metric]):
        # metrics: metric instances to evaluate for each pair
        self.metrics = metrics
        # results: accumulated outcomes; element type is defined by subclasses
        self.results = []

    def evaluate(self, prediction: typing.Union[(sitk.Image, np.ndarray)], reference: typing.Union[(sitk.Image, np.ndarray)], id_: str, **kwargs):
        # Subclasses implement the actual metric evaluation for one pair,
        # identified by id_ (not an abstractmethod here, so the base class
        # remains instantiable and only fails when evaluate() is called).
        raise NotImplementedError

    def clear(self):
        # Rebinds rather than mutates, so previously handed-out references
        # to the old results list are left intact.
        self.results = []
# NOTE(review): the decorators below were restored from stripped residue
# ("_torch" and a bare tuple); the `(reason=...)` lines inside the class were
# SyntaxErrors and are restored to `@unittest.skip(reason=...)`.
@require_torch
@unittest.skipIf(not is_torch_greater_or_equal_than_1_10, 'BridgeTower is only available in torch v1.10+')
class BridgeTowerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the BridgeTower family."""

    all_model_classes = ((BridgeTowerModel, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerForContrastiveLearning) if is_torch_available() else ())
    pipeline_model_mapping = ({'feature-extraction': BridgeTowerModel} if is_torch_available() else {})
    is_training = False
    test_headmasking = False
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    has_attentions = False

    @unittest.skip(reason='Does not work on the tiny model as we keep hitting edge cases.')
    def test_cpu_offload(self):
        pass

    @unittest.skip(reason='Does not work on the tiny model as we keep hitting edge cases.')
    def test_disk_offload(self):
        pass

    @unittest.skip(reason='Does not work on the tiny model as we keep hitting edge cases.')
    def test_model_parallelism(self):
        pass

    def extract_output(self, outputs, model_class):
        """Pick the comparable tensor out of a model's output dict."""
        return (outputs['pooler_output'] if (model_class == 'BridgeTowerModel') else outputs['logits'])

    def setUp(self):
        self.model_tester = BridgeTowerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BridgeTowerConfig, hidden_size=37, vocab_size=99)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_and_text_retrieval(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_and_text_retrieval(*config_and_inputs)

    def test_for_masked_language_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_language_modeling(*config_and_inputs)

    def test_model_from_pretrained(self):
        for model_name in BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BridgeTowerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_save_load_fast_init_from_base(self):
        super().test_save_load_fast_init_from_base()

    def test_save_load(self):
        """Outputs must be (nearly) identical after a save/load round trip."""
        (config, input_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**input_dict)
            out_2 = self.extract_output(outputs, model_class.__name__)
            out_2 = out_2.cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                with torch.no_grad():
                    after_outputs = model(**input_dict)
                out_1 = self.extract_output(after_outputs, model_class.__name__)
                out_1 = out_1.cpu().numpy()
                out_1[np.isnan(out_1)] = 0
            max_diff = np.amax(np.abs((out_1 - out_2)))
            self.assertLessEqual(max_diff, 1e-05)

    def test_hidden_states_output(self):
        """Shape checks for the text/vision/cross hidden-state triples."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            (hidden_states_text, hidden_states_vision, hidden_states_cross) = (outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states)
            expected_num_layers = self.model_tester.expected_num_hidden_layers
            self.assertEqual(sum((len(hidden_states_text), len(hidden_states_vision), len(hidden_states_cross))), expected_num_layers)
            seq_length = self.model_tester.text_model_tester.seq_length
            num_image_features = self.model_tester.vision_model_tester.num_image_features
            self.assertListEqual(list(hidden_states_text[0].shape[(- 2):]), [seq_length, self.model_tester.text_model_tester.hidden_size])
            self.assertListEqual(list(hidden_states_vision[0].shape), [num_image_features, 1, self.model_tester.vision_model_tester.hidden_size])
            self.assertListEqual(list(hidden_states_cross[0][0].shape[(- 2):]), [seq_length, self.model_tester.text_model_tester.hidden_size])
            self.assertListEqual(list(hidden_states_cross[0][1].shape[(- 2):]), [num_image_features, self.model_tester.vision_model_tester.hidden_size])
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # check via the per-call flag and via the config flag
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        """Gradients must flow back to retained hidden states (and attentions)."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0]
        hidden_states = outputs.hidden_states[0][0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0][0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_initialization(self):
        """With zeroed init ranges every parameter mean must be 0 or 1."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for (name, param) in model.named_parameters():
                if param.requires_grad:
                    if (name == 'logit_scale'):
                        self.assertAlmostEqual(param.data.item(), config.logit_scale_init_value, delta=0.001, msg=f'Parameter {name} of model {model_class} seems not properly initialized')
                    else:
                        # fix: was `* .0 ... / .0` (0.0/0.0 -> NaN, so the
                        # assertion could never pass); round to 9 decimals
                        self.assertIn(((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized')

    @unittest.skip(reason='Bridge Tower does not have input/output embeddings. So this test is not applicable.')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='Bridge Tower does not have input/output embeddings. Thus this test is not applicable.')
    def test_inputs_embeds(self):
        pass
def onehot_from_logits(logits, eps=0.0):
    """Greedy one-hot encoding of *logits*, with epsilon-greedy exploration.

    With probability ``eps`` a row is replaced by a uniformly random one-hot
    action; ``eps == 0`` is fully deterministic (argmax with ties kept).
    """
    best = (logits == logits.max(1, keepdim=True)[0]).float()
    if eps == 0.0:
        return best
    # one uniformly random one-hot row per batch element
    random_rows = Variable(
        torch.eye(logits.shape[1])[[np.random.choice(range(logits.shape[1]), size=logits.shape[0])]],
        requires_grad=False)
    draws = torch.rand(logits.shape[0])
    return torch.stack([best[i] if r > eps else random_rows[i] for i, r in enumerate(draws)])
def minimal_grid(x, y, tol=1e-06, rel=False):
    """Greedily pick a minimal subset of samples whose cubic-spline fit
    reproduces *y* everywhere within *tol*.

    ``tol`` may also be a callable ``(x, y, y_greedy) -> index or None``
    that chooses the next sample to include.  With ``rel=True`` the
    tolerance is scaled by ``max(|y|)`` (which must be nonzero).
    Returns a boolean mask over *x* marking the retained samples.
    """
    import numpy as np
    from scipy.interpolate import CubicSpline as spline
    deg = 3
    if callable(tol):
        pick_next = tol
    else:
        if rel:
            y_scale = np.max(np.abs(y))
            if y_scale == 0.0:
                raise ValueError('All input `y` data samples are zero.')
            tol *= y_scale

        def pick_next(x, y, y_greedy):
            # index of the worst remaining error, or None once within tol
            deviations = np.abs(y - y_greedy)
            worst = np.argmax(deviations)
            return None if deviations[worst] < tol else worst

    # seed the selection with deg+1 (roughly) evenly spaced samples
    mask = np.zeros(len(x), dtype=bool)
    mask[np.linspace(0, len(x) - 1, num=deg + 1, dtype=int)] = True
    for _ in range(len(x)):
        fit = spline(x[mask], y[mask])
        candidate = pick_next(x, y, fit(x))
        if candidate is None:
            break
        mask[candidate] = True
    return mask
def generate_keypair(pubkey_path: PathLike, pem_path: PathLike):
    """Generate a 4096-bit RSA keypair.

    Writes the OpenSSH-format public key to *pubkey_path* and the
    PEM-encoded (unencrypted) private key to *pem_path*, then restricts
    the private key to owner read/write.
    """
    key = rsa.generate_private_key(backend=crypto_default_backend(), public_exponent=65537, key_size=4096)
    pem_bytes = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.TraditionalOpenSSL,
        crypto_serialization.NoEncryption(),
    )
    ssh_bytes = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH,
    )
    Path(pubkey_path).write_bytes(ssh_bytes)
    Path(pem_path).write_bytes(pem_bytes)
    # 0o600 (decimal 384): private key readable/writable by owner only
    chmod(pem_path, 0o600)
class CNNModel(Model):
    """Garage TF Model wrapping a plain convolutional network (see ``cnn``)."""

    def __init__(self, filter_dims, num_filters, strides, padding, name=None, hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.glorot_uniform_initializer(), hidden_b_init=tf.zeros_initializer()):
        # filter_dims / num_filters / strides are parallel per-layer specs;
        # padding is the conv padding mode (presumably 'SAME'/'VALID' —
        # confirm against the cnn() helper).
        super().__init__(name)
        self._filter_dims = filter_dims
        self._num_filters = num_filters
        self._strides = strides
        self._padding = padding
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init

    def _build(self, state_input, name=None):
        # Build the symbolic CNN on state_input and return its output tensor.
        # `name` is accepted for Model API compatibility but unused here.
        return cnn(input_var=state_input, filter_dims=self._filter_dims, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, num_filters=self._num_filters, strides=self._strides, padding=self._padding, name='cnn')
def val_epoch(model, data_loader, epoch, args, summary, device):
    """Run one validation epoch and return (mean IoU, mean AP).

    Logs per-batch loss/IoU/AP scalars to `summary` and visualizes the
    predictions (separately flagging batches with IoU < 0.5).
    """
    model.eval()
    iterator = tqdm(data_loader)
    # fix: removed unused mF1_ac accumulator (was never filled or returned)
    (mIoU_ac, mAP_ac) = ([], [])
    for (i, (image, mask)) in enumerate(iterator):
        image = image.to(device)
        mask = mask.to(device)
        pred_mask_ac = model(image, args.iter)
        # upsample the prediction to the ground-truth resolution
        pred_mask_ac = F.interpolate(pred_mask_ac, size=mask.shape[1:], mode='bilinear')
        loss_masks_ac = F.mse_loss(pred_mask_ac.squeeze(1), mask)
        # IoU on the binarized prediction (threshold 0.2)
        iou_ac = get_iou(pred_mask_ac.gt(0.2), mask.byte())
        ap_ac = get_ap_scores(pred_mask_ac, mask)
        mIoU_ac += iou_ac
        mAP_ac += ap_ac
        iterator.set_description('(val | {}) Epoch [{epoch}/{epochs}] :: Loss AC {loss_ac:.4f}'.format(((args.checkname + '_') + args.exp), epoch=(epoch + 1), epochs=args.epochs, loss_ac=loss_masks_ac.item()))
        global_step = (((epoch // args.eval_rate) * len(data_loader)) + i)
        summary.add_scalar('val/loss_ac', loss_masks_ac.item(), global_step)
        summary.add_scalar('val/iou_ac', np.mean(iou_ac), global_step)
        summary.add_scalar('val/ap_ac', np.mean(ap_ac), global_step)
        # indices of poorly segmented samples in this batch
        ind = np.argwhere((np.array(iou_ac) < 0.5)).flatten().tolist()
        summary.visualize_image('val', image, mask.unsqueeze(1)[ind], pred_mask_ac[ind], pred_mask_ac[ind], global_step)
        if ind:
            summary.visualize_image('val_BAD', image[ind], mask.unsqueeze(1)[ind], pred_mask_ac[ind], pred_mask_ac[ind], global_step)
    return (np.mean(mIoU_ac), np.mean(mAP_ac))
def update_datasplits(cfg):
    """Normalize cfg.data.sources / cfg.data.targets in place.

    Each must be a list/tuple; a single nested list (e.g. [[a, b]]) is
    unwrapped to its inner list ([a, b]).  More than one nested list is
    rejected.
    """
    assert isinstance(cfg.data.sources, (tuple, list))
    # fix: this assert previously re-checked `sources`; it must check targets
    assert isinstance(cfg.data.targets, (tuple, list))
    if isinstance(cfg.data.sources[0], (tuple, list)):
        assert (len(cfg.data.sources) == 1)
        cfg.data.sources = cfg.data.sources[0]
    if isinstance(cfg.data.targets[0], (tuple, list)):
        assert (len(cfg.data.targets) == 1)
        cfg.data.targets = cfg.data.targets[0]
class FixedGridODESolver(metaclass=abc.ABCMeta):
    """Base class for ODE solvers on a fixed time grid.

    Subclasses supply `order` and `step_func`; this class handles grid
    construction, stepping, per-state transforms, and linear interpolation
    of the solution onto the requested output times.
    """

    def __init__(self, func, y0, grid_constructor=None, transforms=None):
        self.func = func
        self.y0 = y0
        if (grid_constructor is None):
            # default: integrate exactly on the requested output times
            grid_constructor = (lambda f, y0, t: t)
        self.grid_constructor = grid_constructor
        if (transforms is None):
            # default: identity transform per state component
            transforms = [(lambda x: x) for _ in range(len(y0))]
        self.transforms = transforms

    def order(self):
        """Order of the integration scheme; implemented by subclasses."""
        # fix: this method had no body at all (SyntaxError); docstring-only
        # body keeps it a no-op placeholder for subclasses to override

    def step_func(self, func, t, dt, y, u):
        """One integration step returning the increment dy (tuple);
        implemented by subclasses."""
        # fix: same missing-body repair as `order` above

    def integrate(self, t, u=None):
        """Integrate from y0 over output times t (must be increasing);
        returns a tuple of stacked solution tensors, one per state component."""
        _assert_increasing(t)
        if (u is None):
            u = ([None] * len(t))
        t = t.type_as(self.y0[0])
        time_grid = self.grid_constructor(self.func, self.y0, t)
        # the integration grid must span exactly the requested interval
        assert ((time_grid[0] == t[0]) and (time_grid[(- 1)] == t[(- 1)]))
        time_grid = time_grid.to(self.y0[0])
        solution = [self.y0]
        j = 1
        y0 = self.y0
        for (t0, t1) in zip(time_grid[:(- 1)], time_grid[1:]):
            dy = self.step_func(self.func, t0, (t1 - t0), y0, u=u[(j - 1)])
            # apply per-component transforms to the updated state
            y1 = tuple((trans((y0_ + dy_)) for (y0_, dy_, trans) in zip(y0, dy, self.transforms)))
            y0 = y1
            # emit every requested output time that falls inside this step
            while ((j < len(t)) and (t1 >= t[j])):
                solution.append(self._linear_interp(t0, t1, y0, y1, t[j]))
                j += 1
        return tuple(map(torch.stack, tuple(zip(*solution))))

    def _linear_interp(self, t0, t1, y0, y1, t):
        """Linearly interpolate the state between (t0, y0) and (t1, y1) at t."""
        if (t == t0):
            return y0
        if (t == t1):
            return y1
        (t0, t1, t) = (t0.to(y0[0]), t1.to(y0[0]), t.to(y0[0]))
        slope = tuple((((y1_ - y0_) / (t1 - t0)) for (y0_, y1_) in zip(y0, y1)))
        return tuple(((y0_ + (slope_ * (t - t0))) for (y0_, slope_) in zip(y0, slope)))
def process_file(bert_model, bert_tokenizer, fasttext_model, batch_size, language, ud_file, output_file):
    """Embed a UD treebank file with BERT and fastText and save the results.

    Runs five phases (tokenize, BERT-embed, re-merge subword tokens,
    fastText-embed, save) and writes three pickled outputs via
    `output_file % 'ud'|'fast'|'bert'`.  Intermediate structures are
    aggressively `del`-ed between phases to keep peak memory down.
    `language` is currently unused here — presumably kept for API symmetry;
    confirm against callers.
    """
    logging.info('Processing file {}'.format(ud_file))
    logging.info('PHASE ONE: reading file and tokenizing')
    (all_target_tokens, all_bert_tokens, all_bert2target_map, all_ud) = tokenize_ud(ud_file, bert_tokenizer)
    logging.info('PHASE TWO: padding, batching, and embedding for bert')
    all_bert_embeddings = embed_bert(all_bert_tokens, batch_size, bert_model, bert_tokenizer)
    logging.info('PHASE THREE: re-merging BERT tokens')
    (bert_embeddings, words) = combine_bert(all_target_tokens, all_bert2target_map, all_bert_tokens, all_bert_embeddings)
    del all_target_tokens, all_bert2target_map, all_bert_tokens, all_bert_embeddings
    logging.info('PHASE FOUR: getting fasttext embeddings')
    fast_embeddings = get_fasttext(fasttext_model, words)
    logging.info('PHASE FIVE: saving')
    output_data_raw = list(zip(bert_embeddings, fast_embeddings, all_ud, words))
    del bert_embeddings, fast_embeddings, all_ud, words
    # drop sentences whose BERT embeddings came back empty
    output_data = [(bert_embs, fast_embs, ud, words) for (bert_embs, fast_embs, ud, words) in output_data_raw if (bert_embs != [])]
    del output_data_raw
    output_ud = [(ud, words) for (_, _, ud, words) in output_data]
    output_bert = [(bert_embs, words) for (bert_embs, _, _, words) in output_data]
    output_fast = [(fast_embs, words) for (_, fast_embs, _, words) in output_data]
    del output_data
    util.write_data((output_file % 'ud'), output_ud)
    del output_ud
    util.write_data((output_file % 'fast'), output_fast)
    del output_fast
    util.write_data((output_file % 'bert'), output_bert)
    del output_bert
    logging.info('Completed {}'.format(ud_file))
def test_warm_start_equivalence():
    """Growing an ensemble via warm_start must predict exactly like an
    ensemble of the same final size trained in one shot."""
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    # grow from 5 to 10 estimators with warm starting
    incremental = EasyEnsembleClassifier(n_estimators=5, warm_start=True, random_state=3141)
    incremental.fit(X_train, y_train)
    incremental.set_params(n_estimators=10)
    incremental.fit(X_train, y_train)
    y1 = incremental.predict(X_test)
    # train the same-sized ensemble from scratch
    one_shot = EasyEnsembleClassifier(n_estimators=10, warm_start=False, random_state=3141)
    one_shot.fit(X_train, y_train)
    y2 = one_shot.predict(X_test)
    assert_allclose(y1, y2)
def test_workspace_poiless(datadir):
    """A workspace built from a POI-less spec must expose no POI name/index."""
    with open(datadir.joinpath('poiless.json'), encoding='utf-8') as fh:
        workspace_spec = json.load(fh)
    workspace = pyhf.Workspace(workspace_spec)
    built = workspace.model()
    assert built.config.poi_name is None
    assert built.config.poi_index is None
def test_calculate_indexes_when_indexes_supplied(msa_sampler):
    """Explicitly supplied indexes are returned unchanged, with last_i == -1."""
    supplied = [2, 3, 4, 5]
    # args: indexes, leader_length=1, max_len=5, rollover=False
    out_indexes, last_i = msa_sampler.calculate_indexes(supplied, 1, 5, False)
    assert out_indexes == [2, 3, 4, 5]
    assert last_i == -1
# NOTE(review): decorator restored from stripped residue (`_optimizer('adam', ...)`
# was a NameError at import time); fairseq registers optimizers this way.
@register_optimizer('adam', dataclass=FairseqAdamConfig)
class FairseqAdam(FairseqOptimizer):
    """Adam optimizer for fairseq; uses the fused CUDA kernel when available
    and falls back to the pure-PyTorch implementation on TPU or CPU."""

    def __init__(self, cfg: FairseqAdamConfig, params):
        super().__init__(cfg)
        fused_adam_cls = get_fused_adam_class()
        use_fused_adam = ((not getattr(cfg, 'use_old_adam', False)) and (fused_adam_cls is not None) and torch.cuda.is_available())
        if getattr(cfg, 'tpu', False):
            if self.cfg.fp16_adam_stats:
                raise NotImplementedError('--fp16-adam-stats is only supported on GPU')
            # on TPU, use the plain implementation
            self._optimizer = Adam(params, **self.optimizer_config)
        elif use_fused_adam:
            logger.info('using FusedAdam')
            self._optimizer = fused_adam_cls(params, use_fp16_stats=self.cfg.fp16_adam_stats, **self.optimizer_config)
        else:
            if self.cfg.fp16_adam_stats:
                raise NotImplementedError('--fp16-adam-stats is only supported with FusedAdamV1')
            self._optimizer = Adam(params, **self.optimizer_config)

    @property
    def optimizer_config(self):
        """Kwargs dict for the wrapped optimizer.

        fix: restored the @property decorator — __init__ unpacks this with
        `**self.optimizer_config`, which fails on a bound method.
        NOTE(review): eval() on cfg.adam_betas is unsafe on untrusted config
        strings; kept for compatibility with fairseq config handling.
        """
        return {'lr': (self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr), 'betas': (eval(self.cfg.adam_betas) if isinstance(self.cfg.adam_betas, str) else OmegaConf.to_container(self.cfg.adam_betas)), 'eps': self.cfg.adam_eps, 'weight_decay': self.cfg.weight_decay}

    def average_params(self):
        """All-reduce and average the Adam moment estimates across workers."""
        state_dict = self.optimizer.state_dict()
        total_gpus = float(dist.get_world_size())
        for (_, value) in state_dict['state'].items():
            # divide first, then sum-reduce: result is the global mean
            value['exp_avg'] /= total_gpus
            value['exp_avg_sq'] /= total_gpus
            dist.all_reduce(value['exp_avg'], op=dist.ReduceOp.SUM)
            dist.all_reduce(value['exp_avg_sq'], op=dist.ReduceOp.SUM)
def main():
    """Collect the benchmark config paths selected by CLI flags and write
    them, one per line, to args.out."""
    args = parse_args()
    benchmark_type = []
    if args.basic_arch:
        benchmark_type += basic_arch_root
    if args.datasets:
        benchmark_type += datasets_root
    if args.data_pipeline:
        benchmark_type += data_pipeline_root
    if args.nn_module:
        benchmark_type += nn_module_root
    special_model = args.model_options
    if (special_model is not None):
        benchmark_type += special_model
    config_dpath = 'configs/'
    benchmark_configs = []
    for cfg_root in benchmark_type:
        cfg_dir = osp.join(config_dpath, cfg_root)
        # fix: close the scandir iterator deterministically — it holds an
        # open directory handle that was previously only freed by the GC
        with os.scandir(cfg_dir) as entries:
            for entry in entries:
                config_path = osp.join(cfg_dir, entry.name)
                # keep only whitelisted configs, without duplicates
                if ((config_path in benchmark_pool) and (config_path not in benchmark_configs)):
                    benchmark_configs.append(config_path)
    print(f'Totally found {len(benchmark_configs)} configs to benchmark')
    with open(args.out, 'w') as f:
        for config in benchmark_configs:
            f.write((config + '\n'))
def subexpressions_list(f, pars=None):
    """Decompose symbolic functions into elementary subexpressions.

    Replays the fast_callable op-list of the expressions in `f` on a small
    stack machine, recording every intermediate value in `stackcomp` and
    every elementary operation (with its operands) in `detail`.  Returns
    the pair (stackcomp, detail).  Trig/inverse-trig calls also emit the
    auxiliary expressions their derivatives need (e.g. cos alongside sin).
    """
    from sage.functions.trig import sin, cos, arcsin, arctan, arccos
    variables = f[0].arguments()
    if (not pars):
        parameters = []
    else:
        parameters = pars
    # evaluation order: parameters first, then variables
    varpar = (list(parameters) + list(variables))
    F = symbolic_expression([i(*variables) for i in f]).function(*varpar)
    # flat list of stack-machine opcodes for all component functions
    lis = flatten([fast_callable(i, vars=varpar).op_list() for i in F], max_level=1)
    stack = []
    const = []
    stackcomp = []
    detail = []
    for i in lis:
        if (i[0] == 'load_arg'):
            # push the referenced variable/parameter
            stack.append(varpar[i[1]])
        elif (i[0] == 'ipow'):
            if (i[1] in NN):
                # natural-number power: expand into repeated multiplications
                basis = stack[(- 1)]
                for j in range((i[1] - 1)):
                    a = stack.pop((- 1))
                    detail.append(('mul', a, basis))
                    stack.append((a * basis))
                stackcomp.append(stack[(- 1)])
            else:
                # negative/non-integer exponent: keep as a single 'pow'
                detail.append(('pow', stack[(- 1)], i[1]))
                stack[(- 1)] = (stack[(- 1)] ** i[1])
                stackcomp.append(stack[(- 1)])
        elif (i[0] == 'load_const'):
            const.append(i[1])
            stack.append(i[1])
        elif (i == 'mul'):
            a = stack.pop((- 1))
            b = stack.pop((- 1))
            detail.append(('mul', a, b))
            stack.append((a * b))
            stackcomp.append(stack[(- 1)])
        elif (i == 'div'):
            # note operand order: top of stack is the divisor
            a = stack.pop((- 1))
            b = stack.pop((- 1))
            detail.append(('div', a, b))
            stack.append((b / a))
            stackcomp.append(stack[(- 1)])
        elif (i == 'add'):
            a = stack.pop((- 1))
            b = stack.pop((- 1))
            detail.append(('add', a, b))
            stack.append((a + b))
            stackcomp.append(stack[(- 1)])
        elif (i == 'pow'):
            # top of stack is the exponent
            a = stack.pop((- 1))
            b = stack.pop((- 1))
            detail.append(('pow', b, a))
            stack.append((b ** a))
            stackcomp.append(stack[(- 1)])
        elif ((i[0] == 'py_call') and (str(i[1]) == 'log')):
            a = stack.pop((- 1))
            detail.append(('log', a))
            stack.append(log(a))
            stackcomp.append(stack[(- 1)])
        elif ((i[0] == 'py_call') and (str(i[1]) == 'exp')):
            a = stack.pop((- 1))
            detail.append(('exp', a))
            stack.append(exp(a))
            stackcomp.append(stack[(- 1)])
        elif ((i[0] == 'py_call') and (str(i[1]) == 'sin')):
            # sin and cos are always recorded together (needed for derivatives)
            a = stack.pop((- 1))
            detail.append(('sin', a))
            detail.append(('cos', a))
            stackcomp.append(sin(a))
            stackcomp.append(cos(a))
            stack.append(sin(a))
        elif ((i[0] == 'py_call') and (str(i[1]) == 'cos')):
            a = stack.pop((- 1))
            detail.append(('sin', a))
            detail.append(('cos', a))
            stackcomp.append(sin(a))
            stackcomp.append(cos(a))
            stack.append(cos(a))
        elif ((i[0] == 'py_call') and (str(i[1]) == 'tan')):
            # tan is decomposed as sin/cos
            a = stack.pop((- 1))
            b = sin(a)
            c = cos(a)
            detail.append(('sin', a))
            detail.append(('cos', a))
            detail.append(('div', b, c))
            stackcomp.append(b)
            stackcomp.append(c)
            stackcomp.append((b / c))
            stack.append((b / c))
        elif ((i[0] == 'py_call') and (str(i[1]) == 'arctan')):
            # also emit 1 + a^2, the denominator of arctan's derivative
            a = stack.pop((- 1))
            detail.append(('mul', a, a))
            detail.append(('add', 1, (a * a)))
            detail.append(('atan', a))
            stackcomp.append((a * a))
            stackcomp.append((1 + (a * a)))
            stackcomp.append(arctan(a))
            stack.append(arctan(a))
        elif ((i[0] == 'py_call') and (str(i[1]) == 'arcsin')):
            # also emit sqrt(1 - a^2), from arcsin's derivative
            a = stack.pop((- 1))
            detail.append(('mul', a, a))
            detail.append(('mul', (- 1), (a * a)))
            detail.append(('add', 1, ((- a) * a)))
            detail.append(('pow', (1 - (a * a)), 0.5))
            detail.append(('asin', a))
            stackcomp.append((a * a))
            stackcomp.append(((- a) * a))
            stackcomp.append((1 - (a * a)))
            stackcomp.append(sqrt((1 - (a * a))))
            stackcomp.append(arcsin(a))
            stack.append(arcsin(a))
        elif ((i[0] == 'py_call') and (str(i[1]) == 'arccos')):
            # also emit -sqrt(1 - a^2), from arccos's derivative
            a = stack.pop((- 1))
            detail.append(('mul', a, a))
            detail.append(('mul', (- 1), (a * a)))
            detail.append(('add', 1, ((- a) * a)))
            detail.append(('pow', (1 - (a * a)), 0.5))
            detail.append(('mul', (- 1), sqrt((1 - (a * a)))))
            detail.append(('acos', a))
            stackcomp.append((a * a))
            stackcomp.append(((- a) * a))
            stackcomp.append((1 - (a * a)))
            stackcomp.append(sqrt((1 - (a * a))))
            stackcomp.append((- sqrt((1 - (a * a)))))
            stackcomp.append(arccos(a))
            stack.append(arccos(a))
        elif ((i[0] == 'py_call') and ('sqrt' in str(i[1]))):
            a = stack.pop((- 1))
            detail.append(('pow', a, 0.5))
            stackcomp.append(sqrt(a))
            stack.append(sqrt(a))
        elif (i == 'neg'):
            a = stack.pop((- 1))
            detail.append(('mul', (- 1), a))
            stack.append((- a))
            stackcomp.append((- a))
    return (stackcomp, detail)
def _get_env(environment, name):
    """Fetch *name* from the marker evaluation environment.

    Raises UndefinedEnvironmentName when the name is absent.
    """
    value = environment.get(name, _undefined)
    if not isinstance(value, Undefined):
        return value
    raise UndefinedEnvironmentName('{0!r} does not exist in evaluation environment.'.format(name))
class DDFFNet(nn.Module):
def __init__(self, focal_stack_size, output_dims=1, cc1_enabled=False, cc2_enabled=False, cc3_enabled=True, cc4_enabled=False, cc5_enabled=False, bias=False, pretrained='no_bn'):
super(DDFFNet, self).__init__()
self.autoencoder = DDFFAutoEncoder(output_dims, cc1_enabled, cc2_enabled, cc3_enabled, cc4_enabled, cc5_enabled, bias=bias)
self.scoring = nn.Conv2d((focal_stack_size * output_dims), output_dims, 1, bias=False)
self.apply(self.weights_init)
if (pretrained == 'no_bn'):
autoencoder_state_dict = self.autoencoder.state_dict()
pretrained_dict = torchvision.models.vgg16(pretrained=True).features.state_dict()
pretrained_dict = self.__map_state_dict(pretrained_dict, bias=bias)
autoencoder_state_dict.update(pretrained_dict)
self.autoencoder.load_state_dict(autoencoder_state_dict)
elif (pretrained == 'bn'):
autoencoder_state_dict = self.autoencoder.state_dict()
pretrained_dict = torchvision.models.vgg16_bn(pretrained=True).features.state_dict()
pretrained_dict = self.__map_state_dict_bn(pretrained_dict, bias=bias)
autoencoder_state_dict.update(pretrained_dict)
self.autoencoder.load_state_dict(autoencoder_state_dict)
elif (pretrained is not None):
autoencoder_state_dict = self.autoencoder.state_dict()
pretrained_weights = np.load(pretrained, encoding='latin1').item()
pretrained_dict = self.__map_state_dict_tf(pretrained_weights, bias=bias)
autoencoder_state_dict.update(pretrained_dict)
self.autoencoder.load_state_dict(autoencoder_state_dict)
def forward(self, images):
image_features = self.autoencoder(images.view((- 1), *images.shape[2:]))
image_features = image_features.view(images.shape[0], (- 1), *image_features.shape[2:])
result = self.scoring(image_features)
return result
def weights_init(self, m):
classname = m.__class__.__name__
if (classname.find('Conv') != (- 1)):
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if (m.bias is not None):
m.bias.data.fill_(0)
elif (classname.find('BatchNorm') != (- 1)):
m.weight.data.normal_(0, 1.0)
m.running_var.normal_(0, 1.0)
m.running_mean.fill_(0)
m.bias.data.fill_(0)
def __map_state_dict(self, vgg16_features_dict, bias):
layer_mappings = {'0.weight': 'conv1_1.weight', '2.weight': 'conv1_2.weight', '5.weight': 'conv2_1.weight', '7.weight': 'conv2_2.weight', '10.weight': 'conv3_1.weight', '12.weight': 'conv3_2.weight', '14.weight': 'conv3_3.weight', '17.weight': 'conv4_1.weight', '19.weight': 'conv4_2.weight', '21.weight': 'conv4_3.weight', '24.weight': 'conv5_1.weight', '26.weight': 'conv5_2.weight', '28.weight': 'conv5_3.weight'}
if bias:
layer_mappings.update({'0.bias': 'conv1_1.bias', '2.bias': 'conv1_2.bias', '5.bias': 'conv2_1.bias', '7.bias': 'conv2_2.bias', '10.bias': 'conv3_1.bias', '12.bias': 'conv3_2.bias', '14.bias': 'conv3_3.bias', '17.bias': 'conv4_1.bias', '19.bias': 'conv4_2.bias', '21.bias': 'conv4_3.bias', '24.bias': 'conv5_1.bias', '26.bias': 'conv5_2.bias', '28.bias': 'conv5_3.bias'})
pretrained_dict = {layer_mappings[k]: v for (k, v) in vgg16_features_dict.items() if (k in layer_mappings)}
return pretrained_dict
def __map_state_dict_bn(self, vgg16_features_dict, bias):
    """Translate torchvision VGG16-BN ``features`` keys to this model's names.

    Each conv at index i in torchvision's VGG16-BN `features` is followed by
    its BatchNorm at index i + 1, which contributes weight/bias/running stats.
    Only keys present in the mapping survive; conv bias entries are included
    only when ``bias`` is True.
    """
    layer_mappings = {'0.weight': 'conv1_1.weight', '1.weight': 'conv1_1_bn.weight', '1.bias': 'conv1_1_bn.bias', '1.running_mean': 'conv1_1_bn.running_mean', '1.running_var': 'conv1_1_bn.running_var',
                      '3.weight': 'conv1_2.weight', '4.weight': 'conv1_2_bn.weight', '4.bias': 'conv1_2_bn.bias', '4.running_mean': 'conv1_2_bn.running_mean', '4.running_var': 'conv1_2_bn.running_var',
                      '7.weight': 'conv2_1.weight', '8.weight': 'conv2_1_bn.weight', '8.bias': 'conv2_1_bn.bias', '8.running_mean': 'conv2_1_bn.running_mean', '8.running_var': 'conv2_1_bn.running_var',
                      '10.weight': 'conv2_2.weight', '11.weight': 'conv2_2_bn.weight', '11.bias': 'conv2_2_bn.bias', '11.running_mean': 'conv2_2_bn.running_mean', '11.running_var': 'conv2_2_bn.running_var',
                      '14.weight': 'conv3_1.weight', '15.weight': 'conv3_1_bn.weight', '15.bias': 'conv3_1_bn.bias', '15.running_mean': 'conv3_1_bn.running_mean', '15.running_var': 'conv3_1_bn.running_var',
                      '17.weight': 'conv3_2.weight', '18.weight': 'conv3_2_bn.weight', '18.bias': 'conv3_2_bn.bias', '18.running_mean': 'conv3_2_bn.running_mean', '18.running_var': 'conv3_2_bn.running_var',
                      '20.weight': 'conv3_3.weight', '21.weight': 'conv3_3_bn.weight', '21.bias': 'conv3_3_bn.bias', '21.running_mean': 'conv3_3_bn.running_mean', '21.running_var': 'conv3_3_bn.running_var',
                      '24.weight': 'conv4_1.weight', '25.weight': 'conv4_1_bn.weight', '25.bias': 'conv4_1_bn.bias', '25.running_mean': 'conv4_1_bn.running_mean', '25.running_var': 'conv4_1_bn.running_var',
                      '27.weight': 'conv4_2.weight', '28.weight': 'conv4_2_bn.weight', '28.bias': 'conv4_2_bn.bias', '28.running_mean': 'conv4_2_bn.running_mean', '28.running_var': 'conv4_2_bn.running_var',
                      '30.weight': 'conv4_3.weight', '31.weight': 'conv4_3_bn.weight', '31.bias': 'conv4_3_bn.bias', '31.running_mean': 'conv4_3_bn.running_mean', '31.running_var': 'conv4_3_bn.running_var',
                      '34.weight': 'conv5_1.weight', '35.weight': 'conv5_1_bn.weight', '35.bias': 'conv5_1_bn.bias', '35.running_mean': 'conv5_1_bn.running_mean', '35.running_var': 'conv5_1_bn.running_var',
                      '37.weight': 'conv5_2.weight', '38.weight': 'conv5_2_bn.weight', '38.bias': 'conv5_2_bn.bias', '38.running_mean': 'conv5_2_bn.running_mean', '38.running_var': 'conv5_2_bn.running_var',
                      '40.weight': 'conv5_3.weight', '41.weight': 'conv5_3_bn.weight', '41.bias': 'conv5_3_bn.bias', '41.running_mean': 'conv5_3_bn.running_mean', '41.running_var': 'conv5_3_bn.running_var'}
    if bias:
        # Conv bias lives at the conv's own index (the BN bias is mapped above).
        layer_mappings.update({'0.bias': 'conv1_1.bias', '3.bias': 'conv1_2.bias', '7.bias': 'conv2_1.bias', '10.bias': 'conv2_2.bias', '14.bias': 'conv3_1.bias', '17.bias': 'conv3_2.bias', '20.bias': 'conv3_3.bias', '24.bias': 'conv4_1.bias', '27.bias': 'conv4_2.bias', '30.bias': 'conv4_3.bias', '34.bias': 'conv5_1.bias', '37.bias': 'conv5_2.bias', '40.bias': 'conv5_3.bias'})
    # Rename recognized keys; silently drop everything else (e.g. classifier).
    pretrained_dict = {layer_mappings[k]: v for (k, v) in vgg16_features_dict.items() if (k in layer_mappings)}
    return pretrained_dict
def __map_state_dict_tf(self, vgg16_features, bias):
    """Convert TF-style VGG16 weights (numpy, HWIO layout) to torch tensors.

    Kernels are transposed from TF HWIO to torch OIHW; biases are copied
    as-is when ``bias`` is True. Each ``vgg16_features[name]`` is expected
    to be a (kernel, bias) pair -- presumably from a numpy .npy dump.
    """
    conv_names = ('conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1',
                  'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3',
                  'conv5_1', 'conv5_2', 'conv5_3')
    pretrained_dict = {
        f'{name}.weight':
            torch.from_numpy(vgg16_features[name][0].transpose((3, 2, 0, 1))).float()
        for name in conv_names}
    if bias:
        pretrained_dict.update({
            f'{name}.bias': torch.from_numpy(vgg16_features[name][1]).float()
            for name in conv_names})
    return pretrained_dict
class ConstructorSlot(InternalMethodSlot):
    # Slot descriptor whose C code is generated from a special method
    # (`method`) looked up in the extension type's scope.
    def __init__(self, slot_name, method, **kargs):
        InternalMethodSlot.__init__(self, slot_name, **kargs)
        self.method = method  # name of the special method backing this slot

    def slot_code(self, scope):
        """Return the C expression for this slot in ``scope``'s type object."""
        entry = scope.lookup_here(self.method)
        # For non-tp_new slots: if the type adds no attributes that need
        # managing (no PyObject/memoryview/C++-class attrs) and defines no
        # special method of its own, try to reuse the base type's slot.
        if ((self.slot_name != 'tp_new') and scope.parent_type.base_type and (not scope.has_pyobject_attrs) and (not scope.has_memoryview_attrs) and (not scope.has_cpp_class_attrs) and (not (entry and entry.is_special))):
            parent_type_scope = scope.parent_type.base_type.scope
            if (scope.parent_scope is parent_type_scope.parent_scope):
                entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
                # Only recurse into the base for non-extern types; extern
                # bases keep the default handling below.
                if (entry.visibility != 'extern'):
                    return self.slot_code(parent_type_scope)
        # A plain (non-special) entry means no generated slot function: emit 0.
        if (entry and (not entry.is_special)):
            return '0'
        return InternalMethodSlot.slot_code(self, scope)
def test_data_frame_integers(tmp_path):
    """Round-trip int64/double columns through an RDataFrame and snapshot it."""
    out_file = os.path.join(tmp_path, 'test-integers.root')
    xs = ak.Array([1, 2, 3, 4, 5])
    ys = ak.Array([1.1, 2.2, 3.3, 4.4, 5.5])
    frame = ak.to_rdataframe({'x': xs, 'y': ys})
    # ROOT maps awkward int64 -> int64_t and float64 -> double.
    assert frame.GetColumnType('x') == 'int64_t'
    assert frame.GetColumnType('y') == 'double'
    roundtripped = ak.from_rdataframe(frame, columns=('x', 'y'))
    assert xs.to_list() == roundtripped['x'].to_list()
    assert ys.to_list() == roundtripped['y'].to_list()
    frame.Snapshot('Test', out_file, ('x', 'y'))
def build_shared_mlp(mlp_spec: List[int], bn: bool = True):
    """Stack 1x1 Conv2d (+ optional BatchNorm2d) + ReLU layers per ``mlp_spec``.

    Consecutive entries of ``mlp_spec`` give each conv's in/out channels.
    The conv bias is omitted whenever BatchNorm follows, since BN's affine
    shift makes it redundant.
    """
    modules = []
    for in_channels, out_channels in zip(mlp_spec, mlp_spec[1:]):
        modules.append(nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=not bn))
        if bn:
            modules.append(nn.BatchNorm2d(out_channels))
        modules.append(nn.ReLU(True))
    return nn.Sequential(*modules)
class ModelTraining(ABC):
    """Abstract base for model-training strategies.

    Subclasses store their trained model in ``self.model`` and implement
    ``train_model``.
    """

    def __init__(self, *args, **kwargs):
        # Populated by concrete subclasses once training has produced a model.
        self.model = None

    def train_model(self, x_train, y_train, x_val, y_val, force_device):
        """Train on (x_train, y_train), validating on (x_val, y_val)."""
        raise NotImplementedError()
class Agent():
    """DQN trading agent with an epsilon-greedy policy and replay memory."""

    def __init__(self, state_size, is_eval=False, model_name=''):
        self.state_size = state_size
        self.action_size = 5
        # Replay buffer of (state, action, reward, next_state, done) tuples.
        self.memory = deque(maxlen=2000)
        self.inventory1 = []
        self.inventory2 = []
        self.model_name = model_name
        self.is_eval = is_eval
        self.gamma = 0.95          # reward discount factor
        self.epsilon = 1.0         # exploration rate, decays toward epsilon_min
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        # Evaluation loads a saved model; training builds a fresh network.
        self.model = (load_model('models/' + model_name) if is_eval else self._model())

    def _model(self):
        """Build the Q-network mapping a state vector to one Q-value per action."""
        model = Sequential()
        model.add(Dense(units=64, input_dim=self.state_size, activation='relu'))
        model.add(Dense(units=32, activation='relu'))
        model.add(Dense(units=8, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=0.0001))
        return model

    def act(self, state):
        """Choose an action: random with prob. epsilon while training, else greedy."""
        if (not self.is_eval) and (random.random() <= self.epsilon):
            return random.randrange(self.action_size)
        options = self.model.predict(state)
        return np.argmax(options[0])

    def expReplay(self, batch_size):
        """Fit the Q-network on a random minibatch from memory; decay epsilon."""
        # Fix: removed the unused `l = len(self.memory)` and the dead
        # `mini_batch = []` assignment that was immediately overwritten.
        mini_batch = random.sample(self.memory, batch_size)
        for (state, action, reward, next_state, done) in mini_batch:
            target = reward
            if not done:
                # Bellman target: r + gamma * max_a' Q(s', a')
                target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
def test_loop_inlining_regular_for():
    """Inlining a regular (non-inverted) for-loop region yields 8 states."""
    sdfg = dace.SDFG('inlining')
    start = sdfg.add_state('state0', is_start_block=True)
    loop = LoopRegion(label='loop1', condition_expr='i < 10', loop_var='i',
                      initialize_expr='i = 0', update_expr='i = i + 1',
                      inverted=False)
    sdfg.add_node(loop)
    body_first = loop.add_state('state1', is_start_block=True)
    body_second = loop.add_state('state2')
    loop.add_edge(body_first, body_second, dace.InterstateEdge())
    after = sdfg.add_state('state3')
    sdfg.add_edge(start, loop, dace.InterstateEdge())
    sdfg.add_edge(loop, after, dace.InterstateEdge())
    sdutils.inline_loop_blocks(sdfg)
    nodes = sdfg.nodes()
    # 4 original states + the guard/init/update machinery from inlining.
    assert len(nodes) == 8
    for state in (start, body_first, body_second, after):
        assert state in nodes
def predict(path):
    """Classify the image at ``path``; return the top-3 predicted class labels.

    Relies on module-level ``model`` (Keras-style, with ``predict``) and
    ``classes`` (index -> label) defined elsewhere in the file.
    """
    img = Image.open(path).resize((224, 224))
    x = np.asarray(img.convert('RGB'))
    # Per-image standardization: zero mean, unit variance.
    x = (x - x.mean()) / x.std()
    x = np.expand_dims(x, axis=0)
    preds = model.predict(x)
    # Fix: removed `np.sort(preds)` -- it returns a sorted copy that was
    # discarded, so the call had no effect.
    print("Model's top 3 predicted:")
    # argsort of the negated scores = indices in descending-probability order.
    top3 = np.argsort(-preds[0])[:3]
    return [classes[i] for i in top3]
def test_indexedoption_of_union_of_option_1():
    """A union mixing optional and non-optional contents must go through simplified()."""
    tags = ak.index.Index8(np.array([0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1], dtype=np.int8))
    index = ak.index.Index64(np.array([0, 1, 0, 2, 1, 2, 3, 3, 4, 5, 4], dtype=np.int64))
    contents = [ak.from_iter([0.0, 1.1, 2.2, 3.3, None, 5.5], highlevel=False),
                ak.from_iter(['zero', 'one', 'two', 'three', 'four'], highlevel=False)]
    # The raw constructor rejects mixed optional/non-optional contents.
    with pytest.raises(TypeError, match=' must either be comprised of entirely optional contents'):
        ak.contents.UnionArray(tags, index, contents)
    unionarray = ak.contents.UnionArray.simplified(tags, index, contents)
    outer_index = ak.index.Index64(np.array([-1, 4, 3, -1, 3, 8, 7, 6, -1], np.int64))
    indexedoptionarray = ak.contents.IndexedOptionArray.simplified(outer_index, unionarray)
    assert indexedoptionarray.to_list() == [None, 'one', 2.2, None, 2.2, None, 3.3, 'three', None]
def _download_hook(t):
    """Return a urlretrieve-style reporthook driving progress bar ``t``.

    The closure remembers the previous block count so each call advances
    the bar by the delta (in bytes) since the last call.
    """
    last_blocks = [0]

    def inner(b=1, bsize=1, tsize=None):
        # b: blocks transferred so far; bsize: bytes per block; tsize: total bytes.
        if tsize is not None:
            t.total = tsize
        t.update((b - last_blocks[0]) * bsize)
        last_blocks[0] = b

    return inner
class TestKerasTPModel(unittest.TestCase):
    """Tests for attaching Keras layers to target-platform-model opsets."""

    def test_keras_layers_with_params(self):
        """LayerFilterParams matching: attribute filters, ops (|), Contains on names."""
        conv_with_params = LayerFilterParams(Conv2D, Greater('filters', 2), Smaller('filters', 4), activation='softmax', kernel_size=(3, 4), filters=3)
        conv = Conv2D(filters=3, kernel_size=(3, 4), activation='softmax')
        self.assertTrue(get_node(conv).is_match_filter_params(conv_with_params))
        # filters=2 violates Greater('filters', 2).
        conv = Conv2D(filters=2, kernel_size=(3, 4), activation='softmax')
        self.assertFalse(get_node(conv).is_match_filter_params(conv_with_params))
        # Different layer class never matches.
        conv = Conv2DTranspose(filters=3, kernel_size=(3, 4), activation='softmax')
        self.assertFalse(get_node(conv).is_match_filter_params(conv_with_params))
        relu_with_params = LayerFilterParams(ReLU, (GreaterEq('max_value', 0.5) | Smaller('max_value', 0.2)))
        self.assertTrue(get_node(ReLU(max_value=0.1)).is_match_filter_params(relu_with_params))
        self.assertTrue(get_node(ReLU(max_value=0.5)).is_match_filter_params(relu_with_params))
        # 0.3 falls between the two disjunct ranges.
        self.assertFalse(get_node(ReLU(max_value=0.3)).is_match_filter_params(relu_with_params))
        relu_with_params = LayerFilterParams(ReLU, (Eq('max_value', None) | Eq('max_value', 6)))
        self.assertTrue(get_node(ReLU()).is_match_filter_params(relu_with_params))
        self.assertTrue(get_node(ReLU(max_value=6)).is_match_filter_params(relu_with_params))
        self.assertFalse(get_node(ReLU(max_value=8)).is_match_filter_params(relu_with_params))
        lrelu_with_params = LayerFilterParams(tf.nn.leaky_relu, SmallerEq('alpha', 2))
        self.assertTrue(get_node(partial(tf.nn.leaky_relu, alpha=0.4)).is_match_filter_params(lrelu_with_params))
        self.assertTrue(get_node(partial(tf.nn.leaky_relu, alpha=2)).is_match_filter_params(lrelu_with_params))
        self.assertFalse(get_node(partial(tf.nn.leaky_relu, alpha=2.1)).is_match_filter_params(lrelu_with_params))
        # No attribute filters: any leaky_relu matches.
        lrelu_with_params = LayerFilterParams(tf.nn.leaky_relu)
        self.assertTrue(get_node(partial(tf.nn.leaky_relu, alpha=0.4)).is_match_filter_params(lrelu_with_params))
        conv_filter_contains = LayerFilterParams(Conv2D, Contains('name', 'conv'))
        conv = Conv2D(filters=3, kernel_size=(3, 4), name='conv')
        self.assertTrue(get_node(conv).is_match_filter_params(conv_filter_contains))
        conv = Conv2D(filters=3, kernel_size=(3, 4), name='layer_conv_0')
        self.assertTrue(get_node(conv).is_match_filter_params(conv_filter_contains))
        # Contains is case-sensitive.
        conv = Conv2D(filters=2, kernel_size=(3, 4), name='CONVOLUTION')
        self.assertFalse(get_node(conv).is_match_filter_params(conv_filter_contains))

    def test_get_layers_by_op(self):
        """Layers attached to an opset are retrievable by name and by object."""
        hm = tp.TargetPlatformModel(tp.QuantizationConfigOptions([TEST_QC]))
        with hm:
            op_obj = tp.OperatorsSet('opsetA')
        fw_tp = TargetPlatformCapabilities(hm)
        with fw_tp:
            opset_layers = [Conv2D, LayerFilterParams(ReLU, max_value=2)]
            tp.OperationsSetToLayers('opsetA', opset_layers)
        self.assertEqual(fw_tp.get_layers_by_opset_name('opsetA'), opset_layers)
        self.assertEqual(fw_tp.get_layers_by_opset(op_obj), opset_layers)

    def test_get_layers_by_opconcat(self):
        """An OperatorSetConcat exposes the union of its member opsets' layers."""
        hm = tp.TargetPlatformModel(tp.QuantizationConfigOptions([TEST_QC]))
        with hm:
            op_obj_a = tp.OperatorsSet('opsetA')
            op_obj_b = tp.OperatorsSet('opsetB')
            op_concat = tp.OperatorSetConcat(op_obj_a, op_obj_b)
        fw_tp = TargetPlatformCapabilities(hm)
        with fw_tp:
            opset_layers_a = [Conv2D]
            opset_layers_b = [LayerFilterParams(ReLU, max_value=2)]
            tp.OperationsSetToLayers('opsetA', opset_layers_a)
            tp.OperationsSetToLayers('opsetB', opset_layers_b)
        self.assertEqual(fw_tp.get_layers_by_opset_name('opsetA_opsetB'), (opset_layers_a + opset_layers_b))
        self.assertEqual(fw_tp.get_layers_by_opset(op_concat), (opset_layers_a + opset_layers_b))

    def test_layer_attached_to_multiple_opsets(self):
        """Attaching the same layer class to two opsets raises."""
        hm = tp.TargetPlatformModel(tp.QuantizationConfigOptions([TEST_QC]))
        with hm:
            tp.OperatorsSet('opsetA')
            tp.OperatorsSet('opsetB')
        fw_tp = TargetPlatformCapabilities(hm)
        with self.assertRaises(Exception) as e:
            with fw_tp:
                tp.OperationsSetToLayers('opsetA', [Conv2D])
                tp.OperationsSetToLayers('opsetB', [Conv2D])
        self.assertEqual('Found layer Conv2D in more than one OperatorsSet', str(e.exception))

    def test_filter_layer_attached_to_multiple_opsets(self):
        """Attaching the same filtered layer to two opsets raises."""
        hm = tp.TargetPlatformModel(tp.QuantizationConfigOptions([TEST_QC]))
        with hm:
            tp.OperatorsSet('opsetA')
            tp.OperatorsSet('opsetB')
        fw_tp = TargetPlatformCapabilities(hm)
        with self.assertRaises(Exception) as e:
            with fw_tp:
                tp.OperationsSetToLayers('opsetA', [LayerFilterParams(Activation, activation='relu')])
                tp.OperationsSetToLayers('opsetB', [LayerFilterParams(Activation, activation='relu')])
        self.assertEqual('Found layer Activation(activation=relu) in more than one OperatorsSet', str(e.exception))

    def test_qco_by_keras_layer(self):
        """Each node resolves the quantization config options of its opset."""
        default_qco = tp.QuantizationConfigOptions([TEST_QC])
        hm = tp.TargetPlatformModel(default_qco, name='test')
        with hm:
            mixed_precision_configuration_options = tp.QuantizationConfigOptions([TEST_QC, TEST_QC.clone_and_edit(weights_n_bits=4), TEST_QC.clone_and_edit(weights_n_bits=2)], base_config=TEST_QC)
            tp.OperatorsSet('conv', mixed_precision_configuration_options)
            sevenbit_qco = TEST_QCO.clone_and_edit(activation_n_bits=7)
            tp.OperatorsSet('tanh', sevenbit_qco)
            # 'relu' gets no explicit options -> falls back to the default.
            tp.OperatorsSet('relu')
        hm_keras = tp.TargetPlatformCapabilities(hm, name='fw_test')
        with hm_keras:
            tp.OperationsSetToLayers('conv', [Conv2D])
            tp.OperationsSetToLayers('tanh', [tf.nn.tanh])
            tp.OperationsSetToLayers('relu', [LayerFilterParams(Activation, activation='relu')])
        conv_node = get_node(Conv2D(1, 1))
        tanh_node = get_node(tf.nn.tanh)
        relu_node = get_node(Activation('relu'))
        conv_qco = conv_node.get_qco(hm_keras)
        tanh_qco = tanh_node.get_qco(hm_keras)
        relu_qco = relu_node.get_qco(hm_keras)
        self.assertEqual(conv_qco, mixed_precision_configuration_options)
        self.assertEqual(tanh_qco, sevenbit_qco)
        self.assertEqual(relu_qco, default_qco)

    def test_opset_not_in_tp(self):
        """Mapping layers to an opset the model never declared raises."""
        default_qco = tp.QuantizationConfigOptions([TEST_QC])
        hm = tp.TargetPlatformModel(default_qco)
        hm_keras = tp.TargetPlatformCapabilities(hm)
        with self.assertRaises(Exception) as e:
            with hm_keras:
                tp.OperationsSetToLayers('conv', [Conv2D])
        self.assertEqual('conv is not defined in the target platform model that is associated with the target platform capabilities.', str(e.exception))

    def test_keras_fusing_patterns(self):
        """Fusing declarations translate to the attached layer patterns."""
        default_qco = tp.QuantizationConfigOptions([TEST_QC])
        hm = tp.TargetPlatformModel(default_qco)
        with hm:
            a = tp.OperatorsSet('opA')
            b = tp.OperatorsSet('opB')
            c = tp.OperatorsSet('opC')
            tp.Fusing([a, b, c])
            tp.Fusing([a, c])
        hm_keras = tp.TargetPlatformCapabilities(hm)
        with hm_keras:
            tp.OperationsSetToLayers('opA', [Conv2D])
            tp.OperationsSetToLayers('opB', [tf.nn.tanh])
            tp.OperationsSetToLayers('opC', [LayerFilterParams(ReLU, Greater('max_value', 7), negative_slope=0)])
        fusings = hm_keras.get_fusing_patterns()
        self.assertEqual(len(fusings), 2)
        (p0, p1) = (fusings[0], fusings[1])
        self.assertEqual(len(p0), 3)
        self.assertEqual(p0[0], Conv2D)
        self.assertEqual(p0[1], tf.nn.tanh)
        self.assertEqual(p0[2], LayerFilterParams(ReLU, Greater('max_value', 7), negative_slope=0))
        self.assertEqual(len(p1), 2)
        self.assertEqual(p1[0], Conv2D)
        self.assertEqual(p1[1], LayerFilterParams(ReLU, Greater('max_value', 7), negative_slope=0))
def convert_bytes(num):
    """Format a byte count as a human-readable string, e.g. 2048 -> '2.0 KB'.

    Fix: the original fell off the end of the loop and returned None for
    values of 1024 TB or more; those are now reported in PB.
    """
    for unit in ('bytes', 'KB', 'MB', 'GB', 'TB'):
        if num < 1024.0:
            return '%3.1f %s' % (num, unit)
        num /= 1024.0
    return '%3.1f %s' % (num, 'PB')
def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames, min_pad_coverage=0.75, overlap=0.5):
    """Compute aligned slices that split a waveform into partial utterances.

    Returns ``(wav_slices, mel_slices)``: for each partial utterance, a
    slice into the raw waveform (in samples) and the matching slice into
    its mel spectrogram (in frames). Consecutive partials overlap by
    ``overlap`` of their length; a trailing partial covering less than
    ``min_pad_coverage`` of real samples is dropped (when more than one
    partial exists).

    NOTE(review): relies on module globals ``partials_n_frames``,
    ``sampling_rate`` and ``mel_window_step`` defined elsewhere in the file.
    """
    assert (0 <= overlap < 1)
    assert (0 < min_pad_coverage <= 1)
    # Audio samples covered by one mel frame (mel_window_step presumably in ms).
    samples_per_frame = int(((sampling_rate * mel_window_step) / 1000))
    n_frames = int(np.ceil(((n_samples + 1) / samples_per_frame)))
    # Frame hop between consecutive partials; at least 1 to guarantee progress.
    frame_step = max(int(np.round((partial_utterance_n_frames * (1 - overlap)))), 1)
    (wav_slices, mel_slices) = ([], [])
    steps = max(1, (((n_frames - partial_utterance_n_frames) + frame_step) + 1))
    for i in range(0, steps, frame_step):
        mel_range = np.array([i, (i + partial_utterance_n_frames)])
        wav_range = (mel_range * samples_per_frame)
        mel_slices.append(slice(*mel_range))
        wav_slices.append(slice(*wav_range))
    # Drop the final partial if too little of it covers actual audio.
    last_wav_range = wav_slices[(- 1)]
    coverage = ((n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start))
    if ((coverage < min_pad_coverage) and (len(mel_slices) > 1)):
        mel_slices = mel_slices[:(- 1)]
        wav_slices = wav_slices[:(- 1)]
    return (wav_slices, mel_slices)
def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, phi, derphi, phi0, derphi0, c1, c2, extra_condition):
    """Zoom stage of a strong-Wolfe line search.

    Shrinks the bracket [a_lo, a_hi] until a step satisfying the sufficient
    decrease (c1) and curvature (c2) conditions plus ``extra_condition`` is
    found. Returns ``(a_star, phi(a_star), derphi(a_star))``, or
    ``(None, None, None)`` after ``maxiter`` bracket refinements.
    """
    maxiter = 10
    i = 0
    # Safeguard margins: reject interpolated points within delta1 (cubic) /
    # delta2 (quadratic) of the bracket ends, falling back to bisection.
    delta1 = 0.2
    delta2 = 0.1
    phi_rec = phi0
    a_rec = 0
    while True:
        dalpha = (a_hi - a_lo)
        # a, b = ordered bracket endpoints (a_hi may be below a_lo).
        if (dalpha < 0):
            (a, b) = (a_hi, a_lo)
        else:
            (a, b) = (a_lo, a_hi)
        # Trial point: cubic interpolation first, then quadratic, then bisection.
        if (i > 0):
            cchk = (delta1 * dalpha)
            a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec, phi_rec)
        if ((i == 0) or (a_j is None) or (a_j > (b - cchk)) or (a_j < (a + cchk))):
            qchk = (delta2 * dalpha)
            a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
            if ((a_j is None) or (a_j > (b - qchk)) or (a_j < (a + qchk))):
                a_j = (a_lo + (0.5 * dalpha))
        phi_aj = phi(a_j)
        if ((phi_aj > (phi0 + ((c1 * a_j) * derphi0))) or (phi_aj >= phi_lo)):
            # Sufficient-decrease failed (or no improvement over a_lo):
            # shrink the bracket from the high side.
            phi_rec = phi_hi
            a_rec = a_hi
            a_hi = a_j
            phi_hi = phi_aj
        else:
            derphi_aj = derphi(a_j)
            # Curvature condition + caller's extra acceptance test.
            if ((abs(derphi_aj) <= ((- c2) * derphi0)) and extra_condition(a_j, phi_aj)):
                a_star = a_j
                val_star = phi_aj
                valprime_star = derphi_aj
                break
            if ((derphi_aj * (a_hi - a_lo)) >= 0):
                # Slope points out of the bracket: flip the high end to a_lo.
                phi_rec = phi_hi
                a_rec = a_hi
                a_hi = a_lo
                phi_hi = phi_lo
            else:
                phi_rec = phi_lo
                a_rec = a_lo
            a_lo = a_j
            phi_lo = phi_aj
            derphi_lo = derphi_aj
        i += 1
        if (i > maxiter):
            # Failed to converge within the iteration budget.
            a_star = None
            val_star = None
            valprime_star = None
            break
    return (a_star, val_star, valprime_star)
class lomax_gen(rv_continuous):
    """Lomax (Pareto type II) distribution: a Pareto shifted to start at 0.

    pdf(x, c) = c / (1 + x)**(c + 1), for x >= 0 and shape c > 0.
    """
    def _shape_info(self):
        # Shape parameter c: strictly positive, open interval at both ends.
        return [_ShapeInfo('c', False, (0, np.inf), (False, False))]
    def _pdf(self, x, c):
        # c / (1 + x)**(c + 1)
        return ((c * 1.0) / ((1.0 + x) ** (c + 1.0)))
    def _logpdf(self, x, c):
        # log1p keeps accuracy for small x.
        return (np.log(c) - ((c + 1) * sc.log1p(x)))
    def _cdf(self, x, c):
        # 1 - (1 + x)**(-c), via expm1/log1p for numerical stability.
        return (- sc.expm1(((- c) * sc.log1p(x))))
    def _sf(self, x, c):
        # (1 + x)**(-c)
        return np.exp(((- c) * sc.log1p(x)))
    def _logsf(self, x, c):
        return ((- c) * sc.log1p(x))
    def _ppf(self, q, c):
        # Inverse CDF: (1 - q)**(-1/c) - 1
        return sc.expm1(((- sc.log1p((- q))) / c))
    def _isf(self, q, c):
        # Inverse survival function: q**(-1/c) - 1
        return ((q ** ((- 1.0) / c)) - 1)
    def _stats(self, c):
        # Delegate to the Pareto distribution shifted by loc=-1.
        (mu, mu2, g1, g2) = pareto.stats(c, loc=(- 1.0), moments='mvsk')
        return (mu, mu2, g1, g2)
    def _entropy(self, c):
        return ((1 + (1.0 / c)) - np.log(c))
def instantiate(cfg):
    """Recursively instantiate objects described by a config tree.

    Any mapping containing a ``_target_`` key becomes a call: the target
    (dotted name string or callable) is resolved and invoked with the
    recursively instantiated remaining keys as kwargs. Lists/ListConfigs
    are instantiated element-wise; anything else is returned unchanged.
    """
    from omegaconf import ListConfig
    if isinstance(cfg, ListConfig):
        lst = [instantiate(x) for x in cfg]
        # allow_objects: the resulting ListConfig may hold arbitrary Python objects.
        return ListConfig(lst, flags={'allow_objects': True})
    if isinstance(cfg, list):
        return [instantiate(x) for x in cfg]
    if (isinstance(cfg, abc.Mapping) and ('_target_' in cfg)):
        # Instantiate children first, then resolve and call the target.
        cfg = {k: instantiate(v) for (k, v) in cfg.items()}
        cls = cfg.pop('_target_')
        cls = instantiate(cls)
        if isinstance(cls, str):
            cls_name = cls
            cls = locate(cls_name)  # resolve dotted path to the actual object
            assert (cls is not None), cls_name
        else:
            # Best-effort qualified name, for error reporting only.
            try:
                cls_name = ((cls.__module__ + '.') + cls.__qualname__)
            except Exception:
                cls_name = str(cls)
        assert callable(cls), f'_target_ {cls} does not define a callable object'
        try:
            return cls(**cfg)
        except TypeError:
            # Typically a kwargs mismatch; log which target failed, then re-raise.
            logger = logging.getLogger(__name__)
            logger.error(f'Error when instantiating {cls_name}!')
            raise
    return cfg
def test_from_wsgi(testdir, graphql_path):
    """A GraphQL schema loaded via from_wsgi generates passing tests for every
    query and mutation (4 operations, 10 Hypothesis examples each).

    NOTE(review): the bare '()' and '(max_examples=10, ...)' lines inside the
    generated test module look like decorators that lost their
    '@schema.parametrize' / '@settings' prefixes during extraction -- confirm
    against the original test template before relying on this string.
    """
    testdir.make_test(f'''
from test.apps._graphql._flask.app import app
schema = schemathesis.graphql.from_wsgi("{graphql_path}", app=app)
()
(max_examples=10, deadline=None, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
def test_(request, case):
    request.config.HYPOTHESIS_CASES += 1
    assert case.path == "{graphql_path}"
    assert case.operation.definition.field_name in case.body
    response = case.call_wsgi()
    assert response.status_code == 200
    case.validate_response(response)
''')
    result = testdir.runpytest('-v', '-s')
    result.assert_outcomes(passed=4)
    result.stdout.re_match_lines(['test_from_wsgi.py::test_\\[Query.getBooks] PASSED', 'test_from_wsgi.py::test_\\[Query.getAuthors] PASSED', 'test_from_wsgi.py::test_\\[Mutation.addBook] PASSED', 'test_from_wsgi.py::test_\\[Mutation.addAuthor] PASSED', 'Hypothesis calls: 40'])
def test_batch_tile():
    """batch_tile stacks `count` copies of a tensor along a new leading dim."""
    matrix = torch.FloatTensor([[1, 2, 3], [4, 5, 6]])
    assert_tensor_equal(batch_tile(matrix, 3),
                        [[[1, 2, 3], [4, 5, 6]],
                         [[1, 2, 3], [4, 5, 6]],
                         [[1, 2, 3], [4, 5, 6]]])
    vector = torch.LongTensor([1, 2, 3])
    assert_tensor_equal(batch_tile(vector, 4),
                        [[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
# NOTE(review): this bare call immediately before `def` looks like a decorator
# that lost its '@' (and possibly a 'register_' prefix) during extraction --
# in fairseq this pattern is written
# `@register_model_architecture('convtransformer', 'convtransformer_espnet')`.
# Confirm against the original source.
_model_architecture('convtransformer', 'convtransformer_espnet')
def convtransformer_espnet(args):
    """Fill in default hyperparameters for the ESPnet convtransformer arch.

    Each getattr keeps a value already present on ``args`` and only supplies
    the default when the attribute is missing.
    """
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
    args.encoder_layers = getattr(args, 'encoder_layers', 12)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
class CollectorIterator(object):
    """Iterate over a collector's results in submission order.

    Exhaustion (IndexError from the collector) rewinds the cursor so the
    iterator can be re-used, then signals StopIteration.
    """

    def __init__(self, collector):
        self._collector = collector
        self._idx = 0

    def __iter__(self):
        return self

    def next(self, timeout=None):
        """Return the next ready result, waiting up to ``timeout`` for it."""
        try:
            result = self._collector._get_result(self._idx, timeout)
        except IndexError:
            # Past the last result: rewind for potential re-iteration.
            self._idx = 0
            raise StopIteration
        except:
            # Any other failure also rewinds before propagating.
            self._idx = 0
            raise
        else:
            self._idx += 1
            assert result.ready()
            return result.get(0)

    def __next__(self):
        return self.next()
def get_data_from_features_or_inputs(tokenizer: BertTokenizer, label_list: List[str], feature: Optional[InputFeatures]=None, inputs: Optional[Dict[(str, torch.Tensor)]]=None) -> Tuple[(str, str, str)]:
    """Decode one example's (premise, hypothesis, label) from a feature or a batch.

    Exactly one of ``feature`` / ``inputs`` must be provided; a feature,
    when given, is collated into a model-input batch first.

    Raises:
        ValueError: if neither or both of ``feature`` and ``inputs`` are given.
    """
    # Fix: the original raised bare ValueError (no message) from a redundant
    # four-branch chain; collapsed to one guard with a descriptive message.
    if (feature is None) == (inputs is None):
        raise ValueError('Provide exactly one of `feature` or `inputs`.')
    if feature is not None:
        inputs = default_data_collator([feature])
    (X, Y) = decode_one_example(tokenizer=tokenizer, label_list=label_list, inputs=inputs, logits=None)
    # Token layout: "[CLS] premise [SEP] hypothesis [SEP] ..."
    (premise, hypothesis) = X.split('[CLS]')[1].split('[SEP]')[:2]
    return (premise.strip(), hypothesis.strip(), Y)
class TestAssertAllclose(object):
    """Behavioral tests for numpy.testing.assert_allclose."""

    def test_simple(self):
        """atol/rtol tolerances accept or reject scalars and arrays as expected."""
        x = 0.001
        y = 1e-09
        assert_allclose(x, y, atol=1)
        assert_raises(AssertionError, assert_allclose, x, y)
        a = np.array([x, y, x, y])
        b = np.array([x, y, x, x])
        assert_allclose(a, b, atol=1)
        assert_raises(AssertionError, assert_allclose, a, b)
        # Nudge the mismatched element within the default rtol.
        b[(- 1)] = (y * (1 + 1e-08))
        assert_allclose(a, b)
        assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-09)
        # rtol is measured against the *desired* (second) argument, so the
        # comparison is asymmetric.
        assert_allclose(6, 10, rtol=0.5)
        assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)

    def test_min_int(self):
        """The most negative int must not overflow during the comparison."""
        a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
        assert_allclose(a, a)

    def test_report_fail_percentage(self):
        """Failure message reports mismatch percentage and abs/rel differences."""
        a = np.array([1, 1, 1, 1])
        b = np.array([1, 1, 1, 2])
        with pytest.raises(AssertionError) as exc_info:
            assert_allclose(a, b)
        msg = str(exc_info.value)
        assert_(('Mismatch: 25%\nMax absolute difference: 1\nMax relative difference: 0.5' in msg))

    def test_equal_nan(self):
        """equal_nan=True treats NaNs as equal."""
        a = np.array([np.nan])
        b = np.array([np.nan])
        assert_allclose(a, b, equal_nan=True)

    def test_not_equal_nan(self):
        """equal_nan=False makes NaN != NaN."""
        a = np.array([np.nan])
        b = np.array([np.nan])
        assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)

    def test_equal_nan_default(self):
        """All array comparison helpers treat NaNs as equal by default."""
        a = np.array([np.nan])
        b = np.array([np.nan])
        assert_array_equal(a, b)
        assert_array_almost_equal(a, b)
        assert_array_less(a, b)
        assert_allclose(a, b)

    def test_report_max_relative_error(self):
        """Failure message includes the maximum relative difference."""
        a = np.array([0, 1])
        b = np.array([0, 2])
        with pytest.raises(AssertionError) as exc_info:
            assert_allclose(a, b)
        msg = str(exc_info.value)
        assert_(('Max relative difference: 0.5' in msg))
def train_one_epoch(model, optimizer, train_loader, lr_scheduler, lr_warmup_scheduler, accumulated_iter, train_epoch, optim_cfg, rank, logger, log_buffer, log_interval):
    """Run one training epoch and return the updated global iteration count.

    Steps the warmup scheduler until its horizon (T_max) is passed, then the
    main scheduler; logs averaged metrics every ``log_interval`` batches on
    rank 0.
    """
    for (i, data_batch) in enumerate(train_loader):
        # Use the warmup scheduler until its horizon is reached.
        if (lr_warmup_scheduler is not None) and (accumulated_iter <= lr_warmup_scheduler.T_max):
            cur_lr_scheduler = lr_warmup_scheduler
        else:
            cur_lr_scheduler = lr_scheduler
        cur_lr_scheduler.step(accumulated_iter)
        # Fix: the original bare `except:` also swallowed KeyboardInterrupt /
        # SystemExit; narrowed to the failures `float(optimizer.lr)` can raise.
        try:
            cur_lr = float(optimizer.lr)
        except (AttributeError, TypeError):
            # torch-style optimizers expose the lr per param group instead.
            cur_lr = optimizer.param_groups[0]['lr']
        model.train()
        optimizer.zero_grad()
        outputs = batch_processor(model, data_batch)
        outputs['loss'].backward()
        clip_grad_norm_(model.parameters(), **optim_cfg.grad_clip)
        optimizer.step()
        accumulated_iter += 1
        log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        if (rank == 0) and (((i + 1) % log_interval) == 0):
            log_buffer.average()
            disp_str = 'epoch[%d][%d/%d]: lr: %f, '
            for k in log_buffer.output.keys():
                disp_str += (k + ': %f, ')
            disp_str = disp_str[:-2]  # drop the trailing ", "
            logger.info((disp_str % (train_epoch, (i + 1), len(train_loader), cur_lr, *log_buffer.output.values())))
            log_buffer.clear()
    return accumulated_iter
def CppExtension(name, sources, *args, **kwargs):
    """Create a ``setuptools.Extension`` preconfigured to build a C++ torch extension.

    Adds torch's include/library paths and the core torch libraries, and
    forces ``language='c++'``.

    Fix: the original extended the caller-supplied ``include_dirs`` /
    ``library_dirs`` / ``libraries`` lists in place, mutating the caller's
    objects; they are now copied first.
    """
    include_dirs = list(kwargs.get('include_dirs', []))
    include_dirs += include_paths()
    kwargs['include_dirs'] = include_dirs
    library_dirs = list(kwargs.get('library_dirs', []))
    library_dirs += library_paths()
    kwargs['library_dirs'] = library_dirs
    libraries = list(kwargs.get('libraries', []))
    # Core libraries every C++ torch extension links against.
    libraries.extend(['c10', 'torch', 'torch_cpu', 'torch_python'])
    kwargs['libraries'] = libraries
    kwargs['language'] = 'c++'
    return setuptools.Extension(name, sources, *args, **kwargs)
def _read_string(f):
    """Read a length-prefixed latin-1 string from the stream ``f``.

    A non-positive length yields the empty string with no payload or
    padding consumed; otherwise the payload is followed by 32-bit
    alignment padding.
    """
    length = _read_long(f)
    if length <= 0:
        return ''
    chars = _read_bytes(f, length).decode('latin1')
    _align_32(f)
    return chars
def maybe_filter_categories_cocoapi(dataset_name, coco_api):
    """Restrict a COCO API object to the categories registered for ``dataset_name``.

    Drops categories/annotations whose ids are unknown to the dataset
    metadata, remaps kept annotation category ids through the contiguous-id
    tables, and rebuilds the COCO index in place.
    """
    meta = MetadataCatalog.get(dataset_name)
    cont_id_2_cat_id = get_contiguous_id_to_category_id_map(meta)
    cat_id_2_cont_id = meta.thing_dataset_id_to_contiguous_id
    # Keep only categories whose id round-trips through both mappings.
    kept_categories = []
    for cat in coco_api.dataset['categories']:
        cat_id = cat['id']
        if cat_id not in cat_id_2_cont_id:
            continue
        cont_id = cat_id_2_cont_id[cat_id]
        if (cont_id in cont_id_2_cat_id) and (cont_id_2_cat_id[cont_id] == cat_id):
            kept_categories.append(cat)
    coco_api.dataset['categories'] = kept_categories
    # Keep annotations with a known category, remapping their ids in place.
    kept_annotations = []
    for ann in coco_api.dataset['annotations']:
        cat_id = ann['category_id']
        if cat_id not in cat_id_2_cont_id:
            continue
        cont_id = cat_id_2_cont_id[cat_id]
        ann['category_id'] = cont_id_2_cat_id[cont_id]
        kept_annotations.append(ann)
    coco_api.dataset['annotations'] = kept_annotations
    coco_api.createIndex()
def create_test_bash_info(commands, model_test_dict, port, script_name, partition):
    """Append the echo banner + slurm test command for one model to ``commands``.

    Appends three entries: an echo banner, a literal newline separator, and
    the full backgrounded test command line.
    """
    config = model_test_dict['config']
    job_name = model_test_dict['job_name']
    checkpoint = model_test_dict['checkpoint']
    work_dir = model_test_dict['work_dir']
    # Fix: renamed from `eval`, which shadowed the builtin.
    eval_metrics = model_test_dict['eval']
    echo_info = f'''
echo '{config}' &'''
    commands.append(echo_info)
    commands.append('\n')
    command_info = f'GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 {script_name} '
    command_info += f'{partition} '
    command_info += f'{job_name} '
    command_info += f'{config} '
    command_info += f'$CHECKPOINT_DIR/{checkpoint} '
    command_info += f'--work-dir {work_dir} '
    command_info += f'--eval {eval_metrics} '
    command_info += f'--cfg-option dist_params.port={port} '
    command_info += ' &'
    commands.append(command_info)
def _filter_layers(layers, include_tags):
    """Return the layers sharing at least one tag with ``include_tags``.

    ``include_tags=None`` disables filtering and returns ``layers`` unchanged.
    """
    if include_tags is None:
        return layers
    wanted = set(include_tags)
    return [layer for layer in layers if wanted.intersection(layer.tags)]
def register_node_type(node_meta_type: str=NodeMetaType.OPTPLAN_NODE, context_stack: OptplanContextStack=GLOBAL_CONTEXT_STACK):
    """Class decorator registering an optplan node schema under its type name.

    The registered creator is a stub raising NotImplementedError until a
    real creator is installed for the node type.
    """
    def decorator(cls):
        # The schema must pin down exactly one concrete `type` value.
        assert len(cls._schema.fields['type'].choices) == 1
        node_type = cls._schema.fields['type'].choices[0]

        def not_implemented(unused_params, unused_workspace):
            raise NotImplementedError(
                'Node type has no creator implemented: {}'.format(node_type))

        context_stack.peek().register_node_type(
            node_meta_type, node_type, cls, not_implemented)
        return cls

    return decorator
class RootBlock(nn.Module):
    """Network root: padded 7x7 stride-2 std-conv, then padded 3x3 stride-2 max-pool."""

    def apply(self, x, width):
        out = fixed_padding(x, 7)
        out = StdConv(out, width, (7, 7), (2, 2), padding='VALID', bias=False,
                      name='conv_root')
        out = fixed_padding(out, 3)
        return nn.max_pool(out, (3, 3), strides=(2, 2), padding='VALID')
class MKException(Exception):
    """Exception carrying an arbitrary payload in ``value``."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.