code
stringlengths
101
5.91M
def load_wav_to_torch(full_path):
    """Read a WAV file and return its samples as a float32 torch tensor.

    Returns a ``(tensor, sampling_rate)`` pair; samples keep their raw
    integer scale (no normalisation is applied here).
    """
    sampling_rate, data = read(full_path)
    samples = torch.FloatTensor(data.astype(np.float32))
    return samples, sampling_rate
def calculate_metrics(threshold, dist, actual_issame):
    """Confusion-matrix rates for a distance threshold.

    A pair is predicted "same" when its distance is strictly below
    ``threshold``.  Rates whose denominator is zero are reported as 0,
    mirroring the original behaviour.

    Returns ``(tpr, fpr, precision, recall, accuracy)``.
    """
    predicted = np.less(dist, threshold)
    not_predicted = np.logical_not(predicted)
    not_actual = np.logical_not(actual_issame)

    tp = np.sum(np.logical_and(predicted, actual_issame))
    fp = np.sum(np.logical_and(predicted, not_actual))
    tn = np.sum(np.logical_and(not_predicted, not_actual))
    fn = np.sum(np.logical_and(not_predicted, actual_issame))

    def _safe_ratio(num, den):
        # Integer 0 (not 0.0) when the denominator is empty, as in the original.
        return 0 if den == 0 else float(num) / float(den)

    true_positive_rate = _safe_ratio(tp, tp + fn)
    false_positive_rate = _safe_ratio(fp, fp + tn)
    precision = _safe_ratio(tp, tp + fp)
    recall = _safe_ratio(tp, tp + fn)
    accuracy = float(tp + tn) / dist.size
    return (true_positive_rate, false_positive_rate, precision, recall, accuracy)
def query_on_voxel(query, feature, min_, max_, use_ste=False, boundary_check=False, ctx=None):
    """Sample ``feature`` at ``query`` positions via Lanczos voxel interpolation.

    Thin wrapper that builds a ``LanczosQueryOnVoxel`` functor for the voxel
    bounds ``[min_, max_]`` and immediately applies it.
    """
    interpolator = LanczosQueryOnVoxel(ctx, min_, max_, use_ste, boundary_check)
    return interpolator(query, feature)
def _simplify_cells(d):
    """Recursively convert ``mat_struct`` values inside ``d`` to plain dicts.

    Values are replaced in place; the (same, mutated) mapping is returned.
    """
    for key in d:
        value = d[key]
        if isinstance(value, mat_struct):
            d[key] = _matstruct_to_dict(value)
        elif _has_struct(value):
            d[key] = _inspect_cell_array(value)
    return d
def decompress(data_blocks):
    """Lazily inflate a sequence of zlib-compressed chunks.

    Yields one decompressed ``bytearray`` per input block, followed by a final
    ``bytearray`` produced by flushing the decompressor.
    """
    inflater = zlib.decompressobj()
    for block in data_blocks:
        yield bytearray(inflater.decompress(block))
    yield bytearray(inflater.flush())
class TwistedAffineIndices(UniqueRepresentation, Set_generic):
    """Facade set of indices ``(root, integer)`` for a twisted affine Cartan type.

    Pairs with an even integer component range over all classical roots and
    simple coroots; odd-level pairs are restricted to the "short" roots
    collected in ``_short_roots`` (with extra doubled roots in type BC).
    """

    def __classcall_private__(cls, cartan_type):
        # Normalise the input and reject anything that is not twisted affine.
        cartan_type = CartanType(cartan_type)
        if ((not cartan_type.is_affine()) or cartan_type.is_untwisted_affine()):
            raise ValueError('the Cartan type must be a twisted affine type')
        return super().__classcall__(cls, cartan_type)

    def __init__(self, cartan_type):
        self._cartan_type = cartan_type
        if (cartan_type.type() == 'BC'):
            # Type BC: work in the relabelled dual of the classical subtype.
            finite_ct = cartan_type.classical().dual()
            n = finite_ct.rank()
            Q = finite_ct.relabel({(n - i): i for i in range(n)}).root_system().root_lattice()
            self._roots = tuple(Q.roots())
            self._ac = tuple(Q.simple_coroots())
            # NOTE(review): CP is never used below -- looks like leftover code.
            CP = cartesian_product(([range(3)] * n))
            if (cartan_type.rank() == 2):
                # Rank 2 (A_2^(2)): every root also contributes its double.
                self._short_roots = (self._roots + tuple(((2 * r) for r in Q.roots())))
            else:
                self._short_roots = (self._roots + tuple(((2 * r) for r in Q.short_roots())))
            self._short_roots += self._ac
            facade = cartesian_product([self._short_roots, ZZ])
        else:
            Q = cartan_type.classical().root_system().root_lattice()
            self._roots = tuple(Q.roots())
            self._ac = tuple(Q.simple_coroots())
            self._short_roots = tuple(Q.short_roots())
            ac = Q.simple_coroots()
            # Coroots of short simple roots also count as "short" indices.
            self._short_roots += tuple([ac[i] for i in Q.index_set() if Q.simple_root(i).is_short_root()])
            facade = cartesian_product([(self._roots + self._ac), ZZ])
        from sage.categories.infinite_enumerated_sets import InfiniteEnumeratedSets
        super().__init__(facade=facade, category=InfiniteEnumeratedSets())

    def __contains__(self, x):
        # Membership: coerce into the facade parent, then check the parity rule.
        if (x not in self._facade_for[0]):
            return False
        x = self._facade_for[0](x)
        return (((x[1] % 2) == 0) or (x[0] in self._short_roots))

    def __iter__(self):
        """Iterate over all indices, level by level over ``ZZ``."""
        if (self._cartan_type.type() == 'BC'):
            finite_ct = self._cartan_type.classical().dual()
            n = finite_ct.rank()
            finite_ct = finite_ct.relabel({(n - i): i for i in range(n)})
        else:
            finite_ct = self._cartan_type.classical()
        # NOTE(review): Q below is computed but never used in this method.
        Q = finite_ct.root_system().root_lattice()
        P = self._facade_for[0]
        for i in ZZ:
            if (i % 2):
                # Odd levels: only short roots appear.
                for r in self._short_roots:
                    (yield P((r, i)))
            else:
                # Even levels: all roots and all simple coroots.
                for r in self._roots:
                    (yield P((r, i)))
                for r in self._ac:
                    (yield P((r, i)))
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet ``BasicBlock``)."""

    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Main path: conv-bn-relu-conv-bn.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU()
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # Optional projection applied to the identity branch when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + shortcut
        return self.relu(out)
def variation_of_information(image0=None, image1=None, *, table=None, ignore_labels=()):
    """Variation of information between two label images.

    Returns ``np.array([H(im1|im0), H(im0|im1)])`` -- the two conditional
    entropies obtained from the contingency tables.
    """
    h0g1, h1g0 = _vi_tables(image0, image1, table=table,
                            ignore_labels=ignore_labels)
    return np.array([h1g0.sum(), h0g1.sum()])
# NOTE(review): this looks like a Dash callback whose decorator lost its
# "@app.callback" prefix during extraction -- as written, the bare
# (Output(...), [Input(...)]) tuple below has no effect.  Confirm against the
# original source before relying on this snippet.
(Output('female-corpus-stats', 'children'),
 [Input('topic-data', 'data'), Input('date-dropdown', 'value')])
def display_female_corpus_stats(data, date_val):
    # Render a Markdown sentence summarising how many articles in the selected
    # month quoted more women than men (count comes from the topic-model params).
    female_corpus_size = data['params']['femaleDominantArticleCount']
    display_text = f''' In {num2str_month(date_val)}, there were {female_corpus_size:,} articles in the corpus that quoted more women than men. '''
    return dcc.Markdown(display_text)
class ArchGenerate(BaseArchGenerate):
    """Derives a concrete (discrete) architecture from relaxed NAS parameters."""

    def __init__(self, super_network, config):
        super(ArchGenerate, self).__init__(super_network, config)

    def derive_archs(self, betas, head_alphas, stack_alphas, if_display=True):
        """Translate relaxed (betas, head_alphas, stack_alphas) into a block list.

        Each derived entry is ``[[in_ch, out_ch], head_op, stack_ops,
        num_stack_ops, stride]``.  The first channel choice (index 0) and the
        final position on the channel path are skipped.
        """
        self.update_arch_params(betas, head_alphas, stack_alphas)
        derived_archs = []
        (ch_path, derived_chs) = self.derive_chs()
        layer_count = 0
        for (i, (ch_idx, ch)) in enumerate(zip(ch_path, derived_chs)):
            if ((ch_idx == 0) or (i == (len(derived_chs) - 1))):
                continue
            block_idx = (ch_idx - 1)
            input_config = self.input_configs[block_idx]
            # Which of the block's candidate inputs the previous path step uses.
            head_id = input_config['in_block_idx'].index(ch_path[(i - 1)])
            head_alpha = self.head_alphas[block_idx][head_id]
            head_op = self.derive_ops(head_alpha, 'head')
            stride = input_config['strides'][input_config['in_block_idx'].index(ch_path[(i - 1)])]
            stack_ops = []
            for stack_alpha in self.stack_alphas[block_idx]:
                stack_op = self.derive_ops(stack_alpha, 'stack')
                # Skip-connections are pruned from the stack.
                if (stack_op != 'skip_connect'):
                    stack_ops.append(stack_op)
                    layer_count += 1
            derived_archs.append([[derived_chs[(i - 1)], ch], head_op, stack_ops, len(stack_ops), stride])
        # NOTE(review): this also counts one (head) layer per derived block on
        # top of the per-stack-op increments above -- presumably intentional
        # (stack layers + head layers); confirm against the paper/upstream.
        layer_count += len(derived_archs)
        if if_display:
            logging.info(('Derived arch: \n' + '|\n'.join(map(str, derived_archs))))
            logging.info('Total {} layers.'.format(layer_count))
        return derived_archs
class rel_pyramid_module(nn.Module):
    """Relation pyramid over FPN features.

    Builds one 1x1 lateral conv and one 3x3 post-hoc conv per backbone stage,
    then in ``forward`` merges the stages sequentially (max-pooling the running
    blob before adding each new lateral) and returns the per-stage outputs in
    reversed (original FPN) order.
    """

    def __init__(self, num_backbone_stages):
        super().__init__()
        fpn_dim = cfg.FPN.DIM
        self.num_backbone_stages = num_backbone_stages
        # 1x1 lateral convs, one per stage; GroupNorm variant when configured.
        self.prd_conv_lateral = nn.ModuleList()
        for i in range(self.num_backbone_stages):
            if cfg.FPN.USE_GN:
                self.prd_conv_lateral.append(nn.Sequential(nn.Conv2d(fpn_dim, fpn_dim, 1, 1, 0, bias=False), nn.GroupNorm(net_utils.get_group_gn(fpn_dim), fpn_dim, eps=cfg.GROUP_NORM.EPSILON)))
            else:
                self.prd_conv_lateral.append(nn.Conv2d(fpn_dim, fpn_dim, 1, 1, 0))
        # 3x3 smoothing convs applied to each merged blob.
        self.posthoc_modules = nn.ModuleList()
        for i in range(self.num_backbone_stages):
            if cfg.FPN.USE_GN:
                self.posthoc_modules.append(nn.Sequential(nn.Conv2d(fpn_dim, fpn_dim, 3, 1, 1, bias=False), nn.GroupNorm(net_utils.get_group_gn(fpn_dim), fpn_dim, eps=cfg.GROUP_NORM.EPSILON)))
            else:
                self.posthoc_modules.append(nn.Conv2d(fpn_dim, fpn_dim, 3, 1, 1))
        self._init_weights()

    def _init_weights(self):
        # Xavier for conv/linear weights with zero bias; BN to identity.
        for m in self.modules():
            if (isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear)):
                mynn.init.XavierFill(m.weight)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, blob_conv):
        # blob_conv is indexed from the end (blob_conv[-1-i]); the running blob
        # is max-pooled (stride 2) before each new lateral is added, so stages
        # are merged from higher to lower spatial resolution -- presumably
        # blob_conv is ordered coarse-to-fine; confirm against the caller.
        rel_lateral_inner_blob = None
        rel_lateral_output_blobs = []
        for i in range(self.num_backbone_stages):
            if (rel_lateral_inner_blob is not None):
                bu = F.max_pool2d(rel_lateral_inner_blob, 2, stride=2)
                rel_lateral_inner_blob = (self.prd_conv_lateral[i](blob_conv[((- 1) - i)]) + bu)
            else:
                rel_lateral_inner_blob = self.prd_conv_lateral[i](blob_conv[((- 1) - i)])
            rel_lateral_output_blobs.append(self.posthoc_modules[i](rel_lateral_inner_blob))
        # Restore the input's stage ordering.
        rel_lateral_output_blobs.reverse()
        return rel_lateral_output_blobs
class CountingIterator(object):
    """Iterator wrapper that tracks how many elements have been consumed.

    Attributes:
        n (int): number of elements consumed so far.
        total (int): expected total number of elements.
    """

    def __init__(self, iterable, start=None, total=None):
        self.iterable = iterable
        self.itr = iter(self)
        # Resume from an explicit start, or from the wrapped iterable's count.
        self.n = getattr(iterable, 'n', 0) if start is None else start
        self.total = self.n + len(iterable) if total is None else total

    def __len__(self):
        return self.total

    def __iter__(self):
        for item in self.iterable:
            if self.n >= self.total:
                raise RuntimeError('Mismatch between actual and expected iterable length. Please report this to the fairseq developers.')
            self.n += 1
            yield item

    def __next__(self):
        return next(self.itr)

    def has_next(self):
        """Whether fewer than ``total`` elements have been consumed."""
        return self.n < len(self)

    def skip(self, num_to_skip):
        """Advance past ``num_to_skip`` elements and return ``self``."""
        next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
        return self

    def take(self, n):
        """Truncate the iterator to at most ``n`` elements in total."""
        self.total = min(self.total, n)
        remaining = max(n - self.n, 0)
        if hasattr(self.iterable, 'take'):
            self.iterable.take(remaining)
        else:
            self.iterable = itertools.islice(self.iterable, remaining)
class InterpretCompilerDirectives(CythonTransform):
    """Parse-tree transform that interprets Cython compiler directives.

    Tracks which names refer to the ``cython`` module, turns directive
    decorators / ``with`` statements / special imports into entries of
    ``self.directives``, and wraps affected nodes in CompilerDirectivesNode.
    """

    # Special attributes of the cython module that map directly to AST nodes.
    unop_method_nodes = {'typeof': ExprNodes.TypeofNode,
                         'operator.address': ExprNodes.AmpersandNode,
                         'operator.dereference': ExprNodes.DereferenceNode,
                         'operator.preincrement': ExprNodes.inc_dec_constructor(True, '++'),
                         'operator.predecrement': ExprNodes.inc_dec_constructor(True, '--'),
                         'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'),
                         'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'),
                         'operator.typeid': ExprNodes.TypeidNode,
                         'address': ExprNodes.AmpersandNode}

    binop_method_nodes = {'operator.comma': ExprNodes.c_binop_constructor(',')}

    # cython.* names that get special compile-time treatment.
    special_methods = set(['declare', 'union', 'struct', 'typedef', 'sizeof',
                           'cast', 'pointer', 'compiled', 'NULL', 'fused_type', 'parallel'])
    special_methods.update(unop_method_nodes)

    valid_parallel_directives = set(['parallel', 'prange', 'threadid'])

    def __init__(self, context, compilation_directive_defaults):
        super(InterpretCompilerDirectives, self).__init__(context)
        self.cython_module_names = set()
        self.directive_names = {'staticmethod': 'staticmethod'}
        self.parallel_directives = {}
        # Start from the global defaults, overridden by command-line defaults.
        directives = copy.deepcopy(Options.get_directive_defaults())
        for (key, value) in compilation_directive_defaults.items():
            directives[_unicode(key)] = copy.deepcopy(value)
        self.directives = directives

    def check_directive_scope(self, pos, directive, scope):
        """Return True if ``directive`` is allowed in ``scope``; report errors."""
        legal_scopes = Options.directive_scopes.get(directive, None)
        if (legal_scopes and (scope not in legal_scopes)):
            self.context.nonfatal_error(PostParseError(pos, ('The %s compiler directive is not allowed in %s scope' % (directive, scope))))
            return False
        else:
            if (directive not in Options.directive_types):
                error(pos, ("Invalid directive: '%s'." % (directive,)))
            return True

    def visit_ModuleNode(self, node):
        # Module-level directive comments (e.g. "# cython: boundscheck=False").
        for key in sorted(node.directive_comments):
            if (not self.check_directive_scope(node.pos, key, 'module')):
                self.wrong_scope_error(node.pos, key, 'module')
                del node.directive_comments[key]
        self.module_scope = node.scope
        self.directives.update(node.directive_comments)
        node.directives = self.directives
        node.parallel_directives = self.parallel_directives
        self.visitchildren(node)
        node.cython_module_names = self.cython_module_names
        return node

    def is_cython_directive(self, name):
        return ((name in Options.directive_types) or (name in self.special_methods) or PyrexTypes.parse_basic_type(name))

    def is_parallel_directive(self, full_name, pos):
        """Record ``full_name`` as a cython.parallel directive if it is one."""
        result = (full_name + '.').startswith('cython.parallel.')
        if result:
            directive = full_name.split('.')
            if (full_name == u'cython.parallel'):
                self.parallel_directives[u'parallel'] = u'cython.parallel'
            elif (full_name == u'cython.parallel.*'):
                for name in self.valid_parallel_directives:
                    self.parallel_directives[name] = (u'cython.parallel.%s' % name)
            elif ((len(directive) != 3) or (directive[(- 1)] not in self.valid_parallel_directives)):
                error(pos, ('No such directive: %s' % full_name))
            # Using cython.parallel requires the threading setup utility code.
            self.module_scope.use_utility_code(UtilityCode.load_cached('InitThreads', 'ModuleSetupCode.c'))
        return result

    def visit_CImportStatNode(self, node):
        # "cimport cython[.x]" -- record module aliases / directive submodules.
        if (node.module_name == u'cython'):
            self.cython_module_names.add((node.as_name or u'cython'))
        elif node.module_name.startswith(u'cython.'):
            if node.module_name.startswith(u'cython.parallel.'):
                error(node.pos, (node.module_name + ' is not a module'))
            if (node.module_name == u'cython.parallel'):
                if (node.as_name and (node.as_name != u'cython')):
                    self.parallel_directives[node.as_name] = node.module_name
                else:
                    self.cython_module_names.add(u'cython')
                    self.parallel_directives[u'cython.parallel'] = node.module_name
                self.module_scope.use_utility_code(UtilityCode.load_cached('InitThreads', 'ModuleSetupCode.c'))
            elif node.as_name:
                self.directive_names[node.as_name] = node.module_name[7:]
            else:
                self.cython_module_names.add(u'cython')
            # Directive cimports are dropped from the tree.
            return None
        return node

    def visit_FromCImportStatNode(self, node):
        # "from cython[.x] cimport ..." -- split directives from real imports.
        if ((not node.relative_level) and ((node.module_name == u'cython') or node.module_name.startswith(u'cython.'))):
            submodule = (node.module_name + u'.')[7:]
            newimp = []
            for (pos, name, as_name, kind) in node.imported_names:
                full_name = (submodule + name)
                qualified_name = (u'cython.' + full_name)
                if self.is_parallel_directive(qualified_name, node.pos):
                    self.parallel_directives[(as_name or name)] = qualified_name
                elif self.is_cython_directive(full_name):
                    self.directive_names[(as_name or name)] = full_name
                    if (kind is not None):
                        self.context.nonfatal_error(PostParseError(pos, 'Compiler directive imports must be plain imports'))
                else:
                    newimp.append((pos, name, as_name, kind))
            if (not newimp):
                return None
            node.imported_names = newimp
        return node

    def visit_FromImportStatNode(self, node):
        # Same as above, for Python-level "from cython[.x] import ...".
        if ((node.module.module_name.value == u'cython') or node.module.module_name.value.startswith(u'cython.')):
            submodule = (node.module.module_name.value + u'.')[7:]
            newimp = []
            for (name, name_node) in node.items:
                full_name = (submodule + name)
                qualified_name = (u'cython.' + full_name)
                if self.is_parallel_directive(qualified_name, node.pos):
                    self.parallel_directives[name_node.name] = qualified_name
                elif self.is_cython_directive(full_name):
                    self.directive_names[name_node.name] = full_name
                else:
                    newimp.append((name, name_node))
            if (not newimp):
                return None
            node.items = newimp
        return node

    def visit_SingleAssignmentNode(self, node):
        # "x = __import__('cython')"-style assignments become cimports.
        if isinstance(node.rhs, ExprNodes.ImportNode):
            module_name = node.rhs.module_name.value
            is_parallel = (module_name + u'.').startswith(u'cython.parallel.')
            if ((module_name != u'cython') and (not is_parallel)):
                return node
            module_name = node.rhs.module_name.value
            as_name = node.lhs.name
            node = Nodes.CImportStatNode(node.pos, module_name=module_name, as_name=as_name)
            node = self.visit_CImportStatNode(node)
        else:
            self.visitchildren(node)
        return node

    def visit_NameNode(self, node):
        # Tag names that refer to the cython module or to a directive.
        if (node.name in self.cython_module_names):
            node.is_cython_module = True
        else:
            directive = self.directive_names.get(node.name)
            if (directive is not None):
                node.cython_attribute = directive
        return node

    def visit_NewExprNode(self, node):
        self.visit(node.cppclass)
        self.visitchildren(node)
        return node

    def try_to_parse_directives(self, node):
        """If ``node`` is a (call to a) directive, return parsed (name, value) pairs.

        Returns a list of directive tuples, or None if ``node`` is not a
        directive expression.
        """
        if isinstance(node, ExprNodes.CallNode):
            self.visit(node.function)
            optname = node.function.as_cython_attribute()
            if optname:
                directivetype = Options.directive_types.get(optname)
                if directivetype:
                    (args, kwds) = node.explicit_args_kwds()
                    directives = []
                    key_value_pairs = []
                    if ((kwds is not None) and (directivetype is not dict)):
                        # Keyword args may themselves be dotted sub-directives.
                        for keyvalue in kwds.key_value_pairs:
                            (key, value) = keyvalue
                            sub_optname = ('%s.%s' % (optname, key.value))
                            if Options.directive_types.get(sub_optname):
                                directives.append(self.try_to_parse_directive(sub_optname, [value], None, keyvalue.pos))
                            else:
                                key_value_pairs.append(keyvalue)
                        if (not key_value_pairs):
                            kwds = None
                        else:
                            kwds.key_value_pairs = key_value_pairs
                        if (directives and (not kwds) and (not args)):
                            return directives
                    directives.append(self.try_to_parse_directive(optname, args, kwds, node.function.pos))
                    return directives
        elif isinstance(node, (ExprNodes.AttributeNode, ExprNodes.NameNode)):
            self.visit(node)
            optname = node.as_cython_attribute()
            if optname:
                directivetype = Options.directive_types.get(optname)
                if (directivetype is bool):
                    # Bare boolean directive means True.
                    arg = ExprNodes.BoolNode(node.pos, value=True)
                    return [self.try_to_parse_directive(optname, [arg], None, node.pos)]
                elif (directivetype is None):
                    return [(optname, None)]
                else:
                    raise PostParseError(node.pos, ("The '%s' directive should be used as a function call." % optname))
        return None

    def try_to_parse_directive(self, optname, args, kwds, pos):
        """Validate one directive's arguments and return its (name, value) pair."""
        if ((optname == 'np_pythran') and (not self.context.cpp)):
            raise PostParseError(pos, ('The %s directive can only be used in C++ mode.' % optname))
        elif (optname == 'exceptval'):
            # exceptval(value, check=bool) needs custom handling.
            arg_error = (len(args) > 1)
            check = True
            if (kwds and kwds.key_value_pairs):
                kw = kwds.key_value_pairs[0]
                if ((len(kwds.key_value_pairs) == 1) and kw.key.is_string_literal and (kw.key.value == 'check') and isinstance(kw.value, ExprNodes.BoolNode)):
                    check = kw.value.value
                else:
                    arg_error = True
            if arg_error:
                raise PostParseError(pos, 'The exceptval directive takes 0 or 1 positional arguments and the boolean keyword "check"')
            return ('exceptval', ((args[0] if args else None), check))
        directivetype = Options.directive_types.get(optname)
        if ((len(args) == 1) and isinstance(args[0], ExprNodes.NoneNode)):
            # Passing None restores the directive's default.
            return (optname, Options.get_directive_defaults()[optname])
        elif (directivetype is bool):
            if ((kwds is not None) or (len(args) != 1) or (not isinstance(args[0], ExprNodes.BoolNode))):
                raise PostParseError(pos, ('The %s directive takes one compile-time boolean argument' % optname))
            return (optname, args[0].value)
        elif (directivetype is int):
            if ((kwds is not None) or (len(args) != 1) or (not isinstance(args[0], ExprNodes.IntNode))):
                raise PostParseError(pos, ('The %s directive takes one compile-time integer argument' % optname))
            return (optname, int(args[0].value))
        elif (directivetype is str):
            if ((kwds is not None) or (len(args) != 1) or (not isinstance(args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)))):
                raise PostParseError(pos, ('The %s directive takes one compile-time string argument' % optname))
            return (optname, str(args[0].value))
        elif (directivetype is type):
            if ((kwds is not None) or (len(args) != 1)):
                raise PostParseError(pos, ('The %s directive takes one type argument' % optname))
            return (optname, args[0])
        elif (directivetype is dict):
            if (len(args) != 0):
                raise PostParseError(pos, ('The %s directive takes no prepositional arguments' % optname))
            return (optname, dict([(key.value, value) for (key, value) in kwds.key_value_pairs]))
        elif (directivetype is list):
            if (kwds and (len(kwds.key_value_pairs) != 0)):
                raise PostParseError(pos, ('The %s directive takes no keyword arguments' % optname))
            return (optname, [str(arg.value) for arg in args])
        elif callable(directivetype):
            if ((kwds is not None) or (len(args) != 1) or (not isinstance(args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)))):
                raise PostParseError(pos, ('The %s directive takes one compile-time string argument' % optname))
            return (optname, directivetype(optname, str(args[0].value)))
        else:
            assert False

    def visit_with_directives(self, node, directives):
        # Visit ``node`` with ``directives`` layered over the current set;
        # wrap the result in a CompilerDirectivesNode when anything changed.
        if (not directives):
            return self.visit_Node(node)
        old_directives = self.directives
        new_directives = dict(old_directives)
        new_directives.update(directives)
        if (new_directives == old_directives):
            return self.visit_Node(node)
        self.directives = new_directives
        retbody = self.visit_Node(node)
        self.directives = old_directives
        if (not isinstance(retbody, Nodes.StatListNode)):
            retbody = Nodes.StatListNode(node.pos, stats=[retbody])
        return Nodes.CompilerDirectivesNode(pos=retbody.pos, body=retbody, directives=new_directives)

    def visit_FuncDefNode(self, node):
        directives = self._extract_directives(node, 'function')
        return self.visit_with_directives(node, directives)

    def visit_CVarDefNode(self, node):
        directives = self._extract_directives(node, 'function')
        for (name, value) in directives.items():
            if (name == 'locals'):
                node.directive_locals = value
            elif (name not in ('final', 'staticmethod')):
                self.context.nonfatal_error(PostParseError(node.pos, ('Cdef functions can only take cython.locals(), staticmethod, or final decorators, got %s.' % name)))
        return self.visit_with_directives(node, directives)

    def visit_CClassDefNode(self, node):
        directives = self._extract_directives(node, 'cclass')
        return self.visit_with_directives(node, directives)

    def visit_CppClassNode(self, node):
        directives = self._extract_directives(node, 'cppclass')
        return self.visit_with_directives(node, directives)

    def visit_PyClassDefNode(self, node):
        directives = self._extract_directives(node, 'class')
        return self.visit_with_directives(node, directives)

    def _extract_directives(self, node, scope_name):
        """Split ``node``'s decorators into directives and real decorators.

        Returns the merged directive dict; rewrites node.decorators so only
        non-directive decorators (plus 'staticmethod', kept in both) remain.
        """
        if (not node.decorators):
            return {}
        directives = []
        realdecs = []
        both = []
        # Decorators are processed bottom-up.
        for dec in node.decorators[::(- 1)]:
            new_directives = self.try_to_parse_directives(dec.decorator)
            if (new_directives is not None):
                for directive in new_directives:
                    if self.check_directive_scope(node.pos, directive[0], scope_name):
                        (name, value) = directive
                        if (self.directives.get(name, object()) != value):
                            directives.append(directive)
                        if (directive[0] == 'staticmethod'):
                            both.append(dec)
                        if ((directive[0] == 'cclass') and (scope_name == 'class')):
                            scope_name = 'cclass'
            else:
                realdecs.append(dec)
        if (realdecs and ((scope_name == 'cclass') or isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode)))):
            raise PostParseError(realdecs[0].pos, 'Cdef functions/classes cannot take arbitrary decorators.')
        node.decorators = (realdecs[::(- 1)] + both[::(- 1)])
        # Merge repeated directives (dicts update, lists extend, else override).
        optdict = {}
        for directive in directives:
            (name, value) = directive
            if (name in optdict):
                old_value = optdict[name]
                if isinstance(old_value, dict):
                    old_value.update(value)
                elif isinstance(old_value, list):
                    old_value.extend(value)
                else:
                    optdict[name] = value
            else:
                optdict[name] = value
        return optdict

    def visit_WithStatNode(self, node):
        # "with cython.directive(...):" blocks; "with nogil/gil:" is special.
        directive_dict = {}
        for directive in (self.try_to_parse_directives(node.manager) or []):
            if (directive is not None):
                if (node.target is not None):
                    self.context.nonfatal_error(PostParseError(node.pos, "Compiler directive with statements cannot contain 'as'"))
                else:
                    (name, value) = directive
                    if (name in ('nogil', 'gil')):
                        node = Nodes.GILStatNode(node.pos, state=name, body=node.body)
                        return self.visit_Node(node)
                    if self.check_directive_scope(node.pos, name, 'with statement'):
                        directive_dict[name] = value
        if directive_dict:
            return self.visit_with_directives(node.body, directive_dict)
        return self.visit_Node(node)
class LinearGRP(T.nn.Linear):
    """``torch.nn.Linear`` whose matmul is replaced by a random-projection variant.

    The projection dimension is controlled by ``proj_dim`` / ``proj_dim_ratio``
    and clamped to ``[proj_dim_min, proj_dim_max]``; ``matmul`` selects the
    projection kind and ``generator`` fixes the randomness.  All the actual
    work is delegated to ``linear_grp`` in ``forward``.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None, proj_dim_ratio: Optional[float]=None, proj_dim: Optional[int]=None, proj_dim_min: Optional[int]=None, proj_dim_max: Optional[int]=None, matmul: MatMulType='gaussian', generator: Optional[T.Generator]=None) -> None:
        super().__init__(in_features, out_features, bias, device, dtype)
        # Stash the projection configuration for use at forward time.
        self.generator = generator
        self.matmul = matmul
        self.proj_dim_ratio = proj_dim_ratio
        self.proj_dim = proj_dim
        self.proj_dim_max = proj_dim_max
        self.proj_dim_min = proj_dim_min

    def forward(self, input: T.Tensor) -> T.Tensor:
        return linear_grp(input, self.weight, self.bias, self.proj_dim_ratio, self.proj_dim, self.proj_dim_max, self.proj_dim_min, self.matmul, self.generator)

    def extra_repr(self) -> str:
        details = [
            super().extra_repr(),
            f'matmul={self.matmul}',
            f'proj_dim={self.proj_dim}',
            f'proj_dim_ratio={self.proj_dim_ratio}',
            f'proj_dim_max={self.proj_dim_max}',
            f'proj_dim_min={self.proj_dim_min}',
        ]
        return ', '.join(details)
class InstallWithExtras(install):
    """``install`` command that also installs the symengine C++ components.

    After the normal install it (1) copies the compiled ``symengine_wrapper``
    extension into the installed package's ``lib`` directory, and (2) runs the
    CMake configure + install steps so native artefacts land under the
    install prefix.
    """

    def run(self) -> None:
        super().run()
        build_ext_obj = self.distribution.get_command_obj('build_ext')
        # Fix: reuse the already-fetched command object (the original looked it
        # up a second time) and compute the platform-specific wrapper filename
        # once instead of twice.
        build_dir = Path(build_ext_obj.build_temp)
        wrapper_name = build_ext_obj.get_ext_filename('symengine_wrapper')
        self.copy_file(
            find_symengine_wrapper(build_dir, wrapper_name),
            Path.cwd() / self.install_platlib / 'symengine' / 'lib' / wrapper_name,
        )
        # Configure and install the CMake-built components into the prefix.
        subprocess.run(['cmake', str(SOURCE_DIR), f'-DCMAKE_INSTALL_PREFIX={self.prefix}'],
                       cwd=build_dir, check=True)
        subprocess.run(['cmake', '--build', '.', '--target', 'install'],
                       cwd=build_dir, check=True)
# NOTE(review): the string literals in this test appear to have been corrupted
# during extraction -- the element beginning "Come join ..." never closes its
# double quote (a URL was probably scrubbed out), so the expected lists are
# syntactically broken.  The code is preserved byte-for-byte below; recover
# the expected strings from the original repository before using this test.
def test_clean_remove_stopwords(df_text: pd.DataFrame) -> None: pipeline = [{'operator': 'remove_stopwords'}] df_clean = clean_text(df_text, 'text', pipeline=pipeline) df_check = df_text.copy() df_check['text'] = ["'ZZZZZ!' IMDb would allow one-word reviews, that's mine would be.", 'cast played Shakespeare.<br /><br />Shakespeare lost.', 'Simon Desert (Simon del desierto) 1965 film directed Luis Bunuel.', "[SPOILERS] think I've seen film bad {acting, script, effects (!), etc...}", "<a href='/festivals/cannes-1968-a-video-essay'>Cannes 1968: video essay</a>", 'Recap thread excellent panel, hosted _NYC ', '#GameOfThrones: Season 8 #Rotten 54% #Tomatometer. deserve be?', "Come join share thoughts week's episode: '123', np.nan, 'NULL'] pipeline_custom = [{'operator': 'remove_stopwords', 'parameters': {'stopwords': {'imdb', 'film'}}}] df_clean_custom = clean_text(df_text, 'text', pipeline=pipeline_custom) df_check_custom = df_text.copy() df_check_custom['text'] = ["'ZZZZZ!' If would allow one-word reviews, that's what mine would be.", 'The cast played Shakespeare.<br /><br />Shakespeare lost.', 'Simon of the Desert (Simon del desierto) is a 1965 directed by Luis Bunuel.', "[SPOILERS] I don't think I've seen a this bad before {acting, script, effects (!), etc...}", "<a href='/festivals/cannes-1968-a-video-essay'>Cannes 1968: A video essay</a>", 'Recap thread for excellent panel, hosted by with _NYC and ', '#GameOfThrones: Season 8 is #Rotten at 54% on the #Tomatometer. But does it deserve to be?', "Come join and share your thoughts on this week's episode: '123', np.nan, 'NULL'] assert df_check.equals(df_clean) assert df_check_custom.equals(df_clean_custom)
def get_setup_nets(key, steps_or_nets, target):
    """Collect setup/exit nets contributed by attributes of steps or nets.

    Objects attached under ``key`` may provide ``setup(init_net)`` and/or
    ``exit(exit_net)`` hooks returning nets/steps (or lists thereof).  Objects
    already consumed (``_setup_used``) or bound to a different ``_setup_target``
    are skipped.  Returns ``(init_nets, exit_nets)``, with the shared
    init/exit nets prepended when any ops were recorded on them.
    """
    init_net = core.Net(key + '/init')
    exit_net = core.Net(key + '/exit')
    init_nets = []
    exit_nets = []

    # Gather attribute objects from every step/net that exposes them.
    objs = []
    for step_or_net in steps_or_nets:
        if hasattr(step_or_net, 'get_all_attributes'):
            objs += step_or_net.get_all_attributes(key)
        elif hasattr(step_or_net, 'get_attributes'):
            objs += step_or_net.get_attributes(key)

    def _collect(produced, acc):
        # Normalise a hook's return value into the accumulator list.
        if isinstance(produced, (list, tuple)):
            acc += produced
        elif isinstance(produced, (core.Net, core.ExecutionStep)):
            acc.append(produced)
        elif produced is not None:
            raise TypeError('Unsupported type for setup: %s' % type(produced))

    for obj in objs:
        if getattr(obj, '_setup_used', False):
            continue
        if hasattr(obj, '_setup_target') and obj._setup_target != target:
            continue
        if hasattr(obj, 'setup'):
            _collect(obj.setup(init_net), init_nets)
            obj._setup_used = True
        if hasattr(obj, 'exit'):
            _collect(obj.exit(exit_net), exit_nets)
            obj._setup_used = True

    if len(init_net.Proto().op) > 0:
        init_nets.insert(0, init_net)
    if len(exit_net.Proto().op) > 0:
        exit_nets.insert(0, exit_net)
    return (init_nets, exit_nets)
class WFRadiationMeshSliceMax(RadiationField):
    # NOTE(review): several things here look inconsistent with the class name --
    # confirm against the upstream glossary before relying on them:
    #   * ``glossary_name`` is .../sliceMax but ``alias`` below says
    #     'mesh.eStart', while both accessors use mesh.eFin; the alias is
    #     presumably a copy-paste from the sliceMin sibling.
    #   * the second ``def value`` silently shadows the first -- upstream this
    #     is likely a @property getter/setter pair whose decorators were lost.
    glossary_name = 'params/Mesh/sliceMax'

    def __init__(self, wf):
        super(WFRadiationMeshSliceMax, self).__init__(wf)
        self.attributes.update({'limits': '[1:LONG_MAX]', 'alias': 'mesh.eStart'})

    def value(self):
        # Getter: upper bound (eFin) of the wavefront mesh slice range.
        return self._wf._srwl_wf.mesh.eFin

    def value(self, val):
        # Setter: note this definition replaces the getter above (see class note).
        self._wf._srwl_wf.mesh.eFin = float(val)
class GiniIndex(BaseMetric):
    """Gini index diversity metric over the recommendation lists.

    Measures how evenly item exposure is distributed across all recommended
    lists; ``eval`` returns ``1 - gini`` so higher means more even exposure.
    """

    def __init__(self, recommendations, config, params, eval_objects):
        super().__init__(recommendations, config, params, eval_objects)
        self._cutoff = self._evaluation_objects.cutoff
        self._num_items = self._evaluation_objects.num_items
        self._item_count = {}   # item -> number of lists recommending it
        self._free_norm = 0     # total number of recommended slots observed

    @staticmethod
    def name():
        """Metric name used in reports/configs.

        Fix: declared as a @staticmethod -- the original ``def name():`` had no
        ``self`` parameter, so calling ``instance.name()`` raised TypeError.
        """
        return 'Gini'

    def __user_gini(self, user_recommendations, cutoff):
        # Accumulate per-item counts for one user's top-``cutoff`` list.
        user_norm = len(user_recommendations[:cutoff])
        self._free_norm += user_norm
        for (i, _) in user_recommendations[:cutoff]:
            self._item_count[i] = (self._item_count.get(i, 0) + 1)

    def eval(self):
        """Compute 1 - Gini of the item-exposure distribution.

        Items never recommended contribute zero counts; they are accounted for
        via the ``num_items - n_recommended_items`` rank offset.
        """
        for (u, u_r) in self._recommendations.items():
            self.__user_gini(u_r, self._cutoff)
        n_recommended_items = len(self._item_count)
        gini = sum([((((2 * ((j + (self._num_items - n_recommended_items)) + 1)) - self._num_items) - 1) * (cs / self._free_norm)) for (j, cs) in enumerate(sorted(self._item_count.values()))])
        gini /= (self._num_items - 1)
        gini = (1 - gini)
        return gini
def ia_minus_iadag_sparse(dimension: int, prefactor: Union[(float, complex, None)]=None) -> csc_matrix:
    """Return ``prefactor * (i*a - i*a^dagger)`` as a sparse matrix.

    ``prefactor`` defaults to 1.0 when omitted; ``dimension`` is the truncated
    Hilbert-space dimension passed to the ladder-operator constructors.
    """
    scale = 1.0 if prefactor is None else prefactor
    return scale * ((1j * annihilation_sparse(dimension)) - (1j * creation_sparse(dimension)))
def convert_element_list(monitor_descriptions: List[MonitorDescription]) -> MonitorDescriptionList:
    """Group monitor descriptions by joiner id into joined descriptions.

    Monitors sharing a ``joiner_id`` must agree on monitor type and on the
    scalar/vector reduction operations; a mismatch raises ``ValueError``.
    """
    grouped = {}
    for desc in monitor_descriptions:
        existing = grouped.get(desc.joiner_id)
        if existing is None:
            # First monitor for this joiner id seeds the joined description.
            grouped[desc.joiner_id] = JoinedMonitorDescription(joiner_id=desc.joiner_id, monitor_names=[desc.monitor.name], monitor_type=desc.monitor_type, scalar_operation=desc.scalar_operation, vector_operation=desc.vector_operation)
            continue
        # Consistency checks against the already-grouped entry.
        if (not (desc.monitor_type == existing.monitor_type)):
            raise ValueError('Monitor type for ' + desc.monitor.name + ' is inconsistent for joiner id ' + desc.joiner_id)
        if (not (desc.scalar_operation == existing.scalar_operation)):
            raise ValueError('Scalar operation for ' + desc.monitor.name + ' is inconsistent for joiner id ' + desc.joiner_id)
        if (not (desc.vector_operation == existing.vector_operation)):
            raise ValueError('Vector operation for ' + desc.monitor.name + ' is inconsistent for joiner id ' + desc.joiner_id)
        existing.monitor_names.append(desc.monitor.name)
    return MonitorDescriptionList(monitor_list=list(grouped.values()))
class VGG(nn.Module):
    """VGG classifier head over a caller-supplied convolutional trunk.

    ``features`` must output 512 channels; adaptive average pooling fixes the
    spatial size at 7x7, so any input resolution is accepted.
    """

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        # Pool to a fixed 7x7 map so the classifier works for any input size.
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.classifier(x)

    def _initialize_weights(self):
        """Kaiming init for convs, N(0, 0.01) for linears, unit-scale BN."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
def mask_and_save_to_dicom(dcm_path, args, filename):
    """Mask one DICOM's pixel data, anonymize it, and save it.

    Files whose masking fails are diverted to ``<savepath>/excluded`` (and the
    error is appended to ``args.errorfiles``); successes go to
    ``<savepath>/included``.  Returns ``(metadata, [saved_basename])``.
    """
    dicom = Dicom(dcm_path)
    metadata = dicom.metadata()
    included_path = os.path.join(args.savepath, 'included')
    excluded_path = os.path.join(args.savepath, 'excluded')
    # Prefix the output file name with a per-patient image number.
    filename = number_image(filename, metadata['patient_name'])
    outpath = os.path.join(included_path, filename)
    try:
        dicom.mask_pixel_array(crop=args.crop, resized=False, grayscale=args.grayscale, exclude_doppler=args.exclude_doppler)
    except (ValueError, RuntimeError, AttributeError, IndexError) as ve:
        # Best-effort: log the failure and route the file to "excluded"
        # instead of aborting the batch.
        with open(args.errorfiles, 'a+') as fp:
            fp.write((((dcm_path + ': ') + str(ve)) + '\n'))
        outpath = os.path.join(excluded_path, filename)
    dicom.anonymize()
    dicom.save(outpath)
    return (metadata, [os.path.basename(outpath)])
class Algorithm(metaclass=ABCMeta):
    """Interface for differentially-private classification algorithms.

    Concrete algorithms override :meth:`run_classification` and :meth:`name`;
    the base implementations return ``NotImplemented``.
    """

    @staticmethod
    def run_classification(x, y, epsilon, delta, lambda_param, learning_rate, num_iters):
        """Run the algorithm on features ``x`` / labels ``y`` under (epsilon, delta)-DP.

        Fix: declared as a @staticmethod -- the original had no ``self``
        parameter, so calling it on an instance silently bound ``x`` to the
        instance and shifted every argument.
        """
        return NotImplemented

    def name(self):
        """Human-readable algorithm name."""
        return NotImplemented
class ExcludeFeatures():
    """Callable that drops the named features from an input mapping."""

    def __init__(self, feature_names):
        self.feature_names = feature_names  # collection of keys to drop

    def __call__(self, inputs):
        """Return a new dict with every key in ``feature_names`` removed."""
        return {key: value for key, value in inputs.items() if key not in self.feature_names}
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
    # Auto-generated (pybindgen) binding registration for
    # ns3::DefaultDeleter<ns3::AttributeAccessor>: default constructor,
    # copy constructor, and the static Delete(AttributeAccessor*) method.
    # Do not hand-edit; regenerate from the ns-3 API scan instead.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')])
    cls.add_method('Delete', 'void', [param('ns3::AttributeAccessor *', 'object')], is_static=True)
    return
def cal_rouge(evaluated_ngrams, reference_ngrams):
    """Compute ROUGE overlap scores between two sets of n-grams.

    Returns a dict with keys 'f' (F1), 'p' (precision) and 'r' (recall).
    """
    n_reference = len(reference_ngrams)
    n_evaluated = len(evaluated_ngrams)
    n_overlap = len(evaluated_ngrams.intersection(reference_ngrams))
    precision = (n_overlap / n_evaluated) if n_evaluated else 0.0
    recall = (n_overlap / n_reference) if n_reference else 0.0
    # The epsilon keeps the division defined when precision + recall == 0.
    f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-08))
    return {'f': f1_score, 'p': precision, 'r': recall}
def down_sample(x, scale_factor_h, scale_factor_w):
    """Nearest-neighbour downsample of an NHWC tensor by integer factors
    along height and width (floor division of the static shape)."""
    shape = x.get_shape().as_list()
    target_h = shape[1] // scale_factor_h
    target_w = shape[2] // scale_factor_w
    return tf.image.resize_nearest_neighbor(x, size=[target_h, target_w])
class TwitterProcessor(QueryNERProcessor):
    """Processor for the Twitter NER dataset."""

    def get_labels(self):
        # Entity classes (person, location, organization) plus the outside tag.
        return ['PER', 'LOC', 'ORG', 'O']
class PositionWiseFeedForwardNet(nn.Module):
    """Position-wise feed-forward network applied identically at every
    sequence position ("Attention Is All You Need").

    Two variants, selected by ``ffnet_style``:
      * 'ff'   — Linear -> Dropout -> ReLU -> Linear -> Dropout
      * 'conv' — two kernel-size-1 Conv1d layers with a ReLU in between

    Args:
        d_model: input/output dimensionality.
        d_ff: hidden dimensionality.
        dropout_p: dropout probability (used by the 'ff' variant).
        ffnet_style: 'ff' or 'conv' (case-insensitive).

    Raises:
        ValueError: if ``ffnet_style`` is not a supported mode.
    """

    def __init__(self, d_model: int = 512, d_ff: int = 2048, dropout_p: float = 0.3, ffnet_style: str = 'ff') -> None:
        super(PositionWiseFeedForwardNet, self).__init__()
        self.ffnet_style = ffnet_style.lower()
        if self.ffnet_style == 'ff':
            self.feed_forward = nn.Sequential(
                Linear(d_model, d_ff),
                nn.Dropout(dropout_p),
                nn.ReLU(),
                Linear(d_ff, d_model),
                nn.Dropout(dropout_p),
            )
        elif self.ffnet_style == 'conv':
            self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
            self.relu = nn.ReLU()
            self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        else:
            # BUG FIX: previously formatted the nonexistent attribute
            # `self.mode`, which raised AttributeError instead of the
            # intended ValueError.
            raise ValueError('Unsupported mode: {0}'.format(self.ffnet_style))

    def forward(self, inputs: Tensor) -> Tensor:
        """(batch, time, d_model) -> (batch, time, d_model)."""
        if self.ffnet_style == 'conv':
            # Conv1d expects channels-first, so transpose around the convs.
            output = self.conv1(inputs.transpose(1, 2))
            output = self.relu(output)
            return self.conv2(output).transpose(1, 2)
        return self.feed_forward(inputs)
class BDFuncType(Enum):
    """Function-type codes for BD instructions.

    UNKNOWN marks an unrecognized code; the remaining values are a fixed
    numeric encoding — do not renumber or reorder members.
    """
    UNKNOWN = (- 1)
    CONV_NEURON = 0
    DEPTHWISE_OR_POOLING = 1
    FC = 2
    TENSOR_ARITHMETIC = 3
    FC2 = 4
    CONV_CORRELATION = 5
    TABLE_LOOKUP = 6
    MD_SUM = 7
    MD_SCALAR = 8
    MD_SFU = 9
    MD_LINEAR = 10
    LOCALMEM_ARANGE = 11
    DECOMPRESS = 12
    MD_CMP = 13
    VECTOR_CORRELATION = 14
def _parse_inputs(actual: Any, expected: Any, *, allow_subclasses: bool) -> Tuple[(Optional[_TestingErrorMeta], Optional[Union[(_TensorPair, List, Dict)]])]:
    """Recursively pair up `actual` and `expected` for comparison.

    Sequences (except strings) are compared element-wise and mappings
    key-wise; everything else is delegated to _to_tensor_pair.  Returns
    (error_meta, pairs): exactly one of the two is non-None.
    """
    error_meta: Optional[_TestingErrorMeta]
    if (isinstance(actual, collections.abc.Sequence) and (not isinstance(actual, str)) and isinstance(expected, collections.abc.Sequence) and (not isinstance(expected, str))):
        actual_len = len(actual)
        expected_len = len(expected)
        if (actual_len != expected_len):
            error_meta = _TestingErrorMeta(AssertionError, f'The length of the sequences mismatch: {actual_len} != {expected_len}')
            return (error_meta, None)
        pair_list = []
        for idx in range(actual_len):
            (error_meta, pair) = _parse_inputs(actual[idx], expected[idx], allow_subclasses=allow_subclasses)
            if error_meta:
                # Prepend the failing index to the error trail and bail out.
                error_meta = error_meta.amend_msg(postfix=f'''
{_SEQUENCE_MSG_FMTSTR.format(idx)}''')
                return (error_meta, None)
            pair_list.append(pair)
        # for/else: only reached when every element paired successfully.
        else:
            return (None, pair_list)
    elif (isinstance(actual, collections.abc.Mapping) and isinstance(expected, collections.abc.Mapping)):
        actual_keys = set(actual.keys())
        expected_keys = set(expected.keys())
        if (actual_keys != expected_keys):
            missing_keys = (expected_keys - actual_keys)
            additional_keys = (actual_keys - expected_keys)
            error_meta = _TestingErrorMeta(AssertionError, f'''The keys of the mappings do not match:
Missing keys in the actual mapping: {sorted(missing_keys)}
Additional keys in the actual mapping: {sorted(additional_keys)}''')
            return (error_meta, None)
        pair_dict = {}
        # Sorted for a deterministic traversal (and error) order.
        for key in sorted(actual_keys):
            (error_meta, pair) = _parse_inputs(actual[key], expected[key], allow_subclasses=allow_subclasses)
            if error_meta:
                error_meta = error_meta.amend_msg(postfix=f'''
{_MAPPING_MSG_FMTSTR.format(key)}''')
                return (error_meta, None)
            pair_dict[key] = pair
        else:
            return (None, pair_dict)
    else:
        # Leaf: let the tensor-pair constructor decide comparability.
        return _to_tensor_pair(actual, expected, allow_subclasses=allow_subclasses)
def add_code_sample_docstrings(*docstr, processor_class=None, checkpoint=None, output_type=None, config_class=None, mask='[MASK]', qa_target_start_index=14, qa_target_end_index=15, model_cls=None, modality=None, expected_output=None, expected_loss=None, real_checkpoint=None):
    """Decorator factory that appends a ready-to-run usage example to a model
    method's docstring.

    The example template is chosen from the class name (task suffix and
    TF/Flax/PyTorch prefix) and formatted with the supplied checkpoint,
    processor class, expected outputs, etc.
    """
    def docstring_decorator(fn):
        model_class = (fn.__qualname__.split('.')[0] if (model_cls is None) else model_cls)
        # Framework is inferred from the class-name prefix.
        if (model_class[:2] == 'TF'):
            sample_docstrings = TF_SAMPLE_DOCSTRINGS
        elif (model_class[:4] == 'Flax'):
            sample_docstrings = FLAX_SAMPLE_DOCSTRINGS
        else:
            sample_docstrings = PT_SAMPLE_DOCSTRINGS
        # 'true': '{true}' survives .format() as a literal placeholder.
        doc_kwargs = {'model_class': model_class, 'processor_class': processor_class, 'checkpoint': checkpoint, 'mask': mask, 'qa_target_start_index': qa_target_start_index, 'qa_target_end_index': qa_target_end_index, 'expected_output': expected_output, 'expected_loss': expected_loss, 'real_checkpoint': real_checkpoint, 'fake_checkpoint': checkpoint, 'true': '{true}'}
        # Task dispatch by substring of the class name; order matters —
        # more specific checks must precede the generic 'Model' fallbacks.
        if ((('SequenceClassification' in model_class) or ('AudioClassification' in model_class)) and (modality == 'audio')):
            code_sample = sample_docstrings['AudioClassification']
        elif ('SequenceClassification' in model_class):
            code_sample = sample_docstrings['SequenceClassification']
        elif ('QuestionAnswering' in model_class):
            code_sample = sample_docstrings['QuestionAnswering']
        elif ('TokenClassification' in model_class):
            code_sample = sample_docstrings['TokenClassification']
        elif ('MultipleChoice' in model_class):
            code_sample = sample_docstrings['MultipleChoice']
        elif (('MaskedLM' in model_class) or (model_class in ['FlaubertWithLMHeadModel', 'XLMWithLMHeadModel'])):
            code_sample = sample_docstrings['MaskedLM']
        elif (('LMHead' in model_class) or ('CausalLM' in model_class)):
            code_sample = sample_docstrings['LMHead']
        elif ('CTC' in model_class):
            code_sample = sample_docstrings['CTC']
        elif ('AudioFrameClassification' in model_class):
            code_sample = sample_docstrings['AudioFrameClassification']
        elif (('XVector' in model_class) and (modality == 'audio')):
            code_sample = sample_docstrings['AudioXVector']
        elif (('Model' in model_class) and (modality == 'audio')):
            code_sample = sample_docstrings['SpeechBaseModel']
        elif (('Model' in model_class) and (modality == 'vision')):
            code_sample = sample_docstrings['VisionBaseModel']
        elif (('Model' in model_class) or ('Encoder' in model_class)):
            code_sample = sample_docstrings['BaseModel']
        elif ('ImageClassification' in model_class):
            code_sample = sample_docstrings['ImageClassification']
        else:
            raise ValueError(f"Docstring can't be built for model {model_class}")
        code_sample = filter_outputs_from_example(code_sample, expected_output=expected_output, expected_loss=expected_loss)
        if (real_checkpoint is not None):
            # The example uses a small fake checkpoint; warn the reader.
            code_sample = (FAKE_MODEL_DISCLAIMER + code_sample)
        func_doc = ((fn.__doc__ or '') + ''.join(docstr))
        output_doc = ('' if (output_type is None) else _prepare_output_docstrings(output_type, config_class))
        built_doc = code_sample.format(**doc_kwargs)
        fn.__doc__ = ((func_doc + output_doc) + built_doc)
        return fn
    return docstring_decorator
class TSTestDataset(Dataset):
    """Minimal test-time dataset: wraps an indexable collection of samples
    and returns them unchanged (no labels, no transforms)."""

    def __init__(self, x):
        # The underlying sequence of samples.
        self.x = x

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        sample = self.x[idx]
        return sample
class ModularPolynomialDatabase():
    """Read-only access to a database of modular polynomials, keyed by level.

    The `model` attribute (set elsewhere) selects the family: 'Cls'
    (classical), 'Atk' (Atkin), 'Eta' (Dedekind eta), with 'Crr' suffixes
    for correspondence polynomials.
    """

    def _dbpath(self, level):
        # Relative path of the compressed coefficient file for `level`.
        return ('PolMod/%s/pol.%03d.dbz' % (self.model, level))

    def __repr__(self):
        if self.model.startswith('Cls'):
            head = 'Classical'
        elif self.model.startswith('Atk'):
            head = 'Atkin'
        elif self.model.startswith('Eta'):
            head = 'Dedekind eta'
        if self.model.endswith('Crr'):
            poly = 'correspondence'
        else:
            poly = 'polynomial'
        return ('%s modular %s database' % (head, poly))

    def __getitem__(self, level):
        """Return the modular polynomial of the given level as a bivariate
        polynomial over ZZ (variables 'j' for 'Cls', 'x,j' otherwise)."""
        from sage.rings.integer import Integer
        from sage.rings.integer_ring import IntegerRing
        from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
        if (self.model in ('Atk', 'Eta')):
            level = Integer(level)
            if (not level.is_prime()):
                raise TypeError(('Argument level (= %s) must be prime.' % level))
        elif (self.model in ('AtkCrr', 'EtaCrr')):
            N = Integer(level[0])
            if (N not in (2, 3, 5, 7, 13)):
                # NOTE(review): the message says "must be prime" but the
                # actual constraint is membership in (2, 3, 5, 7, 13) —
                # consider rewording.
                raise TypeError(('Argument level (= %s) must be prime.' % N))
        modpol = self._dbpath(level)
        coeff_list = _dbz_to_integer_list(modpol)
        if (self.model == 'Cls'):
            P = PolynomialRing(IntegerRing(), 2, 'j')
        else:
            P = PolynomialRing(IntegerRing(), 2, 'x,j')
        poly = {}
        if (self.model == 'Cls'):
            # Level 1 is the trivial relation j0 - j1.
            if (level == 1):
                return P({(1, 0): 1, (0, 1): (- 1)})
            # Classical modular polynomials are symmetric; the database
            # stores only one triangle, so mirror each coefficient.
            for cff in coeff_list:
                i = cff[0]
                j = cff[1]
                poly[(i, j)] = Integer(cff[2])
                if (i != j):
                    poly[(j, i)] = Integer(cff[2])
        else:
            for cff in coeff_list:
                poly[(cff[0], cff[1])] = Integer(cff[2])
        return P(poly)
def phi_square_calc(chi_square, POP):
    """Phi-squared: the chi-squared statistic normalized by population size.

    Returns the string 'None' when the value is undefined (e.g. POP == 0),
    matching the library-wide convention for unavailable statistics.
    """
    try:
        result = chi_square / POP
    except Exception:
        return 'None'
    return result
def _to_polynomials(lf, R):
    # Convert a list of elements supported on partitions into polynomials in
    # v1..vn over R, where n is the largest part appearing in any support
    # partition (at least 1); exponents come from partition exponent vectors.
    n = max((max((part[0] for part in f.support() if part), default=0) for f in lf))
    n = max(n, 1)
    P = PolynomialRing(R, [('v%s' % a) for a in range(1, (n + 1))])
    if (n == 1):
        # Univariate ring: a 1-entry exponent vector must be unpacked to an int.
        return [P({part.to_exp(n)[0]: c for (part, c) in f}) for f in lf]
    return [P({tuple(part.to_exp(n)): c for (part, c) in f}) for f in lf]
class DefaultProcessingUnitConfig(dict, ProcessingUnitConfig):
    """Default ProcessingUnitConfig implementation backed directly by dict."""

    def unit_name(self):
        return self['unit_name']

    def set_unit_name(self, value):
        self['unit_name'] = value

    def to_dict(self):
        # The object *is* a dict, so it serializes as itself.
        return self

    def from_dict(cls, obj_dict):
        # NOTE(review): takes `cls` but lacks @classmethod — an instance
        # call would pass the instance as `cls`. Either the decorator was
        # lost or callers invoke it on the class explicitly; confirm before
        # changing.
        return cls(obj_dict)
.parametrize('param_distributions, expected_n_candidates', [({'a': [1, 2]}, 2), ({'a': randint(1, 3)}, 10)])
# NOTE(review): the decorator above is truncated — it should read
# @pytest.mark.parametrize(...); restore the missing prefix when merging.
def test_random_search_discrete_distributions(param_distributions, expected_n_candidates):
    """HalvingRandomSearchCV should enumerate a discrete grid exhaustively
    (2 candidates here) but draw n_candidates samples from a distribution."""
    n_samples = 1024
    (X, y) = make_classification(n_samples=n_samples, random_state=0)
    base_estimator = FastClassifier()
    sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10)
    sh.fit(X, y)
    # The first iteration's candidate count reflects the sampling behaviour.
    assert (sh.n_candidates_[0] == expected_n_candidates)
def mock_latex_file():
    """Write a minimal LaTeX document wrapping `plain_text_str` to a
    temporary .tex file and return its path.

    The file is created with delete=False, so the caller is responsible
    for removing it.
    """
    with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.tex') as f:
        # BUG FIX: the previous non-raw f-string interpreted the '\b' of
        # '\begin' as a backspace escape, corrupting the LaTeX. A raw
        # f-string keeps every backslash literal.
        latex_str = rf'\documentclass{{article}}\begin{{document}}{plain_text_str}\end{{document}}'
        f.write(latex_str)
    return f.name
_model
# NOTE(review): the line above is a truncated decorator (most likely
# @register_model); restore it when merging.
def regnetx_040(pretrained=False, **kwargs):
    """RegNetX-4.0GF; delegates to the generic _regnet constructor."""
    return _regnet('regnetx_040', pretrained, **kwargs)
class AlgebraMorphism(ModuleMorphismByLinearity):
    """Algebra (anti-)morphism defined by its values on generators and
    extended multiplicatively and linearly."""

    def __init__(self, domain, on_generators, position=0, codomain=None, category=None, anti=False):
        # Only position 0 and an explicit codomain are supported.
        assert (position == 0)
        assert (codomain is not None)
        if (category is None):
            # An anti-morphism is not an algebra morphism, so it only lives
            # in the module category.
            if anti:
                category = ModulesWithBasis(domain.base_ring())
            else:
                category = AlgebrasWithBasis(domain.base_ring())
        self._anti = anti
        self._on_generators = on_generators
        ModuleMorphismByLinearity.__init__(self, domain=domain, codomain=codomain, position=position, category=category)

    def __eq__(self, other):
        # Structural equality on every defining attribute.
        return ((self.__class__ is other.__class__) and (self.parent() == other.parent()) and (self._zero == other._zero) and (self._on_generators == other._on_generators) and (self._position == other._position) and (self._is_module_with_basis_over_same_base_ring == other._is_module_with_basis_over_same_base_ring))

    def __ne__(self, other):
        return (not (self == other))

    def _on_basis(self, c):
        # An anti-morphism reverses the word before mapping each generator.
        if self._anti:
            c = reversed(c)
        return self.codomain().prod((self._on_generators(i) for i in c))
class Seq2SeqModelCaffe2(object):
    """Caffe2 sequence-to-sequence model (embedding encoder/decoder with
    optional attention), supporting single-CPU and multi-GPU data-parallel
    training plus a separate forward-only net for evaluation."""

    def _build_model(self, init_params):
        """Construct the training net (with gradients and clipped updates)
        and a parameter-sharing forward-only net."""
        model = Seq2SeqModelHelper(init_params=init_params)
        self._build_shared(model)
        self._build_embeddings(model)
        forward_model = Seq2SeqModelHelper(init_params=init_params)
        self._build_shared(forward_model)
        self._build_embeddings(forward_model)
        if (self.num_gpus == 0):
            loss_blobs = self.model_build_fun(model)
            model.AddGradientOperators(loss_blobs)
            self.norm_clipped_grad_update(model, scope='norm_clipped_grad_update')
            self.forward_model_build_fun(forward_model)
        else:
            # Each GPU gets an equal slice of the batch.
            assert ((self.batch_size % self.num_gpus) == 0)
            data_parallel_model.Parallelize_GPU(forward_model, input_builder_fun=(lambda m: None), forward_pass_builder_fun=self.forward_model_build_fun, param_update_builder_fun=None, devices=list(range(self.num_gpus)))

            def clipped_grad_update_bound(model):
                self.norm_clipped_grad_update(model, scope='norm_clipped_grad_update')
            data_parallel_model.Parallelize_GPU(model, input_builder_fun=(lambda m: None), forward_pass_builder_fun=self.model_build_fun, param_update_builder_fun=clipped_grad_update_bound, devices=list(range(self.num_gpus)))
        # Sparse (embedding) gradients update on CPU in both modes.
        self.norm_clipped_sparse_grad_update(model, scope='norm_clipped_sparse_grad_update')
        self.model = model
        self.forward_net = forward_model.net

    def _build_shared(self, model):
        # CPU-resident scalar parameters shared by all nets.
        optimizer_params = self.model_params['optimizer_params']
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
            self.learning_rate = model.AddParam(name='learning_rate', init_value=float(optimizer_params['learning_rate']), trainable=False)
            self.global_step = model.AddParam(name='global_step', init_value=0, trainable=False)
            self.start_time = model.AddParam(name='start_time', init_value=time.time(), trainable=False)

    def _build_embeddings(self, model):
        # Embeddings are initialized uniformly in [-sqrt(3), sqrt(3)] and
        # kept on CPU (their gradients are sparse).
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
            sqrt3 = math.sqrt(3)
            self.encoder_embeddings = model.param_init_net.UniformFill([], 'encoder_embeddings', shape=[self.source_vocab_size, self.model_params['encoder_embedding_size']], min=(- sqrt3), max=sqrt3)
            model.params.append(self.encoder_embeddings)
            self.decoder_embeddings = model.param_init_net.UniformFill([], 'decoder_embeddings', shape=[self.target_vocab_size, self.model_params['decoder_embedding_size']], min=(- sqrt3), max=sqrt3)
            model.params.append(self.decoder_embeddings)

    def model_build_fun(self, model, forward_only=False, loss_scale=None):
        """Add encoder, decoder, output projection and loss ops to `model`;
        returns the list of loss blobs (per-batch weighted scalar)."""
        encoder_inputs = model.net.AddExternalInput((workspace.GetNameScope() + 'encoder_inputs'))
        encoder_lengths = model.net.AddExternalInput((workspace.GetNameScope() + 'encoder_lengths'))
        decoder_inputs = model.net.AddExternalInput((workspace.GetNameScope() + 'decoder_inputs'))
        decoder_lengths = model.net.AddExternalInput((workspace.GetNameScope() + 'decoder_lengths'))
        targets = model.net.AddExternalInput((workspace.GetNameScope() + 'targets'))
        target_weights = model.net.AddExternalInput((workspace.GetNameScope() + 'target_weights'))
        attention_type = self.model_params['attention']
        assert (attention_type in ['none', 'regular', 'dot'])
        (encoder_outputs, weighted_encoder_outputs, final_encoder_hidden_states, final_encoder_cell_states, encoder_units_per_layer) = seq2seq_util.build_embedding_encoder(model=model, encoder_params=self.encoder_params, num_decoder_layers=len(self.model_params['decoder_layer_configs']), inputs=encoder_inputs, input_lengths=encoder_lengths, vocab_size=self.source_vocab_size, embeddings=self.encoder_embeddings, embedding_size=self.model_params['encoder_embedding_size'], use_attention=(attention_type != 'none'), num_gpus=self.num_gpus)
        (decoder_outputs, decoder_output_size) = seq2seq_util.build_embedding_decoder(model, decoder_layer_configs=self.model_params['decoder_layer_configs'], inputs=decoder_inputs, input_lengths=decoder_lengths, encoder_lengths=encoder_lengths, encoder_outputs=encoder_outputs, weighted_encoder_outputs=weighted_encoder_outputs, final_encoder_hidden_states=final_encoder_hidden_states, final_encoder_cell_states=final_encoder_cell_states, encoder_units_per_layer=encoder_units_per_layer, vocab_size=self.target_vocab_size, embeddings=self.decoder_embeddings, embedding_size=self.model_params['decoder_embedding_size'], attention_type=attention_type, forward_only=False, num_gpus=self.num_gpus)
        output_logits = seq2seq_util.output_projection(model=model, decoder_outputs=decoder_outputs, decoder_output_size=decoder_output_size, target_vocab_size=self.target_vocab_size, decoder_softmax_size=self.model_params['decoder_softmax_size'])
        # Flatten targets/weights so the softmax loss is per word.
        (targets, _) = model.net.Reshape([targets], ['targets', 'targets_old_shape'], shape=[(- 1)])
        (target_weights, _) = model.net.Reshape([target_weights], ['target_weights', 'target_weights_old_shape'], shape=[(- 1)])
        (_, loss_per_word) = model.net.SoftmaxWithLoss([output_logits, targets, target_weights], ['OutputProbs_INVALID', 'loss_per_word'], only_loss=True)
        # Convert mean-per-word loss into a total, then scale by 1/batch.
        num_words = model.net.SumElements([target_weights], 'num_words')
        total_loss_scalar = model.net.Mul([loss_per_word, num_words], 'total_loss_scalar')
        total_loss_scalar_weighted = model.net.Scale([total_loss_scalar], 'total_loss_scalar_weighted', scale=(1.0 / self.batch_size))
        return [total_loss_scalar_weighted]

    def forward_model_build_fun(self, model, loss_scale=None):
        # NOTE(review): forward_only=True is passed here but model_build_fun
        # hard-codes forward_only=False in its decoder call — confirm intended.
        return self.model_build_fun(model=model, forward_only=True, loss_scale=loss_scale)

    def _calc_norm_ratio(self, model, params, scope, ONE):
        """Compute clip_norm / max(global_grad_norm, clip_norm) for gradient
        norm clipping."""
        with core.NameScope(scope):
            grad_squared_sums = []
            for (i, param) in enumerate(params):
                logger.info(param)
                # Sparse gradients contribute via their .values blob.
                grad = (model.param_to_grad[param] if (not isinstance(model.param_to_grad[param], core.GradientSlice)) else model.param_to_grad[param].values)
                grad_squared = model.net.Sqr([grad], 'grad_{}_squared'.format(i))
                grad_squared_sum = model.net.SumElements(grad_squared, 'grad_{}_squared_sum'.format(i))
                grad_squared_sums.append(grad_squared_sum)
            grad_squared_full_sum = model.net.Sum(grad_squared_sums, 'grad_squared_full_sum')
            global_norm = model.net.Pow(grad_squared_full_sum, 'global_norm', exponent=0.5)
            clip_norm = model.param_init_net.ConstantFill([], 'clip_norm', shape=[], value=float(self.model_params['max_gradient_norm']))
            max_norm = model.net.Max([global_norm, clip_norm], 'max_norm')
            norm_ratio = model.net.Div([clip_norm, max_norm], 'norm_ratio')
            return norm_ratio

    def _apply_norm_ratio(self, norm_ratio, model, params, learning_rate, scope, ONE):
        # SGD step with the clipped effective rate -lr * norm_ratio; sparse
        # gradients use ScatterWeightedSum on their indexed rows only.
        for param in params:
            param_grad = model.param_to_grad[param]
            nlr = model.net.Negative([learning_rate], 'negative_learning_rate')
            with core.NameScope(scope):
                update_coeff = model.net.Mul([nlr, norm_ratio], 'update_coeff', broadcast=1)
            if isinstance(param_grad, core.GradientSlice):
                param_grad_values = param_grad.values
                model.net.ScatterWeightedSum([param, ONE, param_grad.indices, param_grad_values, update_coeff], param)
            else:
                model.net.WeightedSum([param, ONE, param_grad, update_coeff], param)

    def norm_clipped_grad_update(self, model, scope):
        """Norm-clipped SGD update for all dense parameters."""
        if (self.num_gpus == 0):
            learning_rate = self.learning_rate
        else:
            learning_rate = model.CopyCPUToGPU(self.learning_rate, 'LR')
        params = []
        for param in model.GetParams(top_scope=True):
            if (param in model.param_to_grad):
                if (not isinstance(model.param_to_grad[param], core.GradientSlice)):
                    params.append(param)
        ONE = model.param_init_net.ConstantFill([], 'ONE', shape=[1], value=1.0)
        logger.info('Dense trainable variables: ')
        norm_ratio = self._calc_norm_ratio(model, params, scope, ONE)
        self._apply_norm_ratio(norm_ratio, model, params, learning_rate, scope, ONE)

    def norm_clipped_sparse_grad_update(self, model, scope):
        """Norm-clipped SGD update for sparse (embedding) parameters; always
        uses the CPU-resident learning rate."""
        learning_rate = self.learning_rate
        params = []
        for param in model.GetParams(top_scope=True):
            if (param in model.param_to_grad):
                if isinstance(model.param_to_grad[param], core.GradientSlice):
                    params.append(param)
        ONE = model.param_init_net.ConstantFill([], 'ONE', shape=[1], value=1.0)
        logger.info('Sparse trainable variables: ')
        norm_ratio = self._calc_norm_ratio(model, params, scope, ONE)
        self._apply_norm_ratio(norm_ratio, model, params, learning_rate, scope, ONE)

    def total_loss_scalar(self):
        """Fetch the scalar loss (summed across GPUs when data-parallel)."""
        if (self.num_gpus == 0):
            return workspace.FetchBlob('total_loss_scalar')
        else:
            total_loss = 0
            for i in range(self.num_gpus):
                name = 'gpu_{}/total_loss_scalar'.format(i)
                gpu_loss = workspace.FetchBlob(name)
                total_loss += gpu_loss
            return total_loss

    def _init_model(self):
        # Run parameter initialization once, then create both nets.
        workspace.RunNetOnce(self.model.param_init_net)

        def create_net(net):
            workspace.CreateNet(net, input_blobs=[str(i) for i in net.external_inputs])
        create_net(self.model.net)
        create_net(self.forward_net)

    def __init__(self, model_params, source_vocab_size, target_vocab_size, num_gpus=1, num_cpus=1):
        self.model_params = model_params
        self.encoder_type = 'rnn'
        self.encoder_params = model_params['encoder_type']
        self.source_vocab_size = source_vocab_size
        self.target_vocab_size = target_vocab_size
        self.num_gpus = num_gpus
        self.num_cpus = num_cpus
        self.batch_size = model_params['batch_size']
        workspace.GlobalInit(['caffe2', '--caffe2_log_level=0', '--v=0', '--caffe2_handle_executor_threads_exceptions=1', ('--caffe2_mkl_num_threads=' + str(self.num_cpus))])

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Drop all blobs/nets so repeated runs start clean.
        workspace.ResetWorkspace()

    def initialize_from_scratch(self):
        logger.info('Initializing Seq2SeqModelCaffe2 from scratch: Start')
        self._build_model(init_params=True)
        self._init_model()
        logger.info('Initializing Seq2SeqModelCaffe2 from scratch: Finish')

    def get_current_step(self):
        return workspace.FetchBlob(self.global_step)[0]

    def inc_current_step(self):
        workspace.FeedBlob(self.global_step, np.array([(self.get_current_step() + 1)]))

    def step(self, batch, forward_only):
        """Feed one batch (sliced round-robin across GPUs when data-parallel)
        and run either the forward-only or training net; returns the loss."""
        if (self.num_gpus < 1):
            batch_obj = prepare_batch(batch)
            for (batch_obj_name, batch_obj_value) in zip(Batch._fields, batch_obj):
                workspace.FeedBlob(batch_obj_name, batch_obj_value)
        else:
            for i in range(self.num_gpus):
                gpu_batch = batch[i::self.num_gpus]
                batch_obj = prepare_batch(gpu_batch)
                for (batch_obj_name, batch_obj_value) in zip(Batch._fields, batch_obj):
                    name = 'gpu_{}/{}'.format(i, batch_obj_name)
                    # Token-id inputs stay on CPU (sparse embedding lookup);
                    # the rest are fed to the owning GPU.
                    if (batch_obj_name in ['encoder_inputs', 'decoder_inputs']):
                        dev = core.DeviceOption(caffe2_pb2.CPU)
                    else:
                        dev = core.DeviceOption(workspace.GpuDeviceType, i)
                    workspace.FeedBlob(name, batch_obj_value, device_option=dev)
        if forward_only:
            workspace.RunNet(self.forward_net)
        else:
            workspace.RunNet(self.model.net)
            self.inc_current_step()
        return self.total_loss_scalar()

    def save(self, checkpoint_path_prefix, current_step):
        """Save all parameters to '<prefix>-<step>' (minidb) and write a
        TF-style 'checkpoint' index file next to it; returns the path."""
        checkpoint_path = '{0}-{1}'.format(checkpoint_path_prefix, current_step)
        assert workspace.RunOperatorOnce(core.CreateOperator('Save', self.model.GetAllParams(), [], absolute_path=True, db=checkpoint_path, db_type='minidb'))
        checkpoint_config_path = os.path.join(os.path.dirname(checkpoint_path_prefix), 'checkpoint')
        with open(checkpoint_config_path, 'w') as checkpoint_config_file:
            checkpoint_config_file.write((((('model_checkpoint_path: "' + checkpoint_path) + '"\nall_model_checkpoint_paths: "') + checkpoint_path) + '"\n'))
        logger.info(('Saved checkpoint file to ' + checkpoint_path))
        return checkpoint_path
class FromTableauIsomorphism(Morphism):
    """Crystal isomorphism from tableaux to rigged configurations, realized
    via the Kirillov-Reshetikhin tableaux bijection."""

    def _repr_type(self):
        return 'Crystal Isomorphism'

    def __invert__(self):
        return FromRCIsomorphism(Hom(self.codomain(), self.domain()))

    def _call_(self, x):
        # Read the conjugate tableau as a tensor product of single-column
        # KR tableaux (rows reversed), then run the type-specific bijection.
        conj = x.to_tableau().conjugate()
        ct = self.domain().cartan_type()
        act = ct.affine()
        TP = TensorProductOfKirillovReshetikhinTableaux(act, [[r, 1] for r in conj.shape()])
        elt = TP(pathlist=[reversed(row) for row in conj])
        if (ct.type() == 'A'):
            bij = KRTToRCBijectionTypeA(elt)
        elif (ct.type() == 'B'):
            bij = MLTToRCBijectionTypeB(elt)
        elif (ct.type() == 'C'):
            bij = KRTToRCBijectionTypeC(elt)
        elif (ct.type() == 'D'):
            bij = MLTToRCBijectionTypeD(elt)
        else:
            raise NotImplementedError('bijection of type {} not yet implemented'.format(ct))
        return self.codomain()(bij.run())
def parse_args():
    """Parse command-line options for the tree-retagging script.

    Returns the options as a dict, with retag_method additionally translated
    into the boolean 'retag_xpos' flag expected by the retagging pipeline.

    Raises:
        ValueError: if retag_method is neither 'xpos' nor 'upos' (unreachable
        while argparse `choices` enforces the set; kept as a safety net).
    """
    parser = argparse.ArgumentParser(description='Script that retags a tree file')
    parser.add_argument('--lang', default='vi', type=str, help='Language')
    parser.add_argument('--input_file', default='data/constituency/vi_vlsp21_train.mrg', help='File to retag')
    parser.add_argument('--output_file', default='vi_vlsp21_train_retagged.mrg', help='Where to write the retagged trees')
    parser.add_argument('--retag_package', default='default', help='Which tagger shortname to use when retagging trees. None for no retagging. Retagging is recommended, as gold tags will not be available at pipeline time')
    parser.add_argument('--retag_method', default='upos', choices=['xpos', 'upos'], help='Which tags to use when retagging')
    parser.add_argument('--bracket_labels', action='store_true', help='Write the trees as bracket labels instead of S-expressions')
    args = vars(parser.parse_args())
    if args['retag_method'] == 'xpos':
        args['retag_xpos'] = True
    elif args['retag_method'] == 'upos':
        args['retag_xpos'] = False
    else:
        # BUG FIX: previously formatted the undefined name `xpos`, which
        # raised NameError instead of the intended ValueError.
        raise ValueError('Unknown retag method {}'.format(args['retag_method']))
    return args
def test_object():
    """convex_hull_object computes a per-object hull, honoring connectivity."""
    image = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1, 0], [1, 0, 0, 0, 0, 0, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
    # With connectivity=1 the diagonal pixels on the right are separate
    # objects, so each one's hull is just itself.
    expected_conn_1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 1, 0, 1], [1, 1, 1, 0, 0, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
    assert_array_equal(convex_hull_object(image, connectivity=1), expected_conn_1)
    # With connectivity=2 they form one object whose hull fills the box.
    expected_conn_2 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0, 1, 1, 1], [1, 1, 0, 0, 0, 0, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
    assert_array_equal(convex_hull_object(image, connectivity=2), expected_conn_2)
    # Only connectivity 1 and 2 are accepted.
    with testing.raises(ValueError):
        convex_hull_object(image, connectivity=3)
    # Repeated call gives the same result (no state leaks between calls).
    out = convex_hull_object(image, connectivity=1)
    assert_array_equal(out, expected_conn_1)
def register_Ns3RvBatteryModelHelper_methods(root_module, cls):
    # Auto-generated (pybindgen) binding registration for
    # ns3::RvBatteryModelHelper: copy/default constructors, the virtual
    # Set(name, value) attribute setter, and the private const virtual
    # DoInstall(node) factory. Do not hand-edit; regenerate instead.
    cls.add_constructor([param('ns3::RvBatteryModelHelper const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')], is_virtual=True)
    cls.add_method('DoInstall', 'ns3::Ptr< ns3::EnergySource >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, visibility='private', is_virtual=True)
    return
def set_principled_node(principled_node: bpy.types.Node, base_color: Tuple[(float, float, float, float)]=(0.6, 0.6, 0.6, 1.0), subsurface: float=0.0, subsurface_color: Tuple[(float, float, float, float)]=(0.8, 0.8, 0.8, 1.0), subsurface_radius: Tuple[(float, float, float)]=(1.0, 0.2, 0.1), metallic: float=0.0, specular: float=0.5, specular_tint: float=0.0, roughness: float=0.5, anisotropic: float=0.0, anisotropic_rotation: float=0.0, sheen: float=0.0, sheen_tint: float=0.5, clearcoat: float=0.0, clearcoat_roughness: float=0.03, ior: float=1.45, transmission: float=0.0, transmission_roughness: float=0.0) -> None:
    """Assign every Principled BSDF input socket of `principled_node` from
    the corresponding keyword parameter (defaults mirror Blender's)."""
    socket_values = {
        'Base Color': base_color,
        'Subsurface': subsurface,
        'Subsurface Color': subsurface_color,
        'Subsurface Radius': subsurface_radius,
        'Metallic': metallic,
        'Specular': specular,
        'Specular Tint': specular_tint,
        'Roughness': roughness,
        'Anisotropic': anisotropic,
        'Anisotropic Rotation': anisotropic_rotation,
        'Sheen': sheen,
        'Sheen Tint': sheen_tint,
        'Clearcoat': clearcoat,
        'Clearcoat Roughness': clearcoat_roughness,
        'IOR': ior,
        'Transmission': transmission,
        'Transmission Roughness': transmission_roughness,
    }
    inputs = principled_node.inputs
    for socket_name, value in socket_values.items():
        inputs[socket_name].default_value = value
class EchoingStdin(object):
    """Wrap an input stream so that everything read from it is also written
    to an output stream, as though the user had typed it interactively."""

    def __init__(self, input, output):
        self._input = input
        self._output = output

    def __getattr__(self, x):
        # Anything not overridden falls through to the wrapped stream.
        return getattr(self._input, x)

    def _echo(self, rv):
        # Mirror the consumed data to the output stream, then pass it on.
        self._output.write(rv)
        return rv

    def read(self, n=(- 1)):
        chunk = self._input.read(n)
        return self._echo(chunk)

    def readline(self, n=(- 1)):
        line = self._input.readline(n)
        return self._echo(line)

    def readlines(self):
        return [self._echo(line) for line in self._input.readlines()]

    def __iter__(self):
        return iter((self._echo(line) for line in self._input))

    def __repr__(self):
        return repr(self._input)
def from_path(path: PathLike, *, app: Any=None, base_url: (str | None)=None, data_generation_methods: DataGenerationMethodInput=DEFAULT_DATA_GENERATION_METHODS, code_sample_style: str=CodeSampleStyle.default().name, rate_limit: (str | None)=None, encoding: str='utf8', sanitize_output: bool=True) -> GraphQLSchema:
    """Load a GraphQL schema from a filesystem path.

    Thin wrapper around `from_file`; the file's absolute URI is recorded as
    the schema's location.
    """
    with open(path, encoding=encoding) as fd:
        return from_file(fd, app=app, base_url=base_url, data_generation_methods=data_generation_methods, code_sample_style=code_sample_style, location=pathlib.Path(path).absolute().as_uri(), rate_limit=rate_limit, sanitize_output=sanitize_output)
def setup(with_data=False):
    """Create the test directory and, optionally, populate it with fixtures.

    When `with_data` is true, every file from DATA_DIR is copied into
    TEST_PATH and each copied '*.tar.xz' archive is unpacked there.
    """
    if not os.path.exists(TEST_PATH):
        os.mkdir(TEST_PATH)
    if not with_data:
        return
    for name in os.listdir(DATA_DIR):
        shutil.copy(os.path.join(DATA_DIR, name), os.path.join(TEST_PATH, name))
    # Unpack the copied archives.  (Previously done via any(map(extract, ...)),
    # which abused short-circuiting for side effects and shadowed the loop
    # variable `f` inside the `with` block.)
    for name in os.listdir(TEST_PATH):
        if name.endswith('tar.xz'):
            with tarfile.open(os.path.join(TEST_PATH, name)) as archive:
                # NOTE(review): extractall on an untrusted archive allows path
                # traversal; fixtures are assumed trusted here.
                archive.extractall(TEST_PATH)
def report_errors(dataset, result_file):
    # Load the per-sample results CSV for `dataset` from RESULT_ROOT and
    # summarize its 'error' column via evaluate_errors (side effects only).
    df = pd.read_csv(((RESULT_ROOT / dataset) / result_file))
    evaluate_errors(df['error'])
class ConsumedResource(Resource):
    """A resource removed from the agent's inventory after use.

    NOTE(review): relies on `_reward_this_step` and `_resource_name` being
    maintained by the Resource base class, and on agent_infos containing an
    'inventory' mapping — confirm against the base class.
    """

    def update_agent_infos(self, state_infos, agent_infos):
        super().update_agent_infos(state_infos, agent_infos)
        # While the resource is yielding reward this step, leave the
        # inventory untouched; otherwise zero out the held amount.
        if (self._reward_this_step > 0):
            return
        agent_infos['inventory'][self._resource_name] = 0
def ocp_mixed(F, bcs_m, J_m, state_m, controls, adjoint_m, config_picard):
    # Fixture-style helper: bundle the mixed-formulation state system
    # (forms F, boundary conditions, cost functional, state/adjoint
    # variables and controls) into a cashocs OptimalControlProblem using
    # the Picard-iteration configuration.
    return cashocs.OptimalControlProblem(F, bcs_m, J_m, state_m, controls, adjoint_m, config=config_picard)
class ContrastMemory(nn.Module):
    """Memory buffer of latent features for two views, used for NCE-style
    contrastive learning (as in Contrastive Representation Distillation)."""

    def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5):
        # inputSize: feature dimensionality; outputSize: number of samples
        # in the memory bank; K: number of negatives; T: temperature.
        super(ContrastMemory, self).__init__()
        self.nLem = outputSize
        # Uniform noise distribution over all samples for negative sampling.
        self.unigrams = torch.ones(self.nLem)
        self.multinomial = AliasMethod(self.unigrams)
        self.multinomial.cuda()
        self.K = K
        # params = [K, T, Z_v1, Z_v2, momentum]; Z_* = -1 means "not yet
        # estimated" and is filled in on the first forward pass.
        self.register_buffer('params', torch.tensor([K, T, (- 1), (- 1), momentum]))
        stdv = (1.0 / math.sqrt((inputSize / 3)))
        # Memory banks initialized uniformly in [-stdv, stdv].
        self.register_buffer('memory_v1', torch.rand(outputSize, inputSize).mul_((2 * stdv)).add_((- stdv)))
        self.register_buffer('memory_v2', torch.rand(outputSize, inputSize).mul_((2 * stdv)).add_((- stdv)))

    def forward(self, v1, v2, y, idx=None):
        """Score each view's features against the other view's memory.

        v1, v2: (batch, inputSize) features; y: (batch,) sample indices;
        idx: optional (batch, K+1) sample indices, column 0 = positives.
        Returns (out_v1, out_v2), each (batch, K+1, 1) of normalized
        exp-similarities; also updates both memory banks with momentum.
        """
        K = int(self.params[0].item())
        T = self.params[1].item()
        Z_v1 = self.params[2].item()
        Z_v2 = self.params[3].item()
        momentum = self.params[4].item()
        batchSize = v1.size(0)
        outputSize = self.memory_v1.size(0)
        inputSize = self.memory_v1.size(1)
        if (idx is None):
            # Draw K negatives per sample; force column 0 to the positive.
            idx = self.multinomial.draw((batchSize * (self.K + 1))).view(batchSize, (- 1))
            idx.select(1, 0).copy_(y.data)
        # Score v2 against rows of memory_v1 (and vice versa below).
        weight_v1 = torch.index_select(self.memory_v1, 0, idx.view((- 1))).detach()
        weight_v1 = weight_v1.view(batchSize, (K + 1), inputSize)
        out_v2 = torch.bmm(weight_v1, v2.view(batchSize, inputSize, 1))
        out_v2 = torch.exp(torch.div(out_v2, T))
        weight_v2 = torch.index_select(self.memory_v2, 0, idx.view((- 1))).detach()
        weight_v2 = weight_v2.view(batchSize, (K + 1), inputSize)
        out_v1 = torch.bmm(weight_v2, v1.view(batchSize, inputSize, 1))
        out_v1 = torch.exp(torch.div(out_v1, T))
        # Estimate the NCE normalization constants once, on the first batch.
        if (Z_v1 < 0):
            self.params[2] = (out_v1.mean() * outputSize)
            Z_v1 = self.params[2].clone().detach().item()
            print('normalization constant Z_v1 is set to {:.1f}'.format(Z_v1))
        if (Z_v2 < 0):
            self.params[3] = (out_v2.mean() * outputSize)
            Z_v2 = self.params[3].clone().detach().item()
            print('normalization constant Z_v2 is set to {:.1f}'.format(Z_v2))
        out_v1 = torch.div(out_v1, Z_v1).contiguous()
        out_v2 = torch.div(out_v2, Z_v2).contiguous()
        # Momentum update of both memory banks, re-normalized to unit norm.
        with torch.no_grad():
            l_pos = torch.index_select(self.memory_v1, 0, y.view((- 1)))
            l_pos.mul_(momentum)
            l_pos.add_(torch.mul(v1, (1 - momentum)))
            l_norm = l_pos.pow(2).sum(1, keepdim=True).pow(0.5)
            updated_v1 = l_pos.div(l_norm)
            self.memory_v1.index_copy_(0, y, updated_v1)
            ab_pos = torch.index_select(self.memory_v2, 0, y.view((- 1)))
            ab_pos.mul_(momentum)
            ab_pos.add_(torch.mul(v2, (1 - momentum)))
            ab_norm = ab_pos.pow(2).sum(1, keepdim=True).pow(0.5)
            updated_v2 = ab_pos.div(ab_norm)
            self.memory_v2.index_copy_(0, y, updated_v2)
        return (out_v1, out_v2)
def _reduced_texutalization_method(expr, entity_label_map):
    """Render an s-expression as readable text.

    Entity MIDs (m.*) become labels (cached in entity_label_map), typed
    literals lose their ^^XMLSchema suffix, dotted relation paths keep only
    their final segment, and underscores become spaces.
    """
    rendered = []
    for tok in textualize_s_expr(expr).split(' '):
        if tok.startswith('m.'):
            if tok in entity_label_map:
                tok = entity_label_map[tok]
            else:
                label = get_label(tok)
                if label is not None:
                    entity_label_map[tok] = label
                    tok = label
        elif 'XMLSchema' in tok:
            # Drop the datatype annotation, e.g. "2000"^^xsd:dateTime.
            tok = tok[:tok.find('^^')]
        elif '.' in tok:
            # domain.type.relation -> relation
            tok = tok.split('.')[(- 1)]
        if '_' in tok:
            tok = tok.replace('_', ' ')
        rendered.append(tok)
    return ' '.join(rendered)
class Indices2Dataset(Dataset):
    """Read-only view over a dataset restricted to caller-supplied indices."""

    def __init__(self, dataset):
        self.dataset = dataset
        self.indices = None  # populated later via load()

    def load(self, indices: list):
        """Install the subset of indices this view exposes."""
        self.indices = indices

    def __getitem__(self, idx):
        image, label = self.dataset[self.indices[idx]]
        return image, label

    def __len__(self):
        return len(self.indices)
def get_prefix(sentence, prefix_len):
    """Return the first `prefix_len` whitespace-separated tokens of
    `sentence` (trailing newlines stripped); the whole stripped sentence
    when it has no more than `prefix_len` tokens."""
    stripped = sentence.strip('\n')
    tokens = stripped.split()
    if prefix_len >= len(tokens):
        return stripped
    return ' '.join(tokens[:prefix_len])
class FiniteField_prime_modn(FiniteField_generic, integer_mod_ring.IntegerModRing_generic):
    """GF(p): the finite field of prime order p, realized as Z/pZ."""

    def __init__(self, p, check=True, modulus=None):
        # `check` guards the (potentially expensive) primality test.
        p = Integer(p)
        if (check and (not p.is_prime())):
            raise ArithmeticError('p must be prime')
        self.__char = p
        integer_mod_ring.IntegerModRing_generic.__init__(self, p, category=_FiniteFields)
        # Optional defining modulus (degree-1 polynomial) used by gen().
        if (modulus is not None):
            self._modulus = modulus

    def __reduce__(self):
        # Pickle via the factory that constructed this field.
        return self._factory_data[0].reduce_data(self)

    def _coerce_map_from_(self, S):
        """Coercions from int, ZZ, compatible Z/nZ quotients, and anything
        that itself coerces into ZZ. Returns None when no map exists."""
        if (S is int):
            return integer_mod.Int_to_IntegerMod(self)
        elif (S is ZZ):
            return integer_mod.Integer_to_IntegerMod(self)
        elif isinstance(S, IntegerModRing_generic):
            from .residue_field import ResidueField_generic
            # Z/nZ surjects onto Z/pZ exactly when p divides n; residue
            # fields only qualify in degree 1.
            if (((S.characteristic() % self.characteristic()) == 0) and ((not isinstance(S, ResidueField_generic)) or (S.degree() == 1))):
                try:
                    return integer_mod.IntegerMod_to_IntegerMod(S, self)
                except TypeError:
                    pass
        to_ZZ = ZZ._internal_coerce_map_from(S)
        if (to_ZZ is not None):
            # Compose S -> ZZ -> GF(p).
            return (integer_mod.Integer_to_IntegerMod(self) * to_ZZ)

    def _convert_map_from_(self, R):
        # p-adic rings convert by reduction when this is their residue field.
        from sage.rings.padics.padic_generic import pAdicGeneric, ResidueReductionMap
        if (isinstance(R, pAdicGeneric) and (R.residue_field() is self)):
            return ResidueReductionMap._create_(R, self)

    def construction(self):
        # Delegate the functorial construction to the Z/nZ implementation.
        return integer_mod_ring.IntegerModRing_generic.construction(self)

    def characteristic(self):
        return self.__char

    def is_prime_field(self):
        return True

    def polynomial(self, name=None):
        """Defining polynomial (x, i.e. [0, 1]) in variable `name`, cached
        per variable name on first use."""
        if (name is None):
            name = self.variable_name()
        try:
            return self.__polynomial[name]
        except AttributeError:
            f = self[name]([0, 1])
            try:
                self.__polynomial[name] = f
            except (KeyError, AttributeError):
                self.__polynomial = {}
                self.__polynomial[name] = f
            return f

    def order(self):
        return self.__char

    def gen(self, n=0):
        """The (single) generator: the root of the stored modulus when one
        was given, otherwise 1. Cached after first computation."""
        if n:
            raise IndexError('only one generator')
        try:
            return self.__gen
        except AttributeError:
            pass
        try:
            # Degree-1 modulus x - a: the generator is a = -constant term.
            self.__gen = (- self._modulus[0])
        except AttributeError:
            self.__gen = self.one()
        return self.__gen

    def __iter__(self):
        # Yield 0, then repeatedly add 1 until wrapping back to zero.
        (yield self(0))
        i = one = self(1)
        while i:
            (yield i)
            i += one

    def degree(self):
        # GF(p) has degree 1 over its prime field (itself).
        return Integer(1)
class AverageMeter():
    """Accumulates scalar results and reports statistics, optionally over
    only the most recent `last_n` entries."""

    def __init__(self, last_n=None):
        self._records = []
        self.last_n = last_n  # window size; None means use all records

    def update(self, result):
        """Append one result, or extend with a list/tuple of results."""
        if isinstance(result, (list, tuple)):
            self._records += result
        else:
            self._records.append(result)

    def reset(self):
        self._records.clear()

    def records(self):
        """Records in the active window (last `last_n`, or all)."""
        if (self.last_n is not None):
            return self._records[(- self.last_n):]
        else:
            return self._records

    def sum(self):
        # BUG FIX: the original passed the bound method `self.records`
        # (not its return value) to numpy in sum/mean/std/median,
        # which raises at call time; call it to aggregate the data.
        return np.sum(self.records())

    def mean(self):
        return np.mean(self.records())

    def std(self):
        return np.std(self.records())

    def median(self):
        return np.median(self.records())
def test_execute_filter_endpoint(app, schema_url):
    """Filtering by endpoint must hit only /api/success, never /api/failure."""
    filtered_schema = oas_loaders.from_uri(schema_url, endpoint=['success'])
    execute(filtered_schema)
    # Exactly one request reached the app, and it targeted the whitelisted
    # endpoint.
    assert_incoming_requests_num(app, 1)
    assert_request(app, 0, 'GET', '/api/success')
    assert_not_request(app, 'GET', '/api/failure')
def test_unbounded_state(cl):
    """Slice -> unbounded stateful op -> unslice must preserve stream length."""
    source = NamedVideoStream(cl, 'test1')
    frames = cl.io.Input([source])
    sliced = cl.streams.Slice(frames, partitions=[cl.partitioner.all(50)])
    incremented = cl.ops.TestIncrementUnbounded(ignore=sliced)
    merged = cl.streams.Unslice(incremented)
    sink = NamedStream(cl, 'test_unbounded_state')
    sink_op = cl.io.Output(merged, [sink])
    cl.run(sink_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite,
           show_progress=False)
    assert sink.len() == source.len()
def b_cubed(clusters, mention_to_gold):
    """B-cubed numerator/denominator over the predicted clusters.

    Singleton predicted clusters are skipped, and gold singleton clusters
    contribute no credit. Returns (num, dem) so the caller forms num/dem.
    """
    numerator, denominator = 0, 0
    for cluster in clusters:
        if len(cluster) == 1:
            continue
        # Count how many mentions of this predicted cluster fall into each
        # gold cluster (keyed by the gold cluster tuple).
        gold_counts = Counter(
            tuple(mention_to_gold[m]) for m in cluster if m in mention_to_gold
        )
        correct = sum(n * n for gold, n in gold_counts.items() if len(gold) != 1)
        numerator += correct / float(len(cluster))
        denominator += len(cluster)
    return numerator, denominator
def load_sqls(in_json, normalize_variables):
    """Load the distinct SQL queries from a text-to-SQL JSON dataset.

    When `normalize_variables` is falsy, placeholder names in each query are
    substituted with their sentence-level values (falling back to the
    example-level default when the sentence value is empty).
    """

    def _substitute(sql, sentence_vars, example_vars):
        seen = {}
        for name, value in sentence_vars.items():
            if len(value) == 0:
                # Empty sentence-level value: use the example-level default.
                for var in example_vars:
                    if var['name'] == name:
                        value = var['example']
            sql = sql.replace(name, value)
            seen[value] = name
        return sql

    unique_sqls = set()
    with open(in_json) as fp:
        dataset = json.load(fp)
    for example in dataset:
        example_vars = example['variables']
        for sentence in example['sentences']:
            sentence_vars = sentence['variables']
            for sql in example['sql']:
                sql = sql.strip()
                if sql.endswith(';'):
                    sql = sql[:(- 1)].strip()
                if not normalize_variables:
                    sql = _substitute(sql, sentence_vars, example_vars)
                unique_sqls.add(sql)
    print('{} SQL queries loaded'.format(len(unique_sqls)))
    return list(unique_sqls)
def make_dense(targets, noclass):
    """One-hot encode `targets` (shape [batch]) into [batch, noclass], on CPU."""
    with tf.device('/cpu:0'):
        batch_size = tf.shape(targets)[0]
        # Flatten (row, class) pairs into indices of a batch*noclass vector.
        flat_indices = targets + noclass * tf.range(0, batch_size)
        total_len = tf.expand_dims(batch_size * noclass, 0)
        flat = tf.sparse_to_dense(flat_indices, total_len, 1.0, 0.0)
        return tf.reshape(flat, [-1, noclass])
def get_training_stats(stats):
    """Augment a stats dict in place: derive `ppl` from `nll_loss` when
    missing and stamp elapsed wall-clock time under `wall`."""
    needs_ppl = ('nll_loss' in stats) and ('ppl' not in stats)
    if needs_ppl:
        stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
    wall_meter = metrics.get_meter('default', 'wall')
    stats['wall'] = round(wall_meter.elapsed_time, 0)
    return stats
def printTensor(dataTable, message='', nPrintedRows=0, nPrintedCols=0, interval=10):
    """Print a bounded view of a tensor table; 0 rows/cols means 'all'."""
    nRows = int(dataTable.getDimensions()[0])
    rows = min(nRows, nPrintedRows) if nPrintedRows != 0 else nRows
    block = SubtensorDescriptor()
    dataTable.getSubtensor([], 0, rows, readOnly, block)
    # Total columns per row inferred from the fetched block size.
    nCols = int(block.getSize() / rows)
    cols = min(nCols, nPrintedCols) if nPrintedCols != 0 else nCols
    printArray(block.getArray(), int(cols), int(rows), int(nCols), message, interval)
    dataTable.releaseSubtensor(block)
@pytest.mark.parametrize('ctx', ctx_list)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('window_size, stride, fft_size', [(16, 2, 16), (16, 4, 16), (16, 8, 32)])
@pytest.mark.parametrize('window_type', ['hanning', 'hamming', 'rectangular'])
@pytest.mark.parametrize('center', [True, False])
@pytest.mark.parametrize('pad_mode', ['reflect', 'constant'])
@pytest.mark.parametrize('as_istft_backward', [False, True])
def test_stft_forward_backward(ctx, seed, window_size, stride, fft_size, window_type, center, pad_mode, as_istft_backward):
    """STFT forward/backward across window/padding configurations.

    BUG FIX: the decorator chain had lost its `@pytest.mark.` prefix
    (bare `.parametrize(...)` lines are invalid syntax); restored here.
    """
    backend = ctx.backend[0].split(':')[0]
    if (backend == 'cuda'):
        # Plain CUDA lacks the required N-D convolution; only cpu/cudnn run.
        pytest.skip('CUDA Convolution N-D is only supported in CUDNN extension')
    func_name = ('STFTCuda' if (backend == 'cudnn') else 'STFT')
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    x_shape = create_stft_input_shape(window_size)
    inputs = [rng.randn(*x_shape).astype(np.float32)]
    if as_istft_backward:
        # Skip configurations that cannot be inverted.
        length = x_shape[1]
        if is_nola_violation(window_type, window_size, stride, fft_size, length, center):
            pytest.skip('NOLA condition violation.')
        if (pad_mode != 'constant'):
            pytest.skip('`pad_mode` must be "constant" when `as_istft_backward == True`. Normal ISTFT never use `pad_mode` and just slice the output. Thus, STFT as a backward of normal ISTFT, STFT must be `pad_mode == constant`')
    function_tester(rng, F.stft, ref_stft, inputs,
                    func_args=[window_size, stride, fft_size, window_type, center, pad_mode, as_istft_backward],
                    ctx=ctx, func_name=func_name, atol_f=2e-06, atol_b=0.02, dstep=0.01)
def scatter(tensor, **kwargs):
    """Scatter `scatter_list` from rank `src` to all ranks.

    Only valid in process-group mode. On the source rank one chunk is sent
    to each rank; every other rank receives its chunk into `tensor`.
    """
    assert (torch.distributed.deprecated._initialized == _INITIALIZED_PG), 'collective only supported in process-group mode'
    my_rank = get_rank()
    src = kwargs.pop('src', my_rank)
    scatter_list = kwargs.pop('scatter_list', None)
    _group = kwargs.pop('group', group.WORLD)
    if kwargs:
        raise RuntimeError('got unexpected kwargs: {}'.format(', '.join(kwargs.keys())))
    if src != my_rank:
        # Receiving side: scatter_list must not be supplied here.
        if scatter_list:
            raise RuntimeError('non-empty can be given only to scatter source')
        return torch._C._dist_scatter_recv(tensor, src, _group)
    if scatter_list is None:
        raise RuntimeError('scatter_list is a required argument in scatter source')
    return torch._C._dist_scatter_send(scatter_list, tensor, _group)
def mask_tokens(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, args) -> Tuple[(torch.Tensor, torch.Tensor)]:
    """Prepare masked-LM inputs/labels in place.

    Of the selected positions: 80% become [MASK], 10% a random token, and
    10% are kept unchanged; unselected positions get label -100 (ignored).
    """
    if tokenizer.mask_token is None:
        raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
    labels = inputs.clone()
    # Uniform masking probability, zeroed at special and padding positions
    # so they are never selected.
    prob = torch.full(labels.shape, args.mlm_probability)
    special_mask = [
        tokenizer.get_special_tokens_mask(row, already_has_special_tokens=True)
        for row in labels.tolist()
    ]
    prob.masked_fill_(torch.tensor(special_mask, dtype=torch.bool), value=0.0)
    if tokenizer._pad_token is not None:
        prob.masked_fill_(labels.eq(tokenizer.pad_token_id), value=0.0)
    masked = torch.bernoulli(prob).bool()
    labels[~masked] = -100  # loss is computed only on masked positions
    # 80% of the masked positions -> [MASK] token.
    replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked
    inputs[replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # Half of the remainder (10% overall) -> random vocabulary token.
    randomized = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked & ~replaced
    random_tokens = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[randomized] = random_tokens[randomized]
    # The final 10% keep their original token.
    return (inputs, labels)
def read_mk(fobj, start_length, size):
    """Read an icns mask block: raw 8-bit alpha band at the scaled size."""
    fobj.seek(start_length[0])
    width = size[0] * size[2]
    height = size[1] * size[2]
    raw = fobj.read(width * height)
    band = Image.frombuffer('L', (width, height), raw, 'raw', 'L', 0, 1)
    return {'A': band}
def batch_data_test(cfg, data, device='cuda'):
    """Collate a list of per-image test dicts into batched tensors.

    `cfg` is accepted for signature parity with the train-time collator but
    is unused here. Tensor fields are concatenated along dim 0; `roi_cls`
    is cast to long, the rest to float32.
    """
    tensor_keys = ['im_H', 'im_W', 'roi_img', 'inst_id', 'roi_coord_2d',
                   'roi_cls', 'score', 'roi_extent', 'bbox', 'bbox_est',
                   'bbox_mode', 'roi_wh', 'scale', 'resize_ratio']
    batch = {}
    for key in tensor_keys:
        if key not in data[0]:
            continue
        dtype = torch.long if key in ['roi_cls'] else torch.float32
        batch[key] = torch.cat([d[key] for d in data], dim=0).to(
            device=device, dtype=dtype, non_blocking=True)
    batch['roi_cam'] = torch.cat([d['cam'] for d in data], dim=0).to(device, non_blocking=True)
    batch['roi_center'] = torch.cat([d['bbox_center'] for d in data], dim=0).to(device, non_blocking=True)
    # Non-tensor metadata is flattened across samples.
    for key in ['scene_im_id', 'file_name', 'model_info']:
        if key in data[0]:
            batch[key] = list(itertools.chain(*[d[key] for d in data]))
    return batch
def convert_ndarray(x):
    """Turn a dict keyed by (stringified) integer positions into an ndarray
    ordered by position."""
    slots = list(range(len(x)))  # placeholder values, one slot per entry
    for pos, val in x.items():
        slots[int(pos)] = val
    return np.array(slots)
def init_array(A, B, C, D, G):
    """Fill the input matrices with the PolyBench 3mm reference pattern.

    `G` (the output operand) is deliberately left untouched.
    """
    ni, nj, nk = NI.get(), NJ.get(), NK.get()
    nl, nm = NL.get(), NM.get()
    for i in range(ni):
        for j in range(nk):
            A[i, j] = datatype(((i * j) + 1) % ni) / (5 * ni)
    for i in range(nk):
        for j in range(nj):
            B[i, j] = datatype(((i * (j + 1)) + 2) % nj) / (5 * nj)
    for i in range(nj):
        for j in range(nm):
            C[i, j] = datatype((i * (j + 3)) % nl) / (5 * nl)
    for i in range(nm):
        for j in range(nl):
            D[i, j] = datatype(((i * (j + 2)) + 2) % nk) / (5 * nk)
class TensorDataset(Dataset):
    """Dataset wrapping equally-sized tensors; sample i is the tuple of the
    i-th row of every tensor."""

    def __init__(self, *tensors):
        first_len = tensors[0].size(0)
        assert all(t.size(0) == first_len for t in tensors)
        self.tensors = tensors

    def __getitem__(self, index):
        return tuple(t[index] for t in self.tensors)

    def __len__(self):
        return self.tensors[0].size(0)
def parse_opt():
    """Parse, validate, and return the image-captioning training options.

    BUG FIX relative to the original: the assertions for `load_best_score`
    and `train_only` reused the 'language_eval should be 0 or 1' message;
    each now reports its own option name.
    """
    parser = argparse.ArgumentParser()
    # Data input settings
    parser.add_argument('--input_json', type=str, default='data/coco.json', help='path to the json file containing additional info and vocab')
    parser.add_argument('--input_fc_h5', type=str, default='data/coco_ai_challenger_talk_fc.h5', help='path to the directory containing the preprocessed fc feats')
    parser.add_argument('--input_att_h5', type=str, default='data/coco_ai_challenger_talk_att.h5', help='path to the directory containing the preprocessed att feats')
    parser.add_argument('--input_label_h5', type=str, default='data/coco_ai_challenger_talk_label.h5', help='path to the h5file containing the preprocessed dataset')
    parser.add_argument('--start_from', type=str, default=None, help="continue training from saved model at this path. Path must contain files saved by previous training process: \n 'infos.pkl' : configuration;\n 'checkpoint' : paths to model file(s) (created by tf).\n Note: this file contains absolute paths, be careful when moving files around;\n 'model.ckpt-*' : file(s) with model definition (created by tf)\n ")
    # Model settings
    parser.add_argument('--caption_model', type=str, default='show_tell', help='show_tell, show_attend_tell, all_img, fc, att2in, att2in2, adaatt, adaattmo, topdown')
    parser.add_argument('--rnn_size', type=int, default=512, help='size of the rnn in number of hidden nodes in each layer')
    parser.add_argument('--num_layers', type=int, default=1, help='number of layers in the RNN')
    parser.add_argument('--rnn_type', type=str, default='lstm', help='rnn, gru, or lstm')
    parser.add_argument('--input_encoding_size', type=int, default=512, help='the encoding size of each token in the vocabulary, and the image.')
    parser.add_argument('--att_hid_size', type=int, default=512, help='the hidden size of the attention MLP; only useful in show_attend_tell; 0 if not using hidden layer')
    parser.add_argument('--fc_feat_size', type=int, default=2048, help='2048 for resnet, 4096 for vgg')
    parser.add_argument('--att_feat_size', type=int, default=2048, help='2048 for resnet, 512 for vgg')
    # Optimization: general
    parser.add_argument('--max_epochs', type=int, default=-1, help='number of epochs')
    parser.add_argument('--batch_size', type=int, default=16, help='minibatch size')
    parser.add_argument('--grad_clip', type=float, default=0.1, help='clip gradients at this value')
    parser.add_argument('--drop_prob_lm', type=float, default=0.5, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--seq_per_img', type=int, default=5, help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive. E.g. coco has 5 sents/image')
    parser.add_argument('--beam_size', type=int, default=1, help='used when sample_max = 1, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
    # Optimization: language model
    parser.add_argument('--optim', type=str, default='adam', help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam')
    parser.add_argument('--learning_rate', type=float, default=0.0004, help='learning rate')
    parser.add_argument('--learning_rate_decay_start', type=int, default=-1, help='at what iteration to start decaying learning rate? \n(-1 = dont) (in epoch)')
    parser.add_argument('--learning_rate_decay_every', type=int, default=3, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--optim_alpha', type=float, default=0.9, help='alpha for adam')
    parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
    parser.add_argument('--optim_epsilon', type=float, default=1e-08, help='epsilon that goes into denominator for smoothing')
    parser.add_argument('--weight_decay', type=float, default=0, help='weight_decay')
    # Scheduled sampling
    parser.add_argument('--scheduled_sampling_start', type=int, default=-1, help='at what iteration to start decay gt probability')
    parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5, help='every how many iterations thereafter to gt probability')
    parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05, help='How much to update the prob')
    parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25, help='Maximum scheduled sampling prob.')
    # Evaluation and checkpointing
    parser.add_argument('--val_images_use', type=int, default=3200, help='how many images to use when periodically evaluating the validation loss? (-1 = all)')
    parser.add_argument('--save_checkpoint_every', type=int, default=2500, help='how often to save a model checkpoint (in iterations)?')
    parser.add_argument('--checkpoint_path', type=str, default='save', help='directory to store checkpointed models')
    parser.add_argument('--language_eval', type=int, default=0, help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
    parser.add_argument('--losses_log_every', type=int, default=25, help='How often do we snapshot losses, for inclusion in the progress dump? \n(0 = disable)')
    parser.add_argument('--load_best_score', type=int, default=1, help='Do we load previous best score when resuming training.')
    # Misc
    parser.add_argument('--id', type=str, default='', help='an id identifying this run/job. used in cross-val and appended when writing progress files')
    parser.add_argument('--train_only', type=int, default=0, help='if true then use 80k, else use 110k')
    args = parser.parse_args()
    # Sanity-check option ranges before returning.
    assert args.rnn_size > 0, 'rnn_size should be greater than 0'
    assert args.num_layers > 0, 'num_layers should be greater than 0'
    assert args.input_encoding_size > 0, 'input_encoding_size should be greater than 0'
    assert args.batch_size > 0, 'batch_size should be greater than 0'
    assert 0 <= args.drop_prob_lm < 1, 'drop_prob_lm should be between 0 and 1'
    assert args.seq_per_img > 0, 'seq_per_img should be greater than 0'
    assert args.beam_size > 0, 'beam_size should be greater than 0'
    assert args.save_checkpoint_every > 0, 'save_checkpoint_every should be greater than 0'
    assert args.losses_log_every > 0, 'losses_log_every should be greater than 0'
    assert args.language_eval in (0, 1), 'language_eval should be 0 or 1'
    # BUG FIX: the next two messages previously said 'language_eval'.
    assert args.load_best_score in (0, 1), 'load_best_score should be 0 or 1'
    assert args.train_only in (0, 1), 'train_only should be 0 or 1'
    return args
def log_2items_per_user(spark):
    """Interaction-log fixture: users 0 and 1 share items 0/1; user 2 owns
    items 2/3. All events share one timestamp and weight 1.0."""
    ts = datetime(2019, 1, 1)
    rows = [
        [0, 0, ts, 1.0], [0, 1, ts, 1.0],
        [1, 0, ts, 1.0], [1, 1, ts, 1.0],
        [2, 2, ts, 1.0], [2, 3, ts, 1.0],
    ]
    return spark.createDataFrame(data=rows, schema=INTERACTIONS_SCHEMA)
def double_linear_logits(args, size, bias, bias_start=0.0, scope=None, mask=None, wd=0.0, input_keep_prob=1.0, is_train=None):
    """Two-layer projection of `args` to scalar logits: tanh hidden layer of
    width `size`, then a squeezed linear head; `mask` (if given) is applied
    additively in log-space via exp_mask."""
    with tf.variable_scope(scope or 'Double_Linear_Logits'):
        hidden = tf.tanh(linear(args, size, bias, bias_start=bias_start,
                                scope='first', wd=wd,
                                input_keep_prob=input_keep_prob, is_train=is_train))
        logits = linear(hidden, 1, bias, bias_start=bias_start, squeeze=True,
                        scope='second', wd=wd,
                        input_keep_prob=input_keep_prob, is_train=is_train)
        if mask is not None:
            logits = exp_mask(logits, mask)
        return logits
def synchronized(func):
    """Decorator serializing all calls to `func` under the module-wide lock.

    BUG FIX: the original had a bare `(func)` expression where a
    `@functools.wraps(func)` decorator evidently belonged; without it the
    wrapper loses the wrapped function's name/docstring. Restored here.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        with _module_lock:
            return func(*args, **kwargs)
    return wrapper
def test_constructor_statement_clone(default_test_case, constructor_mock):
    """Cloning a test case must deep-copy constructor statements and their
    return values, not alias them."""
    prim = st.IntPrimitiveStatement(default_test_case, 5)
    ctor = st.ConstructorStatement(default_test_case, constructor_mock, {'y': prim.ret_val})
    default_test_case.add_statement(prim)
    default_test_case.add_statement(ctor)
    clone = default_test_case.clone()
    assert isinstance(clone.statements[1], st.ConstructorStatement)
    assert clone.statements[1] is not ctor
    assert clone.statements[0].ret_val is not default_test_case.statements[0].ret_val
class ReportBuilderBase():
    """Abstract base for SQLite-backed report builders.

    Subclasses implement `_create_report_tables` (schema creation) and
    `build` (final artifact). Trackers feed data in via `process_tracker`.
    """

    def __init__(self, file=None):
        # Fall back to an in-memory database when no file is supplied.
        target = file if file is not None else ':memory:'
        self._connection = sqlite3.connect(target)
        self._create_report_tables()

    def process_tracker(self, tracker):
        """Let `tracker` write its data into this report; returns self so
        calls can be chained."""
        tracker.populate_report(self)
        return self

    def build(self):
        raise NotImplementedError

    def _create_report_tables(self):
        raise NotImplementedError
def main():
    """Quick demo: build one-sample dataloader and model, then profile
    parameters/FLOPs and a single training step."""
    (args, cfg) = parse_config()
    logger = common_utils.create_logger()
    logger.info('Quick Demo')
    # Single-process, single-worker loader; one epoch, batch size 1.
    (train_set, train_loader, train_sampler) = build_dataloader(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, batch_size=1, dist=False, workers=0, logger=logger, training=True, merge_all_iters_to_one_epoch=False, total_epochs=1)
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=train_set)
    print(model)
    model.count_parameters(model)
    model.train()
    model.cuda()
    logger.info(f'Total number of samples: {len(train_set)}')
    with tqdm.trange(0, 100, desc='epochs', dynamic_ncols=True, leave=True) as tbar:
        total_it_each_epoch = len(train_loader)
        logger.info(f'total_it_each_epoch: {len(train_loader)}')
        dataloader_iter = iter(train_loader)
        for cur_epoch in tbar:
            if (train_sampler is not None):
                train_sampler.set_epoch(cur_epoch)
            batch = next(dataloader_iter)
            func = model_fn_decorator()
            load_data_to_gpu(batch)
            model.calc_flops(model, batch)
            (loss, tb_dict, disp_dict) = func(model, batch)
            # NOTE(review): leftover interactive debugging — drops into ipdb
            # and terminates after the first iteration. Remove before using
            # this demo unattended.
            import ipdb
            ipdb.set_trace()
            exit()
def inception_block_2b(X):
    """Inception 4e block: 3x3 and 5x5 conv towers plus a padded max-pool
    branch, concatenated on the channel axis (channels_first layout)."""
    tower_3x3 = fr_utils.conv2d_bn(X, layer='inception_4e_3x3', cv1_out=160,
                                   cv1_filter=(1, 1), cv2_out=256,
                                   cv2_filter=(3, 3), cv2_strides=(2, 2),
                                   padding=(1, 1))
    tower_5x5 = fr_utils.conv2d_bn(X, layer='inception_4e_5x5', cv1_out=64,
                                   cv1_filter=(1, 1), cv2_out=128,
                                   cv2_filter=(5, 5), cv2_strides=(2, 2),
                                   padding=(2, 2))
    # Pool branch is zero-padded to match the conv towers' spatial size.
    pooled = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
    pooled = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(pooled)
    return concatenate([tower_3x3, tower_5x5, pooled], axis=1)
class Decoder_64(nn.Module):
    """64x64 image decoder: a spectral-normalized linear stem followed by six
    stride-2 transposed convolutions (each doubling spatial size), ending in
    tanh over 3 output channels.

    Attribute names are kept identical to preserve state_dict compatibility.
    """

    def __init__(self, img_size=64, latent_dim=10, noise_dim=100):
        super(Decoder_64, self).__init__()
        stem_features = latent_dim + noise_dim
        self.linear = snlinear(in_features=stem_features, out_features=512)
        self.deconv1 = snconvTrans2d(in_channels=512, out_channels=512, kernel_size=4, stride=2, padding=1)
        self.bn1 = nn.Sequential(nn.BatchNorm2d(512), nn.ReLU(True))
        self.deconv2 = snconvTrans2d(in_channels=512, out_channels=512, kernel_size=4, stride=2, padding=1)
        self.bn2 = nn.Sequential(nn.BatchNorm2d(512), nn.ReLU(True))
        self.deconv3 = snconvTrans2d(in_channels=512, out_channels=256, kernel_size=4, stride=2, padding=1)
        self.bn3 = nn.Sequential(nn.BatchNorm2d(256), nn.ReLU(True))
        self.deconv4 = snconvTrans2d(in_channels=256, out_channels=128, kernel_size=4, stride=2, padding=1)
        self.bn4 = nn.Sequential(nn.BatchNorm2d(128), nn.ReLU(True))
        self.deconv5 = snconvTrans2d(in_channels=128, out_channels=64, kernel_size=4, stride=2, padding=1)
        self.bn5 = nn.Sequential(nn.BatchNorm2d(64), nn.ReLU(True))
        self.deconv6 = snconvTrans2d(in_channels=64, out_channels=3, kernel_size=4, stride=2, padding=1)
        self.tanh = nn.Tanh()

    def forward(self, z):
        # Lift the latent+noise vector to a 512x1x1 feature map.
        feat = self.linear(z)
        feat = F.relu(feat.unsqueeze(-1).unsqueeze(-1), True)
        feat = self.bn1(self.deconv1(feat))
        feat = self.bn2(self.deconv2(feat))
        feat = self.bn3(self.deconv3(feat))
        feat = self.bn4(self.deconv4(feat))
        feat = self.bn5(self.deconv5(feat))
        return self.tanh(self.deconv6(feat))
class TestGeneralController(testing_utils.TestCase):
    """Sanity checks for GeneralController: initial sampling distribution
    and convergence of the policy update."""

    def setUp(self):
        super(TestGeneralController, self).setUp()
        self.session = tf.Session()
        (self.model_space, _) = testing_utils.get_example_conv1d_space()
        self.controller = architect.GeneralController(
            model_space=self.model_space,
            buffer_type='ordinal',
            with_skip_connection=True,
            kl_threshold=0.05,
            buffer_size=15,
            batch_size=5,
            session=self.session,
            train_pi_iter=2,
            lstm_size=32,
            lstm_num_layers=1,
            lstm_keep_prob=1.0,
            optim_algo='adam',
            skip_target=0.8,
            skip_weight=0.4,
        )

    def test_get_architecture(self):
        arch, probs = self.controller.get_action()
        self.assertIsInstance(arch, np.ndarray)
        self.assertIsInstance(probs, list)
        cursor = 0
        for layer_id in range(len(self.model_space)):
            # Untrained layer-choice distribution should be near-uniform.
            flat = probs[cursor].flatten()
            self.assertAllClose(flat, [1.0 / len(flat)] * len(flat), atol=0.05)
            if layer_id > 0:
                # Skip-connection probabilities should start near 0.5.
                flat = probs[cursor + 1].flatten()
                self.assertAllClose(flat, [0.5] * len(flat), atol=0.05)
                cursor += 1
            cursor += 1

    def test_optimize(self):
        arch, probs = self.controller.get_action()
        feed_dict = {self.controller.input_arc[i]: [[arch[i]]] for i in range(len(arch))}
        feed_dict.update({self.controller.advantage: [[1]]})
        feed_dict.update({self.controller.old_probs[i]: probs[i]
                          for i in range(len(self.controller.old_probs))})
        feed_dict.update({self.controller.reward: [[1]]})
        # Repeatedly rewarding one architecture should make sampling
        # reproduce it deterministically.
        for _ in range(100):
            self.session.run(self.controller.train_op, feed_dict=feed_dict)
        arch_after, _ = self.controller.get_action()
        self.assertAllEqual(arch, arch_after)
def test_RecordArray():
    """Record array uniqueness: the record array repeats nothing as a whole
    only via its fields — `x` holds distinct floats (unique), `y` repeats
    values (not unique)."""
    records = ak.highlevel.Array([
        {'x': 0.0, 'y': []}, {'x': 8.0, 'y': [1]}, {'x': 2.2, 'y': [2, 2]},
        {'x': 3.3, 'y': [3, 1, 3]}, {'x': 4.4, 'y': [4, 1, 1, 4]},
        {'x': 5.5, 'y': [5, 4, 5]}, {'x': 1.1, 'y': [6, 1]},
        {'x': 7.7, 'y': [7]}, {'x': 10.0, 'y': [99]},
    ])
    assert ak._do.is_unique(records.layout) is False
    assert ak._do.is_unique(records['x'].layout) is True
    assert ak._do.is_unique(records['y'].layout) is False
def numpy_azimint_hist(data, radius, npt):
    """Azimuthal integration via two histograms: the data-weighted sum per
    radial bin divided by the per-bin sample count (mean per bin)."""
    counts, _ = np.histogram(radius, npt)
    weighted, _ = np.histogram(radius, npt, weights=data)
    return weighted / counts
class Config(BaseConfig):
    """DICE-style climate-economy model parameters and derived quantities.

    Scalar fields mirror the GAMS defaults; methods compute dependent
    parameters and exogenous time paths (periods are 1-based, `tstep`
    years each).

    NOTE(review): several methods are referenced without being called
    (e.g. `self.b21` inside `b22`, `self.sig0` inside `sigma`) — this only
    works if BaseConfig exposes them as properties/cached values; confirm
    against the BaseConfig implementation.
    """

    # --- time grid and preferences ---
    numPeriods: int = 60          # number of model periods
    tstep: int = 5                # years per period
    elasmu: float = 1.45          # elasticity of marginal utility
    prstp: float = 0.015          # pure rate of time preference
    # --- production, population, capital ---
    gama: float = 0.3             # capital share in output
    pop0: float = 6838            # initial population (millions)
    popadj: float = 0.134         # population growth adjustment
    popasym: float = 10500        # asymptotic population
    dk: float = 0.1               # capital depreciation rate
    q0: float = 63.69             # initial gross output
    k0: float = 135               # initial capital stock
    # --- productivity and emissions intensity ---
    a0: float = 3.8               # initial total factor productivity
    ga0: float = 0.079            # initial TFP growth rate
    dela: float = 0.006           # decline rate of TFP growth
    gsigma1: float = (- 0.01)     # initial growth of emissions intensity
    dsig: float = (- 0.001)       # decline of sigma growth
    eland0: float = 3.3           # initial land-use emissions
    deland: float = 0.2           # land-use emissions decline rate
    e0: float = 33.61             # initial industrial emissions
    miu0: float = 0.039           # initial emissions control rate
    # --- carbon cycle (three reservoirs) ---
    mat0: float = 830.4           # initial atmospheric carbon
    mu0: float = 1527             # initial upper-ocean carbon
    ml0: float = 10010            # initial lower-ocean carbon
    mateq: float = 588            # atmospheric equilibrium stock
    mueq: float = 1350            # upper-ocean equilibrium stock
    mleq: float = 10000           # lower-ocean equilibrium stock
    b12: float = 0.088            # atmosphere -> upper-ocean transfer
    b23: float = 0.0025           # upper -> lower ocean transfer
    # --- climate dynamics ---
    t2xco2: float = 2.9           # equilibrium climate sensitivity
    fex0: float = 0.25            # 2010 non-CO2 forcing
    fex1: float = 0.7             # 2100 non-CO2 forcing
    tocean0: float = 0.0068       # initial ocean temperature
    tatm0: float = 0.8            # initial atmospheric temperature
    c1: float = 0.098
    c3: float = 0.088
    c4: float = 0.025
    fco22x: float = 3.8           # forcing from CO2 doubling
    # --- damages and abatement ---
    a1: float = 0
    a2: float = 0.00267
    a3: float = 2
    expcost2: float = 2.8         # abatement cost exponent
    pback: float = 344            # backstop technology price
    gback: float = 0.025          # backstop price decline rate
    limmiu: float = 1.2           # upper bound on control rate
    tnopol: float = 45            # no-policy horizon
    cprice0: float = 1            # initial carbon price
    gcprice: float = 0.02         # carbon price growth rate
    # --- participation, scaling, limits ---
    periodfullpart: int = 21
    partfract2010: float = 1
    partfractfull: float = 1
    fosslim: float = 6000         # cumulative fossil-fuel limit
    scale1: float = 0.
    scale2: float = (- 3855.106895)
    ifopt: int = 1                # 1 = optimal-policy run

    def b11(self):
        # Carbon retained in the atmosphere each period.
        return (1 - self.b12)

    def b21(self):
        # Upper-ocean -> atmosphere transfer (equilibrium-balanced).
        return ((self.b12 * self.mateq) / self.mueq)

    def b22(self):
        return ((1 - self.b21) - self.b23)

    def b32(self):
        # Lower-ocean -> upper-ocean transfer (equilibrium-balanced).
        return ((self.b23 * self.mueq) / self.mleq)

    def b33(self):
        return (1 - self.b32)

    def sig0(self):
        # Initial emissions intensity of output.
        return (self.e0 / (self.q0 * (1 - self.miu0)))

    def lam(self):
        # Climate feedback parameter.
        return (self.fco22x / self.t2xco2)

    def L(self):
        # Population path: logistic approach to popasym, as {period: pop}.
        pop = {1: self.pop0}
        for time in range(1, self.numPeriods):
            pop[(time + 1)] = (pop[time] * ((self.popasym / pop[time]) ** self.popadj))
        return pop

    def ga(self, time):
        # TFP growth rate, decaying in calendar years.
        return (self.ga0 * np.exp((((- self.dela) * 5.0) * (time - 1))))

    def al(self, time):
        # Total factor productivity (recursive in period).
        if (time == 1):
            return self.a0
        return (self.al((time - 1)) / (1 - self.ga((time - 1))))

    def gsig(self, time):
        # Growth rate of emissions intensity (recursive).
        if (time == 1):
            return self.gsigma1
        return (self.gsig((time - 1)) * ((1 + self.dsig) ** self.tstep))

    def sigma(self, time):
        # Emissions intensity path (recursive).
        if (time == 1):
            return self.sig0
        return (self.sigma((time - 1)) * np.exp((self.gsig((time - 1)) * self.tstep)))

    def pbacktime(self, time):
        # Declining backstop price.
        return (self.pback * ((1 - self.gback) ** (time - 1)))

    def cost1(self, time):
        # Abatement cost coefficient.
        return (((self.pbacktime(time) * self.sigma(time)) / self.expcost2) / 1000.0)

    def etree(self, time):
        # Land-use emissions path.
        return (self.eland0 * ((1 - self.deland) ** (time - 1)))

    def rr(self, time):
        # Social time-preference discount factor.
        return (1.0 / ((1 + self.prstp) ** (self.tstep * (time - 1))))

    def forcoth(self, time):
        # Exogenous (non-CO2) forcing: linear ramp until period 19, flat after.
        if (time < 19):
            return (self.fex0 + (((1 / 18) * (self.fex1 - self.fex0)) * time))
        return (self.fex0 + (self.fex1 - self.fex0))

    def optlrsav(self):
        # Optimal long-run savings rate.
        return (((self.dk + 0.004) / ((self.dk + (0.004 * self.elasmu)) + self.prstp)) * self.gama)

    def partfract(self, time):
        # Participation fraction: ramps from the 2010 value to full.
        if (time == 1):
            return self.partfract2010
        if (time > self.periodfullpart):
            return self.partfractfull
        return (self.partfract2010 + (((self.partfractfull - self.partfract2010) * (time - 1)) / self.periodfullpart))

    def cpricebase(self, time):
        # Baseline carbon price path.
        return (self.cprice0 * ((1 + self.gcprice) ** (5 * (time - 1))))

    def update(self, intervention):
        """Return a copy of this config with the intervention's field
        updates applied (dataclass replace)."""
        return dataclasses.replace(self, **intervention.updates)
@_SEG_HEADS_REGISTRY.register()
class BasePixelDecoder(nn.Module):
    """FPN-style pixel decoder for MaskFormer-like segmentation heads.

    NOTE(review): `from_config` takes `cls` but carries no visible
    @classmethod decorator, and `forward_features` calls
    `self.clip_mask_features`, which is never created in this __init__ —
    both are presumably supplied by decorators/subclasses lost in
    extraction; confirm against the upstream module. The leading `@` on
    the registry decorator was also reconstructed.
    """

    def __init__(self, input_shape: Dict[(str, ShapeSpec)], *, conv_dim: int, mask_dim: int, norm: Optional[Union[(str, Callable)]]=None):
        super().__init__()
        # Order backbone features by stride: shallow (small stride) first.
        input_shape = sorted(input_shape.items(), key=(lambda x: x[1].stride))
        self.in_features = [k for (k, v) in input_shape]
        feature_channels = [v.channels for (k, v) in input_shape]
        lateral_convs = []
        output_convs = []
        # Empty norm string means plain convs, which then need a bias.
        use_bias = (norm == '')
        for (idx, in_channels) in enumerate(feature_channels):
            if (idx == (len(self.in_features) - 1)):
                # Deepest level: no 1x1 lateral conv, only the 3x3 output conv.
                output_norm = get_norm(norm, conv_dim)
                output_conv = Conv2d(in_channels, conv_dim, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=output_norm, activation=F.relu)
                weight_init.c2_xavier_fill(output_conv)
                self.add_module('layer_{}'.format((idx + 1)), output_conv)
                lateral_convs.append(None)
                output_convs.append(output_conv)
            else:
                lateral_norm = get_norm(norm, conv_dim)
                output_norm = get_norm(norm, conv_dim)
                lateral_conv = Conv2d(in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm)
                output_conv = Conv2d(conv_dim, conv_dim, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=output_norm, activation=F.relu)
                weight_init.c2_xavier_fill(lateral_conv)
                weight_init.c2_xavier_fill(output_conv)
                self.add_module('adapter_{}'.format((idx + 1)), lateral_conv)
                self.add_module('layer_{}'.format((idx + 1)), output_conv)
                lateral_convs.append(lateral_conv)
                output_convs.append(output_conv)
        # Stored top-down (deepest first) to match the reversed iteration
        # in forward_features.
        self.lateral_convs = lateral_convs[::(- 1)]
        self.output_convs = output_convs[::(- 1)]
        self.mask_dim = mask_dim
        self.mask_features = Conv2d(conv_dim, mask_dim, kernel_size=3, stride=1, padding=1)
        weight_init.c2_xavier_fill(self.mask_features)
        # Cap on how many scales are handed to the transformer decoder.
        self.maskformer_num_feature_levels = 3

    def from_config(cls, cfg, input_shape: Dict[(str, ShapeSpec)]):
        # NOTE(review): signature suggests a lost @classmethod decorator.
        ret = {}
        ret['input_shape'] = {k: v for (k, v) in input_shape.items() if (k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES)}
        ret['conv_dim'] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        ret['mask_dim'] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
        ret['norm'] = cfg.MODEL.SEM_SEG_HEAD.NORM
        return ret

    def forward_features(self, features):
        """Top-down FPN merge over the backbone features.

        Returns (mask features, clip mask features, None, up to
        `maskformer_num_feature_levels` multi-scale maps, deepest first).
        """
        multi_scale_features = []
        num_cur_levels = 0
        # Iterate deepest -> shallowest; `y` carries the coarser level's
        # output into the next iteration (the first level has no lateral).
        for (idx, f) in enumerate(self.in_features[::(- 1)]):
            x = features[f]
            lateral_conv = self.lateral_convs[idx]
            output_conv = self.output_convs[idx]
            if (lateral_conv is None):
                y = output_conv(x)
            else:
                cur_fpn = lateral_conv(x)
                y = (cur_fpn + F.interpolate(y, size=cur_fpn.shape[(- 2):], mode='nearest'))
                y = output_conv(y)
            if (num_cur_levels < self.maskformer_num_feature_levels):
                multi_scale_features.append(y)
                num_cur_levels += 1
        return (self.mask_features(y), self.clip_mask_features(y), None, multi_scale_features)

    def forward(self, features, targets=None):
        # Kept for interface compatibility; callers should use
        # forward_features directly.
        logger = logging.getLogger(__name__)
        logger.warning('Calling forward() may cause unpredicted behavior of PixelDecoder module.')
        return self.forward_features(features)
def create_units(fst_dir: Path, in_labels: str, vocab: Dictionary) -> Path:
    """Write (once) the Kaldi units file mapping vocab symbols to ids.

    `<eps>` takes id 0; dictionary special symbols and `madeupword*`
    fillers are skipped. Returns the path whether or not it was created
    on this call.
    """
    units_path = fst_dir / f'kaldi_dict.{in_labels}.txt'
    if units_path.exists():
        return units_path
    logger.info(f'Creating {units_path}')
    with open(units_path, 'w') as out:
        print('<eps> 0', file=out)
        next_id = 1
        for symbol in vocab.symbols[vocab.nspecial:]:
            if symbol.startswith('madeupword'):
                continue
            print(f'{symbol} {next_id}', file=out)
            next_id += 1
    return units_path
def split(s, splitter, reg=False):
    """Split *s* on *splitter*; treat the splitter as a regex when *reg* is true."""
    if reg:
        import re
        return re.split(splitter, s)
    return s.split(splitter)
class DlaRoot(nn.Module):
    """Root node of a DLA tree.

    Concatenates its child feature maps along the channel axis, projects them
    with a 1x1 conv + BatchNorm, optionally adds the first child as a
    residual, and applies ReLU.
    """

    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(DlaRoot, self).__init__()
        # 1x1 projection; the padding term is 0 for the kernel_size=1 case
        # used throughout DLA.
        self.conv = nn.Conv2d(in_channels, out_channels, 1, stride=1, bias=False, padding=((kernel_size - 1) // 2))
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        children = x
        out = self.conv(torch.cat(children, 1))
        out = self.bn(out)
        if self.residual:
            # Skip connection from the first child.
            out = out + children[0]
        return self.relu(out)
def convert_file_if_needed(file, debug):
    """Dispatch *file* to the matching raw-text converter based on its name.

    Files that match no known format are returned unchanged.
    """
    suffix_handlers = (
        ('.sgm', sgm2raw),
        ('.tmx', tmx2raw),
        ('wiki/fi-en/titles.fi-en', cut_wikitles),
        ('.tsv', cut_tsv),
    )
    for suffix, handler in suffix_handlers:
        if file.endswith(suffix):
            return handler(file, debug)
    if CZENG16_REGEX.match(file):
        return convert2czeng17(file, debug)
    return file
class GaussianMLPTwoHeadedModuleEx(GaussianMLPTwoHeadedModule, ForwardWithTransformTrait, ForwardWithChunksTrait, ForwardModeTrait):
    """Two-headed Gaussian MLP module extended with forward-dispatch traits.

    Defines no behavior of its own; everything is inherited from
    GaussianMLPTwoHeadedModule and the three mixin traits via the MRO.
    """
    pass
def i_take_set(s: set) -> str:
    """Return 'not empty!' when *s* has elements, 'empty!' otherwise."""
    return 'not empty!' if s else 'empty!'
def _find_loop_nest_roots(loop_nest_tree: Dict[(SDFGState, Set[SDFGState])]) -> Set[SDFGState]:
    """Return the nodes of the loop-nest tree that are nobody's child.

    A root is any node (parent or child) that never occurs in a children set;
    since every child is excluded, this reduces to the parents minus all
    children.
    """
    child_nodes = set()
    for children in loop_nest_tree.values():
        child_nodes.update(children)
    return set(loop_nest_tree) - child_nodes
class runtime_validation_disabled(object):
    """Context manager that temporarily disables runtime validation.

    The previous value of the module-level ``_runtime_validation_enabled``
    flag is captured on entry and restored on exit, so instances nest
    correctly.
    """

    # Flag value captured on entry, restored on exit.
    prev: bool

    def __init__(self) -> None:
        # Intentionally empty: the original mutated the global flag here,
        # which meant merely constructing the object (without entering it)
        # disabled validation permanently. Deferring to __enter__ makes
        # construction side-effect free.
        pass

    def __enter__(self) -> None:
        global _runtime_validation_enabled
        self.prev = _runtime_validation_enabled
        _runtime_validation_enabled = False

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        global _runtime_validation_enabled
        # Restore rather than hard-enable, so nested managers unwind cleanly.
        _runtime_validation_enabled = self.prev
def scalar_imp_test(listener=False):
    """Build a tiny color/HSV fixture and convert it to instances."""
    # Two labels x two hues, saturation and value fixed at 100.
    data = [
        (name, (hue, 100.0, 100.0))
        for name in ('blue', 'teal')
        for hue in (240.0, 180.0)
    ]
    return pairs_to_insts(data, listener=listener)
def create_f0_hparams(hparams_string=None, verbose=False):
    """Create hyperparameters for the f0 model.

    Args:
        hparams_string: optional comma-separated 'name=value' overrides
            applied on top of the defaults via HParams.parse.
        verbose: when True, log the final hyperparameter values.

    Returns:
        A tf.contrib.training.HParams object with the f0 configuration.
    """
    hparams = tf.contrib.training.HParams(
        type=3,
        layers=3,
        blocks=2,
        dilation_channels=130,
        residual_channels=130,
        skip_channels=240,
        input_channel=60,
        condition_channel=1126,
        cgm_factor=4,
        initial_kernel=10,
        kernel_size=2,
        bias=True,
    )
    if hparams_string:
        tf.logging.info('Parsing f0 hparams: %s', hparams_string)
        hparams.parse(hparams_string)
    if verbose:
        tf.logging.info('f0 hparams: %s', hparams.values())
    return hparams
class RegularPartitions_all(RegularPartitions):
    """The set of ``l``-regular partitions of all nonnegative integers.

    For ``l == 1`` the only regular partition is the empty one, so the set
    is finite; otherwise it is infinite.
    """

    def __init__(self, ell):
        # The second argument flags infiniteness: True exactly when ell > 1.
        RegularPartitions.__init__(self, ell, bool(ell > 1))

    def _repr_(self):
        return '{}-Regular Partitions'.format(self._ell)

    def __iter__(self):
        # Degenerate case: only the empty partition is 1-regular.
        if self._ell == 1:
            yield self.element_class(self, [])
            return
        # Enumerate by size n = 0, 1, 2, ...
        n = 0
        while True:
            for mu in self._fast_iterator(n, n):
                yield self.element_class(self, mu)
            n += 1