code
stringlengths
101
5.91M
def _exact_1_norm(A): if scipy.sparse.isspmatrix(A): return max(abs(A).sum(axis=0).flat) elif is_pydata_spmatrix(A): return max(abs(A).sum(axis=0)) else: return np.linalg.norm(A, 1)
class ContentAttrParser(object):
    """Extract the charset value from an HTML ``content`` attribute value."""

    def __init__(self, data):
        # ``data`` is a bytes-derived parse buffer providing jumpTo/skip/etc.
        assert isinstance(data, bytes)
        self.data = data

    def parse(self):
        """Return the charset as bytes, or None if none can be found."""
        try:
            # Find the literal "charset" token and move past it.
            self.data.jumpTo(b'charset')
            self.data.position += 1
            self.data.skip()
            if not (self.data.currentByte == b'='):
                # No "=" after "charset": the attribute is malformed.
                return None
            self.data.position += 1
            self.data.skip()
            if self.data.currentByte in (b'"', b"'"):
                # Quoted value: take everything up to the matching quote.
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                return None
            # Unquoted value: read until the next space character.
            oldPosition = self.data.position
            try:
                self.data.skipUntil(spaceCharactersBytes)
                return self.data[oldPosition:self.data.position]
            except StopIteration:
                # Value runs to the end of the buffer.
                return self.data[oldPosition:]
        except StopIteration:
            # "charset" never appeared in the data.
            return None
class ContextNet(Sequential):
    """ContextNet convolutional encoder.

    ``alpha`` scales block channel widths, ``beta`` is the Swish activation
    coefficient, and ``strides`` defaults to downsampling at blocks 2, 6, 13.

    BUG FIX: the start/end activation checks used
    ``isinstance(activation, Swish)``, but ``activation`` is a *class*
    (it is instantiated below), so the check was always False and ``beta``
    was silently ignored. The class itself is compared instead.
    """

    def __init__(self, input_shape, out_channels=640, conv_channels=None,
                 kernel_size=3, strides=None, num_blocks=21, num_layers=5,
                 inner_dim=12, alpha=1, beta=1, dropout=0.15, activation=Swish,
                 se_activation=torch.nn.Sigmoid, norm=BatchNorm1d,
                 residuals=None):
        super().__init__(input_shape=input_shape)

        if conv_channels is None:
            # 10 narrow blocks followed by 11 wide blocks, as in the paper.
            conv_channels = [*([256] * 10), *([512] * 11)]
        if strides is None:
            strides = [1] * num_blocks
            # Time downsampling happens at these three blocks.
            strides[2] = 2
            strides[6] = 2
            strides[13] = 2
        if residuals is None:
            residuals = [True] * num_blocks

        self.append(DepthwiseSeparableConv1d, conv_channels[0], kernel_size,
                    layer_name='conv_start')
        self.append(norm, layer_name='norm_start')
        self._append_activation(activation, beta, 'act_start')

        for i in range(num_blocks):
            channels = int(conv_channels[i] * alpha)
            self.append(ContextNetBlock, out_channels=channels,
                        kernel_size=kernel_size, num_layers=num_layers,
                        inner_dim=inner_dim, stride=strides[i], beta=beta,
                        dropout=dropout, activation=activation,
                        se_activation=se_activation, norm=norm,
                        residual=residuals[i], layer_name=f'block_{i}')

        self.append(DepthwiseSeparableConv1d, out_channels, kernel_size,
                    layer_name='conv_end')
        self.append(norm, layer_name='norm_end')
        self._append_activation(activation, beta, 'act_end')

    def _append_activation(self, activation, beta, layer_name):
        """Append an activation layer, forwarding ``beta`` to Swish only."""
        # ``activation`` is a class; compare it, don't isinstance-check it.
        if activation is Swish:
            self.append(activation(beta), layer_name=layer_name)
        else:
            self.append(activation(), layer_name=layer_name)
class ResBlock(nn.Module):
    """Residual block returning ``ReLU(x + conv_block(x))``.

    The inner conv_block is conv -> norm -> ReLU [-> dropout] -> conv -> norm,
    with 'reflect', 'replicate' or 'zero' padding.
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        super(ResBlock, self).__init__()
        self.conv_block = self.build_conv_block(
            dim, padding_type, norm_layer, use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout,
                         use_bias):
        """Assemble the two-convolution body as an ``nn.Sequential``."""

        def pad_modules():
            # Returns (extra padding modules, conv ``padding`` amount).
            if padding_type == 'reflect':
                return [nn.ReflectionPad2d(1)], 0
            if padding_type == 'replicate':
                return [nn.ReplicationPad2d(1)], 0
            if padding_type == 'zero':
                return [], 1
            raise NotImplementedError(
                'padding [%s] is not implemented' % padding_type)

        layers = []
        pads, p = pad_modules()
        layers += pads
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                   norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            layers += [nn.Dropout(0.5)]

        pads, p = pad_modules()
        layers += pads
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                   norm_layer(dim)]
        return nn.Sequential(*layers)

    def forward(self, x):
        residual_sum = x + self.conv_block(x)
        # Final in-place ReLU applied to the residual sum.
        return nn.ReLU(True)(residual_sum)
class CachedProperty(object):
    """Descriptor caching the wrapped function's result on the instance.

    On first access the wrapped function is evaluated and the result is
    stored on the instance under the same name, shadowing this descriptor so
    later accesses are plain attribute lookups.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped
        try:
            self.__doc__ = wrapped.__doc__
        except AttributeError:
            # BUG FIX: was a bare ``except:``, which swallowed *every*
            # exception (including KeyboardInterrupt). Only a missing
            # __doc__ needs to be tolerated here.
            pass

    def __get__(self, instance, instance_type=None):
        if instance is None:
            # Accessed on the class: return the descriptor itself.
            return self
        value = self.wrapped(instance)
        # Cache: instance attribute now shadows the class-level descriptor.
        setattr(instance, self.wrapped.__name__, value)
        return value
class BacktranslationDataset(FairseqDataset):
    """Dataset that generates backtranslated source sentences on the fly.

    Wraps a target-side dataset; ``collater`` runs the backtranslation model
    over the collated targets and pairs each best hypothesis (as the new
    source) with the original target sentence.
    """

    def __init__(self, tgt_dataset, tgt_dict, backtranslation_model, max_len_a,
                 max_len_b, remove_eos_at_src=False,
                 generator_class=sequence_generator.SequenceGenerator, **kwargs):
        self.tgt_dataset = language_pair_dataset.LanguagePairDataset(
            src=tgt_dataset, src_sizes=None, src_dict=tgt_dict,
            tgt=None, tgt_sizes=None, tgt_dict=None)
        self.max_len_a = max_len_a
        self.max_len_b = max_len_b
        self.remove_eos_at_src = remove_eos_at_src
        self.backtranslation_generator = generator_class(
            models=[backtranslation_model], tgt_dict=tgt_dict, **kwargs)

    def __getitem__(self, index):
        return self.tgt_dataset[index]

    def __len__(self):
        return len(self.tgt_dataset)

    def collater(self, samples):
        """Collate, backtranslate, and build (generated source, target) pairs."""
        collated_tgt_only_sample = self.tgt_dataset.collater(samples=samples)
        backtranslation_hypos = self._generate_hypotheses(
            sample=collated_tgt_only_sample)
        generated_samples = []
        for input_sample, hypos in zip(samples, backtranslation_hypos):
            eos = self.tgt_dataset.src_dict.eos()
            original_tgt = input_sample['source']
            if original_tgt[-1] != eos:
                # BUG FIX: ``torch.LongTensor(eos)`` allocated an
                # *uninitialized* tensor of length ``eos``; we want a
                # one-element tensor holding the eos id.
                original_tgt = torch.cat([original_tgt, torch.LongTensor([eos])])
            generated_source = hypos[0]['tokens']  # best hypothesis
            if self.remove_eos_at_src:
                assert (generated_source[-1] == eos), f'Expected generated backtranslation to have eos (id: {eos}) at end, but instead found token id {generated_source[-1]} at end.'
                generated_source = generated_source[:-1]
            generated_samples.append({
                'id': input_sample['id'],
                'source': generated_source,
                'target': original_tgt,
            })
        return language_pair_dataset.collate(
            samples=generated_samples,
            pad_idx=self.tgt_dataset.src_dict.pad(),
            eos_idx=self.tgt_dataset.src_dict.eos())

    def get_dummy_batch(self, num_tokens, max_positions):
        # BUG FIX: the delegated result was computed but never returned.
        return self.tgt_dataset.get_dummy_batch(num_tokens, max_positions)

    def num_tokens(self, index):
        # BUG FIX: missing return.
        return self.tgt_dataset.num_tokens(index)

    def ordered_indices(self):
        # BUG FIX: the method was neither called nor returned (the bare
        # attribute access was a no-op).
        return self.tgt_dataset.ordered_indices()

    def valid_size(self, index, max_positions):
        # BUG FIX: missing return.
        return self.tgt_dataset.valid_size(index, max_positions)

    def _generate_hypotheses(self, sample):
        """Run the backtranslation generator (on GPU) on a collated sample."""
        self.backtranslation_generator.cuda()
        input = sample['net_input']
        srclen = input['src_tokens'].size(1)
        hypos = self.backtranslation_generator.generate(
            input, maxlen=int(self.max_len_a * srclen + self.max_len_b))
        return hypos
# NOTE(review): flattened dump of a PyTorch dynamic-quantization test class.
# The hypothesis ``@given`` decorator names appear to have been stripped by
# the flattener — see the bare ``(qconfig=st.sampled_from(...), ...)``
# argument lists before test_quantized_rnn and test_quantized_rnn_cell below
# — so this block is not syntactically valid as-is. TODO: restore the
# decorators from the upstream PyTorch test suite before running. Kept
# byte-identical here; restyling a block this large and broken is unsafe.
class TestPostTrainingDynamic(QuantizationTestCase): def test_single_layer(self): for dtype in [torch.qint8, torch.float16]: model = SingleLayerLinearDynamicModel().eval() qconfig = (float16_dynamic_qconfig if (dtype == torch.float16) else default_dynamic_qconfig) qconfig_dict = {'fc1': qconfig} prepare_dynamic(model, qconfig_dict) convert_dynamic(model) def checkQuantized(model): self.checkDynamicQuantizedLinear(model.fc1, dtype) self.checkScriptable(model, self.calib_data, check_save_load=True) self.checkNoQconfig(model) checkQuantized(model) base = SingleLayerLinearDynamicModel() keys_before = set(list(base.state_dict().keys())) model = quantize_dynamic(base, qconfig_dict) checkQuantized(model) keys_after = set(list(base.state_dict().keys())) self.assertEqual(keys_before, keys_after) model = SingleLayerLinearDynamicModel() quantize_dynamic(model, qconfig_dict, inplace=True) checkQuantized(model) model = SingleLayerLinearDynamicModel() quantize_dynamic(model, set([nn.Linear]), inplace=True, dtype=dtype) checkQuantized(model) def test_two_layers(self): for dtype in [torch.qint8, torch.float16]: model = TwoLayerLinearModel().eval() qconfig = (float16_dynamic_qconfig if (dtype == torch.float16) else default_dynamic_qconfig) qconfig_dict = {'fc2': qconfig} prepare_dynamic(model, qconfig_dict) convert_dynamic(model) def checkQuantized(model): self.assertEqual(type(model.fc1), torch.nn.Linear) self.checkDynamicQuantizedLinear(model.fc2, dtype=dtype) self.checkScriptable(model, self.calib_data, check_save_load=True) self.checkNoQconfig(model) checkQuantized(model) model = quantize_dynamic(TwoLayerLinearModel().eval(), qconfig_dict) checkQuantized(model) model = quantize_dynamic(TwoLayerLinearModel().eval(), {'fc2'}, dtype=dtype) checkQuantized(model) def test_nested1(self): for dtype in [torch.qint8, torch.float16]: model = NestedModel().eval() qconfig = (float16_dynamic_qconfig if (dtype == torch.float16) else default_dynamic_qconfig) qconfig_dict = {'fc3': qconfig, 
'sub2.fc1': qconfig} prepare_dynamic(model, qconfig_dict) convert_dynamic(model) def checkQuantized(model): self.checkLinear(model.sub1.fc) self.checkDynamicQuantizedLinear(model.fc3, dtype=dtype) self.checkDynamicQuantizedLinear(model.sub2.fc1, dtype=dtype) self.checkLinear(model.sub2.fc2) self.checkScriptable(model, self.calib_data, check_save_load=True) self.checkNoQconfig(model) checkQuantized(model) model = quantize_dynamic(NestedModel().eval(), qconfig_dict) checkQuantized(model) model = quantize_dynamic(NestedModel().eval(), {'fc3', 'sub2.fc1'}, dtype=dtype) checkQuantized(model) def test_nested2(self): for dtype in [torch.qint8, torch.float16]: model = NestedModel().eval() qconfig = (float16_dynamic_qconfig if (dtype == torch.float16) else default_dynamic_qconfig) qconfig_dict = {'fc3': qconfig, 'sub2': qconfig} prepare_dynamic(model, qconfig_dict) convert_dynamic(model) def checkQuantized(model): self.checkLinear(model.sub1.fc) self.assertEqual(type(model.sub1.relu), torch.nn.ReLU) self.checkDynamicQuantizedLinear(model.sub2.fc1, dtype=dtype) self.checkDynamicQuantizedLinear(model.sub2.fc2, dtype=dtype) self.checkDynamicQuantizedLinear(model.fc3, dtype=dtype) self.checkScriptable(model, self.calib_data, check_save_load=True) self.checkNoQconfig(model) checkQuantized(model) model = quantize_dynamic(NestedModel().eval(), qconfig_dict, dtype=dtype) checkQuantized(model) model = quantize_dynamic(NestedModel().eval(), {'fc3', 'sub2'}, dtype=dtype) checkQuantized(model) def test_nested3(self): for dtype in [torch.qint8, torch.float16]: model = NestedModel().eval() qconfig = (float16_dynamic_qconfig if (dtype == torch.float16) else default_dynamic_qconfig) qconfig_dynamic_dict = {'fc3': qconfig, 'sub2': qconfig, 'sub2.fc1': qconfig} prepare_dynamic(model, qconfig_dynamic_dict) convert_dynamic(model) def checkQuantized(model): self.checkDynamicQuantizedLinear(model.sub2.fc1, dtype=dtype) self.checkDynamicQuantizedLinear(model.sub2.fc2, dtype=dtype) 
self.checkDynamicQuantizedLinear(model.fc3, dtype=dtype) self.checkScriptable(model, self.calib_data, check_save_load=True) self.checkNoQconfig(model) checkQuantized(model) model = quantize_dynamic(NestedModel().eval(), qconfig_dynamic_dict) checkQuantized(model) model = quantize_dynamic(NestedModel().eval(), {'fc3', 'sub2', 'sub2.fc1'}, dtype=dtype) checkQuantized(model) def test_type_match_rule(self): for dtype in [torch.qint8, torch.float16]: model = NestedModel().eval() qconfig = (float16_dynamic_qconfig if (dtype == torch.float16) else default_dynamic_qconfig) qconfig_dict = {'fc3': None, 'sub2.fc1': None, torch.nn.Linear: qconfig} prepare_dynamic(model, qconfig_dict) test_only_eval_fn(model, self.calib_data) convert_dynamic(model) def checkQuantized(model): self.checkDynamicQuantizedLinear(model.sub1.fc, dtype=dtype) self.checkLinear(model.fc3) self.checkLinear(model.sub2.fc1) self.checkDynamicQuantizedLinear(model.sub2.fc2, dtype=dtype) test_only_eval_fn(model, self.calib_data) self.checkScriptable(model, self.calib_data, check_save_load=True) self.checkNoQconfig(model) checkQuantized(model) model = quantize_dynamic(NestedModel().eval(), qconfig_dict, dtype=dtype) checkQuantized(model) def test_per_channel_linear_quantize(self): model = NestedModel().eval() qconfig_dict = {torch.nn.Linear: per_channel_dynamic_qconfig} prepare_dynamic(model, qconfig_dict) test_only_eval_fn(model, self.calib_data) convert_dynamic(model) def checkQuantized(model): self.checkDynamicQuantizedLinear(model.sub1.fc, dtype=torch.qint8) self.checkDynamicQuantizedLinear(model.fc3, dtype=torch.qint8) self.checkDynamicQuantizedLinear(model.sub2.fc1, dtype=torch.qint8) self.checkDynamicQuantizedLinear(model.sub2.fc2, dtype=torch.qint8) test_only_eval_fn(model, self.calib_data) self.checkScriptable(model, self.calib_data, check_save_load=True) self.checkNoQconfig(model) checkQuantized(model) model = quantize_dynamic(NestedModel().eval(), qconfig_dict) checkQuantized(model) 
# NOTE(review): the next two lines begin with orphaned ``@given`` argument
# lists — the hypothesis decorator markers are missing.
(qconfig=st.sampled_from([per_channel_dynamic_qconfig, default_dynamic_qconfig]), dtype=st.sampled_from([torch.qint8, torch.float16])) def test_quantized_rnn(self, qconfig, dtype): model = RNNDynamicModel('LSTM').eval() niter = 10 x = torch.tensor([[100, (- 155)], [(- 155), 100], [100, (- 155)]], dtype=torch.float).unsqueeze(0).repeat(niter, 1, 1) qconfig_dict = {torch.nn.LSTM: qconfig} if (dtype == torch.float16): model_quantized = quantize_dynamic(model=model, dtype=dtype) else: model_quantized = quantize_dynamic(model=model, qconfig_spec=qconfig_dict, dtype=dtype) self.assertTrue(('DynamicQuantizedLSTM' in str(model_quantized))) self.checkDynamicQuantizedModule(model_quantized.mod, torch.nn.quantized.dynamic.LSTM, dtype) self.checkScriptable(model_quantized, [[x]], check_save_load=True) class ScriptWrapperPacked(torch.nn.Module): def __init__(self, cell): super(ScriptWrapperPacked, self).__init__() self.cell = cell def forward(self, x): return self.cell(x) packed_input = torch.nn.utils.rnn.pack_padded_sequence(x, torch.tensor([10, 5, 2])) model_with_packed_input = ScriptWrapperPacked(model_quantized.mod) scripted = torch.jit.script(model_with_packed_input) self._checkScriptable(model_with_packed_input, scripted, [[packed_input]], True) (qconfig=st.sampled_from([per_channel_dynamic_qconfig, default_dynamic_qconfig]), dtype=st.sampled_from([torch.qint8, torch.float16])) def test_quantized_rnn_cell(self, qconfig, dtype): qconfig_dict = {torch.nn.LSTMCell: qconfig, torch.nn.GRUCell: qconfig, torch.nn.RNNCell: qconfig} for module_type in ['LSTMCell', 'GRUCell', 'RNNTanh', 'RNNReLU']: model = RNNCellDynamicModel(module_type).eval() x = torch.tensor([[100, (- 155)], [(- 155), 100], [100, (- 155)]], dtype=torch.float) if ((torch.backends.quantized.engine == 'qnnpack') and (dtype == torch.float16)): continue if (dtype == torch.float16): model_quantized = quantize_dynamic(model=model, dtype=dtype) else: model_quantized = quantize_dynamic(model=model, 
qconfig_spec=qconfig_dict, dtype=dtype) def checkQuantized(model, module_type): mod_type_map = {'LSTMCell': torch.nn.quantized.dynamic.LSTMCell, 'GRUCell': torch.nn.quantized.dynamic.GRUCell, 'RNNTanh': torch.nn.quantized.dynamic.RNNCell, 'RNNReLU': torch.nn.quantized.dynamic.RNNCell} mod_repr_map = {'LSTMCell': 'DynamicQuantizedLSTMCell', 'GRUCell': 'DynamicQuantizedGRUCell', 'RNNTanh': 'DynamicQuantizedRNNCell', 'RNNReLU': 'DynamicQuantizedRNNCell'} self.assertTrue((mod_repr_map[module_type] in str(model_quantized))) self.checkDynamicQuantizedModule(model_quantized.mod, mod_type_map[module_type], dtype) self.checkNoQconfig(model) checkQuantized(model_quantized, module_type) self.checkScriptable(model_quantized, [[x]], check_save_load=True) def test_forward_hooks_preserved(self): for dtype in [torch.qint8, torch.float16]: model = SingleLayerLinearDynamicModel().eval() qconfig = (float16_dynamic_qconfig if (dtype == torch.float16) else default_dynamic_qconfig) qconfig_dict = {'fc1': qconfig} convert_dynamic(model) counter = {'pre_forwards': 0, 'forwards': 0} def fw_pre_hook(h_module, input): counter['pre_forwards'] += 1 def fw_hook(h_module, input, output): counter['forwards'] += 1 model.fc1.register_forward_pre_hook(fw_pre_hook) model.fc1.register_forward_hook(fw_hook) prepare_dynamic(model, qconfig_dict) def checkHooksIsPresent(model): self.assertObjectIn(fw_pre_hook, model.fc1._forward_pre_hooks.values()) self.assertObjectIn(fw_hook, model.fc1._forward_hooks.values()) self.assertEqual(len(model.fc1._forward_pre_hooks.values()), 1, 'Extra pre forward hooks have appeared on a layer') self.assertEqual(len(model.fc1._forward_hooks.values()), 1, 'Extra post forward hooks have appeared on a layer') checkHooksIsPresent(model) test_only_eval_fn(model, self.calib_data) convert_dynamic(model) checkHooksIsPresent(model)
def coords_in_U_mod_p(u, U, p):
    """Reduce the discrete-log coordinates of ``u`` in ``U`` modulo ``p``.

    The leading (torsion) coordinate is dropped unless ``p`` divides the
    order of the roots of unity in ``U``.
    """
    coords = U.log(u)
    skip_torsion = not p.divides(U.zeta_order())
    relevant = coords[1:] if skip_torsion else coords
    return [c % p for c in relevant]
# NOTE(review): flattened dump of a small graph container with Graphviz
# export. Vertices are keyed by Vertex.getId() (derived from name+group);
# __findVertex falls back to a name-only scan when the group is None.
# toGraphviz groups vertices by their group into "cluster_<n>" subgraphs.
# Review notes (left unchanged because the flattened state-mutating string
# builder is unsafe to restyle blindly): comparisons like ``group == None``
# should be ``is None``; ``hasEdge`` is an unimplemented stub; input
# validation via ``assert`` is stripped under ``python -O``.
class Graph(Printable): name: str directed: bool vertices: Dict[(str, Vertex)] edges: List[Edge] def __init__(self, name: str, directed: bool): self.name = name self.directed = directed self.vertices = {} self.edges = [] def copy(self, graph: Graph): self.edges += deepcopy(graph.edges) self.vertices.update(deepcopy(graph.vertices)) def addVertex(self, name: str, group: str=None, shape: str='ellipse'): assert (not self.hasVertex(name, group)), '{}: vertex with name {} already exist.'.format(self.name, name) v = Vertex(name, group, shape) self.vertices[v.getId()] = v def hasVertex(self, name: str, group: str=None): return (Vertex(name, group).getId() in self.vertices) def __findVertex(self, name: str, group: str=None): if self.hasVertex(name, group): return self.vertices[Vertex(name, group).getId()] assert (group == None), '{}: {}::{} is not a vertex.'.format(self.name, group, name) for v in self.vertices.values(): if (v.name == name): return v assert False, '{}: {}::{} is not a vertex.'.format(self.name, group, name) def addEdge(self, a: str, b: str, agroup: str=None, bgroup: str=None, label: str=None, alabel: str=None, blabel: str=None, style: str='solid'): self.edges.append(Edge(self.__findVertex(a, agroup).getId(), self.__findVertex(b, bgroup).getId(), label, alabel, blabel, style)) def hasEdge(self, a: str, b: str): pass def toGraphviz(self) -> str: out = '{} "{}" {{\n'.format(('digraph' if self.directed else 'graph'), self.name) vlines = [] cluster_vlines = {} indent = 4 out += (' ' * indent) out += 'label = "{}"\n'.format(self.name) for v in self.vertices.values(): options = ' ' if (v.name != None): options += 'label="{}" '.format(v.name) if (v.shape != None): options += 'shape="{}" '.format(v.shape) vline = '"{}" [{}]\n'.format(v.getId(), options) if (v.group != None): if (v.group not in cluster_vlines): cluster_vlines[v.group] = [] cluster_vlines[v.group].append(vline) else: vlines.append(vline) for line in vlines: out += (' ' * indent) out += line 
cluster_id = 0 for (l, c) in cluster_vlines.items(): out += (' ' * indent) out += 'subgraph cluster_{} {{\n'.format(cluster_id) indent += 4 out += (' ' * indent) out += 'label = "{}"\n'.format(l) for line in c: out += (' ' * indent) out += line indent -= 4 out += (' ' * indent) out += '}\n' cluster_id += 1 for e in self.edges: out += (' ' * indent) options = ' ' if (e.label != None): options += 'label="{}" '.format(e.label) if (e.alabel != None): options += 'taillabel="{}" '.format(e.alabel) if (e.blabel != None): options += 'headlabel="{}" '.format(e.blabel) if (e.style != None): options += 'style="{}" '.format(e.style) out += '"{}" {} "{}" [{}]\n'.format(e.a, ('->' if self.directed else '--'), e.b, options) out += '}' return out def print(self, indent: int) -> str: out = (' ' * indent) out += 'Graph "{}":\n'.format(self.name) indent += 4 out += (' ' * indent) out += 'Vertices:\n' indent += 4 for v in self.vertices.values(): out += (' ' * indent) out += '"{}", group "{}"\n'.format(v.name, v.group) indent -= 4 out += (' ' * indent) out += 'Edges:\n' indent += 4 for e in self.edges: out += (' ' * indent) out += '"{}" {} "{}"\n'.format(e.a, ('->' if self.directed else '--'), e.b) return out
def main(args):
    """Entry point for model testing: build config, model and data, then test."""
    cfg = get_cfg()
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    cfg = infer_cfg(cfg)
    cfg.freeze()

    if not os.path.isdir(cfg.CKPT):
        mkdir_p(cfg.CKPT)
    setup_logging(cfg.CKPT)

    # Optional model analysis (params / FLOPs / activations), rank 0 only.
    n_params = conv_flops = model_flops = conv_activs = model_activs = 0
    if is_main_process() and cfg.MODEL_ANALYSE:
        model = Generalized_CNN(cfg)
        model.eval()
        analyser = Analyser(cfg, model, param_details=False)
        n_params = analyser.get_params()[1]
        conv_flops, model_flops = analyser.get_flops_activs(
            cfg.TEST.SCALE[0], cfg.TEST.SCALE[1], mode='flops')
        conv_activs, model_activs = analyser.get_flops_activs(
            cfg.TEST.SCALE[0], cfg.TEST.SCALE[1], mode='activations')
        del model
    synchronize()

    # Build the real model and load the test checkpoint.
    model = Generalized_CNN(cfg)
    logging_rank(model)
    test_weights = get_weights(cfg.CKPT, cfg.TEST.WEIGHTS)
    load_weights(model, test_weights)
    logging_rank(
        'Params: {} | FLOPs: {:.4f}M / Conv_FLOPs: {:.4f}M | '
        'ACTIVATIONs: {:.4f}M / Conv_ACTIVATIONs: {:.4f}M'.format(
            n_params, model_flops, conv_flops, model_activs, conv_activs))
    model.eval()
    model.to(torch.device(cfg.DEVICE))

    datasets = build_dataset(cfg, is_train=False)
    test_loader = make_test_data_loader(cfg, datasets)
    synchronize()

    all_hooks = build_test_hooks(
        args.cfg_file.split('/')[-1], log_period=1, num_warmup=0)
    test_engine = TestEngine(cfg, model)
    test(cfg, test_engine, test_loader, datasets, all_hooks)
def regenerate_lextab(py_ver, write=False):
    """Regenerate skymarshal's ply lextab and compare (or install) it.

    Runs tokenizer.py in a subprocess to emit a fresh ``lextab.py``. With
    ``write=False`` the fresh table is compared against the cached one and
    LexerGenerationError is raised on mismatch; with ``write=True`` the
    fresh table replaces the cached file. The generated file is always
    cleaned up afterwards.
    """
    tokenizer_path = os.path.join(SKYMARSHAL_DIR, 'tokenizer.py')
    generated_path = os.path.join(SKYMARSHAL_DIR, 'lextab.py')
    try:
        env = os.environ.copy()
        env['PYTHONDONTWRITEBYTECODE'] = '1'
        env['SKYMARSHAL_REGENERATE_LEXER'] = '1'
        # Start from a clean slate so a stale file cannot be picked up.
        if os.path.exists(generated_path):
            os.remove(generated_path)
        subprocess.check_call([sys.executable, tokenizer_path], env=env)
        if write:
            shutil.move(generated_path, LEXTABS[py_ver])
        else:
            cached = Lextab(imp.load_source('cached', LEXTABS[py_ver]))
            generated = Lextab(imp.load_source('generated', generated_path))
            if not (cached == generated):
                raise LexerGenerationError(
                    'Attribute mismatches between generated and cached')
    finally:
        if os.path.exists(generated_path):
            os.remove(generated_path)
def get_spacy_model(spacy_model_name: str, pos_tags: bool, parse: bool,
                    ner: bool) -> SpacyModelType:
    """Load (and memoize) a spaCy model with only the requested pipes enabled."""
    cache_key = (spacy_model_name, pos_tags, parse, ner)
    if cache_key not in LOADED_SPACY_MODELS:
        # Always drop pipes we never use; drop the rest unless requested.
        disable = ['vectors', 'textcat']
        if not pos_tags:
            disable.append('tagger')
        if not parse:
            disable.append('parser')
        if not ner:
            disable.append('ner')
        LOADED_SPACY_MODELS[cache_key] = spacy.load(spacy_model_name,
                                                    disable=disable)
    return LOADED_SPACY_MODELS[cache_key]
def test_hook():
    """Importing under the hook should instrument the module's code objects."""
    tracer = ExecutionTracer()
    tracer.current_thread_identifier = threading.current_thread().ident
    with install_import_hook('tests.fixtures.instrumentation.mixed', tracer):
        module = importlib.import_module('tests.fixtures.instrumentation.mixed')
        # Reload to make sure instrumentation also survives a re-import.
        importlib.reload(module)
        subject_properties = tracer.get_subject_properties()
        assert len(subject_properties.existing_code_objects) > 0
        assert module.function(6) == 0
def test_arr2sym():
    """Passing an array element as a symbol value should propagate through.

    NOTE(review): ``symarg``/``scalarg`` are used as DaCe programs below
    (``scalarg.to_sdfg`` and the ``N=arr[1]`` symbol binding), so they must
    be decorated with ``@dace.program``; the decorators appear to have been
    stripped from this dump and are restored here — confirm against the
    original test file.
    """
    N = dace.symbol('N', dace.float64)

    @dace.program
    def symarg(A: dace.float64[20]):
        A[:] = N

    @dace.program
    def scalarg(A: dace.float64[20], arr: dace.float64[2]):
        # Bind the symbol N to a runtime array element.
        symarg(A, N=arr[1])

    sdfg = scalarg.to_sdfg(simplify=False)
    A = np.random.rand(20)
    sc = np.array([2.0, 3.0])
    sdfg(A, sc)
    assert np.allclose(A, sc[1])
class FontFile(object):
    """Base class for bitmap font files (PIL ``.pil`` format writer).

    ``self.glyph`` holds up to 256 entries of (delta, dst, src, image).
    ``compile`` packs every glyph image into one bitmap and fills
    ``self.metrics``; ``save`` writes the ``.pbm`` bitmap plus the ``.pil``
    metrics file.
    """

    bitmap = None  # packed Image, created lazily by compile()

    def __init__(self):
        self.info = {}
        self.glyph = [None] * 256

    def __getitem__(self, ix):
        return self.glyph[ix]

    def compile(self):
        """Pack all glyphs into ``self.bitmap`` and compute ``self.metrics``."""
        if self.bitmap:
            return  # already compiled

        # First pass: determine the packed bitmap dimensions.
        h = w = maxwidth = 0
        lines = 1
        for glyph in self:
            if glyph:
                d, dst, src, im = glyph
                h = max(h, src[3] - src[1])
                w = w + (src[2] - src[0])
                if w > WIDTH:
                    # Row full: wrap onto a new line of glyphs.
                    lines += 1
                    w = src[2] - src[0]
                maxwidth = max(maxwidth, w)

        xsize = maxwidth
        ysize = lines * h
        if xsize == 0 and ysize == 0:
            return ''

        self.ysize = h
        self.bitmap = Image.new('1', (xsize, ysize))
        self.metrics = [None] * 256

        # Second pass: paste each glyph and record its packed location.
        x = y = 0
        for i in range(256):
            glyph = self[i]
            if glyph:
                d, dst, src, im = glyph
                xx = src[2] - src[0]
                x0, y0 = x, y
                x = x + xx
                if x > WIDTH:
                    # Wrap to the next row of glyphs.
                    x, y = 0, y + h
                    x0, y0 = x, y
                    x = xx
                s = (src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0)
                self.bitmap.paste(im.crop(src), s)
                self.metrics[i] = (d, dst, s)

    def save(self, filename):
        """Write ``<stem>.pbm`` (bitmap) and ``<stem>.pil`` (metrics)."""
        self.compile()
        # NOTE: historical PIL quirk — the bitmap gets a .pbm extension but
        # is encoded as PNG.
        self.bitmap.save(os.path.splitext(filename)[0] + '.pbm', 'PNG')
        with open(os.path.splitext(filename)[0] + '.pil', 'wb') as fp:
            fp.write(b'PILfont\n')
            fp.write((';;;;;;%d;\n' % self.ysize).encode('ascii'))
            fp.write(b'DATA\n')
            for id in range(256):
                m = self.metrics[id]
                if not m:
                    # Missing glyph: ten zero shorts.
                    puti16(fp, [0] * 10)
                else:
                    puti16(fp, (m[0] + m[1]) + m[2])
class LogisticRegression(nn.Module):
    """Bag-of-embeddings classifier: sum of per-token linear scores."""

    def __init__(self, vocab_size, embed_dim, n_classes, pad_idx):
        super(LogisticRegression, self).__init__()
        self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                      embedding_dim=embed_dim,
                                      padding_idx=pad_idx)
        self.fc = nn.Linear(embed_dim, n_classes)

    def forward(self, input_ids):
        """Return class logits of shape (batch, n_classes)."""
        token_scores = self.fc(self.embedding(input_ids))
        # Aggregate over the sequence dimension.
        return token_scores.sum(dim=1)
def dump_pickle(data, path):
    """Pickle ``data`` to ``path``, creating parent directories as needed."""
    ensure_parents(path)
    with open(str(path), 'wb') as handle:
        pickle.dump(data, handle)
def calc_one_map(data):
    """Average precision for one ranked list.

    Each element of ``data`` is ``(record, score)`` where ``record[2]`` is a
    relevance flag ('1' means relevant). Items are ranked by descending score
    and AP is the mean of precision@rank over the relevant items; 0 when
    nothing is relevant.
    """
    ranked = sorted(data, key=lambda d: d[1], reverse=True)
    hits = 0
    precision_sum = 0.0
    for rank, item in enumerate(ranked, start=1):
        if int(item[0][2]) == 1:
            hits += 1
            precision_sum += float(hits) / rank
    if hits == 0:
        return 0
    return precision_sum / hits
def encoder_forecaster_build_networks(factory, context, shared_encoder_net=None,
                                      shared_forecaster_net=None,
                                      shared_loss_net=None, for_finetune=False):
    """Build and bind the encoder, forecaster and loss MXNet modules.

    Each module may share parameters with a previously built one via the
    ``shared_*`` arguments; freshly created encoder/forecaster modules are
    MSRAPrelu-initialized and given an optimizer, the loss module only gets
    default-initialized parameters.
    """
    encoder_data_desc = factory.encoder_data_desc()
    encoder_net = MyModule(factory.encoder_sym(),
                           data_names=[ele.name for ele in encoder_data_desc],
                           label_names=[],
                           context=context, name='encoder_net')
    encoder_net.bind(data_shapes=encoder_data_desc, label_shapes=None,
                     inputs_need_grad=True, shared_module=shared_encoder_net)
    if shared_encoder_net is None:
        # Fresh module: initialize weights and attach an optimizer.
        encoder_net.init_params(mx.init.MSRAPrelu(slope=0.2))
        init_optimizer_using_cfg(encoder_net, for_finetune=for_finetune)

    forecaster_data_desc = factory.forecaster_data_desc()
    forecaster_net = MyModule(factory.forecaster_sym(),
                              data_names=[ele.name for ele in forecaster_data_desc],
                              label_names=[],
                              context=context, name='forecaster_net')
    forecaster_net.bind(data_shapes=forecaster_data_desc, label_shapes=None,
                        inputs_need_grad=True,
                        shared_module=shared_forecaster_net)
    if shared_forecaster_net is None:
        forecaster_net.init_params(mx.init.MSRAPrelu(slope=0.2))
        init_optimizer_using_cfg(forecaster_net, for_finetune=for_finetune)

    loss_data_desc = factory.loss_data_desc()
    loss_label_desc = factory.loss_label_desc()
    loss_net = MyModule(factory.loss_sym(),
                        data_names=[ele.name for ele in loss_data_desc],
                        label_names=[ele.name for ele in loss_label_desc],
                        context=context, name='loss_net')
    loss_net.bind(data_shapes=loss_data_desc, label_shapes=loss_label_desc,
                  inputs_need_grad=True, shared_module=shared_loss_net)
    if shared_loss_net is None:
        # The loss module needs no optimizer of its own.
        loss_net.init_params()

    return encoder_net, forecaster_net, loss_net
class RegularMeta(Meta, Generic[T]):
    """Metadata mixin for regular (fixed-size) list layouts.

    NOTE(review): these accessors are consumed attribute-style within this
    very class (e.g. ``self._content.purelist_depth + 1`` and the
    tuple-unpacking of ``self._content.minmax_depth``), so they must be
    properties; the ``@property`` decorators appear to have been stripped by
    the flattener and are restored here — confirm against upstream.
    """

    is_list = True
    is_regular = True
    size: ShapeItem  # fixed length of every sublist
    _content: T      # metadata of the inner layout

    def purelist_parameters(self, *keys: str) -> JSONSerializable:
        """Return the first matching parameter among ``keys``, searching inward."""
        if self._parameters is not None:
            for key in keys:
                if key in self._parameters:
                    return self._parameters[key]
        return self._content.purelist_parameters(*keys)

    @property
    def purelist_isregular(self) -> bool:
        return self._content.purelist_isregular

    @property
    def purelist_depth(self) -> int:
        # Strings count as depth-1 leaves, not lists of characters.
        if self.parameter('__array__') in ('string', 'bytestring'):
            return 1
        else:
            return self._content.purelist_depth + 1

    @property
    def is_identity_like(self) -> bool:
        return False

    @property
    def minmax_depth(self) -> tuple[(int, int)]:
        if self.parameter('__array__') in ('string', 'bytestring'):
            return (1, 1)
        else:
            (mindepth, maxdepth) = self._content.minmax_depth
            return (mindepth + 1, maxdepth + 1)

    @property
    def branch_depth(self) -> tuple[(bool, int)]:
        if self.parameter('__array__') in ('string', 'bytestring'):
            return (False, 1)
        else:
            (branch, depth) = self._content.branch_depth
            return (branch, depth + 1)

    @property
    def fields(self):
        return self._content.fields

    @property
    def is_tuple(self) -> bool:
        return self._content.is_tuple

    @property
    def dimension_optiontype(self) -> bool:
        return False

    @property
    def content(self) -> T:
        return self._content
class ResNet(nn.Module):
    """3D ResNet for video clips (input: N x 3 x T x H x W)."""

    def __init__(self, block, layers, sample_size, sample_duration,
                 shortcut_type='B', num_classes=400, last_fc=True):
        self.last_fc = last_fc  # whether to apply the final classifier
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2),
                               padding=(3, 3, 3), bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
        self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type,
                                       stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type,
                                       stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type,
                                       stride=2)
        # Average pool over whatever survives the stride-16 (time) and
        # stride-32 (space) downsampling.
        last_duration = math.ceil(sample_duration / 16)
        last_size = math.ceil(sample_size / 32)
        self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size),
                                    stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He-style initialization for convs; BN starts at identity.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1,
                    dilation=1):
        """Stack ``blocks`` residual units, downsampling in the first one."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if shortcut_type == 'A':
                # Parameter-free shortcut (zero-padded identity).
                downsample = partial(downsample_basic_block,
                                     planes=planes * block.expansion,
                                     stride=stride)
            else:
                downsample = nn.Sequential(
                    nn.Conv3d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm3d(planes * block.expansion))
        units = [block(self.inplanes, planes, stride, downsample, dilation)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            units.append(block(self.inplanes, planes))
        return nn.Sequential(*units)

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.fc(x) if self.last_fc else x
@pytest.fixture(scope='function')
def problem_ctx():
    """Fresh per-test Lp problem context built around a fake classifier.

    NOTE(review): the dump showed a bare ``(scope='function')`` before the
    ``def`` — an orphaned argument list that is a syntax error — indicating
    a stripped ``@pytest.fixture`` decorator, restored here.
    """
    ctx = CategoricalLpProblemContext(
        clf=FakeModel(), target_class=1, target_confidence=0.5, lp_space=1)
    return ctx
def build_model(X, num_inducing, num_layers):
    """Construct a constant-input-dim deep GP for the training inputs ``X``."""
    config = Config(
        num_inducing=num_inducing,
        inner_layer_qsqrt_factor=1e-05,
        between_layer_noise_variance=0.01,
        likelihood_noise_variance=0.01,
        white=True,
    )
    return build_constant_input_dim_deep_gp(X, num_layers, config=config)
def changeBipartiteAlterTwoStar1_SLOW(mode, G, A, i):
    """Change statistic for the bipartite alternating two-star (slow form).

    Returns the sum of two-paths from node ``i`` to every node of ``mode``
    when ``i`` itself belongs to ``mode``, otherwise 0. ``A`` is unused here
    but kept for a uniform change-statistic signature.
    """
    if G.bipartite_node_mode(i) != mode:
        return 0
    return sum(G.twoPaths(i, v) for v in G.nodeModeIterator(mode))
def _remove_qconfig(module): for child in module.children(): _remove_qconfig(child) if hasattr(module, 'qconfig'): del module.qconfig
@_GENERATOR_REGISTRY.register()
class RotatedAnchorGenerator(nn.Module):
    """Anchor generator for rotated boxes (x_ctr, y_ctr, w, h, angle).

    NOTE(review): the dump showed ``_GENERATOR_REGISTRY.register()`` as a
    bare statement before the class and ``from_config`` without
    ``@classmethod``; both look like stripped decorators (the detectron2
    pattern) and are restored here — confirm against upstream.
    ``num_cell_anchors``/``num_anchors`` are accessed attribute-style
    (``self.num_anchors`` without a call), so they are restored as
    properties.
    """

    box_dim: int = 5  # (x_ctr, y_ctr, w, h, angle)

    def __init__(self, *, sizes, aspect_ratios, strides, angles, offset=0.5):
        super().__init__()
        self.strides = strides
        self.num_features = len(self.strides)
        # Broadcast every per-feature-map parameter to num_features entries.
        sizes = _broadcast_params(sizes, self.num_features, 'sizes')
        aspect_ratios = _broadcast_params(aspect_ratios, self.num_features,
                                          'aspect_ratios')
        angles = _broadcast_params(angles, self.num_features, 'angles')
        self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios,
                                                    angles)
        self.offset = offset
        assert 0.0 <= self.offset < 1.0, self.offset

    @classmethod
    def from_config(cls, cfg, input_shape: List[ShapeSpec]):
        """Build constructor kwargs from a detectron2-style config."""
        return {'sizes': cfg.MODEL.ANCHOR_GENERATOR.SIZES,
                'aspect_ratios': cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS,
                'strides': [x.stride for x in input_shape],
                'offset': cfg.MODEL.ANCHOR_GENERATOR.OFFSET,
                'angles': cfg.MODEL.ANCHOR_GENERATOR.ANGLES}

    def _calculate_anchors(self, sizes, aspect_ratios, angles):
        cell_anchors = [
            self.generate_cell_anchors(size, aspect_ratio, angle).float()
            for size, aspect_ratio, angle in zip(sizes, aspect_ratios, angles)
        ]
        return BufferList(cell_anchors)

    @property
    def num_cell_anchors(self):
        # Alias kept for backward compatibility.
        return self.num_anchors

    @property
    def num_anchors(self):
        """Number of anchors per spatial location, per feature map."""
        return [len(cell_anchors) for cell_anchors in self.cell_anchors]

    def _grid_anchors(self, grid_sizes):
        anchors = []
        for size, stride, base_anchors in zip(grid_sizes, self.strides,
                                              self.cell_anchors):
            shift_x, shift_y = _create_grid_offsets(size, stride, self.offset,
                                                    base_anchors.device)
            zeros = torch.zeros_like(shift_x)
            # Only the centers shift across the grid; w, h, angle are fixed.
            shifts = torch.stack((shift_x, shift_y, zeros, zeros, zeros),
                                 dim=1)
            anchors.append((shifts.view(-1, 1, 5) +
                            base_anchors.view(1, -1, 5)).reshape(-1, 5))
        return anchors

    def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512),
                              aspect_ratios=(0.5, 1, 2),
                              angles=(-90, -60, -30, 0, 30, 60, 90)):
        """Anchors (0, 0, w, h, angle) for one location, all combinations."""
        anchors = []
        for size in sizes:
            area = size ** 2.0
            for aspect_ratio in aspect_ratios:
                # Solve w * h == area with h / w == aspect_ratio.
                w = math.sqrt(area / aspect_ratio)
                h = aspect_ratio * w
                anchors.extend([0, 0, w, h, a] for a in angles)
        return torch.tensor(anchors)

    def forward(self, features):
        grid_sizes = [feature_map.shape[-2:] for feature_map in features]
        anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
        return [RotatedBoxes(x) for x in anchors_over_all_feature_maps]
class BasicBlock(nn.Module):
    """ResNet BasicBlock (two 3x3 convs + residual), mmcv/mmdet style, with
    optional gradient checkpointing (``with_cp``)."""

    # Output channels == planes * expansion.
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
                 style='pytorch', with_cp=False, conv_cfg=None,
                 norm_cfg=dict(type='BN'), dcn=None, plugins=None):
        super(BasicBlock, self).__init__()
        # DCN and plugins are only supported by Bottleneck in this codebase.
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'
        # Norm layers are registered under generated names (e.g. "bn1") so
        # they can be found/frozen by name; access via the properties below.
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, 3,
                                      stride=stride, padding=dilation,
                                      dilation=dilation, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg, planes, planes, 3, padding=1,
                                      bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    # BUG FIX: restored @property (present upstream, likely lost in
    # extraction). forward() does `self.norm1(out)`; without @property that
    # would call this accessor with `out` as an argument and raise TypeError.
    @property
    def norm1(self):
        """The normalization layer after conv1."""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """The normalization layer after conv2."""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """conv-norm-relu-conv-norm + identity, final ReLU outside."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            out = self.conv2(out)
            out = self.norm2(out)
            if self.downsample is not None:
                identity = self.downsample(x)
            # Residual add; the final ReLU is applied outside so it is not
            # part of the checkpointed segment.
            out += identity
            return out

        if self.with_cp and x.requires_grad:
            # Trade compute for memory via torch.utils.checkpoint.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
def get_indexing_from_db(db_path: str, shuffle=True) -> Dict[(str, List[Dict[(str, Any)]])]:
    """Read every table of the SQLite database at *db_path* into memory.

    Returns a mapping ``table_name -> list of row dicts`` (column name ->
    cell value). When *shuffle* is true, each table's rows are shuffled
    in place.
    """
    table_column_properties, _, _ = get_all_db_info_path(db_path)
    table_names = {prop[0] for prop in table_column_properties}
    rows_by_table = {}
    for table in table_names:
        columns = [prop[1] for prop in table_column_properties if prop[0] == table]
        # Quote identifiers so reserved words / odd names survive.
        quoted_cols = ', '.join(('"%s"' % c) for c in columns)
        query = 'select ' + quoted_cols + ' from "' + table + '";'
        result_rows = exec_db_path_(db_path, query)[1]
        rows_by_table[table] = [dict(zip(columns, row)) for row in result_rows]
        if shuffle:
            random.shuffle(rows_by_table[table])
    return rows_by_table
def _impl(array, axis, keepdims, mask_identity, highlevel, behavior, attrs):
    """Sum-reduce *array* along *axis* (awkward-array internal entry point).

    Unwraps the high-level array to a layout, applies the Sum reducer via
    ak._do.reduce, and re-wraps the result according to *highlevel*.
    """
    axis = regularize_axis(axis)
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        layout = ctx.unwrap(array, allow_record=False, primitive_policy='error')
    summed = ak._do.reduce(
        layout,
        ak._reducers.Sum(),
        axis=axis,
        mask=mask_identity,
        keepdims=keepdims,
        behavior=ctx.behavior,
    )
    return ctx.wrap(summed, highlevel=highlevel, allow_other=True)
def resize_flow(flow, shape):
    """Resize a flow field to *shape*, rescaling vector magnitudes to match.

    *flow* has shape (C, *spatial); *shape* is the new spatial shape. Each
    flow channel is multiplied by the zoom factor of its corresponding axis
    so displacements stay valid in the resized coordinate system. Nearest-
    neighbour interpolation (order=0) is used.
    """
    zoom_factors = [new / old for new, old in zip(shape, flow.shape[1:])]
    # Per-channel scale, reshaped to (C, 1, 1, ...) for broadcasting.
    scale = np.array(zoom_factors, dtype=flow.dtype)
    scale = scale.reshape(scale.shape + (1,) * len(shape))
    resized = ndi.zoom(flow, [1] + zoom_factors, order=0, mode='nearest',
                       prefilter=False)
    return scale * resized
def is_torch_fx_proxy(x):
    """Return True iff torch.fx is usable and *x* is a torch.fx.Proxy."""
    if not is_torch_fx_available():
        return False
    import torch.fx
    return isinstance(x, torch.fx.Proxy)
def matmul(mat_x, mat_y):
    """Matrix-multiply two tensors, supporting a 1-D left operand.

    A vector-times-matrix product v @ M is computed as M^T @ v so the
    helper only ever sees the matrix-first form.
    """
    x_shape = static(mat_x.get_shape())
    y_shape = static(mat_y.get_shape())
    vector_times_matrix = static((len(x_shape) == 1) and (len(y_shape) == 2))
    if vector_times_matrix:
        return _matmul_helper(transpose(mat_y), mat_x)
    return _matmul_helper(mat_x, mat_y)
def register_Ns3PointerChecker_methods(root_module, cls):
    """Register Python bindings for ns3::PointerChecker (pybindgen-generated;
    do not edit by hand)."""
    # Default constructor: ns3::PointerChecker::PointerChecker().
    cls.add_constructor([])
    # Copy constructor: PointerChecker(PointerChecker const &).
    cls.add_constructor([param('ns3::PointerChecker const &', 'arg0')])
    # Pure-virtual const accessor returning the TypeId of the pointee.
    cls.add_method('GetPointeeTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def test_classes():
    """Dataset CLASSES tuples must match get_classes() for every alias name,
    and unknown names must raise ValueError."""
    voc_classes = get_classes('voc')
    ade_classes = get_classes('ade')
    assert list(CityscapesDataset.CLASSES) == get_classes('cityscapes')
    assert list(PascalVOCDataset.CLASSES) == voc_classes
    assert voc_classes == get_classes('pascal_voc')
    assert list(ADE20KDataset.CLASSES) == ade_classes
    assert ade_classes == get_classes('ade20k')
    with pytest.raises(ValueError):
        get_classes('unsupported')
class DataLoader(object):
    """Load a recommendation dataset: train/validation/test interaction csvs,
    optional per-user and per-item feature csvs, per-user interaction
    histories, and the derived per-column min/max statistics used to size
    feature (one-hot/embedding) dimensions."""

    def parse_data_args(parser):
        # NOTE(review): declared without `self` or @staticmethod -- callers
        # presumably invoke DataLoader.parse_data_args(parser) on the class
        # itself; confirm before adding the decorator.
        parser.add_argument('--path', type=str, default='../dataset/', help='Input data dir.')
        parser.add_argument('--dataset', type=str, default='ml100k-1-5', help='Choose a dataset.')
        parser.add_argument('--sep', type=str, default='\t', help='sep of csv file.')
        parser.add_argument('--label', type=str, default='label', help='name of dataset label column.')
        return parser

    def __init__(self, path, dataset, label='label', load_data=True, sep='\t', seqs_sep=','):
        self.dataset = dataset
        self.path = os.path.join(path, dataset)
        # All split files live in <path>/<dataset>/ and are named
        # "<dataset><suffix>", with suffixes taken from global_p.
        self.train_file = os.path.join(self.path, (dataset + global_p.TRAIN_SUFFIX))
        self.validation_file = os.path.join(self.path, (dataset + global_p.VALIDATION_SUFFIX))
        self.test_file = os.path.join(self.path, (dataset + global_p.TEST_SUFFIX))
        self.info_file = os.path.join(self.path, (dataset + global_p.INFO_SUFFIX))
        self.user_file = os.path.join(self.path, (dataset + global_p.USER_SUFFIX))
        self.item_file = os.path.join(self.path, (dataset + global_p.ITEM_SUFFIX))
        self.train_his_file = os.path.join(self.path, (dataset + global_p.TRAIN_GROUP_SUFFIX))
        self.vt_his_file = os.path.join(self.path, (dataset + global_p.VT_GROUP_SUFFIX))
        (self.sep, self.seqs_sep) = (sep, seqs_sep)
        self.load_data = load_data
        self.label = label
        (self.train_df, self.validation_df, self.test_df) = (None, None, None)
        # Order matters: _load_his needs the split DataFrames from
        # _load_data, and _load_info scans all loaded frames.
        self._load_user_item()
        self._load_data()
        self._load_his()
        self._load_info()

    def _load_user_item(self):
        """Load the optional per-user / per-item feature tables."""
        (self.user_df, self.item_df) = (None, None)
        if (os.path.exists(self.user_file) and self.load_data):
            logging.info('load user csv...')
            self.user_df = pd.read_csv(self.user_file, sep='\t')
        if (os.path.exists(self.item_file) and self.load_data):
            logging.info('load item csv...')
            self.item_df = pd.read_csv(self.item_file, sep='\t')

    def _load_data(self):
        """Load whichever split csvs exist; missing splits stay None."""
        if (os.path.exists(self.train_file) and self.load_data):
            logging.info('load train csv...')
            self.train_df = pd.read_csv(self.train_file, sep=self.sep)
            logging.info(('size of train: %d' % len(self.train_df)))
        if (os.path.exists(self.validation_file) and self.load_data):
            logging.info('load validation csv...')
            self.validation_df = pd.read_csv(self.validation_file, sep=self.sep)
            logging.info(('size of validation: %d' % len(self.validation_df)))
        if (os.path.exists(self.test_file) and self.load_data):
            logging.info('load test csv...')
            self.test_df = pd.read_csv(self.test_file, sep=self.sep)
            logging.info(('size of test: %d' % len(self.test_df)))

    def _load_info(self):
        """Compute (or read cached) per-column max/min over all loaded frames,
        then derive label range, user/item counts and feature-name groups."""

        def json_type(o):
            # json.dumps default= hook: numpy int64 is not JSON-serializable.
            if isinstance(o, np.int64):
                return int(o)
            raise TypeError
        (max_dict, min_dict) = ({}, {})
        if (not os.path.exists(self.info_file)):
            # First run: scan every available frame and cache the statistics.
            for df in [self.train_df, self.validation_df, self.test_df, self.user_df, self.item_df]:
                if (df is None):
                    continue
                for c in df.columns:
                    if (c not in max_dict):
                        max_dict[c] = df[c].max()
                    else:
                        max_dict[c] = max(df[c].max(), max_dict[c])
                    if (c not in min_dict):
                        min_dict[c] = df[c].min()
                    else:
                        min_dict[c] = min(df[c].min(), min_dict[c])
            max_json = json.dumps(max_dict, default=json_type)
            min_json = json.dumps(min_dict, default=json_type)
            # NOTE(review): file handle never closed -- consider `with`.
            out_f = open(self.info_file, 'w')
            out_f.write(((max_json + os.linesep) + min_json))
        else:
            # Cached format: line 1 = max dict, line 2 = min dict.
            lines = open(self.info_file, 'r').readlines()
            max_dict = json.loads(lines[0])
            min_dict = json.loads(lines[1])
        self.column_max = max_dict
        self.column_min = min_dict
        self.label_max = self.column_max[self.label]
        self.label_min = self.column_min[self.label]
        logging.info(('label: %d-%d' % (self.label_min, self.label_max)))
        # Ids are assumed 0-based consecutive ints, so count = max id + 1.
        (self.user_num, self.item_num) = (0, 0)
        if ('uid' in self.column_max):
            self.user_num = (self.column_max['uid'] + 1)
        if ('iid' in self.column_max):
            self.item_num = (self.column_max['iid'] + 1)
        logging.info(('# of users: %d' % self.user_num))
        logging.info(('# of items: %d' % self.item_num))
        # Feature columns are grouped by naming convention: u_*, i_*, c_*.
        self.user_features = [f for f in self.column_max.keys() if f.startswith('u_')]
        logging.info(('# of user features: %d' % len(self.user_features)))
        self.item_features = [f for f in self.column_max.keys() if f.startswith('i_')]
        logging.info(('# of item features: %d' % len(self.item_features)))
        self.context_features = [f for f in self.column_max.keys() if f.startswith('c_')]
        logging.info(('# of context features: %d' % len(self.context_features)))
        self.features = ((self.context_features + self.user_features) + self.item_features)
        logging.info(('# of features: %d' % len(self.features)))

    def _load_his(self):
        """Build (if missing) and load the per-user interaction-history csvs
        for the train split and the validation+test ("vt") splits."""
        if (not self.load_data):
            return
        if (not os.path.exists(self.train_his_file)):
            logging.info('building train history csv...')
            train_his_df = group_user_interactions_df(self.train_df, label=self.label, seq_sep=self.seqs_sep)
            train_his_df.to_csv(self.train_his_file, index=False, sep=self.sep)
        if (not os.path.exists(self.vt_his_file)):
            logging.info('building vt history csv...')
            vt_df = pd.concat([self.validation_df, self.test_df])
            vt_his_df = group_user_interactions_df(vt_df, label=self.label, seq_sep=self.seqs_sep)
            vt_his_df.to_csv(self.vt_his_file, index=False, sep=self.sep)

        def build_his(his_df, seqs_sep):
            # Map uid -> list of interacted item ids parsed from 'iids'.
            uids = his_df['uid'].tolist()
            iids = his_df['iids'].str.split(seqs_sep).values
            iids = [[int(j) for j in i] for i in iids]
            user_his = dict(zip(uids, iids))
            return user_his
        (self.train_his_df, self.train_user_his) = (None, None)
        (self.vt_his_df, self.vt_user_his) = (None, None)
        if self.load_data:
            logging.info('load history csv...')
            self.train_his_df = pd.read_csv(self.train_his_file, sep=self.sep)
            self.train_user_his = build_his(self.train_his_df, self.seqs_sep)
            self.vt_his_df = pd.read_csv(self.vt_his_file, sep=self.sep)
            self.vt_user_his = build_his(self.vt_his_df, self.seqs_sep)

    def feature_info(self, include_id=True, include_item_features=True, include_user_features=True):
        """Return (feature names, total one-hot dims, per-feature first index,
        per-feature last index) for embedding-table construction."""
        features = []
        if include_id:
            features.extend(['uid', 'iid'])
        if include_user_features:
            features.extend(self.user_features)
        if include_item_features:
            features.extend(self.item_features)
        feature_dims = 0
        (feature_min, feature_max) = ([], [])
        for f in features:
            # Each feature occupies a contiguous [min, max] slice of the
            # global one-hot index space, sized by its max value + 1.
            feature_min.append(feature_dims)
            feature_dims += int((self.column_max[f] + 1))
            feature_max.append((feature_dims - 1))
        logging.info(('Model # of features %d' % len(features)))
        logging.info(('Model # of feature dims %d' % feature_dims))
        return (features, feature_dims, feature_min, feature_max)

    def append_his(self, last_n=10, supply=True, neg=False, neg_column=True):
        """Append a per-row history column (up to `last_n` previously
        interacted item ids) to each split; optionally a separate
        negative-history column."""
        (his_dict, neg_dict) = ({}, {})
        for df in [self.train_df, self.validation_df, self.test_df]:
            if (df is None):
                continue
            (history, neg_history) = ([], [])
            (uids, iids, labels) = (df['uid'].tolist(), df['iid'].tolist(), df[self.label].tolist())
            for (i, uid) in enumerate(uids):
                (iid, label) = (str(iids[i]), labels[i])
                if (uid not in his_dict):
                    his_dict[uid] = []
                if (uid not in neg_dict):
                    neg_dict[uid] = []
                # last_n <= 0 means "unlimited history".
                tmp_his = (his_dict[uid] if (last_n <= 0) else his_dict[uid][(- last_n):])
                tmp_neg = (neg_dict[uid] if (last_n <= 0) else neg_dict[uid][(- last_n):])
                if supply:
                    # Pad with '-1' so every row has exactly last_n entries.
                    tmp_his = (tmp_his + (['-1'] * last_n))
                    tmp_neg = (tmp_neg + (['-1'] * last_n))
                history.append(','.join(tmp_his[:last_n]))
                neg_history.append(','.join(tmp_neg[:last_n]))
                # Update running histories AFTER recording, so a row never
                # contains its own interaction.
                if ((label <= 0) and (not neg_column) and neg):
                    # Negatives folded into the main history, marked '~'.
                    his_dict[uid].append(('~' + iid))
                elif ((label <= 0) and neg_column):
                    neg_dict[uid].append(iid)
                elif (label > 0):
                    his_dict[uid].append(iid)
            df[global_p.C_HISTORY] = history
            if (neg and neg_column):
                df[global_p.C_HISTORY_NEG] = neg_history

    def drop_neg(self):
        """Drop negative-label rows from all splits and force labels to 1
        (implicit-feedback setting)."""
        logging.info('Drop Neg Samples...')
        self.train_df = self.train_df[(self.train_df[self.label] > 0)].reset_index(drop=True)
        self.validation_df = self.validation_df[(self.validation_df[self.label] > 0)].reset_index(drop=True)
        self.test_df = self.test_df[(self.test_df[self.label] > 0)].reset_index(drop=True)
        self.train_df[self.label] = 1
        self.validation_df[self.label] = 1
        self.test_df[self.label] = 1
        logging.info(('size of train: %d' % len(self.train_df)))
        logging.info(('size of validation: %d' % len(self.validation_df)))
        logging.info(('size of test: %d' % len(self.test_df)))
class WeightSetting(object):
    """Compute per-column (per-day) weights for a power-signal matrix.

    Uses solar-data-tools' clear-day detection when available; otherwise
    falls back to uniform weights of 1.
    """

    def __init__(self, solver_type='ECOS'):
        # Solver name kept for API compatibility; unused by the fallback.
        self._solver_type = solver_type

    def obtain_weights(self, power_signals_d):
        """Return one weight per column of *power_signals_d*."""
        try:
            from solardatatools.clear_day_detection import find_clear_days
        except ImportError:
            print('Weights not set!')
            print('Please make sure you have solar-data-tools installed')
            return np.ones(power_signals_d.shape[1])
        return find_clear_days(power_signals_d, boolean_out=False)
def save_info(path, info):
    """Serialize per-image camera info to a YAML file at *path*.

    Matrix-valued entries (intrinsics and world-to-camera R/t) are
    flattened to plain lists in place so YAML can represent them.
    """
    matrix_keys = ('cam_K', 'cam_R_w2c', 'cam_t_w2c')
    for im_id in sorted(info.keys()):
        im_info = info[im_id]
        for key in matrix_keys:
            if key in im_info.keys():
                im_info[key] = im_info[key].flatten().tolist()
    # CDumper + a huge width keeps each record on a single line.
    with open(path, 'w') as f:
        yaml.dump(info, f, Dumper=yaml.CDumper, width=10000)
def test_get_column_pair_plot_with_continous_data():
    """get_column_pair_plot must coerce datetime strings to datetimes and
    delegate to the sdmetrics plotting helper with a scatter plot."""
    # BUG FIX / NOTE(review): the original decorator line was garbled
    # ("('sdmetrics.visualization.get_column_pair_plot')" with no "@patch"),
    # leaving the `mock_get_plot` parameter unsupplied; rebuilt with an
    # in-function context manager so the test is self-contained.
    from unittest.mock import patch
    columns = ['amount', 'date']
    real_data = pd.DataFrame({'amount': [1, 2, 3], 'date': ['2021-01-01', '2022-01-01', '2023-01-01']})
    synthetic_data = pd.DataFrame({'amount': [1.0, 2.0, 3.0], 'date': ['2021-01-01', '2022-01-01', '2023-01-01']})
    metadata = SingleTableMetadata()
    metadata.add_column('amount', sdtype='numerical')
    metadata.add_column('date', sdtype='datetime')
    with patch('sdmetrics.visualization.get_column_pair_plot') as mock_get_plot:
        plot = get_column_pair_plot(real_data, synthetic_data, metadata, columns)
    # The wrapper should have converted date strings to real datetimes.
    expected_real_data = pd.DataFrame({'amount': [1, 2, 3], 'date': pd.to_datetime(['2021-01-01', '2022-01-01', '2023-01-01'])})
    expected_synth_data = pd.DataFrame({'amount': [1.0, 2.0, 3.0], 'date': pd.to_datetime(['2021-01-01', '2022-01-01', '2023-01-01'])})
    pd.testing.assert_frame_equal(mock_get_plot.call_args[0][0], expected_real_data)
    pd.testing.assert_frame_equal(mock_get_plot.call_args[0][1], expected_synth_data)
    assert mock_get_plot.call_args[0][2] == columns
    assert mock_get_plot.call_args[0][3] == 'scatter'
    assert plot == mock_get_plot.return_value
def _seg_64():
    """IDNA UTS #46 data segment covering code points U+1D664..U+1D708.

    Each tuple is (codepoint, status[, mapping]): 'M' = mapped to the given
    string, 'X' = disallowed. Generated data; do not edit by hand.
    NOTE(review): the u'' mappings from 120484 onward look like encoding
    loss (per the Unicode tables these code points map to italic dotless
    i/j and mathematical Greek letters) -- verify against the upstream
    idna package before relying on this table.
    """
    return [(120420, 'M', u'o'), (120421, 'M', u'p'), (120422, 'M', u'q'), (120423, 'M', u'r'),
    (120424, 'M', u's'), (120425, 'M', u't'), (120426, 'M', u'u'), (120427, 'M', u'v'),
    (120428, 'M', u'w'), (120429, 'M', u'x'), (120430, 'M', u'y'), (120431, 'M', u'z'),
    (120432, 'M', u'a'), (120433, 'M', u'b'), (120434, 'M', u'c'), (120435, 'M', u'd'),
    (120436, 'M', u'e'), (120437, 'M', u'f'), (120438, 'M', u'g'), (120439, 'M', u'h'),
    (120440, 'M', u'i'), (120441, 'M', u'j'), (120442, 'M', u'k'), (120443, 'M', u'l'),
    (120444, 'M', u'm'), (120445, 'M', u'n'), (120446, 'M', u'o'), (120447, 'M', u'p'),
    (120448, 'M', u'q'), (120449, 'M', u'r'), (120450, 'M', u's'), (120451, 'M', u't'),
    (120452, 'M', u'u'), (120453, 'M', u'v'), (120454, 'M', u'w'), (120455, 'M', u'x'),
    (120456, 'M', u'y'), (120457, 'M', u'z'), (120458, 'M', u'a'), (120459, 'M', u'b'),
    (120460, 'M', u'c'), (120461, 'M', u'd'), (120462, 'M', u'e'), (120463, 'M', u'f'),
    (120464, 'M', u'g'), (120465, 'M', u'h'), (120466, 'M', u'i'), (120467, 'M', u'j'),
    (120468, 'M', u'k'), (120469, 'M', u'l'), (120470, 'M', u'm'), (120471, 'M', u'n'),
    (120472, 'M', u'o'), (120473, 'M', u'p'), (120474, 'M', u'q'), (120475, 'M', u'r'),
    (120476, 'M', u's'), (120477, 'M', u't'), (120478, 'M', u'u'), (120479, 'M', u'v'),
    (120480, 'M', u'w'), (120481, 'M', u'x'), (120482, 'M', u'y'), (120483, 'M', u'z'),
    (120484, 'M', u''), (120485, 'M', u''), (120486, 'X'), (120488, 'M', u''),
    (120489, 'M', u''), (120490, 'M', u''), (120491, 'M', u''), (120492, 'M', u''),
    (120493, 'M', u''), (120494, 'M', u''), (120495, 'M', u''), (120496, 'M', u''),
    (120497, 'M', u''), (120498, 'M', u''), (120499, 'M', u''), (120500, 'M', u''),
    (120501, 'M', u''), (120502, 'M', u''), (120503, 'M', u''), (120504, 'M', u''),
    (120505, 'M', u''), (120506, 'M', u''), (120507, 'M', u''), (120508, 'M', u''),
    (120509, 'M', u''), (120510, 'M', u''), (120511, 'M', u''), (120512, 'M', u''),
    (120513, 'M', u''), (120514, 'M', u''), (120515, 'M', u''), (120516, 'M', u''),
    (120517, 'M', u''), (120518, 'M', u''), (120519, 'M', u''), (120520, 'M', u'')]
def mk_lean_code_def_name(fn_name: str, namespaces: List[ScopedName]):
    """Return the Lean name for *fn_name*'s code definition, resolved in the
    currently open *namespaces* with the 'code_' prefix applied."""
    scoped = ScopedName.from_string(fn_name)
    return get_name_in_open_scopes(scoped, namespaces, 'code_')
def cat(g, *tensors, **kwargs):
    """ONNX symbolic for concatenation: emit a Concat node along 'dim'."""
    axis = kwargs.pop('dim')
    # 'dim' must be the only keyword argument supplied.
    assert not kwargs
    return g.op('Concat', *tensors, axis_i=axis)
def test_setting_default_requests():
    """Metadata-request resolution: explicit class-level declarations,
    implicit requests inferred from the fit() signature, explicit
    overwrites, and UNUSED removal must all resolve to the expected
    request dicts."""
    test_cases = dict()

    class ExplicitRequest(BaseEstimator):
        # `prop` requested explicitly even though fit() does not name it.
        __metadata_request__fit = {'prop': None}

        def fit(self, X, y, **kwargs):
            return self
    test_cases[ExplicitRequest] = {'prop': None}

    class ExplicitRequestOverwrite(BaseEstimator):
        # Explicit declaration overrides the default inferred from the
        # signature (None -> True).
        __metadata_request__fit = {'prop': True}

        def fit(self, X, y, prop=None, **kwargs):
            return self
    test_cases[ExplicitRequestOverwrite] = {'prop': True}

    class ImplicitRequest(BaseEstimator):
        # No declaration: `prop` is inferred from the fit() signature.
        def fit(self, X, y, prop=None, **kwargs):
            return self
    test_cases[ImplicitRequest] = {'prop': None}

    class ImplicitRequestRemoval(BaseEstimator):
        # UNUSED removes the implicitly inferred request entirely.
        __metadata_request__fit = {'prop': metadata_routing.UNUSED}

        def fit(self, X, y, prop=None, **kwargs):
            return self
    test_cases[ImplicitRequestRemoval] = {}

    for (Klass, requests) in test_cases.items():
        assert (get_routing_for_object(Klass()).fit.requests == requests)
        # Only fit should carry any requests.
        assert_request_is_empty(Klass().get_metadata_routing(), exclude='fit')
        # Smoke-test that fit still runs.
        Klass().fit(None, None)
class Transform(object):
    """Bidirectional Chinese-numeral <-> Arabic-numeral text transformer."""

    def __init__(self):
        self.conf = utils.get_default_conf()
        self.cn2an = Cn2An().cn2an
        self.an2cn = An2Cn().an2cn

    def transform(self, inputs, mode='cn2an'):
        """Replace every numeral substring of *inputs* according to *mode*.

        Raises ValueError for any mode other than 'cn2an' / 'an2cn'.
        """
        if mode == 'cn2an':
            # Character class of all Chinese digits and (deduplicated) units.
            numeral_chars = self.conf['number_low'] + list(set(self.conf['unit_low']))
            pattern = '[' + ''.join(numeral_chars) + ']+'
            return re.sub(pattern, lambda m: str(self.cn2an(m.group())), inputs)
        if mode == 'an2cn':
            return re.sub('[0-9]+', lambda m: self.an2cn(m.group()), inputs)
        raise ValueError(f"error mode: {mode}, only support 'cn2an' and 'an2cn'!")
def int64_feature(values):
    """Wrap *values* (an int or a list/tuple of ints) in a tf.train.Feature
    holding an Int64List, for TFRecord serialization."""
    if isinstance(values, (tuple, list)):
        seq = values
    else:
        # A bare scalar becomes a one-element list.
        seq = [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=seq))
def RefreshRegisteredOperators(trigger_lazy=True):
    """Rebuild the module-level operator registry, optionally forcing lazy
    imports to run first so newly imported operators are visible."""
    global _REGISTERED_OPERATORS
    if trigger_lazy:
        TriggerLazyImport()
    _REGISTERED_OPERATORS = _GetRegisteredOperators()
def test_field_statement_eq_clone(default_test_case, field_mock):
    """A cloned FieldStatement must be structurally equal to its source."""
    int_type = default_test_case.test_cluster.type_system.convert_type_hint(int)
    source_ref = vr.VariableReference(default_test_case, int_type)
    original = stmt.FieldStatement(default_test_case, field_mock, source_ref)
    # Memo maps originals to their clones for structural comparison.
    memo = {source_ref: source_ref}
    cloned = original.clone(default_test_case, memo)
    memo[original.ret_val] = cloned.ret_val
    assert original.structural_eq(cloned, memo)
class Softplus_VGG(nn.Module):
    """VGG-style CIFAR-10 classifier using Softplus activations.

    The layer layout for each variant comes from the module-level ``cfg``
    dict ('M' entries are max-pools, ints are conv output channels).
    """

    def __init__(self, vgg_name):
        super(Softplus_VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        # Final 512-channel 1x1 feature map -> 10 classes.
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out

    def _make_layers(self, cfg):
        """Build the conv/pool stack described by the *cfg* list."""
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                # BUG FIX: the original wrote nn.Softplus(x), passing the
                # channel count as Softplus's `beta` parameter (a copy of the
                # nn.ReLU(inplace=True) template); use the default activation.
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.Softplus()]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)
def load_img_future_de_rain(filepath, nFrames, img_id):
    """Load the (GT target, rainy input, temporal neighbor frames) triple for
    a video de-raining sample centered at ``img_id + nFrames//2``.

    Returns (target, input, neigbor); target is None for real-rain footage.
    """
    # Half window: neighbors span [img_id - tt, img_id + tt] minus the center.
    tt = int((nFrames / 2))
    img_id = (img_id + tt)
    (target, input, neigbor) = (None, None, None)
    # Dataset variant is inferred from fixed path components.
    # NOTE(review): these index-based checks assume a fixed directory depth
    # (filepath.split('/')[3]) -- confirm against the dataset layout.
    if (filepath.split('/')[3].split('-')[0] == 'SPAC'):
        # SPAC: ground truth lives in a sibling "<prefix>_GT" directory;
        # frames are 5-digit zero-padded jpgs.
        targetPath = (((os.path.dirname(filepath) + '/') + filepath.split('/')[5].split('_')[0]) + '_GT')
        target = Image.open((((targetPath + '/') + str(img_id).zfill(5)) + '.jpg')).convert('RGB')
        input = Image.open((((filepath + '/') + str(img_id).zfill(5)) + '.jpg')).convert('RGB')
        neigbor = []
        # Frame ids around the center, excluding the center itself.
        seq = [x for x in range((img_id - tt), ((img_id + 1) + tt)) if (x != img_id)]
        for j in seq:
            neigbor.append(Image.open((((filepath + '/') + str(j).zfill(5)) + '.jpg')).convert('RGB'))
    elif (filepath.split('/')[3].split('_')[0] == 'frames'):
        # "frames" layout: gtc-<id>.jpg = clean frame, rfc-<id>.jpg = rainy.
        target = Image.open(((((filepath + '/') + 'gtc-') + str(img_id)) + '.jpg')).convert('RGB')
        input = Image.open(((((filepath + '/') + 'rfc-') + str(img_id)) + '.jpg')).convert('RGB')
        neigbor = []
        seq = [x for x in range((img_id - tt), ((img_id + 1) + tt)) if (x != img_id)]
        for j in seq:
            neigbor.append(Image.open(((((filepath + '/') + 'rfc-') + str(j)) + '.jpg')).convert('RGB'))
        # Crop the odd 889x500 frames to dimensions divisible by 8.
        if (target.size == (889, 500)):
            target = target.crop((0, 0, 888, 496))
            input = input.crop((0, 0, 888, 496))
            for j in range(len(neigbor)):
                neigbor[j] = neigbor[j].crop((0, 0, 888, 496))
    elif (filepath.split('/')[3] == 'rain_real'):
        # Real rain footage has no ground truth; frames are downscaled to 80%
        # of 1280x720.
        target = None
        input = Image.open((((filepath + '/') + str(img_id).zfill(5)) + '.jpg')).convert('RGB')
        # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10 -- confirm
        # the pinned Pillow version.
        input = input.resize((int((1280 * 0.8)), int((720 * 0.8))), Image.ANTIALIAS)
        neigbor = []
        seq = [x for x in range((img_id - tt), ((img_id + 1) + tt)) if (x != img_id)]
        for j in seq:
            tmp_nei = Image.open((((filepath + '/') + str(j).zfill(5)) + '.jpg')).convert('RGB')
            tmp_nei = tmp_nei.resize((int((1280 * 0.8)), int((720 * 0.8))), Image.ANTIALIAS)
            neigbor.append(tmp_nei)
    return (target, input, neigbor)
def data_prep(data_folder, hparams):
    """Prepare train/valid DynamicItemDatasets for ASR.

    Attaches an audio pipeline ('wav' -> 'sig') and a phoneme pipeline
    ('phn' -> BOS/EOS-decorated index tensors), fits the label encoder on
    both splits, and selects the output keys used by the training loop.
    """
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=(data_folder / '../annotation/ASR_train.json'),
        replacements={'data_root': data_folder})
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=(data_folder / '../annotation/ASR_dev.json'),
        replacements={'data_root': data_folder})
    datasets = [train_data, valid_data]
    label_encoder = sb.dataio.encoder.TextEncoder()
    label_encoder.expect_len(hparams['num_labels'])

    # BUG FIX / NOTE(review): the decorator prefixes were garbled in the
    # source (".data_pipeline.takes(...)", a syntax error); restored the
    # standard speechbrain form "@sb.utils.data_pipeline..." -- confirm
    # against the original recipe.
    @sb.utils.data_pipeline.takes('wav')
    @sb.utils.data_pipeline.provides('sig')
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    @sb.utils.data_pipeline.takes('phn')
    @sb.utils.data_pipeline.provides('phn_list', 'phn_encoded_bos', 'phn_encoded_eos')
    def text_pipeline(phn):
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        phn_encoded_bos = label_encoder.prepend_bos_index(phn_encoded).long()
        yield phn_encoded_bos
        phn_encoded_eos = label_encoder.append_eos_index(phn_encoded).long()
        yield phn_encoded_eos
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # BOS and EOS share a single index here (one insert with bos_index).
    label_encoder.insert_bos_eos(bos_index=hparams['bos_index'])
    label_encoder.update_from_didataset(train_data, output_key='phn_list')
    label_encoder.update_from_didataset(valid_data, output_key='phn_list')
    sb.dataio.dataset.set_output_keys(
        datasets, ['id', 'sig', 'phn_encoded_eos', 'phn_encoded_bos'])
    return (train_data, valid_data)
def test_method_name_and_count() -> None:
    """Every challenge file (except this one) must contain exactly one test
    function, verified via dynamic module loading."""
    this_file: str = os.path.basename(__file__)
    challenge_files: List[str] = get_python_files(CHALLENGES_DIR, this_file)
    for challenge_file in challenge_files:
        module = load_module_from_file(challenge_file)
        tests = get_test_functions(module)
        assert_single_test_function(tests, challenge_file)
def signed_log_add(x, y, sign_x, sign_y):
    """Add two signed log-domain magnitudes.

    Returns (sign, log|result|), where the sign of the larger magnitude
    wins; matching signs add in log space, opposing signs subtract.
    """
    # Order so that `hi` carries the larger log-magnitude.
    if y > x:
        hi, lo = y, x
        sign_hi, sign_lo = sign_y, sign_x
    else:
        hi, lo = x, y
        sign_hi, sign_lo = sign_x, sign_y
    if sign_hi != sign_lo:
        val = log_minus(hi, lo)
    else:
        val = log_add(hi, lo)
    return (sign_hi, val)
def test_run_phmmer():
    """run_phmmer must rank the four database sequences by similarity to the
    query (expected order: '2', '0', '3', '1')."""
    import os
    # Renamed from `input` (shadowed the builtin).
    sequences = ['MTFKLPDLPFDAGALEPYISALTMKTHHGKHHAAYIKNMNAILAERADAQTSLEAVVSLAAREANKKLFNNAAQAWNHGFFWQSLSADAQNGPSGDLRAAIMNSFGSLEAFNDEAKAKGVGHFASGWLWLVSDESGALSLCDLHDADTPITDPSLTPLLVCDLWEHAYYIDYANERPRFVDAFLTKLANWRFAQAQYQAARSGSGA', 'FAVSATKIHTKATLPALDYAYEALEPILSSHLLHLHHDKHHQTYVNNLNAAEEKLKDPSLDLHTQIALQSAIKFNGGGHVNHSIYWKNLAPKSAGGGAFNAQAPLGQAIVKKWGSFEAFKKNFNTQLAAIQGSGWGWLIKDADGSLRITTTMNQDTILDATPVITIDAWEHAYYPQYENRKAEYYENIWQIINWKEAEAR', 'MKFELPALPYPVNALEPTMSARTIEFHWGKHEAAYINNLNGLIEGTPLENDTLEEIVRKSDGPIYNNAAQAWNHIFFFFQLAPNGKKEPGGALAEAIDRHFGSFAAFKEAFAKAGATLFGSGWAWLSVKPDGQLEITQGPNAHNPLKNGAVPLLTADVWEHAYYLDYQNRRPDFLSALWNLVDWKVIEKR', 'MTHALPELGYDYDALEPFIDAKTMEIHHTKHHQTYVDKLNAALDGHDDLAKLGVNELISDLGKVPESIRPAVRNHGGGHSNHSFFWPLLKKNVALGGAVQEAIDRDFGSFDSFKTEFSNKAALLFGSGWTWVVADQGKLSIVTTPNQDSPVSDGKTPVLGLDVWEHAYYLKYQNRRPDYINAFFDIINWDKVNG']
    query = 'MSFELPALPYAKDALAPHISAETIEYHYGKHHQTYVTNLNNLIKGTAFEGKSLEEIIRSSEGGVFNNAAQVWNHTFYWNCLAPNAGGEPTGKVAEAIAASFGSFADFKAQFTDAAIKNFGSGWTWLVKNSDGKLAIVSTSNAGTPLTTDATPLLTVDVWEHAYYIDYRNARPGYLEHFWALVNWEFVAKNL'
    with tempfile.TemporaryDirectory() as tmp:
        # BUG FIX: `tmp + 'tmp.fasta'` concatenated without a path separator,
        # writing "<parent>/<tmpname>tmp.fasta" OUTSIDE the temporary
        # directory (and leaking it past cleanup); join the path properly.
        db_file = os.path.join(tmp, 'tmp.fasta')
        utils.write_sequential_fasta(db_file, sequences)
        hits = utils.run_phmmer(query, db_file)
        assert hits == ['2', '0', '3', '1']
class SkeletonUnpool(nn.Module):
    """Unpool per-joint features: copy each pooled joint's features back to
    every original joint it was merged from, via a fixed mapping matrix.

    ``pooling_list`` maps pooled-joint index -> list of original joint
    indices; ``output_joints_num`` is the unpooled joint count.
    """

    def __init__(self, pooling_list, output_joints_num):
        super(SkeletonUnpool, self).__init__()
        self.pooling_list = pooling_list
        self.input_joints_num = len(pooling_list)
        self.output_joints_num = output_joints_num
        # weight[j, i] = 1 when output joint j was merged into input joint i.
        weight = torch.zeros(self.output_joints_num, self.input_joints_num)
        for i, merged_joints in self.pooling_list.items():
            for j in merged_joints:
                weight[j, i] = 1
        # L1-normalize rows so each output joint averages over its sources.
        weight = F.normalize(weight, p=1)
        # BUG FIX: the original called .to('cuda') at construction time,
        # breaking CPU-only use; registering a plain frozen Parameter lets
        # module.to(device) place it correctly.
        self.weight = nn.Parameter(weight, requires_grad=False)

    def forward(self, input: torch.Tensor):
        """Map (input_joints, ...) features to (output_joints, ...)."""
        return torch.matmul(self.weight, input)
def multiprocess(stream, fun, queue_size=10, worker_count=5):
    """Apply *fun* to items of *stream* in worker processes, yielding results
    as they complete (order is not preserved).

    A producer process feeds `in_queue`; `worker_count` consumers apply
    *fun* and push results to `out_queue`. NOTE(review): closures are used
    as Process targets, which requires the 'fork' start method -- confirm.
    """
    import uuid
    in_queue = multiprocessing.JoinableQueue(maxsize=queue_size)
    out_queue = multiprocessing.JoinableQueue(maxsize=queue_size)
    # BUG FIX: the original sentinel was `object()` compared with `is`, but
    # queue items are pickled between processes, so the consumer's copy is
    # never identical to the parent's object and shutdown never triggers.
    # A unique, equality-comparable token survives pickling.
    end_marker = ('__multiprocess_end_of_stream__', uuid.uuid4().hex)

    def producer():
        for item in stream:
            in_queue.put(item)
        # One end marker per worker so every consumer terminates.
        for _ in range(worker_count):
            in_queue.put(end_marker)
    in_thread = multiprocessing.Process(target=producer)
    in_thread.daemon = True
    in_thread.start()

    def consumer():
        while True:
            item = in_queue.get()
            in_queue.task_done()
            if item == end_marker:
                # Forward the marker so the parent can count finished workers.
                out_queue.put(end_marker)
                break
            else:
                out_queue.put(fun(item))
    workers = [multiprocessing.Process(target=consumer) for _ in range(worker_count)]
    for w in workers:
        w.daemon = True
        w.start()
    end_count = 0
    while end_count < worker_count:
        item = out_queue.get()
        out_queue.task_done()
        if item == end_marker:
            end_count += 1
        else:
            yield item
def main():
    """Entry point: run proposal generation then feature generation for the
    split selected by --mode (train or test)."""
    print('Begin Proposal Generation Module')
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    tem_dir = cfg.tem_results_dir
    proposals_dir = cfg.pgm_proposals_dir
    features_dir = cfg.pgm_features_dir
    # Pick the annotation file and feature config for the requested split.
    if args.mode == 'test':
        split = (cfg.ann_file_val, cfg.pgm_features_test_cfg)
    elif args.mode == 'train':
        split = (cfg.ann_file_train, cfg.pgm_features_train_cfg)
    else:
        split = None
    if split is not None:
        ann_file, features_cfg = split
        generate_proposals(ann_file, tem_dir, proposals_dir, **cfg.pgm_proposals_cfg)
        print('\nFinish proposal generation')
        generate_features(ann_file, tem_dir, proposals_dir, features_dir, **features_cfg)
        print('\nFinish feature generation')
    print('Finish Proposal Generation Module')
class GmailOrganizeEmail(VirtualFunctionTool):
    # Declarative tool specification consumed by the toolkit framework; the
    # actual behavior is simulated elsewhere.
    name = 'GmailOrganizeEmail'
    summary = 'Move an email to a specific folder or update its labels.'
    # Input schema: `email_id` is mandatory; `folder` and `labels` are
    # independent optional updates. NOTE(review): nothing here enforces that
    # at least one of the optional fields is supplied -- confirm upstream.
    parameters: List[ArgParameter] = [{'name': 'email_id', 'type': 'string', 'description': 'The unique identifier of the email.', 'required': True}, {'name': 'folder', 'type': 'string', 'description': "The folder to move the email to, such as 'inbox', 'sent', 'drafts', or 'spam'.", 'required': False}, {'name': 'labels', 'type': 'array', 'description': "A list of updated labels of the email, e.g. 'important'.", 'required': False}]
    # Output schema: a single success flag.
    returns: List[ArgReturn] = [{'name': 'success', 'type': 'boolean', 'description': 'A boolean indicating whether the operation was successful.'}]
    # Declared failure modes.
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The 'email_id' is not found."}, {'name': 'InvalidRequestException', 'description': "The 'folder' is invalid."}]
def _number_field_elements_from_algebraics_list_of_lists_of_lists(listss, **kwds):
    """Like number_field_elements_from_algebraics, but for a triple-nested
    list of algebraic numbers; the output preserves the nesting structure.

    Returns (K, nested elements of K, hom) where hom embeds K into QQbar.
    """
    from sage.rings.qqbar import number_field_elements_from_algebraics
    # Flatten to a single list so all numbers land in one common field.
    flat = []
    for lists in listss:
        for inner in lists:
            flat.extend(inner)
    K, K_numbers, hom = number_field_elements_from_algebraics(flat, **kwds)
    # Re-nest the converted numbers in the original shape.
    it = iter(K_numbers)
    nested = [[[next(it) for _ in inner] for inner in lists] for lists in listss]
    return (K, nested, hom)
class TMMNetModeNetI(object):
    """SWIG-generated proxy for the C++ iterator TMMNetModeNetI (iterates
    the mode networks of a multimodal network). Do not edit by hand."""
    # Ownership flag of the underlying C++ object.
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        _snap.TMMNetModeNetI_swiginit(self, _snap.new_TMMNetModeNetI(*args))

    def Next(self):
        # Advance the iterator to the next mode network.
        return _snap.TMMNetModeNetI_Next(self)

    def __lt__(self, NodeI):
        return _snap.TMMNetModeNetI___lt__(self, NodeI)

    def __eq__(self, NodeI):
        return _snap.TMMNetModeNetI___eq__(self, NodeI)

    def GetModeId(self):
        # Id of the mode network the iterator currently points at.
        return _snap.TMMNetModeNetI_GetModeId(self)

    def GetModeNet(self):
        # The mode network object itself.
        return _snap.TMMNetModeNetI_GetModeNet(self)
    __swig_destroy__ = _snap.delete_TMMNetModeNetI
def make_batch_roberta(sessions):
    """Collate dialogue sessions into RoBERTa model inputs.

    Returns (padded input token tensor, label tensor, per-sample padded
    tokenizations of the final speaker's earlier utterances).
    """
    batch_input, batch_labels, batch_speaker_tokens = [], [], []
    for session in sessions:
        data, label_list = session[0], session[1]
        context_speaker, context, emotion, sentiment = data
        final_speaker = context_speaker[-1]
        prior_same_speaker = []
        pieces = []
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # Speaker marker tokens are '<s1>', '<s2>', ...
            pieces.append('<s' + str(speaker + 1) + '> ')
            pieces.append(utt + ' ')
            # Collect the final speaker's earlier (non-last) utterances.
            if (turn < len(context_speaker) - 1) and (speaker == final_speaker):
                prior_same_speaker.append(encode_right_truncated(utt, roberta_tokenizer))
        concat_string = ''.join(pieces).strip()
        batch_input.append(encode_right_truncated(concat_string, roberta_tokenizer))
        # Fine-grained emotion labels when more than 3 classes, else sentiment.
        if len(label_list) > 3:
            batch_labels.append(label_list.index(emotion))
        else:
            batch_labels.append(label_list.index(sentiment))
        batch_speaker_tokens.append(padding(prior_same_speaker, roberta_tokenizer))
    batch_input_tokens = padding(batch_input, roberta_tokenizer)
    return (batch_input_tokens, torch.tensor(batch_labels), batch_speaker_tokens)
def test_image_to_text_single():
    """Smoke-test shap.plots.image_to_text on a single mock explanation."""

    class MockImageExplanation:
        """Minimal stand-in for shap's image Explanation object."""

        def __init__(self, data, values, output_names):
            self.data = data
            self.values = values
            self.output_names = output_names

    height, width, n_words = 500, 500, 4
    mock_data = np.ones((height, width, 3)) * 50
    mock_values = np.random.rand(height, width, 3, n_words)
    mock_names = np.array([str(i) for i in range(n_words)])
    explanation = MockImageExplanation(mock_data, mock_values, mock_names)
    shap.plots.image_to_text(explanation)
def load_data(prompt_file, continuation_file, unigram_file):
    """Load prompts, candidate continuations, and a unigram frequency model.

    Args:
        prompt_file: one prompt per line.
        continuation_file: candidates per prompt, joined by ' <CAND_SEP> '.
        unigram_file: tab-separated "word<TAB>count" lines.

    Returns:
        (prompts, continuations, unigram_dict, unknown_value) where
        unigram_dict maps word -> smoothed relative frequency and
        unknown_value is the probability assigned to unseen words.
    """
    print('Reading lines...')
    # BUG FIX: the original opened all three files without ever closing
    # them; use context managers.
    prompts = []
    with open(prompt_file, 'r') as prompt_f:
        for prompt in prompt_f.readlines():
            prompts.append(prompt.strip('\n').strip('\ufeff'))
    continuations = []
    with open(continuation_file, 'r') as f:
        for cs in f.readlines():
            conts = cs.strip('\n').strip('\ufeff').split(' <CAND_SEP> ')
            continuations.append(conts)
    unigram_dict = {}
    with open(unigram_file, 'r') as f2:
        for line in f2.readlines():
            elements = line.split('\t')
            unigram_dict[elements[0].strip()] = int(elements[1].strip())
    total_freq = 0
    for (k, v) in unigram_dict.items():
        total_freq += v
    print('Total_freq: ', total_freq)
    # Normalize counts to (add-one-denominator smoothed) frequencies.
    for (k, v) in unigram_dict.items():
        unigram_dict[k] = (v / (total_freq + 1))
    # Debug peek at the first 10 entries.
    counter = 0
    for (k, v) in unigram_dict.items():
        if (counter < 10):
            print(k, ' | ', v)
        counter += 1
    unknown_value = (0.0001 / (total_freq + 1))
    print('Unknown value: ', unknown_value)
    assert (len(prompts) == len(continuations))
    logger.info('Loaded: %d', len(prompts))
    return (prompts, continuations, unigram_dict, unknown_value)
# NOTE(review): the decorator's leading characters were garbled in the source
# ("_function_dispatch(...)" with no '@'); restored per numpy.lib.recfunctions
# -- confirm against the file's imports.
@array_function_dispatch(_rec_drop_fields_dispatcher)
def rec_drop_fields(base, drop_names):
    """Return a new numpy recarray with the fields in *drop_names* dropped.

    Convenience wrapper around drop_fields with usemask=False and
    asrecarray=True.
    """
    return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def homchain(complex=None, **kwds):
    """Deprecated CHomP interface: compute the homology of a chain complex.

    Passing help=True returns CHomP's own usage text instead. Raises
    TypeError when *complex* is not a ChainComplex.
    """
    deprecation(33777, 'the CHomP interface is deprecated')
    from sage.homology.chain_complex import ChainComplex_class
    if kwds.get('help', False):
        return CHomP().help('homchain')
    if not isinstance(complex, ChainComplex_class):
        raise TypeError('Complex is not a chain complex.')
    return CHomP()('homchain', complex, **kwds)
def test_case157():
    """Batch-upsert entities (ld_data.subdata156) against the NGSI-LD broker
    and expect 204 No Content."""
    url = (brokerIp + '/ngsi-ld/v1/entityOperations/upsert')
    # BUG FIX / NOTE(review): the Link header value was garbled in the source
    # ('rel=" type="application/ld+json"'); restored the standard JSON-LD
    # context link relation -- confirm against the sibling test cases.
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/ld+json',
        'Link': '<{{link}}>; rel="http://www.w3.org/ns/json-ld#context"; type="application/ld+json"',
    }
    r = requests.post(url, data=json.dumps(ld_data.subdata156), headers=headers)
    print(r.content)
    print(r.status_code)
    assert (r.status_code == 204)
def create_permutation_instruction(item=None, rank_start=0, rank_end=100, model_name='gpt-3.5-turbo'):
    """Build the chat messages asking the model to rerank passages
    ``item['hits'][rank_start:rank_end]`` for ``item['query']``.

    Passages are truncated to `max_length` words; if the resulting prompt
    exceeds the model's token budget (minus a 200-token margin), the word
    limit is reduced and the prompt rebuilt until it fits.
    """
    query = item['query']
    hits = item['hits'][rank_start:rank_end]
    num = len(hits)
    max_length = 300
    while True:
        messages = get_prefix_prompt(query, num)
        for rank, hit in enumerate(hits, start=1):
            passage = hit['content'].replace('Title: Content: ', '').strip()
            passage = ' '.join(passage.split()[:int(max_length)])
            messages.append({'role': 'user', 'content': f'[{rank}] {passage}'})
            messages.append({'role': 'assistant', 'content': f'Received passage [{rank}].'})
        messages.append({'role': 'user', 'content': get_post_prompt(query, num)})
        if num_tokens_from_messages(messages, model_name) <= (max_tokens(model_name) - 200):
            return messages
        max_length -= 1
class MinimizerWrapper(object):
    """Bind a minimizer callable, an optional objective, and fixed kwargs so the
    wrapper can be invoked with just a starting point ``x0``."""

    def __init__(self, minimizer, func=None, **kwargs):
        self.minimizer = minimizer
        self.func = func
        self.kwargs = kwargs

    def __call__(self, x0):
        # Without an objective the minimizer is called directly on x0;
        # otherwise the objective is passed through as the first argument.
        call_args = (x0,) if self.func is None else (self.func, x0)
        return self.minimizer(*call_args, **self.kwargs)
def get_list_of_highlevel_actions(traj_data, test=False, test_dict=None, args_nonsliced=False, appended=False):
    """Expand an ALFRED task description into a high-level (object, action) plan.

    Returns:
        (list_of_highlevel_actions, categories_in_inst, second_object,
        caution_pointers) — the plan, object categories mentioned, flags for
        the second pickup in two-object tasks, and indices of risky steps.
    """
    if (not test):
        (language_goal, task_type, mrecep_target, obj_target, parent_target, sliced) = get_arguments(traj_data)
    if test:
        # In test mode the arguments are parsed from the natural-language goal.
        r_idx = traj_data['repeat_idx']
        instruction = traj_data['turk_annotations']['anns'][r_idx]['task_desc']
        instruction = instruction.lower()
        instruction = ''.join((ch for ch in instruction if (ch not in exclude)))
        (language_goal, task_type, mrecep_target, obj_target, parent_target, sliced) = get_arguments_test(test_dict, instruction)
    # Map ambiguous receptacle names to their interactable sub-parts.
    if (parent_target == 'Sink'):
        parent_target = 'SinkBasin'
    if (parent_target == 'Bathtub'):
        parent_target = 'BathtubBasin'
    if args_nonsliced:
        if (sliced == 1):
            # NOTE(review): 'Sliced' is appended here AND again below when
            # sliced is truthy — confirm this double-append is intentional for
            # the args_nonsliced path.
            obj_target = (obj_target + 'Sliced')
    categories_in_inst = []
    list_of_highlevel_actions = []
    second_object = []
    caution_pointers = []
    if (sliced == 1):
        # Slicing prologue: fetch a knife, slice the object, park knife in sink.
        list_of_highlevel_actions.append(('Knife', 'PickupObject'))
        list_of_highlevel_actions.append((obj_target, 'SliceObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions.append(('SinkBasin', 'PutObject'))
        categories_in_inst.append(obj_target)
    if sliced:
        obj_target = (obj_target + 'Sliced')
    if (task_type == 'pick_cool_then_place_in_recep'):
        # Pick, chill in fridge, retrieve, place in target receptacle.
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions = add_target('Fridge', 'PutObject', list_of_highlevel_actions)
        list_of_highlevel_actions.append(('Fridge', 'OpenObject'))
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        list_of_highlevel_actions.append(('Fridge', 'CloseObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions = add_target(parent_target, 'PutObject', list_of_highlevel_actions)
        categories_in_inst.append(obj_target)
        categories_in_inst.append('Fridge')
        categories_in_inst.append(parent_target)
    elif (task_type == 'pick_and_place_with_movable_recep'):
        # Put the object in a movable receptacle, then move both to the target.
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions = add_target(mrecep_target, 'PutObject', list_of_highlevel_actions)
        list_of_highlevel_actions.append((mrecep_target, 'PickupObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions = add_target(parent_target, 'PutObject', list_of_highlevel_actions)
        categories_in_inst.append(obj_target)
        categories_in_inst.append(mrecep_target)
        categories_in_inst.append(parent_target)
    elif (task_type == 'pick_and_place_simple'):
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions = add_target(parent_target, 'PutObject', list_of_highlevel_actions)
        categories_in_inst.append(obj_target)
        categories_in_inst.append(parent_target)
    elif (task_type == 'pick_heat_then_place_in_recep'):
        # Pick, microwave, retrieve, place in target receptacle.
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions = add_target('Microwave', 'PutObject', list_of_highlevel_actions)
        list_of_highlevel_actions.append(('Microwave', 'ToggleObjectOn'))
        list_of_highlevel_actions.append(('Microwave', 'ToggleObjectOff'))
        list_of_highlevel_actions.append(('Microwave', 'OpenObject'))
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        list_of_highlevel_actions.append(('Microwave', 'CloseObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions = add_target(parent_target, 'PutObject', list_of_highlevel_actions)
        categories_in_inst.append(obj_target)
        categories_in_inst.append('Microwave')
        categories_in_inst.append(parent_target)
    elif (task_type == 'pick_two_obj_and_place'):
        # Move two instances of the same object to the receptacle.
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions = add_target(parent_target, 'PutObject', list_of_highlevel_actions)
        # second_object flags which steps concern the second instance; the
        # prefix of False entries covers the steps already emitted above.
        if (parent_target in constants.OPENABLE_CLASS_LIST):
            second_object = ([False] * 4)
        else:
            second_object = ([False] * 2)
        if sliced:
            second_object = (second_object + ([False] * 3))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        second_object.append(True)
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions = add_target(parent_target, 'PutObject', list_of_highlevel_actions)
        second_object.append(False)
        categories_in_inst.append(obj_target)
        categories_in_inst.append(parent_target)
    elif (task_type == 'look_at_obj_in_light'):
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        toggle_target = 'FloorLamp'
        list_of_highlevel_actions.append((toggle_target, 'ToggleObjectOn'))
        categories_in_inst.append(obj_target)
        categories_in_inst.append(toggle_target)
    elif (task_type == 'pick_clean_then_place_in_recep'):
        # Pick, rinse in sink, retrieve, place in target receptacle.
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions.append(('SinkBasin', 'PutObject'))
        list_of_highlevel_actions.append(('Faucet', 'ToggleObjectOn'))
        list_of_highlevel_actions.append(('Faucet', 'ToggleObjectOff'))
        list_of_highlevel_actions.append((obj_target, 'PickupObject'))
        caution_pointers.append(len(list_of_highlevel_actions))
        list_of_highlevel_actions = add_target(parent_target, 'PutObject', list_of_highlevel_actions)
        categories_in_inst.append(obj_target)
        categories_in_inst.append('SinkBasin')
        categories_in_inst.append('Faucet')
        categories_in_inst.append(parent_target)
    else:
        raise Exception('Task type not one of 0, 1, 2, 3, 4, 5, 6!')
    if (sliced == 1):
        # Slicing used the sink; make sure it is a known category.
        if (not (parent_target == 'SinkBasin')):
            categories_in_inst.append('SinkBasin')
    print('instruction goal is ', language_goal)
    return (list_of_highlevel_actions, categories_in_inst, second_object, caution_pointers)
class DATASET_MODES():
    """Namespace of string constants naming the supported dataset splits."""

    # Canonical split identifiers used throughout the data pipeline.
    train = 'train'
    val = 'val'
    test = 'test'
    trainval = 'trainval'
# NOTE(review): the decorator below appears truncated — presumably
# @pytest.mark.parametrize; confirm against the original file.
.parametrize('likelihood', LIKELIHOODS)
def test_separable_likelihood_vectorization(likelihood):
    """Check that a separable likelihood's vectorized backward posterior and
    log-partition agree with the scalar per-element implementations."""
    assert (not likelihood.isotropic)
    N = np.prod(likelihood.size)
    # Evenly spaced grids for the natural parameters a, b.
    az = np.linspace(1, 2, N)
    az = az.reshape(likelihood.size)
    bz = np.linspace((- 2), 2, N)
    bz = bz.reshape(likelihood.size)
    (rz, vz) = likelihood.compute_backward_posterior(az, bz, likelihood.y)
    assert (rz.shape == bz.shape)
    assert (vz.shape == bz.shape)
    # Elementwise reference: scalar mean/variance at every (a, b, y).
    rz_ = np.array([likelihood.scalar_backward_mean(a, b, y) for (a, b, y) in zip(az, bz, likelihood.y)])
    rz_ = rz_.reshape(likelihood.size)
    assert_allclose(rz, rz_)
    vz_ = np.array([likelihood.scalar_backward_variance(a, b, y) for (a, b, y) in zip(az, bz, likelihood.y)])
    vz_ = vz_.reshape(likelihood.size)
    assert_allclose(vz, vz_)
    A = likelihood.compute_log_partition(az, bz, likelihood.y)
    A_ = np.mean([likelihood.scalar_log_partition(a, b, y) for (a, b, y) in zip(az, bz, likelihood.y)])
    # NOTE(review): exact float equality — only safe if both paths share the
    # same arithmetic; consider np.isclose if this ever flakes.
    assert (A == A_)
def random_hermitian_matrix(l, *batches, **kwargs):
    """Sample a random l x l Hermitian matrix (optionally batched).

    Args:
        l: matrix side length.
        *batches: optional leading batch dimensions.
        dtype / device: forwarded to ``torch.randn`` (defaults: double, cpu).
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    raw = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    # (A + A^H) / 2 is Hermitian by construction.
    return (raw + raw.transpose(-2, -1).conj()) / 2
class AttackNet(nn.Module):
    """Attack MLP: maps a NUM_CLASSES probability vector to a score in (0, 1)."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(NUM_CLASSES, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 64)
        # Final projection to a single logit (name kept for checkpoint compat).
        self.softmax = nn.Linear(64, 1)

    def forward(self, x, **kwargs):
        del kwargs  # accepted for interface compatibility, unused
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        # Fixed: F.sigmoid is deprecated in modern PyTorch; torch.sigmoid is
        # the supported equivalent.
        x = torch.sigmoid(self.softmax(x))
        return x
class CrossAttention(nn.Module):
    """Multi-head attention: queries from `x`, keys/values from `context`
    (falls back to self-attention when no context is given)."""

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        inner_dim = (dim_head * heads)
        context_dim = default(context_dim, query_dim)
        # 1/sqrt(d_k) softmax temperature.
        self.scale = (dim_head ** (- 0.5))
        self.heads = heads
        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))

    def forward(self, x, context=None, mask=None):
        h = self.heads
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)
        # Fold heads into the batch dimension: (b, n, h*d) -> (b*h, n, d).
        (q, k, v) = map((lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h)), (q, k, v))
        sim = (einsum('b i d, b j d -> b i j', q, k) * self.scale)
        if exists(mask):
            # Masked positions get the most negative finite value so softmax
            # assigns them (effectively) zero weight.
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = (- torch.finfo(sim.dtype).max)
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_((~ mask), max_neg_value)
        attn = sim.softmax(dim=(- 1))
        out = einsum('b i j, b j d -> b i d', attn, v)
        # Unfold heads back out of the batch dimension.
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out)
def r_pow_scalar_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, val=1):
    """Backward pass for y = val ** x with a scalar base `val`.

    Returns the gradient w.r.t. the exponent input:
    d(val**x)/dx = val**x * ln(val), scaled by the upstream gradient.
    """
    upstream = grad_inputs[0]
    exponent = inputs[0]
    return upstream * (val ** exponent) * np.log(val)
class BLUR(BuiltinFilter):
    """5x5 border-blur convolution filter (PIL BuiltinFilter layout)."""
    name = 'Blur'
    # (kernel size, scale divisor, offset, flattened 5x5 kernel).
    # The sixteen 1-entries (outer ring plus corners of the inner ring) sum to
    # the divisor 16, so overall brightness is preserved.
    filterargs = ((5, 5), 16, 0, (1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1))
class ParallelDroplessMLP(moe.ParallelMLP):
    """Dropless MoE expert MLP: routes every token to its expert (no capacity
    dropping), computing via a block-sparse (stk) or grouped-GEMM backend."""

    def __init__(self, args: Arguments):
        super(ParallelDroplessMLP, self).__init__(args)
        self.hidden_size = args.hidden_size
        self.ffn_hidden_size = mpu.features_per_rank(args)
        # Tile side length of the block-sparse topology.
        self.blocking = 128
        self.mlp = dmlp_registry.get(args)
        # Bits needed to radix-sort block column indices when transposing.
        max_column_index = ((self.ffn_hidden_size * self.num_experts) // self.blocking)
        self.transpose_sort_end_bit = max(int(np.ceil(np.log2(max_column_index))), 1)

    def sparse_transpose(self, size, row_indices, column_indices, offsets):
        # Derive CSC-style (transposed) metadata from the CSR-style topology.
        block_columns = (size[1] // self.blocking)
        (_, gather_indices) = ops.sort(column_indices.int(), self.transpose_sort_end_bit)
        column_indices_t = row_indices.gather(0, gather_indices.long())
        block_offsets_t = gather_indices.int()
        zero = torch.zeros((1,), dtype=torch.int32, device=row_indices.device)
        nnz_per_column = ops.histogram(column_indices, block_columns)
        nnz_per_column = ops.inclusive_cumsum(nnz_per_column, 0)
        offsets_t = torch.cat([zero, nnz_per_column])
        return (column_indices_t, offsets_t, block_offsets_t)

    def topology(self, x, padded_bins):
        # Build the block-sparse matrix topology for the expert computation.
        (padded_tokens, _) = x.size()
        assert ((padded_tokens % self.blocking) == 0)
        assert ((self.ffn_hidden_size % self.blocking) == 0)
        block_rows = (padded_tokens // self.blocking)
        blocks_per_row = (self.ffn_hidden_size // self.blocking)
        offsets = torch.arange(0, ((block_rows * blocks_per_row) + 1), blocks_per_row, dtype=torch.int32, device=x.device)
        column_indices = ops.topology(padded_bins, self.blocking, block_rows, blocks_per_row)
        # 'meta' device: only shape/dtype are needed to derive indices — no
        # actual storage is allocated.
        data = torch.empty(column_indices.numel(), self.blocking, self.blocking, dtype=common.dtype(self.args), device='meta')
        shape = (padded_tokens, (self.ffn_hidden_size * mpu.experts_per_rank(self.args)))
        row_indices = stk.ops.row_indices(shape, data, offsets, column_indices)
        (column_indices_t, offsets_t, block_offsets_t) = self.sparse_transpose(shape, row_indices, column_indices, offsets)
        return stk.Matrix(shape, data, row_indices, column_indices, offsets, column_indices_t, offsets_t, block_offsets_t)

    def indices_and_padded_bins(self, top_experts):
        # Sort tokens by assigned expert and compute (block-padded) bin bounds.
        top_experts = top_experts.int()
        (bin_ids, indices) = ops.sort(top_experts, self.sort_end_bit)
        tokens_per_expert = ops.histogram(top_experts, self.num_experts)
        # Round per-expert counts up to a multiple of the block size.
        padded_tokens_per_expert = ops.round_up(tokens_per_expert, self.blocking)
        padded_bins = ops.inclusive_cumsum(padded_tokens_per_expert, 0)
        padded_bins = promote_scalar(padded_bins)
        bins = ops.inclusive_cumsum(tokens_per_expert, 0)
        bins = promote_scalar(bins)
        return (indices, bin_ids, bins, padded_bins, tokens_per_expert)

    def sparse_forward_once(self, x, expert_weights, top_experts):
        expert_weights = expert_weights.flatten()
        top_experts = top_experts.flatten()
        with torch.no_grad():
            (indices, bin_ids, bins, padded_bins, tokens_per_expert) = self.indices_and_padded_bins(top_experts)
        # Gather tokens into expert order (padded to block boundaries).
        x = x.view((- 1), x.shape[(- 1)])
        x = ops.padded_gather(x, indices, bin_ids, bins, padded_bins, self.top_k)
        with torch.no_grad():
            topo = self.topology(x, padded_bins)
        x = self.mlp(x, topo)
        # Scatter back to token order, weighting by the router probabilities.
        x = ops.padded_scatter(x, indices, bin_ids, expert_weights, bins, padded_bins, self.top_k, self.args.quantize_scatter_num_bits)
        return (x, tokens_per_expert)

    def sparse_permute_and_compute(self, x, tokens_per_expert, indices, bin_ids, expert_weights, bins, expert_capactiy, top_k):
        # NOTE(review): 'expert_capactiy' (sic) is unused here; the parameter
        # is kept for interface parity with the grouped variant.
        padded_tokens_per_expert = ops.round_up(tokens_per_expert, self.blocking)
        padded_bins = ops.inclusive_cumsum(padded_tokens_per_expert, 0)
        padded_bins = promote_scalar(padded_bins)
        x = x.view((- 1), x.shape[(- 1)])
        x = ops.padded_gather(x, indices, bin_ids, bins, padded_bins, top_k)
        with torch.no_grad():
            topo = self.topology(x, padded_bins)
        x = self.mlp(x, topo)
        return ops.padded_scatter(x, indices, bin_ids, expert_weights, bins, padded_bins, top_k)

    def grouped_forward_once(self, x, expert_weights, top_experts):
        expert_weights = expert_weights.flatten()
        top_experts = top_experts.flatten()
        with torch.no_grad():
            (indices, bin_ids, bins, tokens_per_expert) = self.indices_and_bins(top_experts)
        out = self.grouped_permute_and_compute(x, tokens_per_expert, indices, bin_ids, expert_weights, bins, (- 1), self.args.moe_top_k)
        return (out, tokens_per_expert)

    def grouped_permute_and_compute(self, x, tokens_per_expert, indices, bin_ids, expert_weights, bins, expert_capactiy, top_k):
        # Grouped GEMM path: no block padding required.
        x = x.view((- 1), x.shape[(- 1)])
        x = ops.gather(x, indices, bin_ids, bins, top_k)
        x = self.mlp(x, tokens_per_expert)
        return ops.scatter(x, indices, bin_ids, expert_weights, bins, top_k, self.args.quantize_scatter_num_bits)

    def forward_once(self, x, expert_weights, top_experts):
        # Dispatch on the configured MLP backend.
        if (self.args.mlp_impl == 'sparse'):
            return self.sparse_forward_once(x, expert_weights, top_experts)
        else:
            return self.grouped_forward_once(x, expert_weights, top_experts)

    def permute_and_compute(self, x, tokens_per_expert, indices, bin_ids, expert_weights, bins, expert_capactiy, top_k):
        if (self.args.mlp_impl == 'sparse'):
            return self.sparse_permute_and_compute(x, tokens_per_expert, indices, bin_ids, expert_weights, bins, expert_capactiy, top_k)
        else:
            return self.grouped_permute_and_compute(x, tokens_per_expert, indices, bin_ids, expert_weights, bins, expert_capactiy, top_k)
def main():
    """Aggregate every `repeat_times` consecutive scores from the input file and
    write one aggregated value per complete group to the output (or stdout).

    A trailing incomplete group (fewer than `repeat_times` scores) is silently
    dropped, matching the original behavior.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_file', required=True, type=str)
    parser.add_argument('-n', '--repeat_times', required=True, type=int)
    parser.add_argument('-o', '--output_file', required=False)
    parser.add_argument('-f', '--func', required=False, default='mean')
    args = parser.parse_args()
    stream = (open(args.output_file, 'w') if args.output_file else sys.stdout)
    try:
        segment_scores = []
        # Close the input file deterministically (the original leaked it).
        with open(args.input_file) as infile:
            for line in infile:
                segment_scores.append(float(line.strip()))
                if (len(segment_scores) == args.repeat_times):
                    stream.write('{}\n'.format(aggregate_funcs[args.func](segment_scores)))
                    segment_scores = []
    finally:
        # Only close streams we opened ourselves — never stdout.
        if stream is not sys.stdout:
            stream.close()
class NormalizedMeanSquaredError2D(keras.losses.Loss):
    """MSE over the last two axes, normalized by the mean squared magnitude of
    the target (plus a small constant to avoid division by zero)."""

    def __init__(self, denom_nonzero=1e-05, **kwargs):
        # Small additive constant keeping the denominator strictly positive.
        self.denom_nonzero = denom_nonzero
        super().__init__(**kwargs)

    def call(self, y_true, y_pred):
        squared_error = tf.square(y_pred - y_true)
        mse = tf.reduce_mean(tf.reduce_mean(squared_error, axis=-1), axis=-1)
        target_power = tf.reduce_mean(tf.reduce_mean(tf.square(y_true), axis=-1), axis=-1)
        denominator = target_power + self.denom_nonzero
        return tf.truediv(mse, denominator)

    def get_config(self):
        base_config = super().get_config()
        return {**base_config, 'denom_nonzero': self.denom_nonzero}
def write_pip_lock_file(build_metadata):
    """Create a pip-tools conda environment for the build's Python version and
    compile its requirements file into a pinned lock file."""
    build_name = build_metadata['build_name']
    python_version = build_metadata['python_version']
    environment_name = f'pip-tools-python{python_version}'
    # Create a dedicated conda-forge environment containing pip-tools.
    create_cmd = f'conda create -c conda-forge -n {environment_name} python={python_version} pip-tools -y'
    execute_command(shlex.split(create_cmd))
    # Locate the environment prefix via `conda info`.
    conda_info = json.loads(execute_command(shlex.split('conda info --json')))
    environment_folder = [each for each in conda_info['envs'] if each.endswith(environment_name)][0]
    pip_compile_path = Path(environment_folder) / 'bin' / 'pip-compile'
    folder_path = Path(build_metadata['folder'])
    requirement_path = folder_path / f'{build_name}_requirements.txt'
    lock_file_path = folder_path / f'{build_name}_lock.txt'
    pip_compile(pip_compile_path, requirement_path, lock_file_path)
class inner_GNN(MessagePassing):
    """Mean-aggregation message passing where each message is an MLP applied to
    the elementwise product of the two endpoint features."""

    def __init__(self, dim, hidden_layer):
        super(inner_GNN, self).__init__(aggr='mean')
        self.lin1 = nn.Linear(dim, hidden_layer)
        self.lin2 = nn.Linear(hidden_layer, dim)
        self.act = nn.ReLU()
        self.drop = nn.Dropout(p=0.5)

    def forward(self, x, edge_index, edge_weight=None):
        x = x.squeeze()
        return self.propagate(edge_index, x=x, edge_weight=edge_weight)

    def message(self, x_i, x_j, edge_weight):
        # Pairwise interaction: MLP(x_i * x_j), optionally scaled per edge.
        pairwise_analysis = (x_i * x_j)
        pairwise_analysis = self.lin1(pairwise_analysis)
        pairwise_analysis = self.act(pairwise_analysis)
        pairwise_analysis = self.lin2(pairwise_analysis)
        pairwise_analysis = self.drop(pairwise_analysis)
        # Fixed: '!= None' replaced by the idiomatic 'is not None'.
        if (edge_weight is not None):
            interaction_analysis = (pairwise_analysis * edge_weight.view((- 1), 1))
        else:
            interaction_analysis = pairwise_analysis
        return interaction_analysis

    def update(self, aggr_out):
        return aggr_out
def s3_iterator(client, resource, root, dir, bucket, action):
    """Recursively walk `bucket` under prefix `dir` (using '/' as delimiter)
    and invoke `action(key)` for every object, with `root` stripped from keys."""
    paginator = client.get_paginator('list_objects')
    for page in paginator.paginate(Bucket=bucket, Delimiter='/', Prefix=dir):
        # Recurse into "subdirectories" (common prefixes) first.
        for subdir in page.get('CommonPrefixes') or []:
            s3_iterator(client, resource, root, subdir.get('Prefix'), bucket, action)
        # Then process the objects at this level.
        for obj in page.get('Contents') or []:
            action(obj.get('Key').replace(root, ''))
class data_prefetcher():
    """Overlap host-to-device copies and normalization with compute by staging
    the next batch on a side CUDA stream.

    Pulls (multi_crops, weak_flag) batches from `loader`, moves every crop to
    the GPU asynchronously, and normalizes in-place with ImageNet mean/std.
    """

    def __init__(self, loader, fp16=True):
        self.loader = iter(loader)
        self.fp16 = fp16
        self.stream = torch.cuda.Stream()
        # ImageNet channel statistics, shaped (1, 3, 1, 1) for broadcasting.
        self.mean = torch.tensor([0.485, 0.456, 0.406]).cuda().view(1, 3, 1, 1)
        self.std = torch.tensor([0.229, 0.224, 0.225]).cuda().view(1, 3, 1, 1)
        if fp16:
            self.mean = self.mean.half()
            self.std = self.std.half()
        # Stage the first batch immediately.
        self.preload()

    def preload(self):
        # Fetch the next batch; (None, None) signals the loader is exhausted.
        try:
            (self.multi_crops, self.weak_flag) = next(self.loader)
        except StopIteration:
            (self.multi_crops, self.weak_flag) = (None, None)
            return
        with torch.cuda.stream(self.stream):
            for i in range(len(self.multi_crops)):
                self.multi_crops[i] = self.multi_crops[i].cuda(non_blocking=True)
                # Normalize in-place in the target precision.
                if self.fp16:
                    self.multi_crops[i] = self.multi_crops[i].half().sub_(self.mean).div_(self.std)
                else:
                    self.multi_crops[i] = self.multi_crops[i].float().sub_(self.mean).div_(self.std)

    def next(self):
        # Block until the prefetch stream finished, hand out the staged batch,
        # and immediately start prefetching the next one.
        torch.cuda.current_stream().wait_stream(self.stream)
        (multi_crops, weak_flags) = (self.multi_crops, self.weak_flag)
        self.preload()
        return (multi_crops, weak_flags)
def asgi_test(case: Case, checks: Iterable[CheckFunction], targets: Iterable[Target], result: TestResult, store_interactions: bool, headers: (dict[(str, Any)] | None), feedback: Feedback, max_response_time: (int | None), data_generation_methods: list[DataGenerationMethod], dry_run: bool, errors: list[Exception]) -> None:
    """Run one generated test case against an ASGI app, collecting errors.

    In dry-run mode the case is marked executed but no request is sent.
    """
    with ErrorCollector(errors):
        _force_data_generation_method(data_generation_methods, case)
        result.mark_executed()
        headers = (headers or {})
        if (not dry_run):
            args = (checks, targets, result, store_interactions, headers, feedback, max_response_time)
            response = _asgi_test(case, *args)
            # Feed the response back to derive follow-up cases (stateful testing).
            add_cases(case, response, _asgi_test, *args)
# NOTE(review): the decorator below appears truncated — presumably
# @pytest.mark.parametrize; confirm against the original file.
.parametrize('fname,val,low,high', [('workspace_no_parameter_inits.json', '1', '-5', '5'), ('workspace_no_parameter_bounds.json', '5', '0', '10')], ids=['no_inits', 'no_bounds'])
def test_issue1814(datadir, mocker, fname, val, low, high):
    """Regression test: build_modifier fills Val/Low/High defaults when the
    workspace omits parameter inits or bounds (pyhf issue #1814)."""
    with open((datadir / fname), encoding='utf-8') as spec_file:
        spec = json.load(spec_file)
    modifierspec = {'data': None, 'name': 'mu_sig', 'type': 'normfactor'}
    channelname = None
    samplename = None
    sampledata = None
    modifier = pyhf.writexml.build_modifier(spec, modifierspec, channelname, samplename, sampledata)
    assert (modifier is not None)
    assert (sorted(modifier.keys()) == ['High', 'Low', 'Name', 'Val'])
    assert (modifier.get('Val') == val)
    assert (modifier.get('Low') == low)
    assert (modifier.get('High') == high)
class TensorflowCropFlipImagePipeline(BaseImagePipeline):
    """TF image pipeline: random flip + random crop on inputs, center crop to
    finalize outputs."""

    def __init__(self, output_image_size: Tuple, extra_pixels: int):
        super(TensorflowCropFlipImagePipeline, self).__init__(output_image_size, extra_pixels)
        # (fn, kwargs) pairs applied in order to input images.
        self.img_manipulation_list = [(random_flip, {}), (random_crop, {'height_crop': output_image_size[0], 'width_crop': output_image_size[1]})]
        # Finalization: crop generated images back to the target size.
        self.img_output_finalize_list = [(center_crop, {'output_size': output_image_size})]
        self.extra_pixels = extra_pixels

    def get_image_input_size(self) -> Tuple:
        # Inputs are produced larger than the output so random_crop has slack.
        return tuple((np.array(self.output_image_size) + self.extra_pixels))

    def image_input_manipulation(self, images: tf.Tensor) -> tf.Tensor:
        def manipulate_fn(image):
            for (fn, args) in self.img_manipulation_list:
                image = fn(image, **args)
            return image
        manipulated_images = manipulate_fn(images)
        return manipulated_images

    def image_output_finalize(self, images: tf.Tensor) -> tf.Tensor:
        def finalize_fn(image):
            for (fn, args) in self.img_output_finalize_list:
                image = fn(image, **args)
            return image
        output_finalize_images = finalize_fn(images)
        return output_finalize_images
def get_run_id(run_info=None):
    """Build a run identifier like 'host_pid_timestamp' from `run_info`.

    Missing keys are simply omitted; when `run_info` is None the current
    process's info is fetched via get_run_info().
    """
    info = get_run_info() if run_info is None else run_info
    parts = [str(info[key]) for key in ('hostname', 'pid', 'timestamp') if key in info]
    return '_'.join(parts)
class DeepNN(nn.Module):
    """Deep fully-connected CIFAR classifier with selectable residual scheme.

    `version` chooses the per-layer connection: 'ReZero' (learned residual
    weights initialized to zero), 'Vanilla' (plain stack), 'Residual'
    (identity skip), or 'LayerNorm' (post-activation normalization).
    """

    def __init__(self, lr, width, depth, version):
        super(DeepNN, self).__init__()
        self.linear_input = nn.Linear(3 * 32 * 32, width)
        self.linear_layers = nn.ModuleList([nn.Linear(width, width) for _ in range(depth)])
        self.linear_output = nn.Linear(width, 10)
        self.version = version
        if self.version == 'ReZero':
            # One learnable residual weight per layer, starting at zero.
            self.resweight = nn.Parameter(torch.zeros(depth), requires_grad=True)
        if self.version == 'LayerNorm':
            self.ln = torch.nn.LayerNorm(width)
        torch.nn.init.kaiming_normal_(self.linear_input.weight, a=0, mode='fan_in', nonlinearity='relu')
        # Xavier init with a version-dependent gain (unknown versions are
        # left with the default nn.Linear initialization, as before).
        if self.version in ('ReZero', 'Vanilla', 'LayerNorm'):
            gain = torch.sqrt(torch.tensor(2.0))
        elif self.version == 'Residual':
            gain = torch.sqrt(torch.tensor(0.25))
        else:
            gain = None
        if gain is not None:
            for layer in self.linear_layers:
                torch.nn.init.xavier_normal_(layer.weight, gain=gain)

    def forward(self, x):
        x = x.view(-1, 3 * 32 * 32)
        x = F.relu(self.linear_input(x))
        for i, layer in enumerate(self.linear_layers):
            if self.version == 'ReZero':
                x = x + self.resweight[i] * torch.relu(layer(x))
            elif self.version == 'Vanilla':
                x = F.relu(layer(x))
            elif self.version == 'Residual':
                x = x + F.relu(layer(x))
            elif self.version == 'LayerNorm':
                x = self.ln(F.relu(layer(x)))
        return self.linear_output(x)
def incremental_sent_bleu(hypothesis, references, max_n=4):
    """Sentence-level BLEU for every prefix of `hypothesis`.

    Returns a list with one BLEU score per hypothesis prefix length.
    """
    (clip_count, count, total_len_hyp, total_len_ref) = incremental_bleu_count([hypothesis], [references], max_n=max_n)
    # Single-sentence call: unwrap the batch dimension.
    clip_count = clip_count[0]
    count = count[0]
    total_len_hyp = total_len_hyp[0]
    total_len_ref = total_len_ref[0]
    n_len = len(clip_count)
    ret = []
    for i in range(n_len):
        brevity_penalty = 1.0
        bleu_scores = []
        bleu = 0
        # Modified n-gram precisions for this prefix.
        for n in range(max_n):
            if (count[i][n] > 0):
                bleu_scores.append((clip_count[i][n] / count[i][n]))
            else:
                bleu_scores.append(0)
        if (total_len_hyp[i] < total_len_ref[i]):
            brevity_penalty = math.exp((1 - (total_len_ref[i] / total_len_hyp[i])))

        def my_log(x):
            # NOTE(review): returning -0.0 for x == 0 means a zero n-gram
            # precision contributes nothing to log_bleu; the usual BLEU
            # convention is a large negative value — confirm this is intended
            # (it may be a garbled constant in the original source).
            if (x == 0):
                return (- .0)
            elif (x < 0):
                raise Exception('Value Error')
            return math.log(x)
        log_bleu = 0.0
        for n in range(max_n):
            log_bleu += my_log(bleu_scores[n])
        # Geometric mean of precisions times the brevity penalty.
        bleu = (brevity_penalty * math.exp((log_bleu / float(max_n))))
        ret.append(bleu)
    return ret
class modules(_TestParametrizer):
    """Parametrizer: instantiates one device/dtype test variant per ModuleInfo."""

    def __init__(self, module_info_list):
        super().__init__(handles_dtypes=True)
        self.module_info_list = module_info_list

    def _parametrize_test(self, test, generic_cls, device_cls):
        for module_info in self.module_info_list:
            for dtype in floating_types():
                test_name = '{}_{}{}'.format(module_info.name.replace('.', '_'), device_cls.device_type, _dtype_test_suffix(dtype))
                param_kwargs = {'module_info': module_info}
                _update_param_kwargs(param_kwargs, 'dtype', dtype)
                try:
                    # Collect skip markers and decorators applying to this combo.
                    active_decorators = []
                    if module_info.should_skip(generic_cls.__name__, test.__name__, device_cls.device_type, dtype):
                        active_decorators.append(skipIf(True, 'Skipped!'))
                    if (module_info.decorators is not None):
                        for decorator in module_info.decorators:
                            if (decorator.__class__.__name__ == 'DecorateInfo'):
                                if decorator.is_active(generic_cls.__name__, test.__name__, device_cls.device_type, dtype):
                                    active_decorators += decorator.decorators
                            else:
                                active_decorators.append(decorator)

                    # NOTE(review): the bare '(test)' below reads like a
                    # mangled @wraps(test) decorator — confirm against the
                    # original file.
                    (test)
                    def test_wrapper(*args, **kwargs):
                        return test(*args, **kwargs)
                    for decorator in active_decorators:
                        test_wrapper = decorator(test_wrapper)
                    (yield (test_wrapper, test_name, param_kwargs))
                except Exception as ex:
                    print('Failed to instantiate {0} for module {1}!'.format(test_name, module_info.name))
                    raise ex
class _HalfOpenInterval(Constraint):
    """Constrain values to the half-open interval [lower_bound, upper_bound)."""

    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def check(self, value):
        # Elementwise lower <= value < upper ('&' keeps tensor semantics).
        at_or_above_lower = self.lower_bound <= value
        below_upper = value < self.upper_bound
        return at_or_above_lower & below_upper

    def __repr__(self):
        # Drop the leading underscore from the class name for display.
        return '{}(lower_bound={}, upper_bound={})'.format(
            self.__class__.__name__[1:], self.lower_bound, self.upper_bound)
class NumpyKernel(BaseKernel):
    """Kernel wrapper that marshals NumPy arrays into ctypes pointers before
    invoking the underlying compiled implementation."""

    # NOTE(review): the signature takes `cls` but no @classmethod decorator is
    # visible here — it was likely lost in extraction; confirm against the
    # original file.
    def _cast(cls, x, t):
        if issubclass(t, ctypes._Pointer):
            if numpy.is_own_array(x):
                # Pointer arguments must reference contiguous memory.
                assert numpy.is_c_contiguous(x), 'kernel expects contiguous array'
                if (x.ndim > 0):
                    return ctypes.cast(x.ctypes.data, t)
                else:
                    # 0-d arrays are passed through unchanged.
                    return x
            elif hasattr(x, '_b_base_'):
                # Already a ctypes object: re-cast to the expected pointer type.
                return ctypes.cast(x, t)
            else:
                raise AssertionError("CuPy buffers shouldn't be passed to Numpy Kernels.")
        else:
            return x

    def __call__(self, *args) -> None:
        # Arity must match the compiled signature exactly.
        assert (len(args) == len(self._impl.argtypes))
        return self._impl(*(self._cast(x, t) for (x, t) in zip(args, self._impl.argtypes)))
class UnpairedImageTrain(UnpairedImageBase):
    """Training split for unpaired image-to-image data.

    Thin wrapper that forwards all source options into UnpairedImagePaths;
    the *1/*2 argument groups describe the two unpaired domains.
    """

    def __init__(self, size=None, random_crop=False, folder1=None, folder2=None, numpy_folder1=None, numpy_folder2=None, wikiart_info1=None, wikiart_key1=None, wikiart_info2=None, wikiart_key2=None):
        super().__init__()
        self.data = UnpairedImagePaths(size=size, random_crop=random_crop, folder1=folder1, folder2=folder2, numpy_folder1=numpy_folder1, numpy_folder2=numpy_folder2, wikiart_info1=wikiart_info1, wikiart_key1=wikiart_key1, wikiart_info2=wikiart_info2, wikiart_key2=wikiart_key2)
def read_tb(path):
    """Read TensorBoard event file(s) into a DataFrame.

    `path` is either a single events file or a directory containing them.
    Rows are steps (1..maxstep), columns are sorted tags; cells without a
    recorded value are NaN.
    """
    import pandas
    import numpy as np
    from glob import glob
    from collections import defaultdict
    import tensorflow as tf
    if osp.isdir(path):
        fnames = glob(osp.join(path, 'events.*'))
    elif osp.basename(path).startswith('events.'):
        fnames = [path]
    else:
        raise NotImplementedError(('Expected tensorboard file or directory containing them. Got %s' % path))
    tag2pairs = defaultdict(list)
    maxstep = 0
    for fname in fnames:
        for summary in tf.compat.v1.train.summary_iterator(fname):
            if summary.step <= 0:
                continue  # step 0 carries metadata, not scalar values
            maxstep = max(summary.step, maxstep)
            for v in summary.summary.value:
                tag2pairs[v.tag].append((summary.step, v.simple_value))
    tags = sorted(tag2pairs)
    data = np.full((maxstep, len(tag2pairs)), np.nan)
    for colidx, tag in enumerate(tags):
        for step, value in tag2pairs[tag]:
            data[step - 1, colidx] = value
    return pandas.DataFrame(data, columns=tags)
def _sum2(cp1, cp2, length):
    """Dot product of the first `length` 16-bit samples of two audio fragments."""
    width = 2  # bytes per sample
    return sum(getsample(cp1, width, i) * getsample(cp2, width, i) for i in range(length))
def compute_and_write_labels(output_path, qid2answers, qid2rankings):
    """Write per-query ranking labels as TSV and accumulate success/counts at
    several rank cutoffs.

    Args:
        output_path: destination TSV ('qid\\tpid\\trank\\tlabel' per row).
        qid2answers: queries to evaluate (only keys are used here).
        qid2rankings: qid -> list of (pid, rank, label), ranks contiguous 1-based.

    Returns:
        (success, counts): per-cutoff number of queries with at least one
        relevant passage within the cutoff, and total relevant passages.
    """
    cutoffs = [1, 5, 10, 20, 30, 50, 100, 1000, 'all']
    success = {cutoff: 0.0 for cutoff in cutoffs}
    counts = {cutoff: 0.0 for cutoff in cutoffs}
    with open(output_path, 'w') as f:
        for qid in qid2answers:
            if (qid not in qid2rankings):
                continue
            prev_rank = 0
            labels = []
            for (pid, rank, label) in qid2rankings[qid]:
                # Rankings must be contiguous and 1-based.
                assert (rank == (prev_rank + 1)), (qid, pid, (prev_rank, rank))
                prev_rank = rank
                labels.append(label)
                # Fixed: emit one TSV row per ranked passage — the flattened
                # original only wrote the final row of each query.
                f.write('\t'.join(map(str, [qid, pid, rank, int(label)])) + '\n')
            for cutoff in cutoffs:
                if (cutoff != 'all'):
                    success[cutoff] += (sum(labels[:cutoff]) > 0)
                    counts[cutoff] += sum(labels[:cutoff])
                else:
                    success[cutoff] += (sum(labels) > 0)
                    counts[cutoff] += sum(labels)
    return (success, counts)
def slice_module_at_return(module_name: str) -> list[UniqueInstruction]:
    """Import `module_name` under tracing, run its `func()`, then dynamically
    slice backwards from the last executed instruction."""
    config.configuration.statistics_output.coverage_metrics = [config.CoverageMetric.CHECKED]
    tracer = ExecutionTracer()
    tracer.current_thread_identifier = threading.current_thread().ident
    with install_import_hook(module_name, tracer):
        module = importlib.import_module(module_name)
        # Reload so the instrumented bytecode is actually executed.
        importlib.reload(module)
        module.func()
    trace = tracer.get_trace()
    known_code_objects = tracer.get_subject_properties().existing_code_objects
    assert known_code_objects
    dynamic_slicer = DynamicSlicer(known_code_objects)
    assert trace.executed_instructions
    # The slicing criterion is the very last traced instruction.
    last_traced_instr = trace.executed_instructions[(- 1)]
    slicing_instruction = UniqueInstruction(last_traced_instr.file, last_traced_instr.name, last_traced_instr.code_object_id, last_traced_instr.node_id, known_code_objects.get(last_traced_instr.code_object_id), last_traced_instr.offset, lineno=last_traced_instr.lineno)
    slicing_criterion = SlicingCriterion(slicing_instruction, (len(trace.executed_instructions) - 2))
    return dynamic_slicer.slice(trace, slicing_criterion)
# NOTE(review): '_metaclass(abc.ABCMeta)' reads like a mangled
# @six.add_metaclass(abc.ABCMeta) decorator — confirm against the original.
_metaclass(abc.ABCMeta)
class Configurable(object):
    """Base class for objects configured from a params dict merged with the
    subclass's default_params()."""

    def __init__(self, params, mode):
        self._params = _parse_params(params, self.default_params())
        self._mode = mode
        self._print_params()

    def _print_params(self):
        # Log the final merged configuration for reproducibility.
        classname = self.__class__.__name__
        tf.logging.info('Creating %s in mode=%s', classname, self._mode)
        tf.logging.info('\n%s', yaml.dump({classname: self._params}))

    # NOTE(review): mode/params read like @property accessors and
    # default_params like a @staticmethod/@abstractmethod — decorators may
    # have been lost in extraction; confirm against the original file.
    def mode(self):
        return self._mode

    def params(self):
        return self._params

    def default_params():
        raise NotImplementedError
class CustomInit(InitialConditions):
    """Initial conditions with per-(id, direction) overrides and scalar fallbacks.

    `a_init`/`b_init` are iterables of (id, direction, value) triples; any
    (id, direction) pair not listed falls back to the scalar `a`/`b`.
    """

    def __init__(self, a_init=None, b_init=None, a=0, b=0):
        overrides_a = a_init or []
        self.a_init = {id: {direction: value} for (id, direction, value) in overrides_a}
        overrides_b = b_init or []
        self.b_init = {id: {direction: value} for (id, direction, value) in overrides_b}
        self.a = a
        self.b = b
        self.repr_init()

    def init_a(self, shape, id, direction):
        # Override wins; otherwise fall back to the scalar default.
        try:
            return self.a_init[id][direction]
        except KeyError:
            return self.a

    def init_b(self, shape, id, direction):
        assert shape is not None
        try:
            b = self.b_init[id][direction]
            # A custom b must already have the requested shape.
            assert b.shape == shape
        except KeyError:
            b = self.b * np.ones(shape)
        return b
def save_son(filename, d, is_metadata=False):
    """Dump dict `d` to `filename` (extension normalized to '.son'), serialized
    via yaml.dump; `is_metadata` is forwarded to son.dump."""
    son.dump(d, normalize_extension(filename, '.son'), is_metadata=is_metadata, dumper=yaml.dump)
def native_to_byteorder(array, byteorder: str):
    """Return `array` converted from native byte order to `byteorder`.

    `byteorder` must be '<' (little) or '>' (big). When it already matches
    the native order the array is returned unchanged; otherwise a
    byte-swapped copy is returned (the input is not modified).
    """
    assert (byteorder in '<>')
    if byteorder == native_byteorder:
        return array
    return array.byteswap(inplace=False)
def gamma_grad_logr(epsilon, alpha):
    """Gradient of log r(epsilon; alpha) for the Marsaglia–Tsang gamma
    reparameterization, with b = alpha - 1/3, c = 1/sqrt(9b), v = 1 + eps*c."""
    b = alpha - (1.0 / 3.0)
    c = 1.0 / tf.sqrt(9.0 * b)
    v = 1.0 + (epsilon * c)
    # d/d(alpha) of the log acceptance-ratio terms.
    return (-0.5 / b) + (9.0 * epsilon * (c ** 3)) / v