code
stringlengths
101
5.91M
def test_mscn(dataset: str, version: str, workload: str, params: Dict[(str, Any)], overwrite: bool) -> None:
    """Load a pretrained MSCN cardinality-estimation model from disk, rebuild
    the estimator around it, and run the given test workload.

    Args:
        dataset: dataset name (also the checkpoint subdirectory under MODEL_ROOT).
        version: dataset version to evaluate against.
        workload: name of the query workload to run.
        params: must contain 'model', the checkpoint base name.
        overwrite: whether to overwrite existing test results.
    """
    torch.set_num_threads(NUM_THREADS)
    # Fail loudly if torch silently ignored the thread setting.
    assert (NUM_THREADS == torch.get_num_threads()), torch.get_num_threads()
    L.info(f'torch threads: {torch.get_num_threads()}')
    model_file = ((MODEL_ROOT / dataset) / f"{params['model']}.pt")
    L.info(f'load model from {model_file} ...')
    state = torch.load(model_file, map_location=DEVICE)
    args = state['args']
    # Use the table version recorded at training time, not the eval version.
    table = load_table(dataset, state['version'])
    (column2vec, op2vec) = load_dicts(table)
    # One extra feature slot beyond the column/op one-hots (presumably the
    # normalized literal value of each predicate — confirm against featurizer).
    predicate_feats = ((len(column2vec) + len(op2vec)) + 1)
    model = SetConv(args.num_samples, predicate_feats, args.hid_units)
    report_model(model)
    L.info(f"Overall MSCN model size + sample size = {state['model_size']:.2f}MB")
    model.load_state_dict(state['model_state_dict'])
    estimator = MSCN(model, params['model'], state['samples'], table, column2vec, op2vec, state['label_range'])
    L.info(f'load and built mscn estimator: {estimator}')
    run_test(dataset, version, workload, estimator, overwrite)
def test_has_notebooks():
    """Sanity check that notebook discovery finds at least two notebooks."""
    discovered = get_notebooks()
    assert len(discovered) >= 2, 'there are probably some notebooks that were not discovered'
def write_entries(cmd, basename, filename):
    """Serialize the distribution's entry points and write (or delete) the
    egg-info metadata file via the command's write_or_delete_file helper.

    Entry points may already be a string (written verbatim), None (file is
    deleted), or a mapping of section -> definitions (rendered as INI text).
    """
    ep = cmd.distribution.entry_points
    if ep is None or isinstance(ep, six.string_types):
        # Pre-rendered text or nothing at all: pass straight through.
        data = ep
    else:
        rendered = []
        for section, contents in sorted(ep.items()):
            if not isinstance(contents, six.string_types):
                # Parse the structured definitions and render them one per line,
                # sorted for deterministic output.
                parsed = EntryPoint.parse_group(section, contents)
                contents = '\n'.join(sorted(map(str, parsed.values())))
            rendered.append('[%s]\n%s\n\n' % (section, contents))
        data = ''.join(rendered)
    cmd.write_or_delete_file('entry points', filename, data, True)
def test_product_combiner(create_pool_classifiers):
    """The product rule over the fixture pool should predict class 0 for this query."""
    query = np.array([[1, -1]])
    pool = create_pool_classifiers
    predicted = product_combiner(pool, query)
    assert np.allclose(0, predicted)
class FixedBundleAdjustmentProblem():
    """A bundle-adjustment problem whose number of views and landmarks is
    fixed at construction time.

    Builds the symbolic problem state and residual, and can generate a C++
    linearization function for the resulting nonlinear least-squares problem.
    """

    def __init__(self, num_views: int, num_landmarks: int) -> None:
        self.num_views = num_views
        self.num_landmarks = num_landmarks
        # Symbolic problem state (views, landmarks, priors, matches, epsilon, costs).
        self.values = build_values(num_views=num_views, num_landmarks=num_landmarks)
        self.residual = self._build_residual()

    def generate(self, output_dir: T.Openable) -> None:
        """Generate the C++ linearization function into output_dir."""
        logger.debug('Generating linearization function for fixed-size problem')
        linearization_func = self._build_codegen_object()
        namespace = 'bundle_adjustment_fixed_size'
        linearization_func.generate_function(output_dir=output_dir, namespace=namespace)

    def _build_codegen_object(self) -> codegen.Codegen:
        """Build the Codegen object that computes the residual and, via
        with_linearization, its Jacobian/Hessian around the optimized keys."""
        logger.debug('Building linearization function')
        # Flatten nested keys like "views[0].pose" into valid C++ identifiers.
        flat_keys = {key: re.sub('[\\.\\[\\]]+', '_', key) for key in self.values.keys_recursive()}
        inputs = Values(**{flat_keys[key]: value for (key, value) in self.values.items_recursive()})
        outputs = Values(residual=sf.M(self.residual.to_storage()))
        # NOTE(review): the docstring literal below was line-wrapped in the
        # extracted source; reconstructed as a single string — confirm upstream.
        linearization_func = codegen.Codegen(
            inputs=inputs,
            outputs=outputs,
            config=codegen.CppConfig(),
            docstring=textwrap.dedent(
                '\n This function was autogenerated. Do not modify by hand.\n\n Computes the linearization of the residual around the given state,\n and returns the relevant information about the resulting linear system.\n\n Input args: The state to linearize around\n\n Output args:\n residual (Eigen::Matrix*): The residual vector\n '
            ),
        ).with_linearization(
            name='linearization',
            which_args=[flat_keys[key] for key in self._optimized_keys()],
            sparse_linearization=True,
        )
        return linearization_func

    def _optimized_keys(self) -> T.List[str]:
        """Keys optimized by the solver: every pose except view 0 (gauge fix)
        plus all landmark inverse ranges."""
        return ([f'views[{cam_index}].pose' for cam_index in range(1, self.num_views)]
                + [f'landmarks[{i}]' for i in range(self.num_landmarks)])

    def _build_residual(self) -> Values:
        """Assemble the full residual: pairwise pose priors, per-match GNC
        reprojection errors, and per-landmark inverse-range priors."""
        residual = Values()
        residual['pose_prior'] = []
        residual['reprojection'] = []
        residual['inv_range_prior'] = []
        # Relative-pose priors between every ordered pair of distinct views.
        for src_cam_index in range(self.num_views):
            pose_priors = []
            for target_cam_index in range(self.num_views):
                if (src_cam_index == target_cam_index):
                    continue
                pose_priors.append(geo_factors_codegen.between_factor(
                    self.values['views'][src_cam_index]['pose'],
                    self.values['views'][target_cam_index]['pose'],
                    self.values['priors'][src_cam_index][target_cam_index]['target_T_src'],
                    self.values['priors'][src_cam_index][target_cam_index]['sqrt_info'],
                    self.values['epsilon']))
            residual['pose_prior'].append(pose_priors)
        # Reprojection + inverse-range-prior terms for views 1..N-1 against view 0.
        for v_i in range(1, self.num_views):
            reprojections = []
            inv_range_priors = []
            for l_i in range(self.num_landmarks):
                # matches are indexed per target view (offset by 1: view 0 is the source).
                match = self.values['matches'][(v_i - 1)][l_i]
                reprojections.append(inverse_range_landmark_gnc_residual(
                    self.values['views'][0]['pose'],
                    self.values['views'][0]['calibration'],
                    self.values['views'][v_i]['pose'],
                    self.values['views'][v_i]['calibration'],
                    self.values['landmarks'][l_i],
                    match['source_coords'],
                    match['target_coords'],
                    match['weight'],
                    self.values['costs']['reprojection_error_gnc_mu'],
                    self.values['costs']['reprojection_error_gnc_scale'],
                    self.values['epsilon']))
                # The prior residual is 1-dimensional; take its single element.
                inv_range_priors.append(inverse_range_landmark_prior_residual(
                    self.values['landmarks'][l_i],
                    match['inverse_range_prior'],
                    match['weight'],
                    match['inverse_range_prior_sigma'],
                    self.values['epsilon'])[0])
            residual['reprojection'].append(reprojections)
            residual['inv_range_prior'].append(inv_range_priors)
        return residual
def add_hostvuln_to_allvuln(host_vulners, all_vulners):
    """Merge per-host vulnerabilities into the aggregate list in place.

    A list is spliced in element by element; a single string is appended as
    one entry; any other type is silently ignored.
    """
    if isinstance(host_vulners, str):
        all_vulners.append(host_vulners)
    elif isinstance(host_vulners, list):
        all_vulners.extend(host_vulners)
def insert_bn(names):
    """Return a copy of `names` where every name containing 'conv' is followed
    by a matching batch-norm name ('conv' replaced by 'bn', suffix kept)."""
    expanded = []
    for layer_name in names:
        if 'conv' in layer_name:
            bn_name = 'bn' + layer_name.replace('conv', '')
            expanded.extend([layer_name, bn_name])
        else:
            expanded.append(layer_name)
    return expanded
# NOTE(review): this decorator line appears garbled in the extracted source
# (the '@' and attribute path were lost — presumably `@properties.make_properties`
# or similar); confirm against the upstream file.
_properties


class TaskletFusion(transformation.SingleStateTransformation):
    """Fuses two Python tasklets — optionally connected through a scalar
    AccessNode — into a single tasklet, by inlining the first tasklet's
    single assignment into the second tasklet's code."""

    tsk1 = transformation.PatternNode(nd.Tasklet)      # producer tasklet
    data = transformation.PatternNode(nd.AccessNode)   # optional scalar intermediate
    tsk2 = transformation.PatternNode(nd.Tasklet)      # consumer tasklet

    def expressions(cls):
        # Two match patterns: tasklet -> access node -> tasklet, or direct
        # tasklet -> tasklet.
        return [node_path_graph(cls.tsk1, cls.data, cls.tsk2), node_path_graph(cls.tsk1, cls.tsk2)]

    def can_be_applied(self, graph: SDFGState, expr_index: int, sdfg: SDFG, permissive: bool=False) -> bool:
        """Check preconditions: both tasklets are Python, the intermediate (if
        any) is a single scalar with one consumer, and tsk1 is a single
        single-target assignment."""
        tsk1: nd.Tasklet = self.tsk1
        # `data` only exists in the first match pattern.
        data: nd.AccessNode = (self.data if (self.expr_index == 0) else None)
        tsk2: nd.Tasklet = self.tsk2
        # Only Python tasklets can be merged textually via AST manipulation.
        if ((tsk1.language is not dtypes.Language.Python) or (tsk2.language is not dtypes.Language.Python)):
            return False
        # The intermediate container must hold exactly one element.
        if ((data is not None) and (data.desc(sdfg).total_size != 1)):
            return False
        # tsk1's result (and the intermediate) must have exactly one consumer.
        if ((graph.out_degree(tsk1) != 1) or ((data is not None) and (graph.out_degree(data) != 1))):
            return False
        try:
            # tsk1 must be exactly one statement assigning to one target, so
            # its RHS can be inlined into tsk2.
            if (len(tsk1.code.code) != 1):
                return False
            if (len(tsk1.code.code[0].targets) != 1):
                return False
        # NOTE(review): bare `except:` kept from the original; it also
        # swallows KeyboardInterrupt — consider narrowing upstream.
        except:
            return False
        return True

    def apply(self, state: SDFGState, sdfg: SDFG) -> nd.Tasklet:
        """Perform the fusion: build a new tasklet with tsk1's RHS inlined into
        tsk2's code, rewire all edges, and remove the original nodes."""
        tsk1: nd.Tasklet = self.tsk1
        data: nd.AccessNode = (self.data if (self.expr_index == 0) else None)
        tsk2: nd.Tasklet = self.tsk2
        # The single edge carrying tsk1's result into tsk2 (possibly via `data`).
        tsk2_in_edge = state.out_edges((data if (data is not None) else tsk1))[0]
        # tsk2's surviving input connectors: all except the one being inlined away.
        inputs = {k: v for (k, v) in tsk2.in_connectors.items() if (k != tsk2_in_edge.dst_conn)}
        repldict = {}
        for in_edge in state.in_edges(tsk1):
            old_value = in_edge.dst_conn
            if (in_edge.dst_conn in inputs):
                # Connector-name clash with tsk2: rename unless both edges carry
                # the exact same memlet (then the connector can be shared).
                tsk2edge = list(state.in_edges_by_connector(tsk2, in_edge.dst_conn))[0]
                if ((in_edge.data != tsk2edge.data) or (in_edge.data.data != tsk2edge.data.data)):
                    in_edge.dst_conn = find_str_not_in_set(set(inputs), in_edge.dst_conn)
                    repldict[old_value] = in_edge.dst_conn
                else:
                    pass
            inputs[in_edge.dst_conn] = tsk1.in_connectors[old_value]
        assigned_value = tsk1.code.code[0].value
        # Apply any connector renames to tsk1's RHS before inlining it.
        if repldict:
            assigned_value = Renamer(repldict).visit(assigned_value)
        # Substitute tsk1's RHS for the consumed input connector in tsk2's code.
        new_code = [Inliner(tsk2_in_edge.dst_conn, assigned_value).visit(line) for line in tsk2.code.code]
        new_code_str = '\n'.join((astunparse.unparse(line) for line in new_code))
        new_tasklet = state.add_tasklet(((tsk1.label + '_fused_') + tsk2.label), inputs, tsk2.out_connectors, new_code_str)
        # Rewire tsk1's inputs onto the fused tasklet.
        for in_edge in state.in_edges(tsk1):
            state.add_edge(in_edge.src, in_edge.src_conn, new_tasklet, in_edge.dst_conn, in_edge.data)
        # Rewire tsk2's remaining inputs, dropping any connector already wired above.
        for in_edge in state.in_edges(tsk2):
            if (len(list(state.in_edges_by_connector(new_tasklet, in_edge.dst_conn))) == 0):
                state.add_edge(in_edge.src, in_edge.src_conn, new_tasklet, in_edge.dst_conn, in_edge.data)
            else:
                state.remove_memlet_path(in_edge)
        # Outputs of the fused tasklet are exactly tsk2's outputs.
        for out_edge in state.out_edges(tsk2):
            state.add_edge(new_tasklet, out_edge.src_conn, out_edge.dst, out_edge.dst_conn, out_edge.data)
        state.remove_node(tsk1)
        if (data is not None):
            state.remove_node(data)
        state.remove_node(tsk2)
def rand_x(num):
    """Draw a random x coordinate in one of three regions selected by `num`.

    Per-map offsets from `correct` shift the usable range; relies on the
    module-level global `map_num` to pick the row.
    """
    correct = [[0, 0], [2, 2], [5, 5], [-15, 15], [15, -15], [0, 0], [2, 2],
               [5, 0], [0, 0], [24, -15], [17.5, -15], [15, -15], [18, -15],
               [3.7, 5], [5, 5], [17.5, -15], [15, -15]]
    if 0.33 < num < 0.66:
        # right-hand region, left edge shifted by the map's x-offset
        x = random.uniform(61.3 + correct[map_num][0], 95.5)
    elif num > 0.66:
        # left-hand region, right edge shifted by the map's second offset
        x = random.uniform(3.55, 30.8 - correct[map_num][1])
        if map_num in (7, 8):
            # maps 7 and 8 use a fixed narrower range instead
            x = random.uniform(2.55, 27.8)
    else:
        # fallback sliver for num <= 0.33
        x = random.uniform(-30.7, -29.7)
    return x
class DocBuilder():
    """Builds one Sage document (identified by name and language) into the
    various Sphinx output formats."""

    def __init__(self, name, lang='en'):
        # A name like "en/tutorial" carries the language as its first path
        # component; strip it off and remember it.
        doc = name.split(os.path.sep)
        if (doc[0] in build_options.LANGUAGES):
            lang = doc[0]
            doc.pop(0)
        self.name = os.path.join(*doc)
        self.lang = lang
        self.dir = os.path.join(SAGE_DOC_SRC, self.lang, self.name)

    def _output_dir(self, type):
        """Return (creating if needed) the output directory for `type`."""
        from sage.env import SAGE_DOC
        d = os.path.join(SAGE_DOC, type, self.lang, self.name)
        os.makedirs(d, exist_ok=True)
        return d

    def _doctrees_dir(self):
        """Return (creating if needed) the Sphinx doctrees cache directory."""
        from sage.env import SAGE_DOC
        d = os.path.join(SAGE_DOC, 'doctrees', self.lang, self.name)
        os.makedirs(d, exist_ok=True)
        return d

    def _output_formats(self):
        """List the attribute names of builders tagged with `is_output_format`."""
        output_formats = []
        for attr in dir(self):
            if hasattr(getattr(self, attr), 'is_output_format'):
                output_formats.append(attr)
        output_formats.sort()
        return output_formats

    def pdf(self):
        """Build the LaTeX output, post-process the reference manual's .tex,
        then invoke make to produce PDFs and move them into the pdf dir.

        Raises RuntimeError if the make invocation fails.
        """
        self.latex()
        tex_dir = self._output_dir('latex')
        pdf_dir = self._output_dir('pdf')
        if (self.name == 'reference'):
            tex_file = os.path.join(tex_dir, 'reference.tex')
            with open(tex_file) as f:
                ref = f.read()
            # Undo Sphinx's LaTeX escaping inside the reference manual.
            # NOTE(review): the textbackslash substitution appears twice in the
            # original — presumably to handle nested escapes; confirm upstream.
            ref = re.sub('\\\\textbackslash{}', '\\\\', ref)
            ref = re.sub('\\\\textbackslash{}', '\\\\', ref)
            ref = re.sub('\\\\{', '{', ref)
            ref = re.sub('\\\\}', '}', ref)
            ref = re.sub('\\\\_', '_', ref)
            ref = re.sub('\\\\textasciicircum{}', '^', ref)
            with open(tex_file, 'w') as f:
                f.write(ref)
        make_target = "cd '%s' && $MAKE %s && mv -f *.pdf '%s'"
        error_message = 'failed to run $MAKE %s in %s'
        command = 'all-pdf'
        # subprocess.call returns non-zero on failure.
        if subprocess.call((make_target % (tex_dir, command, pdf_dir)), close_fds=False, shell=True):
            raise RuntimeError((error_message % (command, tex_dir)))
        # NOTE(review): this message string was line-wrapped in the extracted
        # source; reconstructed as one line — confirm upstream.
        logger.warning('Build finished. The built documents can be found in %s', pdf_dir)

    def clean(self, *args):
        """Remove the doctrees cache and the output dirs for the given formats
        (all known formats when none are given)."""
        shutil.rmtree(self._doctrees_dir())
        output_formats = (list(args) if args else self._output_formats())
        for format in output_formats:
            shutil.rmtree(self._output_dir(format), ignore_errors=True)

    # Per-format builder methods generated by builder_helper.
    html = builder_helper('html')
    pickle = builder_helper('pickle')
    web = pickle  # alias: 'web' builds the pickle output
    json = builder_helper('json')
    htmlhelp = builder_helper('htmlhelp')
    latex = builder_helper('latex')
    changes = builder_helper('changes')
    linkcheck = builder_helper('linkcheck')
    inventory = builder_helper('inventory')
def setup_context(setup_dir):
    """Generator-style context manager that sandboxes execution of a setup.py.

    Saves and restores pkg_resources state, sys.modules, sys.path, sys.argv,
    the temp directory, and the working directory; hides the outer setuptools
    so the setup script imports a fresh copy.
    """
    temp_dir = os.path.join(setup_dir, 'temp')
    with save_pkg_resources_state():
        with save_modules():
            # Remove the running setuptools from sys.modules so the sandboxed
            # script gets its own import.
            hide_setuptools()
            with save_path():
                with save_argv():
                    with override_temp(temp_dir):
                        with pushd(setup_dir):
                            # Re-import setuptools inside the sandbox, then
                            # yield control to the caller's with-block.
                            __import__('setuptools')
                            (yield)
class GCNConv_MLP(MessagePassing):
    """GCN-style layer: optionally applies (and caches) symmetric GCN
    normalization of the adjacency, then a linear transform of node features.

    Mirrors torch_geometric's GCNConv caching behavior for both dense
    edge_index and SparseTensor adjacencies.
    """

    _cached_edge_index: Optional[Tuple[(Tensor, Tensor)]]
    _cached_adj_t: Optional[SparseTensor]

    def __init__(self, in_channels: int, out_channels: int, improved: bool=False,
                 cached: bool=False, add_self_loops: bool=True, normalize: bool=True,
                 bias: bool=True, **kwargs):
        kwargs.setdefault('aggr', 'add')
        super(GCNConv_MLP, self).__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.improved = improved
        # When True, the normalized adjacency is computed once and reused.
        self.cached = cached
        self.add_self_loops = add_self_loops
        self.normalize = normalize
        self._cached_edge_index = None
        self._cached_adj_t = None
        self.weight = Parameter(torch.Tensor(in_channels, out_channels))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize parameters and invalidate normalization caches."""
        glorot(self.weight)
        zeros(self.bias)
        self._cached_edge_index = None
        self._cached_adj_t = None

    def forward(self, x: Tensor, edge_index: Adj, edge_weight: OptTensor=None) -> Tensor:
        """Normalize the adjacency (with caching), then linearly transform x."""
        if self.normalize:
            if isinstance(edge_index, Tensor):
                cache = self._cached_edge_index
                if (cache is None):
                    (edge_index, edge_weight) = gcn_norm(edge_index, edge_weight, x.size(self.node_dim), self.improved, self.add_self_loops)
                    if self.cached:
                        self._cached_edge_index = (edge_index, edge_weight)
                else:
                    (edge_index, edge_weight) = (cache[0], cache[1])
            elif isinstance(edge_index, SparseTensor):
                cache = self._cached_adj_t
                if (cache is None):
                    edge_index = gcn_norm(edge_index, edge_weight, x.size(self.node_dim), self.improved, self.add_self_loops)
                    if self.cached:
                        self._cached_adj_t = edge_index
                else:
                    edge_index = cache
        # BUG FIX: the original read `x = (x self.weight)`, a syntax error; the
        # intended operation (as in torch_geometric's GCNConv) is a matrix
        # multiplication with the weight parameter.
        x = x @ self.weight
        # NOTE(review): self.propagate(...) is never called, so no message
        # passing actually happens — confirm whether that is intentional.
        out = x
        if (self.bias is not None):
            out += self.bias
        return out

    def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
        """Scale neighbor features by the (optional) edge weight."""
        return (x_j if (edge_weight is None) else (edge_weight.view((- 1), 1) * x_j))

    def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
        """Fused sparse-matrix aggregation path."""
        return matmul(adj_t, x, reduce=self.aggr)

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels)
def main(opts):
    """VAD-segment a list of audio files.

    Builds (or reuses) a segment-list file via multiprocess VAD, then cuts,
    peak-normalizes and writes each segment as a wav file under opts.out_root.
    """
    data_folder = opts.data_root
    file_lst = opts.file_list
    file_out = opts.file_out
    save_path = opts.save_path if hasattr(opts, 'save_path') else opts.out_root  # noqa: unused in original
    # NOTE(review): the original assigned `save_path = opts.out_root` and never
    # used it; kept below for fidelity.
    save_path = opts.out_root
    # Mirror the input directory tree into the output root.
    copy_folder(opts.data_root, opts.out_root)
    if (not os.path.exists(file_out)):
        print('VADing signals to build {} list...'.format(file_out))
        pool = mp.Pool(opts.num_workers)
        with open(file_out, 'w') as f:
            with open(file_lst, 'r') as lst_f:
                wav_lst = [(data_folder, line.rstrip()) for line in lst_f]
                # Counters kept from the original; they are never updated below.
                count = 1
                count_seg_tot = 0
                count_short = 0
                wi = 1
                # Run VAD in parallel; each worker yields annotation lines.
                for annotations in tqdm.tqdm(pool.imap(segment_signal, wav_lst), total=len(wav_lst)):
                    for annotation in annotations:
                        f.write(annotation)
    else:
        print('[!] Found existing {} file, proceeding with it'.format(file_out))
    with open(file_out, 'r') as f:
        fnames = [l.rstrip() for l in f]
    print('Producing segments out of VAD list...')
    beg_t = timer()
    for (li, line) in tqdm.tqdm(enumerate(fnames, start=1), total=len(fnames)):
        # Each line: "<wav_file> <begin_sample> <end_sample> <segment_id>"
        (wav_file, beg_samp, end_samp, seg_id) = line.split(' ')
        (signal, fs) = sf.read(os.path.join(opts.data_root, wav_file))
        # Peak-normalize the whole file before slicing the segment out.
        signal = (signal / np.max(np.abs(signal)))
        signal = signal[int(float(beg_samp)):int(float(end_samp))]
        path_out = os.path.join(opts.out_root, wav_file)
        # Segments are written as wav regardless of flac input.
        path_out = path_out.replace('.flac', (('-' + str(seg_id)) + '.wav'))
        sf.write(path_out, signal, fs)
    end_t = timer()
    print('Finalized segments production to output path: {}'.format(opts.out_root))
    print('Production time: {:.1f} s'.format((end_t - beg_t)))
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenization against a fixed vocab.

    Words longer than `max_input_chars_per_word`, or containing any span with
    no vocab match, collapse to the single `unk_token`.
    """

    def __init__(self, vocab, unk_token='[UNK]', max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Split `text` into wordpiece tokens; continuation pieces get '##'."""
        text = convert_to_unicode(text)
        output_tokens = []
        for word in whitespace_tokenize(text):
            chars = list(word)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            pieces = []
            failed = False
            start = 0
            while start < len(chars):
                # Longest vocab entry beginning at `start` (try longest first).
                match = None
                for end in range(len(chars), start, -1):
                    candidate = ''.join(chars[start:end])
                    if start > 0:
                        candidate = '##' + candidate
                    if candidate in self.vocab:
                        match = candidate
                        start = end
                        break
                if match is None:
                    failed = True
                    break
                pieces.append(match)
            if failed:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(pieces)
        return output_tokens
def show_average():
    """Bar-plot each method's accuracy delta relative to the first (baseline)
    result on dataset index 4.

    Reads the module-level globals RESULTS, X, color and DATASETS.
    """
    dataset = 4
    # One accuracy value per method for the chosen dataset column.
    result = [RESULT[dataset] for RESULT in RESULTS]
    # Differences vs. the baseline (first entry); baseline itself excluded.
    Y = [(num - result[0]) for num in result][1:]
    plt.bar(X, Y, color=color)
    plt.yticks(fontsize=18, rotation=90)
    plt.xticks(X, fontsize=18)
    plt.title(DATASETS[dataset], {'fontsize': 30})
    plt.ylim((- 10), 20)
    plt.ylabel('Accuracy', fontsize=28)
    plt.show()
def load_hdf5_tree(hdf5_file_name):
    """Walk an HDF5 file and return a flat summary dict.

    Maps each non-group item's byte-encoded path to
    {'value': <scalar or "array shape = ..." summary>, 'attrs': {bytes: value}}.

    Args:
        hdf5_file_name: path to the HDF5 file to read.
    """
    out_dict = {}

    def add_item(name, obj):
        # Groups are containers only; record datasets/leaves.
        if not isinstance(obj, h5py.Group):
            tmp = {}
            # NOTE(review): `obj.value` was removed in h5py >= 3.0; switch to
            # `obj[()]` if this must run on modern h5py.
            # FIX: use isinstance instead of `type(...) == numpy.ndarray` —
            # the idiomatic check, and it also covers ndarray subclasses.
            if isinstance(obj.value, numpy.ndarray):
                # Summarize arrays by shape instead of storing the data.
                tmp['value'] = 'array shape = {}'.format(obj.shape)
            else:
                tmp['value'] = obj.value
            # Keys are encoded to bytes (kept from the original behavior).
            tmp['attrs'] = dict(((k.encode(), v) for (k, v) in list(obj.attrs.items())))
            out_dict.update({name.encode(): tmp})

    with h5py.File(hdf5_file_name, 'r') as h5_file:
        h5_file.visititems(add_item)
    return out_dict
# NOTE(review): this decorator line appears garbled in the extracted source
# (the '@' was lost — presumably `@register_pydub_effect`); confirm upstream.
_pydub_effect


def speedup(seg, playback_speed=1.5, chunk_size=150, crossfade=25):
    """Speed up an AudioSegment without changing pitch.

    Chops `seg` into chunks, removes a proportional span of audio after each
    chunk, and crossfades the remaining chunks back together.

    Args:
        seg: the input AudioSegment.
        playback_speed: desired speed multiplier (> 1.0).
        chunk_size: milliseconds of audio kept per pass.
        crossfade: milliseconds of crossfade between kept chunks.

    Raises:
        Exception: if the segment is too short to split into two chunks.
    """
    # Fraction of the audio that is kept at the requested speed.
    atk = (1.0 / playback_speed)
    if (playback_speed < 2.0):
        # Keep `chunk_size` ms and remove the proportional remainder.
        ms_to_remove_per_chunk = int(((chunk_size * (1 - atk)) / atk))
    else:
        # At >= 2x more is removed than kept: shrink the kept chunk instead.
        ms_to_remove_per_chunk = int(chunk_size)
        chunk_size = int(((atk * chunk_size) / (1 - atk)))
    # The crossfade cannot exceed the removed span.
    crossfade = min(crossfade, (ms_to_remove_per_chunk - 1))
    chunks = make_chunks(seg, (chunk_size + ms_to_remove_per_chunk))
    if (len(chunks) < 2):
        raise Exception('Could not speed up AudioSegment, it was too short {2:0.2f}s for the current settings:\n{0}ms chunks at {1:0.1f}x speedup'.format(chunk_size, playback_speed, seg.duration_seconds))
    # The crossfade overlaps into the removed region, so remove less outright.
    ms_to_remove_per_chunk -= crossfade
    # The final chunk is kept whole (nothing to remove after it).
    last_chunk = chunks[(- 1)]
    chunks = [chunk[:(- ms_to_remove_per_chunk)] for chunk in chunks[:(- 1)]]
    out = chunks[0]
    for chunk in chunks[1:]:
        out = out.append(chunk, crossfade=crossfade)
    out += last_chunk
    return out
def time_synchronized():
    """Wall-clock timestamp that first waits for any pending CUDA work, so
    GPU kernel time is included in timing measurements."""
    cuda_present = torch.cuda.is_available()
    if cuda_present:
        torch.cuda.synchronize()
    return time.time()
class UnpoolingDataGrad(UnaryDataGrad):
    """Data-gradient (backward) function for Unpooling.

    Delegates to the forward Unpooling function — presumably because the
    gradient of unpooling has the same computational form; confirm against
    the framework's autograd conventions.
    """

    def __init__(self, ctx, kernel, channel_last=False):
        # ctx: execution context; kernel: per-dimension unpooling factors;
        # channel_last: NHWC vs NCHW layout flag.
        super(UnpoolingDataGrad, self).__init__(ctx)
        self._func = _F.Unpooling(ctx, kernel, channel_last)
def test_fsaf_head_loss():
    """Exercise FSAFHead.loss on CUDA: with one ground-truth box both losses
    are positive; with no ground truth the box loss must vanish."""
    s = 256
    img_metas = [{'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_shape': (s, s, 3)}]
    # FSAF uses a single anchor per location and TBLR box encoding, with
    # per-element (reduction='none') losses so the head can reweight them.
    cfg = dict(reg_decoded_bbox=True, anchor_generator=dict(type='AnchorGenerator', octave_base_scale=1, scales_per_octave=1, ratios=[1.0], strides=[8, 16, 32, 64, 128]), bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0), loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0, reduction='none'), loss_bbox=dict(type='IoULoss', eps=1e-06, loss_weight=1.0, reduction='none'))
    train_cfg = mmcv.Config(dict(assigner=dict(type='CenterRegionAssigner', pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01), allowed_border=(- 1), pos_weight=(- 1), debug=False))
    head = FSAFHead(num_classes=4, in_channels=1, train_cfg=train_cfg, **cfg)
    # The remainder requires CUDA; it is skipped silently on CPU-only hosts.
    if torch.cuda.is_available():
        head.cuda()
        # One feature map per FPN stride, each downscaled by 2**(i+2).
        feat = [torch.rand(1, 1, (s // (2 ** (i + 2))), (s // (2 ** (i + 2)))).cuda() for i in range(len(head.anchor_generator.strides))]
        (cls_scores, bbox_preds) = head.forward(feat)
        gt_bboxes_ignore = None
        # Case 1: a single ground-truth box — both losses must be non-zero.
        gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda()]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        assert (onegt_cls_loss.item() > 0), 'cls loss should be non-zero'
        assert (onegt_box_loss.item() > 0), 'box loss should be non-zero'
        # Case 2: no ground truth — cls loss stays positive (background),
        # box loss must be exactly zero.
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        assert (empty_cls_loss.item() > 0), 'cls loss should be non-zero'
        assert (empty_box_loss.item() == 0), 'there should be no box loss when there are no true boxes'
def certify_director(token):
    """Issue a certificate for the director via the `fx pki certify` CLI.

    Uses the module-level DIRECTOR_SUBJECT_NAME and CA_PATH; raises
    CalledProcessError if the command fails.
    """
    # List-form argv (shell=False default) avoids shell injection via `token`.
    subprocess.check_call(['fx', 'pki', 'certify', '-n', DIRECTOR_SUBJECT_NAME, '-t', token, '-c', f"{(CA_PATH / 'cert')}", '-p', str(CA_PATH)])
def register_Ns3MqQueueDisc_methods(root_module, cls):
    """Register Python-binding metadata for ns3::MqQueueDisc
    (auto-generated pybindgen registration)."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('GetWakeMode', 'ns3::QueueDisc::WakeMode', [], is_const=True, is_virtual=True)
    # The Do* hooks and config/param methods are private virtuals of QueueDisc.
    cls.add_method('DoEnqueue', 'bool', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item')], visibility='private', is_virtual=True)
    cls.add_method('DoDequeue', 'ns3::Ptr< ns3::QueueDiscItem >', [], visibility='private', is_virtual=True)
    cls.add_method('DoPeek', 'ns3::Ptr< ns3::QueueDiscItem const >', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('CheckConfig', 'bool', [], visibility='private', is_virtual=True)
    cls.add_method('InitializeParams', 'void', [], visibility='private', is_virtual=True)
    return
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True, bn2=False):
    """PointNet++ feature propagation layer (TF1).

    Interpolates features `points2` (defined at coordinates `xyz2`) onto the
    denser coordinates `xyz1` using inverse-distance-weighted 3-NN, optionally
    concatenates the skip features `points1`, then applies a shared per-point
    MLP (1x1 convolutions). Returns the propagated feature tensor.
    """
    with tf.variable_scope(scope) as sc:
        (dist, idx) = three_nn(xyz1, xyz2)
        # Clamp distances to avoid division by zero for coincident points.
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        # Inverse-distance weights over the 3 neighbors, normalized to sum to 1.
        weight = ((1.0 / dist) / norm)
        interpolated_points = three_interpolate(points2, idx, weight)
        if (points1 is not None):
            # Skip connection: concatenate the same-level features.
            new_points1 = tf.concat(axis=2, values=[interpolated_points, points1])
        else:
            new_points1 = interpolated_points
        # Add a dummy spatial dim so conv2d acts as a shared per-point MLP.
        new_points1 = tf.expand_dims(new_points1, 2)
        for (i, num_out_channel) in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope=('conv_%d' % i), bn_decay=bn_decay, bn2=bn2)
        new_points1 = tf.squeeze(new_points1, [2])
        return new_points1
def make_tokenizer(tokenizer_type, corpus, model_path=None, vocab_size=None, model_type='bpe', pad_token=0, character_coverage=1.0, command_tokens=None, type_tokens=None, **kwargs):
    """Instantiate a tokenizer from a class or class-name string.

    BERT/GPT2 tokenizers are constructed and returned directly; any other
    class is used to build a text tokenizer, which is wrapped in `Tokenizer`
    together with the command and type tokens.
    """
    tokenizer_class = tokenizer_type
    if isinstance(tokenizer_class, str):
        # SECURITY NOTE: eval resolves the class from its name — acceptable
        # only because the name comes from trusted configuration; never feed
        # untrusted input here.
        tokenizer_class = eval(tokenizer_class)
    if (tokenizer_class is BertWordPieceTokenizer):
        # NOTE(review): `model_type` is passed positionally as the BERT model
        # identifier here — confirm that callers set it accordingly.
        return BertWordPieceTokenizer(model_type, **kwargs)
    elif (tokenizer_class is GPT2BPETokenizer):
        return GPT2BPETokenizer(**kwargs)
    text_tokenizer = tokenizer_class(corpus=corpus, vocab_size=vocab_size, model_path=model_path, model_type=model_type, pad_token=pad_token, character_coverage=character_coverage)
    return Tokenizer(text_tokenizer, command_tokens, type_tokens)
# NOTE(review): this decorator line appears garbled in the extracted source
# (the '@' was lost — presumably `@register_model('masked_lm')`); confirm upstream.
_model('masked_lm')


class MaskedLMModel(BaseFairseqModel):
    """Masked-LM (BERT-style) model that wraps a single encoder."""

    def __init__(self, args, encoder):
        super().__init__()
        self.args = args
        self.encoder = encoder
        # Optional BERT-style parameter initialization (--apply-bert-init).
        if getattr(args, 'apply_bert_init', False):
            self.apply(init_bert_params)

    # NOTE(review): in fairseq this is a @staticmethod; the decorator is
    # missing here — presumably lost in extraction.
    def add_args(parser):
        """Register all model-specific command-line arguments."""
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--act-dropout', type=float, metavar='D', help='dropout probability after activation in FFN')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
        parser.add_argument('--share-encoder-input-output-embed', action='store_true', help='share encoder input and output embeddings')
        parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder')
        parser.add_argument('--no-token-positional-embeddings', action='store_true', help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--num-segment', type=int, metavar='N', help='num segment in the input')
        parser.add_argument('--sentence-class-num', type=int, metavar='N', help='number of classes for sentence task')
        parser.add_argument('--sent-loss', action='store_true', help='if set, calculate sentence level predictions')
        parser.add_argument('--apply-bert-init', action='store_true', help='use custom param initialization for BERT')
        parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
        parser.add_argument('--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='Which activation function to use for pooler layer.')
        parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')

    def forward(self, src_tokens, segment_labels=None, **kwargs):
        """Delegate the forward pass entirely to the encoder."""
        return self.encoder(src_tokens, segment_labels=segment_labels, **kwargs)

    def max_positions(self):
        return self.encoder.max_positions

    # NOTE(review): in fairseq this is a @classmethod; the decorator is
    # missing here — presumably lost in extraction.
    def build_model(cls, args, task):
        """Construct the model: apply architecture defaults, then build the encoder."""
        base_architecture(args)
        if (not hasattr(args, 'max_positions')):
            args.max_positions = args.tokens_per_sample
        logger.info(args)
        encoder = MaskedLMEncoder(args, task.dictionary)
        return cls(args, encoder)
class Layer(nn.Module):
    """A single transformer-style block: multi-head (structure-aware)
    attention followed by a feed-forward network."""

    def __init__(self, config, d_model, n_head):
        super(Layer, self).__init__()
        self.config = config
        self.d_model = d_model
        self.n_head = n_head
        self.attn_network = MultiHeadAttention.MultiHeadAttention(config, d_model, n_head)
        self.ffn = FeedForwardNetwork.FeedForwardNetwork(config)

    def forward(self, query, key, val, key_structure=None, val_structure=None, attention_mask=None):
        """Run attention then FFN; returns (ffn output, attention values).

        Optional structural embeddings for keys/values and an attention mask
        are forwarded to the attention module unchanged.
        """
        (self_atten_features, atten_values) = self.attn_network(query, key, val, key_structure=key_structure, val_structure=val_structure, attention_mask=attention_mask)
        enc_output = self.ffn(self_atten_features)
        # Free the intermediate eagerly; empty_cache is a best-effort GPU
        # memory release and a no-op on CPU-only builds.
        del self_atten_features
        torch.cuda.empty_cache()
        return (enc_output, atten_values)
def _fundamental_constant_implicit_function_(phi):
    """Return the fundamental constant tau for `phi`.

    Symbolically solves phi(u) - u*phi'(u) = 0 and returns the unique positive
    root; raises ValueError if zero or several positive roots are found.
    """
    from sage.symbolic.ring import SR
    u = SR('u')
    # Keep only solutions with a positive right-hand side.
    positive_solution = [s for s in (phi(u) - (u * phi(u).diff(u))).solve(u) if (s.rhs() > 0)]
    if (len(positive_solution) == 1):
        return positive_solution[0].rhs()
    raise ValueError('Fundamental constant tau could not be determined')
class OxfordFlowers102Dataset(Dataset):
    """Oxford Flowers-102 images and labels, preprocessed with the transform
    pipeline taken from the given backbone's loader.

    Images live under <root>/jpg as image_XXXXX.jpg (1-based, zero-padded);
    labels come from <root>/imagelabels.mat.
    """

    def __init__(self, root='data/meta-dataset/VGGFlower', mode='test', backbone_name='resnet12', transform=None):
        self.root = root
        # load() returns (model, train_transform, val_transform); model unused.
        (_, train_process, val_process) = load(backbone_name, jit=False)
        if ((mode == 'val') or (mode == 'test')):
            transform = val_process
        elif (mode == 'train'):
            transform = train_process
        self.transform = transform
        labels_filename = (self.root + '/imagelabels.mat')
        # .mat labels are 1-based; shift to 0-based class indices.
        self.label = (loadmat(labels_filename)['labels'].flatten() - 1)

    def __getitem__(self, index):
        # Image files are 1-based and zero-padded to 5 digits.
        filepath = ((self.root + '/jpg') + f'/image_{(index + 1):05}.jpg')
        img = Image.open(filepath).convert('RGB')
        img = self.transform(img)
        label = self.label[index]
        label = torch.tensor(label, dtype=torch.long)
        return (img, label)

    def __len__(self):
        # BUG FIX: the original returned len(self.labels), but __init__ sets
        # `self.label` — that always raised AttributeError.
        return len(self.label)
def get_bn_modules(model: nn.Module) -> List[nn.Module]:
    """Return every BatchNorm-type submodule of `model` that is currently in
    training mode (i.e. whose running statistics would be updated)."""
    def _is_training_bn(module: nn.Module) -> bool:
        # Check training mode first, mirroring the original short-circuit order.
        return module.training and isinstance(module, BN_MODULE_TYPES)

    return list(filter(_is_training_bn, model.modules()))
def main():
    """End-to-end ViT-MAE pretraining driver.

    Parses arguments (from a JSON file or the CLI), configures logging,
    resumes from a checkpoint when present, loads the dataset, builds the
    config / feature extractor / model, wires up the image transforms and
    Trainer, then runs training and/or evaluation and optionally pushes to
    the Hub.
    """
    # --- argument parsing: a single .json argv means "read args from file" ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # --- logging setup, verbosity follows the process rank ---
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # NOTE(review): the two concatenated f-strings below lack a separator, so
    # "n_gpu: X" and "distributed training:" run together in the log line;
    # kept byte-identical here — fix upstream if desired.
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    logger.info(f'Training/evaluation parameters {training_args}')
    # --- checkpoint detection for resuming interrupted runs ---
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            # NOTE(review): this message string was line-wrapped in the
            # extracted source; reconstructed as one line — confirm upstream.
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # --- dataset loading; carve a validation split out of train if needed ---
    ds = load_dataset(data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir)
    data_args.train_val_split = (None if ('validation' in ds.keys()) else data_args.train_val_split)
    if (isinstance(data_args.train_val_split, float) and (data_args.train_val_split > 0.0)):
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']
    # --- model config: explicit name > pretrained path > fresh config ---
    config_kwargs = {'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.')
    if (model_args.config_overrides is not None):
        logger.info(f'Overriding config: {model_args.config_overrides}')
        config.update_from_string(model_args.config_overrides)
        logger.info(f'New config: {config}')
    # MAE-specific knobs always come from the CLI, overriding the config.
    config.update({'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss})
    # --- feature extractor, same precedence as the config ---
    if model_args.feature_extractor_name:
        feature_extractor = ViTFeatureExtractor.from_pretrained(model_args.feature_extractor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        feature_extractor = ViTFeatureExtractor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        feature_extractor = ViTFeatureExtractor()
    # --- model: from pretrained weights or from scratch ---
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    else:
        logger.info('Training new model from scratch')
        model = ViTMAEForPreTraining(config)
    # --- pick the image column heuristically when not given explicitly ---
    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names
    if (data_args.image_column_name is not None):
        image_column_name = data_args.image_column_name
    elif ('image' in column_names):
        image_column_name = 'image'
    elif ('img' in column_names):
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
    # --- MAE pretraining augmentation pipeline ---
    transforms = Compose([Lambda((lambda img: (img.convert('RGB') if (img.mode != 'RGB') else img))), RandomResizedCrop(feature_extractor.size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC), RandomHorizontalFlip(), ToTensor(), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)])

    def preprocess_images(examples):
        # Applied lazily via set_transform; adds the pixel_values column.
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if ('train' not in ds):
            raise ValueError('--do_train requires a train dataset')
        if (data_args.max_train_samples is not None):
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        ds['train'].set_transform(preprocess_images)
    if training_args.do_eval:
        if ('validation' not in ds):
            raise ValueError('--do_eval requires a validation dataset')
        if (data_args.max_eval_samples is not None):
            ds['validation'] = ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
        ds['validation'].set_transform(preprocess_images)
    # Linear learning-rate scaling rule: lr = base_lr * total_batch / 256.
    total_train_batch_size = ((training_args.train_batch_size * training_args.gradient_accumulation_steps) * training_args.world_size)
    if (training_args.base_learning_rate is not None):
        training_args.learning_rate = ((training_args.base_learning_rate * total_train_batch_size) / 256)
    trainer = Trainer(model=model, args=training_args, train_dataset=(ds['train'] if training_args.do_train else None), eval_dataset=(ds['validation'] if training_args.do_eval else None), tokenizer=feature_extractor, data_collator=collate_fn)
    # --- training (with optional resume), metrics, and state persistence ---
    if training_args.do_train:
        checkpoint = None
        if (training_args.resume_from_checkpoint is not None):
            checkpoint = training_args.resume_from_checkpoint
        elif (last_checkpoint is not None):
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    # --- hub upload or local model card ---
    kwargs = {'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding']}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3MobilityModel__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackImpl<void, Ptr<const MobilityModel>, empty, ...>.

    Auto-generated pybindgen registration: adds the constructors and methods of
    the wrapped C++ callback class. `root_module` is unused here but kept for
    the generator's uniform registration signature.
    """
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor taking a const reference to the same CallbackImpl instantiation.
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::MobilityModel const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    # Static type-id accessor.
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    # Virtual per-instance type-id accessor.
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # Pure-virtual call operator, exposed to Python as __call__.
    cls.add_method('operator()', 'void', [param('ns3::Ptr< ns3::MobilityModel const >', 'arg0')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
# NOTE(review): in the original source every skip condition appeared as a bare
# tuple expression, e.g. `(IS_WINDOWS, "NCCL doesn't support Windows")`, and the
# dtype parametrization as `(*datatypes)` — i.e. the decorator names had been
# stripped, turning all skips/parametrization into no-ops. They are
# reconstructed below as `@unittest.skipIf(...)` / `@dtypes(...)`; confirm the
# exact decorator names against the upstream file.
class TestNCCL(TestCase):
    """Smoke tests for the torch.cuda.nccl collective wrappers."""

    @unittest.skipIf(IS_WINDOWS, "NCCL doesn't support Windows")
    def test_unique_id(self, device):
        """unique_id() must return a non-trivial bytes token."""
        uid = nccl.unique_id()
        self.assertIsInstance(uid, bytes)
        self.assertGreater(len(uid), 1)

    @unittest.skipIf(TEST_WITH_ROCM and HIP_VERSION < 3.5, 'Skip NCCL tests for ROCm')
    @unittest.skipIf(IS_WINDOWS, "NCCL doesn't support Windows")
    @unittest.skipIf(not TEST_MULTIGPU, 'only one GPU detected')
    @dtypes(*datatypes)
    def test_broadcast(self, device, dtype):
        """broadcast must copy device-0's tensor to every GPU (list and tuple inputs)."""
        expected = torch.zeros(128).uniform_().to(dtype=dtype)
        tensors = [expected.cuda()]
        for device in range(1, torch.cuda.device_count()):
            tensors.append(torch.zeros(128, dtype=dtype, device=device))
        nccl.broadcast(tensors)
        for i in range(torch.cuda.device_count()):
            self.assertEqual(tensors[i], expected)
        # test tuple input
        tensors = [expected.cuda()]
        for device in range(1, torch.cuda.device_count()):
            tensors.append(torch.zeros(128, dtype=dtype, device=device))
        nccl.broadcast(tuple(tensors))
        for i in range(torch.cuda.device_count()):
            self.assertEqual(tensors[i], expected)

    @unittest.skipIf(TEST_WITH_ROCM and HIP_VERSION < 3.5, 'Skip NCCL tests for ROCm')
    @unittest.skipIf(IS_WINDOWS, "NCCL doesn't support Windows")
    @unittest.skipIf(not TEST_MULTIGPU, 'only one GPU detected')
    @dtypes(*datatypes)
    def test_reduce(self, device, dtype):
        """reduce must leave the elementwise sum on the root (index 0) tensor."""
        cpu_tensors = [torch.zeros(128).uniform_().to(dtype=dtype) for i in range(nGPUs)]
        expected = torch.zeros(128, dtype=dtype)
        for t in cpu_tensors:
            expected.add_(t)
        tensors = [cpu_tensors[i].cuda(i) for i in range(nGPUs)]
        nccl.reduce(tensors)
        self.assertEqual(tensors[0], expected)
        # test tuple input
        tensors = [cpu_tensors[i].cuda(i) for i in range(nGPUs)]
        nccl.reduce(tuple(tensors))
        self.assertEqual(tensors[0], expected)

    @unittest.skipIf(IS_WINDOWS, "NCCL doesn't support Windows")
    @unittest.skipIf(not TEST_MULTIGPU, 'only one GPU detected')
    @dtypes(*datatypes)
    def test_all_reduce(self, device, dtype):
        """all_reduce must leave the sum on every participating tensor."""
        # The ROCm restriction here is dtype-specific, so it is handled in the
        # body rather than by a class/method-level skip decorator.
        if TEST_WITH_ROCM and HIP_VERSION < 3.5 and dtype == torch.bfloat16:
            raise unittest.SkipTest('Skip bfloat16 test for ROCm < 3.5')
        cpu_tensors = [torch.zeros(128).uniform_().to(dtype=dtype) for i in range(nGPUs)]
        expected = torch.zeros(128, dtype=dtype)
        for t in cpu_tensors:
            expected.add_(t)
        tensors = [cpu_tensors[i].cuda(i) for i in range(nGPUs)]
        nccl.all_reduce(tensors)
        for tensor in tensors:
            self.assertEqual(tensor, expected)
        # test tuple input
        tensors = tuple(cpu_tensors[i].cuda(i) for i in range(nGPUs))
        nccl.all_reduce(tensors)
        for tensor in tensors:
            self.assertEqual(tensor, expected)
        # test set input
        tensors = {cpu_tensors[i].cuda(i) for i in range(nGPUs)}
        nccl.all_reduce(tensors)
        for tensor in tensors:
            self.assertEqual(tensor, expected)

    @unittest.skipIf(TEST_WITH_ROCM and HIP_VERSION < 3.5, 'Skip NCCL tests for ROCm')
    @unittest.skipIf(IS_WINDOWS, "NCCL doesn't support Windows")
    def test_collective_errors(self, device):
        """Passing a bare tensor (not a collection) must raise TypeError."""
        t = torch.rand(10).cuda(0)
        with self.assertRaisesRegex(TypeError, 'Inputs should be a collection of tensors'):
            nccl.all_reduce(t)
        with self.assertRaisesRegex(TypeError, 'Inputs should be a collection of tensors'):
            nccl.reduce(t)
        with self.assertRaisesRegex(TypeError, 'Inputs should be a collection of tensors'):
            nccl.broadcast(t)
        with self.assertRaisesRegex(TypeError, 'Inputs should be a collection of tensors'):
            nccl.all_gather(t, t)
        with self.assertRaisesRegex(TypeError, 'Inputs should be a collection of tensors'):
            nccl.reduce_scatter(t, t)

    @unittest.skipIf(TEST_WITH_ROCM and HIP_VERSION < 3.5, 'Skip NCCL tests for ROCm')
    @unittest.skipIf(IS_WINDOWS, "NCCL doesn't support Windows")
    @unittest.skipIf(not TEST_MULTIGPU, 'only one GPU detected')
    @dtypes(*datatypes)
    def test_all_gather(self, device, dtype):
        """all_gather must concatenate every GPU's input on every GPU."""
        cpu_inputs = [torch.zeros(128).uniform_().to(dtype=dtype) for i in range(nGPUs)]
        expected = torch.cat(cpu_inputs, 0)
        inputs = [cpu_inputs[i].cuda(i) for i in range(nGPUs)]
        outputs = [torch.zeros(128 * nGPUs, device=i, dtype=dtype) for i in range(nGPUs)]
        nccl.all_gather(inputs, outputs)
        for tensor in outputs:
            self.assertEqual(tensor, expected)
        # test tuple input
        inputs = [cpu_inputs[i].cuda(i) for i in range(nGPUs)]
        outputs = [torch.zeros(128 * nGPUs, device=i, dtype=dtype) for i in range(nGPUs)]
        nccl.all_gather(tuple(inputs), tuple(outputs))
        for tensor in outputs:
            self.assertEqual(tensor, expected)

    @unittest.skipIf(TEST_WITH_ROCM and HIP_VERSION < 3.5, 'Skip NCCL tests for ROCm')
    @unittest.skipIf(IS_WINDOWS, "NCCL doesn't support Windows")
    @unittest.skipIf(not TEST_MULTIGPU, 'only one GPU detected')
    @dtypes(*datatypes)
    def test_reduce_scatter(self, device, dtype):
        """reduce_scatter must leave each GPU with its slice of the summed input."""
        in_size = 32 * nGPUs
        out_size = 32
        cpu_inputs = [torch.zeros(in_size).uniform_().to(dtype=dtype) for i in range(nGPUs)]
        expected = torch.zeros(in_size, dtype=dtype)
        for t in cpu_inputs:
            expected.add_(t)
        expected = expected.view(nGPUs, 32)
        inputs = [cpu_inputs[i].cuda(i) for i in range(nGPUs)]
        outputs = [torch.zeros(out_size, device=i, dtype=dtype) for i in range(nGPUs)]
        nccl.reduce_scatter(inputs, outputs)
        for i in range(nGPUs):
            self.assertEqual(outputs[i], expected[i])
        # test tuple input
        inputs = [cpu_inputs[i].cuda(i) for i in range(nGPUs)]
        outputs = [torch.zeros(out_size, device=i, dtype=dtype) for i in range(nGPUs)]
        nccl.reduce_scatter(tuple(inputs), tuple(outputs))
        for i in range(nGPUs):
            self.assertEqual(outputs[i], expected[i])
class ThreeDPW(Dataset3D):
    """3DPW sequence dataset: a thin wrapper that configures Dataset3D
    with the 3DPW folder, name, and sliding-window parameters."""

    def __init__(self, load_opt, set, seqlen, overlap=0.75, debug=False, target_vid=''):
        dataset_name = '3dpw'
        print('3DPW Dataset overlap ratio: ', overlap)
        # All real loading logic lives in the Dataset3D base class.
        super().__init__(
            load_opt=load_opt,
            set=set,
            folder=THREEDPW_DIR,
            seqlen=seqlen,
            overlap=overlap,
            dataset_name=dataset_name,
            debug=debug,
            target_vid=target_vid,
        )
        print(f'{dataset_name} - number of dataset objects {self.__len__()}')
def get_module_name(frame):
    """Return a dotted module (and class) name for the code running in *frame*.

    Falls back to 'numpy.__array_function__' for numpy's synthetic dispatch
    frames and to 'unknown' when the frame's globals carry no __name__.
    When the frame belongs to a bound method (a 'self' local is present),
    the class name is appended, giving 'module.ClassName'.
    """
    modulename = frame.f_globals.get('__name__', None)
    if modulename is None:
        # numpy >= 1.17 dispatches through a synthetic frame whose filename
        # is '<__array_function__ internals>'; attribute it to numpy.
        if frame.f_code.co_filename == '<__array_function__ internals>':
            modulename = 'numpy.__array_function__'
        else:
            # BUGFIX: the fallback string was misspelled as 'unkown'.
            modulename = 'unknown'
    typeobject = frame.f_locals.get('self', None)
    if typeobject is not None:
        # type(obj).__name__ and obj.__class__.__name__ agree for new-style
        # classes; the branch presumably mirrors what the C instrumenter
        # reports — TODO confirm.
        if has_c_instrumenter():
            return '.'.join([modulename, type(typeobject).__name__])
        else:
            return '.'.join([modulename, typeobject.__class__.__name__])
    return modulename
def iob2bioes(tags: List[str]) -> List[str]:
    """Convert an IOB tag sequence to the BIOES scheme.

    A 'B-' (resp. 'I-') tag that is not continued by an 'I-' tag at the
    next position becomes 'S-' (resp. 'E-'); 'O' passes through unchanged.
    Raises TypeError for any other prefix.
    """
    converted = []
    last = len(tags) - 1
    for idx, tag in enumerate(tags):
        if tag == 'O':
            converted.append(tag)
            continue
        prefix = tag.split('-')[0]
        # True when the entity continues into the next position.
        followed_by_inside = idx < last and tags[idx + 1].split('-')[0] == 'I'
        if prefix == 'B':
            converted.append(tag if followed_by_inside else tag.replace('B-', 'S-'))
        elif prefix == 'I':
            converted.append(tag if followed_by_inside else tag.replace('I-', 'E-'))
        else:
            raise TypeError('Invalid IOB format.')
    return converted
class TestUtils(test_util.TestCase):
    """Unit tests for caffe2 argument helpers and mutex-iter construction."""

    def testArgsToDict(self):
        # Round-trip scalar and list values through MakeArgument/ArgsToDict.
        arguments = [
            utils.MakeArgument('int1', 3),
            utils.MakeArgument('float1', 4.0),
            utils.MakeArgument('string1', 'foo'),
            utils.MakeArgument('intlist1', np.array([3, 4])),
            utils.MakeArgument('floatlist1', np.array([5.0, 6.0])),
            utils.MakeArgument('stringlist1', np.array(['foo', 'bar'])),
        ]
        converted = utils.ArgsToDict(arguments)
        # Strings come back as bytes because protobuf stores them encoded.
        wanted = {
            'int1': 3,
            'float1': 4.0,
            'string1': b'foo',
            'intlist1': [3, 4],
            'floatlist1': [5.0, 6.0],
            'stringlist1': [b'foo', b'bar'],
        }
        self.assertEqual(converted, wanted, "dictionary version of arguments doesn't match original")

    def testBuildUniqueMutexIter(self):
        init_net = core.Net('init_net')
        net = core.Net('net')
        utils.BuildUniqueMutexIter(init_net, net)
        # Every op emitted on either net must be pinned to CPU execution.
        for proto in (init_net.Proto(), net.Proto()):
            for op in proto.op:
                self.assertEqual(op.device_option.extra_info[0], 'device_type_override:cpu')
def downsample_basic_block(x, planes, stride): out = F.avg_pool3d(x, kernel_size=1, stride=stride) zero_pads = torch.Tensor(out.size(0), (planes - out.size(1)), out.size(2), out.size(3), out.size(4)).zero_() if isinstance(out.data, torch.cuda.FloatTensor): zero_pads = zero_pads.cuda() out = Variable(torch.cat([out.data, zero_pads], dim=1)) return out
def validate_variable(f):
    """Return True when *f* is a sciann variable-like object; raise ValueError otherwise."""
    variable_types = (sciann.Variable, sciann.functionals.RadialBasis, sciann.functionals.RNNVariable)
    if not isinstance(f, variable_types):
        raise ValueError('These operations can only be applied to the `Variable` object. Use `Keras` or `TensorFlow` functions when applying to tensors or layers. ')
    return True
class CheckpointIO(object):
    """Saves and restores registered torch modules (anything exposing
    state_dict / load_state_dict) to a checkpoint directory, together with
    arbitrary scalar entries (e.g. epoch counters).
    """

    def __init__(self, checkpoint_dir='./chkpts', **kwargs):
        # Modules to (de)serialize, keyed by checkpoint entry name.
        self.module_dict = kwargs
        self.checkpoint_dir = checkpoint_dir
        # exist_ok avoids the check-then-create race of the original
        # `if not os.path.exists(...): os.makedirs(...)` pattern.
        os.makedirs(checkpoint_dir, exist_ok=True)

    def register_modules(self, **kwargs):
        """Register additional modules to be (de)serialized."""
        self.module_dict.update(kwargs)

    def save(self, filename, **kwargs):
        """Save all registered modules' state dicts plus the given scalars."""
        if not os.path.isabs(filename):
            filename = os.path.join(self.checkpoint_dir, filename)
        outdict = kwargs
        for k, v in self.module_dict.items():
            outdict[k] = v.state_dict()
        torch.save(outdict, filename)

    def load(self, filename):
        """Load a checkpoint from a URL or a (possibly relative) file path."""
        if is_url(filename):
            return self.load_url(filename)
        return self.load_file(filename)

    def load_file(self, filename):
        """Load a local checkpoint; returns the non-module scalar entries."""
        if not os.path.isabs(filename):
            filename = os.path.join(self.checkpoint_dir, filename)
        if os.path.exists(filename):
            print(filename)
            print('=> Loading checkpoint from local file...')
            state_dict = torch.load(filename)
            scalars = self.parse_state_dict(state_dict)
            return scalars
        # NOTE(review): FileExistsError is semantically odd for a *missing*
        # file (FileNotFoundError would fit), but callers may catch this
        # exact type, so it is preserved.
        raise FileExistsError

    def load_url(self, url):
        """Download and load a checkpoint through torch's model zoo."""
        print(url)
        print('=> Loading checkpoint from url...')
        state_dict = model_zoo.load_url(url, progress=True)
        scalars = self.parse_state_dict(state_dict)
        return scalars

    def parse_state_dict(self, state_dict):
        """Restore registered modules in place; return leftover scalar entries."""
        for k, v in self.module_dict.items():
            if k in state_dict:
                v.load_state_dict(state_dict[k])
            else:
                print('Warning: Could not find %s in checkpoint!' % k)
        scalars = {k: v for k, v in state_dict.items() if k not in self.module_dict}
        return scalars
def __main__():
    """Entry point: load the configured test weights and run evaluation."""
    if cfg.TEST.WEIGHTS == '':
        print('no test weights exist!!')
        return
    model = setup_model()
    checkpoint.load_checkpoint(cfg.TEST.WEIGHTS, model)
    test_model(model, cfg.TEST.DATA_DIR, cfg.TEST.DATASET_LIST, cfg.TEST.SCALE_LIST, cfg.TEST.TOPK_LIST)
class DepthDecoder(nn.Module):
    """Multi-scale depth decoder (monodepth-style): upsamples encoder features
    through five upconv stages, optionally concatenating encoder skip
    connections, and emits a sigmoid disparity map at each requested scale.
    """

    def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True):
        super(DepthDecoder, self).__init__()
        self.num_output_channels = num_output_channels
        self.use_skips = use_skips
        self.upsample_mode = 'nearest'
        self.scales = scales
        self.num_ch_enc = num_ch_enc
        # Decoder channel widths per stage (index 0 = finest resolution).
        # NOTE(review): larger than the usual monodepth2 values
        # ([16, 32, 64, 128, 256]) — presumably intentional for this model.
        self.num_ch_dec = np.array([64, 128, 256, 512, 1024])
        self.convs = OrderedDict()
        # Build the two conv blocks of each upsampling stage, coarsest first.
        for i in range(4, (- 1), (- 1)):
            # First conv: input is the deepest encoder feature at i == 4,
            # otherwise the previous (coarser) decoder stage's output.
            num_ch_in = (self.num_ch_enc[(- 1)] if (i == 4) else self.num_ch_dec[(i + 1)])
            num_ch_out = self.num_ch_dec[i]
            self.convs[('upconv', i, 0)] = ConvBlock(num_ch_in, num_ch_out)
            # Second conv: input widened by the skip connection's channels.
            num_ch_in = self.num_ch_dec[i]
            if (self.use_skips and (i > 0)):
                num_ch_in += self.num_ch_enc[(i - 1)]
            num_ch_out = self.num_ch_dec[i]
            self.convs[('upconv', i, 1)] = ConvBlock(num_ch_in, num_ch_out)
        # One 3x3 disparity head per requested output scale.
        for s in self.scales:
            self.convs[('dispconv', s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels)
        # Register all convs as submodules so their parameters are tracked.
        self.decoder = nn.ModuleList(list(self.convs.values()))
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_features):
        """Decode the encoder pyramid (finest feature last in the list) into
        {('disp', s): disparity} for every s in self.scales."""
        self.outputs = {}
        x = input_features[(- 1)]
        for i in range(4, (- 1), (- 1)):
            x = self.convs[('upconv', i, 0)](x)
            # NOTE(review): upsampling only happens for stages i < 3, unlike
            # monodepth2 which upsamples every stage — confirm this asymmetry
            # is intended.
            if (i < 3):
                x = [upsample(x)]
            else:
                x = [x]
            if (self.use_skips and (i > 0)):
                x += [input_features[(i - 1)]]
            x = torch.cat(x, 1)
            x = self.convs[('upconv', i, 1)](x)
            if (i in self.scales):
                self.outputs[('disp', i)] = self.sigmoid(self.convs[('dispconv', i)](x))
        return self.outputs
class Temporal_Basic_Block(nn.Module):
    """Temporal convolution block: a (T, 1) conv + BN + ReLU over the time
    axis, with an optional residual path and an externally supplied
    residual term added before the activation."""

    def __init__(self, channels, temporal_window_size, stride=1, residual=False, **kwargs):
        super(Temporal_Basic_Block, self).__init__()
        pad = ((temporal_window_size - 1) // 2, 0)
        # Residual branch: disabled -> constant 0, identity when stride is 1,
        # otherwise a strided 1x1 conv + BN to match the temporal length.
        if not residual:
            self.residual = lambda x: 0
        elif stride == 1:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(
                nn.Conv2d(channels, channels, 1, (stride, 1)),
                nn.BatchNorm2d(channels),
            )
        self.conv = nn.Conv2d(channels, channels, (temporal_window_size, 1), (stride, 1), pad)
        self.bn = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, res_module):
        shortcut = self.residual(x)
        out = self.bn(self.conv(x))
        # Internal shortcut plus the caller-provided residual term.
        return self.relu(out + shortcut + res_module)
def merge_flows(flow_list):
    """Sum flow amounts per (u, v) edge.

    Takes [((u, v), amount), ...] with possibly repeated edges and returns
    one entry per edge with the amounts summed, in first-seen order.
    """
    totals = {}
    for edge, amount in flow_list:
        totals[edge] = totals.get(edge, 0.0) + amount
    return list(totals.items())
def main():
    """Parse CLI args, build the AttnGAN model, then train or test it."""
    args = parse_args()
    if args is None:
        exit()
    session_config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=session_config) as sess:
        gan = AttnGAN(sess, args)
        gan.build_model()
        show_all_variables()
        if args.phase == 'train':
            gan.train()
            print(' [*] Training finished!')
        if args.phase == 'test':
            gan.test()
            print(' [*] Test finished!')
def _percentile(a, q, *, method='linear', **kwargs):
    # Compatibility shim: accepts the modern `method=` keyword (NumPy >= 1.22)
    # and forwards it as the legacy `interpolation=` parameter expected by
    # older NumPy versions.
    return np.percentile(a, q, interpolation=method, **kwargs)
class ActivationsAndGradients():
    """Hooks a model's target layers to record forward activations and
    backward gradients for CAM-style visualization.

    Gradients are prepended so their order matches the activations' order
    (backward hooks fire last-layer-first). Captured tensors are detached
    and moved to CPU; an optional reshape_transform is applied first.
    """

    def __init__(self, model, target_layers, reshape_transform):
        self.model = model
        self.gradients = []
        self.activations = []
        self.reshape_transform = reshape_transform
        self.handles = []
        for layer in target_layers:
            self.handles.append(layer.register_forward_hook(self.save_activation))
            # Prefer the non-deprecated full backward hook when available.
            if hasattr(layer, 'register_full_backward_hook'):
                self.handles.append(layer.register_full_backward_hook(self.save_gradient))
            else:
                self.handles.append(layer.register_backward_hook(self.save_gradient))

    def save_activation(self, module, input, output):
        out = output if self.reshape_transform is None else self.reshape_transform(output)
        self.activations.append(out.cpu().detach())

    def save_gradient(self, module, grad_input, grad_output):
        grad = grad_output[0]
        if self.reshape_transform is not None:
            grad = self.reshape_transform(grad)
        # Prepend: backward runs in reverse layer order.
        self.gradients.insert(0, grad.cpu().detach())

    def __call__(self, x):
        # Reset captured state for each new forward pass.
        self.gradients = []
        self.activations = []
        return self.model(x)

    def release(self):
        """Remove all registered hooks."""
        for handle in self.handles:
            handle.remove()
def get_token(name, ca_url, ca_path='.'):
    """Generate a Step-CA enrollment token for *name* and return it joined
    (via TOKEN_DELIMITER) with the base64-encoded root certificate.

    Requires a previously installed Step-CA under *ca_path*; exits the
    process with code 1 when the `step` invocation fails.
    """
    ca_path = Path(ca_path)
    step_config_dir = (ca_path / CA_STEP_CONFIG_DIR)
    pki_dir = (ca_path / CA_PKI_DIR)
    (step_path, _) = get_ca_bin_paths(ca_path)
    if (not step_path):
        raise Exception('Step-CA is not installed!\nRun `fx pki install` first')
    priv_json = ((step_config_dir / 'secrets') / 'priv.json')
    pass_file = (pki_dir / CA_PASSWORD_FILE)
    root_crt = ((step_config_dir / 'certs') / 'root_ca.crt')
    try:
        # NOTE(security): shell=True with interpolated paths — paths or names
        # containing spaces/shell metacharacters would break or be interpreted
        # by the shell. Consider an argument list with shell=False.
        token = subprocess.check_output(f'{step_path} ca token {name} --key {priv_json} --root {root_crt} --password-file {pass_file} --ca-url {ca_url}', shell=True)
    except subprocess.CalledProcessError as exc:
        logger.error(f'Error code {exc.returncode}: {exc.output}')
        sys.exit(1)
    token = token.strip()
    token_b64 = base64.b64encode(token)
    with open(root_crt, mode='rb') as file:
        root_certificate_b = file.read()
    root_ca_b64 = base64.b64encode(root_certificate_b)
    # Token and root certificate travel together as one delimited string.
    return TOKEN_DELIMITER.join([token_b64.decode('utf-8'), root_ca_b64.decode('utf-8')])
def _nanmedian_dispatcher(a, axis=None, out=None, overwrite_input=None, keepdims=None):
    # __array_function__ dispatcher for nanmedian: yields only the arguments
    # that can carry an array-like type (the input and the out= target).
    return (a, out)
def load_experiment_config(experiments_file, experiment_tags):
    """Load the experiments JSON file and merge the configs selected by
    *experiment_tags* into a single dict (later tags injected over earlier
    ones via _inject_items)."""
    with open(experiments_file, 'r') as handle:
        data = json.load(handle)
    merged = {}
    for tag in experiment_tags:
        _inject_items(build_dict(data, tag), merged)
    return merged
def process_image(encoded_image, is_training, height, width, resize_height=346, resize_width=346, thread_id=0, image_format='jpeg'):
    """Decode and preprocess one encoded image string for the model.

    Decodes jpeg/png to float32 in [0, 1], resizes, crops (random crop plus
    distortions when training, center crop/pad otherwise), then rescales to
    [-1, 1]. Image summaries are emitted only from thread 0 to avoid
    duplicates. Raises ValueError for an unknown image_format.
    """

    def image_summary(name, image):
        # Only the first preprocessing thread writes image summaries.
        if (not thread_id):
            tf.image_summary(name, tf.expand_dims(image, 0))
    with tf.name_scope('decode', values=[encoded_image]):
        if (image_format == 'jpeg'):
            image = tf.image.decode_jpeg(encoded_image, channels=3)
        elif (image_format == 'png'):
            image = tf.image.decode_png(encoded_image, channels=3)
        else:
            raise ValueError(('Invalid image format: %s' % image_format))
    # To float32 in [0, 1).
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image_summary('original_image', image)
    # Either both resize dims are positive or neither is.
    assert ((resize_height > 0) == (resize_width > 0))
    if resize_height:
        image = tf.image.resize_images(image, size=[resize_height, resize_width], method=tf.image.ResizeMethod.BILINEAR)
    if is_training:
        image = tf.random_crop(image, [height, width, 3])
    else:
        # Central crop (or pad) to the target size at eval time.
        image = tf.image.resize_image_with_crop_or_pad(image, height, width)
    image_summary('resized_image', image)
    if is_training:
        # Random color/flip distortions, varied per preprocessing thread.
        image = distort_image(image, thread_id)
    image_summary('final_image', image)
    # Rescale from [0, 1] to [-1, 1]. (tf.sub/tf.mul are legacy TF <= 0.x names.)
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    return image
def cross_entropy_calc(TOP, P, POP):
    """Cross entropy between the reference (P/POP) and response (TOP/POP)
    per-class likelihood distributions.

    Returns the string 'None' on any computation error (missing keys,
    zero population, ...), matching the library's error convention.
    """
    try:
        total = 0
        for key in TOP.keys():
            reference = P[key] / POP[key]
            response = TOP[key] / POP[key]
            # Skip terms where either likelihood vanishes (log undefined).
            if reference != 0 and response != 0:
                total += reference * math.log(response, 2)
        return -total
    except Exception:
        return 'None'
class PseudoDataParallel(nn.Module):
    """Exposes a bare model under a `.module` attribute, mimicking
    nn.DataParallel so code written against DataParallel-wrapped models
    also works on a single device."""

    def __init__(self, model):
        super().__init__()
        self.module = model
class FeatureFusionModule(nn.Module):
    """Fast-SCNN feature fusion: upsample the low-resolution branch to the
    high-resolution branch's spatial size, project both to `out_channels`
    with 1x1 convs, sum, and apply a ReLU.
    """

    def __init__(self, higher_in_channels, lower_in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), align_corners=False):
        super(FeatureFusionModule, self).__init__()
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.align_corners = align_corners
        # NOTE(review): named `dwconv` but configured as a plain 1x1 ConvModule
        # (no groups=channels argument visible) — not actually depthwise;
        # confirm against the upstream implementation.
        self.dwconv = ConvModule(lower_in_channels, out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        # Projections without activation; the shared ReLU runs after the sum.
        self.conv_lower_res = ConvModule(out_channels, out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=None)
        self.conv_higher_res = ConvModule(higher_in_channels, out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=None)
        self.relu = nn.ReLU(True)

    def forward(self, higher_res_feature, lower_res_feature):
        # Bring the coarse branch up to the fine branch's spatial size.
        lower_res_feature = resize(lower_res_feature, size=higher_res_feature.size()[2:], mode='bilinear', align_corners=self.align_corners)
        lower_res_feature = self.dwconv(lower_res_feature)
        lower_res_feature = self.conv_lower_res(lower_res_feature)
        higher_res_feature = self.conv_higher_res(higher_res_feature)
        out = (higher_res_feature + lower_res_feature)
        return self.relu(out)
class GroupAlgebraFunctor(ConstructionFunctor):
    """Construction functor sending a ring R to the group algebra R[G]."""

    def __init__(self, group):
        self.__group = group
        from sage.categories.rings import Rings
        # The functor maps the category of rings to itself.
        ConstructionFunctor.__init__(self, Rings(), Rings())

    def group(self):
        """Return the group G underlying this functor."""
        return self.__group

    def _apply_functor(self, base_ring):
        """Apply the functor: base_ring -> base_ring[G]."""
        return self.__group.algebra(base_ring)

    def _apply_functor_to_morphism(self, f):
        """Extend the ring morphism *f* coefficient-wise to group algebras."""
        from sage.categories.rings import Rings
        domain = self(f.domain())
        codomain = self(f.codomain())
        coefficient_map = lambda x: codomain.sum_of_terms((g, f(c)) for (g, c) in x)
        return SetMorphism(domain.Hom(codomain, category=Rings()), coefficient_map)
def CppExtension(name, sources, *args, **kwargs):
    """Create a setuptools.Extension for a C++ torch extension.

    Adds the torch include paths and, on Windows, the torch library paths
    and the 'caffe2'/'_C' link libraries, then forces C++ compilation.

    Unlike the original, the lists taken from **kwargs are copied before
    being extended, so a caller's `include_dirs`/`library_dirs`/`libraries`
    arguments are not mutated as a side effect of building the extension.
    """
    include_dirs = list(kwargs.get('include_dirs', []))
    include_dirs += include_paths()
    kwargs['include_dirs'] = include_dirs
    if sys.platform == 'win32':
        library_dirs = list(kwargs.get('library_dirs', []))
        library_dirs += library_paths()
        kwargs['library_dirs'] = library_dirs
        libraries = list(kwargs.get('libraries', []))
        # Windows needs explicit links against the torch C++/Python cores.
        libraries.append('caffe2')
        libraries.append('_C')
        kwargs['libraries'] = libraries
    kwargs['language'] = 'c++'
    return setuptools.Extension(name, sources, *args, **kwargs)
def register_optimizer(name):
    """Lazily import and register the optimizer *name* into OPTIMIZERS.

    No-op when the name is already registered; raises ValueError for an
    unknown name. Imports stay inside the branches so optional solver
    dependencies are only pulled in when actually requested.
    """
    if name in OPTIMIZERS:
        return
    if name == 'Ranger':
        from lib.torch_utils.solver.ranger import Ranger
        OPTIMIZERS.register_module()(Ranger)
    elif name in ('AdaBelief', 'RangerAdaBelief'):
        from lib.torch_utils.solver.AdaBelief import AdaBelief
        from lib.torch_utils.solver.ranger_adabelief import RangerAdaBelief
        OPTIMIZERS.register_module()(AdaBelief)
        OPTIMIZERS.register_module()(RangerAdaBelief)
    elif name in ('SGDP', 'AdamP'):
        from lib.torch_utils.solver.adamp import AdamP
        from lib.torch_utils.solver.sgdp import SGDP
        OPTIMIZERS.register_module()(AdamP)
        OPTIMIZERS.register_module()(SGDP)
    elif name in ('SGD_GC', 'SGD_GCC'):
        from lib.torch_utils.solver.sgd_gc import SGD_GC, SGD_GCC
        OPTIMIZERS.register_module()(SGD_GC)
        OPTIMIZERS.register_module()(SGD_GCC)
    else:
        raise ValueError(f'Unknown optimizer name: {name}')
class TransducerLoss(Module):
    """RNN-Transducer loss wrapper around the numba/CUDA `Transducer`
    autograd Function. Only CUDA inputs are supported.
    """

    def __init__(self, blank=0, reduction='mean'):
        super(TransducerLoss, self).__init__()
        # Index of the blank label.
        self.blank = blank
        # Reduction mode forwarded to the kernel.
        self.reduction = reduction
        self.loss = Transducer.apply
        # Fail fast at construction time when numba's CUDA support is missing.
        # NOTE(review): plain attribute access normally raises AttributeError,
        # not ImportError — confirm `cuda.cuda_paths` raises ImportError as
        # this handler assumes.
        try:
            cuda.cuda_paths
        except ImportError:
            err_msg = 'cannot import numba. To use Transducer loss\n'
            err_msg += '\n'
            err_msg += 'If you use your localhost:\n'
            err_msg += 'pip install numba\n'
            err_msg += "export NUMBAPRO_LIBDEVICE='/usr/local/cuda/nvvm/libdevice/' \n"
            err_msg += "export NUMBAPRO_NVVM='/usr/local/cuda/nvvm/lib64/libnvvm.so' \n"
            err_msg += ' \n'
            err_msg += 'If you use conda:\n'
            err_msg += 'conda install numba cudatoolkit=XX (XX is your cuda toolkit version)'
            raise ImportError(err_msg)

    def forward(self, logits, labels, T, U):
        """Compute the transducer loss; logits/labels/T/U must all be CUDA tensors."""
        if all((t.is_cuda for t in (logits, labels, T, U))):
            # The kernel expects log-probabilities over the label dimension.
            log_probs = logits.log_softmax((- 1))
            return self.loss(log_probs, labels, T, U, self.blank, self.reduction)
        else:
            raise ValueError(f"Found inputs tensors to be on {[logits.device, labels.device, T.device, U.device]} while needed to be on a 'cuda' device to use the transducer loss.")
# NOTE(review): the original source carried the residue `.unit .convert` (a
# syntax error) where decorators once stood; reconstructed below as pytest
# marks — confirm the mark names against the project's pytest configuration.
@pytest.mark.unit
@pytest.mark.convert
def test_slice_idx_generator_z1():
    """slice_idx_generator at zoom 1 must produce exactly the reference set
    of tile indices for a 4305x9791 raster with 256-px tiles."""
    shape = (4305, 9791)
    zoom = 1
    tile_size = 256
    given = convert.slice_idx_generator(shape, zoom, tile_size)
    expected = helpers.get_slice_idx_generator_solution(zoom)
    # Compare as sets of hashable tuples: generation order is irrelevant.
    comparable_given = set(map(helpers.covert_idx_to_hashable_tuple, given))
    comparable_expected = set(map(helpers.covert_idx_to_hashable_tuple, expected))
    assert (comparable_given == comparable_expected)
def load_goals(para_config, intent_utterance_dir, intent_name, mode, number_utterances=(- 1)):
    """Load the paraphrase-goals JSON for an intent/config/mode combination.

    A positive *number_utterances* selects the matching "_utt_<n>" file
    variant, otherwise the "_utt_all" variant. Reads from S3 when the
    STORAGE env var is 'S3', else from the local filesystem; returns None
    when the local file does not exist.
    """
    suffix = '_utt_' + str(number_utterances) if number_utterances > 0 else '_utt_all'
    config_tag = para_config + suffix
    goal_path = '{}/{}_{}.{}.paraphrases.goal.json'.format(intent_utterance_dir, intent_name, config_tag, mode)
    if os.environ.get('STORAGE') == 'S3':
        return read_s3_json(S3_BUCKET_NAME, goal_path, 'r')
    if os.path.exists(goal_path):
        with open(goal_path, 'r') as json_file:
            return json.load(json_file)
    return None
def benchmark(args):
    """Benchmark a downloaded Caffe2 model on the CPU/MKL/IDEEP device.

    Downloads the model, rewrites its input shapes for args.batch_size,
    pins the predict net (and every op) to the chosen device, feeds random
    inputs, and prints frames-per-second from workspace.BenchmarkNet.
    Raises for an unknown args.device.
    """
    print('Batch size: {}'.format(args.batch_size))
    mf = ModelDownloader()
    (init_net, pred_net, value_info) = mf.get_c2_model(args.model)
    # Replace the stored batch dimension with the requested batch size.
    input_shapes = {k: ([args.batch_size] + v[(- 1)][1:]) for (k, v) in value_info.items()}
    print('input info: {}'.format(input_shapes))
    external_inputs = {}
    for (k, v) in input_shapes.items():
        external_inputs[k] = np.random.randn(*v).astype(np.float32)
    if (args.device == 'CPU'):
        device_option = core.DeviceOption(caffe2_pb2.CPU)
    elif (args.device == 'MKL'):
        device_option = core.DeviceOption(caffe2_pb2.MKLDNN)
    elif (args.device == 'IDEEP'):
        device_option = core.DeviceOption(caffe2_pb2.IDEEP)
    else:
        raise Exception('Unknown device: {}'.format(args.device))
    print('Device option: {}, {}'.format(args.device, device_option))
    # Pin the whole predict net and each of its ops to the selected device.
    pred_net.device_option.CopyFrom(device_option)
    for op in pred_net.op:
        op.device_option.CopyFrom(device_option)
    # Materialize the weights once on the default device...
    workspace.RunNetOnce(init_net)
    bb = workspace.Blobs()
    weights = {}
    for b in bb:
        weights[b] = workspace.FetchBlob(b)
    for (k, v) in external_inputs.items():
        weights[k] = v
    # ...then re-feed everything into a clean workspace under the target
    # device scope before creating and benchmarking the net.
    workspace.ResetWorkspace()
    with core.DeviceScope(device_option):
        for (name, blob) in weights.items():
            workspace.FeedBlob(name, blob, device_option)
        workspace.CreateNet(pred_net)
        # NOTE(review): `start` is never read — BenchmarkNet does its own timing.
        start = time.time()
        res = workspace.BenchmarkNet(pred_net.name, args.warmup_iterations, args.iterations, args.layer_wise_benchmark)
        # res[0] is the average per-iteration time in milliseconds.
        print('FPS: {:.2f}'.format((((1 / res[0]) * 1000) * args.batch_size)))
def check_equal(x, y, logger):
    """Raise (and log) a ValueError unless x == y."""
    if x == y:
        return
    error = ValueError(f'{x} != {y}')
    # Log via logger.exception before propagating to the caller.
    logger.exception(repr(error))
    raise error
def main():
    """CLI entry point: parse hyper-parameters, (re)create the save
    directory, set up file logging and the RNG seed, then launch training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', required=True)
    parser.add_argument('--save_dir', required=True, help='path to save checkpoints and logs')
    parser.add_argument('--lr', default=0.001, type=float)
    parser.add_argument('--weight_decay', default=1e-05, type=float)
    parser.add_argument('--num_epoch', default=100, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--seed', type=int, default=666, help='random seed')
    parser.add_argument('--dim_word', default=300, type=int)
    parser.add_argument('--dim_hidden', default=1024, type=int)
    parser.add_argument('--max_dec_len', default=100, type=int)
    args = parser.parse_args()
    # Start from a clean save directory (existing contents are discarded).
    if os.path.isdir(args.save_dir):
        shutil.rmtree(args.save_dir)
    os.mkdir(args.save_dir)
    # Mirror the root logger's output into <save_dir>/log.txt.
    fileHandler = logging.FileHandler(os.path.join(args.save_dir, 'log.txt'))
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)
    # Record all hyper-parameters in the log.
    for key, value in vars(args).items():
        logging.info(key + ':' + str(value))
    torch.manual_seed(args.seed)
    train(args)
def reduce_by_model(logs, error_filter=None):
    """Aggregate error counts per model extracted from log triples.

    Each log entry is (item, error, raw_model); entries whose model cannot
    be resolved by get_model are dropped, and errors listed in
    *error_filter* are excluded. Returns {model: {'count': n, 'errors':
    {error: count}}} ordered by total count, descending.
    """
    resolved = [(entry[0], entry[1], get_model(entry[2])) for entry in logs]
    resolved = [entry for entry in resolved if entry[2] is not None]
    models = {entry[2] for entry in resolved}
    summary = {}
    for model in models:
        tally = Counter(entry[1] for entry in resolved if entry[2] == model)
        error_counts = {
            error: count
            for error, count in tally.most_common()
            if error_filter is None or error not in error_filter
        }
        total = sum(error_counts.values())
        if total > 0:
            summary[model] = {'count': total, 'errors': error_counts}
    return dict(sorted(summary.items(), key=lambda item: item[1]['count'], reverse=True))
# NOTE(review): the original source carried decorator residue — bare
# `_labeling_function(...)` expression statements and a stray `.complex`
# attribute (a syntax error once reformatted) — where
# `@nlp_labeling_function(...)` / `@pytest.mark.complex` decorators once
# stood. They are reconstructed below; confirm against the upstream tests.
class TestNLPLabelingFunction(unittest.TestCase):
    """Tests for NLPLabelingFunction and the nlp_labeling_function decorator."""

    def _run_lf(self, lf: NLPLabelingFunction) -> None:
        # The LF labels 0 when the combined text mentions a PERSON, else -1.
        x = SimpleNamespace(num=8, title='Great film!', article='The movie is really great!')
        self.assertEqual(lf(x), (- 1))
        x = SimpleNamespace(num=8, title='Nice movie!', article='Jane Doe acted well.')
        self.assertEqual(lf(x), 0)

    def test_nlp_labeling_function(self) -> None:
        lf = NLPLabelingFunction(name='my_lf', f=has_person_mention, pre=[combine_text])
        self._run_lf(lf)

    def test_nlp_labeling_function_memoized(self) -> None:
        lf = NLPLabelingFunction(name='my_lf', f=has_person_mention, pre=[combine_text])
        lf._nlp_config.nlp.reset_cache()
        self.assertEqual(len(lf._nlp_config.nlp._cache), 0)
        self._run_lf(lf)
        # Two data points parsed -> two cache entries; re-running adds none.
        self.assertEqual(len(lf._nlp_config.nlp._cache), 2)
        self._run_lf(lf)
        self.assertEqual(len(lf._nlp_config.nlp._cache), 2)

    @pytest.mark.complex
    def test_labeling_function_serialize(self) -> None:
        # An LF must survive a dill round-trip and still label correctly.
        lf = NLPLabelingFunction(name='my_lf', f=has_person_mention, pre=[combine_text])
        lf_load = dill.loads(dill.dumps(lf))
        self._run_lf(lf_load)

    def test_nlp_labeling_function_decorator(self) -> None:
        @nlp_labeling_function(pre=[combine_text])
        def has_person_mention(x: DataPoint) -> int:
            person_ents = [ent for ent in x.doc.ents if (ent.label_ == 'PERSON')]
            return (0 if (len(person_ents) > 0) else (- 1))

        self.assertIsInstance(has_person_mention, NLPLabelingFunction)
        self.assertEqual(has_person_mention.name, 'has_person_mention')
        self._run_lf(has_person_mention)

    def test_nlp_labeling_function_decorator_no_parens(self) -> None:
        # Using the decorator without parentheses must raise immediately.
        with self.assertRaisesRegex(ValueError, 'missing parentheses'):

            @nlp_labeling_function
            def has_person_mention(x: DataPoint) -> int:
                person_ents = [ent for ent in x.doc.ents if (ent.label_ == 'PERSON')]
                return (0 if (len(person_ents) > 0) else (- 1))

    def test_nlp_labeling_function_shared_cache(self) -> None:
        lf = NLPLabelingFunction(name='my_lf', f=has_person_mention, pre=[combine_text])

        @nlp_labeling_function(pre=[combine_text])
        def lf2(x: DataPoint) -> int:
            return (0 if (len(x.doc) < 9) else (- 1))

        # Both LFs share one spaCy cache keyed by the preprocessor config.
        lf._nlp_config.nlp.reset_cache()
        self.assertEqual(len(lf._nlp_config.nlp._cache), 0)
        self.assertEqual(len(lf2._nlp_config.nlp._cache), 0)
        self._run_lf(lf)
        self.assertEqual(len(lf._nlp_config.nlp._cache), 2)
        self.assertEqual(len(lf2._nlp_config.nlp._cache), 2)
        self._run_lf(lf2)
        self.assertEqual(len(lf._nlp_config.nlp._cache), 2)
        self.assertEqual(len(lf2._nlp_config.nlp._cache), 2)

    def test_nlp_labeling_function_raises(self) -> None:
        # Creating a second NLP LF with different parameters must fail.
        with self.assertRaisesRegex(ValueError, 'different parameters'):

            @nlp_labeling_function()
            def has_person_mention(x: DataPoint) -> int:
                person_ents = [ent for ent in x.doc.ents if (ent.label_ == 'PERSON')]
                return (0 if (len(person_ents) > 0) else (- 1))
class detect_anomaly(object):
    """Context manager enabling torch autograd anomaly detection on entry
    and restoring the previous setting on exit.

    Exceptions are never suppressed (__exit__ returns False).
    """

    def __init__(self):
        # Remember the state active at construction time so it can be restored.
        self.prev = torch.is_anomaly_enabled()

    def __enter__(self):
        torch.set_anomaly_enabled(True)

    def __exit__(self, *args):
        torch.set_anomaly_enabled(self.prev)
        return False
class RequestTimeout(HTTPException):
    """*408 Request Timeout*

    Raised when the client did not produce a complete request within the
    time the server was prepared to wait.
    """
    # HTTP status code sent in the response line.
    code = 408
    description = "The server closed the network connection because the browser didn't finish the request within the specified time."
class FuncContiguousArgs():
    """Stub whose forward takes the three standard BERT inputs as contiguous
    positional arguments.

    NOTE(review): purpose inferred from the argument names (presumably used
    for tracing/signature tests) — confirm against the caller.
    """

    def forward(self, input_ids, token_type_ids, attention_mask):
        # Intentionally a no-op.
        return None
def classification_eval(model: tf.keras.Model, data_loader: tf.data.Dataset, limit=None):
    """Evaluate top-1 classification accuracy of *model* over *data_loader*.

    Stops early once *limit* images have been processed (when given).
    Returns (accuracy, number_of_images_evaluated).
    """
    logging.info(f'Start classification evaluation')
    metric = tf.keras.metrics.Accuracy()
    seen = 0
    for batch in tqdm(data_loader, desc='Classification evaluation'):
        images, labels = batch
        logits = model(images)
        predictions = tf.argmax(logits, 1)
        metric.update_state(labels, predictions)
        seen += labels.shape[0]
        # Optional early stop after `limit` images.
        if limit and seen >= int(limit):
            break
    logging.info(f'Num of images: {seen}, Accuracy: {round((100 * metric.result().numpy()), 2)} %')
    return (metric.result().numpy(), seen)
def _check_polynomials_P3(quadratic1, quadratic2, variables):
    """Validate two quadratics (forms in P^3) and return their variables as
    a 4-tuple (w, x, y, z), with z = None in the three-variable case.

    Raises ValueError when the quadratics live in different polynomial
    rings or when the variable count is neither three nor four. In the
    four-variable case both forms must be homogeneous of degree 2.
    """
    if quadratic1.parent() is not quadratic2.parent():
        raise ValueError('the two quadratics must be in the same polynomial ring')
    if variables is None:
        variables = quadratic1.variables() + quadratic2.variables()
    variables = sorted(set(variables), reverse=True)
    count = len(variables)
    if count == 4:
        w, x, y, z = variables
        # Both quadratics must be homogeneous of degree 2 in all four variables.
        _check_homogeneity(quadratic1, [w, x, y, z], (1, 1, 1, 1), 2)
        _check_homogeneity(quadratic2, [w, x, y, z], (1, 1, 1, 1), 2)
    elif count == 3:
        w, x, y = variables
        z = None
    else:
        raise ValueError(f'need three or four variables, got {variables}')
    return (w, x, y, z)
def get_linear_schedule_with_warmup(*args, **kwargs):
    # Dummy placeholder used when torch is absent: requires_pytorch raises an
    # informative ImportError instead of exposing the real implementation.
    requires_pytorch(get_linear_schedule_with_warmup)
class RCAN(nn.Module):
    """Residual Channel Attention Network for super-resolution.

    Pipeline: mean-shift -> head conv -> n_resgroups residual groups with
    channel attention (plus a long skip connection) -> upsampling tail ->
    inverse mean-shift.
    """

    def __init__(self, args, conv=common.default_conv):
        super(RCAN, self).__init__()
        n_resgroups = args.n_resgroups
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        # Channel-attention reduction ratio.
        reduction = args.reduction
        # Only the first configured scale is used.
        scale = args.scale[0]
        act = nn.ReLU(True)
        # Subtract the dataset RGB mean on input; added back on output.
        self.sub_mean = common.MeanShift(args.rgb_range)
        modules_head = [conv(args.n_colors, n_feats, kernel_size)]
        modules_body = [ResidualGroup(conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) for _ in range(n_resgroups)]
        modules_body.append(conv(n_feats, n_feats, kernel_size))
        modules_tail = [common.Upsampler(conv, scale, n_feats, act=False), conv(n_feats, args.n_colors, kernel_size)]
        self.add_mean = common.MeanShift(args.rgb_range, sign=1)
        self.head = nn.Sequential(*modules_head)
        self.body = nn.Sequential(*modules_body)
        self.tail = nn.Sequential(*modules_tail)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        res = self.body(x)
        # Long skip connection around the entire body.
        res += x
        x = self.tail(res)
        x = self.add_mean(x)
        return x

    def load_state_dict(self, state_dict, strict=False):
        """Copy parameters, tolerating a mismatched `tail` (upsampler).

        This allows loading a checkpoint trained for a different scale:
        copy failures and missing/unexpected keys are forgiven only for
        parameters whose name contains 'tail'. Note the default here is
        strict=False, unlike nn.Module.load_state_dict.
        """
        own_state = self.state_dict()
        for (name, param) in state_dict.items():
            if (name in own_state):
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    # Shape mismatch: tolerated for the upsampler only.
                    if (name.find('tail') >= 0):
                        print('Replace pre-trained upsampler to new one...')
                    else:
                        raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))
            elif strict:
                if (name.find('tail') == (- 1)):
                    raise KeyError('unexpected key "{}" in state_dict'.format(name))
        if strict:
            missing = (set(own_state.keys()) - set(state_dict.keys()))
            if (len(missing) > 0):
                raise KeyError('missing keys in state_dict: "{}"'.format(missing))
def test_birch_duck_typing_meta():
    """The HTML repr of a meta-estimator (Birch with an estimator passed as
    n_clusters) must show the inner estimator, its label, and the outer
    estimator itself."""
    inner = AgglomerativeClustering(n_clusters=3)
    birch = Birch(n_clusters=inner)
    html_output = estimator_html_repr(birch)
    with config_context(print_changed_only=True):
        # Inner estimator repr appears inside a <pre> block.
        assert f'<pre>{html.escape(str(birch.n_clusters))}' in html_output
        # Inner estimator gets its own collapsible label.
        assert 'AgglomerativeClustering</label>' in html_output
        # Outer estimator repr is present as well.
        assert f'<pre>{html.escape(str(birch))}' in html_output
def _fractional_power_pade(R, t, m):
    """Evaluate the [m/m] Pade approximant of the fractional matrix power.

    R : square upper triangular matrix (argument of the approximant).
    t : fractional exponent, required to satisfy -1 < t < 1.
    m : positive integer Pade order.

    Returns an upper triangular matrix U approximating the fractional
    power, computed by bottom-up evaluation of the continued-fraction form
    using triangular solves.
    """
    if ((m < 1) or (int(m) != m)):
        raise ValueError('expected a positive integer m')
    if (not ((- 1) < t < 1)):
        raise ValueError('expected -1 < t < 1')
    R = np.asarray(R)
    if ((len(R.shape) != 2) or (R.shape[0] != R.shape[1])):
        raise ValueError('expected an upper triangular square matrix')
    (n, n) = R.shape
    ident = np.identity(n)
    # bottom-up continued fraction: seed with coefficient c_{2m}, then fold
    # in c_j for j = 2m-1 .. 1 via triangular solves
    Y = (R * _fractional_power_pade_constant((2 * m), t))
    for j in range(((2 * m) - 1), 0, (- 1)):
        rhs = (R * _fractional_power_pade_constant(j, t))
        Y = solve_triangular((ident + Y), rhs)
    U = (ident + Y)
    # triangular inputs and triangular solves must yield a triangular
    # result; anything else signals a bug upstream
    if (not np.array_equal(U, np.triu(U))):
        raise Exception('internal inconsistency')
    return U
def test_schema_not_available_wsgi(cli, loadable_flask_app, snapshot_cli):
    """Running the CLI with a missing schema against a WSGI app matches the snapshot."""
    result = cli.run('unknown.yaml', f'--app={loadable_flask_app}')
    assert result == snapshot_cli
def entropy(x, k=3, base=2):
    """Kozachenko-Leonenko k-nearest-neighbour entropy estimator.

    x is a list of d-dimensional sample points; k is the neighbour rank
    used for the distance estimate; base sets the logarithm base of the
    returned entropy.
    """
    assert k <= len(x) - 1, 'Set k smaller than num. samples - 1'
    d = len(x[0])
    N = len(x)
    intens = 1e-10
    # add tiny noise so duplicate points do not yield zero distances
    x = [list((p + (intens * nr.rand(len(x[0]))))) for p in x]
    tree = ss.cKDTree(x)
    # distance (Chebyshev metric) to the k-th nearest neighbour of each point
    nn = []
    for point in x:
        distances = tree.query(point, (k + 1), p=float('inf'))[0]
        nn.append(distances[k])
    const = (digamma(N) - digamma(k)) + (d * log(2))
    mean_log_dist = np.mean(list(map(log, nn)))
    return (const + (d * mean_log_dist)) / log(base)
class ReparamPolicy(ReparamModule):
    """Reparameterized policy that forwards the policy API to the wrapped module."""

    def sample(self, *args, **kwargs):
        """Delegate action sampling to the wrapped module."""
        return self.module.sample(*args, **kwargs)

    def log_prob(self, *args, **kwargs):
        """Delegate log-probability computation to the wrapped module."""
        return self.module.log_prob(*args, **kwargs)

    def kl_divergence(self, *args, **kwargs):
        """Delegate KL-divergence computation to the wrapped module."""
        return self.module.kl_divergence(*args, **kwargs)

    def predict(self, obs, *args, **kwargs):
        """Convert the observation to a tensor, run the forward pass, and predict."""
        obs_tensor = torch.tensor(obs)
        features = self.forward(obs_tensor)
        return self.module._predict(features, *args, **kwargs)

    def unsafe_probability_mass(self, *args, **kwargs):
        """Delegate (unnormalized) probability-mass lookup to the wrapped module."""
        return self.module.unsafe_probability_mass(*args, **kwargs)
def pq_group_bitrade_generators(p, q):
    """Return bitrade generators (a, b, c, G) for primes p, q with q ≡ 1 (mod p).

    Builds an element beta of multiplicative order p in GF(q), uses it to
    partition {2, ..., q} into p-cycles P, takes Q as the single q-cycle,
    and returns permutations a, b, c with a*b*c = identity together with
    the nonabelian group G = <P, Q> of order p*q.
    """
    assert is_prime(p)
    assert is_prime(q)
    assert ((q % p) == 1)
    F = FiniteField(q)
    fgen = F.multiplicative_generator()
    # (q - 1) / p is an exact division here since q ≡ 1 (mod p);
    # beta then has multiplicative order exactly p
    beta = (fgen ** ((q - 1) / p))
    assert (beta != 1)
    assert (((beta ** p) % q) == 1)
    # Q: the full q-cycle (1 2 ... q)
    Q = tuple(range(1, (q + 1)))
    P = []
    seenValues = {}
    # partition {2, ..., q} into orbits of x -> 1 + (x - 1) * beta
    for i in range(2, q):
        if (i in seenValues):
            continue
        cycle = []
        for k in range(p):
            x = ((1 + ((i - 1) * (beta ** k))) % q)
            if (x == 0):
                # representatives live in {1, ..., q}, not {0, ..., q-1}
                x = q
            seenValues[x] = True
            cycle.append(x)
        P.append(tuple(map(Integer, cycle)))
    G = PermutationGroup([P, Q])
    assert (G.order() == (p * q))
    assert (not G.is_abelian())
    a = PermutationConstructor(P)
    b = PermutationConstructor(Q)
    # c is chosen so that a * b * c is the identity
    c = PermutationConstructor(((a * b) ** (- 1)))
    return (a, b, c, PermutationGroup([P, Q]))
def test_read_vi_tree():
    """Reading one Vietnamese treebank line yields a single tree whose multi-word token survives."""
    text = VI_TREEBANK.split('\n')[0]
    trees = tree_reader.read_trees(text)
    assert len(trees) == 1
    # round-trips back to the original bracketed string
    assert str(trees[0]) == text
    preterminal = trees[0].children[0].children[0].children[2]
    assert preterminal.is_preterminal()
    # the leaf keeps its internal space ('ai Loan' is one token)
    assert preterminal.children[0].label == 'ai Loan'
class MatchingPipe(Pipe):
    """Pipe that prepares sentence-pair (matching/NLI) datasets.

    Tokenizes both raw-word fields, optionally lowercases them, builds the
    word and target vocabularies (from the train splits), indexes all
    datasets and flags input/target fields.
    """

    def __init__(self, lower=False, tokenizer: str='raw'):
        # lower: lowercase both tokenized input fields
        # tokenizer: tokenizer name understood by get_tokenizer
        super().__init__()
        self.lower = bool(lower)
        self.tokenizer = get_tokenizer(tokenize_method=tokenizer)

    def _tokenize(self, data_bundle, field_names, new_field_names):
        """Tokenize each field into its matching new field, for every dataset."""
        for (name, dataset) in data_bundle.datasets.items():
            for (field_name, new_field_name) in zip(field_names, new_field_names):
                dataset.apply_field((lambda words: self.tokenizer(words)), field_name=field_name, new_field_name=new_field_name)
        return data_bundle

    def process(self, data_bundle):
        """Run the full preprocessing pipeline and return the mutated bundle."""
        # tokenize the two raw sentence fields into INPUTS(0)/INPUTS(1)
        data_bundle = self._tokenize(data_bundle, [Const.RAW_WORDS(0), Const.RAW_WORDS(1)], [Const.INPUTS(0), Const.INPUTS(1)])
        # drop unlabeled examples whose gold label is '-' (e.g. in SNLI)
        for dataset in data_bundle.datasets.values():
            if dataset.has_field(Const.TARGET):
                dataset.drop((lambda x: (x[Const.TARGET] == '-')))
        if self.lower:
            for (name, dataset) in data_bundle.datasets.items():
                dataset[Const.INPUTS(0)].lower()
                dataset[Const.INPUTS(1)].lower()
        # word vocab is created from train splits only; non-train splits
        # contribute "no-create-entry" words that stay out of the trainable vocab
        word_vocab = Vocabulary()
        word_vocab.from_dataset(*[dataset for (name, dataset) in data_bundle.datasets.items() if ('train' in name)], field_name=[Const.INPUTS(0), Const.INPUTS(1)], no_create_entry_dataset=[dataset for (name, dataset) in data_bundle.datasets.items() if ('train' not in name)])
        word_vocab.index_dataset(*data_bundle.datasets.values(), field_name=[Const.INPUTS(0), Const.INPUTS(1)])
        # label vocab uses no padding/unknown tokens
        target_vocab = Vocabulary(padding=None, unknown=None)
        target_vocab.from_dataset(*[ds for (name, ds) in data_bundle.iter_datasets() if ('train' in name)], field_name=Const.TARGET, no_create_entry_dataset=[ds for (name, ds) in data_bundle.iter_datasets() if (('train' not in name) and ds.has_field(Const.TARGET))])
        if (len(target_vocab._no_create_word) > 0):
            # labels seen only outside the train split: warn loudly
            warn_msg = f"There are {len(target_vocab._no_create_word)} target labels in {[name for name in data_bundle.datasets.keys() if ('train' not in name)]} data set but not in train data set!."
            warnings.warn(warn_msg)
            logger.warning(warn_msg)
        has_target_datasets = [dataset for (name, dataset) in data_bundle.datasets.items() if dataset.has_field(Const.TARGET)]
        target_vocab.index_dataset(*has_target_datasets, field_name=Const.TARGET)
        data_bundle.set_vocab(word_vocab, Const.INPUTS(0))
        data_bundle.set_vocab(target_vocab, Const.TARGET)
        input_fields = [Const.INPUTS(0), Const.INPUTS(1), Const.INPUT_LENS(0), Const.INPUT_LENS(1)]
        target_fields = [Const.TARGET]
        for (name, dataset) in data_bundle.datasets.items():
            # record sequence lengths, then flag model inputs and targets
            dataset.add_seq_len(Const.INPUTS(0), Const.INPUT_LENS(0))
            dataset.add_seq_len(Const.INPUTS(1), Const.INPUT_LENS(1))
            dataset.set_input(*input_fields, flag=True)
            for fields in target_fields:
                if dataset.has_field(fields):
                    dataset.set_target(fields, flag=True)
        return data_bundle
class ZincConfig(BaseGraphConfig):
    """Ray-Tune experiment configuration for the ZINC graph regression task."""

    def __init__(self, num_samples=50) -> None:
        # num_samples: number of random-search trials to draw
        super().__init__(debug_mode=False)
        self.num_samples = num_samples

    def settings(self) -> ExperimentSettings:
        return ExperimentSettings('zinc', final_repeats=REPEATS, final_max_iterations=ITERS)

    def resource_requirements(self) -> TrialResources:
        # 0.25 GPU means four trials share one device
        return TrialResources(cpus=2, gpus=0.25)

    def search_strategy(self):
        return RandomSearchStrategy(self.num_samples)

    def trial_scheduler(self):
        # ASHA early-stops underperforming trials after the grace period
        metric = self.trial_metric()
        return AsyncHyperBandScheduler(metric=metric.name, mode=metric.mode, max_t=ITERS, grace_period=20)

    def trial_metric(self) -> Metric:
        return Metric('val_loss', 'min')

    def stoppers(self):
        metric = self.trial_metric()
        return [PatientStopper(metric=metric.name, mode=metric.mode, patience=20, max_iters=ITERS)]

    def optimizer(self, model, hparams):
        return Adam(model.parameters(), lr=hparams['lr'], weight_decay=hparams['wd'])

    def extra_setup(self, model, optimizer, hparams):
        print_model_parameters(model)
        metric = self.trial_metric()
        # halve the learning rate when val_loss plateaus for 10 iterations
        return Extra(device=torch.device(('cuda' if torch.cuda.is_available() else 'cpu')), lr_scheduler=ReduceLROnPlateau(optimizer, metric.mode, factor=0.5, patience=10, min_lr=1e-05))

    def data(self, pinned_objs, hparams):
        return zinc_data(data_location(), batch_size=hparams['batch_size'])

    def train(self, model, optimizer, data, extra, iteration: int):
        return (train(model, optimizer, data, extra.device), None)

    def val(self, model, data, extra, iteration: int):
        trial_metric = self.trial_metric()
        metrics = evaluate(model, data, extra.device, 'val')
        # step the plateau scheduler on the monitored validation metric
        extra.lr_scheduler.step(metrics[trial_metric.name])
        return (metrics, None)

    def test(self, model, data, extra):
        return (evaluate(model, data, extra.device, 'test'), None)

    def persist_trial(self, checkpoint_dir, model, optimizer, hparams, extra):
        # one file holds model/optimizer/scheduler state plus hyperparameters
        out = {'model': model.state_dict(), 'opt': optimizer.state_dict(), 'lr_scheduler': extra.lr_scheduler.state_dict(), 'hparams': hparams}
        torch.save(out, str((checkpoint_dir / 'checkpoint.pt')))

    def restore_trial(self, checkpoint_dir, map_location=None):
        """Rebuild model/optimizer/extra from a saved checkpoint."""
        checkpoint = torch.load(str((checkpoint_dir / 'checkpoint.pt')), map_location=map_location)
        hparams = checkpoint['hparams']
        model = self.model(hparams)
        model.load_state_dict(checkpoint['model'])
        opt = self.optimizer(model, hparams)
        opt.load_state_dict(checkpoint['opt'])
        extra = self.extra_setup(model, opt, hparams)
        extra.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        return (model, opt, hparams, extra)

    def final_runs_summaries(self):
        return [TrialCurvePlotter(['train_loss', 'val_loss'], name='loss_curves'), TestMetricSummaries()]
class Uniform(Initializer):
    """Initializer that draws values i.i.d. from the uniform distribution on [low, high)."""

    def __init__(self, low=0.0, high=1.0):
        # low/high: inclusive lower and exclusive upper bound of the draw
        super().__init__()
        self.low = low
        self.high = high

    def initialize(self, shape):
        """Return an array of the given shape sampled uniformly from [low, high)."""
        return np.random.uniform(low=self.low, high=self.high, size=shape)
def vmap(func: Callable, in_dims: in_dims_t=0, out_dims: out_dims_t=0) -> Callable:
    """Vectorizing map: return a function that maps ``func`` over a batch dim.

    in_dims / out_dims specify which dimension of each input / output is the
    batch dimension. The wrapper pushes a vmap nesting level, batches the
    inputs, runs func once on batched tensors and unwraps the outputs.
    """
    warnings.warn('torch.vmap is an experimental prototype that is subject to change and/or deletion. Please use at your own risk.')
    # FIX: the original had a bare `(func)` expression statement (a no-op);
    # apply functools.wraps so the returned callable keeps func's
    # name/docstring, which the helper error messages rely on.
    import functools

    @functools.wraps(func)
    def wrapped(*args):
        _check_out_dims_is_int_or_int_tuple(out_dims, func)
        # nesting counter supports vmap-of-vmap
        vmap_level = torch._C._vmapmode_increment_nesting()
        try:
            (batched_inputs, batch_size) = _create_batched_inputs(in_dims, args, vmap_level, func)
            batched_outputs = func(*batched_inputs)
            _validate_outputs(batched_outputs, func)
            return _unwrap_batched(batched_outputs, out_dims, vmap_level, batch_size, func)
        finally:
            # always pop the vmap level, even if func raised
            torch._C._vmapmode_decrement_nesting()
    return wrapped
def specialize_types(f: SymbolicFunction, type_replacements: T.Mapping[(T.Type, T.Type)]) -> SymbolicFunction:
    """Return a wrapper of ``f`` whose type annotations are substituted.

    The wrapper forwards every call to f unchanged; only its
    ``__annotations__`` differ, with each annotated type found in
    type_replacements swapped for its replacement.
    """
    # FIX: the original had a bare `(f)` expression statement (a no-op);
    # functools.wraps copies f's metadata (name, docstring) onto the wrapper.
    import functools

    @functools.wraps(f)
    def specialized_function(*args: T.Any, **kwargs: T.Any) -> T.Any:
        return f(*args, **kwargs)

    # copy before mutating: wraps shares the annotation dict by reference,
    # and f's own annotations must stay untouched
    specialized_function.__annotations__ = f.__annotations__.copy()
    for (annotation, cls) in specialized_function.__annotations__.items():
        if (cls in type_replacements):
            specialized_function.__annotations__[annotation] = type_replacements[cls]
    return T.cast(SymbolicFunction, specialized_function)
class Pretrainer():
    """Orchestrates fetching and loading of pretrained parameter files.

    Loadable objects are registered by name together with optional source
    paths, custom loading hooks and gating conditions. ``collect_files``
    fetches each parameter file into ``collect_in``; ``load_collected``
    then applies the appropriate load hook to every object.
    """

    def __init__(self, collect_in='./model_checkpoints', loadables=None, paths=None, custom_hooks=None, conditions=None):
        self.loadables = {}
        self.collect_in = pathlib.Path(collect_in)
        if (loadables is not None):
            self.add_loadables(loadables)
        self.paths = {}
        if (paths is not None):
            self.add_paths(paths)
        self.custom_hooks = {}
        if (custom_hooks is not None):
            self.add_custom_hooks(custom_hooks)
        self.conditions = {}
        if (conditions is not None):
            self.add_conditions(conditions)
        # names whose files resolved to local paths (loaded directly, not
        # via collect_in)
        self.is_local = []

    def set_collect_in(self, path):
        """Change the directory fetched files are collected into."""
        self.collect_in = pathlib.Path(path)

    def add_loadables(self, loadables):
        """Register name -> object mappings to be loaded."""
        self.loadables.update(loadables)

    def add_paths(self, paths):
        """Register name -> source path (or FetchSource) overrides."""
        self.paths.update(paths)

    def add_custom_hooks(self, custom_hooks):
        """Register name -> callable(obj, path, device=...) load hooks."""
        self.custom_hooks.update(custom_hooks)

    def add_conditions(self, conditions):
        """Register name -> bool (or callable returning bool) load conditions."""
        self.conditions.update(conditions)

    def split_path(self, path):
        """Split a path (or FetchSource) into (source, filename).

        FIX: this method was declared without ``self`` although it is always
        invoked as ``self.split_path(...)`` in collect_files, which raised a
        TypeError at call time.
        """
        def split(src):
            # no directory component -> treat as relative to '.'
            if ('/' in src):
                return src.rsplit('/', maxsplit=1)
            else:
                return ('./', src)
        if isinstance(path, FetchSource):
            (fetch_from, fetch_path) = path
            (source, filename) = split(fetch_path)
            return (FetchSource(fetch_from, source), filename)
        else:
            return split(path)

    def collect_files(self, default_source=None, internal_ddp_handling=False):
        """Fetch every loadable's parameter file into ``collect_in``.

        Returns a dict name -> fetched path. With internal_ddp_handling,
        only the main process downloads; every rank then resolves the path.
        Raises ValueError when a loadable has no path and no default_source.
        """
        logger.debug(f'Collecting files (or symlinks) for pretraining in {self.collect_in}.')
        self.collect_in.mkdir(exist_ok=True)
        loadable_paths = {}
        for name in self.loadables:
            if (not self.is_loadable(name)):
                continue
            save_filename = (name + PARAMFILE_EXT)
            if (name in self.paths):
                (source, filename) = self.split_path(self.paths[name])
            elif (default_source is not None):
                filename = save_filename
                source = default_source
            else:
                raise ValueError(f"Path not specified for '{name}', and no default_source given!")
            if internal_ddp_handling:
                # download once on the main process, then resolve on all ranks
                run_on_main(fetch, kwargs={'filename': filename, 'source': source, 'overwrite': False, 'save_filename': save_filename, 'use_auth_token': False, 'revision': None})
                path = fetch(filename=filename, source=source, savedir=self.collect_in, overwrite=False, save_filename=save_filename, use_auth_token=False, revision=None)
            else:
                path = fetch(filename=filename, source=source, savedir=self.collect_in, overwrite=False, save_filename=save_filename, use_auth_token=False, revision=None)
            loadable_paths[name] = path
            fetch_from = None
            if isinstance(source, FetchSource):
                (fetch_from, source) = source
            # remember files that are already local so loading can bypass
            # the collect_in copy entirely
            if ((fetch_from is FetchFrom.LOCAL) or (pathlib.Path(path).resolve() == (pathlib.Path(source).resolve() / filename))):
                logger.info(f'Set local path in self.paths[{name}] = {path}')
                self.paths[name] = str(path)
                self.is_local.append(name)
        return loadable_paths

    def is_loadable(self, name):
        """Return True if the named loadable's condition (if any) is satisfied."""
        if (name not in self.conditions):
            return True
        condition = self.conditions[name]
        if callable(condition):
            return condition()
        else:
            return bool(condition)

    def load_collected(self, device=None):
        """Load every eligible object's parameters from the collected files."""
        logger.info(f"Loading pretrained files for: {', '.join(self.loadables)}")
        paramfiles = {}
        for name in self.loadables:
            if (not self.is_loadable(name)):
                continue
            filename = (name + PARAMFILE_EXT)
            paramfiles[name] = (self.collect_in / filename)
            if (name in self.is_local):
                logger.info(f'Redirecting (loading from local path): {paramfiles[name]} -> {self.paths[name]}')
                paramfiles[name] = self.paths[name]
        self._call_load_hooks(paramfiles, device)

    def _call_load_hooks(self, paramfiles, device=None):
        """Dispatch each object to its custom hook or a registered default hook."""
        for (name, obj) in self.loadables.items():
            if (not self.is_loadable(name)):
                continue
            loadpath = paramfiles[name]
            if (name in self.custom_hooks):
                self.custom_hooks[name](obj, loadpath, device=device)
                continue
            # prefer parameter-transfer hooks over full checkpoint-load hooks
            default_hook = get_default_hook(obj, DEFAULT_TRANSFER_HOOKS)
            if (default_hook is not None):
                default_hook(obj, loadpath, device=device)
                continue
            default_hook = get_default_hook(obj, DEFAULT_LOAD_HOOKS)
            if (default_hook is not None):
                end_of_epoch = False
                default_hook(obj, loadpath, end_of_epoch, device)
                continue
            MSG = f"Don't know how to load {type(obj)}. Register default hook or add custom hook for this object."
            raise RuntimeError(MSG)
def test_matlab_like_resize():
    """MATLABLikeResize: scale/output_shape resizing, argument validation, repr."""
    # resize by a scale factor
    results = {'lq': np.ones((16, 16, 3))}
    resize_by_scale = MATLABLikeResize(keys=['lq'], scale=0.25)
    results = resize_by_scale(results)
    assert results['lq'].shape == (4, 4, 3)
    # resize to an explicit output shape
    results['lq'] = np.ones((16, 16, 3))
    resize_to_shape = MATLABLikeResize(keys=['lq'], output_shape=(6, 6))
    results = resize_to_shape(results)
    assert results['lq'].shape == (6, 6, 3)
    # invalid constructor arguments must raise
    with pytest.raises(ValueError):
        MATLABLikeResize(keys=['lq'], kernel='abc')
    with pytest.raises(ValueError):
        MATLABLikeResize(keys=['lq'], kernel_width=10)
    with pytest.raises(ValueError):
        MATLABLikeResize(keys=['lq'])
    expected_repr = (resize_to_shape.__class__.__name__
                     + "(keys=['lq'], scale=None, output_shape=(6, 6), "
                     + 'kernel=bicubic, kernel_width=4.0)')
    assert repr(resize_to_shape) == expected_repr
class IntersectionTester():
    """A-posteriori self-intersection check for a deformed fenics mesh.

    On construction, counts how often each vertex index appears across the
    mesh's cells; ``test`` accepts a transformed mesh only if the computed
    per-vertex collision counts still match these occurrence counts on
    every MPI rank.
    """

    def __init__(self, mesh: fenics.Mesh) -> None:
        self.mesh = mesh
        cells = self.mesh.cells()
        flat_cells = cells.flatten().tolist()
        # number of cells each vertex index participates in
        self.cell_counter: collections.Counter = collections.Counter(flat_cells)
        self.occurrences = np.array([self.cell_counter[i] for i in range(self.mesh.num_vertices())])

    def test(self) -> bool:
        """Return True if no rank detects a self-intersection, else False."""
        self_intersections = False
        collisions = CollisionCounter.compute_collisions(self.mesh)
        if (not (collisions == self.occurrences).all()):
            self_intersections = True
        # a single rank detecting an intersection rejects the mesh globally
        list_self_intersections = fenics.MPI.comm_world.allgather(self_intersections)
        if any(list_self_intersections):
            _loggers.debug('Mesh transformation rejected due to a posteriori check.')
            return False
        else:
            return True
def distiller_local(ckpt, *args, **kwargs):
    """Build an upstream expert from a local distiller checkpoint file.

    ckpt must point to an existing checkpoint on disk; remaining arguments
    are forwarded to _UpstreamExpert.
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
def denoise_image(mic, models, lowpass=1, cutoff=0, gaus=None, inv_gaus=None, deconvolve=False, deconv_patch=1, patch_size=(- 1), padding=0, normalize=False, use_cuda=False):
    """Denoise a micrograph with an ensemble of models.

    Pipeline: optional lowpass filter -> standardize -> optional outlier
    clipping -> optional Gaussian / inverse-Gaussian / spatial-covariance
    correction (mutually exclusive, in that priority order) -> average the
    denoised outputs over all models -> rescale to the original intensity
    range (unless normalize=True, which keeps standardized output).
    Returns a numpy array.
    """
    if (lowpass > 1):
        mic = dn.lowpass(mic, lowpass)
    mic = torch.from_numpy(mic)
    if use_cuda:
        mic = mic.cuda()
    # standardize to zero mean / unit std for the models
    mu = mic.mean()
    std = mic.std()
    x = ((mic - mu) / std)
    if (cutoff > 0):
        # zero out extreme outliers (measured in standard deviations)
        x[((x < (- cutoff)) | (x > cutoff))] = 0
    if (gaus is not None):
        x = dn.denoise(gaus, x)
    elif (inv_gaus is not None):
        x = dn.denoise(inv_gaus, x)
    elif deconvolve:
        x = dn.correct_spatial_covariance(x, patch=deconv_patch)
    # ensemble average over all denoising models
    mic = 0
    for model in models:
        mic += dn.denoise(model, x, patch_size=patch_size, padding=padding)
    mic /= len(models)
    if normalize:
        mic = ((mic - mic.mean()) / mic.std())
    else:
        # undo the standardization to restore the original intensity scale
        mic = ((std * mic) + mu)
    mic = mic.cpu().numpy()
    return mic
class sage__libs__gap(JoinFeature):
    """Join feature checking that sage.libs.gap (the libgap interface) and
    the GAP-backed group/matrix/field modules are all importable."""

    def __init__(self):
        JoinFeature.__init__(self, 'sage.libs.gap', [PythonModule('sage.libs.gap.libgap'), PythonModule('sage.interfaces.gap'), PythonModule('sage.groups.matrix_gps.finitely_generated_gap'), PythonModule('sage.groups.matrix_gps.group_element_gap'), PythonModule('sage.groups.matrix_gps.heisenberg'), PythonModule('sage.groups.matrix_gps.isometries'), PythonModule('sage.groups.matrix_gps.linear_gap'), PythonModule('sage.groups.matrix_gps.matrix_group_gap'), PythonModule('sage.groups.matrix_gps.named_group_gap'), PythonModule('sage.groups.matrix_gps.orthogonal_gap'), PythonModule('sage.groups.matrix_gps.symplectic_gap'), PythonModule('sage.groups.matrix_gps.unitary_gap'), PythonModule('sage.matrix.matrix_gap'), PythonModule('sage.rings.universal_cyclotomic_field')])
class TestTimeSeriesData(unittest.TestCase):
    """Unit tests for TimeSeriesData array extraction and index helpers."""

    def test_data_class(self):
        # 5 samples x 3 variables toy dataset
        data = np.array([[(- 0.), 0., (- 2.)], [(- 0.), (- 0.6893588), (- 1.)], [0., 1., 0.], [(- 0.), 0., 1.], [(- 1.), (- 0.), (- 0.)]])
        data_obj = TimeSeriesData(data, var_names=['A', 'B', 'C'])
        # extraction by variable name, lagged target, no conditioning set
        (x, y, z) = data_obj.extract_array(X='A', Y=('B', (- 1)), Z=[], max_lag=2)
        self.assertTrue(all((x == [0., (- 0.), (- 1.)])))
        self.assertTrue(all((y == [(- 0.6893588), 1., 0.])))
        self.assertTrue((z == None))
        # same extraction addressed by integer column indices
        (x, y, z) = data_obj.extract_array(X=0, Y=(1, (- 1)), Z=[], max_lag=2)
        self.assertTrue(all((x == [0., (- 0.), (- 1.)])))
        self.assertTrue(all((y == [(- 0.6893588), 1., 0.])))
        self.assertTrue((z == None))
        # with a lagged conditioning variable
        (x, y, z) = data_obj.extract_array(X='A', Y=('B', (- 1)), Z=[('C', (- 2))], max_lag=2)
        self.assertTrue(all((z == [[(- 2.)], [(- 1.)], [0.]])))
        (x, y, z) = data_obj.extract_array(X=0, Y=(1, (- 1)), Z=[('C', (- 2))], max_lag=2)
        self.assertTrue(all((z == [[(- 2.)], [(- 1.)], [0.]])))
        # rows containing NaNs must be dropped when contains_nans is set
        data[(1, (- 1))] = math.nan
        data_obj = TimeSeriesData(data, var_names=['A', 'B', 'C'], contains_nans=True)
        (x, y, z) = data_obj.extract_array(X='A', Y=('B', (- 1)), Z=[], max_lag=2)
        self.assertTrue(all((x == [0., (- 0.), (- 1.)])))
        self.assertTrue(all((y == [(- 0.6893588), 1., 0.])))
        self.assertTrue((z == None))
        (x, y, z) = data_obj.extract_array(X=0, Y=(1, (- 1)), Z=[], max_lag=2)
        self.assertTrue(all((x == [0., (- 0.), (- 1.)])))
        self.assertTrue(all((y == [(- 0.6893588), 1., 0.])))
        self.assertTrue((z == None))
        # NaN in the conditioning column removes the affected sample
        (x, y, z) = data_obj.extract_array(X='A', Y=('B', (- 1)), Z=[('C', (- 2))], max_lag=2)
        self.assertTrue(all((x == [0., (- 1.)])))
        self.assertTrue(all((y == [(- 0.6893588), 0.])))
        self.assertTrue(all((z == [[(- 2.)], [0.]])))
        (x, y, z) = data_obj.extract_array(X=0, Y=(1, (- 1)), Z=[(2, (- 2))], max_lag=2)
        self.assertTrue(all((x == [0., (- 1.)])))
        self.assertTrue(all((y == [(- 0.6893588), 0.])))
        self.assertTrue(all((z == [[(- 2.)], [0.]])))
        # extraction with no target variable (Y=None)
        (x, y, z) = data_obj.extract_array(X='A', Y=None, Z=[('C', (- 2))], max_lag=2)
        self.assertTrue(all((x == [0., (- 1.)])))
        self.assertTrue((y == None))
        self.assertTrue(all((z == [[(- 2.)], [0.]])))
        (x, y, z) = data_obj.extract_array(X=0, Y=None, Z=[(2, (- 2))], max_lag=2)
        self.assertTrue(all((x == [0., (- 1.)])))
        self.assertTrue((y == None))
        self.assertTrue(all((z == [[(- 2.)], [0.]])))
        # name <-> index conversion helpers
        self.assertTrue((data_obj.to_var_index([('A', (- 1))], [('B', (- 1))]) == [[(0, (- 1))], [(1, (- 1))]]))
        self.assertTrue((data_obj.to_var_index([('A', (- 1))]) == [(0, (- 1))]))
        self.assertTrue((data_obj.var_name2index('A') == 0))
        self.assertTrue((data_obj.var_name2index(0) == 0))
        # construction from two datasets of different lengths
        data = np.array([[(- 0.), 0., (- 2.)], [(- 0.), (- 0.6893588), (- 1.)], [0., 1., 0.], [(- 0.), 0., 1.], [(- 1.), (- 0.), (- 0.)]])
        data1 = data
        data2 = data[:3]
        data_obj = TimeSeriesData(data1, data2, var_names=['A', 'B', 'C'])
        self.assertTrue((data_obj.length == [5, 3]))
        self.assertTrue((data_obj.dim == 3))
def tldr_metrics(src_file, pred_file):
    """Compute matching, bag-of-words, edit-distance and BLEU metrics.

    src_file / pred_file: aligned text files with one command per line.
    Returns a dict metric name -> mean value, plus corpus-level 'bleu'
    (token BLEU on normalized commands) and 'bleu_char' (character BLEU).
    """
    src_list = []
    pred_list = []
    with open(src_file, 'r') as f:
        for line in f:
            src_list.append(line.strip())
    with open(pred_file, 'r') as f:
        for line in f:
            pred_list.append(line.strip())
    assert (len(src_list) == len(pred_list))
    metric_list = defaultdict(list)
    # per-pair metrics, averaged over the corpus below
    for (src, pred) in zip(src_list, pred_list):
        for (k, v) in calc_template_matching(src, pred).items():
            metric_list[k].append(v)
        for (k, v) in measure_bag_of_word(src, pred).items():
            metric_list[k].append(v)
        for (k, v) in calc_edit_distance(src, pred).items():
            metric_list[k].append(v)
    for (k, v) in metric_list.items():
        metric_list[k] = np.mean(v)

    def clean_for_bleu(s):
        # normalize shell commands: drop sudo/quotes/backticks, flatten
        # pipes and redirections, collapse whitespace, then rename each
        # {{var}} placeholder to a stable positional $0, $1, ... id
        s = s.replace('sudo', '').strip()
        s = s.replace('`', '').replace('"', '').replace("'", '')
        s = s.replace('|', ' ').replace('>', ' ').replace('<', ' ')
        s = ' '.join(s.split())
        s = s.replace('={', ' {')
        # self-referential defaultdict: first lookup of a var assigns the next id
        var_to_pc_holder = defaultdict((lambda : len(var_to_pc_holder)))
        for var in re.findall('{{(.*?)}}', s):
            _ = var_to_pc_holder[var]
        for (var, id) in var_to_pc_holder.items():
            var_str = ('{{%s}}' % var)
            s = s.replace(var_str, f'${id}')
        return s
    # token-level BLEU on normalized commands (add-k smoothing)
    bleu = BLEU(tokenize='none', smooth_method='add-k', smooth_value=1)
    pred_list = [clean_for_bleu(x) for x in pred_list]
    src_list = [clean_for_bleu(x) for x in src_list]
    bleu_score = bleu.corpus_score(pred_list, [src_list]).score
    metric_list['bleu'] = bleu_score

    def to_characters(s):
        # identity: sacrebleu's tokenize='char' does the character splitting
        return s
    bleu = BLEU(tokenize='char')
    pred_list = [to_characters(x) for x in pred_list]
    src_list = [to_characters(x) for x in src_list]
    bleu_score = bleu.corpus_score(pred_list, [src_list]).score
    metric_list['bleu_char'] = bleu_score
    return metric_list
def iri_to_uri(iri, charset='utf-8', errors='strict', safe_conversion=False):
    """Convert an IRI to an ASCII-only URI, percent-encoding non-ASCII parts.

    iri may also be an already-parsed URL tuple. With safe_conversion=True,
    an IRI that is already plain ASCII and contains no whitespace is
    returned unchanged, avoiding double-quoting of pre-encoded URLs.
    """
    if isinstance(iri, tuple):
        iri = url_unparse(iri)
    if safe_conversion:
        try:
            native_iri = to_native(iri)
            ascii_iri = native_iri.encode('ascii')
            # a string with whitespace splits into more than one chunk here
            if (len(ascii_iri.split()) == 1):
                return native_iri
        except UnicodeError:
            # non-ASCII content: fall through to the full conversion below
            pass
    iri = url_parse(to_unicode(iri, charset, errors))
    # quote each component separately; the netloc is IDNA-encoded instead
    path = url_quote(iri.path, charset, errors, _to_uri_safe)
    query = url_quote(iri.query, charset, errors, _to_uri_safe)
    fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)
    return to_native(url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment)))
class HausdorffDistance(DistanceMetric):
    """Percentile (robust) Hausdorff distance between two surfaces.

    percentile=100 gives the classic Hausdorff distance; lower percentiles
    (e.g. 95) reduce sensitivity to outlier surface points.
    """

    def __init__(self, percentile: float=100.0, metric: str='HDRFDST'):
        super().__init__(metric)
        self.percentile = percentile

    def calculate(self):
        """Return the symmetric percentile Hausdorff distance.

        Returns inf (with a NotComputableMetricWarning) when either the
        reference or the prediction surface is empty.
        """
        if ((self.distances.distances_gt_to_pred is not None) and (len(self.distances.distances_gt_to_pred) > 0)):
            # area-weighted cumulative distribution of surface distances;
            # pick the distance at the requested percentile
            surfel_areas_cum_gt = (np.cumsum(self.distances.surfel_areas_gt) / np.sum(self.distances.surfel_areas_gt))
            idx = np.searchsorted(surfel_areas_cum_gt, (self.percentile / 100.0))
            perc_distance_gt_to_pred = self.distances.distances_gt_to_pred[min(idx, (len(self.distances.distances_gt_to_pred) - 1))]
        else:
            warnings.warn('Unable to compute Hausdorff distance due to empty reference mask, returning inf', NotComputableMetricWarning)
            return float('inf')
        if ((self.distances.distances_pred_to_gt is not None) and (len(self.distances.distances_pred_to_gt) > 0)):
            surfel_areas_cum_pred = (np.cumsum(self.distances.surfel_areas_pred) / np.sum(self.distances.surfel_areas_pred))
            idx = np.searchsorted(surfel_areas_cum_pred, (self.percentile / 100.0))
            perc_distance_pred_to_gt = self.distances.distances_pred_to_gt[min(idx, (len(self.distances.distances_pred_to_gt) - 1))]
        else:
            warnings.warn('Unable to compute Hausdorff distance due to empty prediction mask, returning inf', NotComputableMetricWarning)
            return float('inf')
        # symmetric distance: worst of the two directed percentile distances
        return max(perc_distance_gt_to_pred, perc_distance_pred_to_gt)
class ConvPnPNetCls(nn.Module):
    """Convolutional PnP head regressing rotation and translation from
    dense correspondence features, conditioned on the object's 3D extents.
    """

    def __init__(self, nIn, num_regions=8, mask_attention_type='none', featdim=128, rot_dim=6, num_stride2_layers=3, num_extra_layers=0, norm='GN', num_gn_groups=32, act='relu', drop_prob=0.0, dropblock_size=5, flat_op='flatten', final_spatial_size=(8, 8)):
        # nIn: input channels; rot_dim: size of the rotation
        # parameterization; flat_op: how the final feature map is collapsed.
        super().__init__()
        self.featdim = featdim
        self.num_regions = num_regions
        self.mask_attention_type = mask_attention_type
        self.flat_op = flat_op
        conv_act = get_nn_act_func(act)
        # fully-connected layers use leaky relu when the conv layers use relu
        if (act == 'relu'):
            self.act = get_nn_act_func('lrelu')
        else:
            self.act = get_nn_act_func(act)
        self.drop_prob = drop_prob
        # DropBlock probability is ramped from 0 to drop_prob over 5000 steps
        self.dropblock = LinearScheduler(DropBlock2D(drop_prob=drop_prob, block_size=dropblock_size), start_value=0.0, stop_value=drop_prob, nr_steps=5000)
        self.features = nn.ModuleList()
        # downsampling conv stack (stride 2), then optional stride-1 layers
        for i in range(num_stride2_layers):
            _in_channels = (nIn if (i == 0) else featdim)
            self.features.append(nn.Conv2d(_in_channels, featdim, kernel_size=3, stride=2, padding=1, bias=False))
            self.features.append(get_norm(norm, featdim, num_gn_groups=num_gn_groups))
            self.features.append(conv_act)
        for i in range(num_extra_layers):
            self.features.append(nn.Conv2d(featdim, featdim, kernel_size=3, stride=1, padding=1, bias=False))
            self.features.append(get_norm(norm, featdim, num_gn_groups=num_gn_groups))
            self.features.append(conv_act)
        (final_h, final_w) = final_spatial_size
        # fc1 input size depends on how the conv features are flattened
        fc_in_dim = {'flatten': ((featdim * final_h) * final_w), 'avg': featdim, 'avg-max': (featdim * 2), 'avg-max-min': (featdim * 3)}[flat_op]
        # +128 for the embedded object extents (see extent_fc2 below)
        self.fc1 = nn.Linear((fc_in_dim + 128), 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.fc_r = nn.Linear(256, rot_dim)
        self.fc_t = nn.Linear(256, 3)
        # small MLP embedding the 3D extents into 128 dims
        self.extent_fc1 = nn.Linear(3, 64)
        self.extent_fc2 = nn.Linear(64, 128)
        # init: small normal weights everywhere; unit scale for norm layers;
        # even smaller std for the final rotation/translation heads
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Conv1d)):
                normal_init(m, std=0.001)
            elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                constant_init(m, 1)
            elif isinstance(m, nn.ConvTranspose2d):
                normal_init(m, std=0.001)
            elif isinstance(m, nn.Linear):
                normal_init(m, std=0.001)
        normal_init(self.fc_r, std=0.01)
        normal_init(self.fc_t, std=0.01)

    def forward(self, coor_feat, region=None, extents=None, mask_attention=None):
        """Predict (rot, t) from coordinate features, optionally with region
        maps, mask attention and per-object extents."""
        if (region is not None):
            x = torch.cat([coor_feat, region], dim=1)
        else:
            x = coor_feat
        if (self.mask_attention_type != 'none'):
            assert (mask_attention is not None)
            if (self.mask_attention_type == 'mul'):
                x = (x * mask_attention)
            elif (self.mask_attention_type == 'concat'):
                x = torch.cat([x, mask_attention], dim=1)
            else:
                raise ValueError(f'Wrong mask attention type: {self.mask_attention_type}')
        if (self.drop_prob > 0):
            # advance the DropBlock schedule once per forward pass
            self.dropblock.step()
            x = self.dropblock(x)
        for (_i, layer) in enumerate(self.features):
            x = layer(x)
        # collapse the spatial dimensions according to flat_op
        flat_conv_feat = x.flatten(2)
        if (self.flat_op == 'flatten'):
            flat_conv_feat = flat_conv_feat.flatten(1)
        elif (self.flat_op == 'avg'):
            flat_conv_feat = flat_conv_feat.mean((- 1))
        elif (self.flat_op == 'avg-max'):
            flat_conv_feat = torch.cat([flat_conv_feat.mean((- 1)), flat_conv_feat.max((- 1))[0]], dim=(- 1))
        elif (self.flat_op == 'avg-max-min'):
            flat_conv_feat = torch.cat([flat_conv_feat.mean((- 1)), flat_conv_feat.max((- 1))[0], flat_conv_feat.min((- 1))[0]], dim=(- 1))
        else:
            raise ValueError(f'Invalid flat_op: {self.flat_op}')
        # embed extents and concatenate with the flattened conv features
        x_extent = self.act(self.extent_fc1(extents))
        x_extent = self.act(self.extent_fc2(x_extent))
        x = torch.cat([flat_conv_feat, x_extent], dim=1)
        x = self.act(self.fc1(x))
        x = self.act(self.fc2(x))
        rot = self.fc_r(x)
        t = self.fc_t(x)
        return (rot, t)
class ResNet(nn.Module):
    """ResNet backbone with configurable per-stage strides and dilations,
    using frozen (fixed) batch normalization."""

    def __init__(self, block, layers, strides=(2, 2, 2, 2), dilations=(1, 1, 1, 1)):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=strides[0], padding=3, bias=False)
        self.bn1 = FixedBatchNorm(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1, dilation=dilations[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3])
        # NOTE(review): forward() also uses self.avgpool and self.fc, which
        # are never created here — presumably supplied by a subclass or
        # assigned externally; confirm before calling forward() directly.
        self.inplanes = 1024

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack ``blocks`` residual blocks; downsample when stride or width changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), FixedBatchNorm((planes * block.expansion)))
        # first block may downsample; it uses dilation=1 regardless of the
        # stage dilation, which only applies to the remaining blocks
        layers = [block(self.inplanes, planes, stride, downsample, dilation=1)]
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # see NOTE in __init__: avgpool and fc must be provided elsewhere
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
class PlayerShip(PhysicalObject):
    """The player-controlled ship sprite backed by a Box2D dynamic body."""

    def __init__(self, *args, **kwargs):
        super(PlayerShip, self).__init__('ship.png', *args, **kwargs)

    def create_physical_entity(self):
        """Create the ship's Box2D body, constrained to horizontal motion."""
        # heavy linear damping and fixed rotation keep the ship controllable
        body = self._engine.CreateDynamicBody(position=self.physical_position, linearDamping=0.99, fixedRotation=True)
        # collision box uses half-extents expressed in physical (world) units
        body.CreatePolygonFixture(box=(((self.width / 2.0) / self._world.physical_scale), ((self.height / 2.0) / self._world.physical_scale)), density=1.0, friction=0.0, restitution=0.0)
        # prismatic joint restricts the body to sliding along the x axis
        joint = box_2d.b2PrismaticJointDef()
        joint.Initialize(body, self._world.ground, body.worldCenter, (1.0, 0.0))
        joint.collideConnected = True
        self._engine.CreateJoint(joint)
        return body
def train_wrapper_reg(_paramsList, _GPU_ID):
    """Dispatch each parameter tuple to the matching regression training routine."""
    for (pIdx, params) in enumerate(_paramsList):
        print(('===[%d/%d]===' % (pIdx, len(_paramsList))))
        # unpack: training mode, data type, outlier rate, variance
        _trainMode = params[0]
        _dataType = params[1]
        _oRate = params[2]
        _var = params[3]
        # select the runner for this training mode
        runners = {'CN': run_cn, 'MDN': run_mdn, 'MLP': run_mlp}
        runner = runners.get(_trainMode)
        if runner is not None:
            runner(_trainMode, _dataType, _oRate, _var, _GPU_ID)
        else:
            print(('Unknown trainMode:[%s].' % _trainMode))
class DataCollatorMixin():
    """Mixin dispatching ``__call__`` to the framework-specific collation method.

    The target framework comes from ``return_tensors`` (argument, falling
    back to the instance attribute): 'tf' -> tf_call, 'pt' -> torch_call,
    'np' -> numpy_call; anything else raises ValueError.
    """

    def __call__(self, features, return_tensors=None):
        tensor_format = return_tensors if return_tensors is not None else self.return_tensors
        if tensor_format == 'tf':
            return self.tf_call(features)
        if tensor_format == 'pt':
            return self.torch_call(features)
        if tensor_format == 'np':
            return self.numpy_call(features)
        raise ValueError(f"Framework '{tensor_format}' not recognized!")
def DepRound(weights_p, k=1, isWeights=True):
    """Dependent rounding: draw a size-k subset whose inclusion marginals
    follow the (normalized) weight vector weights_p.

    Repeatedly picks two fractional coordinates and shifts probability mass
    between them so that one of them reaches 0 or 1, preserving the
    expected marginals at every step.
    """
    p = np.array(weights_p)
    K = len(p)
    assert (k < K), 'Error: k = {} should be < K = {}.'.format(k, K)
    if (not np.isclose(np.sum(p), 1)):
        # normalize so the weights form a probability vector
        p = (p / np.sum(p))
    assert (np.all((0 <= p)) and np.all((p <= 1))), 'Error: the weights (p_1, ..., p_K) should all be 0 <= p_i <= 1 ...'.format(p)
    assert np.isclose(np.sum(p), 1), 'Error: the sum of weights p_1 + ... + p_K should be = 1 (= {}).'.format(np.sum(p))
    # coordinates still strictly between 0 and 1 (not yet rounded)
    possible_ij = [a for a in range(K) if (0 < p[a] < 1)]
    while possible_ij:
        if (len(possible_ij) == 1):
            # NOTE(review): if p sums exactly to 1 a single fractional
            # coordinate cannot remain; should float error reach this
            # branch, the `assert (i != j)` below would fail — confirm
            # the intended behavior here.
            i = np.random.choice(possible_ij, size=1)
            j = i
        else:
            (i, j) = np.random.choice(possible_ij, size=2, replace=False)
        (pi, pj) = (p[i], p[j])
        assert (0 < pi < 1), 'Error: pi = {} (with i = {}) is not 0 < pi < 1.'.format(pi, i)
        assert (0 < pj < 1), 'Error: pj = {} (with j = {}) is not 0 < pj < 1.'.format(pj, i)
        assert (i != j), 'Error: i = {} is different than with j = {}.'.format(i, j)
        # shift mass alpha one way with probability alpha/(alpha+beta),
        # otherwise beta the other way: marginals preserved in expectation
        (alpha, beta) = (min((1 - pi), pj), min(pi, (1 - pj)))
        proba = (alpha / (alpha + beta))
        if with_proba(proba):
            (pi, pj) = ((pi + alpha), (pj - alpha))
        else:
            (pi, pj) = ((pi - beta), (pj + beta))
        (p[i], p[j]) = (pi, pj)
        possible_ij = [a for a in range(K) if (0 < p[a] < 1)]
        # stop early once exactly K - k coordinates have been rounded to 0
        if (len([a for a in range(K) if np.isclose(p[a], 0)]) == (K - k)):
            break
    # the chosen subset: coordinates rounded (close) to 1; fall back to all
    # not-zero coordinates when fewer than k reached exactly 1
    subset = [a for a in range(K) if np.isclose(p[a], 1)]
    if (len(subset) < k):
        subset = [a for a in range(K) if (not np.isclose(p[a], 0))]
    assert (len(subset) == k), 'Error: DepRound({}, {}) is supposed to return a set of size {}, but {} has size {}...'.format(weights_p, k, k, subset, len(subset))
    return subset
def make_monic(f):
    """Rescale a polynomial into a monic one with denominator-free coefficients.

    Returns (g, d) where g is obtained from f by dividing through by the
    leading coefficient and substituting x -> x/d, with d a small integer
    chosen (via trial-division factoring of the coefficient denominators)
    so that all coefficients of g are integral.
    """
    R = f.parent()
    n = f.degree()
    lc = f[n]
    d = ZZ.one()
    for i in range(n):
        expo = (n - i)
        # denominator of coefficient i after rescaling by the current d
        # and dividing by the leading coefficient
        den = (((d ** expo) * f[i]) / lc).denominator()
        for (p, e) in factor_trial_division(den, 1000000):
            # multiply d by p^ceil(e / expo), the smallest power whose
            # expo-th power clears p^e from this denominator
            d *= (p ** (((e + expo) - 1) // expo))
    g = R([(((d ** (n - i)) * f[i]) / lc) for i in range((n + 1))])
    return (g, d)