code
stringlengths
101
5.91M
def process_cfg(cfg_dir, java_dir, final_cfg_dir):
    """Convert raw CFG text dumps plus their Java sources into per-method JSON node lists.

    For every ``*.txt`` file in `cfg_dir`, looks up the matching
    ``<id>.java`` file in `java_dir` (id = filename stem with 'A' stripped),
    attaches the source lines referenced by each CFG node, and writes one
    JSON object per node (id, source_code, next_nodes) to
    ``final_cfg_dir/<id>.json``.

    NOTE(review): changes the process CWD via os.chdir and leaves it there.
    """
    cfg_file_list = os.listdir(cfg_dir)
    os.chdir(cfg_dir)
    for item in tqdm(cfg_file_list):
        if (item.find('.txt') > 0):
            cfg_file = open(item, encoding='utf-8')
            # File name 'A<id>.txt' -> '<id>' used to locate the Java source.
            cfg_id = item.split('.')[0].replace('A', '')
            try:
                java_file = open(((java_dir + cfg_id) + '.java'), encoding='utf-8')
            except FileNotFoundError:
                # Missing Java source: report the id and skip this CFG.
                print(cfg_id)
                continue
            source_code = java_file.readlines()
            # Nodes are ';'-separated records of ','-separated attributes.
            nodes = cfg_file.read().split(';')
            for i in range(len(nodes)):
                nodes[i] = nodes[i].split(',')
            skip_empty_node(nodes)
            new_nodes = ''
            for i in range(len(nodes)):
                # Drop interior nodes with an empty third attribute
                # (first and last nodes are always kept).
                if ((i != 0) and (i != (len(nodes) - 1)) and (nodes[i][2] == '')):
                    continue
                node_attrs = nodes[i]
                # attrs[1]/attrs[3] are 1-based start/end line numbers into the Java file.
                code_list = ('' if (node_attrs[1] == '') else source_code[(int(node_attrs[1]) - 1):int(node_attrs[3])])
                code = ''
                for j in range(len(code_list)):
                    code_list[j] = code_list[j].replace('\t', '').replace('\n', '').strip()
                    code += code_list[j]
                    if (j != (len(code_list) - 1)):
                        code += '\n'
                # Successor node ids, de-duplicated (order not preserved).
                next_nodes = list(set(node_attrs[6:]))
                # NOTE(review): i ranges over range(len(nodes)), so this
                # condition is always True -- looks like dead/leftover code.
                if (i != len(nodes)):
                    new_nodes += (json.dumps({'id': str((i + 1)), 'source_code': code.replace('\n', ''), 'next_nodes': next_nodes}) + '\n')
            new_cfg_file = open(((final_cfg_dir + cfg_id) + '.json'), 'w', encoding='utf-8')
            new_cfg_file.write(new_nodes)
            new_cfg_file.close()
            java_file.close()
            cfg_file.close()
class AttentionBlock(nn.Module):
    """Decoder-side fusion block: upsample the coarse feature map 2x,
    concatenate the skip connection, then apply channel attention,
    a 3x3 conv, and ReLU.
    """

    def __init__(self, in_ch: int, skip_ch: int, out_ch: ty.N[int]=None, upsample_mode: str='nearest'):
        super().__init__()
        # After concatenation the block sees in_ch + skip_ch channels.
        self.in_ch = in_ch + skip_ch
        self.out_ch = out_ch or in_ch
        self.upsample_mode = upsample_mode
        self.layers = nn.Sequential(
            ChannelAttention(self.in_ch),
            conv3x3(self.in_ch, self.out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, x, x_skip):
        # Bring x to the skip connection's spatial resolution, then fuse.
        upsampled = F.interpolate(x, scale_factor=2, mode=self.upsample_mode)
        fused = torch.cat((upsampled, x_skip), dim=1)
        return self.layers(fused)
def sklearn_OutputCodeClassifier(*args, **kwargs):
    """Pass-through factory for ``sklearn.multiclass.OutputCodeClassifier``."""
    classifier = sklearn.multiclass.OutputCodeClassifier(*args, **kwargs)
    return classifier
def _check_for_nans(metrics: Dict, new_params: P) -> chex.Numeric:
    """Return a traced boolean: True if any metric or any parameter is NaN."""
    flat_params, _ = jax.flatten_util.ravel_pytree(new_params)
    params_have_nan = jnp.any(jnp.isnan(flat_params))
    metrics_have_nan = _check_metrics_for_nans(metrics)
    return jnp.logical_or(metrics_have_nan, params_have_nan)
def Nnet3DescriptorToDot(descriptor, parent_node_name):
    """Render an nnet3 descriptor as a list of GraphViz dot statement lines.

    Nested segments are expanded recursively via DescriptorSegmentToDot;
    a descriptor with no nesting must be a single node reference and
    becomes one direct edge into the parent node.
    """
    dot_lines = []
    [segments, arguments] = descriptor_parser.IdentifyNestedSegments(descriptor)
    if segments:
        for segment in segments:
            dot_lines.extend(DescriptorSegmentToDot(segment, parent_node_name, parent_node_name))
    elif arguments:
        # No nesting: exactly one bare node reference is expected.
        assert (len(arguments) == 1)
        edge = '{0} -> {1}'.format(GetDotNodeName(arguments[0])['node'], GetDotNodeName(parent_node_name)['node'])
        dot_lines.append(edge)
    return dot_lines
def worker(args):
    """Extract frames from one video at a target sample rate.

    args is a tuple (video_name, video_path, out_dir, sample_fps). Frames are
    resized to TARGET_WIDTH x TARGET_HEIGHT and written as numbered JPEGs to
    out_dir; the effective output fps is recorded in out_dir/fps.txt.
    If the reported frame count is wrong (common with broken containers),
    the fps is re-estimated from decoded frames and the pass is retried.
    """
    (video_name, video_path, out_dir, sample_fps) = args

    def get_stride(src_fps):
        # Keep every stride-th frame; sample_fps <= 0 means keep all frames.
        if (sample_fps <= 0):
            stride = 1
        else:
            stride = int((src_fps / sample_fps))
        return stride
    vc = cv2.VideoCapture(video_path)
    fps = vc.get(cv2.CAP_PROP_FPS)
    num_frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
    w = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
    oh = TARGET_HEIGHT
    ow = TARGET_WIDTH
    time_in_s = get_duration(video_path)
    fps_path = None
    if (out_dir is not None):
        fps_path = os.path.join(out_dir, 'fps.txt')
        if (not RECALC_FPS_ONLY):
            # Normal extraction mode: fps.txt marks a completed video.
            if os.path.exists(fps_path):
                print('Already done:', video_name)
                vc.release()
                return
        # Recalc mode: skip if the stored fps already matches.
        elif (str(read_fps(out_dir)) == str((fps / get_stride(fps)))):
            print('FPS is already consistent:', video_name)
            vc.release()
            return
        else:
            print('Inconsistent FPS:', video_name)
        os.makedirs(out_dir, exist_ok=True)
    not_done = True
    while not_done:
        stride = get_stride(fps)
        est_out_fps = (fps / stride)
        print('{} -- effective fps: {} (stride: {})'.format(video_name, est_out_fps, stride))
        out_frame_num = 0
        i = 0
        while True:
            (ret, frame) = vc.read()
            if (not ret):
                # Decoder stopped early: the container's frame count lied.
                if (i != num_frames):
                    print('Failed to decode: {} -- {} / {}'.format(video_path, i, num_frames))
                    if ((i + FRAME_RETRY_THRESHOLD) < num_frames):
                        # Big discrepancy: re-estimate fps from decoded frames.
                        num_frames = i
                        adj_fps = (num_frames / time_in_s)
                        if (get_stride(adj_fps) == stride):
                            # Same stride -> frames already written are valid.
                            not_done = False
                        else:
                            # Stride changed: rewind and redo the whole pass.
                            print('Retrying:', video_path)
                            vc.set(cv2.CAP_PROP_POS_FRAMES, 0)
                            fps = adj_fps
                    else:
                        not_done = False
                else:
                    not_done = False
                break
            if ((i % stride) == 0):
                if (not RECALC_FPS_ONLY):
                    if ((frame.shape[0] != oh) or (frame.shape[1] != ow)):
                        frame = cv2.resize(frame, (ow, oh))
                    if (out_dir is not None):
                        frame_path = os.path.join(out_dir, '{:06d}.jpg'.format(out_frame_num))
                        cv2.imwrite(frame_path, frame)
                out_frame_num += 1
            i += 1
    vc.release()
    out_fps = (fps / get_stride(fps))
    if (fps_path is not None):
        with open(fps_path, 'w') as fp:
            fp.write(str(out_fps))
    print('{} - done'.format(video_name))
def filter_roidb(roidb):
    """Remove roidb entries that have no ground-truth boxes.

    Filters in place (callers holding a reference to the list see the
    change, as with the original del-based loop) and also returns the list.

    Args:
        roidb: list of dicts, each with a 'boxes' sequence.

    Returns:
        The same list object, containing only entries with >= 1 box.
    """
    print(('before filtering, there are %d images...' % len(roidb)))
    # Single O(n) pass via slice assignment instead of repeated O(n)
    # `del roidb[i]` from the middle of the list.
    roidb[:] = [entry for entry in roidb if len(entry['boxes']) > 0]
    print(('after filtering, there are %d images...' % len(roidb)))
    return roidb
def test_hist():
    """Check native histogram binning against numpy.histogram.

    Draws 1000 uniform samples, computes a numpy 'doane' reference
    histogram, then prepends a NaN and verifies that the native
    uniform-cut discretization (a) routes the NaN to the dedicated
    missing-value bin 0, and (b) reproduces numpy's counts and edges.
    """
    np.random.seed(0)
    X_col = np.random.random_sample((1000,))
    # Reference binning computed BEFORE the NaN is added.
    (counts, vals) = np.histogram(X_col, bins='doane')
    # The NaN should land in the missing-value bin, not a value bin.
    X_col = np.concatenate(([np.nan], X_col))
    native = Native.get_native_singleton()
    n_cuts = native.get_histogram_cut_count(X_col)
    cuts = native.cut_uniform(X_col, n_cuts)
    bin_indexes = native.discretize(X_col, cuts)
    # minlength = missing bin + (len(cuts) + 1) value bins.
    bin_counts = np.bincount(bin_indexes, minlength=(len(cuts) + 2))
    edges = np.concatenate(([np.nanmin(X_col)], cuts, [np.nanmax(X_col)]))
    assert (bin_counts[0] == 1)  # exactly the one injected NaN
    assert (np.sum(bin_counts) == (1000 + 1))
    bin_counts = bin_counts[1:]  # drop the missing-value bin before comparing
    assert np.array_equal(counts, bin_counts)
    assert np.allclose(vals, edges)
def get_obj_label(key):
    """Turn a snake_case key into a display label, e.g. 'traffic_light' -> 'Traffic Light'.

    Only the first character of each word is upper-cased; the rest is kept
    verbatim so embedded capitals survive. Empty segments (from doubled or
    leading/trailing underscores) are passed through unchanged instead of
    raising IndexError as the original `w[0]` did.
    """
    words = key.split('_')
    return ' '.join((w[0].upper() + w[1:]) if w else w for w in words)
def add_self_bond(bond_features):
    """Prepend-to-channels an identity 'self-bond' matrix.

    Accepts either an (N, N, C) channels-last tensor or a single (N, N)
    matrix; returns a channels-first array with one extra channel holding
    the identity (each atom bonded to itself).
    """
    if bond_features.ndim == 3:
        # Move channels first, then append the identity as one more channel.
        channels_first = np.transpose(bond_features, (2, 0, 1))
        eye = np.identity(channels_first.shape[2])
        return np.concatenate((channels_first, eye[np.newaxis]), axis=0)
    eye = np.identity(bond_features.shape[1])
    return np.concatenate((bond_features[np.newaxis], eye[np.newaxis]), axis=0)
def custom_draw_geometry_load_option(pcd, render_option_path):
    """Show `pcd` in an interactive Open3D window with render options
    loaded from a JSON file; blocks until the window is closed.
    """
    visualizer = o3d.visualization.Visualizer()
    visualizer.create_window()
    visualizer.add_geometry(pcd)
    # Apply user-supplied render settings (point size, colors, ...).
    visualizer.get_render_option().load_from_json(render_option_path)
    visualizer.run()
    visualizer.destroy_window()
class TestDagDrawer(QiskitTestCase):
    """Tests for the DAG circuit drawer."""

    def setUp(self):
        # Two back-to-back CNOTs on a 2-qubit register.
        register = QuantumRegister(2, 'qr')
        circ = QuantumCircuit(register)
        circ.cx(register[0], register[1])
        circ.cx(register[0], register[1])
        self.dag = circuit_to_dag(circ)

    def test_dag_drawer_no_graphviz(self):
        # With graphviz unavailable, the drawer must raise a clear error
        # instead of failing somewhere deeper.
        patched = unittest.mock.patch('nxpd.pydot.find_graphviz', return_value=None)
        with patched as _:
            self.assertRaises(VisualizationError, dag_drawer, self.dag)
def simu_data(n, p, rho=0.25, snr=2.0, sparsity=0.06, effect=1.0, seed=None):
    """Simulate a sparse linear regression dataset y = X @ beta + noise.

    Args:
        n: number of samples.
        p: number of features.
        rho: AR(1) correlation; Sigma[i, j] = rho ** |i - j|.
        snr: signal-to-noise ratio ||X beta|| / ||noise||.
        sparsity: fraction of non-zero coefficients; k = int(sparsity * p).
        effect: value assigned to the non-zero coefficients.
        seed: RNG seed for reproducibility.

    Returns:
        (X, y, beta_true, non_zero): design matrix, response,
        true coefficients, and the k support indices.
    """
    rng = np.random.default_rng(seed)
    k = int(sparsity * p)
    mu = np.zeros(p)
    # Toeplitz AR(1)-style covariance for correlated features.
    Sigma = toeplitz(rho ** np.arange(0, p))
    X = rng.multivariate_normal(mu, Sigma, size=n)
    # Sample WITHOUT replacement so the support truly has k distinct
    # coefficients (the original rng.choice(p, k) could repeat indices,
    # silently producing fewer than k non-zeros).
    non_zero = rng.choice(p, k, replace=False)
    beta_true = np.zeros(p)
    beta_true[non_zero] = effect
    eps = rng.standard_normal(size=n)
    prod_temp = np.dot(X, beta_true)
    # Scale the noise so ||X beta|| / (snr * ||eps||) gives the requested SNR.
    noise_mag = np.linalg.norm(prod_temp) / (snr * np.linalg.norm(eps))
    y = prod_temp + noise_mag * eps
    return (X, y, beta_true, non_zero)
class SentUnit():
    """One sentence's worth of BPE tokens plus sentence-level bookkeeping."""

    def __init__(self, sent_index, raw_words, list_of_bpes, discourse_bag):
        self.sent_index = sent_index
        self.raw_words = raw_words
        # Flatten per-word BPE pieces into a single token stream.
        self.bpes = list(itertools.chain.from_iterable(list_of_bpes))
        # Sentinel: prefix length not computed yet.
        self.prefix_len = -1
        self.discourse_bag = discourse_bag

    def get_bpe_w_cls_sep(self):
        """Tokens wrapped with BERT-style [CLS] ... [SEP] markers."""
        return ['[CLS]', *self.bpes, '[SEP]']

    def get_length_w_pad(self):
        """Token count including the two wrapper markers."""
        return len(self.bpes) + 2
class SepFormer(nn.Module):
    """SepFormer masking network: stacked dual-path (intra-/inter-chunk)
    transformers that estimate `n_src` masks over an encoded mixture.

    Data flow in `forward`: normalize -> linear embedding -> unfold into
    overlapping chunks -> alternating intra/inter transformer passes ->
    PReLU + Conv2d head -> overlap-add fold -> frame-wise 2-layer FFN ->
    mask activation.
    """

    def __init__(self, in_chan, n_src, n_heads=8, ff_hid=1024, chunk_size=250, hop_size=None, n_repeats=8, n_blocks=2, norm_type='gLN', ff_activation='relu', mask_act='relu', bidirectional=True, dropout=0):
        super(SepFormer, self).__init__()
        self.in_chan = in_chan
        self.n_src = n_src
        self.n_heads = n_heads
        self.ff_hid = ff_hid
        self.chunk_size = chunk_size
        # Default to 50% chunk overlap.
        hop_size = (hop_size if (hop_size is not None) else (chunk_size // 2))
        self.hop_size = hop_size
        self.n_repeats = n_repeats
        self.n_blocks = n_blocks
        self.n_src = n_src  # NOTE(review): redundant re-assignment kept from original
        self.norm_type = norm_type
        self.ff_activation = ff_activation
        self.mask_act = mask_act
        self.bidirectional = bidirectional
        self.dropout = dropout
        # Transformers operate in a ff_hid-dimensional embedding space.
        self.mha_in_dim = ff_hid
        self.in_norm = norms.get(norm_type)(self.in_chan)
        self.sepformer_input_layer = nn.Linear(self.in_chan, self.mha_in_dim)
        # Handles chunking (unfold) and overlap-add (fold/intra/inter).
        self.ola = DualPathProcessing(self.chunk_size, self.hop_size)
        # n_blocks blocks, each holding n_repeats intra- and inter-chunk layers.
        self.sepformer_layers = nn.ModuleList([])
        for n_block in range(self.n_blocks):
            sepformer_block = nn.ModuleList([])
            intra_layers = nn.ModuleList([])
            inter_layers = nn.ModuleList([])
            for repeat in range(self.n_repeats):
                intra_layers.append(SepFormerTransformerLayer(self.mha_in_dim, self.n_heads, self.ff_hid, self.dropout, self.ff_activation, self.norm_type))
            for repeat in range(self.n_repeats):
                inter_layers.append(SepFormerTransformerLayer(self.mha_in_dim, self.n_heads, self.ff_hid, self.dropout, self.ff_activation, self.norm_type))
            sepformer_block.append(intra_layers)
            sepformer_block.append(inter_layers)
            self.sepformer_layers.append(sepformer_block)
        # Project to one in_chan-sized map per source.
        net_out_conv = nn.Conv2d(self.mha_in_dim, (n_src * self.in_chan), 1)
        self.first_out = nn.Sequential(nn.PReLU(), net_out_conv)
        self.sepformer_last_layer1 = nn.Linear(self.in_chan, self.in_chan)
        self.relu = nn.ReLU()
        self.sepformer_last_layer2 = nn.Linear(self.in_chan, self.in_chan)
        # Mask nonlinearity; pass dim=1 when the activation supports it
        # (e.g. softmax over sources).
        mask_nl_class = activations.get(mask_act)
        if has_arg(mask_nl_class, 'dim'):
            self.output_act = mask_nl_class(dim=1)
        else:
            self.output_act = mask_nl_class()
        self.apply(self._init_weights)

    def forward(self, mixture_w):
        """Estimate masks for an encoded mixture.

        Args:
            mixture_w: (batch, in_chan, n_frames) encoded mixture.

        Returns:
            est_mask: (batch, n_src, in_chan, n_frames) mask tensor.
        """
        mixture_w = self.in_norm(mixture_w)
        n_orig_frames = mixture_w.shape[(- 1)]
        # Per-frame linear embedding (transpose channels last and back).
        mixture_w = self.sepformer_input_layer(mixture_w.transpose(1, (- 1))).transpose(1, (- 1))
        # -> (batch, mha_in_dim, chunk_size, n_chunks)
        mixture_w = self.ola.unfold(mixture_w)
        # NOTE(review): this also overwrites self.chunk_size with the
        # unfolded chunk dimension (kept from original).
        (batch, n_filters, self.chunk_size, n_chunks) = mixture_w.size()
        for block_idx in range(len(self.sepformer_layers)):
            block = self.sepformer_layers[block_idx]
            # Intra-chunk passes (within each chunk), then inter-chunk
            # passes (across chunks).
            intra_blocks = block[0]
            for transformer_idx in range(self.n_repeats):
                mixture_w = self.ola.intra_process(mixture_w, intra_blocks[transformer_idx])
            inter_blocks = block[1]
            for transformer_idx in range(self.n_repeats):
                mixture_w = self.ola.inter_process(mixture_w, inter_blocks[transformer_idx])
        output = self.first_out(mixture_w)
        # Separate per-source maps before overlap-add reconstruction.
        output = output.reshape((batch * self.n_src), self.in_chan, self.chunk_size, n_chunks)
        output = self.ola.fold(output, output_size=n_orig_frames)
        output = output.reshape(batch, self.n_src, self.in_chan, (- 1))
        # Frame-wise Linear-ReLU-Linear refinement over the channel axis.
        output = self.sepformer_last_layer2(self.relu(self.sepformer_last_layer1(output.transpose((- 1), (- 2))))).transpose((- 1), (- 2))
        est_mask = self.output_act(output)
        return est_mask

    def get_config(self):
        """Return the hyper-parameter dict sufficient to re-instantiate this module."""
        config = {'in_chan': self.in_chan, 'ff_hid': self.ff_hid, 'n_heads': self.n_heads, 'chunk_size': self.chunk_size, 'hop_size': self.hop_size, 'n_repeats': self.n_repeats, 'n_src': self.n_src, 'norm_type': self.norm_type, 'ff_activation': self.ff_activation, 'mask_act': self.mask_act, 'bidirectional': self.bidirectional, 'dropout': self.dropout}
        return config

    def _init_weights(self, module):
        # Uniform(-1/in_features, 1/in_features) initialization for Linear layers.
        if isinstance(module, nn.Linear):
            nn.init.uniform_(module.weight, a=((- 1) / module.in_features), b=(1 / module.in_features))
def get_cov_left_right(cov_diag, k):
    """Build left/right low-rank factors keeping only the top-k diagonal
    covariance entries.

    cov_diag: presumably a (batch, d) tensor of per-dimension variances
    (TODO confirm rank against callers). Returns [cov_left, cov_right]
    where cov_right gathers the identity rows at the top-k positions and
    cov_left is that selector transposed and scaled by diag(top-k values),
    so that cov_left @ cov_right reconstructs the truncated diagonal.
    """
    shape_cov = tf.shape(cov_diag)
    # Per-row indices of the k largest variances.
    (_, top_idx) = tf.nn.top_k(cov_diag, k=k)
    (ii, _) = tf.meshgrid(tf.range(shape_cov[0]), tf.range(k), indexing='ij')
    # Pair each top index with its batch row for gather_nd.
    top_idx = tf.stack([ii, top_idx], axis=(- 1))
    top_k = tf.gather_nd(cov_diag, top_idx)
    top_k_cov = tf.linalg.diag(top_k)
    # Identity rows at the selected positions form a (batch, k, d) selector.
    eye = tf.eye(shape_cov[(- 1)], batch_shape=[shape_cov[0]])
    eye_top_rows = tf.gather_nd(eye, top_idx)
    shape_eye = eye_top_rows.get_shape().as_list()
    # Einsum reduction string built dynamically for the batched matmul.
    red_string = red_string_matmul(eye_top_rows, top_k_cov)
    # Permutation that swaps the last two axes (batched transpose).
    permutation = list(np.arange(len(shape_eye)))
    permutation = ((permutation[:(- 2)] + permutation[(- 1):]) + permutation[(- 2):(- 1)])
    cov_left = tf.einsum(red_string, tf.transpose(eye_top_rows, perm=permutation), top_k_cov)
    cov_right = eye_top_rows
    return [cov_left, cov_right]
def read_filenames(root_dir):
    """Index VCTK-style wav files under root_dir by speaker id.

    Scans root_dir/*/* for files named ``p<speaker>_<utt>.wav`` and returns
    a dict mapping speaker_id (string, no 'p' prefix) to a sorted list of
    full paths. Files whose names do not match the pattern are skipped
    (the original crashed with AttributeError on `None.groups()`).
    """
    # Compile once instead of re-matching the pattern string per file.
    name_re = re.compile(r'p(\d+)_(\d+)\.wav')
    speaker2filenames = defaultdict(list)
    for path in sorted(glob.glob(os.path.join(root_dir, '*/*'))):
        filename = os.path.basename(path.strip())
        match = name_re.match(filename)
        if match is None:
            # Non-conforming file (e.g. README, logs): skip instead of raising.
            continue
        speaker_id, _utt_id = match.groups()
        speaker2filenames[speaker_id].append(path)
    return speaker2filenames
class Output():
    """Head emitting a prediction branch and a residual feature branch.

    Calling the instance returns [loss_outputs, res_outputs]: the raw
    prediction map plus a residual feature map that merges the loss branch,
    a parallel 1x1-conv branch, and the block input.
    """

    def __init__(self, n_output_channels, filters):
        self.n_output_channels = n_output_channels
        self.filters = filters
        # Default conv: 1x1, ReLU, same padding, no bias.
        pointwise = partial(Conv2D, kernel_size=(1, 1), activation='relu', padding='same', use_bias=False)
        self.input_bn = BatchNormalization()
        self.input_conv = pointwise(filters)
        self.loss_bn = BatchNormalization()
        # Actual prediction layer: linear activation, with bias.
        self.loss_output = pointwise(n_output_channels, activation='linear', use_bias=True)
        self.loss_res_bn = BatchNormalization()
        self.loss_res_conv = pointwise(filters)
        self.conv_1x1_bn = BatchNormalization()
        self.conv_1x1 = pointwise(filters)
        self.res_add_bn_loss = BatchNormalization()
        self.res_add_bn_conv = BatchNormalization()
        self.res_add_bn_identity = BatchNormalization()
        self.res_add = Add()

    def __call__(self, inputs):
        stem = self.input_conv(self.input_bn(inputs))
        # Loss branch: BN -> prediction, then re-embed the prediction.
        loss_outputs = self.loss_output(self.loss_bn(stem))
        loss_feat = self.loss_res_conv(self.loss_res_bn(loss_outputs))
        # Parallel 1x1-conv branch off the stem.
        conv_feat = self.conv_1x1(self.conv_1x1_bn(stem))
        # Normalize each residual input separately before summing.
        res_outputs = self.res_add([
            self.res_add_bn_loss(loss_feat),
            self.res_add_bn_conv(conv_feat),
            self.res_add_bn_identity(inputs),
        ])
        return [loss_outputs, res_outputs]
class TFConvBertForQuestionAnswering(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error when the TensorFlow
    backend is not installed instead of failing with an ImportError later.
    """
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative install hint if 'tf' is absent.
        requires_backends(self, ['tf'])
class Anchor():
    """Tracks horizontal slot occupancy for one wire in a folded circuit plot.

    `fold` is the number of slots per row before the drawing wraps;
    `yind` is the wire's y coordinate on the first row and `reg_num`
    the number of wires per row (used to offset folded rows).
    """

    def __init__(self, reg_num, yind, fold):
        self.__yind = yind
        self.__fold = fold
        self.__reg_num = reg_num
        self.__gate_placed = []   # occupied slot indices, kept sorted
        self.gate_anchor = 0

    def plot_coord(self, index, gate_width):
        """Return the (x, y) plot position of a gate occupying
        `gate_width` slots starting at `index`, folding to the next row
        when the gate would spill past the fold boundary.
        """
        h_pos = index % self.__fold + 1
        if self.__fold > 0:
            # Push the gate to the next row if it crosses the fold edge.
            if h_pos + (gate_width - 1) > self.__fold:
                index += self.__fold - (h_pos - 1)
            x_pos = index % self.__fold + 1 + 0.5 * (gate_width - 1)
            y_pos = self.__yind - (index // self.__fold) * (self.__reg_num + 1)
        else:
            x_pos = index + 1 + 0.5 * (gate_width - 1)
            y_pos = self.__yind
        self.gate_anchor = index
        return (x_pos, y_pos)

    def is_locatable(self, index, gate_width):
        """True if all slots index .. index+gate_width-1 are still free."""
        wanted = range(index, index + gate_width)
        return not any(slot in self.__gate_placed for slot in wanted)

    def set_index(self, index, gate_width):
        """Mark the gate's slots as occupied, folding past row boundaries."""
        h_pos = index % self.__fold + 1
        if h_pos + (gate_width - 1) > self.__fold:
            start = index + self.__fold - (h_pos - 1)
        else:
            start = index
        for offset in range(gate_width):
            slot = start + offset
            if slot not in self.__gate_placed:
                self.__gate_placed.append(slot)
        self.__gate_placed.sort()

    def get_index(self):
        """Next free slot after the last occupied one (0 if nothing placed)."""
        if self.__gate_placed:
            return self.__gate_placed[-1] + 1
        return 0
def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
    """Cross-entropy loss with optional element-wise weighting.

    Per-element losses are computed with reduction='none' so that `weight`
    and `avg_factor` can participate in the final reduction, which is
    delegated to weight_reduce_loss.
    """
    elementwise = F.cross_entropy(pred, label, reduction='none')
    if weight is not None:
        # weight_reduce_loss expects float weights.
        weight = weight.float()
    return weight_reduce_loss(elementwise, weight=weight, reduction=reduction, avg_factor=avg_factor)
class TestFSAFHead(TestCase):
    """Unit test for FSAFHead loss computation."""

    def test_fsaf_head_loss(self):
        """Box loss must be zero with no ground truth; both losses positive with one gt."""
        s = 300
        img_metas = [{'img_shape': (s, s), 'pad_shape': (s, s), 'scale_factor': 1}]
        cfg = Config(dict(assigner=dict(type='CenterRegionAssigner', pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01), allowed_border=(- 1), pos_weight=(- 1), debug=False))
        # Minimal 1-channel head so the test stays fast.
        fsaf_head = FSAFHead(num_classes=4, in_channels=1, stacked_convs=1, feat_channels=1, reg_decoded_bbox=True, anchor_generator=dict(type='AnchorGenerator', octave_base_scale=1, scales_per_octave=1, ratios=[1.0], strides=[8, 16, 32, 64, 128]), bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0), loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0, reduction='none'), loss_bbox=dict(type='IoULoss', eps=1e-06, loss_weight=1.0, reduction='none'), train_cfg=cfg)
        # One random feature map per FPN stride.
        feats = (torch.rand(1, 1, ceil((s / stride[0])), ceil((s / stride[0]))) for stride in fsaf_head.prior_generator.strides)
        (cls_scores, bbox_preds) = fsaf_head.forward(feats)
        # Case 1: no ground truth -> background cls loss > 0, box loss == 0.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds, [gt_instances], img_metas)
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertEqual(empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes')
        # Case 2: a single ground-truth box -> both losses strictly positive.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds, [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero')
@_lr_scheduler('fixed')
class FixedSchedule(FairseqLRScheduler):
    """Fixed learning-rate schedule with optional linear warmup and forced annealing.

    NOTE(review): the registration decorator had lost its '@' upstream (a
    bare `_lr_scheduler('fixed')` expression immediately before the class);
    restored here so the scheduler is actually registered.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        # Normalize a missing or None warmup_updates to 0.
        args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0
        self.lr = args.lr[0]
        if args.warmup_updates > 0:
            # Linearly scale up from warmup_factor to 1 over warmup_updates.
            self.warmup_factor = 1.0 / args.warmup_updates
        else:
            self.warmup_factor = 1

    @staticmethod
    def add_args(parser):
        """Add scheduler-specific CLI arguments.

        Declared @staticmethod per the fairseq scheduler convention — the
        original bare `def add_args(parser)` would misbind `parser` to
        `self` if ever called on an instance.
        """
        parser.add_argument('--force-anneal', '--fa', type=int, metavar='N', help='force annealing at specified epoch')
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')

    def get_next_lr(self, epoch):
        """LR for `epoch`: entries from --lr until force_anneal, then lr_shrink decay."""
        lrs = self.args.lr
        if (self.args.force_anneal is None) or (epoch < self.args.force_anneal):
            # Use the last configured LR once the list is exhausted.
            next_lr = lrs[min(epoch, len(lrs) - 1)]
        else:
            # Exponential anneal after the forced epoch.
            next_lr = lrs[-1] * (self.args.lr_shrink ** ((epoch + 1) - self.args.force_anneal))
        return next_lr

    def step(self, epoch, val_loss=None):
        """Epoch-level update; returns the (warmup-scaled) learning rate."""
        super().step(epoch, val_loss)
        self.lr = self.get_next_lr(epoch)
        self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Per-update hook: linear warmup during the first warmup_updates steps."""
        if (self.args.warmup_updates > 0) and (num_updates <= self.args.warmup_updates):
            self.warmup_factor = num_updates / float(self.args.warmup_updates)
            self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()
def get_ov_sut(model_path, preprocessed_data_dir, performance_count):
    """Factory for the OpenVINO 3D-UNet system-under-test wrapper."""
    sut = _3DUNET_OV_SUT(model_path, preprocessed_data_dir, performance_count)
    return sut
def preprocess_image(image_buffer, output_height, output_width, num_channels, is_training=False):
    """Decode a JPEG buffer and preprocess it for model input.

    Training path: fused decode + random crop + flip, then resize.
    Eval path: decode, aspect-preserving resize, central crop.
    The result is shape-pinned and mean-subtracted per channel.
    """
    if is_training:
        # Random-augmentation path.
        image = _resize_image(_decode_crop_and_flip(image_buffer, num_channels), output_height, output_width)
    else:
        decoded = tf.image.decode_jpeg(image_buffer, channels=num_channels)
        resized = _aspect_preserving_resize(decoded, _RESIZE_MIN)
        image = _central_crop(resized, output_height, output_width)
    # Pin the static shape so downstream graph ops see fixed dimensions.
    image.set_shape([output_height, output_width, num_channels])
    return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
def parse_distributed_args():
    """Parse command-line arguments for distributed FlowNMT training.

    Covers launcher topology (nodes/processes/master address), training
    hyper-parameters (batching, epochs, Adam settings, LR decay), and data
    options (languages, subword scheme, vocabulary).

    Returns:
        argparse.Namespace with all options.

    Fix: the --seed help text claimed 'default: 524287' while the actual
    default is 65537; the help string now matches the default.
    """
    parser = ArgumentParser(description='Dist FlowNMT')
    # Distributed-launcher topology.
    parser.add_argument('--nnodes', type=int, default=1, help='The number of nodes to use for distributed training')
    parser.add_argument('--node_rank', type=int, default=0, help='The rank of the node for multi-node distributed training')
    parser.add_argument('--nproc_per_node', type=int, default=1, help='The number of processes to launch on each node, for GPU training, this is recommended to be set to the number of GPUs in your system so that each process can be bound to a single GPU.')
    parser.add_argument('--master_addr', default='127.0.0.1', type=str, help="Master node (rank 0)'s address, should be either the IP address or the hostname of node 0, for single node multi-proc training, the --master_addr can simply be 127.0.0.1")
    parser.add_argument('--master_port', default=29500, type=int, help="Master node (rank 0)'s free port that needs to be used for communciation during distributed training")
    # Core training configuration.
    parser.add_argument('--config', type=str, help='config file', required=True)
    parser.add_argument('--batch_size', type=int, default=512, metavar='N', help='input batch size for training (default: 512)')
    parser.add_argument('--eval_batch_size', type=int, default=4, metavar='N', help='input batch size for eval (default: 4)')
    parser.add_argument('--init_batch_size', type=int, default=1024, metavar='N', help='number of instances for model initialization (default: 1024)')
    parser.add_argument('--batch_steps', type=int, default=1, metavar='N', help='number of steps for each batch (the batch size of each step is batch-size / steps (default: 1)')
    parser.add_argument('--epochs', type=int, default=500, metavar='N', help='number of epochs to train')
    parser.add_argument('--kl_warmup_steps', type=int, default=10000, metavar='N', help='number of steps to warm up KL weight(default: 10000)')
    parser.add_argument('--init_steps', type=int, default=5000, metavar='N', help='number of steps to train decoder (default: 5000)')
    parser.add_argument('--seed', type=int, default=65537, metavar='S', help='random seed (default: 65537)')
    parser.add_argument('--loss_type', choices=['sentence', 'token'], default='sentence', help='loss type (default: sentence)')
    parser.add_argument('--train_k', type=int, default=1, metavar='N', help='training K (default: 1)')
    parser.add_argument('--log_interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status')
    # Optimizer (Adam) and learning-rate schedule.
    parser.add_argument('--lr_decay', choices=['inv_sqrt', 'expo'], help='lr decay method', default='inv_sqrt')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
    parser.add_argument('--beta1', type=float, default=0.9, help='beta1 of Adam')
    parser.add_argument('--beta2', type=float, default=0.999, help='beta2 of Adam')
    parser.add_argument('--eps', type=float, default=1e-06, help='eps of Adam')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight for l2 norm decay')
    parser.add_argument('--amsgrad', action='store_true', help='AMS Grad')
    parser.add_argument('--grad_clip', type=float, default=0, help='max norm for gradient clip (default 0: no clip')
    # Paths and data options.
    parser.add_argument('--model_path', help='path for saving model file.', required=True)
    parser.add_argument('--data_path', help='path for data file.', default=None)
    parser.add_argument('--src', type=str, help='source language code', required=True)
    parser.add_argument('--tgt', type=str, help='target language code', required=True)
    parser.add_argument('--create_vocab', action='store_true', help='create vocabulary.')
    parser.add_argument('--share_all_embeddings', action='store_true', help='share source, target and output embeddings')
    parser.add_argument('--subword', type=str, default='joint-bpe', choices=['joint-bpe', 'sep-bpe', 'word', 'bert-bpe'])
    parser.add_argument('--bucket_batch', type=int, default=0, help='whether bucket data based on tgt length in batching')
    parser.add_argument('--recover', type=int, default=(- 1), help='recover the model from disk.')
    return parser.parse_args()
@dataclass
class NfCfg():
    """Normalizer-Free network architecture configuration.

    NOTE(review): the original block consisted solely of annotated fields
    with defaults but no `@dataclass` decorator, so `NfCfg(...)` accepted
    no arguments and the fields were bare annotations; the decorator is
    restored here so the config is constructible.
    """
    depths: Tuple[(int, int, int, int)]     # blocks per stage
    channels: Tuple[(int, int, int, int)]   # channels per stage
    alpha: float = 0.2                      # residual branch scaling
    stem_type: str = '3x3'
    stem_chs: Optional[int] = None
    group_size: Optional[int] = None
    attn_layer: Optional[str] = None
    attn_kwargs: dict = None                # None sentinel; filled by the model builder
    attn_gain: float = 2.0
    width_factor: float = 1.0
    bottle_ratio: float = 0.5
    num_features: int = 0
    ch_div: int = 8                         # round channel counts to this divisor
    reg: bool = False
    extra_conv: bool = False
    gamma_in_act: bool = False
    same_padding: bool = False
    std_conv_eps: float = 1e-05
    skipinit: bool = False
    zero_init_fc: bool = False
    act_layer: str = 'silu'
def hex_to_rgb(value):
    """Convert a hex color string (with or without '#') to an int tuple.

    Works for any digit count divisible by 3, e.g. '#ff0080' -> (255, 0, 128)
    and the short form '#fff' -> (15, 15, 15).
    """
    digits = value.lstrip('#')
    step = len(digits) // 3
    # Slice the string into three equal sections and parse each as hex.
    sections = (digits[start:start + step] for start in range(0, len(digits), step))
    return tuple(int(section, 16) for section in sections)
class BertGenerationConfig(PretrainedConfig):
    """Configuration for BertGeneration encoder/decoder models.

    Holds the standard BERT-style hyper-parameters; the special-token ids
    are forwarded to the PretrainedConfig base class.
    """
    model_type = 'bert-generation'

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, gradient_checkpointing=False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Embedding / encoder geometry.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        # Regularization and initialization.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.gradient_checkpointing = gradient_checkpointing
def get_python_logger() -> logging.Logger:
    """Configure and return the root logger with one bright-cyan stream handler.

    NOTE: mutates the *root* logger — any previously attached handlers are
    dropped.
    """
    root = logging.getLogger()
    root.handlers = []
    handler = logging.StreamHandler()
    # Colorized message-only format with HH:MM:SS timestamps.
    handler.setFormatter(logging.Formatter(f'{Fore.CYAN}{Style.BRIGHT}%(message)s', '%H:%M:%S'))
    root.addHandler(handler)
    root.setLevel('INFO')
    return root
def parse_args():
    """Parse command-line options for the CIFAR/pathfinder experiment."""
    parser = ArgumentParser(description='CIFAR')
    add = parser.add_argument
    add('--dataset', choices=['cifar10', 'pathfinder'], required=True)
    add('--resolution', type=int, default=None)
    add('--data_path', help='path for data file.', required=True)
    return parser.parse_args()
# NOTE(review): the three decorator lines below lost their '@<marker>.'
# prefix somewhere upstream (a leading '.parametrize(...)' is not valid
# Python); they were presumably '@pytest.mark.parametrize(...)' -- confirm
# against the original benchmark file before running.
.parametrize('size', list_sizes())
.parametrize('dtype', list_float_dtypes())
.parametrize('device', list_devices())
def test_hybrid_search(benchmark, size, dtype, device):
    """Benchmark Open3D hybrid (knn + radius) nearest-neighbour search.

    Builds the index once outside the timed region so only the query is
    measured.
    """
    nns_opt = dict(knn=1, radius=0.01)
    # Two random (size, 3) point clouds of the requested dtype.
    np_a = np.array(np.random.rand(size, 3), dtype=to_numpy_dtype(dtype))
    np_b = np.array(np.random.rand(size, 3), dtype=to_numpy_dtype(dtype))
    a = o3c.Tensor(np_a, dtype=dtype, device=device)
    b = o3c.Tensor(np_b, dtype=dtype, device=device)
    index = NNSOps.hybrid_setup(a, nns_opt)  # index build excluded from timing
    benchmark(NNSOps.hybrid_search, index, b, nns_opt)
class MetaTransformer_AD_ResBackBone(nn.Module):
    """Residual sparse-conv 3D voxel backbone (spconv) with dataset-aware norm.

    Encodes voxel features through four residual stages (16→32→64→128
    channels, 2x spatial downsampling between stages) plus an extra
    z-compressing output conv, and exposes both the 8x-downsampled encoded
    tensor and the multi-scale intermediate features in `batch_dict`.
    """

    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # Dataset-conditioned normalization; voxel_coord=True presumably
        # makes the norm coordinate-aware -- TODO confirm against UniNorm1d.
        norm_fn = partial(uni3d_norm_2_in.UniNorm1d, dataset_from_flag=int(self.model_cfg.db_source), eps=0.001, momentum=0.01, voxel_coord=True)
        # spconv expects (z, y, x); adds [1, 0, 0] to the reversed grid size
        # (element-wise if grid_size is an ndarray), padding one voxel in z.
        self.sparse_shape = (grid_size[::(- 1)] + [1, 0, 0])
        self.conv_input = spconv.SparseSequential(spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), norm_fn(16), nn.ReLU())
        block = post_act_block
        # Stage 1: two residual blocks at full resolution.
        self.conv1 = spconv.SparseSequential(SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'), SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'))
        # Stages 2-4: strided sparse conv then two residual blocks each.
        self.conv2 = spconv.SparseSequential(block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'), SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'))
        self.conv3 = spconv.SparseSequential(block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'), SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'))
        self.conv4 = spconv.SparseSequential(block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'), SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'))
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        # Output conv compresses the z axis only ((3,1,1) kernel, (2,1,1) stride).
        self.conv_out = spconv.SparseSequential(spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad, bias=False, indice_key='spconv_down2'), norm_fn(128), nn.ReLU())
        self.num_point_features = 128
        self.backbone_channels = {'x_conv1': 16, 'x_conv2': 32, 'x_conv3': 64, 'x_conv4': 128}

    def forward(self, batch_dict):
        """Encode voxels; adds encoded tensor + multi-scale features to batch_dict and returns it."""
        (voxel_features, voxel_coords) = (batch_dict['voxel_features'], batch_dict['voxel_coords'])
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(features=voxel_features, indices=voxel_coords.int(), spatial_shape=self.sparse_shape, batch_size=batch_size)
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        out = self.conv_out(x_conv4)
        batch_dict.update({'encoded_spconv_tensor': out, 'encoded_spconv_tensor_stride': 8})
        batch_dict.update({'multi_scale_3d_features': {'x_conv1': x_conv1, 'x_conv2': x_conv2, 'x_conv3': x_conv3, 'x_conv4': x_conv4}})
        batch_dict.update({'multi_scale_3d_strides': {'x_conv1': 1, 'x_conv2': 2, 'x_conv3': 4, 'x_conv4': 8}})
        return batch_dict
def get_model(point_cloud, is_training, bn_decay=None, num_class=NUM_CLASSES):
    """PointNet++ semantic-segmentation network (TF1 graph mode).

    Args:
        point_cloud: (B, N, >=3) tensor; the first three feature dims are xyz.
        is_training: boolean tensor controlling BN/dropout behavior.
        bn_decay: batch-norm decay schedule value.
        num_class: number of output segmentation classes.

    Returns:
        (B, N, num_class) per-point segmentation logits.
    """
    batch_size = point_cloud.get_shape()[0].value  # unused; kept for parity with sibling models
    num_point = point_cloud.get_shape()[1].value   # unused
    # NOTE(review): end_points is populated but never returned, so 'feats'
    # is unreachable by callers of this function.
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [(- 1), (- 1), 3])
    l0_points = None
    # Set-abstraction encoder: 512 centroids -> 128 centroids -> global pooling.
    (l1_xyz, l1_points, l1_indices) = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64, 64, 128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    (l2_xyz, l2_points, l2_indices) = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128, 128, 256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    (l3_xyz, l3_points, l3_indices) = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256, 512, 1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Feature-propagation decoder back to per-point resolution.
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256, 256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256, 128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128, 128, 128], is_training, bn_decay, scope='fa_layer3')
    # Per-point classification head: conv1d -> dropout -> linear conv1d.
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='seg_fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='seg_dp1')
    seg_pred = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='seg_fc2')
    return seg_pred
def _reduce_prod_over_leaves(xs: PyTree) -> Array:
    """Multiply together every leaf of the pytree `xs`.

    Left-to-right fold with no initial value, so an empty tree raises
    TypeError (matching functools.reduce semantics).
    """
    leaves = jax.tree_util.tree_leaves(xs)
    return functools.reduce(lambda acc, leaf: acc * leaf, leaves)
def save_metrics(metrics, exp_dir, filename='metrics.json', i=None):
    """Write `metrics` to exp_dir/filename as pretty-printed, key-sorted JSON.

    When `i` is given, '.{i}' is appended to the filename (e.g. per-step
    snapshots: 'metrics.json.3').
    """
    if i is not None:
        filename = f'{filename}.{i}'
    out_path = os.path.join(exp_dir, filename)
    with open(out_path, 'w') as handle:
        # dict(...) normalizes mapping-like inputs before serialization.
        json.dump(dict(metrics), handle, indent=4, separators=(',', ': '), sort_keys=True)
def to_md(comment_dict):
    """Render a parsed docstring dict into a Markdown fragment.

    Recognized keys: 'short_description', 'long_description', and the
    sections 'Args', 'Attributes', 'Returns'. Each section maps names to
    descriptions; 'Returns' may alternatively be a plain string.
    """
    def render_items(items):
        # One bold bullet per (name, description) pair.
        return ''.join(f'* **{name}**: {desc}\n\n' for name, desc in items.items())

    parts = []
    if 'short_description' in comment_dict:
        parts.append(comment_dict['short_description'] + '\n\n')
    if 'long_description' in comment_dict:
        parts.append(md_parse_line_break(comment_dict['long_description']) + '\n')
    if comment_dict.get('Args') is not None:
        parts.append('##### Args\n')
        parts.append(render_items(comment_dict['Args']))
    if comment_dict.get('Attributes') is not None:
        parts.append('##### Attributes\n')
        parts.append(render_items(comment_dict['Attributes']))
    returns = comment_dict.get('Returns')
    if returns is not None:
        parts.append('##### Returns\n')
        if isinstance(returns, str):
            parts.append(returns + '\n')
        else:
            parts.append(render_items(returns))
    return ''.join(parts)
def dataset_exists(path, impl):
    """Check whether an indexed dataset of the given implementation exists at `path`.

    'raw' and 'mmap' map to their dedicated dataset classes; any other
    value falls back to the default IndexedDataset format.
    """
    if impl == 'raw':
        dataset_cls = IndexedRawTextDataset
    elif impl == 'mmap':
        dataset_cls = MMapIndexedDataset
    else:
        dataset_cls = IndexedDataset
    return dataset_cls.exists(path)
def compile_args(config):
    """Build the keyword arguments for ``model.compile`` from an experiment config.

    Uses config['lr'] when present, otherwise Adam's conventional 1e-3.
    """
    learning_rate = config.get('lr', 0.001)
    return {
        'optimizer': tf.keras.optimizers.Adam(learning_rate),
        'loss': 'mean_squared_error',
        'metrics': ['mean_squared_error'],
    }
class CombinedSample(AbstractSample):
    """Samples actions by delegating to an ordered list of sub-samplers,
    each of which owns a contiguous range of option indices."""

    def __init__(self, samples):
        # Ordered sub-samplers; each contributes a range of option ids.
        self.samples = samples
        # Starting offset of each sampler's option range.
        # NOTE(review): this is [0] + the per-sampler counts of all but the
        # last sampler, NOT a cumulative sum — correct only for <= 2 samplers;
        # confirm intended semantics against callers.
        self.bases = ([0] + [s.numOptions() for s in self.samples[:(- 1)]])

    def _sample(self, node, *args, **kwargs):
        # The global option index is the number of children expanded so far.
        idx = len(node.children)
        for (base, sample) in zip(self.bases, self.samples):
            # Translate the global index into the sampler's local index space.
            adj_idx = (idx - base)
            if (adj_idx < 0):
                continue
            action = sample.getOption(node, adj_idx)
            if (action is not None):
                # Tag the action with the global option id before returning it.
                action.id = idx
                return action
            else:
                continue
        # Implicitly returns None when no sampler produced an action.
def nth(iterator, n, default=None):
    """Return the ``n``-th item (0-based) of ``iterator``, or ``default``.

    With ``n is None`` the iterator is fully drained and an empty,
    zero-length deque is returned (the classic "consume" recipe).
    """
    if n is not None:
        return next(islice(iterator, n, None), default)
    # maxlen=0 discards every element while exhausting the iterator.
    return collections.deque(iterator, maxlen=0)
class AdamW(Optimizer):
    """Adam with decoupled weight decay (AdamW, arXiv:1711.05101).

    Unlike Adam + L2 regularisation, the weight decay is applied directly to
    the parameters (``p *= 1 - lr * weight_decay``) before the gradient-based
    update, decoupling it from the adaptive step size.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)
        # Older checkpoints may predate the amsgrad option.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The loss from ``closure``, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Decoupled weight decay: shrink weights before the Adam update.
                p.data.mul_(1 - group['lr'] * group['weight_decay'])
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # Lazy state initialization on first encounter of a parameter.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # BUGFIX: the deprecated ``Tensor.add_(scalar, tensor)`` /
                # ``addcmul_(scalar, t1, t2)`` overloads were removed in modern
                # PyTorch; use the keyword ``alpha`` / ``value`` forms instead.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Track the running maximum of the second moment.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                # BUGFIX: same deprecated-overload issue for addcdiv_.
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
def get_plugin_instance(plugin_name):
    """Return the live instance of an enabled plugin, or ``None``.

    ``None`` is returned both for disabled plugins and for enabled plugins
    whose instance has not been created yet.
    """
    if not is_plugin_enabled(plugin_name):
        return None
    return plugins[plugin_name]['instance'] or None
def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Build the MobileNet-V3 'RW' variant.

    Args:
        variant: model variant name used for config / checkpoint lookup.
        channel_multiplier: width multiplier applied when rounding channels.
        pretrained: load pretrained weights if available.
        **kwargs: forwarded to the model builder (may carry bn / act overrides).
    """
    # Stage definitions in the block-decoder string syntax consumed by
    # decode_arch_def (one inner list per stage).
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_nre_noskip'],
        ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'],
        ['ir_r3_k5_s2_e3_c40_se0.25_nre'],
        ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'],
        ['ir_r2_k3_s1_e6_c112_se0.25'],
        ['ir_r3_k5_s2_e6_c160_se0.25'],
        ['cn_r1_k1_s1_c960'],
    ]
    round_chs = partial(round_channels, multiplier=channel_multiplier)
    norm_layer = partial(nn.BatchNorm2d, **resolve_bn_args(kwargs))
    se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid')
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        head_bias=False,
        round_chs_fn=round_chs,
        norm_layer=norm_layer,
        act_layer=resolve_act_layer(kwargs, 'hard_swish'),
        se_layer=se_layer,
        **kwargs,
    )
    return _create_mnv3(variant, pretrained, **model_kwargs)
def rigid_align(coords_pred, coords_true, *, joint_validity_mask=None, scale_align=False, reflection_align=False):
    """Procrustes-align predicted joint coordinates to the ground truth.

    The similarity transform (rotation, optional scale/reflection,
    translation) is estimated on the valid joints only, then applied to all
    predicted joints.

    Args:
        coords_pred: predicted joint coordinates, shape (..., n_joints, 3).
        coords_true: ground-truth coordinates with matching shape.
        joint_validity_mask: optional bool mask selecting joints used to fit
            the transform; defaults to all joints.
        scale_align: also estimate a uniform scale.
        reflection_align: allow a reflection if it improves the fit.

    Returns:
        The aligned prediction, or the original prediction if the
        decomposition fails numerically.
    """
    if joint_validity_mask is None:
        # BUGFIX: np.bool was removed in NumPy 1.24; use the builtin bool dtype.
        joint_validity_mask = np.ones_like(coords_pred[..., 0], dtype=bool)
    valid_coords_pred = coords_pred[joint_validity_mask]
    valid_coords_true = coords_true[joint_validity_mask]
    try:
        d, Z, tform = eval.procrustes.procrustes(
            valid_coords_true, valid_coords_pred,
            scaling=scale_align,
            reflection=('best' if reflection_align else False))
    except np.linalg.LinAlgError:
        logging.error('Cannot do Procrustes alignment, returning original prediction.')
        return coords_pred
    T = tform['rotation']
    b = tform['scale']
    c = tform['translation']
    # Apply the recovered similarity transform to *all* joints.
    # BUGFIX: the matrix-product operator was missing in the original line.
    return (b * coords_pred) @ T + c
# NOTE(review): the decorator was garbled in the source ("_grad()");
# torch.no_grad is the evident intent for an inference-only export helper —
# confirm against the original file.
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool=False):
    """Export the VAE decoder of a diffusers checkpoint to ONNX.

    Args:
        model_path: root of the pretrained pipeline (expects a ``vae`` subdir).
        output_path: directory where ``vae_decoder/model.onnx`` is written.
        opset: ONNX opset version.
        fp16: export in float16 (requires CUDA).

    Raises:
        ValueError: if ``fp16`` is requested without an available GPU.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = 'cpu'
    output_path = Path(output_path)
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae')
    vae_latent_channels = vae_decoder.config.latent_channels
    # ONNX export traces forward(); route it to decode() so the exported
    # graph maps latents -> images.
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype), False),
        output_path=output_path / 'vae_decoder' / 'model.onnx',
        ordered_input_names=['latent_sample', 'return_dict'],
        output_names=['sample'],
        dynamic_axes={'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}},
        opset=opset)
    del vae_decoder
def binary_block5x5(in_planes, out_planes, stride=1, **kwargs):
    """5x5 binarized conv block with 'same'-style padding and no bias."""
    return b_utils.BinBlock(
        in_planes,
        out_planes,
        kernel_size=5,
        stride=stride,
        padding=2,
        bias=False,
        **kwargs,
    )
class TestLoadSoundFiles():
    """Integration tests for ``load_sound_file`` over the bundled demo files.

    Each test loads one file and sanity-checks sample rate, dtype (float32),
    mono output (ndim == 1), length and amplitude range.  Length assertions
    for lossy codecs (ogg/opus) use an interval because decoder padding can
    differ between backends.
    """

    def test_load_stereo_ogg_vorbis(self):
        # Stereo ogg in, mono float32 out (ndim == 1 asserted below).
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'background_noises', 'hens.ogg'), sample_rate=None)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        assert (samples.shape[0] >= 442575)
        assert (samples.shape[0] <= 443328)
        max_value = np.amax(samples)
        assert (max_value > 0.02)
        assert (max_value < 1.0)

    def test_load_mono_opus(self):
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'bus.opus'), sample_rate=None)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        assert (samples.shape[0] >= 36682)
        assert (samples.shape[0] <= 36994)
        max_value = np.amax(samples)
        assert (max_value > 0.3)
        assert (max_value < 1.0)

    def test_load_mono_m4a(self):
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'testing.m4a'), sample_rate=None)
        assert (sample_rate == 44100)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        max_value = np.amax(samples)
        assert (max_value > 0.1)
        assert (max_value < 1.0)

    def test_load_mono_signed_16_bit_wav(self):
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'acoustic_guitar_0.wav'), sample_rate=None)
        assert (sample_rate == 16000)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        assert (samples.shape[0] == 140544)
        max_value = np.amax(samples)
        assert (max_value > 0.5)
        assert (max_value < 1.0)

    def test_load_stereo_signed_16_bit_wav(self):
        # Stereo wav is also delivered as a single mono channel.
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'stereo_16bit.wav'), sample_rate=None)
        assert (sample_rate == 16000)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        assert (samples.shape[0] == 17833)
        max_value = np.amax(samples)
        assert (max_value > 0.5)
        assert (max_value < 1.0)

    def test_load_mono_signed_24_bit_wav(self):
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'signed_24bit.wav'), sample_rate=None)
        assert (sample_rate == 48000)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        assert (samples.shape[0] == 54514)
        max_value = np.amax(samples)
        assert (max_value > 0.09)
        assert (max_value < 1.0)

    def test_load_mono_signed_24_bit_wav2(self):
        # Tiny 22-sample fixture; exact peak values are known.
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'mono_int24.wav'), sample_rate=None)
        assert (sample_rate == 44100)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        assert (samples.shape[(- 1)] == 22)
        max_value = np.amax(samples)
        assert (max_value == pytest.approx(0.))
        min_value = np.amin(samples)
        assert (min_value == pytest.approx((- 0.9822748)))

    def test_load_mono_signed_32_bit_wav(self):
        # Same signal as the int24 fixture, stored as int32.
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'mono_int32.wav'), sample_rate=None)
        assert (sample_rate == 44100)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        assert (samples.shape[(- 1)] == 22)
        max_value = np.amax(samples)
        assert (max_value == pytest.approx(0.))
        min_value = np.amin(samples)
        assert (min_value == pytest.approx((- 0.9822748)))

    def test_load_mono_float64_wav(self):
        # Same signal again, stored as float64; still converted to float32.
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'mono_float64.wav'), sample_rate=None)
        assert (sample_rate == 44100)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        assert (samples.shape[(- 1)] == 22)
        max_value = np.amax(samples)
        assert (max_value == pytest.approx(0.))
        min_value = np.amin(samples)
        assert (min_value == pytest.approx((- 0.9822748)))

    def test_load_stereo_signed_24_bit_wav(self):
        # Note the upper-case .WAV extension is handled too.
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'stereo_24bit.WAV'), sample_rate=None)
        assert (sample_rate == 16000)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        assert (samples.shape[0] == 17833)
        max_value = np.amax(samples)
        assert (max_value > 0.5)
        assert (max_value < 1.0)

    def test_load_mono_ms_adpcm(self):
        # MS ADPCM compressed wav with an unusual 11024 Hz rate.
        (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'ms_adpcm.wav'), sample_rate=None)
        assert (sample_rate == 11024)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        assert (samples.shape[0] == 895500)
        max_value = np.amax(samples)
        assert (max_value > 0.3)
        assert (max_value < 1.0)

    def test_load_mono_ms_adpcm_and_resample(self):
        # Requesting a different sample_rate triggers resampling and a warning.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            (samples, sample_rate) = load_sound_file(os.path.join(DEMO_DIR, 'ms_adpcm.wav'), sample_rate=16000)
            assert (len(w) >= 1)
        assert (sample_rate == 16000)
        assert (samples.dtype == np.float32)
        assert (samples.ndim == 1)
        # Resampled length is the original length scaled by the rate ratio.
        assert (samples.shape[0] == math.ceil(((895500 * 16000) / 11024)))
        max_value = np.amax(samples)
        assert (max_value > 0.3)
        assert (max_value < 1.0)
class LocalAttention(nn.Module):
    """Multi-head attention restricted to a local window of size
    ``local_context`` around each query position.

    Shapes follow the (batch, seq, heads, dim) convention on the public
    interface and are rearranged to (batch, heads, seq, dim) internally.
    """

    def __init__(self, local_context, softmax_temp=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        # Width of the attention window around each query position.
        self.local_context = local_context
        # Softmax temperature; None means the 1/sqrt(E) default (see forward).
        self.softmax_temp = softmax_temp
        self.dropout = nn.Dropout(attention_dropout)
        # NOTE(review): device/dtype are accepted but unused here — presumably
        # kept for constructor-signature parity with sibling attention classes.

    def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
        """Compute local attention.

        Args:
            query: (B, T, H, E) queries.
            key: (B, S, H, E) keys.
            value: (B, S, H, D) values.
            attn_mask: optional mask object exposing ``additive_matrix_finite``.
            key_padding_mask: optional mask object exposing ``lengths`` and
                ``all_ones``.
            need_weights: also return the (B, H, T, S) attention matrix.

        Returns:
            (output, attn) with output of shape (B, T, H, D); attn is None
            unless ``need_weights`` is set.
        """
        (B, T, H, E) = query.shape
        (_, S, _, D) = value.shape
        softmax_temp = (self.softmax_temp or (1 / math.sqrt(E)))
        # Dense additive mask (finite values only) for the local product.
        if (attn_mask is None):
            attn_mask_additive_matrix = torch.zeros(T, S, device=query.device)
        else:
            attn_mask_additive_matrix = attn_mask.additive_matrix_finite
        # Per-batch valid key lengths; default = every position valid.
        if (key_padding_mask is None):
            key_padding_mask_lengths = torch.full(size=(B,), fill_value=S, dtype=torch.long, device=key.device)
        else:
            key_padding_mask_lengths = key_padding_mask.lengths
        # Switch to (batch, heads, seq, dim) layout for the local kernels.
        query = rearrange(query, 'b t h e -> b h t e').contiguous()
        key = rearrange(key, 'b s h e -> b h s e').contiguous()
        value = rearrange(value, 'b s h d -> b h s d').contiguous()
        # Scores only over the local window: (B, H, T, local_context).
        QK = local_dot_product(query, key, attn_mask_additive_matrix, key_padding_mask_lengths, self.local_context)
        attn_local = torch.softmax((softmax_temp * QK), dim=(- 1))
        if ((key_padding_mask is not None) and (not key_padding_mask.all_ones)):
            # Zero out window slots that fall outside [0, length) for each batch.
            i = rearrange(torch.arange(T, device=query.device), 't -> 1 1 t 1')
            j = torch.arange(self.local_context, device=query.device)
            # Absolute key index of each window slot, centred on the query.
            local_idx = ((i - (self.local_context // 2)) + j)
            valid_idx_mask = ((local_idx >= 0) & (local_idx < rearrange(key_padding_mask_lengths, 'b -> b 1 1 1')))
            attn_local = attn_local.masked_fill((~ valid_idx_mask), 0.0)
        A = self.dropout(attn_local)
        # Weighted sum of values within each local window.
        V_new = local_weighted_average(A, value)
        attn = None
        if need_weights:
            # Scatter the windowed weights back into a full (B, H, T, S) matrix.
            attn = torch.zeros(B, H, T, S, device=query.device)
            i = rearrange(torch.arange(T, device=query.device), 't -> 1 1 t 1')
            j = torch.arange(self.local_context, device=query.device)
            local_idx = ((i - (self.local_context // 2)) + j)
            valid_idx_mask = ((local_idx >= 0) & (local_idx < rearrange(key_padding_mask_lengths, 'b -> b 1 1 1')))
            k = torch.arange(S, device=key.device)
            # Offset of each key position relative to the query position.
            idx = (k - i)
            # Positions of the full matrix covered by the (valid part of the) window.
            local_mask = (((idx >= (- (self.local_context // 2))) & (idx < ((self.local_context + 1) // 2))) & (k < rearrange(key_padding_mask_lengths, 'b -> b 1 1 1')))
            attn.masked_scatter_(local_mask, attn_local.masked_select(valid_idx_mask))
        return (rearrange(V_new, 'b h t d -> b t h d'), attn)
class _RCSuperOp():
    """Operator object supporting both ``expr * OP`` and ``expr * OP(...)``
    reaction-composition syntax."""

    def __call__(self, discardNonchemical: bool=True, allowPartial: bool=True, enforceConstraints: bool=False) -> _RCSuperOpArgsBound:
        # Calling the operator binds its keyword options, producing an
        # argument-bound operator that still awaits its left operand.
        return _RCSuperOpArgsBound(discardNonchemical, allowPartial, enforceConstraints)

    def __rmul__(self, first: RCExpExp) -> _RCSuperOpFirstBound:
        # ``expr * OP`` is shorthand for ``expr * OP()`` with default options.
        return (first * self())
def check_module(module_name: str) -> None:
    """Verify that ``module_name`` is importable.

    Framework aliases ('onnxrt' -> onnxruntime, 'pytorch' -> torch) are
    resolved first; the spec lookup itself is done on the lower-cased name.

    Raises:
        ClientErrorException: if the module cannot be found.
    """
    aliases = {'onnxrt': 'onnxruntime', 'pytorch': 'torch'}
    module_name = aliases.get(module_name, module_name)
    if find_spec(module_name.lower()) is None:
        raise ClientErrorException(f'Could not find {module_name} module.')
def get_spnasnet(model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Create the Single-Path NAS network.

    Args:
        model_name: name used to locate pretrained weights.
        pretrained: download and load pretrained weights.
        root: local cache directory for downloaded models.
        **kwargs: forwarded to the SPNASNet constructor.
    """
    net = SPNASNet(
        channels=[[24, 24, 24], [40, 40, 40, 40], [80, 80, 80, 80], [96, 96, 96, 96, 192, 192, 192, 192]],
        init_block_channels=[32, 16],
        final_block_channels=[320, 1280],
        # 1 marks stages/blocks using 3x3 kernels (0 -> 5x5).
        kernels3=[[1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0]],
        exp_factors=[[3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 6, 6, 6]],
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
# NOTE(review): the decorator was garbled in the source; a mock patch with
# side_effect is the evident intent — confirm whether the file uses
# `@patch` or `@mock.patch`.
@patch('refextract.app.extract_journal_reference', side_effect=KeyError('test message'))
def test_extract_journal_info_when_timeout_from_refextract(mock_extract_refs, app_client):
    """A KeyError raised inside refextract must surface as an HTTP 500 with a
    descriptive message rather than crashing the endpoint."""
    journal_kb_data = {'COMMUNICATIONS IN ASTEROSEISMOLOGY': 'Commun.Asteros.', 'PHYS REV': 'Phys.Rev.', 'PHYSICAL REVIEW': 'Phys.Rev.', 'PHYS REV LETT': 'Phys.Rev.Lett.', 'JINST': 'JINST', 'JOURNAL OF INSTRUMENTATION': 'JINST', 'SENS ACTUATORS B': 'Sens.Actuators B', 'SENSORS AND ACTUATORS B: CHEMICAL': 'Sens.Actuators B', 'PHYS SCRIPTA': 'Phys.Scripta', 'PHYSICA SCRIPTA': 'Phys.Scripta', 'BULL CALCUTTA MATH SOC': 'Bull.Calcutta Math.Soc.', 'BULLETIN OF THE CALCUTTA MATHEMATICAL SOCIETY': 'Bull.Calcutta Math.Soc.', 'QUANTUM MACHINE INTELLIGENCE': 'Quantum Machine Intelligence'}
    publication_infos = [{'pubinfo_freetext': 'Phys. Rev. 127 (1962) 965-970'}]
    payload = {'journal_kb_data': journal_kb_data, 'publication_infos': publication_infos}
    headers = {'content-type': 'application/json'}
    response = app_client.post('/extract_journal_info', headers=headers, data=json.dumps(payload))
    assert (response.status_code == 500)
    assert ({'message': "Can not extract publication info data. Reason: 'test message'"} == response.json)
class MolInstance_BP_Dipole(MolInstance_fc_sqdiff_BP):
    """Behler-Parrinello-style network with one branch per element that
    predicts per-atom charges, assembled into molecular net charge and
    dipole outputs."""

    def __init__(self, TData_, Name_=None, Trainable_=True):
        self.NetType = 'fc_sqdiff_BP'
        MolInstance.__init__(self, TData_, Name_, Trainable_)
        self.name = ('Mol_' + self.TData.name + '_' + self.TData.dig.name + '_' + str(self.TData.order) + '_' + self.NetType)
        LOGGER.debug('Raised Instance: ' + self.name)
        self.train_dir = PARAMS['networks_directory'] + self.name
        self.learning_rate = 0.0001
        self.momentum = 0.95
        if self.Trainable:
            self.TData.LoadDataToScratch(self.tformer)
        # Flattened descriptor length per atom.
        self.inshape = np.prod(self.TData.dig.eshape)
        print('MolInstance_BP_Dipole.inshape: ', self.inshape)
        self.eles = self.TData.eles
        self.n_eles = len(self.eles)
        self.MeanStoich = self.TData.MeanStoich
        self.MeanNumAtoms = np.sum(self.MeanStoich)
        self.AtomBranchNames = []
        # Graph handles, populated by TrainPrepare / EvalPrepare.
        self.netcharge_output = None
        self.dipole_output = None
        self.inp_pl = None
        self.mats_pl = None
        self.coords = None
        self.label_pl = None
        self.batch_size = 10000       # atoms per input batch
        self.batch_size_output = 0    # molecules per output batch
        self.hidden1 = 100
        self.hidden2 = 100
        self.hidden3 = 100
        self.summary_op = None
        self.summary_writer = None

    def Clean(self):
        # Drop graph handles so the instance can be pickled / rebuilt.
        MolInstance_fc_sqdiff_BP.Clean(self)
        self.coords_pl = None
        self.netcharge_output = None
        self.dipole_output = None
        return

    def TrainPrepare(self, continue_training=False):
        """Build the training graph, session and (optionally) restore a checkpoint."""
        self.MeanNumAtoms = self.TData.MeanNumAtoms
        print('self.MeanNumAtoms: ', self.MeanNumAtoms)
        # Size the molecular output batch from the average molecule size.
        self.batch_size_output = int((1.5 * self.batch_size) / self.MeanNumAtoms)
        print('Assigned batch input size: ', self.batch_size)
        print('Assigned batch output size in BP_Dipole:', self.batch_size_output)
        with tf.Graph().as_default():
            self.inp_pl = []
            self.mats_pl = []
            self.coords_pl = []
            for e in range(len(self.eles)):
                # Per-element placeholders: descriptors, atom->molecule
                # scatter matrices and atomic coordinates.
                self.inp_pl.append(tf.placeholder(self.tf_prec, shape=tuple([None, self.inshape])))
                self.mats_pl.append(tf.placeholder(self.tf_prec, shape=tuple([None, self.batch_size_output])))
                self.coords_pl.append(tf.placeholder(self.tf_prec, shape=tuple([None, 3])))
            # Labels: net charge (1) + dipole vector (3) per molecule.
            self.label_pl = tf.placeholder(self.tf_prec, shape=tuple([self.batch_size_output, 4]))
            (self.netcharge_output, self.dipole_output, self.atom_outputs) = self.inference(self.inp_pl, self.mats_pl, self.coords_pl)
            self.check = tf.add_check_numerics_ops()
            (self.total_loss, self.loss) = self.loss_op(self.netcharge_output, self.dipole_output, self.label_pl)
            self.train_op = self.training(self.total_loss, self.learning_rate, self.momentum)
            self.summary_op = tf.summary.merge_all()
            init = tf.global_variables_initializer()
            self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
            self.saver = tf.train.Saver()
            try:
                # Best-effort checkpoint restore; training starts fresh on failure.
                metafiles = [x for x in os.listdir(self.train_dir) if (x.count('meta') > 0)]
                if len(metafiles) > 0:
                    most_recent_meta_file = metafiles[0]
                    LOGGER.info('Restoring training from Metafile: ' + most_recent_meta_file)
                    config = tf.ConfigProto(allow_soft_placement=True)
                    self.sess = tf.Session(config=config)
                    self.saver = tf.train.import_meta_graph(self.train_dir + '/' + most_recent_meta_file)
                    self.saver.restore(self.sess, tf.train.latest_checkpoint(self.train_dir))
            except Exception as Ex:
                print('Restore Failed', Ex)
            self.summary_writer = tf.summary.FileWriter(self.train_dir, self.sess.graph)
            self.sess.run(init)
        return

    def loss_op(self, netcharge_output, dipole_output, labels):
        """L2 loss over net charge and dipole, added to the 'losses' collection."""
        charge_labels = tf.slice(labels, [0, 0], [self.batch_size_output, 1])
        dipole_labels = tf.slice(labels, [0, 1], [self.batch_size_output, 3])
        charge_diff = tf.subtract(netcharge_output, charge_labels)
        dipole_diff = tf.subtract(dipole_output, dipole_labels)
        charge_loss = tf.nn.l2_loss(charge_diff)
        dipole_loss = tf.nn.l2_loss(dipole_diff)
        loss = tf.add(charge_loss, dipole_loss)
        tf.add_to_collection('losses', loss)
        # total_loss also includes any weight-decay terms in the collection.
        return (tf.add_n(tf.get_collection('losses'), name='total_loss'), loss)

    def inference(self, inp_pl, mats_pl, coords_pl):
        """Build the per-element charge branches and assemble molecular outputs.

        Returns:
            (netcharge_output, dipole_output, atom_outputs): molecular net
            charges (N, 1), dipoles (N, 3) and the raw per-atom charges.
        """
        branches = []
        atom_outputs = []
        hidden1_units = self.hidden1
        hidden2_units = self.hidden2
        hidden3_units = self.hidden3
        netcharge_output = tf.zeros([self.batch_size_output, 1], dtype=self.tf_prec)
        dipole_output = tf.zeros([self.batch_size_output, 3], dtype=self.tf_prec)
        # Per-layer init stddevs scaled by fan-in.
        nrm1 = 1.0 / (10 + math.sqrt(float(self.inshape)))
        nrm2 = 1.0 / (10 + math.sqrt(float(hidden1_units)))
        nrm3 = 1.0 / (10 + math.sqrt(float(hidden2_units)))
        nrm4 = 1.0 / (10 + math.sqrt(float(hidden3_units)))
        LOGGER.info('Norms: %f,%f,%f', nrm1, nrm2, nrm3)
        for e in range(len(self.eles)):
            branches.append([])
            inputs = inp_pl[e]
            mats = mats_pl[e]
            coords = coords_pl[e]
            shp_in = tf.shape(inputs)
            shp_coords = tf.shape(coords)
            if PARAMS['CheckLevel'] > 2:
                # NOTE(review): the integer arguments of these tf.Print calls
                # were lost in the source ("first_n=, summarize="); large
                # placeholders are used so debug output is never truncated.
                tf.Print(tf.to_float(shp_in), [tf.to_float(shp_in)], message=('Element ' + str(e)) + 'input shape ', first_n=10000000, summarize=10000000)
                mats_shape = tf.shape(mats)
                tf.Print(tf.to_float(mats_shape), [tf.to_float(mats_shape)], message=('Element ' + str(e)) + 'mats shape ', first_n=10000000, summarize=10000000)
                tf.Print(tf.to_float(shp_coords), [tf.to_float(shp_coords)], message=('Element ' + str(e)) + 'coords shape ', first_n=10000000, summarize=10000000)
            if PARAMS['CheckLevel'] > 3:
                tf.Print(tf.to_float(inputs), [tf.to_float(inputs)], message='This is input shape ', first_n=10000000, summarize=10000000)
            with tf.name_scope(str(self.eles[e]) + '_hidden_1'):
                weights = self._variable_with_weight_decay(var_name='weights', var_shape=[self.inshape, hidden1_units], var_stddev=nrm1, var_wd=0.001)
                biases = tf.Variable(tf.zeros([hidden1_units], dtype=self.tf_prec), name='biases')
                branches[-1].append(tf.nn.relu(tf.matmul(inputs, weights) + biases))
            with tf.name_scope(str(self.eles[e]) + '_hidden_2'):
                weights = self._variable_with_weight_decay(var_name='weights', var_shape=[hidden1_units, hidden2_units], var_stddev=nrm2, var_wd=0.001)
                biases = tf.Variable(tf.zeros([hidden2_units], dtype=self.tf_prec), name='biases')
                branches[-1].append(tf.nn.relu(tf.matmul(branches[-1][-1], weights) + biases))
            with tf.name_scope(str(self.eles[e]) + '_hidden_3'):
                weights = self._variable_with_weight_decay(var_name='weights', var_shape=[hidden2_units, hidden3_units], var_stddev=nrm3, var_wd=0.001)
                biases = tf.Variable(tf.zeros([hidden3_units], dtype=self.tf_prec), name='biases')
                branches[-1].append(tf.nn.relu(tf.matmul(branches[-1][-1], weights) + biases))
            with tf.name_scope(str(self.eles[e]) + '_regression_linear'):
                # Linear head: one charge per atom of this element.
                weights = self._variable_with_weight_decay(var_name='weights', var_shape=[hidden3_units, 1], var_stddev=nrm4, var_wd=None)
                biases = tf.Variable(tf.zeros([1], dtype=self.tf_prec), name='biases')
                branches[-1].append(tf.matmul(branches[-1][-1], weights) + biases)
                shp_out = tf.shape(branches[-1][-1])
                cut = tf.slice(branches[-1][-1], [0, 0], [shp_out[0], 1])
                # Row vector of per-atom charges: (1, n_atoms_of_element).
                rshp = tf.reshape(cut, [1, shp_out[0]])
                atom_outputs.append(rshp)
                coords_rshp = tf.transpose(coords)
                # Per-atom dipole contributions q_i * r_i, shape (3, n_atoms).
                dipole_tmp = tf.reshape(tf.multiply(rshp, coords_rshp), [3, shp_out[0]])
                # Scatter-sum atoms into molecules via the per-element matrix.
                netcharge = tf.transpose(tf.matmul(rshp, mats))
                dipole = tf.transpose(tf.matmul(dipole_tmp, mats))
                netcharge_output = tf.add(netcharge_output, netcharge)
                dipole_output = tf.add(dipole_output, dipole)
        tf.verify_tensor_all_finite(netcharge_output, 'Nan in output!!!')
        tf.verify_tensor_all_finite(dipole_output, 'Nan in output!!!')
        return (netcharge_output, dipole_output, atom_outputs)

    def fill_feed_dict(self, batch_data):
        """Map batch arrays onto the placeholders, rejecting non-finite input."""
        for e in range(len(self.eles)):
            if not np.all(np.isfinite(batch_data[0][e]), axis=(0, 1)):
                print('I was fed shit1')
                raise Exception('DontEatShit')
            if not np.all(np.isfinite(batch_data[1][e]), axis=(0, 1)):
                print('I was fed shit3')
                raise Exception('DontEatShit')
            if not np.all(np.isfinite(batch_data[2][e]), axis=(0, 1)):
                print('I was fed shit3')
                raise Exception('DontEatShit')
        if not np.all(np.isfinite(batch_data[3]), axis=(0, 1)):
            print('I was fed shit4')
            raise Exception('DontEatShit')
        feed_dict = {i: d for (i, d) in zip((self.inp_pl + self.mats_pl + self.coords_pl + [self.label_pl]), (batch_data[0] + batch_data[1] + batch_data[2] + [batch_data[3]]))}
        return feed_dict

    def train_step(self, step):
        """Run one epoch of minibatch training and report the running loss."""
        Ncase_train = self.TData.NTrain
        start_time = time.time()
        train_loss = 0.0
        num_of_mols = 0
        for ministep in range(0, int(Ncase_train / self.batch_size)):
            batch_data = self.TData.GetTrainBatch(self.batch_size, self.batch_size_output)
            # Molecules actually present in the (possibly padded) label block.
            actual_mols = np.count_nonzero(np.any(batch_data[3][1:], axis=1))
            (dump_, dump_2, total_loss_value, loss_value, netcharge_output, dipole_output) = self.sess.run([self.check, self.train_op, self.total_loss, self.loss, self.netcharge_output, self.dipole_output], feed_dict=self.fill_feed_dict(batch_data))
            train_loss = train_loss + loss_value
            num_of_mols += actual_mols
        duration = time.time() - start_time
        self.print_training(step, train_loss, num_of_mols, duration)
        return

    def test(self, step):
        """Run one pass over the test set and report the loss."""
        test_loss = 0.0
        start_time = time.time()
        Ncase_test = self.TData.NTest
        num_of_mols = 0
        for ministep in range(0, int(Ncase_test / self.batch_size)):
            batch_data = self.TData.GetTestBatch(self.batch_size, self.batch_size_output)
            feed_dict = self.fill_feed_dict(batch_data)
            actual_mols = np.count_nonzero(np.any(batch_data[3][1:], axis=1))
            (total_loss_value, loss_value, netcharge_output, dipole_output, atom_outputs) = self.sess.run([self.total_loss, self.loss, self.netcharge_output, self.dipole_output, self.atom_outputs], feed_dict=feed_dict)
            test_loss += loss_value
            num_of_mols += actual_mols
        # Show labels vs. predictions from the last minibatch.
        print('testing result:')
        print('acurrate charge, dipole:', batch_data[3][:20])
        print('predict dipole', dipole_output[:20])
        duration = time.time() - start_time
        self.print_training(step, test_loss, num_of_mols, duration)
        return (test_loss, feed_dict)

    def print_training(self, step, loss, Ncase, duration, Train=True):
        if Train:
            print('step: ', '%7d' % step, ' duration: ', '%.5f' % duration, ' train loss: ', '%.10f' % (float(loss) / Ncase))
        else:
            # BUGFIX: the original referenced the undefined name `NCase` here.
            print('step: ', '%7d' % step, ' duration: ', '%.5f' % duration, ' test loss: ', '%.10f' % (float(loss) / Ncase))
        return

    def evaluate(self, batch_data):
        """Evaluate charges and dipoles for one batch, rebuilding the graph if needed."""
        nmol = batch_data[3].shape[0]
        LOGGER.debug('nmol: %i', batch_data[3].shape[0])
        self.batch_size_output = nmol
        if not self.sess:
            LOGGER.info('loading the session..')
            self.EvalPrepare()
        feed_dict = self.fill_feed_dict(batch_data)
        (netcharge, dipole, total_loss_value, loss_value, atom_outputs) = self.sess.run([self.netcharge_output, self.dipole_output, self.total_loss, self.loss, self.atom_outputs], feed_dict=feed_dict)
        # Convert the dipole to Debye.
        return (netcharge, (dipole / AUPERDEBYE), atom_outputs)

    def EvalPrepare(self):
        """Rebuild the graph for evaluation and restore the checkpoint."""
        if isinstance(self.inshape, tuple):
            if len(self.inshape) > 1:
                raise Exception('My input should be flat')
            else:
                self.inshape = self.inshape[0]
        with tf.Graph().as_default(), tf.device('/job:localhost/replica:0/task:0/gpu:1'):
            self.inp_pl = []
            self.mats_pl = []
            self.coords_pl = []
            for e in range(len(self.eles)):
                self.inp_pl.append(tf.placeholder(self.tf_prec, shape=tuple([None, self.inshape])))
                self.mats_pl.append(tf.placeholder(self.tf_prec, shape=tuple([None, self.batch_size_output])))
                self.coords_pl.append(tf.placeholder(self.tf_prec, shape=tuple([None, 3])))
            self.label_pl = tf.placeholder(self.tf_prec, shape=tuple([self.batch_size_output, 4]))
            (self.netcharge_output, self.dipole_output, self.atom_outputs) = self.inference(self.inp_pl, self.mats_pl, self.coords_pl)
            self.check = tf.add_check_numerics_ops()
            (self.total_loss, self.loss) = self.loss_op(self.netcharge_output, self.dipole_output, self.label_pl)
            self.train_op = self.training(self.total_loss, self.learning_rate, self.momentum)
            self.summary_op = tf.summary.merge_all()
            init = tf.global_variables_initializer()
            self.saver = tf.train.Saver()
            self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
            self.saver.restore(self.sess, self.chk_file)
        return

    def Prepare(self):
        """Rebuild the graph (training-sized batches) and restore the checkpoint."""
        self.MeanNumAtoms = self.TData.MeanNumAtoms
        self.batch_size_output = int((1.5 * self.batch_size) / self.MeanNumAtoms)
        with tf.Graph().as_default(), tf.device('/job:localhost/replica:0/task:0/gpu:1'):
            self.inp_pl = []
            self.mats_pl = []
            # BUGFIX: was `self.coords = []` while the loop below appends to
            # self.coords_pl, which Clean() had set to None.
            self.coords_pl = []
            for e in range(len(self.eles)):
                self.inp_pl.append(tf.placeholder(self.tf_prec, shape=tuple([None, self.inshape])))
                self.mats_pl.append(tf.placeholder(self.tf_prec, shape=tuple([None, self.batch_size_output])))
                self.coords_pl.append(tf.placeholder(self.tf_prec, shape=tuple([None, 3])))
            self.label_pl = tf.placeholder(self.tf_prec, shape=tuple([self.batch_size_output, 4]))
            (self.netcharge_output, self.dipole_output, self.atom_outputs) = self.inference(self.inp_pl, self.mats_pl, self.coords_pl)
            self.check = tf.add_check_numerics_ops()
            # BUGFIX: was `self.dipole_out`, an attribute that never exists.
            (self.total_loss, self.loss) = self.loss_op(self.netcharge_output, self.dipole_output, self.label_pl)
            self.train_op = self.training(self.total_loss, self.learning_rate, self.momentum)
            self.summary_op = tf.summary.merge_all()
            init = tf.global_variables_initializer()
            self.saver = tf.train.Saver()
            self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
            self.saver.restore(self.sess, self.chk_file)
        return
def make_layers(cfg, batch_norm=False):
    """Build a VGG-style convolutional feature extractor.

    Args:
        cfg: sequence of layer specs; an int adds a 3x3 conv (+ optional BN)
            followed by ReLU with that many output channels, and 'M' adds a
            2x2 max-pool.
        batch_norm: insert BatchNorm2d between each conv and its ReLU.

    Returns:
        nn.Sequential containing the assembled layers (input: 3 channels).
    """
    layers = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(spec))
        layers.append(nn.ReLU(inplace=True))
        channels = spec
    return nn.Sequential(*layers)
class DPRReaderState(DPRState):
    """Converts a DPR training checkpoint into a transformers ``DPRReader``."""

    def load_dpr_model(self):
        """Instantiate a DPRReader and load the remapped checkpoint weights."""
        model = DPRReader(DPRConfig(**BertConfig.get_config_dict('bert-base-uncased')[0]))
        print('Loading DPR reader from {}'.format(self.src_file))
        saved_state = load_states_from_checkpoint(self.src_file)
        # position_ids is a registered buffer that older checkpoints lack;
        # seed it from the freshly built model.
        state_dict = {'encoder.bert_model.embeddings.position_ids': model.span_predictor.encoder.bert_model.embeddings.position_ids}
        prefix = 'encoder.'
        for key, value in saved_state.model_dict.items():
            # Keep only plain encoder weights (projection head excluded) and
            # remap them under the bert_model namespace.
            if not key.startswith(prefix) or key.startswith('encoder.encode_proj'):
                continue
            state_dict['encoder.bert_model.' + key[len(prefix):]] = value
        model.span_predictor.load_state_dict(state_dict)
        return model
def new_job_auto_scaler(job_strategy, job_resource: JobResource, job_nodes: Dict[(str, Dict[(int, Node)])], job_optimizer: JobResourceOptimizer, speed_monitor: SpeedMonitor, ps_manager: ParameterServerManager, worker_manager: WorkerManager, node_scaler: Scaler):
    """Factory returning the auto-scaler matching the distribution strategy.

    Args:
        job_strategy: a ``DistributionStrategy`` member (PS or ALLREDUCE).
        (remaining arguments are forwarded to the chosen scaler)

    Raises:
        ValueError: for an unsupported strategy.
    """
    if job_strategy == DistributionStrategy.PS:
        return PSTrainingAutoScaler(job_resource, job_nodes, job_optimizer, speed_monitor, ps_manager, worker_manager, node_scaler)
    if job_strategy == DistributionStrategy.ALLREDUCE:
        # Allreduce jobs have no parameter servers, so ps_manager is not passed.
        return AllreduceTrainingAutoScaler(job_resource, job_nodes, job_optimizer, speed_monitor, worker_manager, node_scaler)
    # BUGFIX: ValueError does not %-format extra arguments (that is a logging
    # idiom); format the message explicitly.
    raise ValueError('No job auto scaler for %s' % job_strategy)
# NOTE(review): the decorators were garbled in the source (bare argument
# tuples); click command/option decorators are the evident intent given the
# click.Path type — confirm against the original file.
@click.command(help='Generate list of LiDAR timestamps at which to evaluate the model.')
@click.option('--tbv-dataroot', required=True, help='Path to local directory where the TbV logs are stored.', type=click.Path(exists=True))
def run_generate_eval_timestamp_list(tbv_dataroot: str) -> None:
    """CLI entry point: write out the evaluation timestamp list for the TbV logs."""
    generate_eval_timestamp_list(tbv_dataroot=Path(tbv_dataroot))
class ThreeNN(Function):
    """autograd Function returning, for every target point, the distances to
    and indices of its three nearest source points (CUDA only).

    NOTE(review): forward/backward take ``ctx`` as first argument but carry no
    @staticmethod decorator here; torch autograd Functions normally declare
    them static — confirm against the original file.
    """

    def forward(ctx, target: torch.Tensor, source: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        # The CUDA kernel requires contiguous memory.
        target = target.contiguous()
        source = source.contiguous()
        (B, N, _) = target.size()
        m = source.size(1)
        # Output buffers: squared distances and neighbour indices, 3 per point.
        dist2 = torch.cuda.FloatTensor(B, N, 3)
        idx = torch.cuda.IntTensor(B, N, 3)
        ext_module.three_nn_forward(target, source, dist2, idx, b=B, n=N, m=m)
        if (torch.__version__ != 'parrots'):
            # Indices carry no gradient.
            ctx.mark_non_differentiable(idx)
        # Return Euclidean (not squared) distances.
        return (torch.sqrt(dist2), idx)

    def backward(ctx, a=None, b=None):
        # Non-differentiable op: no gradients flow back to the inputs.
        return (None, None)
def make_room_dict(rir_list):
    """Group room impulse responses (RIRs) by room id.

    Args:
        rir_list: iterable of objects exposing ``room_id`` and ``probability``.

    Returns:
        dict mapping room_id to an object with attributes ``rir_list`` (the
        RIRs of that room) and ``probability`` (their summed probability).

    Raises:
        AssertionError: if the room probabilities do not sum to ~1.
    """
    from types import SimpleNamespace  # stdlib; replaces the lambda-as-namespace hack
    room_dict = {}
    for rir in rir_list:
        if rir.room_id not in room_dict:
            # A tiny attribute container (the original abused a bare lambda
            # plus setattr for this).
            room_dict[rir.room_id] = SimpleNamespace(rir_list=[], probability=0)
        room_dict[rir.room_id].rir_list.append(rir)
    for room in room_dict.values():
        room.probability = sum(r.probability for r in room.rir_list)
    # The per-RIR probabilities are expected to form a distribution.
    assert almost_equal(sum(room.probability for room in room_dict.values()), 1.0)
    return room_dict
class ParseDecodeCoco():
    """Deserializes one COCO detection TFRecord example into an image tensor
    plus (boxes, class names, class ids, image id)."""

    def __call__(self, sample):
        keys_to_features = {
            'image/encoded': tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=''),
            'image/object/class/text': tf.compat.v1.VarLenFeature(dtype=tf.string),
            'image/object/class/label': tf.compat.v1.VarLenFeature(dtype=tf.int64),
            'image/source_id': tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=''),
        }
        # All four box corners share the same variable-length float feature spec.
        box_feature = tf.compat.v1.VarLenFeature(dtype=tf.float32)
        for corner_key in ['image/object/bbox/xmin', 'image/object/bbox/ymin', 'image/object/bbox/xmax', 'image/object/bbox/ymax']:
            keys_to_features[corner_key] = box_feature
        parsed = tf.io.parse_single_example(sample, keys_to_features)
        xmin = tf.expand_dims(parsed['image/object/bbox/xmin'].values, 0)
        ymin = tf.expand_dims(parsed['image/object/bbox/ymin'].values, 0)
        xmax = tf.expand_dims(parsed['image/object/bbox/xmax'].values, 0)
        ymax = tf.expand_dims(parsed['image/object/bbox/ymax'].values, 0)
        # Stack corners to (1, 4, num_boxes) then transpose to
        # (1, num_boxes, 4) in [ymin, xmin, ymax, xmax] order.
        bbox = tf.transpose(tf.expand_dims(tf.concat([ymin, xmin, ymax, xmax], 0), 0), [0, 2, 1])
        image_tensor = tf.image.decode_image(parsed['image/encoded'], channels=3)
        image_tensor.set_shape([None, None, 3])
        str_label = parsed['image/object/class/text'].values
        int_label = parsed['image/object/class/label'].values
        image_id = parsed['image/source_id']
        return (image_tensor, (bbox[0], str_label, int_label, image_id))
class RolloutJSONEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy arrays/scalars and the project's
    Rollout/Step dataclasses."""

    def default(self, o):
        # numpy containers and scalars -> plain Python equivalents
        if isinstance(o, np.ndarray):
            return o.tolist()
        if isinstance(o, np.bool_):
            return bool(o)
        if isinstance(o, np.floating):
            return float(o)
        if isinstance(o, np.number):
            # Remaining numpy scalars (integers) collapse to int.
            return int(o)
        # Project dataclasses -> plain dicts.
        if isinstance(o, (Rollout, Step)):
            return asdict(o)
        # Defer to the base class (raises TypeError for unknown types).
        return json.JSONEncoder.default(self, o)
(scope='module')  # NOTE(review): looks like a stripped decorator, presumably @pytest.fixture(scope='module') — confirm against VCS history.
def example_explanation():
    """Fixture-style helper: fit a LogisticRegression explainer on synthetic
    classification data and return a local explanation for the test head."""
    data = synthetic_classification()
    explainer = LogisticRegression()
    explainer.fit(data['train']['X'], data['train']['y'])
    # Explain only the first few test rows (.head()) to keep the object small.
    explanation = explainer.explain_local(data['test']['X'].head(), data['test']['y'].head())
    return explanation
def kitti_odom10_validation(img_height, img_width, batch_size, num_workers):
    """Build a DataLoader over the KITTI odom10 test split for pose validation.

    Images are resized to (img_height, img_width), normalized to zero mean and
    tagged with domain/purpose metadata consumed downstream.  Returns the
    DataLoader (sequential order, pinned memory, no last-batch drop).
    """
    transforms = [
        tf.CreateScaledImage(True),
        tf.Resize((img_height, img_width), image_types=('color',)),
        tf.CreateColoraug(),
        tf.ToTensor(),
        tf.NormalizeZeroMean(),
        tf.AddKeyValue('domain', 'kitti_odom10_val_pose'),
        tf.AddKeyValue('purposes', ('depth',)),
    ]
    # video_frames=(0, -1, 1): current frame plus previous and next neighbours.
    dataset = StandardDataset(dataset='kitti', split='odom10_split', trainvaltest_split='test', video_mode='video', stereo_mode='mono', keys_to_load=('color', 'poses'), keys_to_video=('color',), data_transforms=transforms, video_frames=(0, (- 1), 1), disable_const_items=True)
    loader = DataLoader(dataset, batch_size, False, num_workers=num_workers, pin_memory=True, drop_last=False)
    print(f' - Can use {len(dataset)} images from the kitti (odom10 split) validation set for pose validation', flush=True)
    return loader
def evaluate(args, model, tokenizer, prefix=''):
    """Evaluate `model` on the configured GLUE-style task(s).

    Handles MNLI's matched/mismatched double evaluation and, for masked
    models, applies the final pruning threshold (optionally computed globally
    over all mask scores when `args.global_topk` is set).  Metrics are logged
    and written to `eval_results.txt` under each output directory.

    Returns a dict of accumulated metric results across eval tasks.
    """
    # MNLI gets a second, mismatched evaluation pass written to <output_dir>/MM.
    eval_task_names = (('mnli', 'mnli-mm') if (args.task_name == 'mnli') else (args.task_name,))
    eval_outputs_dirs = ((args.output_dir, (args.output_dir + '/MM')) if (args.task_name == 'mnli') else (args.output_dir,))
    results = {}
    for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        if ((not os.path.exists(eval_output_dir)) and (args.local_rank in [(- 1), 0])):
            os.makedirs(eval_output_dir)
        args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # Multi-GPU evaluation (only wrap once).
        if ((args.n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
            model = torch.nn.DataParallel(model)
        logger.info('***** Running evaluation {} *****'.format(prefix))
        logger.info(' Num examples = %d', len(eval_dataset))
        logger.info(' Batch size = %d', args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        if args.global_topk:
            threshold_mem = None
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            model.eval()
            batch = tuple((t.to(args.device) for t in batch))
            with torch.no_grad():
                inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
                if (args.model_type != 'distilbert'):
                    # Only these model types consume segment ids.
                    inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'masked_bert', 'xlnet', 'albert']) else None)
                if ('masked' in args.model_type):
                    inputs['threshold'] = args.final_threshold
                    if args.global_topk:
                        if (threshold_mem is None):
                            # Compute the global top-k threshold once over all
                            # mask scores; reuse it for every batch.
                            concat = torch.cat([param.view((- 1)) for (name, param) in model.named_parameters() if ('mask_scores' in name)])
                            n = concat.numel()
                            kth = max((n - (int((n * args.final_threshold)) + 1)), 1)
                            threshold_mem = concat.kthvalue(kth).values.item()
                        inputs['threshold'] = threshold_mem
                outputs = model(**inputs)
                (tmp_eval_loss, logits) = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if (preds is None):
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
        eval_loss = (eval_loss / nb_eval_steps)
        # BUG FIX: `entropy` was only assigned on the classification path but
        # read unconditionally below, raising NameError for regression tasks.
        entropy = None
        if (args.output_mode == 'classification'):
            from scipy.special import softmax
            probs = softmax(preds, axis=(- 1))
            entropy = np.exp(((- probs) * np.log(probs)).sum(axis=(- 1)).mean())
            preds = np.argmax(preds, axis=1)
        elif (args.output_mode == 'regression'):
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        # NOTE: entropy is added to `result` (written to file) after the
        # `results.update`, preserving the original behavior.
        if (entropy is not None):
            result['eval_avg_entropy'] = entropy
        output_eval_file = os.path.join(eval_output_dir, prefix, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results {} *****'.format(prefix))
            for key in sorted(result.keys()):
                logger.info(' %s = %s', key, str(result[key]))
                writer.write(('%s = %s\n' % (key, str(result[key]))))
    return results
class PointerGenerator(nn.Module):
    """Encoder/decoder wrapper implementing the pointer-generator forward pass."""

    def __init__(self, encoder, decoder):
        super(PointerGenerator, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, src, lengths, tgt, dec_state=None):
        # Drop the final target token: it is only ever a prediction target.
        tgt = tgt[:(- 1)]
        memory_bank, enc_final = self.encoder(src, lengths)
        init_state = self.decoder.init_decoder_state(enc_final)
        decoder_outputs, dec_state, attns = self.decoder(
            tgt, memory_bank, init_state, memory_lengths=lengths)
        return (decoder_outputs, attns, dec_state)
def flatten_first_axis_tensor_dict(tensor_dict):
    """Recursively merge the first two axes of every tensor in a (possibly
    nested) dict, i.e. reshape (A, B, ...) -> (A*B, ...)."""
    flattened = dict()
    for key, value in tensor_dict.items():
        if isinstance(value, dict):
            # Recurse into nested dicts.
            flattened[key] = flatten_first_axis_tensor_dict(value)
        else:
            flattened[key] = value.reshape((-1,) + value.shape[2:])
    return flattened
def get_ppq5_jitter(frequencies, p_floor, p_ceil, max_p_factor):
    """Compute the five-point Period Perturbation Quotient (PPQ5) jitter.

    Returns None when no valid 5-tuple of frequencies exists or when the
    mean period is zero.
    """
    valid_count = 0
    abs_diff_sum = 0
    mean_period = get_mean_period(frequencies, p_floor, p_ceil, max_p_factor)
    for (f1, f2, f3, f4, f5) in shifted_sequence(frequencies, 5):
        if validate_frequencies([f1, f2, f3, f4, f5], p_floor, p_ceil, max_p_factor):
            valid_count += 1
            # |period of the centre point - mean period over the 5-point window|
            window_mean = (((((1 / f1) + (1 / f2)) + (1 / f3)) + (1 / f4)) + (1 / f5)) / 5
            abs_diff_sum += np.abs((1 / f3) - window_mean)
    if valid_count == 0:
        return None
    return (abs_diff_sum / valid_count) / mean_period if mean_period != 0 else None
def extract_cnn_feature(model, inputs, modules=None):
    """Run `model` in eval mode on CUDA inputs.

    With modules=None, returns the model's final output moved to CPU.
    Otherwise returns the intermediate outputs of `modules` (captured via
    forward hooks), in the order the modules were given.
    """
    model.eval()
    inputs = to_torch(inputs).cuda()
    if modules is None:
        result = model(inputs)
        return result.data.cpu()
    # Pre-seed the dict so insertion order matches the requested modules.
    captured = OrderedDict((id(m), None) for m in modules)

    def hook(module, _input, output):
        captured[id(module)] = output.data.cpu()

    handles = [m.register_forward_hook(hook) for m in modules]
    model(inputs)
    for handle in handles:
        handle.remove()
    return list(captured.values())
_algo(name=RTN_WEIGHT_ONLY_QUANT)  # NOTE(review): presumably a stripped registration decorator (e.g. @register_algo(name=...)) — confirm.
def rtn_quantize_entry(model: torch.nn.Module, configs_mapping: Dict[(Tuple[(str, callable)], RTNWeightQuantConfig)], *args, **kwargs) -> torch.nn.Module:
    """Apply round-to-nearest (RTN) weight-only quantization to each module
    named in `configs_mapping`, replacing the module on `model` in place.

    Returns the (mutated) model.
    """
    from .weight_only.rtn import apply_rtn_on_single_module
    for ((op_name, op_type), quant_config) in configs_mapping.items():
        original_module = fetch_module(model, op_name)
        # Skip config entries whose module cannot be located on this model.
        if (original_module is None):
            continue
        logger.info(f'Apply RTN on module: {op_name}, {original_module}')
        rtn_module = apply_rtn_on_single_module(original_module, quant_config)
        set_module(model, op_name, rtn_module)
    return model
def eval_func(model):
    """Evaluate a speech model on `librispeech_test_clean` and return
    accuracy = 1 - WER.

    Relies on module-level globals: `librispeech_test_clean` (the dataset),
    `processor` (feature extractor + tokenizer) and `wer` (the WER metric).
    """
    predictions = []
    references = []
    for batch in librispeech_test_clean:
        audio = batch['audio']
        input_features = processor(audio['array'], sampling_rate=audio['sampling_rate'], return_tensors='pt').input_features
        # Normalize references the same way predictions are normalized below.
        reference = processor.tokenizer._normalize(batch['text'])
        references.append(reference)
        with torch.no_grad():
            predicted_ids = model.generate(input_features)[0]
        transcription = processor.decode(predicted_ids)
        prediction = processor.tokenizer._normalize(transcription)
        predictions.append(prediction)
    wer_result = wer.compute(references=references, predictions=predictions)
    print(f'Result wer: {(wer_result * 100)}')
    accuracy = (1 - wer_result)
    print(('Accuracy: %.5f' % accuracy))
    return accuracy
def unbatchify(x: Array, agents: List[str]) -> Dict[(str, Array)]:
    """Split a batched array into a per-agent dict; agent i receives x[i]."""
    return dict(zip(agents, x))
def normalize_probs(probs: tc.Tensor) -> tc.Tensor:
    """Rescale along the last dimension so each slice sums to 1."""
    totals = probs.sum(dim=(-1), keepdim=True)
    return probs / totals
class Mulki2019(dataset.Dataset):
    """Mulki et al. (2019) Arabic Twitter hate-speech dataset adapter.

    BUG FIX: the original `url` literal was corrupted (a sanitizer stripped
    the URL, fusing the `url` and `hash` assignments into a syntax error).
    The URL is left empty here and must be restored from VCS history before
    downloads will work.

    NOTE(review): `process`/`unify_row` take `cls` as first parameter — they
    look like stripped @classmethod decorators; confirm.
    """
    name = 'mulki2019'
    url = ''  # FIXME: original download URL was stripped; restore it.
    hash = '3fc5e06ab624b47e404ac4894c323ca038e726ce6dd3d0e6a371e3'
    files = [{'name': 'mulki2019ar.csv', 'language': 'ar', 'type': 'training', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    def process(cls, tmp_file_path, dataset_folder, api_config):
        # Re-write the tab-separated download as comma-separated CSV in place,
        # then copy it into the dataset folder under its canonical name.
        df = pd.read_csv(tmp_file_path, sep='\t')
        df.to_csv(tmp_file_path, index=False)
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'mulki2019ar.csv'))

    def unify_row(cls, row):
        # Map the source columns (Tweet/Class) onto the unified schema
        # (text/labels); drop the originals afterwards.
        row['text'] = row['Tweet']
        labels = [row['Class']]
        row['labels'] = labels
        row = row.drop(['Class', 'Tweet'])
        return row
_model  # NOTE(review): presumably a stripped '@register_model' decorator — confirm.
def gluon_resnet101_v1d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct a Gluon ResNet-101-V1d (deep stem, average-pool downsample).

    Args:
        pretrained: load pretrained weights via `load_pretrained` when True.
        num_classes: classifier output size.
        in_chans: number of input image channels.
    """
    default_cfg = default_cfgs['gluon_resnet101_v1d']
    # [3, 4, 23, 3] Bottleneck stages = ResNet-101 depth configuration.
    model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, in_chans=in_chans, stem_width=32, stem_type='deep', avg_down=True, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
class ControllerFromTrainruns():
    """Replays a pre-computed schedule (trainrun per agent) as RailEnv actions.

    Builds one ActionPlan per agent from its Trainrun and serves the action
    scheduled for any given step via `act`.

    NOTE(review): `print_action_plan_dict` and `assert_actions_plans_equal`
    take no `self`/`cls` — they look like stripped @staticmethod decorators;
    confirm before calling them on instances.
    """
    pp = pprint.PrettyPrinter(indent=4)

    def __init__(self, env: RailEnv, trainrun_dict: Dict[(int, Trainrun)]):
        self.env: RailEnv = env
        self.trainrun_dict: Dict[(int, Trainrun)] = trainrun_dict
        # One action plan per agent, in trainrun_dict iteration order.
        self.action_plan: ActionPlanDict = [self._create_action_plan_for_agent(agent_id, chosen_path) for (agent_id, chosen_path) in trainrun_dict.items()]

    def get_waypoint_before_or_at_step(self, agent_id: int, step: int) -> Waypoint:
        """Return the waypoint an agent occupies at `step`.

        Before entry and after exit the agent is off-grid (position None).
        """
        trainrun = self.trainrun_dict[agent_id]
        entry_time_step = trainrun[0].scheduled_at
        if (step <= entry_time_step):
            return Waypoint(position=None, direction=self.env.agents[agent_id].initial_direction)
        exit_time_step = trainrun[(- 1)].scheduled_at
        if (step >= exit_time_step):
            return Waypoint(position=None, direction=trainrun[(- 1)].waypoint.direction)
        # Walk the schedule and keep the last waypoint reached at or before `step`.
        waypoint = None
        for trainrun_waypoint in trainrun:
            if (step < trainrun_waypoint.scheduled_at):
                return waypoint
            if (step >= trainrun_waypoint.scheduled_at):
                waypoint = trainrun_waypoint.waypoint
        assert (waypoint is not None)
        return waypoint

    def get_action_at_step(self, agent_id: int, current_step: int) -> Optional[RailEnvActions]:
        """Return the action scheduled exactly at `current_step`, else None."""
        for action_plan_element in self.action_plan[agent_id]:
            scheduled_at = action_plan_element.scheduled_at
            # Plans are time-ordered, so we can stop at the first future element.
            if (scheduled_at > current_step):
                return None
            elif (current_step == scheduled_at):
                return action_plan_element.action
        return None

    def act(self, current_step: int) -> Dict[(int, RailEnvActions)]:
        """Collect the scheduled actions of all agents for `current_step`."""
        action_dict = {}
        for agent_id in range(len(self.env.agents)):
            action: Optional[RailEnvActions] = self.get_action_at_step(agent_id, current_step)
            if (action is not None):
                action_dict[agent_id] = action
        return action_dict

    def print_action_plan(self):
        self.__class__.print_action_plan_dict(self.action_plan)

    def print_action_plan_dict(action_plan: ActionPlanDict):
        """Pretty-print every agent's plan, one step per line."""
        for (agent_id, plan) in enumerate(action_plan):
            print('{}: '.format(agent_id))
            for step in plan:
                print(' {}'.format(step))

    def assert_actions_plans_equal(expected_action_plan: ActionPlanDict, actual_action_plan: ActionPlanDict):
        """Deep-compare two action-plan dicts, failing with a verbose diff."""
        assert (len(expected_action_plan) == len(actual_action_plan))
        for k in range(len(expected_action_plan)):
            assert (len(expected_action_plan[k]) == len(actual_action_plan[k])), 'len for agent {} should be the same.\n\n expected ({}) = {}\n\n actual ({}) = {}'.format(k, len(expected_action_plan[k]), ControllerFromTrainruns.pp.pformat(expected_action_plan[k]), len(actual_action_plan[k]), ControllerFromTrainruns.pp.pformat(actual_action_plan[k]))
            for i in range(len(expected_action_plan[k])):
                assert (expected_action_plan[k][i] == actual_action_plan[k][i]), 'not the same at agent {} at step {}\n\n expected = {}\n\n actual = {}'.format(k, i, ControllerFromTrainruns.pp.pformat(expected_action_plan[k][i]), ControllerFromTrainruns.pp.pformat(actual_action_plan[k][i]))
        assert (expected_action_plan == actual_action_plan), 'expected {}, found {}'.format(expected_action_plan, actual_action_plan)

    def _create_action_plan_for_agent(self, agent_id, trainrun) -> ActionPlan:
        """Translate one agent's trainrun into a time-ordered ActionPlan."""
        action_plan = []
        agent = self.env.agents[agent_id]
        # Minimum steps an agent needs to traverse one cell at its speed.
        minimum_cell_time = int(np.ceil((1.0 / agent.speed_data['speed'])))
        for (path_loop, trainrun_waypoint) in enumerate(trainrun):
            trainrun_waypoint: TrainrunWaypoint = trainrun_waypoint
            position = trainrun_waypoint.waypoint.position
            if Vec2d.is_equal(agent.target, position):
                break
            next_trainrun_waypoint: TrainrunWaypoint = trainrun[(path_loop + 1)]
            next_position = next_trainrun_waypoint.waypoint.position
            if (path_loop == 0):
                self._add_action_plan_elements_for_first_path_element_of_agent(action_plan, trainrun_waypoint, next_trainrun_waypoint, minimum_cell_time)
                continue
            just_before_target = Vec2d.is_equal(agent.target, next_position)
            self._add_action_plan_elements_for_current_path_element(action_plan, minimum_cell_time, trainrun_waypoint, next_trainrun_waypoint)
            if just_before_target:
                self._add_action_plan_elements_for_target_at_path_element_just_before_target(action_plan, minimum_cell_time, trainrun_waypoint, next_trainrun_waypoint)
        return action_plan

    def _add_action_plan_elements_for_current_path_element(self, action_plan: ActionPlan, minimum_cell_time: int, trainrun_waypoint: TrainrunWaypoint, next_trainrun_waypoint: TrainrunWaypoint):
        """Emit move/stop actions for an intermediate path element."""
        scheduled_at = trainrun_waypoint.scheduled_at
        next_entry_value = next_trainrun_waypoint.scheduled_at
        position = trainrun_waypoint.waypoint.position
        direction = trainrun_waypoint.waypoint.direction
        next_position = next_trainrun_waypoint.waypoint.position
        next_direction = next_trainrun_waypoint.waypoint.direction
        next_action = get_action_for_move(position, direction, next_position, next_direction, self.env.rail)
        # If there is slack before the next cell entry, stop now and restart
        # just in time; otherwise move on immediately.
        if (next_entry_value > (scheduled_at + minimum_cell_time)):
            action = ActionPlanElement(scheduled_at, RailEnvActions.STOP_MOVING)
            action_plan.append(action)
            action = ActionPlanElement((next_entry_value - minimum_cell_time), next_action)
            action_plan.append(action)
        else:
            action = ActionPlanElement(scheduled_at, next_action)
            action_plan.append(action)

    def _add_action_plan_elements_for_target_at_path_element_just_before_target(self, action_plan: ActionPlan, minimum_cell_time: int, trainrun_waypoint: TrainrunWaypoint, next_trainrun_waypoint: TrainrunWaypoint):
        """Emit the final STOP once the target cell has been traversed."""
        scheduled_at = trainrun_waypoint.scheduled_at
        action = ActionPlanElement((scheduled_at + minimum_cell_time), RailEnvActions.STOP_MOVING)
        action_plan.append(action)

    def _add_action_plan_elements_for_first_path_element_of_agent(self, action_plan: ActionPlan, trainrun_waypoint: TrainrunWaypoint, next_trainrun_waypoint: TrainrunWaypoint, minimum_cell_time: int):
        """Emit entry (DO_NOTHING until scheduled entry, then MOVE_FORWARD)
        plus the first in-grid move/stop actions."""
        scheduled_at = trainrun_waypoint.scheduled_at
        position = trainrun_waypoint.waypoint.position
        direction = trainrun_waypoint.waypoint.direction
        next_position = next_trainrun_waypoint.waypoint.position
        next_direction = next_trainrun_waypoint.waypoint.direction
        # Wait outside the grid until the scheduled entry step.
        if (scheduled_at > 0):
            action = ActionPlanElement(0, RailEnvActions.DO_NOTHING)
            action_plan.append(action)
        action = ActionPlanElement(scheduled_at, RailEnvActions.MOVE_FORWARD)
        action_plan.append(action)
        next_action = get_action_for_move(position, direction, next_position, next_direction, self.env.rail)
        # Same slack handling as intermediate elements, offset by the entry step.
        if (next_trainrun_waypoint.scheduled_at > ((scheduled_at + 1) + minimum_cell_time)):
            action = ActionPlanElement((scheduled_at + 1), RailEnvActions.STOP_MOVING)
            action_plan.append(action)
            action = ActionPlanElement((next_trainrun_waypoint.scheduled_at - minimum_cell_time), next_action)
            action_plan.append(action)
def average_weights_ns(w, ns):
    """Weighted average of model state dicts.

    Args:
        w:  list of state dicts (str -> tensor), all with identical keys.
        ns: per-model weights (e.g. sample counts), same length as `w`.

    Returns a new state dict with sum_i(ns[i] * w[i][key]) / sum(ns) per key.
    """
    w_avg = copy.deepcopy(w[0])
    for key in w_avg.keys():
        # BUG FIX: the original evaluated `w_avg[key] * ns[0]` and discarded
        # the result, so the first model's contribution was never scaled.
        w_avg[key] = w_avg[key] * ns[0]
        for i in range(1, len(w)):
            w_avg[key] += (ns[i] * w[i][key])
        w_avg[key] = torch.div(w_avg[key], sum(ns))
    return w_avg
def replace_instance_num(cmd_str, instance):
    """Fill every 'INST_NUM=' placeholder in a command string with `instance`."""
    filled = 'INST_NUM=' + str(instance)
    return cmd_str.replace('INST_NUM=', filled)
class AsyncInferenceTestCase(AsyncTestCase):
    """Async smoke test: MaskRCNNDetector.apredict should return boxes for the
    demo image.  The test body only exists on Python >= 3.7 (async unittest
    support) and is skipped at runtime without CUDA."""
    if (sys.version_info >= (3, 7)):

        async def test_simple_inference(self):
            if (not torch.cuda.is_available()):
                import pytest
                pytest.skip('test requires GPU and torch+cuda')
            # Remember grad mode so it can be restored at the end of the test.
            ori_grad_enabled = torch.is_grad_enabled()
            # NOTE(review): dirname(dirname(__name__)) operates on the module
            # *name*, not a filesystem path — probably intended __file__; confirm.
            root_dir = os.path.dirname(os.path.dirname(__name__))
            model_config = os.path.join(root_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
            detector = MaskRCNNDetector(model_config)
            (await detector.init())
            img_path = os.path.join(root_dir, 'demo/demo.jpg')
            (bboxes, _) = (await detector.apredict(img_path))
            self.assertTrue(bboxes)
            torch.set_grad_enabled(ori_grad_enabled)
class ExplainedVarianceDisplay():
    """Plot helper for per-view explained variance (train and optional test).

    NOTE(review): `from_estimator`, `from_explained_variance` and
    `from_explained_variance_ratio` take `cls` as first parameter — they look
    like stripped @classmethod decorators; confirm.
    """

    def __init__(self, explained_variance_train, explained_variance_test=None, ratio=True, view_labels=None, **kwargs):
        # Per-view sequences of explained variance, one entry per latent dim.
        self.explained_variance_train = explained_variance_train
        self.explained_variance_test = explained_variance_test
        # ratio=True -> values are fractions of total variance (plotted as %).
        self.ratio = ratio
        if (view_labels is not None):
            assert (len(view_labels) == len(self.explained_variance_train)), 'view_labels must be the same length as train_views'
            assert (len(view_labels) == len(self.explained_variance_test)), 'view_labels must be the same length as test_views'
            self.view_labels = view_labels
        else:
            self.view_labels = [f'View {i}' for i in range(len(self.explained_variance_train))]
        self.kwargs = kwargs

    def _validate_plot_params(self):
        # Plotting requires seaborn to be importable.
        check_seaborn_support('CorrelationHeatmapDisplay')

    def from_estimator(cls, model, train_views, test_views=None, ratio=True, view_labels=None, **kwargs):
        """Build a display directly from a fitted model and its views."""
        if ratio:
            explained_variance_train = model.explained_variance_ratio(train_views)
        else:
            explained_variance_train = model.explained_variance(train_views)
        if (test_views is not None):
            if ratio:
                explained_variance_test = model.explained_variance_ratio(test_views)
            else:
                explained_variance_test = model.explained_variance(test_views)
        else:
            explained_variance_test = None
        if ratio:
            return cls.from_explained_variance_ratio(explained_variance_train, explained_variance_test, view_labels=view_labels, **kwargs)
        else:
            return cls.from_explained_variance(explained_variance_train, explained_variance_test, view_labels=view_labels, **kwargs)

    def from_explained_variance(cls, explained_variance_train, explained_variance_test=None, view_labels=None, **kwargs):
        """Construct from raw explained-variance values (ratio=False)."""
        return cls(explained_variance_train, explained_variance_test, ratio=False, view_labels=view_labels, **kwargs)

    def from_explained_variance_ratio(cls, explained_variance_train, explained_variance_test=None, view_labels=None, **kwargs):
        """Construct from explained-variance ratios (ratio=True)."""
        return cls(explained_variance_train, explained_variance_test, ratio=True, view_labels=view_labels, **kwargs)

    def plot(self, ax=None):
        """Line-plot explained variance per latent dimension, one line per
        (view, train/test) combination.  Creates a new figure when ax is None.
        Returns self; the figure is stored on `self.figure_`."""
        self._validate_plot_params()
        # Build a long-format dataframe: one row per (latent dim, view).
        data = pd.DataFrame(self.explained_variance_train, index=self.view_labels).T
        data.index.name = 'Latent dimension'
        data = data.reset_index().melt(id_vars='Latent dimension', value_vars=self.view_labels)
        data.columns = ['Latent dimension', 'View', 'value']
        data['Mode'] = 'Train'
        if (self.explained_variance_test is not None):
            data_test = pd.DataFrame(self.explained_variance_test, index=self.view_labels).T
            data_test.index.name = 'Latent dimension'
            data_test = data_test.reset_index().melt(id_vars='Latent dimension', value_vars=self.view_labels)
            data_test.columns = ['Latent dimension', 'View', 'value']
            data_test['Mode'] = 'Test'
            data = pd.concat([data, data_test])
        if (ax is None):
            (fig, ax) = plt.subplots(figsize=(10, 5))
        else:
            fig = ax.get_figure()
        sns.lineplot(data=data, x='Latent dimension', y='value', hue='View', style='Mode', marker='o', ax=ax)
        # Latent dimensions are integers; force integer ticks.
        ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
        ax.set_xlabel('Latent dimension')
        if self.ratio:
            ax.set_ylabel('Explained Variance %')
            ax.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
        else:
            ax.set_ylabel('Explained Variance')
        ax.set_title('Explained Variance')
        plt.tight_layout()
        self.figure_ = fig
        return self
def initialize_quad_double_tracker(target, start, fixedgamma=True, regamma=0.0, imgamma=0.0, vrblvl=0):
    """Initialize the quad-double precision path tracker in PHCpack.

    Args:
        target, start: polynomial systems, as lists of string polynomials.
        fixedgamma: keep the same gamma constant across runs.
        regamma/imgamma: real/imaginary parts of a user-supplied gamma.
        vrblvl: verbosity level; > 0 prints diagnostics.
    Returns the PHCpack return code.
    """
    if (vrblvl > 0):
        print('in initialize_quad_double_tracker', end='')
        print(', fixedgamma :', fixedgamma, end='')
        print(', regamma :', regamma, end='')
        print(', imgamma :', imgamma)
        print('the target system :')
        for pol in target:
            print(pol)
        print('the start system :')
        for pol in start:
            print(pol)
    # Register both systems with the PHCpack back end before initializing.
    set_quad_double_target_system(target, vrblvl)
    set_quad_double_start_system(start, vrblvl)
    phc = get_phcfun()
    afix = pointer(c_int32(int(fixedgamma)))
    bbb = pointer(c_int32(0))
    # Pack gamma = regamma + imgamma*i as two consecutive C doubles.
    c_gamma = (c_double * 2)()
    c_gamma[0] = c_double(regamma)
    c_gamma[1] = c_double(imgamma)
    ptr_gamma = pointer(c_gamma)
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> initialize_quad_double_tracker calls phc', end='')
    # Job code 502 selects this initializer in the phc dispatcher.
    retval = phc(502, afix, bbb, ptr_gamma, vrb)
    if (vrblvl > 0):
        print(', return value :', retval)
    return retval
def test_can_instantiate_from_data_config(data_cfg, parser):
    """Config round-trip test: a LightningDataModule declared via `class_path`
    in a config file must be instantiable through the argument parser."""
    cfg_string = read_cfg(data_cfg)
    # Register LightningDataModule subclasses under the 'cfg' namespace.
    parser.add_lightning_class_args(LightningDataModule, 'cfg', subclass_mode=True, required=True)
    args = parser.parse_string(cfg_string)
    assert ('class_path' in args.cfg), 'No class_path key in config root level'
    class_path = args.cfg['class_path']
    objs = parser.instantiate_classes(args)
    # The instantiated object must be of the class named in the config.
    assert isinstance(objs.cfg, import_class(class_path))
class AutoModelForTokenClassification():
    """Factory that maps a config class to the matching token-classification
    model via MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.

    NOTE(review): `from_config`/`from_pretrained` take `cls` as first
    parameter — they look like stripped @classmethod decorators; confirm.
    """

    def __init__(self):
        # Direct instantiation is forbidden; use the factory methods instead.
        raise EnvironmentError('AutoModelForTokenClassification is designed to be instantiated using the `AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)` or `AutoModelForTokenClassification.from_config(config)` methods.')

    def from_config(cls, config):
        """Instantiate the matching model (no pretrained weights) from a config."""
        for (config_class, model_class) in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()))))

    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate with pretrained weights; the config is inferred from
        the checkpoint when not supplied via kwargs."""
        config = kwargs.pop('config', None)
        if (not isinstance(config, PretrainedConfig)):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        for (config_class, model_class) in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()))))
class ConditionalGuidedModel(nn.Module):
    """Conditional epsilon-prediction network: a stack of ConditionalLinear
    layers over [y_t, (y_0_hat), (x)] with a scalar output head."""

    def __init__(self, config):
        super(ConditionalGuidedModel, self).__init__()
        n_steps = config.diffusion.timesteps + 1
        # Flags selecting which tensors are concatenated onto y_t.
        self.cat_x = config.model.cat_x
        self.cat_y_pred = config.model.cat_y_pred
        data_dim = config.model.y_dim
        if self.cat_x:
            data_dim += config.model.x_dim
        if self.cat_y_pred:
            data_dim += config.model.y_dim
        self.lin1 = ConditionalLinear(data_dim, 128, n_steps)
        self.lin2 = ConditionalLinear(128, 128, n_steps)
        self.lin3 = ConditionalLinear(128, 128, n_steps)
        self.lin4 = nn.Linear(128, 1)

    def forward(self, x, y_t, y_0_hat, t):
        # Assemble the conditioning input in the order y_t, y_0_hat, x
        # according to the configured flags.
        parts = [y_t]
        if self.cat_y_pred:
            parts.append(y_0_hat)
        if self.cat_x:
            parts.append(x)
        eps_pred = torch.cat(parts, dim=1) if len(parts) > 1 else y_t
        for layer in (self.lin1, self.lin2, self.lin3):
            eps_pred = F.softplus(layer(eps_pred, t))
        return self.lin4(eps_pred)
class Point_Transformer_Last(nn.Module):
    """Two conv/BN/ReLU embedding layers followed by four chained
    self-attention layers; outputs the channel-wise concatenation of all
    four attention outputs."""

    def __init__(self, args, channels=256):
        super(Point_Transformer_Last, self).__init__()
        self.args = args
        self.conv1 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(channels, channels, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(channels)
        self.bn2 = nn.BatchNorm1d(channels)
        self.sa1 = SA_Layer(channels)
        self.sa2 = SA_Layer(channels)
        self.sa3 = SA_Layer(channels)
        self.sa4 = SA_Layer(channels)

    def forward(self, x):
        # Embed: conv -> BN -> ReLU, twice.
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        # Chain the attention layers, keeping each intermediate output.
        out1 = self.sa1(x)
        out2 = self.sa2(out1)
        out3 = self.sa3(out2)
        out4 = self.sa4(out3)
        return torch.cat((out1, out2, out3, out4), dim=1)
from dataclasses import dataclass  # required by the restored decorator below


@dataclass(frozen=True)  # BUG FIX: the bare '(frozen=True)' line was invalid syntax — a stripped @dataclass decorator.
class ValidationResult():
    """Immutable summary of a validation run over candidate fixes."""
    compilation_rate: float  # fraction of candidates that compiled
    plausible_rate: float  # fraction of candidates judged plausible
    n_plausible_fixes: int  # absolute count of plausible fixes
def initialize_weights(modules, init_mode):
    """Initialize conv/batchnorm/linear modules in place.

    `init_mode` selects the Conv2d weight initializer: 'he' (Kaiming normal,
    fan-out, ReLU) or 'xavier' (uniform); anything else raises ValueError.
    BatchNorm2d weights are set to 1, Linear weights to N(0, 0.01), and all
    biases to 0.
    """
    for module in modules:
        if isinstance(module, nn.Conv2d):
            if init_mode == 'he':
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif init_mode == 'xavier':
                nn.init.xavier_uniform_(module.weight.data)
            else:
                raise ValueError('Invalid init_mode {}'.format(init_mode))
            # Conv bias may be absent (bias=False layers).
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, 0, 0.01)
            nn.init.constant_(module.bias, 0)
class ViltImageProcessingTester(unittest.TestCase):
    """Test helper that holds ViLT image-processor settings and computes the
    expected post-resize image dimensions."""

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, size_divisor=2, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        # NOTE(review): image_mean/image_std are mutable default arguments,
        # shared across calls — harmless here as long as they are never mutated.
        size = (size if (size is not None) else {'shortest_edge': 30})
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.size_divisor = size_divisor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """kwargs dict for constructing the image processor under test."""
        return {'image_mean': self.image_mean, 'image_std': self.image_std, 'do_normalize': self.do_normalize, 'do_resize': self.do_resize, 'size': self.size, 'size_divisor': self.size_divisor}

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        Mirrors the processor: scale so the shorter side reaches
        size['shortest_edge'], cap the longer side at int(1333/800 * size),
        round, then floor both sides to a multiple of size_divisor.  For
        batched inputs, returns the max height and max width over the batch.
        """
        if (not batched):
            size = self.size['shortest_edge']
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                (w, h) = image.size
            else:
                # assumes channels-first arrays/tensors: (C, H, W) — TODO confirm
                (h, w) = (image.shape[1], image.shape[2])
            scale = (size / min(w, h))
            if (h < w):
                (newh, neww) = (size, (scale * w))
            else:
                (newh, neww) = ((scale * h), size)
            # Cap the longer side at the processor's max aspect budget.
            max_size = int(((1333 / 800) * size))
            if (max(newh, neww) > max_size):
                scale = (max_size / max(newh, neww))
                newh = (newh * scale)
                neww = (neww * scale)
            # Round to nearest int, then snap down to the size_divisor grid.
            (newh, neww) = (int((newh + 0.5)), int((neww + 0.5)))
            (expected_height, expected_width) = (((newh // self.size_divisor) * self.size_divisor), ((neww // self.size_divisor) * self.size_divisor))
        else:
            expected_values = []
            for image in image_inputs:
                (expected_height, expected_width) = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=(lambda item: item[0]))[0]
            expected_width = max(expected_values, key=(lambda item: item[1]))[1]
        return (expected_height, expected_width)
class RandomChooseData(RNGDataFlow):
    """DataFlow that yields points from several child dataflows, picking the
    source of each point at random with the configured probabilities."""

    def __init__(self, df_lists):
        # df_lists: either a list of dataflows (uniform probabilities) or a
        # list of (dataflow, probability) pairs whose probabilities sum to 1.
        super(RandomChooseData, self).__init__()
        if isinstance(df_lists[0], (tuple, list)):
            assert (sum([v[1] for v in df_lists]) == 1.0)
            self.df_lists = df_lists
        else:
            prob = (1.0 / len(df_lists))
            self.df_lists = [(k, prob) for k in df_lists]

    def reset_state(self):
        # Reset own RNG, then every child dataflow.
        # NOTE(review): entries passed as *lists* (not tuples) fall into the
        # else branch and call reset_state on the list itself — confirm that
        # callers only ever pass tuples or bare dataflows.
        super(RandomChooseData, self).reset_state()
        for d in self.df_lists:
            if isinstance(d, tuple):
                d[0].reset_state()
            else:
                d.reset_state()

    def get_data(self):
        itrs = [v[0].get_data() for v in self.df_lists]
        probs = np.array([v[1] for v in self.df_lists])
        try:
            while True:
                # Pick which child iterator supplies the next data point.
                itr = self.rng.choice(itrs, p=probs)
                (yield next(itr))
        except StopIteration:
            # Stop as soon as any chosen child iterator is exhausted.
            return
def extract_axis_1(data, ind):
    """Gather one element along axis 1 per batch row: data[i, ind[i]]."""
    num_rows = tf.shape(data)[0]
    # Pair each row index with its target position to form gather_nd indices.
    gather_idx = tf.stack([tf.range(num_rows), ind], axis=1)
    return tf.gather_nd(data, gather_idx)
def test_add_loss():
    """Registry smoke test: registering a class under `name` should populate
    LOSS_REG; the entry is removed again to keep the registry clean."""
    (name, type) = ('test', 'loss')  # NOTE: shadows the builtin `type` within this test
    (name, type)  # NOTE(review): looks like a stripped decorator call (e.g. @register(name, type)) meant to decorate Test — as written this line is a no-op and the assert below would fail; confirm against VCS history.

    class Test():
        ...
    assert (name in LOSS_REG), 'Missing item from LOSS registry.'
    LOSS_REG.pop(name)
class SimpleDataset():
    """Image-classification dataset backed by a JSON meta file, with optional
    deterministic random subselection of images and/or classes.  When classes
    are subselected the surviving labels are remapped to 0..n_classes-1."""

    def __init__(self, data_file, transform, target_transform=identity, n_images=(- 1), n_classes=(- 1), seed=0):
        with open(data_file, 'r') as f:
            self.meta = json.load(f)
        self.transform = transform
        self.target_transform = target_transform
        self.meta['image_names'] = np.array(self.meta['image_names'])
        self.meta['image_labels'] = np.array(self.meta['image_labels'])
        n_images_original = len(self)
        n_classes_original = len(np.unique(self.meta['image_labels']))
        # Optionally keep a seeded random subset of images.
        if (n_images > (- 1)):
            random_idxs = np.random.RandomState(seed).permutation(len(self))[:n_images]
            self.meta['image_names'] = self.meta['image_names'][random_idxs]
            self.meta['image_labels'] = self.meta['image_labels'][random_idxs]
        classes = np.unique(self.meta['image_labels'])
        # Optionally keep a seeded random subset of classes, then remap the
        # surviving labels onto a contiguous 0..n_classes-1 range.
        if (n_classes > (- 1)):
            random_idxs = np.random.RandomState(seed).permutation(len(classes))[:n_classes]
            limit_classes = [classes[i] for i in random_idxs]
            idxs_in_limit_classes = np.where(np.in1d(self.meta['image_labels'], limit_classes))
            self.meta['image_names'] = self.meta['image_names'][idxs_in_limit_classes]
            self.meta['image_labels'] = self.meta['image_labels'][idxs_in_limit_classes]
            labels_new = range(n_classes)  # NOTE(review): unused — confirm it can be dropped
            labels_orig = np.unique(self.meta['image_labels'])
            # Old label -> new label mapping (insertion order of labels_orig).
            keyarray = {}
            i = 0
            for label_orig in labels_orig:
                keyarray[label_orig] = i
                i += 1
            # Vectorized remap via a lookup array indexed by old label.
            mp = np.arange(0, (max(self.meta['image_labels']) + 1))
            mp[list(keyarray.keys())] = list(keyarray.values())
            self.meta['image_labels'] = mp[self.meta['image_labels']]
        print('Number of images (sub)selected: {0} / {1}'.format(len(self), n_images_original))
        print('Number of classes (sub)selected: {0} / {1}'.format(len(np.unique(self.meta['image_labels'])), n_classes_original))
        print('Min label: {0} | Max label: {1}'.format(min(self.meta['image_labels']), max(self.meta['image_labels'])))

    def __getitem__(self, i):
        """Load, convert and transform image i; return (image, target)."""
        image_path = os.path.join(self.meta['image_names'][i])
        img = Image.open(image_path).convert('RGB')
        img = self.transform(img)
        target = self.target_transform(self.meta['image_labels'][i])
        return (img, target)

    def __len__(self):
        return len(self.meta['image_names'])
def _get_config(params, arg_name, subfolder): config_name = None for (_i, _v) in enumerate(params): if (_v.split('=')[0] == arg_name): config_name = _v.split('=')[1] del params[_i] break if (config_name is not None): with open(os.path.join(os.path.dirname(__file__), 'config', subfolder, '{}.yaml'.format(config_name)), 'r') as f: try: config_dict = yaml.load(f) except yaml.YAMLError as exc: assert False, '{}.yaml error: {}'.format(config_name, exc) return config_dict
def get_mask_pallete(npimg, dataset='detail'):
    """Convert a label mask to a palettized PIL image for visualization."""
    if dataset == 'pascal_voc':
        # Remap VOC class 21 to 255 (mutates the input array in place,
        # matching the original behavior).
        npimg[npimg == 21] = 255
    out_img = Image.fromarray(npimg.squeeze().astype('uint8'))
    # Choose the palette matching the dataset, if any.
    if dataset == 'ade20k':
        palette = adepallete
    elif dataset == 'citys':
        palette = citypallete
    elif dataset in ('detail', 'pascal_voc', 'pascal_aug'):
        palette = vocpallete
    else:
        palette = None
    if palette is not None:
        out_img.putpalette(palette)
    return out_img
class Cider():
    """CIDEr captioning metric, a thin wrapper around CiderScorer."""

    def __init__(self, n=4, df='corpus'):
        self._n = n  # maximum n-gram size
        self._df = df  # document-frequency mode
        self.cider_scorer = CiderScorer(n=self._n, df_mode=self._df)

    def compute_score(self, gts, res):
        """Score hypotheses in `res` against references in `gts`.

        Every res entry must carry exactly one caption and every image must
        have at least one reference.  Returns (corpus score, per-image scores).
        """
        self.cider_scorer.clear()
        for entry in res:
            hypo = entry['caption']
            refs = gts[entry['image_id']]
            assert (type(hypo) is list)
            assert (len(hypo) == 1)
            assert (type(refs) is list)
            assert (len(refs) > 0)
            self.cider_scorer += (hypo[0], refs)
        return self.cider_scorer.compute_score()

    def method(self):
        return 'CIDEr'
def gen_evalset(args):
    """Generate and save a fixed EMNIST evaluation set of image-to-task batches.

    Seeds torch with args.eval_seed for reproducibility, converts each test
    batch via img_to_task, re-seeds from the wall clock, and writes the
    batches to <evalsets_path>/emnist/<c1>-<c2>[_<t_noise>].tar.
    """
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    eval_ds = EMNIST(train=False, class_range=args.class_range)
    eval_loader = torch.utils.data.DataLoader(eval_ds, batch_size=args.eval_batch_size, shuffle=False, num_workers=4)
    batches = []
    for (x, _) in tqdm(eval_loader):
        batches.append(img_to_task(x, t_noise=args.t_noise, max_num_points=args.max_num_points))
    # Restore nondeterminism after the fixed-seed generation.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    path = osp.join(evalsets_path, 'emnist')
    if (not osp.isdir(path)):
        os.makedirs(path)
    (c1, c2) = args.class_range
    filename = f'{c1}-{c2}'
    if (args.t_noise is not None):
        filename += f'_{args.t_noise}'
    filename += '.tar'
    torch.save(batches, osp.join(path, filename))
def imread_indexed(filename):
    """Load a palettized image; return (label array, palette as an (N, 3) array)."""
    image = Image.open(filename)
    # First channel only: indexed images store the label in a single plane.
    labels = np.atleast_3d(image)[(..., 0)]
    palette = np.array(image.getpalette()).reshape(((- 1), 3))
    return (labels, palette)
class ClapModel(metaclass=DummyObject):
    """Import-time placeholder for ClapModel.

    Instantiating it raises a helpful error explaining that the ``torch``
    backend is required but unavailable.
    """

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails with an informative ImportError-style message.
        requires_backends(self, ['torch'])
def _return_context(value):
    """Decode an FFI return value into a TVMContext.

    The 64-bit integer payload packs two native-endian 32-bit ints:
    (device_type, device_id).
    """
    packed = struct.pack('=q', value.v_int64)
    (device_type, device_id) = struct.unpack('=ii', packed)
    return TVMContext(device_type, device_id)
class CondConv2d(nn.Module):
    """Conditionally parameterized 2-D convolution (CondConv).

    Holds ``num_experts`` flattened weight tensors and mixes them per
    sample with ``routing_weights`` before running a single grouped
    convolution over the whole batch.
    NOTE(review): ``tup_pair``, ``get_padding_value``, ``conv2d_same`` and
    ``get_condconv_initializer`` are project helpers not visible here.
    """
    __constants__ = ['in_channels', 'out_channels', 'dynamic_padding']

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):
        super(CondConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = tup_pair(kernel_size)
        self.stride = tup_pair(stride)
        # Resolve the padding spec; is_padding_dynamic signals that padding
        # must be computed at runtime from the input size ("same"-style).
        (padding_val, is_padding_dynamic) = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
        self.dynamic_padding = is_padding_dynamic
        self.padding = tup_pair(padding_val)
        self.dilation = tup_pair(dilation)
        self.groups = groups
        self.num_experts = num_experts
        # Per-expert weights are stored flattened:
        # (num_experts, out_ch * in_ch//groups * kh * kw).
        self.weight_shape = ((self.out_channels, (self.in_channels // self.groups)) + self.kernel_size)
        weight_num_param = 1
        for wd in self.weight_shape:
            weight_num_param *= wd
        self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))
        if bias:
            self.bias_shape = (self.out_channels,)
            # One bias vector per expert, mixed the same way as the weights.
            self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Initialize each expert as if it were a standalone conv layer.
        init_weight = get_condconv_initializer(partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape)
        init_weight(self.weight)
        if (self.bias is not None):
            fan_in = np.prod(self.weight_shape[1:])
            bound = (1 / math.sqrt(fan_in))
            init_bias = get_condconv_initializer(partial(nn.init.uniform_, a=(- bound), b=bound), self.num_experts, self.bias_shape)
            init_bias(self.bias)

    def forward(self, x, routing_weights):
        # routing_weights: per-sample expert mixing coefficients,
        # presumably shape (B, num_experts) — TODO confirm against caller.
        (B, C, H, W) = x.shape
        # Mix expert weights per sample: (B, E) @ (E, P) -> (B, P).
        weight = torch.matmul(routing_weights, self.weight)
        new_weight_shape = (((B * self.out_channels), (self.in_channels // self.groups)) + self.kernel_size)
        weight = weight.view(new_weight_shape)
        bias = None
        if (self.bias is not None):
            bias = torch.matmul(routing_weights, self.bias)
            bias = bias.view((B * self.out_channels))
        # Fold the batch into the channel dim and use groups = B * groups so
        # each sample is convolved with its own mixed weights in one call.
        x = x.view(1, (B * C), H, W)
        if self.dynamic_padding:
            out = conv2d_same(x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=(self.groups * B))
        else:
            out = F.conv2d(x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=(self.groups * B))
        # Restore the (B, out_channels, H', W') layout.
        out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[(- 2)], out.shape[(- 1)])
        return out