code
stringlengths
101
5.91M
class ThresholdParameter(message.Message):
    # Generated protocol-buffer message class (Python 2 style protobuf codegen):
    # the reflection metaclass builds the message fields from DESCRIPTOR.
    # NOTE(review): `__metaclass__` is ignored under Python 3, so this class
    # only works with py2-era protobuf generated-code conventions — confirm
    # the target runtime before reuse.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _THRESHOLDPARAMETER
# NOTE(review): the original file contained a bare `(version='2.0')` expression
# here, which is not valid Python on its own — almost certainly the argument
# list of a registration decorator whose name and leading '@' were lost.
# Preserved as a comment; restore the decorator from version control if needed.
class TensorflowModelZooBertDataLoader(DefaultDataLoader):
    """Dataloader for TensorFlow Model Zoo BERT models.

    Batches each sample's (input_ids, input_mask, segment_ids) triple into
    three parallel lists and yields (inputs, label) pairs.
    """

    def _generate_dataloader(self, dataset, batch_size, last_batch, collate_fn,
                             sampler, batch_sampler, num_workers, pin_memory,
                             shuffle, distributed):
        # Shuffling is not implemented for this loader; warn and ignore.
        if shuffle:
            logging.warning('Shuffle is not supported yet in '
                            'TensorflowBertDataLoader, ignoring shuffle keyword.')

        def bert_collate_fn(batch):
            # Each element looks like ((input_ids, input_mask, segment_ids), label);
            # gather the three input tensors across the batch.
            input_ids = []
            input_mask = []
            segment_ids = []
            for elem in batch:
                input_ids.append(elem[0][0][0])
                input_mask.append(elem[0][1][0])
                segment_ids.append(elem[0][2][0])
            inputs = [input_ids, input_mask, segment_ids]
            # The whole batch shares the first sample's label.
            return (inputs, batch[0][1])

        # 'rollover' keeps the final partial batch; anything else drops it.
        drop_last = False if last_batch == 'rollover' else True
        sampler = self._generate_sampler(dataset, distributed)
        self.batch_sampler = BatchSampler(sampler, batch_size, drop_last)
        self.fetcher = FETCHERS[self.dataset_type](dataset, bert_collate_fn,
                                                   drop_last, distributed)
        # (Removed an unused `inputs = []` local present in the original.)
        for batched_indices in self.batch_sampler:
            try:
                data = self.fetcher(batched_indices)
                yield data
            except StopIteration:
                return
class ResNeXtUnit(nn.Module):
    """ResNeXt unit: grouped bottleneck body plus an identity shortcut,
    followed by a ReLU. A 1x1 projection replaces the plain identity when
    the channel count or stride changes."""

    def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width):
        super(ResNeXtUnit, self).__init__()
        # A projection is required whenever the shortcut cannot be a no-op.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = ResNeXtBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        shortcut = self.identity_conv(x) if self.resize_identity else x
        out = self.body(x)
        out = out + shortcut
        return self.activ(out)
def restore_all_mat_props() -> None:
    """Re-apply every saved material property set to its material."""
    for name, props in _SAVED_MATERIALS.items():
        set_mat_props(name, props)
class MatrixTypeNode(ExprNode):
    """IR expression node representing a matrix type."""

    def __init__(self, parse_info=None, raw_text=None):
        super().__init__(IRNodeType.MatrixType, parse_info=parse_info, raw_text=raw_text)
        # Row and column dimension identifiers (filled in later by the parser).
        self.id1 = None
        self.id2 = None
        # Element type of the matrix entries.
        self.type = None
class KerasONNXRuntimeINCMetic(ONNXRuntimeINCMetic):
    """ONNXRuntime INC metric adapter that feeds TensorFlow tensors to a
    Keras-style metric."""

    def stack(self, preds, labels):
        # Reuse the parent's stacking, then convert to tf tensors so that
        # downstream Keras metric updates accept them.
        (preds, labels) = super().stack(preds, labels)
        preds = tf.convert_to_tensor(preds)
        labels = tf.convert_to_tensor(labels)
        return (preds, labels)

    def to_scalar(self, tensor):
        # tf scalar tensors support float() directly.
        return float(tensor)
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, pattern, if_all):
    """Convert text examples into BERT InputFeatures, inserting the pattern's
    top-attribution tokens into each example at proportionally scaled positions.

    NOTE(review): assumes `pattern` provides 'max_combined_attr',
    'top1position', 'top2position', 'tokens' and 'seg_pos' — confirm against
    the code that builds `pattern`.
    """
    if label_list:
        label_map = {label: i for (i, label) in enumerate(label_list)}
    else:
        # No label list -> regression task; labels parsed as floats below.
        label_map = None
    max_len_count = 0
    features = []
    tokenslist = []
    # Use only top-1 positions when top-2 attribution is comparatively weak
    # (< 20% of top-1) and we are not forced to use all positions.
    if (((pattern['max_combined_attr'][1] / pattern['max_combined_attr'][0]) < 0.2) and (not if_all)):
        target_position = sorted(list(set(pattern['top1position'])))
    else:
        target_position = sorted(list(set((pattern['top1position'] + pattern['top2position']))))
    # Drop special tokens; remember which actual tokens will be inserted.
    clean_position = []
    added_tokens = []
    for i in range(len(target_position)):
        if ((pattern['tokens'][target_position[i]] != '[CLS]') and (pattern['tokens'][target_position[i]] != '[SEP]')):
            clean_position.append(target_position[i])
            added_tokens.append(pattern['tokens'][target_position[i]])
    target_position = clean_position
    # Normalise each position to a [0, 1] fraction within its own segment
    # (type 0 = first segment / text_a, type 1 = second segment / text_b).
    position_type = []
    for i in range(len(target_position)):
        if (target_position[i] < pattern['seg_pos']):
            position_type.append(0)
            target_position[i] = (target_position[i] / (pattern['seg_pos'] - 1))
        elif (target_position[i] > pattern['seg_pos']):
            position_type.append(1)
            target_position[i] = ((target_position[i] - pattern['seg_pos']) / ((len(pattern['tokens']) - 2) - pattern['seg_pos']))
        else:
            # A position exactly at the segment boundary ([SEP]) should have
            # been filtered out above.
            print('tar_postion should not be seg_pos !')
            exit(0)
    w_token_a_count = position_type.count(0)
    w_token_b_count = position_type.count(1)
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        origin_len_a = len(tokens_a)
        prev_pos = (- 1)
        # Insert first-segment tokens at their scaled positions, bumping
        # forward by one on collisions so no two land on the same index.
        for i in range(w_token_a_count):
            cur_pos = round(((origin_len_a + w_token_a_count) * target_position[i]))
            if (prev_pos == cur_pos):
                cur_pos += 1
            prev_pos = cur_pos
            tokens_a.insert(cur_pos, added_tokens[i])
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            origin_len_b = len(tokens_b)
            prev_pos = (- 1)
            for i in range(w_token_a_count, len(position_type)):
                cur_pos = round(((origin_len_b + w_token_b_count) * target_position[i]))
                if (prev_pos == cur_pos):
                    cur_pos += 1
                prev_pos = cur_pos
                tokens_b.insert(cur_pos, added_tokens[i])
            # Reserve room for [CLS], [SEP], [SEP].
            _truncate_seq_pair(tokens_a, tokens_b, (max_seq_length - 3))
        elif (len(tokens_a) > (max_seq_length - 2)):
            tokens_a = tokens_a[:(max_seq_length - 2)]
        tokens = ((['[CLS]'] + tokens_a) + ['[SEP]'])
        # Baseline sequence of [UNK]s of the same length (attribution baseline).
        base_tokens = ((['[UNK]'] + (['[UNK]'] * len(tokens_a))) + ['[UNK]'])
        segment_ids = ([0] * len(tokens))
        if tokens_b:
            tokens += (tokens_b + ['[SEP]'])
            base_tokens += ((['[UNK]'] * len(tokens_b)) + ['[UNK]'])
            segment_ids += ([1] * (len(tokens_b) + 1))
        if (len(tokens) == 128):
            max_len_count += 1
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
        input_mask = ([1] * len(input_ids))
        # Zero-pad everything up to max_seq_length.
        padding = ([0] * (max_seq_length - len(input_ids)))
        input_ids += padding
        baseline_ids += padding
        input_mask += padding
        segment_ids += padding
        assert (len(baseline_ids) == max_seq_length)
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        if label_map:
            label_id = label_map[example.label]
        else:
            label_id = float(example.label)
        if (ex_index < 2):
            # Log the first two examples for debugging.
            logger.debug('*** Example ***')
            logger.debug(('guid: %s' % example.guid))
            logger.debug(('tokens: %s' % ' '.join([str(x) for x in tokens])))
            logger.debug(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
            logger.debug(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
            logger.debug(('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])))
            logger.debug(('label: %s (id = %d)' % (example.label, label_id)))
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, tokens=tokens, baseline_ids=baseline_ids))
        tokenslist.append(tokens)
    print((('max_length_count is ' + str(max_len_count)) + '\n'))
    return (features, tokenslist)
class _3DUNET_PyTorch_SUT():
    """MLPerf LoadGen System-Under-Test wrapper around a PyTorch 3D-UNet model
    evaluated against the BraTS QSL."""

    def __init__(self, model, preprocessed_data_dir, performance_count, folds, checkpoint_name):
        # NOTE(review): `folds` and `checkpoint_name` are accepted but never
        # used here — presumably consumed by sibling SUT variants; confirm.
        print('Loading PyTorch model...')
        self.model = model
        self.device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
        print('Constructing SUT...')
        self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries, self.process_latencies)
        print('Finished constructing SUT.')
        self.qsl = get_brats_QSL(preprocessed_data_dir, performance_count)

    def issue_queries(self, query_samples):
        # Run inference sample-by-sample and hand each result back to LoadGen.
        with torch.no_grad():
            for i in range(len(query_samples)):
                data = self.qsl.get_features(query_samples[i].index)
                print('Processing sample id {:d} with shape = {:}'.format(query_samples[i].index, data.shape))
                # Add a batch dimension before running the model.
                image = torch.from_numpy(data[(np.newaxis, ...)]).float().to(self.device)
                # First output tensor, cast to fp16 for the response payload.
                output = self.model(image)[0].cpu().numpy().astype(np.float16)
                response_array = array.array('B', output.tobytes())
                bi = response_array.buffer_info()
                # buffer_info() -> (address, length) as LoadGen expects.
                response = lg.QuerySampleResponse(query_samples[i].id, bi[0], bi[1])
                lg.QuerySamplesComplete([response])

    def flush_queries(self):
        # Nothing is buffered; required by the LoadGen SUT interface.
        pass

    def process_latencies(self, latencies_ns):
        # Latency aggregation is handled by LoadGen itself.
        pass
def eval_func_onnx(model, dataloader, metric, postprocess=None):
    """Run an ONNX ModelProto over `dataloader`, accumulate `metric`, and
    return `metric.result()`. `postprocess`, if given, transforms each
    (output, label) pair before the metric update."""
    metric.reset()
    session = ort.InferenceSession(model.SerializeToString(),
                                   providers=ort.get_available_providers())
    feed_names = [inp.name for inp in session.get_inputs()]
    for inputs, label in dataloader:
        outputs = session.run(None, dict(zip(feed_names, [inputs])))
        if postprocess:
            outputs, label = postprocess((outputs, label))
        metric.update(outputs, label)
    return metric.result()
# NOTE(review): the original line read `_processor('blip_question')` — a bare
# call whose leading '@' was evidently lost in transit; restored here as a
# class decorator so the processor is actually registered. Confirm the
# registry helper's real name against the surrounding module.
@_processor('blip_question')
class BlipQuestionProcessor(BaseProcessor):
    """Question preprocessor for BLIP: lowercases, strips punctuation and
    truncates to at most `max_words` words."""

    def __init__(self, max_words=50):
        self.max_words = max_words

    def __call__(self, question):
        return self.pre_question(question)

    @classmethod
    def from_config(cls, cfg=None):
        # BUG FIX: this alternate constructor takes `cls` but had no
        # @classmethod decorator, so calling it on the class would have passed
        # the config object as `cls`.
        if cfg is None:
            cfg = OmegaConf.create()
        max_words = cfg.get('max_words', 50)
        return cls(max_words=max_words)

    def pre_question(self, question):
        # Strip punctuation, drop trailing spaces, then truncate by word count.
        question = re.sub('([.!\\"()*#:;~])', '', question.lower())
        question = question.rstrip(' ')
        question_words = question.split(' ')
        if len(question_words) > self.max_words:
            question = ' '.join(question_words[:self.max_words])
        return question
def main():
    """CLI entry point for distributed (DDP) nnU-Net training/validation.

    Parses the network/trainer/task/fold arguments, builds the trainer from
    the default configuration, then either runs learning-rate search,
    training (optionally resumed or warm-started from pretrained weights),
    or validation only.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('network')
    parser.add_argument('network_trainer')
    parser.add_argument('task', help='can be task name or task id')
    parser.add_argument('fold', help="0, 1, ..., 5 or 'all'")
    parser.add_argument('-val', '--validation_only', help='use this if you want to only run the validation', action='store_true')
    parser.add_argument('-c', '--continue_training', help='use this if you want to continue a training', action='store_true')
    parser.add_argument('-p', help='plans identifier. Only change this if you created a custom experiment planner', default=default_plans_identifier, required=False)
    parser.add_argument('--use_compressed_data', default=False, action='store_true', help='If you set use_compressed_data, the training cases will not be decompressed. Reading compressed data is much more CPU and RAM intensive and should only be used if you know what you are doing', required=False)
    parser.add_argument('--deterministic', help="Makes training deterministic, but reduces training speed substantially. I (Fabian) think this is not necessary. Deterministic training will make you overfit to some random seed. Don't use that.", required=False, default=False, action='store_true')
    parser.add_argument('-gpus', help='number of gpus', required=True, type=int)
    parser.add_argument('--dbs', required=False, default=False, action='store_true', help='distribute batch size. If True then whatever batch_size is in plans will be distributed over DDP models, if False then each model will have batch_size for a total of GPUs*batch_size')
    parser.add_argument('--npz', required=False, default=False, action='store_true', help='if set then nnUNet will export npz files of predicted segmentations in the vlaidation as well. This is needed to run the ensembling step so unless you are developing nnUNet you should enable this')
    parser.add_argument('--valbest', required=False, default=False, action='store_true', help='')
    parser.add_argument('--find_lr', required=False, default=False, action='store_true', help='')
    parser.add_argument('--fp32', required=False, default=False, action='store_true', help='disable mixed precision training and run old school fp32')
    parser.add_argument('--val_folder', required=False, default='validation_raw', help='name of the validation folder. No need to use this for most people')
    parser.add_argument('--disable_saving', required=False, action='store_true', help='If set nnU-Net will not save any parameter files. Useful for development when you are only interested in the results and want to save some disk space')
    parser.add_argument('--disable_postprocessing_on_folds', required=False, action='store_true', help='Running postprocessing on each fold only makes sense when developing with nnU-Net and closely observing the model performance on specific configurations. You do not need it when applying nnU-Net because the postprocessing for this will be determined only once all five folds have been trained and nnUNet_find_best_configuration is called. Usually running postprocessing on each fold is computationally cheap, but some users have reported issues with very large images. If your images are large (>600x600x600 voxels) you should consider setting this flag.')
    parser.add_argument('-pretrained_weights', type=str, required=False, default=None, help='path to nnU-Net checkpoint file to be used as pretrained model (use .model file, for example model_final_checkpoint.model). Will only be used when actually training. Optional. Beta. Use with caution.')
    args = parser.parse_args()
    task = args.task
    fold = args.fold
    network = args.network
    network_trainer = args.network_trainer
    validation_only = args.validation_only
    plans_identifier = args.p
    disable_postprocessing_on_folds = args.disable_postprocessing_on_folds
    use_compressed_data = args.use_compressed_data
    decompress_data = (not use_compressed_data)
    deterministic = args.deterministic
    valbest = args.valbest
    find_lr = args.find_lr
    num_gpus = args.gpus
    fp32 = args.fp32
    val_folder = args.val_folder
    # Numeric task ids are mapped to their full 'TaskXXX_...' name.
    if (not task.startswith('Task')):
        task_id = int(task)
        task = convert_id_to_task_name(task_id)
    # fold stays the string 'all' or becomes an int fold index.
    if (fold == 'all'):
        pass
    else:
        fold = int(fold)
    (plans_file, output_folder_name, dataset_directory, batch_dice, stage, trainer_class) = get_default_configuration(network, task, network_trainer, plans_identifier)
    if (trainer_class is None):
        raise RuntimeError('Could not find trainer class')
    if (network == '3d_cascade_fullres'):
        assert issubclass(trainer_class, nnUNetTrainerCascadeFullRes), 'If running 3d_cascade_fullres then your trainer class must be derived from nnUNetTrainerCascadeFullRes'
    else:
        assert issubclass(trainer_class, nnUNetTrainer), 'network_trainer was found but is not derived from nnUNetTrainer'
    trainer = trainer_class(plans_file, fold, output_folder=output_folder_name, dataset_directory=dataset_directory, batch_dice=batch_dice, stage=stage, unpack_data=decompress_data, deterministic=deterministic, distribute_batch_size=args.dbs, num_gpus=num_gpus, fp16=(not fp32))
    if args.disable_saving:
        # Turn off all checkpoint writing for development runs.
        trainer.save_latest_only = False
        trainer.save_intermediate_checkpoints = False
        trainer.save_best_checkpoint = False
        trainer.save_final_checkpoint = False
    trainer.initialize((not validation_only))
    if find_lr:
        trainer.find_lr()
    else:
        if (not validation_only):
            if args.continue_training:
                trainer.load_latest_checkpoint()
            elif ((not args.continue_training) and (args.pretrained_weights is not None)):
                # Fresh training warm-started from a pretrained checkpoint.
                load_pretrained_weights(trainer.network, args.pretrained_weights)
            else:
                pass
            trainer.run_training()
        elif valbest:
            trainer.load_best_checkpoint(train=False)
        else:
            trainer.load_final_checkpoint(train=False)
        trainer.network.eval()
        trainer.validate(save_softmax=args.npz, validation_folder_name=val_folder, run_postprocessing_on_folds=(not disable_postprocessing_on_folds))
        if (network == '3d_lowres'):
            # The low-res model feeds the next cascade stage.
            print('predicting segmentations for the next stage of the cascade')
            predict_next_stage(trainer, join(dataset_directory, (trainer.plans['data_identifier'] + ('_stage%d' % 1))))
def hard_intersection(left: TBoxTensor, right: TBoxTensor) -> TBoxTensor:
    """Hard box intersection: elementwise max of the lower corners (z) and
    elementwise min of the upper corners (Z), packed via left.from_zZ."""
    lower = torch.max(left.z, right.z)
    upper = torch.min(left.Z, right.Z)
    return left.from_zZ(lower, upper)
def collate_wrapper(x, y, edge_index, edge_attr, device, return_y=True):
    """Collate batch-first sequences into a time-ordered list of PyG Batches.

    NOTE(review): assumes x and y index as (batch, T, N, ...) so that after
    the transpose the leading dims are (T, N) — confirm with callers.
    """
    x = torch.tensor(x, dtype=torch.float, device=device)
    y = torch.tensor(y, dtype=torch.float, device=device)
    # Make time the leading dimension.
    x = x.transpose(dim0=1, dim1=0)
    y_T_first = y.transpose(dim0=1, dim1=0)
    T = x.size()[0]
    N = x.size()[1]
    sequences = []
    for t in range(T):
        cur_batch_x = x[t]
        cur_batch_y = y_T_first[t]
        # One Data object per sample; all share the same static graph
        # (edge_index / edge_attr).
        batch = Batch.from_data_list([Data(x=cur_batch_x[i], edge_index=edge_index, edge_attr=edge_attr, y=cur_batch_y[i]) for i in range(N)])
        sequences.append(batch)
    if return_y:
        # y is returned batch-first (untransposed).
        return (SimpleBatch(sequences), y)
    else:
        return SimpleBatch(sequences)
class Dictionary(object):
    """Bidirectional token<->index vocabulary with frequency counts and
    pickle-based persistence under `save_dir`."""

    def __init__(self, save_dir):
        self.idx2token = {}
        self.token2idx = {}
        self.word_freq = {}  # idx -> occurrence count
        self.special = []    # tokens registered via add_specials
        self.save_dir = save_dir

    def save(self, save_dir=None):
        """Persist all tables as pickles; optionally switch the directory."""
        if save_dir is not None:
            self.save_dir = save_dir
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        save(self.idx2token, (self.save_dir + '/idx2token.pkl'))
        save(self.token2idx, (self.save_dir + '/token2idx.pkl'))
        save(self.word_freq, (self.save_dir + '/word_freq.pkl'))
        save(self.special, (self.save_dir + '/special_words.pkl'))

    def load(self):
        """Load all tables previously written by save()."""
        self.idx2token = load((self.save_dir + '/idx2token.pkl'))
        self.token2idx = load((self.save_dir + '/token2idx.pkl'))
        self.word_freq = load((self.save_dir + '/word_freq.pkl'))
        self.special = load((self.save_dir + '/special_words.pkl'))

    def split_words(self, s):
        """Lowercase `s`, blank out stop words, and split on whitespace."""
        s = s.strip().lower()
        for sw in stop_words:
            s = s.replace(sw, ' ')
        return s.split()

    def size(self):
        return len(self.token2idx)

    def add(self, token, idx=None):
        """Add `token` (lowercased); return its index and bump its frequency."""
        token = token.lower()
        if idx is None:
            idx = len(self.token2idx)
        if token not in self.token2idx:
            self.token2idx[token] = idx
            self.idx2token[idx] = token
        idx = self.token2idx[token]
        if idx not in self.word_freq:
            self.word_freq[idx] = 1
        else:
            self.word_freq[idx] += 1
        return idx

    def get_tokens(self):
        return self.token2idx.keys()

    def add_tokens(self, tokens):
        """Add every token; return the list of their indices."""
        vec = []
        for token in tokens:
            vec.append(self.add(token))
        return vec

    def add_specials(self, tokens, idxs):
        """Add special tokens at fixed indices and record them."""
        for (idx, token) in zip(idxs, tokens):
            self.add(token, idx)
        self.special += tokens

    def lookup(self, token, default=cfg.UNK):
        return self.token2idx.get(token, default)

    def has_token(self, token):
        # BUG FIX: dict.has_key() was removed in Python 3 and raised
        # AttributeError here — use the `in` operator instead.
        return token in self.token2idx

    def get_token(self, idx, default=cfg.UNK_WORD):
        return self.idx2token.get(idx, default)

    def merge(self, dic):
        self.add_tokens(dic.get_tokens())

    def convert2idxs(self, tokens, unk_word):
        """Map tokens to indices, falling back to the id of `unk_word`."""
        unk = self.lookup(unk_word)
        vec = [self.lookup(token, unk) for token in tokens]
        return vec

    def convert2tokens(self, idxs, unk_word):
        """Map indices back to tokens, falling back to `unk_word`."""
        tokens = []
        tokens += [self.get_token(idx, unk_word) for idx in idxs]
        return tokens
def cdeint_gde(dX_dt, z0, func_f, func_g, t, adjoint=True, **kwargs):
    """Solve a GDE-style neural controlled differential equation.

    Validates that dX_dt, func_f and z0 have mutually consistent batch and
    channel dimensions, then integrates a VectorFieldGDE with torchdiffeq.

    Args:
        dX_dt: callable t -> derivative of the control path, shape
            (..., input_channels).
        z0: initial hidden state, shape (..., hidden_channels).
        func_f, func_g: the GDE vector-field callables; func_f(z0) must have
            shape (..., hidden_channels, input_channels).
        t: times to evaluate the solution at.
        adjoint: use torchdiffeq's adjoint method (gradients cannot flow
            through the control in that case).
        **kwargs: forwarded to odeint.

    Returns:
        The odeint solution tensor.

    Raises:
        ValueError: on any shape mismatch, or when the control requires grad
            while adjoint=True.
    """
    control_gradient = dX_dt(torch.zeros(1, dtype=z0.dtype, device=z0.device))
    if control_gradient.shape[:-1] != z0.shape[:-1]:
        raise ValueError('dX_dt did not return a tensor with the same number of batch dimensions as z0. dX_dt returned shape {} (meaning {} batch dimensions)), whilst z0 has shape {} (meaning {} batch dimensions).'.format(tuple(control_gradient.shape), tuple(control_gradient.shape[:-1]), tuple(z0.shape), tuple(z0.shape[:-1])))
    vector_field = func_f(z0)
    if vector_field.shape[:-2] != z0.shape[:-1]:
        raise ValueError('func did not return a tensor with the same number of batch dimensions as z0. func returned shape {} (meaning {} batch dimensions)), whilst z0 has shape {} (meaning {} batch dimensions).'.format(tuple(vector_field.shape), tuple(vector_field.shape[:-2]), tuple(z0.shape), tuple(z0.shape[:-1])))
    if vector_field.size(-2) != z0.shape[-1]:
        # BUG FIX: the original formatted this message with `z0.shape.size(-1)`,
        # which raises AttributeError (torch.Size has no .size method) instead
        # of the intended ValueError. Use z0.shape[-1].
        raise ValueError('func did not return a tensor with the same number of hidden channels as z0. func returned shape {} (meaning {} channels), whilst z0 has shape {} (meaning {} channels).'.format(tuple(vector_field.shape), vector_field.size(-2), tuple(z0.shape), z0.shape[-1]))
    if vector_field.size(-1) != control_gradient.size(-1):
        raise ValueError('func did not return a tensor with the same number of input channels as dX_dt returned. func returned shape {} (meaning {} channels), whilst dX_dt returned shape {} (meaning {} channels).'.format(tuple(vector_field.shape), vector_field.size(-1), tuple(control_gradient.shape), control_gradient.size(-1)))
    if control_gradient.requires_grad and adjoint:
        raise ValueError('Gradients do not backpropagate through the control with adjoint=True. (This is a limitation of the underlying torchdiffeq library.)')
    odeint = torchdiffeq.odeint_adjoint if adjoint else torchdiffeq.odeint
    vector_field = VectorFieldGDE(dX_dt=dX_dt, func_f=func_f, func_g=func_g)
    out = odeint(func=vector_field, y0=z0, t=t, **kwargs)
    return out
class MultiInputSequential(nn.Sequential):
    """Sequential container that threads an extra `edge_index` argument
    through graph layers while plain layers only receive the feature tensor."""

    def forward(self, *input):
        multi_inp = False
        if (len(input) > 1):
            multi_inp = True
            # Remember the graph connectivity so it can be re-attached after
            # layers that consume only the feature tensor.
            (_, edge_index) = (input[0], input[1])
        for module in self._modules.values():
            if multi_inp:
                # Heuristic: modules exposing a `weight` attribute (e.g. graph
                # convolutions) are called with (x, edge_index); all others
                # (activations, dropout, ...) get x only, and edge_index is
                # re-appended for the next layer.
                if hasattr(module, 'weight'):
                    input = [module(*input)]
                else:
                    input = [module(input[0]), edge_index]
            else:
                input = [module(*input)]
        return input[0]
def read_labeled(file):
    """Parse a CoNLL-U-style file into {sent_id: [(token_idx, head, label), ...]}.

    Sentence ids come from '# sent_id = ...' comment lines; a blank line ends
    the current sentence. The last tab-separated column may hold several
    'head:label' entries joined by '|'; a bare '_' means no edges.
    """
    labeled_edges = {}
    sent_id = None
    sent_edges = []
    # Fixed: the original iterated `open(file)` directly and leaked the handle.
    with open(file) as fh:
        for line in fh:
            if line.startswith('# sent_id'):
                sent_id = line.strip().split(' = ')[-1]
            if line.strip() == '' and sent_id is not None:
                # End of sentence: commit the collected edges.
                labeled_edges[sent_id] = sent_edges
                sent_edges = []
                sent_id = None
            if line[0].isdigit():
                split = line.strip().split('\t')
                idx = split[0]
                edge_label = split[-1]
                # Fixed: the original used `is not '_'` (identity comparison),
                # which only works by CPython string interning; use equality.
                if edge_label != '_':
                    # split('|') yields a single element when no '|' is
                    # present, so one loop covers both cases of the original.
                    for el in edge_label.split('|'):
                        edge, label = el.split(':', 1)
                        sent_edges.append((idx, edge, label))
    return labeled_edges
def convert_stockholm_to_a3m(stockholm_format: str, max_sequences: Optional[int]=None) -> str:
    """Convert Stockholm MSA text to A3M, keeping at most `max_sequences`."""
    descriptions = {}
    sequences = {}
    reached_max_sequences = False
    # First pass: collect the (possibly line-wrapped) aligned sequences.
    for line in stockholm_format.splitlines():
        reached_max_sequences = (max_sequences and (len(sequences) >= max_sequences))
        if (line.strip() and (not line.startswith(('#', '//')))):
            (seqname, aligned_seq) = line.split(maxsplit=1)
            if (seqname not in sequences):
                if reached_max_sequences:
                    # Ignore new sequences beyond the cap, but keep extending
                    # ones we already started (Stockholm wraps sequences).
                    continue
                sequences[seqname] = ''
            sequences[seqname] += aligned_seq
    # Second pass: pick up '#=GS <name> DE <description>' annotation lines.
    for line in stockholm_format.splitlines():
        if (line[:4] == '#=GS'):
            columns = line.split(maxsplit=3)
            (seqname, feature) = columns[1:3]
            value = (columns[3] if (len(columns) == 4) else '')
            if (feature != 'DE'):
                continue
            if (reached_max_sequences and (seqname not in sequences)):
                continue
            descriptions[seqname] = value
            if (len(descriptions) == len(sequences)):
                break
    # Convert each aligned row to A3M relative to the query (first) row:
    # columns that are gaps in the query become lowercase insertions.
    a3m_sequences = {}
    query_sequence = next(iter(sequences.values()))
    query_non_gaps = [(res != '-') for res in query_sequence]
    for (seqname, sto_sequence) in sequences.items():
        a3m_sequences[seqname] = ''.join(_convert_sto_seq_to_a3m(query_non_gaps, sto_sequence))
    fasta_chunks = (f">{k} {descriptions.get(k, '')}\n{a3m_sequences[k]}" for k in a3m_sequences)
    return ('\n'.join(fasta_chunks) + '\n')
def canon_input_planes(fen):
    """Return the network input planes for `fen`, first flipping the board
    when it is black's turn so positions are always presented from a
    canonical perspective."""
    canonical_fen = maybe_flip_fen(fen, is_black_turn(fen))
    return all_input_planes(canonical_fen)
class TestTransformerDecoder(TensorTestCase):
    """Unit tests for TransformerDecoder: parameter freezing, output size,
    forward regression values, and layer structure."""

    def setUp(self):
        # Small decoder hyperparameters shared by all tests; the fixed seed
        # makes the regression tensors below reproducible.
        self.emb_size = 12
        self.num_layers = 3
        self.hidden_size = 12
        self.ff_size = 24
        self.num_heads = 4
        self.dropout = 0.0
        seed = 42
        torch.manual_seed(seed)

    def test_transformer_decoder_freeze(self):
        # freeze=True must turn off gradients on every parameter.
        decoder = TransformerDecoder(freeze=True)
        for (n, p) in decoder.named_parameters():
            self.assertFalse(p.requires_grad)

    def test_transformer_decoder_output_size(self):
        vocab_size = 11
        decoder = TransformerDecoder(num_layers=self.num_layers, num_heads=self.num_heads, hidden_size=self.hidden_size, ff_size=self.ff_size, dropout=self.dropout, vocab_size=vocab_size)
        if (not hasattr(decoder, 'output_size')):
            self.fail('Missing output_size property.')
        self.assertEqual(decoder.output_size, vocab_size)

    def test_transformer_decoder_forward(self):
        # Regression test: compare decoder outputs and hidden states against
        # recorded values for uniformly initialised parameters.
        batch_size = 2
        src_time_dim = 4
        trg_time_dim = 5
        vocab_size = 7
        trg_embed = torch.rand(size=(batch_size, trg_time_dim, self.emb_size))
        decoder = TransformerDecoder(num_layers=self.num_layers, num_heads=self.num_heads, hidden_size=self.hidden_size, ff_size=self.ff_size, dropout=self.dropout, emb_dropout=self.dropout, vocab_size=vocab_size)
        encoder_output = torch.rand(size=(batch_size, src_time_dim, self.hidden_size))
        for p in decoder.parameters():
            torch.nn.init.uniform_(p, (- 0.5), 0.5)
        src_mask = (torch.ones(size=(batch_size, 1, src_time_dim)) == 1)
        trg_mask = (torch.ones(size=(batch_size, trg_time_dim, 1)) == 1)
        encoder_hidden = None
        decoder_hidden = None
        unrol_steps = None
        (output, states, _, _) = decoder(trg_embed, encoder_output, encoder_hidden, src_mask, unrol_steps, decoder_hidden, trg_mask)
        # Recorded output logits for the fixed seed/init above.
        output_target = torch.Tensor([[[0.1718, 0.5595, (- 0.1996), (- 0.6924), 0.4351, (- 0.085), 0.2805], [0.0666, 0.4923, (- 0.1724), (- 0.6804), 0.3983, (- 0.1111), 0.2194], [(- 0.0315), 0.3673, (- 0.232), (- 0.61), 0.3019, 0.0422, 0.2514], [(- 0.0026), 0.3807, (- 0.2195), (- 0.601), 0.3081, (- 0.0101), 0.2099], [(- 0.0172), 0.3384, (- 0.2853), (- 0.5799), 0.247, 0.0312, 0.2518]], [[0.0284, 0.3918, (- 0.201), (- 0.6472), 0.3646, (- 0.0296), 0.1791], [0.1017, 0.4387, (- 0.2031), (- 0.7084), 0.3051, (- 0.1354), 0.2511], [0.0155, 0.4274, (- 0.2061), (- 0.6702), 0.3085, (- 0.0617), 0.283], [0.0227, 0.4067, (- 0.1697), (- 0.6463), 0.3277, (- 0.0423), 0.2333], [0.0133, 0.4409, (- 0.1186), (- 0.5694), 0.445, 0.029, 0.1643]]])
        self.assertEqual(output_target.shape, output.shape)
        self.assertTensorAlmostEqual(output_target, output)
        # Greedy argmax predictions must match those of the recorded outputs.
        greedy_predictions = output.argmax((- 1))
        expect_predictions = output_target.argmax((- 1))
        self.assertTensorEqual(expect_predictions, greedy_predictions)
        # Recorded final hidden states for the same configuration.
        states_target = torch.Tensor([[[0.037535, 0.53508, 0.049478, (- 0.91961), (- 0.53966), (- 0.10065), 0.43053, (- 0.30671), (- 0.012724), (- 0.41879), 0.59625, 0.11887], [0.13837, 0.46963, (- 0.037059), (- 0.68479), (- 0.46042), (- 0.10072), 0.39374, (- 0.30429), (- 0.054203), (- 0.4368), 0.64257, 0.11424], [0.10263, 0.38331, (- 0.025586), (- 0.64478), (- 0.4586), (- 0.1059), 0.58806, (- 0.28856), 0.011084, (- 0.47479), 0.59094, 0.16089], [0.073408, 0.37701, (- 0.058783), (- 0.62368), (- 0.44201), (- 0.10237), 0.52556, (- 0.30821), (- 0.053345), (- 0.45606), 0.58259, 0.12531], [0.041206, 0.36129, (- 0.012955), (- 0.58638), (- 0.46023), (- 0.094267), 0.55464, (- 0.30029), (- 0.033974), (- 0.48347), 0.54088, 0.12015]], [[0.11017, 0.47179, 0.026402, (- 0.7217), (- 0.39778), (- 0.10226), 0.53498, (- 0.28369), (- 0.11081), (- 0.46096), 0.59517, 0.13531], [0.21947, 0.46407, 0.084276, (- 0.63263), (- 0.44953), (- 0.097334), 0.40321, (- 0.29893), (- 0.10368), (- 0.4576), 0.61378, 0.13509], [0.21437, 0.41372, 0.019859, (- 0.57415), (- 0.45025), (- 0.098621), 0.41182, (- 0.2841), (- 0.0012729), (- 0.48586), 0.62318, 0.14731], [0.19153, 0.38401, 0.026096, (- 0.62339), (- 0.40685), (- 0.097387), 0.41836, (- 0.28648), (- 0.017857), (- 0.47678), 0.62907, 0.17617], [0.031713, 0.37548, (- 0.063005), (- 0.79804), (- 0.36541), (- 0.10398), 0.42991, (- 0.29607), 0., (- 0.45897), 0.61062, 0.16142]]])
        self.assertEqual(states_target.shape, states.shape)
        self.assertTensorAlmostEqual(states_target, states)

    def test_transformer_decoder_layers(self):
        vocab_size = 7
        decoder = TransformerDecoder(num_layers=self.num_layers, num_heads=self.num_heads, hidden_size=self.hidden_size, ff_size=self.ff_size, dropout=self.dropout, vocab_size=vocab_size)
        self.assertEqual(len(decoder.layers), self.num_layers)
        for layer in decoder.layers:
            # Every layer exposes source-target attention, self-attention and
            # a position-wise feed-forward of the configured sizes.
            self.assertTrue(isinstance(layer, TransformerDecoderLayer))
            self.assertTrue(hasattr(layer, 'src_trg_att'))
            self.assertTrue(hasattr(layer, 'trg_trg_att'))
            self.assertTrue(hasattr(layer, 'feed_forward'))
            self.assertEqual(layer.size, self.hidden_size)
            self.assertEqual(layer.feed_forward.pwff_layer[0].in_features, self.hidden_size)
            self.assertEqual(layer.feed_forward.pwff_layer[0].out_features, self.ff_size)
def _linear(args, output_size, bias, bias_initializer=tf.zeros_initializer(), scope=None, kernel_initializer=initializer(), reuse=None):
    """TF1 linear layer: concat `args` along axis 1, multiply by a shared
    weight matrix, and optionally add a bias.

    NOTE(review): `regularizer` and `initializer` are module-level names from
    the surrounding file — confirm their definitions there.
    """
    if ((args is None) or (nest.is_sequence(args) and (not args))):
        raise ValueError('`args` must be specified')
    if (not nest.is_sequence(args)):
        args = [args]
    # Sum the static second dimension of every input tensor.
    total_arg_size = 0
    shapes = [a.get_shape() for a in args]
    for shape in shapes:
        if (shape.ndims != 2):
            raise ValueError(('linear is expecting 2D arguments: %s' % shapes))
        if (shape[1].value is None):
            raise ValueError(('linear expects shape[1] to be provided for shape %s, but saw %s' % (shape, shape[1])))
        else:
            total_arg_size += shape[1].value
    dtype = [a.dtype for a in args][0]
    with tf.variable_scope(scope, reuse=reuse) as outer_scope:
        weights = tf.get_variable('linear_kernel', [total_arg_size, output_size], dtype=dtype, regularizer=regularizer, initializer=kernel_initializer)
        if (len(args) == 1):
            res = math_ops.matmul(args[0], weights)
        else:
            res = math_ops.matmul(array_ops.concat(args, 1), weights)
        if (not bias):
            return res
        # Bias lives in the same scope but with partitioning disabled.
        with tf.variable_scope(outer_scope) as inner_scope:
            inner_scope.set_partitioner(None)
            biases = tf.get_variable('linear_bias', [output_size], dtype=dtype, regularizer=regularizer, initializer=bias_initializer)
            return nn_ops.bias_add(res, biases)
class TestOptions(BaseOptions):
    """Options for the test phase: extends BaseOptions with test-only flags
    and pins test-time defaults."""

    def initialize(self, parser):
        BaseOptions.initialize(self, parser)
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--how_many', type=int, default=float('inf'), help='how many test images to run')
        parser.add_argument('--style_method', type=str, default='random', help='random, fix, unseen, original, none')
        parser.add_argument('--mask_num', type=int, default=0, help='num of segmentation classes to be masked')
        parser.add_argument('--output_dir', type=str, default='views_img_spade_original')
        # Test-time defaults: fixed-size crops, deterministic order, no flips.
        parser.set_defaults(preprocess_mode='scale_width_and_crop', crop_size=256, load_size=256, display_winsize=256)
        parser.set_defaults(serial_batches=True)
        parser.set_defaults(no_flip=True)
        parser.set_defaults(phase='test')
        self.isTrain = False
        return parser
def get_labels(input_shape, xs):
    """Return a two-class one-hot label for `xs`.

    For 1-D inputs the class is the sign of sin(xs) (positive -> class 1);
    for 2-D inputs it is the XOR of the rounded coordinates. Any other input
    shape yields [0, 0].
    """
    one_hot = [0, 0]
    if input_shape == (1,):
        cls = 1 if np.sin(xs) > 0 else 0
        one_hot[cls] = 1
    elif input_shape == (2,):
        cls = int(np.round(xs[0])) ^ int(np.round(xs[1]))
        one_hot[cls] = 1
    return one_hot
def build_dataloader(dataset, imgs_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, **kwargs):
    """Build a (possibly distributed) DataLoader with group-aware sampling.

    In distributed mode every process loads `imgs_per_gpu` samples with
    `workers_per_gpu` workers; otherwise batch size and worker count scale
    with `num_gpus`.
    """
    (rank, world_size) = get_dist_info()
    if dist:
        if shuffle:
            sampler = DistributedGroupSampler(dataset, imgs_per_gpu, world_size, rank)
        else:
            sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
        batch_size = imgs_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = (GroupSampler(dataset, imgs_per_gpu) if shuffle else None)
        batch_size = (num_gpus * imgs_per_gpu)
        num_workers = (num_gpus * workers_per_gpu)
    # Seed workers deterministically per rank when a seed is given.
    init_fn = (partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if (seed is not None) else None)
    data_loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu), pin_memory=False, worker_init_fn=init_fn, **kwargs)
    return data_loader
def override(interface_class):
    """Decorator factory asserting the decorated method overrides a method
    that exists on `interface_class`. The method itself is returned unchanged."""
    def overrider(method):
        # BUG FIX: the original passed the literal string "method.__name__",
        # so the error never named the offending method; interpolate it.
        invalidInputError((method.__name__ in dir(interface_class)),
                          f"{method.__name__} doesn't exist in interface_class")
        return method
    return overrider
def Float2BFloat16Bytes(float_data):
    """Truncate each float32 value to bfloat16 (keep the high 16 bits of the
    IEEE-754 bit pattern, no rounding) and return the raw uint16 bytes."""
    truncated = []
    for value in float_data:
        as_int = struct.unpack('i', struct.pack('f', value))[0]
        truncated.append(as_int >> 16)
    return np.array(truncated).astype(np.uint16).tobytes()
def get_net(cfg: dict) -> nn.ModuleDict:
    """Build a ModuleDict of networks from `cfg`, instantiating each entry
    from the net registry and skipping entries whose value is None."""
    reg.trigger_nets()
    reg.trigger_decoders()
    modules = OrderedDict()
    for key, kwargs in cfg.items():
        if kwargs is not None:
            modules[key] = get_cls(reg.NET_REG, type=key, **kwargs)
    return nn.ModuleDict(modules)
class Sample(object):
    """Accumulates (item-sequence prefix, target, reward, action-slate) samples
    for a recommendation RL model; `real_num_label - 1` serves as the EOS id."""

    def __init__(self):
        self.itemSeq = []   # item-id prefixes (model inputs)
        self.target = []    # next item id (or EOS) per prefix
        self.action = []    # recommendation slate shown at each step
        self.reward = []    # observed reward per step (0 for EOS)
        self.clicked = {}   # dict-as-set of item ids seen as clicks

    def genSample_pred(self, item, reward, action, warmup, real_num_label, rec_len, add_end=True):
        """Expand each session into (prefix -> next item) samples; when
        `add_end`, also append an EOS sample per session. `warmup` is unused."""
        length = len(item)
        for j in range(length):
            seqLen = len(item[j])
            pre_len = 1
            tar_len = (seqLen - pre_len)
            if ((pre_len > 0) and (tar_len > 0)):
                for i in range(pre_len, seqLen):
                    self.itemSeq.append(item[j][:i])
                    self.target.append(item[j][i])
                    self.reward.append(reward[j][i])
                    if add_end:
                        # Extend the slate with the EOS id so its index exists.
                        self.action.append((action[j][i] + [(real_num_label - 1)]))
                    else:
                        self.action.append(action[j][i])
                    if (item[j][(i - 1)] not in self.clicked):
                        self.clicked[item[j][(i - 1)]] = 0
            if add_end:
                # Terminal sample: full session predicts EOS with zero reward
                # and a random slate whose last slot is the EOS id.
                self.itemSeq.append(item[j][:seqLen])
                self.target.append((real_num_label - 1))
                temp = np.random.randint((real_num_label - 1), size=rec_len).tolist()
                temp.append((real_num_label - 1))
                self.action.append(temp)
                self.reward.append(0)
        if add_end:
            # NOTE(review): placement after the session loop is inferred from the
            # collapsed original — marks the EOS id itself as clicked; confirm.
            self.clicked[(real_num_label - 1)] = 0

    def genSample_predtest(self, item, reward, action, window, real_num_label, add_end=False):
        """Like genSample_pred but only keeps steps whose slate has >= 10 items;
        `window` is unused (winLen is fixed to 1)."""
        length = len(item)
        for j in range(length):
            seqLen = len(item[j])
            winLen = 1
            if (winLen > 0):
                for k in range(winLen, seqLen):
                    if (len(action[j][k]) >= 10):
                        self.itemSeq.append(item[j][:k])
                        self.target.append(item[j][k])
                        if add_end:
                            self.action.append((action[j][k] + [(real_num_label - 1)]))
                        else:
                            self.action.append(action[j][k])
                        self.reward.append(reward[j][k])
            # NOTE(review): the string below is dead (commented-out) code kept
            # verbatim from the original; it has no runtime effect.
            ' \n if add_end:\n self.itemSeq.append(item[j][:seqLen])\n self.target.append(real_num_label-1)\n temp = np.random.randint(real_num_label-1,size = 10).tolist()\n temp.append(real_num_label-1)\n self.action.append(temp) \n self.reward.append(0) #If eos, reward=0\n '

    def genSample_generator(self, item, reward, action, real_num_label, rec_len, add_end=False):
        """Whole-session samples for the generator; target [-1] means 'none'."""
        length = len(item)
        for j in range(length):
            if add_end:
                self.itemSeq.append((item[j] + [(real_num_label - 1)]))
                self.reward.append((reward[j] + [0]))
                # NOTE(review): tmp_action aliases action[j]; the append below
                # mutates the caller's list in place — confirm intended.
                tmp_action = action[j]
                tmp_action.append(np.random.randint((real_num_label - 1), size=rec_len).tolist())
                self.action.append(tmp_action)
            else:
                self.itemSeq.append(item[j])
                self.reward.append(reward[j])
                self.action.append(action[j])
            self.target.append([(- 1)])

    def genSample_discriminator(self, item, reward, action, label, real_num_label, add_end=False):
        """Whole-session samples for the discriminator; target is the real/fake
        label supplied by the caller."""
        length = len(item)
        rec_len = len(action[0][0])
        for j in range(length):
            if add_end:
                self.itemSeq.append((item[j] + [(real_num_label - 1)]))
                self.reward.append((reward[j] + [0]))
                # NOTE(review): same in-place mutation of action[j] as above.
                tmp_action = action[j]
                tmp_action.append(np.random.randint((real_num_label - 1), size=rec_len).tolist())
                self.action.append(tmp_action)
            else:
                self.itemSeq.append(item[j])
                self.action.append(action[j])
                self.reward.append(reward[j])
            self.target.append(label[j])

    def batchSample(self, batchstart, batchend):
        """Return (itemSeq, target, reward, action) arrays for one batch slice."""
        return (np.array(self.itemSeq[batchstart:batchend]), np.array(self.target[batchstart:batchend]), np.array(self.reward[batchstart:batchend]), np.array(self.action[batchstart:batchend]))

    def subSample_copy(self, subnum, origin, shuffle=None):
        """Copy `subnum` samples from `origin`, using a fresh random permutation
        when `shuffle` is None, else the given index order."""
        if (shuffle == None):
            index = np.random.permutation(origin.length())
            self.itemSeq = np.array(origin.itemSeq)[index[:subnum]].tolist()
            self.reward = np.array(origin.reward)[index[:subnum]].tolist()
            self.action = np.array(origin.action)[index[:subnum]].tolist()
            self.target = np.array(origin.target)[index[:subnum]].tolist()
        else:
            self.itemSeq = np.array(origin.itemSeq)[shuffle[:subnum]].tolist()
            self.reward = np.array(origin.reward)[shuffle[:subnum]].tolist()
            self.action = np.array(origin.action)[shuffle[:subnum]].tolist()
            self.target = np.array(origin.target)[shuffle[:subnum]].tolist()

    def length(self):
        """Number of accumulated samples."""
        return len(self.itemSeq)
class DynamicsTest(test_util.JAXMDTestCase):
    """Tests for the `minimize` energy-minimization routines on a quadratic well.

    BUGFIX: the two parameterization decorators had been corrupted to bare
    `_parameters(...)` expression statements (a NameError at class-body
    execution); restored as `@parameterized.named_parameters`.
    NOTE(review): assumes `from absl.testing import parameterized` appears in
    the file's import section — confirm.
    """

    @parameterized.named_parameters(test_util.cases_from_list(
        {'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
         'spatial_dimension': dim,
         'dtype': dtype}
        for dim in SPATIAL_DIMENSION for dtype in DTYPE))
    def test_gradient_descent(self, spatial_dimension, dtype):
        """Gradient descent must monotonically reduce energy and distance to R0."""
        key = random.PRNGKey(0)
        for _ in range(STOCHASTIC_SAMPLES):
            (key, split, split0) = random.split(key, 3)
            R = random.uniform(split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
            R0 = random.uniform(split0, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
            energy = (lambda R, **kwargs: np.sum(((R - R0) ** 2)))
            (_, shift_fn) = space.free()
            (opt_init, opt_apply) = minimize.gradient_descent(energy, shift_fn, f32(0.1))
            E_current = energy(R)
            dr_current = np.sum(((R - R0) ** 2))
            for _ in range(OPTIMIZATION_STEPS):
                R = opt_apply(R)
                E_new = energy(R)
                dr_new = np.sum(((R - R0) ** 2))
                assert (E_new < E_current)
                assert (E_new.dtype == dtype)
                assert (dr_new < dr_current)
                assert (dr_new.dtype == dtype)
                E_current = E_new
                dr_current = dr_new

    @parameterized.named_parameters(test_util.cases_from_list(
        {'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
         'spatial_dimension': dim,
         'dtype': dtype}
        for dim in SPATIAL_DIMENSION for dtype in DTYPE))
    def test_fire_descent(self, spatial_dimension, dtype):
        """FIRE minimization (3 steps at a time via fori_loop) must also be
        monotone in energy and distance."""
        key = random.PRNGKey(0)
        for _ in range(STOCHASTIC_SAMPLES):
            (key, split, split0) = random.split(key, 3)
            R = random.uniform(split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
            R0 = random.uniform(split0, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
            energy = (lambda R, **kwargs: np.sum(((R - R0) ** 2)))
            (_, shift_fn) = space.free()
            (opt_init, opt_apply) = minimize.fire_descent(energy, shift_fn)
            opt_state = opt_init(R)
            E_current = energy(R)
            dr_current = np.sum(((R - R0) ** 2))
            step_fn = (lambda i, state: opt_apply(state))

            def three_steps(state):
                return lax.fori_loop(0, 3, step_fn, state)

            for _ in range(OPTIMIZATION_STEPS):
                opt_state = three_steps(opt_state)
                R = opt_state.position
                E_new = energy(R)
                dr_new = np.sum(((R - R0) ** 2))
                assert (E_new < E_current)
                assert (E_new.dtype == dtype)
                assert (dr_new < dr_current)
                assert (dr_new.dtype == dtype)
                E_current = E_new
                dr_current = dr_new
def _gen_missing_api(api, mod_name): def _missing_api(*args, **kwargs): raise ImportError(('API "%s" is not supported by backend "%s". You can switch to other backends by setting the DDE_BACKEND environment.' % (api, mod_name))) return _missing_api
class TestBuilder(unittest.TestCase):
    """End-to-end test of the attention-sampling model builder on two image scales."""

    SHAPE_LARGE = (100, 100, 3)   # full-resolution input
    SHAPE_SMALL = (15, 15, 3)     # low-resolution input fed to the attention net
    _conv_args = dict(kernel_size=3, padding='same', activation='relu')

    def _get_attention(self):
        """Small conv net producing a per-pixel attention distribution."""
        return Sequential([Conv2D(8, input_shape=self.SHAPE_SMALL, **self._conv_args), MaxPool2D(), Conv2D(8, **self._conv_args), Conv2D(1, kernel_size=3, padding='same'), SampleSoftmax(squeeze_channels=True)])

    def _get_feature(self):
        """Conv net mapping a sampled patch to a 64-d feature vector."""
        return Sequential([Conv2D(8, input_shape=self.SHAPE_SMALL, **self._conv_args), MaxPool2D(), Conv2D(16, **self._conv_args), MaxPool2D(), Flatten(), Dense(64, activation='relu')])

    def test_model(self):
        """Build, predict, and fit the composed model; check that all trainable
        weights from both sub-models are exposed."""
        x_full = Input(shape=self.SHAPE_LARGE)
        x_small = Input(shape=self.SHAPE_SMALL)
        attention_model = self._get_attention()
        feature_model = self._get_feature()
        (feature, attention, patches) = attention_sampling(attention_model, feature_model, (15, 15), attention_regularizer=(lambda x: K.sum(x)))([x_small, x_full])
        model = Model(inputs=[x_small, x_full], outputs=[feature])
        model.predict([np.random.rand(10, *self.SHAPE_SMALL), np.random.rand(10, *self.SHAPE_LARGE)])
        model.compile('sgd', 'mse')
        model.fit([np.random.rand(10, *self.SHAPE_SMALL), np.random.rand(10, *self.SHAPE_LARGE)], np.random.rand(10, 64))
        self.assertEqual(len(model.trainable_weights), (len(attention_model.trainable_weights) + len(feature_model.trainable_weights)))
class LossStatistics():
    """Accumulates loss, token/batch counts and timing statistics across
    training steps; rejects NaN losses on construction and update."""

    def __init__(self, loss=0.0, n_tokens=0, n_batch=0, forward_time=0.0,
                 loss_compute_time=0.0, backward_time=0.0):
        self.loss = loss
        if math.isnan(loss):
            raise ValueError('Loss is NaN')
        self.n_tokens = n_tokens
        self.n_batch = n_batch
        self.forward_time = forward_time
        self.loss_compute_time = loss_compute_time
        self.backward_time = backward_time

    def update(self, stat):
        """Fold another LossStatistics into this one."""
        self.loss += stat.loss
        if math.isnan(stat.loss):
            raise ValueError('Loss is NaN')
        self.n_tokens += stat.n_tokens
        self.n_batch += stat.n_batch
        self.forward_time += stat.forward_time
        self.loss_compute_time += stat.loss_compute_time
        self.backward_time += stat.backward_time

    def xent(self):
        """Cross-entropy: average loss per token."""
        assert self.n_tokens > 0, 'n_tokens must be larger than 0'
        return self.loss / self.n_tokens

    def ppl(self):
        """Perplexity, with the exponent capped at 100 to avoid overflow."""
        assert self.n_tokens > 0, 'n_tokens must be larger than 0'
        return math.exp(min(self.loss / self.n_tokens, 100))

    def total_time(self):
        """Return (forward, loss-compute, backward) accumulated seconds."""
        return (self.forward_time, self.loss_compute_time, self.backward_time)

    def clear(self):
        """Reset every accumulator to zero."""
        self.loss = 0.0
        self.n_tokens = 0
        self.n_batch = 0
        self.forward_time = 0.0
        self.loss_compute_time = 0.0
        self.backward_time = 0.0
def format_attention(attention, layers=None, heads=None):
    """Stack per-layer attention tensors into one tensor.

    Optionally selects a subset of layers and heads; each layer tensor must be
    4-D (batch=1, heads, seq, seq) and has its batch axis squeezed away.
    """
    chosen = [attention[idx] for idx in layers] if layers else attention
    per_layer = []
    for layer_att in chosen:
        if layer_att.dim() != 4:
            raise ValueError('The attention tensor does not have the correct number of dimensions. Make sure you set output_attentions=True when initializing your model.')
        squeezed = layer_att.squeeze(0)
        if heads:
            squeezed = squeezed[heads]
        per_layer.append(squeezed)
    return torch.stack(per_layer)
def random_split(valid_pct: float, *arrs: NPArrayableList) -> SplitArrayList:
    """Randomly split the given arrays into train/validation partitions,
    reserving roughly `valid_pct` of the rows for validation."""
    assert 0 <= valid_pct <= 1, 'Validation set percentage should be between 0 and 1'
    draws = np.random.uniform(size=(len(arrs[0]),))
    is_train = draws > valid_pct
    return arrays_split(is_train, *arrs)
def ensure_optimizer_ckpt_params_order(param_groups_names, checkpoint):
    """Reorder each optimizer param group in `checkpoint` to match the current
    model's parameter order given by `param_groups_names` (list of per-group
    name lists). Mutates `checkpoint['optimizer']` in place.
    """
    assert (len(param_groups_names) == len(checkpoint['optimizer']['param_groups']))
    param_lens = (len(g) for g in param_groups_names)
    saved_lens = (len(g['params']) for g in checkpoint['optimizer']['param_groups'])
    if any(((p_len != s_len) for (p_len, s_len) in zip(param_lens, saved_lens))):
        raise ValueError("loaded state dict contains a parameter group that doesn't match the size of optimizer's group")
    # Map each parameter name to its (group index, position-within-group).
    name_to_curpos = {}
    for (i, p_names) in enumerate(param_groups_names):
        for (j, name) in enumerate(p_names):
            name_to_curpos[name] = (i, j)
    # Walk checkpoint['model'] in its own order; cnts[g] tracks how many params
    # of group g have been consumed from the saved optimizer ordering.
    param_groups_inds = [[] for _ in range(len(param_groups_names))]
    cnts = ([0] * len(param_groups_names))
    for key in checkpoint['model']:
        pos = name_to_curpos.get(key)
        if pos:
            saved_p_id = checkpoint['optimizer']['param_groups'][pos[0]]['params'][cnts[pos[0]]]
            # Sanity check: the saved momentum buffer must match the param shape.
            assert (checkpoint['model'][key].shape == checkpoint['optimizer']['state'][saved_p_id]['momentum_buffer'].shape), 'param and momentum_buffer shape mismatch in checkpoint. param_name: {}, param_id: {}'.format(key, saved_p_id)
            param_groups_inds[pos[0]].append(pos[1])
            cnts[pos[0]] += 1
    # Sort each group's saved param ids into the current model's ordering.
    for (cnt, param_inds) in enumerate(param_groups_inds):
        ckpt_params = checkpoint['optimizer']['param_groups'][cnt]['params']
        assert (len(ckpt_params) == len(param_inds))
        ckpt_params = [x for (x, _) in sorted(zip(ckpt_params, param_inds), key=(lambda x: x[1]))]
        checkpoint['optimizer']['param_groups'][cnt]['params'] = ckpt_params
def test_print_log_silent(capsys, caplog):
    """`print_log` with the 'silent' logger must emit nothing to stdout or the log."""
    print_log('welcome', logger='silent')
    (out, _) = capsys.readouterr()
    assert (out == '')
    assert (len(caplog.records) == 0)
def linear_quantize(input, scale, zero_point, inplace=False):
    """Quantize `input` as round(scale * input - zero_point).

    For 4-D (NCHW) or 2-D inputs, `scale`/`zero_point` are reshaped for
    per-channel (first-dim) broadcasting. With `inplace=True` the input
    tensor is modified and returned.
    """
    ndim = len(input.shape)
    if ndim == 4:
        scale = scale.view(-1, 1, 1, 1)
        zero_point = zero_point.view(-1, 1, 1, 1)
    elif ndim == 2:
        scale = scale.view(-1, 1)
        zero_point = zero_point.view(-1, 1)
    if inplace:
        input.mul_(scale).sub_(zero_point).round_()
        return input
    return torch.round(scale * input - zero_point)
def train(train_loader, val_loader, trainval_loader, tracking_module, lr_scheduler, start_iter, tb_logger):
    """One epoch of tracker training with periodic validation and checkpointing.

    Iterates `train_loader`, steps `tracking_module`, logs to TensorBoard every
    `config.print_freq` iterations, and every `config.val_freq` iterations runs
    validation, logs MOT metrics, and saves a checkpoint (tracking the best
    MOTA in the module-level `best_mota`). Relies on module-level `config`,
    `args`, `validate`, `AverageMeter` and `save_checkpoint`.
    """
    global best_mota
    batch_time = AverageMeter(config.print_freq)
    data_time = AverageMeter(config.print_freq)
    losses = AverageMeter(config.print_freq)
    tracking_module.model.train()
    logger = logging.getLogger('global_logger')
    end = time.time()
    for (i, (input, det_info, det_id, det_cls, det_split)) in enumerate(train_loader):
        curr_step = (start_iter + i)
        if (lr_scheduler is not None):
            lr_scheduler.step(curr_step)
            current_lr = lr_scheduler.get_lr()
        data_time.update((time.time() - end))
        input = input.cuda()
        # Move every non-list detection tensor to the GPU.
        if (len(det_info) > 0):
            for (k, v) in det_info.items():
                det_info[k] = (det_info[k].cuda() if (not isinstance(det_info[k], list)) else det_info[k])
        loss = tracking_module.step(input.squeeze(0), det_info, det_id, det_cls, det_split)
        batch_time.update((time.time() - end))
        losses.update(loss.item())
        if (((curr_step + 1) % config.print_freq) == 0):
            tb_logger.add_scalar('loss_train', losses.avg, curr_step)
            logger.info('Iter: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})'.format((curr_step + 1), len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses))
        if ((curr_step > 0) and (((curr_step + 1) % config.val_freq) == 0)):
            logger.info('Evaluation on validation set:')
            (MOTA, MOTP, recall, prec, F1, fp, fn, id_switches) = validate(val_loader, tracking_module, str((curr_step + 1)), part=args.part)
            if (tb_logger is not None):
                tb_logger.add_scalar('prec', prec, curr_step)
                tb_logger.add_scalar('recall', recall, curr_step)
                tb_logger.add_scalar('mota', MOTA, curr_step)
                tb_logger.add_scalar('motp', MOTP, curr_step)
                tb_logger.add_scalar('fp', fp, curr_step)
                tb_logger.add_scalar('fn', fn, curr_step)
                tb_logger.add_scalar('f1', F1, curr_step)
                tb_logger.add_scalar('id_switches', id_switches, curr_step)
                if (lr_scheduler is not None):
                    tb_logger.add_scalar('lr', current_lr, curr_step)
            is_best = (MOTA > best_mota)
            best_mota = max(MOTA, best_mota)
            # Persist model + optimizer state; `is_best` also updates best-model copy.
            save_checkpoint({'step': curr_step, 'score_arch': config.model.score_arch, 'appear_arch': config.model.appear_arch, 'best_mota': best_mota, 'state_dict': tracking_module.model.state_dict(), 'optimizer': tracking_module.optimizer.state_dict()}, is_best, (config.save_path + '/ckpt'))
        end = time.time()
def get_credentials():
    """Return the on-disk credentials overridden by any session credentials.

    A session value of False is a meaningful override, so it is applied too
    (only None / empty values are ignored).
    """
    merged = tools.get_credentials_file()
    session_creds = get_session_credentials()
    for key in merged:
        override = session_creds.get(key)
        if override is False or override:
            merged[key] = override
    return merged
class vgg_ex(nn.Module):
    """Extra VGG-style stack: for each width in `cfg`, a bias-free 3x3 conv
    (given padding/dilation) followed by in-place ReLU.

    Conv weights are initialized from N(0, 0.01); the BatchNorm branch of the
    init loop is kept although no BN layers are built here, in case subclasses
    add them.
    """

    def __init__(self, cfg, incs=512, padding=1, dilation=1):
        super(vgg_ex, self).__init__()
        self.cfg = cfg
        layers = []
        for v in self.cfg:
            conv2d = nn.Conv2d(incs, v, kernel_size=3, padding=padding, dilation=dilation, bias=False)
            layers += [conv2d, nn.ReLU(inplace=True)]
            incs = v
        self.ex = nn.Sequential(*layers)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Removed an unused fan-out computation (`n`) that the original
                # calculated but never used.
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        """Apply the conv/ReLU stack."""
        x = self.ex(x)
        return x
def set_graph_training(graph, train=True):
    """Put every edge module of `graph` into train or eval mode, recursing
    into Segment edges' nested graphs."""
    for edge in graph.edges:
        module = graph.edges[edge]['module']
        if isinstance(module, Segment):
            set_graph_training(module.G, train=train)
        else:
            (module.train if train else module.eval)()
class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-widen_factor) for small images (8x8 final pool)."""

    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, (16 * widen_factor), (32 * widen_factor), (64 * widen_factor)]
        assert (((depth - 4) % 6) == 0)
        # NOTE(review): true division gives a float block count; NetworkBlock
        # presumably converts it to int — consider `//` (confirm).
        n = ((depth - 4) / 6)
        block = BasicBlock
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # NOTE(review): sub_block1 duplicates block1's configuration and is not
        # used in forward(); presumably kept for checkpoint compatibility.
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style init for convs; unit/zero init for BN; zero bias for Linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        """Conv stem -> three wide blocks -> BN/ReLU -> 8x8 avg-pool -> linear."""
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view((- 1), self.nChannels)
        return self.fc(out)
def convert_imagenet_to_tf_records(raw_data_dir: str, output_dir: str) -> None:
    """Convert the raw ImageNet directory layout into sharded TFRecords.

    Training files are shuffled with a fixed seed (reproducible); validation
    files are processed in sorted order with labels read from the labels file.
    """
    import tensorflow as tf
    random.seed(0)  # deterministic shuffle across runs

    def make_shuffle_idx(n):
        order = list(range(n))
        random.shuffle(order)
        return order

    training_files = tf.gfile.Glob(os.path.join(raw_data_dir, TRAINING_DIRECTORY, '*', '*.JPEG'))
    # Synset id is the parent directory name of each training image.
    training_synsets = [os.path.basename(os.path.dirname(f)) for f in training_files]
    training_synsets = list(map((lambda x: bytes(x, 'utf-8')), training_synsets))
    training_shuffle_idx = make_shuffle_idx(len(training_files))
    training_files = [training_files[i] for i in training_shuffle_idx]
    training_synsets = [training_synsets[i] for i in training_shuffle_idx]
    validation_files = sorted(tf.gfile.Glob(os.path.join(raw_data_dir, VALIDATION_DIRECTORY, '*.JPEG')))
    validation_synsets = tf.gfile.FastGFile(os.path.join(raw_data_dir, VALIDATION_LABELS), 'rb').read().splitlines()
    # Labels are 1-based indices over the sorted union of all synsets.
    labels = {v: (k + 1) for (k, v) in enumerate(sorted(set((validation_synsets + training_synsets))))}
    print('Processing the training data.')
    _process_dataset(training_files, training_synsets, labels, os.path.join(output_dir, TRAINING_DIRECTORY), TRAINING_DIRECTORY, TRAINING_SHARDS)
    print('Processing the validation data.')
    _process_dataset(validation_files, validation_synsets, labels, os.path.join(output_dir, VALIDATION_DIRECTORY), VALIDATION_DIRECTORY, VALIDATION_SHARDS)
def test_dyhead():
    """Smoke-test DyHead: output levels match input levels in count, channel
    width, and spatial size; a bare tensor input must be rejected."""
    base = 64
    in_ch, out_ch = 8, 16
    sizes = [base // (2 ** lvl) for lvl in range(4)]
    inputs = [torch.rand(1, in_ch, size, size) for size in sizes]
    neck = DyHead(in_channels=in_ch, out_channels=out_ch, num_blocks=3)
    outputs = neck(inputs)
    assert len(outputs) == len(inputs)
    for lvl, out in enumerate(outputs):
        assert out.shape[1] == out_ch
        assert out.shape[2] == out.shape[3] == base // (2 ** lvl)
    bad_input = torch.rand(1, 8, 4, 4)
    with pytest.raises(AssertionError):
        neck(bad_input)
class RiRUnit(nn.Module):
    """ResNet-in-ResNet unit: parallel residual and transient streams with
    cross-stream 3x3 convs; the residual stream keeps an identity skip."""

    def __init__(self, in_channels, out_channels, stride):
        super(RiRUnit, self).__init__()
        self.resize_identity = in_channels != out_channels or stride != 1
        self.res_pass_conv = conv3x3(in_channels=in_channels, out_channels=out_channels, stride=stride)
        self.trans_pass_conv = conv3x3(in_channels=in_channels, out_channels=out_channels, stride=stride)
        self.res_cross_conv = conv3x3(in_channels=in_channels, out_channels=out_channels, stride=stride)
        self.trans_cross_conv = conv3x3(in_channels=in_channels, out_channels=out_channels, stride=stride)
        self.res_postactiv = PostActivation(in_channels=out_channels)
        self.trans_postactiv = PostActivation(in_channels=out_channels)
        if self.resize_identity:
            # 1x1 projection so the skip matches the new channel count / stride.
            self.identity_conv = conv1x1(in_channels=in_channels, out_channels=out_channels, stride=stride)

    def forward(self, x_res, x_trans):
        skip = self.identity_conv(x_res) if self.resize_identity else x_res
        # Compute cross-stream contributions from the *original* inputs before
        # the pass convs overwrite them.
        cross_from_res = self.res_cross_conv(x_res)
        cross_from_trans = self.trans_cross_conv(x_trans)
        x_res = self.res_pass_conv(x_res)
        x_trans = self.trans_pass_conv(x_trans)
        x_res = x_res + skip + cross_from_trans
        x_trans = x_trans + cross_from_res
        return (self.res_postactiv(x_res), self.trans_postactiv(x_trans))
def build(post_processing_config):
    """Build (non_max_suppressor_fn, score_converter_fn) from a PostProcessing
    proto; raises ValueError for any other config type."""
    if not isinstance(post_processing_config, post_processing_pb2.PostProcessing):
        raise ValueError('post_processing_config not of type post_processing_pb2.Postprocessing.')
    nms_fn = _build_non_max_suppressor(post_processing_config.batch_non_max_suppression)
    score_fn = _build_score_converter(post_processing_config.score_converter,
                                      post_processing_config.logit_scale)
    return (nms_fn, score_fn)
def preprocess_save_to_queue(preprocess_fn, q, list_of_lists, output_files, segs_from_prev_stage, classes, transpose_forward):
    """Worker loop: preprocess each case and push (output_file, (data, dct)) onto `q`.

    Optionally stacks a one-hot segmentation from a previous cascade stage onto
    the preprocessed data. Arrays too large for inter-process pickling are
    spilled to an .npy file and only the path is queued (the consumer loads and
    deletes it). A final 'end' sentinel is always queued.
    """
    errors_in = []
    for (i, l) in enumerate(list_of_lists):
        try:
            output_file = output_files[i]
            print('preprocessing', output_file)
            (d, _, dct) = preprocess_fn(l)
            if (segs_from_prev_stage[i] is not None):
                assert (isfile(segs_from_prev_stage[i]) and segs_from_prev_stage[i].endswith('.nii.gz')), 'segs_from_prev_stage must point to a segmentation file'
                seg_prev = sitk.GetArrayFromImage(sitk.ReadImage(segs_from_prev_stage[i]))
                img = sitk.GetArrayFromImage(sitk.ReadImage(l[0]))
                # Renamed the comparison variables: the original shadowed the
                # enumerate index `i` inside this check.
                assert all((a == b) for (a, b) in zip(seg_prev.shape, img.shape)), ("image and segmentation from previous stage don't have the same pixel array shape! image: %s, seg_prev: %s" % (l[0], segs_from_prev_stage[i]))
                seg_prev = seg_prev.transpose(transpose_forward)
                seg_reshaped = resize_segmentation(seg_prev, d.shape[1:], order=1)
                seg_reshaped = to_one_hot(seg_reshaped, classes)
                d = np.vstack((d, seg_reshaped)).astype(np.float32)
            # multiprocessing pipes cannot transfer pickles larger than ~2 GB,
            # so oversized arrays are written to disk and only the path queued.
            print(d.shape)
            # BUGFIX: the threshold had been corrupted to `.0 / 4 * 0.85` (i.e.
            # 0), which spilled *every* case to disk. Restored the 2 GB pickle
            # limit: 2e9 bytes / 4 bytes per float32 element, with 15% headroom.
            if (np.prod(d.shape) > ((2e9 / 4) * 0.85)):
                print('This output is too large for python process-process communication. Saving output temporarily to disk')
                np.save((output_file[:(- 7)] + '.npy'), d)
                d = (output_file[:(- 7)] + '.npy')
            q.put((output_file, (d, dct)))
        except KeyboardInterrupt:
            raise
        except Exception as e:
            print('error in', l)
            print(e)
            # BUGFIX: failed cases were never recorded, so the summary below
            # always reported success.
            errors_in.append(l)
    q.put('end')
    if (len(errors_in) > 0):
        print('There were some errors in the following cases:', errors_in)
        print('These cases were ignored.')
    else:
        print('This worker has ended successfully, no errors to report')
def get_pkg_version(frontend_pkg):
    """Return the installed version string of `frontend_pkg`, or None.

    Prefers importlib.metadata (Python >= 3.8) and falls back to pkg_resources
    on older interpreters; returns None when the package is not installed.
    """
    try:
        import importlib.metadata
        try:
            return importlib.metadata.version(frontend_pkg)
        except importlib.metadata.PackageNotFoundError:
            # BUGFIX: previously PackageNotFoundError escaped, so on modern
            # Python a missing package crashed instead of returning None.
            return None
    except ModuleNotFoundError:
        pass
    import pkg_resources
    try:
        return pkg_resources.get_distribution(frontend_pkg).version
    except pkg_resources.DistributionNotFound:
        pass
    return None
class XLMProphetNetTokenizer(metaclass=DummyObject):
    """Placeholder tokenizer that raises a helpful error when the
    `sentencepiece` backend is not installed."""
    _backends = ['sentencepiece']  # backends required for the real class

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])
class MetaBatchNorm2d(MetaModule):
    """BatchNorm2d variant for meta-learning: affine parameters are stored as
    buffers wrapped in differentiable Variables (via to_var) so they can be
    updated through the meta-gradient."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Build a throwaway nn.BatchNorm2d only to copy its configuration.
        ignore = nn.BatchNorm2d(*args, **kwargs)
        self.num_features = ignore.num_features
        self.eps = ignore.eps
        self.momentum = ignore.momentum
        self.affine = ignore.affine
        self.track_running_stats = ignore.track_running_stats
        if self.affine:
            # Buffers (not Parameters) so MetaModule can substitute them.
            self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))
            self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(ignore.num_features))
            self.register_buffer('running_var', torch.ones(ignore.num_features))
        else:
            # NOTE(review): registered via register_parameter(None) here while
            # the stats above use register_buffer — presumably intentional to
            # mirror nn.BatchNorm2d; confirm.
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)

    def forward(self, x):
        return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, (self.training or (not self.track_running_stats)), self.momentum, self.eps)

    def named_leaves(self):
        """Leaf tensors exposed to the meta-learner."""
        return [('weight', self.weight), ('bias', self.bias)]
def dobldobl_clear():
    """Clear the double-double precision numerical tropisms data via the
    phcpy C interface."""
    from phcpy.phcpy2c3 import py2c_numbtrop_dobldobl_clear
    py2c_numbtrop_dobldobl_clear()
def get_video_to_frame_path_fn(fn_type: str='idx', zeros: int=8, incr: int=1) -> Callable:
    """Return a callable mapping (video_path, frame_idx) to a frame file path.

    For `fn_type == 'idx'` the path is '<video_path>/<idx+incr>.jpg' with the
    index zero-padded to `zeros` digits; other types are not implemented.
    """
    if fn_type != 'idx':
        raise NotImplementedError(f'{fn_type} unknown.')

    def frame_path(video_path, frame_idx):
        return f'{video_path}/{frame_idx + incr:0{zeros}d}.jpg'

    return frame_path
class CLIPImageDataset(torch.utils.data.Dataset):
    """Dataset of image file paths preprocessed for CLIP (resize, center-crop,
    RGB conversion, tensor conversion, CLIP normalization)."""

    def __init__(self, data):
        self.data = data  # list of image file paths
        self.preprocess = self._transform_test(224)

    def _transform_test(self, n_px):
        # BUGFIX: the Normalize constants had been corrupted (mean mostly 0 and
        # std of all zeros, which divides by zero). Restored the standard CLIP
        # preprocessing statistics — confirm against the original checkpoint.
        return Compose([Resize(n_px, interpolation=Image.BICUBIC),
                        CenterCrop(n_px),
                        (lambda image: image.convert('RGB')),
                        ToTensor(),
                        Normalize((0.48145466, 0.4578275, 0.40821073),
                                  (0.26862954, 0.26130258, 0.27577711))])

    def __getitem__(self, idx):
        c_data = self.data[idx]
        image = Image.open(c_data)
        image = self.preprocess(image)
        return {'image': image}

    def __len__(self):
        return len(self.data)
# NOTE(review): a stray `_type` token preceded this def in the original —
# likely the remnant of a mangled decorator; removed to restore valid syntax.
def gaussian_noise(image, stddev_max=0.1):
    """Add zero-mean Gaussian noise to `image`, with the stddev itself drawn
    uniformly from [0, stddev_max)."""
    stddev = tf.random.uniform([], 0.0, stddev_max)
    noise = tf.random.normal(shape=tf.shape(image), mean=0, stddev=stddev)
    image = (image + noise)
    return image
class Mixed3a(nn.Module):
    """Inception-style stem block: stride-2 3x3 max-pool and stride-2 conv
    applied in parallel, outputs concatenated along channels."""

    def __init__(self):
        super(Mixed3a, self).__init__()
        self.maxpool = nn.MaxPool2d(3, stride=2)
        self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2)

    def forward(self, x):
        pooled = self.maxpool(x)
        convolved = self.conv(x)
        return torch.cat((pooled, convolved), 1)
class SPAdaIN(nn.Module):
    """Spatially-adaptive normalization: normalize `x`, then scale and shift it
    with per-position weights/biases predicted from `addition` by 1x1 convs."""

    def __init__(self, norm, input_nc, planes):
        super(SPAdaIN, self).__init__()
        self.conv_weight = nn.Conv1d(input_nc, planes, 1)
        self.conv_bias = nn.Conv1d(input_nc, planes, 1)
        self.norm = norm(planes)

    def forward(self, x, addition):
        normalized = self.norm(x)
        scale = self.conv_weight(addition)
        shift = self.conv_bias(addition)
        return scale * normalized + shift
class DataLoader():
    """Loads 'problem solution' text pairs from JSONL files in several dataset
    formats (OSS, Evol-Instruct, CodeAlpaca).

    The loaders were originally defined without `self` (so instance calls
    raised TypeError) and triplicated the parsing loop; they are now
    @staticmethod wrappers around a shared helper — class-level calls such as
    `DataLoader.load_data_oss(paths)` keep working unchanged.
    """

    @staticmethod
    def _load_jsonl_pairs(file_paths, problem_key, solution_key):
        """Read JSONL files, skipping blank lines, and join the two fields
        (missing fields default to '') with a single space."""
        texts = []
        for file_path in file_paths:
            with open(file_path, 'r') as file:
                for line in file:
                    line = line.strip()
                    if not line:
                        continue
                    data = json.loads(line)
                    texts.append(data.get(problem_key, '') + ' ' + data.get(solution_key, ''))
        return texts

    @staticmethod
    def load_data_oss(file_paths):
        """OSS format: 'problem' / 'solution' keys."""
        return DataLoader._load_jsonl_pairs(file_paths, 'problem', 'solution')

    @staticmethod
    def load_data_evol(file_paths):
        """Evol-Instruct format: 'instruction' / 'output' keys."""
        return DataLoader._load_jsonl_pairs(file_paths, 'instruction', 'output')

    @staticmethod
    def load_data_codealpaca(file_paths):
        """CodeAlpaca format: 'instruction' / 'response' keys."""
        return DataLoader._load_jsonl_pairs(file_paths, 'instruction', 'response')
def main(folder, version):
    """Rewrite the version link and source URL in every HTML file found
    recursively under `folder`."""
    folder_name = os.path.basename(folder)
    html_pattern = '{}/**/*.html'.format(folder)
    for index_file in glob.glob(html_pattern, recursive=True):
        update_version_link(version, folder_name, index_file)
        update_source_url(version, folder_name, index_file)
class HessianVectorProduct(abc.ABC):
    """Abstract base for regularized Hessian-vector products, computed in
    slices (via sliced_fun) to bound peak memory."""

    def __init__(self, num_slices=1):
        # Populated by subclasses' update_hvp implementations.
        self._target = None
        self._reg_coeff = None
        self._hvp_fun = None
        self._num_slices = num_slices  # slices used by sliced_fun

    def update_hvp(self, f, target, inputs, reg_coeff, name=None):
        """Build the HVP machinery for function `f` w.r.t. `target`.

        Abstract hook: subclasses must populate self._hvp_fun (with key
        'f_hx_plain'), self._target and self._reg_coeff before build_eval is
        usable.
        """

    def build_eval(self, inputs):
        """Return a function mapping a flat vector v to (H + reg_coeff*I) @ v."""
        def _eval(v):
            # Unflatten v into the target's parameter shapes before the HVP.
            xs = tuple(self._target.flat_to_params(v))
            ret = (sliced_fun(self._hvp_fun['f_hx_plain'], self._num_slices)(inputs, xs) + (self._reg_coeff * v))
            return ret
        return _eval

    def __getstate__(self):
        """Drop the compiled (unpicklable) HVP function when pickling."""
        new_dict = self.__dict__.copy()
        del new_dict['_hvp_fun']
        return new_dict
def quaddobl_clear():
    """Clear the quad-double precision numerical tropisms data via the
    phcpy C interface."""
    from phcpy.phcpy2c3 import py2c_numbtrop_quaddobl_clear
    py2c_numbtrop_quaddobl_clear()
class DisparitySampleRangeHead(nn.Module):
    """Computes the per-pixel [min, max] disparity range for sampling.

    In the 'pre' stage the range is the full [0, max_disp]; afterwards the
    previous range is widened just enough to fit `disparity_sample_number`
    samples and clamped back into [0, max_disp].
    """

    def __init__(self, max_disp):
        super(DisparitySampleRangeHead, self).__init__()
        self.max_disp = max_disp

    def forward(self, stage, disparity_sample_number, left, min_disparity=None, max_disparity=None):
        device = left.device
        B, _, H, W = left.shape
        if stage == 'pre':
            # Initial full range per pixel.
            min_disparity = torch.zeros((B, 1, H, W), device=device)
            max_disparity = torch.zeros((B, 1, H, W), device=device) + self.max_disp
        else:
            # Order the incoming bounds, then widen symmetrically when the
            # range is too narrow for the requested number of samples.
            lo = torch.min(min_disparity, max_disparity)
            hi = torch.max(min_disparity, max_disparity)
            overflow = torch.clamp(lo + disparity_sample_number - hi, min=0)
            min_disparity = torch.clamp((lo - overflow) / 2.0, min=0.0, max=self.max_disp)
            max_disparity = torch.clamp((hi + overflow) / 2.0, min=0.0, max=self.max_disp)
        return (min_disparity, max_disparity)
def python_3000_raise_comma(logical_line):
    """pycodestyle-style check: yield W602 for the deprecated
    `raise E, msg` form, allowing the 3-argument re-raise variant."""
    match = RAISE_COMMA_REGEX.match(logical_line)
    if not match:
        return
    if RERAISE_COMMA_REGEX.match(logical_line):
        return
    yield match.end() - 1, 'W602 deprecated form of raising exception'
def _numpy_inference(model, input_sample_list, batch_size): if (batch_size is None): return model(*input_sample_list) else: yhat_list = [] sample_num = input_sample_list[0].shape[0] if (sample_num <= batch_size): return model(*input_sample_list) else: batch_num = math.ceil((sample_num / batch_size)) for batch_id in range(batch_num): yhat_list.append(model(*tuple(map((lambda x: x[(batch_id * batch_size):((batch_id + 1) * batch_size)]), input_sample_list)))) yhat = np.concatenate(yhat_list, axis=0) return yhat
# BUGFIX: the four decorators below had been corrupted to bare
# `.parametrize(...)` fragments (invalid syntax); restored as
# `@pytest.mark.parametrize` — assumes `pytest` is imported at file top.
@pytest.mark.parametrize('stability_threshold', [0.1, 0.01])
@pytest.mark.parametrize('x_lim', [(0, 0.6), ((- 0.2), 0.8)])
@pytest.mark.parametrize('which_energy', ['true', 'pred'])
@pytest.mark.parametrize('backend', ['plotly', 'matplotlib'])
def test_hist_classified_stable_vs_hull_dist(stability_threshold: float, x_lim: tuple[(float, float)], which_energy: Literal[('true', 'pred')], backend: Backend) -> None:
    """The histogram helper must return the right figure type and axis labels
    for both plotting backends."""
    ax = plt.figure().gca()
    # Synthesize a predicted energy-above-hull column from the model output.
    df_wbm[each_pred_col] = ((df_wbm[each_true_col] + df_wbm[models[0]]) - df_wbm[e_form_col])
    ax = hist_classified_stable_vs_hull_dist(df_wbm, each_true_col=each_true_col, each_pred_col=each_pred_col, ax=ax, stability_threshold=stability_threshold, x_lim=x_lim, which_energy=which_energy, backend=backend)
    if (backend == 'matplotlib'):
        assert isinstance(ax, plt.Axes)
        assert (ax.get_ylabel() == 'Number of materials')
    else:
        assert isinstance(ax, go.Figure)
        assert (ax.layout.yaxis.title.text == 'count')
class BertLMHeadModel():
    """Placeholder that raises a helpful error when PyTorch is unavailable."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): defined as an instance method here (upstream dummies
        # typically use @classmethod) — it only ever raises via requires_pytorch.
        requires_pytorch(self)
def get_optimizer(p, model, cluster_head_only=False):
    """Create an SGD or Adam optimizer from config dict `p`.

    With `cluster_head_only`, freezes every parameter whose name does not
    contain 'cluster_head' and optimizes only the remaining ones (expected to
    be weight+bias per head).
    """
    if cluster_head_only:
        for name, param in model.named_parameters():
            param.requires_grad = 'cluster_head' in name
        params = [param for param in model.parameters() if param.requires_grad]
        assert len(params) == 2 * p['num_heads']
    else:
        params = model.parameters()
    opt_name = p['optimizer']
    if opt_name == 'sgd':
        return torch.optim.SGD(params, **p['optimizer_kwargs'])
    if opt_name == 'adam':
        return torch.optim.Adam(params, **p['optimizer_kwargs'])
    raise ValueError('Invalid optimizer {}'.format(opt_name))
class VLBartCOCOCaption(VLBart):
    """VL-BART wrapper for COCO captioning: supervised train step and
    generation-based test step."""

    def __init__(self, config):
        super().__init__(config)

    def train_step(self, batch):
        """Run one supervised step; returns {'loss': scalar}."""
        device = next(self.parameters()).device
        vis_feats = batch['vis_feats'].to(device)
        input_ids = batch['input_ids'].to(device)
        vis_pos = batch['boxes'].to(device)
        lm_labels = batch['target_ids'].to(device)
        reduce_loss = True
        output = self(input_ids=input_ids, vis_inputs=(vis_feats, vis_pos), labels=lm_labels, reduce_loss=reduce_loss)
        # NOTE(review): lm_mask / (B, L) are computed but unused below —
        # presumably leftovers from a per-token loss variant.
        lm_mask = (lm_labels != (- 100))
        (B, L) = lm_labels.size()
        loss = output['loss']
        result = {'loss': loss}
        return result

    def test_step(self, batch, **kwargs):
        """Generate captions for the batch; returns {'pred': list[str]}."""
        device = next(self.parameters()).device
        vis_feats = batch['vis_feats'].to(device)
        input_ids = batch['input_ids'].to(device)
        vis_pos = batch['boxes'].to(device)
        output = self.generate(input_ids=input_ids, vis_inputs=(vis_feats, vis_pos), **kwargs)
        generated_sents = self.tokenizer.batch_decode(output, skip_special_tokens=True)
        result = {}
        result['pred'] = generated_sents
        return result
class TrainFromScratch(Algorithm):
    """Baseline that ignores meta-training and fits a fresh network per task."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.trainable = False
        # A template network whose weights seed the working copy before
        # every evaluation, plus the working copy itself.
        self.baselearner = self.baselearner_fn(**self.baselearner_args).to(self.dev)
        self.model = self.baselearner_fn(**self.baselearner_args).to(self.dev)

    def evaluate(self, num_classes, train_x, train_y, test_x, test_y, **kwargs):
        """Reset the working model, train it on the support set, score on the query set."""
        self.model.load_params(self.baselearner.state_dict())
        if num_classes is not None:
            self.model.modify_out_layer(num_classes)
        optimizer = self.opt_fn(self.model.parameters(), lr=self.lr)
        train_x, train_y, test_x, test_y = put_on_device(
            self.dev, [train_x, train_y, test_x, test_y])
        test_score, loss_history, probs, preds = deploy_on_task(
            model=self.model,
            optimizer=optimizer,
            train_x=train_x,
            train_y=train_y,
            test_x=test_x,
            test_y=test_y,
            T=self.T,
            test_batch_size=self.test_batch_size)
        return test_score, loss_history, probs, preds

    def dump_state(self):
        """Snapshot of the template network's weights."""
        return self.baselearner.state_dict()

    def load_state(self, state):
        """Restore the template network's weights."""
        self.baselearner.load_state_dict(state)
def set_exec_mode(line_: str) -> None:
    """Handle the ``%flow mode`` magic: switch between normal and reactive execution."""
    usage = f'Usage: %flow mode [{ExecutionMode.NORMAL}|{ExecutionMode.REACTIVE}]'
    try:
        requested = ExecutionMode(line_.strip())
    except ValueError:
        warn(usage)
        return
    flow_ = flow()
    flow_.mut_settings.exec_mode = requested
    if requested == ExecutionMode.REACTIVE:
        # Entering reactive mode invalidates the readiness of all cells.
        for cell in cells().current_cells_for_each_id():
            cell.set_ready(False)
        flow_._min_new_ready_cell_counter = flow_.cell_counter() + 1
    comm = flow_._comm
    if comm is not None:
        # Notify the frontend of the successful mode change.
        comm.send({'type': 'set_exec_mode', 'exec_mode': requested.value, 'success': True})
def get_indent(line: str) -> str:
    """Return the leading-indentation prefix of ``line`` ('' when none is found)."""
    match = _re_indent.search(line)
    if match is None:
        return ''
    return match.groups()[0]
class StreamToLogger(object):
    """File-like adapter that forwards complete lines written to it into a logger.

    Partial lines (no trailing newline) are buffered until a newline
    arrives or flush() is called. Unknown attributes are delegated to the
    real stdout so the object can stand in for sys.stdout.
    """

    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def __getattr__(self, attr):
        # Delegate anything not defined here (encoding, isatty, ...) to stdout.
        return getattr(self.terminal, attr)

    def write(self, buf):
        pending = self.linebuf + buf
        self.linebuf = ''
        for piece in pending.splitlines(True):
            # keepends=True lets us tell finished lines from a trailing fragment.
            if piece[-1] == '\n':
                self.logger.log(self.log_level, piece.rstrip())
            else:
                self.linebuf += piece

    def flush(self):
        # Emit any buffered partial line.
        if self.linebuf:
            self.logger.log(self.log_level, self.linebuf.rstrip())
            self.linebuf = ''
class Soft(nn.Module):
    """Soften an attention map: Gaussian-blur it, min-max normalize, and keep
    the element-wise maximum of the blurred and original maps."""

    def __init__(self):
        super(Soft, self).__init__()
        kernel = np.float32(gkern(31, 4))
        # Add (out_channels, in_channels) leading dims expected by conv2d.
        kernel = kernel[np.newaxis, np.newaxis, ...]
        self.gaussian_kernel = Parameter(torch.from_numpy(kernel))

    def forward(self, attention):
        # padding=15 preserves spatial size for the 31x31 kernel.
        blurred = F.conv2d(attention, self.gaussian_kernel, padding=15)
        blurred = min_max_norm(blurred)
        return blurred.max(attention)
class TestLayerWithParam(unittest.TestCase):
    """Forward/backward checks for a python layer with a learned multiplier.

    The net applies two x10 scaling layers, so both the forward output and
    the backpropagated gradient equal 2 * 10 * input.
    """

    def setUp(self):
        net_file = python_param_net_file()
        self.net = caffe.Net(net_file, caffe.TRAIN)
        # The prototxt is a temp file; it is no longer needed once loaded.
        os.remove(net_file)

    def test_forward(self):
        value = 8
        self.net.blobs['data'].data[...] = value
        self.net.forward()
        expected = 2 * 10 * value
        for out in self.net.blobs['mul2'].data.flat:
            self.assertEqual(out, expected)

    def test_backward(self):
        value = 7
        self.net.blobs['mul2'].diff[...] = value
        self.net.backward()
        expected = 2 * 10 * value
        for grad in self.net.blobs['data'].diff.flat:
            self.assertEqual(grad, expected)
class InvertedDoublePendulumEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """MuJoCo inverted double pendulum: balance a two-link pole on a cart."""

    def __init__(self):
        mujoco_env.MujocoEnv.__init__(self, 'inverted_double_pendulum.xml', 5)
        utils.EzPickle.__init__(self)

    def step(self, action):
        self.do_simulation(action, self.frame_skip)
        observation = self._get_obs()
        # Site 0 tracks the pole tip; penalize its distance from (0, 2).
        x, _, y = self.sim.data.site_xpos[0]
        dist_penalty = (0.01 * (x ** 2)) + ((y - 2) ** 2)
        v1, v2 = self.sim.data.qvel[1:3]
        vel_penalty = (0.001 * (v1 ** 2)) + (0.005 * (v2 ** 2))
        alive_bonus = 10
        reward = (alive_bonus - dist_penalty) - vel_penalty
        # Episode ends once the tip falls below y = 1.
        finished = bool(y <= 1)
        return observation, reward, finished, {}

    def _get_obs(self):
        # Angles encoded as sin/cos; velocities and constraint forces clipped.
        return np.concatenate([
            self.sim.data.qpos[:1],
            np.sin(self.sim.data.qpos[1:]),
            np.cos(self.sim.data.qpos[1:]),
            np.clip(self.sim.data.qvel, -10, 10),
            np.clip(self.sim.data.qfrc_constraint, -10, 10),
        ]).ravel()

    def reset_model(self):
        noisy_qpos = self.init_qpos + self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq)
        noisy_qvel = self.init_qvel + (self.np_random.randn(self.model.nv) * 0.1)
        self.set_state(noisy_qpos, noisy_qvel)
        return self._get_obs()

    def viewer_setup(self):
        cam = self.viewer.cam
        cam.trackbodyid = 0
        cam.distance = self.model.stat.extent * 0.5
        cam.lookat[2] = 0.
class MultiScaleD(nn.Module):
    """Bundle of small discriminators, one per feature resolution.

    Logits from each mini-discriminator are flattened and concatenated
    along the feature dimension.
    """

    def __init__(self, channels, resolutions, num_discs=1, proj_type=2, cond=0, separable=False, patch=False, **kwargs):
        super().__init__()
        assert num_discs in [1, 2, 3, 4]
        self.disc_in_channels = channels[:num_discs]
        self.disc_in_res = resolutions[:num_discs]
        Disc = SingleDiscCond if cond else SingleDisc
        discs = {}
        for idx, (cin, res) in enumerate(zip(self.disc_in_channels, self.disc_in_res)):
            # Patch mode always discriminates 16x16 crops regardless of resolution.
            start_sz = 16 if patch else res
            discs[str(idx)] = Disc(nc=cin, start_sz=start_sz, end_sz=8,
                                   separable=separable, patch=patch)
        self.mini_discs = nn.ModuleDict(discs)

    def forward(self, features, c):
        per_disc = [
            disc(features[key], c).view(features[key].size(0), -1)
            for key, disc in self.mini_discs.items()
        ]
        return torch.cat(per_disc, dim=1)
class FNetForPreTraining(metaclass=DummyObject):
    """Import-guard stub for FNetForPreTraining; raises a helpful error on
    instantiation when the 'torch' backend is unavailable."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class HumanoidMimic(env.Env):
    """Brax humanoid environment rewarded for mimicking a reference motion clip.

    The reward is the negative weighted MSE between the simulated and
    reference states (position, rotation, velocity, angular velocity),
    scaled by ``reward_scaling``.
    """

    def __init__(self, system_config, reference_traj, obs_type='timestamp', cyc_len=None, reward_scaling=1.0, rot_weight=1.0, vel_weight=0.0, ang_weight=0.0):
        super().__init__(config=get_system_cfg(system_config))
        self.reference_qp = deserialize_qp(reference_traj)
        self.reference_len = reference_traj.shape[0]
        self.reward_scaling = reward_scaling
        self.obs_type = obs_type
        # Phase counter wraps at cyc_len; defaults to the clip length.
        self.cycle_len = cyc_len if cyc_len is not None else self.reference_len
        self.rot_weight = rot_weight
        self.vel_weight = vel_weight
        self.ang_weight = ang_weight

    def reset(self, rng: jp.ndarray) -> env.State:
        """Start every episode from frame 0 of the reference clip."""
        reward, done, zero = jp.zeros(3)
        qp = self._get_ref_state(zero)
        metrics = {'step_index': zero, 'pose_error': zero, 'fall': zero}
        obs = self._get_obs(qp, step_index=zero)
        return env.State(qp, obs, reward, done, metrics)

    def step(self, state: env.State, action: jp.ndarray) -> env.State:
        """Advance physics one step and score against the reference frame."""
        step_index = state.metrics['step_index'] + 1
        qp, info = self.sys.step(state.qp, action)
        obs = self._get_obs(qp, step_index)
        ref_qp = self._get_ref_state(step_idx=step_index)
        reward = (-1) * (mse_pos(qp, ref_qp)
                         + (self.rot_weight * mse_rot(qp, ref_qp))
                         + (self.vel_weight * mse_vel(qp, ref_qp))
                         + (self.ang_weight * mse_ang(qp, ref_qp))) * self.reward_scaling
        # Flag a fall when the torso is too low or unrealistically high.
        fall = jp.where(qp.pos[0, 2] < 0.2, jp.float32(1), jp.float32(0))
        fall = jp.where(qp.pos[0, 2] > 1.7, jp.float32(1), fall)
        state.metrics.update(step_index=step_index,
                             pose_error=loss_l2_relpos(qp, ref_qp),
                             fall=fall)
        return state.replace(qp=qp, obs=obs, reward=reward)

    def _get_obs(self, qp: brax.QP, step_index: jp.ndarray) -> jp.ndarray:
        """Build the observation vector; the last body (world) is dropped."""
        pos, rot, vel, ang = qp.pos[:-1], qp.rot[:-1], qp.vel[:-1], qp.ang[:-1]
        rot_6d = quaternion_to_rotation_6d(rot)
        # Body positions relative to the root (root itself excluded).
        rel_pos = (pos - pos[0])[1:]
        if self.obs_type == 'timestamp':
            # Normalized phase within the motion cycle.
            phi = (step_index % self.cycle_len) / self.cycle_len
            obs = jp.concatenate([rel_pos.reshape(-1), rot_6d.reshape(-1),
                                  vel.reshape(-1), ang.reshape(-1),
                                  phi[None]], axis=-1)
        elif self.obs_type == 'target_state':
            # Condition on the next reference frame instead of a phase scalar.
            target_qp = self._get_ref_state(step_idx=step_index + 1)
            target_pos, target_rot, target_vel, target_ang = (
                target_qp.pos[:-1], target_qp.rot[:-1], target_qp.vel[:-1], target_qp.ang[:-1])
            target_rot_6d = quaternion_to_rotation_6d(target_rot)
            obs = jp.concatenate([pos.reshape(-1), rot.reshape(-1),
                                  vel.reshape(-1), ang.reshape(-1),
                                  target_pos.reshape(-1), target_rot_6d.reshape(-1),
                                  target_vel.reshape(-1), target_ang.reshape(-1)], axis=-1)
        else:
            raise NotImplementedError
        return obs

    def _get_ref_state(self, step_idx) -> brax.QP:
        """Reference QP at frame ``step_idx`` via one-hot mask contraction."""
        mask = jp.where(step_idx == jp.arange(0, self.reference_len),
                        jp.float32(1), jp.float32(0))
        # BUG FIX: the operator between `mask` and the transposed array was
        # missing (syntax error in the original). Contracting the time axis
        # with the one-hot mask — (T,) @ (n, T, d) -> (n, d) — selects frame
        # step_idx while staying jit/vmap friendly. TODO confirm against the
        # upstream brax mimic implementation.
        ref_state = jp.tree_map(lambda x: mask @ x.transpose(1, 0, 2), self.reference_qp)
        return ref_state
def get_parser():
    """Build the CLI parser for dumping a binarized dataset as text."""
    arg_parser = argparse.ArgumentParser(description='writes text from binarized file to stdout')
    # On-disk storage format of the binarized dataset.
    arg_parser.add_argument('--dataset-impl', help='dataset implementation',
                            choices=['raw', 'lazy', 'cached', 'mmap'], default='lazy')
    arg_parser.add_argument('--dict', metavar='FP',
                            help='dictionary containing known words', default=None)
    arg_parser.add_argument('--input', metavar='FP', required=True,
                            help='binarized file to read')
    return arg_parser
class Blip2QFormerModel(metaclass=DummyObject):
    """Import-guard stub for Blip2QFormerModel; raises a helpful error on
    instantiation when the 'torch' backend is unavailable."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def vggcam(nb_classes, input_shape=(3, None, None), num_input_channels=1024):
    """VGG16-style backbone topped with a Class Activation Mapping (CAM) head.

    Args:
        nb_classes: number of output classes for the softmax head.
        input_shape: channels-first input shape.
        num_input_channels: filters in the CAM conv layer feeding the GAP.
    """
    # Conv stages: each entry is ([(filters, layer_name), ...], pool_after).
    # The last stage names relu5_1/relu5_3 (used downstream for CAM) and
    # has no trailing max-pool.
    stages = [
        ([(64, None), (64, None)], True),
        ([(128, None), (128, None)], True),
        ([(256, None), (256, None), (256, None)], True),
        ([(512, None), (512, None), (512, None)], True),
        ([(512, 'relu5_1'), (512, None), (512, 'relu5_3')], False),
    ]
    model = Sequential()
    first_layer = True
    for convs, pool_after in stages:
        for filters, layer_name in convs:
            if first_layer:
                model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            if layer_name is None:
                model.add(Convolution2D(filters, 3, 3, activation='relu'))
            else:
                model.add(Convolution2D(filters, 3, 3, activation='relu', name=layer_name))
        if pool_after:
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # CAM head: conv -> global average pool -> softmax classifier.
    model.add(Convolution2D(num_input_channels, 3, 3, activation='relu',
                            border_mode='same', name='CAM_relu'))
    model.add(GlobalAveragePooling2D(name='CAM_pool'))
    model.add(Dense(nb_classes, activation='softmax'))
    model.name = 'vgg_cam'
    return model
def get_bigdl_conf():
    """Return the path of the first BigDL .conf file shipped with the package.

    Looks under share/*/conf/ three directory levels above this module;
    raises IndexError if no .conf file is found.
    """
    # Walk three directories up from this file to the distribution root.
    root = os.path.abspath(__file__ + '/../../../')
    candidates = glob.glob(os.path.join(root, 'share/*/conf/*.conf'))
    return candidates[0]
def objects365v2_classes() -> list:
    """Return the 365 class names of the Objects365-v2 detection dataset.

    NOTE(review): several labels contain upstream typos and trailing spaces
    ('Bakset', 'Moniter/TV', 'Ballon', 'Formula 1 ', 'Table Tennis ', ...).
    They are kept verbatim because they must match the dataset's annotation
    files exactly.
    """
    return ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses',
            'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel',
            'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', 'Gloves', 'Storage box',
            'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag',
            'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass',
            'Belt', 'Moniter/TV', 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch',
            'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', 'Stool', 'Barrel/bucket', 'Van',
            'Couch', 'Sandals', 'Bakset', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird',
            'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera',
            'Canned', 'Truck', 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy',
            'Candle', 'Sailboat', 'Laptop', 'Awning', 'Bed', 'Faucet', 'Tent', 'Horse',
            'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife',
            'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Ballon',
            'Tripod', 'Dog', 'Spoon', 'Clock', 'Pot', 'Cow', 'Cake', 'Dinning Table',
            'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish',
            'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern',
            'Machinery Vehicle', 'Fan', 'Green Vegetables', 'Banana', 'Baseball Glove',
            'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', 'Luggage',
            'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car',
            'Stop Sign', 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator',
            'Oven', 'Lemon', 'Duck', 'Baseball Bat', 'Surveillance Camera', 'Cat', 'Jug',
            'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', 'Gun',
            'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet',
            'Kite', 'Strawberry', 'Other Balls', 'Shovel', 'Pepper', 'Computer Box',
            'Toilet Paper', 'Cleaning Products', 'Chopsticks', 'Microwave', 'Pigeon',
            'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors',
            'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant',
            'Basketball', 'Zebra', 'Grape', 'Giraffe', 'Potato', 'Sausage', 'Tricycle',
            'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', 'Billards',
            'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber',
            'Cigar/Cigarette ', 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger',
            'Extractor', 'Extention Cord', 'Tong', 'Tennis Racket', 'Folder',
            'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing',
            'Coffee Machine', 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector',
            'Frisbee', 'Washing Machine/Drying Machine', 'Chicken', 'Printer', 'Watermelon',
            'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hotair ballon', 'Cello',
            'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach',
            'Rice', 'Wallet/Purse', 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet',
            'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', 'Ambulance', 'Parking meter',
            'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin',
            'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion',
            'Sandwich', 'Nuts', 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone',
            'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', 'Router/modem', 'Poker Card',
            'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD',
            'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroon',
            'Screwdriver', 'Soap', 'Recorder', 'Bear', 'Eggplant', 'Board Eraser', 'Coconut',
            'Tape Measur/ Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', 'Steak',
            'Crosswalk Sign', 'Stapler', 'Campel', 'Formula 1 ', 'Pomegranate', 'Dishwasher',
            'Crab', 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya',
            'Antelope', 'Parrot', 'Seal', 'Buttefly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal',
            'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', 'Jellyfish', 'Treadmill',
            'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target',
            'French', 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak',
            'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', 'Scallop', 'Noddles',
            'Comb', 'Dumpling', 'Oyster', 'Table Teniis paddle',
            'Cosmetics Brush/Eyeliner Pencil', 'Chainsaw', 'Eraser', 'Lobster', 'Durian',
            'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis ']
class TFAutoModelForMaskedLM(metaclass=DummyObject):
    """Import-guard stub for TFAutoModelForMaskedLM; raises a helpful error
    on instantiation when the 'tf' (TensorFlow) backend is unavailable."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class TFTapasForSequenceClassification(metaclass=DummyObject):
    """Import-guard stub for TFTapasForSequenceClassification; raises a
    helpful error on instantiation when the 'tf' backend is unavailable."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def process_units(units, reduce=False):
    """Return ``units`` unchanged, or with consecutive duplicates collapsed
    when ``reduce`` is True (CTC-style de-duplication)."""
    if not reduce:
        return units
    deduped = []
    for idx, unit in enumerate(units):
        # Keep the first element and any element that differs from its predecessor.
        if idx == 0 or unit != units[idx - 1]:
            deduped.append(unit)
    return deduped
class AlphaLayer(nn.Module):
    """Learnable soft channel-pruning layer.

    Channels above ``min_ch`` are split into equally-sized groups; group i
    is kept with conditional probability p_i (parameterized by ``alpha``),
    and its marginal keep probability is prod_{j<=i} p_j. ``forward``
    scales each group by its marginal probability.

    Fixes over the original:
      * ``_get_ch_indice`` is now a @staticmethod — it was defined without
        ``self`` but called through ``self.`` with four positional
        arguments, which raised a TypeError on construction.
      * ``get_condition_prob`` referenced the misspelled ``self.apha``
        (AttributeError on the 'exp' path).
      * the unreachable prob-type branch now raises NotImplementedError
        instead of returning the exception class.
    """

    def __init__(self, channels, min_width, max_width, offset, prob_type='exp'):
        super(AlphaLayer, self).__init__()
        assert prob_type in ['exp', 'sigmoid']
        self.prob_type = prob_type
        self.channels = channels
        ch_indice = self._get_ch_indice(min_width, max_width, offset, channels)
        self.min_ch = ch_indice[0]
        # Candidate widths must be evenly spaced.
        for i in range(1, len(ch_indice) - 1):
            assert (ch_indice[i + 1] - ch_indice[i]) == (ch_indice[i] - ch_indice[i - 1])
        self.num_groups = len(ch_indice) - 1
        self.group_size = (ch_indice[1] - ch_indice[0]) if self.num_groups > 0 else 0
        assert (self.group_size * self.num_groups) + self.min_ch == channels
        self.register_buffer('alpha0', torch.ones(1))
        if self.num_groups > 0:
            self.alpha = nn.Parameter(torch.zeros(self.num_groups))
        else:
            self.alpha = None

    def get_condition_prob(self):
        """p_i = P(keep group i | group i-1 kept)."""
        if self.prob_type == 'exp':
            # Keep alpha non-negative so exp(-alpha) is a valid probability.
            self.alpha.data.clamp_(min=0.0)
            return torch.exp(-self.alpha)  # BUG FIX: was `self.apha`
        if self.prob_type == 'sigmoid':
            return torch.sigmoid(self.alpha)
        # BUG FIX: original `return NotImplementedError` handed back the class.
        raise NotImplementedError(self.prob_type)

    def get_marginal_prob(self):
        """P(keep group i) = cumulative product of the conditionals."""
        cond = self.get_condition_prob()
        return torch.cumprod(cond, dim=0)

    def expected_channel(self):
        """Expected retained channel count (tensor when groups exist)."""
        if self.num_groups == 0:
            return self.min_ch
        marginal_prob = self.get_marginal_prob()
        return (torch.sum(marginal_prob) * self.group_size) + self.min_ch

    def direct_sampling(self):
        """Sample a width by walking groups until a Bernoulli trial fails."""
        if self.num_groups == 0:
            return self.min_ch
        prob = self.get_condition_prob().detach().cpu()
        pruned_ch = self.min_ch
        for i in range(self.num_groups):
            if random.uniform(0, 1) > prob[i]:
                break
            pruned_ch += self.group_size
        return pruned_ch

    def expected_sampling(self):
        """Return (nearest candidate width to the expectation, rounded expectation)."""
        expected = round(self.expected_channel().item() - 0.0001)
        candidate = [self.min_ch + (self.group_size * i) for i in range(self.num_groups + 1)]
        idx = np.argmin([abs(ch - expected) for ch in candidate])
        return (candidate[idx], expected)

    def forward(self, x):
        """Scale channel groups of x (NCHW) by their marginal keep probabilities."""
        size_x = x.size()
        if (self.num_groups == 0) or (size_x[1] == self.min_ch):
            return x
        prob = self.get_marginal_prob().view(self.num_groups, 1)
        # Channel-first layout makes each group a contiguous slab along dim 0.
        tp_x = x.transpose(0, 1).contiguous()
        tp_group_x = tp_x[self.min_ch:]
        size_tp_group = tp_group_x.size()
        num_groups = size_tp_group[0] // self.group_size
        tp_group_x = tp_group_x.view(num_groups, -1) * prob[:num_groups]
        tp_group_x = tp_group_x.view(size_tp_group)
        x = torch.cat([tp_x[:self.min_ch], tp_group_x]).transpose(0, 1).contiguous()
        return x

    @staticmethod
    def _get_ch_indice(min_width, max_width, width_offset, max_ch):
        """Channel counts of all candidate widths, sorted ascending."""
        ch_offset = int((width_offset * max_ch) / max_width)
        num_offset = int(((max_width - min_width) / width_offset) + 0.0001)
        min_ch = max_ch - (ch_offset * num_offset)
        assert min_ch > 0
        indice = [min_ch + (i * ch_offset) for i in range(num_offset + 1)]
        assert indice[0] == min_ch
        assert indice[-1] == max_ch
        return sorted(list(set(indice)))
class NumpyArrayFormatterState(TemporalFeatureFormatterState):
    """Formatter state whose internal feature representation is a numpy array."""

    def to_tensor(self, features: np.ndarray) -> torch.Tensor:
        # torch.from_numpy shares memory with the numpy array (no copy).
        return torch.from_numpy(features)

    def to_internal_type(self, features: torch.Tensor) -> TemporalFeatures:
        # .cpu() is a no-op for CPU tensors; the numpy view may then share memory.
        return features.cpu().numpy()
class FullyConnectedLatentVariable(LatentVariable):
    """Gaussian latent variable whose prior and approximate-posterior
    parameters come from fully-connected layers.

    Supported inference procedures:
      * 'direct'             - posterior params produced directly by the net
      * 'gradient' / 'error' - iterative updates gated/scaled by the net
      * 'sgd'                - plain gradient descent on the posterior params
    """

    def __init__(self, latent_config):
        super(FullyConnectedLatentVariable, self).__init__(latent_config)
        self._construct(latent_config)

    def _construct(self, latent_config):
        # Build inference and generative layers for the configured procedure.
        self.inference_procedure = latent_config['inference_procedure']
        if (self.inference_procedure in ['gradient', 'error']):
            self.update_type = latent_config['update_type']
        n_variables = latent_config['n_variables']
        # n_inputs: [inference-model input size, generative-model input size]
        n_inputs = latent_config['n_in']
        self.normalize_samples = latent_config['normalize_samples']
        if self.normalize_samples:
            self.normalizer = LayerNorm()
        if (self.inference_procedure in ['direct', 'gradient', 'error']):
            self.inf_mean_output = FullyConnectedLayer({'n_in': n_inputs[0], 'n_out': n_variables})
            self.inf_log_var_output = FullyConnectedLayer({'n_in': n_inputs[0], 'n_out': n_variables})
        if (self.inference_procedure in ['gradient', 'error']):
            # Sigmoid gates: how much of the old posterior to keep (or
            # per-dimension learned learning rates for 'learned_sgd').
            self.approx_post_mean_gate = FullyConnectedLayer({'n_in': n_inputs[0], 'n_out': n_variables, 'non_linearity': 'sigmoid'})
            self.approx_post_log_var_gate = FullyConnectedLayer({'n_in': n_inputs[0], 'n_out': n_variables, 'non_linearity': 'sigmoid'})
        if (self.inference_procedure == 'sgd'):
            self.learning_rate = latent_config['inf_lr']
        self.prior_mean = FullyConnectedLayer({'n_in': n_inputs[1], 'n_out': n_variables})
        self.prior_log_var = FullyConnectedLayer({'n_in': n_inputs[1], 'n_out': n_variables})
        self.approx_post = Normal()
        self.prior = Normal()
        self.approx_post.re_init()
        self.prior.re_init()

    def infer(self, input):
        """Update the approximate posterior from the inference-model input.

        Log-variances are clamped to [-15, 5] everywhere so exp() stays
        numerically stable. For 'sgd', ``input`` is the pair of gradients
        [d/d_mean, d/d_log_var].
        """
        if (self.inference_procedure in ['direct', 'gradient', 'error']):
            approx_post_mean = self.inf_mean_output(input)
            approx_post_log_var = self.inf_log_var_output(input)
            if (self.inference_procedure == 'direct'):
                self.approx_post.mean = approx_post_mean
                self.approx_post.log_var = torch.clamp(approx_post_log_var, (- 15), 5)
            elif (self.inference_procedure in ['gradient', 'error']):
                if (self.update_type == 'highway'):
                    # Gated convex combination of old (detached) and new params.
                    approx_post_mean_gate = self.approx_post_mean_gate(input)
                    self.approx_post.mean = ((approx_post_mean_gate * self.approx_post.mean.detach()) + ((1 - approx_post_mean_gate) * approx_post_mean))
                    approx_post_log_var_gate = self.approx_post_log_var_gate(input)
                    self.approx_post.log_var = torch.clamp(((approx_post_log_var_gate * self.approx_post.log_var.detach()) + ((1 - approx_post_log_var_gate) * approx_post_log_var)), (- 15), 5)
                elif (self.update_type == 'learned_sgd'):
                    # SGD step with learned per-dimension rates plus an offset.
                    (mean_grad, log_var_grad) = self.approx_posterior_gradients()
                    mean_lr = self.approx_post_mean_gate(input)
                    log_var_lr = self.approx_post_log_var_gate(input)
                    self.approx_post.mean = ((self.approx_post.mean.detach() - (mean_lr * mean_grad)) + approx_post_mean)
                    self.approx_post.log_var = torch.clamp(((self.approx_post.log_var.detach() - (log_var_lr * log_var_grad)) + approx_post_log_var), (- 15), 5)
        elif (self.inference_procedure == 'sgd'):
            self.approx_post.mean = (self.approx_post.mean.detach() - (self.learning_rate * input[0]))
            self.approx_post.log_var = torch.clamp((self.approx_post.log_var.detach() - (self.learning_rate * input[1])), (- 15), 5)
            # New leaves: re-enable gradients for the next descent step.
            self.approx_post.mean.requires_grad = True
            self.approx_post.log_var.requires_grad = True
        else:
            raise NotImplementedError
        if self.normalize_samples:
            # Layer-normalize the posterior mean (not the drawn samples).
            self.approx_post.mean = self.normalizer(self.approx_post.mean)
        # Posterior params are non-leaf tensors; retain their gradients so
        # approx_posterior_gradients() can read them after backward().
        self.approx_post.mean.retain_grad()
        self.approx_post.log_var.retain_grad()

    def generate(self, input, gen, n_samples):
        """Draw n_samples from the prior (gen=True) or the approximate posterior.

        When ``input`` is given, the prior parameters are (re)computed from
        it; expected shape is (batch, samples, features).
        """
        if (input is not None):
            (b, s, n) = input.data.shape
            input = input.view((b * s), n)
            self.prior.mean = self.prior_mean(input).view(b, s, (- 1))
            self.prior.log_var = torch.clamp(self.prior_log_var(input).view(b, s, (- 1)), (- 15), 5)
        dist = (self.prior if gen else self.approx_post)
        sample = dist.sample(n_samples, resample=True)
        sample = (sample.detach() if self.detach else sample)
        return sample

    def re_init(self):
        """Re-initialize both the posterior and the prior."""
        self.re_init_approx_posterior()
        self.prior.re_init()

    def re_init_approx_posterior(self):
        # Seed the posterior with the sample-averaged prior parameters.
        mean = self.prior.mean.detach().mean(dim=1).data
        log_var = self.prior.log_var.detach().mean(dim=1).data
        self.approx_post.re_init(mean, log_var)

    def step(self):
        # No per-time-step state to advance for this latent-variable type.
        pass

    def error(self, averaged=True):
        """Precision-weighted error of posterior samples under the prior.

        NOTE(review): the denominator is exp(log_var + 1e-07) — the
        variance with the epsilon inside the exp — rather than the standard
        deviation; confirm this matches the intended normalization.
        """
        sample = self.approx_post.sample()
        n_samples = sample.data.shape[1]
        prior_mean = self.prior.mean.detach()
        if (len(prior_mean.data.shape) == 2):
            # Broadcast static prior params across the sample dimension.
            prior_mean = prior_mean.unsqueeze(1).repeat(1, n_samples, 1)
        prior_log_var = self.prior.log_var.detach()
        if (len(prior_log_var.data.shape) == 2):
            prior_log_var = prior_log_var.unsqueeze(1).repeat(1, n_samples, 1)
        n_error = ((sample - prior_mean) / torch.exp((prior_log_var + 1e-07)))
        if averaged:
            n_error = n_error.mean(dim=1)
        return n_error

    def close_gates(self):
        # Bias the gates strongly positive (sigmoid ~ 1) so the posterior
        # initially keeps its previous value ("closed" to new information).
        nn.init.constant(self.approx_post_mean_gate.linear.bias, 5.0)
        nn.init.constant(self.approx_post_log_var_gate.linear.bias, 5.0)

    def inference_parameters(self):
        """Trainable parameters of the inference model.

        NOTE(review): assumes inf_mean_output exists, i.e. this is not the
        'sgd' procedure (which creates no inference layers).
        """
        params = nn.ParameterList()
        params.extend(list(self.inf_mean_output.parameters()))
        params.extend(list(self.inf_log_var_output.parameters()))
        if (self.inference_procedure != 'direct'):
            params.extend(list(self.approx_post_mean_gate.parameters()))
            params.extend(list(self.approx_post_log_var_gate.parameters()))
        return params

    def generative_parameters(self):
        """Trainable parameters of the generative (prior) model."""
        params = nn.ParameterList()
        params.extend(list(self.prior_mean.parameters()))
        params.extend(list(self.prior_log_var.parameters()))
        return params

    def approx_posterior_parameters(self):
        """Detached copies of the current posterior mean and log-variance."""
        return [self.approx_post.mean.detach(), self.approx_post.log_var.detach()]

    def approx_posterior_gradients(self):
        """Gradients of the objective w.r.t. posterior mean and log-variance."""
        assert (self.approx_post.mean.grad is not None), 'Approximate posterior gradients are None.'
        grads = [self.approx_post.mean.grad.detach()]
        grads += [self.approx_post.log_var.grad.detach()]
        for grad in grads:
            # NOTE(review): .volatile is a removed pre-0.4 PyTorch attribute;
            # this assignment is a legacy no-op (or error) on modern torch.
            grad.volatile = False
        return grads
def adverb_freq(s, tokens=None):
    """Fraction of the tokens in ``s`` that are adverbs (universal POS 'ADV').

    Args:
        s: input text; tokenized with ``word_tokenize`` unless ``tokens``
           is supplied.
        tokens: optional pre-tokenized word list.

    Returns:
        float in [0, 1]; 0.0 for empty input.
    """
    if tokens is None:  # was `tokens == None` — identity check is correct here
        tokens = word_tokenize(s)
    # Early exit also avoids tagging an empty token list.
    if not tokens:
        return 0.0
    tagged = pos_tag(tokens)
    # Map Penn Treebank tags onto the universal tagset and keep adverbs.
    adverbs = [token for (token, tag) in tagged
               if map_tag('en-ptb', 'universal', tag) == 'ADV']
    return float(len(adverbs)) / float(len(tokens))
def important_spans(data, output, tgt_codes, pred_codes, s, dicts, filter_size, true_str, pred_str, spans_file, fps=False):
    """Write the three most attention-weighted text windows for each
    confidently predicted code.

    fps=False reports true positives (predicted AND in tgt_codes);
    fps=True reports false positives (predicted but NOT in tgt_codes).
    Windows are derived from the per-code attention vector ``s``.
    """
    (ind2w, ind2c, desc_dict) = (dicts['ind2w'], dicts['ind2c'], dicts['desc'])
    for p_code in pred_codes:
        # XOR flips the target-membership test depending on fps.
        if ((output[0][p_code] > 0.5) and (fps ^ (p_code in tgt_codes))):
            confidence = output[0][p_code]
            code = ind2c[p_code]
            conf_str = ('confidence of prediction: %f' % confidence)
            typ = ('false positive' if fps else 'true positive')
            prelude = ('top three important windows for %s code %s (%s: %s)' % (typ, str(p_code), code, desc_dict[code]))
            if (spans_file is not None):
                spans_file.write((conf_str + '\n'))
                spans_file.write((true_str + '\n'))
                spans_file.write((pred_str + '\n'))
                spans_file.write((prelude + '\n'))
            # Attention weights over the document for this code.
            attn = s[0][p_code].data.cpu().numpy()
            # Indices of the ten highest-attention positions, best first.
            imps = attn.argsort()[(- 10):][::(- 1)]
            windows = make_windows(imps, filter_size, attn)
            kgram_strs = []
            i = 0
            # Collect up to three distinct windows, rendered as text + score.
            while ((len(kgram_strs) < 3) and (i < len(windows))):
                ((start, end), score) = windows[i]
                words = [(ind2w[w] if (w in ind2w.keys()) else 'UNK') for w in data[0][start:end].data.cpu().numpy()]
                kgram_str = ((' '.join(words) + ', score: ') + str(score))
                if (kgram_str not in kgram_strs):
                    kgram_strs.append(kgram_str)
                i += 1
            for kgram_str in kgram_strs:
                if (spans_file is not None):
                    spans_file.write((kgram_str + '\n'))
            # NOTE(review): this trailing separator write is not guarded by
            # the spans_file None-check above — crashes if spans_file is None.
            spans_file.write('\n')
class InputFeatures(object):
    """Container for one tokenized document example with layout bounding boxes.

    ``boxes`` holds normalized token boxes whose coordinates must lie in
    [0, 1000]; ``actual_bboxes`` holds the un-normalized pixel boxes.
    """

    def __init__(self, input_ids, input_mask, segment_ids, label_ids, boxes, actual_bboxes, file_name, page_size):
        # BUG FIX: the original check `0 <= all(boxes) <= 1000` compared a
        # bool against the bounds and was therefore always true. Validate
        # every coordinate of every box instead. AssertionError is kept so
        # existing callers that catch it keep working.
        for box in boxes:
            coords = box if isinstance(box, (list, tuple)) else [box]
            assert all(0 <= coord <= 1000 for coord in coords), 'Error with input bbox ({}): the coordinate value is not between 0 and 1000'.format(boxes)
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_ids = label_ids
        self.boxes = boxes
        self.actual_bboxes = actual_bboxes
        self.file_name = file_name
        self.page_size = page_size
class ConditionalEntropyAsyncProcess(GPUAsyncProcess):
    """GPU (PyCUDA) batch processor for the conditional-entropy period search.

    Phase-folds each light curve at a grid of trial frequencies, histograms
    (phase, magnitude) occupancy on the device, and returns the conditional
    entropy at each frequency; CE minima indicate candidate periods.
    """

    def __init__(self, *args, **kwargs):
        super(ConditionalEntropyAsyncProcess, self).__init__(*args, **kwargs)
        # Histogram geometry and kernel-launch options come from kwargs.
        self.phase_bins = kwargs.get('phase_bins', 10)
        self.mag_bins = kwargs.get('mag_bins', 5)
        self.max_phi = kwargs.get('max_phi', 3.0)
        self.weighted = kwargs.get('weighted', False)
        self.block_size = kwargs.get('block_size', 256)
        self.phase_overlap = kwargs.get('phase_overlap', 0)
        self.mag_overlap = kwargs.get('mag_overlap', 0)
        if (self.mag_overlap > 0):
            # Overlapping magnitude bins are incompatible with balanced binning.
            if kwargs.get('balanced_magbins', False):
                raise Exception('mag_overlap must be zero if balanced_magbins is True')
        self.use_double = kwargs.get('use_double', False)
        self.real_type = np.float32
        if self.use_double:
            self.real_type = np.float64
        self.call_func = conditional_entropy
        if kwargs.get('use_fast', False):
            self.call_func = conditional_entropy_fast
        self.memory = kwargs.get('memory', None)
        self.shmem_lc = kwargs.get('shmem_lc', True)

    def _compile_and_prepare_functions(self, **kwargs):
        """Compile the CUDA kernels (bin geometry baked in via #defines) and
        prepare typed launch signatures for each of them."""
        cpp_defs = dict(NPHASE=self.phase_bins, NMAG=self.mag_bins, PHASE_OVERLAP=self.phase_overlap, MAG_OVERLAP=self.mag_overlap)
        if self.use_double:
            cpp_defs['DOUBLE_PRECISION'] = None
        kernel_txt = _module_reader(find_kernel('ce'), cpp_defs=cpp_defs)
        self.module = SourceModule(kernel_txt, options=['--use_fast_math'])
        # Argument dtypes per kernel (np.intp stands for a device pointer).
        self.dtypes = dict(constdpdm_ce=[np.intp, np.int32, np.intp, np.intp], histogram_data_weighted=[np.intp, np.intp, np.intp, np.intp, np.intp, np.uint32, np.uint32, self.real_type], histogram_data_count=[np.intp, np.intp, np.intp, np.intp, np.uint32, np.uint32], log_prob=[np.intp, np.uint32, np.intp, np.intp], standard_ce=[np.intp, np.uint32, np.intp], weighted_ce=[np.intp, np.uint32, np.intp], ce_classical_fast=[np.intp, np.intp, np.intp, np.intp, np.uint32, np.uint32, np.uint32, np.uint32, np.uint32, np.uint32, np.uint32], ce_classical_faster=[np.intp, np.intp, np.intp, np.intp, np.uint32, np.uint32, np.uint32, np.uint32, np.uint32, np.uint32, np.uint32])
        for (fname, dtype) in self.dtypes.items():
            func = self.module.get_function(fname)
            self.prepared_functions[fname] = func.prepare(dtype)
        # Fixed (alphabetical) ordering expected by the call functions.
        self.function_tuple = tuple((self.prepared_functions[fname] for fname in sorted(self.dtypes.keys())))

    def memory_requirement(self, data, **kwargs):
        """Not implemented for this process."""
        raise NotImplementedError()

    def allocate_for_single_lc(self, t, y, freqs, dy=None, stream=None, **kwargs):
        """Allocate and fill a memory container for one light curve."""
        kw = dict(phase_bins=self.phase_bins, mag_bins=self.mag_bins, mag_overlap=self.mag_overlap, phase_overlap=self.phase_overlap, max_phi=self.max_phi, stream=stream, weighted=self.weighted, use_double=self.use_double)
        kw.update(kwargs)
        mem = ConditionalEntropyMemory(**kw)
        mem.fromdata(t, y, dy=dy, freqs=freqs, allocate=True, **kwargs)
        return mem

    def autofrequency(self, *args, **kwargs):
        """Heuristic trial-frequency grid (delegates to the utils helper)."""
        return utils_autofreq(*args, **kwargs)

    def _nfreqs(self, *args, **kwargs):
        # Size of the grid autofrequency() would produce.
        return len(self.autofrequency(*args, **kwargs))

    def allocate(self, data, freqs=None, **kwargs):
        """Allocate one memory container (and CUDA stream) per light curve."""
        if (len(data) > len(self.streams)):
            self._create_streams((len(data) - len(self.streams)))
        allocated_memory = []
        frqs = freqs
        if (frqs is None):
            frqs = [self.autofrequency(t, **kwargs) for (t, y, dy) in data]
        elif isinstance(freqs[0], float):
            # A single flat grid is shared by every light curve.
            frqs = ([freqs] * len(data))
        for (i, ((t, y, dy), f)) in enumerate(zip(data, frqs)):
            mem = self.allocate_for_single_lc(t, y, dy=dy, freqs=f, stream=self.streams[i], **kwargs)
            allocated_memory.append(mem)
        return allocated_memory

    def preallocate(self, max_nobs, freqs, nlcs=1, streams=None, **kwargs):
        """Pre-allocate buffered-transfer containers for up to ``nlcs`` light
        curves of at most ``max_nobs`` points each; stored on self.memory."""
        kw = dict(phase_bins=self.phase_bins, mag_bins=self.mag_bins, mag_overlap=self.mag_overlap, phase_overlap=self.phase_overlap, max_phi=self.max_phi, weighted=self.weighted, use_double=self.use_double, n0_buffer=max_nobs, buffered_transfer=True, allocate=True, freqs=freqs)
        kw.update(kwargs)
        self.memory = []
        for i in range(nlcs):
            stream = (None if (streams is None) else streams[i])
            kw.update(dict(stream=stream))
            mem = ConditionalEntropyMemory(**kw)
            mem.allocate(**kwargs)
            self.memory.append(mem)
        return self.memory

    def run(self, data, memory=None, freqs=None, set_data=True, **kwargs):
        """Compute CE for each (t, y, dy) light curve in ``data``.

        Returns a list of (freqs, ce) pairs, one per light curve.
        NOTE(review): 'ce_wt' never appears in self.dtypes, so this check
        always triggers (re)compilation on first use — confirm the intended
        kernel name.
        """
        if ((not hasattr(self, 'prepared_functions')) or (not all([(func in self.prepared_functions) for func in ['ce_wt']]))):
            self._compile_and_prepare_functions(**kwargs)
        frqs = freqs
        if (frqs is None):
            frqs = [self.autofrequency(d[0], **kwargs) for d in data]
        elif isinstance(frqs[0], float):
            frqs = ([frqs] * len(data))
        assert (len(frqs) == len(data))
        memory = (memory if (memory is not None) else self.memory)
        if (memory is None):
            # Fresh allocation: buffers are filled from data here.
            memory = self.allocate(data, freqs=frqs, **kwargs)
            for mem in memory:
                mem.transfer_freqs_to_gpu()
        elif set_data:
            # Reuse pre-allocated buffers; reset and refill them.
            for (i, (t, y, dy)) in enumerate(data):
                memory[i].set_gpu_arrays_to_zero(**kwargs)
                memory[i].setdata(t, y, dy=dy, **kwargs)
        kw = dict(block_size=self.block_size, shmem_lc=self.shmem_lc)
        kw.update(kwargs)
        results = [self.call_func(memory[i], self.function_tuple, **kw) for i in range(len(data))]
        results = [(f, r) for (f, r) in zip(frqs, results)]
        return results

    def large_run(self, data, freqs=None, max_memory=None, **kwargs):
        """Like run(), but splits the frequency grid into batches that fit
        within ``max_memory`` bytes of device memory."""
        if ((not hasattr(self, 'prepared_functions')) or (not all([(func in self.prepared_functions) for func in ['ce_wt']]))):
            self._compile_and_prepare_functions(**kwargs)
        if (max_memory is None):
            # Leave 10% headroom on the currently free device memory.
            (free, total) = cuda.mem_get_info()
            max_memory = (0.9 * free)
        frqs = freqs
        if (frqs is None):
            frqs = [self.autofrequency(d[0], **kwargs) for d in data]
        elif isinstance(frqs[0], float):
            frqs = ([frqs] * len(data))
        assert (len(frqs) == len(data))
        cpers = []
        for (d, f) in zip(data, frqs):
            size_of_real = self.real_type(1).nbytes
            # Memory left after the three t/y/dy arrays; each frequency needs
            # its histogram bins plus bookkeeping.
            fmem = (max_memory - ((len(d[0]) * size_of_real) * 3))
            tot_bins = (self.phase_bins * self.mag_bins)
            batch_size = int(np.floor((fmem / (size_of_real * (tot_bins + 2)))))
            nbatches = int(np.ceil((len(f) / float(batch_size))))
            cper = np.zeros(len(f))
            for i in range(nbatches):
                imin = (i * batch_size)
                imax = min([len(f), ((i + 1) * batch_size)])
                r = self.run([d], freqs=f[slice(imin, imax)], **kwargs)
                self.finish()
                cper[imin:imax] = r[0][1][:]
            cpers.append(cper)
        results = [(f, cper) for (f, cper) in zip(frqs, cpers)]
        return results

    def batched_run_const_nfreq(self, data, batch_size=10, freqs=None, only_return_best_freqs=False, **kwargs):
        """Process many light curves that share one frequency grid,
        ``batch_size`` at a time, reusing a fixed pool of memory containers."""
        bsize = min([len(data), batch_size])
        if (len(self.streams) < bsize):
            self._create_streams((bsize - len(self.streams)))
        streams = [self.streams[i] for i in range(bsize)]
        max_ndata = max([len(t) for (t, y, dy) in data])
        if (freqs is None):
            # Use the light curve with the longest baseline to define the grid.
            data_with_max_baseline = max(data, key=(lambda d: (max(d[0]) - min(d[0]))))
            freqs = self.autofrequency(data_with_max_baseline[0], **kwargs)
        # NOTE(review): df and nf are computed but unused below.
        df = (freqs[1] - freqs[0])
        nf = len(freqs)
        ces = []
        batches = []
        while ((len(batches) * batch_size) < len(data)):
            start = (len(batches) * batch_size)
            finish = (start + min([batch_size, (len(data) - start)]))
            batches.append([data[i] for i in range(start, finish)])
        kwargs_mem = dict(buffered_transfer=True, n0_buffer=max_ndata, mag_overlap=self.mag_overlap, phase_overlap=self.phase_overlap, phase_bins=self.phase_bins, mag_bins=self.mag_bins, weighted=self.weighted, max_phi=self.max_phi, use_double=self.use_double)
        kwargs_mem.update(kwargs)
        memory = [ConditionalEntropyMemory(stream=stream, **kwargs_mem) for stream in streams]
        [mem.allocate(freqs=freqs, **kwargs) for mem in memory]
        [mem.transfer_freqs_to_gpu(**kwargs) for mem in memory]
        (best_freqs, best_freq_significances) = ([], [])
        for (b, batch) in enumerate(batches):
            results = self.run(batch, memory=memory, freqs=freqs, **kwargs)
            self.finish()
            for (i, (f, ce)) in enumerate(results):
                ce = np.copy(ce)
                # Significance: depth of the CE minimum in standard deviations.
                significance = (np.abs((np.mean(ce) - np.min(ce))) / np.std(ce))
                if only_return_best_freqs:
                    best_freqs.append(freqs[np.argmin(ce)])
                    best_freq_significances.append(significance)
                else:
                    ces.append(ce)
        if only_return_best_freqs:
            return (best_freqs, best_freq_significances)
        else:
            return [(freqs, ce) for ce in ces]
def _check_sha1(file_name, sha1_hash): sha1 = hashlib.sha1() with open(file_name, 'rb') as f: while True: data = f.read(1048576) if (not data): break sha1.update(data) return (sha1.hexdigest() == sha1_hash)
# FIX: the decorators were mangled to "_torch _vision" in this copy; restored
# to the standard transformers test guards. `image_processor_dict` is consumed
# as `**self.image_processor_dict` below, so it must be a @property (the
# decorator was likewise lost) — restored.
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for ``LevitImageProcessor``: configuration round-trips and
    output pixel-value shapes for PIL, NumPy, and PyTorch inputs."""

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # The processor must expose every configuration attribute.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        # Integer kwargs must be normalized into the dict-based size format.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # Random PIL images of unequal resolution.
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Single image -> batch dimension of 1.
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )

        # Batched input -> full batch dimension.
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # Random numpy arrays of unequal resolution.
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )

        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # Random torch tensors of unequal resolution.
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )

        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )
class QuantizedData(object):
    """Container for a quantized tensor and its quantization parameters.

    FIX: this copy showed duplicate ``def data/scale/zero/minval/maxval``
    pairs — getter/setter pairs whose ``@property`` / ``@x.setter``
    decorators were lost (as written, each setter silently shadowed its
    getter). Restored as properties.
    """

    def __init__(self):
        # Raw quantized payload; None until assigned.
        self._data = None
        # Quantization parameters: scale factor, zero point, and the
        # float range the integer values map back to.
        self._scale = 0
        self._zero = 0
        self._minval = 0.0
        self._maxval = 0.0

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, data):
        self._data = data

    @property
    def scale(self):
        return self._scale

    @scale.setter
    def scale(self, scale):
        self._scale = scale

    @property
    def zero(self):
        return self._zero

    @zero.setter
    def zero(self, zero):
        self._zero = zero

    @property
    def minval(self):
        return self._minval

    @minval.setter
    def minval(self, minval):
        self._minval = minval

    @property
    def maxval(self):
        return self._maxval

    @maxval.setter
    def maxval(self, maxval):
        self._maxval = maxval
def main():
    """Fit a GMM on the training data and persist it to ``gmm.pkl``."""
    cli_args = parser.parse_args()
    # Build the training loader, fit, then dump the fitted model to disk.
    loader = loaddata.getTrainingData(cli_args, cli_args.batch_size)
    fitted = fit_gmm(loader, cli_args)
    out_path = 'gmm.pkl'
    joblib.dump(fitted, out_path)
    print('Dumped at {}'.format(out_path))
class TextProcessor():
    """Convert batches of strings into padded integer index sequences,
    operating either on raw graphemes or on G2P-derived phonemes."""

    # ARPAbet phoneme inventory with stress digits, plus basic punctuation;
    # slots 0 and 1 are reserved for <pad> and <unk>.
    phonemes = (['<pad>', '<unk>'] + ['AA0', 'AA1', 'AA2', 'AE0', 'AE1', 'AE2', 'AH0', 'AH1', 'AH2', 'AO0', 'AO1', 'AO2', 'AW0', 'AW1', 'AW2', 'AY0', 'AY1', 'AY2', 'B', 'CH', 'D', 'DH', 'EH0', 'EH1', 'EH2', 'ER0', 'ER1', 'ER2', 'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH0', 'IH1', 'IH2', 'IY0', 'IY1', 'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW0', 'OW1', 'OW2', 'OY0', 'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH0', 'UH1', 'UH2', 'UW', 'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH', ' ', '.', ',', '?', '!', '\\-'])

    def __init__(self, graphemes_list, phonemize=False):
        """Build the symbol<->index tables.

        graphemes_list: symbol vocabulary; the first two entries MUST be
            '<pad>' and '<unk>'.
        phonemize: if True, convert text to phonemes via G2p before indexing
            (G2p is presumably the g2p_en model — TODO confirm at import site).
        """
        assert ((graphemes_list[0] == '<pad>') and (graphemes_list[1] == '<unk>')), 'First two items must be <pad> and <unk>'
        self.graphemes = graphemes_list
        self.phonemize = phonemize
        if phonemize:
            self.g2p = G2p()
            self.phon2idx = {g: i for (i, g) in enumerate(self.phonemes)}
            self.idx2phon = {i: g for (i, g) in enumerate(self.phonemes)}
        else:
            self.text2idx = {g: i for (i, g) in enumerate(graphemes_list)}
            self.idx2text = {i: g for (i, g) in enumerate(graphemes_list)}

    def __call__(self, text):
        """Index a batch of strings and pad them (pad_batch defined elsewhere
        in this file — presumably pads to the longest sequence; verify)."""
        # Lowercase every string in the batch.
        text = [t.lower() for t in text]
        if (not self.phonemize):
            # Unknown characters fall back to index 1 (<unk>).
            text = [[self.text2idx.get(ch, 1) for ch in s] for s in text]
            return pad_batch(text)
        else:
            # NOTE(review): str(self.graphemes[2:]) embeds the *list repr*
            # (brackets, quotes, commas, spaces) inside the character class,
            # so those characters are kept too. Looks unintended but is
            # preserved as-is — confirm before changing.
            keep_re = (('[^' + str(self.graphemes[2:])) + ']')
            # Strip everything outside the kept set, then phonemize and index;
            # unknown phonemes fall back to index 1 (<unk>).
            text = [re.sub(keep_re, '', t) for t in text]
            phonemes = [self.g2p(t) for t in text]
            phonemes = [[self.phon2idx.get(ch, 1) for ch in s] for s in phonemes]
            return pad_batch(phonemes)
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
    """Train `model` for one epoch (``total_it_each_epoch`` iterations).

    Args:
        model: network to train (put into ``.train()`` mode each step).
        optimizer: torch-style optimizer exposing ``param_groups``.
        train_loader: dataloader; re-iterated when exhausted.
        model_func: callable ``(model, batch) -> (loss, tb_dict, disp_dict)``.
        lr_scheduler: stepped with the global iteration counter.
        accumulated_iter: global iteration counter carried across epochs.
        optim_cfg: config providing ``GRAD_NORM_CLIP``.
        rank: distributed rank; only rank 0 draws progress bars / logs.
        tbar: outer (epoch-level) tqdm bar, updated with display stats.
        total_it_each_epoch: iterations to run this epoch.
        dataloader_iter: current iterator over ``train_loader``.
        tb_log: optional tensorboard writer.
        leave_pbar: keep the per-epoch bar after completion.

    Returns:
        int: the updated global iteration counter.
    """
    # If this epoch consumes the full loader, start from a fresh iterator.
    if total_it_each_epoch == len(train_loader):
        dataloader_iter = iter(train_loader)

    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()

    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch = next(dataloader_iter)
        except StopIteration:
            # Loader exhausted mid-epoch: restart it and continue.
            dataloader_iter = iter(train_loader)
            batch = next(dataloader_iter)
            print('new iters')

        data_timer = time.time()
        cur_data_time = data_timer - end

        lr_scheduler.step(accumulated_iter)

        try:
            cur_lr = float(optimizer.lr)
        except (AttributeError, TypeError):
            # FIX: was a bare `except:` (also caught KeyboardInterrupt/SystemExit).
            # Fall back to the standard torch location for the learning rate.
            cur_lr = optimizer.param_groups[0]['lr']

        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)

        model.train()
        optimizer.zero_grad()

        loss, tb_dict, disp_dict = model_func(model, batch)

        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer

        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()

        accumulated_iter += 1

        cur_batch_time = time.time() - end
        # Average the timing stats across ranks so rank 0 displays global values.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)

        # Progress display and tensorboard logging happen on rank 0 only.
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})',
            })

            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()

            if tb_log is not None:
                # FIX: log the scalar value, not the tensor — passing `loss`
                # directly keeps a reference to the autograd graph alive.
                tb_log.add_scalar('train/loss', loss.item(), accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)

    if rank == 0:
        pbar.close()
    return accumulated_iter
def add_suffix2name(ori_model, suffix='__', verify=False):
    """Return a deep copy of *ori_model* with *suffix* appended to every
    purely numeric tensor name (initializers, graph inputs/outputs, and node
    input/output references).

    Models containing subgraph-bearing control-flow ops ('If'/'Loop') are not
    rewritten: a warning is emitted and the original model is returned.
    """
    # Bail out untouched on the first control-flow op we encounter.
    for node in ori_model.graph.node:
        if node.op_type in ('If', 'Loop'):
            warnings.warn(f'This model has special op: {node.op_type}.')
            return ori_model

    model = copy.deepcopy(ori_model)

    def _is_numeric(name):
        # Only all-digit names get the suffix; everything else is left alone.
        return name.isnumeric()

    # Rename the declared tensors first.
    for bucket in (model.graph.initializer, model.graph.input, model.graph.output):
        for entry in bucket:
            if _is_numeric(entry.name):
                entry.name += suffix

    # Then rewrite every node-level reference so the graph stays consistent.
    for node in model.graph.node:
        for pos, name in enumerate(node.input):
            if _is_numeric(name):
                node.input[pos] = name + suffix
        for pos, name in enumerate(node.output):
            if _is_numeric(name):
                node.output[pos] = name + suffix

    if verify:
        onnx.checker.check_model(model)
    return model
class ListPop(ListRemove):
    """Handle ``list.pop(i)`` by reusing ``ListRemove``'s positional removal."""

    def process_arg(self, pop_pos: int) -> None:
        # pop() removes by index, so simply record it as the removal position;
        # the inherited ListRemove machinery performs the actual removal.
        self.remove_pos = pop_pos