# Dataset-viewer column header (non-code residue): code / stringlengths / 101 / 5.91M
def plot_samples(sess, shape, prior, decoder):
    """Draw 100 latent samples, run them through the decoder, and plot the images."""
    latents = prior.sample(100)
    # NOTE(review): the method is named `encode` although it maps latents to
    # samples — confirm the decoder API upstream.
    decoded = decoder.encode(latents, sampling=False)
    images = sess.run(decoded)
    plot_images(images, shape, '', 'samples')
# NOTE(review): `_config` below looks like the tail of a decorator lost in
# extraction (e.g. a sacred-style `@...config`) — confirm against the original file.
_config
def alexnet():
    # Experiment config: AlexNet-features-only perception network for a Habitat
    # task; images are pre-transformed then replaced by frozen AlexNet features.
    # No return — config decorators of this style capture the function's locals.
    uuid = 'habitat_alexnet_feature'
    cfg = {}
    cfg['learner'] = {'perception_network': 'AlexNetFeaturesOnlyNet', 'perception_network_kwargs': {'extra_kwargs': {'main_perception_network': 'AlexNetFeaturesOnlyNet'}}}
    cfg['env'] = {'env_specific_kwargs': {'target_dim': 13},
                  'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
                  'transform_fn_pre_aggregation_kwargs': {'names_to_transforms': {'taskonomy': 'alexnet_transform((3, 224, 224))'}},
                  'transform_fn_post_aggregation_fn': 'TransformFactory.independent',
                  # post-aggregation transform bakes in the pretrained AlexNet weights path
                  'transform_fn_post_aggregation_kwargs': {'names_to_transforms': {'taskonomy': "alexnet_features_transform('{load_path}')".format(load_path='/mnt/models/alexnet-owt-4df8aa71.pth')}, 'keep_unnamed': True}}
class Pose(xmlr.Object):
    """A pose as xyz translation plus rpy (roll/pitch/yaw) rotation.

    Fix: `rotation` and `position` were each defined twice (getter then
    setter) with no decorators, so the second `def` shadowed the first and
    the accessors were unusable as attributes. Restored `@property` /
    `@x.setter` pairs, which is the intended aliasing of rpy/xyz.
    """

    def __init__(self, xyz=None, rpy=None):
        self.xyz = xyz
        self.rpy = rpy

    def check_valid(self):
        # Each component, when present, must be a 3-vector.
        assert (((self.xyz is None) or (len(self.xyz) == 3)) and
                ((self.rpy is None) or (len(self.rpy) == 3)))

    @property
    def rotation(self):
        return self.rpy

    @rotation.setter
    def rotation(self, value):
        self.rpy = value

    @property
    def position(self):
        return self.xyz

    @position.setter
    def position(self, value):
        self.xyz = value
def default_style(num_v: int, num_e: int, v_color: Union[str, list] = 'r', e_color: Union[str, list] = 'gray', e_fill_color: Union[str, list] = 'whitesmoke'):
    """Expand vertex/edge color specs into per-element color lists.

    Each spec may be a single color or a list; `fill_color` pads/broadcasts
    it to the required length using the documented default as fallback.
    Returns (v_color, e_color, e_fill_color).
    """
    expanded_v = fill_color(v_color, 'r', num_v)
    expanded_e = fill_color(e_color, 'gray', num_e)
    expanded_fill = fill_color(e_fill_color, 'whitesmoke', num_e)
    return (expanded_v, expanded_e, expanded_fill)
def recode_cc_data(frame):
    """Recode the credit-card default dataset's integer codes into readable
    labels and return the result as an H2OFrame.

    Unknown codes raise KeyError, matching the original lambda-based lookups.
    """
    sex_dict = {1: 'male', 2: 'female'}
    education_dict = {0: 'other', 1: 'graduate school', 2: 'university', 3: 'high school', 4: 'other', 5: 'other', 6: 'other'}
    marriage_dict = {0: 'other', 1: 'married', 2: 'single', 3: 'divorced'}
    pay_dict = {-2: 'no consumption', -1: 'pay duly', 0: 'use of revolving credit',
                1: '1 month delay', 2: '2 month delay', 3: '3 month delay',
                4: '4 month delay', 5: '5 month delay', 6: '6 month delay',
                7: '7 month delay', 8: '8 month delay', 9: '9+ month delay'}
    # dict.__getitem__ keeps the strict KeyError-on-unknown behavior.
    frame['SEX'] = frame['SEX'].apply(sex_dict.__getitem__)
    frame['EDUCATION'] = frame['EDUCATION'].apply(education_dict.__getitem__)
    frame['MARRIAGE'] = frame['MARRIAGE'].apply(marriage_dict.__getitem__)
    pay_columns = ('PAY_0', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6')
    for name in frame.columns:
        if name in pay_columns:
            frame[name] = frame[name].apply(pay_dict.__getitem__)
    return h2o.H2OFrame(frame)
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for DeBERTa (slow and fast tokenizers).

    NOTE(review): the bare 'G' characters in the toy vocab/merges below look
    like extraction-mangled 'Ġ' (GPT-2 style BPE space markers) — confirm
    against the original test file before relying on them.
    """

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        """Write a tiny vocab/merges pair into tmpdirname for from_pretrained()."""
        super().setUp()
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '[UNK]']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '[UNK]'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        # Tokenizer backed by the toy vocab written in setUp().
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return (input_text, output_text)

    def test_full_tokenizer(self):
        # BPE splitting against the toy merges, plus [UNK] id handling.
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', 'G', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = (tokens + [tokenizer.unk_token])
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        # Pair encoding: first segment (incl. specials) -> 0, second segment -> 1.
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello', 'World')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'], expected_token_type_ids)

    def test_sequence_builders(self):
        # build_inputs_with_special_tokens must agree with encode(add_special_tokens=True).
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/deberta-base')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode('sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode('sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert (encoded_sentence == encoded_text_from_decode)
        assert (encoded_pair == encoded_pair_from_decode)

    def test_tokenizer_integration(self):
        # Golden-output check (pinned ids/masks) for both python and rust backends.
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('microsoft/deberta-base')
            sequences = ['ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. By decomposing the large vocabulary embedding matrix into two small matrices, we separate the size of the hidden layers from the size of vocabulary embedding.']
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['input_ids']]
            expected_encoding = {'input_ids': [[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
            expected_decoded_sequence = ['ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. By decomposing the large vocabulary embedding matrix into two small matrices, we separate the size of the hidden layers from the size of vocabulary embedding.']
            self.assertDictEqual(encoding.data, expected_encoding)
            for (expected, decoded) in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
def _initialize_centroids(X, k, algorithm='first-k', random_state=None):
    """Pick `k` initial centroids from `X`.

    Parameters: X — data (indexable, tensor-like); k — int or scalar tensor;
    algorithm — one of 'first-k', 'random', 'submodular-facility-location',
    'submodular-feature-based'; random_state — seed or RandomState.
    Returns a float32 tensor of k centroids.
    Raises ValueError for an unknown `algorithm` (previously the function
    silently returned None, which surfaced as an opaque error downstream).
    """
    if isinstance(k, torch.Tensor):
        k = k.item()
    if not isinstance(random_state, numpy.random.mtrand.RandomState):
        random_state = numpy.random.RandomState(random_state)

    if algorithm == 'first-k':
        return _cast_as_tensor(torch.clone(X[:k]), dtype=torch.float32)
    if algorithm == 'random':
        idxs = random_state.choice(len(X), size=k, replace=False)
        return _cast_as_tensor(torch.clone(X[idxs]), dtype=torch.float32)
    if algorithm == 'submodular-facility-location':
        selector = FacilityLocationSelection(k, random_state=random_state)
        return _cast_as_tensor(selector.fit_transform(X), dtype=torch.float32)
    if algorithm == 'submodular-feature-based':
        selector = FeatureBasedSelection(k, random_state=random_state)
        # Cast added for consistency: this was the only branch returning the
        # selector output without conversion to a float32 tensor.
        return _cast_as_tensor(selector.fit_transform(X), dtype=torch.float32)
    raise ValueError('unknown centroid initialization algorithm: {!r}'.format(algorithm))
class BeitForImageClassification(metaclass=DummyObject):
    # Auto-generated placeholder used when torch is not installed: any attempt
    # to construct it raises a clear "requires torch" error via requires_backends.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def gen_samples(vec):
    """Generate 20 sentences from the autoencoder/GAN pair conditioned on the
    latent vector `vec` (broadcast to a batch of 20 identical latents).

    Fix: removed the dead store `sentences = []` that was immediately
    overwritten by the `generate(...)` result.
    """
    sentences = generate(
        autoencoder,
        gan_gen,
        z=torch.FloatTensor(vec).view(1, -1).expand(20, vec.shape[0]),
        vocab=idx2word,
        sample=True,
        maxlen=model_args['maxlen'],
    )[0]
    return sentences
class EpochBatchIterating(object):
    """Abstract interface for epoch-based batch iterators.

    Concrete subclasses provide resumable, epoch-aware iteration over a
    dataset; state_dict/load_state_dict support checkpointing mid-epoch.
    """

    def __len__(self) -> int:
        # Total number of batches per epoch.
        raise NotImplementedError

    def next_epoch_idx(self):
        # Index of the epoch that the next call to next_epoch_itr() starts.
        raise NotImplementedError

    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True):
        # Return a fresh iterator over the dataset for the next epoch.
        raise NotImplementedError

    def end_of_epoch(self) -> bool:
        # True once the current epoch's iterator is exhausted.
        raise NotImplementedError

    def iterations_in_epoch(self) -> int:
        # Number of iterations already consumed in the current epoch.
        raise NotImplementedError

    def state_dict(self):
        # Serializable snapshot for checkpointing.
        raise NotImplementedError

    def load_state_dict(self, state_dict):
        # Restore from a snapshot produced by state_dict().
        raise NotImplementedError

    def first_batch(self):
        # Sentinel default; subclasses may return a real batch for warm-up passes.
        return 'DUMMY'
def window(iterable, stride=3):
    """Yield successive overlapping slices of length `stride`.

    Requires a sequence (len + slicing). Yields nothing when the input is
    shorter than `stride`.
    """
    last_start = len(iterable) - stride
    start = 0
    while start <= last_start:
        yield iterable[start:start + stride]
        start += 1
# NOTE(review): `_cache` looks like the tail of a caching decorator
# (e.g. `@functools.lru_cache`) mangled by extraction — confirm upstream.
_cache
def check_float_literals():
    """Run `make check-float-literals` and return its non-blank stdout lines.

    Best-effort: any failure to run make (missing binary, etc.) is swallowed
    and yields an empty tuple. Returns a tuple so the result is hashable
    (consistent with the cache decorator noted above).
    """
    legal_literals = []
    try:
        completed_process = subprocess.run(['make', 'check-float-literals'], capture_output=True)
        legal_literals = completed_process.stdout.decode().split('\n')
    except Exception:
        # deliberate best-effort: treat an unrunnable make as "no literals"
        pass
    legal_literals = [legal_literal for legal_literal in legal_literals if legal_literal.strip()]
    return tuple(legal_literals)
def get_test_data_dirs(prefix):
    """Return (data_root, download_dir, extract_dir) under ~/open3d_data for `prefix`."""
    root = Path.home() / 'open3d_data'
    download_dir = root / 'download' / prefix
    extract_dir = root / 'extract' / prefix
    return (root, download_dir, extract_dir)
def create_ds_config(args):
    """Write a DeepSpeed JSON config into args.output_dir and record its path
    on args.deepspeed_config.

    Uses AdamW (adam_w_mode), dynamic fp16 loss scaling, and a global batch
    size of batch_size * update_freq * world_size.
    """
    args.deepspeed_config = os.path.join(args.output_dir, 'deepspeed_config.json')
    optimizer_cfg = {
        'type': 'Adam',
        'adam_w_mode': True,
        'params': {'lr': args.lr, 'weight_decay': args.weight_decay, 'bias_correction': True, 'betas': [0.9, 0.999], 'eps': 1e-08},
    }
    ds_config = {
        'train_batch_size': args.batch_size * args.update_freq * get_world_size(),
        'train_micro_batch_size_per_gpu': args.batch_size,
        'steps_per_print': 1000,
        'optimizer': optimizer_cfg,
        'fp16': {'enabled': True, 'loss_scale': 0, 'initial_scale_power': 7, 'loss_scale_window': 128},
    }
    with open(args.deepspeed_config, mode='w') as writer:
        writer.write(json.dumps(ds_config, indent=2))
class AdversarialLoss(nn.Module):
    """GAN adversarial loss supporting 'nsgan' (BCE), 'lsgan' (MSE) and
    'hinge' variants.

    Real/fake target labels are registered as buffers so they follow the
    module's device/dtype.
    """

    def __init__(self, type='nsgan', target_real_label=1.0, target_fake_label=0.0):
        super(AdversarialLoss, self).__init__()
        self.type = type
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        criteria = {'nsgan': nn.BCELoss, 'lsgan': nn.MSELoss, 'hinge': nn.ReLU}
        if type in criteria:
            self.criterion = criteria[type]()

    def __call__(self, outputs, is_real, for_dis=None):
        """Compute the loss; `for_dis` selects the discriminator-side hinge form."""
        if self.type != 'hinge':
            target = (self.real_label if is_real else self.fake_label).expand_as(outputs)
            return self.criterion(outputs, target)
        if for_dis:
            # hinge for D: relu(1 - D(x)) on real, relu(1 + D(x)) on fake
            signed = -outputs if is_real else outputs
            return self.criterion(1 + signed).mean()
        # hinge for G: maximize D on generated samples
        return (-outputs).mean()
def seresnet110_svhn(num_classes=10, **kwargs):
    """SE-ResNet-110 for SVHN: 110-layer non-bottleneck CIFAR-style variant.

    Extra keyword arguments are forwarded to get_seresnet_cifar (e.g. for
    pretrained-weight loading).
    """
    return get_seresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name='seresnet110_svhn', **kwargs)
def attention(query, num_heads, y_w, v, hidden, hidden_features, attention_vec_size, attn_length, use_global_attention=False):
    """Multi-head attention over `hidden` given a decoder `query`.

    Legacy TF1/Python-2 code: uses `xrange` and the old tf.concat(axis, values)
    argument order — do not port call sites without updating those.
    y_w[a] holds (weight, bias) for head a; v[a] is the Bahdanau score vector.
    Returns (attention logits, attention probabilities, context vectors), one
    entry per head.
    """
    at_logits = []
    at_probs = []
    ds = []
    if nest.is_sequence(query):
        # Flatten a nested query state and sanity-check ranks before concat.
        query_list = nest.flatten(query)
        for q in query_list:
            ndims = q.get_shape().ndims
            if ndims:
                assert (ndims == 2)
        query = tf.concat(1, query_list)
    for a in xrange(num_heads):
        with tf.variable_scope(('Attention_%d' % a)):
            y = (tf.matmul(query, y_w[a][0]) + y_w[a][1])
            y = tf.reshape(y, [(- 1), 1, 1, attention_vec_size])
            if use_global_attention:
                # Luong-style multiplicative score
                s = tf.reduce_sum((hidden_features[a] * y), [2, 3])
            else:
                # Bahdanau-style additive score
                s = tf.reduce_sum((v[a] * tf.tanh((hidden_features[a] + y))), [2, 3])
            at_logits.append(s)
            att = tf.nn.softmax(s)
            at_probs.append(att)
            # Attention-weighted sum of hidden states -> context vector d.
            d = tf.reduce_sum((tf.reshape(att, [(- 1), attn_length, 1, 1]) * hidden), [1, 2])
            ds.append(tf.reshape(d, [(- 1), attention_vec_size]))
    return (at_logits, at_probs, ds)
class Corpus(object):
    """Encodes train/dev/test text files into flat token-id sequences.

    Char-level when `vocab` is a CharVocab, word-level otherwise. Relies on
    module-level `data_path` for the corpus directory.
    """

    def __init__(self, vocab, debug=False):
        self.vocab = vocab
        self.encoded_train = self.encode_corpus('train.txt', debug)
        self.encoded_dev = self.encode_corpus('dev.txt', debug)
        self.encoded_test = self.encode_corpus('test.txt', debug)

    def encode_corpus(self, filename, debug=False):
        """Read data_path/filename and return a flat list of ids with one
        <eos> appended per line; unknown tokens map to <unk>.
        `debug` truncates the file to the first 1024*100 lines."""
        encoded = []
        print('encode corpus: {}'.format(filename))
        with open(os.path.join(data_path, filename), 'r', encoding='utf-8') as f:
            lines = f.readlines()
            if debug:
                lines = lines[:(1024 * 100)]
            for line in tqdm(lines):
                words = line.strip().split(' ')
                if isinstance(self.vocab, CharVocab):
                    # tokens look like 'word/POS': keep the word part, then
                    # encode character by character
                    words = ''.join([word.split('/')[0] for word in words])
                    encoded += ([(self.vocab.c2i[x] if (x in self.vocab.c2i) else self.vocab.c2i['<unk>']) for x in words] + [self.vocab.c2i['<eos>']])
                else:
                    encoded += ([(self.vocab.w2i[x] if (x in self.vocab.w2i) else self.vocab.w2i['<unk>']) for x in words] + [self.vocab.w2i['<eos>']])
        return encoded
class SResTransformerPredict(torch.nn.Module):
    """Recurrent (step-wise autoregressive) transformer predicting the next
    Fourier coefficient as an (amplitude, phase) pair.

    The (real, imag) input pair is embedded into d_model//2 dims; the 2D
    positional encoding supplies the other half before the recurrent encoder.
    """

    def __init__(self, d_model, coords, flatten_order, attention_type='full', n_layers=4, n_heads=4, d_query=32, dropout=0.1, attention_dropout=0.1):
        super(SResTransformerPredict, self).__init__()
        self.fourier_coefficient_embedding = torch.nn.Linear(2, (d_model // 2))
        self.pos_embedding = PositionalEncoding2D((d_model // 2), coords=coords, flatten_order=flatten_order, persistent=False)
        # Recurrent encoder: feed-forward width is 4x the model width (n_heads*d_query).
        self.encoder = RecurrentEncoderBuilder.from_kwargs(attention_type=attention_type, n_layers=n_layers, n_heads=n_heads, feed_forward_dimensions=((n_heads * d_query) * 4), query_dimensions=d_query, value_dimensions=d_query, dropout=dropout, attention_dropout=attention_dropout).get()
        self.predictor_amp = torch.nn.Linear((n_heads * d_query), 1)
        self.predictor_phase = torch.nn.Linear((n_heads * d_query), 1)

    def forward(self, x, i=0, memory=None):
        """One recurrent step at sequence position `i`.

        Returns ((amplitude, tanh-squashed phase) concatenated on the last
        dim, updated recurrent memory).
        """
        x = x.view(x.shape[0], (- 1))
        x = self.fourier_coefficient_embedding(x)
        x = self.pos_embedding.forward_i(x, i)
        (y_hat, memory) = self.encoder(x, memory)
        y_amp = self.predictor_amp(y_hat)
        y_phase = torch.tanh(self.predictor_phase(y_hat))
        return (torch.cat([y_amp, y_phase], dim=(- 1)), memory)
def sub(scores0, scores1):
    """Return elementwise absolute differences between two score sequences.

    Pairs are truncated to the shorter input, matching zip semantics.
    """
    return [abs(left - right) for (left, right) in zip(scores0, scores1)]
def generate_toy_features(dataset_path: str, num_frames: int=500, num_joints: int=50):
    """Create a synthetic rotating half-ellipse "worm" skeleton sequence and
    pickle it to `dataset_path` as a {video_name: features} dict.

    Relies on the module-level IM_SIZE constant for image geometry.
    """
    skeletons = []
    head_width = []
    midbody_width = []
    tail_width = []
    # per-frame rotation offset in degrees (frame i rotated by i degrees)
    init_angle = np.arange(0, num_frames)
    for i in range(num_frames):
        skel = []
        center = ((IM_SIZE // 2), (IM_SIZE // 2))
        worm_thickness = (IM_SIZE // 20)
        start_angle = 0
        end_angle = 180
        axes = ((IM_SIZE // 4), (IM_SIZE // 3))
        for theta in np.linspace((init_angle[i] + start_angle), (init_angle[i] + end_angle), num_joints):
            x = int((center[0] + (axes[0] * np.cos(np.deg2rad(theta)))))
            y = int((center[1] - (axes[1] * np.sin(np.deg2rad(theta)))))
            skel.append((x, y))
        # NOTE(review): indentation reconstructed — widths are appended once
        # per frame (arrays end up shaped (num_frames,)); confirm upstream.
        head_width.append(worm_thickness)
        midbody_width.append(worm_thickness)
        tail_width.append(worm_thickness)
        skeletons.append(skel)
    skeletons = np.array(skeletons, np.float32)
    head_width = np.array(head_width, np.float32)
    midbody_width = np.array(midbody_width, np.float32)
    tail_width = np.array(tail_width, np.float32)
    features = {'toy_video_0': {'skeletons': skeletons, 'head_width': head_width, 'midbody_width': midbody_width, 'tail_width': tail_width, 'frame_rate': 30}}
    with open(dataset_path, 'wb') as f:
        pickle.dump(features, f)
def write_vtt(transcript: Iterator[dict], file: TextIO):
    """Write segments as a WebVTT file: header, then one cue per segment.

    '-->' inside cue text is rewritten to '->' because it would otherwise be
    parsed as a cue timing separator.
    """
    print('WEBVTT\n', file=file)
    for segment in transcript:
        start = format_timestamp(segment['start'])
        end = format_timestamp(segment['end'])
        body = segment['text'].strip().replace('-->', '->')
        print(f'{start} --> {end}\n{body}\n', file=file, flush=True)
class RteProcessor(DataProcessor):
    """GLUE RTE processor: two-sentence entailment classification."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a TensorFlow-dataset example dict."""
        return InputExample(
            tensor_dict['idx'].numpy(),
            tensor_dict['sentence1'].numpy().decode('utf-8'),
            tensor_dict['sentence2'].numpy().decode('utf-8'),
            str(tensor_dict['label'].numpy()),
        )

    def get_train_examples(self, data_dir):
        """Examples from train.tsv in `data_dir`."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Examples from dev.tsv in `data_dir`."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_labels(self):
        """Label set for RTE."""
        return ['entailment', 'not_entailment']

    def _create_examples(self, lines, set_type):
        """Turn tsv rows (header skipped) into InputExamples; the label is the
        last column."""
        examples = []
        rows = iter(lines)
        next(rows, None)  # skip header row
        for row in rows:
            examples.append(InputExample(
                guid='%s-%s' % (set_type, row[0]),
                text_a=row[1],
                text_b=row[2],
                label=row[-1],
            ))
        return examples
class DatasetManager():
    """Cross-validation dataset manager for a binary (positive/negative)
    document classification task with optional oversampling.

    Convention throughout: label 0 = positive, 1 = negative.
    Indentation below is reconstructed from a flattened source — the NOTE
    comments flag spots that should be confirmed against the original file.
    """

    def __init__(self, data, super_category, sub_category, round_id, oversampling_ratio, cross_validation=10):
        self.data = data
        self.super_category = super_category
        self.sub_category = sub_category
        self.round_id = (round_id - 1)  # external round ids are 1-based
        self.sampling_ratio = oversampling_ratio
        # Partition items into positive/negative by sub_category membership.
        self.dataset = {'positive': [], 'negative': []}
        for item in data:
            if (self.sub_category in item['label'][self.super_category]):
                self.dataset['positive'].append((item['doc_embeddings'], item['id']))
            else:
                self.dataset['negative'].append((item['doc_embeddings'], item['id']))
        self.round_size_positive = (len(self.dataset['positive']) // cross_validation)
        self.round_size_negative = (len(self.dataset['negative']) // cross_validation)
        # Slice into `cross_validation` equal folds.
        self.split_data = []
        round_i = 0
        for round_i in range(cross_validation):
            one_split = {}
            one_split['positive'] = self.dataset['positive'][(round_i * self.round_size_positive):((round_i + 1) * self.round_size_positive)]
            one_split['negative'] = self.dataset['negative'][(round_i * self.round_size_negative):((round_i + 1) * self.round_size_negative)]
            self.split_data.append(copy.deepcopy(one_split))
        # Leftover items that didn't fit into equal folds go to training.
        extra = {}
        extra['positive'] = self.dataset['positive'][((round_i + 1) * self.round_size_positive):]
        extra['negative'] = self.dataset['negative'][((round_i + 1) * self.round_size_negative):]
        self.batch_id = 0
        self.batch_pos_id = 0
        self.batch_neg_id = 0
        self.trainset_ = copy.deepcopy(self.split_data)
        # Validation fold = round_id; test fold = the next one (wrapping).
        self.validset = copy.deepcopy(self.split_data[self.round_id])
        self.validset_input = (self.validset['positive'] + self.validset['negative'])
        self.validset_output = (([0] * len(self.validset['positive'])) + ([1] * len(self.validset['negative'])))
        self.testset = copy.deepcopy(self.split_data[((self.round_id + 1) % cross_validation)])
        self.testset_input = (self.testset['positive'] + self.testset['negative'])
        self.testset_output = (([0] * len(self.testset['positive'])) + ([1] * len(self.testset['negative'])))
        self.testset_ids = [item[1] for item in self.testset_input]
        # Remove validation fold, then the test fold: after the first delete,
        # the test fold sits at index 0 (if round_id was last) or at round_id.
        del self.trainset_[self.round_id]
        if (self.round_id == (cross_validation - 1)):
            del self.trainset_[0]
        else:
            del self.trainset_[self.round_id]
        self.trainset = {'positive': [], 'negative': []}
        for item in self.trainset_:
            self.trainset['positive'].extend(item['positive'])
            self.trainset['negative'].extend(item['negative'])
        self.trainset['positive'].extend(extra['positive'])
        self.trainset['negative'].extend(extra['negative'])
        del self.trainset_
        self.ratio = (len(self.trainset['negative']) / len(self.trainset['positive']))
        random.shuffle(self.trainset['positive'])
        random.shuffle(self.trainset['negative'])
        # Flat [sample, label] training list (0 = positive, 1 = negative).
        self.training_set = []
        for item in self.trainset['positive']:
            self.training_set.append([item, 0])
        for item in self.trainset['negative']:
            self.training_set.append([item, 1])
        random.shuffle(self.training_set)

    def next_batch(self, batchsize=24):
        """Return (inputs, labels) for the next training batch, using
        class-balanced sampling when the dataset is imbalanced enough."""
        # NOTE(review): overwrites the neg/pos ratio computed in __init__ with
        # the inverse (pos/neg) over the FULL dataset — confirm intent.
        self.ratio = (len(self.dataset['positive']) / len(self.dataset['negative']))
        if ((self.sampling_ratio in [1, 3, 5, 7]) and (self.ratio < (1 / self.sampling_ratio))):
            # Oversampled path: fixed positive:negative proportion per batch.
            self.batch_pos_size = (batchsize // (self.sampling_ratio + 1))
            if ((self.batch_pos_id + self.batch_pos_size) >= len(self.trainset['positive'])):
                self.batch_pos_id = 0
                random.shuffle(self.trainset['positive'])
            self.batch_neg_size = (batchsize - self.batch_pos_size)
            if ((self.batch_neg_id + self.batch_neg_size) >= len(self.trainset['negative'])):
                self.batch_neg_id = 0
                random.shuffle(self.trainset['negative'])
            batch_pos_data = self.trainset['positive'][self.batch_pos_id:(self.batch_pos_id + self.batch_pos_size)]
            batch_neg_data = self.trainset['negative'][self.batch_neg_id:(self.batch_neg_id + self.batch_neg_size)]
            batch_input_list = (batch_pos_data + batch_neg_data)
            batch_output_list = (([0] * self.batch_pos_size) + ([1] * self.batch_neg_size))
            self.batch_pos_id = (self.batch_pos_id + self.batch_pos_size)
            self.batch_neg_id = (self.batch_neg_id + self.batch_neg_size)
            return (batch_input_list, batch_output_list)
        else:
            # Plain sequential path over the shuffled flat training set.
            if ((self.batch_id + batchsize) >= len(self.training_set)):
                self.batch_id = 0
                random.shuffle(self.training_set)
            batch_data = self.training_set[self.batch_id:(self.batch_id + batchsize)]
            self.batch_id = (self.batch_id + batchsize)
            batch_input_list = []
            batch_output_list = []
            for (i, item) in enumerate(batch_data):
                batch_input_list.append(item[0])
                batch_output_list.append(item[1])
            # NOTE(review): batch_id is advanced a second time here, so every
            # call skips `batchsize` samples — looks unintended; confirm.
            self.batch_id = (self.batch_id + batchsize)
            return (batch_input_list, batch_output_list)
class DownSampler(nn.Module):
    """ESPNetv2-style down-sampling block: concatenates a strided EESP branch
    with a 3x3 average-pool branch, optionally reinforced by the down-scaled
    raw input image.
    """

    def __init__(self, nin, nout, k=4, r_lim=9, reinf=True):
        super().__init__()
        # EESP contributes nout-nin channels; the avg-pool branch carries nin,
        # so the concatenation has exactly nout channels.
        nout_new = (nout - nin)
        self.eesp = EESP(nin, nout_new, stride=2, k=k, r_lim=r_lim, down_method='avg')
        self.avg = nn.AvgPool2d(kernel_size=3, padding=1, stride=2)
        if reinf:
            # Input-reinforcement path (only constructed when reinf=True;
            # forward() with input2 set would fail otherwise).
            self.inp_reinf = nn.Sequential(CBR(config_inp_reinf, config_inp_reinf, 3, 1), CB(config_inp_reinf, nout, 1, 1))
        self.act = nn.PReLU(nout)

    def forward(self, input, input2=None):
        """Downsample `input` by 2; `input2` is the original-resolution image
        used for reinforcement (pooled down until sizes match)."""
        avg_out = self.avg(input)
        eesp_out = self.eesp(input)
        output = torch.cat([avg_out, eesp_out], 1)
        if (input2 is not None):
            w1 = avg_out.size(2)
            # Pool the raw input repeatedly until its spatial size matches
            # this pyramid level.
            while True:
                input2 = F.avg_pool2d(input2, kernel_size=3, padding=1, stride=2)
                w2 = input2.size(2)
                if (w2 == w1):
                    break
            output = (output + self.inp_reinf(input2))
        return self.act(output)
def load_data(args):
    """Build the three loaders for domain adaptation: source train, target
    train, and target test.

    Train loaders are infinite unless args.epoch_based_training is set; the
    test loader is always finite. Returns (source_loader,
    target_train_loader, target_test_loader, n_class).
    """
    src_path = os.path.join(args.data_dir, args.src_domain)
    tgt_path = os.path.join(args.data_dir, args.tgt_domain)
    infinite = not args.epoch_based_training
    (source_loader, n_class) = data_loader.load_data(
        src_path, args.batch_size, infinite_data_loader=infinite, train=True, num_workers=args.num_workers)
    (target_train_loader, _) = data_loader.load_data(
        tgt_path, args.batch_size, infinite_data_loader=infinite, train=True, num_workers=args.num_workers)
    (target_test_loader, _) = data_loader.load_data(
        tgt_path, args.batch_size, infinite_data_loader=False, train=False, num_workers=args.num_workers)
    return (source_loader, target_train_loader, target_test_loader, n_class)
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    """Distributed evaluation: each rank runs inference on its shard, then
    results are gathered (via GPU all-gather or a shared tmpdir on CPU).

    Only rank 0 drives the progress bar; it advances by batch_size*world_size
    per step since every rank processes a batch in parallel.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    (rank, world_size) = get_dist_info()
    if (rank == 0):
        prog_bar = mmcv.ProgressBar(len(dataset))
    for data in data_loader:
        with torch.no_grad():
            result = model(return_loss=False, **data)
        results.append(result)
        if (rank == 0):
            # infer this rank's batch size from any value in the data dict
            batch_size = len(next(iter(data.values())))
            for _ in range((batch_size * world_size)):
                prog_bar.update()
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results
def load_model(path, compile=False, remove_last_n_layers=0):
    """Load a saved Keras model (with this project's custom layers), optionally
    truncating the last `remove_last_n_layers` layers.

    With truncation, returns a new Model ending at the output of the layer
    `remove_last_n_layers + 1` from the end.
    """
    custom_objects = {'PatchEncoder': PatchEncoder, 'Switch': Switch, 'Router': Router}
    loaded_model = keras.models.load_model(path, compile=compile, custom_objects=custom_objects)
    if remove_last_n_layers == 0:
        return loaded_model
    cut_output = loaded_model.layers[-remove_last_n_layers - 1].output
    return keras.Model(inputs=loaded_model.input, outputs=cut_output)
def create_dataloaders(args):
    """Build streaming train/eval dataloaders over constant-length token
    chunks (uses the module-level `tokenizer`).
    """
    ds_kwargs = {'streaming': True}
    train_data = load_dataset(args.dataset_name_train, split='train', **ds_kwargs)
    train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=args.seed)
    valid_data = load_dataset(args.dataset_name_valid, split='train', **ds_kwargs)
    train_dataset = ConstantLengthDataset(tokenizer, train_data, infinite=True, seq_length=args.seq_length, tokenized=args.tokenized)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, infinite=False, seq_length=args.seq_length, tokenized=args.tokenized)
    train_dataset = train_dataset.shuffle(buffer_size=args.shuffle_buffer)
    # NOTE(review): if ConstantLengthDataset is a torch IterableDataset,
    # DataLoader(shuffle=True) raises ValueError — and shuffling already
    # happens twice above via buffer shuffles. Confirm against the base class.
    train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.valid_batch_size)
    return (train_dataloader, eval_dataloader)
class HourglassNet(exkp):
    """Stacked-hourglass keypoint network preset: 5-level hourglass with fixed
    channel dims/module counts, parameterized only by heads and stack count.
    """

    def __init__(self, heads, num_stacks=2):
        n = 5  # hourglass recursion depth
        dims = [256, 256, 384, 384, 384, 512]
        modules = [2, 2, 2, 2, 2, 4]
        super(HourglassNet, self).__init__(n, num_stacks, dims, modules, heads, make_tl_layer=None, make_br_layer=None, make_pool_layer=make_pool_layer, make_hg_layer=make_hg_layer, kp_layer=residual, cnv_dim=256)
def generalized_cross_entropy(y_true, y_pred):
    """Generalized cross entropy (Lq) loss with q=0.7, robust to label noise:
    (1 - p_true^q) / q averaged over the batch.
    """
    q = 0.7
    p_true = tf.reduce_sum(y_true * y_pred, axis=-1)
    per_sample_loss = (1 - tf.pow(p_true, q)) / q
    return tf.reduce_mean(per_sample_loss)
class BaseOptions():
    """Argparse-based option container (SPADE-style): defines CLI flags, merges
    dataset-specific options, and can persist/restore options per experiment.
    """

    def __init__(self):
        self.initialized = False  # initialize() has not populated a parser yet
        self.isTrain = True

    def initialize(self, parser):
        """Register all base command-line options on `parser`."""
        parser.add_argument('--name', type=str, default='cityscapes_from_gta5', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints/fcn8s/', help='models are saved here')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        parser.add_argument('--model', type=str, default='fcn8s', help='which model to use')
        parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
        parser.add_argument('--preprocess_mode', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time.', choices=('resize_and_crop', 'crop', 'scale_width', 'scale_width_and_crop', 'scale_shortside', 'scale_shortside_and_crop', 'fixed', 'none'))
        parser.add_argument('--load_size', type=int, default=1024, help='Scale images to this size. The final image will be cropped to --crop_size.')
        parser.add_argument('--crop_size', type=int, default=512, help='Crop to the width of crop_size (after initially scaling the images to load_size.)')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='The ratio width/height. The final height of the load image will be crop_size/aspect_ratio')
        # NOTE(review): help text typo "dopntcare" preserved from source.
        parser.add_argument('--label_nc', type=int, default=182, help='# of input label classes without unknown class. If you have unknown class as class label, specify --contain_dopntcare_label.')
        parser.add_argument('--contain_dontcare_label', action='store_true', help='if the label map contains dontcare label (dontcare=255)')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        parser.add_argument('--dataroot', type=str, default='/data/yzhang/gta5_deeplab/')
        # NOTE(review): label_dir defaults to .../images/ and image_dir to
        # .../labels/ — these look swapped; confirm against the data layout.
        parser.add_argument('--label_dir', type=str, default='/data/yzhang/gta5_deeplab/images/')
        parser.add_argument('--image_dir', type=str, default='/data/yzhang/gta5_deeplab/labels/')
        parser.add_argument('--dataset_mode', type=str, default='custom')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')
        parser.add_argument('--nThreads', default=4, type=int, help='# threads for loading data')
        parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--load_from_opt_file', action='store_true', help='load the options from checkpoints and use that as default')
        parser.add_argument('--cache_filelist_write', action='store_true', help='saves the current filelist into a text file, so that it loads faster')
        parser.add_argument('--cache_filelist_read', action='store_true', help='reads from the file list cache')
        # NOTE(review): help text typo "Totla" preserved from source.
        parser.add_argument('--niter', type=int, default=100000, help='# of iter at starting learning rate. This is NOT the total #epochs. Totla #epochs is niter + niter_decay')
        parser.add_argument('--niter_decay', type=int, default=0, help='# of iter to linearly decay learning rate to zero')
        parser.add_argument('--snapshot', type=int, default=50000, help='# of iter to save snapshot')
        parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
        parser.add_argument('--no_instance', action='store_true', help='if specified, do *not* add instance map as input')
        parser.add_argument('--nef', type=int, default=16, help='# of encoder filters in the first conv layer')
        parser.add_argument('--use_vae', action='store_true', help='enable training with an image encoder.')
        parser.add_argument('--vgg_norm', action='store_true', help='vgg_norm')
        parser.add_argument('--model_path', type=str, default='')
        parser.add_argument('--eval_output_dir', type=str, default='outputs', help='dir to save evaluation outputs')
        parser.add_argument('--eval_losses_dir', type=str, default='.', help='dir to save evaluation losses')
        parser.add_argument('--eval_spade', type=bool, default=False, help='when eval SPADE, input should be gtFinePred')
        parser.add_argument('--n_fold', type=int, default=0, help='n fold validation')
        parser.add_argument('--fold', type=int, default=0, help='fold number')
        parser.add_argument('--cross_validation_mode', type=str, default='train', help='cross validation mode')
        self.initialized = True
        return parser

    def gather_options(self):
        """Parse CLI options, merging dataset-specific flags and (optionally)
        saved options from a previous run."""
        # NOTE(review): reconstructed indentation — if initialize() was already
        # called, `parser` below is unbound (latent bug inherited upstream).
        if (not self.initialized):
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        (opt, unknown) = parser.parse_known_args()
        model_name = opt.model
        dataset_mode = opt.dataset_mode
        # let the chosen dataset register its own extra options
        dataset_option_setter = data.get_option_setter(dataset_mode)
        parser = dataset_option_setter(parser, self.isTrain)
        (opt, unknown) = parser.parse_known_args()
        if opt.load_from_opt_file:
            parser = self.update_options_from_file(parser, opt)
        opt = parser.parse_args()
        self.parser = parser
        return opt

    def print_options(self, opt):
        """Pretty-print options, annotating values that differ from defaults."""
        message = ''
        message += ' Options \n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)

    def option_file_path(self, opt, makedir=False):
        # Path (without extension) of the per-experiment saved-options file.
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        if makedir:
            util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt')
        return file_name

    def save_options(self, opt):
        """Persist options both as readable text (opt.txt) and pickle (opt.pkl)."""
        file_name = self.option_file_path(opt, makedir=True)
        with open((file_name + '.txt'), 'wt') as opt_file:
            for (k, v) in sorted(vars(opt).items()):
                comment = ''
                default = self.parser.get_default(k)
                if (v != default):
                    comment = ('\t[default: %s]' % str(default))
                opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment))
        with open((file_name + '.pkl'), 'wb') as opt_file:
            pickle.dump(opt, opt_file)

    def update_options_from_file(self, parser, opt):
        """Use a previously saved options pickle as the new defaults."""
        new_opt = self.load_options(opt)
        for (k, v) in sorted(vars(opt).items()):
            if (hasattr(new_opt, k) and (v != getattr(new_opt, k))):
                new_val = getattr(new_opt, k)
                parser.set_defaults(**{k: new_val})
        return parser

    def load_options(self, opt):
        # Unpickle the saved options namespace for this experiment.
        file_name = self.option_file_path(opt, makedir=False)
        new_opt = pickle.load(open((file_name + '.pkl'), 'rb'))
        return new_opt

    def parse(self, save=False):
        """Full pipeline: gather, print, optionally save, then post-process
        (semantic_nc, gpu id list, CUDA device) and return the options."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain
        self.print_options(opt)
        if opt.isTrain:
            self.save_options(opt)
        # +1 channel when a dontcare label is present
        opt.semantic_nc = (opt.label_nc + (1 if opt.contain_dontcare_label else 0))
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if (id >= 0):
                opt.gpu_ids.append(id)
        if (len(opt.gpu_ids) > 0):
            torch.cuda.set_device(opt.gpu_ids[0])
        assert ((len(opt.gpu_ids) == 0) or ((opt.batchSize % len(opt.gpu_ids)) == 0)), ('Batch size %d is wrong. It must be a multiple of # GPUs %d.' % (opt.batchSize, len(opt.gpu_ids)))
        self.opt = opt
        return self.opt
class PieceWiseConstantLrSchedulerMaker(object):
    """Factory producing a piecewise-constant (multi-step) LR scheduler.

    The learning rate is multiplied by ``gamma`` each time the epoch counter
    crosses one of ``milestones``.
    """

    def __init__(self, milestones: List[int], gamma: float = 0.1):
        # Only the schedule parameters are stored; the optimizer is bound later.
        self.milestones = milestones
        self.gamma = gamma

    def __call__(self, optimizer):
        """Build a MultiStepLR scheduler attached to *optimizer*."""
        sched_kwargs = {"milestones": self.milestones, "gamma": self.gamma}
        return torch.optim.lr_scheduler.MultiStepLR(optimizer, **sched_kwargs)
def compact(text):
    """Deal with headers, lists, empty sections, residuals of tables.

    Transforms *text* (one page, markup already expanded) into a list of
    output lines, expanding section headers and list items according to the
    module-level ``options`` (toHTML / keepSections / keepLists).

    NOTE(review): relies on the module-level ``section`` regex, the
    ``listOpen`` / ``listClose`` / ``listItem`` maps and ``options`` —
    confirm their definitions elsewhere in this file.
    """
    page = []              # output lines accumulated so far
    headers = {}           # level -> pending header text (emitted lazily)
    emptySection = False   # True right after a header, before any body line
    listLevel = []         # stack of currently open list markers ('*','#',';',':')
    listCount = []         # per-level item counters (numbered lists)
    for line in text.split('\n'):
        if (not line):
            # Blank line: close any open lists; collapse runs of blanks.
            if len(listLevel):
                page.append(line)
                if options.toHTML:
                    for c in reversed(listLevel):
                        page.append(listClose[c])
                listLevel = []
                listCount = []
                emptySection = False
            elif (page and page[(- 1)]):
                page.append('')
            continue
        m = section.match(line)
        if m:
            # Section header: remember it; only emit if the section has content.
            title = m.group(2)
            lev = len(m.group(1))
            if options.toHTML:
                page.append(('<h%d>%s</h%d>' % (lev, title, lev)))
            if (title and (title[(- 1)] not in '!?')):
                title += '.'   # normalize header to read as a sentence
            headers[lev] = title
            # Drop pending headers deeper than this one (previous siblings).
            for i in list(headers.keys()):
                if (i > lev):
                    del headers[i]
            emptySection = True
            listLevel = []
            listCount = []
            continue
        elif line.startswith('++'):
            # ++Title++ page title.
            title = line[2:(- 2)]
            if title:
                if (title[(- 1)] not in '!?'):
                    title += '.'
                page.append(title)
        elif (line[0] == ':'):
            # Drop indented (definition) lines.
            continue
        elif (line[0] in '*#;:'):
            # List item: reconcile the open-list stack with this line's prefix.
            i = 0
            for (c, n) in zip_longest(listLevel, line, fillvalue=''):
                if ((not n) or (n not in '*#;:')):
                    # Prefix ended: close lists deeper than the prefix.
                    if c:
                        if options.toHTML:
                            page.append(listClose[c])
                        listLevel = listLevel[:(- 1)]
                        listCount = listCount[:(- 1)]
                        continue
                    else:
                        break
                # Different list char at this depth: switch list type
                # (';' and ':' are treated as interchangeable).
                if ((c != n) and ((not c) or ((c not in ';:') and (n not in ';:')))):
                    if c:
                        if options.toHTML:
                            page.append(listClose[c])
                        listLevel = listLevel[:(- 1)]
                        listCount = listCount[:(- 1)]
                    listLevel += n
                    listCount.append(0)
                    if options.toHTML:
                        page.append(listOpen[n])
                i += 1
            n = line[(i - 1)]        # last marker char of the prefix
            line = line[i:].strip()  # the item's text
            if line:
                if options.keepLists:
                    if options.keepSections:
                        # Flush pending section titles before the first item.
                        items = sorted(headers.items())
                        for (_, v) in items:
                            page.append(('Section::::' + v))
                        headers.clear()
                    listCount[(i - 1)] += 1
                    bullet = (('BULLET::::%d. ' % listCount[(i - 1)]) if (n == '#') else 'BULLET::::- ')
                    page.append(('{0:{1}s}'.format(bullet, len(listLevel)) + line))
                elif options.toHTML:
                    if (n not in listItem):
                        n = '*'
                    page.append((listItem[n] % line))
        elif len(listLevel):
            # Non-list line while lists are open: close them all.
            if options.toHTML:
                for c in reversed(listLevel):
                    page.append(listClose[c])
            listLevel = []
            listCount = []
            page.append(line)
        elif ((line[0] in '{|') or (line[(- 1)] == '}')):
            # Drop residuals of tables.
            continue
        elif (((line[0] == '(') and (line[(- 1)] == ')')) or (line.strip('.-') == '')):
            # Drop lines that are only parenthesized text or separators.
            continue
        elif len(headers):
            # First real content line of a section: emit the pending headers.
            if options.keepSections:
                items = sorted(headers.items())
                for (i, v) in items:
                    page.append(('Section::::' + v))
            headers.clear()
            page.append(line)
            emptySection = False
        elif (not emptySection):
            # Drop preformatted lines (leading space).
            if (line[0] != ' '):
                page.append(line)
    return page
def crop_hwc(image, bbox, out_sz, padding=(0, 0, 0)):
    """Crop bbox (x1, y1, x2, y2) out of *image* and resample it to an
    out_sz x out_sz patch via an affine warp.

    Args:
        image: HxWxC source image (numpy array, as used by cv2).
        bbox: (x1, y1, x2, y2) crop corners in source-image coordinates.
        out_sz: side length (pixels) of the square output patch.
        padding: fill value for regions falling outside the image.

    Returns:
        The warped out_sz x out_sz crop.
    """
    # Scale so the bbox width/height map onto out_sz - 1 (corner-aligned).
    a = (out_sz - 1) / (bbox[2] - bbox[0])
    b = (out_sz - 1) / (bbox[3] - bbox[1])
    # Translate so the bbox's top-left corner maps to (0, 0).
    c = (-a) * bbox[0]
    d = (-b) * bbox[1]
    # FIX: np.float was deprecated (NumPy 1.20) and removed (1.24);
    # the builtin float is the documented replacement (float64 dtype).
    mapping = np.array([[a, 0, c], [0, b, d]]).astype(float)
    crop = cv2.warpAffine(image, mapping, (out_sz, out_sz),
                          borderMode=cv2.BORDER_CONSTANT, borderValue=padding)
    return crop
def ibn_densenet169(**kwargs):
    """Construct the IBN-DenseNet-169 model (delegates to get_ibndensenet)."""
    return get_ibndensenet(model_name='ibn_densenet169', num_layers=169, **kwargs)
def cc(net):
    """Move *net* to the GPU when CUDA is available; otherwise return it unchanged."""
    return net.cuda() if torch.cuda.is_available() else net
class Tox21(MoleculeCSVDataset):
    """Tox21 toxicity dataset wrapped as a DGL molecule-CSV dataset.

    Downloads the Tox21 CSV archive, converts SMILES strings to graphs with
    *smiles_to_graph*, and caches the constructed graphs at *cache_file_path*.
    """

    def __init__(self, smiles_to_graph=smiles_2_dgl, load=False, log_every=1000, cache_file_path='./tox21_dglgraph.bin', n_jobs=1):
        # Fetch the raw CSV into the local download dir (skipped if present).
        self._url = 'dataset/tox21.csv.gz'
        data_path = (get_download_dir() + '/tox21.csv.gz')
        download(_get_dgl_url(self._url), path=data_path, overwrite=False)
        df = pd.read_csv(data_path)
        # Keep molecule ids aside; they are not label columns.
        self.id = df['mol_id']
        df = df.drop(columns=['mol_id'])
        # When True, __getitem__ additionally returns the molecule id.
        self.load_full = False
        super(Tox21, self).__init__(df, smiles_to_graph, smiles_column='smiles', cache_file_path=cache_file_path, load=load, log_every=log_every, n_jobs=n_jobs)
        # Re-index ids to the molecules that survived graph construction
        # (self.valid_ids is populated by the parent class — confirm there).
        self.id = [self.id[i] for i in self.valid_ids]

    def __getitem__(self, item):
        """Return (smiles, graph, labels, mask[, id]) for molecule *item*."""
        if self.load_full:
            return (self.smiles[item], self.graphs[item], self.labels[item], self.mask[item], self.id[item])
        else:
            return (self.smiles[item], self.graphs[item], self.labels[item], self.mask[item])
def load_pretrained_weights(model, model_name, load_fc=True, advprop=False):
    """Load ImageNet-pretrained weights for *model_name* into *model*.

    NOTE(review): the *load_fc* flag is currently unused — classifier weights
    are always loaded; confirm whether fc-skipping was intended.

    Args:
        model: target nn.Module whose state_dict layout may differ from the
            released checkpoint by one extra level of module nesting.
        model_name: key into the pretrained-URL tables.
        load_fc: unused (see note above).
        advprop: when True, use the AdvProp checkpoint URLs.
    """
    url_map_ = (url_map_advprop if advprop else url_map)
    pretrained_dict = model_zoo.load_url(url_map_[model_name], map_location=torch.device('cpu'))
    model_dict = model.state_dict()
    # Remap parameter names onto the checkpoint's naming: when a name is
    # absent from the checkpoint, drop the second-to-last path component
    # (presumably an extra wrapper module) and retry — TODO confirm against
    # the model definitions in this repo.
    for name in copy.deepcopy(model_dict).keys():
        if (name not in pretrained_dict.keys()):
            name_list = name.split('.')
            name_list.pop((- 2))
            pretrained_name = '.'.join(name_list)
        else:
            pretrained_name = name
        model_dict[name] = pretrained_dict[pretrained_name]
    # strict=False tolerates any keys that could not be matched.
    ret = model.load_state_dict(model_dict, strict=False)
    print('Loaded pretrained weights for {}'.format(model_name))
class TrueCaser():
    """Restore sentence-style capitalization of an arbitrarily-cased string.

    Capitalizes proper nouns (and, with the spacy backend, sentence starts)
    after lowercasing the input, using spacy or NLTK POS tags.
    """

    # spacy POS tags whose tokens should be capitalized.
    uppercase_pos = ['PROPN']

    def __init__(self, backend='spacy'):
        if (backend == 'spacy'):
            import spacy
            self.nlp = spacy.load('en_core_web_sm')
            self.normalize_fn = self._spacy_truecasing
        else:
            # NLTK backend: ensure the required corpora/models are present.
            from nltk import pos_tag, word_tokenize
            import nltk
            nltk.download('punkt')
            nltk.download('averaged_perceptron_tagger')
            nltk.download('universal_tagset')
            self.normalize_fn = self._nltk_truecasing

    def __call__(self, random_capitalized_string: str) -> str:
        """Truecase *random_capitalized_string* using the chosen backend."""
        return self.normalize_fn(random_capitalized_string)

    def _spacy_truecasing(self, random_capitalized_string: str):
        # Lowercase first, then re-capitalize proper nouns and sentence starts.
        doc = self.nlp(random_capitalized_string.lower())
        POS = self.uppercase_pos
        pieces = []
        for tok in doc:
            should_cap = (tok.pos_ in POS) or tok.is_sent_start
            pieces.append(tok.text_with_ws.capitalize() if should_cap else tok.text_with_ws)
        return ''.join(pieces)

    def _nltk_truecasing(self, random_capitalized_string: str):
        from nltk import pos_tag, word_tokenize
        import nltk
        nltk.download('punkt')
        nltk.download('averaged_perceptron_tagger')
        nltk.download('universal_tagset')
        # Penn Treebank tags for proper nouns.
        POS = ['NNP', 'NNPS']
        tagged = pos_tag(word_tokenize(random_capitalized_string.lower()))
        words = [(w.capitalize() if (p in POS) else w) for (w, p) in tagged]
        return ' '.join(words)
_registry(op_types='Softmax, BiasGelu, Elu, Exp, FastGelu, Gelu, Softplus, Tanh')
class Float16ActivationOperator(Operator):
    """Handler for the listed ONNX activation ops during float16 conversion.

    NOTE(review): the bare call above appears to be a registration decorator
    whose leading characters were lost (likely ``@op_registry(...)``) —
    confirm against the original file.
    """

    def __init__(self, onnx_quantizer, onnx_node):
        super(Float16ActivationOperator, self).__init__(onnx_quantizer, onnx_node)
def test_get_py_file_if_possible_with_py_file():
    """A path that is already a .py file must be returned unchanged."""
    resolved = get_py_file_if_possible(EXAMPLE_SOURCE)
    assert resolved == EXAMPLE_SOURCE
def generate_slicing_transform_function(transform_func_structs, slicing_axis=2, concatenate_axis=2):
    """Build a transform that slices a sample, transforms each slice, and
    optionally concatenates the results.

    Args:
        transform_func_structs: iterable of (indices, transform_func) pairs;
            each transform receives ``np.take(sample, indices, slicing_axis)``.
        slicing_axis: axis the index lists refer to.
        concatenate_axis: axis to concatenate transformed slices on, or None
            to return them as a list.

    Returns:
        A function mapping a sample array to the transformed result.
    """
    def slicing_transform_func(sample):
        transformed = [
            func(np.take(sample, idx, slicing_axis))
            for (idx, func) in transform_func_structs
        ]
        if concatenate_axis is None:
            return transformed
        return np.concatenate(transformed, axis=concatenate_axis)
    return slicing_transform_func
def parse_args_and_update_hparams(H, parser, s=None):
    """Parse CLI args into the hyperparameter dataclass *H*.

    A first parse reads ``--hps`` to select named presets from
    HPARAMS_REGISTRY; those presets become parser defaults, then a second
    parse ensures explicit CLI flags still override the presets.
    """
    H = dataclasses.replace(H, **vars(parser.parse_args(s)))
    for hp_set in filter(None, H.hps.split(',')):
        parser.set_defaults(**HPARAMS_REGISTRY[hp_set])
    return dataclasses.replace(H, **vars(parser.parse_args(s)))
def text_record(filename, text_model, num_sentences=5):
    """Write sentences generated by *text_model* to *filename*.

    Args:
        filename: path of the output text file (overwritten).
        text_model: object with a ``make_sentence()`` method (e.g. a
            markovify model).
        num_sentences: number of sentences to generate; defaults to 5,
            matching the previously hard-coded behavior.
    """
    # Context manager closes the file even if sentence generation raises
    # (the original leaked the handle on error).
    with open(filename, 'w') as textfile:
        for _ in range(num_sentences):
            sentence = text_model.make_sentence()
            textfile.write(sentence)
_criterion('magnitude')
class MagnitudeCriterion(PruningCriterion):
    """Pruning criterion that scores weights by absolute magnitude.

    NOTE(review): the bare call above looks like a registration decorator
    with its leading characters lost (likely ``@register_criterion(...)``) —
    confirm against the original file.
    """

    def __init__(self, modules, config, pattern):
        super(MagnitudeCriterion, self).__init__(modules, config, pattern)

    def on_step_begin(self):
        # Refresh self.scores (from the parent class) with |weight|,
        # optionally reduced per block by the pruning pattern.
        with torch.no_grad():
            for key in self.modules.keys():
                p = self.modules[key].weight.data
                if hasattr(self.pattern, 'reduce_score'):
                    # Pattern-aware reduction (e.g. per-block aggregation).
                    self.scores[key] = self.pattern.reduce_score(torch.abs(p), key)
                else:
                    self.scores[key] = torch.abs(p)
class IdentityBijection(Bijection):
    """The identity map as a bijection: z = x, with zero log-Jacobian."""

    def __init__(self, x_shape):
        # Input and output shapes coincide for the identity map.
        super().__init__(x_shape=x_shape, z_shape=x_shape)

    def _x_to_z(self, x, **kwargs):
        """Forward pass: pass x through unchanged."""
        return {'z': x, 'log-jac': self._log_jac_like(x)}

    def _z_to_x(self, z, **kwargs):
        """Inverse pass: pass z through unchanged."""
        return {'x': z, 'log-jac': self._log_jac_like(z)}

    def _log_jac_like(self, inputs):
        # (batch, 1) zeros matching the input's dtype and device.
        batch = inputs.shape[0]
        return torch.zeros(batch, 1, dtype=inputs.dtype, device=inputs.device)
def get_checkpoint_url(config_path):
    """Return the model-zoo checkpoint URL registered for *config_path*.

    Raises:
        RuntimeError: when no pretrained model is registered for the config.
    """
    url = _ModelZooUrls.query(config_path)
    if url is not None:
        return url
    raise RuntimeError('Pretrained model for {} is not available!'.format(config_path))
def get_agent_view(grid: chex.Array, agent: chex.Array, sensor_range: chex.Array) -> Tuple[(chex.Array, chex.Array)]:
    """Extract the agent's local (2*sensor_range+1)^2 views of the agents and
    shelves grid layers, each flattened to 1-D.

    Both layers are zero-padded by sensor_range so views near the border keep
    the same size; with that padding the agent's own (x, y) coordinate is the
    top-left corner of its receptive field.
    """
    window = ((sensor_range * 2) + 1)
    top_left = (agent.position.x, agent.position.y)
    view_shape = (window, window)

    def _local_view(layer):
        # Pad, slice the receptive field, and flatten.
        padded = jnp.pad(layer, sensor_range, mode='constant')
        return jax.lax.dynamic_slice(padded, top_left, view_shape).reshape((- 1))

    return (_local_view(grid[_AGENTS]), _local_view(grid[_SHELVES]))
def print_state(train_ctx: Context, formats: List[str], join_str: str=' | ') -> None:
    """Log one status line assembled from *formats*.

    Each format string is unescaped (so "\\n" etc. work when supplied via the
    CLI), filled from the attributes of *train_ctx*; format strings whose
    placeholders cannot be rendered are silently skipped.

    Args:
        train_ctx: object whose attributes supply the format values; must
            also expose a ``logger``.
        formats: list of (possibly escaped) format strings.
        join_str: separator placed between the rendered pieces.
    """

    def unescape(escaped_str):
        # Turn literal backslash escapes into their characters.
        return bytes(escaped_str, 'utf-8').decode('unicode_escape')

    def safe_format(format_str, **kwargs):
        # Best effort: a format referencing a missing/bad field yields None.
        # FIX: catch Exception rather than a bare except so that
        # KeyboardInterrupt / SystemExit still propagate.
        try:
            return format_str.format(**kwargs)
        except Exception:
            return None

    format_list = [safe_format(unescape(format_str), **vars(train_ctx)) for format_str in formats]
    output_str = unescape(join_str).join([val for val in format_list if (val is not None)])
    train_ctx.logger.info(output_str)
def slurm_run_scripts(scripts):
    """Submit a bash script string to SLURM via ``sbatch``.

    Writes *scripts* to a temporary file (from inside ``slurm_dir``), submits
    it, and removes the file after a successful submission.
    """
    assert isinstance(scripts, str)
    os.chdir(slurm_dir)
    # Guard: only bash scripts are accepted.
    assert scripts.startswith('#!/usr/bin/env bash\n')
    tmp = NamedTemporaryFile(delete=False)
    tmp.write(scripts.encode('utf-8'))
    tmp.close()
    run(['sbatch', tmp.name], check=True)
    os.remove(tmp.name)
def seresnet164bn_cifar10(num_classes=10, **kwargs):
    """SE-ResNet-164(BN) for CIFAR-10 (delegates to get_seresnet_cifar)."""
    return get_seresnet_cifar(num_classes=num_classes,
                              blocks=164,
                              bottleneck=True,
                              model_name='seresnet164bn_cifar10',
                              **kwargs)
def prob_eval_runner(benchmark, old_eval: bool=False, vec_input: bool=False, parallel: bool=False, uniform: bool=True, cmplx: bool=False, input_dim: int=10, bond_dim: int=10, seq_len: int=100, batch: int=100):
    """Benchmark a probabilistic MPS model on random input.

    Builds a uniform or site-dependent probabilistic MPS, fixes the
    evaluation mode, fabricates either normalized vector inputs or integer
    index inputs, and hands model + data to the *benchmark* fixture.
    """
    if uniform:
        model = ProbUnifMPS(input_dim, bond_dim, cmplx, parallel)
    else:
        model = ProbMPS(seq_len, input_dim, bond_dim, cmplx, parallel)
    # slim_eval is the newer evaluation path; old_eval=True opts out of it.
    model = partial(model, slim_eval=(not old_eval))
    if vec_input:
        # Random probability vectors: nonnegative, summing to 1 per site.
        data = torch.randn(seq_len, batch, input_dim).abs()
        data = data / data.sum(dim=(- 1), keepdim=True)
    else:
        # Discrete inputs: one index per site.
        data = torch.randint(input_dim, (seq_len, batch))
    benchmark(model, data)
class TestDataset(unittest.TestCase):
    """Dataset-level tests: annotation loading/validation and basic properties."""

    @classmethod
    def setUpClass(cls) -> None:
        # FIX: setUpClass/tearDownClass must be decorated with @classmethod;
        # otherwise unittest invokes them without the implicit cls argument
        # and fails before any test runs.
        cls._orig_logging_level = sf.getLoggingLevel()
        sf.setLoggingLevel(40)  # silence logging during the tests
        cls.PROJECT = TestConfig().create_project(overwrite=True)

    @classmethod
    def tearDownClass(cls) -> None:
        super().tearDownClass()
        sf.setLoggingLevel(cls._orig_logging_level)
        if (cls.PROJECT is not None):
            shutil.rmtree(cls.PROJECT.root)

    def test_base_annotations(self):
        """The freshly created test project ships 10 annotation rows."""
        dataset = self.PROJECT.dataset()
        self.assertTrue((len(dataset.annotations) == 10))

    def test_load_annotations(self):
        """A well-formed DataFrame (patient + slide + value) replaces the annotations."""
        dataset = self.PROJECT.dataset()
        ann_df = pd.DataFrame({'patient': pd.Series([f'pt{p}' for p in range(100)]), 'slide': pd.Series([f'slide{s}' for s in range(100)]), 'linear': pd.Series([random.random() for _ in range(100)])})
        dataset.load_annotations(ann_df)
        self.assertTrue((len(dataset.annotations) == 100))

    def test_load_faulty_annotations_with_duplicates(self):
        """Duplicate slide names must be rejected."""
        dataset = self.PROJECT.dataset()
        ann_df = pd.DataFrame({'patient': pd.Series([f'pt{p}' for p in range(100)]), 'slide': pd.Series((['slide_test', 'slide_test'] + [f'slide{s}' for s in range(98)])), 'linear': pd.Series([random.random() for _ in range(100)])})
        with self.assertRaises(sf.errors.DatasetError):
            dataset.load_annotations(ann_df)

    def test_load_faulty_annotations_without_patient(self):
        """A missing patient column must be rejected."""
        dataset = self.PROJECT.dataset()
        ann_df = pd.DataFrame({'slide': pd.Series([f'slide{s}' for s in range(100)]), 'linear': pd.Series([random.random() for _ in range(100)])})
        self.assertRaises(sf.errors.AnnotationsError, dataset.load_annotations, ann_df)

    def test_load_faulty_annotations_without_slide(self):
        """A missing slide column must be rejected."""
        dataset = self.PROJECT.dataset()
        ann_df = pd.DataFrame({'patient': pd.Series([f'pt{p}' for p in range(100)]), 'linear': pd.Series([random.random() for _ in range(100)])})
        self.assertRaises(sf.errors.AnnotationsError, dataset.load_annotations, ann_df)

    def test_properties(self):
        """A fresh dataset has no tiles, no filters and no minimum-tile bound."""
        dataset = self.PROJECT.dataset()
        # The original repeated the num_tiles assertion several times;
        # a single check is sufficient.
        self.assertTrue((dataset.num_tiles == 0))
        self.assertFalse(dataset.filters)
        self.assertFalse(dataset.filter_blank)
        self.assertFalse(dataset.min_tiles)

    def test_faulty_balance(self):
        """Balancing without extracted tiles must raise DatasetBalanceError."""
        dataset = self.PROJECT.dataset()
        self.assertRaises(sf.errors.DatasetBalanceError, dataset.balance, 'category1')

    def test_is_float(self):
        """is_float distinguishes continuous from categorical columns."""
        dataset = self.PROJECT.dataset()
        self.assertTrue(dataset.is_float('linear1'))
        self.assertTrue(dataset.is_float('linear2'))
        self.assertFalse(dataset.is_float('category1'))
        self.assertFalse(dataset.is_float('category2'))
def load_weight_checkpoint(model: peft.LoraModel, checkpoint_path: str):
    """Load LoRA-module weights from a (possibly sharded) safetensors checkpoint.

    For every LoRA module found in *model*, locates the shard containing its
    ``<module>.weight`` tensor and copies the weight in, loading each shard
    file only once.

    Args:
        model: the peft LoRA-wrapped model to update in place.
        checkpoint_path: directory containing the safetensors shard files.
    """
    modules = find_lora_modules(model)
    # Map tensor key -> shard filename; then iterate unique shards so each
    # file is read from disk exactly once.
    shard_paths = sharded_paths(checkpoint_path, modules.keys())
    unique_shards = list(set(shard_paths.values()))
    for shard_path in unique_shards:
        tensors = st.load_file(os.path.join(checkpoint_path, shard_path))
        for (module_name, target) in modules.items():
            key = (module_name + '.weight')
            # Skip modules whose weight lives in a different shard (or is absent).
            if ((key not in shard_paths) or (shard_paths[key] != shard_path)):
                continue
            new_weight = tensors[key]
            # reinit=False: presumably keeps the existing adapter init and only
            # swaps the stored weight — confirm against update_weights().
            update_weights(target, new_weight, reinit=False, device=target.weight.device)
def update_counts(s, counts):
    """Increment counts[ch] for every character of *s* already present in *counts*.

    Characters missing from *counts* are ignored: the dict gains no new keys.
    """
    for ch in s:
        if ch in counts:
            counts[ch] = counts[ch] + 1
class RPNHead(object):
    """Region Proposal Network head (PaddlePaddle fluid, static graph).

    Builds the RPN conv tower with per-anchor score/delta heads, generates
    RoI proposals at train/test time, and computes the RPN classification
    and box-regression losses.
    """
    # Component names injected by the config system.
    __inject__ = ['anchor_generator', 'rpn_target_assign', 'train_proposal', 'test_proposal']

    def __init__(self, anchor_generator=AnchorGenerator().__dict__, rpn_target_assign=RPNTargetAssign().__dict__, train_proposal=GenerateProposals(12000, 2000).__dict__, test_proposal=GenerateProposals().__dict__, num_classes=1):
        super(RPNHead, self).__init__()
        self.anchor_generator = anchor_generator
        self.rpn_target_assign = rpn_target_assign
        self.train_proposal = train_proposal
        self.test_proposal = test_proposal
        self.num_classes = num_classes
        # Components may arrive as plain dicts (e.g. from a YAML config);
        # rebuild the helper objects from those kwargs.
        if isinstance(anchor_generator, dict):
            self.anchor_generator = AnchorGenerator(**anchor_generator)
        if isinstance(rpn_target_assign, dict):
            self.rpn_target_assign = RPNTargetAssign(**rpn_target_assign)
        if isinstance(train_proposal, dict):
            self.train_proposal = GenerateProposals(**train_proposal)
        if isinstance(test_proposal, dict):
            self.test_proposal = GenerateProposals(**test_proposal)

    def _get_output(self, input):
        """Run the shared 3x3 conv, then the 1x1 score and bbox-delta heads.

        Caches anchors, scores and deltas on ``self`` for the later loss
        computation.  Returns (rpn_cls_score, rpn_bbox_pred).
        """
        dim_out = input.shape[1]
        rpn_conv = fluid.layers.conv2d(input=input, num_filters=dim_out, filter_size=3, stride=1, padding=1, act='relu', name='conv_rpn', param_attr=ParamAttr(name='conv_rpn_w', initializer=Normal(loc=0.0, scale=0.01)), bias_attr=ParamAttr(name='conv_rpn_b', learning_rate=2.0, regularizer=L2Decay(0.0)))
        (self.anchor, self.anchor_var) = self.anchor_generator(input=rpn_conv)
        num_anchor = self.anchor.shape[2]
        # Objectness scores: num_anchor * num_classes channels.
        self.rpn_cls_score = fluid.layers.conv2d(rpn_conv, num_filters=(num_anchor * self.num_classes), filter_size=1, stride=1, padding=0, act=None, name='rpn_cls_score', param_attr=ParamAttr(name='rpn_cls_logits_w', initializer=Normal(loc=0.0, scale=0.01)), bias_attr=ParamAttr(name='rpn_cls_logits_b', learning_rate=2.0, regularizer=L2Decay(0.0)))
        # Box regression: 4 delta values per anchor.
        self.rpn_bbox_pred = fluid.layers.conv2d(rpn_conv, num_filters=(4 * num_anchor), filter_size=1, stride=1, padding=0, act=None, name='rpn_bbox_pred', param_attr=ParamAttr(name='rpn_bbox_pred_w', initializer=Normal(loc=0.0, scale=0.01)), bias_attr=ParamAttr(name='rpn_bbox_pred_b', learning_rate=2.0, regularizer=L2Decay(0.0)))
        return (self.rpn_cls_score, self.rpn_bbox_pred)

    def get_proposals(self, body_feats, im_info, mode='train'):
        """Generate RoI proposals from the last backbone feature map."""
        body_feat = list(body_feats.values())[(- 1)]
        (rpn_cls_score, rpn_bbox_pred) = self._get_output(body_feat)
        if (self.num_classes == 1):
            # Binary objectness: sigmoid probability.
            rpn_cls_prob = fluid.layers.sigmoid(rpn_cls_score, name='rpn_cls_prob')
        else:
            # Multi-class: softmax over classes, keep the best foreground prob.
            rpn_cls_score = fluid.layers.transpose(rpn_cls_score, perm=[0, 2, 3, 1])
            rpn_cls_score = fluid.layers.reshape(rpn_cls_score, shape=(0, 0, 0, (- 1), self.num_classes))
            rpn_cls_prob_tmp = fluid.layers.softmax(rpn_cls_score, use_cudnn=False, name='rpn_cls_prob')
            rpn_cls_prob_slice = fluid.layers.slice(rpn_cls_prob_tmp, axes=[4], starts=[1], ends=[self.num_classes])
            (rpn_cls_prob, _) = fluid.layers.topk(rpn_cls_prob_slice, 1)
            rpn_cls_prob = fluid.layers.reshape(rpn_cls_prob, shape=(0, 0, 0, (- 1)))
            rpn_cls_prob = fluid.layers.transpose(rpn_cls_prob, perm=[0, 3, 1, 2])
        # Train/test differ only in the proposal-generation parameters.
        prop_op = (self.train_proposal if (mode == 'train') else self.test_proposal)
        (rpn_rois, rpn_roi_probs) = prop_op(scores=rpn_cls_prob, bbox_deltas=rpn_bbox_pred, im_info=im_info, anchors=self.anchor, variances=self.anchor_var)
        return rpn_rois

    def _transform_input(self, rpn_cls_score, rpn_bbox_pred, anchor, anchor_var):
        """Reshape NCHW head outputs and anchors into per-anchor row layout."""
        rpn_cls_score = fluid.layers.transpose(rpn_cls_score, perm=[0, 2, 3, 1])
        rpn_bbox_pred = fluid.layers.transpose(rpn_bbox_pred, perm=[0, 2, 3, 1])
        anchor = fluid.layers.reshape(anchor, shape=((- 1), 4))
        anchor_var = fluid.layers.reshape(anchor_var, shape=((- 1), 4))
        rpn_cls_score = fluid.layers.reshape(x=rpn_cls_score, shape=(0, (- 1), self.num_classes))
        rpn_bbox_pred = fluid.layers.reshape(x=rpn_bbox_pred, shape=(0, (- 1), 4))
        return (rpn_cls_score, rpn_bbox_pred, anchor, anchor_var)

    def _get_loss_input(self):
        # The loss tensors are cached by _get_output(); require that
        # get_proposals() ran first.
        for attr in ['rpn_cls_score', 'rpn_bbox_pred', 'anchor', 'anchor_var']:
            if (not getattr(self, attr, None)):
                raise ValueError('self.{} should not be None,'.format(attr), 'call RPNHead.get_proposals first')
        return self._transform_input(self.rpn_cls_score, self.rpn_bbox_pred, self.anchor, self.anchor_var)

    def get_loss(self, im_info, gt_box, is_crowd, gt_label=None):
        """Compute the RPN losses.

        Returns a dict with 'loss_rpn_cls' (sigmoid or softmax cross-entropy,
        depending on num_classes) and 'loss_rpn_bbox' (smooth-L1, normalized
        by the number of score targets).
        """
        (rpn_cls, rpn_bbox, anchor, anchor_var) = self._get_loss_input()
        if (self.num_classes == 1):
            (score_pred, loc_pred, score_tgt, loc_tgt, bbox_weight) = self.rpn_target_assign(bbox_pred=rpn_bbox, cls_logits=rpn_cls, anchor_box=anchor, anchor_var=anchor_var, gt_boxes=gt_box, is_crowd=is_crowd, im_info=im_info)
            score_tgt = fluid.layers.cast(x=score_tgt, dtype='float32')
            score_tgt.stop_gradient = True
            rpn_cls_loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=score_pred, label=score_tgt)
        else:
            (score_pred, loc_pred, score_tgt, loc_tgt, bbox_weight) = self.rpn_target_assign(bbox_pred=rpn_bbox, cls_logits=rpn_cls, anchor_box=anchor, anchor_var=anchor_var, gt_boxes=gt_box, gt_labels=gt_label, is_crowd=is_crowd, num_classes=self.num_classes, im_info=im_info)
            labels_int64 = fluid.layers.cast(x=score_tgt, dtype='int64')
            labels_int64.stop_gradient = True
            rpn_cls_loss = fluid.layers.softmax_with_cross_entropy(logits=score_pred, label=labels_int64, numeric_stable_mode=True)
        rpn_cls_loss = fluid.layers.reduce_mean(rpn_cls_loss, name='loss_rpn_cls')
        loc_tgt = fluid.layers.cast(x=loc_tgt, dtype='float32')
        loc_tgt.stop_gradient = True
        rpn_reg_loss = fluid.layers.smooth_l1(x=loc_pred, y=loc_tgt, sigma=3.0, inside_weight=bbox_weight, outside_weight=bbox_weight)
        rpn_reg_loss = fluid.layers.reduce_sum(rpn_reg_loss, name='loss_rpn_bbox')
        # Normalize the box loss by the total number of score targets.
        score_shape = fluid.layers.shape(score_tgt)
        score_shape = fluid.layers.cast(x=score_shape, dtype='float32')
        norm = fluid.layers.reduce_prod(score_shape)
        norm.stop_gradient = True
        rpn_reg_loss = (rpn_reg_loss / norm)
        return {'loss_rpn_cls': rpn_cls_loss, 'loss_rpn_bbox': rpn_reg_loss}
_module()
class mit_b4(MixVisionTransformer):
    """MiT-B4 backbone (SegFormer Mix Vision Transformer, depths [3, 8, 27, 3]).

    NOTE(review): the bare ``_module()`` call above appears to be a registry
    decorator with its leading characters lost (likely
    ``@BACKBONES.register_module()``) — confirm against the original file.
    """

    def __init__(self, **kwargs):
        super(mit_b4, self).__init__(patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-06), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], **kwargs)
def test_chained_config_scopes_fix_subentries():
    """Fixed sub-entries ({'d': {'a': 0}}) override the scope-provided value
    for 'a' while keeping the sibling key 'b' from the scope.

    NOTE: cfg1/cfg2 are config *scopes* — their bodies are inspected as
    source, so the local assignment below becomes a config entry.
    """
    def cfg1():
        d = {'a': 10, 'b': 20}

    def cfg2():
        pass
    (final_cfg, summary) = chain_evaluate_config_scopes([cfg1, cfg2], fixed={'d': {'a': 0}})
    assert (set(final_cfg['d'].keys()) == {'a', 'b'})
    assert (final_cfg['d']['a'] == 0)
    assert (final_cfg['d']['b'] == 20)
def test_next_track(precision='d', decimals=80):
    """Interactively step a path tracker one solution at a time.

    Tracks the first start solution of a total-degree homotopy for two
    quadrics, asking after each step whether to continue.

    Args:
        precision: 'd' (double), 'dd' (double double), 'qd' (quad double),
            or 'mp' (arbitrary multiprecision).
        decimals: number of working decimal places for the 'mp' tracker.
    """
    from phcpy.solver import total_degree_start_system
    quadrics = ['x**2 + 4*y**2 - 4;', '2*y**2 - x;']
    (startsys, startsols) = total_degree_start_system(quadrics)
    print('the first start solution :\n', startsols[0])
    # The four precision levels only differ in which initialize/next
    # functions are called; dispatch instead of duplicating the loop.
    trackers = {
        'd': (initialize_standard_tracker, initialize_standard_solution, next_standard_solution),
        'dd': (initialize_dobldobl_tracker, initialize_dobldobl_solution, next_dobldobl_solution),
        'qd': (initialize_quaddobl_tracker, initialize_quaddobl_solution, next_quaddobl_solution),
        'mp': (initialize_multprec_tracker, initialize_multprec_solution, next_multprec_solution),
    }
    if precision not in trackers:
        print('wrong argument for precision')
        return
    (init_tracker, init_solution, next_solution) = trackers[precision]
    if precision == 'mp':
        # Only the multiprecision tracker takes the decimals argument.
        init_tracker(quadrics, startsys, decimals)
    else:
        init_tracker(quadrics, startsys)
    init_solution(2, startsols[0])
    while True:
        sol = next_solution()
        print('the next solution :\n', sol)
        answer = input('continue ? (y/n) ')
        if (answer != 'y'):
            break
def get_data(name, data_dir, height, width, ratio, batch_size, workers, num_instances):
    """Build the re-ID dataset and its train/query/gallery data loaders.

    Returns:
        (dataset, num_classes, train_loader, query_loader, gallery_loader)
    """
    root = osp.join(data_dir, name)
    # NOTE(review): the join above is immediately discarded — data_dir itself
    # is used as the dataset root.  Confirm whether the per-name subdirectory
    # was intentionally disabled.
    root = data_dir
    dataset = datasets.create(name, root)
    # ImageNet channel statistics.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    num_classes = dataset.num_train_ids
    # Train: random continuous vertical crop + horizontal flip.
    train_transformer = T.Compose([T.ContVerticalCrop(height, width, ratio), T.RandomHorizontalFlip(), T.ToTensor(), normalizer])
    # Gallery: plain rescale.
    test_transformer = T.Compose([T.RectScale(height, width), T.ToTensor(), normalizer])
    # Query: discretized vertical crop.
    query_transformer = T.Compose([T.ContVerticalCropDiscret(height, width, ratio), T.ToTensor(), normalizer])
    # Identity-balanced sampling (num_instances per identity) for training.
    train_loader = DataLoader(Preprocessor(dataset.train, root=osp.join(dataset.images_dir, dataset.train_path), transform=train_transformer), batch_size=batch_size, num_workers=workers, sampler=RandomIdentitySampler(dataset.train, num_instances), pin_memory=True, drop_last=True)
    query_loader = DataLoader(Preprocessor(dataset.query, root=osp.join(dataset.images_dir, dataset.query_path), transform=query_transformer), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True)
    gallery_loader = DataLoader(Preprocessor(dataset.gallery, root=osp.join(dataset.images_dir, dataset.gallery_path), transform=test_transformer), batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True)
    return (dataset, num_classes, train_loader, query_loader, gallery_loader)
class AAMSoftmax(nn.Module):
    """Additive Angular Margin (ArcFace-style) softmax loss head.

    Scores 192-d embeddings against class weight vectors on the unit
    hypersphere, adds an angular margin *m* to the target-class angle,
    scales the logits by *s*, and applies cross-entropy.
    """

    def __init__(self, n_class, m, s):
        super(AAMSoftmax, self).__init__()
        self.m = m  # angular margin
        self.s = s  # logit scale
        self.weight = torch.nn.Parameter(torch.FloatTensor(n_class, 192), requires_grad=True)
        nn.init.xavier_normal_(self.weight, gain=1)
        self.ce = nn.CrossEntropyLoss()
        # Precomputed constants for cos(theta + m) and the fallback branch.
        self.cos_m = math.cos(self.m)
        self.sin_m = math.sin(self.m)
        self.th = math.cos(math.pi - self.m)
        self.mm = math.sin(math.pi - self.m) * self.m

    def forward(self, x, label=None):
        # Cosine similarity between normalized embeddings and class weights.
        cosine = F.linear(F.normalize(x), F.normalize(self.weight))
        sine = torch.sqrt((1.0 - torch.mul(cosine, cosine)).clamp(0, 1))
        # cos(theta + m), with a linear fallback once theta + m would pass pi.
        phi = (cosine * self.cos_m) - (sine * self.sin_m)
        phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm)
        # Apply the margined logit only at the target class.
        one_hot = torch.zeros_like(cosine)
        one_hot.scatter_(1, label.view(-1, 1), 1)
        logits = (one_hot * phi) + ((1.0 - one_hot) * cosine)
        logits = logits * self.s
        loss = self.ce(logits, label)
        prec1 = accuracy(logits.detach(), label.detach(), topk=(1,))[0]
        return (loss, prec1)
class DataSet():
    """Writer for TER-style evaluation files (corpus.tst / .ref / .ter).

    Creates *dir*, then streams hypothesis/reference segment pairs and their
    scores into three files wrapped in minimal mteval XML.
    """

    def __init__(self, dir):
        os.mkdir(dir)
        self.tst = open(os.path.join(dir, 'corpus.tst'), 'w')
        self.ref = open(os.path.join(dir, 'corpus.ref'), 'w')
        self.ter = open(os.path.join(dir, 'corpus.ter'), 'w')
        # tst/ref headers differ only in the set tag name.
        header = '{}set trglang="any" setid="any" srclang="any">\n<doc docid="any" sysid="sys">\n'
        self.tst.write(header.format('<tst'))
        self.ref.write(header.format('<ref'))
        self.i = 0  # running segment id

    def add(self, hyp, ref, score):
        """Append one segment: hypothesis, reference, and its TER score."""
        self.i += 1
        self.tst.write('<seg id="{}"> {} </seg>\n'.format(self.i, hyp))
        self.ref.write('<seg id="{}"> {} </seg>\n'.format(self.i, ref))
        self.ter.write('any {} {}\n'.format(self.i, score))

    def close(self):
        """Write the closing tags and close all three files."""
        footer = '</doc>\n</tstset>\n'
        self.tst.write(footer)
        self.ref.write(footer)
        for handle in (self.tst, self.ref, self.ter):
            handle.close()
def test_merged_configs():
    """Merging configs: the later file wins for TASK, the earlier keeps ENV."""
    test_config = get_config(CFG_TEST)
    eqa_config = get_config(CFG_EQA)
    merged_config = get_config(','.join([CFG_TEST, CFG_EQA]))
    assert merged_config.TASK.TYPE == eqa_config.TASK.TYPE
    assert merged_config.ENVIRONMENT.MAX_EPISODE_STEPS == test_config.ENVIRONMENT.MAX_EPISODE_STEPS
def concatChar(input_lines, char_dict):
    """Flatten each tokenized sentence into one character-id sequence.

    Tokens within a sentence are joined with the space id; every sentence is
    wrapped with a leading space id and a trailing newline id.
    """
    space = char_dict[' ']
    newline = char_dict['\n']
    features = []
    for sentence in input_lines:
        merged = list(sentence[0])
        for token in sentence[1:]:
            merged = (merged + [space]) + list(token)
        features.append(([space] + merged) + [newline])
    return features
class TestArgs(BaseArgs):
    """Command-line arguments for inference/testing (batch size forced to 1)."""

    def __init__(self):
        super().__init__()

    def add_args(self):
        """Register the test-time options on top of the base arguments."""
        super().add_args()
        # Inference runs one sample at a time.
        self.parser.set_defaults(batch_size=1)
        self.parser.add_argument('--id_dir', type=Path)
        self.parser.add_argument('--attr_dir', type=Path)
        self.parser.add_argument('--output_dir', type=Path)
        self.parser.add_argument('--input_dir', type=Path)
        self.parser.add_argument('--real_id', action='store_true')
        self.parser.add_argument('--real_attr', action='store_true')
        BaseArgs.add_bool_arg(self.parser, 'loop_fake')
        # NOTE(review): type=list on an argparse option splits any provided
        # value into characters (e.g. "png" -> ['p','n','g']); only the
        # default list behaves as intended.  Consider nargs='+' — confirm
        # how callers pass this flag before changing.
        self.parser.add_argument('--img_suffixes', type=list, default=['png', 'jpg', 'jpeg'])
        self.parser.add_argument('--test_func', type=str, choices=['infer_on_dirs', 'infer_pairs', 'interpolate'])
        self.parser.add_argument('--input', type=str)

    def validate(self):
        super().validate()

    def process(self):
        # Inference mode: disable training and ensure the output dir exists.
        self.args.train = False
        super().process()
        self.args.output_dir.mkdir(exist_ok=True, parents=True)
(version='2.3.0', reason='Please use spark engine and ray engine.')
class DistributedSequentialSampler(Sampler):
    """Sampler yielding a contiguous, in-order slice of the dataset per rank.

    len(dataset) is split as evenly as possible across *num_replicas*; the
    first ``len(dataset) % num_replicas`` ranks receive one extra sample.

    NOTE(review): the bare tuple expression above looks like a deprecation
    decorator whose name was lost (likely ``@deprecated(...)``) — confirm
    against the original file.
    """

    def __init__(self, dataset, num_replicas, rank):
        self.dataset = dataset
        # Base share per rank, before distributing the remainder.
        self.num_samples = int(math.floor(((len(self.dataset) * 1.0) / num_replicas)))
        extra_samples = (len(self.dataset) % num_replicas)
        self.epoch = 0
        # Low ranks absorb one extra sample each; offsets shift accordingly.
        if (extra_samples > rank):
            self.num_samples += 1
            self.offset = (self.num_samples * rank)
        else:
            self.offset = ((self.num_samples * rank) + extra_samples)
        self.total_size = len(dataset)

    def __iter__(self):
        # Sequential (non-shuffled) indices for this rank's slice.
        indices = list(range(self.offset, (self.num_samples + self.offset)))
        invalidInputError((len(indices) == self.num_samples), 'expect indices len match num_samples')
        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # Kept for API parity with DistributedSampler; ordering never changes.
        self.epoch = epoch
class RandomVerticalCropCont(object):
    """Randomly keep a vertical portion of an image and resize to a fixed size.

    Returns both the crop and the (ratio, start_ratio) pair describing which
    vertical slice was kept, so downstream code can condition on it.
    """

    def __init__(self, height, width):
        # Output size after resizing the crop.
        self.height = height
        self.width = width

    def __call__(self, img):
        (w, h) = img.size
        # Keep between ~50% and 100% of the image height.
        ratio = min(1, np.random.uniform(0.5, 1.08333))
        ratio = float(ratio)
        jitter = np.random.uniform(0.9, 1.11111)
        apply_ratio = min(1.0, (ratio * jitter))
        # NOTE(review): the jittered value is immediately overwritten below,
        # so the jitter currently has no effect — confirm whether the
        # override is intentional (e.g. jitter disabled during tuning).
        apply_ratio = ratio
        # ~20% of the time shift the crop start downward by the kept portion.
        start_ratio = 0
        if (np.random.uniform(0, 1) > 0.8):
            start_ratio = (1 - apply_ratio)
        start_h = int((start_ratio * h))
        img = img.crop((0, start_h, w, np.round((h * apply_ratio))))
        img = img.resize((self.width, self.height), Image.BILINEAR)
        return [img, (apply_ratio, start_ratio)]
class LayerNorm2d(nn.LayerNorm):
    """LayerNorm over the channel dimension of NCHW feature maps.

    nn.LayerNorm normalizes the trailing dimension, so the input is permuted
    to NHWC, normalized, and permuted back to NCHW.
    """

    def __init__(self, num_channels):
        super().__init__(num_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        nhwc = x.permute(0, 2, 3, 1)
        normed = F.layer_norm(nhwc, self.normalized_shape, self.weight, self.bias, self.eps)
        return normed.permute(0, 3, 1, 2)
class SeparatorStyle(Enum):
    """Prompt-separator styles used when rendering a conversation template.

    Members name how roles and messages are delimited: colon-based styles
    with one or two separators, no-colon/new-line variants, and
    model-specific formats (Dolly, RWKV, Phoenix, BayLing, Alpaca).
    Values are auto-assigned; do not reorder members.
    """
    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    ADD_NEW_LINE_SINGLE = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    BAYLING = auto()
    ALPACA = auto()
def main():
    """Train and evaluate a small DNN classifier on the Data_func dataset.

    Builds a 784 -> [100, 50] -> 10 network, trains 10 epochs with a 20%
    validation split, reports test loss/accuracy, and plots the curves.
    """
    Nin = 784
    Nh_l = [100, 50]          # hidden-layer widths
    number_of_class = 10
    Nout = number_of_class
    ((X_train, Y_train), (X_test, Y_test)) = Data_func()
    model = DNN(Nin, Nh_l, Nout)
    # FIX: labels were referenced as undefined y_train/y_test; use the
    # Y_train/Y_test names actually unpacked above (was a NameError).
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100, validation_split=0.2)
    performace_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performace_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
def test_fermi_report_number_ESH():
    """FERMILAB report numbers with long suffixes are normalized, and the
    bracketed line marker is extracted."""
    ref_line = u'[11] T. Sanami, Applicability of a Bonner Sphere technique for pulsed neutron in 120 GeV proton facility, in Proceedings of the 22nd Workshop on Radiation Detectors and Their Uses, pp. 148-159, FERMILAB-CONF-08-203-AD-APC-E-ESH (2008).'
    references = get_references(ref_line)[0]
    first = references[0]
    assert first['reportnumber'] == [u'FERMILAB-Conf-08-203-AD-APC-E-ESH']
    assert first['linemarker'] == [u'11']
def test_masked_backward(model, X, X_masked):
    """backward() on a fully-True mask matches the dense result; a partially
    masked input yields the expected (different) scores."""
    X = torch.tensor(numpy.array(X))
    full_mask = torch.ones_like(X).type(torch.bool)
    expected_full = [
        [[-18.8311, -19.113], [-15.5423, -15.83], [-10.8078, -11.0955], [-6.1547, -5.3717], [-2.3026, -2.3026]],
        [[-15.5896, -14.7842], [-12.1797, -12.4674], [-8.8158, -8.0555], [-5.9508, -6.2384], [-2.3026, -2.3026]],
    ]
    expected_masked = [
        [[-11.6441, -11.7241], [-11.1645, -11.0955], [-10.8078, -11.0955], [-6.1547, -5.3717], [-2.3026, -2.3026]],
        [[-9.162, -8.3668], [-7.4618, -7.7494], [-5.1529, -4.4689], [-2.4079, -2.4079], [-2.3026, -2.3026]],
    ]
    out_full = model.backward(torch.masked.MaskedTensor(X, mask=full_mask))
    assert_array_almost_equal(out_full, expected_full, 4)
    out_masked = model.backward(X_masked)
    assert_array_almost_equal(out_masked, expected_masked, 4)
class GPT2TokenizerFast():
    """Placeholder for the fast GPT-2 tokenizer when the `tokenizers`
    backend is unavailable: every use raises via requires_tokenizers.

    NOTE(review): from_pretrained is normally a classmethod on the real
    class; a decorator may have been stripped from this chunk — confirm.
    """

    def __init__(self, *args, **kwargs):
        requires_tokenizers(self)

    def from_pretrained(self, *args, **kwargs):
        requires_tokenizers(self)
class TuningCriterion():
    """Configuration of the quantization tuning loop: search strategy, exit
    criteria (timeout / max trials), and the tuning objective(s).

    NOTE(review): this chunk shows repeated ``def max_trials`` /
    ``def timeout`` / ``def objective`` / ``def strategy`` definitions plus
    stray ``_trials.setter`` / ``_kwargs.setter`` lines — the ``@property``
    and ``@<name>.setter`` decorators were apparently stripped during
    extraction; restore them from the original file.  The code below is kept
    verbatim.
    """

    def __init__(self, strategy='basic', strategy_kwargs=None, timeout=0, max_trials=100, objective='performance'):
        # Assignments are meant to route through the validating setters below.
        self.strategy = strategy
        self.timeout = timeout
        self.max_trials = max_trials
        self.objective = objective
        self.strategy_kwargs = strategy_kwargs

    def max_trials(self):
        # Getter: maximum number of tuning trials.
        return self._max_trials
    _trials.setter
    def max_trials(self, max_trials):
        # Setter: validated as int.
        if _check_value('max_trials', max_trials, int):
            self._max_trials = max_trials

    def timeout(self):
        # Getter: tuning time budget in seconds (0 == unlimited by value).
        return self._timeout

    def timeout(self, timeout):
        if _check_value('timeout', timeout, int):
            self._timeout = timeout

    def objective(self):
        return self._objective

    def objective(self, objective):
        # Accepts a single objective name, a list of names, or a dict with
        # 'objective' / 'weight' / 'higher_is_better' keys.
        if isinstance(objective, list):
            for val in objective:
                assert _check_value('objective', val, str, ['performance', 'accuracy', 'modelsize', 'footprint'])
            self._objective = objective
            return
        if _check_value('objective', objective, str, ['performance', 'accuracy', 'modelsize', 'footprint']):
            self._objective = [objective]
            return
        if _check_value('objective', objective, dict):
            if (('weight' in objective.keys()) and isinstance(objective['weight'], list)):
                # One weight per objective when weights are given as a list.
                assert (len(objective['objective']) == len(objective['weight']))
            for (k, v) in objective.items():
                _check_value('objective', k, str, ['objective', 'weight', 'higher_is_better'])
                if (k == 'objective'):
                    _check_value('objective', v, str, ['performance', 'accuracy', 'modelsize', 'footprint'])
            self._objective = objective

    def strategy(self):
        return self._strategy

    def strategy(self, strategy):
        # Setter: validated against the known strategy names.
        if _check_value('strategy', strategy, str, ['basic', 'mse', 'bayesian', 'random', 'exhaustive', 'sigopt', 'tpe', 'mse_v2', 'hawq_v2']):
            self._strategy = strategy

    def strategy_kwargs(self):
        return self._strategy_kwargs
    _kwargs.setter
    def strategy_kwargs(self, strategy_kwargs):
        self._strategy_kwargs = strategy_kwargs
class InceptionAux(nn.Module):
    """Inception-v3 auxiliary classifier head.

    The ``stddev`` attributes set on the layers are presumably read by the
    enclosing model's weight-initialization routine (as in torchvision's
    Inception) — TODO confirm.
    """

    def __init__(self, in_channels, num_classes, conv_block=None):
        super(InceptionAux, self).__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.conv0 = conv_block(in_channels, 128, kernel_size=1)
        self.conv1 = conv_block(128, 768, kernel_size=5)
        self.conv1.stddev = 0.01
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001

    def forward(self, x):
        # Downsample spatially, then squeeze to a 768-d vector and classify.
        out = F.avg_pool2d(x, kernel_size=5, stride=3)
        out = self.conv0(out)
        out = self.conv1(out)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        return self.fc(out)
def svr(name, kernels=('linear', 'rbf', 'poly', 'sigmoid'), **kwargs):
    """Build a hyperopt search space over SVR kernel variants.

    Args:
        name: label prefix used for the hyperopt parameter names.
        kernels: kernel names to include (default: all four). Changed from a
            mutable list default to a tuple — same contents, same iteration
            order, but no shared-mutable-default hazard.
        **kwargs: forwarded to each per-kernel space builder.

    Returns:
        A single kernel space when one kernel is requested, otherwise an
        ``hp.choice`` over all requested kernel spaces.
    """
    svms = {'linear': partial(svr_linear, name=name),
            'rbf': partial(svr_rbf, name=name),
            'poly': partial(svr_poly, name=name),
            'sigmoid': partial(svr_sigmoid, name=name)}
    choices = [svms[kern](**kwargs) for kern in kernels]
    if (len(choices) == 1):
        rval = choices[0]
    else:
        rval = hp.choice(('%s.kernel' % name), choices)
    return rval
class TestTwoQubitWeylDecomposition(QiskitTestCase):
    """Exercise TwoQubitWeylDecomposition across the Weyl chamber.

    The checked-in source had the matrix products garbled as ``((k1 a) k2)``;
    they are restored to ``k1 @ a @ k2`` here, and the twelve near-identical
    loops are factored into two private helpers.
    """

    def check_two_qubit_weyl_decomposition(self, target_unitary, tolerance=1e-07):
        """Decompose, rebuild as a circuit, and compare up to a global phase."""
        with self.subTest(unitary=target_unitary):
            decomp = TwoQubitWeylDecomposition(target_unitary)
            q = QuantumRegister(2)
            decomp_circuit = QuantumCircuit(q)
            decomp_circuit.append(UnitaryGate(decomp.K2r), [q[0]])
            decomp_circuit.append(UnitaryGate(decomp.K2l), [q[1]])
            decomp_circuit.append(UnitaryGate(Ud(decomp.a, decomp.b, decomp.c)), [q[0], q[1]])
            decomp_circuit.append(UnitaryGate(decomp.K1r), [q[0]])
            decomp_circuit.append(UnitaryGate(decomp.K1l), [q[1]])
            result = execute(decomp_circuit, UnitarySimulatorPy()).result()
            decomp_unitary = result.get_unitary()
            # Normalize determinants, then minimize over the four fourth roots
            # of unity so the comparison ignores any global phase.
            target_unitary *= (la.det(target_unitary) ** (-0.25))
            decomp_unitary *= (la.det(decomp_unitary) ** (-0.25))
            maxdists = [np.max(np.abs(target_unitary + (phase * decomp_unitary)))
                        for phase in [1, 1j, (-1), (-1j)]]
            maxdist = np.min(maxdists)
            self.assertTrue((np.abs(maxdist) < tolerance),
                            'Worst distance {}'.format(maxdist))

    def _check_weyl_point(self, canonical):
        # Sandwich the canonical gate between every sampled pair of local unitaries.
        for (k1l, k1r, k2l, k2r) in K1K2S:
            k1 = np.kron(k1l.data, k1r.data)
            k2 = np.kron(k2l.data, k2r.data)
            self.check_two_qubit_weyl_decomposition(k1 @ canonical @ k2)

    @staticmethod
    def _alpha_samples(smallest, factor, steps):
        # Geometric ladders approaching both endpoints of [0, pi/4], plus a
        # few fixed interior points.
        return ([(smallest * (factor ** i)) for i in range(steps)]
                + [((np.pi / 4) - (smallest * (factor ** i))) for i in range(steps)]
                + [(np.pi / 8), (0.113 * np.pi), (0.1972 * np.pi)])

    def test_two_qubit_weyl_decomposition_cnot(self):
        """Decomposition of the CNOT-equivalent canonical gate."""
        self._check_weyl_point(Ud((np.pi / 4), 0, 0))

    def test_two_qubit_weyl_decomposition_iswap(self):
        """Decomposition of the iSWAP-equivalent canonical gate."""
        self._check_weyl_point(Ud((np.pi / 4), (np.pi / 4), 0))

    def test_two_qubit_weyl_decomposition_swap(self):
        """Decomposition of the SWAP-equivalent canonical gate."""
        self._check_weyl_point(Ud((np.pi / 4), (np.pi / 4), (np.pi / 4)))

    def test_two_qubit_weyl_decomposition_bgate(self):
        """Decomposition of the B-gate canonical point."""
        self._check_weyl_point(Ud((np.pi / 4), (np.pi / 8), 0))

    def test_two_qubit_weyl_decomposition_a00(self, smallest=1e-18, factor=9.8, steps=11):
        """Canonical points Ud(a, 0, 0)."""
        for aaa in self._alpha_samples(smallest, factor, steps):
            self._check_weyl_point(Ud(aaa, 0, 0))

    def test_two_qubit_weyl_decomposition_aa0(self, smallest=1e-18, factor=9.8, steps=11):
        """Canonical points Ud(a, a, 0)."""
        for aaa in self._alpha_samples(smallest, factor, steps):
            self._check_weyl_point(Ud(aaa, aaa, 0))

    def test_two_qubit_weyl_decomposition_aaa(self, smallest=1e-18, factor=9.8, steps=11):
        """Canonical points Ud(a, a, a)."""
        for aaa in self._alpha_samples(smallest, factor, steps):
            self._check_weyl_point(Ud(aaa, aaa, aaa))

    def test_two_qubit_weyl_decomposition_aama(self, smallest=1e-18, factor=9.8, steps=11):
        """Canonical points Ud(a, a, -a)."""
        for aaa in self._alpha_samples(smallest, factor, steps):
            self._check_weyl_point(Ud(aaa, aaa, (-aaa)))

    def test_two_qubit_weyl_decomposition_ab0(self, smallest=1e-18, factor=9.8, steps=11):
        """Canonical points Ud(a, b, 0)."""
        for aaa in self._alpha_samples(smallest, factor, steps):
            for bbb in np.linspace(0, aaa, 10):
                self._check_weyl_point(Ud(aaa, bbb, 0))

    def test_two_qubit_weyl_decomposition_abb(self, smallest=1e-18, factor=9.8, steps=11):
        """Canonical points Ud(a, b, b)."""
        for aaa in self._alpha_samples(smallest, factor, steps):
            for bbb in np.linspace(0, aaa, 6):
                self._check_weyl_point(Ud(aaa, bbb, bbb))

    def test_two_qubit_weyl_decomposition_abmb(self, smallest=1e-18, factor=9.8, steps=11):
        """Canonical points Ud(a, b, -b)."""
        for aaa in self._alpha_samples(smallest, factor, steps):
            for bbb in np.linspace(0, aaa, 6):
                self._check_weyl_point(Ud(aaa, bbb, (-bbb)))

    def test_two_qubit_weyl_decomposition_aac(self, smallest=1e-18, factor=9.8, steps=11):
        """Canonical points Ud(a, a, c)."""
        for aaa in self._alpha_samples(smallest, factor, steps):
            for ccc in np.linspace((-aaa), aaa, 6):
                self._check_weyl_point(Ud(aaa, aaa, ccc))

    def test_two_qubit_weyl_decomposition_abc(self, smallest=1e-18, factor=9.8, steps=11):
        """Canonical points Ud(a, b, c)."""
        for aaa in self._alpha_samples(smallest, factor, steps):
            for bbb in np.linspace(0, aaa, 4):
                for ccc in np.linspace((-bbb), bbb, 4):
                    # Fix: the original built Ud(aaa, aaa, ccc), leaving the
                    # bbb loop variable unused — this test is meant to cover
                    # the generic (a, b, c) point.
                    self._check_weyl_point(Ud(aaa, bbb, ccc))
def subdict(d: Dict[(str, Any)], keys: List[str]) -> Dict[(str, Any)]:
    """Return a copy of *d* restricted to *keys*; keys absent from *d* are skipped."""
    wanted = set(keys)
    return {key: value for (key, value) in d.items() if key in wanted}
class RemoveGrid(SparseModule):
    """Pass-through module that clears the cached ``grid`` of a SparseConvTensor.

    Presumably used to free the grid buffer once downstream layers no longer
    need it — TODO confirm against callers.
    """

    def forward(self, x: SparseConvTensor):
        # Drop the cached grid; the tensor itself is returned unchanged otherwise.
        x.grid = None
        return x
def scale_ocr_x(x, dimensions_scenegraph, dimensions_ocr):
    """Rescale an x-coordinate from OCR image space into scene-graph image space.

    Both dimension arguments are indexed at 0 for the x extent — presumably
    (width, height) pairs; confirm against callers.
    """
    scenegraph_width = dimensions_scenegraph[0]
    ocr_width = dimensions_ocr[0]
    # Multiply first, then divide — same evaluation order as the original.
    return x * scenegraph_width / ocr_width
class SubPolicy(object):
    """One AutoAugment sub-policy: two image ops, each applied with its own
    probability and a magnitude drawn from a 10-step range.

    Fix: ``astype(np.int)`` — the ``np.int`` alias was deprecated in NumPy 1.20
    and removed in 1.24; the builtin ``int`` is the documented replacement and
    yields the same dtype here.
    """

    def __init__(self, p1, operation1, magnitude_idx1, p2, operation2,
                 magnitude_idx2, fillcolor=(128, 128, 128)):
        # 10-step magnitude schedule per operation.
        ranges = {'shearX': np.linspace(0, 0.3, 10),
                  'shearY': np.linspace(0, 0.3, 10),
                  'translateX': np.linspace(0, (150 / 331), 10),
                  'translateY': np.linspace(0, (150 / 331), 10),
                  'rotate': np.linspace(0, 30, 10),
                  'color': np.linspace(0.0, 0.9, 10),
                  # posterize counts bits, so magnitudes must be integers.
                  'posterize': np.round(np.linspace(8, 4, 10), 0).astype(int),
                  'solarize': np.linspace(256, 0, 10),
                  'contrast': np.linspace(0.0, 0.9, 10),
                  'sharpness': np.linspace(0.0, 0.9, 10),
                  'brightness': np.linspace(0.0, 0.9, 10),
                  # These three ops ignore magnitude; zeros keep indexing uniform.
                  'autocontrast': ([0] * 10),
                  'equalize': ([0] * 10),
                  'invert': ([0] * 10)}
        func = {'shearX': ShearX(fillcolor=fillcolor), 'shearY': ShearY(fillcolor=fillcolor),
                'translateX': TranslateX(fillcolor=fillcolor), 'translateY': TranslateY(fillcolor=fillcolor),
                'rotate': Rotate(), 'color': Color(), 'posterize': Posterize(),
                'solarize': Solarize(), 'contrast': Contrast(), 'sharpness': Sharpness(),
                'brightness': Brightness(), 'autocontrast': AutoContrast(),
                'equalize': Equalize(), 'invert': Invert()}
        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_idx1]
        self.p2 = p2
        self.operation2 = func[operation2]
        self.magnitude2 = ranges[operation2][magnitude_idx2]

    def __call__(self, img):
        """Apply each operation to *img* independently with its probability."""
        if (random.random() < self.p1):
            img = self.operation1(img, self.magnitude1)
        if (random.random() < self.p2):
            img = self.operation2(img, self.magnitude2)
        return img
def all_input_planes(fen):
    """Build the full (18, 8, 8) network input for a FEN position.

    Stacks the piece planes from ``to_planes`` on top of the auxiliary planes
    from ``aux_planes`` — the exact plane semantics live in those helpers.
    """
    auxiliary = aux_planes(fen)
    history = to_planes(fen)
    stacked = np.vstack((history, auxiliary))
    # Sanity-check the combined layout before handing it to the model.
    assert (stacked.shape == (18, 8, 8))
    return stacked
def build_one(frames=64, bands=40, n_classes=10, dropout=0.0, tstride=1, fstride=4):
    """Build a small Keras CNN for spectrogram classification.

    Input is a (frames, bands, 1) spectrogram; one strided conv feeds a
    bottleneck Dense, two 128-unit Dense layers, and a softmax head.
    NOTE(review): there is no Flatten between the conv stack and the final
    Dense layers (Flatten is imported but unused) — confirm this is intended.
    """
    from keras.layers import Conv2D, Dense, Dropout, Flatten
    conv_f = 8
    conv_t = 32
    kernels = 90
    bottleneck = 32
    input_shape = (frames, bands, 1)
    layers = [
        Conv2D(kernels, (conv_t, conv_f), strides=(tstride, fstride),
               padding='valid', activation='relu', use_bias=True,
               input_shape=input_shape),
        Dense(bottleneck, activation=None, use_bias=True),
        Dropout(dropout),
        Dense(128, activation='relu', use_bias=True),
        Dropout(dropout),
        Dense(128, activation='relu', use_bias=True),
        Dropout(dropout),
        Dense(n_classes, activation='softmax', use_bias=True),
    ]
    return keras.Sequential(layers)
@require_torch
class TvltProcessorTest(unittest.TestCase):
    """Tests that TvltProcessor correctly composes its image processor and
    feature extractor.

    NOTE(review): the checked-in source began with a stray ``_torch`` token —
    residue of a stripped ``@require_torch`` decorator, restored here.
    """

    def setUp(self):
        self.checkpoint = 'ZinengTang/tvlt-base'
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        # Round-tripping through save_pretrained/from_pretrained must preserve
        # the component types.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor,
                                  feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        # Audio-only processor output must match the feature extractor alone.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor,
                                  feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors='np')
        input_processor = processor(audio=audio, return_tensors='np')
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(),
                                   input_processor[key].sum(), delta=0.01)

    def test_image_processor(self):
        # Image-only processor output must match the image processor alone.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor,
                                  feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors='np')
        input_processor = processor(images=images, return_tensors='np')
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(),
                                   input_processor[key].sum(), delta=0.01)

    def test_processor(self):
        # Joint call yields both modalities; calling with nothing must raise.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor,
                                  feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()),
                             ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'])
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor,
                                  feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            (image_processor.model_input_names + feature_extractor.model_input_names),
            msg='`processor` and `image_processor`+`feature_extractor` model input names do not match')
@pytest.fixture(params=[
    # (learn_augmented_cond_sets, significance, alpha_pre,
    #  pre_step_sample_fraction, cond_ind_test,
    #  cond_ind_test_thres, cond_ind_test_thres_pre)
    ('True', 'fixed_thres', 0.5, 0.2, ParCorr, 1, 0.5),
    ('True', 'fixed_thres', 0.5, 0.5, ParCorr, 1, 0.5),
    ('True', 'fixed_thres', 0.8, 0.2, ParCorr, 1, 0.5),
    ('True', 'fixed_thres', 0.8, 0.5, ParCorr, 1, 0.5),
    ('True', 'analytic', 0.5, 0.2, ParCorr, None, None),
    ('True', 'shuffle_test', 0.5, 0.2, ParCorr, None, None),
    ('True', 'analytic', 0.5, 0.5, ParCorr, None, None),
    ('True', 'shuffle_test', 0.5, 0.5, ParCorr, None, None),
    ('True', 'analytic', 0.8, 0.2, ParCorr, None, None),
    ('True', 'shuffle_test', 0.8, 0.2, ParCorr, None, None),
    ('True', 'analytic', 0.8, 0.5, ParCorr, None, None),
    ('True', 'shuffle_test', 0.8, 0.5, ParCorr, None, None),
    ('True', 'analytic', 0.5, 0.2, RobustParCorr, None, None),
    ('True', 'shuffle_test', 0.5, 0.2, RobustParCorr, None, None),
    ('True', 'analytic', 0.5, 0.5, RobustParCorr, None, None),
    ('True', 'shuffle_test', 0.5, 0.5, RobustParCorr, None, None),
    ('True', 'analytic', 0.8, 0.2, RobustParCorr, None, None),
    ('True', 'shuffle_test', 0.8, 0.2, RobustParCorr, None, None),
    ('True', 'analytic', 0.8, 0.5, RobustParCorr, None, None),
    ('True', 'shuffle_test', 0.8, 0.5, RobustParCorr, None, None),
    ('True', 'shuffle_test', 0.5, 0.2, CMIknn, None, None),
    ('True', 'shuffle_test', 0.5, 0.5, CMIknn, None, None),
    ('True', 'shuffle_test', 0.8, 0.2, CMIknn, None, None),
    ('True', 'shuffle_test', 0.8, 0.5, CMIknn, None, None),
    ('False', 'fixed_thres', 0.0, 0.0, ParCorr, 1, 0.5),
    ('False', 'analytic', 0.0, 0.0, ParCorr, None, None),
    ('False', 'shuffle_test', 0.0, 0.0, ParCorr, None, None),
    ('False', 'analytic', 0.0, 0.0, RobustParCorr, None, None),
    ('False', 'shuffle_test', 0.0, 0.0, RobustParCorr, None, None),
    ('False', 'shuffle_test', 0.0, 0.0, CMIknn, None, None)])
def pairwise_mult_ci(request):
    """Parametrized PairwiseMultCI fixture covering several inner CI tests,
    significance modes, and pre-step settings.

    NOTE(review): the checked-in source started with a bare ``(params=[...])``
    — residue of a stripped ``@pytest.fixture(...)`` decorator, restored here.
    NOTE(review): cond_ind_test_thres / cond_ind_test_thres_pre are unpacked
    but never used below — confirm whether they should be forwarded.
    """
    (learn_augmented_cond_sets, sig, alpha_pre, pre_step_sample_fraction,
     cond_ind_test, cond_ind_test_thres, cond_ind_test_thres_pre) = request.param
    if (sig != 'fixed_thres'):
        return PairwiseMultCI(learn_augmented_cond_sets=learn_augmented_cond_sets,
                              cond_ind_test=cond_ind_test(significance=sig),
                              alpha_pre=alpha_pre,
                              pre_step_sample_fraction=pre_step_sample_fraction)
    else:
        # Fixed-threshold mode: no alpha_pre; pass the mode and the pre-step
        # threshold explicitly.
        return PairwiseMultCI(learn_augmented_cond_sets=learn_augmented_cond_sets,
                              cond_ind_test=cond_ind_test(significance=sig),
                              alpha_pre=None,
                              pre_step_sample_fraction=pre_step_sample_fraction,
                              significance=sig, fixed_thres_pre=2)
class Permutation():
    """Endless stream over 0..length-1 in random order, reshuffled each pass."""

    def __init__(self, length: int):
        # counter indexes into the current shuffle; a new shuffle is drawn
        # every time a full pass completes.
        self.counter = 0
        self.length = length
        self.permutation = np.random.permutation(length)

    def get_next_value(self):
        """Return the next value of the current pass, reshuffling when exhausted."""
        value = self.permutation[self.counter]
        self.counter += 1
        if self.counter == self.length:
            # Full pass done: draw a fresh permutation and restart.
            self.permutation = np.random.permutation(self.length)
            self.counter = 0
        return value

    def get_permutation(self):
        """Return the current permutation as a plain Python list."""
        return self.permutation.tolist()
class Uniform(Distribution):
    """A per-dimension uniform distribution over the box [mins, maxs].

    Relies on module-level helpers (_check_parameter, _cast_as_parameter,
    _cast_as_tensor, _check_shapes, _update_parameter) and the Distribution
    base class, all defined elsewhere in this file.
    """

    def __init__(self, mins=None, maxs=None, inertia=0.0, frozen=False, check_data=True):
        # Fitting/validation knobs are handled by the generic base class.
        super().__init__(inertia=inertia, frozen=frozen, check_data=check_data)
        self.name = 'Uniform'
        # Per-dimension lower/upper bounds; validated to be 1-D when provided.
        self.mins = _check_parameter(_cast_as_parameter(mins), 'mins', ndim=1)
        self.maxs = _check_parameter(_cast_as_parameter(maxs), 'maxs', ndim=1)
        _check_shapes([self.mins, self.maxs], ['mins', 'maxs'])
        # Fully initialized only when both bounds were given up front;
        # otherwise _initialize(d) is expected to run later.
        self._initialized = ((mins is not None) and (maxs is not None))
        self.d = (self.mins.shape[(- 1)] if self._initialized else None)
        self._reset_cache()

    def _initialize(self, d):
        # Lazy-initialization path: allocate zeroed bounds for d features.
        self.mins = _cast_as_parameter(torch.zeros(d, dtype=self.dtype, device=self.device))
        self.maxs = _cast_as_parameter(torch.zeros(d, dtype=self.dtype, device=self.device))
        self._initialized = True
        super()._initialize(d)

    def _reset_cache(self):
        # Nothing to cache until the distribution has concrete bounds.
        if (self._initialized == False):
            return
        # Running extrema accumulators for summarize(); start at +/- inf so
        # the first batch always updates them.
        self.register_buffer('_x_mins', torch.full((self.d,), inf, device=self.device))
        self.register_buffer('_x_maxs', torch.full((self.d,), (- inf), device=self.device))
        # Per-dimension log density inside the box: -log(width).
        self.register_buffer('_logps', (- torch.log((self.maxs - self.mins))))

    def sample(self, n):
        """Draw n i.i.d. samples from the box; returns shape (n, d)."""
        return torch.distributions.Uniform(self.mins, self.maxs).sample([n])

    def log_probability(self, X):
        """Log density of each row of X; -inf for any point outside the box."""
        X = _check_parameter(_cast_as_tensor(X), 'X', ndim=2, shape=((- 1), self.d), check_parameter=self.check_data)
        # Each in-box dimension contributes -log(width); a single out-of-box
        # dimension drives the row's sum to -inf.
        return torch.where(((X >= self.mins) & (X <= self.maxs)), self._logps, float('-inf')).sum(dim=1)

    def summarize(self, X, sample_weight=None):
        """Accumulate elementwise min/max of X into the running extrema."""
        if (self.frozen == True):
            return
        (X, sample_weight) = super().summarize(X, sample_weight=sample_weight)
        self._x_mins = torch.minimum(self._x_mins, X.min(dim=0).values)
        self._x_maxs = torch.maximum(self._x_maxs, X.max(dim=0).values)

    def from_summaries(self):
        """Fold the accumulated extrema into mins/maxs (weighted by inertia)."""
        if (self.frozen == True):
            return
        _update_parameter(self.mins, self._x_mins, self.inertia)
        _update_parameter(self.maxs, self._x_maxs, self.inertia)
        self._reset_cache()
class PruningCriterion():
    """Base class for pruning criteria.

    Holds the modules under consideration, the pruning config, and a scores
    dict that subclasses populate; the hook methods are no-ops by default.
    """

    def __init__(self, modules, config):
        # Maps module identifiers to importance scores; filled in by subclasses.
        self.scores = {}
        self.modules = modules
        self.config = config

    def on_step_begin(self):
        """Hook invoked at the start of each training step; no-op by default."""

    def on_before_optimizer_step(self):
        """Hook invoked just before optimizer.step(); no-op by default."""

    def on_after_optimizer_step(self):
        """Hook invoked just after optimizer.step(); no-op by default."""
def load_queries(query_path):
    """Load a tab-separated file of (qid, text) pairs into a dict keyed by qid.

    Each line must contain exactly one tab; later duplicates of a qid
    overwrite earlier ones. Progress is shown via tqdm.
    """
    queries = {}
    with open(query_path, 'r') as handle:
        for line in tqdm(handle, desc='loading query....'):
            (qid, text) = line.strip().split('\t')
            queries[qid] = text
    return queries
class PMXeon_X5570(PM):
    """Power model for a Xeon X5570 host.

    powerlist holds measured wattage at 0%,10%,...,100% CPU utilization;
    power() interpolates linearly between the two surrounding points.
    """

    def __init__(self):
        super().__init__()
        self.powerlist = [81.4, 110, 125, 139, 153, 167, 182, 199, 214, 229, 244]

    def power(self):
        """Return interpolated power draw for the host's current CPU utilization."""
        utilization = self.host.getCPU()
        lower = math.floor(utilization / 10)
        # At an exact decile, both endpoints collapse to the same entry
        # (also keeps 100% from indexing past the end of the table).
        upper = lower if (utilization % 10) == 0 else (lower + 1)
        alpha = (utilization / 10) - lower
        left = self.powerlist[lower]
        right = self.powerlist[upper]
        return (alpha * right) + ((1 - alpha) * left)
def gen_nice_inds():
    """Yield an endless stream of readable index labels.

    Order: 'a'-'z', then 'A'-'Z', then Unicode characters from code point 192
    (U+00C0) upward, forever.
    """
    yield from (chr(ord('a') + offset) for offset in range(26))
    yield from (chr(ord('A') + offset) for offset in range(26))
    yield from map(chr, itertools.count(192))
def __crop(img, pos, size):
    """Crop a size x size square from a PIL image at position *pos*.

    Returns the image unchanged when it already fits within the square
    (i.e. neither dimension exceeds *size*).
    """
    (width, height) = img.size
    (left, top) = pos
    side = size
    if (width > side) or (height > side):
        return img.crop((left, top, (left + side), (top + side)))
    return img
def ksave(kspace, filepath):
    """Save a log-magnitude visualization of *kspace* to *filepath*.

    Creates the parent directory if needed, normalizes the magnitude to [0, 1],
    log-scales it, and writes the normalized result as an 8-bit image.
    """
    path = (os.path.dirname(filepath) or '.')
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists / os.makedirs pair.
    os.makedirs(path, exist_ok=True)
    img = np.abs(kspace)
    # NOTE(review): divides by zero if kspace is all zeros — confirm inputs.
    img /= np.max(img)
    # Epsilon keeps the log finite where the magnitude is zero.
    img = np.log((img + 1e-05))
    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — this call
    # requires scipy < 1.2 or migration to imageio.imwrite / PIL.Image.save.
    scipy.misc.imsave(filepath, _normalize(img).astype(np.uint8))
def de_resnet18(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-18 via the shared ``_resnet`` factory.

    Args:
        pretrained: load pretrained weights if True.
        progress: show a download progress bar if True.
        **kwargs: forwarded to the ResNet constructor.
    """
    # Two BasicBlocks per stage is the ResNet-18 layer layout.
    block_counts = [2, 2, 2, 2]
    return _resnet('resnet18', BasicBlock, block_counts, pretrained, progress, **kwargs)
def save_training_config(config_file, output_dir):
    """Copy the training configuration into *output_dir* as 'training_config.json'."""
    config = read_json(config_file)
    destination = os.path.join(output_dir, 'training_config.json')
    save_json(destination, config)
def target_days_to_cols(target_days):
    """Map each prediction horizon in *target_days* to its column name."""
    return ['Predicted Deaths {}-day'.format(day) for day in target_days]
def test_digits_two_stage():
    """MixtureSelection with the two-stage optimizer must reproduce the
    reference ranking, gains, and subset on the digits data."""
    submodels = [FeatureBasedSelection(100, 'sqrt'),
                 FeatureBasedSelection(100, 'log')]
    selector = MixtureSelection(100, submodels, [1.0, 0.3], optimizer='two-stage')
    selector.fit(X_digits)
    assert_array_equal(selector.ranking, digits_ranking)
    assert_array_almost_equal(selector.gains, digits_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
class PegasusConverter(SpmConverter):
    """Converts a slow Pegasus sentencepiece tokenizer into its fast
    (tokenizers-library) equivalent."""

    def vocab(self, proto):
        # Fast vocab layout: pad/eos/mask_sent/mask first, then the
        # <unk_2>..<unk_{offset-1}> placeholder tokens, then the real
        # sentencepiece pieces (skipping the proto's first two entries).
        vocab = [(self.original_tokenizer.pad_token, 0.0), (self.original_tokenizer.eos_token, 0.0), (self.original_tokenizer.mask_token_sent, 0.0), (self.original_tokenizer.mask_token, 0.0)]
        # -100.0 scores for the placeholders — presumably just "very
        # unlikely" so they never win a merge; confirm.
        vocab += [(f'<unk_{i}>', (- 100.0)) for i in range(2, self.original_tokenizer.offset)]
        vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]]
        return vocab

    def unk_id(self, proto):
        # The unk id shifts by the number of tokens prepended in vocab().
        return (proto.trainer_spec.unk_id + self.original_tokenizer.offset)

    def post_processor(self):
        # Template: append EOS after a single sequence and after the second
        # segment of a pair.
        eos = self.original_tokenizer.eos_token
        special_tokens = [(eos, self.original_tokenizer.eos_token_id)]
        return processors.TemplateProcessing(single=['$A', eos], pair=['$A', '$B', eos], special_tokens=special_tokens)
def crop_images(image_list, offset, size, name=None, verbose=0):
    """Crop image tensor(s) to *size* starting at *offset* (TF1/graph-mode code).

    Accepts either a Python list of image tensors (each sliced individually)
    or a single (presumably batched — confirm) tensor sliced in one call.
    Returns a list of cropped tensors in the first case, a tensor otherwise.
    """
    with tf.name_scope(name, 'crop_images', [image_list, size]) as name:
        if isinstance(image_list, list):
            cropped_image_list = []
            size = ops.convert_to_tensor(size, dtype=dtypes.int32, name='size')
            for image in image_list:
                image = ops.convert_to_tensor(image, name='image')
                image = array_ops.slice(image, offset, size, name=name)
                cropped_image_list.append(image)
        else:
            size = ops.convert_to_tensor(size, dtype=dtypes.int32, name='size')
            image_list = ops.convert_to_tensor(image_list, name='image')
            # Only this branch coerces a non-tensor offset; the list branch
            # passes offset through unconverted. NOTE(review): looks like an
            # oversight — confirm callers pass tensor offsets with list inputs.
            if (not tf.contrib.framework.is_tensor(offset)):
                offset = ops.convert_to_tensor(offset, dtype=dtypes.int32, name='offset')
            if (verbose > 0):
                print('crop_images offset:', offset, 'size', size, 'img_list_shape', image_list.shape)
                # tf.Print is a TF1 debug op: logs the values when the graph runs.
                offset = tf.Print(offset, [offset, size, image_list.shape])
            cropped_image_list = array_ops.slice(image_list, offset, size, name=name)
    return cropped_image_list