code
stringlengths
101
5.91M
def se_resnext50_32x4d(pretrained: bool = False):
    """Build the SE-ResNeXt50 (32x4d) architecture via the shared model factory."""
    model_name = 'se_resnext50_32x4d'
    return get_model(model_name, pretrained)
def _build_eval_and_test_data_mode_from_folder(wide_from_folder, tab_from_folder, eval_fname, test_fname):
    """Clone the training TabFromFolder loaders for the eval and test splits.

    Eval loaders keep the target column; test loaders are built with
    ignore_target=True since the test split has no labels.
    Returns (eval_wide, eval_tab, test_wide, test_tab).
    """
    eval_loaders = (
        TabFromFolder(fname=eval_fname, reference=wide_from_folder),
        TabFromFolder(fname=eval_fname, reference=tab_from_folder),
    )
    test_loaders = (
        TabFromFolder(fname=test_fname, reference=wide_from_folder, ignore_target=True),
        TabFromFolder(fname=test_fname, reference=tab_from_folder, ignore_target=True),
    )
    return eval_loaders + test_loaders
def test_trajectory():
    """Time Trajectory construction and check the state type it reports."""
    start = process_time()
    traj = Trajectory(ts, [x0_p1, x0_p2, x0_p3])
    elapsed = process_time() - start
    print(elapsed)
    # Trajectory is expected to carry VehicleState as its state type.
    assert traj.XT == VehicleState
class DCGAN(nn.Module):
    """Fully-convolutional generator: one input conv, five 3x3 ngf->ngf convs,
    two 1x1 convs, Tanh output. Spatial size is preserved (stride 1, pad 1).
    """

    def __init__(self, num_channels=3, ngf=100):
        super(DCGAN, self).__init__()
        layers = [
            nn.Conv2d(num_channels, ngf, 3, 1, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Five identical 3x3 conv + LeakyReLU stages.
        for _ in range(5):
            layers.append(nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        layers.extend([
            nn.Conv2d(ngf, ngf, 1, 1, 0, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ngf, num_channels, 1, 1, 0, bias=False),
            nn.Tanh(),
        ])
        self.generator = nn.Sequential(*layers)

    def forward(self, inputs, target=None):
        # Returns (generated image, original input); `target` is unused.
        return self.generator(inputs), inputs

    def save(self, fn):
        torch.save(self.generator.state_dict(), fn)

    def load(self, fn):
        self.generator.load_state_dict(torch.load(fn))
# Runs ONNXRuntime inference for a MobileBERT SQuAD model, writes n-best and
# prediction JSON files under ./output, and returns the F1 score from the
# SQuAD v1.1 evaluator. Relies on module-level names not visible in this
# chunk: RawResult, write_predictions, evaluate, n_best_size, max_answer_length.
# NOTE(review): inside the batch loop, start_logits/end_logits are taken from
# result[...][0] (batch element 0 only) yet appended once per `i` in
# range(in_batch) — every RawResult in a batch shares the same logits. Works
# only because bs is hard-coded to 1 above; confirm intent for bs > 1.
# NOTE(review): `idx` and `label` from the dataloader, and `input_ids` beyond
# computing n, are unused.
def evaluate_squad(model, dataloader, input_ids, eval_examples, extra_data, input_file): session = onnxruntime.InferenceSession(model.SerializeToString(), None, providers=onnxruntime.get_available_providers()) for output_meta in session.get_outputs(): print(output_meta) for input_meta in session.get_inputs(): print(input_meta) n = len(input_ids) bs = 1 all_results = [] start = timer() for (idx, (batch, label)) in tqdm.tqdm(enumerate(dataloader), desc='eval'): data = {'input_ids': np.array(batch[0]), 'input_mask': np.array(batch[1]), 'segment_ids': np.array(batch[2])} result = session.run(['end_logits', 'start_logits'], data) in_batch = result[0].shape[0] start_logits = [float(x) for x in result[1][0].flat] end_logits = [float(x) for x in result[0][0].flat] for i in range(0, in_batch): unique_id = len(all_results) all_results.append(RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits)) output_dir = './output' os.makedirs(output_dir, exist_ok=True) output_prediction_file = os.path.join(output_dir, 'predictions_mobilebert_fp32.json') output_nbest_file = os.path.join(output_dir, 'nbest_predictions_mobilebert_fp32.json') write_predictions(eval_examples, extra_data, all_results, n_best_size, max_answer_length, True, output_prediction_file, output_nbest_file) with open(input_file) as dataset_file: dataset_json = json.load(dataset_file) expected_version = '1.1' if (dataset_json['version'] != expected_version): print(((('Evaluation expects v-' + expected_version) + ', but got dataset with v-') + dataset_json['version']), file=sys.stderr) dataset = dataset_json['data'] with open(output_prediction_file) as prediction_file: predictions = json.load(prediction_file) res = evaluate(dataset, predictions) return res['f1']
class Bottleneck(nn.Module):
    """ResNet bottleneck block with optional second-order attention.

    `attention` selects the variant: '0' plain bottleneck, '1' channel
    attention (covariance pooling), '2' spatial attention, '+' additive
    fusion of both, 'M' max fusion, '&' fusion through grouped dilated convs.

    BUG FIX: the original compared `attention` with string literals using
    `is` (identity), which only works by CPython string interning and emits
    a SyntaxWarning on Python >= 3.8; all such comparisons now use `==`.
    """

    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, attention='0', att_dim=128):
        super(Bottleneck, self).__init__()
        self.dimDR = att_dim
        # Standard 1x1 -> 3x3 (stride) -> 1x1 bottleneck stack.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.relu_normal = nn.ReLU(inplace=False)  # out-of-place ReLU for attention branches
        if attention in {'1', '+', 'M', '&'}:
            # Channel-attention branch (covariance pooling over reduced channels).
            if planes > 64:
                DR_stride = 1
            else:
                DR_stride = 2
            self.ch_dim = att_dim
            self.conv_for_DR = nn.Conv2d(planes * self.expansion, self.ch_dim, kernel_size=1, stride=DR_stride, bias=True)
            self.bn_for_DR = nn.BatchNorm2d(self.ch_dim)
            self.row_bn = nn.BatchNorm2d(self.ch_dim)
            # Group conv applied row-wise on the (ch_dim x ch_dim) covariance matrix.
            self.row_conv_group = nn.Conv2d(self.ch_dim, 4 * self.ch_dim, kernel_size=(self.ch_dim, 1), groups=self.ch_dim, bias=True)
            self.fc_adapt_channels = nn.Conv2d(4 * self.ch_dim, planes * self.expansion, kernel_size=1, groups=1, bias=True)
            self.sigmoid = nn.Sigmoid()
        if attention in {'2', '+', 'M', '&'}:
            # Spatial-attention branch over an 8x8 pooled map.
            self.sp_d = att_dim
            self.sp_h = 8
            self.sp_w = 8
            self.sp_reso = self.sp_h * self.sp_w
            self.conv_for_DR_spatial = nn.Conv2d(planes * self.expansion, self.sp_d, kernel_size=1, stride=1, bias=True)
            self.bn_for_DR_spatial = nn.BatchNorm2d(self.sp_d)
            self.adppool = nn.AdaptiveAvgPool2d((self.sp_h, self.sp_w))
            self.row_bn_for_spatial = nn.BatchNorm2d(self.sp_reso)
            self.row_conv_group_for_spatial = nn.Conv2d(self.sp_reso, self.sp_reso * 4, kernel_size=(self.sp_reso, 1), groups=self.sp_reso, bias=True)
            self.fc_adapt_channels_for_spatial = nn.Conv2d(self.sp_reso * 4, self.sp_reso, kernel_size=1, groups=1, bias=True)
            self.sigmoid = nn.Sigmoid()
            # Used to "unpool" the 8x8 attention map back to the feature size.
            self.adpunpool = F.adaptive_avg_pool2d
        if attention == '&':
            # Fusion via grouped dilated 1-D convolutions along the channel axis.
            self.groups_base = 32
            self.groups = int((planes * self.expansion) / 64)
            self.factor = int(math.log(self.groups_base / self.groups, 2))
            self.padding_num = self.factor + 2
            self.conv_kernel_size = self.factor * 2 + 5
            self.dilate_conv_for_concat1 = nn.Conv2d(planes * self.expansion, planes * self.expansion, kernel_size=(self.conv_kernel_size, 1), stride=1, padding=(self.padding_num, 0), groups=self.groups, bias=True)
            self.dilate_conv_for_concat2 = nn.Conv2d(planes * self.expansion, planes * self.expansion, kernel_size=(self.conv_kernel_size, 1), stride=1, padding=(self.padding_num, 0), groups=self.groups, bias=True)
            self.bn_for_concat = nn.BatchNorm2d(planes * self.expansion)
        self.downsample = downsample
        self.stride = stride
        self.attention = attention

    def chan_att(self, out):
        """Channel attention: covariance pooling -> row group conv -> sigmoid gate."""
        out = self.relu_normal(out)
        out = self.conv_for_DR(out)
        out = self.bn_for_DR(out)
        out = self.relu(out)
        out = MPNCOV.CovpoolLayer(out)  # second-order (covariance) pooling
        out = out.view(out.size(0), out.size(1), out.size(2), 1).contiguous()
        out = self.row_bn(out)
        out = self.row_conv_group(out)
        out = self.fc_adapt_channels(out)
        out = self.sigmoid(out)
        return out

    def pos_att(self, out):
        """Spatial attention over an 8x8 map, upsampled back to the input size."""
        pre_att = out
        out = self.relu_normal(out)
        out = self.conv_for_DR_spatial(out)
        out = self.bn_for_DR_spatial(out)
        out = self.adppool(out)
        out = cov_feature(out)
        out = out.view(out.size(0), out.size(1), out.size(2), 1).contiguous()
        out = self.row_bn_for_spatial(out)
        out = self.row_conv_group_for_spatial(out)
        out = self.relu(out)
        out = self.fc_adapt_channels_for_spatial(out)
        out = self.sigmoid(out)
        out = out.view(out.size(0), 1, self.sp_h, self.sp_w).contiguous()
        out = self.adpunpool(out, (pre_att.size(2), pre_att.size(3)))
        return out

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # Apply the selected attention variant before the residual addition.
        if self.attention == '1':
            pre_att = out
            att = self.chan_att(out)
            out = pre_att * att
        elif self.attention == '2':
            pre_att = out
            att = self.pos_att(out)
            out = self.relu_normal(pre_att * att)
        elif self.attention == '+':
            pre_att = out
            chan_att = self.chan_att(out)
            pos_att = self.pos_att(out)
            out = (pre_att * chan_att) + self.relu(pre_att.clone() * pos_att)
        elif self.attention == 'M':
            pre_att = out
            chan_att = self.chan_att(out)
            pos_att = self.pos_att(out)
            out = torch.max(pre_att * chan_att, self.relu(pre_att.clone() * pos_att))
        elif self.attention == '&':
            pre_att = out
            chan_att = self.chan_att(out)
            pos_att = self.pos_att(out)
            out1 = self.dilate_conv_for_concat1(pre_att * chan_att)
            out2 = self.dilate_conv_for_concat2(self.relu(pre_att * pos_att))
            out = out1 + out2
            out = self.bn_for_concat(out)
        out += residual
        out = self.relu(out)
        return out
def _create_datafile(cls, setname):
    """Voxelise every .obj model listed in <setname>.list for class `cls`.

    Reads model uids (one filename per line) from obj_models/<setname>.list
    and shells out to the local `binvox` binary (30^3 grid) for each
    corresponding .obj file. Relies on module-level `path` and `class2uid`.

    Fixes over the original: removed unused locals `vxls` and `bvfile`, and
    replaced `line[:-1]` with rstrip('\n') so a final line without a trailing
    newline is not silently truncated by one character.
    """
    modeldir = os.path.join(path['data'], class2uid[cls], 'obj_models')
    setfile = os.path.join(modeldir, setname + '.list')
    with open(setfile, 'r') as fp:
        for line in fp:
            # The model uid is the filename stem (drop newline and extension).
            muid = line.rstrip('\n').split('.')[0]
            objfile = os.path.join(modeldir, muid + '.obj')
            # NOTE(review): command built by string interpolation — fine for
            # trusted local paths, but subprocess.run([...]) would be safer.
            os.system('./binvox -d 30 -cb -e -t binvox -ri {}'.format(objfile))
# Unit test for self-distillation through neural_compressor: wraps a
# torchvision resnet50 with prepare_compression, configures
# SelfKnowledgeDistillationLossConfig with three layer-mapping groups
# (L2 / KL / CE losses, temperature 3.0, origin loss added), then runs a
# short loop (3 epochs x 10 iters) over a dummy 100-sample dataset.
# NOTE(review): setUpClass/tearDownClass lack @classmethod decorators in this
# view — possibly stripped by formatting; unittest requires classmethods.
# NOTE(review): outputs_features is filled with fixed torch.randn tensors
# rather than real layer activations, so only the distillation-loss plumbing
# is exercised, not actual feature matching.
class TestSelfDistillation(unittest.TestCase): model = torchvision.models.resnet50() def setUpClass(cls): build_fake_yaml() def tearDownClass(cls): os.remove('fake.yaml') shutil.rmtree('./saved', ignore_errors=True) shutil.rmtree('runs', ignore_errors=True) def test_self_distillation(self): import copy from neural_compressor.config import DistillationConfig, SelfKnowledgeDistillationLossConfig from neural_compressor.training import prepare_compression datasets = Datasets('pytorch') dummy_dataset = datasets['dummy'](shape=(100, 3, 224, 224), low=0.0, high=1.0, label=True) dummy_dataloader = PyTorchDataLoader(dummy_dataset) distil_loss = SelfKnowledgeDistillationLossConfig(layer_mappings=[[['resblock.1.feature.output', 'resblock.deepst.feature.output'], ['resblock.2.feature.output', 'resblock.deepst.feature.output']], [['resblock.2.fc', 'resblock.deepst.fc'], ['resblock.3.fc', 'resblock.deepst.fc']], [['resblock.1.fc', 'resblock.deepst.fc'], ['resblock.2.fc', 'resblock.deepst.fc'], ['resblock.3.fc', 'resblock.deepst.fc']]], temperature=3.0, loss_types=['L2', 'KL', 'CE'], loss_weights=[0.5, 0.05, 0.02], add_origin_loss=True) conf = DistillationConfig(teacher_model=self.model, criterion=distil_loss) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) compression_manager = prepare_compression(copy.deepcopy(self.model), conf) model = compression_manager.model def training_func_for_nc(model): epochs = 3 iters = 10 for nepoch in range(epochs): model.train() cnt = 0 compression_manager.callbacks.on_epoch_begin(nepoch) for (image, target) in dummy_dataloader: compression_manager.callbacks.on_step_begin(cnt) print('.', end='') cnt += 1 output = model(image) loss = criterion(output, target) outputs_features = dict() outputs_features['resblock.deepst.feature.output'] = torch.randn(128, 1024) outputs_features['resblock.2.feature.output'] = torch.randn(128, 1024) outputs_features['resblock.1.feature.output'] = torch.randn(128, 1024) 
outputs_features['resblock.deepst.fc'] = torch.randn(128, 100) outputs_features['resblock.3.fc'] = torch.randn(128, 100) outputs_features['resblock.2.fc'] = torch.randn(128, 100) outputs_features['resblock.1.fc'] = torch.randn(128, 100) loss = compression_manager.callbacks.on_after_compute_loss(image, outputs_features, loss, teacher_output=outputs_features) optimizer.zero_grad() loss.backward() optimizer.step() compression_manager.callbacks.on_step_end() if (cnt >= iters): break compression_manager.callbacks.on_epoch_end() return model def eval_func(model): for (image, target) in dummy_dataloader: model(image) return 1 model = training_func_for_nc(model) eval_func(model)
def eval_epoch_bleu(model, validation_data, device, vocab, list_of_refs_dev, args):
    """Decode the validation set with beam search and return the BLEU score.

    Runs the model in eval mode under no_grad, beam-searches a caption for
    each (image0, image1, attributes) batch, truncates it at '<end>', and
    scores the hypotheses against `list_of_refs_dev` with 4-gram BLEU.

    Fix over the original: removed dead locals (total_loss, n_word_total,
    n_word_correct were never updated) and replaced the manual counter with
    enumerate.
    """
    model.eval()
    hypotheses = {}
    with torch.no_grad():
        for count, batch in enumerate(tqdm(validation_data, mininterval=2, desc=' - (Validation) ', leave=False)):
            (image0, image1, image0_attribute, image1_attribute) = map(lambda x: x.to(device), batch)
            hyp = beam_search(image0, image1, model, args, vocab, image0_attribute, image1_attribute)
            # Keep only the text generated before the end-of-sequence marker.
            hypotheses[count] = [hyp.split('<end>')[0].strip()]
    scorer = Bleu(4)
    (score, _) = scorer.compute_score(list_of_refs_dev, hypotheses)
    return score
def post_processing_function(examples, features, predictions, stage='eval'):
    """Turn raw start/end logits into SQuAD-format predictions and references.

    Uses the surrounding script's data_args / training_args /
    answer_column_name closure variables, then packages the results as an
    EvalPrediction for the metric computation.
    """
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=data_args.version_2_with_negative,
        n_best_size=data_args.n_best_size,
        max_answer_length=data_args.max_answer_length,
        null_score_diff_threshold=data_args.null_score_diff_threshold,
        output_dir=training_args.output_dir,
        log_level=logging.WARNING,
        prefix=stage,
    )
    # SQuAD v2 entries additionally carry a no-answer probability.
    formatted_predictions = []
    for pred_id, text in predictions.items():
        entry = {'id': pred_id, 'prediction_text': text}
        if data_args.version_2_with_negative:
            entry['no_answer_probability'] = 0.0
        formatted_predictions.append(entry)
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
# Samples phrase vectors from HDF5 dumps for index training: takes
# doc_sample_ratio of documents and vec_sample_ratio of each group's vectors,
# decodes int8-quantised 'start' vectors via int8_to_float (module-level
# helper), and drops vectors whose L2 norm exceeds norm_th. Each surviving
# vector is then augmented with a leading column sqrt(max_norm^2 - |v|^2)
# (the standard MIPS-to-L2 reduction) and optional trailing dummy zero
# columns. Returns (augmented matrix, max_norm).
# NOTE(review): the first `for dump_path in dump_paths` loop only prints the
# paths; the files are actually opened in the comprehension that follows.
def sample_data(dump_paths, para=False, doc_sample_ratio=0.2, vec_sample_ratio=0.2, seed=29, max_norm=None, max_norm_cf=1.3, num_dummy_zeros=0, norm_th=999): vecs = [] random.seed(seed) np.random.seed(seed) print('sampling from:') for dump_path in dump_paths: print(dump_path) dumps = [h5py.File(dump_path, 'r') for dump_path in dump_paths] for (i, f) in enumerate(tqdm(dumps)): doc_ids = list(f.keys()) sampled_doc_ids = random.sample(doc_ids, int((doc_sample_ratio * len(doc_ids)))) for doc_id in tqdm(sampled_doc_ids, desc=('sampling from %d' % i)): doc_group = f[doc_id] if para: groups = doc_group.values() else: groups = [doc_group] for group in groups: (num_vecs, d) = group['start'].shape if (num_vecs == 0): continue sampled_vec_idxs = np.random.choice(num_vecs, int((vec_sample_ratio * num_vecs))) cur_vecs = int8_to_float(group['start'][:], group.attrs['offset'], group.attrs['scale'])[sampled_vec_idxs] cur_vecs = cur_vecs[(np.linalg.norm(cur_vecs, axis=1) <= norm_th)] vecs.append(cur_vecs) out = np.concatenate(vecs, 0) for dump in dumps: dump.close() norms = np.linalg.norm(out, axis=1, keepdims=True) if (max_norm is None): max_norm = (max_norm_cf * np.max(norms)) consts = np.sqrt(np.maximum(0.0, ((max_norm ** 2) - (norms ** 2)))) out = np.concatenate([consts, out], axis=1) if (num_dummy_zeros > 0): out = np.concatenate([out, np.zeros([out.shape[0], num_dummy_zeros], dtype=out.dtype)], axis=1) return (out, max_norm)
# Torch backend of the project's Model interface: wraps a SymbolNet built
# from a GraphIR; can refine weights by a gradient-guided hybrid search,
# produce an input/output Oracle (CPU eager, conjugate tensors resolved
# before .numpy()), and round-trip through dump/load as a state_dict plus a
# pickled IR file (gir.pkl) living next to the weights.
# NOTE(review): from_gir/load take `cls`, and gir_name/name_suffix/operators/
# add_seed_setter take neither self nor cls — these read as classmethods /
# staticmethods whose decorators were likely lost in formatting; confirm
# against the original file.
# NOTE(review): input_like/output_like are used as attributes elsewhere (see
# make_oracle's self.output_like.keys()), so they are presumably @property.
class TorchModel(Model): def __init__(self) -> None: super().__init__() self.torch_model: SymbolNet = None self.sat_inputs = None def version(self) -> str: return torch.__version__ def from_gir(cls: Type['TorchModel'], ir: GraphIR, **kwargs) -> 'TorchModel': ret = cls() ret.torch_model = SymbolNet(ir, **kwargs) return ret def gir_name() -> str: return 'gir.pkl' def refine_weights(self) -> None: self.torch_model.enable_proxy_grad() searcher = PracticalHybridSearch(self.torch_model) (_, inputs) = searcher.search(max_time_ms=20, max_sample=2) if inputs: self.sat_inputs = inputs self.torch_model.disable_proxy_grad() def make_oracle(self) -> Oracle: with torch.no_grad(): self.torch_model.eval() if (self.sat_inputs is None): inputs = self.torch_model.get_random_inps() else: inputs = self.sat_inputs outputs: Tuple[torch.Tensor] = self.torch_model.forward(**inputs) input_dict = {k: v.cpu().detach().numpy() for (k, v) in inputs.items()} output_dict = {} for (oname, val) in zip(self.output_like.keys(), outputs): if val.is_conj(): output_dict[oname] = val.cpu().detach().resolve_conj().numpy() else: output_dict[oname] = val.cpu().detach().numpy() return Oracle(input_dict, output_dict, provider='torch[cpu] eager') def dump(self, path: PathLike): torch.save(self.torch_model.state_dict(), path) gir_path = path.replace((TorchModel.name_prefix() + TorchModel.name_suffix()), TorchModel.gir_name()) with open(gir_path, 'wb') as f: pickle.dump(self.torch_model.ir, f) def load(cls, path: PathLike) -> 'TorchModel': ret = cls() gir_path = path.replace((cls.name_prefix() + cls.name_suffix()), cls.gir_name()) with open(gir_path, 'rb') as f: ir = pickle.load(f) torch_model = SymbolNet(ir) torch_model.load_state_dict(torch.load(path), strict=False) ret.torch_model = torch_model return ret def name_suffix() -> str: return '.pth' def input_like(self) -> Dict[(str, AbsTensor)]: return self.torch_model.input_like def output_like(self) -> Dict[(str, AbsTensor)]: return self.torch_model.output_like 
def native_model(self) -> SymbolNet: return self.torch_model def operators() -> List[Type[AbsOpBase]]: return ALL_TORCH_OPS def add_seed_setter() -> None: register_seed_setter('torch', torch.manual_seed, overwrite=True)
# TF1 (graph-mode) multi-layer LSTM sequence model: placeholders X (inputs),
# Y (targets), L (true lengths); embedding lookup, MultiRNNCell encoder,
# shared softmax projection, length-masked tf.contrib.seq2seq.sequence_loss,
# Adam optimizer, plus save/restore/train/test helpers and step-by-step
# multinomial sampling that feeds the RNN state back in.
# NOTE(review): _create_network creates tf.Session() twice; the first session
# object is leaked — the first assignment can be deleted.
# NOTE(review): initial_rnn_state builds exactly 3 LSTMStateTuples (range(3))
# while the stack has n_rnn_layer layers; this mismatches unless
# args.n_rnn_layer == 3 — confirm before reuse with other depths.
# NOTE(review): the loss mask is cast bool -> int32 -> float32; a single cast
# to float32 would suffice.
# NOTE(review): test() fetches mol_pred but returns only the loss.
class CVAE(): def __init__(self, vocab_size, args): self.vocab_size = vocab_size self.batch_size = args.batch_size self.lr = tf.Variable(args.lr, trainable=False) self.unit_size = args.unit_size self.n_rnn_layer = args.n_rnn_layer self._create_network() def _create_network(self): self.X = tf.placeholder(tf.int32, [self.batch_size, None]) self.Y = tf.placeholder(tf.int32, [self.batch_size, None]) self.L = tf.placeholder(tf.int32, [self.batch_size]) encoded_rnn_size = [self.unit_size for i in range(self.n_rnn_layer)] with tf.variable_scope('rnn'): encode_cell = [] for i in encoded_rnn_size[:]: encode_cell.append(tf.nn.rnn_cell.LSTMCell(i)) self.encoder = tf.nn.rnn_cell.MultiRNNCell(encode_cell) self.weights = {} self.biases = {} self.weights['softmax'] = tf.get_variable('softmaxw', initializer=tf.random_uniform(shape=[encoded_rnn_size[(- 1)], self.vocab_size], minval=(- 0.1), maxval=0.1)) self.biases['softmax'] = tf.get_variable('softmaxb', initializer=tf.zeros(shape=[self.vocab_size])) self.embedding_encode = tf.get_variable(name='encode_embedding', shape=[self.unit_size, self.vocab_size], initializer=tf.random_uniform_initializer(minval=(- 0.1), maxval=0.1)) (self.decoded, decoded_logits) = self.rnn() weights = tf.sequence_mask(self.L, tf.shape(self.X)[1]) weights = tf.cast(weights, tf.int32) weights = tf.cast(weights, tf.float32) self.reconstr_loss = tf.reduce_mean(tf.contrib.seq2seq.sequence_loss(logits=decoded_logits, targets=self.Y, weights=weights)) self.loss = self.reconstr_loss optimizer = tf.train.AdamOptimizer(self.lr) self.opt = optimizer.minimize(self.loss) self.mol_pred = tf.argmax(self.decoded, axis=2) self.sess = tf.Session() init = tf.global_variables_initializer() self.sess = tf.Session() self.sess.run(init) self.saver = tf.train.Saver(max_to_keep=None) print('Network Ready') def rnn(self): seq_length = tf.shape(self.X)[1] X = tf.nn.embedding_lookup(self.embedding_encode, self.X) self.initial_rnn_state = 
tuple([tf.contrib.rnn.LSTMStateTuple(tf.zeros((self.batch_size, self.unit_size)), tf.zeros((self.batch_size, self.unit_size))) for i in range(3)]) (Y, self.output_rnn_state) = tf.nn.dynamic_rnn(self.encoder, X, dtype=tf.float32, scope='rnn', sequence_length=self.L, initial_state=self.initial_rnn_state) Y = tf.reshape(Y, [(self.batch_size * seq_length), (- 1)]) Y = (tf.matmul(Y, self.weights['softmax']) + self.biases['softmax']) Y_logits = tf.reshape(Y, [self.batch_size, seq_length, (- 1)]) Y = tf.nn.softmax(Y_logits) return (Y, Y_logits) def save(self, ckpt_path, global_step): self.saver.save(self.sess, ckpt_path, global_step=global_step) def assign_lr(self, learning_rate): self.sess.run(tf.assign(self.lr, learning_rate)) def restore(self, ckpt_path): self.saver.restore(self.sess, ckpt_path) def train(self, x, y, l): (_, loss) = self.sess.run([self.opt, self.loss], feed_dict={self.X: x, self.Y: y, self.L: l}) return loss def test(self, x, y, l): (mol_pred, loss) = self.sess.run([self.mol_pred, self.loss], feed_dict={self.X: x, self.Y: y, self.L: l}) return loss def sample(self, start_codon, seq_length): l = np.ones(self.batch_size).astype(np.int32) x = start_codon preds = [] for i in range(seq_length): if (i == 0): (x, state) = self.sess.run([self.decoded, self.output_rnn_state], feed_dict={self.X: x, self.L: l}) else: (x, state) = self.sess.run([self.decoded, self.output_rnn_state], feed_dict={self.X: x, self.L: l, self.initial_rnn_state: state}) sampled_x = [] for j in range(len(x)): prob = x[(j, 0)].tolist() norm0 = sum(prob) prob = [(i / norm0) for i in prob] index = np.random.choice(range(np.shape(x)[(- 1)]), 1, p=prob) sampled_x.append(index) x = np.array(sampled_x) preds.append(x) return np.concatenate(preds, 1).astype(int).squeeze()
class EllipSegNet(torch.nn.Module):
    """Small U-Net-style encoder/decoder for ellipse segmentation.

    Three 2x max-pool downsamplings through DoubleConv encoders, then three
    bilinear 2x upsamplings with skip-connection concatenation, finished by
    a 1x1 convolution head producing `num_outputs` channels.
    """

    def __init__(self, init_f, num_outputs):
        super(EllipSegNet, self).__init__()
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.upsample = torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # Encoder path (channel widths in units of init_f: 1 -> 2 -> 4 -> 4).
        self.inc = DoubleConv(1, init_f)
        self.down1 = DoubleConv(init_f, 2 * init_f)
        self.down2 = DoubleConv(2 * init_f, 4 * init_f)
        self.down3 = DoubleConv(4 * init_f, 4 * init_f)
        # Decoder path; each stage consumes upsampled features + a skip.
        self.up1 = DoubleConv(2 * 4 * init_f, 2 * init_f, 4 * init_f)
        self.up2 = DoubleConv(2 * 2 * init_f, init_f, 2 * init_f)
        self.up3 = DoubleConv(2 * init_f, init_f)
        self.outc = torch.nn.Conv2d(init_f, num_outputs, kernel_size=1)

    def forward(self, x):
        # Encoder with saved skip activations.
        skip1 = self.inc(x)
        skip2 = self.down1(self.pool(skip1))
        skip3 = self.down2(self.pool(skip2))
        bottom = self.down3(self.pool(skip3))
        # Decoder: upsample, concatenate matching skip, refine.
        y = self.up1(torch.cat([self.upsample(bottom), skip3], 1))
        y = self.up2(torch.cat([self.upsample(y), skip2], 1))
        y = self.up3(torch.cat([self.upsample(y), skip1], 1))
        return self.outc(y)
# NOTE(review): the original line began with a stray "_torch" token before
# `class` — a syntax error, almost certainly the tail of a mangled
# "@require_torch" decorator (standard in transformers test files).
# Restored below; confirm against the upstream source.
@require_torch
class CLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    """Dual-encoder tests pairing a tiny CLIP vision tower with a tiny BERT text tower."""

    def get_pretrained_model_and_inputs(self):
        # Tiny hub checkpoints keep the test lightweight.
        model = VisionTextDualEncoderModel.from_vision_text_pretrained('hf-internal-testing/tiny-random-clip', 'hf-internal-testing/tiny-bert')
        batch_size = 13
        pixel_values = floats_tensor([batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return (model, inputs)

    def get_vision_text_model(self, vision_config, text_config):
        # Fresh randomly-initialised towers, frozen into eval mode.
        vision_model = CLIPVisionModel(vision_config).eval()
        text_model = BertModel(text_config).eval()
        return (vision_model, text_model)

    def prepare_config_and_inputs(self):
        # Delegate config/input generation to the per-model testers.
        clip_model_tester = CLIPVisionModelTester(self)
        bert_model_tester = BertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        (vision_config, pixel_values) = vision_config_and_inputs
        (text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = text_config_and_inputs
        return {'text_config': text_config, 'vision_config': vision_config, 'pixel_values': pixel_values, 'attention_mask': input_mask, 'input_ids': input_ids, 'text_token_type_ids': token_type_ids, 'text_sequence_labels': sequence_labels, 'text_token_labels': token_labels, 'text_choice_labels': choice_labels}
def change_data_type(df):
    """Normalise column dtypes in place: 'x_scaled' entries become float32
    numpy arrays and 'target' entries become ints. Returns the same frame.
    """
    df['x_scaled'] = [np.array(values, dtype=np.float32) for values in df['x_scaled']]
    df['target'] = [int(value) for value in df['target']]
    return df
class EmptyStringException(Exception):
    """Raised when a required string value is unexpectedly empty."""

    def __init__(self, message):
        # Forward to Exception so `args`, repr() and pickling behave normally
        # (the original skipped super().__init__, leaving args empty).
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message
# Cascade augmentation via random walk with restart (RWR): for each cascade
# line, rebuilds the user graph from the first FLAGS.max_seq paths, walks
# ~gamma * |V| steps restarting at the root user with probability
# restart_prob, then writes the visited users' paths (sorted by the
# timestamp after ':') through the module-level save_file helper.
# NOTE(review): neighbour selection is degree-biased — random.choices weights
# are the degrees of the current node's neighbours via nx.degree.
# NOTE(review): num_ori_nodes and num_cur_nodes are computed but never used.
def augmentor_rwr(input_file, output_file, restart_prob=0.2, gamma=2): with open(input_file, 'r') as f, open(output_file, 'w') as save_f: for line in f: g = nx.Graph() user_dict = dict() paths = line.strip().split('\t') paths = paths[:(- 1)] observation_path = [paths[1]] cascade_id = paths[0] root_path = paths[1] root_user = root_path.split(':')[0] if (root_user == '-1'): continue user_dict[root_user] = root_path num_ori_nodes = 0 for path in paths[2:(FLAGS.max_seq + 2)]: nodes = path.split(':')[0].split(',') user_dict[nodes[(- 1)]] = path user_dict[nodes[(- 2)]] = path g.add_edge(nodes[(- 1)], nodes[(- 2)]) num_ori_nodes += 1 num_steps = int((gamma * g.number_of_nodes())) cur_node = root_user sampled_node_set = {cur_node} for step in range(num_steps): if (len(sampled_node_set) == g.number_of_nodes()): break if (random.random() > restart_prob): cur_node = random.choices(list(g[cur_node]), weights=[degree for (node, degree) in nx.degree(g, g[cur_node])])[0] else: cur_node = root_user sampled_node_set.add(cur_node) for node in sampled_node_set: observation_path.append(user_dict[node]) observation_path.sort(key=(lambda tup: int(tup.split(':')[1]))) save_file(cascade_id, observation_path, save_f) num_cur_nodes = len(observation_path)
# Partitions CIFAR-10 training indices across n_nets clients under three
# schemes: 'n_cls' (each client draws from `alpha` randomly chosen classes,
# uniform priors), 'dir' (per-client Dirichlet(alpha) class priors), and
# 'my_part' (n_shards = alpha shard-level Dirichlet(0.3) priors shared by
# groups of clients). Client sizes come from a degenerate lognormal
# (sigma=0, i.e. equal sizes before rounding). Returns the raw splits plus
# the client->indices map and per-client class counts.
# NOTE(review): class-exhaustion handling differs per branch: 'n_cls' resets
# cls_amount to a random value and 'my_part' resets it to the full class
# size — both can re-assign indices already given out (sampling with
# replacement); 'dir' merely retries and can spin forever once every class a
# client draws from is exhausted. Confirm this is intended.
def partition_data(datadir, partition, n_nets, alpha, logger): logger.info('partition data') (X_train, y_train, X_test, y_test) = load_cifar10_data(datadir) n_train = X_train.shape[0] if (partition == 'n_cls'): n_client = n_nets n_cls = 10 n_data_per_clnt = (len(y_train) / n_client) clnt_data_list = np.random.lognormal(mean=np.log(n_data_per_clnt), sigma=0, size=n_client) clnt_data_list = ((clnt_data_list / np.sum(clnt_data_list)) * len(y_train)).astype(int) cls_priors = np.zeros(shape=(n_client, n_cls)) for i in range(n_client): cls_priors[i][random.sample(range(n_cls), int(alpha))] = (1.0 / alpha) prior_cumsum = np.cumsum(cls_priors, axis=1) idx_list = [np.where((y_train == i))[0] for i in range(n_cls)] cls_amount = [len(idx_list[i]) for i in range(n_cls)] net_dataidx_map = {} for j in range(n_client): net_dataidx_map[j] = [] while (np.sum(clnt_data_list) != 0): curr_clnt = np.random.randint(n_client) if (clnt_data_list[curr_clnt] <= 0): continue clnt_data_list[curr_clnt] -= 1 curr_prior = prior_cumsum[curr_clnt] while True: cls_label = np.argmax((np.random.uniform() <= curr_prior)) if (cls_amount[cls_label] <= 0): cls_amount[cls_label] = np.random.randint(0, len(idx_list[cls_label])) continue cls_amount[cls_label] -= 1 net_dataidx_map[curr_clnt].append(idx_list[cls_label][cls_amount[cls_label]]) break elif (partition == 'dir'): n_client = n_nets n_cls = 10 n_data_per_clnt = (len(y_train) / n_client) clnt_data_list = np.random.lognormal(mean=np.log(n_data_per_clnt), sigma=0, size=n_client) clnt_data_list = ((clnt_data_list / np.sum(clnt_data_list)) * len(y_train)).astype(int) cls_priors = np.random.dirichlet(alpha=([alpha] * n_cls), size=n_client) prior_cumsum = np.cumsum(cls_priors, axis=1) idx_list = [np.where((y_train == i))[0] for i in range(n_cls)] cls_amount = [len(idx_list[i]) for i in range(n_cls)] net_dataidx_map = {} for j in range(n_client): net_dataidx_map[j] = [] while (np.sum(clnt_data_list) != 0): curr_clnt = np.random.randint(n_client) if 
(clnt_data_list[curr_clnt] <= 0): continue clnt_data_list[curr_clnt] -= 1 curr_prior = prior_cumsum[curr_clnt] while True: cls_label = np.argmax((np.random.uniform() <= curr_prior)) if (cls_amount[cls_label] <= 0): continue cls_amount[cls_label] -= 1 net_dataidx_map[curr_clnt].append(idx_list[cls_label][cls_amount[cls_label]]) break elif (partition == 'my_part'): n_shards = int(alpha) n_client = n_nets n_cls = 10 n_data_per_clnt = (len(y_train) / n_client) clnt_data_list = np.random.lognormal(mean=np.log(n_data_per_clnt), sigma=0, size=n_client) clnt_data_list = ((clnt_data_list / np.sum(clnt_data_list)) * len(y_train)).astype(int) cls_priors = np.zeros(shape=(n_client, n_cls)) cls_priors_tmp = np.random.dirichlet(alpha=([0.3] * n_cls), size=int(n_shards)) for i in range(n_client): cls_priors[i] = cls_priors_tmp[int((i / int((n_client / n_shards))))] prior_cumsum = np.cumsum(cls_priors, axis=1) idx_list = [np.where((y_train == i))[0] for i in range(n_cls)] cls_amount = [len(idx_list[i]) for i in range(n_cls)] net_dataidx_map = {} for j in range(n_client): net_dataidx_map[j] = [] while (np.sum(clnt_data_list) != 0): curr_clnt = np.random.randint(n_client) if (clnt_data_list[curr_clnt] <= 0): continue clnt_data_list[curr_clnt] -= 1 curr_prior = prior_cumsum[curr_clnt] while True: cls_label = np.argmax((np.random.uniform() <= curr_prior)) if (cls_amount[cls_label] <= 0): cls_amount[cls_label] = len(idx_list[cls_label]) continue cls_amount[cls_label] -= 1 net_dataidx_map[curr_clnt].append(idx_list[cls_label][cls_amount[cls_label]]) break traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map) return (X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts)
def convert_longformer_qa_checkpoint_to_pytorch(longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str):
    """Port a PyTorch-Lightning Longformer-QA checkpoint into a HF
    LongformerForQuestionAnswering save folder.

    Loads the lightning checkpoint (CPU) into a LightningModel wrapping the
    base Longformer, copies backbone and qa_outputs weights into the HF QA
    architecture, and saves the result with save_pretrained.
    """
    # Rehydrate the lightning wrapper around the pretrained backbone.
    base_model = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(base_model)
    checkpoint = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('cpu'))
    lightning_model.load_state_dict(checkpoint['state_dict'])

    # Transfer the fine-tuned weights into the HF QA model.
    qa_model = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    qa_model.longformer.load_state_dict(lightning_model.model.state_dict())
    qa_model.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    qa_model.eval()

    qa_model.save_pretrained(pytorch_dump_folder_path)
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')
def get_config():
    """Parse CLI flags, overlay them onto config.json, then apply any
    --config-override JSON string on top. Returns the merged dict (with the
    config_override key removed).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--project-dir', type=str, default='output')
    parser.add_argument('--dataset-dir', type=str, default='output')
    parser.add_argument('--lr', type=float, default=0.1)
    parser.add_argument('--data-seed', type=int, default=0)
    parser.add_argument('--train-seed', type=int, default=0)
    parser.add_argument('--config-override', type=str, default='')
    args = parser.parse_args()

    with open('config.json', 'r') as read_file:
        config = json.load(read_file)
    # CLI values take precedence over the file.
    config.update(vars(args))

    # The override string is consumed here and never kept in the result.
    override = config.pop('config_override')
    if override != '':
        print(override)
        config.update(json.loads(override))
    return config
class Joiner(nn.Sequential):
    """Couples a backbone (index 0) with a positional embedding (index 1).

    Both forward variants run the backbone, then pair each returned feature
    map with its positional encoding cast to the feature dtype.
    """

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def _features_and_pos(self, xs):
        # Collect backbone feature maps and matching positional encodings.
        out: List[NestedTensor] = []
        pos = []
        for _, feat in xs.items():
            out.append(feat)
            pos.append(self[1](feat).to(feat.tensors.dtype))
        return out, pos

    def forward(self, tensor_list: NestedTensor, Raw_point: NestedTensor):
        (xs, point_fea, img_fea) = self[0](tensor_list, Emb_x=Raw_point)
        out, pos = self._features_and_pos(xs)
        return (out, pos, point_fea, img_fea)

    def train_forward(self, tensor_list: NestedTensor, Raw_point: NestedTensor, ratio):
        # Training-time variant that threads a ratio through the backbone.
        (xs, point_fea, img_fea) = self[0].train_forward(tensor_list, Emb_x=Raw_point, ratio=ratio)
        out, pos = self._features_and_pos(xs)
        return (out, pos, point_fea, img_fea)
def get_edge_labels():
    """Map each of the 8 edge angles (as a string key) to a contiguous id."""
    return {str(angle): index for index, angle in enumerate(range(8))}
# Composite HF config for the ALIGN model: holds an AlignTextConfig plus an
# AlignVisionConfig along with projection_dim, temperature_init_value and
# initializer_range. Missing sub-configs fall back to defaults (with an info
# log). to_dict() re-serialises both sub-configs and stamps model_type.
# NOTE(review): from_text_vision_configs reads as an alternate constructor
# taking `cls`; upstream transformers defines it as a @classmethod — the
# decorator appears to have been lost in formatting here.
class AlignConfig(PretrainedConfig): model_type = 'align' is_composition = True def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs): super().__init__(**kwargs) if (text_config is None): text_config = {} logger.info('text_config is None. Initializing the AlignTextConfig with default values.') if (vision_config is None): vision_config = {} logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.') self.text_config = AlignTextConfig(**text_config) self.vision_config = AlignVisionConfig(**vision_config) self.projection_dim = projection_dim self.temperature_init_value = temperature_init_value self.initializer_range = initializer_range def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs): return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) def to_dict(self): output = copy.deepcopy(self.__dict__) output['text_config'] = self.text_config.to_dict() output['vision_config'] = self.vision_config.to_dict() output['model_type'] = self.__class__.model_type return output
class QuantizedLinear(Linear):
    """Linear layer that quantizes its input before the matmul and
    dequantizes the result back to a float tensor.
    """

    def forward(self, x):
        quantized_input = self.input_quant(x)
        quantized_output = super().forward(quantized_input)
        return quantized_output.dequantize()
def test_nbr(g1):
    """Exercise Graph.nbr_v: neighbour lists, edge add/remove, and the
    two-argument form (presumably hop distance — TODO confirm against Graph)."""
    # Fixture g1 starts with edges 0-1 and 0-2; vertex 3 is isolated.
    assert (g1.nbr_v(0) == [1, 2])
    assert (g1.nbr_v(1) == [0])
    assert (g1.nbr_v(2) == [0])
    assert (g1.nbr_v(3) == [])
    # Adding edge 3-0 extends 0's neighbour list.
    g1.add_edges((3, 0))
    assert (g1.nbr_v(0) == [1, 2, 3])
    # Removing edge 0-2 isolates vertex 2.
    g1.remove_edges((0, 2))
    assert (g1.nbr_v(2) == [])
    # Fresh 5-vertex graph for the second-argument queries.
    g3 = Graph(5, [(0, 1), (0, 3), (1, 4), (2, 3)])
    assert (sorted(g3.nbr_v(3, 1)) == [0, 2])
    assert (sorted(g3.nbr_v(3, 2)) == [1, 3])
    assert (sorted(g3.nbr_v(3, 3)) == [0, 2, 4])
def get_poses(nusc: NuScenes, scene_token: str) -> List[dict]:
    """Return the ego-pose record for every LIDAR_TOP sample_data in a scene,
    in acquisition order, by walking the sample_data 'next' chain."""
    scene_rec = nusc.get('scene', scene_token)
    first_sample = nusc.get('sample', scene_rec['first_sample_token'])
    sd_rec = nusc.get('sample_data', first_sample['data']['LIDAR_TOP'])
    # NOTE(review): looks up ego_pose by the sample_data token — in the nuScenes
    # schema these tokens coincide one-to-one; confirm for the devkit in use.
    pose_list = [nusc.get('ego_pose', sd_rec['token'])]
    while sd_rec['next'] != '':
        sd_rec = nusc.get('sample_data', sd_rec['next'])
        pose_list.append(nusc.get('ego_pose', sd_rec['token']))
    return pose_list
class ADE20KDataset(Pix2pixDataset):
    """ADE20K semantic-segmentation dataset for SPADE/pix2pix-style training."""

    def modify_commandline_options(parser, is_train):
        # Dataset-specific defaults: 150 semantic labels plus a don't-care label.
        parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
        parser.set_defaults(preprocess_mode='resize_and_crop')
        if is_train:
            parser.set_defaults(load_size=286)
        else:
            parser.set_defaults(load_size=256)
        parser.set_defaults(crop_size=256)
        parser.set_defaults(display_winsize=256)
        parser.set_defaults(label_nc=150)
        parser.set_defaults(contain_dontcare_label=True)
        parser.set_defaults(cache_filelist_read=False)
        parser.set_defaults(cache_filelist_write=False)
        parser.set_defaults(no_instance=True)
        return parser

    def get_paths(self, opt):
        """Collect (label, image, instance) path lists under opt.dataroot.

        Files are matched to the phase via a '_train_'/'_val_' substring;
        .jpg files are photos, .png files are label maps.
        """
        root = opt.dataroot
        phase = ('val' if (opt.phase == 'test') else 'train')
        all_images = make_dataset(root, recursive=True, read_cache=False, write_cache=False)
        image_paths = []
        label_paths = []
        for p in all_images:
            if (('_%s_' % phase) not in p):
                continue
            if p.endswith('.jpg'):
                image_paths.append(p)
            elif p.endswith('.png'):
                label_paths.append(p)
        # ADE20K provides no instance annotations.
        instance_paths = []
        return (label_paths, image_paths, instance_paths)

    def postprocess(self, input_dict):
        # Shift labels from [1, 150] to [0, 149]; class 0 ('unknown') becomes -1
        # and is remapped to opt.label_nc (the don't-care index).
        # NOTE(review): `label - 1` creates a new tensor, so input_dict['label']
        # itself is not updated here — confirm callers rely on the intended
        # in-place behavior.
        label = input_dict['label']
        label = (label - 1)
        label[(label == (- 1))] = self.opt.label_nc
class memoized(object):
    """Memoizing decorator: caches return values keyed on the call arguments.

    Falls back to calling the wrapped function directly (without caching)
    when the arguments are unhashable. Also works on instance methods via
    the descriptor protocol (__get__).
    """

    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args, **kwargs):
        # FIX: the previous key used only the *names* of keyword arguments
        # (sorted(list(kwargs))), ignoring their values — calls differing only
        # in kwarg values wrongly shared one cache entry. Include the values.
        key = (args, tuple(sorted(kwargs.items())))
        try:
            # FIX: isinstance(..., collections.Hashable) was unreliable (a tuple
            # containing an unhashable element still passes the check) and
            # collections.Hashable was removed in Python 3.10. Actually hashing
            # the key is the robust test.
            hash(key)
        except TypeError:
            # logger.warn is the deprecated alias of logger.warning.
            logger.warning('Arguments to memoized call is unhashable!')
            return self.func(*args, **kwargs)
        if key in self.cache:
            return self.cache[key]
        value = self.func(*args, **kwargs)
        self.cache[key] = value
        return value

    def __repr__(self):
        # Surface the wrapped function's docstring.
        return self.func.__doc__

    def __get__(self, obj, objtype):
        # Support instance methods: bind the wrapped call to the instance.
        return functools.partial(self.__call__, obj)
def assert_group(tensor, group_name, same=True):
    """All-gather `tensor` across the named parallel group and assert that every
    other rank's tensor is equal (same=True) or different (same=False) from
    rank 0's tensor. Collective: must be called by every rank in the group.
    """
    tensor_list = [torch.empty_like(tensor) for _ in range(parallel_group_size(group_name))]
    tensor_list[parallel_rank(group_name)] = tensor
    dist.all_gather(tensor_list, tensor, group=parallel_group(group_name))
    # NOTE(review): the loop variable shadows the `tensor` parameter (harmless
    # after the all_gather, but worth renaming); 'differet' in the message is a
    # typo preserved here because it is runtime text.
    for tensor in tensor_list[1:]:
        all_same = torch.eq(tensor, tensor_list[0]).all()
        assert (all_same if same else (not all_same)), f'''Tensor {('same' if same else 'differet')} check failed: {tensor} {tensor_list[0]}'''
class TFXLNetLMHeadModel(metaclass=DummyObject):
    """Auto-generated placeholder used when TensorFlow is not installed.

    Any instantiation is routed through requires_backends, which reports the
    missing 'tf' backend to the user instead of failing with an opaque error.
    """
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation block: global average pooling followed by a
    channel-bottleneck MLP whose gated output rescales the input channels."""

    def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None, act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1):
        super(SqueezeExcite, self).__init__()
        base_chs = reduced_base_chs or in_chs
        reduced_chs = make_divisible(base_chs * se_ratio, divisor)
        self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
        self.act1 = act_layer(inplace=True)
        self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
        self.gate_fn = gate_fn

    def forward(self, x):
        # Squeeze: global average over the spatial dimensions.
        squeezed = x.mean((2, 3), keepdim=True)
        # Excite: bottleneck 1x1 convs produce per-channel gate logits.
        gates = self.conv_expand(self.act1(self.conv_reduce(squeezed)))
        # Rescale the input channel-wise by the gated activations.
        return x * self.gate_fn(gates)
def args_parse():
    """Parse command-line options for the Atari DDQN entry point.

    Returns the argparse namespace with env name, train/play mode flags and
    the logging/checkpoint intervals.
    """
    parser = argparse.ArgumentParser(description='Atari: DDQN')
    parser.add_argument('--env', default='BreakoutNoFrameskip-v4', help='Should be NoFrameskip environment')
    parser.add_argument('--train', action='store_true', help='Train agent with given environment')
    parser.add_argument('--play', help='Play with a given weight directory')
    parser.add_argument('--log_interval', default=100, type=int, help='Interval of logging stdout')
    parser.add_argument('--save_weight_interval', default=1000, type=int, help='Interval of saving weights')
    return parser.parse_args()
class LayoutLMv2ImageProcessor(metaclass=DummyObject):
    """Auto-generated placeholder used when the vision backend is not installed.

    Any instantiation is routed through requires_backends, which reports the
    missing 'vision' backend to the user instead of failing obscurely.
    """
    _backends = ['vision']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
class Lwf(ContinualLearner):
    """Learning-without-Forgetting learner: blends the new-task cross-entropy
    loss with a knowledge-distillation loss against previous-task outputs."""

    def __init__(self, model, opt, params):
        super(Lwf, self).__init__(model, opt, params)

    def train_learner(self, x_train, y_train):
        """Train on one task's data.

        The loss is a convex blend: weight 1/(task_seen+1) on the new-task CE
        loss and the remainder on the KD loss, so distillation dominates as
        more tasks have been seen.
        """
        self.before_train(x_train, y_train)
        # Build the task-specific loader.
        train_dataset = dataset_transform(x_train, y_train, transform=transforms_match[self.data])
        train_loader = data.DataLoader(train_dataset, batch_size=self.batch, shuffle=True, num_workers=0, drop_last=True)
        self.model = self.model.train()
        losses_batch = AverageMeter()
        acc_batch = AverageMeter()
        for ep in range(self.epoch):
            for (i, batch_data) in enumerate(train_loader):
                (batch_x, batch_y) = batch_data
                batch_x = maybe_cuda(batch_x, self.cuda)
                batch_y = maybe_cuda(batch_y, self.cuda)
                logits = self.forward(batch_x)
                # Distillation loss w.r.t. the stored teacher model/outputs.
                loss_old = self.kd_manager.get_kd_loss(logits, batch_x)
                loss_new = self.criterion(logits, batch_y)
                # LwF blend of new-task and distillation losses.
                loss = (((1 / (self.task_seen + 1)) * loss_new) + ((1 - (1 / (self.task_seen + 1))) * loss_old))
                (_, pred_label) = torch.max(logits, 1)
                correct_cnt = ((pred_label == batch_y).sum().item() / batch_y.size(0))
                acc_batch.update(correct_cnt, batch_y.size(0))
                # NOTE(review): `loss` is still a tensor here, so the meter may
                # keep the autograd graph alive; consider loss.item() — confirm
                # AverageMeter's expectations.
                losses_batch.update(loss, batch_y.size(0))
                self.opt.zero_grad()
                loss.backward()
                self.opt.step()
                if (((i % 100) == 1) and self.verbose):
                    print('==>>> it: {}, avg. loss: {:.6f}, running train acc: {:.3f}'.format(i, losses_batch.avg(), acc_batch.avg()))
        self.after_train()
def handle(signum, frame):
    """Signal handler: terminate the module-level worker pool, print the test
    summaries, and exit with status 1.

    Relies on the module-level `proc_pool` and the reporting helpers.
    """
    proc_pool.terminate()
    print_test_suite_result()
    print_results()
    exit(1)
class MLP(PyTorchClassifier):
    """SentEval-style MLP probe: a single hidden layer with dropout+sigmoid,
    or plain logistic regression when nhid == 0. Runs on CUDA.
    """

    def __init__(self, params, inputdim, nclasses, l2reg=0.0, batch_size=64, seed=1111, cudaEfficient=False):
        # FIX: super(self.__class__, ...) recurses infinitely if this __init__
        # is inherited by a subclass; name the class explicitly.
        super(MLP, self).__init__(inputdim, nclasses, l2reg, batch_size, seed, cudaEfficient)
        # Hyper-parameters, with defaults applied when absent from `params`.
        self.nhid = (0 if ('nhid' not in params) else params['nhid'])
        self.optim = ('adam' if ('optim' not in params) else params['optim'])
        self.tenacity = (5 if ('tenacity' not in params) else params['tenacity'])
        self.epoch_size = (4 if ('epoch_size' not in params) else params['epoch_size'])
        self.max_epoch = (200 if ('max_epoch' not in params) else params['max_epoch'])
        self.dropout = (0.0 if ('dropout' not in params) else params['dropout'])
        self.batch_size = (64 if ('batch_size' not in params) else params['batch_size'])
        # FIX: the original read params['nhid'] directly here, raising KeyError
        # when 'nhid' was omitted even though a default was computed above.
        if (self.nhid == 0):
            self.model = nn.Sequential(nn.Linear(self.inputdim, 1)).cuda()
        else:
            self.model = nn.Sequential(nn.Linear(self.inputdim, self.nhid), nn.Dropout(p=self.dropout), nn.Sigmoid(), nn.Linear(self.nhid, 1)).cuda()
        # NOTE(review): CrossEntropyLoss over a single output logit is unusual —
        # presumably callers binarize elsewhere; confirm against the training loop.
        self.loss_fn = nn.CrossEntropyLoss().cuda()
        self.loss_fn.size_average = False
        (optim_fn, optim_params) = utils.get_optimizer(self.optim)
        self.optimizer = optim_fn(self.model.parameters(), **optim_params)
        self.optimizer.param_groups[0]['weight_decay'] = self.l2reg
def load_image(img_path, image_size):
    """Read an image from disk and return a CHW float32 array scaled to [-1, 1].

    The image is resized to (image_size, image_size) and converted from
    OpenCV's BGR order to RGB before normalization.
    """
    bgr = cv2.imread(img_path)
    bgr = cv2.resize(bgr, (image_size, image_size))
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB).astype(np.float32)
    # Map [0, 255] -> [-1, 1].
    scaled = ((rgb / 255) * 2) - 1
    # HWC -> CHW.
    return np.transpose(scaled, (2, 0, 1))
class OptimizerHook(Hook):
    """Runner hook performing the optimizer step after each training iteration,
    with optional gradient-norm clipping."""

    def __init__(self, grad_clip=None):
        # grad_clip: kwargs forwarded to clip_grad_norm_, or None to disable.
        self.grad_clip = grad_clip

    def clip_grads(self, params):
        trainable = filter(lambda p: p.requires_grad, params)
        clip_grad.clip_grad_norm_(trainable, **self.grad_clip)

    def after_train_iter(self, runner):
        # Standard zero-grad / backward / (clip) / step sequence.
        runner.optimizer.zero_grad()
        runner.outputs['loss'].backward()
        if self.grad_clip is not None:
            self.clip_grads(runner.model.parameters())
        runner.optimizer.step()
from dataclasses import dataclass


@dataclass
class ModelArguments:
    """Arguments selecting which model/config/tokenizer to fine-tune.

    FIX: the `field(...)` declarations only take effect under @dataclass;
    without the decorator every attribute is a shared dataclasses.Field
    sentinel and the class cannot be instantiated with these arguments.
    """
    # Required: model path or hub identifier.
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})
    resize_position_embeddings: Optional[bool] = field(default=None, metadata={'help': "Whether to automatically resize the position embeddings if `max_source_length` exceeds the model's position embeddings."})
def pooler(inputs, pool_type, axis=1, **kwargs):
    """Dispatch to a pooling function by name.

    Args:
        inputs: the tensor to pool.
        pool_type: one of 'mean', 'max', 'sum'.
        axis: axis to pool over.
        **kwargs: 'mean' additionally requires `sequence_length`.
    Returns:
        The pooled tensor.
    Raises:
        ValueError: if pool_type is not recognised.
    """
    if (pool_type == 'mean'):
        # Mean pooling needs the true lengths to ignore padded positions.
        return mean_pool(inputs, kwargs['sequence_length'], axis)
    elif (pool_type == 'max'):
        return max_pool(inputs, axis)
    elif (pool_type == 'sum'):
        return sum_pool(inputs, axis)
    # FIX: previously fell through and silently returned None for unknown types.
    raise ValueError(f"Unknown pool_type: {pool_type!r}; expected 'mean', 'max' or 'sum'")
def module_checkpoint_iter(prefix, iteration_list='10000,20000'):
    """Return an MXNet callback that saves a checkpoint at selected iterations.

    Args:
        prefix: checkpoint filename prefix; the iteration number is appended.
        iteration_list: comma-separated 1-based iteration numbers at which
            to save.
    Returns:
        A callback (epoch_no, iter_no, sym, arg, aux) for the training loop.
    """
    # FIX: the original re-imported numpy and re-parsed `iteration_list` on
    # every single callback invocation; parse once here (numpy is unneeded
    # for a simple membership test).
    save_iters = frozenset(int(i) for i in iteration_list.split(','))

    def _callback(epoch_no, iter_no, sym=None, arg=None, aux=None):
        if (iter_no + 1) in save_iters:
            mx.model.save_checkpoint(prefix + ('_%d' % (iter_no + 1)), epoch_no + 1, sym, arg, aux)

    return _callback
def build_yaml():
    """Write a throwaway quantization config to 'test.yaml' for the tests.

    The config targets the onnxrt_qlinearops framework on GPU with fp16 mixed
    precision and a dummy-dataset MSE accuracy section.
    """
    # NOTE(review): the nesting indentation inside this literal looks collapsed;
    # confirm the YAML levels against the original file before relying on it.
    fake_yaml = '\n device: gpu\n model:\n name: test\n framework: onnxrt_qlinearops\n\n mixed_precision:\n precisions: fp16\n\n evaluation:\n accuracy:\n metric:\n MSE:\n compare_label: False\n dataloader:\n dataset:\n dummy:\n shape: [[1,1,5,5], [1,1,5,1]]\n label: True\n '
    with open('test.yaml', 'w', encoding='utf-8') as f:
        f.write(fake_yaml)
def generate_2D_generalized_gaussian(rows, columns, alpha=2):
    """Draw a rows x columns sample from a zero-mean generalized Gaussian.

    Inverts the positive half of the GGD CDF by 33 bisection iterations and
    then applies random signs for symmetry. `beta` is chosen so the
    distribution has unit variance for the given shape parameter `alpha`.
    Uses the global NumPy RNG (same draw sequence as the original code).
    """
    m = rows
    n = columns
    # Uniform CDF targets in [0.5, 1): only the positive half is inverted.
    r = (0.5 * np.random.random((m * n))) + 0.5
    # Scale parameter for unit variance at shape alpha.
    beta = np.sqrt((special.gamma((3.0 / alpha)) / special.gamma((1.0 / alpha))))
    y = (r / beta)
    ymin = (1e-20 * np.ones((m * n)))
    ymax = (1000 * np.ones((m * n)))
    for _ in range(0, 33):
        cdf = (0.5 + (0.5 * special.gammainc((1.0 / alpha), ((beta * y) ** alpha))))
        # FIX: the original guarded with `len(np.nonzero(cond)) > 0`, which
        # counts the number of axes (always 1 here), not matches — the guards
        # were dead. Indexing with the boolean masks directly is equivalent
        # and correct (empty masks are a no-op).
        above = (cdf > r)
        ymax[above] = y[above]
        below = (cdf < r)
        ymin[below] = y[below]
        y = (0.5 * (ymax + ymin))
    # Random sign flip for symmetry around zero.
    flip = (np.random.random((m * n)) > 0.5)
    y[flip] = (- y[flip])
    # Reshape (column-major layout preserved from the original code).
    x = y.reshape([n, m]).T.copy()
    return x
def smoke_test_explanations(global_exp, local_exp, port):
    """Smoke-test the interpret dashboard with both explanation objects.

    Binds the show server to 127.0.0.1:`port`, runs preserve()/show() on the
    global and local explanations, preserves each row of the global
    explanation by its first selector column, then shuts the server down.
    """
    from interpret import preserve, show, shutdown_show_server, set_show_addr
    set_show_addr(('127.0.0.1', port))
    preserve(global_exp)
    preserve(local_exp)
    show(global_exp)
    show(local_exp)
    # Preserve each entry of the global explanation via its selector key.
    for selector_key in global_exp.selector[global_exp.selector.columns[0]]:
        preserve(global_exp, selector_key)
    shutdown_show_server()
def quaddobl_decomposition(deg):
    """Return the quad-double witness-set decomposition.

    For each irreducible component (1-based index), fetches its witness
    points and the trace-sum difference certifying the decomposition.

    Returns:
        A list of (witness_points, trace_sum_difference) tuples.
    """
    from phcpy.phcpy2c3 import py2c_factor_number_of_quaddobl_components
    from phcpy.phcpy2c3 import py2c_factor_witness_points_of_quaddobl_component
    from phcpy.phcpy2c3 import py2c_factor_quaddobl_trace_sum_difference as qtf
    nbcmp = py2c_factor_number_of_quaddobl_components()
    result = []
    for i in range(1, (nbcmp + 1)):
        compnt = py2c_factor_witness_points_of_quaddobl_component(deg, i)
        tsd = qtf(deg, i, len(compnt), compnt)
        # NOTE: eval() parses the string produced by the phcpy C bindings —
        # trusted library output, not user input.
        result.append((eval(compnt), tsd))
    return result
def encode_string(text):
    """Escape carriage returns, newlines and tabs as literal backslash sequences."""
    # Single-pass translation is equivalent to the chained replace() calls
    # because no replacement contains another escaped character.
    table = str.maketrans({'\r': '\\r', '\n': '\\n', '\t': '\\t'})
    return text.translate(table)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base feature extractor for sequence (e.g. audio) inputs.

    Adds batch padding/truncation of variable-length feature sequences on top
    of FeatureExtractionMixin, controlled by `padding_side` and
    `return_attention_mask` (both consumed from kwargs).
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        # 'right' or 'left' — which side padding is appended to.
        self.padding_side = kwargs.pop('padding_side', 'right')
        self.return_attention_mask = kwargs.pop('return_attention_mask', True)
        super().__init__(**kwargs)

    def pad(self, processed_features: Union[(BatchFeature, List[BatchFeature], Dict[(str, BatchFeature)], Dict[(str, List[BatchFeature])], List[Dict[(str, BatchFeature)]])], padding: Union[(bool, str, PaddingStrategy)]=True, max_length: Optional[int]=None, truncation: bool=False, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[(str, TensorType)]]=None) -> BatchFeature:
        """Pad (and optionally truncate) a batch of features to equal length.

        Accepts either a dict of lists or a list of per-example dicts; infers
        the return tensor type from the first element when not specified, and
        returns a BatchFeature with float64 values downcast to float32.
        """
        # Normalize list-of-dicts input into a dict-of-lists.
        if (isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature))):
            processed_features = {key: [example[key] for example in processed_features] for key in processed_features[0].keys()}
        if (self.model_input_names[0] not in processed_features):
            raise ValueError(f'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature` to this method that includes {self.model_input_names[0]}, but you provided {list(processed_features.keys())}')
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (return_attention_mask if (return_attention_mask is not None) else self.return_attention_mask)
        # Empty batch: nothing to pad.
        if (not required_input):
            if return_attention_mask:
                processed_features['attention_mask'] = []
            return processed_features
        # Find a non-empty element to infer the tensor framework from.
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            index = 0
            while (len(required_input[index]) == 0):
                index += 1
            if (index < len(required_input)):
                first_element = required_input[index][0]
        if (return_tensors is None):
            if (is_tf_available() and _is_tensorflow(first_element)):
                return_tensors = 'tf'
            elif (is_torch_available() and _is_torch(first_element)):
                return_tensors = 'pt'
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = 'np'
            else:
                raise ValueError(f'type of {first_element} unknown: {type(first_element)}. Should be one of a python, numpy, pytorch or tensorflow object.')
        # Convert everything to numpy so padding works uniformly.
        for (key, value) in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if (not all(((len(v) == batch_size) for v in processed_features.values()))):
            raise ValueError('Some items in the output dictionary have a different batch size than others.')
        # Truncate each example first, then pad to a common length.
        truncated_inputs = []
        for i in range(batch_size):
            inputs = dict(((k, v[i]) for (k, v) in processed_features.items()))
            inputs_slice = self._truncate(inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation)
            truncated_inputs.append(inputs_slice)
        if (padding_strategy == PaddingStrategy.LONGEST):
            # Resolve LONGEST to an explicit max length over the batch.
            max_length = max((len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs))
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size):
            outputs = self._pad(truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
            for (key, value) in outputs.items():
                if (key not in batch_outputs):
                    batch_outputs[key] = []
                # Keep storage compact: downcast float64 to float32.
                if (value.dtype is np.dtype(np.float64)):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)
        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(self, processed_features: Union[(Dict[(str, np.ndarray)], BatchFeature)], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None) -> dict:
        """Pad a single example (in place) to `max_length` with padding_value."""
        required_input = processed_features[self.model_input_names[0]]
        if (padding_strategy == PaddingStrategy.LONGEST):
            max_length = len(required_input)
        # Round max_length up to a multiple of pad_to_multiple_of.
        if ((max_length is not None) and (pad_to_multiple_of is not None) and ((max_length % pad_to_multiple_of) != 0)):
            max_length = (((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of)
        needs_to_be_padded = ((padding_strategy != PaddingStrategy.DO_NOT_PAD) and (len(required_input) < max_length))
        if (return_attention_mask and ('attention_mask' not in processed_features)):
            processed_features['attention_mask'] = np.ones(len(required_input), dtype=np.int32)
        if needs_to_be_padded:
            difference = (max_length - len(required_input))
            if (self.padding_side == 'right'):
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(processed_features['attention_mask'], (0, difference))
                # Multi-dim features pad along the time axis only.
                padding_shape = (((0, difference), (0, 0)) if (self.feature_size > 1) else (0, difference))
                processed_features[self.model_input_names[0]] = np.pad(required_input, padding_shape, 'constant', constant_values=self.padding_value)
            elif (self.padding_side == 'left'):
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(processed_features['attention_mask'], (difference, 0))
                padding_shape = (((difference, 0), (0, 0)) if (self.feature_size > 1) else (difference, 0))
                processed_features[self.model_input_names[0]] = np.pad(required_input, padding_shape, 'constant', constant_values=self.padding_value)
            else:
                raise ValueError(('Invalid padding strategy:' + str(self.padding_side)))
        return processed_features

    def _truncate(self, processed_features: Union[(Dict[(str, np.ndarray)], BatchFeature)], max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, truncation: Optional[bool]=None):
        """Truncate a single example (in place) down to `max_length`."""
        if (not truncation):
            return processed_features
        elif (truncation and (max_length is None)):
            raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')
        required_input = processed_features[self.model_input_names[0]]
        # Same multiple-of rounding as in _pad so both agree on the target length.
        if ((max_length is not None) and (pad_to_multiple_of is not None) and ((max_length % pad_to_multiple_of) != 0)):
            max_length = (((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of)
        needs_to_be_truncated = (len(required_input) > max_length)
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if ('attention_mask' in processed_features):
                processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]
        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Resolve the user-facing `padding` argument into a PaddingStrategy,
        validating max_length and padding_value requirements."""
        if (padding is not False):
            if (padding is True):
                padding_strategy = PaddingStrategy.LONGEST
            elif (not isinstance(padding, PaddingStrategy)):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        if (max_length is None):
            if (padding_strategy == PaddingStrategy.MAX_LENGTH):
                raise ValueError(f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined')
        if ((padding_strategy != PaddingStrategy.DO_NOT_PAD) and (self.padding_value is None)):
            raise ValueError('Asking to pad but the feature_extractor does not have a padding value. Please select a value to use as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.')
        return padding_strategy
class AdaBound(Optimizer):
    """AdaBound optimizer (Luo et al., ICLR 2019): Adam with dynamic bounds on
    the per-parameter step size that converge toward an SGD-like final lr.

    Args:
        params: iterable of parameters to optimize.
        lr: initial (Adam) learning rate.
        betas: coefficients for the running first/second moment averages.
        final_lr: target learning rate the bounds converge to.
        gamma: convergence speed of the bound functions.
        eps: denominator term for numerical stability.
        weight_decay: L2 penalty.
        amsbound: use the AMSBound variant (running max of second moments).
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), final_lr=0.1, gamma=0.001, eps=1e-08, weight_decay=0, amsbound=False):
        # Validate hyper-parameters up front so errors surface at construction.
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if (not (0.0 <= final_lr)):
            raise ValueError('Invalid final learning rate: {}'.format(final_lr))
        if (not (0.0 <= gamma < 1.0)):
            raise ValueError('Invalid gamma parameter: {}'.format(gamma))
        defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps, weight_decay=weight_decay, amsbound=amsbound)
        super(AdaBound, self).__init__(params, defaults)
        # Remember each group's initial lr so final_lr can be scaled by lr/base_lr.
        self.base_lrs = list(map((lambda group: group['lr']), self.param_groups))

    def __setstate__(self, state):
        super(AdaBound, self).__setstate__(state)
        # Older checkpoints may lack the amsbound flag.
        for group in self.param_groups:
            group.setdefault('amsbound', False)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        Returns:
            The closure's loss, or None.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for (group, base_lr) in zip(self.param_groups, self.base_lrs):
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsbound = group['amsbound']
                state = self.state[p]
                # Lazy state initialization on this parameter's first update.
                if (len(state) == 0):
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsbound:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                if amsbound:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                (beta1, beta2) = group['betas']
                state['step'] += 1
                if (group['weight_decay'] != 0):
                    grad = grad.add(group['weight_decay'], p.data)
                # Exponential moving averages of the gradient and its square.
                # NOTE(review): add_(scalar, tensor)/addcmul_(scalar, a, b) are
                # the legacy positional forms, deprecated in newer PyTorch in
                # favour of alpha=/value= keywords — confirm the pinned version.
                exp_avg.mul_(beta1).add_((1 - beta1), grad)
                exp_avg_sq.mul_(beta2).addcmul_((1 - beta2), grad, grad)
                if amsbound:
                    # AMSBound: normalize by the running max of second moments.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = (1 - (beta1 ** state['step']))
                bias_correction2 = (1 - (beta2 ** state['step']))
                step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
                # Dynamic bounds that tighten toward final_lr as steps grow.
                final_lr = ((group['final_lr'] * group['lr']) / base_lr)
                lower_bound = (final_lr * (1 - (1 / ((group['gamma'] * state['step']) + 1))))
                upper_bound = (final_lr * (1 + (1 / (group['gamma'] * state['step']))))
                # Clamp the per-element step size between the bounds, then
                # scale by the first-moment estimate and apply.
                step_size = torch.full_like(denom, step_size)
                step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)
                p.data.add_((- step_size))
        return loss
def build_fake_yaml():
    """Write 'fake_yaml.yaml': a TensorFlow basic-strategy tuning config for tests.

    Round-trips the literal through yaml.load/yaml.dump so the written file
    is normalized YAML.
    """
    # NOTE(review): the nesting indentation inside this literal looks collapsed;
    # confirm the YAML levels against the original source before relying on it.
    fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: x\n outputs: op_to_store\n device: cpu\n evaluation:\n accuracy:\n metric:\n topk: 1\n tuning:\n strategy:\n name: basic\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n performance_only: True\n workspace:\n path: saved\n '
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    with open('fake_yaml.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(y, f)
        # Redundant: the with-block closes the file again (second close is a no-op).
        f.close()
def feedforward_model(input_shapes, output_size, hidden_layer_sizes, activation='relu', output_activation='linear', preprocessors=None, name='feedforward_model', *args, **kwargs):
    """Build a Keras MLP over one or more (optionally preprocessed) inputs.

    Each input may have its own preprocessor (None means identity); the
    preprocessed inputs are concatenated on the last axis, passed through the
    dense hidden layers, and projected to `output_size`.

    Returns:
        A PicklableKerasModel wrapping the functional graph.
    """
    inputs = [tf.keras.layers.Input(shape=input_shape) for input_shape in input_shapes]
    if (preprocessors is None):
        preprocessors = ((None,) * len(inputs))
    preprocessed_inputs = [(preprocessor(input_) if (preprocessor is not None) else input_) for (preprocessor, input_) in zip(preprocessors, inputs)]
    # Lambda keeps the concat inside the Keras graph for serialization.
    concatenated = tf.keras.layers.Lambda((lambda x: tf.concat(x, axis=(- 1))))(preprocessed_inputs)
    out = concatenated
    for units in hidden_layer_sizes:
        out = tf.keras.layers.Dense(units, *args, activation=activation, **kwargs)(out)
    out = tf.keras.layers.Dense(output_size, *args, activation=output_activation, **kwargs)(out)
    model = PicklableKerasModel(inputs, out, name=name)
    return model
def test_global_var():
    """Staleness detection across a `global` write: rebinding x inside f()
    must flag the x-dependent y as stale on subsequent use."""
    run_cell('x = 0')
    run_cell('y = x + 1')
    run_cell('def f(): global x; x = 42')
    run_cell('logging.info(y)')
    # f() has not run yet, so y is still fresh.
    assert_not_detected()
    run_cell('f()')  # rebinds global x
    run_cell('logging.info(y)')
    # y is now stale with respect to x.
    assert_detected()
def right(continuous_pulse: Callable) -> Callable:
    """Wrap `continuous_pulse` in a sampler using the right-sample strategy.

    Equivalent to applying the `sampler(strategies.right_sample)` decorator:
    the returned callable evaluates the continuous pulse at right-edge
    sample points.
    """
    return sampler(strategies.right_sample)(continuous_pulse)
def load_mod(model_file):
    """Load and return a Keras model from `model_file`, logging the path."""
    model = tf.keras.models.load_model(model_file)
    print('Load from {}'.format(model_file))
    return model
def zeros_like(*args, torch_device=None, **kwargs):
    """torch.zeros_like with a module-default device.

    When `torch_device` is None, falls back to the module-level `device`;
    all other arguments pass straight through to torch.zeros_like.
    """
    target = device if torch_device is None else torch_device
    return torch.zeros_like(*args, **kwargs, device=target)
def batch_render(buffers_path, target_path, args):
    """Render `args.nb_renders` (buffers, target) image pairs with Tungsten.

    Side effects: creates output subdirectories, shells out to the Tungsten
    renderer with random seeds, moves the produced buffer/target images into
    their per-kind folders, and copies the reference render (resizing the LDR
    reference when `args.resolution` is set).
    """
    subdirs = ['render', 'albedo', 'normal', 'target']
    subdirs_paths = [os.path.join(args.output_dir, s) for s in subdirs]
    if (not os.path.isdir(args.output_dir)):
        os.mkdir(args.output_dir)
        [os.mkdir(s) for s in subdirs_paths]
    buffers_ext = ('.exr' if args.hdr_buffers else '.png')
    target_ext = ('.exr' if args.hdr_targets else '.png')
    for i in range(args.nb_renders):
        img_id = '{0:04d}'.format((i + 1))
        # Independent seeds for the source-buffers and target renders.
        seeds = np.random.randint(0, 1000000.0, size=2)
        render_source_cmd = f'{args.tungsten} -s {seeds[0]} -d {args.output_dir} {buffers_path}'
        render_target_cmd = f'{args.tungsten} -s {seeds[1]} -d {args.output_dir} {target_path}'
        sp.call(render_source_cmd.split())
        sp.call(render_target_cmd.split())
        mv_imgs = []
        for (name, subdir_path) in zip(subdirs, subdirs_paths):
            file_ext = (target_ext if (name == 'target') else buffers_ext)
            filename = os.path.join(args.output_dir, (name + file_ext))
            dest = os.path.join(subdir_path, f'{img_id}_{name}{file_ext}')
            # FIX: the move command previously read 'mv (unknown) {dest}',
            # leaving the computed `filename` unused and the command broken.
            mv_imgs.append(f'mv {filename} {dest}')
        for mv in mv_imgs:
            sp.call(mv.split())
    scene_root = os.path.dirname(buffers_path)
    if (args.hdr_buffers or args.hdr_targets):
        if args.resolution:
            print('Warning: Could not resize reference image, do it manually')
        mv_ref_hdr = f'cp {scene_root}/TungstenRender.exr {args.output_dir}/reference.exr'
        sp.call(mv_ref_hdr.split())
    else:
        if args.resolution:
            # Resize the LDR reference in place before copying it over.
            ref_ldr = Image.open(f'{scene_root}/TungstenRender.png')
            ref_ldr = ref_ldr.resize(tuple(args.resolution), Image.BILINEAR)
            ref_ldr.save(f'{scene_root}/TungstenRender.png')
        mv_ref_ldr = f'cp {scene_root}/TungstenRender.png {args.output_dir}/reference.png'
        sp.call(mv_ref_ldr.split())
def calculate(O21, O22, l_buff, p_buff, duration):
    """Compute the random-forest quality score for one streaming session.

    Args:
        O21: per-second audio quality scores.
        O22: per-second video quality scores.
        l_buff: buffering-event lengths (seconds).
        p_buff: buffering-event start positions (seconds from stream start).
        duration: media duration in seconds.
    Returns:
        The score produced by the bundled decision trees in ./trees.
    """
    # A buffering event at position 0 is initial buffering, treated separately.
    if (len(l_buff) and len(p_buff)):
        if (p_buff[0] == 0):
            initial_buffering_length = l_buff[0]
        else:
            initial_buffering_length = 0
    else:
        initial_buffering_length = 0
    rebuf_stats = get_rebuf_stats(l_buff, p_buff, duration)
    # Initial buffering is weighted at one third of a mid-stream stall
    # (added to the total-stall and stall-ratio features).
    rebuf_stats[1] = (((1.0 * initial_buffering_length) / 3.0) + rebuf_stats[1])
    rebuf_stats[3] = ((((1.0 * initial_buffering_length) / duration) / 3.0) + rebuf_stats[3])
    O21_rounded = np.around(O21, decimals=3)
    O22_rounded = np.around(O22, decimals=3)
    # MOS-scale features: mode 3 for video, mode 2 for audio
    # (presumably scale_moses mode codes — TODO confirm).
    sec_moses_feature_video = scale_moses(O22_rounded, 3)
    sec_moses_feature_audio = scale_moses(O21_rounded, 2)
    # Low percentiles of video quality capture worst-case stretches.
    sec_mos_stat = np.percentile(O22_rounded, [1, 5, 10]).tolist()
    tree_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'trees'))
    # Feature vector order: rebuffering stats, video MOS features,
    # percentile stats, audio MOS features, duration.
    rf_score = execute_trees(np.array(((((rebuf_stats + sec_moses_feature_video) + sec_mos_stat) + sec_moses_feature_audio) + [duration])).astype('float64'), path=tree_path)
    return rf_score
def dnbins(nbins, dlogq):
    """Return the bin step floor(dlogq * nbins), clamped to at least 1.

    A negative dlogq short-circuits to 1.
    """
    if dlogq < 0:
        return 1
    return max(int(np.floor(dlogq * nbins)), 1)
def binary_focal_loss(gt, pr, gamma=2.0, alpha=0.25, **kwargs):
    """Binary focal loss averaged over all elements.

    kwargs must provide `backend`, a Keras-style backend exposing
    clip/epsilon/pow/log/mean. `gamma` focuses on hard examples; `alpha`
    balances the positive/negative classes.
    """
    backend = kwargs['backend']
    eps = backend.epsilon()
    pr = backend.clip(pr, eps, (1.0 - eps))
    # Positive-class term, down-weighted by alpha * (1 - p)^gamma.
    pos_term = ((- gt) * ((alpha * backend.pow((1 - pr), gamma)) * backend.log(pr)))
    # Negative-class term, the mirrored counterpart.
    neg_term = ((- (1 - gt)) * (((1 - alpha) * backend.pow(pr, gamma)) * backend.log((1 - pr))))
    return backend.mean((neg_term + pos_term))
class OrthogonalFusion(layers.Layer):
    """DOLG-style orthogonal fusion of a global descriptor with local features.

    Projects the local feature map onto the global descriptor, subtracts the
    projection to keep only the orthogonal component, then concatenates the
    broadcast global descriptor with it along channels.

    call() inputs: (local_feat: (B, H, W, D), global_feat: (B, D)).
    Output: (B, H, W, 2*D).
    """

    def __init__(self, **kwargs):
        super().__init__(name='OrthogonalFusion', **kwargs)

    def call(self, inputs):
        (local_feat, global_feat) = inputs
        height = local_feat.shape[1]
        width = local_feat.shape[2]
        depth = local_feat.shape[3]
        # Flatten spatial dims: (B, H*W, D) then transpose to (B, D, H*W).
        local_feat = tf.reshape(local_feat, [(- 1), (height * width), depth])
        local_feat = tf.transpose(local_feat, perm=[0, 2, 1])
        # <g, l> per spatial position, then scale g by it: (B, D, H*W).
        projection = tf.matmul(tf.expand_dims(global_feat, axis=1), local_feat)
        projection = tf.matmul(tf.expand_dims(global_feat, axis=2), projection)
        projection = tf.reshape(projection, [(- 1), height, width, depth])
        # Normalize by ||g||^2.
        global_feat_norm = tf.norm(global_feat, ord=2, axis=1)
        projection = (projection / tf.reshape((global_feat_norm * global_feat_norm), shape=[(- 1), 1, 1, 1]))
        # NOTE(review): perm=[0, 1, 2] is an identity transpose, so local_feat
        # is still laid out (B, D, H*W) when reshaped to (B, H, W, D). The
        # subtraction stays consistent only because `projection` was reshaped
        # from the same layout — confirm against the reference DOLG code.
        local_feat = tf.transpose(local_feat, perm=[0, 1, 2])
        local_feat = tf.reshape(local_feat, [(- 1), height, width, depth])
        orthogonal_comp = (local_feat - projection)
        # Broadcast the global descriptor to the spatial grid and concatenate.
        global_feat = tf.expand_dims(tf.expand_dims(global_feat, axis=1), axis=1)
        global_feat = tf.broadcast_to(global_feat, tf.shape(local_feat))
        output = tf.concat([global_feat, orthogonal_comp], axis=(- 1))
        return output
def text_to_conll(f):
    """Convert raw text (file-like `f`) to CoNLL token-per-line format.

    Sentence-splits unless options.nosplit, tokenizes with the Stanford
    tokenizer (falling back to the ARK Twitter tokenizer on timeout), and
    optionally relabels tokens from standoff annotations when
    options.annsuffix is set.

    Returns:
        StringIO of tab-separated `token<TAB>label` lines, with blank lines
        between sentences.
    """
    global options
    if options.nosplit:
        sentences = f.readlines()
    else:
        sentences = []
        for l in f:
            l = sentencebreaks_to_newlines(l)
            sentences.extend([s for s in NEWLINE_TERM_REGEX.split(l) if s])
    lines = []
    # Character offset of the current sentence within the whole document.
    offset = 0
    fixed_sentences = []
    for s in sentences:
        fixed_sentences.append(s)
    for s in fixed_sentences:
        nonspace_token_seen = False  # NOTE(review): set but never used
        try:
            tokens = stokenizer.tokenize(s)
        except stokenizer.TimedOutExc as e:
            # Stanford tokenizer timed out; fall back to the ARK tokenizer.
            try:
                print('using ark tokenizer')
                tokens = ark_twokenize.tokenizeRawTweetText(s)
            except Exception as e:
                print(e)
        token_w_pos = map_text_to_char(s, tokens, offset)
        for (t, pos) in token_w_pos:
            if (not t.isspace()):
                # Initial label 'O' plus character span, possibly relabelled below.
                lines.append(['O', pos, (pos + len(t)), t])
        lines.append([])  # blank line terminates the sentence
        offset += len(s)
    if options.annsuffix:
        lines = relabel(lines, get_annotations(f.name), f)
    # Keep only (token, label) columns for the CoNLL output.
    lines = [([l[3], l[0]] if l else l) for l in lines]
    return StringIO('\n'.join(('\t'.join(l) for l in lines)))
class EvalHook(HookBase):
    """Trainer hook that runs an evaluation function every `eval_period`
    iterations (skipping the final one) and, optionally, once after training.

    The eval function must return a (possibly nested) dict of floats, which
    is flattened into the trainer's metric storage.
    """

    def __init__(self, eval_period, eval_function, eval_after_train=True):
        self._period = eval_period
        self._func = eval_function
        self._eval_after_train = eval_after_train

    def _do_eval(self):
        metrics = self._func()
        if metrics:
            assert isinstance(metrics, dict), 'Eval function must return a dict. Got {} instead.'.format(metrics)
            flat = flatten_results_dict(metrics)
            # Every leaf must be float-convertible before being logged.
            for key, val in flat.items():
                try:
                    val = float(val)
                except Exception as e:
                    raise ValueError("[EvalHook] eval_function should return a nested dict of float. Got '{}: {}' instead.".format(key, val)) from e
            self.trainer.storage.put_scalars(**flat, smoothing_hint=False)
        # Keep all workers in lockstep after evaluation.
        comm.synchronize()

    def after_step(self):
        upcoming = self.trainer.iter + 1
        if self._period > 0 and upcoming % self._period == 0 and upcoming != self.trainer.max_iter:
            self._do_eval()

    def after_train(self):
        if self._eval_after_train and (self.trainer.iter + 1) >= self.trainer.max_iter:
            self._do_eval()
        # Release the eval function (and anything its closure holds).
        del self._func
def three_comp_average(comp1, comp2, comp3):
    """Return the root-sum-of-squares of the three components."""
    sum_of_squares = comp1 * comp1 + comp2 * comp2 + comp3 * comp3
    return np.sqrt(sum_of_squares)
def subset_reencode_features(x_unvec, feat_encoding_dict):
    """Re-encode unvectorized feature indices via `feat_encoding_dict`,
    dropping any feature index absent from the mapping."""
    remapped = []
    for sample in x_unvec:
        kept = [feat_encoding_dict[feat] for feat in sample if feat in feat_encoding_dict]
        remapped.append(kept)
    return remapped
def find_output_tensors_info(subgraphs, tensor_names):
    """Collect output-tensor metadata for the requested tensor names.

    Flattens the output names/shapes/formats/types (and optional check-tensor
    info) across all subgraphs, then selects, in `tensor_names` order, the
    entries matching each requested name.

    Args:
        subgraphs: mapping of subgraph name -> config dict keyed by ModelKeys.
        tensor_names: output tensor names to look up.
    Returns:
        dict keyed by ModelKeys.* with parallel lists for the selected tensors.
    Raises:
        KeyError: if a requested name is not an output of any subgraph.
    """
    tensors_info = {}
    all_tensor_names = []
    all_tensor_shapes = []
    all_data_formats = []
    all_data_types = []
    all_check_tensor_names = []
    all_check_tensor_shapes = []
    for (subname, subgraph) in subgraphs.items():
        all_tensor_names.extend(subgraph[ModelKeys.output_tensors])
        all_tensor_shapes.extend(subgraph[ModelKeys.output_shapes])
        all_data_formats.extend(subgraph[ModelKeys.output_data_formats])
        all_data_types.extend(subgraph[ModelKeys.output_data_types])
        output_num = len(subgraph[ModelKeys.output_tensors])
        # Check tensors are optional per subgraph; pad with None so the
        # flattened lists stay index-aligned with the outputs.
        if (ModelKeys.check_tensors in subgraph):
            all_check_tensor_names.extend(subgraph[ModelKeys.check_tensors])
        else:
            all_check_tensor_names.extend(([None] * output_num))
        if (ModelKeys.check_shapes in subgraph):
            all_check_tensor_shapes.extend(subgraph[ModelKeys.check_shapes])
        else:
            all_check_tensor_shapes.extend(([None] * output_num))
    # Index of each tensor name within the flattened lists.
    name_id = {}
    for i in range(len(all_tensor_names)):
        name_id[all_tensor_names[i]] = i
    tensors_info[ModelKeys.output_tensors] = []
    tensors_info[ModelKeys.output_shapes] = []
    tensors_info[ModelKeys.output_data_formats] = []
    tensors_info[ModelKeys.output_data_types] = []
    tensors_info[ModelKeys.check_tensors] = []
    tensors_info[ModelKeys.check_shapes] = []
    for tensor_name in tensor_names:
        tensor_name = str(tensor_name)
        i = name_id[tensor_name]
        tensors_info[ModelKeys.output_tensors].append(tensor_name)
        tensors_info[ModelKeys.output_shapes].append(all_tensor_shapes[i])
        tensors_info[ModelKeys.output_data_formats].append(all_data_formats[i])
        tensors_info[ModelKeys.output_data_types].append(all_data_types[i])
        tensors_info[ModelKeys.check_tensors].append(all_check_tensor_names[i])
        tensors_info[ModelKeys.check_shapes].append(all_check_tensor_shapes[i])
    return tensors_info
def test_test_naive_weighted_average_with_stats():
    """Smoke-test NaiveWeightedAverageWithStats on fake traffic data.

    Builds a random uint8 movie batch and per-sample metadata, writes fake
    per-weekday means/zeros stats files for all cities into a temp dir, then
    runs the model once. Only checks that the call completes.
    """
    # Input movie: (batch=8, time=12, H=495, W=436, channels=8).
    x = torch.randint(low=0, high=256, dtype=torch.uint8, size=(8, 12, 495, 436, 8))
    # Metadata per sample: weekday in [0, 7) and a test-slot index.
    additional_data = torch.cat((torch.randint(low=0, high=7, dtype=torch.uint8, size=(8, 1)), torch.randint(low=0, high=MAX_TEST_SLOT_INDEX, dtype=torch.uint8, size=(8, 1))), axis=1)
    assert (additional_data.shape == (8, 2))
    with tempfile.TemporaryDirectory() as tempdir:
        tempdir_path = Path(tempdir)
        print('creating fake data')
        # One means/zeros stats file per (city, weekday) pair.
        for d in range(7):
            for city in (spatio_temporal_cities + temporal_cities):
                write_data_to_h5(np.random.randint(0, 255, size=(24, 495, 436, 8)), filename=str((tempdir_path / f'{city}_{d}_means.h5')), compression_level=0)
                write_data_to_h5(np.random.randint(0, 12, size=(24, 495, 436, 8)), filename=str((tempdir_path / f'{city}_{d}_zeros.h5')), compression_level=0)
        print('fake data created')
        m = NaiveWeightedAverageWithStats(stats_dir=tempdir_path)
        m(x, city='VIENNA', additional_data=additional_data)
def run():
    """CLI entry point: train or test the PRVS inpainting model.

    With --test, loads only the generator and runs inference over the
    dataset; otherwise loads generator + discriminator and trains
    (optionally fine-tuning with --finetune).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_root', type=str, default='../../Dataset/Pairs_street_view/paris_auged')
    parser.add_argument('--mask_root', type=str, default='../../Dataset/irregular_mask/testing_mask_dataset_auged')
    parser.add_argument('--model_save_path', type=str, default='checkpoint')
    parser.add_argument('--result_save_path', type=str, default='results')
    parser.add_argument('--target_size', type=int, default=256)
    parser.add_argument('--mask_mode', type=int, default=0)
    parser.add_argument('--g_path', type=str, default='checkpoint/100000.pth')
    parser.add_argument('--d_path', type=str, default='checkpoint/100000.pth')
    parser.add_argument('--batch_size', type=int, default=6)
    parser.add_argument('--n_threads', type=int, default=6)
    parser.add_argument('--finetune', action='store_true')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--gpu_id', type=str, default='0')
    args = parser.parse_args()
    # Pin the process to the requested GPU before any CUDA initialization.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    model = PRVSModel()
    if args.test:
        # Inference: generator weights only, default (batch=1) dataloader.
        model.initialize_model(args.g_path, train=False)
        model.cuda()
        dataloader = DataLoader(Dataset(args.data_root, args.mask_root, args.mask_mode, args.target_size, mask_reverse=True))
        model.test(dataloader, args.result_save_path)
    else:
        # Training: generator + discriminator, shuffled multi-worker loader.
        model.initialize_model(args.g_path, args.d_path, train=True)
        model.cuda()
        dataloader = DataLoader(Dataset(args.data_root, args.mask_root, args.mask_mode, args.target_size, mask_reverse=True), batch_size=args.batch_size, shuffle=True, num_workers=args.n_threads)
        model.train(dataloader, args.model_save_path, args.finetune)
def evaluate(model, data, indices):
    """Evaluate ``model`` over the batches named by ``indices``.

    Uses a background thread to prefetch the next batch (via module-level
    data handler ``dh`` and ``fetch_batch``) while the current one is being
    scored on GPU.

    Returns:
        (perplexity, wall_time): word-level perplexity and elapsed seconds.
    """
    start_time = time.time()
    eval_loss = 0.0
    eval_num_words = 0
    model.eval()
    with torch.no_grad():
        # Fetch the first batch eagerly; the loop prefetches the rest.
        batch = [dh.make_batch(data, indices[0])]
        for j in six.moves.range(len(indices)):
            (x_batch, h_batch, q_batch, a_batch_in, a_batch_out, s_batch) = batch.pop()
            if (j < (len(indices) - 1)):
                # Start fetching the next batch while we score this one.
                prefetch = threading.Thread(target=fetch_batch, args=[dh, data, indices[(j + 1)], batch])
                prefetch.start()
            x = [torch.from_numpy(x) for x in x_batch]
            h = [[torch.from_numpy(h) for h in hb] for hb in h_batch]
            q = [torch.from_numpy(q) for q in q_batch]
            ai = [torch.from_numpy(ai) for ai in a_batch_in]
            ao = [torch.from_numpy(ao) for ao in a_batch_out]
            s = torch.from_numpy(s_batch).cuda().float()
            (_, _, loss) = model.loss(x, h, q, ai, ao, s)
            # Weight the batch loss by its word count for a proper average.
            num_words = sum([len(s) for s in ao])
            eval_loss += (loss.cpu().data.numpy() * num_words)
            eval_num_words += num_words
            # Ensure the prefetched batch is ready before the next pop().
            # NOTE(review): indentation was reconstructed from mangled
            # source; if len(indices) == 1 no thread is ever started and
            # this raises NameError — confirm callers pass >= 2 batches.
            prefetch.join()
    model.train()
    wall_time = (time.time() - start_time)
    return (math.exp((eval_loss / eval_num_words)), wall_time)
def save_videos(videos_tensor, nrow, path):
    """Save a batch of videos as one tiled image grid.

    Args:
        videos_tensor: tensor of shape (B, C, T, H, W).
        nrow: number of grid cells per row.
        path: output image file path.
    """
    (b, c, t, h, w) = videos_tensor.shape
    # Fold time into the batch dimension so each frame becomes a grid cell.
    imgs_tensor = videos_tensor.permute(0, 2, 1, 3, 4).reshape((b * t), c, h, w)
    imgs = make_grid(imgs_tensor, nrow=nrow, normalize=True)
    # to_pil_image already yields a saveable PIL image; the previous
    # PIL -> numpy -> PIL round-trip was redundant and has been removed.
    F.to_pil_image(imgs.detach()).save(path)
def main():
    """Detokenize stdin line-by-line with Moses and stream results to stdout."""
    # Fix: removed a leftover `pdb.set_trace()` debugging breakpoint that
    # halted the pipeline on every invocation.
    moses_detok = MosesDetokenizer(lang='en')
    for line in sys.stdin:
        decoded_line = decode(line.strip(), moses_detok)
        sys.stdout.write((decoded_line + '\n'))
        # Flush per line so downstream pipes see output immediately.
        sys.stdout.flush()
class OptimizedInstructor(InstructorEmbedding.INSTRUCTOR):
    """INSTRUCTOR embedding model that loads optimized transformer modules
    in place of the stock sentence-transformers ones."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _load_auto_model(self, model_name_or_path, token: Optional[Union[(bool, str)]], cache_folder: Optional[str]):
        """Fallback loader when no sentence-transformers config exists:
        plain transformer + MEAN pooling."""
        # NOTE(review): 'OptimzedTransformer' (sic) must match the external
        # class name exactly — verify spelling at its definition site.
        logger.warning('No sentence-transformers model found with name {}.Creating a new one with MEAN pooling.'.format(model_name_or_path))
        transformer_model = OptimzedTransformer(model_name_or_path, cache_dir=cache_folder, model_args={'token': token})
        pooling_model = sentence_transformers.models.Pooling(transformer_model.get_word_embedding_dimension(), 'mean')
        return [transformer_model, pooling_model]

    def _load_sbert_model(self, model_name_or_path: str, token: Optional[Union[(bool, str)]], cache_folder: Optional[str]):
        """Load a sentence-transformers-format model, substituting the
        optimized InstructorTransformer for module index 0.

        Returns an OrderedDict of name -> module, in pipeline order.
        """
        # Top-level config: carries the creating library's version info.
        config_sentence_transformers_json_path = sentence_transformers.util.load_file_path(model_name_or_path, 'config_sentence_transformers.json', token=token, cache_folder=cache_folder)
        if (config_sentence_transformers_json_path is not None):
            with open(config_sentence_transformers_json_path) as fIn:
                self._model_config = json.load(fIn)
            if (('__version__' in self._model_config) and ('sentence_transformers' in self._model_config['__version__']) and (self._model_config['__version__']['sentence_transformers'] > sentence_transformers.__version__)):
                # NOTE(review): this literal was rejoined across a mangled
                # line break in the source — confirm the exact wording
                # against the original file.
                logger.warning('You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n'.format(self._model_config['__version__']['sentence_transformers'], sentence_transformers.__version__))
        # Optional model card; read best-effort, errors ignored.
        model_card_path = sentence_transformers.util.load_file_path(model_name_or_path, 'README.md', token=token, cache_folder=cache_folder)
        if (model_card_path is not None):
            try:
                with open(model_card_path, encoding='utf8') as fIn:
                    self._model_card_text = fIn.read()
            except:
                pass
        modules_json_path = sentence_transformers.util.load_file_path(model_name_or_path, 'modules.json', token=token, cache_folder=cache_folder)
        with open(modules_json_path) as fIn:
            modules_config = json.load(fIn)
        modules = OrderedDict()
        for module_config in modules_config:
            if (module_config['idx'] == 0):
                # Module 0 is the transformer -> swap in the optimized one.
                logger.info('load Optimized InstructorTransformer')
                kwargs = {}
                # Look for any of the known per-architecture config files.
                for config_name in ['sentence_bert_config.json', 'sentence_roberta_config.json', 'sentence_distilbert_config.json', 'sentence_camembert_config.json', 'sentence_albert_config.json', 'sentence_xlm-roberta_config.json', 'sentence_xlnet_config.json']:
                    config_path = sentence_transformers.util.load_file_path(model_name_or_path, config_name, token=token, cache_folder=cache_folder)
                    if (config_path is not None):
                        with open(config_path) as fIn:
                            kwargs = json.load(fIn)
                        break
                # Thread the auth token through to the HF loader.
                if ('model_args' in kwargs):
                    kwargs['model_args']['token'] = token
                else:
                    kwargs['model_args'] = {'token': token}
                module = OptimizedInstructorTransformer(model_name_or_path, cache_dir=cache_folder, **kwargs)
            elif (module_config['idx'] == 1):
                # Module 1 is INSTRUCTOR's custom pooling layer.
                module_class = InstructorEmbedding.INSTRUCTOR_Pooling
                module_path = get_module_path(model_name_or_path, module_config['path'], token, cache_folder)
                module = module_class.load(module_path)
            else:
                # Remaining modules load generically by dotted type name.
                module_class = InstructorEmbedding.import_from_string(module_config['type'])
                module_path = get_module_path(model_name_or_path, module_config['path'], token, cache_folder)
                module = module_class.load(module_path)
            modules[module_config['name']] = module
        return modules
def iterate_sys_modules():
    """Yield (name, module) pairs from sys.modules.

    Snapshots the module table first so iteration is safe even if imports
    happen concurrently; skips blacklisted names and None placeholders.
    """
    snapshot = list(sys.modules.items())
    for name, module in snapshot:
        if module is None:
            continue
        if name in MODULE_BLACKLIST:
            continue
        yield (name, module)
def build_causal_conv1d_block(block_arch):
    """Return a zero-argument factory building the CausalConv1d described
    by ``block_arch`` ('idim', 'odim', 'kernel_size')."""
    in_dim = block_arch['idim']
    out_dim = block_arch['odim']
    kernel = block_arch['kernel_size']

    def _factory():
        return CausalConv1d(in_dim, out_dim, kernel)

    return _factory
class ResidualConv(nn.Module):
    """Residual block: two 3x3 convs with a 1x1 projection shortcut,
    followed by GroupNorm and ELU (attribute names kept for checkpoint
    compatibility)."""

    def __init__(self, in_channels, out_channels, stride, dropout=None):
        super().__init__()
        # Main path: first conv may downsample via `stride`.
        self.conv1 = Conv2D(in_channels, out_channels, 3, stride)
        self.conv2 = Conv2D(out_channels, out_channels, 3, 1)
        # Shortcut: 1x1 conv matching channels and stride.
        self.conv3 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
        self.normalize = torch.nn.GroupNorm(16, out_channels)
        self.activ = nn.ELU(inplace=True)
        if dropout:
            # Optional spatial dropout on the shortcut branch only.
            self.conv3 = nn.Sequential(self.conv3, nn.Dropout2d(dropout))

    def forward(self, x):
        main_path = self.conv2(self.conv1(x))
        shortcut = self.conv3(x)
        return self.activ(self.normalize(main_path + shortcut))
class GaussianPolicy(nn.Module):
    """SAC-style Gaussian policy: a 2-layer MLP producing mean and clamped
    log-std, with optional tanh squashing (``args.tanh``)."""

    def __init__(self, num_inputs, num_actions, hidden_dim, args):
        super(GaussianPolicy, self).__init__()
        self.linear1 = nn.Linear(num_inputs, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.mean_linear = nn.Linear(hidden_dim, num_actions)
        self.log_std_linear = nn.Linear(hidden_dim, num_actions)
        self.apply(weights_init_)
        self.tanh = args.tanh

    def encode(self, state):
        """Map a state to (mean, log_std) of the action distribution."""
        hidden = F.relu(self.linear2(F.relu(self.linear1(state))))
        mean = self.mean_linear(hidden)
        # Clamp log-std into a numerically safe range.
        log_std = torch.clamp(self.log_std_linear(hidden), min=LOG_SIG_MIN, max=LOG_SIG_MAX)
        return (mean, log_std)

    def forward(self, state, reparam=False):
        """Sample an action; use rsample() when ``reparam`` for pathwise grads.

        Returns (action, log_prob, x_t, mean, log_std).
        """
        (mean, log_std) = self.encode(state)
        dist = Normal(mean, log_std.exp())
        x_t = dist.rsample() if (reparam == True) else dist.sample()
        action = torch.tanh(x_t) if self.tanh else x_t
        log_prob = dist.log_prob(x_t)
        if self.tanh:
            # Change-of-variables correction for the tanh squashing.
            log_prob = log_prob - torch.log(((1 - action.pow(2)) + epsilon))
        log_prob = log_prob.sum(1, keepdim=True)
        return (action, log_prob, x_t, mean, log_std)
def is_pt_tf_cross_test(test_case):
    """Decorator marking a test as a PyTorch+TensorFlow cross test.

    Skips the test unless cross tests are enabled AND both frameworks are
    installed; otherwise tags it with the pytest marker when pytest exists.
    """
    cross_tests_possible = (_run_pt_tf_cross_tests and is_torch_available() and is_tf_available())
    if not cross_tests_possible:
        return unittest.skip('test is PT+TF test')(test_case)
    try:
        import pytest
    except ImportError:
        # pytest is absent (e.g. plain unittest run): leave the test as-is.
        return test_case
    return pytest.mark.is_pt_tf_cross_test()(test_case)
def test_planar_hull(nbr=7, size=9):
    """Smoke-test the planar convex hull on random 2-D points in [-size, size]."""
    points = random_points(2, nbr, (- size), size)
    print('the points :', points)
    (hull_vertices, inner_normals) = planar_convex_hull(points)
    print('the vertices :', hull_vertices)
    print('inner normals :', inner_normals)
def keypoint_mpjpe(pred, gt, mask):
    """Compute MPJPE and Procrustes-aligned P-MPJPE over masked joints.

    Args:
        pred: predicted joint positions; iterated per sample and paired with
            ``gt`` (assumed shape (N, J, C) — TODO confirm with callers).
        gt: ground-truth joints, same shape as ``pred``.
        mask: boolean array selecting valid joints; must have >= 1 True.

    Returns:
        (mpjpe, p_mpjpe): mean per-joint position errors before and after
        per-sample similarity alignment.
    """
    assert mask.any()
    # Fix: np.stack requires a sequence of arrays; passing a generator
    # raises TypeError on modern NumPy. Use a list comprehension.
    pred_aligned = np.stack([compute_similarity_transform(pred_i, gt_i)
                             for (pred_i, gt_i) in zip(pred, gt)])
    mpjpe = np.linalg.norm((pred - gt), ord=2, axis=(- 1))[mask].mean()
    p_mpjpe = np.linalg.norm((pred_aligned - gt), ord=2, axis=(- 1))[mask].mean()
    return (mpjpe, p_mpjpe)
def _loader_switch_cls(cls):
    """Return a subclass of ``cls`` whose instantiation pins ``image_size=128``.

    ``Loader.__new__`` constructs and returns an instance of the *original*
    ``cls`` (not of Loader), so Python never calls ``Loader.__init__`` —
    the NotImplementedError there is a guard, not a reachable path.
    """
    class Loader(cls):
        def __init__(self, *args, image_size=None, **kwargs):
            # Unreachable by design: __new__ returns a non-Loader instance.
            raise NotImplementedError()

        def __new__(_cls, *args, **kwargs):
            # Forward all arguments to the wrapped class, forcing image_size.
            return cls(*args, **kwargs, image_size=128)
    return Loader
def get_labevents_extractors(data_dir, extractor_map):
    """Build the 'labevents' ExtractorInfo and register it in ``extractor_map``.

    Side effect: extractor_map['labevents'] is set to the one-element list of
    extractors. Returns the table name string.
    """
    extractors = []
    table = 'labevents'
    # Rows are keyed by subject_id + hadm_id joined with '_'.
    id_extractor = MultiExtractor(names=['subject_id', 'hadm_id'], sep='_')
    outpath = os.path.join(data_dir, (table + '.tsv'))
    time_ext = TimeExtractor(name='charttime', converter=time2str)
    type_ext = FmtExtractor(names=['itemid'], fmt='labevents.%s')
    value_ext = MultiExtractor(names=['value', 'flag'])
    # Only keep rows whose 'value' passes exist_test.
    test_ext = TestExtractor(name='value', test=exist_test)
    extractor = ExtractorInfo(table, outpath, id_extractor, time_ext, type_ext, value_ext, test_ext)
    extractors.append(extractor)
    extractor_map[table] = extractors
    # NOTE(review): returns the table *name*, not `extractors` — the list is
    # only exposed through extractor_map. Confirm callers expect the string.
    return table
def cross_entropy(z, zt):
    """Soft cross-entropy between the softmax distributions of two logit
    tensors, averaged over *all* elements (not summed per row)."""
    p = F.softmax(z, dim=1)
    log_q = F.softmax(zt, dim=1).log()
    return (- (p * log_q).mean())
# NOTE(review): the next four statements are the residue of stripped CLI
# decorators — the pattern matches click exactly:
#   @click.command()
#   @click.argument('yaml_path')
#   @click.option('--just-cache-data', ...)
#   @click.option('--do_test', ...)
# As written they are NOT valid Python (keyword args inside bare tuples);
# restore the decorators before running this module.
()
('yaml_path')
('--just-cache-data', default=0, help='If 1, just writes data to cache; does not run experiment')
('--do_test', default=0, help='If 1, evaluates on the test set; hopefully just run this once!')
def run_yaml_experiment(yaml_path, just_cache_data, do_test):
    """Run a probing experiment described by a YAML config.

    Trains the probe to convergence, reloads the best params, predicts on
    train/dev (and test when ``do_test``), and reports metrics.
    """
    yaml_args = yaml.load(open(yaml_path), Loader=yaml.Loader)
    list_dataset = yaml_args['dataset']
    list_model = yaml_args['model']
    probe_model = yaml_args['probe']
    regimen_model = yaml_args['regimen']
    reporter_model = yaml_args['reporter']
    cache_model = yaml_args['cache']
    os.makedirs(regimen_model.reporting_root, exist_ok=True)
    train_dataloader = list_dataset.get_train_dataloader(shuffle=True)
    dev_dataloader = list_dataset.get_dev_dataloader(shuffle=False)
    if do_test:
        test_dataloader = list_dataset.get_test_dataloader(shuffle=False)
    # Touching the dataloaders above populates the cache; release its locks.
    cache_model.release_locks()
    if just_cache_data:
        print('Data caching done. Exiting...')
        return
    regimen_model.train_until_convergence(probe_model, list_model, None, train_dataloader, dev_dataloader, gradient_steps_between_eval=min(1000, len(train_dataloader)))
    # Reload the best checkpoint written during training.
    probe_model.load_state_dict(torch.load(regimen_model.params_path))
    # Re-create loaders unshuffled so predictions align with examples.
    train_dataloader = list_dataset.get_train_dataloader(shuffle=False)
    dev_dataloader = list_dataset.get_dev_dataloader(shuffle=False)
    dev_predictions = regimen_model.predict(probe_model, list_model, dev_dataloader)
    train_predictions = regimen_model.predict(probe_model, list_model, train_dataloader)
    if do_test:
        test_dataloader = list_dataset.get_test_dataloader(shuffle=False)
        test_predictions = regimen_model.predict(probe_model, list_model, test_dataloader)
    train_dataloader = list_dataset.get_train_dataloader(shuffle=False)
    dev_dataloader = list_dataset.get_dev_dataloader(shuffle=False)
    reporter_model(train_predictions, train_dataloader, TRAIN_STR)
    reporter_model(dev_predictions, dev_dataloader, DEV_STR)
    if do_test:
        test_dataloader = list_dataset.get_test_dataloader(shuffle=False)
        reporter_model(test_predictions, test_dataloader, TEST_STR)
def get_article(article_id):
    """Load the PMC article with the given id from the base XML directory."""
    nxml_name = 'PMC{}.nxml'.format(article_id)
    full_path = os.path.join(base_XML_path, nxml_name)
    return article_reader.Article(full_path, use_plain_text=USE_PLAIN_TEXT)
def load_values(save_dir, valid=False):
    """Load saved training curves from ``save_dir``/plots.

    Returns [d_loss_iter, d_loss, epochs, last_d_loss_iter] as lists plus
    the final element of the first curve. The ``valid`` flag is accepted
    for interface compatibility but unused here.
    """
    plots_dir = save_dir + '/plots'
    curve_names = ('track_d_loss_iter', 'track_d_loss', 'epochs')
    outputs = [list(np.load('{}/{}.npy'.format(plots_dir, name))) for name in curve_names]
    outputs.append(outputs[0][-1])
    return outputs
# NOTE(review): a stray bare `_class` token preceded this class in the
# source — almost certainly the residue of a stripped decorator (StyleGAN2
# uses `@persistence.persistent_class` here). The bare name raised
# NameError at import time, so it has been removed; restore the decorator
# if pickle-persistence of this layer is required.
class Conv2dLayer(torch.nn.Module):
    """2D conv layer with optional up/down-sampling, equalized-LR weight
    gain, and fused bias + activation (StyleGAN2-style)."""

    def __init__(self, in_channels, out_channels, kernel_size, bias=True, activation='linear', up=1, down=1, resample_filter=[1, 3, 3, 1], conv_clamp=None, channels_last=False, trainable=True):
        super().__init__()
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        # FIR filter used by up/down-sampling (read-only buffer).
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = (kernel_size // 2)
        # Equalized learning rate: scale weights at runtime instead of init.
        self.weight_gain = (1 / np.sqrt((in_channels * (kernel_size ** 2))))
        self.act_gain = bias_act.activation_funcs[activation].def_gain
        memory_format = (torch.channels_last if channels_last else torch.contiguous_format)
        weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)
        bias = (torch.zeros([out_channels]) if bias else None)
        if trainable:
            self.weight = torch.nn.Parameter(weight)
            self.bias = (torch.nn.Parameter(bias) if (bias is not None) else None)
        else:
            # Frozen layer: keep tensors as buffers so they still load/save.
            self.register_buffer('weight', weight)
            if (bias is not None):
                self.register_buffer('bias', bias)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        w = (self.weight * self.weight_gain)
        b = (self.bias.to(x.dtype) if (self.bias is not None) else None)
        # conv2d_resample can use a faster path when not upsampling.
        flip_weight = (self.up == 1)
        x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
        act_gain = (self.act_gain * gain)
        act_clamp = ((self.conv_clamp * gain) if (self.conv_clamp is not None) else None)
        x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)
        return x
class ComparableItemSet():
    """Mixin adding >= / <= comparisons backed by superset/subset tests on
    the instance's ``frozenset`` attribute (provided by subclasses)."""

    def issuperset(self, rhs):
        """True when this item set contains every element of ``rhs``."""
        return (self.frozenset >= rhs.frozenset)

    def issubset(self, rhs):
        """True when every element of this item set is in ``rhs``."""
        return (self.frozenset <= rhs.frozenset)

    def __ge__(self, rhs):
        # Dispatch through the method so subclasses can override it.
        return self.issuperset(rhs)

    def __le__(self, rhs):
        return self.issubset(rhs)
class DensePoseCOCOEvaluator(DatasetEvaluator):
    """Detectron2 evaluator computing DensePose GPS / GPSm metrics on COCO."""

    def __init__(self, dataset_name, distributed, output_dir=None):
        self._distributed = distributed
        self._output_dir = output_dir
        self._cpu_device = torch.device('cpu')
        self._logger = logging.getLogger(__name__)
        self._metadata = MetadataCatalog.get(dataset_name)
        json_file = PathManager.get_local_path(self._metadata.json_file)
        # COCO() prints to stdout on load; silence it.
        with contextlib.redirect_stdout(io.StringIO()):
            self._coco_api = COCO(json_file)

    def reset(self):
        self._predictions = []

    def process(self, inputs, outputs):
        """Convert each image's instances to COCO-format JSON records."""
        for (input, output) in zip(inputs, outputs):
            instances = output['instances'].to(self._cpu_device)
            # Clone before converting: convert() may modify in place.
            boxes = instances.pred_boxes.tensor.clone()
            boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
            instances.pred_densepose = instances.pred_densepose.to_result(boxes)
            json_results = prediction_to_json(instances, input['image_id'])
            self._predictions.extend(json_results)

    def evaluate(self):
        """Gather predictions across workers and run COCO DensePose eval.

        Non-main processes return None in the distributed case.
        """
        if self._distributed:
            synchronize()
            predictions = all_gather(self._predictions)
            predictions = list(itertools.chain(*predictions))
            if (not is_main_process()):
                return
        else:
            predictions = self._predictions
        return copy.deepcopy(self._eval_predictions(predictions))

    def _eval_predictions(self, predictions):
        """Optionally dump predictions to disk, then score them."""
        self._logger.info('Preparing results for COCO format ...')
        if self._output_dir:
            file_path = os.path.join(self._output_dir, 'coco_densepose_results.json')
            with open(file_path, 'w') as f:
                json.dump(predictions, f)
                f.flush()
                os.fsync(f.fileno())
        self._logger.info('Evaluating predictions ...')
        res = OrderedDict()
        (results_gps, results_gpsm) = _evaluate_predictions_on_coco(self._coco_api, predictions)
        res['densepose_gps'] = results_gps
        res['densepose_gpsm'] = results_gpsm
        return res
def print_dataset_stats(dataset):
    """Print per-split and overall statistics for a frame-level event dataset
    laid out under data/<dataset>/ (class.txt plus train/val/test json)."""
    print('=== {} ==='.format(dataset))
    class_file = os.path.join('data', dataset, 'class.txt')
    if (not os.path.isfile(class_file)):
        print('Dataset not found!')
        return
    print('Categories:', len(load_text(class_file)))
    src_videos = {}
    total_frames = 0
    total_events = 0
    all_videos = set()
    for split in ['train', 'val', 'test']:
        split_file = os.path.join('data', dataset, '{}.json'.format(split))
        if os.path.exists(split_file):
            print('{}:'.format(split.capitalize()))
            labels = load_json(split_file)
            num_events = sum([len(x['events']) for x in labels])
            num_frames = sum([x['num_frames'] for x in labels])
            # Clips may be cut from the same source video; count sources too.
            src_videos_split = {get_src_video(dataset, x['video']) for x in labels}
            print('\torig videos:', len(src_videos_split))
            print('\tvideos:', len(labels))
            print('\tevents:', num_events)
            print('\tframes:', num_frames)
            print('\tevents / frames (%):', round(((num_events / num_frames) * 100), 2))
            total_frames += num_frames
            total_events += num_events
            first_event = min([min((e['frame'] for e in x['events'])) for x in labels])
            # NOTE(review): despite the 'max frame' label printed below, this
            # computes the minimum gap between an event and the end of its
            # video (min of min, not max) — confirm intent.
            last_event = min([min(((x['num_frames'] - e['frame']) for e in x['events'])) for x in labels])
            print('\tmin frame (of first event):', first_event)
            print('\tmax frame (of last event):', last_event)
            # Guard against the same clip appearing in two splits.
            split_videos = {x['video'] for x in labels}
            assert (len((split_videos & all_videos)) == 0), 'Bad video splits!'
            all_videos.update(split_videos)
            src_videos[split] = src_videos_split
            label_counts = Counter()
            for x in labels:
                for e in x['events']:
                    label_counts[e['label']] += 1
            print('\tLabel counts:')
            for l in sorted(label_counts.keys()):
                print('\t\t{} : {}'.format(l, label_counts[l]))
    print('Overall:')
    # Falls back to the val split when there is no test split.
    print('\thas train/test orig video overlap:', (len((src_videos['train'] & src_videos.get('test', src_videos['val']))) > 0))
    print('\tnum frames:', total_frames)
    print('\tnum events:', total_events)
    print('\tevent %:', ((total_events * 100) / total_frames))
class VOCSegmentation(Dataset):
    """PASCAL VOC 2012 semantic-segmentation dataset.

    Reads image/mask pairs listed in ImageSets/Segmentation/<split>.txt and
    applies split-dependent augmentation pipelines.
    """
    NUM_CLASSES = 21

    def __init__(self, args, base_dir=Path.db_root_dir('pascal'), split='train'):
        """
        Args:
            args: namespace providing base_size and crop_size for transforms.
            base_dir: VOC root containing JPEGImages / SegmentationClass.
            split: split name or list of split names ('train', 'val').
        """
        super().__init__()
        self._base_dir = base_dir
        self._image_dir = os.path.join(self._base_dir, 'JPEGImages')
        self._cat_dir = os.path.join(self._base_dir, 'SegmentationClass')
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.args = args
        _splits_dir = os.path.join(self._base_dir, 'ImageSets', 'Segmentation')
        self.im_ids = []
        self.images = []
        self.categories = []
        for splt in self.split:
            with open(os.path.join(os.path.join(_splits_dir, (splt + '.txt'))), 'r') as f:
                lines = f.read().splitlines()
            for (ii, line) in enumerate(lines):
                _image = os.path.join(self._image_dir, (line + '.jpg'))
                _cat = os.path.join(self._cat_dir, (line + '.png'))
                # Fail fast on missing files rather than mid-training.
                assert os.path.isfile(_image)
                assert os.path.isfile(_cat)
                self.im_ids.append(line)
                self.images.append(_image)
                self.categories.append(_cat)
        assert (len(self.images) == len(self.categories))
        print('Number of images in {}: {:d}'.format(split, len(self.images)))

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        (_img, _target) = self._make_img_gt_point_pair(index)
        sample = {'image': _img, 'label': _target}
        # Pick the transform matching the (first applicable) split name.
        for split in self.split:
            if (split == 'train'):
                return self.transform_tr(sample)
            elif (split == 'val'):
                return self.transform_val(sample)

    def _make_img_gt_point_pair(self, index):
        # Mask is left in its paletted form; class ids live in pixel values.
        _img = Image.open(self.images[index]).convert('RGB')
        _target = Image.open(self.categories[index])
        return (_img, _target)

    def transform_tr(self, sample):
        """Training augmentation: flip, scale-crop, blur, normalize, tensor."""
        composed_transforms = transforms.Compose([tr.RandomHorizontalFlip(), tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size), tr.RandomGaussianBlur(), tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), tr.ToTensor()])
        return composed_transforms(sample)

    def transform_val(self, sample):
        """Validation pipeline: deterministic crop, normalize, tensor."""
        composed_transforms = transforms.Compose([tr.FixScaleCrop(crop_size=self.args.crop_size), tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), tr.ToTensor()])
        return composed_transforms(sample)

    def __str__(self):
        return (('VOC2012(split=' + str(self.split)) + ')')
def add_vtarg_and_adv(seg, gamma, lam):
    """Compute GAE(lambda) advantages and TD(lambda) value targets in place.

    Expects ``seg`` to carry 'new' (episode-start flags), 'vpred',
    'nextvpred' and 'rew'; writes 'adv' and 'tdlamret' back into it.
    """
    # Append a terminal sentinel so index t+1 is always valid.
    episode_starts = np.append(seg['new'], 0)
    values = np.append(seg['vpred'], seg['nextvpred'])
    rewards = seg['rew']
    horizon = len(rewards)
    advantages = np.empty(horizon, 'float32')
    running_gae = 0
    # Backward recursion: gae_t = delta_t + gamma*lam*nonterminal*gae_{t+1}.
    for t in reversed(range(horizon)):
        nonterminal = 1 - episode_starts[t + 1]
        delta = rewards[t] + gamma * values[t + 1] * nonterminal - values[t]
        running_gae = delta + gamma * lam * nonterminal * running_gae
        advantages[t] = running_gae
    seg['adv'] = advantages
    seg['tdlamret'] = advantages + seg['vpred']
def __linear_circuit_block(x_block, y_block, encoder):
    """Secure 8-bit block comparison via a ripple-carry adder circuit (CrypTen MPC).

    Evaluates the carry chain for two candidate carry-ins (0 and 1) in
    parallel, then selects between them — a carry-select style block for
    composing wider comparisons. Returns a BinarySharedTensor of sign bits
    with ``encoder`` attached.

    NOTE(review): all XOR/AND operations run on secret shares through the
    `resharing` protocol; the exact security semantics depend on that module.
    """
    from .binary import BinarySharedTensor
    from crypten.cuda import CUDALongTensor
    # Two carry-in hypotheses stacked along dim 0: [carry=0, carry=1].
    ci = torch_stack([torch.zeros_like(x_block), torch.ones_like(y_block)])
    for i in range(8):
        # i-th bit of each operand.
        xi = ((x_block >> i) & 1)
        yi = ((y_block >> i) & 1)
        (xi, yi) = (torch_stack([xi, xi]), torch_stack([yi, yi]))
        # Full-adder: sum bit and next carry, evaluated on shares.
        si = ((xi ^ yi) ^ ci)
        ci = (ci ^ resharing.AND_gate((xi ^ ci), (yi ^ ci)).share)
    # Fold the 8 carry bits into a single select bit (2-way mux per bit).
    select_bits = torch.zeros_like(ci[(0, 0)])
    for i in range(8):
        select_bits = (resharing.AND_gate((select_bits ^ 1), ci[(0, i)]).share ^ resharing.AND_gate(select_bits, ci[(1, i)]).share)
    # Mux the top (sign) sum bit of the selected hypothesis.
    sign_bits = (resharing.AND_gate((select_bits ^ 1), si[(0, 7)]).share ^ resharing.AND_gate(select_bits, si[(1, 7)]).share)
    sign_bits = sign_bits.long()
    if sign_bits.is_cuda:
        sign_bits = CUDALongTensor(sign_bits)
    sign_bits = BinarySharedTensor.from_shares(sign_bits, src=comm.get().get_rank())
    sign_bits.encoder = encoder
    return sign_bits
def activation_helper(activation, dim=None):
    """Map an activation name to a torch module (or identity for None).

    Raises ValueError for unknown names. ``dim`` is accepted for interface
    compatibility but unused.
    """
    if activation is None:
        # Identity pass-through.
        return lambda x: x
    registry = {
        'sigmoid': nn.Sigmoid,
        'tanh': nn.Tanh,
        'relu': nn.ReLU,
        'leakyrelu': nn.LeakyReLU,
    }
    act_cls = registry.get(activation)
    if act_cls is None:
        raise ValueError(('unsupported activation: %s' % activation))
    return act_cls()
def run(coco, cat_ids, output_dir, num_examples):
    """Sample random 6-DoF object poses per category and save them as text.

    Each output row is [x, y, z, qx, qy, qz, qw-style quaternion components]
    (position followed by a unit quaternion), written to <name>.txt.
    """
    object_scales = {1: 0.3, 2: 0.3, 3: 0.2, 4: 0.2, 5: 0.7, 6: 0.2, 7: 0.2, 8: 0.3, 9: 0.3, 10: 0.2}
    cats = {cat['id']: cat for cat in coco.dataset['categories']}
    for cat_id in cat_ids:
        cat_name = cats[cat_id]['name']
        print('generating {} poses for {}'.format(num_examples, cat_name))
        scale = object_scales[cat_id]
        # Positions: Gaussian centred in front of the camera, spread
        # proportional to the object's scale.
        mean_pos = [0, 0, (scale * 3)]
        cov_pos = np.diag(np.square(((scale * 0.5) * np.array([1.5, 0.8, 2]))))
        np.random.seed(0)
        positions = np.random.multivariate_normal(mean_pos, cov_pos, num_examples)
        # Orientations: random unit quaternions.
        quats = np.random.randn(num_examples, 4)
        quats = np.divide(quats, np.linalg.norm(quats, axis=1, keepdims=True))
        poses = np.append(positions, quats, axis=1)
        out_path = os.path.join(output_dir, '{}.txt'.format(cat_name))
        print('saving poses to {}'.format(out_path))
        np.savetxt(out_path, poses)
        print('')
def test_save(g1, tmp_path):
    """Round-trip a graph through save/load and verify edges and weights."""
    from dhg import load_structure
    target = tmp_path / 'g1'
    g1.save(target)
    reloaded = load_structure(target)
    # e[0] is the edge list, e[1] the parallel weight list.
    for (orig_edge, new_edge) in zip(g1.e[0], reloaded.e[0]):
        assert (orig_edge == new_edge)
    for (orig_w, new_w) in zip(g1.e[1], reloaded.e[1]):
        assert (orig_w == new_w)
def get_all_images_pool(image_names, path_voc):
    """Load the JPEG for every image name from <path_voc>/JPEGImages."""
    jpeg_dir = (path_voc + '/JPEGImages/')
    return [image.load_img(((jpeg_dir + name) + '.jpg'), False) for name in image_names]
def main(opts):
    """Horovod-distributed training entry point for referring-expression
    comprehension (UNITER-style).

    NOTE(review): indentation of this function was reconstructed from
    whitespace-mangled source; the per-epoch validation placement should be
    confirmed against the original file.
    """
    hvd.init()
    n_gpu = hvd.size()
    device = torch.device('cuda', hvd.local_rank())
    torch.cuda.set_device(hvd.local_rank())
    rank = hvd.rank()
    opts.rank = rank
    LOGGER.info(f'device: {device}, n_gpu: {n_gpu}, rank: {hvd.rank()}, 16-bits training: {opts.fp16}')
    if (opts.gradient_accumulation_steps < 1):
        raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(opts.gradient_accumulation_steps))
    # Seed every RNG for reproducibility.
    random.seed(opts.seed)
    np.random.seed(opts.seed)
    torch.manual_seed(opts.seed)
    if (n_gpu > 0):
        torch.cuda.manual_seed_all(opts.seed)
    LOGGER.info(f'Loading Train Dataset {opts.train_txt_db}, {opts.train_img_dir}')
    train_img_dir = ReImageFeatDir(opts.train_img_dir)
    train_dataset = ReferringExpressionDataset(opts.train_txt_db, train_img_dir, max_txt_len=opts.max_txt_len)
    val_img_dir = ReImageFeatDir(opts.val_img_dir)
    val_dataset = ReferringExpressionEvalDataset(opts.val_txt_db, val_img_dir, max_txt_len=opts.max_txt_len)
    # Checkpoint handling: explicit path, 'google-bert' (None -> HF init),
    # or 'scratch' (empty state dict).
    if (opts.checkpoint and (opts.checkpoint != 'scratch')):
        if (opts.checkpoint == 'google-bert'):
            checkpoint = None
        else:
            checkpoint = torch.load(opts.checkpoint)
    else:
        checkpoint = {}
    bert_model = json.load(open(f'{opts.train_txt_db}/meta.json'))['bert']
    model = BertForReferringExpressionComprehension.from_pretrained(bert_model, img_dim=2048, loss=opts.train_loss, margin=opts.margin, hard_ratio=opts.hard_ratio, mlp=opts.mlp, state_dict=checkpoint)
    if (opts.cut_bert != (- 1)):
        # Keep only the first `cut_bert` transformer layers.
        model.bert.encoder.layer = torch.nn.ModuleList(model.bert.encoder.layer[:opts.cut_bert])
    del checkpoint
    # Force a uniform dropout probability across every Dropout module.
    for (name, module) in model.named_modules():
        if isinstance(module, torch.nn.Dropout):
            if (module.p != opts.dropout):
                module.p = opts.dropout
                LOGGER.info(f'{name} set to {opts.dropout}')
    model.to(device)
    # Sync initial weights from rank 0 to all workers.
    broadcast_tensors([p.data for p in model.parameters()], 0)
    # No weight decay on biases and LayerNorm parameters.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': opts.weight_decay}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    if (opts.optim == 'adam'):
        OptimCls = Adam
    elif (opts.optim == 'adamax'):
        OptimCls = Adamax
    elif (opts.optim == 'adamw'):
        OptimCls = AdamW
    else:
        raise ValueError('invalid optimizer')
    optimizer = OptimCls(optimizer_grouped_parameters, lr=opts.learning_rate, betas=opts.betas)
    # Apex AMP mixed-precision wrapper (O2 when fp16 enabled).
    (model, optimizer) = amp.initialize(model, optimizer, enabled=opts.fp16, opt_level='O2')
    global_step = 0
    LOGGER.info('***** Running training *****')
    LOGGER.info(' Num examples = %d', len(train_dataset))
    LOGGER.info(' Batch size = %d', opts.train_batch_size)
    LOGGER.info(' Accumulate steps = %d', opts.gradient_accumulation_steps)
    LOGGER.info(' Num steps = %d', opts.num_train_steps)
    train_sampler = DistributedSampler(train_dataset, num_replicas=n_gpu, rank=rank, shuffle=False)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=opts.train_batch_size, num_workers=opts.n_workers, pin_memory=opts.pin_mem, collate_fn=re_collate)
    train_dataloader = PrefetchLoader(train_dataloader)
    val_sampler = DistributedSampler(val_dataset, num_replicas=n_gpu, rank=rank, shuffle=False)
    val_dataloader = DataLoader(val_dataset, sampler=val_sampler, batch_size=opts.val_batch_size, num_workers=opts.n_workers, pin_memory=opts.pin_mem, collate_fn=re_eval_collate)
    val_dataloader = PrefetchLoader(val_dataloader)
    # Only rank 0 writes logs, checkpoints and progress bars.
    if (rank == 0):
        save_training_meta(opts)
        TB_LOGGER.create(join(opts.output_dir, 'log'))
        pbar = tqdm(total=opts.num_train_steps)
        model_saver = ModelSaver(join(opts.output_dir, 'ckpt'), 'model_epoch')
        os.makedirs(join(opts.output_dir, 'results'))
        add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
    else:
        LOGGER.disabled = True
        pbar = NoOp()
        model_saver = NoOp()
    running_loss = RunningMeter(opts.train_loss)
    n_examples = 0
    n_epoch = 0
    (best_val_acc, best_epoch) = (None, None)
    start = time()
    # Quick hack: run an empty step so schedulers/AMP state initialize.
    optimizer.zero_grad()
    optimizer.step()
    while True:
        model.train()
        for (step, batch) in enumerate(train_dataloader):
            if (global_step >= opts.num_train_steps):
                break
            (*_, targets) = batch
            n_examples += targets.size(0)
            loss = model(*batch, compute_loss=True)
            loss = loss.sum()
            # Only unscale/all-reduce at accumulation boundaries.
            delay_unscale = (((step + 1) % opts.gradient_accumulation_steps) != 0)
            with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale) as scaled_loss:
                scaled_loss.backward()
                if (not delay_unscale):
                    grads = [p.grad.data for p in model.parameters() if (p.requires_grad and (p.grad is not None))]
                    all_reduce_and_rescale_tensors(grads, float(1))
            running_loss(loss.item())
            if (((step + 1) % opts.gradient_accumulation_steps) == 0):
                global_step += 1
                # Learning-rate schedule.
                if (opts.decay == 'linear'):
                    lr_this_step = (opts.learning_rate * warmup_linear(global_step, opts.warmup_steps, opts.num_train_steps))
                elif (opts.decay == 'invsqrt'):
                    lr_this_step = (opts.learning_rate * noam_schedule(global_step, opts.warmup_steps))
                elif (opts.decay == 'constant'):
                    lr_this_step = opts.learning_rate
                if (lr_this_step < 0):
                    lr_this_step = 1e-08
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_this_step
                TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
                # Average the running loss across workers for logging.
                losses = all_gather_list(running_loss)
                running_loss = RunningMeter(opts.train_loss, (sum((l.val for l in losses)) / len(losses)))
                TB_LOGGER.add_scalar(('loss_' + opts.train_loss), running_loss.val, global_step)
                TB_LOGGER.step()
                if (opts.grad_norm != (- 1)):
                    grad_norm = clip_grad_norm_(amp.master_params(optimizer), opts.grad_norm)
                    TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
                optimizer.step()
                optimizer.zero_grad()
                pbar.update(1)
                if ((global_step % 5) == 0):
                    torch.cuda.empty_cache()
                if ((global_step % 100) == 0):
                    # Throughput logging across all workers.
                    tot_ex = sum(all_gather_list(n_examples))
                    ex_per_sec = int((tot_ex / (time() - start)))
                    LOGGER.info(f'{tot_ex} examples trained at {ex_per_sec} ex/s')
                    TB_LOGGER.add_scalar('perf/ex_per_s', ex_per_sec, global_step)
        # End of epoch: validate, checkpoint, and track the best accuracy.
        (val_log, _) = validate(model, val_dataloader)
        TB_LOGGER.log_scaler_dict(val_log)
        n_epoch += 1
        model_saver.save(model, n_epoch)
        LOGGER.info(f'finished {n_epoch} epochs')
        if ((best_val_acc is None) or (val_log['valid/acc'] > best_val_acc)):
            best_val_acc = val_log['valid/acc']
            best_epoch = n_epoch
            model_saver.save(model, 'best')
        train_dataloader.loader.dataset.shuffle()
        if (global_step >= opts.num_train_steps):
            break
    # Final validation and per-rank result dump.
    (val_log, results) = validate(model, val_dataloader)
    with open(f'{opts.output_dir}/results/results_{global_step}_rank{rank}_final.json', 'w') as f:
        json.dump(results, f)
    TB_LOGGER.log_scaler_dict(val_log)
    model_saver.save(model, f'{global_step}_final')
    LOGGER.info(f'best_val_acc = {(best_val_acc * 100):.2f}% at epoch {best_epoch}.')
def ssast_patch_base_10s(ckpt, *args, **kwargs):
    """Build the SSAST-Base-Patch upstream expert configured for 10 s input.

    Any caller-supplied values for the overridden keys are replaced.
    """
    overrides = {
        'model_size': 'base_p',
        'pretrain_path': '/data/sls/scratch/yuangong/ssast/pretrained_model/SSAST-Base-Patch-400.pth',
        'target_length': 1000,
    }
    kwargs.update(overrides)
    return _UpstreamExpert(ckpt, *args, **kwargs)
class TestPytorchPruning(unittest.TestCase):
    """Integration test for Neural Compressor pruning callbacks on ResNet-18."""

    # Model under test; a class attribute, so it is shared across test methods.
    model = torchvision.models.resnet18()

    def test_pruning_class_config(self):
        """Check that per-layer pruning configs are parsed into pruners with
        the expected settings, then run one pruning-aware epoch on dummy data
        to exercise every callback hook in order."""
        # Two local configs: progressive SNIP channel pruning on layer1/layer2
        # (downsample convs excluded), and pattern-lock pruning on layer3.
        local_configs = [{'op_names': ['layer1.*', 'layer2.*'],
                          'excluded_op_names': ['downsample.*'],
                          'target_sparsity': 0.6,
                          'pattern': 'channelx1',
                          'pruning_type': 'snip_progressive',
                          'pruning_scope': 'local',
                          'start_step': 0,
                          'end_step': 10},
                         {'op_names': ['layer3.*'],
                          'pruning_type': 'pattern_lock'}]
        config = WeightPruningConfig(local_configs, pruning_frequency=2, target_sparsity=0.8)
        compression_manager = prepare_compression(model=self.model, confs=config)
        compression_manager.callbacks.on_train_begin()
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001)
        # Tiny random image/label dataset so the single epoch finishes quickly.
        datasets = Datasets('pytorch')
        dummy_dataset = datasets['dummy'](shape=(12, 3, 224, 224), low=0.0, high=1.0, label=True)
        dummy_dataloader = PyTorchDataLoader(dummy_dataset)
        logger.info(compression_manager.callbacks.callbacks_list[0].pruners)
        # Pruner 0 comes from the first local config, pruner 1 from the second;
        # fields missing from a local config fall back to the global
        # WeightPruningConfig values (e.g. pruner 1 inherits target_sparsity=0.8).
        assert (compression_manager.callbacks.callbacks_list[0].pruners[0].config['pruning_frequency'] == 2)
        assert (compression_manager.callbacks.callbacks_list[0].pruners[0].config['target_sparsity'] == 0.6)
        assert (compression_manager.callbacks.callbacks_list[0].pruners[1].config['target_sparsity'] == 0.8)
        assert (compression_manager.callbacks.callbacks_list[0].pruners[0].config['pattern'] == 'channelx1')
        assert (compression_manager.callbacks.callbacks_list[0].pruners[1].config['pruning_type'] == 'pattern_lock')
        # One training epoch driving the full callback sequence:
        # epoch_begin -> (step_begin -> before/after_optimizer_step -> step_end)* -> epoch_end.
        for epoch in range(1):
            self.model.train()
            compression_manager.callbacks.on_epoch_begin(epoch)
            local_step = 0
            for (image, target) in dummy_dataloader:
                compression_manager.callbacks.on_step_begin(local_step)
                output = self.model(image)
                loss = criterion(output, target)
                optimizer.zero_grad()
                loss.backward()
                compression_manager.callbacks.on_before_optimizer_step()
                optimizer.step()
                compression_manager.callbacks.on_after_optimizer_step()
                compression_manager.callbacks.on_step_end()
                local_step += 1
            compression_manager.callbacks.on_epoch_end()
        compression_manager.callbacks.on_train_end()
        compression_manager.callbacks.on_before_eval()
        compression_manager.callbacks.on_after_eval()
def main():
    """Parse a TOM task model from a ROS bag, then optionally save, show,
    debug-publish joint states, plan, or execute it.

    Behavior is driven entirely by command-line flags from getArgs():
    --bagfile, --project, --fake, --verbose, --show, --debug, --plan,
    --execute, plus --demo_topic/--alias_topic/--max_depth/--iter.

    Raises:
        RuntimeError: if no bag file is given, or on unsupported flag
            combinations (plan without --fake, execute without --plan,
            execute with --fake).
    """
    args = getArgs()
    rospy.init_node('parse_task_model')
    if (args.bagfile is not None):
        # Build the task parser from demonstrations recorded in the bag file.
        rtp = RosTaskParser(filename=args.bagfile, configs=[TOM_RIGHT_CONFIG, TOM_LEFT_CONFIG], unknown_apply_before=4, min_action_length=1, demo_topic=args.demo_topic, alias_topic=args.alias_topic)
        rtp.addIdle('IdleMotion')
        # All four colored cubes share the parent object class 'cube'.
        rtp.addObjectClassParent('Cube_red', 'cube')
        rtp.addObjectClassParent('Cube_blue', 'cube')
        rtp.addObjectClassParent('Cube_green', 'cube')
        rtp.addObjectClassParent('Cube_yellow', 'cube')
        rtp.process()
        task = rtp.makeTask()
        world = TomWorld(lfd=rtp.lfd)
    else:
        raise RuntimeError('no project or bag files specified')
    if (args.project and (args.bagfile is not None)):
        # Persist the learned models under the given project path.
        world.saveModels(args.project)
    if args.fake:
        # Populate the world with a synthetic scene and ground the task on it.
        world.addObjects(fakeTaskArgs())
        filled_args = task.compile(fakeTaskArgs())
    if args.verbose:
        print(task.nodeSummary())
        print(task.children['ROOT()'])
    if args.show:
        from costar_task_plan.tools import showTask
        showTask(task)
    if args.debug:
        # Publish fixed joint configurations for both arms and spin, so the
        # learned model can be inspected (presumably in RViz — not shown here).
        q1 = [(- 0.), (- 1.), 1., (- 2.0823833), 2., 1.]
        q2 = [0., (- 1.), (- 1.), (- 2.0823833), 2., 1.]
        r_js_pub = rospy.Publisher('/right_arm_joint_states', JointState, queue_size=1)
        l_js_pub = rospy.Publisher('/left_arm_joint_states', JointState, queue_size=1)
        r_msg = JointState(position=q1, name=['r_shoulder_pan_joint', 'r_shoulder_lift_joint', 'r_elbow_joint', 'r_wrist_1_joint', 'r_wrist_2_joint', 'r_wrist_3_joint'])
        l_msg = JointState(position=q2, name=['l_shoulder_pan_joint', 'l_shoulder_lift_joint', 'l_elbow_joint', 'l_wrist_1_joint', 'l_wrist_2_joint', 'l_wrist_3_joint'])
        try:
            rate = rospy.Rate(30)
            # brief sleep so the publishers can register with subscribers
            rospy.sleep(0.1)
            r_js_pub.publish(r_msg)
            l_js_pub.publish(l_msg)
            while (not rospy.is_shutdown()):
                world.update()
                world.debugLfD(verbose=args.verbose)
                rate.sleep()
        except rospy.ROSInterruptException as e:
            return
    if args.plan:
        # Double update with a short sleep to let world state settle before search.
        world.update()
        rospy.sleep(0.1)
        world.update()
        if (not args.fake):
            raise RuntimeError('currently only fake scene is supported')
        path = do_search(world, task, max_depth=args.max_depth, iter=args.iter)
        plan = PlanExecutionManager(path, OpenLoopTomExecute(world, 0))
    if args.execute:
        # NOTE(review): --plan requires --fake (above) while --execute forbids
        # --fake here, so --execute appears currently unsatisfiable — confirm.
        if (not args.plan):
            raise RuntimeError('cannot execute without a corresponding plan, did you forget to add the --plan flag?')
        if args.fake:
            raise RuntimeError('executing with a fake scene is dangerous')
        try:
            rate = rospy.Rate(10)
            while (not rospy.is_shutdown()):
                plan.step(world)
        except rospy.ROSInterruptException as e:
            return
def plot_preds_of_code_id(code_id):
    """Scatter-plot the predicted execution time of every augmented variant
    of *code_id* stored in the ``functional_unit_augmentation`` table.

    Parameters
    ----------
    code_id : int
        Identifier of the code sample whose augmentations are plotted.

    Side effects: opens a new matplotlib figure and draws into it (caller is
    responsible for ``plt.show()``/``savefig``), and opens a DB connection
    via ``ut.create_connection()``.
    """
    plt.figure()
    cnx = ut.create_connection()
    # SECURITY(review): code_id is interpolated directly into the SQL string.
    # If it can ever come from untrusted input, switch to a parameterized
    # query (pd.read_sql(..., params=...)) using the driver's paramstyle.
    # NOTE(review): cnx is never closed here — add a close/with if the
    # connection object supports it; verify ut.create_connection's return type.
    codes = pd.read_sql('SELECT code_token FROM functional_unit_augmentation WHERE code_id={}'.format(code_id), cnx)
    # Iterate the column directly instead of an iloc-per-index range loop.
    t = [graph(get_data_item(code_token)).item() for code_token in codes['code_token']]
    plt.title('PREDICTION: functional Unit Augmentation: code_id={}'.format(code_id))
    plt.xlabel('Repetitions')
    plt.ylabel('Predicted execution time')
    plt.scatter(range(len(t)), t)