class Critic(nn.Module):
    def __init__(self, encoder_src, encoder_tgt, dropout):
        super(Critic, self).__init__()
        self.encoder_src = encoder_src
        self.encoder_tgt = encoder_tgt
        self.dropout = nn.Dropout(dropout)
        self.linear_out = nn.Linear(encoder_src.hidden_size * 2, encoder_tgt.hidden_size * 2)
        self.log_softmax = nn.LogSoftmax(dim=-1)

    def forward(self, src_inputs_emb, src_lengths, *args):
        _, src = self.encoder_src(src_inputs_emb, src_lengths)
        # args is a flat sequence of (input_emb, lengths) pairs, one per candidate target
        assert len(args) % 2 == 0
        ret = []
        src = torch.squeeze(src[-1], 0)
        src = self.linear_out(self.dropout(src))
        for input_emb, lengths in zip(args[::2], args[1::2]):
            _, tgti = self.encoder_tgt(input_emb, lengths)
            tgti = torch.squeeze(tgti[-1], 0)
            tgti = self.dropout(tgti)
            score = torch.sum(src * tgti, 1)
            ret.append(score)
        cat = torch.stack(ret, 1)
        logp = self.log_softmax(cat)
        _, max_i = torch.max(logp, 1)
        # .item() replaces the deprecated `.data[0]` indexing on 0-dim tensors
        print('-', torch.mean(torch.eq(max_i, 0).float()).item())
        print('-', torch.mean(torch.eq(max_i, 1).float()).item())
        print('-', torch.mean(torch.eq(max_i, 2).float()).item())
        x, y, z = torch.split(logp, 1, -1)
        return torch.squeeze(x, 1), torch.squeeze(y, 1), torch.squeeze(z, 1)

    def save_checkpoint(self, epoch, opt, filename):
        torch.save({'encoder_src_dict': self.encoder_src.state_dict(),
                    'encoder_tgt_dict': self.encoder_tgt.state_dict(),
                    'linear_out_dict': self.linear_out.state_dict(),
                    'opt': opt,
                    'epoch': epoch}, filename)

    def load_checkpoint(self, filename):
        ckpt = torch.load(filename)
        self.encoder_src.load_state_dict(ckpt['encoder_src_dict'])
        self.encoder_tgt.load_state_dict(ckpt['encoder_tgt_dict'])
        self.linear_out.load_state_dict(ckpt['linear_out_dict'])
        epoch = ckpt['epoch']
        return epoch
def main():
    print('create env')
    env = gym.make('InvertedPendulumBulletEnv-v0')
    env.render(mode='human')
    pi = SmallReactivePolicy(env.observation_space, env.action_space)
    while 1:
        frame = 0
        score = 0
        restart_delay = 0
        obs = env.reset()
        print('frame')
        while 1:
            time.sleep(1.0 / 60.0)
            a = pi.act(obs)
            obs, r, done, _ = env.step(a)
            score += r
            frame += 1
            still_open = env.render('human')
            if still_open == False:
                return
            if not done:
                continue
            if restart_delay == 0:
                print('score=%0.2f in %i frames' % (score, frame))
                restart_delay = 60 * 2
            else:
                restart_delay -= 1
                if restart_delay == 0:
                    break
def create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn=None): layers = (layers or LayerFn()) assert (stem_type in ('', 'quad', 'tiered', 'deep', 'rep', '7x7', '3x3')) if ('quad' in stem_type): num_act = (2 if ('quad2' in stem_type) else None) stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers) elif ('tiered' in stem_type): stem = Stem(in_chs, (((3 * out_chs) // 8), (out_chs // 2), out_chs), pool=pool_type, layers=layers) elif ('deep' in stem_type): stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers) elif ('rep' in stem_type): stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers) elif ('7x7' in stem_type): if pool_type: stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers) else: stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2) elif pool_type: stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers) else: stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2) if isinstance(stem, Stem): feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info] else: feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)] return (stem, feature_info)
class CUHK03(object): def __init__(self, root): self.images_dir = os.path.join(root, 'cuhk03-np/detected') self.train_path = 'bounding_box_train' self.gallery_path = 'bounding_box_test' self.query_path = 'query' (self.train, self.query, self.gallery) = ([], [], []) (self.num_train_ids, self.num_query_ids, self.num_gallery_ids) = (0, 0, 0) self.load() def preprocess(self, path, relabel=True): pattern = re.compile('([-\\d]+)_c(\\d)') all_pids = {} ret = [] fpaths = sorted(glob(os.path.join(self.images_dir, path, '*.png'))) for fpath in fpaths: fname = os.path.basename(fpath) (pid, cam) = map(int, pattern.search(fname).groups()) if (pid == (- 1)): continue if relabel: if (pid not in all_pids): all_pids[pid] = len(all_pids) elif (pid not in all_pids): all_pids[pid] = pid pid = all_pids[pid] cam -= 1 ret.append((os.path.join(path, fname), pid, cam)) return (ret, int(len(all_pids))) def load(self): (self.train, self.num_train_pids) = self.preprocess(self.train_path) (self.gallery, self.num_gallery_pids) = self.preprocess(self.gallery_path, False) (self.query, self.num_query_pids) = self.preprocess(self.query_path, False) print(self.__class__.__name__, 'dataset loaded') print(' subset | # ids | # images') print(' ') print(' train | {:5d} | {:8d}'.format(self.num_train_pids, len(self.train))) print(' query | {:5d} | {:8d}'.format(self.num_query_pids, len(self.query))) print(' gallery | {:5d} | {:8d}'.format(self.num_gallery_pids, len(self.gallery)))
def dgStrat(s: _DGStratType) -> DGStrat:
    if isinstance(s, DGStrat):
        return s
    elif isinstance(s, Rule):
        return DGStrat.makeRule(s)
    elif isinstance(s, _DGStrat_sequenceProxy):
        return DGStrat.makeSequence(s.strats)
    elif isinstance(s, collections.abc.Iterable):
        l = [dgStrat(a) for a in s]
        return DGStrat.makeParallel(l)
    else:
        raise TypeError("Can not convert type '" + str(type(s)) + "' to DGStrat.")
def verify_view_layer(view_layer_name: str = 'View Layer') -> bpy.types.ViewLayer:
    scene = zpy.blender.verify_blender_scene()
    view_layer = scene.view_layers.get(view_layer_name, None)
    if view_layer is None:
        log.debug(f'Could not find view layer {view_layer_name}')
        view_layer = scene.view_layers[0]
    log.debug(f'Setting view layer to {view_layer.name}')
    bpy.context.window.view_layer = view_layer
    return view_layer
def to_tensor(batch: Any) -> Any:
    if isinstance(batch, dict):
        return {k: to_tensor(v) for k, v in batch.items()}
    elif isinstance(batch, (list, tuple)):
        return [to_tensor(v) for v in batch]
    elif isinstance(batch, np.ndarray):
        if batch.dtype == np.float64:
            batch = batch.astype(np.float32)
        return torch.from_numpy(batch)
    elif isinstance(batch, (int, float, type(None))):
        return batch
    else:
        raise ValueError('Unsupported type passed to `to_tensor`')
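A minimal usage sketch for the recursive converter above; this is added for illustration and is not part of the original snippet.

import numpy as np
import torch

batch = {'obs': np.zeros((2, 3), dtype=np.float64), 'meta': [np.arange(4), 1.5, None]}
tensors = to_tensor(batch)
assert tensors['obs'].dtype == torch.float32   # float64 arrays are downcast to float32
assert isinstance(tensors['meta'][0], torch.Tensor)
assert tensors['meta'][2] is None              # scalars and None pass through unchanged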
class DatumIoTest(tf.test.TestCase): def Conversion2dTestWithType(self, dtype): original_data = np.arange(9).reshape(3, 3).astype(dtype) serialized = datum_io.SerializeToString(original_data) retrieved_data = datum_io.ParseFromString(serialized) self.assertTrue(np.array_equal(original_data, retrieved_data)) def Conversion3dTestWithType(self, dtype): original_data = np.arange(24).reshape(2, 3, 4).astype(dtype) serialized = datum_io.SerializeToString(original_data) retrieved_data = datum_io.ParseFromString(serialized) self.assertTrue(np.array_equal(original_data, retrieved_data)) def testConversion2dWithType(self): self.Conversion2dTestWithType(np.int8) self.Conversion2dTestWithType(np.int16) self.Conversion2dTestWithType(np.int32) self.Conversion2dTestWithType(np.int64) self.Conversion2dTestWithType(np.float16) self.Conversion2dTestWithType(np.float32) self.Conversion2dTestWithType(np.float64) def testConversion3dWithType(self): self.Conversion3dTestWithType(np.int8) self.Conversion3dTestWithType(np.int16) self.Conversion3dTestWithType(np.int32) self.Conversion3dTestWithType(np.int64) self.Conversion3dTestWithType(np.float16) self.Conversion3dTestWithType(np.float32) self.Conversion3dTestWithType(np.float64) def testWriteAndReadToFile(self): data = np.array([[[(- 1.0), 125.0, (- 2.5)], [14.5, 3.5, 0.0]], [[20.0, 0.0, 30.0], [25.5, 36.0, 42.0]]]) tmpdir = tf.test.get_temp_dir() filename = os.path.join(tmpdir, 'test.datum') datum_io.WriteToFile(data, filename) data_read = datum_io.ReadFromFile(filename) self.assertAllEqual(data_read, data)
class TensorboardLogger(MetricLogger):
    def __init__(self, log_dir, start_iter=0, delimiter='\t'):
        super(TensorboardLogger, self).__init__(delimiter)
        self.iteration = start_iter
        self.writer = self._get_tensorboard_writer(log_dir)

    @staticmethod
    def _get_tensorboard_writer(log_dir):
        try:
            from tensorboardX import SummaryWriter
        except ImportError:
            raise ImportError('To use tensorboard please install tensorboardX [ pip install tensorflow tensorboardX ].')
        if is_main_process():
            timestamp = datetime.fromtimestamp(time.time()).strftime('%Y%m%d-%H:%M')
            tb_logger = SummaryWriter('{}-{}'.format(log_dir, timestamp))
            return tb_logger
        else:
            return None

    def update(self, **kwargs):
        super(TensorboardLogger, self).update(**kwargs)
        if self.writer:
            for k, v in kwargs.items():
                if isinstance(v, torch.Tensor):
                    v = v.item()
                assert isinstance(v, (float, int))
                self.writer.add_scalar(k, v, self.iteration)
            self.iteration += 1
class DecoderConfig(FairseqDataclass):
    name: DECODER_CHOICES = field(default='viterbi', metadata={'help': 'The type of decoder to use'})
    nbest: int = field(default=1, metadata={'help': 'Number of decodings to return'})
    criterion: CRITERION_CHOICES = field(default='ctc', metadata={'help': 'Criterion to use'})
    asgtransitions: List[int] = field(default=MISSING, metadata={'help': 'ASG transition indices'})
    maxreplabel: int = field(default=2, metadata={'help': 'Maximum repeated labels for ASG criterion'})
    unitlm: bool = field(default=False, metadata={'help': 'If set, use unit language model'})
    lmpath: str = field(default=MISSING, metadata={'help': 'Language model for KenLM decoder'})
    lexicon: Optional[str] = field(default=None, metadata={'help': 'Lexicon for Flashlight decoder'})
    beam: int = field(default=50, metadata={'help': 'Number of beams to use for decoding'})
    beamthreshold: float = field(default=15.0, metadata={'help': 'Threshold for beam search decoding'})
    beamsizetoken: Optional[int] = field(default=None, metadata={'help': 'Beam size to use'})
    wordscore: float = field(default=1.5, metadata={'help': 'Word score for KenLM decoder'})
    unkweight: float = field(default=-math.inf, metadata={'help': 'Unknown weight for KenLM decoder'})
    silweight: float = field(default=-0.3, metadata={'help': 'Silence weight for KenLM decoder'})
    lmweight: float = field(default=1.5, metadata={'help': 'Weight for LM while interpolating score'})
class Evaluator():
    def __init__(self, model, mode, loss_function, dataloader, device, output_dir, multi_gpu=False):
        self.model = model
        self.mode = mode
        self.output_dir = output_dir
        self.logs_dir = os.path.join(output_dir, 'logs')
        self.loss_function = loss_function
        self.dataloader = dataloader
        self.device = device
        self.multi_gpu = multi_gpu

    def run(self):
        acc, loss = test(model=self.model, loss_function=self.loss_function, test_dataloader=self.dataloader, device=self.device)
        return (acc.cpu().numpy(), loss)
def _merge_pred_true_cc(df_pred, df_true): logging.debug(f"evaluate_submission_cc {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") pred_columns = ['u', 'v', 'test_idx', 'logit_green', 'logit_yellow', 'logit_red'] for k in pred_columns: assert (k in df_pred.columns), (k, df_pred.columns) df_pred = df_pred[pred_columns] true_columns = ['u', 'v', 'test_idx', 'cc'] for k in true_columns: assert (k in df_true.columns), (k, df_true.columns) df_true = df_true[true_columns] assert (df_true['cc'].min() >= 0) assert (df_true['cc'].max() <= 3) logging.debug(f"start merge {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") df_merged = df_pred.merge(df_true, left_on=['u', 'v', 'test_idx'], right_on=['u', 'v', 'test_idx'], suffixes=['_pred', '']) assert (len(df_merged) == len(df_true)), (len(df_merged), len(df_true)) logging.debug(f"end merge {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") return df_merged
def test_bisenetv1_attention_refinement_module():
    x_arm = AttentionRefinementModule(32, 8)
    assert x_arm.conv_layer.in_channels == 32
    assert x_arm.conv_layer.out_channels == 8
    assert x_arm.conv_layer.kernel_size == (3, 3)
    x = torch.randn(2, 32, 8, 16)
    x_out = x_arm(x)
    assert x_out.shape == torch.Size([2, 8, 8, 16])
def divide_feature(in_feature, out_features, min_unit=8): assert ((in_feature % min_unit) == 0), f'in_feature ({in_feature}) must be divisible by min_unit ({min_unit})' units = (in_feature // min_unit) indices = np.argsort(out_features) out_features_sorted = np.array(out_features)[indices] out_feat_groups = [(k, indices[list(g)]) for (k, g) in groupby(range(len(indices)), (lambda i: out_features_sorted[i]))] out_feat_groups.sort(key=(lambda x: (x[0] * len(x[1]))), reverse=True) units_feat_ratio = (float(units) / sum(out_features)) out_group_units = [len(out_feat_group[1]) for out_feat_group in out_feat_groups] remaining_units = (units - sum(out_group_units)) for (i, out_feat_group) in enumerate(out_feat_groups): if (i < (len(out_feat_groups) - 1)): n = len(out_feat_group[1]) curr_out_feat_size = (out_feat_group[0] * n) curr_units = max((curr_out_feat_size * units_feat_ratio), n) curr_units = (((curr_units // n) * n) - n) curr_units = min(curr_units, remaining_units) out_group_units[i] += curr_units remaining_units -= curr_units if (remaining_units == 0): break elif (len(out_feat_groups) == 1): out_group_units[(- 1)] += remaining_units else: out_group_units.append(remaining_units) divided_in_features = np.zeros(len(out_features), dtype=int) for (i, out_feat_group) in enumerate(out_feat_groups): for j in range(len(out_feat_group[1])): divided_in_features[out_feat_group[1][j]] = ((out_group_units[i] // len(out_feat_group[1])) * min_unit) return divided_in_features
class StandardScaler():
    def __init__(self, means: np.ndarray = None, stds: np.ndarray = None, replace_nan_token: Any = None):
        self.means = means
        self.stds = stds
        self.replace_nan_token = replace_nan_token

    def fit(self, X: List[List[Optional[float]]]) -> 'StandardScaler':
        X = np.array(X).astype(float)
        X[np.where(np.isinf(X))] = np.nan
        self.means = np.nanmean(X, axis=0)
        self.stds = np.nanstd(X, axis=0)
        self.means = np.where(np.isnan(self.means), np.zeros(self.means.shape), self.means)
        self.stds = np.where(np.isnan(self.stds), np.ones(self.stds.shape), self.stds)
        self.stds = np.where(self.stds == 0, np.ones(self.stds.shape), self.stds)
        return self

    def transform(self, X: List[List[Optional[float]]]) -> np.ndarray:
        X = np.array(X).astype(float)
        X[np.where(np.isinf(X))] = np.nan
        transformed_with_nan = (X - self.means) / self.stds
        transformed_with_none = np.where(np.isnan(transformed_with_nan), self.replace_nan_token, transformed_with_nan)
        return transformed_with_none

    def inverse_transform(self, X: List[List[Optional[float]]]) -> np.ndarray:
        X = np.array(X).astype(float)
        transformed_with_nan = (X * self.stds) + self.means
        transformed_with_none = np.where(np.isnan(transformed_with_nan), self.replace_nan_token, transformed_with_nan)
        return transformed_with_none
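A brief usage sketch for the scaler above, added for illustration and not part of the original snippet; the sample values are made up.

import numpy as np

scaler = StandardScaler(replace_nan_token=0)
X = [[1.0, 2.0], [3.0, None], [5.0, 6.0]]      # None becomes NaN and is ignored when fitting
scaler.fit(X)
X_scaled = scaler.transform(X)                 # missing entries come back as replace_nan_token (0)
X_back = scaler.inverse_transform(X_scaled.tolist())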
class Normal(Distribution): def __init__(self, mean=None, log_var=None): super(Normal, self).__init__() self.mean_reset_value = Parameter(dt.zeros(1)) self.log_var_reset_value = Parameter(dt.zeros(1)) self.mean = mean self.log_var = log_var self._sample = None def sample(self, n_samples=1, resample=False): if ((self._sample is None) or resample): mean = self.mean std = self.log_var.mul(0.5).exp_() if (len(self.mean.size()) == 2): mean = mean.unsqueeze(1).repeat(1, n_samples, 1) std = std.unsqueeze(1).repeat(1, n_samples, 1) elif (len(self.mean.size()) == 4): mean = mean.unsqueeze(1).repeat(1, n_samples, 1, 1, 1) std = std.unsqueeze(1).repeat(1, n_samples, 1, 1, 1) rand_normal = Variable(mean.data.new(mean.size()).normal_()) self._sample = rand_normal.mul_(std).add_(mean) return self._sample def log_prob(self, value): if (type(value) == tuple): return torch.log(((self.cdf(value[1]) - self.cdf(value[0])) + 1e-06)) else: if (value is None): value = self.sample() assert ((self.mean is not None) and (self.log_var is not None)), 'Mean or log variance are None.' n_samples = value.data.shape[1] mean = self.mean log_var = self.log_var if (len(mean.size()) == 2): mean = mean.unsqueeze(1).repeat(1, n_samples, 1) log_var = log_var.unsqueeze(1).repeat(1, n_samples, 1) elif (len(mean.size()) == 4): mean = mean.unsqueeze(1).repeat(1, n_samples, 1, 1, 1) log_var = log_var.unsqueeze(1).repeat(1, n_samples, 1, 1, 1) return log_var.add(math.log((2 * math.pi))).add_(value.sub(mean).pow_(2).div_(log_var.exp().add(1e-05))).mul_((- 0.5)) def cdf(self, value): n_samples = value.data.shape[1] mean = self.mean std = self.log_var.mul(0.5).exp_() if (len(mean.size()) == 2): mean = mean.unsqueeze(1).repeat(1, n_samples, 1) std = std.unsqueeze(1).repeat(1, n_samples, 1) elif (len(mean.size()) == 4): mean = mean.unsqueeze(1).repeat(1, n_samples, 1, 1, 1) std = std.unsqueeze(1).repeat(1, n_samples, 1, 1, 1) return (1 + torch.erf(((value - mean) / (math.sqrt(2) * std).add(1e-05)))).mul_(0.5) def re_init(self, mean_value=None, log_var_value=None): self.re_init_mean(mean_value) self.re_init_log_var(log_var_value) def re_init_mean(self, value): mean = (value if (value is not None) else self.mean_reset_value.data.unsqueeze(1)) self.mean = Variable(mean, requires_grad=True) self._sample = None def re_init_log_var(self, value): log_var = (value if (value is not None) else self.log_var_reset_value.data.unsqueeze(1)) self.log_var = Variable(log_var, requires_grad=True) self._sample = None
class VarSkipFastLSTM(VarSkipRNNBase):
    def __init__(self, *args, **kwargs):
        super(VarSkipFastLSTM, self).__init__(SkipConnectFastLSTMCell, *args, **kwargs)
        self.lstm = True
def train(task, loader, model, optimizer, loss_name, dataset_name): model.train() def get_extra_param(): return None perf = PerfTrackTrain(task, extra_param=get_extra_param()) time_forward = 0 time_backward = 0 time_data_loading = 0 time3 = time() for (i, data_batch) in enumerate(loader): time1 = time() inp = get_inp(task, model, data_batch, loader.dataset.batch_proc, dataset_name) out = model(**inp) loss = get_loss(task, loss_name, data_batch, out, dataset_name) perf.update_all(data_batch=data_batch, out=out, loss=loss) time2 = time() if loss.ne(loss).any(): print('WARNING: avoiding step as nan in the loss') else: optimizer.zero_grad() loss.backward() bad_grad = False for x in model.parameters(): if (x.grad is not None): if x.grad.ne(x.grad).any(): print('WARNING: nan in a gradient') bad_grad = True break if ((x.grad == float('inf')) | (x.grad == float('-inf'))).any(): print('WARNING: inf in a gradient') bad_grad = True break if bad_grad: print('WARNING: avoiding step as bad gradient') else: optimizer.step() time_data_loading += (time1 - time3) time_forward += (time2 - time1) time3 = time() time_backward += (time3 - time2) if ((i % 50) == 0): print(f'[{i}/{len(loader)}] avg_loss: {perf.agg_loss()}, FW time = {round(time_forward, 2)}, BW time = {round(time_backward, 2)}, DL time = {round(time_data_loading, 2)}') return (perf.agg(), perf.agg_loss())
_processor('m4c_answer') class M4CAnswerProcessor(BaseProcessor): def __init__(self, config, *args, **kwargs): super().__init__(config, *args, **kwargs) self.answer_vocab = VocabDict(config.vocab_file, *args, **kwargs) self.PAD_IDX = self.answer_vocab.word2idx('<pad>') self.BOS_IDX = self.answer_vocab.word2idx('<s>') self.EOS_IDX = self.answer_vocab.word2idx('</s>') self.UNK_IDX = self.answer_vocab.UNK_INDEX assert (self.PAD_IDX != self.answer_vocab.UNK_INDEX) assert (self.BOS_IDX != self.answer_vocab.UNK_INDEX) assert (self.EOS_IDX != self.answer_vocab.UNK_INDEX) assert (self.PAD_IDX == 0) self.answer_preprocessor = Processor(config.preprocessor) assert (self.answer_preprocessor is not None) self.num_answers = config.num_answers self.max_length = config.max_length self.max_copy_steps = config.max_copy_steps assert (self.max_copy_steps >= 1) def get_anls(self, s1, s2): s1 = s1.lower().strip() s2 = s2.lower().strip() iou = (1 - (editdistance.eval(s1, s2) / max(len(s1), len(s2)))) anls = (iou if (iou >= 0.5) else 0.0) return anls def match_answer_to_vocab_ocr_seq(self, answer, vocab2idx_dict, ocr_tokens, ocr_num, ocr2inds_dict, max_match_num=20): num_vocab = len(vocab2idx_dict) answer_words = answer.split() answer_word_matches = [] for word in answer_words: matched_inds = [] if (word in vocab2idx_dict): matched_inds.append(vocab2idx_dict.get(word)) matched_inds.extend([(num_vocab + idx) for idx in ocr2inds_dict[word]]) if ((len(matched_inds) == 0) and (ocr_num != 0)): ocr_scores = torch.zeros(ocr_num, dtype=torch.float) for i in range(ocr_num): ocr_scores[i] = self.get_anls(word, ocr_tokens[i]) most_similar_id = torch.argmax(ocr_scores).item() most_similar_score = self.get_anls(word, ocr_tokens[most_similar_id]) if (most_similar_score >= 0.5): matched_inds.append((num_vocab + most_similar_id)) else: return [] if ((len(matched_inds) == 0) and (ocr_num == 0)): return [] answer_word_matches.append(matched_inds) if (len(answer_word_matches) == 0): return [] idx_seq_list = [()] for matched_inds in answer_word_matches: idx_seq_list = [(seq + (idx,)) for seq in idx_seq_list for idx in matched_inds] if (len(idx_seq_list) > max_match_num): idx_seq_list = idx_seq_list[:max_match_num] return idx_seq_list def get_vocab_size(self): answer_vocab_nums = self.answer_vocab.num_vocab answer_vocab_nums += self.max_length return answer_vocab_nums def get_true_vocab_size(self): return self.answer_vocab.num_vocab def __call__(self, item): answers = item['answers'] answers = [self.answer_preprocessor({'text': a})['text'] for a in answers] assert (len(answers) == self.num_answers) gt_answers = list(enumerate(answers)) unique_answers = sorted(set(answers)) unique_answer_scores = ([0] * len(unique_answers)) for (idx, unique_answer) in enumerate(unique_answers): accs = [] for gt_answer in gt_answers: other_answers = [item for item in gt_answers if (item != gt_answer)] matching_answers = [item for item in other_answers if (item[1] == unique_answer)] acc = min(1, (float(len(matching_answers)) / 3)) accs.append(acc) unique_answer_scores[idx] = (sum(accs) / len(accs)) unique_answer2score = {a: s for (a, s) in zip(unique_answers, unique_answer_scores)} scores = torch.zeros(self.max_copy_steps, self.get_vocab_size(), dtype=torch.float) ocr2inds_dict = defaultdict(list) for (idx, token) in enumerate(item['context_tokens']): ocr2inds_dict[token].append(idx) answer_dec_inds = [self.match_answer_to_vocab_ocr_seq(a, self.answer_vocab.word2idx_dict, item['context_tokens'], item['context_length'], ocr2inds_dict) for a in answers] 
all_idx_seq_list = [] for (answer, idx_seq_list) in zip(answers, answer_dec_inds): all_idx_seq_list.extend(idx_seq_list) score = unique_answer2score[answer] for idx_seq in idx_seq_list: score_idx = idx_seq[0] scores[(0, score_idx)] = max(scores[(0, score_idx)], score) train_prev_inds = torch.zeros(self.max_copy_steps, dtype=torch.long) train_loss_mask = torch.zeros(self.max_copy_steps, dtype=torch.float) if (len(all_idx_seq_list) > 0): idx_seq = all_idx_seq_list[np.random.choice(len(all_idx_seq_list))] dec_step_num = min((1 + len(idx_seq)), self.max_copy_steps) train_loss_mask[:dec_step_num] = 1.0 train_prev_inds[0] = self.BOS_IDX for t in range(1, dec_step_num): train_prev_inds[t] = idx_seq[(t - 1)] score_idx = (idx_seq[t] if (t < len(idx_seq)) else self.EOS_IDX) scores[(t, score_idx)] = 1.0 else: idx_seq = () answer_info = {'answers': answers, 'answers_scores': scores, 'sampled_idx_seq': idx_seq, 'train_prev_inds': train_prev_inds, 'train_loss_mask': train_loss_mask} return answer_info
class TestDevice(unittest.TestCase):
    def test_cpu(self):
        torch.cuda.is_available = MagicMock(return_value=False)
        from intel_extension_for_transformers.utils.device_utils import is_hpu_available, get_device_type
        device = get_device_type()
        self.assertTrue('cpu' in device)

    def test_gpu(self):
        torch.cuda.is_available = MagicMock(return_value=True)
        from intel_extension_for_transformers.utils.device_utils import is_hpu_available, get_device_type
        device = get_device_type()
        self.assertTrue('cuda' in device)
def experiment_string(configs, fname_args=[], separator=','):
    this_run_str = expr_prefix_str(configs)
    for str_arg in fname_args:
        if str_arg in configs.keys():
            this_run_str += separator + str_arg.title().replace('_', '') + '=' + str(configs[str_arg])
        else:
            raise ValueError('%s in fname_args does not exist in configs' % str_arg)
    this_run_str = this_run_str.replace('/', '_')
    return this_run_str
def aug_rcount(record_path: str) -> int:
    rcount = 0
    for filename in os.listdir(record_path):
        with open(os.path.join(record_path, filename), 'rb') as f:
            info_dict = pickle.load(f)
            rcount += len(info_dict['success']) + len(info_dict['fail'])
    return rcount
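A small self-contained sketch of the record layout the counter above expects; the 'success'/'fail' keys come from the function, while the file name and contents are made up for illustration.

import os
import pickle
import tempfile

record_dir = tempfile.mkdtemp()
record = {'success': [{'id': 0}, {'id': 1}], 'fail': [{'id': 2}]}
with open(os.path.join(record_dir, 'episode_0.pkl'), 'wb') as f:
    pickle.dump(record, f)
assert aug_rcount(record_dir) == 3   # 2 successes + 1 failure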
def convert(bit_string, n_phases=3):
    assert bit_string.shape[0] % n_phases == 0
    phase_length = bit_string.shape[0] // n_phases
    genome = []
    for i in range(0, bit_string.shape[0], phase_length):
        genome.append(bit_string[i:(i + phase_length)].tolist())
    return genome
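For illustration (not from the original source), a six-bit string split into three equal phases:

import numpy as np

bits = np.array([1, 0, 1, 1, 0, 0])
assert convert(bits, n_phases=3) == [[1, 0], [1, 1], [0, 0]]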
class OwlViTPreTrainedModel(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def _create_episode(episode_id, scene_id, start_position, start_rotation, target_position, shortest_paths=None, radius=None, info=None) -> Optional[NavigationEpisode]:
    goals = [NavigationGoal(position=target_position, radius=radius)]
    return NavigationEpisode(episode_id=str(episode_id), goals=goals, scene_id=scene_id, start_position=start_position, start_rotation=start_rotation, shortest_paths=shortest_paths, info=info)
class RandomResizedCropVideo(RandomResizedCrop):
    def __init__(self, size, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), interpolation_mode='bilinear'):
        if isinstance(size, tuple):
            assert len(size) == 2, 'size should be tuple (height, width)'
            self.size = size
        else:
            self.size = (size, size)
        self.interpolation_mode = interpolation_mode
        self.scale = scale
        self.ratio = ratio

    def __call__(self, clip):
        i, j, h, w = self.get_params(clip, self.scale, self.ratio)
        return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode)

    def __repr__(self):
        return self.__class__.__name__ + '(size={0}, interpolation_mode={1}, scale={2}, ratio={3})'.format(self.size, self.interpolation_mode, self.scale, self.ratio)
def parsePos(pos, tagsDict):
    phraseDict = {}
    for t in nltk_tags:
        phraseDict[t] = []
    for word, tag in pos:
        word = word.lower()
        if word not in tagsDict[tag]:
            tagsDict[tag][word] = 0
        tagsDict[tag][word] += 1
        phraseDict[tag].append(word)
    return (tagsDict, phraseDict)
def build_dataset_with_tgt_pinyinid(data_path, vocab_path, max_len):
    data_raw = []
    with open(data_path, encoding='utf8') as f:
        data_raw = [s.split('\t') for s in f.read().splitlines()]
    print(f'#Item: {len(data_raw)} from "{data_path}"')
    tokenizer = BertWordPieceTokenizer(vocab_path, lowercase=True)
    data = []
    for item_raw in tqdm(data_raw, desc='Build Dataset'):
        item = {'id': item_raw[0], 'src': item_raw[1], 'tgt': item_raw[2]}
        assert len(item['src']) == len(item['tgt'])
        data.append(item)
        encoded = tokenizer.encode(item['src'])
        tokens = encoded.tokens[1:-1]
        tokens_size = []
        for t in tokens:
            if t == '[UNK]':
                tokens_size.append(1)
            elif t.startswith('##'):
                tokens_size.append(len(t) - 2)
            else:
                tokens_size.append(len(t))
        item['tokens_size'] = tokens_size
        item['input_ids'] = encoded.ids
        item['pinyin_ids'] = token2pinyin.convert_sentence_to_pinyin_ids(item['src'], encoded)
        encoded = tokenizer.encode(item['tgt'])
        item['label'] = encoded.ids
        item['tgt_pinyin_ids'] = token2pinyin.convert_sentence_to_pinyin_ids(item['tgt'], encoded)
        item['pinyin_label'] = token2pinyin.convert_sentence_to_shengmu_yunmu_shengdiao_ids(item['tgt'], encoded)
        assert len(item['input_ids']) == len(item['label'])
    if max_len > 0:
        n_all_items = len(data)
        data = [item for item in data if len(item['input_ids']) <= max_len]
        n_filter_items = len(data)
        n_cut = n_all_items - n_filter_items
        print(f'max_len={max_len}, {n_all_items} -> {n_filter_items} ({n_cut})')
    return data
def get_data_for_model(train_dir, labels, test_dir=None, nn_model=None, as_generator=False, batch_size=BATCH_SIZE, word2vec_model=None, scaler=None):
    kwargs = dict(label_indices={lab: i for i, lab in enumerate(labels)}, word2vec_model=word2vec_model, scaler=scaler, nn_model=nn_model)
    if as_generator:
        filename_it = FilenameIterator(train_dir, batch_size)
        train_data = iterate_over_batches(filename_it, **kwargs)
    else:
        train_files = {filename[:-4] for filename in os.listdir(train_dir)}
        train_data = build_x_and_y(train_files, train_dir, **kwargs)
    test_data = None
    if test_dir:
        test_files = {filename[:-4] for filename in os.listdir(test_dir)}
        test_data = build_x_and_y(test_files, test_dir, **kwargs)
    return (train_data, test_data)
class bertf1cProcessor(DataProcessor): def __init__(self): random.seed(42) self.D = [[], [], []] for sid in range(1, 3): with open(('data/' + ['dev.json', 'test.json'][(sid - 1)]), 'r', encoding='utf8') as f: data = json.load(f) for i in range(len(data)): for j in range(len(data[i][1])): rid = [] for k in range(36): if ((k + 1) in data[i][1][j]['rid']): rid += [1] else: rid += [0] for l in range(1, (len(data[i][0]) + 1)): d = ['\n'.join(data[i][0][:l]).lower(), data[i][1][j]['x'].lower(), data[i][1][j]['y'].lower(), rid] self.D[sid] += [d] logger.info(((((str(len(self.D[0])) + ',') + str(len(self.D[1]))) + ',') + str(len(self.D[2])))) def get_train_examples(self, data_dir): return self._create_examples(self.D[0], 'train') def get_test_examples(self, data_dir): return self._create_examples(self.D[2], 'test') def get_dev_examples(self, data_dir): return self._create_examples(self.D[1], 'dev') def get_labels(self): return [str(x) for x in range(2)] def _create_examples(self, data, set_type): examples = [] for (i, d) in enumerate(data): guid = ('%s-%s' % (set_type, i)) text_a = tokenization.convert_to_unicode(data[i][0]) text_b = tokenization.convert_to_unicode(data[i][1]) text_c = tokenization.convert_to_unicode(data[i][2]) examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=data[i][3], text_c=text_c)) return examples
class Parser():
    def __init__(self):
        pass

    def _filename(self, index, basename=False, absolute=False):
        pass

    def filename(self, index, basename=False, absolute=False):
        return self._filename(index, basename=basename, absolute=absolute)

    def filenames(self, basename=False, absolute=False):
        return [self._filename(index, basename=basename, absolute=absolute) for index in range(len(self))]
def load_device_results(results_dir):
    frames = []
    for filename in os.listdir(results_dir):
        if filename.endswith('.device.json'):
            # str.rstrip strips a character set rather than a suffix, so trim by length instead
            experiment = filename[:-len('.device.json')]
            p = os.path.join(results_dir, filename)
            with open(p, 'r') as f:
                contents = f.read()
            contents = contents.replace("'", '"')
            d = json.loads(contents)
            d['experiment'] = experiment
            df = pandas.DataFrame([d])
            frames.append(df)
    df = pandas.concat(frames)
    df.set_index('experiment', inplace=True)
    return df
def embed(nvar, topdim, pols, precision='d', verbose_level=0):
    if precision == 'd':
        return standard_embed(nvar, topdim, pols, verbose_level)
    elif precision == 'dd':
        return dobldobl_embed(nvar, topdim, pols, verbose_level)
    elif precision == 'qd':
        return quaddobl_embed(nvar, topdim, pols, verbose_level)
    else:
        print('wrong argument for precision')
        return None
_torch _sentencepiece _tokenizers class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon): def setUp(self): super().setUp() args = TrainingArguments('..') self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_trainer_works_with_dict(self): train_dataset = RegressionDataset() eval_dataset = RegressionDataset() model = RegressionDictModel() args = TrainingArguments('./regression') trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() _ = trainer.evaluate() _ = trainer.predict(eval_dataset) def test_evaluation_with_keys_to_drop(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) eval_dataset = RepeatDataset(x) args = TrainingArguments('./test') trainer = Trainer(tiny_gpt2, args, eval_dataset=eval_dataset) result = trainer.predict(eval_dataset) self.assertTrue(isinstance(result.predictions, np.ndarray)) result = trainer.predict(eval_dataset, ignore_keys=[]) self.assertTrue(isinstance(result.predictions, tuple)) self.assertEqual(len(result.predictions), 2) def test_training_arguments_are_left_untouched(self): trainer = get_regression_trainer() trainer.train() args = TrainingArguments('./regression', report_to=[]) (dict1, dict2) = (args.to_dict(), trainer.args.to_dict()) for key in dict1.keys(): if (key != 'logging_dir'): self.assertEqual(dict1[key], dict2[key]) def test_number_of_steps_in_training(self): trainer = get_regression_trainer(learning_rate=0.1) train_output = trainer.train() self.assertEqual(train_output.global_step, ((self.n_epochs * 64) / self.batch_size)) trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5) train_output = trainer.train() self.assertEqual(train_output.global_step, int(((1.5 * 64) / self.batch_size))) trainer = get_regression_trainer(learning_rate=0.1, max_steps=10) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) _torch_bf16_cpu _intel_extension_for_pytorch def test_number_of_steps_in_training_with_ipex(self): for mix_bf16 in [True, False]: trainer = get_regression_trainer(learning_rate=0.1, use_ipex=True, bf16=mix_bf16, no_cuda=True) train_output = trainer.train() self.assertEqual(train_output.global_step, ((self.n_epochs * 64) / trainer.args.train_batch_size)) trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5, use_ipex=True, bf16=mix_bf16, no_cuda=True) train_output = trainer.train() self.assertEqual(train_output.global_step, int(((1.5 * 64) / trainer.args.train_batch_size))) trainer = get_regression_trainer(learning_rate=0.1, max_steps=10, use_ipex=True, bf16=mix_bf16, no_cuda=True) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) def test_logging_inf_nan_filter(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) args = TrainingArguments('./test', learning_rate=.0, logging_steps=5, logging_nan_inf_filter=False) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.train() log_history_no_filter = trainer.state.log_history args = TrainingArguments('./test', learning_rate=.0, logging_steps=5, logging_nan_inf_filter=True) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.train() log_history_filter = trainer.state.log_history def is_any_loss_nan_or_inf(log_history): losses = [l['loss'] 
for l in log_history[:(- 1)]] return (any((math.isnan(x) for x in losses)) or any((math.isinf(x) for x in losses))) self.assertTrue(is_any_loss_nan_or_inf(log_history_no_filter)) self.assertFalse(is_any_loss_nan_or_inf(log_history_filter)) def test_train_and_eval_dataloaders(self): n_gpu = max(1, torch.cuda.device_count()) trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16) self.assertEqual(trainer.get_train_dataloader().batch_size, (16 * n_gpu)) trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16) self.assertEqual(trainer.get_eval_dataloader().batch_size, (16 * n_gpu)) trainer = get_regression_trainer(train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32) self.assertEqual(len(trainer.get_train_dataloader()), ((66 // (16 * n_gpu)) + 1)) self.assertEqual(len(trainer.get_eval_dataloader()), ((74 // (32 * n_gpu)) + 1)) trainer = get_regression_trainer(train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32, dataloader_drop_last=True) self.assertEqual(len(trainer.get_train_dataloader()), (66 // (16 * n_gpu))) self.assertEqual(len(trainer.get_eval_dataloader()), (74 // (32 * n_gpu))) new_eval_dataset = RegressionDataset(length=128) self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), (128 // (32 * n_gpu))) def test_dataloader_without_dataset(self): train_dataset = RegressionDataset(length=128) trainer = CustomDataloaderTrainer(model=RegressionModel(), train_dataset=train_dataset, eval_dataset=train_dataset) trainer.train() trainer.evaluate() def test_sampler_seed(self): class DummyDataset(torch.utils.data.Dataset): def __init__(self, length: int=101): self.length = length def __len__(self): return self.length def __getitem__(self, i): if ((i < 0) or (i >= self.length)): raise IndexError return {'input_ids': [i]} class DummyModel(PreTrainedModel): def __init__(self, num_params: int): super().__init__(PretrainedConfig()) self.params = nn.Parameter(torch.randn(num_params)) def forward(self, input_ids, labels=None): if (labels is not None): return (torch.tensor(0.0, device=input_ids.device), input_ids) else: return input_ids def _get_first_data_sample(num_params, seed, data_seed, **kwargs): with tempfile.TemporaryDirectory() as tmpdir: trainer = Trainer(model_init=(lambda : DummyModel(num_params)), args=TrainingArguments(output_dir=tmpdir, **kwargs, seed=seed, data_seed=data_seed, local_rank=(- 1)), train_dataset=DummyDataset()) return next(iter(trainer.get_train_dataloader())) for group_by_length in [True, False]: sample42_1 = _get_first_data_sample(num_params=10, seed=42, data_seed=42, group_by_length=group_by_length) sample42_2 = _get_first_data_sample(num_params=11, seed=42, data_seed=42, group_by_length=group_by_length) self.assertTrue(torch.equal(sample42_1['input_ids'], sample42_2['input_ids'])) sample42_3 = _get_first_data_sample(num_params=11, seed=11, data_seed=42, group_by_length=group_by_length) self.assertTrue(torch.equal(sample42_1['input_ids'], sample42_3['input_ids'])) others = [_get_first_data_sample(num_params=i, seed=42, data_seed=i, group_by_length=group_by_length) for i in range(10)] self.assertTrue(any(((not torch.equal(sample42_1['input_ids'], sample['input_ids'])) for sample in others))) _torch_multi_gpu def test_data_is_not_parallelized_when_model_is_parallel(self): model = RegressionModel() model.is_parallelizable = True model.model_parallel = True args = TrainingArguments('./regression', 
per_device_train_batch_size=16, per_device_eval_batch_size=16) trainer = Trainer(model, args, train_dataset=RegressionDataset(), eval_dataset=RegressionDataset()) self.assertTrue(trainer.is_model_parallel) self.assertEqual(trainer.args.n_gpu, 1) self.assertEqual(trainer.get_train_dataloader().batch_size, 16) self.assertEqual(len(trainer.get_train_dataloader()), (64 // 16)) self.assertEqual(trainer.get_eval_dataloader().batch_size, 16) self.assertEqual(len(trainer.get_eval_dataloader()), (64 // 16)) def test_evaluate(self): trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() (x, y) = (trainer.eval_dataset.x, trainer.eval_dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()((pred, y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() (x, y) = (trainer.eval_dataset.x, trainer.eval_dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()((pred, y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=(lambda logits, labels: (logits + 1))) results = trainer.evaluate() (x, y) = (trainer.eval_dataset.x, trainer.eval_dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()(((pred + 1), y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) def test_evaluate_with_jit(self): trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), jit_mode_eval=True) results = trainer.evaluate() (x, y) = (trainer.eval_dataset.x, trainer.eval_dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()((pred, y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy(), jit_mode_eval=True) results = trainer.evaluate() (x, y) = (trainer.eval_dataset.x, trainer.eval_dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()((pred, y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=(lambda logits, labels: (logits + 1)), jit_mode_eval=True) results = trainer.evaluate() (x, y) = (trainer.eval_dataset.x, trainer.eval_dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()(((pred + 1), y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) _torch_bf16_cpu _intel_extension_for_pytorch def test_evaluate_with_ipex(self): for mix_bf16 in [True, False]: trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, no_cuda=True) results = 
trainer.evaluate() (x, y) = (trainer.eval_dataset.x, trainer.eval_dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()((pred, y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, eval_len=66, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, no_cuda=True) results = trainer.evaluate() (x, y) = (trainer.eval_dataset.x, trainer.eval_dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()((pred, y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=(lambda logits, labels: (logits + 1)), bf16=mix_bf16, no_cuda=True) results = trainer.evaluate() (x, y) = (trainer.eval_dataset.x, trainer.eval_dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()(((pred + 1), y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) def test_predict(self): trainer = get_regression_trainer(a=1.5, b=2.5) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, ((1.5 * x) + 2.5))) trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, ((1.5 * x) + 2.5))) trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], ((1.5 * x) + 2.5))) self.assertTrue(np.allclose(preds[1], ((1.5 * x) + 2.5))) trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=['labels', 'labels_2']) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], ((1.5 * x) + 2.5))) self.assertTrue(np.allclose(preds[1], ((1.5 * x) + 2.5))) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_predict_with_jit(self): trainer = get_regression_trainer(a=1.5, b=2.5, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, ((1.5 * x) + 2.5))) trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, ((1.5 * x) + 2.5))) trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], ((1.5 * x) + 2.5))) self.assertTrue(np.allclose(preds[1], ((1.5 * x) + 2.5))) trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=['labels', 'labels_2'], jit_mode_eval=True) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = 
outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], ((1.5 * x) + 2.5))) self.assertTrue(np.allclose(preds[1], ((1.5 * x) + 2.5))) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) _torch_bf16_cpu _intel_extension_for_pytorch def test_predict_with_ipex(self): for mix_bf16 in [True, False]: trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, bf16=mix_bf16, no_cuda=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, ((1.5 * x) + 2.5))) trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, use_ipex=True, bf16=mix_bf16, no_cuda=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, ((1.5 * x) + 2.5))) trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, use_ipex=True, bf16=mix_bf16, no_cuda=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], ((1.5 * x) + 2.5))) self.assertTrue(np.allclose(preds[1], ((1.5 * x) + 2.5))) trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=['labels', 'labels_2'], use_ipex=True, bf16=mix_bf16, no_cuda=True) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], ((1.5 * x) + 2.5))) self.assertTrue(np.allclose(preds[1], ((1.5 * x) + 2.5))) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_dynamic_shapes(self): eval_dataset = DynamicShapesDataset(batch_size=self.batch_size) model = RegressionModel(a=2, b=1) args = TrainingArguments('./regression') trainer = Trainer(model, args, eval_dataset=eval_dataset) _ = trainer.evaluate() preds = trainer.predict(eval_dataset) for (expected, seen) in zip(eval_dataset.ys, preds.label_ids): self.assertTrue(np.array_equal(expected, seen[:expected.shape[0]])) self.assertTrue(np.all((seen[expected.shape[0]:] == (- 100)))) for (expected, seen) in zip(eval_dataset.xs, preds.predictions): self.assertTrue(np.array_equal(((2 * expected) + 1), seen[:expected.shape[0]])) self.assertTrue(np.all((seen[expected.shape[0]:] == (- 100)))) args = TrainingArguments('./regression', eval_accumulation_steps=2) trainer = Trainer(model, args, eval_dataset=eval_dataset) _ = trainer.evaluate() preds = trainer.predict(eval_dataset) for (expected, seen) in zip(eval_dataset.ys, preds.label_ids): self.assertTrue(np.array_equal(expected, seen[:expected.shape[0]])) self.assertTrue(np.all((seen[expected.shape[0]:] == (- 100)))) for (expected, seen) in zip(eval_dataset.xs, preds.predictions): self.assertTrue(np.array_equal(((2 * expected) + 1), seen[:expected.shape[0]])) self.assertTrue(np.all((seen[expected.shape[0]:] == (- 100)))) def test_log_level(self): logger = logging.get_logger() log_info_string = 'Running training' is_info = (logging.get_verbosity() <= 20) with CaptureLogger(logger) as cl: trainer = get_regression_trainer() trainer.train() if is_info: self.assertIn(log_info_string, cl.out) else: self.assertNotIn(log_info_string, cl.out) with CaptureLogger(logger) as cl: trainer = get_regression_trainer(log_level='debug') trainer.train() 
self.assertIn(log_info_string, cl.out) with CaptureLogger(logger) as cl: trainer = get_regression_trainer(log_level='error') trainer.train() self.assertNotIn(log_info_string, cl.out) def test_save_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5) trainer.train() self.check_saved_checkpoints(tmpdir, 5, int(((self.n_epochs * 64) / self.batch_size))) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, pretrained=False) trainer.train() self.check_saved_checkpoints(tmpdir, 5, int(((self.n_epochs * 64) / self.batch_size)), False) _torch_multi_gpu def test_run_seq2seq_double_train_wrap_once(self): trainer = get_regression_trainer() trainer.train() model_wrapped_before = trainer.model_wrapped trainer.train() model_wrapped_after = trainer.model_wrapped self.assertIs(model_wrapped_before, model_wrapped_after, 'should be not wrapped twice') _torch_up_to_2_gpus def test_can_resume_training(self): with tempfile.TemporaryDirectory() as tmpdir: kwargs = {'output_dir': tmpdir, 'train_len': 128, 'save_steps': 5, 'learning_rate': 0.1, 'logging_steps': 5} trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = (trainer.model.a.item(), trainer.model.b.item()) state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, 'checkpoint-5') trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = (trainer.model.a.item(), trainer.model.b.item()) state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) checkpoint = os.path.join(tmpdir, 'checkpoint-15') trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = (trainer.model.a.item(), trainer.model.b.item()) state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) with tempfile.TemporaryDirectory() as tmpdir: kwargs = {'output_dir': tmpdir, 'train_len': 128, 'save_steps': 5, 'learning_rate': 0.1, 'pretrained': False} trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = (trainer.model.a.item(), trainer.model.b.item()) state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, 'checkpoint-5') trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = (trainer.model.a.item(), trainer.model.b.item()) state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) checkpoint = os.path.join(tmpdir, 'checkpoint-15') trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = (trainer.model.a.item(), trainer.model.b.item()) state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) trainer = get_regression_trainer() with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=f'{checkpoint}-bogus') self.assertTrue(("Can't find a valid checkpoint at" in str(context.exception))) output_dir2 = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(output_dir=output_dir2) with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=True) self.assertTrue(('No valid checkpoint found in output directory' in 
str(context.exception))) def test_resume_training_with_randomness(self): random_torch = ((not torch.cuda.is_available()) or (torch.cuda.device_count() <= 1)) if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True train_dataset = RegressionDataset(length=128) eval_dataset = RegressionDataset() with self.subTest('Test every step'): config = RegressionModelConfig(a=0, b=2, random_torch=random_torch) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() (a, b) = (trainer.model.a.item(), trainer.model.b.item()) model = RegressionRandomPreTrainedModel(config) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, 'checkpoint-15')) (a1, b1) = (trainer.model.a.item(), trainer.model.b.item()) self.assertAlmostEqual(a, a1, delta=1e-05) self.assertAlmostEqual(b, b1, delta=1e-05) with self.subTest('Test every epoch'): config = RegressionModelConfig(a=0, b=2, random_torch=random_torch) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_strategy='epoch', learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() (a, b) = (trainer.model.a.item(), trainer.model.b.item()) model = RegressionRandomPreTrainedModel(config) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) checkpoints = [d for d in os.listdir(tmp_dir) if d.startswith('checkpoint-')] self.assertEqual(len(checkpoints), 3) checkpoint_dir = sorted(checkpoints, key=(lambda x: int(x.replace('checkpoint-', ''))))[0] trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, checkpoint_dir)) (a1, b1) = (trainer.model.a.item(), trainer.model.b.item()) self.assertAlmostEqual(a, a1, delta=1e-05) self.assertAlmostEqual(b, b1, delta=1e-05) _accelerate _torch_non_multi_gpu def test_auto_batch_size_finder(self): if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'examples', 'pytorch', 'text-classification')) sys.path.append(SRC_DIR) import run_glue with tempfile.TemporaryDirectory() as tmpdir: testargs = f''' run_glue.py --model_name_or_path distilbert-base-uncased --task_name mrpc --do_train --do_eval --max_seq_len 128 --per_device_train_batch_size 4096 --learning_rate 2e-5 --num_train_epochs 1 --output_dir {tmpdir} --auto_find_batch_size 0 '''.split() with self.assertRaises(RuntimeError): with patch.object(sys, 'argv', testargs): run_glue.main() testargs[(- 1)] = '1' with patch.object(sys, 'argv', testargs): run_glue.main() def test_training_with_resume_from_checkpoint_false(self): train_dataset = RegressionDataset(length=128) eval_dataset = RegressionDataset() config = RegressionModelConfig(a=0, b=2) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train(resume_from_checkpoint=False) _torch_up_to_2_gpus def test_resume_training_with_shard_checkpoint(self): with tempfile.TemporaryDirectory() as tmpdir: trainer = 
get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1) trainer.train() (a, b) = (trainer.model.a.item(), trainer.model.b.item()) state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, 'checkpoint-5') self.convert_to_sharded_checkpoint(checkpoint) trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = (trainer.model.a.item(), trainer.model.b.item()) state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) _torch_up_to_2_gpus def test_resume_training_with_gradient_accumulation(self): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1) trainer.train() (a, b) = (trainer.model.a.item(), trainer.model.b.item()) state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, 'checkpoint-5') trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = (trainer.model.a.item(), trainer.model.b.item()) state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) _torch_up_to_2_gpus def test_resume_training_with_frozen_params(self): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1) trainer.model.a.requires_grad_(False) trainer.train() (a, b) = (trainer.model.a.item(), trainer.model.b.item()) state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, 'checkpoint-5') trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1) trainer.model.a.requires_grad_(False) trainer.train(resume_from_checkpoint=checkpoint) self.assertFalse(trainer.model.a.requires_grad) (a1, b1) = (trainer.model.a.item(), trainer.model.b.item()) state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) def test_load_best_model_at_end(self): total = int(((self.n_epochs * 64) / self.batch_size)) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy='steps', save_steps=5, load_best_model_at_end=True) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, 'eval_loss') with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy='steps', save_steps=5, load_best_model_at_end=True, metric_for_best_model='accuracy', compute_metrics=AlmostAccuracy()) self.assertTrue(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, 'eval_accuracy', greater_is_better=True) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(a=1.5, b=2.5, 
output_dir=tmpdir, learning_rate=0.1, evaluation_strategy='epoch', save_strategy='epoch', load_best_model_at_end=True, metric_for_best_model='accuracy', compute_metrics=AlmostAccuracy()) self.assertTrue(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, (64 // self.batch_size), total) self.check_best_model_has_been_loaded(tmpdir, (64 // self.batch_size), total, trainer, 'eval_accuracy', greater_is_better=True) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, learning_rate=0.1, eval_steps=5, evaluation_strategy='steps', save_steps=5, load_best_model_at_end=True, pretrained=False) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, 'eval_loss', is_pretrained=False) def test_trainer_eval_mrpc(self): MODEL_ID = 'bert-base-cased-finetuned-mrpc' tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID) data_args = GlueDataTrainingArguments(task_name='mrpc', data_dir=f'{get_tests_dir()}/fixtures/tests_samples/MRPC', overwrite_cache=True) eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode='dev') training_args = TrainingArguments(output_dir='./examples', no_cuda=True) trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset) result = trainer.evaluate() self.assertLess(result['eval_loss'], 0.2) def test_trainer_eval_lm(self): MODEL_ID = 'distilroberta-base' tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) dataset = LineByLineTextDataset(tokenizer=tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=tokenizer.max_len_single_sentence) self.assertEqual(len(dataset), 31) def test_training_iterable_dataset(self): config = RegressionModelConfig() model = RegressionPreTrainedModel(config) train_dataset = SampleIterableDataset(label_names=['labels', 'extra']) args = RegressionTrainingArguments(output_dir='./examples', max_steps=4) trainer = Trainer(model=model, args=args, train_dataset=train_dataset) trainer.train() self.assertEqual(trainer.state.global_step, 4) loader = trainer.get_train_dataloader() self.assertIsInstance(loader, torch.utils.data.DataLoader) self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler) def test_training_finite_iterable_dataset(self): config = RegressionModelConfig() model = RegressionPreTrainedModel(config) batch_size = 1 num_samples = 10 available_steps = (num_samples // batch_size) data = FiniteIterableDataset(length=num_samples) train_args = TrainingArguments('..', max_steps=(available_steps + 1), per_device_train_batch_size=batch_size) trainer = Trainer(model, train_dataset=data, args=train_args) with self.assertLogs('transformers.trainer', level='WARNING') as logs: trainer.train() self.assertIn(f'stopping training at step {available_steps}!', logs.output[0]) def test_evaluation_iterable_dataset(self): config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset(label_names=['labels', 'extra']) args = RegressionTrainingArguments(output_dir='./examples') trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() (x, y) = (trainer.eval_dataset.dataset.x, trainer.eval_dataset.dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() 
self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()((pred, y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) eval_dataset = SampleIterableDataset(length=66) results = trainer.evaluate(eval_dataset) (x, y) = (eval_dataset.dataset.x, eval_dataset.dataset.ys[0]) pred = ((1.5 * x) + 2.5) expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results['eval_loss'], expected_loss) expected_acc = AlmostAccuracy()((pred, y))['accuracy'] self.assertAlmostEqual(results['eval_accuracy'], expected_acc) def test_predict_iterable_dataset(self): config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() args = RegressionTrainingArguments(output_dir='./examples') trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy()) preds = trainer.predict(trainer.eval_dataset).predictions x = eval_dataset.dataset.x self.assertTrue(np.allclose(preds, ((1.5 * x) + 2.5))) test_dataset = SampleIterableDataset(length=66, label_names=['labels', 'extra']) preds = trainer.predict(test_dataset).predictions x = test_dataset.dataset.x self.assertTrue(np.allclose(preds, ((1.5 * x) + 2.5))) def test_num_train_epochs_in_training(self): trainer = get_regression_trainer(max_steps=3, train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5) train_output = trainer.train() self.assertEqual(train_output.global_step, 3) trainer = get_regression_trainer(train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5) train_output = trainer.train() self.assertEqual(train_output.global_step, int(self.n_epochs)) def test_early_stopping_callback(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, load_best_model_at_end=True, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model='accuracy') trainer.add_callback(EarlyStoppingCallback(1, 0.0001)) train_output = trainer.train() self.assertLess(train_output.global_step, ((20 * 64) / 16)) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, evaluation_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model='accuracy') trainer.add_callback(EarlyStoppingCallback(1)) self.assertEqual(trainer.state.global_step, 0) try: trainer.train() except AssertionError: self.assertEqual(trainer.state.global_step, 0) def test_flos_extraction(self): trainer = get_regression_trainer(learning_rate=0.1) def assert_flos_extraction(trainer, wrapped_model_to_check): self.assertEqual(trainer.model, unwrap_model(wrapped_model_to_check)) self.assertGreaterEqual(getattr(unwrap_model(wrapped_model_to_check).config, 'total_flos', 0), 0) assert_flos_extraction(trainer, trainer.model) assert_flos_extraction(trainer, nn.DataParallel(trainer.model)) trainer.train() self.assertTrue(isinstance(trainer.state.total_flos, float)) def check_checkpoint_deletion(self, trainer, output_dir, expected): for n in [5, 10, 15, 20, 25]: os.makedirs(os.path.join(output_dir, f'{PREFIX_CHECKPOINT_DIR}-{n}'), exist_ok=True) trainer._rotate_checkpoints(output_dir=output_dir) glob_checkpoints = [str(x) for x in 
Path(output_dir).glob(f'{PREFIX_CHECKPOINT_DIR}-*')] values = [int(re.match(f'.*{PREFIX_CHECKPOINT_DIR}-([0-9]+)', d).groups()[0]) for d in glob_checkpoints] self.assertSetEqual(set(values), set(expected)) def test_checkpoint_rotation(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(output_dir=tmp_dir, save_total_limit=2) self.check_checkpoint_deletion(trainer, tmp_dir, [20, 25]) trainer = get_regression_trainer(output_dir=tmp_dir, evaluation_strategy='steps', load_best_model_at_end=True, save_total_limit=2) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, 'checkpoint-5') self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25]) trainer = get_regression_trainer(output_dir=tmp_dir, evaluation_strategy='steps', load_best_model_at_end=True, save_total_limit=1) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, 'checkpoint-25') self.check_checkpoint_deletion(trainer, tmp_dir, [25]) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, 'checkpoint-5') self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25]) def check_mem_metrics(self, trainer, check_func): metrics = trainer.train().metrics check_func('init_mem_cpu_alloc_delta', metrics) check_func('train_mem_cpu_alloc_delta', metrics) if (torch.cuda.device_count() > 0): check_func('init_mem_gpu_alloc_delta', metrics) check_func('train_mem_gpu_alloc_delta', metrics) metrics = trainer.evaluate() check_func('eval_mem_cpu_alloc_delta', metrics) if (torch.cuda.device_count() > 0): check_func('eval_mem_gpu_alloc_delta', metrics) metrics = trainer.predict(RegressionDataset()).metrics check_func('test_mem_cpu_alloc_delta', metrics) if (torch.cuda.device_count() > 0): check_func('test_mem_gpu_alloc_delta', metrics) def test_mem_metrics(self): trainer = get_regression_trainer(skip_memory_metrics=False) self.check_mem_metrics(trainer, self.assertIn) trainer = get_regression_trainer(skip_memory_metrics=True) self.check_mem_metrics(trainer, self.assertNotIn) _torch_gpu def test_fp16_full_eval(self): debug = 0 n_gpus = get_gpu_count() bs = 8 eval_len = (16 * n_gpus) a = (torch.ones(1000, bs) + 0.001) b = (torch.ones(1000, bs) - 0.001) trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False) metrics = trainer.evaluate() del trainer gc.collect() fp32_init = metrics['init_mem_gpu_alloc_delta'] fp32_eval = metrics['eval_mem_gpu_alloc_delta'] if debug: print(f'fp32_init {fp32_init}') print(f'fp32_eval {fp32_eval}') self.assertGreater(fp32_init, 59000) self.assertLess(fp32_eval, 5000) trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, fp16_full_eval=True, skip_memory_metrics=False) metrics = trainer.evaluate() fp16_init = metrics['init_mem_gpu_alloc_delta'] fp16_eval = metrics['eval_mem_gpu_alloc_delta'] if debug: print(f'fp16_init {fp16_init}') print(f'fp16_eval {fp16_eval}') self.assertLess(fp16_init, 5000) self.assertGreater(fp16_eval, 27000) self.assertAlmostEqual(fp16_eval, (fp32_init / 2), delta=5000) _torch_non_multi_gpu _torchdynamo _torch_tensorrt_fx def test_torchdynamo_full_eval(self): import torchdynamo n_gpus = get_gpu_count() bs = 8 eval_len = (16 * n_gpus) a = (torch.ones(1000, bs) + 0.001) b = (torch.ones(1000, bs) - 0.001) trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len) metrics = trainer.evaluate() original_eval_loss = metrics['eval_loss'] del trainer trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo='eager') metrics = trainer.evaluate() self.assertAlmostEqual(metrics['eval_loss'], original_eval_loss) 
del trainer torchdynamo.reset() trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo='nvfuser') metrics = trainer.evaluate() self.assertAlmostEqual(metrics['eval_loss'], original_eval_loss) torchdynamo.reset() trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo='fx2trt') metrics = trainer.evaluate() self.assertAlmostEqual(metrics['eval_loss'], original_eval_loss) torchdynamo.reset() ("torch 2.0.0 gives `ModuleNotFoundError: No module named 'torchdynamo'`.") _torch_non_multi_gpu _torchdynamo def test_torchdynamo_memory(self): import torchdynamo class CustomTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): x = inputs['x'] output = model(x) if (self.args.n_gpu == 1): return output.mean() return output class MyModule(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x): for _ in range(20): x = torch.cos(x) return x mod = MyModule() a = torch.ones(1024, 1024, device='cuda', requires_grad=True) a.grad = None trainer = CustomTrainer(model=mod) for _ in range(10): orig_loss = trainer.training_step(mod, {'x': a}) gc.collect() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() orig_loss = trainer.training_step(mod, {'x': a}) orig_peak_mem = torch.cuda.max_memory_allocated() torchdynamo.reset() del trainer a = torch.ones(1024, 1024, device='cuda', requires_grad=True) a.grad = None args = TrainingArguments(output_dir='None', torchdynamo='nvfuser') trainer = CustomTrainer(model=mod, args=args) for _ in range(10): loss = trainer.training_step(mod, {'x': a}) gc.collect() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() loss = trainer.training_step(mod, {'x': a}) peak_mem = torch.cuda.max_memory_allocated() torchdynamo.reset() del trainer self.assertAlmostEqual(loss, orig_loss) self.assertGreater(orig_peak_mem, (peak_mem * 2)) _torch_gpu _torch_bf16_gpu def test_bf16_full_eval(self): debug = 0 n_gpus = get_gpu_count() bs = 8 eval_len = (16 * n_gpus) a = (torch.ones(1000, bs) + 0.001) b = (torch.ones(1000, bs) - 0.001) trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False) metrics = trainer.evaluate() del trainer gc.collect() fp32_init = metrics['init_mem_gpu_alloc_delta'] fp32_eval = metrics['eval_mem_gpu_alloc_delta'] if debug: print(f'fp32_init {fp32_init}') print(f'fp32_eval {fp32_eval}') self.assertGreater(fp32_init, 59000) self.assertLess(fp32_eval, 5000) trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, bf16_full_eval=True, skip_memory_metrics=False) metrics = trainer.evaluate() bf16_init = metrics['init_mem_gpu_alloc_delta'] bf16_eval = metrics['eval_mem_gpu_alloc_delta'] if debug: print(f'bf16_init {bf16_init}') print(f'bf16_eval {bf16_eval}') self.assertLess(bf16_init, 5000) self.assertGreater(bf16_eval, 27000) self.assertAlmostEqual(bf16_eval, (fp32_init / 2), delta=5000) def test_no_wd_param_group(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) trainer = Trainer(model=model) trainer.create_optimizer_and_scheduler(10) wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight'] wd_params = [p for (n, p) in model.named_parameters() if (n in wd_names)] no_wd_params = [p for (n, p) in model.named_parameters() if (n not in wd_names)] self.assertListEqual(trainer.optimizer.param_groups[0]['params'], wd_params) self.assertListEqual(trainer.optimizer.param_groups[1]['params'], no_wd_params)
class DistributedAutoEncoder(nn.Module): def __init__(self, num_filters=192, bound=0.11): super(DistributedAutoEncoder, self).__init__() self.conv1 = nn.Conv2d(3, num_filters, 5, stride=2, padding=2) self.gdn1 = gdn.GDN(num_filters) self.conv2 = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2) self.gdn2 = gdn.GDN(num_filters) self.conv3 = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2) self.gdn3 = gdn.GDN(num_filters) self.conv4 = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2) self.conv1_cor = nn.Conv2d(3, num_filters, 5, stride=2, padding=2) self.gdn1_cor = gdn.GDN(num_filters) self.conv2_cor = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2) self.gdn2_cor = gdn.GDN(num_filters) self.conv3_cor = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2) self.gdn3_cor = gdn.GDN(num_filters) self.conv4_cor = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2) self.conv1_w = nn.Conv2d(3, num_filters, 5, stride=2, padding=2) self.gdn1_w = gdn.GDN(num_filters) self.conv2_w = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2) self.gdn2_w = gdn.GDN(num_filters) self.conv3_w = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2) self.gdn3_w = gdn.GDN(num_filters) self.conv4_w = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2) self.entropy_bottleneck = entropy_model.EntropyBottleneck(num_filters, quantize=False) self.entropy_bottleneck_hx = entropy_model.EntropyBottleneck(num_filters) self.entropy_bottleneck_hy = entropy_model.EntropyBottleneck(num_filters, quantize=False) self.deconv1 = nn.ConvTranspose2d((2 * num_filters), num_filters, 5, stride=2, padding=2, output_padding=1) self.igdn1 = gdn.GDN(num_filters, inverse=True) self.deconv2 = nn.ConvTranspose2d(num_filters, num_filters, 5, stride=2, padding=2, output_padding=1) self.igdn2 = gdn.GDN(num_filters, inverse=True) self.deconv3 = nn.ConvTranspose2d(num_filters, num_filters, 5, stride=2, padding=2, output_padding=1) self.igdn3 = gdn.GDN(num_filters, inverse=True) self.deconv4 = nn.ConvTranspose2d(num_filters, 3, 5, stride=2, padding=2, output_padding=1) self.deconv1_cor = nn.ConvTranspose2d((2 * num_filters), num_filters, 5, stride=2, padding=2, output_padding=1) self.igdn1_cor = gdn.GDN(num_filters, inverse=True) self.deconv2_cor = nn.ConvTranspose2d(num_filters, num_filters, 5, stride=2, padding=2, output_padding=1) self.igdn2_cor = gdn.GDN(num_filters, inverse=True) self.deconv3_cor = nn.ConvTranspose2d(num_filters, num_filters, 5, stride=2, padding=2, output_padding=1) self.igdn3_cor = gdn.GDN(num_filters, inverse=True) self.deconv4_cor = nn.ConvTranspose2d(num_filters, 3, 5, stride=2, padding=2, output_padding=1) self.bound = bound def encode(self, x): x = self.conv1(x) x = self.gdn1(x) x = self.conv2(x) x = self.gdn2(x) x = self.conv3(x) x = self.gdn3(x) x = self.conv4(x) return x def encode_cor(self, x): x = self.conv1_cor(x) x = self.gdn1_cor(x) x = self.conv2_cor(x) x = self.gdn2_cor(x) x = self.conv3_cor(x) x = self.gdn3_cor(x) x = self.conv4_cor(x) return x def encode_w(self, x): x = self.conv1_w(x) x = self.gdn1_w(x) x = self.conv2_w(x) x = self.gdn2_w(x) x = self.conv3_w(x) x = self.gdn3_w(x) x = self.conv4_w(x) return x def decode(self, x, w): x = torch.cat((x, w), 1) x = self.deconv1(x) x = self.igdn1(x) x = self.deconv2(x) x = self.igdn2(x) x = self.deconv3(x) x = self.igdn3(x) x = self.deconv4(x) return x def decode_cor(self, x, w): x = torch.cat((x, w), 1) x = self.deconv1_cor(x) x = self.igdn1_cor(x) x = self.deconv2_cor(x) x = 
self.igdn2_cor(x) x = self.deconv3_cor(x) x = self.igdn3_cor(x) x = self.deconv4_cor(x) return x def forward(self, x, y): w = self.encode_w(y) if self.training: w = (w + (math.sqrt(0.001) * torch.randn_like(w))) hx = self.encode(x) hy = self.encode_cor(y) (hx_tilde, x_likelihoods) = self.entropy_bottleneck_hx(hx) (hy_tilde, y_likelihoods) = self.entropy_bottleneck_hy(hy) (_, w_likelihoods) = self.entropy_bottleneck(w) x_tilde = self.decode(hx_tilde, w) y_tilde = self.decode_cor(hy_tilde, w) return (x_tilde, y_tilde, x_likelihoods, y_likelihoods, w_likelihoods)
def data_load(filename, label):
    fl = np.loadtxt(filename)
    data = []
    lab = []
    start, end = 0, signal_size
    while end <= (fl.shape[0] / 5):
        w = int(np.sqrt(signal_size))
        x = fl[start:end]
        imgs = x.reshape(w, w)
        data.append(imgs)
        lab.append(label)
        start += signal_size
        end += signal_size
    return (data, lab)
def backward_chain(query, rules, facts, max_depth):
    return backward_chain_(query, 0, rules, facts, max_depth, set())
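# The wrapper above delegates to a recursive helper, backward_chain_, that is not
# shown in this row. The following is only a sketch of what such a depth-limited
# backward chainer could look like, under assumed conventions (rules as
# (head, body) pairs of hashable atoms, facts as a set, a visited set to break
# cycles); it is not the original helper.
def backward_chain_(query, depth, rules, facts, max_depth, visited):
    if query in facts:                          # a goal that is a known fact is proved
        return True
    if query in visited or depth >= max_depth:  # cycle or depth budget exhausted
        return False
    visited = visited | {query}
    for head, body in rules:                    # try every rule whose head matches the goal
        if head == query and all(backward_chain_(sub, depth + 1, rules, facts, max_depth, visited) for sub in body):
            return True
    return False

# Hypothetical example: 'mortal' follows from 'human' via one rule.
# backward_chain('mortal', [('mortal', ['human'])], {'human'}, max_depth=5)  -> True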
def masked_max(input, mask=None, dim=1):
    if mask is None:
        max_v, _ = torch.max(input, dim=dim)
        return max_v
    else:
        mask = mask.unsqueeze(-1)
        mask = mask.repeat(1, 1, input.size(-1))
        input = input.masked_fill(mask == 0.0, float('-inf'))
        max_v, _ = torch.max(input, dim=dim)
        return max_v
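# Quick usage sketch for masked_max: input is (batch, time, dim), mask is
# (batch, time); the max is taken over the time axis while padded positions are
# ignored. Only torch is required.
import torch

x = torch.tensor([[[1.0, 5.0], [9.0, 2.0], [7.0, 8.0]]])  # (1, 3, 2)
mask = torch.tensor([[1.0, 1.0, 0.0]])                     # last time step is padding
print(masked_max(x, mask))  # tensor([[9., 5.]]) -- the padded step is ignored
print(masked_max(x))        # tensor([[9., 8.]]) -- no mask: plain max over time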
class FieldAwareFactorizationMachineModel(torch.nn.Module):
    def __init__(self, field_dims, embed_dim):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.ffm = FieldAwareFactorizationMachine(field_dims, embed_dim)

    def forward(self, x):
        ffm_term = torch.sum(torch.sum(self.ffm(x), dim=1), dim=1, keepdim=True)
        x = self.linear(x) + ffm_term
        return torch.sigmoid(x.squeeze(1))
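# Usage sketch for the field-aware FM model above, assuming the FeaturesLinear and
# FieldAwareFactorizationMachine layers it composes are importable from the same
# codebase this snippet comes from. Inputs are integer category ids, one column per
# field; the output is a probability-like score per example.
import torch

field_dims = [10, 20, 30]                                  # three categorical fields
model = FieldAwareFactorizationMachineModel(field_dims, embed_dim=4)
x = torch.tensor([[3, 11, 25],
                  [0, 19, 7]], dtype=torch.long)           # (batch, num_fields) category ids
scores = model(x)                                          # shape (batch,), values in (0, 1)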
def actionAngleAdiabatic_c(pot, gamma, R, vR, vT, z, vz): from ..orbit.integrateFullOrbit import _parse_pot from ..orbit.integratePlanarOrbit import _prep_tfuncs (npot, pot_type, pot_args, pot_tfuncs) = _parse_pot(pot, potforactions=True) pot_tfuncs = _prep_tfuncs(pot_tfuncs) jr = numpy.empty(len(R)) jz = numpy.empty(len(R)) err = ctypes.c_int(0) ndarrayFlags = ('C_CONTIGUOUS', 'WRITEABLE') actionAngleAdiabatic_actionsFunc = _lib.actionAngleAdiabatic_actions actionAngleAdiabatic_actionsFunc.argtypes = [ctypes.c_int, ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ctypes.c_int, ndpointer(dtype=numpy.int32, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ctypes.c_void_p, ctypes.c_double, ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ctypes.POINTER(ctypes.c_int)] f_cont = [R.flags['F_CONTIGUOUS'], vR.flags['F_CONTIGUOUS'], vT.flags['F_CONTIGUOUS'], z.flags['F_CONTIGUOUS'], vz.flags['F_CONTIGUOUS']] R = numpy.require(R, dtype=numpy.float64, requirements=['C', 'W']) vR = numpy.require(vR, dtype=numpy.float64, requirements=['C', 'W']) vT = numpy.require(vT, dtype=numpy.float64, requirements=['C', 'W']) z = numpy.require(z, dtype=numpy.float64, requirements=['C', 'W']) vz = numpy.require(vz, dtype=numpy.float64, requirements=['C', 'W']) jr = numpy.require(jr, dtype=numpy.float64, requirements=['C', 'W']) jz = numpy.require(jz, dtype=numpy.float64, requirements=['C', 'W']) actionAngleAdiabatic_actionsFunc(len(R), R, vR, vT, z, vz, ctypes.c_int(npot), pot_type, pot_args, pot_tfuncs, ctypes.c_double(gamma), jr, jz, ctypes.byref(err)) if f_cont[0]: R = numpy.asfortranarray(R) if f_cont[1]: vR = numpy.asfortranarray(vR) if f_cont[2]: vT = numpy.asfortranarray(vT) if f_cont[3]: z = numpy.asfortranarray(z) if f_cont[4]: vz = numpy.asfortranarray(vz) return (jr, jz, err.value)
def readFloatsFile(filename):
    with open(filename) as f:
        array = []
        for line in f:
            if line.startswith('%') or line.startswith('#'):
                continue
            if len(line.split()) == 0:
                continue
            array.append([float(x) for x in line.split()])
        return array
def load_data_from_text_file(path_to_data):
    data, spk_id, tags = [], [], []
    with open(path_to_data, 'r') as utt2spk_id:
        for line in utt2spk_id:
            sample = clean_input_utterance(' '.join(line.split(' ')[3:]).rstrip())
            data.append(sample.lower())
            spk_id.append(int(line.split(' ')[1]))
            tags.append(line.split(' ')[2])
    return (data, spk_id, tags)
def X_masked(X):
    X = torch.tensor(numpy.array(X))
    mask = [[[True, True, True], [True, False, False], [False, False, False], [True, True, True], [True, True, True]],
            [[True, True, True], [False, True, True], [True, False, True], [True, True, True], [False, False, False]]]
    mask = torch.tensor(numpy.array(mask), dtype=torch.bool)
    return torch.masked.MaskedTensor(X, mask=mask)
class TestNet(unittest.TestCase): def setUp(self): self.num_output = 13 net_file = simple_net_file(self.num_output) self.net = caffe.Net(net_file, caffe.TRAIN) self.net.blobs['label'].data[...] = np.random.randint(self.num_output, size=self.net.blobs['label'].data.shape) os.remove(net_file) def test_memory(self): params = sum(map(list, six.itervalues(self.net.params)), []) blobs = self.net.blobs.values() del self.net total = 0 for p in params: total += (p.data.sum() + p.diff.sum()) for bl in blobs: total += (bl.data.sum() + bl.diff.sum()) def test_forward_backward(self): self.net.forward() self.net.backward() def test_inputs_outputs(self): self.assertEqual(self.net.inputs, []) self.assertEqual(self.net.outputs, ['loss']) def test_save_and_read(self): f = tempfile.NamedTemporaryFile(mode='w+', delete=False) f.close() self.net.save(f.name) net_file = simple_net_file(self.num_output) net2 = caffe.Net(net_file, f.name, caffe.TRAIN) os.remove(net_file) os.remove(f.name) for name in self.net.params: for i in range(len(self.net.params[name])): self.assertEqual(abs((self.net.params[name][i].data - net2.params[name][i].data)).sum(), 0)
def log_begin_block(input_tensor, block_type):
    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_BEGIN_BLOCK,
                            value={'block_type': block_type},
                            stack_offset=_STACK_OFFSET)
    mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RESNET_TOPOLOGY,
                            value=' Block Input: {}'.format(_get_shape(input_tensor)),
                            stack_offset=_STACK_OFFSET)
class TFCLIPVisionModelTester(): def __init__(self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return (config, pixel_values) def get_config(self): return CLIPVisionConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range) def create_and_check_model(self, config, pixel_values): model = TFCLIPVisionModel(config=config) result = model(pixel_values, training=False) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = ((image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, (num_patches + 1), self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, pixel_values) = config_and_inputs inputs_dict = {'pixel_values': pixel_values} return (config, inputs_dict)
class DetectionPresetTrain():
    def __init__(self, hflip_prob=0.5):
        trans = [T.ToTensor()]
        if hflip_prob > 0:
            trans.append(T.RandomHorizontalFlip(hflip_prob))
        self.transforms = T.Compose(trans)

    def __call__(self, img, target):
        return self.transforms(img, target)
def make_protein_features(protein_object: protein.Protein, description: str, _is_distillation: bool = False) -> FeatureDict:
    pdb_feats = {}
    aatype = protein_object.aatype
    sequence = _aatype_to_str_sequence(aatype)
    pdb_feats.update(make_sequence_features(sequence=sequence, description=description, num_res=len(protein_object.aatype)))
    all_atom_positions = protein_object.atom_positions
    all_atom_mask = protein_object.atom_mask
    pdb_feats['all_atom_positions'] = all_atom_positions.astype(np.float32)
    pdb_feats['all_atom_mask'] = all_atom_mask.astype(np.float32)
    pdb_feats['resolution'] = np.array([0.0]).astype(np.float32)
    pdb_feats['is_distillation'] = np.array(1.0 if _is_distillation else 0.0).astype(np.float32)
    return pdb_feats
def all_gather_list(data): if (not hasattr(all_gather_list, '_buffer')): all_gather_list._buffer = torch.cuda.ByteTensor(_BUFFER_SIZE) try: enc = msgpack.dumps(data, use_bin_type=True) msgpack_success = True except TypeError: enc = pickle.dumps(data) msgpack_success = False enc_size = len(enc) max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item() buffer_ = all_gather_list._buffer (in_buffer, enc_byte) = _encode(enc, max_size, buffer_) out_buffer = hvd.allgather(in_buffer[:(enc_byte + enc_size)]) results = [] for _ in range(hvd.size()): (bytes_list, shift) = _decode(out_buffer, enc_byte) out_buffer = out_buffer[shift:] if msgpack_success: result = msgpack.loads(bytes_list, raw=False) else: result = pickle.loads(bytes_list) results.append(result) return results
def merge_cnn_dm(): cnn = '/scratch/cluster/jcxu/exComp/0.327,0.122,0.290-cnnTrue1.0-1True3-1093-cp_0.5' dm = '/scratch/cluster/jcxu/exComp/0.427,0.192,0.388-dmTrue1.0-1True3-10397-cp_0.7' total_pred = [] total_ref = [] f = open(cnn, 'rb') cnn_dict = pickle.load(f) f.close() fine_cnn_pd = [] for x in cnn_dict['pred']: fine_x = [easy_post_processing(s) for s in x] fine_cnn_pd.append(fine_x) total_pred += fine_cnn_pd total_ref += cnn_dict['ref'] f = open(dm, 'rb') dm_dict = pickle.load(f) f.close() fine_dm_pd = [] for x in dm_dict['pred']: fine_x = [easy_post_processing(s) for s in x] fine_dm_pd.append(fine_x) total_pred += fine_dm_pd total_ref += dm_dict['ref'] rouge_metrics = RougeStrEvaluation(name='mine') for (p, r) in zip(total_pred, total_ref): rouge_metrics(pred=p, ref=r) rouge_metrics.get_metric(True, note='test')
def whitebox_pgd(args, model, test_loader):
    PGDAttack = kwargs_PGD_attack[args.attack_ball]
    adversary = PGDAttack(model, loss_fn=nn.CrossEntropyLoss(reduction='sum'), eps=0.3, nb_iter=40,
                          eps_iter=0.01, rand_init=True, clip_min=0.0, clip_max=1.0, targeted=False)
    correct = 0
    test_itr = tqdm(enumerate(test_loader), total=len(list(test_loader)))
    for (_, (data, target)) in test_itr:
        x, target = data.to(args.dev), target.to(args.dev)
        adv_image = adversary.perturb(x, target)
        pred = model(adv_image)
        out = pred.max(1, keepdim=True)[1]
        correct += out.eq(target.unsqueeze(1).data).sum()
    acc = (100.0 * correct.cpu().numpy()) / len(test_loader.dataset)
    print('\nSuccess rate after PGD attack : {}/{} ({:.0f}%), '.format(correct, len(test_loader.dataset), acc.item()))
def get_artifacts_links(worflow_run_id, token=None): headers = None if (token is not None): headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'} url = f' result = requests.get(url, headers=headers).json() artifacts = {} try: artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']}) pages_to_iterate_over = math.ceil(((result['total_count'] - 100) / 100)) for i in range(pages_to_iterate_over): result = requests.get((url + f'&page={(i + 2)}'), headers=headers).json() artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']}) return artifacts except Exception: print(f'''Unknown error, could not fetch links: {traceback.format_exc()}''') return {}
def read_raw_img_from_pair(id_path_pair):
    (img_id, img_path) = id_path_pair
    try:
        encoded_img_arr = read_compress_raw_img(img_path)
        return (img_id, encoded_img_arr)
    except Exception as e:
        return (img_path, None)
def ror3_56_cifar10(num_classes=10, **kwargs):
    return get_ror_cifar(num_classes=num_classes, blocks=56, model_name='ror3_56_cifar10', **kwargs)
def main():
    args = parse_args()
    coco_path = args.coco_path
    nproc = args.nproc
    print(full_clsID_to_trID)
    out_mask_dir = osp.join(coco_path, 'stuffthingmaps_detectron2')
    for dir_name in ['train2017', 'val2017']:
        os.makedirs(osp.join(out_mask_dir, dir_name), exist_ok=True)
    train_list = glob(osp.join(coco_path, 'stuffthingmaps', 'train2017', '*.png'))
    test_list = glob(osp.join(coco_path, 'stuffthingmaps', 'val2017', '*.png'))
    assert (len(train_list) + len(test_list)) == COCO_LEN, 'Wrong length of list {} & {}'.format(len(train_list), len(test_list))
    if args.nproc > 1:
        mmcv.track_parallel_progress(partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True), train_list, nproc=nproc)
        mmcv.track_parallel_progress(partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False), test_list, nproc=nproc)
    else:
        mmcv.track_progress(partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True), train_list)
        mmcv.track_progress(partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False), test_list)
    print('Done!')
def make_imbalanced_data(balanced_data, class_name, class_value, prop=9):
    one_class_only = balanced_data[balanced_data[class_name] == class_value]
    one_class_only_oversampled = one_class_only.sample(frac=prop, replace=True).reset_index(drop=True)
    return pd.concat([balanced_data, one_class_only_oversampled])
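# Small usage sketch (pandas only, hypothetical column names): the rows of one
# class are resampled prop times with replacement and appended to the balanced
# frame, yielding a deliberately imbalanced frame.
import pandas as pd

balanced = pd.DataFrame({'feature': range(6),
                         'label': [0, 1, 0, 1, 0, 1]})      # three rows of each class
imbalanced = make_imbalanced_data(balanced, 'label', class_value=1, prop=3)
print(imbalanced['label'].value_counts())                   # class 1: 3 + 3*3 = 12 rows, class 0: 3 rows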
class HyperGCN(nn.Module):
    def __init__(self, in_channels: int, hid_channels: int, num_classes: int, use_mediator: bool = False,
                 use_bn: bool = False, fast: bool = True, drop_rate: float = 0.5) -> None:
        super().__init__()
        self.fast = fast
        self.cached_g = None
        self.with_mediator = use_mediator
        self.layers = nn.ModuleList()
        self.layers.append(HyperGCNConv(in_channels, hid_channels, use_mediator, use_bn=use_bn, drop_rate=drop_rate))
        self.layers.append(HyperGCNConv(hid_channels, num_classes, use_mediator, use_bn=use_bn, is_last=True))

    def forward(self, X: torch.Tensor, hg: 'dhg.Hypergraph') -> torch.Tensor:
        if self.fast:
            if self.cached_g is None:
                self.cached_g = Graph.from_hypergraph_hypergcn(hg, X, self.with_mediator)
            for layer in self.layers:
                X = layer(X, hg, self.cached_g)
        else:
            for layer in self.layers:
                X = layer(X, hg)
        return X
def det_between_split(tree: TreeNode, rootlen: int): def _easy_match(tag): if (tag in ['ADVP', 'SBAR', 'PP']): return True return False def _easy_match_len_limit(_node: TreeNode, tag='S', l=5): if ((tag == _node.tag) and (len(_node.text) <= l)): return True return False def _easy_two_step_match(_node: TreeNode, match_tag=['VP'], match_subtag=['VBG', 'VBN']): if (_node.tag in match_tag): if _node.children: if (_node.children[0].tag in match_subtag): return True return False def _mix_np_vp(_node_a, _node_b): if ((_node_a.tag == 'NP') and (_node_b.tag == 'VP') and (len((_node_a.text + _node_b.text)) <= 6)): return True if ((_node_b.tag == 'NP') and (_node_a.tag == 'VP') and (len((_node_a.text + _node_b.text)) <= 6)): return True return False bag = [] if (not tree.children): return [] flag = any([(x.tag in SPLIT) for x in tree.children]) if (not flag): return [] split_child_id = [] for (idx, child) in enumerate(tree.children): if (child.tag in SPLIT): split_child_id.append(idx) last = (- 1) for (idx_c, c) in enumerate(split_child_id): if (last == (- 1)): cand_child = tree.children[:c] else: cand_child = tree.children[(last + 1):c] last = c if (len(cand_child) == 1): candi = cand_child[0] if (_easy_match(candi.tag) or _easy_two_step_match(candi) or _easy_match_len_limit(candi)): (right, left) = (False, False) if (idx_c == 0): right = True elif (c == (rootlen - 1)): left = True else: (right, left) = (True, True) sel_idx = set() if left: sel_idx = sel_idx.union(set(range(tree.children[split_child_id[(idx_c - 1)]].start_idx, tree.children[split_child_id[(idx_c - 1)]].end_idx))) if right: sel_idx = sel_idx.union(set(range(tree.children[split_child_id[idx_c]].start_idx, tree.children[split_child_id[idx_c]].end_idx))) sel_idx = sel_idx.union(set(range(candi.start_idx, candi.end_idx))) bag += [{'node': ('Split_' + candi.tag), 'selected_idx': sel_idx, 'text': ' '.join(candi.text), 'par_text': ' '.join(tree.text)}] elif (len(cand_child) == 3): if (cand_child[0].tag.startswith("'") and _mix_np_vp(cand_child[1], cand_child[2])): bag += [{'node': 'NPxVP', 'selected_idx': set(range(cand_child[1].start_idx, cand_child[2].end_idx)), 'text': ' '.join((cand_child[1].text + cand_child[2].text)), 'par_text': ' '.join(tree.text)}] return bag
class DrqCatalogue(QuasarCatalogue): def __init__(self, config): self.logger = logging.getLogger(__name__) super().__init__(config) self.best_obs = None self.bi_max = None self.drq_filename = None self.keep_bal = None self.spall = None self.__parse_config(config) catalogue = self.read_drq() if (not self.best_obs): catalogue = self.read_spall(catalogue) self.catalogue = catalogue if (self.max_num_spec is not None): super().trim_catalogue() def __parse_config(self, config): self.best_obs = config.getboolean('best obs') if (self.best_obs is None): raise QuasarCatalogueError("Missing argument 'best obs' required by DrqCatalogue") self.bi_max = config.getfloat('BI max') self.drq_filename = config.get('drq catalogue') if (self.drq_filename is None): raise QuasarCatalogueError("Missing argument 'drq catalogue' required by DrqCatalogue") self.keep_bal = config.getboolean('keep BAL') if (self.keep_bal is None): raise QuasarCatalogueError("Missing argument 'keep BAL' required by DrqCatalogue") if self.best_obs: self.spall = None else: self.spall = config.get('spAll') if (self.spall is None): self.logger.warning("Missing argument 'spAll' required by DrqCatalogue. Looking for spAll in input directory...") if (config.get('input directory') is None): self.logger.error("'spAll' file not found. If you didn't want to load the spAll file you should pass the option 'best obs = True'. Quiting...") raise QuasarCatalogueError("Missing argument 'spAll' required by DrqCatalogue") folder = config.get('input directory') folder = folder.replace('spectra', '').replace('lite', '').replace('full', '') filenames = glob.glob(f'{folder}/spAll-*.fits') if (len(filenames) > 1): self.logger.error("Found multiple 'spAll' files. Quiting...") for filename in filenames: self.logger.error(f'found: {filename}') raise QuasarCatalogueError("Missing argument 'spAll' required by DrqCatalogue") if (len(filenames) == 0): self.logger.error("'spAll' file not found. If you didn't want to load the spAll file you should pass the option 'best obs = True'. Quiting...") raise QuasarCatalogueError("Missing argument 'spAll' required by DrqCatalogue") self.spall = filenames[0] self.logger.ok_warning("'spAll' file found. Contining with normal execution") def read_drq(self): self.logger.progress(f'Reading DRQ catalogue from {self.drq_filename}') catalogue = Table.read(self.drq_filename, hdu='CATALOG') keep_columns = ['RA', 'DEC', 'Z', 'THING_ID', 'PLATE', 'MJD', 'FIBERID'] if ('Z' not in catalogue.colnames): if ('Z_VI' in catalogue.colnames): catalogue.rename_column('Z_VI', 'Z') self.logger.progress('Z not found (new DRQ >= DRQ14 style), using Z_VI (DRQ <= DRQ12)') else: raise QuasarCatalogueError(f'Error in reading DRQ Catalogue. No valid column for redshift found in {self.drq_filename}') w = np.ones(len(catalogue), dtype=bool) self.logger.progress(f'start : nb object in cat = {np.sum(w)}') w &= (catalogue['THING_ID'] > 0) self.logger.progress(f'and THING_ID > 0 : nb object in cat = {np.sum(w)}') w &= (catalogue['RA'] != catalogue['DEC']) self.logger.progress(f'and ra != dec : nb object in cat = {np.sum(w)}') w &= (catalogue['RA'] != 0.0) self.logger.progress(f'and ra != 0. : nb object in cat = {np.sum(w)}') w &= (catalogue['DEC'] != 0.0) self.logger.progress(f'and dec != 0. 
: nb object in cat = {np.sum(w)}') w &= (catalogue['Z'] >= self.z_min) self.logger.progress(f'and z >= {self.z_min} : nb object in cat = {np.sum(w)}') w &= (catalogue['Z'] < self.z_max) self.logger.progress(f'and z < {self.z_max} : nb object in cat = {np.sum(w)}') if ((not self.keep_bal) and (self.bi_max is None)): if ('BAL_FLAG_VI' in catalogue.colnames): self.bal_flag = catalogue['BAL_FLAG_VI'] w &= (self.bal_flag == 0) self.logger.progress(f'and BAL_FLAG_VI == 0 : nb object in cat = {np.sum(w)}') keep_columns += ['BAL_FLAG_VI'] else: self.logger.warning(f'BAL_FLAG_VI not found in {self.drq_filename}.') self.logger.ok_warning('Ignoring') if (self.bi_max is not None): if ('BI_CIV' in catalogue.colnames): bi = catalogue['BI_CIV'] w &= (bi <= self.bi_max) self.logger.progress(f'and BI_CIV <= {self.bi_max} : nb object in cat = {np.sum(w)}') keep_columns += ['BI_CIV'] else: raise QuasarCatalogueError("Error in reading DRQ Catalogue. 'BI max' was passed but field BI_CIV was not present in the HDU") if ('NHI' in catalogue.colnames): keep_columns += ['NHI'] np.radians(catalogue['RA'], out=catalogue['RA']) np.radians(catalogue['DEC'], out=catalogue['DEC']) catalogue.keep_columns(keep_columns) w = np.where(w)[0] catalogue = catalogue[w] return catalogue def read_spall(self, drq_catalogue): self.logger.progress(f'reading spAll from {self.spall}') try: catalogue = Table.read(self.spall, hdu=1) catalogue.keep_columns(['THING_ID', 'PLATE', 'MJD', 'FIBERID', 'PLATEQUALITY', 'ZWARNING']) except IOError as error: raise QuasarCatalogueError(f'Error in reading DRQ Catalogue. Error reading file {self.spall}. IOError message: {str(error)}') from error w = np.in1d(catalogue['THING_ID'], drq_catalogue['THING_ID']) self.logger.progress(f'Found {np.sum(w)} spectra with required THING_ID') w &= (catalogue['PLATEQUALITY'] == 'good') self.logger.progress(f"Found {np.sum(w)} spectra with 'good' plate") bad_z_warn_bit = {0: 'SKY', 1: 'LITTLE_COVERAGE', 7: 'UNPLUGGED', 8: 'BAD_TARGET', 9: 'NODATA'} for (z_warn_bit, z_warn_bit_name) in bad_z_warn_bit.items(): wbit = ((catalogue['ZWARNING'] & (2 ** z_warn_bit)) == 0) w &= wbit self.logger.progress(f'Found {np.sum(w)} spectra without {z_warn_bit} bit set: {z_warn_bit_name}') self.logger.progress(f'# unique objs: {len(drq_catalogue)}') self.logger.progress(f'# spectra: {w.sum()}') catalogue = catalogue[w] select_cols = [name for name in catalogue.colnames if (name not in ['PLATEQUALITY', 'ZWARNING'])] select_cols_drq = [name for name in drq_catalogue.colnames if (name not in ['PLATE', 'FIBERID', 'MJD'])] catalogue = join(catalogue[select_cols], drq_catalogue[select_cols_drq], join_type='left') return catalogue
def index_of_first_true(seq, default=None):
    return next((i for (i, x) in enumerate(seq) if x), default)
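# For illustration: the helper returns the index of the first truthy element,
# or the supplied default when there is none.
print(index_of_first_true([0, '', None, 7, 0]))         # 3
print(index_of_first_true([False, False], default=-1))  # -1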
def main(): parser = argparse.ArgumentParser(description='Deep Orientation Estimation') parser.add_argument('-c', '--config', default=DEFAULT_CONFIG, type=str) args = parser.parse_args() config_file = args.config assert os.path.exists(args.config), 'Config file {} does not exist'.format(args.config) with open(config_file) as fp: config = yaml.load(fp) if (not os.path.exists(config['train']['save_dir'])): os.makedirs(config['train']['save_dir']) device = torch.device((config['train']['device'] if torch.cuda.is_available() else 'cpu')) print('Using device: {}'.format(device)) num_channels = (config['train']['num_channels'] or 3) model_name = (config['train']['model'] or 'vgg11') num_classes = config['train'].get('num_outputs', None) model = modules.network.get_model(name=model_name, pretrained=True, num_channels=num_channels, num_classes=num_classes) model.to(device) print('Model name: {}'.format(model_name)) resume = config['train']['resume'] if resume: if os.path.isfile(resume): print('Loading checkpoint {}'.format(resume)) checkpoint = torch.load(resume) start_epoch = checkpoint['epoch'] model.load_state_dict(checkpoint['state_dict']) else: start_epoch = 0 print('No checkpoint found at {}'.format(resume)) else: start_epoch = 0 (train_dataset, test_dataset) = get_dataset(config) b_size = (config['train']['batch_size'] or 4) if (config['train']['device'] != 'cpu'): use_memory_pinning = True else: use_memory_pinning = False validationloader = torch.utils.data.DataLoader(test_dataset, batch_size=b_size, shuffle=True, num_workers=1, pin_memory=use_memory_pinning) trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=b_size, shuffle=True, num_workers=1, pin_memory=use_memory_pinning) print('batch size: {}'.format(b_size)) learning_rate = (config['train']['learning_rate'] or 0.0001) loss_function_name = config['train']['loss_function'] if ('loss_parameters' in config['train']): loss_parameters = config['train']['loss_parameters'] else: loss_parameters = None optimizer = optim.Adam(model.parameters(), lr=learning_rate) print(optimizer) writer_train = SummaryWriter('runs/{}/training'.format(config['train']['save_as'])) writer_val = SummaryWriter('runs/{}/validation'.format(config['train']['save_as'])) num_epochs = (config['train']['num_epochs'] or 2) print('Number of epochs: {}'.format(num_epochs)) if (loss_parameters is not None): loss_function = LOSS_FUNCTIONS[loss_function_name](**loss_parameters) else: loss_function = LOSS_FUNCTIONS[loss_function_name]() if ('floating_point_type' in config['train']): floating_point_type = config['train']['floating_point_type'] else: floating_point_type = 'float' trainer = Trainer(device, floating_point_type) for epoch in range(start_epoch, num_epochs): trainer.train_epoch(trainloader, model, loss_function, optimizer, epoch, writer_train, writer_val, validationloader) save_checkpoint({'epoch': (epoch + 1), 'state_dict': model.state_dict()}, filename=os.path.join(config['train']['save_dir'], 'checkpoint_{}_{}.tar'.format(model_name, epoch))) print('Finished training')
def load_ets_annot(corpus_fname, index_fname): ids = [] ys = [] with open(corpus_fname, 'r') as fin: for line in fin: ljson = json.loads(line) ids.append(ljson['id']) ys.append(ljson['y']) test_labels = pd.DataFrame({'id': ids, 'y': ys}) with open(index_fname, 'r') as fin: vals = pd.read_csv(fin) colnames = [('text_segment_id', str), ('sentence_number', str), ('sentence_offset', int), ('word_offset', int), ('subword_offset', int), ('verb', str)] for ((colname, coltype), series) in zip(colnames, zip(*vals.id.apply((lambda x: x.split('_'))))): vals[colname] = pd.Series(series).astype(coltype) (vals['bnc_file'], vals['bnc_file_n']) = zip(*vals.text_segment_id.apply(split_text_segment_id)) vals = vals.merge(test_labels, on='id') return vals
_audio_dataset_transform('noisyoverlapaugment') class NoisyOverlapAugment(AudioDatasetTransform): def from_config_dict(cls, config=None): _config = ({} if (config is None) else config) return NoisyOverlapAugment(_config.get('rate', _DEFAULTS['rate']), _config.get('mixing_noise_rate', _DEFAULTS['mixing_noise_rate']), _config.get('noise_path', _DEFAULTS['noise_path']), _config.get('noise_snr_min', _DEFAULTS['noise_snr_min']), _config.get('noise_snr_max', _DEFAULTS['noise_snr_max']), _config.get('utterance_snr_min', _DEFAULTS['utterance_snr_min']), _config.get('utterance_snr_max', _DEFAULTS['utterance_snr_max'])) def __init__(self, rate=_DEFAULTS['rate'], mixing_noise_rate=_DEFAULTS['mixing_noise_rate'], noise_path=_DEFAULTS['noise_path'], noise_snr_min=_DEFAULTS['noise_snr_min'], noise_snr_max=_DEFAULTS['noise_snr_max'], utterance_snr_min=_DEFAULTS['utterance_snr_min'], utterance_snr_max=_DEFAULTS['utterance_snr_max']): self.rate = rate self.mixing_noise_rate = mixing_noise_rate self.noise_shaper = NoiseAugmentTransform(noise_path) self.noise_snr_min = noise_snr_min self.noise_snr_max = noise_snr_max self.utterance_snr_min = utterance_snr_min self.utterance_snr_max = utterance_snr_max def __repr__(self): return (((self.__class__.__name__ + '(') + ', '.join([f'rate={self.rate}', f'mixing_noise_rate={self.mixing_noise_rate}', f'noise_snr_min={self.noise_snr_min}', f'noise_snr_max={self.noise_snr_max}', f'utterance_snr_min={self.utterance_snr_min}', f'utterance_snr_max={self.utterance_snr_max}'])) + ')') def __call__(self, sources): for (i, source) in enumerate(sources): if (np.random.random() > self.rate): continue pri = source.numpy() if (np.random.random() > self.mixing_noise_rate): sec = sources[np.random.randint(0, len(sources))].numpy() snr = rand_uniform(self.utterance_snr_min, self.utterance_snr_max) else: sec = self.noise_shaper.pick_sample(source.shape) snr = rand_uniform(self.noise_snr_min, self.noise_snr_max) L1 = pri.shape[(- 1)] L2 = sec.shape[(- 1)] l = np.random.randint(0, min(round((L1 / 2)), L2)) s_source = np.random.randint(0, (L1 - l)) s_sec = np.random.randint(0, (L2 - l)) get_power = (lambda x: np.mean((x ** 2))) if (get_power(sec) == 0): continue scl = np.sqrt((get_power(pri) / (np.power(10, (snr / 10)) * get_power(sec)))) pri[s_source:(s_source + l)] = np.add(pri[s_source:(s_source + l)], np.multiply(scl, sec[s_sec:(s_sec + l)])) sources[i] = torch.from_numpy(pri).float() return sources
class OptimConfig():
    seed: int = 0
    lr: float = 0.01
    min_timestep: float = 0.02
    max_timestep: float = 0.98
    no_noise: bool = False
_name('get_mat_slices')
def test_get_mat_slices_nonuniform(benchmark):
    get_mat_slices_runner(benchmark, uniform=False)
def save_retrofitting_lexicon_list(filename, graph, selected_relations, selected_sources): retrofitting_list = defaultdict(list) target_cuis = [] for (u, v, d) in graph.edges_iter(data=True): rel = d['rel'] source = d['sab'] if ((len(selected_relations) == 0) or ('all' in selected_relations) or (rel in selected_relations)): if ((len(selected_sources) == 0) or ('all' in selected_sources) or (source in selected_sources)): retrofitting_list[u].append(v) target_cuis.append(u) target_cuis.append(v) target_cuis = set(target_cuis) logging.info('Lexicon: %s', len(retrofitting_list)) logging.info('CUIs: %s', len(target_cuis)) nb_relations_before = 0 nb_relations_after = 0 with open(str(filename), 'w') as f: for (target_cui, related_cuis) in retrofitting_list.items(): nb_relations_before += len(related_cuis) related_cuis = set(related_cuis) nb_relations_after += len(related_cuis) row = '{} {}\n'.format(target_cui, ' '.join(related_cuis)) f.write(row) logging.info('Lexicon saved: %s, %s -> %s', filename.name, nb_relations_before, nb_relations_after) return target_cuis
def get_diapreresnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs): if (bottleneck is None): bottleneck = (blocks >= 50) if (blocks == 10): layers = [1, 1, 1, 1] elif (blocks == 12): layers = [2, 1, 1, 1] elif ((blocks == 14) and (not bottleneck)): layers = [2, 2, 1, 1] elif ((blocks == 14) and bottleneck): layers = [1, 1, 1, 1] elif (blocks == 16): layers = [2, 2, 2, 1] elif (blocks == 18): layers = [2, 2, 2, 2] elif ((blocks == 26) and (not bottleneck)): layers = [3, 3, 3, 3] elif ((blocks == 26) and bottleneck): layers = [2, 2, 2, 2] elif (blocks == 34): layers = [3, 4, 6, 3] elif ((blocks == 38) and bottleneck): layers = [3, 3, 3, 3] elif (blocks == 50): layers = [3, 4, 6, 3] elif (blocks == 101): layers = [3, 4, 23, 3] elif (blocks == 152): layers = [3, 8, 36, 3] elif (blocks == 200): layers = [3, 24, 36, 3] elif (blocks == 269): layers = [3, 30, 48, 8] else: raise ValueError('Unsupported DIA-PreResNet with number of blocks: {}'.format(blocks)) if bottleneck: assert (((sum(layers) * 3) + 2) == blocks) else: assert (((sum(layers) * 2) + 2) == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [(ci * bottleneck_factor) for ci in channels_per_layers] channels = [([ci] * li) for (ci, li) in zip(channels_per_layers, layers)] if (width_scale != 1.0): channels = [[(int((cij * width_scale)) if ((i != (len(channels) - 1)) or (j != (len(ci) - 1))) else cij) for (j, cij) in enumerate(ci)] for (i, ci) in enumerate(channels)] init_block_channels = int((init_block_channels * width_scale)) net = DIAPreResNet(channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if ((model_name is None) or (not model_name)): raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.') from .model_store import download_model download_model(net=net, model_name=model_name, local_model_store_dir_path=root) return net
def _calc_layout_distance(gates, coupling_map, layout, max_gates=None):
    if max_gates is None:
        max_gates = 50 + 10 * len(coupling_map.physical_qubits)
    return sum(coupling_map.distance(*[layout[q] for q in gate['partition'][0]])
               for gate in gates[:max_gates]
               if gate['partition'] and len(gate['partition'][0]) == 2)
_EMBEDDING_HEAD_REGISTRY.register() class EmbeddingRCNNASNetHead(nn.Module): def __init__(self, cfg, input_shape: ShapeSpec): super().__init__() num_conv = cfg.MODEL.ROI_EMBEDDING_HEAD.NUM_CONV conv_dim = cfg.MODEL.ROI_EMBEDDING_HEAD.CONV_DIM num_fc = cfg.MODEL.ROI_EMBEDDING_HEAD.NUM_FC fc_dim = cfg.MODEL.ROI_EMBEDDING_HEAD.FC_DIM emb_dim = cfg.MODEL.ROI_EMBEDDING_HEAD.EMBEDDING_DIM norm = cfg.MODEL.ROI_EMBEDDING_HEAD.NORM self._output_size_app = (input_shape.channels, input_shape.height, input_shape.width) self.app = [] for k in range(num_conv): conv = Conv2d(self._output_size_app[0], conv_dim, kernel_size=3, padding=1, bias=(not norm), norm=get_norm(norm, conv_dim), activation=F.relu) self.add_module('embedding_app{}'.format((k + 1)), conv) self.app.append(conv) self._output_size_app = (conv_dim, self._output_size_app[1], self._output_size_app[2]) self.fcs_app = [] for k in range(num_fc): fc = nn.Linear(np.prod(self._output_size_app), fc_dim) self.add_module('embedding_fc_app{}'.format((k + 1)), fc) self.fcs_app.append(fc) self._output_size_sur = (input_shape.channels, (input_shape.height * 2), (input_shape.width * 2)) self.sur = [] for k in range(num_conv): conv = Conv2d(self._output_size_sur[0], conv_dim, kernel_size=3, padding=1, bias=(not norm), norm=get_norm(norm, conv_dim), activation=F.relu) self.add_module('embedding_sur{}'.format((k + 1)), conv) self.sur.append(conv) self._output_size_sur = (conv_dim, self._output_size_sur[1], self._output_size_sur[2]) self.sur_pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) self.fcs_sur = [] for k in range(num_fc): fc = nn.Linear(np.prod(self._output_size_app), fc_dim) self.add_module('embedding_fc_sur{}'.format((k + 1)), fc) self.fcs_sur.append(fc) self.embedding_pred_app = nn.Linear(fc_dim, emb_dim) self.embedding_pred_sur = nn.Linear(fc_dim, emb_dim) for layer in self.app: weight_init.c2_msra_fill(layer) for layer in self.sur: weight_init.c2_msra_fill(layer) for layer in self.fcs_app: weight_init.c2_xavier_fill(layer) for layer in self.fcs_sur: weight_init.c2_xavier_fill(layer) def forward(self, x, instances): for layer in self.app: x['center'] = layer(x['center']) if len(self.fcs_app): if (x['center'].dim() > 2): x['center'] = torch.flatten(x['center'], start_dim=1) for layer in self.fcs_app: x['center'] = F.relu(layer(x['center'])) for layer in self.sur: x['surrnd'] = layer(x['surrnd']) x['surrnd'] = self.sur_pool(x['surrnd']) if len(self.fcs_sur): if (x['surrnd'].dim() > 2): x['surrnd'] = torch.flatten(x['surrnd'], start_dim=1) for layer in self.fcs_sur: x['surrnd'] = F.relu(layer(x['surrnd'])) x['center'] = self.embedding_pred_app(x['center']) x['surrnd'] = self.embedding_pred_sur(x['surrnd']) x['center'] = F.normalize(x['center'], p=2, dim=1) x['surrnd'] = F.normalize(x['surrnd'], p=2, dim=1) num_boxes_per_image = [len(i) for i in instances] center_embeddings = x['center'].split(num_boxes_per_image, dim=0) surrnd_embeddings = x['surrnd'].split(num_boxes_per_image, dim=0) for (c, s, instance) in zip(center_embeddings, surrnd_embeddings, instances): instance.embedding_c = c instance.embedding_s = s return instances def output_size(self): return self._output_size_sur def output_shape(self): o = self._output_size if isinstance(o, int): return ShapeSpec(channels=o) else: return ShapeSpec(channels=o[0], height=o[1], width=o[2])
_registry(pattern_type='SliceMask') class SliceMask(Pattern): def __call__(self, model): pattern_mapping_config = {'SliceMask': [{'patterns': {'in': [[(0, 'Shape'), (2, 'Sub'), (3, 'Int'), (6, 'Slice'), (7, 'Slice'), (8, 'Where'), (9, 'Add')], [(), (1, 'Shape'), (2, 'Sub')], [(), (4, 'Slice'), (5, 'Slice'), (6, 'Slice')]], 'out': [[(0, 'Slice'), (1, 'Slice'), (2, 'BinaryAdd')]]}, 'search_mode': 'op_type', 'node_names': {0: 4, 1: 6, 2: 9}, 'input_tensors': {0: [[{4: [0]}], [[0], 1]], 1: [[], [[], 1]], 2: [[{8: [1]}, {9: [1]}], [[0, 2], 3]]}, 'output_tensors': {0: [[], [[], 1]], 1: [[], [[], 1]], 2: [[{9: [0]}], [[0], 1]]}, 'returns': []}, {'patterns': {'in': [[(0, 'Shape'), (2, 'Sub'), (3, 'Slice'), (4, 'Slice'), (5, 'Where')], [(), (1, 'Shape'), (2, 'Sub')]], 'out': [[(0, 'SliceMask'), (1, 'BinaryAdd')]]}, 'search_mode': 'op_type', 'node_names': {0: 4, 1: 5}, 'input_tensors': {0: [[{3: [0]}, {'input_data': [0]}, {'input_data': [2]}], [[0, 1, 2], 3]], 1: [[{5: [1]}], [[0], 2]]}, 'output_tensors': {0: [[], [[], 1]], 1: [[{5: [0]}], [[0], 1]]}, 'returns': []}]} if (model.framework_modeling_config['framework'] != 'torch'): return model def _set_attr(new_node_names, ret_old_nodes, model): for i in range(len(new_node_names)): binary_node_idx = model.get_node_id(new_node_names[i][2]) attr = OrderedDict() model.nodes[binary_node_idx].attr = attr slice_node = model.get_node_by_name(new_node_names[i][0]) import numpy as np slice_node.input_tensors[0].data = np.array(slice_node.input_tensors[0].data, dtype=np.float32) slice_node.input_tensors[0].data.dtype = np.float32 slice_node.input_tensors[0].data[np.where((slice_node.input_tensors[0].data == 0))] = (- 10000) attr_slice1 = OrderedDict() attr_slice1['starts'] = 0 attr_slice1['ends'] = 128 attr_slice1['axes'] = 2 attr_slice1['steps'] = 1 slice_node.attr = attr_slice1 attr_slice2 = OrderedDict() attr_slice2['starts'] = 0 attr_slice2['ends'] = 128 attr_slice2['axes'] = 3 attr_slice2['steps'] = 1 model.get_node_by_name(new_node_names[i][1]).attr = attr_slice2 pattern_dict = pattern_mapping_config['SliceMask'][0] (model, new_node_names, ret_old_nodes) = util.pattern_mapping('SliceMask', pattern_dict, model) if (len(new_node_names) != 0): _set_attr(new_node_names, ret_old_nodes, model) def _set_attr1(new_node_names, ret_old_nodes, model): for i in range(len(new_node_names)): binary_node_idx = model.get_node_id(new_node_names[i][1]) attr = OrderedDict() model.nodes[binary_node_idx].attr = attr slice_node = model.get_node_by_name(new_node_names[i][0]) import numpy as np slice_node.input_tensors[0].data = np.array(slice_node.input_tensors[0].data, dtype=np.float32) slice_node.input_tensors[0].data.dtype = np.float32 slice_node.input_tensors[0].data[np.where((slice_node.input_tensors[0].data == 0))] = (- 1600000) slice_node.input_tensors[0].data[np.where((slice_node.input_tensors[0].data == 1))] = 0 attr_slice1 = OrderedDict() attr_slice1['starts'] = 0 attr_slice1['ends_with_tensor'] = 1 attr_slice1['ends_with_tensor_alg'] = 'sub' attr_slice1['axes'] = '2, 3' attr_slice1['steps'] = 1 slice_node.attr = attr_slice1 pattern_dict = pattern_mapping_config['SliceMask'][1] (model, new_node_names, ret_old_nodes) = util.pattern_mapping('SliceMask', pattern_dict, model) if (len(new_node_names) != 0): _set_attr1(new_node_names, ret_old_nodes, model) return model
class Preprocessing(object):

    def __init__(self, type):
        self.type = type
        if (type == '2d'):
            self.norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        elif (type == '3d'):
            self.norm = Normalize(mean=[110.6, 103.2, 96.3], std=[1.0, 1.0, 1.0])

    def _zero_pad(self, tensor, size):
        n = (size - (len(tensor) % size))
        if (n == size):
            return tensor
        else:
            z = th.zeros(n, tensor.shape[1], tensor.shape[2], tensor.shape[3])
            return th.cat((tensor, z), 0)

    def __call__(self, tensor):
        if (self.type == '2d'):
            tensor = (tensor / 255.0)
            tensor = self.norm(tensor)
        elif (self.type == '3d'):
            tensor = self.norm(tensor)
        return tensor
class MPTAdapter(BaseAdapter):

    def match(self, model_path: str):
        return ('mpt' in model_path)

    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
        model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, max_seq_len=8192, **from_pretrained_kwargs)
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=True)
        return (model, tokenizer)

    def get_default_conv_template(self, model_path: str) -> Conversation:
        return get_conv_template('mpt')
class TFMobileViTEncoder(tf.keras.layers.Layer):

    def __init__(self, config: MobileViTConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.layers = []
        dilate_layer_4 = dilate_layer_5 = False
        if (config.output_stride == 8):
            dilate_layer_4 = True
            dilate_layer_5 = True
        elif (config.output_stride == 16):
            dilate_layer_5 = True
        dilation = 1
        layer_1 = TFMobileViTMobileNetLayer(config, in_channels=config.neck_hidden_sizes[0], out_channels=config.neck_hidden_sizes[1], stride=1, num_stages=1, name='layer.0')
        self.layers.append(layer_1)
        layer_2 = TFMobileViTMobileNetLayer(config, in_channels=config.neck_hidden_sizes[1], out_channels=config.neck_hidden_sizes[2], stride=2, num_stages=3, name='layer.1')
        self.layers.append(layer_2)
        layer_3 = TFMobileViTLayer(config, in_channels=config.neck_hidden_sizes[2], out_channels=config.neck_hidden_sizes[3], stride=2, hidden_size=config.hidden_sizes[0], num_stages=2, name='layer.2')
        self.layers.append(layer_3)
        if dilate_layer_4:
            dilation *= 2
        layer_4 = TFMobileViTLayer(config, in_channels=config.neck_hidden_sizes[3], out_channels=config.neck_hidden_sizes[4], stride=2, hidden_size=config.hidden_sizes[1], num_stages=4, dilation=dilation, name='layer.3')
        self.layers.append(layer_4)
        if dilate_layer_5:
            dilation *= 2
        layer_5 = TFMobileViTLayer(config, in_channels=config.neck_hidden_sizes[4], out_channels=config.neck_hidden_sizes[5], stride=2, hidden_size=config.hidden_sizes[2], num_stages=3, dilation=dilation, name='layer.4')
        self.layers.append(layer_5)

    def call(self, hidden_states: tf.Tensor, output_hidden_states: bool=False, return_dict: bool=True, training: bool=False) -> Union[(tuple, TFBaseModelOutput)]:
        all_hidden_states = (() if output_hidden_states else None)
        for (i, layer_module) in enumerate(self.layers):
            hidden_states = layer_module(hidden_states, training=training)
            if output_hidden_states:
                all_hidden_states = (all_hidden_states + (hidden_states,))
        if (not return_dict):
            return tuple((v for v in [hidden_states, all_hidden_states] if (v is not None)))
        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
@dataclass
class SLTopicDetectionTransformerConfig_alvaro(FairseqDataclass):
    subsample_input: bool = field(default=False, metadata={'help': 'if True subsample inputs along index (temporal) dimension'})
    conv_kernel_sizes: str = field(default='5,5', metadata={'help': 'kernel sizes of Conv1d subsampling layers'})
    conv_strides: str = field(default='3,3', metadata={'help': 'stride of Conv1d subsampling layers'})
    conv_channels: int = field(default=1024, metadata={'help': '# of channels in Conv1d subsampling layers'})
    activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(default='relu', metadata={'help': 'activation function to use'})
    dropout: float = field(default=0.1, metadata={'help': 'dropout probability'})
    attention_dropout: float = field(default=0.1, metadata={'help': 'dropout probability for attention weights'})
    activation_dropout: float = field(default=0.1, metadata={'help': 'dropout probability after activation in FFN.'})
    encoder_embed_dim: int = field(default=512, metadata={'help': 'encoder embedding dimension'})
    encoder_ffn_embed_dim: int = field(default=2048, metadata={'help': 'encoder embedding dimension for FFN'})
    encoder_layers: int = field(default=12, metadata={'help': 'num encoder layers'})
    encoder_attention_heads: Optional[int] = field(default=None, metadata={'help': 'num encoder attention heads'})
    encoder_normalize_before: bool = field(default=True, metadata={'help': 'apply layernorm before each encoder block'})
    load_pretrained_encoder_from: Optional[str] = field(default=None, metadata={'help': 'model to take encoder weights from (for initialization)'})
    encoder_freezing_updates: int = field(default=0, metadata={'help': 'freeze encoder for first N updates'})
    feats_type: ChoiceEnum([x.name for x in SignFeatsType]) = II('task.feats_type')
    body_parts: str = II('task.body_parts')
    feat_dims: str = II('task.feat_dims')
    max_source_positions: int = II('task.max_source_positions')
def _focal(y_true, y_pred, alpha=0.25, gamma=2, axis=(- 1)):
    epsilon_ = constant_op.constant(K.epsilon(), dtype=y_pred.dtype.base_dtype)
    y_pred = clip_ops.clip_by_value(y_pred, epsilon_, (1 - epsilon_))
    fl = (((y_true * tf.math.pow((1 - y_pred), gamma)) * alpha) * math_ops.log((y_pred + K.epsilon())))
    fl += ((((1 - y_true) * tf.math.pow(y_pred, gamma)) * (1 - alpha)) * math_ops.log(((1 - y_pred) + K.epsilon())))
    return (- fl)
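A minimal usage sketch for the focal-loss helper above; the tensor values are made up, the reduction step is an assumption, and the snippet relies on the TensorFlow/Keras ops imported in the helper's original module.

import tensorflow as tf

y_true = tf.constant([[1.0, 0.0], [0.0, 1.0]])
y_pred = tf.constant([[0.9, 0.2], [0.3, 0.8]])
per_entry = _focal(y_true, y_pred, alpha=0.25, gamma=2)  # element-wise focal terms, shape (2, 2)
loss = tf.reduce_mean(per_entry)  # reduce however the surrounding training loop expects (assumed here)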
def standard_to_metadata_format(standard_record, filenames):
    all_rows = []
    images = standard_record['images']
    standard_patient = standard_record['patient']
    for (image, filename) in zip(images, filenames):
        patient_row = {}
        for (key, value) in dictionary_walk(standard_patient):
            print(key, value)
            patient_row[key] = value
        patient_row['clinical_notes'] = ((string_or_empty(patient_row.pop('clinical_history')) + ' ') + image['image_description'])
        patient_row.update(standard_record['document'])
        modality = image['modality']
        if (modality == 'X-ray'):
            folder = 'images'
        elif (modality == 'CT'):
            folder = 'volumes'
        else:
            raise ValueError
        patient_row['modality'] = modality
        patient_row['folder'] = folder
        patient_row['filename'] = filename
        print(patient_row)
        all_rows.append(patient_row)
    return all_rows
def copy_files(directory_):
    listdir = os.listdir()
    newdir = os.getcwd()
    for i in tqdm(range(len(listdir)), desc=newdir):
        if (listdir[i].endswith('.json') or listdir[i].endswith('.wav')):
            shutil.copy(((os.getcwd() + '/') + listdir[i]), ((directory_ + '/') + listdir[i]))
def nested_simplify(obj, decimals=3):
    import numpy as np
    if isinstance(obj, list):
        return [nested_simplify(item, decimals) for item in obj]
    if isinstance(obj, tuple):
        return tuple([nested_simplify(item, decimals) for item in obj])
    elif isinstance(obj, np.ndarray):
        return nested_simplify(obj.tolist())
    elif isinstance(obj, Mapping):
        return {nested_simplify(k, decimals): nested_simplify(v, decimals) for (k, v) in obj.items()}
    elif isinstance(obj, (str, int, np.int64)):
        return obj
    elif (obj is None):
        return obj
    elif (is_torch_available() and isinstance(obj, torch.Tensor)):
        return nested_simplify(obj.tolist(), decimals)
    elif (is_tf_available() and tf.is_tensor(obj)):
        return nested_simplify(obj.numpy().tolist())
    elif isinstance(obj, float):
        return round(obj, decimals)
    elif isinstance(obj, (np.int32, np.float32)):
        return nested_simplify(obj.item(), decimals)
    else:
        raise Exception(f'Not supported: {type(obj)}')
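A hedged usage sketch for nested_simplify; it assumes the helper's original module-level imports (Mapping, the optional torch/tf availability checks) are in scope, and the payload values are purely illustrative.

import numpy as np

payload = {'scores': np.array([0.123456, 0.654321]), 'label': 'cat', 'count': np.int64(3)}
print(nested_simplify(payload))
# -> {'scores': [0.123, 0.654], 'label': 'cat', 'count': 3}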
class args():
    std = 0.1
    num_mem = 10
    embedding_size = 50
    constraint = True
    rnn_type = set(['PAIR'])
    margin = 0.1
    topic_num = topic_num
    l2_reg = 1e-05
    opt = 'SGD'
    clip_norm = 2
    dropout = 0.7
    learn_rate = 0.01
    max_p_num = 100
    stddev = 0.1
    lamb_m = 0.1
    lamb_d = 0.1
    ratio1 = 0.1
    ratio2 = 0.1
    init_method = 'normal'
def test_osipkovmerritt_selfconsist_dehnencore_sigmar():
    pot = potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15)
    ras = [2.3, 5.7]
    for (ra, dfh) in zip(ras, osipkovmerritt_dfs_selfconsist):
        numpy.random.seed(10)
        samp = dfh.sample(n=300000)
        tol = 0.1
        check_sigmar_against_jeans(samp, pot, tol, beta=(lambda r: (1.0 / (1.0 + ((ra ** 2.0) / (r ** 2.0))))), rmin=(pot._scale / 10.0), rmax=(pot._scale * 10.0), bins=31)
    return None
class nnUNetTrainerV2(nnUNetTrainer): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.max_num_epochs = 300 self.initial_lr = 0.0001 self.deep_supervision_scales = None self.ds_loss_weights = None self.pin_memory = True def initialize(self, training=True, force_load_plans=False): if (not self.was_initialized): maybe_mkdir_p(self.output_folder) if (force_load_plans or (self.plans is None)): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() net_numpool = len(self.net_num_pool_op_kernel_sizes) weights = np.array([(1 / (2 ** i)) for i in range(net_numpool)]) mask = np.array(([True] + [(True if (i < (net_numpool - 1)) else False) for i in range(1, net_numpool)])) weights[(~ mask)] = 0 weights = (weights / weights.sum()) self.ds_loss_weights = weights self.loss = BoundaryEqualCELoss() self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) self.folder_with_preprocessed_data = join(self.dataset_directory, (self.plans['data_identifier'] + ('_stage%d' % self.stage))) if training: (self.dl_tr, self.dl_val) = self.get_basic_generators() if self.unpack_data: print('unpacking dataset') unpack_dataset(self.folder_with_preprocessed_data) print('done') else: print('INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you will wait all winter for your model to finish!') (self.tr_gen, self.val_gen) = get_moreDA_augmentation(self.dl_tr, self.dl_val, self.data_aug_params['patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory) self.print_to_log_file(('TRAINING KEYS:\n %s' % str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file(('VALIDATION KEYS:\n %s' % str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-05, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 0.01, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, (lambda x: x), InitWeights_He(0.01), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper def initialize_optimizer_and_scheduler(self): assert (self.network is not None), 'self.initialize_network must be called first' self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay) self.lr_scheduler = None def run_online_evaluation(self, output, target): target = target[0] output = output[0] return 
super().run_online_evaluation(output, target) def validate(self, do_mirroring: bool=True, use_sliding_window: bool=True, step_size: float=0.5, save_softmax: bool=True, use_gaussian: bool=True, overwrite: bool=True, validation_folder_name: str='validation_raw', debug: bool=False, all_in_gpu: bool=False, segmentation_export_kwargs: dict=None): ds = self.network.do_ds self.network.do_ds = False ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian, overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug, all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs) self.network.do_ds = ds return ret def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool=True, mirror_axes: Tuple[int]=None, use_sliding_window: bool=True, step_size: float=0.5, use_gaussian: bool=True, pad_border_mode: str='constant', pad_kwargs: dict=None, all_in_gpu: bool=False, verbose: bool=True, mixed_precision=True) -> Tuple[(np.ndarray, np.ndarray)]: ds = self.network.do_ds self.network.do_ds = False ret = super().predict_preprocessed_data_return_seg_and_softmax(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, use_gaussian=use_gaussian, pad_border_mode=pad_border_mode, pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose, mixed_precision=mixed_precision) self.network.do_ds = ds return ret def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False): data_dict = next(data_generator) data = data_dict['data'] target = data_dict['target'] data = maybe_to_torch(data) target = maybe_to_torch(target) if torch.cuda.is_available(): data = to_cuda(data) target = to_cuda(target) self.optimizer.zero_grad() if self.fp16: with autocast(): output = self.network(data) del data l = self.loss(output, target, self.epoch) if do_backprop: self.amp_grad_scaler.scale(l).backward() self.amp_grad_scaler.unscale_(self.optimizer) torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) self.amp_grad_scaler.step(self.optimizer) self.amp_grad_scaler.update() else: output = self.network(data) del data l = self.loss(output, target) if do_backprop: l.backward() torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) self.optimizer.step() if run_online_evaluation: self.run_online_evaluation(output, target) del target return l.detach().cpu().numpy() def do_split(self): if (self.fold == 'all'): tr_keys = val_keys = list(self.dataset.keys()) else: splits_file = join(self.dataset_directory, 'splits_final.pkl') if (not isfile(splits_file)): self.print_to_log_file('Creating new split...') splits = [] all_keys_sorted = np.sort(list(self.dataset.keys())) kfold = KFold(n_splits=5, shuffle=True, random_state=12345) for (i, (train_idx, test_idx)) in enumerate(kfold.split(all_keys_sorted)): train_keys = np.array(all_keys_sorted)[train_idx] test_keys = np.array(all_keys_sorted)[test_idx] splits.append(OrderedDict()) splits[(- 1)]['train'] = train_keys splits[(- 1)]['val'] = test_keys save_pickle(splits, splits_file) splits = load_pickle(splits_file) if (self.fold < len(splits)): tr_keys = splits[self.fold]['train'] val_keys = splits[self.fold]['val'] else: self.print_to_log_file(('INFO: Requested fold %d but split file only has %d folds. I am now creating a random 80:20 split!' 
% (self.fold, len(splits)))) rnd = np.random.RandomState(seed=(12345 + self.fold)) keys = np.sort(list(self.dataset.keys())) idx_tr = rnd.choice(len(keys), int((len(keys) * 0.8)), replace=False) idx_val = [i for i in range(len(keys)) if (i not in idx_tr)] tr_keys = [keys[i] for i in idx_tr] val_keys = [keys[i] for i in idx_val] tr_keys.sort() val_keys.sort() self.dataset_tr = OrderedDict() for i in tr_keys: self.dataset_tr[i] = self.dataset[i] self.dataset_val = OrderedDict() for i in val_keys: self.dataset_val[i] = self.dataset[i] def setup_DA_params(self): self.deep_supervision_scales = ([[1, 1, 1]] + list((list(i) for i in (1 / np.cumprod(np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))))[:(- 1)]) if self.threeD: self.data_aug_params = default_3D_augmentation_params self.data_aug_params['rotation_x'] = (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi)) self.data_aug_params['rotation_y'] = (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi)) self.data_aug_params['rotation_z'] = (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi)) if self.do_dummy_2D_aug: self.data_aug_params['dummy_2D'] = True self.print_to_log_file('Using dummy2d data augmentation') self.data_aug_params['elastic_deform_alpha'] = default_2D_augmentation_params['elastic_deform_alpha'] self.data_aug_params['elastic_deform_sigma'] = default_2D_augmentation_params['elastic_deform_sigma'] self.data_aug_params['rotation_x'] = default_2D_augmentation_params['rotation_x'] else: self.do_dummy_2D_aug = False if ((max(self.patch_size) / min(self.patch_size)) > 1.5): default_2D_augmentation_params['rotation_x'] = (((((- 15.0) / 360) * 2.0) * np.pi), (((15.0 / 360) * 2.0) * np.pi)) self.data_aug_params = default_2D_augmentation_params self.data_aug_params['mask_was_used_for_normalization'] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array(([self.patch_size[0]] + list(self.basic_generator_patch_size))) patch_size_for_spatialtransform = self.patch_size[1:] else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) patch_size_for_spatialtransform = self.patch_size self.data_aug_params['scale_range'] = (0.7, 1.4) self.data_aug_params['do_elastic'] = False self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform self.data_aug_params['num_cached_per_thread'] = 2 def maybe_update_lr(self, epoch=None): if (epoch is None): ep = (self.epoch + 1) else: ep = epoch self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9) self.print_to_log_file('lr:', np.round(self.optimizer.param_groups[0]['lr'], decimals=6)) def on_epoch_end(self): super().on_epoch_end() continue_training = (self.epoch < self.max_num_epochs) if (self.epoch == 100): if (self.all_val_eval_metrics[(- 1)] == 0): self.optimizer.param_groups[0]['momentum'] = 0.95 self.network.apply(InitWeights_He(0.01)) self.print_to_log_file('At epoch 100, the mean foreground Dice was 0. This can be caused by a too high momentum. 
High momentum (0.99) is good for datasets where it works, but sometimes causes issues such as this one. Momentum has now been reduced to 0.95 and network weights have been reinitialized') return continue_training def run_training(self): self.maybe_update_lr(self.epoch) ds = self.network.do_ds self.network.do_ds = True ret = super().run_training() self.network.do_ds = ds return ret
def collect_images(rootpath: str) -> List[str]:
    return [os.path.join(rootpath, f) for f in os.listdir(rootpath) if (os.path.splitext(f)[(- 1)].lower() in IMG_EXTENSIONS)]
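A hedged usage sketch; IMG_EXTENSIONS is defined elsewhere in the original source, so a stand-in value is supplied here purely for illustration, as is the directory path.

import os
from typing import List

IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.bmp')  # stand-in; the real set lives in the source module

for image_path in collect_images('./frames'):
    print(image_path)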
def hard_state_selection(attn_inds, hidden, batch_size, attn_length):
    batch_inds = tf.transpose(tf.pack([tf.to_int64(tf.range(batch_size)), tf.to_int64(attn_inds)]))
    align_index = tf.to_float(tf.sparse_to_dense(batch_inds, tf.to_int64(tf.pack([batch_size, attn_length])), 1))
    attns = tf.reduce_sum((hidden * tf.reshape(align_index, [(- 1), attn_length, 1, 1])), [1, 2])
    return attns
class EasyOCRReader(OCRReader):

    def __init__(self):
        super().__init__()
        self.reader = None

    def apply_ocr(self, image: 'Image.Image') -> Tuple[(List[str], List[List[int]])]:
        if (not self.reader):
            self.reader = easyocr.Reader(['en'])
        data = self.reader.readtext(np.array(image))
        (boxes, words, acc) = list(map(list, zip(*data)))
        irrelevant_indices = set((idx for (idx, word) in enumerate(words) if (not word.strip())))
        words = [word for (idx, word) in enumerate(words) if (idx not in irrelevant_indices)]
        boxes = [coords for (idx, coords) in enumerate(boxes) if (idx not in irrelevant_indices)]
        actual_boxes = [(tl + br) for (tl, tr, br, bl) in boxes]
        return (words, actual_boxes)

    @staticmethod
    def _check_if_available():
        if (not OCR_AVAILABLE['easyocr']):
            raise NoOCRReaderFound('Unable to use easyocr (OCR will be unavailable). Install easyocr to process images with OCR.')
class ContrastiveProjectorWrapper(_ProjectorWrapperBase): def __init__(self): super().__init__() self.__index = 0 def _register_global_projector(self, *, feature_name: str, head_type: str, output_dim: int=256, normalize=True, pool_name: str): _max_channel = get_config(scope='base')['Arch'].get('max_channel', None) input_dim = get_channel_dim(feature_name, max_channel=_max_channel) projector = ProjectionHead(input_dim=input_dim, head_type=head_type, normalize=normalize, pool_name=pool_name, output_dim=output_dim) self._projectors[f'{self.__index}|{feature_name}'] = projector self.__index += 1 def _register_dense_projector(self, *, feature_name: str, output_dim: int=64, head_type: str, normalize: bool=False, pool_name='adaptive_avg', spatial_size=(16, 16), **kwargs): _max_channel = get_config(scope='base')['Arch'].get('max_channel', None) input_dim = get_channel_dim(feature_name, max_channel=_max_channel) projector = DenseProjectionHead(input_dim=input_dim, output_dim=output_dim, head_type=head_type, normalize=normalize, pool_name=pool_name, spatial_size=spatial_size) self._projectors[f'{self.__index}|{feature_name}'] = projector self.__index += 1 def register_global_projector(self, *, feature_names: Union[(str, List[str])], head_type: Union[(str, List[str])]='mlp', output_dim: Union[(int, List[int])]=256, normalize: Union[(bool, List[bool])]=True, pool_name: Union[(str, List[str])]='adaptive_avg', **kwargs): if isinstance(feature_names, str): feature_names = [feature_names] self._global_feature_names = feature_names n = len(self._global_feature_names) n_pair = _nlist(n) head_type_ = n_pair(head_type) normalize_ = n_pair(normalize) pool_name_ = n_pair(pool_name) output_dim_ = n_pair(output_dim) for (i, (f, h, n, p, o)) in enumerate(zip(feature_names, head_type_, normalize_, pool_name_, output_dim_)): self._register_global_projector(feature_name=f, head_type=h, output_dim=o, normalize=n, pool_name=p) def register_dense_projector(self, *, feature_names: str, output_dim: int=64, head_type: str, normalize: bool=False, pool_name='adaptive_avg', spatial_size=(16, 16), **kwargs): if isinstance(feature_names, str): feature_names = [feature_names] self._dense_feature_names = feature_names n = len(self._dense_feature_names) n_pair = _nlist(n) head_type_ = n_pair(head_type) normalize_ = n_pair(normalize) pool_name_ = n_pair(pool_name) output_dim_ = n_pair(output_dim) spatial_size_ = n_pair(spatial_size) for (i, (f, h, n, p, o, s)) in enumerate(zip(feature_names, head_type_, normalize_, pool_name_, output_dim_, spatial_size_)): self._register_dense_projector(feature_name=f, head_type=h, output_dim=o, normalize=n, pool_name=p, spatial_size=s)
class SliceableMixin(Protocol): dynamic_parents: Dict[(IdType, Set['Symbol'])] dynamic_children: Dict[(IdType, Set['Symbol'])] static_parents: Dict[(IdType, Set['Symbol'])] static_children: Dict[(IdType, Set['Symbol'])] dangling_dynamic_parents: Dict[(IdType, Set['Symbol'])] dangling_dynamic_children: Dict[(IdType, Set['Symbol'])] dangling_static_parents: Dict[(IdType, Set['Symbol'])] dangling_static_children: Dict[(IdType, Set['Symbol'])] def current(cls) -> 'SliceableMixin': ... def at_timestamp(cls, ts: TimestampOrCounter, stmt_num: Optional[int]=None) -> 'SliceableMixin': ... def from_timestamp(cls, ts: TimestampOrCounter, stmt_num: Optional[int]=None) -> 'SliceableMixin': return cls.at_timestamp(ts, stmt_num=stmt_num) def from_id(cls, sid: IdType) -> 'SliceableMixin': ... def from_id_nullable(cls, sid: IdType) -> Optional['SliceableMixin']: ... def timestamp(self) -> Timestamp: ... def id(self) -> Union[(str, int)]: ... def prev(self) -> Optional['SliceableMixin']: ... def text(self) -> str: ... def is_current(self) -> bool: return True def _from_ref(cls, parent_ref: SliceRefType) -> 'SliceableMixin': if isinstance(parent_ref, Timestamp): return cls.at_timestamp(parent_ref) elif isinstance(parent_ref, (int, str)): return cls.from_id(parent_ref) else: return parent_ref def add_parent_edges(self, parent_ref: SliceRefType, syms: Set['Symbol']) -> None: if (not syms): return parent = self._from_ref(parent_ref) pid = parent.id if (pid in self.children): return if (pid == self.id): if (self.prev is not None): for _ in iter_dangling_contexts(): for (prev_pid, prev_syms) in self.prev.parents.items(): common = (syms & prev_syms) if common: self.parents.setdefault(prev_pid, set()).update(common) return with dangling_context((not parent.is_current)): self.parents.setdefault(pid, set()).update(syms) parent.children.setdefault(self.id, set()).update(syms) def add_parent_edge(self, parent_ref: SliceRefType, sym: 'Symbol') -> None: self.add_parent_edges(parent_ref, {sym}) def remove_parent_edges(self, parent_ref: SliceRefType, syms: Set['Symbol']) -> None: if (not syms): return parent = self._from_ref(parent_ref) pid = parent.id with dangling_context((not parent.is_current)): for (edges, eid) in ((self.parents, pid), (parent.children, self.id)): sym_edges = edges.get(eid, set()) if (not sym_edges): continue sym_edges.difference_update(syms) if (not sym_edges): del edges[eid] def remove_parent_edge(self, parent_ref: SliceRefType, sym: 'Symbol') -> None: self.remove_parent_edges(parent_ref, {sym}) def replace_parent_edges(self, prev_parent_ref: SliceRefType, new_parent_ref: SliceRefType) -> None: prev_parent = self._from_ref(prev_parent_ref) new_parent = self._from_ref(new_parent_ref) with dangling_context((not prev_parent.is_current)): syms = self.parents.pop(prev_parent.id) prev_parent.children.pop(self.id) with dangling_context((not new_parent.is_current)): self.parents.setdefault(new_parent.id, set()).update(syms) new_parent.children.setdefault(self.id, set()).update(syms) def replace_child_edges(self, prev_child_ref: SliceRefType, new_child_ref: SliceRefType) -> None: prev_child = self._from_ref(prev_child_ref) new_child = self._from_ref(new_child_ref) with dangling_context((not prev_child.is_current)): syms = self.children.pop(prev_child.id) prev_child.parents.pop(self.id) with dangling_context((not new_child.is_current)): self.children.setdefault(new_child.id, set()).update(syms) new_child.parents.setdefault(self.id, set()).update(syms) def parents(self) -> Dict[(IdType, Set['Symbol'])]: ctx = 
slicing_ctx_var.get() dangling_ctx = dangling_ctx_var.get() if (ctx == SlicingContext.DYNAMIC): return (self.dangling_dynamic_parents if dangling_ctx else self.dynamic_parents) elif (ctx == SlicingContext.STATIC): return (self.dangling_static_parents if dangling_ctx else self.static_parents) flow_ = flow() assert (not flow_.is_test) settings = flow_.mut_settings parents: Dict[(IdType, Set['Symbol'])] = {} for _ in settings.iter_slicing_contexts(): for (pid, syms) in self.parents.items(): parents.setdefault(pid, set()).update(syms) return parents def parents(self, new_parents: Dict[(IdType, Set['Symbol'])]) -> None: ctx = slicing_ctx_var.get() dangling_ctx = dangling_ctx_var.get() assert (ctx is not None) if (ctx == SlicingContext.DYNAMIC): if dangling_ctx: self.dangling_dynamic_parents = new_parents else: self.dynamic_parents = new_parents elif (ctx == SlicingContext.STATIC): if dangling_ctx: self.dangling_static_parents = new_parents else: self.static_parents = new_parents else: assert False def children(self) -> Dict[(IdType, Set['Symbol'])]: ctx = slicing_ctx_var.get() dangling_ctx = dangling_ctx_var.get() if (ctx == SlicingContext.DYNAMIC): return (self.dangling_dynamic_children if dangling_ctx else self.dynamic_children) elif (ctx == SlicingContext.STATIC): return (self.dangling_static_children if dangling_ctx else self.static_children) flow_ = flow() assert (not flow_.is_test) settings = flow_.mut_settings children: Dict[(IdType, Set['Symbol'])] = {} for _ in settings.iter_slicing_contexts(): for (pid, syms) in self.children.items(): children.setdefault(pid, set()).update(syms) return children def children(self, new_children: Dict[(IdType, Set['Symbol'])]) -> None: ctx = slicing_ctx_var.get() dangling_ctx = dangling_ctx_var.get() assert (ctx is not None) if (ctx == SlicingContext.DYNAMIC): if dangling_ctx: self.dangling_dynamic_children = new_children else: self.dynamic_children = new_children elif (ctx == SlicingContext.STATIC): if dangling_ctx: self.dangling_static_children = new_children else: self.static_children = new_children else: assert False def _make_slice_helper(self, closure: Set['SliceableMixin']) -> None: if (self in closure): return closure.add(self) for _ in flow().mut_settings.iter_slicing_contexts(): for pid in self.parents.keys(): parent = self.from_id(pid) while (parent.timestamp > self.timestamp): if getattr(parent, 'override', False): break parent = parent.prev parent._make_slice_helper(closure) def make_slice(self) -> List['SliceableMixin']: return self.make_multi_slice([self]) def make_multi_slice(cls, seeds: Iterable[Union[(TimestampOrCounter, 'SliceableMixin')]], seed_only: bool=False) -> List['SliceableMixin']: closure: Set['SliceableMixin'] = set() for seed in seeds: slice_seed = (cls.at_timestamp(seed) if isinstance(seed, (Timestamp, int)) else seed) if seed_only: closure.add(slice_seed) else: slice_seed._make_slice_helper(closure) return sorted(closure, key=(lambda dep: dep.timestamp)) def make_cell_dict_from_closure(closure: Sequence['SliceableMixin']) -> Dict[(int, str)]: slice_text_by_cell_num: Dict[(int, List[str])] = {} for sliceable in closure: slice_text_by_cell_num.setdefault(sliceable.timestamp.cell_num, []).append(sliceable.text) return {cell_num: '\n'.join(text) for (cell_num, text) in slice_text_by_cell_num.items()} def make_cell_dict_multi_slice(cls, seeds: Iterable[Union[(TimestampOrCounter, 'SliceableMixin')]], seed_only: bool=False) -> Dict[(int, str)]: return cls.make_cell_dict_from_closure(cls.make_multi_slice(seeds, 
seed_only=seed_only)) def make_cell_dict_slice(self) -> Dict[(int, str)]: return self.make_cell_dict_multi_slice([self]) def _process_memoized_seeds(seeds: Iterable[Union[(TimestampOrCounter, 'SliceableMixin')]]) -> Set[TimestampOrCounter]: processed_seeds: Set[TimestampOrCounter] = set() for seed in seeds: if (not isinstance(seed, (Timestamp, int))): seed = seed.timestamp assert isinstance(seed, (Timestamp, int)) mem_ctr = cells().at_timestamp(seed).skipped_due_to_memoization_ctr if (mem_ctr == (- 1)): processed_seeds.add(seed) elif isinstance(seed, int): processed_seeds.add(mem_ctr) else: processed_seeds.add(Timestamp(mem_ctr, seed.stmt_num)) return processed_seeds def format_multi_slice(cls, seeds: Iterable[Union[(TimestampOrCounter, 'SliceableMixin')]], blacken: bool=True, seed_only: bool=False, format_type: Optional[Type[FormatType]]=None) -> Slice: seeds = cls._process_memoized_seeds(seeds) return format_slice(cls.make_cell_dict_multi_slice(seeds, seed_only=seed_only), blacken=blacken, format_type=format_type) def format_slice(self, blacken: bool=True, seed_only: bool=False, format_type: Optional[Type[FormatType]]=None) -> Slice: return self.format_multi_slice([self], blacken=blacken, seed_only=seed_only, format_type=format_type)
def process_csv(filename):
    reddit_dump = pd.read_csv(filename, encoding='utf-8')
    users = reddit_dump['users']
    subreddit = reddit_dump['subreddit']
    total_entries = len(users)
    G = nx.Graph()
    data_itr = tqdm(enumerate(zip(users, subreddit)))
    file_prefix = filename.split('/')[(- 1)]
    file_prefix = file_prefix.replace('.csv', '')
    save_path = (('./reddit_data/Reddit_split_2017-11/split_csv/' + file_prefix) + '_graph.pkl')
    for (idx, (user, subreddit)) in data_itr:
        user = ('U_' + user)
        subreddit = ('SR_' + subreddit)
        G.add_edge(user, subreddit)
    nx.write_gpickle(G, save_path)
    return G
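A hedged usage sketch for process_csv: the 'users'/'subreddit' columns and the output directory follow the function above, but the concrete CSV path is an assumption and must exist for the call to succeed.

G = process_csv('./reddit_data/Reddit_split_2017-11/split_csv/example_month.csv')  # hypothetical file
print(G.number_of_nodes(), 'nodes,', G.number_of_edges(), 'user-subreddit edges')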
class WideResNetEnsemble(TorchEnsemble):

    def __init__(self, cfg):
        super().__init__(cfg, WideResNet)
def int64_list_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
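A hedged sketch of how the helper above typically feeds a tf.train.Example record; the field names are illustrative, not taken from the original source.

import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'image/height': int64_list_feature([480]),
    'image/width': int64_list_feature([640]),
    'image/object/class/label': int64_list_feature([1, 3]),
}))
serialized = example.SerializeToString()  # ready to be written to a TFRecord file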
def run_training(hyperparams=None):
    gt = GraspTrain()
    with K.get_session() as sess:
        make_model_fn = choose_make_model_fn(hyperparams=hyperparams)
        load_weights = FLAGS.load_weights
        if ('train' in FLAGS.pipeline_stage):
            print(('Training ' + FLAGS.grasp_model))
            (load_weights, history) = gt.train(make_model_fn=make_model_fn, load_weights=load_weights, model_name=FLAGS.grasp_model)
        if ('eval' in FLAGS.pipeline_stage):
            print(((('Evaluating ' + FLAGS.grasp_model) + ' on weights ') + load_weights))
            gt.eval(make_model_fn=make_model_fn, load_weights=load_weights, model_name=FLAGS.grasp_model, test_per_epoch=False)
        return history
def shuffle_dataset(sampler, cur_epoch, is_shuffle=False):
    if (not is_shuffle):
        return
    assert isinstance(sampler, (RandomSampler, DistributedSampler)), "Sampler type '{}' not supported".format(type(sampler))
    if isinstance(sampler, DistributedSampler):
        sampler.set_epoch(cur_epoch)
def read_hist(f):
    ch = []
    for line in f:
        c = line[0]
        if (c == args.threshold_character):
            break
        ch.append(c)
    return ch
def drn_c_58(pretrained=False, **kwargs):
    model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-c-58']))
    return model
def logsumexp2(t1: torch.Tensor, t2: torch.Tensor) -> torch.Tensor:
    m = torch.max(t1, t2)
    a1 = (t1 - m)
    a2 = (t2 - m)
    lse = (m + torch.log((torch.exp(a1) + torch.exp(a2))))
    return lse
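A quick sanity check, not part of the original code, that logsumexp2 agrees with torch.logsumexp over the stacked pair of tensors.

import torch

t1, t2 = torch.randn(3, 4), torch.randn(3, 4)
reference = torch.logsumexp(torch.stack([t1, t2]), dim=0)  # element-wise logsumexp of the pair
assert torch.allclose(logsumexp2(t1, t2), reference, atol=1e-6)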
def GetShellCommandOutput(env_cmd):
    environ = os.environ.copy()
    environ.update(env_cmd[0])
    p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
    return p.output
class TestEvents(unittest.TestCase):

    def test_qkv(self):
        d = {}

        def store_qkv(event):
            d['q'] = event.queries
            d['k'] = event.keys
            d['v'] = event.values
        transformer = TransformerEncoderBuilder().get()
        x = transformer(torch.rand(1, 100, (64 * 4)))
        self.assertEqual(len(d), 0)
        EventDispatcher.get().listen(QKVEvent, store_qkv)
        x = transformer(torch.rand(1, 100, (64 * 4)))
        self.assertEqual(len(d), 3)
        d.clear()
        EventDispatcher.get().remove(store_qkv)
        x = transformer(torch.rand(1, 100, (64 * 4)))
        self.assertEqual(len(d), 0)
        d.clear()
        EventDispatcher.get().listen((QKVEvent & layer_name_contains(transformer, 'layers.2.attention')), store_qkv)
        x = transformer(torch.rand(1, 100, (64 * 4)))
        self.assertEqual(len(d), 3)
        d.clear()
        EventDispatcher.get().listen((QKVEvent & layer_name_contains(transformer, 'layers.22.attention')), store_qkv)
        x = transformer(torch.rand(1, 100, (64 * 4)))
        self.assertEqual(len(d), 0)
        d.clear()
        EventDispatcher.get().clear()

    def test_attention_matrix(self):
        A = []

        def store_attention(event):
            A.append(event.attention_matrix)
        transformer = TransformerEncoderBuilder().get()
        x = transformer(torch.rand(1, 100, (64 * 4)))
        self.assertEqual(len(A), 0)
        EventDispatcher.get().listen(AttentionEvent, store_attention)
        x = transformer(torch.rand(1, 100, (64 * 4)))
        self.assertEqual(len(A), 4)

    def test_intermediate_output(self):
        intermediates = []

        def store_values(event):
            intermediates.append(event.x)
        transformer = TransformerEncoderBuilder().get()
        x = transformer(torch.rand(1, 100, (64 * 4)))
        EventDispatcher.get().listen(IntermediateOutput, store_values)
        transformer(x)
        self.assertEqual(len(intermediates), 4)
class Dagger(BaseDagger):

    def __init__(self):
        super().__init__('dagger', weight=10, damage=D.Dice.from_str('d3'), material=M.Iron, hit=2)
def build_head(name, verbose=True, **kwargs):
    avai_heads = HEAD_REGISTRY.registered_names()
    check_availability(name, avai_heads)
    if verbose:
        print('Head: {}'.format(name))
    return HEAD_REGISTRY.get(name)(**kwargs)
@register_criterion('sentence_prediction_r3f')
class SentencePredictionR3F(FairseqCriterion):

    def __init__(self, task, eps, r3f_lambda, noise_type, classification_head_name, regression_target):
        super().__init__(task)
        self.eps = eps
        self.r3f_lambda = r3f_lambda
        self.noise_type = noise_type
        self.classification_head_name = classification_head_name
        self.regression_target = regression_target
        if (self.noise_type in {'normal'}):
            self.noise_sampler = torch.distributions.normal.Normal(loc=0.0, scale=self.eps)
        elif (self.noise_type == 'uniform'):
            self.noise_sampler = torch.distributions.uniform.Uniform(low=(- self.eps), high=self.eps)
        else:
            raise Exception(f'unrecognized noise type {self.noise_type}')

    @staticmethod
    def add_args(parser):
        parser.add_argument('--eps', type=float, default=1e-05, help='noise eps')
        parser.add_argument('--r3f-lambda', type=float, default=1.0, help='lambda for combining logistic loss and noisy KL loss')
        parser.add_argument('--noise-type', type=str, default='uniform', choices=['normal', 'uniform'], help='type of noises for RXF methods')
        parser.add_argument('--classification-head-name', default='sentence_classification_head', help='name of the classification head to use')

    def _get_symm_kl(self, noised_logits, input_logits):
        return ((F.kl_div(F.log_softmax(noised_logits, dim=(- 1), dtype=torch.float32), F.softmax(input_logits, dim=(- 1), dtype=torch.float32), None, None, 'sum') + F.kl_div(F.log_softmax(input_logits, dim=(- 1), dtype=torch.float32), F.softmax(noised_logits, dim=(- 1), dtype=torch.float32), None, None, 'sum')) / noised_logits.size(0))

    def forward(self, model, sample, reduce=True):
        assert (hasattr(model, 'classification_heads') and (self.classification_head_name in model.classification_heads)), 'model must provide sentence classification head for --criterion=sentence_prediction'
        token_embeddings = model.encoder.sentence_encoder.embed_tokens(sample['net_input']['src_tokens'])
        (input_logits, _) = model(**sample['net_input'], features_only=True, classification_head_name=self.classification_head_name, token_embeddings=token_embeddings)
        if (model.training and self.noise_sampler):
            noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(token_embeddings)
            noised_embeddings = (token_embeddings.detach().clone() + noise)
            (noised_logits, _) = model(**sample['net_input'], features_only=True, classification_head_name=self.classification_head_name, token_embeddings=noised_embeddings)
            symm_kl = self._get_symm_kl(noised_logits, input_logits)
        else:
            symm_kl = 0
        targets = model.get_targets(sample, [input_logits]).view((- 1))
        sample_size = targets.numel()
        if (not self.regression_target):
            loss = F.nll_loss(F.log_softmax(input_logits, dim=(- 1), dtype=torch.float32), targets, reduction='sum')
            if model.training:
                symm_kl = (symm_kl * sample_size)
                loss = (loss + (self.r3f_lambda * symm_kl))
        else:
            logits = input_logits.squeeze().float()
            targets = targets.float()
            loss = F.mse_loss(logits, targets, reduction='sum')
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample_size, 'sample_size': sample_size}
        if (not self.regression_target):
            preds = input_logits.max(dim=1)[1]
            logging_output.update(ncorrect=(preds == targets).sum().item())
        if (model.training and self.noise_sampler):
            logging_output.update(symm_kl=(utils.item(symm_kl.data) if reduce else symm_kl.data))
        return (loss, sample_size, logging_output)

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        symm_kl_sum = sum((log.get('symm_kl', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        agg_output = {'loss': ((loss_sum / sample_size) / math.log(2)), 'symm_kl': (symm_kl_sum / sample_size), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        if ((len(logging_outputs) > 0) and ('ncorrect' in logging_outputs[0])):
            ncorrect = sum((log.get('ncorrect', 0) for log in logging_outputs))
            agg_output.update(accuracy=(ncorrect / nsentences))
        if (sample_size != ntokens):
            agg_output['nll_loss'] = ((loss_sum / ntokens) / math.log(2))
        return agg_output
def find_k_clicks(pairs, k):
    # Enumerate candidate k-cliques ("clicks") from the word adjacency mapping `pairs`.
    if (k == 1):
        for word in pairs.keys():
            yield [word]
    else:
        for click in find_k_clicks(pairs, (k - 1)):
            if (len(click) > (k - 1)):
                continue
            for word in pairs[click[(- 1)]]:
                if (word in click):
                    continue
                bad = False
                # the new word must be adjacent to every earlier member, not just the last one
                for c in click[:(- 1)]:
                    if (not (word in pairs[c])):
                        bad = True
                if (not bad):
                    yield (click + [word])
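A hedged usage sketch for find_k_clicks: `pairs` is assumed to be a symmetric word-to-neighbours mapping, which is what the adjacency checks above imply, and the toy graph below contains a single 3-clique.

pairs = {
    'a': {'b', 'c', 'd'},
    'b': {'a', 'c'},
    'c': {'a', 'b'},
    'd': {'a'},
}
for click in find_k_clicks(pairs, 3):
    print(click)  # orderings of ['a', 'b', 'c']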