code
stringlengths
17
6.64M
def run(config_dict):
    """Build the data/model/training components named in ``config_dict`` and
    run either training or prediction, selected by ``config_dict['mode']``.

    Each ``*-module`` entry names an importable module that exposes the class
    to instantiate as a module-level attribute called ``component``.
    """
    data_module = config_dict['data-module']
    model_module = config_dict['model-module']
    training_module = config_dict['training-module']
    evaluation_module = config_dict.get('evaluation-module', None)
    mode = config_dict['mode']
    DataClass = importlib.import_module(data_module).component
    ModelClass = importlib.import_module(model_module).component
    TrainingClass = importlib.import_module(training_module).component
    EvaluationClass = (importlib.import_module(evaluation_module).component
                       if evaluation_module else None)
    model_dirname = make_model_dir(config_dict)
    logger = set_logger(config_dict['log_level'],
                        os.path.join(model_dirname, 'log.txt'))
    data = DataClass(config_dict['data_params'])
    data.setup()
    # BUG FIX: was `fix_seed(config_d['random_seed'])` -- `config_d` is
    # undefined and raised a NameError before the model was ever built.
    fix_seed(config_dict['random_seed'])
    model = ModelClass(config_dict['model_params'])
    print('build model done')
    model.setup(data)
    print('setup data done')
    if mode == 'train':
        training_params = config_dict['training_params']
        trainer = TrainingClass(training_params)
        trainer.training_start(model, data)
        save_config(config_dict, os.path.join(model_dirname, 'config.json'))
    elif mode == 'predict':
        assert evaluation_module is not None, 'No evaluation module -- check config file!'
        evaluator = EvaluationClass(config_dict)
        model_fname = config_dict['model_fn']
        load_model(model, model_fname)
        model = model.to('cuda')
        id2word = data.vocab.id2tok
        beam_size = None  # greedy decoding by default
        alpha = 0.2       # length-normalization weight
        if 'dev' in data.fnames:
            logger.info('Predicting on dev data')
            (dec_snt_beam, fw_beam) = ([], [])
            (predicted_ids, fw_beam) = evaluator.evaluate_model(
                model, data.dev[0], data.uni_mr['dev'],
                beam_size=beam_size, alpha=alpha)
            data_lexicalizations = data.lexicalizations['dev']
            predicted_snts = evaluator.lexicalize_predictions(
                predicted_ids, data_lexicalizations, id2word)
            if beam_size is None:
                save_predictions_txt(predicted_snts, ('%s.devset.predictions.txt_incre_%.1f_new' % (model_fname, alpha)))
            else:
                save_beam_fw(fw_beam, dec_snt_beam, beam_size, ('%s.devset.recs.txt' % model_fname))
            # NOTE(review): hard exit() means the test branch below never runs
            # when a dev split exists -- confirm this is intended.
            exit()
        if 'test' in data.fnames:
            logger.info('Predicting on test data')
            print(len(data.test[0]))
            (predicted_ids, attention_weights) = evaluator.evaluate_model(
                model, data.test[0], data.uni_mr['test'],
                beam_size=beam_size, alpha=alpha)
            data_lexicalizations = data.lexicalizations['test']
            print(len(predicted_ids), len(data_lexicalizations))
            predicted_snts = evaluator.lexicalize_predictions(
                predicted_ids, data_lexicalizations, id2word)
            # NOTE(review): 'txt_inre' here vs 'txt_incre' in the dev branch
            # looks like a typo, but the output filename is kept as-is.
            save_predictions_txt(predicted_snts, ('%s.testset.predictions.txt_inre_%.1f' % (model_fname, alpha)))
    else:
        logger.warning("Check the 'mode' field in the config file: %s" % mode)
    logger.info('DONE')
class Data():
    """Builds the training loader (unless test-only) and the test loader."""

    def __init__(self, args):
        # Both branches of the original set the same collate_fn; only
        # pin_memory differed, so the whole choice collapses to one dict.
        kwargs = {'collate_fn': default_collate, 'pin_memory': not args.cpu}

        self.loader_train = None
        if not args.test_only:
            train_mod = import_module('data.' + args.data_train.lower())
            train_set = getattr(train_mod, args.data_train)(args)
            self.loader_train = MSDataLoader(args, train_set,
                                             batch_size=args.batch_size,
                                             shuffle=True, **kwargs)

        if args.data_test in ['CBSD68', 'Set14', 'B100', 'Urban100']:
            # Standard benchmark sets live in dedicated modules.
            if args.benchmark_noise:
                test_mod = import_module('data.benchmark_noise')
                test_set = getattr(test_mod, 'BenchmarkNoise')(args, train=False)
            else:
                test_mod = import_module('data.benchmark')
                test_set = getattr(test_mod, 'Benchmark')(args, train=False)
        else:
            test_mod = import_module('data.' + args.data_test.lower())
            test_set = getattr(test_mod, args.data_test)(args, train=False)

        self.loader_test = MSDataLoader(args, test_set, batch_size=1,
                                        shuffle=False, **kwargs)
class Benchmark(srdata.SRData):
    """Benchmark denoising dataset: paired Clean/Noisy folders on disk."""

    def __init__(self, args, train=True):
        super(Benchmark, self).__init__(args, train, benchmark=True)

    def _scan(self):
        """Collect matching HR (clean) and LR (noisy) file lists, one LR list
        per configured noise level."""
        list_hr = []
        list_lr = [[] for _ in self.noise_g]
        for entry in os.scandir(self.dir_hr):
            filename = os.path.splitext(entry.name)[0]
            list_hr.append(os.path.join(self.dir_hr, (filename + self.ext)))
            for (si, s) in enumerate(self.noise_g):
                # Noisy counterpart lives under X<sigma>/<name>_s<sigma>.<ext>
                list_lr[si].append(os.path.join(self.dir_lr, 'X{}/{}_s{}{}'.format(s, filename, s, self.ext)))
        # Sort both sides so HR/LR lists stay index-aligned.
        list_hr.sort()
        for l in list_lr:
            l.sort()
        return (list_hr, list_lr)

    def _set_filesystem(self, dir_data):
        """Point apath/dir_hr/dir_lr at <dir_data>/benchmark/<dataset>."""
        self.apath = os.path.join(dir_data, 'benchmark', self.args.data_test)
        self.dir_hr = os.path.join(self.apath, 'Clean')
        self.dir_lr = os.path.join(self.apath, 'Noisy')
        self.ext = '.png'
class Demo(data.Dataset):
    """Inference-only dataset over every .png/.jp* image in args.dir_demo.

    Items are (tensor, -1, stem): -1 stands in for the missing ground truth.
    """

    def __init__(self, args, train=False):
        self.args = args
        self.name = 'Demo'
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.benchmark = False
        # Collect and sort image paths in one pass.
        self.filelist = sorted(
            os.path.join(args.dir_demo, entry)
            for entry in os.listdir(args.dir_demo)
            if entry.find('.png') >= 0 or entry.find('.jp') >= 0
        )

    def __getitem__(self, idx):
        path = self.filelist[idx]
        stem, _ = os.path.splitext(os.path.split(path)[-1])
        image = misc.imread(path)
        image = common.set_channel([image], self.args.n_colors)[0]
        tensor = common.np2Tensor([image], self.args.rgb_range)[0]
        return (tensor, -1, stem)

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
class DIV2K(srdata.SRData):
    """DIV2K train/val dataset with clean/noisy pairs indexed 0001..NNNN."""

    def __init__(self, args, train=True):
        super(DIV2K, self).__init__(args, train)
        # Repeat the train list so one "epoch" covers args.test_every batches.
        self.repeat = (args.test_every // (args.n_train // args.batch_size))

    def _scan(self):
        """Build HR and per-noise-level LR path lists for the current split."""
        list_hr = []
        list_lr = [[] for _ in self.noise_g]
        if self.train:
            idx_begin = 0
            idx_end = self.args.n_train
        else:
            # Validation images follow the training range.
            idx_begin = self.args.n_train
            idx_end = (self.args.offset_val + self.args.n_val)
        for i in range((idx_begin + 1), (idx_end + 1)):
            filename = '{:0>4}'.format(i)  # zero-padded 4-digit index
            list_hr.append(os.path.join(self.dir_hr, (filename + self.ext)))
            for (si, s) in enumerate(self.noise_g):
                list_lr[si].append(os.path.join(self.dir_lr, 'X{}/{}_s{}{}'.format(s, filename, s, self.ext)))
        return (list_hr, list_lr)

    def _set_filesystem(self, dir_data):
        self.apath = (dir_data + '/DIV2K_train')
        self.dir_hr = os.path.join(self.apath, 'Clean')
        self.dir_lr = os.path.join(self.apath, 'Noisy')
        self.ext = '.png'

    def _name_hrbin(self):
        """Path of the cached binary (npy) HR file for this split."""
        return os.path.join(self.apath, 'bin', '{}_bin_HR.npy'.format(self.split))

    def _name_lrbin(self, scale):
        """Path of the cached binary (npy) LR file for this split/scale."""
        return os.path.join(self.apath, 'bin', '{}_bin_LR_X{}.npy'.format(self.split, scale))

    def __len__(self):
        # Training length is inflated by `repeat` (see __init__).
        if self.train:
            return (len(self.images_hr) * self.repeat)
        else:
            return len(self.images_hr)

    def _get_index(self, idx):
        # Wrap the inflated index back into the real file list.
        if self.train:
            return (idx % len(self.images_hr))
        else:
            return idx
class MyImage(data.Dataset):
    """Inference dataset over a flat directory of noisy test images.

    Files that cannot be read as images are skipped (best effort).
    Items are (tensor, -1, stem): -1 stands in for the missing ground truth.
    """

    def __init__(self, args, train=False):
        self.args = args
        self.train = False
        self.name = 'MyImage'
        self.noise_g = args.noise_g
        self.idx_scale = 0
        # <testpath>/<testset>/X<first noise level>
        apath = args.testpath + '/' + args.testset + '/X' + str(args.noise_g[0])
        self.filelist = []
        self.imnamelist = []
        if not train:
            for f in os.listdir(apath):
                filename = os.path.join(apath, f)
                try:
                    # Probe-read so non-image files are skipped.
                    misc.imread(filename)
                except Exception:
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit. Kept as best-effort skip.
                    continue
                self.filelist.append(filename)
                self.imnamelist.append(f)

    def __getitem__(self, idx):
        """Return (image tensor, -1, filename stem) for the idx-th file."""
        filename = os.path.split(self.filelist[idx])[(- 1)]
        (filename, _) = os.path.splitext(filename)
        lr = misc.imread(self.filelist[idx])
        lr = common.set_channel([lr], self.args.n_colors)[0]
        return (common.np2Tensor([lr], self.args.rgb_range)[0], (- 1), filename)

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
def _ms_loop(dataset, index_queue, data_queue, collate_fn, scale, seed, init_fn, worker_id):
    """Worker-process loop: pull batch index lists from ``index_queue``,
    collate the samples (picking a random scale when training multi-scale),
    and push results to ``data_queue``. A ``None`` message is the shutdown
    signal. Exceptions are shipped back wrapped, not raised here.

    NOTE(review): ``init_fn`` and ``worker_id`` are accepted but never used
    in this body -- presumably kept for signature parity; confirm.
    """
    global _use_shared_memory
    _use_shared_memory = True
    _set_worker_signal_handlers()
    # One compute thread per worker; per-worker deterministic RNG.
    torch.set_num_threads(1)
    torch.manual_seed(seed)
    while True:
        r = index_queue.get()
        if (r is None):
            break  # shutdown sentinel
        (idx, batch_indices) = r
        try:
            idx_scale = 0
            # Randomize scale only during multi-scale training.
            if ((len(scale) > 1) and dataset.train):
                idx_scale = random.randrange(0, len(scale))
                dataset.set_scale(idx_scale)
            samples = collate_fn([dataset[i] for i in batch_indices])
            # Tag the batch with the scale used to build it.
            samples.append(idx_scale)
        except Exception:
            # Forward the traceback to the main process instead of dying.
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
class _MSDataLoaderIter(_DataLoaderIter):
    """Multi-scale variant of torch's private ``_DataLoaderIter``: same
    bookkeeping, but workers run ``_ms_loop`` and get ``noise_g`` so batches
    carry their scale index. Mirrors an older torch internal API.
    """

    def __init__(self, loader):
        self.dataset = loader.dataset
        self.noise_g = loader.noise_g
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        # Pinning is useless without CUDA, so gate it on availability.
        self.pin_memory = (loader.pin_memory and torch.cuda.is_available())
        self.timeout = loader.timeout
        self.done_event = threading.Event()
        self.sample_iter = iter(self.batch_sampler)
        if (self.num_workers > 0):
            self.worker_init_fn = loader.worker_init_fn
            self.index_queues = [multiprocessing.Queue() for _ in range(self.num_workers)]
            self.worker_queue_idx = 0
            self.worker_result_queue = multiprocessing.SimpleQueue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}
            # Distinct but reproducible seed per worker (base_seed + i).
            base_seed = torch.LongTensor(1).random_()[0]
            self.workers = [
                multiprocessing.Process(
                    target=_ms_loop,
                    args=(self.dataset, self.index_queues[i],
                          self.worker_result_queue, self.collate_fn,
                          self.noise_g, (base_seed + i),
                          self.worker_init_fn, i))
                for i in range(self.num_workers)]
            if (self.pin_memory or (self.timeout > 0)):
                # A manager thread moves results into pinned memory / applies
                # the timeout before they reach the consumer.
                self.data_queue = queue.Queue()
                if self.pin_memory:
                    maybe_device_id = torch.cuda.current_device()
                else:
                    maybe_device_id = None
                self.worker_manager_thread = threading.Thread(
                    target=_worker_manager_loop,
                    args=(self.worker_result_queue, self.data_queue,
                          self.done_event, self.pin_memory, maybe_device_id))
                self.worker_manager_thread.daemon = True
                self.worker_manager_thread.start()
            else:
                # No pinning/timeout: consume the worker queue directly.
                self.data_queue = self.worker_result_queue
            for w in self.workers:
                w.daemon = True  # do not keep the parent process alive
                w.start()
            _update_worker_pids(id(self), tuple((w.pid for w in self.workers)))
            _set_SIGCHLD_handler()
            self.worker_pids_set = True
            # Prime the pipeline with two outstanding batches per worker.
            for _ in range((2 * self.num_workers)):
                self._put_indices()
class MSDataLoader(DataLoader):
    """DataLoader that remembers ``args.noise_g`` and iterates through
    ``_MSDataLoaderIter`` so every batch is tagged with its scale index.
    """

    def __init__(self, args, dataset, batch_size=1, shuffle=False,
                 sampler=None, batch_sampler=None, collate_fn=default_collate,
                 pin_memory=False, drop_last=False, timeout=0,
                 worker_init_fn=None):
        # num_workers always comes from args; everything else passes through.
        super(MSDataLoader, self).__init__(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            sampler=sampler,
            batch_sampler=batch_sampler,
            num_workers=args.n_threads,
            collate_fn=collate_fn,
            pin_memory=pin_memory,
            drop_last=drop_last,
            timeout=timeout,
            worker_init_fn=worker_init_fn,
        )
        self.noise_g = args.noise_g

    def __iter__(self):
        return _MSDataLoaderIter(self)
class Adversarial(nn.Module):
    """Adversarial loss wrapper: owns the discriminator and its optimizer,
    updates the discriminator ``gan_k`` times per forward call, then returns
    the generator loss. Supports 'GAN', 'WGAN' and 'WGAN_GP'.
    """

    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k
        self.discriminator = discriminator.Discriminator(args, gan_type)
        if (gan_type != 'WGAN_GP'):
            self.optimizer = utility.make_optimizer(args, self.discriminator)
        else:
            # WGAN-GP-style settings: Adam with beta1=0 and a small lr.
            self.optimizer = optim.Adam(self.discriminator.parameters(), betas=(0, 0.9), eps=1e-08, lr=1e-05)
        self.scheduler = utility.make_scheduler(args, self.optimizer)

    def forward(self, fake, real):
        # Detach so discriminator updates never backprop into the generator.
        fake_detach = fake.detach()
        self.loss = 0
        for _ in range(self.gan_k):
            self.optimizer.zero_grad()
            d_fake = self.discriminator(fake_detach)
            d_real = self.discriminator(real)
            if (self.gan_type == 'GAN'):
                label_fake = torch.zeros_like(d_fake)
                label_real = torch.ones_like(d_real)
                loss_d = (F.binary_cross_entropy_with_logits(d_fake, label_fake) + F.binary_cross_entropy_with_logits(d_real, label_real))
            elif (self.gan_type.find('WGAN') >= 0):
                loss_d = (d_fake - d_real).mean()
                if (self.gan_type.find('GP') >= 0):
                    # Gradient penalty on random interpolates of real/fake.
                    # NOTE(review): rand_like(fake).view(-1,1,1,1) only works
                    # for specific shapes -- confirm against callers.
                    epsilon = torch.rand_like(fake).view((- 1), 1, 1, 1)
                    hat = (fake_detach.mul((1 - epsilon)) + real.mul(epsilon))
                    hat.requires_grad = True
                    d_hat = self.discriminator(hat)
                    gradients = torch.autograd.grad(outputs=d_hat.sum(), inputs=hat, retain_graph=True, create_graph=True, only_inputs=True)[0]
                    gradients = gradients.view(gradients.size(0), (- 1))
                    gradient_norm = gradients.norm(2, dim=1)
                    gradient_penalty = (10 * gradient_norm.sub(1).pow(2).mean())
                    loss_d += gradient_penalty
            self.loss += loss_d.item()
            loss_d.backward()
            self.optimizer.step()
            if (self.gan_type == 'WGAN'):
                # Vanilla WGAN: enforce Lipschitz constraint by clipping.
                for p in self.discriminator.parameters():
                    p.data.clamp_((- 1), 1)
        self.loss /= self.gan_k
        # Generator signal: discriminator applied to the non-detached fake.
        d_fake_for_g = self.discriminator(fake)
        if (self.gan_type == 'GAN'):
            # NOTE(review): label_real leaks out of the loop above; this only
            # works because the 'GAN' branch always defines it -- confirm.
            loss_g = F.binary_cross_entropy_with_logits(d_fake_for_g, label_real)
        elif (self.gan_type.find('WGAN') >= 0):
            loss_g = (- d_fake_for_g.mean())
        return loss_g

    def state_dict(self, *args, **kwargs):
        """Merged dict of discriminator parameters and optimizer state.

        NOTE(review): keys present in both dicts would collide silently --
        verify before relying on round-tripping this state.
        """
        state_discriminator = self.discriminator.state_dict(*args, **kwargs)
        state_optimizer = self.optimizer.state_dict()
        return dict(**state_discriminator, **state_optimizer)
class Discriminator(nn.Module):
    """Patch discriminator: a stack of stride-alternating conv blocks that
    doubles channels every other layer, followed by a 2-layer classifier.
    """

    def __init__(self, args, gan_type='GAN'):
        super(Discriminator, self).__init__()
        # NOTE(review): this initial in_channels=3 is overwritten in the loop
        # before use; the first block reads args.n_colors instead.
        in_channels = 3
        out_channels = 64
        depth = 7
        bn = True
        act = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        m_features = [common.BasicBlock(args.n_colors, out_channels, 3, bn=bn, act=act)]
        for i in range(depth):
            in_channels = out_channels
            if ((i % 2) == 1):
                # Odd layers: keep resolution, double the channel count.
                stride = 1
                out_channels *= 2
            else:
                # Even layers: halve the resolution.
                stride = 2
            m_features.append(common.BasicBlock(in_channels, out_channels, 3, stride=stride, bn=bn, act=act))
        self.features = nn.Sequential(*m_features)
        # Spatial size after (depth+1)//2 stride-2 layers.
        patch_size = (args.patch_size // (2 ** ((depth + 1) // 2)))
        m_classifier = [nn.Linear((out_channels * (patch_size ** 2)), 1024), act, nn.Linear(1024, 1)]
        self.classifier = nn.Sequential(*m_classifier)

    def forward(self, x):
        features = self.features(x)
        # Flatten per-sample before the linear classifier; returns raw logits.
        output = self.classifier(features.view(features.size(0), (- 1)))
        return output
class VGG(nn.Module):
    """VGG19 perceptual loss: MSE between feature maps of sr and hr taken at
    relu2_2 (conv_index '22') or relu5_4 ('54').
    """

    def __init__(self, conv_index, rgb_range=1):
        super(VGG, self).__init__()
        vgg_features = models.vgg19(pretrained=True).features
        modules = [m for m in vgg_features]
        # NOTE(review): any conv_index other than '22'/'54' leaves self.vgg
        # unset and forward() would raise AttributeError -- confirm callers.
        if (conv_index == '22'):
            self.vgg = nn.Sequential(*modules[:8])
        elif (conv_index == '54'):
            self.vgg = nn.Sequential(*modules[:35])
        # ImageNet normalization, std scaled to the working rgb_range.
        vgg_mean = (0.485, 0.456, 0.406)
        vgg_std = ((0.229 * rgb_range), (0.224 * rgb_range), (0.225 * rgb_range))
        self.sub_mean = common.MeanShift(rgb_range, vgg_mean, vgg_std)
        # NOTE(review): sets an attribute on the Sequential, which does NOT
        # freeze its parameters -- gradients are avoided via no_grad below
        # only for hr; verify sr-side behavior is intended.
        self.vgg.requires_grad = False

    def forward(self, sr, hr):
        def _forward(x):
            x = self.sub_mean(x)
            x = self.vgg(x)
            return x
        vgg_sr = _forward(sr)
        with torch.no_grad():
            vgg_hr = _forward(hr.detach())
        loss = F.mse_loss(vgg_sr, vgg_hr)
        return loss
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """Conv2d with 'same'-style padding (exact for odd kernel sizes)."""
    pad = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=pad, bias=bias)
class MeanShift(nn.Conv2d):
    """Fixed 1x1 conv that subtracts (sign=-1) or adds (sign=+1) the dataset
    RGB mean, with both weight and bias scaled by the per-channel std.
    """

    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=(- 1)):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        # Identity mixing of channels, divided per-output-channel by std.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std
        # NOTE(review): attribute on the Module, not on the parameters; it
        # does not actually freeze weight/bias -- kept for parity.
        self.requires_grad = False
class BasicBlock(nn.Sequential):
    """Conv2d (+ optional BatchNorm)(+ optional activation) as a Sequential.

    NOTE(review): the default ``act=nn.ReLU(True)`` is a single module
    instance created at definition time and shared by every block that uses
    the default -- harmless for a stateless ReLU, kept for parity.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, bias=False, bn=True, act=nn.ReLU(True)):
        layers = [nn.Conv2d(in_channels, out_channels, kernel_size,
                            padding=kernel_size // 2, stride=stride, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """Residual block: conv(+bn)+act, conv(+bn), scaled skip connection."""

    def __init__(self, conv, n_feat, kernel_size, bias=True, bn=False,
                 act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()
        layers = []
        for stage in range(2):
            layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if stage == 0:
                # Activation only between the two convolutions.
                layers.append(act)
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        # body(x) * res_scale + x
        return self.body(x).mul(self.res_scale).add(x)
class Upsampler(nn.Sequential):
    """Sub-pixel upsampler: repeated conv + PixelShuffle(2) for power-of-two
    scales, or a single conv + PixelShuffle(3) for scale 3.
    """

    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
        stages = []
        if (scale & (scale - 1)) == 0:
            # Power of two: one x2 stage per factor of 2.
            for _ in range(int(math.log(scale, 2))):
                stages.append(conv(n_feat, 4 * n_feat, 3, bias))
                stages.append(nn.PixelShuffle(2))
                if bn:
                    stages.append(nn.BatchNorm2d(n_feat))
                if act:
                    stages.append(act())
        elif scale == 3:
            stages.append(conv(n_feat, 9 * n_feat, 3, bias))
            stages.append(nn.PixelShuffle(3))
            if bn:
                stages.append(nn.BatchNorm2d(n_feat))
            if act:
                stages.append(act())
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*stages)
def init_weights(modules):
    """Weight-initialization hook: intentionally a no-op (layers keep their
    framework defaults). Kept so call sites stay in place."""
    return None
class MeanShift(nn.Module):
    """Frozen 1x1 conv that adds (sub=False) or subtracts (sub=True) a fixed
    per-channel RGB mean."""

    def __init__(self, mean_rgb, sub):
        super(MeanShift, self).__init__()
        sign = -1 if sub else 1
        self.shifter = nn.Conv2d(3, 3, 1, 1, 0)
        # Identity channel mixing; the bias carries the signed mean.
        self.shifter.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.shifter.bias.data = torch.Tensor(
            [mean_rgb[0] * sign, mean_rgb[1] * sign, mean_rgb[2] * sign])
        # Freeze: the shift is a constant, never trained.
        for p in self.shifter.parameters():
            p.requires_grad = False

    def forward(self, x):
        return self.shifter(x)
class Merge_Run(nn.Module):
    """Two parallel conv branches (plain and dilation-2) concatenated, fused
    by a third conv, plus a residual connection to the input."""

    def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1, dilation=1):
        super(Merge_Run, self).__init__()
        self.body1 = nn.Sequential(nn.Conv2d(in_channels, out_channels, ksize, stride, pad), nn.ReLU(inplace=True))
        # padding=2, dilation=2 keeps spatial size for ksize=3.
        self.body2 = nn.Sequential(nn.Conv2d(in_channels, out_channels, ksize, stride, 2, 2), nn.ReLU(inplace=True))
        self.body3 = nn.Sequential(nn.Conv2d((in_channels * 2), out_channels, ksize, stride, pad), nn.ReLU(inplace=True))
        # NOTE(review): passes the bound method `self.modules` (not called);
        # harmless since init_weights is a no-op -- revisit if it gains a body.
        init_weights(self.modules)

    def forward(self, x):
        out1 = self.body1(x)
        out2 = self.body2(x)
        c = torch.cat([out1, out2], dim=1)
        c_out = self.body3(c)
        # Residual add assumes in_channels == out_channels.
        out = (c_out + x)
        return out
class Merge_Run_dual(nn.Module):
    """Two parallel two-conv branches with increasing dilation (1,2 and 3,4),
    concatenated, fused by a conv, plus a residual connection.

    NOTE(review): each branch's second conv takes ``in_channels`` inputs but
    is fed the first conv's ``out_channels`` output -- this only works when
    in_channels == out_channels; confirm callers always satisfy that.
    """

    def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1, dilation=1):
        super(Merge_Run_dual, self).__init__()
        # Branch 1: dilation 1 then 2 (padding matched to dilation).
        self.body1 = nn.Sequential(nn.Conv2d(in_channels, out_channels, ksize, stride, pad), nn.ReLU(inplace=True), nn.Conv2d(in_channels, out_channels, ksize, stride, 2, 2), nn.ReLU(inplace=True))
        # Branch 2: dilation 3 then 4.
        self.body2 = nn.Sequential(nn.Conv2d(in_channels, out_channels, ksize, stride, 3, 3), nn.ReLU(inplace=True), nn.Conv2d(in_channels, out_channels, ksize, stride, 4, 4), nn.ReLU(inplace=True))
        self.body3 = nn.Sequential(nn.Conv2d((in_channels * 2), out_channels, ksize, stride, pad), nn.ReLU(inplace=True))
        # NOTE(review): bound method passed uncalled; init_weights is a no-op.
        init_weights(self.modules)

    def forward(self, x):
        out1 = self.body1(x)
        out2 = self.body2(x)
        c = torch.cat([out1, out2], dim=1)
        c_out = self.body3(c)
        out = (c_out + x)  # residual
        return out
class BasicBlock(nn.Module):
    """Conv2d + ReLU wrapped as a Module."""

    def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1):
        super(BasicBlock, self).__init__()
        self.body = nn.Sequential(nn.Conv2d(in_channels, out_channels, ksize, stride, pad), nn.ReLU(inplace=True))
        # NOTE(review): bound method passed uncalled; init_weights is a no-op.
        init_weights(self.modules)

    def forward(self, x):
        out = self.body(x)
        return out
class BasicBlockSig(nn.Module):
    """Conv2d + Sigmoid, used as the gate in channel attention."""

    def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1):
        super(BasicBlockSig, self).__init__()
        self.body = nn.Sequential(nn.Conv2d(in_channels, out_channels, ksize, stride, pad), nn.Sigmoid())
        # NOTE(review): bound method passed uncalled; init_weights is a no-op.
        init_weights(self.modules)

    def forward(self, x):
        out = self.body(x)
        return out
class ResidualBlock(nn.Module):
    """conv-ReLU-conv with residual add, followed by a ReLU on the sum."""

    def __init__(self, in_channels, out_channels):
        super(ResidualBlock, self).__init__()
        self.body = nn.Sequential(nn.Conv2d(in_channels, out_channels, 3, 1, 1), nn.ReLU(inplace=True), nn.Conv2d(out_channels, out_channels, 3, 1, 1))
        # NOTE(review): bound method passed uncalled; init_weights is a no-op.
        init_weights(self.modules)

    def forward(self, x):
        out = self.body(x)
        # Residual add assumes in_channels == out_channels.
        out = F.relu((out + x))
        return out
class EResidualBlock(nn.Module):
    """Efficient residual block: two grouped 3x3 convs plus a 1x1 projection,
    residual add, then ReLU on the sum."""

    def __init__(self, in_channels, out_channels, group=1):
        super(EResidualBlock, self).__init__()
        self.body = nn.Sequential(nn.Conv2d(in_channels, out_channels, 3, 1, 1, groups=group), nn.ReLU(inplace=True), nn.Conv2d(out_channels, out_channels, 3, 1, 1, groups=group), nn.ReLU(inplace=True), nn.Conv2d(out_channels, out_channels, 1, 1, 0))
        # NOTE(review): bound method passed uncalled; init_weights is a no-op.
        init_weights(self.modules)

    def forward(self, x):
        out = self.body(x)
        out = F.relu((out + x))  # residual; assumes matching channel counts
        return out
class UpsampleBlock(nn.Module):
    """Dispatches to a per-scale _UpsampleBlock (x2/x3/x4) when multi_scale,
    otherwise to a single fixed-scale upsampler."""

    def __init__(self, n_channels, scale, multi_scale, group=1):
        super(UpsampleBlock, self).__init__()
        if multi_scale:
            self.up2 = _UpsampleBlock(n_channels, scale=2, group=group)
            self.up3 = _UpsampleBlock(n_channels, scale=3, group=group)
            self.up4 = _UpsampleBlock(n_channels, scale=4, group=group)
        else:
            self.up = _UpsampleBlock(n_channels, scale=scale, group=group)
        self.multi_scale = multi_scale

    def forward(self, x, scale):
        if self.multi_scale:
            if (scale == 2):
                return self.up2(x)
            elif (scale == 3):
                return self.up3(x)
            elif (scale == 4):
                return self.up4(x)
            # NOTE(review): any other scale falls through and returns None
            # implicitly -- confirm callers never pass one.
        else:
            return self.up(x)
class _UpsampleBlock(nn.Module):
    """Sub-pixel upsampler: conv+ReLU+PixelShuffle stages for scale 2/4/8
    (one x2 stage per factor of two) or a single x3 stage for scale 3."""

    def __init__(self, n_channels, scale, group=1):
        super(_UpsampleBlock, self).__init__()
        modules = []
        if ((scale == 2) or (scale == 4) or (scale == 8)):
            for _ in range(int(math.log(scale, 2))):
                modules += [nn.Conv2d(n_channels, (4 * n_channels), 3, 1, 1, groups=group), nn.ReLU(inplace=True)]
                modules += [nn.PixelShuffle(2)]
        elif (scale == 3):
            modules += [nn.Conv2d(n_channels, (9 * n_channels), 3, 1, 1, groups=group), nn.ReLU(inplace=True)]
            modules += [nn.PixelShuffle(3)]
        # NOTE(review): unsupported scales silently build an empty Sequential
        # (identity) instead of raising -- confirm that is acceptable.
        self.body = nn.Sequential(*modules)
        # NOTE(review): bound method passed uncalled; init_weights is a no-op.
        init_weights(self.modules)

    def forward(self, x):
        out = self.body(x)
        return out
def make_model(args, parent=False):
    """Framework factory hook: build the RIDNET network from parsed args.
    ``parent`` is accepted for interface parity and ignored here."""
    return RIDNET(args)
class CALayer(nn.Module):
    """Channel attention: global average pool, squeeze (1x1 conv), excite
    (1x1 conv + sigmoid), then rescale the input channel-wise."""

    def __init__(self, channel, reduction=16):
        super(CALayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Squeeze to channel//reduction, then expand back with a sigmoid gate.
        self.c1 = ops.BasicBlock(channel, (channel // reduction), 1, 1, 0)
        self.c2 = ops.BasicBlockSig((channel // reduction), channel, 1, 1, 0)

    def forward(self, x):
        y = self.avg_pool(x)
        y1 = self.c1(y)
        y2 = self.c2(y1)
        # Broadcast per-channel gates over the spatial dims.
        return (x * y2)
class Block(nn.Module):
    """RIDNET feature block: dual merge-and-run unit, residual block,
    efficient residual block, then channel attention -- applied in sequence."""

    def __init__(self, in_channels, out_channels, group=1):
        super(Block, self).__init__()
        self.r1 = ops.Merge_Run_dual(in_channels, out_channels)
        self.r2 = ops.ResidualBlock(in_channels, out_channels)
        self.r3 = ops.EResidualBlock(in_channels, out_channels)
        self.ca = CALayer(in_channels)

    def forward(self, x):
        r1 = self.r1(x)
        r2 = self.r2(r1)
        r3 = self.r3(r2)
        out = self.ca(r3)
        return out
class RIDNET(nn.Module):
    """RIDNET denoiser: mean-shift in, head conv, four attention blocks,
    tail conv, mean-shift out, with a global residual to the input."""

    def __init__(self, args):
        super(RIDNET, self).__init__()
        n_feats = args.n_feats
        kernel_size = 3
        reduction = args.reduction  # NOTE(review): read but unused here -- confirm.
        # DIV2K RGB statistics.
        rgb_mean = (0.4488, 0.4371, 0.404)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = ops.BasicBlock(3, n_feats, kernel_size, 1, 1)
        self.b1 = Block(n_feats, n_feats)
        self.b2 = Block(n_feats, n_feats)
        self.b3 = Block(n_feats, n_feats)
        self.b4 = Block(n_feats, n_feats)
        # Positional args: stride=1, padding=1, dilation=1.
        self.tail = nn.Conv2d(n_feats, 3, kernel_size, 1, 1, 1)

    def forward(self, x):
        s = self.sub_mean(x)
        h = self.head(s)
        b1 = self.b1(h)
        b2 = self.b2(b1)
        b3 = self.b3(b2)
        b_out = self.b4(b3)
        res = self.tail(b_out)
        out = self.add_mean(res)
        # Global residual: the network predicts the noise-removal delta.
        f_out = (out + x)
        return f_out
def set_template(args):
    """Overwrite hyper-parameters on ``args`` according to which preset names
    appear as substrings of ``args.template``. Presets are not exclusive:
    every matching block applies, in order."""
    tpl = args.template
    if 'jpeg' in tpl:
        args.data_train = 'DIV2K_jpeg'
        args.data_test = 'DIV2K_jpeg'
        args.epochs = 200
        args.lr_decay = 100
    if 'EDSR_paper' in tpl:
        args.model = 'EDSR'
        args.n_resblocks = 32
        args.n_feats = 256
        args.res_scale = 0.1
    if 'MDSR' in tpl:
        args.model = 'MDSR'
        args.patch_size = 48
        args.epochs = 650
    if 'DDBPN' in tpl:
        args.model = 'DDBPN'
        args.patch_size = 128
        args.scale = '4'
        args.data_test = 'Set5'
        args.batch_size = 20
        args.epochs = 1000
        args.lr_decay = 500
        args.gamma = 0.1
        args.weight_decay = 0.0001
        args.loss = '1*MSE'
    if 'GAN' in tpl:
        args.epochs = 200
        args.lr = 5e-05
        args.lr_decay = 150
class Trainer():
    """Drives training and testing: one optimizer step per batch with a
    loss-explosion skip guard, PSNR evaluation per noise level, and
    checkpointing through ``ckp``."""

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.noise_g = args.noise_g
        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)
        self.scheduler = utility.make_scheduler(args, self.optimizer)
        if (self.args.load != '.'):
            # Resuming: restore the optimizer and fast-forward the scheduler
            # by one step per already-logged epoch.
            self.optimizer.load_state_dict(torch.load(os.path.join(ckp.dir, 'optimizer.pt')))
            for _ in range(len(ckp.log)):
                self.scheduler.step()
        # Sentinel "last error" so the very first skip-check always passes.
        self.error_last = 100000000.0

    def train(self):
        """Run one training epoch over loader_train."""
        self.scheduler.step()
        self.loss.step()
        epoch = (self.scheduler.last_epoch + 1)
        lr = self.scheduler.get_lr()[0]
        self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr)))
        self.loss.start_log()
        self.model.train()
        (timer_data, timer_model) = (utility.timer(), utility.timer())
        # NOTE: the loop variable `lr` (low-res batch) shadows the learning
        # rate read above -- kept as in the original.
        for (batch, (lr, hr, _, idx_scale)) in enumerate(self.loader_train):
            (lr, hr) = self.prepare([lr, hr])
            timer_data.hold()
            timer_model.tic()
            self.optimizer.zero_grad()
            sr = self.model(lr, idx_scale)
            loss = self.loss(sr, hr)
            # Skip batches whose loss explodes relative to last epoch's error.
            if (loss.item() < (self.args.skip_threshold * self.error_last)):
                loss.backward()
                self.optimizer.step()
            else:
                print('Skip this batch {}! \n(Loss: {})'.format((batch + 1), loss.item()))
            timer_model.hold()
            if (((batch + 1) % self.args.print_every) == 0):
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                    ((batch + 1) * self.args.batch_size),
                    len(self.loader_train.dataset),
                    self.loss.display_loss(batch),
                    timer_model.release(),
                    timer_data.release()))
            timer_data.tic()
        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[((- 1), (- 1))]

    def test(self):
        """Evaluate PSNR on loader_test for every configured noise level and
        checkpoint the model when it is the best so far."""
        epoch = (self.scheduler.last_epoch + 1)
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(torch.zeros(1, len(self.noise_g)))
        self.model.eval()
        timer_test = utility.timer()
        with torch.no_grad():
            for (idx_scale, scale) in enumerate(self.noise_g):
                eval_acc = 0
                self.loader_test.dataset.set_scale(idx_scale)
                tqdm_test = tqdm(self.loader_test, ncols=80)
                for (idx_img, (lr, hr, filename, _)) in enumerate(tqdm_test):
                    filename = filename[0]
                    # A 1-element hr tensor marks "no ground truth available".
                    no_eval = (hr.nelement() == 1)
                    if (not no_eval):
                        (lr, hr) = self.prepare([lr, hr])
                    else:
                        lr = self.prepare([lr])[0]
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)
                    save_list = [sr]
                    if (not no_eval):
                        eval_acc += utility.calc_psnr(sr, hr, scale, self.args.rgb_range,
                                                      benchmark=self.loader_test.dataset.benchmark)
                        save_list.extend([lr, hr])
                    if self.args.save_results:
                        self.ckp.save_results(filename, save_list, scale)
                # Mean PSNR for this noise level.
                self.ckp.log[((- 1), idx_scale)] = (eval_acc / len(self.loader_test))
                best = self.ckp.log.max(0)
                self.ckp.write_log('[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test, scale,
                    self.ckp.log[((- 1), idx_scale)],
                    best[0][idx_scale],
                    (best[1][idx_scale] + 1)))
        self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
        if (not self.args.test_only):
            # `best` from the last loop iteration decides is_best.
            self.ckp.save(self, epoch, is_best=((best[1][0] + 1) == epoch))

    def prepare(self, l, volatile=False):
        """Move each tensor in ``l`` to the configured device, halving
        precision first when args.precision == 'half'. ``volatile`` is
        accepted for legacy parity and unused."""
        device = torch.device(('cpu' if self.args.cpu else 'cuda'))

        def _prepare(tensor):
            if (self.args.precision == 'half'):
                tensor = tensor.half()
            return tensor.to(device)
        return [_prepare(_l) for _l in l]

    def terminate(self):
        """Return True when the run should stop: after a single test pass in
        test-only mode, or once the epoch budget is exhausted."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = (self.scheduler.last_epoch + 1)
            return (epoch >= self.args.epochs)
def main(_):
    """Entry point: enumerate .bmp/.png images under ./test_real and run the
    T_CNN model on each one.

    A fresh TF session and model are built per image because the network's
    input shape is fixed to that image's height/width.
    """
    pp.pprint(flags.FLAGS.__flags)
    if (not os.path.exists(FLAGS.checkpoint_dir)):
        os.makedirs(FLAGS.checkpoint_dir)
    if (not os.path.exists(FLAGS.sample_dir)):
        os.makedirs(FLAGS.sample_dir)
    # NOTE(review): `filenames` is unused -- candidates come from glob below.
    filenames = os.listdir('test_real')
    data_dir = os.path.join(os.getcwd(), 'test_real')
    data = glob.glob(os.path.join(data_dir, '*.bmp'))
    test_data_list = (data + glob.glob(os.path.join(data_dir, '*.png')))
    for ide in range(0, len(test_data_list)):
        image_test = get_image(test_data_list[ide], is_grayscale=False)
        shape = image_test.shape
        with tf.Session() as sess:
            srcnn = T_CNN(sess,
                          image_height=shape[0], image_width=shape[1],
                          label_height=FLAGS.label_height, label_width=FLAGS.label_width,
                          batch_size=FLAGS.batch_size, c_dim=FLAGS.c_dim,
                          checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir,
                          test_image_name=test_data_list[ide], id=ide)
            # NOTE(review): despite the name, this presumably runs the
            # restore+inference path -- confirm against T_CNN.train.
            srcnn.train(FLAGS)
        # Redundant: the `with` block above already closed the session.
        sess.close()
class batch_norm(object):
    """Callable wrapper around tf.contrib.layers.batch_norm with a fixed
    variable scope, so repeated calls reuse the same statistics."""

    def __init__(self, epsilon=1e-05, momentum=0.9, name='batch_norm'):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name

    def __call__(self, x, train=True):
        """Apply batch norm to ``x``; ``train`` switches batch vs moving stats."""
        return tf.contrib.layers.batch_norm(x, decay=self.momentum,
                                            updates_collections=None,
                                            epsilon=self.epsilon, scale=True,
                                            is_training=train, scope=self.name)
def conv_cond_concat(x, y):
    'Concatenate conditioning vector on feature map axis.'
    # Tile y spatially to x's height/width, then concat along channels.
    # NOTE(review): `concat` is a file-level helper not visible here --
    # presumably a tf.concat wrapper; confirm.
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return concat([x, (y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]]))], 3)
def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, padding='SAME', name='conv2d'):
    """Strided 2-D convolution with truncated-normal weights and zero biases,
    scoped under ``name`` so variables are created/reused per scope."""
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[(- 1)], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        # reshape preserves the static shape info after bias_add.
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        return conv
def conv2d_xavier(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, padding='SAME', name='conv2d'):
    """Same as conv2d but with Xavier weight initialization.
    NOTE(review): ``stddev`` is accepted but unused here (Xavier sets its own
    scale) -- kept for signature parity with conv2d."""
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[(- 1)], output_dim],
                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        return conv
def deconv2d(input_, output_shape, k_h=3, k_w=3, d_h=1, d_w=1, stddev=0.02, name='deconv2d', with_w=False):
    """Transposed convolution to ``output_shape``; optionally also returns
    the weight and bias variables (``with_w=True``)."""
    with tf.variable_scope(name):
        # Filter shape: [h, w, out_channels, in_channels] for conv2d_transpose.
        w = tf.get_variable('w', [k_h, k_w, output_shape[(- 1)], input_.get_shape()[(- 1)]],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        try:
            deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
        except AttributeError:
            # Fallback for very old TF versions that only had tf.nn.deconv2d.
            deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases', [output_shape[(- 1)]], initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
        if with_w:
            return (deconv, w, biases)
        else:
            return deconv
def lrelu(x, leak=0.2, name='lrelu'):
    """Leaky ReLU: max(x, leak*x). ``name`` is unused (kept for parity)."""
    return tf.maximum(x, (leak * x))
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    """Fully connected layer: input_ @ Matrix + bias, under ``scope`` (default
    'Linear'); optionally also returns the variables (``with_w=True``)."""
    shape = input_.get_shape().as_list()
    with tf.variable_scope((scope or 'Linear')):
        matrix = tf.get_variable('Matrix', [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable('bias', [output_size], initializer=tf.constant_initializer(bias_start))
        if with_w:
            return ((tf.matmul(input_, matrix) + bias), matrix, bias)
        else:
            return (tf.matmul(input_, matrix) + bias)
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 and SAME padding (halves H and W)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv(inputs, kernel_size, output_num, stride_size=1, init_bias=0.0, conv_padding='SAME', stddev=0.01, activation_func=tf.nn.relu):
    """Square-kernel conv layer: random-normal weights, constant biases, and
    an optional activation (pass ``activation_func=None`` to skip it)."""
    input_size = inputs.get_shape().as_list()[(- 1)]
    conv_weights = tf.Variable(
        tf.random_normal([kernel_size, kernel_size, input_size, output_num],
                         dtype=tf.float32, stddev=stddev),
        name='weights')
    # BUG FIX: 'biases' was passed positionally, landing in tf.Variable's
    # second parameter (`trainable`) as a truthy string and leaving the
    # variable unnamed. It is now a proper name= keyword.
    conv_biases = tf.Variable(
        tf.constant(init_bias, shape=[output_num], dtype=tf.float32),
        name='biases')
    conv_layer = tf.nn.conv2d(inputs, conv_weights, [1, stride_size, stride_size, 1], padding=conv_padding)
    conv_layer = tf.nn.bias_add(conv_layer, conv_biases)
    if activation_func:
        conv_layer = activation_func(conv_layer)
    return conv_layer
def fc(inputs, output_size, init_bias=0.0, activation_func=tf.nn.relu, stddev=0.01):
    """Fully connected layer that auto-flattens 4-D conv outputs.

    NOTE(review): ``activation_func`` is accepted but never applied here --
    confirm whether callers expect a ReLU on the output.
    """
    input_shape = inputs.get_shape().as_list()
    if (len(input_shape) == 4):
        # Conv feature map: flatten H*W*C into one dimension.
        fc_weights = tf.Variable(tf.random_normal([((input_shape[1] * input_shape[2]) * input_shape[3]), output_size],
                                                  dtype=tf.float32, stddev=stddev), name='weights')
        inputs = tf.reshape(inputs, [(- 1), fc_weights.get_shape().as_list()[0]])
    else:
        fc_weights = tf.Variable(tf.random_normal([input_shape[(- 1)], output_size],
                                                  dtype=tf.float32, stddev=stddev), name='weights')
    fc_biases = tf.Variable(tf.constant(init_bias, shape=[output_size], dtype=tf.float32), name='biases')
    fc_layer = tf.matmul(inputs, fc_weights)
    fc_layer = tf.nn.bias_add(fc_layer, fc_biases)
    return fc_layer
def lrn(inputs, depth_radius=2, alpha=0.0001, beta=0.75, bias=1.0):
    """Local response normalization (AlexNet-style) with tunable parameters."""
    return tf.nn.local_response_normalization(inputs, depth_radius=depth_radius,
                                              alpha=alpha, beta=beta, bias=bias)
def transform(images):
    """Map pixel values from [0, 255] to floats in [-1, 1]."""
    scaled = np.array(images) / 127.5
    return scaled - 1.0
def inverse_transform(images):
    """Map values from [-1, 1] back to [0, 1] (inverse of transform())."""
    return (images + 1.0) * 0.5
def prepare_data(sess, dataset):
    """Collect image file paths (*.bmp then *.jpg) under ``dataset``.

    For a train dataset the output looks like
    ['.../t1.bmp', '.../t2.bmp', ..., '.../t99.bmp'].

    :param sess: unused; kept for backward compatibility with callers.
    :param dataset: directory (absolute, or relative to the cwd) holding images.
    :return: list of matching file paths.
    """
    # BUG FIX: the old code also called os.listdir(dataset) and threw the
    # result away — pointless directory scan removed.
    data_dir = os.path.join(os.getcwd(), dataset)
    data = glob.glob(os.path.join(data_dir, '*.bmp'))
    data += glob.glob(os.path.join(data_dir, '*.jpg'))
    return data
def imread(path, is_grayscale=False):
    """Read an image from ``path`` as a float array.

    Default is gray-scale off; images are read by YCbCr format as the
    paper said. When ``is_grayscale`` is True the image is flattened to a
    single channel.
    """
    # BUG FIX: np.float was removed in NumPy 1.24; it was an alias for the
    # builtin float, which is used directly here.
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — migrating
    # to imageio.imread should be considered.
    if is_grayscale:
        return scipy.misc.imread(path, flatten=True).astype(float)
    return scipy.misc.imread(path).astype(float)
def imsave(image, path):
    """Undo transform() scaling and write ``image`` to ``path``."""
    # BUG FIX: np.float was removed in NumPy 1.24; builtin float is the
    # type the old alias referred to.
    imsaved = inverse_transform(image).astype(float)
    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — consider
    # imageio.imwrite as a replacement.
    return scipy.misc.imsave(path, imsaved)
def get_image(image_path, is_grayscale=False):
    """Load an image and scale its pixel values to [-1, 1]."""
    return transform(imread(image_path, is_grayscale))
def get_lable(image_path, is_grayscale=False):
    """Load a label image and scale its pixel values to [0, 1]."""
    raw = imread(image_path, is_grayscale)
    return raw / 255.0
def imsave_lable(image, path):
    """Write a [0, 1]-scaled label image to ``path`` in 0-255 range."""
    scaled = image * 255
    return scipy.misc.imsave(path, scaled)
def _extract_target_file_name(img_src, img_dst, method=None):
    """Build an output file name from source/destination image paths.

    The last path component of each input, minus its 4-character extension
    (e.g. '.png'), is joined with '__'. When both paths have a parent
    directory, that directory name is prefixed to each stem. ``method``,
    when given, is prepended with an underscore.
    """
    src_parts = img_src.split('/')
    dst_parts = img_dst.split('/')
    src_stem = src_parts[-1][:-4]
    dst_stem = dst_parts[-1][:-4]
    if len(src_parts) > 1 and len(dst_parts) > 1:
        name = '{}_{}__{}_{}'.format(src_parts[-2], src_stem, dst_parts[-2], dst_stem)
    else:
        name = '{}__{}'.format(src_stem, dst_stem)
    return name if method is None else '{}_{}'.format(method, name)
def average_checkpoints(model_checkpoints):
    """Average the parameters stored in a list of checkpoint files.

    All checkpoints must contain exactly the same parameter keys; half
    precision tensors are upcast to float before accumulation.

    :param model_checkpoints: paths of checkpoints loadable via torch.load
    :return: OrderedDict mapping parameter name -> averaged tensor
    :raises KeyError: if any checkpoint's key list differs from the first
    """
    n_ckpts = len(model_checkpoints)
    summed = collections.OrderedDict()
    expected_keys = None
    reference_state = None  # first loaded state, kept for parity with the original
    for ckpt_path in model_checkpoints:
        state = torch.load(ckpt_path, map_location='cpu')
        if reference_state is None:
            reference_state = state
        keys = list(state.keys())
        if expected_keys is None:
            expected_keys = keys
        elif expected_keys != keys:
            raise KeyError('For checkpoint {}, expected list of params: {}, but found: {}'.format(ckpt_path, expected_keys, keys))
        for key in expected_keys:
            tensor = state[key]
            if isinstance(tensor, torch.HalfTensor):
                tensor = tensor.float()
            if key in summed:
                summed[key] += tensor
            else:
                summed[key] = tensor.clone()
    averaged = collections.OrderedDict()
    for key, total in summed.items():
        averaged[key] = total
        averaged[key].div_(n_ckpts)
    return averaged
def main():
    """CLI entry point: average the best-N checkpoints by validation accuracy.

    Reads a ``val*.json`` epoch->accuracy map from the checkpoint
    directory, picks the best N epochs (optionally ignoring the first
    ``--after-ep`` epochs), averages the matching ``model_*.pth`` files,
    and writes the result back into the same directory.
    """
    parser = argparse.ArgumentParser(description='Average checkpoints')
    parser.add_argument('--checkpoint-dir', required=True, type=str, default='results', help='Checkpoint directory location.')
    parser.add_argument('--best-n', required=True, type=int, default=5, help='Num of epochs to average')
    parser.add_argument('--after-ep', type=int, default=(-1), help='Ignore first k epoch')
    args = parser.parse_args()
    ckpt_dir = args.checkpoint_dir
    # Epoch -> accuracy map is the first val*.json file in the directory.
    acc_path = glob.glob('{}/val*.json'.format(ckpt_dir))[0]
    with open(acc_path, 'r') as fh:
        raw_acc = json.load(fh)
    filtered = [(int(ep), acc) for ep, acc in raw_acc.items() if int(ep) > args.after_ep]
    # Rank by accuracy, breaking ties toward the later epoch.
    ranked = sorted(filtered, key=lambda pair: (pair[1], pair[0]), reverse=True)
    epoch_numbers = [ep for ep, _ in ranked[:args.best_n]]
    print(f'epochs to average: {epoch_numbers}')
    checkpoints = []
    for f_name in glob.glob('{}/model_*.pth'.format(ckpt_dir)):
        if 'model' not in f_name or 'best' in f_name:
            continue
        ep_no = int(f_name.split('_')[-1].split('.pth')[0])
        if ep_no not in epoch_numbers:
            continue
        if os.path.isfile(f_name):
            checkpoints.append(f_name)
        else:
            print('File does not exist. {}'.format(f_name))
    assert len(checkpoints) > 1, 'Atleast two checkpoints are required for averaging'
    averaged_weights = average_checkpoints(checkpoints)
    if args.after_ep == (-1):
        ckp_name = '{}/averaged_model_best{}.pth'.format(ckpt_dir, args.best_n)
    else:
        ckp_name = '{}/averaged_model_best{}_after{}.pth'.format(ckpt_dir, args.best_n, args.after_ep)
    torch.save(averaged_weights, ckp_name)
    print('Finished writing averaged checkpoint')
def general_opts(parser):
    """Register general, task-agnostic command line options.

    :param parser: an argparse.ArgumentParser instance
    :return: the same parser, with a 'General Options' group added
    """
    group = parser.add_argument_group('General Options')
    int_options = (
        ('--log-interval', 5, 'After how many iterations, we should print logs'),
        ('--epochs', 100, 'Number of training epochs'),
        ('--seed', 1882, 'Random seed'),
    )
    for flag, default, help_text in int_options:
        group.add_argument(flag, type=int, default=default, help=help_text)
    group.add_argument('--config-file', type=str, default='', help='Config file if exists')
    group.add_argument('--msc-eval', action='store_true', default=False, help='Multi-scale evaluation')
    return parser
def visualization_opts(parser):
    """Register options controlling result visualization.

    :param parser: an argparse.ArgumentParser instance
    :return: the same parser, with a 'Visualization options' group added
    """
    group = parser.add_argument_group('Visualization options')
    add = group.add_argument
    add('--im-or-file', type=str, required=True,
        help='Name of the image or list of images in file to be visualized')
    add('--is-type-file', action='store_true', default=False,
        help='Is it a file? ')
    add('--img-extn-vis', type=str, required=True,
        help='Image extension without dot (example is png)')
    add('--vis-res-dir', type=str, default='results_vis',
        help='Results after visualization')
    add('--no-pt-files', action='store_true', default=False,
        help='Do not save data using torch.save')
    return parser
def get_opts(parser):
    """Compose every option group (general, optimizer, loss, model,
    dataset, LR scheduler, base feature extractor) onto ``parser``."""
    # Each helper mutates and returns the same parser instance.
    for register in (general_opts, get_optimizer_opts, get_criteria_opts,
                     get_model_opts, get_dataset_opts, get_scheduler_opts,
                     get_base_extractor_opts):
        parser = register(parser)
    return parser
def get_config(is_visualization=False):
    """Parse CLI arguments, seed the RNGs, and derive the results directory.

    :param is_visualization: when True, also registers visualization options
    :return: (parsed args, parser)
    """
    parser = argparse.ArgumentParser(description='Medical Imaging')
    parser = get_opts(parser)
    if is_visualization:
        parser = visualization_opts(parser)
    args = parser.parse_args()
    # Seed Python and torch RNGs for reproducibility.
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.set_num_threads(args.data_workers)
    timestr = time.strftime('%Y%m%d-%H%M%S')
    args.savedir = 'results_{}/{}_s_{}/sch_{}/{}/'.format(args.dataset, args.base_extractor, args.s, args.scheduler, timestr)
    return args, parser
class CrossEntropyWithLabelSmoothing(nn.Module):
    """Cross-entropy loss with uniform label smoothing.

    The target distribution mixes the one-hot target with a uniform
    distribution over classes, weighted by ``ls_eps``.
    """

    def __init__(self, ls_eps=0.1, ignore_idx=None, reduce=True, reduction='mean', *args, **kwargs):
        """
        :param ls_eps: label smoothing factor
        :param ignore_idx: target index whose loss contribution is zeroed
        :param reduce: when True, sum the per-sample losses
        :param reduction: 'mean' divides the summed loss by the batch size
        """
        super(CrossEntropyWithLabelSmoothing, self).__init__()
        self.ls_eps = ls_eps
        self.ignore_idx = ignore_idx
        self.reduce = reduce
        self.reduction = reduction

    def compute_loss(self, log_probs, target):
        """Combine the NLL and smoothing terms from per-class log-probs."""
        if target.dim() == log_probs.dim() - 1:
            target = target.unsqueeze(-1)
        nll = -log_probs.gather(dim=-1, index=target)
        smooth = -log_probs.sum(dim=-1, keepdim=True)
        if self.ignore_idx is not None:
            ignore_mask = target.eq(self.ignore_idx)
            if ignore_mask.any():
                nll.masked_fill_(ignore_mask, 0.0)
                smooth.masked_fill_(ignore_mask, 0.0)
        else:
            nll = nll.squeeze(-1)
            smooth = smooth.squeeze(-1)
        if self.reduce:
            nll = nll.sum()
            smooth = smooth.sum()
        per_class_eps = self.ls_eps / log_probs.size(-1)
        return (1.0 - self.ls_eps) * nll + per_class_eps * smooth

    def forward(self, pred, target):
        """
        :param pred: raw scores of shape B x C
        :param target: class indices of shape B
        :return: scalar loss (averaged over B when reduction='mean')
        """
        assert pred.dim() == 2, 'Should be B x C'
        B, C = pred.size()
        log_probs = F.log_softmax(pred, dim=-1).view(-1, C)
        loss = self.compute_loss(log_probs, target.view(-1, 1))
        if self.reduction == 'mean':
            loss /= B
        return loss
class BBWSIDataset(torch.utils.data.Dataset):
    """Data loader for breast biopsy whole-slide images (WSIs).

    Each sample is a (file name, diagnostic label) pair; images are not
    decoded here.
    """

    def __init__(self, img_dir, split_file, img_extn='tiff', delimeter=','):
        """
        :param img_dir: directory that contains the WSIs
        :param split_file: split listing file (train, val, or test)
        :param img_extn: image extension (e.g. tiff) without the dot
        :param delimeter: separator between image file name and class label
        """
        if not os.path.isfile(split_file):
            print_error_message('{} does not exist.'.format(split_file))
        super(BBWSIDataset, self).__init__()
        fnames, labels = [], []
        with open(split_file, 'r') as fopen:
            for line in fopen.readlines():
                img_id, label = line.strip().split(delimeter)
                img_fname = '{}/{}.{}'.format(img_dir, img_id.strip(), img_extn)
                if not os.path.isfile(img_fname):
                    print_error_message('{} file does not exist.'.format(img_fname))
                fnames.append(img_fname)
                labels.append(int(label.strip()))
        self.wsi_fnames = fnames
        self.diag_labels = labels
        self.n_classes = len(np.unique(labels))
        self.class_names = ['Benign', 'Atypia', 'DCIS', 'Invasive']
        print_info_message('Samples in {}: {}'.format(split_file, len(self.wsi_fnames)))

    def __len__(self):
        """Number of WSIs in the split."""
        return len(self.wsi_fnames)

    def __getitem__(self, index):
        """Return the (file name, diagnostic label) pair at ``index``."""
        return self.wsi_fnames[index], self.diag_labels[index]
def get_bag_word_pairs(bag_word_size: tuple, scale_factor: int, scale_multipliers: list):
    """Return unique (bag, word) size pairs around a default size.

    For every multiplier m, the new word size is
    ``word + scale_factor * m`` (rounded up to a multiple of
    ``scale_factor``) and the bag size keeps the original bag/word ratio.

    Example: (1024, 256) with scale_factor=32 and multipliers
    [-2, -1, 0, 1, 2] gives word sizes [192, 224, 256, 288, 320] with bag
    sizes four times as large (1024/256 = 4).
    """
    base_bag, base_word = bag_word_size
    assert base_bag % base_word == 0, 'Bag size should be divisible by word size. Got B: {}, W: {}'.format(base_bag, base_word)
    ratio = base_bag // base_word
    assert ratio >= 1, 'Number of bags should be greater than 0. Got # bags = {}'.format(ratio)
    pairs = []
    for mult in scale_multipliers:
        word = base_word + scale_factor * mult
        if word % scale_factor != 0:
            # Round up to the nearest multiple of scale_factor.
            word = int(math.ceil(word * 1.0 / scale_factor) * scale_factor)
        candidate = (word * ratio, word)
        if candidate not in pairs:
            pairs.append(candidate)
    return pairs
def gen_collate_fn(batch, bag_word_size: Optional[tuple]=(1024, 256), is_training: Optional[bool]=False, scale_factor: Optional[int]=32, scale_multipliers: Optional[list]=[(- 2), (- 1), 0, 1, 2]):
    """Collate (image name, label) pairs into word tensors and a label tensor.

    During training, the (bag, word) size is sampled at random from the
    multi-scale pairs derived from ``scale_factor`` and
    ``scale_multipliers``.

    :param batch: batch of (image name, label) items
    :param bag_word_size: default (bag, word) size
    :param is_training: training or evaluation stage
    :param scale_factor: step by which the word size is varied in training
    :param scale_multipliers: multipliers generating the multi-scale pairs
    :return: (words tensor of shape B x N_w x C x h_w x w_w,
              labels tensor of shape B)
    """
    if is_training:
        # Randomly pick one of the multi-scale (bag, word) pairs.
        candidates = get_bag_word_pairs(bag_word_size=bag_word_size, scale_factor=scale_factor, scale_multipliers=scale_multipliers)
        bag_word_size = random.choices(candidates)
    # random.choices returns a one-element list; unwrap it.
    if isinstance(bag_word_size, list):
        bag_word_size = bag_word_size[0]
    assert bag_word_size[0] % bag_word_size[1] == 0, 'Bag size should be divisible by word size. B: {}, W: {}'.format(bag_word_size[0], bag_word_size[1])
    bag_sz, word_sz = bag_word_size
    num_bags = bag_sz // word_sz
    word_tensors = []
    label_tensors = []
    for item in batch:
        img_id = item[0]
        words = convert_image_to_words(image_name=img_id, bag_width=bag_sz, bag_height=bag_sz, num_bags_h=num_bags, num_bags_w=num_bags, word_width=word_sz, word_height=word_sz, is_training=is_training)
        word_tensors.append(words)
        label_tensors.append(torch.LongTensor(1).fill_(item[1]))
    return torch.stack(word_tensors, dim=0), torch.cat(label_tensors, dim=0)
def _load_image_lessthan_2_29(buf, size):
    """Build an RGBA PIL image from an ARGB pixel buffer.

    ``buf`` must be a mutable buffer: it is converted ARGB -> RGBA in
    place before being handed to PIL.
    """
    _convert.argb2rgba(buf)
    image = PIL.Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
    return image
def _load_image_morethan_2_29(buffer, size):
    """Build a large RGBA PIL image from a raw pixel buffer, in row chunks.

    PIL cannot load more than 2**29 - 1 pixels in one call, so the buffer
    is pasted in slices of at most PIXELS_PER_LOAD pixels each.

    :param buffer: byte buffer of 4-byte-per-pixel (premultiplied) data
    :param size: (width, height) in pixels
    :raises ValueError: if a single row exceeds PIXELS_PER_LOAD pixels
    """
    MAX_PIXELS_PER_LOAD = (1 << 29) - 1
    PIXELS_PER_LOAD = 1 << 26

    def do_load(buf, size):
        """buf can be a string, but should be a ctypes buffer to avoid an
        extra copy in the caller."""
        # Native byte order decides how each 32-bit pixel is laid out.
        rawmode = ((sys.byteorder == 'little') and 'BGRA') or 'ARGB'
        buf = PIL.Image.frombuffer('RGBA', size, buf, 'raw', rawmode, 0, 1)
        buf = (getattr(buf, 'tobytes', None) or buf.tostring)()
        return PIL.Image.frombuffer('RGBA', size, buf, 'raw', 'RGBa', 0, 1)

    w, h = size
    if w * h <= MAX_PIXELS_PER_LOAD:
        return do_load(buffer, size)
    if w > PIXELS_PER_LOAD:
        raise ValueError('Width %d is too large (maximum %d)' % (w, PIXELS_PER_LOAD))
    rows_per_load = PIXELS_PER_LOAD // w
    img = PIL.Image.new('RGBA', (w, h))
    mv = memoryview(buffer)
    for y in range(0, h, rows_per_load):
        rows = min(h - y, rows_per_load)
        # BUG FIX: offsets are in BYTES (4 bytes per RGBA pixel). The old
        # Python 3 branch sliced the memoryview by pixel count, producing a
        # chunk 4x too small, and the Python 2 branch called the shadowed
        # `buffer` parameter instead of the builtin. memoryview exists on
        # 2.7+ as well, so a single byte-offset slice covers both.
        chunk = mv[(4 * y * w):(4 * (y + rows) * w)].tobytes()
        img.paste(do_load(chunk, (w, rows)), (0, y))
    return img
class WSIDataset(torch.utils.data.Dataset):
    """Generic data loader for whole-slide images (WSIs).

    Each sample is a (file name, diagnostic label) pair; class names are
    just the integer class indices.
    """

    def __init__(self, img_dir, split_file, img_extn='tiff', delimeter=','):
        """
        :param img_dir: directory that contains the WSIs
        :param split_file: split listing file (train, val, or test)
        :param img_extn: image extension (e.g. tiff) without the dot
        :param delimeter: separator between image file name and class label
        """
        if not os.path.isfile(split_file):
            print_error_message('{} does not exist.'.format(split_file))
        super(WSIDataset, self).__init__()
        fnames, labels = [], []
        with open(split_file, 'r') as fopen:
            for line in fopen.readlines():
                img_id, label = line.strip().split(delimeter)
                img_fname = '{}/{}.{}'.format(img_dir, img_id.strip(), img_extn)
                if not os.path.isfile(img_fname):
                    print_error_message('{} file does not exist.'.format(img_fname))
                fnames.append(img_fname)
                labels.append(int(label.strip()))
        self.wsi_fnames = fnames
        self.diag_labels = labels
        self.n_classes = len(np.unique(labels))
        self.class_names = list(range(self.n_classes))
        print_info_message('Samples in {}: {}'.format(split_file, len(self.wsi_fnames)))

    def __len__(self):
        """Number of WSIs in the split."""
        return len(self.wsi_fnames)

    def __getitem__(self, index):
        """Return the (file name, diagnostic label) pair at ``index``."""
        return self.wsi_fnames[index], self.diag_labels[index]
def compute_micro_stats(values_a, values_b, eps=1e-08):
    """Micro-averaged ratio: sum(a) / (sum(a) + sum(b) + eps)."""
    total_a = np.sum(values_a)
    total_b = np.sum(values_b)
    return total_a / (total_a + total_b + eps)
def compute_macro_stats(values):
    """Macro-averaged statistic: the unweighted mean of per-class values."""
    return np.mean(np.asarray(values))
class CMMetrics(object):
    """Classification metrics derived from a confusion matrix.

    Unless noted otherwise, every compute_* helper returns a
    (micro, macro, class_wise) tuple, where ``class_wise`` holds the
    per-class values and ``eps`` guards all divisions against zero.
    Metrics defined here: https://www.sciencedirect.com/science/article/pii/S2210832718301546
    """

    def __init__(self):
        super(CMMetrics, self).__init__()
        # Small additive constant that avoids division by zero.
        self.eps = 1e-08

    def compute_precision(self, tp, fp):
        '''
        Precision = TP/(TP + FP)
        '''
        class_wise = (tp / ((tp + fp) + self.eps))
        micro = compute_micro_stats(tp, fp)
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_senstivity(self, tp, fn):
        '''
        Sensitivity = TP/(TP + FN)
        '''
        class_wise = (tp / ((tp + fn) + self.eps))
        micro = compute_micro_stats(tp, fn)
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_specificity(self, tn, fp):
        # Specificity = TN / (TN + FP)
        class_wise = (tn / ((tn + fp) + self.eps))
        micro = compute_micro_stats(tn, fp)
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_recall(self, tp, fn):
        # Recall = TP / (TP + FN); same formula as sensitivity/TPR.
        class_wise = (tp / ((tp + fn) + self.eps))
        micro = compute_micro_stats(tp, fn)
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_f1(self, precision, recall):
        # Harmonic mean of precision and recall. No eps here; callers pass
        # values that were already eps-stabilized.
        return (((2.0 * precision) * recall) / (precision + recall))

    def compute_acc(self, tp, tn, fp, fn):
        # Accuracy = (TP + TN) / (TP + TN + FP + FN)
        class_wise = ((tp + tn) / ((((tp + tn) + fp) + fn) + self.eps))
        # NOTE(review): compute_micro_stats sums BOTH arguments in its
        # denominator, so passing the full total as the second argument
        # double-counts (tp + tn) — confirm this is intended.
        micro = compute_micro_stats((tp + tn), (((tp + tn) + fp) + fn))
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_overall_acc(self, tp, N):
        # Overall accuracy: total true positives over total samples.
        # Returns a scalar, not the (micro, macro, class) triple.
        return (tp.sum() / (N + self.eps))

    def compute_tpr(self, tp, fn):
        # True positive rate = TP / (TP + FN)
        class_wise = (tp / ((tp + fn) + self.eps))
        micro = compute_micro_stats(tp, fn)
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_tnr(self, tn, fp):
        # True negative rate = TN / (TN + FP)
        class_wise = (tn / ((tn + fp) + self.eps))
        micro = compute_micro_stats(tn, fp)
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_fpr(self, fp, tn):
        # False positive rate = FP / (FP + TN)
        class_wise = (fp / ((fp + tn) + self.eps))
        micro = compute_micro_stats(fp, tn)
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_fnr(self, fn, tp):
        # False negative rate = FN / (FN + TP)
        class_wise = (fn / ((fn + tp) + self.eps))
        micro = compute_micro_stats(fn, tp)
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_ppv(self, tp, fp):
        # Positive predictive value equals precision by definition.
        return self.compute_precision(tp=tp, fp=fp)

    def compute_npv(self, tn, fn):
        # Negative predictive value = TN / (TN + FN)
        class_wise = (tn / ((tn + fn) + self.eps))
        micro = compute_micro_stats(tn, fn)
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_neg_lr(self, tpr, tnr):
        # Negative likelihood ratio = (1 - TPR) / TNR.
        # Returns a scalar/array, not the (micro, macro, class) triple.
        return ((1.0 - tpr) / (tnr + self.eps))

    def compute_pos_lr(self, tpr, tnr):
        # Positive likelihood ratio = TPR / (1 - TNR). Scalar/array result.
        return (tpr / ((1.0 - tnr) + self.eps))

    def compute_dor(self, tp, tn, fp, fn):
        # Diagnostic odds ratio = (TP * TN) / (FP * FN)
        class_wise = ((tp * tn) / ((fp * fn) + self.eps))
        micro = compute_micro_stats((tp * tn), (fp * fn))
        macro = compute_macro_stats(class_wise)
        return (micro, macro, class_wise)

    def compute_younden_index(self, tpr, tnr):
        # Youden's J statistic = TPR + TNR - 1. Scalar/array result.
        return ((tpr + tnr) - 1.0)

    def compute_metrics(self, conf_mat):
        """Compute every supported metric from a confusion matrix.

        :param conf_mat: square numpy confusion matrix; for more than two
            classes, per-class TP/FP/FN/TN are derived from the matrix,
            otherwise ravel() yields (tn, fp, fn, tp) directly.
        :return: a fully populated CMResults record
        """
        num_samples = conf_mat.sum()
        if (conf_mat.shape[0] > 2):
            # Multi-class: per-class counts from diagonal / row / column sums.
            true_positives = np.diag(conf_mat)
            false_positives = (conf_mat.sum(axis=0) - true_positives)
            false_negatives = (conf_mat.sum(axis=1) - true_positives)
            true_negatives = (conf_mat.sum() - ((false_positives + false_negatives) + true_positives))
        else:
            # Binary case: numpy's conventional (tn, fp, fn, tp) ordering.
            (true_negatives, false_positives, false_negatives, true_positives) = conf_mat.ravel()
        # Work in float so the eps-stabilized divisions behave.
        false_positives = false_positives.astype(float)
        false_negatives = false_negatives.astype(float)
        true_positives = true_positives.astype(float)
        true_negatives = true_negatives.astype(float)
        (sensitivity_micro, sensitivity_macro, sensitivity_class) = self.compute_senstivity(tp=true_positives, fn=false_negatives)
        (specificity_micro, specificity_macro, specificity_class) = self.compute_specificity(tn=true_negatives, fp=false_positives)
        (precision_micro, precision_macro, precision_class) = self.compute_precision(tp=true_positives, fp=false_positives)
        (recall_micro, recall_macro, recall_class) = self.compute_recall(tp=true_positives, fn=false_negatives)
        # F1: micro from micro-averaged P/R; macro as the mean of per-class F1.
        f1_micro = self.compute_f1(precision=precision_micro, recall=recall_micro)
        f1_macro = compute_macro_stats(self.compute_f1(precision=precision_class, recall=recall_class))
        f1_class = self.compute_f1(precision=precision_class, recall=recall_class)
        (acc_micro, acc_macro, acc_class) = self.compute_acc(tp=true_positives, tn=true_negatives, fp=false_positives, fn=false_negatives)
        overall_acc = self.compute_overall_acc(tp=true_positives, N=num_samples)
        (tpr_micro, tpr_macro, tpr_class) = self.compute_tpr(tp=true_positives, fn=false_negatives)
        (tnr_micro, tnr_macro, tnr_class) = self.compute_tnr(tn=true_negatives, fp=false_positives)
        (fpr_micro, fpr_macro, fpr_class) = self.compute_fpr(fp=false_positives, tn=true_negatives)
        (fnr_micro, fnr_macro, fnr_class) = self.compute_fnr(fn=false_negatives, tp=true_positives)
        (ppv_micro, ppv_macro, ppv_class) = self.compute_ppv(tp=true_positives, fp=false_positives)
        (npv_micro, npv_macro, npv_class) = self.compute_npv(tn=true_negatives, fn=false_negatives)
        # Likelihood ratios and Youden's index are derived from TPR/TNR.
        neg_lr_micro = self.compute_neg_lr(tpr=tpr_micro, tnr=tnr_micro)
        neg_lr_class = self.compute_neg_lr(tpr=tpr_class, tnr=tnr_class)
        neg_lr_macro = compute_macro_stats(self.compute_neg_lr(tpr=tpr_class, tnr=tnr_class))
        pos_lr_micro = self.compute_pos_lr(tpr=tpr_micro, tnr=tnr_micro)
        pos_lr_class = self.compute_pos_lr(tpr=tpr_class, tnr=tnr_class)
        pos_lr_macro = compute_macro_stats(self.compute_pos_lr(tpr=tpr_class, tnr=tnr_class))
        (dor_micro, dor_macro, dor_class) = self.compute_dor(tp=true_positives, tn=true_negatives, fp=false_positives, fn=false_negatives)
        yi_micro = self.compute_younden_index(tpr=tpr_micro, tnr=tnr_micro)
        yi_class = self.compute_younden_index(tpr=tpr_class, tnr=tnr_class)
        yi_macro = compute_macro_stats(self.compute_younden_index(tpr=tpr_class, tnr=tnr_class))
        # CMResults is a project-defined result record (declared elsewhere).
        return CMResults(overall_accuracy=overall_acc,
                         sensitivity_micro=sensitivity_micro, sensitivity_macro=sensitivity_macro, sensitivity_class=sensitivity_class,
                         specificity_micro=specificity_micro, specificity_macro=specificity_macro, specificity_class=specificity_class,
                         precision_micro=precision_micro, precision_macro=precision_macro, precision_class=precision_class,
                         recall_micro=recall_micro, recall_macro=recall_macro, recall_class=recall_class,
                         f1_micro=f1_micro, f1_macro=f1_macro, f1_class=f1_class,
                         accuracy_micro=acc_micro, accuracy_macro=acc_macro, accuracy_class=acc_class,
                         true_positive_rate_micro=tpr_micro, true_positive_rate_macro=tpr_macro, true_positive_rate_class=tpr_class,
                         true_negative_rate_micro=tnr_micro, true_negative_rate_macro=tnr_macro, true_negative_rate_class=tnr_class,
                         false_positive_rate_micro=fpr_micro, false_positive_rate_macro=fpr_macro, false_positive_rate_class=fpr_class,
                         false_negative_rate_micro=fnr_micro, false_negative_rate_macro=fnr_macro, false_negative_rate_class=fnr_class,
                         positive_pred_value_micro=ppv_micro, positive_pred_value_macro=ppv_macro, positive_pred_value_class=ppv_class,
                         negative_pred_value_micro=npv_micro, negative_pred_value_macro=npv_macro, negative_pred_value_class=npv_class,
                         negative_likelihood_ratio_micro=neg_lr_micro, negative_likelihood_ratio_macro=neg_lr_macro, negative_likelihood_ratio_class=neg_lr_class,
                         positive_likelihood_ratio_micro=pos_lr_micro, positive_likelihood_ratio_macro=pos_lr_macro, positive_likelihood_ratio_class=pos_lr_class,
                         diagnostic_odd_ratio_micro=dor_micro, diagnostic_odd_ratio_macro=dor_macro, diagnostic_odd_ratio_class=dor_class,
                         younden_index_micro=yi_micro, younden_index_macro=yi_macro, younden_index_class=yi_class)
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    :param output: B x C score tensor
    :param target: B tensor of class indices
    :param topk: iterable of k values
    :return: list of 1-element tensors with percentage accuracy per k
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # BUG FIX: `correct` inherits the transposed (non-contiguous)
            # layout, so correct[:k].view(-1) raises for k > 1 on recent
            # PyTorch; reshape handles non-contiguous input.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def compute_f1(y_pred: torch.Tensor, y_true: torch.Tensor, n_classes=4, epsilon=1e-07, is_one_hot=False):
    """Micro-averaged F1 score (in percent) from raw class scores.

    :param y_pred: B x C score tensor; argmax over dim 1 picks the class
    :param y_true: B class indices, or a one-hot matrix when is_one_hot
    :param n_classes: number of classes used for one-hot encoding
    :param epsilon: numerical stabilizer for the ratio computations
    :param is_one_hot: whether y_true is already one-hot encoded
    """
    if is_one_hot:
        assert y_pred.dim() == y_true.dim()
    else:
        assert len(y_pred.size()) == 2
        assert len(y_true.size()) == 1
    with torch.no_grad():
        if is_one_hot:
            true_oh = y_true.to(torch.float32)
        else:
            true_oh = F.one_hot(y_true.to(torch.int64), n_classes).to(torch.float32)
        pred_oh = F.one_hot(y_pred.argmax(dim=1).to(torch.int64), n_classes).to(torch.float32)
        tp = (true_oh * pred_oh).sum().to(torch.float32)
        # tn is computed but unused by the F1 formula; kept for parity
        # with the original implementation.
        tn = ((1 - true_oh) * (1 - pred_oh)).sum().to(torch.float32)
        fp = ((1 - true_oh) * pred_oh).sum().to(torch.float32)
        fn = (true_oh * (1 - pred_oh)).sum().to(torch.float32)
        precision = tp / (tp + fp + epsilon)
        recall = tp / (tp + fn + epsilon)
        f1 = 2 * (precision * recall) / (precision + recall + epsilon)
        return torch.mean(f1) * 100
class Statistics(object):
    """Accumulates running training/validation loss and accuracy."""

    def __init__(self):
        super(Statistics, self).__init__()
        self.loss = 0
        self.acc = 0
        self.eps = 1e-09
        # BUG FIX: the counter used to start at 1 while update() also
        # incremented it, so averages divided by n+1 instead of n.
        self.counter = 0

    def update(self, loss, acc):
        """
        :param loss: Loss at ith time step
        :param acc: Accuracy at ith time step
        :return:
        """
        self.loss += loss
        self.acc += acc
        self.counter += 1

    def __str__(self):
        return 'Loss: {}'.format(self.loss)

    def avg_acc(self):
        """
        :return: Average Accuracy (0 before any update)
        """
        return self.acc / max(self.counter, 1)

    def avg_loss(self):
        """
        :return: Average loss (0 before any update)
        """
        return self.loss / max(self.counter, 1)

    def output(self, epoch, batch, n_batches, start, lr):
        """
        Displays the output
        :param epoch: Epoch number
        :param batch: batch number
        :param n_batches: Total number of batches in the dataset
        :param start: Epoch start time
        :param lr: Current LR
        :return:
        """
        print_log_message('Epoch: {:3d} [{:8d}/{:8d}], Loss: {:5.2f}, Acc: {:3.2f}, LR: {:1.6f}, Elapsed time: {:5.2f} seconds'.format(epoch, batch, n_batches, self.avg_loss(), self.avg_acc(), lr, (time.time() - start)))
        sys.stdout.flush()
class BaseFeatureExtractor(torch.nn.Module):
    """Wraps one of several backbone CNNs as a pure feature extractor.

    The selected backbone is loaded with ImageNet-pretrained weights and
    its classification head is removed, so forward() yields per-word
    feature vectors of width ``output_feature_sz``.
    """

    def __init__(self, opts):
        '''
        :param opts: Argument list
        '''
        super(BaseFeatureExtractor, self).__init__()
        if (opts.base_extractor == 'espnetv2'):
            from model.feature_extractors.espnetv2 import EESPNet
            self.base_model = EESPNet(opts)
            self.initialize_base_model(opts.weights)
            # Record the feature width from the classifier head, then drop
            # the head so the backbone emits features only.
            output_feature_sz = self.base_model.classifier.in_features
            del self.base_model.classifier
        elif (opts.base_extractor == 'mobilenetv2'):
            from model.feature_extractors.mobilenetv2 import MobileNetV2
            self.base_model = MobileNetV2()
            self.initialize_base_model(opts.weights)
            output_feature_sz = self.base_model.last_channel
            del self.base_model.classifier
        elif (opts.base_extractor == 'mnasnet'):
            from model.feature_extractors.mnasnet import MNASNet
            assert (opts.s == 1.0), 'We are currently supporting models with scale = 1.0. If you are interested in exploring more models, download those from PyTorch repo and use it after uncommenting this assertion. '
            self.base_model = MNASNet(alpha=opts.s)
            self.initialize_base_model(opts.weights)
            output_feature_sz = self.base_model.last_channel
            del self.base_model.classifier
        else:
            # NOTE(review): if print_error_message does not terminate the
            # program, output_feature_sz is unbound here — verify.
            print_error_message('{} model not yet supported'.format(opts.base_extractor))
        # Dimensionality of the feature vectors produced by forward().
        self.output_feature_sz = output_feature_sz

    def initialize_base_model(self, wts_loc):
        '''
        This function initializes the base model

        :param wts_loc: Location of the weights file
        '''
        if (not os.path.isfile(wts_loc)):
            print_error_message('No file exists here: {}'.format(wts_loc))
        print_log_message('Loading Imagenet trained weights')
        # Load on CPU so initialization also works on GPU-less machines.
        pretrained_dict = torch.load(wts_loc, map_location=torch.device('cpu'))
        self.base_model.load_state_dict(pretrained_dict)
        print_log_message('Loading over')

    def forward(self, words):
        '''
        :param words: Word tensor of shape (N_w x C x w x h)
        :return: Features vector for words (N_w x F)
        '''
        assert (words.dim() == 4), 'Input should be 4 dimensional tensor (B x 3 X H x W)'
        words = self.base_model(words)
        return words
def get_base_extractor_opts(parser):
    """Register options for the base feature-extractor CNN.

    :param parser: an argparse.ArgumentParser instance
    :return: the same parser, with a 'CNN Model Details' group added
    """
    group = parser.add_argument_group('CNN Model Details')
    add = group.add_argument
    add('--base-extractor', default='espnetv2', choices=supported_base_models,
        help='Which CNN model? Default is espnetv2')
    add('--s', type=float, default=2.0,
        help='Factor by which channels will be scaled. Default is 2.0 for espnetv2')
    add('--weights', type=str,
        default='model/model_zoo/espnetv2/espnetv2_s_2.0_imagenet_224x224.pth',
        help='Location of imagenet pretrained weights')
    add('--num_classes', type=int, default=1000,
        help='Number of classes in the base feature extractor. Default is 1000 for the ImageNet pretrained model')
    add('--channels', type=int, default=3,
        help='Number of input image channesl. Default is 3 for RGB image')
    return parser
class EESPNet(nn.Module):
    '''
    This class defines the ESPNetv2 architecture for the ImageNet classification
    '''

    def __init__(self, args):
        """
        :param args: argument namespace; ``args.s`` selects the channel
            scale, ``args.num_classes`` defaults to 1000 and
            ``args.channels`` to 3 when absent.
        """
        super().__init__()
        try:
            num_classes = args.num_classes
        except:
            num_classes = 1000
        try:
            channels_in = args.channels
        except:
            channels_in = 3
        s = args.s
        if (not (s in config_all.sc_ch_dict.keys())):
            print_error_message('Model at scale s={} is not suppoerted yet'.format(s))
            exit((- 1))
        # Per-level output channel counts for the selected scale.
        out_channel_map = config_all.sc_ch_dict[args.s]
        reps_at_each_level = config_all.rep_layers
        recept_limit = config_all.recept_limit
        # Same branching factor at every level.
        K = ([config_all.branches] * len(recept_limit))
        # Whether the raw input is re-injected at the downsampling stages.
        self.input_reinforcement = config_all.input_reinforcement
        assert (len(K) == len(recept_limit)), 'Length of branching factor array and receptive field array should be the same.'
        # Level 1: strided standard conv; levels 2-5: downsampler followed
        # by repeated EESP units.
        self.level1 = CBR(channels_in, out_channel_map[0], 3, 2)
        self.level2_0 = DownSampler(out_channel_map[0], out_channel_map[1], k=K[0], r_lim=recept_limit[0], reinf=self.input_reinforcement)
        self.level3_0 = DownSampler(out_channel_map[1], out_channel_map[2], k=K[1], r_lim=recept_limit[1], reinf=self.input_reinforcement)
        self.level3 = nn.ModuleList()
        for i in range(reps_at_each_level[1]):
            self.level3.append(EESP(out_channel_map[2], out_channel_map[2], stride=1, k=K[2], r_lim=recept_limit[2]))
        self.level4_0 = DownSampler(out_channel_map[2], out_channel_map[3], k=K[2], r_lim=recept_limit[2], reinf=self.input_reinforcement)
        self.level4 = nn.ModuleList()
        for i in range(reps_at_each_level[2]):
            self.level4.append(EESP(out_channel_map[3], out_channel_map[3], stride=1, k=K[3], r_lim=recept_limit[3]))
        # Note: level5_0 has no input reinforcement.
        self.level5_0 = DownSampler(out_channel_map[3], out_channel_map[4], k=K[3], r_lim=recept_limit[3])
        self.level5 = nn.ModuleList()
        for i in range(reps_at_each_level[3]):
            self.level5.append(EESP(out_channel_map[4], out_channel_map[4], stride=1, k=K[4], r_lim=recept_limit[4]))
        # Depth-wise 3x3 then grouped 1x1 conv to expand channels.
        self.level5.append(CBR(out_channel_map[4], out_channel_map[4], 3, 1, groups=out_channel_map[4]))
        self.level5.append(CBR(out_channel_map[4], out_channel_map[5], 1, 1, groups=K[4]))
        self.classifier = nn.Linear(out_channel_map[5], num_classes)
        self.config = out_channel_map
        self.init_params()

    def init_params(self):
        '''
        Function to initialze the parameters
        '''
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if (m.bias is not None):
                    init.constant_(m.bias, 0)

    def forward(self, input, p=0.2):
        """
        :param input: the input RGB image tensor
        :param p: unused in this forward pass (kept for interface parity)
        :return: globally pooled, flattened feature vector — the
            ``classifier`` layer is NOT applied here.
        """
        out_l1 = self.level1(input)
        if (not self.input_reinforcement):
            # Drop the input early when reinforcement is disabled.
            del input
            input = None
        out_l2 = self.level2_0(out_l1, input)
        out_l3_0 = self.level3_0(out_l2, input)
        for (i, layer) in enumerate(self.level3):
            if (i == 0):
                out_l3 = layer(out_l3_0)
            else:
                out_l3 = layer(out_l3)
        out_l4_0 = self.level4_0(out_l3, input)
        for (i, layer) in enumerate(self.level4):
            if (i == 0):
                out_l4 = layer(out_l4_0)
            else:
                out_l4 = layer(out_l4)
        out_l5_0 = self.level5_0(out_l4)
        for (i, layer) in enumerate(self.level5):
            if (i == 0):
                out_l5 = layer(out_l5_0)
            else:
                out_l5 = layer(out_l5)
        # Global average pool, then flatten to (N, C).
        output_g = F.adaptive_avg_pool2d(out_l5, output_size=1)
        output_1x1 = output_g.view(output_g.size(0), (- 1))
        return output_1x1
class _InvertedResidual(nn.Module):
    """MobileNet-style inverted residual block: expand -> depthwise -> project."""

    def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor, bn_momentum=0.1):
        super(_InvertedResidual, self).__init__()
        assert stride in [1, 2]
        assert kernel_size in [3, 5]
        expanded = in_ch * expansion_factor
        # Skip connection only when the block keeps resolution and width.
        self.apply_residual = (in_ch == out_ch) and (stride == 1)
        self.layers = nn.Sequential(
            # Pointwise expansion.
            nn.Conv2d(in_ch, expanded, 1, bias=False),
            nn.BatchNorm2d(expanded, momentum=bn_momentum),
            nn.ReLU(inplace=True),
            # Depthwise convolution.
            nn.Conv2d(expanded, expanded, kernel_size, padding=(kernel_size // 2),
                      stride=stride, groups=expanded, bias=False),
            nn.BatchNorm2d(expanded, momentum=bn_momentum),
            nn.ReLU(inplace=True),
            # Linear pointwise projection (no activation).
            nn.Conv2d(expanded, out_ch, 1, bias=False),
            nn.BatchNorm2d(out_ch, momentum=bn_momentum))

    def forward(self, input):
        out = self.layers(input)
        return out + input if self.apply_residual else out
def _stack(in_ch, out_ch, kernel_size, stride, exp_factor, repeats, bn_momentum):
    """Build a stack of `repeats` inverted residuals.

    Only the first block may change the channel count or downsample; the
    remaining blocks are stride-1 with out_ch -> out_ch.
    """
    assert repeats >= 1
    blocks = [_InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor,
                                bn_momentum=bn_momentum)]
    blocks.extend(
        _InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor,
                          bn_momentum=bn_momentum)
        for _ in range(repeats - 1))
    return nn.Sequential(*blocks)
def _round_to_multiple_of(val, divisor, round_up_bias=0.9): ' Asymmetric rounding to make `val` divisible by `divisor`. With default\n bias, will round up, unless the number is no more than 10% greater than the\n smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88. ' assert (0.0 < round_up_bias < 1.0) new_val = max(divisor, ((int((val + (divisor / 2))) // divisor) * divisor)) return (new_val if (new_val >= (round_up_bias * val)) else (new_val + divisor))
def _get_depths(alpha):
    """Scale the reference MNASNet channel depths by `alpha`.

    Matches the reference MobileNet depth scaling: each depth is rounded to
    a multiple of 8, preferring to round up.
    """
    base_depths = (32, 16, 24, 40, 80, 96, 192, 320)
    return [_round_to_multiple_of(alpha * d, 8) for d in base_depths]
class MNASNet(torch.nn.Module):
    """MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf. This
    implements the B1 variant of the model.

    NOTE(review): the doctest below reflects the upstream classifier-head
    usage; in THIS file forward() returns pooled (B, 1280) features and never
    applies self.classifier, so y.nelement() would be 1280 -- confirm intent.
    >>> model = MNASNet(1000, 1.0)
    >>> x = torch.rand(1, 3, 224, 224)
    >>> y = model(x)
    >>> y.dim()
    1
    >>> y.nelement()
    1000
    """
    # Version 2 scales the stem with alpha; version-1 checkpoints are
    # upgraded in _load_from_state_dict below.
    _version = 2

    def __init__(self, alpha, num_classes=1000, dropout=0.2):
        super(MNASNet, self).__init__()
        assert (alpha > 0.0)
        self.alpha = alpha
        self.num_classes = num_classes
        depths = _get_depths(alpha)
        layers = [
            # Stem: strided conv -> depthwise conv -> pointwise projection.
            nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
            nn.Conv2d(depths[0], depths[0], 3, padding=1, stride=1, groups=depths[0], bias=False),
            nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
            nn.Conv2d(depths[0], depths[1], 1, padding=0, stride=1, bias=False),
            nn.BatchNorm2d(depths[1], momentum=_BN_MOMENTUM),
            # Body: stacks of inverted residuals
            # (args: in_ch, out_ch, kernel, stride, expansion, repeats, bn_momentum).
            _stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
            _stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM),
            _stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM),
            _stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM),
            _stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM),
            _stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM),
            # Head: 1x1 expansion to 1280 channels.
            nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False),
            nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True)]
        self.layers = nn.Sequential(*layers)
        # NOTE(review): classifier is constructed but never applied in
        # forward() below -- this model is used here as a feature extractor.
        self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True), nn.Linear(1280, num_classes))
        self._initialize_weights()
        self.last_channel = 1280

    def forward(self, x):
        # Global average pool over the spatial dims; returns (B, 1280)
        # features (the classifier head is not applied).
        x = self.layers(x)
        x = x.mean([2, 3])
        return x

    def _initialize_weights(self):
        # Kaiming init for convs, unit gamma / zero beta for BatchNorm, and
        # kaiming-uniform for the linear head.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_uniform_(m.weight, mode='fan_out', nonlinearity='sigmoid')
                nn.init.zeros_(m.bias)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Backwards-compatible loader for version-1 checkpoints.

        V1 checkpoints used a fixed 32/16-channel stem regardless of alpha;
        when loading such a checkpoint with alpha != 1.0, the first nine
        layers are rebuilt to the v1 shapes so the stored weights fit.
        """
        version = local_metadata.get('version', None)
        assert (version in [1, 2])
        if ((version == 1) and (not (self.alpha == 1.0))):
            depths = _get_depths(self.alpha)
            v1_stem = [
                nn.Conv2d(3, 32, 3, padding=1, stride=2, bias=False),
                nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
                nn.ReLU(inplace=True),
                nn.Conv2d(32, 32, 3, padding=1, stride=1, groups=32, bias=False),
                nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
                nn.ReLU(inplace=True),
                nn.Conv2d(32, 16, 1, padding=0, stride=1, bias=False),
                nn.BatchNorm2d(16, momentum=_BN_MOMENTUM),
                _stack(16, depths[2], 3, 2, 3, 3, _BN_MOMENTUM)]
            for (idx, layer) in enumerate(v1_stem):
                self.layers[idx] = layer
            # Record the downgrade so subsequent saves stay consistent.
            self._version = 1
            warnings.warn('A new version of MNASNet model has been implemented. Your checkpoint was saved using the previous version. This checkpoint will load and work as before, but you may want to upgrade by training a newer model or transfer learning from an updated ImageNet checkpoint.', UserWarning)
        super(MNASNet, self)._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
def _make_divisible(v, divisor, min_value=None): '\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n ' if (min_value is None): min_value = divisor new_v = max(min_value, ((int((v + (divisor / 2))) // divisor) * divisor)) if (new_v < (0.9 * v)): new_v += divisor return new_v
class ConvBNReLU(nn.Sequential):
    """Conv2d (no bias) + BatchNorm2d + ReLU6, with 'same'-style padding for odd kernels."""

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        same_pad = (kernel_size - 1) // 2
        conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride, same_pad,
                         groups=groups, bias=False)
        super(ConvBNReLU, self).__init__(
            conv,
            nn.BatchNorm2d(out_planes),
            nn.ReLU6(inplace=True))
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual block with a linear bottleneck."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        hidden_dim = int(round(inp * expand_ratio))
        # Identity shortcut only when resolution and width are preserved.
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        layers = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion.
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        # Depthwise conv followed by a linear (no activation) projection.
        layers += [
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup)]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            out = x + out
        return out
class MobileNetV2(nn.Module):
    def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8, block=None):
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
                Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet

        """
        super(MobileNetV2, self).__init__()
        if (block is None):
            block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        if (inverted_residual_setting is None):
            # Each row: t (expand ratio), c (channels), n (repeats), s (stride).
            inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        if ((len(inverted_residual_setting) == 0) or (len(inverted_residual_setting[0]) != 4)):
            raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting))
        # Scale channel counts by the width multiplier, keeping divisibility.
        input_channel = _make_divisible((input_channel * width_mult), round_nearest)
        self.last_channel = _make_divisible((last_channel * max(1.0, width_mult)), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2)]
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = _make_divisible((c * width_mult), round_nearest)
            for i in range(n):
                # Only the first block of each group downsamples.
                stride = (s if (i == 0) else 1)
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # Final 1x1 conv up to last_channel.
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        self.features = nn.Sequential(*features)
        # NOTE(review): classifier is constructed but _forward_impl below
        # returns pooled features without applying it -- this model is used
        # here as a feature extractor; confirm intent.
        self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes))
        # Weight initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x):
        # Global average pool -> (B, last_channel) feature vector.
        x = self.features(x)
        x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], (- 1))
        return x

    def forward(self, x):
        return self._forward_impl(x)
class MIModel(torch.nn.Module):
    """
    Holistic Attention Network for multiple-instance classification.

    Words (patch features) are pooled into bags, bags into a slide-level
    vector, which is classified into one of `n_classes`.
    """

    def __init__(self, n_classes, cnn_feature_sz, out_features, num_bags_words, num_heads=2, dropout=0.4, attn_type='l2', attn_dropout=0.2, attn_fn='tanh', *args, **kwargs):
        super(MIModel, self).__init__()
        # Project raw CNN word features into the attention dimension.
        self.project_cnn_words = nn.Linear(cnn_feature_sz, out_features)
        self.attn_ovr_words = SelfAttention(in_dim=out_features, num_heads=num_heads, p=dropout)
        self.attn_ovr_bags = ContextualAttention(in_dim=out_features, num_heads=num_heads, p=dropout)
        self.attn_dropout = nn.Dropout(p=attn_dropout)
        # FFNs for words->bags (self-attended and raw-CNN paths) and bags->slide.
        self.ffn_w2b_sa = FFN(input_dim=out_features, scale=2, p=dropout)
        self.ffn_w2b_cnn = FFN(input_dim=out_features, scale=2, p=dropout)
        self.ffn_b2s = FFN(input_dim=out_features, scale=2, p=dropout)
        # Learned re-weighting of token energies in energy_function; assumes
        # the trailing token dimension equals num_bags_words ** 2 -- TODO confirm.
        self.bag_word_wt = nn.Linear((num_bags_words ** 2), (num_bags_words ** 2), bias=False)
        self.classifier = nn.Linear(out_features, n_classes)
        self.attn_fn = None
        if (attn_fn == 'softmax'):
            self.attn_fn = nn.Softmax(dim=(- 1))
        elif (attn_fn == 'sigmoid'):
            self.attn_fn = nn.Sigmoid()
        elif (attn_fn == 'tanh'):
            self.attn_fn = nn.Tanh()
        else:
            raise ValueError('Attention function = {} not yet supported'.format(attn_fn))
        self.attn_type = attn_type
        self.reset_params()

    def reset_params(self):
        """
        Function to initialize the parameters.

        Kaiming init for convs, unit gamma for norm layers, Xavier for linears.
        """
        from torch.nn import init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    init.constant_(m.bias, 0.0)
            elif (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.LayerNorm)):
                init.constant_(m.weight, 1.0)
                if (m.bias is not None):
                    init.constant_(m.bias, 0.0)
            elif isinstance(m, nn.Linear):
                init.xavier_uniform_(m.weight)
                if (m.bias is not None):
                    init.constant_(m.bias, 0.0)

    def energy_function(self, x, need_attn=False):
        """Reduce each token's feature vector to a scalar attention energy.

        Uses the L1/L2 norm or plain sum over the last dim (per attn_type),
        averaged by the feature count, re-weighted by `bag_word_wt`, then
        squashed with `attn_fn`.

        :param x: tensor whose last dimension is the feature dimension
        :param need_attn: if True, also return the pre-weighting energies
        :return: (dropout-regularized weights with an extra dim at -2,
                  unnormalized energies or None)
        """
        N = x.size((- 1))
        if (self.attn_type == 'l1'):
            x = torch.norm(x, p=1, dim=(- 1))
        elif (self.attn_type == 'l2'):
            x = torch.norm(x, p=2, dim=(- 1))
        else:
            # Fallback: plain sum over the feature dimension.
            x = torch.sum(x, dim=(- 1))
        # Normalize by the feature count.
        x = torch.div(x, N)
        energy: Optional[Tensor] = None
        if need_attn:
            energy = x
        x = self.bag_word_wt(x)
        # unsqueeze(-2) so the weights can matmul-pool the token axis.
        x = self.attn_fn(x).unsqueeze(dim=(- 2))
        return (self.attn_dropout(x), energy)

    def forward(self, words, *args, **kwargs):
        """
        :param words: Tensor of shape (B x N_b x N_w x F), where F is CNN dimension
        :param need_attn: boolean indicating if attention weights are required or not
        :return: A B x C_d vector, where C_d is the number of diagnostic classes
        """
        need_attn = kwargs.get('need_attn', False)
        words_cnn = self.project_cnn_words(words)
        # Path 1: self-attention over words, then energy-weighted pooling to bags.
        (words_self_attn, w2w_attn_wts_unnorm) = self.attn_ovr_words(words_cnn, need_attn=need_attn)
        (words_sa_energy, words_sa_energy_unnorm) = self.energy_function(words_self_attn, need_attn=need_attn)
        bags_from_words_self_attn = torch.matmul(words_sa_energy, words_self_attn).squeeze((- 2))
        bags_from_words_self_attn = self.ffn_w2b_sa(bags_from_words_self_attn)
        # Path 2: energy-weighted pooling of the raw projected CNN features.
        (words_cnn_energy, words_cnn_energy_unnorm) = self.energy_function(words_cnn, need_attn=need_attn)
        bags_from_words_cnn = torch.matmul(words_cnn_energy, words_cnn).squeeze((- 2))
        bags_from_words_cnn = self.ffn_w2b_cnn(bags_from_words_cnn)
        # Contextual attention across the two bag representations.
        (bags_self_attn, b2b_attn_wts_unnorm) = self.attn_ovr_bags(bags_from_words_cnn, bags_from_words_self_attn, need_attn=need_attn)
        # Pool bags into a single slide-level vector and classify.
        (bags_energy, bags_energy_unnorm) = self.energy_function(bags_self_attn, need_attn=need_attn)
        bags_to_slide = torch.matmul(bags_energy, bags_self_attn).squeeze((- 2))
        out = self.ffn_b2s(bags_to_slide)
        out = self.classifier(out)
        if need_attn:
            # Combine the unnormalized energies from both word paths.
            words_energy_unnorm = (words_sa_energy_unnorm + words_cnn_energy_unnorm)
            attn_scores = AttentionScores(w2w_self_attn=w2w_attn_wts_unnorm, b2b_self_attn=b2b_attn_wts_unnorm, word_scores=words_energy_unnorm, bag_scores=bags_energy_unnorm)
            return (out, attn_scores)
        else:
            return out
class SelfAttention(nn.Module):
    """Pre-norm transformer block: multi-head self-attention plus an FFN,
    each with a residual connection."""

    def __init__(self, in_dim, num_heads=8, p=0.1, *args, **kwargs):
        super(SelfAttention, self).__init__()
        self.self_attn = MultiHeadAttn(input_dim=in_dim, out_dim=in_dim,
                                       num_heads=num_heads)
        self.ffn = FFN(in_dim, scale=4, p=p, expansion=True)
        self.layer_norm_1 = nn.LayerNorm(in_dim, eps=1e-06)
        self.drop = nn.Dropout(p=p)

    def forward(self, x, need_attn=False):
        """Self-attend over `x`.

        :param x: input (bags or words)
        :param need_attn: whether attention weights should be returned
        :return: (transformed output, attention weights or None)
        """
        normed = self.layer_norm_1(x)
        context, attn = self.self_attn(normed, normed, normed,
                                       need_attn=need_attn)
        residual = x + self.drop(context)
        return self.ffn(residual), attn
class ContextualAttention(torch.nn.Module):
    """Contextual (cross) attention between two streams.

    Used e.g. for bag-to-bag attention where one set of bags comes directly
    from the CNN and the other from self-attention: `input` is first
    self-attended to form the query, then attends over `context`
    (keys/values from `context`, queries from the self-attended input).
    """

    def __init__(self, in_dim, num_heads=8, p=0.1, *args, **kwargs):
        super(ContextualAttention, self).__init__()
        self.self_attn = MultiHeadAttn(input_dim=in_dim, out_dim=in_dim, num_heads=num_heads)
        self.context_norm = nn.LayerNorm(in_dim)
        self.context_attn = MultiHeadAttn(input_dim=in_dim, out_dim=in_dim, num_heads=num_heads)
        self.ffn = FFN(in_dim, scale=4, p=p, expansion=True)
        self.input_norm = nn.LayerNorm(in_dim, eps=1e-06)
        self.query_norm = nn.LayerNorm(in_dim, eps=1e-06)
        self.drop = nn.Dropout(p=p)

    def forward(self, input, context, need_attn=False):
        """
        :param input: Tensor of shape (B x N_b x N_w x dim) or (B x N_b x dim)
        :param context: Tensor of the same trailing dim as `input`
        :return: (output, contextual attention weights or None)
        """
        normed_in = self.input_norm(input)
        attended, _ = self.self_attn(normed_in, normed_in, normed_in,
                                     need_attn=need_attn)
        query = self.drop(attended) + input
        ctx = self.context_norm(context)
        mid, contextual_attn = self.context_attn(ctx, ctx,
                                                 self.query_norm(query),
                                                 need_attn=need_attn)
        # The final residual connects back to the ORIGINAL input, not the query.
        output = self.ffn(self.drop(mid) + input)
        return (output, contextual_attn)
class EESP(nn.Module):
    """EESP unit, built on the principle REDUCE -> SPLIT -> TRANSFORM -> MERGE.

    A grouped 1x1 conv reduces channels, k parallel dilated depthwise convs
    transform them, and hierarchical feature fusion merges the branches.
    """

    def __init__(self, nIn, nOut, stride=1, k=4, r_lim=7, down_method='esp'):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param stride: 2 to down-sample the feature map, else 1
        :param k: number of parallel branches
        :param r_lim: maximum receptive field allowed for the EESP block
        :param down_method: 'avg' or 'esp' down-sampling behaviour
        """
        super().__init__()
        self.stride = stride
        branch_ch = int(nOut / k)
        remainder_ch = nOut - ((k - 1) * branch_ch)
        assert down_method in ['avg', 'esp'], 'One of these is suppported (avg or esp)'
        assert branch_ch == remainder_ch, 'n(={}) and n1(={}) should be equal for Depth-wise Convolution '.format(branch_ch, remainder_ch)
        # Reduce: grouped 1x1 projection down to the per-branch width.
        self.proj_1x1 = CBR(nIn, branch_ch, 1, stride=1, groups=k)
        # Dilation rate that realizes each effective kernel size with a 3x3
        # depthwise convolution.
        map_receptive_ksize = {3: 1, 5: 2, 7: 3, 9: 4, 11: 5, 13: 6, 15: 7, 17: 8}
        # Effective kernel per branch: 3, 5, 7, ... capped by r_lim (fall back to 3).
        self.k_sizes = sorted(
            (3 + 2 * i) if (3 + 2 * i) <= r_lim else 3 for i in range(k))
        self.spp_dw = nn.ModuleList(
            CDilated(branch_ch, branch_ch, kSize=3, stride=stride,
                     groups=branch_ch, d=map_receptive_ksize[ks])
            for ks in self.k_sizes)
        self.conv_1x1_exp = CB(nOut, nOut, 1, 1, groups=k)
        self.br_after_cat = BR(nOut)
        self.module_act = nn.PReLU(nOut)
        self.downAvg = (down_method == 'avg')

    def forward(self, input):
        """
        :param input: input feature map
        :return: transformed feature map
        """
        reduced = self.proj_1x1(input)
        # Hierarchical feature fusion: each branch output is added to the
        # previous branch's (cumulative) output before concatenation.
        branch_outs = [self.spp_dw[0](reduced)]
        for idx in range(1, len(self.spp_dw)):
            branch_outs.append(self.spp_dw[idx](reduced) + branch_outs[idx - 1])
        merged = self.conv_1x1_exp(self.br_after_cat(torch.cat(branch_outs, 1)))
        del branch_outs
        # Avg-style downsampling skips the residual and final activation.
        if self.stride == 2 and self.downAvg:
            return merged
        if merged.size() == input.size():
            merged = merged + input
        return self.module_act(merged)
class DownSampler(nn.Module):
    """Down-sampling unit with three parallel branches: (1) average pooling,
    (2) an EESP block with stride 2, and (3) an efficient long-range
    connection with the original input. Branches (1) and (2) are
    concatenated, then additively fused with (3)."""

    def __init__(self, nin, nout, k=4, r_lim=9, reinf=True):
        """
        :param nin: number of input channels
        :param nout: number of output channels
        :param k: number of parallel branches
        :param r_lim: maximum receptive field allowed for the EESP block
        :param reinf: whether to use the long-range input shortcut
        """
        super().__init__()
        # EESP produces nout - nin channels; the avg-pool branch supplies nin.
        eesp_out_ch = nout - nin
        self.eesp = EESP(nin, eesp_out_ch, stride=2, k=k, r_lim=r_lim,
                         down_method='avg')
        self.avg = nn.AvgPool2d(kernel_size=3, padding=1, stride=2)
        if reinf:
            # config_inp_reinf is a module-level global configured elsewhere
            # in this file.
            self.inp_reinf = nn.Sequential(
                CBR(config_inp_reinf, config_inp_reinf, 3, 1),
                CB(config_inp_reinf, nout, 1, 1))
        self.act = nn.PReLU(nout)

    def forward(self, input, input2=None):
        """
        :param input: input feature map
        :param input2: optional full-resolution input for reinforcement
        :return: feature map down-sampled by a factor of 2
        """
        pooled = self.avg(input)
        eesp_out = self.eesp(input)
        merged = torch.cat([pooled, eesp_out], 1)
        if input2 is not None:
            # Repeatedly average-pool the full-resolution input until its
            # spatial size matches the merged feature map.
            target_w = pooled.size(2)
            while True:
                input2 = F.avg_pool2d(input2, kernel_size=3, padding=1, stride=2)
                if input2.size(2) == target_w:
                    break
            merged = merged + self.inp_reinf(input2)
        return self.act(merged)
class CBR(nn.Module):
    """Convolution followed by batch normalization and PReLU activation."""

    def __init__(self, nIn, nOut, kSize, stride=1, groups=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: stride for down-sampling (default 1)
        :param groups: number of convolution groups
        """
        super().__init__()
        same_pad = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride,
                              padding=same_pad, bias=False, groups=groups)
        self.bn = nn.BatchNorm2d(nOut)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        """Return act(bn(conv(input)))."""
        return self.act(self.bn(self.conv(input)))
class BR(nn.Module):
    """Batch normalization followed by PReLU activation."""

    def __init__(self, nOut):
        """
        :param nOut: number of feature maps to normalize
        """
        super().__init__()
        self.bn = nn.BatchNorm2d(nOut)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        """Return act(bn(input))."""
        return self.act(self.bn(input))
class CB(nn.Module):
    """Convolution followed by batch normalization (no activation)."""

    def __init__(self, nIn, nOut, kSize, stride=1, groups=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride for down-sampling
        :param groups: number of convolution groups
        """
        super().__init__()
        same_pad = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride,
                              padding=same_pad, bias=False, groups=groups)
        self.bn = nn.BatchNorm2d(nOut)

    def forward(self, input):
        """Return bn(conv(input))."""
        return self.bn(self.conv(input))
class C(nn.Module):
    """A plain convolutional layer (no normalization or activation)."""

    def __init__(self, nIn, nOut, kSize, stride=1, groups=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride for down-sampling
        :param groups: number of convolution groups
        """
        super().__init__()
        same_pad = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride,
                              padding=same_pad, bias=False, groups=groups)

    def forward(self, input):
        """Return conv(input)."""
        return self.conv(input)
class CDilated(nn.Module):
    """A dilated convolutional layer (padding scaled so output size is preserved at stride 1)."""

    def __init__(self, nIn, nOut, kSize, stride=1, d=1, groups=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride for down-sampling
        :param d: optional dilation rate
        :param groups: number of convolution groups
        """
        super().__init__()
        dilated_pad = ((kSize - 1) // 2) * d
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride,
                              padding=dilated_pad, bias=False,
                              dilation=d, groups=groups)

    def forward(self, input):
        """Return conv(input)."""
        return self.conv(input)
class CDilatedB(nn.Module):
    """A dilated convolution followed by batch normalization."""

    def __init__(self, nIn, nOut, kSize, stride=1, d=1, groups=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride for down-sampling
        :param d: optional dilation rate
        :param groups: number of convolution groups
        """
        super().__init__()
        dilated_pad = ((kSize - 1) // 2) * d
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride,
                              padding=dilated_pad, bias=False,
                              dilation=d, groups=groups)
        self.bn = nn.BatchNorm2d(nOut)

    def forward(self, input):
        """Return bn(conv(input))."""
        return self.bn(self.conv(input))
class FFN(nn.Module):
    """Position-wise feed-forward network with pre-layer-norm.

    Projects to `input_dim * scale` (expansion=True) or `input_dim // scale`
    features, applies ReLU + dropout, projects to `output_dim`, and adds a
    residual connection when input and output widths match.
    """

    def __init__(self, input_dim, scale, output_dim=None, p=0.1, expansion=False):
        super(FFN, self).__init__()
        if output_dim is None:
            output_dim = input_dim
        if expansion:
            proj_features = input_dim * scale
        else:
            proj_features = input_dim // scale
        self.w_1 = nn.Linear(input_dim, proj_features)
        self.w_2 = nn.Linear(proj_features, output_dim)
        self.layer_norm = nn.LayerNorm(input_dim, eps=1e-06)
        self.dropout_1 = nn.Dropout(p)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(p)
        # Residual only when widths line up.
        self.residual = input_dim == output_dim

    def forward(self, x):
        """Layer definition.
        Args:
            x: ``(batch_size, input_len, model_dim)``
        Returns:
            (FloatTensor): Output ``(batch_size, input_len, model_dim)``.
        """
        hidden = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
        out = self.dropout_2(self.w_2(hidden))
        if self.residual:
            out = out + x
        return out
class MultiHeadAttn(torch.nn.Module):
    """Multi-head scaled dot-product attention.

    Accepts 3-D inputs (B x T x d) or 4-D inputs (B x N_b x N_w x d); 4-D
    inputs are flattened to one token axis and the output is reshaped back.
    """

    def __init__(self, input_dim, out_dim, num_heads=8, dropout=0.1, *args, **kwargs):
        super(MultiHeadAttn, self).__init__()
        assert (input_dim % num_heads) == 0
        self.num_heads = num_heads
        self.dim_per_head = input_dim // num_heads
        self.linear_keys = nn.Linear(input_dim, num_heads * self.dim_per_head)
        self.linear_values = nn.Linear(input_dim, num_heads * self.dim_per_head)
        self.linear_query = nn.Linear(input_dim, num_heads * self.dim_per_head)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)
        self.final_linear = nn.Linear(input_dim, out_dim)
        # 1/sqrt(d_head) scaling applied to the queries.
        self.scaling_factor = math.sqrt(self.dim_per_head)

    def forward(self, key, value, query, need_attn=False):
        """
        :param key: A tensor of shape [B x N_b x N_w x d] or [B x N_b x d]
        :param value: A tensor of shape [B x N_b x N_w x d] or [B x N_b x d]
        :param query: A tensor of shape [B x N_b x N_w x d] or [B x N_b x d]
        :param need_attn: Need attention weights or not
        :return: Tuple containing output and mean attention scores across all heads (optional)
            Output size is [B x N_b x N_w x d'] or [B x N_b x d']
            Attention score size is [B x N_b*N_w x N_b*N_w] or [B x N_b x N_b]
        """
        dim_size = key.size()
        reshape = False
        if key.dim() == 4:
            # Flatten (N_b, N_w) into a single token axis.
            # BUG FIX: previously `value` and `query` were overwritten with
            # views of `key`, silently discarding the real value/query
            # tensors for 4-D inputs.
            key = key.view(dim_size[0], -1, dim_size[3])
            value = value.view(dim_size[0], -1, dim_size[3])
            query = query.view(dim_size[0], -1, dim_size[3])
            reshape = True
        batch_size = key.size(0)
        dim_per_head = self.dim_per_head
        head_count = self.num_heads
        key = self.linear_keys(key)
        value = self.linear_values(value)
        query = self.linear_query(query)
        query = query / self.scaling_factor
        # Split into heads: (B, heads, T, d_head).
        key = key.contiguous().view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)
        value = value.contiguous().view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)
        query = query.contiguous().view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)
        scores = torch.matmul(query, key.transpose(2, 3)).float()
        attn = self.softmax(scores).to(query.dtype)
        drop_attn = self.dropout(attn)
        context = torch.matmul(drop_attn, value)
        # Merge heads back: (B, T, heads * d_head).
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head)
        output = self.final_linear(context)
        attn_scores: Optional[Tensor] = None
        if need_attn:
            # Mean of the (unnormalized) scores across heads.
            attn_scores = torch.mean(scores, dim=1)
        if reshape:
            # Restore the original (B, N_b, N_w, d') layout.
            output = output.contiguous().view(dim_size[0], dim_size[1], dim_size[2], -1).contiguous()
        return (output, attn_scores)
def features_from_cnn(input_words, cnn_model, max_bsz_cnn_gpu0, num_gpus, device):
    """Run `cnn_model` over every word patch, in memory-bounded chunks.

    BUG FIX / generalization: the original unpacked the height twice
    (`word_height, word_height`) and viewed with (H, H), which silently
    assumed square patches and crashed on rectangular ones; width is now
    tracked separately.

    :param input_words: (B, N_bags, N_words, C, H, W) tensor of patches
    :param cnn_model: callable mapping an (N, C, H, W) batch to (N, F) features
    :param max_bsz_cnn_gpu0: max patches per forward pass on one GPU
    :param num_gpus: number of GPUs; 0 means one patch per forward pass
    :param device: device the CNN runs on
    :return: detached (B, N_bags, N_words, F) feature tensor on CPU
    """
    (batch_size, num_bags, num_words, word_channels,
     word_height, word_width) = input_words.size()
    input_words = input_words.contiguous().view(-1, word_channels,
                                                word_height, word_width)
    with torch.no_grad():
        b_sz = input_words.size(0)
        # Chunk starts: at most max_bsz_cnn_gpu0 * num_gpus patches in flight.
        step = (max_bsz_cnn_gpu0 * num_gpus) if num_gpus > 0 else 1
        indexes = np.arange(0, b_sz, step)
        cnn_outputs = []
        for i in range(len(indexes)):
            start = indexes[i]
            end = indexes[i + 1] if i < (len(indexes) - 1) else b_sz
            batch = input_words[start:end].to(device)
            # Move results back to CPU so GPU memory is bounded by one chunk.
            cnn_outputs.append(cnn_model(batch).cpu())
        cnn_outputs = torch.cat(cnn_outputs, dim=0)
        cnn_outputs = cnn_outputs.contiguous().view(batch_size, num_bags,
                                                    num_words, -1)
    return cnn_outputs.detach()
def prediction(words, cnn_model, mi_model, max_bsz_cnn_gpu0, num_gpus, device, *args, **kwargs):
    """End-to-end inference: extract per-word CNN features, then run the MI model.

    Extra positional/keyword arguments are forwarded to `mi_model`.
    """
    feats = features_from_cnn(input_words=words,
                              cnn_model=cnn_model,
                              max_bsz_cnn_gpu0=max_bsz_cnn_gpu0,
                              num_gpus=num_gpus,
                              device=device)
    return mi_model(feats.to(device=device), *args, **kwargs)