import torch
from collections import OrderedDict

from basicsr.archs import build_network
from basicsr.losses import build_loss
from basicsr.utils import get_root_logger
from basicsr.utils.registry import MODEL_REGISTRY
from .sr_model import SRModel


@MODEL_REGISTRY.register()
class SRGANModel(SRModel):
    """SRGAN model for single image super-resolution."""

    def init_training_settings(self):
        train_opt = self.opt['train']

        self.ema_decay = train_opt.get('ema_decay', 0)
        if self.ema_decay > 0:
            logger = get_root_logger()
            logger.info(f'Use Exponential Moving Average with decay: {self.ema_decay}')
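            # define an EMA copy of net_g; it is used only for testing and saving,
            # so it is not wrapped with DistributedDataParallel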
            self.net_g_ema = build_network(self.opt['network_g']).to(self.device)
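            # load pretrained weights into the EMA copy if available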
            load_path = self.opt['path'].get('pretrain_network_g', None)
            if load_path is not None:
                self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema')
            else:
                self.model_ema(0)  # copy the current net_g weights into net_g_ema
            self.net_g_ema.eval()

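        # define network net_d (the discriminator)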
        self.net_d = build_network(self.opt['network_d'])
        self.net_d = self.model_to_device(self.net_d)
        self.print_network(self.net_d)

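        # load pretrained discriminator weights if a path is provided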
        load_path = self.opt['path'].get('pretrain_network_d', None)
        if load_path is not None:
            param_key = self.opt['path'].get('param_key_d', 'params')
            self.load_network(self.net_d, load_path, self.opt['path'].get('strict_load_d', True), param_key)

        self.net_g.train()
        self.net_d.train()

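        # define losses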
        if train_opt.get('pixel_opt'):
            self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device)
        else:
            self.cri_pix = None

        if train_opt.get('ldl_opt'):
            self.cri_ldl = build_loss(train_opt['ldl_opt']).to(self.device)
        else:
            self.cri_ldl = None

        if train_opt.get('perceptual_opt'):
            self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device)
        else:
            self.cri_perceptual = None

        if train_opt.get('gan_opt'):
            self.cri_gan = build_loss(train_opt['gan_opt']).to(self.device)

        # net_g is updated only every `net_d_iters` iterations and only after
        # the first `net_d_init_iters` iterations (see optimize_parameters)
        self.net_d_iters = train_opt.get('net_d_iters', 1)
        self.net_d_init_iters = train_opt.get('net_d_init_iters', 0)

        self.setup_optimizers()
        self.setup_schedulers()

    def setup_optimizers(self):
        train_opt = self.opt['train']
        # optimizer for the generator (net_g)
        optim_type = train_opt['optim_g'].pop('type')
        self.optimizer_g = self.get_optimizer(optim_type, self.net_g.parameters(), **train_opt['optim_g'])
        self.optimizers.append(self.optimizer_g)
        # optimizer for the discriminator (net_d)
        optim_type = train_opt['optim_d'].pop('type')
        self.optimizer_d = self.get_optimizer(optim_type, self.net_d.parameters(), **train_opt['optim_d'])
        self.optimizers.append(self.optimizer_d)

    def optimize_parameters(self, current_iter):
        # optimize net_g: freeze the discriminator so only generator gradients are computed
        for p in self.net_d.parameters():
            p.requires_grad = False

        self.optimizer_g.zero_grad()
        self.output = self.net_g(self.lq)

        l_g_total = 0
        loss_dict = OrderedDict()
        if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters):
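            # pixel loss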
            if self.cri_pix:
                l_g_pix = self.cri_pix(self.output, self.gt)
                l_g_total += l_g_pix
                loss_dict['l_g_pix'] = l_g_pix
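            # perceptual loss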
            if self.cri_perceptual:
                l_g_percep, l_g_style = self.cri_perceptual(self.output, self.gt)
                if l_g_percep is not None:
                    l_g_total += l_g_percep
                    loss_dict['l_g_percep'] = l_g_percep
                if l_g_style is not None:
                    l_g_total += l_g_style
                    loss_dict['l_g_style'] = l_g_style
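            # gan loss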
            fake_g_pred = self.net_d(self.output)
            l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False)
            l_g_total += l_g_gan
            loss_dict['l_g_gan'] = l_g_gan

            l_g_total.backward()
            self.optimizer_g.step()

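        # optimize net_d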
        for p in self.net_d.parameters():
            p.requires_grad = True

        self.optimizer_d.zero_grad()
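        # real samples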
        real_d_pred = self.net_d(self.gt)
        l_d_real = self.cri_gan(real_d_pred, True, is_disc=True)
        loss_dict['l_d_real'] = l_d_real
        loss_dict['out_d_real'] = torch.mean(real_d_pred.detach())
        l_d_real.backward()
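        # fake samples (detach the generator output so no gradients flow back to net_g)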
        fake_d_pred = self.net_d(self.output.detach())
        l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True)
        loss_dict['l_d_fake'] = l_d_fake
        loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach())
        l_d_fake.backward()
        self.optimizer_d.step()

        self.log_dict = self.reduce_loss_dict(loss_dict)

        if self.ema_decay > 0:
            self.model_ema(decay=self.ema_decay)  # update the EMA copy of net_g

    def save(self, epoch, current_iter):
        if hasattr(self, 'net_g_ema'):
            self.save_network([self.net_g, self.net_g_ema], 'net_g', current_iter, param_key=['params', 'params_ema'])
        else:
            self.save_network(self.net_g, 'net_g', current_iter)
        self.save_network(self.net_d, 'net_d', current_iter)
        self.save_training_state(epoch, current_iter)